Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 3
-rw-r--r--  drivers/acpi/Kconfig | 8
-rw-r--r--  drivers/acpi/acpi_lpss.c | 26
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 17
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 10
-rw-r--r--  drivers/acpi/acpica/dbmethod.c | 8
-rw-r--r--  drivers/acpi/acpica/dbxface.c | 10
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 34
-rw-r--r--  drivers/acpi/acpica/hwregs.c | 9
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 10
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 20
-rw-r--r--  drivers/acpi/acpica/nssearch.c | 1
-rw-r--r--  drivers/acpi/acpica/psloop.c | 48
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 4
-rw-r--r--  drivers/acpi/acpica/utdelete.c | 7
-rw-r--r--  drivers/acpi/acpica/uterror.c | 6
-rw-r--r--  drivers/acpi/acpica/utstrsuppt.c | 26
-rw-r--r--  drivers/acpi/acpica/utstrtoul64.c | 2
-rw-r--r--  drivers/acpi/arm64/iort.c | 48
-rw-r--r--  drivers/acpi/battery.c | 78
-rw-r--r--  drivers/acpi/bus.c | 40
-rw-r--r--  drivers/acpi/button.c | 17
-rw-r--r--  drivers/acpi/ec.c | 16
-rw-r--r--  drivers/acpi/nfit/core.c | 50
-rw-r--r--  drivers/acpi/nfit/nfit.h | 1
-rw-r--r--  drivers/acpi/osi.c | 8
-rw-r--r--  drivers/acpi/osl.c | 72
-rw-r--r--  drivers/acpi/pmic/intel_pmic_crc.c | 109
-rw-r--r--  drivers/acpi/pptt.c | 10
-rw-r--r--  drivers/acpi/processor_core.c | 1
-rw-r--r--  drivers/acpi/property.c | 209
-rw-r--r--  drivers/acpi/scan.c | 23
-rw-r--r--  drivers/acpi/sleep.c | 30
-rw-r--r--  drivers/acpi/x86/utils.c | 22
-rw-r--r--  drivers/android/Kconfig | 2
-rw-r--r--  drivers/android/binder.c | 9
-rw-r--r--  drivers/android/binder_alloc.c | 43
-rw-r--r--  drivers/android/binder_trace.h | 7
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci.c | 98
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/ahci_brcm.c | 2
-rw-r--r--  drivers/ata/ahci_ceva.c | 2
-rw-r--r--  drivers/ata/ahci_da850.c | 2
-rw-r--r--  drivers/ata/ahci_dm816.c | 2
-rw-r--r--  drivers/ata/ahci_imx.c | 2
-rw-r--r--  drivers/ata/ahci_mtk.c | 2
-rw-r--r--  drivers/ata/ahci_mvebu.c | 4
-rw-r--r--  drivers/ata/ahci_platform.c | 4
-rw-r--r--  drivers/ata/ahci_qoriq.c | 2
-rw-r--r--  drivers/ata/ahci_seattle.c | 2
-rw-r--r--  drivers/ata/ahci_st.c | 2
-rw-r--r--  drivers/ata/ahci_sunxi.c | 2
-rw-r--r--  drivers/ata/ahci_tegra.c | 2
-rw-r--r--  drivers/ata/ahci_xgene.c | 2
-rw-r--r--  drivers/ata/libahci.c | 34
-rw-r--r--  drivers/ata/libahci_platform.c | 49
-rw-r--r--  drivers/ata/libata-core.c | 9
-rw-r--r--  drivers/ata/libata-eh.c | 41
-rw-r--r--  drivers/ata/libata-scsi.c | 41
-rw-r--r--  drivers/ata/libata-sff.c | 30
-rw-r--r--  drivers/ata/libata.h | 2
-rw-r--r--  drivers/ata/pata_cmd640.c | 2
-rw-r--r--  drivers/ata/pata_icside.c | 2
-rw-r--r--  drivers/ata/pata_imx.c | 3
-rw-r--r--  drivers/ata/pata_legacy.c | 6
-rw-r--r--  drivers/ata/pata_palmld.c | 2
-rw-r--r--  drivers/ata/pata_pcmcia.c | 2
-rw-r--r--  drivers/ata/pata_platform.c | 2
-rw-r--r--  drivers/ata/pata_pxa.c | 10
-rw-r--r--  drivers/ata/pata_samsung_cf.c | 1
-rw-r--r--  drivers/ata/pata_via.c | 2
-rw-r--r--  drivers/ata/sata_fsl.c | 9
-rw-r--r--  drivers/ata/sata_nv.c | 3
-rw-r--r--  drivers/ata/sata_rcar.c | 76
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/atm/zatm.c | 4
-rw-r--r--  drivers/auxdisplay/arm-charlcd.c | 6
-rw-r--r--  drivers/auxdisplay/charlcd.c | 5
-rw-r--r--  drivers/auxdisplay/hd44780.c | 1
-rw-r--r--  drivers/base/base.h | 2
-rw-r--r--  drivers/base/cacheinfo.c | 24
-rw-r--r--  drivers/base/core.c | 87
-rw-r--r--  drivers/base/cpu.c | 8
-rw-r--r--  drivers/base/dd.c | 142
-rw-r--r--  drivers/base/firmware_loader/fallback.c | 12
-rw-r--r--  drivers/base/init.c | 2
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/base/node.c | 49
-rw-r--r--  drivers/base/power/common.c | 17
-rw-r--r--  drivers/base/power/domain.c | 49
-rw-r--r--  drivers/base/regmap/Kconfig | 4
-rw-r--r--  drivers/base/regmap/Makefile | 1
-rw-r--r--  drivers/base/regmap/internal.h | 3
-rw-r--r--  drivers/base/regmap/regmap-sccb.c | 128
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c | 23
-rw-r--r--  drivers/base/regmap/regmap.c | 79
-rw-r--r--  drivers/bcma/Kconfig | 3
-rw-r--r--  drivers/block/DAC960.c | 51
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/Makefile | 5
-rw-r--r--  drivers/block/aoe/aoecmd.c | 1
-rw-r--r--  drivers/block/aoe/aoedev.c | 4
-rw-r--r--  drivers/block/brd.c | 14
-rw-r--r--  drivers/block/drbd/drbd_int.h | 2
-rw-r--r--  drivers/block/drbd/drbd_main.c | 12
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 6
-rw-r--r--  drivers/block/drbd/drbd_req.c | 8
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 6
-rw-r--r--  drivers/block/floppy.c | 3
-rw-r--r--  drivers/block/loop.c | 4
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 3
-rw-r--r--  drivers/block/nbd.c | 102
-rw-r--r--  drivers/block/null_blk.h | 108
-rw-r--r--  drivers/block/null_blk_main.c (renamed from drivers/block/null_blk.c) | 129
-rw-r--r--  drivers/block/null_blk_zoned.c | 149
-rw-r--r--  drivers/block/paride/bpck.c | 3
-rw-r--r--  drivers/block/paride/pd.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 110
-rw-r--r--  drivers/block/rbd.c | 127
-rw-r--r--  drivers/block/rsxx/dev.c | 6
-rw-r--r--  drivers/block/skd_main.c | 16
-rw-r--r--  drivers/block/xen-blkfront.c | 9
-rw-r--r--  drivers/block/zram/zram_drv.c | 41
-rw-r--r--  drivers/bluetooth/Kconfig | 25
-rw-r--r--  drivers/bluetooth/Makefile | 1
-rw-r--r--  drivers/bluetooth/bfusb.c | 2
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 2
-rw-r--r--  drivers/bluetooth/bpa10x.c | 6
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 2
-rw-r--r--  drivers/bluetooth/btmtkuart.c | 629
-rw-r--r--  drivers/bluetooth/btqca.c | 123
-rw-r--r--  drivers/bluetooth/btqca.h | 22
-rw-r--r--  drivers/bluetooth/btrtl.c | 512
-rw-r--r--  drivers/bluetooth/btrtl.h | 53
-rw-r--r--  drivers/bluetooth/btusb.c | 116
-rw-r--r--  drivers/bluetooth/hci_h5.c | 206
-rw-r--r--  drivers/bluetooth/hci_intel.c | 2
-rw-r--r--  drivers/bluetooth/hci_qca.c | 490
-rw-r--r--  drivers/bus/Kconfig | 10
-rw-r--r--  drivers/bus/Makefile | 1
-rw-r--r--  drivers/bus/imx-weim.c | 7
-rw-r--r--  drivers/bus/sun50i-de2.c | 48
-rw-r--r--  drivers/bus/ti-sysc.c | 72
-rw-r--r--  drivers/cdrom/cdrom.c | 30
-rw-r--r--  drivers/char/Kconfig | 14
-rw-r--r--  drivers/char/agp/alpha-agp.c | 2
-rw-r--r--  drivers/char/agp/amd64-agp.c | 4
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 13
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/atmel-rng.c | 1
-rw-r--r--  drivers/char/hw_random/exynos-trng.c | 1
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 1
-rw-r--r--  drivers/char/hw_random/msm-rng.c | 183
-rw-r--r--  drivers/char/hw_random/powernv-rng.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 6
-rw-r--r--  drivers/char/ipmi/kcs_bmc.c | 31
-rw-r--r--  drivers/char/mem.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 4
-rw-r--r--  drivers/char/random.c | 88
-rw-r--r--  drivers/char/rtc.c | 13
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 68
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 72
-rw-r--r--  drivers/char/tpm/tpm.h | 31
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 258
-rw-r--r--  drivers/char/tpm/tpm2-space.c | 12
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 101
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 8
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 1
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.c | 9
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c | 2
-rw-r--r--  drivers/char/virtio_console.c | 60
-rw-r--r--  drivers/clk/Kconfig | 6
-rw-r--r--  drivers/clk/Makefile | 3
-rw-r--r--  drivers/clk/actions/Kconfig | 7
-rw-r--r--  drivers/clk/actions/Makefile | 1
-rw-r--r--  drivers/clk/actions/owl-s700.c | 606
-rw-r--r--  drivers/clk/at91/Makefile | 1
-rw-r--r--  drivers/clk/at91/clk-i2s-mux.c | 116
-rw-r--r--  drivers/clk/clk-aspeed.c | 61
-rw-r--r--  drivers/clk/clk-cs2000-cp.c | 5
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 9
-rw-r--r--  drivers/clk/clk-max9485.c | 387
-rw-r--r--  drivers/clk/clk-scmi.c | 5
-rw-r--r--  drivers/clk/clk-si514.c | 38
-rw-r--r--  drivers/clk/clk-si544.c | 38
-rw-r--r--  drivers/clk/clk.c | 221
-rw-r--r--  drivers/clk/clkdev.c | 5
-rw-r--r--  drivers/clk/davinci/da8xx-cfgchip.c | 2
-rw-r--r--  drivers/clk/davinci/psc-da830.c | 3
-rw-r--r--  drivers/clk/davinci/psc-da850.c | 3
-rw-r--r--  drivers/clk/davinci/psc-dm365.c | 3
-rw-r--r--  drivers/clk/davinci/psc-dm644x.c | 3
-rw-r--r--  drivers/clk/davinci/psc-dm646x.c | 3
-rw-r--r--  drivers/clk/davinci/psc.h | 2
-rw-r--r--  drivers/clk/imx/clk-imx51-imx53.c | 44
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 16
-rw-r--r--  drivers/clk/imx/clk-imx6sl.c | 12
-rw-r--r--  drivers/clk/imx/clk-imx6sll.c | 7
-rw-r--r--  drivers/clk/imx/clk-imx6sx.c | 41
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 29
-rw-r--r--  drivers/clk/imx/clk-imx7d.c | 1
-rw-r--r--  drivers/clk/ingenic/jz4740-cgu.c | 4
-rw-r--r--  drivers/clk/meson/Kconfig | 28
-rw-r--r--  drivers/clk/meson/Makefile | 4
-rw-r--r--  drivers/clk/meson/axg-audio.c | 845
-rw-r--r--  drivers/clk/meson/axg-audio.h | 127
-rw-r--r--  drivers/clk/meson/axg.c | 244
-rw-r--r--  drivers/clk/meson/axg.h | 8
-rw-r--r--  drivers/clk/meson/clk-audio-divider.c | 110
-rw-r--r--  drivers/clk/meson/clk-phase.c | 63
-rw-r--r--  drivers/clk/meson/clk-triphase.c | 68
-rw-r--r--  drivers/clk/meson/clkc-audio.h | 28
-rw-r--r--  drivers/clk/meson/clkc.h | 11
-rw-r--r--  drivers/clk/meson/gxbb.c | 119
-rw-r--r--  drivers/clk/meson/gxbb.h | 5
-rw-r--r--  drivers/clk/meson/sclk-div.c | 243
-rw-r--r--  drivers/clk/mvebu/armada-37xx-periph.c | 47
-rw-r--r--  drivers/clk/pxa/clk-pxa25x.c | 6
-rw-r--r--  drivers/clk/pxa/clk-pxa27x.c | 7
-rw-r--r--  drivers/clk/pxa/clk-pxa3xx.c | 7
-rw-r--r--  drivers/clk/qcom/Kconfig | 19
-rw-r--r--  drivers/clk/qcom/Makefile | 2
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 10
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h | 14
-rw-r--r--  drivers/clk/qcom/clk-branch.c | 10
-rw-r--r--  drivers/clk/qcom/clk-branch.h | 14
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 2
-rw-r--r--  drivers/clk/qcom/clk-regmap.c | 10
-rw-r--r--  drivers/clk/qcom/clk-regmap.h | 14
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 329
-rw-r--r--  drivers/clk/qcom/common.c | 10
-rw-r--r--  drivers/clk/qcom/common.h | 15
-rw-r--r--  drivers/clk/qcom/dispcc-sdm845.c | 685
-rw-r--r--  drivers/clk/qcom/gcc-apq8084.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-ipq4019.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-ipq806x.c | 3
-rw-r--r--  drivers/clk/qcom/gcc-ipq8074.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-mdm9615.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8660.c | 5
-rw-r--r--  drivers/clk/qcom/gcc-msm8916.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8960.c | 5
-rw-r--r--  drivers/clk/qcom/gcc-msm8974.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8994.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 3
-rw-r--r--  drivers/clk/qcom/gcc-msm8998.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c | 45
-rw-r--r--  drivers/clk/qcom/mmcc-apq8084.c | 2
-rw-r--r--  drivers/clk/qcom/mmcc-msm8974.c | 2
-rw-r--r--  drivers/clk/qcom/mmcc-msm8996.c | 3
-rw-r--r--  drivers/clk/qcom/videocc-sdm845.c | 2
-rw-r--r--  drivers/clk/renesas/Kconfig | 6
-rw-r--r--  drivers/clk/renesas/Makefile | 1
-rw-r--r--  drivers/clk/renesas/r8a7795-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r9a06g032-clocks.c | 893
-rw-r--r--  drivers/clk/rockchip/Makefile | 2
-rw-r--r--  drivers/clk/rockchip/clk-half-divider.c | 227
-rw-r--r--  drivers/clk/rockchip/clk-px30.c | 1039
-rw-r--r--  drivers/clk/rockchip/clk-rk3399.c | 3
-rw-r--r--  drivers/clk/rockchip/clk.c | 10
-rw-r--r--  drivers/clk/rockchip/clk.h | 126
-rw-r--r--  drivers/clk/samsung/Makefile | 1
-rw-r--r--  drivers/clk/samsung/clk-exynos4412-isp.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5440.c | 167
-rw-r--r--  drivers/clk/socfpga/clk-s10.c | 9
-rw-r--r--  drivers/clk/sunxi-ng/Makefile | 39
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-de2.c | 11
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.c | 58
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.h | 8
-rw-r--r--  drivers/clk/tegra/Makefile | 2
-rw-r--r--  drivers/clk/tegra/clk-bpmp.c | 12
-rw-r--r--  drivers/clk/tegra/clk-divider.c | 35
-rw-r--r--  drivers/clk/tegra/clk-emc.c | 2
-rw-r--r--  drivers/clk/tegra/clk-id.h | 2
-rw-r--r--  drivers/clk/tegra/clk-sdmmc-mux.c | 251
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 11
-rw-r--r--  drivers/clk/tegra/clk-tegra124.c | 3
-rw-r--r--  drivers/clk/tegra/clk-tegra210.c | 14
-rw-r--r--  drivers/clk/tegra/clk-utils.c | 43
-rw-r--r--  drivers/clk/tegra/clk.h | 30
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 1
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-peri.c | 9
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-sys.c | 58
-rw-r--r--  drivers/clocksource/Kconfig | 11
-rw-r--r--  drivers/clocksource/Makefile | 3
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r--  drivers/clocksource/mtk_timer.c | 268
-rw-r--r--  drivers/clocksource/riscv_timer.c | 105
-rw-r--r--  drivers/clocksource/tegra20_timer.c | 4
-rw-r--r--  drivers/clocksource/timer-atcpit100.c | 2
-rw-r--r--  drivers/clocksource/timer-keystone.c | 2
-rw-r--r--  drivers/clocksource/timer-mediatek.c | 328
-rw-r--r--  drivers/clocksource/timer-sprd.c | 50
-rw-r--r--  drivers/clocksource/timer-ti-32k.c | 3
-rw-r--r--  drivers/clocksource/zevio-timer.c | 2
-rw-r--r--  drivers/connector/connector.c | 3
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 14
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c | 163
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 52
-rw-r--r--  drivers/cpufreq/cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 12
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c | 452
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 21
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 61
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 13
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 20
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-kryo.c | 14
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c | 3
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 158
-rw-r--r--  drivers/cpuidle/governors/menu.c | 45
-rw-r--r--  drivers/crypto/Kconfig | 15
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 18
-rw-r--r--  drivers/crypto/atmel-ecc.c | 35
-rw-r--r--  drivers/crypto/atmel-sha.c | 4
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 28
-rw-r--r--  drivers/crypto/bcm/cipher.c | 8
-rw-r--r--  drivers/crypto/caam/caamhash.c | 3
-rw-r--r--  drivers/crypto/caam/sg_sw_qm2.h | 2
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 2
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_lib.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 3
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 3
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 35
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 19
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 7
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 36
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 16
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c | 8
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 170
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h | 1
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 4
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 1
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 85
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 2
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c | 2
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 14
-rw-r--r--  drivers/crypto/hisilicon/Makefile | 2
-rw-r--r--  drivers/crypto/hisilicon/sec/Makefile | 3
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c | 1122
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.c | 1323
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.h | 428
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 474
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 201
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 492
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 560
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c | 63
-rw-r--r--  drivers/crypto/marvell/hash.c | 3
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c | 1
-rw-r--r--  drivers/crypto/n2_core.c | 3
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 31
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 1
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 1
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 1
-rw-r--r--  drivers/crypto/omap-sham.c | 36
-rw-r--r--  drivers/crypto/padlock-aes.c | 8
-rw-r--r--  drivers/crypto/padlock-sha.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 2
-rw-r--r--  drivers/crypto/qce/core.c | 1
-rw-r--r--  drivers/crypto/qce/sha.c | 3
-rw-r--r--  drivers/crypto/qcom-rng.c | 229
-rw-r--r--  drivers/crypto/s5p-sss.c | 9
-rw-r--r--  drivers/crypto/sahara.c | 10
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 62
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 95
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 72
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 20
-rw-r--r--  drivers/crypto/talitos.c | 37
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 1
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 16
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 116
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 25
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 33
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_mgr.c | 81
-rw-r--r--  drivers/crypto/vmx/ghash.c | 2
-rw-r--r--  drivers/crypto/vmx/ghashp8-ppc.pl | 12
-rw-r--r--  drivers/dax/device.c | 2
-rw-r--r--  drivers/dax/super.c | 8
-rw-r--r--  drivers/devfreq/devfreq.c | 16
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 6
-rw-r--r--  drivers/devfreq/rk3399_dmc.c | 102
-rw-r--r--  drivers/devfreq/tegra-devfreq.c | 1
-rw-r--r--  drivers/dma-buf/dma-buf.c | 56
-rw-r--r--  drivers/dma-buf/dma-fence-array.c | 1
-rw-r--r--  drivers/dma-buf/dma-fence.c | 167
-rw-r--r--  drivers/dma-buf/reservation.c | 8
-rw-r--r--  drivers/dma-buf/sw_sync.c | 1
-rw-r--r--  drivers/dma/Kconfig | 9
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/dmaengine.c | 53
-rw-r--r--  drivers/dma/ep93xx_dma.c | 1
-rw-r--r--  drivers/dma/hsu/hsu.c | 8
-rw-r--r--  drivers/dma/idma64.c | 8
-rw-r--r--  drivers/dma/imx-sdma.c | 578
-rw-r--r--  drivers/dma/ioat/dma.c | 6
-rw-r--r--  drivers/dma/k3dma.c | 2
-rw-r--r--  drivers/dma/mic_x100_dma.c | 8
-rw-r--r--  drivers/dma/mv_xor_v2.c | 16
-rw-r--r--  drivers/dma/nbpfaxi.c | 1
-rw-r--r--  drivers/dma/owl-dma.c | 971
-rw-r--r--  drivers/dma/pl330.c | 14
-rw-r--r--  drivers/dma/pxa_dma.c | 15
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 1
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 112
-rw-r--r--  drivers/dma/ste_dma40.c | 15
-rw-r--r--  drivers/dma/stm32-dma.c | 4
-rw-r--r--  drivers/dma/stm32-mdma.c | 8
-rw-r--r--  drivers/dma/ti/omap-dma.c | 6
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 22
-rw-r--r--  drivers/edac/altera_edac.c | 3
-rw-r--r--  drivers/edac/edac_mc.c | 1
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 6
-rw-r--r--  drivers/edac/i7core_edac.c | 22
-rw-r--r--  drivers/edac/sb_edac.c | 17
-rw-r--r--  drivers/edac/thunderx_edac.c | 14
-rw-r--r--  drivers/extcon/extcon-intel-cht-wc.c | 1
-rw-r--r--  drivers/extcon/extcon-intel-int3496.c | 2
-rw-r--r--  drivers/extcon/extcon-max3355.c | 1
-rw-r--r--  drivers/extcon/extcon-qcom-spmi-misc.c | 1
-rw-r--r--  drivers/extcon/extcon-usbc-cros-ec.c | 22
-rw-r--r--  drivers/extcon/extcon.c | 3
-rw-r--r--  drivers/firewire/core-cdev.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 5
-rw-r--r--  drivers/firmware/efi/Kconfig | 12
-rw-r--r--  drivers/firmware/efi/arm-init.c | 1
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 18
-rw-r--r--  drivers/firmware/efi/cper.c | 19
-rw-r--r--  drivers/firmware/efi/efi-bgrt.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 23
-rw-r--r--  drivers/firmware/efi/esrt.c | 8
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 8
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 32
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 31
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 3
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 202
-rw-r--r--  drivers/firmware/google/vpd.c | 5
-rw-r--r--  drivers/firmware/psci_checker.c | 83
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 1
-rw-r--r--  drivers/firmware/raspberrypi.c | 29
-rw-r--r--  drivers/fpga/Kconfig | 68
-rw-r--r--  drivers/fpga/Makefile | 14
-rw-r--r--  drivers/fpga/altera-cvp.c | 6
-rw-r--r--  drivers/fpga/dfl-afu-dma-region.c | 463
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 636
-rw-r--r--  drivers/fpga/dfl-afu-region.c | 166
-rw-r--r--  drivers/fpga/dfl-afu.h | 100
-rw-r--r--  drivers/fpga/dfl-fme-br.c | 114
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 279
-rw-r--r--  drivers/fpga/dfl-fme-mgr.c | 349
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 479
-rw-r--r--  drivers/fpga/dfl-fme-pr.h | 84
-rw-r--r--  drivers/fpga/dfl-fme-region.c | 89
-rw-r--r--  drivers/fpga/dfl-fme.h | 38
-rw-r--r--  drivers/fpga/dfl-pci.c | 243
-rw-r--r--  drivers/fpga/dfl.c | 1044
-rw-r--r--  drivers/fpga/dfl.h | 410
-rw-r--r--  drivers/fpga/fpga-mgr.c | 28
-rw-r--r--  drivers/fpga/fpga-region.c | 22
-rw-r--r--  drivers/fsi/Kconfig | 32
-rw-r--r--  drivers/fsi/Makefile | 2
-rw-r--r--  drivers/fsi/cf-fsi-fw.h | 157
-rw-r--r--  drivers/fsi/fsi-core.c | 590
-rw-r--r--  drivers/fsi/fsi-master-ast-cf.c | 1440
-rw-r--r--  drivers/fsi/fsi-master-gpio.c | 471
-rw-r--r--  drivers/fsi/fsi-master-hub.c | 5
-rw-r--r--  drivers/fsi/fsi-master.h | 37
-rw-r--r--  drivers/fsi/fsi-sbefifo.c | 1066
-rw-r--r--  drivers/fsi/fsi-scom.c | 558
-rw-r--r--  drivers/gnss/Kconfig | 43
-rw-r--r--  drivers/gnss/Makefile | 16
-rw-r--r--  drivers/gnss/core.c | 420
-rw-r--r--  drivers/gnss/serial.c | 275
-rw-r--r--  drivers/gnss/serial.h | 47
-rw-r--r--  drivers/gnss/sirf.c | 408
-rw-r--r--  drivers/gnss/ubx.c | 153
-rw-r--r--  drivers/gpio/Kconfig | 18
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 426
-rw-r--r--  drivers/gpio/gpio-ath79.c | 2
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 6
-rw-r--r--  drivers/gpio/gpio-davinci.c | 70
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 6
-rw-r--r--  drivers/gpio/gpio-em.c | 6
-rw-r--r--  drivers/gpio/gpio-it87.c | 13
-rw-r--r--  drivers/gpio/gpio-madera.c | 206
-rw-r--r--  drivers/gpio/gpio-max732x.c | 12
-rw-r--r--  drivers/gpio/gpio-menz127.c | 4
-rw-r--r--  drivers/gpio/gpio-ml-ioh.c | 3
-rw-r--r--  drivers/gpio/gpio-mmio.c | 108
-rw-r--r--  drivers/gpio/gpio-mt7621.c | 332
-rw-r--r--  drivers/gpio/gpio-mxc.c | 73
-rw-r--r--  drivers/gpio/gpio-mxs.c | 3
-rw-r--r--  drivers/gpio/gpio-omap.c | 88
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 2
-rw-r--r--  drivers/gpio/gpio-pisosr.c | 22
-rw-r--r--  drivers/gpio/gpio-pxa.c | 42
-rw-r--r--  drivers/gpio/gpio-rc5t583.c | 2
-rw-r--r--  drivers/gpio/gpio-rcar.c | 10
-rw-r--r--  drivers/gpio/gpio-rdc321x.c | 2
-rw-r--r--  drivers/gpio/gpio-sa1100.c | 2
-rw-r--r--  drivers/gpio/gpio-sch.c | 11
-rw-r--r--  drivers/gpio/gpio-sch311x.c | 70
-rw-r--r--  drivers/gpio/gpio-spear-spics.c | 2
-rw-r--r--  drivers/gpio/gpio-sta2x11.c | 41
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 2
-rw-r--r--  drivers/gpio/gpio-stp-xway.c | 18
-rw-r--r--  drivers/gpio/gpio-syscon.c | 33
-rw-r--r--  drivers/gpio/gpio-tb10x.c | 3
-rw-r--r--  drivers/gpio/gpio-tegra.c | 30
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 74
-rw-r--r--  drivers/gpio/gpio-timberdale.c | 2
-rw-r--r--  drivers/gpio/gpio-uniphier.c | 9
-rw-r--r--  drivers/gpio/gpio-vr41xx.c | 8
-rw-r--r--  drivers/gpio/gpio-xgene-sb.c | 6
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 2
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 67
-rw-r--r--  drivers/gpio/gpiolib-of.c | 6
-rw-r--r--  drivers/gpio/gpiolib.c | 82
-rw-r--r--  drivers/gpio/gpiolib.h | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 21
-rw-r--r--  drivers/gpu/drm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 149
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 184
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 267
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 263
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 401
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 266
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 202
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 242
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 418
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 221
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 185
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 235
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 199
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 402
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 299
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15d.h | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 507
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_int.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 458
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h | 37
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 122
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 283
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 32
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 118
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/amd/display/TODO | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 163
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 722
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h) | 18
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 562
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 329
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/log_helpers.c | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/logger.c | 405
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 196
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 220
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 215
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 115
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 176
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 121
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 141
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 500
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_sink.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_ddc_types.h | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 937
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 111
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 728
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 437
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 75
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 362
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 518
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 100
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 128
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 88
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c | 159
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/engine.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 180
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/ddc_service_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/dpcd_defs.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/fixed31_32.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_defs.h | 46
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_id.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_interface.h | 136
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 59
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 147
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/luts_1d.h | 51
-rw-r--r--  drivers/gpu/drm/amd/display/modules/stats/stats.c | 4
-rw-r--r--  drivers/gpu/drm/amd/include/amd_pcie.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 46
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h | 20
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 20
-rw-r--r--  drivers/gpu/drm/amd/include/dm_pp_interface.h | 37
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h | 55
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h | 98
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h | 50
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h | 50
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h (renamed from drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h) | 11
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h | 33
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h | 34
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h | 34
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h | 34
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h | 37
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 40
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 109
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 96
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 155
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 43
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 57
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 179
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 1137
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 119
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 74
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 108
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 80
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c | 150
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 80
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 168
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 220
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 85
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_sim.c | 2
-rw-r--r--  drivers/gpu/drm/arm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 35
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 76
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.h | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 190
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 26
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 300
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 40
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 250
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.h | 14
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 9
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 24
-rw-r--r--  drivers/gpu/drm/armada/Makefile | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_510.c | 24
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 1020
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 56
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 14
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 54
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.c | 30
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.h | 6
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 17
-rw-r--r--  drivers/gpu/drm/armada/armada_hw.h | 16
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 663
-rw-r--r--  drivers/gpu/drm/armada/armada_plane.c | 289
-rw-r--r--  drivers/gpu/drm/armada/armada_plane.h | 15
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 6
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 19
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 16
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/cdns-dsi.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/dumb-vga-dac.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lvds-encoder.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 86
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 10
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 20
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 43
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 14
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 408
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 122
-rw-r--r--  drivers/gpu/drm/drm_client.c | 406
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 219
-rw-r--r--  drivers/gpu/drm/drm_context.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 53
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 28
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 9
-rw-r--r--  drivers/gpu/drm/drm_dp_cec.c | 428
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 33
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 4
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 23
-rw-r--r--  drivers/gpu/drm/drm_dumb_buffers.c | 29
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 282
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 355
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 359
-rw-r--r--  drivers/gpu/drm/drm_file.c | 306
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 42
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 49
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 9
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_global.c | 2
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 13
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 16
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 91
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 5
-rw-r--r--  drivers/gpu/drm/drm_mode_object.c | 3
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 23
-rw-r--r--  drivers/gpu/drm/drm_of.c | 27
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 27
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 58
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 169
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 12
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 34
-rw-r--r--  drivers/gpu/drm/drm_print.c | 111
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 19
-rw-r--r--  drivers/gpu/drm/drm_property.c | 6
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 1
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 10
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 1
-rw-r--r--  drivers/gpu/drm/drm_writeback.c | 353
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 35
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 37
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 10
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 9
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 52
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 119
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 47
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 17
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 300
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.h | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 62
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 51
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 120
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 44
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/regs-gsc.h | 1
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/accel_2d.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 76
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 29
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 20
-rw-r--r--  drivers/gpu/drm/gma500/gtt.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 38
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_modes.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 13
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 1
-rw-r--r--  drivers/gpu/drm/i2c/tda9950.c | 5
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 387
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 12
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 22
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 26
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c | 44
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 492
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 46
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 53
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 462
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 144
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 215
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 80
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 374
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 177
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 131
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 621
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 174
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 115
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1282
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 85
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 92
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 680
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 68
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4089
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 100
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 148
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 387
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 53
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 127
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 195
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 803
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 531
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 205
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 104
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 288
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.h | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 138
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 153
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 979
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 204
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 445
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 631
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 419
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 139
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 333
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 311
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 165
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 74
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 167
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 82
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 47
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 301
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 39
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 174
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c | 70
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 34
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 33
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c | 55
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_wedge_me.h | 58
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 260
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 133
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 24
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.c | 14
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 49
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 20
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 65
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c (renamed from drivers/gpu/drm/i915/intel_dsi.c) | 117
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi_pll.c (renamed from drivers/gpu/drm/i915/intel_dsi_pll.c) | 98
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 47
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 1
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 14
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 3
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 235
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 15
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 10
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 102
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.c | 76
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.h | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 12
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.c | 656
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.h | 4
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 378
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.h | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 4
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 34
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 57
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 24
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 30
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 193
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 22
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h | 483
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 249
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h | 4562
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 1207
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 162
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 382
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 818
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 60
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 435
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.h | 127
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 38
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 49
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 217
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 497
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 479
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 153
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 637
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 133
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 2138
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 423
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c | 2393
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h | 103
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2500
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 177
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 430
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 905
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 922
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 1173
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 88
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c | 155
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h | 53
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 511
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 804
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h | 168
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 323
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 139
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 540
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 218
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 1183
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 257
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 349
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 128
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 261
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 122
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 465
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c250
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h136
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c753
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h424
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c398
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h202
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c368
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h348
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c275
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h128
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c203
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h57
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c66
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h59
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c1345
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h290
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c245
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c1963
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h175
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c249
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h225
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c1079
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h199
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h1007
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c384
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h94
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h1376
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h26
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c7
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h26
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c52
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c154
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_common.xml.h26
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h23
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h13
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c56
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h12
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c431
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c135
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h26
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h26
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h26
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h26
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h26
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c7
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c93
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c287
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h103
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c54
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c33
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c207
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h70
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h29
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c99
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c73
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c109
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c286
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c19
-rw-r--r--drivers/gpu/drm/panel/Kconfig9
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c503
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c352
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c1
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c1
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c58
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c1
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c1
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c301
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c1
-rw-r--r--drivers/gpu/drm/pl111/Makefile1
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c56
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h5
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c61
-rw-r--r--drivers/gpu/drm/pl111/pl111_nomadik.c36
-rw-r--r--drivers/gpu/drm/pl111/pl111_nomadik.h18
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c7
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c20
-rw-r--r--drivers/gpu/drm/radeon/cik.c22
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c94
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/si.c22
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c6
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c4
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c16
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c4
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c86
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c99
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h23
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c8
-rw-r--r--drivers/gpu/drm/savage/savage_state.c2
-rw-r--r--drivers/gpu/drm/scheduler/Makefile1
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler.c386
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c13
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c71
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c8
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c8
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/stm/drv.c10
-rw-r--r--drivers/gpu/drm/stm/ltdc.c21
-rw-r--r--drivers/gpu/drm/stm/ltdc.h1
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig7
-rw-r--r--drivers/gpu/drm/sun4i/Makefile6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c127
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c67
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c117
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c44
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c90
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c81
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c274
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.h44
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c61
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c61
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c5
-rw-r--r--drivers/gpu/drm/tegra/gem.c14
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/rgb.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c9
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c4
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig11
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile1
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c3
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c1
-rw-r--r--drivers/gpu/drm/tinydrm/ili9341.c232
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c1
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c2
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c1
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c63
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c62
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c59
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c25
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c18
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h5
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c28
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c15
-rw-r--r--drivers/gpu/drm/udl/udl_main.c45
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c7
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c49
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c28
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h11
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c17
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c13
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h1
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c26
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c147
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h10
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c57
-rw-r--r--drivers/gpu/drm/vc4/vc4_fence.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c101
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h6
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c477
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c34
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c4
-rw-r--r--drivers/gpu/drm/vkms/Makefile3
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c130
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c156
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h78
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c179
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c111
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c57
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile4
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h233
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h86
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h300
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h1094
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h334
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_escape.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h211
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c1123
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c376
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c72
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h186
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c122
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c41
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c42
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c604
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h80
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.h35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c709
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c146
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c109
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c553
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c)10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_va.c4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.h4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.c2
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c4
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c2
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c2
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c4
-rw-r--r--drivers/gpu/host1x/dev.c3
-rw-r--r--drivers/gpu/host1x/job.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c29
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c37
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c3
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c63
-rw-r--r--drivers/hid/Kconfig10
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-core.c45
-rw-r--r--drivers/hid/hid-cougar.c312
-rw-r--r--drivers/hid/hid-debug.c8
-rw-r--r--drivers/hid/hid-elan.c235
-rw-r--r--drivers/hid/hid-hyperv.c3
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-microsoft.c49
-rw-r--r--drivers/hid/hid-multitouch.c989
-rw-r--r--drivers/hid/hid-ntrig.c2
-rw-r--r--drivers/hid/hid-redragon.c26
-rw-r--r--drivers/hid/hid-sony.c164
-rw-r--r--drivers/hid/hid-wiimote-core.c14
-rw-r--r--drivers/hid/hid-wiimote-modules.c440
-rw-r--r--drivers/hid/hid-wiimote.h3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c59
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c9
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c13
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c2
-rw-r--r--drivers/hid/usbhid/hid-core.c7
-rw-r--r--drivers/hid/usbhid/hiddev.c11
-rw-r--r--drivers/hid/wacom_sys.c123
-rw-r--r--drivers/hid/wacom_wac.c30
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c1
-rw-r--r--drivers/hv/channel.c67
-rw-r--r--drivers/hv/channel_mgmt.c10
-rw-r--r--drivers/hv/hv.c44
-rw-r--r--drivers/hv/hv_balloon.c3
-rw-r--r--drivers/hv/hv_util.c3
-rw-r--r--drivers/hv/ring_buffer.c65
-rw-r--r--drivers/hv/vmbus_drv.c120
-rw-r--r--drivers/hwmon/Kconfig32
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adt7475.c340
-rw-r--r--drivers/hwmon/emc1403.c2
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/ibmpowernv.c238
-rw-r--r--drivers/hwmon/iio_hwmon.c67
-rw-r--r--drivers/hwmon/k10temp.c8
-rw-r--r--drivers/hwmon/max197.c1
-rw-r--r--drivers/hwmon/mc13783-adc.c1
-rw-r--r--drivers/hwmon/mlxreg-fan.c489
-rw-r--r--drivers/hwmon/nct6775.c6
-rw-r--r--drivers/hwmon/nct7904.c68
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c1057
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/hwmon/pmbus/max34440.c93
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c166
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c223
-rw-r--r--drivers/hwtracing/coresight/Kconfig11
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c577
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h119
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c12
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c47
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h10
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c45
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c1074
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c83
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h113
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c7
-rw-r--r--drivers/hwtracing/coresight/coresight.c53
-rw-r--r--drivers/hwtracing/intel_th/msu.c2
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c8
-rw-r--r--drivers/i2c/busses/Kconfig42
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c1
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c40
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c8
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c3
-rw-r--r--drivers/i2c/busses/i2c-davinci.c12
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c10
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c91
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h24
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c223
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c17
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c68
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c62
-rw-r--r--drivers/i2c/busses/i2c-emev2.c5
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c22
-rw-r--r--drivers/i2c/busses/i2c-fsi.c752
-rw-r--r--drivers/i2c/busses/i2c-gpio.c101
-rw-r--r--drivers/i2c/busses/i2c-highlander.c5
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c28
-rw-r--r--drivers/i2c/busses/i2c-mxs.c8
-rw-r--r--drivers/i2c/busses/i2c-ocores.c4
-rw-r--r--drivers/i2c/busses/i2c-owl.c495
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c3
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c17
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c673
-rw-r--r--drivers/i2c/busses/i2c-rcar.c133
-rw-r--r--drivers/i2c/busses/i2c-riic.c5
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c20
-rw-r--r--drivers/i2c/busses/i2c-sprd.c8
-rw-r--r--drivers/i2c/busses/i2c-stu300.c14
-rw-r--r--drivers/i2c/busses/i2c-tegra.c107
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c41
-rw-r--r--drivers/i2c/busses/i2c-xlr.c11
-rw-r--r--drivers/i2c/i2c-core-acpi.c19
-rw-r--r--drivers/i2c/i2c-core-base.c93
-rw-r--r--drivers/i2c/i2c-core-slave.c8
-rw-r--r--drivers/i2c/i2c-core-smbus.c42
-rw-r--r--drivers/i2c/i2c-mux.c8
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c28
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c75
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c51
-rw-r--r--drivers/ide/hpt366.c5
-rw-r--r--drivers/ide/ide-cd.c58
-rw-r--r--drivers/ide/ide-cd.h6
-rw-r--r--drivers/ide/ide-cd_ioctl.c62
-rw-r--r--drivers/ide/ide-floppy.c1
-rw-r--r--drivers/ide/ide-io.c4
-rw-r--r--drivers/ide/ide-probe.c1
-rw-r--r--drivers/ide/ide-tape.c2
-rw-r--r--drivers/ide/ide-taskfile.c3
-rw-r--r--drivers/ide/sis5513.c1
-rw-r--r--drivers/iio/accel/Kconfig4
-rw-r--r--drivers/iio/accel/adxl345.h7
-rw-r--r--drivers/iio/accel/adxl345_core.c140
-rw-r--r--drivers/iio/accel/adxl345_i2c.c7
-rw-r--r--drivers/iio/accel/adxl345_spi.c6
-rw-r--r--drivers/iio/accel/mma8452.c3
-rw-r--r--drivers/iio/accel/sca3000.c1
-rw-r--r--drivers/iio/accel/st_accel_i2c.c64
-rw-r--r--drivers/iio/adc/Kconfig13
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c5
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c783
-rw-r--r--drivers/iio/adc/hx711.c39
-rw-r--r--drivers/iio/adc/ina2xx-adc.c17
-rw-r--r--drivers/iio/adc/max1363.c8
-rw-r--r--drivers/iio/adc/meson_saradc.c9
-rw-r--r--drivers/iio/adc/sc27xx_adc.c522
-rw-r--r--drivers/iio/adc/ti-ads7950.c43
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c44
-rw-r--r--drivers/iio/adc/xilinx-xadc.h1
-rw-r--r--drivers/iio/chemical/Kconfig23
-rw-r--r--drivers/iio/chemical/Makefile3
-rw-r--r--drivers/iio/chemical/bme680.h96
-rw-r--r--drivers/iio/chemical/bme680_core.c959
-rw-r--r--drivers/iio/chemical/bme680_i2c.c85
-rw-r--r--drivers/iio/chemical/bme680_spi.c125
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c3
-rw-r--r--drivers/iio/counter/104-quad-8.c87
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5686.c7
-rw-r--r--drivers/iio/dac/ad5686.h1
-rw-r--r--drivers/iio/dac/ad5696-i2c.c1
-rw-r--r--drivers/iio/dac/ad5758.c897
-rw-r--r--drivers/iio/dac/ltc2632.c7
-rw-r--r--drivers/iio/dac/ti-dac5571.c6
-rw-r--r--drivers/iio/frequency/ad9523.c68
-rw-r--r--drivers/iio/imu/adis.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c39
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c5
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h38
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c143
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c16
-rw-r--r--drivers/iio/industrialio-core.c29
-rw-r--r--drivers/iio/light/Kconfig17
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/si1133.c1071
-rw-r--r--drivers/iio/light/tsl2772.c2
-rw-r--r--drivers/iio/light/vcnl4000.c220
-rw-r--r--drivers/iio/pressure/bmp280-core.c5
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c3
-rw-r--r--drivers/iio/proximity/Kconfig13
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/isl29501.c1027
-rw-r--r--drivers/iio/temperature/mlx90614.c4
-rw-r--r--drivers/infiniband/Kconfig4
-rw-r--r--drivers/infiniband/core/Makefile5
-rw-r--r--drivers/infiniband/core/addr.c16
-rw-r--r--drivers/infiniband/core/cache.c724
-rw-r--r--drivers/infiniband/core/cm.c147
-rw-r--r--drivers/infiniband/core/cm_msgs.h7
-rw-r--r--drivers/infiniband/core/cma.c362
-rw-r--r--drivers/infiniband/core/core_priv.h4
-rw-r--r--drivers/infiniband/core/device.c23
-rw-r--r--drivers/infiniband/core/mad.c113
-rw-r--r--drivers/infiniband/core/mad_priv.h7
-rw-r--r--drivers/infiniband/core/multicast.c40
-rw-r--r--drivers/infiniband/core/nldev.c16
-rw-r--r--drivers/infiniband/core/rdma_core.c1020
-rw-r--r--drivers/infiniband/core/rdma_core.h96
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c306
-rw-r--r--drivers/infiniband/core/rw.c8
-rw-r--r--drivers/infiniband/core/sa_query.c138
-rw-r--r--drivers/infiniband/core/sysfs.c66
-rw-r--r--drivers/infiniband/core/ucm.c15
-rw-r--r--drivers/infiniband/core/umem.c62
-rw-r--r--drivers/infiniband/core/umem_odp.c33
-rw-r--r--drivers/infiniband/core/user_mad.c1
-rw-r--r--drivers/infiniband/core/uverbs.h34
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c684
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c709
-rw-r--r--drivers/infiniband/core/uverbs_ioctl_merge.c664
-rw-r--r--drivers/infiniband/core/uverbs_main.c232
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c2
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c200
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c108
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c154
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dm.c61
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c170
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c88
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c346
-rw-r--r--drivers/infiniband/core/verbs.c523
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c144
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h15
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c12
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cq.c64
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c44
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h8
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c32
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c90
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c269
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c20
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h57
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c56
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1051
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c51
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h164
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h68
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c24
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c205
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h30
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c63
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c10
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h243
-rw-r--r--drivers/infiniband/hw/hfi1/init.c44
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c11
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c23
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c14
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c6
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h24
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c8
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c14
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c10
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c18
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h4
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c21
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h45
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c440
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c698
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h136
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c55
-rw-r--r--drivers/infiniband/hw/i40iw/Kconfig1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c26
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c83
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c54
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c70
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c29
-rw-r--r--drivers/infiniband/hw/mlx4/main.c41
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h21
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c421
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c11
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c27
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c9
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c1119
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c252
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c8
-rw-r--r--drivers/infiniband/hw/mlx5/main.c572
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h85
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c34
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c294
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c22
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h24
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c24
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c8
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c74
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c32
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c26
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c50
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h12
-rw-r--r--drivers/infiniband/hw/qedr/main.c107
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h43
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi_rdma.h11
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c12
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c37
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.h8
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c625
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h17
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h5
-rw-r--r--drivers/infiniband/hw/usnic/Kconfig2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c10
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h8
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c40
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c26
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c52
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c11
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h15
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c27
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h12
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c30
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c67
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c31
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c24
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c70
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h32
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c81
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c444
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c261
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c16
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c7
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c14
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c26
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c4
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c27
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c82
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h4
-rw-r--r--drivers/input/evbug.c4
-rw-r--r--drivers/input/evdev.c16
-rw-r--r--drivers/input/gameport/emu10k1-gp.c4
-rw-r--r--drivers/input/gameport/lightning.c4
-rw-r--r--drivers/input/gameport/ns558.c4
-rw-r--r--drivers/input/input-mt.c12
-rw-r--r--drivers/input/input.c16
-rw-r--r--drivers/input/joystick/a3d.c4
-rw-r--r--drivers/input/joystick/adi.c4
-rw-r--r--drivers/input/joystick/amijoy.c4
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/joystick/cobra.c4
-rw-r--r--drivers/input/joystick/db9.c9
-rw-r--r--drivers/input/joystick/gamecon.c4
-rw-r--r--drivers/input/joystick/gf2k.c4
-rw-r--r--drivers/input/joystick/grip.c4
-rw-r--r--drivers/input/joystick/guillemot.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c8
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c30
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c16
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c4
-rw-r--r--drivers/input/joystick/iforce/iforce.h6
-rw-r--r--drivers/input/joystick/interact.c4
-rw-r--r--drivers/input/joystick/joydump.c4
-rw-r--r--drivers/input/joystick/magellan.c4
-rw-r--r--drivers/input/joystick/pxrc.c166
-rw-r--r--drivers/input/joystick/sidewinder.c4
-rw-r--r--drivers/input/joystick/spaceball.c4
-rw-r--r--drivers/input/joystick/spaceorb.c4
-rw-r--r--drivers/input/joystick/stinger.c4
-rw-r--r--drivers/input/joystick/tmdc.c4
-rw-r--r--drivers/input/joystick/turbografx.c4
-rw-r--r--drivers/input/joystick/warrior.c4
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1
-rw-r--r--drivers/input/keyboard/amikbd.c4
-rw-r--r--drivers/input/keyboard/atakbd.c4
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c69
-rw-r--r--drivers/input/keyboard/goldfish_events.c9
-rw-r--r--drivers/input/keyboard/gpio_keys.c8
-rw-r--r--drivers/input/keyboard/hilkbd.c4
-rw-r--r--drivers/input/keyboard/imx_keypad.c12
-rw-r--r--drivers/input/keyboard/newtonkbd.c4
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c15
-rw-r--r--drivers/input/keyboard/stowaway.c4
-rw-r--r--drivers/input/keyboard/sunkbd.c4
-rw-r--r--drivers/input/keyboard/xtkbd.c4
-rw-r--r--drivers/input/misc/Kconfig10
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/keyspan_remote.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c70
-rw-r--r--drivers/input/misc/powermate.c2
-rw-r--r--drivers/input/misc/sc27xx-vibra.c154
-rw-r--r--drivers/input/misc/xen-kbdfront.c183
-rw-r--r--drivers/input/misc/yealink.c4
-rw-r--r--drivers/input/mouse/appletouch.c7
-rw-r--r--drivers/input/mouse/cyapa_gen5.c1
-rw-r--r--drivers/input/mouse/cyapa_gen6.c1
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c5
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c12
-rw-r--r--drivers/input/mouse/elantech.c13
-rw-r--r--drivers/input/mouse/inport.c4
-rw-r--r--drivers/input/mouse/logibm.c4
-rw-r--r--drivers/input/mouse/pc110pad.c4
-rw-r--r--drivers/input/mouse/psmouse-base.c12
-rw-r--r--drivers/input/mouse/sermouse.c8
-rw-r--r--drivers/input/rmi4/Kconfig1
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c34
-rw-r--r--drivers/input/rmi4/rmi_bus.c50
-rw-r--r--drivers/input/rmi4/rmi_bus.h10
-rw-r--r--drivers/input/rmi4/rmi_driver.c52
-rw-r--r--drivers/input/rmi4/rmi_f01.c10
-rw-r--r--drivers/input/rmi4/rmi_f03.c9
-rw-r--r--drivers/input/rmi4/rmi_f11.c42
-rw-r--r--drivers/input/rmi4/rmi_f12.c8
-rw-r--r--drivers/input/rmi4/rmi_f30.c9
-rw-r--r--drivers/input/rmi4/rmi_f34.c5
-rw-r--r--drivers/input/rmi4/rmi_f54.c6
-rw-r--r--drivers/input/serio/ams_delta_serio.c198
-rw-r--r--drivers/input/serio/ct82c710.c4
-rw-r--r--drivers/input/serio/hyperv-keyboard.c3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/i8042.c3
-rw-r--r--drivers/input/serio/q40kbd.c4
-rw-r--r--drivers/input/serio/rpckbd.c4
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/touchscreen/Kconfig25
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c223
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c484
-rw-r--r--drivers/input/touchscreen/eeti_ts.c37
-rw-r--r--drivers/input/touchscreen/egalax_ts.c5
-rw-r--r--drivers/input/touchscreen/elo.c1
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c21
-rw-r--r--drivers/input/touchscreen/gunze.c4
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c14
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c4
-rw-r--r--drivers/input/touchscreen/resistive-adc-touch.c204
-rw-r--r--drivers/input/touchscreen/rohm_bu21023.c4
-rw-r--r--drivers/input/touchscreen/silead.c1
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c2
-rw-r--r--drivers/iommu/Kconfig38
-rw-r--r--drivers/iommu/Makefile4
-rw-r--r--drivers/iommu/amd_iommu.c40
-rw-r--r--drivers/iommu/amd_iommu_debugfs.c33
-rw-r--r--drivers/iommu/amd_iommu_init.c57
-rw-r--r--drivers/iommu/amd_iommu_proto.h6
-rw-r--r--drivers/iommu/amd_iommu_types.h22
-rw-r--r--drivers/iommu/amd_iommu_v2.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c28
-rw-r--r--drivers/iommu/arm-smmu.c24
-rw-r--r--drivers/iommu/dma-iommu.c3
-rw-r--r--drivers/iommu/dmar.c6
-rw-r--r--drivers/iommu/exynos-iommu.c3
-rw-r--r--drivers/iommu/intel-iommu.c255
-rw-r--r--drivers/iommu/intel-pasid.c239
-rw-r--r--drivers/iommu/intel-pasid.h39
-rw-r--r--drivers/iommu/intel-svm.c83
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c7
-rw-r--r--drivers/iommu/io-pgtable-arm.c3
-rw-r--r--drivers/iommu/iommu-debugfs.c66
-rw-r--r--drivers/iommu/iommu.c44
-rw-r--r--drivers/iommu/ipmmu-vmsa.c62
-rw-r--r--drivers/iommu/msm_iommu.c19
-rw-r--r--drivers/iommu/mtk_iommu.c1
-rw-r--r--drivers/iommu/mtk_iommu_v1.c1
-rw-r--r--drivers/iommu/of_iommu.c21
-rw-r--r--drivers/iommu/omap-iommu.c5
-rw-r--r--drivers/iommu/qcom_iommu.c3
-rw-r--r--drivers/iommu/rockchip-iommu.c48
-rw-r--r--drivers/iommu/tegra-gart.c1
-rw-r--r--drivers/iommu/tegra-smmu.c1
-rw-r--r--drivers/ipack/carriers/tpci200.c7
-rw-r--r--drivers/irqchip/Kconfig28
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c16
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c243
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-ingenic.c1
-rw-r--r--drivers/irqchip/irq-renesas-h8s.c6
-rw-r--r--drivers/irqchip/irq-sifive-plic.c260
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c2
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c2
-rw-r--r--drivers/isdn/capi/capi.c5
-rw-r--r--drivers/isdn/capi/capidrv.c3
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c29
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c36
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c4
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c1
-rw-r--r--drivers/isdn/hisax/callc.c3
-rw-r--r--drivers/isdn/hisax/config.c9
-rw-r--r--drivers/isdn/hisax/gazel.c4
-rw-r--r--drivers/isdn/hisax/hfc_usb.c10
-rw-r--r--drivers/isdn/hisax/isar.c2
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c1
-rw-r--r--drivers/isdn/hisax/l3dss1.c1
-rw-r--r--drivers/isdn/hisax/st5481_usb.c11
-rw-r--r--drivers/isdn/hysdn/hysdn_boot.c2
-rw-r--r--drivers/isdn/i4l/isdn_common.c8
-rw-r--r--drivers/isdn/i4l/isdn_tty.c4
-rw-r--r--drivers/isdn/i4l/isdn_v110.c9
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/isdn/mISDN/stack.c1
-rw-r--r--drivers/leds/Kconfig5
-rw-r--r--drivers/leds/led-triggers.c39
-rw-r--r--drivers/leds/leds-apu.c44
-rw-r--r--drivers/leds/leds-lm3692x.c181
-rw-r--r--drivers/leds/leds-lt3593.c190
-rw-r--r--drivers/leds/leds-max8997.c2
-rw-r--r--drivers/leds/leds-ns2.c4
-rw-r--r--drivers/leds/trigger/Kconfig15
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c51
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c64
-rw-r--r--drivers/leds/trigger/ledtrig-camera.c3
-rw-r--r--drivers/leds/trigger/ledtrig-default-on.c20
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c92
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c49
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c101
-rw-r--r--drivers/leds/trigger/ledtrig-oneshot.c91
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c58
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c102
-rw-r--r--drivers/lightnvm/Kconfig30
-rw-r--r--drivers/lightnvm/pblk-cache.c9
-rw-r--r--drivers/lightnvm/pblk-core.c78
-rw-r--r--drivers/lightnvm/pblk-gc.c34
-rw-r--r--drivers/lightnvm/pblk-init.c98
-rw-r--r--drivers/lightnvm/pblk-rb.c24
-rw-r--r--drivers/lightnvm/pblk-read.c247
-rw-r--r--drivers/lightnvm/pblk-recovery.c47
-rw-r--r--drivers/lightnvm/pblk-sysfs.c13
-rw-r--r--drivers/lightnvm/pblk-write.c35
-rw-r--r--drivers/lightnvm/pblk.h48
-rw-r--r--drivers/macintosh/Kconfig19
-rw-r--r--drivers/macintosh/Makefile1
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/macintosh/therm_windtunnel.c25
-rw-r--r--drivers/macintosh/via-pmu.c346
-rw-r--r--drivers/macintosh/via-pmu68k.c850
-rw-r--r--drivers/mailbox/Kconfig16
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/imx-mailbox.c358
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c6
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c571
-rw-r--r--drivers/mailbox/omap-mailbox.c31
-rw-r--r--drivers/mailbox/ti-msgmgr.c353
-rw-r--r--drivers/md/bcache/Kconfig7
-rw-r--r--drivers/md/bcache/alloc.c39
-rw-r--r--drivers/md/bcache/bcache.h230
-rw-r--r--drivers/md/bcache/bset.c205
-rw-r--r--drivers/md/bcache/bset.h146
-rw-r--r--drivers/md/bcache/btree.c135
-rw-r--r--drivers/md/bcache/btree.h88
-rw-r--r--drivers/md/bcache/closure.c19
-rw-r--r--drivers/md/bcache/closure.h10
-rw-r--r--drivers/md/bcache/debug.c40
-rw-r--r--drivers/md/bcache/debug.h6
-rw-r--r--drivers/md/bcache/extents.c37
-rw-r--r--drivers/md/bcache/extents.h6
-rw-r--r--drivers/md/bcache/io.c24
-rw-r--r--drivers/md/bcache/journal.c28
-rw-r--r--drivers/md/bcache/journal.h28
-rw-r--r--drivers/md/bcache/movinggc.c14
-rw-r--r--drivers/md/bcache/request.c136
-rw-r--r--drivers/md/bcache/request.h18
-rw-r--r--drivers/md/bcache/stats.c15
-rw-r--r--drivers/md/bcache/stats.h15
-rw-r--r--drivers/md/bcache/super.c166
-rw-r--r--drivers/md/bcache/sysfs.c84
-rw-r--r--drivers/md/bcache/sysfs.h6
-rw-r--r--drivers/md/bcache/util.c135
-rw-r--r--drivers/md/bcache/util.h43
-rw-r--r--drivers/md/bcache/writeback.c159
-rw-r--r--drivers/md/bcache/writeback.h38
-rw-r--r--drivers/md/dm-cache-metadata.c13
-rw-r--r--drivers/md/dm-cache-target.c35
-rw-r--r--drivers/md/dm-crypt.c66
-rw-r--r--drivers/md/dm-delay.c249
-rw-r--r--drivers/md/dm-integrity.c501
-rw-r--r--drivers/md/dm-kcopyd.c18
-rw-r--r--drivers/md/dm-raid.c8
-rw-r--r--drivers/md/dm-raid1.c17
-rw-r--r--drivers/md/dm-snap.c41
-rw-r--r--drivers/md/dm-table.c7
-rw-r--r--drivers/md/dm-thin-metadata.c9
-rw-r--r--drivers/md/dm-thin.c42
-rw-r--r--drivers/md/dm-writecache.c59
-rw-r--r--drivers/md/dm-zoned-reclaim.c6
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c14
-rw-r--r--drivers/md/md-bitmap.c305
-rw-r--r--drivers/md/md-bitmap.h60
-rw-r--r--drivers/md/md-cluster.c65
-rw-r--r--drivers/md/md.c81
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c20
-rw-r--r--drivers/md/raid1.c35
-rw-r--r--drivers/md/raid10.c59
-rw-r--r--drivers/md/raid5-cache.c10
-rw-r--r--drivers/md/raid5.c56
-rw-r--r--drivers/media/cec/cec-adap.c18
-rw-r--r--drivers/media/cec/cec-api.c8
-rw-r--r--drivers/media/cec/cec-notifier.c11
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c10
-rw-r--r--drivers/media/common/siano/smsdvb-main.c6
-rw-r--r--drivers/media/common/siano/smsdvb.h7
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c5
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c3
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c2
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c84
-rw-r--r--drivers/media/dvb-core/dvbdev.c18
-rw-r--r--drivers/media/dvb-frontends/Kconfig10
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9013.c15
-rw-r--r--drivers/media/dvb-frontends/af9033.c7
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c6
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c6
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c6
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c6
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c6
-rw-r--r--drivers/media/dvb-frontends/cx22700.c6
-rw-r--r--drivers/media/dvb-frontends/cx22702.c6
-rw-r--r--drivers/media/dvb-frontends/cx24110.c8
-rw-r--r--drivers/media/dvb-frontends/cx24113.c8
-rw-r--r--drivers/media/dvb-frontends/cx24116.c8
-rw-r--r--drivers/media/dvb-frontends/cx24117.c8
-rw-r--r--drivers/media/dvb-frontends/cx24120.c8
-rw-r--r--drivers/media/dvb-frontends/cx24123.c8
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c9
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_top.c6
-rw-r--r--drivers/media/dvb-frontends/dib0070.c8
-rw-r--r--drivers/media/dvb-frontends/dib0090.c12
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c6
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c6
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c6
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c6
-rw-r--r--drivers/media/dvb-frontends/dib8000.c6
-rw-r--r--drivers/media/dvb-frontends/dib9000.c6
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c25
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c13
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c30
-rw-r--r--drivers/media/dvb-frontends/ds3000.c8
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c27
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c24
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c6
-rw-r--r--drivers/media/dvb-frontends/helene.c105
-rw-r--r--drivers/media/dvb-frontends/helene.h3
-rw-r--r--drivers/media/dvb-frontends/horus3a.c6
-rw-r--r--drivers/media/dvb-frontends/itd1000.c8
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c8
-rw-r--r--drivers/media/dvb-frontends/l64781.c7
-rw-r--r--drivers/media/dvb-frontends/lg2160.c12
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c12
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c6
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c12
-rw-r--r--drivers/media/dvb-frontends/lgs8gl5.c7
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c6
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c6
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c8
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c7
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c6
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c802
-rw-r--r--drivers/media/dvb-frontends/mn88443x.h27
-rw-r--r--drivers/media/dvb-frontends/mt312.c10
-rw-r--r--drivers/media/dvb-frontends/mt352.c7
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c6
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c6
-rw-r--r--drivers/media/dvb-frontends/nxt6000.c6
-rw-r--r--drivers/media/dvb-frontends/or51132.c6
-rw-r--r--drivers/media/dvb-frontends/or51211.c8
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c16
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c10
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c6
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c6
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c6
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c8
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c6
-rw-r--r--drivers/media/dvb-frontends/s921.c7
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c7
-rw-r--r--drivers/media/dvb-frontends/sp8870.c6
-rw-r--r--drivers/media/dvb-frontends/sp887x.c6
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c6
-rw-r--r--drivers/media/dvb-frontends/stb6000.c4
-rw-r--r--drivers/media/dvb-frontends/stb6100.c5
-rw-r--r--drivers/media/dvb-frontends/stv0288.c7
-rw-r--r--drivers/media/dvb-frontends/stv0297.c6
-rw-r--r--drivers/media/dvb-frontends/stv0299.c7
-rw-r--r--drivers/media/dvb-frontends/stv0367.c20
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c7
-rw-r--r--drivers/media/dvb-frontends/stv090x.c6
-rw-r--r--drivers/media/dvb-frontends/stv0910.c10
-rw-r--r--drivers/media/dvb-frontends/stv6110.c6
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c7
-rw-r--r--drivers/media/dvb-frontends/stv6111.c5
-rw-r--r--drivers/media/dvb-frontends/tc90522.c10
-rw-r--r--drivers/media/dvb-frontends/tda10021.c10
-rw-r--r--drivers/media/dvb-frontends/tda10023.c6
-rw-r--r--drivers/media/dvb-frontends/tda10048.c6
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c18
-rw-r--r--drivers/media/dvb-frontends/tda10071.c10
-rw-r--r--drivers/media/dvb-frontends/tda10086.c6
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c7
-rw-r--r--drivers/media/dvb-frontends/tda665x.c6
-rw-r--r--drivers/media/dvb-frontends/tda8083.c7
-rw-r--r--drivers/media/dvb-frontends/tda8261.c9
-rw-r--r--drivers/media/dvb-frontends/tda826x.c4
-rw-r--r--drivers/media/dvb-frontends/ts2020.c4
-rw-r--r--drivers/media/dvb-frontends/tua6100.c6
-rw-r--r--drivers/media/dvb-frontends/ves1820.c6
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c8
-rw-r--r--drivers/media/dvb-frontends/zl10036.c8
-rw-r--r--drivers/media/dvb-frontends/zl10353.c7
-rw-r--r--drivers/media/firewire/firedtv-fe.c26
-rw-r--r--drivers/media/i2c/Kconfig115
-rw-r--r--drivers/media/i2c/Makefile5
-rw-r--r--drivers/media/i2c/ad9389b.c1
-rw-r--r--drivers/media/i2c/adv7180.c32
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c2
-rw-r--r--drivers/media/i2c/adv7511.c1
-rw-r--r--drivers/media/i2c/adv7604.c8
-rw-r--r--drivers/media/i2c/adv7842.c9
-rw-r--r--drivers/media/i2c/ak7375.c292
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.h33
-rw-r--r--drivers/media/i2c/dw9807-vcm.c329
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c1
-rw-r--r--drivers/media/i2c/imx258.c8
-rw-r--r--drivers/media/i2c/imx274.c742
-rw-r--r--drivers/media/i2c/lm3560.c3
-rw-r--r--drivers/media/i2c/mt9m032.c1
-rw-r--r--drivers/media/i2c/mt9p031.c1
-rw-r--r--drivers/media/i2c/mt9t001.c1
-rw-r--r--drivers/media/i2c/mt9v032.c1
-rw-r--r--drivers/media/i2c/mt9v111.c1298
-rw-r--r--drivers/media/i2c/ov2680.c1186
-rw-r--r--drivers/media/i2c/ov5640.c175
-rw-r--r--drivers/media/i2c/ov5645.c13
-rw-r--r--drivers/media/i2c/ov7670.c6
-rw-r--r--drivers/media/i2c/ov772x.c353
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c1437
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c20
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c2
-rw-r--r--drivers/media/i2c/tc358743.c5
-rw-r--r--drivers/media/i2c/tda1997x.c2
-rw-r--r--drivers/media/i2c/tvp514x.c2
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/i2c/tvp7002.c2
-rw-r--r--drivers/media/i2c/video-i2c.c81
-rw-r--r--drivers/media/i2c/vs6624.c4
-rw-r--r--drivers/media/media-device.c16
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/bt8xx/dst.c26
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c12
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c82
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c679
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.h58
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-gpio.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c673
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.h135
-rw-r--r--drivers/media/pci/cx25821/cx25821.h12
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c7
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c4
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c20
-rw-r--r--drivers/media/pci/ddbridge/Makefile3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c45
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.c5
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.c18
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.c409
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.h192
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h8
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-sx8.c488
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h14
-rw-r--r--drivers/media/pci/dm1105/dm1105.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c1
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp3030.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c5
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c7
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c11
-rw-r--r--drivers/media/platform/Kconfig20
-rw-r--r--drivers/media/platform/Makefile5
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c27
-rw-r--r--drivers/media/platform/cadence/Kconfig2
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c1
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c1
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c54
-rw-r--r--drivers/media/platform/coda/coda-bit.c123
-rw-r--r--drivers/media/platform/coda/coda-common.c189
-rw-r--r--drivers/media/platform/coda/coda-h264.c319
-rw-r--r--drivers/media/platform/coda/coda.h4
-rw-r--r--drivers/media/platform/coda/coda_regs.h1
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c1
-rw-r--r--drivers/media/platform/cros-ec-cec/Makefile1
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c347
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c1
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c1
-rw-r--r--drivers/media/platform/davinci/vpif_display.c24
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c11
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c6
-rw-r--r--drivers/media/platform/fsl-viu.c38
-rw-r--r--drivers/media/platform/m2m-deinterlace.c25
-rw-r--r--drivers/media/platform/meson/ao-cec.c2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c5
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c25
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c23
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c16
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c21
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/pxa_camera.c22
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.h123
-rw-r--r--drivers/media/platform/qcom/camss/Makefile (renamed from drivers/media/platform/qcom/camss-8x16/Makefile)4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-csid.c)471
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-csid.h)17
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c176
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c256
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-csiphy.c)363
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-csiphy.h)37
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-ispif.c)264
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-ispif.h)23
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c1018
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c1140
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-vfe.c)1569
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h186
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-video.c)133
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-video.h)12
-rw-r--r--drivers/media/platform/qcom/camss/camss.c (renamed from drivers/media/platform/qcom/camss-8x16/camss.c)450
-rw-r--r--drivers/media/platform/qcom/camss/camss.h (renamed from drivers/media/platform/qcom/camss-8x16/camss.h)43
-rw-r--r--drivers/media/platform/qcom/venus/Makefile3
-rw-r--r--drivers/media/platform/qcom/venus/core.c107
-rw-r--r--drivers/media/platform/qcom/venus/core.h100
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c568
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h23
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h10
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c62
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h112
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c407
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c278
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.h110
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c108
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h10
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c329
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c10
-rw-r--r--drivers/media/platform/qcom/venus/venc.c227
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c10
-rw-r--r--drivers/media/platform/rcar-fcp.c7
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig1
-rw-r--r--drivers/media/platform/rcar-vin/Makefile1
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c321
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c20
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c63
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c18
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h37
-rw-r--r--drivers/media/platform/rcar_drif.c8
-rw-r--r--drivers/media/platform/rcar_fdp1.c6
-rw-r--r--drivers/media/platform/rcar_jpu.c27
-rw-r--r--drivers/media/platform/renesas-ceu.c91
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c45
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c20
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h2
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c19
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c29
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c15
-rw-r--r--drivers/media/platform/sh_veu.c5
-rw-r--r--drivers/media/platform/sh_vou.c5
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c5
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c18
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c1
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c259
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c20
-rw-r--r--drivers/media/platform/vicodec/Kconfig13
-rw-r--r--drivers/media/platform/vicodec/Makefile4
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.c797
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.h129
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c1506
-rw-r--r--drivers/media/platform/video-mux.c119
-rw-r--r--drivers/media/platform/vim2m.c42
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c1
-rw-r--r--drivers/media/platform/vimc/vimc-core.c1
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c1
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c1
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c1
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c2
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1.h3
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c433
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h28
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c12
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c20
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h5
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c72
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c3
-rw-r--r--drivers/media/rc/bpf-lirc.c25
-rw-r--r--drivers/media/rc/rc-ir-raw.c8
-rw-r--r--drivers/media/rc/rc-main.c12
-rw-r--r--drivers/media/tuners/e4000.c6
-rw-r--r--drivers/media/tuners/fc0011.c6
-rw-r--r--drivers/media/tuners/fc0012.c7
-rw-r--r--drivers/media/tuners/fc0013.c7
-rw-r--r--drivers/media/tuners/fc2580.c6
-rw-r--r--drivers/media/tuners/it913x.c6
-rw-r--r--drivers/media/tuners/m88rs6000t.c6
-rw-r--r--drivers/media/tuners/max2165.c8
-rw-r--r--drivers/media/tuners/mc44s803.c8
-rw-r--r--drivers/media/tuners/mt2060.c8
-rw-r--r--drivers/media/tuners/mt2063.c7
-rw-r--r--drivers/media/tuners/mt2131.c8
-rw-r--r--drivers/media/tuners/mt2266.c8
-rw-r--r--drivers/media/tuners/mxl301rf.c4
-rw-r--r--drivers/media/tuners/mxl5005s.c8
-rw-r--r--drivers/media/tuners/mxl5007t.c2
-rw-r--r--drivers/media/tuners/qm1d1b0004.c4
-rw-r--r--drivers/media/tuners/qm1d1c0042.c4
-rw-r--r--drivers/media/tuners/qt1010.c8
-rw-r--r--drivers/media/tuners/qt1010_priv.h14
-rw-r--r--drivers/media/tuners/r820t.c6
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/tda18212.c8
-rw-r--r--drivers/media/tuners/tda18218.c8
-rw-r--r--drivers/media/tuners/tda18250.c6
-rw-r--r--drivers/media/tuners/tda18271-common.c8
-rw-r--r--drivers/media/tuners/tda18271-fe.c6
-rw-r--r--drivers/media/tuners/tda827x.c12
-rw-r--r--drivers/media/tuners/tua9001.c6
-rw-r--r--drivers/media/tuners/tuner-simple.c5
-rw-r--r--drivers/media/tuners/tuner-xc2028.c15
-rw-r--r--drivers/media/tuners/xc4000.c16
-rw-r--r--drivers/media/tuners/xc5000.c12
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c14
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c7
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig5
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c492
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c4
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig6
-rw-r--r--drivers/media/usb/dvb-usb/Makefile3
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c1
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c19
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c11
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c3
-rw-r--r--drivers/media/usb/dvb-usb/usb-urb.c4
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c7
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c39
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c9
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c11
-rw-r--r--drivers/media/usb/gspca/kinect.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c5
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c12
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c5
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c215
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c5
-rw-r--r--drivers/media/usb/uvc/uvc_status.c121
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c4
-rw-r--r--drivers/media/usb/uvc/uvc_video.c62
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h18
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c128
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c266
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/memory/Kconfig6
-rw-r--r--drivers/memory/tegra/mc.c16
-rw-r--r--drivers/memory/tegra/tegra186.c1
-rw-r--r--drivers/memory/ti-emif-pm.c33
-rw-r--r--drivers/message/fusion/mptbase.c5
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/88pm860x-i2c.c8
-rw-r--r--drivers/mfd/Kconfig83
-rw-r--r--drivers/mfd/Makefile17
-rw-r--r--drivers/mfd/arizona-core.c34
-rw-r--r--drivers/mfd/as3722.c12
-rw-r--r--drivers/mfd/atmel-hlcdc.c1
-rw-r--r--drivers/mfd/axp20x-i2c.c2
-rw-r--r--drivers/mfd/axp20x.c28
-rw-r--r--drivers/mfd/cros_ec_dev.c33
-rw-r--r--drivers/mfd/cs47l35-tables.c1609
-rw-r--r--drivers/mfd/cs47l85-tables.c3009
-rw-r--r--drivers/mfd/cs47l90-tables.c2674
-rw-r--r--drivers/mfd/da9063-core.c44
-rw-r--r--drivers/mfd/da9063-i2c.c239
-rw-r--r--drivers/mfd/da9063-irq.c264
-rw-r--r--drivers/mfd/dln2.c10
-rw-r--r--drivers/mfd/hi655x-pmic.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/kempld-core.c15
-rw-r--r--drivers/mfd/madera-core.c609
-rw-r--r--drivers/mfd/madera-i2c.c140
-rw-r--r--drivers/mfd/madera-spi.c139
-rw-r--r--drivers/mfd/madera.h44
-rw-r--r--drivers/mfd/rave-sp.c119
-rw-r--r--drivers/mfd/rohm-bd718x7.c211
-rw-r--r--drivers/mfd/sec-core.c1
-rw-r--r--drivers/mfd/sm501.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c3
-rw-r--r--drivers/mfd/wm8994-core.c15
-rw-r--r--drivers/misc/aspeed-lpc-snoop.c84
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c12
-rw-r--r--drivers/misc/cxl/Kconfig8
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/api.c154
-rw-r--r--drivers/misc/cxl/base.c83
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h41
-rw-r--r--drivers/misc/cxl/cxllib.c4
-rw-r--r--drivers/misc/cxl/debugfs.c5
-rw-r--r--drivers/misc/cxl/fault.c4
-rw-r--r--drivers/misc/cxl/guest.c8
-rw-r--r--drivers/misc/cxl/main.c7
-rw-r--r--drivers/misc/cxl/native.c3
-rw-r--r--drivers/misc/cxl/pci.c392
-rw-r--r--drivers/misc/cxl/phb.c44
-rw-r--r--drivers/misc/cxl/vphb.c46
-rw-r--r--drivers/misc/eeprom/at24.c17
-rw-r--r--drivers/misc/eeprom/at25.c4
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c6
-rw-r--r--drivers/misc/eeprom/max6875.c3
-rw-r--r--drivers/misc/genwqe/card_base.h2
-rw-r--r--drivers/misc/genwqe/card_debugfs.c4
-rw-r--r--drivers/misc/genwqe/card_dev.c5
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c27
-rw-r--r--drivers/misc/ibmvmc.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c92
-rw-r--r--drivers/misc/mei/bus.c36
-rw-r--r--drivers/misc/mei/client.c128
-rw-r--r--drivers/misc/mei/client.h2
-rw-r--r--drivers/misc/mei/debugfs.c2
-rw-r--r--drivers/misc/mei/hbm.c37
-rw-r--r--drivers/misc/mei/hw-me.c77
-rw-r--r--drivers/misc/mei/hw-me.h6
-rw-r--r--drivers/misc/mei/hw-txe.c66
-rw-r--r--drivers/misc/mei/hw.h73
-rw-r--r--drivers/misc/mei/interrupt.c22
-rw-r--r--drivers/misc/mei/main.c41
-rw-r--r--drivers/misc/mei/mei_dev.h77
-rw-r--r--drivers/misc/mic/cosm/cosm_main.h5
-rw-r--r--drivers/misc/mic/cosm/cosm_scif_server.c6
-rw-r--r--drivers/misc/mic/cosm_client/cosm_scif_client.c6
-rw-r--r--drivers/misc/mic/scif/scif_api.c21
-rw-r--r--drivers/misc/mic/scif/scif_dma.c7
-rw-r--r--drivers/misc/ocxl/context.c22
-rw-r--r--drivers/misc/ocxl/link.c27
-rw-r--r--drivers/misc/ocxl/sysfs.c5
-rw-r--r--drivers/misc/pci_endpoint_test.c290
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c7
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c3
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c5
-rw-r--r--drivers/misc/sram.c18
-rw-r--r--drivers/misc/ti-st/Kconfig3
-rw-r--r--drivers/misc/ti-st/st_kim.c7
-rw-r--r--drivers/misc/tsl2550.c2
-rw-r--r--drivers/misc/vexpress-syscfg.c10
-rw-r--r--drivers/misc/vmw_balloon.c185
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c9
-rw-r--r--drivers/mmc/core/core.c48
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c16
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/sdio.c1
-rw-r--r--drivers/mmc/core/slot-gpio.c2
-rw-r--r--drivers/mmc/host/Kconfig11
-rw-r--r--drivers/mmc/host/Makefile4
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c33
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/mmci.c82
-rw-r--r--drivers/mmc/host/mmci.h81
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.c18
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.h5
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/mmc/host/pxamci.c236
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c139
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c35
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c17
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c50
-rw-r--r--drivers/mmc/host/sdhci-msm.c510
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c116
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c91
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c3
-rw-r--r--drivers/mmc/host/sdhci-pci-dwc-mshc.c84
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c129
-rw-r--r--drivers/mmc/host/sdhci-pci.h5
-rw-r--r--drivers/mmc/host/sdhci-tegra.c37
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c1
-rw-r--r--drivers/mmc/host/sdhci.c25
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mmc/host/sunxi-mmc.c14
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c52
-rw-r--r--drivers/mtd/Kconfig24
-rw-r--r--drivers/mtd/chips/Kconfig2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c21
-rw-r--r--drivers/mtd/chips/gen_probe.c7
-rw-r--r--drivers/mtd/devices/Kconfig4
-rw-r--r--drivers/mtd/devices/m25p80.c8
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c4
-rw-r--r--drivers/mtd/devices/powernv_flash.c1
-rw-r--r--drivers/mtd/devices/sst25l.c5
-rw-r--r--drivers/mtd/lpddr/Kconfig8
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c2
-rw-r--r--drivers/mtd/maps/Kconfig24
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c3
-rw-r--r--drivers/mtd/maps/impa7.c5
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c2
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c5
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c3
-rw-r--r--drivers/mtd/maps/solutionengine.c6
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/mtd/mtdcore.c28
-rw-r--r--drivers/mtd/mtdpart.c33
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/onenand/generic.c5
-rw-r--r--drivers/mtd/nand/onenand/samsung.c5
-rw-r--r--drivers/mtd/nand/raw/Kconfig157
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c175
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c1
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c67
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c1
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c143
-rw-r--r--drivers/mtd/nand/raw/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c3
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c231
-rw-r--r--drivers/mtd/nand/raw/denali.c216
-rw-r--r--drivers/mtd/nand/raw/denali.h1
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c66
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c1
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/docg4.c89
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c25
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c25
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c183
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c15
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c76
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h11
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c78
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c51
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c41
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c61
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c77
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c319
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c79
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c162
-rw-r--r--drivers/mtd/nand/raw/nand_base.c347
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c10
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c23
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c48
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c353
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c32
-rw-r--r--drivers/mtd/nand/raw/nandsim.c84
-rw-r--r--drivers/mtd/nand/raw/ndfc.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c463
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c9
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c4
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c591
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c52
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c36
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c5
-rw-r--r--drivers/mtd/nand/raw/sm_common.c39
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c55
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c44
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c1246
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c44
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c127
-rw-r--r--drivers/mtd/nand/spi/Kconfig7
-rw-r--r--drivers/mtd/nand/spi/Makefile3
-rw-r--r--drivers/mtd/nand/spi/core.c1155
-rw-r--r--drivers/mtd/nand/spi/macronix.c144
-rw-r--r--drivers/mtd/nand/spi/micron.c133
-rw-r--r--drivers/mtd/nand/spi/winbond.c141
-rw-r--r--drivers/mtd/nftlmount.c3
-rw-r--r--drivers/mtd/parsers/parser_trx.c7
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c23
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c26
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c2
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c1
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c18
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c6
-rw-r--r--drivers/mtd/ubi/cdev.c11
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/mtd/ubi/ubi-media.h6
-rw-r--r--drivers/mtd/ubi/ubi.h4
-rw-r--r--drivers/mtd/ubi/vmt.c12
-rw-r--r--drivers/mtd/ubi/vtbl.c23
-rw-r--r--drivers/mux/Kconfig10
-rw-r--r--drivers/mux/Makefile2
-rw-r--r--drivers/mux/adgs1408.c131
-rw-r--r--drivers/net/bonding/bond_main.c17
-rw-r--r--drivers/net/bonding/bond_options.c23
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/can/Kconfig6
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/dev.c14
-rw-r--r--drivers/net/can/flexcan.c33
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/m_can/m_can.c18
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c5
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c2
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c39
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/can/usb/Kconfig48
-rw-r--r--drivers/net/can/usb/Makefile7
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2085
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h188
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c835
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2028
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c1358
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/can/usb/ucan.c1613
-rw-r--r--drivers/net/can/xilinx_can.c784
-rw-r--r--drivers/net/dsa/Kconfig24
-rw-r--r--drivers/net/dsa/Makefile3
-rw-r--r--drivers/net/dsa/b53/b53_srab.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c19
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c46
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h2
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c9
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c268
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h47
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_avb.c25
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c134
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h14
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c109
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h23
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c101
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h71
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c439
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h24
-rw-r--r--drivers/net/dsa/realtek-smi.c489
-rw-r--r--drivers/net/dsa/realtek-smi.h144
-rw-r--r--drivers/net/dsa/rtl8366.c515
-rw-r--r--drivers/net/dsa/rtl8366rb.c1454
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.c1365
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/mac8390.c20
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile9
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c5
-rw-r--r--drivers/net/ethernet/amd/Kconfig6
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c7
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c4
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/xgene/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c6
-rw-r--r--drivers/net/ethernet/apple/bmac.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c117
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h24
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c59
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c49
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c57
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c52
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h4
-rw-r--r--drivers/net/ethernet/arc/Kconfig6
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c10
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/aurora/Kconfig1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig14
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c227
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h14
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c257
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h34
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h66
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c121
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c375
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1214
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c30
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/cadence/Kconfig6
-rw-r--r--drivers/net/ethernet/cadence/macb.h11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c126
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c6
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig2
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1
-rw-r--r--drivers/net/ethernet/cavium/Kconfig18
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c47
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c45
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c26
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c708
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c186
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c167
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c376
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c89
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c4
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c80
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c135
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig40
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c96
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c103
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c39
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c22
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c27
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c20
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c21
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c18
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c9
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig17
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c56
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c408
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c115
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h120
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c676
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c75
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c177
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c36
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c19
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c211
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c347
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c27
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c69
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c13
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c267
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c389
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c10
-rw-r--r--drivers/net/ethernet/marvell/Kconfig8
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c438
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c15
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h134
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c973
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h203
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c703
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c179
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c223
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h75
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c71
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c220
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c947
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h175
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h276
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h210
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c603
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c269
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c230
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/vxlan.h)43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c190
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c175
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/ib.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h973
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c473
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h260
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c428
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c342
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c271
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c132
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c536
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c1168
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h124
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c438
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h228
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c302
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c193
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c463
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c354
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c392
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h36
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c3
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c3
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c723
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c284
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h234
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c1159
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h74
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c456
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c2
-rw-r--r--drivers/net/ethernet/neterion/Kconfig23
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c313
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c54
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h58
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c79
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c92
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c213
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h37
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c48
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c56
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c169
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c8
-rw-r--r--drivers/net/ethernet/ni/nixge.c12
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Makefile2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h40
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c262
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h35
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c193
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h2
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c129
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c119
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c56
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h20
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c138
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c423
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c31
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c195
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c21
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/realtek/Kconfig3
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1095
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c102
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c141
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h14
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c30
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c30
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/farch.c1
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/rx.c7
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c13
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c9
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c69
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h228
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c371
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c411
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c75
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c133
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c60
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c16
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c488
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c2
-rw-r--r--drivers/net/ethernet/ti/cpts.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c33
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c13
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c112
-rw-r--r--drivers/net/fjes/fjes_main.c8
-rw-r--r--drivers/net/geneve.c19
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hamradio/6pack.c5
-rw-r--r--drivers/net/hyperv/hyperv_net.h13
-rw-r--r--drivers/net/hyperv/netvsc.c54
-rw-r--r--drivers/net/hyperv/netvsc_drv.c132
-rw-r--r--drivers/net/hyperv/rndis_filter.c60
-rw-r--r--drivers/net/ieee802154/Kconfig11
-rw-r--r--drivers/net/ieee802154/Makefile1
-rw-r--r--drivers/net/ieee802154/adf7242.c34
-rw-r--r--drivers/net/ieee802154/at86rf230.c15
-rw-r--r--drivers/net/ieee802154/fakelb.c5
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c937
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.h73
-rw-r--r--drivers/net/ieee802154/mcr20a.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c39
-rw-r--r--drivers/net/macvlan.c11
-rw-r--r--drivers/net/net_failover.c11
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/bpf.c91
-rw-r--r--drivers/net/netdevsim/devlink.c1
-rw-r--r--drivers/net/netdevsim/ipsec.c297
-rw-r--r--drivers/net/netdevsim/netdev.c119
-rw-r--r--drivers/net/netdevsim/netdevsim.h71
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/dp83640.c5
-rw-r--r--drivers/net/phy/dp83tc811.c48
-rw-r--r--drivers/net/phy/fixed_phy.c7
-rw-r--r--drivers/net/phy/marvell.c56
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c110
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c22
-rw-r--r--drivers/net/phy/mscc.c2
-rw-r--r--drivers/net/phy/phy.c105
-rw-r--r--drivers/net/phy/phy_device.c15
-rw-r--r--drivers/net/phy/phylink.c30
-rw-r--r--drivers/net/phy/realtek.c80
-rw-r--r--drivers/net/phy/sfp-bus.c35
-rw-r--r--drivers/net/phy/sfp.c803
-rw-r--r--drivers/net/phy/vitesse.c175
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c20
-rw-r--r--drivers/net/ppp/ppp_mppe.c56
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/team/team.c8
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/asix_devices.c42
-rw-r--r--drivers/net/usb/catc.c1
-rw-r--r--drivers/net/usb/cdc-phonet.c6
-rw-r--r--drivers/net/usb/hso.c44
-rw-r--r--drivers/net/usb/kaweth.c8
-rw-r--r--drivers/net/usb/lan78xx.c48
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c18
-rw-r--r--drivers/net/usb/rtl8150.c7
-rw-r--r--drivers/net/usb/smsc75xx.c62
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/veth.c744
-rw-r--r--drivers/net/virtio_net.c267
-rw-r--r--drivers/net/vxlan.c144
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c6
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wimax/i2400m/control.c3
-rw-r--r--drivers/net/wimax/i2400m/fw.c3
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig24
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c70
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c89
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c47
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c85
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c101
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c67
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c30
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c95
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c80
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c847
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c289
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c425
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c73
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c129
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c53
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h59
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c708
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h112
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c1608
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h568
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h316
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c723
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h824
-rw-r--r--drivers/net/wireless/atmel/atmel.c18
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c48
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c8
-rw-r--r--drivers/net/wireless/cisco/airo_cs.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c25
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-debug.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c284
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h286
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c211
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c364
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c388
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c92
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c17
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c123
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c12
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c30
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c7
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c95
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c55
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c32
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig27
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile20
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h174
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/core.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.c522
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c445
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c720
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h772
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c658
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.h154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c403
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c656
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h101
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h330
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c1008
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.h81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/regs.h651
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h313
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/tx.c270
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c381
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/util.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_common.c350
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c377
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c305
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c641
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c699
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c326
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c360
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c349
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_regs.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx.c161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_usb.c142
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_core.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_init.c318
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c240
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_main.c185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c463
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c845
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_mcu.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h71
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Kconfig2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c106
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c211
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c26
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h105
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00pci.h6
-rw-r--r--drivers/net/wireless/ray_cs.c6
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c180
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c38
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c23
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h3
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c29
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c90
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c538
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c146
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c30
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h1
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_chip.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c21
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/net/xen-netfront.c34
-rw-r--r--drivers/nfc/nfcmrvl/usb.c5
-rw-r--r--drivers/nfc/pn533/usb.c4
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c3
-rw-r--r--drivers/nubus/bus.c3
-rw-r--r--drivers/nvdimm/btt.c12
-rw-r--r--drivers/nvdimm/nd.h7
-rw-r--r--drivers/nvdimm/pmem.c16
-rw-r--r--drivers/nvme/host/core.c171
-rw-r--r--drivers/nvme/host/fabrics.c12
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c3
-rw-r--r--drivers/nvme/host/lightnvm.c27
-rw-r--r--drivers/nvme/host/multipath.c349
-rw-r--r--drivers/nvme/host/nvme.h78
-rw-r--r--drivers/nvme/host/pci.c89
-rw-r--r--drivers/nvme/host/rdma.c254
-rw-r--r--drivers/nvme/host/trace.c11
-rw-r--r--drivers/nvme/host/trace.h142
-rw-r--r--drivers/nvme/target/admin-cmd.c221
-rw-r--r--drivers/nvme/target/configfs.c259
-rw-r--r--drivers/nvme/target/core.c106
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fc.c44
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c7
-rw-r--r--drivers/nvme/target/io-cmd-file.c80
-rw-r--r--drivers/nvme/target/loop.c3
-rw-r--r--drivers/nvme/target/nvmet.h62
-rw-r--r--drivers/nvme/target/rdma.c202
-rw-r--r--drivers/nvmem/Kconfig11
-rw-r--r--drivers/nvmem/Makefile3
-rw-r--r--drivers/nvmem/core.c6
-rw-r--r--drivers/nvmem/imx-ocotp.c7
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c1
-rw-r--r--drivers/nvmem/mtk-efuse.c1
-rw-r--r--drivers/nvmem/qfprom.c1
-rw-r--r--drivers/nvmem/rave-sp-eeprom.c6
-rw-r--r--drivers/nvmem/sc27xx-efuse.c264
-rw-r--r--drivers/nvmem/uniphier-efuse.c1
-rw-r--r--drivers/of/address.c6
-rw-r--r--drivers/of/base.c6
-rw-r--r--drivers/of/device.c21
-rw-r--r--drivers/of/fdt.c9
-rw-r--r--drivers/of/of_mdio.c17
-rw-r--r--drivers/of/of_private.h2
-rw-r--r--drivers/of/overlay.c11
-rw-r--r--drivers/of/platform.c3
-rw-r--r--drivers/parport/ieee1284.c3
-rw-r--r--drivers/parport/parport_serial.c9
-rw-r--r--drivers/parport/parport_sunbpp.c8
-rw-r--r--drivers/pci/Makefile6
-rw-r--r--drivers/pci/ats.c3
-rw-r--r--drivers/pci/bus.c6
-rw-r--r--drivers/pci/controller/Kconfig15
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/dwc/Kconfig1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c1
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c1
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c210
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h29
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c28
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c1
-rw-r--r--drivers/pci/controller/pci-aardvark.c81
-rw-r--r--drivers/pci/controller/pci-ftpci100.c4
-rw-r--r--drivers/pci/controller/pci-hyperv.c11
-rw-r--r--drivers/pci/controller/pci-mvebu.c153
-rw-r--r--drivers/pci/controller/pci-v3-semi.c2
-rw-r--r--drivers/pci/controller/pci-versatile.c2
-rw-r--r--drivers/pci/controller/pci-xgene.c2
-rw-r--r--drivers/pci/controller/pcie-cadence-ep.c21
-rw-r--r--drivers/pci/controller/pcie-cadence-host.c33
-rw-r--r--drivers/pci/controller/pcie-cadence.c123
-rw-r--r--drivers/pci/controller/pcie-cadence.h13
-rw-r--r--drivers/pci/controller/pcie-iproc.c159
-rw-r--r--drivers/pci/controller/pcie-iproc.h8
-rw-r--r--drivers/pci/controller/pcie-mediatek.c2
-rw-r--r--drivers/pci/controller/pcie-mobiveil.c6
-rw-r--r--drivers/pci/controller/pcie-rcar.c16
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c2
-rw-r--r--drivers/pci/controller/pcie-xilinx.c1
-rw-r--r--drivers/pci/controller/vmd.c13
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c86
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c24
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c68
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c62
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c40
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c22
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c14
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c16
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c15
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c20
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c134
-rw-r--r--drivers/pci/hotplug/pciehp.h117
-rw-r--r--drivers/pci/hotplug/pciehp_core.c127
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c351
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c281
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c4
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c348
-rw-r--r--drivers/pci/hotplug/pnv_php.c5
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c13
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c13
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c9
-rw-r--r--drivers/pci/hotplug/shpchp_core.c41
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c2
-rw-r--r--drivers/pci/iov.c20
-rw-r--r--drivers/pci/irq.c6
-rw-r--r--drivers/pci/msi.c3
-rw-r--r--drivers/pci/of.c16
-rw-r--r--drivers/pci/pci-acpi.c29
-rw-r--r--drivers/pci/pci-driver.c3
-rw-r--r--drivers/pci/pci-sysfs.c6
-rw-r--r--drivers/pci/pci.c543
-rw-r--r--drivers/pci/pci.h84
-rw-r--r--drivers/pci/pcie/aer.c341
-rw-r--r--drivers/pci/pcie/aspm.c8
-rw-r--r--drivers/pci/pcie/dpc.c107
-rw-r--r--drivers/pci/pcie/err.c23
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/pcie/portdrv_core.c30
-rw-r--r--drivers/pci/pcie/portdrv_pci.c27
-rw-r--r--drivers/pci/probe.c106
-rw-r--r--drivers/pci/quirks.c398
-rw-r--r--drivers/pci/remove.c6
-rw-r--r--drivers/pci/rom.c11
-rw-r--r--drivers/pci/switch/switchtec.c14
-rw-r--r--drivers/pci/vpd.c7
-rw-r--r--drivers/pcmcia/pcmcia_resource.c36
-rw-r--r--drivers/perf/arm-cci.c38
-rw-r--r--drivers/perf/arm-ccn.c15
-rw-r--r--drivers/perf/arm_pmu.c38
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c12
-rw-r--r--drivers/perf/xgene_pmu.c2
-rw-r--r--drivers/phy/broadcom/Kconfig10
-rw-r--r--drivers/phy/broadcom/Makefile2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-pcie.c305
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c4
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c5
-rw-r--r--drivers/phy/marvell/phy-berlin-usb.c5
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c5
-rw-r--r--drivers/phy/mediatek/Makefile1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c85
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c1
-rw-r--r--drivers/phy/renesas/Kconfig7
-rw-r--r--drivers/phy/renesas/Makefile1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c151
-rw-r--r--drivers/pinctrl/Kconfig10
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/Kconfig1
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c270
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.h22
-rw-r--r--drivers/pinctrl/actions/pinctrl-s900.c31
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c6
-rw-r--r--drivers/pinctrl/berlin/Kconfig5
-rw-r--r--drivers/pinctrl/berlin/Makefile1
-rw-r--r--drivers/pinctrl/berlin/berlin.c14
-rw-r--r--drivers/pinctrl/berlin/pinctrl-as370.c368
-rw-r--r--drivers/pinctrl/cirrus/Kconfig14
-rw-r--r--drivers/pinctrl/cirrus/Makefile13
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs47l35.c45
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs47l85.c59
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs47l90.c57
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c1076
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera.h41
-rw-r--r--drivers/pinctrl/core.c36
-rw-r--r--drivers/pinctrl/core.h6
-rw-r--r--drivers/pinctrl/devicetree.c15
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c351
-rw-r--r--drivers/pinctrl/intel/Kconfig12
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c97
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c436
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c37
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h5
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c53
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c118
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c11
-rw-r--r--drivers/pinctrl/pinctrl-amd.c17
-rw-r--r--drivers/pinctrl/pinctrl-amd.h4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c46
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c26
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c104
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c24
-rw-r--r--drivers/pinctrl/pinctrl-single.c127
-rw-r--r--drivers/pinctrl/pinctrl-u300.c1
-rw-r--r--drivers/pinctrl/pinmux.c17
-rw-r--r--drivers/pinctrl/pinmux.h7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c32
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c16
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c68
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h11
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c333
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77970.c14
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c69
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c43
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c6
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra114.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra124.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra210.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra30.c8
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c11
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c21
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c6
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c11
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c11
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c16
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c11
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c11
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c6
-rw-r--r--drivers/platform/chrome/Kconfig20
-rw-r--r--drivers/platform/chrome/Makefile2
-rw-r--r--drivers/platform/chrome/chromeos_tbmc.c10
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c (renamed from drivers/mfd/cros_ec_i2c.c)0
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c40
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c (renamed from drivers/mfd/cros_ec_spi.c)0
-rw-r--r--drivers/platform/goldfish/Kconfig5
-rw-r--r--drivers/platform/goldfish/Makefile1
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/goldfish/pdev_bus.c232
-rw-r--r--drivers/platform/mellanox/Kconfig11
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c60
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c245
-rw-r--r--drivers/platform/mips/cpu_hwmon.c3
-rw-r--r--drivers/platform/x86/Kconfig27
-rw-r--r--drivers/platform/x86/Makefile3
-rw-r--r--drivers/platform/x86/acer-wmi.c7
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c1
-rw-r--r--drivers/platform/x86/asus-wireless.c24
-rw-r--r--drivers/platform/x86/asus-wmi.c46
-rw-r--r--drivers/platform/x86/dell-laptop.c2
-rw-r--r--drivers/platform/x86/dell-smbios-base.c35
-rw-r--r--drivers/platform/x86/dell-smbios-smm.c4
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c12
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c132
-rw-r--r--drivers/platform/x86/ideapad-laptop.c18
-rw-r--r--drivers/platform/x86/intel-hid.c178
-rw-r--r--drivers/platform/x86/intel-vbtn.c5
-rw-r--r--drivers/platform/x86/intel_bxtwc_tmu.c1
-rw-r--r--drivers/platform/x86/intel_ips.c5
-rw-r--r--drivers/platform/x86/intel_pmc_core.c120
-rw-r--r--drivers/platform/x86/intel_pmc_core.h6
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c2
-rw-r--r--drivers/platform/x86/mlx-platform.c486
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c136
-rw-r--r--drivers/platform/x86/toshiba_acpi.c33
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c (renamed from drivers/platform/x86/silead_dmi.c)639
-rw-r--r--drivers/platform/x86/wmi.c9
-rw-r--r--drivers/power/avs/smartreflex.c1
-rw-r--r--drivers/power/reset/Kconfig11
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/gemini-poweroff.c12
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c5
-rw-r--r--drivers/power/reset/qcom-pon.c91
-rw-r--r--drivers/power/reset/vexpress-poweroff.c12
-rw-r--r--drivers/power/reset/zx-reboot.c1
-rw-r--r--drivers/power/supply/Kconfig23
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_fg.c14
-rw-r--r--drivers/power/supply/adp5061.c745
-rw-r--r--drivers/power/supply/axp20x_usb_power.c1
-rw-r--r--drivers/power/supply/axp288_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c3
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c545
-rw-r--r--drivers/power/supply/ds2760_battery.c348
-rw-r--r--drivers/power/supply/generic-adc-battery.c25
-rw-r--r--drivers/power/supply/lego_ev3_battery.c20
-rw-r--r--drivers/power/supply/max1721x_battery.c2
-rw-r--r--drivers/power/supply/max77693_charger.c1
-rw-r--r--drivers/power/supply/max8998_charger.c1
-rw-r--r--drivers/power/supply/olpc_battery.c1
-rw-r--r--drivers/power/supply/power_supply_core.c11
-rw-r--r--drivers/power/supply/sbs-battery.c67
-rw-r--r--drivers/power/supply/tps65217_charger.c22
-rw-r--r--drivers/power/supply/wm8350_power.c3
-rw-r--r--drivers/powercap/Kconfig10
-rw-r--r--drivers/powercap/Makefile1
-rw-r--r--drivers/powercap/idle_inject.c356
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/ptp_chardev.c1
-rw-r--r--drivers/ptp/ptp_dte.c1
-rw-r--r--drivers/ptp/ptp_qoriq.c217
-rw-r--r--drivers/pwm/Kconfig2
-rw-r--r--drivers/pwm/pwm-berlin.c45
-rw-r--r--drivers/pwm/pwm-cros-ec.c9
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c57
-rw-r--r--drivers/pwm/pwm-imx.c5
-rw-r--r--drivers/pwm/pwm-mediatek.c19
-rw-r--r--drivers/pwm/pwm-meson.c3
-rw-r--r--drivers/pwm/pwm-mxs.c8
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c5
-rw-r--r--drivers/pwm/pwm-stm32-lp.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c14
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/regulator/Kconfig25
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/arizona-ldo1.c27
-rw-r--r--drivers/regulator/bd71837-regulator.c44
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c72
-rw-r--r--drivers/regulator/core.c44
-rw-r--r--drivers/regulator/cpcap-regulator.c103
-rw-r--r--drivers/regulator/da9063-regulator.c84
-rw-r--r--drivers/regulator/max14577-regulator.c22
-rw-r--r--drivers/regulator/max77686-regulator.c32
-rw-r--r--drivers/regulator/max77693-regulator.c32
-rw-r--r--drivers/regulator/max77802-regulator.c34
-rw-r--r--drivers/regulator/max8997-regulator.c33
-rw-r--r--drivers/regulator/max8998.c28
-rw-r--r--drivers/regulator/pfuze100-regulator.c112
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c769
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c38
-rw-r--r--drivers/regulator/s2mpa01.c14
-rw-r--r--drivers/regulator/s2mps11.c21
-rw-r--r--drivers/regulator/s5m8767.c16
-rw-r--r--drivers/regulator/tps65217-regulator.c2
-rw-r--r--drivers/regulator/tps65912-regulator.c1
-rw-r--r--drivers/regulator/uniphier-regulator.c213
-rw-r--r--drivers/remoteproc/Kconfig23
-rw-r--r--drivers/remoteproc/Makefile2
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c37
-rw-r--r--drivers/remoteproc/qcom_adsp_pil.c156
-rw-r--r--drivers/remoteproc/qcom_common.c26
-rw-r--r--drivers/remoteproc/qcom_q6v5.c252
-rw-r--r--drivers/remoteproc/qcom_q6v5.h46
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c158
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c601
-rw-r--r--drivers/remoteproc/qcom_sysmon.c5
-rw-r--r--drivers/remoteproc/remoteproc_core.c117
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c4
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/remoteproc/st_slim_rproc.c3
-rw-r--r--drivers/reset/Kconfig26
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/reset-ath79.c1
-rw-r--r--drivers/reset/reset-axs10x.c1
-rw-r--r--drivers/reset/reset-imx7.c3
-rw-r--r--drivers/reset/reset-meson-audio-arb.c168
-rw-r--r--drivers/reset/reset-qcom-aoss.c133
-rw-r--r--drivers/reset/reset-simple.c1
-rw-r--r--drivers/reset/reset-uniphier-usb3.c171
-rw-r--r--drivers/reset/reset-uniphier.c9
-rw-r--r--drivers/rpmsg/qcom_glink_native.c51
-rw-r--r--drivers/rpmsg/qcom_smd.c10
-rw-r--r--drivers/rpmsg/rpmsg_char.c2
-rw-r--r--drivers/rpmsg/rpmsg_core.c7
-rw-r--r--drivers/rtc/Kconfig21
-rw-r--r--drivers/rtc/class.c5
-rw-r--r--drivers/rtc/interface.c105
-rw-r--r--drivers/rtc/rtc-armada38x.c23
-rw-r--r--drivers/rtc/rtc-bq4802.c4
-rw-r--r--drivers/rtc/rtc-coh901331.c1
-rw-r--r--drivers/rtc/rtc-core.h14
-rw-r--r--drivers/rtc/rtc-cpcap.c1
-rw-r--r--drivers/rtc/rtc-dev.c8
-rw-r--r--drivers/rtc/rtc-ds1307.c18
-rw-r--r--drivers/rtc/rtc-ds1685.c590
-rw-r--r--drivers/rtc/rtc-ftrtc010.c1
-rw-r--r--drivers/rtc/rtc-isl1208.c192
-rw-r--r--drivers/rtc/rtc-m48t59.c4
-rw-r--r--drivers/rtc/rtc-max77686.c20
-rw-r--r--drivers/rtc/rtc-max8997.c20
-rw-r--r--drivers/rtc/rtc-max8998.c20
-rw-r--r--drivers/rtc/rtc-mc13xxx.c1
-rw-r--r--drivers/rtc/rtc-mrst.c4
-rw-r--r--drivers/rtc/rtc-mxc_v2.c1
-rw-r--r--drivers/rtc/rtc-omap.c23
-rw-r--r--drivers/rtc/rtc-pcf2127.c68
-rw-r--r--drivers/rtc/rtc-pcf85063.c21
-rw-r--r--drivers/rtc/rtc-r7301.c1
-rw-r--r--drivers/rtc/rtc-s5m.c22
-rw-r--r--drivers/rtc/rtc-sa1100.c1
-rw-r--r--drivers/rtc/rtc-sh.c91
-rw-r--r--drivers/rtc/rtc-snvs.c105
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c20
-rw-r--r--drivers/rtc/rtc-sysfs.c43
-rw-r--r--drivers/rtc/rtc-tegra.c1
-rw-r--r--drivers/rtc/rtc-test.c2
-rw-r--r--drivers/s390/block/dasd.c30
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c12
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/block/dasd_int.h8
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/char/Makefile5
-rw-r--r--drivers/s390/char/keyboard.c58
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/sclp_async.c38
-rw-r--r--drivers/s390/char/tape_3590.c8
-rw-r--r--drivers/s390/char/tape_class.c6
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/cio/chp.c21
-rw-r--r--drivers/s390/cio/chsc.c18
-rw-r--r--drivers/s390/cio/chsc.h18
-rw-r--r--drivers/s390/cio/cio.c77
-rw-r--r--drivers/s390/cio/cio.h1
-rw-r--r--drivers/s390/cio/css.c82
-rw-r--r--drivers/s390/cio/css.h3
-rw-r--r--drivers/s390/cio/qdio_main.c5
-rw-r--r--drivers/s390/cio/trace.h102
-rw-r--r--drivers/s390/crypto/ap_asm.h236
-rw-r--r--drivers/s390/crypto/ap_bus.c457
-rw-r--r--drivers/s390/crypto/ap_bus.h37
-rw-r--r--drivers/s390/crypto/ap_card.c51
-rw-r--r--drivers/s390/crypto/ap_queue.c58
-rw-r--r--drivers/s390/crypto/pkey_api.c93
-rw-r--r--drivers/s390/crypto/zcrypt_api.c30
-rw-r--r--drivers/s390/crypto/zcrypt_api.h2
-rw-r--r--drivers/s390/crypto/zcrypt_card.c33
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h32
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.h18
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c2
-rw-r--r--drivers/s390/crypto/zcrypt_error.h2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c17
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h8
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c54
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c6
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h2
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c27
-rw-r--r--drivers/s390/net/Kconfig10
-rw-r--r--drivers/s390/net/Makefile3
-rw-r--r--drivers/s390/net/ism.h221
-rw-r--r--drivers/s390/net/ism_drv.c623
-rw-r--r--drivers/s390/net/qeth_core.h52
-rw-r--r--drivers/s390/net/qeth_core_main.c661
-rw-r--r--drivers/s390/net/qeth_core_mpc.c11
-rw-r--r--drivers/s390/net/qeth_core_mpc.h7
-rw-r--r--drivers/s390/net/qeth_core_sys.c18
-rw-r--r--drivers/s390/net/qeth_l2.h5
-rw-r--r--drivers/s390/net/qeth_l2_main.c235
-rw-r--r--drivers/s390/net/qeth_l3_main.c403
-rw-r--r--drivers/s390/net/qeth_l3_sys.c6
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/scsi/3w-9xxx.c6
-rw-r--r--drivers/scsi/3w-sas.c3
-rw-r--r--drivers/scsi/3w-xxxx.c4
-rw-r--r--drivers/scsi/Kconfig33
-rw-r--r--drivers/scsi/Makefile5
-rw-r--r--drivers/scsi/NCR_D700.c405
-rw-r--r--drivers/scsi/NCR_D700.h30
-rw-r--r--drivers/scsi/NCR_Q720.c376
-rw-r--r--drivers/scsi/NCR_Q720.h29
-rw-r--r--drivers/scsi/a100u2w.c4
-rw-r--r--drivers/scsi/aacraid/aachba.c56
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/dpcsup.c2
-rw-r--r--drivers/scsi/aacraid/rx.c2
-rw-r--r--drivers/scsi/aacraid/sa.c2
-rw-r--r--drivers/scsi/aacraid/src.c6
-rw-r--r--drivers/scsi/advansys.c10
-rw-r--r--drivers/scsi/aha152x.c71
-rw-r--r--drivers/scsi/aha1740.c9
-rw-r--r--drivers/scsi/aha1740.h4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c4
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c7
-rw-r--r--drivers/scsi/atp870u.c16
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c15
-rw-r--r--drivers/scsi/be2iscsi/be_main.c23
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c23
-rw-r--r--drivers/scsi/bfa/bfad_im.c19
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c115
-rw-r--r--drivers/scsi/csiostor/csio_wr.c84
-rw-r--r--drivers/scsi/cxlflash/main.h4
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c30
-rw-r--r--drivers/scsi/cxlflash/superpipe.c14
-rw-r--r--drivers/scsi/cxlflash/vlun.c7
-rw-r--r--drivers/scsi/dc395x.c5
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c18
-rw-r--r--drivers/scsi/gdth.c67
-rw-r--r--drivers/scsi/gdth.h10
-rw-r--r--drivers/scsi/gdth_proc.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c319
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c23
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c21
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c290
-rw-r--r--drivers/scsi/hosts.c32
-rw-r--r--drivers/scsi/hpsa.c25
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c12
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c6
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/ipr.c27
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/libfc/fc_disc.c47
-rw-r--r--drivers/scsi/libfc/fc_lport.c108
-rw-r--r--drivers/scsi/libfc/fc_rport.c101
-rw-r--r--drivers/scsi/libiscsi.c14
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c47
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c4
-rw-r--r--drivers/scsi/lpfc/Makefile4
-rw-r--r--drivers/scsi/lpfc/lpfc.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c462
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_compat.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c68
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h46
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nl.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c76
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c181
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c50
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h4
-rw-r--r--drivers/scsi/megaraid.c29
-rw-r--r--drivers/scsi/megaraid.h14
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h33
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c61
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c36
-rw-r--r--drivers/scsi/mesh.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c454
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h30
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c81
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c395
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c463
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c62
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c18
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c3
-rw-r--r--drivers/scsi/ncr53c8xx.c10
-rw-r--r--drivers/scsi/nsp32_debug.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c12
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c30
-rw-r--r--drivers/scsi/qedi/qedi_main.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c34
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h24
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c108
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c738
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c207
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c161
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c188
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c20
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi.h3
-rw-r--r--drivers/scsi/scsi_debug.c59
-rw-r--r--drivers/scsi/scsi_error.c20
-rw-r--r--drivers/scsi/scsi_ioctl.c4
-rw-r--r--drivers/scsi/scsi_lib.c411
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/scsi_transport_fc.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/scsi_typedefs.h2
-rw-r--r--drivers/scsi/sd.c23
-rw-r--r--drivers/scsi/sd.h9
-rw-r--r--drivers/scsi/sd_dif.c113
-rw-r--r--drivers/scsi/sd_zbc.c11
-rw-r--r--drivers/scsi/sg.c73
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h12
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c160
-rw-r--r--drivers/scsi/snic/snic_debugfs.c10
-rw-r--r--drivers/scsi/snic/snic_trc.c6
-rw-r--r--drivers/scsi/sr.c29
-rw-r--r--drivers/scsi/sr_ioctl.c22
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.h2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c3
-rw-r--r--drivers/scsi/ufs/Kconfig9
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c619
-rw-r--r--drivers/scsi/ufs/ufs-hisi.h115
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c21
-rw-r--r--drivers/scsi/ufs/ufshcd.c52
-rw-r--r--drivers/scsi/ufs/ufshcd.h3
-rw-r--r--drivers/scsi/virtio_scsi.c8
-rw-r--r--drivers/scsi/vmw_pvscsi.c11
-rw-r--r--drivers/sh/maple/maple.c7
-rw-r--r--drivers/siox/siox-bus-gpio.c1
-rw-r--r--drivers/siox/siox-core.c30
-rw-r--r--drivers/slimbus/Kconfig12
-rw-r--r--drivers/slimbus/Makefile5
-rw-r--r--drivers/slimbus/core.c41
-rw-r--r--drivers/slimbus/messaging.c79
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c1526
-rw-r--r--drivers/slimbus/slimbus.h198
-rw-r--r--drivers/slimbus/stream.c477
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c16
-rw-r--r--drivers/soc/fsl/Kconfig15
-rw-r--r--drivers/soc/fsl/Makefile1
-rw-r--r--drivers/soc/fsl/dpio/Makefile (renamed from drivers/staging/fsl-mc/bus/dpio/Makefile)0
-rw-r--r--drivers/soc/fsl/dpio/dpio-cmd.h (renamed from drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h)0
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c (renamed from drivers/staging/fsl-mc/bus/dpio/dpio-driver.c)2
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c (renamed from drivers/staging/fsl-mc/bus/dpio/dpio-service.c)2
-rw-r--r--drivers/soc/fsl/dpio/dpio.c (renamed from drivers/staging/fsl-mc/bus/dpio/dpio.c)0
-rw-r--r--drivers/soc/fsl/dpio/dpio.h (renamed from drivers/staging/fsl-mc/bus/dpio/dpio.h)0
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c (renamed from drivers/staging/fsl-mc/bus/dpio/qbman-portal.c)2
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h (renamed from drivers/staging/fsl-mc/bus/dpio/qbman-portal.h)2
-rw-r--r--drivers/soc/fsl/qbman/Kconfig2
-rw-r--r--drivers/soc/fsl/qe/Kconfig2
-rw-r--r--drivers/soc/fsl/qe/gpio.c28
-rw-r--r--drivers/soc/imx/gpc.c41
-rw-r--r--drivers/soc/imx/gpcv2.c13
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c81
-rw-r--r--drivers/soc/qcom/Kconfig30
-rw-r--r--drivers/soc/qcom/Makefile6
-rw-r--r--drivers/soc/qcom/llcc-sdm845.c94
-rw-r--r--drivers/soc/qcom/llcc-slice.c338
-rw-r--r--drivers/soc/qcom/mdt_loader.c87
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c3
-rw-r--r--drivers/soc/qcom/rpmh-internal.h114
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c693
-rw-r--r--drivers/soc/qcom/rpmh.c513
-rw-r--r--drivers/soc/qcom/smem.c10
-rw-r--r--drivers/soc/qcom/trace-rpmh.h82
-rw-r--r--drivers/soc/renesas/Makefile3
-rw-r--r--drivers/soc/renesas/r9a06g032-smp.c96
-rw-r--r--drivers/soc/renesas/rcar-sysc.c99
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c87
-rw-r--r--drivers/soc/ti/Kconfig14
-rw-r--r--drivers/soc/ti/pm33xx.c16
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c76
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-bitbang.c50
-rw-r--r--drivers/spi/spi-butterfly.c4
-rw-r--r--drivers/spi/spi-cadence.c4
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dw-mmio.c90
-rw-r--r--drivers/spi/spi-dw.c9
-rw-r--r--drivers/spi/spi-dw.h2
-rw-r--r--drivers/spi/spi-fsl-dspi.c505
-rw-r--r--drivers/spi/spi-fsl-espi.c5
-rw-r--r--drivers/spi/spi-gpio.c49
-rw-r--r--drivers/spi/spi-img-spfi.c3
-rw-r--r--drivers/spi/spi-imx.c162
-rw-r--r--drivers/spi/spi-lm70llp.c5
-rw-r--r--drivers/spi/spi-mem.c28
-rw-r--r--drivers/spi/spi-omap2-mcspi.c9
-rw-r--r--drivers/spi/spi-orion.c77
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c53
-rw-r--r--drivers/spi/spi-sh-sci.c20
-rw-r--r--drivers/spi/spi-sprd-adi.c11
-rw-r--r--drivers/spi/spi-uniphier.c523
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c2
-rw-r--r--drivers/ssb/Kconfig21
-rw-r--r--drivers/ssb/b43_pci_bridge.c4
-rw-r--r--drivers/ssb/bridge_pcmcia_80211.c6
-rw-r--r--drivers/ssb/driver_chipcommon.c14
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c40
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c6
-rw-r--r--drivers/ssb/driver_extif.c4
-rw-r--r--drivers/ssb/driver_gige.c2
-rw-r--r--drivers/ssb/driver_gpio.c8
-rw-r--r--drivers/ssb/driver_mipscore.c17
-rw-r--r--drivers/ssb/driver_pcicore.c23
-rw-r--r--drivers/ssb/embedded.c18
-rw-r--r--drivers/ssb/host_soc.c16
-rw-r--r--drivers/ssb/main.c83
-rw-r--r--drivers/ssb/pci.c75
-rw-r--r--drivers/ssb/pcmcia.c62
-rw-r--r--drivers/ssb/scan.c38
-rw-r--r--drivers/ssb/sdio.c16
-rw-r--r--drivers/ssb/sprom.c4
-rw-r--r--drivers/ssb/ssb_private.h39
-rw-r--r--drivers/staging/Kconfig16
-rw-r--r--drivers/staging/Makefile7
-rw-r--r--drivers/staging/android/ashmem.c48
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/android/vsoc.c11
-rw-r--r--drivers/staging/axis-fifo/Kconfig9
-rw-r--r--drivers/staging/axis-fifo/Makefile1
-rw-r--r--drivers/staging/axis-fifo/README (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.)0
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c1107
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.txt89
-rw-r--r--drivers/staging/clocking-wizard/Kconfig2
-rw-r--r--drivers/staging/comedi/Kconfig8
-rw-r--r--drivers/staging/comedi/comedi.h2
-rw-r--r--drivers/staging/comedi/comedi_compat32.h2
-rw-r--r--drivers/staging/comedi/comedi_fops.c6
-rw-r--r--drivers/staging/comedi/comedi_pci.h2
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.h2
-rw-r--r--drivers/staging/comedi/comedidev.h4
-rw-r--r--drivers/staging/comedi/comedilib.h2
-rw-r--r--drivers/staging/comedi/drivers.c26
-rw-r--r--drivers/staging/comedi/drivers/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.h6
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c4
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c5
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c4
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c4
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c778
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c7
-rw-r--r--drivers/staging/erofs/Kconfig141
-rw-r--r--drivers/staging/erofs/Makefile13
-rw-r--r--drivers/staging/erofs/TODO45
-rw-r--r--drivers/staging/erofs/data.c385
-rw-r--r--drivers/staging/erofs/dir.c145
-rw-r--r--drivers/staging/erofs/erofs_fs.h266
-rw-r--r--drivers/staging/erofs/include/linux/tagptr.h110
-rw-r--r--drivers/staging/erofs/include/trace/events/erofs.h240
-rw-r--r--drivers/staging/erofs/inode.c283
-rw-r--r--drivers/staging/erofs/internal.h556
-rw-r--r--drivers/staging/erofs/lz4defs.h227
-rw-r--r--drivers/staging/erofs/namei.c251
-rw-r--r--drivers/staging/erofs/super.c649
-rw-r--r--drivers/staging/erofs/unzip_lz4.c251
-rw-r--r--drivers/staging/erofs/unzip_pagevec.h172
-rw-r--r--drivers/staging/erofs/unzip_vle.c1656
-rw-r--r--drivers/staging/erofs/unzip_vle.h239
-rw-r--r--drivers/staging/erofs/unzip_vle_lz4.c209
-rw-r--r--drivers/staging/erofs/utils.c271
-rw-r--r--drivers/staging/erofs/xattr.c577
-rw-r--r--drivers/staging/erofs/xattr.h93
-rw-r--r--drivers/staging/fbtft/fb_hx8347d.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9341.c2
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c31
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c3
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c9
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c41
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c15
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c13
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c2
-rw-r--r--drivers/staging/fbtft/fb_watterott.c26
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c17
-rw-r--r--drivers/staging/fbtft/fbtft-core.c41
-rw-r--r--drivers/staging/fbtft/fbtft-io.c17
-rw-r--r--drivers/staging/fbtft/fbtft-sysfs.c3
-rw-r--r--drivers/staging/fbtft/fbtft.h15
-rw-r--r--drivers/staging/fbtft/fbtft_device.c50
-rw-r--r--drivers/staging/fbtft/flexfb.c31
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/Makefile1
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h29
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c75
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h65
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c31
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpkg.h450
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h30
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.c30
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.h159
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst (renamed from drivers/staging/fsl-dpaa2/ethernet/README)39
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/net.h480
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw.h27
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c2
-rw-r--r--drivers/staging/fsl-mc/Kconfig2
-rw-r--r--drivers/staging/fsl-mc/Makefile3
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig16
-rw-r--r--drivers/staging/fsl-mc/bus/Makefile9
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt135
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-fd.h426
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-global.h177
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h115
-rw-r--r--drivers/staging/gasket/Kconfig23
-rw-r--r--drivers/staging/gasket/Makefile9
-rw-r--r--drivers/staging/gasket/TODO9
-rw-r--r--drivers/staging/gasket/apex.h30
-rw-r--r--drivers/staging/gasket/apex_driver.c741
-rw-r--r--drivers/staging/gasket/gasket.h122
-rw-r--r--drivers/staging/gasket/gasket_constants.h47
-rw-r--r--drivers/staging/gasket/gasket_core.c1816
-rw-r--r--drivers/staging/gasket/gasket_core.h649
-rw-r--r--drivers/staging/gasket/gasket_interrupt.c550
-rw-r--r--drivers/staging/gasket/gasket_interrupt.h117
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c391
-rw-r--r--drivers/staging/gasket/gasket_ioctl.h28
-rw-r--r--drivers/staging/gasket/gasket_page_table.c1381
-rw-r--r--drivers/staging/gasket/gasket_page_table.h249
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c400
-rw-r--r--drivers/staging/gasket/gasket_sysfs.h179
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c18
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c5
-rw-r--r--drivers/staging/goldfish/README6
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c79
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/accel/adis16203.c6
-rw-r--r--drivers/staging/iio/accel/adis16240.c11
-rw-r--r--drivers/staging/iio/adc/ad7606.h2
-rw-r--r--drivers/staging/iio/gyro/Kconfig16
-rw-r--r--drivers/staging/iio/gyro/Makefile6
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c234
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c57
-rw-r--r--drivers/staging/ks7010/ks_hostif.c14
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h2
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c5
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c38
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c112
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c1
-rw-r--r--drivers/staging/media/imx/imx-media.h2
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c3
-rw-r--r--drivers/staging/most/dim2/dim2.c2
-rw-r--r--drivers/staging/most/sound/sound.c1
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts24
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi110
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c19
-rw-r--r--drivers/staging/mt7621-gpio/Kconfig6
-rw-r--r--drivers/staging/mt7621-gpio/Makefile3
-rw-r--r--drivers/staging/mt7621-gpio/TODO3
-rw-r--r--drivers/staging/mt7621-gpio/gpio-mt7621.c370
-rw-r--r--drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt68
-rw-r--r--drivers/staging/mt7621-mmc/board.h8
-rw-r--r--drivers/staging/mt7621-mmc/dbg.c54
-rw-r--r--drivers/staging/mt7621-mmc/dbg.h6
-rw-r--r--drivers/staging/mt7621-mmc/mt6575_sd.h588
-rw-r--r--drivers/staging/mt7621-mmc/sd.c584
-rw-r--r--drivers/staging/mt7621-pci/Kconfig7
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c827
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c174
-rw-r--r--drivers/staging/mt7621-spi/spi-mt7621.c206
-rw-r--r--drivers/staging/netlogic/xlr_net.c9
-rw-r--r--drivers/staging/octeon-usb/Makefile1
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c1
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h1
-rw-r--r--drivers/staging/olpc_dcon/Kconfig6
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h6
-rw-r--r--drivers/staging/pi433/pi433_if.c93
-rw-r--r--drivers/staging/pi433/pi433_if.h9
-rw-r--r--drivers/staging/pi433/rf69.c283
-rw-r--r--drivers/staging/pi433/rf69.h12
-rw-r--r--drivers/staging/pi433/rf69_enum.h3
-rw-r--r--drivers/staging/pi433/rf69_registers.h3
-rw-r--r--drivers/staging/rtl8188eu/Kconfig1
-rw-r--r--drivers/staging/rtl8188eu/Makefile4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c126
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c65
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c121
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c28
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_iol.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c19
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c56
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c180
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c184
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c37
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c71
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c83
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c74
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c13
-rw-r--r--drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c (renamed from drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c)10
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c75
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c26
-rw-r--r--drivers/staging/rtl8188eu/hal/mac_cfg.c19
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c21
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_rtl8188e.c (renamed from drivers/staging/rtl8188eu/hal/odm_RTL8188E.c)10
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c14
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseq.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c18
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c32
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c43
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c13
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h10
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h12
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h10
-rw-r--r--drivers/staging/rtl8188eu/include/fw.h13
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h (renamed from drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h)10
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h (renamed from drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h)10
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_rate_adaptive.h (renamed from drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h)2
-rw-r--r--drivers/staging/rtl8188eu/include/hal_com.h10
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h12
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h25
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/include/mon.h10
-rw-r--r--drivers/staging/rtl8188eu/include/mp_custom_oid.h347
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h12
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h11
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h166
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h16
-rw-r--r--drivers/staging/rtl8188eu/include/odm_reg.h10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_rtl8188e.h (renamed from drivers/staging/rtl8188eu/include/odm_RTL8188E.h)10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_types.h10
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h10
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h10
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h11
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseqcmd.h10
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h28
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h74
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h13
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h1086
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h12
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h10
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h10
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h18
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h10
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c72
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c13
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c27
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c38
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c22
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c56
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c60
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.h59
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c57
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c86
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c1018
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c18
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c179
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h127
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c643
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h595
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h126
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c280
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c44
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U.h114
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c75
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c195
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h240
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c38
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h33
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c276
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.h71
-rw-r--r--drivers/staging/rtl8192u/r819xU_phyreg.h735
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c5
-rw-r--r--drivers/staging/rtl8712/wifi.h1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c18
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c143
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c108
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c268
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h2
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8192c_rf.h28
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_rf.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_beamforming.h127
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_br_ext.h55
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h1
-rw-r--r--drivers/staging/rtl8723bs/include/xmit_osdep.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c19
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c108
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c7
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c19
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c5
-rw-r--r--drivers/staging/rtlwifi/base.c109
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.h1
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h17
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c2
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.c2
-rw-r--r--drivers/staging/rtlwifi/wifi.h1
-rw-r--r--drivers/staging/rts5208/Makefile2
-rw-r--r--drivers/staging/rts5208/ms.c337
-rw-r--r--drivers/staging/rts5208/rtsx.c38
-rw-r--r--drivers/staging/rts5208/rtsx.h25
-rw-r--r--drivers/staging/rts5208/rtsx_card.c29
-rw-r--r--drivers/staging/rts5208/rtsx_card.h1
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c140
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h18
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c216
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c1
-rw-r--r--drivers/staging/rts5208/sd.c328
-rw-r--r--drivers/staging/rts5208/spi.c71
-rw-r--r--drivers/staging/rts5208/trace.c27
-rw-r--r--drivers/staging/rts5208/trace.h40
-rw-r--r--drivers/staging/rts5208/xd.c115
-rw-r--r--drivers/staging/skein/Kconfig16
-rw-r--r--drivers/staging/skein/Makefile11
-rw-r--r--drivers/staging/skein/TODO8
-rw-r--r--drivers/staging/skein/skein_api.c231
-rw-r--r--drivers/staging/skein/skein_api.h230
-rw-r--r--drivers/staging/skein/skein_base.c870
-rw-r--r--drivers/staging/skein/skein_base.h336
-rw-r--r--drivers/staging/skein/skein_block.c469
-rw-r--r--drivers/staging/skein/skein_block.h347
-rw-r--r--drivers/staging/skein/skein_generic.c214
-rw-r--r--drivers/staging/skein/skein_iv.h187
-rw-r--r--drivers/staging/skein/threefish_api.c78
-rw-r--r--drivers/staging/skein/threefish_api.h171
-rw-r--r--drivers/staging/skein/threefish_block.c8244
-rw-r--r--drivers/staging/sm750fb/sm750.c2
-rw-r--r--drivers/staging/speakup/kobjects.c4
-rw-r--r--drivers/staging/speakup/speakup_soft.c6
-rw-r--r--drivers/staging/speakup/spk_types.h2
-rw-r--r--drivers/staging/speakup/synth.c40
-rw-r--r--drivers/staging/speakup/varhandlers.c5
-rw-r--r--drivers/staging/typec/Kconfig22
-rw-r--r--drivers/staging/typec/Makefile2
-rw-r--r--drivers/staging/typec/TODO5
-rw-r--r--drivers/staging/vboxvideo/TODO1
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c34
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.h2
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c2
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c38
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c127
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c31
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c11
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c4
-rw-r--r--drivers/staging/vt6655/dpc.c4
-rw-r--r--drivers/staging/vt6656/dpc.c4
-rw-r--r--drivers/staging/vt6656/rxtx.c5
-rw-r--r--drivers/staging/wilc1000/Makefile3
-rw-r--r--drivers/staging/wilc1000/TODO11
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c89
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h25
-rw-r--r--drivers/staging/wilc1000/host_interface.c2401
-rw-r--r--drivers/staging/wilc1000/host_interface.h34
-rw-r--r--drivers/staging/wilc1000/linux_mon.c23
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c177
-rw-r--r--drivers/staging/wilc1000/microchip,wilc1000,sdio.txt32
-rw-r--r--drivers/staging/wilc1000/microchip,wilc1000,spi.txt26
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c28
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c57
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c54
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c347
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h5
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h41
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c291
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h21
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c38
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.h16
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h70
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c44
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h30
-rw-r--r--drivers/staging/wlan-ng/p80211types.h27
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c7
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c41
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c8
-rw-r--r--drivers/target/Kconfig5
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c55
-rw-r--r--drivers/target/loopback/Kconfig1
-rw-r--r--drivers/target/loopback/tcm_loop.c15
-rw-r--r--drivers/target/sbp/sbp_target.c18
-rw-r--r--drivers/target/target_core_configfs.c12
-rw-r--r--drivers/target/target_core_device.c53
-rw-r--r--drivers/target/target_core_fabric_configfs.c5
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_pr.c15
-rw-r--r--drivers/target/target_core_sbc.c7
-rw-r--r--drivers/target/target_core_tmr.c30
-rw-r--r--drivers/target/target_core_transport.c285
-rw-r--r--drivers/target/target_core_ua.c43
-rw-r--r--drivers/target/target_core_ua.h3
-rw-r--r--drivers/target/target_core_user.c421
-rw-r--r--drivers/target/target_core_xcopy.c5
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c10
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c5
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c5
-rw-r--r--drivers/tee/optee/Kconfig8
-rw-r--r--drivers/tee/optee/core.c2
-rw-r--r--drivers/tee/optee/rpc.c2
-rw-r--r--drivers/tee/tee_shm.c6
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/armada_thermal.c534
-rw-r--r--drivers/thermal/imx_thermal.c33
-rw-r--r--drivers/thermal/intel_powerclamp.c2
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c26
-rw-r--r--drivers/thermal/qcom/Makefile2
-rw-r--r--drivers/thermal/qcom/tsens-8996.c84
-rw-r--r--drivers/thermal/qcom/tsens-common.c29
-rw-r--r--drivers/thermal/qcom/tsens-v2.c77
-rw-r--r--drivers/thermal/qcom/tsens.c3
-rw-r--r--drivers/thermal/qcom/tsens.h8
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_hwmon.c3
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-bandgap.h68
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-thermal-data.c65
-rw-r--r--drivers/thermal/ti-soc-thermal/omap3-thermal-data.c6
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4-thermal-data.c10
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h10
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5-thermal-data.c46
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h41
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c370
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h43
-rw-r--r--drivers/thunderbolt/domain.c59
-rw-r--r--drivers/thunderbolt/icm.c174
-rw-r--r--drivers/thunderbolt/nhi.c46
-rw-r--r--drivers/thunderbolt/switch.c65
-rw-r--r--drivers/thunderbolt/tb.h10
-rw-r--r--drivers/thunderbolt/tb_msgs.h4
-rw-r--r--drivers/thunderbolt/tb_regs.h2
-rw-r--r--drivers/thunderbolt/xdomain.c18
-rw-r--r--drivers/tty/goldfish.c1
-rw-r--r--drivers/tty/hvc/hvc_console.c194
-rw-r--r--drivers/tty/hvc/hvc_console.h1
-rw-r--r--drivers/tty/hvc/hvc_opal.c33
-rw-r--r--drivers/tty/n_tty.c55
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serdev/core.c49
-rw-r--r--drivers/tty/serial/8250/8250_core.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c74
-rw-r--r--drivers/tty/serial/8250/8250_em.c1
-rw-r--r--drivers/tty/serial/8250/8250_exar.c54
-rw-r--r--drivers/tty/serial/8250/8250_of.c6
-rw-r--r--drivers/tty/serial/8250/8250_omap.c1
-rw-r--r--drivers/tty/serial/8250/8250_pci.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c63
-rw-r--r--drivers/tty/serial/8250/serial_cs.c6
-rw-r--r--drivers/tty/serial/imx.c5
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c3
-rw-r--r--drivers/tty/serial/max310x.c14
-rw-r--r--drivers/tty/serial/pxa.c3
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c261
-rw-r--r--drivers/tty/serial/sccnxp.c1
-rw-r--r--drivers/tty/serial/serial_core.c17
-rw-r--r--drivers/tty/serial/sh-sci.c191
-rw-r--r--drivers/tty/serial/uartlite.c112
-rw-r--r--drivers/tty/serial/xilinx_uartps.c23
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/tty_audit.c2
-rw-r--r--drivers/tty/tty_baudrate.c22
-rw-r--r--drivers/tty/tty_io.c21
-rw-r--r--drivers/tty/tty_ldsem.c82
-rw-r--r--drivers/tty/vt/keyboard.c34
-rw-r--r--drivers/tty/vt/selection.c48
-rw-r--r--drivers/tty/vt/vc_screen.c90
-rw-r--r--drivers/tty/vt/vt.c371
-rw-r--r--drivers/uio/uio.c151
-rw-r--r--drivers/uio/uio_cif.c4
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c1
-rw-r--r--drivers/uio/uio_hv_generic.c4
-rw-r--r--drivers/uio/uio_netx.c3
-rw-r--r--drivers/uio/uio_pci_generic.c3
-rw-r--r--drivers/uio/uio_pruss.c69
-rw-r--r--drivers/uio/uio_sercos3.c1
-rw-r--r--drivers/usb/chipidea/Kconfig9
-rw-r--r--drivers/usb/chipidea/Makefile3
-rw-r--r--drivers/usb/chipidea/ci.h8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c4
-rw-r--r--drivers/usb/chipidea/host.c5
-rw-r--r--drivers/usb/chipidea/ulpi.c3
-rw-r--r--drivers/usb/class/cdc-acm.c46
-rw-r--r--drivers/usb/class/cdc-wdm.c47
-rw-r--r--drivers/usb/class/usblp.c10
-rw-r--r--drivers/usb/class/usbtmc.c320
-rw-r--r--drivers/usb/core/devio.c5
-rw-r--r--drivers/usb/core/hub.c50
-rw-r--r--drivers/usb/core/ledtrig-usbport.c34
-rw-r--r--drivers/usb/core/message.c9
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc2/core.c241
-rw-r--r--drivers/usb/dwc2/core.h112
-rw-r--r--drivers/usb/dwc2/core_intr.c118
-rw-r--r--drivers/usb/dwc2/debugfs.c55
-rw-r--r--drivers/usb/dwc2/gadget.c557
-rw-r--r--drivers/usb/dwc2/hcd.c604
-rw-r--r--drivers/usb/dwc2/hcd.h18
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c10
-rw-r--r--drivers/usb/dwc2/hcd_intr.c116
-rw-r--r--drivers/usb/dwc2/hcd_queue.c15
-rw-r--r--drivers/usb/dwc2/params.c24
-rw-r--r--drivers/usb/dwc2/platform.c19
-rw-r--r--drivers/usb/dwc3/Kconfig13
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c141
-rw-r--r--drivers/usb/dwc3/core.h17
-rw-r--r--drivers/usb/dwc3/dwc3-haps.c137
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c32
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c223
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c13
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/gadget/composite.c4
-rw-r--r--drivers/usb/gadget/configfs.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c28
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c31
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h4
-rw-r--r--drivers/usb/gadget/function/f_tcm.c19
-rw-r--r--drivers/usb/gadget/function/f_uac2.c24
-rw-r--r--drivers/usb/gadget/function/f_uvc.c31
-rw-r--r--drivers/usb/gadget/function/f_uvc.h6
-rw-r--r--drivers/usb/gadget/function/u_audio.c88
-rw-r--r--drivers/usb/gadget/function/u_uvc.h5
-rw-r--r--drivers/usb/gadget/function/uvc.h53
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c12
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h12
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c3
-rw-r--r--drivers/usb/gadget/function/uvc_video.h2
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/legacy/webcam.c4
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c11
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c14
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h33
-rw-r--r--drivers/usb/gadget/udc/core.c18
-rw-r--r--drivers/usb/gadget/udc/fsl_mxc_udc.c1
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c84
-rw-r--r--drivers/usb/host/Kconfig8
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-exynos.c7
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/ehci-hub.c8
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c212
-rw-r--r--drivers/usb/host/ehci-ps3.c6
-rw-r--r--drivers/usb/host/ehci-sched.c4
-rw-r--r--drivers/usb/host/ohci-exynos.c6
-rw-r--r--drivers/usb/host/ohci-ps3.c6
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/whci/pzl.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c14
-rw-r--r--drivers/usb/host/xhci-hub.c11
-rw-r--r--drivers/usb/host/xhci-mem.c6
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/host/xhci-rcar.c43
-rw-r--r--drivers/usb/host/xhci-tegra.c6
-rw-r--r--drivers/usb/host/xhci-trace.h36
-rw-r--r--drivers/usb/host/xhci.c49
-rw-r--r--drivers/usb/host/xhci.h6
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c2
-rw-r--r--drivers/usb/misc/adutux.c10
-rw-r--r--drivers/usb/misc/appledisplay.c1
-rw-r--r--drivers/usb/misc/iowarrior.c5
-rw-r--r--drivers/usb/misc/ldusb.c7
-rw-r--r--drivers/usb/misc/legousbtower.c5
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c4
-rw-r--r--drivers/usb/misc/usbtest.c10
-rw-r--r--drivers/usb/misc/uss720.c2
-rw-r--r--drivers/usb/misc/yurex.c23
-rw-r--r--drivers/usb/mon/mon_bin.c8
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig1
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c1
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/cp210x.c436
-rw-r--r--drivers/usb/serial/cyberjack.c17
-rw-r--r--drivers/usb/serial/digi_acceleport.c35
-rw-r--r--drivers/usb/serial/io_edgeport.c17
-rw-r--r--drivers/usb/serial/io_ti.c5
-rw-r--r--drivers/usb/serial/ir-usb.c2
-rw-r--r--drivers/usb/serial/iuu_phoenix.c5
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/kl5kusb105.c1
-rw-r--r--drivers/usb/serial/kl5kusb105.h3
-rw-r--r--drivers/usb/serial/kobil_sct.c24
-rw-r--r--drivers/usb/serial/mos7720.c14
-rw-r--r--drivers/usb/serial/mos7840.c8
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/quatech2.c7
-rw-r--r--drivers/usb/serial/sierra.c13
-rw-r--r--drivers/usb/serial/ssu100.c2
-rw-r--r--drivers/usb/serial/symbolserial.c5
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c9
-rw-r--r--drivers/usb/serial/usb_wwan.c5
-rw-r--r--drivers/usb/typec/Kconfig18
-rw-r--r--drivers/usb/typec/Makefile5
-rw-r--r--drivers/usb/typec/altmodes/Kconfig14
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c580
-rw-r--r--drivers/usb/typec/bus.c402
-rw-r--r--drivers/usb/typec/bus.h38
-rw-r--r--drivers/usb/typec/class.c544
-rw-r--r--drivers/usb/typec/fusb302/fusb302.c12
-rw-r--r--drivers/usb/typec/mux.c6
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c13
-rw-r--r--drivers/usb/typec/tcpci.c (renamed from drivers/staging/typec/tcpci.c)66
-rw-r--r--drivers/usb/typec/tcpci.h (renamed from drivers/staging/typec/tcpci.h)0
-rw-r--r--drivers/usb/typec/tcpci_rt1711h.c (renamed from drivers/staging/typec/tcpci_rt1711h.c)0
-rw-r--r--drivers/usb/typec/tcpm.c347
-rw-r--r--drivers/usb/typec/tps6598x.c11
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c13
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c5
-rw-r--r--drivers/usb/usb-skeleton.c10
-rw-r--r--drivers/usb/usbip/vudc_dev.c2
-rw-r--r--drivers/usb/wusbcore/security.c2
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c8
-rw-r--r--drivers/uwb/hwa-rc.c1
-rw-r--r--drivers/vfio/pci/Kconfig12
-rw-r--r--drivers/vfio/pci/vfio_pci.c25
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c73
-rw-r--r--drivers/vfio/vfio_iommu_type1.c17
-rw-r--r--drivers/vhost/Kconfig2
-rw-r--r--drivers/vhost/net.c373
-rw-r--r--drivers/vhost/scsi.c18
-rw-r--r--drivers/vhost/vhost.c80
-rw-r--r--drivers/vhost/vhost.h11
-rw-r--r--drivers/video/backlight/adp8860_bl.c1
-rw-r--r--drivers/video/backlight/pwm_bl.c232
-rw-r--r--drivers/video/console/Kconfig11
-rw-r--r--drivers/video/console/dummycon.c67
-rw-r--r--drivers/video/console/vgacon.c5
-rw-r--r--drivers/video/fbdev/amifb.c4
-rw-r--r--drivers/video/fbdev/core/fbcon.c123
-rw-r--r--drivers/video/fbdev/core/fbmem.c56
-rw-r--r--drivers/video/fbdev/core/modedb.c93
-rw-r--r--drivers/video/fbdev/efifb.c234
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c23
-rw-r--r--drivers/video/fbdev/goldfishfb.c15
-rw-r--r--drivers/video/fbdev/hyperv_fb.c6
-rw-r--r--drivers/video/fbdev/i740fb.c1
-rw-r--r--drivers/video/fbdev/metronomefb.c10
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/Makefile4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c3
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c10
-rw-r--r--drivers/video/fbdev/pxafb.c97
-rw-r--r--drivers/video/fbdev/pxafb.h3
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c1
-rw-r--r--drivers/video/fbdev/tridentfb.c3
-rw-r--r--drivers/video/fbdev/udlfb.c230
-rw-r--r--drivers/video/fbdev/via/lcd.c1
-rw-r--r--drivers/video/fbdev/via/viafbdev.c3
-rw-r--r--drivers/virtio/virtio_balloon.c127
-rw-r--r--drivers/virtio/virtio_mmio.c20
-rw-r--r--drivers/virtio/virtio_pci_common.c7
-rw-r--r--drivers/virtio/virtio_pci_common.h2
-rw-r--r--drivers/virtio/virtio_pci_legacy.c14
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c2
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/ds2482.c2
-rw-r--r--drivers/w1/masters/ds2490.c16
-rw-r--r--drivers/w1/masters/mxc_w1.c1
-rw-r--r--drivers/w1/slaves/Kconfig12
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2760.c175
-rw-r--r--drivers/w1/slaves/w1_ds2760.h59
-rw-r--r--drivers/w1/w1.c3
-rw-r--r--drivers/watchdog/Kconfig10
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/coh901327_wdt.c1
-rw-r--r--drivers/watchdog/davinci_wdt.c1
-rw-r--r--drivers/watchdog/imgpdc_wdt.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c5
-rw-r--r--drivers/watchdog/kempld_wdt.c5
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/max77620_wdt.c1
-rw-r--r--drivers/watchdog/menz69_wdt.c170
-rw-r--r--drivers/watchdog/moxart_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c63
-rw-r--r--drivers/watchdog/sprd_wdt.c4
-rw-r--r--drivers/watchdog/stm32_iwdg.c116
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c5
-rw-r--r--drivers/watchdog/tangox_wdt.c1
-rw-r--r--drivers/xen/Kconfig24
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/balloon.c75
-rw-r--r--drivers/xen/biomerge.c2
-rw-r--r--drivers/xen/gntdev-common.h94
-rw-r--r--drivers/xen/gntdev-dmabuf.c856
-rw-r--r--drivers/xen/gntdev-dmabuf.h33
-rw-r--r--drivers/xen/gntdev.c264
-rw-r--r--drivers/xen/grant-table.c151
-rw-r--r--drivers/xen/mcelog.c2
-rw-r--r--drivers/xen/mem-reservation.c118
-rw-r--r--drivers/xen/xen-acpi-processor.c6
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xen-scsiback.c17
5321 files changed, 324283 insertions, 118733 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 95b9ccc08165..ab4d43923c4d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -9,6 +9,8 @@ source "drivers/bus/Kconfig"
source "drivers/connector/Kconfig"
+source "drivers/gnss/Kconfig"
+
source "drivers/mtd/Kconfig"
source "drivers/of/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 24cd47014657..578f469f72fb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -76,7 +76,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
-obj-$(CONFIG_SCSI) += scsi/
+obj-y += scsi/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
@@ -185,3 +185,4 @@ obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
+obj-$(CONFIG_GNSS) += gnss/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b533eeb6139d..dd1eea90f67f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -3,13 +3,15 @@
# ACPI Configuration
#
+config ARCH_SUPPORTS_ACPI
+ bool
+
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
- depends on !IA64_HP_SIM
- depends on IA64 || X86 || ARM64
+ depends on ARCH_SUPPORTS_ACPI
depends on PCI
select PNP
- default y if (IA64 || X86)
+ default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
Linux requires an ACPI-compliant platform (hardware/firmware),
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f8fecfec5df9..9706613eecf9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)
static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;
static void lpss_iosf_enter_d3_state(void)
{
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);
+
+ lpss_iosf_d3_entered = true;
+
exit:
mutex_unlock(&lpss_iosf_mutex);
}
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
mutex_lock(&lpss_iosf_mutex);
+ if (!lpss_iosf_d3_entered)
+ goto exit;
+
+ lpss_iosf_d3_entered = false;
+
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
LPSS_IOSF_PMCSR, value2, mask2);
+exit:
mutex_unlock(&lpss_iosf_mutex);
}
-static int acpi_lpss_suspend(struct device *dev, bool runtime)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- bool wakeup = runtime || device_may_wakeup(dev);
int ret;
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
	 * wrong status for devices that are about to be powered off. See
* lpss_iosf_enter_d3_state() for further information.
*/
- if ((runtime || !pm_suspend_via_firmware()) &&
+ if (acpi_target_system_state() == ACPI_STATE_S0 &&
lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_enter_d3_state();
return ret;
}
-static int acpi_lpss_resume(struct device *dev, bool runtime)
+static int acpi_lpss_resume(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
	 * This call is kept first, to mirror the corresponding call in
	 * acpi_lpss_runtime_suspend().
*/
- if ((runtime || !pm_resume_via_firmware()) &&
- lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
return 0;
ret = pm_generic_suspend_late(dev);
- return ret ? ret : acpi_lpss_suspend(dev, false);
+ return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
static int acpi_lpss_resume_early(struct device *dev)
{
- int ret = acpi_lpss_resume(dev, false);
+ int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
static int acpi_lpss_runtime_resume(struct device *dev)
{
- int ret = acpi_lpss_resume(dev, true);
+ int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
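
The new lpss_iosf_d3_entered flag pairs the two paths above: the exit helper now only reverses the IOSF sideband programming if the entry helper actually performed it, so a resume that was not preceded by a matching suspend becomes a no-op. A minimal user-space sketch of that guard pattern in C (enter_d3()/exit_d3() are hypothetical stand-ins for the register writes):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t d3_lock = PTHREAD_MUTEX_INITIALIZER;
static bool d3_entered;

static void enter_d3(void)
{
	pthread_mutex_lock(&d3_lock);
	/* ... program the hardware for D3 here ... */
	d3_entered = true;		/* record that there is work to undo */
	pthread_mutex_unlock(&d3_lock);
}

static void exit_d3(void)
{
	pthread_mutex_lock(&d3_lock);
	if (!d3_entered)
		goto out;		/* no matching entry: nothing to undo */
	d3_entered = false;
	/* ... restore the hardware state here ... */
out:
	pthread_mutex_unlock(&d3_lock);
}
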
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 51c386bf230d..0f28a38a43ea 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -164,8 +164,8 @@ struct acpi_namespace_node {
#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
+#define ANOBJ_NODE_EARLY_INIT 0x80 /* acpi_exec only: Node was created via init file (-fi) */
-#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */
#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 3825df923480..bbb3b4d1e796 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -25,14 +25,15 @@
/* Flags for acpi_ns_lookup, acpi_ns_search_and_enter */
#define ACPI_NS_NO_UPSEARCH 0
-#define ACPI_NS_SEARCH_PARENT 0x01
-#define ACPI_NS_DONT_OPEN_SCOPE 0x02
-#define ACPI_NS_NO_PEER_SEARCH 0x04
-#define ACPI_NS_ERROR_IF_FOUND 0x08
-#define ACPI_NS_PREFIX_IS_SCOPE 0x10
-#define ACPI_NS_EXTERNAL 0x20
-#define ACPI_NS_TEMPORARY 0x40
-#define ACPI_NS_OVERRIDE_IF_FOUND 0x80
+#define ACPI_NS_SEARCH_PARENT 0x0001
+#define ACPI_NS_DONT_OPEN_SCOPE 0x0002
+#define ACPI_NS_NO_PEER_SEARCH 0x0004
+#define ACPI_NS_ERROR_IF_FOUND 0x0008
+#define ACPI_NS_PREFIX_IS_SCOPE 0x0010
+#define ACPI_NS_EXTERNAL 0x0020
+#define ACPI_NS_TEMPORARY 0x0040
+#define ACPI_NS_OVERRIDE_IF_FOUND 0x0080
+#define ACPI_NS_EARLY_INIT 0x0100
/* Flags for acpi_ns_walk_namespace */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 2733cd4e418c..3374d41582b5 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -180,6 +180,8 @@ char acpi_ut_remove_leading_zeros(char **string);
u8 acpi_ut_detect_hex_prefix(char **string);
+void acpi_ut_remove_hex_prefix(char **string);
+
u8 acpi_ut_detect_octal_prefix(char **string);
/*
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 556ff59bbbfc..3e5f95390f0d 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -763,7 +763,12 @@ acpi_db_command_dispatch(char *input_buffer,
case CMD_DISASSEMBLE:
case CMD_DISASM:
+#ifdef ACPI_DISASSEMBLER
(void)acpi_db_disassemble_method(acpi_gbl_db_args[1]);
+#else
+ acpi_os_printf
+ ("The AML Disassembler is not configured/present\n");
+#endif
break;
case CMD_DUMP:
@@ -872,7 +877,12 @@ acpi_db_command_dispatch(char *input_buffer,
case CMD_LIST:
+#ifdef ACPI_DISASSEMBLER
acpi_db_disassemble_aml(acpi_gbl_db_args[1], op);
+#else
+ acpi_os_printf
+ ("The AML Disassembler is not configured/present\n");
+#endif
break;
case CMD_LOCKS:
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 9fcecf104ba0..d8b7a0fe92ec 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -216,6 +216,7 @@ cleanup:
acpi_ut_remove_reference(obj_desc);
}
+#ifdef ACPI_DISASSEMBLER
/*******************************************************************************
*
* FUNCTION: acpi_db_disassemble_aml
@@ -242,9 +243,8 @@ void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op)
if (statements) {
num_statements = strtoul(statements, NULL, 0);
}
-#ifdef ACPI_DISASSEMBLER
+
acpi_dm_disassemble(NULL, op, num_statements);
-#endif
}
/*******************************************************************************
@@ -317,8 +317,6 @@ acpi_status acpi_db_disassemble_method(char *name)
walk_state->parse_flags |= ACPI_PARSE_DISASSEMBLE;
status = acpi_ps_parse_aml(walk_state);
-
-#ifdef ACPI_DISASSEMBLER
(void)acpi_dm_parse_deferred_ops(op);
/* Now we can disassemble the method */
@@ -326,7 +324,6 @@ acpi_status acpi_db_disassemble_method(char *name)
acpi_gbl_dm_opt_verbose = FALSE;
acpi_dm_disassemble(NULL, op, 0);
acpi_gbl_dm_opt_verbose = TRUE;
-#endif
acpi_ps_delete_parse_tree(op);
@@ -337,6 +334,7 @@ acpi_status acpi_db_disassemble_method(char *name)
acpi_ut_release_owner_id(&obj_desc->method.owner_id);
return (AE_OK);
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index 4647aa8efecb..f2526726daf6 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -10,6 +10,7 @@
#include "amlcode.h"
#include "acdebug.h"
#include "acinterp.h"
+#include "acparser.h"
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbxface")
@@ -262,10 +263,17 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
}
}
- /* Now we can display it */
+ /* Now we can disassemble and display it */
#ifdef ACPI_DISASSEMBLER
acpi_dm_disassemble(walk_state, display_op, ACPI_UINT32_MAX);
+#else
+ /*
+ * The AML Disassembler is not configured - at least we can
+ * display the opcode value and name
+ */
+ acpi_os_printf("AML Opcode: %4.4X %s\n", op->common.aml_opcode,
+ acpi_ps_get_opcode_name(op->common.aml_opcode));
#endif
if ((op->common.aml_opcode == AML_IF_OP) ||
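
The dbinput.c and dbxface.c hunks above apply one pattern: when ACPI_DISASSEMBLER is not compiled in, the debugger degrades gracefully rather than doing nothing. A hedged C sketch of that compile-time fallback (opcode_name() is a toy stand-in for acpi_ps_get_opcode_name(), and disassemble_op() is assumed to exist only in builds that define HAVE_DISASSEMBLER):

#include <stdio.h>

static const char *opcode_name(unsigned int op)
{
	return op == 0xA0 ? "If" : "Unknown";	/* toy lookup */
}

static void show_op(unsigned int op)
{
#ifdef HAVE_DISASSEMBLER
	disassemble_op(op);	/* rich output, only when configured in */
#else
	/* fallback: at least print the raw opcode value and name */
	printf("AML Opcode: %4.4X %s\n", op, opcode_name(op));
#endif
}
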
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7c937595dfcb..30fe89545d6a 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -15,6 +15,10 @@
#include "acnamesp.h"
#include "acparser.h"
+#ifdef ACPI_EXEC_APP
+#include "aecommon.h"
+#endif
+
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsfield")
@@ -259,6 +263,13 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
u64 position;
union acpi_parse_object *child;
+#ifdef ACPI_EXEC_APP
+ u64 value = 0;
+ union acpi_operand_object *result_desc;
+ union acpi_operand_object *obj_desc;
+ char *name_path;
+#endif
+
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
/* First field starts at bit zero */
@@ -391,6 +402,25 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+#ifdef ACPI_EXEC_APP
+ name_path =
+ acpi_ns_get_external_pathname(info->
+ field_node);
+ obj_desc =
+ acpi_ut_create_integer_object
+ (value);
+ if (ACPI_SUCCESS
+ (ae_lookup_init_file_entry
+ (name_path, &value))) {
+ acpi_ex_write_data_to_field
+ (obj_desc,
+ acpi_ns_get_attached_object
+ (info->field_node),
+ &result_desc);
+ }
+ acpi_ut_remove_reference(obj_desc);
+ ACPI_FREE(name_path);
+#endif
}
}
@@ -573,7 +603,9 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
!(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
flags |= ACPI_NS_TEMPORARY;
}
-
+#ifdef ACPI_EXEC_APP
+ flags |= ACPI_NS_OVERRIDE_IF_FOUND;
+#endif
/*
* Walk the list of entries in the field_list
* Note: field_list can be of zero length. In this case, Arg will be NULL.
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 3de794bcf8fa..69603ba52a3a 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -528,13 +528,18 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
status =
acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
- value = (u32)value64;
+ if (ACPI_SUCCESS(status)) {
+ value = (u32)value64;
+ }
break;
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
- value = (u32)value64;
+ if (ACPI_SUCCESS(status)) {
+ value = (u32)value64;
+ }
+
break;
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
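
The hwregs.c fix above stops value64 from being consumed when acpi_hw_read() fails, since the temporary is uninitialized in that case. A small self-contained sketch of the pattern in C (read_reg64() is a stub standing in for the real accessor):

#include <stdint.h>

static int read_reg64(uint64_t *out)	/* stub: returns 0 on success */
{
	*out = 0x12345678ULL;
	return 0;
}

static int read_reg32(uint32_t *value)
{
	uint64_t value64;
	int status = read_reg64(&value64);

	if (status == 0)
		*value = (uint32_t)value64;	/* only truncate a valid value */
	return status;				/* caller still sees the error */
}
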
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index fc0c2e2328cd..d8b8fc2ff563 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -51,16 +51,18 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
return_ACPI_STATUS(status);
}
- /*
- * 1) Disable all GPEs
- * 2) Enable all wakeup GPEs
- */
+ /* Disable all GPEs */
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+ status = acpi_hw_clear_acpi_status();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
acpi_gbl_system_awake_and_running = FALSE;
+ /* Enable all wakeup GPEs */
status = acpi_hw_enable_all_wakeup_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
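
The reordering in hwsleep.c clears pending ACPI status bits after every GPE is disabled but before the wakeup GPEs are armed, so a stale event cannot fire the moment wake sources are enabled. A user-space sketch of the checked sequence (the helpers are stubs for the hardware calls):

#include <stdbool.h>

static bool system_awake = true;
static int disable_all_events(void)   { return 0; }	/* stubs for the sketch */
static int clear_pending_status(void) { return 0; }
static int enable_wake_events(void)   { return 0; }

static int enter_sleep(void)
{
	int status;

	status = disable_all_events();		/* 1) quiesce every event source */
	if (status)
		return status;
	status = clear_pending_status();	/* 2) drop stale status bits */
	if (status)
		return status;
	system_awake = false;
	return enable_wake_events();		/* 3) arm only the wake sources */
}
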
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 220a718fbce9..e3f10afde5ff 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -558,6 +558,14 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
(char *)&current_node->name,
current_node));
}
+#ifdef ACPI_EXEC_APP
+ if ((status == AE_ALREADY_EXISTS) &&
+ (this_node->flags & ANOBJ_NODE_EARLY_INIT)) {
+ this_node->flags &= ~ANOBJ_NODE_EARLY_INIT;
+ status = AE_OK;
+ }
+#endif
+
#ifdef ACPI_ASL_COMPILER
/*
* If this ACPI name already exists within the namespace as an
@@ -613,13 +621,6 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Special handling for the last segment (num_segments == 0) */
else {
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag
- && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags &= ~IMPLICIT_EXTERNAL;
- }
-#endif
-
/*
* Sanity typecheck of the target object:
*
@@ -683,6 +684,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
}
}
}
+#ifdef ACPI_EXEC_APP
+ if (flags & ACPI_NS_EARLY_INIT) {
+ this_node->flags |= ANOBJ_NODE_EARLY_INIT;
+ }
+#endif
*return_node = this_node;
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index e9c9a63bb6a4..f594ab75a5fe 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -381,7 +381,6 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
- new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index bc5f05906bd1..34fc2f7476ed 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -22,6 +22,7 @@
#include "acdispat.h"
#include "amlcode.h"
#include "acconvert.h"
+#include "acnamesp.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psloop")
@@ -497,6 +498,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
status =
acpi_ps_create_op(walk_state, aml_op_start, &op);
if (ACPI_FAILURE(status)) {
+ /*
+ * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+ * executing it as a control method. However, if we encounter
+ * an error while loading the table, we need to keep trying to
+ * load the table rather than aborting the table load. Set the
+ * status to AE_OK to proceed with the table load.
+ */
+ if ((walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)
+ && status == AE_ALREADY_EXISTS) {
+ status = AE_OK;
+ }
if (status == AE_CTRL_PARSE_CONTINUE) {
continue;
}
@@ -515,12 +528,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- if (walk_state->opcode == AML_SCOPE_OP) {
+ if (acpi_ns_opens_scope
+ (acpi_ps_get_opcode_info
+ (walk_state->opcode)->object_type)) {
/*
- * If the scope op fails to parse, skip the body of the
- * scope op because the parse failure indicates that the
- * device may not exist.
+ * If the scope/device op fails to parse, skip the body of
+ * the scope op because the parse failure indicates that
+ * the device may not exist.
*/
+ ACPI_ERROR((AE_INFO,
+ "Skip parsing opcode %s",
+ acpi_ps_get_opcode_name
+ (walk_state->opcode)));
walk_state->parser_state.aml =
walk_state->aml + 1;
walk_state->parser_state.aml =
@@ -528,8 +547,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
(&walk_state->parser_state);
walk_state->aml =
walk_state->parser_state.aml;
- ACPI_ERROR((AE_INFO,
- "Skipping Scope block"));
}
continue;
@@ -694,6 +711,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
acpi_ps_next_parse_state(walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
+ } else
+ if ((walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)
+ && (ACPI_AML_EXCEPTION(status)
+ || status == AE_ALREADY_EXISTS
+ || status == AE_NOT_FOUND)) {
+ /*
+ * ACPI_PARSE_MODULE_LEVEL flag means that we
+ * are currently loading a table by executing
+ * it as a control method. However, if we
+ * encounter an error while loading the table,
+ * we need to keep trying to load the table
+ * rather than aborting the table load (setting
+ * the status to AE_OK continues the table
+ * load). If we get a failure at this point, it
+ * means that the dispatcher got an error while
+ * trying to execute the Op.
+ */
+ status = AE_OK;
}
}
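
Both psloop.c hunks implement the same policy: while a table is being loaded as module-level code (ACPI_PARSE_MODULE_LEVEL), recoverable errors are downgraded to success so that a single bad object does not abort the entire table load. A hedged C sketch of that filter, with stand-in status codes and flag value:

enum status { ST_OK, ST_ALREADY_EXISTS, ST_NOT_FOUND, ST_FATAL };

#define PARSE_MODULE_LEVEL 0x0080	/* stand-in for ACPI_PARSE_MODULE_LEVEL */

static enum status filter_table_load_error(unsigned int parse_flags,
					   enum status status)
{
	if ((parse_flags & PARSE_MODULE_LEVEL) &&
	    (status == ST_ALREADY_EXISTS || status == ST_NOT_FOUND))
		return ST_OK;	/* tolerate it; keep loading the table */
	return status;		/* anything else still aborts the load */
}
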
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 51891f9fb057..862149c8a208 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -516,9 +516,9 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
acpi_tb_check_duplication(table_desc, table_index);
if (ACPI_FAILURE(status)) {
if (status != AE_CTRL_TERMINATE) {
- ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ ACPI_EXCEPTION((AE_INFO, status,
"%4.4s 0x%8.8X%8.8X"
- " Table is duplicated",
+ " Table is already loaded",
acpi_ut_valid_nameseg
(table_desc->signature.
ascii) ? table_desc->
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 118f3ff1fbb5..8cc4392c61f3 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -355,6 +355,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
u16 original_count;
u16 new_count = 0;
acpi_cpu_flags lock_flags;
+ char *message;
ACPI_FUNCTION_NAME(ut_update_ref_count);
@@ -391,6 +392,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
object, object->common.type,
acpi_ut_get_object_type_name(object),
new_count));
+	message = "Increment";
break;
case REF_DECREMENT:
@@ -420,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
if (new_count == 0) {
acpi_ut_delete_internal_obj(object);
}
+ message = "Decrement";
break;
default:
@@ -436,8 +439,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*/
if (new_count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
- "Large Reference Count (0x%X) in object %p, Type=0x%.2X",
- new_count, object, object->common.type));
+ "Large Reference Count (0x%X) in object %p, Type=0x%.2X Operation=%s",
+ new_count, object, object->common.type, message));
}
}
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 5a64ddaed8a3..e47430272692 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
switch (lookup_status) {
case AE_ALREADY_EXISTS:
- acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
message = "Failure creating";
break;
case AE_NOT_FOUND:
- acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
message = "Could not resolve";
break;
default:
- acpi_os_printf("\n" ACPI_MSG_ERROR);
+ acpi_os_printf(ACPI_MSG_ERROR);
message = "Failure resolving";
break;
}
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 954f8e3e35cd..05ff20049b87 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -231,14 +231,34 @@ char acpi_ut_remove_whitespace(char **string)
u8 acpi_ut_detect_hex_prefix(char **string)
{
+ char *initial_position = *string;
+ acpi_ut_remove_hex_prefix(string);
+ if (*string != initial_position) {
+ return (TRUE); /* String is past leading 0x */
+ }
+
+ return (FALSE); /* Not a hex string */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_hex_prefix
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Remove a hex "0x" prefix
+ *
+ ******************************************************************************/
+
+void acpi_ut_remove_hex_prefix(char **string)
+{
if ((**string == ACPI_ASCII_ZERO) &&
(tolower((int)*(*string + 1)) == 'x')) {
*string += 2; /* Go past the leading 0x */
- return (TRUE);
}
-
- return (FALSE); /* Not a hex string */
}
/*******************************************************************************
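
The utstrsuppt.c refactor splits the hex-prefix handling in two: acpi_ut_remove_hex_prefix() advances the cursor past a leading "0x", and acpi_ut_detect_hex_prefix() merely reports whether the cursor moved. A compact C sketch of the same split:

#include <ctype.h>
#include <stdbool.h>

static void remove_hex_prefix(const char **s)
{
	if ((*s)[0] == '0' && tolower((unsigned char)(*s)[1]) == 'x')
		*s += 2;	/* skip the leading "0x" */
}

static bool detect_hex_prefix(const char **s)
{
	const char *start = *s;

	remove_hex_prefix(s);
	return *s != start;	/* moved iff a prefix was present */
}

This lets callers such as the implicit string-to-integer conversion strip the prefix without caring whether one was present, as the following utstrtoul64.c hunk does.
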
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index 8fadad242db6..5fde619a8bbd 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -218,7 +218,7 @@ u64 acpi_ut_implicit_strtoul64(char *string)
* implicit conversions, and the "0x" prefix is "not allowed".
* However, allow a "0x" prefix as an ACPI extension.
*/
- acpi_ut_detect_hex_prefix(&string);
+ acpi_ut_remove_hex_prefix(&string);
if (!acpi_ut_remove_leading_zeros(&string)) {
return_VALUE(0);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 7a3a541046ed..08f26db2da7e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -947,6 +947,24 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
return 0;
}
+static int rc_dma_get_range(struct device *dev, u64 *size)
+{
+ struct acpi_iort_node *node;
+ struct acpi_iort_root_complex *rc;
+
+ node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+ iort_match_node_callback, dev);
+ if (!node || node->revision < 1)
+ return -ENODEV;
+
+ rc = (struct acpi_iort_root_complex *)node->node_data;
+
+ *size = rc->memory_address_limit >= 64 ? U64_MAX :
+			1ULL << rc->memory_address_limit;
+
+ return 0;
+}
+
/**
* iort_dma_setup() - Set-up device DMA parameters.
*
@@ -960,25 +978,28 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
int ret, msb;
/*
- * Set default coherent_dma_mask to 32 bit. Drivers are expected to
- * setup the correct supported mask.
+ * If @dev is expected to be DMA-capable then the bus code that created
+ * it should have initialised its dma_mask pointer by this point. For
+ * now, we'll continue the legacy behaviour of coercing it to the
+ * coherent mask if not, but we'll no longer do so quietly.
*/
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
- /*
- * Set it to coherent_dma_mask by default if the architecture
- * code has not set it.
- */
- if (!dev->dma_mask)
+ if (!dev->dma_mask) {
+ dev_warn(dev, "DMA mask not set\n");
dev->dma_mask = &dev->coherent_dma_mask;
+ }
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ if (dev->coherent_dma_mask)
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ else
+ size = 1ULL << 32;
- if (dev_is_pci(dev))
+ if (dev_is_pci(dev)) {
ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
- else
+ if (ret == -ENODEV)
+ ret = rc_dma_get_range(dev, &size);
+ } else {
ret = nc_dma_get_range(dev, &size);
+ }
if (!ret) {
msb = fls64(dmaaddr + size - 1);
@@ -993,6 +1014,7 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
* Limit coherent and dma mask based on size
* retrieved from firmware.
*/
+ dev->bus_dma_mask = mask;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
}
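
The iort_dma_setup() change derives both masks from the firmware-reported window: the most significant bit of the last addressable byte gives the mask width (msb = fls64(dmaaddr + size - 1)). A user-space analogue of that computation, with local stand-ins for fls64() and DMA_BIT_MASK():

#include <stdint.h>

static int fls64_local(uint64_t v)	/* 1-based index of the highest set bit */
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

static uint64_t dma_bit_mask(int bits)
{
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

static uint64_t mask_for_window(uint64_t base, uint64_t size)
{
	return dma_bit_mask(fls64_local(base + size - 1));
}
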
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0113a5802a3..cb97b6105f52 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -23,18 +23,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/jiffies.h>
-#include <linux/async.h>
-#include <linux/dmi.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/types.h>
+
#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -364,6 +364,20 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+static enum power_supply_property energy_battery_full_cap_broken_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
@@ -577,8 +591,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
- printk_once(KERN_WARNING FW_BUG
- "battery: (dis)charge rate invalid.\n");
+ pr_warn_once(FW_BUG "battery: (dis)charge rate invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
@@ -717,10 +730,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
*/
pr_err("extension failed to load: %s", hook->name);
__battery_hook_unregister(hook, 0);
- return;
+ goto end;
}
}
pr_info("new extension: %s\n", hook->name);
+end:
mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +746,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
*/
static void battery_hook_add_battery(struct acpi_battery *battery)
{
- struct acpi_battery_hook *hook_node;
+ struct acpi_battery_hook *hook_node, *tmp;
mutex_lock(&hook_mutex);
INIT_LIST_HEAD(&battery->list);
@@ -744,15 +758,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
* when a battery gets hotplugged or initialized
* during the battery module initialization.
*/
- list_for_each_entry(hook_node, &battery_hook_list, list) {
+ list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat)) {
/*
* The notification of the extension has failed; to
* prevent further errors we will unload the extension.
*/
- __battery_hook_unregister(hook_node, 0);
pr_err("error in extension, unloading: %s",
hook_node->name);
+ __battery_hook_unregister(hook_node, 0);
}
}
mutex_unlock(&hook_mutex);
@@ -798,6 +812,11 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.properties = charge_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_props);
+ } else if (battery->full_charge_capacity == 0) {
+ battery->bat_desc.properties =
+ energy_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_full_cap_broken_props);
} else {
battery->bat_desc.properties = energy_battery_props;
battery->bat_desc.num_properties =
@@ -917,10 +936,11 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
static int acpi_battery_update(struct acpi_battery *battery, bool resume)
{
- int result, old_present = acpi_battery_present(battery);
- result = acpi_battery_get_status(battery);
+ int result = acpi_battery_get_status(battery);
+
if (result)
return result;
+
if (!acpi_battery_present(battery)) {
sysfs_remove_battery(battery);
battery->update_time = 0;
@@ -930,8 +950,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
if (resume)
return 0;
- if (!battery->update_time ||
- old_present != acpi_battery_present(battery)) {
+ if (!battery->update_time) {
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1020,7 +1039,7 @@ static int acpi_battery_info_proc_show(struct seq_file *seq, void *offset)
acpi_battery_units(battery));
seq_printf(seq, "battery technology: %srechargeable\n",
- (!battery->technology)?"non-":"");
+ battery->technology ? "" : "non-");
if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "design voltage: unknown\n");
@@ -1111,11 +1130,12 @@ static int acpi_battery_alarm_proc_show(struct seq_file *seq, void *offset)
goto end;
}
seq_printf(seq, "alarm: ");
- if (!battery->alarm)
- seq_printf(seq, "unsupported\n");
- else
+ if (battery->alarm) {
seq_printf(seq, "%u %sh\n", battery->alarm,
acpi_battery_units(battery));
+ } else {
+ seq_printf(seq, "unsupported\n");
+ }
end:
if (result)
seq_printf(seq, "ERROR: Unable to read battery alarm\n");
@@ -1148,9 +1168,9 @@ static ssize_t acpi_battery_write_alarm(struct file *file,
}
result = acpi_battery_set_alarm(battery);
end:
- if (!result)
- return count;
- return result;
+ if (result)
+ return result;
+ return count;
}
static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file)
@@ -1169,8 +1189,7 @@ static const struct file_operations acpi_battery_alarm_fops = {
static int acpi_battery_add_fs(struct acpi_device *device)
{
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ pr_warning(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_battery_dir);
@@ -1246,7 +1265,9 @@ static int battery_notify(struct notifier_block *nb,
if (!acpi_battery_present(battery))
return 0;
- if (!battery->bat) {
+ if (battery->bat) {
+ acpi_battery_refresh(battery);
+ } else {
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1254,8 +1275,7 @@ static int battery_notify(struct notifier_block *nb,
result = sysfs_add_battery(battery);
if (result)
return result;
- } else
- acpi_battery_refresh(battery);
+ }
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
@@ -1397,7 +1417,7 @@ static int acpi_battery_add(struct acpi_device *device)
}
#endif
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ pr_info(PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
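The switch to list_for_each_entry_safe() above matters because __battery_hook_unregister() deletes the node the loop is standing on. A minimal user-space sketch of the same delete-while-iterating pattern (all names invented):

#include <stdlib.h>

struct node { struct node *next; int broken; };

static void prune(struct node **head)
{
	struct node **pp = head, *n, *tmp;

	for (n = *head; n; n = tmp) {
		tmp = n->next;		/* capture the successor first... */
		if (n->broken) {
			*pp = tmp;	/* ...so unlinking and freeing n is safe */
			free(n);
		} else {
			pp = &n->next;
		}
	}
}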
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 84b4a62018eb..292088fcc624 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -66,37 +66,10 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
return 0;
}
#endif
-static int set_gbl_term_list(const struct dmi_system_id *id)
-{
- acpi_gbl_execute_tables_as_methods = 1;
- return 0;
-}
-static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
- /*
- * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
- * mode.
- * https://bugzilla.kernel.org/show_bug.cgi?id=198515
- */
- {
- .callback = set_gbl_term_list,
- .ident = "Dell Precision M5530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
- },
- },
- {
- .callback = set_gbl_term_list,
- .ident = "Dell XPS 15 9570",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
- },
- },
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
- * DSDT will be copied to memory.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
@@ -110,7 +83,7 @@ static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
{}
};
#else
-static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
{}
};
#endif
@@ -962,7 +935,7 @@ static int acpi_device_probe(struct device *dev)
return 0;
}
-static int acpi_device_remove(struct device * dev)
+static int acpi_device_remove(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = acpi_dev->driver;
@@ -1060,8 +1033,11 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
- /* Check machine-specific quirks */
- dmi_check_system(acpi_quirks_dmi_table);
+ /*
+ * If the machine falls into the DMI check table,
+ * DSDT will be copied to memory
+ */
+ dmi_check_system(dsdt_dmi_table);
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 2345a5ee2dbb..a19ff3977ac4 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) "ACPI: button: " fmt
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -235,9 +236,6 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
button->last_time = ktime_get();
}
- if (state)
- acpi_pm_wakeup_event(&device->dev);
-
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -252,7 +250,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
return ret;
}
-static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq,
+ void *offset)
{
struct acpi_device *device = seq->private;
int state;
@@ -366,7 +365,8 @@ int acpi_lid_open(void)
}
EXPORT_SYMBOL(acpi_lid_open);
-static int acpi_lid_update_state(struct acpi_device *device)
+static int acpi_lid_update_state(struct acpi_device *device,
+ bool signal_wakeup)
{
int state;
@@ -374,6 +374,9 @@ static int acpi_lid_update_state(struct acpi_device *device)
if (state < 0)
return state;
+ if (state && signal_wakeup)
+ acpi_pm_wakeup_event(&device->dev);
+
return acpi_lid_notify_state(device, state);
}
@@ -384,7 +387,7 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
(void)acpi_lid_notify_state(device, 1);
break;
case ACPI_BUTTON_LID_INIT_METHOD:
- (void)acpi_lid_update_state(device);
+ (void)acpi_lid_update_state(device, false);
break;
case ACPI_BUTTON_LID_INIT_IGNORE:
default:
@@ -409,7 +412,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
users = button->input->users;
mutex_unlock(&button->input->mutex);
if (users)
- acpi_lid_update_state(device);
+ acpi_lid_update_state(device, true);
} else {
int keycode;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 442a9e24f439..d4e5610e09c5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2042,7 +2042,21 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
.ident = "Thinkpad X1 Carbon 6th",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
+ },
+ },
+ {
+ .ident = "ThinkPad X1 Carbon 6th",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
+ },
+ },
+ {
+ .ident = "ThinkPad X1 Yoga 3rd",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
},
},
{ },
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 31f6325ab3b5..b072cfc5f20e 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -408,7 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
const guid_t *guid;
int rc, i;
- *cmd_rc = -EINVAL;
+ if (cmd_rc)
+ *cmd_rc = -EINVAL;
func = cmd;
if (cmd == ND_CMD_CALL) {
call_pkg = buf;
@@ -519,7 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* If we returned an error (as elsewhere), the caller wouldn't
* be able to rely upon the data returned for its calculation.
*/
- *cmd_rc = 0;
+ if (cmd_rc)
+ *cmd_rc = 0;
return 0;
}
@@ -1275,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
mutex_lock(&acpi_desc->init_mutex);
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
- work_busy(&acpi_desc->dwork.work)
+ acpi_desc->scrub_busy
&& !acpi_desc->cancel ? "+\n" : "\n");
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -2956,6 +2958,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
return 0;
}
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
+ acpi_desc->scrub_busy = 1;
+ /* note: scrub_tmo should only be updated from within the workqueue */
+ if (tmo)
+ acpi_desc->scrub_tmo = tmo;
+ queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+ __sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
+ acpi_desc->scrub_busy = 0;
+ acpi_desc->scrub_count++;
+ if (acpi_desc->scrub_count_state)
+ sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
static void acpi_nfit_scrub(struct work_struct *work)
{
struct acpi_nfit_desc *acpi_desc;
@@ -2966,14 +2994,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
mutex_lock(&acpi_desc->init_mutex);
query_rc = acpi_nfit_query_poison(acpi_desc);
tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
- if (tmo) {
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
- acpi_desc->scrub_tmo = tmo;
- } else {
- acpi_desc->scrub_count++;
- if (acpi_desc->scrub_count_state)
- sysfs_notify_dirent(acpi_desc->scrub_count_state);
- }
+ if (tmo)
+ __sched_ars(acpi_desc, tmo);
+ else
+ notify_ars_done(acpi_desc);
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -3054,7 +3078,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
break;
}
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+ sched_ars(acpi_desc);
return 0;
}
@@ -3257,7 +3281,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
}
}
if (scheduled) {
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+ sched_ars(acpi_desc);
dev_dbg(dev, "ars_scan triggered\n");
}
mutex_unlock(&acpi_desc->init_mutex);
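The cmd_rc guards added in the first two hunks follow the usual optional out-parameter convention. A short sketch of the pattern, with an invented function name:

#include <errno.h>

/* Callers that do not care about the firmware status pass NULL,
 * so every store through the pointer must be guarded.
 */
static int do_cmd(int *cmd_rc)
{
	if (cmd_rc)
		*cmd_rc = -EINVAL;	/* pessimistic default */

	/* ... issue the command ... */

	if (cmd_rc)
		*cmd_rc = 0;		/* success path */
	return 0;
}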
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 29466961426e..d1274ea2d251 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -204,6 +204,7 @@ struct acpi_nfit_desc {
unsigned int max_ars;
unsigned int scrub_count;
unsigned int scrub_mode;
+ unsigned int scrub_busy:1;
unsigned int cancel:1;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 8a8f43568510..b2a16ed7e81a 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -66,6 +66,14 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
* be removed if both new and old graphics cards are supported.
*/
{"Linux-Dell-Video", true},
+ /*
+ * Linux-Lenovo-NV-HDMI-Audio is used by BIOS to power on NVidia's HDMI
+ * audio device which is turned off for power-saving in Windows OS.
+ * This power-management feature has been observed on some Lenovo ThinkPad
+ * systems, which will not be able to output audio via HDMI without
+ * a BIOS workaround.
+ */
+ {"Linux-Lenovo-NV-HDMI-Audio", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7ca41bf023c9..8df9abfa947b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -45,6 +45,8 @@
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include "acpica/accommon.h"
+#include "acpica/acnamesp.h"
#include "internal.h"
#define _COMPONENT ACPI_OS_SERVICES
@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
}
EXPORT_SYMBOL(acpi_check_region);
+static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
+ void *_res, void **return_value)
+{
+ struct acpi_mem_space_context **mem_ctx;
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *region_obj2;
+ union acpi_operand_object *region_obj;
+ struct resource *res = _res;
+ acpi_status status;
+
+ region_obj = acpi_ns_get_attached_object(handle);
+ if (!region_obj)
+ return AE_OK;
+
+ handler_obj = region_obj->region.handler;
+ if (!handler_obj)
+ return AE_OK;
+
+ if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return AE_OK;
+
+ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
+ return AE_OK;
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2)
+ return AE_OK;
+
+ mem_ctx = (void *)&region_obj2->extra.region_context;
+
+ if (!(mem_ctx[0]->address >= res->start &&
+ mem_ctx[0]->address < res->end))
+ return AE_OK;
+
+ status = handler_obj->address_space.setup(region_obj,
+ ACPI_REGION_DEACTIVATE,
+ NULL, (void **)mem_ctx);
+ if (ACPI_SUCCESS(status))
+ region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+ return status;
+}
+
+/**
+ * acpi_release_memory - Release any mappings done to a memory region
+ * @handle: Handle to namespace node
+ * @res: Memory resource
+ * @level: A level that terminates the search
+ *
+ * Walks through @handle and unmaps all SystemMemory Operation Regions that
+ * overlap with @res and that have already been activated (mapped).
+ *
+ * This is a helper that allows drivers to place special requirements on memory
+ * regions that may overlap with operation regions, primarily allowing them to
+ * safely map the region as non-cached memory.
+ *
+ * The unmapped Operation Regions will be automatically remapped next time they
+ * are accessed, so the drivers do not need to do anything else.
+ */
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+ u32 level)
+{
+ if (!(res->flags & IORESOURCE_MEM))
+ return AE_TYPE;
+
+ return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+ acpi_deactivate_mem_region, NULL, res, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_release_memory);
+
/*
* Let drivers know whether the resource checks are effective
*/
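A hypothetical caller of the acpi_release_memory() helper above, sketched under the assumption that a PCI driver wants to remap a BAR write-combined after dropping any overlapping AML mappings (everything except acpi_release_memory() itself is invented):

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/pci.h>

static int claim_bar_wc(struct pci_dev *pdev)
{
	struct resource *res = &pdev->resource[0];
	void __iomem *base;
	acpi_status status;

	/* unmap AML SystemMemory regions overlapping the BAR first */
	status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res,
				     ACPI_UINT32_MAX);
	if (ACPI_FAILURE(status))
		dev_warn(&pdev->dev, "failed to release AML mappings\n");

	base = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res));
	return base ? 0 : -ENOMEM;
}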
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index 7ffa74048107..22c9e374c923 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -25,16 +25,121 @@
#define PMIC_A0LOCK_REG 0xc5
static struct pmic_table power_table[] = {
+/* {
+ .address = 0x00,
+ .reg = ??,
+ .bit = ??,
+ }, ** VSYS */
+ {
+ .address = 0x04,
+ .reg = 0x63,
+ .bit = 0x00,
+ }, /* SYSX -> VSYS_SX */
+ {
+ .address = 0x08,
+ .reg = 0x62,
+ .bit = 0x00,
+ }, /* SYSU -> VSYS_U */
+ {
+ .address = 0x0c,
+ .reg = 0x64,
+ .bit = 0x00,
+ }, /* SYSS -> VSYS_S */
+ {
+ .address = 0x10,
+ .reg = 0x6a,
+ .bit = 0x00,
+ }, /* V50S -> V5P0S */
+ {
+ .address = 0x14,
+ .reg = 0x6b,
+ .bit = 0x00,
+ }, /* HOST -> VHOST, USB2/3 host */
+ {
+ .address = 0x18,
+ .reg = 0x6c,
+ .bit = 0x00,
+ }, /* VBUS -> VBUS, USB2/3 OTG */
+ {
+ .address = 0x1c,
+ .reg = 0x6d,
+ .bit = 0x00,
+ }, /* HDMI -> VHDMI */
+/* {
+ .address = 0x20,
+ .reg = ??,
+ .bit = ??,
+ }, ** S285 */
{
.address = 0x24,
.reg = 0x66,
.bit = 0x00,
- },
+ }, /* X285 -> V2P85SX, camera */
+/* {
+ .address = 0x28,
+ .reg = ??,
+ .bit = ??,
+ }, ** V33A */
+ {
+ .address = 0x2c,
+ .reg = 0x69,
+ .bit = 0x00,
+ }, /* V33S -> V3P3S, display/ssd/audio */
+ {
+ .address = 0x30,
+ .reg = 0x68,
+ .bit = 0x00,
+ }, /* V33U -> V3P3U, SDIO wifi&bt */
+/* {
+ .address = 0x34 .. 0x40,
+ .reg = ??,
+ .bit = ??,
+ }, ** V33I, V18A, REFQ, V12A */
+ {
+ .address = 0x44,
+ .reg = 0x5c,
+ .bit = 0x00,
+ }, /* V18S -> V1P8S, SOC/USB PHY/SIM */
{
.address = 0x48,
.reg = 0x5d,
.bit = 0x00,
- },
+ }, /* V18X -> V1P8SX, eMMC/camara/audio */
+ {
+ .address = 0x4c,
+ .reg = 0x5b,
+ .bit = 0x00,
+ }, /* V18U -> V1P8U, LPDDR */
+ {
+ .address = 0x50,
+ .reg = 0x61,
+ .bit = 0x00,
+ }, /* V12X -> V1P2SX, SOC SFR */
+ {
+ .address = 0x54,
+ .reg = 0x60,
+ .bit = 0x00,
+ }, /* V12S -> V1P2S, MIPI */
+/* {
+ .address = 0x58,
+ .reg = ??,
+ .bit = ??,
+ }, ** V10A */
+ {
+ .address = 0x5c,
+ .reg = 0x56,
+ .bit = 0x00,
+ }, /* V10S -> V1P0S, SOC GFX */
+ {
+ .address = 0x60,
+ .reg = 0x57,
+ .bit = 0x00,
+ }, /* V10X -> V1P0SX, SOC display/DDR IO/PCIe */
+ {
+ .address = 0x64,
+ .reg = 0x59,
+ .bit = 0x00,
+ }, /* V105 -> V1P05S, L2 SRAM */
};
static struct pmic_table thermal_table[] = {
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index e5ea1974d1e3..d1e26cb599bf 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
if (cpu_node) {
cpu_node = acpi_find_processor_package_id(table, cpu_node,
level, flag);
- /* Only the first level has a guaranteed id */
- if (level == 0)
+ /*
+ * As per the specification, if the processor structure represents
+ * an actual processor, then the ACPI processor ID must be valid.
+ * For processor containers, ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+ * should be set if the UID is valid.
+ */
+ if (level == 0 ||
+ cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
return cpu_node->acpi_processor_id;
return ACPI_PTR_DIFF(cpu_node, table);
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index b933061b6b60..8c0a54d50d0e 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -205,6 +205,7 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
return phys_id;
}
+EXPORT_SYMBOL_GPL(acpi_get_phys_id);
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 5815356ea6ad..693cf05b0cc4 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -542,6 +542,23 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
return 0;
}
+static struct fwnode_handle *
+acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
+{
+ struct fwnode_handle *child;
+
+ /*
+ * Find first matching named child node of this fwnode.
+ * For ACPI this will be a data only sub-node.
+ */
+ fwnode_for_each_child_node(fwnode, child)
+ if (acpi_data_node_match(child, childname))
+ return child;
+
+ return NULL;
+}
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -579,7 +596,7 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
*/
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -607,7 +624,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -ENODEV ? -EINVAL : ret;
- args->adev = device;
+ args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
}
@@ -633,6 +650,8 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
u32 nargs, i;
if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ struct fwnode_handle *ref_fwnode;
+
ret = acpi_bus_get_device(element->reference.handle,
&device);
if (ret)
@@ -641,6 +660,19 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
nargs = 0;
element++;
+ /*
+ * Find the referred data extension node under the
+ * referred device node.
+ */
+ for (ref_fwnode = acpi_fwnode_handle(device);
+ element < end && element->type == ACPI_TYPE_STRING;
+ element++) {
+ ref_fwnode = acpi_fwnode_get_named_child_node(
+ ref_fwnode, element->string.pointer);
+ if (!ref_fwnode)
+ return -EINVAL;
+ }
+
/* assume following integer elements are all args */
for (i = 0; element + i < end && i < num_args; i++) {
int type = element[i].type;
@@ -653,11 +685,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
}
- if (nargs > MAX_ACPI_REFERENCE_ARGS)
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;
if (idx == index) {
- args->adev = device;
+ args->fwnode = ref_fwnode;
args->nargs = nargs;
for (i = 0; i < nargs; i++)
args->args[i] = element[i].integer.value;
@@ -995,16 +1027,36 @@ struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
return NULL;
}
+/*
+ * Return true if the node is an ACPI graph node. Called on either ports
+ * or endpoints.
+ */
+static bool is_acpi_graph_node(struct fwnode_handle *fwnode,
+ const char *str)
+{
+ unsigned int len = strlen(str);
+ const char *name;
+
+ if (!len || !is_acpi_data_node(fwnode))
+ return false;
+
+ name = to_acpi_data_node(fwnode)->name;
+
+ return (fwnode_property_present(fwnode, "reg") &&
+ !strncmp(name, str, len) && name[len] == '@') ||
+ fwnode_property_present(fwnode, str);
+}
+
/**
* acpi_graph_get_next_endpoint - Get next endpoint ACPI firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
* Looks up next endpoint ACPI firmware node below a given @fwnode. Returns
- * %NULL if there is no next endpoint, ERR_PTR() in case of error. In case
- * of success the next endpoint is returned.
+ * %NULL if there is no next endpoint or in case of error. In case of success
+ * the next endpoint is returned.
*/
-struct fwnode_handle *acpi_graph_get_next_endpoint(
+static struct fwnode_handle *acpi_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
@@ -1013,8 +1065,14 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) {
do {
port = fwnode_get_next_child_node(fwnode, port);
- /* Ports must have port property */
- if (fwnode_property_present(port, "port"))
+ /*
+ * The names of the port nodes begin with "port@"
+ * followed by the number of the port node, and they
+ * also have a "reg" property holding that same number.
+ * For compatibility reasons a node is also
+ * recognised as a port node from the "port" property.
+ */
+ if (is_acpi_graph_node(port, "port"))
break;
} while (port);
} else {
@@ -1029,15 +1087,19 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(
port = fwnode_get_next_child_node(fwnode, port);
if (!port)
break;
- if (fwnode_property_present(port, "port"))
+ if (is_acpi_graph_node(port, "port"))
endpoint = fwnode_get_next_child_node(port, NULL);
}
- if (endpoint) {
- /* Endpoints must have "endpoint" property */
- if (!fwnode_property_present(endpoint, "endpoint"))
- return ERR_PTR(-EPROTO);
- }
+ /*
+ * The names of the endpoint nodes begin with "endpoint@" followed by
+ * the number of the endpoint node, and they also have a "reg" property
+ * holding that same number. For compatibility
+ * reasons a node is also recognised as an endpoint node from the
+ * "endpoint" property.
+ */
+ if (!is_acpi_graph_node(endpoint, "endpoint"))
+ return NULL;
return endpoint;
}
@@ -1074,65 +1136,42 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
/**
* acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
* @fwnode: Endpoint firmware node pointing to a remote device
- * @parent: Firmware node of remote port parent is filled here if not %NULL
- * @port: Firmware node of remote port is filled here if not %NULL
* @endpoint: Firmware node of remote endpoint is filled here if not %NULL
*
- * Function parses remote end of ACPI firmware remote endpoint and fills in
- * fields requested by the caller. Returns %0 in case of success and
- * negative errno otherwise.
+ * Returns the remote endpoint corresponding to @__fwnode. NULL on error.
*/
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode,
- struct fwnode_handle **parent,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint)
+static struct fwnode_handle *
+acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
{
struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
- return ret;
+ return NULL;
+
+ /* Direct endpoint reference? */
+ if (!is_acpi_device_node(args.fwnode))
+ return args.nargs ? NULL : args.fwnode;
/*
* Always require two arguments with the reference: port and
* endpoint indices.
*/
if (args.nargs != 2)
- return -EPROTO;
+ return NULL;
- fwnode = acpi_fwnode_handle(args.adev);
+ fwnode = args.fwnode;
port_nr = args.args[0];
endpoint_nr = args.args[1];
- if (parent)
- *parent = fwnode;
-
- if (!port && !endpoint)
- return 0;
-
fwnode = acpi_graph_get_child_prop_value(fwnode, "port", port_nr);
- if (!fwnode)
- return -EPROTO;
-
- if (port)
- *port = fwnode;
-
- if (!endpoint)
- return 0;
- fwnode = acpi_graph_get_child_prop_value(fwnode, "endpoint",
- endpoint_nr);
- if (!fwnode)
- return -EPROTO;
-
- *endpoint = fwnode;
-
- return 0;
+ return acpi_graph_get_child_prop_value(fwnode, "endpoint", endpoint_nr);
}
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
@@ -1186,70 +1225,14 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static struct fwnode_handle *
-acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
- const char *childname)
-{
- struct fwnode_handle *child;
-
- /*
- * Find first matching named child node of this fwnode.
- * For ACPI this will be a data only sub-node.
- */
- fwnode_for_each_child_node(fwnode, child)
- if (acpi_data_node_match(child, childname))
- return child;
-
- return NULL;
-}
-
static int
acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int args_count, unsigned int index,
struct fwnode_reference_args *args)
{
- struct acpi_reference_args acpi_args;
- unsigned int i;
- int ret;
-
- ret = __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, &acpi_args);
- if (ret < 0)
- return ret;
- if (!args)
- return 0;
-
- args->nargs = acpi_args.nargs;
- args->fwnode = acpi_fwnode_handle(acpi_args.adev);
-
- for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
- args->args[i] = i < acpi_args.nargs ? acpi_args.args[i] : 0;
-
- return 0;
-}
-
-static struct fwnode_handle *
-acpi_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle *prev)
-{
- struct fwnode_handle *endpoint;
-
- endpoint = acpi_graph_get_next_endpoint(fwnode, prev);
- if (IS_ERR(endpoint))
- return NULL;
-
- return endpoint;
-}
-
-static struct fwnode_handle *
-acpi_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *endpoint = NULL;
-
- acpi_graph_get_remote_endpoint(fwnode, NULL, NULL, &endpoint);
-
- return endpoint;
+ return __acpi_node_get_property_reference(fwnode, prop, index,
+ args_count, args);
}
static struct fwnode_handle *
@@ -1265,8 +1248,10 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
endpoint->local_fwnode = fwnode;
- fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
- fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
+ if (fwnode_property_read_u32(port_fwnode, "reg", &endpoint->port))
+ fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
+ if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
+ fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
return 0;
}
@@ -1292,9 +1277,9 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
- acpi_fwnode_graph_get_next_endpoint, \
+ acpi_graph_get_next_endpoint, \
.graph_get_remote_endpoint = \
- acpi_fwnode_graph_get_remote_endpoint, \
+ acpi_graph_get_remote_endpoint, \
.graph_get_port_parent = acpi_fwnode_get_parent, \
.graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
}; \
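The "port@N"/"endpoint@N" recognition in is_acpi_graph_node() reduces to a prefix-plus-'@' test. A stand-alone sketch of just that check:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool name_matches(const char *name, const char *str)
{
	size_t len = strlen(str);

	/* "port@0" matches "port"; "portable" must not */
	return len && !strncmp(name, str, len) && name[len] == '@';
}

int main(void)
{
	printf("%d %d\n", name_matches("port@0", "port"),
	       name_matches("portable", "port"));	/* prints: 1 0 */
	return 0;
}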
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 970dd87d347c..e1b6231cfa1c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1528,7 +1528,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{
struct acpi_device *parent = device->parent;
- const struct acpi_device_id indirect_io_hosts[] = {
+ static const struct acpi_device_id indirect_io_hosts[] = {
{"HISI0191", 0},
{}
};
@@ -1540,6 +1540,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{
struct list_head resource_list;
bool is_serial_bus_slave = false;
+ /*
+ * These devices have multiple I2cSerialBus resources and an i2c-client
+ * must be instantiated for each, each with its own i2c_device_id.
+ * Normally we only instantiate an i2c-client for the first resource,
+ * using the ACPI HID as id. These special cases are handled by the
+ * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
+ * which i2c_device_id to use for each resource.
+ */
+ static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
+ {"BSG1160", },
+ {}
+ };
if (acpi_is_indirect_io_slave(device))
return true;
@@ -1551,6 +1563,10 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
fwnode_property_present(&device->fwnode, "baud")))
return true;
+ /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
+ if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
+ return false;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list,
acpi_check_serial_bus_slave,
@@ -1612,7 +1628,8 @@ static int acpi_add_single_object(struct acpi_device **child,
* Note this must be done before the get power-/wakeup_dev-flags calls.
*/
if (type == ACPI_BUS_TYPE_DEVICE)
- acpi_bus_get_status(device);
+ if (acpi_bus_get_status(device) < 0)
+ acpi_set_device_status(device, 0);
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
@@ -1690,7 +1707,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
* acpi_add_single_object updates this once we've an acpi_device
* so that acpi_bus_get_status' quirk handling can be used.
*/
- *sta = 0;
+ *sta = ACPI_STA_DEFAULT;
break;
case ACPI_TYPE_PROCESSOR:
*type = ACPI_BUS_TYPE_PROCESSOR;
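For reference, the ACPI_STA_DEFAULT used above is the OR of the four standard _STA bits; the values below mirror the ACPICA definitions and are shown only for illustration:

#define ACPI_STA_DEVICE_PRESENT		0x01
#define ACPI_STA_DEVICE_ENABLED		0x02
#define ACPI_STA_DEVICE_UI		0x04
#define ACPI_STA_DEVICE_FUNCTIONING	0x08

#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT |	\
			  ACPI_STA_DEVICE_ENABLED |	\
			  ACPI_STA_DEVICE_UI |		\
			  ACPI_STA_DEVICE_FUNCTIONING)	/* == 0x0f */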
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5d0486f1cfcd..754d59f95500 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -338,6 +338,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Asus 1025C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
+ },
+ },
/*
* https://bugzilla.kernel.org/show_bug.cgi?id=189431
* Lenovo G50-45 is a platform later than 2012, but needs nvs memory
@@ -718,9 +726,6 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
-#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
-
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
@@ -924,17 +929,14 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
- (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
- lps0_dsm_func_mask = bitmask;
- lps0_device_handle = adev->handle;
- /*
- * Use suspend-to-idle by default if the default
- * suspend mode was not set from the command line.
- */
- if (mem_sleep_default > PM_SUSPEND_MEM)
- mem_sleep_current = PM_SUSPEND_TO_IDLE;
- }
+ lps0_dsm_func_mask = bitmask;
+ lps0_device_handle = adev->handle;
+ /*
+ * Use suspend-to-idle by default if the default
+ * suspend mode was not set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
bitmask);
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index ec5b0f190231..06c31ec3cc70 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -62,14 +62,20 @@ static const struct always_present_id always_present_ids[] = {
*/
ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
/*
- * On the Dell Venue 11 Pro 7130 the DSDT hides the touchscreen ACPI
- * device until a certain time after _SB.PCI0.GFX0.LCD.LCD1._ON gets
- * called has passed *and* _STA has been called at least 3 times since.
+ * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
+ * the touchscreen ACPI device until a certain time
+ * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed
+ * *and* _STA has been called at least 3 times since.
*/
ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
+ ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
+ }),
+
/*
* The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
@@ -103,13 +109,9 @@ static const struct always_present_id always_present_ids[] = {
bool acpi_device_always_present(struct acpi_device *adev)
{
- u32 *status = (u32 *)&adev->status;
- u32 old_status = *status;
bool ret = false;
unsigned int i;
- /* acpi_match_device_ids checks status, so set it to default */
- *status = ACPI_STA_DEFAULT;
for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) {
if (acpi_match_device_ids(adev, always_present_ids[i].hid))
continue;
@@ -125,15 +127,9 @@ bool acpi_device_always_present(struct acpi_device *adev)
!dmi_check_system(always_present_ids[i].dmi_ids))
continue;
- if (old_status != ACPI_STA_DEFAULT) /* Log only once */
- dev_info(&adev->dev,
- "Device [%s] is in always present list\n",
- adev->pnp.bus_id);
-
ret = true;
break;
}
- *status = old_status;
return ret;
}
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index ee4880bfdcdc..432e9ad77070 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU && !M68K
+ depends on MMU
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 95283f3bb51c..d58763b6b009 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -51,7 +51,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
@@ -71,8 +70,12 @@
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
#include <uapi/linux/android/binder.h>
+
+#include <asm/cacheflush.h>
+
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -161,13 +164,13 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
#define binder_debug(mask, x...) \
do { \
if (binder_debug_mask & mask) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
} while (0)
#define binder_user_error(x...) \
do { \
if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
if (binder_stop_on_user_error) \
binder_stop_on_user_error = 2; \
} while (0)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2628806c64a2..3f3b7b253445 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -17,7 +17,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
@@ -28,6 +27,8 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
+#include <linux/ratelimit.h>
+#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -36,11 +37,12 @@ struct list_lru binder_alloc_lru;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
-static uint32_t binder_alloc_debug_mask;
+static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
module_param_named(debug_mask, binder_alloc_debug_mask,
uint, 0644);
@@ -48,7 +50,7 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
#define binder_alloc_debug(mask, x...) \
do { \
if (binder_alloc_debug_mask & mask) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
@@ -152,8 +154,10 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
* free the buffer twice
*/
if (buffer->free_in_progress) {
- pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
- alloc->pid, current->pid, (u64)user_ptr);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid,
+ (u64)user_ptr);
return NULL;
}
buffer->free_in_progress = 1;
@@ -224,8 +228,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
if (!vma && need_mm) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- alloc->pid);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
goto err_no_vma;
}
@@ -344,8 +349,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
int ret;
if (alloc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- alloc->pid);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
return ERR_PTR(-ESRCH);
}
@@ -417,11 +423,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
if (buffer_size > largest_free_size)
largest_free_size = buffer_size;
}
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- alloc->pid, size);
- pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
- total_alloc_size, allocated_buffers, largest_alloc_size,
- total_free_size, free_buffers, largest_free_size);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers,
+ largest_alloc_size, total_free_size,
+ free_buffers, largest_free_size);
return ERR_PTR(-ENOSPC);
}
if (n == NULL) {
@@ -731,8 +740,10 @@ err_alloc_pages_failed:
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
- pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
- alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end,
+ failure_string, ret);
return ret;
}
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 76e3b9c8a8a2..588eb3ec3507 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -248,14 +248,17 @@ DECLARE_EVENT_CLASS(binder_buffer_class,
__field(int, debug_id)
__field(size_t, data_size)
__field(size_t, offsets_size)
+ __field(size_t, extra_buffers_size)
),
TP_fast_assign(
__entry->debug_id = buf->debug_id;
__entry->data_size = buf->data_size;
__entry->offsets_size = buf->offsets_size;
+ __entry->extra_buffers_size = buf->extra_buffers_size;
),
- TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
- __entry->debug_id, __entry->data_size, __entry->offsets_size)
+ TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd",
+ __entry->debug_id, __entry->data_size, __entry->offsets_size,
+ __entry->extra_buffers_size)
);
DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2b16e7c8fff3..39b181d6bd0d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
- depends on HAS_DMA
depends on ARCH_HIGHBANK || COMPILE_TEST
help
This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
- depends on HAS_DMA
depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
select GENERIC_PHY
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 738fb22978dd..021ce46e2e57 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -609,7 +610,7 @@ static int marvell_enable = 1;
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
-static int mobile_lpm_policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
return strcmp(buf, dmi->driver_data) < 0;
}
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ /* Various Lenovo 50 series have LPM issues with older BIOSen */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+ },
+ .driver_data = "20180406", /* 1.31 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+ },
+ .driver_data = "20180420", /* 1.28 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+ },
+ .driver_data = "20180315", /* 1.33 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+ },
+ /*
+ * Note date based on release notes, 2.35 has been
+ * reported to be good, but I've been unable to get
+ * a hold of the reporter to get the DMI BIOS date.
+ * TODO: fix this.
+ */
+ .driver_data = "20180310", /* 2.35 */
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *dmi = dmi_first_match(sysids);
+ int year, month, date;
+ char buf[9];
+
+ if (!dmi)
+ return false;
+
+ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+ return strcmp(buf, dmi->driver_data) < 0;
+}
+
static bool ahci_broken_online(struct pci_dev *pdev)
{
#define ENCODE_BUSDEVFN(bus, slot, func) \
@@ -1550,6 +1604,37 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
+static void ahci_update_initial_lpm_policy(struct ata_port *ap,
+ struct ahci_host_priv *hpriv)
+{
+ int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+
+ /* Ignore processing for non-mobile platforms */
+ if (!(hpriv->flags & AHCI_HFLAG_IS_MOBILE))
+ return;
+
+ /* user modified policy via module param */
+ if (mobile_lpm_policy != -1) {
+ policy = mobile_lpm_policy;
+ goto update_policy;
+ }
+
+#ifdef CONFIG_ACPI
+ if (policy > ATA_LPM_MED_POWER &&
+ (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ if (hpriv->cap & HOST_CAP_PART)
+ policy = ATA_LPM_MIN_POWER_WITH_PARTIAL;
+ else if (hpriv->cap & HOST_CAP_SSC)
+ policy = ATA_LPM_MIN_POWER;
+ }
+#endif
+
+update_policy:
+ if (policy >= ATA_LPM_UNKNOWN && policy <= ATA_LPM_MIN_POWER)
+ ap->target_lpm_policy = policy;
+}
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
@@ -1694,6 +1779,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"quirky BIOS, skipping spindown on poweroff\n");
}
+ if (ahci_broken_lpm(pdev)) {
+ pi.flags |= ATA_FLAG_NO_LPM;
+ dev_warn(&pdev->dev,
+ "BIOS update required for Link Power Management support\n");
+ }
+
if (ahci_broken_suspend(pdev)) {
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
dev_warn(&pdev->dev,
@@ -1747,10 +1838,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
- if ((hpriv->flags & AHCI_HFLAG_IS_MOBILE) &&
- mobile_lpm_policy >= ATA_LPM_UNKNOWN &&
- mobile_lpm_policy <= ATA_LPM_MIN_POWER)
- ap->target_lpm_policy = mobile_lpm_policy;
+ ahci_update_initial_lpm_policy(ap, hpriv);
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
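The cutoff test in ahci_broken_lpm() relies on zero-padded YYYYMMDD strings comparing in date order; a quick stand-alone check of that assumption:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[9];
	int year = 2018, month = 3, date = 9;	/* example BIOS date */

	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
	/* "20180309" < "20180406": BIOS predates the fixed release */
	printf("broken=%d\n", strcmp(buf, "20180406") < 0);
	return 0;
}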
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 1609ebab4e23..6a1515f0da40 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -350,6 +350,7 @@ struct ahci_host_priv {
u32 em_msg_type; /* EM message type */
bool got_runtime_pm; /* Did we do pm_runtime_get? */
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
+ struct reset_control *rsts; /* Optional */
struct regulator **target_pwrs; /* Optional */
/*
* If platform uses PHYs. There is a 1:1 relation between the port number and
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index ea430819c80b..f3d557777d82 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -425,7 +425,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
brcm_sata_phys_enable(priv);
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = priv;
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 5ecc9d46cb54..dc78c98cb9f1 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -213,7 +213,7 @@ static int ceva_ahci_probe(struct platform_device *pdev)
cevapriv->ahci_pdev = pdev;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 9b34dff64536..ebaa657f28c4 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -171,7 +171,7 @@ static int ahci_da850_probe(struct platform_device *pdev)
u32 mpy;
int rc;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index fbd827c3a75c..89509c3efb01 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -148,7 +148,7 @@ static int ahci_dm816_probe(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
int rc;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 6822e2f33f7e..b00799d208f5 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1127,7 +1127,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
return ret;
}
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 0ae6971c2a4c..8bc1a26ffc31 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -142,7 +142,7 @@ static int mtk_ahci_probe(struct platform_device *pdev)
if (!plat)
return -ENOMEM;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 0045dacd814b..f9cb51be38eb 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
*
* Return: 0 on success; Error code otherwise.
*/
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp, port_fbs;
@@ -158,7 +158,7 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
const struct mbus_dram_target_info *dram;
int rc;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 99f9a895a459..46f0bd75eff7 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -43,7 +43,8 @@ static int ahci_probe(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
int rc;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev,
+ AHCI_PLATFORM_GET_RESETS);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
@@ -75,7 +76,6 @@ static const struct of_device_id ahci_of_match[] = {
{ .compatible = "generic-ahci", },
/* Keep the following compatibles for device tree compatibility */
{ .compatible = "snps,spear-ahci", },
- { .compatible = "snps,exynos5440-ahci", },
{ .compatible = "ibm,476gtr-ahci", },
{ .compatible = "snps,dwc-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index cfdef4d44ae9..ce59253ec158 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -250,7 +250,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
struct resource *res;
int rc;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 1d31c0c0fc20..e57b6f92c288 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -164,7 +164,7 @@ static int ahci_seattle_probe(struct platform_device *pdev)
int rc;
struct ahci_host_priv *hpriv;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index bc345f249555..21c5c44832ef 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -156,7 +156,7 @@ static int st_ahci_probe(struct platform_device *pdev)
if (!drv_data)
return -ENOMEM;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = drv_data;
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index b26437430163..631610b72aa5 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -181,7 +181,7 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
int rc;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 64d848409fe2..004f2608818e 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -494,7 +494,7 @@ static int tegra_ahci_probe(struct platform_device *pdev)
int ret;
unsigned int i;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index ad58da7c9aff..7e157e1bf65e 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -759,7 +759,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
&xgene_ahci_v2_port_info };
int rc;
- hpriv = ahci_platform_get_resources(pdev);
+ hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
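All of the probe() conversions above add a flags argument to ahci_platform_get_resources(). A hypothetical platform driver opting in to the optional reset handling (driver name invented; the flag is the one introduced in the ahci_platform.c hunk above) might look like:

#include <linux/ahci_platform.h>
#include <linux/platform_device.h>

static int foo_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;

	/* 0 keeps the old behaviour; AHCI_PLATFORM_GET_RESETS also
	 * grabs and deasserts an optional reset controller
	 */
	hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	/* ... enable resources, ahci_platform_init_host(), etc. ... */
	return 0;
}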
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 965842a08743..b5f57c69c487 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -800,6 +801,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
cmd |= PORT_CMD_ALPE;
if (policy == ATA_LPM_MIN_POWER)
cmd |= PORT_CMD_ASP;
+ else if (policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
+ cmd &= ~PORT_CMD_ASP;
/* write out new cmd value */
writel(cmd, port_mmio + PORT_CMD);
@@ -810,7 +813,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
if ((hpriv->cap2 & HOST_CAP2_SDS) &&
(hpriv->cap2 & HOST_CAP2_SADM) &&
(link->device->flags & ATA_DFLAG_DEVSLP)) {
- if (policy == ATA_LPM_MIN_POWER)
+ if (policy == ATA_LPM_MIN_POWER ||
+ policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
ahci_set_aggressive_devslp(ap, true);
else
ahci_set_aggressive_devslp(ap, false);
@@ -1146,10 +1150,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
+ if (pmp < EM_MAX_SLOTS) {
+ pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
emp = &pp->em_priv[pmp];
- else
+ } else {
return -EINVAL;
+ }
/* mask off the activity bits if we are in sw_activity
* mode, user should turn off sw_activity before setting
@@ -2104,7 +2110,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_device *dev = ap->link.device;
- u32 devslp, dm, dito, mdat, deto;
+ u32 devslp, dm, dito, mdat, deto, dito_conf;
int rc;
unsigned int err_mask;
@@ -2128,8 +2134,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
return;
}
- /* device sleep was already enabled */
- if (devslp & PORT_DEVSLP_ADSE)
+ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
+ dito = devslp_idle_timeout / (dm + 1);
+ if (dito > 0x3ff)
+ dito = 0x3ff;
+
+ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
+
+ /* device sleep was already enabled and same dito */
+ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
return;
/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
@@ -2137,11 +2150,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
if (rc)
return;
- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
- dito = devslp_idle_timeout / (dm + 1);
- if (dito > 0x3ff)
- dito = 0x3ff;
-
/* Use the nominal value 10 ms if the read MDAT is zero,
* the nominal value of DETO is 20 ms.
*/
@@ -2159,6 +2167,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
deto = 20;
}
+ /* Clear the DITO, MDAT and DETO fields */
+ devslp &= ~GENMASK_ULL(24, 2);
devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
(mdat << PORT_DEVSLP_MDAT_OFFSET) |
(deto << PORT_DEVSLP_DETO_OFFSET) |
@@ -2436,6 +2446,8 @@ static void ahci_port_stop(struct ata_port *ap)
* re-enabling INTx.
*/
writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);
+
+ ahci_rpm_put_port(ap);
}
void ahci_print_info(struct ata_host *host, const char *scc_s)
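The ahci_led_store() change above is the standard Spectre-v1 hardening idiom: bounds-check first, then clamp the index with array_index_nospec() so a mispredicted branch cannot use an out-of-range value in the dependent array access. Reduced to its essentials (array and bound names are illustrative):

	#include <linux/nospec.h>

	if (idx >= nr_elems)
		return -EINVAL;
	/* even under speculation, idx is now < nr_elems */
	idx = array_index_nospec(idx, nr_elems);
	elem = &array[idx];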
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 30cc8f1a31e1..c92c10d55374 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -25,6 +25,7 @@
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/of_platform.h>
+#include <linux/reset.h>
#include "ahci.h"
static void ahci_host_stop(struct ata_host *host);
@@ -195,7 +196,8 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
* following order:
* 1) Regulator
* 2) Clocks (through ahci_platform_enable_clks)
- * 3) Phys
+ * 3) Resets
+ * 4) Phys
*
* If resource enabling fails at any point the previous enabled resources
* are disabled in reverse order.
@@ -215,12 +217,19 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
if (rc)
goto disable_regulator;
- rc = ahci_platform_enable_phys(hpriv);
+ rc = reset_control_deassert(hpriv->rsts);
if (rc)
goto disable_clks;
+ rc = ahci_platform_enable_phys(hpriv);
+ if (rc)
+ goto disable_resets;
+
return 0;
+disable_resets:
+ reset_control_assert(hpriv->rsts);
+
disable_clks:
ahci_platform_disable_clks(hpriv);
@@ -238,13 +247,16 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
* This function disables all ahci_platform managed resources in the
* following order:
* 1) Phys
- * 2) Clocks (through ahci_platform_disable_clks)
- * 3) Regulator
+ * 2) Resets
+ * 3) Clocks (through ahci_platform_disable_clks)
+ * 4) Regulator
*/
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
{
ahci_platform_disable_phys(hpriv);
+ reset_control_assert(hpriv->rsts);
+
ahci_platform_disable_clks(hpriv);
ahci_platform_disable_regulators(hpriv);
@@ -332,6 +344,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
+ * @flags: bitmap representing the resources to get
*
* This function allocates an ahci_host_priv struct, and gets the following
* resources, storing a reference to them inside the returned struct:
@@ -340,18 +353,20 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
* 2) regulator for controlling the targets power (optional)
* 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
* or for non devicetree enabled platforms a single clock
- * 4) phys (optional)
+ * 4) resets, if flags has AHCI_PLATFORM_GET_RESETS (optional)
+ * 5) phys (optional)
*
* RETURNS:
* The allocated ahci_host_priv on success, otherwise an ERR_PTR value
*/
-struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
+struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
+ unsigned int flags)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct clk *clk;
struct device_node *child;
- int i, sz, enabled_ports = 0, rc = -ENOMEM, child_nodes;
+ int i, enabled_ports = 0, rc = -ENOMEM, child_nodes;
u32 mask_port_map = 0;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
@@ -393,6 +408,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
hpriv->clks[i] = clk;
}
+ if (flags & AHCI_PLATFORM_GET_RESETS) {
+ hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
+ if (IS_ERR(hpriv->rsts)) {
+ rc = PTR_ERR(hpriv->rsts);
+ goto err_out;
+ }
+ }
+
hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
/*
@@ -403,14 +426,16 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
if (!child_nodes)
hpriv->nports = 1;
- sz = hpriv->nports * sizeof(*hpriv->phys);
- hpriv->phys = devm_kzalloc(dev, sz, GFP_KERNEL);
+ hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
if (!hpriv->phys) {
rc = -ENOMEM;
goto err_out;
}
- sz = hpriv->nports * sizeof(*hpriv->target_pwrs);
- hpriv->target_pwrs = kzalloc(sz, GFP_KERNEL);
+ /*
+ * We cannot use devm_ here, since ahci_platform_put_resources() uses
+ * target_pwrs after devm_ has freed the memory
+ */
+ hpriv->target_pwrs = kcalloc(hpriv->nports, sizeof(*hpriv->target_pwrs), GFP_KERNEL);
if (!hpriv->target_pwrs) {
rc = -ENOMEM;
goto err_out;
@@ -605,7 +630,7 @@ static void ahci_host_stop(struct ata_host *host)
/**
* ahci_platform_shutdown - Disable interrupts and stop DMA for host ports
- * @dev: platform device pointer for the host
+ * @pdev: platform device pointer for the host
*
* This function is called during system shutdown and performs the minimal
* deconfiguration required to ensure that an ahci_platform host cannot
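With resets slotted in, ahci_platform_enable_resources() keeps a strict enable order and unwinds in exact reverse order on failure. The goto-ladder idiom it relies on, sketched with illustrative names:

	static int enable_all(struct foo *f)
	{
		int rc;

		rc = enable_regulators(f);
		if (rc)
			return rc;

		rc = enable_clks(f);
		if (rc)
			goto err_regulators;

		rc = reset_control_deassert(f->rsts);
		if (rc)
			goto err_clks;

		rc = enable_phys(f);
		if (rc)
			goto err_resets;

		return 0;

	err_resets:
		reset_control_assert(f->rsts);
	err_clks:
		disable_clks(f);
	err_regulators:
		disable_regulators(f);
		return rc;
	}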
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 27d15ed7fa3d..172e32840256 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->horkage |= ATA_HORKAGE_NOLPM;
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ dev->horkage |= ATA_HORKAGE_NOLPM;
+
if (dev->horkage & ATA_HORKAGE_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
@@ -3967,6 +3970,7 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
scontrol |= (0x6 << 8);
break;
case ATA_LPM_MED_POWER_WITH_DIPM:
+ case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
if (ata_link_nr_enabled(link) > 0)
/* no restrictions on LPM transitions */
@@ -5063,7 +5067,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
if (n_elem < 1)
return -1;
- DPRINTK("%d sg elements mapped\n", n_elem);
+ VPRINTK("%d sg elements mapped\n", n_elem);
qc->orig_n_elem = qc->n_elem;
qc->n_elem = n_elem;
qc->flags |= ATA_QCFLAG_DMAMAP;
@@ -6421,6 +6425,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->n_tags = ATA_MAX_QUEUE;
host->dev = dev;
host->ops = ops;
+ kref_init(&host->kref);
}
void __ata_port_probe(struct ata_port *ap)
@@ -7388,3 +7393,5 @@ EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
+EXPORT_SYMBOL_GPL(ata_host_get);
+EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
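ata_host_init() now seeds a kref and the get/put helpers are exported, so code outside libata-core can pin a host for as long as it holds a pointer to it. The usage pattern is the usual kref one, sketched:

	ata_host_get(host);	/* host cannot be freed while we hold the ref */
	/* ... use host ... */
	ata_host_put(host);	/* the last put releases the host */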
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index d5412145d76d..01306c018398 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- qc = __ata_qc_from_tag(ap, i);
+ ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd)
break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
+ struct ata_queued_cmd *qc;
unsigned int tag;
int nr = 0;
/* count only non-internal commands */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- if (ata_tag_internal(tag))
- continue;
- if (ata_qc_from_tag(ap, tag))
+ ata_qc_for_each(ap, qc, tag) {
+ if (qc)
nr++;
}
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
goto out_unlock;
if (cnt == ap->fastdrain_cnt) {
+ struct ata_queued_cmd *qc;
unsigned int tag;
/* No progress during the last interval, tag all
* in-flight qcs as timed out and freeze the port.
*/
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+ ata_qc_for_each(ap, qc, tag) {
if (qc)
qc->err_mask |= AC_ERR_TIMEOUT;
}
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
+ struct ata_queued_cmd *qc;
int tag, nr_aborted = 0;
WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
ata_eh_set_pending(ap, 0);
/* include internal tag in iteration */
- for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && (!link || qc->dev->link == link)) {
qc->flags |= ATA_QCFLAG_FAILED;
ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
/* has LLDD analyzed already? */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED))
continue;
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_queued_cmd *qc;
struct ata_device *dev;
unsigned int all_err_mask = 0, eflags = 0;
int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
all_err_mask |= ehc->i.err_mask;
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED) ||
ata_dev_phys_link(qc->dev) != link)
continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_queued_cmd *qc;
const char *frozen, *desc;
char tries_buf[6] = "";
int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
if (ehc->i.desc[0] != '\0')
desc = ehc->i.desc;
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED) ||
ata_dev_phys_link(qc->dev) != link ||
((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+ ata_qc_for_each_raw(ap, qc, tag) {
struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
char data_buf[20] = "";
char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
*/
void ata_eh_finish(struct ata_port *ap)
{
+ struct ata_queued_cmd *qc;
int tag;
/* retry or finish qcs */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED))
continue;
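The open-coded tag loops above collapse into three iterators. Judging by the converted call sites, ata_qc_for_each() walks the regular tags and may yield empty (NULL) slots, ata_qc_for_each_with_internal() additionally covers the internal tag, and ata_qc_for_each_raw() hands back the raw qc for every tag without a NULL check. Typical use, sketched:

	struct ata_queued_cmd *qc;
	unsigned int tag;

	ata_qc_for_each(ap, qc, tag) {
		if (!qc)	/* slot not in use */
			continue;
		/* ... inspect qc ... */
	}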
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 6a91d04351d9..1984fc78c750 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -110,6 +110,7 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MAX_POWER] = "max_performance",
[ATA_LPM_MED_POWER] = "medium_power",
[ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
+ [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
[ATA_LPM_MIN_POWER] = "min_power",
};
@@ -597,8 +598,9 @@ static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
+ u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
- u8 args[4], *argbuf = NULL, *sensebuf = NULL;
+ u8 args[4], *argbuf = NULL;
int argsize = 0;
enum dma_data_direction data_dir;
struct scsi_sense_hdr sshdr;
@@ -610,10 +612,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
- sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
- if (!sensebuf)
- return -ENOMEM;
-
+ memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
if (args[3]) {
@@ -685,7 +684,6 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
rc = -EFAULT;
error:
- kfree(sensebuf);
kfree(argbuf);
return rc;
}
@@ -704,8 +702,9 @@ error:
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
+ u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
- u8 args[7], *sensebuf = NULL;
+ u8 args[7];
struct scsi_sense_hdr sshdr;
int cmd_result;
@@ -715,10 +714,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
- sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
- if (!sensebuf)
- return -ENOMEM;
-
+ memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
scsi_cmd[1] = (3 << 1); /* Non-data */
@@ -769,7 +765,6 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
}
error:
- kfree(sensebuf);
return rc;
}
@@ -3805,10 +3800,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
*/
goto invalid_param_len;
}
- if (block > dev->n_sectors)
- goto out_of_range;
all = cdb[14] & 0x1;
+ if (all) {
+ /*
+ * Ignore the block address (zone ID) as defined by ZBC.
+ */
+ block = 0;
+ } else if (block >= dev->n_sectors) {
+ /*
+ * Block must be a valid zone ID (a zone start LBA).
+ */
+ fp = 2;
+ goto invalid_fld;
+ }
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3842,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
- out_of_range:
- /* "Logical Block Address out of range" */
- ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
- return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
@@ -4288,10 +4289,10 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
static inline void ata_scsi_dump_cdb(struct ata_port *ap,
struct scsi_cmnd *cmd)
{
-#ifdef ATA_DEBUG
+#ifdef ATA_VERBOSE_DEBUG
struct scsi_device *scsidev = cmd->device;
- DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
+ VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
ap->print_id,
scsidev->channel, scsidev->id, scsidev->lun,
cmd->cmnd);
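Both ioctl paths above trade a kzalloc(GFP_NOIO)/kfree pair for an on-stack buffer. SCSI_SENSE_BUFFERSIZE is small (96 bytes in current kernels), so the allocation, its failure path, and the kfree in the error label all disappear:

	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];

	memset(sensebuf, 0, sizeof(sensebuf));
	/* pass sensebuf down as before; nothing to free on any exit path */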
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index cc2f2e35f4c2..c5ea0fc635e5 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -658,36 +658,6 @@ unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
/**
- * ata_sff_data_xfer_noirq - Transfer data by PIO
- * @qc: queued command
- * @buf: data buffer
- * @buflen: buffer length
- * @rw: read/write
- *
- * Transfer data from/to the device data register by PIO. Do the
- * transfer with interrupts disabled.
- *
- * LOCKING:
- * Inherited from caller.
- *
- * RETURNS:
- * Bytes consumed.
- */
-unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc, unsigned char *buf,
- unsigned int buflen, int rw)
-{
- unsigned long flags;
- unsigned int consumed;
-
- local_irq_save(flags);
- consumed = ata_sff_data_xfer32(qc, buf, buflen, rw);
- local_irq_restore(flags);
-
- return consumed;
-}
-EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
-
-/**
* ata_pio_sector - Transfer a sector of data.
* @qc: Command on going
*
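ata_sff_data_xfer_noirq() was only a thin IRQ-off wrapper, so its callers switch to ata_sff_data_xfer32() directly. A driver that genuinely needed interrupts masked around the transfer could still open-code the removed body, which is exactly what pata_legacy's VLB path keeps doing:

	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_sff_data_xfer32(qc, buf, buflen, rw);
	local_irq_restore(flags);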
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 9e21c49cf6be..f953cb4bb1ba 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -100,8 +100,6 @@ extern int ata_port_probe(struct ata_port *ap);
extern void __ata_port_probe(struct ata_port *ap);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
-extern void ata_host_get(struct ata_host *host);
-extern void ata_host_put(struct ata_host *host);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index c47caa807fa9..e3532eda7b05 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -178,7 +178,7 @@ static struct scsi_host_template cmd640_sht = {
static struct ata_port_operations cmd640_port_ops = {
.inherits = &ata_sff_port_ops,
/* In theory xfer_noirq is not needed once we kill the prefetcher */
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
.sff_irq_check = cmd640_sff_irq_check,
.qc_issue = cmd640_qc_issue,
.cable_detect = ata_cable_40wire,
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 188f2f2eb21f..c272f2cbb47c 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -324,7 +324,7 @@ static struct ata_port_operations pata_icside_port_ops = {
.inherits = &ata_bmdma_port_ops,
/* no need to build any PRD tables for DMA */
.qc_prep = ata_noop_qc_prep,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
.bmdma_setup = pata_icside_bmdma_setup,
.bmdma_start = pata_icside_bmdma_start,
.bmdma_stop = pata_icside_bmdma_stop,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index d4caa23f5a88..2e538726802b 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/libata.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#define DRV_NAME "pata_imx"
@@ -102,7 +103,7 @@ static struct scsi_host_template pata_imx_sht = {
static struct ata_port_operations pata_imx_port_ops = {
.inherits = &ata_sff_port_ops,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
.cable_detect = ata_cable_unknown,
.set_piomode = pata_imx_set_piomode,
};
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 53828b6c3044..8ea4b8431fc8 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -246,12 +246,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
static struct ata_port_operations simple_port_ops = {
.inherits = &legacy_base_port_ops,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
};
static struct ata_port_operations legacy_port_ops = {
.inherits = &legacy_base_port_ops,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
.set_mode = legacy_set_mode,
};
@@ -341,7 +341,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
}
local_irq_restore(flags);
} else
- buflen = ata_sff_data_xfer_noirq(qc, buf, buflen, rw);
+ buflen = ata_sff_data_xfer32(qc, buf, buflen, rw);
return buflen;
}
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 8c0d7d736b7a..d071ab6864a8 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -44,7 +44,7 @@ static struct scsi_host_template palmld_sht = {
static struct ata_port_operations palmld_port_ops = {
.inherits = &ata_sff_port_ops,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
.cable_detect = ata_cable_40wire,
};
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index a541eacc5e95..9b0e6c72e3f9 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -151,7 +151,7 @@ static struct scsi_host_template pcmcia_sht = {
static struct ata_port_operations pcmcia_port_ops = {
.inherits = &ata_sff_port_ops,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
.cable_detect = ata_cable_40wire,
.set_mode = pcmcia_set_mode,
};
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index c503ded87bb8..d6f8f5406442 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -49,7 +49,7 @@ static struct scsi_host_template pata_platform_sht = {
static struct ata_port_operations pata_platform_port_ops = {
.inherits = &ata_sff_port_ops,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
.cable_detect = ata_cable_unknown,
.set_mode = pata_platform_set_mode,
};
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index f6c46e9a4dc0..e8b6a2e464c9 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -25,7 +25,6 @@
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
-#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>
@@ -180,8 +179,6 @@ static int pxa_ata_probe(struct platform_device *pdev)
struct resource *irq_res;
struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
struct dma_slave_config config;
- dma_cap_mask_t mask;
- struct pxad_param param;
int ret = 0;
/*
@@ -278,10 +275,6 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->private_data = data;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- param.prio = PXAD_PRIO_LOWEST;
- param.drcmr = pdata->dma_dreq;
memset(&config, 0, sizeof(config));
config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
@@ -294,8 +287,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
* Request the DMA channel
*/
data->dma_chan =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, &pdev->dev, "data");
+ dma_request_slave_channel(&pdev->dev, "data");
if (!data->dma_chan)
return -EBUSY;
ret = dmaengine_slave_config(data->dma_chan, &config);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index bb96dc35950d..f5bd44b8bd63 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/libata.h>
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 1ca6bcab369f..fd19f1ce83aa 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -471,7 +471,7 @@ static struct ata_port_operations via_port_ops = {
static struct ata_port_operations via_port_ops_noirq = {
.inherits = &via_port_ops,
- .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .sff_data_xfer = ata_sff_data_xfer32,
};
/**
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b8d9cfc60374..4dc528bf8e85 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
{
/* We let libATA core do actual (queue) tag allocation */
- /* all non NCQ/queued commands should have tag#0 */
- if (ata_tag_internal(tag)) {
- DPRINTK("mapping internal cmds to tag#0\n");
- return 0;
- }
-
if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
DPRINTK("tag %d invalid : out of range\n", tag);
return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
/* Workaround for data length mismatch errata */
if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- qc = ata_qc_from_tag(ap, tag);
+ ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && ata_is_atapi(qc->tf.protocol)) {
u32 hcontrol;
/* Set HControl[27] to clear error registers */
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 10ae11aa1926..72c9b922a77b 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
struct nv_adma_port_priv *port0, *port1;
- struct scsi_device *sdev0, *sdev1;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long segment_boundary, flags;
unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
port0 = ap->host->ports[0]->private_data;
port1 = ap->host->ports[1]->private_data;
- sdev0 = ap->host->ports[0]->link.device[0].sdev;
- sdev1 = ap->host->ports[1]->link.device[0].sdev;
if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
/*
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 6456e07db72a..10ecb232245d 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -17,7 +17,7 @@
#include <linux/libata.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/err.h>
#define DRV_NAME "sata_rcar"
@@ -109,6 +109,8 @@
#define SATAINTMASK_ERRMSK BIT(2)
#define SATAINTMASK_ERRCRTMSK BIT(1)
#define SATAINTMASK_ATAMSK BIT(0)
+#define SATAINTMASK_ALL_GEN1 0x7ff
+#define SATAINTMASK_ALL_GEN2 0xfff
#define SATA_RCAR_INT_MASK (SATAINTMASK_SERRMSK | \
SATAINTMASK_ATAMSK)
@@ -152,7 +154,7 @@ enum sata_rcar_type {
struct sata_rcar_priv {
void __iomem *base;
- struct clk *clk;
+ u32 sataint_mask;
enum sata_rcar_type type;
};
@@ -226,7 +228,7 @@ static void sata_rcar_freeze(struct ata_port *ap)
struct sata_rcar_priv *priv = ap->host->private_data;
/* mask */
- iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+ iowrite32(priv->sataint_mask, priv->base + SATAINTMASK_REG);
ata_sff_freeze(ap);
}
@@ -242,7 +244,7 @@ static void sata_rcar_thaw(struct ata_port *ap)
ata_sff_thaw(ap);
/* unmask */
- iowrite32(0x7ff & ~SATA_RCAR_INT_MASK, base + SATAINTMASK_REG);
+ iowrite32(priv->sataint_mask & ~SATA_RCAR_INT_MASK, base + SATAINTMASK_REG);
}
static void sata_rcar_ioread16_rep(void __iomem *reg, void *buffer, int count)
@@ -736,7 +738,7 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
if (!sataintstat)
goto done;
/* ack */
- iowrite32(~sataintstat & 0x7ff, base + SATAINTSTAT_REG);
+ iowrite32(~sataintstat & priv->sataint_mask, base + SATAINTSTAT_REG);
ap = host->ports[0];
@@ -809,7 +811,7 @@ static void sata_rcar_init_module(struct sata_rcar_priv *priv)
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
- iowrite32(0x7ff, base + SATAINTMASK_REG);
+ iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
/* enable interrupts */
iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
@@ -819,16 +821,20 @@ static void sata_rcar_init_controller(struct ata_host *host)
{
struct sata_rcar_priv *priv = host->private_data;
+ priv->sataint_mask = SATAINTMASK_ALL_GEN2;
+
/* reset and setup phy */
switch (priv->type) {
case RCAR_GEN1_SATA:
+ priv->sataint_mask = SATAINTMASK_ALL_GEN1;
sata_rcar_gen1_phy_init(priv);
break;
case RCAR_GEN2_SATA:
- case RCAR_GEN3_SATA:
case RCAR_R8A7790_ES1_SATA:
sata_rcar_gen2_phy_init(priv);
break;
+ case RCAR_GEN3_SATA:
+ break;
default:
dev_warn(host->dev, "SATA phy is not initialized\n");
break;
@@ -881,6 +887,7 @@ MODULE_DEVICE_TABLE(of, sata_rcar_match);
static int sata_rcar_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ata_host *host;
struct sata_rcar_priv *priv;
struct resource *mem;
@@ -891,36 +898,31 @@ static int sata_rcar_probe(struct platform_device *pdev)
if (irq <= 0)
return -EINVAL;
- priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv),
- GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->type = (enum sata_rcar_type)of_device_get_match_data(&pdev->dev);
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "failed to get access to sata clock\n");
- return PTR_ERR(priv->clk);
- }
+ priv->type = (enum sata_rcar_type)of_device_get_match_data(dev);
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm_disable;
- host = ata_host_alloc(&pdev->dev, 1);
+ host = ata_host_alloc(dev, 1);
if (!host) {
- dev_err(&pdev->dev, "ata_host_alloc failed\n");
+ dev_err(dev, "ata_host_alloc failed\n");
ret = -ENOMEM;
- goto cleanup;
+ goto err_pm_put;
}
host->private_data = priv;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ priv->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
- goto cleanup;
+ goto err_pm_put;
}
/* setup port */
@@ -934,9 +936,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
if (!ret)
return 0;
-cleanup:
- clk_disable_unprepare(priv->clk);
-
+err_pm_put:
+ pm_runtime_put(dev);
+err_pm_disable:
+ pm_runtime_disable(dev);
return ret;
}
@@ -952,9 +955,10 @@ static int sata_rcar_remove(struct platform_device *pdev)
iowrite32(0, base + ATAPI_INT_ENABLE_REG);
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
- iowrite32(0x7ff, base + SATAINTMASK_REG);
+ iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -972,9 +976,9 @@ static int sata_rcar_suspend(struct device *dev)
/* disable interrupts */
iowrite32(0, base + ATAPI_INT_ENABLE_REG);
/* mask */
- iowrite32(0x7ff, base + SATAINTMASK_REG);
+ iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_put(dev);
}
return ret;
@@ -987,17 +991,16 @@ static int sata_rcar_resume(struct device *dev)
void __iomem *base = priv->base;
int ret;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
return ret;
if (priv->type == RCAR_GEN3_SATA) {
- sata_rcar_gen2_phy_init(priv);
sata_rcar_init_module(priv);
} else {
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
- iowrite32(0x7ff, base + SATAINTMASK_REG);
+ iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
/* enable interrupts */
iowrite32(ATAPI_INT_ENABLE_SATAINT,
@@ -1012,11 +1015,10 @@ static int sata_rcar_resume(struct device *dev)
static int sata_rcar_restore(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
- struct sata_rcar_priv *priv = host->private_data;
int ret;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
return ret;
sata_rcar_setup_port(host);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ff81a576347e..82532c299bb5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
skb_queue_head_init(&iadev->rx_dma_q);
iadev->rx_free_desc_qhead = NULL;
- iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+ iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
if (!iadev->rx_open) {
printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
dev->number);
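The iphase fix is worth generalizing: kcalloc(n, size, flags) takes the element count first and the element size second, and checks n * size for overflow. The old call hardcoded a 4-byte element size, silently under-allocating the pointer array on 64-bit; spelling the size as sizeof(*ptr) avoids that class of bug:

	/* n zeroed elements, overflow-checked */
	ptrs = kcalloc(n, sizeof(*ptrs), GFP_KERNEL);
	if (!ptrs)
		return -ENOMEM;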
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a8d2eb0ceb8d..e89146ddede6 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1385,14 +1385,12 @@ static void zatm_close(struct atm_vcc *vcc)
static int zatm_open(struct atm_vcc *vcc)
{
- struct zatm_dev *zatm_dev;
struct zatm_vcc *zatm_vcc;
short vpi = vcc->vpi;
int vci = vcc->vci;
int error;
DPRINTK(">zatm_open\n");
- zatm_dev = ZATM_DEV(vcc->dev);
if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
vcc->dev_data = NULL;
if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
@@ -1483,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
if (copy_from_user(&info,
&((struct zatm_pool_req __user *) arg)->info,
sizeof(info))) return -EFAULT;
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index 296fb30dfa00..dea031484cc4 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -331,8 +331,7 @@ out_no_resource:
static int charlcd_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct charlcd *lcd = dev_get_drvdata(dev);
/* Power the display off */
charlcd_4bit_command(lcd, HD_DISPCTRL);
@@ -341,8 +340,7 @@ static int charlcd_suspend(struct device *dev)
static int charlcd_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct charlcd *lcd = dev_get_drvdata(dev);
/* Turn the display back on */
charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 8673fc2b9eb8..81c22d20d9d9 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -99,10 +99,7 @@ static atomic_t charlcd_available = ATOMIC_INIT(1);
/* sleeps that many milliseconds with a reschedule */
static void long_sleep(int ms)
{
- if (in_interrupt())
- mdelay(ms);
- else
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
/* turn the backlight on or off */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 78d8f1986fec..f1a42f0f1ded 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
diff --git a/drivers/base/base.h b/drivers/base/base.h
index a75c3025fb78..7a419a7a6235 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -84,8 +84,6 @@ struct device_private {
#define to_device_private_bus(obj) \
container_of(obj, struct device_private, knode_bus)
-extern int device_private_init(struct device *dev);
-
/* initialisation functions */
extern int devices_init(void);
extern int buses_init(void);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 2880e2ab01f5..5d5b5988e88b 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -74,52 +74,48 @@ static inline int get_cacheinfo_idx(enum cache_type type)
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
- const __be32 *cache_size;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
- cache_size = of_get_property(np, propname, NULL);
- if (cache_size)
- this_leaf->size = of_read_number(cache_size, 1);
+ if (of_property_read_u32(np, propname, &this_leaf->size))
+ this_leaf->size = 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
struct device_node *np)
{
- const __be32 *line_size;
int i, lim, ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
for (i = 0; i < lim; i++) {
+ int ret;
+ u32 line_size;
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
- line_size = of_get_property(np, propname, NULL);
- if (line_size)
+ ret = of_property_read_u32(np, propname, &line_size);
+ if (!ret) {
+ this_leaf->coherency_line_size = line_size;
break;
+ }
}
-
- if (line_size)
- this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
- const __be32 *nr_sets;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
- nr_sets = of_get_property(np, propname, NULL);
- if (nr_sets)
- this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+ if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
+ this_leaf->number_of_sets = 0;
}
static void cache_associativity(struct cacheinfo *this_leaf)
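The cacheinfo conversion replaces of_get_property()/of_read_number() pairs with of_property_read_u32(), which validates the property length and does the big-endian conversion in one call. Each helper now reduces to the same shape, shown here with the real "cache-size" property:

	u32 size;

	/* returns 0 on success; on any error leave the field zeroed */
	if (of_property_read_u32(np, "cache-size", &size))
		size = 0;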
diff --git a/drivers/base/core.c b/drivers/base/core.c
index df3e1a44707a..04bbcd779e11 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -105,7 +105,7 @@ static int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
- if (WARN_ON(dev == target))
+ if (dev == target)
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -113,7 +113,7 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (WARN_ON(link->consumer == target))
+ if (link->consumer == target)
return 1;
ret = device_is_dependent(link->consumer, target);
@@ -178,10 +178,10 @@ void device_pm_move_to_tail(struct device *dev)
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
- * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
- * when the consumer device driver unbinds from it. The combination of both
- * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
- * to be returned.
+ * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
+ * automatically when the consumer device driver unbinds from it.
+ * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
+ * set is invalid and will cause NULL to be returned.
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
@@ -198,7 +198,8 @@ struct device_link *device_link_add(struct device *consumer,
struct device_link *link;
if (!consumer || !supplier ||
- ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ ((flags & DL_FLAG_STATELESS) &&
+ (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
return NULL;
device_links_write_lock();
@@ -372,6 +373,36 @@ void device_link_del(struct device_link *link)
}
EXPORT_SYMBOL_GPL(device_link_del);
+/**
+ * device_link_remove - remove a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_remove(void *consumer, struct device *supplier)
+{
+ struct device_link *link;
+
+ if (WARN_ON(consumer == supplier))
+ return;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node) {
+ if (link->consumer == consumer) {
+ kref_put(&link->kref, __device_link_del);
+ break;
+ }
+ }
+
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_remove);
+
static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
@@ -479,7 +510,7 @@ static void __device_links_no_driver(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE)
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
kref_put(&link->kref, __device_link_del);
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
@@ -515,8 +546,18 @@ void device_links_driver_cleanup(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+
+ /*
+ * Autoremove the links between this @dev and its consumer
+ * devices that are not active, i.e. where the link state
+ * has moved to DL_STATE_SUPPLIER_UNBIND.
+ */
+ if (link->status == DL_STATE_SUPPLIER_UNBIND &&
+ link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ kref_put(&link->kref, __device_link_del);
+
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
@@ -866,10 +907,19 @@ static const void *device_namespace(struct kobject *kobj)
return ns;
}
+static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (dev->class && dev->class->get_ownership)
+ dev->class->get_ownership(dev, uid, gid);
+}
+
static struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
.namespace = device_namespace,
+ .get_ownership = device_get_ownership,
};
@@ -1597,6 +1647,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
return;
mutex_lock(&gdp_mutex);
+ if (!kobject_has_children(glue_dir))
+ kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
}
@@ -1736,7 +1788,7 @@ static void device_remove_sys_dev_entry(struct device *dev)
}
}
-int device_private_init(struct device *dev)
+static int device_private_init(struct device *dev)
{
dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
if (!dev->p)
@@ -2809,6 +2861,9 @@ void device_shutdown(void)
{
struct device *dev, *parent;
+ wait_for_device_probe();
+ device_block_probing();
+
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
@@ -3002,12 +3057,12 @@ void func(const struct device *dev, const char *fmt, ...) \
} \
EXPORT_SYMBOL(func);
-define_dev_printk_level(dev_emerg, KERN_EMERG);
-define_dev_printk_level(dev_alert, KERN_ALERT);
-define_dev_printk_level(dev_crit, KERN_CRIT);
-define_dev_printk_level(dev_err, KERN_ERR);
-define_dev_printk_level(dev_warn, KERN_WARNING);
-define_dev_printk_level(dev_notice, KERN_NOTICE);
+define_dev_printk_level(_dev_emerg, KERN_EMERG);
+define_dev_printk_level(_dev_alert, KERN_ALERT);
+define_dev_printk_level(_dev_crit, KERN_CRIT);
+define_dev_printk_level(_dev_err, KERN_ERR);
+define_dev_printk_level(_dev_warn, KERN_WARNING);
+define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
#endif
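device_link_del() needs the struct device_link pointer returned by device_link_add(); the new device_link_remove() drops the link given only its two endpoints, which helps callers that never stored the pointer. Sketch:

	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		return -EINVAL;
	/* ... later, without having kept "link" around ... */
	device_link_remove(consumer, supplier);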
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 30cc9c877ebb..eb9443d5bae1 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -540,16 +540,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
+ &dev_attr_l1tf.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1435d7281c66..edfc9f0b1180 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -16,6 +16,7 @@
* Copyright (c) 2007-2009 Novell Inc.
*/
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static struct dentry *deferred_devices;
static bool initcalls_done;
/*
@@ -63,26 +65,6 @@ static bool initcalls_done;
static bool defer_all_probes;
/*
- * For initcall_debug, show the deferred probes executed in late_initcall
- * processing.
- */
-static void deferred_probe_debug(struct device *dev)
-{
- ktime_t calltime, delta, rettime;
- unsigned long long duration;
-
- printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
- task_pid_nr(current));
- calltime = ktime_get();
- bus_probe_device(dev);
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
- dev_name(dev), duration);
-}
-
-/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
@@ -125,11 +107,7 @@ static void deferred_probe_work_func(struct work_struct *work)
device_pm_move_to_tail(dev);
dev_dbg(dev, "Retrying from deferred list\n");
- if (initcall_debug && !initcalls_done)
- deferred_probe_debug(dev);
- else
- bus_probe_device(dev);
-
+ bus_probe_device(dev);
mutex_lock(&deferred_probe_mutex);
put_device(dev);
@@ -224,6 +202,69 @@ void device_unblock_probing(void)
driver_deferred_probe_trigger();
}
+/*
+ * deferred_devs_show() - Show the devices in the deferred probe pending list.
+ */
+static int deferred_devs_show(struct seq_file *s, void *data)
+{
+ struct device_private *curr;
+
+ mutex_lock(&deferred_probe_mutex);
+
+ list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
+ seq_printf(s, "%s\n", dev_name(curr->device));
+
+ mutex_unlock(&deferred_probe_mutex);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(deferred_devs);
+
+static int deferred_probe_timeout = -1;
+static int __init deferred_probe_timeout_setup(char *str)
+{
+ deferred_probe_timeout = simple_strtol(str, NULL, 10);
+ return 1;
+}
+__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Returns -ENODEV if init is done and all built-in drivers have had a chance
+ * to probe (i.e. initcalls are done), -ETIMEDOUT if deferred probe debug
+ * timeout has expired, or -EPROBE_DEFER if none of those conditions are met.
+ *
+ * Drivers or subsystems can opt in to calling this function instead of
+ * directly returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+ if (initcalls_done) {
+ if (!deferred_probe_timeout) {
+ dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+ return -ETIMEDOUT;
+ }
+ dev_warn(dev, "ignoring dependency for device, assuming no driver");
+ return -ENODEV;
+ }
+ return -EPROBE_DEFER;
+}
+
+static void deferred_probe_timeout_work_func(struct work_struct *work)
+{
+ struct device_private *private, *p;
+
+ deferred_probe_timeout = 0;
+ driver_deferred_probe_trigger();
+ flush_work(&deferred_probe_work);
+
+ list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
+ dev_info(private->device, "deferred probe pending");
+}
+static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
@@ -233,15 +274,36 @@ void device_unblock_probing(void)
*/
static int deferred_probe_initcall(void)
{
+ deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
+ NULL, &deferred_devs_fops);
+
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
flush_work(&deferred_probe_work);
initcalls_done = true;
+
+ /*
+ * Trigger deferred probe again; this time we won't defer anything
+ * that is optional
+ */
+ driver_deferred_probe_trigger();
+ flush_work(&deferred_probe_work);
+
+ if (deferred_probe_timeout > 0) {
+ schedule_delayed_work(&deferred_probe_timeout_work,
+ deferred_probe_timeout * HZ);
+ }
return 0;
}
late_initcall(deferred_probe_initcall);
+static void __exit deferred_probe_exit(void)
+{
+ debugfs_remove_recursive(deferred_devices);
+}
+__exitcall(deferred_probe_exit);
+
/**
* device_is_bound() - Check if device is bound to a driver
* @dev: device to check
@@ -434,14 +496,6 @@ re_probe:
goto probe_failed;
}
- /*
- * Ensure devices are listed in devices_kset in correct order
- * It's important to move Dev to the end of devices_kset before
- * calling .probe, because it could be recursive and parent Dev
- * should always go first
- */
- devices_kset_move_last(dev);
-
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
@@ -527,6 +581,23 @@ done:
return ret;
}
+/*
+ * For initcall_debug, show the driver probe time.
+ */
+static int really_probe_debug(struct device *dev, struct device_driver *drv)
+{
+ ktime_t calltime, delta, rettime;
+ int ret;
+
+ calltime = ktime_get();
+ ret = really_probe(dev, drv);
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
+ dev_name(dev), ret, (s64) ktime_to_us(delta));
+ return ret;
+}
+
/**
* driver_probe_done
* Determine if the probe sequence is finished or not.
@@ -585,7 +656,10 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pm_runtime_get_sync(dev->parent);
pm_runtime_barrier(dev);
- ret = really_probe(dev, drv);
+ if (initcall_debug)
+ ret = really_probe_debug(dev, drv);
+ else
+ ret = really_probe(dev, drv);
pm_request_idle(dev);
if (dev->parent)
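Together with the new deferred_probe_timeout= command-line parameter and the devices_deferred debugfs file, driver_deferred_probe_check_state() lets a framework stop deferring once no driver can be expected to appear any more. A dependency lookup would use it like this (the genpd hunk below is the first real caller):

	/*
	 * Dependency not available: defer while initcalls are still running,
	 * give up with -ENODEV or -ETIMEDOUT afterwards.
	 */
	if (!supplier_ready)
		return driver_deferred_probe_check_state(dev);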
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 7f732744f0d3..b5c865fe263b 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -219,11 +219,6 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
-/* Some architectures don't have PAGE_KERNEL_RO */
-#ifndef PAGE_KERNEL_RO
-#define PAGE_KERNEL_RO PAGE_KERNEL
-#endif
-
/* one pages buffer should be mapped/unmapped only once */
static int map_fw_priv_pages(struct fw_priv *fw_priv)
{
@@ -651,6 +646,8 @@ static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
{
+ int ret;
+
if (fw_fallback_config.ignore_sysfs_fallback) {
pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
return false;
@@ -659,6 +656,11 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
if ((opt_flags & FW_OPT_NOFALLBACK))
return false;
+ /* Also permit LSMs and IMA to fail firmware sysfs fallback */
+ ret = security_kernel_load_data(LOADING_FIRMWARE);
+ if (ret < 0)
+ return false;
+
return fw_force_sysfs_fallback(opt_flags);
}
diff --git a/drivers/base/init.c b/drivers/base/init.c
index dd85b05a6a16..908e6520e804 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -30,9 +30,9 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ of_core_init();
platform_bus_init();
cpu_dev_init();
memory_dev_init();
container_dev_init();
- of_core_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f5e560188a18..c8a1cb0b6136 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -736,8 +736,6 @@ int hotplug_memory_register(int nid, struct mem_section *section)
mem->section_count++;
}
- if (mem->section_count == sections_per_block)
- ret = register_mem_sect_under_node(mem, nid, false);
out:
mutex_unlock(&mem_sysfs_mutex);
return ret;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index a5e821d09656..1ac4c36e13bb 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -399,18 +399,12 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, int nid,
- bool check_nid)
+int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
{
- int ret;
+ int ret, nid = *(int *)arg;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
- if (!mem_blk)
- return -EFAULT;
-
mem_blk->nid = nid;
- if (!node_online(nid))
- return 0;
sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
@@ -433,7 +427,7 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid,
* case, during hotplug we know that all pages in the memory
* block belong to the same node.
*/
- if (check_nid) {
+ if (system_state == SYSTEM_BOOTING) {
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
@@ -490,41 +484,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
return 0;
}
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
- bool check_nid)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
- unsigned long end_pfn = start_pfn + nr_pages;
- unsigned long pfn;
- struct memory_block *mem_blk = NULL;
- int err = 0;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
- struct mem_section *mem_sect;
- int ret;
-
- if (!present_section_nr(section_nr))
- continue;
- mem_sect = __nr_to_section(section_nr);
-
- /* same memblock ? */
- if (mem_blk)
- if ((section_nr >= mem_blk->start_section_nr) &&
- (section_nr <= mem_blk->end_section_nr))
- continue;
-
- mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
-
- ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
- if (!err)
- err = ret;
-
- /* discard ref obtained in find_memory_block() */
- }
-
- if (mem_blk)
- kobject_put(&mem_blk->dev.kobj);
- return err;
+ return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
+ register_mem_sect_under_node);
}
#ifdef CONFIG_HUGETLBFS
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index df41b4780b3b..b413951c6abc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -153,6 +153,23 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
/**
+ * dev_pm_domain_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to look up the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * For a detailed function description, see dev_pm_domain_attach_by_id().
+ */
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name)
+{
+ if (dev->pm_domain)
+ return ERR_PTR(-EEXIST);
+
+ return genpd_dev_pm_attach_by_name(dev, name);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 4925af5c4cf0..4b5714199490 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
}
static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
- unsigned int index)
+ unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
@@ -2253,7 +2253,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return -EPROBE_DEFER;
+ return driver_deferred_probe_check_state(dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- genpd_lock(pd);
- ret = genpd_power_on(pd, 0);
- genpd_unlock(pd);
+ if (power_on) {
+ genpd_lock(pd);
+ ret = genpd_power_on(pd, 0);
+ genpd_unlock(pd);
+ }
if (ret)
genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
"#power-domain-cells") != 1)
return 0;
- return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+ return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
@@ -2359,19 +2361,43 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+ ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
if (ret < 1) {
device_unregister(genpd_dev);
return ret ? ERR_PTR(ret) : NULL;
}
- pm_runtime_set_active(genpd_dev);
pm_runtime_enable(genpd_dev);
+ genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
return genpd_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
+/**
+ * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to look up the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * Parse the device's OF node to find a PM domain specifier using the
+ * power-domain-names DT property. For further description see
+ * genpd_dev_pm_attach_by_id().
+ */
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+{
+ int index;
+
+ if (!dev->of_node)
+ return NULL;
+
+ index = of_property_match_string(dev->of_node, "power-domain-names",
+ name);
+ if (index < 0)
+ return NULL;
+
+ return genpd_dev_pm_attach_by_id(dev, index);
+}
+
static const struct of_device_id idle_state_match[] = {
{ .compatible = "domain-idle-state", },
{ }
@@ -2487,10 +2513,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
* power domain corresponding to a DT node's "required-opps" property.
*
* @dev: Device for which the performance-state needs to be found.
- * @opp_node: DT node where the "required-opps" property is present. This can be
+ * @np: DT node where the "required-opps" property is present. This can be
* the device node itself (if it doesn't have an OPP table) or a node
* within the OPP table of a device (if the device has an OPP table).
- * @state: Pointer to return performance state.
*
* Returns performance state corresponding to the "required-opps" property of
* a DT node. This calls platform specific genpd->opp_to_performance_state()
@@ -2499,7 +2524,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
* Returns performance state on success and 0 on failure.
*/
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node)
+ struct device_node *np)
{
struct generic_pm_domain *genpd;
struct dev_pm_opp *opp;
@@ -2514,7 +2539,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
genpd_lock(genpd);
- opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+ opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
if (IS_ERR(opp)) {
dev_err(dev, "Failed to find required OPP: %ld\n",
PTR_ERR(opp));
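Because __genpd_dev_pm_attach() now skips the power-on for the by_id/by_name paths and a power-off is queued right after attach, the returned virtual device starts powered down and the consumer drives it through runtime PM. A hedged usage sketch (not part of this patch; names are hypothetical):

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

/* Assumes the consumer's DT node lists "mydomain" in power-domain-names. */
static int example_attach_and_use(struct device *dev)
{
	struct device *virt_dev = genpd_dev_pm_attach_by_name(dev, "mydomain");

	if (IS_ERR_OR_NULL(virt_dev))
		return virt_dev ? PTR_ERR(virt_dev) : -ENODEV;

	pm_runtime_get_sync(virt_dev);	/* powers the domain on */
	/* ... use the hardware in that domain ... */
	pm_runtime_put(virt_dev);	/* lets genpd power it back off */
	return 0;
}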
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index aff34c0c2a3e..6ad5ef48b61e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -45,3 +45,7 @@ config REGMAP_IRQ
config REGMAP_SOUNDWIRE
tristate
depends on SOUNDWIRE_BUS
+
+config REGMAP_SCCB
+ tristate
+ depends on I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5ed0023fabda..f5b4e8851d00 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
+obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 53785e0e297a..a6bf34d6394e 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -94,10 +94,12 @@ struct regmap {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *rd_noinc_table;
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
@@ -181,6 +183,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg);
bool regmap_readable(struct regmap *map, unsigned int reg);
bool regmap_volatile(struct regmap *map, unsigned int reg);
bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val);
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
new file mode 100644
index 000000000000..597042e2d009
--- /dev/null
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+// Register map access API - SCCB support
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "internal.h"
+
+/**
+ * sccb_is_available - Check if the adapter supports SCCB protocol
+ * @adap: I2C adapter
+ *
+ * Return true if the I2C adapter is capable of using SCCB helper functions,
+ * false otherwise.
+ */
+static bool sccb_is_available(struct i2c_adapter *adap)
+{
+ u32 needed_funcs = I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+
+ /*
+ * If we ever want support for hardware doing SCCB natively, we will
+ * introduce a sccb_xfer() callback to struct i2c_algorithm and check
+ * for it here.
+ */
+
+ return (i2c_get_functionality(adap) & needed_funcs) == needed_funcs;
+}
+
+/**
+ * regmap_sccb_read - Read data from SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * This executes the 2-phase write transmission cycle that is followed by a
+ * 2-phase read transmission cycle, returning negative errno else zero on
+ * success.
+ */
+static int regmap_sccb_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+ union i2c_smbus_data data;
+
+ i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data);
+ if (ret < 0)
+ goto out;
+
+ *val = data.byte;
+out:
+ i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ return ret;
+}
+
+/**
+ * regmap_sccb_write - Write data to SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * This executes the SCCB 3-phase write transmission cycle, returning a
+ * negative errno on failure or zero on success.
+ */
+static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_sccb_bus = {
+ .reg_write = regmap_sccb_write,
+ .reg_read = regmap_sccb_read,
+};
+
+static const struct regmap_bus *regmap_get_sccb_bus(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ if (config->val_bits == 8 && config->reg_bits == 8 &&
+ sccb_is_available(i2c->adapter))
+ return &regmap_sccb_bus;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sccb);
+
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 91d501eda8a9..0968059f1ef5 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -7,33 +7,24 @@
#include "internal.h"
-static int regmap_slimbus_byte_reg_read(void *context, unsigned int reg,
- unsigned int *val)
+static int regmap_slimbus_write(void *context, const void *data, size_t count)
{
struct slim_device *sdev = context;
- int v;
- v = slim_readb(sdev, reg);
-
- if (v < 0)
- return v;
-
- *val = v;
-
- return 0;
+ return slim_write(sdev, *(u16 *)data, count - 2, (u8 *)data + 2);
}
-static int regmap_slimbus_byte_reg_write(void *context, unsigned int reg,
- unsigned int val)
+static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
struct slim_device *sdev = context;
- return slim_writeb(sdev, reg, val);
+ return slim_read(sdev, *(u16 *)reg, val_size, val);
}
static struct regmap_bus regmap_slimbus_bus = {
- .reg_write = regmap_slimbus_byte_reg_write,
- .reg_read = regmap_slimbus_byte_reg_read,
+ .write = regmap_slimbus_write,
+ .read = regmap_slimbus_read,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3bc84885eb91..0360a90ad6b6 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,6 +168,17 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return false;
}
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
+{
+ if (map->readable_noinc_reg)
+ return map->readable_noinc_reg(map->dev, reg);
+
+ if (map->rd_noinc_table)
+ return regmap_check_range_table(map, reg, map->rd_noinc_table);
+
+ return true;
+}
+
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
size_t num)
{
@@ -766,10 +777,12 @@ struct regmap *__regmap_init(struct device *dev,
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
map->precious_table = config->precious_table;
+ map->rd_noinc_table = config->rd_noinc_table;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
spin_lock_init(&map->async_lock);
@@ -1285,6 +1298,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
regmap_debugfs_init(map, config->name);
@@ -2564,7 +2578,70 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
- * regmap_field_read() - Read a value to a single register field
+ * regmap_noinc_read() - Read data from a register without incrementing the
+ * register number
+ *
+ * @map: Register map to read from
+ * @reg: Register to read from
+ * @val: Pointer to data buffer
+ * @val_len: Length of output buffer in bytes.
+ *
+ * The regmap API usually assumes that bulk bus read operations will read a
+ * range of registers. Some devices have certain registers for which a read
+ * operation will instead read from an internal FIFO.
+ *
+ * The target register must be volatile but registers after it can be
+ * completely unrelated cacheable registers.
+ *
+ * This will attempt multiple reads as required to read val_len bytes.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ size_t read_len;
+ int ret;
+
+ if (!map->bus)
+ return -EINVAL;
+ if (!map->bus->read)
+ return -ENOTSUPP;
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_len == 0)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ while (val_len) {
+ if (map->max_raw_read && map->max_raw_read < val_len)
+ read_len = map->max_raw_read;
+ else
+ read_len = val_len;
+ ret = _regmap_raw_read(map, reg, val, read_len);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + read_len;
+ val_len -= read_len;
+ }
+
+out_unlock:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_read);
+
+/**
+ * regmap_field_read() - Read a value from a single register field
*
* @field: Register field to read from
* @val: Pointer to store read value
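A consumer-side sketch for regmap_noinc_read() (not part of this patch); the FIFO register and table names are hypothetical, and the register must also be marked volatile for the check above to pass:

#include <linux/regmap.h>

#define EXAMPLE_FIFO_REG	0x40	/* hypothetical FIFO data register */

static const struct regmap_range example_noinc_ranges[] = {
	regmap_reg_range(EXAMPLE_FIFO_REG, EXAMPLE_FIFO_REG),
};

static const struct regmap_access_table example_rd_noinc_table = {
	.yes_ranges	= example_noinc_ranges,
	.n_yes_ranges	= ARRAY_SIZE(example_noinc_ranges),
};

/* In the regmap_config, set .rd_noinc_table = &example_rd_noinc_table
 * and cover EXAMPLE_FIFO_REG with .volatile_table/.volatile_reg too. */

static int example_drain_fifo(struct regmap *map, u8 *buf, size_t len)
{
	return regmap_noinc_read(map, EXAMPLE_FIFO_REG, buf, len);
}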
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index cb0f1aad20b7..b9558ff20830 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -30,6 +30,7 @@ config BCMA_HOST_PCI
config BCMA_HOST_SOC
bool "Support for BCMA in a SoC"
+ depends on HAS_IOMEM
help
Host interface for a Broadcom AXI bus directly mapped into
memory. This only works with the Broadcom SoCs from the
@@ -61,7 +62,7 @@ config BCMA_DRIVER_PCI_HOSTMODE
config BCMA_DRIVER_MIPS
bool "BCMA Broadcom MIPS core driver"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
help
Driver for the Broadcom MIPS core attached to Broadcom specific
Advanced Microcontroller Bus.
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f6518067aa7d..581312ac375f 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -21,6 +21,7 @@
#define DAC960_DriverDate "21 Aug 2007"
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
@@ -2427,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
{
DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- unsigned char *ReadCacheStatus[] = { "Read Cache Disabled",
- "Read Cache Enabled",
- "Read Ahead Enabled",
- "Intelligent Read Ahead Enabled",
- "-", "-", "-", "-" };
- unsigned char *WriteCacheStatus[] = { "Write Cache Disabled",
- "Logical Device Read Only",
- "Write Cache Enabled",
- "Intelligent Write Cache Enabled",
- "-", "-", "-", "-" };
+ static const unsigned char *ReadCacheStatus[] = {
+ "Read Cache Disabled",
+ "Read Cache Enabled",
+ "Read Ahead Enabled",
+ "Intelligent Read Ahead Enabled",
+ "-", "-", "-", "-"
+ };
+ static const unsigned char *WriteCacheStatus[] = {
+ "Write Cache Disabled",
+ "Logical Device Read Only",
+ "Write Cache Enabled",
+ "Intelligent Write Cache Enabled",
+ "-", "-", "-", "-"
+ };
unsigned char *GeometryTranslation;
if (LogicalDeviceInfo == NULL) continue;
switch (LogicalDeviceInfo->DriveGeometry)
@@ -4338,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
{
DAC960_Controller_T *Controller = Command->Controller;
- unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR",
- "NOT READY", "MEDIUM ERROR",
- "HARDWARE ERROR", "ILLEGAL REQUEST",
- "UNIT ATTENTION", "DATA PROTECT",
- "BLANK CHECK", "VENDOR-SPECIFIC",
- "COPY ABORTED", "ABORTED COMMAND",
- "EQUAL", "VOLUME OVERFLOW",
- "MISCOMPARE", "RESERVED" };
+ static const unsigned char *SenseErrors[] = {
+ "NO SENSE", "RECOVERED ERROR",
+ "NOT READY", "MEDIUM ERROR",
+ "HARDWARE ERROR", "ILLEGAL REQUEST",
+ "UNIT ATTENTION", "DATA PROTECT",
+ "BLANK CHECK", "VENDOR-SPECIFIC",
+ "COPY ABORTED", "ABORTED COMMAND",
+ "EQUAL", "VOLUME OVERFLOW",
+ "MISCOMPARE", "RESERVED"
+ };
unsigned char *CommandName = "UNKNOWN";
switch (Command->CommandType)
{
@@ -6426,7 +6433,7 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
return true;
}
-static int dac960_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_proc_show(struct seq_file *m, void *v)
{
unsigned char *StatusMessage = "OK\n";
int ControllerNumber;
@@ -6446,14 +6453,16 @@ static int dac960_proc_show(struct seq_file *m, void *v)
return 0;
}
-static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_initial_status_proc_show(struct seq_file *m,
+ void *v)
{
DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
return 0;
}
-static int dac960_current_status_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_current_status_proc_show(struct seq_file *m,
+ void *v)
{
DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
unsigned char *StatusMessage =
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ad9b687a236a..d4913516823f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -74,12 +74,12 @@ config AMIGA_Z2RAM
config CDROM
tristate
+ select BLK_SCSI_REQUEST
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
select CDROM
- select BLK_SCSI_REQUEST # only for the generic cdrom code
help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a
"GD-ROM" by SEGA to signify it is capable of reading special disks
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index dc061158b403..8566b188368b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -36,8 +36,11 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
-obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
obj-$(CONFIG_ZRAM) += zram/
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+null_blk-objs := null_blk_main.o
+null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o
+
skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 096882e54095..136dc507d020 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1137,6 +1137,7 @@ noskb: if (buf)
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
+ /* fall through */
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 697f735b07a4..41060e9cedf2 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -284,8 +284,8 @@ freedev(struct aoedev *d)
e = t + d->ntargets;
for (; t < e && *t; t++)
freetgt(d, *t);
- if (d->bufpool)
- mempool_destroy(d->bufpool);
+
+ mempool_destroy(d->bufpool);
skbpoolfree(d);
minor_free(d->sysminor);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bb976598ee43..df8103dd40ac 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -254,20 +254,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, bool is_write,
+ unsigned int len, unsigned int off, unsigned int op,
sector_t sector)
{
void *mem;
int err = 0;
- if (is_write) {
+ if (op_is_write(op)) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (!is_write) {
+ if (!op_is_write(op)) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -296,7 +296,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
int err;
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), sector);
+ bio_op(bio), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -310,15 +310,15 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err;
if (PageTransHuge(page))
return -ENOTSUPP;
- err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
- page_endio(page, is_write, err);
+ err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
+ page_endio(page, op_is_write(op), err);
return err;
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index bc4ed2ed40a2..e35a234b0a8f 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -55,12 +55,10 @@
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
-# define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
-# define __must_hold(x)
#endif
/* shared module parameters, defined in drbd_main.c */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a80809bd3057..ef8212a4b73e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2103,14 +2103,10 @@ static void drbd_destroy_mempools(void)
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
- if (drbd_ee_cache)
- kmem_cache_destroy(drbd_ee_cache);
- if (drbd_request_cache)
- kmem_cache_destroy(drbd_request_cache);
- if (drbd_bm_ext_cache)
- kmem_cache_destroy(drbd_bm_ext_cache);
- if (drbd_al_ext_cache)
- kmem_cache_destroy(drbd_al_ext_cache);
+ kmem_cache_destroy(drbd_ee_cache);
+ kmem_cache_destroy(drbd_request_cache);
+ kmem_cache_destroy(drbd_bm_ext_cache);
+ kmem_cache_destroy(drbd_al_ext_cache);
drbd_ee_cache = NULL;
drbd_request_cache = NULL;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index be9450f5ad1c..75f6b47169e6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2674,8 +2674,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
if (c_min_rate == 0)
return false;
- curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]) -
+ curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
atomic_read(&device->rs_sect_ev);
if (atomic_read(&device->ap_actlog_cnt)
@@ -2790,6 +2789,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
then we would do something smarter here than reading
the block... */
peer_req->flags |= EE_RS_THIN_REQ;
+ /* fall through */
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
@@ -2968,6 +2968,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
/* Else fall through to one of the other strategies... */
drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
+ /* fall through */
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
@@ -2979,6 +2980,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
+ /* else: fall through */
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a47e4987ee46..19cac36e9737 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -38,7 +38,7 @@ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request
{
struct request_queue *q = device->rq_queue;
- generic_start_io_acct(q, bio_data_dir(req->master_bio),
+ generic_start_io_acct(q, bio_op(req->master_bio),
req->i.size >> 9, &device->vdisk->part0);
}
@@ -47,7 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r
{
struct request_queue *q = device->rq_queue;
- generic_end_io_acct(q, bio_data_dir(req->master_bio),
+ generic_end_io_acct(q, bio_op(req->master_bio),
&device->vdisk->part0, req->start_jif);
}
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
_drbd_start_io_acct(device, req);
/* process discards always from our submitter thread */
- if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
- (bio_op(bio) & REQ_OP_DISCARD))
+ if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+ bio_op(bio) == REQ_OP_DISCARD)
goto queue_for_submitter_thread;
if (rw == WRITE && req->private_bio && req->i.size
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1476cb3439f4..b8f77e83d456 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
what = COMPLETED_OK;
}
- bio_put(req->private_bio);
req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
@@ -1690,9 +1690,7 @@ void drbd_rs_controller_reset(struct drbd_device *device)
atomic_set(&device->rs_sect_in, 0);
atomic_set(&device->rs_sect_ev, 0);
device->rs_in_flight = 0;
- device->rs_last_events =
- (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]);
+ device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors);
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8871b5044d9e..48f622728ce6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1461,7 +1461,6 @@ static void setup_rw_floppy(void)
int i;
int r;
int flags;
- int dflags;
unsigned long ready_date;
void (*function)(void);
@@ -1485,8 +1484,6 @@ static void setup_rw_floppy(void)
if (fd_wait_for_completion(ready_date, function))
return;
}
- dflags = DRS->flags;
-
if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
setup_DMA();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d6b6f434fd4b..ea9debf59b22 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -690,7 +690,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
unsigned int arg)
{
struct file *file, *old_file;
- struct inode *inode;
int error;
error = -ENXIO;
@@ -711,7 +710,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (error)
goto out_putf;
- inode = file->f_mapping->host;
old_file = lo->lo_backing_file;
error = -EINVAL;
@@ -1611,8 +1609,10 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_GET_STATUS64:
case LOOP_SET_STATUS64:
arg = (unsigned long) compat_ptr(arg);
+ /* fall through */
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
+ case LOOP_SET_BLOCK_SIZE:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index c73626decb46..db253cd5b32a 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2575,8 +2575,7 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
- if (dd->dfs_node)
- debugfs_remove_recursive(dd->dfs_node);
+ debugfs_remove_recursive(dd->dfs_node);
}
/*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 74a05561b620..3863c00372bb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -112,12 +112,16 @@ struct nbd_device {
struct task_struct *task_setup;
};
+#define NBD_CMD_REQUEUED 1
+
struct nbd_cmd {
struct nbd_device *nbd;
+ struct mutex lock;
int index;
int cookie;
- struct completion send_complete;
blk_status_t status;
+ unsigned long flags;
+ u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
return disk_to_dev(nbd->disk);
}
+static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+ blk_mq_requeue_request(req, true);
+}
+
+#define NBD_COOKIE_BITS 32
+
+static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+ u32 tag = blk_mq_unique_tag(req);
+ u64 cookie = cmd->cmd_cookie;
+
+ return (cookie << NBD_COOKIE_BITS) | tag;
+}
+
+static u32 nbd_handle_to_tag(u64 handle)
+{
+ return (u32)handle;
+}
+
+static u32 nbd_handle_to_cookie(u64 handle)
+{
+ return (u32)(handle >> NBD_COOKIE_BITS);
+}
+
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {
@@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;
+ if (!mutex_trylock(&cmd->lock))
+ return BLK_EH_RESET_TIMER;
+
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
@@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
- blk_mq_requeue_request(req, true);
+ mutex_unlock(&cmd->lock);
+ nbd_requeue_cmd(cmd);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
@@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
+ mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
done:
@@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
struct iov_iter from;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
+ u64 handle;
u32 type;
u32 nbd_cmd_flags = 0;
- u32 tag = blk_mq_unique_tag(req);
int sent = nsock->sent, skip = 0;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
goto send_pages;
}
iov_iter_advance(&from, sent);
+ } else {
+ cmd->cmd_cookie++;
}
cmd->index = index;
cmd->cookie = nsock->cookie;
@@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &tag, sizeof(tag));
+ handle = nbd_cmd_handle(cmd);
+ memcpy(request.handle, &handle, sizeof(handle));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
@@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->pending = req;
nsock->sent = sent;
}
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -541,6 +583,7 @@ send_pages:
*/
nsock->pending = req;
nsock->sent = sent;
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
@@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
+ u64 handle;
u16 hwq;
u32 tag;
struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
struct iov_iter to;
+ int ret = 0;
reply.magic = 0;
iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-EPROTO);
}
- memcpy(&tag, reply.handle, sizeof(u32));
-
+ memcpy(&handle, reply.handle, sizeof(handle));
+ tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
+
+ mutex_lock(&cmd->lock);
+ if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
+ dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
+ req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
+ ret = -ENOENT;
+ goto out;
+ }
+ if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+ dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+ req);
+ ret = -ENOENT;
+ goto out;
+ }
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
cmd->status = BLK_STS_IOERR;
- return cmd;
+ goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (nbd_disconnected(config) ||
config->num_connections <= 1) {
cmd->status = BLK_STS_IOERR;
- return cmd;
+ goto out;
}
- return ERR_PTR(-EIO);
+ ret = -EIO;
+ goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
req, bvec.bv_len);
}
- } else {
- /* See the comment in nbd_queue_rq. */
- wait_for_completion(&cmd->send_complete);
}
- return cmd;
+out:
+ mutex_unlock(&cmd->lock);
+ return ret ? ERR_PTR(ret) : cmd;
}
static void recv_work(struct work_struct *work)
@@ -805,7 +864,7 @@ again:
*/
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
- blk_mq_requeue_request(req, true);
+ nbd_requeue_cmd(cmd);
ret = 0;
goto out;
}
@@ -818,7 +877,7 @@ again:
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
- blk_mq_requeue_request(req, true);
+ nbd_requeue_cmd(cmd);
ret = 0;
}
out:
@@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* that the server is misbehaving (or there was an error) before we're
* done sending everything over the wire.
*/
- init_completion(&cmd->send_complete);
+ mutex_lock(&cmd->lock);
+ clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In
@@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_IOERR;
else if (!ret)
ret = BLK_STS_OK;
- complete(&cmd->send_complete);
+ mutex_unlock(&cmd->lock);
return ret;
}
@@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->nbd = set->driver_data;
+ cmd->flags = 0;
+ mutex_init(&cmd->lock);
return 0;
}
@@ -1571,7 +1633,7 @@ static int find_free_cb(int id, void *ptr, void *data)
}
/* Netlink interface. */
-static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
+static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_INDEX] = { .type = NLA_U32 },
[NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
[NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
@@ -1583,14 +1645,14 @@ static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
};
-static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
+static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
[NBD_SOCK_FD] = { .type = NLA_U32 },
};
/* We don't use this right now since we don't parse the incoming list, but we
* still want it here so userspace knows what to expect.
*/
-static struct nla_policy __attribute__((unused))
+static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
[NBD_DEVICE_INDEX] = { .type = NLA_U32 },
[NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
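For clarity, a worked round-trip of the cookie/tag handle introduced above (not part of this patch): with cmd_cookie 3 and blk-mq tag 0x20005, the wire handle becomes 0x0000000300020005, and the two halves split back out exactly. Since the cookie is bumped on every fresh send, a stale reply carries an old cookie and no longer matches.

/* Sketch only; mirrors nbd_cmd_handle()/nbd_handle_to_*() above. */
static void example_handle_roundtrip(void)
{
	u64 handle = ((u64)3 << NBD_COOKIE_BITS) | 0x20005;

	WARN_ON(nbd_handle_to_tag(handle) != 0x20005);	/* low 32 bits */
	WARN_ON(nbd_handle_to_cookie(handle) != 3);	/* high 32 bits */
}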
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
new file mode 100644
index 000000000000..d81781f22dba
--- /dev/null
+++ b/drivers/block/null_blk.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BLK_NULL_BLK_H
+#define __BLK_NULL_BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/hrtimer.h>
+#include <linux/configfs.h>
+#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
+
+struct nullb_cmd {
+ struct list_head list;
+ struct llist_node ll_list;
+ struct __call_single_data csd;
+ struct request *rq;
+ struct bio *bio;
+ unsigned int tag;
+ blk_status_t error;
+ struct nullb_queue *nq;
+ struct hrtimer timer;
+};
+
+struct nullb_queue {
+ unsigned long *tag_map;
+ wait_queue_head_t wait;
+ unsigned int queue_depth;
+ struct nullb_device *dev;
+ unsigned int requeue_selection;
+
+ struct nullb_cmd *cmds;
+};
+
+struct nullb_device {
+ struct nullb *nullb;
+ struct config_item item;
+ struct radix_tree_root data; /* data stored in the disk */
+ struct radix_tree_root cache; /* disk cache data */
+ unsigned long flags; /* device flags */
+ unsigned int curr_cache;
+ struct badblocks badblocks;
+
+ unsigned int nr_zones;
+ struct blk_zone *zones;
+ sector_t zone_size_sects;
+
+ unsigned long size; /* device size in MB */
+ unsigned long completion_nsec; /* time in ns to complete a request */
+ unsigned long cache_size; /* disk cache size in MB */
+ unsigned long zone_size; /* zone size in MB if device is zoned */
+ unsigned int submit_queues; /* number of submission queues */
+ unsigned int home_node; /* home node for the device */
+ unsigned int queue_mode; /* block interface */
+ unsigned int blocksize; /* block size */
+ unsigned int irqmode; /* IRQ completion handler */
+ unsigned int hw_queue_depth; /* queue depth */
+ unsigned int index; /* index of the disk, only valid with a disk */
+ unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
+ bool blocking; /* blocking blk-mq device */
+ bool use_per_node_hctx; /* use per-node allocation for hardware context */
+ bool power; /* power on/off the device */
+ bool memory_backed; /* if data is stored in memory */
+ bool discard; /* if discard is supported */
+ bool zoned; /* if device is zoned */
+};
+
+struct nullb {
+ struct nullb_device *dev;
+ struct list_head list;
+ unsigned int index;
+ struct request_queue *q;
+ struct gendisk *disk;
+ struct blk_mq_tag_set *tag_set;
+ struct blk_mq_tag_set __tag_set;
+ unsigned int queue_depth;
+ atomic_long_t cur_bytes;
+ struct hrtimer bw_timer;
+ unsigned long cache_flush_pos;
+ spinlock_t lock;
+
+ struct nullb_queue *queues;
+ unsigned int nr_queues;
+ char disk_name[DISK_NAME_LEN];
+};
+
+#ifdef CONFIG_BLK_DEV_ZONED
+int null_zone_init(struct nullb_device *dev);
+void null_zone_exit(struct nullb_device *dev);
+blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd);
+void null_zone_write(struct nullb_cmd *cmd);
+void null_zone_reset(struct nullb_cmd *cmd);
+#else
+static inline int null_zone_init(struct nullb_device *dev)
+{
+ return -EINVAL;
+}
+static inline void null_zone_exit(struct nullb_device *dev) {}
+static inline blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd)
+{
+ return BLK_STS_NOTSUPP;
+}
+static inline void null_zone_write(struct nullb_cmd *cmd) {}
+static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+#endif /* CONFIG_BLK_DEV_ZONED */
+#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk_main.c
index 042c778e5a4e..6127e3ff7b4b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk_main.c
@@ -7,14 +7,8 @@
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
-#include <linux/blkdev.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/blk-mq.h>
-#include <linux/hrtimer.h>
-#include <linux/configfs.h>
-#include <linux/badblocks.h>
-#include <linux/fault-inject.h>
+#include "null_blk.h"
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
@@ -35,28 +29,6 @@ static inline u64 mb_per_tick(int mbps)
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
-struct nullb_cmd {
- struct list_head list;
- struct llist_node ll_list;
- struct __call_single_data csd;
- struct request *rq;
- struct bio *bio;
- unsigned int tag;
- blk_status_t error;
- struct nullb_queue *nq;
- struct hrtimer timer;
-};
-
-struct nullb_queue {
- unsigned long *tag_map;
- wait_queue_head_t wait;
- unsigned int queue_depth;
- struct nullb_device *dev;
- unsigned int requeue_selection;
-
- struct nullb_cmd *cmds;
-};
-
/*
* Status flags for nullb_device.
*
@@ -92,52 +64,6 @@ struct nullb_page {
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
-struct nullb_device {
- struct nullb *nullb;
- struct config_item item;
- struct radix_tree_root data; /* data stored in the disk */
- struct radix_tree_root cache; /* disk cache data */
- unsigned long flags; /* device flags */
- unsigned int curr_cache;
- struct badblocks badblocks;
-
- unsigned long size; /* device size in MB */
- unsigned long completion_nsec; /* time in ns to complete a request */
- unsigned long cache_size; /* disk cache size in MB */
- unsigned int submit_queues; /* number of submission queues */
- unsigned int home_node; /* home node for the device */
- unsigned int queue_mode; /* block interface */
- unsigned int blocksize; /* block size */
- unsigned int irqmode; /* IRQ completion handler */
- unsigned int hw_queue_depth; /* queue depth */
- unsigned int index; /* index of the disk, only valid with a disk */
- unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool blocking; /* blocking blk-mq device */
- bool use_per_node_hctx; /* use per-node allocation for hardware context */
- bool power; /* power on/off the device */
- bool memory_backed; /* if data is stored in memory */
- bool discard; /* if support discard */
-};
-
-struct nullb {
- struct nullb_device *dev;
- struct list_head list;
- unsigned int index;
- struct request_queue *q;
- struct gendisk *disk;
- struct blk_mq_tag_set *tag_set;
- struct blk_mq_tag_set __tag_set;
- unsigned int queue_depth;
- atomic_long_t cur_bytes;
- struct hrtimer bw_timer;
- unsigned long cache_flush_pos;
- spinlock_t lock;
-
- struct nullb_queue *queues;
- unsigned int nr_queues;
- char disk_name[DISK_NAME_LEN];
-};
-
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
@@ -254,6 +180,14 @@ static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static bool g_zoned;
+module_param_named(zoned, g_zoned, bool, S_IRUGO);
+MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");
+
+static unsigned long g_zone_size = 256;
+module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
+MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -357,6 +291,8 @@ NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
+NULLB_DEVICE_ATTR(zoned, bool);
+NULLB_DEVICE_ATTR(zone_size, ulong);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -390,6 +326,7 @@ static ssize_t nullb_device_power_store(struct config_item *item,
null_del_dev(dev->nullb);
mutex_unlock(&lock);
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
return count;
@@ -468,6 +405,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_mbps,
&nullb_device_attr_cache_size,
&nullb_device_attr_badblocks,
+ &nullb_device_attr_zoned,
+ &nullb_device_attr_zone_size,
NULL,
};
@@ -520,7 +459,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -579,6 +518,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
+ dev->zoned = g_zoned;
+ dev->zone_size = g_zone_size;
return dev;
}
@@ -587,6 +528,7 @@ static void null_free_dev(struct nullb_device *dev)
if (!dev)
return;
+ null_zone_exit(dev);
badblocks_exit(&dev->badblocks);
kfree(dev);
}
@@ -862,7 +804,9 @@ static struct nullb_page *null_lookup_page(struct nullb *nullb,
}
static struct nullb_page *null_insert_page(struct nullb *nullb,
- sector_t sector, bool ignore_cache)
+ sector_t sector, bool ignore_cache)
+ __releases(&nullb->lock)
+ __acquires(&nullb->lock)
{
u64 idx;
struct nullb_page *t_page;
@@ -1219,6 +1163,11 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
struct nullb *nullb = dev->nullb;
int err = 0;
+ if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+ cmd->error = null_zone_report(nullb, cmd);
+ goto out;
+ }
+
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1283,6 +1232,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
}
}
cmd->error = errno_to_blk_status(err);
+
+ if (!cmd->error && dev->zoned) {
+ if (req_op(cmd->rq) == REQ_OP_WRITE)
+ null_zone_write(cmd);
+ else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
+ null_zone_reset(cmd);
+ }
out:
/* Complete IO by inline, softirq or timer */
switch (dev->irqmode) {
@@ -1810,6 +1766,15 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flush_queueable(nullb->q, true);
}
+ if (dev->zoned) {
+ rv = null_zone_init(dev);
+ if (rv)
+ goto out_cleanup_blk_queue;
+
+ blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
+ nullb->q->limits.zoned = BLK_ZONED_HM;
+ }
+
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
@@ -1828,13 +1793,16 @@ static int null_add_dev(struct nullb_device *dev)
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_zone;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
return 0;
+out_cleanup_zone:
+ if (dev->zoned)
+ null_zone_exit(dev);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
@@ -1861,6 +1829,11 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
+ if (!is_power_of_2(g_zone_size)) {
+ pr_err("null_blk: zone_size must be power-of-two\n");
+ return -EINVAL;
+ }
+
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
new file mode 100644
index 000000000000..a979ca00d7be
--- /dev/null
+++ b/drivers/block/null_blk_zoned.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/vmalloc.h>
+#include "null_blk.h"
+
+/* zone_size in MBs to sectors. */
+#define ZONE_SIZE_SHIFT 11
+
+static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
+{
+ return sect >> ilog2(dev->zone_size_sects);
+}
+
+int null_zone_init(struct nullb_device *dev)
+{
+ sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
+ sector_t sector = 0;
+ unsigned int i;
+
+ if (!is_power_of_2(dev->zone_size)) {
+ pr_err("null_blk: zone_size must be power-of-two\n");
+ return -EINVAL;
+ }
+
+ dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
+ dev->nr_zones = dev_size >>
+ (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
+ dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dev->zones)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->nr_zones; i++) {
+ struct blk_zone *zone = &dev->zones[i];
+
+ zone->start = zone->wp = sector;
+ zone->len = dev->zone_size_sects;
+ zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ zone->cond = BLK_ZONE_COND_EMPTY;
+
+ sector += dev->zone_size_sects;
+ }
+
+ return 0;
+}
+
+void null_zone_exit(struct nullb_device *dev)
+{
+ kvfree(dev->zones);
+}
+
+static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
+ unsigned int zno, unsigned int nr_zones)
+{
+ struct blk_zone_report_hdr *hdr = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ void *addr;
+ unsigned int zones_to_cpy;
+
+ bio_for_each_segment(bvec, rq->bio, iter) {
+ addr = kmap_atomic(bvec.bv_page);
+
+ zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
+
+ if (!hdr) {
+ hdr = (struct blk_zone_report_hdr *)addr;
+ hdr->nr_zones = nr_zones;
+ zones_to_cpy--;
+ addr += sizeof(struct blk_zone_report_hdr);
+ }
+
+ zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
+
+ memcpy(addr, &dev->zones[zno],
+ zones_to_cpy * sizeof(struct blk_zone));
+
+ kunmap_atomic(addr);
+
+ nr_zones -= zones_to_cpy;
+ zno += zones_to_cpy;
+
+ if (!nr_zones)
+ break;
+ }
+}
+
+blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = nullb->dev;
+ struct request *rq = cmd->rq;
+ unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ unsigned int nr_zones = dev->nr_zones - zno;
+ unsigned int max_zones = (blk_rq_bytes(rq) /
+ sizeof(struct blk_zone)) - 1;
+
+ nr_zones = min_t(unsigned int, nr_zones, max_zones);
+
+ null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+
+ return BLK_STS_OK;
+}
+
+void null_zone_write(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct request *rq = cmd->rq;
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int rq_sectors = blk_rq_sectors(rq);
+ unsigned int zno = null_zone_no(dev, sector);
+ struct blk_zone *zone = &dev->zones[zno];
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_FULL:
+ /* Cannot write to a full zone */
+ cmd->error = BLK_STS_IOERR;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_IMP_OPEN:
+ /* Writes must be at the write pointer position */
+ if (blk_rq_pos(rq) != zone->wp) {
+ cmd->error = BLK_STS_IOERR;
+ break;
+ }
+
+ if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ zone->wp += rq_sectors;
+ if (zone->wp == zone->start + zone->len)
+ zone->cond = BLK_ZONE_COND_FULL;
+ break;
+ default:
+ /* Invalid zone condition */
+ cmd->error = BLK_STS_IOERR;
+ break;
+ }
+}
+
+void null_zone_reset(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct request *rq = cmd->rq;
+ unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ struct blk_zone *zone = &dev->zones[zno];
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+}
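The geometry math above is easiest to see with numbers (not part of this patch): for an instance configured with size=1024 (MB) and zone_size=256 (MB), zone_size_sects = 256 << 11 = 524288 sectors and nr_zones = 2^30 >> (9 + 19) = 4; null_zone_no() then maps sector 1048576 (the 512 MiB offset) to zone 2:

/* Illustrative only; uses the same shifts as null_zone_init(). */
static unsigned int example_zone_no(void)
{
	sector_t zone_size_sects = 256 << ZONE_SIZE_SHIFT;	/* 524288 */

	return 1048576 >> ilog2(zone_size_sects);		/* zone 2 */
}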
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
index 4f27e7392e38..f5f63ca2889d 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/block/paride/bpck.c
@@ -347,7 +347,7 @@ static int bpck_test_proto( PIA *pi, char * scratch, int verbose )
static void bpck_read_eeprom ( PIA *pi, char * buf )
-{ int i,j,k,n,p,v,f, om, od;
+{ int i, j, k, p, v, f, om, od;
bpck_force_spp(pi);
@@ -356,7 +356,6 @@ static void bpck_read_eeprom ( PIA *pi, char * buf )
bpck_connect(pi);
- n = 0;
WR(4,0);
for (i=0;i<64;i++) {
WR(6,8);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 8961b190e256..7cf947586fe4 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -426,6 +426,7 @@ static void run_fsm(void)
pd_claimed = 1;
if (!pi_schedule_claimed(pi_current, run_fsm))
return;
+ /* fall through */
case 1:
pd_claimed = 2;
pi_current->proto->connect(pi_current);
@@ -445,6 +446,7 @@ static void run_fsm(void)
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
return;
+ /* fall through */
case Hold:
schedule_fsm();
return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b3f83cd96f33..6f1d25c1eb64 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -67,7 +67,7 @@
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
-
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "pktcdvd"
@@ -748,13 +748,13 @@ static const char *sense_key_string(__u8 index)
static void pkt_dump_sense(struct pktcdvd_device *pd,
struct packet_command *cgc)
{
- struct request_sense *sense = cgc->sense;
+ struct scsi_sense_hdr *sshdr = cgc->sshdr;
- if (sense)
+ if (sshdr)
pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
CDROM_PACKET_SIZE, cgc->cmd,
- sense->sense_key, sense->asc, sense->ascq,
- sense_key_string(sense->sense_key));
+ sshdr->sense_key, sshdr->asc, sshdr->ascq,
+ sense_key_string(sshdr->sense_key));
else
pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
@@ -787,18 +787,19 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
unsigned write_speed, unsigned read_speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int ret;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_SET_SPEED;
cgc.cmd[2] = (read_speed >> 8) & 0xff;
cgc.cmd[3] = read_speed & 0xff;
cgc.cmd[4] = (write_speed >> 8) & 0xff;
cgc.cmd[5] = write_speed & 0xff;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
@@ -1562,7 +1563,8 @@ static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
cgc.cmd[8] = cgc.buflen = 2;
cgc.quiet = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
return ret;
/* not all drives have the same disc_info length, so requeue
@@ -1591,7 +1593,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type,
cgc.cmd[8] = 8;
cgc.quiet = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
return ret;
cgc.buflen = be16_to_cpu(ti->track_information_length) +
@@ -1612,17 +1615,20 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
__u32 last_track;
int ret = -1;
- if ((ret = pkt_get_disc_info(pd, &di)))
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret)
return ret;
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
return ret;
/* if this track is blank, try the previous. */
if (ti.blank) {
last_track--;
- if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
return ret;
}
@@ -1645,7 +1651,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
write_param_page *wp;
char buffer[128];
int ret, size;
@@ -1656,8 +1662,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
memset(buffer, 0, sizeof(buffer));
init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
- cgc.sense = &sense;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1671,8 +1678,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
* now get it all
*/
init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
- cgc.sense = &sense;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1714,7 +1722,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
cgc.buflen = cgc.cmd[8] = size;
- if ((ret = pkt_mode_select(pd, &cgc))) {
+ ret = pkt_mode_select(pd, &cgc);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1819,7 +1828,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
memset(&di, 0, sizeof(disc_information));
memset(&ti, 0, sizeof(track_information));
- if ((ret = pkt_get_disc_info(pd, &di))) {
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret) {
pkt_err(pd, "failed get_disc\n");
return ret;
}
@@ -1830,7 +1840,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
- if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
+ ret = pkt_get_track_info(pd, track, 1, &ti);
+ if (ret) {
pkt_err(pd, "failed get_track\n");
return ret;
}
@@ -1905,12 +1916,12 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
int set)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[64];
int ret;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.buflen = pd->mode_offset + 12;
/*
@@ -1918,7 +1929,8 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
*/
cgc.quiet = 1;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
+ if (ret)
return ret;
buf[pd->mode_offset + 10] |= (!!set << 2);
@@ -1950,14 +1962,14 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
unsigned *write_speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[256+18];
unsigned char *cap_buf;
int ret, offset;
cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
@@ -2011,13 +2023,13 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
unsigned *speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[64];
unsigned int size, st, sp;
int ret;
init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4; /* READ ATIP */
@@ -2032,7 +2044,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
size = sizeof(buf);
init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4;
@@ -2083,17 +2095,18 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int ret;
pkt_dbg(2, pd, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.timeout = 60*HZ;
cgc.cmd[0] = GPCMD_SEND_OPC;
cgc.cmd[1] = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -2103,19 +2116,22 @@ static int pkt_open_write(struct pktcdvd_device *pd)
int ret;
unsigned int write_speed, media_write_speed, read_speed;
- if ((ret = pkt_probe_settings(pd))) {
+ ret = pkt_probe_settings(pd);
+ if (ret) {
pkt_dbg(2, pd, "failed probe\n");
return ret;
}
- if ((ret = pkt_set_write_settings(pd))) {
+ ret = pkt_set_write_settings(pd);
+ if (ret) {
pkt_dbg(1, pd, "failed saving write settings\n");
return -EIO;
}
pkt_write_caching(pd, USE_WCACHING);
- if ((ret = pkt_get_max_speed(pd, &write_speed)))
+ ret = pkt_get_max_speed(pd, &write_speed);
+ if (ret)
write_speed = 16 * 177;
switch (pd->mmc3_profile) {
case 0x13: /* DVD-RW */
@@ -2124,7 +2140,8 @@ static int pkt_open_write(struct pktcdvd_device *pd)
pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
break;
default:
- if ((ret = pkt_media_speed(pd, &media_write_speed)))
+ ret = pkt_media_speed(pd, &media_write_speed);
+ if (ret)
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
@@ -2132,14 +2149,16 @@ static int pkt_open_write(struct pktcdvd_device *pd)
}
read_speed = write_speed;
- if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
+ ret = pkt_set_speed(pd, write_speed, read_speed);
+ if (ret) {
pkt_dbg(1, pd, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
- if ((ret = pkt_perform_opc(pd))) {
+ ret = pkt_perform_opc(pd);
+ if (ret) {
pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
}
@@ -2161,10 +2180,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* so bdget() can't fail.
*/
bdget(pd->bdev->bd_dev);
- if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
+ ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd);
+ if (ret)
goto out;
- if ((ret = pkt_get_last_written(pd, &lba))) {
+ ret = pkt_get_last_written(pd, &lba);
+ if (ret) {
pkt_err(pd, "pkt_get_last_written failed\n");
goto out_putdev;
}
@@ -2175,7 +2196,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
q = bdev_get_queue(pd->bdev);
if (write) {
- if ((ret = pkt_open_write(pd)))
+ ret = pkt_open_write(pd);
+ if (ret)
goto out_putdev;
/*
* Some CDRW drives cannot handle writes larger than one packet,
@@ -2190,7 +2212,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
clear_bit(PACKET_WRITABLE, &pd->flags);
}
- if ((ret = pkt_set_segment_merging(pd, q)))
+ ret = pkt_set_segment_merging(pd, q);
+ if (ret)
goto out_putdev;
if (write) {
@@ -2231,6 +2254,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
+
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
return pkt_devs[dev_minor];
}
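
The array_index_nospec() hunk above is the standard Spectre-v1 hardening pattern: the bounds check rejects bad minors architecturally, but the CPU may still speculate past the branch, so the index is also clamped on the speculative path. A minimal sketch of the same pattern in kernel context, with foo_table and MAX_FOOS as illustrative stand-ins:

    #include <linux/nospec.h>

    static struct foo *foo_lookup(unsigned int idx)
    {
            if (idx >= MAX_FOOS)
                    return NULL;
            /* clamp idx to [0, MAX_FOOS) even when the branch is mispredicted */
            idx = array_index_nospec(idx, MAX_FOOS);
            return foo_table[idx];
    }
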
@@ -2715,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off;
+ ret = -ENOMEM;
disk = alloc_disk(1);
if (!disk)
goto out_mem;
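
All of the pktcdvd hunks above apply the same mechanical cleanup: an assignment buried inside an if condition is split into a plain assignment followed by a bare test, which is what checkpatch expects and is easier to read. Schematically, with foo() standing in for any of the helpers:

    /* before */
    if ((ret = foo(pd)))
            return ret;

    /* after */
    ret = foo(pd);
    if (ret)
            return ret;
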
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index fa0729c1e776..7915f3b03736 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v)
{
unsigned int counter;
- counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+ counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
if (counter <= (unsigned int)INT_MAX)
return (int)counter;
@@ -181,6 +181,7 @@ struct rbd_image_header {
struct rbd_spec {
u64 pool_id;
const char *pool_name;
+ const char *pool_ns; /* NULL if default, never "" */
const char *image_id;
const char *image_name;
@@ -735,6 +736,7 @@ enum {
Opt_lock_timeout,
Opt_last_int,
/* int args above */
+ Opt_pool_ns,
Opt_last_string,
/* string args above */
Opt_read_only,
@@ -749,6 +751,7 @@ static match_table_t rbd_opts_tokens = {
{Opt_queue_depth, "queue_depth=%d"},
{Opt_lock_timeout, "lock_timeout=%d"},
/* int args above */
+ {Opt_pool_ns, "_pool_ns=%s"},
/* string args above */
{Opt_read_only, "read_only"},
{Opt_read_only, "ro"}, /* Alternate spelling */
@@ -776,9 +779,14 @@ struct rbd_options {
#define RBD_EXCLUSIVE_DEFAULT false
#define RBD_TRIM_DEFAULT true
+struct parse_rbd_opts_ctx {
+ struct rbd_spec *spec;
+ struct rbd_options *opts;
+};
+
static int parse_rbd_opts_token(char *c, void *private)
{
- struct rbd_options *rbd_opts = private;
+ struct parse_rbd_opts_ctx *pctx = private;
substring_t argstr[MAX_OPT_ARGS];
int token, intval, ret;
@@ -786,7 +794,7 @@ static int parse_rbd_opts_token(char *c, void *private)
if (token < Opt_last_int) {
ret = match_int(&argstr[0], &intval);
if (ret < 0) {
- pr_err("bad mount option arg (not int) at '%s'\n", c);
+ pr_err("bad option arg (not int) at '%s'\n", c);
return ret;
}
dout("got int token %d val %d\n", token, intval);
@@ -802,7 +810,7 @@ static int parse_rbd_opts_token(char *c, void *private)
pr_err("queue_depth out of range\n");
return -EINVAL;
}
- rbd_opts->queue_depth = intval;
+ pctx->opts->queue_depth = intval;
break;
case Opt_lock_timeout:
/* 0 is "wait forever" (i.e. infinite timeout) */
@@ -810,22 +818,28 @@ static int parse_rbd_opts_token(char *c, void *private)
pr_err("lock_timeout out of range\n");
return -EINVAL;
}
- rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+ pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+ break;
+ case Opt_pool_ns:
+ kfree(pctx->spec->pool_ns);
+ pctx->spec->pool_ns = match_strdup(argstr);
+ if (!pctx->spec->pool_ns)
+ return -ENOMEM;
break;
case Opt_read_only:
- rbd_opts->read_only = true;
+ pctx->opts->read_only = true;
break;
case Opt_read_write:
- rbd_opts->read_only = false;
+ pctx->opts->read_only = false;
break;
case Opt_lock_on_read:
- rbd_opts->lock_on_read = true;
+ pctx->opts->lock_on_read = true;
break;
case Opt_exclusive:
- rbd_opts->exclusive = true;
+ pctx->opts->exclusive = true;
break;
case Opt_notrim:
- rbd_opts->trim = false;
+ pctx->opts->trim = false;
break;
default:
/* libceph prints "bad option" msg */
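
The _pool_ns option goes through the kernel tokenizer in <linux/parser.h>: the "%s" in the pattern makes match_token() capture the argument as a substring_t, and match_strdup() turns it into a NUL-terminated copy; freeing the previous value first means a repeated option cannot leak. A condensed sketch of that string-option case (ctx stands in for the diff's pctx):

    static match_table_t tokens = {
            {Opt_pool_ns, "_pool_ns=%s"},
            {Opt_err, NULL}
    };

    substring_t args[MAX_OPT_ARGS];

    switch (match_token(opt, tokens, args)) {
    case Opt_pool_ns:
            kfree(ctx->spec->pool_ns);      /* last occurrence wins, no leak */
            ctx->spec->pool_ns = match_strdup(&args[0]);
            if (!ctx->spec->pool_ns)
                    return -ENOMEM;
            break;
    }
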
@@ -1452,7 +1466,7 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
struct ceph_osd_request *osd_req = obj_request->osd_req;
osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
- ktime_get_real_ts(&osd_req->r_mtime);
+ ktime_get_real_ts64(&osd_req->r_mtime);
osd_req->r_data_offset = obj_request->ex.oe_off;
}
@@ -1475,7 +1489,13 @@ rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
req->r_callback = rbd_osd_req_callback;
req->r_priv = obj_req;
+ /*
+ * Data objects may be stored in a separate pool, but always in
+ * the same namespace in that pool as the header is in its pool.
+ */
+ ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
req->r_base_oloc.pool = rbd_dev->layout.pool_id;
+
if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
goto err_req;
@@ -4119,6 +4139,14 @@ static ssize_t rbd_pool_id_show(struct device *dev,
(unsigned long long) rbd_dev->spec->pool_id);
}
+static ssize_t rbd_pool_ns_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
+}
+
static ssize_t rbd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -4217,6 +4245,7 @@ static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
+static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
@@ -4235,6 +4264,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_config_info.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
+ &dev_attr_pool_ns.attr,
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
@@ -4295,6 +4325,7 @@ static void rbd_spec_free(struct kref *kref)
struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
kfree(spec->pool_name);
+ kfree(spec->pool_ns);
kfree(spec->image_id);
kfree(spec->image_name);
kfree(spec->snap_name);
@@ -4353,6 +4384,12 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->header.data_pool_id = CEPH_NOPOOL;
ceph_oid_init(&rbd_dev->header_oid);
rbd_dev->header_oloc.pool = spec->pool_id;
+ if (spec->pool_ns) {
+ WARN_ON(!*spec->pool_ns);
+ rbd_dev->header_oloc.pool_ns =
+ ceph_find_or_create_string(spec->pool_ns,
+ strlen(spec->pool_ns));
+ }
mutex_init(&rbd_dev->watch_mutex);
rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
@@ -4633,6 +4670,17 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
parent_spec->pool_id = pool_id;
parent_spec->image_id = image_id;
parent_spec->snap_id = snap_id;
+
+ /* TODO: support cloning across namespaces */
+ if (rbd_dev->spec->pool_ns) {
+ parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
+ GFP_KERNEL);
+ if (!parent_spec->pool_ns) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ }
+
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
} else {
@@ -5146,8 +5194,7 @@ static int rbd_add_parse_args(const char *buf,
const char *mon_addrs;
char *snap_name;
size_t mon_addrs_size;
- struct rbd_spec *spec = NULL;
- struct rbd_options *rbd_opts = NULL;
+ struct parse_rbd_opts_ctx pctx = { 0 };
struct ceph_options *copts;
int ret;
@@ -5171,22 +5218,22 @@ static int rbd_add_parse_args(const char *buf,
goto out_err;
}
- spec = rbd_spec_alloc();
- if (!spec)
+ pctx.spec = rbd_spec_alloc();
+ if (!pctx.spec)
goto out_mem;
- spec->pool_name = dup_token(&buf, NULL);
- if (!spec->pool_name)
+ pctx.spec->pool_name = dup_token(&buf, NULL);
+ if (!pctx.spec->pool_name)
goto out_mem;
- if (!*spec->pool_name) {
+ if (!*pctx.spec->pool_name) {
rbd_warn(NULL, "no pool name provided");
goto out_err;
}
- spec->image_name = dup_token(&buf, NULL);
- if (!spec->image_name)
+ pctx.spec->image_name = dup_token(&buf, NULL);
+ if (!pctx.spec->image_name)
goto out_mem;
- if (!*spec->image_name) {
+ if (!*pctx.spec->image_name) {
rbd_warn(NULL, "no image name provided");
goto out_err;
}
@@ -5207,24 +5254,24 @@ static int rbd_add_parse_args(const char *buf,
if (!snap_name)
goto out_mem;
*(snap_name + len) = '\0';
- spec->snap_name = snap_name;
+ pctx.spec->snap_name = snap_name;
/* Initialize all rbd options to the defaults */
- rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
- if (!rbd_opts)
+ pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
+ if (!pctx.opts)
goto out_mem;
- rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
- rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
- rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
- rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
- rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
- rbd_opts->trim = RBD_TRIM_DEFAULT;
+ pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
+ pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+ pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
+ pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
+ pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
+ pctx.opts->trim = RBD_TRIM_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
- mon_addrs + mon_addrs_size - 1,
- parse_rbd_opts_token, rbd_opts);
+ mon_addrs + mon_addrs_size - 1,
+ parse_rbd_opts_token, &pctx);
if (IS_ERR(copts)) {
ret = PTR_ERR(copts);
goto out_err;
@@ -5232,15 +5279,15 @@ static int rbd_add_parse_args(const char *buf,
kfree(options);
*ceph_opts = copts;
- *opts = rbd_opts;
- *rbd_spec = spec;
+ *opts = pctx.opts;
+ *rbd_spec = pctx.spec;
return 0;
out_mem:
ret = -ENOMEM;
out_err:
- kfree(rbd_opts);
- rbd_spec_put(spec);
+ kfree(pctx.opts);
+ rbd_spec_put(pctx.spec);
kfree(options);
return ret;
@@ -5586,8 +5633,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
ret = rbd_register_watch(rbd_dev);
if (ret) {
if (ret == -ENOENT)
- pr_info("image %s/%s does not exist\n",
+ pr_info("image %s/%s%s%s does not exist\n",
rbd_dev->spec->pool_name,
+ rbd_dev->spec->pool_ns ?: "",
+ rbd_dev->spec->pool_ns ? "/" : "",
rbd_dev->spec->image_name);
goto err_out_format;
}
@@ -5609,8 +5658,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
ret = rbd_spec_fill_names(rbd_dev);
if (ret) {
if (ret == -ENOENT)
- pr_info("snap %s/%s@%s does not exist\n",
+ pr_info("snap %s/%s%s%s@%s does not exist\n",
rbd_dev->spec->pool_name,
+ rbd_dev->spec->pool_ns ?: "",
+ rbd_dev->spec->pool_ns ? "/" : "",
rbd_dev->spec->image_name,
rbd_dev->spec->snap_name);
goto err_out_probe;
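
The pr_info() changes above print the namespace only when one is set, using GCC's two-operand ?: to substitute an empty string plus a conditional slash. A runnable userspace sketch of the same format trick:

    #include <stdio.h>

    static void show(const char *pool, const char *ns, const char *image)
    {
            /* ns ?: "" is GCC shorthand for ns ? ns : "" */
            printf("image %s/%s%s%s\n", pool, ns ?: "", ns ? "/" : "", image);
    }

    int main(void)
    {
            show("rbd", NULL, "disk1");          /* image rbd/disk1 */
            show("rbd", "tenant-a", "disk1");    /* image rbd/tenant-a/disk1 */
            return 0;
    }
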
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index dddb3f2490b6..1a92f9e65937 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = {
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
- generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio),
+ generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
&card->gendisk->part0);
}
@@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card,
struct bio *bio,
unsigned long start_time)
{
- generic_end_io_acct(card->queue, bio_data_dir(bio),
- &card->gendisk->part0, start_time);
+ generic_end_io_acct(card->queue, bio_op(bio),
+ &card->gendisk->part0, start_time);
}
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index bc7aea6d7b7c..87b9e7fbf062 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -657,8 +657,8 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
if (unlikely(skdev->dbg_level > 1)) {
dev_dbg(&skdev->pdev->dev,
- "skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ "skreq=%x sksg_list=%p sksg_dma=%pad\n",
+ skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
for (i = 0; i < n_sg; i++) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
@@ -1190,8 +1190,8 @@ static void skd_send_fitmsg(struct skd_device *skdev,
{
u64 qcmd;
- dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
- skmsg->mb_dma_address, skd_in_flight(skdev));
+ dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
+ &skmsg->mb_dma_address, skd_in_flight(skdev));
dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
qcmd = skmsg->mb_dma_address;
@@ -1250,9 +1250,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
}
dev_dbg(&skdev->pdev->dev,
- "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
skspcl, skspcl->req.id, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
+ &skspcl->req.sksg_dma_address);
for (i = 0; i < skspcl->req.n_sg; i++) {
struct fit_sg_descriptor *sgd =
&skspcl->req.sksg_list[i];
@@ -2685,8 +2685,8 @@ static int skd_cons_skmsg(struct skd_device *skdev)
WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
(FIT_QCMD_ALIGN - 1),
- "not aligned: msg_buf %p mb_dma_address %#llx\n",
- skmsg->msg_buf, skmsg->mb_dma_address);
+ "not aligned: msg_buf %p mb_dma_address %pad\n",
+ skmsg->msg_buf, &skmsg->mb_dma_address);
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
}
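
The skd hunks above replace %llx with the %pad printk extension. dma_addr_t is 32 or 64 bits wide depending on the configuration, so printing it as %llx needs casts and can be wrong; %pad takes a pointer to the dma_addr_t and prints it at its native width. A short kernel-context sketch (dev and skreq as in the diff, purely illustrative):

    dma_addr_t dma = skreq->sksg_dma_address;

    /* fragile: the width of dma_addr_t varies by config */
    /* dev_dbg(dev, "dma=0x%llx\n", (unsigned long long)dma); */

    /* robust: pass the *address* of the dma_addr_t */
    dev_dbg(dev, "dma=%pad\n", &dma);
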
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b5cedccb5d7d..8986adab9bf5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -251,14 +251,9 @@ static DEFINE_SPINLOCK(minor_lock);
#define GRANTS_PER_INDIRECT_FRAME \
(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
-#define PSEGS_PER_INDIRECT_FRAME \
- (GRANTS_INDIRECT_FRAME / GRANTS_PSEGS)
-
#define INDIRECT_GREFS(_grants) \
DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
-#define GREFS(_psegs) ((_psegs) * GRANTS_PER_PSEG)
-
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
@@ -1441,7 +1436,7 @@ static bool blkif_completion(unsigned long *id,
/* Wait the second response if not yet here. */
if (s2->status == REQ_WAITING)
- return 0;
+ return false;
bret->status = blkif_get_final_status(s->status,
s2->status);
@@ -1542,7 +1537,7 @@ static bool blkif_completion(unsigned long *id,
}
}
- return 1;
+ return true;
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7436b2d27fa3..a1d6b5597c17 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
zram->backing_dev = NULL;
zram->old_block_size = 0;
zram->bdev = NULL;
-
+ zram->disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_SYNCHRONOUS_IO;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
@@ -336,6 +337,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
char *file_name;
+ size_t sz;
struct file *backing_dev = NULL;
struct inode *inode;
struct address_space *mapping;
@@ -356,7 +358,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- strlcpy(file_name, buf, len);
+ strlcpy(file_name, buf, PATH_MAX);
+ /* ignore trailing newline */
+ sz = strlen(file_name);
+ if (sz > 0 && file_name[sz - 1] == '\n')
+ file_name[sz - 1] = 0x00;
backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
if (IS_ERR(backing_dev)) {
@@ -400,6 +406,18 @@ static ssize_t backing_dev_store(struct device *dev,
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
+ /*
+ * With the writeback feature, zram does asynchronous IO, so it is no
+ * longer a synchronous device; remove the synchronous IO flag. Otherwise,
+ * an upper layer (e.g., swap) could wait for IO completion instead of
+ * submitting and returning, which would make the system sluggish.
+ * Furthermore, when the IO function returns (e.g., swap_readpage), the
+ * upper layer assumes the IO is done and may free the page while the IO
+ * is still in flight, eventually causing a use-after-free once the IO
+ * really completes.
+ */
+ zram->disk->queue->backing_dev_info->capabilities &=
+ ~BDI_CAP_SYNCHRONOUS_IO;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
@@ -1274,17 +1292,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
* Returns 1 if IO request was successfully submitted.
*/
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, bool is_write, struct bio *bio)
+ int offset, unsigned int op, struct bio *bio)
{
unsigned long start_time = jiffies;
- int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (!is_write) {
+ if (!op_is_write(op)) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
flush_dcache_page(bvec->bv_page);
@@ -1293,14 +1310,14 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
+ generic_end_io_acct(q, op, &zram->disk->part0, start_time);
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
if (unlikely(ret < 0)) {
- if (!is_write)
+ if (!op_is_write(op))
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -1338,7 +1355,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- op_is_write(bio_op(bio)), bio) < 0)
+ bio_op(bio), bio) < 0)
goto out;
bv.bv_offset += bv.bv_len;
@@ -1390,7 +1407,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
int offset, ret;
u32 index;
@@ -1414,7 +1431,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
+ ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
out:
/*
* If I/O fails, just return an error (i.e., non-zero) without
@@ -1429,7 +1446,7 @@ out:
switch (ret) {
case 0:
- page_endio(page, is_write, 0);
+ page_endio(page, op_is_write(op), 0);
break;
case 1:
ret = 0;
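
The zram conversion above stops collapsing the request to a bool is_write and instead threads the full bio op value through, so the accounting helpers can distinguish reads, writes and discards; op_is_write() recovers the direction where needed. The resulting call shape, a sketch with do_read/do_write as stand-ins:

    unsigned int op = bio_op(bio);  /* REQ_OP_READ, REQ_OP_WRITE, ... */

    generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
                          &zram->disk->part0);
    if (op_is_write(op))
            ret = do_write(zram, bvec, index, offset, bio);
    else
            ret = do_read(zram, bvec, index, offset, bio);
    generic_end_io_acct(q, op, &zram->disk->part0, start_time);
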
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index f3c643a0473c..2df11cc08a46 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -159,6 +159,7 @@ config BT_HCIUART_LL
config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
The HCI Three-wire UART Transport Layer makes it possible to
use the Bluetooth HCI over a serial port interface. The HCI
@@ -194,6 +195,19 @@ config BT_HCIUART_BCM
Say Y here to compile support for Broadcom protocol.
+config BT_HCIUART_RTL
+ bool "Realtek protocol support"
+ depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
+ depends on GPIOLIB
+ select BT_HCIUART_3WIRE
+ select BT_RTL
+ help
+ The Realtek protocol support enables Bluetooth HCI over the 3-Wire
+ serial port interface for Realtek Bluetooth controllers.
+
+ Say Y here to compile support for Realtek protocol.
+
config BT_HCIUART_QCA
bool "Qualcomm Atheros protocol support"
depends on BT_HCIUART
@@ -364,6 +378,17 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_MTKUART
+ tristate "MediaTek HCI UART driver"
+ depends on SERIAL_DEV_BUS
+ help
+ MediaTek Bluetooth HCI UART driver.
+ This driver is required if you want to use MediaTek Bluetooth
+ with serial interface.
+
+ Say Y here to compile support for MediaTek Bluetooth UART devices
+ into the kernel or say M to compile it as module (btmtkuart).
+
config BT_QCOMSMD
tristate "Qualcomm SMD based HCI support"
depends on RPMSG || (COMPILE_TEST && RPMSG=n)
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index ec16c55eb6e9..b7e393cfc1e3 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index ab090a313a5f..0588639b899a 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -490,7 +490,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
count = skb->len;
/* Max HCI frame size seems to be 1511 + 1 */
- nskb = bt_skb_alloc(count + 32, GFP_ATOMIC);
+ nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
if (!nskb) {
BT_ERR("Can't allocate memory for new packet");
return -ENOMEM;
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 82437a69f99c..cc6e56223656 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -565,7 +565,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
- skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL);
if (!skb) {
BT_ERR("Can't allocate mem for new packet");
return -1;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index c6f7cc57db14..d1c2adf08576 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -289,7 +289,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb->dev = (void *) hdev;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
+ urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
@@ -298,7 +298,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
- dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
+ dr = kmalloc(sizeof(*dr), GFP_KERNEL);
if (!dr) {
usb_free_urb(urb);
return -ENOMEM;
@@ -343,7 +343,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
usb_anchor_urb(urb, &data->tx_anchor);
- err = usb_submit_urb(urb, GFP_ATOMIC);
+ err = usb_submit_urb(urb, GFP_KERNEL);
if (err < 0) {
bt_dev_err(hdev, "urb %p submission failed", urb);
kfree(urb->setup_packet);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 888bac49a87b..fb3d03928460 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -718,7 +718,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
}
/* Allocate buffer */
- skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
+ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL);
if (!skb) {
BT_ERR("No free skb");
ret = -ENOMEM;
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
new file mode 100644
index 000000000000..ed2a5c7cb77f
--- /dev/null
+++ b/drivers/bluetooth/btmtkuart.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 MediaTek Inc.
+
+/*
+ * Bluetooth support for MediaTek serial devices
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "h4_recv.h"
+
+#define VERSION "0.1"
+
+#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
+
+#define MTK_STP_TLR_SIZE 2
+
+#define BTMTKUART_TX_STATE_ACTIVE 1
+#define BTMTKUART_TX_STATE_WAKEUP 2
+#define BTMTKUART_TX_WAIT_VND_EVT 3
+
+enum {
+ MTK_WMT_PATCH_DWNLD = 0x1,
+ MTK_WMT_FUNC_CTRL = 0x6,
+ MTK_WMT_RST = 0x7
+};
+
+struct mtk_stp_hdr {
+ u8 prefix;
+ __be16 dlen;
+ u8 cs;
+} __packed;
+
+struct mtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct mtk_hci_wmt_cmd {
+ struct mtk_wmt_hdr hdr;
+ u8 data[256];
+} __packed;
+
+struct btmtkuart_dev {
+ struct hci_dev *hdev;
+ struct serdev_device *serdev;
+ struct clk *clk;
+
+ struct work_struct tx_work;
+ unsigned long tx_state;
+ struct sk_buff_head txq;
+
+ struct sk_buff *rx_skb;
+
+ u8 stp_pad[6];
+ u8 stp_cursor;
+ u16 stp_dlen;
+};
+
+static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
+ const void *param)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct mtk_hci_wmt_cmd wc;
+ struct mtk_wmt_hdr *hdr;
+ u32 hlen;
+ int err;
+
+ hlen = sizeof(*hdr) + plen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ hdr = (struct mtk_wmt_hdr *)&wc;
+ hdr->dir = 1;
+ hdr->op = op;
+ hdr->dlen = cpu_to_le16(plen + 1);
+ hdr->flag = flag;
+ memcpy(wc.data, param, plen);
+
+ set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ if (err < 0) {
+ clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will not have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
+ * state to be cleared. The driver-specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
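
mtk_hci_wmt_sync() above and btmtkuart_recv_event() below form a completion handshake on a bit in tx_state: the sender sets the bit and sleeps in wait_on_bit_timeout(); the event path clears it with test_and_clear_bit(), orders the clear with a barrier, and wakes the waiter. The two halves in isolation, a kernel-context sketch with send_command() and state as illustrative stand-ins:

    /* sender */
    set_bit(WAIT_BIT, &state);
    send_command();
    err = wait_on_bit_timeout(&state, WAIT_BIT, TASK_INTERRUPTIBLE,
                              HCI_INIT_TIMEOUT);
    /* err == -EINTR: interrupted by a signal; other non-zero: timed out */

    /* receiver, on the matching event */
    if (test_and_clear_bit(WAIT_BIT, &state)) {
            smp_mb__after_atomic();         /* make the clear visible before waking */
            wake_up_bit(&state, WAIT_BIT);
    }
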
+
+static int mtk_setup_fw(struct hci_dev *hdev)
+{
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag;
+
+ err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+ /* The patch header is 30 bytes and should be skipped */
+ if (fw_size < 30)
+ return -EINVAL;
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell device the position in sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_PATCH_DWNLD, flag, dlen,
+ fw_ptr);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ break;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+ release_firmware(fw);
+
+ return err;
+}
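
mtk_setup_fw() streams the patch in chunks of at most 250 bytes and tags each chunk's position in the WMT flag: 1 for the first chunk, 2 for middle chunks, 3 for the last. A runnable userspace sketch of the same sequencing; a 530-byte payload, for example, goes out as 250/250/30 with flags 1/2/3:

    #include <stdio.h>

    int main(void)
    {
            int fw_size = 530;      /* payload left after the 30-byte header */
            int sent = 0, flag = 1;

            while (fw_size > 0) {
                    int dlen = fw_size < 250 ? fw_size : 250;

                    if (fw_size - dlen <= 0)
                            flag = 3;       /* last chunk */
                    else if (sent > 0)
                            flag = 2;       /* middle chunk */

                    printf("chunk of %d bytes, flag %d\n", dlen, flag);
                    fw_size -= dlen;
                    sent += dlen;
            }
            return 0;
    }
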
+
+static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ int err;
+
+ /* Fix up the vendor event id with 0xff for vendor specific instead
+ * of 0xe4 so that events sent via the monitoring socket can be parsed
+ * properly.
+ */
+ if (hdr->evt == 0xe4)
+ hdr->evt = HCI_EV_VENDOR;
+
+ err = hci_recv_frame(hdev, skb);
+
+ if (hdr->evt == HCI_EV_VENDOR) {
+ if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
+ &bdev->tx_state)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
+ }
+ }
+
+ return err;
+}
+
+static const struct h4_recv_pkt mtk_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = btmtkuart_recv_event },
+};
+
+static void btmtkuart_tx_work(struct work_struct *work)
+{
+ struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
+ tx_work);
+ struct serdev_device *serdev = bdev->serdev;
+ struct hci_dev *hdev = bdev->hdev;
+
+ while (1) {
+ clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
+
+ while (1) {
+ struct sk_buff *skb = skb_dequeue(&bdev->txq);
+ int len;
+
+ if (!skb)
+ break;
+
+ len = serdev_device_write_buf(serdev, skb->data,
+ skb->len);
+ hdev->stat.byte_tx += len;
+
+ skb_pull(skb, len);
+ if (skb->len > 0) {
+ skb_queue_head(&bdev->txq, skb);
+ break;
+ }
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+
+ if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
+ break;
+ }
+
+ clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
+}
+
+static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
+{
+ if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
+ set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
+
+ schedule_work(&bdev->tx_work);
+}
+
+static const unsigned char *
+mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
+ int *sz_h4)
+{
+ struct mtk_stp_hdr *shdr;
+
+ /* The cursor is reset once all the STP data has been consumed */
+ if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
+ bdev->stp_cursor = 0;
+
+ /* Fill the pad until all the STP header info is obtained */
+ while (bdev->stp_cursor < 6 && count > 0) {
+ bdev->stp_pad[bdev->stp_cursor] = *data;
+ bdev->stp_cursor++;
+ data++;
+ count--;
+ }
+
+ /* Retrieve the STP info and sanity-check it */
+ if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
+ shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
+ bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
+
+ /* Resync STP when unexpected data is being read */
+ if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
+ bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
+ shdr->prefix, bdev->stp_dlen);
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+ }
+ }
+
+ /* Quit directly when there is no data left for H4 to process */
+ if (count <= 0)
+ return NULL;
+
+ /* Work out how much of the data H4 can handle so far */
+ *sz_h4 = min_t(int, count, bdev->stp_dlen);
+
+ /* Update the remaining size of STP packet */
+ bdev->stp_dlen -= *sz_h4;
+
+ /* Data points to STP payload which can be handled by H4 */
+ return data;
+}
+
+static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ const unsigned char *p_left = data, *p_h4;
+ int sz_left = count, sz_h4, adv;
+ int err;
+
+ while (sz_left > 0) {
+ /* The serial data received from the MT7622 BT controller is
+ * always wrapped with an STP header and trailer.
+ *
+ * A full STP packet looks like
+ * -----------------------------------
+ * | STP header | H:4 | STP trailer |
+ * -----------------------------------
+ * but it is not guaranteed to contain a full H:4 packet: several
+ * STP packets may be needed to carry one H:4 packet, so an STP
+ * header plus its length does not necessarily delimit a full H:4
+ * frame and things can fragment. The length recorded in the STP
+ * header only says how much data the H:4 engine can consume at
+ * this point.
+ */
+
+ p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
+ if (!p_h4)
+ break;
+
+ adv = p_h4 - p_left;
+ sz_left -= adv;
+ p_left += adv;
+
+ bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ sz_h4, mtk_recv_pkts,
+ ARRAY_SIZE(mtk_recv_pkts));
+ if (IS_ERR(bdev->rx_skb)) {
+ err = PTR_ERR(bdev->rx_skb);
+ bt_dev_err(bdev->hdev,
+ "Frame reassembly failed (%d)", err);
+ bdev->rx_skb = NULL;
+ return err;
+ }
+
+ sz_left -= sz_h4;
+ p_left += sz_h4;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ size_t count)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+ int err;
+
+ err = btmtkuart_recv(bdev->hdev, data, count);
+ if (err < 0)
+ return err;
+
+ bdev->hdev->stat.byte_rx += count;
+
+ return count;
+}
+
+static void btmtkuart_write_wakeup(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+
+ btmtkuart_tx_wakeup(bdev);
+}
+
+static const struct serdev_device_ops btmtkuart_client_ops = {
+ .receive_buf = btmtkuart_receive_buf,
+ .write_wakeup = btmtkuart_write_wakeup,
+};
+
+static int btmtkuart_open(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct device *dev;
+ int err;
+
+ err = serdev_device_open(bdev->serdev);
+ if (err) {
+ bt_dev_err(hdev, "Unable to open UART device %s",
+ dev_name(&bdev->serdev->dev));
+ goto err_open;
+ }
+
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+
+ dev = &bdev->serdev->dev;
+
+ /* Enable the power domain and clock the device requires */
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(dev);
+ goto err_disable_rpm;
+ }
+
+ err = clk_prepare_enable(bdev->clk);
+ if (err < 0)
+ goto err_put_rpm;
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(dev);
+err_disable_rpm:
+ pm_runtime_disable(dev);
+err_open:
+ return err;
+}
+
+static int btmtkuart_close(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct device *dev = &bdev->serdev->dev;
+
+ /* Shut down the clock and power domain the device requires */
+ clk_disable_unprepare(bdev->clk);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ serdev_device_close(bdev->serdev);
+
+ return 0;
+}
+
+static int btmtkuart_flush(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+
+ /* Flush any pending characters */
+ serdev_device_write_flush(bdev->serdev);
+ skb_queue_purge(&bdev->txq);
+
+ cancel_work_sync(&bdev->tx_work);
+
+ kfree_skb(bdev->rx_skb);
+ bdev->rx_skb = NULL;
+
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+
+ return 0;
+}
+
+static int btmtkuart_setup(struct hci_dev *hdev)
+{
+ u8 param = 0x1;
+ int err = 0;
+
+ /* Set up the firmware which the device definitely requires */
+ err = mtk_setup_fw(hdev);
+ if (err < 0)
+ return err;
+
+ /* Activate the functions the new firmware provides */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_RST, 0x4, 0, 0);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ return err;
+ }
+
+ /* Enable Bluetooth protocol */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
+ &param);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_shutdown(struct hci_dev *hdev)
+{
+ u8 param = 0x0;
+ int err;
+
+ /* Disable the device */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
+ &param);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct mtk_stp_hdr *shdr;
+ int err, dlen, type = 0;
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+ /* Make sure that there is enough room for the STP header and trailer */
+ if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
+ (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
+ err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
+ GFP_ATOMIC);
+ if (err < 0)
+ return err;
+ }
+
+ /* Add the STP header */
+ dlen = skb->len;
+ shdr = skb_push(skb, sizeof(*shdr));
+ shdr->prefix = 0x80;
+ shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
+ shdr->cs = 0; /* MT7622 doesn't care about checksum value */
+
+ /* Add the STP trailer */
+ skb_put_zero(skb, MTK_STP_TLR_SIZE);
+
+ skb_queue_tail(&bdev->txq, skb);
+
+ btmtkuart_tx_wakeup(bdev);
+ return 0;
+}
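
btmtkuart_send_frame() above encodes the payload length and packet type into one big-endian 16-bit field, (dlen & 0x0fff) | (type << 12), then appends a two-byte zero trailer. A runnable userspace sketch that wraps an H:4 HCI_Reset the same way (layout as in the driver; type 0, checksum ignored):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static size_t stp_wrap(uint8_t *out, const uint8_t *h4, size_t len)
    {
            uint16_t dlen = (len & 0x0fff) | (0 << 12);     /* type 0 */

            out[0] = 0x80;                  /* STP prefix */
            out[1] = dlen >> 8;             /* big-endian length */
            out[2] = dlen & 0xff;
            out[3] = 0;                     /* checksum, MT7622 ignores it */
            memcpy(out + 4, h4, len);
            memset(out + 4 + len, 0, 2);    /* trailer */
            return 4 + len + 2;
    }

    int main(void)
    {
            const uint8_t h4[] = { 0x01, 0x03, 0x0c, 0x00 };    /* HCI_Reset */
            uint8_t frame[64];
            size_t i, n = stp_wrap(frame, h4, sizeof(h4));

            for (i = 0; i < n; i++)
                    printf("%02x ", frame[i]);
            printf("\n");   /* 80 00 04 00 01 03 0c 00 00 00 */
            return 0;
    }
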
+
+static int btmtkuart_probe(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev;
+ struct hci_dev *hdev;
+
+ bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->clk = devm_clk_get(&serdev->dev, "ref");
+ if (IS_ERR(bdev->clk))
+ return PTR_ERR(bdev->clk);
+
+ bdev->serdev = serdev;
+ serdev_device_set_drvdata(serdev, bdev);
+
+ serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
+
+ INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
+ skb_queue_head_init(&bdev->txq);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(&serdev->dev, "Can't allocate HCI device\n");
+ return -ENOMEM;
+ }
+
+ bdev->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hci_set_drvdata(hdev, bdev);
+
+ hdev->open = btmtkuart_open;
+ hdev->close = btmtkuart_close;
+ hdev->flush = btmtkuart_flush;
+ hdev->setup = btmtkuart_setup;
+ hdev->shutdown = btmtkuart_shutdown;
+ hdev->send = btmtkuart_send_frame;
+ SET_HCIDEV_DEV(hdev, &serdev->dev);
+
+ hdev->manufacturer = 70;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+
+ if (hci_register_dev(hdev) < 0) {
+ dev_err(&serdev->dev, "Can't register HCI device\n");
+ hci_free_dev(hdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void btmtkuart_remove(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+ struct hci_dev *hdev = bdev->hdev;
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_of_match_table[] = {
+ { .compatible = "mediatek,mt7622-bluetooth"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_of_match_table);
+#endif
+
+static struct serdev_device_driver btmtkuart_driver = {
+ .probe = btmtkuart_probe,
+ .remove = btmtkuart_remove,
+ .driver = {
+ .name = "btmtkuart",
+ .of_match_table = of_match_ptr(mtk_of_match_table),
+ },
+};
+
+module_serdev_device_driver(btmtkuart_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7622);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8219816c54a0..ec9e03a6b778 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -27,7 +27,7 @@
#define VERSION "0.1"
-static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
@@ -35,36 +35,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
char cmd;
int err = 0;
- BT_DBG("%s: ROME Patch Version Request", hdev->name);
+ bt_dev_dbg(hdev, "QCA Version Request");
cmd = EDL_PATCH_VER_REQ_CMD;
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
- &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ &cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
- err);
+ bt_dev_err(hdev, "Reading QCA version information failed (%d)",
+ err);
return err;
}
if (skb->len != sizeof(*edl) + sizeof(*ver)) {
- BT_ERR("%s: Version size mismatch len %d", hdev->name,
- skb->len);
+ bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
err = -EILSEQ;
goto out;
}
edl = (struct edl_event_hdr *)(skb->data);
if (!edl) {
- BT_ERR("%s: TLV with no header", hdev->name);
+ bt_dev_err(hdev, "QCA TLV with no header");
err = -EILSEQ;
goto out;
}
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_APP_VER_RES_EVT) {
- BT_ERR("%s: Wrong packet received %d %d", hdev->name,
- edl->cresp, edl->rtype);
+ bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+ edl->rtype);
err = -EIO;
goto out;
}
@@ -76,30 +75,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
- /* ROME chipset version can be decided by patch and SoC
+ /* QCA chipset version can be decided by patch and SoC
* version; a combination of the upper 2 bytes from the SoC id
* and the lower 2 bytes from the patch version is used.
*/
- *rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+ *soc_version = (le32_to_cpu(ver->soc_id) << 16) |
(le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ if (*soc_version == 0)
+ err = -EILSEQ;
out:
kfree_skb(skb);
+ if (err)
+ bt_dev_err(hdev, "QCA Failed to get version (%d)", err);
return err;
}
+EXPORT_SYMBOL_GPL(qca_read_soc_version);
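
qca_read_soc_version() above combines the two identifiers into one u32: the SoC id supplies the upper 16 bits and the patch (rome) version the lower 16. A runnable sketch of the composition, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t soc_id = 0x00000023;       /* illustrative */
            uint16_t rome_ver = 0x0200;         /* illustrative */

            uint32_t soc_version = (soc_id << 16) | (rome_ver & 0xffff);

            printf("soc_version = 0x%08x\n", soc_version);  /* 0x00230200 */
            return 0;
    }
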
-static int rome_reset(struct hci_dev *hdev)
+static int qca_send_reset(struct hci_dev *hdev)
{
struct sk_buff *skb;
int err;
- BT_DBG("%s: ROME HCI_RESET", hdev->name);
+ bt_dev_dbg(hdev, "QCA HCI_RESET");
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Reset failed (%d)", err);
return err;
}
@@ -108,7 +112,7 @@ static int rome_reset(struct hci_dev *hdev)
return 0;
}
-static void rome_tlv_check_data(struct rome_config *config,
+static void qca_tlv_check_data(struct rome_config *config,
const struct firmware *fw)
{
const u8 *data;
@@ -207,7 +211,7 @@ static void rome_tlv_check_data(struct rome_config *config,
}
}
-static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
+static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
const u8 *data, enum rome_tlv_dnld_mode mode)
{
struct sk_buff *skb;
@@ -225,22 +229,22 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
cmd);
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
- HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
return err;
}
if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
- BT_ERR("%s: TLV response size mismatch", hdev->name);
+ bt_dev_err(hdev, "QCA TLV response size mismatch");
err = -EILSEQ;
goto out;
}
edl = (struct edl_event_hdr *)(skb->data);
if (!edl) {
- BT_ERR("%s: TLV with no header", hdev->name);
+ bt_dev_err(hdev, "TLV with no header");
err = -EILSEQ;
goto out;
}
@@ -249,8 +253,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
- BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
- hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+ bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
+ edl->cresp, edl->rtype, tlv_resp->result);
err = -EIO;
}
@@ -260,23 +264,23 @@ out:
return err;
}
-static int rome_download_firmware(struct hci_dev *hdev,
+static int qca_download_firmware(struct hci_dev *hdev,
struct rome_config *config)
{
const struct firmware *fw;
const u8 *segment;
int ret, remain, i = 0;
- bt_dev_info(hdev, "ROME Downloading %s", config->fwname);
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) {
- BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
- config->fwname, ret);
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
return ret;
}
- rome_tlv_check_data(config, fw);
+ qca_tlv_check_data(config, fw);
segment = fw->data;
remain = fw->size;
@@ -290,7 +294,7 @@ static int rome_download_firmware(struct hci_dev *hdev,
if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
config->dnld_mode = ROME_SKIP_EVT_NONE;
- ret = rome_tlv_send_segment(hdev, segsize, segment,
+ ret = qca_tlv_send_segment(hdev, segsize, segment,
config->dnld_mode);
if (ret)
break;
@@ -314,11 +318,10 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
cmd[2] = sizeof(bdaddr_t); /* size */
memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
- HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Change address command failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "QCA Change address command failed (%d)", err);
return err;
}
@@ -328,57 +331,65 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
-int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver)
{
- u32 rome_ver = 0;
struct rome_config config;
int err;
+ u8 rom_ver;
- BT_DBG("%s: ROME setup on UART", hdev->name);
+ bt_dev_dbg(hdev, "QCA setup on UART");
config.user_baud_rate = baudrate;
- /* Get ROME version information */
- err = rome_patch_ver_req(hdev, &rome_ver);
- if (err < 0 || rome_ver == 0) {
- BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
- return err;
- }
-
- bt_dev_info(hdev, "ROME controller version 0x%08x", rome_ver);
-
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
- rome_ver);
- err = rome_download_firmware(hdev, &config);
+ if (soc_type == QCA_WCN3990) {
+ /* Firmware files to download are based on ROM version.
+ * ROM version is derived from last two bytes of soc_ver.
+ */
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
+ (soc_ver & 0x0000000f);
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ } else {
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
+
+ err = qca_download_firmware(hdev, &config);
if (err < 0) {
- BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err;
}
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
- snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
- rome_ver);
- err = rome_download_firmware(hdev, &config);
+ if (soc_type == QCA_WCN3990)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crnv%02x.bin", rom_ver);
+ else
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/nvm_%08x.bin", soc_ver);
+
+ err = qca_download_firmware(hdev, &config);
if (err < 0) {
- BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err;
}
/* Perform HCI reset */
- err = rome_reset(hdev);
+ err = qca_send_reset(hdev);
if (err < 0) {
- BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err);
return err;
}
- bt_dev_info(hdev, "ROME setup on UART is completed");
+ bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
}
-EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+EXPORT_SYMBOL_GPL(qca_uart_setup);
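
For the WCN3990 branch above, the ROM version folds two nibbles of soc_ver, ((soc_ver & 0xf00) >> 4) | (soc_ver & 0xf), into a single byte that names both firmware files. A runnable sketch with an illustrative soc_ver:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t soc_ver = 0x40010214;  /* illustrative */
            uint8_t rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
                              (soc_ver & 0x0000000f);

            printf("qca/crbtfw%02x.tlv\n", rom_ver);    /* qca/crbtfw24.tlv */
            printf("qca/crnv%02x.bin\n", rom_ver);      /* qca/crnv24.bin */
            return 0;
    }
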
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 13d77fd873b6..0c01f375fe83 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -37,6 +37,9 @@
#define EDL_TAG_ID_HCI (17)
#define EDL_TAG_ID_DEEP_SLEEP (27)
+#define QCA_WCN3990_POWERON_PULSE 0xFC
+#define QCA_WCN3990_POWEROFF_PULSE 0xC0
+
enum qca_bardrate {
QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
@@ -124,10 +127,19 @@ struct tlv_type_hdr {
__u8 data[0];
} __packed;
+enum qca_btsoc_type {
+ QCA_INVALID = -1,
+ QCA_AR3002,
+ QCA_ROME,
+ QCA_WCN3990
+};
+
#if IS_ENABLED(CONFIG_BT_QCA)
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
-int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver);
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
#else
@@ -136,7 +148,13 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
return -EOPNOTSUPP;
}
-static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed)
+static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 437f080deaab..7f9ea8e4c1b2 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -34,9 +34,12 @@
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
#define RTL_ROM_LMP_8822B 0x8822
+#define RTL_CONFIG_MAGIC 0x8723ab55
#define IC_MATCH_FL_LMPSUBV (1 << 0)
#define IC_MATCH_FL_HCIREV (1 << 1)
+#define IC_MATCH_FL_HCIVER (1 << 2)
+#define IC_MATCH_FL_HCIBUS (1 << 3)
#define IC_INFO(lmps, hcir) \
.match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \
.lmp_subver = (lmps), \
@@ -46,49 +49,130 @@ struct id_table {
__u16 match_flags;
__u16 lmp_subver;
__u16 hci_rev;
+ __u8 hci_ver;
+ __u8 hci_bus;
bool config_needed;
+ bool has_rom_version;
char *fw_name;
char *cfg_name;
};
+struct btrtl_device_info {
+ const struct id_table *ic_info;
+ u8 rom_version;
+ u8 *fw_data;
+ int fw_len;
+ u8 *cfg_data;
+ int cfg_len;
+};
+
static const struct id_table ic_id_table[] = {
+ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8723A, 0x0,
+ .config_needed = false,
+ .has_rom_version = false,
+ .fw_name = "rtl_bt/rtl8723a_fw.bin",
+ .cfg_name = NULL },
+
+ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_3499, 0x0,
+ .config_needed = false,
+ .has_rom_version = false,
+ .fw_name = "rtl_bt/rtl8723a_fw.bin",
+ .cfg_name = NULL },
+
+ /* 8723BS */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8723B,
+ .hci_rev = 0xb,
+ .hci_ver = 6,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723bs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723bs_config" },
+
/* 8723B */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xb),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8723b_fw.bin",
- .cfg_name = "rtl_bt/rtl8723b_config.bin" },
+ .cfg_name = "rtl_bt/rtl8723b_config" },
/* 8723D */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xd),
.config_needed = true,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8723d_fw.bin",
- .cfg_name = "rtl_bt/rtl8723d_config.bin" },
+ .cfg_name = "rtl_bt/rtl8723d_config" },
+
+ /* 8723DS */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8723B,
+ .hci_rev = 0xd,
+ .hci_ver = 8,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723ds_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723ds_config" },
/* 8821A */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xa),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8821a_fw.bin",
- .cfg_name = "rtl_bt/rtl8821a_config.bin" },
+ .cfg_name = "rtl_bt/rtl8821a_config" },
/* 8821C */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin",
- .cfg_name = "rtl_bt/rtl8821c_config.bin" },
+ .cfg_name = "rtl_bt/rtl8821c_config" },
/* 8761A */
{ IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8761a_fw.bin",
- .cfg_name = "rtl_bt/rtl8761a_config.bin" },
+ .cfg_name = "rtl_bt/rtl8761a_config" },
/* 8822B */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb),
.config_needed = true,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin",
- .cfg_name = "rtl_bt/rtl8822b_config.bin" },
+ .cfg_name = "rtl_bt/rtl8822b_config" },
};
+static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
+ u8 hci_ver, u8 hci_bus)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
+ (ic_id_table[i].lmp_subver != lmp_subver))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
+ (ic_id_table[i].hci_rev != hci_rev))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) &&
+ (ic_id_table[i].hci_ver != hci_ver))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
+ (ic_id_table[i].hci_bus != hci_bus))
+ continue;
+
+ break;
+ }
+ if (i >= ARRAY_SIZE(ic_id_table))
+ return NULL;
+
+ return &ic_id_table[i];
+}
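
A note on table ordering: btrtl_match_ic() returns the first entry whose set flags all match, so entries that set more flags (the UART-only 8723BS/8723DS variants) must precede the looser entries sharing the same lmp_subver. A minimal sketch, illustrative only and not part of the patch, using values from the 8723BS and 8723B entries above:

/* A UART 8723BS matches the stricter entry; a USB device with the
 * same lmp_subver/hci_rev fails the IC_MATCH_FL_HCIBUS test there
 * and falls through to the plain 8723B entry.
 */
const struct id_table *uart_ic, *usb_ic;

uart_ic = btrtl_match_ic(RTL_ROM_LMP_8723B, 0xb, 6, HCI_UART);
usb_ic = btrtl_match_ic(RTL_ROM_LMP_8723B, 0xb, 6, HCI_USB);
/* uart_ic->fw_name is "rtl_bt/rtl8723bs_fw.bin",
 * usb_ic->fw_name is "rtl_bt/rtl8723b_fw.bin"
 */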
+
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
struct rtl_rom_version_evt *rom_version;
@@ -97,20 +181,20 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
/* Read RTL ROM version command */
skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: Read ROM version failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "Read ROM version failed (%ld)\n",
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*rom_version)) {
- BT_ERR("%s: RTL version event length mismatch", hdev->name);
+ rtl_dev_err(hdev, "RTL version event length mismatch\n");
kfree_skb(skb);
return -EIO;
}
rom_version = (struct rtl_rom_version_evt *)skb->data;
- bt_dev_info(hdev, "rom_version status=%x version=%x",
- rom_version->status, rom_version->version);
+ rtl_dev_info(hdev, "rom_version status=%x version=%x\n",
+ rom_version->status, rom_version->version);
*version = rom_version->version;
@@ -118,16 +202,16 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
return 0;
}
-static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
- const struct firmware *fw,
+static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
unsigned char **_buf)
{
const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
struct rtl_epatch_header *epatch_info;
unsigned char *buf;
- int i, ret, len;
+ int i, len;
size_t min_size;
- u8 opcode, length, data, rom_version = 0;
+ u8 opcode, length, data;
int project_id = -1;
const unsigned char *fwptr, *chip_id_base;
const unsigned char *patch_length_base, *patch_offset_base;
@@ -146,17 +230,13 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
};
- ret = rtl_read_rom_version(hdev, &rom_version);
- if (ret)
- return ret;
-
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
- fwptr = fw->data + fw->size - sizeof(extension_sig);
+ fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig);
if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
- BT_ERR("%s: extension section signature mismatch", hdev->name);
+ rtl_dev_err(hdev, "extension section signature mismatch\n");
return -EINVAL;
}
@@ -166,7 +246,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Once we have that, we double-check that that project_id is suitable
* for the hardware we are working with.
*/
- while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+ while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) {
opcode = *--fwptr;
length = *--fwptr;
data = *--fwptr;
@@ -177,8 +257,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
break;
if (length == 0) {
- BT_ERR("%s: found instruction with length 0",
- hdev->name);
+ rtl_dev_err(hdev, "found instruction with length 0\n");
return -EINVAL;
}
@@ -191,7 +270,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (project_id < 0) {
- BT_ERR("%s: failed to find version instruction", hdev->name);
+ rtl_dev_err(hdev, "failed to find version instruction\n");
return -EINVAL;
}
@@ -202,19 +281,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
- BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+ rtl_dev_err(hdev, "unknown project id %d\n", project_id);
return -EINVAL;
}
- if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) {
- BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
- project_id_to_lmp_subver[i].lmp_subver, lmp_subver);
+ if (btrtl_dev->ic_info->lmp_subver !=
+ project_id_to_lmp_subver[i].lmp_subver) {
+ rtl_dev_err(hdev, "firmware is for %x but this is a %x\n",
+ project_id_to_lmp_subver[i].lmp_subver,
+ btrtl_dev->ic_info->lmp_subver);
return -EINVAL;
}
- epatch_info = (struct rtl_epatch_header *)fw->data;
+ epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data;
if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
- BT_ERR("%s: bad EPATCH signature", hdev->name);
+ rtl_dev_err(hdev, "bad EPATCH signature\n");
return -EINVAL;
}
@@ -229,16 +310,16 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Find the right patch for this chip.
*/
min_size += 8 * num_patches;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
- chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+ chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header);
patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
for (i = 0; i < num_patches; i++) {
u16 chip_id = get_unaligned_le16(chip_id_base +
(i * sizeof(u16)));
- if (chip_id == rom_version + 1) {
+ if (chip_id == btrtl_dev->rom_version + 1) {
patch_length = get_unaligned_le16(patch_length_base +
(i * sizeof(u16)));
patch_offset = get_unaligned_le32(patch_offset_base +
@@ -248,21 +329,22 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (!patch_offset) {
- BT_ERR("%s: didn't find patch for chip id %d",
- hdev->name, rom_version);
+		rtl_dev_err(hdev, "didn't find patch for chip id %d\n",
+ btrtl_dev->rom_version);
return -EINVAL;
}
BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
min_size = patch_offset + patch_length;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
/* Copy the firmware into a new buffer and write the version at
* the end.
*/
len = patch_length;
- buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+ buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
+ GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -301,15 +383,14 @@ static int rtl_download_firmware(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: download fw command failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "download fw command failed (%ld)\n",
+ PTR_ERR(skb));
ret = -PTR_ERR(skb);
goto out;
}
if (skb->len != sizeof(struct rtl_download_response)) {
- BT_ERR("%s: download fw event length mismatch",
- hdev->name);
+ rtl_dev_err(hdev, "download fw event length mismatch\n");
kfree_skb(skb);
ret = -EIO;
goto out;
@@ -324,12 +405,12 @@ out:
return ret;
}
-static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
+static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
{
const struct firmware *fw;
int ret;
- bt_dev_info(hdev, "rtl: loading %s", name);
+	rtl_dev_info(hdev, "loading %s\n", name);
ret = request_firmware(&fw, name, &hdev->dev);
if (ret < 0)
return ret;
@@ -343,96 +424,37 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
return ret;
}
-static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
+static int btrtl_setup_rtl8723a(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
{
- const struct firmware *fw;
- int ret;
-
- bt_dev_info(hdev, "rtl: loading rtl_bt/rtl8723a_fw.bin");
- ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
- return ret;
- }
-
- if (fw->size < 8) {
- ret = -EINVAL;
- goto out;
- }
+ if (btrtl_dev->fw_len < 8)
+ return -EINVAL;
/* Check that the firmware doesn't have the epatch signature
* (which is only for RTL8723B and newer).
*/
- if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
- BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
- ret = -EINVAL;
- goto out;
+ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) {
+ rtl_dev_err(hdev, "unexpected EPATCH signature!\n");
+ return -EINVAL;
}
- ret = rtl_download_firmware(hdev, fw->data, fw->size);
-
-out:
- release_firmware(fw);
- return ret;
+ return rtl_download_firmware(hdev, btrtl_dev->fw_data,
+ btrtl_dev->fw_len);
}
-static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
- u16 lmp_subver)
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
{
unsigned char *fw_data = NULL;
- const struct firmware *fw;
int ret;
- int cfg_sz;
- u8 *cfg_buff = NULL;
u8 *tbuff;
- char *cfg_name = NULL;
- char *fw_name = NULL;
- int i;
- for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
- if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
- (ic_id_table[i].lmp_subver != lmp_subver))
- continue;
- if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
- (ic_id_table[i].hci_rev != hci_rev))
- continue;
-
- break;
- }
-
- if (i >= ARRAY_SIZE(ic_id_table)) {
- BT_ERR("%s: unknown IC info, lmp subver %04x, hci rev %04x",
- hdev->name, lmp_subver, hci_rev);
- return -EINVAL;
- }
-
- cfg_name = ic_id_table[i].cfg_name;
-
- if (cfg_name) {
- cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff);
- if (cfg_sz < 0) {
- cfg_sz = 0;
- if (ic_id_table[i].config_needed)
- BT_ERR("Necessary config file %s not found\n",
- cfg_name);
- }
- } else
- cfg_sz = 0;
-
- fw_name = ic_id_table[i].fw_name;
- bt_dev_info(hdev, "rtl: loading %s", fw_name);
- ret = request_firmware(&fw, fw_name, &hdev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
- goto err_req_fw;
- }
-
- ret = rtlbt_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+ ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data);
if (ret < 0)
goto out;
- if (cfg_sz) {
- tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL);
+ if (btrtl_dev->cfg_len > 0) {
+ tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) {
ret = -ENOMEM;
goto out;
@@ -441,22 +463,18 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
memcpy(tbuff, fw_data, ret);
kfree(fw_data);
- memcpy(tbuff + ret, cfg_buff, cfg_sz);
- ret += cfg_sz;
+ memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
+ ret += btrtl_dev->cfg_len;
fw_data = tbuff;
}
- bt_dev_info(hdev, "cfg_sz %d, total size %d", cfg_sz, ret);
+ rtl_dev_info(hdev, "cfg_sz %d, total sz %d\n", btrtl_dev->cfg_len, ret);
ret = rtl_download_firmware(hdev, fw_data, ret);
out:
- release_firmware(fw);
kfree(fw_data);
-err_req_fw:
- if (cfg_sz)
- kfree(cfg_buff);
return ret;
}
@@ -467,14 +485,13 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)\n",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != sizeof(struct hci_rp_read_local_version)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
- hdev->name);
+ rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch\n");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -482,49 +499,264 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
return skb;
}
-int btrtl_setup_realtek(struct hci_dev *hdev)
+void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
+ kfree(btrtl_dev->fw_data);
+ kfree(btrtl_dev->cfg_data);
+ kfree(btrtl_dev);
+}
+EXPORT_SYMBOL_GPL(btrtl_free);
+
+struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix)
+{
+ struct btrtl_device_info *btrtl_dev;
struct sk_buff *skb;
struct hci_rp_read_local_version *resp;
+ char cfg_name[40];
u16 hci_rev, lmp_subver;
+ u8 hci_ver;
+ int ret;
+
+ btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
+ if (!btrtl_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
skb = btrtl_read_local_version(hdev);
- if (IS_ERR(skb))
- return -PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ goto err_free;
+ }
resp = (struct hci_rp_read_local_version *)skb->data;
- bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x "
- "lmp_ver=%02x lmp_subver=%04x",
- resp->hci_ver, resp->hci_rev,
- resp->lmp_ver, resp->lmp_subver);
+	rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x\n",
+ resp->hci_ver, resp->hci_rev,
+ resp->lmp_ver, resp->lmp_subver);
+ hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
kfree_skb(skb);
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
+
+ if (!btrtl_dev->ic_info) {
+		rtl_dev_err(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %02x\n",
+			    lmp_subver, hci_rev, hci_ver);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (btrtl_dev->ic_info->has_rom_version) {
+ ret = rtl_read_rom_version(hdev, &btrtl_dev->rom_version);
+ if (ret)
+ goto err_free;
+ }
+
+ btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name,
+ &btrtl_dev->fw_data);
+ if (btrtl_dev->fw_len < 0) {
+ rtl_dev_err(hdev, "firmware file %s not found\n",
+ btrtl_dev->ic_info->fw_name);
+ ret = btrtl_dev->fw_len;
+ goto err_free;
+ }
+
+ if (btrtl_dev->ic_info->cfg_name) {
+ if (postfix) {
+ snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
+ btrtl_dev->ic_info->cfg_name, postfix);
+ } else {
+ snprintf(cfg_name, sizeof(cfg_name), "%s.bin",
+ btrtl_dev->ic_info->cfg_name);
+ }
+ btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name,
+ &btrtl_dev->cfg_data);
+ if (btrtl_dev->ic_info->config_needed &&
+ btrtl_dev->cfg_len <= 0) {
+ rtl_dev_err(hdev, "mandatory config file %s not found\n",
+				    cfg_name);
+ ret = btrtl_dev->cfg_len;
+ goto err_free;
+ }
+ }
+
+ return btrtl_dev;
+
+err_free:
+ btrtl_free(btrtl_dev);
+err_alloc:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(btrtl_initialize);
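
The postfix argument lets serdev drivers select a board-specific config file. A hedged usage sketch (the "OBDA8723" string is the ACPI HID that hci_h5.c below passes as h5->id; the resulting filenames follow from the 8723BS table entry):

/* With a postfix, the 8723BS entry resolves to
 * "rtl_bt/rtl8723bs_config-OBDA8723.bin"; with a NULL postfix, as in
 * btrtl_setup_realtek(), it becomes "rtl_bt/rtl8723bs_config.bin".
 */
btrtl_dev = btrtl_initialize(hdev, "OBDA8723");
if (IS_ERR(btrtl_dev))
	return PTR_ERR(btrtl_dev);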
+
+int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
+{
/* Match a set of subver values that correspond to stock firmware,
* which is not compatible with standard btusb.
* If matched, upload an alternative firmware that does conform to
* standard btusb. Once that firmware is uploaded, the subver changes
* to a different value.
*/
- switch (lmp_subver) {
+ switch (btrtl_dev->ic_info->lmp_subver) {
case RTL_ROM_LMP_8723A:
case RTL_ROM_LMP_3499:
- return btrtl_setup_rtl8723a(hdev);
+ return btrtl_setup_rtl8723a(hdev, btrtl_dev);
case RTL_ROM_LMP_8723B:
case RTL_ROM_LMP_8821A:
case RTL_ROM_LMP_8761A:
case RTL_ROM_LMP_8822B:
- return btrtl_setup_rtl8723b(hdev, hci_rev, lmp_subver);
+ return btrtl_setup_rtl8723b(hdev, btrtl_dev);
default:
- bt_dev_info(hdev, "rtl: assuming no firmware upload needed");
+		rtl_dev_info(hdev, "assuming no firmware upload needed\n");
return 0;
}
}
+EXPORT_SYMBOL_GPL(btrtl_download_firmware);
+
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+ struct btrtl_device_info *btrtl_dev;
+ int ret;
+
+ btrtl_dev = btrtl_initialize(hdev, NULL);
+ if (IS_ERR(btrtl_dev))
+ return PTR_ERR(btrtl_dev);
+
+ ret = btrtl_download_firmware(hdev, btrtl_dev);
+
+ btrtl_free(btrtl_dev);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
+{
+ switch (device_baudrate) {
+ case 0x0252a00a:
+ return 230400;
+
+ case 0x05f75004:
+ return 921600;
+
+ case 0x00005004:
+ return 1000000;
+
+ case 0x04928002:
+ case 0x01128002:
+ return 1500000;
+
+ case 0x00005002:
+ return 2000000;
+
+ case 0x0000b001:
+ return 2500000;
+
+ case 0x04928001:
+ return 3000000;
+
+ case 0x052a6001:
+ return 3500000;
+
+ case 0x00005001:
+ return 4000000;
+
+ case 0x0252c014:
+ default:
+ return 115200;
+ }
+}
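
A quick illustration of the mapping, using only values from the switch above (illustrative, not part of the patch):

u32 dev_baud = 0x00005004;	/* baudrate word as stored in the config */
unsigned int host_baud = btrtl_convert_baudrate(dev_baud);
/* host_baud == 1000000; any unlisted word falls back to 115200 */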
+
+int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate, bool *flow_control)
+{
+ struct rtl_vendor_config *config;
+ struct rtl_vendor_config_entry *entry;
+ int i, total_data_len;
+ bool found = false;
+
+ total_data_len = btrtl_dev->cfg_len - sizeof(*config);
+ if (total_data_len <= 0) {
+ rtl_dev_warn(hdev, "no config loaded\n");
+ return -EINVAL;
+ }
+
+ config = (struct rtl_vendor_config *)btrtl_dev->cfg_data;
+ if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) {
+ rtl_dev_err(hdev, "invalid config magic\n");
+ return -EINVAL;
+ }
+
+ if (total_data_len < le16_to_cpu(config->total_len)) {
+ rtl_dev_err(hdev, "config is too short\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total_data_len; ) {
+ entry = ((void *)config->entry) + i;
+
+ switch (le16_to_cpu(entry->offset)) {
+ case 0xc:
+ if (entry->len < sizeof(*device_baudrate)) {
+ rtl_dev_err(hdev, "invalid UART config entry\n");
+ return -EINVAL;
+ }
+
+ *device_baudrate = get_unaligned_le32(entry->data);
+ *controller_baudrate = btrtl_convert_baudrate(
+ *device_baudrate);
+
+ if (entry->len >= 13)
+ *flow_control = !!(entry->data[12] & BIT(2));
+ else
+ *flow_control = false;
+
+ found = true;
+ break;
+
+ default:
+ rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)\n",
+ le16_to_cpu(entry->offset), entry->len);
+ break;
+		}
+
+ i += sizeof(*entry) + entry->len;
+ }
+
+ if (!found) {
+ rtl_dev_err(hdev, "no UART config entry found\n");
+ return -ENOENT;
+ }
+
+ rtl_dev_dbg(hdev, "device baudrate = 0x%08x\n", *device_baudrate);
+ rtl_dev_dbg(hdev, "controller baudrate = %u\n", *controller_baudrate);
+ rtl_dev_dbg(hdev, "flow control %d\n", *flow_control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btrtl_get_uart_settings);
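
To make the parsing loop above concrete, here is a hedged sketch of a minimal config blob it would accept. The header layout comes from struct rtl_vendor_config in btrtl.h; the payload bytes themselves are illustrative:

/* Hypothetical 22-byte config: 6-byte header, one entry at offset 0xc
 * whose 13-byte payload starts with the device baudrate word
 * (0x00005004, i.e. 1000000 baud per btrtl_convert_baudrate()) and
 * carries the flow-control bit (BIT(2)) in data[12].
 */
static const u8 sample_cfg[] = {
	0x55, 0xab, 0x23, 0x87,	/* signature (RTL_CONFIG_MAGIC, LE) */
	0x10, 0x00,		/* total_len = 16 entry bytes */
	0x0c, 0x00,		/* entry->offset = 0xc (UART settings) */
	0x0d,			/* entry->len = 13 */
	0x04, 0x50, 0x00, 0x00,	/* device baudrate word, LE */
	0x00, 0x00, 0x00, 0x00,	/* data[4..11], don't care here */
	0x00, 0x00, 0x00, 0x00,
	0x04,			/* data[12]: BIT(2) = flow control on */
};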
+
MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821c_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index 38ffe4890cd1..f5e36f3993a8 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -17,6 +17,13 @@
#define RTL_FRAG_LEN 252
+#define rtl_dev_err(dev, fmt, ...) bt_dev_err(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_warn(dev, fmt, ...) bt_dev_warn(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_info(dev, fmt, ...) bt_dev_info(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_dbg(dev, fmt, ...) bt_dev_dbg(dev, "RTL: " fmt, ##__VA_ARGS__)
+
+struct btrtl_device_info;
+
struct rtl_download_cmd {
__u8 index;
__u8 data[RTL_FRAG_LEN];
@@ -38,15 +45,61 @@ struct rtl_epatch_header {
__le16 num_patches;
} __packed;
+struct rtl_vendor_config_entry {
+ __le16 offset;
+ __u8 len;
+ __u8 data[0];
+} __packed;
+
+struct rtl_vendor_config {
+ __le32 signature;
+ __le16 total_len;
+ struct rtl_vendor_config_entry entry[0];
+} __packed;
+
#if IS_ENABLED(CONFIG_BT_RTL)
+struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix);
+void btrtl_free(struct btrtl_device_info *btrtl_dev);
+int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev);
int btrtl_setup_realtek(struct hci_dev *hdev);
+int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate, bool *flow_control);
#else
+static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void btrtl_free(struct btrtl_device_info *btrtl_dev)
+{
+}
+
+static inline int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int btrtl_setup_realtek(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
+static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate,
+ bool *flow_control)
+{
+ return -ENOENT;
+}
+
#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f73a27ea28cc..cd2e5cf14ea5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -374,6 +374,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
@@ -509,9 +510,10 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->evt_skb;
while (count) {
@@ -556,7 +558,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
data->evt_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
@@ -564,9 +566,10 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->acl_skb;
while (count) {
@@ -613,7 +616,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
}
data->acl_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
@@ -621,9 +624,10 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->sco_skb;
while (count) {
@@ -668,7 +672,7 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
}
data->sco_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
@@ -1066,6 +1070,7 @@ static void btusb_tx_complete(struct urb *urb)
struct sk_buff *skb = urb->context;
struct hci_dev *hdev = (struct hci_dev *)skb->dev;
struct btusb_data *data = hci_get_drvdata(hdev);
+ unsigned long flags;
BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
urb->actual_length);
@@ -1079,9 +1084,9 @@ static void btusb_tx_complete(struct urb *urb)
hdev->stat.err_tx++;
done:
- spin_lock(&data->txlock);
+ spin_lock_irqsave(&data->txlock, flags);
data->tx_in_flight--;
- spin_unlock(&data->txlock);
+ spin_unlock_irqrestore(&data->txlock, flags);
kfree(urb->setup_packet);
@@ -1593,13 +1598,13 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
ret = request_firmware(&fw, fwname, &hdev->dev);
if (ret < 0) {
if (ret == -EINVAL) {
- BT_ERR("%s Intel firmware file request failed (%d)",
- hdev->name, ret);
+ bt_dev_err(hdev, "Intel firmware file request failed (%d)",
+ ret);
return NULL;
}
- BT_ERR("%s failed to open Intel firmware file: %s(%d)",
- hdev->name, fwname, ret);
+ bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)",
+ fwname, ret);
/* If the correct firmware patch file is not found, use the
* default firmware patch file instead
@@ -1607,8 +1612,8 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq",
ver->hw_platform, ver->hw_variant);
if (request_firmware(&fw, fwname, &hdev->dev) < 0) {
- BT_ERR("%s failed to open default Intel fw file: %s",
- hdev->name, fwname);
+ bt_dev_err(hdev, "failed to open default fw file: %s",
+ fwname);
return NULL;
}
}
@@ -1637,7 +1642,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* process.
*/
if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) {
- BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read");
return -EINVAL;
}
(*fw_ptr)++;
@@ -1651,7 +1656,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* of command parameter. If not, the firmware file is corrupted.
*/
if (remain < cmd->plen) {
- BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len");
return -EFAULT;
}
@@ -1684,8 +1689,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
remain -= sizeof(*evt);
if (remain < evt->plen) {
- BT_ERR("%s Intel fw corrupted: invalid evt len",
- hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid evt len");
return -EFAULT;
}
@@ -1699,15 +1703,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* file is corrupted.
*/
if (!evt || !evt_param || remain < 0) {
- BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid evt read");
return -EFAULT;
}
skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen,
cmd_param, evt->evt, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
- hdev->name, cmd->opcode, PTR_ERR(skb));
+ bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)",
+ cmd->opcode, PTR_ERR(skb));
return PTR_ERR(skb);
}
@@ -1716,15 +1720,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* the contents of the event.
*/
if (skb->len != evt->plen) {
- BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name,
- le16_to_cpu(cmd->opcode));
+ bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)",
+ le16_to_cpu(cmd->opcode));
kfree_skb(skb);
return -EFAULT;
}
if (memcmp(skb->data, evt_param, evt->plen)) {
- BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)",
- hdev->name, le16_to_cpu(cmd->opcode));
+ bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)",
+ le16_to_cpu(cmd->opcode));
kfree_skb(skb);
return -EFAULT;
}
@@ -1753,8 +1757,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
*/
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s sending initial HCI reset command failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "sending initial HCI reset command failed (%ld)",
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1890,7 +1894,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt;
- skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -2084,8 +2088,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* for now only accept this single value.
*/
if (ver.hw_platform != 0x37) {
- BT_ERR("%s: Unsupported Intel hardware platform (%u)",
- hdev->name, ver.hw_platform);
+ bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
+ ver.hw_platform);
return -EINVAL;
}
@@ -2104,8 +2108,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x14: /* QnJ, IcP */
break;
default:
- BT_ERR("%s: Unsupported Intel hardware variant (%u)",
- hdev->name, ver.hw_variant);
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver.hw_variant);
return -EINVAL;
}
@@ -2134,8 +2138,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* choice is to return an error and abort the device initialization.
*/
if (ver.fw_variant != 0x06) {
- BT_ERR("%s: Unsupported Intel firmware variant (%u)",
- hdev->name, ver.fw_variant);
+ bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
+ ver.fw_variant);
return -ENODEV;
}
@@ -2151,8 +2155,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* that this bootloader does not send them, then abort the setup.
*/
if (params.limited_cce != 0x00) {
- BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
- hdev->name, params.limited_cce);
+ bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
+ params.limited_cce);
return -EINVAL;
}
@@ -2202,14 +2206,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision));
break;
default:
- BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
- BT_ERR("%s: Failed to load Intel firmware file (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err);
return err;
}
@@ -2235,13 +2238,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision));
break;
default:
- BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
if (fw->size < 644) {
- BT_ERR("%s: Invalid size of firmware file (%zu)",
- hdev->name, fw->size);
+ bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
+ fw->size);
err = -EBADF;
goto done;
}
@@ -2272,18 +2275,18 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
if (err == -EINTR) {
- BT_ERR("%s: Firmware loading interrupted", hdev->name);
+ bt_dev_err(hdev, "Firmware loading interrupted");
goto done;
}
if (err) {
- BT_ERR("%s: Firmware loading timeout", hdev->name);
+ bt_dev_err(hdev, "Firmware loading timeout");
err = -ETIMEDOUT;
goto done;
}
if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
- BT_ERR("%s: Firmware loading failed", hdev->name);
+ bt_dev_err(hdev, "Firmware loading failed");
err = -ENOEXEC;
goto done;
}
@@ -2322,12 +2325,12 @@ done:
msecs_to_jiffies(1000));
if (err == -EINTR) {
- BT_ERR("%s: Device boot interrupted", hdev->name);
+ bt_dev_err(hdev, "Device boot interrupted");
return -EINTR;
}
if (err) {
- BT_ERR("%s: Device boot timeout", hdev->name);
+ bt_dev_err(hdev, "Device boot timeout");
return -ETIMEDOUT;
}
@@ -2364,6 +2367,22 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
struct sk_buff *skb;
long ret;
+ /* In the shutdown sequence where Bluetooth is turned off followed
+	 * by WiFi being turned off, turning WiFi back on causes issues with
+ * the RF calibration.
+ *
+ * To ensure that any RF activity has been stopped, issue HCI Reset
+ * command to clear all ongoing activity including advertising,
+ * scanning etc.
+ */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return ret;
+ }
+ kfree_skb(skb);
+
/* Some platforms have an issue with BT LED when the interface is
* down or BT radio is turned off, which takes 5 seconds to BT LED
* goes off. This command turns off the BT LED immediately.
@@ -2371,8 +2390,7 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
- BT_ERR("%s: turning off Intel device LED failed (%ld)",
- hdev->name, ret);
+ bt_dev_err(hdev, "turning off Intel device LED failed");
return ret;
}
kfree_skb(skb);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 6a8d0d06aba7..8eede1197cd2 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -21,13 +21,18 @@
*
*/
-#include <linux/kernel.h>
+#include <linux/acpi.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include "btrtl.h"
#include "hci_uart.h"
#define HCI_3WIRE_ACK_PKT 0
@@ -65,6 +70,9 @@ enum {
};
struct h5 {
+ /* Must be the first member, hci_serdev.c expects this. */
+ struct hci_uart serdev_hu;
+
struct sk_buff_head unack; /* Unack'ed packets queue */
struct sk_buff_head rel; /* Reliable packets queue */
struct sk_buff_head unrel; /* Unreliable packets queue */
@@ -95,6 +103,19 @@ struct h5 {
H5_SLEEPING,
H5_WAKING_UP,
} sleep;
+
+ const struct h5_vnd *vnd;
+ const char *id;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *device_wake_gpio;
+};
+
+struct h5_vnd {
+ int (*setup)(struct h5 *h5);
+ void (*open)(struct h5 *h5);
+ void (*close)(struct h5 *h5);
+ const struct acpi_gpio_mapping *acpi_gpio_map;
};
static void h5_reset_rx(struct h5 *h5);
@@ -193,9 +214,13 @@ static int h5_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
- h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
- if (!h5)
- return -ENOMEM;
+ if (hu->serdev) {
+ h5 = serdev_device_get_drvdata(hu->serdev);
+ } else {
+ h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+ }
hu->priv = h5;
h5->hu = hu;
@@ -210,6 +235,9 @@ static int h5_open(struct hci_uart *hu)
h5->tx_win = H5_TX_WIN_MAX;
+ if (h5->vnd && h5->vnd->open)
+ h5->vnd->open(h5);
+
set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
/* Send initial sync request */
@@ -229,7 +257,21 @@ static int h5_close(struct hci_uart *hu)
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
- kfree(h5);
+ if (h5->vnd && h5->vnd->close)
+ h5->vnd->close(h5);
+
+ if (!hu->serdev)
+ kfree(h5);
+
+ return 0;
+}
+
+static int h5_setup(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (h5->vnd && h5->vnd->setup)
+ return h5->vnd->setup(h5);
return 0;
}
@@ -744,18 +786,172 @@ static const struct hci_uart_proto h5p = {
.name = "Three-wire (H5)",
.open = h5_open,
.close = h5_close,
+ .setup = h5_setup,
.recv = h5_recv,
.enqueue = h5_enqueue,
.dequeue = h5_dequeue,
.flush = h5_flush,
};
+static int h5_serdev_probe(struct serdev_device *serdev)
+{
+ const struct acpi_device_id *match;
+ struct device *dev = &serdev->dev;
+ struct h5 *h5;
+
+ h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+
+ set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
+
+ h5->hu = &h5->serdev_hu;
+ h5->serdev_hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, h5);
+
+ if (has_acpi_companion(dev)) {
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match)
+ return -ENODEV;
+
+ h5->vnd = (const struct h5_vnd *)match->driver_data;
+ h5->id = (char *)match->id;
+
+ if (h5->vnd->acpi_gpio_map)
+ devm_acpi_dev_add_driver_gpios(dev,
+ h5->vnd->acpi_gpio_map);
+ }
+
+ h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(h5->enable_gpio))
+ return PTR_ERR(h5->enable_gpio);
+
+ h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(h5->device_wake_gpio))
+ return PTR_ERR(h5->device_wake_gpio);
+
+ return hci_uart_register_device(&h5->serdev_hu, &h5p);
+}
+
+static void h5_serdev_remove(struct serdev_device *serdev)
+{
+ struct h5 *h5 = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&h5->serdev_hu);
+}
+
+#ifdef CONFIG_BT_HCIUART_RTL
+static int h5_btrtl_setup(struct h5 *h5)
+{
+ struct btrtl_device_info *btrtl_dev;
+ struct sk_buff *skb;
+ __le32 baudrate_data;
+ u32 device_baudrate;
+ unsigned int controller_baudrate;
+ bool flow_control;
+ int err;
+
+ btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
+ if (IS_ERR(btrtl_dev))
+ return PTR_ERR(btrtl_dev);
+
+ err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
+ &controller_baudrate, &device_baudrate,
+ &flow_control);
+ if (err)
+ goto out_free;
+
+ baudrate_data = cpu_to_le32(device_baudrate);
+ skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
+ &baudrate_data, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
+ err = PTR_ERR(skb);
+ goto out_free;
+	}
+	kfree_skb(skb);
+
+	/* Give the device some time to set up the new baudrate. */
+ usleep_range(10000, 20000);
+
+ serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
+ serdev_device_set_flow_control(h5->hu->serdev, flow_control);
+
+ err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
+ /* Give the device some time before the hci-core sends it a reset */
+ usleep_range(10000, 20000);
+
+out_free:
+ btrtl_free(btrtl_dev);
+
+ return err;
+}
+
+static void h5_btrtl_open(struct h5 *h5)
+{
+ /* Devices always start with these fixed parameters */
+ serdev_device_set_flow_control(h5->hu->serdev, false);
+ serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
+ serdev_device_set_baudrate(h5->hu->serdev, 115200);
+
+	/* The controller needs up to 500ms to wake up */
+ gpiod_set_value_cansleep(h5->enable_gpio, 1);
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
+ msleep(500);
+}
+
+static void h5_btrtl_close(struct h5 *h5)
+{
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
+ gpiod_set_value_cansleep(h5->enable_gpio, 0);
+}
+
+static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
+static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
+static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
+static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
+ { "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
+ { "enable-gpios", &btrtl_enable_gpios, 1 },
+ { "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
+ {},
+};
+
+static struct h5_vnd rtl_vnd = {
+ .setup = h5_btrtl_setup,
+ .open = h5_btrtl_open,
+ .close = h5_btrtl_close,
+ .acpi_gpio_map = acpi_btrtl_gpios,
+};
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id h5_acpi_match[] = {
+#ifdef CONFIG_BT_HCIUART_RTL
+ { "OBDA8723", (kernel_ulong_t)&rtl_vnd },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
+#endif
+
+static struct serdev_device_driver h5_serdev_driver = {
+ .probe = h5_serdev_probe,
+ .remove = h5_serdev_remove,
+ .driver = {
+ .name = "hci_uart_h5",
+ .acpi_match_table = ACPI_PTR(h5_acpi_match),
+ },
+};
+
int __init h5_init(void)
{
+ serdev_device_driver_register(&h5_serdev_driver);
return hci_uart_register_proto(&h5p);
}
int __exit h5_deinit(void)
{
+ serdev_device_driver_unregister(&h5_serdev_driver);
return hci_uart_unregister_proto(&h5p);
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 7c166e3b308b..46ace321bf60 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -458,7 +458,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt;
- skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 51790dd02afb..e182f6019f68 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -5,7 +5,7 @@
* protocol extension to H4.
*
* Copyright (C) 2007 Texas Instruments, Inc.
- * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -31,9 +31,14 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
@@ -119,12 +124,51 @@ struct qca_data {
u64 votes_off;
};
+enum qca_speed_type {
+ QCA_INIT_SPEED = 1,
+ QCA_OPER_SPEED
+};
+
+/*
+ * Voltage regulator information required for configuring the
+ * QCA Bluetooth chipset
+ */
+struct qca_vreg {
+ const char *name;
+ unsigned int min_uV;
+ unsigned int max_uV;
+ unsigned int load_uA;
+};
+
+struct qca_vreg_data {
+ enum qca_btsoc_type soc_type;
+ struct qca_vreg *vregs;
+ size_t num_vregs;
+};
+
+/*
+ * Platform data for the QCA Bluetooth power driver.
+ */
+struct qca_power {
+ struct device *dev;
+ const struct qca_vreg_data *vreg_data;
+ struct regulator_bulk_data *vreg_bulk;
+ bool vregs_on;
+};
+
struct qca_serdev {
struct hci_uart serdev_hu;
struct gpio_desc *bt_en;
struct clk *susclk;
+ enum qca_btsoc_type btsoc_type;
+ struct qca_power *bt_power;
+ u32 init_speed;
+ u32 oper_speed;
};
+static int qca_power_setup(struct hci_uart *hu, bool on);
+static void qca_power_shutdown(struct hci_dev *hdev);
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -402,10 +446,11 @@ static int qca_open(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
struct qca_data *qca;
+ int ret;
BT_DBG("hu %p qca_open", hu);
- qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+ qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca)
return -ENOMEM;
@@ -453,19 +498,32 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
- timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
- qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
-
- timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
- qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
-
if (hu->serdev) {
serdev_device_open(hu->serdev);
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ } else {
+ hu->init_speed = qcadev->init_speed;
+ hu->oper_speed = qcadev->oper_speed;
+ ret = qca_power_setup(hu, true);
+ if (ret) {
+ destroy_workqueue(qca->workqueue);
+ kfree_skb(qca->rx_skb);
+ hu->priv = NULL;
+ kfree(qca);
+ return ret;
+ }
+ }
}
+ timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
+ qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+ timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
+ qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -549,10 +607,13 @@ static int qca_close(struct hci_uart *hu)
qca->hu = NULL;
if (hu->serdev) {
- serdev_device_close(hu->serdev);
-
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ qca_power_shutdown(hu->hdev);
+ else
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+
+ serdev_device_close(hu->serdev);
}
kfree_skb(qca->rx_skb);
@@ -872,6 +933,8 @@ static uint8_t qca_get_baudrate_value(int speed)
return QCA_BAUDRATE_2000000;
case 3000000:
return QCA_BAUDRATE_3000000;
+ case 3200000:
+ return QCA_BAUDRATE_3200000;
case 3500000:
return QCA_BAUDRATE_3500000;
default:
@@ -884,19 +947,27 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
struct sk_buff *skb;
+ struct qca_serdev *qcadev;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
- if (baudrate > QCA_BAUDRATE_3000000)
+ if (baudrate > QCA_BAUDRATE_3200000)
return -EINVAL;
cmd[4] = baudrate;
- skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb) {
bt_dev_err(hdev, "Failed to allocate baudrate packet");
return -ENOMEM;
}
+ /* Disabling hardware flow control is mandatory while
+	 * sending the change-baudrate request to the wcn3990 SoC.
+ */
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, true);
+
/* Assign commands to change baudrate and packet type. */
skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
@@ -912,6 +983,9 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
set_current_state(TASK_RUNNING);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, false);
+
return 0;
}
@@ -923,50 +997,195 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
hci_uart_set_baudrate(hu, speed);
}
+static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+
+	/* These power pulses are single-byte commands sent at the required
+	 * baudrate to the wcn3990. An external circuit at the wcn3990's Tx
+	 * pin decodes a pulse sent at a specific baudrate. For example, the
+	 * wcn3990 shares an RF COEX antenna and the same power inputs
+	 * between Wi-Fi and BT, so powering up the power sources alone does
+	 * not enable BT until we send a power-on pulse at 115200 bps. This
+	 * scheme helps to save power. Disabling hardware flow control is
+	 * mandatory while sending power pulses to the SoC.
+	 */
+ bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd);
+
+ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hci_uart_set_flow_control(hu, true);
+
+ skb_put_u8(skb, cmd);
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+	/* Wait for 100 us for the SoC to settle down */
+ usleep_range(100, 200);
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
+static unsigned int qca_get_speed(struct hci_uart *hu,
+ enum qca_speed_type speed_type)
+{
+ unsigned int speed = 0;
+
+ if (speed_type == QCA_INIT_SPEED) {
+ if (hu->init_speed)
+ speed = hu->init_speed;
+ else if (hu->proto->init_speed)
+ speed = hu->proto->init_speed;
+ } else {
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ }
+
+ return speed;
+}
+
+static int qca_check_speeds(struct hci_uart *hu)
+{
+ struct qca_serdev *qcadev;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
+ !qca_get_speed(hu, QCA_OPER_SPEED))
+ return -EINVAL;
+ } else {
+ if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
+ !qca_get_speed(hu, QCA_OPER_SPEED))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
+{
+ unsigned int speed, qca_baudrate;
+ int ret;
+
+ if (speed_type == QCA_INIT_SPEED) {
+ speed = qca_get_speed(hu, QCA_INIT_SPEED);
+ if (speed)
+ host_set_baudrate(hu, speed);
+ } else {
+ speed = qca_get_speed(hu, QCA_OPER_SPEED);
+ if (!speed)
+ return 0;
+
+ qca_baudrate = qca_get_baudrate_value(speed);
+ bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
+ ret = qca_set_baudrate(hu->hdev, qca_baudrate);
+ if (ret)
+ return ret;
+
+ host_set_baudrate(hu, speed);
+ }
+
+ return 0;
+}
+
+static int qca_wcn3990_init(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ int ret;
+
+	/* Forcefully enable the wcn3990 to enter boot mode. */
+ host_set_baudrate(hu, 2400);
+ ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ if (ret)
+ return ret;
+
+ qca_set_speed(hu, QCA_INIT_SPEED);
+ ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE);
+ if (ret)
+ return ret;
+
+ /* Wait for 100 ms for SoC to boot */
+ msleep(100);
+
+	/* Now the device is ready to communicate with the host. To sync
+	 * the host with the device we need to reopen the port. Without
+	 * this, we will have RTS and CTS synchronization issues.
+	 */
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hu->hdev, "failed to open port");
+ return ret;
+ }
+
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ struct qca_serdev *qcadev;
int ret;
+ int soc_ver = 0;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
- bt_dev_info(hdev, "ROME setup");
+ ret = qca_check_speeds(hu);
+ if (ret)
+ return ret;
/* Patch downloading has to be done without IBS mode */
clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
- /* Setup initial baudrate */
- speed = 0;
- if (hu->init_speed)
- speed = hu->init_speed;
- else if (hu->proto->init_speed)
- speed = hu->proto->init_speed;
+ if (qcadev->btsoc_type == QCA_WCN3990) {
+ bt_dev_info(hdev, "setting up wcn3990");
+ ret = qca_wcn3990_init(hu);
+ if (ret)
+ return ret;
- if (speed)
- host_set_baudrate(hu, speed);
+ ret = qca_read_soc_version(hdev, &soc_ver);
+ if (ret)
+ return ret;
+ } else {
+ bt_dev_info(hdev, "ROME setup");
+ qca_set_speed(hu, QCA_INIT_SPEED);
+ }
/* Setup user speed if needed */
- speed = 0;
- if (hu->oper_speed)
- speed = hu->oper_speed;
- else if (hu->proto->oper_speed)
- speed = hu->proto->oper_speed;
-
+ speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (speed) {
+ ret = qca_set_speed(hu, QCA_OPER_SPEED);
+ if (ret)
+ return ret;
+
qca_baudrate = qca_get_baudrate_value(speed);
+ }
- bt_dev_info(hdev, "Set UART speed to %d", speed);
- ret = qca_set_baudrate(hdev, qca_baudrate);
- if (ret) {
- bt_dev_err(hdev, "Failed to change the baud rate (%d)",
- ret);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ /* Get QCA version information */
+ ret = qca_read_soc_version(hdev, &soc_ver);
+ if (ret)
return ret;
- }
- host_set_baudrate(hu, speed);
}
+ bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */
- ret = qca_uart_setup_rome(hdev, qca_baudrate);
+ ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
@@ -1002,9 +1221,123 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_vreg_data qca_soc_data = {
+ .soc_type = QCA_WCN3990,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 1800000, 1900000, 15000 },
+ { "vddxo", 1800000, 1900000, 80000 },
+ { "vddrf", 1300000, 1350000, 300000 },
+ { "vddch0", 3300000, 3400000, 450000 },
+ },
+ .num_vregs = 4,
+};
+
+static void qca_power_shutdown(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ host_set_baudrate(hu, 2400);
+ qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ qca_power_setup(hu, false);
+}
+
+static int qca_enable_regulator(struct qca_vreg vregs,
+ struct regulator *regulator)
+{
+ int ret;
+
+ ret = regulator_set_voltage(regulator, vregs.min_uV,
+ vregs.max_uV);
+ if (ret)
+ return ret;
+
+ if (vregs.load_uA)
+ ret = regulator_set_load(regulator,
+ vregs.load_uA);
+
+ if (ret)
+ return ret;
+
+ return regulator_enable(regulator);
+}
+
+static void qca_disable_regulator(struct qca_vreg vregs,
+ struct regulator *regulator)
+{
+ regulator_disable(regulator);
+ regulator_set_voltage(regulator, 0, vregs.max_uV);
+ if (vregs.load_uA)
+ regulator_set_load(regulator, 0);
+}
+
+static int qca_power_setup(struct hci_uart *hu, bool on)
+{
+ struct qca_vreg *vregs;
+ struct regulator_bulk_data *vreg_bulk;
+ struct qca_serdev *qcadev;
+ int i, num_vregs, ret = 0;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
+ !qcadev->bt_power->vreg_bulk)
+ return -EINVAL;
+
+ vregs = qcadev->bt_power->vreg_data->vregs;
+ vreg_bulk = qcadev->bt_power->vreg_bulk;
+ num_vregs = qcadev->bt_power->vreg_data->num_vregs;
+ BT_DBG("on: %d", on);
+ if (on && !qcadev->bt_power->vregs_on) {
+ for (i = 0; i < num_vregs; i++) {
+ ret = qca_enable_regulator(vregs[i],
+ vreg_bulk[i].consumer);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+			BT_ERR("failed to enable regulator: %s", vregs[i].name);
+ /* turn off regulators which are enabled */
+ for (i = i - 1; i >= 0; i--)
+ qca_disable_regulator(vregs[i],
+ vreg_bulk[i].consumer);
+ } else {
+ qcadev->bt_power->vregs_on = true;
+ }
+ } else if (!on && qcadev->bt_power->vregs_on) {
+		/* turn off regulators in reverse order */
+		for (i = num_vregs - 1; i >= 0; i--)
+ qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
+
+ qcadev->bt_power->vregs_on = false;
+ }
+
+ return ret;
+}
+
+static int qca_init_regulators(struct qca_power *qca,
+ const struct qca_vreg *vregs, size_t num_vregs)
+{
+ int i;
+
+	qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
+				      sizeof(struct regulator_bulk_data),
+				      GFP_KERNEL);
+ if (!qca->vreg_bulk)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vregs; i++)
+ qca->vreg_bulk[i].supply = vregs[i].name;
+
+ return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
+}
+
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
+ const struct qca_vreg_data *data;
int err;
qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
@@ -1012,47 +1345,84 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->serdev_hu.serdev = serdev;
+ data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
+ if (data && data->soc_type == QCA_WCN3990) {
+ qcadev->btsoc_type = QCA_WCN3990;
+ qcadev->bt_power = devm_kzalloc(&serdev->dev,
+ sizeof(struct qca_power),
+ GFP_KERNEL);
+ if (!qcadev->bt_power)
+ return -ENOMEM;
+
+ qcadev->bt_power->dev = &serdev->dev;
+ qcadev->bt_power->vreg_data = data;
+ err = qca_init_regulators(qcadev->bt_power, data->vregs,
+ data->num_vregs);
+ if (err) {
+			BT_ERR("Failed to init regulators: %d", err);
+ goto out;
+ }
- qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en)) {
- dev_err(&serdev->dev, "failed to acquire enable gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ qcadev->bt_power->vregs_on = false;
- qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
- if (IS_ERR(qcadev->susclk)) {
- dev_err(&serdev->dev, "failed to acquire clk\n");
- return PTR_ERR(qcadev->susclk);
- }
+ device_property_read_u32(&serdev->dev, "max-speed",
+ &qcadev->oper_speed);
+ if (!qcadev->oper_speed)
+ BT_DBG("UART will pick default operating speed");
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err) {
+ BT_ERR("wcn3990 serdev registration failed");
+ goto out;
+ }
+ } else {
+ qcadev->btsoc_type = QCA_ROME;
+ qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(qcadev->bt_en)) {
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
+ }
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
+ qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+ dev_err(&serdev->dev, "failed to acquire clk\n");
+ return PTR_ERR(qcadev->susclk);
+ }
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err)
- clk_disable_unprepare(qcadev->susclk);
+ err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(qcadev->susclk);
+ if (err)
+ return err;
+
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err)
+ clk_disable_unprepare(qcadev->susclk);
+ }
+
+out:
+	return err;
- return err;
}
static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
- hci_uart_unregister_device(&qcadev->serdev_hu);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ qca_power_shutdown(qcadev->serdev_hu.hdev);
+ else
+ clk_disable_unprepare(qcadev->susclk);
- clk_disable_unprepare(qcadev->susclk);
+ hci_uart_unregister_device(&qcadev->serdev_hu);
}
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
+ { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d1c0b60e9326..1851112ccc29 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -103,6 +103,16 @@ config SIMPLE_PM_BUS
Controller (BSC, sometimes called "LBSC within Bus Bridge", or
"External Bus Interface") as found on several Renesas ARM SoCs.
+config SUN50I_DE2_BUS
+ bool "Allwinner A64 DE2 Bus Driver"
+ default ARM64
+ depends on ARCH_SUNXI
+ select SUNXI_SRAM
+ help
+	  Say y here to enable support for the Allwinner A64 DE2 bus driver.
+	  It's mostly transparent, but an SRAM region needs to be claimed in
+	  the SRAM controller to make all the blocks in the DE2 part
+	  accessible.
+
config SUNXI_RSB
tristate "Allwinner sunXi Reduced Serial Bus Driver"
default MACH_SUN8I || MACH_SUN9I || ARM64
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index b8f036cca7ff..ca300b1914ce 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
+obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 3d56ebcda720..6a94aa6a22c2 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -45,6 +45,8 @@ static const struct imx_weim_devtype imx51_weim_devtype = {
.cs_stride = 0x18,
};
+#define MAX_CS_REGS_COUNT 6
+
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
{ .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
@@ -112,9 +114,12 @@ err:
static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
const struct imx_weim_devtype *devtype)
{
- u32 cs_idx, value[devtype->cs_regs_count];
+ u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
+ if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
+ return -EINVAL;
+
/* get the CS index from this child node's "reg" property. */
ret = of_property_read_u32(np, "reg", &cs_idx);
if (ret)
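This hunk is part of the kernel-wide VLA removal: the on-stack array gets a compile-time worst-case bound plus a runtime guard instead of a size chosen at run time. The general pattern, with hypothetical names:

    #define MAX_REGS 6	/* worst case across all supported devtypes */

    static int fill_regs(unsigned int count, u32 *out)
    {
    	u32 value[MAX_REGS];	/* fixed bound, no VLA */

    	if (WARN_ON(count > MAX_REGS))
    		return -EINVAL;
    	/* ... compute value[0..count-1] and copy to out ... */
    	return 0;
    }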
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
new file mode 100644
index 000000000000..672518741f86
--- /dev/null
+++ b/drivers/bus/sun50i-de2.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner A64 Display Engine 2.0 Bus Driver
+ *
+ * Copyright (C) 2018 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+static int sun50i_de2_bus_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = sunxi_sram_claim(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
+ return ret;
+ }
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int sun50i_de2_bus_remove(struct platform_device *pdev)
+{
+ sunxi_sram_release(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id sun50i_de2_bus_of_match[] = {
+ { .compatible = "allwinner,sun50i-a64-de2", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sun50i_de2_bus_driver = {
+ .probe = sun50i_de2_bus_probe,
+ .remove = sun50i_de2_bus_remove,
+ .driver = {
+ .name = "sun50i-de2-bus",
+ .of_match_table = sun50i_de2_bus_of_match,
+ },
+};
+
+builtin_platform_driver(sun50i_de2_bus_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 1cc29629d238..c9bac9dc4637 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -23,11 +23,14 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
#include <dt-bindings/bus/ti-sysc.h>
+#define MAX_MODULE_SOFTRESET_WAIT 10000
+
static const char * const reg_names[] = { "rev", "sysc", "syss", };
enum sysc_clocks {
@@ -88,6 +91,11 @@ struct sysc {
struct delayed_work idle_work;
};
+void sysc_write(struct sysc *ddata, int offset, u32 value)
+{
+ writel_relaxed(value, ddata->module_va + offset);
+}
+
static u32 sysc_read(struct sysc *ddata, int offset)
{
if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
@@ -169,9 +177,9 @@ static int sysc_get_clocks(struct sysc *ddata)
const char *name;
int nr_fck = 0, nr_ick = 0, i, error = 0;
- ddata->clock_roles = devm_kzalloc(ddata->dev,
- sizeof(*ddata->clock_roles) *
+ ddata->clock_roles = devm_kcalloc(ddata->dev,
SYSC_MAX_CLOCKS,
+ sizeof(*ddata->clock_roles),
GFP_KERNEL);
if (!ddata->clock_roles)
return -ENOMEM;
@@ -200,8 +208,8 @@ static int sysc_get_clocks(struct sysc *ddata)
return -EINVAL;
}
- ddata->clocks = devm_kzalloc(ddata->dev,
- sizeof(*ddata->clocks) * ddata->nr_clocks,
+ ddata->clocks = devm_kcalloc(ddata->dev,
+ ddata->nr_clocks, sizeof(*ddata->clocks),
GFP_KERNEL);
if (!ddata->clocks)
return -ENOMEM;
@@ -943,6 +951,36 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+static int sysc_reset(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_SYSCONFIG];
+ int val;
+
+ if (ddata->legacy_mode || offset < 0 ||
+ ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ return 0;
+
+ /*
+ * Currently we only support reset status in sysstatus.
+ * Warn and return an error in all other cases.
+ */
+ if (!ddata->cfg.syss_mask) {
+ dev_err(ddata->dev, "No ti,syss-mask. Reset failed\n");
+ return -EINVAL;
+ }
+
+ val = sysc_read(ddata, offset);
+ val |= (0x1 << ddata->cap->regbits->srst_shift);
+ sysc_write(ddata, offset, val);
+
+ /* Poll on reset status */
+ offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ return readl_poll_timeout(ddata->module_va + offset, val,
+ (val & ddata->cfg.syss_mask) == 0x0,
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+}
+
/* At this point the module is configured enough to read the revision */
static int sysc_init_module(struct sysc *ddata)
{
@@ -960,6 +998,14 @@ static int sysc_init_module(struct sysc *ddata)
return 0;
}
+ error = sysc_reset(ddata);
+ if (error) {
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+ pm_runtime_put_sync(ddata->dev);
+
+ return error;
+ }
+
ddata->revision = sysc_read_revision(ddata);
pm_runtime_put_sync(ddata->dev);
@@ -1552,6 +1598,23 @@ static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
.regbits = &sysc_regbits_omap4_usb_host_fs,
};
+static const struct sysc_regbits sysc_regbits_dra7_mcan = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = -ENODEV,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = 4,
+ .srst_shift = 0,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_dra7_mcan = {
+ .type = TI_SYSC_DRA7_MCAN,
+ .sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
+ .regbits = &sysc_regbits_dra7_mcan,
+};
+
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
@@ -1743,6 +1806,7 @@ static const struct of_device_id sysc_match[] = {
{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
{ .compatible = "ti,sysc-usb-host-fs",
.data = &sysc_omap4_usb_host_fs, },
+ { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
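sysc_reset() above sets the SOFTRESET bit in SYSCONFIG and then polls SYSSTATUS through readl_poll_timeout() from <linux/iopoll.h>. The helper's shape in isolation (base, STATUS and DONE_MASK are placeholders):

    #include <linux/iopoll.h>

    u32 val;
    /* sample every 100 us, give up with -ETIMEDOUT after 10 ms */
    int error = readl_poll_timeout(base + STATUS, val,
    			       (val & DONE_MASK) == 0, 100, 10000);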
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a78b8e7085e9..113fc6edb2b0 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -282,6 +282,7 @@
#include <linux/blkdev.h>
#include <linux/times.h>
#include <linux/uaccess.h>
+#include <scsi/scsi_common.h>
#include <scsi/scsi_request.h>
/* used to tell the module to turn on full debugging messages */
@@ -345,10 +346,10 @@ static LIST_HEAD(cdrom_list);
int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
- if (cgc->sense) {
- cgc->sense->sense_key = 0x05;
- cgc->sense->asc = 0x20;
- cgc->sense->ascq = 0x00;
+ if (cgc->sshdr) {
+ cgc->sshdr->sense_key = 0x05;
+ cgc->sshdr->asc = 0x20;
+ cgc->sshdr->ascq = 0x00;
}
cgc->stat = -EIO;
@@ -2222,9 +2223,12 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
blk_execute_rq(q, cdi->disk, rq, 0);
if (scsi_req(rq)->result) {
- struct request_sense *s = req->sense;
+ struct scsi_sense_hdr sshdr;
+
ret = -EIO;
- cdi->last_sense = s->sense_key;
+ scsi_normalize_sense(req->sense, req->sense_len,
+ &sshdr);
+ cdi->last_sense = sshdr.sense_key;
}
if (blk_rq_unmap_user(bio))
@@ -2943,7 +2947,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
struct cdrom_msf msf;
int blocksize = 0, format = 0, lba;
int ret;
@@ -2971,13 +2975,13 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
if (cgc->buffer == NULL)
return -ENOMEM;
- memset(&sense, 0, sizeof(sense));
- cgc->sense = &sense;
+ memset(&sshdr, 0, sizeof(sshdr));
+ cgc->sshdr = &sshdr;
cgc->data_direction = CGC_DATA_READ;
ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
- if (ret && sense.sense_key == 0x05 &&
- sense.asc == 0x20 &&
- sense.ascq == 0x00) {
+ if (ret && sshdr.sense_key == 0x05 &&
+ sshdr.asc == 0x20 &&
+ sshdr.ascq == 0x00) {
/*
* SCSI-II devices are not required to support
* READ_CD, so let's try switching block size
@@ -2986,7 +2990,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
ret = cdrom_switch_blocksize(cdi, blocksize);
if (ret)
goto out;
- cgc->sense = NULL;
+ cgc->sshdr = NULL;
ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
ret |= cdrom_switch_blocksize(cdi, blocksize);
}
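The cdrom conversion stops poking at raw struct request_sense fields and lets scsi_normalize_sense() decode whatever sense format the device returned. Standalone usage looks roughly like this (sense_buf and sense_len stand in for the driver's buffer):

    #include <scsi/scsi_common.h>

    struct scsi_sense_hdr sshdr;

    /* handles both fixed and descriptor sense formats */
    if (scsi_normalize_sense(sense_buf, sense_len, &sshdr))
    	pr_debug("key %#x asc %#x ascq %#x\n",
    		 sshdr.sense_key, sshdr.asc, sshdr.ascq);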
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 212f447938ae..ce277ee0a28a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -554,3 +554,17 @@ config ADI
endmenu
+config RANDOM_TRUST_CPU
+ bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+ depends on X86 || S390 || PPC
+ default n
+ help
+ Assume that the CPU manufacturer (e.g., Intel or AMD for RDSEED or
+ RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
+ for the purposes of initializing Linux's CRNG. Since this is not
+ something that can be independently audited, this amounts to trusting
+ that CPU manufacturer (perhaps with the insistence or mandate
+ of a Nation State's intelligence or law enforcement agencies)
+ has not installed a hidden back door to compromise the CPU's
+ random number generation facilities.
+
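The mechanism this option gates shows up in the random.c hunk further down: crng_initialize() pulls each state word from the architectural seed source and only marks the CRNG initialized if every word succeeded. The primitive in isolation (a sketch):

    unsigned long seed;

    /* e.g. RDSEED on x86; returns false when no entropy was available */
    if (!arch_get_random_seed_long(&seed))
    	seed = random_get_entropy();	/* weaker timer-based fallback */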
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 53fe633df1e8..c9bf2c219841 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,7 +11,7 @@
#include "agp.h"
-static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index e50c29c97ca7..c69e39fdd02b 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
/* Address to map to */
pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
- aperturebase = tmp << 25;
+ aperturebase = (u64)tmp << 25;
aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
enable_gart_translation(hammer, gatt_table);
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
nb_order = (nb_order >> 1) & 7;
pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
- nb_aper = nb_base << 25;
+ nb_aper = (u64)nb_base << 25;
/* Northbridge seems to contain crap. Try the AGP bridge. */
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index be426eb2a353..4a22b4b41aef 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -579,7 +579,6 @@ hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
struct hpet_info *info)
{
struct hpet_timer __iomem *timer;
- struct hpet __iomem *hpet;
struct hpets *hpetp;
int err;
unsigned long v;
@@ -591,7 +590,6 @@ hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
case HPET_DPI:
case HPET_IRQFREQ:
timer = devp->hd_timer;
- hpet = devp->hd_hpet;
hpetp = devp->hd_hpets;
break;
case HPET_IE_ON:
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c34b257d852d..dac895dc01b9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -307,19 +307,6 @@ config HW_RANDOM_HISI
If unsure, say Y.
-config HW_RANDOM_MSM
- tristate "Qualcomm SoCs Random Number Generator support"
- depends on HW_RANDOM && ARCH_QCOM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Qualcomm SoCs.
-
- To compile this driver as a module, choose M here. the
- module will be called msm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 533e913c93d1..e35ec3ce3a20 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
-obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 661c82cde0f2..433426242b87 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 1947aed7c044..94235761955c 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -19,6 +19,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 250123bc4905..14730be54edf 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
deleted file mode 100644
index 841fee845ec9..000000000000
--- a/drivers/char/hw_random/msm-rng.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/hw_random.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-/* Device specific register offsets */
-#define PRNG_DATA_OUT 0x0000
-#define PRNG_STATUS 0x0004
-#define PRNG_LFSR_CFG 0x0100
-#define PRNG_CONFIG 0x0104
-
-/* Device specific register masks and config values */
-#define PRNG_LFSR_CFG_MASK 0x0000ffff
-#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
-#define PRNG_CONFIG_HW_ENABLE BIT(1)
-#define PRNG_STATUS_DATA_AVAIL BIT(0)
-
-#define MAX_HW_FIFO_DEPTH 16
-#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
-#define WORD_SZ 4
-
-struct msm_rng {
- void __iomem *base;
- struct clk *clk;
- struct hwrng hwrng;
-};
-
-#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
-
-static int msm_rng_enable(struct hwrng *hwrng, int enable)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- u32 val;
- int ret;
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- if (enable) {
- /* Enable PRNG only if it is not already enabled */
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- if (val & PRNG_CONFIG_HW_ENABLE)
- goto already_enabled;
-
- val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
- val &= ~PRNG_LFSR_CFG_MASK;
- val |= PRNG_LFSR_CFG_CLOCKS;
- writel(val, rng->base + PRNG_LFSR_CFG);
-
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val |= PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- } else {
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val &= ~PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- }
-
-already_enabled:
- clk_disable_unprepare(rng->clk);
- return 0;
-}
-
-static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- size_t currsize = 0;
- u32 *retdata = data;
- size_t maxsize;
- int ret;
- u32 val;
-
- /* calculate max size bytes to transfer back to caller */
- maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- /* read random data from hardware */
- do {
- val = readl_relaxed(rng->base + PRNG_STATUS);
- if (!(val & PRNG_STATUS_DATA_AVAIL))
- break;
-
- val = readl_relaxed(rng->base + PRNG_DATA_OUT);
- if (!val)
- break;
-
- *retdata++ = val;
- currsize += WORD_SZ;
-
- /* make sure we stay on 32bit boundary */
- if ((maxsize - currsize) < WORD_SZ)
- break;
- } while (currsize < maxsize);
-
- clk_disable_unprepare(rng->clk);
-
- return currsize;
-}
-
-static int msm_rng_init(struct hwrng *hwrng)
-{
- return msm_rng_enable(hwrng, 1);
-}
-
-static void msm_rng_cleanup(struct hwrng *hwrng)
-{
- msm_rng_enable(hwrng, 0);
-}
-
-static int msm_rng_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct msm_rng *rng;
- int ret;
-
- rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
- if (!rng)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, rng);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(rng->base))
- return PTR_ERR(rng->base);
-
- rng->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(rng->clk))
- return PTR_ERR(rng->clk);
-
- rng->hwrng.name = KBUILD_MODNAME,
- rng->hwrng.init = msm_rng_init,
- rng->hwrng.cleanup = msm_rng_cleanup,
- rng->hwrng.read = msm_rng_read,
-
- ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register hwrng\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct of_device_id msm_rng_of_match[] = {
- { .compatible = "qcom,prng", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_rng_of_match);
-
-static struct platform_driver msm_rng_driver = {
- .probe = msm_rng_probe,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(msm_rng_of_match),
- }
-};
-module_platform_driver(msm_rng_driver);
-
-MODULE_ALIAS("platform:" KBUILD_MODNAME);
-MODULE_AUTHOR("The Linux Foundation");
-MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 263a5bb8e605..791182aa8e04 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/random.h>
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index ad353be871bf..90ec010bffbd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
return 0;
out_err:
- ipmi_unregister_smi(new_smi->intf);
- new_smi->intf = NULL;
+ if (new_smi->intf) {
+ ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
kfree(init_name);
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index fbfc05e3f3d1..bb882ab161fe 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
{
unsigned long flags;
- int ret = 0;
+ int ret = -ENODATA;
u8 status;
spin_lock_irqsave(&kcs_bmc->lock, flags);
- if (!kcs_bmc->running) {
- kcs_force_abort(kcs_bmc);
- ret = -ENODEV;
- goto out_unlock;
- }
-
- status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
- switch (status) {
- case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
- kcs_bmc_handle_cmd(kcs_bmc);
- break;
-
- case KCS_STATUS_IBF:
- kcs_bmc_handle_data(kcs_bmc);
- break;
+ status = read_status(kcs_bmc);
+ if (status & KCS_STATUS_IBF) {
+ if (!kcs_bmc->running)
+ kcs_force_abort(kcs_bmc);
+ else if (status & KCS_STATUS_CMD_DAT)
+ kcs_bmc_handle_cmd(kcs_bmc);
+ else
+ kcs_bmc_handle_data(kcs_bmc);
- default:
- ret = -ENODATA;
- break;
+ ret = 0;
}
-out_unlock:
spin_unlock_irqrestore(&kcs_bmc->lock, flags);
return ret;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ffeb60d3434c..7b4e4de778e4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
#endif
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
+ vma_set_anonymous(vma);
return 0;
}
@@ -765,6 +766,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
switch (orig) {
case SEEK_CUR:
offset += file->f_pos;
+ /* fall through */
case SEEK_SET:
/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
if ((unsigned long long)offset >= -MAX_ERRNO) {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 370e0a64ead1..a219964cb770 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1748,8 +1748,6 @@ static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int cm4000_config(struct pcmcia_device * link, int devno)
{
- struct cm4000_dev *dev;
-
link->config_flags |= CONF_AUTO_SET_IO;
/* read the config-tuples */
@@ -1759,8 +1757,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
if (pcmcia_enable_device(link))
goto cs_release;
- dev = link->priv;
-
return 0;
cs_release:
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a8fb0020ba5c..bf5f99fc36f1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -402,7 +402,8 @@ static struct poolinfo {
/*
* Static global variables
*/
-static DECLARE_WAIT_QUEUE_HEAD(random_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -721,8 +722,8 @@ retry:
/* should we wake readers? */
if (entropy_bits >= random_read_wakeup_bits &&
- wq_has_sleeper(&random_wait)) {
- wake_up_interruptible_poll(&random_wait, POLLIN);
+ wq_has_sleeper(&random_read_wait)) {
+ wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
/* If the input pool is getting full, send some
@@ -781,6 +782,7 @@ static void invalidate_batched_entropy(void);
static void crng_initialize(struct crng_state *crng)
{
int i;
+ int arch_init = 1;
unsigned long rv;
memcpy(&crng->state[0], "expand 32-byte k", 16);
@@ -791,10 +793,18 @@ static void crng_initialize(struct crng_state *crng)
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
+ !arch_get_random_long(&rv)) {
rv = random_get_entropy();
+ arch_init = 0;
+ }
crng->state[i] ^= rv;
}
+#ifdef CONFIG_RANDOM_TRUST_CPU
+ if (arch_init) {
+ crng_init = 2;
+ pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+ }
+#endif
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
@@ -1121,8 +1131,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
- preempt_disable();
-
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -1160,8 +1168,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 * and limit entropy estimate to 12 bits.
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
-
- preempt_enable();
}
void add_input_randomness(unsigned int type, unsigned int code,
@@ -1396,7 +1402,7 @@ retry:
trace_debit_entropy(r->name, 8 * ibytes);
if (ibytes &&
(r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
- wake_up_interruptible_poll(&random_wait, POLLOUT);
+ wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
@@ -1658,6 +1664,21 @@ int wait_for_random_bytes(void)
EXPORT_SYMBOL(wait_for_random_bytes);
/*
+ * Returns whether or not the urandom pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the
+ * get_random_{u32,u64,int,long} family of functions.
+ *
+ * Returns: true if the urandom pool has been seeded.
+ * false if the urandom pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
+
+/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
*
@@ -1724,30 +1745,31 @@ EXPORT_SYMBOL(del_random_ready_callback);
* key known by the NSA). So it's useful if we need the speed, but
* only if we're willing to trust the hardware manufacturer not to
* have put in a back door.
+ *
+ * Return number of bytes filled in.
*/
-void get_random_bytes_arch(void *buf, int nbytes)
+int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
+ int left = nbytes;
char *p = buf;
- trace_get_random_bytes_arch(nbytes, _RET_IP_);
- while (nbytes) {
+ trace_get_random_bytes_arch(left, _RET_IP_);
+ while (left) {
unsigned long v;
- int chunk = min(nbytes, (int)sizeof(unsigned long));
+ int chunk = min_t(int, left, sizeof(unsigned long));
if (!arch_get_random_long(&v))
break;
-
+
memcpy(p, &v, chunk);
p += chunk;
- nbytes -= chunk;
+ left -= chunk;
}
- if (nbytes)
- get_random_bytes(p, nbytes);
+ return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
-
/*
* init_std_data - initialize pool with system data
*
@@ -1838,7 +1860,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
if (nonblock)
return -EAGAIN;
- wait_event_interruptible(random_wait,
+ wait_event_interruptible(random_read_wait,
ENTROPY_BITS(&input_pool) >=
random_read_wakeup_bits);
if (signal_pending(current))
@@ -1875,17 +1897,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return ret;
}
-static struct wait_queue_head *
-random_get_poll_head(struct file *file, __poll_t events)
-{
- return &random_wait;
-}
-
static __poll_t
-random_poll_mask(struct file *file, __poll_t events)
+random_poll(struct file *file, poll_table * wait)
{
- __poll_t mask = 0;
+ __poll_t mask;
+ poll_wait(file, &random_read_wait, wait);
+ poll_wait(file, &random_write_wait, wait);
+ mask = 0;
if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
mask |= EPOLLIN | EPOLLRDNORM;
if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
@@ -1897,14 +1916,22 @@ static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
size_t bytes;
- __u32 buf[16];
+ __u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
+ int b, i = 0;
+
bytes = min(count, sizeof(buf));
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
+ for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+ if (!arch_get_random_int(&t))
+ break;
+ buf[i] ^= t;
+ }
+
count -= bytes;
p += bytes;
@@ -1992,8 +2019,7 @@ static int random_fasync(int fd, struct file *filp, int on)
const struct file_operations random_fops = {
.read = random_read,
.write = random_write,
- .get_poll_head = random_get_poll_head,
- .poll_mask = random_poll_mask,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
@@ -2326,7 +2352,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_interruptible(random_wait, kthread_should_stop() ||
+ wait_event_interruptible(random_write_wait, kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
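Dropping get_poll_head/poll_mask in favor of a plain ->poll works because a ->poll implementation may register on any number of wait queues before computing its mask. The canonical shape, with hypothetical queue and predicate names:

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
    	__poll_t mask = 0;

    	poll_wait(file, &read_wait, wait);	/* registration only, never blocks */
    	poll_wait(file, &write_wait, wait);
    	if (data_available())
    		mask |= EPOLLIN | EPOLLRDNORM;
    	if (room_to_write())
    		mask |= EPOLLOUT | EPOLLWRNORM;
    	return mask;
    }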
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 94fedeeec035..4948c8bda6b1 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -193,14 +193,6 @@ static unsigned long rtc_freq; /* Current periodic IRQ rate */
static unsigned long rtc_irq_data; /* our output to the world */
static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
-#ifdef RTC_IRQ
-/*
- * rtc_task_lock nests inside rtc_lock.
- */
-static DEFINE_SPINLOCK(rtc_task_lock);
-static rtc_task_t *rtc_callback;
-#endif
-
/*
* If this driver ever becomes modularised, it will be really nice
* to make the epoch retain its value across module reload...
@@ -264,11 +256,6 @@ static irqreturn_t rtc_interrupt(int irq, void *dev_id)
spin_unlock(&rtc_lock);
- /* Now do the rest of the actions */
- spin_lock(&rtc_task_lock);
- if (rtc_callback)
- rtc_callback->func(rtc_callback->private_data);
- spin_unlock(&rtc_task_lock);
wake_up_interruptible(&rtc_wait);
kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0a62c19937b6..46caadca916a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -81,42 +81,66 @@ void tpm_put_ops(struct tpm_chip *chip)
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
- * tpm_chip_find_get() - find and reserve a TPM chip
+ * tpm_default_chip() - find a TPM chip and get a reference to it
+ */
+struct tpm_chip *tpm_default_chip(void)
+{
+ struct tpm_chip *chip, *res = NULL;
+ int chip_num = 0;
+ int chip_prev;
+
+ mutex_lock(&idr_lock);
+
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip) {
+ get_device(&chip->dev);
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+
+ mutex_unlock(&idr_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(tpm_default_chip);
+
+/**
+ * tpm_find_get_ops() - find and reserve a TPM chip
* @chip: a &struct tpm_chip instance, %NULL for the default chip
*
* Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_chip_put_ops() after use.
+ * be released with tpm_put_ops() after use.
+ * This function is for internal use only. It supports existing TPM callers
+ * by accepting NULL, but those callers should be converted to pass in a chip
+ * directly.
*
* Return:
* A reserved &struct tpm_chip instance.
* %NULL if a chip is not found.
* %NULL if the chip is not available.
*/
-struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
{
- struct tpm_chip *res = NULL;
- int chip_num = 0;
- int chip_prev;
-
- mutex_lock(&idr_lock);
+ int rc;
- if (!chip) {
- do {
- chip_prev = chip_num;
- chip = idr_get_next(&dev_nums_idr, &chip_num);
- if (chip && !tpm_try_get_ops(chip)) {
- res = chip;
- break;
- }
- } while (chip_prev != chip_num);
- } else {
+ if (chip) {
if (!tpm_try_get_ops(chip))
- res = chip;
+ return chip;
+ return NULL;
}
- mutex_unlock(&idr_lock);
-
- return res;
+ chip = tpm_default_chip();
+ if (!chip)
+ return NULL;
+ rc = tpm_try_get_ops(chip);
+ /* release additional reference we got from tpm_default_chip() */
+ put_device(&chip->dev);
+ if (rc)
+ return NULL;
+ return chip;
}
/**
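Unlike tpm_find_get_ops(), tpm_default_chip() only takes a device reference and does not reserve the ops, so a caller pairs it with put_device() once the chip pointer is no longer needed (sketch):

    struct tpm_chip *chip = tpm_default_chip();

    if (chip) {
    	/* ... use the chip with the tpm_* API ... */
    	put_device(&chip->dev);	/* drop the reference when done */
    }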
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index e32f6e85dc6d..1a803b0cf980 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,7 +29,6 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
-#include <linux/pm_runtime.h>
#include <linux/tpm_eventlog.h>
#include "tpm.h"
@@ -369,10 +368,13 @@ err_len:
return -EINVAL;
}
-static int tpm_request_locality(struct tpm_chip *chip)
+static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
if (!chip->ops->request_locality)
return 0;
@@ -385,10 +387,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
return 0;
}
-static void tpm_relinquish_locality(struct tpm_chip *chip)
+static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;
+ if (flags & TPM_TRANSMIT_NESTED)
+ return;
+
if (!chip->ops->relinquish_locality)
return;
@@ -399,6 +404,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
chip->locality = -1;
}
+static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip,
struct tpm_space *space,
u8 *buf, size_t bufsiz,
@@ -423,7 +450,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
TSS2_RESMGR_TPM_RC_LAYER);
- return bufsiz;
+ return sizeof(*header);
}
if (bufsiz > TPM_BUFSIZE)
@@ -439,24 +466,24 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
return -E2BIG;
}
- if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
mutex_lock(&chip->tpm_mutex);
-
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, true);
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;
- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
- rc = tpm_request_locality(chip);
+ if (need_locality) {
+ rc = tpm_request_locality(chip, flags);
if (rc < 0)
goto out_no_locality;
}
- if (chip->dev.parent)
- pm_runtime_get_sync(chip->dev.parent);
+ rc = tpm_cmd_ready(chip, flags);
+ if (rc)
+ goto out;
rc = tpm2_prepare_space(chip, space, ordinal, buf);
if (rc)
@@ -516,19 +543,22 @@ out_recv:
}
rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
+ if (rc)
+ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
out:
- if (chip->dev.parent)
- pm_runtime_put_sync(chip->dev.parent);
+ rc = tpm_go_idle(chip, flags);
+ if (rc)
+ goto out;
if (need_locality)
- tpm_relinquish_locality(chip);
+ tpm_relinquish_locality(chip, flags);
out_no_locality:
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
- if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
mutex_unlock(&chip->tpm_mutex);
return rc ? rc : len;
}
@@ -930,7 +960,7 @@ int tpm_is_tpm2(struct tpm_chip *chip)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -954,7 +984,7 @@ int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -1013,7 +1043,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
u32 count = 0;
int i;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1142,7 +1172,7 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1262,7 +1292,7 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1324,7 +1354,7 @@ int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
@@ -1352,7 +1382,7 @@ int tpm_unseal_trusted(struct tpm_chip *chip,
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4426649e431c..f3501d05264f 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -424,23 +424,24 @@ struct tpm_buf {
u8 *data;
};
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
{
struct tpm_input_header *head;
+ head = (struct tpm_input_header *)buf->data;
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+}
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
buf->data_page = alloc_page(GFP_HIGHUSER);
if (!buf->data_page)
return -ENOMEM;
buf->flags = 0;
buf->data = kmap(buf->data_page);
-
- head = (struct tpm_input_header *) buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-
+ tpm_buf_reset(buf, tag, ordinal);
return 0;
}
@@ -511,9 +512,17 @@ extern const struct file_operations tpm_fops;
extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;
+/**
+ * enum tpm_transmit_flags - flags for tpm_transmit()
+ *
+ * @TPM_TRANSMIT_UNLOCKED: do not lock the chip
+ * @TPM_TRANSMIT_NESTED: discard setup steps (power management,
+ * locality) including locking (i.e. implicit
+ * UNLOCKED)
+ */
enum tpm_transmit_flags {
TPM_TRANSMIT_UNLOCKED = BIT(0),
- TPM_TRANSMIT_RAW = BIT(1),
+ TPM_TRANSMIT_NESTED = BIT(1),
};
ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
@@ -538,7 +547,7 @@ static inline void tpm_msleep(unsigned int delay_msec)
delay_msec * 1000);
};
-struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip);
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
@@ -569,7 +578,7 @@ static inline u32 tpm2_rc_value(u32 rc)
int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
struct tpm2_digest *digests);
-int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
unsigned int flags);
int tpm2_seal_trusted(struct tpm_chip *chip,
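Splitting header setup into tpm_buf_reset() lets a single page allocation be reused across command retries, which is what the tpm2_get_random() rewrite below relies on. A sketch mirroring that loop (num_bytes and the loop condition are placeholders):

    struct tpm_buf buf;
    int rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);

    if (rc)
    	return rc;
    do {
    	/* rewrite the header in place, keeping the same page */
    	tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
    	tpm_buf_append_u16(&buf, num_bytes);
    	/* ... transmit, copy the response out, adjust num_bytes ... */
    } while (/* more bytes wanted */ 0);
    tpm_buf_destroy(&buf);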
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d31b09099216..c31b490bd41d 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -27,46 +27,6 @@ enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
};
-struct tpm2_startup_in {
- __be16 startup_type;
-} __packed;
-
-struct tpm2_get_tpm_pt_in {
- __be32 cap_id;
- __be32 property_id;
- __be32 property_cnt;
-} __packed;
-
-struct tpm2_get_tpm_pt_out {
- u8 more_data;
- __be32 subcap_id;
- __be32 property_cnt;
- __be32 property_id;
- __be32 value;
-} __packed;
-
-struct tpm2_get_random_in {
- __be16 size;
-} __packed;
-
-struct tpm2_get_random_out {
- __be16 size;
- u8 buffer[TPM_MAX_RNG_DATA];
-} __packed;
-
-union tpm2_cmd_params {
- struct tpm2_startup_in startup_in;
- struct tpm2_get_tpm_pt_in get_tpm_pt_in;
- struct tpm2_get_tpm_pt_out get_tpm_pt_out;
- struct tpm2_get_random_in getrandom_in;
- struct tpm2_get_random_out getrandom_out;
-};
-
-struct tpm2_cmd {
- tpm_cmd_header header;
- union tpm2_cmd_params params;
-} __packed;
-
struct tpm2_hash {
unsigned int crypto_id;
unsigned int tpm_id;
@@ -321,82 +281,72 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
}
-#define TPM2_GETRANDOM_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_get_random_in))
-
-static const struct tpm_input_header tpm2_getrandom_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_GETRANDOM_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_GET_RANDOM)
-};
+struct tpm2_get_random_out {
+ __be16 size;
+ u8 buffer[TPM_MAX_RNG_DATA];
+} __packed;
/**
* tpm2_get_random() - get random bytes from the TPM RNG
*
- * @chip: TPM chip to use
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
+ * @chip: a &tpm_chip instance
+ * @dest: destination buffer
+ * @max: the max number of random bytes to pull
*
* Return:
- * Size of the output buffer, or -EIO on error.
+ * size of the buffer on success,
+ * -errno otherwise
*/
-int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
- struct tpm2_cmd cmd;
- u32 recd, rlength;
- u32 num_bytes;
+ struct tpm2_get_random_out *out;
+ struct tpm_buf buf;
+ u32 recd;
+ u32 num_bytes = max;
int err;
int total = 0;
int retries = 5;
- u8 *dest = out;
+ u8 *dest_ptr = dest;
- num_bytes = min_t(u32, max, sizeof(cmd.params.getrandom_out.buffer));
-
- if (!out || !num_bytes ||
- max > sizeof(cmd.params.getrandom_out.buffer))
+ if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- do {
- cmd.header.in = tpm2_getrandom_header;
- cmd.params.getrandom_in.size = cpu_to_be16(num_bytes);
+ err = tpm_buf_init(&buf, 0, 0);
+ if (err)
+ return err;
- err = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd),
+ do {
+ tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_append_u16(&buf, num_bytes);
+ err = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
offsetof(struct tpm2_get_random_out,
buffer),
0, "attempting get random");
if (err)
- break;
+ goto out;
- recd = min_t(u32, be16_to_cpu(cmd.params.getrandom_out.size),
- num_bytes);
- rlength = be32_to_cpu(cmd.header.out.length);
- if (rlength < offsetof(struct tpm2_get_random_out, buffer) +
- recd)
- return -EFAULT;
- memcpy(dest, cmd.params.getrandom_out.buffer, recd);
+ out = (struct tpm2_get_random_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
+ if (tpm_buf_length(&buf) <
+ offsetof(struct tpm2_get_random_out, buffer) + recd) {
+ err = -EFAULT;
+ goto out;
+ }
+ memcpy(dest_ptr, out->buffer, recd);
- dest += recd;
+ dest_ptr += recd;
total += recd;
num_bytes -= recd;
} while (retries-- && total < max);
+ tpm_buf_destroy(&buf);
return total ? total : -EIO;
+out:
+ tpm_buf_destroy(&buf);
+ return err;
}
-#define TPM2_GET_TPM_PT_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_get_tpm_pt_in))
-
-#define TPM2_GET_TPM_PT_OUT_BODY_SIZE \
- sizeof(struct tpm2_get_tpm_pt_out)
-
-static const struct tpm_input_header tpm2_get_tpm_pt_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_GET_TPM_PT_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_GET_CAPABILITY)
-};
-
/**
* tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
* @chip: TPM chip to use
@@ -471,7 +421,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
{
unsigned int blob_len;
struct tpm_buf buf;
- u32 hash, rlength;
+ u32 hash;
int i;
int rc;
@@ -546,8 +496,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
rc = -E2BIG;
goto out;
}
- rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)->header.out.length);
- if (rlength < TPM_HEADER_SIZE + 4 + blob_len) {
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
rc = -EFAULT;
goto out;
}
@@ -657,7 +606,6 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
u16 data_len;
u8 *data;
int rc;
- u32 rlength;
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
if (rc)
@@ -685,9 +633,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
goto out;
}
- rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
- ->header.out.length);
- if (rlength < TPM_HEADER_SIZE + 6 + data_len) {
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
rc = -EFAULT;
goto out;
}
@@ -733,69 +679,71 @@ out:
return rc;
}
+struct tpm2_get_cap_out {
+ u8 more_data;
+ __be32 subcap_id;
+ __be32 property_cnt;
+ __be32 property_id;
+ __be32 value;
+} __packed;
+
/**
* tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
- * @chip: TPM chip to use.
+ * @chip: a &tpm_chip instance
* @property_id: property ID.
* @value: output variable.
* @desc: passed to tpm_transmit_cmd()
*
- * Return: Same as with tpm_transmit_cmd.
+ * Return:
+ * 0 on success,
+ * -errno or a TPM return code otherwise
*/
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
const char *desc)
{
- struct tpm2_cmd cmd;
+ struct tpm2_get_cap_out *out;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_get_tpm_pt_header;
- cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES);
- cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id);
- cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd),
- TPM2_GET_TPM_PT_OUT_BODY_SIZE, 0, desc);
- if (!rc)
- *value = be32_to_cpu(cmd.params.get_tpm_pt_out.value);
-
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, property_id);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ *value = be32_to_cpu(out->value);
+ }
+ tpm_buf_destroy(&buf);
return rc;
}
EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
-#define TPM2_SHUTDOWN_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_startup_in))
-
-static const struct tpm_input_header tpm2_shutdown_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_SHUTDOWN_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_SHUTDOWN)
-};
-
/**
- * tpm2_shutdown() - send shutdown command to the TPM chip
+ * tpm2_shutdown() - send a TPM shutdown command
+ *
+ * Sends a TPM shutdown command. The shutdown command is used at call
+ * sites where the system is going down. If it fails, there is not much
+ * that can be done except print an error message.
*
- * @chip: TPM chip to use.
- * @shutdown_type: shutdown type. The value is either
- * TPM_SU_CLEAR or TPM_SU_STATE.
+ * @chip: a &tpm_chip instance
+ * @shutdown_type: TPM_SU_CLEAR or TPM_SU_STATE.
*/
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
{
- struct tpm2_cmd cmd;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_shutdown_header;
- cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0,
- "stopping the TPM");
-
- /* In places where shutdown command is sent there's no much we can do
- * except print the error code on a system failure.
- */
- if (rc < 0 && rc != -EPIPE)
- dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
- rc);
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SHUTDOWN);
+ if (rc)
+ return;
+ tpm_buf_append_u16(&buf, shutdown_type);
+ tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+ "stopping the TPM");
+ tpm_buf_destroy(&buf);
}
/*
@@ -863,31 +811,37 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
}
/**
- * tpm2_probe() - probe TPM 2.0
- * @chip: TPM chip to use
+ * tpm2_probe() - probe for the TPM 2.0 protocol
+ * @chip: a &tpm_chip instance
*
- * Return: < 0 error and 0 on success.
+ * Send an idempotent TPM 2.0 command and see whether there is a TPM2 chip on
+ * the other end based on the response tag. This function sets the
+ * TPM_CHIP_FLAG_TPM2 flag if that is the case.
*
- * Send idempotent TPM 2.0 command and see whether TPM 2.0 chip replied based on
- * the reply tag.
+ * Return:
+ * 0 on success,
+ * -errno otherwise
*/
int tpm2_probe(struct tpm_chip *chip)
{
- struct tpm2_cmd cmd;
+ struct tpm_output_header *out;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_get_tpm_pt_header;
- cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES);
- cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100);
- cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0, NULL);
- if (rc < 0)
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
return rc;
-
- if (be16_to_cpu(cmd.header.out.tag) == TPM2_ST_NO_SESSIONS)
- chip->flags |= TPM_CHIP_FLAG_TPM2;
-
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ /* We ignore TPM return codes on purpose. */
+ if (rc >= 0) {
+ out = (struct tpm_output_header *)buf.data;
+ if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ }
+ tpm_buf_destroy(&buf);
return 0;
}
EXPORT_SYMBOL_GPL(tpm2_probe);
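None of this restructuring changes the in-kernel entry point: tpm_get_random() still resolves the default chip and dispatches to tpm2_get_random() on TPM 2.0 parts. Typical use (sketch):

    u8 rnd[32];
    int n = tpm_get_random(NULL, rnd, sizeof(rnd));	/* NULL = default chip */

    if (n < 0)
    	return n;	/* no TPM available or the command failed */
    /* n bytes of rnd are valid; may be fewer than requested */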
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 6122d3276f72..d2e101b32482 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -39,7 +39,7 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
if (space->session_tbl[i])
tpm2_flush_context_cmd(chip, space->session_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
}
}
@@ -84,7 +84,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
tpm_buf_append(&tbuf, &buf[*offset], body_size);
rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_NESTED, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -133,7 +133,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
tpm_buf_append_u32(&tbuf, handle);
rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_NESTED, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -170,7 +170,7 @@ static void tpm2_flush_space(struct tpm_chip *chip)
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
tpm2_flush_sessions(chip, space);
}
@@ -377,7 +377,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
return 0;
out_no_slots:
- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
+ tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_NESTED);
dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
phandle);
return -ENOMEM;
@@ -465,7 +465,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
return rc;
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
space->context_tbl[i] = ~0;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 34fbc6cb097b..36952ef98f90 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -132,7 +132,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
}
/**
- * crb_go_idle - request tpm crb device to go the idle state
+ * __crb_go_idle - request tpm crb device to go the idle state
*
* @dev: crb device
* @priv: crb private data
@@ -147,7 +147,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*
* Return: 0 always
*/
-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
@@ -163,11 +163,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
dev_warn(dev, "goIdle timed out\n");
return -ETIME;
}
+
return 0;
}
+static int crb_go_idle(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_go_idle(dev, priv);
+}
+
/**
- * crb_cmd_ready - request tpm crb device to enter ready state
+ * __crb_cmd_ready - request tpm crb device to enter ready state
*
* @dev: crb device
* @priv: crb private data
@@ -181,7 +190,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
*
 * Return: 0 on success, -ETIME on timeout.
*/
-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
@@ -200,6 +209,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
return 0;
}
+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_cmd_ready(dev, priv);
+}
+
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
@@ -401,6 +418,8 @@ static const struct tpm_class_ops tpm_crb = {
.send = crb_send,
.cancel = crb_cancel,
.req_canceled = crb_req_canceled,
+ .go_idle = crb_go_idle,
+ .cmd_ready = crb_cmd_ready,
.request_locality = crb_request_locality,
.relinquish_locality = crb_relinquish_locality,
.req_complete_mask = CRB_DRV_STS_COMPLETE,
@@ -520,7 +539,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv);
if (ret)
goto out_relinquish_locality;
@@ -565,7 +584,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;
- crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv);
out_relinquish_locality:
@@ -628,32 +647,7 @@ static int crb_acpi_add(struct acpi_device *device)
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;
- rc = __crb_request_locality(dev, priv, 0);
- if (rc)
- return rc;
-
- rc = crb_cmd_ready(dev, priv);
- if (rc)
- goto out;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- rc = tpm_chip_register(chip);
- if (rc) {
- crb_go_idle(dev, priv);
- pm_runtime_put_noidle(dev);
- pm_runtime_disable(dev);
- goto out;
- }
-
- pm_runtime_put_sync(dev);
-
-out:
- __crb_relinquish_locality(dev, priv, 0);
-
- return rc;
+ return tpm_chip_register(chip);
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -663,52 +657,11 @@ static int crb_acpi_remove(struct acpi_device *device)
tpm_chip_unregister(chip);
- pm_runtime_disable(dev);
-
return 0;
}
-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_go_idle(dev, priv);
-}
-
-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_cmd_ready(dev, priv);
-}
-
-static int __maybe_unused crb_pm_suspend(struct device *dev)
-{
- int ret;
-
- ret = tpm_pm_suspend(dev);
- if (ret)
- return ret;
-
- return crb_pm_runtime_suspend(dev);
-}
-
-static int __maybe_unused crb_pm_resume(struct device *dev)
-{
- int ret;
-
- ret = crb_pm_runtime_resume(dev);
- if (ret)
- return ret;
-
- return tpm_pm_resume(dev);
-}
-
static const struct dev_pm_ops crb_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
};
static const struct acpi_device_id crb_device_ids[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 6116cd05e228..9086edc9066b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
/* Lock the adapter for the duration of the whole sequence. */
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
if (tpm_dev.chip_type == SLB9645) {
/* use a combined read for newer chips
@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
}
out:
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* prepend the 'register address' to the buffer */
tpm_dev.buf[0] = addr;
@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
usleep_range(sleep_low, sleep_hi);
}
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
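The i2c_lock_adapter() calls above are replaced with i2c_lock_bus(..., I2C_LOCK_SEGMENT), which locks only the bus segment the client sits on instead of the root adapter, so a device behind a mux no longer stalls unrelated segments. A minimal sketch of the pattern, assuming a hypothetical client and buffer; note that __i2c_transfer() must be used inside the locked region, since i2c_transfer() would try to take the same lock again:

	struct i2c_adapter *adap = client->adapter;
	struct i2c_msg msg = {
		.addr = client->addr,
		.len = len,
		.buf = buf,
	};
	int ret;

	i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
	ret = __i2c_transfer(adap, &msg, 1);	/* unlocked variant */
	i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);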
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 8b46aaa9e049..d2345d9fd7b5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -875,6 +875,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
chip->acpi_dev_handle = acpi_dev_handle;
#endif
+ chip->hwrng.quality = priv->rng_quality;
+
/* Maximum timeouts */
chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX);
chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index f6e1dbe212a7..f48125f1e6e0 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -99,6 +99,7 @@ struct tpm_tis_data {
wait_queue_head_t int_queue;
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
+ unsigned short rng_quality;
};
struct tpm_tis_phy_ops {
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 424ff2fde1f2..9914f6973463 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -199,6 +199,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
static int tpm_tis_spi_probe(struct spi_device *dev)
{
struct tpm_tis_spi_phy *phy;
+ int irq;
phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
GFP_KERNEL);
@@ -211,7 +212,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy->iobuf)
return -ENOMEM;
- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
NULL);
}
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index e4f79f920450..87a0ce47f201 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -418,7 +418,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
proxy_dev->state |= STATE_DRIVER_COMMAND;
rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0,
- TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW,
+ TPM_TRANSMIT_NESTED,
"attempting to set locality");
proxy_dev->state &= ~STATE_DRIVER_COMMAND;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 17084cfcf53e..5b5b5d72eab7 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1309,51 +1309,35 @@ static const struct attribute_group port_attribute_group = {
.attrs = port_sysfs_entries,
};
-static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *offp)
+static int debugfs_show(struct seq_file *s, void *data)
{
- struct port *port;
- char *buf;
- ssize_t ret, out_offset, out_count;
+ struct port *port = s->private;
+
+ seq_printf(s, "name: %s\n", port->name ? port->name : "");
+ seq_printf(s, "guest_connected: %d\n", port->guest_connected);
+ seq_printf(s, "host_connected: %d\n", port->host_connected);
+ seq_printf(s, "outvq_full: %d\n", port->outvq_full);
+ seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
+ seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
+ seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
+ seq_printf(s, "is_console: %s\n",
+ is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
- out_count = 1024;
- buf = kmalloc(out_count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ return 0;
+}
- port = filp->private_data;
- out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count,
- "name: %s\n", port->name ? port->name : "");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "guest_connected: %d\n", port->guest_connected);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "host_connected: %d\n", port->host_connected);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "outvq_full: %d\n", port->outvq_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_sent: %lu\n", port->stats.bytes_sent);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_received: %lu\n",
- port->stats.bytes_received);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_discarded: %lu\n",
- port->stats.bytes_discarded);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "is_console: %s\n",
- is_console_port(port) ? "yes" : "no");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "console_vtermno: %u\n", port->cons.vtermno);
-
- ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
- kfree(buf);
- return ret;
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
}
static const struct file_operations port_debugfs_ops = {
.owner = THIS_MODULE,
- .open = simple_open,
- .read = debugfs_read,
+ .open = debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void set_console_size(struct port *port, u16 rows, u16 cols)
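The conversion above trades the hand-rolled kmalloc()/snprintf()/simple_read_from_buffer() sequence for the seq_file single_open() helpers, which handle buffering and file offsets internally. A minimal sketch of the same pattern for a hypothetical debugfs attribute (recent kernels can also generate the open/fops boilerplate with DEFINE_SHOW_ATTRIBUTE()):

	static int foo_show(struct seq_file *s, void *data)
	{
		struct foo *foo = s->private;	/* from inode->i_private */

		seq_printf(s, "state: %d\n", foo->state);
		return 0;
	}

	static int foo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, foo_show, inode->i_private);
	}

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.open		= foo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};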
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 721572a8c429..292056bbb30e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -45,6 +45,12 @@ config COMMON_CLK_MAX77686
This driver supports Maxim 77620/77686/77802 crystal oscillator
clock.
+config COMMON_CLK_MAX9485
+ tristate "Maxim 9485 Programmable Clock Generator"
+ depends on I2C
+ help
+	  This driver supports the Maxim 9485 Programmable Audio Clock Generator.
+
config COMMON_CLK_RK808
tristate "Clock driver for RK805/RK808/RK818"
depends on MFD_RK808
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ae40cbe770f0..a84c5573cabe 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
@@ -96,7 +97,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
index 8854adb37847..dc38c85a4833 100644
--- a/drivers/clk/actions/Kconfig
+++ b/drivers/clk/actions/Kconfig
@@ -1,14 +1,21 @@
config CLK_ACTIONS
bool "Clock driver for Actions Semi SoCs"
depends on ARCH_ACTIONS || COMPILE_TEST
+ select REGMAP_MMIO
default ARCH_ACTIONS
if CLK_ACTIONS
# SoC Drivers
+config CLK_OWL_S700
+ bool "Support for the Actions Semi OWL S700 clocks"
+ depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
+ default ARM64 && ARCH_ACTIONS
+
config CLK_OWL_S900
bool "Support for the Actions Semi OWL S900 clocks"
depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
default ARM64 && ARCH_ACTIONS
+
endif
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
index 76e431434d10..78c17d56f991 100644
--- a/drivers/clk/actions/Makefile
+++ b/drivers/clk/actions/Makefile
@@ -9,4 +9,5 @@ clk-owl-y += owl-composite.o
clk-owl-y += owl-pll.o
# SoC support
+obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o
obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o
diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c
new file mode 100644
index 000000000000..5e9531392ee5
--- /dev/null
+++ b/drivers/clk/actions/owl-s700.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi S700 clock driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Pathiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "owl-common.h"
+#include "owl-composite.h"
+#include "owl-divider.h"
+#include "owl-factor.h"
+#include "owl-fixed-factor.h"
+#include "owl-gate.h"
+#include "owl-mux.h"
+#include "owl-pll.h"
+
+#include <dt-bindings/clock/actions,s700-cmu.h>
+
+#define CMU_COREPLL (0x0000)
+#define CMU_DEVPLL (0x0004)
+#define CMU_DDRPLL (0x0008)
+#define CMU_NANDPLL (0x000C)
+#define CMU_DISPLAYPLL (0x0010)
+#define CMU_AUDIOPLL (0x0014)
+#define CMU_TVOUTPLL (0x0018)
+#define CMU_BUSCLK (0x001C)
+#define CMU_SENSORCLK (0x0020)
+#define CMU_LCDCLK (0x0024)
+#define CMU_DSIPLLCLK (0x0028)
+#define CMU_CSICLK (0x002C)
+#define CMU_DECLK (0x0030)
+#define CMU_SICLK (0x0034)
+#define CMU_BUSCLK1 (0x0038)
+#define CMU_HDECLK (0x003C)
+#define CMU_VDECLK (0x0040)
+#define CMU_VCECLK (0x0044)
+#define CMU_NANDCCLK (0x004C)
+#define CMU_SD0CLK (0x0050)
+#define CMU_SD1CLK (0x0054)
+#define CMU_SD2CLK (0x0058)
+#define CMU_UART0CLK (0x005C)
+#define CMU_UART1CLK (0x0060)
+#define CMU_UART2CLK (0x0064)
+#define CMU_UART3CLK (0x0068)
+#define CMU_UART4CLK (0x006C)
+#define CMU_UART5CLK (0x0070)
+#define CMU_UART6CLK (0x0074)
+#define CMU_PWM0CLK (0x0078)
+#define CMU_PWM1CLK (0x007C)
+#define CMU_PWM2CLK (0x0080)
+#define CMU_PWM3CLK (0x0084)
+#define CMU_PWM4CLK (0x0088)
+#define CMU_PWM5CLK (0x008C)
+#define CMU_GPU3DCLK (0x0090)
+#define CMU_CORECTL (0x009C)
+#define CMU_DEVCLKEN0 (0x00A0)
+#define CMU_DEVCLKEN1 (0x00A4)
+#define CMU_DEVRST0 (0x00A8)
+#define CMU_DEVRST1 (0x00AC)
+#define CMU_USBPLL (0x00B0)
+#define CMU_ETHERNETPLL (0x00B4)
+#define CMU_CVBSPLL (0x00B8)
+#define CMU_SSTSCLK (0x00C0)
+
+static struct clk_pll_table clk_audio_pll_table[] = {
+ {0, 45158400}, {1, 49152000},
+ {0, 0},
+};
+
+static struct clk_pll_table clk_cvbs_pll_table[] = {
+ {27, 29 * 12000000}, {28, 30 * 12000000}, {29, 31 * 12000000},
+ {30, 32 * 12000000}, {31, 33 * 12000000}, {32, 34 * 12000000},
+ {33, 35 * 12000000}, {34, 36 * 12000000}, {35, 37 * 12000000},
+ {36, 38 * 12000000}, {37, 39 * 12000000}, {38, 40 * 12000000},
+ {39, 41 * 12000000}, {40, 42 * 12000000}, {41, 43 * 12000000},
+ {42, 44 * 12000000}, {43, 45 * 12000000}, {0, 0},
+};
+
+/* pll clocks */
+static OWL_PLL_NO_PARENT(clk_core_pll, "core_pll", CMU_COREPLL, 12000000, 9, 0, 8, 4, 174, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_dev_pll, "dev_pll", CMU_DEVPLL, 6000000, 8, 0, 8, 8, 126, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_ddr_pll, "ddr_pll", CMU_DDRPLL, 6000000, 8, 0, 8, 2, 180, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_nand_pll, "nand_pll", CMU_NANDPLL, 6000000, 8, 0, 8, 2, 86, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_display_pll, "display_pll", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 2, 140, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_cvbs_pll, "cvbs_pll", CMU_CVBSPLL, 0, 8, 0, 8, 27, 43, clk_cvbs_pll_table, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_audio_pll, "audio_pll", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, clk_audio_pll_table, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_ethernet_pll, "ethernet_pll", CMU_ETHERNETPLL, 500000000, 0, 0, 0, 0, 0, NULL, CLK_IGNORE_UNUSED);
+
+static const char *cpu_clk_mux_p[] = {"losc", "hosc", "core_pll", "noc1_clk_div"};
+static const char *dev_clk_p[] = { "hosc", "dev_pll"};
+static const char *noc_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_pll", "cvbs_pll"};
+
+static const char *csi_clk_mux_p[] = { "display_pll", "dev_clk"};
+static const char *de_clk_mux_p[] = { "display_pll", "dev_clk"};
+static const char *hde_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_pll"};
+static const char *nand_clk_mux_p[] = { "nand_pll", "display_pll", "dev_clk", "ddr_pll"};
+static const char *sd_clk_mux_p[] = { "dev_clk", "nand_pll", };
+static const char *uart_clk_mux_p[] = { "hosc", "dev_pll"};
+static const char *pwm_clk_mux_p[] = { "losc", "hosc"};
+static const char *gpu_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_clk", "cvbs_pll"};
+static const char *lcd_clk_mux_p[] = { "display_pll", "dev_clk" };
+static const char *i2s_clk_mux_p[] = { "audio_pll" };
+static const char *sensor_clk_mux_p[] = { "hosc", "si"};
+
+/* mux clocks */
+static OWL_MUX(clk_cpu, "cpu_clk", cpu_clk_mux_p, CMU_BUSCLK, 0, 2, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_dev, "dev_clk", dev_clk_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_noc0_clk_mux, "noc0_clk_mux", noc_clk_mux_p, CMU_BUSCLK, 4, 3, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_noc1_clk_mux, "noc1_clk_mux", noc_clk_mux_p, CMU_BUSCLK1, 4, 3, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_hp_clk_mux, "hp_clk_mux", noc_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
+
+static struct clk_factor_table sd_factor_table[] = {
+ /* bit0 ~ 4 */
+ {0, 1, 1}, {1, 1, 2}, {2, 1, 3}, {3, 1, 4},
+ {4, 1, 5}, {5, 1, 6}, {6, 1, 7}, {7, 1, 8},
+ {8, 1, 9}, {9, 1, 10}, {10, 1, 11}, {11, 1, 12},
+ {12, 1, 13}, {13, 1, 14}, {14, 1, 15}, {15, 1, 16},
+ {16, 1, 17}, {17, 1, 18}, {18, 1, 19}, {19, 1, 20},
+ {20, 1, 21}, {21, 1, 22}, {22, 1, 23}, {23, 1, 24},
+ {24, 1, 25}, {25, 1, 26},
+
+ /* bit8: /128 */
+ {256, 1, 1 * 128}, {257, 1, 2 * 128}, {258, 1, 3 * 128}, {259, 1, 4 * 128},
+ {260, 1, 5 * 128}, {261, 1, 6 * 128}, {262, 1, 7 * 128}, {263, 1, 8 * 128},
+ {264, 1, 9 * 128}, {265, 1, 10 * 128}, {266, 1, 11 * 128}, {267, 1, 12 * 128},
+ {268, 1, 13 * 128}, {269, 1, 14 * 128}, {270, 1, 15 * 128}, {271, 1, 16 * 128},
+ {272, 1, 17 * 128}, {273, 1, 18 * 128}, {274, 1, 19 * 128}, {275, 1, 20 * 128},
+ {276, 1, 21 * 128}, {277, 1, 22 * 128}, {278, 1, 23 * 128}, {279, 1, 24 * 128},
+ {280, 1, 25 * 128}, {281, 1, 26 * 128},
+
+ {0, 0},
+};
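To unpack the encoding: the low field selects a divider of (val + 1), and bit 8 multiplies it by a further 128. For example, register value 258 (0x102) has bit 8 set and a low field of 2, giving a divider of (2 + 1) * 128 = 384, which matches the {258, 1, 3 * 128} entry: the output runs at parent_rate * 1 / 384.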
+
+static struct clk_factor_table lcd_factor_table[] = {
+ /* bit0 ~ 3 */
+ {0, 1, 1}, {1, 1, 2}, {2, 1, 3}, {3, 1, 4},
+ {4, 1, 5}, {5, 1, 6}, {6, 1, 7}, {7, 1, 8},
+ {8, 1, 9}, {9, 1, 10}, {10, 1, 11}, {11, 1, 12},
+
+ /* bit8: /7 */
+ {256, 1, 1 * 7}, {257, 1, 2 * 7}, {258, 1, 3 * 7}, {259, 1, 4 * 7},
+ {260, 1, 5 * 7}, {261, 1, 6 * 7}, {262, 1, 7 * 7}, {263, 1, 8 * 7},
+ {264, 1, 9 * 7}, {265, 1, 10 * 7}, {266, 1, 11 * 7}, {267, 1, 12 * 7},
+ {0, 0},
+};
+
+static struct clk_div_table hdmia_div_table[] = {
+ {0, 1}, {1, 2}, {2, 3}, {3, 4},
+ {4, 6}, {5, 8}, {6, 12}, {7, 16},
+ {8, 24},
+ {0, 0},
+};
+
+static struct clk_div_table rmii_div_table[] = {
+ {0, 4}, {1, 10},
+ {0, 0},
+};
+
+/* divider clocks */
+static OWL_DIVIDER(clk_noc0, "noc0_clk", "noc0_clk_mux", CMU_BUSCLK, 16, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_noc1, "noc1_clk", "noc1_clk_mux", CMU_BUSCLK1, 16, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_noc1_clk_div, "noc1_clk_div", "noc1_clk", CMU_BUSCLK1, 20, 1, NULL, 0, 0);
+static OWL_DIVIDER(clk_hp_clk_div, "hp_clk_div", "hp_clk_mux", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_ahb, "ahb_clk", "hp_clk_div", CMU_BUSCLK1, 2, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_apb, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_sensor0, "sensor0", "sensor_src", CMU_SENSORCLK, 0, 4, NULL, 0, 0);
+static OWL_DIVIDER(clk_sensor1, "sensor1", "sensor_src", CMU_SENSORCLK, 8, 4, NULL, 0, 0);
+static OWL_DIVIDER(clk_rmii_ref, "rmii_ref", "ethernet_pll", CMU_ETHERNETPLL, 2, 1, rmii_div_table, 0, 0);
+
+static struct clk_factor_table de_factor_table[] = {
+ {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
+ {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
+ {8, 1, 12}, {0, 0, 0},
+};
+
+static struct clk_factor_table hde_factor_table[] = {
+ {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
+ {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
+ {0, 0, 0},
+};
+
+/* gate clocks */
+static OWL_GATE(clk_gpio, "gpio", "apb_clk", CMU_DEVCLKEN1, 25, 0, 0);
+static OWL_GATE(clk_dmac, "dmac", "hp_clk_div", CMU_DEVCLKEN0, 17, 0, 0);
+static OWL_GATE(clk_timer, "timer", "hosc", CMU_DEVCLKEN1, 22, 0, 0);
+static OWL_GATE_NO_PARENT(clk_dsi, "dsi_clk", CMU_DEVCLKEN0, 2, 0, 0);
+static OWL_GATE_NO_PARENT(clk_tvout, "tvout_clk", CMU_DEVCLKEN0, 3, 0, 0);
+static OWL_GATE_NO_PARENT(clk_hdmi_dev, "hdmi_dev", CMU_DEVCLKEN0, 5, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_480mpll0, "usb3_480mpll0", CMU_USBPLL, 3, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_480mphy0, "usb3_480mphy0", CMU_USBPLL, 2, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_5gphy, "usb3_5gphy", CMU_USBPLL, 1, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_cce, "usb3_cce", CMU_DEVCLKEN0, 25, 0, 0);
+static OWL_GATE(clk_i2c0, "i2c0", "hosc", CMU_DEVCLKEN1, 0, 0, 0);
+static OWL_GATE(clk_i2c1, "i2c1", "hosc", CMU_DEVCLKEN1, 1, 0, 0);
+static OWL_GATE(clk_i2c2, "i2c2", "hosc", CMU_DEVCLKEN1, 2, 0, 0);
+static OWL_GATE(clk_i2c3, "i2c3", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
+static OWL_GATE(clk_spi0, "spi0", "ahb_clk", CMU_DEVCLKEN1, 4, 0, 0);
+static OWL_GATE(clk_spi1, "spi1", "ahb_clk", CMU_DEVCLKEN1, 5, 0, 0);
+static OWL_GATE(clk_spi2, "spi2", "ahb_clk", CMU_DEVCLKEN1, 6, 0, 0);
+static OWL_GATE(clk_spi3, "spi3", "ahb_clk", CMU_DEVCLKEN1, 7, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_pllen, "usbh0_pllen", CMU_USBPLL, 12, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_phy, "usbh0_phy", CMU_USBPLL, 10, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_cce, "usbh0_cce", CMU_DEVCLKEN0, 26, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_pllen, "usbh1_pllen", CMU_USBPLL, 13, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_phy, "usbh1_phy", CMU_USBPLL, 11, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_cce, "usbh1_cce", CMU_DEVCLKEN0, 27, 0, 0);
+static OWL_GATE_NO_PARENT(clk_irc_switch, "irc_switch", CMU_DEVCLKEN1, 15, 0, 0);
+
+/* composite clocks */
+
+static OWL_COMP_DIV(clk_csi, "csi", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_CSICLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 13, 0),
+ OWL_DIVIDER_HW(CMU_CSICLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_si, "si", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_SICLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_DIVIDER_HW(CMU_SICLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_FACTOR(clk_de, "de", de_clk_mux_p,
+ OWL_MUX_HW(CMU_DECLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 0, 0),
+ OWL_FACTOR_HW(CMU_DECLK, 0, 3, 0, de_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_hde, "hde", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_HDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 9, 0),
+ OWL_FACTOR_HW(CMU_HDECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_vde, "vde", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 10, 0),
+ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_vce, "vce", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VCECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0),
+ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_nand, "nand", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 3, 0, NULL),
+ CLK_SET_RATE_PARENT);
+
+static OWL_COMP_FACTOR(clk_sd0, "sd0", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD0CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0),
+ OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_sd1, "sd1", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD1CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 23, 0),
+ OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_sd2, "sd2", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD2CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 24, 0),
+ OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_uart0, "uart0", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART0CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
+ OWL_DIVIDER_HW(CMU_UART0CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart1, "uart1", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART1CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 9, 0),
+ OWL_DIVIDER_HW(CMU_UART1CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart2, "uart2", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART2CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 10, 0),
+ OWL_DIVIDER_HW(CMU_UART2CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart3, "uart3", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART3CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 11, 0),
+ OWL_DIVIDER_HW(CMU_UART3CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart4, "uart4", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART4CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 12, 0),
+ OWL_DIVIDER_HW(CMU_UART4CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart5, "uart5", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART5CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 13, 0),
+ OWL_DIVIDER_HW(CMU_UART5CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart6, "uart6", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART6CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0),
+ OWL_DIVIDER_HW(CMU_UART6CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm0, "pwm0", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM0CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 16, 0),
+ OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 10, 0, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(clk_pwm1, "pwm1", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM1CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 17, 0),
+ OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm2, "pwm2", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM2CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
+ OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm3, "pwm3", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM3CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
+ OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm4, "pwm4", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM4CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
+ OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm5, "pwm5", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM5CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
+ OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_FACTOR(clk_gpu3d, "gpu3d", gpu_clk_mux_p,
+ OWL_MUX_HW(CMU_GPU3DCLK, 4, 3),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0),
+ OWL_FACTOR_HW(CMU_GPU3DCLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_lcd, "lcd", lcd_clk_mux_p,
+ OWL_MUX_HW(CMU_LCDCLK, 12, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 1, 0),
+ OWL_FACTOR_HW(CMU_LCDCLK, 0, 9, 0, lcd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_hdmi_audio, "hdmia", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1), /* CMU_AUDIOPLL 24, 1 unused */
+ OWL_GATE_HW(CMU_DEVCLKEN1, 28, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, hdmia_div_table),
+ 0);
+
+static OWL_COMP_DIV(clk_i2srx, "i2srx", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 27, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, hdmia_div_table),
+ 0);
+
+static OWL_COMP_DIV(clk_i2stx, "i2stx", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, hdmia_div_table),
+ 0);
+
+/* for bluetooth pcm communication */
+static OWL_COMP_FIXED_FACTOR(clk_pcm1, "pcm1", "audio_pll",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0),
+ 1, 2, 0);
+
+static OWL_COMP_DIV(clk_sensor_src, "sensor_src", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ {0},
+ OWL_DIVIDER_HW(CMU_SENSORCLK, 5, 2, 0, NULL),
+ 0);
+
+static OWL_COMP_FIXED_FACTOR(clk_ethernet, "ethernet", "ethernet_pll",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0),
+ 1, 20, 0);
+
+static OWL_COMP_DIV_FIXED(clk_thermal_sensor, "thermal_sensor", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN0, 31, 0),
+ OWL_DIVIDER_HW(CMU_SSTSCLK, 20, 10, 0, NULL),
+ 0);
+
+static struct owl_clk_common *s700_clks[] = {
+ &clk_core_pll.common,
+ &clk_dev_pll.common,
+ &clk_ddr_pll.common,
+ &clk_nand_pll.common,
+ &clk_display_pll.common,
+ &clk_cvbs_pll.common,
+ &clk_audio_pll.common,
+ &clk_ethernet_pll.common,
+ &clk_cpu.common,
+ &clk_dev.common,
+ &clk_ahb.common,
+ &clk_apb.common,
+ &clk_dmac.common,
+ &clk_noc0_clk_mux.common,
+ &clk_noc1_clk_mux.common,
+ &clk_hp_clk_mux.common,
+ &clk_hp_clk_div.common,
+ &clk_noc1_clk_div.common,
+ &clk_noc0.common,
+ &clk_noc1.common,
+ &clk_sensor_src.common,
+ &clk_gpio.common,
+ &clk_timer.common,
+ &clk_dsi.common,
+ &clk_csi.common,
+ &clk_si.common,
+ &clk_de.common,
+ &clk_hde.common,
+ &clk_vde.common,
+ &clk_vce.common,
+ &clk_nand.common,
+ &clk_sd0.common,
+ &clk_sd1.common,
+ &clk_sd2.common,
+ &clk_uart0.common,
+ &clk_uart1.common,
+ &clk_uart2.common,
+ &clk_uart3.common,
+ &clk_uart4.common,
+ &clk_uart5.common,
+ &clk_uart6.common,
+ &clk_pwm0.common,
+ &clk_pwm1.common,
+ &clk_pwm2.common,
+ &clk_pwm3.common,
+ &clk_pwm4.common,
+ &clk_pwm5.common,
+ &clk_gpu3d.common,
+ &clk_i2c0.common,
+ &clk_i2c1.common,
+ &clk_i2c2.common,
+ &clk_i2c3.common,
+ &clk_spi0.common,
+ &clk_spi1.common,
+ &clk_spi2.common,
+ &clk_spi3.common,
+ &clk_usb3_480mpll0.common,
+ &clk_usb3_480mphy0.common,
+ &clk_usb3_5gphy.common,
+ &clk_usb3_cce.common,
+ &clk_lcd.common,
+ &clk_hdmi_audio.common,
+ &clk_i2srx.common,
+ &clk_i2stx.common,
+ &clk_sensor0.common,
+ &clk_sensor1.common,
+ &clk_hdmi_dev.common,
+ &clk_ethernet.common,
+ &clk_rmii_ref.common,
+ &clk_usb2h0_pllen.common,
+ &clk_usb2h0_phy.common,
+ &clk_usb2h0_cce.common,
+ &clk_usb2h1_pllen.common,
+ &clk_usb2h1_phy.common,
+ &clk_usb2h1_cce.common,
+ &clk_tvout.common,
+ &clk_thermal_sensor.common,
+ &clk_irc_switch.common,
+ &clk_pcm1.common,
+};
+
+static struct clk_hw_onecell_data s700_hw_clks = {
+ .hws = {
+ [CLK_CORE_PLL] = &clk_core_pll.common.hw,
+ [CLK_DEV_PLL] = &clk_dev_pll.common.hw,
+ [CLK_DDR_PLL] = &clk_ddr_pll.common.hw,
+ [CLK_NAND_PLL] = &clk_nand_pll.common.hw,
+ [CLK_DISPLAY_PLL] = &clk_display_pll.common.hw,
+ [CLK_CVBS_PLL] = &clk_cvbs_pll.common.hw,
+ [CLK_AUDIO_PLL] = &clk_audio_pll.common.hw,
+ [CLK_ETHERNET_PLL] = &clk_ethernet_pll.common.hw,
+ [CLK_CPU] = &clk_cpu.common.hw,
+ [CLK_DEV] = &clk_dev.common.hw,
+ [CLK_AHB] = &clk_ahb.common.hw,
+ [CLK_APB] = &clk_apb.common.hw,
+ [CLK_DMAC] = &clk_dmac.common.hw,
+ [CLK_NOC0_CLK_MUX] = &clk_noc0_clk_mux.common.hw,
+ [CLK_NOC1_CLK_MUX] = &clk_noc1_clk_mux.common.hw,
+ [CLK_HP_CLK_MUX] = &clk_hp_clk_mux.common.hw,
+ [CLK_HP_CLK_DIV] = &clk_hp_clk_div.common.hw,
+ [CLK_NOC1_CLK_DIV] = &clk_noc1_clk_div.common.hw,
+ [CLK_NOC0] = &clk_noc0.common.hw,
+ [CLK_NOC1] = &clk_noc1.common.hw,
+ [CLK_SENOR_SRC] = &clk_sensor_src.common.hw,
+ [CLK_GPIO] = &clk_gpio.common.hw,
+ [CLK_TIMER] = &clk_timer.common.hw,
+ [CLK_DSI] = &clk_dsi.common.hw,
+ [CLK_CSI] = &clk_csi.common.hw,
+ [CLK_SI] = &clk_si.common.hw,
+ [CLK_DE] = &clk_de.common.hw,
+ [CLK_HDE] = &clk_hde.common.hw,
+ [CLK_VDE] = &clk_vde.common.hw,
+ [CLK_VCE] = &clk_vce.common.hw,
+ [CLK_NAND] = &clk_nand.common.hw,
+ [CLK_SD0] = &clk_sd0.common.hw,
+ [CLK_SD1] = &clk_sd1.common.hw,
+ [CLK_SD2] = &clk_sd2.common.hw,
+ [CLK_UART0] = &clk_uart0.common.hw,
+ [CLK_UART1] = &clk_uart1.common.hw,
+ [CLK_UART2] = &clk_uart2.common.hw,
+ [CLK_UART3] = &clk_uart3.common.hw,
+ [CLK_UART4] = &clk_uart4.common.hw,
+ [CLK_UART5] = &clk_uart5.common.hw,
+ [CLK_UART6] = &clk_uart6.common.hw,
+ [CLK_PWM0] = &clk_pwm0.common.hw,
+ [CLK_PWM1] = &clk_pwm1.common.hw,
+ [CLK_PWM2] = &clk_pwm2.common.hw,
+ [CLK_PWM3] = &clk_pwm3.common.hw,
+ [CLK_PWM4] = &clk_pwm4.common.hw,
+ [CLK_PWM5] = &clk_pwm5.common.hw,
+ [CLK_GPU3D] = &clk_gpu3d.common.hw,
+ [CLK_I2C0] = &clk_i2c0.common.hw,
+ [CLK_I2C1] = &clk_i2c1.common.hw,
+ [CLK_I2C2] = &clk_i2c2.common.hw,
+ [CLK_I2C3] = &clk_i2c3.common.hw,
+ [CLK_SPI0] = &clk_spi0.common.hw,
+ [CLK_SPI1] = &clk_spi1.common.hw,
+ [CLK_SPI2] = &clk_spi2.common.hw,
+ [CLK_SPI3] = &clk_spi3.common.hw,
+ [CLK_USB3_480MPLL0] = &clk_usb3_480mpll0.common.hw,
+ [CLK_USB3_480MPHY0] = &clk_usb3_480mphy0.common.hw,
+ [CLK_USB3_5GPHY] = &clk_usb3_5gphy.common.hw,
+ [CLK_USB3_CCE] = &clk_usb3_cce.common.hw,
+ [CLK_LCD] = &clk_lcd.common.hw,
+ [CLK_HDMI_AUDIO] = &clk_hdmi_audio.common.hw,
+ [CLK_I2SRX] = &clk_i2srx.common.hw,
+ [CLK_I2STX] = &clk_i2stx.common.hw,
+ [CLK_SENSOR0] = &clk_sensor0.common.hw,
+ [CLK_SENSOR1] = &clk_sensor1.common.hw,
+ [CLK_HDMI_DEV] = &clk_hdmi_dev.common.hw,
+ [CLK_ETHERNET] = &clk_ethernet.common.hw,
+ [CLK_RMII_REF] = &clk_rmii_ref.common.hw,
+ [CLK_USB2H0_PLLEN] = &clk_usb2h0_pllen.common.hw,
+ [CLK_USB2H0_PHY] = &clk_usb2h0_phy.common.hw,
+ [CLK_USB2H0_CCE] = &clk_usb2h0_cce.common.hw,
+ [CLK_USB2H1_PLLEN] = &clk_usb2h1_pllen.common.hw,
+ [CLK_USB2H1_PHY] = &clk_usb2h1_phy.common.hw,
+ [CLK_USB2H1_CCE] = &clk_usb2h1_cce.common.hw,
+ [CLK_TVOUT] = &clk_tvout.common.hw,
+ [CLK_THERMAL_SENSOR] = &clk_thermal_sensor.common.hw,
+ [CLK_IRC_SWITCH] = &clk_irc_switch.common.hw,
+ [CLK_PCM1] = &clk_pcm1.common.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static const struct owl_clk_desc s700_clk_desc = {
+ .clks = s700_clks,
+ .num_clks = ARRAY_SIZE(s700_clks),
+
+ .hw_clks = &s700_hw_clks,
+};
+
+static int s700_clk_probe(struct platform_device *pdev)
+{
+ const struct owl_clk_desc *desc;
+
+ desc = &s700_clk_desc;
+ owl_clk_regmap_init(pdev, desc);
+
+ return owl_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static const struct of_device_id s700_clk_of_match[] = {
+ { .compatible = "actions,s700-cmu", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver s700_clk_driver = {
+ .probe = s700_clk_probe,
+ .driver = {
+ .name = "s700-cmu",
+ .of_match_table = s700_clk_of_match
+ },
+};
+
+static int __init s700_clk_init(void)
+{
+ return platform_driver_register(&s700_clk_driver);
+}
+core_initcall(s700_clk_init);
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 082596f37c1d..facc169ebb68 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
+obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
diff --git a/drivers/clk/at91/clk-i2s-mux.c b/drivers/clk/at91/clk-i2s-mux.c
new file mode 100644
index 000000000000..f0c3c3079f04
--- /dev/null
+++ b/drivers/clk/at91/clk-i2s-mux.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Microchip Technology Inc,
+ * Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
+ *
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <soc/at91/atmel-sfr.h>
+
+#define I2S_BUS_NR 2
+
+struct clk_i2s_mux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 bus_id;
+};
+
+#define to_clk_i2s_mux(hw) container_of(hw, struct clk_i2s_mux, hw)
+
+static u8 clk_i2s_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_i2s_mux *mux = to_clk_i2s_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, AT91_SFR_I2SCLKSEL, &val);
+
+ return (val & BIT(mux->bus_id)) >> mux->bus_id;
+}
+
+static int clk_i2s_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_i2s_mux *mux = to_clk_i2s_mux(hw);
+
+ return regmap_update_bits(mux->regmap, AT91_SFR_I2SCLKSEL,
+ BIT(mux->bus_id), index << mux->bus_id);
+}
+
+static const struct clk_ops clk_i2s_mux_ops = {
+ .get_parent = clk_i2s_mux_get_parent,
+ .set_parent = clk_i2s_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
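The mux state lives in the shared SFR register, one bit per I2S controller. Worked through for bus_id = 1: get_parent returns (val & BIT(1)) >> 1, i.e. 0 or 1, and set_parent writes index << 1 under the BIT(1) mask via regmap_update_bits(), so each controller can be reparented without disturbing the other.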
+
+static struct clk_hw * __init
+at91_clk_i2s_mux_register(struct regmap *regmap, const char *name,
+ const char * const *parent_names,
+ unsigned int num_parents, u8 bus_id)
+{
+ struct clk_init_data init = {};
+ struct clk_i2s_mux *i2s_ck;
+ int ret;
+
+ i2s_ck = kzalloc(sizeof(*i2s_ck), GFP_KERNEL);
+ if (!i2s_ck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_i2s_mux_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ i2s_ck->hw.init = &init;
+ i2s_ck->bus_id = bus_id;
+ i2s_ck->regmap = regmap;
+
+ ret = clk_hw_register(NULL, &i2s_ck->hw);
+ if (ret) {
+ kfree(i2s_ck);
+ return ERR_PTR(ret);
+ }
+
+ return &i2s_ck->hw;
+}
+
+static void __init of_sama5d2_clk_i2s_mux_setup(struct device_node *np)
+{
+ struct regmap *regmap_sfr;
+ u8 bus_id;
+ const char *parent_names[2];
+ struct device_node *i2s_mux_np;
+ struct clk_hw *hw;
+ int ret;
+
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ return;
+
+ for_each_child_of_node(np, i2s_mux_np) {
+ if (of_property_read_u8(i2s_mux_np, "reg", &bus_id))
+ continue;
+
+ if (bus_id > I2S_BUS_NR)
+ continue;
+
+ ret = of_clk_parent_fill(i2s_mux_np, parent_names, 2);
+ if (ret != 2)
+ continue;
+
+ hw = at91_clk_i2s_mux_register(regmap_sfr, i2s_mux_np->name,
+ parent_names, 2, bus_id);
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(i2s_mux_np, of_clk_hw_simple_get, hw);
+ }
+}
+
+CLK_OF_DECLARE(sama5d2_clk_i2s_mux, "atmel,sama5d2-clk-i2s-mux",
+ of_sama5d2_clk_i2s_mux_setup);
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..596136793fc4 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
#define ASPEED_MPLL_PARAM 0x20
#define ASPEED_HPLL_PARAM 0x24
#define AST2500_HPLL_BYPASS_EN BIT(20)
-#define AST2400_HPLL_STRAPPED BIT(18)
+#define AST2400_HPLL_PROGRAMMED BIT(18)
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
- [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
- [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
+ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+ [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
[ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@@ -109,7 +109,7 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_RSACLK] = { 24, -1, "rsaclk-gate", NULL, 0 }, /* RSA */
[ASPEED_CLK_GATE_UART3CLK] = { 25, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
[ASPEED_CLK_GATE_UART4CLK] = { 26, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
- [ASPEED_CLK_GATE_SDCLKCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
+ [ASPEED_CLK_GATE_SDCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
[ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
};
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = BIT(gate->clock_idx);
+ u32 rst = BIT(gate->reset_idx);
u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
u32 reg;
+ /*
+ * If the IP is in reset, treat the clock as not enabled,
+ * this happens with some clocks such as the USB one when
+ * coming from cold reset. Without this, aspeed_clk_enable()
+ * will fail to lift the reset.
+ */
+ if (gate->reset_idx >= 0) {
+ regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+ if (reg & rst)
+ return 0;
+ }
+
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
static void __init aspeed_ast2400_cc(struct regmap *map)
{
struct clk_hw *hw;
- u32 val, freq, div;
+ u32 val, div, clkin, hpll;
+ const u16 hpll_rates[][4] = {
+ {384, 360, 336, 408},
+ {400, 375, 350, 425},
+ };
+ int rate;
/*
* CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
* strapping
*/
regmap_read(map, ASPEED_STRAP, &val);
- if (val & CLKIN_25MHZ_EN)
- freq = 25000000;
- else if (val & AST2400_CLK_SOURCE_SEL)
- freq = 48000000;
- else
- freq = 24000000;
- hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
- pr_debug("clkin @%u MHz\n", freq / 1000000);
+ rate = (val >> 8) & 3;
+ if (val & CLKIN_25MHZ_EN) {
+ clkin = 25000000;
+ hpll = hpll_rates[1][rate];
+ } else if (val & AST2400_CLK_SOURCE_SEL) {
+ clkin = 48000000;
+ hpll = hpll_rates[0][rate];
+ } else {
+ clkin = 24000000;
+ hpll = hpll_rates[0][rate];
+ }
+ hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+ pr_debug("clkin @%u MHz\n", clkin / 1000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
- * and we assume that it is enabled
+ * and we assume that it is enabled. It can be configured through the
+ * HPLL_PARAM register, or set to a specified frequency by strapping.
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
- WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
- aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+ if (val & AST2400_HPLL_PROGRAMMED)
+ hw = aspeed_ast2400_calc_pll("hpll", val);
+ else
+ hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+ hpll * 1000000);
+
+ aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
/*
* Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
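With AST2400_HPLL_PROGRAMMED clear, the HPLL rate now comes from strapping instead of the parameter register: strap bits 9:8 index a column of hpll_rates, and the row is chosen by the CLKIN frequency. For example, a 25 MHz CLKIN with a strap field of 2 selects hpll_rates[1][2] = 350, i.e. a fixed 350 MHz HPLL.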
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index a2f8c42e527a..92bc4aca0f95 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CS2000 -- CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier
*
* Copyright (C) 2015 Renesas Electronics Corporation
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index a5d402de5584..20724abd38bd 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -177,8 +177,15 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
mult, div);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ /*
+ * If parent clock is not registered, registration would fail.
+ * Clear OF_POPULATED flag so that clock registration can be
+ * attempted again from probe function.
+ */
+ of_node_clear_flag(node, OF_POPULATED);
return clk;
+ }
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
if (ret) {
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
new file mode 100644
index 000000000000..5e80f3d090f3
--- /dev/null
+++ b/drivers/clk/clk-max9485.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+
+#include <dt-bindings/clock/maxim,max9485.h>
+
+#define MAX9485_NUM_CLKS 4
+
+/* This chip has only a single 8-bit register. */
+
+#define MAX9485_FS_12KHZ (0 << 0)
+#define MAX9485_FS_32KHZ (1 << 0)
+#define MAX9485_FS_44_1KHZ (2 << 0)
+#define MAX9485_FS_48KHZ (3 << 0)
+
+#define MAX9485_SCALE_256 (0 << 2)
+#define MAX9485_SCALE_384 (1 << 2)
+#define MAX9485_SCALE_768 (2 << 2)
+
+#define MAX9485_DOUBLE BIT(4)
+#define MAX9485_CLKOUT1_ENABLE BIT(5)
+#define MAX9485_CLKOUT2_ENABLE BIT(6)
+#define MAX9485_MCLK_ENABLE BIT(7)
+#define MAX9485_FREQ_MASK 0x1f
+
+struct max9485_rate {
+ unsigned long out;
+ u8 reg_value;
+};
+
+/*
+ * Ordered by frequency. For frequencies the hardware can generate with
+ * multiple settings, the one with the lowest jitter is listed first.
+ */
+static const struct max9485_rate max9485_rates[] = {
+ { 3072000, MAX9485_FS_12KHZ | MAX9485_SCALE_256 },
+ { 4608000, MAX9485_FS_12KHZ | MAX9485_SCALE_384 },
+ { 8192000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 },
+ { 9216000, MAX9485_FS_12KHZ | MAX9485_SCALE_768 },
+ { 11289600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 },
+ { 12288000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 },
+ { 12288000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 },
+ { 16384000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 16934400, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 },
+ { 18432000, MAX9485_FS_48KHZ | MAX9485_SCALE_384 },
+ { 22579200, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 },
+ { 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 },
+ { 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 },
+ { 49152000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { 67737600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { 73728000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { } /* sentinel */
+};
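Each entry encodes its output rate as a base audio sample rate times a scale factor, optionally doubled: out = fs * scale (* 2 with MAX9485_DOUBLE). For example, 44100 * 256 = 11289600 for the MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 entry, and 32000 * 256 * 2 = 16384000 once MAX9485_DOUBLE is added.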
+
+struct max9485_driver_data;
+
+struct max9485_clk_hw {
+ struct clk_hw hw;
+ struct clk_init_data init;
+ u8 enable_bit;
+ struct max9485_driver_data *drvdata;
+};
+
+struct max9485_driver_data {
+ struct clk *xclk;
+ struct i2c_client *client;
+ u8 reg_value;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ struct max9485_clk_hw hw[MAX9485_NUM_CLKS];
+};
+
+static inline struct max9485_clk_hw *to_max9485_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct max9485_clk_hw, hw);
+}
+
+static int max9485_update_bits(struct max9485_driver_data *drvdata,
+ u8 mask, u8 value)
+{
+ int ret;
+
+ drvdata->reg_value &= ~mask;
+ drvdata->reg_value |= value;
+
+ dev_dbg(&drvdata->client->dev,
+ "updating mask 0x%02x value 0x%02x -> 0x%02x\n",
+ mask, value, drvdata->reg_value);
+
+ ret = i2c_master_send(drvdata->client,
+ &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+
+ return ret < 0 ? ret : 0;
+}
+
+static int max9485_clk_prepare(struct clk_hw *hw)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+
+ return max9485_update_bits(clk_hw->drvdata,
+ clk_hw->enable_bit,
+ clk_hw->enable_bit);
+}
+
+static void max9485_clk_unprepare(struct clk_hw *hw)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+
+ max9485_update_bits(clk_hw->drvdata, clk_hw->enable_bit, 0);
+}
+
+/*
+ * CLKOUT - configurable clock output
+ */
+static int max9485_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+ const struct max9485_rate *entry;
+
+ for (entry = max9485_rates; entry->out != 0; entry++)
+ if (entry->out == rate)
+ break;
+
+ if (entry->out == 0)
+ return -EINVAL;
+
+ return max9485_update_bits(clk_hw->drvdata,
+ MAX9485_FREQ_MASK,
+ entry->reg_value);
+}
+
+static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+ struct max9485_driver_data *drvdata = clk_hw->drvdata;
+ u8 val = drvdata->reg_value & MAX9485_FREQ_MASK;
+ const struct max9485_rate *entry;
+
+ for (entry = max9485_rates; entry->out != 0; entry++)
+ if (val == entry->reg_value)
+ return entry->out;
+
+ return 0;
+}
+
+static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct max9485_rate *curr, *prev = NULL;
+
+ for (curr = max9485_rates; curr->out != 0; curr++) {
+ /* Exact matches */
+ if (curr->out == rate)
+ return rate;
+
+ /*
+ * Find the first entry that has a frequency higher than the
+ * requested one.
+ */
+ if (curr->out > rate) {
+ unsigned int mid;
+
+ /*
+ * If this is the first entry, clamp the value to the
+ * lowest possible frequency.
+ */
+ if (!prev)
+ return curr->out;
+
+ /*
+ * Otherwise, determine whether the previous entry or
+ * current one is closer.
+ */
+ mid = prev->out + ((curr->out - prev->out) / 2);
+
+ return (mid > rate) ? prev->out : curr->out;
+ }
+
+ prev = curr;
+ }
+
+ /* If the last entry was still too high, clamp the value */
+ return prev->out;
+}
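A worked example of the midpoint selection above: a request for 10000000 falls between the 9216000 and 11289600 entries, the midpoint is 9216000 + (11289600 - 9216000) / 2 = 10252800, and since 10252800 > 10000000 the lower rate, 9216000, is returned.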
+
+struct max9485_clk {
+ const char *name;
+ int parent_index;
+ const struct clk_ops ops;
+ u8 enable_bit;
+};
+
+static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
+ [MAX9485_MCLKOUT] = {
+ .name = "mclkout",
+ .parent_index = -1,
+ .enable_bit = MAX9485_MCLK_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+ [MAX9485_CLKOUT] = {
+ .name = "clkout",
+ .parent_index = -1,
+ .ops = {
+ .set_rate = max9485_clkout_set_rate,
+ .round_rate = max9485_clkout_round_rate,
+ .recalc_rate = max9485_clkout_recalc_rate,
+ },
+ },
+ [MAX9485_CLKOUT1] = {
+ .name = "clkout1",
+ .parent_index = MAX9485_CLKOUT,
+ .enable_bit = MAX9485_CLKOUT1_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+ [MAX9485_CLKOUT2] = {
+ .name = "clkout2",
+ .parent_index = MAX9485_CLKOUT,
+ .enable_bit = MAX9485_CLKOUT2_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+};
+
+static struct clk_hw *
+max9485_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct max9485_driver_data *drvdata = data;
+ unsigned int idx = clkspec->args[0];
+
+ return &drvdata->hw[idx].hw;
+}
+
+static int max9485_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max9485_driver_data *drvdata;
+ struct device *dev = &client->dev;
+ const char *xclk_name;
+ int i, ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(drvdata->xclk))
+ return PTR_ERR(drvdata->xclk);
+
+ xclk_name = __clk_get_name(drvdata->xclk);
+
+ drvdata->supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(drvdata->supply))
+ return PTR_ERR(drvdata->supply);
+
+ ret = regulator_enable(drvdata->supply);
+ if (ret < 0)
+ return ret;
+
+ drvdata->reset_gpio =
+ devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(drvdata->reset_gpio))
+ return PTR_ERR(drvdata->reset_gpio);
+
+ i2c_set_clientdata(client, drvdata);
+ drvdata->client = client;
+
+ ret = i2c_master_recv(drvdata->client, &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+ if (ret < 0) {
+ dev_warn(dev, "Unable to read device register: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < MAX9485_NUM_CLKS; i++) {
+ int parent_index = max9485_clks[i].parent_index;
+ const char *name;
+
+ if (of_property_read_string_index(dev->of_node,
+ "clock-output-names",
+ i, &name) == 0) {
+ drvdata->hw[i].init.name = name;
+ } else {
+ drvdata->hw[i].init.name = max9485_clks[i].name;
+ }
+
+ drvdata->hw[i].init.ops = &max9485_clks[i].ops;
+ drvdata->hw[i].init.num_parents = 1;
+ drvdata->hw[i].init.flags = 0;
+
+ if (parent_index > 0) {
+ drvdata->hw[i].init.parent_names =
+ &drvdata->hw[parent_index].init.name;
+ drvdata->hw[i].init.flags |= CLK_SET_RATE_PARENT;
+ } else {
+ drvdata->hw[i].init.parent_names = &xclk_name;
+ }
+
+ drvdata->hw[i].enable_bit = max9485_clks[i].enable_bit;
+ drvdata->hw[i].hw.init = &drvdata->hw[i].init;
+ drvdata->hw[i].drvdata = drvdata;
+
+ ret = devm_clk_hw_register(dev, &drvdata->hw[i].hw);
+ if (ret < 0)
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, max9485_of_clk_get, drvdata);
+}
+
+static int __maybe_unused max9485_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
+
+ gpiod_set_value_cansleep(drvdata->reset_gpio, 0);
+
+ return 0;
+}
+
+static int __maybe_unused max9485_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
+ int ret;
+
+ gpiod_set_value_cansleep(drvdata->reset_gpio, 1);
+
+ ret = i2c_master_send(client, &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+
+ return ret < 0 ? ret : 0;
+}
+
+static const struct dev_pm_ops max9485_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(max9485_suspend, max9485_resume)
+};
+
+static const struct of_device_id max9485_dt_ids[] = {
+ { .compatible = "maxim,max9485", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max9485_dt_ids);
+
+static const struct i2c_device_id max9485_i2c_ids[] = {
+ { .name = "max9485", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max9485_i2c_ids);
+
+static struct i2c_driver max9485_driver = {
+ .driver = {
+ .name = "max9485",
+ .pm = &max9485_pm_ops,
+ .of_match_table = max9485_dt_ids,
+ },
+ .probe = max9485_i2c_probe,
+ .id_table = max9485_i2c_ids,
+};
+module_i2c_driver(max9485_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
+MODULE_DESCRIPTION("MAX9485 Programmable Audio Clock Generator");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index bb2a6f2f5516..a985bf5e1ac6 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -38,7 +38,6 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- int step;
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -60,9 +59,9 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
ftmp = rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
- step = do_div(ftmp, clk->info->range.step_size);
+ do_div(ftmp, clk->info->range.step_size);
- return step * clk->info->range.step_size + fmin;
+ return ftmp * clk->info->range.step_size + fmin;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
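The fix matters because do_div(x, y) divides x in place and returns the remainder, not the quotient: the old code scaled the remainder by step_size, while the corrected version uses the quotient left in ftmp, so the result is effectively fmin + DIV_ROUND_UP(rate - fmin, step_size) * step_size.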
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 09b6718956bd..153b3a2b5857 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -74,6 +74,33 @@ static int si514_enable_output(struct clk_si514 *data, bool enable)
SI514_CONTROL_OE, enable ? SI514_CONTROL_OE : 0);
}
+static int si514_prepare(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+
+ return si514_enable_output(data, true);
+}
+
+static void si514_unprepare(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+
+ si514_enable_output(data, false);
+}
+
+static int si514_is_prepared(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap, SI514_REG_CONTROL, &val);
+ if (err < 0)
+ return err;
+
+ return !!(val & SI514_CONTROL_OE);
+}
+
/* Retrieve clock multiplier and dividers from hardware */
static int si514_get_muldiv(struct clk_si514 *data,
struct clk_si514_muldiv *settings)
@@ -235,12 +262,17 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si514 *data = to_clk_si514(hw);
struct clk_si514_muldiv settings;
+ unsigned int old_oe_state;
int err;
err = si514_calc_muldiv(&settings, rate);
if (err)
return err;
+ err = regmap_read(data->regmap, SI514_REG_CONTROL, &old_oe_state);
+ if (err)
+ return err;
+
si514_enable_output(data, false);
err = si514_set_muldiv(data, &settings);
@@ -255,12 +287,16 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
- si514_enable_output(data, true);
+ if (old_oe_state & SI514_CONTROL_OE)
+ si514_enable_output(data, true);
return err;
}
static const struct clk_ops si514_clk_ops = {
+ .prepare = si514_prepare,
+ .unprepare = si514_unprepare,
+ .is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
.round_rate = si514_round_rate,
.set_rate = si514_set_rate,
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index 1e2a3b8f9454..64e607f3232a 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -86,6 +86,33 @@ static int si544_enable_output(struct clk_si544 *data, bool enable)
SI544_OE_STATE_ODC_OE, enable ? SI544_OE_STATE_ODC_OE : 0);
}
+static int si544_prepare(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+
+ return si544_enable_output(data, true);
+}
+
+static void si544_unprepare(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+
+ si544_enable_output(data, false);
+}
+
+static int si544_is_prepared(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap, SI544_REG_OE_STATE, &val);
+ if (err < 0)
+ return err;
+
+ return !!(val & SI544_OE_STATE_ODC_OE);
+}
+
/* Retrieve clock multiplier and dividers from hardware */
static int si544_get_muldiv(struct clk_si544 *data,
struct clk_si544_muldiv *settings)
@@ -273,6 +300,7 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si544 *data = to_clk_si544(hw);
struct clk_si544_muldiv settings;
+ unsigned int old_oe_state;
int err;
if (!is_valid_frequency(data, rate))
@@ -282,6 +310,10 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
if (err)
return err;
+ err = regmap_read(data->regmap, SI544_REG_OE_STATE, &old_oe_state);
+ if (err)
+ return err;
+
si544_enable_output(data, false);
/* Allow FCAL for this frequency update */
@@ -303,12 +335,16 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
- si544_enable_output(data, true);
+ if (old_oe_state & SI544_OE_STATE_ODC_OE)
+ si544_enable_output(data, true);
return err;
}
static const struct clk_ops si544_clk_ops = {
+ .prepare = si544_prepare,
+ .unprepare = si544_unprepare,
+ .is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
.round_rate = si544_round_rate,
.set_rate = si544_set_rate,
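Both the si514 and si544 hunks follow the same shape: output enable moves into prepare/unprepare so the clock framework tracks it, and set_rate snapshots the OE bit beforehand so that reprogramming the frequency no longer force-enables an output the consumer had deliberately left off.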
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..d31055ae6ec6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
-#include <linux/stringify.h>
#include "clk.h"
@@ -68,6 +67,7 @@ struct clk_core {
unsigned long max_rate;
unsigned long accuracy;
int phase;
+ struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
struct hlist_head clks;
@@ -691,6 +691,9 @@ static void clk_core_unprepare(struct clk_core *core)
"Unpreparing critical %s\n", core->name))
return;
+ if (core->flags & CLK_SET_RATE_GATE)
+ clk_core_rate_unprotect(core);
+
if (--core->prepare_count > 0)
return;
@@ -765,6 +768,16 @@ static int clk_core_prepare(struct clk_core *core)
core->prepare_count++;
+ /*
+ * CLK_SET_RATE_GATE is a special case of clock protection
+ * Instead of a consumer claiming exclusive rate control, it is
+ * actually the provider which prevents any consumer from making any
+ * operation which could result in a rate change or rate glitch while
+ * the clock is prepared.
+ */
+ if (core->flags & CLK_SET_RATE_GATE)
+ clk_core_rate_protect(core);
+
return 0;
unprepare:
clk_core_unprepare(core->parent);
@@ -1888,9 +1901,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
if (clk_core_rate_is_protected(core))
return -EBUSY;
- if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
- return -EBUSY;
-
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(core, req_rate);
if (!top)
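Together with the prepare/unprepare hunks above, this moves CLK_SET_RATE_GATE enforcement from an ad-hoc prepare_count test in clk_core_set_rate_nolock() onto the generic rate-protection mechanism: a prepared clock carrying the flag is rate-protected, so clk_set_rate() still fails with -EBUSY via clk_core_rate_is_protected(), and the protection propagates up the parent chain like any other protection request.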
@@ -2402,6 +2412,172 @@ int clk_get_phase(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_phase);
+static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
+{
+ /* Assume a default value of 50% */
+ core->duty.num = 1;
+ core->duty.den = 2;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
+
+static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
+{
+ struct clk_duty *duty = &core->duty;
+ int ret = 0;
+
+ if (!core->ops->get_duty_cycle)
+ return clk_core_update_duty_cycle_parent_nolock(core);
+
+ ret = core->ops->get_duty_cycle(core->hw, duty);
+ if (ret)
+ goto reset;
+
+ /* Don't trust the clock provider too much */
+ if (duty->den == 0 || duty->num > duty->den) {
+ ret = -EINVAL;
+ goto reset;
+ }
+
+ return 0;
+
+reset:
+ clk_core_reset_duty_cycle_nolock(core);
+ return ret;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
+{
+ int ret = 0;
+
+ if (core->parent &&
+ core->flags & CLK_DUTY_CYCLE_PARENT) {
+ ret = clk_core_update_duty_cycle_nolock(core->parent);
+ memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+ } else {
+ clk_core_reset_duty_cycle_nolock(core);
+ }
+
+ return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+ struct clk_duty *duty);
+
+static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
+ struct clk_duty *duty)
+{
+ int ret;
+
+ lockdep_assert_held(&prepare_lock);
+
+ if (clk_core_rate_is_protected(core))
+ return -EBUSY;
+
+ trace_clk_set_duty_cycle(core, duty);
+
+ if (!core->ops->set_duty_cycle)
+ return clk_core_set_duty_cycle_parent_nolock(core, duty);
+
+ ret = core->ops->set_duty_cycle(core->hw, duty);
+ if (!ret)
+ memcpy(&core->duty, duty, sizeof(*duty));
+
+ trace_clk_set_duty_cycle_complete(core, duty);
+
+ return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+ struct clk_duty *duty)
+{
+ int ret = 0;
+
+ if (core->parent &&
+ core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
+ ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
+ memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+ }
+
+ return ret;
+}
+
+/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Apply the duty cycle ratio if the ratio is valid and the clock can
+ * perform this operation.
+ *
+ * Returns (0) on success, a negative errno otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
+{
+ int ret;
+ struct clk_duty duty;
+
+ if (!clk)
+ return 0;
+
+ /* sanity check the ratio */
+ if (den == 0 || num > den)
+ return -EINVAL;
+
+ duty.num = num;
+ duty.den = den;
+
+ clk_prepare_lock();
+
+ if (clk->exclusive_count)
+ clk_core_rate_unprotect(clk->core);
+
+ ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
+
+ if (clk->exclusive_count)
+ clk_core_rate_protect(clk->core);
+
+ clk_prepare_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
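A consumer-side sketch of the new API, requesting a 1:3 duty cycle; the device, function and con_id are illustrative, and a clock without duty-cycle support either errors out or returns 0 when nothing can propagate to a capable parent:

static int foo_setup_clock(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "sclk");	/* illustrative con_id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* num = 1, den = 3: ask for a 33% high period */
	ret = clk_set_duty_cycle(clk, 1, 3);
	if (ret)
		dev_warn(dev, "duty cycle request failed: %d\n", ret);

	return ret;
}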
+
+static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
+ unsigned int scale)
+{
+ struct clk_duty *duty = &core->duty;
+ int ret;
+
+ clk_prepare_lock();
+
+ ret = clk_core_update_duty_cycle_nolock(core);
+ if (!ret)
+ ret = mult_frac(scale, duty->num, duty->den);
+
+ clk_prepare_unlock();
+
+ return ret;
+}
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio of a clock node multiplied by the provided
+ * scaling factor, or negative errno on error.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
+{
+ if (!clk)
+ return 0;
+
+ return clk_core_get_scaled_duty_cycle(clk->core, scale);
+}
+EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
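The scale argument only fixes the integer resolution: the helper returns mult_frac(scale, num, den) for the stored ratio. For the default 1/2 duty cycle (values illustrative):

clk_get_scaled_duty_cycle(clk, 100);	/* mult_frac(100, 1, 2) == 50 */
clk_get_scaled_duty_cycle(clk, 100000);	/* == 50000, the resolution clk_summary prints below */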
+
/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
@@ -2455,12 +2631,13 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
if (!c)
return;
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
+ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate(c), clk_core_get_accuracy(c),
- clk_core_get_phase(c));
+ clk_core_get_phase(c),
+ clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2482,9 +2659,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
- seq_puts(s, " enable prepare protect \n");
- seq_puts(s, " clock count count count rate accuracy phase\n");
- seq_puts(s, "----------------------------------------------------------------------------------------\n");
+ seq_puts(s, " enable prepare protect duty\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -2511,6 +2688,8 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+ seq_printf(s, "\"duty_cycle\": %u",
+ clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
@@ -2559,7 +2738,7 @@ static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
@@ -2572,6 +2751,7 @@ static const struct {
ENTRY(CLK_SET_RATE_UNGATE),
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
+ ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};
@@ -2610,6 +2790,17 @@ static int possible_parents_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);
+static int clk_duty_cycle_show(struct seq_file *s, void *data)
+{
+ struct clk_core *core = s->private;
+ struct clk_duty *duty = &core->duty;
+
+ seq_printf(s, "%u/%u\n", duty->num, duty->den);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
+
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *root;
@@ -2628,6 +2819,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
+ debugfs_create_file("clk_duty_cycle", 0444, root, core,
+ &clk_duty_cycle_fops);
if (core->num_parents > 1)
debugfs_create_file("clk_possible_parents", 0444, root, core,
@@ -2846,6 +3039,11 @@ static int __clk_core_init(struct clk_core *core)
core->phase = 0;
/*
+ * Set clk's duty cycle.
+ */
+ clk_core_update_duty_cycle_nolock(core);
+
+ /*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
@@ -2934,6 +3132,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
return clk;
}
+/* keep in sync with __clk_put */
void __clk_free_clk(struct clk *clk)
{
clk_prepare_lock();
@@ -3313,6 +3512,7 @@ int __clk_get(struct clk *clk)
return 1;
}
+/* keep in sync with __clk_free_clk */
void __clk_put(struct clk *clk)
{
struct module *owner;
@@ -3346,6 +3546,7 @@ void __clk_put(struct clk *clk)
module_put(owner);
+ kfree_const(clk->con_id);
kfree(clk);
}
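The added kfree_const() implies that clk->con_id is duplicated with kstrdup_const() when the handle is created, which the paired "keep in sync" comments tie to __clk_create_clk(). A sketch of the allocating side, assuming the duplication indeed lives in __clk_create_clk() (it is not part of this hunk):

/* Presumed counterpart in __clk_create_clk(), not shown in this hunk */
clk->con_id = kstrdup_const(con_id, GFP_KERNEL);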
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 7513411140b6..9ab3db8b3988 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -35,9 +35,6 @@ static struct clk *__of_clk_get(struct device_node *np, int index,
struct clk *clk;
int rc;
- if (index < 0)
- return ERR_PTR(-EINVAL);
-
rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
&clkspec);
if (rc)
@@ -199,7 +196,7 @@ struct clk *clk_get(struct device *dev, const char *con_id)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct clk *clk;
- if (dev) {
+ if (dev && dev->of_node) {
clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;
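With this fix, a device without an of_node skips the OF lookup entirely instead of handing __of_clk_get_by_name() a NULL node. The rest of clk_get() presumably falls through to the clkdev table lookup (the fallback line is outside this hunk):

return clk_get_sys(dev_id, con_id);	/* presumed fallback, not in this hunk */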
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index aae62a5b8734..d1bbee19ed0f 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
if (IS_ERR(usb1)) {
- if (PTR_ERR(usb0) == -EPROBE_DEFER)
+ if (PTR_ERR(usb1) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
index 081b039fcb02..6481337382a6 100644
--- a/drivers/clk/davinci/psc-da830.c
+++ b/drivers/clk/davinci/psc-da830.c
@@ -14,6 +14,7 @@
#include "psc.h"
+LPSC_CLKDEV1(aemif_clkdev, NULL, "ti-aemif");
LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
@@ -22,7 +23,7 @@ static const struct davinci_lpsc_clk_info da830_psc0_info[] = {
LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(3, 0, aemif, pll0_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(3, 0, aemif, pll0_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0),
LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
index d196dcbed560..5a18bca464cd 100644
--- a/drivers/clk/davinci/psc-da850.c
+++ b/drivers/clk/davinci/psc-da850.c
@@ -16,8 +16,7 @@
#include "psc.h"
-LPSC_CLKDEV2(emifa_clkdev, NULL, "ti-aemif",
- "aemif", "davinci_nand.0");
+LPSC_CLKDEV1(emifa_clkdev, NULL, "ti-aemif");
LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
diff --git a/drivers/clk/davinci/psc-dm365.c b/drivers/clk/davinci/psc-dm365.c
index 8c73086cc676..c75424f4ea3b 100644
--- a/drivers/clk/davinci/psc-dm365.c
+++ b/drivers/clk/davinci/psc-dm365.c
@@ -21,7 +21,8 @@ LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
+ NULL, "ti-aemif");
LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
diff --git a/drivers/clk/davinci/psc-dm644x.c b/drivers/clk/davinci/psc-dm644x.c
index fc0230e3a3d6..0cea6e0bd5f0 100644
--- a/drivers/clk/davinci/psc-dm644x.c
+++ b/drivers/clk/davinci/psc-dm644x.c
@@ -21,7 +21,8 @@ LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
"fck", "davinci_mdio.0");
LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
+ NULL, "ti-aemif");
LPSC_CLKDEV1(mmcsd_clkdev, NULL, "dm6441-mmc.0");
LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
diff --git a/drivers/clk/davinci/psc-dm646x.c b/drivers/clk/davinci/psc-dm646x.c
index c3f82ed70a80..20012dc7471a 100644
--- a/drivers/clk/davinci/psc-dm646x.c
+++ b/drivers/clk/davinci/psc-dm646x.c
@@ -18,7 +18,8 @@
LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
"fck", "davinci_mdio.0");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
+ NULL, "ti-aemif");
LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 6a42529d31a9..cc5614567a70 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
#ifdef CONFIG_ARCH_DAVINCI_DM355
extern const struct davinci_psc_init_data dm355_psc_init_data;
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
extern const struct davinci_psc_init_data dm365_psc_init_data;
#endif
#ifdef CONFIG_ARCH_DAVINCI_DM644x
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index caa8bd40692c..fc8e782d817b 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/sizes.h>
#include <soc/imx/revision.h>
#include <dt-bindings/clock/imx5-clock.h>
@@ -175,13 +176,13 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_PER_ROOT] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
per_root_sel, ARRAY_SIZE(per_root_sel));
clk[IMX5_CLK_AHB] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
- clk[IMX5_CLK_AHB_MAX] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
- clk[IMX5_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
- clk[IMX5_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
- clk[IMX5_CLK_TMAX1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
- clk[IMX5_CLK_TMAX2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
- clk[IMX5_CLK_TMAX3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
- clk[IMX5_CLK_SPBA] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
+ clk[IMX5_CLK_AHB_MAX] = imx_clk_gate2_flags("ahb_max", "ahb", MXC_CCM_CCGR0, 28, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_AIPS_TZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", MXC_CCM_CCGR0, 24, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_AIPS_TZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", MXC_CCM_CCGR0, 26, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX1] = imx_clk_gate2_flags("tmax1", "ahb", MXC_CCM_CCGR1, 0, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX2] = imx_clk_gate2_flags("tmax2", "ahb", MXC_CCM_CCGR1, 2, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX3] = imx_clk_gate2_flags("tmax3", "ahb", MXC_CCM_CCGR1, 4, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_SPBA] = imx_clk_gate2_flags("spba", "ipg", MXC_CCM_CCGR5, 0, CLK_IS_CRITICAL);
clk[IMX5_CLK_IPG] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
clk[IMX5_CLK_AXI_A] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
clk[IMX5_CLK_AXI_B] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
@@ -252,8 +253,8 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_ECSPI2_PER_GATE] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
clk[IMX5_CLK_CSPI_IPG_GATE] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
clk[IMX5_CLK_SDMA_GATE] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
- clk[IMX5_CLK_EMI_FAST_GATE] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
- clk[IMX5_CLK_EMI_SLOW_GATE] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
+ clk[IMX5_CLK_EMI_FAST_GATE] = imx_clk_gate2_flags("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_EMI_SLOW_GATE] = imx_clk_gate2_flags("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16, CLK_IS_CRITICAL);
clk[IMX5_CLK_IPU_SEL] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
clk[IMX5_CLK_IPU_GATE] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
clk[IMX5_CLK_NFC_GATE] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
@@ -267,7 +268,7 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_VPU_SEL] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
clk[IMX5_CLK_VPU_GATE] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
clk[IMX5_CLK_VPU_REFERENCE_GATE] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
- clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
+ clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2_flags("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24, CLK_IS_CRITICAL);
clk[IMX5_CLK_SSI_APM] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
clk[IMX5_CLK_SSI1_ROOT_SEL] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
@@ -316,21 +317,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
/* move usb phy clk to 24MHz */
clk_set_parent(clk[IMX5_CLK_USB_PHY_SEL], clk[IMX5_CLK_OSC]);
-
- clk_prepare_enable(clk[IMX5_CLK_GPC_DVFS]);
- clk_prepare_enable(clk[IMX5_CLK_AHB_MAX]); /* esdhc3 */
- clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ1]);
- clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ2]); /* fec */
- clk_prepare_enable(clk[IMX5_CLK_SPBA]);
- clk_prepare_enable(clk[IMX5_CLK_EMI_FAST_GATE]); /* fec */
- clk_prepare_enable(clk[IMX5_CLK_EMI_SLOW_GATE]); /* eim */
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC1_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC2_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_ESC_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSP_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
- clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
- clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
}
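This hunk, like the equivalent ones in the other i.MX clock drivers below, replaces hand-rolled boot-time clk_prepare_enable() calls with CLK_IS_CRITICAL at registration, so the core itself keeps these clocks running and the prepare/enable counts stay consistent. A sketch of the difference, assuming imx_clk_gate2_flags() is simply the flag-taking variant of imx_clk_gate2() (as the hunks suggest):

/* Before: register, then force the clock on by hand at init time */
clk = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
clk_prepare_enable(clk);	/* usecount bumped behind the core's back */

/* After: the core enables it at registration and refuses to gate it */
clk = imx_clk_gate2_flags("spba", "ipg", MXC_CCM_CCGR5, 0, CLK_IS_CRITICAL);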
static void __init mx50_clocks_init(struct device_node *np)
@@ -442,10 +428,10 @@ static void __init mx51_clocks_init(struct device_node *np)
clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
- clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
- clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
- clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
- clk[IMX5_CLK_MIPI_HSP_GATE] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
+ clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_HSP_GATE] = imx_clk_gate2_flags("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12, CLK_IS_CRITICAL);
clk[IMX5_CLK_SPDIF_XTAL_SEL] = imx_clk_mux("spdif_xtal_sel", MXC_CCM_CSCMR1, 2, 2,
mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel));
clk[IMX5_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2,
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index b9ea7037e193..8c7c2fcb8d94 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -65,7 +65,7 @@ static const char *ipg_per_sels[] = { "ipg", "osc", };
static const char *ecspi_sels[] = { "pll3_60m", "osc", };
static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
- "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
+ "video_27m", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
"ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
static const char *cko2_sels[] = {
"mmdc_ch0_axi", "mmdc_ch1_axi", "usdhc4", "usdhc1",
@@ -96,12 +96,6 @@ static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static struct clk *clk[IMX6QDL_CLK_END];
static struct clk_onecell_data clk_data;
-static unsigned int const clks_init_on[] __initconst = {
- IMX6QDL_CLK_MMDC_CH0_AXI,
- IMX6QDL_CLK_ROM,
- IMX6QDL_CLK_ARM,
-};
-
static struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -417,7 +411,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *anatop_base, *base;
- int i;
int ret;
clk[IMX6QDL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -794,7 +787,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "mlb_podf", base + 0x74, 18);
else
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
- clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
+ clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2_flags("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_MMDC_CH1_AXI] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
clk[IMX6QDL_CLK_OCRAM] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
clk[IMX6QDL_CLK_OPENVG_AXI] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
@@ -808,7 +801,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
clk[IMX6QDL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
clk[IMX6QDL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
- clk[IMX6QDL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clk[IMX6QDL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
@@ -878,9 +871,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
*/
clk_set_parent(clk[IMX6QDL_CLK_ENFC_SEL], clk[IMX6QDL_CLK_PLL2_PFD2_396M]);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clk[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY1_GATE]);
clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 66b1dd1cfad0..eb6bcbf345a3 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -104,10 +104,6 @@ static struct clk_onecell_data clk_data;
static void __iomem *ccm_base;
static void __iomem *anatop_base;
-static const u32 clks_init_on[] __initconst = {
- IMX6SL_CLK_IPG, IMX6SL_CLK_ARM, IMX6SL_CLK_MMDC_ROOT,
-};
-
/*
* ERR005311 CCM: After exit from WAIT mode, unwanted interrupt(s) taken
* during WAIT mode entry process could cause cache memory
@@ -195,7 +191,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
int ret;
clks[IMX6SL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -426,13 +421,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
pr_warn("%s: failed to set AHB clock rate %d!\n",
__func__, ret);
- /*
- * Make sure those always on clocks are enabled to maintain the correct
- * usecount and enabling/disabling of parent PLLs.
- */
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clks[IMX6SL_CLK_USBPHY1_GATE]);
clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 3651c77fbabe..52379ee49aec 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -92,6 +92,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!base);
/* Do not bypass PLLs initially */
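The added of_node_put() fixes a device-node reference leak: of_find_compatible_node() returns its result with an elevated refcount, and of_iomap() does not consume it. The canonical pattern, with an illustrative compatible string:

struct device_node *np;
void __iomem *base;

np = of_find_compatible_node(NULL, NULL, "vendor,block");	/* takes a reference */
base = of_iomap(np, 0);		/* the mapping outlives the node reference */
of_node_put(np);		/* balance the lookup */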
@@ -253,6 +254,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_DCP] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10);
clks[IMX6SLL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
clks[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
+ clks[IMX6SLL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
clks[IMX6SLL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -267,13 +269,17 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
+ clks[IMX6SLL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
+ clks[IMX6SLL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
+ clks[IMX6SLL_CLK_GPIO6] = imx_clk_gate2("gpio6", "ipg", base + 0x70, 0);
clks[IMX6SLL_CLK_CSI] = imx_clk_gate2("csi", "axi", base + 0x70, 2);
clks[IMX6SLL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
clks[IMX6SLL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
clks[IMX6SLL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6SLL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
+ clks[IMX6SLL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26);
clks[IMX6SLL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
clks[IMX6SLL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
@@ -283,6 +289,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4);
clks[IMX6SLL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
clks[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6SLL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12);
clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 10c771b91ef6..d9f2890ffe62 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -92,14 +92,6 @@ static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static struct clk *clks[IMX6SX_CLK_CLK_END];
static struct clk_onecell_data clk_data;
-static int const clks_init_on[] __initconst = {
- IMX6SX_CLK_AIPS_TZ1, IMX6SX_CLK_AIPS_TZ2, IMX6SX_CLK_AIPS_TZ3,
- IMX6SX_CLK_IPMUX1, IMX6SX_CLK_IPMUX2, IMX6SX_CLK_IPMUX3,
- IMX6SX_CLK_WAKEUP, IMX6SX_CLK_MMDC_P0_FAST, IMX6SX_CLK_MMDC_P0_IPG,
- IMX6SX_CLK_ROM, IMX6SX_CLK_ARM, IMX6SX_CLK_IPG, IMX6SX_CLK_OCRAM,
- IMX6SX_CLK_PER2_MAIN, IMX6SX_CLK_PERCLK, IMX6SX_CLK_TZASC1,
-};
-
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -142,7 +134,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
clks[IMX6SX_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -332,7 +323,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_QSPI1_PODF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
clks[IMX6SX_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
clks[IMX6SX_CLK_LCDIF2_PODF] = imx_clk_divider("lcdif2_podf", "lcdif2_pred", base + 0x1c, 20, 3);
- clks[IMX6SX_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
+ clks[IMX6SX_CLK_PERCLK] = imx_clk_divider_flags("perclk", "perclk_sel", base + 0x1c, 0, 6, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_VID_PODF] = imx_clk_divider("vid_podf", "vid_sel", base + 0x20, 24, 2);
clks[IMX6SX_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6);
clks[IMX6SX_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
@@ -380,8 +371,8 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
/* name parent_name reg shift */
/* CCGR0 */
- clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_APBH_DMA] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
clks[IMX6SX_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6SX_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -394,7 +385,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20);
clks[IMX6SX_CLK_DCIC1] = imx_clk_gate2("dcic1", "display_podf", base + 0x68, 24);
clks[IMX6SX_CLK_DCIC2] = imx_clk_gate2("dcic2", "display_podf", base + 0x68, 26);
- clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+ clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2_flags("aips_tz3", "ahb", base + 0x68, 30, CLK_IS_CRITICAL);
/* CCGR1 */
clks[IMX6SX_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -407,10 +398,11 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai);
clks[IMX6SX_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai);
clks[IMX6SX_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai);
- clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2("wakeup", "ipg", base + 0x6c, 18);
+ clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2_flags("wakeup", "ipg", base + 0x6c, 18, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_GPT_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x6c, 20);
clks[IMX6SX_CLK_GPT_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x6c, 22);
clks[IMX6SX_CLK_GPU] = imx_clk_gate2("gpu", "gpu_core_podf", base + 0x6c, 26);
+ clks[IMX6SX_CLK_OCRAM_S] = imx_clk_gate2("ocram_s", "ahb", base + 0x6c, 28);
clks[IMX6SX_CLK_CANFD] = imx_clk_gate2("canfd", "can_podf", base + 0x6c, 30);
/* CCGR2 */
@@ -420,10 +412,10 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6SX_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6SX_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif1_podf", base + 0x70, 14);
- clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2("ipmux1", "ahb", base + 0x70, 16);
- clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2("ipmux2", "ahb", base + 0x70, 18);
- clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2("ipmux3", "ahb", base + 0x70, 20);
- clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2("tzasc1", "mmdc_podf", base + 0x70, 22);
+ clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2_flags("ipmux1", "ahb", base + 0x70, 16, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2_flags("ipmux2", "ahb", base + 0x70, 18, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2_flags("ipmux3", "ahb", base + 0x70, 20, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2_flags("tzasc1", "mmdc_podf", base + 0x70, 22, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "display_podf", base + 0x70, 28);
clks[IMX6SX_CLK_PXP_AXI] = imx_clk_gate2("pxp_axi", "display_podf", base + 0x70, 30);
@@ -437,15 +429,15 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
clks[IMX6SX_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
clks[IMX6SX_CLK_MLB] = imx_clk_gate2("mlb", "ahb", base + 0x74, 18);
- clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
- clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
- clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28);
+ clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
clks[IMX6SX_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "display_podf", base + 0x78, 0);
clks[IMX6SX_CLK_QSPI2] = imx_clk_gate2("qspi2", "qspi2_podf", base + 0x78, 10);
clks[IMX6SX_CLK_PER1_BCH] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12);
- clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2("per2_main", "ahb", base + 0x78, 14);
+ clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2_flags("per2_main", "ahb", base + 0x78, 14, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16);
clks[IMX6SX_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18);
clks[IMX6SX_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20);
@@ -456,7 +448,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
/* CCGR5 */
- clks[IMX6SX_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clks[IMX6SX_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clks[IMX6SX_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
clks[IMX6SX_CLK_AUDIO] = imx_clk_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio);
@@ -502,9 +494,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clks[IMX6SX_CLK_USBPHY1_GATE]);
clk_prepare_enable(clks[IMX6SX_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index ba563ba50b40..361b43f9742e 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -79,12 +79,6 @@ static const char *cko_sels[] = { "cko1", "cko2", };
static struct clk *clks[IMX6UL_CLK_END];
static struct clk_onecell_data clk_data;
-static int const clks_init_on[] __initconst = {
- IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2,
- IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM,
- IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG,
-};
-
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -129,7 +123,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -142,6 +135,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!base);
clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
@@ -336,8 +330,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
/* CCGR0 */
- clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -360,6 +354,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
if (clk_on_imx6ull())
clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18);
+ clks[IMX6UL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -376,6 +371,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24);
+ clks[IMX6UL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
+ clks[IMX6UL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
if (clk_on_imx6ull()) {
@@ -389,6 +386,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14);
+ clks[IMX6UL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26);
clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
clks[IMX6UL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
@@ -405,11 +403,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6);
clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6);
clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6UL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12);
clks[IMX6UL_CLK_QSPI] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
- clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
- clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
- clks[IMX6UL_CLK_AXI] = imx_clk_gate("axi", "axi_podf", base + 0x74, 28);
+ clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_AXI] = imx_clk_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
clks[IMX6UL_CLK_PER_BCH] = imx_clk_gate2("per_bch", "bch_podf", base + 0x78, 12);
@@ -423,7 +422,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "bch_podf", base + 0x78, 30);
/* CCGR5 */
- clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clks[IMX6UL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clks[IMX6UL_CLK_KPP] = imx_clk_gate2("kpp", "ipg", base + 0x7c, 8);
clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
@@ -497,10 +496,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000);
clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000);
- /* keep all the clks on just for bringup */
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (clk_on_imx6ull())
clk_prepare_enable(clks[IMX6UL_CLK_AIPSTZ3]);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 27217a7ea17e..881b772c4ac9 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -797,6 +797,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0);
clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0);
+ clks[IMX7D_MU_ROOT_CLK] = imx_clk_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0);
clks[IMX7D_CAAM_CLK] = imx_clk_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0);
clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0);
clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0);
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 32fcc75f6f77..4479c102e899 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -134,7 +134,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
"i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 31, 1 },
- .div = { CGU_REG_I2SCDR, 0, 1, 8, -1, -1, -1 },
+ .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 6 },
},
@@ -161,7 +161,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
},
[JZ4740_CLK_UDC] = {
- "udc", CGU_CLK_MUX | CGU_CLK_DIV,
+ "udc", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 29, 1 },
.div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 815659eebea3..efaa70f682b4 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,13 +1,19 @@
config COMMON_CLK_AMLOGIC
bool
- depends on OF
depends on ARCH_MESON || COMPILE_TEST
+ select COMMON_CLK_REGMAP_MESON
+
+config COMMON_CLK_AMLOGIC_AUDIO
+ bool
+ depends on ARCH_MESON || COMPILE_TEST
+ select COMMON_CLK_AMLOGIC
config COMMON_CLK_MESON_AO
bool
depends on OF
depends on ARCH_MESON || COMPILE_TEST
select COMMON_CLK_REGMAP_MESON
+ select RESET_CONTROLLER
config COMMON_CLK_REGMAP_MESON
bool
@@ -15,9 +21,8 @@ config COMMON_CLK_REGMAP_MESON
config COMMON_CLK_MESON8B
bool
- depends on COMMON_CLK_AMLOGIC
+ select COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
- select COMMON_CLK_REGMAP_MESON
help
Support for the clock controller on AmLogic S802 (Meson8),
S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
@@ -25,10 +30,8 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
bool
- depends on COMMON_CLK_AMLOGIC
- select RESET_CONTROLLER
+ select COMMON_CLK_AMLOGIC
select COMMON_CLK_MESON_AO
- select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -36,11 +39,18 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
bool
- depends on COMMON_CLK_AMLOGIC
- select RESET_CONTROLLER
+ select COMMON_CLK_AMLOGIC
select COMMON_CLK_MESON_AO
- select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
Say Y if you want peripherals and CPU frequency scaling to work.
+
+config COMMON_CLK_AXG_AUDIO
+ tristate "Meson AXG Audio Clock Controller Driver"
+ depends on COMMON_CLK_AXG
+ select COMMON_CLK_AMLOGIC_AUDIO
+ select MFD_SYSCON
+ help
+ Support for the audio clock controller on AmLogic A113D devices,
+ aka axg. Say Y if you want the audio subsystem to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index d0d13aeb369a..72ec8c40d848 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,9 +2,11 @@
# Makefile for Meson specific clk
#
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-audio-divider.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o
obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
+obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
new file mode 100644
index 000000000000..a0ed41e73bde
--- /dev/null
+++ b/drivers/clk/meson/axg-audio.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "clkc-audio.h"
+#include "axg-audio.h"
+
+#define AXG_MST_IN_COUNT 8
+#define AXG_SLV_SCLK_COUNT 10
+#define AXG_SLV_LRCLK_COUNT 10
+
+#define AXG_AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_names = (const char *[]){ _pname }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_mux_data){ \
+ .offset = (_reg), \
+ .mask = (_mask), \
+ .shift = (_shift), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_mux_ops, \
+ .parent_names = (_pnames), \
+ .num_parents = ARRAY_SIZE(_pnames), \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_div_data){ \
+ .offset = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_divider_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define AXG_PCLK_GATE(_name, _bit) \
+ AXG_AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "axg_audio_pclk", 0)
+
+/* Audio peripheral clocks */
+static AXG_PCLK_GATE(ddr_arb, 0);
+static AXG_PCLK_GATE(pdm, 1);
+static AXG_PCLK_GATE(tdmin_a, 2);
+static AXG_PCLK_GATE(tdmin_b, 3);
+static AXG_PCLK_GATE(tdmin_c, 4);
+static AXG_PCLK_GATE(tdmin_lb, 5);
+static AXG_PCLK_GATE(tdmout_a, 6);
+static AXG_PCLK_GATE(tdmout_b, 7);
+static AXG_PCLK_GATE(tdmout_c, 8);
+static AXG_PCLK_GATE(frddr_a, 9);
+static AXG_PCLK_GATE(frddr_b, 10);
+static AXG_PCLK_GATE(frddr_c, 11);
+static AXG_PCLK_GATE(toddr_a, 12);
+static AXG_PCLK_GATE(toddr_b, 13);
+static AXG_PCLK_GATE(toddr_c, 14);
+static AXG_PCLK_GATE(loopback, 15);
+static AXG_PCLK_GATE(spdifin, 16);
+static AXG_PCLK_GATE(spdifout, 17);
+static AXG_PCLK_GATE(resample, 18);
+static AXG_PCLK_GATE(power_detect, 19);
+
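For reference, one mechanically expanded instance of the gate macro above: static AXG_PCLK_GATE(pdm, 1) resolves, through AXG_AUD_GATE(), to the definition below (this is just the expansion of the macros shown, with _iflags == 0):

static struct clk_regmap axg_pdm = {
	.data = &(struct clk_regmap_gate_data){
		.offset = AUDIO_CLK_GATE_EN,
		.bit_idx = 1,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_pdm",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "axg_audio_pclk" },
		.num_parents = 1,
		.flags = CLK_DUTY_CYCLE_PARENT | 0,
	},
};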
+/* Audio Master Clocks */
+static const char * const mst_mux_parent_names[] = {
+ "axg_mst_in0", "axg_mst_in1", "axg_mst_in2", "axg_mst_in3",
+ "axg_mst_in4", "axg_mst_in5", "axg_mst_in6", "axg_mst_in7",
+};
+
+#define AXG_MST_MCLK_MUX(_name, _reg) \
+ AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, CLK_MUX_ROUND_CLOSEST, \
+ mst_mux_parent_names, CLK_SET_RATE_PARENT)
+
+static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+#define AXG_MST_MCLK_DIV(_name, _reg) \
+ AXG_AUD_DIV(_name##_div, _reg, 0, 16, CLK_DIVIDER_ROUND_CLOSEST, \
+ "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \
+
+static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+#define AXG_MST_MCLK_GATE(_name, _reg) \
+ AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \
+ CLK_SET_RATE_PARENT)
+
+static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+/* Sample Clocks */
+#define AXG_MST_SCLK_PRE_EN(_name, _reg) \
+ AXG_AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
+ "axg_mst_"#_name"_mclk", 0)
+
+static AXG_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
+ _hi_shift, _hi_width, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct meson_sclk_div_data) { \
+ .div = { \
+ .reg_off = (_reg), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ }, \
+ .hi = { \
+ .reg_off = (_reg), \
+ .shift = (_hi_shift), \
+ .width = (_hi_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &meson_sclk_div_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define AXG_MST_SCLK_DIV(_name, _reg) \
+ AXG_AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
+ "axg_mst_"#_name"_sclk_pre_en", \
+ CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_MST_SCLK_POST_EN(_name, _reg) \
+ AXG_AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ "axg_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
+ _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct meson_clk_triphase_data) { \
+ .ph0 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift0), \
+ .width = (_width), \
+ }, \
+ .ph1 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift1), \
+ .width = (_width), \
+ }, \
+ .ph2 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift2), \
+ .width = (_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &meson_clk_triphase_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_MST_SCLK(_name, _reg) \
+ AXG_AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
+ "axg_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AXG_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AXG_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AXG_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AXG_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AXG_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+#define AXG_MST_LRCLK_DIV(_name, _reg) \
+ AXG_AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
+ "axg_mst_"#_name"_sclk_post_en", 0) \
+
+static AXG_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_MST_LRCLK(_name, _reg) \
+ AXG_AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
+ "axg_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
+
+static AXG_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AXG_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AXG_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AXG_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AXG_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AXG_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+static const char * const tdm_sclk_parent_names[] = {
+ "axg_mst_a_sclk", "axg_mst_b_sclk", "axg_mst_c_sclk",
+ "axg_mst_d_sclk", "axg_mst_e_sclk", "axg_mst_f_sclk",
+ "axg_slv_sclk0", "axg_slv_sclk1", "axg_slv_sclk2",
+ "axg_slv_sclk3", "axg_slv_sclk4", "axg_slv_sclk5",
+ "axg_slv_sclk6", "axg_slv_sclk7", "axg_slv_sclk8",
+ "axg_slv_sclk9"
+};
+
+#define AXG_TDM_SCLK_MUX(_name, _reg) \
+ AXG_AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_sclk_parent_names, 0)
+
+static AXG_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK_PRE_EN(_name, _reg) \
+ AXG_AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
+ "axg_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
+
+static AXG_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK_POST_EN(_name, _reg) \
+ AXG_AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
+ "axg_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
+
+static AXG_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK(_name, _reg) \
+ struct clk_regmap axg_tdm##_name##_sclk = { \
+ .data = &(struct meson_clk_phase_data) { \
+ .ph = { \
+ .reg_off = (_reg), \
+ .shift = 29, \
+ .width = 1, \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_tdm"#_name"_sclk", \
+ .ops = &meson_clk_phase_ops, \
+ .parent_names = (const char *[]) \
+ { "axg_tdm"#_name"_sclk_post_en" }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
+ }, \
+}
+
+static AXG_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static const char * const tdm_lrclk_parent_names[] = {
+ "axg_mst_a_lrclk", "axg_mst_b_lrclk", "axg_mst_c_lrclk",
+ "axg_mst_d_lrclk", "axg_mst_e_lrclk", "axg_mst_f_lrclk",
+ "axg_slv_lrclk0", "axg_slv_lrclk1", "axg_slv_lrclk2",
+ "axg_slv_lrclk3", "axg_slv_lrclk4", "axg_slv_lrclk5",
+ "axg_slv_lrclk6", "axg_slv_lrclk7", "axg_slv_lrclk8",
+ "axg_slv_lrclk9"
+};
+
+#define AXG_TDM_LRLCK(_name, _reg) \
+ AXG_AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_lrclk_parent_names, 0)
+
+static AXG_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+/*
+ * Array of all clocks exposed by this provider.
+ * The input clocks of the controller are populated at runtime.
+ */
+static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
+ .hws = {
+ [AUD_CLKID_DDR_ARB] = &axg_ddr_arb.hw,
+ [AUD_CLKID_PDM] = &axg_pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &axg_tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &axg_tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &axg_tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &axg_tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &axg_tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &axg_tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &axg_tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &axg_frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &axg_frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &axg_frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &axg_toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &axg_toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &axg_toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &axg_loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &axg_spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &axg_spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &axg_resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &axg_power_detect.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &axg_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &axg_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &axg_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &axg_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &axg_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &axg_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &axg_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &axg_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &axg_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &axg_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &axg_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &axg_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &axg_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &axg_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &axg_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &axg_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &axg_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &axg_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &axg_spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &axg_spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &axg_spdifout_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &axg_spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &axg_spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &axg_spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &axg_pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &axg_pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &axg_pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &axg_pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &axg_pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &axg_pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &axg_mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &axg_mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &axg_mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &axg_mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &axg_mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &axg_mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &axg_mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &axg_mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &axg_mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &axg_mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &axg_mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &axg_mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &axg_mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &axg_mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &axg_mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &axg_mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &axg_mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &axg_mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &axg_mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &axg_mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &axg_mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &axg_mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &axg_mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &axg_mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &axg_mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &axg_mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &axg_mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &axg_mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &axg_mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &axg_mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &axg_mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &axg_mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &axg_mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &axg_mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &axg_mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &axg_mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &axg_tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &axg_tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &axg_tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &axg_tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &axg_tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &axg_tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &axg_tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &axg_tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &axg_tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &axg_tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &axg_tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &axg_tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &axg_tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &axg_tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &axg_tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &axg_tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &axg_tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &axg_tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &axg_tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &axg_tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &axg_tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &axg_tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &axg_tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &axg_tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &axg_tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &axg_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &axg_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &axg_tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &axg_tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &axg_tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &axg_tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &axg_tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &axg_tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &axg_tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &axg_tdmout_c_lrclk.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
+/* Convenience table to populate regmap in .probe() */
+static struct clk_regmap *const axg_audio_clk_regmaps[] = {
+ &axg_ddr_arb,
+ &axg_pdm,
+ &axg_tdmin_a,
+ &axg_tdmin_b,
+ &axg_tdmin_c,
+ &axg_tdmin_lb,
+ &axg_tdmout_a,
+ &axg_tdmout_b,
+ &axg_tdmout_c,
+ &axg_frddr_a,
+ &axg_frddr_b,
+ &axg_frddr_c,
+ &axg_toddr_a,
+ &axg_toddr_b,
+ &axg_toddr_c,
+ &axg_loopback,
+ &axg_spdifin,
+ &axg_spdifout,
+ &axg_resample,
+ &axg_power_detect,
+ &axg_mst_a_mclk_sel,
+ &axg_mst_b_mclk_sel,
+ &axg_mst_c_mclk_sel,
+ &axg_mst_d_mclk_sel,
+ &axg_mst_e_mclk_sel,
+ &axg_mst_f_mclk_sel,
+ &axg_mst_a_mclk_div,
+ &axg_mst_b_mclk_div,
+ &axg_mst_c_mclk_div,
+ &axg_mst_d_mclk_div,
+ &axg_mst_e_mclk_div,
+ &axg_mst_f_mclk_div,
+ &axg_mst_a_mclk,
+ &axg_mst_b_mclk,
+ &axg_mst_c_mclk,
+ &axg_mst_d_mclk,
+ &axg_mst_e_mclk,
+ &axg_mst_f_mclk,
+ &axg_spdifout_clk_sel,
+ &axg_spdifout_clk_div,
+ &axg_spdifout_clk,
+ &axg_spdifin_clk_sel,
+ &axg_spdifin_clk_div,
+ &axg_spdifin_clk,
+ &axg_pdm_dclk_sel,
+ &axg_pdm_dclk_div,
+ &axg_pdm_dclk,
+ &axg_pdm_sysclk_sel,
+ &axg_pdm_sysclk_div,
+ &axg_pdm_sysclk,
+ &axg_mst_a_sclk_pre_en,
+ &axg_mst_b_sclk_pre_en,
+ &axg_mst_c_sclk_pre_en,
+ &axg_mst_d_sclk_pre_en,
+ &axg_mst_e_sclk_pre_en,
+ &axg_mst_f_sclk_pre_en,
+ &axg_mst_a_sclk_div,
+ &axg_mst_b_sclk_div,
+ &axg_mst_c_sclk_div,
+ &axg_mst_d_sclk_div,
+ &axg_mst_e_sclk_div,
+ &axg_mst_f_sclk_div,
+ &axg_mst_a_sclk_post_en,
+ &axg_mst_b_sclk_post_en,
+ &axg_mst_c_sclk_post_en,
+ &axg_mst_d_sclk_post_en,
+ &axg_mst_e_sclk_post_en,
+ &axg_mst_f_sclk_post_en,
+ &axg_mst_a_sclk,
+ &axg_mst_b_sclk,
+ &axg_mst_c_sclk,
+ &axg_mst_d_sclk,
+ &axg_mst_e_sclk,
+ &axg_mst_f_sclk,
+ &axg_mst_a_lrclk_div,
+ &axg_mst_b_lrclk_div,
+ &axg_mst_c_lrclk_div,
+ &axg_mst_d_lrclk_div,
+ &axg_mst_e_lrclk_div,
+ &axg_mst_f_lrclk_div,
+ &axg_mst_a_lrclk,
+ &axg_mst_b_lrclk,
+ &axg_mst_c_lrclk,
+ &axg_mst_d_lrclk,
+ &axg_mst_e_lrclk,
+ &axg_mst_f_lrclk,
+ &axg_tdmin_a_sclk_sel,
+ &axg_tdmin_b_sclk_sel,
+ &axg_tdmin_c_sclk_sel,
+ &axg_tdmin_lb_sclk_sel,
+ &axg_tdmout_a_sclk_sel,
+ &axg_tdmout_b_sclk_sel,
+ &axg_tdmout_c_sclk_sel,
+ &axg_tdmin_a_sclk_pre_en,
+ &axg_tdmin_b_sclk_pre_en,
+ &axg_tdmin_c_sclk_pre_en,
+ &axg_tdmin_lb_sclk_pre_en,
+ &axg_tdmout_a_sclk_pre_en,
+ &axg_tdmout_b_sclk_pre_en,
+ &axg_tdmout_c_sclk_pre_en,
+ &axg_tdmin_a_sclk_post_en,
+ &axg_tdmin_b_sclk_post_en,
+ &axg_tdmin_c_sclk_post_en,
+ &axg_tdmin_lb_sclk_post_en,
+ &axg_tdmout_a_sclk_post_en,
+ &axg_tdmout_b_sclk_post_en,
+ &axg_tdmout_c_sclk_post_en,
+ &axg_tdmin_a_sclk,
+ &axg_tdmin_b_sclk,
+ &axg_tdmin_c_sclk,
+ &axg_tdmin_lb_sclk,
+ &axg_tdmout_a_sclk,
+ &axg_tdmout_b_sclk,
+ &axg_tdmout_c_sclk,
+ &axg_tdmin_a_lrclk,
+ &axg_tdmin_b_lrclk,
+ &axg_tdmin_c_lrclk,
+ &axg_tdmin_lb_lrclk,
+ &axg_tdmout_a_lrclk,
+ &axg_tdmout_b_lrclk,
+ &axg_tdmout_c_lrclk,
+};
+
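+/*
+ * Helper: get a clock, prepare and enable it, and register the matching
+ * clk_disable_unprepare() as a devm action so it is undone automatically
+ * on probe failure or device removal.
+ */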
+static struct clk *devm_clk_get_enable(struct device *dev, char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s", id);
+ return clk;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable %s", id);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret) {
+ dev_err(dev, "failed to add reset action on %s", id);
+ return ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
+static const struct clk_ops axg_clk_no_ops = {};
+
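+/*
+ * Register a pass-through clock with no operations so an external input
+ * clock shows up in this provider under an "axg_" prefixed name.
+ */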
+static struct clk_hw *axg_clk_hw_register_bypass(struct device *dev,
+ const char *name,
+ const char *parent_name)
+{
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ char *clk_name;
+ int ret;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ clk_name = kasprintf(GFP_KERNEL, "axg_%s", name);
+ if (!clk_name)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = clk_name;
+ init.ops = &axg_clk_no_ops;
+ init.flags = 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ kfree(clk_name);
+
+ return ret ? ERR_PTR(ret) : hw;
+}
+
+static int axg_register_clk_hw_input(struct device *dev,
+ const char *name,
+ unsigned int clkid)
+{
+ struct clk *parent_clk = devm_clk_get(dev, name);
+ struct clk_hw *hw = NULL;
+
+ if (IS_ERR(parent_clk)) {
+ int err = PTR_ERR(parent_clk);
+
+ /* It is ok if an input clock is missing */
+ if (err == -ENOENT) {
+ dev_dbg(dev, "%s not provided", name);
+ } else {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s clock", name);
+ return err;
+ }
+ } else {
+ hw = axg_clk_hw_register_bypass(dev, name,
+ __clk_get_name(parent_clk));
+ }
+
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to register %s clock", name);
+ return PTR_ERR(hw);
+ }
+
+ axg_audio_hw_onecell_data.hws[clkid] = hw;
+ return 0;
+}
+
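+/*
+ * Register a group of numbered optional inputs derived from a basename,
+ * e.g. "slv_sclk0" ... "slv_sclk9" for basename "slv_sclk".
+ */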
+static int axg_register_clk_hw_inputs(struct device *dev,
+ const char *basename,
+ unsigned int count,
+ unsigned int clkid)
+{
+ char *name;
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ name = kasprintf(GFP_KERNEL, "%s%d", basename, i);
+ if (!name)
+ return -ENOMEM;
+
+ ret = axg_register_clk_hw_input(dev, name, clkid + i);
+ kfree(name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config axg_audio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = AUDIO_CLK_PDMIN_CTRL1,
+};
+
+static int axg_audio_clkc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *map;
+ struct resource *res;
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk_hw *hw;
+ int ret, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ /* Get the mandatory peripheral clock */
+ clk = devm_clk_get_enable(dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = device_reset(dev);
+ if (ret) {
+ dev_err(dev, "failed to reset device\n");
+ return ret;
+ }
+
+ /* Register the peripheral input clock */
+ hw = axg_clk_hw_register_bypass(dev, "audio_pclk",
+ __clk_get_name(clk));
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ axg_audio_hw_onecell_data.hws[AUD_CLKID_PCLK] = hw;
+
+ /* Register optional input master clocks */
+ ret = axg_register_clk_hw_inputs(dev, "mst_in",
+ AXG_MST_IN_COUNT,
+ AUD_CLKID_MST0);
+ if (ret)
+ return ret;
+
+ /* Register optional input slave sclks */
+ ret = axg_register_clk_hw_inputs(dev, "slv_sclk",
+ AXG_SLV_SCLK_COUNT,
+ AUD_CLKID_SLV_SCLK0);
+ if (ret)
+ return ret;
+
+ /* Register optional input slave lrclks */
+ ret = axg_register_clk_hw_inputs(dev, "slv_lrclk",
+ AXG_SLV_LRCLK_COUNT,
+ AUD_CLKID_SLV_LRCLK0);
+ if (ret)
+ return ret;
+
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(axg_audio_clk_regmaps); i++)
+ axg_audio_clk_regmaps[i]->map = map;
+
+ /* Take care to skip the registered input clocks */
+ for (i = AUD_CLKID_DDR_ARB; i < axg_audio_hw_onecell_data.num; i++) {
+ hw = axg_audio_hw_onecell_data.hws[i];
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "failed to register clock %s\n",
+ hw->init->name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &axg_audio_hw_onecell_data);
+}
+
+static const struct of_device_id clkc_match_table[] = {
+ { .compatible = "amlogic,axg-audio-clkc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
+
+static struct platform_driver axg_audio_driver = {
+ .probe = axg_audio_clkc_probe,
+ .driver = {
+ .name = "axg-audio-clkc",
+ .of_match_table = clkc_match_table,
+ },
+};
+module_platform_driver(axg_audio_driver);
+
+MODULE_DESCRIPTION("Amlogic A113x Audio Clock driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
new file mode 100644
index 000000000000..7191b39c9d65
--- /dev/null
+++ b/drivers/clk/meson/axg-audio.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __AXG_AUDIO_CLKC_H
+#define __AXG_AUDIO_CLKC_H
+
+/*
+ * Audio Clock register offsets
+ *
+ * Register offsets from the datasheet must be multiplied by 4 before
+ * adding them to the base address to get the right offset.
+ */
+#define AUDIO_CLK_GATE_EN 0x000
+#define AUDIO_MCLK_A_CTRL 0x004
+#define AUDIO_MCLK_B_CTRL 0x008
+#define AUDIO_MCLK_C_CTRL 0x00C
+#define AUDIO_MCLK_D_CTRL 0x010
+#define AUDIO_MCLK_E_CTRL 0x014
+#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_A_SCLK_CTRL0 0x040
+#define AUDIO_MST_A_SCLK_CTRL1 0x044
+#define AUDIO_MST_B_SCLK_CTRL0 0x048
+#define AUDIO_MST_B_SCLK_CTRL1 0x04C
+#define AUDIO_MST_C_SCLK_CTRL0 0x050
+#define AUDIO_MST_C_SCLK_CTRL1 0x054
+#define AUDIO_MST_D_SCLK_CTRL0 0x058
+#define AUDIO_MST_D_SCLK_CTRL1 0x05C
+#define AUDIO_MST_E_SCLK_CTRL0 0x060
+#define AUDIO_MST_E_SCLK_CTRL1 0x064
+#define AUDIO_MST_F_SCLK_CTRL0 0x068
+#define AUDIO_MST_F_SCLK_CTRL1 0x06C
+#define AUDIO_CLK_TDMIN_A_CTRL 0x080
+#define AUDIO_CLK_TDMIN_B_CTRL 0x084
+#define AUDIO_CLK_TDMIN_C_CTRL 0x088
+#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
+#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
+#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
+#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
+#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
+#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
+#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
+#define AUDIO_CLK_LOCKER_CTRL 0x0A8
+#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
+#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+
+/*
+ * CLKID index values
+ * These indices are entirely contrived and do not map onto the hardware.
+ */
+
+#define AUD_CLKID_PCLK 0
+#define AUD_CLKID_MST0 1
+#define AUD_CLKID_MST1 2
+#define AUD_CLKID_MST2 3
+#define AUD_CLKID_MST3 4
+#define AUD_CLKID_MST4 5
+#define AUD_CLKID_MST5 6
+#define AUD_CLKID_MST6 7
+#define AUD_CLKID_MST7 8
+#define AUD_CLKID_MST_A_MCLK_SEL 59
+#define AUD_CLKID_MST_B_MCLK_SEL 60
+#define AUD_CLKID_MST_C_MCLK_SEL 61
+#define AUD_CLKID_MST_D_MCLK_SEL 62
+#define AUD_CLKID_MST_E_MCLK_SEL 63
+#define AUD_CLKID_MST_F_MCLK_SEL 64
+#define AUD_CLKID_MST_A_MCLK_DIV 65
+#define AUD_CLKID_MST_B_MCLK_DIV 66
+#define AUD_CLKID_MST_C_MCLK_DIV 67
+#define AUD_CLKID_MST_D_MCLK_DIV 68
+#define AUD_CLKID_MST_E_MCLK_DIV 69
+#define AUD_CLKID_MST_F_MCLK_DIV 70
+#define AUD_CLKID_SPDIFOUT_CLK_SEL 71
+#define AUD_CLKID_SPDIFOUT_CLK_DIV 72
+#define AUD_CLKID_SPDIFIN_CLK_SEL 73
+#define AUD_CLKID_SPDIFIN_CLK_DIV 74
+#define AUD_CLKID_PDM_DCLK_SEL 75
+#define AUD_CLKID_PDM_DCLK_DIV 76
+#define AUD_CLKID_PDM_SYSCLK_SEL 77
+#define AUD_CLKID_PDM_SYSCLK_DIV 78
+#define AUD_CLKID_MST_A_SCLK_PRE_EN 92
+#define AUD_CLKID_MST_B_SCLK_PRE_EN 93
+#define AUD_CLKID_MST_C_SCLK_PRE_EN 94
+#define AUD_CLKID_MST_D_SCLK_PRE_EN 95
+#define AUD_CLKID_MST_E_SCLK_PRE_EN 96
+#define AUD_CLKID_MST_F_SCLK_PRE_EN 97
+#define AUD_CLKID_MST_A_SCLK_DIV 98
+#define AUD_CLKID_MST_B_SCLK_DIV 99
+#define AUD_CLKID_MST_C_SCLK_DIV 100
+#define AUD_CLKID_MST_D_SCLK_DIV 101
+#define AUD_CLKID_MST_E_SCLK_DIV 102
+#define AUD_CLKID_MST_F_SCLK_DIV 103
+#define AUD_CLKID_MST_A_SCLK_POST_EN 104
+#define AUD_CLKID_MST_B_SCLK_POST_EN 105
+#define AUD_CLKID_MST_C_SCLK_POST_EN 106
+#define AUD_CLKID_MST_D_SCLK_POST_EN 107
+#define AUD_CLKID_MST_E_SCLK_POST_EN 108
+#define AUD_CLKID_MST_F_SCLK_POST_EN 109
+#define AUD_CLKID_MST_A_LRCLK_DIV 110
+#define AUD_CLKID_MST_B_LRCLK_DIV 111
+#define AUD_CLKID_MST_C_LRCLK_DIV 112
+#define AUD_CLKID_MST_D_LRCLK_DIV 113
+#define AUD_CLKID_MST_E_LRCLK_DIV 114
+#define AUD_CLKID_MST_F_LRCLK_DIV 115
+#define AUD_CLKID_TDMIN_A_SCLK_PRE_EN 137
+#define AUD_CLKID_TDMIN_B_SCLK_PRE_EN 138
+#define AUD_CLKID_TDMIN_C_SCLK_PRE_EN 139
+#define AUD_CLKID_TDMIN_LB_SCLK_PRE_EN 140
+#define AUD_CLKID_TDMOUT_A_SCLK_PRE_EN 141
+#define AUD_CLKID_TDMOUT_B_SCLK_PRE_EN 142
+#define AUD_CLKID_TDMOUT_C_SCLK_PRE_EN 143
+#define AUD_CLKID_TDMIN_A_SCLK_POST_EN 144
+#define AUD_CLKID_TDMIN_B_SCLK_POST_EN 145
+#define AUD_CLKID_TDMIN_C_SCLK_POST_EN 146
+#define AUD_CLKID_TDMIN_LB_SCLK_POST_EN 147
+#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
+#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
+#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
+
+/* include the CLKIDs which are part of the DT bindings */
+#include <dt-bindings/clock/axg-audio-clkc.h>
+
+#define NR_CLKS 151
+
+#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index bd4dbc696b88..00ce62ad6416 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -12,7 +12,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -626,6 +625,137 @@ static struct clk_regmap axg_mpll3 = {
},
};
+static const struct pll_rate_table axg_pcie_pll_rate_table[] = {
+ {
+ .rate = 100000000,
+ .m = 200,
+ .n = 3,
+ .od = 1,
+ .od2 = 3,
+ },
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence axg_pcie_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL, .def = 0x400106c8 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x00078000 },
+ { .reg = HHI_PCIE_PLL_CNTL6, .def = 0x002323c6 },
+};
+
+static struct clk_regmap axg_pcie_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_PCIE_PLL_CNTL6,
+ .shift = 6,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = axg_pcie_pll_rate_table,
+ .init_regs = axg_pcie_init_regs,
+ .init_count = ARRAY_SIZE(axg_pcie_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap axg_pcie_mux = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .mask = 0x1,
+ .shift = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_mux",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "mpll3", "pcie_pll" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_ref = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .mask = 0x1,
+ .shift = 1,
+		/* skip parent 0, which is reserved for debug */
+ .table = (u32[]){ 1 },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_ref",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "pcie_mux" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_cml_en0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie_cml_en0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "pcie_ref" },
+ .num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_regmap axg_pcie_cml_en1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie_cml_en1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "pcie_ref" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
"xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
@@ -779,6 +909,63 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
},
};
+static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 13, 14, };
+static const char * const gen_clk_parent_names[] = {
+ "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+};
+
+static struct clk_regmap axg_gen_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_gen_clk,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+		 * bits 15:12 select one of 15 possible parents:
+ * xtal, [rtc_oscin_i], [sys_cpu_div16], [ddr_dpll_pt],
+ * hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
+ * fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
+ */
+ .parent_names = gen_clk_parent_names,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ },
+};
+
+static struct clk_regmap axg_gen_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .shift = 0,
+ .width = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gen_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_gen_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "gen_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
@@ -821,6 +1008,7 @@ static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
+static MESON_GATE(axg_mipi_enable, HHI_MIPI_CNTL0, 29);
/* Always On (AO) domain gates */
@@ -910,6 +1098,15 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_FCLK_DIV4_DIV] = &axg_fclk_div4_div.hw,
[CLKID_FCLK_DIV5_DIV] = &axg_fclk_div5_div.hw,
[CLKID_FCLK_DIV7_DIV] = &axg_fclk_div7_div.hw,
+ [CLKID_PCIE_PLL] = &axg_pcie_pll.hw,
+ [CLKID_PCIE_MUX] = &axg_pcie_mux.hw,
+ [CLKID_PCIE_REF] = &axg_pcie_ref.hw,
+ [CLKID_PCIE_CML_EN0] = &axg_pcie_cml_en0.hw,
+ [CLKID_PCIE_CML_EN1] = &axg_pcie_cml_en1.hw,
+ [CLKID_MIPI_ENABLE] = &axg_mipi_enable.hw,
+ [CLKID_GEN_CLK_SEL] = &axg_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &axg_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &axg_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -988,6 +1185,15 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_fclk_div4,
&axg_fclk_div5,
&axg_fclk_div7,
+ &axg_pcie_pll,
+ &axg_pcie_mux,
+ &axg_pcie_ref,
+ &axg_pcie_cml_en0,
+ &axg_pcie_cml_en1,
+ &axg_mipi_enable,
+ &axg_gen_clk_sel,
+ &axg_gen_clk_div,
+ &axg_gen_clk,
};
static const struct of_device_id clkc_match_table[] = {
@@ -995,49 +1201,17 @@ static const struct of_device_id clkc_match_table[] = {
{}
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static int axg_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
- void __iomem *clk_base = NULL;
struct regmap *map;
int ret, i;
/* Get the hhi system controller node if available */
map = syscon_node_to_regmap(of_get_parent(dev->of_node));
if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap - Trying obsolete regs\n");
-
- /*
- * FIXME: HHI registers should be accessed through
- * the appropriate system controller. This is required because
- * there is more than just clocks in this register space
- *
- * This fallback method is only provided temporarily until
- * all the platform DTs are properly using the syscon node
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
-
- clk_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(dev, "Unable to map clk base\n");
- return -ENXIO;
- }
-
- map = devm_regmap_init_mmio(dev, clk_base,
- &clkc_regmap_config);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
}
/* Populate regmap for the regmap backed clocks */
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index b421df6a7ea0..1d04144a1b2c 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -16,6 +16,7 @@
* Register offsets from the data sheet must be multiplied by 4 before
* adding them to the base address to get the right value.
*/
+#define HHI_MIPI_CNTL0 0x00
#define HHI_GP0_PLL_CNTL 0x40
#define HHI_GP0_PLL_CNTL2 0x44
#define HHI_GP0_PLL_CNTL3 0x48
@@ -127,8 +128,13 @@
#define CLKID_FCLK_DIV4_DIV 73
#define CLKID_FCLK_DIV5_DIV 74
#define CLKID_FCLK_DIV7_DIV 75
+#define CLKID_PCIE_PLL 76
+#define CLKID_PCIE_MUX 77
+#define CLKID_PCIE_REF 78
+#define CLKID_GEN_CLK_SEL 82
+#define CLKID_GEN_CLK_DIV 83
-#define NR_CLKS 76
+#define NR_CLKS 85
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
deleted file mode 100644
index 58f546e04807..000000000000
--- a/drivers/clk/meson/clk-audio-divider.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017 AmLogic, Inc.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-/*
- * i2s master clock divider: The algorithm of the generic clk-divider used with
- * a very precise clock parent such as the mpll tends to select a low divider
- * factor. This gives poor results with this particular divider, especially with
- * high frequencies (> 100 MHz)
- *
- * This driver try to select the maximum possible divider with the rate the
- * upstream clock can provide.
- */
-
-#include <linux/clk-provider.h>
-#include "clkc.h"
-
-static inline struct meson_clk_audio_div_data *
-meson_clk_audio_div_data(struct clk_regmap *clk)
-{
- return (struct meson_clk_audio_div_data *)clk->data;
-}
-
-static int _div_round(unsigned long parent_rate, unsigned long rate,
- unsigned long flags)
-{
- if (flags & CLK_DIVIDER_ROUND_CLOSEST)
- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate, rate);
-
- return DIV_ROUND_UP_ULL((u64)parent_rate, rate);
-}
-
-static int _get_val(unsigned long parent_rate, unsigned long rate)
-{
- return DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
-}
-
-static int _valid_divider(unsigned int width, int divider)
-{
- int max_divider = 1 << width;
-
- return clamp(divider, 1, max_divider);
-}
-
-static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- unsigned long divider;
-
- divider = meson_parm_read(clk->map, &adiv->div);
-
- return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
-}
-
-static long audio_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- unsigned long max_prate;
- int divider;
-
- if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
- divider = _div_round(*parent_rate, rate, adiv->flags);
- divider = _valid_divider(adiv->div.width, divider);
- return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
- }
-
- /* Get the maximum parent rate */
- max_prate = clk_hw_round_rate(clk_hw_get_parent(hw), ULONG_MAX);
-
- /* Get the corresponding rounded down divider */
- divider = max_prate / rate;
- divider = _valid_divider(adiv->div.width, divider);
-
- /* Get actual rate of the parent */
- *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- divider * rate);
-
- return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
-}
-
-static int audio_divider_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- int val = _get_val(parent_rate, rate);
-
- meson_parm_write(clk->map, &adiv->div, val);
-
- return 0;
-}
-
-const struct clk_ops meson_clk_audio_divider_ro_ops = {
- .recalc_rate = audio_divider_recalc_rate,
- .round_rate = audio_divider_round_rate,
-};
-
-const struct clk_ops meson_clk_audio_divider_ops = {
- .recalc_rate = audio_divider_recalc_rate,
- .round_rate = audio_divider_round_rate,
- .set_rate = audio_divider_set_rate,
-};
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
new file mode 100644
index 000000000000..cba43748ce3d
--- /dev/null
+++ b/drivers/clk/meson/clk-phase.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc.h"
+
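+/* A phase field of width w steps in 360 / 2^w degrees, e.g. 90 for w = 2 */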
+#define phase_step(_width) (360 / (1 << (_width)))
+
+static inline struct meson_clk_phase_data *
+meson_clk_phase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_phase_data *)clk->data;
+}
+
+int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
+{
+ return phase_step(width) * val;
+}
+EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val);
+
+unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
+{
+ unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width));
+
+ /*
+ * This last calculation is here for cases when degrees is rounded
+ * to 360, in which case val == (1 << width).
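+	 * For example, with width = 3 the step is 45 degrees, so 180 maps
+	 * to val = 4 and a rounded 360 wraps back to val = 0.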
+ */
+ return val % (1 << width);
+}
+EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val);
+
+static int meson_clk_phase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_phase_data *phase = meson_clk_phase_data(clk);
+ unsigned int val;
+
+ val = meson_parm_read(clk->map, &phase->ph);
+
+ return meson_clk_degrees_from_val(val, phase->ph.width);
+}
+
+static int meson_clk_phase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_phase_data *phase = meson_clk_phase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, phase->ph.width);
+ meson_parm_write(clk->map, &phase->ph, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_phase_ops = {
+ .get_phase = meson_clk_phase_get_phase,
+ .set_phase = meson_clk_phase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c
new file mode 100644
index 000000000000..4a59936251e5
--- /dev/null
+++ b/drivers/clk/meson/clk-triphase.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc-audio.h"
+
+/*
+ * This is a special clock for the audio controller.
+ * The phase of the mst_sclk clock output can be controlled independently
+ * for the outside world (ph0), the tdmout (ph1) and the tdmin (ph2).
+ * Controlling these 3 phases as just one makes things simpler and
+ * gives the same clock view to all the elements on the i2s bus.
+ * If necessary, we can still control the phase in the tdm block,
+ * which makes this independent control redundant.
+ */
+static inline struct meson_clk_triphase_data *
+meson_clk_triphase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_triphase_data *)clk->data;
+}
+
+static void meson_clk_triphase_sync(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ /* Get phase 0 and sync it to phase 1 and 2 */
+ val = meson_parm_read(clk->map, &tph->ph0);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+}
+
+static int meson_clk_triphase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+	/* Phases are in sync, reading phase 0 is enough */
+ val = meson_parm_read(clk->map, &tph->ph0);
+
+ return meson_clk_degrees_from_val(val, tph->ph0.width);
+}
+
+static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
+ meson_parm_write(clk->map, &tph->ph0, val);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_triphase_ops = {
+ .init = meson_clk_triphase_sync,
+ .get_phase = meson_clk_triphase_get_phase,
+ .set_phase = meson_clk_triphase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/clkc-audio.h
new file mode 100644
index 000000000000..0a7c157ebf81
--- /dev/null
+++ b/drivers/clk/meson/clkc-audio.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLKC_AUDIO_H
+#define __MESON_CLKC_AUDIO_H
+
+#include "clkc.h"
+
+struct meson_clk_triphase_data {
+ struct parm ph0;
+ struct parm ph1;
+ struct parm ph2;
+};
+
+struct meson_sclk_div_data {
+ struct parm div;
+ struct parm hi;
+ unsigned int cached_div;
+ struct clk_duty cached_duty;
+};
+
+extern const struct clk_ops meson_clk_triphase_ops;
+extern const struct clk_ops meson_sclk_div_ops;
+
+#endif /* __MESON_CLKC_AUDIO_H */
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 2fb084330ee9..24cec16b6038 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -91,11 +91,13 @@ struct meson_clk_mpll_data {
#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
-struct meson_clk_audio_div_data {
- struct parm div;
- u8 flags;
+struct meson_clk_phase_data {
+ struct parm ph;
};
+int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
+unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
+
#define MESON_GATE(_name, _reg, _bit) \
struct clk_regmap _name = { \
.data = &(struct clk_regmap_gate_data){ \
@@ -117,7 +119,6 @@ extern const struct clk_ops meson_clk_pll_ops;
extern const struct clk_ops meson_clk_cpu_ops;
extern const struct clk_ops meson_clk_mpll_ro_ops;
extern const struct clk_ops meson_clk_mpll_ops;
-extern const struct clk_ops meson_clk_audio_divider_ro_ops;
-extern const struct clk_ops meson_clk_audio_divider_ops;
+extern const struct clk_ops meson_clk_phase_ops;
#endif /* __CLKC_H */
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..86d3ae58e84c 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -498,6 +497,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -970,28 +970,26 @@ static struct clk_regmap gxbb_cts_amclk_sel = {
.mask = 0x3,
.shift = 9,
.table = (u32[]){ 1, 2, 3 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_regmap gxbb_cts_amclk_div = {
- .data = &(struct meson_clk_audio_div_data){
- .div = {
- .reg_off = HHI_AUD_CLK_CNTL,
- .shift = 0,
- .width = 8,
- },
+ .data = &(struct clk_regmap_div_data) {
+ .offset = HHI_AUD_CLK_CNTL,
+ .shift = 0,
+ .width = 8,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
- .ops = &meson_clk_audio_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "cts_amclk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1018,13 +1016,13 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.mask = 0x3,
.shift = 25,
.table = (u32[]){ 1, 2, 3 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1626,6 +1624,63 @@ static struct clk_regmap gxbb_vdec_hevc = {
},
};
+static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 13, 14, };
+static const char * const gen_clk_parent_names[] = {
+ "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+};
+
+static struct clk_regmap gxbb_gen_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_gen_clk,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+		 * bits 15:12 select one of 15 possible parents:
+ * xtal, [rtc_oscin_i], [sys_cpu_div16], [ddr_dpll_pt],
+ * vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
+ * fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
+ */
+ .parent_names = gen_clk_parent_names,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ },
+};
+
+static struct clk_regmap gxbb_gen_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .shift = 0,
+ .width = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gen_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_gen_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "gen_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1875,6 +1930,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
[CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
[CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
+ [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2037,6 +2095,9 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
[CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
[CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
+ [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2201,6 +2262,9 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_vdec_hevc_sel,
&gxbb_vdec_hevc_div,
&gxbb_vdec_hevc,
+ &gxbb_gen_clk_sel,
+ &gxbb_gen_clk_div,
+ &gxbb_gen_clk,
};
struct clkc_data {
@@ -2227,17 +2291,9 @@ static const struct of_device_id clkc_match_table[] = {
{},
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static int gxbb_clkc_probe(struct platform_device *pdev)
{
const struct clkc_data *clkc_data;
- struct resource *res;
- void __iomem *clk_base;
struct regmap *map;
int ret, i;
struct device *dev = &pdev->dev;
@@ -2249,31 +2305,8 @@ static int gxbb_clkc_probe(struct platform_device *pdev)
/* Get the hhi system controller node if available */
map = syscon_node_to_regmap(of_get_parent(dev->of_node));
if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap - Trying obsolete regs\n");
-
- /*
- * FIXME: HHI registers should be accessed through
- * the appropriate system controller. This is required because
- * there is more than just clocks in this register space
- *
- * This fallback method is only provided temporarily until
- * all the platform DTs are properly using the syscon node
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- clk_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(dev, "Unable to map clk base\n");
- return -ENXIO;
- }
-
- map = devm_regmap_init_mmio(dev, clk_base,
- &clkc_regmap_config);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
}
/* Populate regmap for the common regmap backed clocks */
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index ec1a812bf1fd..20dfb1daf5b8 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -66,7 +66,6 @@
#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */
#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */
#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
-#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */
@@ -158,8 +157,10 @@
#define CLKID_VDEC_1_DIV 152
#define CLKID_VDEC_HEVC_SEL 154
#define CLKID_VDEC_HEVC_DIV 155
+#define CLKID_GEN_CLK_SEL 157
+#define CLKID_GEN_CLK_DIV 158
-#define NR_CLKS 157
+#define NR_CLKS 160
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
new file mode 100644
index 000000000000..bc64019b8eeb
--- /dev/null
+++ b/drivers/clk/meson/sclk-div.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * Sample clock generator divider:
+ * This HW divider gates with value 0 but is otherwise a zero-based divider:
+ *
+ * val >= 1
+ * divider = val + 1
+ *
+ * The duty cycle may also be set for the LR clock variant. The duty cycle
+ * ratio is:
+ *
+ * hi in [0, val]
+ * duty_cycle = (1 + hi) / (1 + val)
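+ *
+ * For example, val = 7 gives a divide-by-8; hi = 3 then gives
+ * duty_cycle = 4/8, i.e. a 50% ratio.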
+ */
+
+#include "clkc-audio.h"
+
+static inline struct meson_sclk_div_data *
+meson_sclk_div_data(struct clk_regmap *clk)
+{
+ return (struct meson_sclk_div_data *)clk->data;
+}
+
+static int sclk_div_maxval(struct meson_sclk_div_data *sclk)
+{
+ return (1 << sclk->div.width) - 1;
+}
+
+static int sclk_div_maxdiv(struct meson_sclk_div_data *sclk)
+{
+ return sclk_div_maxval(sclk) + 1;
+}
+
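+/*
+ * The smallest usable divider is 2: a register value of 0 gates the
+ * clock and val >= 1 means divider = val + 1 >= 2.
+ */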
+static int sclk_div_getdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, int maxdiv)
+{
+ int div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
+
+ return clamp(div, 2, maxdiv);
+}
+
+static int sclk_div_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate,
+ struct meson_sclk_div_data *sclk)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ int bestdiv = 0, i;
+ unsigned long maxdiv, now, parent_now;
+ unsigned long best = 0, best_parent = 0;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = sclk_div_maxdiv(sclk);
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT))
+ return sclk_div_getdiv(hw, rate, *prate, maxdiv);
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 2; i <= maxdiv; i++) {
+ /*
+		 * It is the ideal case if the requested rate can be
+		 * divided from the parent clock without needing to change
+		 * the parent rate, so return the divider immediately.
+ */
+ if (rate * i == *prate)
+ return i;
+
+ parent_now = clk_hw_round_rate(parent, rate * i);
+ now = DIV_ROUND_UP_ULL((u64)parent_now, i);
+
+ if (abs(rate - now) < abs(rate - best)) {
+ bestdiv = i;
+ best = now;
+ best_parent = parent_now;
+ }
+ }
+
+ if (!bestdiv)
+ bestdiv = sclk_div_maxdiv(sclk);
+ else
+ *prate = best_parent;
+
+ return bestdiv;
+}
+
+static long sclk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ int div;
+
+ div = sclk_div_bestdiv(hw, rate, prate, sclk);
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+
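+/*
+ * Program the "hi" count from the cached duty ratio and divider:
+ * hi = round(div * num / den) - 1. For instance, a divider of 8 with
+ * a 1/2 ratio programs hi = 3.
+ */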
+static void sclk_apply_ratio(struct clk_regmap *clk,
+ struct meson_sclk_div_data *sclk)
+{
+ unsigned int hi = DIV_ROUND_CLOSEST(sclk->cached_div *
+ sclk->cached_duty.num,
+ sclk->cached_duty.den);
+
+ if (hi)
+ hi -= 1;
+
+ meson_parm_write(clk->map, &sclk->hi, hi);
+}
+
+static int sclk_div_set_duty_cycle(struct clk_hw *hw,
+ struct clk_duty *duty)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ if (MESON_PARM_APPLICABLE(&sclk->hi)) {
+ memcpy(&sclk->cached_duty, duty, sizeof(*duty));
+ sclk_apply_ratio(clk, sclk);
+ }
+
+ return 0;
+}
+
+static int sclk_div_get_duty_cycle(struct clk_hw *hw,
+ struct clk_duty *duty)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ int hi;
+
+ if (!MESON_PARM_APPLICABLE(&sclk->hi)) {
+ duty->num = 1;
+ duty->den = 2;
+ return 0;
+ }
+
+ hi = meson_parm_read(clk->map, &sclk->hi);
+ duty->num = hi + 1;
+ duty->den = sclk->cached_div;
+ return 0;
+}
+
+static void sclk_apply_divider(struct clk_regmap *clk,
+ struct meson_sclk_div_data *sclk)
+{
+ if (MESON_PARM_APPLICABLE(&sclk->hi))
+ sclk_apply_ratio(clk, sclk);
+
+ meson_parm_write(clk->map, &sclk->div, sclk->cached_div - 1);
+}
+
+static int sclk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ unsigned long maxdiv = sclk_div_maxdiv(sclk);
+
+ sclk->cached_div = sclk_div_getdiv(hw, rate, prate, maxdiv);
+
+ if (clk_hw_is_enabled(hw))
+ sclk_apply_divider(clk, sclk);
+
+ return 0;
+}
+
+static unsigned long sclk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ return DIV_ROUND_UP_ULL((u64)prate, sclk->cached_div);
+}
+
+static int sclk_div_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ sclk_apply_divider(clk, sclk);
+
+ return 0;
+}
+
+static void sclk_div_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ meson_parm_write(clk->map, &sclk->div, 0);
+}
+
+static int sclk_div_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ if (meson_parm_read(clk->map, &sclk->div))
+ return 1;
+
+ return 0;
+}
+
+static void sclk_div_init(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ unsigned int val;
+
+ val = meson_parm_read(clk->map, &sclk->div);
+
+ /* if the divider is initially disabled, assume max */
+ if (!val)
+ sclk->cached_div = sclk_div_maxdiv(sclk);
+ else
+ sclk->cached_div = val + 1;
+
+ sclk_div_get_duty_cycle(hw, &sclk->cached_duty);
+}
+
+const struct clk_ops meson_sclk_div_ops = {
+ .recalc_rate = sclk_div_recalc_rate,
+ .round_rate = sclk_div_round_rate,
+ .set_rate = sclk_div_set_rate,
+ .enable = sclk_div_enable,
+ .disable = sclk_div_disable,
+ .is_enabled = sclk_div_is_enabled,
+ .get_duty_cycle = sclk_div_get_duty_cycle,
+ .set_duty_cycle = sclk_div_set_duty_cycle,
+ .init = sclk_div_init,
+};
+EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..499f5962c8b0 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell Armada 37xx SoC Peripheral clocks
*
@@ -5,10 +6,6 @@
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2 or later. This program is licensed "as is"
- * without any warranty of any kind, whether express or implied.
- *
* Most of the peripheral clocks can be modelled like this:
* _____ _______ _______
* TBG-A-P --| | | | | | ______
@@ -35,6 +32,7 @@
#define CLK_SEL 0x10
#define CLK_DIS 0x14
+#define ARMADA_37XX_DVFS_LOAD_1 1
#define LOAD_LEVEL_NR 4
#define ARMADA_37XX_NB_L0L1 0x18
@@ -418,7 +416,6 @@ static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 val;
if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
@@ -428,9 +425,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
val &= pm_cpu->mask_mux;
}
- if (val >= num_parents)
- return -EINVAL;
-
return val;
}
@@ -507,6 +501,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
+ * respectively) to the L0 frequency (1.2 GHz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in-between. The sequence therefore becomes:
+ * 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
+ * 2. Sleep 20 ms to let the VDD voltage stabilize
+ * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+ unsigned int cur_level;
+
+ if (rate != 1200 * 1000 * 1000)
+ return;
+
+ regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+ cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+ if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+ return;
+
+ regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+ ARMADA_37XX_NB_CPU_LOAD_MASK,
+ ARMADA_37XX_DVFS_LOAD_1);
+ msleep(20);
+}
+
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -537,6 +565,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
*/
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ clk_pm_cpu_set_rate_wa(rate, base);
+
regmap_update_bits(base, reg, mask, load_level);
return rate;
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index 6416c1f8e632..e88f8e01fe3a 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -292,8 +292,10 @@ static void __init pxa25x_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
CLK_GET_RATE_NOCACHE, 3686400);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE, 32768);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768));
clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
0, 26, 1);
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 25a30194d27a..d40b63e7bbce 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -314,9 +314,10 @@ static void __init pxa27x_register_plls(void)
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
CLK_GET_RATE_NOCACHE,
13 * MHz);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE,
- 32768 * KHz);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768 * KHz));
clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
}
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 2d126df2bccd..7aa120c3bd08 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -286,9 +286,10 @@ static void __init pxa3xx_register_plls(void)
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
CLK_GET_RATE_NOCACHE,
13 * MHz);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE,
- 32768);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768));
clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
CLK_GET_RATE_NOCACHE,
120 * MHz);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 9c3480dcc38a..064768699fe7 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -59,6 +59,15 @@ config QCOM_CLK_SMD_RPM
Say Y if you want to support the clocks exposed by the RPM on
platforms such as apq8016, apq8084, msm8974 etc.
+config QCOM_CLK_RPMH
+ tristate "RPMh Clock Driver"
+ depends on COMMON_CLK_QCOM && QCOM_RPMH
+ help
+ RPMh manages shared resources on some Qualcomm Technologies, Inc.
+ SoCs. It accepts requests from other hardware subsystems via RSC.
+ Say Y if you want to support the clocks exposed by RPMh on
+ platforms such as SDM845.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
@@ -245,6 +254,16 @@ config SDM_VIDEOCC_845
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SDM_DISPCC_845
+ tristate "SDM845 Display Clock Controller"
+ select SDM_GCC_845
+ depends on COMMON_CLK_QCOM
+ help
	  Support for the display clock controller on Qualcomm Technologies, Inc.
+ SDM845 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 762c01137c2f..21a45035930d 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -37,7 +37,9 @@ obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 3c49a60072f1..a91d97cecbad 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index f981b486c468..66755f0f84fc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_ALPHA_PLL_H__
#define __QCOM_CLK_ALPHA_PLL_H__
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index c58c5538b1b6..bc2205c450b6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 1702efb1c511..b3561e0a3984 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_BRANCH_H__
#define __QCOM_CLK_BRANCH_H__
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b209a2fe86b9..dbd5a9e83554 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -7,6 +7,8 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
struct freq_tbl {
unsigned long freq;
u8 src;
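
Centralizing F() here (and deleting the per-driver copies further below) keeps one definition of the pre-divider encoding: h is stored as 2*h - 1, which turns half-integer dividers such as 3.5 into integer register fields. A standalone sketch of the encoding (field names mirror struct freq_tbl; the src value 4 is a placeholder):

#include <stdio.h>

#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

struct freq_tbl {
	unsigned long freq;
	unsigned char src;
	unsigned char pre_div;	/* holds 2*h - 1 */
	unsigned short m, n;
};

static const struct freq_tbl tbl[] = {
	F(100000000, 4, 6, 0, 0),	/* integer divider 6  -> pre_div 11 */
	F(171428571, 4, 3.5, 0, 0),	/* half-integer 3.5   -> pre_div 6  */
};

int main(void)
{
	for (int i = 0; i < 2; i++)
		printf("%lu Hz: pre_div field = %u\n",
		       tbl[i].freq, tbl[i].pre_div);
	return 0;
}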
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index 1c856d330733..ce80db27ccf2 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
index 90d95cd11ec6..6cfc1bccb255 100644
--- a/drivers/clk/qcom/clk-regmap.h
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_REGMAP_H__
#define __QCOM_CLK_REGMAP_H__
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
new file mode 100644
index 000000000000..9f4fc7773fb2
--- /dev/null
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+#define CLK_RPMH_ARC_EN_OFFSET 0
+#define CLK_RPMH_VRM_EN_OFFSET 4
+
+/**
+ * struct clk_rpmh - individual rpmh clock data structure
+ * @hw: handle between common and hardware-specific interfaces
+ * @res_name: resource name for the rpmh clock
+ * @div: clock divider to compute the clock rate
+ * @res_addr: base address of the rpmh resource within the RPMh register space
+ * @res_on_val: rpmh clock enable value
+ * @state: rpmh clock requested state
+ * @aggr_state: rpmh clock aggregated state
+ * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh
+ * @valid_state_mask: mask to determine the state of the rpmh clock
+ * @dev: device to which it is attached
+ * @peer: pointer to the clock rpmh sibling
+ */
+struct clk_rpmh {
+ struct clk_hw hw;
+ const char *res_name;
+ u8 div;
+ u32 res_addr;
+ u32 res_on_val;
+ u32 state;
+ u32 aggr_state;
+ u32 last_sent_aggr_state;
+ u32 valid_state_mask;
+ struct device *dev;
+ struct clk_rpmh *peer;
+};
+
+struct clk_rpmh_desc {
+ struct clk_hw **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpmh_clk_lock);
+
+#define __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ _res_en_offset, _res_on, _div) \
+ static struct clk_rpmh _platform##_##_name_active; \
+ static struct clk_rpmh _platform##_##_name = { \
+ .res_name = _res_name, \
+ .res_addr = _res_en_offset, \
+ .res_on_val = _res_on, \
+ .div = _div, \
+ .peer = &_platform##_##_name_active, \
+ .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \
+ BIT(RPMH_ACTIVE_ONLY_STATE) | \
+ BIT(RPMH_SLEEP_STATE)), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpmh _platform##_##_name_active = { \
+ .res_name = _res_name, \
+ .res_addr = _res_en_offset, \
+ .res_on_val = _res_on, \
+ .div = _div, \
+ .peer = &_platform##_##_name, \
+ .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \
+ BIT(RPMH_ACTIVE_ONLY_STATE)), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_ops, \
+ .name = #_name_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPMH_ARC(_platform, _name, _name_active, _res_name, \
+ _res_on, _div) \
+ __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ CLK_RPMH_ARC_EN_OFFSET, _res_on, _div)
+
+#define DEFINE_CLK_RPMH_VRM(_platform, _name, _name_active, _res_name, \
+ _div) \
+ __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ CLK_RPMH_VRM_EN_OFFSET, 1, _div)
+
+static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
+{
+ return container_of(_hw, struct clk_rpmh, hw);
+}
+
+static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
+{
+ return (c->last_sent_aggr_state & BIT(state))
+ != (c->aggr_state & BIT(state));
+}
+
+static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
+{
+ struct tcs_cmd cmd = { 0 };
+ u32 cmd_state, on_val;
+ enum rpmh_state state = RPMH_SLEEP_STATE;
+ int ret;
+
+ cmd.addr = c->res_addr;
+ cmd_state = c->aggr_state;
+ on_val = c->res_on_val;
+
+ for (; state <= RPMH_ACTIVE_ONLY_STATE; state++) {
+ if (has_state_changed(c, state)) {
+ if (cmd_state & BIT(state))
+ cmd.data = on_val;
+
+ ret = rpmh_write_async(c->dev, state, &cmd, 1);
+ if (ret) {
+ dev_err(c->dev, "set %s state of %s failed: (%d)\n",
+ !state ? "sleep" :
+ state == RPMH_WAKE_ONLY_STATE ?
+ "wake" : "active", c->res_name, ret);
+ return ret;
+ }
+ }
+ }
+
+ c->last_sent_aggr_state = c->aggr_state;
+ c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
+
+ return 0;
+}
+
+/*
+ * Update state and aggregate state values based on enable value.
+ */
+static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
+ bool enable)
+{
+ int ret;
+
+ /* Nothing required to be done if already off or on */
+ if (enable == c->state)
+ return 0;
+
+ c->state = enable ? c->valid_state_mask : 0;
+ c->aggr_state = c->state | c->peer->state;
+ c->peer->aggr_state = c->aggr_state;
+
+ ret = clk_rpmh_send_aggregate_command(c);
+ if (!ret)
+ return 0;
+
+ if (ret && enable)
+ c->state = 0;
+ else if (ret)
+ c->state = c->valid_state_mask;
+
+ WARN(1, "clk: %s failed to %s\n", c->res_name,
+ enable ? "enable" : "disable");
+ return ret;
+}
+
+static int clk_rpmh_prepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+ int ret = 0;
+
+ mutex_lock(&rpmh_clk_lock);
+ ret = clk_rpmh_aggregate_state_send_command(c, true);
+ mutex_unlock(&rpmh_clk_lock);
+
+ return ret;
+};
+
+static void clk_rpmh_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ mutex_lock(&rpmh_clk_lock);
+ clk_rpmh_aggregate_state_send_command(c, false);
+ mutex_unlock(&rpmh_clk_lock);
+};
+
+static unsigned long clk_rpmh_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_rpmh *r = to_clk_rpmh(hw);
+
+ /*
+	 * RPMh clocks run at a fixed rate; derive it from the parent rate
+	 * and the divider.
+ */
+ return prate / r->div;
+}
+
+static const struct clk_ops clk_rpmh_ops = {
+ .prepare = clk_rpmh_prepare,
+ .unprepare = clk_rpmh_unprepare,
+ .recalc_rate = clk_rpmh_recalc_rate,
+};
+
+/* Resource name must match resource id present in cmd-db. */
+DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
+DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+
+static struct clk_hw *sdm845_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
+ .clks = sdm845_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
+};
+
+static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_rpmh_desc *rpmh = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= rpmh->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rpmh->clks[idx];
+}
+
+static int clk_rpmh_probe(struct platform_device *pdev)
+{
+ struct clk_hw **hw_clks;
+ struct clk_rpmh *rpmh_clk;
+ const struct clk_rpmh_desc *desc;
+ int ret, i;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -ENODEV;
+
+ hw_clks = desc->clks;
+
+ for (i = 0; i < desc->num_clks; i++) {
+ u32 res_addr;
+
+ rpmh_clk = to_clk_rpmh(hw_clks[i]);
+ res_addr = cmd_db_read_addr(rpmh_clk->res_name);
+ if (!res_addr) {
+ dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
+ rpmh_clk->res_name);
+ return -ENODEV;
+ }
+ rpmh_clk->res_addr += res_addr;
+ rpmh_clk->dev = &pdev->dev;
+
+ ret = devm_clk_hw_register(&pdev->dev, hw_clks[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ hw_clks[i]->init->name);
+ return ret;
+ }
+ }
+
+	/* Cast away const; the clk provider callback takes a non-const data pointer. */
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rpmh_hw_get,
+ (void *)desc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add clock provider\n");
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "Registered RPMh clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
+
+static struct platform_driver clk_rpmh_driver = {
+ .probe = clk_rpmh_probe,
+ .driver = {
+ .name = "clk-rpmh",
+ .of_match_table = clk_rpmh_match_table,
+ },
+};
+
+static int __init clk_rpmh_init(void)
+{
+ return platform_driver_register(&clk_rpmh_driver);
+}
+subsys_initcall(clk_rpmh_init);
+
+static void __exit clk_rpmh_exit(void)
+{
+ platform_driver_unregister(&clk_rpmh_driver);
+}
+module_exit(clk_rpmh_exit);
+
+MODULE_DESCRIPTION("QCOM RPMh Clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 39ce64c2783b..db9b2471ac40 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/export.h>
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 00196ee15e73..4aa33ee70bae 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved. */
+
#ifndef __QCOM_CLK_COMMON_H__
#define __QCOM_CLK_COMMON_H__
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
new file mode 100644
index 000000000000..0cc4909b5dbe
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi1_phy_pll_out_byteclk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+ { P_GPLL0_OUT_MAIN_DIV, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+ "bi_tcxo",
+ "disp_cc_pll0",
+ "gcc_disp_gpll0_clk_src",
+ "gcc_disp_gpll0_div_clk_src",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_dsiclk",
+ "dsi1_phy_pll_out_dsiclk",
+ "core_bi_pll_test_se",
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x20ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x2108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_GPLL0_OUT_MAIN, 7, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_names = disp_cc_parent_names_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_names = disp_cc_parent_names_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x2070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_names = disp_cc_parent_names_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x20a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_names = disp_cc_parent_names_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_names = disp_cc_parent_names_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_axi_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20e8,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_div_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x2030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x2104,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_div_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_esc1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_pclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_rot_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x5008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *disp_cc_sdm845_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AXI_CLK] = &disp_cc_mdss_axi_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] =
+ &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] =
+ &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sdm845_resets[] = {
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x5000 },
+};
+
+static struct gdsc *disp_cc_sdm845_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static const struct regmap_config disp_cc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sdm845_desc = {
+ .config = &disp_cc_sdm845_regmap_config,
+ .clks = disp_cc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sdm845_clocks),
+ .resets = disp_cc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sdm845_resets),
+ .gdscs = disp_cc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sdm845_gdscs),
+};
+
+static const struct of_device_id disp_cc_sdm845_match_table[] = {
+ { .compatible = "qcom,sdm845-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sdm845_match_table);
+
+static int disp_cc_sdm845_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config disp_cc_pll0_config = {};
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ disp_cc_pll0_config.l = 0x2c;
+ disp_cc_pll0_config.alpha = 0xcaaa;
+
+ clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+ /* Enable hardware clock gating for DSI and MDP clocks */
+ regmap_update_bits(regmap, 0x8000, 0x7f0, 0x7f0);
+
+ return qcom_cc_really_probe(pdev, &disp_cc_sdm845_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sdm845_driver = {
+ .probe = disp_cc_sdm845_probe,
+ .driver = {
+ .name = "disp_cc-sdm845",
+ .of_match_table = disp_cc_sdm845_match_table,
+ },
+};
+
+static int __init disp_cc_sdm845_init(void)
+{
+ return platform_driver_register(&disp_cc_sdm845_driver);
+}
+subsys_initcall(disp_cc_sdm845_init);
+
+static void __exit disp_cc_sdm845_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sdm845_driver);
+}
+module_exit(disp_cc_sdm845_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DISPCC SDM845 Driver");
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 486d9610355c..9c99a71ea71e 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -106,8 +106,6 @@ static const char * const gcc_xo_pcie_sleep[] = {
"sleep_clk_src",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 46cb256b4aa2..8902ad42ba87 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -179,8 +179,6 @@ static const char * const gcc_xo_ddr_500_200[] = {
"ddrpllapss",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
F(48000000, P_XO, 1, 0, 0),
F(200000000, P_FEPLL200, 1, 0, 0),
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 28eb200d0f1e..5f61225657ab 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -1220,7 +1220,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1269,7 +1268,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1353,7 +1351,6 @@ static struct clk_rcg tsif_ref_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
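
This hunk, together with the matching removals in gcc-mdm9615.c, gcc-msm8660.c and gcc-msm8960.c below, changes consumer-visible behaviour: CLK_SET_RATE_GATE makes the clk core refuse a rate change on a prepared clock. A hedged consumer-side sketch of what the flag removal permits (the "sdc" clock name and the target rate are placeholders):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_sdc_set_rate(struct device *dev)
{
	struct clk *sdc = devm_clk_get(dev, "sdc");	/* name assumed */
	int ret;

	if (IS_ERR(sdc))
		return PTR_ERR(sdc);

	ret = clk_prepare_enable(sdc);
	if (ret)
		return ret;

	/*
	 * With CLK_SET_RATE_GATE on the RCG this call failed once the
	 * clock was prepared; with the flag removed the rate can be
	 * changed while the clock is running.
	 */
	return clk_set_rate(sdc, 52000000);
}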
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 0462f4a8c932..505c6263141d 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -32,8 +32,6 @@
#include "clk-regmap-mux.h"
#include "reset.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_GPLL0,
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index b99dd406e907..849046fbed6d 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -947,7 +947,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_cxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -996,7 +995,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_cxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index c347a0d44bc8..7e930e25c79f 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -1558,7 +1558,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1607,7 +1606,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1656,7 +1654,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1705,7 +1702,6 @@ static struct clk_rcg sdc4_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1754,7 +1750,6 @@ static struct clk_rcg sdc5_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index d6c7f50ba86a..ac2b0aa1e8b5 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -264,8 +264,6 @@ static const char * const gcc_xo_gpll1_emclk_sleep[] = {
"sleep_clk",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x21004,
.m_reg = 0x21008,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index eb551c75fba6..fd495e0471bb 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -1628,7 +1628,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1677,7 +1676,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1726,7 +1724,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1775,7 +1772,6 @@ static struct clk_rcg sdc4_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1824,7 +1820,6 @@ static struct clk_rcg sdc5_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 348e30da4f18..08e2900d172c 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -62,8 +62,6 @@ static const char * const gcc_xo_gpll0_gpll4[] = {
"gpll4_vote",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 1e38efc37180..53f0f369a33e 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -57,8 +57,6 @@ static const char * const gcc_xo_gpll0_gpll4[] = {
"gpll4",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_fixed_factor xo = {
.mult = 1,
.div = 1,
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..9a3290fdd01b 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -32,8 +32,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_GPLL0,
@@ -2781,6 +2779,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75018,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 78d87f5c7098..9f0ae403d5f5 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -25,8 +25,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_AUD_REF_CLK,
P_CORE_BI_PLL_TEST_SE,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index e78e6f5b99fc..fa1a196350f1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -25,8 +25,6 @@
#include "gdsc.h"
#include "reset.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_BI_TCXO,
P_AUD_REF_CLK,
@@ -1103,6 +1101,7 @@ static struct clk_branch gcc_camera_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1129,6 +1128,7 @@ static struct clk_branch gcc_camera_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1270,6 +1270,7 @@ static struct clk_branch gcc_disp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1328,6 +1329,7 @@ static struct clk_branch gcc_disp_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1397,6 +1399,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2985,6 +2988,7 @@ static struct clk_branch gcc_video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3011,6 +3015,7 @@ static struct clk_branch gcc_video_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3049,6 +3054,36 @@ static struct clk_branch gcc_vs_ctrl_clk = {
},
};
+static struct clk_branch gcc_cpuss_dvm_bus_clk = {
+ .halt_reg = 0x48190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.pd = {
@@ -3344,6 +3379,8 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
[GPLL4] = &gpll4.clkr,
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3433,10 +3470,6 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
- /* Enable CPUSS clocks */
- regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
- regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
-
return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
}
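
The two regmap pokes removed from gcc_sdm845_probe() are replaced by proper clk_branch definitions above, marked CLK_IS_CRITICAL so the clk core keeps them running. For reference, a minimal hedged sketch of what that flag does at registration time (the clock name and rate here are placeholders):

#include <linux/clk-provider.h>

/*
 * The clk core prepares and enables a CLK_IS_CRITICAL clock as soon as
 * it is registered and holds that reference, so the clock stays on
 * even if no consumer ever claims it.
 */
static void example_register_critical(void)
{
	clk_register_fixed_rate(NULL, "example_always_on", NULL,
				CLK_IS_CRITICAL, 19200000);
}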
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 30777f9f1a43..4ce1d7c88377 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -219,8 +219,6 @@ static const char * const mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
"sleep_clk_src",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll mmpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 715e7cd94125..91818516c3e0 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -184,8 +184,6 @@ static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
"dsi1pllbyte",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll mmpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..7d4ee109435c 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -34,8 +34,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_MMPLL0,
@@ -2910,6 +2908,7 @@ static struct gdsc mmagic_bimc_gdsc = {
.name = "mmagic_bimc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = ALWAYS_ON,
};
static struct gdsc mmagic_video_gdsc = {
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 9073b7a710ac..5d6a7724a194 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -18,8 +18,6 @@
#include "clk-pll.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_BI_TCXO,
P_CORE_BI_PLL_TEST_SE,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index f9ba71311727..9022bbe1297e 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -21,6 +21,7 @@ config CLK_RENESAS
select CLK_R8A77980 if ARCH_R8A77980
select CLK_R8A77990 if ARCH_R8A77990
select CLK_R8A77995 if ARCH_R8A77995
+ select CLK_R9A06G032 if ARCH_R9A06G032
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -125,6 +126,11 @@ config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R9A06G032
+ bool "Renesas R9A06G032 clock driver"
+ help
	  This is a driver for the clocks of the Renesas R9A06G032 SoC.
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index fe5bac9215e5..e4aa3d6143d2 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
+obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 775b0ceaa337..a85dd50e8911 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -103,6 +103,7 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x26c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cr", R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
@@ -132,6 +133,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
DEF_MOD("cmt3", 300, R8A7795_CLK_R),
DEF_MOD("cmt2", 301, R8A7795_CLK_R),
DEF_MOD("cmt1", 302, R8A7795_CLK_R),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
new file mode 100644
index 000000000000..a0b6ecdc63dd
--- /dev/null
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -0,0 +1,893 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R9A06G032 clock driver
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <dt-bindings/clock/r9a06g032-sysctrl.h>
+
+struct r9a06g032_gate {
+ u16 gate, reset, ready, midle,
+ scon, mirack, mistat;
+};
+
+/* This is used to describe a clock for instantiation */
+struct r9a06g032_clkdesc {
+ const char *name;
+ uint32_t type: 3;
+ uint32_t index: 8;
+ uint32_t source : 8; /* source index + 1 (0 == none) */
+ /* these are used to populate the bitsel struct */
+ union {
+ struct r9a06g032_gate gate;
+ /* for dividers */
+ struct {
+ unsigned int div_min : 10, div_max : 10, reg: 10;
+ u16 div_table[4];
+ };
+ /* For fixed-factor ones */
+ struct {
+ u16 div, mul;
+ };
+ unsigned int factor;
+ unsigned int frequency;
+ /* for dual gate */
+ struct {
+ uint16_t group : 1, index: 3;
+ u16 sel, g1, r1, g2, r2;
+ } dual;
+ };
+} __packed;
+
+#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
+ { .gate = _clk, .reset = _rst, \
+ .ready = _rdy, .midle = _midle, \
+ .scon = _scon, .mirack = _mirack, .mistat = _mistat }
+#define D_GATE(_idx, _n, _src, ...) \
+ { .type = K_GATE, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .gate = I_GATE(__VA_ARGS__), }
+#define D_ROOT(_idx, _n, _mul, _div) \
+ { .type = K_FFC, .index = R9A06G032_##_idx, .name = _n, \
+ .div = _div, .mul = _mul }
+#define D_FFC(_idx, _n, _src, _div) \
+ { .type = K_FFC, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .div = _div, .mul = 1}
+#define D_DIV(_idx, _n, _src, _reg, _min, _max, ...) \
+ { .type = K_DIV, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .reg = _reg, .div_min = _min, .div_max = _max, \
+ .div_table = { __VA_ARGS__ } }
+#define D_UGATE(_idx, _n, _src, _g, _gi, _g1, _r1, _g2, _r2) \
+ { .type = K_DUALGATE, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .dual = { .group = _g, .index = _gi, \
+ .g1 = _g1, .r1 = _r1, .g2 = _g2, .r2 = _r2 }, }
+
+enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE };
+
+/* Internal clock IDs */
+#define R9A06G032_CLKOUT 0
+#define R9A06G032_CLKOUT_D10 2
+#define R9A06G032_CLKOUT_D16 3
+#define R9A06G032_CLKOUT_D160 4
+#define R9A06G032_CLKOUT_D1OR2 5
+#define R9A06G032_CLKOUT_D20 6
+#define R9A06G032_CLKOUT_D40 7
+#define R9A06G032_CLKOUT_D5 8
+#define R9A06G032_CLKOUT_D8 9
+#define R9A06G032_DIV_ADC 10
+#define R9A06G032_DIV_I2C 11
+#define R9A06G032_DIV_NAND 12
+#define R9A06G032_DIV_P1_PG 13
+#define R9A06G032_DIV_P2_PG 14
+#define R9A06G032_DIV_P3_PG 15
+#define R9A06G032_DIV_P4_PG 16
+#define R9A06G032_DIV_P5_PG 17
+#define R9A06G032_DIV_P6_PG 18
+#define R9A06G032_DIV_QSPI0 19
+#define R9A06G032_DIV_QSPI1 20
+#define R9A06G032_DIV_REF_SYNC 21
+#define R9A06G032_DIV_SDIO0 22
+#define R9A06G032_DIV_SDIO1 23
+#define R9A06G032_DIV_SWITCH 24
+#define R9A06G032_DIV_UART 25
+#define R9A06G032_DIV_MOTOR 64
+#define R9A06G032_CLK_DDRPHY_PLLCLK_D4 78
+#define R9A06G032_CLK_ECAT100_D4 79
+#define R9A06G032_CLK_HSR100_D2 80
+#define R9A06G032_CLK_REF_SYNC_D4 81
+#define R9A06G032_CLK_REF_SYNC_D8 82
+#define R9A06G032_CLK_SERCOS100_D2 83
+#define R9A06G032_DIV_CA7 84
+
+#define R9A06G032_UART_GROUP_012 154
+#define R9A06G032_UART_GROUP_34567 155
+
+#define R9A06G032_CLOCK_COUNT (R9A06G032_UART_GROUP_34567 + 1)
+
+static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = {
+ D_ROOT(CLKOUT, "clkout", 25, 1),
+ D_ROOT(CLK_PLL_USB, "clk_pll_usb", 12, 10),
+ D_FFC(CLKOUT_D10, "clkout_d10", CLKOUT, 10),
+ D_FFC(CLKOUT_D16, "clkout_d16", CLKOUT, 16),
+ D_FFC(CLKOUT_D160, "clkout_d160", CLKOUT, 160),
+ D_DIV(CLKOUT_D1OR2, "clkout_d1or2", CLKOUT, 0, 1, 2),
+ D_FFC(CLKOUT_D20, "clkout_d20", CLKOUT, 20),
+ D_FFC(CLKOUT_D40, "clkout_d40", CLKOUT, 40),
+ D_FFC(CLKOUT_D5, "clkout_d5", CLKOUT, 5),
+ D_FFC(CLKOUT_D8, "clkout_d8", CLKOUT, 8),
+ D_DIV(DIV_ADC, "div_adc", CLKOUT, 77, 50, 250),
+ D_DIV(DIV_I2C, "div_i2c", CLKOUT, 78, 12, 16),
+ D_DIV(DIV_NAND, "div_nand", CLKOUT, 82, 12, 32),
+ D_DIV(DIV_P1_PG, "div_p1_pg", CLKOUT, 68, 12, 200),
+ D_DIV(DIV_P2_PG, "div_p2_pg", CLKOUT, 62, 12, 128),
+ D_DIV(DIV_P3_PG, "div_p3_pg", CLKOUT, 64, 8, 128),
+ D_DIV(DIV_P4_PG, "div_p4_pg", CLKOUT, 66, 8, 128),
+ D_DIV(DIV_P5_PG, "div_p5_pg", CLKOUT, 71, 10, 40),
+ D_DIV(DIV_P6_PG, "div_p6_pg", CLKOUT, 18, 12, 64),
+ D_DIV(DIV_QSPI0, "div_qspi0", CLKOUT, 73, 3, 7),
+ D_DIV(DIV_QSPI1, "div_qspi1", CLKOUT, 25, 3, 7),
+ D_DIV(DIV_REF_SYNC, "div_ref_sync", CLKOUT, 56, 2, 16, 2, 4, 8, 16),
+ D_DIV(DIV_SDIO0, "div_sdio0", CLKOUT, 74, 20, 128),
+ D_DIV(DIV_SDIO1, "div_sdio1", CLKOUT, 75, 20, 128),
+ D_DIV(DIV_SWITCH, "div_switch", CLKOUT, 37, 5, 40),
+ D_DIV(DIV_UART, "div_uart", CLKOUT, 79, 12, 128),
+ D_GATE(CLK_25_PG4, "clk_25_pg4", CLKOUT_D40, 0x749, 0x74a, 0x74b, 0, 0xae3, 0, 0),
+ D_GATE(CLK_25_PG5, "clk_25_pg5", CLKOUT_D40, 0x74c, 0x74d, 0x74e, 0, 0xae4, 0, 0),
+ D_GATE(CLK_25_PG6, "clk_25_pg6", CLKOUT_D40, 0x74f, 0x750, 0x751, 0, 0xae5, 0, 0),
+ D_GATE(CLK_25_PG7, "clk_25_pg7", CLKOUT_D40, 0x752, 0x753, 0x754, 0, 0xae6, 0, 0),
+ D_GATE(CLK_25_PG8, "clk_25_pg8", CLKOUT_D40, 0x755, 0x756, 0x757, 0, 0xae7, 0, 0),
+ D_GATE(CLK_ADC, "clk_adc", DIV_ADC, 0x1ea, 0x1eb, 0, 0, 0, 0, 0),
+ D_GATE(CLK_ECAT100, "clk_ecat100", CLKOUT_D10, 0x405, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HSR100, "clk_hsr100", CLKOUT_D10, 0x483, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_I2C0, "clk_i2c0", DIV_I2C, 0x1e6, 0x1e7, 0, 0, 0, 0, 0),
+ D_GATE(CLK_I2C1, "clk_i2c1", DIV_I2C, 0x1e8, 0x1e9, 0, 0, 0, 0, 0),
+ D_GATE(CLK_MII_REF, "clk_mii_ref", CLKOUT_D40, 0x342, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_NAND, "clk_nand", DIV_NAND, 0x284, 0x285, 0, 0, 0, 0, 0),
+ D_GATE(CLK_NOUSBP2_PG6, "clk_nousbp2_pg6", DIV_P2_PG, 0x774, 0x775, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG2, "clk_p1_pg2", DIV_P1_PG, 0x862, 0x863, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG3, "clk_p1_pg3", DIV_P1_PG, 0x864, 0x865, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG4, "clk_p1_pg4", DIV_P1_PG, 0x866, 0x867, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P4_PG3, "clk_p4_pg3", DIV_P4_PG, 0x824, 0x825, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P4_PG4, "clk_p4_pg4", DIV_P4_PG, 0x826, 0x827, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P6_PG1, "clk_p6_pg1", DIV_P6_PG, 0x8a0, 0x8a1, 0x8a2, 0, 0xb60, 0, 0),
+ D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0),
+ D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0),
+ D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 0xb63, 0, 0),
+ D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0),
+ D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0),
+ D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_RMII_REF, "clk_rmii_ref", CLKOUT_D20, 0x341, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SDIO0, "clk_sdio0", DIV_SDIO0, 0x64, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SDIO1, "clk_sdio1", DIV_SDIO1, 0x644, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SERCOS100, "clk_sercos100", CLKOUT_D10, 0x425, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SLCD, "clk_slcd", DIV_P1_PG, 0x860, 0x861, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI0, "clk_spi0", DIV_P3_PG, 0x7e0, 0x7e1, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI1, "clk_spi1", DIV_P3_PG, 0x7e2, 0x7e3, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI2, "clk_spi2", DIV_P3_PG, 0x7e4, 0x7e5, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI3, "clk_spi3", DIV_P3_PG, 0x7e6, 0x7e7, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI4, "clk_spi4", DIV_P4_PG, 0x820, 0x821, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI5, "clk_spi5", DIV_P4_PG, 0x822, 0x823, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SWITCH, "clk_switch", DIV_SWITCH, 0x982, 0x983, 0, 0, 0, 0, 0),
+ D_DIV(DIV_MOTOR, "div_motor", CLKOUT_D5, 84, 2, 8),
+ D_GATE(HCLK_ECAT125, "hclk_ecat125", CLKOUT_D8, 0x400, 0x401, 0, 0x402, 0, 0x440, 0x441),
+ D_GATE(HCLK_PINCONFIG, "hclk_pinconfig", CLKOUT_D40, 0x740, 0x741, 0x742, 0, 0xae0, 0, 0),
+ D_GATE(HCLK_SERCOS, "hclk_sercos", CLKOUT_D10, 0x420, 0x422, 0, 0x421, 0, 0x460, 0x461),
+ D_GATE(HCLK_SGPIO2, "hclk_sgpio2", DIV_P5_PG, 0x8c3, 0x8c4, 0x8c5, 0, 0xb41, 0, 0),
+ D_GATE(HCLK_SGPIO3, "hclk_sgpio3", DIV_P5_PG, 0x8c6, 0x8c7, 0x8c8, 0, 0xb42, 0, 0),
+ D_GATE(HCLK_SGPIO4, "hclk_sgpio4", DIV_P5_PG, 0x8c9, 0x8ca, 0x8cb, 0, 0xb43, 0, 0),
+ D_GATE(HCLK_TIMER0, "hclk_timer0", CLKOUT_D40, 0x743, 0x744, 0x745, 0, 0xae1, 0, 0),
+ D_GATE(HCLK_TIMER1, "hclk_timer1", CLKOUT_D40, 0x746, 0x747, 0x748, 0, 0xae2, 0, 0),
+ D_GATE(HCLK_USBF, "hclk_usbf", CLKOUT_D8, 0xe3, 0, 0, 0xe4, 0, 0x102, 0x103),
+ D_GATE(HCLK_USBH, "hclk_usbh", CLKOUT_D8, 0xe0, 0xe1, 0, 0xe2, 0, 0x100, 0x101),
+ D_GATE(HCLK_USBPM, "hclk_usbpm", CLKOUT_D8, 0xe5, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_48_PG_F, "clk_48_pg_f", CLK_48, 0x78c, 0x78d, 0, 0x78e, 0, 0xb04, 0xb05),
+ D_GATE(CLK_48_PG4, "clk_48_pg4", CLK_48, 0x789, 0x78a, 0x78b, 0, 0xb03, 0, 0),
+ D_FFC(CLK_DDRPHY_PLLCLK_D4, "clk_ddrphy_pllclk_d4", CLK_DDRPHY_PLLCLK, 4),
+ D_FFC(CLK_ECAT100_D4, "clk_ecat100_d4", CLK_ECAT100, 4),
+ D_FFC(CLK_HSR100_D2, "clk_hsr100_d2", CLK_HSR100, 2),
+ D_FFC(CLK_REF_SYNC_D4, "clk_ref_sync_d4", CLK_REF_SYNC, 4),
+ D_FFC(CLK_REF_SYNC_D8, "clk_ref_sync_d8", CLK_REF_SYNC, 8),
+ D_FFC(CLK_SERCOS100_D2, "clk_sercos100_d2", CLK_SERCOS100, 2),
+ D_DIV(DIV_CA7, "div_ca7", CLK_REF_SYNC, 57, 1, 4, 1, 2, 4),
+ D_GATE(HCLK_CAN0, "hclk_can0", CLK_48, 0x783, 0x784, 0x785, 0, 0xb01, 0, 0),
+ D_GATE(HCLK_CAN1, "hclk_can1", CLK_48, 0x786, 0x787, 0x788, 0, 0xb02, 0, 0),
+ D_GATE(HCLK_DELTASIGMA, "hclk_deltasigma", DIV_MOTOR, 0x1ef, 0x1f0, 0x1f1, 0, 0, 0, 0),
+ D_GATE(HCLK_PWMPTO, "hclk_pwmpto", DIV_MOTOR, 0x1ec, 0x1ed, 0x1ee, 0, 0, 0, 0),
+ D_GATE(HCLK_RSV, "hclk_rsv", CLK_48, 0x780, 0x781, 0x782, 0, 0xb00, 0, 0),
+ D_GATE(HCLK_SGPIO0, "hclk_sgpio0", DIV_MOTOR, 0x1e0, 0x1e1, 0x1e2, 0, 0, 0, 0),
+ D_GATE(HCLK_SGPIO1, "hclk_sgpio1", DIV_MOTOR, 0x1e3, 0x1e4, 0x1e5, 0, 0, 0, 0),
+ D_DIV(RTOS_MDC, "rtos_mdc", CLK_REF_SYNC, 100, 80, 640, 80, 160, 320, 640),
+ D_GATE(CLK_CM3, "clk_cm3", CLK_REF_SYNC_D4, 0xba0, 0xba1, 0, 0xba2, 0, 0xbc0, 0xbc1),
+ D_GATE(CLK_DDRC, "clk_ddrc", CLK_DDRPHY_PLLCLK_D4, 0x323, 0x324, 0, 0, 0, 0, 0),
+ D_GATE(CLK_ECAT25, "clk_ecat25", CLK_ECAT100_D4, 0x403, 0x404, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HSR50, "clk_hsr50", CLK_HSR100_D2, 0x484, 0x485, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HW_RTOS, "clk_hw_rtos", CLK_REF_SYNC_D4, 0xc60, 0xc61, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SERCOS50, "clk_sercos50", CLK_SERCOS100_D2, 0x424, 0x423, 0, 0, 0, 0, 0),
+ D_GATE(HCLK_ADC, "hclk_adc", CLK_REF_SYNC_D8, 0x1af, 0x1b0, 0x1b1, 0, 0, 0, 0),
+ D_GATE(HCLK_CM3, "hclk_cm3", CLK_REF_SYNC_D4, 0xc20, 0xc21, 0xc22, 0, 0, 0, 0),
+ D_GATE(HCLK_CRYPTO_EIP150, "hclk_crypto_eip150", CLK_REF_SYNC_D4, 0x123, 0x124, 0x125, 0, 0x142, 0, 0),
+ D_GATE(HCLK_CRYPTO_EIP93, "hclk_crypto_eip93", CLK_REF_SYNC_D4, 0x120, 0x121, 0, 0x122, 0, 0x140, 0x141),
+ D_GATE(HCLK_DDRC, "hclk_ddrc", CLK_REF_SYNC_D4, 0x320, 0x322, 0, 0x321, 0, 0x3a0, 0x3a1),
+ D_GATE(HCLK_DMA0, "hclk_dma0", CLK_REF_SYNC_D4, 0x260, 0x261, 0x262, 0x263, 0x2c0, 0x2c1, 0x2c2),
+ D_GATE(HCLK_DMA1, "hclk_dma1", CLK_REF_SYNC_D4, 0x264, 0x265, 0x266, 0x267, 0x2c3, 0x2c4, 0x2c5),
+ D_GATE(HCLK_GMAC0, "hclk_gmac0", CLK_REF_SYNC_D4, 0x360, 0x361, 0x362, 0x363, 0x3c0, 0x3c1, 0x3c2),
+ D_GATE(HCLK_GMAC1, "hclk_gmac1", CLK_REF_SYNC_D4, 0x380, 0x381, 0x382, 0x383, 0x3e0, 0x3e1, 0x3e2),
+ D_GATE(HCLK_GPIO0, "hclk_gpio0", CLK_REF_SYNC_D4, 0x212, 0x213, 0x214, 0, 0, 0, 0),
+ D_GATE(HCLK_GPIO1, "hclk_gpio1", CLK_REF_SYNC_D4, 0x215, 0x216, 0x217, 0, 0, 0, 0),
+ D_GATE(HCLK_GPIO2, "hclk_gpio2", CLK_REF_SYNC_D4, 0x229, 0x22a, 0x22b, 0, 0, 0, 0),
+ D_GATE(HCLK_HSR, "hclk_hsr", CLK_HSR100_D2, 0x480, 0x482, 0, 0x481, 0, 0x4c0, 0x4c1),
+ D_GATE(HCLK_I2C0, "hclk_i2c0", CLK_REF_SYNC_D8, 0x1a9, 0x1aa, 0x1ab, 0, 0, 0, 0),
+ D_GATE(HCLK_I2C1, "hclk_i2c1", CLK_REF_SYNC_D8, 0x1ac, 0x1ad, 0x1ae, 0, 0, 0, 0),
+ D_GATE(HCLK_LCD, "hclk_lcd", CLK_REF_SYNC_D4, 0x7a0, 0x7a1, 0x7a2, 0, 0xb20, 0, 0),
+ D_GATE(HCLK_MSEBI_M, "hclk_msebi_m", CLK_REF_SYNC_D4, 0x164, 0x165, 0x166, 0, 0x183, 0, 0),
+ D_GATE(HCLK_MSEBI_S, "hclk_msebi_s", CLK_REF_SYNC_D4, 0x160, 0x161, 0x162, 0x163, 0x180, 0x181, 0x182),
+ D_GATE(HCLK_NAND, "hclk_nand", CLK_REF_SYNC_D4, 0x280, 0x281, 0x282, 0x283, 0x2e0, 0x2e1, 0x2e2),
+ D_GATE(HCLK_PG_I, "hclk_pg_i", CLK_REF_SYNC_D4, 0x7ac, 0x7ad, 0, 0x7ae, 0, 0xb24, 0xb25),
+ D_GATE(HCLK_PG19, "hclk_pg19", CLK_REF_SYNC_D4, 0x22c, 0x22d, 0x22e, 0, 0, 0, 0),
+ D_GATE(HCLK_PG20, "hclk_pg20", CLK_REF_SYNC_D4, 0x22f, 0x230, 0x231, 0, 0, 0, 0),
+ D_GATE(HCLK_PG3, "hclk_pg3", CLK_REF_SYNC_D4, 0x7a6, 0x7a7, 0x7a8, 0, 0xb22, 0, 0),
+ D_GATE(HCLK_PG4, "hclk_pg4", CLK_REF_SYNC_D4, 0x7a9, 0x7aa, 0x7ab, 0, 0xb23, 0, 0),
+ D_GATE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302),
+ D_GATE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2),
+ D_GATE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0),
+ D_GATE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0, 0, 0, 0, 0, 0),
+ D_GATE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82),
+ D_GATE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662),
+ D_GATE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0),
+ D_GATE(HCLK_SPI0, "hclk_spi0", CLK_REF_SYNC_D4, 0x200, 0x201, 0x202, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI1, "hclk_spi1", CLK_REF_SYNC_D4, 0x203, 0x204, 0x205, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI2, "hclk_spi2", CLK_REF_SYNC_D4, 0x206, 0x207, 0x208, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI3, "hclk_spi3", CLK_REF_SYNC_D4, 0x209, 0x20a, 0x20b, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI4, "hclk_spi4", CLK_REF_SYNC_D4, 0x20c, 0x20d, 0x20e, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI5, "hclk_spi5", CLK_REF_SYNC_D4, 0x20f, 0x210, 0x211, 0, 0, 0, 0),
+ D_GATE(HCLK_SWITCH, "hclk_switch", CLK_REF_SYNC_D4, 0x980, 0, 0x981, 0, 0, 0, 0),
+ D_GATE(HCLK_SWITCH_RG, "hclk_switch_rg", CLK_REF_SYNC_D4, 0xc40, 0xc41, 0xc42, 0, 0, 0, 0),
+ D_GATE(HCLK_UART0, "hclk_uart0", CLK_REF_SYNC_D8, 0x1a0, 0x1a1, 0x1a2, 0, 0, 0, 0),
+ D_GATE(HCLK_UART1, "hclk_uart1", CLK_REF_SYNC_D8, 0x1a3, 0x1a4, 0x1a5, 0, 0, 0, 0),
+ D_GATE(HCLK_UART2, "hclk_uart2", CLK_REF_SYNC_D8, 0x1a6, 0x1a7, 0x1a8, 0, 0, 0, 0),
+ D_GATE(HCLK_UART3, "hclk_uart3", CLK_REF_SYNC_D4, 0x218, 0x219, 0x21a, 0, 0, 0, 0),
+ D_GATE(HCLK_UART4, "hclk_uart4", CLK_REF_SYNC_D4, 0x21b, 0x21c, 0x21d, 0, 0, 0, 0),
+ D_GATE(HCLK_UART5, "hclk_uart5", CLK_REF_SYNC_D4, 0x220, 0x221, 0x222, 0, 0, 0, 0),
+ D_GATE(HCLK_UART6, "hclk_uart6", CLK_REF_SYNC_D4, 0x223, 0x224, 0x225, 0, 0, 0, 0),
+ D_GATE(HCLK_UART7, "hclk_uart7", CLK_REF_SYNC_D4, 0x226, 0x227, 0x228, 0, 0, 0, 0),
+ /*
+ * These are not hardware clocks, but are needed to handle the special
+ * case where we have a 'selector bit' that doesn't just change the
+ * parent for a clock, but also the gate it's supposed to use.
+ */
+ {
+ .index = R9A06G032_UART_GROUP_012,
+ .name = "uart_group_012",
+ .type = K_BITSEL,
+ .source = 1 + R9A06G032_DIV_UART,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
+ .dual.group = 0,
+ },
+ {
+ .index = R9A06G032_UART_GROUP_34567,
+ .name = "uart_group_34567",
+ .type = K_BITSEL,
+ .source = 1 + R9A06G032_DIV_P2_PG,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
+ .dual.group = 1,
+ },
+ D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
+ D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 1, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
+ D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 2, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
+ D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0, 0x760, 0x761, 0x762, 0x763),
+ D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 1, 0x764, 0x765, 0x766, 0x767),
+ D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 2, 0x768, 0x769, 0x76a, 0x76b),
+ D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 3, 0x76c, 0x76d, 0x76e, 0x76f),
+ D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 4, 0x770, 0x771, 0x772, 0x773),
+};
+
+struct r9a06g032_priv {
+ struct clk_onecell_data data;
+ spinlock_t lock; /* protects concurrent access to the gates */
+ void __iomem *reg;
+};
+
+/* register/bit pairs are encoded as a uint16_t: ((offset / 4) << 5) | bit */
+static void
+clk_rdesc_set(struct r9a06g032_priv *clocks,
+ u16 one, unsigned int on)
+{
+ u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
+ u32 val = readl(reg);
+
+ val = (val & ~(1U << (one & 0x1f))) | ((!!on) << (one & 0x1f));
+ writel(val, reg);
+}
+
+static int
+clk_rdesc_get(struct r9a06g032_priv *clocks,
+ uint16_t one)
+{
+ u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
+ u32 val = readl(reg);
+
+ return !!(val & (1U << (one & 0x1f)));
+}
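+
+/*
+ * Worked example (illustrative, values taken from the uart_group_012
+ * descriptor below): ((0xec / 4) << 5) | 24 packs word index 0x3b and
+ * bit 24 into a single u16; clk_rdesc_set()/clk_rdesc_get() recover
+ * the byte offset as 4 * (one >> 5) == 0xec and the bit number as
+ * one & 0x1f == 24.
+ */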
+
+/*
+ * This implements the R9A06G032 clock gate 'driver'. We cannot use the
+ * generic clock gate framework, as the gates on the R9A06G032 have a
+ * special enabling sequence; we therefore use this small proxy.
+ */
+struct r9a06g032_clk_gate {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+
+ struct r9a06g032_gate gate;
+};
+
+#define to_r9a06g032_gate(_hw) container_of(_hw, struct r9a06g032_clk_gate, hw)
+
+static void
+r9a06g032_clk_gate_set(struct r9a06g032_priv *clocks,
+ struct r9a06g032_gate *g, int on)
+{
+ unsigned long flags;
+
+ WARN_ON(!g->gate);
+
+ spin_lock_irqsave(&clocks->lock, flags);
+ clk_rdesc_set(clocks, g->gate, on);
+ /* De-assert reset */
+ if (g->reset)
+ clk_rdesc_set(clocks, g->reset, 1);
+ spin_unlock_irqrestore(&clocks->lock, flags);
+
+ /* Hardware manual recommends 5us delay after enabling clock & reset */
+ udelay(5);
+
+ /*
+ * If the peripheral is memory mapped (i.e. an AXI slave), there is an
+ * associated SLVRDY bit in the System Controller that needs to be set
+ * so that the FlexWAY bus fabric passes on the read/write requests.
+ */
+ if (g->ready || g->midle) {
+ spin_lock_irqsave(&clocks->lock, flags);
+ if (g->ready)
+ clk_rdesc_set(clocks, g->ready, on);
+ /* Clear 'Master Idle Request' bit */
+ if (g->midle)
+ clk_rdesc_set(clocks, g->midle, !on);
+ spin_unlock_irqrestore(&clocks->lock, flags);
+ }
+ /* Note: We don't wait for FlexWAY Socket Connection signal */
+}
+
+static int r9a06g032_clk_gate_enable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ r9a06g032_clk_gate_set(g->clocks, &g->gate, 1);
+ return 0;
+}
+
+static void r9a06g032_clk_gate_disable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ r9a06g032_clk_gate_set(g->clocks, &g->gate, 0);
+}
+
+static int r9a06g032_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ /* if the clock is held in reset, the gate may be on, yet the clock not actually running */
+ if (g->gate.reset && !clk_rdesc_get(g->clocks, g->gate.reset))
+ return 0;
+
+ return clk_rdesc_get(g->clocks, g->gate.gate);
+}
+
+static const struct clk_ops r9a06g032_clk_gate_ops = {
+ .enable = r9a06g032_clk_gate_enable,
+ .disable = r9a06g032_clk_gate_disable,
+ .is_enabled = r9a06g032_clk_gate_is_enabled,
+};
+
+static struct clk *
+r9a06g032_register_gate(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct clk *clk;
+ struct r9a06g032_clk_gate *g;
+ struct clk_init_data init;
+
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_gate_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->gate = desc->gate;
+ g->hw.init = &init;
+
+ /*
+ * Important: some clocks are already in use by the CM3 core. We have
+ * to assume they are not Linux's to play with, nor to try to disable
+ * at the end of boot!
+ */
+ if (r9a06g032_clk_gate_is_enabled(&g->hw)) {
+ init.flags |= CLK_IS_CRITICAL;
+ pr_debug("%s was enabled, making read-only\n", desc->name);
+ }
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+struct r9a06g032_clk_div {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 reg;
+ u16 min, max;
+ u8 table_size;
+ u16 table[8]; /* we know there are no more than 8 */
+};
+
+#define to_r9a06g032_div(_hw) \
+ container_of(_hw, struct r9a06g032_clk_div, hw)
+
+static unsigned long
+r9a06g032_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ u32 __iomem *reg = clk->clocks->reg + (4 * clk->reg);
+ u32 div = readl(reg);
+
+ if (div < clk->min)
+ div = clk->min;
+ else if (div > clk->max)
+ div = clk->max;
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+/*
+ * Attempts to find a divider value within the min,max range;
+ * if a table of fixed dividers was specified for this register,
+ * pick the fixed divider that yields the rate closest to the
+ * target frequency.
+ */
+static long
+r9a06g032_div_clamp_div(struct r9a06g032_clk_div *clk,
+ unsigned long rate, unsigned long prate)
+{
+ /* + 1 to cope with rates that have the remainder dropped */
+ u32 div = DIV_ROUND_UP(prate, rate + 1);
+ int i;
+
+ if (div <= clk->min)
+ return clk->min;
+ if (div >= clk->max)
+ return clk->max;
+
+ for (i = 0; clk->table_size && i < clk->table_size - 1; i++) {
+ if (div >= clk->table[i] && div <= clk->table[i + 1]) {
+ unsigned long m = rate -
+ DIV_ROUND_UP(prate, clk->table[i]);
+ unsigned long p =
+ DIV_ROUND_UP(prate, clk->table[i + 1]) -
+ rate;
+ /*
+ * select the divider that generates
+ * the value closest to the ideal frequency
+ */
+ div = p >= m ? clk->table[i] : clk->table[i + 1];
+ return div;
+ }
+ }
+ return div;
+}
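+
+/*
+ * Worked example (illustrative): div_ref_sync has min 2, max 16 and a
+ * fixed table of {2, 4, 8, 16}. A computed divider of 20 is clamped
+ * down to 16, while a computed divider of 7 falls between the fixed
+ * values 4 and 8, so one of those two bracketing dividers is returned.
+ */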
+
+static long
+r9a06g032_div_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ u32 div = DIV_ROUND_UP(*prate, rate);
+
+ pr_devel("%s %pC %ld (prate %ld) (wanted div %u)\n", __func__,
+ hw->clk, rate, *prate, div);
+ pr_devel(" min %d (%ld) max %d (%ld)\n",
+ clk->min, DIV_ROUND_UP(*prate, clk->min),
+ clk->max, DIV_ROUND_UP(*prate, clk->max));
+
+ div = r9a06g032_div_clamp_div(clk, rate, *prate);
+ /*
+ * This is a hack. Currently the serial driver asks for a clock rate
+ * that is 16 times the baud rate -- and that is wildly outside the
+ * range of the UART divider, and there is no provision for a
+ * 'leave the divider as-is if out of range' case.
+ * The serial driver *shouldn't* play with these clocks anyway: there
+ * are several UARTs attached to this divider, and changing it impacts
+ * everyone.
+ */
+ if (clk->index == R9A06G032_DIV_UART) {
+ pr_devel("%s div uart hack!\n", __func__);
+ return clk_get_rate(hw->clk);
+ }
+ pr_devel("%s %pC %ld / %u = %ld\n", __func__, hw->clk,
+ *prate, div, DIV_ROUND_UP(*prate, div));
+ return DIV_ROUND_UP(*prate, div);
+}
+
+static int
+r9a06g032_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ /* + 1 to cope with rates that have the remainder dropped */
+ u32 div = DIV_ROUND_UP(parent_rate, rate + 1);
+ u32 __iomem *reg = clk->clocks->reg + (4 * clk->reg);
+
+ pr_devel("%s %pC rate %ld parent %ld div %d\n", __func__, hw->clk,
+ rate, parent_rate, div);
+
+ /*
+ * Bit 31 needs to be written along with the divider value
+ * to latch it. Technically we should also wait until the
+ * bit has been cleared again.
+ * TODO: Find out whether this callback is sleepable, in case
+ * the hardware /does/ require some sort of spinloop here.
+ */
+ writel(div | BIT(31), reg);
+
+ return 0;
+}
+
+static const struct clk_ops r9a06g032_clk_div_ops = {
+ .recalc_rate = r9a06g032_div_recalc_rate,
+ .round_rate = r9a06g032_div_round_rate,
+ .set_rate = r9a06g032_div_set_rate,
+};
+
+static struct clk *
+r9a06g032_register_div(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct r9a06g032_clk_div *div;
+ struct clk *clk;
+ struct clk_init_data init;
+ unsigned int i;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return NULL;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_div_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ div->clocks = clocks;
+ div->index = desc->index;
+ div->reg = desc->reg;
+ div->hw.init = &init;
+ div->min = desc->div_min;
+ div->max = desc->div_max;
+ /* populate the (optional) table of fixed divider values */
+ for (i = 0; i < ARRAY_SIZE(div->table) &&
+ i < ARRAY_SIZE(desc->div_table) && desc->div_table[i]; i++) {
+ div->table[div->table_size++] = desc->div_table[i];
+ }
+
+ clk = clk_register(NULL, &div->hw);
+ if (IS_ERR(clk)) {
+ kfree(div);
+ return NULL;
+ }
+ return clk;
+}
+
+/*
+ * This clock provider handles the case of the R9A06G032 where
+ * peripherals have two potential clock sources and two gates, one for
+ * each clock source - the clock source used (for all sub-clocks)
+ * is selected by a single bit.
+ * That single bit affects all sub-clocks, and therefore needs to change the
+ * active gate (turning the other off) and force a recalculation of the rates.
+ *
+ * This implements two clock providers, one 'bitselect' that
+ * handles the switch between both parents, and another 'dualgate'
+ * that knows which gate to poke at, depending on the parent's bit position.
+ */
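+
+/*
+ * An illustrative sketch of the resulting topology for the first UART
+ * group, as set up by the descriptor table above:
+ *
+ *   div_uart ----\
+ *                 |bitsel|--- uart_group_012 ---[dualgate]--- clk_uart0..2
+ *   clk_pll_usb -/
+ */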
+struct r9a06g032_clk_bitsel {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 selector; /* selector register + bit */
+};
+
+#define to_clk_bitselect(_hw) \
+ container_of(_hw, struct r9a06g032_clk_bitsel, hw)
+
+static u8 r9a06g032_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_bitsel *set = to_clk_bitselect(hw);
+
+ return clk_rdesc_get(set->clocks, set->selector);
+}
+
+static int r9a06g032_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct r9a06g032_clk_bitsel *set = to_clk_bitselect(hw);
+
+ /* a single bit in the register selects one of two parent clocks */
+ clk_rdesc_set(set->clocks, set->selector, !!index);
+
+ return 0;
+}
+
+static const struct clk_ops clk_bitselect_ops = {
+ .get_parent = r9a06g032_clk_mux_get_parent,
+ .set_parent = r9a06g032_clk_mux_set_parent,
+};
+
+static struct clk *
+r9a06g032_register_bitsel(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct clk *clk;
+ struct r9a06g032_clk_bitsel *g;
+ struct clk_init_data init;
+ const char *names[2];
+
+ /* allocate the gate */
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+
+ names[0] = parent_name;
+ names[1] = "clk_pll_usb";
+
+ init.name = desc->name;
+ init.ops = &clk_bitselect_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = names;
+ init.num_parents = 2;
+
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->selector = desc->dual.sel;
+ g->hw.init = &init;
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+struct r9a06g032_clk_dualgate {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 selector; /* selector register + bit */
+ struct r9a06g032_gate gate[2];
+};
+
+#define to_clk_dualgate(_hw) \
+ container_of(_hw, struct r9a06g032_clk_dualgate, hw)
+
+static int
+r9a06g032_clk_dualgate_setenable(struct r9a06g032_clk_dualgate *g, int enable)
+{
+ u8 sel_bit = clk_rdesc_get(g->clocks, g->selector);
+
+ /* we always turn off the 'other' gate, regardless */
+ r9a06g032_clk_gate_set(g->clocks, &g->gate[!sel_bit], 0);
+ r9a06g032_clk_gate_set(g->clocks, &g->gate[sel_bit], enable);
+
+ return 0;
+}
+
+static int r9a06g032_clk_dualgate_enable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *gate = to_clk_dualgate(hw);
+
+ r9a06g032_clk_dualgate_setenable(gate, 1);
+
+ return 0;
+}
+
+static void r9a06g032_clk_dualgate_disable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *gate = to_clk_dualgate(hw);
+
+ r9a06g032_clk_dualgate_setenable(gate, 0);
+}
+
+static int r9a06g032_clk_dualgate_is_enabled(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *g = to_clk_dualgate(hw);
+ u8 sel_bit = clk_rdesc_get(g->clocks, g->selector);
+
+ return clk_rdesc_get(g->clocks, g->gate[sel_bit].gate);
+}
+
+static const struct clk_ops r9a06g032_clk_dualgate_ops = {
+ .enable = r9a06g032_clk_dualgate_enable,
+ .disable = r9a06g032_clk_dualgate_disable,
+ .is_enabled = r9a06g032_clk_dualgate_is_enabled,
+};
+
+static struct clk *
+r9a06g032_register_dualgate(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc,
+ uint16_t sel)
+{
+ struct r9a06g032_clk_dualgate *g;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the gate */
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->selector = sel;
+ g->gate[0].gate = desc->dual.g1;
+ g->gate[0].reset = desc->dual.r1;
+ g->gate[1].gate = desc->dual.g2;
+ g->gate[1].reset = desc->dual.r2;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_dualgate_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ g->hw.init = &init;
+ /*
+ * Important: some clocks are already in use by the CM3 core. We have
+ * to assume they are not Linux's to play with, nor to try to disable
+ * at the end of boot!
+ */
+ if (r9a06g032_clk_dualgate_is_enabled(&g->hw)) {
+ init.flags |= CLK_IS_CRITICAL;
+ pr_debug("%s was enabled, making read-only\n", desc->name);
+ }
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+static void r9a06g032_clocks_del_clk_provider(void *data)
+{
+ of_clk_del_provider(data);
+}
+
+static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct r9a06g032_priv *clocks;
+ struct clk **clks;
+ struct clk *mclk;
+ unsigned int i;
+ u16 uart_group_sel[2];
+ int error;
+
+ clocks = devm_kzalloc(dev, sizeof(*clocks), GFP_KERNEL);
+ clks = devm_kcalloc(dev, R9A06G032_CLOCK_COUNT, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!clocks || !clks)
+ return -ENOMEM;
+
+ spin_lock_init(&clocks->lock);
+
+ clocks->data.clks = clks;
+ clocks->data.clk_num = R9A06G032_CLOCK_COUNT;
+
+ mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(mclk))
+ return PTR_ERR(mclk);
+
+ clocks->reg = of_iomap(np, 0);
+ if (WARN_ON(!clocks->reg))
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
+ const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
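+ /*
+ * d->source is a 1-based index into this same table; a source
+ * of 0 means the clock is fed by the external "mclk" input.
+ */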
+ const char *parent_name = d->source ?
+ __clk_get_name(clocks->data.clks[d->source - 1]) :
+ __clk_get_name(mclk);
+ struct clk *clk = NULL;
+
+ switch (d->type) {
+ case K_FFC:
+ clk = clk_register_fixed_factor(NULL, d->name,
+ parent_name, 0,
+ d->mul, d->div);
+ break;
+ case K_GATE:
+ clk = r9a06g032_register_gate(clocks, parent_name, d);
+ break;
+ case K_DIV:
+ clk = r9a06g032_register_div(clocks, parent_name, d);
+ break;
+ case K_BITSEL:
+ /* record the selector register for the dualgates below */
+ uart_group_sel[d->dual.group] = d->dual.sel;
+ clk = r9a06g032_register_bitsel(clocks, parent_name, d);
+ break;
+ case K_DUALGATE:
+ clk = r9a06g032_register_dualgate(clocks, parent_name,
+ d,
+ uart_group_sel[d->dual.group]);
+ break;
+ }
+ clocks->data.clks[d->index] = clk;
+ }
+ error = of_clk_add_provider(np, of_clk_src_onecell_get, &clocks->data);
+ if (error)
+ return error;
+
+ return devm_add_action_or_reset(dev,
+ r9a06g032_clocks_del_clk_provider, np);
+}
+
+static const struct of_device_id r9a06g032_match[] = {
+ { .compatible = "renesas,r9a06g032-sysctrl" },
+ { }
+};
+
+static struct platform_driver r9a06g032_clock_driver = {
+ .driver = {
+ .name = "renesas,r9a06g032-sysctrl",
+ .of_match_table = r9a06g032_match,
+ },
+};
+
+static int __init r9a06g032_clocks_init(void)
+{
+ return platform_driver_probe(&r9a06g032_clock_driver,
+ r9a06g032_clocks_probe);
+}
+
+subsys_initcall(r9a06g032_clocks_init);
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 98e7b9429b83..ff35ab463a6f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,12 +6,14 @@
obj-y += clk.o
obj-y += clk-pll.o
obj-y += clk-cpu.o
+obj-y += clk-half-divider.o
obj-y += clk-inverter.o
obj-y += clk-mmc-phase.o
obj-y += clk-muxgrf.o
obj-y += clk-ddr.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
+obj-y += clk-px30.o
obj-y += clk-rv1108.o
obj-y += clk-rk3036.o
obj-y += clk-rk3128.o
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
new file mode 100644
index 000000000000..b8da6e799423
--- /dev/null
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include "clk.h"
+
+#define div_mask(width) ((1 << (width)) - 1)
+
+static bool _is_best_half_div(unsigned long rate, unsigned long now,
+ unsigned long best, unsigned long flags)
+{
+ if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+ return abs(rate - now) < abs(rate - best);
+
+ return now <= rate && now > best;
+}
+
+static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = clk_readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider->width);
+ val = val * 2 + 3;
+
+ return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
+}
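+
+/*
+ * Worked example (illustrative): a register value of 0 encodes a
+ * divider of (0 * 2 + 3) / 2 = 1.5, a value of 1 encodes 2.5, and so
+ * on in steps of one. With a 600 MHz parent, val = 0 yields
+ * 600 * 2 / 3 = 400 MHz and val = 1 yields 600 * 2 / 5 = 240 MHz.
+ */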
+
+static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate, u8 width,
+ unsigned long flags)
+{
+ unsigned int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+ unsigned long parent_rate_saved = *best_parent_rate;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = div_mask(width);
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
+ bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
+ if (bestdiv < 3)
+ bestdiv = 0;
+ else
+ bestdiv = (bestdiv - 3) / 2;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+ /*
+ * The maximum divider we can use without overflowing the
+ * rate * (i * 2 + 3) computation below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 0; i <= maxdiv; i++) {
+ if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
+ /*
+ * It's the most ideal case if the requested rate can be
+ * divided from parent clock without needing to change
+ * parent rate, so return the divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return i;
+ }
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ ((u64)rate * (i * 2 + 3)) / 2);
+ now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
+ (i * 2 + 3));
+
+ if (_is_best_half_div(rate, now, best, flags)) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = div_mask(width);
+ *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+ }
+
+ return bestdiv;
+}
+
+static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int div;
+
+ div = clk_half_divider_bestdiv(hw, rate, prate,
+ divider->width,
+ divider->flags);
+
+ return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
+}
+
+static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int value;
+ unsigned long flags = 0;
+ u32 val;
+
+ value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
+ value = (value - 3) / 2;
+ value = min_t(unsigned int, value, div_mask(divider->width));
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider->width) << (divider->shift + 16);
+ } else {
+ val = clk_readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ }
+ val |= value << divider->shift;
+ clk_writel(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return 0;
+}
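+
+/*
+ * Worked example (illustrative, the inverse of the recalc_rate math
+ * above): for a 600 MHz parent and a requested 240 MHz,
+ * DIV_ROUND_UP(600 MHz * 2, 240 MHz) = 5 and (5 - 3) / 2 = 1, so a
+ * field value of 1 is written.
+ */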
+
+const struct clk_ops clk_half_divider_ops = {
+ .recalc_rate = clk_half_divider_recalc_rate,
+ .round_rate = clk_half_divider_round_rate,
+ .set_rate = clk_half_divider_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_half_divider_ops);
+
+/*
+ * Register a clock branch.
+ * Most clock branches have a form like
+ *
+ * src1 --|--\
+ * |M |--[GATE]-[DIV]-
+ * src2 --|--/
+ *
+ * sometimes without one of those components.
+ */
+struct clk *rockchip_clk_register_halfdiv(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ u8 div_shift, u8 div_width,
+ u8 div_flags, int gate_offset,
+ u8 gate_shift, u8 gate_flags,
+ unsigned long flags,
+ spinlock_t *lock)
+{
+ struct clk *clk;
+ struct clk_mux *mux = NULL;
+ struct clk_gate *gate = NULL;
+ struct clk_divider *div = NULL;
+ const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
+ *gate_ops = NULL;
+
+ if (num_parents > 1) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = base + muxdiv_offset;
+ mux->shift = mux_shift;
+ mux->mask = BIT(mux_width) - 1;
+ mux->flags = mux_flags;
+ mux->lock = lock;
+ mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
+ : &clk_mux_ops;
+ }
+
+ if (gate_offset >= 0) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto err_gate;
+
+ gate->flags = gate_flags;
+ gate->reg = base + gate_offset;
+ gate->bit_idx = gate_shift;
+ gate->lock = lock;
+ gate_ops = &clk_gate_ops;
+ }
+
+ if (div_width > 0) {
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ goto err_div;
+
+ div->flags = div_flags;
+ div->reg = base + muxdiv_offset;
+ div->shift = div_shift;
+ div->width = div_width;
+ div->lock = lock;
+ div_ops = &clk_half_divider_ops;
+ }
+
+ clk = clk_register_composite(NULL, name, parent_names, num_parents,
+ mux ? &mux->hw : NULL, mux_ops,
+ div ? &div->hw : NULL, div_ops,
+ gate ? &gate->hw : NULL, gate_ops,
+ flags);
+
+ return clk;
+err_div:
+ kfree(gate);
+err_gate:
+ kfree(mux);
+ return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
new file mode 100644
index 000000000000..601a77f1af78
--- /dev/null
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang<zhangqing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/px30-cru.h>
+#include "clk.h"
+
+#define PX30_GRF_SOC_STATUS0 0x480
+
+enum px30_plls {
+ apll, dpll, cpll, npll, apll_b_h, apll_b_l,
+};
+
+enum px30_pmu_plls {
+ gpll,
+};
+
+static struct rockchip_pll_rate_table px30_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(900000000, 4, 300, 2, 1, 1, 0),
+ RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 6, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 58, 2, 1, 1, 0),
+ RK3036_PLL_RATE(624000000, 1, 52, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 75, 3, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(504000000, 1, 63, 3, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 6, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 52, 2, 2, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 64, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
+
+#define PX30_DIV_ACLKM_MASK 0x7
+#define PX30_DIV_ACLKM_SHIFT 12
+#define PX30_DIV_PCLK_DBG_MASK 0xf
+#define PX30_DIV_PCLK_DBG_SHIFT 8
+
+#define PX30_CLKSEL0(_aclk_core, _pclk_dbg) \
+{ \
+ .reg = PX30_CLKSEL_CON(0), \
+ .val = HIWORD_UPDATE(_aclk_core, PX30_DIV_ACLKM_MASK, \
+ PX30_DIV_ACLKM_SHIFT) | \
+ HIWORD_UPDATE(_pclk_dbg, PX30_DIV_PCLK_DBG_MASK, \
+ PX30_DIV_PCLK_DBG_SHIFT), \
+}
+
+#define PX30_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ PX30_CLKSEL0(_aclk_core, _pclk_dbg), \
+ }, \
+}
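+
+/*
+ * Worked example (illustrative, assuming the usual Rockchip
+ * HIWORD_UPDATE() helper from clk.h, which pairs each value with a
+ * write-enable mask in the upper half-word of the register):
+ * PX30_CPUCLK_RATE(408000000, 1, 1) expands to a single write of
+ * HIWORD_UPDATE(1, 0x7, 12) | HIWORD_UPDATE(1, 0xf, 8) to
+ * PX30_CLKSEL_CON(0), so only the aclk_core and pclk_dbg divider
+ * fields are updated.
+ */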
+
+static struct rockchip_cpuclk_rate_table px30_cpuclk_rates[] __initdata = {
+ PX30_CPUCLK_RATE(1608000000, 1, 7),
+ PX30_CPUCLK_RATE(1584000000, 1, 7),
+ PX30_CPUCLK_RATE(1560000000, 1, 7),
+ PX30_CPUCLK_RATE(1536000000, 1, 7),
+ PX30_CPUCLK_RATE(1512000000, 1, 7),
+ PX30_CPUCLK_RATE(1488000000, 1, 5),
+ PX30_CPUCLK_RATE(1464000000, 1, 5),
+ PX30_CPUCLK_RATE(1440000000, 1, 5),
+ PX30_CPUCLK_RATE(1416000000, 1, 5),
+ PX30_CPUCLK_RATE(1392000000, 1, 5),
+ PX30_CPUCLK_RATE(1368000000, 1, 5),
+ PX30_CPUCLK_RATE(1344000000, 1, 5),
+ PX30_CPUCLK_RATE(1320000000, 1, 5),
+ PX30_CPUCLK_RATE(1296000000, 1, 5),
+ PX30_CPUCLK_RATE(1272000000, 1, 5),
+ PX30_CPUCLK_RATE(1248000000, 1, 5),
+ PX30_CPUCLK_RATE(1224000000, 1, 5),
+ PX30_CPUCLK_RATE(1200000000, 1, 5),
+ PX30_CPUCLK_RATE(1104000000, 1, 5),
+ PX30_CPUCLK_RATE(1008000000, 1, 5),
+ PX30_CPUCLK_RATE(912000000, 1, 5),
+ PX30_CPUCLK_RATE(816000000, 1, 3),
+ PX30_CPUCLK_RATE(696000000, 1, 3),
+ PX30_CPUCLK_RATE(600000000, 1, 3),
+ PX30_CPUCLK_RATE(408000000, 1, 1),
+ PX30_CPUCLK_RATE(312000000, 1, 1),
+ PX30_CPUCLK_RATE(216000000, 1, 1),
+ PX30_CPUCLK_RATE(96000000, 1, 1),
+};
+
+static const struct rockchip_cpuclk_reg_data px30_cpuclk_data = {
+ .core_reg = PX30_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0xf,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 7,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "xin24m"};
+PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc32k_pmu" };
+PNAME(mux_armclk_p) = { "apll_core", "gpll_core" };
+PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
+PNAME(mux_ddrstdby_p) = { "clk_ddrphy1x", "clk_stdby_2wrap" };
+PNAME(mux_4plls_p) = { "gpll", "dummy_cpll", "usb480m", "npll" };
+PNAME(mux_cpll_npll_p) = { "cpll", "npll" };
+PNAME(mux_npll_cpll_p) = { "npll", "cpll" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "dummy_cpll" };
+PNAME(mux_gpll_npll_p) = { "gpll", "npll" };
+PNAME(mux_gpll_xin24m_p) = { "gpll", "xin24m"};
+PNAME(mux_gpll_cpll_npll_p) = { "gpll", "dummy_cpll", "npll" };
+PNAME(mux_gpll_cpll_npll_xin24m_p) = { "gpll", "dummy_cpll", "npll", "xin24m" };
+PNAME(mux_gpll_xin24m_npll_p) = { "gpll", "xin24m", "npll"};
+PNAME(mux_pdm_p) = { "clk_pdm_src", "clk_pdm_frac" };
+PNAME(mux_i2s0_tx_p) = { "clk_i2s0_tx_src", "clk_i2s0_tx_frac", "mclk_i2s0_tx_in", "xin12m"};
+PNAME(mux_i2s0_rx_p) = { "clk_i2s0_rx_src", "clk_i2s0_rx_frac", "mclk_i2s0_rx_in", "xin12m"};
+PNAME(mux_i2s1_p) = { "clk_i2s1_src", "clk_i2s1_frac", "i2s1_clkin", "xin12m"};
+PNAME(mux_i2s2_p) = { "clk_i2s2_src", "clk_i2s2_frac", "i2s2_clkin", "xin12m"};
+PNAME(mux_i2s0_tx_out_p) = { "clk_i2s0_tx", "xin12m", "clk_i2s0_rx"};
+PNAME(mux_i2s0_rx_out_p) = { "clk_i2s0_rx", "xin12m", "clk_i2s0_tx"};
+PNAME(mux_i2s1_out_p) = { "clk_i2s1", "xin12m"};
+PNAME(mux_i2s2_out_p) = { "clk_i2s2", "xin12m"};
+PNAME(mux_i2s0_tx_rx_p) = { "clk_i2s0_tx_mux", "clk_i2s0_rx_mux"};
+PNAME(mux_i2s0_rx_tx_p) = { "clk_i2s0_rx_mux", "clk_i2s0_tx_mux"};
+PNAME(mux_uart_src_p) = { "gpll", "xin24m", "usb480m", "npll" };
+PNAME(mux_uart1_p) = { "clk_uart1_src", "clk_uart1_np5", "clk_uart1_frac" };
+PNAME(mux_uart2_p) = { "clk_uart2_src", "clk_uart2_np5", "clk_uart2_frac" };
+PNAME(mux_uart3_p) = { "clk_uart3_src", "clk_uart3_np5", "clk_uart3_frac" };
+PNAME(mux_uart4_p) = { "clk_uart4_src", "clk_uart4_np5", "clk_uart4_frac" };
+PNAME(mux_uart5_p) = { "clk_uart5_src", "clk_uart5_np5", "clk_uart5_frac" };
+PNAME(mux_cif_out_p) = { "xin24m", "dummy_cpll", "npll", "usb480m" };
+PNAME(mux_dclk_vopb_p) = { "dclk_vopb_src", "dclk_vopb_frac", "xin24m" };
+PNAME(mux_dclk_vopl_p) = { "dclk_vopl_src", "dclk_vopl_frac", "xin24m" };
+PNAME(mux_gmac_p) = { "clk_gmac_src", "gmac_clkin" };
+PNAME(mux_gmac_rmii_sel_p) = { "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx_div2" };
+PNAME(mux_rtc32k_pmu_p) = { "xin32k", "pmu_pvtm_32k", "clk_rtc32k_frac", };
+PNAME(mux_wifi_pmu_p) = { "xin24m", "clk_wifi_pmu_src" };
+PNAME(mux_uart0_pmu_p) = { "clk_uart0_pmu_src", "clk_uart0_np5", "clk_uart0_frac" };
+PNAME(mux_usbphy_ref_p) = { "xin24m", "clk_ref24m_pmu" };
+PNAME(mux_mipidsiphy_ref_p) = { "xin24m", "clk_ref24m_pmu" };
+PNAME(mux_gpu_p) = { "clk_gpu_div", "clk_gpu_np5" };
+
+static struct rockchip_pll_clock px30_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ 0, PX30_PLL_CON(0),
+ PX30_MODE_CON, 0, 0, 0, px30_pll_rates),
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ 0, PX30_PLL_CON(8),
+ PX30_MODE_CON, 4, 1, 0, NULL),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ 0, PX30_PLL_CON(16),
+ PX30_MODE_CON, 2, 2, 0, px30_pll_rates),
+ [npll] = PLL(pll_rk3328, PLL_NPLL, "npll", mux_pll_p,
+ 0, PX30_PLL_CON(24),
+ PX30_MODE_CON, 6, 4, 0, px30_pll_rates),
+};
+
+static struct rockchip_pll_clock px30_pmu_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p, 0, PX30_PMU_PLL_CON(0),
+ PX30_PMU_MODE, 0, 3, 0, px30_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch px30_pdm_fracmux __initdata =
+ MUX(0, "clk_pdm_mux", mux_pdm_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(26), 15, 1, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s0_tx_fracmux __initdata =
+ MUX(0, "clk_i2s0_tx_mux", mux_i2s0_tx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(28), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s0_rx_fracmux __initdata =
+ MUX(0, "clk_i2s0_rx_mux", mux_i2s0_rx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(58), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s1_fracmux __initdata =
+ MUX(0, "clk_i2s1_mux", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(30), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s2_fracmux __initdata =
+ MUX(0, "clk_i2s2_mux", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(32), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart1_fracmux __initdata =
+ MUX(0, "clk_uart1_mux", mux_uart1_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(35), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart2_fracmux __initdata =
+ MUX(0, "clk_uart2_mux", mux_uart2_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(38), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart3_fracmux __initdata =
+ MUX(0, "clk_uart3_mux", mux_uart3_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(41), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart4_fracmux __initdata =
+ MUX(0, "clk_uart4_mux", mux_uart4_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(44), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart5_fracmux __initdata =
+ MUX(0, "clk_uart5_mux", mux_uart5_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(47), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_dclk_vopb_fracmux __initdata =
+ MUX(0, "dclk_vopb_mux", mux_dclk_vopb_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(5), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_dclk_vopl_fracmux __initdata =
+ MUX(0, "dclk_vopl_mux", mux_dclk_vopl_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(8), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_rtc32k_pmu_fracmux __initdata =
+ MUX(SCLK_RTC32K_PMU, "clk_rtc32k_pmu", mux_rtc32k_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(0), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart0_pmu_fracmux __initdata =
+ MUX(0, "clk_uart0_pmu_mux", mux_uart0_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(4), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 1
+ */
+
+ MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ PX30_MODE_CON, 8, 2, MFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+
+ /* PD_CORE */
+ GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(0), 8, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ PX30_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(0), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ PX30_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(0, "aclk_core_prf", "aclk_core", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 5, GFLAGS),
+ GATE(0, "pclk_dbg_niu", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(0, "pclk_core_dbg", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(0, "pclk_core_grf", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 6, GFLAGS),
+
+ GATE(0, "clk_jtag", "jtag_clkin", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(SCLK_PVTM, "clk_pvtm", "xin24m", 0,
+ PX30_CLKGATE_CON(17), 4, GFLAGS),
+
+ /* PD_GPU */
+ COMPOSITE_NODIV(0, "clk_gpu_src", mux_4plls_p, 0,
+ PX30_CLKSEL_CON(1), 6, 2, MFLAGS,
+ PX30_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(0, "clk_gpu_div", "clk_gpu_src", 0,
+ PX30_CLKSEL_CON(1), 0, 4, DFLAGS,
+ PX30_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_gpu_np5", "clk_gpu_src", 0,
+ PX30_CLKSEL_CON(1), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_NODIV(SCLK_GPU, "clk_gpu", mux_gpu_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(1), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_gpu", "clk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(1), 13, 2, DFLAGS,
+ PX30_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(0, "aclk_gpu_niu", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(0, "aclk_gpu_prf", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(0, "pclk_gpu_grf", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 9, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+
+ /* PD_DDR */
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 13, GFLAGS),
+ COMPOSITE_NOGATE(SCLK_DDRCLK, "sclk_ddrc", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 3, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+ COMPOSITE_NOGATE(0, "clk_ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 3, DFLAGS),
+ FACTOR_GATE(0, "clk_ddrphy1x", "clk_ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
+ PX30_CLKGATE_CON(0), 14, GFLAGS),
+ FACTOR_GATE(0, "clk_stdby_2wrap", "clk_ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
+ PX30_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_ddrstdby", mux_ddrstdby_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 4, 1, MFLAGS,
+ PX30_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(0, "aclk_split", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 15, GFLAGS),
+ GATE(0, "clk_msch", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 8, GFLAGS),
+ GATE(0, "aclk_ddrc", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(0, "clk_core_ddrc", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "aclk_cmd_buff", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "clk_ddrmon", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 11, GFLAGS),
+
+ GATE(0, "clk_ddrmon_timer", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 15, GFLAGS),
+
+ COMPOSITE_NOMUX(PCLK_DDR, "pclk_ddr", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(0, "pclk_ddrmon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(0, "pclk_ddrc", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(0, "pclk_msch", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(0, "pclk_stdby", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(0, "pclk_ddr_grf", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 14, GFLAGS),
+ GATE(0, "pclk_cmdbuff", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 3, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 5
+ */
+
+ /* PD_VI */
+ COMPOSITE(ACLK_VI_PRE, "aclk_vi_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 8, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VI_PRE, "hclk_vi_pre", "aclk_vi_pre", 0,
+ PX30_CLKSEL_CON(11), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(4), 12, GFLAGS),
+ COMPOSITE(SCLK_ISP, "clk_isp", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 9, GFLAGS),
+ COMPOSITE(SCLK_CIF_OUT, "clk_cif_out", mux_cif_out_p, 0,
+ PX30_CLKSEL_CON(13), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ PX30_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(PCLK_ISP, "pclkin_isp", "ext_pclkin", 0,
+ PX30_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(PCLK_CIF, "pclkin_cif", "ext_pclkin", 0,
+ PX30_CLKGATE_CON(4), 14, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+
+ /* PD_VO */
+ COMPOSITE(ACLK_VO_PRE, "aclk_vo_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(3), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VO_PRE, "hclk_vo_pre", "aclk_vo_pre", 0,
+ PX30_CLKSEL_CON(3), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_VO_PRE, "pclk_vo_pre", "aclk_vo_pre", 0,
+ PX30_CLKSEL_CON(3), 12, 4, DFLAGS,
+ PX30_CLKGATE_CON(2), 13, GFLAGS),
+ COMPOSITE(SCLK_RGA_CORE, "clk_rga_core", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(2), 1, GFLAGS),
+
+ COMPOSITE(SCLK_VOPB_PWM, "clk_vopb_pwm", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(7), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE(0, "dclk_vopb_src", mux_cpll_npll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(5), 11, 1, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE_FRACMUX(0, "dclk_vopb_frac", "dclk_vopb_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(6), 0,
+ PX30_CLKGATE_CON(2), 3, GFLAGS,
+ &px30_dclk_vopb_fracmux),
+ GATE(DCLK_VOPB, "dclk_vopb", "dclk_vopb_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE(0, "dclk_vopl_src", mux_npll_cpll_p, 0,
+ PX30_CLKSEL_CON(8), 11, 1, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_FRACMUX(0, "dclk_vopl_frac", "dclk_vopl_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(9), 0,
+ PX30_CLKGATE_CON(2), 7, GFLAGS,
+ &px30_dclk_vopl_fracmux),
+ GATE(DCLK_VOPL, "dclk_vopl", "dclk_vopl_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(2), 8, GFLAGS),
+
+ /* PD_VPU */
+ COMPOSITE(0, "aclk_vpu_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(10), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vpu_pre", "aclk_vpu_pre", 0,
+ PX30_CLKSEL_CON(10), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(SCLK_CORE_VPU, "sclk_core_vpu", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(13), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 1, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 7
+ */
+
+ COMPOSITE_NODIV(ACLK_PERI_SRC, "aclk_peri_src", mux_gpll_cpll_p, 0,
+ PX30_CLKSEL_CON(14), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(5), 7, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_PERI_PRE, "aclk_peri_pre", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(14), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 8, GFLAGS),
+ DIV(HCLK_PERI_PRE, "hclk_peri_pre", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(14), 8, 5, DFLAGS),
+
+ /* PD_MMC_NAND */
+ GATE(HCLK_MMC_NAND, "hclk_mmc_nand", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE(SCLK_NANDC, "clk_nandc", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 13, GFLAGS),
+
+ COMPOSITE(SCLK_SDIO, "clk_sdio", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(18), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 3, GFLAGS),
+
+ COMPOSITE(SCLK_EMMC, "clk_emmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(20), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 6, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "clk_sfc", mux_gpll_cpll_p, 0,
+ PX30_CLKSEL_CON(22), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(6), 7, GFLAGS),
+
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
+ PX30_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
+ PX30_SDMMC_CON1, 1),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
+ PX30_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
+ PX30_SDIO_CON1, 1),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
+ PX30_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
+ PX30_EMMC_CON1, 1),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_sdmmc_pre", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(6), 12, GFLAGS),
+ COMPOSITE(SCLK_SDMMC, "clk_sdmmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(16), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 15, GFLAGS),
+
+ /* PD_USB */
+ GATE(HCLK_USB, "hclk_usb", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(SCLK_OTG_ADP, "clk_otg_adp", "clk_rtc32k_pmu", 0,
+ PX30_CLKGATE_CON(7), 3, GFLAGS),
+
+ /* PD_GMAC */
+ COMPOSITE(SCLK_GMAC_SRC, "clk_gmac_src", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(22), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(7), 11, GFLAGS),
+ MUX(SCLK_GMAC, "clk_gmac", mux_gmac_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(23), 6, 1, MFLAGS),
+ GATE(SCLK_MAC_REF, "clk_mac_ref", "clk_gmac", 0,
+ PX30_CLKGATE_CON(7), 15, GFLAGS),
+ GATE(SCLK_GMAC_RX_TX, "clk_gmac_rx_tx", "clk_gmac", 0,
+ PX30_CLKGATE_CON(7), 13, GFLAGS),
+ FACTOR(0, "clk_gmac_rx_tx_div2", "clk_gmac_rx_tx", 0, 1, 2),
+ FACTOR(0, "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx", 0, 1, 20),
+ MUX(SCLK_GMAC_RMII, "clk_gmac_rmii_sel", mux_gmac_rmii_sel_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(23), 7, 1, MFLAGS),
+
+ GATE(0, "aclk_gmac_pre", "aclk_peri_pre", 0,
+ PX30_CLKGATE_CON(7), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_gmac_pre", "aclk_gmac_pre", 0,
+ PX30_CLKSEL_CON(23), 0, 4, DFLAGS,
+ PX30_CLKGATE_CON(7), 12, GFLAGS),
+
+ COMPOSITE(SCLK_MAC_OUT, "clk_mac_out", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 5, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 8
+ */
+
+ /* PD_BUS */
+ COMPOSITE_NODIV(ACLK_BUS_SRC, "aclk_bus_src", mux_gpll_cpll_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(23), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_BUS_PRE, "hclk_bus_pre", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(24), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 8, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_BUS_PRE, "aclk_bus_pre", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(23), 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_BUS_PRE, "pclk_bus_pre", "aclk_bus_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(24), 8, 2, DFLAGS,
+ PX30_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(0, "pclk_top_pre", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 10, GFLAGS),
+
+ COMPOSITE(0, "clk_pdm_src", mux_gpll_xin24m_npll_p, 0,
+ PX30_CLKSEL_CON(26), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(9), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_pdm_frac", "clk_pdm_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(27), 0,
+ PX30_CLKGATE_CON(9), 10, GFLAGS,
+ &px30_pdm_fracmux),
+ GATE(SCLK_PDM, "clk_pdm", "clk_pdm_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(9), 11, GFLAGS),
+
+ COMPOSITE(0, "clk_i2s0_tx_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(28), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(9), 12, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s0_tx_frac", "clk_i2s0_tx_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(29), 0,
+ PX30_CLKGATE_CON(9), 13, GFLAGS,
+ &px30_i2s0_tx_fracmux),
+ COMPOSITE_NODIV(SCLK_I2S0_TX, "clk_i2s0_tx", mux_i2s0_tx_rx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(28), 12, 1, MFLAGS,
+ PX30_CLKGATE_CON(9), 14, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s0_tx_out_pre", mux_i2s0_tx_out_p, 0,
+ PX30_CLKSEL_CON(28), 14, 2, MFLAGS,
+ PX30_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(SCLK_I2S0_TX_OUT, "clk_i2s0_tx_out", "clk_i2s0_tx_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 8, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s0_rx_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(58), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(17), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s0_rx_frac", "clk_i2s0_rx_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(59), 0,
+ PX30_CLKGATE_CON(17), 1, GFLAGS,
+ &px30_i2s0_rx_fracmux),
+ COMPOSITE_NODIV(SCLK_I2S0_RX, "clk_i2s0_rx", mux_i2s0_rx_tx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(58), 12, 1, MFLAGS,
+ PX30_CLKGATE_CON(17), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s0_rx_out_pre", mux_i2s0_rx_out_p, 0,
+ PX30_CLKSEL_CON(58), 14, 2, MFLAGS,
+ PX30_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(SCLK_I2S0_RX_OUT, "clk_i2s0_rx_out", "clk_i2s0_rx_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 11, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s1_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(30), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(31), 0,
+ PX30_CLKGATE_CON(10), 1, GFLAGS,
+ &px30_i2s1_fracmux),
+ GATE(SCLK_I2S1, "clk_i2s1", "clk_i2s1_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s1_out_pre", mux_i2s1_out_p, 0,
+ PX30_CLKSEL_CON(30), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(SCLK_I2S1_OUT, "clk_i2s1_out", "clk_i2s1_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 9, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s2_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(32), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(10), 4, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(33), 0,
+ PX30_CLKGATE_CON(10), 5, GFLAGS,
+ &px30_i2s2_fracmux),
+ GATE(SCLK_I2S2, "clk_i2s2", "clk_i2s2_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s2_out_pre", mux_i2s2_out_p, 0,
+ PX30_CLKSEL_CON(32), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(SCLK_I2S2_OUT, "clk_i2s2_out", "clk_i2s2_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 10, CLK_GATE_HIWORD_MASK),
+
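+ /*
+ * Each UART additionally gets an "np5" half-divider path (divisor
+ * N + 1.5, via COMPOSITE_NOMUX_HALFDIV), giving the fracmux three
+ * candidate rates to choose from.
+ */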
+ COMPOSITE(SCLK_UART1_SRC, "clk_uart1_src", mux_uart_src_p, CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(34), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(10), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart1_np5", "clk_uart1_src", 0,
+ PX30_CLKSEL_CON(35), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(10), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(36), 0,
+ PX30_CLKGATE_CON(10), 14, GFLAGS,
+ &px30_uart1_fracmux),
+ GATE(SCLK_UART1, "clk_uart1", "clk_uart1_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 15, GFLAGS),
+
+ COMPOSITE(SCLK_UART2_SRC, "clk_uart2_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(37), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart2_np5", "clk_uart2_src", 0,
+ PX30_CLKSEL_CON(38), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 1, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(39), 0,
+ PX30_CLKGATE_CON(11), 2, GFLAGS,
+ &px30_uart2_fracmux),
+ GATE(SCLK_UART2, "clk_uart2", "clk_uart2_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 3, GFLAGS),
+
+ COMPOSITE(0, "clk_uart3_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(40), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 4, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart3_np5", "clk_uart3_src", 0,
+ PX30_CLKSEL_CON(41), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 5, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(42), 0,
+ PX30_CLKGATE_CON(11), 6, GFLAGS,
+ &px30_uart3_fracmux),
+ GATE(SCLK_UART3, "clk_uart3", "clk_uart3_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 7, GFLAGS),
+
+ COMPOSITE(0, "clk_uart4_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(43), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 8, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart4_np5", "clk_uart4_src", 0,
+ PX30_CLKSEL_CON(44), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(45), 0,
+ PX30_CLKGATE_CON(11), 10, GFLAGS,
+ &px30_uart4_fracmux),
+ GATE(SCLK_UART4, "clk_uart4", "clk_uart4_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 11, GFLAGS),
+
+ COMPOSITE(0, "clk_uart5_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(46), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart5_np5", "clk_uart5_src", 0,
+ PX30_CLKSEL_CON(47), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart5_frac", "clk_uart5_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(48), 0,
+ PX30_CLKGATE_CON(11), 14, GFLAGS,
+ &px30_uart5_fracmux),
+ GATE(SCLK_UART5, "clk_uart5", "clk_uart5_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 15, GFLAGS),
+
+ COMPOSITE(SCLK_I2C0, "clk_i2c0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(49), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 0, GFLAGS),
+ COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(49), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 1, GFLAGS),
+ COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(50), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 2, GFLAGS),
+ COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(50), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 3, GFLAGS),
+ COMPOSITE(SCLK_PWM0, "clk_pwm0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(52), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 5, GFLAGS),
+ COMPOSITE(SCLK_PWM1, "clk_pwm1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(52), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 6, GFLAGS),
+ COMPOSITE(SCLK_SPI0, "clk_spi0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(53), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 7, GFLAGS),
+ COMPOSITE(SCLK_SPI1, "clk_spi1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(53), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 8, GFLAGS),
+
+ GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 0, GFLAGS),
+ GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 2, GFLAGS),
+ GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 5, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "xin24m", 0,
+ PX30_CLKSEL_CON(54), 0, 11, DFLAGS,
+ PX30_CLKGATE_CON(12), 9, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_SARADC, "clk_saradc", "xin24m", 0,
+ PX30_CLKSEL_CON(55), 0, 11, DFLAGS,
+ PX30_CLKGATE_CON(12), 10, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_OTP, "clk_otp", "xin24m", 0,
+ PX30_CLKSEL_CON(56), 0, 3, DFLAGS,
+ PX30_CLKGATE_CON(12), 11, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_OTP_USR, "clk_otp_usr", "clk_otp", 0,
+ PX30_CLKSEL_CON(56), 4, 2, DFLAGS,
+ PX30_CLKGATE_CON(13), 6, GFLAGS),
+
+ GATE(0, "clk_cpu_boost", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(12), 12, GFLAGS),
+
+ /* PD_CRYPTO */
+ GATE(0, "aclk_crypto_pre", "aclk_bus_pre", 0,
+ PX30_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(0, "hclk_crypto_pre", "hclk_bus_pre", 0,
+ PX30_CLKGATE_CON(8), 13, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 14, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO_APK, "clk_crypto_apk", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(25), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 15, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 9
+ */
+
+ /* PD_BUS_TOP */
+ GATE(0, "pclk_top_niu", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(0, "pclk_top_cru", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 1, GFLAGS),
+ GATE(PCLK_OTP_PHY, "pclk_otp_phy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 2, GFLAGS),
+ GATE(0, "pclk_ddrphy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 3, GFLAGS),
+ GATE(PCLK_MIPIDSIPHY, "pclk_mipidsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 4, GFLAGS),
+ GATE(PCLK_MIPICSIPHY, "pclk_mipicsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(0, "pclk_cpu_hoost", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 7, GFLAGS),
+
+ /* PD_VI */
+ GATE(0, "aclk_vi_niu", "aclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 15, GFLAGS),
+ GATE(ACLK_CIF, "aclk_cif", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 3, GFLAGS),
+ GATE(0, "hclk_vi_niu", "hclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(HCLK_CIF, "hclk_cif", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 4, GFLAGS),
+
+ /* PD_VO */
+ GATE(0, "aclk_vo_niu", "aclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 0, GFLAGS),
+ GATE(ACLK_VOPB, "aclk_vopb", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 3, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 7, GFLAGS),
+ GATE(ACLK_VOPL, "aclk_vopl", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 5, GFLAGS),
+
+ GATE(0, "hclk_vo_niu", "hclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(HCLK_VOPB, "hclk_vopb", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 4, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 8, GFLAGS),
+ GATE(HCLK_VOPL, "hclk_vopl", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 6, GFLAGS),
+
+ GATE(0, "pclk_vo_niu", "pclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vo_pre", 0, PX30_CLKGATE_CON(3), 9, GFLAGS),
+
+ /* PD_BUS */
+ GATE(0, "aclk_bus_niu", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 8, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 0, PX30_CLKGATE_CON(13), 15, GFLAGS),
+
+ GATE(0, "hclk_bus_niu", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 9, GFLAGS),
+ GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 14, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(HCLK_I2S1, "hclk_i2s1", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(HCLK_I2S2, "hclk_i2s2", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 4, GFLAGS),
+
+ GATE(0, "pclk_bus_niu", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 5, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 7, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 10, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 12, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 13, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 14, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 15, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 1, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 2, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 3, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 4, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 5, GFLAGS),
+ GATE(PCLK_OTP_NS, "pclk_otp_ns", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 6, GFLAGS),
+ GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 7, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 8, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 9, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 10, GFLAGS),
+ GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 11, GFLAGS),
+ GATE(0, "pclk_sgrf", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 12, GFLAGS),
+
+ /* PD_VPU */
+ GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", 0, PX30_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 0, PX30_CLKGATE_CON(4), 4, GFLAGS),
+
+ /* PD_CRYPTO */
+ GATE(0, "hclk_crypto_niu", "hclk_crypto_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(9), 3, GFLAGS),
+ GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_crypto_pre", 0, PX30_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(0, "aclk_crypto_niu", "aclk_crypto_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_crypto_pre", 0, PX30_CLKGATE_CON(9), 4, GFLAGS),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_sdmmc_niu", "hclk_sdmmc_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sdmmc_pre", 0, PX30_CLKGATE_CON(7), 1, GFLAGS),
+
+ /* PD_PERI */
+ GATE(0, "aclk_peri_niu", "aclk_peri_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 9, GFLAGS),
+
+ /* PD_MMC_NAND */
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(0, "hclk_mmc_nand_niu", "hclk_mmc_nand", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 11, GFLAGS),
+
+ /* PD_USB */
+ GATE(0, "hclk_usb_niu", "hclk_usb", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_usb", 0, PX30_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(HCLK_HOST, "hclk_host", "hclk_usb", 0, PX30_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(HCLK_HOST_ARB, "hclk_host_arb", "hclk_usb", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 8, GFLAGS),
+
+ /* PD_GMAC */
+ GATE(0, "aclk_gmac_niu", "aclk_gmac_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 0, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_pre", 0,
+ PX30_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(0, "pclk_gmac_niu", "pclk_gmac_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 1, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_gmac_pre", 0,
+ PX30_CLKGATE_CON(8), 3, GFLAGS),
+};
+
+static struct rockchip_clk_branch px30_clk_pmu_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 2
+ */
+
+ COMPOSITE_FRACMUX(0, "clk_rtc32k_frac", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_PMU_CLKSEL_CON(1), 0,
+ PX30_PMU_CLKGATE_CON(0), 13, GFLAGS,
+ &px30_rtc32k_pmu_fracmux),
+
+ COMPOSITE_NOMUX(XIN24M_DIV, "xin24m_div", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_PMU_CLKSEL_CON(0), 8, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 12, GFLAGS),
+
+ COMPOSITE_NOMUX(0, "clk_wifi_pmu_src", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(2), 8, 6, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 14, GFLAGS),
+ COMPOSITE_NODIV(SCLK_WIFI_PMU, "clk_wifi_pmu", mux_wifi_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 15, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 15, GFLAGS),
+
+ COMPOSITE(0, "clk_uart0_pmu_src", mux_uart_src_p, 0,
+ PX30_PMU_CLKSEL_CON(3), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart0_np5", "clk_uart0_pmu_src", 0,
+ PX30_PMU_CLKSEL_CON(4), 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_pmu_src", CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(5), 0,
+ PX30_PMU_CLKGATE_CON(1), 2, GFLAGS,
+ &px30_uart0_pmu_fracmux),
+ GATE(SCLK_UART0_PMU, "clk_uart0_pmu", "clk_uart0_pmu_mux", CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKGATE_CON(1), 3, GFLAGS),
+
+ GATE(SCLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", 0,
+ PX30_PMU_CLKGATE_CON(1), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(PCLK_PMU_PRE, "pclk_pmu_pre", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(0), 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 0, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_REF24M_PMU, "clk_ref24m_pmu", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(2), 0, 6, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NODIV(SCLK_USBPHY_REF, "clk_usbphy_ref", mux_usbphy_ref_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 6, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE_NODIV(SCLK_MIPIDSIPHY_REF, "clk_mipidsiphy_ref", mux_mipidsiphy_ref_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 7, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 10, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 9
+ */
+
+ /* PD_PMU */
+ GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "pclk_pmu_sgrf", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(0, "pclk_pmu_grf", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(0, "pclk_pmu_mem", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pclk_pmu_pre", 0, PX30_PMU_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_UART0_PMU, "pclk_uart0_pmu", "pclk_pmu_pre", 0, PX30_PMU_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(0, "pclk_cru_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 8, GFLAGS),
+};
+
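+/*
+ * Clocks with no Linux consumer (interconnect NIUs, always-on bus
+ * clocks, the debug UART) are kept enabled by listing them here. CRU
+ * and PMU-CRU clocks share the one list; it is protected from the
+ * PMU-CRU init, which looks clocks up globally by name, so this
+ * relies on the CRU having registered first.
+ */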
+static const char *const px30_pmucru_critical_clocks[] __initconst = {
+ "aclk_bus_pre",
+ "pclk_bus_pre",
+ "hclk_bus_pre",
+ "aclk_peri_pre",
+ "hclk_peri_pre",
+ "aclk_gpu_niu",
+ "pclk_top_pre",
+ "pclk_pmu_pre",
+ "hclk_usb_niu",
+ "pll_npll",
+ "usb480m",
+ "clk_uart2",
+ "pclk_uart2",
+};
+
+static void __init px30_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+ struct clk *clk;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ /* aclk_dmac is controlled by sgrf_soc_con1[11]. */
+ clk = clk_register_fixed_factor(NULL, "aclk_dmac", "aclk_bus_pre", 0, 1, 1);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock aclk_dmac: %ld\n",
+ __func__, PTR_ERR(clk));
+ else
+ rockchip_clk_add_lookup(ctx, clk, ACLK_DMAC);
+
+ rockchip_clk_register_plls(ctx, px30_pll_clks,
+ ARRAY_SIZE(px30_pll_clks),
+ PX30_GRF_SOC_STATUS0);
+ rockchip_clk_register_branches(ctx, px30_clk_branches,
+ ARRAY_SIZE(px30_clk_branches));
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &px30_cpuclk_data, px30_cpuclk_rates,
+ ARRAY_SIZE(px30_cpuclk_rates));
+
+ rockchip_register_softrst(np, 12, reg_base + PX30_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, PX30_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(px30_cru, "rockchip,px30-cru", px30_clk_init);
+
+static void __init px30_pmu_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru pmu region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, px30_pmu_pll_clks,
+ ARRAY_SIZE(px30_pmu_pll_clks), PX30_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_branches(ctx, px30_clk_pmu_branches,
+ ARRAY_SIZE(px30_clk_pmu_branches));
+
+ rockchip_clk_protect_critical(px30_pmucru_critical_clocks,
+ ARRAY_SIZE(px30_pmucru_critical_clocks));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(px30_cru_pmu, "rockchip,px30-pmucru", px30_pmu_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index bca10d618f0a..5a628148f3f0 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -631,7 +631,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
+ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
RK3399_CLKGATE_CON(8), 12, GFLAGS),
/* uart */
@@ -1523,6 +1523,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
"pclk_pmu_src",
"fclk_cm0s_src_pmu",
"clk_timer_src_pmu",
+ "pclk_rkpwm_pmu",
};
static void __init rk3399_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 326b3fa44f5d..c3ad92965823 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -492,6 +492,16 @@ void __init rockchip_clk_register_branches(
list->gate_flags, flags, list->child,
&ctx->lock);
break;
+ case branch_half_divider:
+ clk = rockchip_clk_register_halfdiv(list->name,
+ list->parent_names, list->num_parents,
+ ctx->reg_base, list->muxdiv_offset,
+ list->mux_shift, list->mux_width,
+ list->mux_flags, list->div_shift,
+ list->div_width, list->div_flags,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags, flags, &ctx->lock);
+ break;
case branch_gate:
flags |= CLK_SET_RATE_PARENT;
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ef601dded32c..6b53fff4cc96 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -34,7 +34,46 @@ struct clk;
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
-/* register positions shared by RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */
+/* register positions shared by PX30, RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */
+#define BOOST_PLL_H_CON(x) ((x) * 0x4)
+#define BOOST_CLK_CON 0x0008
+#define BOOST_BOOST_CON 0x000c
+#define BOOST_SWITCH_CNT 0x0010
+#define BOOST_HIGH_PERF_CNT0 0x0014
+#define BOOST_HIGH_PERF_CNT1 0x0018
+#define BOOST_STATIS_THRESHOLD 0x001c
+#define BOOST_SHORT_SWITCH_CNT 0x0020
+#define BOOST_SWITCH_THRESHOLD 0x0024
+#define BOOST_FSM_STATUS 0x0028
+#define BOOST_PLL_L_CON(x) ((x) * 0x4 + 0x2c)
+#define BOOST_RECOVERY_MASK 0x1
+#define BOOST_RECOVERY_SHIFT 1
+#define BOOST_SW_CTRL_MASK 0x1
+#define BOOST_SW_CTRL_SHIFT 2
+#define BOOST_LOW_FREQ_EN_MASK 0x1
+#define BOOST_LOW_FREQ_EN_SHIFT 3
+#define BOOST_BUSY_STATE BIT(8)
+
+#define PX30_PLL_CON(x) ((x) * 0x4)
+#define PX30_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define PX30_CLKGATE_CON(x) ((x) * 0x4 + 0x200)
+#define PX30_GLB_SRST_FST 0xb8
+#define PX30_GLB_SRST_SND 0xbc
+#define PX30_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define PX30_MODE_CON 0xa0
+#define PX30_MISC_CON 0xa4
+#define PX30_SDMMC_CON0 0x380
+#define PX30_SDMMC_CON1 0x384
+#define PX30_SDIO_CON0 0x388
+#define PX30_SDIO_CON1 0x38c
+#define PX30_EMMC_CON0 0x390
+#define PX30_EMMC_CON1 0x394
+
+#define PX30_PMU_PLL_CON(x) ((x) * 0x4)
+#define PX30_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x40)
+#define PX30_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x80)
+#define PX30_PMU_MODE 0x0020
+
#define RV1108_PLL_CON(x) ((x) * 0x4)
#define RV1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60)
#define RV1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120)
@@ -354,6 +393,7 @@ enum rockchip_clk_branch_type {
branch_inverter,
branch_factor,
branch_ddrclk,
+ branch_half_divider,
};
struct rockchip_clk_branch {
@@ -684,6 +724,79 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
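+/*
+ * Half dividers divide by N + 1.5 for a register value N, i.e.
+ * rate = parent_rate * 2 / (2 * N + 3). An illustrative sketch of the
+ * recalc math implemented behind rockchip_clk_register_halfdiv():
+ *
+ *	val = (readl(reg) >> div_shift) & GENMASK(div_width - 1, 0);
+ *	rate = DIV_ROUND_UP_ULL((u64)parent_rate * 2, val * 2 + 3);
+ */
+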
+#define COMPOSITE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
+ df, go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
+#define COMPOSITE_NOGATE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, \
+ ds, dw, df) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ }
+
+#define COMPOSITE_NOMUX_HALFDIV(_id, cname, pname, f, mo, ds, dw, df, \
+ go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
+#define DIV_HALF(_id, cname, pname, f, o, s, w, df) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .muxdiv_offset = o, \
+ .div_shift = s, \
+ .div_width = w, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ }
+
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
void rockchip_clk_of_add_provider(struct device_node *np,
@@ -708,6 +821,17 @@ void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
#define ROCKCHIP_SOFTRST_HIWORD_MASK BIT(0)
+struct clk *rockchip_clk_register_halfdiv(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ u8 div_shift, u8 div_width,
+ u8 div_flags, int gate_offset,
+ u8 gate_shift, u8 gate_flags,
+ unsigned long flags,
+ spinlock_t *lock);
+
#ifdef CONFIG_RESET_CONTROLLER
void rockchip_register_softrst(struct device_node *np,
unsigned int num_regs,
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 513826393158..1a4e6b787978 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
-obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index d5f1ccb36300..cfaa057035ad 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -37,8 +37,6 @@ static const unsigned long exynos4x12_clk_isp_save[] __initconst = {
E4X12_GATE_ISP1,
};
-PNAME(mout_user_aclk400_mcuisp_p4x12) = { "fin_pll", "div_aclk400_mcuisp", };
-
static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
DIV(CLK_ISP_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
DIV(CLK_ISP_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
deleted file mode 100644
index b08bd54c5e76..000000000000
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Thomas Abraham <thomas.ab@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Common Clock Framework support for Exynos5440 SoC.
-*/
-
-#include <dt-bindings/clock/exynos5440.h>
-#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-
-#include "clk.h"
-#include "clk-pll.h"
-
-#define CLKEN_OV_VAL 0xf8
-#define CPU_CLK_STATUS 0xfc
-#define MISC_DOUT1 0x558
-
-static void __iomem *reg_base;
-
-/* parent clock name list */
-PNAME(mout_armclk_p) = { "cplla", "cpllb" };
-PNAME(mout_spi_p) = { "div125", "div200" };
-
-/* fixed rate clocks generated outside the soc */
-static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
- FRATE(0, "xtal", NULL, 0, 0),
-};
-
-/* fixed rate clocks */
-static const struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initconst = {
- FRATE(0, "ppll", NULL, 0, 1000000000),
- FRATE(0, "usb_phy0", NULL, 0, 60000000),
- FRATE(0, "usb_phy1", NULL, 0, 60000000),
- FRATE(0, "usb_ohci12", NULL, 0, 12000000),
- FRATE(0, "usb_ohci48", NULL, 0, 48000000),
-};
-
-/* fixed factor clocks */
-static const struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initconst = {
- FFACTOR(0, "div250", "ppll", 1, 4, 0),
- FFACTOR(0, "div200", "ppll", 1, 5, 0),
- FFACTOR(0, "div125", "div250", 1, 2, 0),
-};
-
-/* mux clocks */
-static const struct samsung_mux_clock exynos5440_mux_clks[] __initconst = {
- MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
- MUX(CLK_ARM_CLK, "arm_clk", mout_armclk_p, CPU_CLK_STATUS, 0, 1),
-};
-
-/* divider clocks */
-static const struct samsung_div_clock exynos5440_div_clks[] __initconst = {
- DIV(CLK_SPI_BAUD, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
-};
-
-/* gate clocks */
-static const struct samsung_gate_clock exynos5440_gate_clks[] __initconst = {
- GATE(CLK_PB0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
- GATE(CLK_PR0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
- GATE(CLK_PR1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
- GATE(CLK_B_250, "b_250", "div250", CLKEN_OV_VAL, 9, 0, 0),
- GATE(CLK_B_125, "b_125", "div125", CLKEN_OV_VAL, 10, 0, 0),
- GATE(CLK_B_200, "b_200", "div200", CLKEN_OV_VAL, 11, 0, 0),
- GATE(CLK_SATA, "sata", "div200", CLKEN_OV_VAL, 12, 0, 0),
- GATE(CLK_USB, "usb", "div200", CLKEN_OV_VAL, 13, 0, 0),
- GATE(CLK_GMAC0, "gmac0", "div200", CLKEN_OV_VAL, 14, 0, 0),
- GATE(CLK_CS250, "cs250", "div250", CLKEN_OV_VAL, 19, 0, 0),
- GATE(CLK_PB0_250_O, "pb0_250_o", "pb0_250", CLKEN_OV_VAL, 3, 0, 0),
- GATE(CLK_PR0_250_O, "pr0_250_o", "pr0_250", CLKEN_OV_VAL, 4, 0, 0),
- GATE(CLK_PR1_250_O, "pr1_250_o", "pr1_250", CLKEN_OV_VAL, 5, 0, 0),
- GATE(CLK_B_250_O, "b_250_o", "b_250", CLKEN_OV_VAL, 9, 0, 0),
- GATE(CLK_B_125_O, "b_125_o", "b_125", CLKEN_OV_VAL, 10, 0, 0),
- GATE(CLK_B_200_O, "b_200_o", "b_200", CLKEN_OV_VAL, 11, 0, 0),
- GATE(CLK_SATA_O, "sata_o", "sata", CLKEN_OV_VAL, 12, 0, 0),
- GATE(CLK_USB_O, "usb_o", "usb", CLKEN_OV_VAL, 13, 0, 0),
- GATE(CLK_GMAC0_O, "gmac0_o", "gmac", CLKEN_OV_VAL, 14, 0, 0),
- GATE(CLK_CS250_O, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
-};
-
-static const struct of_device_id ext_clk_match[] __initconst = {
- { .compatible = "samsung,clock-xtal", .data = (void *)0, },
- {},
-};
-
-static int exynos5440_clk_restart_notify(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- u32 val, status;
-
- status = readl_relaxed(reg_base + 0xbc);
- val = readl_relaxed(reg_base + 0xcc);
- val = (val & 0xffff0000) | (status & 0xffff);
- writel_relaxed(val, reg_base + 0xcc);
-
- return NOTIFY_DONE;
-}
-
-/*
- * Exynos5440 Clock restart notifier, handles restart functionality
- */
-static struct notifier_block exynos5440_clk_restart_handler = {
- .notifier_call = exynos5440_clk_restart_notify,
- .priority = 128,
-};
-
-static const struct samsung_pll_clock exynos5440_plls[] __initconst = {
- PLL(pll_2550x, CLK_CPLLA, "cplla", "xtal", 0, 0x4c, NULL),
- PLL(pll_2550x, CLK_CPLLB, "cpllb", "xtal", 0, 0x50, NULL),
-};
-
-/*
- * Clock aliases for legacy clkdev look-up.
- */
-static const struct samsung_clock_alias exynos5440_aliases[] __initconst = {
- ALIAS(CLK_ARM_CLK, NULL, "armclk"),
-};
-
-/* register exynos5440 clocks */
-static void __init exynos5440_clk_init(struct device_node *np)
-{
- struct samsung_clk_provider *ctx;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: failed to map clock controller registers,"
- " aborting clock initialization\n", __func__);
- return;
- }
-
- ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
-
- samsung_clk_of_register_fixed_ext(ctx, exynos5440_fixed_rate_ext_clks,
- ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
-
- samsung_clk_register_pll(ctx, exynos5440_plls,
- ARRAY_SIZE(exynos5440_plls), ctx->reg_base);
-
- samsung_clk_register_fixed_rate(ctx, exynos5440_fixed_rate_clks,
- ARRAY_SIZE(exynos5440_fixed_rate_clks));
- samsung_clk_register_fixed_factor(ctx, exynos5440_fixed_factor_clks,
- ARRAY_SIZE(exynos5440_fixed_factor_clks));
- samsung_clk_register_mux(ctx, exynos5440_mux_clks,
- ARRAY_SIZE(exynos5440_mux_clks));
- samsung_clk_register_div(ctx, exynos5440_div_clks,
- ARRAY_SIZE(exynos5440_div_clks));
- samsung_clk_register_gate(ctx, exynos5440_gate_clks,
- ARRAY_SIZE(exynos5440_gate_clks));
- samsung_clk_register_alias(ctx, exynos5440_aliases,
- ARRAY_SIZE(exynos5440_aliases));
-
- samsung_clk_of_add_provider(np, ctx);
-
- if (register_restart_handler(&exynos5440_clk_restart_handler))
- pr_warn("exynos5440 clock can't register restart handler\n");
-
- pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
- pr_info("exynos5440 clock initialization complete\n");
-}
-CLK_OF_DECLARE(exynos5440_clk, "samsung,exynos5440-clock", exynos5440_clk_init);
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 72714633e39c..5b238fc314ac 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -28,7 +28,7 @@ static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"};
static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"};
-static const char * const sdmmc_free_mux[] = {"peri_sdmmc_clk", "boot_clk"};
+static const char * const sdmmc_free_mux[] = {"main_sdmmc_clk", "boot_clk"};
static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"};
static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
@@ -37,6 +37,11 @@ static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
+static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
+ "peri_mpu_base_clk",
+ "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk"};
+
/* clocks in AO (always on) controller */
static const struct stratix10_pll_clock s10_pll_clks[] = {
{ STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
@@ -57,7 +62,7 @@ static const struct stratix10_perip_c_clock s10_main_perip_c_clks[] = {
};
static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
- { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
0, 0x48, 0, 0, 0},
{ STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
0, 0x4C, 0, 0, 0},
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index acaa14cfa25c..49454700f2e5 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,24 +1,24 @@
# SPDX-License-Identifier: GPL-2.0
# Common objects
-lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o
+obj-y += ccu_common.o
+obj-y += ccu_mmc_timing.o
+obj-y += ccu_reset.o
# Base clock types
-lib-$(CONFIG_SUNXI_CCU) += ccu_div.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o
+obj-y += ccu_div.o
+obj-y += ccu_frac.o
+obj-y += ccu_gate.o
+obj-y += ccu_mux.o
+obj-y += ccu_mult.o
+obj-y += ccu_phase.o
+obj-y += ccu_sdm.o
# Multi-factor clocks
-lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
+obj-y += ccu_nk.o
+obj-y += ccu_nkm.o
+obj-y += ccu_nkmp.o
+obj-y += ccu_nm.o
+obj-y += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
-
-# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our
-# case, we want to use that goal, but even though lib.a will be properly
-# generated, it will not be linked in, eventually resulting in a linker error
-# for missing symbols.
-#
-# We can work around that by explicitly adding lib.a to the obj-y goal. This is
-# an undocumented behaviour, but works well for now.
-obj-$(CONFIG_SUNXI_CCU) += lib.a
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 468d1abaf0ee..bae5ee67a797 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -289,16 +289,13 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.data = &sun8i_v3s_de2_clk_desc,
},
{
+ .compatible = "allwinner,sun50i-a64-de2-clk",
+ .data = &sun50i_a64_de2_clk_desc,
+ },
+ {
.compatible = "allwinner,sun50i-h5-de2-clk",
.data = &sun50i_a64_de2_clk_desc,
},
- /*
- * The Allwinner A64 SoC needs some bit to be poke in syscon to make
- * DE2 really working.
- * So there's currently no A64 compatible here.
- * H5 shares the same reset line with A64, so here H5 is using the
- * clock description of A64.
- */
{ }
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 65ba6455feb7..0f388f6944d5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -66,17 +66,18 @@ static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
CLK_SET_RATE_UNGATE);
/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
- "osc24M", 0x0010,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video0_clk, "pll-video0",
+ "osc24M", 0x0010,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
/* TODO: The result of N/M is required to be in [8, 25] range. */
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
@@ -152,17 +153,18 @@ static struct ccu_nk pll_periph1_clk = {
};
/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
- "osc24M", 0x030,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static struct ccu_nkm pll_sata_clk = {
.enable = BIT(31),
@@ -654,7 +656,8 @@ static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(mp_clk, "mp", de_parents,
0x108, 0, 4, 24, 3, BIT(31), 0);
@@ -666,9 +669,11 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_parents,
static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd1_clk, "tcon-lcd1", tcon_parents,
0x114, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1", tcon_parents,
- 0x11c, 0, 4, 24, 3, BIT(31), 0);
+ 0x11c, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const deinterlace_parents[] = { "pll-periph0",
"pll-periph1" };
@@ -698,7 +703,8 @@ static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" };
static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
- 0x150, 0, 4, 24, 2, BIT(31), 0);
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M",
0x154, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
index 0db8e1e97af8..db2a1243f9ff 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
@@ -25,7 +25,9 @@
#define CLK_PLL_AUDIO_2X 4
#define CLK_PLL_AUDIO_4X 5
#define CLK_PLL_AUDIO_8X 6
-#define CLK_PLL_VIDEO0 7
+
+/* PLL_VIDEO0 is exported */
+
#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
@@ -34,7 +36,9 @@
#define CLK_PLL_PERIPH0_2X 13
#define CLK_PLL_PERIPH1 14
#define CLK_PLL_PERIPH1_2X 15
-#define CLK_PLL_VIDEO1 16
+
+/* PLL_VIDEO1 is exported */
+
#define CLK_PLL_VIDEO1_2X 17
#define CLK_PLL_SATA 18
#define CLK_PLL_SATA_OUT 19
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index b71692391bd6..6507acc843c7 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-periph-fixed.o
obj-y += clk-periph-gate.o
obj-y += clk-pll.o
obj-y += clk-pll-out.o
+obj-y += clk-sdmmc-mux.o
obj-y += clk-super.o
obj-y += clk-tegra-audio.o
obj-y += clk-tegra-periph.o
@@ -24,3 +25,4 @@ obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
obj-$(CONFIG_CLK_TEGRA_BPMP) += clk-bpmp.o
+obj-y += clk-utils.o
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a896692b74ec..01dada561c10 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -586,9 +586,15 @@ static struct clk_hw *tegra_bpmp_clk_of_xlate(struct of_phandle_args *clkspec,
unsigned int id = clkspec->args[0], i;
struct tegra_bpmp *bpmp = data;
- for (i = 0; i < bpmp->num_clocks; i++)
- if (bpmp->clocks[i]->id == id)
- return &bpmp->clocks[i]->hw;
+ for (i = 0; i < bpmp->num_clocks; i++) {
+ struct tegra_bpmp_clk *clk = bpmp->clocks[i];
+
+ if (!clk)
+ continue;
+
+ if (clk->id == id)
+ return &clk->hw;
+ }
return NULL;
}
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 16e0aee14773..205fe8ff63f0 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -32,35 +32,15 @@
static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
unsigned long parent_rate)
{
- u64 divider_ux1 = parent_rate;
- u8 flags = divider->flags;
- int mul;
-
- if (!rate)
- return 0;
-
- mul = get_mul(divider);
-
- if (!(flags & TEGRA_DIVIDER_INT))
- divider_ux1 *= mul;
-
- if (flags & TEGRA_DIVIDER_ROUND_UP)
- divider_ux1 += rate - 1;
-
- do_div(divider_ux1, rate);
-
- if (flags & TEGRA_DIVIDER_INT)
- divider_ux1 *= mul;
+ int div;
- divider_ux1 -= mul;
+ div = div_frac_get(rate, parent_rate, divider->width,
+ divider->frac_width, divider->flags);
- if ((s64)divider_ux1 < 0)
+ if (div < 0)
return 0;
- if (divider_ux1 > get_max_div(divider))
- return get_max_div(divider);
-
- return divider_ux1;
+ return div;
}
static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
@@ -194,6 +174,7 @@ static const struct clk_div_table mc_div_table[] = {
struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock)
{
- return clk_register_divider_table(NULL, name, parent_name, 0, reg,
- 16, 1, 0, mc_div_table, lock);
+ return clk_register_divider_table(NULL, name, parent_name,
+ CLK_IS_CRITICAL, reg, 16, 1, 0,
+ mc_div_table, lock);
}
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 5234acd30e89..0621a3a82ea6 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -132,7 +132,7 @@ static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
timing = tegra->timings + i;
if (timing->rate > req->max_rate) {
- i = min(i, 1);
+ i = max(i, 1);
req->rate = tegra->timings[i - 1].rate;
return 0;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index b616e33c5255..de466b4446da 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -227,13 +227,11 @@ enum clk_id {
tegra_clk_sdmmc1_9,
tegra_clk_sdmmc2,
tegra_clk_sdmmc2_8,
- tegra_clk_sdmmc2_9,
tegra_clk_sdmmc3,
tegra_clk_sdmmc3_8,
tegra_clk_sdmmc3_9,
tegra_clk_sdmmc4,
tegra_clk_sdmmc4_8,
- tegra_clk_sdmmc4_9,
tegra_clk_se,
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
new file mode 100644
index 000000000000..473d418533cb
--- /dev/null
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved.
+ *
+ * based on clk-mux.c
+ *
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+#define DIV_MASK GENMASK(7, 0)
+#define MUX_SHIFT 29
+#define MUX_MASK GENMASK(MUX_SHIFT + 2, MUX_SHIFT)
+#define SDMMC_MUL 2
+
+#define get_max_div(d) DIV_MASK
+#define get_div_field(val) ((val) & DIV_MASK)
+#define get_mux_field(val) (((val) & MUX_MASK) >> MUX_SHIFT)
+
+static const char * const mux_sdmmc_parents[] = {
+ "pll_p", "pll_c4_out2", "pll_c4_out0", "pll_c4_out1", "clk_m"
+};
+
+static const u8 mux_lj_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 5, [4] = 6
+};
+
+static const u8 mux_non_lj_idx[] = {
+ [0] = 0, [1] = 3, [2] = 7, [3] = 4, [4] = 6
+};
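+
+/*
+ * The SDMMC source mux has separate selector encodings for the "low
+ * jitter" (LJ) inputs, which bypass the divider, and for the regular
+ * divided inputs. The tables above translate the clk framework's
+ * parent index into the hardware selector; which table applies
+ * depends on whether the divider field is currently non-zero.
+ */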
+
+static u8 clk_sdmmc_mux_get_parent(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int num_parents, i;
+ u32 src, val;
+ const u8 *mux_idx;
+
+ num_parents = clk_hw_get_num_parents(hw);
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ src = get_mux_field(val);
+ if (get_div_field(val))
+ mux_idx = mux_non_lj_idx;
+ else
+ mux_idx = mux_lj_idx;
+
+ for (i = 0; i < num_parents; i++) {
+ if (mux_idx[i] == src)
+ return i;
+ }
+
+ WARN(1, "Unknown parent selector %d\n", src);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ u32 val;
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ if (get_div_field(val))
+ index = mux_non_lj_idx[index];
+ else
+ index = mux_lj_idx[index];
+
+ val &= ~MUX_MASK;
+ val |= index << MUX_SHIFT;
+
+ writel(val, sdmmc_mux->reg);
+
+ return 0;
+}
+
+static unsigned long clk_sdmmc_mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ u32 val;
+ int div;
+ u64 rate = parent_rate;
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ div = get_div_field(val);
+
+ div += SDMMC_MUL;
+
+ rate *= SDMMC_MUL;
+ rate += div - 1;
+ do_div(rate, div);
+
+ return rate;
+}
+
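+/*
+ * A register value div encodes a divisor of (div + 2) / 2; with
+ * illustrative numbers, a 408 MHz parent and div == 2 give
+ *
+ *	rate = 408000000 * 2 / (2 + 2) = 204000000
+ *
+ * rounded up, as in clk_sdmmc_mux_recalc_rate() above. The
+ * determine_rate and set_rate callbacks below solve the same relation
+ * for div.
+ */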
+static int clk_sdmmc_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int div;
+ unsigned long output_rate = req->best_parent_rate;
+
+ req->rate = max(req->rate, req->min_rate);
+ req->rate = min(req->rate, req->max_rate);
+
+ if (!req->rate)
+ return output_rate;
+
+ div = div_frac_get(req->rate, output_rate, 8, 1, sdmmc_mux->div_flags);
+ if (div < 0)
+ div = 0;
+
+ if (sdmmc_mux->div_flags & TEGRA_DIVIDER_ROUND_UP)
+ req->rate = DIV_ROUND_UP(output_rate * SDMMC_MUL,
+ div + SDMMC_MUL);
+ else
+ req->rate = output_rate * SDMMC_MUL / (div + SDMMC_MUL);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int div;
+ unsigned long flags = 0;
+ u32 val;
+ u8 src;
+
+ div = div_frac_get(rate, parent_rate, 8, 1, sdmmc_mux->div_flags);
+ if (div < 0)
+ return div;
+
+ if (sdmmc_mux->lock)
+ spin_lock_irqsave(sdmmc_mux->lock, flags);
+
+ src = clk_sdmmc_mux_get_parent(hw);
+ if (div)
+ src = mux_non_lj_idx[src];
+ else
+ src = mux_lj_idx[src];
+
+ val = src << MUX_SHIFT;
+ val |= div;
+ writel(val, sdmmc_mux->reg);
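+ /* read back to flush the posted write, then allow 2 us to settle */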
+ fence_udelay(2, sdmmc_mux->reg);
+
+ if (sdmmc_mux->lock)
+ spin_unlock_irqrestore(sdmmc_mux->lock, flags);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_sdmmc_mux_enable(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_sdmmc_mux_disable(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ gate_ops->disable(gate_hw);
+}
+
+static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
+ .get_parent = clk_sdmmc_mux_get_parent,
+ .set_parent = clk_sdmmc_mux_set_parent,
+ .determine_rate = clk_sdmmc_mux_determine_rate,
+ .recalc_rate = clk_sdmmc_mux_recalc_rate,
+ .set_rate = clk_sdmmc_mux_set_rate,
+ .is_enabled = clk_sdmmc_mux_is_enabled,
+ .enable = clk_sdmmc_mux_enable,
+ .disable = clk_sdmmc_mux_disable,
+};
+
+struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
+ void __iomem *clk_base, u32 offset, u32 clk_num, u8 div_flags,
+ unsigned long flags, void *lock)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+ const struct tegra_clk_periph_regs *bank;
+ struct tegra_sdmmc_mux *sdmmc_mux;
+
+ init.ops = &tegra_clk_sdmmc_mux_ops;
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = mux_sdmmc_parents;
+ init.num_parents = ARRAY_SIZE(mux_sdmmc_parents);
+
+ bank = get_reg_bank(clk_num);
+ if (!bank)
+ return ERR_PTR(-EINVAL);
+
+ sdmmc_mux = kzalloc(sizeof(*sdmmc_mux), GFP_KERNEL);
+ if (!sdmmc_mux)
+ return ERR_PTR(-ENOMEM);
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ sdmmc_mux->hw.init = &init;
+ sdmmc_mux->reg = clk_base + offset;
+ sdmmc_mux->lock = lock;
+ sdmmc_mux->gate.clk_base = clk_base;
+ sdmmc_mux->gate.regs = bank;
+ sdmmc_mux->gate.enable_refcnt = periph_clk_enb_refcnt;
+ sdmmc_mux->gate.clk_num = clk_num;
+ sdmmc_mux->gate.flags = TEGRA_PERIPH_ON_APB;
+ sdmmc_mux->div_flags = div_flags;
+ sdmmc_mux->gate_ops = &tegra_clk_periph_gate_ops;
+
+ clk = clk_register(NULL, &sdmmc_mux->hw);
+ if (IS_ERR(clk)) {
+ kfree(sdmmc_mux);
+ return clk;
+ }
+
+ sdmmc_mux->gate.hw.clk = clk;
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 2acba2986bc6..38c4eb28c8bf 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -451,15 +451,6 @@ static u32 mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0_idx[] = {
[0] = 0, [1] = 3, [2] = 4, [3] = 6, [4] = 7,
};
-static const char *mux_pllp_clkm_pllc4_out2_out1_out0_lj[] = {
- "pll_p",
- "pll_c4_out2", "pll_c4_out0", /* LJ input */
- "pll_c4_out2", "pll_c4_out1",
- "pll_c4_out1", /* LJ input */
- "clk_m", "pll_c4_out0"
-};
-#define mux_pllp_clkm_pllc4_out2_out1_out0_lj_idx NULL
-
static const char *mux_pllp_pllc2_c_c3_clkm[] = {
"pll_p", "pll_c2", "pll_c", "pll_c3", "clk_m"
};
@@ -686,9 +677,7 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
MUX8("sdmmc1", mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_9),
- MUX8("sdmmc2", mux_pllp_clkm_pllc4_out2_out1_out0_lj, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_9),
MUX8("sdmmc3", mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_9),
- MUX8("sdmmc4", mux_pllp_clkm_pllc4_out2_out1_out0_lj, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_9),
MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 0c69c7970950..b6cf28ca2ed2 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1267,7 +1267,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S3, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
- { TEGRA124_CLK_VDE, TEGRA124_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_C3, 600000000, 0 },
{ TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1 },
{ TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0 },
{ TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0 },
@@ -1290,6 +1290,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1 },
{ TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
{ TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
+ { TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
/* must be the last entry */
{ TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 5435d01c636a..9eb1cb14fce1 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -44,6 +44,8 @@
#define CLK_SOURCE_EMC 0x19c
#define CLK_SOURCE_SOR1 0x410
#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC4 0x164
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -2286,11 +2288,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_rtc] = { .dt_id = TEGRA210_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA210_CLK_TIMER, .present = true },
[tegra_clk_uarta_8] = { .dt_id = TEGRA210_CLK_UARTA, .present = true },
- [tegra_clk_sdmmc2_9] = { .dt_id = TEGRA210_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA210_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA210_CLK_I2C1, .present = true },
[tegra_clk_sdmmc1_9] = { .dt_id = TEGRA210_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4_9] = { .dt_id = TEGRA210_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA210_CLK_PWM, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA210_CLK_I2S2, .present = true },
[tegra_clk_usbd] = { .dt_id = TEGRA210_CLK_USBD, .present = true },
@@ -3030,6 +3030,16 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
0, NULL);
clks[TEGRA210_CLK_ACLK] = clk;
+ clk = tegra_clk_register_sdmmc_mux_div("sdmmc2", clk_base,
+ CLK_SOURCE_SDMMC2, 9,
+ TEGRA_DIVIDER_ROUND_UP, 0, NULL);
+ clks[TEGRA210_CLK_SDMMC2] = clk;
+
+ clk = tegra_clk_register_sdmmc_mux_div("sdmmc4", clk_base,
+ CLK_SOURCE_SDMMC4, 15,
+ TEGRA_DIVIDER_ROUND_UP, 0, NULL);
+ clks[TEGRA210_CLK_SDMMC4] = clk;
+
for (i = 0; i < ARRAY_SIZE(tegra210_periph); i++) {
struct tegra_periph_init_data *init = &tegra210_periph[i];
struct clk **clkp;
diff --git a/drivers/clk/tegra/clk-utils.c b/drivers/clk/tegra/clk-utils.c
new file mode 100644
index 000000000000..1a5daae4e501
--- /dev/null
+++ b/drivers/clk/tegra/clk-utils.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define div_mask(w) ((1 << (w)) - 1)
+
+int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
+ u8 frac_width, u8 flags)
+{
+ u64 divider_ux1 = parent_rate;
+ int mul;
+
+ if (!rate)
+ return 0;
+
+ mul = 1 << frac_width;
+
+ if (!(flags & TEGRA_DIVIDER_INT))
+ divider_ux1 *= mul;
+
+ if (flags & TEGRA_DIVIDER_ROUND_UP)
+ divider_ux1 += rate - 1;
+
+ do_div(divider_ux1, rate);
+
+ if (flags & TEGRA_DIVIDER_INT)
+ divider_ux1 *= mul;
+
+ if (divider_ux1 < mul)
+ return 0;
+
+ divider_ux1 -= mul;
+
+ if (divider_ux1 > div_mask(width))
+ return div_mask(width);
+
+ return divider_ux1;
+}
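
The fractional-divider math in div_frac_get() above is easy to sanity-check in isolation. Below is a minimal standalone sketch (a hypothetical userspace harness, not kernel code: the TEGRA_DIVIDER_* flag encodings are assumed for illustration, and do_div() is replaced by plain 64-bit division) that reproduces the computation and verifies one data point: dividing a 408 MHz parent down to 48 MHz with one fractional bit yields the register value n = 15, since rate = parent * 2 / (n + 2) = 816 MHz / 17 = 48 MHz.

#include <stdint.h>
#include <stdio.h>

#define TEGRA_DIVIDER_ROUND_UP 0x1	/* assumed encodings, sketch only */
#define TEGRA_DIVIDER_INT      0x2

static int div_frac_get_sketch(unsigned long rate, unsigned long parent_rate,
			       uint8_t width, uint8_t frac_width, uint8_t flags)
{
	uint64_t divider_ux1 = parent_rate;
	int mul = 1 << frac_width;

	if (!rate)
		return 0;

	if (!(flags & TEGRA_DIVIDER_INT))
		divider_ux1 *= mul;		/* scale up by the fractional unit */

	if (flags & TEGRA_DIVIDER_ROUND_UP)
		divider_ux1 += rate - 1;	/* round up so we never overclock */

	divider_ux1 /= rate;			/* do_div() in the kernel version */

	if (flags & TEGRA_DIVIDER_INT)
		divider_ux1 *= mul;

	if (divider_ux1 < (uint64_t)mul)
		return 0;

	divider_ux1 -= mul;			/* hardware adds mul back in */

	if (divider_ux1 > ((1u << width) - 1))
		return (1u << width) - 1;	/* clamp to the register field */

	return (int)divider_ux1;
}

int main(void)
{
	int n = div_frac_get_sketch(48000000, 408000000, 8, 1,
				    TEGRA_DIVIDER_ROUND_UP);

	/* Prints "n = 15 -> 48000000 Hz" */
	printf("n = %d -> %llu Hz\n", n,
	       2ULL * 408000000 / (unsigned long long)(n + 2));
	return 0;
}
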
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index e1f88463b600..d2c3a010f8e9 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/delay.h>
/**
* struct tegra_clk_sync_source - external clock source from codec
@@ -705,6 +706,32 @@ struct clk *tegra_clk_register_super_clk(const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 clk_super_flags,
spinlock_t *lock);
+
+/**
+ * struct tegra_sdmmc_mux - switch divider with Low Jitter inputs for SDMMC
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling mux and divider
+ * @lock: optional register lock
+ * @gate_ops: gate clock ops
+ * @gate: gate clock
+ * @div_flags: hardware-specific divider flags
+ */
+struct tegra_sdmmc_mux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ spinlock_t *lock;
+ const struct clk_ops *gate_ops;
+ struct tegra_clk_periph_gate gate;
+ u8 div_flags;
+};
+
+#define to_clk_sdmmc_mux(_hw) container_of(_hw, struct tegra_sdmmc_mux, hw)
+
+struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
+ void __iomem *clk_base, u32 offset, u32 clk_num, u8 div_flags,
+ unsigned long flags, void *lock);
+
/**
* struct clk_init_table - clock initialization table
* @clk_id: clock id as mentioned in device tree bindings
@@ -811,6 +838,9 @@ extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll);
u16 tegra_pll_get_fixed_mdiv(struct clk_hw *hw, unsigned long input_rate);
int tegra_pll_p_div_to_hw(struct tegra_clk_pll *pll, u8 p_div);
+int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
+ u8 frac_width, u8 flags);
+
/* Combined read fence with delay */
#define fence_udelay(delay, reg) \
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index fb249a1637a5..71a122b2dc67 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -708,6 +708,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons
{ DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" },
{ DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" },
+	{ DRA7_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk" },
{ 0 },
};
diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
index 521c80e9a06f..89b3ac378b3f 100644
--- a/drivers/clk/uniphier/clk-uniphier-peri.c
+++ b/drivers/clk/uniphier/clk-uniphier-peri.c
@@ -27,6 +27,12 @@
#define UNIPHIER_PERI_CLK_FI2C(idx, ch) \
UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
+#define UNIPHIER_PERI_CLK_SCSSI(idx) \
+ UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
+
+#define UNIPHIER_PERI_CLK_MCSSI(idx) \
+ UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
+
const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_UART(0, 0),
UNIPHIER_PERI_CLK_UART(1, 1),
@@ -38,6 +44,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_I2C(6, 2),
UNIPHIER_PERI_CLK_I2C(7, 3),
UNIPHIER_PERI_CLK_I2C(8, 4),
+ UNIPHIER_PERI_CLK_SCSSI(11),
{ /* sentinel */ }
};
@@ -53,5 +60,7 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_FI2C(8, 4),
UNIPHIER_PERI_CLK_FI2C(9, 5),
UNIPHIER_PERI_CLK_FI2C(10, 6),
+ UNIPHIER_PERI_CLK_SCSSI(11),
+ UNIPHIER_PERI_CLK_MCSSI(12),
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 4f5ff9fa11fd..2bb5a8570adc 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -29,18 +29,20 @@
UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \
UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15)
-/* Denali driver requires clk_x rate (clk: 50MHz, clk_x & ecc_clk: 200MHz) */
#define UNIPHIER_LD4_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 8), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 32), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2)
#define UNIPHIER_PRO5_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 12), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 48), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2)
#define UNIPHIER_LD11_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 10), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x210c, 0)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 40), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x210c, 0)
+
+#define UNIPHIER_SYS_CLK_NAND_4X(idx) \
+ UNIPHIER_CLK_FACTOR("nand-4x", (idx), "nand", 4, 1)
#define UNIPHIER_LD11_SYS_CLK_EMMC(idx) \
UNIPHIER_CLK_GATE("emmc", (idx), NULL, 0x210c, 2)
@@ -93,7 +95,9 @@ const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
@@ -108,7 +112,9 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("gpll", -1, "ref", 10, 1), /* 250 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32),
+ UNIPHIER_CLK_FACTOR("spi", 1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
@@ -118,6 +124,9 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ UNIPHIER_CLK_FACTOR("usb30-hsphy0", 16, "upll", 1, 12),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 17, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb31-ssphy0", 20, "ref", 1, 1),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 18),
UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x2104, 19),
UNIPHIER_PRO4_SYS_CLK_AIO(40),
@@ -130,7 +139,9 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 20),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
@@ -143,7 +154,9 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC */
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
@@ -158,7 +171,9 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 96, 1), /* 2400 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 27),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, RLE */
@@ -166,8 +181,11 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
/* The document mentions 0x2104 bit 18, but not functional */
- UNIPHIER_CLK_GATE("usb30-phy", 16, NULL, 0x2104, 19),
- UNIPHIER_CLK_GATE("usb31-phy", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x2104, 19),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 17, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy1", 18, "ref", 1, 1),
+ UNIPHIER_CLK_GATE("usb31-hsphy0", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_FACTOR("usb31-ssphy0", 21, "ref", 1, 1),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 22),
UNIPHIER_PRO5_SYS_CLK_AIO(40),
{ /* sentinel */ }
@@ -180,7 +198,9 @@ const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vspll", -1, "ref", 80, 1), /* 2000 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD11_SYS_CLK_ETHER(6),
@@ -213,7 +233,9 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vppll", -1, "ref", 504, 5), /* 2520 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD20_SYS_CLK_SD,
@@ -226,8 +248,10 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
* We do not use bit 15 here.
*/
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
- UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
- UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 12),
+ UNIPHIER_CLK_GATE("usb30-hsphy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 18, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy1", 19, "ref", 1, 1),
UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 4),
UNIPHIER_LD11_SYS_CLK_AIO(40),
UNIPHIER_LD11_SYS_CLK_EVEA(41),
@@ -254,19 +278,21 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("s2pll", -1, "ref", 88, 1), /* IPP: 2400 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD20_SYS_CLK_SD,
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
UNIPHIER_CLK_GATE("ether0", 6, NULL, 0x210c, 9),
UNIPHIER_CLK_GATE("ether1", 7, NULL, 0x210c, 10),
UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x210c, 4), /* =GIO0 */
UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x210c, 5), /* =GIO1 */
UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x210c, 6), /* =GIO1-1 */
- UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 16),
- UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 18),
- UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20),
- UNIPHIER_CLK_GATE("usb31-phy0", 20, NULL, 0x210c, 17),
- UNIPHIER_CLK_GATE("usb31-phy1", 21, NULL, 0x210c, 19),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 16),
+ UNIPHIER_CLK_GATE("usb30-ssphy0", 17, NULL, 0x210c, 18),
+ UNIPHIER_CLK_GATE("usb30-ssphy1", 18, NULL, 0x210c, 20),
+ UNIPHIER_CLK_GATE("usb31-hsphy0", 20, NULL, 0x210c, 17),
+ UNIPHIER_CLK_GATE("usb31-ssphy0", 21, NULL, 0x210c, 19),
UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 3),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x210c, 7),
UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x210c, 8),
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index dec0dd88ec15..a11f4ba98b05 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -609,4 +609,15 @@ config ATCPIT100_TIMER
help
This option enables support for the Andestech ATCPIT100 timers.
+config RISCV_TIMER
+ bool "Timer for the RISC-V platform"
+ depends on RISCV
+ default y
+ select TIMER_PROBE
+ select TIMER_OF
+ help
+ This enables the per-hart timer built into all RISC-V systems, which
+ is accessed via both the SBI and the rdcycle instruction. This is
+ required for all RISC-V systems.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 00caf37e52f9..db51b2427e8a 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
-obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
+obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
@@ -78,3 +78,4 @@ obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
+obj-$(CONFIG_RISCV_TIMER) += riscv_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57cb2f00fc07..d8c7f5750cdb 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
clk->rating = 400;
- clk->cpumask = cpu_all_mask;
+ clk->cpumask = cpu_possible_mask;
if (arch_timer_mem_use_virtual) {
clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
deleted file mode 100644
index f9b724fd9950..000000000000
--- a/drivers/clocksource/mtk_timer.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Mediatek SoCs General-Purpose Timer handling.
- *
- * Copyright (C) 2014 Matthias Brugger
- *
- * Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-#include <linux/slab.h>
-
-#define GPT_IRQ_EN_REG 0x00
-#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
-#define GPT_IRQ_ACK_REG 0x08
-#define GPT_IRQ_ACK(val) BIT((val) - 1)
-
-#define TIMER_CTRL_REG(val) (0x10 * (val))
-#define TIMER_CTRL_OP(val) (((val) & 0x3) << 4)
-#define TIMER_CTRL_OP_ONESHOT (0)
-#define TIMER_CTRL_OP_REPEAT (1)
-#define TIMER_CTRL_OP_FREERUN (3)
-#define TIMER_CTRL_CLEAR (2)
-#define TIMER_CTRL_ENABLE (1)
-#define TIMER_CTRL_DISABLE (0)
-
-#define TIMER_CLK_REG(val) (0x04 + (0x10 * (val)))
-#define TIMER_CLK_SRC(val) (((val) & 0x1) << 4)
-#define TIMER_CLK_SRC_SYS13M (0)
-#define TIMER_CLK_SRC_RTC32K (1)
-#define TIMER_CLK_DIV1 (0x0)
-#define TIMER_CLK_DIV2 (0x1)
-
-#define TIMER_CNT_REG(val) (0x08 + (0x10 * (val)))
-#define TIMER_CMP_REG(val) (0x0C + (0x10 * (val)))
-
-#define GPT_CLK_EVT 1
-#define GPT_CLK_SRC 2
-
-struct mtk_clock_event_device {
- void __iomem *gpt_base;
- u32 ticks_per_jiffy;
- struct clock_event_device dev;
-};
-
-static void __iomem *gpt_sched_reg __read_mostly;
-
-static u64 notrace mtk_read_sched_clock(void)
-{
- return readl_relaxed(gpt_sched_reg);
-}
-
-static inline struct mtk_clock_event_device *to_mtk_clk(
- struct clock_event_device *c)
-{
- return container_of(c, struct mtk_clock_event_device, dev);
-}
-
-static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
- writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
- TIMER_CTRL_REG(timer));
-}
-
-static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
- unsigned long delay, u8 timer)
-{
- writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
-}
-
-static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
- bool periodic, u8 timer)
-{
- u32 val;
-
- /* Acknowledge interrupt */
- writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-
- /* Clear 2 bit timer operation mode field */
- val &= ~TIMER_CTRL_OP(0x3);
-
- if (periodic)
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
- else
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
-
- writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static int mtk_clkevt_shutdown(struct clock_event_device *clk)
-{
- mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_next_event(unsigned long event,
- struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
-
- return 0;
-}
-
-static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
-{
- struct mtk_clock_event_device *evt = dev_id;
-
- /* Acknowledge timer0 irq */
- writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
- evt->dev.event_handler(&evt->dev);
-
- return IRQ_HANDLED;
-}
-
-static void
-__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
-{
- writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-
- writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
- evt->gpt_base + TIMER_CLK_REG(timer));
-
- writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
-
- writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- /* Disable all interrupts */
- writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-
- /* Acknowledge all spurious pending interrupts */
- writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
- writel(val | GPT_IRQ_ENABLE(timer),
- evt->gpt_base + GPT_IRQ_EN_REG);
-}
-
-static int __init mtk_timer_init(struct device_node *node)
-{
- struct mtk_clock_event_device *evt;
- struct resource res;
- unsigned long rate = 0;
- struct clk *clk;
-
- evt = kzalloc(sizeof(*evt), GFP_KERNEL);
- if (!evt)
- return -ENOMEM;
-
- evt->dev.name = "mtk_tick";
- evt->dev.rating = 300;
- evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
- evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
- evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
- evt->dev.tick_resume = mtk_clkevt_shutdown;
- evt->dev.set_next_event = mtk_clkevt_next_event;
- evt->dev.cpumask = cpu_possible_mask;
-
- evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
- if (IS_ERR(evt->gpt_base)) {
- pr_err("Can't get resource\n");
- goto err_kzalloc;
- }
-
- evt->dev.irq = irq_of_parse_and_map(node, 0);
- if (evt->dev.irq <= 0) {
- pr_err("Can't parse IRQ\n");
- goto err_mem;
- }
-
- clk = of_clk_get(node, 0);
- if (IS_ERR(clk)) {
- pr_err("Can't get timer clock\n");
- goto err_irq;
- }
-
- if (clk_prepare_enable(clk)) {
- pr_err("Can't prepare clock\n");
- goto err_clk_put;
- }
- rate = clk_get_rate(clk);
-
- if (request_irq(evt->dev.irq, mtk_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
- pr_err("failed to setup irq %d\n", evt->dev.irq);
- goto err_clk_disable;
- }
-
- evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
-
- /* Configure clock source */
- mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
- clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
- node->name, rate, 300, 32, clocksource_mmio_readl_up);
- gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
- sched_clock_register(mtk_read_sched_clock, 32, rate);
-
- /* Configure clock event */
- mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
- clockevents_config_and_register(&evt->dev, rate, 0x3,
- 0xffffffff);
-
- mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
- return 0;
-
-err_clk_disable:
- clk_disable_unprepare(clk);
-err_clk_put:
- clk_put(clk);
-err_irq:
- irq_dispose_mapping(evt->dev.irq);
-err_mem:
- iounmap(evt->gpt_base);
- of_address_to_resource(node, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-err_kzalloc:
- kfree(evt);
-
- return -EINVAL;
-}
-TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/riscv_timer.c b/drivers/clocksource/riscv_timer.c
new file mode 100644
index 000000000000..4e8b347e43e2
--- /dev/null
+++ b/drivers/clocksource/riscv_timer.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <asm/sbi.h>
+
+/*
+ * All RISC-V systems have a timer attached to every hart. These timers can be
+ * read by the 'rdcycle' pseudo instruction, and can use the SBI to setup
+ * events. In order to abstract the architecture-specific timer reading and
+ * setting functions away from the clock event insertion code, we provide
+ * function pointers to the clockevent subsystem that perform two basic
+ * operations: rdtime() reads the timer on the current CPU, and
+ * next_event(delta) sets the next timer event to 'delta' cycles in the future.
+ * As the timers are inherently a per-cpu resource, these callbacks perform
+ * operations on the current hart. There is guaranteed to be exactly one timer
+ * per hart on all RISC-V systems.
+ */
+
+static int riscv_clock_next_event(unsigned long delta,
+ struct clock_event_device *ce)
+{
+ csr_set(sie, SIE_STIE);
+ sbi_set_timer(get_cycles64() + delta);
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
+ .name = "riscv_timer_clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 100,
+ .set_next_event = riscv_clock_next_event,
+};
+
+/*
+ * It is guaranteed that all the timers across all the harts are synchronized
+ * within one tick of each other, so while this could technically go
+ * backwards when hopping between CPUs, practically it won't happen.
+ */
+static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
+{
+ return get_cycles64();
+}
+
+static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
+ .name = "riscv_clocksource",
+ .rating = 300,
+ .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = riscv_clocksource_rdtime,
+};
+
+static int riscv_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
+
+ ce->cpumask = cpumask_of(cpu);
+ clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
+
+ csr_set(sie, SIE_STIE);
+ return 0;
+}
+
+static int riscv_timer_dying_cpu(unsigned int cpu)
+{
+ csr_clear(sie, SIE_STIE);
+ return 0;
+}
+
+/* called directly from the low-level interrupt handler */
+void riscv_timer_interrupt(void)
+{
+ struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
+
+ csr_clear(sie, SIE_STIE);
+ evdev->event_handler(evdev);
+}
+
+static int __init riscv_timer_init_dt(struct device_node *n)
+{
+ int cpu_id = riscv_of_processor_hart(n), error;
+ struct clocksource *cs;
+
+ if (cpu_id != smp_processor_id())
+ return 0;
+
+ cs = per_cpu_ptr(&riscv_clocksource, cpu_id);
+ clocksource_register_hz(cs, riscv_timebase);
+
+ error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
+ "clockevents/riscv/timer:starting",
+ riscv_timer_starting_cpu, riscv_timer_dying_cpu);
+ if (error)
+ pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
+ error, cpu_id);
+ return error;
+}
+
+TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);
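
Note that riscv_clock_next_event() receives its delta already in timer cycles; the nanoseconds-to-cycles conversion is done by the clockevents core using the riscv_timebase frequency passed to clockevents_config_and_register(). A rough standalone sketch of that conversion (illustrative only: the core actually uses a precomputed mult/shift pair rather than a 64-bit division, and the 10 MHz timebase is an assumed example value):

#include <stdint.h>
#include <stdio.h>

#define RISCV_TIMEBASE	10000000ULL	/* assumed example: 10 MHz */
#define NSEC_PER_SEC	1000000000ULL

/* Convert a nanosecond delay to timer cycles, rounding up so the event
 * never fires early. Fine for short deltas; ns * timebase can overflow
 * 64 bits for very large delays. */
static uint64_t ns_to_cycles(uint64_t ns)
{
	return (ns * RISCV_TIMEBASE + NSEC_PER_SEC - 1) / NSEC_PER_SEC;
}

int main(void)
{
	/* A 1 ms tick at 10 MHz is 10000 cycles; such a delta would then be
	 * handed to riscv_clock_next_event(), which programs the comparator
	 * at get_cycles64() + delta via the SBI. */
	printf("%llu cycles\n", (unsigned long long)ns_to_cycles(1000000));
	return 0;
}
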
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index c337a8100a7b..aa624885e0e2 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -230,7 +230,7 @@ static int __init tegra20_init_timer(struct device_node *np)
return ret;
}
- tegra_clockevent.cpumask = cpu_all_mask;
+ tegra_clockevent.cpumask = cpu_possible_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
@@ -259,6 +259,6 @@ static int __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- return register_persistent_clock(NULL, tegra_read_persistent_clock64);
+ return register_persistent_clock(tegra_read_persistent_clock64);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c
index 5e23d7b4a722..b4bd2f5b801d 100644
--- a/drivers/clocksource/timer-atcpit100.c
+++ b/drivers/clocksource/timer-atcpit100.c
@@ -185,7 +185,7 @@ static struct timer_of to = {
.set_state_oneshot = atcpit100_clkevt_set_oneshot,
.tick_resume = atcpit100_clkevt_shutdown,
.set_next_event = atcpit100_clkevt_next_event,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
},
.of_irq = {
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 0eee03250cfc..f5b2eda30bf3 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -211,7 +211,7 @@ static int __init keystone_timer_init(struct device_node *np)
event_dev->set_state_shutdown = keystone_shutdown;
event_dev->set_state_periodic = keystone_set_periodic;
event_dev->set_state_oneshot = keystone_shutdown;
- event_dev->cpumask = cpu_all_mask;
+ event_dev->cpumask = cpu_possible_mask;
event_dev->owner = THIS_MODULE;
event_dev->name = TIMER_NAME;
event_dev->irq = irq;
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
new file mode 100644
index 000000000000..eb10321f8517
--- /dev/null
+++ b/drivers/clocksource/timer-mediatek.c
@@ -0,0 +1,328 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include "timer-of.h"
+
+#define TIMER_CLK_EVT (1)
+#define TIMER_CLK_SRC (2)
+
+#define TIMER_SYNC_TICKS (3)
+
+/* gpt */
+#define GPT_IRQ_EN_REG 0x00
+#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
+#define GPT_IRQ_ACK_REG 0x08
+#define GPT_IRQ_ACK(val) BIT((val) - 1)
+
+#define GPT_CTRL_REG(val) (0x10 * (val))
+#define GPT_CTRL_OP(val) (((val) & 0x3) << 4)
+#define GPT_CTRL_OP_ONESHOT (0)
+#define GPT_CTRL_OP_REPEAT (1)
+#define GPT_CTRL_OP_FREERUN (3)
+#define GPT_CTRL_CLEAR (2)
+#define GPT_CTRL_ENABLE (1)
+#define GPT_CTRL_DISABLE (0)
+
+#define GPT_CLK_REG(val) (0x04 + (0x10 * (val)))
+#define GPT_CLK_SRC(val) (((val) & 0x1) << 4)
+#define GPT_CLK_SRC_SYS13M (0)
+#define GPT_CLK_SRC_RTC32K (1)
+#define GPT_CLK_DIV1 (0x0)
+#define GPT_CLK_DIV2 (0x1)
+
+#define GPT_CNT_REG(val) (0x08 + (0x10 * (val)))
+#define GPT_CMP_REG(val) (0x0C + (0x10 * (val)))
+
+/* system timer */
+#define SYST_BASE (0x40)
+
+#define SYST_CON (SYST_BASE + 0x0)
+#define SYST_VAL (SYST_BASE + 0x4)
+
+#define SYST_CON_REG(to) (timer_of_base(to) + SYST_CON)
+#define SYST_VAL_REG(to) (timer_of_base(to) + SYST_VAL)
+
+/*
+ * SYST_CON_EN: Clock enable. Must be set to:
+ * - start the timer countdown,
+ * - allow the timeout ticks to be updated,
+ * - allow the interrupt configuration to be changed.
+ *
+ * SYST_CON_IRQ_EN: Set to enable the interrupt.
+ *
+ * SYST_CON_IRQ_CLR: Set to clear the interrupt.
+ */
+#define SYST_CON_EN BIT(0)
+#define SYST_CON_IRQ_EN BIT(1)
+#define SYST_CON_IRQ_CLR BIT(4)
+
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static void mtk_syst_ack_irq(struct timer_of *to)
+{
+ /* Clear and disable interrupt */
+ writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
+}
+
+static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ mtk_syst_ack_irq(to);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_syst_clkevt_next_event(unsigned long ticks,
+ struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Enable clock to allow timeout tick update later */
+ writel(SYST_CON_EN, SYST_CON_REG(to));
+
+ /*
+ * Write new timeout ticks. Timer shall start countdown
+ * after timeout ticks are updated.
+ */
+ writel(ticks, SYST_VAL_REG(to));
+
+ /* Enable interrupt */
+ writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+ /* Disable timer */
+ writel(0, SYST_CON_REG(to_timer_of(clkevt)));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
+{
+ return mtk_syst_clkevt_shutdown(clkevt);
+}
+
+static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
+{
+ return 0;
+}
+
+static u64 notrace mtk_gpt_read_sched_clock(void)
+{
+ return readl_relaxed(gpt_sched_reg);
+}
+
+static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+ writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
+ GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
+ unsigned long delay, u8 timer)
+{
+ writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_start(struct timer_of *to,
+ bool periodic, u8 timer)
+{
+ u32 val;
+
+ /* Acknowledge interrupt */
+ writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ /* Clear 2 bit timer operation mode field */
+ val &= ~GPT_CTRL_OP(0x3);
+
+ if (periodic)
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
+ else
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);
+
+ writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
+{
+ mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_next_event(unsigned long event,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Acknowledge timer0 irq */
+ writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void
+__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
+{
+ writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
+ timer_of_base(to) + GPT_CLK_REG(timer));
+
+ writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
+
+ writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ /* Disable all interrupts */
+ writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+ /* Acknowledge all spurious pending interrupts */
+ writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
+ writel(val | GPT_IRQ_ENABLE(timer),
+ timer_of_base(to) + GPT_IRQ_EN_REG);
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "mtk-clkevt",
+ .rating = 300,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init mtk_syst_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
+ to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
+ to.clkevt.tick_resume = mtk_syst_clkevt_resume;
+ to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
+ to.of_irq.handler = mtk_syst_handler;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+
+static int __init mtk_gpt_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
+ to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
+ to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+ to.of_irq.handler = mtk_gpt_interrupt;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ /* Configure clock source */
+ mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
+ clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
+ node->name, timer_of_rate(&to), 300, 32,
+ clocksource_mmio_readl_up);
+ gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
+ sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
+
+ /* Configure clock event */
+ mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
+TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
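
For reference, the SYST one-shot path above boils down to three register writes. A small standalone sketch of that sequence against a hypothetical mapped base (illustrative only; the offsets and bit values are taken from the #defines in the new file, with mmio_write() standing in for writel()):

#include <stdint.h>

#define SYST_CON	 0x40			/* SYST_BASE + 0x0 */
#define SYST_VAL	 0x44			/* SYST_BASE + 0x4 */
#define SYST_CON_EN	 (1u << 0)
#define SYST_CON_IRQ_EN	 (1u << 1)
#define SYST_CON_IRQ_CLR (1u << 4)

static inline void mmio_write(volatile uint32_t *base, uint32_t off,
			      uint32_t val)
{
	base[off / 4] = val;			/* stands in for writel() */
}

static void syst_program_oneshot(volatile uint32_t *base, uint32_t ticks)
{
	/* 1. Enable the clock so the timeout value may be updated. */
	mmio_write(base, SYST_CON, SYST_CON_EN);
	/* 2. Load the timeout; the countdown starts once this lands. */
	mmio_write(base, SYST_VAL, ticks);
	/* 3. Unmask the interrupt for the expiry. */
	mmio_write(base, SYST_CON, SYST_CON_EN | SYST_CON_IRQ_EN);
}

static void syst_ack_irq(volatile uint32_t *base)
{
	/* Clear the interrupt while keeping the clock enabled, as
	 * mtk_syst_ack_irq() does. */
	mmio_write(base, SYST_CON, SYST_CON_EN | SYST_CON_IRQ_CLR);
}

int main(void)
{
	static uint32_t fake_regs[0x20];	/* fake register window */

	syst_program_oneshot(fake_regs, 13000);	/* ~1 ms at 13 MHz */
	syst_ack_irq(fake_regs);
	return 0;
}
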
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
index ef9ebeafb3ed..430cb99d8d79 100644
--- a/drivers/clocksource/timer-sprd.c
+++ b/drivers/clocksource/timer-sprd.c
@@ -156,4 +156,54 @@ static int __init sprd_timer_init(struct device_node *np)
return 0;
}
+static struct timer_of suspend_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+static u64 sprd_suspend_timer_read(struct clocksource *cs)
+{
+ return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_LO) & cs->mask;
+}
+
+static int sprd_suspend_timer_enable(struct clocksource *cs)
+{
+ sprd_timer_update_counter(timer_of_base(&suspend_to),
+ TIMER_VALUE_LO_MASK);
+ sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static void sprd_suspend_timer_disable(struct clocksource *cs)
+{
+ sprd_timer_disable(timer_of_base(&suspend_to));
+}
+
+static struct clocksource suspend_clocksource = {
+ .name = "sprd_suspend_timer",
+ .rating = 200,
+ .read = sprd_suspend_timer_read,
+ .enable = sprd_suspend_timer_enable,
+ .disable = sprd_suspend_timer_disable,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static int __init sprd_suspend_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &suspend_to);
+ if (ret)
+ return ret;
+
+ clocksource_register_hz(&suspend_clocksource,
+ timer_of_rate(&suspend_to));
+
+ return 0;
+}
+
TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
+TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer",
+ sprd_suspend_timer_init);
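
The read hook above bitwise-inverts the shadow counter because the Spreadtrum timer counts down, while the clocksource core requires a value that increases over time; the mask keeps the complement within 32 bits. A one-line illustration (the raw readings are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0xffffffff;	/* CLOCKSOURCE_MASK(32) */
	uint32_t earlier = 0xfffffff0;	/* down-counter, read first */
	uint32_t later   = 0xffffffe0;	/* read 16 ticks later */

	/* After inversion the later read is the larger value, as the
	 * clocksource core requires. Prints "15 -> 31". */
	printf("%u -> %u\n", ~earlier & mask, ~later & mask);
	return 0;
}
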
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 880a861ab3c8..29e2e1a78a43 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -78,8 +78,7 @@ static struct ti_32k ti_32k_timer = {
.rating = 250,
.read = ti_32k_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS |
- CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index a6a0338eea77..f74689334f7c 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -162,7 +162,7 @@ static int __init zevio_timer_add(struct device_node *node)
timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
timer->clkevt.tick_resume = zevio_timer_set_oneshot;
timer->clkevt.rating = 200;
- timer->clkevt.cpumask = cpu_all_mask;
+ timer->clkevt.cpumask = cpu_possible_mask;
timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->clkevt.irq = irqnr;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index e718b8c69a56..eeb7d31cbda5 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -239,7 +240,7 @@ void cn_del_callback(struct cb_id *id)
}
EXPORT_SYMBOL_GPL(cn_del_callback);
-static int cn_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
struct cn_queue_dev *dev = cdev.cbdev;
struct cn_callback_entry *cbq;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 52f5f1a2040c..0cd8eb76ad59 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,20 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
-config ARM_EXYNOS5440_CPUFREQ
- tristate "SAMSUNG EXYNOS5440"
- depends on SOC_EXYNOS5440
- depends on HAVE_CLK && OF
- select PM_OPP
- default y
- help
- This adds the CPUFreq driver for Samsung EXYNOS5440
- SoC. The nature of exynos5440 clock controller is
- different than previous exynos controllers so not using
- the common exynos framework.
-
- If in doubt, say N.
-
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index fb4a2ecac43b..c1ffeabe4ecf 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -56,7 +56,6 @@ obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 739da90ff3f6..75491fc841a6 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -51,6 +51,16 @@
#define ARMADA_37XX_DVFS_LOAD_2 2
#define ARMADA_37XX_DVFS_LOAD_3 3
+/* AVS register set */
+#define ARMADA_37XX_AVS_CTL0 0x0
+#define ARMADA_37XX_AVS_ENABLE BIT(30)
+#define ARMADA_37XX_AVS_HIGH_VDD_LIMIT 16
+#define ARMADA_37XX_AVS_LOW_VDD_LIMIT 22
+#define ARMADA_37XX_AVS_VDD_MASK 0x3F
+#define ARMADA_37XX_AVS_CTL2 0x8
+#define ARMADA_37XX_AVS_LOW_VDD_EN BIT(6)
+#define ARMADA_37XX_AVS_VSET(x) (0x1C + 4 * (x))
+
/*
* On Armada 37xx the Power management manages 4 level of CPU load,
* each level can be associated with a CPU clock source, a CPU
@@ -58,6 +68,17 @@
*/
#define LOAD_LEVEL_NR 4
+#define MIN_VOLT_MV 1000
+
+/* Supply voltage (in mV) for each AVS value */
+static int avs_map[] = {
+ 747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
+ 910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
+ 1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
+ 1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
+ 1342
+};
+
struct armada37xx_cpufreq_state {
struct regmap *regmap;
u32 nb_l0l1;
@@ -71,6 +92,7 @@ static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
struct armada_37xx_dvfs {
u32 cpu_freq_max;
u8 divider[LOAD_LEVEL_NR];
+ u32 avs[LOAD_LEVEL_NR];
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
@@ -148,6 +170,128 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
clk_set_parent(clk, parent);
}
+/*
+ * Find the Armada 37xx supported AVS value whose voltage is the closest
+ * one at or above the target voltage (i.e. round up).
+ */
+static u32 armada_37xx_avs_val_match(int target_vm)
+{
+ u32 avs;
+
+ /* Find out the round-up closest supported voltage value */
+ for (avs = 0; avs < ARRAY_SIZE(avs_map); avs++)
+ if (avs_map[avs] >= target_vm)
+ break;
+
+ /*
+ * If all supported voltages are smaller than target one,
+ * choose the largest supported voltage
+ */
+ if (avs == ARRAY_SIZE(avs_map))
+ avs = ARRAY_SIZE(avs_map) - 1;
+
+ return avs;
+}
+
+/*
+ * For the Armada 37xx SoC, the L0 (VSET0) VDD AVS value is set to the SVC
+ * revision value, or to a default value when SVC is not supported.
+ * - L0 can be read from the AVS_CTL0 register, and the L0 voltage is
+ *   looked up in the avs_map table.
+ * - The L1 voltage should be about 100 mV lower than the L0 voltage.
+ * - The L2 and L3 voltages should be about 150 mV lower than the L0 voltage.
+ * This function derives the L1, L2 and L3 AVS values from the L0 voltage
+ * and fills all of them into the AVS value table.
+ */
+static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int target_vm;
+ int load_level = 0;
+ u32 l0_vdd_min;
+
+ if (base == NULL)
+ return;
+
+ /* Get L0 VDD min value */
+ regmap_read(base, ARMADA_37XX_AVS_CTL0, &l0_vdd_min);
+ l0_vdd_min = (l0_vdd_min >> ARMADA_37XX_AVS_LOW_VDD_LIMIT) &
+ ARMADA_37XX_AVS_VDD_MASK;
+ if (l0_vdd_min >= ARRAY_SIZE(avs_map)) {
+ pr_err("L0 VDD MIN %d is not correct.\n", l0_vdd_min);
+ return;
+ }
+ dvfs->avs[0] = l0_vdd_min;
+
+ if (avs_map[l0_vdd_min] <= MIN_VOLT_MV) {
+ /*
+		 * If the L0 voltage is at or below 1000 mV, all remaining
+		 * VDD sets use the AVS value matching MIN_VOLT_MV (1000 mV).
+ */
+ u32 avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV);
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
+ dvfs->avs[load_level] = avs_min;
+
+ return;
+ }
+
+ /*
+	 * The L1 voltage is the L0 voltage minus 100 mV, but never lower
+	 * than 1000 mV.
+ */
+
+ target_vm = avs_map[l0_vdd_min] - 100;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
+
+ /*
+	 * The L2 and L3 voltages are the L0 voltage minus 150 mV, but never
+	 * lower than 1000 mV.
+ */
+ target_vm = avs_map[l0_vdd_min] - 150;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
+}
+
+static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int avs_val = 0, freq;
+ int load_level = 0;
+
+ if (base == NULL)
+ return;
+
+ /* Disable AVS before the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE, 0);
+
+ /* Enable low voltage mode */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL2,
+ ARMADA_37XX_AVS_LOW_VDD_EN,
+ ARMADA_37XX_AVS_LOW_VDD_EN);
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) {
+ freq = dvfs->cpu_freq_max / dvfs->divider[load_level];
+
+ avs_val = dvfs->avs[load_level];
+ regmap_update_bits(base, ARMADA_37XX_AVS_VSET(load_level-1),
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_LOW_VDD_LIMIT,
+ avs_val << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ avs_val << ARMADA_37XX_AVS_LOW_VDD_LIMIT);
+ }
+
+ /* Enable AVS after the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE,
+ ARMADA_37XX_AVS_ENABLE);
+}
+
static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
@@ -216,7 +360,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency;
- struct regmap *nb_pm_base;
+ struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk;
@@ -227,6 +371,14 @@ static int __init armada37xx_cpufreq_driver_init(void)
if (IS_ERR(nb_pm_base))
return -ENODEV;
+ avs_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-avs");
+
+	/* If AVS is not present, don't use it but still try to set up DVFS */
+ if (IS_ERR(avs_base)) {
+ pr_info("Syscon failed for Adapting Voltage Scaling: skip it\n");
+ avs_base = NULL;
+ }
/* Before doing any configuration on the DVFS first, disable it */
armada37xx_cpufreq_disable_dvfs(nb_pm_base);
@@ -270,16 +422,21 @@ static int __init armada37xx_cpufreq_driver_init(void)
armada37xx_cpufreq_state->regmap = nb_pm_base;
+ armada37xx_cpufreq_avs_configure(avs_base, dvfs);
+ armada37xx_cpufreq_avs_setup(avs_base, dvfs);
+
armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
+ unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
freq = cur_frequency / dvfs->divider[load_lvl];
-
- ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
}
/* Now that everything is setup, enable the DVFS at hardware level */
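
The avs_map lookup and the 100 mV / 150 mV derivations above can be checked by hand. A standalone sketch (illustrative only; it mirrors armada_37xx_avs_val_match() and the L1-L3 derivation in armada37xx_cpufreq_avs_configure() for one assumed L0 reading):

#include <stdio.h>

#define MIN_VOLT_MV 1000

static const int avs_map[] = {
	747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
	910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
	1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
	1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
	1342
};

#define AVS_MAP_SIZE (sizeof(avs_map) / sizeof(avs_map[0]))

/* Smallest AVS index whose voltage is >= target (round up), clamped
 * to the largest supported value. */
static unsigned int avs_val_match(int target_mv)
{
	unsigned int avs;

	for (avs = 0; avs < AVS_MAP_SIZE; avs++)
		if (avs_map[avs] >= target_mv)
			return avs;
	return AVS_MAP_SIZE - 1;
}

int main(void)
{
	unsigned int l0 = 39;		/* assumed L0 reading: 1202 mV */
	int l1_mv = avs_map[l0] - 100;	/* 1102 mV */
	int l23_mv = avs_map[l0] - 150;	/* 1052 mV */

	if (l1_mv < MIN_VOLT_MV)
		l1_mv = MIN_VOLT_MV;
	if (l23_mv < MIN_VOLT_MV)
		l23_mv = MIN_VOLT_MV;

	/* Prints L1 avs=31 (1108 mV), L2/L3 avs=27 (1062 mV). */
	printf("L1 avs=%u (%d mV), L2/L3 avs=%u (%d mV)\n",
	       avs_val_match(l1_mv), avs_map[avs_val_match(l1_mv)],
	       avs_val_match(l23_mv), avs_map[avs_val_match(l23_mv)]);
	return 0;
}
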
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index a9d3eec32795..30f302149730 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -296,10 +296,62 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
+static inline u64 get_delta(u64 t1, u64 t0)
+{
+ if (t1 > t0 || t0 > ~(u32)0)
+ return t1 - t0;
+
+ return (u32)t1 - (u32)t0;
+}
+
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
+ struct cppc_perf_fb_ctrs fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs fb_ctrs_t1)
+{
+ u64 delta_reference, delta_delivered;
+ u64 reference_perf, delivered_perf;
+
+ reference_perf = fb_ctrs_t0.reference_perf;
+
+ delta_reference = get_delta(fb_ctrs_t1.reference,
+ fb_ctrs_t0.reference);
+ delta_delivered = get_delta(fb_ctrs_t1.delivered,
+ fb_ctrs_t0.delivered);
+
+	/* Check to avoid a divide-by-zero */
+ if (delta_reference || delta_delivered)
+ delivered_perf = (reference_perf * delta_delivered) /
+ delta_reference;
+ else
+ delivered_perf = cpu->perf_ctrls.desired_perf;
+
+ return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
+}
+
+static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
+{
+ struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+ struct cppc_cpudata *cpu = all_cpu_data[cpunum];
+ int ret;
+
+ ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
+ if (ret)
+ return ret;
+
+ udelay(2); /* 2usec delay between sampling */
+
+ ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
+ if (ret)
+ return ret;
+
+ return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
+}
+
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
+ .get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
.name = "cppc_cpufreq",
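
The feedback-counter arithmetic above reduces to one proportion: delivered_perf = reference_perf * delta_delivered / delta_reference, which cppc_cpufreq_perf_to_khz() then scales to a frequency. A quick numeric sketch (the counter values are made up, illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe delta for counters that may be only 32 bits wide,
 * mirroring get_delta() above. */
static uint64_t get_delta(uint64_t t1, uint64_t t0)
{
	if (t1 > t0 || t0 > (uint64_t)UINT32_MAX)
		return t1 - t0;
	return (uint32_t)t1 - (uint32_t)t0;
}

int main(void)
{
	/* Assumed sample: the reference counter advanced 20000 ticks and
	 * the delivered counter 15000 ticks between the two reads taken
	 * 2 us apart. */
	uint64_t ref_perf = 100;	/* abstract reference performance */
	uint64_t d_ref = get_delta(120000, 100000);
	uint64_t d_del = get_delta(90000, 75000);

	/* delivered_perf = 100 * 15000 / 20000 = 75, i.e. the CPU ran at
	 * 75% of its reference performance during the sampling window. */
	printf("delivered perf = %llu\n",
	       (unsigned long long)(ref_perf * d_del / d_ref));
	return 0;
}
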
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b0dfd3222013..f53fb41efb7b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -923,7 +923,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- cpus_read_lock();
+ /*
+ * cpus_read_trylock() is used here to work around a circular lock
+ * dependency problem with respect to the cpufreq_register_driver().
+ */
+ if (!cpus_read_trylock())
+ return -EBUSY;
if (cpu_online(policy->cpu)) {
down_write(&policy->rwsem);
@@ -2236,6 +2241,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
+ trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 1d50e97d49f1..6d53f7d9fc7a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct policy_dbs_info *policy_dbs;
+
+ /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
+ mutex_lock(&gov_dbs_data_mutex);
+ policy_dbs = policy->governor_data;
+ if (!policy_dbs)
+ goto out;
mutex_lock(&policy_dbs->update_mutex);
cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
-
mutex_unlock(&policy_dbs->update_mutex);
+
+out:
+ mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
deleted file mode 100644
index 932caa386ece..000000000000
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * EXYNOS5440 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/pm_opp.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-/* Register definitions */
-#define XMU_DVFS_CTRL 0x0060
-#define XMU_PMU_P0_7 0x0064
-#define XMU_C0_3_PSTATE 0x0090
-#define XMU_P_LIMIT 0x00a0
-#define XMU_P_STATUS 0x00a4
-#define XMU_PMUEVTEN 0x00d0
-#define XMU_PMUIRQEN 0x00d4
-#define XMU_PMUIRQ 0x00d8
-
-/* PMU mask and shift definations */
-#define P_VALUE_MASK 0x7
-
-#define XMU_DVFS_CTRL_EN_SHIFT 0
-
-#define P0_7_CPUCLKDEV_SHIFT 21
-#define P0_7_CPUCLKDEV_MASK 0x7
-#define P0_7_ATBCLKDEV_SHIFT 18
-#define P0_7_ATBCLKDEV_MASK 0x7
-#define P0_7_CSCLKDEV_SHIFT 15
-#define P0_7_CSCLKDEV_MASK 0x7
-#define P0_7_CPUEMA_SHIFT 28
-#define P0_7_CPUEMA_MASK 0xf
-#define P0_7_L2EMA_SHIFT 24
-#define P0_7_L2EMA_MASK 0xf
-#define P0_7_VDD_SHIFT 8
-#define P0_7_VDD_MASK 0x7f
-#define P0_7_FREQ_SHIFT 0
-#define P0_7_FREQ_MASK 0xff
-
-#define C0_3_PSTATE_VALID_SHIFT 8
-#define C0_3_PSTATE_CURR_SHIFT 4
-#define C0_3_PSTATE_NEW_SHIFT 0
-
-#define PSTATE_CHANGED_EVTEN_SHIFT 0
-
-#define PSTATE_CHANGED_IRQEN_SHIFT 0
-
-#define PSTATE_CHANGED_SHIFT 0
-
-/* some constant values for clock divider calculation */
-#define CPU_DIV_FREQ_MAX 500
-#define CPU_DBG_FREQ_MAX 375
-#define CPU_ATB_FREQ_MAX 500
-
-#define PMIC_LOW_VOLT 0x30
-#define PMIC_HIGH_VOLT 0x28
-
-#define CPUEMA_HIGH 0x2
-#define CPUEMA_MID 0x4
-#define CPUEMA_LOW 0x7
-
-#define L2EMA_HIGH 0x1
-#define L2EMA_MID 0x3
-#define L2EMA_LOW 0x4
-
-#define DIV_TAB_MAX 2
-/* frequency unit is 20MHZ */
-#define FREQ_UNIT 20
-#define MAX_VOLTAGE 1550000 /* In microvolt */
-#define VOLTAGE_STEP 12500 /* In microvolt */
-
-#define CPUFREQ_NAME "exynos5440_dvfs"
-#define DEF_TRANS_LATENCY 100000
-
-enum cpufreq_level_index {
- L0, L1, L2, L3, L4,
- L5, L6, L7, L8, L9,
-};
-#define CPUFREQ_LEVEL_END (L7 + 1)
-
-struct exynos_dvfs_data {
- void __iomem *base;
- struct resource *mem;
- int irq;
- struct clk *cpu_clk;
- unsigned int latency;
- struct cpufreq_frequency_table *freq_table;
- unsigned int freq_count;
- struct device *dev;
- bool dvfs_enabled;
- struct work_struct irq_work;
-};
-
-static struct exynos_dvfs_data *dvfs_info;
-static DEFINE_MUTEX(cpufreq_lock);
-static struct cpufreq_freqs freqs;
-
-static int init_div_table(void)
-{
- struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
- unsigned int tmp, clk_div, ema_div, freq, volt_id, idx;
- struct dev_pm_opp *opp;
-
- cpufreq_for_each_entry_idx(pos, freq_tbl, idx) {
- opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
- pos->frequency * 1000, true);
- if (IS_ERR(opp)) {
- dev_err(dvfs_info->dev,
- "failed to find valid OPP for %u KHZ\n",
- pos->frequency);
- return PTR_ERR(opp);
- }
-
- freq = pos->frequency / 1000; /* In MHZ */
- clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
- << P0_7_CPUCLKDEV_SHIFT;
- clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
- << P0_7_ATBCLKDEV_SHIFT;
- clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
- << P0_7_CSCLKDEV_SHIFT;
-
- /* Calculate EMA */
- volt_id = dev_pm_opp_get_voltage(opp);
-
- volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
- if (volt_id < PMIC_HIGH_VOLT) {
- ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
- (L2EMA_HIGH << P0_7_L2EMA_SHIFT);
- } else if (volt_id > PMIC_LOW_VOLT) {
- ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
- (L2EMA_LOW << P0_7_L2EMA_SHIFT);
- } else {
- ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
- (L2EMA_MID << P0_7_L2EMA_SHIFT);
- }
-
- tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
- | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
-
- __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * idx);
- dev_pm_opp_put(opp);
- }
-
- return 0;
-}
-
-static void exynos_enable_dvfs(unsigned int cur_frequency)
-{
- unsigned int tmp, cpu;
- struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
- struct cpufreq_frequency_table *pos;
- /* Disable DVFS */
- __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
-
- /* Enable PSTATE Change Event */
- tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
- tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
- __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
-
- /* Enable PSTATE Change IRQ */
- tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
- tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
- __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
-
- /* Set initial performance index */
- cpufreq_for_each_entry(pos, freq_table)
- if (pos->frequency == cur_frequency)
- break;
-
- if (pos->frequency == CPUFREQ_TABLE_END) {
- dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
- /* Assign the highest frequency */
- pos = freq_table;
- cur_frequency = pos->frequency;
- }
-
- dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
- cur_frequency);
-
- for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
- tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
- tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
- tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
- __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
- }
-
- /* Enable DVFS */
- __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
- dvfs_info->base + XMU_DVFS_CTRL);
-}
-
-static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
-{
- unsigned int tmp;
- int i;
- struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
-
- mutex_lock(&cpufreq_lock);
-
- freqs.old = policy->cur;
- freqs.new = freq_table[index].frequency;
-
- cpufreq_freq_transition_begin(policy, &freqs);
-
- /* Set the target frequency in all C0_3_PSTATE register */
- for_each_cpu(i, policy->cpus) {
- tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
- tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
- tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
-
- __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
- }
- mutex_unlock(&cpufreq_lock);
- return 0;
-}
-
-static void exynos_cpufreq_work(struct work_struct *work)
-{
- unsigned int cur_pstate, index;
- struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
- struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
-
- /* Ensure we can access cpufreq structures */
- if (unlikely(dvfs_info->dvfs_enabled == false))
- goto skip_work;
-
- mutex_lock(&cpufreq_lock);
- freqs.old = policy->cur;
-
- cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
- if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
- index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
- else
- index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
-
- if (likely(index < dvfs_info->freq_count)) {
- freqs.new = freq_table[index].frequency;
- } else {
- dev_crit(dvfs_info->dev, "New frequency out of range\n");
- freqs.new = freqs.old;
- }
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
- cpufreq_cpu_put(policy);
- mutex_unlock(&cpufreq_lock);
-skip_work:
- enable_irq(dvfs_info->irq);
-}
-
-static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
-{
- unsigned int tmp;
-
- tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
- if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
- __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
- disable_irq_nosync(irq);
- schedule_work(&dvfs_info->irq_work);
- }
- return IRQ_HANDLED;
-}
-
-static void exynos_sort_descend_freq_table(void)
-{
- struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
- int i = 0, index;
- unsigned int tmp_freq;
- /*
- * Exynos5440 clock controller state logic expects the cpufreq table to
- * be in descending order. But the OPP library constructs the table in
- * ascending order. So to make the table descending we just need to
- * swap element i with element N - 1 - i.
- */
- for (i = 0; i < dvfs_info->freq_count / 2; i++) {
- index = dvfs_info->freq_count - i - 1;
- tmp_freq = freq_tbl[i].frequency;
- freq_tbl[i].frequency = freq_tbl[index].frequency;
- freq_tbl[index].frequency = tmp_freq;
- }
-}
-
-static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- policy->clk = dvfs_info->cpu_clk;
- return cpufreq_generic_init(policy, dvfs_info->freq_table,
- dvfs_info->latency);
-}
-
-static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = exynos_target,
- .get = cpufreq_generic_get,
- .init = exynos_cpufreq_cpu_init,
- .name = CPUFREQ_NAME,
- .attr = cpufreq_generic_attr,
-};
-
-static const struct of_device_id exynos_cpufreq_match[] = {
- {
- .compatible = "samsung,exynos5440-cpufreq",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
-
-static int exynos_cpufreq_probe(struct platform_device *pdev)
-{
- int ret = -EINVAL;
- struct device_node *np;
- struct resource res;
- unsigned int cur_frequency;
-
- np = pdev->dev.of_node;
- if (!np)
- return -ENODEV;
-
- dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
- if (!dvfs_info) {
- ret = -ENOMEM;
- goto err_put_node;
- }
-
- dvfs_info->dev = &pdev->dev;
-
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- goto err_put_node;
-
- dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
- if (IS_ERR(dvfs_info->base)) {
- ret = PTR_ERR(dvfs_info->base);
- goto err_put_node;
- }
-
- dvfs_info->irq = irq_of_parse_and_map(np, 0);
- if (!dvfs_info->irq) {
- dev_err(dvfs_info->dev, "No cpufreq irq found\n");
- ret = -ENODEV;
- goto err_put_node;
- }
-
- ret = dev_pm_opp_of_add_table(dvfs_info->dev);
- if (ret) {
- dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
- goto err_put_node;
- }
-
- ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
- &dvfs_info->freq_table);
- if (ret) {
- dev_err(dvfs_info->dev,
- "failed to init cpufreq table: %d\n", ret);
- goto err_free_opp;
- }
- dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
- exynos_sort_descend_freq_table();
-
- if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
- dvfs_info->latency = DEF_TRANS_LATENCY;
-
- dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
- if (IS_ERR(dvfs_info->cpu_clk)) {
- dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
- ret = PTR_ERR(dvfs_info->cpu_clk);
- goto err_free_table;
- }
-
- cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
- if (!cur_frequency) {
- dev_err(dvfs_info->dev, "Failed to get clock rate\n");
- ret = -EINVAL;
- goto err_free_table;
- }
- cur_frequency /= 1000;
-
- INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
- ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
- exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
- CPUFREQ_NAME, dvfs_info);
- if (ret) {
- dev_err(dvfs_info->dev, "Failed to register IRQ\n");
- goto err_free_table;
- }
-
- ret = init_div_table();
- if (ret) {
- dev_err(dvfs_info->dev, "Failed to initialise div table\n");
- goto err_free_table;
- }
-
- exynos_enable_dvfs(cur_frequency);
- ret = cpufreq_register_driver(&exynos_driver);
- if (ret) {
- dev_err(dvfs_info->dev,
- "%s: failed to register cpufreq driver\n", __func__);
- goto err_free_table;
- }
-
- of_node_put(np);
- dvfs_info->dvfs_enabled = true;
- return 0;
-
-err_free_table:
- dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
-err_free_opp:
- dev_pm_opp_of_remove_table(dvfs_info->dev);
-err_put_node:
- of_node_put(np);
- dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
- return ret;
-}
-
-static int exynos_cpufreq_remove(struct platform_device *pdev)
-{
- cpufreq_unregister_driver(&exynos_driver);
- dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
- dev_pm_opp_of_remove_table(dvfs_info->dev);
- return 0;
-}
-
-static struct platform_driver exynos_cpufreq_platdrv = {
- .driver = {
- .name = "exynos5440-cpufreq",
- .of_match_table = exynos_cpufreq_match,
- },
- .probe = exynos_cpufreq_probe,
- .remove = exynos_cpufreq_remove,
-};
-module_platform_driver(exynos_cpufreq_platdrv);
-
-MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
-MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
-MODULE_LICENSE("GPL");
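
For reference, the descending-order fixup performed by the removed exynos_sort_descend_freq_table() is a plain in-place reversal. A minimal standalone sketch of the same swap (simplified entry type, made-up frequencies):

    #include <stdio.h>

    struct freq_entry { unsigned int frequency; };

    /* Reverse an ascending table in place by swapping entry i with
     * entry count - 1 - i, as the removed helper did. */
    static void sort_descend(struct freq_entry *tbl, unsigned int count)
    {
        unsigned int i;

        for (i = 0; i < count / 2; i++) {
            unsigned int j = count - i - 1;
            unsigned int tmp = tbl[i].frequency;

            tbl[i].frequency = tbl[j].frequency;
            tbl[j].frequency = tmp;
        }
    }

    int main(void)
    {
        struct freq_entry tbl[] = { {200000}, {400000}, {800000}, {1200000} };
        unsigned int i, n = sizeof(tbl) / sizeof(tbl[0]);

        sort_descend(tbl, n);
        for (i = 0; i < n; i++)
            printf("%u\n", tbl[i].frequency); /* 1200000 800000 400000 200000 */
        return 0;
    }
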
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 8b3c2a79ad6c..b2ff423ad7f8 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -50,6 +51,7 @@ static struct clk_bulk_data clks[] = {
};
static struct device *cpu_dev;
+static struct thermal_cooling_device *cdev;
static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
@@ -191,6 +193,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
+static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ cdev = of_cpufreq_cooling_register(policy);
+
+ if (!cdev)
+ dev_err(cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+}
+
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
@@ -202,13 +214,22 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
+static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_cooling_unregister(cdev);
+
+ return 0;
+}
+
static struct cpufreq_driver imx6q_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
+ .exit = imx6q_cpufreq_exit,
.name = "imx6q-cpufreq",
+ .ready = imx6q_cpufreq_ready,
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
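
The pairing introduced here, registering the cooling device from the .ready callback (invoked once the policy is fully initialized) and dropping it from .exit, is the usual way a cpufreq driver ties into the thermal framework. A hedged kernel-style sketch of the same hooks, assuming of_cpufreq_cooling_register() returns NULL when no cooling device is created (which is what the !cdev check above implies); the "my_" names are illustrative:

    #include <linux/cpufreq.h>
    #include <linux/cpu_cooling.h>

    static struct thermal_cooling_device *my_cdev;
    static struct device *my_cpu_dev;

    static void my_cpufreq_ready(struct cpufreq_policy *policy)
    {
        /* Policy is fully set up by the time .ready runs. */
        my_cdev = of_cpufreq_cooling_register(policy);
        if (!my_cdev)
            dev_warn(my_cpu_dev, "running cpufreq without cooling device\n");
    }

    static int my_cpufreq_exit(struct cpufreq_policy *policy)
    {
        /* cpufreq_cooling_unregister() tolerates a NULL cdev. */
        cpufreq_cooling_unregister(my_cdev);
        return 0;
    }
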
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ece120da3353..b6a1aadaff9f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock);
#ifdef CONFIG_ACPI
-static bool intel_pstate_get_ppc_enable_status(void)
+static bool intel_pstate_acpi_pm_profile_server(void)
{
if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
return true;
+ return false;
+}
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+ if (intel_pstate_acpi_pm_profile_server())
+ return true;
+
return acpi_ppc;
}
@@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol
static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
+
+static inline bool intel_pstate_acpi_pm_profile_server(void)
+{
+ return false;
+}
#endif
static inline void update_turbo_state(void)
@@ -657,21 +670,18 @@ static ssize_t store_energy_performance_preference(
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
char str_preference[21];
- int ret, i = 0;
+ int ret;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
return -EINVAL;
- while (energy_perf_strings[i] != NULL) {
- if (!strcmp(str_preference, energy_perf_strings[i])) {
- intel_pstate_set_energy_pref_index(cpu_data, i);
- return count;
- }
- ++i;
- }
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0)
+ return ret;
- return -EINVAL;
+ intel_pstate_set_energy_pref_index(cpu_data, ret);
+ return count;
}
static ssize_t show_energy_performance_preference(
@@ -1841,7 +1851,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_hwp_enable(cpu);
id = x86_match_cpu(intel_pstate_hwp_boost_ids);
- if (id)
+ if (id && intel_pstate_acpi_pm_profile_server())
hwp_boost = true;
}
@@ -1998,7 +2008,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
struct cpudata *cpu)
{
- if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+ if (!hwp_active &&
+ cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_freq) {
pr_debug("policy->max > max non turbo frequency\n");
@@ -2072,6 +2083,15 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+ if (hwp_active) {
+ unsigned int max_freq;
+
+ max_freq = global.turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+ if (max_freq < policy->cpuinfo.max_freq)
+ policy->cpuinfo.max_freq = max_freq;
+ }
+
intel_pstate_init_acpi_perf_limits(policy);
policy->fast_switch_possible = true;
@@ -2394,6 +2414,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
return true;
}
+static bool __init intel_pstate_no_acpi_pcch(void)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ return true;
+
+ return !acpi_has_method(handle, "PCCH");
+}
+
static bool __init intel_pstate_has_acpi_ppc(void)
{
int i;
@@ -2453,7 +2485,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
switch (plat_info[idx].data) {
case PSS:
- return intel_pstate_no_acpi_pss();
+ if (!intel_pstate_no_acpi_pss())
+ return false;
+
+ return intel_pstate_no_acpi_pcch();
case PPC:
return intel_pstate_has_acpi_ppc() && !force_load;
}
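
The energy-preference hunk above replaces an open-coded scan with the kernel's match_string() helper, which returns the index of the matching entry or -EINVAL, and treats a size of -1 as "the array is NULL-terminated". A userspace model of that contract, for illustration only (the table mirrors intel_pstate's energy-preference names):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Model of the kernel's match_string(): return the index of the
     * matching entry in a (possibly NULL-terminated) string array,
     * or -EINVAL if nothing matches. */
    static int match_string_model(const char * const *array, size_t n,
                                  const char *string)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            if (!array[i])
                break;              /* NULL terminator reached */
            if (!strcmp(array[i], string))
                return i;
        }
        return -EINVAL;
    }

    int main(void)
    {
        static const char * const prefs[] = {
            "default", "performance", "balance_performance",
            "balance_power", "power", NULL
        };

        printf("%d\n", match_string_model(prefs, (size_t)-1, "power")); /* 4 */
        printf("%d\n", match_string_model(prefs, (size_t)-1, "bogus")); /* -22 */
        return 0;
    }
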
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 3f0ce2ae35ee..099a849396f6 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
{
int ret;
+ /* Skip initialization if another cpufreq driver is there. */
+ if (cpufreq_get_current_driver())
+ return 0;
+
if (acpi_disabled)
return 0;
@@ -589,6 +593,15 @@ static int __init pcc_cpufreq_init(void)
return ret;
}
+ if (num_present_cpus() > 4) {
+ pcc_cpufreq_driver.flags |= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
+ pr_err("%s: Too many CPUs, dynamic performance scaling disabled\n",
+ __func__);
+ pr_err("%s: Try to enable another scaling driver through BIOS settings\n",
+ __func__);
+ pr_err("%s: and complain to the system vendor\n", __func__);
+ }
+
ret = cpufreq_register_driver(&pcc_cpufreq_driver);
return ret;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54edaec1e608..bf6519cf64bc 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -758,8 +758,13 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
cur_msec = jiffies_to_msecs(get_jiffies_64());
- spin_lock(&gpstates->gpstate_lock);
freq_data.pstate_id = idx_to_pstate(new_index);
+ if (!gpstates) {
+ freq_data.gpstate_id = freq_data.pstate_id;
+ goto no_gpstate;
+ }
+
+ spin_lock(&gpstates->gpstate_lock);
if (!gpstates->last_sampled_time) {
gpstate_idx = new_index;
@@ -809,6 +814,7 @@ gpstates_done:
spin_unlock(&gpstates->gpstate_lock);
+no_gpstate:
/*
* Use smp_call_function to send IPI and execute the
* mtspr on target CPU. We could do that without IPI
@@ -843,6 +849,13 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
kernfs_put(kn);
}
+ policy->freq_table = powernv_freqs;
+ policy->fast_switch_possible = true;
+
+ if (pvr_version_is(PVR_POWER9))
+ return 0;
+
+ /* Initialise Gpstate ramp-down timer only on POWER8 */
gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
if (!gpstates)
return -ENOMEM;
@@ -857,8 +870,6 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
spin_lock_init(&gpstates->gpstate_lock);
- policy->freq_table = powernv_freqs;
- policy->fast_switch_possible = true;
return 0;
}
@@ -998,7 +1009,8 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
- del_timer_sync(&gpstates->timer);
+ if (gpstates)
+ del_timer_sync(&gpstates->timer);
}
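
Distilled from the hunks above: the ramp-down bookkeeping is now allocated only on POWER8, so a NULL pointer means "no global pstates" (POWER9) and every consumer must be guarded. A simplified, illustrative sketch of that shape (types and names are not the driver's own):

    #include <linux/timer.h>

    struct my_gpstates {
        struct timer_list timer;
        /* ... ramp-down state ... */
    };

    static struct my_gpstates *my_gpstates;    /* NULL on POWER9 */

    static void my_stop_cpu(void)
    {
        /* ...program the minimum pstate... */
        if (my_gpstates)                       /* timer exists only on POWER8 */
            del_timer_sync(&my_gpstates->timer);
    }
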
static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 01bddacf5c3b..a1830fa25fc5 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -87,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
int ret;
cpu_dev = get_cpu_device(0);
- if (NULL == cpu_dev)
- ret = -ENODEV;
+ if (!cpu_dev)
+ return -ENODEV;
msm8996_version = qcom_cpufreq_kryo_get_msm_id();
if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -97,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
}
np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
- if (IS_ERR(np))
- return PTR_ERR(np);
+ if (!np)
+ return -ENOENT;
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
if (!ret) {
@@ -109,8 +109,9 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
if (IS_ERR(speedbin_nvmem)) {
- dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
- PTR_ERR(speedbin_nvmem));
+ if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
+ dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
+ PTR_ERR(speedbin_nvmem));
return PTR_ERR(speedbin_nvmem);
}
@@ -183,6 +184,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
+ {}
};
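
The added {} entry above is not cosmetic: the OF match code walks the table until it hits an all-zero sentinel, so a table without one runs past the end of the array. Sketch of a correctly terminated table (compatible strings are the ones from this driver):

    #include <linux/of.h>
    #include <linux/mod_devicetable.h>

    /* The empty entry at the end is the sentinel that stops the
     * matching loop. */
    static const struct of_device_id my_match_list[] = {
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
        { /* sentinel */ }
    };
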
/*
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index e07bc7ace774..073557f433eb 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -105,7 +105,8 @@ static int __init arm_idle_init_cpu(int cpu)
ret = cpuidle_register_driver(drv);
if (ret) {
- pr_err("Failed to register cpuidle driver\n");
+ if (ret != -EBUSY)
+ pr_err("Failed to register cpuidle driver\n");
goto out_kfree_drv;
}
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index d29e4f041efe..84b1ebe212b3 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -242,6 +242,7 @@ static inline void add_powernv_state(int index, const char *name,
powernv_states[index].target_residency = target_residency;
powernv_states[index].exit_latency = exit_latency;
powernv_states[index].enter = idle_fn;
+ /* For power8 and below psscr_* will be 0 */
stop_psscr_table[index].val = psscr_val;
stop_psscr_table[index].mask = psscr_mask;
}
@@ -263,186 +264,80 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
- struct device_node *power_mgt;
int nr_idle_states = 1; /* Snooze */
- int dt_idle_states, count;
- u32 latency_ns[CPUIDLE_STATE_MAX];
- u32 residency_ns[CPUIDLE_STATE_MAX];
- u32 flags[CPUIDLE_STATE_MAX];
- u64 psscr_val[CPUIDLE_STATE_MAX];
- u64 psscr_mask[CPUIDLE_STATE_MAX];
- const char *names[CPUIDLE_STATE_MAX];
+ int dt_idle_states;
u32 has_stop_states = 0;
- int i, rc;
+ int i;
u32 supported_flags = pnv_get_supported_cpuidle_states();
/* Currently we have snooze statically defined */
-
- power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
- if (!power_mgt) {
- pr_warn("opal: PowerMgmt Node not found\n");
- goto out;
- }
-
- /* Read values of any property to determine the num of idle states */
- dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
- if (dt_idle_states < 0) {
- pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ if (nr_pnv_idle_states <= 0) {
+ pr_warn("cpuidle-powernv : Only Snooze is available\n");
goto out;
}
- count = of_property_count_u32_elems(power_mgt,
- "ibm,cpu-idle-state-latencies-ns");
-
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
- "ibm,cpu-idle-state-latencies-ns",
- count) != 0)
- goto out;
-
- count = of_property_count_strings(power_mgt,
- "ibm,cpu-idle-state-names");
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
- "ibm,cpu-idle-state-names",
- count) != 0)
- goto out;
+ /* TODO: Count only states which are eligible for cpuidle */
+ dt_idle_states = nr_pnv_idle_states;
/*
* Since snooze is used as first idle state, max idle states allowed is
* CPUIDLE_STATE_MAX -1
*/
- if (dt_idle_states > CPUIDLE_STATE_MAX - 1) {
+ if (nr_pnv_idle_states > CPUIDLE_STATE_MAX - 1) {
pr_warn("cpuidle-powernv: discovered idle states more than allowed");
dt_idle_states = CPUIDLE_STATE_MAX - 1;
}
- if (of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
- pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
- goto out;
- }
-
- if (of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-latencies-ns", latency_ns,
- dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
- goto out;
- }
- if (of_property_read_string_array(power_mgt,
- "ibm,cpu-idle-state-names", names, dt_idle_states) < 0) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
- goto out;
- }
-
/*
* If the idle states use stop instruction, probe for psscr values
* and psscr mask which are necessary to specify required stop level.
*/
- has_stop_states = (flags[0] &
+ has_stop_states = (pnv_idle_states[0].flags &
(OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));
- if (has_stop_states) {
- count = of_property_count_u64_elems(power_mgt,
- "ibm,cpu-idle-state-psscr");
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
- dt_idle_states,
- "ibm,cpu-idle-state-psscr",
- count) != 0)
- goto out;
-
- count = of_property_count_u64_elems(power_mgt,
- "ibm,cpu-idle-state-psscr-mask");
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
- dt_idle_states,
- "ibm,cpu-idle-state-psscr-mask",
- count) != 0)
- goto out;
-
- if (of_property_read_u64_array(power_mgt,
- "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
- goto out;
- }
-
- if (of_property_read_u64_array(power_mgt,
- "ibm,cpu-idle-state-psscr-mask",
- psscr_mask, dt_idle_states)) {
- pr_warn("cpuidle-powernv:Missing ibm,cpu-idle-state-psscr-mask in DT\n");
- goto out;
- }
- }
-
- count = of_property_count_u32_elems(power_mgt,
- "ibm,cpu-idle-state-residency-ns");
-
- if (count < 0) {
- rc = count;
- } else if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
- dt_idle_states,
- "ibm,cpu-idle-state-residency-ns",
- count) != 0) {
- goto out;
- } else {
- rc = of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-residency-ns",
- residency_ns, dt_idle_states);
- }
for (i = 0; i < dt_idle_states; i++) {
unsigned int exit_latency, target_residency;
bool stops_timebase = false;
+ struct pnv_idle_states_t *state = &pnv_idle_states[i];
/*
* Skip the platform idle state whose flag isn't in
* the supported_cpuidle_states flag mask.
*/
- if ((flags[i] & supported_flags) != flags[i])
+ if ((state->flags & supported_flags) != state->flags)
continue;
/*
* If an idle state has exit latency beyond
* POWERNV_THRESHOLD_LATENCY_NS then don't use it
* in cpu-idle.
*/
- if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
+ if (state->latency_ns > POWERNV_THRESHOLD_LATENCY_NS)
continue;
/*
* Firmware passes residency and latency values in ns.
* cpuidle expects it in us.
*/
- exit_latency = DIV_ROUND_UP(latency_ns[i], 1000);
- if (!rc)
- target_residency = DIV_ROUND_UP(residency_ns[i], 1000);
- else
- target_residency = 0;
-
- if (has_stop_states) {
- int err = validate_psscr_val_mask(&psscr_val[i],
- &psscr_mask[i],
- flags[i]);
- if (err) {
- report_invalid_psscr_val(psscr_val[i], err);
+ exit_latency = DIV_ROUND_UP(state->latency_ns, 1000);
+ target_residency = DIV_ROUND_UP(state->residency_ns, 1000);
+
+ if (has_stop_states && !(state->valid))
continue;
- }
- }
- if (flags[i] & OPAL_PM_TIMEBASE_STOP)
+ if (state->flags & OPAL_PM_TIMEBASE_STOP)
stops_timebase = true;
- /*
- * For nap and fastsleep, use default target_residency
- * values if f/w does not expose it.
- */
- if (flags[i] & OPAL_PM_NAP_ENABLED) {
- if (!rc)
- target_residency = 100;
+ if (state->flags & OPAL_PM_NAP_ENABLED) {
/* Add NAP state */
add_powernv_state(nr_idle_states, "Nap",
CPUIDLE_FLAG_NONE, nap_loop,
target_residency, exit_latency, 0, 0);
} else if (has_stop_states && !stops_timebase) {
- add_powernv_state(nr_idle_states, names[i],
+ add_powernv_state(nr_idle_states, state->name,
CPUIDLE_FLAG_NONE, stop_loop,
target_residency, exit_latency,
- psscr_val[i], psscr_mask[i]);
+ state->psscr_val,
+ state->psscr_mask);
}
/*
@@ -450,20 +345,19 @@ static int powernv_add_idle_states(void)
* within this config dependency check.
*/
#ifdef CONFIG_TICK_ONESHOT
- else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
- flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
- if (!rc)
- target_residency = 300000;
+ else if (state->flags & OPAL_PM_SLEEP_ENABLED ||
+ state->flags & OPAL_PM_SLEEP_ENABLED_ER1) {
/* Add FASTSLEEP state */
add_powernv_state(nr_idle_states, "FastSleep",
CPUIDLE_FLAG_TIMER_STOP,
fastsleep_loop,
target_residency, exit_latency, 0, 0);
} else if (has_stop_states && stops_timebase) {
- add_powernv_state(nr_idle_states, names[i],
+ add_powernv_state(nr_idle_states, state->name,
CPUIDLE_FLAG_TIMER_STOP, stop_loop,
target_residency, exit_latency,
- psscr_val[i], psscr_mask[i]);
+ state->psscr_val,
+ state->psscr_mask);
}
#endif
else
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 1aef60d160eb..110483f0e3fb 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -328,9 +328,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
unsigned int polling_threshold;
/*
- * We want to default to C1 (hlt), not to busy polling
- * unless the timer is happening really really soon, or
- * C1's exit latency exceeds the user configured limit.
+ * Default to a physical idle state, not to busy polling, unless
+ * a timer is going to trigger really really soon.
*/
polling_threshold = max_t(unsigned int, 20, s->target_residency);
if (data->next_timer_us > polling_threshold &&
@@ -349,14 +348,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* If the tick is already stopped, the cost of possible short
* idle duration misprediction is much higher, because the CPU
* may be stuck in a shallow idle state for a long time as a
- * result of it. In that case say we might mispredict and try
- * to force the CPU into a state for which we would have stopped
- * the tick, unless a timer is going to expire really soon
- * anyway.
+ * result of it. In that case say we might mispredict and use
+ * the known time till the closest timer event for the idle
+ * state selection.
*/
if (data->predicted_us < TICK_USEC)
- data->predicted_us = min_t(unsigned int, TICK_USEC,
- ktime_to_us(delta_next));
+ data->predicted_us = ktime_to_us(delta_next);
} else {
/*
* Use the performance multiplier and the user-configurable
@@ -381,8 +378,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
continue;
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency > data->predicted_us)
- break;
+ if (s->target_residency > data->predicted_us) {
+ if (!tick_nohz_tick_stopped())
+ break;
+
+ /*
+ * If the state selected so far is shallow and this
+ * state's target residency matches the time till the
+ * closest timer event, select this one to avoid getting
+ * stuck in the shallow one for too long.
+ */
+ if (drv->states[idx].target_residency < TICK_USEC &&
+ s->target_residency <= ktime_to_us(delta_next))
+ idx = i;
+
+ goto out;
+ }
if (s->exit_latency > latency_req) {
/*
* If we break out of the loop for latency reasons, use
@@ -403,14 +414,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Don't stop the tick if the selected state is a polling one or if the
* expected idle duration is shorter than the tick period length.
*/
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- expected_interval < TICK_USEC) {
+ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+ expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
unsigned int delta_next_us = ktime_to_us(delta_next);
*stop_tick = false;
- if (!tick_nohz_tick_stopped() && idx > 0 &&
- drv->states[idx].target_residency > delta_next_us) {
+ if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
/*
* The tick is not going to be stopped and the target
* residency of the state to be returned is not within
@@ -418,8 +428,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick, so try to correct that.
*/
for (i = idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (drv->states[i].disabled ||
+ dev->states_usage[i].disable)
continue;
idx = i;
@@ -429,6 +439,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
}
+out:
data->last_state_idx = idx;
return data->last_state_idx;
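
The net effect of these menu-governor hunks when the tick is already stopped can be modelled compactly: the prediction is raised to the time of the closest timer event, and a deeper state that still fits before that timer is preferred over a too-shallow one. A standalone toy model (it omits latency limits and polling states, and the tick length and residencies are illustrative):

    #include <stdio.h>

    #define TICK_USEC 1000    /* illustrative, not the real tick length */

    struct state { unsigned int target_residency; };

    /* Toy model of the tick-stopped selection rule added above. */
    static int pick_state(const struct state *s, int n,
                          unsigned int predicted_us,
                          unsigned int delta_next_us, int tick_stopped)
    {
        int i, idx = -1;

        if (tick_stopped && predicted_us < TICK_USEC)
            predicted_us = delta_next_us;   /* trust the next timer */

        for (i = 0; i < n; i++) {
            if (s[i].target_residency > predicted_us) {
                if (!tick_stopped)
                    break;
                /* deeper state still fits before the next timer */
                if (idx >= 0 && s[idx].target_residency < TICK_USEC &&
                    s[i].target_residency <= delta_next_us)
                    idx = i;
                break;
            }
            idx = i;    /* deepest state that fits the prediction */
        }
        return idx;
    }

    int main(void)
    {
        struct state states[] = { {1}, {20}, {200}, {2000} };

        /* Tick running, short prediction: stay shallow. */
        printf("%d\n", pick_state(states, 4, 50, 5000, 0)); /* 1 */
        /* Tick stopped, same prediction: go deeper within delta_next. */
        printf("%d\n", pick_state(states, 4, 50, 5000, 1)); /* 3 */
        return 0;
    }
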
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43cccf6aff61..a8c4ce07fc9d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -585,6 +585,17 @@ config CRYPTO_DEV_QCE
hardware. To compile this driver as a module, choose M here. The
module will be called qcrypto.
+config CRYPTO_DEV_QCOM_RNG
+ tristate "Qualcomm Random Number Generator Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select CRYPTO_RNG
+ help
+ This driver provides support for the Random Number
+ Generator hardware found on Qualcomm SoCs.
+
+ To compile this driver as a module, choose M here. The
+ module will be called qcom-rng. If unsure, say N.
+
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
depends on PPC64 && VSX
@@ -689,8 +700,10 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
select CRYPTO_HASH
select CRYPTO_HMAC
+ select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -746,4 +759,6 @@ config CRYPTO_DEV_CCREE
cryptographic operations on the system REE.
If unsure say Y.
+source "drivers/crypto/hisilicon/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7ae87b4f6c8d..c23396f32c8a 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
@@ -45,3 +46,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-y += hisilicon/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 05981ccd9901..6eaec9ba0f68 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1132,8 +1132,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1153,8 +1152,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cfb(aes)",
.cra_driver_name = "cfb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1174,8 +1172,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1196,8 +1193,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1217,8 +1213,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1237,8 +1232,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ofb(aes)",
.cra_driver_name = "ofb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
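
These crypto4xx hunks, and the analogous ones in the artpec6, bcm, caam, ccp and ccree drivers below, all apply one mechanical rule: the CRYPTO_ALG_TYPE_* bit in cra_flags (and the explicit cra_type) is now implied by the type-specific registration call, so drivers keep only the behavioural flags. After the cleanup a declaration reduces to the shape below (the driver name is a placeholder, not from any one driver in this patch):

    #include <linux/module.h>
    #include <crypto/aes.h>
    #include <crypto/skcipher.h>

    /* No CRYPTO_ALG_TYPE_SKCIPHER bit: the type is implied by
     * registering through crypto_register_skcipher(). */
    static struct skcipher_alg my_alg = {
        .base = {
            .cra_name        = "cbc(aes)",
            .cra_driver_name = "cbc-aes-mydrv",
            .cra_priority    = 300,
            .cra_flags       = CRYPTO_ALG_ASYNC |
                               CRYPTO_ALG_KERN_DRIVER_ONLY,
            .cra_blocksize   = AES_BLOCK_SIZE,
            .cra_module      = THIS_MODULE,
        },
    };
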
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index e66f18a0ddd0..74f083f45e97 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -186,7 +186,10 @@ static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
* always be the same. Use a macro for the key size to avoid unnecessary
* computations.
*/
- copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ copied = sg_copy_to_buffer(pubkey,
+ sg_nents_for_len(pubkey,
+ ATMEL_ECC_PUBKEY_SIZE),
+ cmd->data, ATMEL_ECC_PUBKEY_SIZE);
if (copied != ATMEL_ECC_PUBKEY_SIZE)
return -EINVAL;
@@ -268,15 +271,17 @@ static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
struct kpp_request *req = areq;
struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_ecc_cmd *cmd = &work_data->cmd;
- size_t copied;
- size_t n_sz = ctx->n_sz;
+ size_t copied, n_sz;
if (status)
goto free_work_data;
+ /* might want less than we've got */
+ n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+
/* copy the shared secret */
- copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
- n_sz);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
+ &cmd->data[RSP_DATA_IDX], n_sz);
if (copied != n_sz)
status = -EINVAL;
@@ -440,7 +445,7 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- size_t copied;
+ size_t copied, nbytes;
int ret = 0;
if (ctx->do_fallback) {
@@ -448,10 +453,14 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
return crypto_kpp_generate_public_key(req);
}
+ /* might want less than we've got */
+ nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
+
/* public key was saved at private key generation */
- copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
- ATMEL_ECC_PUBKEY_SIZE);
- if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, nbytes),
+ ctx->public_key, nbytes);
+ if (copied != nbytes)
ret = -EINVAL;
return ret;
@@ -470,6 +479,10 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
return crypto_kpp_compute_shared_secret(req);
}
+ /* the peer public key must be a complete curve point (both coordinates) */
+ if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
@@ -554,10 +567,6 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
}
crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
-
- dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
-
ctx->fallback = fallback;
return 0;
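
The recurring fix in this file is the second argument of sg_copy_to_buffer()/sg_copy_from_buffer(): it counts scatterlist entries, not bytes, and hardcoding 1 breaks callers that pass a chained list. sg_nents_for_len() returns how many entries are needed to cover a byte count (or a negative errno if the list is too short). A hedged kernel-style sketch of the corrected call shape (the helper is illustrative, not taken verbatim from the driver):

    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    /* Copy "len" bytes out of a scatterlist into "buf", sizing the
     * entry count with sg_nents_for_len() instead of assuming 1. */
    static int copy_from_sg(struct scatterlist *sgl, void *buf, size_t len)
    {
        int nents = sg_nents_for_len(sgl, len);

        if (nents < 0)
            return nents;
        if (sg_copy_to_buffer(sgl, nents, buf, len) != len)
            return -EINVAL;
        return 0;
    }
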
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4d43081120db..8a19df2fba6a 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2316,9 +2316,7 @@ struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
goto error;
}
- tfm = crypto_alloc_ahash(name,
- CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(name, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
goto error;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0fb8bbf41a8d..7f07a5085e9b 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2704,7 +2704,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha1",
.cra_driver_name = "artpec-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2727,7 +2727,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha256",
.cra_driver_name = "artpec-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2751,7 +2751,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "artpec-hmac-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2777,7 +2777,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha384",
.cra_driver_name = "artpec-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2801,7 +2801,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "artpec-hmac-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2824,7 +2824,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha512",
.cra_driver_name = "artpec-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2848,7 +2848,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "artpec-hmac-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2867,8 +2867,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2888,8 +2887,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "artpec6-ctr-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2911,8 +2909,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2933,8 +2930,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2964,7 +2960,7 @@ static struct aead_alg aead_algos[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "artpec-gcm-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 309c67c7012f..2d1f1db9f807 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3914,8 +3914,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.cipher_info = {
@@ -4649,8 +4648,7 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
- hash->halg.base.cra_type = &crypto_ahash_type;
- hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+ hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4691,7 +4689,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
INIT_LIST_HEAD(&aead->base.cra_list);
- aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0beb28196e20..43975ab5f09c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1846,8 +1846,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
t_alg->alg_type = template->alg_type;
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index 31b440757146..b5b4c12179df 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -35,7 +35,7 @@
#ifndef _SG_SW_QM2_H_
#define _SG_SW_QM2_H_
-#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+#include <soc/fsl/dpaa2-fd.h>
static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index e586ffab8358..dbfa9fce33e0 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -12,7 +12,7 @@
#include "ctrl.h"
#include "regs.h"
#include "sg_sw_qm2.h"
-#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+#include <soc/fsl/dpaa2-fd.h>
struct sec4_sg_entry {
u64 ptr;
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index df21d996db7e..600336d169a9 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -351,7 +351,7 @@ static int cvm_enc_dec_init(struct crypto_tfm *tfm)
return 0;
}
-struct crypto_alg algs[] = { {
+static struct crypto_alg algs[] = { {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_enc_ctx),
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4fdc921ba611..ebe267379ac9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -148,7 +148,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_ATOMIC | __GFP_ZERO), &dma);
+ vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
if (!vaddr)
return NULL;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 26687f318de6..3c6fe57f91f8 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -399,13 +399,12 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_aes_cmac_cra_init;
base->cra_exit = ccp_aes_cmac_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 871c9628a2ee..2ca64bb57d2e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -497,13 +497,12 @@ static int ccp_register_sha_alg(struct list_head *head,
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_sha_cra_init;
base->cra_exit = ccp_sha_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index ff478d826d7d..218739b961fe 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -62,14 +62,14 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
int reg;
/* Read the interrupt status: */
- status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+ status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
/* Check if it is command completion: */
- if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+ if (!(status & PSP_CMD_COMPLETE))
goto done;
/* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
if (reg & PSP_CMDRESP_RESP) {
psp->sev_int_rcvd = 1;
wake_up(&psp->sev_int_queue);
@@ -77,17 +77,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
done:
/* Clear the interrupt status by writing the same value we read. */
- iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
{
- psp->sev_int_rcvd = 0;
-
wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
- *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
}
static int sev_cmd_buffer_len(int cmd)
@@ -145,13 +143,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
sev_cmd_buffer_len(cmd), false);
- iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
- iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+ iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
+
+ psp->sev_int_rcvd = 0;
reg = cmd;
reg <<= PSP_CMDRESP_CMD_SHIFT;
reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+ iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
/* wait for command completion */
sev_wait_cmd_ioc(psp, &reg);
@@ -789,7 +789,7 @@ static int sev_misc_init(struct psp_device *psp)
static int sev_init(struct psp_device *psp)
{
/* Check if device supports SEV feature */
- if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+ if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
dev_dbg(psp->dev, "device does not support SEV\n");
return 1;
}
@@ -817,11 +817,11 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- psp->io_regs = sp->io_map + psp->vdata->offset;
+ psp->io_regs = sp->io_map;
/* Disable and clear interrupts until ready */
- iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
+ iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
/* Request an irq */
ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
@@ -838,7 +838,9 @@ int psp_dev_init(struct sp_device *sp)
sp->set_psp_master_device(sp);
/* Enable interrupt */
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
+
+ dev_notice(dev, "psp enabled\n");
return 0;
@@ -856,6 +858,9 @@ void psp_dev_destroy(struct sp_device *sp)
{
struct psp_device *psp = sp->psp_data;
+ if (!psp)
+ return;
+
if (psp->sev_misc)
kref_put(&misc_dev->refcount, sev_exit);
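
Two separate things happen in the psp-dev.c hunks: register offsets move into per-version vdata (see the sp-pci.c changes below), and sev_int_rcvd is now cleared before the command is issued rather than inside the waiter, closing the window where a fast interrupt could fire before the flag was rearmed. The ordering, distilled into a hedged sketch (simplified names, waitqueue initialization elided):

    #include <linux/io.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    struct my_psp {
        void __iomem *io_regs;
        unsigned int cmdresp_reg;
        wait_queue_head_t int_queue;
        int int_rcvd;
    };

    static void my_send_cmd(struct my_psp *psp, u32 cmd)
    {
        psp->int_rcvd = 0;                               /* arm first... */
        iowrite32(cmd, psp->io_regs + psp->cmdresp_reg); /* ...then kick */
        wait_event(psp->int_queue, psp->int_rcvd);       /* IRQ sets it */
    }
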
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c7e9098a233c..8b53a9674ecb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -30,24 +30,7 @@
#include "sp-dev.h"
-#define PSP_C2PMSG(_num) ((_num) << 2)
-#define PSP_CMDRESP PSP_C2PMSG(32)
-#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56)
-#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
-#define PSP_FEATURE_REG PSP_C2PMSG(63)
-
-#define PSP_P2CMSG(_num) ((_num) << 2)
-#define PSP_CMD_COMPLETE_REG 1
-#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
-
-#define PSP_P2CMSG_INTEN 0x0110
-#define PSP_P2CMSG_INTSTS 0x0114
-
-#define PSP_C2PMSG_ATTR_0 0x0118
-#define PSP_C2PMSG_ATTR_1 0x011c
-#define PSP_C2PMSG_ATTR_2 0x0120
-#define PSP_C2PMSG_ATTR_3 0x0124
-#define PSP_P2CMSG_ATTR_0 0x0128
+#define PSP_CMD_COMPLETE BIT(1)
#define PSP_CMDRESP_CMD_SHIFT 16
#define PSP_CMDRESP_IOC BIT(0)
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index acb197b66ced..14398cad1625 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -44,7 +44,12 @@ struct ccp_vdata {
};
struct psp_vdata {
- const unsigned int offset;
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int feature_reg;
+ const unsigned int inten_reg;
+ const unsigned int intsts_reg;
};
/* Structure to hold SP device data */
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f5f43c50698a..7da93e9bebed 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -269,38 +269,62 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata psp_entry = {
- .offset = 0x10500,
+static const struct psp_vdata pspv1 = {
+ .cmdresp_reg = 0x10580,
+ .cmdbuff_addr_lo_reg = 0x105e0,
+ .cmdbuff_addr_hi_reg = 0x105e4,
+ .feature_reg = 0x105fc,
+ .inten_reg = 0x10610,
+ .intsts_reg = 0x10614,
+};
+
+static const struct psp_vdata pspv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
};
#endif
static const struct sp_dev_vdata dev_vdata[] = {
- {
+ { /* 0 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv3,
#endif
},
- {
+ { /* 1 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
- .psp_vdata = &psp_entry
+ .psp_vdata = &pspv1,
#endif
},
- {
+ { /* 2 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5b,
#endif
},
+ { /* 3 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv2,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
/* Last entry must be zero */
{ 0, }
};
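
The shape introduced here is the standard per-device vdata pattern: each PCI ID carries a kernel_ulong_t pointing at a const table of register offsets, and the core indexes registers through that table instead of compile-time constants. In miniature (the offsets and device ID mirror entries above; the structure is pared down for illustration):

    #include <linux/pci.h>

    struct my_vdata {
        unsigned int cmdresp_reg;
        unsigned int intsts_reg;
    };

    static const struct my_vdata my_v1 = {
        .cmdresp_reg = 0x10580,
        .intsts_reg  = 0x10614,
    };

    static const struct pci_device_id my_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&my_v1 },
        { 0, }    /* last entry must be zero */
    };

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        const struct my_vdata *vdata = (const struct my_vdata *)id->driver_data;

        /* register accesses become: ioread32(io_regs + vdata->cmdresp_reg) */
        (void)vdata;
        return 0;
    }
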
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 03f4b9fce556..01b82b82f8b8 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2344,7 +2344,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2364,7 +2363,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2384,7 +2382,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2404,7 +2401,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2424,7 +2420,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),cbc(aes))",
.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2444,7 +2439,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2464,7 +2458,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2484,7 +2477,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2504,7 +2496,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "ccm(aes)",
.driver_name = "ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_ccm_setauthsize,
@@ -2524,7 +2515,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4309(ccm(aes))",
.driver_name = "rfc4309-ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4309_ccm_setkey,
.setauthsize = cc_rfc4309_ccm_setauthsize,
@@ -2544,7 +2534,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "gcm(aes)",
.driver_name = "gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_gcm_setauthsize,
@@ -2564,7 +2553,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4106(gcm(aes))",
.driver_name = "rfc4106-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4106_gcm_setkey,
.setauthsize = cc_rfc4106_gcm_setauthsize,
@@ -2584,7 +2572,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4543(gcm(aes))",
.driver_name = "rfc4543-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4543_gcm_setkey,
.setauthsize = cc_rfc4543_gcm_setauthsize,
@@ -2621,8 +2608,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg->base.cra_priority = CC_CRA_PRIO;
alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- tmpl->type;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = cc_aead_init;
alg->exit = cc_aead_exit;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index b32577477b4c..dd948e1df9e5 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -454,9 +454,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -498,9 +496,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d2810c183b73..7623b29911af 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -19,8 +19,6 @@
#define template_skcipher template_u.skcipher
-#define CC_MIN_AES_XTS_SIZE 0x10
-#define CC_MAX_AES_XTS_SIZE 0x2000
struct cc_cipher_handle {
struct list_head alg_list;
};
@@ -98,8 +96,7 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
- if (size >= CC_MIN_AES_XTS_SIZE &&
- size <= CC_MAX_AES_XTS_SIZE &&
+ if (size >= AES_BLOCK_SIZE &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
@@ -593,34 +590,82 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
+/*
+ * Update a CTR-AES 128-bit counter
+ */
+static void cc_update_ctr(u8 *ctr, unsigned int increment)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ IS_ALIGNED((unsigned long)ctr, 8)) {
+
+ __be64 *high_be = (__be64 *)ctr;
+ __be64 *low_be = high_be + 1;
+ u64 orig_low = __be64_to_cpu(*low_be);
+ u64 new_low = orig_low + (u64)increment;
+
+ *low_be = __cpu_to_be64(new_low);
+
+ if (new_low < orig_low)
+ *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+ } else {
+ u8 *pos = (ctr + AES_BLOCK_SIZE);
+ u8 val;
+ unsigned int size;
+
+ for (; increment; increment--)
+ for (size = AES_BLOCK_SIZE; size; size--) {
+ val = *--pos + 1;
+ *pos = val;
+ if (val)
+ break;
+ }
+ }
+}
+
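
The aligned fast path in cc_update_ctr() above treats the 16-byte counter as two big-endian 64-bit halves and propagates the carry by hand. Below is a minimal userspace sketch of the same arithmetic, assuming glibc's be64toh()/htobe64() from <endian.h> in place of the kernel's __be64 helpers; ctr_add is an illustrative name, not part of the driver.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ctr_add(uint8_t ctr[16], uint64_t increment)
{
	uint64_t hi, lo, new_lo;

	memcpy(&hi, ctr, 8);
	memcpy(&lo, ctr + 8, 8);
	hi = be64toh(hi);
	lo = be64toh(lo);

	new_lo = lo + increment;
	if (new_lo < lo)		/* low half wrapped: carry up */
		hi++;

	hi = htobe64(hi);
	new_lo = htobe64(new_lo);
	memcpy(ctr, &hi, 8);
	memcpy(ctr + 8, &new_lo, 8);
}

int main(void)
{
	uint8_t ctr[16] = { [15] = 0xff };

	ctr_add(ctr, 1);		/* 0x..00ff + 1 = 0x..0100 */
	printf("%02x %02x\n", ctr[14], ctr[15]);	/* prints: 01 00 */
	return 0;
}
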
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
- kzfree(req_ctx->iv);
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CBC:
+ /*
+ * The crypto API expects us to set the req->iv to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->iv, req_ctx->backup_info, ivsize);
+ kzfree(req_ctx->backup_info);
+ } else if (!err) {
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req->iv, req->dst, len,
+ ivsize, 0);
+ }
+ break;
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- scatterwalk_map_and_copy(req->iv, req->dst,
- (req->cryptlen - ivsize),
- ivsize, 0);
+ case DRV_CIPHER_CTR:
+ /* Compute the counter of the last block */
+ len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
+ cc_update_ctr((u8 *)req->iv, len);
+ break;
+
+ default:
+ break;
}
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ kzfree(req_ctx->iv);
+
skcipher_request_complete(req, err);
}
@@ -639,7 +684,7 @@ static int cc_cipher_process(struct skcipher_request *req,
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
struct cc_crypto_req cc_req = {};
- int rc, cts_restore_flag = 0;
+ int rc;
unsigned int seq_len = 0;
gfp_t flags = cc_gfp_flags(&req->base);
@@ -671,23 +716,10 @@ static int cc_cipher_process(struct skcipher_request *req,
goto exit_process;
}
- /*For CTS in case of data size aligned to 16 use CBC mode*/
- if (((nbytes % AES_BLOCK_SIZE) == 0) &&
- ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
- ctx_p->cipher_mode = DRV_CIPHER_CBC;
- cts_restore_flag = 1;
- }
-
/* Setup request structure */
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
- cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
- STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
-
-#endif
-
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -708,14 +740,6 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
&seq_len);
- /* do we need to generate IV? */
- if (req_ctx->is_giv) {
- cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
- cc_req.ivgen_dma_addr_len = 1;
- /* set the IV size (8/16 B long)*/
- cc_req.ivgen_size = ivsize;
- }
-
/* STAT_PHASE_3: Lock HW and push sequence */
rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
@@ -728,9 +752,6 @@ static int cc_cipher_process(struct skcipher_request *req,
}
exit_process:
- if (cts_restore_flag)
- ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
-
if (rc != -EINPROGRESS && rc != -EBUSY) {
kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
@@ -743,8 +764,7 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
{
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- req_ctx->is_giv = false;
- req_ctx->backup_info = NULL;
+ memset(req_ctx, 0, sizeof(*req_ctx));
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
}
@@ -752,21 +772,28 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
gfp_t flags = cc_gfp_flags(&req->base);
+ unsigned int len;
- /*
- * Allocate and save the last IV sized bytes of the source, which will
- * be lost in case of in-place decryption and might be needed for CTS.
- */
- req_ctx->backup_info = kmalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+
+ /* Allocate and save the last IV sized bytes of the source,
+ * which will be lost in case of in-place decryption.
+ */
+ req_ctx->backup_info = kzalloc(ivsize, flags);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
- (req->cryptlen - ivsize), ivsize, 0);
- req_ctx->is_giv = false;
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
+ ivsize, 0);
+ }
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
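
The backup_info copy above exists because, for chained CBC, the crypto API expects req->iv to end up holding the last ciphertext block, and an in-place decryption overwrites that block with plaintext before the completion callback runs. Here is a userspace illustration of the ordering hazard, with plain buffers standing in for the scatterlists; the names and the XOR "cipher" are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLK 16

static void fake_decrypt_in_place(uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		buf[i] ^= 0xaa;		/* ciphertext becomes "plaintext" */
}

int main(void)
{
	uint8_t buf[2 * BLK] = { 0 };
	uint8_t next_iv[BLK];

	buf[2 * BLK - 1] = 0x42;	/* marker in the last ciphertext block */

	/* Save the last ciphertext block BEFORE decrypting in place. */
	memcpy(next_iv, buf + BLK, BLK);

	fake_decrypt_in_place(buf, sizeof(buf));

	/* buf no longer holds ciphertext, but the saved copy does. */
	printf("saved: %02x, clobbered: %02x\n",
	       next_iv[BLK - 1], buf[2 * BLK - 1]);	/* 42, e8 */
	return 0;
}
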
@@ -927,7 +954,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(paes)",
.driver_name = "ecb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -944,7 +970,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(paes)",
.driver_name = "cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -961,7 +986,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(paes)",
.driver_name = "ofb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -975,10 +999,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_712,
},
{
- .name = "cts1(cbc(paes))",
- .driver_name = "cts1-cbc-paes-ccree",
+ .name = "cts(cbc(paes))",
+ .driver_name = "cts-cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -995,7 +1018,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(paes)",
.driver_name = "ctr-paes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -1162,7 +1184,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1179,7 +1200,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1196,7 +1216,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1210,10 +1229,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_630,
},
{
- .name = "cts1(cbc(aes))",
- .driver_name = "cts1-cbc-aes-ccree",
+ .name = "cts(cbc(aes))",
+ .driver_name = "cts-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1230,7 +1248,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1247,7 +1264,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des3_ede)",
.driver_name = "cbc-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1264,7 +1280,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des3_ede)",
.driver_name = "ecb-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1281,7 +1296,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des)",
.driver_name = "cbc-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1298,7 +1312,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des)",
.driver_name = "ecb-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1338,8 +1351,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
alg->base.cra_init = cc_cipher_init;
alg->base.cra_exit = cc_cipher_exit;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_TYPE_SKCIPHER;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_alg->cipher_mode = tmpl->cipher_mode;
t_alg->flow_mode = tmpl->flow_mode;
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 68444cfa936b..4dbc0a1e6d5c 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -22,7 +22,6 @@ struct cipher_req_ctx {
u32 out_mlli_nents;
u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
- bool is_giv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index bd974fef05e4..1ff229c2aeab 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -131,8 +131,8 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
if (irr) {
- dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
- irr);
+ dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
/* Just warning */
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 95f82b2d1e70..d608a4faf662 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -148,7 +148,6 @@ struct cc_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- u32 type;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 96ff777474d7..b9313306c36f 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -602,66 +602,7 @@ static int cc_hash_update(struct ahash_request *req)
return rc;
}
-static int cc_hash_finup(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
- struct scatterlist *src = req->src;
- unsigned int nbytes = req->nbytes;
- u8 *result = req->result;
- struct device *dev = drvdata_to_dev(ctx->drvdata);
- bool is_hmac = ctx->is_hmac;
- struct cc_crypto_req cc_req = {};
- struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- unsigned int idx = 0;
- int rc;
- gfp_t flags = cc_gfp_flags(&req->base);
-
- dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
-
- if (cc_map_req(dev, state, ctx)) {
- dev_err(dev, "map_ahash_source() failed\n");
- return -EINVAL;
- }
-
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
- flags)) {
- dev_err(dev, "map_ahash_request_final() failed\n");
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
- if (cc_map_result(dev, state, digestsize)) {
- dev_err(dev, "map_ahash_digest() failed\n");
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
-
- /* Setup request structure */
- cc_req.user_cb = cc_hash_complete;
- cc_req.user_arg = req;
-
- idx = cc_restore_hash(desc, ctx, state, idx);
-
- if (is_hmac)
- idx = cc_fin_hmac(desc, req, idx);
-
- idx = cc_fin_result(desc, req, idx);
-
- rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS && rc != -EBUSY) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_result(dev, state, digestsize, result);
- cc_unmap_req(dev, state, ctx);
- }
- return rc;
-}
-
-static int cc_hash_final(struct ahash_request *req)
+static int cc_do_finup(struct ahash_request *req, bool update)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -678,21 +619,20 @@ static int cc_hash_final(struct ahash_request *req)
int rc;
gfp_t flags = cc_gfp_flags(&req->base);
- dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
+ dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
+ update ? "finup" : "final", nbytes);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -EINVAL;
}
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
-
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_hash_request(dev, state, src, true);
@@ -706,7 +646,7 @@ static int cc_hash_final(struct ahash_request *req)
idx = cc_restore_hash(desc, ctx, state, idx);
- /* "DO-PAD" must be enabled only when writing current length to HW */
+ /* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
set_cipher_mode(&desc[idx], ctx->hw_mode);
@@ -731,6 +671,17 @@ static int cc_hash_final(struct ahash_request *req)
return rc;
}
+static int cc_hash_finup(struct ahash_request *req)
+{
+ return cc_do_finup(req, true);
+}
+
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ return cc_do_finup(req, false);
+}
+
static int cc_hash_init(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
@@ -1813,9 +1764,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
alg->cra_exit = cc_cra_exit;
alg->cra_init = cc_cra_init;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_crypto_alg->hash_mode = template->hash_mode;
t_crypto_alg->hw_mode = template->hw_mode;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b916c4eb608c..5c539af8ed60 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -4203,7 +4203,6 @@ static int chcr_unregister_alg(void)
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
-#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
* chcr_register_alg - Register crypto algorithms with kernel framework.
@@ -4237,8 +4236,7 @@ static int chcr_register_alg(void)
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4258,10 +4256,9 @@ static int chcr_register_alg(void)
a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
a_hash->halg.base.cra_module = THIS_MODULE;
- a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
+ a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
a_hash->halg.base.cra_alignmask = 0;
a_hash->halg.base.cra_exit = NULL;
- a_hash->halg.base.cra_type = &crypto_ahash_type;
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 2bb6f0380758..0997e166ea57 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1673,7 +1673,7 @@ static void chtls_timewait(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
tp->rcv_nxt++;
- tp->rx_opt.ts_recent_stamp = get_seconds();
+ tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
tp->srtt_us = 0;
tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 55d50140f9e5..490960755864 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -97,7 +97,7 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
- val << bit_pos);
+ (u64)val << bit_pos);
}
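
The (u64) cast matters because val is an int: without it the shift is performed at 32-bit width, so a bit_pos of 32 or more is undefined behaviour and the flag can be lost before the value is widened to the u64 parameter. A small standalone demonstration of the corrected form:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int val = 1;
	unsigned int bit_pos = 33;

	/* Widen first so the shift happens at 64-bit width. */
	uint64_t mask = (uint64_t)val << bit_pos;

	printf("mask = 0x%" PRIx64 "\n", mask);	/* 0x200000000 */
	/*
	 * "val << bit_pos" without the cast would shift a 32-bit int by
	 * 33 bits, which is undefined behaviour in C and in practice
	 * drops the flag entirely.
	 */
	return 0;
}
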
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
new file mode 100644
index 000000000000..8ca9c503bcb0
--- /dev/null
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CRYPTO_DEV_HISI_SEC
+ tristate "Support for Hisilicon SEC crypto block cipher accelerator"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ select SG_SPLIT
+ depends on ARM64 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Support for Hisilicon SEC Engine in Hip06 and Hip07
+
+ To compile this as a module, choose M here: the module
+ will be called hisi_sec.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
new file mode 100644
index 000000000000..463f46ace182
--- /dev/null
+++ b/drivers/crypto/hisilicon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
diff --git a/drivers/crypto/hisilicon/sec/Makefile b/drivers/crypto/hisilicon/sec/Makefile
new file mode 100644
index 000000000000..a55b698e0c27
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += hisi_sec.o
+hisi_sec-y = sec_algs.o sec_drv.o
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
new file mode 100644
index 000000000000..f7d6d690116e
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -0,0 +1,1122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sec_drv.h"
+
+#define SEC_MAX_CIPHER_KEY 64
+#define SEC_REQ_LIMIT SZ_32M
+
+struct sec_c_alg_cfg {
+ unsigned c_alg : 3;
+ unsigned c_mode : 3;
+ unsigned key_len : 2;
+ unsigned c_width : 2;
+};
+
+static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
+ [SEC_C_DES_ECB_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_DES_CBC_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_3DES_ECB_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_ECB_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_3DES_CBC_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_CBC_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_AES_ECB_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_ECB_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_ECB_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CBC_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CBC_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CBC_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CTR_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CTR_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CTR_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_XTS_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_XTS_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_NULL] = {
+ },
+};
+
+/*
+ * Mutex used to ensure safe operation of the reference count of
+ * alg providers.
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
+ struct sec_bd_info *req,
+ enum sec_cipher_alg alg)
+{
+ const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];
+
+ memset(req, 0, sizeof(*req));
+ req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
+ req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
+ req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
+ req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
+
+ req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
+ req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
+}
+
+static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
+ const u8 *key,
+ unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->cipher_alg = alg;
+ memcpy(ctx->key, key, keylen);
+ sec_alg_skcipher_init_template(ctx, &ctx->req_template,
+ ctx->cipher_alg);
+}
+
+static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ dma_addr_t *psec_sgl,
+ struct scatterlist *sgl,
+ int count,
+ struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current = NULL;
+ struct sec_hw_sgl *sgl_next;
+ dma_addr_t sgl_next_dma;
+ struct scatterlist *sg;
+ int ret, sge_index, i;
+
+ if (!count)
+ return -EINVAL;
+
+ for_each_sg(sgl, sg, count, i) {
+ sge_index = i % SEC_MAX_SGE_NUM;
+ if (sge_index == 0) {
+ sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
+ GFP_KERNEL, &sgl_next_dma);
+ if (!sgl_next) {
+ ret = -ENOMEM;
+ goto err_free_hw_sgls;
+ }
+
+ if (!sgl_current) { /* First one */
+ *psec_sgl = sgl_next_dma;
+ *sec_sgl = sgl_next;
+ } else { /* Chained */
+ sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = sgl_next_dma;
+ sgl_current->next = sgl_next;
+ }
+ sgl_current = sgl_next;
+ }
+ sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
+ sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
+ sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
+ }
+ sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = 0;
+ (*sec_sgl)->entry_sum_in_chain = count;
+
+ return 0;
+
+err_free_hw_sgls:
+ sgl_current = *sec_sgl;
+ while (sgl_current) {
+ sgl_next = sgl_current->next;
+ dma_pool_free(info->hw_sgl_pool, sgl_current,
+ sgl_current->next_sgl);
+ sgl_current = sgl_next;
+ }
+ *psec_sgl = 0;
+
+ return ret;
+}
+
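
sec_alloc_and_fill_hw_sgl() above allocates each SEC_MAX_SGE_NUM-entry table from info->hw_sgl_pool and chains the tables through next_sgl (the DMA address) and next (the CPU pointer). The pool itself is created at probe time in sec_drv.c, which is not part of this hunk; the sketch below shows what such a setup could look like. sec_create_hw_sgl_pool is a hypothetical helper and the 64-byte alignment is an assumption, not a documented hardware requirement.

#include <linux/dmapool.h>

/* Hypothetical helper; the real pool creation lives in sec_drv.c. */
static int sec_create_hw_sgl_pool(struct sec_dev_info *info)
{
	info->hw_sgl_pool = dma_pool_create("sec_hw_sgl", info->dev,
					    sizeof(struct sec_hw_sgl),
					    64 /* assumed alignment */, 0);
	if (!info->hw_sgl_pool)
		return -ENOMEM;

	return 0;
}
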
+static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+ dma_addr_t psec_sgl, struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current, *sgl_next;
+
+ if (!hw_sgl)
+ return;
+ sgl_current = hw_sgl;
+ while (sgl_current->next) {
+ sgl_next = sgl_current->next;
+ dma_pool_free(info->hw_sgl_pool, sgl_current,
+ sgl_current->next_sgl);
+ sgl_current = sgl_next;
+ }
+ dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
+}
+
+static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ mutex_lock(&ctx->lock);
+ if (ctx->key) {
+ /* rekeying */
+ memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
+ } else {
+ /* new key */
+ ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+ &ctx->pkey, GFP_KERNEL);
+ if (!ctx->key) {
+ mutex_unlock(&ctx->lock);
+ return -ENOMEM;
+ }
+ }
+ mutex_unlock(&ctx->lock);
+ sec_alg_skcipher_init_context(tfm, key, keylen, alg);
+
+ return 0;
+}
+
+static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_ECB_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_ECB_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_ECB_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CBC_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CBC_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CBC_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CTR_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CTR_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CTR_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128 * 2:
+ alg = SEC_C_AES_XTS_128;
+ break;
+ case AES_KEYSIZE_256 * 2:
+ alg = SEC_C_AES_XTS_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
+}
+
+static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
+}
+
+static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE * 3)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_ECB_192_3KEY);
+}
+
+static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_CBC_192_3KEY);
+}
+
+static void sec_alg_free_el(struct sec_request_el *el,
+ struct sec_dev_info *info)
+{
+ sec_free_hw_sgl(el->out, el->dma_out, info);
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+ kfree(el->sgl_in);
+ kfree(el->sgl_out);
+ kfree(el);
+}
+
+/* queuelock must be held */
+static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
+{
+ struct sec_request_el *el, *temp;
+ int ret = 0;
+
+ mutex_lock(&sec_req->lock);
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ /*
+		 * Add to the hardware queue only under the following
+		 * circumstances:
+		 * 1) Software and hardware queues are empty, so there are no
+		 *    chain dependencies.
+		 * 2) No dependencies because a new IV is used (check that the
+		 *    software queue is empty to maintain ordering).
+		 * 3) No dependencies because the mode does no chaining.
+		 *
+		 * In all other cases, insert onto the software queue first,
+		 * which is then emptied as requests complete.
+ */
+ if (!queue->havesoftqueue ||
+ (kfifo_is_empty(&queue->softqueue) &&
+ sec_queue_empty(queue))) {
+ ret = sec_queue_send(queue, &el->req, sec_req);
+ if (ret == -EAGAIN) {
+				/* Wait until we can send, then try again */
+ /* DEAD if here - should not happen */
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+ } else {
+ kfifo_put(&queue->softqueue, el);
+ }
+ }
+err_unlock:
+ mutex_unlock(&sec_req->lock);
+
+ return ret;
+}
+
+static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
+ struct crypto_async_request *req_base)
+{
+ struct skcipher_request *skreq = container_of(req_base,
+ struct skcipher_request,
+ base);
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_request *backlog_req;
+ struct sec_request_el *sec_req_el, *nextrequest;
+ struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct device *dev = ctx->queue->dev_info->dev;
+ int icv_or_skey_en, ret;
+ bool done;
+
+ sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
+ head);
+ icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
+ SEC_BD_W0_ICV_OR_SKEY_EN_S;
+ if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
+ dev_err(dev, "Got an invalid answer %lu %d\n",
+ sec_resp->w1 & SEC_BD_W1_BD_INVALID,
+ icv_or_skey_en);
+ sec_req->err = -EINVAL;
+ /*
+		 * We need to muddle on to avoid getting stuck with elements
+		 * on the queue. The error will be reported to the requester,
+		 * which should be able to handle it appropriately.
+ */
+ }
+
+ mutex_lock(&ctx->queue->queuelock);
+ /* Put the IV in place for chained cases */
+ switch (ctx->cipher_alg) {
+ case SEC_C_AES_CBC_128:
+ case SEC_C_AES_CBC_192:
+ case SEC_C_AES_CBC_256:
+ if (sec_req_el->req.w0 & SEC_BD_W0_DE)
+ sg_pcopy_to_buffer(sec_req_el->sgl_out,
+ sg_nents(sec_req_el->sgl_out),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ else
+ sg_pcopy_to_buffer(sec_req_el->sgl_in,
+ sg_nents(sec_req_el->sgl_in),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ /* No need to sync to the device as coherent DMA */
+ break;
+ case SEC_C_AES_CTR_128:
+ case SEC_C_AES_CTR_192:
+ case SEC_C_AES_CTR_256:
+ crypto_inc(skreq->iv, 16);
+ break;
+ default:
+ /* Do not update */
+ break;
+ }
+
+ if (ctx->queue->havesoftqueue &&
+ !kfifo_is_empty(&ctx->queue->softqueue) &&
+ sec_queue_empty(ctx->queue)) {
+ ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
+ if (ret <= 0)
+ dev_err(dev,
+ "Error getting next element from kfifo %d\n",
+ ret);
+ else
+ /* We know there is space so this cannot fail */
+ sec_queue_send(ctx->queue, &nextrequest->req,
+ nextrequest->sec_req);
+ } else if (!list_empty(&ctx->backlog)) {
+ /* Need to verify there is room first */
+ backlog_req = list_first_entry(&ctx->backlog,
+ typeof(*backlog_req),
+ backlog_head);
+ if (sec_queue_can_enqueue(ctx->queue,
+ backlog_req->num_elements) ||
+ (ctx->queue->havesoftqueue &&
+ kfifo_avail(&ctx->queue->softqueue) >
+ backlog_req->num_elements)) {
+ sec_send_request(backlog_req, ctx->queue);
+ backlog_req->req_base->complete(backlog_req->req_base,
+ -EINPROGRESS);
+ list_del(&backlog_req->backlog_head);
+ }
+ }
+ mutex_unlock(&ctx->queue->queuelock);
+
+ mutex_lock(&sec_req->lock);
+ list_del(&sec_req_el->head);
+ mutex_unlock(&sec_req->lock);
+ sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
+
+ /*
+ * Request is done.
+ * The dance is needed as the lock is freed in the completion
+ */
+ mutex_lock(&sec_req->lock);
+ done = list_empty(&sec_req->elements);
+ mutex_unlock(&sec_req->lock);
+ if (done) {
+ if (crypto_skcipher_ivsize(atfm)) {
+ dma_unmap_single(dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ }
+ dma_unmap_sg(dev, skreq->src, sec_req->len_in,
+ DMA_BIDIRECTIONAL);
+ if (skreq->src != skreq->dst)
+ dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
+ DMA_BIDIRECTIONAL);
+ skreq->base.complete(&skreq->base, sec_req->err);
+ }
+}
+
+void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
+{
+ struct sec_request *sec_req = shadow;
+
+ sec_req->cb(resp, sec_req->req_base);
+}
+
+static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
+ int *steps)
+{
+ size_t *sizes;
+ int i;
+
+	/* Split into suitably sized blocks */
+ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
+ sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+ if (!sizes)
+ return -ENOMEM;
+
+ for (i = 0; i < *steps - 1; i++)
+ sizes[i] = SEC_REQ_LIMIT;
+ sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
+ *split_sizes = sizes;
+
+ return 0;
+}
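
A quick sanity check of the split arithmetic in ordinary C, with SEC_REQ_LIMIT assumed equal to the kernel's SZ_32M and plain integer division standing in for roundup():

#include <stdio.h>

#define SEC_REQ_LIMIT (32UL << 20)	/* SZ_32M */

int main(void)
{
	unsigned long length = 70UL << 20;	/* a 70 MiB request */
	unsigned long steps = (length + SEC_REQ_LIMIT - 1) / SEC_REQ_LIMIT;
	unsigned long last = length - SEC_REQ_LIMIT * (steps - 1);

	/* prints: steps=3, last=6 MiB -> sizes {32M, 32M, 6M} */
	printf("steps=%lu, last=%lu MiB\n", steps, last >> 20);
	return 0;
}
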
+
+static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+ int steps, struct scatterlist ***splits,
+ int **splits_nents,
+ int sgl_len_in,
+ struct device *dev)
+{
+ int ret, count;
+
+ count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+ if (!count)
+ return -EINVAL;
+
+ *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+ if (!*splits) {
+ ret = -ENOMEM;
+ goto err_unmap_sg;
+ }
+ *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+ if (!*splits_nents) {
+ ret = -ENOMEM;
+ goto err_free_splits;
+ }
+
+ /* output the scatter list before and after this */
+ ret = sg_split(sgl, count, 0, steps, split_sizes,
+ *splits, *splits_nents, GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_free_splits_nents;
+ }
+
+ return 0;
+
+err_free_splits_nents:
+ kfree(*splits_nents);
+err_free_splits:
+ kfree(*splits);
+err_unmap_sg:
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+
+ return ret;
+}
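
The splitting itself is delegated to the generic sg_split() helper (lib/sg_split.c, pulled in by the SG_SPLIT Kconfig select above). For reference, a sketch of its prototype as declared in include/linux/scatterlist.h; check the tree in use, since parameter names may differ:

int sg_split(struct scatterlist *in, const int in_mapped_nents,
	     const off_t skip, const int nb_splits,
	     const size_t *split_sizes,
	     struct scatterlist **out, int *out_mapped_nents,
	     gfp_t gfp_mask);
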
+
+/*
+ * Reverses the sec_map_and_split_sg call for messages not yet added to
+ * the queues.
+ */
+static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
+ struct scatterlist **splits, int *splits_nents,
+ int sgl_len_in, struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < steps; i++)
+ kfree(splits[i]);
+ kfree(splits_nents);
+ kfree(splits);
+
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+}
+
+static struct sec_request_el
+*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
+ int el_size, bool different_dest,
+ struct scatterlist *sgl_in, int n_ents_in,
+ struct scatterlist *sgl_out, int n_ents_out,
+ struct sec_dev_info *info)
+{
+ struct sec_request_el *el;
+ struct sec_bd_info *req;
+ int ret;
+
+ el = kzalloc(sizeof(*el), GFP_KERNEL);
+ if (!el)
+ return ERR_PTR(-ENOMEM);
+ el->el_length = el_size;
+ req = &el->req;
+ memcpy(req, template, sizeof(*req));
+
+ req->w0 &= ~SEC_BD_W0_CIPHER_M;
+ if (encrypt)
+ req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
+ else
+ req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+ req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
+ SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+ req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
+ SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+
+ /* Writing whole u32 so no need to take care of masking */
+ req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
+ ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
+ SEC_BD_W2_C_GRAN_SIZE_15_0_M);
+
+ req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
+ req->w1 |= SEC_BD_W1_ADDR_TYPE;
+
+ el->sgl_in = sgl_in;
+
+ ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
+ n_ents_in, info);
+ if (ret)
+ goto err_free_el;
+
+ req->data_addr_lo = lower_32_bits(el->dma_in);
+ req->data_addr_hi = upper_32_bits(el->dma_in);
+
+ if (different_dest) {
+ el->sgl_out = sgl_out;
+ ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
+ el->sgl_out,
+ n_ents_out, info);
+ if (ret)
+ goto err_free_hw_sgl_in;
+
+ req->w0 |= SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
+
+ } else {
+ req->w0 &= ~SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
+ }
+
+ return el;
+
+err_free_hw_sgl_in:
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+err_free_el:
+ kfree(el);
+
+ return ERR_PTR(ret);
+}
+
+static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ bool encrypt)
+{
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sec_queue *queue = ctx->queue;
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_dev_info *info = queue->dev_info;
+ int i, ret, steps;
+ size_t *split_sizes;
+ struct scatterlist **splits_in;
+ struct scatterlist **splits_out = NULL;
+ int *splits_in_nents;
+ int *splits_out_nents = NULL;
+ struct sec_request_el *el, *temp;
+
+ mutex_init(&sec_req->lock);
+ sec_req->req_base = &skreq->base;
+ sec_req->err = 0;
+ /* SGL mapping out here to allow us to break it up as necessary */
+ sec_req->len_in = sg_nents(skreq->src);
+
+ ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
+ &steps);
+ if (ret)
+ return ret;
+ sec_req->num_elements = steps;
+ ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
+ &splits_in_nents, sec_req->len_in,
+ info->dev);
+ if (ret)
+ goto err_free_split_sizes;
+
+ if (skreq->src != skreq->dst) {
+ sec_req->len_out = sg_nents(skreq->dst);
+ ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
+ &splits_out, &splits_out_nents,
+ sec_req->len_out, info->dev);
+ if (ret)
+ goto err_unmap_in_sg;
+ }
+	/* Shared info stored in sec_req - applies to all BDs */
+ sec_req->tfm_ctx = ctx;
+ sec_req->cb = sec_skcipher_alg_callback;
+ INIT_LIST_HEAD(&sec_req->elements);
+
+ /*
+	 * Future optimization: in the chaining case we can't use a DMA
+	 * pool bounce buffer, but in the case where we know there is no
+	 * chaining we can.
+ */
+ if (crypto_skcipher_ivsize(atfm)) {
+ sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
+ ret = -ENOMEM;
+ goto err_unmap_out_sg;
+ }
+ }
+
+ /* Set them all up then queue - cleaner error handling. */
+ for (i = 0; i < steps; i++) {
+ el = sec_alg_alloc_and_fill_el(&ctx->req_template,
+ encrypt ? 1 : 0,
+ split_sizes[i],
+ skreq->src != skreq->dst,
+ splits_in[i], splits_in_nents[i],
+					       splits_out ? splits_out[i] : NULL,
+					       splits_out_nents ?
+						splits_out_nents[i] : 0,
+					       info);
+ if (IS_ERR(el)) {
+ ret = PTR_ERR(el);
+ goto err_free_elements;
+ }
+ el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
+ el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
+ el->sec_req = sec_req;
+ list_add_tail(&el->head, &sec_req->elements);
+ }
+
+ /*
+	 * Only attempt to queue if the whole lot can fit in the queue -
+	 * we can't successfully clean up after a partial queueing, so this
+	 * must succeed or fail atomically.
+	 *
+	 * Big hammer test of both software and hardware queues - could be
+	 * more refined, but this is unlikely to happen so there is no need.
+ */
+
+	/* Cleanup - all elements in pointer arrays have been copied */
+ kfree(splits_in_nents);
+ kfree(splits_in);
+ kfree(splits_out_nents);
+ kfree(splits_out);
+ kfree(split_sizes);
+
+ /* Grab a big lock for a long time to avoid concurrency issues */
+ mutex_lock(&queue->queuelock);
+
+ /*
+ * Can go on to queue if we have space in either:
+ * 1) The hardware queue and no software queue
+ * 2) The software queue
+	 * AND there is nothing in the backlog. If there is a backlog we
+	 * have to queue only to the backlog list and return busy.
+ */
+ if ((!sec_queue_can_enqueue(queue, steps) &&
+ (!queue->havesoftqueue ||
+	      kfifo_avail(&queue->softqueue) <= steps)) ||
+ !list_empty(&ctx->backlog)) {
+ if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ list_add_tail(&sec_req->backlog_head, &ctx->backlog);
+ mutex_unlock(&queue->queuelock);
+ return -EBUSY;
+ }
+
+ ret = -EBUSY;
+ mutex_unlock(&queue->queuelock);
+ goto err_free_elements;
+ }
+ ret = sec_send_request(sec_req, queue);
+ mutex_unlock(&queue->queuelock);
+ if (ret)
+ goto err_free_elements;
+
+ return -EINPROGRESS;
+
+err_free_elements:
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ list_del(&el->head);
+ sec_alg_free_el(el, info);
+ }
+ if (crypto_skcipher_ivsize(atfm))
+ dma_unmap_single(info->dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+				 DMA_TO_DEVICE);
+err_unmap_out_sg:
+ if (skreq->src != skreq->dst)
+ sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
+ splits_out_nents, sec_req->len_out,
+ info->dev);
+err_unmap_in_sg:
+ sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
+ sec_req->len_in, info->dev);
+err_free_split_sizes:
+ kfree(split_sizes);
+
+ return ret;
+}
+
+static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, true);
+}
+
+static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, false);
+}
+
+static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->backlog);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));
+
+ ctx->queue = sec_queue_alloc_start_safe();
+ if (IS_ERR(ctx->queue))
+ return PTR_ERR(ctx->queue);
+
+ mutex_init(&ctx->queue->queuelock);
+ ctx->queue->havesoftqueue = false;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ if (ctx->key) {
+ memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
+ dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
+ ctx->pkey);
+ }
+ sec_queue_stop_release(ctx->queue);
+}
+
+static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = sec_alg_skcipher_init(tfm);
+ if (ret)
+ return ret;
+
+ INIT_KFIFO(ctx->queue->softqueue);
+ ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
+ if (ret) {
+ sec_alg_skcipher_exit(tfm);
+ return ret;
+ }
+ ctx->queue->havesoftqueue = true;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ kfifo_free(&ctx->queue->softqueue);
+ sec_alg_skcipher_exit(tfm);
+}
+
+static struct skcipher_alg sec_algs[] = {
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "hisi_sec_aes_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "hisi_sec_aes_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "hisi_sec_aes_ctr",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_ctr,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "hisi_sec_aes_xts",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_xts,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ /* Unable to find any test vectors so untested */
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "hisi_sec_des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "hisi_sec_des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_3des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_3des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ }
+};
+
+int sec_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ if (ret)
+ --active_devs;
+unlock:
+ mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+void sec_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs != 0)
+ goto unlock;
+ crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
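
sec_algs_register()/sec_algs_unregister() use a first-in/last-out reference count so the skciphers are registered with the crypto API exactly once, however many SEC devices probe. The same pattern in portable C, with a pthread mutex standing in for the kernel mutex and stub register/unregister hooks (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active;

static int do_register(void)    { puts("register");   return 0; }
static void do_unregister(void) { puts("unregister"); }

static int backend_get(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (++active == 1) {		/* first device: really register */
		ret = do_register();
		if (ret)
			--active;	/* roll back on failure */
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

static void backend_put(void)
{
	pthread_mutex_lock(&lock);
	if (--active == 0)		/* last device: really unregister */
		do_unregister();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	backend_get();	/* device 0 probes: prints "register" */
	backend_get();	/* device 1 probes: refcount only */
	backend_put();	/* device 1 removed */
	backend_put();	/* device 0 removed: prints "unregister" */
	return 0;
}
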
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
new file mode 100644
index 000000000000..c1ee4e7bf996
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Hisilicon SEC units found on Hip06 and Hip07
+ *
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ */
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sec_drv.h"
+
+#define SEC_QUEUE_AR_FROCE_ALLOC 0
+#define SEC_QUEUE_AR_FROCE_NOALLOC 1
+#define SEC_QUEUE_AR_FROCE_DIS 2
+
+#define SEC_QUEUE_AW_FROCE_ALLOC 0
+#define SEC_QUEUE_AW_FROCE_NOALLOC 1
+#define SEC_QUEUE_AW_FROCE_DIS 2
+
+/* SEC_ALGSUB registers */
+#define SEC_ALGSUB_CLK_EN_REG 0x03b8
+#define SEC_ALGSUB_CLK_DIS_REG 0x03bc
+#define SEC_ALGSUB_CLK_ST_REG 0x535c
+#define SEC_ALGSUB_RST_REQ_REG 0x0aa8
+#define SEC_ALGSUB_RST_DREQ_REG 0x0aac
+#define SEC_ALGSUB_RST_ST_REG 0x5a54
+#define SEC_ALGSUB_RST_ST_IS_RST BIT(0)
+
+#define SEC_ALGSUB_BUILD_RST_REQ_REG 0x0ab8
+#define SEC_ALGSUB_BUILD_RST_DREQ_REG 0x0abc
+#define SEC_ALGSUB_BUILD_RST_ST_REG 0x5a5c
+#define SEC_ALGSUB_BUILD_RST_ST_IS_RST BIT(0)
+
+#define SEC_SAA_BASE 0x00001000UL
+
+/* SEC_SAA registers */
+#define SEC_SAA_CTRL_REG(x) ((x) * SEC_SAA_ADDR_SIZE)
+#define SEC_SAA_CTRL_GET_QM_EN BIT(0)
+
+#define SEC_ST_INTMSK1_REG 0x0200
+#define SEC_ST_RINT1_REG 0x0400
+#define SEC_ST_INTSTS1_REG 0x0600
+#define SEC_BD_MNG_STAT_REG 0x0800
+#define SEC_PARSING_STAT_REG 0x0804
+#define SEC_LOAD_TIME_OUT_CNT_REG 0x0808
+#define SEC_CORE_WORK_TIME_OUT_CNT_REG 0x080c
+#define SEC_BACK_TIME_OUT_CNT_REG 0x0810
+#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG 0x0814
+#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG 0x0818
+#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG 0x081c
+#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG 0x0820
+#define SEC_SAA_ACC_REG 0x083c
+#define SEC_BD_NUM_CNT_IN_SEC_REG 0x0858
+#define SEC_LOAD_WORK_TIME_CNT_REG 0x0860
+#define SEC_CORE_WORK_WORK_TIME_CNT_REG 0x0864
+#define SEC_BACK_WORK_TIME_CNT_REG 0x0868
+#define SEC_SAA_IDLE_TIME_CNT_REG 0x086c
+#define SEC_SAA_CLK_CNT_REG 0x0870
+
+/* SEC_COMMON registers */
+#define SEC_CLK_EN_REG 0x0000
+#define SEC_CTRL_REG 0x0004
+
+#define SEC_COMMON_CNT_CLR_CE_REG 0x0008
+#define SEC_COMMON_CNT_CLR_CE_CLEAR BIT(0)
+#define SEC_COMMON_CNT_CLR_CE_SNAP_EN BIT(1)
+
+#define SEC_SECURE_CTRL_REG 0x000c
+#define SEC_AXI_CACHE_CFG_REG 0x0010
+#define SEC_AXI_QOS_CFG_REG 0x0014
+#define SEC_IPV4_MASK_TABLE_REG 0x0020
+#define SEC_IPV6_MASK_TABLE_X_REG(x) (0x0024 + (x) * 4)
+#define SEC_FSM_MAX_CNT_REG 0x0064
+
+#define SEC_CTRL2_REG 0x0068
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M GENMASK(3, 0)
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S 0
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M GENMASK(6, 4)
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S 4
+#define SEC_CTRL2_CLK_GATE_EN BIT(7)
+#define SEC_CTRL2_ENDIAN_BD BIT(8)
+#define SEC_CTRL2_ENDIAN_BD_TYPE BIT(9)
+
+#define SEC_CNT_PRECISION_CFG_REG 0x006c
+#define SEC_DEBUG_BD_CFG_REG 0x0070
+#define SEC_DEBUG_BD_CFG_WB_NORMAL BIT(0)
+#define SEC_DEBUG_BD_CFG_WB_EN BIT(1)
+
+#define SEC_Q_SIGHT_SEL 0x0074
+#define SEC_Q_SIGHT_HIS_CLR 0x0078
+#define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4)
+#define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4)
+#define SEC_STAT_CLR_REG 0x0a00
+#define SEC_SAA_IDLE_CNT_CLR_REG 0x0a04
+#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG 0x0b00
+#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG 0x0b04
+#define SEC_QM_BD_DFX_CFG_REG 0x0b08
+#define SEC_QM_BD_DFX_RESULT_REG 0x0b0c
+#define SEC_QM_BDID_DFX_RESULT_REG 0x0b10
+#define SEC_QM_BD_DFIFO_STATUS_REG 0x0b14
+#define SEC_QM_BD_DFX_CFG2_REG 0x0b1c
+#define SEC_QM_BD_DFX_RESULT2_REG 0x0b20
+#define SEC_QM_BD_IDFIFO_STATUS_REG 0x0b18
+#define SEC_QM_BD_DFIFO_STATUS2_REG 0x0b28
+#define SEC_QM_BD_IDFIFO_STATUS2_REG 0x0b2c
+
+#define SEC_HASH_IPV4_MASK 0xfff00000
+#define SEC_MAX_SAA_NUM 0xa
+#define SEC_SAA_ADDR_SIZE 0x1000
+
+#define SEC_Q_INIT_REG 0x0
+#define SEC_Q_INIT_WO_STAT_CLEAR 0x2
+#define SEC_Q_INIT_AND_STAT_CLEAR 0x3
+
+#define SEC_Q_CFG_REG 0x8
+#define SEC_Q_CFG_REORDER BIT(0)
+
+#define SEC_Q_PROC_NUM_CFG_REG 0x10
+#define SEC_QUEUE_ENB_REG 0x18
+
+#define SEC_Q_DEPTH_CFG_REG 0x50
+#define SEC_Q_DEPTH_CFG_DEPTH_M GENMASK(11, 0)
+#define SEC_Q_DEPTH_CFG_DEPTH_S 0
+
+#define SEC_Q_BASE_HADDR_REG 0x54
+#define SEC_Q_BASE_LADDR_REG 0x58
+#define SEC_Q_WR_PTR_REG 0x5c
+#define SEC_Q_OUTORDER_BASE_HADDR_REG 0x60
+#define SEC_Q_OUTORDER_BASE_LADDR_REG 0x64
+#define SEC_Q_OUTORDER_RD_PTR_REG 0x68
+#define SEC_Q_OT_TH_REG 0x6c
+
+#define SEC_Q_ARUSER_CFG_REG 0x70
+#define SEC_Q_ARUSER_CFG_FA BIT(0)
+#define SEC_Q_ARUSER_CFG_FNA BIT(1)
+#define SEC_Q_ARUSER_CFG_RINVLD BIT(2)
+#define SEC_Q_ARUSER_CFG_PKG BIT(3)
+
+#define SEC_Q_AWUSER_CFG_REG 0x74
+#define SEC_Q_AWUSER_CFG_FA BIT(0)
+#define SEC_Q_AWUSER_CFG_FNA BIT(1)
+#define SEC_Q_AWUSER_CFG_PKG BIT(2)
+
+#define SEC_Q_ERR_BASE_HADDR_REG 0x7c
+#define SEC_Q_ERR_BASE_LADDR_REG 0x80
+#define SEC_Q_CFG_VF_NUM_REG 0x84
+#define SEC_Q_SOFT_PROC_PTR_REG 0x88
+#define SEC_Q_FAIL_INT_MSK_REG 0x300
+#define SEC_Q_FLOW_INT_MKS_REG 0x304
+#define SEC_Q_FAIL_RINT_REG 0x400
+#define SEC_Q_FLOW_RINT_REG 0x404
+#define SEC_Q_FAIL_INT_STATUS_REG 0x500
+#define SEC_Q_FLOW_INT_STATUS_REG 0x504
+#define SEC_Q_STATUS_REG 0x600
+#define SEC_Q_RD_PTR_REG 0x604
+#define SEC_Q_PRO_PTR_REG 0x608
+#define SEC_Q_OUTORDER_WR_PTR_REG 0x60c
+#define SEC_Q_OT_CNT_STATUS_REG 0x610
+#define SEC_Q_INORDER_BD_NUM_ST_REG 0x650
+#define SEC_Q_INORDER_GET_FLAG_ST_REG 0x654
+#define SEC_Q_INORDER_ADD_FLAG_ST_REG 0x658
+#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG 0x65c
+#define SEC_Q_RD_DONE_PTR_REG 0x660
+#define SEC_Q_CPL_Q_BD_NUM_ST_REG 0x700
+#define SEC_Q_CPL_Q_PTR_ST_REG 0x704
+#define SEC_Q_CPL_Q_H_ADDR_ST_REG 0x708
+#define SEC_Q_CPL_Q_L_ADDR_ST_REG 0x70c
+#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG 0x710
+#define SEC_Q_WRR_ID_CHECK_REG 0x714
+#define SEC_Q_CPLQ_FULL_CHECK_REG 0x718
+#define SEC_Q_SUCCESS_BD_CNT_REG 0x800
+#define SEC_Q_FAIL_BD_CNT_REG 0x804
+#define SEC_Q_GET_BD_CNT_REG 0x808
+#define SEC_Q_IVLD_CNT_REG 0x80c
+#define SEC_Q_BD_PROC_GET_CNT_REG 0x810
+#define SEC_Q_BD_PROC_DONE_CNT_REG 0x814
+#define SEC_Q_LAT_CLR_REG 0x850
+#define SEC_Q_PKT_LAT_MAX_REG 0x854
+#define SEC_Q_PKT_LAT_AVG_REG 0x858
+#define SEC_Q_PKT_LAT_MIN_REG 0x85c
+#define SEC_Q_ID_CLR_CFG_REG 0x900
+#define SEC_Q_1ST_BD_ERR_ID_REG 0x904
+#define SEC_Q_1ST_AUTH_FAIL_ID_REG 0x908
+#define SEC_Q_1ST_RD_ERR_ID_REG 0x90c
+#define SEC_Q_1ST_ECC2_ERR_ID_REG 0x910
+#define SEC_Q_1ST_IVLD_ID_REG 0x914
+#define SEC_Q_1ST_BD_WR_ERR_ID_REG 0x918
+#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG 0x91c
+#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG 0x920
+
+struct sec_debug_bd_info {
+#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M GENMASK(22, 0)
+ u32 soft_err_check;
+#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M GENMASK(9, 0)
+ u32 hard_err_check;
+ u32 icv_mac1st_word;
+#define SEC_DEBUG_BD_INFO_GET_ID_M GENMASK(19, 0)
+ u32 sec_get_id;
+ /* W4---W15 */
+ u32 reserv_left[12];
+};
+
+struct sec_out_bd_info {
+#define SEC_OUT_BD_INFO_Q_ID_M GENMASK(11, 0)
+#define SEC_OUT_BD_INFO_ECC_2BIT_ERR BIT(14)
+ u16 data;
+};
+
+#define SEC_MAX_DEVICES 8
+static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
+static DEFINE_MUTEX(sec_id_lock);
+
+static int sec_queue_map_io(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct resource *res;
+
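+ /*
+ * Resource indices 0 and 1 are the shared SEC_COMMON and SEC_SAA
+ * regions mapped by sec_map_io(); the per-queue register windows
+ * follow from index 2.
+ */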
+ res = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM,
+ 2 + queue->queue_id);
+ if (!res) {
+ dev_err(dev, "Failed to get queue %d memory resource\n",
+ queue->queue_id);
+ return -ENOMEM;
+ }
+ queue->regs = ioremap(res->start, resource_size(res));
+ if (!queue->regs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_queue_unmap_io(struct sec_queue *queue)
+{
+ iounmap(queue->regs);
+}
+
+static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (ar_pkg)
+ regval |= SEC_Q_ARUSER_CFG_PKG;
+ else
+ regval &= ~SEC_Q_ARUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (aw_pkg)
+ regval |= SEC_Q_AWUSER_CFG_PKG;
+ else
+ regval &= ~SEC_Q_AWUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_clk_en(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
+ return 0;
+ i++;
+ } while (i < 10);
+ dev_err(info->dev, "sec clock enable fail!\n");
+
+ return -EIO;
+}
+
+static int sec_clk_dis(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
+ return 0;
+ i++;
+ } while (i < 10);
+ dev_err(info->dev, "sec clock disable fail!\n");
+
+ return -EIO;
+}
+
+static int sec_reset_whole_module(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ bool is_reset, b_is_reset;
+ u32 i = 0;
+
+ writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (is_reset && b_is_reset)
+ break;
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset req failed\n");
+ return -EIO;
+ }
+ }
+
+ i = 0;
+ writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (!is_reset && !b_is_reset)
+ break;
+
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset dreq failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void sec_bd_endian_little(struct sec_dev_info *info)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
+ writel_relaxed(regval, addr);
+}
+
+/*
+ * sec_cache_config - configure optimum cache placement
+ */
+static void sec_cache_config(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /* Check that translation is occurring */
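+ /*
+ * The two values written below are assumed to be AXI cache attribute
+ * settings taken from the hardware documentation: one for when an
+ * IOMMU is translating for the device and one for when it is not.
+ */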
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ writel_relaxed(0x44cf9e, addr);
+ else
+ writel_relaxed(0x4cfd9, addr);
+}
+
+static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clkgate)
+ regval |= SEC_CTRL2_CLK_GATE_EN;
+ else
+ regval &= ~SEC_CTRL2_CLK_GATE_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clr_ce)
+ regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (snap_en)
+ regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
+{
+ void __iomem *base = info->regs[SEC_SAA];
+ int i;
+
+ for (i = 0; i < 10; i++)
+ writel_relaxed(hash_mask[i],
+ base + SEC_IPV6_MASK_TABLE_X_REG(i));
+}
+
+static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
+{
+ if (hash_mask & SEC_HASH_IPV4_MASK) {
+ dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n ");
+ return -EINVAL;
+ }
+
+ writel_relaxed(hash_mask,
+ info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);
+
+ return 0;
+}
+
+static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ /* Always disable write back of normal bd */
+ regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;
+
+ if (cfg)
+ regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
+ else
+ regval |= SEC_DEBUG_BD_CFG_WB_EN;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
+ SEC_SAA_CTRL_REG(saa_indx);
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (en)
+ regval |= SEC_SAA_CTRL_GET_QM_EN;
+ else
+ regval &= ~SEC_SAA_CTRL_GET_QM_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
+ u32 saa_int_mask)
+{
+ writel_relaxed(saa_int_mask,
+ info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
+ saa_indx * SEC_SAA_ADDR_SIZE);
+}
+
+static void sec_streamid(struct sec_dev_info *info, int i)
+{
+ #define SEC_SID 0x600
+ #define SEC_VMID 0
+
+ writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
+ info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
+}
+
+static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AR_FROCE_ALLOC) {
+ regval |= SEC_Q_ARUSER_CFG_FA;
+ regval &= ~SEC_Q_ARUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_ARUSER_CFG_FA;
+ regval |= SEC_Q_ARUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AW_FROCE_ALLOC) {
+ regval |= SEC_Q_AWUSER_CFG_FA;
+ regval &= ~SEC_Q_AWUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_AWUSER_CFG_FA;
+ regval |= SEC_Q_AWUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
+{
+ void __iomem *base = queue->regs;
+ u32 regval;
+
+ regval = readl_relaxed(base + SEC_Q_CFG_REG);
+ if (reorder)
+ regval |= SEC_Q_CFG_REORDER;
+ else
+ regval &= ~SEC_Q_CFG_REORDER;
+ writel_relaxed(regval, base + SEC_Q_CFG_REG);
+}
+
+static void sec_queue_depth(struct sec_queue *queue, u32 depth)
+{
+ void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
+ regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
+}
+
+static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
+}
+
+static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
+}
+
+static void sec_queue_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_irq_enable(struct sec_queue *queue)
+{
+ writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_abn_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
+}
+
+static void sec_queue_stop(struct sec_queue *queue)
+{
+ disable_irq(queue->task_irq);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static void sec_queue_start(struct sec_queue *queue)
+{
+ sec_queue_irq_enable(queue);
+ enable_irq(queue->task_irq);
+ queue->expected = 0;
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+ writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
+{
+ int i;
+
+ mutex_lock(&info->dev_lock);
+
+ /* Get the first idle queue in SEC device */
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ if (!info->queues[i].in_use) {
+ info->queues[i].in_use = true;
+ info->queues_in_use++;
+ mutex_unlock(&info->dev_lock);
+
+ return &info->queues[i];
+ }
+ }
+ mutex_unlock(&info->dev_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int sec_queue_free(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+
+ if (queue->queue_id >= SEC_Q_NUM) {
+ dev_err(info->dev, "No queue %d\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ if (!queue->in_use) {
+ dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&info->dev_lock);
+ queue->in_use = false;
+ info->queues_in_use--;
+ mutex_unlock(&info->dev_lock);
+
+ return 0;
+}
+
+static irqreturn_t sec_isr_handle_th(int irq, void *q)
+{
+ sec_queue_irq_disable(q);
+ return IRQ_WAKE_THREAD;
+}
+
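+/*
+ * Completions can be reported by the hardware out of order, but callbacks
+ * must run in submission order. sec_isr_handle() records each completed
+ * index in the 'unprocessed' bitmap and only delivers callbacks from
+ * 'expected' onwards, so an early completion waits in the bitmap until all
+ * earlier elements have also finished.
+ */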
+static irqreturn_t sec_isr_handle(int irq, void *q)
+{
+ struct sec_queue *queue = q;
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
+ struct sec_out_bd_info *outorder_msg;
+ struct sec_bd_info *msg;
+ u32 ooo_read, ooo_write;
+ void __iomem *base = queue->regs;
+ int q_id;
+
+ ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+
+ while ((ooo_write != ooo_read) && (msg->w0 & SEC_BD_W0_DONE)) {
+ /*
+ * Must be before callback otherwise blocks adding other chained
+ * elements
+ */
+ set_bit(q_id, queue->unprocessed);
+ if (q_id == queue->expected)
+ while (test_bit(queue->expected, queue->unprocessed)) {
+ clear_bit(queue->expected, queue->unprocessed);
+ msg = msg_ring->vaddr + queue->expected;
+ msg->w0 &= ~SEC_BD_W0_DONE;
+ msg_ring->callback(msg,
+ queue->shadow[queue->expected]);
+ queue->shadow[queue->expected] = NULL;
+ queue->expected = (queue->expected + 1) %
+ SEC_QUEUE_LEN;
+ atomic_dec(&msg_ring->used);
+ }
+
+ ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
+ writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+ }
+
+ sec_queue_irq_enable(queue);
+
+ return IRQ_HANDLED;
+}
+
+static int sec_queue_irq_init(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+ int irq = queue->task_irq;
+ int ret;
+
+ ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
+ IRQF_TRIGGER_RISING, queue->name, queue);
+ if (ret) {
+ dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
+ return ret;
+ }
+ disable_irq(irq);
+
+ return 0;
+}
+
+static int sec_queue_irq_uninit(struct sec_queue *queue)
+{
+ free_irq(queue->task_irq, queue);
+
+ return 0;
+}
+
+static struct sec_dev_info *sec_device_get(void)
+{
+ struct sec_dev_info *sec_dev = NULL;
+ struct sec_dev_info *this_sec_dev;
+ int least_busy_n = SEC_Q_NUM + 1;
+ int i;
+
+ /* Find which one is least busy and use that first */
+ for (i = 0; i < SEC_MAX_DEVICES; i++) {
+ this_sec_dev = sec_devices[i];
+ if (this_sec_dev &&
+ this_sec_dev->queues_in_use < least_busy_n) {
+ least_busy_n = this_sec_dev->queues_in_use;
+ sec_dev = this_sec_dev;
+ }
+ }
+
+ return sec_dev;
+}
+
+static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
+{
+ struct sec_queue *queue;
+
+ queue = sec_alloc_queue(info);
+ if (IS_ERR(queue)) {
+ dev_err(info->dev, "alloc sec queue failed! %ld\n",
+ PTR_ERR(queue));
+ return queue;
+ }
+
+ sec_queue_start(queue);
+
+ return queue;
+}
+
+/**
+ * sec_queue_alloc_start_safe() - get a hw queue from appropriate instance
+ *
+ * This function does extremely simplistic load balancing. It does not take into
+ * account NUMA locality of the accelerator, or which cpu has requested the
+ * queue. Future work may focus on optimizing this in order to improve full
+ * machine throughput.
+ */
+struct sec_queue *sec_queue_alloc_start_safe(void)
+{
+ struct sec_dev_info *info;
+ struct sec_queue *queue = ERR_PTR(-ENODEV);
+
+ mutex_lock(&sec_id_lock);
+ info = sec_device_get();
+ if (!info)
+ goto unlock;
+
+ queue = sec_queue_alloc_start(info);
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return queue;
+}
+
+/**
+ * sec_queue_stop_release() - free up a hw queue for reuse
+ * @queue: The queue we are done with.
+ *
+ * This will stop the current queue, terminating any transactions
+ * that are in flight, and return it to the pool of available hw queues.
+ */
+int sec_queue_stop_release(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ int ret;
+
+ sec_queue_stop(queue);
+
+ ret = sec_queue_free(queue);
+ if (ret)
+ dev_err(dev, "Releasing queue failed %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to check.
+ *
+ * We need to know if we have an empty queue for some of the chaining modes
+ * as if it is not empty we may need to hold the message in a software queue
+ * until the hw queue is drained.
+ */
+bool sec_queue_empty(struct sec_queue *queue)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return !atomic_read(&msg_ring->used);
+}
+
+/**
+ * sec_queue_send() - queue up a single operation in the hw queue
+ * @queue: The queue in which to put the message
+ * @msg: The message
+ * @ctx: Context to be put in the shadow array and passed back to cb on result.
+ *
+ * This function will return -EAGAIN if the queue is currently full.
+ */
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ void __iomem *base = queue->regs;
+ u32 write, read;
+
+ mutex_lock(&msg_ring->lock);
+ read = readl(base + SEC_Q_RD_PTR_REG);
+ write = readl(base + SEC_Q_WR_PTR_REG);
+ if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
+ mutex_unlock(&msg_ring->lock);
+ return -EAGAIN;
+ }
+ memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
+ queue->shadow[write] = ctx;
+ write = (write + 1) % SEC_QUEUE_LEN;
+
+ /* Ensure content updated before queue advance */
+ wmb();
+ writel(write, base + SEC_Q_WR_PTR_REG);
+
+ atomic_inc(&msg_ring->used);
+ mutex_unlock(&msg_ring->lock);
+
+ return 0;
+}
+
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
+}
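+
+/*
+ * Illustrative only: a caller (such as sec_algs.c) would typically pair
+ * sec_queue_can_enqueue() with sec_queue_send(), falling back to the
+ * software queue when the hardware ring is full ('el' and 'nr_elements'
+ * are placeholder names):
+ *
+ *	if (!sec_queue_can_enqueue(queue, nr_elements)) {
+ *		kfifo_put(&queue->softqueue, el);
+ *		return 0;
+ *	}
+ *	if (sec_queue_send(queue, &el->req, sec_req) == -EAGAIN)
+ *		kfifo_put(&queue->softqueue, el);
+ */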
+
+static void sec_queue_hw_init(struct sec_queue *queue)
+{
+ sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+ sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC);
+ sec_queue_ar_pkgattr(queue, 1);
+ sec_queue_aw_pkgattr(queue, 1);
+
+ /* Enable out of order queue */
+ sec_queue_reorder(queue, true);
+
+ /* Interrupt after a single complete element */
+ writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);
+
+ sec_queue_depth(queue, SEC_QUEUE_LEN - 1);
+
+ sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
+
+ sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
+
+ sec_queue_errbase_addr(queue, queue->ring_db.paddr);
+
+ writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);
+
+ sec_queue_abn_irq_disable(queue);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+}
+
+static int sec_hw_init(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ u32 sec_ipv4_mask = 0;
+ u32 sec_ipv6_mask[10] = {};
+ int ret;
+ u32 i;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /*
+ * Enable all available processing unit clocks.
+ * Only the first cluster is usable with translations.
+ */
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ info->num_saas = 5;
+ else
+ info->num_saas = 10;
+
+ writel_relaxed(GENMASK(info->num_saas - 1, 0),
+ info->regs[SEC_SAA] + SEC_CLK_EN_REG);
+
+ /* 32 bit little endian */
+ sec_bd_endian_little(info);
+
+ sec_cache_config(info);
+
+ /* Data axi port write and read outstanding config as per datasheet */
+ sec_data_axiwr_otsd_cfg(info, 0x7);
+ sec_data_axird_otsd_cfg(info, 0x7);
+
+ /* Enable clock gating */
+ sec_clk_gate_en(info, true);
+
+ /* Set CNT_CYC register not read clear */
+ sec_comm_cnt_cfg(info, false);
+
+ /* Enable CNT_CYC */
+ sec_commsnap_en(info, false);
+
+ writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);
+
+ ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
+ if (ret) {
+ dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
+ return -EIO;
+ }
+
+ sec_ipv6_hashmask(info, sec_ipv6_mask);
+
+ /* do not use debug bd */
+ sec_set_dbg_bd_cfg(info, 0);
+
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_streamid(info, i);
+ /* Same QoS for all queues */
+ writel_relaxed(0x3f,
+ info->regs[SEC_SAA] +
+ SEC_Q_WEIGHT_CFG_REG(i));
+ }
+ }
+
+ for (i = 0; i < info->num_saas; i++) {
+ sec_saa_getqm_en(info, i, 1);
+ sec_saa_int_mask(info, i, 0);
+ }
+
+ return 0;
+}
+
+static void sec_hw_exit(struct sec_dev_info *info)
+{
+ int i;
+
+ for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
+ sec_saa_int_mask(info, i, (u32)~0);
+ sec_saa_getqm_en(info, i, 0);
+ }
+}
+
+static void sec_queue_base_init(struct sec_dev_info *info,
+ struct sec_queue *queue, int queue_id)
+{
+ queue->dev_info = info;
+ queue->queue_id = queue_id;
+ snprintf(queue->name, sizeof(queue->name),
+ "%s_%d", dev_name(info->dev), queue->queue_id);
+}
+
+static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+
+ if (!res) {
+ dev_err(info->dev, "Memory resource %d not found\n", i);
+ return -EINVAL;
+ }
+
+ info->regs[i] = devm_ioremap(info->dev, res->start,
+ resource_size(res));
+ if (!info->regs[i]) {
+ dev_err(info->dev,
+ "Memory resource %d could not be remapped\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sec_base_init(struct sec_dev_info *info,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sec_map_io(info, pdev);
+ if (ret)
+ return ret;
+
+ ret = sec_clk_en(info);
+ if (ret)
+ return ret;
+
+ ret = sec_reset_whole_module(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ ret = sec_hw_init(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ return 0;
+
+sec_clk_disable:
+ sec_clk_dis(info);
+
+ return ret;
+}
+
+static void sec_base_exit(struct sec_dev_info *info)
+{
+ sec_hw_exit(info);
+ sec_clk_dis(info);
+}
+
+#define SEC_Q_CMD_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
+#define SEC_Q_CQ_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
+#define SEC_Q_DB_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)
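+
+/*
+ * Each ring holds SEC_QUEUE_LEN fixed size elements; the sizes are rounded
+ * up to whole pages to suit the coherent DMA allocations below.
+ */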
+
+static int sec_queue_res_cfg(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
+ struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
+ struct sec_queue_ring_db *ring_db = &queue->ring_db;
+ int ret;
+
+ ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
+ &ring_cmd->paddr,
+ GFP_KERNEL);
+ if (!ring_cmd->vaddr)
+ return -ENOMEM;
+
+ atomic_set(&ring_cmd->used, 0);
+ mutex_init(&ring_cmd->lock);
+ ring_cmd->callback = sec_alg_callback;
+
+ ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
+ &ring_cq->paddr,
+ GFP_KERNEL);
+ if (!ring_cq->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cmd;
+ }
+
+ ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
+ &ring_db->paddr,
+ GFP_KERNEL);
+ if (!ring_db->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cq;
+ }
+ queue->task_irq = platform_get_irq(to_platform_device(dev),
+ queue->queue_id * 2 + 1);
+ if (queue->task_irq <= 0) {
+ ret = -EINVAL;
+ goto err_free_ring_db;
+ }
+
+ return 0;
+
+err_free_ring_db:
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+err_free_ring_cq:
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+err_free_ring_cmd:
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+
+ return ret;
+}
+
+static void sec_queue_free_ring_pages(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+}
+
+static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
+ int queue_id)
+{
+ int ret;
+
+ sec_queue_base_init(info, queue, queue_id);
+
+ ret = sec_queue_res_cfg(queue);
+ if (ret)
+ return ret;
+
+ ret = sec_queue_map_io(queue);
+ if (ret) {
+ dev_err(info->dev, "Queue map failed %d\n", ret);
+ sec_queue_free_ring_pages(queue);
+ return ret;
+ }
+
+ sec_queue_hw_init(queue);
+
+ return 0;
+}
+
+static void sec_queue_unconfig(struct sec_dev_info *info,
+ struct sec_queue *queue)
+{
+ sec_queue_unmap_io(queue);
+ sec_queue_free_ring_pages(queue);
+}
+
+static int sec_id_alloc(struct sec_dev_info *info)
+{
+ int ret = 0;
+ int i;
+
+ mutex_lock(&sec_id_lock);
+
+ for (i = 0; i < SEC_MAX_DEVICES; i++)
+ if (!sec_devices[i])
+ break;
+ if (i == SEC_MAX_DEVICES) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ info->sec_id = i;
+ sec_devices[info->sec_id] = info;
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return ret;
+}
+
+static void sec_id_free(struct sec_dev_info *info)
+{
+ mutex_lock(&sec_id_lock);
+ sec_devices[info->sec_id] = NULL;
+ mutex_unlock(&sec_id_lock);
+}
+
+static int sec_probe(struct platform_device *pdev)
+{
+ struct sec_dev_info *info;
+ struct device *dev = &pdev->dev;
+ int i, j;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(dev, "Failed to set 64 bit dma mask %d", ret);
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ mutex_init(&info->dev_lock);
+
+ info->hw_sgl_pool = dmam_pool_create("sgl", dev,
+ sizeof(struct sec_hw_sgl), 64, 0);
+ if (!info->hw_sgl_pool) {
+ dev_err(dev, "Failed to create sec sgl dma pool\n");
+ return -ENOMEM;
+ }
+
+ ret = sec_base_init(info, pdev);
+ if (ret) {
+ dev_err(dev, "Base initialization fail! %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ ret = sec_queue_config(info, &info->queues[i], i);
+ if (ret)
+ goto queues_unconfig;
+
+ ret = sec_queue_irq_init(&info->queues[i]);
+ if (ret) {
+ sec_queue_unconfig(info, &info->queues[i]);
+ goto queues_unconfig;
+ }
+ }
+
+ ret = sec_algs_register();
+ if (ret) {
+ dev_err(dev, "Failed to register algorithms with crypto %d\n",
+ ret);
+ goto queues_unconfig;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = sec_id_alloc(info);
+ if (ret)
+ goto algs_unregister;
+
+ return 0;
+
+algs_unregister:
+ sec_algs_unregister();
+queues_unconfig:
+ for (j = i - 1; j >= 0; j--) {
+ sec_queue_irq_uninit(&info->queues[j]);
+ sec_queue_unconfig(info, &info->queues[j]);
+ }
+ sec_base_exit(info);
+
+ return ret;
+}
+
+static int sec_remove(struct platform_device *pdev)
+{
+ struct sec_dev_info *info = platform_get_drvdata(pdev);
+ int i;
+
+ /* Unexpose as soon as possible, reuse during remove is fine */
+ sec_id_free(info);
+
+ sec_algs_unregister();
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_queue_irq_uninit(&info->queues[i]);
+ sec_queue_unconfig(info, &info->queues[i]);
+ }
+
+ sec_base_exit(info);
+
+ return 0;
+}
+
+static const __maybe_unused struct of_device_id sec_match[] = {
+ { .compatible = "hisilicon,hip06-sec" },
+ { .compatible = "hisilicon,hip07-sec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sec_match);
+
+static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
+ { "HISI02C1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
+
+static struct platform_driver sec_driver = {
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .driver = {
+ .name = "hisi_sec_platform_driver",
+ .of_match_table = sec_match,
+ .acpi_match_table = ACPI_PTR(sec_acpi_match),
+ },
+};
+module_platform_driver(sec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
+MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
new file mode 100644
index 000000000000..2d2f186674ba
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef _SEC_DRV_H_
+#define _SEC_DRV_H_
+
+#include <crypto/algapi.h>
+#include <linux/kfifo.h>
+
+#define SEC_MAX_SGE_NUM 64
+#define SEC_HW_RING_NUM 3
+
+#define SEC_CMD_RING 0
+#define SEC_OUTORDER_RING 1
+#define SEC_DBG_RING 2
+
+/* A reasonable length to balance memory use against flexibility */
+#define SEC_QUEUE_LEN 512
+
+struct sec_bd_info {
+#define SEC_BD_W0_T_LEN_M GENMASK(4, 0)
+#define SEC_BD_W0_T_LEN_S 0
+
+#define SEC_BD_W0_C_WIDTH_M GENMASK(6, 5)
+#define SEC_BD_W0_C_WIDTH_S 5
+#define SEC_C_WIDTH_AES_128BIT 0
+#define SEC_C_WIDTH_AES_8BIT 1
+#define SEC_C_WIDTH_AES_1BIT 2
+#define SEC_C_WIDTH_DES_64BIT 0
+#define SEC_C_WIDTH_DES_8BIT 1
+#define SEC_C_WIDTH_DES_1BIT 2
+
+#define SEC_BD_W0_C_MODE_M GENMASK(9, 7)
+#define SEC_BD_W0_C_MODE_S 7
+#define SEC_C_MODE_ECB 0
+#define SEC_C_MODE_CBC 1
+#define SEC_C_MODE_CTR 4
+#define SEC_C_MODE_CCM 5
+#define SEC_C_MODE_GCM 6
+#define SEC_C_MODE_XTS 7
+
+#define SEC_BD_W0_SEQ BIT(10)
+#define SEC_BD_W0_DE BIT(11)
+#define SEC_BD_W0_DAT_SKIP_M GENMASK(13, 12)
+#define SEC_BD_W0_DAT_SKIP_S 12
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_M GENMASK(17, 14)
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_S 14
+
+#define SEC_BD_W0_CIPHER_M GENMASK(19, 18)
+#define SEC_BD_W0_CIPHER_S 18
+#define SEC_CIPHER_NULL 0
+#define SEC_CIPHER_ENCRYPT 1
+#define SEC_CIPHER_DECRYPT 2
+
+#define SEC_BD_W0_AUTH_M GENMASK(21, 20)
+#define SEC_BD_W0_AUTH_S 20
+#define SEC_AUTH_NULL 0
+#define SEC_AUTH_MAC 1
+#define SEC_AUTH_VERIF 2
+
+#define SEC_BD_W0_AI_GEN BIT(22)
+#define SEC_BD_W0_CI_GEN BIT(23)
+#define SEC_BD_W0_NO_HPAD BIT(24)
+#define SEC_BD_W0_HM_M GENMASK(26, 25)
+#define SEC_BD_W0_HM_S 25
+#define SEC_BD_W0_ICV_OR_SKEY_EN_M GENMASK(28, 27)
+#define SEC_BD_W0_ICV_OR_SKEY_EN_S 27
+
+/* Multi purpose field - gran size bits for send, flag for recv */
+#define SEC_BD_W0_FLAG_M GENMASK(30, 29)
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_M GENMASK(30, 29)
+#define SEC_BD_W0_FLAG_S 29
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_S 29
+
+#define SEC_BD_W0_DONE BIT(31)
+ u32 w0;
+
+#define SEC_BD_W1_AUTH_GRAN_SIZE_M GENMASK(21, 0)
+#define SEC_BD_W1_AUTH_GRAN_SIZE_S 0
+#define SEC_BD_W1_M_KEY_EN BIT(22)
+#define SEC_BD_W1_BD_INVALID BIT(23)
+#define SEC_BD_W1_ADDR_TYPE BIT(24)
+
+#define SEC_BD_W1_A_ALG_M GENMASK(28, 25)
+#define SEC_BD_W1_A_ALG_S 25
+#define SEC_A_ALG_SHA1 0
+#define SEC_A_ALG_SHA256 1
+#define SEC_A_ALG_MD5 2
+#define SEC_A_ALG_SHA224 3
+#define SEC_A_ALG_HMAC_SHA1 8
+#define SEC_A_ALG_HMAC_SHA224 10
+#define SEC_A_ALG_HMAC_SHA256 11
+#define SEC_A_ALG_HMAC_MD5 12
+#define SEC_A_ALG_AES_XCBC 13
+#define SEC_A_ALG_AES_CMAC 14
+
+#define SEC_BD_W1_C_ALG_M GENMASK(31, 29)
+#define SEC_BD_W1_C_ALG_S 29
+#define SEC_C_ALG_DES 0
+#define SEC_C_ALG_3DES 1
+#define SEC_C_ALG_AES 2
+
+ u32 w1;
+
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_M GENMASK(15, 0)
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_S 0
+#define SEC_BD_W2_GRAN_NUM_M GENMASK(31, 16)
+#define SEC_BD_W2_GRAN_NUM_S 16
+ u32 w2;
+
+#define SEC_BD_W3_AUTH_LEN_OFFSET_M GENMASK(9, 0)
+#define SEC_BD_W3_AUTH_LEN_OFFSET_S 0
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_M GENMASK(19, 10)
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_S 10
+#define SEC_BD_W3_MAC_LEN_M GENMASK(24, 20)
+#define SEC_BD_W3_MAC_LEN_S 20
+#define SEC_BD_W3_A_KEY_LEN_M GENMASK(29, 25)
+#define SEC_BD_W3_A_KEY_LEN_S 25
+#define SEC_BD_W3_C_KEY_LEN_M GENMASK(31, 30)
+#define SEC_BD_W3_C_KEY_LEN_S 30
+#define SEC_KEY_LEN_AES_128 0
+#define SEC_KEY_LEN_AES_192 1
+#define SEC_KEY_LEN_AES_256 2
+#define SEC_KEY_LEN_DES 1
+#define SEC_KEY_LEN_3DES_3_KEY 1
+#define SEC_KEY_LEN_3DES_2_KEY 3
+ u32 w3;
+
+ /* W4,5 */
+ union {
+ u32 authkey_addr_lo;
+ u32 authiv_addr_lo;
+ };
+ union {
+ u32 authkey_addr_hi;
+ u32 authiv_addr_hi;
+ };
+
+ /* W6,7 */
+ u32 cipher_key_addr_lo;
+ u32 cipher_key_addr_hi;
+
+ /* W8,9 */
+ u32 cipher_iv_addr_lo;
+ u32 cipher_iv_addr_hi;
+
+ /* W10,11 */
+ u32 data_addr_lo;
+ u32 data_addr_hi;
+
+ /* W12,13 */
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ /* W14,15 */
+ u32 cipher_destin_addr_lo;
+ u32 cipher_destin_addr_hi;
+};
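+
+/*
+ * Illustrative only: fields within each BD word are packed using the _S
+ * (shift) and _M (mask) pairs above, for example to request AES-CBC
+ * encryption in w0:
+ *
+ *	bd->w0 &= ~(SEC_BD_W0_CIPHER_M | SEC_BD_W0_C_MODE_M);
+ *	bd->w0 |= (SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S) &
+ *		SEC_BD_W0_CIPHER_M;
+ *	bd->w0 |= (SEC_C_MODE_CBC << SEC_BD_W0_C_MODE_S) & SEC_BD_W0_C_MODE_M;
+ */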
+
+enum sec_mem_region {
+ SEC_COMMON = 0,
+ SEC_SAA,
+ SEC_NUM_ADDR_REGIONS
+};
+
+#define SEC_NAME_SIZE 64
+#define SEC_Q_NUM 16
+
+/**
+ * struct sec_queue_ring_cmd - store information about a SEC HW cmd ring
+ * @used: Local counter used to cheaply establish if the ring is empty.
+ * @lock: Protect against simultaneous adjusting of the read and write pointers.
+ * @vaddr: Virtual address for the ram pages used for the ring.
+ * @paddr: Physical address of the dma mapped region of ram used for the ring.
+ * @callback: Callback function called on a ring element completing.
+ */
+struct sec_queue_ring_cmd {
+ atomic_t used;
+ struct mutex lock;
+ struct sec_bd_info *vaddr;
+ dma_addr_t paddr;
+ void (*callback)(struct sec_bd_info *resp, void *ctx);
+};
+
+struct sec_debug_bd_info;
+struct sec_queue_ring_db {
+ struct sec_debug_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_out_bd_info;
+struct sec_queue_ring_cq {
+ struct sec_out_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_dev_info;
+
+enum sec_cipher_alg {
+ SEC_C_DES_ECB_64,
+ SEC_C_DES_CBC_64,
+
+ SEC_C_3DES_ECB_192_3KEY,
+ SEC_C_3DES_ECB_192_2KEY,
+
+ SEC_C_3DES_CBC_192_3KEY,
+ SEC_C_3DES_CBC_192_2KEY,
+
+ SEC_C_AES_ECB_128,
+ SEC_C_AES_ECB_192,
+ SEC_C_AES_ECB_256,
+
+ SEC_C_AES_CBC_128,
+ SEC_C_AES_CBC_192,
+ SEC_C_AES_CBC_256,
+
+ SEC_C_AES_CTR_128,
+ SEC_C_AES_CTR_192,
+ SEC_C_AES_CTR_256,
+
+ SEC_C_AES_XTS_128,
+ SEC_C_AES_XTS_256,
+
+ SEC_C_NULL,
+};
+
+/**
+ * struct sec_alg_tfm_ctx - hardware specific transformation context
+ * @cipher_alg: Cipher algorithm in use, including the encryption mode.
+ * @key: Key storage if required.
+ * @pkey: DMA address for the key storage.
+ * @req_template: Request template to save time on setup.
+ * @queue: The hardware queue associated with this tfm context.
+ * @lock: Protect key and pkey to ensure they are consistent.
+ * @auth_buf: Current context buffer for auth operations.
+ * @backlog: The backlog queue used for cases where our buffers aren't
+ * large enough.
+ */
+struct sec_alg_tfm_ctx {
+ enum sec_cipher_alg cipher_alg;
+ u8 *key;
+ dma_addr_t pkey;
+ struct sec_bd_info req_template;
+ struct sec_queue *queue;
+ struct mutex lock;
+ u8 *auth_buf;
+ struct list_head backlog;
+};
+
+/**
+ * struct sec_request - data associated with a single crypto request
+ * @elements: List of subparts of this request (hardware size restriction)
+ * @num_elements: The number of subparts (used as an optimization)
+ * @lock: Protect elements of this structure against concurrent change.
+ * @tfm_ctx: hardware specific context.
+ * @len_in: length of in sgl from upper layers
+ * @len_out: length of out sgl from upper layers
+ * @dma_iv: initialization vector - physical address
+ * @err: store used to track errors across subelements of this request.
+ * @req_base: pointer to base element of associated crypto context.
+ * This is needed to allow shared handling of skcipher, ahash etc.
+ * @cb: completion callback.
+ * @backlog_head: list head to allow backlog maintenance.
+ *
+ * The hardware is limited in the maximum size of data that it can
+ * process from a single BD. Typically this is fairly large (32MB)
+ * but still requires the complexity of splitting the incoming
+ * skreq up into a number of elements complete with appropriate
+ * iv chaining.
+ */
+struct sec_request {
+ struct list_head elements;
+ int num_elements;
+ struct mutex lock;
+ struct sec_alg_tfm_ctx *tfm_ctx;
+ int len_in;
+ int len_out;
+ dma_addr_t dma_iv;
+ int err;
+ struct crypto_async_request *req_base;
+ void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
+ struct list_head backlog_head;
+};
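+
+/*
+ * A request larger than the per-BD hardware limit is therefore represented
+ * as a list of sec_request_el slices: 'num_elements' lets completion
+ * handling cheaply detect the final slice, and 'err' accumulates any
+ * failure seen along the way.
+ */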
+
+/**
+ * struct sec_request_el - A subpart of a request.
+ * @head: allow us to attach this to the list in the sec_request
+ * @req: hardware block descriptor corresponding to this request subpart
+ * @in: hardware sgl for input - virtual address
+ * @dma_in: hardware sgl for input - physical address
+ * @sgl_in: scatterlist for this request subpart
+ * @out: hardware sgl for output - virtual address
+ * @dma_out: hardware sgl for output - physical address
+ * @sgl_out: scatterlist for this request subpart
+ * @sec_req: The request which this subpart forms a part of
+ * @el_length: Number of bytes in this subpart. Needed to locate
+ * last ivsize chunk for iv chaining.
+ */
+struct sec_request_el {
+ struct list_head head;
+ struct sec_bd_info req;
+ struct sec_hw_sgl *in;
+ dma_addr_t dma_in;
+ struct scatterlist *sgl_in;
+ struct sec_hw_sgl *out;
+ dma_addr_t dma_out;
+ struct scatterlist *sgl_out;
+ struct sec_request *sec_req;
+ size_t el_length;
+};
+
+/**
+ * struct sec_queue - All the information about a HW queue
+ * @dev_info: The parent SEC device to which this queue belongs.
+ * @task_irq: Completion interrupt for the queue.
+ * @name: Human readable queue description also used as irq name.
+ * @ring_cmd: The command ring for this queue.
+ * @ring_cq: The out of order completion ring for this queue.
+ * @ring_db: The debug ring for this queue.
+ * @regs: The iomapped device registers
+ * @queue_id: Index of the queue used for naming and resource selection.
+ * @in_use: Flag to say if the queue is in use.
+ * @expected: The next expected element to finish assuming we were in order.
+ * @unprocessed: A bitmap to track which OoO elements are done but not handled.
+ * @softqueue: A software queue used when chaining requirements prevent direct
+ * use of the hardware queues.
+ * @havesoftqueue: A flag to say we have a software queue - as we may need
+ * one for the current mode.
+ * @queuelock: Protect the soft queue from concurrent changes to avoid
+ * potential data loss races.
+ * @shadow: Pointers back to the shadow copy of the hardware ring element
+ * needed because we can't store any context reference in the bd element.
+ */
+struct sec_queue {
+ struct sec_dev_info *dev_info;
+ int task_irq;
+ char name[SEC_NAME_SIZE];
+ struct sec_queue_ring_cmd ring_cmd;
+ struct sec_queue_ring_cq ring_cq;
+ struct sec_queue_ring_db ring_db;
+ void __iomem *regs;
+ u32 queue_id;
+ bool in_use;
+ int expected;
+
+ DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
+ DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
+ bool havesoftqueue;
+ struct mutex queuelock;
+ void *shadow[SEC_QUEUE_LEN];
+};
+
+/**
+ * struct sec_hw_sge - Track each of the 64 element SEC HW SGL entries
+ * @buf: The IOV dma address for this entry.
+ * @len: Length of this IOV.
+ * @pad: Reserved space.
+ */
+struct sec_hw_sge {
+ dma_addr_t buf;
+ unsigned int len;
+ unsigned int pad;
+};
+
+/**
+ * struct sec_hw_sgl - One hardware SGL entry.
+ * @next_sgl: The next entry if we need to chain dma address. Null if last.
+ * @entry_sum_in_chain: The full count of SGEs - only matters for first SGL.
+ * @entry_sum_in_sgl: The number of SGEs in this SGL element.
+ * @flag: Unused in skciphers.
+ * @serial_num: Unused in skciphers.
+ * @cpuid: Currently unused.
+ * @data_bytes_in_sgl: Count of bytes from all SGEs in this SGL.
+ * @next: Virtual address used to stash the next sgl - useful in completion.
+ * @reserved: A reserved field not currently used.
+ * @sge_entries: The (up to) 64 Scatter Gather Entries, representing IOVs.
+ * @node: Currently unused.
+ */
+struct sec_hw_sgl {
+ dma_addr_t next_sgl;
+ u16 entry_sum_in_chain;
+ u16 entry_sum_in_sgl;
+ u32 flag;
+ u64 serial_num;
+ u32 cpuid;
+ u32 data_bytes_in_sgl;
+ struct sec_hw_sgl *next;
+ u64 reserved;
+ struct sec_hw_sge sge_entries[SEC_MAX_SGE_NUM];
+ u8 node[16];
+};
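+
+/*
+ * Illustrative only: chaining two hardware SGLs, where 'sgl'/'next' and
+ * 'next_dma' are assumed to be virtual and DMA addresses obtained from
+ * hw_sgl_pool:
+ *
+ *	sgl->next_sgl = next_dma;
+ *	sgl->next = next;
+ *	sgl->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
+ *	sgl->entry_sum_in_chain = 2 * SEC_MAX_SGE_NUM;
+ */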
+
+struct dma_pool;
+
+/**
+ * struct sec_dev_info - The full SEC unit comprising queues and processors.
+ * @sec_id: Index used to track which SEC this is when more than one is present.
+ * @num_saas: The number of backend processing engines enabled.
+ * @regs: iomapped register regions shared by whole SEC unit.
+ * @dev_lock: Protects concurrent queue allocation / freeing for the SEC.
+ * @queues_in_use: The number of queues currently allocated.
+ * @queues: The 16 queues that this SEC instance provides.
+ * @dev: Device pointer.
+ * @hw_sgl_pool: DMA pool used to minimise mapping for the scatter gather lists.
+ */
+struct sec_dev_info {
+ int sec_id;
+ int num_saas;
+ void __iomem *regs[SEC_NUM_ADDR_REGIONS];
+ struct mutex dev_lock;
+ int queues_in_use;
+ struct sec_queue queues[SEC_Q_NUM];
+ struct device *dev;
+ struct dma_pool *hw_sgl_pool;
+};
+
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
+int sec_queue_stop_release(struct sec_queue *queue);
+struct sec_queue *sec_queue_alloc_start_safe(void);
+bool sec_queue_empty(struct sec_queue *queue);
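+
+/*
+ * Typical (illustrative) lifecycle for a consumer of the hooks above:
+ *
+ *	struct sec_queue *q = sec_queue_alloc_start_safe();
+ *
+ *	if (IS_ERR(q))
+ *		return PTR_ERR(q);
+ *	...
+ *	ret = sec_queue_send(q, &bd, ctx);
+ *	...
+ *	sec_queue_stop_release(q);
+ */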
+
+/* Algorithm specific elements from sec_algs.c */
+void sec_alg_callback(struct sec_bd_info *resp, void *ctx);
+int sec_algs_register(void);
+void sec_algs_unregister(void);
+
+#endif /* _SEC_DRV_H_ */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4e86f864a952..7e71043457a6 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -33,7 +30,19 @@ MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, htable_offset;
- int i;
+ int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+
+ if (priv->version == EIP197B) {
+ cs_rc_max = EIP197B_CS_RC_MAX;
+ cs_ht_wc = EIP197B_CS_HT_WC;
+ cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
+ } else {
+ cs_rc_max = EIP197D_CS_RC_MAX;
+ cs_ht_wc = EIP197D_CS_HT_WC;
+ cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
+ }
/* Enable the record cache memory access */
val = readl(priv->base + EIP197_CS_RAM_CTRL);
@@ -54,7 +63,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Clear all records */
- for (i = 0; i < EIP197_CS_RC_MAX; i++) {
+ for (i = 0; i < cs_rc_max; i++) {
u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
@@ -64,14 +73,14 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
if (i == 0)
val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
- else if (i == EIP197_CS_RC_MAX - 1)
+ else if (i == cs_rc_max - 1)
val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
writel(val, priv->base + offset + sizeof(u32));
}
/* Clear the hash table entries */
- htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
- for (i = 0; i < 64; i++)
+ htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+ for (i = 0; i < cs_ht_wc; i++)
writel(GENMASK(29, 0),
priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
@@ -82,23 +91,23 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Write head and tail pointers of the record free chain */
val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
- EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
+ EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
writel(val, priv->base + EIP197_TRC_FREECHAIN);
/* Configure the record cache #1 */
- val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
- EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
+ val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+ EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
writel(val, priv->base + EIP197_TRC_PARAMS2);
/* Configure the record cache #2 */
- val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
+ val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
EIP197_TRC_PARAMS_HTABLE_SZ(2);
writel(val, priv->base + EIP197_TRC_PARAMS);
}
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
- const struct firmware *fw, u32 ctrl,
+ const struct firmware *fw, int pe, u32 ctrl,
u32 prog_en)
{
const u32 *data = (const u32 *)fw->data;
@@ -112,7 +121,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -120,7 +129,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Release engine from reset */
val = readl(EIP197_PE(priv) + ctrl);
@@ -132,35 +141,62 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
const struct firmware *fw[FW_NB];
- int i, j, ret = 0;
+ char fw_path[31], *dir = NULL;
+ int i, j, ret = 0, pe;
u32 val;
+ switch (priv->version) {
+ case EIP197B:
+ dir = "eip197b";
+ break;
+ case EIP197D:
+ dir = "eip197d";
+ break;
+ default:
+ /* No firmware is required */
+ return 0;
+ }
+
for (i = 0; i < FW_NB; i++) {
- ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ snprintf(fw_path, sizeof(fw_path), "inside-secure/%s/%s", dir, fw_name[i]);
+ ret = request_firmware(&fw[i], fw_path, priv->dev);
if (ret) {
- dev_err(priv->dev,
- "Failed to request firmware %s (%d)\n",
- fw_name[i], ret);
- goto release_fw;
- }
- }
-
- /* Clear the scratchpad memory */
- val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
- EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
- EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
- EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
+ if (priv->version != EIP197B)
+ goto release_fw;
- memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
- EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
-
- eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
- EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+ /* Fall back to the old firmware location for the
+ * EIP197b.
+ */
+ ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to request firmware %s (%d)\n",
+ fw_name[i], ret);
+ goto release_fw;
+ }
+ }
+ }
- eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
- EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Clear the scratchpad memory */
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+ val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+ EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+ EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+ EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+
+ memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
+ EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+
+ eip197_write_firmware(priv, fw[FW_IFPP], pe,
+ EIP197_PE_ICE_FPP_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+
+ eip197_write_firmware(priv, fw[FW_IPUE], pe,
+ EIP197_PE_ICE_PUE_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ }
release_fw:
for (j = 0; j < i; j++)
@@ -256,7 +292,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 version, val;
- int i, ret;
+ int i, ret, pe;
/* Determine endianess and configure byte swap */
version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
@@ -267,6 +303,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
+ /* For EIP197 set maximum number of TX commands to 2^5 = 32 */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
@@ -282,82 +322,94 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Clear any pending interrupt */
writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- /* Data Fetch Engine configuration */
-
- /* Reset all DFE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- if (priv->version == EIP197) {
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
- val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
- val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
-
- /* Leave the DFE threads reset state */
- writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
-
- if (priv->version == EIP197) {
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN |
- GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* Data Store Engine configuration */
-
- /* Reset all DSE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
-
- /* Wait for all DSE threads to complete */
- while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
- GENMASK(15, 12)) != GENMASK(15, 12))
- ;
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
- val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
- /* FIXME: instability issues can occur for EIP97 but disabling it impact
- * performances.
- */
- if (priv->version == EIP197)
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
+ /* Processing Engine configuration */
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Data Fetch Engine configuration */
- /* Leave the DSE threads reset state */
- writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Reset all DFE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* Processing Engine configuration */
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+ val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+ val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+ /* Leave the DFE threads reset state */
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(9),
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(7),
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* H/W capabilities selection */
- val = EIP197_FUNCTION_RSVD;
- val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
- val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
- val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
- val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
- val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
- writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
+ /* Data Store Engine configuration */
+
+ /* Reset all DSE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Wait for all DSE threads to complete */
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+ GENMASK(15, 12)) != GENMASK(15, 12))
+ ;
+
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+ val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+ /* FIXME: instability issues can occur for EIP97 but disabling this
+ * impacts performance.
+ */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+ /* Leave the DSE threads reset state */
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+ EIP197_PE_OUT_DBUF_THRES_MAX(8),
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+ /* Processing Engine configuration */
+
+ /* H/W capabilities selection */
+ val = EIP197_FUNCTION_RSVD;
+ val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+ val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
+ val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
+ val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
+ val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+ val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
+ val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+ val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+ }
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
@@ -408,18 +460,20 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
- /* Enable command descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Enable command descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Enable result descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Enable result descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+ }
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- if (priv->version == EIP197) {
+ if (priv->version == EIP197B || priv->version == EIP197D) {
eip197_trc_cache_init(priv);
ret = eip197_load_firmwares(priv);
@@ -452,7 +506,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
- struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
/* If a request wasn't properly dequeued because of a lack of resources,
@@ -476,16 +529,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
}
handle_req:
- request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request)
- goto request_failed;
-
ctx = crypto_tfm_ctx(req->tfm);
- ret = ctx->send(req, ring, request, &commands, &results);
- if (ret) {
- kfree(request);
+ ret = ctx->send(req, ring, &commands, &results);
+ if (ret)
goto request_failed;
- }
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -494,14 +541,8 @@ handle_req:
* to the engine because the input data was cached, continue to
* dequeue other requests as this is valid and not an error.
*/
- if (!commands && !results) {
- kfree(request);
+ if (!commands && !results)
continue;
- }
-
- spin_lock_bh(&priv->ring[ring].egress_lock);
- list_add_tail(&request->list, &priv->ring[ring].list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
cdesc += commands;
rdesc += results;
@@ -519,7 +560,7 @@ finalize:
if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests += nreq;
@@ -528,7 +569,7 @@ finalize:
priv->ring[ring].busy = true;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
@@ -560,6 +601,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
return -EINVAL;
}
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req)
+{
+ int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+ priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+ int i = safexcel_ring_first_rdr_index(priv, ring);
+
+ return priv->ring[ring].rdr_req[i];
+}
+
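The two helpers above are the core of this rework: instead of keeping a separate, lock-protected list of in-flight requests, the driver now stores the owning crypto_async_request in an array slot indexed by the position of its result descriptor in the ring. A minimal model of that bookkeeping, with illustrative names and an assumed ring size rather than the driver's own:

#include <stddef.h>

#define RING_SIZE 400 /* illustrative ring size */

struct ring_model {
	void *req[RING_SIZE]; /* one slot per result descriptor */
	size_t write;         /* index of the next descriptor produced */
	size_t read;          /* index of the oldest unconsumed descriptor */
};

/* Remember which request owns the descriptor just written. */
static void model_req_set(struct ring_model *r, void *req)
{
	r->req[r->write] = req;
	r->write = (r->write + 1) % RING_SIZE;
}

/* Fetch the request owning the oldest completed descriptor. */
static void *model_req_get(struct ring_model *r)
{
	void *req = r->req[r->read];

	r->read = (r->read + 1) % RING_SIZE;
	return req;
}

Because the array indices follow the descriptor positions directly, no separate list bookkeeping is needed, which is what lets the egress_lock disappear throughout the rest of the patch.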
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -588,21 +647,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request)
+ dma_addr_t ctxr_dma, int ring)
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
int ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Prepare command descriptor */
cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
- if (IS_ERR(cdesc)) {
- ret = PTR_ERR(cdesc);
- goto unlock;
- }
+ if (IS_ERR(cdesc))
+ return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
@@ -617,21 +671,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
goto cdesc_rollback;
}
- request->req = async;
- goto unlock;
+ safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+ return ret;
cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-unlock:
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
int ring)
{
- struct safexcel_request *sreq;
+ struct crypto_async_request *req;
struct safexcel_context *ctx;
int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
@@ -646,28 +699,22 @@ handle_results:
goto requests_left;
for (i = 0; i < nreq; i++) {
- spin_lock_bh(&priv->ring[ring].egress_lock);
- sreq = list_first_entry(&priv->ring[ring].list,
- struct safexcel_request, list);
- list_del(&sreq->list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- ctx = crypto_tfm_ctx(sreq->req->tfm);
- ndesc = ctx->handle_result(priv, ring, sreq->req,
+ req = safexcel_rdr_req_get(priv, ring);
+
+ ctx = crypto_tfm_ctx(req->tfm);
+ ndesc = ctx->handle_result(priv, ring, req,
&should_complete, &ret);
if (ndesc < 0) {
- kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
goto acknowledge;
}
if (should_complete) {
local_bh_disable();
- sreq->req->complete(sreq->req, ret);
+ req->complete(req, ret);
local_bh_enable();
}
- kfree(sreq);
tot_descs += ndesc;
handled++;
}
@@ -686,7 +733,7 @@ acknowledge:
goto handle_results;
requests_left:
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests -= handled;
safexcel_try_push_requests(priv, ring);
@@ -694,7 +741,7 @@ requests_left:
if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
@@ -785,17 +832,29 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
}
static struct safexcel_alg_template *safexcel_algs[] = {
+ &safexcel_alg_ecb_des,
+ &safexcel_alg_cbc_des,
+ &safexcel_alg_ecb_des3_ede,
+ &safexcel_alg_cbc_des3_ede,
&safexcel_alg_ecb_aes,
&safexcel_alg_cbc_aes,
+ &safexcel_alg_md5,
&safexcel_alg_sha1,
&safexcel_alg_sha224,
&safexcel_alg_sha256,
+ &safexcel_alg_sha384,
+ &safexcel_alg_sha512,
+ &safexcel_alg_hmac_md5,
&safexcel_alg_hmac_sha1,
&safexcel_alg_hmac_sha224,
&safexcel_alg_hmac_sha256,
+ &safexcel_alg_hmac_sha384,
+ &safexcel_alg_hmac_sha512,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha384_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha512_cbc_aes,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -805,6 +864,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
safexcel_algs[i]->priv = priv;
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -820,6 +882,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
fail:
for (j = 0; j < i; j++) {
+ if (!(safexcel_algs[j]->engines & priv->version))
+ continue;
+
if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -836,6 +901,9 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
int i;
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -847,9 +915,21 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
- u32 val, mask;
+ u32 val, mask = 0;
val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+ /* Read number of PEs from the engine */
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
+ mask = EIP197_N_PES_MASK;
+ break;
+ default:
+ mask = EIP97_N_PES_MASK;
+ }
+ priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
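The decoding above packs two pieces of information into the single HIA_OPTIONS word: the processing-engine count lives in a field starting at bit 4 (wider on the EIP197 variants than on the EIP97), and bits 27:25 hold an exponent from which a ring-count mask is derived. A standalone sketch of the same field extraction, assuming the bit layout from the header hunk below:

#include <stdint.h>

#define N_PES_OFFSET   4
#define N_PES_MASK_197 0x1f /* GENMASK(4, 0) */
#define N_PES_MASK_97  0x07 /* GENMASK(2, 0) */

/* Decode the PE count from a raw HIA_OPTIONS value; is_eip197
 * selects the wider field used by the EIP197 variants.
 */
static uint32_t decode_pes(uint32_t options, int is_eip197)
{
	uint32_t mask = is_eip197 ? N_PES_MASK_197 : N_PES_MASK_97;

	return (options >> N_PES_OFFSET) & mask;
}

/* Bits 27:25 carry an exponent; the ring-count mask is 2^exp - 1. */
static uint32_t decode_ring_mask(uint32_t options)
{
	uint32_t exp = (options >> 25) & 0x7;

	return (1u << exp) - 1;
}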
@@ -867,7 +947,9 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
struct safexcel_register_offsets *offsets = &priv->offsets;
- if (priv->version == EIP197) {
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
offsets->hia_aic = EIP197_HIA_AIC_BASE;
offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
@@ -878,7 +960,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
offsets->pe = EIP197_PE_BASE;
- } else {
+ break;
+ case EIP97IES:
offsets->hia_aic = EIP97_HIA_AIC_BASE;
offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
@@ -889,6 +972,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
offsets->pe = EIP97_PE_BASE;
+ break;
}
}
@@ -906,6 +990,9 @@ static int safexcel_probe(struct platform_device *pdev)
priv->dev = dev;
priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ priv->flags |= EIP197_TRC_CACHE;
+
safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -957,6 +1044,13 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv);
+ priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+ GFP_KERNEL);
+ if (!priv->ring) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
for (i = 0; i < priv->config.rings; i++) {
char irq_name[6] = {0}; /* "ringX\0" */
char wq_name[9] = {0}; /* "wq_ringX\0" */
@@ -969,6 +1063,14 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
+ priv->ring[i].rdr_req = devm_kzalloc(dev,
+ sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ GFP_KERNEL);
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
@@ -1004,9 +1106,7 @@ static int safexcel_probe(struct platform_device *pdev)
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
- INIT_LIST_HEAD(&priv->ring[i].list);
spin_lock_init(&priv->ring[i].lock);
- spin_lock_init(&priv->ring[i].egress_lock);
spin_lock_init(&priv->ring[i].queue_lock);
}
@@ -1034,6 +1134,24 @@ err_core_clk:
return ret;
}
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->config.rings; i++) {
+ /* Clear any pending interrupt */
+ writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+ writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+ /* Reset the CDR base address */
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+ /* Reset the RDR base address */
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ }
+}
static int safexcel_remove(struct platform_device *pdev)
{
@@ -1041,6 +1159,8 @@ static int safexcel_remove(struct platform_device *pdev)
int i;
safexcel_unregister_algorithms(priv);
+ safexcel_hw_reset_rings(priv);
+
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++)
@@ -1051,12 +1171,26 @@ static int safexcel_remove(struct platform_device *pdev)
static const struct of_device_id safexcel_of_match_table[] = {
{
+ .compatible = "inside-secure,safexcel-eip97ies",
+ .data = (void *)EIP97IES,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197b",
+ .data = (void *)EIP197B,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197d",
+ .data = (void *)EIP197D,
+ },
+ {
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip97",
- .data = (void *)EIP97,
+ .data = (void *)EIP97IES,
},
{
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip197",
- .data = (void *)EIP197,
+ .data = (void *)EIP197B,
},
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 8b3ee9b59f53..65624a81f0fd 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __SAFEXCEL_H__
@@ -95,13 +92,13 @@
#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x0000
-#define EIP197_HIA_DFE_THR_CTRL 0x0000
-#define EIP197_HIA_DFE_THR_STAT 0x0004
-#define EIP197_HIA_DSE_CFG 0x0000
-#define EIP197_HIA_DSE_THR_CTRL 0x0000
-#define EIP197_HIA_DSE_THR_STAT 0x0004
-#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_DFE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n) (0x0010 + (8 * (n)))
#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
@@ -114,18 +111,18 @@
#define EIP197_HIA_MST_CTRL 0xfff4
#define EIP197_HIA_OPTIONS 0xfff8
#define EIP197_HIA_VERSION 0xfffc
-#define EIP197_PE_IN_DBUF_THRES 0x0000
-#define EIP197_PE_IN_TBUF_THRES 0x0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
-#define EIP197_PE_ICE_PUE_CTRL 0x0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
-#define EIP197_PE_ICE_FPP_CTRL 0x0d80
-#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
-#define EIP197_PE_OUT_DBUF_THRES 0x1c00
-#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_PE_IN_DBUF_THRES(n) (0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
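With several processing engines, the register banks repeat at fixed strides, which the reworked macros above fold into the offset: 128 bytes per PE for the HIA DFE/DSE banks, 8 bytes per PE for the ring-arbiter control word, and 0x2000 bytes per PE for the EIP96 register file. A quick sketch of the resulting arithmetic (a plain host-side illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

#define HIA_DFE_CFG(n)      (0x0000 + (128 * (n)))
#define HIA_RA_PE_CTRL(n)   (0x0010 + (8 * (n)))
#define PE_IN_DBUF_THRES(n) (0x0000 + (0x2000 * (n)))

int main(void)
{
	/* Offsets for processing engine 1 relative to each bank. */
	printf("DFE_CFG(1)       = 0x%04x\n", HIA_DFE_CFG(1));       /* 0x0080 */
	printf("RA_PE_CTRL(1)    = 0x%04x\n", HIA_RA_PE_CTRL(1));    /* 0x0018 */
	printf("IN_DBUF_THRES(1) = 0x%04x\n", PE_IN_DBUF_THRES(1));  /* 0x2000 */
	return 0;
}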
/* EIP197-specific registers, no indirection */
@@ -184,6 +181,11 @@
#define EIP197_HIA_RA_PE_CTRL_RESET BIT(31)
#define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET 4
+#define EIP197_N_PES_MASK GENMASK(4, 0)
+#define EIP97_N_PES_MASK GENMASK(2, 0)
+
/* EIP197_HIA_AIC_R_ENABLE_CTRL */
#define EIP197_CDR_IRQ(n) BIT((n) * 2)
#define EIP197_RDR_IRQ(n) BIT((n) * 2 + 1)
@@ -217,6 +219,7 @@
#define WR_CACHE_4BITS (WR_CACHE_3BITS << 1 | BIT(0))
#define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0)
#define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20)
#define EIP197_MST_CTRL_BYTE_SWAP BIT(24)
#define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25)
@@ -287,7 +290,7 @@ struct safexcel_context_record {
u32 control0;
u32 control1;
- __le32 data[24];
+ __le32 data[40];
} __packed;
/* control0 */
@@ -305,14 +308,19 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5)
#define CONTEXT_CONTROL_SIZE(n) ((n) << 8)
#define CONTEXT_CONTROL_KEY_EN BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES (0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES (0x2 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23)
#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
@@ -327,6 +335,11 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bytes.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE 64
+
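This constant pairs with the digest-count computation added to safexcel_hash.c further down: the number of processed bytes is converted into 64-byte blocks before being written into the context record. A simplified single-word version of that conversion (the driver itself tracks the length in two 64-bit words; the helper name here is illustrative):

#include <stdint.h>

#define EIP197_COUNTER_BLOCK_SIZE 64

/* Convert a processed-bytes total into the block count the engine
 * expects in the context record. Returns 0 on success, -1 when the
 * count no longer fits the field (the hardware limitation the driver
 * warns about).
 */
static int digest_block_count(uint64_t processed, uint32_t *out)
{
	uint64_t count = processed / EIP197_COUNTER_BLOCK_SIZE;

	if (count > UINT32_MAX)
		return -1;

	*out = (uint32_t)count;
	return 0;
}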
/* EIP197_CS_RAM_CTRL */
#define EIP197_TRC_ENABLE_0 BIT(4)
#define EIP197_TRC_ENABLE_1 BIT(5)
@@ -349,13 +362,19 @@ struct safexcel_context_record {
#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
/* Cache helpers */
-#define EIP197_CS_RC_MAX 52
+#define EIP197B_CS_RC_MAX 52
+#define EIP197D_CS_RC_MAX 96
#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
#define EIP197_CS_RC_NEXT(x) (x)
#define EIP197_CS_RC_PREV(x) ((x) << 10)
#define EIP197_RC_NULL 0x3ff
-#define EIP197_CS_TRC_REC_WC 59
-#define EIP197_CS_TRC_LG_REC_WC 73
+#define EIP197B_CS_TRC_REC_WC 59
+#define EIP197D_CS_TRC_REC_WC 64
+#define EIP197B_CS_TRC_LG_REC_WC 73
+#define EIP197D_CS_TRC_LG_REC_WC 80
+#define EIP197B_CS_HT_WC 64
+#define EIP197D_CS_HT_WC 256
+
/* Result data */
struct result_data_desc {
@@ -450,6 +469,7 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
#define EIP197_OPTION_64BIT_CTX BIT(1)
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
#define EIP197_TYPE_EXTENDED 0x3
@@ -480,7 +500,7 @@ enum eip197_fw {
FW_NB
};
-struct safexcel_ring {
+struct safexcel_desc_ring {
void *base;
void *base_end;
dma_addr_t base_dma;
@@ -489,8 +509,7 @@ struct safexcel_ring {
void *write;
void *read;
- /* number of elements used in the ring */
- unsigned nr;
+ /* descriptor element offset */
unsigned offset;
};
@@ -500,12 +519,8 @@ enum safexcel_alg_type {
SAFEXCEL_ALG_TYPE_AHASH,
};
-struct safexcel_request {
- struct list_head list;
- struct crypto_async_request *req;
-};
-
struct safexcel_config {
+ u32 pes;
u32 rings;
u32 cd_size;
@@ -521,9 +536,40 @@ struct safexcel_work_data {
int ring;
};
+struct safexcel_ring {
+ spinlock_t lock;
+
+ struct workqueue_struct *workqueue;
+ struct safexcel_work_data work_data;
+
+ /* command/result rings */
+ struct safexcel_desc_ring cdr;
+ struct safexcel_desc_ring rdr;
+
+ /* result ring crypto API request */
+ struct crypto_async_request **rdr_req;
+
+ /* queue */
+ struct crypto_queue queue;
+ spinlock_t queue_lock;
+
+ /* Number of requests in the engine. */
+ int requests;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for current requests when bailing out of the dequeueing
+ * function because not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
+};
+
enum safexcel_eip_version {
- EIP97,
- EIP197,
+ EIP97IES = BIT(0),
+ EIP197B = BIT(1),
+ EIP197D = BIT(2),
};
struct safexcel_register_offsets {
@@ -539,6 +585,10 @@ struct safexcel_register_offsets {
u32 pe;
};
+enum safexcel_flags {
+ EIP197_TRC_CACHE = BIT(0),
+};
+
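The new flags word turns the scattered priv->version == EIP197 comparisons into capability tests, so adding EIP197D did not require touching every call site. A self-contained illustration of the pattern (struct and function names are placeholders, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define EIP197_TRC_CACHE (1u << 0)

struct priv_model {
	uint32_t flags;
};

/* Probe time: derive capabilities once from the matched engine. */
static void set_caps(struct priv_model *p, bool has_trc_cache)
{
	if (has_trc_cache)
		p->flags |= EIP197_TRC_CACHE;
}

/* Call sites test the capability bit, not an exact version value. */
static bool needs_cache_invalidation(const struct priv_model *p)
{
	return p->flags & EIP197_TRC_CACHE;
}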
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
@@ -548,46 +598,19 @@ struct safexcel_crypto_priv {
enum safexcel_eip_version version;
struct safexcel_register_offsets offsets;
+ u32 flags;
/* context DMA pool */
struct dma_pool *context_pool;
atomic_t ring_used;
- struct {
- spinlock_t lock;
- spinlock_t egress_lock;
-
- struct list_head list;
- struct workqueue_struct *workqueue;
- struct safexcel_work_data work_data;
-
- /* command/result rings */
- struct safexcel_ring cdr;
- struct safexcel_ring rdr;
-
- /* queue */
- struct crypto_queue queue;
- spinlock_t queue_lock;
-
- /* Number of requests in the engine. */
- int requests;
-
- /* The ring is currently handling at least one request */
- bool busy;
-
- /* Store for current requests when bailing out of the dequeueing
- * function when no enough resources are available.
- */
- struct crypto_async_request *req;
- struct crypto_async_request *backlog;
- } ring[EIP197_MAX_RINGS];
+ struct safexcel_ring *ring;
};
struct safexcel_context {
int (*send)(struct crypto_async_request *req, int ring,
- struct safexcel_request *request, int *commands,
- int *results);
+ int *commands, int *results);
int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *req, bool *complete,
int *ret);
@@ -600,13 +623,13 @@ struct safexcel_context {
};
struct safexcel_ahash_export_state {
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
u32 digest;
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA256_BLOCK_SIZE];
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u8 cache[SHA512_BLOCK_SIZE];
};
/*
@@ -617,6 +640,7 @@ struct safexcel_ahash_export_state {
struct safexcel_alg_template {
struct safexcel_crypto_priv *priv;
enum safexcel_alg_type type;
+ u32 engines;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
@@ -635,16 +659,16 @@ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request);
+ dma_addr_t ctxr_dma, int ring);
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr);
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr);
int safexcel_select_ring(struct safexcel_crypto_priv *priv);
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
@@ -655,21 +679,44 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
int ring_id,
bool first, bool last,
dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
void safexcel_inv_complete(struct crypto_async_request *req, int error);
int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
void *istate, void *ostate);
/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_ecb_aes;
extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
extern struct safexcel_alg_template safexcel_alg_sha1;
extern struct safexcel_alg_template safexcel_alg_sha224;
extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 6bb60fda2043..3aef1d43e435 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/device.h>
@@ -15,6 +12,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -27,21 +25,28 @@ enum safexcel_cipher_direction {
SAFEXCEL_DECRYPT,
};
+enum safexcel_cipher_alg {
+ SAFEXCEL_DES,
+ SAFEXCEL_3DES,
+ SAFEXCEL_AES,
+};
+
struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
u32 mode;
+ enum safexcel_cipher_alg alg;
bool aead;
__le32 key[8];
unsigned int key_len;
/* All the below is AEAD specific */
- u32 alg;
+ u32 hash_alg;
u32 state_sz;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_cipher_req {
@@ -57,10 +62,24 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
unsigned offset = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ offset = DES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
+ case SAFEXCEL_3DES:
+ offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ case SAFEXCEL_AES:
+ offset = AES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ break;
+ }
}
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -145,7 +164,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
return ret;
}
- if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
ctx->base.needs_inv = true;
@@ -179,12 +198,12 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
/* Encryption key */
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
memcmp(ctx->key, keys.enckey, keys.enckeylen))
ctx->base.needs_inv = true;
/* Auth key */
- switch (ctx->alg) {
+ switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
keys.authkeylen, &istate, &ostate))
@@ -200,6 +219,16 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
keys.authkeylen, &istate, &ostate))
goto badkey;
break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+ if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+ if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
@@ -208,7 +237,7 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_RES_MASK);
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
ctx->base.needs_inv = true;
@@ -258,22 +287,28 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
if (ctx->aead)
cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
- ctx->alg;
-
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
- break;
- default:
- dev_err(priv->dev, "aes keysize not supported: %u\n",
- ctx->key_len);
- return -EINVAL;
+ ctx->hash_alg;
+
+ if (ctx->alg == SAFEXCEL_DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
+ } else if (ctx->alg == SAFEXCEL_3DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+ } else if (ctx->alg == SAFEXCEL_AES) {
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+ break;
+ default:
+ dev_err(priv->dev, "aes keysize not supported: %u\n",
+ ctx->key_len);
+ return -EINVAL;
+ }
}
ctrl_size = ctx->key_len / sizeof(u32);
@@ -298,7 +333,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -315,7 +349,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (src == dst) {
dma_unmap_sg(priv->dev, src,
@@ -335,8 +368,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
return ndesc;
}
-static int safexcel_aes_send(struct crypto_async_request *base, int ring,
- struct safexcel_request *request,
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct safexcel_cipher_req *sreq,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, unsigned int assoclen,
@@ -346,7 +378,7 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_command_desc *cdesc;
- struct safexcel_result_desc *rdesc;
+ struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
struct scatterlist *sg;
unsigned int totlen = cryptlen + assoclen;
int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -386,8 +418,6 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ctx->opad, ctx->state_sz);
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* command descriptors */
for_each_sg(src, sg, nr_src, i) {
int len = sg_dma_len(sg);
@@ -434,12 +464,12 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ret = PTR_ERR(rdesc);
goto rdesc_rollback;
}
+ if (first)
+ first_rdesc = rdesc;
n_rdesc++;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- request->req = base;
+ safexcel_rdr_req_set(priv, ring, first_rdesc, base);
*commands = n_cdesc;
*results = n_rdesc;
@@ -452,8 +482,6 @@ cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
if (src == dst) {
dma_unmap_sg(priv->dev, src,
sg_nents_for_len(src, totlen),
@@ -481,7 +509,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -491,17 +518,13 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
break;
}
- if (rdesc->result_data.error_code) {
- dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EIO;
- }
+ if (likely(!*ret))
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -577,15 +600,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
}
static int safexcel_cipher_send_inv(struct crypto_async_request *base,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
- request);
+ ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -596,7 +617,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
}
static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
@@ -605,21 +625,19 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
+ ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
return ret;
}
static int safexcel_aead_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+ int *commands, int *results)
{
struct aead_request *req = aead_request_cast(async);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -628,14 +646,13 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
- req->dst, req->cryptlen, req->assoclen,
+ ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+ req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
return ret;
@@ -705,9 +722,10 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
-static int safexcel_aes(struct crypto_async_request *base,
+static int safexcel_queue_req(struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
- enum safexcel_cipher_direction dir, u32 mode)
+ enum safexcel_cipher_direction dir, u32 mode,
+ enum safexcel_cipher_alg alg)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
@@ -715,10 +733,11 @@ static int safexcel_aes(struct crypto_async_request *base,
sreq->needs_inv = false;
sreq->direction = dir;
+ ctx->alg = alg;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 && ctx->base.needs_inv) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
@@ -745,14 +764,16 @@ static int safexcel_aes(struct crypto_async_request *base,
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
@@ -795,7 +816,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_skcipher_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "skcipher: invalidation error %d\n",
@@ -815,7 +836,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_aead_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "aead: invalidation error %d\n",
@@ -828,6 +849,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_ecb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_ecb_aes_encrypt,
@@ -838,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
.cra_name = "ecb(aes)",
.cra_driver_name = "safexcel-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -852,18 +874,21 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_cbc_aes_encrypt,
@@ -875,7 +900,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
.cra_name = "cbc(aes)",
.cra_driver_name = "safexcel-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -887,20 +912,234 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
},
};
+static int safexcel_cbc_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_cbc_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (len != DES_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ /* if the context exists and the key changed, invalidate it */
+ if (ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_cbc_des_encrypt,
+ .decrypt = safexcel_cbc_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "safexcel-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_ecb_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_ecb_des_encrypt,
+ .decrypt = safexcel_ecb_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "safexcel-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (len != DES3_EDE_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* if the context exists and the key changed, invalidate it */
+ if (ctx->base.ctxr_dma) {
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+ }
+
+ memcpy(ctx->key, key, len);
+
+ ctx->key_len = len;
+
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_cbc_des3_ede_encrypt,
+ .decrypt = safexcel_cbc_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "safexcel-cbc-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_ecb_des3_ede_encrypt,
+ .decrypt = safexcel_ecb_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "safexcel-ecb-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
@@ -926,13 +1165,14 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -943,7 +1183,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -960,13 +1200,14 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -977,7 +1218,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -994,13 +1235,14 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -1011,7 +1253,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1022,3 +1264,73 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
},
},
};
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index c77b0e1655a8..ac9282c1a5ec 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -1,14 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <crypto/hmac.h>
+#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -22,8 +20,8 @@ struct safexcel_ahash_ctx {
u32 alg;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
@@ -38,18 +36,26 @@ struct safexcel_ahash_req {
u32 digest;
u8 state_sz; /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
- u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+ if (req->len[1] > req->processed[1])
+ return 0xffffffff - (req->len[0] - req->processed[0]);
+
+ return req->len[0] - req->processed[0];
+}
+
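safexcel_queued_len() computes how many bytes are still pending while both the total and the processed lengths are tracked as two 64-bit words, needed because SHA-384/512 track a message length wider than 64 bits. The complementary maintenance step, carrying into the high word when the low word wraps, can be sketched as follows (the helper name is illustrative):

#include <stdint.h>

/* Add 'inc' bytes to a length tracked as two 64-bit words,
 * propagating the carry into the high word on overflow.
 */
static inline void len128_add(uint64_t len[2], uint64_t inc)
{
	len[0] += inc;
	if (len[0] < inc) /* low word wrapped */
		len[1]++;
}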
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
u32 input_length, u32 result_length)
{
@@ -72,9 +78,9 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_ahash_req *req,
struct safexcel_command_desc *cdesc,
- unsigned int digestsize,
- unsigned int blocksize)
+ unsigned int digestsize)
{
+ struct safexcel_crypto_priv *priv = ctx->priv;
int i;
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
@@ -82,12 +88,17 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= req->digest;
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
- if (req->processed) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (req->processed[0] || req->processed[1]) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
} else {
@@ -102,12 +113,28 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
* fields. Do this now as we need it to setup the first command
* descriptor.
*/
- if (req->processed) {
+ if (req->processed[0] || req->processed[1]) {
for (i = 0; i < digestsize / sizeof(u32); i++)
ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
- if (req->finish)
- ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+ if (req->finish) {
+ u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+ req->processed[1]);
+
+ /* This is a hardware limitation, as the
+ * counter must fit into a u32. This represents
+ * a fairly big amount of input data, so we
+ * shouldn't see this.
+ */
+ if (unlikely(count & 0xffff0000)) {
+ dev_warn(priv->dev,
+ "Input data is too big\n");
+ return;
+ }
+
+ ctx->base.ctxr->data[i] = cpu_to_le32(count);
+ }
}
} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
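
When a request finishes, the split byte counter has to be folded back into the single block counter stored in the context record. A hedged arithmetic sketch of that conversion, assuming EIP197_COUNTER_BLOCK_SIZE is 64 as defined elsewhere in this driver:

    #include <stdint.h>

    #define COUNTER_BLOCK_SIZE 64ULL /* assumed value of EIP197_COUNTER_BLOCK_SIZE */

    /* Returns 0 and stores the block count, or -1 when it would not fit
     * the context field (the count & 0xffff0000 check in the hunk above).
     */
    static int block_counter(uint64_t lo, uint64_t hi, uint32_t *out)
    {
            uint64_t count = lo / COUNTER_BLOCK_SIZE;

            count += (0xffffffffULL / COUNTER_BLOCK_SIZE) * hi;
            if (count & 0xffff0000)
                    return -1;

            *out = (uint32_t)count;
            return 0;
    }
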
@@ -126,11 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len;
+ u64 cache_len;
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -141,7 +167,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->nents) {
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
@@ -164,7 +189,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
- cache_len = sreq->len - sreq->processed;
+ cache_len = safexcel_queued_len(sreq);
if (cache_len)
memcpy(sreq->cache, sreq->cache_next, cache_len);
@@ -174,7 +199,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
@@ -185,9 +209,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, extra, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len;
- queued = len = req->len - req->processed;
+ queued = len = safexcel_queued_len(req);
if (queued <= crypto_ahash_blocksize(ahash))
cache_len = queued;
else
@@ -220,16 +245,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
cache_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, req->cache_dma)) {
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ if (dma_mapping_error(priv->dev, req->cache_dma))
return -EINVAL;
- }
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
@@ -260,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - sglen < 0)
+ if (queued < sglen)
sglen = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
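
Now that queued is a u64, the old bound "queued - sglen < 0" could never fire — unsigned subtraction wraps instead of going negative — which is why the hunk rewrites the test as "queued < sglen". The same point as a two-line illustration in plain C:

    #include <stdint.h>

    static int clamp_len(uint64_t queued, int sglen)
    {
            /* wrong: (queued - sglen < 0) is always false for unsigned queued */
            if (queued < (uint64_t)sglen)
                    sglen = queued;
            return sglen;
    }
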
@@ -282,8 +303,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
send_command:
/* Setup the context options */
- safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
- crypto_ahash_blocksize(ahash));
+ safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
@@ -303,10 +323,11 @@ send_command:
goto unmap_result;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
- req->processed += len;
- request->req = &areq->base;
+ req->processed[0] += len;
+ if (req->processed[0] < len)
+ req->processed[1]++;
*commands = n_cdesc;
*results = 1;
@@ -327,7 +348,6 @@ unmap_cache:
req->cache_sz = 0;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
@@ -335,16 +355,18 @@ static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
unsigned int state_w_sz = req->state_sz / sizeof(u32);
+ u64 processed;
int i;
+ processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
for (i = 0; i < state_w_sz; i++)
if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
return true;
- if (ctx->base.ctxr->data[state_w_sz] !=
- cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
+ if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
return true;
return false;
@@ -363,21 +385,16 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"hash: invalidate: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
- } else if (rdesc->result_data.error_code) {
- dev_err(priv->dev,
- "hash: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EINVAL;
+ } else {
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -413,7 +430,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int err;
- BUG_ON(priv->version == EIP97 && req->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
if (req->needs_inv) {
req->needs_inv = false;
@@ -428,15 +445,14 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
ret = safexcel_invalidate_cache(async, ctx->priv,
- ctx->base.ctxr_dma, ring, request);
+ ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -447,19 +463,17 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
}
static int safexcel_ahash_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int ret;
if (req->needs_inv)
- ret = safexcel_ahash_send_inv(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_inv(async, ring, commands, results);
else
- ret = safexcel_ahash_send_req(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_req(async, ring, commands, results);
+
return ret;
}
@@ -509,17 +523,17 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- int queued, cache_len;
+ u64 queued, cache_len;
- /* cache_len: everyting accepted by the driver but not sent yet,
- * tot sz handled by update() - last req sz - tot sz handled by send()
- */
- cache_len = req->len - areq->nbytes - req->processed;
/* queued: everything accepted by the driver which will be handled by
* the next send() calls.
* tot sz handled by update() - tot sz handled by send()
*/
- queued = req->len - req->processed;
+ queued = safexcel_queued_len(req);
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
+ cache_len = queued - areq->nbytes;
/*
* In case there isn't enough bytes to proceed (less than a
@@ -546,8 +560,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
req->needs_inv = false;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 &&
- !ctx->base.needs_inv && req->processed &&
+ if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+ (req->processed[0] || req->processed[1]) &&
req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
/* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
@@ -590,7 +604,9 @@ static int safexcel_ahash_update(struct ahash_request *areq)
if (!areq->nbytes)
return 0;
- req->len += areq->nbytes;
+ req->len[0] += areq->nbytes;
+ if (req->len[0] < areq->nbytes)
+ req->len[1]++;
safexcel_ahash_cache(areq);
@@ -605,7 +621,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- req->len - req->processed > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
return safexcel_ahash_enqueue(areq);
return 0;
@@ -620,8 +636,11 @@ static int safexcel_ahash_final(struct ahash_request *areq)
req->finish = true;
/* If we have an overall 0 length request */
- if (!(req->len + areq->nbytes)) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ memcpy(areq->result, md5_zero_message_hash,
+ MD5_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
memcpy(areq->result, sha1_zero_message_hash,
SHA1_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
@@ -630,6 +649,12 @@ static int safexcel_ahash_final(struct ahash_request *areq)
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
memcpy(areq->result, sha256_zero_message_hash,
SHA256_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+ memcpy(areq->result, sha384_zero_message_hash,
+ SHA384_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ memcpy(areq->result, sha512_zero_message_hash,
+ SHA512_DIGEST_SIZE);
return 0;
}
@@ -654,8 +679,10 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
- export->len = req->len;
- export->processed = req->processed;
+ export->len[0] = req->len[0];
+ export->len[1] = req->len[1];
+ export->processed[0] = req->processed[0];
+ export->processed[1] = req->processed[1];
export->digest = req->digest;
@@ -676,8 +703,10 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
if (ret)
return ret;
- req->len = export->len;
- req->processed = export->processed;
+ req->len[0] = export->len[0];
+ req->len[1] = export->len[1];
+ req->processed[0] = export->processed[0];
+ req->processed[1] = export->processed[1];
req->digest = export->digest;
@@ -743,7 +772,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_ahash_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
@@ -755,6 +784,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha1_init,
.update = safexcel_ahash_update,
@@ -908,8 +938,7 @@ int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
u8 *ipad, *opad;
int ret;
- tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(alg, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
@@ -963,7 +992,7 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- if (priv->version == EIP197 && ctx->base.ctxr) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
for (i = 0; i < state_sz / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
@@ -988,6 +1017,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha1_init,
.update = safexcel_ahash_update,
@@ -1051,6 +1081,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha256_init,
.update = safexcel_ahash_update,
@@ -1113,6 +1144,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha224_init,
.update = safexcel_ahash_update,
@@ -1168,6 +1200,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha224_init,
.update = safexcel_ahash_update,
@@ -1224,6 +1257,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha256_init,
.update = safexcel_ahash_update,
@@ -1251,3 +1285,375 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
},
},
};
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA512_H0);
+ req->state[1] = upper_32_bits(SHA512_H0);
+ req->state[2] = lower_32_bits(SHA512_H1);
+ req->state[3] = upper_32_bits(SHA512_H1);
+ req->state[4] = lower_32_bits(SHA512_H2);
+ req->state[5] = upper_32_bits(SHA512_H2);
+ req->state[6] = lower_32_bits(SHA512_H3);
+ req->state[7] = upper_32_bits(SHA512_H3);
+ req->state[8] = lower_32_bits(SHA512_H4);
+ req->state[9] = upper_32_bits(SHA512_H4);
+ req->state[10] = lower_32_bits(SHA512_H5);
+ req->state[11] = upper_32_bits(SHA512_H5);
+ req->state[12] = lower_32_bits(SHA512_H6);
+ req->state[13] = upper_32_bits(SHA512_H6);
+ req->state[14] = lower_32_bits(SHA512_H7);
+ req->state[15] = upper_32_bits(SHA512_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha512_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "safexcel-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA384_H0);
+ req->state[1] = upper_32_bits(SHA384_H0);
+ req->state[2] = lower_32_bits(SHA384_H1);
+ req->state[3] = upper_32_bits(SHA384_H1);
+ req->state[4] = lower_32_bits(SHA384_H2);
+ req->state[5] = upper_32_bits(SHA384_H2);
+ req->state[6] = lower_32_bits(SHA384_H3);
+ req->state[7] = upper_32_bits(SHA384_H3);
+ req->state[8] = lower_32_bits(SHA384_H4);
+ req->state[9] = upper_32_bits(SHA384_H4);
+ req->state[10] = lower_32_bits(SHA384_H5);
+ req->state[11] = upper_32_bits(SHA384_H5);
+ req->state[12] = lower_32_bits(SHA384_H6);
+ req->state[13] = upper_32_bits(SHA384_H6);
+ req->state[14] = lower_32_bits(SHA384_H7);
+ req->state[15] = upper_32_bits(SHA384_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha384_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "safexcel-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha512_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha512_digest,
+ .setkey = safexcel_hmac_sha512_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "safexcel-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha384_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha384_digest,
+ .setkey = safexcel_hmac_sha384_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "safexcel-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = MD5_H0;
+ req->state[1] = MD5_H1;
+ req->state[2] = MD5_H2;
+ req->state[3] = MD5_H3;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = MD5_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_md5_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "safexcel-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_md5_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+ MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_md5_digest,
+ .setkey = safexcel_hmac_md5_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "safexcel-hmac-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
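
The new sha384/sha512/md5 templates register under the standard crypto API names, so once the driver binds they are reachable from userspace through AF_ALG. A minimal sketch, assuming CONFIG_CRYPTO_USER_API_HASH is enabled and omitting error handling:

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "hash",
                    .salg_name   = "sha512", /* may be served by safexcel-sha512 */
            };
            unsigned char digest[64];
            int tfmfd, opfd;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
            opfd = accept(tfmfd, NULL, 0);

            write(opfd, "abc", 3);               /* feed the message */
            read(opfd, digest, sizeof(digest));  /* read back the digest */

            return 0;
    }
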
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index c9d2a8716b5b..eb75fa684876 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/dma-mapping.h>
@@ -14,8 +11,8 @@
#include "safexcel.h"
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr)
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr)
{
cdr->offset = sizeof(u32) * priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
@@ -24,7 +21,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!cdr->base)
return -ENOMEM;
cdr->write = cdr->base;
- cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
+ cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
rdr->offset = sizeof(u32) * priv->config.rd_offset;
@@ -34,7 +31,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!rdr->base)
return -ENOMEM;
rdr->write = rdr->base;
- rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+ rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
rdr->read = rdr->base;
return 0;
@@ -46,49 +43,73 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
}
static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->write;
- if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
- ring->write += ring->offset;
if (ring->write == ring->base_end)
ring->write = ring->base;
+ else
+ ring->write += ring->offset;
- ring->nr++;
return ptr;
}
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->read;
- if (!ring->nr)
+ if (ring->write == ring->read)
return ERR_PTR(-ENOENT);
- ring->read += ring->offset;
if (ring->read == ring->base_end)
ring->read = ring->base;
+ else
+ ring->read += ring->offset;
- ring->nr--;
return ptr;
}
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
- if (!ring->nr)
+ if (ring->write == ring->read)
return;
if (ring->write == ring->base)
- ring->write = ring->base_end - ring->offset;
+ ring->write = ring->base_end;
else
ring->write -= ring->offset;
-
- ring->nr--;
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
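
The ring code drops the nr element counter and derives full/empty purely from the read/write pointers; the price is one unusable slot, since read == write must unambiguously mean empty (hence base_end moving back to size - 1 above). The classic index-based form of the same discipline, as a hedged sketch:

    #define RING_SLOTS 8

    struct ring {
            unsigned int read;  /* consumer index */
            unsigned int write; /* producer index */
    };

    static int ring_empty(const struct ring *r)
    {
            return r->read == r->write;
    }

    static int ring_full(const struct ring *r)
    {
            /* producer one step behind consumer: the sacrificed slot */
            return (r->write + 1) % RING_SLOTS == r->read;
    }
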
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index e34d80b6b7e5..99ff54cc8a15 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -1183,8 +1183,7 @@ static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
u8 *opad;
int ret;
- tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index b182e941b0cd..ee0404e27a0f 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "mtk-platform.h"
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index ab6235b7ff22..55f34cfc43ff 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1487,8 +1487,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = tmpl->block_size;
base->cra_ctxsize = sizeof(struct n2_hash_ctx);
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 36afd6d8753c..c68df7e8bee1 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -24,6 +24,8 @@
#include <asm/icswx.h>
#include <asm/vas.h>
#include <asm/reg.h>
+#include <asm/opal-api.h>
+#include <asm/opal.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
@@ -753,7 +755,7 @@ static int nx842_open_percpu_txwins(void)
}
static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
- int vasid)
+ int vasid, int *ct)
{
struct vas_window *rxwin = NULL;
struct vas_rx_win_attr rxattr;
@@ -837,6 +839,15 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
coproc->vas.id = vasid;
nx842_add_coprocs_list(coproc, chip_id);
+ /*
+ * The (lpid, pid, tid) combination has to be unique for each
+ * coprocessor instance in the system, so to make it
+ * unique, skiboot uses the coprocessor type (such as 842 or
+ * GZIP) as the pid and provides this value to the kernel in
+ * the "pid" device-tree property.
+ */
+ *ct = pid;
+
return 0;
err_out:
@@ -850,6 +861,7 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
struct device_node *dn;
int chip_id, vasid, ret = 0;
int nx_fifo_found = 0;
+ int uninitialized_var(ct);
chip_id = of_get_ibm_chip_id(pn);
if (chip_id < 0) {
@@ -865,7 +877,7 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
for_each_child_of_node(pn, dn) {
if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
- ret = vas_cfg_coproc_info(dn, chip_id, vasid);
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct);
if (ret) {
of_node_put(dn);
return ret;
@@ -876,9 +888,22 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
if (!nx_fifo_found) {
pr_err("NX842 FIFO nodes are missing\n");
- ret = -EINVAL;
+ return -EINVAL;
}
+ /*
+ * Initialize NX instance for both high and normal priority FIFOs.
+ */
+ if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+ ret = opal_nx_coproc_init(chip_id, ct);
+ if (ret) {
+ pr_err("Failed to initialize NX for chip(%d): %d\n",
+ chip_id, ret);
+ ret = opal_error_code(ret);
+ }
+ } else
+ pr_warn("Firmware doesn't support NX initialization\n");
+
return ret;
}
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index c2f7d4befb55..ad3358e74f5c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -386,7 +386,6 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index becb738c897b..a6764af83c6d 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -288,7 +288,6 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index b6e183d58d73..92956bc6e45e 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -294,7 +294,6 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d1a1c74fb56a..0641185bd82f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1464,8 +1464,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1487,8 +1486,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1511,8 +1509,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1536,8 +1533,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1564,8 +1560,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1586,8 +1581,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1609,8 +1603,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1633,8 +1626,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1659,8 +1651,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1681,8 +1672,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1704,8 +1694,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1728,8 +1717,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 1c6cbda56afe..09d823d36d3a 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
return;
}
+ count -= initial;
+
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(count - initial));
+ : "d"(control_word), "b"(key), "c"(count));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
if (count < cbc_fetch_blocks)
return cbc_crypt(input, output, key, iv, control_word, count);
+ count -= initial;
+
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count-initial));
+ : "d" (control_word), "b" (key), "c" (count));
return iv;
}
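
Both padlock fixes hoist the count -= initial subtraction above the inline assembly, so the main rep xcrypt loop runs for the remainder only and the initial blocks are not processed a second time. The shape of the fix, as a hedged sketch with a hypothetical do_chunk() standing in for one xcrypt invocation:

    #include <stddef.h>

    /* hypothetical stand-in for one rep xcrypt pass over n blocks */
    static void do_chunk(const unsigned char *in, unsigned char *out, size_t n);

    static void process_split(const unsigned char *in, unsigned char *out,
                              size_t count, size_t initial)
    {
            count -= initial; /* remainder computed before any work is issued */

            if (initial)
                    do_chunk(in, out, initial);

            do_chunk(in + initial, out + initial, count);
    }
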
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d32c79328876..21e5cae0a1e0 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -247,8 +247,7 @@ static struct shash_alg sha1_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -271,8 +270,7 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -484,7 +482,6 @@ static struct shash_alg sha1_alg_nano = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -503,7 +500,6 @@ static struct shash_alg sha256_alg_nano = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index da8a2d3b5e9a..9225d060e18f 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -163,7 +163,7 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
return 0;
set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
reset_data->accel_dev = accel_dev;
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 718b32a3112e..1c3b36b75467 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 53227d70d397..d8a5db11b7ea 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -378,8 +378,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
else
return -EINVAL;
- ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
if (IS_ERR(ahash_tfm))
return PTR_ERR(ahash_tfm);
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
new file mode 100644
index 000000000000..e54249ccc009
--- /dev/null
+++ b/drivers/crypto/qcom-rng.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-18 Linaro Limited
+//
+// Based on msm-rng.c and downstream driver
+
+#include <crypto/internal/rng.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT 0x0000
+#define PRNG_STATUS 0x0004
+#define PRNG_LFSR_CFG 0x0100
+#define PRNG_CONFIG 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE BIT(1)
+#define PRNG_STATUS_DATA_AVAIL BIT(0)
+
+#define WORD_SZ 4
+
+struct qcom_rng {
+ struct mutex lock;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int skip_init;
+};
+
+struct qcom_rng_ctx {
+ struct qcom_rng *rng;
+};
+
+static struct qcom_rng *qcom_rng_dev;
+
+static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+{
+ unsigned int currsize = 0;
+ u32 val;
+
+ /* read random data from hardware */
+ do {
+ val = readl_relaxed(rng->base + PRNG_STATUS);
+ if (!(val & PRNG_STATUS_DATA_AVAIL))
+ break;
+
+ val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+ if (!val)
+ break;
+
+ if ((max - currsize) >= WORD_SZ) {
+ memcpy(data, &val, WORD_SZ);
+ data += WORD_SZ;
+ currsize += WORD_SZ;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - currsize);
+ break;
+ }
+ } while (currsize < max);
+
+ return currsize;
+}
+
+static int qcom_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct qcom_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct qcom_rng *rng = ctx->rng;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rng->lock);
+
+ ret = qcom_rng_read(rng, dstn, dlen);
+
+ mutex_unlock(&rng->lock);
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ return 0;
+}
+
+static int qcom_rng_enable(struct qcom_rng *rng)
+{
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ /* Enable PRNG only if it is not already enabled */
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ if (val & PRNG_CONFIG_HW_ENABLE)
+ goto already_enabled;
+
+ val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+ val &= ~PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel(val, rng->base + PRNG_LFSR_CFG);
+
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val |= PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+
+already_enabled:
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_init(struct crypto_tfm *tfm)
+{
+ struct qcom_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->rng = qcom_rng_dev;
+
+ if (!ctx->rng->skip_init)
+ return qcom_rng_enable(ctx->rng);
+
+ return 0;
+}
+
+static struct rng_alg qcom_rng_alg = {
+ .generate = qcom_rng_generate,
+ .seed = qcom_rng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "qcom-rng",
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct qcom_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = qcom_rng_init,
+ }
+};
+
+static int qcom_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct qcom_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+ mutex_init(&rng->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ /* ACPI systems have clk already on, so skip clk_get */
+ if (!has_acpi_companion(&pdev->dev)) {
+ rng->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
+ }
+
+ rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
+
+ qcom_rng_dev = rng;
+ ret = crypto_register_rng(&qcom_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Register crypto rng failed: %d\n", ret);
+ qcom_rng_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int qcom_rng_remove(struct platform_device *pdev)
+{
+ crypto_unregister_rng(&qcom_rng_alg);
+
+ qcom_rng_dev = NULL;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qcom_rng_acpi_match[] = {
+ { .id = "QCOM8160", .driver_data = 1 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
+#endif
+
+static const struct of_device_id qcom_rng_of_match[] = {
+ { .compatible = "qcom,prng", .data = (void *)0},
+ { .compatible = "qcom,prng-ee", .data = (void *)1},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
+
+static struct platform_driver qcom_rng_driver = {
+ .probe = qcom_rng_probe,
+ .remove = qcom_rng_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(qcom_rng_of_match),
+ .acpi_match_table = ACPI_PTR(qcom_rng_acpi_match),
+ }
+};
+module_platform_driver(qcom_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_DESCRIPTION("Qualcomm random number generator driver");
+MODULE_LICENSE("GPL v2");
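
The driver registers as "stdrng", so in-kernel consumers can reach it through the standard rng API. A minimal in-kernel usage sketch, assuming the usual crypto rng interface:

    #include <crypto/rng.h>
    #include <linux/err.h>

    static int demo_get_random(u8 *buf, unsigned int len)
    {
            struct crypto_rng *rng;
            int ret;

            rng = crypto_alloc_rng("stdrng", 0, 0); /* may resolve to qcom-rng */
            if (IS_ERR(rng))
                    return PTR_ERR(rng);

            ret = crypto_rng_get_bytes(rng, buf, len);
            crypto_free_rng(rng);
            return ret;
    }
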
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index bf7163042569..faa282074e5a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1765,8 +1765,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha1",
.cra_driver_name = "exynos-sha1",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1791,8 +1790,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "md5",
.cra_driver_name = "exynos-md5",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1817,8 +1815,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "exynos-sha256",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0f2245e1af2b..e7540a5b8197 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1253,8 +1253,7 @@ static struct ahash_alg sha_v3_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sahara-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1280,8 +1279,7 @@ static struct ahash_alg sha_v4_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sahara-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1351,7 +1349,7 @@ err_sha_v4_algs:
err_sha_v3_algs:
for (j = 0; j < k; j++)
- crypto_unregister_ahash(&sha_v4_algs[j]);
+ crypto_unregister_ahash(&sha_v3_algs[j]);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -1367,7 +1365,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+ for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
if (dev->version > SAHARA_VERSION_3)
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index c5d3efc54a4f..23b0b7bd64c7 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/aes.h>
@@ -105,6 +106,7 @@
#define GCM_CTR_INIT 2
#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+#define CRYP_AUTOSUSPEND_DELAY 50
struct stm32_cryp_caps {
bool swap_final;
@@ -519,6 +521,8 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
+ pm_runtime_get_sync(cryp->dev);
+
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -638,6 +642,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
free_pages((unsigned long)buf_out, pages);
}
+ pm_runtime_mark_last_busy(cryp->dev);
+ pm_runtime_put_autosuspend(cryp->dev);
+
if (is_gcm(cryp) || is_ccm(cryp)) {
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
cryp->areq = NULL;
@@ -1969,6 +1976,13 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRYP_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
rst = devm_reset_control_get(dev, NULL);
if (!IS_ERR(rst)) {
reset_control_assert(rst);
@@ -2008,6 +2022,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
err_aead_algs:
@@ -2020,6 +2036,11 @@ err_engine1:
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(cryp->clk);
return ret;
@@ -2028,10 +2049,15 @@ err_engine1:
static int stm32_cryp_remove(struct platform_device *pdev)
{
struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+ int ret;
if (!cryp)
return -ENODEV;
+ ret = pm_runtime_get_sync(cryp->dev);
+ if (ret < 0)
+ return ret;
+
crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
@@ -2041,16 +2067,52 @@ static int stm32_cryp_remove(struct platform_device *pdev)
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(cryp->dev);
+ pm_runtime_put_noidle(cryp->dev);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_cryp_runtime_suspend(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+
clk_disable_unprepare(cryp->clk);
return 0;
}
+static int stm32_cryp_runtime_resume(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_cryp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_cryp_runtime_suspend,
+ stm32_cryp_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
.remove = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_cryp_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
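
stm32-cryp (and stm32-hash below) bracket every hardware access with the same runtime-PM autosuspend pattern, so the clock is only gated CRYP_AUTOSUSPEND_DELAY ms after the last request completes. The pattern in isolation, as a hedged sketch:

    #include <linux/pm_runtime.h>

    static int demo_hw_access(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev); /* resume: runtime_resume() ungates clk */
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            /* ... touch registers here ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev); /* suspend after the delay elapses */
            return 0;
    }
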
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cdc96f1bb917..590d7352837e 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/engine.h>
@@ -121,6 +122,8 @@ enum stm32_hash_data_format {
#define HASH_QUEUE_LENGTH 16
#define HASH_DMA_THRESHOLD 50
+#define HASH_AUTOSUSPEND_DELAY 50
+
struct stm32_hash_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
@@ -814,12 +817,17 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
rctx->flags |= HASH_FLAGS_ERRORS;
}
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
crypto_finalize_hash_request(hdev->engine, req, err);
}
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
+ pm_runtime_get_sync(hdev->dev);
+
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
stm32_hash_write(hdev, HASH_STR, 0);
@@ -967,6 +975,8 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
+ pm_runtime_get_sync(hdev->dev);
+
while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
cpu_relax();
@@ -982,6 +992,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
memcpy(out, rctx, sizeof(*rctx));
return 0;
@@ -1000,6 +1013,8 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
+ pm_runtime_get_sync(hdev->dev);
+
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
stm32_hash_write(hdev, HASH_CR, *preg);
@@ -1009,6 +1024,9 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
kfree(rctx->hw_context);
return 0;
@@ -1132,8 +1150,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "md5",
.cra_driver_name = "stm32-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1159,8 +1176,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "stm32-hmac-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1185,8 +1201,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "sha1",
.cra_driver_name = "stm32-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1212,8 +1227,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "stm32-hmac-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1241,8 +1255,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "stm32-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1268,8 +1281,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "stm32-hmac-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1294,8 +1306,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "stm32-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1321,8 +1332,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "stm32-hmac-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1482,6 +1492,13 @@ static int stm32_hash_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
if (!IS_ERR(hdev->rst)) {
reset_control_assert(hdev->rst);
@@ -1522,6 +1539,8 @@ static int stm32_hash_probe(struct platform_device *pdev)
dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+ pm_runtime_put_sync(dev);
+
return 0;
err_algs:
@@ -1535,6 +1554,9 @@ err_engine:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(hdev->clk);
return ret;
@@ -1543,11 +1565,16 @@ err_engine:
static int stm32_hash_remove(struct platform_device *pdev)
{
static struct stm32_hash_dev *hdev;
+ int ret;
hdev = platform_get_drvdata(pdev);
if (!hdev)
return -ENODEV;
+ ret = pm_runtime_get_sync(hdev->dev);
+ if (ret < 0)
+ return ret;
+
stm32_hash_unregister_algs(hdev);
crypto_engine_exit(hdev->engine);
@@ -1559,16 +1586,52 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(hdev->dev);
+ pm_runtime_put_noidle(hdev->dev);
+
clk_disable_unprepare(hdev->clk);
return 0;
}
+#ifdef CONFIG_PM
+static int stm32_hash_runtime_suspend(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return 0;
+}
+
+static int stm32_hash_runtime_resume(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(hdev->clk);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_hash_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
+ stm32_hash_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
.remove = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
+ .pm = &stm32_hash_pm_ops,
.of_match_table = stm32_hash_of_match,
}
};
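
Every hardware access path touched above (hw_init, export, import, finish_req) brackets register I/O with a runtime-PM get/put pair, using the autosuspend variant on the put side so that back-to-back requests arriving within HASH_AUTOSUSPEND_DELAY milliseconds never bounce the clock. The caller-side idiom, as a hedged sketch (my_hw_op is hypothetical):

#include <linux/pm_runtime.h>

static int my_hw_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume (clock on) if suspended */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* get_sync bumps the count even on failure */
		return ret;
	}

	/* ... program and read the hardware registers ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop usage count; suspend after the delay */
	return 0;
}

Note that the stm32 request paths above call pm_runtime_get_sync() without checking the return value; the sketch shows the stricter form used in the remove() paths.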
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 8f09b8430893..29d2095d9dfd 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -6,8 +6,11 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <crypto/internal/hash.h>
@@ -28,9 +31,7 @@
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF
-/* Polynomial reversed */
-#define POLY_CRC32 0xEDB88320
-#define POLY_CRC32C 0x82F63B78
+#define CRC_AUTOSUSPEND_DELAY 50
struct stm32_crc {
struct list_head list;
@@ -66,7 +67,7 @@ static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32;
+ mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +76,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32C;
+ mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -106,6 +107,8 @@ static int stm32_crc_init(struct shash_desc *desc)
}
spin_unlock_bh(&crc_list.lock);
+ pm_runtime_get_sync(ctx->crc->dev);
+
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
@@ -115,6 +118,9 @@ static int stm32_crc_init(struct shash_desc *desc)
ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
+ pm_runtime_mark_last_busy(ctx->crc->dev);
+ pm_runtime_put_autosuspend(ctx->crc->dev);
+
return 0;
}
@@ -126,6 +132,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
u32 *d32;
unsigned int i;
+ pm_runtime_get_sync(crc->dev);
+
if (unlikely(crc->nb_pending_bytes)) {
while (crc->nb_pending_bytes != sizeof(u32) && length) {
/* Fill in pending data */
@@ -149,6 +157,9 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
+
/* Check for pending data (non 32 bits) */
length &= 3;
if (likely(!length))
@@ -174,7 +185,7 @@ static int stm32_crc_final(struct shash_desc *desc, u8 *out)
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
/* Send computed CRC */
- put_unaligned_le32(mctx->poly == POLY_CRC32C ?
+ put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
~ctx->partial : ctx->partial, out);
return 0;
@@ -272,6 +283,13 @@ static int stm32_crc_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
@@ -287,12 +305,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
}
static int stm32_crc_remove(struct platform_device *pdev)
{
struct stm32_crc *crc = platform_get_drvdata(pdev);
+ int ret = pm_runtime_get_sync(crc->dev);
+
+ if (ret < 0)
+ return ret;
spin_lock(&crc_list.lock);
list_del(&crc->list);
@@ -300,11 +324,46 @@ static int stm32_crc_remove(struct platform_device *pdev)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ pm_runtime_disable(crc->dev);
+ pm_runtime_put_noidle(crc->dev);
+
+ clk_disable_unprepare(crc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_crc_runtime_suspend(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+
clk_disable_unprepare(crc->clk);
return 0;
}
+static int stm32_crc_runtime_resume(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_crc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+ stm32_crc_runtime_resume, NULL)
+};
+
static const struct of_device_id stm32_dt_ids[] = {
{ .compatible = "st,stm32f7-crc", },
{},
@@ -316,6 +375,7 @@ static struct platform_driver stm32_crc_driver = {
.remove = stm32_crc_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_crc_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
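
The probe-time ordering shared by the cryp, hash and crc drivers is significant: autosuspend is configured before pm_runtime_enable(), and pm_runtime_get_noresume() plus pm_runtime_set_active() tell the PM core the hardware is already powered (its clock was enabled earlier in probe), so the first runtime suspend can only happen once initialization is complete. Condensed, assuming the clock is already prepared and enabled:

	pm_runtime_set_autosuspend_delay(dev, 50);	/* ms of idle before suspending */
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);	/* hold a usage count across init */
	pm_runtime_set_active(dev);	/* hardware is already up */
	pm_runtime_enable(dev);

	/* ... register algorithms, set drvdata, etc. ... */

	pm_runtime_put_sync(dev);	/* balance get_noresume; autosuspend may now run */

On the error and remove paths the mirror image appears: pm_runtime_disable() followed by pm_runtime_put_noidle() before the clock is finally released.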
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index a81d89b3b7d8..89adf9e0fed2 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -45,11 +45,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "md5-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -73,11 +71,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "sha1-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -96,8 +92,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -118,8 +113,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -140,8 +134,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -161,8 +154,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -183,8 +175,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -205,7 +196,6 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cf14f099ce4a..6988012deca4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2822,8 +2822,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2838,8 +2837,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2854,8 +2852,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2870,8 +2867,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2886,8 +2882,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2902,8 +2897,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2918,8 +2912,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2934,8 +2927,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2950,8 +2942,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2966,8 +2957,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2982,8 +2972,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2998,8 +2987,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3186,7 +3174,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
alg->cra_exit = talitos_cra_exit;
- alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
t_alg->algt.alg.hash.final = ahash_final;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index cb31b59c9d53..d2663a4e1f5e 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -20,6 +20,7 @@
#include <linux/irqreturn.h>
#include <linux/klist.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/semaphore.h>
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 2d0a677bcc76..633321a8dd03 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -21,6 +21,7 @@
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
@@ -1524,8 +1525,7 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_init = hash_cra_init,
@@ -1548,11 +1548,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1574,11 +1572,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1600,11 +1596,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index af6a908dfa7a..2c573d1aaa64 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -49,12 +49,18 @@ struct virtio_crypto_sym_request {
bool encrypt;
};
+struct virtio_crypto_algo {
+ uint32_t algonum;
+ uint32_t service;
+ unsigned int active_devs;
+ struct crypto_alg algo;
+};
+
/*
* The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
*/
static DEFINE_MUTEX(algs_lock);
-static unsigned int virtio_crypto_active_devs;
static void virtio_crypto_ablkcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
@@ -312,15 +318,21 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
unsigned int keylen)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ uint32_t alg;
int ret;
+ ret = virtio_crypto_alg_validate_key(keylen, &alg);
+ if (ret)
+ return ret;
+
if (!ctx->vcrypto) {
/* New key */
int node = virtio_crypto_get_current_node();
struct virtio_crypto *vcrypto =
- virtcrypto_get_dev_node(node);
+ virtcrypto_get_dev_node(node,
+ VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
if (!vcrypto) {
- pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+ pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}
@@ -371,12 +383,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
/* Why 3? outhdr + iv + inhdr */
sg_total = src_nents + dst_nents + 3;
- sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
+ sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!sgs)
return -ENOMEM;
- req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+ req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!req_data) {
kfree(sgs);
@@ -571,57 +583,85 @@ static void virtio_crypto_ablkcipher_finalize_req(
virtcrypto_clear_request(&vc_sym_req->base);
}
-static struct crypto_alg virtio_crypto_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "virtio_crypto_aes_cbc",
- .cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = virtio_crypto_ablkcipher_init,
- .cra_exit = virtio_crypto_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = virtio_crypto_ablkcipher_setkey,
- .decrypt = virtio_crypto_ablkcipher_decrypt,
- .encrypt = virtio_crypto_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+ .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+ .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+ .algo = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "virtio_crypto_aes_cbc",
+ .cra_priority = 150,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = virtio_crypto_ablkcipher_init,
+ .cra_exit = virtio_crypto_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = virtio_crypto_ablkcipher_setkey,
+ .decrypt = virtio_crypto_ablkcipher_decrypt,
+ .encrypt = virtio_crypto_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
},
},
} };
-int virtio_crypto_algs_register(void)
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
+ int i = 0;
mutex_lock(&algs_lock);
- if (++virtio_crypto_active_devs != 1)
- goto unlock;
- ret = crypto_register_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
- if (ret)
- virtio_crypto_active_devs--;
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 0) {
+ ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+ if (ret)
+ goto unlock;
+ }
+
+ virtio_crypto_algs[i].active_devs++;
+ dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+ virtio_crypto_algs[i].algo.cra_name);
+ }
unlock:
mutex_unlock(&algs_lock);
return ret;
}
-void virtio_crypto_algs_unregister(void)
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
+ int i = 0;
+
mutex_lock(&algs_lock);
- if (--virtio_crypto_active_devs != 0)
- goto unlock;
- crypto_unregister_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (virtio_crypto_algs[i].active_devs == 0 ||
+ !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 1)
+ crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+
+ virtio_crypto_algs[i].active_devs--;
+ }
-unlock:
mutex_unlock(&algs_lock);
}
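
The effect of this conversion is per-algorithm reference counting: an algorithm is registered with the crypto core when the first device advertising it appears and unregistered when the last such device goes away, instead of tying all algorithms to the first and last virtio-crypto device. The idiom in miniature (a hedged sketch; my_algo and my_count are illustrative):

#include <linux/crypto.h>
#include <linux/mutex.h>

static struct crypto_alg my_algo;	/* filled in elsewhere */
static DEFINE_MUTEX(my_lock);
static unsigned int my_count;		/* devices currently backing my_algo */

static int my_algo_register(void)
{
	int ret = 0;

	mutex_lock(&my_lock);
	if (my_count++ == 0)		/* first capable device: register */
		ret = crypto_register_alg(&my_algo);
	if (ret)
		my_count--;		/* roll back on failure */
	mutex_unlock(&my_lock);
	return ret;
}

static void my_algo_unregister(void)
{
	mutex_lock(&my_lock);
	if (--my_count == 0)		/* last capable device gone: unregister */
		crypto_unregister_alg(&my_algo);
	mutex_unlock(&my_lock);
}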
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 66501a5a2b7b..63ef7f7924ea 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -55,6 +55,20 @@ struct virtio_crypto {
/* Number of queue currently used by the driver */
u32 curr_queue;
+ /*
+ * Specifies the services mask which the device support,
+ * see VIRTIO_CRYPTO_SERVICE_*
+ */
+ u32 crypto_services;
+
+ /* Detailed algorithms mask */
+ u32 cipher_algo_l;
+ u32 cipher_algo_h;
+ u32 hash_algo;
+ u32 mac_algo_l;
+ u32 mac_algo_h;
+ u32 aead_algo;
+
/* Maximum length of cipher key */
u32 max_cipher_key_len;
/* Maximum length of authenticated key */
@@ -102,7 +116,12 @@ int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_get_dev_node(int node);
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
+ uint32_t service,
+ uint32_t algo);
+struct virtio_crypto *virtcrypto_get_dev_node(int node,
+ uint32_t service,
+ uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
@@ -122,7 +141,7 @@ static inline int virtio_crypto_get_current_node(void)
return node;
}
-int virtio_crypto_algs_register(void);
-void virtio_crypto_algs_unregister(void);
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 83326986c113..3c9e120287af 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -146,7 +146,7 @@ static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_data_queues; i++)
- virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+ virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
vi->affinity_hint_set = false;
}
@@ -173,7 +173,7 @@ static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
*
*/
for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+ virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
if (++i >= vcrypto->max_data_queues)
break;
}
@@ -303,6 +303,13 @@ static int virtcrypto_probe(struct virtio_device *vdev)
u32 max_data_queues = 0, max_cipher_key_len = 0;
u32 max_auth_key_len = 0;
u64 max_size = 0;
+ u32 cipher_algo_l = 0;
+ u32 cipher_algo_h = 0;
+ u32 hash_algo = 0;
+ u32 mac_algo_l = 0;
+ u32 mac_algo_h = 0;
+ u32 aead_algo = 0;
+ u32 crypto_services = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
@@ -339,6 +346,20 @@ static int virtcrypto_probe(struct virtio_device *vdev)
max_auth_key_len, &max_auth_key_len);
virtio_cread(vdev, struct virtio_crypto_config,
max_size, &max_size);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ crypto_services, &crypto_services);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_l, &cipher_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_h, &cipher_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ hash_algo, &hash_algo);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_l, &mac_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_h, &mac_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ aead_algo, &aead_algo);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -358,6 +379,14 @@ static int virtcrypto_probe(struct virtio_device *vdev)
vcrypto->max_cipher_key_len = max_cipher_key_len;
vcrypto->max_auth_key_len = max_auth_key_len;
vcrypto->max_size = max_size;
+ vcrypto->crypto_services = crypto_services;
+ vcrypto->cipher_algo_l = cipher_algo_l;
+ vcrypto->cipher_algo_h = cipher_algo_h;
+ vcrypto->mac_algo_l = mac_algo_l;
+ vcrypto->mac_algo_h = mac_algo_h;
+ vcrypto->hash_algo = hash_algo;
+ vcrypto->aead_algo = aead_algo;
+
dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index a69ff71de2c4..d70de3a4f7d7 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -181,14 +181,20 @@ int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
/*
* virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
 * @node: Node id the driver works on.
+ * @service: Crypto service that needs to be supported by the
+ * dev
+ * @algo: The algorithm number that needs to be supported by the
+ * dev
*
- * Function returns the virtio crypto device used fewest on the node.
+ * Function returns the least-used virtio crypto device on the node
+ * that supports the given crypto service and algorithm.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
-struct virtio_crypto *virtcrypto_get_dev_node(int node)
+struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
+ uint32_t algo)
{
struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
unsigned long best = ~0;
@@ -199,7 +205,8 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
dev_to_node(&tmp_dev->vdev->dev) < 0) &&
- virtcrypto_dev_started(tmp_dev)) {
+ virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
vcrypto_dev = tmp_dev;
@@ -214,7 +221,9 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
/* Get any started device */
list_for_each_entry(tmp_dev,
virtcrypto_devmgr_get_head(), list) {
- if (virtcrypto_dev_started(tmp_dev)) {
+ if (virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev,
+ service, algo)) {
vcrypto_dev = tmp_dev;
break;
}
@@ -240,7 +249,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
*/
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
- if (virtio_crypto_algs_register()) {
+ if (virtio_crypto_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto algs\n");
return -EFAULT;
}
@@ -260,5 +269,65 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
*/
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
- virtio_crypto_algs_unregister();
+ virtio_crypto_algs_unregister(vcrypto);
+}
+
+/*
+ * virtcrypto_algo_is_supported()
+ * @vcrypto: Pointer to virtio crypto device.
+ * @service: The bit number of the service to validate.
+ * See VIRTIO_CRYPTO_SERVICE_*
+ * @algo: The bit number of the algorithm to validate.
+ *
+ * Validate whether the virtio crypto device supports a service and
+ * algorithm.
+ *
+ * Return true if the device supports the service and algorithm.
+ */
+
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
+ uint32_t service,
+ uint32_t algo)
+{
+ uint32_t service_mask = 1u << service;
+ uint32_t algo_mask = 0;
+ bool low = true;
+
+ if (algo > 31) {
+ algo -= 32;
+ low = false;
+ }
+
+ if (!(vcrypto->crypto_services & service_mask))
+ return false;
+
+ switch (service) {
+ case VIRTIO_CRYPTO_SERVICE_CIPHER:
+ if (low)
+ algo_mask = vcrypto->cipher_algo_l;
+ else
+ algo_mask = vcrypto->cipher_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_HASH:
+ algo_mask = vcrypto->hash_algo;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_MAC:
+ if (low)
+ algo_mask = vcrypto->mac_algo_l;
+ else
+ algo_mask = vcrypto->mac_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_AEAD:
+ algo_mask = vcrypto->aead_algo;
+ break;
+ }
+
+ if (!(algo_mask & (1u << algo)))
+ return false;
+
+ return true;
}
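
The capability data read at probe time maps one bit per service into crypto_services, and one bit per algorithm into a 64-bit mask that the config space splits into _l/_h 32-bit halves. For a hypothetical cipher algorithm numbered 34, the check above therefore reduces to:

	/* algo 34 sits in the high half: bit (34 - 32) = 2 of cipher_algo_h */
	bool ok = (vcrypto->crypto_services &
		   (1u << VIRTIO_CRYPTO_SERVICE_CIPHER)) &&
		  (vcrypto->cipher_algo_h & (1u << (34 - 32)));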
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b889fba..dd8b8716467a 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -215,7 +215,7 @@ struct shash_alg p8_ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
index f746af271460..38b06503ede0 100644
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -129,9 +129,9 @@ $code=<<___;
le?vperm $IN,$IN,$IN,$lemask
vxor $zero,$zero,$zero
- vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
- vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
- vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
vpmsumd $t2,$Xl,$xC2 # 1st phase
@@ -187,11 +187,11 @@ $code=<<___;
.align 5
Loop:
subic $len,$len,16
- vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
subfe. r0,r0,r0 # borrow?-1:0
- vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
and r0,r0,$len
- vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
add $inp,$inp,r0
vpmsumd $t2,$Xl,$xC2 # 1st phase
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 108c37fca782..0a2acd7993f0 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -474,7 +474,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
vma->vm_ops = &dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ vma->vm_flags |= VM_HUGEPAGE;
return 0;
}
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index c2f42b1f3363..6e928f37d084 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
struct dax_device *dax_dev;
bool dax_enabled = false;
+ struct request_queue *q;
pgoff_t pgoff;
int err, id;
pfn_t pfn;
@@ -98,6 +99,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
return false;
}
+ q = bdev_get_queue(bdev);
+ if (!q || !blk_queue_dax(q)) {
+ pr_debug("%s: error: request queue doesn't support dax\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
if (err) {
pr_debug("%s: error: unaligned partition for dax\n",
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 0b5b3abe054e..4c49bb1330b5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -604,28 +604,29 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
}
- devfreq->min_freq = find_available_min_freq(devfreq);
- if (!devfreq->min_freq) {
+ devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+ if (!devfreq->scaling_min_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_min_freq = devfreq->min_freq;
+ devfreq->min_freq = devfreq->scaling_min_freq;
- devfreq->max_freq = find_available_max_freq(devfreq);
- if (!devfreq->max_freq) {
+ devfreq->scaling_max_freq = find_available_max_freq(devfreq);
+ if (!devfreq->scaling_max_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_max_freq = devfreq->max_freq;
+ devfreq->max_freq = devfreq->scaling_max_freq;
dev_set_name(&devfreq->dev, "devfreq%d",
atomic_inc_return(&devfreq_no));
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
- goto err_dev;
+ put_device(&devfreq->dev);
+ goto err_out;
}
devfreq->trans_table =
@@ -672,6 +673,7 @@ err_init:
mutex_unlock(&devfreq_list_lock);
device_unregister(&devfreq->dev);
+ devfreq = NULL;
err_dev:
if (devfreq)
kfree(devfreq);
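
The two devfreq hunks encode the standard device_register() error rule: once a struct device has been initialized, its kobject owns it, and it must be released with put_device() (which ends in the release callback) rather than kfree(), or the refcount and the allocated device name are corrupted. A generic hedged sketch (my_dev_release is hypothetical and frees the containing structure):

	struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return -ENOMEM;
	device_initialize(&md->dev);
	md->dev.release = my_dev_release;	/* does the kfree() */

	err = device_add(&md->dev);
	if (err) {
		put_device(&md->dev);		/* release() frees; no kfree() here */
		return err;
	}

Setting devfreq = NULL before the err_dev label serves the same purpose on the device_unregister() path, so the later kfree(devfreq) is skipped.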
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 3cd6a184fe7c..a9c64f0d3284 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -627,11 +627,9 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
size = sizeof(struct devfreq_event_dev *) * info->num_events;
info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!info->edev) {
- dev_err(&pdev->dev,
- "failed to allocate memory devfreq-event devices\n");
+ if (!info->edev)
return -ENOMEM;
- }
+
edev = info->edev;
platform_set_drvdata(pdev, info);
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 5dfbfa3cc878..e795ad2b3f6b 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -68,15 +68,6 @@ struct rk3399_dmcfreq {
struct devfreq_event_dev *edev;
struct mutex lock;
struct dram_timing timing;
-
- /*
- * DDR Converser of Frequency (DCF) is used to implement DDR frequency
- * conversion without the participation of CPU, we will implement and
- * control it in arm trust firmware.
- */
- wait_queue_head_t wait_dcf_queue;
- int irq;
- int wait_dcf_flag;
struct regulator *vdd_center;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
@@ -112,31 +103,22 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
target_volt);
if (err) {
- dev_err(dev, "Cannot to set voltage %lu uV\n",
+ dev_err(dev, "Cannot set voltage %lu uV\n",
target_volt);
goto out;
}
}
- dmcfreq->wait_dcf_flag = 1;
err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
if (err) {
- dev_err(dev, "Cannot to set frequency %lu (%d)\n",
- target_rate, err);
+ dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
+ err);
regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
dmcfreq->volt);
goto out;
}
/*
- * Wait until bcf irq happen, it means freq scaling finish in
- * arm trust firmware, use 100ms as timeout time.
- */
- if (!wait_event_timeout(dmcfreq->wait_dcf_queue,
- !dmcfreq->wait_dcf_flag, HZ / 10))
- dev_warn(dev, "Timeout waiting for dcf interrupt\n");
-
- /*
* Check the dpll rate,
 * There are only two results we can get:
 * 1. DDR frequency scaling fails and we still get the old rate.
@@ -146,8 +128,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
/* If get the incorrect rate, set voltage to old value. */
if (dmcfreq->rate != target_rate) {
- dev_err(dev, "Get wrong ddr frequency, Request frequency %lu,\
- Current frequency %lu\n", target_rate, dmcfreq->rate);
+ dev_err(dev, "Got wrong frequency, Request %lu, Current %lu\n",
+ target_rate, dmcfreq->rate);
regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
dmcfreq->volt);
goto out;
@@ -155,7 +137,7 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
target_volt);
if (err)
- dev_err(dev, "Cannot to set vol %lu uV\n", target_volt);
+ dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
dmcfreq->rate = target_rate;
dmcfreq->volt = target_volt;
@@ -241,22 +223,6 @@ static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend,
rk3399_dmcfreq_resume);
-static irqreturn_t rk3399_dmc_irq(int irq, void *dev_id)
-{
- struct rk3399_dmcfreq *dmcfreq = dev_id;
- struct arm_smccc_res res;
-
- dmcfreq->wait_dcf_flag = 0;
- wake_up(&dmcfreq->wait_dcf_queue);
-
- /* Clear the DCF interrupt */
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
- ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ,
- 0, 0, 0, 0, &res);
-
- return IRQ_HANDLED;
-}
-
static int of_get_ddr_timings(struct dram_timing *timing,
struct device_node *np)
{
@@ -330,16 +296,10 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct rk3399_dmcfreq *data;
- int ret, irq, index, size;
+ int ret, index, size;
uint32_t *timing;
struct dev_pm_opp *opp;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Cannot get the dmc interrupt resource: %d\n", irq);
- return irq;
- }
data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -348,27 +308,22 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->vdd_center = devm_regulator_get(dev, "center");
if (IS_ERR(data->vdd_center)) {
+ if (PTR_ERR(data->vdd_center) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
dev_err(dev, "Cannot get the regulator \"center\"\n");
return PTR_ERR(data->vdd_center);
}
data->dmc_clk = devm_clk_get(dev, "dmc_clk");
if (IS_ERR(data->dmc_clk)) {
+ if (PTR_ERR(data->dmc_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->dmc_clk);
};
- data->irq = irq;
- ret = devm_request_irq(dev, irq, rk3399_dmc_irq, 0,
- dev_name(dev), data);
- if (ret) {
- dev_err(dev, "Failed to request dmc irq: %d\n", ret);
- return ret;
- }
-
- init_waitqueue_head(&data->wait_dcf_queue);
- data->wait_dcf_flag = 0;
-
data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
if (IS_ERR(data->edev))
return -EPROBE_DEFER;
@@ -420,8 +375,10 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->rate = clk_get_rate(data->dmc_clk);
opp = devfreq_recommended_opp(dev, &data->rate, 0);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_free_opp;
+ }
data->rate = dev_pm_opp_get_freq(opp);
data->volt = dev_pm_opp_get_voltage(opp);
@@ -433,14 +390,34 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
&rk3399_devfreq_dmc_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&data->ondemand_data);
- if (IS_ERR(data->devfreq))
- return PTR_ERR(data->devfreq);
+ if (IS_ERR(data->devfreq)) {
+ ret = PTR_ERR(data->devfreq);
+ goto err_free_opp;
+ }
+
devm_devfreq_register_opp_notifier(dev, data->devfreq);
data->dev = dev;
platform_set_drvdata(pdev, data);
return 0;
+
+err_free_opp:
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ return ret;
+}
+
+static int rk3399_dmcfreq_remove(struct platform_device *pdev)
+{
+ struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
+
+ /*
+ * Before removing the opp table we need to unregister the opp notifier.
+ */
+ devm_devfreq_unregister_opp_notifier(dmcfreq->dev, dmcfreq->devfreq);
+ dev_pm_opp_of_remove_table(dmcfreq->dev);
+
+ return 0;
}
static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
@@ -451,6 +428,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
+ .remove = rk3399_dmcfreq_remove,
.driver = {
.name = "rk3399-dmc-freq",
.pm = &rk3399_dmcfreq_pm,
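
The regulator and clock hunks above illustrate the usual -EPROBE_DEFER etiquette: defer silently, because the supplying driver may simply not have probed yet, and keep dev_err() for genuine failures so the log is not flooded on every deferral. Distilled to the clock case:

	clk = devm_clk_get(dev, "dmc_clk");
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* silent: provider not ready yet */
		dev_err(dev, "Cannot get the clk dmc_clk\n");
		return PTR_ERR(clk);
	}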
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index ae712159246f..c59d2eee5d30 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index d78d5fc173dc..13884474d158 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
- || !exp_info->ops->map_atomic
|| !exp_info->ops->map
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
@@ -568,7 +567,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
mutex_lock(&dmabuf->lock);
if (dmabuf->ops->attach) {
- ret = dmabuf->ops->attach(dmabuf, dev, attach);
+ ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
* void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
*
- * There are also atomic variants of these interfaces. Like for kmap they
- * facilitate non-blocking fast-paths. Neither the importer nor the exporter
- * (in the callback) is allowed to block when using these.
- *
- * Interfaces::
- * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- * For importers all the restrictions of using kmap apply, like the limited
- * supply of kmap_atomic slots. Hence an importer shall only hold onto at
- * max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ * Implementing these functions is optional for exporters; for importers, all
+ * the restrictions of using kmap apply.
*
* dma_buf kmap calls outside of the range specified in begin_cpu_access are
* undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
* the partial chunks at the beginning and end but may return stale or bogus
* data outside of the range (in these partial chunks).
*
- * Note that these calls need to always succeed. The exporter needs to
- * complete any preparations that might fail in begin_cpu_access.
- *
 * In some cases the overhead of kmap can be too high, so a vmap interface
 * is introduced. This interface should be used very carefully, as vmalloc
 * space is a limited resource on many architectures.
@@ -860,41 +847,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
/**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf: [in] buffer to map page from.
- * @page_num: [in] page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
- WARN_ON(!dmabuf);
-
- return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf: [in] buffer to unmap page from.
- * @page_num: [in] page in PAGE_SIZE units to unmap.
- * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
- void *vaddr)
-{
- WARN_ON(!dmabuf);
-
- if (dmabuf->ops->unmap_atomic)
- dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
-/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
* same restrictions as for kmap and friends apply.
* @dmabuf: [in] buffer to map page from.
@@ -907,6 +859,8 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
WARN_ON(!dmabuf);
+ if (!dmabuf->ops->map)
+ return NULL;
return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
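
With map_atomic gone from the required ops and &dma_buf_ops.map now optional, dma_buf_kmap() may return NULL, so importers have to treat a CPU-side kernel mapping as best-effort rather than guaranteed. A hedged caller-side sketch (the -ENXIO fallback is illustrative):

	void *vaddr = dma_buf_kmap(dmabuf, page_num);

	if (!vaddr)
		return -ENXIO;	/* exporter offers no kernel mapping; fall back to DMA access */

	/* ... CPU access to one PAGE_SIZE chunk of the buffer ... */

	dma_buf_kunmap(dmabuf, page_num, vaddr);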
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index dd1edfb27b61..a8c254497251 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,7 +104,6 @@ const struct dma_fence_ops dma_fence_array_ops = {
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
- .wait = dma_fence_default_wait,
.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 4edb9fd3cf47..1551ca7df394 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -39,11 +39,42 @@ EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
+ * DOC: DMA fences overview
+ *
+ * DMA fences, represented by &struct dma_fence, are the kernel internal
+ * synchronization primitive for DMA operations like GPU rendering, video
+ * encoding/decoding, or displaying buffers on a screen.
+ *
+ * A fence is initialized using dma_fence_init() and completed using
+ * dma_fence_signal(). Fences are associated with a context, allocated through
+ * dma_fence_context_alloc(), and all fences on the same context are
+ * fully ordered.
+ *
+ * Since the purpose of fences is to facilitate cross-device and
+ * cross-application synchronization, there are multiple ways to use one:
+ *
+ * - Individual fences can be exposed as a &sync_file, accessed as a file
+ * descriptor from userspace, created by calling sync_file_create(). This is
+ * called explicit fencing, since userspace passes around explicit
+ * synchronization points.
+ *
+ * - Some subsystems also have their own explicit fencing primitives, like
+ * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
+ * fence to be updated.
+ *
+ * - Then there's also implicit fencing, where the synchronization points are
+ * implicitly passed around as part of shared &dma_buf instances. Such
+ * implicit fences are stored in &struct reservation_object through the
+ * &dma_buf.resv pointer.
+ */
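
The overview can be made concrete with a minimal software-only fence. Elsewhere in this series &dma_fence_ops.enable_signaling and &dma_fence_ops.wait become optional, so only the two name callbacks remain mandatory. A hedged sketch (all my_* names are illustrative, not from this patch):

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my-driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my-timeline";
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	/* .enable_signaling and .wait may now be omitted */
};

static DEFINE_SPINLOCK(my_fence_lock);

static struct dma_fence *my_fence_create(void)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/* one fence, seqno 1, on a freshly allocated context */
	dma_fence_init(f, &my_fence_ops, &my_fence_lock,
		       dma_fence_context_alloc(1), 1);
	return f;
}

/* When the operation completes: dma_fence_signal(f); dma_fence_put(f); */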
+
+/**
* dma_fence_context_alloc - allocate an array of fence contexts
- * @num: [in] amount of contexts to allocate
+ * @num: amount of contexts to allocate
*
- * This function will return the first index of the number of fences allocated.
- * The fence context is used for setting fence->context to a unique number.
+ * This function will return the first index of the number of fence contexts
+ * allocated. The fence context is used for setting &dma_fence.context to a
+ * unique number by passing the context to dma_fence_init().
*/
u64 dma_fence_context_alloc(unsigned num)
{
@@ -59,10 +90,14 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
*
- * Unlike dma_fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signaled already.
*/
int dma_fence_signal_locked(struct dma_fence *fence)
{
@@ -102,8 +137,11 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signaled already.
*/
int dma_fence_signal(struct dma_fence *fence)
{
@@ -136,9 +174,9 @@ EXPORT_SYMBOL(dma_fence_signal);
/**
* dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success. Other error values may be
@@ -148,6 +186,8 @@ EXPORT_SYMBOL(dma_fence_signal);
* directly or indirectly (buf-mgr between reservation and committing)
* holds a reference to the fence, otherwise the fence might be
* freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_any_timeout().
*/
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
@@ -158,12 +198,22 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
return -EINVAL;
trace_dma_fence_wait_start(fence);
- ret = fence->ops->wait(fence, intr, timeout);
+ if (fence->ops->wait)
+ ret = fence->ops->wait(fence, intr, timeout);
+ else
+ ret = dma_fence_default_wait(fence, intr, timeout);
trace_dma_fence_wait_end(fence);
return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
+/**
+ * dma_fence_release - default release function for fences
+ * @kref: &dma_fence.refcount
+ *
+ * This is the default release function for &dma_fence. Drivers shouldn't call
+ * this directly, but instead call dma_fence_put().
+ */
void dma_fence_release(struct kref *kref)
{
struct dma_fence *fence =
@@ -181,6 +231,13 @@ void dma_fence_release(struct kref *kref)
}
EXPORT_SYMBOL(dma_fence_release);
+/**
+ * dma_fence_free - default release function for &dma_fence.
+ * @fence: fence to release
+ *
+ * This is the default implementation for &dma_fence_ops.release. It calls
+ * kfree_rcu() on @fence.
+ */
void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
@@ -189,10 +246,11 @@ EXPORT_SYMBOL(dma_fence_free);
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
- * @fence: [in] the fence to enable
+ * @fence: the fence to enable
*
- * this will request for sw signaling to be enabled, to make the fence
- * complete as soon as possible
+ * This will request sw signaling to be enabled, to make the fence
+ * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
+ * internally.
*/
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
@@ -200,7 +258,8 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+ fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
@@ -216,24 +275,24 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
* dma_fence_add_callback - add a callback to be called when the fence
* is signaled
- * @fence: [in] the fence to wait on
- * @cb: [in] the callback to register
- * @func: [in] the function to call
+ * @fence: the fence to wait on
+ * @cb: the callback to register
+ * @func: the function to call
*
- * cb will be initialized by dma_fence_add_callback, no initialization
+ * @cb will be initialized by dma_fence_add_callback(), no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
* Note that the callback can be called from an atomic context. If
* fence is already signaled, this function will return -ENOENT (and
- * *not* call the callback)
+ * *not* call the callback).
*
* Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to dma_fence_wait, however the caller doesn't need to
- * keep a refcount to fence afterwards: when software access is enabled,
- * the creator of the fence is required to keep the fence alive until
- * after it signals with dma_fence_signal. The callback itself can be called
- * from irq context.
+ * refcount as it does to dma_fence_wait(), however the caller doesn't need to
+ * keep a refcount to the fence after dma_fence_add_callback() has returned:
+ * when software access is enabled, the creator of the fence is required to keep
+ * the fence alive until after it signals with dma_fence_signal(). The callback
+ * itself can be called from irq context.
*
* Returns 0 in case of success, -ENOENT if the fence is already signaled
* and -EINVAL in case of error.
@@ -260,7 +319,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
- else if (!was_set) {
+ else if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
@@ -282,7 +341,7 @@ EXPORT_SYMBOL(dma_fence_add_callback);
/**
* dma_fence_get_status - returns the status upon completion
- * @fence: [in] the dma_fence to query
+ * @fence: the dma_fence to query
*
* This wraps dma_fence_get_status_locked() to return the error status
* condition on a signaled fence. See dma_fence_get_status_locked() for more
@@ -307,8 +366,8 @@ EXPORT_SYMBOL(dma_fence_get_status);
/**
* dma_fence_remove_callback - remove a callback from the signaling list
- * @fence: [in] the fence to wait on
- * @cb: [in] the callback to remove
+ * @fence: the fence to wait on
+ * @cb: the callback to remove
*
* Remove a previously queued callback from the fence. This function returns
* true if the callback is successfully removed, or false if the fence has
@@ -319,6 +378,9 @@ EXPORT_SYMBOL(dma_fence_get_status);
* doing, since deadlocks and race conditions could occur all too easily. For
* this reason, it should only ever be done on hardware lockup recovery,
* with a reference held to the fence.
+ *
+ * Behaviour is undefined if @cb has not been added to @fence using
+ * dma_fence_add_callback() beforehand.
*/
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
@@ -355,9 +417,9 @@ dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
/**
* dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success. If timeout is zero the value one is
@@ -388,7 +450,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
- if (!was_set) {
+ if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
@@ -450,12 +512,12 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
/**
* dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
- * @fences: [in] array of fences to wait on
- * @count: [in] number of fences to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
- * @idx: [out] the first signaled fence index, meaningful only on
- * positive return
+ * @fences: array of fences to wait on
+ * @count: number of fences to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: used to store the first signaled fence index, meaningful only on
+ * positive return
*
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@@ -464,6 +526,8 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
 * Synchronously waits for the first fence in the array to be signaled. The
* caller needs to hold a reference to all fences in the array, otherwise a
* fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_timeout().
*/
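A short caller sketch under the rules above (the function name and timeout value are assumptions):

	static int foo_wait_first(struct dma_fence **fences, uint32_t count)
	{
		uint32_t idx;
		signed long ret;

		/* caller holds a reference to every fence in the array */
		ret = dma_fence_wait_any_timeout(fences, count, true,
						 msecs_to_jiffies(100), &idx);
		if (ret < 0)
			return ret;		/* -ERESTARTSYS, -EINVAL, ... */
		if (!ret)
			return -ETIMEDOUT;	/* nothing signaled in time */

		pr_info("fence %u signaled first\n", idx);
		return 0;
	}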
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
@@ -496,11 +560,6 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
for (i = 0; i < count; ++i) {
struct dma_fence *fence = fences[i];
- if (fence->ops->wait != dma_fence_default_wait) {
- ret = -EINVAL;
- goto fence_rm_cb;
- }
-
cb[i].task = current;
if (dma_fence_add_callback(fence, &cb[i].base,
dma_fence_default_wait_cb)) {
@@ -541,27 +600,25 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
* dma_fence_init - Initialize a custom fence.
- * @fence: [in] the fence to initialize
- * @ops: [in] the dma_fence_ops for operations on this fence
- * @lock: [in] the irqsafe spinlock to use for locking this fence
- * @context: [in] the execution context this fence is run on
- * @seqno: [in] a linear increasing sequence number for this context
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
- * refcount again if dma_fence_ops.enable_signaling gets called. This can
- * be used for other implementing other types of fence.
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
*
* context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using dma_fence_later.
+ * to check which fence is later by simply using dma_fence_later().
*/
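With the relaxed BUG_ON below, a minimal fence only needs the two name callbacks; a hedged sketch (all names hypothetical):

	static const char *foo_driver_name(struct dma_fence *f)
	{
		return "foo";
	}

	static const char *foo_timeline_name(struct dma_fence *f)
	{
		return "foo-timeline";
	}

	/* enable_signaling, wait and release may now stay NULL; the
	 * dma-fence defaults are used instead. */
	static const struct dma_fence_ops foo_fence_ops = {
		.get_driver_name = foo_driver_name,
		.get_timeline_name = foo_timeline_name,
	};

	static DEFINE_SPINLOCK(foo_fence_lock);

	static struct dma_fence *foo_fence_create(u64 context, unsigned int seqno)
	{
		struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			dma_fence_init(f, &foo_fence_ops, &foo_fence_lock,
				       context, seqno);
		return f;
	}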
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
- BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
kref_init(&fence->refcount);
fence->ops = ops;
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 314eb1071cce..6c95f61a32e7 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -46,7 +46,7 @@
* write-side updates.
*/
-DEFINE_WW_CLASS(reservation_ww_class);
+DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
struct lock_class_key reservation_seqcount_class;
@@ -141,6 +141,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
if (signaled) {
RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
} else {
+ BUG_ON(fobj->shared_count >= fobj->shared_max);
RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
fobj->shared_count++;
}
@@ -230,10 +231,9 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
old = reservation_object_get_list(obj);
obj->staged = NULL;
- if (!fobj) {
- BUG_ON(old->shared_count >= old->shared_max);
+ if (!fobj)
reservation_object_add_shared_inplace(obj, old, fence);
- } else
+ else
reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 3d78ca89a605..53c1d6d36a64 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -188,7 +188,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
- .wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ca1680afa20a..dacf3f42426d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -250,6 +250,7 @@ config IMX_SDMA
tristate "i.MX SDMA support"
depends on ARCH_MXC
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Support the i.MX SDMA engine. This engine is integrated into
Freescale i.MX25/31/35/51/53/6 chips.
@@ -413,6 +414,14 @@ config NBPFAXI_DMA
help
Support for "Type-AXI" NBPF DMA IPs from Renesas
+config OWL_DMA
+ tristate "Actions Semi Owl SoCs DMA support"
+ depends on ARCH_ACTIONS
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Actions Semi Owl SoCs DMA controller.
+
config PCH_DMA
tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
depends on PCI && (X86_32 || COMPILE_TEST)
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 203a99d68315..c91702d88b95 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 08ba8473a284..272bed6c8ba7 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -500,12 +500,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
caps->max_burst = device->max_burst;
caps->residue_granularity = device->residue_granularity;
caps->descriptor_reuse = device->descriptor_reuse;
-
- /*
- * Some devices implement only pause (e.g. to get residuum) but no
- * resume. However cmd_pause is advertised as pause AND resume.
- */
- caps->cmd_pause = !!(device->device_pause && device->device_resume);
+ caps->cmd_pause = !!device->device_pause;
+ caps->cmd_resume = !!device->device_resume;
caps->cmd_terminate = !!device->device_terminate_all;
return 0;
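With pause and resume now reported independently, a client check might look like this (hypothetical helper; assumes the matching dma_slave_caps fields from this series):

	static bool foo_can_pause(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;

		if (dma_get_slave_caps(chan, &caps))
			return false;

		/* a pause-only device (e.g. for residue reads) now reports
		 * cmd_pause = true, cmd_resume = false */
		return caps.cmd_pause;
	}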
@@ -774,8 +770,14 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
return ERR_PTR(-ENODEV);
chan = __dma_request_channel(mask, NULL, NULL);
- if (!chan)
- chan = ERR_PTR(-ENODEV);
+ if (!chan) {
+ mutex_lock(&dma_list_mutex);
+ if (list_empty(&dma_device_list))
+ chan = ERR_PTR(-EPROBE_DEFER);
+ else
+ chan = ERR_PTR(-ENODEV);
+ mutex_unlock(&dma_list_mutex);
+ }
return chan;
}
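A hedged consumer sketch of the new behaviour (helper name assumed): when no DMA controller has registered yet, the error is -EPROBE_DEFER, so a client probe can simply propagate it and be retried later.

	static struct dma_chan *foo_get_memcpy_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* ERR_PTR(-EPROBE_DEFER) if the dma_device_list is empty */
		return dma_request_chan_by_mask(&mask);
	}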
@@ -1139,6 +1141,41 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
+static void dmam_device_release(struct device *dev, void *res)
+{
+ struct dma_device *device;
+
+ device = *(struct dma_device **)res;
+ dma_async_device_unregister(device);
+}
+
+/**
+ * dmaenginem_async_device_register - registers DMA devices found
+ * @device: &dma_device
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+int dmaenginem_async_device_register(struct dma_device *device)
+{
+ void *p;
+ int ret;
+
+ p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ ret = dma_async_device_register(device);
+ if (!ret) {
+ *(struct dma_device **)p = device;
+ devres_add(device->dev, p);
+ } else {
+ devres_free(p);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dmaenginem_async_device_register);
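A probe-path sketch using the managed variant (driver structure and names hypothetical):

	struct foo_dma {
		struct dma_device ddev;
	};

	static int foo_dma_probe(struct platform_device *pdev)
	{
		struct foo_dma *fd;

		fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
		if (!fd)
			return -ENOMEM;

		fd->ddev.dev = &pdev->dev;
		/* ... fill in cap_mask, channel list and callbacks ... */

		/* undone automatically on driver detach, so .remove does
		 * not need dma_async_device_unregister() */
		return dmaenginem_async_device_register(&fd->ddev);
	}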
+
struct dmaengine_unmap_pool {
struct kmem_cache *cache;
const char *name;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index ec240592f5c8..a15592383d4e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 29d04ca71d52..202ffa9f7611 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -413,6 +413,13 @@ static void hsu_dma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan));
}
+static void hsu_dma_synchronize(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+
+ vchan_synchronize(&hsuc->vchan);
+}
+
int hsu_dma_probe(struct hsu_dma_chip *chip)
{
struct hsu_dma *hsu;
@@ -459,6 +466,7 @@ int hsu_dma_probe(struct hsu_dma_chip *chip)
hsu->dma.device_pause = hsu_dma_pause;
hsu->dma.device_resume = hsu_dma_resume;
hsu->dma.device_terminate_all = hsu_dma_terminate_all;
+ hsu->dma.device_synchronize = hsu_dma_synchronize;
hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
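For context, providing device_synchronize is what lets clients use the synchronous teardown helpers; a hedged sketch:

	static void foo_stop_dma(struct dma_chan *chan)
	{
		/* terminate_all, then wait for in-flight callbacks; backed
		 * here by the new hsu_dma_synchronize()/vchan_synchronize() */
		dmaengine_terminate_sync(chan);
	}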
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index e5c911200bdb..1fbf9cb9b742 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -496,6 +496,13 @@ static int idma64_terminate_all(struct dma_chan *chan)
return 0;
}
+static void idma64_synchronize(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+ vchan_synchronize(&idma64c->vchan);
+}
+
static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
struct idma64_chan *idma64c = to_idma64_chan(chan);
@@ -583,6 +590,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.device_pause = idma64_pause;
idma64->dma.device_resume = idma64_resume;
idma64->dma.device_terminate_all = idma64_terminate_all;
+ idma64->dma.device_synchronize = idma64_synchronize;
idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f077992635c2..b4ec2d20e661 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -41,6 +42,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include "dmaengine.h"
+#include "virt-dma.h"
/* SDMA registers */
#define SDMA_H_C0PTR 0x000
@@ -183,6 +185,7 @@
* Mode/Count of data node descriptors - IPCv2
*/
struct sdma_mode_count {
+#define SDMA_BD_MAX_CNT 0xffff
u32 count : 16; /* size of the buffer pointed by this BD */
u32 status : 8; /* E,R,I,C,W,D status bits stored here */
u32 command : 8; /* command mostly used for channel 0 */
@@ -200,9 +203,9 @@ struct sdma_buffer_descriptor {
/**
* struct sdma_channel_control - Channel control Block
*
- * @current_bd_ptr current buffer descriptor processed
- * @base_bd_ptr first element of buffer descriptor array
- * @unused padding. The SDMA engine expects an array of 128 byte
+ * @current_bd_ptr: current buffer descriptor processed
+ * @base_bd_ptr: first element of buffer descriptor array
+ * @unused: padding. The SDMA engine expects an array of 128 byte
* control blocks
*/
struct sdma_channel_control {
@@ -215,10 +218,13 @@ struct sdma_channel_control {
* struct sdma_state_registers - SDMA context for a channel
*
* @pc: program counter
+ * @unused1: unused
* @t: test bit: status of arithmetic & test instruction
* @rpc: return program counter
+ * @unused0: unused
* @sf: source fault while loading data
* @spc: loop start program counter
+ * @unused2: unused
* @df: destination fault while storing data
* @epc: loop end program counter
* @lm: loop mode
@@ -256,6 +262,14 @@ struct sdma_state_registers {
* @dsa: dedicated core source address register
* @ds: dedicated core status register
* @dd: dedicated core data register
+ * @scratch0: 1st word of dedicated ram for context switch
+ * @scratch1: 2nd word of dedicated ram for context switch
+ * @scratch2: 3rd word of dedicated ram for context switch
+ * @scratch3: 4th word of dedicated ram for context switch
+ * @scratch4: 5th word of dedicated ram for context switch
+ * @scratch5: 6th word of dedicated ram for context switch
+ * @scratch6: 7th word of dedicated ram for context switch
+ * @scratch7: 8th word of dedicated ram for context switch
*/
struct sdma_context_data {
struct sdma_state_registers channel_state;
@@ -284,25 +298,67 @@ struct sdma_context_data {
u32 scratch7;
} __attribute__ ((packed));
-#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
struct sdma_engine;
/**
+ * struct sdma_desc - descriptor structure for one transfer
+ * @vd: descriptor for virt dma
+ * @num_bd: number of buffer descriptors currently being handled
+ * @bd_phys: physical address of bd
+ * @buf_tail: ID of the buffer that was processed
+ * @buf_ptail: ID of the previous buffer that was processed
+ * @period_len: period length, used in cyclic mode
+ * @chn_real_count: the real count updated from bd->mode.count
+ * @chn_count: the transfer count set
+ * @sdmac: sdma_channel pointer
+ * @bd: pointer to the allocated bd array
+ */
+struct sdma_desc {
+ struct virt_dma_desc vd;
+ unsigned int num_bd;
+ dma_addr_t bd_phys;
+ unsigned int buf_tail;
+ unsigned int buf_ptail;
+ unsigned int period_len;
+ unsigned int chn_real_count;
+ unsigned int chn_count;
+ struct sdma_channel *sdmac;
+ struct sdma_buffer_descriptor *bd;
+};
+
+/**
* struct sdma_channel - housekeeping for a SDMA channel
*
- * @sdma pointer to the SDMA engine for this channel
- * @channel the channel number, matches dmaengine chan_id + 1
- * @direction transfer type. Needed for setting SDMA script
- * @peripheral_type Peripheral type. Needed for setting SDMA script
- * @event_id0 aka dma request line
- * @event_id1 for channels that use 2 events
- * @word_size peripheral access size
- * @buf_tail ID of the buffer that was processed
- * @buf_ptail ID of the previous buffer that was processed
- * @num_bd max NUM_BD. number of descriptors currently handling
+ * @vc: virt_dma base structure
+ * @desc: sdma description including vd and other special member
+ * @sdma: pointer to the SDMA engine for this channel
+ * @channel: the channel number, matches dmaengine chan_id + 1
+ * @direction: transfer type. Needed for setting SDMA script
+ * @peripheral_type: Peripheral type. Needed for setting SDMA script
+ * @event_id0: aka dma request line
+ * @event_id1: for channels that use 2 events
+ * @word_size: peripheral access size
+ * @pc_from_device: script address for those device_2_memory
+ * @pc_to_device: script address for those memory_2_device
+ * @device_to_device: script address for those device_2_device
+ * @pc_to_pc: script address for those memory_2_memory
+ * @flags: loop mode or not
+ * @per_address: peripheral source or destination address in common case
+ * destination address in p_2_p case
+ * @per_address2: peripheral source address in p_2_p case
+ * @event_mask: event mask used in p_2_p script
+ * @watermark_level: value for gReg[7], some script will extend it from
+ * basic watermark such as p_2_p
+ * @shp_addr: value for gReg[6]
+ * @per_addr: value for gReg[2]
+ * @status: status of dma channel
+ * @data: specific sdma interface structure
+ * @bd_pool: dma_pool for bd
*/
struct sdma_channel {
+ struct virt_dma_chan vc;
+ struct sdma_desc *desc;
struct sdma_engine *sdma;
unsigned int channel;
enum dma_transfer_direction direction;
@@ -310,28 +366,17 @@ struct sdma_channel {
unsigned int event_id0;
unsigned int event_id1;
enum dma_slave_buswidth word_size;
- unsigned int buf_tail;
- unsigned int buf_ptail;
- unsigned int num_bd;
- unsigned int period_len;
- struct sdma_buffer_descriptor *bd;
- dma_addr_t bd_phys;
unsigned int pc_from_device, pc_to_device;
unsigned int device_to_device;
+ unsigned int pc_to_pc;
unsigned long flags;
dma_addr_t per_address, per_address2;
unsigned long event_mask[2];
unsigned long watermark_level;
u32 shp_addr, per_addr;
- struct dma_chan chan;
- spinlock_t lock;
- struct dma_async_tx_descriptor desc;
enum dma_status status;
- unsigned int chn_count;
- unsigned int chn_real_count;
- struct tasklet_struct tasklet;
struct imx_dma_data data;
- bool enabled;
+ struct dma_pool *bd_pool;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -346,15 +391,15 @@ struct sdma_channel {
/**
* struct sdma_firmware_header - Layout of the firmware image
*
- * @magic "SDMA"
- * @version_major increased whenever layout of struct sdma_script_start_addrs
- * changes.
- * @version_minor firmware minor version (for binary compatible changes)
- * @script_addrs_start offset of struct sdma_script_start_addrs in this image
- * @num_script_addrs Number of script addresses in this image
- * @ram_code_start offset of SDMA ram image in this firmware image
- * @ram_code_size size of SDMA ram image
- * @script_addrs Stores the start address of the SDMA scripts
+ * @magic: "SDMA"
+ * @version_major: increased whenever layout of struct
+ * sdma_script_start_addrs changes.
+ * @version_minor: firmware minor version (for binary compatible changes)
+ * @script_addrs_start: offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs: Number of script addresses in this image
+ * @ram_code_start: offset of SDMA ram image in this firmware image
+ * @ram_code_size: size of SDMA ram image
+ * @script_addrs: Stores the start address of the SDMA scripts
* (in SDMA memory space)
*/
struct sdma_firmware_header {
@@ -391,6 +436,8 @@ struct sdma_engine {
u32 spba_start_addr;
u32 spba_end_addr;
unsigned int irq;
+ dma_addr_t bd0_phys;
+ struct sdma_buffer_descriptor *bd0;
};
static struct sdma_driver_data sdma_imx31 = {
@@ -590,14 +637,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
- unsigned long flags;
- struct sdma_channel *sdmac = &sdma->channel[channel];
-
writel(BIT(channel), sdma->regs + SDMA_H_START);
-
- spin_lock_irqsave(&sdmac->lock, flags);
- sdmac->enabled = true;
- spin_unlock_irqrestore(&sdmac->lock, flags);
}
/*
@@ -625,7 +665,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
u32 address)
{
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
void *buf_virt;
dma_addr_t buf_phys;
int ret;
@@ -681,26 +721,49 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
writel_relaxed(val, sdma->regs + chnenbl);
}
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct sdma_desc, vd.tx);
+}
+
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+ struct sdma_desc *desc;
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+
+ if (!vd) {
+ sdmac->desc = NULL;
+ return;
+ }
+ sdmac->desc = desc = to_sdma_desc(&vd->tx);
+ /*
+ * Do not delete the node in desc_issued list in cyclic mode, otherwise
+ * the desc allocated will never be freed in vchan_dma_desc_free_list
+ */
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+ list_del(&vd->node);
+
+ sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+ sdma_enable_channel(sdma, sdmac->channel);
+}
+
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
struct sdma_buffer_descriptor *bd;
int error = 0;
enum dma_status old_status = sdmac->status;
- unsigned long flags;
-
- spin_lock_irqsave(&sdmac->lock, flags);
- if (!sdmac->enabled) {
- spin_unlock_irqrestore(&sdmac->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&sdmac->lock, flags);
/*
* loop mode. Iterate over descriptors, re-setup them and
* call callback function.
*/
- while (1) {
- bd = &sdmac->bd[sdmac->buf_tail];
+ while (sdmac->desc) {
+ struct sdma_desc *desc = sdmac->desc;
+
+ bd = &desc->bd[desc->buf_tail];
if (bd->mode.status & BD_DONE)
break;
@@ -716,11 +779,11 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
* the number of bytes present in the current buffer descriptor.
*/
- sdmac->chn_real_count = bd->mode.count;
+ desc->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
- bd->mode.count = sdmac->period_len;
- sdmac->buf_ptail = sdmac->buf_tail;
- sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
+ bd->mode.count = desc->period_len;
+ desc->buf_ptail = desc->buf_tail;
+ desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
/*
* The callback is called from the interrupt context in order
@@ -728,41 +791,38 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
* SDMA transaction status by the time the client tasklet is
* executed.
*/
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+ spin_unlock(&sdmac->vc.lock);
+ dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+ spin_lock(&sdmac->vc.lock);
if (error)
sdmac->status = old_status;
}
}
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
struct sdma_buffer_descriptor *bd;
int i, error = 0;
- sdmac->chn_real_count = 0;
+ sdmac->desc->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
*/
- for (i = 0; i < sdmac->num_bd; i++) {
- bd = &sdmac->bd[i];
+ for (i = 0; i < sdmac->desc->num_bd; i++) {
+ bd = &sdmac->desc->bd[i];
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
- sdmac->chn_real_count += bd->mode.count;
+ sdmac->desc->chn_real_count += bd->mode.count;
}
if (error)
sdmac->status = DMA_ERROR;
else
sdmac->status = DMA_COMPLETE;
-
- dma_cookie_complete(&sdmac->desc);
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -778,12 +838,21 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
while (stat) {
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
+ struct sdma_desc *desc;
+
+ spin_lock(&sdmac->vc.lock);
+ desc = sdmac->desc;
+ if (desc) {
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ sdma_update_channel_loop(sdmac);
+ } else {
+ mxc_sdma_handle_channel_normal(sdmac);
+ vchan_cookie_complete(&desc->vd);
+ sdma_start_desc(sdmac);
+ }
+ }
- if (sdmac->flags & IMX_DMA_SG_LOOP)
- sdma_update_channel_loop(sdmac);
- else
- tasklet_schedule(&sdmac->tasklet);
-
+ spin_unlock(&sdmac->vc.lock);
__clear_bit(channel, &stat);
}
@@ -802,14 +871,16 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
* These are needed once we start to support transfers between
* two peripherals or memory-to-memory transfers
*/
- int per_2_per = 0;
+ int per_2_per = 0, emi_2_emi = 0;
sdmac->pc_from_device = 0;
sdmac->pc_to_device = 0;
sdmac->device_to_device = 0;
+ sdmac->pc_to_pc = 0;
switch (peripheral_type) {
case IMX_DMATYPE_MEMORY:
+ emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
break;
case IMX_DMATYPE_DSP:
emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -882,6 +953,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
sdmac->pc_from_device = per_2_emi;
sdmac->pc_to_device = emi_2_per;
sdmac->device_to_device = per_2_per;
+ sdmac->pc_to_pc = emi_2_emi;
}
static int sdma_load_context(struct sdma_channel *sdmac)
@@ -890,7 +962,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int channel = sdmac->channel;
int load_address;
struct sdma_context_data *context = sdma->context;
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
int ret;
unsigned long flags;
@@ -898,6 +970,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV)
load_address = sdmac->device_to_device;
+ else if (sdmac->direction == DMA_MEM_TO_MEM)
+ load_address = sdmac->pc_to_pc;
else
load_address = sdmac->pc_to_device;
@@ -939,7 +1013,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
- return container_of(chan, struct sdma_channel, chan);
+ return container_of(chan, struct sdma_channel, vc.chan);
}
static int sdma_disable_channel(struct dma_chan *chan)
@@ -947,21 +1021,25 @@ static int sdma_disable_channel(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int channel = sdmac->channel;
- unsigned long flags;
writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
sdmac->status = DMA_ERROR;
- spin_lock_irqsave(&sdmac->lock, flags);
- sdmac->enabled = false;
- spin_unlock_irqrestore(&sdmac->lock, flags);
-
return 0;
}
static int sdma_disable_channel_with_delay(struct dma_chan *chan)
{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
sdma_disable_channel(chan);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_get_all_descriptors(&sdmac->vc, &head);
+ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
/*
* According to NXP R&D team a delay of one BD SDMA cost time
@@ -1090,52 +1168,81 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
return 0;
}
-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_request_channel0(struct sdma_engine *sdma)
{
- struct sdma_engine *sdma = sdmac->sdma;
- int channel = sdmac->channel;
int ret = -EBUSY;
- sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
- GFP_KERNEL);
- if (!sdmac->bd) {
+ sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+ GFP_NOWAIT);
+ if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
}
- sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+ sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+ sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
- sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+ sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
return 0;
out:
return ret;
}
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+
+static int sdma_alloc_bd(struct sdma_desc *desc)
{
- unsigned long flags;
- struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
- dma_cookie_t cookie;
+ int ret = 0;
- spin_lock_irqsave(&sdmac->lock, flags);
+ desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
+ &desc->bd_phys);
+ if (!desc->bd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+out:
+ return ret;
+}
- cookie = dma_cookie_assign(tx);
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+ dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+}
- spin_unlock_irqrestore(&sdmac->lock, flags);
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+ struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
- return cookie;
+ sdma_free_bd(desc);
+ kfree(desc);
}
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct imx_dma_data *data = chan->private;
+ struct imx_dma_data mem_data;
int prio, ret;
- if (!data)
- return -EINVAL;
+ /*
+ * MEMCPY may never set up chan->private via a filter function (e.g.
+ * when used by dmatest), so create a local 'struct imx_dma_data
+ * mem_data' for this case. Note that in any other slave case you
+ * have to set up chan->private with 'struct imx_dma_data' in your
+ * own filter function if you want to request a dma channel by
+ * dma_request_channel() rather than dma_request_slave_channel().
+ * Otherwise, 'MEMCPY in case?' will appear to warn you to correct
+ * your filter function.
+ */
+ if (!data) {
+ dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
+ mem_data.priority = 2;
+ mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
+ mem_data.dma_request = 0;
+ mem_data.dma_request2 = 0;
+ data = &mem_data;
+
+ sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+ }
switch (data->priority) {
case DMA_PRIO_HIGH:
@@ -1161,18 +1268,13 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto disable_clk_ipg;
- ret = sdma_request_channel(sdmac);
- if (ret)
- goto disable_clk_ahb;
-
ret = sdma_set_channel_priority(sdmac, prio);
if (ret)
goto disable_clk_ahb;
- dma_async_tx_descriptor_init(&sdmac->desc, chan);
- sdmac->desc.tx_submit = sdma_tx_submit;
- /* txd.flags will be overwritten in prep funcs */
- sdmac->desc.flags = DMA_CTRL_ACK;
+ sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
+ sizeof(struct sdma_buffer_descriptor),
+ 32, 0);
return 0;
@@ -1188,7 +1290,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel(chan);
+ sdma_disable_channel_with_delay(chan);
if (sdmac->event_id0)
sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1200,10 +1302,105 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_set_channel_priority(sdmac, 0);
- dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
+
+ dma_pool_destroy(sdmac->bd_pool);
+ sdmac->bd_pool = NULL;
+}
+
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+ enum dma_transfer_direction direction, u32 bds)
+{
+ struct sdma_desc *desc;
+
+ desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
+ if (!desc)
+ goto err_out;
+
+ sdmac->status = DMA_IN_PROGRESS;
+ sdmac->direction = direction;
+ sdmac->flags = 0;
+
+ desc->chn_count = 0;
+ desc->chn_real_count = 0;
+ desc->buf_tail = 0;
+ desc->buf_ptail = 0;
+ desc->sdmac = sdmac;
+ desc->num_bd = bds;
+
+ if (sdma_alloc_bd(desc))
+ goto err_desc_out;
+
+ /* No slave_config called in MEMCPY case, so do here */
+ if (direction == DMA_MEM_TO_MEM)
+ sdma_config_ownership(sdmac, false, true, false);
+
+ if (sdma_load_context(sdmac))
+ goto err_desc_out;
+
+ return desc;
+
+err_desc_out:
+ kfree(desc);
+err_out:
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ size_t count;
+ int i = 0, param;
+ struct sdma_buffer_descriptor *bd;
+ struct sdma_desc *desc;
+
+ if (!chan || !len)
+ return NULL;
+
+ dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
+ &dma_src, &dma_dst, len, channel);
+
+ desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
+ len / SDMA_BD_MAX_CNT + 1);
+ if (!desc)
+ return NULL;
+
+ do {
+ count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+ bd = &desc->bd[i];
+ bd->buffer_addr = dma_src;
+ bd->ext_buffer_addr = dma_dst;
+ bd->mode.count = count;
+ desc->chn_count += count;
+ bd->mode.command = 0;
+
+ dma_src += count;
+ dma_dst += count;
+ len -= count;
+ i++;
+
+ param = BD_DONE | BD_EXTD | BD_CONT;
+ /* last bd */
+ if (!len) {
+ param |= BD_INTR;
+ param |= BD_LAST;
+ param &= ~BD_CONT;
+ }
+
+ dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
+ i, count, bd->buffer_addr,
+ param & BD_WRAP ? "wrap" : "",
+ param & BD_INTR ? " intr" : "");
+
+ bd->mode.status = param;
+ } while (len);
+
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
}
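A standalone sketch of the descriptor split above, with an assumed transfer size, shows how len / SDMA_BD_MAX_CNT + 1 buffer descriptors cover the copy:

	#include <stdio.h>

	#define SDMA_BD_MAX_CNT	0xffff

	int main(void)
	{
		size_t len = 150000;	/* assumed transfer size */
		unsigned int i, bds = len / SDMA_BD_MAX_CNT + 1;
		size_t count, off = 0;

		printf("%zu bytes -> %u buffer descriptors\n", len, bds);
		for (i = 0; len; i++) {
			count = len < SDMA_BD_MAX_CNT ? len : SDMA_BD_MAX_CNT;
			printf("bd %u: %zu bytes at offset %zu\n", i, count, off);
			off += count;
			len -= count;
		}
		return 0;	/* splits 150000 as 65535 + 65535 + 18930 */
	}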
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -1213,75 +1410,54 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- int ret, i, count;
+ int i, count;
int channel = sdmac->channel;
struct scatterlist *sg;
+ struct sdma_desc *desc;
- if (sdmac->status == DMA_IN_PROGRESS)
- return NULL;
- sdmac->status = DMA_IN_PROGRESS;
-
- sdmac->flags = 0;
-
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
+ desc = sdma_transfer_init(sdmac, direction, sg_len);
+ if (!desc)
+ goto err_out;
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
- sdmac->direction = direction;
- ret = sdma_load_context(sdmac);
- if (ret)
- goto err_out;
-
- if (sg_len > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, sg_len, NUM_BD);
- ret = -EINVAL;
- goto err_out;
- }
-
- sdmac->chn_count = 0;
for_each_sg(sgl, sg, sg_len, i) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
bd->buffer_addr = sg->dma_address;
count = sg_dma_len(sg);
- if (count > 0xffff) {
+ if (count > SDMA_BD_MAX_CNT) {
dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
- channel, count, 0xffff);
- ret = -EINVAL;
- goto err_out;
+ channel, count, SDMA_BD_MAX_CNT);
+ goto err_bd_out;
}
bd->mode.count = count;
- sdmac->chn_count += count;
+ desc->chn_count += count;
- if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
- ret = -EINVAL;
- goto err_out;
- }
+ if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+ goto err_bd_out;
switch (sdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
bd->mode.command = 0;
if (count & 3 || sg->dma_address & 3)
- return NULL;
+ goto err_bd_out;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
bd->mode.command = 2;
if (count & 1 || sg->dma_address & 1)
- return NULL;
+ goto err_bd_out;
break;
case DMA_SLAVE_BUSWIDTH_1_BYTE:
bd->mode.command = 1;
break;
default:
- return NULL;
+ goto err_bd_out;
}
param = BD_DONE | BD_EXTD | BD_CONT;
@@ -1300,10 +1476,10 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
bd->mode.status = param;
}
- sdmac->num_bd = sg_len;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
- return &sdmac->desc;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
sdmac->status = DMA_ERROR;
return NULL;
@@ -1318,40 +1494,27 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct sdma_engine *sdma = sdmac->sdma;
int num_periods = buf_len / period_len;
int channel = sdmac->channel;
- int ret, i = 0, buf = 0;
+ int i = 0, buf = 0;
+ struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
- if (sdmac->status == DMA_IN_PROGRESS)
- return NULL;
-
- sdmac->status = DMA_IN_PROGRESS;
+ desc = sdma_transfer_init(sdmac, direction, num_periods);
+ if (!desc)
+ goto err_out;
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
- sdmac->period_len = period_len;
+ desc->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
- sdmac->direction = direction;
- ret = sdma_load_context(sdmac);
- if (ret)
- goto err_out;
-
- if (num_periods > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, num_periods, NUM_BD);
- goto err_out;
- }
- if (period_len > 0xffff) {
+ if (period_len > SDMA_BD_MAX_CNT) {
dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
- channel, period_len, 0xffff);
- goto err_out;
+ channel, period_len, SDMA_BD_MAX_CNT);
+ goto err_bd_out;
}
while (buf < buf_len) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
bd->buffer_addr = dma_addr;
@@ -1359,7 +1522,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
bd->mode.count = period_len;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
- goto err_out;
+ goto err_bd_out;
if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
bd->mode.command = 0;
else
@@ -1382,10 +1545,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
i++;
}
- sdmac->num_bd = num_periods;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
- return &sdmac->desc;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
sdmac->status = DMA_ERROR;
return NULL;
@@ -1424,13 +1587,31 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_desc *desc;
u32 residue;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
- if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_ptail) *
- sdmac->period_len - sdmac->chn_real_count;
- else
- residue = sdmac->chn_count - sdmac->chn_real_count;
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vd = vchan_find_desc(&sdmac->vc, cookie);
+ if (vd) {
+ desc = to_sdma_desc(&vd->tx);
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ residue = (desc->num_bd - desc->buf_ptail) *
+ desc->period_len - desc->chn_real_count;
+ else
+ residue = desc->chn_count - desc->chn_real_count;
+ } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
+ residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
+ } else {
+ residue = 0;
+ }
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
residue);
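A worked instance of the cyclic residue formula above, with assumed values:

	/*
	 * num_bd = 4 periods of period_len = 4096 bytes, buf_ptail = 1 and
	 * chn_real_count = 4096 bytes already reported for that period:
	 *
	 *	residue = (4 - 1) * 4096 - 4096 = 8192 bytes outstanding
	 */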
@@ -1441,10 +1622,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
static void sdma_issue_pending(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_engine *sdma = sdmac->sdma;
+ unsigned long flags;
- if (sdmac->status == DMA_IN_PROGRESS)
- sdma_enable_channel(sdma, sdmac->channel);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+ sdma_start_desc(sdmac);
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
@@ -1650,7 +1833,7 @@ static int sdma_init(struct sdma_engine *sdma)
for (i = 0; i < MAX_DMA_CHANNELS; i++)
writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
- ret = sdma_request_channel(&sdma->channel[0]);
+ ret = sdma_request_channel0(sdma);
if (ret)
goto err_dma_alloc;
@@ -1805,6 +1988,7 @@ static int sdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
@@ -1812,22 +1996,16 @@ static int sdma_probe(struct platform_device *pdev)
struct sdma_channel *sdmac = &sdma->channel[i];
sdmac->sdma = sdma;
- spin_lock_init(&sdmac->lock);
- sdmac->chan.device = &sdma->dma_device;
- dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
-
- tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
- (unsigned long) sdmac);
+ sdmac->vc.desc_free = sdma_desc_free;
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
* that channel 0 in dmaengine counting matches sdma channel 1.
*/
if (i)
- list_add_tail(&sdmac->chan.device_node,
- &sdma->dma_device.channels);
+ vchan_init(&sdmac->vc, &sdma->dma_device);
}
ret = sdma_init(sdma);
@@ -1877,9 +2055,10 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
- dma_set_max_seg_size(sdma->dma_device.dev, 65535);
+ dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
platform_set_drvdata(pdev, sdma);
@@ -1932,7 +2111,8 @@ static int sdma_remove(struct platform_device *pdev)
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
- tasklet_kill(&sdmac->tasklet);
+ tasklet_kill(&sdmac->vc.task);
+ sdma_free_chan_resources(&sdmac->vc.chan);
}
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 8b5b23a8ace9..23fb2fa04000 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -688,6 +688,12 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
u64 phys_complete;
+ /* set the completion address register again */
+ writel(lower_32_bits(ioat_chan->completion_dma),
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+ writel(upper_32_bits(ioat_chan->completion_dma),
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
ioat_quiesce(ioat_chan, 0);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
__cleanup(ioat_chan, phys_complete);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index fa31cccbe04f..6bfa217ed6d0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct k3_dma_dev *d = ofdma->of_dma_data;
unsigned int request = dma_spec->args[0];
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
return dma_get_slave_channel(&(d->chans[request].vc.chan));
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 68dd79783b54..b76cb17d879c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -470,11 +470,6 @@ static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
mic_dma_chan_mask_intr(ch);
}
-static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
-{
- dma_async_device_unregister(&mic_dma_dev->dma_dev);
-}
-
static int mic_dma_setup_irq(struct mic_dma_chan *ch)
{
ch->cookie =
@@ -630,7 +625,7 @@ static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
&mic_dma_dev->dma_dev.channels);
}
- return dma_async_device_register(&mic_dma_dev->dma_dev);
+ return dmaenginem_async_device_register(&mic_dma_dev->dma_dev);
}
/*
@@ -678,7 +673,6 @@ alloc_error:
static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
- mic_dma_unregister_dma_device(mic_dma_dev);
mic_dma_uninit(mic_dma_dev);
kfree(mic_dma_dev);
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index c6589ccf1b9a..8dc0aa4d73ab 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -174,6 +174,7 @@ struct mv_xor_v2_device {
int desc_size;
unsigned int npendings;
unsigned int hw_queue_idx;
+ struct msi_desc *msi_desc;
};
/**
@@ -588,11 +589,9 @@ static void mv_xor_v2_tasklet(unsigned long data)
*/
dma_cookie_complete(&next_pending_sw_desc->async_tx);
- if (next_pending_sw_desc->async_tx.callback)
- next_pending_sw_desc->async_tx.callback(
- next_pending_sw_desc->async_tx.callback_param);
-
dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
+ dmaengine_desc_get_callback_invoke(
+ &next_pending_sw_desc->async_tx, NULL);
}
dma_run_dependencies(&next_pending_sw_desc->async_tx);
@@ -643,9 +642,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
 /* write the DESQ address to the DMA engine */
- writel(xor_dev->hw_desq & 0xFFFFFFFF,
+ writel(lower_32_bits(xor_dev->hw_desq),
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
- writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+ writel(upper_32_bits(xor_dev->hw_desq),
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
/*
@@ -780,6 +779,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
msi_desc = first_msi_entry(&pdev->dev);
if (!msi_desc)
goto free_msi_irqs;
+ xor_dev->msi_desc = msi_desc;
ret = devm_request_irq(&pdev->dev, msi_desc->irq,
mv_xor_v2_interrupt_handler, 0,
@@ -897,8 +897,12 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
xor_dev->hw_desq_virt, xor_dev->hw_desq);
+ devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
+
platform_msi_domain_free_irqs(&pdev->dev);
+ tasklet_kill(&xor_dev->irq_tasklet);
+
clk_disable_unprepare(xor_dev->clk);
return 0;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 2f9974ddfbb2..8c7b2e8703da 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -479,6 +479,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
default:
pr_warn("%s(): invalid bus width %u\n", __func__, width);
+ /* fall through */
case DMA_SLAVE_BUSWIDTH_1_BYTE:
size = burst;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
new file mode 100644
index 000000000000..7812a6338acd
--- /dev/null
+++ b/drivers/dma/owl-dma.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Actions Semi Owl SoCs DMA driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include "virt-dma.h"
+
+#define OWL_DMA_FRAME_MAX_LENGTH 0xfffff
+
+/* Global DMA Controller Registers */
+#define OWL_DMA_IRQ_PD0 0x00
+#define OWL_DMA_IRQ_PD1 0x04
+#define OWL_DMA_IRQ_PD2 0x08
+#define OWL_DMA_IRQ_PD3 0x0C
+#define OWL_DMA_IRQ_EN0 0x10
+#define OWL_DMA_IRQ_EN1 0x14
+#define OWL_DMA_IRQ_EN2 0x18
+#define OWL_DMA_IRQ_EN3 0x1C
+#define OWL_DMA_SECURE_ACCESS_CTL 0x20
+#define OWL_DMA_NIC_QOS 0x24
+#define OWL_DMA_DBGSEL 0x28
+#define OWL_DMA_IDLE_STAT 0x2C
+
+/* Channel Registers */
+#define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100)
+#define OWL_DMAX_MODE 0x00
+#define OWL_DMAX_SOURCE 0x04
+#define OWL_DMAX_DESTINATION 0x08
+#define OWL_DMAX_FRAME_LEN 0x0C
+#define OWL_DMAX_FRAME_CNT 0x10
+#define OWL_DMAX_REMAIN_FRAME_CNT 0x14
+#define OWL_DMAX_REMAIN_CNT 0x18
+#define OWL_DMAX_SOURCE_STRIDE 0x1C
+#define OWL_DMAX_DESTINATION_STRIDE 0x20
+#define OWL_DMAX_START 0x24
+#define OWL_DMAX_PAUSE 0x28
+#define OWL_DMAX_CHAINED_CTL 0x2C
+#define OWL_DMAX_CONSTANT 0x30
+#define OWL_DMAX_LINKLIST_CTL 0x34
+#define OWL_DMAX_NEXT_DESCRIPTOR 0x38
+#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 0x3C
+#define OWL_DMAX_INT_CTL 0x40
+#define OWL_DMAX_INT_STATUS 0x44
+#define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48
+#define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C
+
+/* OWL_DMAX_MODE Bits */
+#define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0)
+#define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8)
+#define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0)
+#define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2)
+#define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3)
+#define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10)
+#define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0)
+#define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2)
+#define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3)
+#define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16)
+#define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0)
+#define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1)
+#define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2)
+#define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18)
+#define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0)
+#define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1)
+#define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2)
+#define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20)
+#define OWL_DMA_MODE_CB BIT(23)
+#define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28)
+#define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0)
+#define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1)
+#define OWL_DMA_MODE_CFE BIT(29)
+#define OWL_DMA_MODE_LME BIT(30)
+#define OWL_DMA_MODE_CME BIT(31)
+
+/* OWL_DMAX_LINKLIST_CTL Bits */
+#define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8)
+#define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0)
+#define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1)
+#define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2)
+#define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10)
+#define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0)
+#define OWL_DMA_LLC_DAV_LOAD_NEXT OWL_DMA_LLC_DAV(1)
+#define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2)
+#define OWL_DMA_LLC_SUSPEND BIT(16)
+
+/* OWL_DMAX_INT_CTL Bits */
+#define OWL_DMA_INTCTL_BLOCK BIT(0)
+#define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1)
+#define OWL_DMA_INTCTL_FRAME BIT(2)
+#define OWL_DMA_INTCTL_HALF_FRAME BIT(3)
+#define OWL_DMA_INTCTL_LAST_FRAME BIT(4)
+
+/* OWL_DMAX_INT_STATUS Bits */
+#define OWL_DMA_INTSTAT_BLOCK BIT(0)
+#define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1)
+#define OWL_DMA_INTSTAT_FRAME BIT(2)
+#define OWL_DMA_INTSTAT_HALF_FRAME BIT(3)
+#define OWL_DMA_INTSTAT_LAST_FRAME BIT(4)
+
+/* Extract a 'width'-bit field of 'val' at 'shift' and re-place it at 'newshift' */
+#define BIT_FIELD(val, width, shift, newshift) \
+ ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
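A standalone illustration of BIT_FIELD() with assumed values, as used by llc_hw_ctrla() below to repack OWL_DMAX_MODE fields into the ctrla word:

	#include <stdio.h>

	#define BIT(nr)		(1UL << (nr))
	#define BIT_FIELD(val, width, shift, newshift)	\
			((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))

	int main(void)
	{
		/* bits [11:8] of 0xABCD are 0xB; re-placed at bit 4 -> 0xB0 */
		printf("0x%lx\n", BIT_FIELD(0xABCDUL, 4, 8, 4));
		return 0;
	}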
+
+/**
+ * struct owl_dma_lli_hw - Hardware link list for dma transfer
+ * @next_lli: physical address of the next link list
+ * @saddr: source physical address
+ * @daddr: destination physical address
+ * @flen: frame length
+ * @fcnt: frame count
+ * @src_stride: source stride
+ * @dst_stride: destination stride
+ * @ctrla: dma_mode and linklist ctrl config
+ * @ctrlb: interrupt config
+ * @const_num: data for constant fill
+ */
+struct owl_dma_lli_hw {
+ u32 next_lli;
+ u32 saddr;
+ u32 daddr;
+ u32 flen:20;
+ u32 fcnt:12;
+ u32 src_stride;
+ u32 dst_stride;
+ u32 ctrla;
+ u32 ctrlb;
+ u32 const_num;
+};
+
+/**
+ * struct owl_dma_lli - Link list for dma transfer
+ * @hw: hardware link list
+ * @phys: physical address of hardware link list
+ * @node: node for txd's lli_list
+ */
+struct owl_dma_lli {
+ struct owl_dma_lli_hw hw;
+ dma_addr_t phys;
+ struct list_head node;
+};
+
+/**
+ * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @lli_list: link list of lli nodes
+ */
+struct owl_dma_txd {
+ struct virt_dma_desc vd;
+ struct list_head lli_list;
+};
+
+/**
+ * struct owl_dma_pchan - Holder for the physical channels
+ * @id: physical index to this channel
+ * @base: virtual memory base for the dma channel
+ * @vchan: the virtual channel currently being served by this physical channel
+ * @lock: a lock to use when altering an instance of this struct
+ */
+struct owl_dma_pchan {
+ u32 id;
+ void __iomem *base;
+ struct owl_dma_vchan *vchan;
+ spinlock_t lock;
+};
+
+/**
+ * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
+ * @vc: wrapped virtual channel
+ * @pchan: the physical channel utilized by this channel
+ * @txd: active transaction on this channel
+ */
+struct owl_dma_vchan {
+ struct virt_dma_chan vc;
+ struct owl_dma_pchan *pchan;
+ struct owl_dma_txd *txd;
+};
+
+/**
+ * struct owl_dma - Holder for the Owl DMA controller
+ * @dma: dma engine for this instance
+ * @base: virtual memory base for the DMA controller
+ * @clk: clock for the DMA controller
+ * @lock: a lock to use when changing DMA controller global registers
+ * @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt number of the DMA controller
+ * @nr_pchans: the number of physical channels
+ * @pchans: array of data for the physical channels
+ * @nr_vchans: the number of virtual channels
+ * @vchans: array of data for the virtual channels
+ */
+struct owl_dma {
+ struct dma_device dma;
+ void __iomem *base;
+ struct clk *clk;
+ spinlock_t lock;
+ struct dma_pool *lli_pool;
+ int irq;
+
+ unsigned int nr_pchans;
+ struct owl_dma_pchan *pchans;
+
+ unsigned int nr_vchans;
+ struct owl_dma_vchan *vchans;
+};
+
+static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
+ u32 val, bool state)
+{
+ u32 regval;
+
+ regval = readl(pchan->base + reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, pchan->base + reg);
+}
+
+static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
+{
+ writel(data, pchan->base + reg);
+}
+
+static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
+{
+ return readl(pchan->base + reg);
+}
+
+static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
+{
+ u32 regval;
+
+ regval = readl(od->base + reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, od->base + reg);
+}
+
+static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
+{
+ writel(data, od->base + reg);
+}
+
+static u32 dma_readl(struct owl_dma *od, u32 reg)
+{
+ return readl(od->base + reg);
+}
+
+static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
+{
+ return container_of(dd, struct owl_dma, dma);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
+{
+ return container_of(chan, struct owl_dma_vchan, vc.chan);
+}
+
+static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct owl_dma_txd, vd.tx);
+}
+
+static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
+{
+ u32 ctl;
+
+ ctl = BIT_FIELD(mode, 4, 28, 28) |
+ BIT_FIELD(mode, 8, 16, 20) |
+ BIT_FIELD(mode, 4, 8, 16) |
+ BIT_FIELD(mode, 6, 0, 10) |
+ BIT_FIELD(llc_ctl, 2, 10, 8) |
+ BIT_FIELD(llc_ctl, 2, 8, 6);
+
+ return ctl;
+}
+
+static inline u32 llc_hw_ctrlb(u32 int_ctl)
+{
+ u32 ctl;
+
+ ctl = BIT_FIELD(int_ctl, 7, 0, 18);
+
+ return ctl;
+}
+
+static void owl_dma_free_lli(struct owl_dma *od,
+ struct owl_dma_lli *lli)
+{
+ list_del(&lli->node);
+ dma_pool_free(od->lli_pool, lli, lli->phys);
+}
+
+static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
+{
+ struct owl_dma_lli *lli;
+ dma_addr_t phys;
+
+ lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
+ if (!lli)
+ return NULL;
+
+ INIT_LIST_HEAD(&lli->node);
+ lli->phys = phys;
+
+ return lli;
+}
+
+static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
+ struct owl_dma_lli *prev,
+ struct owl_dma_lli *next)
+{
+ list_add_tail(&next->node, &txd->lli_list);
+
+ if (prev) {
+ prev->hw.next_lli = next->phys;
+ prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
+ }
+
+ return next;
+}
+
+static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
+ struct owl_dma_lli *lli,
+ dma_addr_t src, dma_addr_t dst,
+ u32 len, enum dma_transfer_direction dir)
+{
+ struct owl_dma_lli_hw *hw = &lli->hw;
+ u32 mode;
+
+ mode = OWL_DMA_MODE_PW(0);
+
+ switch (dir) {
+ case DMA_MEM_TO_MEM:
+ mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
+ OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
+ OWL_DMA_MODE_DAM_INC;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw->next_lli = 0; /* One node by default; owl_dma_add_lli() chains more */
+ hw->saddr = src;
+ hw->daddr = dst;
+
+ hw->fcnt = 1; /* Frame count fixed as 1 */
+ hw->flen = len; /* Max frame length is 1MB */
+ hw->src_stride = 0;
+ hw->dst_stride = 0;
+ hw->ctrla = llc_hw_ctrla(mode,
+ OWL_DMA_LLC_SAV_LOAD_NEXT |
+ OWL_DMA_LLC_DAV_LOAD_NEXT);
+
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+
+ return 0;
+}
+
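+/*
+ * Claim the first idle physical channel for a virtual channel;
+ * returns NULL when every pchan is already in use.
+ */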
+static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
+ struct owl_dma_vchan *vchan)
+{
+ struct owl_dma_pchan *pchan = NULL;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < od->nr_pchans; i++) {
+ pchan = &od->pchans[i];
+
+ spin_lock_irqsave(&pchan->lock, flags);
+ if (!pchan->vchan) {
+ pchan->vchan = vchan;
+ spin_unlock_irqrestore(&pchan->lock, flags);
+ return pchan;
+ }
+
+ spin_unlock_irqrestore(&pchan->lock, flags);
+ }
+
+ return NULL;
+}
+
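+/*
+ * OWL_DMA_IDLE_STAT has one bit per physical channel; a set bit means
+ * idle, so the channel is busy while its bit reads back as zero.
+ */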
+static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
+{
+ unsigned int val;
+
+ val = dma_readl(od, OWL_DMA_IDLE_STAT);
+
+ return !(val & (1 << pchan->id));
+}
+
+static void owl_dma_terminate_pchan(struct owl_dma *od,
+ struct owl_dma_pchan *pchan)
+{
+ unsigned long flags;
+ u32 irq_pd;
+
+ pchan_writel(pchan, OWL_DMAX_START, 0);
+ pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
+
+ spin_lock_irqsave(&od->lock, flags);
+ dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);
+
+ irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
+ if (irq_pd & (1 << pchan->id)) {
+ dev_warn(od->dma.dev,
+ "terminating pchan %d that still has pending irq\n",
+ pchan->id);
+ dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
+ }
+
+ pchan->vchan = NULL;
+
+ spin_unlock_irqrestore(&od->lock, flags);
+}
+
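+/*
+ * Program a claimed pchan with the first LLI of the next queued
+ * descriptor: enable linked-list mode, point NEXT_DESCRIPTOR at the
+ * chain, unmask the channel interrupt and set the START bit. Called
+ * with the vchan lock held and vchan->pchan assigned.
+ */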
+static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
+{
+ struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
+ struct owl_dma_pchan *pchan = vchan->pchan;
+ struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
+ struct owl_dma_lli *lli;
+ unsigned long flags;
+ u32 int_ctl;
+
+ list_del(&vd->node);
+
+ vchan->txd = txd;
+
+ /* Wait for channel inactive */
+ while (owl_dma_pchan_busy(od, pchan))
+ cpu_relax();
+
+ lli = list_first_entry(&txd->lli_list,
+ struct owl_dma_lli, node);
+
+ int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+
+ pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
+ pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
+ OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
+ pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
+ pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);
+
+ /* Clear IRQ status for this pchan */
+ pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
+
+ spin_lock_irqsave(&od->lock, flags);
+
+ dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);
+
+ spin_unlock_irqrestore(&od->lock, flags);
+
+ dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);
+
+ /* Start DMA transfer for this pchan */
+ pchan_writel(pchan, OWL_DMAX_START, 0x1);
+
+ return 0;
+}
+
+static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
+{
+ /* Ensure that the physical channel is stopped */
+ owl_dma_terminate_pchan(od, vchan->pchan);
+
+ vchan->pchan = NULL;
+}
+
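+/*
+ * Single interrupt line shared by all pchans: latch the global pending
+ * mask, clear per-channel and global status (re-checking for bits the
+ * global register may have missed), then complete the active
+ * descriptor on each signalled channel and start its next one or
+ * release the pchan.
+ */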
+static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
+{
+ struct owl_dma *od = dev_id;
+ struct owl_dma_vchan *vchan;
+ struct owl_dma_pchan *pchan;
+ unsigned long pending;
+ int i;
+ unsigned int global_irq_pending, chan_irq_pending;
+
+ spin_lock(&od->lock);
+
+ pending = dma_readl(od, OWL_DMA_IRQ_PD0);
+
+ /* Clear IRQ status for each pchan */
+ for_each_set_bit(i, &pending, od->nr_pchans) {
+ pchan = &od->pchans[i];
+ pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
+ }
+
+ /* Clear pending IRQ */
+ dma_writel(od, OWL_DMA_IRQ_PD0, pending);
+
+ /* Check missed pending IRQ */
+ for (i = 0; i < od->nr_pchans; i++) {
+ pchan = &od->pchans[i];
+ chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
+ pchan_readl(pchan, OWL_DMAX_INT_STATUS);
+
+ /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
+ dma_readl(od, OWL_DMA_IRQ_PD0);
+
+ global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
+
+ if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
+ dev_dbg(od->dma.dev,
+ "global and channel IRQ pending match err\n");
+
+ /* Clear IRQ status for this pchan */
+ pchan_update(pchan, OWL_DMAX_INT_STATUS,
+ 0xff, false);
+
+ /* Update global IRQ pending */
+ pending |= BIT(i);
+ }
+ }
+
+ spin_unlock(&od->lock);
+
+ for_each_set_bit(i, &pending, od->nr_pchans) {
+ struct owl_dma_txd *txd;
+
+ pchan = &od->pchans[i];
+
+ vchan = pchan->vchan;
+ if (!vchan) {
+ dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
+ pchan->id);
+ continue;
+ }
+
+ spin_lock(&vchan->vc.lock);
+
+ txd = vchan->txd;
+ if (txd) {
+ vchan->txd = NULL;
+
+ vchan_cookie_complete(&txd->vd);
+
+ /*
+ * Start the next descriptor (if any),
+ * otherwise free this channel.
+ */
+ if (vchan_next_desc(&vchan->vc))
+ owl_dma_start_next_txd(vchan);
+ else
+ owl_dma_phy_free(od, vchan);
+ }
+
+ spin_unlock(&vchan->vc.lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
+{
+ struct owl_dma_lli *lli, *_lli;
+
+ if (unlikely(!txd))
+ return;
+
+ list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
+ owl_dma_free_lli(od, lli);
+
+ kfree(txd);
+}
+
+static void owl_dma_desc_free(struct virt_dma_desc *vd)
+{
+ struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
+ struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
+
+ owl_dma_free_txd(od, txd);
+}
+
+static int owl_dma_terminate_all(struct dma_chan *chan)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ if (vchan->pchan)
+ owl_dma_phy_free(od, vchan);
+
+ if (vchan->txd) {
+ owl_dma_desc_free(&vchan->txd->vd);
+ vchan->txd = NULL;
+ }
+
+ vchan_get_all_descriptors(&vchan->vc, &head);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
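+/*
+ * Residue of the in-flight descriptor: the hardware remain count of
+ * the node currently being transferred, plus the full frame length of
+ * every node the controller has not fetched yet.
+ */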
+static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
+{
+ struct owl_dma_pchan *pchan;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli;
+ unsigned int next_lli_phy;
+ size_t bytes;
+
+ pchan = vchan->pchan;
+ txd = vchan->txd;
+
+ if (!pchan || !txd)
+ return 0;
+
+ /* Get remain count of current node in link list */
+ bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);
+
+ /* Sum the not-yet-transferred nodes to get the total remaining bytes */
+ if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
+ next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
+ list_for_each_entry(lli, &txd->lli_list, node) {
+ /* Start from the next active node */
+ if (lli->phys == next_lli_phy) {
+ list_for_each_entry_from(lli, &txd->lli_list, node)
+ bytes += lli->hw.flen;
+ break;
+ }
+ }
+ }
+
+ return bytes;
+}
+
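+/*
+ * For a descriptor still sitting in the queue the residue is the whole
+ * transfer length; for the one running on a pchan it is read back from
+ * the hardware via owl_dma_getbytes_chan().
+ */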
+static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct owl_dma_lli *lli;
+ struct virt_dma_desc *vd;
+ struct owl_dma_txd *txd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE || !state)
+ return ret;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ vd = vchan_find_desc(&vchan->vc, cookie);
+ if (vd) {
+ txd = to_owl_txd(&vd->tx);
+ list_for_each_entry(lli, &txd->lli_list, node)
+ bytes += lli->hw.flen;
+ } else {
+ bytes = owl_dma_getbytes_chan(vchan);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ dma_set_residue(state, bytes);
+
+ return ret;
+}
+
+static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
+{
+ struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
+ struct owl_dma_pchan *pchan;
+
+ pchan = owl_dma_get_pchan(od, vchan);
+ if (!pchan)
+ return;
+
+ dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);
+
+ vchan->pchan = pchan;
+ owl_dma_start_next_txd(vchan);
+}
+
+static void owl_dma_issue_pending(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+ if (vchan_issue_pending(&vchan->vc)) {
+ if (!vchan->pchan)
+ owl_dma_phy_alloc_and_start(vchan);
+ }
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
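+/*
+ * Build a memcpy descriptor as a chain of LLIs, each covering at most
+ * OWL_DMA_FRAME_MAX_LENGTH bytes of a DCU-to-DCU transfer with
+ * incrementing source and destination addresses.
+ */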
+static struct dma_async_tx_descriptor
+ *owl_dma_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL;
+ size_t offset, bytes;
+ int ret;
+
+ if (!len)
+ return NULL;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+
+ /* Process the transfer frame by frame */
+ for (offset = 0; offset < len; offset += bytes) {
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_warn(chan2dev(chan), "failed to allocate lli\n");
+ goto err_txd_free;
+ }
+
+ bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
+
+ ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
+ bytes, DMA_MEM_TO_MEM);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli\n");
+ goto err_txd_free;
+ }
+
+ prev = owl_dma_add_lli(txd, prev, lli);
+ }
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+ return NULL;
+}
+
+static void owl_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+ /* Ensure all queued descriptors are freed */
+ vchan_free_chan_resources(&vchan->vc);
+}
+
+static inline void owl_dma_free(struct owl_dma *od)
+{
+ struct owl_dma_vchan *vchan = NULL;
+ struct owl_dma_vchan *next;
+
+ list_for_each_entry_safe(vchan,
+ next, &od->dma.channels, vc.chan.device_node) {
+ list_del(&vchan->vc.chan.device_node);
+ tasklet_kill(&vchan->vc.task);
+ }
+}
+
+static int owl_dma_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct owl_dma *od;
+ struct resource *res;
+ int ret, i, nr_channels, nr_requests;
+
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ od->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(od->base))
+ return PTR_ERR(od->base);
+
+ ret = of_property_read_u32(np, "dma-channels", &nr_channels);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get dma-channels\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "dma-requests", &nr_requests);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get dma-requests\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
+ nr_channels, nr_requests);
+
+ od->nr_pchans = nr_channels;
+ od->nr_vchans = nr_requests;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ platform_set_drvdata(pdev, od);
+ spin_lock_init(&od->lock);
+
+ dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+
+ od->dma.dev = &pdev->dev;
+ od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
+ od->dma.device_tx_status = owl_dma_tx_status;
+ od->dma.device_issue_pending = owl_dma_issue_pending;
+ od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+ od->dma.device_terminate_all = owl_dma_terminate_all;
+ od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ od->dma.directions = BIT(DMA_MEM_TO_MEM);
+ od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ INIT_LIST_HEAD(&od->dma.channels);
+
+ od->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(od->clk)) {
+ dev_err(&pdev->dev, "unable to get clock\n");
+ return PTR_ERR(od->clk);
+ }
+
+ /*
+ * Even though the DMA controller is capable of generating 4
+ * IRQs for the DMA priority feature, we use only 1 IRQ for
+ * simplicity.
+ */
+ od->irq = platform_get_irq(pdev, 0);
+ if (od->irq < 0)
+ return od->irq;
+
+ ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
+ dev_name(&pdev->dev), od);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request IRQ\n");
+ return ret;
+ }
+
+ /* Init physical channel */
+ od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
+ sizeof(struct owl_dma_pchan), GFP_KERNEL);
+ if (!od->pchans)
+ return -ENOMEM;
+
+ for (i = 0; i < od->nr_pchans; i++) {
+ struct owl_dma_pchan *pchan = &od->pchans[i];
+
+ pchan->id = i;
+ pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
+ }
+
+ /* Init virtual channel */
+ od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
+ sizeof(struct owl_dma_vchan), GFP_KERNEL);
+ if (!od->vchans)
+ return -ENOMEM;
+
+ for (i = 0; i < od->nr_vchans; i++) {
+ struct owl_dma_vchan *vchan = &od->vchans[i];
+
+ vchan->vc.desc_free = owl_dma_desc_free;
+ vchan_init(&vchan->vc, &od->dma);
+ }
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
+ sizeof(struct owl_dma_lli),
+ __alignof__(struct owl_dma_lli),
+ 0);
+ if (!od->lli_pool) {
+ dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ ret = clk_prepare_enable(od->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ dma_pool_destroy(od->lli_pool);
+ return ret;
+ }
+
+ ret = dma_async_device_register(&od->dma);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register DMA engine device\n");
+ goto err_pool_free;
+ }
+
+ return 0;
+
+err_pool_free:
+ clk_disable_unprepare(od->clk);
+ dma_pool_destroy(od->lli_pool);
+
+ return ret;
+}
+
+static int owl_dma_remove(struct platform_device *pdev)
+{
+ struct owl_dma *od = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&od->dma);
+
+ /* Mask all interrupts for this execution environment */
+ dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);
+
+ /* Make sure we won't have any further interrupts */
+ devm_free_irq(od->dma.dev, od->irq, od);
+
+ owl_dma_free(od);
+
+ clk_disable_unprepare(od->clk);
+
+ return 0;
+}
+
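+/*
+ * Matches DT nodes shaped like the sketch below (illustrative values
+ * only; the real address, interrupt and channel counts come from the
+ * SoC dtsi):
+ *
+ * dma: dma-controller@e0260000 {
+ * compatible = "actions,s900-dma";
+ * reg = <0xe0260000 0x1000>;
+ * interrupts = <...>;
+ * dma-channels = <12>;
+ * dma-requests = <46>;
+ * };
+ */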
+static const struct of_device_id owl_dma_match[] = {
+ { .compatible = "actions,s900-dma", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_dma_match);
+
+static struct platform_driver owl_dma_driver = {
+ .probe = owl_dma_probe,
+ .remove = owl_dma_remove,
+ .driver = {
+ .name = "dma-owl",
+ .of_match_table = of_match_ptr(owl_dma_match),
+ },
+};
+
+static int owl_dma_init(void)
+{
+ return platform_driver_register(&owl_dma_driver);
+}
+subsys_initcall(owl_dma_init);
+
+static void __exit owl_dma_exit(void)
+{
+ platform_driver_unregister(&owl_dma_driver);
+}
+module_exit(owl_dma_exit);
+
+MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index defcdde4d358..88750a34e859 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1046,13 +1046,16 @@ static bool _start(struct pl330_thread *thrd)
if (_state(thrd) == PL330_STATE_KILLING)
UNTIL(thrd, PL330_STATE_STOPPED)
+ /* fall through */
case PL330_STATE_FAULTING:
_stop(thrd);
+ /* fall through */
case PL330_STATE_KILLING:
case PL330_STATE_COMPLETING:
UNTIL(thrd, PL330_STATE_STOPPED)
+ /* fall through */
case PL330_STATE_STOPPED:
return _trigger(thrd);
@@ -1779,8 +1782,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
static void pl330_release_channel(struct pl330_thread *thrd)
{
- struct pl330_dmac *pl330;
-
if (!thrd || thrd->free)
return;
@@ -1789,8 +1790,6 @@ static void pl330_release_channel(struct pl330_thread *thrd)
dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
- pl330 = thrd->dmac;
-
_free_event(thrd, thrd->ev);
thrd->free = true;
}
@@ -2257,13 +2256,14 @@ static int pl330_terminate_all(struct dma_chan *chan)
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
+
spin_lock(&pl330->lock);
_stop(pch->thread);
- spin_unlock(&pl330->lock);
-
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
+ spin_unlock(&pl330->lock);
+
power_down = pch->active;
pch->active = false;
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
1 : PL330_MAX_BURST);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b53fb618bbf6..b31c28b67ad3 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -179,6 +179,8 @@ static unsigned int pxad_drcmr(unsigned int line)
return 0x1000 + line * 4;
}
+bool pxad_filter_fn(struct dma_chan *chan, void *param);
+
/*
* Debug fs
*/
@@ -760,6 +762,8 @@ static void pxad_free_chan_resources(struct dma_chan *dchan)
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
+ chan->drcmr = U32_MAX;
+ chan->prio = PXAD_PRIO_LOWEST;
}
static void pxad_free_desc(struct virt_dma_desc *vd)
@@ -1384,6 +1388,9 @@ static int pxad_init_dmadev(struct platform_device *op,
c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
+
+ c->drcmr = U32_MAX;
+ c->prio = PXAD_PRIO_LOWEST;
c->vc.desc_free = pxad_free_desc;
vchan_init(&c->vc, &pdev->slave);
init_waitqueue_head(&c->wq_state);
@@ -1396,9 +1403,10 @@ static int pxad_probe(struct platform_device *op)
{
struct pxad_device *pdev;
const struct of_device_id *of_id;
+ const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0, nb_requestors = 0;
+ int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1429,6 +1437,8 @@ static int pxad_probe(struct platform_device *op)
} else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors;
+ slave_map = pdata->slave_map;
+ slave_map_cnt = pdata->slave_map_cnt;
} else {
dma_channels = 32; /* default 32 channel */
}
@@ -1440,6 +1450,9 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
+ pdev->slave.filter.map = slave_map;
+ pdev->slave.filter.mapcnt = slave_map_cnt;
+ pdev->slave.filter.fn = pxad_filter_fn;
pdev->slave.copy_align = PDMA_ALIGNMENT;
pdev->slave.src_addr_widths = widths;
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 7056fe7513b4..64744eb88720 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2a2ccd9c78e4..48ee35e2bce6 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen2 DMA Controller Driver
*
* Copyright (C) 2014 Renesas Electronics Inc.
*
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
@@ -431,7 +428,8 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
}
- rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR,
+ chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}
static int rcar_dmac_init(struct rcar_dmac *dmac)
@@ -761,21 +759,15 @@ static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}
-static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
+static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
{
u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
- if (!(chcr & RCAR_DMACHCR_DE))
- return;
-
/* set DE=0 and flush remaining data */
rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
/* make sure all remaining data was flushed */
rcar_dmac_chcr_de_barrier(chan);
-
- /* back DE */
- rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
@@ -783,7 +775,8 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
- RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
+ RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
+ RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
rcar_dmac_chcr_de_barrier(chan);
}
@@ -812,12 +805,7 @@ static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
}
}
-static void rcar_dmac_stop(struct rcar_dmac *dmac)
-{
- rcar_dmac_write(dmac, RCAR_DMAOR, 0);
-}
-
-static void rcar_dmac_abort(struct rcar_dmac *dmac)
+static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
unsigned int i;
@@ -826,14 +814,24 @@ static void rcar_dmac_abort(struct rcar_dmac *dmac)
struct rcar_dmac_chan *chan = &dmac->channels[i];
/* Stop and reinitialize the channel. */
- spin_lock(&chan->lock);
+ spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
- spin_unlock(&chan->lock);
-
- rcar_dmac_chan_reinit(chan);
+ spin_unlock_irq(&chan->lock);
}
}
+static int rcar_dmac_chan_pause(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ spin_lock_irqsave(&rchan->lock, flags);
+ rcar_dmac_clear_chcr_de(rchan);
+ spin_unlock_irqrestore(&rchan->lock, flags);
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Descriptors preparation
*/
@@ -1355,9 +1353,6 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
residue += chunk->size;
}
- if (desc->direction == DMA_DEV_TO_MEM)
- rcar_dmac_sync_tcr(chan);
-
/* Add the residue for the current chunk. */
residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
@@ -1522,11 +1517,26 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
struct rcar_dmac_chan *chan = dev;
irqreturn_t ret = IRQ_NONE;
+ bool reinit = false;
u32 chcr;
spin_lock(&chan->lock);
chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (chcr & RCAR_DMACHCR_CAE) {
+ struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);
+
+ /*
+ * We don't need to call rcar_dmac_chan_halt()
+ * because the channel is already stopped in the error case.
+ * We need to clear the register and check the DE bit as recovery.
+ */
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
+ rcar_dmac_chcr_de_barrier(chan);
+ reinit = true;
+ goto spin_lock_end;
+ }
+
if (chcr & RCAR_DMACHCR_TE)
mask |= RCAR_DMACHCR_DE;
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
@@ -1539,8 +1549,16 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
if (chcr & RCAR_DMACHCR_TE)
ret |= rcar_dmac_isr_transfer_end(chan);
+spin_lock_end:
spin_unlock(&chan->lock);
+ if (reinit) {
+ dev_err(chan->chan.device->dev, "Channel Address Error\n");
+
+ rcar_dmac_chan_reinit(chan);
+ ret = IRQ_HANDLED;
+ }
+
return ret;
}
@@ -1597,24 +1615,6 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
-{
- struct rcar_dmac *dmac = data;
-
- if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
- return IRQ_NONE;
-
- /*
- * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
- * abort transfers on all channels, and reinitialize the DMAC.
- */
- rcar_dmac_stop(dmac);
- rcar_dmac_abort(dmac);
- rcar_dmac_init(dmac);
-
- return IRQ_HANDLED;
-}
-
/* -----------------------------------------------------------------------------
* OF xlate and channel filter
*/
@@ -1784,8 +1784,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
struct rcar_dmac *dmac;
struct resource *mem;
unsigned int i;
- char *irqname;
- int irq;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -1824,17 +1822,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
- irq = platform_get_irq_byname(pdev, "error");
- if (irq < 0) {
- dev_err(&pdev->dev, "no error IRQ specified\n");
- return -ENODEV;
- }
-
- irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
- dev_name(dmac->dev));
- if (!irqname)
- return -ENOMEM;
-
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
@@ -1871,6 +1858,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
engine->device_config = rcar_dmac_device_config;
+ engine->device_pause = rcar_dmac_chan_pause;
engine->device_terminate_all = rcar_dmac_chan_terminate_all;
engine->device_tx_status = rcar_dmac_tx_status;
engine->device_issue_pending = rcar_dmac_issue_pending;
@@ -1885,14 +1873,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
goto error;
}
- ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
- irqname, dmac);
- if (ret) {
- dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
- irq, ret);
- return ret;
- }
-
/* Register the DMAC as a DMA provider for DT. */
ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
NULL);
@@ -1932,7 +1912,7 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
{
struct rcar_dmac *dmac = platform_get_drvdata(pdev);
- rcar_dmac_stop(dmac);
+ rcar_dmac_stop_all_chan(dmac);
}
static const struct of_device_id rcar_dmac_of_ids[] = {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1bc149af990e..f4edfc56f34e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -555,6 +555,7 @@ struct d40_gen_dmac {
* @reg_val_backup_v4: Backup of registers that only exits on dma40 v3 and
* later
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @regs_interrupt: Scratch space for registers during interrupt.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
* @gen_dmac: the struct for generic registers values to represent u8500/8540
* DMA controller
@@ -592,6 +593,7 @@ struct d40_base {
u32 reg_val_backup[BACKUP_REGS_SZ];
u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
u32 *reg_val_backup_chan;
+ u32 *regs_interrupt;
u16 gcc_pwr_off_mask;
struct d40_gen_dmac gen_dmac;
};
@@ -1637,7 +1639,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
struct d40_chan *d40c;
unsigned long flags;
struct d40_base *base = data;
- u32 regs[base->gen_dmac.il_size];
+ u32 *regs = base->regs_interrupt;
struct d40_interrupt_lookup *il = base->gen_dmac.il;
u32 il_size = base->gen_dmac.il_size;
@@ -3258,13 +3260,22 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!base->lcla_pool.alloc_map)
goto free_backup_chan;
+ base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
+ sizeof(*base->regs_interrupt),
+ GFP_KERNEL);
+ if (!base->regs_interrupt)
+ goto free_map;
+
base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (base->desc_slab == NULL)
- goto free_map;
+ goto free_regs;
+
return base;
+ free_regs:
+ kfree(base->regs_interrupt);
free_map:
kfree(base->lcla_pool.alloc_map);
free_backup_chan:
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 8c5807362a25..379e8d534e61 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -594,7 +594,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -693,7 +693,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
- dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 9dc450b7ace6..06dd1725375e 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1170,7 +1170,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1183,7 +1183,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
- dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@@ -1203,7 +1203,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
- dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
return ret;
}
@@ -1240,7 +1240,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 9b5ca8691f27..a4a931ddf6f6 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ if (__dma_omap15xx(od->plat->dma_attr))
+ od->ddev.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ else
+ od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 27b523530c4a..c12442312595 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -115,6 +115,9 @@
#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
+#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
+#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
+
/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
@@ -340,6 +343,7 @@ struct xilinx_dma_tx_descriptor {
* @start_transfer: Differentiate b/w DMA IP's transfer
* @stop_transfer: Differentiate b/w DMA IP's quiesce
* @tdest: TDEST value for mcdma
+ * @has_vflip: S2MM vertical flip
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -376,6 +380,7 @@ struct xilinx_dma_chan {
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
+ bool has_vflip;
};
/**
@@ -1092,6 +1097,14 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
+ if (chan->has_vflip) {
+ reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
+ reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+ reg |= config->vflip_en;
+ dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
+ reg);
+ }
+
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (config->frm_cnt_en)
@@ -2105,6 +2118,8 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
}
chan->config.frm_cnt_en = cfg->frm_cnt_en;
+ chan->config.vflip_en = cfg->vflip_en;
+
if (cfg->park)
chan->config.park_frm = cfg->park_frm;
else
@@ -2428,6 +2443,13 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->direction = DMA_DEV_TO_MEM;
chan->id = chan_id;
chan->tdest = chan_id - xdev->nr_channels;
+ chan->has_vflip = of_property_read_bool(node,
+ "xlnx,enable-vert-flip");
+ if (chan->has_vflip) {
+ chan->config.vflip_en = dma_read(chan,
+ XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
+ XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+ }
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index d0d5c4dbe097..5762c3c383f2 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -730,7 +730,8 @@ static int altr_s10_sdram_probe(struct platform_device *pdev)
S10_DDR0_IRQ_MASK)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error clearing SDRAM ECC count\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err2;
}
if (regmap_update_bits(drvdata->mc_vbase, priv->ecc_irq_en_offset,
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3bb82e511eca..7d3edd713932 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -215,6 +215,7 @@ const char * const edac_mem_types[] = {
[MEM_LRDDR3] = "Load-Reduced-DDR3-RAM",
[MEM_DDR4] = "Unbuffered-DDR4",
[MEM_RDDR4] = "Registered-DDR4",
+ [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
[MEM_NVDIMM] = "Non-volatile-RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 7481955160a4..20374b8248f0 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1075,14 +1075,14 @@ int __init edac_mc_sysfs_init(void)
err = device_add(mci_pdev);
if (err < 0)
- goto out_dev_free;
+ goto out_put_device;
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
- out_dev_free:
- kfree(mci_pdev);
+ out_put_device:
+ put_device(mci_pdev);
out:
return err;
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8ed4dd9c571b..8e120bf60624 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1177,15 +1177,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
- return rc;
+ goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
- put_device(pvt->addrmatch_dev);
- device_del(pvt->addrmatch_dev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1199,9 +1198,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
- return rc;
+ goto err_put_chancounts;
}
return 0;
+
+err_put_chancounts:
+ put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+ device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+ put_device(pvt->addrmatch_dev);
+
+ return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1211,11 +1219,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
edac_dbg(1, "\n");
if (!pvt->is_registered) {
- put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
+ put_device(pvt->chancounts_dev);
}
- put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
+ put_device(pvt->addrmatch_dev);
}
/****************************************************************************
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4a89c8093307..07726fb00321 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -352,6 +352,7 @@ struct pci_id_table {
struct sbridge_dev {
struct list_head list;
+ int seg;
u8 bus, mc;
u8 node_id, source_id;
struct pci_dev **pdev;
@@ -729,7 +730,8 @@ static inline int numcol(u32 mtr)
return 1 << cols;
}
-static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
+static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
+ int multi_bus,
struct sbridge_dev *prev)
{
struct sbridge_dev *sbridge_dev;
@@ -747,14 +749,15 @@ static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bu
: sbridge_edac_list.next, struct sbridge_dev, list);
list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
- if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
+ if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
+ (dom == SOCK || dom == sbridge_dev->dom))
return sbridge_dev;
}
return NULL;
}
-static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
+static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
const struct pci_id_table *table)
{
struct sbridge_dev *sbridge_dev;
@@ -771,6 +774,7 @@ static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
return NULL;
}
+ sbridge_dev->seg = seg;
sbridge_dev->bus = bus;
sbridge_dev->dom = dom;
sbridge_dev->n_devs = table->n_devs_per_imc;
@@ -2246,6 +2250,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
struct sbridge_dev *sbridge_dev = NULL;
const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
+ int seg = 0;
u8 bus = 0;
int i = 0;
@@ -2276,10 +2281,12 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
/* End of list, leave */
return -ENODEV;
}
+ seg = pci_domain_nr(pdev->bus);
bus = pdev->bus->number;
next_imc:
- sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
+ sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
+ multi_bus, sbridge_dev);
if (!sbridge_dev) {
/* If the HA1 wasn't found, don't create EDAC second memory controller */
if (dev_descr->dom == IMC1 && devno != 1) {
@@ -2292,7 +2299,7 @@ next_imc:
if (dev_descr->dom == SOCK)
goto out_imc;
- sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
+ sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
if (!sbridge_dev) {
pci_dev_put(pdev);
return -ENOMEM;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 4803c6468bab..c009d94f40c5 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -408,26 +408,29 @@ static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
size_t count, loff_t *ppos)
{
struct thunderx_lmc *lmc = file->private_data;
-
unsigned int cline_size = cache_line_size();
-
- u8 tmp[cline_size];
+ u8 *tmp;
void __iomem *addr;
unsigned int offs, timeout = 100000;
atomic_set(&lmc->ecc_int, 0);
lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
-
if (!lmc->mem)
return -ENOMEM;
+ tmp = kmalloc(cline_size, GFP_KERNEL);
+ if (!tmp) {
+ __free_pages(lmc->mem, 0);
+ return -ENOMEM;
+ }
+
addr = page_address(lmc->mem);
while (!atomic_read(&lmc->ecc_int) && timeout--) {
stop_machine(inject_ecc_fn, lmc, NULL);
- for (offs = 0; offs < PAGE_SIZE; offs += sizeof(tmp)) {
+ for (offs = 0; offs < PAGE_SIZE; offs += cline_size) {
/*
* Do a load from the previously rigged location
* This should generate an error interrupt.
@@ -437,6 +440,7 @@ static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
}
}
+ kfree(tmp);
__free_pages(lmc->mem, 0);
return count;
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index b7e9ea377d70..5e1dd2772278 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index acaccb128fc4..fd24debe58a3 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -20,7 +20,7 @@
#include <linux/acpi.h>
#include <linux/extcon-provider.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index 0aa410836f4e..1335a476bfec 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -14,6 +14,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
struct max3355_data {
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 660bbf163bf5..72bc0f2478e2 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 6721ab01fe7d..43c0a936ab82 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -1,18 +1,8 @@
-/**
- * drivers/extcon/extcon-usbc-cros-ec - ChromeOS Embedded Controller extcon
- *
- * Copyright (C) 2017 Google, Inc
- * Author: Benson Leung <bleung@chromium.org>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// ChromeOS Embedded Controller extcon
+//
+// Copyright (C) 2017 Google, Inc.
+// Author: Benson Leung <bleung@chromium.org>
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
@@ -548,4 +538,4 @@ module_platform_driver(extcon_cros_ec_driver);
MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index af83ad58819c..b9d27c8fe57e 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
return index;
spin_lock_irqsave(&edev->lock, flags);
-
state = !!(edev->state & BIT(index));
+ spin_unlock_irqrestore(&edev->lock, flags);
/*
* Call functions in a raw notifier chain for the specific one
@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
*/
raw_notifier_call_chain(&edev->nh_all, state, edev);
+ spin_lock_irqsave(&edev->lock, flags);
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (!prop_buf) {
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index f0587273940e..d8e185582642 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1205,7 +1205,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
- struct timespec ts = {0, 0};
+ struct timespec64 ts = {0, 0};
u32 cycle_time;
int ret = 0;
@@ -1214,9 +1214,9 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
switch (a->clk_id) {
- case CLOCK_REALTIME: getnstimeofday(&ts); break;
- case CLOCK_MONOTONIC: ktime_get_ts(&ts); break;
- case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
+ case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break;
+ case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break;
+ case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
default:
ret = -EINVAL;
}
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 2a219b1261b1..721e6c57beae 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -363,8 +363,6 @@ static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
return domain;
dom = pi->dom_info + domain;
- if (!dom)
- return -EIO;
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
freq = opp->perf * dom->mult_factor;
@@ -394,9 +392,6 @@ static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
return domain;
dom = pi->dom_info + domain;
- if (!dom)
- return -EIO;
-
/* uS to nS */
return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 781a4a337557..d8e159feb573 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
config EFI_ARMSTUB
bool
+config EFI_ARMSTUB_DTB_LOADER
+ bool "Enable the DTB loader"
+ depends on EFI_ARMSTUB
+ help
+ Select this config option to add support for the dtb= command
+ line parameter, allowing a device tree blob to be loaded into
+ memory from the EFI System Partition by the stub.
+
+ The device tree is typically provided by the platform or by
+ the bootloader, so this option is mostly intended for
+ development purposes.
+
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
depends on EFI_VARS
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index b5214c143fee..388a929baf95 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -259,7 +259,6 @@ void __init efi_init(void)
reserve_regions();
efi_esrt_init();
- efi_memmap_unmap();
memblock_reserve(params.mmap & PAGE_MASK,
PAGE_ALIGN(params.mmap_size +
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 5889cbea60b8..922cfb813109 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -110,11 +110,20 @@ static int __init arm_enable_runtime_services(void)
{
u64 mapsize;
- if (!efi_enabled(EFI_BOOT)) {
+ if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
pr_info("EFI services will not be available.\n");
return 0;
}
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return 0;
@@ -127,13 +136,6 @@ static int __init arm_enable_runtime_services(void)
pr_info("Remapping and enabling EFI services.\n");
- mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
-
- if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
- pr_err("Failed to remap EFI memory map\n");
- return -ENOMEM;
- }
-
if (!efi_virtmap_init()) {
pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
return -ENOMEM;
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 3bf0dca378a6..a7902fccdcfa 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
{
static atomic64_t seq;
- if (!atomic64_read(&seq))
- atomic64_set(&seq, ((u64)get_seconds()) << 32);
+ if (!atomic64_read(&seq)) {
+ time64_t time = ktime_get_real_seconds();
+
+ /*
+ * This code is unlikely to still be needed in year 2106,
+ * but just in case, let's use a few more bits for timestamps
+ * after y2038 to be sure they keep increasing monotonically
+ * for the next few hundred years...
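+ * (The top bit flags the post-2038 layout; shifting the seconds up
+ * by only 24 bits still leaves a 24-bit sequence space below the
+ * timestamp.)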
+ */
+ if (time < 0x80000000)
+ atomic64_set(&seq, (u64)time << 32);
+ else
+ atomic64_set(&seq, 0x8000000000000000ull |
+ (u64)time << 24);
+ }
return atomic64_inc_return(&seq);
}
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
- } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: ARM processor error\n", newpfx);
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index 50793fda7819..b22ccfb0c991 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -20,7 +20,7 @@
#include <linux/efi-bgrt.h>
struct acpi_table_bgrt bgrt_tab;
-size_t __initdata bgrt_image_size;
+size_t bgrt_image_size;
struct bmp_header {
u16 id;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 232f4915223b..2a29dd9c986d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -82,8 +82,11 @@ struct mm_struct efi_mm = {
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};
+struct workqueue_struct *efi_rts_wq;
+
static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
@@ -337,6 +340,18 @@ static int __init efisubsys_init(void)
if (!efi_enabled(EFI_BOOT))
return 0;
+ /*
+ * Since we process only one efi_runtime_service() at a time, an
+ * ordered workqueue (which creates only one execution context)
+ * should suffice for all our needs.
+ */
+ efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+ if (!efi_rts_wq) {
+ pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return 0;
+ }
+
/* We register the efi directory at /sys/firmware/efi */
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
@@ -388,7 +403,7 @@ subsys_initcall(efisubsys_init);
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
-int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
efi_memory_desc_t *md;
@@ -406,12 +421,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
u64 size;
u64 end;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_DATA &&
- md->type != EFI_RUNTIME_SERVICES_DATA) {
- continue;
- }
-
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 1ab80e06e7c5..5d06bd247d07 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
return;
rc = efi_mem_desc_lookup(efi.esrt, &md);
- if (rc < 0) {
+ if (rc < 0 ||
+ (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+ md.type != EFI_BOOT_SERVICES_DATA &&
+ md.type != EFI_RUNTIME_SERVICES_DATA)) {
pr_warn("ESRT header is not in the memory map.\n");
return;
}
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
end = esrt_data + size;
pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
- efi_mem_reserve(esrt_data, esrt_data_size);
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(esrt_data, esrt_data_size);
pr_debug("esrt-init: loaded.\n");
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index a34e9290a699..14c40a7750d1 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,10 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
+# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+# disable the stackleak plugin
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
+ $(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
@@ -20,7 +23,8 @@ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
- $(call cc-option,-fno-stack-protector)
+ $(call cc-option,-fno-stack-protector) \
+ -D__DISABLE_EXPORTS
GCOV_PROFILE := n
KASAN_SANITIZE := n
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 01a9d78ee415..6920033de6d4 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -40,31 +40,6 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
- void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_t *image = __image;
- efi_file_handle_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
-
- status = sys_table_arg->boottime->handle_protocol(handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
- return status;
- }
-
- status = io->open_volume(io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
void efi_char16_printk(efi_system_table_t *sys_table_arg,
efi_char16_t *str)
{
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
* 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
* boot is enabled if we can't determine its state.
*/
- if (secure_boot != efi_secureboot_mode_disabled &&
- strstr(cmdline_ptr, "dtb=")) {
- pr_efi(sys_table, "Ignoring DTB from command line.\n");
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ secure_boot != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ pr_efi(sys_table, "Ignoring DTB from command line.\n");
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
"dtb=",
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 50a9cab5a834..e94975f4655b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
return efi_call_proto(efi_file_handle, close, handle);
}
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ efi_file_handle_t **__fh)
+{
+ efi_file_io_interface_t *io;
+ efi_file_handle_t *fh;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_status_t status;
+ void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+ device_handle,
+ image);
+
+ status = efi_call_early(handle_protocol, handle,
+ &fs_proto, (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table_arg, "Failed to open volume\n");
+ else
+ *__fh = fh;
+
+ return status;
+}
+
/*
* Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- status = efi_open_volume(sys_table_arg, image,
- (void **)&fh);
+ status = efi_open_volume(sys_table_arg, image, &fh);
if (status != EFI_SUCCESS)
goto free_files;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index f59564b72ddc..32799cf039ef 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
- void **__fh);
-
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index ae54870b2788..aa66cbf23512 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -1,6 +1,15 @@
/*
* runtime-wrappers.c - Runtime Services function call wrappers
*
+ * Implementation summary:
+ * -----------------------
+ * 1. When a user/kernel thread requests execution of an
+ * efi_runtime_service(), the work is enqueued to efi_rts_wq.
+ * 2. The caller thread waits until the work completes, because it
+ * depends on the return status and side effects of the
+ * efi_runtime_service(); for instance, get_variable() and
+ * get_next_variable().
+ *
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
#include <asm/efi.h>
/*
@@ -33,6 +45,76 @@
#define __efi_call_virt(f, args...) \
__efi_call_virt_pointer(efi.systab->runtime, f, args)
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+ GET_TIME,
+ SET_TIME,
+ GET_WAKEUP_TIME,
+ SET_WAKEUP_TIME,
+ GET_VARIABLE,
+ GET_NEXT_VARIABLE,
+ SET_VARIABLE,
+ QUERY_VARIABLE_INFO,
+ GET_NEXT_HIGH_MONO_COUNT,
+ UPDATE_CAPSULE,
+ QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @arg<1-5>: EFI Runtime Service function arguments
+ * @status: Status of executing EFI Runtime Service
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ */
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work: Queue efi_runtime_service() and wait until it's done
+ * @rts: efi_runtime_service() function identifier
+ * @rts_arg<1-5>: efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock) and the caller waits until the work
+ * is finished, hence _only_ one work is queued at a time.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
+({ \
+ struct efi_runtime_work efi_rts_work; \
+ efi_rts_work.status = EFI_ABORTED; \
+ \
+ init_completion(&efi_rts_work.efi_rts_comp); \
+ INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
+ efi_rts_work.arg1 = _arg1; \
+ efi_rts_work.arg2 = _arg2; \
+ efi_rts_work.arg3 = _arg3; \
+ efi_rts_work.arg4 = _arg4; \
+ efi_rts_work.arg5 = _arg5; \
+ efi_rts_work.efi_rts_id = _rts; \
+ \
+ /* \
+ * queue_work() returns 0 if work was already on queue, \
+ * _ideally_ this should never happen. \
+ */ \
+ if (queue_work(efi_rts_wq, &efi_rts_work.work)) \
+ wait_for_completion(&efi_rts_work.efi_rts_comp); \
+ else \
+ pr_err("Failed to queue work to efi_rts_wq.\n"); \
+ \
+ efi_rts_work.status; \
+})
+
void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
unsigned long cur_flags, mismatch;
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
*/
static DEFINE_SEMAPHORE(efi_runtime_lock);
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * Semantics used by efi_call_rts() to interpret efi_runtime_work:
+ * 1. If an argument was passed as a pointer, cast it back from void
+ * pointer to its original pointer type.
+ * 2. If an argument was passed by value, cast it back from void
+ * pointer to a pointer of its original type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+ struct efi_runtime_work *efi_rts_work;
+ void *arg1, *arg2, *arg3, *arg4, *arg5;
+ efi_status_t status = EFI_NOT_FOUND;
+
+ efi_rts_work = container_of(work, struct efi_runtime_work, work);
+ arg1 = efi_rts_work->arg1;
+ arg2 = efi_rts_work->arg2;
+ arg3 = efi_rts_work->arg3;
+ arg4 = efi_rts_work->arg4;
+ arg5 = efi_rts_work->arg5;
+
+ switch (efi_rts_work->efi_rts_id) {
+ case GET_TIME:
+ status = efi_call_virt(get_time, (efi_time_t *)arg1,
+ (efi_time_cap_t *)arg2);
+ break;
+ case SET_TIME:
+ status = efi_call_virt(set_time, (efi_time_t *)arg1);
+ break;
+ case GET_WAKEUP_TIME:
+ status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+ (efi_bool_t *)arg2, (efi_time_t *)arg3);
+ break;
+ case SET_WAKEUP_TIME:
+ status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+ (efi_time_t *)arg2);
+ break;
+ case GET_VARIABLE:
+ status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, (u32 *)arg3,
+ (unsigned long *)arg4, (void *)arg5);
+ break;
+ case GET_NEXT_VARIABLE:
+ status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+ (efi_char16_t *)arg2,
+ (efi_guid_t *)arg3);
+ break;
+ case SET_VARIABLE:
+ status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, *(u32 *)arg3,
+ *(unsigned long *)arg4, (void *)arg5);
+ break;
+ case QUERY_VARIABLE_INFO:
+ status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+ (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+ break;
+ case GET_NEXT_HIGH_MONO_COUNT:
+ status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+ break;
+ case UPDATE_CAPSULE:
+ status = efi_call_virt(update_capsule,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2,
+ *(unsigned long *)arg3);
+ break;
+ case QUERY_CAPSULE_CAPS:
+ status = efi_call_virt(query_capsule_caps,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2, (u64 *)arg3,
+ (int *)arg4);
+ break;
+ default:
+ /*
+ * Ideally, we should never reach here because a caller of this
+ * function should have put the right efi_runtime_service()
+ * function identifier into efi_rts_work->efi_rts_id.
+ */
+ pr_err("Requested invalid EFI Runtime Service.\n");
+ }
+ efi_rts_work->status = status;
+ complete(&efi_rts_work->efi_rts_comp);
+}
+
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_time, tm, tc);
+ status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_time, tm);
+ status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+ status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_wakeup_time, enabled, tm);
+ status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_variable, name_size, name, vendor);
+ status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_variable_info, attr, storage_space,
- remaining_space, max_variable_size);
+ status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+ remaining_space, max_variable_size, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_high_mono_count, count);
+ status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(update_capsule, capsules, count, sg_list);
+ status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
- reset_type);
+ status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+ max_size, reset_type, NULL);
up(&efi_runtime_lock);
return status;
}
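
For reference, the queue-and-wait pattern introduced above reduces to the following minimal, self-contained sketch; the names (my_work, run_on_wq) are illustrative and not part of the patch, and the sketch adds a destroy_work_on_stack() call that on-stack work items want under debugobjects:

#include <linux/completion.h>
#include <linux/workqueue.h>

struct my_work {
	struct work_struct work;
	struct completion done;
	long result;
};

static void my_work_fn(struct work_struct *w)
{
	struct my_work *mw = container_of(w, struct my_work, work);

	mw->result = 0;			/* the real call runs here */
	complete(&mw->done);
}

static long run_on_wq(struct workqueue_struct *wq)
{
	struct my_work mw = { .result = -1 };

	init_completion(&mw.done);
	INIT_WORK_ONSTACK(&mw.work, my_work_fn);
	if (queue_work(wq, &mw.work))
		wait_for_completion(&mw.done);	/* on-stack work: must wait */
	destroy_work_on_stack(&mw.work);
	return mw.result;
}
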
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index e9db895916c3..1aa67bb5d8c0 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
memunmap(sec->baseaddr);
+ sec->enabled = false;
}
return 0;
@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
ret = vpd_section_init("rw", &rw_vpd,
physaddr + sizeof(struct vpd_cbmem) +
header.ro_size, header.rw_size);
- if (ret)
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
return ret;
+ }
}
return 0;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index bb1c068bff19..346943657962 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,28 +77,6 @@ static int psci_ops_check(void)
return 0;
}
-static int find_cpu_groups(const struct cpumask *cpus,
- const struct cpumask **cpu_groups)
-{
- unsigned int nb = 0;
- cpumask_var_t tmp;
-
- if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
- return -ENOMEM;
- cpumask_copy(tmp, cpus);
-
- while (!cpumask_empty(tmp)) {
- const struct cpumask *cpu_group =
- topology_core_cpumask(cpumask_any(tmp));
-
- cpu_groups[nb++] = cpu_group;
- cpumask_andnot(tmp, tmp, cpu_group);
- }
-
- free_cpumask_var(tmp);
- return nb;
-}
-
/*
* offlined_cpus is a temporary array but passing it as an argument avoids
* multiple allocations.
@@ -166,29 +144,66 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
return err;
}
+static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
+{
+ int i;
+ cpumask_var_t *cpu_groups = *pcpu_groups;
+
+ for (i = 0; i < num; ++i)
+ free_cpumask_var(cpu_groups[i]);
+ kfree(cpu_groups);
+}
+
+static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
+{
+ int num_groups = 0;
+ cpumask_var_t tmp, *cpu_groups;
+
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups) {
+ free_cpumask_var(tmp);
+ return -ENOMEM;
+ }
+
+ cpumask_copy(tmp, cpu_online_mask);
+
+ while (!cpumask_empty(tmp)) {
+ const struct cpumask *cpu_group =
+ topology_core_cpumask(cpumask_any(tmp));
+
+ if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+ free_cpu_groups(num_groups, &cpu_groups);
+ return -ENOMEM;
+ }
+ cpumask_copy(cpu_groups[num_groups++], cpu_group);
+ cpumask_andnot(tmp, tmp, cpu_group);
+ }
+
+ free_cpumask_var(tmp);
+ *pcpu_groups = cpu_groups;
+
+ return num_groups;
+}
+
static int hotplug_tests(void)
{
- int err;
- cpumask_var_t offlined_cpus;
- int i, nb_cpu_group;
- const struct cpumask **cpu_groups;
+ int i, nb_cpu_group, err = -ENOMEM;
+ cpumask_var_t offlined_cpus, *cpu_groups;
char *page_buf;
- err = -ENOMEM;
if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
return err;
- /* We may have up to nb_available_cpus cpu_groups. */
- cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
- GFP_KERNEL);
- if (!cpu_groups)
+
+ nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
+ if (nb_cpu_group < 0)
goto out_free_cpus;
page_buf = (char *)__get_free_page(GFP_KERNEL);
if (!page_buf)
goto out_free_cpu_groups;
err = 0;
- nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
-
/*
* Of course the last CPU cannot be powered down and cpu_down() should
* refuse doing that.
@@ -212,7 +227,7 @@ static int hotplug_tests(void)
free_page((unsigned long)page_buf);
out_free_cpu_groups:
- kfree(cpu_groups);
+ free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
free_cpumask_var(offlined_cpus);
return err;
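
The motivation for the copies made above: topology_core_cpumask() returns a pointer to a mask the kernel rewrites as CPUs go down and up, so the checker must snapshot it before offlining anything. A minimal sketch of that snapshot pattern (the function name is illustrative):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/topology.h>

/* Snapshot one CPU's core mask so later hotplug cannot mutate it. */
static int snapshot_core_mask(int cpu, cpumask_var_t *snap)
{
	if (!alloc_cpumask_var(snap, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(*snap, topology_core_cpumask(cpu));
	return 0;	/* caller releases with free_cpumask_var(*snap) */
}
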
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 14fedbeca724..039e0f91dba8 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -28,6 +28,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/slab.h>
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 6692888f04cf..a200a2174611 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -21,6 +21,10 @@
#define MBOX_DATA28(msg) ((msg) & ~0xf)
#define MBOX_CHAN_PROPERTY 8
+#define MAX_RPI_FW_PROP_BUF_SIZE 32
+
+static struct platform_device *rpi_hwmon;
+
struct rpi_firmware {
struct mbox_client cl;
struct mbox_chan *chan; /* The property channel. */
@@ -143,18 +147,22 @@ int rpi_firmware_property(struct rpi_firmware *fw,
/* Single tags are very small (generally 8 bytes), so the
* stack should be safe.
*/
- u8 data[buf_size + sizeof(struct rpi_firmware_property_tag_header)];
+ u8 data[sizeof(struct rpi_firmware_property_tag_header) +
+ MAX_RPI_FW_PROP_BUF_SIZE];
struct rpi_firmware_property_tag_header *header =
(struct rpi_firmware_property_tag_header *)data;
int ret;
+ if (WARN_ON(buf_size > sizeof(data) - sizeof(*header)))
+ return -EINVAL;
+
header->tag = tag;
header->buf_size = buf_size;
header->req_resp_size = 0;
memcpy(data + sizeof(struct rpi_firmware_property_tag_header),
tag_data, buf_size);
- ret = rpi_firmware_property_list(fw, &data, sizeof(data));
+ ret = rpi_firmware_property_list(fw, &data, buf_size + sizeof(*header));
memcpy(tag_data,
data + sizeof(struct rpi_firmware_property_tag_header),
buf_size);
@@ -183,6 +191,20 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
}
}
+static void
+rpi_register_hwmon_driver(struct device *dev, struct rpi_firmware *fw)
+{
+ u32 packet;
+ int ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_THROTTLED,
+ &packet, sizeof(packet));
+
+ if (ret)
+ return;
+
+ rpi_hwmon = platform_device_register_data(dev, "raspberrypi-hwmon",
+ -1, NULL, 0);
+}
+
static int rpi_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -209,6 +231,7 @@ static int rpi_firmware_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fw);
rpi_firmware_print_firmware_revision(fw);
+ rpi_register_hwmon_driver(dev, fw);
return 0;
}
@@ -217,6 +240,8 @@ static int rpi_firmware_remove(struct platform_device *pdev)
{
struct rpi_firmware *fw = platform_get_drvdata(pdev);
+ platform_device_unregister(rpi_hwmon);
+ rpi_hwmon = NULL;
mbox_free_channel(fw->chan);
return 0;
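
In caller terms, the new bound check means any tag buffer up to MAX_RPI_FW_PROP_BUF_SIZE bytes is served from the fixed stack buffer, and anything larger fails fast instead of creating a variable-length array; a sketch (the tag choice is illustrative):

u32 rev;
u8 big[64];
int ret;

/* 4 bytes <= MAX_RPI_FW_PROP_BUF_SIZE (32): served from the stack buffer */
ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION,
			    &rev, sizeof(rev));

/* 64 bytes > 32: now warns and returns -EINVAL instead of using a VLA */
ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION,
			    big, sizeof(big));
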
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index ee9c5420c47f..1ebcef4bab5b 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -130,4 +130,72 @@ config OF_FPGA_REGION
Support for loading FPGA images by applying a Device Tree
overlay.
+config FPGA_DFL
+ tristate "FPGA Device Feature List (DFL) support"
+ select FPGA_BRIDGE
+ select FPGA_REGION
+ help
+ Device Feature List (DFL) defines a feature list structure that
+ creates a linked list of feature headers within the MMIO space
+ to provide an extensible way of adding features to an FPGA.
+ The driver can walk through the feature headers to enumerate
+ feature devices (e.g. FPGA Management Engine, Port and Accelerator
+ Function Unit) and their private features for target FPGA devices.
+
+ Select this option to enable common support for Field-Programmable
+ Gate Array (FPGA) solutions which implement Device Feature List.
+ It provides enumeration APIs and feature device infrastructure.
+
+config FPGA_DFL_FME
+ tristate "FPGA DFL FME Driver"
+ depends on FPGA_DFL
+ help
+ The FPGA Management Engine (FME) is a feature device implemented
+ under Device Feature List (DFL) framework. Select this option to
+ enable the platform device driver for FME which implements all
+ FPGA platform level management features. There shall be one FME
+ per DFL based FPGA device.
+
+config FPGA_DFL_FME_MGR
+ tristate "FPGA DFL FME Manager Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Manager driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_BRIDGE
+ tristate "FPGA DFL FME Bridge Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Bridge driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_REGION
+ tristate "FPGA DFL FME Region Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Region driver for FPGA Management Engine.
+
+config FPGA_DFL_AFU
+ tristate "FPGA DFL AFU Driver"
+ depends on FPGA_DFL
+ help
+ This is the driver for FPGA Accelerated Function Unit (AFU) which
+ implements AFU and Port management features. A User AFU connects
+ to the FPGA infrastructure via a Port. There may be more than one
+ Port/AFU per DFL based FPGA device.
+
+config FPGA_DFL_PCI
+ tristate "FPGA DFL PCIe Device Driver"
+ depends on PCI && FPGA_DFL
+ help
+ Select this option to enable the PCIe driver for PCIe-based
+ Field-Programmable Gate Array (FPGA) solutions which implement
+ the Device Feature List (DFL). This driver provides interfaces
+ for userspace applications to configure, enumerate, open and access
+ FPGA accelerators on FPGA DFL devices, and enables system-level
+ management functions such as FPGA partial reconfiguration, power
+ management and virtualization, together with the DFL framework and
+ DFL feature device drivers.
+
+ To compile this as a module, choose M here.
+
endif # FPGA
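
To make the "linked list of feature headers" concrete: each Device Feature Header packs a feature id, the offset of the next header, and an end-of-list flag into its first 64-bit word, and enumeration is a walk over those headers. A hypothetical sketch of that walk follows; the bit positions are illustrative assumptions, not the actual DFH register layout:

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/printk.h>

#define DFH_ID		GENMASK_ULL(11, 0)	/* assumed feature-id field */
#define DFH_NEXT	GENMASK_ULL(39, 16)	/* assumed next-offset field */
#define DFH_EOL		BIT_ULL(40)		/* assumed end-of-list flag */

static void walk_dfl(void __iomem *hdr)
{
	for (;;) {
		u64 v = readq(hdr);

		pr_info("feature id %llu at %p\n", v & DFH_ID, hdr);
		if ((v & DFH_EOL) || !FIELD_GET(DFH_NEXT, v))
			break;
		hdr += FIELD_GET(DFH_NEXT, v);	/* offset to next header */
	}
}
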
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index f9803dad6919..7a2d73ba7122 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -28,3 +28,17 @@ obj-$(CONFIG_XILINX_PR_DECOUPLER) += xilinx-pr-decoupler.o
# High Level Interfaces
obj-$(CONFIG_FPGA_REGION) += fpga-region.o
obj-$(CONFIG_OF_FPGA_REGION) += of-fpga-region.o
+
+# FPGA Device Feature List Support
+obj-$(CONFIG_FPGA_DFL) += dfl.o
+obj-$(CONFIG_FPGA_DFL_FME) += dfl-fme.o
+obj-$(CONFIG_FPGA_DFL_FME_MGR) += dfl-fme-mgr.o
+obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
+obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
+obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
+
+dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
+dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
+
+# Drivers for FPGAs which implement DFL
+obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index dd4edd8f22ce..7fa793672a7a 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
&altera_cvp_ops, conf);
- if (!mgr)
- return -ENOMEM;
+ if (!mgr) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
pci_set_drvdata(pdev, mgr);
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
new file mode 100644
index 000000000000..0e81d33af856
--- /dev/null
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include "dfl-afu.h"
+
+static void put_all_pages(struct page **pages, int npages)
+{
+ int i;
+
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ put_page(pages[i]);
+}
+
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ afu->dma_regions = RB_ROOT;
+}
+
+/**
+ * afu_dma_adjust_locked_vm - adjust locked memory
+ * @dev: port device
+ * @npages: number of pages
+ * @incr: increase or decrease locked memory
+ *
+ * Increase or decrease the locked memory size by @npages.
+ *
+ * Return 0 on success.
+ * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK.
+ */
+static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
+{
+ unsigned long locked, lock_limit;
+ int ret = 0;
+
+ /* the task is exiting. */
+ if (!current->mm)
+ return 0;
+
+ down_write(&current->mm->mmap_sem);
+
+ if (incr) {
+ locked = current->mm->locked_vm + npages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ ret = -ENOMEM;
+ else
+ current->mm->locked_vm += npages;
+ } else {
+ if (WARN_ON_ONCE(npages > current->mm->locked_vm))
+ npages = current->mm->locked_vm;
+ current->mm->locked_vm -= npages;
+ }
+
+ dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
+ incr ? '+' : '-', npages << PAGE_SHIFT,
+ current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
+ ret ? "- execeeded" : "");
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+/**
+ * afu_dma_pin_pages - pin pages of the given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be pinned
+ *
+ * Pin all the pages of the given dfl_afu_dma_region.
+ * Return 0 for success or a negative error code.
+ */
+static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+ int ret, pinned;
+
+ ret = afu_dma_adjust_locked_vm(dev, npages, true);
+ if (ret)
+ return ret;
+
+ region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!region->pages) {
+ ret = -ENOMEM;
+ goto unlock_vm;
+ }
+
+ pinned = get_user_pages_fast(region->user_addr, npages, 1,
+ region->pages);
+ if (pinned < 0) {
+ ret = pinned;
+ goto put_pages;
+ } else if (pinned != npages) {
+ ret = -EFAULT;
+ goto free_pages;
+ }
+
+ dev_dbg(dev, "%d pages pinned\n", pinned);
+
+ return 0;
+
+put_pages:
+ put_all_pages(region->pages, pinned);
+free_pages:
+ kfree(region->pages);
+unlock_vm:
+ afu_dma_adjust_locked_vm(dev, npages, false);
+ return ret;
+}
+
+/**
+ * afu_dma_unpin_pages - unpin pages of the given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be unpinned
+ *
+ * Unpin all the pages of the given dfl_afu_dma_region.
+ */
+static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ long npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+
+ put_all_pages(region->pages, npages);
+ kfree(region->pages);
+ afu_dma_adjust_locked_vm(dev, npages, false);
+
+ dev_dbg(dev, "%ld pages unpinned\n", npages);
+}
+
+/**
+ * afu_dma_check_continuous_pages - check if pages are physically contiguous
+ * @region: dma memory region
+ *
+ * Return true if the pages of the given dma memory region are physically
+ * contiguous, otherwise return false.
+ */
+static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ int i;
+
+ for (i = 0; i < npages - 1; i++)
+ if (page_to_pfn(region->pages[i]) + 1 !=
+ page_to_pfn(region->pages[i + 1]))
+ return false;
+
+ return true;
+}
+
+/**
+ * dma_region_check_iova - check if memory area is fully contained in the region
+ * @region: dma memory region
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * Compare the dma memory area defined by @iova and @size with given dma region.
+ * Return true if memory area is fully contained in the region, otherwise false.
+ */
+static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
+ u64 iova, u64 size)
+{
+ if (!size && region->iova != iova)
+ return false;
+
+ return (region->iova <= iova) &&
+ (region->length + region->iova >= iova + size);
+}
+
+/**
+ * afu_dma_region_add - add given dma region to rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be added
+ *
+ * Return 0 for success, -EEXIST if the dma region has already been added.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node **new, *parent = NULL;
+
+ dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ new = &afu->dma_regions.rb_node;
+
+ while (*new) {
+ struct dfl_afu_dma_region *this;
+
+ this = container_of(*new, struct dfl_afu_dma_region, node);
+
+ parent = *new;
+
+ if (dma_region_check_iova(this, region->iova, region->length))
+ return -EEXIST;
+
+ if (region->iova < this->iova)
+ new = &((*new)->rb_left);
+ else if (region->iova > this->iova)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&region->node, parent, new);
+ rb_insert_color(&region->node, &afu->dma_regions);
+
+ return 0;
+}
+
+/**
+ * afu_dma_region_remove - remove given dma region from rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be removed
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu;
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+ rb_erase(&region->node, &afu->dma_regions);
+}
+
+/**
+ * afu_dma_region_destroy - destroy all regions in rbtree
+ * @pdata: feature device platform data
+ *
+ * Needs to be called with pdata->lock held.
+ */
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = rb_first(&afu->dma_regions);
+ struct dfl_afu_dma_region *region;
+
+ while (node) {
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ rb_erase(node, &afu->dma_regions);
+
+ if (region->iova)
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length,
+ DMA_BIDIRECTIONAL);
+
+ if (region->pages)
+ afu_dma_unpin_pages(pdata, region);
+
+ node = rb_next(node);
+ kfree(region);
+ }
+}
+
+/**
+ * afu_dma_region_find - find the dma region from rbtree based on iova and size
+ * @pdata: feature device platform data
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * It finds the dma region from the rbtree based on @iova and @size:
+ * - if @size == 0, it finds the dma region which starts from @iova
+ * - otherwise, it finds the dma region which fully contains
+ * [@iova, @iova+size)
+ * If nothing matches, NULL is returned.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = afu->dma_regions.rb_node;
+ struct device *dev = &pdata->dev->dev;
+
+ while (node) {
+ struct dfl_afu_dma_region *region;
+
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ if (dma_region_check_iova(region, iova, size)) {
+ dev_dbg(dev, "find region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+ return region;
+ }
+
+ if (iova < region->iova)
+ node = node->rb_left;
+ else if (iova > region->iova)
+ node = node->rb_right;
+ else
+ /* the iova region is not fully covered. */
+ break;
+ }
+
+ dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
+ (unsigned long long)iova, (unsigned long long)size);
+
+ return NULL;
+}
+
+/**
+ * afu_dma_region_find_iova - find the dma region from rbtree by iova
+ * @pdata: feature device platform data
+ * @iova: address of the dma region
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static struct dfl_afu_dma_region *
+afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ return afu_dma_region_find(pdata, iova, 0);
+}
+
+/**
+ * afu_dma_map_region - map memory region for dma
+ * @pdata: feature device platform data
+ * @user_addr: address of the memory region
+ * @length: size of the memory region
+ * @iova: pointer of iova address
+ *
+ * Map memory region defined by @user_addr and @length, and return dma address
+ * of the memory region via @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova)
+{
+ struct dfl_afu_dma_region *region;
+ int ret;
+
+ /*
+ * Check inputs: only accept a page-aligned user memory region
+ * with a valid length.
+ */
+ if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
+ return -EINVAL;
+
+ /* Check overflow */
+ if (user_addr + length < user_addr)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr,
+ length))
+ return -EINVAL;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->user_addr = user_addr;
+ region->length = length;
+
+ /* Pin the user memory region */
+ ret = afu_dma_pin_pages(pdata, region);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ goto free_region;
+ }
+
+ /* Only accept physically contiguous pages; otherwise return an error */
+ if (!afu_dma_check_continuous_pages(region)) {
+ dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ ret = -EINVAL;
+ goto unpin_pages;
+ }
+
+ /* Pages are contiguous, so the region can be mapped as one DMA page */
+ region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->pages[0], 0,
+ region->length,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
+ dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ *iova = region->iova;
+
+ mutex_lock(&pdata->lock);
+ ret = afu_dma_region_add(pdata, region);
+ mutex_unlock(&pdata->lock);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ goto unmap_dma;
+ }
+
+ return 0;
+
+unmap_dma:
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+unpin_pages:
+ afu_dma_unpin_pages(pdata, region);
+free_region:
+ kfree(region);
+ return ret;
+}
+
+/**
+ * afu_dma_unmap_region - unmap dma memory region
+ * @pdata: feature device platform data
+ * @iova: dma address of the region
+ *
+ * Unmap dma memory region based on @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ struct dfl_afu_dma_region *region;
+
+ mutex_lock(&pdata->lock);
+ region = afu_dma_region_find_iova(pdata, iova);
+ if (!region) {
+ mutex_unlock(&pdata->lock);
+ return -EINVAL;
+ }
+
+ if (region->in_use) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ afu_dma_region_remove(pdata, region);
+ mutex_unlock(&pdata->lock);
+
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+ afu_dma_unpin_pages(pdata, region);
+ kfree(region);
+
+ return 0;
+}
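
A usage note on the lookup above: afu_dma_region_find() serves both an exact-start lookup and a containment lookup, as its kernel-doc describes. A fragment, assuming pdata, iova and size are in scope:

struct dfl_afu_dma_region *region;

/* exact lookup: the region whose iova equals 'iova' (size == 0) */
region = afu_dma_region_find(pdata, iova, 0);

/* containment lookup: a region fully covering [iova, iova + size) */
region = afu_dma_region_find(pdata, iova, size);
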
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
new file mode 100644
index 000000000000..02baa6a227c0
--- /dev/null
+++ b/drivers/fpga/dfl-afu-main.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl-afu.h"
+
+/**
+ * port_enable - enable a port
+ * @pdev: port platform device.
+ *
+ * Enable the port by clearing the port soft reset bit, which is set by
+ * default. The AFU is unable to respond to any MMIO access while in reset.
+ * port_enable() should only be used after port_disable().
+ */
+static void port_enable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ WARN_ON(!pdata->disable_count);
+
+ if (--pdata->disable_count != 0)
+ return;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Clear port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v &= ~PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+/**
+ * port_disable - disable a port
+ * @pdev: port platform device.
+ *
+ * Disable the port by setting the port soft reset bit, which puts the port
+ * into reset.
+ */
+static int port_disable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ if (pdata->disable_count++ != 0)
+ return 0;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Set port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v |= PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+
+ /*
+ * HW sets the ack bit to 1 when all outstanding requests have been drained
+ * on this port and the minimum soft reset pulse width has elapsed.
+ * The driver polls port_soft_reset_ack to determine whether HW has
+ * completed the reset.
+ */
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
+ RST_POLL_INVL, RST_POLL_TIMEOUT)) {
+ dev_err(&pdev->dev, "timeout, fail to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * This function resets the FPGA Port and its accelerator (AFU) via
+ * port_disable() and port_enable() (i.e. it sets the port soft reset bit
+ * and then clears it). Userspace can do a Port reset at any time, e.g.
+ * during DMA or Partial
+ * Reconfiguration. But it should never cause any system level issue, only
+ * functional failure (e.g. DMA or PR operation failure) and be recoverable
+ * from the failure.
+ *
+ * Note: the accelerator (AFU) is not accessible when its port is in reset
+ * (disabled). Any attempt at MMIO access to the AFU while in reset will
+ * result in errors reported via the port error reporting sub feature
+ * (if present).
+ */
+static int __port_reset(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = port_disable(pdev);
+ if (!ret)
+ port_enable(pdev);
+
+ return ret;
+}
+
+static int port_reset(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ mutex_lock(&pdata->lock);
+ ret = __port_reset(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static int port_get_id(struct platform_device *pdev)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
+}
+
+static ssize_t
+id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int id = port_get_id(to_platform_device(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", id);
+}
+static DEVICE_ATTR_RO(id);
+
+static const struct attribute *port_hdr_attrs[] = {
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static int port_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT HDR Init.\n");
+
+ port_reset(pdev);
+
+ return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
+}
+
+static void port_hdr_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT HDR UInit.\n");
+
+ sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
+}
+
+static long
+port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_PORT_RESET:
+ if (!arg)
+ ret = port_reset(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static const struct dfl_feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .ioctl = port_hdr_ioctl,
+};
+
+static ssize_t
+afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 guidl, guidh;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+
+ mutex_lock(&pdata->lock);
+ if (pdata->disable_count) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ guidl = readq(base + GUID_L);
+ guidh = readq(base + GUID_H);
+ mutex_unlock(&pdata->lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
+}
+static DEVICE_ATTR_RO(afu_id);
+
+static const struct attribute *port_afu_attrs[] = {
+ &dev_attr_afu_id.attr,
+ NULL
+};
+
+static int port_afu_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct resource *res = &pdev->resource[feature->resource_index];
+ int ret;
+
+ dev_dbg(&pdev->dev, "PORT AFU Init.\n");
+
+ ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_AFU, resource_size(res),
+ res->start, DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
+ if (ret)
+ return ret;
+
+ return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
+}
+
+static void port_afu_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT AFU UInit.\n");
+
+ sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
+}
+
+static const struct dfl_feature_ops port_afu_ops = {
+ .init = port_afu_init,
+ .uinit = port_afu_uinit,
+};
+
+static struct dfl_feature_driver port_feature_drvs[] = {
+ {
+ .id = PORT_FEATURE_ID_HEADER,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .id = PORT_FEATURE_ID_AFU,
+ .ops = &port_afu_ops,
+ },
+ {
+ .ops = NULL,
+ }
+};
+
+static int afu_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata;
+ int ret;
+
+ pdata = dev_get_platdata(&fdev->dev);
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ ret = dfl_feature_dev_use_begin(pdata);
+ if (ret)
+ return ret;
+
+ dev_dbg(&fdev->dev, "Device File Open\n");
+ filp->private_data = fdev;
+
+ return 0;
+}
+
+static int afu_release(struct inode *inode, struct file *filp)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ __port_reset(pdev);
+ afu_dma_region_destroy(pdata);
+ mutex_unlock(&pdata->lock);
+
+ dfl_feature_dev_use_end(pdata);
+
+ return 0;
+}
+
+static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static long
+afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_info info;
+ struct dfl_afu *afu;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);
+
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ info.flags = 0;
+ info.num_regions = afu->num_regions;
+ info.num_umsgs = afu->num_umsgs;
+ mutex_unlock(&pdata->lock);
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+ void __user *arg)
+{
+ struct dfl_fpga_port_region_info rinfo;
+ struct dfl_afu_mmio_region region;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_region_info, offset);
+
+ if (copy_from_user(&rinfo, arg, minsz))
+ return -EFAULT;
+
+ if (rinfo.argsz < minsz || rinfo.padding)
+ return -EINVAL;
+
+ ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ if (ret)
+ return ret;
+
+ rinfo.flags = region.flags;
+ rinfo.size = region.size;
+ rinfo.offset = region.offset;
+
+ if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_map map;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
+
+ if (copy_from_user(&map, arg, minsz))
+ return -EFAULT;
+
+ if (map.argsz < minsz || map.flags)
+ return -EINVAL;
+
+ ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(arg, &map, sizeof(map))) {
+ afu_dma_unmap_region(pdata, map.iova);
+ return -EFAULT;
+ }
+
+ dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ (unsigned long long)map.user_addr,
+ (unsigned long long)map.length,
+ (unsigned long long)map.iova);
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_unmap unmap;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
+
+ if (copy_from_user(&unmap, arg, minsz))
+ return -EFAULT;
+
+ if (unmap.argsz < minsz || unmap.flags)
+ return -EINVAL;
+
+ return afu_dma_unmap_region(pdata, unmap.iova);
+}
+
+static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return afu_ioctl_check_extension(pdata, arg);
+ case DFL_FPGA_PORT_GET_INFO:
+ return afu_ioctl_get_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_GET_REGION_INFO:
+ return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_MAP:
+ return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_UNMAP:
+ return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ default:
+ /*
+ * Let the sub-feature's ioctl function handle the cmd.
+ * A sub-feature's ioctl returns -ENODEV when the cmd is not
+ * handled by that sub-feature, and returns 0 or another
+ * error code if the cmd is handled.
+ */
+ dfl_fpga_dev_for_each_feature(pdata, f)
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_afu_mmio_region region;
+ u64 offset;
+ int ret;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ if (ret)
+ return ret;
+
+ if (!(region.flags & DFL_PORT_REGION_MMAP))
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
+ return -EPERM;
+
+ if ((vma->vm_flags & VM_WRITE) &&
+ !(region.flags & DFL_PORT_REGION_WRITE))
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return remap_pfn_range(vma, vma->vm_start,
+ (region.phys + (offset - region.offset)) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations afu_fops = {
+ .owner = THIS_MODULE,
+ .open = afu_open,
+ .release = afu_release,
+ .unlocked_ioctl = afu_ioctl,
+ .mmap = afu_mmap,
+};
+
+static int afu_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_afu *afu;
+
+ afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
+ if (!afu)
+ return -ENOMEM;
+
+ afu->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, afu);
+ afu_mmio_region_init(pdata);
+ afu_dma_region_init(pdata);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int afu_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_afu *afu;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ afu_mmio_region_destroy(pdata);
+ afu_dma_region_destroy(pdata);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int port_enable_set(struct platform_device *pdev, bool enable)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ if (enable)
+ port_enable(pdev);
+ else
+ ret = port_disable(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static struct dfl_fpga_port_ops afu_port_ops = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .owner = THIS_MODULE,
+ .get_id = port_get_id,
+ .enable_set = port_enable_set,
+};
+
+static int afu_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ ret = afu_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
+ if (ret) {
+ dfl_fpga_dev_feature_uinit(pdev);
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ afu_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int afu_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ afu_dev_destroy(pdev);
+
+ return 0;
+}
+
+static struct platform_driver afu_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ },
+ .probe = afu_probe,
+ .remove = afu_remove,
+};
+
+static int __init afu_init(void)
+{
+ int ret;
+
+ dfl_fpga_port_ops_add(&afu_port_ops);
+
+ ret = platform_driver_register(&afu_driver);
+ if (ret)
+ dfl_fpga_port_ops_del(&afu_port_ops);
+
+ return ret;
+}
+
+static void __exit afu_exit(void)
+{
+ platform_driver_unregister(&afu_driver);
+
+ dfl_fpga_port_ops_del(&afu_port_ops);
+}
+
+module_init(afu_init);
+module_exit(afu_exit);
+
+MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-port");
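
For context, a hedged userspace sketch of the DMA map/unmap ioctls added above; the device node path is an assumption and error handling is minimal:

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	size_t len = 4096;
	void *buf = NULL;
	int fd = open("/dev/dfl-port.0", O_RDWR);	/* assumed node */

	if (fd < 0 || posix_memalign(&buf, 4096, len))
		return 1;

	struct dfl_fpga_port_dma_map map = {
		.argsz = sizeof(map),
		.user_addr = (uint64_t)(uintptr_t)buf,	/* page-aligned */
		.length = len,				/* page-aligned */
	};

	if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map) == 0) {
		/* map.iova now holds the device-visible DMA address */
		struct dfl_fpga_port_dma_unmap unmap = {
			.argsz = sizeof(unmap),
			.iova = map.iova,
		};
		ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
	}
	close(fd);
	return 0;
}
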
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
new file mode 100644
index 000000000000..0804b7a0c298
--- /dev/null
+++ b/drivers/fpga/dfl-afu-region.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) MMIO Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include "dfl-afu.h"
+
+/**
+ * afu_mmio_region_init - init function for afu mmio region support
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ INIT_LIST_HEAD(&afu->regions);
+}
+
+#define for_each_region(region, afu) \
+ list_for_each_entry((region), &(afu)->regions, node)
+
+static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
+ u32 region_index)
+{
+ struct dfl_afu_mmio_region *region;
+
+ for_each_region(region, afu)
+ if (region->index == region_index)
+ return region;
+
+ return NULL;
+}
+
+/**
+ * afu_mmio_region_add - add a mmio region to given feature dev.
+ *
+ * @region_index: region index.
+ * @region_size: region size.
+ * @phys: region's physical address of this region.
+ * @flags: region flags (access permission).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->index = region_index;
+ region->size = region_size;
+ region->phys = phys;
+ region->flags = flags;
+
+ mutex_lock(&pdata->lock);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+
+ /* check if @index already exists */
+ if (get_region_by_index(afu, region_index)) {
+ mutex_unlock(&pdata->lock);
+ ret = -EEXIST;
+ goto exit;
+ }
+
+ region_size = PAGE_ALIGN(region_size);
+ region->offset = afu->region_cur_offset;
+ list_add(&region->node, &afu->regions);
+
+ afu->region_cur_offset += region_size;
+ afu->num_regions++;
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+exit:
+ devm_kfree(&pdata->dev->dev, region);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu_mmio_region *tmp, *region;
+
+ list_for_each_entry_safe(region, tmp, &afu->regions, node)
+ devm_kfree(&pdata->dev->dev, region);
+}
+
+/**
+ * afu_mmio_region_get_by_index - find an afu region by index.
+ * @pdata: afu platform device's pdata.
+ * @region_index: region index.
+ * @pregion: ptr to region for result.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ region = get_region_by_index(afu, region_index);
+ if (!region) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ *pregion = *region;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
+ *
+ * @pdata: afu platform device's pdata.
+ * @offset: region offset from start of the device fd.
+ * @size: region size.
+ * @pregion: ptr to region for result.
+ *
+ * Find the region which fully contains the region described by input
+ * parameters (offset and size) from the feature dev's region linked list.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ for_each_region(region, afu)
+ if (region->offset <= offset &&
+ region->offset + region->size >= offset + size) {
+ *pregion = *region;
+ goto exit;
+ }
+ ret = -EINVAL;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
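
A matching userspace sketch tying the region offsets managed above to afu_mmap() in dfl-afu-main.c; again the device node path is an assumption:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	int fd = open("/dev/dfl-port.0", O_RDWR);	/* assumed node */
	struct dfl_fpga_port_region_info rinfo = {
		.argsz = sizeof(rinfo),
		.index = DFL_PORT_REGION_INDEX_AFU,
	};

	if (fd < 0 || ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo))
		return 1;

	if (rinfo.flags & DFL_PORT_REGION_MMAP) {
		/* rinfo.offset is the file offset afu_mmap() resolves
		 * back to this region; rinfo.size is its length. */
		void *mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, rinfo.offset);
		if (mmio != MAP_FAILED)
			munmap(mmio, rinfo.size);
	}
	close(fd);
	return 0;
}
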
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
new file mode 100644
index 000000000000..0c7630ae3cda
--- /dev/null
+++ b/drivers/fpga/dfl-afu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Accelerated Function Unit (AFU) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_AFU_H
+#define __DFL_AFU_H
+
+#include <linux/mm.h>
+
+#include "dfl.h"
+
+/**
+ * struct dfl_afu_mmio_region - afu mmio region data structure
+ *
+ * @index: region index.
+ * @flags: region flags (access permission).
+ * @size: region size.
+ * @offset: region offset from start of the device fd.
+ * @phys: region's physical address.
+ * @node: node to add to afu feature dev's region list.
+ */
+struct dfl_afu_mmio_region {
+ u32 index;
+ u32 flags;
+ u64 size;
+ u64 offset;
+ u64 phys;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_afu_dma_region - afu DMA region data structure
+ *
+ * @user_addr: region userspace virtual address.
+ * @length: region length.
+ * @iova: region IO virtual address.
+ * @pages: ptr to pages of this region.
+ * @node: rb tree node.
+ * @in_use: flag to indicate if this region is in_use.
+ */
+struct dfl_afu_dma_region {
+ u64 user_addr;
+ u64 length;
+ u64 iova;
+ struct page **pages;
+ struct rb_node node;
+ bool in_use;
+};
+
+/**
+ * struct dfl_afu - afu device data structure
+ *
+ * @region_cur_offset: current region offset from the start of the device fd.
+ * @num_regions: num of mmio regions.
+ * @regions: the mmio region linked list of this afu feature device.
+ * @dma_regions: root of dma regions rb tree.
+ * @num_umsgs: num of umsgs.
+ * @pdata: afu platform device's pdata.
+ */
+struct dfl_afu {
+ u64 region_cur_offset;
+ int num_regions;
+ u8 num_umsgs;
+ struct list_head regions;
+ struct rb_root dma_regions;
+
+ struct dfl_feature_platform_data *pdata;
+};
+
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags);
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion);
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion);
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova);
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+ u64 iova, u64 size);
+#endif /* __DFL_AFU_H */
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
new file mode 100644
index 000000000000..7cc041def8b3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-br.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Bridge Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#include "dfl.h"
+#include "dfl-fme-pr.h"
+
+struct fme_br_priv {
+ struct dfl_fme_br_pdata *pdata;
+ struct dfl_fpga_port_ops *port_ops;
+ struct platform_device *port_pdev;
+};
+
+static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct fme_br_priv *priv = bridge->priv;
+ struct platform_device *port_pdev;
+ struct dfl_fpga_port_ops *ops;
+
+ if (!priv->port_pdev) {
+ port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ return -ENODEV;
+
+ priv->port_pdev = port_pdev;
+ }
+
+ if (priv->port_pdev && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (!ops || !ops->enable_set)
+ return -ENOENT;
+
+ priv->port_ops = ops;
+ }
+
+ return priv->port_ops->enable_set(priv->port_pdev, enable);
+}
+
+static const struct fpga_bridge_ops fme_bridge_ops = {
+ .enable_set = fme_bridge_enable_set,
+};
+
+static int fme_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fme_br_priv *priv;
+ struct fpga_bridge *br;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdata = dev_get_platdata(dev);
+
+ br = fpga_bridge_create(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
+ if (!br)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, br);
+
+ ret = fpga_bridge_register(br);
+ if (ret)
+ fpga_bridge_free(br);
+
+ return ret;
+}
+
+static int fme_br_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *br = platform_get_drvdata(pdev);
+ struct fme_br_priv *priv = br->priv;
+
+ fpga_bridge_unregister(br);
+
+ if (priv->port_pdev)
+ put_device(&priv->port_pdev->dev);
+ if (priv->port_ops)
+ dfl_fpga_port_ops_put(priv->port_ops);
+
+ return 0;
+}
+
+static struct platform_driver fme_br_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_BRIDGE,
+ },
+ .probe = fme_br_probe,
+ .remove = fme_br_remove,
+};
+
+module_platform_driver(fme_br_driver);
+
+MODULE_DESCRIPTION("FPGA Bridge for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-bridge");
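
[For orientation, not part of the patch: fme_bridge_enable_set() above is invoked through the fpga-bridge core, and the fpga-region core drives it around reprogramming. A simplified sketch of that disable/program/enable sequence, using only fpga-bridge list APIs this series already relies on; program_region_payload() is a stand-in name for the FPGA manager programming step.]

#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-region.h>

/* stand-in for the fpga-mgr programming step */
static int program_region_payload(struct fpga_region *region)
{
	return 0;
}

static int sketch_program_with_bridges(struct fpga_region *region)
{
	int ret;

	/* gates traffic: ends up in fme_bridge_enable_set(br, false) */
	ret = fpga_bridges_disable(&region->bridge_list);
	if (ret)
		return ret;

	ret = program_region_payload(region);
	if (ret)
		return ret;

	/* re-opens traffic: fme_bridge_enable_set(br, true) */
	return fpga_bridges_enable(&region->bridge_list);
}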
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
new file mode 100644
index 000000000000..086ad2420ade
--- /dev/null
+++ b/drivers/fpga/dfl-fme-main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+static ssize_t ports_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
+}
+static DEVICE_ATTR_RO(ports_num);
+
+/*
+ * Bitstream (static FPGA region) identifier number. It contains the
+ * detailed version and other information of this static FPGA region.
+ */
+static ssize_t bitstream_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_ID);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_id);
+
+/*
+ * Bitstream (static FPGA region) meta data. It contains the synthesis
+ * date, seed and other information of this static FPGA region.
+ */
+static ssize_t bitstream_metadata_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_MD);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_metadata);
+
+static const struct attribute *fme_hdr_attrs[] = {
+ &dev_attr_ports_num.attr,
+ &dev_attr_bitstream_id.attr,
+ &dev_attr_bitstream_metadata.attr,
+ NULL,
+};
+
+static int fme_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ void __iomem *base = feature->ioaddr;
+ int ret;
+
+ dev_dbg(&pdev->dev, "FME HDR Init.\n");
+ dev_dbg(&pdev->dev, "FME cap %llx.\n",
+ (unsigned long long)readq(base + FME_HDR_CAP));
+
+ ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void fme_hdr_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "FME HDR UInit.\n");
+ sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs);
+}
+
+static const struct dfl_feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+};
+
+static struct dfl_feature_driver fme_feature_drvs[] = {
+ {
+ .id = FME_FEATURE_ID_HEADER,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .id = FME_FEATURE_ID_PR_MGMT,
+ .ops = &pr_mgmt_ops,
+ },
+ {
+ .ops = NULL,
+ },
+};
+
+static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static int fme_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ int ret;
+
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ ret = dfl_feature_dev_use_begin(pdata);
+ if (ret)
+ return ret;
+
+ dev_dbg(&fdev->dev, "Device File Open\n");
+ filp->private_data = pdata;
+
+ return 0;
+}
+
+static int fme_release(struct inode *inode, struct file *filp)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+ dfl_feature_dev_use_end(pdata);
+
+ return 0;
+}
+
+static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return fme_ioctl_check_extension(pdata, arg);
+ default:
+		/*
+		 * Let the sub-feature's ioctl function handle the cmd.
+		 * A sub-feature's ioctl returns -ENODEV when the cmd is
+		 * not handled by that sub feature, and returns 0 or
+		 * another error code once the cmd has been handled.
+		 */
+ dfl_fpga_dev_for_each_feature(pdata, f) {
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int fme_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *fme;
+
+ fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
+ if (!fme)
+ return -ENOMEM;
+
+ fme->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, fme);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static void fme_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *fme;
+
+ mutex_lock(&pdata->lock);
+ fme = dfl_fpga_pdata_get_private(pdata);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+}
+
+static const struct file_operations fme_fops = {
+ .owner = THIS_MODULE,
+ .open = fme_open,
+ .release = fme_release,
+ .unlocked_ioctl = fme_ioctl,
+};
+
+static int fme_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = fme_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
+ if (ret)
+ goto feature_uinit;
+
+ return 0;
+
+feature_uinit:
+ dfl_fpga_dev_feature_uinit(pdev);
+dev_destroy:
+ fme_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int fme_remove(struct platform_device *pdev)
+{
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ fme_dev_destroy(pdev);
+
+ return 0;
+}
+
+static struct platform_driver fme_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_FME,
+ },
+ .probe = fme_probe,
+ .remove = fme_remove,
+};
+
+module_platform_driver(fme_driver);
+
+MODULE_DESCRIPTION("FPGA Management Engine driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme");
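
[A minimal sketch, not part of the patch, of how fme_feature_drvs above would grow for an additional sub feature. FME_FEATURE_ID_FOO and the fme_foo_* names are invented for illustration; only the id-based table matching is from the driver itself.]

static int fme_foo_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	/* feature->ioaddr points at this sub feature's register region */
	return 0;
}

static void fme_foo_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
}

static const struct dfl_feature_ops fme_foo_ops = {
	.init = fme_foo_init,
	.uinit = fme_foo_uinit,
};

/* matched by id against each dfl_feature parsed from the DFL */
static struct dfl_feature_driver fme_feature_drvs[] = {
	{ .id = FME_FEATURE_ID_HEADER, .ops = &fme_hdr_ops, },
	{ .id = FME_FEATURE_ID_PR_MGMT, .ops = &pr_mgmt_ops, },
	{ .id = FME_FEATURE_ID_FOO, .ops = &fme_foo_ops, },	/* hypothetical */
	{ .ops = NULL, },	/* sentinel */
};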
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
new file mode 100644
index 000000000000..b5ef405b6d88
--- /dev/null
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#include "dfl-fme-pr.h"
+
+/* FME Partial Reconfiguration Sub Feature Register Set */
+#define FME_PR_DFH 0x0
+#define FME_PR_CTRL 0x8
+#define FME_PR_STS 0x10
+#define FME_PR_DATA 0x18
+#define FME_PR_ERR 0x20
+#define FME_PR_INTFC_ID_H 0xA8
+#define FME_PR_INTFC_ID_L 0xB0
+
+/* FME PR Control Register Bitfield */
+#define FME_PR_CTRL_PR_RST BIT_ULL(0) /* Reset PR engine */
+#define FME_PR_CTRL_PR_RSTACK BIT_ULL(4) /* Ack for PR engine reset */
+#define FME_PR_CTRL_PR_RGN_ID GENMASK_ULL(9, 7) /* PR Region ID */
+#define FME_PR_CTRL_PR_START BIT_ULL(12) /* Start to request PR service */
+#define FME_PR_CTRL_PR_COMPLETE BIT_ULL(13) /* PR data push completion */
+
+/* FME PR Status Register Bitfield */
+/* Number of available entries in HW queue inside the PR engine. */
+#define FME_PR_STS_PR_CREDIT GENMASK_ULL(8, 0)
+#define FME_PR_STS_PR_STS BIT_ULL(16) /* PR operation status */
+#define FME_PR_STS_PR_STS_IDLE 0
+#define FME_PR_STS_PR_CTRLR_STS GENMASK_ULL(22, 20) /* Controller status */
+#define FME_PR_STS_PR_HOST_STS GENMASK_ULL(27, 24) /* PR host status */
+
+/* FME PR Data Register Bitfield */
+/* PR data from the raw-binary file. */
+#define FME_PR_DATA_PR_DATA_RAW	GENMASK_ULL(31, 0)
+
+/* FME PR Error Register */
+/* PR Operation errors detected. */
+#define FME_PR_ERR_OPERATION_ERR BIT_ULL(0)
+/* CRC error detected. */
+#define FME_PR_ERR_CRC_ERR BIT_ULL(1)
+/* Incompatible PR bitstream detected. */
+#define FME_PR_ERR_INCOMPATIBLE_BS BIT_ULL(2)
+/* PR data push protocol violated. */
+#define FME_PR_ERR_PROTOCOL_ERR BIT_ULL(3)
+/* PR data fifo overflow error detected */
+#define FME_PR_ERR_FIFO_OVERFLOW BIT_ULL(4)
+
+#define PR_WAIT_TIMEOUT 8000000
+#define PR_HOST_STATUS_IDLE 0
+
+struct fme_mgr_priv {
+ void __iomem *ioaddr;
+ u64 pr_error;
+};
+
+static u64 pr_error_to_mgr_status(u64 err)
+{
+ u64 status = 0;
+
+ if (err & FME_PR_ERR_OPERATION_ERR)
+ status |= FPGA_MGR_STATUS_OPERATION_ERR;
+ if (err & FME_PR_ERR_CRC_ERR)
+ status |= FPGA_MGR_STATUS_CRC_ERR;
+ if (err & FME_PR_ERR_INCOMPATIBLE_BS)
+ status |= FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR;
+ if (err & FME_PR_ERR_PROTOCOL_ERR)
+ status |= FPGA_MGR_STATUS_IP_PROTOCOL_ERR;
+ if (err & FME_PR_ERR_FIFO_OVERFLOW)
+ status |= FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR;
+
+ return status;
+}
+
+static u64 fme_mgr_pr_error_handle(void __iomem *fme_pr)
+{
+ u64 pr_status, pr_error;
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ if (!(pr_status & FME_PR_STS_PR_STS))
+ return 0;
+
+ pr_error = readq(fme_pr + FME_PR_ERR);
+ writeq(pr_error, fme_pr + FME_PR_ERR);
+
+ return pr_error;
+}
+
+static int fme_mgr_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status;
+
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(dev, "only supports partial reconfiguration.\n");
+ return -EINVAL;
+ }
+
+	dev_dbg(dev, "resetting PR engine before initiating PR\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ pr_ctrl & FME_PR_CTRL_PR_RSTACK, 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Reset ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev,
+ "waiting for PR resource in HW to be initialized and ready\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_STS, pr_status,
+ (pr_status & FME_PR_STS_PR_STS) ==
+ FME_PR_STS_PR_STS_IDLE, 1, PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Status timeout\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "check and clear previous PR error\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error)
+ dev_dbg(dev, "previous PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+
+ dev_dbg(dev, "set PR port ID\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RGN_ID;
+ pr_ctrl |= FIELD_PREP(FME_PR_CTRL_PR_RGN_ID, info->region_id);
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ return 0;
+}
+
+static int fme_mgr_write(struct fpga_manager *mgr,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status, pr_data;
+ int delay = 0, pr_credit, i = 0;
+
+ dev_dbg(dev, "start request\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_START;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "pushing data from bitstream to HW\n");
+
+	/*
+	 * The driver can push data to the PR hardware through the PR_DATA
+	 * register once the HW has enough pr_credit (> 1); pr_credit is
+	 * decremented by one for each 32-bit word of PR data written to
+	 * PR_DATA. If pr_credit <= 1, the driver must poll until the
+	 * hardware reports enough pr_credit again.
+	 */
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+
+ while (count > 0) {
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(dev, "PR_CREDIT timeout\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+ }
+
+ if (count < 4) {
+			dev_err(dev, "Invalid PR bitstream size\n");
+ return -EINVAL;
+ }
+
+ pr_data = 0;
+ pr_data |= FIELD_PREP(FME_PR_DATA_PR_DATA_RAW,
+ *(((u32 *)buf) + i));
+ writeq(pr_data, fme_pr + FME_PR_DATA);
+ count -= 4;
+ pr_credit--;
+ i++;
+ }
+
+ return 0;
+}
+
+static int fme_mgr_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl;
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_COMPLETE;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "green bitstream push complete\n");
+ dev_dbg(dev, "waiting for HW to release PR resource\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ !(pr_ctrl & FME_PR_CTRL_PR_START), 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Completion ACK timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "PR operation complete, checking status\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error) {
+ dev_dbg(dev, "PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "PR done successfully\n");
+
+ return 0;
+}
+
+static enum fpga_mgr_states fme_mgr_state(struct fpga_manager *mgr)
+{
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static u64 fme_mgr_status(struct fpga_manager *mgr)
+{
+ struct fme_mgr_priv *priv = mgr->priv;
+
+ return pr_error_to_mgr_status(priv->pr_error);
+}
+
+static const struct fpga_manager_ops fme_mgr_ops = {
+ .write_init = fme_mgr_write_init,
+ .write = fme_mgr_write,
+ .write_complete = fme_mgr_write_complete,
+ .state = fme_mgr_state,
+ .status = fme_mgr_status,
+};
+
+static void fme_mgr_get_compat_id(void __iomem *fme_pr,
+ struct fpga_compat_id *id)
+{
+ id->id_l = readq(fme_pr + FME_PR_INTFC_ID_L);
+ id->id_h = readq(fme_pr + FME_PR_INTFC_ID_H);
+}
+
+static int fme_mgr_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fpga_compat_id *compat_id;
+ struct device *dev = &pdev->dev;
+ struct fme_mgr_priv *priv;
+ struct fpga_manager *mgr;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (pdata->ioaddr)
+ priv->ioaddr = pdata->ioaddr;
+
+ if (!priv->ioaddr) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->ioaddr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ioaddr))
+ return PTR_ERR(priv->ioaddr);
+ }
+
+ compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL);
+ if (!compat_id)
+ return -ENOMEM;
+
+ fme_mgr_get_compat_id(priv->ioaddr, compat_id);
+
+ mgr = fpga_mgr_create(dev, "DFL FME FPGA Manager",
+ &fme_mgr_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->compat_id = compat_id;
+ platform_set_drvdata(pdev, mgr);
+
+ ret = fpga_mgr_register(mgr);
+ if (ret)
+ fpga_mgr_free(mgr);
+
+ return ret;
+}
+
+static int fme_mgr_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+
+ return 0;
+}
+
+static struct platform_driver fme_mgr_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_MGR,
+ },
+ .probe = fme_mgr_probe,
+ .remove = fme_mgr_remove,
+};
+
+module_platform_driver(fme_mgr_driver);
+
+MODULE_DESCRIPTION("FPGA Manager for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-mgr");
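
[Editorial aside: pr_error_to_mgr_status() above feeds the fpga-manager core's status reporting, which is surfaced to userspace as a sysfs attribute in this same kernel series. A hedged userspace sketch; both the "status" attribute name and the fpga0 instance name are assumptions here.]

#include <stdio.h>

int main(void)
{
	/* path assumes the manager enumerated as fpga0 */
	FILE *f = fopen("/sys/class/fpga_manager/fpga0/status", "r");
	char line[128];

	if (!f)
		return 1;

	/* one line per latched error, e.g. "reconfig CRC error" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}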
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
new file mode 100644
index 000000000000..fc9fd2d0482f
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Partial Reconfiguration
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+#include "dfl-fme-pr.h"
+
+static struct dfl_fme_region *
+dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+
+ list_for_each_entry(fme_region, &fme->region_list, node)
+ if (fme_region->port_id == port_id)
+ return fme_region;
+
+ return NULL;
+}
+
+static int dfl_fme_region_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+ struct fpga_region *region;
+
+ fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
+ if (!fme_region)
+ return NULL;
+
+ region = fpga_region_class_find(NULL, &fme_region->region->dev,
+ dfl_fme_region_match);
+ if (!region)
+ return NULL;
+
+ return region;
+}
+
+static int fme_pr(struct platform_device *pdev, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __user *argp = (void __user *)arg;
+ struct dfl_fpga_fme_port_pr port_pr;
+ struct fpga_image_info *info;
+ struct fpga_region *region;
+ void __iomem *fme_hdr;
+ struct dfl_fme *fme;
+ unsigned long minsz;
+ void *buf = NULL;
+ int ret = 0;
+ u64 v;
+
+ minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
+
+ if (copy_from_user(&port_pr, argp, minsz))
+ return -EFAULT;
+
+ if (port_pr.argsz < minsz || port_pr.flags)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(port_pr.buffer_size, 4))
+ return -EINVAL;
+
+ /* get fme header region */
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ /* check port id */
+ v = readq(fme_hdr + FME_HDR_CAP);
+ if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
+		dev_dbg(&pdev->dev, "port id exceeds the maximum number of ports\n");
+ return -EINVAL;
+ }
+
+ if (!access_ok(VERIFY_READ,
+ (void __user *)(unsigned long)port_pr.buffer_address,
+ port_pr.buffer_size))
+ return -EFAULT;
+
+ buf = vmalloc(port_pr.buffer_size);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf,
+ (void __user *)(unsigned long)port_pr.buffer_address,
+ port_pr.buffer_size)) {
+ ret = -EFAULT;
+ goto free_exit;
+ }
+
+ /* prepare fpga_image_info for PR */
+ info = fpga_image_info_alloc(&pdev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto free_exit;
+ }
+
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ mutex_lock(&pdata->lock);
+ fme = dfl_fpga_pdata_get_private(pdata);
+ /* fme device has been unregistered. */
+ if (!fme) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ region = dfl_fme_region_find(fme, port_pr.port_id);
+ if (!region) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ fpga_image_info_free(region->info);
+
+ info->buf = buf;
+ info->count = port_pr.buffer_size;
+ info->region_id = port_pr.port_id;
+ region->info = info;
+
+ ret = fpga_region_program_fpga(region);
+
+	/*
+	 * Allow userspace to reset the PR region's logic by disabling and
+	 * re-enabling the bridge to clear things out between acceleration
+	 * runs, so there is no need to hold the bridges after partial
+	 * reconfiguration.
+	 */
+ if (region->get_bridges)
+ fpga_bridges_put(&region->bridge_list);
+
+ put_device(&region->dev);
+unlock_exit:
+ mutex_unlock(&pdata->lock);
+free_exit:
+ vfree(buf);
+ if (copy_to_user((void __user *)arg, &port_pr, minsz))
+ return -EFAULT;
+
+ return ret;
+}
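
[Stepping outside the patch for a moment: the expected caller of fme_pr() is userspace issuing DFL_FPGA_FME_PORT_PR on the FME char device, per the fpga-dfl UAPI from this patchset. A minimal sketch; the device node name and image size are illustrative.]

#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	struct dfl_fpga_fme_port_pr port_pr;
	size_t size = 4096;	/* must be 4-byte aligned, see fme_pr() */
	void *buf;
	int fd, ret;

	fd = open("/dev/dfl-fme.0", O_RDWR);	/* illustrative node name */
	if (fd < 0)
		return 1;

	buf = malloc(size);
	if (!buf)
		return 1;
	/* ... fill buf with the partial bitstream for port 0 ... */

	memset(&port_pr, 0, sizeof(port_pr));
	port_pr.argsz = sizeof(port_pr);
	port_pr.flags = 0;			/* must be zero */
	port_pr.port_id = 0;		/* checked against FME_CAP_NUM_PORTS */
	port_pr.buffer_size = size;
	port_pr.buffer_address = (uint64_t)(uintptr_t)buf;

	ret = ioctl(fd, DFL_FPGA_FME_PORT_PR, &port_pr);

	free(buf);
	close(fd);
	return ret ? 1 : 0;
}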
+
+/**
+ * dfl_fme_create_mgr - create fpga mgr platform device as child device
+ *
+ * @pdata: fme platform_device's pdata
+ * @feature: the PR management sub feature, which provides the mapped ioaddr
+ *
+ * Return: mgr platform device if successful, and error code otherwise.
+ */
+static struct platform_device *
+dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature)
+{
+ struct platform_device *mgr, *fme = pdata->dev;
+ struct dfl_fme_mgr_pdata mgr_pdata;
+ int ret = -ENOMEM;
+
+ if (!feature->ioaddr)
+ return ERR_PTR(-ENODEV);
+
+ mgr_pdata.ioaddr = feature->ioaddr;
+
+ /*
+ * Each FME has only one fpga-mgr, so allocate platform device using
+ * the same FME platform device id.
+ */
+ mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
+ if (!mgr)
+ return ERR_PTR(ret);
+
+ mgr->dev.parent = &fme->dev;
+
+ ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
+ if (ret)
+ goto create_mgr_err;
+
+ ret = platform_device_add(mgr);
+ if (ret)
+ goto create_mgr_err;
+
+ return mgr;
+
+create_mgr_err:
+ platform_device_put(mgr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_mgr - destroy fpga mgr platform device
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+
+ platform_device_unregister(priv->mgr);
+}
+
+/**
+ * dfl_fme_create_bridge - create fme fpga bridge platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @port_id: port id for the bridge to be created.
+ *
+ * Return: bridge platform device if successful, and error code otherwise.
+ */
+static struct dfl_fme_bridge *
+dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+{
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_br_pdata br_pdata;
+ struct dfl_fme_bridge *fme_br;
+ int ret = -ENOMEM;
+
+ fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
+ if (!fme_br)
+ return ERR_PTR(ret);
+
+ br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.port_id = port_id;
+
+ fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
+ PLATFORM_DEVID_AUTO);
+ if (!fme_br->br)
+ return ERR_PTR(ret);
+
+ fme_br->br->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
+ if (ret)
+ goto create_br_err;
+
+ ret = platform_device_add(fme_br->br);
+ if (ret)
+ goto create_br_err;
+
+ return fme_br;
+
+create_br_err:
+ platform_device_put(fme_br->br);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_bridge - destroy fpga bridge platform device
+ * @fme_br: fme bridge to destroy
+ */
+static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
+{
+ platform_device_unregister(fme_br->br);
+}
+
+/**
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_bridge *fbridge, *tmp;
+
+ list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
+ list_del(&fbridge->node);
+ dfl_fme_destroy_bridge(fbridge);
+ }
+}
+
+/**
+ * dfl_fme_create_region - create fpga region platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @mgr: mgr platform device needed for region
+ * @br: br platform device needed for region
+ * @port_id: port id
+ *
+ * Return: fme region if successful, and error code otherwise.
+ */
+static struct dfl_fme_region *
+dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+ struct platform_device *mgr,
+ struct platform_device *br, int port_id)
+{
+ struct dfl_fme_region_pdata region_pdata;
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_region *fme_region;
+ int ret = -ENOMEM;
+
+ fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
+ if (!fme_region)
+ return ERR_PTR(ret);
+
+ region_pdata.mgr = mgr;
+ region_pdata.br = br;
+
+ /*
+ * Each FPGA device may have more than one port, so allocate platform
+ * device using the same port platform device id.
+ */
+ fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
+ if (!fme_region->region)
+ return ERR_PTR(ret);
+
+ fme_region->region->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_region->region, &region_pdata,
+ sizeof(region_pdata));
+ if (ret)
+ goto create_region_err;
+
+ ret = platform_device_add(fme_region->region);
+ if (ret)
+ goto create_region_err;
+
+ fme_region->port_id = port_id;
+
+ return fme_region;
+
+create_region_err:
+ platform_device_put(fme_region->region);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_region - destroy fme region
+ * @fme_region: fme region to destroy
+ */
+static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
+{
+ platform_device_unregister(fme_region->region);
+}
+
+/**
+ * dfl_fme_destroy_regions - destroy all fme regions
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_region *fme_region, *tmp;
+
+ list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
+ list_del(&fme_region->node);
+ dfl_fme_destroy_region(fme_region);
+ }
+}
+
+static int pr_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme_region *fme_region;
+ struct dfl_fme_bridge *fme_br;
+ struct platform_device *mgr;
+ struct dfl_fme *priv;
+ void __iomem *fme_hdr;
+ int ret = -ENODEV, i = 0;
+ u64 fme_cap, port_offset;
+
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ priv = dfl_fpga_pdata_get_private(pdata);
+
+ /* Initialize the region and bridge sub device list */
+ INIT_LIST_HEAD(&priv->region_list);
+ INIT_LIST_HEAD(&priv->bridge_list);
+
+ /* Create fpga mgr platform device */
+ mgr = dfl_fme_create_mgr(pdata, feature);
+	if (IS_ERR(mgr)) {
+		dev_err(&pdev->dev, "failed to create fpga mgr pdev\n");
+		ret = PTR_ERR(mgr);
+		goto unlock;
+	}
+
+ priv->mgr = mgr;
+
+ /* Read capability register to check number of regions and bridges */
+ fme_cap = readq(fme_hdr + FME_HDR_CAP);
+ for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
+ port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
+ if (!(port_offset & FME_PORT_OFST_IMP))
+ continue;
+
+ /* Create bridge for each port */
+ fme_br = dfl_fme_create_bridge(pdata, i);
+ if (IS_ERR(fme_br)) {
+ ret = PTR_ERR(fme_br);
+ goto destroy_region;
+ }
+
+ list_add(&fme_br->node, &priv->bridge_list);
+
+ /* Create region for each port */
+ fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_br->br, i);
+		if (IS_ERR(fme_region)) {
+ ret = PTR_ERR(fme_region);
+ goto destroy_region;
+ }
+
+ list_add(&fme_region->node, &priv->region_list);
+ }
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+destroy_region:
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+unlock:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+static void pr_mgmt_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *priv;
+
+ mutex_lock(&pdata->lock);
+ priv = dfl_fpga_pdata_get_private(pdata);
+
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+ mutex_unlock(&pdata->lock);
+}
+
+static long fme_pr_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_FME_PORT_PR:
+ ret = fme_pr(pdev, arg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+const struct dfl_feature_ops pr_mgmt_ops = {
+ .init = pr_mgmt_init,
+ .uinit = pr_mgmt_uinit,
+ .ioctl = fme_pr_ioctl,
+};
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
new file mode 100644
index 000000000000..096a699089d3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Partial Reconfiguration Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_PR_H
+#define __DFL_FME_PR_H
+
+#include <linux/platform_device.h>
+
+/**
+ * struct dfl_fme_region - FME fpga region data structure
+ *
+ * @region: platform device of the FPGA region.
+ * @node: used to link fme_region to a list.
+ * @port_id: indicates which port this region is connected to.
+ */
+struct dfl_fme_region {
+ struct platform_device *region;
+ struct list_head node;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_region_pdata - platform data for FME region platform device.
+ *
+ * @mgr: platform device of the FPGA manager.
+ * @br: platform device of the FPGA bridge.
+ * @region_id: region id (same as port_id).
+ */
+struct dfl_fme_region_pdata {
+ struct platform_device *mgr;
+ struct platform_device *br;
+ int region_id;
+};
+
+/**
+ * struct dfl_fme_bridge - FME fpga bridge data structure
+ *
+ * @br: platform device of the FPGA bridge.
+ * @node: used to link fme_bridge to a list.
+ */
+struct dfl_fme_bridge {
+ struct platform_device *br;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
+ *
+ * @cdev: container device.
+ * @port_id: port id.
+ */
+struct dfl_fme_br_pdata {
+ struct dfl_fpga_cdev *cdev;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_mgr_pdata - platform data for FME manager platform device.
+ *
+ * @ioaddr: mapped io address for FME manager platform device.
+ */
+struct dfl_fme_mgr_pdata {
+ void __iomem *ioaddr;
+};
+
+#define DFL_FPGA_FME_MGR "dfl-fme-mgr"
+#define DFL_FPGA_FME_BRIDGE "dfl-fme-bridge"
+#define DFL_FPGA_FME_REGION "dfl-fme-region"
+
+#endif /* __DFL_FME_PR_H */
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
new file mode 100644
index 000000000000..0b7e19c27c6d
--- /dev/null
+++ b/drivers/fpga/dfl-fme-region.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Region Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-region.h>
+
+#include "dfl-fme-pr.h"
+
+static int fme_region_get_bridges(struct fpga_region *region)
+{
+ struct dfl_fme_region_pdata *pdata = region->priv;
+ struct device *dev = &pdata->br->dev;
+
+ return fpga_bridge_get_to_list(dev, region->info, &region->bridge_list);
+}
+
+static int fme_region_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct fpga_region *region;
+ struct fpga_manager *mgr;
+ int ret;
+
+ mgr = fpga_mgr_get(&pdata->mgr->dev);
+ if (IS_ERR(mgr))
+ return -EPROBE_DEFER;
+
+ region = fpga_region_create(dev, mgr, fme_region_get_bridges);
+ if (!region) {
+ ret = -ENOMEM;
+ goto eprobe_mgr_put;
+ }
+
+ region->priv = pdata;
+ region->compat_id = mgr->compat_id;
+ platform_set_drvdata(pdev, region);
+
+ ret = fpga_region_register(region);
+ if (ret)
+ goto region_free;
+
+ dev_dbg(dev, "DFL FME FPGA Region probed\n");
+
+ return 0;
+
+region_free:
+ fpga_region_free(region);
+eprobe_mgr_put:
+ fpga_mgr_put(mgr);
+ return ret;
+}
+
+static int fme_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+
+ fpga_region_unregister(region);
+ fpga_mgr_put(region->mgr);
+
+ return 0;
+}
+
+static struct platform_driver fme_region_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_REGION,
+ },
+ .probe = fme_region_probe,
+ .remove = fme_region_remove,
+};
+
+module_platform_driver(fme_region_driver);
+
+MODULE_DESCRIPTION("FPGA Region for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-region");
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
new file mode 100644
index 000000000000..5394a216c5c0
--- /dev/null
+++ b/drivers/fpga/dfl-fme.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_H
+#define __DFL_FME_H
+
+/**
+ * struct dfl_fme - dfl fme private data
+ *
+ * @mgr: FME's FPGA manager platform device.
+ * @region_list: linked list of FME's FPGA regions.
+ * @bridge_list: linked list of FME's FPGA bridges.
+ * @pdata: fme platform device's pdata.
+ */
+struct dfl_fme {
+ struct platform_device *mgr;
+ struct list_head region_list;
+ struct list_head bridge_list;
+ struct dfl_feature_platform_data *pdata;
+};
+
+extern const struct dfl_feature_ops pr_mgmt_ops;
+
+#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
new file mode 100644
index 000000000000..66b5720582bb
--- /dev/null
+++ b/drivers/fpga/dfl-pci.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) PCIe device
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Zhang Yi <Yi.Z.Zhang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/aer.h>
+
+#include "dfl.h"
+
+#define DRV_VERSION "0.8"
+#define DRV_NAME "dfl-pci"
+
+struct cci_drvdata {
+ struct dfl_fpga_cdev *cdev; /* container device */
+};
+
+static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
+{
+ if (pcim_iomap_regions(pcidev, BIT(bar), DRV_NAME))
+ return NULL;
+
+ return pcim_iomap_table(pcidev)[bar];
+}
+
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+
+static struct pci_device_id cci_pcie_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
+
+static int cci_init_drvdata(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pci_set_drvdata(pcidev, drvdata);
+
+ return 0;
+}
+
+static void cci_remove_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+
+ /* remove all children feature devices */
+ dfl_fpga_feature_devs_remove(drvdata->cdev);
+}
+
+/* enumerate feature devices under pci device */
+static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_enum_info *info;
+ struct dfl_fpga_cdev *cdev;
+ resource_size_t start, len;
+ int port_num, bar, i, ret = 0;
+ void __iomem *base;
+ u32 offset;
+ u64 v;
+
+ /* allocate enumeration info via pci_dev */
+ info = dfl_fpga_enum_info_alloc(&pcidev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ /* start to find Device Feature List from Bar 0 */
+ base = cci_pci_ioremap_bar(pcidev, 0);
+ if (!base) {
+ ret = -ENOMEM;
+ goto enum_info_free_exit;
+ }
+
+	/*
+	 * A PF device has an FME and Ports/AFUs, while a VF device only has
+	 * one Port/AFU. Check them and add the related "Device Feature List"
+	 * info for the next enumeration step.
+	 */
+ if (dfl_feature_is_fme(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len, base);
+
+		/*
+		 * find more Device Feature Lists (e.g. Ports) based on the
+		 * information indicated by the FME module.
+		 */
+ v = readq(base + FME_HDR_CAP);
+ port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);
+
+ WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);
+
+ for (i = 0; i < port_num; i++) {
+ v = readq(base + FME_HDR_PORT_OFST(i));
+
+ /* skip ports which are not implemented. */
+ if (!(v & FME_PORT_OFST_IMP))
+ continue;
+
+			/*
+			 * add the Port's Device Feature List information for
+			 * the next enumeration step.
+			 */
+ bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
+ offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
+ base = cci_pci_ioremap_bar(pcidev, bar);
+ if (!base)
+ continue;
+
+ start = pci_resource_start(pcidev, bar) + offset;
+ len = pci_resource_len(pcidev, bar) - offset;
+
+ dfl_fpga_enum_info_add_dfl(info, start, len,
+ base + offset);
+ }
+ } else if (dfl_feature_is_port(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len, base);
+ } else {
+ ret = -ENODEV;
+ goto enum_info_free_exit;
+ }
+
+ /* start enumeration with prepared enumeration information */
+ cdev = dfl_fpga_feature_devs_enumerate(info);
+ if (IS_ERR(cdev)) {
+ dev_err(&pcidev->dev, "Enumeration failure\n");
+ ret = PTR_ERR(cdev);
+ goto enum_info_free_exit;
+ }
+
+ drvdata->cdev = cdev;
+
+enum_info_free_exit:
+ dfl_fpga_enum_info_free(info);
+
+ return ret;
+}
+
+static
+int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
+{
+ int ret;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret < 0) {
+ dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
+ return ret;
+ }
+
+ ret = pci_enable_pcie_error_reporting(pcidev);
+ if (ret && ret != -EINVAL)
+ dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
+
+ pci_set_master(pcidev);
+
+ if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_error_report_exit;
+ } else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret)
+ goto disable_error_report_exit;
+ } else {
+ ret = -EIO;
+ dev_err(&pcidev->dev, "No suitable DMA support available.\n");
+ goto disable_error_report_exit;
+ }
+
+ ret = cci_init_drvdata(pcidev);
+ if (ret) {
+		dev_err(&pcidev->dev, "Failed to init drvdata %d.\n", ret);
+ goto disable_error_report_exit;
+ }
+
+ ret = cci_enumerate_feature_devs(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
+ goto disable_error_report_exit;
+ }
+
+ return ret;
+
+disable_error_report_exit:
+ pci_disable_pcie_error_reporting(pcidev);
+ return ret;
+}
+
+static void cci_pci_remove(struct pci_dev *pcidev)
+{
+ cci_remove_feature_devs(pcidev);
+ pci_disable_pcie_error_reporting(pcidev);
+}
+
+static struct pci_driver cci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cci_pcie_id_tbl,
+ .probe = cci_pci_probe,
+ .remove = cci_pci_remove,
+};
+
+module_pci_driver(cci_pci_driver);
+
+MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
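
[The enumeration interface used above is bus-agnostic; a hypothetical non-PCI driver would follow the same alloc/add/enumerate/free pattern. A sketch, not part of the patch, with the start/len/base values standing in for real resources; every API called here appears in dfl.c below.]

#include "dfl.h"

/* sketch: enumerate one memory-mapped DFL on a hypothetical bus device */
static struct dfl_fpga_cdev *sketch_enumerate_dfl(struct device *dev,
						  resource_size_t start,
						  resource_size_t len,
						  void __iomem *base)
{
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;

	info = dfl_fpga_enum_info_alloc(dev);
	if (!info)
		return ERR_PTR(-ENOMEM);

	/* register the device feature list's physical range and mapping */
	dfl_fpga_enum_info_add_dfl(info, start, len, base);

	/* walks the DFL and creates the FME/port feature platform devices */
	cdev = dfl_fpga_feature_devs_enumerate(info);

	/* the enumeration info is only needed during enumeration */
	dfl_fpga_enum_info_free(info);

	return cdev;	/* tear down later via dfl_fpga_feature_devs_remove() */
}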
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
new file mode 100644
index 000000000000..a9b521bccb06
--- /dev/null
+++ b/drivers/fpga/dfl.c
@@ -0,0 +1,1044 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include <linux/module.h>
+
+#include "dfl.h"
+
+static DEFINE_MUTEX(dfl_id_mutex);
+
+/*
+ * When adding support for a new feature dev to the DFL framework, it is
+ * required to add a new item to enum dfl_id_type and provide related
+ * information in the dfl_devs table below, which is indexed by dfl_id_type,
+ * e.g. the name string used for platform device creation (define the name
+ * strings in dfl.h, as they could be reused by platform device drivers).
+ *
+ * If the new feature dev needs chardev support, it is also required to add
+ * a new item to the dfl_chrdevs table and configure dfl_devs[i].devt_type
+ * as the index into the dfl_chrdevs table. If no chardev support is needed,
+ * just set devt_type to an invalid index (DFL_FPGA_DEVT_MAX).
+ */
+enum dfl_id_type {
+ FME_ID, /* fme id allocation and mapping */
+ PORT_ID, /* port id allocation and mapping */
+ DFL_ID_MAX,
+};
+
+enum dfl_fpga_devt_type {
+ DFL_FPGA_DEVT_FME,
+ DFL_FPGA_DEVT_PORT,
+ DFL_FPGA_DEVT_MAX,
+};
+
+/**
+ * struct dfl_dev_info - dfl feature device information.
+ * @name: name string of the feature platform device.
+ * @dfh_id: id value in Device Feature Header (DFH) register, per the DFL spec.
+ * @id: idr id of the feature dev.
+ * @devt_type: index to dfl_chrdevs[].
+ */
+struct dfl_dev_info {
+ const char *name;
+ u32 dfh_id;
+ struct idr id;
+ enum dfl_fpga_devt_type devt_type;
+};
+
+/* it is indexed by dfl_id_type */
+static struct dfl_dev_info dfl_devs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
+ .devt_type = DFL_FPGA_DEVT_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
+ .devt_type = DFL_FPGA_DEVT_PORT},
+};
+
+/**
+ * struct dfl_chardev_info - chardev information of dfl feature device
+ * @name: name string of the char device.
+ * @devt: devt of the char device.
+ */
+struct dfl_chardev_info {
+ const char *name;
+ dev_t devt;
+};
+
+/* indexed by enum dfl_fpga_devt_type */
+static struct dfl_chardev_info dfl_chrdevs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT},
+};
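
[To make the comment above concrete, a sketch of the edits a hypothetical chardev-less "foo" feature dev would need; FOO_ID, DFL_FPGA_FEATURE_DEV_FOO and DFH_ID_FIU_FOO are invented names.]

enum dfl_id_type {
	FME_ID,
	PORT_ID,
	FOO_ID,			/* hypothetical new feature dev */
	DFL_ID_MAX,
};

static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
	/* no chardev support: point devt_type at the invalid index */
	{.name = DFL_FPGA_FEATURE_DEV_FOO, .dfh_id = DFH_ID_FIU_FOO,
	 .devt_type = DFL_FPGA_DEVT_MAX},
};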
+
+static void dfl_ids_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_init(&dfl_devs[i].id);
+}
+
+static void dfl_ids_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_destroy(&dfl_devs[i].id);
+}
+
+static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
+{
+ int id;
+
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
+ mutex_unlock(&dfl_id_mutex);
+
+ return id;
+}
+
+static void dfl_id_free(enum dfl_id_type type, int id)
+{
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ idr_remove(&dfl_devs[type].id, id);
+ mutex_unlock(&dfl_id_mutex);
+}
+
+static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (!strcmp(dfl_devs[i].name, pdev->name))
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+static enum dfl_id_type dfh_id_to_type(u32 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (dfl_devs[i].dfh_id == id)
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+/*
+ * Introduce a global port_ops list which allows port drivers to register
+ * their ops in it; other feature devices (e.g. FME) can then use the port
+ * functions even when the related port platform device is hidden. One
+ * example is the virtualization case of a PCIe-based FPGA DFL device: when
+ * SRIOV is enabled, a port (and its AFU) is turned into a VF and the port
+ * platform device is hidden from the system, but access to the port is
+ * still required to finish the FPGA reconfiguration function in FME.
+ */
+
+static DEFINE_MUTEX(dfl_port_ops_mutex);
+static LIST_HEAD(dfl_port_ops_list);
+
+/**
+ * dfl_fpga_port_ops_get - get matched port ops from the global list
+ * @pdev: platform device to match with associated port ops.
+ * Return: matched port ops on success, NULL otherwise.
+ *
+ * Please note that dfl_fpga_port_ops_put() must be called after using the
+ * port_ops.
+ */
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+{
+ struct dfl_fpga_port_ops *ops = NULL;
+
+ mutex_lock(&dfl_port_ops_mutex);
+ if (list_empty(&dfl_port_ops_list))
+ goto done;
+
+ list_for_each_entry(ops, &dfl_port_ops_list, node) {
+ /* match port_ops using the name of platform device */
+ if (!strcmp(pdev->name, ops->name)) {
+ if (!try_module_get(ops->owner))
+ ops = NULL;
+ goto done;
+ }
+ }
+
+ ops = NULL;
+done:
+ mutex_unlock(&dfl_port_ops_mutex);
+ return ops;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
+
+/**
+ * dfl_fpga_port_ops_put - put port ops
+ * @ops: port ops.
+ */
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
+{
+ if (ops && ops->owner)
+ module_put(ops->owner);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
+
+/**
+ * dfl_fpga_port_ops_add - add port_ops to global list
+ * @ops: port ops to add.
+ */
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_add_tail(&ops->node, &dfl_port_ops_list);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
+
+/**
+ * dfl_fpga_port_ops_del - remove port_ops from global list
+ * @ops: port ops to del.
+ */
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_del(&ops->node);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
+
+/**
+ * dfl_fpga_check_port_id - check the port id
+ * @pdev: port platform device.
+ * @pport_id: port id to compare.
+ *
+ * Return: 1 if the port device matches the given port id, otherwise 0.
+ */
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+{
+ struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev);
+ int port_id;
+
+ if (!port_ops || !port_ops->get_id)
+ return 0;
+
+ port_id = port_ops->get_id(pdev);
+ dfl_fpga_port_ops_put(port_ops);
+
+ return port_id == *(int *)pport_id;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
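
[A minimal sketch, assuming the dfl_fpga_port_ops layout declared in dfl.h, of the provider side of this list as a port driver elsewhere in this series would implement it; the sketch_* names are invented.]

static int sketch_port_get_id(struct platform_device *pdev)
{
	/* a real port driver reads the id from the port header registers */
	return 0;
}

static int sketch_port_enable_set(struct platform_device *pdev, bool enable)
{
	/* a real port driver gates/un-gates port traffic here */
	return 0;
}

static struct dfl_fpga_port_ops sketch_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,	/* matched against pdev->name */
	.owner = THIS_MODULE,
	.get_id = sketch_port_get_id,
	.enable_set = sketch_port_enable_set,
};

/* in the port driver's module init/exit paths: */
static int __init sketch_init(void)
{
	dfl_fpga_port_ops_add(&sketch_port_ops);
	return 0;
}

static void __exit sketch_exit(void)
{
	dfl_fpga_port_ops_del(&sketch_port_ops);
}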
+
+/**
+ * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
+ * @pdev: feature device.
+ */
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (feature->ops) {
+ feature->ops->uinit(pdev, feature);
+ feature->ops = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
+
+static int dfl_feature_instance_init(struct platform_device *pdev,
+ struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature,
+ struct dfl_feature_driver *drv)
+{
+ int ret;
+
+ ret = drv->ops->init(pdev, feature);
+ if (ret)
+ return ret;
+
+ feature->ops = drv->ops;
+
+ return ret;
+}
+
+/**
+ * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
+ * @pdev: feature device.
+ * @feature_drvs: drvs for sub features.
+ *
+ * This function will match sub features with given feature drvs list and
+ * use matched drv to init related sub feature.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_driver *drv = feature_drvs;
+ struct dfl_feature *feature;
+ int ret;
+
+ while (drv->ops) {
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ /* match feature and drv using id */
+ if (feature->id == drv->id) {
+ ret = dfl_feature_instance_init(pdev, pdata,
+ feature, drv);
+ if (ret)
+ goto exit;
+ }
+ }
+ drv++;
+ }
+
+ return 0;
+exit:
+ dfl_fpga_dev_feature_uinit(pdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
+
+static void dfl_chardev_uinit(void)
+{
+ int i;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
+ if (MAJOR(dfl_chrdevs[i].devt)) {
+ unregister_chrdev_region(dfl_chrdevs[i].devt,
+ MINORMASK);
+ dfl_chrdevs[i].devt = MKDEV(0, 0);
+ }
+}
+
+static int dfl_chardev_init(void)
+{
+ int i, ret;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
+ ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
+ dfl_chrdevs[i].name);
+ if (ret)
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ dfl_chardev_uinit();
+ return ret;
+}
+
+static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
+{
+ if (type >= DFL_FPGA_DEVT_MAX)
+ return 0;
+
+ return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
+}
+
+/**
+ * dfl_fpga_dev_ops_register - register cdev ops for feature dev
+ *
+ * @pdev: feature dev.
+ * @fops: file operations for feature dev's cdev.
+ * @owner: owning module/driver.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_init(&pdata->cdev, fops);
+ pdata->cdev.owner = owner;
+
+	/*
+	 * Set the parent to the feature device so that its refcount is
+	 * decreased only after the last refcount of the cdev is gone; this
+	 * makes sure the feature device stays valid during the device
+	 * file's life-cycle.
+	 */
+ pdata->cdev.kobj.parent = &pdev->dev.kobj;
+
+ return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
+
+/**
+ * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
+ * @pdev: feature dev.
+ */
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_del(&pdata->cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
+
+/**
+ * struct build_feature_devs_info - info collected during feature dev build.
+ *
+ * @dev: device to enumerate.
+ * @cdev: the container device for all feature devices.
+ * @feature_dev: current feature device.
+ * @ioaddr: header register region address of feature device in enumeration.
+ * @sub_features: a sub features linked list for feature device in enumeration.
+ * @feature_num: number of sub features for feature device in enumeration.
+ */
+struct build_feature_devs_info {
+ struct device *dev;
+ struct dfl_fpga_cdev *cdev;
+ struct platform_device *feature_dev;
+ void __iomem *ioaddr;
+ struct list_head sub_features;
+ int feature_num;
+};
+
+/**
+ * struct dfl_feature_info - sub feature info collected during feature dev build
+ *
+ * @fid: id of this sub feature.
+ * @mmio_res: mmio resource of this sub feature.
+ * @ioaddr: mapped base address of mmio resource.
+ * @node: node in sub_features linked list.
+ */
+struct dfl_feature_info {
+ u64 fid;
+ struct resource mmio_res;
+ void __iomem *ioaddr;
+ struct list_head node;
+};
+
+static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
+ struct platform_device *port)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
+
+ mutex_lock(&cdev->lock);
+ list_add(&pdata->node, &cdev->port_dev_list);
+ get_device(&pdata->dev->dev);
+ mutex_unlock(&cdev->lock);
+}
+
+/*
+ * Register the current feature device. It is called when we need to switch
+ * to parsing another feature, or when all features on the given device
+ * feature list have been parsed.
+ */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct platform_device *fdev = binfo->feature_dev;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_info *finfo, *p;
+ int ret, index = 0;
+
+ if (!fdev)
+ return 0;
+
+	/*
+	 * We do not need to care about the memory associated with the
+	 * platform device. After calling platform_device_unregister(), it
+	 * will be automatically freed by the device's release() callback,
+	 * platform_device_release().
+	 */
+ pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->dev = fdev;
+ pdata->num = binfo->feature_num;
+ pdata->dfl_cdev = binfo->cdev;
+ mutex_init(&pdata->lock);
+
+	/*
+	 * The count should be initialized to 0 to make sure
+	 * __fpga_port_enable() following __fpga_port_disable() works
+	 * properly for the port device, and it should always be 0 for
+	 * the fme device.
+	 */
+ WARN_ON(pdata->disable_count);
+
+ fdev->dev.platform_data = pdata;
+
+ /* each sub feature has one MMIO resource */
+ fdev->num_resources = binfo->feature_num;
+ fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
+ GFP_KERNEL);
+ if (!fdev->resource)
+ return -ENOMEM;
+
+ /* fill features and resource information for feature dev */
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ struct dfl_feature *feature = &pdata->features[index];
+
+ /* save resource information for each feature */
+ feature->id = finfo->fid;
+ feature->resource_index = index;
+ feature->ioaddr = finfo->ioaddr;
+ fdev->resource[index++] = finfo->mmio_res;
+
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+
+ ret = platform_device_add(binfo->feature_dev);
+ if (!ret) {
+ if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
+ dfl_fpga_cdev_add_port_dev(binfo->cdev,
+ binfo->feature_dev);
+ else
+ binfo->cdev->fme_dev =
+ get_device(&binfo->feature_dev->dev);
+ /*
+ * Reset it so that build_info_free() does not free its resources.
+ *
+ * The resources of successfully registered feature devices will be
+ * freed by platform_device_unregister(). See the comments in
+ * build_info_create_dev().
+ */
+ binfo->feature_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum dfl_id_type type, void __iomem *ioaddr)
+{
+ struct platform_device *fdev;
+ int ret;
+
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ /* we are about to create a new device; commit the current one first */
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ /*
+ * We use -ENODEV as the initial device id; a negative id indicates
+ * that the id has not been allocated yet and does not need to be
+ * reclaimed.
+ */
+ fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ if (!fdev)
+ return -ENOMEM;
+
+ binfo->feature_dev = fdev;
+ binfo->feature_num = 0;
+ binfo->ioaddr = ioaddr;
+ INIT_LIST_HEAD(&binfo->sub_features);
+
+ fdev->id = dfl_id_alloc(type, &fdev->dev);
+ if (fdev->id < 0)
+ return fdev->id;
+
+ fdev->dev.parent = &binfo->cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+
+ return 0;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_info *finfo, *p;
+
+ /*
+ * If it is a valid id, free it. See the comments in
+ * build_info_create_dev().
+ */
+ if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
+ dfl_id_free(feature_dev_id_type(binfo->feature_dev),
+ binfo->feature_dev->id);
+
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+ }
+
+ platform_device_put(binfo->feature_dev);
+
+ devm_kfree(binfo->dev, binfo);
+}
+
+static inline u32 feature_size(void __iomem *start)
+{
+ u64 v = readq(start + DFH);
+ u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
+ /* workaround for private features with invalid size, use 4K instead */
+ return ofst ? ofst : 4096;
+}
+
+static u64 feature_id(void __iomem *start)
+{
+ u64 v = readq(start + DFH);
+ u16 id = FIELD_GET(DFH_ID, v);
+ u8 type = FIELD_GET(DFH_TYPE, v);
+
+ if (type == DFH_TYPE_FIU)
+ return FEATURE_ID_FIU_HEADER;
+ else if (type == DFH_TYPE_PRIVATE)
+ return id;
+ else if (type == DFH_TYPE_AFU)
+ return FEATURE_ID_AFU;
+
+ WARN_ON(1);
+ return 0;
+}
+
+/*
+ * When creating sub feature instances, private features do not need to
+ * provide a resource size and feature id, as these can be read from the
+ * DFH register. For the AFU sub feature, its register region only contains
+ * user-defined registers, so never trust any information from it; just use
+ * the resource size information provided by its parent FIU.
+ */
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
+ resource_size_t size, u64 fid)
+{
+ struct dfl_feature_info *finfo;
+
+ /* read feature size and id from the DFH register if not provided */
+ size = size ? size : feature_size(dfl->ioaddr + ofst);
+ fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
+
+ if (dfl->len - ofst < size)
+ return -EINVAL;
+
+ finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ if (!finfo)
+ return -ENOMEM;
+
+ finfo->fid = fid;
+ finfo->mmio_res.start = dfl->start + ofst;
+ finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
+ finfo->mmio_res.flags = IORESOURCE_MEM;
+ finfo->ioaddr = dfl->ioaddr + ofst;
+
+ list_add_tail(&finfo->node, &binfo->sub_features);
+ binfo->feature_num++;
+
+ return 0;
+}
+
+static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
+ u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
+
+ WARN_ON(!size);
+
+ return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
+}
+
+static int parse_feature_afu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ if (!binfo->feature_dev) {
+ dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
+ return -EINVAL;
+ }
+
+ switch (feature_dev_id_type(binfo->feature_dev)) {
+ case PORT_ID:
+ return parse_feature_port_afu(binfo, dfl, ofst);
+ default:
+ dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
+ binfo->feature_dev->name);
+ }
+
+ return 0;
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ u32 id, offset;
+ u64 v;
+ int ret = 0;
+
+ v = readq(dfl->ioaddr + ofst + DFH);
+ id = FIELD_GET(DFH_ID, v);
+
+ /* create platform device for dfl feature dev */
+ ret = build_info_create_dev(binfo, dfh_id_to_type(id),
+ dfl->ioaddr + ofst);
+ if (ret)
+ return ret;
+
+ ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
+ if (ret)
+ return ret;
+ /*
+ * Find and parse the FIU's child AFU via its NEXT_AFU register.
+ * Please note that per the spec, only the Port has a valid NEXT_AFU
+ * pointer.
+ */
+ v = readq(dfl->ioaddr + ofst + NEXT_AFU);
+
+ offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
+ if (offset)
+ return parse_feature_afu(binfo, dfl, ofst + offset);
+
+ dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
+
+ return ret;
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ if (!binfo->feature_dev) {
+ dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
+ (unsigned long long)feature_id(dfl->ioaddr + ofst));
+ return -EINVAL;
+ }
+
+ return create_feature_instance(binfo, dfl, ofst, 0, 0);
+}
+
+/**
+ * parse_feature - parse a feature on given device feature list
+ *
+ * @binfo: build feature devices information.
+ * @dfl: device feature list to parse
+ * @ofst: offset to feature header on this device feature list
+ */
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
+{
+ u64 v;
+ u32 type;
+
+ v = readq(dfl->ioaddr + ofst + DFH);
+ type = FIELD_GET(DFH_TYPE, v);
+
+ switch (type) {
+ case DFH_TYPE_AFU:
+ return parse_feature_afu(binfo, dfl, ofst);
+ case DFH_TYPE_PRIVATE:
+ return parse_feature_private(binfo, dfl, ofst);
+ case DFH_TYPE_FIU:
+ return parse_feature_fiu(binfo, dfl, ofst);
+ default:
+ dev_info(binfo->dev,
+ "Feature Type %x is not supported.\n", type);
+ }
+
+ return 0;
+}
+
+static int parse_feature_list(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl)
+{
+ void __iomem *start = dfl->ioaddr;
+ void __iomem *end = dfl->ioaddr + dfl->len;
+ int ret = 0;
+ u32 ofst = 0;
+ u64 v;
+
+ /* walk through the device feature list via DFH's next DFH pointer. */
+ for (; start < end; start += ofst) {
+ if (end - start < DFH_SIZE) {
+ dev_err(binfo->dev, "The region is too small to contain a feature.\n");
+ return -EINVAL;
+ }
+
+ ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
+ if (ret)
+ return ret;
+
+ v = readq(start + DFH);
+ ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
+
+ /* stop parsing if EOL (End of List) is set or the offset is 0 */
+ if ((v & DFH_EOL) || !ofst)
+ break;
+ }
+
+ /* commit the current feature device when reaching the end of the list */
+ return build_info_commit_dev(binfo);
+}
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
+{
+ struct dfl_fpga_enum_info *info;
+
+ get_device(dev);
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ put_device(dev);
+ return NULL;
+ }
+
+ info->dev = dev;
+ INIT_LIST_HEAD(&info->dfls);
+
+ return info;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
+
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
+{
+ struct dfl_fpga_enum_dfl *tmp, *dfl;
+ struct device *dev;
+
+ if (!info)
+ return;
+
+ dev = info->dev;
+
+ /* remove all device feature list entries from the list. */
+ list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
+ list_del(&dfl->node);
+ devm_kfree(dev, dfl);
+ }
+
+ devm_kfree(dev, info);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
+
+/**
+ * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @start: mmio resource address of the device feature list.
+ * @len: mmio resource length of the device feature list.
+ * @ioaddr: mapped mmio resource address of the device feature list.
+ *
+ * One FPGA device may have one or more Device Feature Lists (DFLs). Use this
+ * function to add the information of each DFL to a common data structure for
+ * the next enumeration step.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len,
+ void __iomem *ioaddr)
+{
+ struct dfl_fpga_enum_dfl *dfl;
+
+ dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
+ if (!dfl)
+ return -ENOMEM;
+
+ dfl->start = start;
+ dfl->len = len;
+ dfl->ioaddr = ioaddr;
+
+ list_add_tail(&dfl->node, &info->dfls);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
+
+static int remove_feature_dev(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ enum dfl_id_type type = feature_dev_id_type(pdev);
+ int id = pdev->id;
+
+ platform_device_unregister(pdev);
+
+ dfl_id_free(type, id);
+
+ return 0;
+}
+
+static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
+{
+ device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
+}
+
+/**
+ * dfl_fpga_feature_devs_enumerate - enumerate feature devices
+ * @info: information for enumeration.
+ *
+ * This function creates a container device (base FPGA region), enumerates
+ * feature devices based on the enumeration info and creates platform devices
+ * under the container device.
+ *
+ * Return: dfl_fpga_cdev struct on success, -errno on failure
+ */
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
+{
+ struct build_feature_devs_info *binfo;
+ struct dfl_fpga_enum_dfl *dfl;
+ struct dfl_fpga_cdev *cdev;
+ int ret = 0;
+
+ if (!info->dev)
+ return ERR_PTR(-ENODEV);
+
+ cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return ERR_PTR(-ENOMEM);
+
+ cdev->region = fpga_region_create(info->dev, NULL, NULL);
+ if (!cdev->region) {
+ ret = -ENOMEM;
+ goto free_cdev_exit;
+ }
+
+ cdev->parent = info->dev;
+ mutex_init(&cdev->lock);
+ INIT_LIST_HEAD(&cdev->port_dev_list);
+
+ ret = fpga_region_register(cdev->region);
+ if (ret)
+ goto free_region_exit;
+
+ /* create and init build info for enumeration */
+ binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
+ if (!binfo) {
+ ret = -ENOMEM;
+ goto unregister_region_exit;
+ }
+
+ binfo->dev = info->dev;
+ binfo->cdev = cdev;
+
+ /*
+ * start enumeration for all feature devices based on Device Feature
+ * Lists.
+ */
+ list_for_each_entry(dfl, &info->dfls, node) {
+ ret = parse_feature_list(binfo, dfl);
+ if (ret) {
+ remove_feature_devs(cdev);
+ build_info_free(binfo);
+ goto unregister_region_exit;
+ }
+ }
+
+ build_info_free(binfo);
+
+ return cdev;
+
+unregister_region_exit:
+ fpga_region_unregister(cdev->region);
+free_region_exit:
+ fpga_region_free(cdev->region);
+free_cdev_exit:
+ devm_kfree(info->dev, cdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
+
+/**
+ * dfl_fpga_feature_devs_remove - remove all feature devices
+ * @cdev: fpga container device.
+ *
+ * Remove all feature devices under the given container device, then release
+ * the container device itself.
+ */
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
+{
+ struct dfl_feature_platform_data *pdata, *ptmp;
+
+ remove_feature_devs(cdev);
+
+ mutex_lock(&cdev->lock);
+ if (cdev->fme_dev) {
+ /* the fme device should already be unregistered. */
+ WARN_ON(device_is_registered(cdev->fme_dev));
+ put_device(cdev->fme_dev);
+ }
+
+ list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
+ struct platform_device *port_dev = pdata->dev;
+
+ /* the port device should already be unregistered. */
+ WARN_ON(device_is_registered(&port_dev->dev));
+ list_del(&pdata->node);
+ put_device(&port_dev->dev);
+ }
+ mutex_unlock(&cdev->lock);
+
+ fpga_region_unregister(cdev->region);
+ devm_kfree(cdev->parent, cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
+
+/**
+ * __dfl_fpga_cdev_find_port - find a port under given container device
+ *
+ * @cdev: container device
+ * @data: data passed to match function
+ * @match: match function used to find specific port from the port device list
+ *
+ * Find a port device under the container device. This function must be
+ * invoked with cdev->lock held.
+ *
+ * Return: pointer to port's platform device if successful, NULL otherwise.
+ *
+ * NOTE: you will need to drop the device reference with put_device() after use.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct dfl_feature_platform_data *pdata;
+ struct platform_device *port_dev;
+
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ port_dev = pdata->dev;
+
+ if (match(port_dev, data) && get_device(&port_dev->dev))
+ return port_dev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+
+static int __init dfl_fpga_init(void)
+{
+ int ret;
+
+ dfl_ids_init();
+
+ ret = dfl_chardev_init();
+ if (ret)
+ dfl_ids_destroy();
+
+ return ret;
+}
+
+static void __exit dfl_fpga_exit(void)
+{
+ dfl_chardev_uinit();
+ dfl_ids_destroy();
+}
+
+module_init(dfl_fpga_init);
+module_exit(dfl_fpga_exit);
+
+MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
new file mode 100644
index 000000000000..a8b869e9e5b7
--- /dev/null
+++ b/drivers/fpga/dfl.h
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver Header File for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#ifndef __FPGA_DFL_H
+#define __FPGA_DFL_H
+
+#include <linux/bitfield.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/fpga/fpga-region.h>
+
+/* maximum supported number of ports */
+#define MAX_DFL_FPGA_PORT_NUM 4
+/* plus one for fme device */
+#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
+
+/* Reserved 0x0 for Header Group Register and 0xff for AFU */
+#define FEATURE_ID_FIU_HEADER 0x0
+#define FEATURE_ID_AFU 0xff
+
+#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define FME_FEATURE_ID_POWER_MGMT 0x2
+#define FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define FME_FEATURE_ID_PR_MGMT 0x5
+#define FME_FEATURE_ID_HSSI 0x6
+#define FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define PORT_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define PORT_FEATURE_ID_AFU FEATURE_ID_AFU
+#define PORT_FEATURE_ID_ERROR 0x10
+#define PORT_FEATURE_ID_UMSG 0x11
+#define PORT_FEATURE_ID_UINT 0x12
+#define PORT_FEATURE_ID_STP 0x13
+
+/*
+ * Device Feature Header Register Set
+ *
+ * FIUs all have DFH + GUID + NEXT_AFU as common header registers.
+ * AFUs have DFH + GUID as common header registers.
+ * Private features only have the DFH register as a common header.
+ */
+#define DFH 0x0
+#define GUID_L 0x8
+#define GUID_H 0x10
+#define NEXT_AFU 0x18
+
+#define DFH_SIZE 0x8
+
+/* Device Feature Header Register Bitfield */
+#define DFH_ID GENMASK_ULL(11, 0) /* Feature ID */
+#define DFH_ID_FIU_FME 0
+#define DFH_ID_FIU_PORT 1
+#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
+#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
+#define DFH_EOL BIT_ULL(40) /* End of list */
+#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
+#define DFH_TYPE_AFU 1
+#define DFH_TYPE_PRIVATE 3
+#define DFH_TYPE_FIU 4
+
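+/*
+ * Illustrative sketch: the enumeration code in dfl.c walks a device
+ * feature list by following DFH_NEXT_HDR_OFST until DFH_EOL is set
+ * (or the offset is 0), roughly:
+ *
+ *	u64 v;
+ *	do {
+ *		v = readq(base + DFH);
+ *		... dispatch on FIELD_GET(DFH_TYPE, v) ...
+ *		base += FIELD_GET(DFH_NEXT_HDR_OFST, v);
+ *	} while (!(v & DFH_EOL));
+ */
+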
+/* Next AFU Register Bitfield */
+#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
+
+/* FME Header Register Set */
+#define FME_HDR_DFH DFH
+#define FME_HDR_GUID_L GUID_L
+#define FME_HDR_GUID_H GUID_H
+#define FME_HDR_NEXT_AFU NEXT_AFU
+#define FME_HDR_CAP 0x30
+#define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8))
+#define FME_HDR_BITSTREAM_ID 0x60
+#define FME_HDR_BITSTREAM_MD 0x68
+
+/* FME Fab Capability Register Bitfield */
+#define FME_CAP_FABRIC_VERID GENMASK_ULL(7, 0) /* Fabric version ID */
+#define FME_CAP_SOCKET_ID BIT_ULL(8) /* Socket ID */
+#define FME_CAP_PCIE0_LINK_AVL BIT_ULL(12) /* PCIE0 Link */
+#define FME_CAP_PCIE1_LINK_AVL BIT_ULL(13) /* PCIE1 Link */
+#define FME_CAP_COHR_LINK_AVL BIT_ULL(14) /* Coherent Link */
+#define FME_CAP_IOMMU_AVL BIT_ULL(16) /* IOMMU available */
+#define FME_CAP_NUM_PORTS GENMASK_ULL(19, 17) /* Number of ports */
+#define FME_CAP_ADDR_WIDTH GENMASK_ULL(29, 24) /* Address bus width */
+#define FME_CAP_CACHE_SIZE GENMASK_ULL(43, 32) /* cache size in KB */
+#define FME_CAP_CACHE_ASSOC GENMASK_ULL(47, 44) /* Associativity */
+
+/* FME Port Offset Register Bitfield */
+/* Offset to port device feature header */
+#define FME_PORT_OFST_DFH_OFST GENMASK_ULL(23, 0)
+/* PCI Bar ID for this port */
+#define FME_PORT_OFST_BAR_ID GENMASK_ULL(34, 32)
+/* AFU MMIO access permission. 1 - VF, 0 - PF. */
+#define FME_PORT_OFST_ACC_CTRL BIT_ULL(55)
+#define FME_PORT_OFST_ACC_PF 0
+#define FME_PORT_OFST_ACC_VF 1
+#define FME_PORT_OFST_IMP BIT_ULL(60)
+
+/* PORT Header Register Set */
+#define PORT_HDR_DFH DFH
+#define PORT_HDR_GUID_L GUID_L
+#define PORT_HDR_GUID_H GUID_H
+#define PORT_HDR_NEXT_AFU NEXT_AFU
+#define PORT_HDR_CAP 0x30
+#define PORT_HDR_CTRL 0x38
+
+/* Port Capability Register Bitfield */
+#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
+#define PORT_CAP_MMIO_SIZE GENMASK_ULL(23, 8) /* MMIO size in KB */
+#define PORT_CAP_SUPP_INT_NUM GENMASK_ULL(35, 32) /* Interrupts num */
+
+/* Port Control Register Bitfield */
+#define PORT_CTRL_SFTRST BIT_ULL(0) /* Port soft reset */
+/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/
+#define PORT_CTRL_LATENCY BIT_ULL(2)
+#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
+/**
+ * struct dfl_fpga_port_ops - port ops
+ *
+ * @name: name of this port ops, to match with port platform device.
+ * @owner: pointer to the module which owns this port ops.
+ * @node: node to link port ops to global list.
+ * @get_id: get port id from hardware.
+ * @enable_set: enable/disable the port.
+ */
+struct dfl_fpga_port_ops {
+ const char *name;
+ struct module *owner;
+ struct list_head node;
+ int (*get_id)(struct platform_device *pdev);
+ int (*enable_set)(struct platform_device *pdev, bool enable);
+};
+
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+
+/**
+ * struct dfl_feature_driver - sub feature's driver
+ *
+ * @id: sub feature id.
+ * @ops: ops of this sub feature.
+ */
+struct dfl_feature_driver {
+ u64 id;
+ const struct dfl_feature_ops *ops;
+};
+
+/**
+ * struct dfl_feature - sub feature of the feature devices
+ *
+ * @id: sub feature id.
+ * @resource_index: each sub feature has one mmio resource for its registers.
+ *                  This index is used to find its mmio resource in the
+ *                  feature dev (platform device)'s resources.
+ * @ioaddr: mapped mmio resource address.
+ * @ops: ops of this sub feature.
+ */
+struct dfl_feature {
+ u64 id;
+ int resource_index;
+ void __iomem *ioaddr;
+ const struct dfl_feature_ops *ops;
+};
+
+#define DEV_STATUS_IN_USE 0
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @node: node to link feature devs to container device's port_dev_list.
+ * @lock: mutex to protect platform data.
+ * @cdev: cdev of feature dev.
+ * @dev: ptr to platform device linked with this platform data.
+ * @dfl_cdev: ptr to container device.
+ * @disable_count: count for port disable.
+ * @num: number for sub features.
+ * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
+ * @private: ptr to feature dev private data.
+ * @features: sub features of this feature dev.
+ */
+struct dfl_feature_platform_data {
+ struct list_head node;
+ struct mutex lock;
+ struct cdev cdev;
+ struct platform_device *dev;
+ struct dfl_fpga_cdev *dfl_cdev;
+ unsigned int disable_count;
+ unsigned long dev_status;
+ void *private;
+ int num;
+ struct dfl_feature features[0];
+};
+
+static inline
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
+{
+ /* Test and set the IN_USE flag to ensure the file is used exclusively */
+ if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
+ return -EBUSY;
+
+ return 0;
+}
+
+static inline
+void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+{
+ clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
+}
+
+static inline
+void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+ void *private)
+{
+ pdata->private = private;
+}
+
+static inline
+void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->private;
+}
+
+struct dfl_feature_ops {
+ int (*init)(struct platform_device *pdev, struct dfl_feature *feature);
+ void (*uinit)(struct platform_device *pdev,
+ struct dfl_feature *feature);
+ long (*ioctl)(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg);
+};
+
+#define DFL_FPGA_FEATURE_DEV_FME "dfl-fme"
+#define DFL_FPGA_FEATURE_DEV_PORT "dfl-port"
+
+static inline int dfl_feature_platform_data_size(const int num)
+{
+ return sizeof(struct dfl_feature_platform_data) +
+ num * sizeof(struct dfl_feature);
+}
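+
+/*
+ * Illustrative note: the features[] member of dfl_feature_platform_data is
+ * a zero-length array allocated inline, so the helper above sizes a single
+ * allocation holding the struct plus 'num' struct dfl_feature entries.
+ */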
+
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs);
+
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner);
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
+
+static inline
+struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+{
+ struct dfl_feature_platform_data *pdata;
+
+ pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
+ cdev);
+ return pdata->dev;
+}
+
+#define dfl_fpga_dev_for_each_feature(pdata, feature) \
+ for ((feature) = (pdata)->features; \
+ (feature) < (pdata)->features + (pdata)->num; (feature)++)
+
+static inline
+struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u64 id)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (feature->id == id)
+ return feature;
+
+ return NULL;
+}
+
+static inline
+void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u64 id)
+{
+ struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+
+ if (feature && feature->ioaddr)
+ return feature->ioaddr;
+
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline bool is_dfl_feature_present(struct device *dev, u64 id)
+{
+ return !!dfl_get_feature_ioaddr_by_id(dev, id);
+}
+
+static inline
+struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->dev->dev.parent->parent;
+}
+
+static inline bool dfl_feature_is_fme(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_FME);
+}
+
+static inline bool dfl_feature_is_port(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
+}
+
+/**
+ * struct dfl_fpga_enum_info - DFL FPGA enumeration information
+ *
+ * @dev: parent device.
+ * @dfls: list of device feature lists.
+ */
+struct dfl_fpga_enum_info {
+ struct device *dev;
+ struct list_head dfls;
+};
+
+/**
+ * struct dfl_fpga_enum_dfl - DFL FPGA enumeration device feature list info
+ *
+ * @start: base address of this device feature list.
+ * @len: size of this device feature list.
+ * @ioaddr: mapped base address of this device feature list.
+ * @node: node in list of device feature lists.
+ */
+struct dfl_fpga_enum_dfl {
+ resource_size_t start;
+ resource_size_t len;
+
+ void __iomem *ioaddr;
+
+ struct list_head node;
+};
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len,
+ void __iomem *ioaddr);
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
+
+/**
+ * struct dfl_fpga_cdev - container device of DFL based FPGA
+ *
+ * @parent: parent device of this container device.
+ * @region: base fpga region.
+ * @fme_dev: FME feature device under this container device.
+ * @lock: mutex lock to protect the port device list.
+ * @port_dev_list: list of all port feature devices under this container device.
+ */
+struct dfl_fpga_cdev {
+ struct device *parent;
+ struct fpga_region *region;
+ struct device *fme_dev;
+ struct mutex lock;
+ struct list_head port_dev_list;
+};
+
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
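+
+/*
+ * Typical enumeration flow (illustrative sketch, error handling omitted):
+ *
+ *	info = dfl_fpga_enum_info_alloc(dev);
+ *	dfl_fpga_enum_info_add_dfl(info, start, len, ioaddr);
+ *	cdev = dfl_fpga_feature_devs_enumerate(info);
+ *	dfl_fpga_enum_info_free(info);
+ *	...
+ *	dfl_fpga_feature_devs_remove(cdev);
+ */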
+
+/*
+ * The device reference must be dropped with put_device() after use of the
+ * port platform device returned by the __dfl_fpga_cdev_find_port() and
+ * dfl_fpga_cdev_find_port() functions.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *));
+
+static inline struct platform_device *
+dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct platform_device *pdev;
+
+ mutex_lock(&cdev->lock);
+ pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ mutex_unlock(&cdev->lock);
+
+ return pdev;
+}
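+
+/*
+ * Example (sketch): look up a port by port id using the generic match
+ * helper declared above, then drop the device reference after use.
+ *
+ *	int port_id = 0;
+ *	struct platform_device *port;
+ *
+ *	port = dfl_fpga_cdev_find_port(cdev, &port_id,
+ *				       dfl_fpga_check_port_id);
+ *	if (port) {
+ *		...
+ *		put_device(&port->dev);
+ *	}
+ */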
+#endif /* __FPGA_DFL_H */
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index c1564cf827fe..a41b07e37884 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -406,12 +406,40 @@ static ssize_t state_show(struct device *dev,
return sprintf(buf, "%s\n", state_str[mgr->state]);
}
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ u64 status;
+ int len = 0;
+
+ if (!mgr->mops->status)
+ return -ENOENT;
+
+ status = mgr->mops->status(mgr);
+
+ if (status & FPGA_MGR_STATUS_OPERATION_ERR)
+ len += sprintf(buf + len, "reconfig operation error\n");
+ if (status & FPGA_MGR_STATUS_CRC_ERR)
+ len += sprintf(buf + len, "reconfig CRC error\n");
+ if (status & FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR)
+ len += sprintf(buf + len, "reconfig incompatible image\n");
+ if (status & FPGA_MGR_STATUS_IP_PROTOCOL_ERR)
+ len += sprintf(buf + len, "reconfig IP protocol error\n");
+ if (status & FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR)
+ len += sprintf(buf + len, "reconfig fifo overflow error\n");
+
+ return len;
+}
+
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(status);
static struct attribute *fpga_mgr_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+ &dev_attr_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(fpga_mgr);
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 6d214d75c7be..0d65220d5ec5 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -158,6 +158,27 @@ err_put_region:
}
EXPORT_SYMBOL_GPL(fpga_region_program_fpga);
+static ssize_t compat_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ if (!region->compat_id)
+ return -ENOENT;
+
+ return sprintf(buf, "%016llx%016llx\n",
+ (unsigned long long)region->compat_id->id_h,
+ (unsigned long long)region->compat_id->id_l);
+}
+
+static DEVICE_ATTR_RO(compat_id);
+
+static struct attribute *fpga_region_attrs[] = {
+ &dev_attr_compat_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_region);
+
/**
* fpga_region_create - alloc and init a struct fpga_region
* @dev: device parent
@@ -258,6 +279,7 @@ static int __init fpga_region_init(void)
if (IS_ERR(fpga_region_class))
return PTR_ERR(fpga_region_class);
+ fpga_region_class->dev_groups = fpga_region_groups;
fpga_region_class->dev_release = fpga_region_dev_release;
return 0;
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index a326ed663d3c..af3a20dd5aa4 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -12,6 +12,21 @@ menuconfig FSI
if FSI
+config FSI_NEW_DEV_NODE
+ bool "Create '/dev/fsi' directory for char devices"
+ default n
+ ---help---
+ This option causes char devices created for FSI devices to be
+ located under a common /dev/fsi/ directory. Set to N unless your
+ userspace has been updated to handle the new location.
+
+ Additionally, when this option is disabled the char device names are
+ offset by one, so that chip 0 has /dev/scom1 and chip 1 has /dev/scom2,
+ to match old userspace expectations.
+
+ New userspace will use udev rules to generate predictable access
+ symlinks in /dev/fsi/by-path when this option is enabled.
+
config FSI_MASTER_GPIO
tristate "GPIO-based FSI master"
depends on GPIOLIB
@@ -27,9 +42,26 @@ config FSI_MASTER_HUB
allow chaining of FSI links to an arbitrary depth. This allows for
a high target device fanout.
+config FSI_MASTER_AST_CF
+ tristate "FSI master based on Aspeed ColdFire coprocessor"
+ depends on GPIOLIB
+ depends on GPIO_ASPEED
+ ---help---
+ This option enables an FSI master using the AST2400 and AST2500 GPIO
+ lines driven by the internal ColdFire coprocessor. This requires
+ the corresponding machine-specific ColdFire firmware to be available.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
---help---
This option enables an FSI based SCOM device driver.
+config FSI_SBEFIFO
+ tristate "SBEFIFO FSI client device driver"
+ depends on OF_ADDRESS
+ ---help---
+ This option enables an FSI based SBEFIFO device driver. The SBEFIFO is
+ a pipe-like FSI device for communicating with the self boot engine
+ (SBE) on POWER processors.
+
endif
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index 65eb99dfafdb..a50d6ce22fb3 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -2,4 +2,6 @@
obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
+obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
+obj-$(CONFIG_FSI_SBEFIFO) += fsi-sbefifo.o
diff --git a/drivers/fsi/cf-fsi-fw.h b/drivers/fsi/cf-fsi-fw.h
new file mode 100644
index 000000000000..712df0461911
--- /dev/null
+++ b/drivers/fsi/cf-fsi-fw.h
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+#ifndef __CF_FSI_FW_H
+#define __CF_FSI_FW_H
+
+/*
+ * uCode file layout
+ *
+ * 0000...03ff : m68k exception vectors
+ * 0400...04ff : Header info & boot config block
+ * 0500....... : Code & stack
+ */
+
+/*
+ * Header info & boot config area
+ *
+ * The Header info is built into the ucode and provides version and
+ * platform information.
+ *
+ * The Boot config needs to be adjusted by the ARM prior to starting
+ * the ucode if the Command/Status area isn't at 0x320000 in CF space
+ * (i.e. the beginning of SRAM).
+ */
+
+#define HDR_OFFSET 0x400
+
+/* Info: Signature & version */
+#define HDR_SYS_SIG 0x00 /* 2 bytes system signature */
+#define SYS_SIG_SHARED 0x5348
+#define SYS_SIG_SPLIT 0x5350
+#define HDR_FW_VERS 0x02 /* 2 bytes Major.Minor */
+#define HDR_API_VERS 0x04 /* 2 bytes Major.Minor */
+#define API_VERSION_MAJ 2 /* Current version */
+#define API_VERSION_MIN 1
+#define HDR_FW_OPTIONS 0x08 /* 4 bytes option flags */
+#define FW_OPTION_TRACE_EN 0x00000001 /* FW tracing enabled */
+#define FW_OPTION_CONT_CLOCK 0x00000002 /* Continuous clocking supported */
+#define HDR_FW_SIZE 0x10 /* 4 bytes size for combo image */
+
+/* Boot Config: Address of Command/Status area */
+#define HDR_CMD_STAT_AREA 0x80 /* 4 bytes CF address */
+#define HDR_FW_CONTROL 0x84 /* 4 bytes control flags */
+#define FW_CONTROL_CONT_CLOCK 0x00000002 /* Continuous clocking enabled */
+#define FW_CONTROL_DUMMY_RD 0x00000004 /* Extra dummy read (AST2400) */
+#define FW_CONTROL_USE_STOP 0x00000008 /* Use STOP instructions */
+#define HDR_CLOCK_GPIO_VADDR 0x90 /* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_DADDR 0x92 /* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_VADDR 0x94 /* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_DADDR 0x96 /* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_VADDR 0x98 /* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_DADDR 0x9a /* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_BIT 0x9c /* 1 byte bit number */
+#define HDR_DATA_GPIO_BIT 0x9d /* 1 byte bit number */
+#define HDR_TRANS_GPIO_BIT 0x9e /* 1 byte bit number */
+
+/*
+ * Command/Status area layout: Main part
+ */
+
+/* Command/Status register:
+ *
+ * +---------------------------+
+ * | STAT | RLEN | CLEN | CMD |
+ * | 8 | 8 | 8 | 8 |
+ * +---------------------------+
+ * | | | |
+ * status | | |
+ * Response len | |
+ * (in bits) | |
+ * | |
+ * Command len |
+ * (in bits) |
+ * |
+ * Command code
+ *
+ * Due to the big-endian layout, a single byte read will return the
+ * status byte.
+ */
+#define CMD_STAT_REG 0x00
+#define CMD_REG_CMD_MASK 0x000000ff
+#define CMD_REG_CMD_SHIFT 0
+#define CMD_NONE 0x00
+#define CMD_COMMAND 0x01
+#define CMD_BREAK 0x02
+#define CMD_IDLE_CLOCKS 0x03 /* clen = #clocks */
+#define CMD_INVALID 0xff
+#define CMD_REG_CLEN_MASK 0x0000ff00
+#define CMD_REG_CLEN_SHIFT 8
+#define CMD_REG_RLEN_MASK 0x00ff0000
+#define CMD_REG_RLEN_SHIFT 16
+#define CMD_REG_STAT_MASK 0xff000000
+#define CMD_REG_STAT_SHIFT 24
+#define STAT_WORKING 0x00
+#define STAT_COMPLETE 0x01
+#define STAT_ERR_INVAL_CMD 0x80
+#define STAT_ERR_INVAL_IRQ 0x81
+#define STAT_ERR_MTOE 0x82
+
+/* Response tag & CRC */
+#define STAT_RTAG 0x04
+
+/* Response CRC */
+#define STAT_RCRC 0x05
+
+/* Echo and Send delay */
+#define ECHO_DLY_REG 0x08
+#define SEND_DLY_REG 0x09
+
+/* Command data area
+ *
+ * The last byte of the message must be left aligned.
+ */
+#define CMD_DATA 0x10 /* 64 bit of data */
+
+/* Response data area, right aligned, unused top bits are 1 */
+#define RSP_DATA 0x20 /* 32 bit of data */
+
+/* Misc */
+#define INT_CNT 0x30 /* 32-bit interrupt count */
+#define BAD_INT_VEC 0x34 /* 32-bit bad interrupt vector # */
+#define CF_STARTED 0x38 /* byte, set to -1 when copro started */
+#define CLK_CNT 0x3c /* 32-bit, clock count (debug only) */
+
+/*
+ * SRAM layout: GPIO arbitration part
+ */
+#define ARB_REG 0x40
+#define ARB_ARM_REQ 0x01
+#define ARB_ARM_ACK 0x02
+
+/* Misc2 */
+#define CF_RESET_D0 0x50
+#define CF_RESET_D1 0x54
+#define BAD_INT_S0 0x58
+#define BAD_INT_S1 0x5c
+#define STOP_CNT 0x60
+
+/* Internal */
+
+/*
+ * SRAM layout: Trace buffer (debug builds only)
+ */
+#define TRACEBUF 0x100
+#define TR_CLKOBIT0 0xc0
+#define TR_CLKOBIT1 0xc1
+#define TR_CLKOSTART 0x82
+#define TR_OLEN 0x83 /* + len */
+#define TR_CLKZ 0x84 /* + count */
+#define TR_CLKWSTART 0x85
+#define TR_CLKTAG 0x86 /* + tag */
+#define TR_CLKDATA 0x87 /* + len */
+#define TR_CLKCRC 0x88 /* + raw crc */
+#define TR_CLKIBIT0 0x90
+#define TR_CLKIBIT1 0x91
+#define TR_END 0xff
+
+#endif /* __CF_FSI_FW_H */
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4c03d6933646..2c31563fdcae 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -11,6 +11,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * TODO:
+ * - Rework topology
+ * - s/chip_id/chip_loc
+ * - s/cfam/chip (cfam_id -> chip_id etc...)
*/
#include <linux/crc4.h>
@@ -21,6 +26,9 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
#include "fsi-master.h"
@@ -78,9 +86,15 @@ static DEFINE_IDA(master_ida);
struct fsi_slave {
struct device dev;
struct fsi_master *master;
- int id;
- int link;
+ struct cdev cdev;
+ int cdev_idx;
+ int id; /* FSI address */
+ int link; /* FSI link# */
+ u32 cfam_id;
+ int chip_id;
uint32_t size; /* size of slave address space */
+ u8 t_send_delay;
+ u8 t_echo_delay;
};
#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
@@ -89,6 +103,13 @@ struct fsi_slave {
static const int slave_retries = 2;
static int discard_errors;
+static dev_t fsi_base_dev;
+static DEFINE_IDA(fsi_minor_ida);
+#define FSI_CHAR_MAX_DEVICES 0x1000
+
+/* Legacy /dev numbering: 4 devices per chip, 16 chips */
+#define FSI_CHAR_LEGACY_TOP 64
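+/*
+ * In the legacy scheme a minor is (chip_id << 4) | type, so e.g. chip 2,
+ * device type 1 maps to minor 0x21 (an illustrative note; see
+ * __fsi_get_new_minor() below for the allocation logic).
+ */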
+
static int fsi_master_read(struct fsi_master *master, int link,
uint8_t slave_id, uint32_t addr, void *val, size_t size);
static int fsi_master_write(struct fsi_master *master, int link,
@@ -190,7 +211,7 @@ static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
- uint32_t irq, stat;
+ __be32 irq, stat;
int rc, link;
uint8_t id;
@@ -215,7 +236,53 @@ static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
&irq, sizeof(irq));
}
-static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
+/* Encode slave local bus echo delay */
+static inline uint32_t fsi_smode_echodly(int x)
+{
+ return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
+}
+
+/* Encode slave local bus send delay */
+static inline uint32_t fsi_smode_senddly(int x)
+{
+ return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
+}
+
+/* Encode slave local bus clock rate ratio */
+static inline uint32_t fsi_smode_lbcrr(int x)
+{
+ return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
+}
+
+/* Encode slave ID */
+static inline uint32_t fsi_smode_sid(int x)
+{
+ return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
+}
+
+static uint32_t fsi_slave_smode(int id, u8 t_senddly, u8 t_echodly)
+{
+ return FSI_SMODE_WSC | FSI_SMODE_ECRC
+ | fsi_smode_sid(id)
+ | fsi_smode_echodly(t_echodly - 1) | fsi_smode_senddly(t_senddly - 1)
+ | fsi_smode_lbcrr(0x8);
+}
+
+static int fsi_slave_set_smode(struct fsi_slave *slave)
+{
+ uint32_t smode;
+ __be32 data;
+
+ /* set our smode register with the slave ID field to 0; this enables
+ * extended slave addressing
+ */
+ smode = fsi_slave_smode(slave->id, slave->t_send_delay, slave->t_echo_delay);
+ data = cpu_to_be32(smode);
+
+ return fsi_master_write(slave->master, slave->link, slave->id,
+ FSI_SLAVE_BASE + FSI_SMODE,
+ &data, sizeof(data));
+}
static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
uint32_t addr, size_t size)
@@ -223,7 +290,7 @@ static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
struct fsi_master *master = slave->master;
int rc, link;
uint32_t reg;
- uint8_t id;
+ uint8_t id, send_delay, echo_delay;
if (discard_errors)
return -1;
@@ -254,15 +321,26 @@ static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
}
}
+ send_delay = slave->t_send_delay;
+ echo_delay = slave->t_echo_delay;
+
/* getting serious, reset the slave via BREAK */
rc = fsi_master_break(master, link);
if (rc)
return rc;
- rc = fsi_slave_set_smode(master, link, id);
+ slave->t_send_delay = send_delay;
+ slave->t_echo_delay = echo_delay;
+
+ rc = fsi_slave_set_smode(slave);
if (rc)
return rc;
+ if (master->link_config)
+ master->link_config(master, link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
return fsi_slave_report_and_clear_errors(slave);
}
@@ -390,7 +468,6 @@ static struct device_node *fsi_device_find_of_node(struct fsi_device *dev)
static int fsi_slave_scan(struct fsi_slave *slave)
{
uint32_t engine_addr;
- uint32_t conf;
int rc, i;
/*
@@ -404,15 +481,17 @@ static int fsi_slave_scan(struct fsi_slave *slave)
for (i = 2; i < engine_page_size / sizeof(uint32_t); i++) {
uint8_t slots, version, type, crc;
struct fsi_device *dev;
+ uint32_t conf;
+ __be32 data;
- rc = fsi_slave_read(slave, (i + 1) * sizeof(conf),
- &conf, sizeof(conf));
+ rc = fsi_slave_read(slave, (i + 1) * sizeof(data),
+ &data, sizeof(data));
if (rc) {
dev_warn(&slave->dev,
"error reading slave registers\n");
return -1;
}
- conf = be32_to_cpu(conf);
+ conf = be32_to_cpu(data);
crc = crc4(0, conf, 32);
if (crc) {
@@ -539,79 +618,11 @@ static const struct bin_attribute fsi_slave_raw_attr = {
.write = fsi_slave_sysfs_raw_write,
};
-static ssize_t fsi_slave_sysfs_term_write(struct file *file,
- struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
- struct fsi_master *master = slave->master;
-
- if (!master->term)
- return -ENODEV;
-
- master->term(master, slave->link, slave->id);
- return count;
-}
-
-static const struct bin_attribute fsi_slave_term_attr = {
- .attr = {
- .name = "term",
- .mode = 0200,
- },
- .size = 0,
- .write = fsi_slave_sysfs_term_write,
-};
-
-/* Encode slave local bus echo delay */
-static inline uint32_t fsi_smode_echodly(int x)
-{
- return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
-}
-
-/* Encode slave local bus send delay */
-static inline uint32_t fsi_smode_senddly(int x)
-{
- return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
-}
-
-/* Encode slave local bus clock rate ratio */
-static inline uint32_t fsi_smode_lbcrr(int x)
-{
- return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
-}
-
-/* Encode slave ID */
-static inline uint32_t fsi_smode_sid(int x)
-{
- return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
-}
-
-static uint32_t fsi_slave_smode(int id)
-{
- return FSI_SMODE_WSC | FSI_SMODE_ECRC
- | fsi_smode_sid(id)
- | fsi_smode_echodly(0xf) | fsi_smode_senddly(0xf)
- | fsi_smode_lbcrr(0x8);
-}
-
-static int fsi_slave_set_smode(struct fsi_master *master, int link, int id)
-{
- uint32_t smode;
-
- /* set our smode register with the slave ID field to 0; this enables
- * extended slave addressing
- */
- smode = fsi_slave_smode(id);
- smode = cpu_to_be32(smode);
-
- return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SMODE,
- &smode, sizeof(smode));
-}
-
static void fsi_slave_release(struct device *dev)
{
struct fsi_slave *slave = to_fsi_slave(dev);
+ fsi_free_minor(slave->dev.devt);
of_node_put(dev->of_node);
kfree(slave);
}
@@ -659,11 +670,303 @@ static struct device_node *fsi_slave_find_of_node(struct fsi_master *master,
return NULL;
}
+static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
+ loff_t *offset)
+{
+ struct fsi_slave *slave = filep->private_data;
+ size_t total_len, read_len;
+ loff_t off = *offset;
+ ssize_t rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += read_len) {
+ __be32 data;
+
+ /* read at most 4 bytes, without running past the requested count */
+ read_len = min_t(size_t, count - total_len, 4);
+ read_len -= off & 0x3;
+
+ rc = fsi_slave_read(slave, off, &data, read_len);
+ if (rc)
+ goto fail;
+ rc = copy_to_user(buf + total_len, &data, read_len);
+ if (rc) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ off += read_len;
+ }
+ rc = count;
+ fail:
+ *offset = off;
+ return rc;
+}
+
+static ssize_t cfam_write(struct file *filep, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct fsi_slave *slave = filep->private_data;
+ size_t total_len, write_len;
+ loff_t off = *offset;
+ ssize_t rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += write_len) {
+ __be32 data;
+
+ /* write at most 4 bytes, without running past the requested count */
+ write_len = min_t(size_t, count - total_len, 4);
+ write_len -= off & 0x3;
+
+ rc = copy_from_user(&data, buf + total_len, write_len);
+ if (rc) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ rc = fsi_slave_write(slave, off, &data, write_len);
+ if (rc)
+ goto fail;
+ off += write_len;
+ }
+ rc = count;
+ fail:
+ *offset = off;
+ return rc;
+}
+
+static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
+{
+ switch (whence) {
+ case SEEK_CUR:
+ break;
+ case SEEK_SET:
+ file->f_pos = offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return offset;
+}
+
+static int cfam_open(struct inode *inode, struct file *file)
+{
+ struct fsi_slave *slave = container_of(inode->i_cdev, struct fsi_slave, cdev);
+
+ file->private_data = slave;
+
+ return 0;
+}
+
+static const struct file_operations cfam_fops = {
+ .owner = THIS_MODULE,
+ .open = cfam_open,
+ .llseek = cfam_llseek,
+ .read = cfam_read,
+ .write = cfam_write,
+};
+
+static ssize_t send_term_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+ struct fsi_master *master = slave->master;
+
+ if (!master->term)
+ return -ENODEV;
+
+ master->term(master, slave->link, slave->id);
+ return count;
+}
+
+static DEVICE_ATTR_WO(send_term);
+
+static ssize_t slave_send_echo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "%u\n", slave->t_send_delay);
+}
+
+static ssize_t slave_send_echo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+ struct fsi_master *master = slave->master;
+ unsigned long val;
+ int rc;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val < 1 || val > 16)
+ return -EINVAL;
+
+ if (!master->link_config)
+ return -ENXIO;
+
+ /* Current HW mandates that the send and echo delays be identical */
+ slave->t_send_delay = val;
+ slave->t_echo_delay = val;
+
+ rc = fsi_slave_set_smode(slave);
+ if (rc < 0)
+ return rc;
+ if (master->link_config)
+ master->link_config(master, slave->link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
+ return count;
+}
+
+static DEVICE_ATTR(send_echo_delays, 0600,
+ slave_send_echo_show, slave_send_echo_store);
+
+static ssize_t chip_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "%d\n", slave->chip_id);
+}
+
+static DEVICE_ATTR_RO(chip_id);
+
+static ssize_t cfam_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "0x%x\n", slave->cfam_id);
+}
+
+static DEVICE_ATTR_RO(cfam_id);
+
+static struct attribute *cfam_attr[] = {
+ &dev_attr_send_echo_delays.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_cfam_id.attr,
+ &dev_attr_send_term.attr,
+ NULL,
+};
+
+static const struct attribute_group cfam_attr_group = {
+ .attrs = cfam_attr,
+};
+
+static const struct attribute_group *cfam_attr_groups[] = {
+ &cfam_attr_group,
+ NULL,
+};
+
+static char *cfam_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx);
+#else
+ return kasprintf(GFP_KERNEL, "cfam%d", slave->cdev_idx);
+#endif
+}
+
+static const struct device_type cfam_type = {
+ .name = "cfam",
+ .devnode = cfam_devnode,
+ .groups = cfam_attr_groups
+};
+
+static char *fsi_cdev_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return kasprintf(GFP_KERNEL, "fsi/%s", dev_name(dev));
+#else
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+#endif
+}
+
+const struct device_type fsi_cdev_type = {
+ .name = "fsi-cdev",
+ .devnode = fsi_cdev_devnode,
+};
+EXPORT_SYMBOL_GPL(fsi_cdev_type);
+
+/* Backward compatible /dev/ numbering in "old style" mode */
+static int fsi_adjust_index(int index)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return index;
+#else
+ return index + 1;
+#endif
+}
+
+static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index)
+{
+ int cid = slave->chip_id;
+ int id;
+
+ /* Check if we qualify for legacy numbering */
+ if (cid >= 0 && cid < 16 && type < 4) {
+ /* Try reserving the legacy number */
+ id = (cid << 4) | type;
+ id = ida_simple_get(&fsi_minor_ida, id, id + 1, GFP_KERNEL);
+ if (id >= 0) {
+ *out_index = fsi_adjust_index(cid);
+ *out_dev = fsi_base_dev + id;
+ return 0;
+ }
+ /* Other failure */
+ if (id != -ENOSPC)
+ return id;
+ /* Fall back to non-legacy allocation */
+ }
+ id = ida_simple_get(&fsi_minor_ida, FSI_CHAR_LEGACY_TOP,
+ FSI_CHAR_MAX_DEVICES, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ *out_index = fsi_adjust_index(id);
+ *out_dev = fsi_base_dev + id;
+ return 0;
+}
+
+int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index)
+{
+ return __fsi_get_new_minor(fdev->slave, type, out_dev, out_index);
+}
+EXPORT_SYMBOL_GPL(fsi_get_new_minor);
+
+void fsi_free_minor(dev_t dev)
+{
+ ida_simple_remove(&fsi_minor_ida, MINOR(dev));
+}
+EXPORT_SYMBOL_GPL(fsi_free_minor);
+
static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
{
- uint32_t chip_id, llmode;
+ uint32_t cfam_id;
struct fsi_slave *slave;
uint8_t crc;
+ __be32 data, llmode;
int rc;
/* Currently, we only support single slaves on a link, and use the
@@ -672,31 +975,23 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
if (id != 0)
return -EINVAL;
- rc = fsi_master_read(master, link, id, 0, &chip_id, sizeof(chip_id));
+ rc = fsi_master_read(master, link, id, 0, &data, sizeof(data));
if (rc) {
dev_dbg(&master->dev, "can't read slave %02x:%02x %d\n",
link, id, rc);
return -ENODEV;
}
- chip_id = be32_to_cpu(chip_id);
+ cfam_id = be32_to_cpu(data);
- crc = crc4(0, chip_id, 32);
+ crc = crc4(0, cfam_id, 32);
if (crc) {
- dev_warn(&master->dev, "slave %02x:%02x invalid chip id CRC!\n",
+ dev_warn(&master->dev, "slave %02x:%02x invalid cfam id CRC!\n",
link, id);
return -EIO;
}
dev_dbg(&master->dev, "fsi: found chip %08x at %02x:%02x:%02x\n",
- chip_id, master->idx, link, id);
-
- rc = fsi_slave_set_smode(master, link, id);
- if (rc) {
- dev_warn(&master->dev,
- "can't set smode on slave:%02x:%02x %d\n",
- link, id, rc);
- return -ENODEV;
- }
+ cfam_id, master->idx, link, id);
/* If we're behind a master that doesn't provide a self-running bus
* clock, put the slave into async mode
@@ -719,30 +1014,61 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
if (!slave)
return -ENOMEM;
- slave->master = master;
+ dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
+ slave->dev.type = &cfam_type;
slave->dev.parent = &master->dev;
slave->dev.of_node = fsi_slave_find_of_node(master, link, id);
slave->dev.release = fsi_slave_release;
+ device_initialize(&slave->dev);
+ slave->cfam_id = cfam_id;
+ slave->master = master;
slave->link = link;
slave->id = id;
slave->size = FSI_SLAVE_SIZE_23b;
+ slave->t_send_delay = 16;
+ slave->t_echo_delay = 16;
+
+ /* Get chip ID if any */
+ slave->chip_id = -1;
+ if (slave->dev.of_node) {
+ uint32_t prop;
+ if (!of_property_read_u32(slave->dev.of_node, "chip-id", &prop))
+ slave->chip_id = prop;
- dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
- rc = device_register(&slave->dev);
- if (rc < 0) {
- dev_warn(&master->dev, "failed to create slave device: %d\n",
- rc);
- put_device(&slave->dev);
- return rc;
}
+ /* Allocate a minor in the FSI space */
+ rc = __fsi_get_new_minor(slave, fsi_dev_cfam, &slave->dev.devt,
+ &slave->cdev_idx);
+ if (rc)
+ goto err_free;
+
+ /* Create chardev for userspace access */
+ cdev_init(&slave->cdev, &cfam_fops);
+ rc = cdev_device_add(&slave->cdev, &slave->dev);
+ if (rc) {
+ dev_err(&slave->dev, "Error %d creating slave device\n", rc);
+ goto err_free;
+ }
+
+ rc = fsi_slave_set_smode(slave);
+ if (rc) {
+ dev_warn(&master->dev,
+ "can't set smode on slave:%02x:%02x %d\n",
+ link, id, rc);
+ kfree(slave);
+ return -ENODEV;
+ }
+ if (master->link_config)
+ master->link_config(master, link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
+ /* Legacy raw file -> to be removed */
rc = device_create_bin_file(&slave->dev, &fsi_slave_raw_attr);
if (rc)
dev_warn(&slave->dev, "failed to create raw attr: %d\n", rc);
- rc = device_create_bin_file(&slave->dev, &fsi_slave_term_attr);
- if (rc)
- dev_warn(&slave->dev, "failed to create term attr: %d\n", rc);
rc = fsi_slave_scan(slave);
if (rc)
@@ -750,6 +1076,10 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
rc);
return rc;
+
+ err_free:
+ put_device(&slave->dev);
+ return rc;
}
/* FSI master support */
@@ -814,12 +1144,16 @@ static int fsi_master_link_enable(struct fsi_master *master, int link)
*/
static int fsi_master_break(struct fsi_master *master, int link)
{
+ int rc = 0;
+
trace_fsi_master_break(master, link);
if (master->send_break)
- return master->send_break(master, link);
+ rc = master->send_break(master, link);
+ if (master->link_config)
+ master->link_config(master, link, 16, 16);
- return 0;
+ return rc;
}
static int fsi_master_scan(struct fsi_master *master)
@@ -854,8 +1188,11 @@ static int fsi_slave_remove_device(struct device *dev, void *arg)
static int fsi_master_remove_slave(struct device *dev, void *arg)
{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
device_for_each_child(dev, NULL, fsi_slave_remove_device);
- device_unregister(dev);
+ cdev_device_del(&slave->cdev, &slave->dev);
+ put_device(dev);
return 0;
}
@@ -866,8 +1203,14 @@ static void fsi_master_unscan(struct fsi_master *master)
int fsi_master_rescan(struct fsi_master *master)
{
+ int rc;
+
+ mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
- return fsi_master_scan(master);
+ rc = fsi_master_scan(master);
+ mutex_unlock(&master->scan_lock);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(fsi_master_rescan);
@@ -903,9 +1246,7 @@ int fsi_master_register(struct fsi_master *master)
int rc;
struct device_node *np;
- if (!master)
- return -EINVAL;
-
+ mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
dev_set_name(&master->dev, "fsi%d", master->idx);
@@ -917,21 +1258,24 @@ int fsi_master_register(struct fsi_master *master)
rc = device_create_file(&master->dev, &dev_attr_rescan);
if (rc) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
ida_simple_remove(&master_ida, master->idx);
return rc;
}
rc = device_create_file(&master->dev, &dev_attr_break);
if (rc) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
ida_simple_remove(&master_ida, master->idx);
return rc;
}
np = dev_of_node(&master->dev);
- if (!of_property_read_bool(np, "no-scan-on-init"))
+ if (!of_property_read_bool(np, "no-scan-on-init")) {
+ mutex_lock(&master->scan_lock);
fsi_master_scan(master);
+ mutex_unlock(&master->scan_lock);
+ }
return 0;
}
@@ -944,7 +1288,9 @@ void fsi_master_unregister(struct fsi_master *master)
master->idx = -1;
}
+ mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
+ mutex_unlock(&master->scan_lock);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(fsi_master_unregister);
@@ -996,13 +1342,27 @@ EXPORT_SYMBOL_GPL(fsi_bus_type);
static int __init fsi_init(void)
{
- return bus_register(&fsi_bus_type);
+ int rc;
+
+ rc = alloc_chrdev_region(&fsi_base_dev, 0, FSI_CHAR_MAX_DEVICES, "fsi");
+ if (rc)
+ return rc;
+ rc = bus_register(&fsi_bus_type);
+ if (rc)
+ goto fail_bus;
+ return 0;
+
+ fail_bus:
+ unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+ return rc;
}
postcore_initcall(fsi_init);
static void fsi_exit(void)
{
bus_unregister(&fsi_bus_type);
+ unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+ ida_destroy(&fsi_minor_ida);
}
module_exit(fsi_exit);
module_param(discard_errors, int, 0664);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
new file mode 100644
index 000000000000..04d10ea8d343
--- /dev/null
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corp
+/*
+ * An FSI master controller using a simple GPIO bit-banging interface,
+ * driven by the Aspeed ColdFire coprocessor
+ */
+
+#include <linux/crc4.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/firmware.h>
+#include <linux/gpio/aspeed.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/genalloc.h>
+
+#include "fsi-master.h"
+#include "cf-fsi-fw.h"
+
+#define FW_FILE_NAME "cf-fsi-fw.bin"
+
+/* Common SCU based coprocessor control registers */
+#define SCU_COPRO_CTRL 0x100
+#define SCU_COPRO_RESET 0x00000002
+#define SCU_COPRO_CLK_EN 0x00000001
+
+/* AST2500 specific ones */
+#define SCU_2500_COPRO_SEG0 0x104
+#define SCU_2500_COPRO_SEG1 0x108
+#define SCU_2500_COPRO_SEG2 0x10c
+#define SCU_2500_COPRO_SEG3 0x110
+#define SCU_2500_COPRO_SEG4 0x114
+#define SCU_2500_COPRO_SEG5 0x118
+#define SCU_2500_COPRO_SEG6 0x11c
+#define SCU_2500_COPRO_SEG7 0x120
+#define SCU_2500_COPRO_SEG8 0x124
+#define SCU_2500_COPRO_SEG_SWAP 0x00000001
+#define SCU_2500_COPRO_CACHE_CTL 0x128
+#define SCU_2500_COPRO_CACHE_EN 0x00000001
+#define SCU_2500_COPRO_SEG0_CACHE_EN 0x00000002
+#define SCU_2500_COPRO_SEG1_CACHE_EN 0x00000004
+#define SCU_2500_COPRO_SEG2_CACHE_EN 0x00000008
+#define SCU_2500_COPRO_SEG3_CACHE_EN 0x00000010
+#define SCU_2500_COPRO_SEG4_CACHE_EN 0x00000020
+#define SCU_2500_COPRO_SEG5_CACHE_EN 0x00000040
+#define SCU_2500_COPRO_SEG6_CACHE_EN 0x00000080
+#define SCU_2500_COPRO_SEG7_CACHE_EN 0x00000100
+#define SCU_2500_COPRO_SEG8_CACHE_EN 0x00000200
+
+#define SCU_2400_COPRO_SEG0 0x104
+#define SCU_2400_COPRO_SEG2 0x108
+#define SCU_2400_COPRO_SEG4 0x10c
+#define SCU_2400_COPRO_SEG6 0x110
+#define SCU_2400_COPRO_SEG8 0x114
+#define SCU_2400_COPRO_SEG_SWAP 0x80000000
+#define SCU_2400_COPRO_CACHE_CTL 0x118
+#define SCU_2400_COPRO_CACHE_EN 0x00000001
+#define SCU_2400_COPRO_SEG0_CACHE_EN 0x00000002
+#define SCU_2400_COPRO_SEG2_CACHE_EN 0x00000004
+#define SCU_2400_COPRO_SEG4_CACHE_EN 0x00000008
+#define SCU_2400_COPRO_SEG6_CACHE_EN 0x00000010
+#define SCU_2400_COPRO_SEG8_CACHE_EN 0x00000020
+
+/* CVIC registers */
+#define CVIC_EN_REG 0x10
+#define CVIC_TRIG_REG 0x18
+
+/*
+ * System register base address (needed for configuring the
+ * coldfire maps)
+ */
+#define SYSREG_BASE 0x1e600000
+
+/* Amount of SRAM required */
+#define SRAM_SIZE 0x1000
+
+#define LAST_ADDR_INVALID 0x1
+
+struct fsi_master_acf {
+ struct fsi_master master;
+ struct device *dev;
+ struct regmap *scu;
+ struct mutex lock; /* mutex for command ordering */
+ struct gpio_desc *gpio_clk;
+ struct gpio_desc *gpio_data;
+ struct gpio_desc *gpio_trans; /* Voltage translator */
+ struct gpio_desc *gpio_enable; /* FSI enable */
+ struct gpio_desc *gpio_mux; /* Mux control */
+ uint16_t gpio_clk_vreg;
+ uint16_t gpio_clk_dreg;
+ uint16_t gpio_dat_vreg;
+ uint16_t gpio_dat_dreg;
+ uint16_t gpio_tra_vreg;
+ uint16_t gpio_tra_dreg;
+ uint8_t gpio_clk_bit;
+ uint8_t gpio_dat_bit;
+ uint8_t gpio_tra_bit;
+ uint32_t cf_mem_addr;
+ size_t cf_mem_size;
+ void __iomem *cf_mem;
+ void __iomem *cvic;
+ struct gen_pool *sram_pool;
+ void __iomem *sram;
+ bool is_ast2500;
+ bool external_mode;
+ bool trace_enabled;
+ uint32_t last_addr;
+ uint8_t t_send_delay;
+ uint8_t t_echo_delay;
+ uint32_t cvic_sw_irq;
+};
+#define to_fsi_master_acf(m) container_of(m, struct fsi_master_acf, master)
+
+struct fsi_msg {
+ uint64_t msg;
+ uint8_t bits;
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_ast_cf.h>
+
+static void msg_push_bits(struct fsi_msg *msg, uint64_t data, int bits)
+{
+ msg->msg <<= bits;
+ msg->msg |= data & ((1ull << bits) - 1);
+ msg->bits += bits;
+}
+
+static void msg_push_crc(struct fsi_msg *msg)
+{
+ uint8_t crc;
+ int top;
+
+ top = msg->bits & 0x3;
+
+ /* start bit, and any non-aligned top bits */
+ crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
+
+ /* aligned bits */
+ crc = crc4(crc, msg->msg, msg->bits - top);
+
+ msg_push_bits(msg, crc, 4);
+}
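+
+/*
+ * Illustrative example of the split above: for a 27-bit message,
+ * top = 3, so the first crc4() call covers the start bit plus the
+ * three unaligned MSBs, and the second covers the remaining 24
+ * aligned bits. The receiver then runs crc4 over start bit, message
+ * and CRC together and expects a result of zero.
+ */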
+
+static void msg_finish_cmd(struct fsi_msg *cmd)
+{
+ /* Left align message */
+ cmd->msg <<= (64 - cmd->bits);
+}
+
+static bool check_same_address(struct fsi_master_acf *master, int id,
+ uint32_t addr)
+{
+ /* this will also handle LAST_ADDR_INVALID */
+ return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_acf *master, int id,
+ uint32_t addr, uint32_t *rel_addrp)
+{
+ uint32_t last_addr = master->last_addr;
+ int32_t rel_addr;
+
+ if (last_addr == LAST_ADDR_INVALID)
+ return false;
+
+ /* We may be in 23-bit addressing mode, which uses the id as the
+ * top two address bits. So, if we're referencing a different ID,
+ * use absolute addresses.
+ */
+ if (((last_addr >> 21) & 0x3) != id)
+ return false;
+
+ /* remove the top two bits from any 23-bit addressing */
+ last_addr &= (1 << 21) - 1;
+
+ /* We know that the addresses are limited to 21 bits, so this won't
+ * overflow the signed rel_addr */
+ rel_addr = addr - last_addr;
+ if (rel_addr > 255 || rel_addr < -256)
+ return false;
+
+ *rel_addrp = (uint32_t)rel_addr;
+
+ return true;
+}
+
+static void last_address_update(struct fsi_master_acf *master,
+ int id, bool valid, uint32_t addr)
+{
+ if (!valid)
+ master->last_addr = LAST_ADDR_INVALID;
+ else
+ master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
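+
+/*
+ * Example (illustrative): after a successful 4-byte access with id 1
+ * at address 0x1008, last_addr is (1 << 21) | 0x1008; a subsequent
+ * access to 0x100c with the same id falls within the +255/-256
+ * window and is sent as a relative-address command.
+ */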
+
+/*
+ * Encode an Absolute/Relative/Same Address command
+ */
+static void build_ar_command(struct fsi_master_acf *master,
+ struct fsi_msg *cmd, uint8_t id,
+ uint32_t addr, size_t size,
+ const void *data)
+{
+ int i, addr_bits, opcode_bits;
+ bool write = !!data;
+ uint8_t ds, opcode;
+ uint32_t rel_addr;
+
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ /* we have 21 bits of address max */
+ addr &= ((1 << 21) - 1);
+
+ /* cmd opcodes are variable length - SAME_AR is only two bits */
+ opcode_bits = 3;
+
+ if (check_same_address(master, id, addr)) {
+ /* we still address the byte offset within the word */
+ addr_bits = 2;
+ opcode_bits = 2;
+ opcode = FSI_CMD_SAME_AR;
+ trace_fsi_master_acf_cmd_same_addr(master);
+
+ } else if (check_relative_address(master, id, addr, &rel_addr)) {
+ /* 8 bits plus sign */
+ addr_bits = 9;
+ addr = rel_addr;
+ opcode = FSI_CMD_REL_AR;
+ trace_fsi_master_acf_cmd_rel_addr(master, rel_addr);
+
+ } else {
+ addr_bits = 21;
+ opcode = FSI_CMD_ABS_AR;
+ trace_fsi_master_acf_cmd_abs_addr(master, addr);
+ }
+
+ /*
+ * The read/write size is encoded in the lower bits of the address
+ * (as it must be naturally-aligned), and the following ds bit.
+ *
+ * size addr:1 addr:0 ds
+ * 1 x x 0
+ * 2 x 0 1
+ * 4 0 1 1
+ *
+ */
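+ /*
+ * Worked example: a 4-byte access to 0x1004 clears the two low
+ * bits then sets addr:0, sending 0x1005 with ds = 1, while a
+ * 1-byte access to 0x1007 sends the address unchanged with ds = 0.
+ */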
+ ds = size > 1 ? 1 : 0;
+ addr &= ~(size - 1);
+ if (size == 4)
+ addr |= 1;
+
+ msg_push_bits(cmd, id, 2);
+ msg_push_bits(cmd, opcode, opcode_bits);
+ msg_push_bits(cmd, write ? 0 : 1, 1);
+ msg_push_bits(cmd, addr, addr_bits);
+ msg_push_bits(cmd, ds, 1);
+ for (i = 0; write && i < size; i++)
+ msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
+
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_dpoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_epoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_term_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_TERM, 6);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static int do_copro_command(struct fsi_master_acf *master, uint32_t op)
+{
+ uint32_t timeout = 10000000;
+ uint8_t stat;
+
+ trace_fsi_master_acf_copro_command(master, op);
+
+ /* Send command */
+ iowrite32be(op, master->sram + CMD_STAT_REG);
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ /* Wait for status to indicate completion (or error) */
+ do {
+ if (timeout-- == 0) {
+ dev_warn(master->dev,
+ "Timeout waiting for coprocessor completion\n");
+ return -ETIMEDOUT;
+ }
+ stat = ioread8(master->sram + CMD_STAT_REG);
+ } while (stat < STAT_COMPLETE || stat == 0xff);
+
+ if (stat == STAT_COMPLETE)
+ return 0;
+ switch (stat) {
+ case STAT_ERR_INVAL_CMD:
+ return -EINVAL;
+ case STAT_ERR_INVAL_IRQ:
+ return -EIO;
+ case STAT_ERR_MTOE:
+ return -ESHUTDOWN;
+ }
+ return -ENXIO;
+}
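+
+/*
+ * Example: clocking out idle bits, as clock_zeros() below does, is a
+ * single mailbox command of the form
+ * CMD_IDLE_CLOCKS | (count << CMD_REG_CLEN_SHIFT), considered done
+ * once the status byte reaches STAT_COMPLETE.
+ */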
+
+static int clock_zeros(struct fsi_master_acf *master, int count)
+{
+ while (count) {
+ int rc, lcnt = min(count, 255);
+
+ rc = do_copro_command(master,
+ CMD_IDLE_CLOCKS | (lcnt << CMD_REG_CLEN_SHIFT));
+ if (rc)
+ return rc;
+ count -= lcnt;
+ }
+ return 0;
+}
+
+static int send_request(struct fsi_master_acf *master, struct fsi_msg *cmd,
+ unsigned int resp_bits)
+{
+ uint32_t op;
+
+ trace_fsi_master_acf_send_request(master, cmd, resp_bits);
+
+ /* Store message into SRAM */
+ iowrite32be((cmd->msg >> 32), master->sram + CMD_DATA);
+ iowrite32be((cmd->msg & 0xffffffff), master->sram + CMD_DATA + 4);
+
+ op = CMD_COMMAND;
+ op |= cmd->bits << CMD_REG_CLEN_SHIFT;
+ if (resp_bits)
+ op |= resp_bits << CMD_REG_RLEN_SHIFT;
+
+ return do_copro_command(master, op);
+}
+
+static int read_copro_response(struct fsi_master_acf *master, uint8_t size,
+ uint32_t *response, u8 *tag)
+{
+ uint8_t rtag = ioread8(master->sram + STAT_RTAG) & 0xf;
+ uint8_t rcrc = ioread8(master->sram + STAT_RCRC) & 0xf;
+ uint32_t rdata = 0;
+ uint32_t crc;
+ uint8_t ack;
+
+ *tag = ack = rtag & 3;
+
+ /* we have a whole message now; check CRC */
+ crc = crc4(0, 1, 1);
+ crc = crc4(crc, rtag, 4);
+ if (ack == FSI_RESP_ACK && size) {
+ rdata = ioread32be(master->sram + RSP_DATA);
+ crc = crc4(crc, rdata, size);
+ if (response)
+ *response = rdata;
+ }
+ crc = crc4(crc, rcrc, 4);
+
+ trace_fsi_master_acf_copro_response(master, rtag, rcrc, rdata, crc == 0);
+
+ if (crc) {
+ /*
+ * Check if it's all 1's or all 0's, that probably means
+ * the host is off
+ */
+ if ((rtag == 0xf && rcrc == 0xf) || (rtag == 0 && rcrc == 0))
+ return -ENODEV;
+ dev_dbg(master->dev, "Bad response CRC !\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int send_term(struct fsi_master_acf *master, uint8_t slave)
+{
+ struct fsi_msg cmd;
+ uint8_t tag;
+ int rc;
+
+ build_term_command(&cmd, slave);
+
+ rc = send_request(master, &cmd, 0);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending term\n", rc);
+ return rc;
+ }
+
+ rc = read_copro_response(master, 0, NULL, &tag);
+ if (rc < 0) {
+ dev_err(master->dev,
+ "TERM failed; lost communication with slave\n");
+ return -EIO;
+ } else if (tag != FSI_RESP_ACK) {
+ dev_err(master->dev, "TERM failed; response %d\n", tag);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void dump_ucode_trace(struct fsi_master_acf *master)
+{
+ char trbuf[52];
+ char *p;
+ int i;
+
+ dev_dbg(master->dev,
+ "CMDSTAT:%08x RTAG=%02x RCRC=%02x RDATA=%02x #INT=%08x\n",
+ ioread32be(master->sram + CMD_STAT_REG),
+ ioread8(master->sram + STAT_RTAG),
+ ioread8(master->sram + STAT_RCRC),
+ ioread32be(master->sram + RSP_DATA),
+ ioread32be(master->sram + INT_CNT));
+
+ for (i = 0; i < 512; i++) {
+ uint8_t v;
+ if ((i % 16) == 0)
+ p = trbuf;
+ v = ioread8(master->sram + TRACEBUF + i);
+ p += sprintf(p, "%02x ", v);
+ if (((i % 16) == 15) || v == TR_END)
+ dev_dbg(master->dev, "%s\n", trbuf);
+ if (v == TR_END)
+ break;
+ }
+}
+
+static int handle_response(struct fsi_master_acf *master,
+ uint8_t slave, uint8_t size, void *data)
+{
+ int busy_count = 0, rc;
+ int crc_err_retries = 0;
+ struct fsi_msg cmd;
+ uint32_t response;
+ uint8_t tag;
+retry:
+ rc = read_copro_response(master, size, &response, &tag);
+
+ /* Handle retries on CRC errors */
+ if (rc == -EAGAIN) {
+ /* Too many retries ? */
+ if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+ /*
+ * Pass it up as a -EIO otherwise upper level will retry
+ * the whole command which isn't what we want here.
+ */
+ rc = -EIO;
+ goto bail;
+ }
+ trace_fsi_master_acf_crc_rsp_error(master, crc_err_retries);
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+ if (rc) {
+ dev_warn(master->dev,
+ "Error %d clocking zeros for E_POLL\n", rc);
+ return rc;
+ }
+ build_epoll_command(&cmd, slave);
+ rc = send_request(master, &cmd, size);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending E_POLL\n", rc);
+ return -EIO;
+ }
+ goto retry;
+ }
+ if (rc)
+ return rc;
+
+ switch (tag) {
+ case FSI_RESP_ACK:
+ if (size && data) {
+ if (size == 32)
+ *(__be32 *)data = cpu_to_be32(response);
+ else if (size == 16)
+ *(__be16 *)data = cpu_to_be16(response);
+ else
+ *(u8 *)data = response;
+ }
+ break;
+ case FSI_RESP_BUSY:
+ /*
+ * It's necessary to clock the slave before issuing a
+ * d-poll, though this isn't indicated in the hardware
+ * protocol spec. Fewer than 20 clocks cause the slave to
+ * hang; 21 are enough.
+ */
+ dev_dbg(master->dev, "Busy, retrying...\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+ if (rc) {
+ dev_warn(master->dev,
+ "Error %d clocking zeros for D_POLL\n", rc);
+ break;
+ }
+ if (busy_count++ < FSI_MASTER_MAX_BUSY) {
+ build_dpoll_command(&cmd, slave);
+ rc = send_request(master, &cmd, size);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending D_POLL\n", rc);
+ break;
+ }
+ goto retry;
+ }
+ dev_dbg(master->dev,
+ "ERR slave is stuck in busy state, issuing TERM\n");
+ send_term(master, slave);
+ rc = -EIO;
+ break;
+
+ case FSI_RESP_ERRA:
+ dev_dbg(master->dev, "ERRA received\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = -EIO;
+ break;
+ case FSI_RESP_ERRC:
+ dev_dbg(master->dev, "ERRC received\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = -EAGAIN;
+ break;
+ }
+ bail:
+ if (busy_count > 0) {
+ trace_fsi_master_acf_poll_response_busy(master, busy_count);
+ }
+
+ return rc;
+}
+
+static int fsi_master_acf_xfer(struct fsi_master_acf *master, uint8_t slave,
+ struct fsi_msg *cmd, size_t resp_len, void *resp)
+{
+ int rc = -EAGAIN, retries = 0;
+
+ resp_len <<= 3;
+ while ((retries++) < FSI_CRC_ERR_RETRIES) {
+ rc = send_request(master, cmd, resp_len);
+ if (rc) {
+ if (rc != -ESHUTDOWN)
+ dev_warn(master->dev, "Error %d sending command\n", rc);
+ break;
+ }
+ rc = handle_response(master, slave, resp_len, resp);
+ if (rc != -EAGAIN)
+ break;
+ rc = -EIO;
+ dev_dbg(master->dev, "ECRC retry %d\n", retries);
+
+ /* Pace it a bit before retry */
+ msleep(1);
+ }
+
+ return rc;
+}
+
+static int fsi_master_acf_read(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, void *val,
+ size_t size)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ dev_dbg(master->dev, "read id %d addr %x size %zd\n", id, addr, size);
+ build_ar_command(master, &cmd, id, addr, size, NULL);
+ rc = fsi_master_acf_xfer(master, id, &cmd, size, val);
+ last_address_update(master, id, rc == 0, addr);
+ if (rc)
+ dev_dbg(master->dev, "read id %d addr 0x%08x err: %d\n",
+ id, addr, rc);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_write(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, const void *val,
+ size_t size)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ build_ar_command(master, &cmd, id, addr, size, val);
+ dev_dbg(master->dev, "write id %d addr %x size %zd raw_data: %08x\n",
+ id, addr, size, *(uint32_t *)val);
+ rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, rc == 0, addr);
+ if (rc)
+ dev_dbg(master->dev, "write id %d addr 0x%08x err: %d\n",
+ id, addr, rc);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_term(struct fsi_master *_master,
+ int link, uint8_t id)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ build_term_command(&cmd, id);
+ dev_dbg(master->dev, "term id %d\n", id);
+ rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, false, 0);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_break(struct fsi_master *_master, int link)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ if (master->external_mode) {
+ mutex_unlock(&master->lock);
+ return -EBUSY;
+ }
+ dev_dbg(master->dev, "sending BREAK\n");
+ rc = do_copro_command(master, CMD_BREAK);
+ last_address_update(master, 0, false, 0);
+ mutex_unlock(&master->lock);
+
+ /* Wait for logic reset to take effect */
+ udelay(200);
+
+ return rc;
+}
+
+static void reset_cf(struct fsi_master_acf *master)
+{
+ regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_RESET);
+ usleep_range(20, 20);
+ regmap_write(master->scu, SCU_COPRO_CTRL, 0);
+ usleep_range(20, 20);
+}
+
+static void start_cf(struct fsi_master_acf *master)
+{
+ regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_CLK_EN);
+}
+
+static void setup_ast2500_cf_maps(struct fsi_master_acf *master)
+{
+ /*
+ * Note about byteswap setting: the bus is wired backwards,
+ * so setting the byteswap bit actually makes the ColdFire
+ * work "normally" for a BE processor, ie, put the MSB in
+ * the lowest address byte.
+ *
+ * We thus need to set the bit for our main memory which
+ * contains our program code. We create two mappings for
+ * the register, one with each setting.
+ *
+ * Segments 2 and 3 has a "swapped" mapping (BE)
+ * and 6 and 7 have a non-swapped mapping (LE) which allows
+ * us to avoid byteswapping register accesses since the
+ * registers are all LE.
+ */
+
+ /* Setup segment 0 to our memory region */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG0, master->cf_mem_addr |
+ SCU_2500_COPRO_SEG_SWAP);
+
+ /* Segments 2 and 3 to sysregs with byteswap (for SRAM) */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG2, SYSREG_BASE |
+ SCU_2500_COPRO_SEG_SWAP);
+ regmap_write(master->scu, SCU_2500_COPRO_SEG3, SYSREG_BASE | 0x100000 |
+ SCU_2500_COPRO_SEG_SWAP);
+
+ /* And segment 6 and 7 to sysregs no byteswap */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG6, SYSREG_BASE);
+ regmap_write(master->scu, SCU_2500_COPRO_SEG7, SYSREG_BASE | 0x100000);
+
+ /* Memory cachable, regs and SRAM not cachable */
+ regmap_write(master->scu, SCU_2500_COPRO_CACHE_CTL,
+ SCU_2500_COPRO_SEG0_CACHE_EN | SCU_2500_COPRO_CACHE_EN);
+}
+
+static void setup_ast2400_cf_maps(struct fsi_master_acf *master)
+{
+ /* Setup segment 0 to our memory region */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG0, master->cf_mem_addr |
+ SCU_2400_COPRO_SEG_SWAP);
+
+ /* Segments 2 to sysregs with byteswap (for SRAM) */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG2, SYSREG_BASE |
+ SCU_2400_COPRO_SEG_SWAP);
+
+ /* And segment 6 to sysregs no byteswap */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG6, SYSREG_BASE);
+
+ /* Memory cachable, regs and SRAM not cachable */
+ regmap_write(master->scu, SCU_2400_COPRO_CACHE_CTL,
+ SCU_2400_COPRO_SEG0_CACHE_EN | SCU_2400_COPRO_CACHE_EN);
+}
+
+static void setup_common_fw_config(struct fsi_master_acf *master,
+ void __iomem *base)
+{
+ iowrite16be(master->gpio_clk_vreg, base + HDR_CLOCK_GPIO_VADDR);
+ iowrite16be(master->gpio_clk_dreg, base + HDR_CLOCK_GPIO_DADDR);
+ iowrite16be(master->gpio_dat_vreg, base + HDR_DATA_GPIO_VADDR);
+ iowrite16be(master->gpio_dat_dreg, base + HDR_DATA_GPIO_DADDR);
+ iowrite16be(master->gpio_tra_vreg, base + HDR_TRANS_GPIO_VADDR);
+ iowrite16be(master->gpio_tra_dreg, base + HDR_TRANS_GPIO_DADDR);
+ iowrite8(master->gpio_clk_bit, base + HDR_CLOCK_GPIO_BIT);
+ iowrite8(master->gpio_dat_bit, base + HDR_DATA_GPIO_BIT);
+ iowrite8(master->gpio_tra_bit, base + HDR_TRANS_GPIO_BIT);
+}
+
+static void setup_ast2500_fw_config(struct fsi_master_acf *master)
+{
+ void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+ setup_common_fw_config(master, base);
+ iowrite32be(FW_CONTROL_USE_STOP, base + HDR_FW_CONTROL);
+}
+
+static void setup_ast2400_fw_config(struct fsi_master_acf *master)
+{
+ void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+ setup_common_fw_config(master, base);
+ iowrite32be(FW_CONTROL_CONT_CLOCK|FW_CONTROL_DUMMY_RD, base + HDR_FW_CONTROL);
+}
+
+static int setup_gpios_for_copro(struct fsi_master_acf *master)
+{
+ int rc;
+
+ /* These aren't under ColdFire control, just set them up appropriately */
+ gpiod_direction_output(master->gpio_mux, 1);
+ gpiod_direction_output(master->gpio_enable, 1);
+
+ /* Those are under ColdFire control, let it configure them */
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_clk, &master->gpio_clk_vreg,
+ &master->gpio_clk_dreg, &master->gpio_clk_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign clock gpio to coprocessor\n");
+ return rc;
+ }
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_data, &master->gpio_dat_vreg,
+ &master->gpio_dat_dreg, &master->gpio_dat_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign data gpio to coprocessor\n");
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ return rc;
+ }
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_trans, &master->gpio_tra_vreg,
+ &master->gpio_tra_dreg, &master->gpio_tra_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign trans gpio to coprocessor\n");
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ aspeed_gpio_copro_release_gpio(master->gpio_data);
+ return rc;
+ }
+ return 0;
+}
+
+static void release_copro_gpios(struct fsi_master_acf *master)
+{
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ aspeed_gpio_copro_release_gpio(master->gpio_data);
+ aspeed_gpio_copro_release_gpio(master->gpio_trans);
+}
+
+static int load_copro_firmware(struct fsi_master_acf *master)
+{
+ const struct firmware *fw;
+ uint16_t sig = 0, wanted_sig;
+ const u8 *data;
+ size_t size = 0;
+ int rc;
+
+ /* Get the binary */
+ rc = request_firmware(&fw, FW_FILE_NAME, master->dev);
+ if (rc) {
+ dev_err(master->dev, "Error %d loading firmware '%s'\n",
+ rc, FW_FILE_NAME);
+ return rc;
+ }
+
+ /* Which image do we want? (shared vs. split clock/data GPIOs) */
+ if (master->gpio_clk_vreg == master->gpio_dat_vreg)
+ wanted_sig = SYS_SIG_SHARED;
+ else
+ wanted_sig = SYS_SIG_SPLIT;
+ dev_dbg(master->dev, "Looking for image sig %04x\n", wanted_sig);
+
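+ /*
+ * The blob is assumed to be a concatenation of firmware images,
+ * each advertising its signature and size in its header, so we
+ * can hop from image to image until the signature matches.
+ */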
+ /* Try to find it */
+ for (data = fw->data; data < (fw->data + fw->size);) {
+ sig = be16_to_cpup((__be16 *)(data + HDR_OFFSET + HDR_SYS_SIG));
+ size = be32_to_cpup((__be32 *)(data + HDR_OFFSET + HDR_FW_SIZE));
+ if (sig == wanted_sig)
+ break;
+ data += size;
+ }
+ if (sig != wanted_sig) {
+ dev_err(master->dev, "Failed to locate image sig %04x in FW blob\n",
+ wanted_sig);
+ rc = -ENODEV;
+ goto release_fw;
+ }
+ if (size > master->cf_mem_size) {
+ dev_err(master->dev, "FW size (%zd) bigger than memory reserve (%zd)\n",
+ fw->size, master->cf_mem_size);
+ rc = -ENOMEM;
+ } else {
+ memcpy_toio(master->cf_mem, data, size);
+ }
+
+release_fw:
+ release_firmware(fw);
+ return rc;
+}
+
+static int check_firmware_image(struct fsi_master_acf *master)
+{
+ uint32_t fw_vers, fw_api, fw_options;
+
+ fw_vers = ioread16be(master->cf_mem + HDR_OFFSET + HDR_FW_VERS);
+ fw_api = ioread16be(master->cf_mem + HDR_OFFSET + HDR_API_VERS);
+ fw_options = ioread32be(master->cf_mem + HDR_OFFSET + HDR_FW_OPTIONS);
+ master->trace_enabled = !!(fw_options & FW_OPTION_TRACE_EN);
+
+ /* Check version and signature */
+ dev_info(master->dev, "ColdFire initialized, firmware v%d API v%d.%d (trace %s)\n",
+ fw_vers, fw_api >> 8, fw_api & 0xff,
+ master->trace_enabled ? "enabled" : "disabled");
+
+ if ((fw_api >> 8) != API_VERSION_MAJ) {
+ dev_err(master->dev, "Unsupported coprocessor API version !\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int copro_enable_sw_irq(struct fsi_master_acf *master)
+{
+ int timeout;
+ uint32_t val;
+
+ /*
+ * Enable coprocessor interrupt input. I've had problems getting the
+ * value to stick, so try in a loop
+ */
+ for (timeout = 0; timeout < 10; timeout++) {
+ iowrite32(0x2, master->cvic + CVIC_EN_REG);
+ val = ioread32(master->cvic + CVIC_EN_REG);
+ if (val & 2)
+ break;
+ msleep(1);
+ }
+ if (!(val & 2)) {
+ dev_err(master->dev, "Failed to enable coprocessor interrupt !\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int fsi_master_acf_setup(struct fsi_master_acf *master)
+{
+ int timeout, rc;
+ uint32_t val;
+
+ /* Make sure the ColdFire is stopped */
+ reset_cf(master);
+
+ /*
+ * Clear SRAM. This needs to happen before we setup the GPIOs
+ * as we might start trying to arbitrate as soon as that happens.
+ */
+ memset_io(master->sram, 0, SRAM_SIZE);
+
+ /* Configure GPIOs */
+ rc = setup_gpios_for_copro(master);
+ if (rc)
+ return rc;
+
+ /* Load the firmware into the reserved memory */
+ rc = load_copro_firmware(master);
+ if (rc)
+ return rc;
+
+ /* Read signature and check versions */
+ rc = check_firmware_image(master);
+ if (rc)
+ return rc;
+
+ /* Set up the ColdFire memory map */
+ if (master->is_ast2500) {
+ setup_ast2500_cf_maps(master);
+ setup_ast2500_fw_config(master);
+ } else {
+ setup_ast2400_cf_maps(master);
+ setup_ast2400_fw_config(master);
+ }
+
+ /* Start the ColdFire */
+ start_cf(master);
+
+ /* Wait for status register to indicate command completion
+ * which signals the initialization is complete
+ */
+ for (timeout = 0; timeout < 10; timeout++) {
+ val = ioread8(master->sram + CF_STARTED);
+ if (val)
+ break;
+ msleep(1);
+ }
+ if (!val) {
+ dev_err(master->dev, "Coprocessor startup timeout !\n");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* Configure echo & send delay */
+ iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+ iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+
+ /* Enable SW interrupt to copro if any */
+ if (master->cvic) {
+ rc = copro_enable_sw_irq(master);
+ if (rc)
+ goto err;
+ }
+ return 0;
+ err:
+ /* An error occurred, don't leave the coprocessor running */
+ reset_cf(master);
+
+ /* Release the GPIOs */
+ release_copro_gpios(master);
+
+ return rc;
+}
+
+static void fsi_master_acf_terminate(struct fsi_master_acf *master)
+{
+ unsigned long flags;
+
+ /*
+ * A GPIO arbitration request could come in while this is
+ * happening. To avoid problems, we disable interrupts so it
+ * cannot preempt us on this CPU.
+ */
+
+ local_irq_save(flags);
+
+ /* Stop the coprocessor */
+ reset_cf(master);
+
+ /* We mark the copro not-started */
+ iowrite32(0, master->sram + CF_STARTED);
+
+ /* We mark the ARB register as having given up arbitration to
+ * deal with a potential race with the arbitration request
+ */
+ iowrite8(ARB_ARM_ACK, master->sram + ARB_REG);
+
+ local_irq_restore(flags);
+
+ /* Return the GPIOs to the ARM */
+ release_copro_gpios(master);
+}
+
+static void fsi_master_acf_setup_external(struct fsi_master_acf *master)
+{
+ /* Setup GPIOs for external FSI master (FSP box) */
+ gpiod_direction_output(master->gpio_mux, 0);
+ gpiod_direction_output(master->gpio_trans, 0);
+ gpiod_direction_output(master->gpio_enable, 1);
+ gpiod_direction_input(master->gpio_clk);
+ gpiod_direction_input(master->gpio_data);
+}
+
+static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ int rc = -EBUSY;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ if (!master->external_mode) {
+ gpiod_set_value(master->gpio_enable, 1);
+ rc = 0;
+ }
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_link_config(struct fsi_master *_master, int link,
+ u8 t_send_delay, u8 t_echo_delay)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ master->t_send_delay = t_send_delay;
+ master->t_echo_delay = t_echo_delay;
+ dev_dbg(master->dev, "Changing delays: send=%d echo=%d\n",
+ t_send_delay, t_echo_delay);
+ iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+ iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+ mutex_unlock(&master->lock);
+
+ return 0;
+}
+
+static ssize_t external_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsi_master_acf *master = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%u\n",
+ master->external_mode ? 1 : 0);
+}
+
+static ssize_t external_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_master_acf *master = dev_get_drvdata(dev);
+ unsigned long val;
+ bool external_mode;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ external_mode = !!val;
+
+ mutex_lock(&master->lock);
+
+ if (external_mode == master->external_mode) {
+ mutex_unlock(&master->lock);
+ return count;
+ }
+
+ master->external_mode = external_mode;
+ if (master->external_mode) {
+ fsi_master_acf_terminate(master);
+ fsi_master_acf_setup_external(master);
+ } else {
+ fsi_master_acf_setup(master);
+ }
+
+ mutex_unlock(&master->lock);
+
+ fsi_master_rescan(&master->master);
+
+ return count;
+}
+
+static DEVICE_ATTR(external_mode, 0664,
+ external_mode_show, external_mode_store);
+
+static int fsi_master_acf_gpio_request(void *data)
+{
+ struct fsi_master_acf *master = data;
+ int timeout;
+ u8 val;
+
+ /* Note: this doesn't require holding our mutex */
+
+ /* Write request */
+ iowrite8(ARB_ARM_REQ, master->sram + ARB_REG);
+
+ /*
+ * There is a race (which does happen at boot time) when we get an
+ * arbitration request as we are either about to or just starting
+ * the coprocessor.
+ *
+ * To handle it, we first check if we are running. If not yet we
+ * check whether the copro is started in the SCU.
+ *
+ * If it's not started, we can basically just assume we have arbitration
+ * and return. Otherwise, we wait normally expecting for the arbitration
+ * to eventually complete.
+ */
+ if (ioread32(master->sram + CF_STARTED) == 0) {
+ unsigned int reg = 0;
+
+ regmap_read(master->scu, SCU_COPRO_CTRL, &reg);
+ if (!(reg & SCU_COPRO_CLK_EN))
+ return 0;
+ }
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ for (timeout = 0; timeout < 10000; timeout++) {
+ val = ioread8(master->sram + ARB_REG);
+ if (val != ARB_ARM_REQ)
+ break;
+ udelay(1);
+ }
+
+ /* If arbitration timed out, warn and take over anyway */
+ if (val != ARB_ARM_ACK)
+ dev_warn(master->dev, "GPIO request arbitration timeout\n");
+
+ return 0;
+}
+
+static int fsi_master_acf_gpio_release(void *data)
+{
+ struct fsi_master_acf *master = data;
+
+ /* Write release */
+ iowrite8(0, master->sram + ARB_REG);
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ return 0;
+}
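+
+/*
+ * Summary of the ARB_REG handshake used above (the firmware side is
+ * an assumption based on what this driver expects): the ARM writes
+ * ARB_ARM_REQ and rings the doorbell, the coprocessor parks its
+ * engine and answers with ARB_ARM_ACK, and a write of 0 hands the
+ * GPIO banks back to the coprocessor.
+ */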
+
+static void fsi_master_acf_release(struct device *dev)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(dev_to_fsi_master(dev));
+
+ /* Cleanup, stop coprocessor */
+ mutex_lock(&master->lock);
+ fsi_master_acf_terminate(master);
+ aspeed_gpio_copro_set_ops(NULL, NULL);
+ mutex_unlock(&master->lock);
+
+ /* Free resources */
+ gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+ of_node_put(dev_of_node(master->dev));
+
+ kfree(master);
+}
+
+static const struct aspeed_gpio_copro_ops fsi_master_acf_gpio_ops = {
+ .request_access = fsi_master_acf_gpio_request,
+ .release_access = fsi_master_acf_gpio_release,
+};
+
+static int fsi_master_acf_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *mnode = dev_of_node(&pdev->dev);
+ struct genpool_data_fixed gpdf;
+ struct fsi_master_acf *master;
+ struct gpio_desc *gpio;
+ struct resource res;
+ uint32_t cf_mem_align;
+ int rc;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = &pdev->dev;
+ master->master.dev.parent = master->dev;
+ master->last_addr = LAST_ADDR_INVALID;
+
+ /* AST2400 vs. AST2500 */
+ master->is_ast2500 = of_device_is_compatible(mnode, "aspeed,ast2500-cf-fsi-master");
+
+ /* Grab the SCU, we'll need to access it to configure the coprocessor */
+ if (master->is_ast2500)
+ master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ else
+ master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2400-scu");
+ if (IS_ERR(master->scu)) {
+ dev_err(&pdev->dev, "failed to find SCU regmap\n");
+ rc = PTR_ERR(master->scu);
+ goto err_free;
+ }
+
+ /* Grab all the GPIOs we need */
+ gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get clock gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_clk = gpio;
+
+ gpio = devm_gpiod_get(&pdev->dev, "data", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get data gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_data = gpio;
+
+ /* Optional GPIOs */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get trans gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_trans = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get enable gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_enable = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get mux gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_mux = gpio;
+
+ /* Grab the reserved memory region (use DMA API instead?) */
+ np = of_parse_phandle(mnode, "memory-region", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Didn't find reserved memory\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ rc = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (rc) {
+ dev_err(&pdev->dev, "Couldn't address to resource for reserved memory\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ master->cf_mem_size = resource_size(&res);
+ master->cf_mem_addr = (uint32_t)res.start;
+ cf_mem_align = master->is_ast2500 ? 0x00100000 : 0x00200000;
+ if (master->cf_mem_addr & (cf_mem_align - 1)) {
+ dev_err(&pdev->dev, "Reserved memory has insufficient alignment\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ master->cf_mem = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(master->cf_mem)) {
+ rc = PTR_ERR(master->cf_mem);
+ dev_err(&pdev->dev, "Error %d mapping coldfire memory\n", rc);
+ goto err_free;
+ }
+ dev_dbg(&pdev->dev, "DRAM allocation @%x\n", master->cf_mem_addr);
+
+ /* AST2500 has a SW interrupt to the coprocessor */
+ if (master->is_ast2500) {
+ /* Grab the CVIC (ColdFire interrupts controller) */
+ np = of_parse_phandle(mnode, "aspeed,cvic", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Didn't find CVIC\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ master->cvic = devm_of_iomap(&pdev->dev, np, 0, NULL);
+ if (IS_ERR(master->cvic)) {
+ rc = PTR_ERR(master->cvic);
+ dev_err(&pdev->dev, "Error %d mapping CVIC\n", rc);
+ goto err_free;
+ }
+ rc = of_property_read_u32(np, "copro-sw-interrupts",
+ &master->cvic_sw_irq);
+ if (rc) {
+ dev_err(&pdev->dev, "Can't find coprocessor SW interrupt\n");
+ goto err_free;
+ }
+ }
+
+ /* Grab the SRAM */
+ master->sram_pool = of_gen_pool_get(dev_of_node(&pdev->dev), "aspeed,sram", 0);
+ if (!master->sram_pool) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "Can't find sram pool\n");
+ goto err_free;
+ }
+
+ /* Current microcode only deals with fixed location in SRAM */
+ gpdf.offset = 0;
+ master->sram = (void __iomem *)gen_pool_alloc_algo(master->sram_pool, SRAM_SIZE,
+ gen_pool_fixed_alloc, &gpdf);
+ if (!master->sram) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate sram from pool\n");
+ goto err_free;
+ }
+ dev_dbg(&pdev->dev, "SRAM allocation @%lx\n",
+ (unsigned long)gen_pool_virt_to_phys(master->sram_pool,
+ (unsigned long)master->sram));
+
+ /*
+ * Hookup with the GPIO driver for arbitration of GPIO banks
+ * ownership.
+ */
+ aspeed_gpio_copro_set_ops(&fsi_master_acf_gpio_ops, master);
+
+ /* Default FSI command delays */
+ master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+ master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+ master->master.n_links = 1;
+ if (master->is_ast2500)
+ master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
+ master->master.read = fsi_master_acf_read;
+ master->master.write = fsi_master_acf_write;
+ master->master.term = fsi_master_acf_term;
+ master->master.send_break = fsi_master_acf_break;
+ master->master.link_enable = fsi_master_acf_link_enable;
+ master->master.link_config = fsi_master_acf_link_config;
+ master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+ master->master.dev.release = fsi_master_acf_release;
+ platform_set_drvdata(pdev, master);
+ mutex_init(&master->lock);
+
+ mutex_lock(&master->lock);
+ rc = fsi_master_acf_setup(master);
+ mutex_unlock(&master->lock);
+ if (rc)
+ goto release_of_dev;
+
+ rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
+ if (rc)
+ goto stop_copro;
+
+ rc = fsi_master_register(&master->master);
+ if (!rc)
+ return 0;
+
+ device_remove_file(master->dev, &dev_attr_external_mode);
+ put_device(&master->master.dev);
+ return rc;
+
+ stop_copro:
+ fsi_master_acf_terminate(master);
+ release_of_dev:
+ aspeed_gpio_copro_set_ops(NULL, NULL);
+ gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+ of_node_put(dev_of_node(master->dev));
+ err_free:
+ kfree(master);
+ return rc;
+}
+
+static int fsi_master_acf_remove(struct platform_device *pdev)
+{
+ struct fsi_master_acf *master = platform_get_drvdata(pdev);
+
+ device_remove_file(master->dev, &dev_attr_external_mode);
+
+ fsi_master_unregister(&master->master);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_acf_match[] = {
+ { .compatible = "aspeed,ast2400-cf-fsi-master" },
+ { .compatible = "aspeed,ast2500-cf-fsi-master" },
+ { },
+};
+
+static struct platform_driver fsi_master_acf = {
+ .driver = {
+ .name = "fsi-master-acf",
+ .of_match_table = fsi_master_acf_match,
+ },
+ .probe = fsi_master_acf_probe,
+ .remove = fsi_master_acf_remove,
+};
+
+module_platform_driver(fsi_master_acf);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index 3f487449a277..4eb3a766fd4a 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -8,59 +8,31 @@
#include <linux/fsi.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
+#include <linux/irqflags.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include "fsi-master.h"
#define FSI_GPIO_STD_DLY 1 /* Standard pin delay in nS */
-#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
-#define FSI_PRE_BREAK_CLOCKS 50 /* Number clocks to prep for break */
-#define FSI_BREAK_CLOCKS 256 /* Number of clocks to issue break */
-#define FSI_POST_BREAK_CLOCKS 16000 /* Number clocks to set up cfam */
-#define FSI_INIT_CLOCKS 5000 /* Clock out any old data */
-#define FSI_GPIO_STD_DELAY 10 /* Standard GPIO delay in nS */
- /* todo: adjust down as low as */
- /* possible or eliminate */
-#define FSI_GPIO_CMD_DPOLL 0x2
-#define FSI_GPIO_CMD_TERM 0x3f
-#define FSI_GPIO_CMD_ABS_AR 0x4
-
-#define FSI_GPIO_DPOLL_CLOCKS 100 /* < 21 will cause slave to hang */
-
-/* Bus errors */
-#define FSI_GPIO_ERR_BUSY 1 /* Slave stuck in busy state */
-#define FSI_GPIO_RESP_ERRA 2 /* Any (misc) Error */
-#define FSI_GPIO_RESP_ERRC 3 /* Slave reports master CRC error */
-#define FSI_GPIO_MTOE 4 /* Master time out error */
-#define FSI_GPIO_CRC_INVAL 5 /* Master reports slave CRC error */
-
-/* Normal slave responses */
-#define FSI_GPIO_RESP_BUSY 1
-#define FSI_GPIO_RESP_ACK 0
-#define FSI_GPIO_RESP_ACKD 4
-
-#define FSI_GPIO_MAX_BUSY 100
-#define FSI_GPIO_MTOE_COUNT 1000
-#define FSI_GPIO_DRAIN_BITS 20
-#define FSI_GPIO_CRC_SIZE 4
-#define FSI_GPIO_MSG_ID_SIZE 2
-#define FSI_GPIO_MSG_RESPID_SIZE 2
-#define FSI_GPIO_PRIME_SLAVE_CLOCKS 100
+#define LAST_ADDR_INVALID 0x1
struct fsi_master_gpio {
struct fsi_master master;
struct device *dev;
- spinlock_t cmd_lock; /* Lock for commands */
+ struct mutex cmd_lock; /* mutex for command ordering */
struct gpio_desc *gpio_clk;
struct gpio_desc *gpio_data;
struct gpio_desc *gpio_trans; /* Voltage translator */
struct gpio_desc *gpio_enable; /* FSI enable */
struct gpio_desc *gpio_mux; /* Mux control */
bool external_mode;
+ bool no_delays;
+ uint32_t last_addr;
+ uint8_t t_send_delay;
+ uint8_t t_echo_delay;
};
#define CREATE_TRACE_POINTS
@@ -78,19 +50,31 @@ static void clock_toggle(struct fsi_master_gpio *master, int count)
int i;
for (i = 0; i < count; i++) {
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 0);
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 1);
}
}
-static int sda_in(struct fsi_master_gpio *master)
+static int sda_clock_in(struct fsi_master_gpio *master)
{
int in;
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 0);
+
+ /* Dummy read to feed the synchronizers */
+ gpiod_get_value(master->gpio_data);
+
+ /* Actual data read */
in = gpiod_get_value(master->gpio_data);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 1);
return in ? 1 : 0;
}
@@ -113,10 +97,17 @@ static void set_sda_output(struct fsi_master_gpio *master, int value)
static void clock_zeros(struct fsi_master_gpio *master, int count)
{
+ trace_fsi_master_gpio_clock_zeros(master, count);
set_sda_output(master, 1);
clock_toggle(master, count);
}
+static void echo_delay(struct fsi_master_gpio *master)
+{
+ clock_zeros(master, master->t_echo_delay);
+}
+
static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
uint8_t num_bits)
{
@@ -125,8 +116,7 @@ static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
set_sda_input(master);
for (bit = 0; bit < num_bits; bit++) {
- clock_toggle(master, 1);
- in_bit = sda_in(master);
+ in_bit = sda_clock_in(master);
msg->msg <<= 1;
msg->msg |= ~in_bit & 0x1; /* Data is active low */
}
@@ -191,22 +181,92 @@ static void msg_push_crc(struct fsi_gpio_msg *msg)
msg_push_bits(msg, crc, 4);
}
+static bool check_same_address(struct fsi_master_gpio *master, int id,
+ uint32_t addr)
+{
+ /* this will also handle LAST_ADDR_INVALID */
+ return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_gpio *master, int id,
+ uint32_t addr, uint32_t *rel_addrp)
+{
+ uint32_t last_addr = master->last_addr;
+ int32_t rel_addr;
+
+ if (last_addr == LAST_ADDR_INVALID)
+ return false;
+
+ /* We may be in 23-bit addressing mode, which uses the id as the
+ * top two address bits. So, if we're referencing a different ID,
+ * use absolute addresses.
+ */
+ if (((last_addr >> 21) & 0x3) != id)
+ return false;
+
+ /* remove the top two bits from any 23-bit addressing */
+ last_addr &= (1 << 21) - 1;
+
+ /* We know that the addresses are limited to 21 bits, so this won't
+ * overflow the signed rel_addr */
+ rel_addr = addr - last_addr;
+ if (rel_addr > 255 || rel_addr < -256)
+ return false;
+
+ *rel_addrp = (uint32_t)rel_addr;
+
+ return true;
+}
+
+static void last_address_update(struct fsi_master_gpio *master,
+ int id, bool valid, uint32_t addr)
+{
+ if (!valid)
+ master->last_addr = LAST_ADDR_INVALID;
+ else
+ master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
+
/*
- * Encode an Absolute Address command
+ * Encode an Absolute/Relative/Same Address command
*/
-static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
- uint8_t id, uint32_t addr, size_t size, const void *data)
+static void build_ar_command(struct fsi_master_gpio *master,
+ struct fsi_gpio_msg *cmd, uint8_t id,
+ uint32_t addr, size_t size, const void *data)
{
+ int i, addr_bits, opcode_bits;
bool write = !!data;
- uint8_t ds;
- int i;
+ uint8_t ds, opcode;
+ uint32_t rel_addr;
cmd->bits = 0;
cmd->msg = 0;
- msg_push_bits(cmd, id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_ABS_AR, 3);
- msg_push_bits(cmd, write ? 0 : 1, 1);
+ /* we have 21 bits of address max */
+ addr &= ((1 << 21) - 1);
+
+ /* cmd opcodes are variable length - SAME_AR is only two bits */
+ opcode_bits = 3;
+
+ if (check_same_address(master, id, addr)) {
+ /* we still address the byte offset within the word */
+ addr_bits = 2;
+ opcode_bits = 2;
+ opcode = FSI_CMD_SAME_AR;
+ trace_fsi_master_gpio_cmd_same_addr(master);
+
+ } else if (check_relative_address(master, id, addr, &rel_addr)) {
+ /* 8 bits plus sign */
+ addr_bits = 9;
+ addr = rel_addr;
+ opcode = FSI_CMD_REL_AR;
+ trace_fsi_master_gpio_cmd_rel_addr(master, rel_addr);
+
+ } else {
+ addr_bits = 21;
+ opcode = FSI_CMD_ABS_AR;
+ trace_fsi_master_gpio_cmd_abs_addr(master, addr);
+ }
/*
* The read/write size is encoded in the lower bits of the address
@@ -223,7 +283,10 @@ static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
if (size == 4)
addr |= 1;
- msg_push_bits(cmd, addr & ((1 << 21) - 1), 21);
+ msg_push_bits(cmd, id, 2);
+ msg_push_bits(cmd, opcode, opcode_bits);
+ msg_push_bits(cmd, write ? 0 : 1, 1);
+ msg_push_bits(cmd, addr, addr_bits);
msg_push_bits(cmd, ds, 1);
for (i = 0; write && i < size; i++)
msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
@@ -237,14 +300,18 @@ static void build_dpoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_DPOLL, 3);
+ msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
msg_push_crc(cmd);
}
-static void echo_delay(struct fsi_master_gpio *master)
+static void build_epoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
{
- set_sda_output(master, 1);
- clock_toggle(master, FSI_ECHO_DELAY_CLOCKS);
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+ msg_push_crc(cmd);
}
static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
@@ -253,40 +320,40 @@ static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_TERM, 6);
+ msg_push_bits(cmd, FSI_CMD_TERM, 6);
msg_push_crc(cmd);
}
/*
- * Store information on master errors so handler can detect and clean
- * up the bus
+ * Note: callers rely specifically on this returning -EAGAIN for
+ * a CRC error detected in the response. Use other error codes
+ * for other situations; they will be converted to something else
+ * higher up the stack before reaching userspace.
*/
-static void fsi_master_gpio_error(struct fsi_master_gpio *master, int error)
-{
-
-}
-
static int read_one_response(struct fsi_master_gpio *master,
uint8_t data_size, struct fsi_gpio_msg *msgp, uint8_t *tagp)
{
struct fsi_gpio_msg msg;
- uint8_t id, tag;
+ unsigned long flags;
uint32_t crc;
+ uint8_t tag;
int i;
+ local_irq_save(flags);
+
/* wait for the start bit */
- for (i = 0; i < FSI_GPIO_MTOE_COUNT; i++) {
+ for (i = 0; i < FSI_MASTER_MTOE_COUNT; i++) {
msg.bits = 0;
msg.msg = 0;
serial_in(master, &msg, 1);
if (msg.msg)
break;
}
- if (i == FSI_GPIO_MTOE_COUNT) {
+ if (i == FSI_MASTER_MTOE_COUNT) {
dev_dbg(master->dev,
"Master time out waiting for response\n");
- fsi_master_gpio_error(master, FSI_GPIO_MTOE);
- return -EIO;
+ local_irq_restore(flags);
+ return -ETIMEDOUT;
}
msg.bits = 0;
@@ -295,23 +362,27 @@ static int read_one_response(struct fsi_master_gpio *master,
/* Read slave ID & response tag */
serial_in(master, &msg, 4);
- id = (msg.msg >> FSI_GPIO_MSG_RESPID_SIZE) & 0x3;
tag = msg.msg & 0x3;
/* If we have an ACK and we're expecting data, clock the data in too */
- if (tag == FSI_GPIO_RESP_ACK && data_size)
+ if (tag == FSI_RESP_ACK && data_size)
serial_in(master, &msg, data_size * 8);
/* read CRC */
- serial_in(master, &msg, FSI_GPIO_CRC_SIZE);
+ serial_in(master, &msg, FSI_CRC_SIZE);
+
+ local_irq_restore(flags);
/* we have a whole message now; check CRC */
crc = crc4(0, 1, 1);
crc = crc4(crc, msg.msg, msg.bits);
if (crc) {
- dev_dbg(master->dev, "ERR response CRC\n");
- fsi_master_gpio_error(master, FSI_GPIO_CRC_INVAL);
- return -EIO;
+ /* Check if it's all 1's, that probably means the host is off */
+ if (((~msg.msg) & ((1ull << msg.bits) - 1)) == 0)
+ return -ENODEV;
+ dev_dbg(master->dev, "ERR response CRC msg: 0x%016llx (%d bits)\n",
+ msg.msg, msg.bits);
+ return -EAGAIN;
}
if (msgp)
@@ -325,19 +396,23 @@ static int read_one_response(struct fsi_master_gpio *master,
static int issue_term(struct fsi_master_gpio *master, uint8_t slave)
{
struct fsi_gpio_msg cmd;
+ unsigned long flags;
uint8_t tag;
int rc;
build_term_command(&cmd, slave);
+
+ local_irq_save(flags);
serial_out(master, &cmd);
echo_delay(master);
+ local_irq_restore(flags);
rc = read_one_response(master, 0, NULL, &tag);
if (rc < 0) {
dev_err(master->dev,
"TERM failed; lost communication with slave\n");
return -EIO;
- } else if (tag != FSI_GPIO_RESP_ACK) {
+ } else if (tag != FSI_RESP_ACK) {
dev_err(master->dev, "TERM failed; response %d\n", tag);
return -EIO;
}
@@ -350,16 +425,39 @@ static int poll_for_response(struct fsi_master_gpio *master,
{
struct fsi_gpio_msg response, cmd;
int busy_count = 0, rc, i;
+ unsigned long flags;
uint8_t tag;
uint8_t *data_byte = data;
-
+ int crc_err_retries = 0;
retry:
rc = read_one_response(master, size, &response, &tag);
- if (rc)
- return rc;
+
+ /* Handle retries on CRC errors */
+ if (rc == -EAGAIN) {
+ /* Too many retries ? */
+ if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+ /*
+ * Pass it up as a -EIO otherwise upper level will retry
+ * the whole command which isn't what we want here.
+ */
+ rc = -EIO;
+ goto fail;
+ }
+ dev_dbg(master->dev,
+ "CRC error retry %d\n", crc_err_retries);
+ trace_fsi_master_gpio_crc_rsp_error(master);
+ build_epoll_command(&cmd, slave);
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+ serial_out(master, &cmd);
+ echo_delay(master);
+ local_irq_restore(flags);
+ goto retry;
+ } else if (rc)
+ goto fail;
switch (tag) {
- case FSI_GPIO_RESP_ACK:
+ case FSI_RESP_ACK:
if (size && data) {
uint64_t val = response.msg;
/* clear crc & mask */
@@ -372,57 +470,89 @@ retry:
}
}
break;
- case FSI_GPIO_RESP_BUSY:
+ case FSI_RESP_BUSY:
/*
* It's necessary to clock the slave before issuing a
* d-poll, though this isn't indicated in the hardware
* protocol spec. Fewer than 20 clocks cause the slave to
* hang; 21 are enough.
*/
- clock_zeros(master, FSI_GPIO_DPOLL_CLOCKS);
- if (busy_count++ < FSI_GPIO_MAX_BUSY) {
+ if (busy_count++ < FSI_MASTER_MAX_BUSY) {
build_dpoll_command(&cmd, slave);
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
serial_out(master, &cmd);
echo_delay(master);
+ local_irq_restore(flags);
goto retry;
}
dev_warn(master->dev,
"ERR slave is stuck in busy state, issuing TERM\n");
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+ local_irq_restore(flags);
issue_term(master, slave);
rc = -EIO;
break;
- case FSI_GPIO_RESP_ERRA:
- case FSI_GPIO_RESP_ERRC:
- dev_dbg(master->dev, "ERR%c received: 0x%x\n",
- tag == FSI_GPIO_RESP_ERRA ? 'A' : 'C',
- (int)response.msg);
- fsi_master_gpio_error(master, response.msg);
+ case FSI_RESP_ERRA:
+ dev_dbg(master->dev, "ERRA received: 0x%x\n", (int)response.msg);
rc = -EIO;
break;
+ case FSI_RESP_ERRC:
+ dev_dbg(master->dev, "ERRC received: 0x%x\n", (int)response.msg);
+ trace_fsi_master_gpio_crc_cmd_error(master);
+ rc = -EAGAIN;
+ break;
}
- /* Clock the slave enough to be ready for next operation */
- clock_zeros(master, FSI_GPIO_PRIME_SLAVE_CLOCKS);
+ if (busy_count > 0)
+ trace_fsi_master_gpio_poll_response_busy(master, busy_count);
+ fail:
+ /*
+ * tSendDelay clocks, avoids signal reflections when switching
+ * from receive of response back to send of data.
+ */
+ local_irq_save(flags);
+ clock_zeros(master, master->t_send_delay);
+ local_irq_restore(flags);
+
return rc;
}
-static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
- struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+static int send_request(struct fsi_master_gpio *master,
+ struct fsi_gpio_msg *cmd)
{
unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&master->cmd_lock, flags);
- if (master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ if (master->external_mode)
return -EBUSY;
- }
+ local_irq_save(flags);
serial_out(master, cmd);
echo_delay(master);
- rc = poll_for_response(master, slave, resp_len, resp);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
+ struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+{
+ int rc = -EAGAIN, retries = 0;
+
+ while ((retries++) < FSI_CRC_ERR_RETRIES) {
+ rc = send_request(master, cmd);
+ if (rc)
+ break;
+ rc = poll_for_response(master, slave, resp_len, resp);
+ if (rc != -EAGAIN)
+ break;
+ rc = -EIO;
+ dev_warn(master->dev, "ECRC retry %d\n", retries);
+
+ /* Pace it a bit before retry */
+ msleep(1);
+ }
return rc;
}
@@ -432,12 +562,18 @@ static int fsi_master_gpio_read(struct fsi_master *_master, int link,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
- build_abs_ar_command(&cmd, id, addr, size, NULL);
- return fsi_master_gpio_xfer(master, id, &cmd, size, val);
+ mutex_lock(&master->cmd_lock);
+ build_ar_command(master, &cmd, id, addr, size, NULL);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, size, val);
+ last_address_update(master, id, rc == 0, addr);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_write(struct fsi_master *_master, int link,
@@ -445,12 +581,18 @@ static int fsi_master_gpio_write(struct fsi_master *_master, int link,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
- build_abs_ar_command(&cmd, id, addr, size, val);
- return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ mutex_lock(&master->cmd_lock);
+ build_ar_command(master, &cmd, id, addr, size, val);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, rc == 0, addr);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_term(struct fsi_master *_master,
@@ -458,12 +600,18 @@ static int fsi_master_gpio_term(struct fsi_master *_master,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
+ mutex_lock(&master->cmd_lock);
build_term_command(&cmd, id);
- return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, false, 0);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_break(struct fsi_master *_master, int link)
@@ -476,11 +624,14 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
trace_fsi_master_gpio_break(master);
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return -EBUSY;
}
+
+ local_irq_save(flags);
+
set_sda_output(master, 1);
sda_out(master, 1);
clock_toggle(master, FSI_PRE_BREAK_CLOCKS);
@@ -489,7 +640,11 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
echo_delay(master);
sda_out(master, 1);
clock_toggle(master, FSI_POST_BREAK_CLOCKS);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+ local_irq_restore(flags);
+
+ last_address_update(master, 0, false, 0);
+ mutex_unlock(&master->cmd_lock);
/* Wait for logic reset to take effect */
udelay(200);
@@ -499,6 +654,8 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
static void fsi_master_gpio_init(struct fsi_master_gpio *master)
{
+ unsigned long flags;
+
gpiod_direction_output(master->gpio_mux, 1);
gpiod_direction_output(master->gpio_trans, 1);
gpiod_direction_output(master->gpio_enable, 1);
@@ -506,7 +663,9 @@ static void fsi_master_gpio_init(struct fsi_master_gpio *master)
gpiod_direction_output(master->gpio_data, 1);
/* todo: evaluate if clocks can be reduced */
+ local_irq_save(flags);
clock_zeros(master, FSI_INIT_CLOCKS);
+ local_irq_restore(flags);
}
static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
@@ -521,22 +680,37 @@ static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
- unsigned long flags;
int rc = -EBUSY;
if (link != 0)
return -ENODEV;
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (!master->external_mode) {
gpiod_set_value(master->gpio_enable, 1);
rc = 0;
}
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return rc;
}
+static int fsi_master_gpio_link_config(struct fsi_master *_master, int link,
+ u8 t_send_delay, u8 t_echo_delay)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->cmd_lock);
+ master->t_send_delay = t_send_delay;
+ master->t_echo_delay = t_echo_delay;
+ mutex_unlock(&master->cmd_lock);
+
+ return 0;
+}
+
static ssize_t external_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -550,7 +724,7 @@ static ssize_t external_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_master_gpio *master = dev_get_drvdata(dev);
- unsigned long flags, val;
+ unsigned long val;
bool external_mode;
int err;
@@ -560,10 +734,10 @@ static ssize_t external_mode_store(struct device *dev,
external_mode = !!val;
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (external_mode == master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return count;
}
@@ -572,7 +746,8 @@ static ssize_t external_mode_store(struct device *dev,
fsi_master_gpio_init_external(master);
else
fsi_master_gpio_init(master);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+ mutex_unlock(&master->cmd_lock);
fsi_master_rescan(&master->master);
@@ -582,31 +757,44 @@ static ssize_t external_mode_store(struct device *dev,
static DEVICE_ATTR(external_mode, 0664,
external_mode_show, external_mode_store);
+static void fsi_master_gpio_release(struct device *dev)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(dev_to_fsi_master(dev));
+
+ of_node_put(dev_of_node(master->dev));
+
+ kfree(master);
+}
+
static int fsi_master_gpio_probe(struct platform_device *pdev)
{
struct fsi_master_gpio *master;
struct gpio_desc *gpio;
int rc;
- master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->dev = &pdev->dev;
master->master.dev.parent = master->dev;
master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+ master->master.dev.release = fsi_master_gpio_release;
+ master->last_addr = LAST_ADDR_INVALID;
gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get clock gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_clk = gpio;
gpio = devm_gpiod_get(&pdev->dev, "data", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get data gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_data = gpio;
@@ -614,24 +802,38 @@ static int fsi_master_gpio_probe(struct platform_device *pdev)
gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get trans gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_trans = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get enable gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_enable = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get mux gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_mux = gpio;
+ /*
+	 * Check if GPIO block is slow enough that no extra delays
+ * are necessary. This improves performance on ast2500 by
+ * an order of magnitude.
+ */
+ master->no_delays = device_property_present(&pdev->dev, "no-gpio-delays");
+
+ /* Default FSI command delays */
+ master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+ master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+
master->master.n_links = 1;
master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
master->master.read = fsi_master_gpio_read;
@@ -639,34 +841,37 @@ static int fsi_master_gpio_probe(struct platform_device *pdev)
master->master.term = fsi_master_gpio_term;
master->master.send_break = fsi_master_gpio_break;
master->master.link_enable = fsi_master_gpio_link_enable;
+ master->master.link_config = fsi_master_gpio_link_config;
platform_set_drvdata(pdev, master);
- spin_lock_init(&master->cmd_lock);
+ mutex_init(&master->cmd_lock);
fsi_master_gpio_init(master);
rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
if (rc)
- return rc;
+ goto err_free;
- return fsi_master_register(&master->master);
+ rc = fsi_master_register(&master->master);
+ if (rc) {
+ device_remove_file(&pdev->dev, &dev_attr_external_mode);
+ put_device(&master->master.dev);
+ return rc;
+ }
+ return 0;
+ err_free:
+ kfree(master);
+ return rc;
}
+
static int fsi_master_gpio_remove(struct platform_device *pdev)
{
struct fsi_master_gpio *master = platform_get_drvdata(pdev);
- devm_gpiod_put(&pdev->dev, master->gpio_clk);
- devm_gpiod_put(&pdev->dev, master->gpio_data);
- if (master->gpio_trans)
- devm_gpiod_put(&pdev->dev, master->gpio_trans);
- if (master->gpio_enable)
- devm_gpiod_put(&pdev->dev, master->gpio_enable);
- if (master->gpio_mux)
- devm_gpiod_put(&pdev->dev, master->gpio_mux);
- fsi_master_unregister(&master->master);
+ device_remove_file(&pdev->dev, &dev_attr_external_mode);
- of_node_put(master->master.dev.of_node);
+ fsi_master_unregister(&master->master);
return 0;
}
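
The conversion above follows one locking pattern throughout the GPIO master: the command lock becomes a sleeping mutex so whole commands are serialised, and interrupts are masked only around the timing-critical bit-banged clocking. A minimal sketch of that shape, where clock_out_bits() is a hypothetical stand-in for the driver's transfer helpers:

/*
 * Illustrative only: serialise whole commands with a mutex (callers
 * may sleep), and disable local interrupts just while bits are being
 * clocked on the wire so the software-driven clock keeps its timing.
 */
static int do_bitbang_command(struct fsi_master_gpio *master)
{
	unsigned long flags;
	int rc;

	mutex_lock(&master->cmd_lock);	/* one command at a time */

	local_irq_save(flags);		/* timing-critical section only */
	rc = clock_out_bits(master);	/* hypothetical transfer helper */
	local_irq_restore(flags);

	mutex_unlock(&master->cmd_lock);
	return rc;
}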
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index 5885fc4a1ef0..b3c1e9debcf2 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -122,7 +122,8 @@ static int hub_master_write(struct fsi_master *master, int link,
static int hub_master_break(struct fsi_master *master, int link)
{
- uint32_t addr, cmd;
+ uint32_t addr;
+ __be32 cmd;
addr = 0x4;
cmd = cpu_to_be32(0xc0de0000);
@@ -205,7 +206,7 @@ static int hub_master_init(struct fsi_master_hub *hub)
if (rc)
return rc;
- reg = ~0;
+ reg = cpu_to_be32(~0);
rc = fsi_device_write(dev, FSI_MSENP0, &reg, sizeof(reg));
if (rc)
return rc;
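
Both hub fixes above are the same class of endianness bug: a wire-format (big-endian) value held in a plain uint32_t, which sparse can only flag once the variable is annotated __be32. The pattern, as an out-of-context fragment:

/* Keep wire-format values in __be32 and convert at the boundary
 * (illustrative fragment, not part of the patch): */
__be32 wire = cpu_to_be32(0xc0de0000);	/* CPU order -> wire order */
u32 host = be32_to_cpu(wire);		/* wire order -> CPU order */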
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index ee0b46086026..040a7d4cf717 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -18,7 +18,41 @@
#define DRIVERS_FSI_MASTER_H
#include <linux/device.h>
+#include <linux/mutex.h>
+/* Various protocol delays */
+#define	FSI_ECHO_DELAY_CLOCKS	16	/* Number of clocks for echo delay */
+#define	FSI_SEND_DELAY_CLOCKS	16	/* Number of clocks for send delay */
+#define	FSI_PRE_BREAK_CLOCKS	50	/* Number of clocks to prep for break */
+#define	FSI_BREAK_CLOCKS	256	/* Number of clocks to issue break */
+#define	FSI_POST_BREAK_CLOCKS	16000	/* Number of clocks to set up cfam */
+#define FSI_INIT_CLOCKS 5000 /* Clock out any old data */
+#define FSI_MASTER_DPOLL_CLOCKS 50 /* < 21 will cause slave to hang */
+#define FSI_MASTER_EPOLL_CLOCKS 50 /* Number of clocks for E_POLL retry */
+
+/* Various retry maximums */
+#define FSI_CRC_ERR_RETRIES 10
+#define FSI_MASTER_MAX_BUSY 200
+#define FSI_MASTER_MTOE_COUNT 1000
+
+/* Command encodings */
+#define FSI_CMD_DPOLL 0x2
+#define FSI_CMD_EPOLL 0x3
+#define FSI_CMD_TERM 0x3f
+#define FSI_CMD_ABS_AR 0x4
+#define FSI_CMD_REL_AR 0x5
+#define FSI_CMD_SAME_AR 0x3 /* but only a 2-bit opcode... */
+
+/* Slave responses */
+#define FSI_RESP_ACK 0 /* Success */
+#define FSI_RESP_BUSY 1 /* Slave busy */
+#define FSI_RESP_ERRA 2 /* Any (misc) Error */
+#define FSI_RESP_ERRC 3 /* Slave reports master CRC error */
+
+/* Misc */
+#define FSI_CRC_SIZE 4
+
+/* fsi-master definition and flags */
#define FSI_MASTER_FLAG_SWCLOCK 0x1
struct fsi_master {
@@ -26,6 +60,7 @@ struct fsi_master {
int idx;
int n_links;
int flags;
+ struct mutex scan_lock;
int (*read)(struct fsi_master *, int link, uint8_t id,
uint32_t addr, void *val, size_t size);
int (*write)(struct fsi_master *, int link, uint8_t id,
@@ -33,6 +68,8 @@ struct fsi_master {
int (*term)(struct fsi_master *, int link, uint8_t id);
int (*send_break)(struct fsi_master *, int link);
int (*link_enable)(struct fsi_master *, int link);
+ int (*link_config)(struct fsi_master *, int link,
+ u8 t_send_delay, u8 t_echo_delay);
};
#define dev_to_fsi_master(d) container_of(d, struct fsi_master, dev)
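
The FSI_RESP_* codes added here give master implementations a shared vocabulary for slave replies. As a rough illustration of how they might partition into Linux error handling (a hypothetical helper, not part of the patch; real masters fold this into their retry loops, and the errno choices are assumptions):

static int fsi_resp_to_errno(unsigned int resp)
{
	switch (resp) {
	case FSI_RESP_ACK:	return 0;	/* success */
	case FSI_RESP_BUSY:	return -EBUSY;	/* slave busy, retry later */
	case FSI_RESP_ERRA:	return -EIO;	/* miscellaneous error */
	case FSI_RESP_ERRC:	return -EAGAIN;	/* master CRC error, resend */
	default:		return -EPROTO;
	}
}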
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
new file mode 100644
index 000000000000..ae861342626e
--- /dev/null
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -0,0 +1,1066 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IBM Corporation 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsi.h>
+#include <linux/fsi-sbefifo.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+/*
+ * The SBEFIFO is a pipe-like FSI device for communicating with
+ * the self boot engine on POWER processors.
+ */
+
+#define DEVICE_NAME "sbefifo"
+#define FSI_ENGID_SBE 0x22
+
+/*
+ * Register layout
+ */
+
+/* Register banks */
+#define SBEFIFO_UP 0x00 /* FSI -> Host */
+#define SBEFIFO_DOWN 0x40 /* Host -> FSI */
+
+/* Per-bank registers */
+#define SBEFIFO_FIFO 0x00 /* The FIFO itself */
+#define SBEFIFO_STS 0x04 /* Status register */
+#define SBEFIFO_STS_PARITY_ERR 0x20000000
+#define SBEFIFO_STS_RESET_REQ 0x02000000
+#define SBEFIFO_STS_GOT_EOT 0x00800000
+#define SBEFIFO_STS_MAX_XFER_LIMIT 0x00400000
+#define SBEFIFO_STS_FULL 0x00200000
+#define SBEFIFO_STS_EMPTY 0x00100000
+#define SBEFIFO_STS_ECNT_MASK 0x000f0000
+#define SBEFIFO_STS_ECNT_SHIFT 16
+#define SBEFIFO_STS_VALID_MASK 0x0000ff00
+#define SBEFIFO_STS_VALID_SHIFT 8
+#define SBEFIFO_STS_EOT_MASK 0x000000ff
+#define SBEFIFO_STS_EOT_SHIFT 0
+#define SBEFIFO_EOT_RAISE 0x08 /* (Up only) Set End Of Transfer */
+#define SBEFIFO_REQ_RESET 0x0C /* (Up only) Reset Request */
+#define SBEFIFO_PERFORM_RESET 0x10 /* (Down only) Perform Reset */
+#define SBEFIFO_EOT_ACK 0x14 /* (Down only) Acknowledge EOT */
+#define SBEFIFO_DOWN_MAX 0x18 /* (Down only) Max transfer */
+
+/* CFAM GP Mailbox SelfBoot Message register */
+#define CFAM_GP_MBOX_SBM_ADDR 0x2824 /* Converted 0x2809 */
+
+#define CFAM_SBM_SBE_BOOTED 0x80000000
+#define CFAM_SBM_SBE_ASYNC_FFDC 0x40000000
+#define CFAM_SBM_SBE_STATE_MASK 0x00f00000
+#define CFAM_SBM_SBE_STATE_SHIFT 20
+
+enum sbe_state
+{
+	SBE_STATE_UNKNOWN = 0x0,	// Unknown, initial state
+ SBE_STATE_IPLING = 0x1, // IPL'ing - autonomous mode (transient)
+ SBE_STATE_ISTEP = 0x2, // ISTEP - Running IPL by steps (transient)
+ SBE_STATE_MPIPL = 0x3, // MPIPL
+ SBE_STATE_RUNTIME = 0x4, // SBE Runtime
+ SBE_STATE_DMT = 0x5, // Dead Man Timer State (transient)
+ SBE_STATE_DUMP = 0x6, // Dumping
+ SBE_STATE_FAILURE = 0x7, // Internal SBE failure
+ SBE_STATE_QUIESCE = 0x8, // Final state - needs SBE reset to get out
+};
+
+/* FIFO depth */
+#define SBEFIFO_FIFO_DEPTH 8
+
+/* Helpers */
+#define sbefifo_empty(sts) ((sts) & SBEFIFO_STS_EMPTY)
+#define sbefifo_full(sts) ((sts) & SBEFIFO_STS_FULL)
+#define sbefifo_parity_err(sts) ((sts) & SBEFIFO_STS_PARITY_ERR)
+#define sbefifo_populated(sts) (((sts) & SBEFIFO_STS_ECNT_MASK) >> SBEFIFO_STS_ECNT_SHIFT)
+#define sbefifo_vacant(sts) (SBEFIFO_FIFO_DEPTH - sbefifo_populated(sts))
+#define sbefifo_eot_set(sts) (((sts) & SBEFIFO_STS_EOT_MASK) >> SBEFIFO_STS_EOT_SHIFT)
+
+/* Reset request timeout in ms */
+#define SBEFIFO_RESET_TIMEOUT 10000
+
+/* Timeouts for commands in ms */
+#define SBEFIFO_TIMEOUT_START_CMD 10000
+#define SBEFIFO_TIMEOUT_IN_CMD 1000
+#define SBEFIFO_TIMEOUT_START_RSP 10000
+#define SBEFIFO_TIMEOUT_IN_RSP 1000
+
+/* Other constants */
+#define SBEFIFO_MAX_USER_CMD_LEN (0x100000 + PAGE_SIZE)
+#define SBEFIFO_RESET_MAGIC 0x52534554 /* "RSET" */
+
+struct sbefifo {
+ uint32_t magic;
+#define SBEFIFO_MAGIC 0x53424546 /* "SBEF" */
+ struct fsi_device *fsi_dev;
+ struct device dev;
+ struct cdev cdev;
+ struct mutex lock;
+ bool broken;
+ bool dead;
+ bool async_ffdc;
+};
+
+struct sbefifo_user {
+ struct sbefifo *sbefifo;
+ struct mutex file_lock;
+ void *cmd_page;
+ void *pending_cmd;
+ size_t pending_len;
+};
+
+static DEFINE_MUTEX(sbefifo_ffdc_mutex);
+
+
+static void __sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+ size_t ffdc_sz, bool internal)
+{
+ int pack = 0;
+#define FFDC_LSIZE 60
+ static char ffdc_line[FFDC_LSIZE];
+ char *p = ffdc_line;
+
+ while (ffdc_sz) {
+ u32 w0, w1, w2, i;
+ if (ffdc_sz < 3) {
+ dev_err(dev, "SBE invalid FFDC package size %zd\n", ffdc_sz);
+ return;
+ }
+ w0 = be32_to_cpu(*(ffdc++));
+ w1 = be32_to_cpu(*(ffdc++));
+ w2 = be32_to_cpu(*(ffdc++));
+ ffdc_sz -= 3;
+ if ((w0 >> 16) != 0xFFDC) {
+ dev_err(dev, "SBE invalid FFDC package signature %08x %08x %08x\n",
+ w0, w1, w2);
+ break;
+ }
+ w0 &= 0xffff;
+ if (w0 > ffdc_sz) {
+ dev_err(dev, "SBE FFDC package len %d words but only %zd remaining\n",
+ w0, ffdc_sz);
+ w0 = ffdc_sz;
+ break;
+ }
+ if (internal) {
+ dev_warn(dev, "+---- SBE FFDC package %d for async err -----+\n",
+ pack++);
+ } else {
+ dev_warn(dev, "+---- SBE FFDC package %d for cmd %02x:%02x -----+\n",
+ pack++, (w1 >> 8) & 0xff, w1 & 0xff);
+ }
+ dev_warn(dev, "| Response code: %08x |\n", w2);
+ dev_warn(dev, "|-------------------------------------------|\n");
+ for (i = 0; i < w0; i++) {
+ if ((i & 3) == 0) {
+ p = ffdc_line;
+ p += sprintf(p, "| %04x:", i << 4);
+ }
+ p += sprintf(p, " %08x", be32_to_cpu(*(ffdc++)));
+ ffdc_sz--;
+ if ((i & 3) == 3 || i == (w0 - 1)) {
+ while ((i & 3) < 3) {
+ p += sprintf(p, " ");
+ i++;
+ }
+ dev_warn(dev, "%s |\n", ffdc_line);
+ }
+ }
+ dev_warn(dev, "+-------------------------------------------+\n");
+ }
+}
+
+static void sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+ size_t ffdc_sz, bool internal)
+{
+ mutex_lock(&sbefifo_ffdc_mutex);
+ __sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, internal);
+ mutex_unlock(&sbefifo_ffdc_mutex);
+}
+
+int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
+ size_t resp_len, size_t *data_len)
+{
+ u32 dh, s0, s1;
+ size_t ffdc_sz;
+
+ if (resp_len < 3) {
+ pr_debug("sbefifo: cmd %04x, response too small: %zd\n",
+ cmd, resp_len);
+ return -ENXIO;
+ }
+ dh = be32_to_cpu(response[resp_len - 1]);
+ if (dh > resp_len || dh < 3) {
+ dev_err(dev, "SBE cmd %02x:%02x status offset out of range: %d/%zd\n",
+ cmd >> 8, cmd & 0xff, dh, resp_len);
+ return -ENXIO;
+ }
+ s0 = be32_to_cpu(response[resp_len - dh]);
+ s1 = be32_to_cpu(response[resp_len - dh + 1]);
+ if (((s0 >> 16) != 0xC0DE) || ((s0 & 0xffff) != cmd)) {
+ dev_err(dev, "SBE cmd %02x:%02x, status signature invalid: 0x%08x 0x%08x\n",
+ cmd >> 8, cmd & 0xff, s0, s1);
+ return -ENXIO;
+ }
+ if (s1 != 0) {
+ ffdc_sz = dh - 3;
+ dev_warn(dev, "SBE error cmd %02x:%02x status=%04x:%04x\n",
+ cmd >> 8, cmd & 0xff, s1 >> 16, s1 & 0xffff);
+ if (ffdc_sz)
+ sbefifo_dump_ffdc(dev, &response[resp_len - dh + 2],
+ ffdc_sz, false);
+ }
+ if (data_len)
+ *data_len = resp_len - dh;
+
+ /*
+	 * The primary status doesn't have the top bit set, so it can't be
+	 * confused with Linux negative error codes; return the status word whole.
+ */
+ return s1;
+}
+EXPORT_SYMBOL_GPL(sbefifo_parse_status);
+
+static int sbefifo_regr(struct sbefifo *sbefifo, int reg, u32 *word)
+{
+ __be32 raw_word;
+ int rc;
+
+ rc = fsi_device_read(sbefifo->fsi_dev, reg, &raw_word,
+ sizeof(raw_word));
+ if (rc)
+ return rc;
+
+ *word = be32_to_cpu(raw_word);
+
+ return 0;
+}
+
+static int sbefifo_regw(struct sbefifo *sbefifo, int reg, u32 word)
+{
+ __be32 raw_word = cpu_to_be32(word);
+
+ return fsi_device_write(sbefifo->fsi_dev, reg, &raw_word,
+ sizeof(raw_word));
+}
+
+static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
+{
+ __be32 raw_word;
+ u32 sbm;
+ int rc;
+
+ rc = fsi_slave_read(sbefifo->fsi_dev->slave, CFAM_GP_MBOX_SBM_ADDR,
+ &raw_word, sizeof(raw_word));
+ if (rc)
+ return rc;
+ sbm = be32_to_cpu(raw_word);
+
+ /* SBE booted at all ? */
+ if (!(sbm & CFAM_SBM_SBE_BOOTED))
+ return -ESHUTDOWN;
+
+ /* Check its state */
+ switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
+ case SBE_STATE_UNKNOWN:
+ return -ESHUTDOWN;
+ case SBE_STATE_IPLING:
+ case SBE_STATE_ISTEP:
+ case SBE_STATE_MPIPL:
+ case SBE_STATE_DMT:
+ return -EBUSY;
+ case SBE_STATE_RUNTIME:
+ case SBE_STATE_DUMP: /* Not sure about that one */
+ break;
+ case SBE_STATE_FAILURE:
+ case SBE_STATE_QUIESCE:
+ return -ESHUTDOWN;
+ }
+
+ /* Is there async FFDC available ? Remember it */
+ if (sbm & CFAM_SBM_SBE_ASYNC_FFDC)
+ sbefifo->async_ffdc = true;
+
+ return 0;
+}
+
+/* Don't flip endianness of data to/from FIFO, just pass through. */
+static int sbefifo_down_read(struct sbefifo *sbefifo, __be32 *word)
+{
+ return fsi_device_read(sbefifo->fsi_dev, SBEFIFO_DOWN, word,
+ sizeof(*word));
+}
+
+static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
+{
+ return fsi_device_write(sbefifo->fsi_dev, SBEFIFO_UP, &word,
+ sizeof(word));
+}
+
+static int sbefifo_request_reset(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 status, timeout;
+ int rc;
+
+ dev_dbg(dev, "Requesting FIFO reset\n");
+
+ /* Mark broken first, will be cleared if reset succeeds */
+ sbefifo->broken = true;
+
+ /* Send reset request */
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_REQ_RESET, 1);
+ if (rc) {
+ dev_err(dev, "Sending reset request failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Wait for it to complete */
+ for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
+ rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
+ if (rc) {
+			dev_err(dev, "Failed to read UP FIFO status during reset"
+				", rc=%d\n", rc);
+ return rc;
+ }
+
+ if (!(status & SBEFIFO_STS_RESET_REQ)) {
+ dev_dbg(dev, "FIFO reset done\n");
+ sbefifo->broken = false;
+ return 0;
+ }
+
+ msleep(1);
+ }
+ dev_err(dev, "FIFO reset timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 up_status, down_status;
+ bool need_reset = false;
+ int rc;
+
+ rc = sbefifo_check_sbe_state(sbefifo);
+ if (rc) {
+ dev_dbg(dev, "SBE state=%d\n", rc);
+ return rc;
+ }
+
+ /* If broken, we don't need to look at status, go straight to reset */
+ if (sbefifo->broken)
+ goto do_reset;
+
+ rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &up_status);
+ if (rc) {
+ dev_err(dev, "Cleanup: Reading UP status failed, rc=%d\n", rc);
+
+ /* Will try reset again on next attempt at using it */
+ sbefifo->broken = true;
+ return rc;
+ }
+
+ rc = sbefifo_regr(sbefifo, SBEFIFO_DOWN | SBEFIFO_STS, &down_status);
+ if (rc) {
+ dev_err(dev, "Cleanup: Reading DOWN status failed, rc=%d\n", rc);
+
+ /* Will try reset again on next attempt at using it */
+ sbefifo->broken = true;
+ return rc;
+ }
+
+	/* Does the FIFO already contain a reset request from the SBE? */
+ if (down_status & SBEFIFO_STS_RESET_REQ) {
+ dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
+ if (rc) {
+ sbefifo->broken = true;
+ dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
+ return rc;
+ }
+ sbefifo->broken = false;
+ return 0;
+ }
+
+ /* Parity error on either FIFO ? */
+ if ((up_status | down_status) & SBEFIFO_STS_PARITY_ERR)
+ need_reset = true;
+
+ /* Either FIFO not empty ? */
+ if (!((up_status & down_status) & SBEFIFO_STS_EMPTY))
+ need_reset = true;
+
+ if (!need_reset)
+ return 0;
+
+ dev_info(dev, "Cleanup: FIFO not clean (up=0x%08x down=0x%08x)\n",
+ up_status, down_status);
+
+ do_reset:
+
+ /* Mark broken, will be cleared if/when reset succeeds */
+ return sbefifo_request_reset(sbefifo);
+}
+
+static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
+ u32 *status, unsigned long timeout)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ unsigned long end_time;
+ bool ready = false;
+ u32 addr, sts = 0;
+ int rc;
+
+ dev_vdbg(dev, "Wait on %s fifo...\n", up ? "up" : "down");
+
+ addr = (up ? SBEFIFO_UP : SBEFIFO_DOWN) | SBEFIFO_STS;
+
+ end_time = jiffies + timeout;
+ while (!time_after(jiffies, end_time)) {
+ cond_resched();
+ rc = sbefifo_regr(sbefifo, addr, &sts);
+ if (rc < 0) {
+ dev_err(dev, "FSI error %d reading status register\n", rc);
+ return rc;
+ }
+ if (!up && sbefifo_parity_err(sts)) {
+ dev_err(dev, "Parity error in DOWN FIFO\n");
+ return -ENXIO;
+ }
+ ready = !(up ? sbefifo_full(sts) : sbefifo_empty(sts));
+ if (ready)
+ break;
+ }
+ if (!ready) {
+		dev_err(dev, "%s FIFO timeout! status=%08x\n", up ? "UP" : "DOWN", sts);
+ return -ETIMEDOUT;
+ }
+ dev_vdbg(dev, "End of wait status: %08x\n", sts);
+
+ *status = sts;
+
+ return 0;
+}
+
+static int sbefifo_send_command(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ size_t len, chunk, vacant = 0, remaining = cmd_len;
+ unsigned long timeout;
+ u32 status;
+ int rc;
+
+ dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n",
+ cmd_len, be32_to_cpu(command[1]));
+
+ /* As long as there's something to send */
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
+ while (remaining) {
+ /* Wait for room in the FIFO */
+ rc = sbefifo_wait(sbefifo, true, &status, timeout);
+ if (rc < 0)
+ return rc;
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_CMD);
+
+ vacant = sbefifo_vacant(status);
+ len = chunk = min(vacant, remaining);
+
+ dev_vdbg(dev, " status=%08x vacant=%zd chunk=%zd\n",
+ status, vacant, chunk);
+
+ /* Write as much as we can */
+ while (len--) {
+ rc = sbefifo_up_write(sbefifo, *(command++));
+ if (rc) {
+ dev_err(dev, "FSI error %d writing UP FIFO\n", rc);
+ return rc;
+ }
+ }
+ remaining -= chunk;
+ vacant -= chunk;
+ }
+
+	/* If there's no room left, wait for some before writing the EOT */
+ if (!vacant) {
+ rc = sbefifo_wait(sbefifo, true, &status, timeout);
+ if (rc)
+ return rc;
+ }
+
+ /* Send an EOT */
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_EOT_RAISE, 0);
+ if (rc)
+ dev_err(dev, "FSI error %d writing EOT\n", rc);
+ return rc;
+}
+
+static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *response)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 status, eot_set;
+ unsigned long timeout;
+ bool overflow = false;
+ __be32 data;
+ size_t len;
+ int rc;
+
+ dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
+
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_RSP);
+ for (;;) {
+ /* Grab FIFO status (this will handle parity errors) */
+ rc = sbefifo_wait(sbefifo, false, &status, timeout);
+ if (rc < 0)
+ return rc;
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);
+
+ /* Decode status */
+ len = sbefifo_populated(status);
+ eot_set = sbefifo_eot_set(status);
+
+ dev_vdbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
+
+ /* Go through the chunk */
+ while(len--) {
+ /* Read the data */
+ rc = sbefifo_down_read(sbefifo, &data);
+ if (rc < 0)
+ return rc;
+
+ /* Was it an EOT ? */
+ if (eot_set & 0x80) {
+ /*
+ * There should be nothing else in the FIFO,
+ * if there is, mark broken, this will force
+ * a reset on next use, but don't fail the
+ * command.
+ */
+ if (len) {
+					dev_warn(dev, "FIFO read hit EOT"
+						 " with %zd words still pending\n",
+						 len);
+ sbefifo->broken = true;
+ }
+
+ /* We are done */
+ rc = sbefifo_regw(sbefifo,
+ SBEFIFO_DOWN | SBEFIFO_EOT_ACK, 0);
+
+ /*
+				 * If that write fails, still complete the request but
+				 * mark the FIFO as broken so it is reset on the next
+				 * use (not much else we can do here).
+ */
+ if (rc) {
+ dev_err(dev, "FSI error %d ack'ing EOT\n", rc);
+ sbefifo->broken = true;
+ }
+
+ /* Tell whether we overflowed */
+ return overflow ? -EOVERFLOW : 0;
+ }
+
+ /* Store it if there is room */
+ if (iov_iter_count(response) >= sizeof(__be32)) {
+ if (copy_to_iter(&data, sizeof(__be32), response) < sizeof(__be32))
+ return -EFAULT;
+ } else {
+				dev_vdbg(dev, "Response overflowed!\n");
+
+ overflow = true;
+ }
+
+ /* Next EOT bit */
+ eot_set <<= 1;
+ }
+ }
+ /* Shouldn't happen */
+ return -EIO;
+}
+
+static int sbefifo_do_command(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len,
+ struct iov_iter *response)
+{
+ /* Try sending the command */
+ int rc = sbefifo_send_command(sbefifo, command, cmd_len);
+ if (rc)
+ return rc;
+
+ /* Now, get the response */
+ return sbefifo_read_response(sbefifo, response);
+}
+
+static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ struct iov_iter ffdc_iter;
+ struct kvec ffdc_iov;
+ __be32 *ffdc;
+ size_t ffdc_sz;
+ __be32 cmd[2];
+ int rc;
+
+ sbefifo->async_ffdc = false;
+ ffdc = vmalloc(SBEFIFO_MAX_FFDC_SIZE);
+ if (!ffdc) {
+ dev_err(dev, "Failed to allocate SBE FFDC buffer\n");
+ return;
+ }
+ ffdc_iov.iov_base = ffdc;
+ ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
+ iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+ cmd[0] = cpu_to_be32(2);
+ cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
+ rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
+ if (rc != 0) {
+ dev_err(dev, "Error %d retrieving SBE FFDC\n", rc);
+ goto bail;
+ }
+ ffdc_sz = SBEFIFO_MAX_FFDC_SIZE - iov_iter_count(&ffdc_iter);
+ ffdc_sz /= sizeof(__be32);
+ rc = sbefifo_parse_status(dev, SBEFIFO_CMD_GET_SBE_FFDC, ffdc,
+ ffdc_sz, &ffdc_sz);
+ if (rc != 0) {
+ dev_err(dev, "Error %d decoding SBE FFDC\n", rc);
+ goto bail;
+ }
+ if (ffdc_sz > 0)
+ sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, true);
+ bail:
+ vfree(ffdc);
+
+}
+
+static int __sbefifo_submit(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len,
+ struct iov_iter *response)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ int rc;
+
+ if (sbefifo->dead)
+ return -ENODEV;
+
+ if (cmd_len < 2 || be32_to_cpu(command[0]) != cmd_len) {
+ dev_vdbg(dev, "Invalid command len %zd (header: %d)\n",
+ cmd_len, be32_to_cpu(command[0]));
+ return -EINVAL;
+ }
+
+ /* First ensure the HW is in a clean state */
+ rc = sbefifo_cleanup_hw(sbefifo);
+ if (rc)
+ return rc;
+
+ /* Look for async FFDC first if any */
+ if (sbefifo->async_ffdc)
+ sbefifo_collect_async_ffdc(sbefifo);
+
+ rc = sbefifo_do_command(sbefifo, command, cmd_len, response);
+ if (rc != 0 && rc != -EOVERFLOW)
+ goto fail;
+ return rc;
+ fail:
+ /*
+ * On failure, attempt a reset. Ignore the result, it will mark
+ * the fifo broken if the reset fails
+ */
+ sbefifo_request_reset(sbefifo);
+
+ /* Return original error */
+ return rc;
+}
+
+/**
+ * sbefifo_submit() - Submit an SBE FIFO command and receive the response
+ * @dev: The sbefifo device
+ * @command: The raw command data
+ * @cmd_len: The command size (in 32-bit words)
+ * @response: The output response buffer
+ * @resp_len: In: Response buffer size, Out: Response size (both in 32-bit words)
+ *
+ * This will perform the entire operation. If the response buffer
+ * overflows, -EOVERFLOW is returned.
+ */
+int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+ __be32 *response, size_t *resp_len)
+{
+ struct sbefifo *sbefifo;
+ struct iov_iter resp_iter;
+ struct kvec resp_iov;
+ size_t rbytes;
+ int rc;
+
+ if (!dev)
+ return -ENODEV;
+ sbefifo = dev_get_drvdata(dev);
+ if (!sbefifo)
+ return -ENODEV;
+ if (WARN_ON_ONCE(sbefifo->magic != SBEFIFO_MAGIC))
+ return -ENODEV;
+ if (!resp_len || !command || !response)
+ return -EINVAL;
+
+ /* Prepare iov iterator */
+ rbytes = (*resp_len) * sizeof(__be32);
+ resp_iov.iov_base = response;
+ resp_iov.iov_len = rbytes;
+ iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes);
+
+ /* Perform the command */
+ mutex_lock(&sbefifo->lock);
+ rc = __sbefifo_submit(sbefifo, command, cmd_len, &resp_iter);
+ mutex_unlock(&sbefifo->lock);
+
+ /* Extract the response length */
+ rbytes -= iov_iter_count(&resp_iter);
+ *resp_len = rbytes / sizeof(__be32);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sbefifo_submit);
+
+/*
+ * Char device interface
+ */
+
+static void sbefifo_release_command(struct sbefifo_user *user)
+{
+ if (is_vmalloc_addr(user->pending_cmd))
+ vfree(user->pending_cmd);
+ user->pending_cmd = NULL;
+ user->pending_len = 0;
+}
+
+static int sbefifo_user_open(struct inode *inode, struct file *file)
+{
+ struct sbefifo *sbefifo = container_of(inode->i_cdev, struct sbefifo, cdev);
+ struct sbefifo_user *user;
+
+ user = kzalloc(sizeof(struct sbefifo_user), GFP_KERNEL);
+ if (!user)
+ return -ENOMEM;
+
+ file->private_data = user;
+ user->sbefifo = sbefifo;
+ user->cmd_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!user->cmd_page) {
+ kfree(user);
+ return -ENOMEM;
+ }
+ mutex_init(&user->file_lock);
+
+ return 0;
+}
+
+static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct sbefifo_user *user = file->private_data;
+ struct sbefifo *sbefifo;
+ struct iov_iter resp_iter;
+ struct iovec resp_iov;
+ size_t cmd_len;
+ int rc;
+
+ if (!user)
+ return -EINVAL;
+ sbefifo = user->sbefifo;
+ if (len & 3)
+ return -EINVAL;
+
+ mutex_lock(&user->file_lock);
+
+ /* Cronus relies on -EAGAIN after a short read */
+ if (user->pending_len == 0) {
+ rc = -EAGAIN;
+ goto bail;
+ }
+ if (user->pending_len < 8) {
+ rc = -EINVAL;
+ goto bail;
+ }
+ cmd_len = user->pending_len >> 2;
+
+ /* Prepare iov iterator */
+ resp_iov.iov_base = buf;
+ resp_iov.iov_len = len;
+ iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
+
+ /* Perform the command */
+ mutex_lock(&sbefifo->lock);
+ rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
+ mutex_unlock(&sbefifo->lock);
+ if (rc < 0)
+ goto bail;
+
+ /* Extract the response length */
+ rc = len - iov_iter_count(&resp_iter);
+ bail:
+ sbefifo_release_command(user);
+ mutex_unlock(&user->file_lock);
+ return rc;
+}
+
+static ssize_t sbefifo_user_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct sbefifo_user *user = file->private_data;
+ struct sbefifo *sbefifo;
+ int rc = len;
+
+ if (!user)
+ return -EINVAL;
+ sbefifo = user->sbefifo;
+ if (len > SBEFIFO_MAX_USER_CMD_LEN)
+ return -EINVAL;
+ if (len & 3)
+ return -EINVAL;
+
+ mutex_lock(&user->file_lock);
+
+	/* Can we use the pre-allocated buffer? If not, allocate one */
+ if (len <= PAGE_SIZE)
+ user->pending_cmd = user->cmd_page;
+ else
+ user->pending_cmd = vmalloc(len);
+ if (!user->pending_cmd) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+
+ /* Copy the command into the staging buffer */
+ if (copy_from_user(user->pending_cmd, buf, len)) {
+ rc = -EFAULT;
+ goto bail;
+ }
+
+ /* Check for the magic reset command */
+ if (len == 4 && be32_to_cpu(*(__be32 *)user->pending_cmd) ==
+ SBEFIFO_RESET_MAGIC) {
+
+ /* Clear out any pending command */
+ user->pending_len = 0;
+
+ /* Trigger reset request */
+ mutex_lock(&sbefifo->lock);
+ rc = sbefifo_request_reset(user->sbefifo);
+ mutex_unlock(&sbefifo->lock);
+ if (rc == 0)
+ rc = 4;
+ goto bail;
+ }
+
+ /* Update the staging buffer size */
+ user->pending_len = len;
+ bail:
+ if (!user->pending_len)
+ sbefifo_release_command(user);
+
+ mutex_unlock(&user->file_lock);
+
+ /* And that's it, we'll issue the command on a read */
+ return rc;
+}
+
+static int sbefifo_user_release(struct inode *inode, struct file *file)
+{
+ struct sbefifo_user *user = file->private_data;
+
+ if (!user)
+ return -EINVAL;
+
+ sbefifo_release_command(user);
+ free_page((unsigned long)user->cmd_page);
+ kfree(user);
+
+ return 0;
+}
+
+static const struct file_operations sbefifo_fops = {
+ .owner = THIS_MODULE,
+ .open = sbefifo_user_open,
+ .read = sbefifo_user_read,
+ .write = sbefifo_user_write,
+ .release = sbefifo_user_release,
+};
+
+static void sbefifo_free(struct device *dev)
+{
+ struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);
+
+ put_device(&sbefifo->fsi_dev->dev);
+ kfree(sbefifo);
+}
+
+/*
+ * Probe/remove
+ */
+
+static int sbefifo_probe(struct device *dev)
+{
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct sbefifo *sbefifo;
+ struct device_node *np;
+ struct platform_device *child;
+ char child_name[32];
+ int rc, didx, child_idx = 0;
+
+ dev_dbg(dev, "Found sbefifo device\n");
+
+ sbefifo = kzalloc(sizeof(*sbefifo), GFP_KERNEL);
+ if (!sbefifo)
+ return -ENOMEM;
+
+	/* Grab a reference to the device (parent of our cdev); we'll drop it later */
+ if (!get_device(dev)) {
+ kfree(sbefifo);
+ return -ENODEV;
+ }
+
+ sbefifo->magic = SBEFIFO_MAGIC;
+ sbefifo->fsi_dev = fsi_dev;
+ dev_set_drvdata(dev, sbefifo);
+ mutex_init(&sbefifo->lock);
+
+ /*
+ * Try cleaning up the FIFO. If this fails, we still register the
+ * driver and will try cleaning things up again on the next access.
+ */
+ rc = sbefifo_cleanup_hw(sbefifo);
+ if (rc && rc != -ESHUTDOWN)
+ dev_err(dev, "Initial HW cleanup failed, will retry later\n");
+
+ /* Create chardev for userspace access */
+ sbefifo->dev.type = &fsi_cdev_type;
+ sbefifo->dev.parent = dev;
+ sbefifo->dev.release = sbefifo_free;
+ device_initialize(&sbefifo->dev);
+
+ /* Allocate a minor in the FSI space */
+ rc = fsi_get_new_minor(fsi_dev, fsi_dev_sbefifo, &sbefifo->dev.devt, &didx);
+ if (rc)
+ goto err;
+
+ dev_set_name(&sbefifo->dev, "sbefifo%d", didx);
+ cdev_init(&sbefifo->cdev, &sbefifo_fops);
+ rc = cdev_device_add(&sbefifo->cdev, &sbefifo->dev);
+ if (rc) {
+ dev_err(dev, "Error %d creating char device %s\n",
+ rc, dev_name(&sbefifo->dev));
+ goto err_free_minor;
+ }
+
+ /* Create platform devs for dts child nodes (occ, etc) */
+ for_each_available_child_of_node(dev->of_node, np) {
+ snprintf(child_name, sizeof(child_name), "%s-dev%d",
+ dev_name(&sbefifo->dev), child_idx++);
+ child = of_platform_device_create(np, child_name, dev);
+ if (!child)
+ dev_warn(dev, "failed to create child %s dev\n",
+ child_name);
+ }
+
+ return 0;
+ err_free_minor:
+ fsi_free_minor(sbefifo->dev.devt);
+ err:
+ put_device(&sbefifo->dev);
+ return rc;
+}
+
+static int sbefifo_unregister_child(struct device *dev, void *data)
+{
+ struct platform_device *child = to_platform_device(dev);
+
+ of_device_unregister(child);
+ if (dev->of_node)
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+
+ return 0;
+}
+
+static int sbefifo_remove(struct device *dev)
+{
+ struct sbefifo *sbefifo = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "Removing sbefifo device...\n");
+
+ mutex_lock(&sbefifo->lock);
+ sbefifo->dead = true;
+ mutex_unlock(&sbefifo->lock);
+
+ cdev_device_del(&sbefifo->cdev, &sbefifo->dev);
+ fsi_free_minor(sbefifo->dev.devt);
+ device_for_each_child(dev, NULL, sbefifo_unregister_child);
+ put_device(&sbefifo->dev);
+
+ return 0;
+}
+
+static struct fsi_device_id sbefifo_ids[] = {
+ {
+ .engine_type = FSI_ENGID_SBE,
+ .version = FSI_VERSION_ANY,
+ },
+ { 0 }
+};
+
+static struct fsi_driver sbefifo_drv = {
+ .id_table = sbefifo_ids,
+ .drv = {
+ .name = DEVICE_NAME,
+ .bus = &fsi_bus_type,
+ .probe = sbefifo_probe,
+ .remove = sbefifo_remove,
+ }
+};
+
+static int sbefifo_init(void)
+{
+ return fsi_driver_register(&sbefifo_drv);
+}
+
+static void sbefifo_exit(void)
+{
+ fsi_driver_unregister(&sbefifo_drv);
+}
+
+module_init(sbefifo_init);
+module_exit(sbefifo_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brad Bishop <bradleyb@fuzziesquirrel.com>");
+MODULE_AUTHOR("Eddie James <eajames@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Linux device interface to the POWER Self Boot Engine");
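
For in-kernel users, sbefifo_submit() and sbefifo_parse_status() are the whole client API. A hedged sketch of a caller follows; the opcode 0xa001 is invented for illustration, and only the framing (word 0 carries the length in 32-bit words, word 1 the command) comes from the driver above:

/* Illustrative sbefifo client; the opcode is hypothetical. */
static int example_sbe_command(struct device *sbefifo_dev)
{
	__be32 cmd[2], resp[16];
	size_t resp_len = ARRAY_SIZE(resp);	/* in 32-bit words */
	size_t data_len;
	int rc;

	cmd[0] = cpu_to_be32(2);		/* total length, in words */
	cmd[1] = cpu_to_be32(0x0001a001);	/* hypothetical opcode */

	rc = sbefifo_submit(sbefifo_dev, cmd, 2, resp, &resp_len);
	if (rc)
		return rc;	/* may be -EOVERFLOW on a short buffer */

	/* Separate the payload from the trailing status block */
	return sbefifo_parse_status(sbefifo_dev, 0xa001, resp,
				    resp_len, &data_len);
}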
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index e13353a2fd7c..df94021dd9d1 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -20,42 +20,73 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <linux/miscdevice.h>
+#include <linux/cdev.h>
#include <linux/list.h>
-#include <linux/idr.h>
-#define FSI_ENGID_SCOM 0x5
+#include <uapi/linux/fsi.h>
-#define SCOM_FSI2PIB_DELAY 50
+#define FSI_ENGID_SCOM 0x5
/* SCOM engine register set */
#define SCOM_DATA0_REG 0x00
#define SCOM_DATA1_REG 0x04
#define SCOM_CMD_REG 0x08
-#define SCOM_RESET_REG 0x1C
+#define SCOM_FSI2PIB_RESET_REG 0x18
+#define SCOM_STATUS_REG 0x1C /* Read */
+#define SCOM_PIB_RESET_REG 0x1C /* Write */
-#define SCOM_RESET_CMD 0x80000000
+/* Command register */
#define SCOM_WRITE_CMD 0x80000000
+#define SCOM_READ_CMD 0x00000000
+
+/* Status register bits */
+#define SCOM_STATUS_ERR_SUMMARY 0x80000000
+#define SCOM_STATUS_PROTECTION 0x01000000
+#define SCOM_STATUS_PARITY 0x04000000
+#define SCOM_STATUS_PIB_ABORT 0x00100000
+#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
+#define SCOM_STATUS_PIB_RESP_SHIFT 12
+
+#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \
+ SCOM_STATUS_PROTECTION | \
+ SCOM_STATUS_PARITY | \
+ SCOM_STATUS_PIB_ABORT | \
+ SCOM_STATUS_PIB_RESP_MASK)
+/* SCOM address encodings */
+#define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
+#define XSCOM_ADDR_INF_FORM1 BIT_ULL(60)
+
+/* SCOM indirect access */
+#define XSCOM_ADDR_DIRECT_PART 0x7fffffffull
+#define XSCOM_ADDR_INDIRECT_PART 0x000fffff00000000ull
+#define XSCOM_DATA_IND_READ BIT_ULL(63)
+#define XSCOM_DATA_IND_COMPLETE BIT_ULL(31)
+#define XSCOM_DATA_IND_ERR_MASK 0x70000000ull
+#define XSCOM_DATA_IND_ERR_SHIFT 28
+#define XSCOM_DATA_IND_DATA 0x0000ffffull
+#define XSCOM_DATA_IND_FORM1_DATA 0x000fffffffffffffull
+#define XSCOM_ADDR_FORM1_LOW 0x000ffffffffull
+#define XSCOM_ADDR_FORM1_HI 0xfff00000000ull
+#define XSCOM_ADDR_FORM1_HI_SHIFT 20
+
+/* Retries */
+#define SCOM_MAX_RETRIES 100 /* Retries on busy */
+#define SCOM_MAX_IND_RETRIES	10	/* Retries while indirect access not ready */
struct scom_device {
struct list_head link;
struct fsi_device *fsi_dev;
- struct miscdevice mdev;
- char name[32];
- int idx;
+ struct device dev;
+ struct cdev cdev;
+ struct mutex lock;
+ bool dead;
};
-#define to_scom_dev(x) container_of((x), struct scom_device, mdev)
-
-static struct list_head scom_devices;
-
-static DEFINE_IDA(scom_ida);
-
-static int put_scom(struct scom_device *scom_dev, uint64_t value,
- uint32_t addr)
+static int __put_scom(struct scom_device *scom_dev, uint64_t value,
+ uint32_t addr, uint32_t *status)
{
+ __be32 data, raw_status;
int rc;
- uint32_t data;
data = cpu_to_be32((value >> 32) & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
@@ -70,53 +101,286 @@ static int put_scom(struct scom_device *scom_dev, uint64_t value,
return rc;
data = cpu_to_be32(SCOM_WRITE_CMD | addr);
- return fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+ rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
+ if (rc)
+ return rc;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+ *status = be32_to_cpu(raw_status);
+
+ return 0;
}
-static int get_scom(struct scom_device *scom_dev, uint64_t *value,
- uint32_t addr)
+static int __get_scom(struct scom_device *scom_dev, uint64_t *value,
+ uint32_t addr, uint32_t *status)
{
- uint32_t result, data;
+ __be32 data, raw_status;
int rc;
+
*value = 0ULL;
- data = cpu_to_be32(addr);
+ data = cpu_to_be32(SCOM_READ_CMD | addr);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
- rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &result,
+ /*
+ * Read the data registers even on error, so we don't have
+ * to interpret the status register here.
+ */
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
-
- *value |= (uint64_t)cpu_to_be32(result) << 32;
- rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &result,
+ *value |= (uint64_t)be32_to_cpu(data) << 32;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
+ *value |= be32_to_cpu(data);
+ *status = be32_to_cpu(raw_status);
- *value |= cpu_to_be32(result);
+ return rc;
+}
+
+static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+ int rc, retries, err = 0;
+
+ if (value & ~XSCOM_DATA_IND_DATA)
+ return -EINVAL;
+
+ ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+ ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | value;
+ rc = __put_scom(scom, ind_data, ind_addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+ if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+ return 0;
+
+ msleep(1);
+ }
+ return rc;
+}
+
+static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+
+ if (value & ~XSCOM_DATA_IND_FORM1_DATA)
+ return -EINVAL;
+ ind_addr = addr & XSCOM_ADDR_FORM1_LOW;
+ ind_data = value | (addr & XSCOM_ADDR_FORM1_HI) << XSCOM_ADDR_FORM1_HI_SHIFT;
+ return __put_scom(scom, ind_data, ind_addr, status);
+}
+
+static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+ int rc, retries, err = 0;
+
+ ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+ ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ;
+ rc = __put_scom(scom, ind_data, ind_addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+ *value = ind_data & XSCOM_DATA_IND_DATA;
+
+ if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+ return 0;
+
+ msleep(1);
+ }
+ return rc;
+}
+
+static int raw_put_scom(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ if (addr & XSCOM_ADDR_IND_FLAG) {
+ if (addr & XSCOM_ADDR_INF_FORM1)
+ return put_indirect_scom_form1(scom, value, addr, status);
+ else
+ return put_indirect_scom_form0(scom, value, addr, status);
+ } else
+ return __put_scom(scom, value, addr, status);
+}
+
+static int raw_get_scom(struct scom_device *scom, uint64_t *value,
+ uint64_t addr, uint32_t *status)
+{
+ if (addr & XSCOM_ADDR_IND_FLAG) {
+ if (addr & XSCOM_ADDR_INF_FORM1)
+ return -ENXIO;
+ return get_indirect_scom_form0(scom, value, addr, status);
+ } else
+ return __get_scom(scom, value, addr, status);
+}
+
+static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
+{
+ uint32_t dummy = -1;
+
+ if (status & SCOM_STATUS_PROTECTION)
+ return -EPERM;
+ if (status & SCOM_STATUS_PARITY) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return -EIO;
+ }
+ /* Return -EBUSY on PIB abort to force a retry */
+ if (status & SCOM_STATUS_PIB_ABORT)
+ return -EBUSY;
+ if (status & SCOM_STATUS_ERR_SUMMARY) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return -EIO;
+ }
return 0;
}
+static int handle_pib_status(struct scom_device *scom, uint8_t status)
+{
+ uint32_t dummy = -1;
+
+ if (status == SCOM_PIB_SUCCESS)
+ return 0;
+ if (status == SCOM_PIB_BLOCKED)
+ return -EBUSY;
+
+ /* Reset the bridge */
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+
+ switch(status) {
+ case SCOM_PIB_OFFLINE:
+ return -ENODEV;
+ case SCOM_PIB_BAD_ADDR:
+ return -ENXIO;
+ case SCOM_PIB_TIMEOUT:
+ return -ETIMEDOUT;
+ case SCOM_PIB_PARTIAL:
+ case SCOM_PIB_CLK_ERR:
+ case SCOM_PIB_PARITY_ERR:
+ default:
+ return -EIO;
+ }
+}
+
+static int put_scom(struct scom_device *scom, uint64_t value,
+ uint64_t addr)
+{
+ uint32_t status, dummy = -1;
+ int rc, retries;
+
+ for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+ rc = raw_put_scom(scom, value, addr, &status);
+ if (rc) {
+ /* Try resetting the bridge if FSI fails */
+ if (rc != -ENODEV && retries == 0) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+ &dummy, sizeof(uint32_t));
+ rc = -EBUSY;
+ } else
+ return rc;
+ } else
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc && rc != -EBUSY)
+ break;
+ if (rc == 0) {
+ rc = handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
+ if (rc && rc != -EBUSY)
+ break;
+ }
+ if (rc == 0)
+ break;
+ msleep(1);
+ }
+ return rc;
+}
+
+static int get_scom(struct scom_device *scom, uint64_t *value,
+ uint64_t addr)
+{
+ uint32_t status, dummy = -1;
+ int rc, retries;
+
+ for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+ rc = raw_get_scom(scom, value, addr, &status);
+ if (rc) {
+ /* Try resetting the bridge if FSI fails */
+ if (rc != -ENODEV && retries == 0) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+ &dummy, sizeof(uint32_t));
+ rc = -EBUSY;
+ } else
+ return rc;
+ } else
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc && rc != -EBUSY)
+ break;
+ if (rc == 0) {
+ rc = handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
+ if (rc && rc != -EBUSY)
+ break;
+ }
+ if (rc == 0)
+ break;
+ msleep(1);
+ }
+ return rc;
+}
+
static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
- loff_t *offset)
+ loff_t *offset)
{
- int rc;
- struct miscdevice *mdev =
- (struct miscdevice *)filep->private_data;
- struct scom_device *scom = to_scom_dev(mdev);
+ struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
+ int rc;
if (len != sizeof(uint64_t))
return -EINVAL;
- rc = get_scom(scom, &val, *offset);
+ mutex_lock(&scom->lock);
+ if (scom->dead)
+ rc = -ENODEV;
+ else
+ rc = get_scom(scom, &val, *offset);
+ mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "get_scom fail:%d\n", rc);
return rc;
@@ -130,11 +394,10 @@ static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
}
static ssize_t scom_write(struct file *filep, const char __user *buf,
- size_t len, loff_t *offset)
+ size_t len, loff_t *offset)
{
int rc;
- struct miscdevice *mdev = filep->private_data;
- struct scom_device *scom = to_scom_dev(mdev);
+ struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
@@ -147,7 +410,12 @@ static ssize_t scom_write(struct file *filep, const char __user *buf,
return -EINVAL;
}
- rc = put_scom(scom, val, *offset);
+ mutex_lock(&scom->lock);
+ if (scom->dead)
+ rc = -ENODEV;
+ else
+ rc = put_scom(scom, val, *offset);
+ mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "put_scom failed with:%d\n", rc);
return rc;
@@ -171,50 +439,205 @@ static loff_t scom_llseek(struct file *file, loff_t offset, int whence)
return offset;
}
+static void raw_convert_status(struct scom_access *acc, uint32_t status)
+{
+ acc->pib_status = (status & SCOM_STATUS_PIB_RESP_MASK) >>
+ SCOM_STATUS_PIB_RESP_SHIFT;
+ acc->intf_errors = 0;
+
+ if (status & SCOM_STATUS_PROTECTION)
+ acc->intf_errors |= SCOM_INTF_ERR_PROTECTION;
+ else if (status & SCOM_STATUS_PARITY)
+ acc->intf_errors |= SCOM_INTF_ERR_PARITY;
+ else if (status & SCOM_STATUS_PIB_ABORT)
+ acc->intf_errors |= SCOM_INTF_ERR_ABORT;
+ else if (status & SCOM_STATUS_ERR_SUMMARY)
+ acc->intf_errors |= SCOM_INTF_ERR_UNKNOWN;
+}
+
+static int scom_raw_read(struct scom_device *scom, void __user *argp)
+{
+ struct scom_access acc;
+ uint32_t status;
+ int rc;
+
+ if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+ return -EFAULT;
+
+ rc = raw_get_scom(scom, &acc.data, acc.addr, &status);
+ if (rc)
+ return rc;
+ raw_convert_status(&acc, status);
+ if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scom_raw_write(struct scom_device *scom, void __user *argp)
+{
+ u64 prev_data, mask, data;
+ struct scom_access acc;
+ uint32_t status;
+ int rc;
+
+ if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+ return -EFAULT;
+
+ if (acc.mask) {
+ rc = raw_get_scom(scom, &prev_data, acc.addr, &status);
+ if (rc)
+ return rc;
+ if (status & SCOM_STATUS_ANY_ERR)
+ goto fail;
+ mask = acc.mask;
+ } else {
+ prev_data = mask = -1ull;
+ }
+ data = (prev_data & ~mask) | (acc.data & mask);
+ rc = raw_put_scom(scom, data, acc.addr, &status);
+ if (rc)
+ return rc;
+ fail:
+ raw_convert_status(&acc, status);
+ if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scom_reset(struct scom_device *scom, void __user *argp)
+{
+ uint32_t flags, dummy = -1;
+ int rc = 0;
+
+ if (get_user(flags, (__u32 __user *)argp))
+ return -EFAULT;
+ if (flags & SCOM_RESET_PIB)
+ rc = fsi_device_write(scom->fsi_dev, SCOM_PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ if (!rc && (flags & (SCOM_RESET_PIB | SCOM_RESET_INTF)))
+ rc = fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return rc;
+}
+
+static int scom_check(struct scom_device *scom, void __user *argp)
+{
+ /* Still need to find out how to get "protected" */
+ return put_user(SCOM_CHECK_SUPPORTED, (__u32 __user *)argp);
+}
+
+static long scom_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct scom_device *scom = file->private_data;
+ void __user *argp = (void __user *)arg;
+ int rc = -ENOTTY;
+
+ mutex_lock(&scom->lock);
+ if (scom->dead) {
+ mutex_unlock(&scom->lock);
+ return -ENODEV;
+ }
+ switch(cmd) {
+ case FSI_SCOM_CHECK:
+ rc = scom_check(scom, argp);
+ break;
+ case FSI_SCOM_READ:
+ rc = scom_raw_read(scom, argp);
+ break;
+ case FSI_SCOM_WRITE:
+ rc = scom_raw_write(scom, argp);
+ break;
+ case FSI_SCOM_RESET:
+ rc = scom_reset(scom, argp);
+ break;
+ }
+ mutex_unlock(&scom->lock);
+ return rc;
+}
+
+static int scom_open(struct inode *inode, struct file *file)
+{
+ struct scom_device *scom = container_of(inode->i_cdev, struct scom_device, cdev);
+
+ file->private_data = scom;
+
+ return 0;
+}
+
static const struct file_operations scom_fops = {
- .owner = THIS_MODULE,
- .llseek = scom_llseek,
- .read = scom_read,
- .write = scom_write,
+ .owner = THIS_MODULE,
+ .open = scom_open,
+ .llseek = scom_llseek,
+ .read = scom_read,
+ .write = scom_write,
+ .unlocked_ioctl = scom_ioctl,
};
+static void scom_free(struct device *dev)
+{
+ struct scom_device *scom = container_of(dev, struct scom_device, dev);
+
+ put_device(&scom->fsi_dev->dev);
+ kfree(scom);
+}
+
static int scom_probe(struct device *dev)
{
- uint32_t data;
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct scom_device *scom;
+ int rc, didx;
- scom = devm_kzalloc(dev, sizeof(*scom), GFP_KERNEL);
+ scom = kzalloc(sizeof(*scom), GFP_KERNEL);
if (!scom)
return -ENOMEM;
+ dev_set_drvdata(dev, scom);
+ mutex_init(&scom->lock);
- scom->idx = ida_simple_get(&scom_ida, 1, INT_MAX, GFP_KERNEL);
- snprintf(scom->name, sizeof(scom->name), "scom%d", scom->idx);
+	/* Grab a reference to the device (parent of our cdev); we'll drop it later */
+ if (!get_device(dev)) {
+ kfree(scom);
+ return -ENODEV;
+ }
scom->fsi_dev = fsi_dev;
- scom->mdev.minor = MISC_DYNAMIC_MINOR;
- scom->mdev.fops = &scom_fops;
- scom->mdev.name = scom->name;
- scom->mdev.parent = dev;
- list_add(&scom->link, &scom_devices);
- data = cpu_to_be32(SCOM_RESET_CMD);
- fsi_device_write(fsi_dev, SCOM_RESET_REG, &data, sizeof(uint32_t));
+ /* Create chardev for userspace access */
+ scom->dev.type = &fsi_cdev_type;
+ scom->dev.parent = dev;
+ scom->dev.release = scom_free;
+ device_initialize(&scom->dev);
+
+ /* Allocate a minor in the FSI space */
+ rc = fsi_get_new_minor(fsi_dev, fsi_dev_scom, &scom->dev.devt, &didx);
+ if (rc)
+ goto err;
+
+ dev_set_name(&scom->dev, "scom%d", didx);
+ cdev_init(&scom->cdev, &scom_fops);
+ rc = cdev_device_add(&scom->cdev, &scom->dev);
+ if (rc) {
+ dev_err(dev, "Error %d creating char device %s\n",
+ rc, dev_name(&scom->dev));
+ goto err_free_minor;
+ }
- return misc_register(&scom->mdev);
+ return 0;
+ err_free_minor:
+ fsi_free_minor(scom->dev.devt);
+ err:
+ put_device(&scom->dev);
+ return rc;
}
static int scom_remove(struct device *dev)
{
- struct scom_device *scom, *scom_tmp;
- struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct scom_device *scom = dev_get_drvdata(dev);
- list_for_each_entry_safe(scom, scom_tmp, &scom_devices, link) {
- if (scom->fsi_dev == fsi_dev) {
- list_del(&scom->link);
- ida_simple_remove(&scom_ida, scom->idx);
- misc_deregister(&scom->mdev);
- }
- }
+ mutex_lock(&scom->lock);
+ scom->dead = true;
+ mutex_unlock(&scom->lock);
+ cdev_device_del(&scom->cdev, &scom->dev);
+ fsi_free_minor(scom->dev.devt);
+ put_device(&scom->dev);
return 0;
}
@@ -239,20 +662,11 @@ static struct fsi_driver scom_drv = {
static int scom_init(void)
{
- INIT_LIST_HEAD(&scom_devices);
return fsi_driver_register(&scom_drv);
}
static void scom_exit(void)
{
- struct list_head *pos;
- struct scom_device *scom;
-
- list_for_each(pos, &scom_devices) {
- scom = list_entry(pos, struct scom_device, link);
- misc_deregister(&scom->mdev);
- devm_kfree(&scom->fsi_dev->dev, scom);
- }
fsi_driver_unregister(&scom_drv);
}
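
From userspace, the new ioctl interface replaces raw read()/write() for error-aware access. A sketch of a reader — the device node path is an assumption about udev naming; the ioctl and the struct scom_access fields mirror the uapi used by scom_ioctl() above:

/* Userspace sketch; the path "/dev/scom0" is an assumption. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsi.h>

static int read_scom(const char *path, unsigned long long addr)
{
	struct scom_access acc;
	int fd, rc;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&acc, 0, sizeof(acc));
	acc.addr = addr;

	rc = ioctl(fd, FSI_SCOM_READ, &acc);
	if (rc == 0 && acc.pib_status == 0 && acc.intf_errors == 0)
		printf("scom %#llx = %#llx\n", addr,
		       (unsigned long long)acc.data);

	close(fd);
	return rc;
}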
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
new file mode 100644
index 000000000000..6abc88514512
--- /dev/null
+++ b/drivers/gnss/Kconfig
@@ -0,0 +1,43 @@
+#
+# GNSS receiver configuration
+#
+
+menuconfig GNSS
+ tristate "GNSS receiver support"
+ ---help---
+ Say Y here if you have a GNSS receiver (e.g. a GPS receiver).
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss.
+
+if GNSS
+
+config GNSS_SERIAL
+ tristate
+
+config GNSS_SIRF_SERIAL
+ tristate "SiRFstar GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ ---help---
+ Say Y here if you have a SiRFstar-based GNSS receiver which uses a
+ serial interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-sirf.
+
+ If unsure, say N.
+
+config GNSS_UBX_SERIAL
+ tristate "u-blox GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ select GNSS_SERIAL
+ ---help---
+ Say Y here if you have a u-blox GNSS receiver which uses a serial
+ interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-ubx.
+
+ If unsure, say N.
+
+endif # GNSS
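
Enabling the new subsystem and both receiver drivers as modules then comes down to a config fragment like the following (SERIAL_DEV_BUS must already be enabled; GNSS_SERIAL is pulled in automatically by the u-blox driver's select):

CONFIG_GNSS=m
CONFIG_GNSS_SIRF_SERIAL=m
CONFIG_GNSS_UBX_SERIAL=m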
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
new file mode 100644
index 000000000000..5cf0ebe0330a
--- /dev/null
+++ b/drivers/gnss/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the GNSS subsystem.
+#
+
+obj-$(CONFIG_GNSS) += gnss.o
+gnss-y := core.o
+
+obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o
+gnss-serial-y := serial.o
+
+obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o
+gnss-sirf-y := sirf.o
+
+obj-$(CONFIG_GNSS_UBX_SERIAL) += gnss-ubx.o
+gnss-ubx-y := ubx.o
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
new file mode 100644
index 000000000000..4291a0dd22aa
--- /dev/null
+++ b/drivers/gnss/core.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GNSS receiver core
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gnss.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#define GNSS_FLAG_HAS_WRITE_RAW BIT(0)
+
+#define GNSS_MINORS 16
+
+static DEFINE_IDA(gnss_minors);
+static dev_t gnss_first;
+
+/* FIFO size must be a power of two */
+#define GNSS_READ_FIFO_SIZE 4096
+#define GNSS_WRITE_BUF_SIZE 1024
+
+#define to_gnss_device(d) container_of((d), struct gnss_device, dev)
+
+static int gnss_open(struct inode *inode, struct file *file)
+{
+ struct gnss_device *gdev;
+ int ret = 0;
+
+ gdev = container_of(inode->i_cdev, struct gnss_device, cdev);
+
+ get_device(&gdev->dev);
+
+ nonseekable_open(inode, file);
+ file->private_data = gdev;
+
+ down_write(&gdev->rwsem);
+ if (gdev->disconnected) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (gdev->count++ == 0) {
+ ret = gdev->ops->open(gdev);
+ if (ret)
+ gdev->count--;
+ }
+unlock:
+ up_write(&gdev->rwsem);
+
+ if (ret)
+ put_device(&gdev->dev);
+
+ return ret;
+}
+
+static int gnss_release(struct inode *inode, struct file *file)
+{
+ struct gnss_device *gdev = file->private_data;
+
+ down_write(&gdev->rwsem);
+ if (gdev->disconnected)
+ goto unlock;
+
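+	/* The last close shuts the device down and discards buffered data. */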
+ if (--gdev->count == 0) {
+ gdev->ops->close(gdev);
+ kfifo_reset(&gdev->read_fifo);
+ }
+unlock:
+ up_write(&gdev->rwsem);
+
+ put_device(&gdev->dev);
+
+ return 0;
+}
+
+static ssize_t gnss_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct gnss_device *gdev = file->private_data;
+ unsigned int copied;
+ int ret;
+
+ mutex_lock(&gdev->read_mutex);
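+	/*
+	 * read_mutex serialises readers; sleep with it dropped and retest
+	 * the fifo after reacquiring it.
+	 */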
+ while (kfifo_is_empty(&gdev->read_fifo)) {
+ mutex_unlock(&gdev->read_mutex);
+
+ if (gdev->disconnected)
+ return 0;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(gdev->read_queue,
+ gdev->disconnected ||
+ !kfifo_is_empty(&gdev->read_fifo));
+ if (ret)
+ return -ERESTARTSYS;
+
+ mutex_lock(&gdev->read_mutex);
+ }
+
+ ret = kfifo_to_user(&gdev->read_fifo, buf, count, &copied);
+ if (ret == 0)
+ ret = copied;
+
+ mutex_unlock(&gdev->read_mutex);
+
+ return ret;
+}
+
+static ssize_t gnss_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct gnss_device *gdev = file->private_data;
+ size_t written = 0;
+ int ret;
+
+ if (gdev->disconnected)
+ return -EIO;
+
+ if (!count)
+ return 0;
+
+ if (!(gdev->flags & GNSS_FLAG_HAS_WRITE_RAW))
+ return -EIO;
+
+ /* Ignoring O_NONBLOCK, write_raw() is synchronous. */
+
+ ret = mutex_lock_interruptible(&gdev->write_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
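+	/*
+	 * Copy the payload through the write buffer and push it out in
+	 * chunks of at most GNSS_WRITE_BUF_SIZE bytes.
+	 */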
+ for (;;) {
+ size_t n = count - written;
+
+ if (n > GNSS_WRITE_BUF_SIZE)
+ n = GNSS_WRITE_BUF_SIZE;
+
+ if (copy_from_user(gdev->write_buf, buf, n)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ /*
+ * Assumes write_raw can always accept GNSS_WRITE_BUF_SIZE
+ * bytes.
+ *
+ * FIXME: revisit
+ */
+ down_read(&gdev->rwsem);
+ if (!gdev->disconnected)
+ ret = gdev->ops->write_raw(gdev, gdev->write_buf, n);
+ else
+ ret = -EIO;
+ up_read(&gdev->rwsem);
+
+ if (ret < 0)
+ break;
+
+ written += ret;
+ buf += ret;
+
+ if (written == count)
+ break;
+ }
+
+ if (written)
+ ret = written;
+out_unlock:
+ mutex_unlock(&gdev->write_mutex);
+
+ return ret;
+}
+
+static __poll_t gnss_poll(struct file *file, poll_table *wait)
+{
+ struct gnss_device *gdev = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &gdev->read_queue, wait);
+
+ if (!kfifo_is_empty(&gdev->read_fifo))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (gdev->disconnected)
+ mask |= EPOLLHUP;
+
+ return mask;
+}
+
+static const struct file_operations gnss_fops = {
+ .owner = THIS_MODULE,
+ .open = gnss_open,
+ .release = gnss_release,
+ .read = gnss_read,
+ .write = gnss_write,
+ .poll = gnss_poll,
+ .llseek = no_llseek,
+};
+
+static struct class *gnss_class;
+
+static void gnss_device_release(struct device *dev)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+
+ kfree(gdev->write_buf);
+ kfifo_free(&gdev->read_fifo);
+ ida_simple_remove(&gnss_minors, gdev->id);
+ kfree(gdev);
+}
+
+struct gnss_device *gnss_allocate_device(struct device *parent)
+{
+ struct gnss_device *gdev;
+ struct device *dev;
+ int id;
+ int ret;
+
+ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return NULL;
+
+ id = ida_simple_get(&gnss_minors, 0, GNSS_MINORS, GFP_KERNEL);
+ if (id < 0) {
+ kfree(gdev);
+ return NULL;
+ }
+
+ gdev->id = id;
+
+ dev = &gdev->dev;
+ device_initialize(dev);
+ dev->devt = gnss_first + id;
+ dev->class = gnss_class;
+ dev->parent = parent;
+ dev->release = gnss_device_release;
+ dev_set_drvdata(dev, gdev);
+ dev_set_name(dev, "gnss%d", id);
+
+ init_rwsem(&gdev->rwsem);
+ mutex_init(&gdev->read_mutex);
+ mutex_init(&gdev->write_mutex);
+ init_waitqueue_head(&gdev->read_queue);
+
+ ret = kfifo_alloc(&gdev->read_fifo, GNSS_READ_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ goto err_put_device;
+
+ gdev->write_buf = kzalloc(GNSS_WRITE_BUF_SIZE, GFP_KERNEL);
+ if (!gdev->write_buf)
+ goto err_put_device;
+
+ cdev_init(&gdev->cdev, &gnss_fops);
+ gdev->cdev.owner = THIS_MODULE;
+
+ return gdev;
+
+err_put_device:
+ put_device(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gnss_allocate_device);
+
+void gnss_put_device(struct gnss_device *gdev)
+{
+ put_device(&gdev->dev);
+}
+EXPORT_SYMBOL_GPL(gnss_put_device);
+
+int gnss_register_device(struct gnss_device *gdev)
+{
+ int ret;
+
+ /* Set a flag which can be accessed without holding the rwsem. */
+ if (gdev->ops->write_raw != NULL)
+ gdev->flags |= GNSS_FLAG_HAS_WRITE_RAW;
+
+ ret = cdev_device_add(&gdev->cdev, &gdev->dev);
+ if (ret) {
+ dev_err(&gdev->dev, "failed to add device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnss_register_device);
+
+void gnss_deregister_device(struct gnss_device *gdev)
+{
+ down_write(&gdev->rwsem);
+ gdev->disconnected = true;
+ if (gdev->count) {
+ wake_up_interruptible(&gdev->read_queue);
+ gdev->ops->close(gdev);
+ }
+ up_write(&gdev->rwsem);
+
+ cdev_device_del(&gdev->cdev, &gdev->dev);
+}
+EXPORT_SYMBOL_GPL(gnss_deregister_device);
+
+/*
+ * Caller guarantees serialisation.
+ *
+ * Must not be called for a closed device.
+ */
+int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ int ret;
+
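+	/* Bytes that do not fit in the read fifo are silently dropped. */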
+ ret = kfifo_in(&gdev->read_fifo, buf, count);
+
+ wake_up_interruptible(&gdev->read_queue);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnss_insert_raw);
+
+static const char * const gnss_type_names[GNSS_TYPE_COUNT] = {
+ [GNSS_TYPE_NMEA] = "NMEA",
+ [GNSS_TYPE_SIRF] = "SiRF",
+ [GNSS_TYPE_UBX] = "UBX",
+};
+
+static const char *gnss_type_name(struct gnss_device *gdev)
+{
+ const char *name = NULL;
+
+ if (gdev->type < GNSS_TYPE_COUNT)
+ name = gnss_type_names[gdev->type];
+
+ if (!name)
+ dev_WARN(&gdev->dev, "type name not defined\n");
+
+ return name;
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+
+ return sprintf(buf, "%s\n", gnss_type_name(gdev));
+}
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *gnss_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gnss);
+
+static int gnss_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+ int ret;
+
+ ret = add_uevent_var(env, "GNSS_TYPE=%s", gnss_type_name(gdev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __init gnss_module_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&gnss_first, 0, GNSS_MINORS, "gnss");
+ if (ret < 0) {
+ pr_err("failed to allocate device numbers: %d\n", ret);
+ return ret;
+ }
+
+ gnss_class = class_create(THIS_MODULE, "gnss");
+ if (IS_ERR(gnss_class)) {
+ ret = PTR_ERR(gnss_class);
+ pr_err("failed to create class: %d\n", ret);
+ goto err_unregister_chrdev;
+ }
+
+ gnss_class->dev_groups = gnss_groups;
+ gnss_class->dev_uevent = gnss_uevent;
+
+ pr_info("GNSS driver registered with major %d\n", MAJOR(gnss_first));
+
+ return 0;
+
+err_unregister_chrdev:
+ unregister_chrdev_region(gnss_first, GNSS_MINORS);
+
+ return ret;
+}
+module_init(gnss_module_init);
+
+static void __exit gnss_module_exit(void)
+{
+ class_destroy(gnss_class);
+ unregister_chrdev_region(gnss_first, GNSS_MINORS);
+ ida_destroy(&gnss_minors);
+}
+module_exit(gnss_module_exit);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("GNSS receiver core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
new file mode 100644
index 000000000000..b01ba4438501
--- /dev/null
+++ b/drivers/gnss/serial.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic serial GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+
+#include "serial.h"
+
+static int gnss_serial_open(struct gnss_device *gdev)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ ret = serdev_device_open(serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, gserial->speed);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = pm_runtime_get_sync(&serdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&serdev->dev);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ serdev_device_close(serdev);
+
+ return ret;
+}
+
+static void gnss_serial_close(struct gnss_device *gdev)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+
+ serdev_device_close(serdev);
+
+ pm_runtime_put(&serdev->dev);
+}
+
+static int gnss_serial_write_raw(struct gnss_device *gdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ /* write is only buffered synchronously */
+ ret = serdev_device_write(serdev, buf, count, 0);
+ if (ret < 0)
+ return ret;
+
+ /* FIXME: determine if interrupted? */
+ serdev_device_wait_until_sent(serdev, 0);
+
+ return count;
+}
+
+static const struct gnss_operations gnss_serial_gnss_ops = {
+ .open = gnss_serial_open,
+ .close = gnss_serial_close,
+ .write_raw = gnss_serial_write_raw,
+};
+
+static int gnss_serial_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct gnss_device *gdev = gserial->gdev;
+
+ return gnss_insert_raw(gdev, buf, count);
+}
+
+static const struct serdev_device_ops gnss_serial_serdev_ops = {
+ .receive_buf = gnss_serial_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int gnss_serial_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ if (!gserial->ops || !gserial->ops->set_power)
+ return 0;
+
+ return gserial->ops->set_power(gserial, state);
+}
+
+/*
+ * FIXME: need to provide subdriver defaults or separate dt parsing from
+ * allocation.
+ */
+static int gnss_serial_parse_dt(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct device_node *node = serdev->dev.of_node;
+ u32 speed = 4800;
+
+ of_property_read_u32(node, "current-speed", &speed);
+
+ gserial->speed = speed;
+
+ return 0;
+}
+
+struct gnss_serial *gnss_serial_allocate(struct serdev_device *serdev,
+ size_t data_size)
+{
+ struct gnss_serial *gserial;
+ struct gnss_device *gdev;
+ int ret;
+
+ gserial = kzalloc(sizeof(*gserial) + data_size, GFP_KERNEL);
+ if (!gserial)
+ return ERR_PTR(-ENOMEM);
+
+ gdev = gnss_allocate_device(&serdev->dev);
+ if (!gdev) {
+ ret = -ENOMEM;
+ goto err_free_gserial;
+ }
+
+ gdev->ops = &gnss_serial_gnss_ops;
+ gnss_set_drvdata(gdev, gserial);
+
+ gserial->serdev = serdev;
+ gserial->gdev = gdev;
+
+ serdev_device_set_drvdata(serdev, gserial);
+ serdev_device_set_client_ops(serdev, &gnss_serial_serdev_ops);
+
+ ret = gnss_serial_parse_dt(serdev);
+ if (ret)
+ goto err_put_device;
+
+ return gserial;
+
+err_put_device:
+ gnss_put_device(gserial->gdev);
+err_free_gserial:
+ kfree(gserial);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_allocate);
+
+void gnss_serial_free(struct gnss_serial *gserial)
+{
+ gnss_put_device(gserial->gdev);
+ kfree(gserial);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_free);
+
+int gnss_serial_register(struct gnss_serial *gserial)
+{
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_enable(&serdev->dev);
+ } else {
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = gnss_register_device(gserial->gdev);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnss_serial_register);
+
+void gnss_serial_deregister(struct gnss_serial *gserial)
+{
+ struct serdev_device *serdev = gserial->serdev;
+
+ gnss_deregister_device(gserial->gdev);
+
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_deregister);
+
+#ifdef CONFIG_PM
+static int gnss_serial_runtime_suspend(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+
+ return gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
+}
+
+static int gnss_serial_runtime_resume(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+
+ return gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+}
+#endif /* CONFIG_PM */
+
+static int gnss_serial_prepare(struct device *dev)
+{
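+	/*
+	 * Returning 1 lets the PM core leave a runtime-suspended device
+	 * suspended across the system sleep transition (direct complete).
+	 */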
+ if (pm_runtime_suspended(dev))
+ return 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gnss_serial_suspend(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+ int ret = 0;
+
+ /*
+ * FIXME: serdev currently lacks support for managing the underlying
+ * device's wakeup settings. A workaround would be to close the serdev
+ * device here if it is open.
+ */
+
+ if (!pm_runtime_suspended(dev))
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
+
+ return ret;
+}
+
+static int gnss_serial_resume(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops gnss_serial_pm_ops = {
+ .prepare = gnss_serial_prepare,
+ SET_SYSTEM_SLEEP_PM_OPS(gnss_serial_suspend, gnss_serial_resume)
+ SET_RUNTIME_PM_OPS(gnss_serial_runtime_suspend, gnss_serial_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_GPL(gnss_serial_pm_ops);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("Generic serial GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/serial.h b/drivers/gnss/serial.h
new file mode 100644
index 000000000000..980ffdc86c2a
--- /dev/null
+++ b/drivers/gnss/serial.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic serial GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#ifndef _LINUX_GNSS_SERIAL_H
+#define _LINUX_GNSS_SERIAL_H
+
+#include <asm/termbits.h>
+#include <linux/pm.h>
+
+struct gnss_serial {
+ struct serdev_device *serdev;
+ struct gnss_device *gdev;
+ speed_t speed;
+ const struct gnss_serial_ops *ops;
+ unsigned long drvdata[0];
+};
+
+enum gnss_serial_pm_state {
+ GNSS_SERIAL_OFF,
+ GNSS_SERIAL_ACTIVE,
+ GNSS_SERIAL_STANDBY,
+};
+
+struct gnss_serial_ops {
+ int (*set_power)(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state);
+};
+
+extern const struct dev_pm_ops gnss_serial_pm_ops;
+
+struct gnss_serial *gnss_serial_allocate(struct serdev_device *serdev,
+					 size_t data_size);
+void gnss_serial_free(struct gnss_serial *gserial);
+
+int gnss_serial_register(struct gnss_serial *gserial);
+void gnss_serial_deregister(struct gnss_serial *gserial);
+
+static inline void *gnss_serial_get_drvdata(struct gnss_serial *gserial)
+{
+ return gserial->drvdata;
+}
+
+#endif /* _LINUX_GNSS_SERIAL_H */
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
new file mode 100644
index 000000000000..79cb98950013
--- /dev/null
+++ b/drivers/gnss/sirf.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiRFstar GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#define SIRF_BOOT_DELAY 500
+#define SIRF_ON_OFF_PULSE_TIME 100
+#define SIRF_ACTIVATE_TIMEOUT 200
+#define SIRF_HIBERNATE_TIMEOUT 200
+
+struct sirf_data {
+ struct gnss_device *gdev;
+ struct serdev_device *serdev;
+ speed_t speed;
+ struct regulator *vcc;
+ struct gpio_desc *on_off;
+ struct gpio_desc *wakeup;
+ int irq;
+ bool active;
+ wait_queue_head_t power_wait;
+};
+
+static int sirf_open(struct gnss_device *gdev)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+ int ret;
+
+ ret = serdev_device_open(serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, data->speed);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = pm_runtime_get_sync(&serdev->dev);
+ if (ret < 0) {
+ dev_err(&gdev->dev, "failed to runtime resume: %d\n", ret);
+ pm_runtime_put_noidle(&serdev->dev);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ serdev_device_close(serdev);
+
+ return ret;
+}
+
+static void sirf_close(struct gnss_device *gdev)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+
+ serdev_device_close(serdev);
+
+ pm_runtime_put(&serdev->dev);
+}
+
+static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+ int ret;
+
+ /* write is only buffered synchronously */
+ ret = serdev_device_write(serdev, buf, count, 0);
+ if (ret < 0)
+ return ret;
+
+ /* FIXME: determine if interrupted? */
+ serdev_device_wait_until_sent(serdev, 0);
+
+ return count;
+}
+
+static const struct gnss_operations sirf_gnss_ops = {
+ .open = sirf_open,
+ .close = sirf_close,
+ .write_raw = sirf_write_raw,
+};
+
+static int sirf_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+ struct gnss_device *gdev = data->gdev;
+
+ return gnss_insert_raw(gdev, buf, count);
+}
+
+static const struct serdev_device_ops sirf_serdev_ops = {
+ .receive_buf = sirf_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id)
+{
+ struct sirf_data *data = dev_id;
+ struct device *dev = &data->serdev->dev;
+ int ret;
+
+ ret = gpiod_get_value_cansleep(data->wakeup);
+ dev_dbg(dev, "%s - wakeup = %d\n", __func__, ret);
+ if (ret < 0)
+ goto out;
+
+ data->active = !!ret;
+ wake_up_interruptible(&data->power_wait);
+out:
+ return IRQ_HANDLED;
+}
+
+static int sirf_wait_for_power_state(struct sirf_data *data, bool active,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_interruptible_timeout(data->power_wait,
+ data->active == active, msecs_to_jiffies(timeout));
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0) {
+ dev_warn(&data->serdev->dev, "timeout waiting for active state = %d\n",
+ active);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sirf_pulse_on_off(struct sirf_data *data)
+{
+ gpiod_set_value_cansleep(data->on_off, 1);
+ msleep(SIRF_ON_OFF_PULSE_TIME);
+ gpiod_set_value_cansleep(data->on_off, 0);
+}
+
+static int sirf_set_active(struct sirf_data *data, bool active)
+{
+ unsigned long timeout;
+ int retries = 3;
+ int ret;
+
+ if (active)
+ timeout = SIRF_ACTIVATE_TIMEOUT;
+ else
+ timeout = SIRF_HIBERNATE_TIMEOUT;
+
+ while (retries-- > 0) {
+ sirf_pulse_on_off(data);
+ ret = sirf_wait_for_power_state(data, active, timeout);
+ if (ret < 0) {
+ if (ret == -ETIMEDOUT)
+ continue;
+
+ return ret;
+ }
+
+ break;
+ }
+
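+	/* retries is negative here only if every attempt timed out. */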
+	if (retries < 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int sirf_runtime_suspend(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+
+ if (!data->on_off)
+ return regulator_disable(data->vcc);
+
+ return sirf_set_active(data, false);
+}
+
+static int sirf_runtime_resume(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+
+ if (!data->on_off)
+ return regulator_enable(data->vcc);
+
+ return sirf_set_active(data, true);
+}
+
+static int __maybe_unused sirf_suspend(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = sirf_runtime_suspend(dev);
+
+ if (data->wakeup)
+ disable_irq(data->irq);
+
+ return ret;
+}
+
+static int __maybe_unused sirf_resume(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (data->wakeup)
+ enable_irq(data->irq);
+
+ if (!pm_runtime_suspended(dev))
+ ret = sirf_runtime_resume(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sirf_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sirf_suspend, sirf_resume)
+ SET_RUNTIME_PM_OPS(sirf_runtime_suspend, sirf_runtime_resume, NULL)
+};
+
+static int sirf_parse_dt(struct serdev_device *serdev)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+ struct device_node *node = serdev->dev.of_node;
+ u32 speed = 9600;
+
+ of_property_read_u32(node, "current-speed", &speed);
+
+ data->speed = speed;
+
+ return 0;
+}
+
+static int sirf_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct gnss_device *gdev;
+ struct sirf_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ gdev = gnss_allocate_device(dev);
+ if (!gdev)
+ return -ENOMEM;
+
+ gdev->type = GNSS_TYPE_SIRF;
+ gdev->ops = &sirf_gnss_ops;
+ gnss_set_drvdata(gdev, data);
+
+ data->serdev = serdev;
+ data->gdev = gdev;
+
+ init_waitqueue_head(&data->power_wait);
+
+ serdev_device_set_drvdata(serdev, data);
+ serdev_device_set_client_ops(serdev, &sirf_serdev_ops);
+
+ ret = sirf_parse_dt(serdev);
+ if (ret)
+ goto err_put_device;
+
+ data->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_put_device;
+ }
+
+ data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(data->on_off))
+ goto err_put_device;
+
+ if (data->on_off) {
+ data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
+ GPIOD_IN);
+ if (IS_ERR(data->wakeup))
+ goto err_put_device;
+
+ /*
+		 * Configurations where the WAKEUP pin is left unconnected
+		 * are currently not supported.
+ */
+ if (!data->wakeup) {
+ dev_err(dev, "no wakeup gpio specified\n");
+ ret = -ENODEV;
+ goto err_put_device;
+ }
+ }
+
+ if (data->wakeup) {
+ ret = gpiod_to_irq(data->wakeup);
+ if (ret < 0)
+ goto err_put_device;
+
+ data->irq = ret;
+
+ ret = devm_request_threaded_irq(dev, data->irq, NULL,
+ sirf_wakeup_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "wakeup", data);
+ if (ret)
+ goto err_put_device;
+ }
+
+ if (data->on_off) {
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ goto err_put_device;
+
+ /* Wait for chip to boot into hibernate mode */
+ msleep(SIRF_BOOT_DELAY);
+ }
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_set_suspended(dev); /* clear runtime_error flag */
+ pm_runtime_enable(dev);
+ } else {
+ ret = sirf_runtime_resume(dev);
+ if (ret < 0)
+ goto err_disable_vcc;
+ }
+
+ ret = gnss_register_device(gdev);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(dev);
+ else
+ sirf_runtime_suspend(dev);
+err_disable_vcc:
+ if (data->on_off)
+ regulator_disable(data->vcc);
+err_put_device:
+ gnss_put_device(data->gdev);
+
+ return ret;
+}
+
+static void sirf_remove(struct serdev_device *serdev)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+
+ gnss_deregister_device(data->gdev);
+
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ sirf_runtime_suspend(&serdev->dev);
+
+ if (data->on_off)
+ regulator_disable(data->vcc);
+
+ gnss_put_device(data->gdev);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sirf_of_match[] = {
+ { .compatible = "fastrax,uc430" },
+ { .compatible = "linx,r4" },
+ { .compatible = "wi2wi,w2sg0008i" },
+ { .compatible = "wi2wi,w2sg0084i" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sirf_of_match);
+#endif
+
+static struct serdev_device_driver sirf_driver = {
+ .driver = {
+ .name = "gnss-sirf",
+ .of_match_table = of_match_ptr(sirf_of_match),
+ .pm = &sirf_pm_ops,
+ },
+ .probe = sirf_probe,
+ .remove = sirf_remove,
+};
+module_serdev_device_driver(sirf_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("SiRFstar GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
new file mode 100644
index 000000000000..12568aebb7f6
--- /dev/null
+++ b/drivers/gnss/ubx.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * u-blox GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+
+#include "serial.h"
+
+struct ubx_data {
+ struct regulator *v_bckp;
+ struct regulator *vcc;
+};
+
+static int ubx_set_active(struct gnss_serial *gserial)
+{
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ubx_set_standby(struct gnss_serial *gserial)
+{
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_disable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ubx_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ switch (state) {
+ case GNSS_SERIAL_ACTIVE:
+ return ubx_set_active(gserial);
+ case GNSS_SERIAL_OFF:
+ case GNSS_SERIAL_STANDBY:
+ return ubx_set_standby(gserial);
+ }
+
+ return -EINVAL;
+}
+
+static const struct gnss_serial_ops ubx_gserial_ops = {
+ .set_power = ubx_set_power,
+};
+
+static int ubx_probe(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial;
+ struct ubx_data *data;
+ int ret;
+
+ gserial = gnss_serial_allocate(serdev, sizeof(*data));
+ if (IS_ERR(gserial)) {
+ ret = PTR_ERR(gserial);
+ return ret;
+ }
+
+ gserial->ops = &ubx_gserial_ops;
+
+ gserial->gdev->type = GNSS_TYPE_UBX;
+
+ data = gnss_serial_get_drvdata(gserial);
+
+ data->vcc = devm_regulator_get(&serdev->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_free_gserial;
+ }
+
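+	/* The backup supply is optional; -ENODEV just means it is absent. */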
+ data->v_bckp = devm_regulator_get_optional(&serdev->dev, "v-bckp");
+ if (IS_ERR(data->v_bckp)) {
+ ret = PTR_ERR(data->v_bckp);
+ if (ret == -ENODEV)
+ data->v_bckp = NULL;
+ else
+ goto err_free_gserial;
+ }
+
+ if (data->v_bckp) {
+ ret = regulator_enable(data->v_bckp);
+ if (ret)
+ goto err_free_gserial;
+ }
+
+ ret = gnss_serial_register(gserial);
+ if (ret)
+ goto err_disable_v_bckp;
+
+ return 0;
+
+err_disable_v_bckp:
+ if (data->v_bckp)
+ regulator_disable(data->v_bckp);
+err_free_gserial:
+ gnss_serial_free(gserial);
+
+ return ret;
+}
+
+static void ubx_remove(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+
+ gnss_serial_deregister(gserial);
+ if (data->v_bckp)
+ regulator_disable(data->v_bckp);
+ gnss_serial_free(gserial);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ubx_of_match[] = {
+ { .compatible = "u-blox,neo-8" },
+ { .compatible = "u-blox,neo-m8" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ubx_of_match);
+#endif
+
+static struct serdev_device_driver ubx_driver = {
+ .driver = {
+ .name = "gnss-ubx",
+ .of_match_table = of_match_ptr(ubx_of_match),
+ .pm = &gnss_serial_pm_ops,
+ },
+ .probe = ubx_probe,
+ .remove = ubx_remove,
+};
+module_serdev_device_driver(ubx_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("u-blox GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 71c0ab46f216..4f52c3a8ec99 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -359,6 +359,15 @@ config GPIO_MPC8XXX
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
+config GPIO_MT7621
+ bool "Mediatek MT7621 GPIO Support"
+ depends on SOC_MT7620 || SOC_MT7621 || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+	  Say yes here to support the Mediatek MT7621 SoC GPIO device.
+
config GPIO_MVEBU
def_bool y
depends on PLAT_ORION || ARCH_MVEBU
@@ -684,7 +693,8 @@ config GPIO_IT87
Say yes here to support GPIO functionality of IT87xx Super I/O chips.
This driver is tested with ITE IT8728 and IT8732 Super I/O chips, and
- supports the IT8761E, IT8620E and IT8628E Super I/O chip as well.
+ supports the IT8761E, IT8613, IT8620E, and IT8628E Super I/O chips as
+ well.
To compile this driver as a module, choose M here: the module will
be called gpio_it87
@@ -1039,6 +1049,12 @@ config GPIO_LP87565
This driver can also be built as a module. If so, the module will be
called gpio-lp87565.
+config GPIO_MADERA
+ tristate "Cirrus Logic Madera class codecs"
+ depends on PINCTRL_MADERA
+ help
+ Support for GPIOs on Cirrus Logic Madera class codecs.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 1324c8f966a7..c256aff66a65 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
+obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
+obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index b31ae16170e7..2342e154029b 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -12,6 +12,7 @@
#include <asm/div64.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/aspeed.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -22,6 +23,15 @@
#include <linux/spinlock.h>
#include <linux/string.h>
+/*
+ * These two headers aren't meant to be used by GPIO drivers. We need
+ * them in order to access gpio_chip_hwgpio(), which we use to implement
+ * the Aspeed-specific API that allows the coprocessor to request
+ * access to some GPIOs and to arbitrate between coprocessor and ARM.
+ */
+#include <linux/gpio/consumer.h>
+#include "gpiolib.h"
+
struct aspeed_bank_props {
unsigned int bank;
u32 input;
@@ -56,83 +66,130 @@ struct aspeed_gpio {
struct clk *clk;
u32 *dcache;
+ u8 *cf_copro_bankmap;
};
struct aspeed_gpio_bank {
- uint16_t val_regs;
+ uint16_t val_regs; /* +0: Rd: read input value, Wr: set write latch
+ * +4: Rd/Wr: Direction (0=in, 1=out)
+ */
+ uint16_t rdata_reg; /* Rd: read write latch, Wr: <none> */
uint16_t irq_regs;
uint16_t debounce_regs;
uint16_t tolerance_regs;
+ uint16_t cmdsrc_regs;
const char names[4][3];
};
+/*
+ * Note: The "value" register returns the input value sampled on the
+ * line even when the GPIO is configured as an output. Since
+ * that input goes through synchronizers, writing, then reading
+ * back may not return the written value right away.
+ *
+ * The "rdata" register returns the content of the write latch
+ * and thus can be used to read back what was last written
+ * reliably.
+ */
+
static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 };
+static const struct aspeed_gpio_copro_ops *copro_ops;
+static void *copro_data;
+
static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
{
.val_regs = 0x0000,
+ .rdata_reg = 0x00c0,
.irq_regs = 0x0008,
.debounce_regs = 0x0040,
.tolerance_regs = 0x001c,
+ .cmdsrc_regs = 0x0060,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
+ .rdata_reg = 0x00c4,
.irq_regs = 0x0028,
.debounce_regs = 0x0048,
.tolerance_regs = 0x003c,
+ .cmdsrc_regs = 0x0068,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
+ .rdata_reg = 0x00c8,
.irq_regs = 0x0098,
.debounce_regs = 0x00b0,
.tolerance_regs = 0x00ac,
+ .cmdsrc_regs = 0x0090,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
+ .rdata_reg = 0x00cc,
.irq_regs = 0x00e8,
.debounce_regs = 0x0100,
.tolerance_regs = 0x00fc,
+ .cmdsrc_regs = 0x00e0,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
+ .rdata_reg = 0x00d0,
.irq_regs = 0x0118,
.debounce_regs = 0x0130,
.tolerance_regs = 0x012c,
+ .cmdsrc_regs = 0x0110,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
+ .rdata_reg = 0x00d4,
.irq_regs = 0x0148,
.debounce_regs = 0x0160,
.tolerance_regs = 0x015c,
+ .cmdsrc_regs = 0x0140,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
+ .rdata_reg = 0x00d8,
.irq_regs = 0x0178,
.debounce_regs = 0x0190,
.tolerance_regs = 0x018c,
+ .cmdsrc_regs = 0x0170,
.names = { "Y", "Z", "AA", "AB" },
},
{
.val_regs = 0x01e8,
+ .rdata_reg = 0x00dc,
.irq_regs = 0x01a8,
.debounce_regs = 0x01c0,
.tolerance_regs = 0x01bc,
+ .cmdsrc_regs = 0x01a0,
.names = { "AC", "", "", "" },
},
};
-#define GPIO_BANK(x) ((x) >> 5)
-#define GPIO_OFFSET(x) ((x) & 0x1f)
-#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+enum aspeed_gpio_reg {
+ reg_val,
+ reg_rdata,
+ reg_dir,
+ reg_irq_enable,
+ reg_irq_type0,
+ reg_irq_type1,
+ reg_irq_type2,
+ reg_irq_status,
+ reg_debounce_sel1,
+ reg_debounce_sel2,
+ reg_tolerance,
+ reg_cmdsrc0,
+ reg_cmdsrc1,
+};
-#define GPIO_DATA 0x00
-#define GPIO_DIR 0x04
+#define GPIO_VAL_VALUE 0x00
+#define GPIO_VAL_DIR 0x04
#define GPIO_IRQ_ENABLE 0x00
#define GPIO_IRQ_TYPE0 0x04
@@ -143,6 +200,53 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
#define GPIO_DEBOUNCE_SEL1 0x00
#define GPIO_DEBOUNCE_SEL2 0x04
+#define GPIO_CMDSRC_0 0x00
+#define GPIO_CMDSRC_1 0x04
+#define GPIO_CMDSRC_ARM 0
+#define GPIO_CMDSRC_LPC 1
+#define GPIO_CMDSRC_COLDFIRE 2
+#define GPIO_CMDSRC_RESERVED 3
+
+/* This will be resolved at compile time */
+static inline void __iomem *bank_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ const enum aspeed_gpio_reg reg)
+{
+ switch (reg) {
+ case reg_val:
+ return gpio->base + bank->val_regs + GPIO_VAL_VALUE;
+ case reg_rdata:
+ return gpio->base + bank->rdata_reg;
+ case reg_dir:
+ return gpio->base + bank->val_regs + GPIO_VAL_DIR;
+ case reg_irq_enable:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_ENABLE;
+ case reg_irq_type0:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE0;
+ case reg_irq_type1:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE1;
+ case reg_irq_type2:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2;
+ case reg_irq_status:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
+ case reg_debounce_sel1:
+ return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL1;
+ case reg_debounce_sel2:
+ return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL2;
+ case reg_tolerance:
+ return gpio->base + bank->tolerance_regs;
+ case reg_cmdsrc0:
+ return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_0;
+ case reg_cmdsrc1:
+ return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_1;
+ }
+ BUG();
+}
+
+#define GPIO_BANK(x) ((x) >> 5)
+#define GPIO_OFFSET(x) ((x) & 0x1f)
+#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+
#define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o))
#define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1)
#define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0)
@@ -201,18 +305,80 @@ static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
return !props || (props->output & GPIO_BIT(offset));
}
-static void __iomem *bank_val_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
+static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ int bindex, int cmdsrc)
{
- return gpio->base + bank->val_regs + reg;
+ void __iomem *c0 = bank_reg(gpio, bank, reg_cmdsrc0);
+ void __iomem *c1 = bank_reg(gpio, bank, reg_cmdsrc1);
+ u32 bit, reg;
+
+ /*
+ * Each register controls 4 banks, so take the bottom 2
+ * bits of the bank index, and use them to select the
+ * right control bit (0, 8, 16 or 24).
+ */
+ bit = BIT((bindex & 3) << 3);
+
+ /* Source 1 first to avoid illegal 11 combination */
+ reg = ioread32(c1);
+ if (cmdsrc & 2)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ iowrite32(reg, c1);
+
+ /* Then Source 0 */
+ reg = ioread32(c0);
+ if (cmdsrc & 1)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ iowrite32(reg, c0);
}
-static void __iomem *bank_irq_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
+static bool aspeed_gpio_copro_request(struct aspeed_gpio *gpio,
+ unsigned int offset)
{
- return gpio->base + bank->irq_regs + reg;
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return false;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return false;
+ if (!copro_ops->request_access)
+ return false;
+
+ /* Pause the coprocessor */
+ copro_ops->request_access(copro_data);
+
+ /* Change command source back to ARM */
+ aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, GPIO_CMDSRC_ARM);
+
+ /* Update cache */
+ gpio->dcache[GPIO_BANK(offset)] = ioread32(bank_reg(gpio, bank, reg_rdata));
+
+ return true;
+}
+
+static void aspeed_gpio_copro_release(struct aspeed_gpio *gpio,
+ unsigned int offset)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return;
+ if (!copro_ops->release_access)
+ return;
+
+ /* Change command source back to ColdFire */
+ aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3,
+ GPIO_CMDSRC_COLDFIRE);
+
+ /* Restart the coprocessor */
+ copro_ops->release_access(copro_data);
}
static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -220,8 +386,7 @@ static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
- return !!(ioread32(bank_val_reg(gpio, bank, GPIO_DATA))
- & GPIO_BIT(offset));
+ return !!(ioread32(bank_reg(gpio, bank, reg_val)) & GPIO_BIT(offset));
}
static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
@@ -232,7 +397,7 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
void __iomem *addr;
u32 reg;
- addr = bank_val_reg(gpio, bank, GPIO_DATA);
+ addr = bank_reg(gpio, bank, reg_val);
reg = gpio->dcache[GPIO_BANK(offset)];
if (val)
@@ -249,11 +414,15 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
unsigned long flags;
+ bool copro;
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -261,7 +430,9 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
+ bool copro;
u32 reg;
if (!have_input(gpio, offset))
@@ -269,8 +440,13 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
spin_lock_irqsave(&gpio->lock, flags);
- reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
- iowrite32(reg & ~GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+ reg = ioread32(addr);
+ reg &= ~GPIO_BIT(offset);
+
+ copro = aspeed_gpio_copro_request(gpio, offset);
+ iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
@@ -282,7 +458,9 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
+ bool copro;
u32 reg;
if (!have_output(gpio, offset))
@@ -290,10 +468,15 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
spin_lock_irqsave(&gpio->lock, flags);
+ reg = ioread32(addr);
+ reg |= GPIO_BIT(offset);
+
+ copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
- reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
- iowrite32(reg | GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+ iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
@@ -314,7 +497,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
spin_lock_irqsave(&gpio->lock, flags);
- val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset);
+ val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset);
spin_unlock_irqrestore(&gpio->lock, flags);
@@ -323,24 +506,23 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
}
static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
- struct aspeed_gpio **gpio,
- const struct aspeed_gpio_bank **bank,
- u32 *bit)
+ struct aspeed_gpio **gpio,
+ const struct aspeed_gpio_bank **bank,
+ u32 *bit, int *offset)
{
- int offset;
struct aspeed_gpio *internal;
- offset = irqd_to_hwirq(d);
+ *offset = irqd_to_hwirq(d);
internal = irq_data_get_irq_chip_data(d);
/* This might be a bit of a questionable place to check */
- if (!have_irq(internal, offset))
+ if (!have_irq(internal, *offset))
return -ENOTSUPP;
*gpio = internal;
- *bank = to_bank(offset);
- *bit = GPIO_BIT(offset);
+ *bank = to_bank(*offset);
+ *bit = GPIO_BIT(*offset);
return 0;
}
@@ -351,17 +533,23 @@ static void aspeed_gpio_irq_ack(struct irq_data *d)
struct aspeed_gpio *gpio;
unsigned long flags;
void __iomem *status_addr;
+ int rc, offset;
+ bool copro;
u32 bit;
- int rc;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return;
- status_addr = bank_irq_reg(gpio, bank, GPIO_IRQ_STATUS);
+ status_addr = bank_reg(gpio, bank, reg_irq_status);
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
+
iowrite32(bit, status_addr);
+
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -372,15 +560,17 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
unsigned long flags;
u32 reg, bit;
void __iomem *addr;
- int rc;
+ int rc, offset;
+ bool copro;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return;
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_ENABLE);
+ addr = bank_reg(gpio, bank, reg_irq_enable);
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
reg = ioread32(addr);
if (set)
@@ -389,6 +579,8 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
reg &= ~bit;
iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -413,9 +605,10 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
struct aspeed_gpio *gpio;
unsigned long flags;
void __iomem *addr;
- int rc;
+ int rc, offset;
+ bool copro;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return -EINVAL;
@@ -441,22 +634,25 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
}
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE0);
+ addr = bank_reg(gpio, bank, reg_irq_type0);
reg = ioread32(addr);
reg = (reg & ~bit) | type0;
iowrite32(reg, addr);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE1);
+ addr = bank_reg(gpio, bank, reg_irq_type1);
reg = ioread32(addr);
reg = (reg & ~bit) | type1;
iowrite32(reg, addr);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE2);
+ addr = bank_reg(gpio, bank, reg_irq_type2);
reg = ioread32(addr);
reg = (reg & ~bit) | type2;
iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
irq_set_handler_locked(d, handler);
@@ -477,7 +673,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) {
const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS));
+ reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32) {
girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
@@ -549,21 +745,27 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- const struct aspeed_gpio_bank *bank;
unsigned long flags;
+ void __iomem *treg;
+ bool copro;
u32 val;
- bank = to_bank(offset);
+ treg = bank_reg(gpio, to_bank(offset), reg_tolerance);
spin_lock_irqsave(&gpio->lock, flags);
- val = readl(gpio->base + bank->tolerance_regs);
+ copro = aspeed_gpio_copro_request(gpio, offset);
+
+ val = readl(treg);
if (enable)
val |= GPIO_BIT(offset);
else
val &= ~GPIO_BIT(offset);
- writel(val, gpio->base + bank->tolerance_regs);
+ writel(val, treg);
+
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
@@ -582,13 +784,6 @@ static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
pinctrl_gpio_free(chip->base + offset);
}
-static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
-{
- return gpio->base + bank->debounce_regs + reg;
-}
-
static int usecs_to_cycles(struct aspeed_gpio *gpio, unsigned long usecs,
u32 *cycles)
{
@@ -666,11 +861,14 @@ static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset,
void __iomem *addr;
u32 val;
- addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL1);
+ /* Note: Debounce timer isn't under control of the command
+ * source registers, so no need to sync with the coprocessor
+ */
+ addr = bank_reg(gpio, bank, reg_debounce_sel1);
val = ioread32(addr);
iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr);
- addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL2);
+ addr = bank_reg(gpio, bank, reg_debounce_sel2);
val = ioread32(addr);
iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr);
}
@@ -812,6 +1010,111 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return -ENOTSUPP;
}
+/**
+ * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with
+ * the coprocessor for shared GPIO banks
+ * @ops: The callbacks
+ * @data: Pointer passed back to the callbacks
+ */
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data)
+{
+ copro_data = data;
+ copro_ops = ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_set_ops);
+
+/**
+ * aspeed_gpio_copro_grab_gpio - Mark a GPIO used by the coprocessor. The entire
+ * bank gets marked and any access from the ARM will
+ * result in handshaking via callbacks.
+ * @desc: The GPIO to be marked
+ * @vreg_offset: If non-NULL, returns the value register offset in the GPIO space
+ * @dreg_offset: If non-NULL, returns the data latch register offset in the GPIO space
+ * @bit: If non-NULL, returns the bit number of the GPIO in the registers
+ */
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit)
+{
+ struct gpio_chip *chip = gpiod_to_chip(desc);
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+
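+	/*
+	 * cf_copro_bankmap holds one usage count per bank of eight GPIOs
+	 * grabbed by the coprocessor.
+	 */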
+ if (!gpio->cf_copro_bankmap)
+ gpio->cf_copro_bankmap = kzalloc(gpio->config->nr_gpios >> 3, GFP_KERNEL);
+ if (!gpio->cf_copro_bankmap)
+ return -ENOMEM;
+	if (offset < 0 || offset >= gpio->config->nr_gpios)
+ return -EINVAL;
+ bindex = offset >> 3;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ /* Sanity check, this shouldn't happen */
+ if (gpio->cf_copro_bankmap[bindex] == 0xff) {
+ rc = -EIO;
+ goto bail;
+ }
+ gpio->cf_copro_bankmap[bindex]++;
+
+ /* Switch command source */
+ if (gpio->cf_copro_bankmap[bindex] == 1)
+ aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ GPIO_CMDSRC_COLDFIRE);
+
+ if (vreg_offset)
+ *vreg_offset = bank->val_regs;
+ if (dreg_offset)
+ *dreg_offset = bank->rdata_reg;
+ if (bit)
+ *bit = GPIO_OFFSET(offset);
+ bail:
+ spin_unlock_irqrestore(&gpio->lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio);
+
+/**
+ * aspeed_gpio_copro_release_gpio - Unmark a GPIO used by the coprocessor.
+ * @desc: The GPIO to be marked
+ */
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
+{
+ struct gpio_chip *chip = gpiod_to_chip(desc);
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+
+ if (!gpio->cf_copro_bankmap)
+ return -ENXIO;
+
+	if (offset < 0 || offset >= gpio->config->nr_gpios)
+ return -EINVAL;
+ bindex = offset >> 3;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ /* Sanity check, this shouldn't happen */
+ if (gpio->cf_copro_bankmap[bindex] == 0) {
+ rc = -EIO;
+ goto bail;
+ }
+ gpio->cf_copro_bankmap[bindex]--;
+
+ /* Switch command source */
+ if (gpio->cf_copro_bankmap[bindex] == 0)
+ aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ GPIO_CMDSRC_ARM);
+ bail:
+ spin_unlock_irqrestore(&gpio->lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
+
/*
* Any banks not specified in a struct aspeed_bank_props array are assumed to
* have the properties:
@@ -902,11 +1205,18 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (!gpio->dcache)
return -ENOMEM;
- /* Populate it with initial values read from the HW */
+ /*
+ * Populate it with initial values read from the HW and switch
+ * all command sources to the ARM by default
+ */
for (i = 0; i < banks; i++) {
const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- gpio->dcache[i] = ioread32(gpio->base + bank->val_regs +
- GPIO_DATA);
+ void __iomem *addr = bank_reg(gpio, bank, reg_rdata);
+ gpio->dcache[i] = ioread32(addr);
+ aspeed_gpio_change_cmd_source(gpio, bank, 0, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 1, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 2, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
}
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 684e9d6d6623..0a553d676042 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -51,7 +51,7 @@ static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
static void ath79_gpio_write(struct ath79_gpio_ctrl *ctrl,
unsigned reg, u32 val)
{
- return writel(val, ctrl->base + reg);
+ writel(val, ctrl->base + reg);
}
static bool ath79_gpio_update_bits(
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 00272fa7cc4f..d0707fc23afd 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -485,12 +485,14 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ int ret;
- if (gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq)) {
+ ret = gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
+ if (ret) {
dev_err(kona_gpio->gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 035a454eca43..a5ece8ea79bc 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -167,8 +167,8 @@ of_err:
static int davinci_gpio_probe(struct platform_device *pdev)
{
static int ctrl_num, bank_base;
- int gpio, bank, ret = 0;
- unsigned ngpio, nbank;
+ int gpio, bank, i, ret = 0;
+ unsigned int ngpio, nbank, nirq;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
struct device *dev = &pdev->dev;
@@ -197,6 +197,16 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (WARN_ON(ARCH_NR_GPIOS < ngpio))
ngpio = ARCH_NR_GPIOS;
+ /*
+	 * If there are unbanked interrupts, the number of interrupts equals
+	 * the number of unbanked gpios; otherwise all interrupts are banked
+	 * and their number equals the number of banks (each covering 16
+	 * gpios).
+ */
+ if (pdata->gpio_unbanked)
+ nirq = pdata->gpio_unbanked;
+ else
+ nirq = DIV_ROUND_UP(ngpio, 16);
+
nbank = DIV_ROUND_UP(ngpio, 32);
chips = devm_kcalloc(dev,
nbank, sizeof(struct davinci_gpio_controller),
@@ -209,6 +219,15 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_base))
return PTR_ERR(gpio_base);
+ for (i = 0; i < nirq; i++) {
+ chips->irqs[i] = platform_get_irq(pdev, i);
+ if (chips->irqs[i] < 0) {
+ dev_info(dev, "IRQ not populated, err = %d\n",
+ chips->irqs[i]);
+ return chips->irqs[i];
+ }
+ }
+
snprintf(label, MAX_LABEL_SIZE, "davinci_gpio.%d", ctrl_num++);
chips->chip.label = devm_kstrdup(dev, label, GFP_KERNEL);
if (!chips->chip.label)
@@ -377,7 +396,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
* can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
*/
if (offset < d->gpio_unbanked)
- return d->base_irq + offset;
+ return d->irqs[offset];
else
return -ENODEV;
}
@@ -386,11 +405,18 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
{
struct davinci_gpio_controller *d;
struct davinci_gpio_regs __iomem *g;
- u32 mask;
+ u32 mask, i;
d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
g = (struct davinci_gpio_regs __iomem *)d->regs[0];
- mask = __gpio_mask(data->irq - d->base_irq);
+ for (i = 0; i < MAX_INT_PER_BANK; i++)
+ if (data->irq == d->irqs[i])
+ break;
+
+ if (i == MAX_INT_PER_BANK)
+ return -EINVAL;
+
+ mask = __gpio_mask(i);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
@@ -459,9 +485,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
int ret;
struct clk *clk;
u32 binten = 0;
- unsigned ngpio, bank_irq;
+ unsigned ngpio;
struct device *dev = &pdev->dev;
- struct resource *res;
struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
struct davinci_gpio_platform_data *pdata = dev->platform_data;
struct davinci_gpio_regs __iomem *g;
@@ -481,24 +506,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)match->data;
ngpio = pdata->ngpio;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "Invalid IRQ resource\n");
- return -EBUSY;
- }
-
- bank_irq = res->start;
-
- if (!bank_irq) {
- dev_err(dev, "Invalid IRQ resource\n");
- return -ENODEV;
- }
clk = devm_clk_get(dev, "gpio");
if (IS_ERR(clk)) {
dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
+
ret = clk_prepare_enable(clk);
if (ret)
return ret;
@@ -538,12 +552,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
if (pdata->gpio_unbanked) {
/* pass "bank 0" GPIO IRQs to AINTC */
chips->chip.to_irq = gpio_to_irq_unbanked;
- chips->base_irq = bank_irq;
chips->gpio_unbanked = pdata->gpio_unbanked;
binten = GENMASK(pdata->gpio_unbanked / 16, 0);
/* AINTC handles mask/unmask; GPIO handles triggering */
- irq = bank_irq;
+ irq = chips->irqs[0];
irq_chip = gpio_get_irq_chip(irq);
irq_chip->name = "GPIO-AINTC";
irq_chip->irq_set_type = gpio_irq_type_unbanked;
@@ -554,10 +567,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
writel_relaxed(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
- for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
- irq_set_chip(irq, irq_chip);
- irq_set_handler_data(irq, chips);
- irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
+ for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++) {
+ irq_set_chip(chips->irqs[gpio], irq_chip);
+ irq_set_handler_data(chips->irqs[gpio], chips);
+ irq_set_status_flags(chips->irqs[gpio],
+ IRQ_TYPE_EDGE_BOTH);
}
goto done;
@@ -567,7 +581,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
* Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
* then chain through our own handler.
*/
- for (gpio = 0, bank = 0; gpio < ngpio; bank++, bank_irq++, gpio += 16) {
+ for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 16) {
/* disabled by default, enabled only as needed.
 * There is one register set per 32 GPIOs; each set covers
 * two banks of 16 GPIOs, hence the divide by 2.
@@ -594,8 +608,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
irqdata->bank_num = bank;
irqdata->chip = chips;
- irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
- irqdata);
+ irq_set_chained_handler_and_data(chips->irqs[bank],
+ gpio_irq_handler, irqdata);
binten |= BIT(bank);
}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 7a2de3de6571..28da700f5f52 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -255,11 +255,13 @@ static int dwapb_irq_reqres(struct irq_data *d)
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct gpio_chip *gc = &gpio->ports[0].gc;
+ int ret;
- if (gpiochip_lock_as_irq(gc, irqd_to_hwirq(d))) {
+ ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
+ if (ret) {
dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 2b466b80e70a..982e699a5b81 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -101,12 +101,14 @@ static void em_gio_irq_enable(struct irq_data *d)
static int em_gio_irq_reqres(struct irq_data *d)
{
struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
+ int ret;
- if (gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d))) {
+ ret = gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
+ if (ret) {
dev_err(p->gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- return -EINVAL;
+ return ret;
}
return 0;
}
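
The gpio-dwapb and gpio-em hunks apply the same one-line policy fix: the request-resources callback now propagates whatever error gpiochip_lock_as_irq() returned instead of flattening it to -EINVAL. A condensed sketch of the shared pattern (the my_gpio type is hypothetical):

#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct my_gpio {
	struct gpio_chip gpio_chip;	/* hypothetical driver state */
};

static int my_gpio_irq_reqres(struct irq_data *d)
{
	struct my_gpio *p = irq_data_get_irq_chip_data(d);
	int ret;

	ret = gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
	if (ret) {
		dev_err(p->gpio_chip.parent,
			"unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return ret;	/* keep the original code, do not mask it */
	}

	return 0;
}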
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 7cad14d3f127..389ecd8b7d26 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -35,12 +35,15 @@
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
+#define IT8613_ID 0x8613
#define IT8620_ID 0x8620
#define IT8628_ID 0x8628
+#define IT8718_ID 0x8718
#define IT8728_ID 0x8728
#define IT8732_ID 0x8732
#define IT8761_ID 0x8761
#define IT8772_ID 0x8772
+#define IT8786_ID 0x8786
/* IO Ports */
#define REG 0x2e
@@ -306,6 +309,14 @@ static int __init it87_gpio_init(void)
it87_gpio->chip = it87_template_chip;
switch (chip_type) {
+ case IT8613_ID:
+ gpio_ba_reg = 0x62;
+ it87_gpio->io_size = 8; /* it8613 only needs 6, use 8 for alignment */
+ it87_gpio->output_base = 0xc8;
+ it87_gpio->simple_base = 0xc0;
+ it87_gpio->simple_size = 6;
+ it87_gpio->chip.ngpio = 64; /* has 48, use 64 for convenient calc */
+ break;
case IT8620_ID:
case IT8628_ID:
gpio_ba_reg = 0x62;
@@ -314,9 +325,11 @@ static int __init it87_gpio_init(void)
it87_gpio->simple_size = 0;
it87_gpio->chip.ngpio = 64;
break;
+ case IT8718_ID:
case IT8728_ID:
case IT8732_ID:
case IT8772_ID:
+ case IT8786_ID:
gpio_ba_reg = 0x62;
it87_gpio->io_size = 8;
it87_gpio->output_base = 0xc8;
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
new file mode 100644
index 000000000000..7ba68d1a0932
--- /dev/null
+++ b/drivers/gpio/gpio-madera.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPIO support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mfd/madera/registers.h>
+
+struct madera_gpio {
+ struct madera *madera;
+ /* storage space for the gpio_chip we're using */
+ struct gpio_chip gpio_chip;
+};
+
+static int madera_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_2 + reg_offset,
+ &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & MADERA_GP1_DIR_MASK);
+}
+
+static int madera_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_2 + reg_offset,
+ MADERA_GP1_DIR_MASK, MADERA_GP1_DIR);
+}
+
+static int madera_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_1 + reg_offset,
+ &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & MADERA_GP1_LVL_MASK);
+}
+
+static int madera_gpio_direction_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
+ int ret;
+
+ ret = regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_2 + reg_offset,
+ MADERA_GP1_DIR_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
+}
+
+static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
+ int ret;
+
+ ret = regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
+
+ /* set() doesn't return an error so log a warning */
+ if (ret)
+ dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n",
+ MADERA_GPIO1_CTRL_1 + reg_offset, ret);
+}
+
+static struct gpio_chip madera_gpio_chip = {
+ .label = "madera",
+ .owner = THIS_MODULE,
+ .request = gpiochip_generic_request,
+ .free = gpiochip_generic_free,
+ .get_direction = madera_gpio_get_direction,
+ .direction_input = madera_gpio_direction_in,
+ .get = madera_gpio_get,
+ .direction_output = madera_gpio_direction_out,
+ .set = madera_gpio_set,
+ .set_config = gpiochip_generic_config,
+ .can_sleep = true,
+};
+
+static int madera_gpio_probe(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+ struct madera_pdata *pdata = dev_get_platdata(madera->dev);
+ struct madera_gpio *madera_gpio;
+ int ret;
+
+ madera_gpio = devm_kzalloc(&pdev->dev, sizeof(*madera_gpio),
+ GFP_KERNEL);
+ if (!madera_gpio)
+ return -ENOMEM;
+
+ madera_gpio->madera = madera;
+
+ /* Construct suitable gpio_chip from the template in madera_gpio_chip */
+ madera_gpio->gpio_chip = madera_gpio_chip;
+ madera_gpio->gpio_chip.parent = pdev->dev.parent;
+
+ switch (madera->type) {
+ case CS47L35:
+ madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS;
+ break;
+ case CS47L85:
+ case WM1840:
+ madera_gpio->gpio_chip.ngpio = CS47L85_NUM_GPIOS;
+ break;
+ case CS47L90:
+ case CS47L91:
+ madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type);
+ return -EINVAL;
+ }
+
+ /* We want to be usable on systems that don't use devicetree or acpi */
+ if (pdata && pdata->gpio_base)
+ madera_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ madera_gpio->gpio_chip.base = -1;
+
+ ret = devm_gpiochip_add_data(&pdev->dev,
+ &madera_gpio->gpio_chip,
+ madera_gpio);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * This is part of a composite MFD device which can only be used with
+ * the corresponding pinctrl driver. On all supported silicon the GPIO
+ * to pinctrl mapping is fixed in the silicon, so we register it
+ * explicitly instead of requiring a redundant gpio-ranges in the
+ * devicetree.
+ * In any case we also want to work on systems that don't use devicetree
+ * or acpi.
+ */
+ ret = gpiochip_add_pin_range(&madera_gpio->gpio_chip, "madera-pinctrl",
+ 0, 0, madera_gpio->gpio_chip.ngpio);
+ if (ret) {
+ dev_dbg(&pdev->dev, "Failed to add pin range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver madera_gpio_driver = {
+ .driver = {
+ .name = "madera-gpio",
+ },
+ .probe = madera_gpio_probe,
+};
+
+module_platform_driver(madera_gpio_driver);
+
+MODULE_SOFTDEP("pre: pinctrl-madera");
+MODULE_DESCRIPTION("GPIO interface for Madera codecs");
+MODULE_AUTHOR("Nariman Poushin <nariman@opensource.cirrus.com>");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:madera-gpio");
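
Since every register access in the new madera driver goes through regmap, the chip declares can_sleep = true; consumers must therefore use the *_cansleep accessors and never touch these lines from hard-IRQ context. A hedged consumer-side sketch (the "led" function name is made up for illustration):

#include <linux/gpio/consumer.h>

static int example_use_madera_gpio(struct device *dev)
{
	struct gpio_desc *led;

	led = devm_gpiod_get(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	/* can_sleep chip: use the _cansleep variant, process context only */
	gpiod_set_value_cansleep(led, 1);

	return 0;
}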
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 9d8bcc69f245..f03cb0ba7726 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -653,6 +653,12 @@ static int max732x_probe(struct i2c_client *client,
chip->client_group_a = client;
if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_b);
+ if (!c) {
+ dev_err(&client->dev,
+ "Failed to allocate I2C device\n");
+ ret = -ENODEV;
+ goto out_failed;
+ }
chip->client_group_b = chip->client_dummy = c;
}
break;
@@ -660,6 +666,12 @@ static int max732x_probe(struct i2c_client *client,
chip->client_group_b = client;
if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_a);
+ if (!c) {
+ dev_err(&client->dev,
+ "Failed to allocate I2C device\n");
+ ret = -ENODEV;
+ goto out_failed;
+ }
chip->client_group_a = chip->client_dummy = c;
}
break;
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index e1037582e34d..b2635326546e 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -56,9 +56,9 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
rnd = fls(debounce) - 1;
if (rnd && (debounce & BIT(rnd - 1)))
- debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+ debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
else
- debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+ debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
if (debounce > MEN_Z127_DB_MAX_US)
debounce = MEN_Z127_DB_MAX_US;
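
The round_up()/roundup() swap above is a correctness fix, not a style one: the kernel's round_up()/round_down() macros are bitmask tricks that only work when the multiple is a power of two, which the MEN_Z127_DB_MIN_US step is not, while roundup()/rounddown() divide and multiply and so handle any positive multiple. A minimal illustration:

#include <linux/kernel.h>	/* roundup()/rounddown() and, on older trees, round_up() */

/*
 * round_up(x, y) expands to ((x - 1) | (y - 1)) + 1 and silently returns
 * nonsense for non-power-of-two y; roundup(x, y) expands to
 * ((x + y - 1) / y) * y and is correct for any positive y.
 */
static unsigned int quantize_debounce(unsigned int us, unsigned int step)
{
	return roundup(us, step);	/* e.g. roundup(120, 50) == 150 */
}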
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index b23d9a36be1f..51c7d1b84c2e 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -496,9 +496,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;
err_gpiochip_add:
+ chip = chip_save;
while (--i >= 0) {
- chip--;
gpiochip_remove(&chip->gpio);
+ chip++;
}
kfree(chip_save);
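
The gpio-ml-ioh unwind fix is worth spelling out: the old loop decremented the cursor before each gpiochip_remove(), which could walk the pointer below the allocation, while the new code resets the cursor to the saved base and tears down the i successfully-registered chips walking forward. As a generic sketch (the type and teardown helper are hypothetical):

struct my_unit { int dummy; };		/* hypothetical instance type */

static void my_unit_teardown(struct my_unit *u)
{
	/* release whatever my_unit holds */
}

static void unwind_registered(struct my_unit *base, int registered)
{
	struct my_unit *u = base;	/* restart from the saved base pointer */

	while (--registered >= 0)
		my_unit_teardown(u++);
}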
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 7b14d6280e44..935292a30c99 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -136,8 +136,20 @@ static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long pinmask = bgpio_line2mask(gc, gpio);
+ bool dir = !!(gc->bgpio_dir & pinmask);
- if (gc->bgpio_dir & pinmask)
+ /*
+ * If the direction is OUT we read the value from the SET
+ * register, and if the direction is IN we read the value
+ * from the DAT register.
+ *
+ * If the direction bits are inverted, naturally this gets
+ * inverted too.
+ */
+ if (gc->bgpio_dir_inverted)
+ dir = !dir;
+
+ if (dir)
return !!(gc->read_reg(gc->reg_set) & pinmask);
else
return !!(gc->read_reg(gc->reg_dat) & pinmask);
@@ -157,8 +169,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
*bits &= ~*mask;
/* Exploit the fact that we know which directions are set */
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
+ if (gc->bgpio_dir_inverted) {
+ set_mask = *mask & ~gc->bgpio_dir;
+ get_mask = *mask & gc->bgpio_dir;
+ } else {
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
+ }
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -359,7 +376,10 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ if (gc->bgpio_dir_inverted)
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+ else
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -370,7 +390,10 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ if (gc->bgpio_dir_inverted)
+ return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ else
+ return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -381,37 +404,10 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
- spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
- return 0;
-}
-
-static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
- spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
- return 0;
-}
-
-static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- unsigned long flags;
-
- gc->set(gc, gpio, val);
-
- spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ if (gc->bgpio_dir_inverted)
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ else
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -419,12 +415,6 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
-{
- /* Return 0 if output, 1 if input */
- return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
-}
-
static int bgpio_setup_accessors(struct device *dev,
struct gpio_chip *gc,
bool byte_be)
@@ -560,9 +550,10 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
gc->get_direction = bgpio_get_dir;
} else if (dirin) {
gc->reg_dir = dirin;
- gc->direction_output = bgpio_dir_out_inv;
- gc->direction_input = bgpio_dir_in_inv;
- gc->get_direction = bgpio_get_dir_inv;
+ gc->direction_output = bgpio_dir_out;
+ gc->direction_input = bgpio_dir_in;
+ gc->get_direction = bgpio_get_dir;
+ gc->bgpio_dir_inverted = true;
} else {
if (flags & BGPIOF_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
@@ -582,6 +573,33 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
return -EINVAL;
}
+/**
+ * bgpio_init() - Initialize generic GPIO accessor functions
+ * @gc: the GPIO chip to set up
+ * @dev: the parent device of the new GPIO chip (compulsory)
+ * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4
+ * @dat: MMIO address for the register to READ the value of the GPIO lines; a
+ * 1 in the corresponding bit of this register means the line is asserted
+ * @set: MMIO address for the register to SET the value of the GPIO lines;
+ * writing a 1 to a line's bit in this register drives that line high
+ * @clr: MMIO address for the register to CLEAR the value of the GPIO lines;
+ * writing a 1 to a line's bit in this register drives that line low. This
+ * address may be left NULL, in which case the SET register is assumed to
+ * also clear the GPIO lines, by actively writing the line with 0
+ * @dirout: MMIO address for the register to set a line as OUTPUT; setting a
+ * line to 1 in this register turns that line into an output and setting it
+ * to 0 turns it into an input. Either this or @dirin can be defined, but
+ * never both
+ * @dirin: MMIO address for the register to set a line as INPUT; setting a
+ * line to 1 in this register turns that line into an input and setting it
+ * to 0 turns it into an output. Either this or @dirout can be defined, but
+ * never both
+ * @flags: Different flags that will affect the behaviour of the device, such
+ * as endianness etc.
+ */
int bgpio_init(struct gpio_chip *gc, struct device *dev,
unsigned long sz, void __iomem *dat, void __iomem *set,
void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
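
For reference, a caller hands bgpio_init() the per-instance register addresses directly; the mt7621 driver added later in this patch does exactly that. A condensed sketch using that driver's register offsets (DATA 0x20, DSET 0x30, DCLR 0x40, CTRL 0x00):

#include <linux/gpio/driver.h>

static int my_bank_init(struct gpio_chip *gc, struct device *dev,
			void __iomem *base)
{
	return bgpio_init(gc, dev, 4,		/* 4-byte MMIO registers */
			  base + 0x20,		/* dat: read line values */
			  base + 0x30,		/* set: write 1 to drive high */
			  base + 0x40,		/* clr: write 1 to drive low */
			  base + 0x00,		/* dirout: 1 = output */
			  NULL,			/* dirin: unused, dirout given */
			  0);			/* flags */
}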
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
new file mode 100644
index 000000000000..d72af6f6cdbd
--- /dev/null
+++ b/drivers/gpio/gpio-mt7621.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define MTK_BANK_CNT 3
+#define MTK_BANK_WIDTH 32
+
+#define GPIO_BANK_STRIDE 0x04
+#define GPIO_REG_CTRL 0x00
+#define GPIO_REG_POL 0x10
+#define GPIO_REG_DATA 0x20
+#define GPIO_REG_DSET 0x30
+#define GPIO_REG_DCLR 0x40
+#define GPIO_REG_REDGE 0x50
+#define GPIO_REG_FEDGE 0x60
+#define GPIO_REG_HLVL 0x70
+#define GPIO_REG_LLVL 0x80
+#define GPIO_REG_STAT 0x90
+#define GPIO_REG_EDGE 0xA0
+
+struct mtk_gc {
+ struct gpio_chip chip;
+ spinlock_t lock;
+ int bank;
+ u32 rising;
+ u32 falling;
+ u32 hlevel;
+ u32 llevel;
+};
+
+/**
+ * struct mtk - state container for the platform driver's data; it holds
+ * three separate gpio_chips, each with its own irq_chip.
+ * @dev: device instance
+ * @base: memory base address
+ * @gpio_irq: irq number from the device tree
+ * @gc_map: array of the gpio chips
+ */
+struct mtk {
+ struct device *dev;
+ void __iomem *base;
+ int gpio_irq;
+ struct mtk_gc gc_map[MTK_BANK_CNT];
+};
+
+static inline struct mtk_gc *
+to_mediatek_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct mtk_gc, chip);
+}
+
+static inline void
+mtk_gpio_w32(struct mtk_gc *rg, u32 offset, u32 val)
+{
+ struct gpio_chip *gc = &rg->chip;
+ struct mtk *mtk = gpiochip_get_data(gc);
+
+ offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
+ gc->write_reg(mtk->base + offset, val);
+}
+
+static inline u32
+mtk_gpio_r32(struct mtk_gc *rg, u32 offset)
+{
+ struct gpio_chip *gc = &rg->chip;
+ struct mtk *mtk = gpiochip_get_data(gc);
+
+ offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
+ return gc->read_reg(mtk->base + offset);
+}
+
+static irqreturn_t
+mediatek_gpio_irq_handler(int irq, void *data)
+{
+ struct gpio_chip *gc = data;
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long pending;
+ int bit;
+
+ pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
+
+ for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
+ u32 map = irq_find_mapping(gc->irq.domain, bit);
+
+ generic_handle_irq(map);
+ mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
+ ret |= IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void
+mediatek_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 rise, fall, high, low;
+
+ spin_lock_irqsave(&rg->lock, flags);
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (BIT(pin) & rg->rising));
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(pin) & rg->falling));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high | (BIT(pin) & rg->hlevel));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low | (BIT(pin) & rg->llevel));
+ spin_unlock_irqrestore(&rg->lock, flags);
+}
+
+static void
+mediatek_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 rise, fall, high, low;
+
+ spin_lock_irqsave(&rg->lock, flags);
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ spin_unlock_irqrestore(&rg->lock, flags);
+}
+
+static int
+mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ u32 mask = BIT(pin);
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((rg->rising | rg->falling |
+ rg->hlevel | rg->llevel) & mask)
+ return 0;
+
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ }
+
+ rg->rising &= ~mask;
+ rg->falling &= ~mask;
+ rg->hlevel &= ~mask;
+ rg->llevel &= ~mask;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ rg->rising |= mask;
+ rg->falling |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ rg->rising |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ rg->falling |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ rg->hlevel |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ rg->llevel |= mask;
+ break;
+ }
+
+ return 0;
+}
+
+static struct irq_chip mediatek_gpio_irq_chip = {
+ .irq_unmask = mediatek_gpio_irq_unmask,
+ .irq_mask = mediatek_gpio_irq_mask,
+ .irq_mask_ack = mediatek_gpio_irq_mask,
+ .irq_set_type = mediatek_gpio_irq_type,
+};
+
+static int
+mediatek_gpio_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *spec, u32 *flags)
+{
+ int gpio = spec->args[0];
+ struct mtk_gc *rg = to_mediatek_gpio(chip);
+
+ if (rg->bank != gpio / MTK_BANK_WIDTH)
+ return -EINVAL;
+
+ if (flags)
+ *flags = spec->args[1];
+
+ return gpio % MTK_BANK_WIDTH;
+}
+
+static int
+mediatek_gpio_bank_probe(struct device *dev,
+ struct device_node *node, int bank)
+{
+ struct mtk *mtk = dev_get_drvdata(dev);
+ struct mtk_gc *rg;
+ void __iomem *dat, *set, *ctrl, *diro;
+ int ret;
+
+ rg = &mtk->gc_map[bank];
+ memset(rg, 0, sizeof(*rg));
+
+ spin_lock_init(&rg->lock);
+ rg->chip.of_node = node;
+ rg->bank = bank;
+
+ dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
+ set = mtk->base + GPIO_REG_DSET + (rg->bank * GPIO_BANK_STRIDE);
+ ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
+ diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
+
+ ret = bgpio_init(&rg->chip, dev, 4,
+ dat, set, ctrl, diro, NULL, 0);
+ if (ret) {
+ dev_err(dev, "bgpio_init() failed\n");
+ return ret;
+ }
+
+ rg->chip.of_gpio_n_cells = 2;
+ rg->chip.of_xlate = mediatek_gpio_xlate;
+ rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ dev_name(dev), bank);
+
+ ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ if (ret < 0) {
+ dev_err(dev, "Could not register gpio %d, ret=%d\n",
+ rg->chip.ngpio, ret);
+ return ret;
+ }
+
+ if (mtk->gpio_irq) {
+ /*
+ * Manually request the irq here instead of passing
+ * a flow-handler to gpiochip_set_chained_irqchip,
+ * because the irq is shared.
+ */
+ ret = devm_request_irq(dev, mtk->gpio_irq,
+ mediatek_gpio_irq_handler, IRQF_SHARED,
+ rg->chip.label, &rg->chip);
+
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ %d: %d\n",
+ mtk->gpio_irq, ret);
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ 0, handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "failed to add gpiochip_irqchip\n");
+ return ret;
+ }
+
+ gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ mtk->gpio_irq, NULL);
+ }
+
+ /* set polarity to low for all gpios */
+ mtk_gpio_w32(rg, GPIO_REG_POL, 0);
+
+ dev_info(dev, "registering %d gpios\n", rg->chip.ngpio);
+
+ return 0;
+}
+
+static int
+mediatek_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk *mtk;
+ int i;
+
+ mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
+ if (!mtk)
+ return -ENOMEM;
+
+ mtk->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtk->base))
+ return PTR_ERR(mtk->base);
+
+ mtk->gpio_irq = irq_of_parse_and_map(np, 0);
+ mtk->dev = dev;
+ platform_set_drvdata(pdev, mtk);
+ mediatek_gpio_irq_chip.name = dev_name(dev);
+
+ for (i = 0; i < MTK_BANK_CNT; i++)
+ mediatek_gpio_bank_probe(dev, np, i);
+
+ return 0;
+}
+
+static const struct of_device_id mediatek_gpio_match[] = {
+ { .compatible = "mediatek,mt7621-gpio" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mediatek_gpio_match);
+
+static struct platform_driver mediatek_gpio_driver = {
+ .probe = mediatek_gpio_probe,
+ .driver = {
+ .name = "mt7621_gpio",
+ .of_match_table = mediatek_gpio_match,
+ },
+};
+
+builtin_platform_driver(mediatek_gpio_driver);
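
mediatek_gpio_xlate() above maps the flat GPIO number from a devicetree cell onto a (bank, offset) pair and rejects requests that belong to another bank's chip. The arithmetic, worked for gpio 37 with MTK_BANK_WIDTH of 32:

#define MTK_BANK_WIDTH 32

static unsigned int flat_gpio_to_offset(unsigned int gpio, unsigned int *bank)
{
	*bank = gpio / MTK_BANK_WIDTH;	/* 37 / 32 == 1: only bank 1 matches */
	return gpio % MTK_BANK_WIDTH;	/* 37 % 32 == 5: offset in that bank */
}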
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 2f2829966d4c..995cf0b9e0b1 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -45,6 +45,15 @@ struct mxc_gpio_hwdata {
unsigned fall_edge;
};
+struct mxc_gpio_reg_saved {
+ u32 icr1;
+ u32 icr2;
+ u32 imr;
+ u32 gdir;
+ u32 edge_sel;
+ u32 dr;
+};
+
struct mxc_gpio_port {
struct list_head node;
void __iomem *base;
@@ -55,6 +64,8 @@ struct mxc_gpio_port {
struct gpio_chip gc;
struct device *dev;
u32 both_edges;
+ struct mxc_gpio_reg_saved gpio_saved_reg;
+ bool power_off;
};
static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
@@ -143,6 +154,7 @@ static const struct of_device_id mxc_gpio_dt_ids[] = {
{ .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
{ .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
{ .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
+ { .compatible = "fsl,imx7d-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
{ /* sentinel */ }
};
@@ -434,6 +446,9 @@ static int mxc_gpio_probe(struct platform_device *pdev)
return err;
}
+ if (of_device_is_compatible(np, "fsl,imx7d-gpio"))
+ port->power_off = true;
+
/* disable the interrupt and clear the status */
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
@@ -497,6 +512,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
list_add_tail(&port->node, &mxc_gpio_ports);
+ platform_set_drvdata(pdev, port);
+
return 0;
out_irqdomain_remove:
@@ -507,11 +524,67 @@ out_bgio:
return err;
}
+static void mxc_gpio_save_regs(struct mxc_gpio_port *port)
+{
+ if (!port->power_off)
+ return;
+
+ port->gpio_saved_reg.icr1 = readl(port->base + GPIO_ICR1);
+ port->gpio_saved_reg.icr2 = readl(port->base + GPIO_ICR2);
+ port->gpio_saved_reg.imr = readl(port->base + GPIO_IMR);
+ port->gpio_saved_reg.gdir = readl(port->base + GPIO_GDIR);
+ port->gpio_saved_reg.edge_sel = readl(port->base + GPIO_EDGE_SEL);
+ port->gpio_saved_reg.dr = readl(port->base + GPIO_DR);
+}
+
+static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
+{
+ if (!port->power_off)
+ return;
+
+ writel(port->gpio_saved_reg.icr1, port->base + GPIO_ICR1);
+ writel(port->gpio_saved_reg.icr2, port->base + GPIO_ICR2);
+ writel(port->gpio_saved_reg.imr, port->base + GPIO_IMR);
+ writel(port->gpio_saved_reg.gdir, port->base + GPIO_GDIR);
+ writel(port->gpio_saved_reg.edge_sel, port->base + GPIO_EDGE_SEL);
+ writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
+}
+
+static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+
+ mxc_gpio_save_regs(port);
+ clk_disable_unprepare(port->clk);
+
+ return 0;
+}
+
+static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ return ret;
+ mxc_gpio_restore_regs(port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
+};
+
static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
.of_match_table = mxc_gpio_dt_ids,
.suppress_bind_attrs = true,
+ .pm = &mxc_gpio_dev_pm_ops,
},
.probe = mxc_gpio_probe,
.id_table = mxc_gpio_devtype,
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index e2831ee70cdc..df30490da820 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -126,8 +126,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
else
writel(pin_mask, pin_addr + MXS_CLR);
- writel(pin_mask,
- port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
+ writel(pin_mask, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
return 0;
}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d1afedf4dcbf..e81008678a38 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -77,6 +77,8 @@ struct gpio_bank {
bool workaround_enabled;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
+ void (*set_dataout_multiple)(struct gpio_bank *bank,
+ unsigned long *mask, unsigned long *bits);
int (*get_context_loss_count)(struct device *dev);
struct omap_gpio_reg_offs *regs;
@@ -161,6 +163,51 @@ static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
return (readl_relaxed(reg) & (BIT(offset))) != 0;
}
+/* set multiple data out values using the dedicated set/clear registers */
+static void omap_set_gpio_dataout_reg_multiple(struct gpio_bank *bank,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ l = *bits & *mask;
+ writel_relaxed(l, reg + bank->regs->set_dataout);
+ bank->context.dataout |= l;
+
+ l = ~*bits & *mask;
+ writel_relaxed(l, reg + bank->regs->clr_dataout);
+ bank->context.dataout &= ~l;
+}
+
+/* set multiple data out values using mask register */
+static void omap_set_gpio_dataout_mask_multiple(struct gpio_bank *bank,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
+ u32 l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
+
+ writel_relaxed(l, reg);
+ bank->context.dataout = l;
+}
+
+static unsigned long omap_get_gpio_datain_multiple(struct gpio_bank *bank,
+ unsigned long *mask)
+{
+ void __iomem *reg = bank->base + bank->regs->datain;
+
+ return readl_relaxed(reg) & *mask;
+}
+
+static unsigned long omap_get_gpio_dataout_multiple(struct gpio_bank *bank,
+ unsigned long *mask)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
+
+ return readl_relaxed(reg) & *mask;
+}
+
static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
int l = readl_relaxed(base + reg);
@@ -968,6 +1015,26 @@ static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
return 0;
}
+static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ void __iomem *reg = bank->base + bank->regs->direction;
+ unsigned long in = readl_relaxed(reg), l;
+
+ *bits = 0;
+
+ l = in & *mask;
+ if (l)
+ *bits |= omap_get_gpio_datain_multiple(bank, &l);
+
+ l = ~in & *mask;
+ if (l)
+ *bits |= omap_get_gpio_dataout_multiple(bank, &l);
+
+ return 0;
+}
+
static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
unsigned debounce)
{
@@ -1012,6 +1079,17 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
+static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ bank->set_dataout_multiple(bank, mask, bits);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+}
+
/*---------------------------------------------------------------------*/
static void omap_gpio_show_rev(struct gpio_bank *bank)
@@ -1073,9 +1151,11 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.get_direction = omap_gpio_get_direction;
bank->chip.direction_input = omap_gpio_input;
bank->chip.get = omap_gpio_get;
+ bank->chip.get_multiple = omap_gpio_get_multiple;
bank->chip.direction_output = omap_gpio_output;
bank->chip.set_config = omap_gpio_set_config;
bank->chip.set = omap_gpio_set;
+ bank->chip.set_multiple = omap_gpio_set_multiple;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
@@ -1209,10 +1289,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
pdata->get_context_loss_count;
}
- if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ if (bank->regs->set_dataout && bank->regs->clr_dataout) {
bank->set_dataout = omap_set_gpio_dataout_reg;
- else
+ bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
+ } else {
bank->set_dataout = omap_set_gpio_dataout_mask;
+ bank->set_dataout_multiple =
+ omap_set_gpio_dataout_mask_multiple;
+ }
raw_spin_lock_init(&bank->lock);
raw_spin_lock_init(&bank->wa_lock);
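
omap_gpio_get_multiple() above splits the caller's mask by the direction register, reading input lines from DATAIN and output lines from DATAOUT before OR-ing the two halves together (a set direction bit marks an input on this hardware). The split in isolation, with the register reads replaced by parameters:

static unsigned long split_read(unsigned long dir_in_bits,
				unsigned long datain, unsigned long dataout,
				unsigned long mask)
{
	unsigned long bits = 0;

	bits |= datain & (dir_in_bits & mask);	 /* lines set up as inputs */
	bits |= dataout & (~dir_in_bits & mask); /* lines set up as outputs */

	return bits;
}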
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c55ad157e820..023a32cfac42 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -708,7 +708,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
- if (irq_base != -1 && (chip->driver_data & PCA_INT))
+ if (client->irq && irq_base != -1 && (chip->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index f5545049c187..f809a5a8e9eb 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -12,6 +12,8 @@
* GNU General Public License version 2 for more details.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@@ -90,6 +92,25 @@ static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
}
+static int pisosr_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct pisosr_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int nbytes = DIV_ROUND_UP(chip->ngpio, 8);
+ unsigned int i, j;
+
+ pisosr_gpio_refresh(gpio);
+
+ bitmap_zero(bits, chip->ngpio);
+ for (i = 0; i < nbytes; i++) {
+ j = i / sizeof(unsigned long);
+ bits[j] |= ((unsigned long) gpio->buffer[i])
+ << (8 * (i % sizeof(unsigned long)));
+ }
+
+ return 0;
+}
+
static const struct gpio_chip template_chip = {
.label = "pisosr-gpio",
.owner = THIS_MODULE,
@@ -97,6 +118,7 @@ static const struct gpio_chip template_chip = {
.direction_input = pisosr_gpio_direction_input,
.direction_output = pisosr_gpio_direction_output,
.get = pisosr_gpio_get,
+ .get_multiple = pisosr_gpio_get_multiple,
.base = -1,
.ngpio = DEFAULT_NGPIO,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 1e66f808051c..c18712dabf93 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -241,6 +241,17 @@ int pxa_irq_to_gpio(int irq)
return irq_gpio0;
}
+static bool pxa_gpio_has_pinctrl(void)
+{
+ switch (gpio_type) {
+ case PXA3XX_GPIO:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);
@@ -255,9 +266,11 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
int ret;
- ret = pinctrl_gpio_direction_input(chip->base + offset);
- if (!ret)
- return 0;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_input(chip->base + offset);
+ if (!ret)
+ return 0;
+ }
spin_lock_irqsave(&gpio_lock, flags);
@@ -282,9 +295,11 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
- ret = pinctrl_gpio_direction_output(chip->base + offset);
- if (ret)
- return ret;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_output(chip->base + offset);
+ if (ret)
+ return ret;
+ }
spin_lock_irqsave(&gpio_lock, flags);
@@ -348,8 +363,12 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
- pchip->chip.request = gpiochip_generic_request;
- pchip->chip.free = gpiochip_generic_free;
+
+ if (pxa_gpio_has_pinctrl()) {
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
+ }
+
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
@@ -607,7 +626,7 @@ static int pxa_gpio_probe(struct platform_device *pdev)
struct pxa_gpio_platform_data *info;
void __iomem *gpio_reg_base;
int gpio, ret;
- int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
+ int irq0 = 0, irq1 = 0, irq_mux;
pchip = devm_kzalloc(&pdev->dev, sizeof(*pchip), GFP_KERNEL);
if (!pchip)
@@ -646,14 +665,13 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq0 = irq0;
pchip->irq1 = irq1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!gpio_reg_base)
return -EINVAL;
- if (irq0 > 0)
- gpio_offset = 2;
-
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 3b4dc1a9a68d..a499c633a6c5 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/rc5t583.h>
struct rc5t583_gpio {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 350390c0b290..55cc61086d99 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -15,7 +15,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -278,6 +278,13 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
pm_runtime_put(&p->pdev->dev);
}
+static int gpio_rcar_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+
+ return !(gpio_rcar_read(p, INOUTSEL) & BIT(offset));
+}
+
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
{
gpio_rcar_config_general_input_output_mode(chip, offset, false);
@@ -461,6 +468,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip = &p->gpio_chip;
gpio_chip->request = gpio_rcar_request;
gpio_chip->free = gpio_rcar_free;
+ gpio_chip->get_direction = gpio_rcar_get_direction;
gpio_chip->direction_input = gpio_rcar_direction_input;
gpio_chip->get = gpio_rcar_get;
gpio_chip->direction_output = gpio_rcar_direction_output;
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index cbf0f9e6465b..2938217566d3 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -25,7 +25,7 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/rdc321x.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 249f433aa62d..986eb3b231ac 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 545004445846..e9878f6ede67 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -26,8 +26,7 @@
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/pci_ids.h>
-
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#define GEN 0x00
#define GIO 0x04
@@ -138,6 +137,13 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
return 0;
}
+static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned gpio_num)
+{
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+
+ return sch_gpio_reg_get(sch, gpio_num, GIO);
+}
+
static const struct gpio_chip sch_gpio_chip = {
.label = "sch_gpio",
.owner = THIS_MODULE,
@@ -145,6 +151,7 @@ static const struct gpio_chip sch_gpio_chip = {
.get = sch_gpio_get,
.direction_output = sch_gpio_direction_out,
.set = sch_gpio_set,
+ .get_direction = sch_gpio_get_direction,
};
static int sch_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index b96990c262a1..5497f0a88cf0 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -17,16 +17,15 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/io.h>
#define DRV_NAME "gpio-sch311x"
-#define SCH311X_GPIO_CONF_OUT 0x00
-#define SCH311X_GPIO_CONF_IN 0x01
-#define SCH311X_GPIO_CONF_INVERT 0x02
-#define SCH311X_GPIO_CONF_OPEN_DRAIN 0x80
+#define SCH311X_GPIO_CONF_DIR BIT(0)
+#define SCH311X_GPIO_CONF_INVERT BIT(1)
+#define SCH311X_GPIO_CONF_OPEN_DRAIN BIT(7)
#define SIO_CONFIG_KEY_ENTER 0x55
#define SIO_CONFIG_KEY_EXIT 0xaa
@@ -163,7 +162,7 @@ static void sch311x_gpio_free(struct gpio_chip *chip, unsigned offset)
static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
- unsigned char data;
+ u8 data;
spin_lock(&block->lock);
data = inb(block->runtime_reg + block->data_reg);
@@ -175,7 +174,7 @@ static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset)
static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
unsigned offset, int value)
{
- unsigned char data = inb(block->runtime_reg + block->data_reg);
+ u8 data = inb(block->runtime_reg + block->data_reg);
if (value)
data |= BIT(offset);
else
@@ -196,10 +195,12 @@ static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset,
static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
spin_lock(&block->lock);
- outb(SCH311X_GPIO_CONF_IN, block->runtime_reg +
- block->config_regs[offset]);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data |= SCH311X_GPIO_CONF_DIR;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
spin_unlock(&block->lock);
return 0;
@@ -209,18 +210,59 @@ static int sch311x_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
int value)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
spin_lock(&block->lock);
- outb(SCH311X_GPIO_CONF_OUT, block->runtime_reg +
- block->config_regs[offset]);
-
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data &= ~SCH311X_GPIO_CONF_DIR;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
__sch311x_gpio_set(block, offset, value);
spin_unlock(&block->lock);
return 0;
}
+static int sch311x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
+
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+
+ return !!(data & SCH311X_GPIO_CONF_DIR);
+}
+
+static int sch311x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ enum pin_config_param param = pinconf_to_config_param(config);
+ u8 data;
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data |= SCH311X_GPIO_CONF_OPEN_DRAIN;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+ return 0;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data &= ~SCH311X_GPIO_CONF_OPEN_DRAIN;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+ return 0;
+ default:
+ break;
+ }
+ return -ENOTSUPP;
+}
+
static int sch311x_gpio_probe(struct platform_device *pdev)
{
struct sch311x_pdev_data *pdata = dev_get_platdata(&pdev->dev);
@@ -253,6 +295,8 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
block->chip.free = sch311x_gpio_free;
block->chip.direction_input = sch311x_gpio_direction_in;
block->chip.direction_output = sch311x_gpio_direction_out;
+ block->chip.get_direction = sch311x_gpio_get_direction;
+ block->chip.set_config = sch311x_gpio_set_config;
block->chip.get = sch311x_gpio_get;
block->chip.set = sch311x_gpio_set;
block->chip.ngpio = 8;
@@ -309,7 +353,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
{
int err = 0, reg;
unsigned short base_addr;
- unsigned char dev_id;
+ u8 dev_id;
err = sch311x_sio_enter(sio_config_port);
if (err)
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 22267479ba68..ee3039f091f4 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -10,7 +10,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 407359da08f9..2283c869ad5d 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -23,7 +23,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
@@ -58,16 +59,6 @@ struct gsta_gpio {
unsigned irq_type[GSTA_NR_GPIO];
};
-static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
-{
- return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
-}
-
-static inline u32 __bit(int nr)
-{
- return 1U << (nr % GSTA_GPIO_PER_BLOCK);
-}
-
/*
* gpio methods
*/
@@ -75,8 +66,8 @@ static inline u32 __bit(int nr)
static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
if (val)
writel(bit, &regs->dats);
@@ -87,8 +78,8 @@ static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
return !!(readl(&regs->dat) & bit);
}
@@ -97,8 +88,8 @@ static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
int val)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
writel(bit, &regs->dirs);
/* Data register after direction, otherwise pullup/down is selected */
@@ -112,8 +103,8 @@ static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
writel(bit, &regs->dirc);
return 0;
@@ -165,9 +156,9 @@ static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
*/
static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
{
- struct gsta_regs __iomem *regs = __regs(chip, nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
unsigned long flags;
- u32 bit = __bit(nr);
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
int err = 0;
@@ -234,8 +225,8 @@ static void gsta_irq_disable(struct irq_data *data)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct gsta_gpio *chip = gc->private;
int nr = data->irq - chip->irq_base;
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
unsigned long flags;
@@ -257,8 +248,8 @@ static void gsta_irq_enable(struct irq_data *data)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct gsta_gpio *chip = gc->private;
int nr = data->irq - chip->irq_base;
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
int type;
unsigned long flags;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 8d6a5a7e612d..65a2315f1673 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/mfd/stmpe.h>
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index c07385b71403..19972084c45b 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -13,9 +13,8 @@
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -91,6 +90,20 @@ struct xway_stp {
};
/**
+ * xway_stp_get() - gpio_chip->get - get gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ *
+ * Gets the shadow value.
+ */
+static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct xway_stp *chip = gpiochip_get_data(gc);
+
+ return (xway_stp_r32(chip->virt, XWAY_STP_CPU0) & BIT(gpio));
+}
+
+/**
* xway_stp_set() - gpio_chip->set - set gpios.
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
@@ -215,6 +228,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->gc.parent = &pdev->dev;
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
+ chip->gc.get = xway_stp_get;
chip->gc.set = xway_stp_set;
chip->gc.request = xway_stp_request;
chip->gc.base = -1;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 8b0a69c5ba88..87c18a544513 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -10,7 +10,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -135,6 +135,33 @@ static const struct syscon_gpio_data clps711x_mctrl_gpio = {
.dat_bit_offset = 0x40 * 8 + 8,
};
+static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int offs;
+ u8 bit;
+ u32 data;
+ int ret;
+
+ offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
+ bit = offs % SYSCON_REG_BITS;
+ data = (val ? BIT(bit) : 0) | BIT(bit + 16);
+ ret = regmap_write(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ data);
+ if (ret < 0)
+ dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+}
+
+static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
+ /* RK3328 GPIO_MUTE is an output-only pin at GRF_SOC_CON10[1] */
+ .flags = GPIO_SYSCON_FEAT_OUT,
+ .bit_count = 1,
+ .dat_bit_offset = 0x0428 * 8 + 1,
+ .set = rockchip_gpio_set,
+};
+
#define KEYSTONE_LOCK_BIT BIT(0)
static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
@@ -175,6 +202,10 @@ static const struct of_device_id syscon_gpio_ids[] = {
.compatible = "ti,keystone-dsp-gpio",
.data = &keystone_dsp_gpio,
},
+ {
+ .compatible = "rockchip,rk3328-grf-gpio",
+ .data = &rockchip_rk3328_gpio_mute,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
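
rockchip_gpio_set() above relies on the Rockchip GRF register convention: the upper 16 bits of each register are per-bit write enables, so changing bit N requires also setting bit N + 16 in the same write. This lets a single regmap_write() update one bit with no read-modify-write cycle. The field encoding by itself:

#include <linux/bits.h>
#include <linux/types.h>

static u32 grf_field(unsigned int bit, bool val)
{
	/* data bit in the low half, matching write-enable in the high half */
	return (val ? BIT(bit) : 0) | BIT(bit + 16);
}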
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index ac6f2a9841e5..a12cd0b5c972 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -22,7 +22,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -30,7 +30,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 94396caaca75..47dbd19751d0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -22,7 +22,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -207,7 +207,7 @@ static int tegra_gpio_get_direction(struct gpio_chip *chip,
oe = tegra_gpio_readl(tgi, GPIO_OE(tgi, offset));
- return (oe & pin_mask) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+ return !(oe & pin_mask);
}
static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -323,13 +323,6 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
- if (ret) {
- dev_err(tgi->dev,
- "unable to lock Tegra GPIO %u as IRQ\n", gpio);
- return ret;
- }
-
spin_lock_irqsave(&bank->lvl_lock[port], flags);
val = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
@@ -342,6 +335,14 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, gpio), gpio, 0);
tegra_gpio_enable(tgi, gpio);
+ ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
+ if (ret) {
+ dev_err(tgi->dev,
+ "unable to lock Tegra GPIO %u as IRQ\n", gpio);
+ tegra_gpio_disable(tgi, gpio);
+ return ret;
+ }
+
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
@@ -550,13 +551,6 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
-/*
- * This lock class tells lockdep that GPIO irqs are in a different category
- * than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-static struct lock_class_key gpio_request_class;
-
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
@@ -661,8 +655,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &gpio_lock_class,
- &gpio_request_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
@@ -720,4 +712,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
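The reordering above configures the line before locking it as an IRQ, so a lock failure can be unwound with tegra_gpio_disable(). A minimal sketch of that configure-then-lock-with-rollback pattern; enable_hw()/disable_hw() are hypothetical stand-ins for the driver's enable/disable helpers:

#include <linux/gpio/driver.h>

static void enable_hw(unsigned int offset) { /* hypothetical */ }
static void disable_hw(unsigned int offset) { /* hypothetical */ }

static int example_set_type(struct gpio_chip *gc, unsigned int offset)
{
	int ret;

	enable_hw(offset);			/* configure the line first */

	ret = gpiochip_lock_as_irq(gc, offset);
	if (ret) {
		disable_hw(offset);		/* unwind on failure */
		return ret;
	}

	return 0;
}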
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 7f1aa4c21e0d..9d0292c8a199 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
+#include <dt-bindings/gpio/tegra194-gpio.h>
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
#define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0)
@@ -593,6 +594,73 @@ static const struct tegra_gpio_soc tegra186_aon_soc = {
.name = "tegra186-gpio-aon",
};
+#define TEGRA194_MAIN_GPIO_PORT(port, base, count, controller) \
+ [TEGRA194_MAIN_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra194_main_ports[] = {
+ TEGRA194_MAIN_GPIO_PORT( A, 0x1400, 8, 1),
+ TEGRA194_MAIN_GPIO_PORT( B, 0x4e00, 2, 4),
+ TEGRA194_MAIN_GPIO_PORT( C, 0x4600, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( D, 0x4800, 4, 4),
+ TEGRA194_MAIN_GPIO_PORT( E, 0x4a00, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( F, 0x4c00, 6, 4),
+ TEGRA194_MAIN_GPIO_PORT( G, 0x4000, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( H, 0x4200, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( I, 0x4400, 5, 4),
+ TEGRA194_MAIN_GPIO_PORT( J, 0x5200, 6, 5),
+ TEGRA194_MAIN_GPIO_PORT( K, 0x3000, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( L, 0x3200, 4, 3),
+ TEGRA194_MAIN_GPIO_PORT( M, 0x2600, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( N, 0x2800, 3, 2),
+ TEGRA194_MAIN_GPIO_PORT( O, 0x5000, 6, 5),
+ TEGRA194_MAIN_GPIO_PORT( P, 0x2a00, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Q, 0x2c00, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( R, 0x2e00, 6, 2),
+ TEGRA194_MAIN_GPIO_PORT( S, 0x3600, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( T, 0x3800, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( U, 0x3a00, 1, 3),
+ TEGRA194_MAIN_GPIO_PORT( V, 0x1000, 8, 1),
+ TEGRA194_MAIN_GPIO_PORT( W, 0x1200, 2, 1),
+ TEGRA194_MAIN_GPIO_PORT( X, 0x2000, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Y, 0x2200, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Z, 0x2400, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT(FF, 0x3400, 2, 3),
+ TEGRA194_MAIN_GPIO_PORT(GG, 0x0000, 2, 0)
+};
+
+static const struct tegra_gpio_soc tegra194_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra194_main_ports),
+ .ports = tegra194_main_ports,
+ .name = "tegra194-gpio",
+};
+
+#define TEGRA194_AON_GPIO_PORT(port, base, count, controller) \
+ [TEGRA194_AON_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra194_aon_ports[] = {
+ TEGRA194_AON_GPIO_PORT(AA, 0x0600, 8, 0),
+ TEGRA194_AON_GPIO_PORT(BB, 0x0800, 4, 0),
+ TEGRA194_AON_GPIO_PORT(CC, 0x0200, 8, 0),
+ TEGRA194_AON_GPIO_PORT(DD, 0x0400, 3, 0),
+ TEGRA194_AON_GPIO_PORT(EE, 0x0000, 7, 0)
+};
+
+static const struct tegra_gpio_soc tegra194_aon_soc = {
+ .num_ports = ARRAY_SIZE(tegra194_aon_ports),
+ .ports = tegra194_aon_ports,
+ .name = "tegra194-gpio-aon",
+};
+
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -601,6 +669,12 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra186-gpio-aon",
.data = &tegra186_aon_soc
}, {
+ .compatible = "nvidia,tegra194-gpio",
+ .data = &tegra194_main_soc
+ }, {
+ .compatible = "nvidia,tegra194-gpio-aon",
+ .data = &tegra194_aon_soc
+ }, {
/* sentinel */
}
};
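For reference, each TEGRA194_*_GPIO_PORT() entry is a designated initializer keyed by the dt-binding constant; e.g. TEGRA194_MAIN_GPIO_PORT(A, 0x1400, 8, 1) expands to:

[TEGRA194_MAIN_GPIO_PORT_A] = {
	.name = "A",	/* #port stringifies the argument */
	.offset = 0x1400,
	.pins = 8,
	.irq = 1,
},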
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 6520a8475910..314e300d6ba3 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -22,7 +22,7 @@
*/
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d3cf9502e7e7..7fdac9060979 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
fwspec.param_count = 2;
fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
- fwspec.param[1] = IRQ_TYPE_NONE;
+ /*
+ * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+ * temporarily; ->irq_set_type() will override it later anyway.
+ */
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
return irq_create_fwspec_mapping(&fwspec);
}
@@ -306,8 +310,7 @@ static int uniphier_gpio_irq_domain_activate(struct irq_domain *domain,
struct uniphier_gpio_priv *priv = domain->host_data;
struct gpio_chip *chip = &priv->chip;
- gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
- return 0;
+ return gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
}
static void uniphier_gpio_irq_domain_deactivate(struct irq_domain *domain,
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index ac8deb01f6f6..027699cec911 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -138,10 +138,16 @@ static void unmask_giuint_low(struct irq_data *d)
static unsigned int startup_giuint(struct irq_data *data)
{
- if (gpiochip_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
+ int ret;
+
+ ret = gpiochip_lock_as_irq(&vr41xx_gpio_chip, irqd_to_hwirq(data));
+ if (ret) {
dev_err(vr41xx_gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
data->hwirq);
+ return ret;
+ }
+
/* Satisfy the .enable semantics by unmasking the line */
unmask_giuint_low(data);
return 0;
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index acd59113e08b..2eb76f35aa7e 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -143,12 +143,14 @@ static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+ int ret;
- if (gpiochip_lock_as_irq(&priv->gc, gpio)) {
+ ret = gpiochip_lock_as_irq(&priv->gc, gpio);
+ if (ret) {
dev_err(priv->gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
- return -ENOSPC;
+ return ret;
}
xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index e8ec0e33a0a9..8f24478cc18b 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -20,7 +20,7 @@
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Register Offset Definitions */
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index e2232cbcec8b..c48ed9d89ff5 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -25,6 +25,7 @@
struct acpi_gpio_event {
struct list_head node;
+ struct list_head initial_sync_list;
acpi_handle handle;
unsigned int pin;
unsigned int irq;
@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
struct list_head events;
};
+static LIST_HEAD(acpi_gpio_initial_sync_list);
+static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
if (!gc->parent)
@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
return gpiochip_get_desc(chip, pin);
}
+static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+{
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
+static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+{
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ if (!list_empty(&event->initial_sync_list))
+ list_del_init(&event->initial_sync_list);
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
struct acpi_gpio_event *event = data;
@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
irq_handler_t handler = NULL;
struct gpio_desc *desc;
unsigned long irqflags;
- int ret, pin, irq;
+ int ret, pin, irq, value;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
+ value = gpiod_get_value(desc);
+
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
event->irq = irq;
event->pin = pin;
event->desc = desc;
+ INIT_LIST_HEAD(&event->initial_sync_list);
ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event);
@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
enable_irq_wake(irq);
list_add_tail(&event->node, &acpi_gpio->events);
+
+ /*
+ * Make sure we trigger the initial state of the IRQ when using RISING
+ * or FALLING. Note we run the handlers at late_init; the AML code
+ * may refer to OperationRegions from other (builtin) drivers which
+ * may be probed after us.
+ */
+ if (handler == acpi_gpio_irq_handler &&
+ (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+ acpi_gpio_add_to_initial_sync_list(event);
+
return AE_OK;
fail_free_event:
@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
+ acpi_gpio_del_from_initial_sync_list(event);
+
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
disable_irq_wake(event->irq);
@@ -353,7 +389,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
- struct acpi_reference_args *args,
+ struct fwnode_reference_args *args,
unsigned int *quirks)
{
const struct acpi_gpio_mapping *gm;
@@ -365,7 +401,7 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
const struct acpi_gpio_params *par = gm->data + index;
- args->adev = adev;
+ args->fwnode = acpi_fwnode_handle(adev);
args->args[0] = par->crs_entry_index;
args->args[1] = par->line_index;
args->args[2] = par->active_low;
@@ -528,7 +564,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
const char *propname, int index,
struct acpi_gpio_lookup *lookup)
{
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
unsigned int quirks = 0;
int ret;
@@ -549,6 +585,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* The property was found and resolved, so need to lookup the GPIO based
* on returned args.
*/
+ if (!to_acpi_device_node(args.fwnode))
+ return -EINVAL;
if (args.nargs != 3)
return -EPROTO;
@@ -556,8 +594,9 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
lookup->pin_index = args.args[1];
lookup->active_low = !!args.args[2];
- lookup->info.adev = args.adev;
+ lookup->info.adev = to_acpi_device_node(args.fwnode);
lookup->info.quirks = quirks;
+
return 0;
}
@@ -1158,3 +1197,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL;
}
+
+/* Sync the initial state of handlers after all builtin drivers have probed */
+static int acpi_gpio_initial_sync(void)
+{
+ struct acpi_gpio_event *event, *ep;
+
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+ initial_sync_list) {
+ acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+ list_del_init(&event->initial_sync_list);
+ }
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+ return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_initial_sync);
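The mechanism above is a general deferral pattern: queue work items on a mutex-protected list, then drain it from a late_initcall_sync(), which runs after the first deferred-probe pass. A minimal sketch with a hypothetical item type and per-item work:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(pending_list);
static DEFINE_MUTEX(pending_lock);

static void queue_item(struct item *it)
{
	mutex_lock(&pending_lock);
	list_add(&it->node, &pending_list);
	mutex_unlock(&pending_lock);
}

static int process_pending(void)
{
	struct item *it, *tmp;

	mutex_lock(&pending_lock);
	list_for_each_entry_safe(it, tmp, &pending_list, node) {
		/* process_item(it); -- hypothetical per-item work */
		list_del_init(&it->node);
	}
	mutex_unlock(&pending_lock);

	return 0;
}
/* the _sync variant runs after the first deferred-probe pass */
late_initcall_sync(process_pending);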
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 28d968088131..a4f1157d6aa0 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Note that active low is the default.
*/
if (IS_ENABLED(CONFIG_REGULATOR) &&
- (of_device_is_compatible(np, "reg-fixed-voltage") ||
+ (of_device_is_compatible(np, "regulator-fixed") ||
+ of_device_is_compatible(np, "reg-fixed-voltage") ||
of_device_is_compatible(np, "regulator-gpio"))) {
/*
* The regulator GPIO handles are specified such that the
@@ -620,9 +621,6 @@ int of_gpiochip_add(struct gpio_chip *chip)
{
int status;
- if ((!chip->of_node) && (chip->parent))
- chip->of_node = chip->parent->of_node;
-
if (!chip->of_node)
return 0;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e11a3bb03820..e8f8a1999393 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -431,7 +431,7 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
int i;
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
- /* TODO: check if descriptors are really input */
+ /* NOTE: It's ok to read values of output lines. */
int ret = gpiod_get_array_value_complex(false,
true,
lh->numdescs,
@@ -449,7 +449,13 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
return 0;
} else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
- /* TODO: check if descriptors are really output */
+ /*
+ * All line descriptors were created at once with the same
+ * flags so just check if the first one is really output.
+ */
+ if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
+ return -EPERM;
+
if (copy_from_user(&ghd, ip, sizeof(ghd)))
return -EFAULT;
@@ -1256,6 +1262,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
/* If the gpiochip has an assigned OF node this takes precedence */
if (chip->of_node)
gdev->dev.of_node = chip->of_node;
+ else
+ chip->of_node = gdev->dev.of_node;
#endif
gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
@@ -1408,9 +1416,9 @@ err_free_descs:
err_free_gdev:
ida_simple_remove(&gpio_ida, gdev->id);
/* failures here can mean systems won't boot... */
- pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
+ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
- chip->label ? : "generic");
+ chip->label ? : "generic", status);
kfree(gdev);
return status;
}
@@ -1664,8 +1672,7 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
if (parent_handler) {
if (gpiochip->can_sleep) {
chip_err(gpiochip,
- "you cannot have chained interrupts on a "
- "chip that may sleep\n");
+ "you cannot have chained interrupts on a chip that may sleep\n");
return;
}
/*
@@ -1800,16 +1807,18 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ int ret;
if (!try_module_get(chip->gpiodev->owner))
return -ENODEV;
- if (gpiochip_lock_as_irq(chip, d->hwirq)) {
+ ret = gpiochip_lock_as_irq(chip, d->hwirq);
+ if (ret) {
chip_err(chip,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
module_put(chip->gpiodev->owner);
- return -EINVAL;
+ return ret;
}
return 0;
}
@@ -1850,8 +1859,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
return 0;
if (gpiochip->irq.parent_handler && gpiochip->can_sleep) {
- chip_err(gpiochip, "you cannot have chained interrupts on a "
- "chip that may sleep\n");
+ chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n");
return -EINVAL;
}
@@ -2259,6 +2267,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
struct gpio_chip *chip = desc->gdev->chip;
int status;
unsigned long flags;
+ unsigned offset;
spin_lock_irqsave(&gpio_lock, flags);
@@ -2277,7 +2286,11 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (chip->request) {
/* chip->request may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
- status = chip->request(chip, gpio_chip_hwgpio(desc));
+ offset = gpio_chip_hwgpio(desc);
+ if (gpiochip_line_is_valid(chip, offset))
+ status = chip->request(chip, offset);
+ else
+ status = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
if (status < 0) {
@@ -3194,6 +3207,19 @@ int gpiod_cansleep(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_cansleep);
/**
+ * gpiod_set_consumer_name() - set the consumer name for the descriptor
+ * @desc: gpio to set the consumer name on
+ * @name: the new consumer name
+ */
+void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+{
+ VALIDATE_DESC_VOID(desc);
+ /* Just overwrite whatever the previous name was */
+ desc->label = name;
+}
+EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
+
+/**
* gpiod_to_irq() - return the IRQ corresponding to a GPIO
* @desc: gpio whose IRQ will be returned (already requested)
*
@@ -3249,18 +3275,19 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
* behind our back
*/
if (!chip->can_sleep && chip->get_direction) {
- int dir = chip->get_direction(chip, offset);
+ int dir = gpiod_get_direction(desc);
- if (dir)
- clear_bit(FLAG_IS_OUT, &desc->flags);
- else
- set_bit(FLAG_IS_OUT, &desc->flags);
+ if (dir < 0) {
+ chip_err(chip, "%s: cannot get GPIO direction\n",
+ __func__);
+ return dir;
+ }
}
if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
- "%s: tried to flag a GPIO set as output for IRQ\n",
- __func__);
+ "%s: tried to flag a GPIO set as output for IRQ\n",
+ __func__);
return -EIO;
}
@@ -3639,9 +3666,16 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
chip = find_chip_by_name(p->chip_label);
if (!chip) {
- dev_err(dev, "cannot find GPIO chip %s\n",
- p->chip_label);
- return ERR_PTR(-ENODEV);
+ /*
+ * As the lookup table indicates a chip with
+ * p->chip_label should exist, assume it may
+ * still appear later and let the interested
+ * consumer be probed again or let the Deferred
+ * Probe infrastructure handle the error.
+ */
+ dev_warn(dev, "cannot find GPIO chip %s, deferring\n",
+ p->chip_label);
+ return ERR_PTR(-EPROBE_DEFER);
}
if (chip->ngpio <= p->chip_hwnum) {
@@ -4215,7 +4249,7 @@ static int __init gpiolib_dev_init(void)
int ret;
/* Register GPIO sysfs bus */
- ret = bus_register(&gpio_bus_type);
+ ret = bus_register(&gpio_bus_type);
if (ret < 0) {
pr_err("gpiolib: could not register GPIO bus type\n");
return ret;
@@ -4259,9 +4293,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s",
gpio, gdesc->name ? gdesc->name : "", gdesc->label,
is_out ? "out" : "in ",
- chip->get
- ? (chip->get(chip, i) ? "hi" : "lo")
- : "? ",
+ chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ",
is_irq ? "IRQ" : " ");
seq_printf(s, "\n");
}
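With the new FLAG_IS_OUT check, GPIOHANDLE_SET_LINE_VALUES_IOCTL now fails with EPERM on a handle that was requested as input. A minimal user-space sketch of hitting that path, assuming /dev/gpiochip0 exists and line 0 is free:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiohandle_request req;
	struct gpiohandle_data data;
	int fd = open("/dev/gpiochip0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.lineoffsets[0] = 0;
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_INPUT;	/* input-only handle */
	if (ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.values[0] = 1;
	/* now rejected with EPERM: the handle was not requested as output */
	if (ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data) < 0)
		perror("SET_LINE_VALUES");

	close(req.fd);
	close(fd);
	return 0;
}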
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 1a8e20363861..a7e49fef73d4 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -92,7 +92,7 @@ struct acpi_gpio_info {
};
/* gpio suffixes used for ACPI and device tree lookup */
-static const char * const gpio_suffixes[] = { "gpios", "gpio" };
+static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
#ifdef CONFIG_OF_GPIO
struct gpio_desc *of_find_gpio(struct device *dev,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2a72d2feb76d..cb88528e7b10 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -122,6 +122,16 @@ config DRM_LOAD_EDID_FIRMWARE
default case is N. Details and instructions how to build your own
EDID data are given in Documentation/EDID/HOWTO.txt.
+config DRM_DP_CEC
+ bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ select CEC_CORE
+ help
+ Choose this option if you want to enable HDMI CEC support for
+ DisplayPort/USB-C to HDMI adapters.
+
+ Note: not all adapters support this feature, and even among those
+ that do, the CEC pin is often left unconnected.
+
config DRM_TTM
tristate
depends on DRM && MMU
@@ -213,6 +223,17 @@ config DRM_VGEM
as used by Mesa's software renderer for enhanced performance.
If M is selected the module will be called vgem.
+config DRM_VKMS
+ tristate "Virtual KMS (EXPERIMENTAL)"
+ depends on DRM
+ select DRM_KMS_HELPER
+ default n
+ help
+ Virtual Kernel Mode-Setting (VKMS) is used for testing or for
+ running a GPU in headless machines. Choose this option to get
+ a VKMS.
+
+ If M is selected the module will be called vkms.
source "drivers/gpu/drm/exynos/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ef9f3dab287f..a6771cef85e2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o drm_lease.o
+ drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -41,6 +41,7 @@ drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
@@ -69,6 +70,7 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_VGEM) += vgem/
+obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 06192698bd96..5b393622f592 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -136,6 +136,7 @@
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
/****************************************************/
/* Graphics Object ENUM ID Definition */
@@ -714,6 +715,13 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a59c07590cee..447c4c7a36d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,8 @@
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
+#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
/*
* Modules parameters.
@@ -105,11 +107,8 @@ extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
-extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
@@ -190,6 +189,7 @@ struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -599,17 +599,6 @@ struct amdgpu_ib {
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job);
-
-void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct drm_sched_entity *entity, void *owner,
- struct dma_fence **f);
-
/*
* Queue manager
*/
@@ -683,8 +672,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
@@ -702,37 +691,6 @@ struct amdgpu_fpriv {
};
/*
- * residency list
- */
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_list {
- struct mutex lock;
- struct rcu_head rhead;
- struct kref refcount;
- struct amdgpu_bo *gds_obj;
- struct amdgpu_bo *gws_obj;
- struct amdgpu_bo *oa_obj;
- unsigned first_userptr;
- unsigned num_entries;
- struct amdgpu_bo_list_entry *array;
-};
-
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-
-/*
* GFX stuff
*/
#include "clearstate_defs.h"
@@ -930,6 +888,11 @@ struct amdgpu_ngg {
bool init;
};
+struct sq_work {
+ struct work_struct work;
+ unsigned ih_data;
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gfx_config config;
@@ -968,6 +931,10 @@ struct amdgpu_gfx {
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src cp_ecc_error_irq;
+ struct amdgpu_irq_src sq_irq;
+ struct sq_work sq_work;
+
/* gfx status */
uint32_t gfx_current_status;
/* ce ram size*/
@@ -1019,6 +986,7 @@ struct amdgpu_cs_parser {
/* scheduler job object */
struct amdgpu_job *job;
+ struct amdgpu_ring *ring;
/* buffer objects */
struct ww_acquire_ctx ticket;
@@ -1040,40 +1008,6 @@ struct amdgpu_cs_parser {
struct drm_syncobj **post_dep_syncobjs;
};
-#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */
-#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
-
-struct amdgpu_job {
- struct drm_sched_job base;
- struct amdgpu_device *adev;
- struct amdgpu_vm *vm;
- struct amdgpu_ring *ring;
- struct amdgpu_sync sync;
- struct amdgpu_sync sched_sync;
- struct amdgpu_ib *ibs;
- struct dma_fence *fence; /* the hw fence */
- uint32_t preamble_status;
- uint32_t num_ibs;
- void *owner;
- uint64_t fence_ctx; /* the fence_context this job uses */
- bool vm_needs_flush;
- uint64_t vm_pd_addr;
- unsigned vmid;
- unsigned pasid;
- uint32_t gds_base, gds_size;
- uint32_t gws_base, gws_size;
- uint32_t oa_base, oa_size;
- uint32_t vram_lost_counter;
-
- /* user fence handling */
- uint64_t uf_addr;
- uint64_t uf_sequence;
-
-};
-#define to_amdgpu_job(sched_job) \
- container_of((sched_job), struct amdgpu_job, base)
-
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
uint32_t ib_idx, int idx)
{
@@ -1269,43 +1203,6 @@ struct amdgpu_vram_scratch {
/*
* ACPI
*/
-struct amdgpu_atif_notification_cfg {
- bool enabled;
- int command_code;
-};
-
-struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
- bool thermal_state;
- bool forced_power_state;
- bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
- bool brightness_change;
- bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
- bool system_params;
- bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
- bool temperature_change;
- bool graphics_device_types;
-};
-
-struct amdgpu_atif {
- struct amdgpu_atif_notifications notifications;
- struct amdgpu_atif_functions functions;
- struct amdgpu_atif_notification_cfg notification_cfg;
- struct amdgpu_encoder *encoder_for_bl;
-};
-
struct amdgpu_atcs_functions {
bool get_ext_state;
bool pcie_perf_req;
@@ -1425,6 +1322,7 @@ enum amd_hw_ip_block_type {
PWR_HWIP,
NBIF_HWIP,
THM_HWIP,
+ CLK_HWIP,
MAX_HWIP
};
@@ -1466,7 +1364,7 @@ struct amdgpu_device {
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
- struct amdgpu_atif atif;
+ struct amdgpu_atif *atif;
struct amdgpu_atcs atcs;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1615,9 +1513,9 @@ struct amdgpu_device {
DECLARE_HASHTABLE(mn_hash, 7);
/* tracking pinned memory */
- u64 vram_pin_size;
- u64 invisible_pin_size;
- u64 gart_pin_size;
+ atomic64_t vram_pin_size;
+ atomic64_t visible_pin_size;
+ atomic64_t gart_pin_size;
/* amdkfd interface */
struct kfd_dev *kfd;
@@ -1812,6 +1710,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
@@ -1865,8 +1764,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,
@@ -1894,6 +1791,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
/*
* KMS
*/
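Converting the pin-size counters to atomic64_t lets the pin/unpin paths update them without taking a global lock, while readers simply sample the counter. A generic sketch of that accounting pattern (names are illustrative, not the driver's):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t vram_pin_size = ATOMIC64_INIT(0);

static void pin_account(u64 size)
{
	atomic64_add(size, &vram_pin_size);	/* no external lock needed */
}

static void unpin_account(u64 size)
{
	atomic64_sub(size, &vram_pin_size);
}

static u64 pin_size_read(void)
{
	return atomic64_read(&vram_pin_size);	/* lockless snapshot */
}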
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index f4c474a95875..71efcf38f11b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -57,6 +57,10 @@
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+#define ACP_BT_PLAY_REGS_START 0x14970
+#define ACP_BT_PLAY_REGS_END 0x14a24
+#define ACP_BT_COMP1_REG_OFFSET 0xac
+#define ACP_BT_COMP2_REG_OFFSET 0xa8
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
@@ -77,7 +81,7 @@
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
+#define ACP_DEVS 4
#define ACP_SRC_ID 162
enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
if (adev->acp.acp_cell == NULL)
return -ENOMEM;
- adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
-
+ adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
kfree(adev->acp.acp_cell);
return -ENOMEM;
}
- i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
+ i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
kfree(adev->acp.acp_res);
kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+ i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ break;
+ }
+
+ i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+ i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+ i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
adev->acp.acp_res[0].name = "acp2x_dma";
adev->acp.acp_res[0].flags = IORESOURCE_MEM;
adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
- adev->acp.acp_res[3].name = "acp2x_dma_irq";
- adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
- adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
- adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+ adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+ adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+ adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+ adev->acp.acp_res[4].name = "acp2x_dma_irq";
+ adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+ adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+ adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
- adev->acp.acp_cell[0].num_resources = 4;
+ adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
+ adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].num_resources = 1;
+ adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+ adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+ adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
-
return 0;
}
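The BT I2S device added above follows the file's existing shape: one IORESOURCE_MEM entry plus a "designware-i2s" cell pointing at it and at an i2s_platform_data. A condensed sketch of how a cell, its resource, and its platform data tie together (values abbreviated from the code above):

#include <linux/ioport.h>
#include <linux/mfd/core.h>

static struct resource bt_res = {
	.name	= "acp2x_dw_bt_i2s_play_cap",
	.flags	= IORESOURCE_MEM,
	/* .start/.end filled in from acp_base at probe time */
};

static struct mfd_cell bt_cell = {
	.name		= "designware-i2s",
	.num_resources	= 1,
	.resources	= &bt_res,
	/* .platform_data/.pdata_size point at an i2s_platform_data */
};

/* mfd_add_hotplug_devices(parent, cells, n) then registers every cell */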
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 8fa850a070e0..353993218f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -34,6 +34,45 @@
#include "amd_acpi.h"
#include "atom.h"
+struct amdgpu_atif_notification_cfg {
+ bool enabled;
+ int command_code;
+};
+
+struct amdgpu_atif_notifications {
+ bool display_switch;
+ bool expansion_mode_change;
+ bool thermal_state;
+ bool forced_power_state;
+ bool system_power_state;
+ bool display_conf_change;
+ bool px_gfx_switch;
+ bool brightness_change;
+ bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+ bool system_params;
+ bool sbios_requests;
+ bool select_active_disp;
+ bool lid_state;
+ bool get_tv_standard;
+ bool set_tv_standard;
+ bool get_panel_expansion_mode;
+ bool set_panel_expansion_mode;
+ bool temperature_change;
+ bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+ acpi_handle handle;
+
+ struct amdgpu_atif_notifications notifications;
+ struct amdgpu_atif_functions functions;
+ struct amdgpu_atif_notification_cfg notification_cfg;
+ struct amdgpu_encoder *encoder_for_bl;
+};
+
/* Call the ATIF method
*/
/**
@@ -46,8 +85,9 @@
* Executes the requested ATIF function (all asics).
* Returns a pointer to the acpi output buffer.
*/
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
- struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+ int function,
+ struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
atif_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+ status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+ &buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
-static int amdgpu_atif_verify_interface(acpi_handle handle,
- struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct atif_verify_interface output;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -176,6 +216,35 @@ out:
return err;
}
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+ acpi_handle handle = NULL;
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+ acpi_status status;
+
+ /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on dGPU-only
+ * systems, ATIF is in the dGPU's namespace.
+ */
+ status = acpi_get_handle(dhandle, "ATIF", &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+
+ if (amdgpu_has_atpx()) {
+ status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+ &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("No ATIF handle found\n");
+ return NULL;
+out:
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+ return handle;
+}
+
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
@@ -188,15 +257,16 @@ out:
* where n is specified in the result if a notifier is used.
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
- struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
{
union acpi_object *info;
+ struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
struct atif_system_params params;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+ NULL);
if (!info) {
err = -EIO;
goto out;
@@ -250,14 +320,15 @@ out:
* (all asics).
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
- struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+ struct atif_sbios_requests *req)
{
union acpi_object *info;
size_t size;
int count = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+ NULL);
if (!info)
return -EIO;
@@ -290,11 +361,9 @@ out:
* Returns NOTIFY code
*/
static int amdgpu_atif_handler(struct amdgpu_device *adev,
- struct acpi_bus_event *event)
+ struct acpi_bus_event *event)
{
- struct amdgpu_atif *atif = &adev->atif;
- struct atif_sbios_requests req;
- acpi_handle handle;
+ struct amdgpu_atif *atif = adev->atif;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,48 +372,54 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
- if (!atif->notification_cfg.enabled ||
+ if (!atif ||
+ !atif->notification_cfg.enabled ||
event->type != atif->notification_cfg.command_code)
/* Not our event */
return NOTIFY_DONE;
- /* Check pending SBIOS requests */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- count = amdgpu_atif_get_sbios_requests(handle, &req);
+ if (atif->functions.sbios_requests) {
+ struct atif_sbios_requests req;
- if (count <= 0)
- return NOTIFY_DONE;
+ /* Check pending SBIOS requests */
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
- DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+ if (count <= 0)
+ return NOTIFY_DONE;
- if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
- struct amdgpu_encoder *enc = atif->encoder_for_bl;
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
- if (enc) {
- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+ /* todo: add DC handling */
+ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+ !amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_encoder *enc = atif->encoder_for_bl;
- DRM_DEBUG_DRIVER("Changing brightness to %d\n",
- req.backlight_level);
+ if (enc) {
+ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
- amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+ amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- backlight_force_update(dig->bl_dev,
- BACKLIGHT_UPDATE_HOTKEY);
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
#endif
+ }
}
- }
- if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
- pm_runtime_get_sync(adev->ddev->dev);
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(adev->ddev);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((adev->flags & AMD_IS_PX) &&
+ amdgpu_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(adev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev->ddev);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ }
}
+ /* TODO: check other events */
}
- /* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
* overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
@@ -641,8 +716,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
- acpi_handle handle;
- struct amdgpu_atif *atif = &adev->atif;
+ acpi_handle handle, atif_handle;
+ struct amdgpu_atif *atif;
struct amdgpu_atcs *atcs = &adev->atcs;
int ret;
@@ -658,12 +733,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
}
+ /* Probe for ATIF, and initialize it if found */
+ atif_handle = amdgpu_atif_probe_handle(handle);
+ if (!atif_handle)
+ goto out;
+
+ atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+ if (!atif) {
+ DRM_WARN("Not enough memory to initialize ATIF\n");
+ goto out;
+ }
+ atif->handle = atif_handle;
+
/* Call the ATIF method */
- ret = amdgpu_atif_verify_interface(handle, atif);
+ ret = amdgpu_atif_verify_interface(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+ kfree(atif);
goto out;
}
+ adev->atif = atif;
if (atif->notifications.brightness_change) {
struct drm_encoder *tmp;
@@ -693,8 +782,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
if (atif->functions.system_params) {
- ret = amdgpu_atif_get_notification_params(handle,
- &atif->notification_cfg);
+ ret = amdgpu_atif_get_notification_params(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
ret);
@@ -720,4 +808,6 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
+ kfree(adev->atif);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 305143fcc1ce..f8bbbb3a9504 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,6 +243,33 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
return r;
}
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->pre_reset(adev->kfd);
+
+ return r;
+}
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->post_reset(adev->kfd);
+
+ return r;
+}
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_device_gpu_recover(adev, NULL, false);
+}
+
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr)
@@ -251,7 +278,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo_param bp;
int r;
- uint64_t gpu_addr_tmp = 0;
void *cpu_ptr_tmp = NULL;
memset(&bp, 0, sizeof(bp));
@@ -275,13 +301,18 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
goto allocate_mem_reserve_bo_failed;
}
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
- &gpu_addr_tmp);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r) {
dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
goto allocate_mem_pin_bo_failed;
}
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto allocate_mem_kmap_bo_failed;
+ }
+
r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
if (r) {
dev_err(adev->dev,
@@ -290,7 +321,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
}
*mem_obj = bo;
- *gpu_addr = gpu_addr_tmp;
+ *gpu_addr = amdgpu_bo_gpu_offset(bo);
*cpu_ptr = cpu_ptr_tmp;
amdgpu_bo_unreserve(bo);
@@ -457,6 +488,14 @@ err:
return ret;
}
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+}
+
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
if (adev->kfd) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a8418a3f4e9d..2f379c183ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -119,6 +119,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
@@ -126,6 +127,12 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -183,6 +190,9 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 2c14025e5e76..574c1181ae9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -173,7 +173,5 @@ static const struct dma_fence_ops amdkfd_fence_ops = {
.get_driver_name = amdkfd_fence_get_driver_name,
.get_timeline_name = amdkfd_fence_get_timeline_name,
.enable_signaling = amdkfd_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = amdkfd_fence_release,
};
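Dropping the explicit .signaled = NULL and .wait assignments relies on the dma-fence core of this series treating both callbacks as optional, falling back to dma_fence_default_wait() when .wait is NULL. A minimal ops table sketch under that assumption:

#include <linux/dma-fence.h>

static const char *ex_get_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *ex_get_timeline_name(struct dma_fence *f)
{
	return "example-timeline";
}

static bool ex_enable_signaling(struct dma_fence *f)
{
	return true;	/* pretend signaling is always armed */
}

static const struct dma_fence_ops ex_fence_ops = {
	.get_driver_name = ex_get_driver_name,
	.get_timeline_name = ex_get_timeline_name,
	.enable_signaling = ex_enable_signaling,
	/* .signaled, .wait and .release left NULL: core defaults apply */
};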
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea79908dac4c..ea3f698aef5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -145,6 +145,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
@@ -216,6 +217,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -571,6 +576,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
unsigned long flags, end_jiffies;
int retry;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -882,6 +890,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
@@ -911,3 +922,19 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+/**
+ * read_vmid_from_vmfault_reg - read vmid from register
+ *
+ * @kgd: kgd device pointer
+ *
+ * Read the vmid field from the VM_CONTEXT1_PROTECTION_FAULT_STATUS
+ * register (CIK).
+ */
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+
+ return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+}
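read_vmid_from_vmfault_reg() leans on amdgpu's REG_GET_FIELD() mask-and-shift helper. Conceptually it reduces to the sketch below; the real macro derives the mask and shift from generated reg##__##field##_MASK / __SHIFT defines, so the constants here are illustrative only:

/* conceptual form of REG_GET_FIELD(value, REG, FIELD) */
#define EX_GET_FIELD(value, mask, shift)	(((value) & (mask)) >> (shift))

/*
 * e.g. pulling a 4-bit VMID field out of a fault-status word,
 * with a made-up mask/shift for illustration:
 */
static unsigned int ex_vmid(unsigned int status)
{
	return EX_GET_FIELD(status, 0x1e000000, 25);
}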
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19dd665e7307..f6e53e9352bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -176,6 +176,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -568,6 +571,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
struct vi_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -844,6 +850,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1db60aa5b7f0..8efedfcb9dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -213,6 +213,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
@@ -679,6 +681,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v9_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -866,6 +871,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
if (ring->ready)
return invalidate_tlbs_with_kiq(adev, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ff8fd75f7ca5..f92597c292fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence ***ef_list,
unsigned int *ef_count)
{
- struct reservation_object_list *fobj;
- struct reservation_object *resv;
- unsigned int i = 0, j = 0, k = 0, shared_count;
- unsigned int count = 0;
- struct amdgpu_amdkfd_fence **fence_list;
+ struct reservation_object *resv = bo->tbo.resv;
+ struct reservation_object_list *old, *new;
+ unsigned int i, j, k;
if (!ef && !ef_list)
return -EINVAL;
@@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
*ef_count = 0;
}
- resv = bo->tbo.resv;
- fobj = reservation_object_get_list(resv);
-
- if (!fobj)
+ old = reservation_object_get_list(resv);
+ if (!old)
return 0;
- preempt_disable();
- write_seqcount_begin(&resv->seq);
+ new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
+ GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
- /* Go through all the shared fences in the resevation object. If
- * ef is specified and it exists in the list, remove it and reduce the
- * count. If ef is not specified, then get the count of eviction fences
- * present.
+ /* Go through all the shared fences in the reservation object and sort
+ * the interesting ones to the end of the list.
*/
- shared_count = fobj->shared_count;
- for (i = 0; i < shared_count; ++i) {
+ for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
struct dma_fence *f;
- f = rcu_dereference_protected(fobj->shared[i],
+ f = rcu_dereference_protected(old->shared[i],
reservation_object_held(resv));
- if (ef) {
- if (f->context == ef->base.context) {
- dma_fence_put(f);
- fobj->shared_count--;
- } else {
- RCU_INIT_POINTER(fobj->shared[j++], f);
- }
- } else if (to_amdgpu_amdkfd_fence(f))
- count++;
+ if ((ef && f->context == ef->base.context) ||
+ (!ef && to_amdgpu_amdkfd_fence(f)))
+ RCU_INIT_POINTER(new->shared[--j], f);
+ else
+ RCU_INIT_POINTER(new->shared[k++], f);
}
- write_seqcount_end(&resv->seq);
- preempt_enable();
-
- if (ef || !count)
- return 0;
-
- /* Alloc memory for count number of eviction fence pointers. Fill the
- * ef_list array and ef_count
- */
- fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
- GFP_KERNEL);
- if (!fence_list)
- return -ENOMEM;
-
- preempt_disable();
- write_seqcount_begin(&resv->seq);
+ new->shared_max = old->shared_max;
+ new->shared_count = k;
- j = 0;
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *f;
- struct amdgpu_amdkfd_fence *efence;
+ if (!ef) {
+ unsigned int count = old->shared_count - j;
- f = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
+ /* Allocate memory for 'count' eviction fence pointers and
+ * fill the ef_list array and ef_count.
+ */
+ *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
+ *ef_count = count;
- efence = to_amdgpu_amdkfd_fence(f);
- if (efence) {
- fence_list[k++] = efence;
- fobj->shared_count--;
- } else {
- RCU_INIT_POINTER(fobj->shared[j++], f);
+ if (!*ef_list) {
+ kfree(new);
+ return -ENOMEM;
}
}
+ /* Install the new fence list, seqcount provides the barriers */
+ preempt_disable();
+ write_seqcount_begin(&resv->seq);
+ RCU_INIT_POINTER(resv->fence, new);
write_seqcount_end(&resv->seq);
preempt_enable();
- *ef_list = fence_list;
- *ef_count = k;
+ /* Drop the references to the removed fences or move them to ef_list */
+ for (i = j, k = 0; i < old->shared_count; ++i) {
+ struct dma_fence *f;
+
+ f = rcu_dereference_protected(new->shared[i],
+ reservation_object_held(resv));
+ if (!ef)
+ (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
+ else
+ dma_fence_put(f);
+ }
+ kfree_rcu(old, rcu);
return 0;
}
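
The rewritten amdgpu_amdkfd_remove_eviction_fence() above is the classic RCU replace-and-reclaim shape: build a complete replacement off to the side, publish it with a single pointer store inside the seqcount write section, and leave the old copy to kfree_rcu() once readers drain. A minimal sketch of the same idiom; 'flist' and flist_replace() are illustrative, not amdgpu API, and an external lock is assumed to serialize updaters, as reservation_object_held() does in the patch:

/* Hedged sketch of the replace-and-reclaim idiom used above. */
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct flist {
	struct rcu_head rcu;
	unsigned int count;
	void __rcu *items[];		/* trailing array, sized at alloc */
};

static struct flist __rcu *current_list;

static int flist_replace(unsigned int count)
{
	struct flist *old, *new;

	new = kmalloc(offsetof(struct flist, items[count]), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->count = count;
	/* ... fill new->items[] while the old list stays visible ... */

	old = rcu_dereference_protected(current_list, true);
	rcu_assign_pointer(current_list, new);	/* single publish point */
	if (old)
		kfree_rcu(old, rcu);	/* freed only after a grace period */
	return 0;
}
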
@@ -334,7 +323,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
"Called with userptr BO"))
return -EINVAL;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -622,7 +611,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
pr_err("%s: failed to validate BO\n", __func__);
@@ -1587,7 +1576,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
goto bo_reserve_failed;
}
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
pr_err("Failed to pin bo. ret %d\n", ret);
goto pin_failed;
@@ -1621,6 +1610,20 @@ bo_reserve_failed:
return ret;
}
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *mem)
+{
+ struct amdgpu_device *adev;
+
+ adev = (struct amdgpu_device *)kgd;
+ if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ *mem = *adev->gmc.vm_fault_info;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ }
+ return 0;
+}
+
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
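
The new amdgpu_amdkfd_gpuvm_get_vm_fault_info() added above consumes a fault record only when the producer has flagged it as updated, and the mb() orders the copy against re-arming the flag. A hedged sketch of that handshake; the struct, its fields, and the producer side are assumptions, only the atomic_read()/mb()/atomic_set() ordering mirrors the patch:

/* Illustrative flag-and-barrier producer/consumer handshake. */
#include <linux/types.h>
#include <linux/atomic.h>

struct fault_record {
	u64 addr;
	u32 status;
};

struct fault_box {
	struct fault_record record;
	atomic_t updated;
};

static void fault_box_write(struct fault_box *b, const struct fault_record *r)
{
	b->record = *r;
	mb();				/* record visible before the flag */
	atomic_set(&b->updated, 1);
}

static bool fault_box_read(struct fault_box *b, struct fault_record *out)
{
	if (atomic_read(&b->updated) != 1)
		return false;		/* nothing new to consume */
	*out = b->record;
	mb();				/* copy done before re-arming */
	atomic_set(&b->updated, 0);
	return true;
}
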
@@ -1680,7 +1683,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (amdgpu_bo_reserve(bo, true))
return -EAGAIN;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (ret) {
@@ -1824,7 +1827,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
if (mem->user_pages[0]) {
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
mem->user_pages);
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret) {
pr_err("%s: failed to validate BO\n", __func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index daa06e7c5bb7..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -32,7 +32,7 @@ struct amdgpu_atpx_functions {
bool switch_start;
bool switch_end;
bool disp_connectors_mapping;
- bool disp_detetion_ports;
+ bool disp_detection_ports;
};
struct amdgpu_atpx {
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
}
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+ return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -156,7 +162,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
- f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
}
/**
@@ -569,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 19cfff31f2e1..3079ea8523c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -95,11 +95,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_bo_reserve(sobj, false);
if (unlikely(r != 0))
goto out_cleanup;
- r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+ r = amdgpu_bo_pin(sobj, sdomain);
+ if (r) {
+ amdgpu_bo_unreserve(sobj);
+ goto out_cleanup;
+ }
+ r = amdgpu_ttm_alloc_gart(&sobj->tbo);
amdgpu_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
+ saddr = amdgpu_bo_gpu_offset(sobj);
bp.domain = ddomain;
r = amdgpu_bo_create(adev, &bp, &dobj);
if (r) {
@@ -108,11 +114,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_bo_reserve(dobj, false);
if (unlikely(r != 0))
goto out_cleanup;
- r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+ r = amdgpu_bo_pin(dobj, ddomain);
+ if (r) {
+ amdgpu_bo_unreserve(sobj);
+ goto out_cleanup;
+ }
+ r = amdgpu_ttm_alloc_gart(&dobj->tbo);
amdgpu_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
+ daddr = amdgpu_bo_gpu_offset(dobj);
if (adev->mman.buffer_funcs) {
time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
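
With the amdgpu_bo_pin() signature change in this series, callers that need a GPU address pin first, bind the BO to the GART explicitly with amdgpu_ttm_alloc_gart(), and only then query amdgpu_bo_gpu_offset(). A sketch of the reworked sequence; pin_and_map() is an illustrative helper this patch does not add, the three amdgpu calls are the ones callers are switched to, and driver context (amdgpu.h) is assumed:

/* Sketch of the post-rework pin sequence. */
static int pin_and_map(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	r = amdgpu_bo_pin(bo, domain);		/* no address out-param */
	if (r)
		goto unreserve;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);	/* bind to GART explicitly */
	if (r)
		goto unpin;

	*gpu_addr = amdgpu_bo_gpu_offset(bo);	/* valid only after binding */
	amdgpu_bo_unreserve(bo);
	return 0;

unpin:
	amdgpu_bo_unpin(bo);
unreserve:
	amdgpu_bo_unreserve(bo);
	return r;
}
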
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 92be7f6de197..d472a2c8399f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -35,92 +35,53 @@
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries);
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+{
+ struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+ rhead);
+
+ kvfree(list);
+}
-static void amdgpu_bo_list_release_rcu(struct kref *ref)
+static void amdgpu_bo_list_free(struct kref *ref)
{
- unsigned i;
struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
refcount);
+ struct amdgpu_bo_list_entry *e;
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->robj);
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree_rcu(list, rhead);
+ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
- int *id)
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries, struct amdgpu_bo_list **result)
{
- int r;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ unsigned last_entry = 0, first_userptr = num_entries;
+ struct amdgpu_bo_list_entry *array;
struct amdgpu_bo_list *list;
+ uint64_t total_size = 0;
+ size_t size;
+ unsigned i;
+ int r;
+
+ if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
+ return -EINVAL;
- list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+ size = sizeof(struct amdgpu_bo_list);
+ size += num_entries * sizeof(struct amdgpu_bo_list_entry);
+ list = kvmalloc(size, GFP_KERNEL);
if (!list)
return -ENOMEM;
- /* initialize bo list*/
- mutex_init(&list->lock);
kref_init(&list->refcount);
- r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
- if (r) {
- kfree(list);
- return r;
- }
-
- /* idr alloc should be called only after initialization of bo list. */
- mutex_lock(&fpriv->bo_list_lock);
- r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
- mutex_unlock(&fpriv->bo_list_lock);
- if (r < 0) {
- amdgpu_bo_list_free(list);
- return r;
- }
- *id = r;
-
- return 0;
-}
-
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
-{
- struct amdgpu_bo_list *list;
-
- mutex_lock(&fpriv->bo_list_lock);
- list = idr_remove(&fpriv->bo_list_handles, id);
- mutex_unlock(&fpriv->bo_list_lock);
- if (list)
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries)
-{
- struct amdgpu_bo_list_entry *array;
- struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
- struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
- struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
- unsigned last_entry = 0, first_userptr = num_entries;
- unsigned i;
- int r;
- unsigned long total_size = 0;
+ list->gds_obj = adev->gds.gds_gfx_bo;
+ list->gws_obj = adev->gds.gws_gfx_bo;
+ list->oa_obj = adev->gds.oa_gfx_bo;
- array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
- if (!array)
- return -ENOMEM;
+ array = amdgpu_bo_list_array_entry(list, 0);
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < num_entries; ++i) {
@@ -157,59 +118,56 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->tv.shared = !entry->robj->prime_shared_count;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
- gds_obj = entry->robj;
+ list->gds_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
- gws_obj = entry->robj;
+ list->gws_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
- oa_obj = entry->robj;
+ list->oa_obj = entry->robj;
total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- kvfree(list->array);
-
- list->gds_obj = gds_obj;
- list->gws_obj = gws_obj;
- list->oa_obj = oa_obj;
list->first_userptr = first_userptr;
- list->array = array;
list->num_entries = num_entries;
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+
+ *result = list;
return 0;
error_free:
while (i--)
amdgpu_bo_unref(&array[i].robj);
- kvfree(array);
+ kvfree(list);
return r;
}
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
- struct amdgpu_bo_list *result;
+ struct amdgpu_bo_list *list;
+
+ mutex_lock(&fpriv->bo_list_lock);
+ list = idr_remove(&fpriv->bo_list_handles, id);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (list)
+ kref_put(&list->refcount, amdgpu_bo_list_free);
+}
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result)
+{
rcu_read_lock();
- result = idr_find(&fpriv->bo_list_handles, id);
+ *result = idr_find(&fpriv->bo_list_handles, id);
- if (result) {
- if (kref_get_unless_zero(&result->refcount)) {
- rcu_read_unlock();
- mutex_lock(&result->lock);
- } else {
- rcu_read_unlock();
- result = NULL;
- }
- } else {
+ if (*result && kref_get_unless_zero(&(*result)->refcount)) {
rcu_read_unlock();
+ return 0;
}
- return result;
+ rcu_read_unlock();
+ return -ENOENT;
}
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
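
The new amdgpu_bo_list_get() above is the standard RCU lookup idiom: find the object under rcu_read_lock(), then take a reference with kref_get_unless_zero(); if the refcount already hit zero the object is on its way to call_rcu() and the lookup must fail. A generic sketch, assuming objects are released through an RCU callback after their last put, as amdgpu_bo_list now is:

/* Generic form of the RCU-protected refcounted lookup above. */
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>

struct obj {
	struct kref refcount;
	struct rcu_head rcu;
};

static struct obj *obj_lookup(struct idr *table, int id)
{
	struct obj *o;

	rcu_read_lock();
	o = idr_find(table, id);
	if (o && !kref_get_unless_zero(&o->refcount))
		o = NULL;	/* lost the race against the last put */
	rcu_read_unlock();

	return o;
}
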
@@ -220,6 +178,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* concatenated in descending order.
*/
struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+ struct amdgpu_bo_list_entry *e;
unsigned i;
for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -230,14 +189,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* in the list, the sort mustn't change the ordering of buffers
* with the same priority, i.e. it must be stable.
*/
- for (i = 0; i < list->num_entries; i++) {
- unsigned priority = list->array[i].priority;
+ amdgpu_bo_list_for_each_entry(e, list) {
+ unsigned priority = e->priority;
- if (!list->array[i].robj->parent)
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
+ if (!e->robj->parent)
+ list_add_tail(&e->tv.head, &bucket[priority]);
- list->array[i].user_pages = NULL;
+ e->user_pages = NULL;
}
/* Connect the sorted buckets in the output list. */
@@ -247,71 +205,82 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
- mutex_unlock(&list->lock);
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
-{
- unsigned i;
-
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree(list);
+ kref_put(&list->refcount, amdgpu_bo_list_free);
}
-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
{
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
- union drm_amdgpu_bo_list *args = data;
- uint32_t handle = args->in.list_handle;
- const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
-
struct drm_amdgpu_bo_list_entry *info;
- struct amdgpu_bo_list *list;
-
int r;
- info = kvmalloc_array(args->in.bo_number,
- sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
+ info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
if (!info)
return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
r = -EFAULT;
- if (likely(info_size == args->in.bo_info_size)) {
- unsigned long bytes = args->in.bo_number *
- args->in.bo_info_size;
+ if (likely(info_size == in->bo_info_size)) {
+ unsigned long bytes = in->bo_number *
+ in->bo_info_size;
if (copy_from_user(info, uptr, bytes))
goto error_free;
} else {
- unsigned long bytes = min(args->in.bo_info_size, info_size);
+ unsigned long bytes = min(in->bo_info_size, info_size);
unsigned i;
- memset(info, 0, args->in.bo_number * info_size);
- for (i = 0; i < args->in.bo_number; ++i) {
+ memset(info, 0, in->bo_number * info_size);
+ for (i = 0; i < in->bo_number; ++i) {
if (copy_from_user(&info[i], uptr, bytes))
goto error_free;
- uptr += args->in.bo_info_size;
+ uptr += in->bo_info_size;
}
}
+ *info_param = info;
+ return 0;
+
+error_free:
+ kvfree(info);
+ return r;
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ union drm_amdgpu_bo_list *args = data;
+ uint32_t handle = args->in.list_handle;
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+ struct amdgpu_bo_list *list, *old;
+ int r;
+
+ r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+ if (r)
+ goto error_free;
+
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
- &handle);
+ &list);
if (r)
goto error_free;
+
+ mutex_lock(&fpriv->bo_list_lock);
+ r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (r < 0) {
+ amdgpu_bo_list_put(list);
+ return r;
+ }
+
+ handle = r;
break;
case AMDGPU_BO_LIST_OP_DESTROY:
@@ -320,17 +289,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
break;
case AMDGPU_BO_LIST_OP_UPDATE:
- r = -ENOENT;
- list = amdgpu_bo_list_get(fpriv, handle);
- if (!list)
+ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+ &list);
+ if (r)
goto error_free;
- r = amdgpu_bo_list_set(adev, filp, list, info,
- args->in.bo_number);
- amdgpu_bo_list_put(list);
- if (r)
+ mutex_lock(&fpriv->bo_list_lock);
+ old = idr_replace(&fpriv->bo_list_handles, list, handle);
+ mutex_unlock(&fpriv->bo_list_lock);
+
+ if (IS_ERR(old)) {
+ amdgpu_bo_list_put(list);
+ r = PTR_ERR(old);
goto error_free;
+ }
+ amdgpu_bo_list_put(old);
break;
default:
@@ -345,6 +319,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
return 0;
error_free:
- kvfree(info);
+ if (info)
+ kvfree(info);
return r;
}
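
The UPDATE path above no longer mutates a live list in place; it builds a whole new one and swaps it into the handle table with idr_replace(), which returns either the old pointer or an ERR_PTR() if the id was never allocated. A sketch of that copy-on-update shape; 'obj', obj_put() and the lock are illustrative stand-ins for amdgpu_bo_list, amdgpu_bo_list_put() and fpriv->bo_list_lock:

/* Copy-on-update sketch of the idr_replace() path. */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/err.h>

struct obj;
void obj_put(struct obj *o);	/* hypothetical kref_put() wrapper */

static int obj_update(struct idr *table, struct mutex *lock,
		      int id, struct obj *replacement)
{
	struct obj *old;

	mutex_lock(lock);
	old = idr_replace(table, replacement, id);
	mutex_unlock(lock);

	if (IS_ERR(old)) {
		obj_put(replacement);	/* unknown id: drop the new copy */
		return PTR_ERR(old);
	}

	obj_put(old);	/* readers of the old list drain via RCU */
	return 0;
}
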
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
new file mode 100644
index 000000000000..61b089768e1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/amdgpu_drm.h>
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
+};
+
+struct amdgpu_bo_list {
+ struct rcu_head rhead;
+ struct kref refcount;
+ struct amdgpu_bo *gds_obj;
+ struct amdgpu_bo *gws_obj;
+ struct amdgpu_bo *oa_obj;
+ unsigned first_userptr;
+ unsigned num_entries;
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries,
+ struct amdgpu_bo_list **list);
+
+static inline struct amdgpu_bo_list_entry *
+amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
+{
+ struct amdgpu_bo_list_entry *array = (void *)&list[1];
+
+ return &array[index];
+}
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, 0); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#endif
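
The header above co-allocates the entry array with the list header in one kvmalloc() and addresses it as (void *)&list[1], the first byte past the struct; the two iteration macros are thin wrappers over that accessor. A sketch of sizing and indexing such a layout, with illustrative type names and an overflow guard in the spirit of the one in amdgpu_bo_list_create():

/* Sketch of the single-allocation struct-plus-trailing-array layout. */
#include <linux/kernel.h>
#include <linux/mm.h>		/* kvmalloc()/kvfree() */

struct elem {
	int payload;
};

struct thing {
	unsigned int num;
	/* struct elem array lives immediately after this struct */
};

static struct thing *thing_alloc(unsigned int num)
{
	struct thing *t;

	if (num > (SIZE_MAX - sizeof(*t)) / sizeof(struct elem))
		return NULL;		/* size computation would wrap */

	t = kvmalloc(sizeof(*t) + num * sizeof(struct elem), GFP_KERNEL);
	if (t)
		t->num = num;
	return t;
}

static struct elem *thing_entry(struct thing *t, unsigned int i)
{
	struct elem *array = (void *)&t[1];	/* first byte past *t */

	return &array[i];
}
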
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index e950730f1933..693ec5ea4950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+ strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "radeon/bonaire_smc.bin");
+ strcpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+ strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "radeon/hawaii_smc.bin");
+ strcpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8e66851eb427..c770d73352a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -212,30 +212,21 @@ static void
amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
enum drm_connector_status status)
{
- struct drm_encoder *best_encoder = NULL;
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *best_encoder;
+ struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
best_encoder = connector_funcs->best_encoder(connector);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
connected = false;
amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
-
}
}
@@ -246,17 +237,11 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
+
return NULL;
}
@@ -349,22 +334,24 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
int ret;
if (amdgpu_connector->edid) {
- drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
+
+ /* pick the first one */
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -985,9 +972,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1077,14 +1063,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ struct drm_encoder *encoder;
+ int i;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1132,18 +1114,11 @@ exit:
static struct drm_encoder *
amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1158,8 +1133,9 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
	/* then check use digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -1296,15 +1272,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
struct amdgpu_encoder *amdgpu_encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1326,14 +1294,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
int i;
bool found = false;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82312a7bc6ad..502b94fb116a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -31,6 +31,7 @@
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_gmc.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -65,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
return 0;
}
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_bo_list_in *data)
+{
+ int r;
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+
+ r = amdgpu_bo_create_list_entry_array(data, &info);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+ &p->bo_list);
+ if (r)
+ goto error_free;
+
+ kvfree(info);
+ return 0;
+
+error_free:
+ if (info)
+ kvfree(info);
+
+ return r;
+}
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
unsigned size, num_ibs = 0;
@@ -163,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
+ case AMDGPU_CHUNK_ID_BO_HANDLES:
+ size = sizeof(struct drm_amdgpu_bo_list_in);
+ if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
+
+ ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+ if (ret)
+ goto free_partial_kdata;
+
+ break;
+
case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
@@ -186,6 +224,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (p->uf_entry.robj)
p->job->uf_addr = uf_offset;
kfree(chunk_array);
+
+ /* Use this opportunity to fill in task info for the vm */
+ amdgpu_vm_set_task_info(vm);
+
return 0;
free_all_kdata:
@@ -257,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
return;
}
- total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
+ total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
@@ -302,7 +344,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
/* Do the same for visible VRAM if half of it is free */
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -359,7 +401,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
* to move it. Don't move anything if the threshold is zero.
*/
if (p->bytes_moved < p->bytes_moved_threshold) {
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
* visible VRAM if we've depleted our allowance to do
@@ -377,11 +419,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
}
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo))
p->bytes_moved_vis += ctx.bytes_moved;
@@ -434,9 +476,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
update_bytes_moved_vis =
- adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_ttm_placement_from_domain(bo, other);
+ !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ amdgpu_bo_in_cpu_visible_vram(bo);
+ amdgpu_bo_placement_from_domain(bo, other);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
if (update_bytes_moved_vis)
@@ -490,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
/* Check if we have user pages and nobody bound the BO already */
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
lobj->user_pages) {
- amdgpu_ttm_placement_from_domain(bo,
- AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
@@ -519,23 +561,38 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- unsigned i, tries = 10;
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
+ unsigned tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
- if (p->bo_list) {
- amdgpu_bo_list_get_list(p->bo_list, &p->validated);
- if (p->bo_list->first_userptr != p->bo_list->num_entries)
- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+ /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+ if (cs->in.bo_list_handle) {
+ if (p->bo_list)
+ return -EINVAL;
+
+ r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+ &p->bo_list);
+ if (r)
+ return r;
+ } else if (!p->bo_list) {
+		/* Create an empty bo_list when no handle is provided */
+ r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+ &p->bo_list);
+ if (r)
+ return r;
}
+ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -544,7 +601,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
while (1) {
struct list_head need_pages;
- unsigned i;
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
@@ -554,17 +610,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_free_pages;
}
- /* Without a BO list we don't have userptr BOs */
- if (!p->bo_list)
- break;
-
INIT_LIST_HEAD(&need_pages);
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo;
-
- e = &p->bo_list->array[i];
- bo = e->robj;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
@@ -656,23 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- if (p->bo_list) {
- struct amdgpu_vm *vm = &fpriv->vm;
- unsigned i;
-
- gds = p->bo_list->gds_obj;
- gws = p->bo_list->gws_obj;
- oa = p->bo_list->oa_obj;
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+ gds = p->bo_list->gds_obj;
+ gws = p->bo_list->gws_obj;
+ oa = p->bo_list->oa_obj;
- p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
- }
- } else {
- gds = p->adev->gds.gds_gfx_bo;
- gws = p->adev->gds.gws_gfx_bo;
- oa = p->adev->gds.oa_gfx_bo;
- }
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -700,18 +737,13 @@ error_validate:
error_free_pages:
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- e = &p->bo_list->array[i];
-
- if (!e->user_pages)
- continue;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ if (!e->user_pages)
+ continue;
- release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages);
- kvfree(e->user_pages);
- }
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages);
+ kvfree(e->user_pages);
}
return r;
@@ -773,12 +805,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
- struct amdgpu_device *adev = p->adev;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
- int i, r;
+ int r;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
@@ -808,29 +841,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
}
- if (p->bo_list) {
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct dma_fence *f;
-
- /* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
- continue;
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct dma_fence *f;
- bo_va = p->bo_list->array[i].bo_va;
- if (bo_va == NULL)
- continue;
+ /* ignore duplicates */
+ bo = e->robj;
+ if (!bo)
+ continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r)
- return r;
+ bo_va = e->bo_va;
+ if (bo_va == NULL)
+ continue;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
- if (r)
- return r;
- }
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
+ f = bo_va->last_pt_update;
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ if (r)
+ return r;
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -845,15 +875,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- if (amdgpu_vm_debug && p->bo_list) {
+ if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
- for (i = 0; i < p->bo_list->num_entries; i++) {
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
/* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
+ if (!e->robj)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(adev, e->robj, false);
}
}
@@ -865,11 +894,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_ring *ring = p->job->ring;
+ struct amdgpu_ring *ring = p->ring;
int r;
/* Only for UVD/VCE VM emulation */
- if (p->job->ring->funcs->parse_cs) {
+ if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
unsigned i, j;
for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -910,12 +939,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += va_start - offset;
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
-
- r = amdgpu_ring_parse_cs(ring, p, j);
- if (r)
- return r;
+ if (p->ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, j);
+ if (r)
+ return r;
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
+ }
j++;
}
@@ -927,6 +964,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
r = amdgpu_bo_vm_update_pte(p);
if (r)
return r;
+
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+ if (r)
+ return r;
}
return amdgpu_cs_sync_rings(p);
@@ -979,10 +1020,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
}
- if (parser->job->ring && parser->job->ring != ring)
+ if (parser->ring && parser->ring != ring)
return -EINVAL;
- parser->job->ring = ring;
+ parser->ring = ring;
r = amdgpu_ib_get(adev, vm,
ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1001,11 +1042,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+ parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
- return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1156,31 +1197,30 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
- struct amdgpu_ring *ring = p->job->ring;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_ring *ring = p->ring;
struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ enum drm_sched_priority priority;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
- unsigned i;
uint64_t seq;
int r;
amdgpu_mn_lock(p->mn);
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- amdgpu_mn_unlock(p->mn);
- return -ERESTARTSYS;
- }
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+ return -ERESTARTSYS;
}
}
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ -1188,7 +1228,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
job->owner = p->filp;
- job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
@@ -1206,11 +1245,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
+ amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->rq->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
@@ -1601,7 +1644,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
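
amdgpu_bo_create_list_entry_array(), factored out earlier in this series, stays compatible with older and newer userspace by honoring the caller-supplied per-entry size: when it matches the kernel's struct the whole array is copied at once, otherwise each entry gets min(user size, kernel size) bytes copied into a zeroed buffer. A sketch of that size-negotiated copy; 'struct entry' is an illustrative current-kernel layout, the control flow mirrors the patch:

/* Sketch of the size-negotiated copy_from_user() pattern. */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

struct entry {
	u32 handle;
	u32 prio;
};

static struct entry *copy_entries(const void __user *uptr,
				  u32 count, u32 user_size)
{
	const u32 ksize = sizeof(struct entry);
	struct entry *info;
	u32 i;

	info = kvmalloc_array(count, ksize, GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (user_size == ksize) {
		/* same ABI revision: one bulk copy */
		if (copy_from_user(info, uptr, (size_t)count * ksize))
			goto efault;
		return info;
	}

	/* older or newer userspace: copy the common prefix, zero the rest */
	memset(info, 0, (size_t)count * ksize);
	for (i = 0; i < count; ++i) {
		if (copy_from_user(&info[i], uptr, min(user_size, ksize)))
			goto efault;
		uptr += user_size;
	}
	return info;

efault:
	kvfree(info);
	return ERR_PTR(-EFAULT);
}
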
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c5bb36275e93..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (ring == &adev->gfx.kiq.ring)
continue;
- r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, &ctx->guilty);
+ r = drm_sched_entity_init(&ctx->rings[i].entity,
+ &rq, 1, &ctx->guilty);
if (r)
goto failed;
}
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- drm_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
+ drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_destroy(&ctx->rings[i].entity);
}
amdgpu_ctx_fini(ref);
@@ -444,34 +442,36 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
idr_init(&mgr->ctx_handles);
}
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
+ long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
idp = &mgr->ctx_handles;
+ mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
- if (!ctx->adev)
+ if (!ctx->adev) {
+ mutex_unlock(&mgr->lock);
return;
+ }
for (i = 0; i < ctx->adev->num_rings; i++) {
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
- else
- DRM_ERROR("ctx %p is still alive\n", ctx);
+ max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+ max_wait);
}
}
+ mutex_unlock(&mgr->lock);
}
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@@ -490,8 +490,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
continue;
if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
@@ -504,7 +503,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
struct idr *idp;
uint32_t id;
- amdgpu_ctx_mgr_entity_cleanup(mgr);
+ amdgpu_ctx_mgr_entity_fini(mgr);
idp = &mgr->ctx_handles;
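
amdgpu_ctx_mgr_entity_flush() above walks every context under mgr->lock and hands each scheduler entity whatever remains of one shared wait budget: drm_sched_entity_flush() returns the time left, so a slow entity shrinks the timeout for those after it. The bare shape of that loop, with illustrative names standing in for the entity and drm_sched_entity_flush():

/* Shared-budget flush loop; flush_one() returns the budget remaining. */
struct worker;
long flush_one(struct worker *w, long budget);

static void flush_all(struct worker **workers, int n, long budget)
{
	int i;

	for (i = 0; i < n && budget > 0; i++)
		budget = flush_one(workers[i], budget);
}
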
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6e5284e6c028..8ab5ccbc14ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
@@ -675,17 +676,15 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GART location
*
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
*
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GART before or after VRAM.
*
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If GART size is bigger than space left then we adjust GART size.
 * Thus the function never fails.
- *
- * FIXME: when reducing GTT size align new size on power of 2.
*/
void amdgpu_device_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
@@ -698,13 +697,13 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
size_bf = mc->vram_start;
if (size_bf > size_af) {
if (mc->gart_size > size_bf) {
- dev_warn(adev->dev, "limiting GTT\n");
+ dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_bf;
}
mc->gart_start = 0;
} else {
if (mc->gart_size > size_af) {
- dev_warn(adev->dev, "limiting GTT\n");
+ dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_af;
}
/* VCE doesn't like it when BOs cross a 4GB segment, so align
@@ -713,7 +712,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
}
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
@@ -1077,7 +1076,7 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
/**
* amdgpu_device_ip_set_clockgating_state - set the CG state
*
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: clockgating state (gate or ungate)
*
@@ -1111,7 +1110,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
/**
* amdgpu_device_ip_set_powergating_state - set the PG state
*
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: powergating state (gate or ungate)
*
@@ -1222,7 +1221,7 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
* amdgpu_device_ip_get_ip_block - get a hw IP pointer
*
* @adev: amdgpu_device pointer
- * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
* Returns a pointer to the hardware IP block structure
* if it exists for the asic, otherwise NULL.
@@ -1708,10 +1707,6 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
if (amdgpu_emu_mode == 1)
return 0;
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1731,17 +1726,34 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
}
}
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
- /* enable gfx powergating */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
- /* enable gfxoff */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- AMD_PG_STATE_GATE);
- }
+ return 0;
+}
+
+static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+{
+ int i = 0, r;
+
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+		/* skip PG for UVD/VCE/VCN, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->funcs->set_powergating_state) {
+ /* enable powergating to save power */
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ AMD_PG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ }
+ }
return 0;
}
@@ -1775,6 +1787,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
}
}
+ amdgpu_device_ip_late_set_cg_state(adev);
+ amdgpu_device_ip_late_set_pg_state(adev);
+
queue_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
@@ -1813,6 +1828,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
@@ -1901,11 +1918,15 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
- amdgpu_device_ip_late_set_cg_state(adev);
+ int r;
+
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
}
/**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
*
* @adev: amdgpu_device pointer
*
@@ -1915,18 +1936,60 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_request_full_gpu(adev, false);
- /* ungate SMC block powergating */
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* displays are handled separately */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ /* ungate blocks so that suspend can properly shut them down */
+ if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ /* XXX handle errors */
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
/* ungate SMC block first */
r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
@@ -1935,9 +1998,16 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
}
+ /* call smu to disable gfx off feature first when suspend */
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
/* ungate blocks so that suspend can properly shut them down */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1963,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_suspend_phase2(adev);
+
+ return r;
+}
+
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1985,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2020,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2210,7 +2303,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
* amdgpu_device_init - initialize the driver
*
* @adev: amdgpu_device pointer
- * @pdev: drm dev pointer
+ * @ddev: drm dev pointer
* @pdev: pci dev pointer
* @flags: driver flags
*
@@ -2301,6 +2394,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->late_init_work,
amdgpu_device_ip_late_init_func_handler);
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+
/* Registers mapping */
/* TODO: block userspace mapping of io register */
if (adev->asic_type >= CHIP_BONAIRE) {
@@ -2581,8 +2676,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/**
* amdgpu_device_suspend - initiate device suspend
*
- * @pdev: drm dev pointer
- * @state: suspend state
+ * @dev: drm dev pointer
+ * @suspend: suspend state
+ * @fbcon: notify the fbdev of suspend
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
@@ -2606,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
+ if (fbcon)
+ amdgpu_fbdev_set_suspend(adev, 1);
+
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
@@ -2613,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
- }
-
- amdgpu_amdkfd_suspend(adev);
-
- /* unpin the front buffers and cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
- }
- if (fb == NULL || fb->obj[0] == NULL) {
- continue;
- }
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
+ if (fb == NULL || fb->obj[0] == NULL) {
+ continue;
+ }
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ /* don't unpin kernel fb objects */
+ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
}
}
}
+
+ amdgpu_amdkfd_suspend(adev);
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_device_ip_suspend(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2669,18 +2770,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
DRM_ERROR("amdgpu asic reset failed\n");
}
- if (fbcon) {
- console_lock();
- amdgpu_fbdev_set_suspend(adev, 1);
- console_unlock();
- }
return 0;
}
/**
* amdgpu_device_resume - initiate device resume
*
- * @pdev: drm dev pointer
+ * @dev: drm dev pointer
+ * @resume: resume state
+ * @fbcon: notify the fbdev of resume
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
@@ -2696,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (fbcon)
- console_lock();
-
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r)
- goto unlock;
+ return r;
}
/* post card */
@@ -2717,29 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_device_ip_resume(adev);
if (r) {
DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
- goto unlock;
+ return r;
}
amdgpu_fence_driver_resume(adev);
r = amdgpu_device_ip_late_init(adev);
if (r)
- goto unlock;
-
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- r = amdgpu_bo_pin(aobj,
- AMDGPU_GEM_DOMAIN_VRAM,
- &amdgpu_crtc->cursor_addr);
- if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
- amdgpu_bo_unreserve(aobj);
+ return r;
+
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
}
}
@@ -2747,6 +2843,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
return r;
+ /* Make sure IB tests flushed */
+ flush_delayed_work(&adev->late_init_work);
+
/* blat the mode back in */
if (fbcon) {
if (!amdgpu_device_has_dc_support(adev)) {
@@ -2760,6 +2859,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
drm_modeset_unlock_all(dev);
}
+ amdgpu_fbdev_set_suspend(adev, 0);
}
drm_kms_helper_poll_enable(dev);
@@ -2783,15 +2883,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
-
- if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
- if (fbcon)
- console_unlock();
-
- return r;
+ return 0;
}
/**
@@ -3016,7 +3108,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
long tmo;
if (amdgpu_sriov_runtime(adev))
- tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
+ tmo = msecs_to_jiffies(8000);
else
tmo = msecs_to_jiffies(100);
@@ -3068,7 +3160,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
* @adev: amdgpu device pointer
*
* attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
*/
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
@@ -3143,9 +3235,10 @@ out:
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu device pointer
+ * @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
*/
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
bool from_hypervisor)
@@ -3190,7 +3283,7 @@ error:
*
* @adev: amdgpu device pointer
* @job: which job trigger hang
- * @force forces reset regardless of amdgpu_gpu_recovery
+ * @force: forces reset regardless of amdgpu_gpu_recovery
*
* Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
@@ -3217,6 +3310,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;
+ /* Block kfd */
+ amdgpu_amdkfd_pre_reset(adev);
+
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -3229,10 +3325,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
kthread_park(ring->sched.thread);
- if (job && job->ring->idx != i)
+ if (job && job->base.sched == &ring->sched)
continue;
- drm_sched_hw_job_reset(&ring->sched, &job->base);
+ drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
@@ -3253,7 +3349,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->ring->idx == i) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !r)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3270,9 +3366,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+ dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
}
+ /* unlock kfd */
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
@@ -3290,8 +3388,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- u32 mask;
- int ret;
+ struct pci_dev *pdev;
+ enum pci_bus_speed speed_cap;
+ enum pcie_link_width link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3309,27 +3408,61 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
if (adev->pm.pcie_gen_mask == 0) {
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (!ret) {
- adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ /* asic caps */
+ pdev = adev->pdev;
+ speed_cap = pcie_get_speed_cap(pdev);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
- if (mask & DRM_PCIE_SPEED_25)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
- if (mask & DRM_PCIE_SPEED_50)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
- if (mask & DRM_PCIE_SPEED_80)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
} else {
- adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ if (speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
+ }
+ /* platform caps */
+ pdev = adev->ddev->pdev->bus->self;
+ speed_cap = pcie_get_speed_cap(pdev);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ } else {
+ if (speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+
}
}
if (adev->pm.pcie_mlw_mask == 0) {
- ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
- if (!ret) {
- switch (mask) {
- case 32:
+ pdev = adev->ddev->pdev->bus->self;
+ link_width = pcie_get_width_cap(pdev);
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
@@ -3338,7 +3471,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 16:
+ case PCIE_LNK_X16:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
@@ -3346,36 +3479,34 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 12:
+ case PCIE_LNK_X12:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 8:
+ case PCIE_LNK_X8:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 4:
+ case PCIE_LNK_X4:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 2:
+ case PCIE_LNK_X2:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 1:
+ case PCIE_LNK_X1:
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
}
- } else {
- adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
}
}
}
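The rewritten detection above replaces the removed drm_pcie_get_speed_cap_mask()/drm_pcie_get_max_link_width() helpers with the core PCI accessors pcie_get_speed_cap() and pcie_get_width_cap(). A hedged sketch of the speed-to-mask mapping; it simplifies the PCI_SPEED_UNKNOWN fallback to GEN1 only, unlike the hunk above, and exists purely to show the fall-through shape:

    #include <linux/pci.h>

    static u32 example_gen_mask(struct pci_dev *pdev)
    {
    	enum pci_bus_speed cap = pcie_get_speed_cap(pdev);
    	u32 mask = 0;

    	switch (cap) {
    	case PCIE_SPEED_16_0GT:
    		mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4;
    		/* fall through */
    	case PCIE_SPEED_8_0GT:
    		mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
    		/* fall through */
    	case PCIE_SPEED_5_0GT:
    		mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
    		/* fall through */
    	default:
    		mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; /* includes PCI_SPEED_UNKNOWN */
    	}
    	return mask;
    }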
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76ee8e04ff11..6748cd7fc129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
- u64 base;
int i, r;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -189,12 +188,18 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
+ r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("%p bind failed\n", new_abo);
+ goto unpin;
+ }
+
r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
@@ -206,7 +211,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = base;
+ work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
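This hunk is one instance of the new pin/bind/query pattern: amdgpu_bo_pin() no longer returns the GPU address through an out parameter, the buffer is bound with amdgpu_ttm_alloc_gart(), and the address is queried afterwards with amdgpu_bo_gpu_offset(). A hedged sketch of the sequence (the caller is assumed to hold the BO reservation; error unwinding is abbreviated):

    static int example_pin_for_scanout(struct amdgpu_bo *abo, u64 *addr)
    {
    	int r;

    	r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);	/* no address out-param anymore */
    	if (r)
    		return r;
    	r = amdgpu_ttm_alloc_gart(&abo->tbo);		/* bind so the offset is valid */
    	if (r)
    		return r;
    	*addr = amdgpu_bo_gpu_offset(abo);		/* query the address separately */
    	return 0;
    }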
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 77ad59ade85c..1c4595562f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -28,6 +28,7 @@
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
+#include "amd_pcie.h"
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
@@ -936,9 +937,11 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
case AMDGPU_PCIE_GEN3:
return AMDGPU_PCIE_GEN3;
default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+ if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+ (default_gen == AMDGPU_PCIE_GEN3))
return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+ else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+ (default_gen == AMDGPU_PCIE_GEN2))
return AMDGPU_PCIE_GEN2;
else
return AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index dd6203a0a6b7..ff24e1cc5b65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -287,12 +287,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
-
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
@@ -347,6 +341,10 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
+#define amdgpu_dpm_set_powergating_by_smu(adev, block_type, gate) \
+ ((adev)->powerplay.pp_funcs->set_powergating_by_smu(\
+ (adev)->powerplay.pp_handle, block_type, gate))
+
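A hedged usage sketch for the new macro; AMD_IP_BLOCK_TYPE_GFX is one of the existing block-type values, and the NULL checks mirror how pp_funcs entries are usually guarded before use:

    static void example_gate_gfx(struct amdgpu_device *adev, bool gate)
    {
    	if (adev->powerplay.pp_funcs &&
    	    adev->powerplay.pp_funcs->set_powergating_by_smu)
    		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, gate);
    }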
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
@@ -359,10 +357,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
-#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
- ((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
- (adev)->powerplay.pp_handle))
-
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -402,7 +396,6 @@ struct amdgpu_dpm {
u32 tdp_adjustment;
u16 load_line_slope;
bool power_control;
- bool ac_power;
/* special states active */
bool thermal_active;
bool uvd_active;
@@ -439,6 +432,7 @@ struct amdgpu_pm {
struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
uint32_t smu_prv_buffer_size;
struct amdgpu_bo *smu_prv_buffer;
+ bool ac_power;
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b0bf2f24da48..8843a06360fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1,10 +1,3 @@
-/**
- * \file amdgpu_drv.c
- * AMD Amdgpu driver
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -76,9 +69,10 @@
* - 3.24.0 - Add high priority compute support for gfx9
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
* - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 26
+#define KMS_DRIVER_MINOR 27
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -110,11 +104,8 @@ int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
uint amdgpu_pcie_gen_cap = 0;
uint amdgpu_pcie_lane_cap = 0;
uint amdgpu_cg_mask = 0xffffffff;
@@ -122,7 +113,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
+/* OverDrive (bit 14), gfxoff (bit 15), stutter mode (bit 17) disabled by default */
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -135,163 +127,368 @@ int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+/**
+ * DOC: vramlimit (int)
+ * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
+ */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
+/**
+ * DOC: vis_vramlimit (int)
+ * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
+ */
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
+/**
+ * DOC: gartsize (uint)
+ * Restrict the size of GART in MiB (32, 64, etc.) for testing. The default is -1 (the size depends on the asic).
+ */
MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
+/**
+ * DOC: gttsize (int)
+ * Restrict the size of the GTT domain in MiB for testing. The default is -1 (use VRAM size if 3GB < VRAM < 3/4 RAM,
+ * otherwise 3/4 of RAM).
+ */
MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
+/**
+ * DOC: moverate (int)
+ * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
+ */
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);
+/**
+ * DOC: benchmark (int)
+ * Run benchmarks. The default is 0 (Skip benchmarks).
+ */
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
+/**
+ * DOC: test (int)
+ * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (skip tests); set 1 to run the tests.
+ */
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, amdgpu_testing, int, 0444);
+/**
+ * DOC: audio (int)
+ * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (enabled); set 0 to disable it.
+ */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
+/**
+ * DOC: disp_priority (int)
+ * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
+ */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
+/**
+ * DOC: hw_i2c (int)
+ * Set 1 to enable the hw i2c engine. Only affects non-DC display handling. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
+/**
+ * DOC: pcie_gen2 (int)
+ * Override PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
+/**
+ * DOC: msi (int)
+ * Override Message Signaled Interrupts (MSI) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
+/**
+ * DOC: lockup_timeout (int)
+ * Set the GPU scheduler timeout value in ms. A value of 0 is invalid and will be adjusted to 10000.
+ * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
+ */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
+/**
+ * DOC: dpm (int)
+ * Override for dynamic power management setting (1 = enable, 0 = disable). The default is -1 (auto).
+ */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);
+/**
+ * DOC: fw_load_type (int)
+ * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
+ */
MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
+/**
+ * DOC: aspm (int)
+ * Override ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);
+/**
+ * DOC: runpm (int)
+ * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
+ * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ */
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
+/**
+ * DOC: ip_block_mask (uint)
+ * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
+ * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
+ * some IPs or may include multiple instances of an IP, so the ordering varies from asic to asic. See the driver output in
+ * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
+ */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+/**
+ * DOC: bapm (int)
+ * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between the CPU and GPU. Set 0 to disable it.
+ * The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);
+/**
+ * DOC: deep_color (int)
+ * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);
+/**
+ * DOC: vm_size (int)
+ * Override the size of the GPU's per-client virtual address space in GiB. The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+/**
+ * DOC: vm_fragment_size (int)
+ * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+/**
+ * DOC: vm_block_size (int)
+ * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
+/**
+ * DOC: vm_fault_stop (int)
+ * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
+ */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+/**
+ * DOC: vm_debug (int)
+ * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
+ */
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+/**
+ * DOC: vm_update_mode (int)
+ * Override the VM update mode. The VM can be updated using the CPU (0 = never, 1 = graphics only, 2 = compute only, 3 = both). The default
+ * is -1 (compute VM tables are updated by the CPU only on large BAR (LB) systems, otherwise 0, never).
+ */
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
+/**
+ * DOC: vram_page_split (int)
+ * Override the number of pages after which VRAM allocations are split (-1 = disable). The default is 512.
+ */
MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+/**
+ * DOC: exp_hw_support (int)
+ * Enable experimental hw support (1 = enable). The default is 0 (disabled).
+ */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+/**
+ * DOC: dc (int)
+ * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
+/**
+ * DOC: sched_jobs (int)
+ * Override the max number of jobs supported in the sw queue. The default is 32.
+ */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+/**
+ * DOC: sched_hw_submission (int)
+ * Override the max number of HW submissions. The default is 2.
+ */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+/**
+ * DOC: ppfeaturemask (uint)
+ * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable power features.
+ */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
+/**
+ * DOC: pcie_gen_cap (uint)
+ * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+/**
+ * DOC: pcie_lane_cap (uint)
+ * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+/**
+ * DOC: cg_mask (uint)
+ * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+/**
+ * DOC: pg_mask (uint)
+ * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+/**
+ * DOC: sdma_phase_quantum (uint)
+ * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
+ */
MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+/**
+ * DOC: disable_cu (charp)
+ * Set to disable CUs (specified as se.sh.cu,...). The default is NULL.
+ */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+/**
+ * DOC: virtual_display (charp)
+ * Set to enable the virtual display feature. This feature provides virtual display hardware on headless boards
+ * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
+ * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
+ * device at 26:00.0. The default is NULL.
+ */
MODULE_PARM_DESC(virtual_display,
"Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+/**
+ * DOC: ngg (int)
+ * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
+ */
MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
module_param_named(ngg, amdgpu_ngg, int, 0444);
+/**
+ * DOC: prim_buf_per_se (int)
+ * Override the size of the Primitive Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
+/**
+ * DOC: pos_buf_per_se (int)
+ * Override the size of the Position Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
+/**
+ * DOC: cntl_sb_buf_per_se (int)
+ * Override the size of the Control Sideband per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
+/**
+ * DOC: param_buf_per_se (int)
+ * Override the size of the Off-Chip Parameter Cache per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
+/**
+ * DOC: job_hang_limit (int)
+ * Set how long a job is allowed to hang before it is dropped. The default is 0.
+ */
MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
+/**
+ * DOC: lbpw (int)
+ * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
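+/**
+ * DOC: compute_multipipe (int)
+ * Override compute queue distribution across pipes (1 = enable, 0 = disable). The default is -1 (auto).
+ */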
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+/**
+ * DOC: gpu_recovery (int)
+ * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
+ */
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+/**
+ * DOC: emu_mode (int)
+ * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+/**
+ * DOC: si_support (int)
+ * Set SI support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. For SI asics, when the radeon
+ * driver is enabled, set 0 to use the radeon driver or 1 to use the amdgpu driver. The default is to use the radeon driver
+ * when it is available, otherwise the amdgpu driver.
+ */
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -305,6 +502,12 @@ MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)")
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
+/**
+ * DOC: cik_support (int)
+ * Set CIK support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. For CIK asics, when the radeon
+ * driver is enabled, set 0 to use the radeon driver or 1 to use the amdgpu driver. The default is to use the radeon driver
+ * when it is available, otherwise the amdgpu driver.
+ */
#ifdef CONFIG_DRM_AMDGPU_CIK
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -318,6 +521,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
+/**
+ * DOC: smu_memory_pool_size (uint)
+ * Reserve GTT for SMU debug usage; set 0 to disable it. The actual size is value * 256 MiB.
+ * E.g. 0x1 = 256 MiB, 0x2 = 512 MiB, 0x4 = 1 GiB, 0x8 = 2 GiB. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(smu_memory_pool_size,
"reserve gtt for smu debug usage, 0 = disable,"
"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
@@ -664,7 +872,7 @@ retry_init:
err_pci:
pci_disable_device(pdev);
err_free:
- drm_dev_unref(dev);
+ drm_dev_put(dev);
return ret;
}
@@ -674,7 +882,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -855,9 +1063,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.runtime_idle = amdgpu_pmops_runtime_idle,
};
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *file_priv = f->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+ amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+
+ return 0;
+}
+
+
static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
+ .flush = amdgpu_flush,
.release = drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = amdgpu_mmap,
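A note on the design choice behind the new .flush hook: the VFS calls file_operations.flush on every close() of a file descriptor, not only on the final release, so amdgpu_ctx_mgr_entity_flush() runs before the descriptor goes away, while .release still performs the final teardown.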
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 94138abe093b..ae8fac34f7a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -46,7 +46,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->devices & amdgpu_connector->devices) {
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
adev->mode_info.bl_encoder = amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index bc5fd8ebab5d..69c5d22f29bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
- true, NULL, &gobj);
+ ttm_bo_type_kernel, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -168,11 +168,19 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
}
- ret = amdgpu_bo_pin(abo, domain, NULL);
+ ret = amdgpu_bo_pin(abo, domain);
if (ret) {
amdgpu_bo_unreserve(abo);
goto out_unref;
}
+
+ ret = amdgpu_ttm_alloc_gart(&abo->tbo);
+ if (ret) {
+ amdgpu_bo_unreserve(abo);
+ dev_err(adev->dev, "%p bind failed\n", abo);
+ goto out_unref;
+ }
+
ret = amdgpu_bo_kmap(abo, NULL);
amdgpu_bo_unreserve(abo);
if (ret) {
@@ -365,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
- drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
- state);
+ drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+ state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 39ec6b8890a1..7056925eb386 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
uint64_t index;
- if (ring != &adev->uvd.inst[ring->me].ring) {
+ if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
} else {
@@ -646,7 +646,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index dd11b7313ca0..a54d5655a191 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
*/
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
- uint64_t gpu_addr;
int r;
r = amdgpu_bo_reserve(adev->gart.robj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin(adev->gart.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+ r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
if (r) {
amdgpu_bo_unreserve(adev->gart.robj);
return r;
@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
if (r)
amdgpu_bo_unpin(adev->gart.robj);
amdgpu_bo_unreserve(adev->gart.robj);
- adev->gart.table_addr = gpu_addr;
+ adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
return r;
}
@@ -234,7 +232,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
@@ -243,7 +241,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
if (!adev->gart.ptr)
continue;
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -282,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < pages; i++) {
page_base = dma_addr[i];
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
@@ -319,7 +317,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++)
adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 456295c00291..9f9e9dc87da1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -37,6 +37,8 @@ struct amdgpu_bo;
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
struct amdgpu_gart {
u64 table_addr;
struct amdgpu_bo *robj;
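The new macro simply names the GPU-pages-per-CPU-page ratio used throughout the GART code. A hedged illustration of the arithmetic, with example page sizes:

    /* With 64K CPU pages (as on some arm64/ppc64 configs) and 4K GPU pages,
     * AMDGPU_GPU_PAGES_IN_CPU_PAGE is 65536 / 4096 = 16, so GPU page t lives
     * in CPU page t / 16, which is the division the GART code now writes as
     * t / AMDGPU_GPU_PAGES_IN_CPU_PAGE. */
    static unsigned int example_cpu_page_for(unsigned int gpu_page_index)
    {
    	return gpu_page_index / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
    }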
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5fb156a01774..71792d820ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -265,7 +265,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains),
- flags, false, resv, &gobj);
+ flags, ttm_bo_type_device, resv, &gobj);
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -317,7 +317,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
- 0, 0, NULL, &gobj);
+ 0, ttm_bo_type_device, NULL, &gobj);
if (r)
return r;
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto free_pages;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
@@ -510,7 +510,6 @@ out:
* @adev: amdgpu_device pointer
* @vm: vm to update
* @bo_va: bo_va to update
- * @list: validation list
* @operation: map, unmap or clear
*
* Update the bo_va directly after setting its address. Errors are not
@@ -519,7 +518,6 @@ out:
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va *bo_va,
- struct list_head *list,
uint32_t operation)
{
int r;
@@ -612,7 +610,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = false;
+ tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
list_add(&tv.head, &list);
} else {
gobj = NULL;
@@ -673,7 +671,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
break;
}
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
+ amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
args->operation);
error_backoff:
@@ -768,7 +766,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
amdgpu_display_supported_domains(adev));
r = amdgpu_gem_object_create(adev, args->size, 0, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- false, NULL, &gobj);
+ ttm_bo_type_device, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 893c2490b783..bb5a47a45790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -105,8 +105,25 @@ struct amdgpu_gmc {
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
bool translate_further;
+ struct kfd_vm_fault_info *vm_fault_info;
+ atomic_t vm_fault_info_updated;
const struct amdgpu_gmc_funcs *gmc_funcs;
};
+/**
+ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+ *
+ * @gmc: amdgpu_gmc pointer
+ *
+ * Returns:
+ * True if full VRAM is visible through the BAR
+ */
+static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+{
+ WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
+
+ return (gmc->real_vram_size == gmc->visible_vram_size);
+}
+
#endif
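A hedged usage sketch for the new inline helper; the function name is illustrative:

    static bool example_needs_visible_vram(struct amdgpu_device *adev)
    {
    	/* If all of VRAM is CPU-visible there is no separate visible window
    	 * to manage; otherwise CPU-accessed BOs must be placed inside it. */
    	return !amdgpu_gmc_vram_full_visible(&adev->gmc);
    }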
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index f70eeed9ed76..5518e623fed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -139,7 +139,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- fence_ctx = job->fence_ctx;
+ fence_ctx = job->base.s_fence->scheduled.context;
} else {
vm = NULL;
fence_ctx = 0;
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+ /* wrap the last IB with fence */
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+ fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+ }
+
r = amdgpu_fence_emit(ring, f, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
- /* wrap the last IB with fence */
- if (job && job->uf_addr) {
- amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
- fence_flags | AMDGPU_FENCE_FLAG_64BIT);
- }
-
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
@@ -353,7 +353,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
tmo = tmo_mm;
else
tmo = tmo_gfx;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index a1c78f90eadf..3a072a7a39f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -578,11 +578,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
-
- adev->vm_manager.fence_context =
- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- adev->vm_manager.seqno[i] = 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 3a5ca462abf0..1abf5b5bac9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,23 @@
* Alex Deucher
* Jerome Glisse
*/
+
+/**
+ * DOC: Interrupt Handling
+ *
+ * Interrupts generated within GPU hardware raise interrupt requests that are
+ * passed to the amdgpu IRQ handler, which is responsible for detecting the
+ * source and type of the interrupt and dispatching matching handlers. If
+ * handling an interrupt requires calling kernel functions that may sleep,
+ * processing is dispatched to work handlers.
+ *
+ * If MSI functionality is not disabled by the module parameter, then MSI
+ * support will be enabled.
+ *
+ * For GPU interrupt sources that may be driven by another driver, IRQ domain
+ * support is used (with mapping between virtual and hardware IRQs).
+ */
+
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -43,19 +60,21 @@
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
/**
- * amdgpu_hotplug_work_func - display hotplug work handler
+ * amdgpu_hotplug_work_func - work handler for display hotplug event
*
- * @work: work struct
+ * @work: work struct pointer
*
- * This is the hot plug event work handler (all asics).
- * The work gets scheduled from the irq handler if there
- * was a hot plug interrupt. It walks the connector table
- * and calls the hotplug handler for each one, then sends
- * a drm hotplug event to alert userspace.
+ * This is the hotplug event work handler (all ASICs).
+ * The work gets scheduled from the IRQ handler if there
+ * was a hotplug interrupt. It walks through the connector table
+ * and calls hotplug handler for each connector. After this, it sends
+ * a DRM hotplug event to alert userspace.
+ *
+ * This design approach is required in order to defer hotplug event handling
+ * from the IRQ handler to a work handler because the hotplug handler has to
+ * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock may
+ * sleep).
*/
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
@@ -74,13 +93,12 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
}
/**
- * amdgpu_irq_reset_work_func - execute gpu reset
+ * amdgpu_irq_reset_work_func - execute GPU reset
*
- * @work: work struct
+ * @work: work struct pointer
*
- * Execute scheduled gpu reset (cayman+).
- * This function is called when the irq handler
- * thinks we need a gpu reset.
+ * Execute scheduled GPU reset (Cayman+).
+ * This function is called when the IRQ handler thinks we need a GPU reset.
*/
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
@@ -91,7 +109,13 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
amdgpu_device_gpu_recover(adev, NULL, false);
}
-/* Disable *all* interrupts */
+/**
+ * amdgpu_irq_disable_all - disable *all* interrupts
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Disable all types of interrupts from all sources.
+ */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
unsigned long irqflags;
@@ -123,11 +147,15 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_handler - irq handler
+ * amdgpu_irq_handler - IRQ handler
+ *
+ * @irq: IRQ number (unused)
+ * @arg: pointer to DRM device
*
- * @int irq, void *arg: args
+ * IRQ handler for amdgpu driver (all ASICs).
*
- * This is the irq handler for the amdgpu driver (all asics).
+ * Returns:
+ * result of handling the IRQ, as defined by &irqreturn_t
*/
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
@@ -142,18 +170,18 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
}
/**
- * amdgpu_msi_ok - asic specific msi checks
+ * amdgpu_msi_ok - check whether MSI functionality is enabled
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu device pointer (unused)
+ *
+ * Checks whether MSI functionality has been disabled via module parameter
+ * (all ASICs).
*
- * Handles asic specific MSI checks to determine if
- * MSIs should be enabled on a particular chip (all asics).
- * Returns true if MSIs should be enabled, false if MSIs
- * should not be enabled.
+ * Returns:
+ * *true* if MSIs are allowed to be enabled or *false* otherwise
*/
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
- /* force MSI on */
if (amdgpu_msi == 1)
return true;
else if (amdgpu_msi == 0)
@@ -163,12 +191,15 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_init - init driver interrupt info
+ * amdgpu_irq_init - initialize interrupt handling
*
* @adev: amdgpu device pointer
*
- * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
- * Returns 0 for success, error for failure.
+ * Sets up work functions for hotplug and reset interrupts, enables MSI
+ * functionality, initializes vblank, hotplug and reset interrupt handling.
+ *
+ * Returns:
+ * 0 on success or error code on failure
*/
int amdgpu_irq_init(struct amdgpu_device *adev)
{
@@ -176,7 +207,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- /* enable msi */
+ /* Enable MSI if not disabled by module parameter */
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
@@ -189,7 +220,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
- /* Disable vblank irqs aggressively for power-saving */
+ /* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev->ddev->vblank_disable_immediate = true;
@@ -197,7 +228,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r)
return r;
- /* pre DCE11 */
+ /* Pre-DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_hotplug_work_func);
}
@@ -220,11 +251,13 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_fini - tear down driver interrupt info
+ * amdgpu_irq_fini - shut down interrupt handling
*
* @adev: amdgpu device pointer
*
- * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ * Tears down work functions for hotplug and reset interrupts, disables MSI
+ * functionality, shuts down vblank, hotplug and reset interrupt handling,
+ * turns off interrupts from all sources (all ASICs).
*/
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
@@ -264,12 +297,17 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_add_id - register irq source
+ * amdgpu_irq_add_id - register IRQ source
*
* @adev: amdgpu device pointer
- * @src_id: source id for this source
- * @source: irq source
+ * @client_id: client id
+ * @src_id: source id
+ * @source: IRQ source pointer
+ *
+ * Registers an IRQ source on a client.
*
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
@@ -312,12 +350,12 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
}
/**
- * amdgpu_irq_dispatch - dispatch irq to IP blocks
+ * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector
+ * @entry: interrupt vector pointer
*
- * Dispatches the irq to the different IP blocks
+ * Dispatches IRQ to IP blocks.
*/
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
@@ -361,13 +399,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
}
/**
- * amdgpu_irq_update - update hw interrupt state
+ * amdgpu_irq_update - update hardware interrupt state
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to update
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Updates the interrupt state for a specific src (all asics).
+ * Updates interrupt state for the specific source (all ASICs).
*/
int amdgpu_irq_update(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, unsigned type)
@@ -378,7 +416,7 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
spin_lock_irqsave(&adev->irq.lock, irqflags);
- /* we need to determine after taking the lock, otherwise
+ /* We need to determine after taking the lock, otherwise
we might disable just enabled interrupts again */
if (amdgpu_irq_enabled(adev, src, type))
state = AMDGPU_IRQ_STATE_ENABLE;
@@ -390,6 +428,14 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Updates state of all types of interrupts on all sources on resume after
+ * reset.
+ */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
@@ -413,10 +459,13 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
* amdgpu_irq_get - enable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to enable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Enables the interrupt type for a specific src (all asics).
+ * Enables specified type of interrupt on the specified source (all ASICs).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -440,10 +489,13 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
* amdgpu_irq_put - disable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to disable
- * @type: type of interrupt you want to disable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
+ *
+ * Disables specified type of interrupt on the specified source (all ASICs).
*
- * Disables the interrupt type for a specific src (all asics).
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -464,12 +516,17 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
}
/**
- * amdgpu_irq_enabled - test if irq is enabled or not
+ * amdgpu_irq_enabled - check whether interrupt is enabled or not
*
* @adev: amdgpu device pointer
- * @idx: interrupt src you want to test
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Tests if the given interrupt source is enabled or not
+ * Checks whether the given type of interrupt is enabled on the given source.
+ *
+ * Returns:
+ * *true* if interrupt is enabled, *false* if interrupt is disabled or on
+ * invalid parameters
*/
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -486,7 +543,7 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return !!atomic_read(&src->enabled_types[type]);
}
-/* gen irq */
+/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
/* XXX */
@@ -497,12 +554,26 @@ static void amdgpu_irq_unmask(struct irq_data *irqd)
/* XXX */
}
+/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
.name = "amdgpu-ih",
.irq_mask = amdgpu_irq_mask,
.irq_unmask = amdgpu_irq_unmask,
};
+/**
+ * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
+ *
+ * @d: amdgpu IRQ domain pointer (unused)
+ * @irq: virtual IRQ number
+ * @hwirq: hardware IRQ number
+ *
+ * Current implementation assigns a simple interrupt handler to the given virtual
+ * IRQ.
+ *
+ * Returns:
+ * 0 on success or error code otherwise
+ */
static int amdgpu_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
@@ -514,17 +585,21 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
return 0;
}
+/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};
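For readers new to IRQ domains: a linear domain is, conceptually, a table indexed by hardware IRQ number that hands out Linux (virtual) IRQ numbers, running the domain's .map callback (amdgpu_irqdomain_map() above) on first allocation. A toy stand-in in plain C; the real kernel API is irq_domain_add_linear()/irq_create_mapping(), and everything below is simplified for illustration:

    #include <stdio.h>

    #define NHWIRQ 8

    /* virq_of[hwirq] is the allocated virtual IRQ; 0 means "not mapped yet" */
    static unsigned int virq_of[NHWIRQ];
    static unsigned int next_virq = 100;   /* arbitrary virtual number space */

    /* Model of irq_create_mapping(): allocate a virq on first use. */
    static unsigned int create_mapping(unsigned int hwirq)
    {
        if (hwirq >= NHWIRQ)
            return 0;
        if (!virq_of[hwirq])
            virq_of[hwirq] = next_virq++;  /* the .map callback would run here */
        return virq_of[hwirq];
    }

    int main(void)
    {
        printf("hwirq 3 -> virq %u\n", create_mapping(3));
        printf("hwirq 3 -> virq %u (cached)\n", create_mapping(3));
        return 0;
    }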
/**
- * amdgpu_irq_add_domain - create a linear irq domain
+ * amdgpu_irq_add_domain - create a linear IRQ domain
*
* @adev: amdgpu device pointer
*
- * Create an irq domain for GPU interrupt sources
+ * Creates an IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
@@ -539,11 +614,11 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_remove_domain - remove the irq domain
+ * amdgpu_irq_remove_domain - remove the IRQ domain
*
* @adev: amdgpu device pointer
*
- * Remove the irq domain for GPU interrupt sources
+ * Removes the IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
*/
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
@@ -555,16 +630,17 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
- * Linux irq
+ * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
*
* @adev: amdgpu device pointer
* @src_id: IH source id
*
- * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
+ * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
* Use this for components that generate a GPU interrupt, but are driven
* by a different driver (e.g., ACP).
- * Returns the Linux irq.
+ *
+ * Returns:
+ * The Linux IRQ number
*/
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 2bd56760c744..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,14 +30,14 @@
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
- DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
+ DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
- amdgpu_device_gpu_recover(job->adev, job, false);
+ amdgpu_device_gpu_recover(ring->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
if (!*job)
return -ENOMEM;
- (*job)->adev = adev;
+ /*
+ * Initialize the scheduler to at least some ring so that we always
+ * have a pointer to adev.
+ */
+ (*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
unsigned i;
@@ -93,14 +98,15 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(job->adev, &job->ibs[i], f);
+ amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
- amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+ amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -117,50 +123,68 @@ void amdgpu_job_free(struct amdgpu_job *job)
kfree(job);
}
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct drm_sched_entity *entity, void *owner,
- struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f)
{
+ enum drm_sched_priority priority;
+ struct amdgpu_ring *ring;
int r;
- job->ring = ring;
if (!f)
return -EINVAL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;
job->owner = owner;
- job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->rq->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
+ return 0;
+}
+
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence)
+{
+ int r;
+
+ job->base.sched = &ring->sched;
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+ job->fence = dma_fence_get(*fence);
+ if (r)
+ return r;
+
+ amdgpu_job_free(job);
return 0;
}
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
+ struct dma_fence *fence;
bool explicit = false;
int r;
- struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+ fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
if (drm_sched_dependency_optimized(fence, s_entity)) {
- r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+ r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+ fence, false);
if (r)
- DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ DRM_ERROR("Error adding fence (%d)\n", r);
}
}
while (fence == NULL && vm && !job->vmid) {
- struct amdgpu_ring *ring = job->ring;
-
r = amdgpu_vmid_grab(vm, ring, &job->sync,
&job->base.s_fence->finished,
job);
@@ -175,30 +199,25 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
struct dma_fence *fence = NULL, *finished;
- struct amdgpu_device *adev;
struct amdgpu_job *job;
int r;
- if (!sched_job) {
- DRM_ERROR("job is null\n");
- return NULL;
- }
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
- adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
} else {
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
new file mode 100644
index 000000000000..57cfe78a262b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_JOB_H__
+#define __AMDGPU_JOB_H__
+
+/* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
+/* bit set means preamble IB is first presented in the context it belongs to */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)
+/* bit set means context switch occurred */
+#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)
+
+#define to_amdgpu_job(sched_job) \
+ container_of((sched_job), struct amdgpu_job, base)
+
+struct amdgpu_fence;
+
+struct amdgpu_job {
+ struct drm_sched_job base;
+ struct amdgpu_vm *vm;
+ struct amdgpu_sync sync;
+ struct amdgpu_sync sched_sync;
+ struct amdgpu_ib *ibs;
+ struct dma_fence *fence; /* the hw fence */
+ uint32_t preamble_status;
+ uint32_t num_ibs;
+ void *owner;
+ bool vm_needs_flush;
+ uint64_t vm_pd_addr;
+ unsigned vmid;
+ unsigned pasid;
+ uint32_t gds_base, gds_size;
+ uint32_t gws_base, gws_size;
+ uint32_t oa_base, oa_size;
+ uint32_t vram_lost_counter;
+
+ /* user fence handling */
+ uint64_t uf_addr;
+ uint64_t uf_sequence;
+
+};
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job, struct amdgpu_vm *vm);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ struct amdgpu_job **job);
+
+void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_free(struct amdgpu_job *job);
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence);
+#endif
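The to_amdgpu_job() macro in this new header is the standard container_of idiom: given a pointer to the embedded drm_sched_job member, subtract the member's offset to recover the enclosing amdgpu_job. A self-contained demonstration with invented types (assumes nothing beyond the C standard library):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sched_job { int prio; };

    struct gpu_job {
        struct sched_job base;   /* embedded base object */
        int num_ibs;
    };

    #define to_gpu_job(sj) container_of((sj), struct gpu_job, base)

    int main(void)
    {
        struct gpu_job job = { .base = { .prio = 2 }, .num_ibs = 3 };
        struct sched_job *sj = &job.base;      /* what the scheduler sees */
        /* Recover the enclosing job from the base pointer. */
        printf("num_ibs = %d\n", to_gpu_job(sj)->num_ibs);
        return 0;
    }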
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 91517b166a3b..bd98cc5fb97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -328,61 +328,71 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ring_mask |= adev->gfx.gfx_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ring_mask |= adev->gfx.compute_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
+ ring_mask |= adev->sdma.instance[i].ring.ready << i;
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
- ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 16;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+ ring_mask |= adev->uvd.inst[i].ring.ready;
+ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask |= adev->vce.ring[i].ready << i;
+ ib_start_alignment = 4;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- ring_mask |=
- ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
- (j + i * adev->uvd.num_enc_rings));
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
+ ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask = adev->vcn.ring_dec.ready;
+ ib_start_alignment = 16;
ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
- ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask |= adev->vcn.ring_enc[i].ready << i;
+ ib_start_alignment = 64;
ib_size_alignment = 1;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ ring_mask = adev->vcn.ring_jpeg.ready;
+ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+ break;
default:
return -EINVAL;
}
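Each case above builds a bitmask with bit i set when ring i is ready; the rewritten lines can drop the `? 1 : 0` because `ready` is already a 0/1 value. A small sketch of the same computation (standalone, with made-up values):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        bool ready[4] = { true, false, true, true };   /* per-ring ready flags */
        uint32_t ring_mask = 0;

        for (int i = 0; i < 4; i++)
            ring_mask |= (uint32_t)ready[i] << i;      /* bit i <=> ring i */

        printf("ring_mask = 0x%x\n", ring_mask);       /* prints 0xd */
        return 0;
    }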
@@ -427,6 +437,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
+ case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
break;
default:
@@ -494,13 +505,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt;
- vram_gtt.vram_size = adev->gmc.real_vram_size;
- vram_gtt.vram_size -= adev->vram_pin_size;
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
+ vram_gtt.vram_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size);
+ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
- vram_gtt.gtt_size -= adev->gart_pin_size;
+ vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
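The pin sizes are now atomic64_t counters, so pin/unpin paths can update them and this ioctl can read them without a shared lock; the reported size is simply total minus pinned. A userspace model using C11 atomics (sizes and names are invented for the demo):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic int64_t vram_pin_size;            /* bytes currently pinned */

    static void pin(int64_t bytes)   { atomic_fetch_add(&vram_pin_size, bytes); }
    static void unpin(int64_t bytes) { atomic_fetch_sub(&vram_pin_size, bytes); }

    int main(void)
    {
        const int64_t real_vram_size = 8LL << 30;    /* 8 GiB, made up */

        pin(256 << 20);                              /* pin a 256 MiB BO */
        printf("usable VRAM: %lld bytes\n",
               (long long)(real_vram_size - atomic_load(&vram_pin_size)));
        unpin(256 << 20);
        return 0;
    }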
@@ -509,17 +520,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
- mem.vram.usable_heap_size =
- adev->gmc.real_vram_size - adev->vram_pin_size;
+ mem.vram.usable_heap_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size);
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size =
- adev->gmc.visible_vram_size -
- (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -527,8 +537,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
mem.gtt.total_heap_size *= PAGE_SIZE;
- mem.gtt.usable_heap_size = mem.gtt.total_heap_size
- - adev->gart_pin_size;
+ mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
+ atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage =
amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
@@ -930,7 +940,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
return;
pm_runtime_get_sync(dev->dev);
- amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
if (adev->asic_type != CHIP_RAVEN) {
amdgpu_uvd_free_handles(adev, file_priv);
@@ -958,7 +967,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
- amdgpu_bo_list_free(list);
+ amdgpu_bo_list_put(list);
idr_destroy(&fpriv->bo_list_handles);
mutex_destroy(&fpriv->bo_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 83e344fbb50a..e55508b39496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -28,6 +28,21 @@
* Christian König <christian.koenig@amd.com>
*/
+/**
+ * DOC: MMU Notifier
+ *
+ * For coherent userptr handling, the driver registers an MMU notifier to be
+ * informed about updates to the page tables of a process.
+ *
+ * When somebody tries to invalidate the page tables we block the update until
+ * all operations on the pages in question are completed, then those pages are
+ * marked as accessed and also dirty if it wasn't a read-only access.
+ *
+ * New command submissions using the userptrs in question are delayed until all
+ * page table invalidations have completed and we once more see a coherent process
+ * address space.
+ */
+
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
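The scheme in the DOC comment is essentially a quiescence handshake: submissions count themselves as in-flight users of the pages, and an invalidation must wait until that count drops to zero. A compact userspace model of the handshake (pure illustration; the driver uses a rw_semaphore rather than a condition variable):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t idle = PTHREAD_COND_INITIALIZER;
    static int inflight;   /* operations currently using the userptr pages */

    static void op_start(void)
    {
        pthread_mutex_lock(&lock);
        inflight++;
        pthread_mutex_unlock(&lock);
    }

    static void op_done(void)
    {
        pthread_mutex_lock(&lock);
        if (--inflight == 0)
            pthread_cond_signal(&idle);
        pthread_mutex_unlock(&lock);
    }

    /* Invalidation blocks until every operation on the pages has completed. */
    static void invalidate_range(void)
    {
        pthread_mutex_lock(&lock);
        while (inflight > 0)
            pthread_cond_wait(&idle, &lock);
        printf("pages quiesced; safe to invalidate\n");
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        op_start();
        op_done();
        invalidate_range();
        return 0;
    }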
@@ -38,6 +53,22 @@
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+/**
+ * struct amdgpu_mn
+ *
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+ * @mn: MMU notifier structure
+ * @type: type of MMU notifier
+ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+ * @objects: interval tree containing amdgpu_mn_nodes
+ * @read_lock: mutex for recursive locking of @lock
+ * @recursion: depth of recursion
+ *
+ * Data for each amdgpu device and process address space.
+ */
struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
@@ -58,13 +89,21 @@ struct amdgpu_mn {
atomic_t recursion;
};
+/**
+ * struct amdgpu_mn_node
+ *
+ * @it: interval node defining start-last of the affected address range
+ * @bos: list of all BOs in the affected address range
+ *
+ * Manages all BOs which are affected by a certain range of address space.
+ */
struct amdgpu_mn_node {
struct interval_tree_node it;
struct list_head bos;
};
/**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the MMU notifier
*
* @work: previously scheduled work item
*
@@ -72,47 +111,50 @@ struct amdgpu_mn_node {
*/
static void amdgpu_mn_destroy(struct work_struct *work)
{
- struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
- struct amdgpu_device *adev = rmn->adev;
+ struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+ struct amdgpu_device *adev = amn->adev;
struct amdgpu_mn_node *node, *next_node;
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- down_write(&rmn->lock);
- hash_del(&rmn->node);
+ down_write(&amn->lock);
+ hash_del(&amn->node);
rbtree_postorder_for_each_entry_safe(node, next_node,
- &rmn->objects.rb_root, it.rb) {
+ &amn->objects.rb_root, it.rb) {
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
bo->mn = NULL;
list_del_init(&bo->mn_list);
}
kfree(node);
}
- up_write(&rmn->lock);
+ up_write(&amn->lock);
mutex_unlock(&adev->mn_lock);
- mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
- kfree(rmn);
+ mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+ kfree(amn);
}
/**
* amdgpu_mn_release - callback to notify about mm destruction
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
*
* Schedule a work item to lazily destroy our notifier.
*/
static void amdgpu_mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- INIT_WORK(&rmn->work, amdgpu_mn_destroy);
- schedule_work(&rmn->work);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+ INIT_WORK(&amn->work, amdgpu_mn_destroy);
+ schedule_work(&amn->work);
}
/**
- * amdgpu_mn_lock - take the write side lock for this mn
+ * amdgpu_mn_lock - take the write side lock for this notifier
+ *
+ * @mn: our notifier
*/
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
@@ -121,7 +163,9 @@ void amdgpu_mn_lock(struct amdgpu_mn *mn)
}
/**
- * amdgpu_mn_unlock - drop the write side lock for this mn
+ * amdgpu_mn_unlock - drop the write side lock for this notifier
+ *
+ * @mn: our notifier
*/
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
@@ -130,40 +174,44 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
}
/**
- * amdgpu_mn_read_lock - take the rmn read lock
+ * amdgpu_mn_read_lock - take the read side lock for this notifier
*
- * @rmn: our notifier
- *
- * Take the rmn read side lock.
+ * @amn: our notifier
*/
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
- mutex_lock(&rmn->read_lock);
- if (atomic_inc_return(&rmn->recursion) == 1)
- down_read_non_owner(&rmn->lock);
- mutex_unlock(&rmn->read_lock);
+ if (blockable)
+ mutex_lock(&amn->read_lock);
+ else if (!mutex_trylock(&amn->read_lock))
+ return -EAGAIN;
+
+ if (atomic_inc_return(&amn->recursion) == 1)
+ down_read_non_owner(&amn->lock);
+ mutex_unlock(&amn->read_lock);
+
+ return 0;
}
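amdgpu_mn_read_lock() gains a non-blocking mode: when blockable is false it may only trylock and must give up with -EAGAIN, and the recursion counter ensures only the outermost caller actually takes the rwsem. A sketch of the counting idea with C11 atomics (simplified: the real code also funnels callers through read_lock, and -EAGAIN comes from a failed trylock, not an unconditional check):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic int recursion;

    /* Only the outermost caller takes the underlying lock. */
    static int read_lock(bool blockable)
    {
        if (!blockable)                 /* stand-in for a failed trylock */
            return -EAGAIN;
        if (atomic_fetch_add(&recursion, 1) == 0)
            puts("down_read (outermost)");
        return 0;
    }

    static void read_unlock(void)
    {
        if (atomic_fetch_sub(&recursion, 1) == 1)
            puts("up_read (last holder)");
    }

    int main(void)
    {
        read_lock(true);    /* takes the lock */
        read_lock(true);    /* nested: counter only */
        read_unlock();
        read_unlock();      /* releases the lock */
        printf("non-blockable attempt: %d\n", read_lock(false)); /* -EAGAIN */
        return 0;
    }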
/**
- * amdgpu_mn_read_unlock - drop the rmn read lock
+ * amdgpu_mn_read_unlock - drop the read side lock for this notifier
*
- * @rmn: our notifier
- *
- * Drop the rmn read side lock.
+ * @amn: our notifier
*/
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
- if (atomic_dec_return(&rmn->recursion) == 0)
- up_read_non_owner(&rmn->lock);
+ if (atomic_dec_return(&amn->recursion) == 0)
+ up_read_non_owner(&amn->lock);
}
/**
* amdgpu_mn_invalidate_node - unmap all BOs of a node
*
* @node: the node with the BOs to unmap
+ * @start: start of address range affected
+ * @end: end of address range affected
*
- * We block for all BOs and unmap them by move them
- * into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
*/
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
unsigned long start,
@@ -190,42 +238,54 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
- * We block for all BOs between start and end to be idle and
- * unmap them by move them into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
*/
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(rmn);
+ /* TODO we should be able to split locking for interval tree and
+ * amdgpu_mn_invalidate_node
+ */
+ if (amdgpu_mn_read_lock(amn, blockable))
+ return -EAGAIN;
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
+ if (!blockable) {
+ amdgpu_mn_read_unlock(amn);
+ return -EAGAIN;
+ }
+
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
amdgpu_mn_invalidate_node(node, start, end);
}
+
+ return 0;
}
/**
* amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
@@ -233,24 +293,31 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
* necessitates evicting all user-mode queues of the process. The BOs
* are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(rmn);
+ if (amdgpu_mn_read_lock(amn, blockable))
+ return -EAGAIN;
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
+ if (!blockable) {
+ amdgpu_mn_read_unlock(amn);
+ return -EAGAIN;
+ }
+
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
@@ -262,13 +329,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
amdgpu_amdkfd_evict_userptr(mem, mm);
}
}
+
+ return 0;
}
/**
* amdgpu_mn_invalidate_range_end - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
@@ -279,9 +348,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
- amdgpu_mn_read_unlock(rmn);
+ amdgpu_mn_read_unlock(amn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +384,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type)
{
struct mm_struct *mm = current->mm;
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
unsigned long key = AMDGPU_MN_KEY(mm, type);
int r;
@@ -325,41 +394,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
return ERR_PTR(-EINTR);
}
- hash_for_each_possible(adev->mn_hash, rmn, node, key)
- if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+ hash_for_each_possible(adev->mn_hash, amn, node, key)
+ if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
goto release_locks;
- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
- if (!rmn) {
- rmn = ERR_PTR(-ENOMEM);
+ amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+ if (!amn) {
+ amn = ERR_PTR(-ENOMEM);
goto release_locks;
}
- rmn->adev = adev;
- rmn->mm = mm;
- init_rwsem(&rmn->lock);
- rmn->type = type;
- rmn->mn.ops = &amdgpu_mn_ops[type];
- rmn->objects = RB_ROOT_CACHED;
- mutex_init(&rmn->read_lock);
- atomic_set(&rmn->recursion, 0);
+ amn->adev = adev;
+ amn->mm = mm;
+ init_rwsem(&amn->lock);
+ amn->type = type;
+ amn->mn.ops = &amdgpu_mn_ops[type];
+ amn->objects = RB_ROOT_CACHED;
+ mutex_init(&amn->read_lock);
+ atomic_set(&amn->recursion, 0);
- r = __mmu_notifier_register(&rmn->mn, mm);
+ r = __mmu_notifier_register(&amn->mn, mm);
if (r)
- goto free_rmn;
+ goto free_amn;
- hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+ hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
release_locks:
up_write(&mm->mmap_sem);
mutex_unlock(&adev->mn_lock);
- return rmn;
+ return amn;
-free_rmn:
+free_amn:
up_write(&mm->mmap_sem);
mutex_unlock(&adev->mn_lock);
- kfree(rmn);
+ kfree(amn);
return ERR_PTR(r);
}
@@ -379,14 +448,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
enum amdgpu_mn_type type =
bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
struct amdgpu_mn_node *node = NULL, *new_node;
struct list_head bos;
struct interval_tree_node *it;
- rmn = amdgpu_mn_get(adev, type);
- if (IS_ERR(rmn))
- return PTR_ERR(rmn);
+ amn = amdgpu_mn_get(adev, type);
+ if (IS_ERR(amn))
+ return PTR_ERR(amn);
new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
@@ -394,12 +463,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- down_write(&rmn->lock);
+ down_write(&amn->lock);
- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+ while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
kfree(node);
node = container_of(it, struct amdgpu_mn_node, it);
- interval_tree_remove(&node->it, &rmn->objects);
+ interval_tree_remove(&node->it, &amn->objects);
addr = min(it->start, addr);
end = max(it->last, end);
list_splice(&node->bos, &bos);
@@ -410,7 +479,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
else
kfree(new_node);
- bo->mn = rmn;
+ bo->mn = amn;
node->it.start = addr;
node->it.last = end;
@@ -418,9 +487,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
list_splice(&bos, &node->bos);
list_add(&bo->mn_list, &node->bos);
- interval_tree_insert(&node->it, &rmn->objects);
+ interval_tree_insert(&node->it, &amn->objects);
- up_write(&rmn->lock);
+ up_write(&amn->lock);
return 0;
}
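The registration loop above repeatedly pulls every existing node whose interval overlaps [addr, end] out of the tree and widens the new node to cover it; because widening can create fresh overlaps, the kernel re-runs the tree lookup after each merge. The merge step in isolation, with plain arrays standing in for the interval tree (illustrative only):

    #include <stdio.h>

    struct range { unsigned long start, last; };

    int main(void)
    {
        struct range existing[2] = { { 100, 199 }, { 300, 399 } };
        struct range node = { 150, 320 };   /* new range, overlaps both */

        /* One pass suffices for this data; the kernel re-queries the tree
         * after each merge since widening can expose new overlaps. */
        for (int i = 0; i < 2; i++) {
            struct range *it = &existing[i];
            if (it->last < node.start || it->start > node.last)
                continue;                   /* disjoint: keep as-is */
            if (it->start < node.start) node.start = it->start; /* widen left */
            if (it->last > node.last)   node.last  = it->last;  /* widen right */
        }
        printf("merged interval: [%lu, %lu]\n", node.start, node.last);
        return 0;
    }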
@@ -435,18 +504,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
struct list_head *head;
mutex_lock(&adev->mn_lock);
- rmn = bo->mn;
- if (rmn == NULL) {
+ amn = bo->mn;
+ if (amn == NULL) {
mutex_unlock(&adev->mn_lock);
return;
}
- down_write(&rmn->lock);
+ down_write(&amn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -456,12 +525,13 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
if (list_empty(head)) {
struct amdgpu_mn_node *node;
+
node = container_of(head, struct amdgpu_mn_node, bos);
- interval_tree_remove(&node->it, &rmn->objects);
+ interval_tree_remove(&node->it, &amn->objects);
kfree(node);
}
- up_write(&rmn->lock);
+ up_write(&amn->lock);
mutex_unlock(&adev->mn_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3526efa8960e..b0e14a3d54ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -38,7 +38,20 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+/**
+ * DOC: amdgpu_object
+ *
+ * This defines the interfaces to operate on an &amdgpu_bo buffer object which
+ * represents memory used by driver (VRAM, system memory, etc.). The driver
+ * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
+ * to create/destroy/set buffer objects which are then managed by the kernel TTM
+ * memory manager.
+ * The interfaces are also used internally by kernel clients, including gfx,
+ * uvd, etc. for kernel managed allocations used by the GPU.
+ *
+ */
+
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
@@ -50,11 +63,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+/**
+ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
+ *
+ * @bo: &amdgpu_bo buffer object
+ *
+ * This function is called when a BO stops being pinned, and updates the
+ * &amdgpu_device pin_size values accordingly.
+ */
+static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ }
+}
+
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ if (bo->pin_count > 0)
+ amdgpu_bo_subtract_pin_size(bo);
+
if (bo->kfd_bo)
amdgpu_amdkfd_unreserve_system_memory_limit(bo);
@@ -73,14 +110,32 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+/**
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * an &amdgpu_bo.
+ *
+ * Returns:
+ * true if the object is an &amdgpu_bo, false if not.
+ */
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
- if (bo->destroy == &amdgpu_ttm_bo_destroy)
+ if (bo->destroy == &amdgpu_bo_destroy)
return true;
return false;
}
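amdgpu_bo_is_amdgpu_bo() identifies the subtype by comparing the TTM object's destroy callback against the driver's own — a cheap run-time type check that is common in kernel C. A standalone illustration with invented types:

    #include <stdbool.h>
    #include <stdio.h>

    struct buffer_object {
        void (*destroy)(struct buffer_object *bo);
    };

    static void my_destroy(struct buffer_object *bo) { (void)bo; }

    /* A BO is "ours" iff it was created with our destroy callback. */
    static bool is_my_bo(struct buffer_object *bo)
    {
        return bo->destroy == my_destroy;
    }

    int main(void)
    {
        struct buffer_object mine = { .destroy = my_destroy };
        struct buffer_object other = { .destroy = NULL };

        printf("mine: %d, other: %d\n", is_my_bo(&mine), is_my_bo(&other));
        return 0;
    }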
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+/**
+ * amdgpu_bo_placement_from_domain - set buffer's placement
+ * @abo: &amdgpu_bo buffer object whose placement is to be set
+ * @domain: requested domain
+ *
+ * Sets buffer's placement according to requested domain and the buffer's
+ * flags.
+ */
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
@@ -161,6 +216,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
placement->num_placement = c;
placement->placement = places;
@@ -184,7 +241,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
*
* Note: For bo_ptr, a new BO is only created if bo_ptr points to NULL.
*
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -220,22 +278,33 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
goto error_free;
}
- r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+ r = amdgpu_bo_pin(*bo_ptr, domain);
if (r) {
dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
goto error_unreserve;
}
+ r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
+ goto error_unpin;
+ }
+
+ if (gpu_addr)
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
if (cpu_addr) {
r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
- goto error_unreserve;
+ goto error_unpin;
}
}
return 0;
+error_unpin:
+ amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
amdgpu_bo_unreserve(*bo_ptr);
@@ -261,7 +330,8 @@ error_free:
*
* Note: For bo_ptr, a new BO is only created if bo_ptr points to NULL.
*
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -285,6 +355,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
+ * @gpu_addr: pointer to where the BO's GPU memory space address was stored
+ * @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
* Unmaps and unpins a BO for kernel internal use.
*/
@@ -418,17 +490,17 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_ttm_placement_from_domain(bo, bp->domain);
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
- NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+ NULL, bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
@@ -498,6 +570,20 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @bo_ptr: pointer to the buffer object pointer
+ *
+ * Creates an &amdgpu_bo buffer object; and if requested, also creates a
+ * shadow object.
+ * Shadow object is used to back up the original buffer object, and is always
+ * in GTT.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
@@ -510,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (r)
return r;
- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
if (!bp->resv)
WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
NULL));
@@ -527,6 +613,21 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be backed up
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies an &amdgpu_bo buffer object to its shadow object.
+ * Not used for now.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
@@ -559,6 +660,18 @@ err:
return r;
}
+/**
+ * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
+ * @bo: pointer to the buffer object
+ *
+ * Sets placement according to domain; and changes placement and caching
+ * policy of the buffer object according to the placement.
+ * This is used for validating shadow bos. It calls ttm_bo_validate() to
+ * make sure the buffer is resident where it needs to be.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
@@ -571,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
domain = bo->preferred_domains;
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -581,6 +694,22 @@ retry:
return r;
}
+/**
+ * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be restored
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies a buffer object's shadow content back to the object.
+ * This is used for recovering a buffer from its shadow in case of a gpu
+ * reset where vram context may be lost.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
@@ -613,6 +742,17 @@ err:
return r;
}
+/**
+ * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be mapped
+ * @ptr: kernel virtual address to be returned
+ *
+ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
+ * amdgpu_bo_kptr() to get the kernel virtual address.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
void *kptr;
@@ -643,6 +783,15 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
+/**
+ * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
+ *
+ * Returns:
+ * the virtual address of a buffer object area.
+ */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
bool is_iomem;
@@ -650,21 +799,42 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}
+/**
+ * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unmapped
+ *
+ * Unmaps a kernel map set up by amdgpu_bo_kmap().
+ */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
if (bo->kmap.bo)
ttm_bo_kunmap(&bo->kmap);
}
+/**
+ * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * References the contained &ttm_buffer_object.
+ *
+ * Returns:
+ * a refcounted pointer to the &amdgpu_bo buffer object.
+ */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
+/**
+ * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Unreferences the contained &ttm_buffer_object and clears the pointer.
+ */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
struct ttm_buffer_object *tbo;
@@ -673,14 +843,34 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
return;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
+/**
+ * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ * @min_offset: the start of requested address range
+ * @max_offset: the end of requested address range
+ *
+ * Pins the buffer object according to requested domain and address range. If
+ * the memory is unbound GART memory, binds the pages into the GART table. Adjusts
+ * pin_count and pin_size accordingly.
+ *
+ * Pinning means to lock pages in memory along with keeping them at a fixed
+ * offset. It is required when a buffer cannot be moved, for example, when
+ * a display buffer is being scanned out.
+ *
+ * Compared with amdgpu_bo_pin(), this function gives more flexibility on
+ * where to pin a buffer if there are specific restrictions on where a buffer
+ * must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr)
+ u64 min_offset, u64 max_offset)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { false, false };
@@ -712,8 +902,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
if (max_offset != 0) {
u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
@@ -728,7 +916,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
unsigned fpfn, lpfn;
@@ -749,33 +937,48 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
goto error;
}
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
-
bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- adev->vram_pin_size += amdgpu_bo_size(bo);
- adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
+ atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- adev->gart_pin_size += amdgpu_bo_size(bo);
+ atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
error:
return r;
}
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+/**
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ *
+ * A simple wrapper to amdgpu_bo_pin_restricted().
+ * Provides a simpler API for buffers that do not have any strict restrictions
+ * on where a buffer must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
- return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+ return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
+/**
+ * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unpinned
+ *
+ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
+ * Changes placement and pin size accordingly.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -790,12 +993,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->pin_count)
return 0;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- adev->vram_pin_size -= amdgpu_bo_size(bo);
- adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- adev->gart_pin_size -= amdgpu_bo_size(bo);
- }
+ amdgpu_bo_subtract_pin_size(bo);
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
@@ -808,6 +1006,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
return r;
}
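Pinning is reference counted: only the first pin does placement work and size accounting, later pins merely bump pin_count, and only the final unpin (via amdgpu_bo_subtract_pin_size() in this patch) undoes the accounting. A minimal model of that contract (invented names, no error handling):

    #include <stdio.h>

    static int pin_count;
    static long pinned_bytes;

    static void bo_pin(long size)
    {
        if (pin_count++)        /* already pinned: just take a reference */
            return;
        pinned_bytes += size;   /* first pin: account the size */
    }

    static void bo_unpin(long size)
    {
        if (--pin_count)        /* still pinned by another user */
            return;
        pinned_bytes -= size;   /* last unpin: drop the accounting */
    }

    int main(void)
    {
        bo_pin(4096);
        bo_pin(4096);                           /* refcount only */
        printf("pinned: %ld\n", pinned_bytes);  /* 4096, not 8192 */
        bo_unpin(4096);
        bo_unpin(4096);
        printf("pinned: %ld\n", pinned_bytes);  /* 0 */
        return 0;
    }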
+/**
+ * amdgpu_bo_evict_vram - evict VRAM buffers
+ * @adev: amdgpu device object
+ *
+ * Evicts all VRAM buffers on the lru list of the memory type.
+ * Mainly used for evicting vram at suspend time.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
@@ -830,6 +1038,15 @@ static const char *amdgpu_vram_names[] = {
"DDR4",
};
+/**
+ * amdgpu_bo_init - initialize memory manager
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* reserve PAT memory space to WC for VRAM */
@@ -847,6 +1064,16 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
return amdgpu_ttm_init(adev);
}
+/**
+ * amdgpu_bo_late_init - late init
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_late_init() to free resources used earlier during
+ * initialization.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
amdgpu_ttm_late_init(adev);
@@ -854,6 +1081,12 @@ int amdgpu_bo_late_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_bo_fini - tear down memory manager
+ * @adev: amdgpu device object
+ *
+ * Reverses amdgpu_bo_init() to tear down memory manager.
+ */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
@@ -861,12 +1094,33 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
+/**
+ * amdgpu_bo_fbdev_mmap - mmap fbdev memory
+ * @bo: &amdgpu_bo buffer object
+ * @vma: vma as input from the fbdev mmap method
+ *
+ * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
return ttm_fbdev_mmap(vma, &bo->tbo);
}
+/**
+ * amdgpu_bo_set_tiling_flags - set tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: new flags
+ *
+ * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
+ * kernel driver to set the tiling flags on a buffer.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -879,6 +1133,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
return 0;
}
+/**
+ * amdgpu_bo_get_tiling_flags - get tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: returned flags
+ *
+ * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
+ * get the tiling flags on a buffer.
+ */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
lockdep_assert_held(&bo->tbo.resv->lock.base);
@@ -887,6 +1149,19 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
*tiling_flags = bo->tiling_flags;
}
+/**
+ * amdgpu_bo_set_metadata - set metadata
+ * @bo: &amdgpu_bo buffer object
+ * @metadata: new metadata
+ * @metadata_size: size of the new metadata
+ * @flags: flags of the new metadata
+ *
+ * Sets buffer object's metadata, its size and flags.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
uint32_t metadata_size, uint64_t flags)
{
@@ -916,6 +1191,21 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
return 0;
}
+/**
+ * amdgpu_bo_get_metadata - get metadata
+ * @bo: &amdgpu_bo buffer object
+ * @buffer: returned metadata
+ * @buffer_size: size of the buffer
+ * @metadata_size: size of the returned metadata
+ * @flags: flags of the returned metadata
+ *
+ * Gets buffer object's metadata, its size and flags. buffer_size shall not be
+ * less than metadata_size.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags)
@@ -939,6 +1229,16 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
return 0;
}
+/**
+ * amdgpu_bo_move_notify - notification about a memory move
+ * @bo: pointer to a buffer object
+ * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new information of the bufer object
+ *
+ * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
+ * bookkeeping.
+ * TTM driver callback which is called when ttm moves a buffer.
+ */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *new_mem)
@@ -947,7 +1247,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
abo = ttm_to_amdgpu_bo(bo);
@@ -964,9 +1264,20 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
- trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
+/**
+ * amdgpu_bo_fault_reserve_notify - notification about a memory fault
+ * @bo: pointer to a buffer object
+ *
+ * Notifies the driver we are taking a fault on this BO and have reserved it,
+ * also performs bookkeeping.
+ * TTM driver callback for dealing with vm faults.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@@ -975,7 +1286,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
unsigned long offset, size;
int r;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return 0;
abo = ttm_to_amdgpu_bo(bo);
@@ -997,8 +1308,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
abo->placement.num_busy_placement = 1;
@@ -1040,10 +1351,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
- * Returns current GPU offset of the object.
- *
* Note: object should either be pinned or reserved when calling this
* function, it might be useful to add check for this for debugging.
+ *
+ * Returns:
+ * current GPU offset of the object.
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
@@ -1059,6 +1371,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
return bo->tbo.offset;
}
+/**
+ * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * @adev: amdgpu device object
+ * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
+ *
+ * Returns:
+ * Which of the allowed domains is preferred for pinning the BO for scanout.
+ */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
uint32_t domain)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 731748033878..18945dd6982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -32,6 +32,7 @@
#include "amdgpu.h"
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS 3
struct amdgpu_bo_param {
unsigned long size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
/* Protected by tbo.reserved */
u32 preferred_domains;
u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr);
@@ -252,10 +256,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr);
+ u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b455da487782..8f98629fbe59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -31,7 +31,7 @@
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-
+#include <linux/nospec.h>
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -68,11 +68,11 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
+ adev->pm.ac_power = true;
else
- adev->pm.dpm.ac_power = false;
+ adev->pm.ac_power = false;
if (adev->powerplay.pp_funcs->enable_bapm)
- amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
+ amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
}
@@ -80,12 +80,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
/**
* DOC: power_dpm_state
*
- * This is a legacy interface and is only provided for backwards compatibility.
- * The amdgpu driver provides a sysfs API for adjusting certain power
- * related parameters. The file power_dpm_state is used for this.
+ * The power_dpm_state file is a legacy interface and is only provided for
+ * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
+ * certain power related parameters. The file power_dpm_state is used for this.
* It accepts the following arguments:
+ *
* - battery
+ *
* - balanced
+ *
* - performance
*
* battery
@@ -169,14 +172,21 @@ fail:
* The amdgpu driver provides a sysfs API for adjusting certain power
* related parameters. The file power_dpm_force_performance_level is
* used for this. It accepts the following arguments:
+ *
* - auto
+ *
* - low
+ *
* - high
+ *
* - manual
- * - GPU fan
+ *
* - profile_standard
+ *
* - profile_min_sclk
+ *
* - profile_min_mclk
+ *
* - profile_peak
*
* auto
@@ -393,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
count = -EINVAL;
goto fail;
}
+ idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
amdgpu_dpm_get_pp_num_states(adev, &data);
state = data.states[idx];
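The array_index_nospec() call added above keeps the freshly bounds-checked index from being used speculatively out of range. A minimal sketch of the idiom, using a generic lookup table (names are illustrative, not amdgpu code):

	#include <linux/errno.h>
	#include <linux/nospec.h>
	#include <linux/types.h>

	/* Sketch: bounds check, then clamp the index against speculation. */
	static int table_lookup(const u32 *table, size_t nr, size_t idx, u32 *out)
	{
		if (idx >= nr)
			return -EINVAL;
		/* Under speculative execution, idx is forced into [0, nr). */
		idx = array_index_nospec(idx, nr);
		*out = table[idx];
		return 0;
	}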
@@ -463,8 +474,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* this.
*
* Reading the file will display:
+ *
* - a list of engine clock levels and voltages labeled OD_SCLK
+ *
* - a list of memory clock levels and voltages labeled OD_MCLK
+ *
* - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
*
* To manually adjust these settings, first select manual using
@@ -593,40 +607,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
return snprintf(buf, PAGE_SIZE, "\n");
}
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+/*
+ * Worst case: 32 bits individually specified, in octal at 12 characters
+ * per line (+1 for \n).
+ */
+#define AMDGPU_MASK_BUF_MAX (32 * 13)
+
+static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
- uint32_t mask = 0;
char *sub_str = NULL;
char *tmp;
- char buf_cpy[count];
+ char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
const char delimiter[3] = {' ', '\n', '\0'};
+ size_t bytes;
- memcpy(buf_cpy, buf, count+1);
+ *mask = 0;
+
+ bytes = min(count, sizeof(buf_cpy) - 1);
+ memcpy(buf_cpy, buf, bytes);
+ buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
+ if (ret)
+ return -EINVAL;
+ *mask |= 1 << level;
} else
break;
}
+
+ return 0;
+}
+
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ uint32_t mask = 0;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
-fail:
return count;
}
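For reference, the new helper turns a space-separated list of level indices into a bitmask, so writing "0 1 2" to pp_dpm_sclk selects levels 0-2 as mask 0x7. A userspace sketch of the same parsing (standalone C; strsep() here is the glibc/BSD function, and the 0-31 range check is an addition for safety):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Re-implementation of the amdgpu_read_mask() parsing, for illustration. */
	static int read_mask(char *buf, uint32_t *mask)
	{
		char *sub, *end;
		long level;

		*mask = 0;
		while (buf && buf[0]) {
			sub = strsep(&buf, " \n");
			if (!strlen(sub))
				break;		/* mirrors the kernel helper */
			level = strtol(sub, &end, 0);
			if (*end || level < 0 || level > 31)
				return -1;
			*mask |= 1u << level;
		}
		return 0;
	}

	int main(void)
	{
		char input[] = "0 1 2";
		uint32_t mask;

		if (!read_mask(input, &mask))
			printf("mask = 0x%x\n", mask);	/* prints mask = 0x7 */
		return 0;
	}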
@@ -651,32 +684,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret;
- long level;
uint32_t mask = 0;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[count];
- const char delimiter[3] = {' ', '\n', '\0'};
- memcpy(buf_cpy, buf, count+1);
- tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- } else
- break;
- }
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-fail:
return count;
}
@@ -701,33 +717,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret;
- long level;
uint32_t mask = 0;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[count];
- const char delimiter[3] = {' ', '\n', '\0'};
- memcpy(buf_cpy, buf, count+1);
- tmp = buf_cpy;
-
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- } else
- break;
- }
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-fail:
return count;
}
@@ -905,6 +903,36 @@ fail:
return -EINVAL;
}
+/**
+ * DOC: busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the GPU
+ * is as a percentage. The file gpu_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int r, value, size = sizeof(value);
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* read the IP busy sensor */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
+ (void *)&value, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
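A hedged userspace sketch of consuming the new attribute; the card0 sysfs path is an assumption and varies per device:

	#include <stdio.h>

	int main(void)
	{
		/* Path assumes card0; adjust for your device. */
		FILE *f = fopen("/sys/class/drm/card0/device/gpu_busy_percent", "r");
		int busy;

		if (!f)
			return 1;
		if (fscanf(f, "%d", &busy) == 1)
			printf("GPU load: %d%%\n", busy);
		fclose(f);
		return 0;
	}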
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
amdgpu_get_dpm_forced_performance_level,
@@ -938,6 +966,8 @@ static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
amdgpu_get_pp_od_clk_voltage,
amdgpu_set_pp_od_clk_voltage);
+static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
+ amdgpu_get_busy_percent, NULL);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1156,7 +1186,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
int r, size = sizeof(vddnb);
/* only APUs have vddnb */
- if (adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
/* Can't get voltage when the card is off */
@@ -1285,35 +1315,51 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
* DOC: hwmon
*
* The amdgpu driver exposes the following sensor interfaces:
+ *
* - GPU temperature (via the on-die sensor)
+ *
* - GPU voltage
+ *
* - Northbridge voltage (APUs only)
+ *
* - GPU power
+ *
* - GPU fan
*
* hwmon interfaces for GPU temperature:
+ *
* - temp1_input: the on die GPU temperature in millidegrees Celsius
+ *
* - temp1_crit: temperature critical max value in millidegrees Celsius
+ *
* - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
*
* hwmon interfaces for GPU voltage:
+ *
* - in0_input: the voltage on the GPU in millivolts
+ *
* - in1_input: the voltage on the Northbridge in millivolts
*
* hwmon interfaces for GPU power:
+ *
* - power1_average: average power used by the GPU in microWatts
+ *
* - power1_cap_min: minimum cap supported in microWatts
+ *
* - power1_cap_max: maximum cap supported in microWatts
+ *
* - power1_cap: selected power cap in microWatts
*
* hwmon interfaces for GPU fan:
+ *
* - pwm1: pulse width modulation fan level (0-255)
- * - pwm1_enable: pulse width modulation fan control method
- * 0: no fan speed control
- * 1: manual fan speed control using pwm interface
- * 2: automatic fan speed control
+ *
+ * - pwm1_enable: pulse width modulation fan control method
+ *   (0: no fan speed control, 1: manual fan speed control using
+ *   pwm interface, 2: automatic fan speed control)
+ *
* - pwm1_min: pulse width modulation fan control minimum level (0)
+ *
* - pwm1_max: pulse width modulation fan control maximum level (255)
+ *
* - fan1_input: fan speed in RPM
*
* You can use hwmon tools like sensors to view this information on your system.
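Since temp1_input reports millidegrees Celsius, a direct reader divides by 1000. A sketch, assuming the hwmon0 index and a non-negative reading:

	#include <stdio.h>

	int main(void)
	{
		/* hwmon index varies per system; hwmon0 is an assumption. */
		FILE *f = fopen("/sys/class/drm/card0/device/hwmon/hwmon0/temp1_input", "r");
		long mdeg;

		if (!f)
			return 1;
		if (fscanf(f, "%ld", &mdeg) == 1)	/* millidegrees Celsius */
			printf("GPU temperature: %ld.%03ld C\n",
			       mdeg / 1000, mdeg % 1000);
		fclose(f);
		return 0;
	}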
@@ -1668,10 +1714,10 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->powergate_uvd) {
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_uvd(adev, !enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
} else {
if (enable) {
@@ -1690,10 +1736,10 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->powergate_vce) {
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_vce(adev, !enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
mutex_unlock(&adev->pm.mutex);
} else {
if (enable) {
@@ -1825,6 +1871,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_od_clk_voltage\n");
return ret;
}
+ ret = device_create_file(adev->dev,
+ &dev_attr_gpu_busy_percent);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "gpu_busy_level\n");
+ return ret;
+ }
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1860,6 +1913,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
&dev_attr_pp_power_profile_mode);
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
+ device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1878,11 +1932,19 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
+ mutex_lock(&adev->pm.mutex);
+ /* update battery/ac status */
+ if (power_supply_is_system_supplied() > 0)
+ adev->pm.ac_power = true;
+ else
+ adev->pm.ac_power = false;
+ mutex_unlock(&adev->pm.mutex);
+
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
@@ -1898,14 +1960,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
} else {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
- else
- adev->pm.dpm.ac_power = false;
-
amdgpu_dpm_change_power_state_locked(adev);
-
mutex_unlock(&adev->pm.mutex);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 4683626b065f..1c5d97f4b4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -23,6 +23,14 @@
*
* Authors: Alex Deucher
*/
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
+
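For context, userspace normally exercises these callbacks through libdrm's PRIME helpers. A sketch of exporting a GEM handle as a dma-buf fd on one device and importing it on another (error handling trimmed; the two function names are the libdrm API, the rest is illustrative):

	#include <xf86drm.h>
	#include <stdint.h>
	#include <unistd.h>

	/* Sketch: export a GEM handle from one DRM fd, import it on another. */
	static int share_bo(int exporter_fd, uint32_t handle, int importer_fd,
			    uint32_t *imported_handle)
	{
		int prime_fd, r;

		r = drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd);
		if (r)
			return r;
		r = drmPrimeFDToHandle(importer_fd, prime_fd, imported_handle);
		close(prime_fd);	/* the importer holds its own reference */
		return r;
	}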
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -32,6 +40,14 @@
static const struct dma_buf_ops amdgpu_dmabuf_ops;
+/**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * A scatter/gather table for the pinned pages of the buffer object's memory.
+ */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -40,6 +56,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
+/**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+ * @obj: GEM buffer object
+ *
+ * Sets up an in-kernel virtual mapping of the buffer object's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+ */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -53,6 +78,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
return bo->dma_buf_vmap.virtual;
}
+/**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+ * @obj: GEM buffer object
+ * @vaddr: virtual address (unused)
+ *
+ * Tears down the in-kernel virtual mapping of the buffer object's memory.
+ */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -60,6 +92,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
+/**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+ * @obj: GEM buffer object
+ * @vma: virtual memory area
+ *
+ * Sets up a userspace mapping of the buffer object's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -94,6 +137,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
return ret;
}
+/**
+ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
+ * implementation
+ * @dev: DRM device
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+ * Import shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+ * A new GEM buffer object of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -132,8 +188,19 @@ error:
return ERR_PTR(ret);
}
+/**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -141,7 +208,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- r = drm_gem_map_attach(dma_buf, target_dev, attach);
+ r = drm_gem_map_attach(dma_buf, attach);
if (r)
return r;
@@ -165,7 +232,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
}
/* pin buffer into GTT */
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error_unreserve;
@@ -181,6 +248,14 @@ error_detach:
return r;
}
+/**
+ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * the other device. For now, simply unpins the buffer from GTT.
+ */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
@@ -202,6 +277,13 @@ error:
drm_gem_map_detach(dma_buf, attach);
}
+/**
+ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * The buffer object's reservation object.
+ */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -209,6 +291,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv;
}
+/**
+ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * @dma_buf: shared DMA buffer
+ * @direction: direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction direction)
{
@@ -229,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
@@ -245,14 +339,24 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
};
+/**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
+ * @gobj: GEM buffer object
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * The main work is done by the &drm_gem_prime_export helper, which in turn
+ * uses &amdgpu_gem_prime_res_obj.
+ *
+ * Returns:
+ * Shared DMA buffer representing the GEM buffer object from the given device.
+ */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags)
@@ -273,6 +377,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
return buf;
}
+/**
+ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
+ * @dev: DRM device
+ * @dma_buf: Shared DMA buffer
+ *
+ * The main work is done by the &drm_gem_prime_import helper, which in turn
+ * uses &amdgpu_gem_prime_import_sg_table.
+ *
+ * Returns:
+ * GEM buffer object representing the shared DMA buffer for the given device.
+ */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9f1a5bd39ae8..5b39d1399630 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
msleep(1);
}
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+ }
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 8af16e81c7d4..a172bba32b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
u32 ring,
struct amdgpu_ring **out_ring)
{
- u32 instance;
-
switch (mapper->hw_ip) {
case AMDGPU_HW_IP_GFX:
*out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = &adev->sdma.instance[ring].ring;
break;
case AMDGPU_HW_IP_UVD:
- instance = ring;
- *out_ring = &adev->uvd.inst[instance].ring;
+ *out_ring = &adev->uvd.inst[0].ring;
break;
case AMDGPU_HW_IP_VCE:
*out_ring = &adev->vce.ring[ring];
break;
case AMDGPU_HW_IP_UVD_ENC:
- instance = ring / adev->uvd.num_enc_rings;
- *out_ring =
- &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+ *out_ring = &adev->uvd.inst[0].ring_enc[ring];
break;
case AMDGPU_HW_IP_VCN_DEC:
*out_ring = &adev->vcn.ring_dec;
@@ -96,6 +91,9 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
*out_ring = &adev->vcn.ring_enc[ring];
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ *out_ring = &adev->vcn.ring_jpeg;
+ break;
default:
*out_ring = NULL;
DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
@@ -216,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
- int r, ip_num_rings;
+ int i, r, ip_num_rings = 0;
struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
if (!adev || !mgr || !out_ring)
@@ -245,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->sdma.num_instances;
break;
case AMDGPU_HW_IP_UVD:
- ip_num_rings = adev->uvd.num_uvd_inst;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
break;
case AMDGPU_HW_IP_VCE:
ip_num_rings = adev->vce.num_rings;
break;
case AMDGPU_HW_IP_UVD_ENC:
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
ip_num_rings =
- adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+ adev->uvd.num_enc_rings * ip_num_rings;
break;
case AMDGPU_HW_IP_VCN_DEC:
ip_num_rings = 1;
@@ -260,6 +265,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
ip_num_rings = adev->vcn.num_enc_rings;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ ip_num_rings = 1;
+ break;
default:
DRM_DEBUG("unknown ip type: %d\n", hw_ip);
return -EINVAL;
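The loops above count only the UVD instances whose harvest bit is clear. An equivalent population-count formulation, as a sketch assuming harvest_config only carries bits for existing instances and at least one instance is present:

	#include <linux/bitops.h>

	/* Sketch: instances present = total minus harvested ones. */
	static u32 uvd_active_instances(u32 num_uvd_inst, u32 harvest_config)
	{
		u32 mask = GENMASK(num_uvd_inst - 1, 0);

		return num_uvd_inst - hweight32(harvest_config & mask);
	}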
@@ -287,6 +295,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_UVD_ENC:
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
+ case AMDGPU_HW_IP_VCN_JPEG:
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index c6850b629d0e..93794a85f83d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -211,7 +211,8 @@ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
if (!ring->funcs->set_priority)
return;
- atomic_inc(&ring->num_jobs[priority]);
+ if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
+ return;
mutex_lock(&ring->priority_mutex);
if (priority <= ring->priority)
@@ -304,7 +305,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
0xffffffffffffffff : ring->buf_mask;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 1513124c5659..d242b9a51e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -44,6 +44,8 @@
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX,
AMDGPU_RING_TYPE_COMPUTE,
@@ -53,7 +55,8 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_KIQ,
AMDGPU_RING_TYPE_UVD_ENC,
AMDGPU_RING_TYPE_VCN_DEC,
- AMDGPU_RING_TYPE_VCN_ENC
+ AMDGPU_RING_TYPE_VCN_ENC,
+ AMDGPU_RING_TYPE_VCN_JPEG
};
struct amdgpu_device;
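The new to_amdgpu_ring() macro works because the GPU scheduler is embedded in struct amdgpu_ring, so container_of() can recover the enclosing ring from a pointer to its sched member. A generic sketch of the idiom:

	#include <linux/kernel.h>

	struct inner { int x; };
	struct outer {
		int tag;
		struct inner member;
	};

	/* Sketch: recover the enclosing struct from a pointer to its member. */
	static struct outer *to_outer(struct inner *p)
	{
		return container_of(p, struct outer, member);
	}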
@@ -112,6 +115,7 @@ struct amdgpu_ring_funcs {
u32 nop;
bool support_64bit_ptrs;
unsigned vmhub;
+ unsigned extra_dw;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -119,6 +123,7 @@ struct amdgpu_ring_funcs {
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 86a0715d9431..1cafe8d83a4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -53,9 +53,8 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
enum drm_sched_priority priority)
{
- struct file *filp = fcheck(fd);
+ struct file *filp = fget(fd);
struct drm_file *file;
- struct pid *pid;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
@@ -63,20 +62,12 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
if (!filp)
return -EINVAL;
- pid = get_pid(((struct drm_file *)filp->private_data)->pid);
+ file = filp->private_data;
+ fpriv = file->driver_priv;
+ idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ amdgpu_ctx_priority_override(ctx, priority);
- mutex_lock(&adev->ddev->filelist_mutex);
- list_for_each_entry(file, &adev->ddev->filelist, lhead) {
- if (file->pid != pid)
- continue;
-
- fpriv = file->driver_priv;
- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
- amdgpu_ctx_priority_override(ctx, priority);
- }
- mutex_unlock(&adev->ddev->filelist_mutex);
-
- put_pid(pid);
+ fput(filp);
return 0;
}
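The switch from fcheck() to fget() matters because fcheck() returns an unreferenced pointer that is only stable under RCU, while this function sleeps while dereferencing filp->private_data. A minimal sketch of the take-reference/drop-reference pattern:

	#include <linux/errno.h>
	#include <linux/file.h>

	/* Sketch: safely use a struct file looked up from an fd. */
	static int with_file(int fd)
	{
		struct file *filp = fget(fd);	/* takes a reference */

		if (!filp)
			return -EINVAL;

		/* ... safe to sleep and use filp->private_data here ... */

		fput(filp);			/* drops the reference */
		return 0;
	}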
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index e3878256743a..8904e62dca7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2009 VMware, Inc.
*
@@ -75,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(vram_obj, false);
if (unlikely(r != 0))
goto out_unref;
- r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+ r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
goto out_unres;
}
+ vram_addr = amdgpu_bo_gpu_offset(vram_obj);
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gart_start, **gart_end;
@@ -96,11 +98,17 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(gtt_obj[i], false);
if (unlikely(r != 0))
goto out_lclean_unref;
- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
+ r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_lclean_unres;
}
+ r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
+ if (r) {
+ DRM_ERROR("%p bind failed\n", gtt_obj[i]);
+ goto out_lclean_unpin;
+ }
+ gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e96e26d3f3b0..7206a0025b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = p->job->ring->idx;
+ __entry->ring = p->ring->idx;
__entry->dw = p->job->ibs[i].length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- p->job->ring);
+ p->ring);
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TP_ARGS(mapping)
);
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+ TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+ TP_ARGS(mapping)
+);
+
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags),
@@ -436,7 +441,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
__entry->total_bo, __entry->total_size)
);
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
TP_ARGS(bo, new_placement, old_placement),
TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e93a0a237dc3..fcf421263fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
}
/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference
- * structures.
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
*
- * @adev: AMDGPU device for which the global structures need to be
- * registered.
+ * @adev: AMDGPU device for which the global structures need to be registered.
*
* This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
* during bring up.
@@ -104,8 +102,6 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
int r;
/* ensure reference is false in case init fails */
@@ -138,21 +134,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
- ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, NULL);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO move run queue.\n");
- goto error_entity;
- }
-
adev->mman.mem_global_referenced = true;
return 0;
-error_entity:
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
@@ -162,8 +147,6 @@ error_mem:
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
- drm_sched_entity_fini(adev->mman.entity.sched,
- &adev->mman.entity);
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drm_global_item_unref(&adev->mman.mem_global_ref);
@@ -177,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
}
/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific
- * type of memory request.
+ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
+ * memory request.
*
- * @bdev: The TTM BO device object (contains a reference to
- * amdgpu_device)
- * @type: The type of memory requested
- * @man:
+ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
+ * @type: The type of memory requested
+ * @man: The memory type manager for each domain
*
* This is called by ttm_bo_init_mm() when a buffer object is being
* initialized.
@@ -263,7 +245,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
/* Object isn't an AMDGPU object so ignore */
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+ if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@@ -276,8 +258,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
- } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -286,7 +268,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* BO will be evicted to GTT rather than causing other
* BOs to be evicted from VRAM
*/
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
@@ -294,12 +276,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo->placement.num_busy_placement = 1;
} else {
/* Move to GTT memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
case TTM_PL_TT:
default:
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
}
*placement = abo->placement;
}
@@ -307,8 +289,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
/**
* amdgpu_verify_access - Verify access for a mmap call
*
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
+ * @bo: The buffer object to map
+ * @filp: The file pointer from the process performing the mmap
*
* This is called by ttm_bo_mmap() to verify whether a process
* has the right to mmap a BO to their process space.
@@ -333,11 +315,10 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
/**
* amdgpu_move_null - Register memory for a buffer object
*
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
*
- * Assign the memory from new_mem to the memory of the buffer object
- * bo.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
*/
static void amdgpu_move_null(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
@@ -350,8 +331,12 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
- * buffer.
+ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
+ *
+ * @bo: The buffer object the memory node belongs to.
+ * @mm_node: Memory manager node for drm allocator.
+ * @mem: The region where the bo resides.
+ *
*/
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
@@ -367,10 +352,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
- * corresponding to @offset. It also modifies
- * the offset to be within the drm_mm_node
- * returned
+ * amdgpu_find_mm_node - Helper function that finds the drm_mm_node
+ * corresponding to @offset and modifies @offset to be relative to the
+ * returned drm_mm_node
+ *
+ * @mem: The region where the bo resides.
+ * @offset: The offset to look up; updated to be relative to the returned node.
+ *
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
unsigned long *offset)
@@ -512,8 +499,8 @@ error:
/**
* amdgpu_move_blit - Copy an entire buffer to another buffer
*
- * This is a helper called by amdgpu_bo_move() and
- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
+ * help move buffers to and from VRAM.
*/
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
@@ -595,7 +582,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
}
/* blit VRAM to GTT */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -647,7 +634,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* copy to VRAM */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -809,8 +796,8 @@ struct amdgpu_ttm_tt {
};
/**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
- * by a USERPTR pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
+ * pointer to memory
*
* Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
* This provides a wrapper around the get_user_pages() call to provide
@@ -833,8 +820,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
down_read(&mm->mmap_sem);
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /* check that we only use anonymous memory
- to prevent problems with writeback */
+ /*
+ * check that we only use anonymous memory to prevent problems
+ * with writeback
+ */
unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
@@ -885,10 +874,9 @@ release_pages:
}
/**
- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
- * as necessary.
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
*
- * Called by amdgpu_cs_list_validate(). This creates the page list
+ * Called by amdgpu_cs_list_validate(). This creates the page list
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
@@ -930,8 +918,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
- * user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
*
* Called by amdgpu_ttm_backend_bind()
**/
@@ -1310,8 +1297,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
- * for the current task
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
*
* @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current task's VM space to use
@@ -1361,9 +1348,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
- * inside an address range for the
- * current task.
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
+ * address range for the current task.
*
*/
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -1401,8 +1387,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
}
/**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
- * invalidated?
+ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
*/
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated)
@@ -1415,10 +1400,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
- * ttm_tt object been invalidated
- * since the last time they've
- * been set?
+ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
+ * been invalidated since the last time they've been set?
*/
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
@@ -1474,13 +1457,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
- * a buffer object.
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
*
- * Return true if eviction is sensible. Called by
- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
- * which tries to evict buffer objects until it can find space
- * for a new object and by ttm_bo_force_list_clean() which is
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
* used to clean out a memory space.
*/
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -1530,8 +1512,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_ttm_access_memory - Read or Write memory that backs a
- * buffer object.
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
*
* @bo: The buffer object to read/write
* @offset: Offset into buffer object
@@ -1695,7 +1676,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_VRAM,
adev->fw_vram_usage.start_offset,
(adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size), NULL);
+ adev->fw_vram_usage.size));
if (r)
goto error_pin;
r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
@@ -1719,8 +1700,8 @@ error_create:
return r;
}
/**
- * amdgpu_ttm_init - Init the memory management (ttm) as well as
- * various gtt/vram related fields.
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
*
* This initializes all of the memory space pools that the TTM layer
* will need such as the GTT space (system memory mapped to the device),
@@ -1871,8 +1852,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_ttm_late_init - Handle any late initialization for
- * amdgpu_ttm
+ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
@@ -1921,10 +1901,30 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
uint64_t size;
+ int r;
- if (!adev->mman.initialized || adev->in_gpu_reset)
+ if (!adev->mman.initialized || adev->in_gpu_reset ||
+ adev->mman.buffer_funcs_enabled == enable)
return;
+ if (enable) {
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+
+ ring = adev->mman.buffer_funcs_ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+ r);
+ return;
+ }
+ } else {
+ drm_sched_entity_destroy(&adev->mman.entity);
+ dma_fence_put(man->move);
+ man->move = NULL;
+ }
+
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
if (enable)
size = adev->gmc.real_vram_size;
@@ -2002,7 +2002,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
if (r)
goto error_free;
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
if (r)
goto error_free;
@@ -2071,24 +2071,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- if (direct_submit) {
- r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
- NULL, fence);
- job->fence = dma_fence_get(*fence);
- if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ if (direct_submit)
+ r = amdgpu_job_submit_direct(job, ring, fence);
+ else
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
- }
+ if (r)
+ goto error_free;
return r;
error_free:
amdgpu_job_free(job);
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2171,7 +2166,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e5da4654b630..8b3cc6687769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 08e38579af24..bdc472b6e641 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
AMDGPU_UCODE_ID_VCE,
+ AMDGPU_UCODE_ID_VCN,
AMDGPU_UCODE_ID_MAXIMUM,
};
@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
void *kaddr;
/* ucode_size_bytes */
uint32_t ucode_size;
+ /* starting tmr mc address */
+ uint32_t tmr_mc_addr_lo;
+ uint32_t tmr_mc_addr_hi;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3ff08e326838..e5a6db6beab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -53,11 +53,11 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
-#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
@@ -122,12 +122,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned version_major, version_minor, family_id;
+ unsigned family_id;
int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
@@ -208,29 +206,46 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
- version_major, version_minor, family_id);
-
- /*
- * Limit the number of UVD handles depending on microcode major
- * and minor versions. The firmware version which has 40 UVD
- * instances support is 1.80. So all subsequent versions should
- * also have the same support.
- */
- if ((version_major > 0x01) ||
- ((version_major == 0x01) && (version_minor >= 0x50)))
- adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
- adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
- (family_id << 8));
+ if (adev->asic_type < CHIP_VEGA20) {
+ unsigned version_major, version_minor;
- if ((adev->asic_type == CHIP_POLARIS10 ||
- adev->asic_type == CHIP_POLARIS11) &&
- (adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
- version_major, version_minor);
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
+
+ /*
+ * Limit the number of UVD handles depending on microcode major
+ * and minor versions. The firmware version which has 40 UVD
+ * instances support is 1.80. So all subsequent versions should
+ * also have the same support.
+ */
+ if ((version_major > 0x01) ||
+ ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
+ if ((adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS11) &&
+ (adev->uvd.fw_version < FW_1_66_16))
+ DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ version_major, version_minor);
+ } else {
+ unsigned int enc_major, enc_minor, dec_minor;
+
+ dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
+ enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
+ DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+ enc_major, enc_minor, dec_minor, family_id);
+
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+ adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
+ }
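To make the >= Vega20 layout above concrete: enc_major lives in bits 31:30, enc_minor in bits 29:24, dec_minor in bits 15:8, and the family ID in bits 7:0. A standalone decode of a made-up version word:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t v = 0x45200126;	/* hypothetical ucode_version word */

		printf("ENC %u.%u DEC .%u family %u\n",
		       (v >> 30) & 0x3,		/* enc_major  */
		       (v >> 24) & 0x3f,	/* enc_minor  */
		       (v >> 8) & 0xff,		/* dec_minor  */
		       v & 0xff);		/* family_id  */
		return 0;
	}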
bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
@@ -238,7 +253,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -246,21 +262,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
+ }
- ring = &adev->uvd.inst[j].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
- rq, NULL);
- if (r != 0) {
- DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
- return r;
- }
-
- for (i = 0; i < adev->uvd.max_handles; ++i) {
- atomic_set(&adev->uvd.inst[j].handles[i], 0);
- adev->uvd.inst[j].filp[i] = NULL;
- }
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+ atomic_set(&adev->uvd.handles[i], 0);
+ adev->uvd.filp[i] = NULL;
}
+
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
@@ -289,10 +297,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
- for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
- kfree(adev->uvd.inst[j].saved_bo);
+ drm_sched_entity_destroy(&adev->uvd.entity);
- drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
+ kvfree(adev->uvd.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr,
@@ -308,6 +318,29 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_uvd_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->uvd.inst[0].ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD kernel entity.\n");
+ return r;
+ }
+
+ return 0;
+}
+
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
unsigned size;
@@ -316,24 +349,26 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
cancel_delayed_work_sync(&adev->uvd.idle_work);
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
+
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
+
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
- /* only valid for physical mode */
- if (adev->asic_type < CHIP_POLARIS10) {
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.inst[j].handles[i]))
- break;
-
- if (i == adev->uvd.max_handles)
- continue;
- }
-
size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
ptr = adev->uvd.inst[j].cpu_addr;
- adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
@@ -349,6 +384,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->uvd.inst[i].vcpu_bo == NULL)
return -EINVAL;
@@ -357,7 +394,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
if (adev->uvd.inst[i].saved_bo != NULL) {
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
- kfree(adev->uvd.inst[i].saved_bo);
+ kvfree(adev->uvd.inst[i].saved_bo);
adev->uvd.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
@@ -381,30 +418,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
- struct amdgpu_ring *ring;
- int i, j, r;
-
- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
- ring = &adev->uvd.inst[j].ring;
+ struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
+ int i, r;
- for (i = 0; i < adev->uvd.max_handles; ++i) {
- uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
- if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
- struct dma_fence *fence;
-
- r = amdgpu_uvd_get_destroy_msg(ring, handle,
- false, &fence);
- if (r) {
- DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
- continue;
- }
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+ uint32_t handle = atomic_read(&adev->uvd.handles[i]);
- dma_fence_wait(fence, false);
- dma_fence_put(fence);
+ if (handle != 0 && adev->uvd.filp[i] == filp) {
+ struct dma_fence *fence;
- adev->uvd.inst[j].filp[i] = NULL;
- atomic_set(&adev->uvd.inst[j].handles[i], 0);
+ r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+ &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD %d!\n", r);
+ continue;
}
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+ adev->uvd.filp[i] = NULL;
+ atomic_set(&adev->uvd.handles[i], 0);
}
}
}
@@ -459,7 +493,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
}
amdgpu_uvd_force_into_uvd_segment(bo);
@@ -679,16 +713,15 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
void *ptr;
long r;
int i;
- uint32_t ip_instance = ctx->parser->job->ring->me;
if (offset & 0x3F) {
- DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
return -EINVAL;
}
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+ DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
return r;
}
@@ -698,7 +731,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
handle = msg[2];
if (handle == 0) {
- DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+ DRM_ERROR("Invalid UVD handle!\n");
return -EINVAL;
}
@@ -709,18 +742,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* try to alloc a new handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
- DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ DRM_ERROR(")Handle 0x%x already in use!\n",
+ handle);
return -EINVAL;
}
- if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
- adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+ if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+ adev->uvd.filp[i] = ctx->parser->filp;
return 0;
}
}
- DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+ DRM_ERROR("No more free UVD handles!\n");
return -ENOSPC;
case 1:
@@ -732,27 +766,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* validate the handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
- if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
- DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ if (adev->uvd.filp[i] != ctx->parser->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
return -EINVAL;
}
return 0;
}
}
- DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
return -ENOENT;
case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < adev->uvd.max_handles; ++i)
- atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+ atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
default:
- DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
return -EINVAL;
}
BUG();
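Note how the handle table is managed lock-free throughout this file: a slot is claimed with atomic_cmpxchg(), which succeeds only for the one caller that still observes the slot as empty, and is released the same way. A self-contained user-space analogue of the claim pattern (C11 atomics, purely illustrative):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_HANDLES 16

    static atomic_uint handles[MAX_HANDLES];

    /* Claim a free slot for 'handle'; mirrors the cmpxchg pattern above. */
    static int claim_handle(uint32_t handle)
    {
    	for (int i = 0; i < MAX_HANDLES; i++) {
    		unsigned int expected = 0;
    		/* Succeeds only if slot i still holds 0 (free). */
    		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
    			return i;
    	}
    	return -1; /* table full, like the -ENOSPC path above */
    }

    int main(void)
    {
    	printf("claimed slot %d\n", claim_handle(0xdeadbeef));
    	return 0;
    }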
@@ -1000,7 +1034,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (!ring->adev->uvd.address_64_bit) {
struct ttm_operation_ctx ctx = { true, false };
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
@@ -1045,19 +1079,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r < 0)
goto err_free;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
-
- amdgpu_job_free(job);
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r)
goto err_free;
- r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+ r = amdgpu_job_submit(job, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
@@ -1149,6 +1180,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
@@ -1259,7 +1292,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
* necessarily linear. So we need to count
* all non-zero handles.
*/
- if (atomic_read(&adev->uvd.inst->handles[i]))
+ if (atomic_read(&adev->uvd.handles[i]))
used_handles++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 8b23a1b00c76..a3ab1a41060f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -42,30 +42,34 @@ struct amdgpu_uvd_inst {
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
- atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
- struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
- struct drm_sched_entity entity;
- struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset;
};
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
struct amdgpu_uvd {
const struct firmware *fw; /* UVD firmware */
unsigned fw_version;
unsigned max_handles;
unsigned num_enc_rings;
- uint8_t num_uvd_inst;
+ uint8_t num_uvd_inst;
bool address_64_bit;
bool use_ctx_buf;
- struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_sched_entity entity;
struct delayed_work idle_work;
+ unsigned harvest_config;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 23d960ec1cf2..0cc5190f4f36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -40,11 +40,11 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
-#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
@@ -90,8 +90,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
*/
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
@@ -188,15 +186,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return r;
}
- ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
- rq, NULL);
- if (r != 0) {
- DRM_ERROR("Failed setting up VCE run queue.\n");
- return r;
- }
-
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
atomic_set(&adev->vce.handles[i], 0);
adev->vce.filp[i] = NULL;
@@ -222,7 +211,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ -237,6 +226,29 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
}
/**
+ * amdgpu_vce_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->vce.ring[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+ return r;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vce_suspend - unpin VCE fw memory
*
* @adev: amdgpu_device pointer
@@ -470,12 +482,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -532,19 +542,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 71781267ee4c..a1f209eed4c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -55,6 +55,7 @@ struct amdgpu_vce {
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 127e87b470ff..fd654a4406db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned version_major, version_minor, family_id;
+ unsigned char fw_check;
int r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -83,16 +83,38 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
- family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
- version_major, version_minor, family_id);
+ /* Bits 20-23 encode the major version and are non-zero under the new
+ * naming convention. Under the old naming convention this field is part
+ * of the minor version and DRM_DISABLED_FLAG. Since the latest minor
+ * version is 0x5B and DRM_DISABLED_FLAG is zero in the old convention,
+ * this field has always been zero so far; these four bits therefore tell
+ * which naming convention is present.
+ */
+ fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
+ if (fw_check) {
+ unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
+
+ fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
+ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
+ enc_major = fw_check;
+ dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
+ vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
+ DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+ enc_major, enc_minor, dec_ver, vep, fw_rev);
+ } else {
+ unsigned int version_major, version_minor, family_id;
+
+ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
+ }
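As a worked example, take a hypothetical ucode_version of 0x12105003 (an invented value, purely for illustration). Bits 20-23 are 0x1, so the new convention applies and the fields decode as ENC 1.5, DEC 2, VEP 1, revision 3:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t v = 0x12105003;	/* invented ucode_version, for illustration */
    	unsigned fw_check = (v >> 20) & 0xf;	/* 1 -> new naming convention */

    	if (fw_check)
    		printf("ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
    		       fw_check, (v >> 12) & 0xff,	/* enc 1.5 */
    		       (v >> 24) & 0xf,			/* dec 2   */
    		       (v >> 28) & 0xf,			/* vep 1   */
    		       v & 0xfff);			/* rev 3   */
    	return 0;
    }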
- bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
- + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ AMDGPU_VCN_SESSION_SIZE * 40;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
@@ -108,7 +130,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i;
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr,
@@ -119,6 +141,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+ amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+
release_firmware(adev->vcn.fw);
return 0;
@@ -137,7 +161,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
size = amdgpu_bo_size(adev->vcn.vcpu_bo);
ptr = adev->vcn.cpu_addr;
- adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vcn.saved_bo)
return -ENOMEM;
@@ -159,18 +183,20 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
if (adev->vcn.saved_bo != NULL) {
memcpy_toio(ptr, adev->vcn.saved_bo, size);
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
adev->vcn.saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned offset;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
memset_io(ptr, 0, size);
}
@@ -188,6 +214,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
}
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+
if (fences == 0) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
@@ -204,7 +232,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks && adev->pm.dpm_enabled) {
+ if (set_clocks) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
else
@@ -283,13 +311,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
}
ib->length_dw = 16;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
- amdgpu_job_free(job);
-
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
@@ -474,12 +499,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -528,12 +551,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -576,3 +597,127 @@ error:
dma_fence_put(fence);
return r;
}
+
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ const unsigned ib_size_dw = 16;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+
+ ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
+ ib->ptr[1] = 0xDEADBEEF;
+ for (i = 2; i < 16; i += 2) {
+ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ib->ptr[i+1] = 0;
+ }
+ ib->length_dw = 16;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ struct dma_fence *fence = NULL;
+ long r = 0;
+
+ r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ goto error;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto error;
+ } else
+ r = 0;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout)
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else {
+ DRM_ERROR("ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+
+ dma_fence_put(fence);
+
+error:
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 773010b9ff15..0b0b8638d73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,6 +66,7 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ struct amdgpu_ring ring_jpeg;
struct amdgpu_irq_src irq;
unsigned num_enc_rings;
};
@@ -83,4 +84,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b0eb2f537392..ece0ac703e27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -33,9 +33,11 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_gmc.h"
-/*
- * GPUVM
+/**
+ * DOC: GPUVM
+ *
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
* for the entire GPU, there are multiple VM page tables active
@@ -63,37 +65,84 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef START
#undef LAST
-/* Local structure. Encapsulate some VM table update parameters to reduce
+/**
+ * struct amdgpu_pte_update_params - Local structure
+ *
+ * Encapsulate some VM table update parameters to reduce
* the number of function parameters
+ *
*/
struct amdgpu_pte_update_params {
- /* amdgpu device we do this update for */
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
struct amdgpu_device *adev;
- /* optional amdgpu_vm we do this update for */
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
struct amdgpu_vm *vm;
- /* address where to copy page table entries from */
+
+ /**
+ * @src: address where to copy page table entries from
+ */
uint64_t src;
- /* indirect buffer to fill with commands */
+
+ /**
+ * @ib: indirect buffer to fill with commands
+ */
struct amdgpu_ib *ib;
- /* Function which actually does the update */
+
+ /**
+ * @func: Function which actually does the update
+ */
void (*func)(struct amdgpu_pte_update_params *params,
struct amdgpu_bo *bo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags);
- /* The next two are used during VM update by CPU
- * DMA addresses to use for mapping
- * Kernel pointer of PD/PT BO that needs to be updated
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping, used during VM update by CPU
*/
dma_addr_t *pages_addr;
+
+ /**
+ * @kptr:
+ *
+ * Kernel pointer of PD/PT BO that needs to be updated,
+ * used during VM update by CPU
+ */
void *kptr;
};
-/* Helper to disable partial resident texture feature from a fence callback */
+/**
+ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
+ */
struct amdgpu_prt_cb {
+
+ /**
+ * @adev: amdgpu device
+ */
struct amdgpu_device *adev;
+
+ /**
+ * @cb: callback
+ */
struct dma_fence_cb cb;
};
+/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+ *
+ * @base: base structure for tracking BO usage in a VM
+ * @vm: vm to which bo is to be added
+ * @bo: amdgpu buffer object
+ *
+ * Initialize a bo_va_base structure and add it to the appropriate lists
+ *
+ */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
@@ -107,6 +156,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
return;
list_add_tail(&base->bo_list, &bo->va);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&base->vm_status, &vm->relocated);
+
if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
return;
@@ -126,8 +178,10 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* amdgpu_vm_level_shift - return the addr shift for each level
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Returns the number of bits the pfn needs to be right shifted for a level.
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
*/
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
@@ -155,8 +209,10 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
* amdgpu_vm_num_entries - return the number of entries in a PD/PT
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the number of entries in a page directory or page table.
+ * Returns:
+ * The number of entries in a page directory or page table.
*/
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
@@ -179,8 +235,10 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
* amdgpu_vm_bo_size - returns the size of the BOs in bytes
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the size of the BO for a page directory or page table in bytes.
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
*/
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
@@ -218,6 +276,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
* @param: parameter for the validation callback
*
* Validate the page table BOs on command submission if necessary.
+ *
+ * Returns:
+ * Validation result.
*/
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
@@ -273,6 +334,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @vm: VM to check
*
* Check if all VM PDs/PTs are ready for updates
+ *
+ * Returns:
+ * True if eviction list is empty.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
@@ -283,10 +347,15 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* amdgpu_vm_clear_bo - initially clear the PDs/PTs
*
* @adev: amdgpu_device pointer
+ * @vm: VM to clear BO from
* @bo: BO to clear
* @level: level this BO is at
+ * @pte_support_ats: indicate ATS support from PTE
*
* Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_bo *bo,
@@ -318,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ats_entries = 0;
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
@@ -356,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+ &fence);
if (r)
goto error_free;
@@ -382,10 +451,16 @@ error:
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @parent: parent PT
* @saddr: start of the address range
* @eaddr: end of the address range
+ * @level: VMPT level
+ * @ats: indicate ATS support from PTE
*
* Make sure the page directories and page tables are allocated
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -420,11 +495,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
eaddr = eaddr & ((1 << shift) - 1);
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ if (vm->root.base.bo->shadow)
+ flags |= AMDGPU_GEM_CREATE_SHADOW;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else
- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW);
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
@@ -468,7 +544,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- list_move(&entry->base.vm_status, &vm->relocated);
}
if (level < AMDGPU_VM_PTB) {
@@ -494,6 +569,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
* @size: Size from start address we need.
*
* Make sure the page tables are allocated.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -559,6 +637,15 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
}
}
+/**
+ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
+ *
+ * @ring: ring on which the job will be submitted
+ * @job: job to submit
+ *
+ * Returns:
+ * True if sync is needed.
+ */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
@@ -586,19 +673,17 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
return vm_flush_needed || gds_switch_needed;
}
-static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
-{
- return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
-}
-
/**
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vmid: vmid number to use
- * @pd_addr: address of the page directory
+ * @job: related job
+ * @need_pipe_sync: is pipe sync needed
*
* Emit a VM flush when it is necessary.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
@@ -706,6 +791,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
* Object has to be reserved!
+ *
+ * Returns:
+ * Found bo_va or NULL.
*/
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
@@ -787,7 +875,10 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
- * to and return the pointer for the page table entry.
+ * to.
+ *
+ * Returns:
+ * The pointer for the page table entry.
*/
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
@@ -840,6 +931,17 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
}
}
+
+/**
+ * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
+ * @owner: fence owner
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *owner)
{
@@ -893,7 +995,10 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
/*
* amdgpu_vm_invalidate_level - mark all PD levels as invalid
*
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
* @parent: parent PD
+ * @level: VMPT level
*
* Mark all PD levels as invalid after an error.
*/
@@ -928,7 +1033,9 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
* @vm: requested vm
*
* Makes sure all directories are up to date.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
@@ -978,7 +1085,7 @@ restart:
struct amdgpu_vm_bo_base,
vm_status);
bo_base->moved = false;
- list_move(&bo_base->vm_status, &vm->idle);
+ list_del_init(&bo_base->vm_status);
bo = bo_base->bo->parent;
if (!bo)
@@ -1007,15 +1114,15 @@ restart:
struct amdgpu_ring *ring;
struct dma_fence *fence;
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
sched);
amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
AMDGPU_FENCE_OWNER_VM, false);
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+ &fence);
if (r)
goto error;
@@ -1115,14 +1222,15 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to, the next dst inside the function
* @flags: mapping flags
*
* Update the page tables in the range @start - @end.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
@@ -1176,7 +1284,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
* @end: last PTE to handle
* @dst: addr those PTEs should point to
* @flags: hw mapping flags
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
@@ -1248,7 +1358,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1292,7 +1404,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
addr, flags);
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
nptes = last - start + 1;
@@ -1324,7 +1436,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
ndw += ncmds * 10;
/* extra commands for begin/end fragments */
- ndw += 2 * 10 * adev->vm_manager.fragment_size;
+ if (vm->root.base.bo->shadow)
+ ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
+ else
+ ndw += 2 * 10 * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1371,8 +1486,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_ring_pad_ib(ring, params.ib);
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error_free;
@@ -1400,7 +1514,9 @@ error_free:
*
* Split the mapping into smaller chunks so that each update fits
* into a SDMA IB.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1453,7 +1569,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (nodes) {
addr = nodes->start << PAGE_SHIFT;
max_entries = (nodes->size - pfn) *
- (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
} else {
addr = 0;
max_entries = S64_MAX;
@@ -1463,7 +1579,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t count;
max_entries = min(max_entries, 16ull * 1024ull);
- for (count = 1; count < max_entries; ++count) {
+ for (count = 1;
+ count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ ++count) {
uint64_t idx = pfn + count;
if (pages_addr[idx] !=
@@ -1476,7 +1594,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count;
+ max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1491,7 +1609,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
- pfn += last - start + 1;
+ pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
if (nodes && nodes->size == pfn) {
pfn = 0;
++nodes;
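The divisions and multiplications by AMDGPU_GPU_PAGES_IN_CPU_PAGE keep the two page granularities straight; the macro's definition is not part of this hunk, but it is presumably the PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE ratio. A standalone sketch of the arithmetic under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Standalone sketch; constants assumed, not taken from this hunk. */
    #define GPU_PAGE_SIZE 4096u
    #define CPU_PAGE_SIZE 65536u                        /* e.g. 64 KiB CPU pages */
    #define GPU_PAGES_IN_CPU_PAGE (CPU_PAGE_SIZE / GPU_PAGE_SIZE)  /* = 16 */

    int main(void)
    {
    	uint64_t start = 0, last = 31;              /* 32 GPU PTEs written */
    	uint64_t pfn_adv = (last - start + 1) / GPU_PAGES_IN_CPU_PAGE;

    	/* 32 GPU pages span only 2 CPU pages, so pfn advances by 2. */
    	printf("pfn advances by %llu CPU pages\n", (unsigned long long)pfn_adv);
    	return 0;
    }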
@@ -1511,7 +1629,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* @clear: if true clear the entries
*
* Fill in the page table entries for @bo_va.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -1527,18 +1647,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
uint64_t flags;
int r;
- if (clear || !bo_va->base.bo) {
+ if (clear || !bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
- mem = &bo_va->base.bo->tbo.mem;
+ mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->base.bo->tbo.ttm,
- struct ttm_dma_tt, ttm);
+ ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
exclusive = reservation_object_get_excl(bo->tbo.resv);
@@ -1606,6 +1725,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
/**
* amdgpu_vm_update_prt_state - update the global PRT state
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
@@ -1620,6 +1741,8 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_get - add a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
@@ -1632,6 +1755,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_put - drop a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
@@ -1641,6 +1766,9 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
+ * @_cb: the callback function
*/
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
@@ -1652,6 +1780,9 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
/**
* amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: fence for the callback
*/
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
struct dma_fence *fence)
@@ -1743,9 +1874,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* or if an error occurred)
*
* Make sure all freed BOs are cleared in the PT.
- * Returns 0 for success.
- *
* PTs have to be reserved and mutex must be locked!
+ *
+ * Returns:
+ * 0 for success.
+ *
*/
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -1790,10 +1923,11 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @sync: sync object to add fences to
*
* Make sure all BOs which are moved are updated in the PTs.
- * Returns 0 for success.
+ *
+ * Returns:
+ * 0 for success.
*
* PTs have to be reserved!
*/
@@ -1848,7 +1982,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
*
* Add @bo into the requested vm.
* Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
+ *
+ * Returns:
+ * Newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
@@ -1911,10 +2047,13 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -1972,11 +2111,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM. Replace existing
* mappings as we do so.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -2033,7 +2175,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
* @saddr: where to the BO is mapped
*
* Remove a mapping of the BO at the specified addr from the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -2087,7 +2231,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* @size: size of the range
*
* Remove all mappings in a range, split them as appropriate.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -2184,8 +2330,13 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
* amdgpu_vm_bo_lookup_mapping - find mapping by address
*
* @vm: the requested VM
+ * @addr: the address
*
* Find a mapping by its address.
+ *
+ * Returns:
+ * The amdgpu_bo_va_mapping matching @addr, or NULL.
+ *
*/
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr)
@@ -2194,6 +2345,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
}
/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+
+ if (!trace_amdgpu_vm_bo_cs_enabled())
+ return;
+
+ for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+ mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+ if (mapping->bo_va && mapping->bo_va->base.bo) {
+ struct amdgpu_bo *bo;
+
+ bo = mapping->bo_va->base.bo;
+ if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+ continue;
+ }
+
+ trace_amdgpu_vm_bo_cs(mapping);
+ }
+}
+
+/**
* amdgpu_vm_bo_rmv - remove a bo from a specific vm
*
* @adev: amdgpu_device pointer
@@ -2237,8 +2417,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
* @adev: amdgpu_device pointer
- * @vm: requested vm
* @bo: amdgpu buffer object
+ * @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
@@ -2278,6 +2458,14 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_vm_get_block_size - calculate VM page table size as power of two
+ *
+ * @vm_size: VM size
+ *
+ * Returns:
+ * VM page table size as a power of two
+ */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
/* Total bits covered by PD + PTs */
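For a sense of the arithmetic, a worked example in comment form (4 KiB pages and the vm_size below are assumed for illustration):

    /*
     * Worked example, assuming 4 KiB pages and an invented vm_size:
     *   vm_size = 256 GiB = 2^38 bytes  ->  2^26 pages
     * so the PD plus the PTs together must cover 26 bits of page-frame
     * address; the block size is whatever remains once the root PD's
     * bits are subtracted (the exact split is computed in the function
     * body, outside this hunk).
     */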
@@ -2296,6 +2484,10 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
*
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
+ *
*/
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
uint32_t fragment_size_default, unsigned max_level,
@@ -2363,8 +2555,12 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
* @adev: amdgpu_device pointer
* @vm: requested vm
* @vm_context: Indicates whether it is a GFX or Compute context
+ * @pasid: Process address space identifier
*
* Init @vm fields.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid)
@@ -2396,8 +2592,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &vm->entity,
- rq, NULL);
+ r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
if (r)
return r;
@@ -2415,14 +2610,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
vm->last_update = NULL;
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- else
+ else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
flags |= AMDGPU_GEM_CREATE_SHADOW;
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
@@ -2477,7 +2672,7 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_sched_entity:
- drm_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
return r;
}
@@ -2485,6 +2680,9 @@ error_free_sched_entity:
/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
*
@@ -2494,10 +2692,10 @@ error_free_sched_entity:
* - pasid (old PASID is released, because compute manages its own PASIDs)
*
* Reinitializes the page directory to reflect the changed ATS
- * setting. May leave behind an unused shadow BO for the page
- * directory when switching from SDMA updates to CPU updates.
+ * setting.
*
- * Returns 0 for success, -errno for errors.
+ * Returns:
+ * 0 for success, -errno for errors.
*/
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
@@ -2531,7 +2729,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pte_support_ats = pte_support_ats;
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->pasid) {
@@ -2544,6 +2742,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
+ /* Free the shadow bo for compute VM */
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
error:
amdgpu_bo_unreserve(vm->root.base.bo);
return r;
@@ -2610,7 +2811,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2652,8 +2853,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @pasid: PASID do identify the VM
*
- * This function is expected to be called in interrupt context. Returns
- * true if there was fault credit, false otherwise
+ * This function is expected to be called in interrupt context.
+ *
+ * Returns:
+ * True if there was fault credit, false otherwise
*/
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid)
@@ -2707,7 +2910,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
- if (amdgpu_vm_is_large_bar(adev))
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else
@@ -2737,6 +2940,16 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
amdgpu_vmid_mgr_fini(adev);
}
+/**
+ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_vm
+ * @filp: drm file pointer
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
union drm_amdgpu_vm *args = data;
@@ -2760,3 +2973,42 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
}
+
+/**
+ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID identifier for VM
+ * @task_info: task_info to fill.
+ */
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info)
+{
+ struct amdgpu_vm *vm;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ *task_info = vm->task_info;
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets the VM's task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+ if (!vm->task_info.pid) {
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm == current->mm) {
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+ }
+ }
+}
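A plausible producer/consumer pairing for these two helpers, sketched under the assumption that the submission path records the task once per VM and a page-fault handler later resolves the PASID for its error message (neither call site is part of this patch):

    /* Hedged sketch -- hypothetical call sites, not from this patch. */

    /* Submission path, before the job is pushed to the scheduler: */
    amdgpu_vm_set_task_info(vm);	/* records pid/tgid and comm names once */

    /* GPU page-fault handler that only knows the faulting PASID: */
    struct amdgpu_task_info task_info = {};

    amdgpu_vm_get_task_info(adev, pasid, &task_info);
    dev_err(adev->dev, "VM fault from process %s (pid %d, thread %s pid %d)\n",
    	task_info.process_name, task_info.tgid,
    	task_info.task_name, task_info.pid);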
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 061b99a18cb8..67a15d439ac0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -164,6 +164,14 @@ struct amdgpu_vm_pt {
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
+struct amdgpu_task_info {
+ char process_name[TASK_COMM_LEN];
+ char task_name[TASK_COMM_LEN];
+ pid_t pid;
+ pid_t tgid;
+};
+
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -215,6 +223,9 @@ struct amdgpu_vm {
/* Valid while the PD is reserved or fenced */
uint64_t pd_phys_addr;
+
+ /* Some basic info about the task */
+ struct amdgpu_task_info task_info;
};
struct amdgpu_vm_manager {
@@ -307,6 +318,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
@@ -317,4 +329,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info);
+
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index b6333f92ba45..9cfa8a9ada92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
}
/**
- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
*
* @bo: &amdgpu_bo buffer object (must be in VRAM)
*
* Returns:
- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
*/
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_mem_reg *mem = &bo->tbo.mem;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
- u64 usage = 0;
+ u64 usage;
- if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
- return 0;
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ return amdgpu_bo_size(bo);
if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
- return amdgpu_bo_size(bo);
+ return 0;
- while (nodes && pages) {
- usage += nodes->size << PAGE_SHIFT;
- usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
- pages -= nodes->size;
- ++nodes;
- }
+ for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
+ usage += amdgpu_vram_mgr_vis_size(adev, nodes);
return usage;
}
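To make the three cases concrete (all numbers invented): a BO in a fully visible BAR reports its whole size, a BO whose first node starts at or past the visible limit reports 0, and a straddling BO reports only the per-node slices below the boundary as summed by amdgpu_vram_mgr_vis_size():

    /*
     * Invented example: visible_vram_size = 256 MiB, BO = two 64 MiB nodes.
     *   node A at [192 MiB, 256 MiB)  -> fully visible,   contributes 64 MiB
     *   node B at [512 MiB, 576 MiB)  -> fully invisible, contributes 0
     * amdgpu_vram_mgr_bo_visible_size() therefore returns 64 MiB.
     */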
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 7fbad2f5f0bd..d2469453dca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -49,10 +49,10 @@
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -951,12 +951,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
else
pi->battery_state = false;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
- if (adev->pm.dpm.ac_power == false) {
+ if (adev->pm.ac_power == false) {
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk > max_limits->mclk)
ps->performance_levels[i].mclk = max_limits->mclk;
@@ -4078,7 +4078,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4127,7 +4127,7 @@ static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4160,7 +4160,7 @@ static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4191,7 +4191,7 @@ static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -5815,7 +5815,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -5846,8 +5846,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
adev->pm.dpm.priv = pi;
pi->sys_pcie_mask =
- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
@@ -6767,6 +6766,19 @@ static int ci_dpm_read_sensor(void *handle, int idx,
}
}
+static int ci_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ ci_dpm_powergate_uvd(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
@@ -6804,7 +6816,7 @@ static const struct amd_pm_funcs ci_dpm_funcs = {
.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ci_dpm_force_performance_level,
.vblank_too_short = &ci_dpm_vblank_too_short,
- .powergate_uvd = &ci_dpm_powergate_uvd,
+ .set_powergating_by_smu = &ci_set_powergating_by_smu,
.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 8ff4c60d1b59..78ab939ae5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
- mdelay(100);
+ msleep(100);
/* linkctl */
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
@@ -2003,9 +2003,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
- else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -2024,9 +2024,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
- else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index a7576255cc30..d0fa2aac2388 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);
-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
if (err)
goto out;
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
}
/**
@@ -192,9 +191,8 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
(lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}
@@ -248,7 +246,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
u32 ref_and_mask;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
else
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -1290,8 +1288,10 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
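Condensed view of the pattern these cik_sdma hunks apply (the same change recurs in sdma_v2_4 below): the instance index is cached in ring->me once at init, so hot paths index sdma_offsets[ring->me] instead of re-deriving the instance by pointer comparison on every call. Names are from the hunks; error paths elided.

	/* Assign once at init time ... */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
		adev->sdma.instance[i].ring.me = i;	/* cached instance index */
	}

	/*
	 * ... then use it directly in hot paths, replacing:
	 *   u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
	 */
	wptr = (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;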
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ada241bfeee9..308f9f238bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -41,6 +41,8 @@
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1855,15 +1857,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2370,13 +2371,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v10_0_lock_cursor(crtc, true);
@@ -2737,14 +2739,14 @@ static int dce_v10_0_sw_init(void *handle)
return r;
}
- for (i = 8; i < 20; i += 2) {
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a5b96eac3033..76dfb76f7900 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -41,6 +41,8 @@
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1897,15 +1899,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2449,13 +2450,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v11_0_lock_cursor(crtc, true);
@@ -2858,14 +2860,14 @@ static int dce_v11_0_sw_init(void *handle)
return r;
}
- for (i = 8; i < 20; i += 2) {
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 394cc1e8fe20..c9adc627305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1811,15 +1811,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2263,13 +2262,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v6_0_lock_cursor(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c9b9ab8f1b05..50cd03beac7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1786,15 +1786,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2274,13 +2273,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v8_0_lock_cursor(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index dbf2ccd0c744..15257634a53a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -36,6 +36,7 @@
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
@@ -269,25 +270,18 @@ static int dce_virtual_early_init(void *handle)
static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -378,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
if (r)
return r;
@@ -634,7 +628,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
drm_connector_register(connector);
/* link them */
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index cd6bf291a853..de184a886057 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
-MODULE_FIRMWARE("radeon/tahiti_me.bin");
-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
-
-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
-
-MODULE_FIRMWARE("radeon/verde_pfp.bin");
-MODULE_FIRMWARE("radeon/verde_me.bin");
-MODULE_FIRMWARE("radeon/verde_ce.bin");
-MODULE_FIRMWARE("radeon/verde_rlc.bin");
-
-MODULE_FIRMWARE("radeon/oland_pfp.bin");
-MODULE_FIRMWARE("radeon/oland_me.bin");
-MODULE_FIRMWARE("radeon/oland_ce.bin");
-MODULE_FIRMWARE("radeon/oland_rlc.bin");
-
-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
-MODULE_FIRMWARE("radeon/hainan_me.bin");
-MODULE_FIRMWARE("radeon/hainan_ce.bin");
-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
+MODULE_FIRMWARE("amdgpu/verde_me.bin");
+MODULE_FIRMWARE("amdgpu/verde_ce.bin");
+MODULE_FIRMWARE("amdgpu/verde_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
+MODULE_FIRMWARE("amdgpu/oland_me.bin");
+MODULE_FIRMWARE("amdgpu/oland_ce.bin");
+MODULE_FIRMWARE("amdgpu/oland_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hainan_me.bin");
+MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
+MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 42b6144c1fd5..95452c5a9df6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
-MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
-MODULE_FIRMWARE("radeon/bonaire_me.bin");
-MODULE_FIRMWARE("radeon/bonaire_ce.bin");
-MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
-MODULE_FIRMWARE("radeon/bonaire_mec.bin");
-
-MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
-MODULE_FIRMWARE("radeon/hawaii_me.bin");
-MODULE_FIRMWARE("radeon/hawaii_ce.bin");
-MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mec.bin");
-
-MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
-MODULE_FIRMWARE("radeon/kaveri_me.bin");
-MODULE_FIRMWARE("radeon/kaveri_ce.bin");
-MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
-
-MODULE_FIRMWARE("radeon/kabini_pfp.bin");
-MODULE_FIRMWARE("radeon/kabini_me.bin");
-MODULE_FIRMWARE("radeon/kabini_ce.bin");
-MODULE_FIRMWARE("radeon/kabini_rlc.bin");
-MODULE_FIRMWARE("radeon/kabini_mec.bin");
-
-MODULE_FIRMWARE("radeon/mullins_pfp.bin");
-MODULE_FIRMWARE("radeon/mullins_me.bin");
-MODULE_FIRMWARE("radeon/mullins_ce.bin");
-MODULE_FIRMWARE("radeon/mullins_rlc.bin");
-MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
+
+MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kabini_me.bin");
+MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
+MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
+MODULE_FIRMWARE("amdgpu/mullins_me.bin");
+MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
+MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
+MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 818874b13c99..5cd45210113f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -51,6 +51,8 @@
#include "smu/smu_7_1_3_d.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_MEC_HPD_SIZE 2048
@@ -704,6 +706,17 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};
+
+static const char * const sq_edc_source_names[] = {
+ "SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
+ "SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
+ "SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
+ "SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
+ "SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
+ "SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
+ "SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
+};
+
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
@@ -866,26 +879,32 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
+
+ unsigned int index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
long r;
- r = amdgpu_gfx_scratch_get(adev, &scratch);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
- WREG32(scratch, 0xCAFEDEAD);
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
@@ -900,20 +919,21 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
- tmp = RREG32(scratch);
+
+ tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF) {
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ DRM_ERROR("ib test on ring %d failed\n", ring->idx);
r = -EINVAL;
}
+
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_device_wb_free(adev, index);
return r;
}
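A condensed view of the rewritten IB test above: instead of a GFX scratch register, the IB carries a WRITE_DATA packet that stores the magic value to a write-back (WB) slot in system memory, and the CPU reads the same slot back. Fence wait and error handling elided; names are from the hunk.

	r = amdgpu_device_wb_get(adev, &index);		/* reserve a WB dword */
	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);	/* poison before submit */

	/* build a WRITE_DATA IB targeting gpu_addr, submit, wait on fence */

	tmp = adev->wb.wb[index];			/* CPU-side readback */
	if (tmp == 0xDEADBEEF)
		r = 0;					/* GPU executed the IB */
	amdgpu_device_wb_free(adev, index);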
@@ -1999,6 +2019,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
return 0;
}
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
+
static int gfx_v8_0_sw_init(void *handle)
{
int i, j, k, r, ring_id;
@@ -2027,27 +2049,43 @@ static int gfx_v8_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
+ /* Add CP EDC/ECC irq */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* SQ interrupts. */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+ &adev->gfx.sq_irq);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+ return r;
+ }
+
+ INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
gfx_v8_0_scratch_init(adev);
@@ -5111,6 +5149,10 @@ static int gfx_v8_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+
+ amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
+
/* disable KCQ to avoid CPC touch memory not valid anymore */
for (i = 0; i < adev->gfx.num_compute_rings; i++)
gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
@@ -5542,9 +5584,19 @@ static int gfx_v8_0_late_init(void *handle)
if (r)
return r;
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
+ return r;
+ }
+
+ r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
+ if (r) {
+ DRM_ERROR(
+ "amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
+ r);
+ return r;
+ }
return 0;
}
@@ -5552,14 +5604,12 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if ((adev->asic_type == CHIP_POLARIS11) ||
+ if (((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM))
+ (adev->asic_type == CHIP_VEGAM)) &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
/* Send msg to SMU via Powerplay */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ?
- AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -6787,6 +6837,77 @@ static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ int enable_flag;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ enable_flag = 0;
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ enable_flag = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+
+ return 0;
+}
+
+static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ int enable_flag;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ enable_flag = 1;
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ enable_flag = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+	WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL, enable_flag);
+
+ return 0;
+}
+
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -6837,6 +6958,114 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("CP EDC/ECC error detected.");
+ return 0;
+}
+
+static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+{
+ u32 enc, se_id, sh_id, cu_id;
+ char type[20];
+ int sq_edc_source = -1;
+
+ enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
+ se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
+
+ switch (enc) {
+ case 0:
+ DRM_INFO("SQ general purpose intr detected:"
+ "se_id %d, immed_overflow %d, host_reg_overflow %d,"
+ "host_cmd_overflow %d, cmd_timestamp %d,"
+ "reg_timestamp %d, thread_trace_buff_full %d,"
+ "wlt %d, thread_trace %d.\n",
+ se_id,
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
+ );
+ break;
+ case 1:
+ case 2:
+
+ cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
+ sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
+
+		/*
+		 * This function can be called either directly from the ISR
+		 * or from the bottom half (work handler); only in the latter
+		 * case is it safe to read the SQ_EDC_INFO register.
+		 */
+ if (in_task()) {
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
+
+ sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
+
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (enc == 1)
+ sprintf(type, "instruction intr");
+ else
+ sprintf(type, "EDC/ECC error");
+
+ DRM_INFO(
+ "SQ %s detected: "
+ "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
+ "trap %s, sq_ed_info.source %s.\n",
+ type, se_id, sh_id, cu_id,
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
+ (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
+ );
+ break;
+ default:
+ DRM_ERROR("SQ invalid encoding type\n.");
+ }
+}
+
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
+ struct sq_work *sq_work = container_of(work, struct sq_work, work);
+
+ gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+}
+
+static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned ih_data = entry->src_data[0];
+
+	/*
+	 * Try to submit work so SQ_EDC_INFO can be accessed from
+	 * the BH. If a previous work submission hasn't finished yet,
+	 * just print whatever info is possible directly from the ISR.
+	 */
+ if (work_pending(&adev->gfx.sq_work.work)) {
+ gfx_v8_0_parse_sq_irq(adev, ih_data);
+ } else {
+ adev->gfx.sq_work.ih_data = ih_data;
+ schedule_work(&adev->gfx.sq_work.work);
+ }
+
+ return 0;
+}
+
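The SQ handler above defers to a workqueue because reading SQ_EDC_INFO requires taking grbm_idx_mutex, which cannot be done in interrupt context; the in_task() check in gfx_v8_0_parse_sq_irq() selects between the full and the degraded path. The general shape of that deferral, with hypothetical names:

	/* Hypothetical skeleton of the defer-to-BH pattern used above. */
	static int my_irq_handler(struct my_dev *dev, unsigned int ih_data)
	{
		if (work_pending(&dev->work)) {
			/* BH still busy: parse now, skipping the
			 * mutex-guarded register read ("unavailable"). */
			my_parse(dev, ih_data);
		} else {
			dev->cached_ih_data = ih_data;	/* hand off to BH */
			schedule_work(&dev->work);	/* may sleep there */
		}
		return 0;
	}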
static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -7037,6 +7266,16 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
.process = gfx_v8_0_kiq_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v8_0_set_cp_ecc_int_state,
+ .process = gfx_v8_0_cp_ecc_error_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
+ .set = gfx_v8_0_set_sq_int_state,
+ .process = gfx_v8_0_sq_irq,
+};
+
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -7050,6 +7289,12 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
+
+ adev->gfx.cp_ecc_error_irq.num_types = 1;
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
+
+ adev->gfx.sq_irq.num_types = 1;
+ adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
}
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a69153435ea7..ef00d14f8645 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,6 +38,8 @@
#include "clearstate_gfx9.h"
#include "v9_structs.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -102,11 +104,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -648,7 +661,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- if (adev->gfx.rlc.is_rlc_v2_1) {
+ if (adev->gfx.rlc.is_rlc_v2_1 &&
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
+ adev->gfx.rlc.save_restore_list_srm_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
info->fw = adev->gfx.rlc_fw;
@@ -943,6 +959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v9_0_get_csb_buffer(adev, dst_ptr);
amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
@@ -971,6 +988,39 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
+static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (!r)
+ adev->gfx.rlc.clear_state_gpu_addr =
+ amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ return r;
+}
+
+static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!adev->gfx.rlc.clear_state_obj)
+ return;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+ if (likely(r == 0)) {
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+}
+
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1451,23 +1501,23 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -2148,8 +2198,16 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
- if (!adev->gfx.rlc.is_rlc_v2_1)
- return;
+ gfx_v9_0_init_csb(adev);
+
+	/*
+	 * The RLC save/restore list is supported since v2_1,
+	 * and it is needed by the gfxoff feature.
+	 */
+ if (adev->gfx.rlc.is_rlc_v2_1) {
+ gfx_v9_1_init_rlc_save_restore_list(adev);
+ gfx_v9_0_enable_save_restore_machine(adev);
+ }
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
@@ -2157,10 +2215,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
- gfx_v9_0_init_csb(adev);
- gfx_v9_1_init_rlc_save_restore_list(adev);
- gfx_v9_0_enable_save_restore_machine(adev);
-
WREG32(mmRLC_JUMP_TABLE_RESTORE,
adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
@@ -2252,9 +2306,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- /* disable PG */
- WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
-
gfx_v9_0_rlc_reset(adev);
gfx_v9_0_init_pg(adev);
@@ -3116,6 +3167,10 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_gpu_init(adev);
+ r = gfx_v9_0_csb_vram_pin(adev);
+ if (r)
+ return r;
+
r = gfx_v9_0_rlc_resume(adev);
if (r)
return r;
@@ -3224,6 +3279,8 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);
+ gfx_v9_0_csb_vram_unpin(adev);
+
return 0;
}
@@ -3433,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -3510,8 +3567,11 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+
+ if (adev->asic_type != CHIP_VEGA12)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
@@ -3541,11 +3601,15 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
} else {
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+
+ if (adev->asic_type != CHIP_VEGA12)
+ data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+ data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -3581,9 +3645,11 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable 3Dcgcg FSM(0x0020003f) */
+
+ /* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3630,9 +3696,10 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable cgcg FSM(0x0020003F) */
+ /* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3714,6 +3781,15 @@ static int gfx_v9_0_set_powergating_state(void *handle,
/* update mgcg state */
gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
+
+ /* set gfx off through smu */
+ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
+ break;
+ case CHIP_VEGA12:
+ /* set gfx off through smu */
+ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 79f9ac29019b..75317f283c69 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -41,11 +41,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);
-MODULE_FIRMWARE("radeon/tahiti_mc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
-MODULE_FIRMWARE("radeon/verde_mc.bin");
-MODULE_FIRMWARE("radeon/oland_mc.bin");
-MODULE_FIRMWARE("radeon/si58_mc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
+MODULE_FIRMWARE("amdgpu/verde_mc.bin");
+MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -134,9 +134,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
is_58_fw = true;
if (is_58_fw)
- snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7147bfe25a23..36dc367c4b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -28,6 +28,7 @@
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
@@ -43,12 +44,14 @@
#include "amdgpu_atombios.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
-MODULE_FIRMWARE("radeon/bonaire_mc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
static const u32 golden_settings_iceland_a11[] =
@@ -147,10 +150,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- if (adev->asic_type == CHIP_TOPAZ)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
@@ -999,11 +999,11 @@ static int gmc_v7_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1079,6 +1079,12 @@ static int gmc_v7_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1088,6 +1094,7 @@ static int gmc_v7_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
gmc_v7_0_gart_fini(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->gmc.fw);
@@ -1277,7 +1284,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
@@ -1302,6 +1309,29 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1edbe6b477b5..70fc97b59b4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
@@ -44,6 +45,7 @@
#include "amdgpu_atombios.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1101,11 +1103,11 @@ static int gmc_v8_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1181,6 +1183,12 @@ static int gmc_v8_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1190,6 +1198,7 @@ static int gmc_v8_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
gmc_v8_0_gart_fini(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->gmc.fw);
@@ -1425,7 +1434,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
if (amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
@@ -1447,8 +1456,13 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
gmc_v8_0_set_fault_enable_default(adev, false);
if (printk_ratelimit()) {
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data[0]);
+ struct amdgpu_task_info task_info = { 0 };
+
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
+ entry->src_id, entry->src_data[0], task_info.process_name,
+ task_info.tgid, task_info.task_name, task_info.pid);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1457,6 +1471,29 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
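In the fault handlers above, the mb() orders the kfd_vm_fault_info stores before the atomic flag store; a KFD-side consumer would be expected to pair with it roughly as in the hypothetical sketch below, clearing the flag only after it has read the record.

	/* Hypothetical consumer pairing with the producer's mb(). */
	if (atomic_read(&adev->gmc.vm_fault_info_updated)) {
		smp_rmb();	/* read the record only after seeing the flag */
		handle_vm_fault(adev->gmc.vm_fault_info);	/* hypothetical */
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}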
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3c0a85d4e4ab..399a5db27649 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -43,6 +43,8 @@
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
+#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -257,12 +259,17 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
if (printk_ratelimit()) {
+ struct amdgpu_task_info task_info = { 0 };
+
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
dev_err(adev->dev,
- "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid);
- dev_err(adev->dev, " at page 0x%016llx from %d\n",
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ dev_err(adev->dev, " at address 0x%016llx from %d\n",
addr, entry->client_id);
if (!amdgpu_sriov_vf(adev))
dev_err(adev->dev,
@@ -872,9 +879,9 @@ static int gmc_v9_0_sw_init(void *handle)
}
/* This interrupt is VMC page fault.*/
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7a1e77c93bf1..3f57f6463dc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1921,7 +1921,7 @@ static int kv_dpm_set_power_state(void *handle)
int ret;
if (pi->bapm_enable) {
- ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
+ ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
return ret;
@@ -3306,6 +3306,19 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
}
+static int kv_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ kv_dpm_powergate_uvd(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3342,7 +3355,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
.print_power_state = &kv_dpm_print_power_state,
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
- .powergate_uvd = &kv_dpm_powergate_uvd,
+	.set_powergating_by_smu = &kv_set_powergating_by_smu,
.enable_bapm = &kv_dpm_enable_bapm,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.check_state_equal = kv_check_state_equal,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3d53c4413f13..e70a0d4d6db4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -471,8 +471,8 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
RENG_EXECUTE_ON_REG_UPDATE, 1);
WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
- if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
- amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
} else {
pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 0ff136d02d9b..02be34e72ed9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
case AMDGPU_UCODE_ID_VCE:
*type = GFX_FW_TYPE_VCE;
break;
+ case AMDGPU_UCODE_ID_VCN:
+ *type = GFX_FW_TYPE_VCN;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c7190c39c4f5..15ae4bc9c072 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -44,6 +44,8 @@
#include "iceland_sdma_pkt_open.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -202,8 +204,7 @@ static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+ u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
return wptr;
}
@@ -218,9 +219,8 @@ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
}
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -273,7 +273,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -898,7 +898,7 @@ static int sdma_v2_4_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -910,7 +910,7 @@ static int sdma_v2_4_sw_init(void *handle)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1213,8 +1213,10 @@ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
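The SDMA changes above swap a repeated pointer comparison (is this ring instance 0?) for an instance index cached in ring->me at init time. A small self-contained sketch of both styles, with minimal stand-in structs:

#include <stdio.h>

#define NUM_INSTANCES 2

struct ring { int me; /* cached instance index */ };
struct device { struct ring rings[NUM_INSTANCES]; };

/* old style: derive the index by pointer comparison on every access */
static int index_by_compare(struct device *dev, struct ring *r)
{
	return (r == &dev->rings[0]) ? 0 : 1;
}

/* new style: set once at init, read ring->me everywhere else */
static void init_rings(struct device *dev)
{
	for (int i = 0; i < NUM_INSTANCES; i++)
		dev->rings[i].me = i;
}

int main(void)
{
	struct device dev;

	init_rings(&dev);
	printf("compare: %d, cached: %d\n",
	       index_by_compare(&dev, &dev.rings[1]), dev.rings[1].me);
	return 0;
}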
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index aa9ab299fd32..1e07ff274d73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -44,6 +44,8 @@
#include "tonga_sdma_pkt_open.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -365,9 +367,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
/* XXX check if swapping is necessary on BE */
wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
- wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+ wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
}
return wptr;
@@ -394,9 +394,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
}
}
@@ -450,7 +448,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1179,7 +1177,7 @@ static int sdma_v3_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1191,7 +1189,7 @@ static int sdma_v3_0_sw_init(void *handle)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1655,8 +1653,10 @@ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ca53b3fba422..e7ca4623cfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -38,6 +38,9 @@
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -296,13 +299,12 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
u32 lowbit, highbit;
- int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+ lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+ highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
- me, highbit, lowbit);
+ ring->me, highbit, lowbit);
wptr = highbit;
wptr = wptr << 32;
wptr |= lowbit;
@@ -339,17 +341,15 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- me,
+ ring->me,
lower_32_bits(ring->wptr << 2),
- me,
+ ring->me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
}
@@ -430,7 +430,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
else
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
@@ -1228,13 +1228,13 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1651,8 +1651,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
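Across all three SDMA generations the raw interrupt source IDs 224 and 247 are replaced by named constants from the new ivsrcid headers; the registered values are unchanged, only readability improves. A tiny sketch (the numeric values below are taken from the hunks above, the helper is a stand-in for amdgpu_irq_add_id()):

#include <stdio.h>

/* named source IDs, mirroring the ivsrcid headers */
#define SRCID_SDMA_TRAP       224
#define SRCID_SDMA_SRBM_WRITE 247

/* hypothetical registration helper */
static int irq_add_id(unsigned client, unsigned src_id)
{
	printf("registered client %u src_id %u\n", client, src_id);
	return 0;
}

int main(void)
{
	/* the name documents intent; the value is unchanged */
	irq_add_id(0, SRCID_SDMA_TRAP);
	irq_add_id(0, SRCID_SDMA_SRBM_WRITE);
	return 0;
}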
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 5c97a3671726..db327b412562 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,16 +56,16 @@
#define BIOS_SCRATCH_4 0x5cd
-MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
-MODULE_FIRMWARE("radeon/verde_smc.bin");
-MODULE_FIRMWARE("radeon/verde_k_smc.bin");
-MODULE_FIRMWARE("radeon/oland_smc.bin");
-MODULE_FIRMWARE("radeon/oland_k_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
-MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
static const struct amd_pm_funcs si_dpm_funcs;
@@ -3480,7 +3480,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
disable_sclk_switching = true;
}
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -3489,7 +3489,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
}
- if (adev->pm.dpm.ac_power == false) {
+ if (adev->pm.ac_power == false) {
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk > max_limits->mclk)
ps->performance_levels[i].mclk = max_limits->mclk;
@@ -7318,8 +7318,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
pi = &eg_pi->rv7xx;
si_pi->sys_pcie_mask =
- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
@@ -7667,7 +7666,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
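The SI DPM firmware moves from the radeon/ to the amdgpu/ directory, both in the MODULE_FIRMWARE declarations and in the snprintf that builds the request_firmware() path. A runnable sketch of the path construction (chip list abbreviated):

#include <stdio.h>

int main(void)
{
	const char *chips[] = { "tahiti", "pitcairn", "verde", "oland", "hainan" };
	char fw_name[30];

	/* same pattern as si_dpm_init_microcode(), new amdgpu/ prefix */
	for (unsigned i = 0; i < sizeof(chips) / sizeof(chips[0]); i++) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chips[i]);
		printf("%s\n", fw_name);
	}
	return 0;
}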
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 8dc29107228f..edfe50821cd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -53,6 +53,29 @@
#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+#define PACKETJ_CONDITION_CHECK0 0
+#define PACKETJ_CONDITION_CHECK1 1
+#define PACKETJ_CONDITION_CHECK2 2
+#define PACKETJ_CONDITION_CHECK3 3
+#define PACKETJ_CONDITION_CHECK4 4
+#define PACKETJ_CONDITION_CHECK5 5
+#define PACKETJ_CONDITION_CHECK6 6
+#define PACKETJ_CONDITION_CHECK7 7
+
+#define PACKETJ_TYPE0 0
+#define PACKETJ_TYPE1 1
+#define PACKETJ_TYPE2 2
+#define PACKETJ_TYPE3 3
+#define PACKETJ_TYPE4 4
+#define PACKETJ_TYPE5 5
+#define PACKETJ_TYPE6 6
+#define PACKETJ_TYPE7 7
+
+#define PACKETJ(reg, r, cond, type) ((reg & 0x3FFFF) | \
+ ((r & 0x3F) << 18) | \
+ ((cond & 0xF) << 24) | \
+ ((type & 0xF) << 28))
+
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
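PACKETJ packs a register offset plus three control fields (r, condition, packet type) into a single command dword for the JPEG ring. A worked standalone example (the macro is restated with extra parentheses around its arguments; the field layout matches the header above):

#include <stdio.h>
#include <stdint.h>

#define PACKETJ_TYPE0 0

/* same field layout as the soc15d.h macro above */
#define PACKETJ(reg, r, cond, type) (((reg) & 0x3FFFF) | \
				     (((r) & 0x3F) << 18) | \
				     (((cond) & 0xF) << 24) | \
				     (((type) & 0xF) << 28))

int main(void)
{
	/* e.g. a TYPE0 write header for register offset 0x1234 */
	uint32_t hdr = PACKETJ(0x1234, 0, 0, PACKETJ_TYPE0);

	printf("header = 0x%08x\n", hdr); /* prints 0x00001234 */
	return 0;
}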
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 6fed3d7797a8..8a926d1df939 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -123,6 +123,10 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 341ee6d55ce8..50248059412e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -35,6 +35,7 @@
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -104,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
@@ -119,6 +120,10 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bfddf97dd13e..6ae82cc2e55e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -36,6 +36,7 @@
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
@@ -247,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -311,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
@@ -400,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
/* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
if (r)
return r;
}
@@ -425,16 +418,6 @@ static int uvd_v6_0_sw_init(void *handle)
adev->uvd.num_enc_rings = 0;
DRM_INFO("UVD ENC is disabled\n");
- } else {
- struct drm_sched_rq *rq;
- ring = &adev->uvd.inst->ring_enc[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
- rq, NULL);
- if (r) {
- DRM_ERROR("Failed setting up UVD ENC run queue.\n");
- return r;
- }
}
r = amdgpu_uvd_resume(adev);
@@ -457,6 +440,8 @@ static int uvd_v6_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_entity_init(adev);
+
return r;
}
@@ -470,8 +455,6 @@ static int uvd_v6_0_sw_fini(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
-
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
}
@@ -1569,7 +1552,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
- .nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
@@ -1587,7 +1569,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = uvd_v6_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
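The UVD encode submission paths above collapse the open-coded direct submit (schedule the IB, grab the fence, free the job) into amdgpu_job_submit_direct(), letting the direct and scheduled branches share one error exit. A minimal sketch of the resulting control flow (the two submit helpers are illustrative stubs):

#include <stdio.h>
#include <stdbool.h>

/* illustrative stand-ins for the two submission paths */
static int submit_direct(void) { printf("direct submit\n"); return 0; }
static int submit_scheduled(void) { printf("scheduled submit\n"); return 0; }

static int get_destroy_msg(bool direct)
{
	int r;

	/* one call per path, one shared error check: the shape of the new code */
	if (direct)
		r = submit_direct();
	else
		r = submit_scheduled();
	if (r)
		goto err;

	return 0;
err:
	return r;
}

int main(void)
{
	return get_destroy_msg(true) || get_destroy_msg(false);
}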
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 57d32f21b3a6..9b7f8469bc5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -39,6 +39,13 @@
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
+
+#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
#define UVD7_MAX_HW_INSTANCES_VEGA20 2
@@ -249,12 +256,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -312,19 +317,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
@@ -377,10 +376,25 @@ error:
static int uvd_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_VEGA20)
+
+ if (adev->asic_type == CHIP_VEGA20) {
+ u32 harvest;
+ int i;
+
adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
- else
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+ if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+ adev->uvd.harvest_config |= 1 << i;
+ }
+ }
+ if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+ AMDGPU_UVD_HARVEST_UVD1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else {
adev->uvd.num_uvd_inst = 1;
+ }
if (amdgpu_sriov_vf(adev))
adev->uvd.num_enc_rings = 1;
@@ -396,19 +410,21 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
if (r)
return r;
/* UVD ENC TRAP */
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
if (r)
return r;
}
@@ -428,22 +444,13 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
- ring = &adev->uvd.inst[j].ring_enc[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
- rq, NULL);
- if (r) {
- DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
- return r;
- }
- }
-
r = amdgpu_uvd_resume(adev);
if (r)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
sprintf(ring->name, "uvd<%d>", j);
@@ -472,6 +479,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
@@ -491,8 +502,8 @@ static int uvd_v7_0_sw_fini(void *handle)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
- drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
-
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
@@ -521,6 +532,8 @@ static int uvd_v7_0_hw_init(void *handle)
goto done;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
@@ -600,8 +613,11 @@ static int uvd_v7_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.ready = false;
+ }
return 0;
}
@@ -644,6 +660,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -716,6 +734,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -772,6 +792,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
ring = &adev->uvd.inst[i].ring;
ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -911,6 +933,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
int i, j, k, r;
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
/* disable DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -923,6 +947,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
uvd_v7_0_mc_resume(adev);
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
ring = &adev->uvd.inst[k].ring;
/* disable clock gating */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1090,6 +1116,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
uint8_t i = 0;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
/* force RBC into idle state */
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -1227,6 +1255,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
}
/**
+ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
+ *
+ * @p: the CS parser with the IBs
+ * @ib_idx: which IB to patch
+ *
+ * Registers in the IB are written against instance 0; when the job runs
+ * on the second instance, rebase each offset into that instance's space.
+ */
+static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ unsigned i;
+
+ /* No patching necessary for the first instance */
+ if (!p->ring->me)
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+
+ reg -= p->adev->reg_offset[UVD_HWIP][0][1];
+ reg += p->adev->reg_offset[UVD_HWIP][1][1];
+
+ amdgpu_set_ib_value(p, ib_idx, i, reg);
+ }
+ return 0;
+}
+
+/**
* uvd_v7_0_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
@@ -1718,6 +1774,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
+ .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
.emit_frame_size =
6 + /* hdp invalidate */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -1777,6 +1834,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
adev->uvd.inst[i].ring.me = i;
DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1788,6 +1847,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i, j;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1807,6 +1868,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
}
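Vega20 carries two UVD instances and either may be fused off; uvd_v7_0_early_init() reads the harvesting fuse into a per-instance bitmask, every per-instance loop now skips harvested entries, and the block returns -ENOENT only when both are gone. A self-contained sketch of the bitmask logic:

#include <stdio.h>

#define NUM_INST 2
#define HARVEST_UVD0 (1 << 0)
#define HARVEST_UVD1 (1 << 1)

int main(void)
{
	unsigned harvest_config = HARVEST_UVD1; /* instance 1 fused off */

	/* both gone: the IP block would return -ENOENT from early_init */
	if (harvest_config == (HARVEST_UVD0 | HARVEST_UVD1)) {
		printf("all instances harvested, block disabled\n");
		return 0;
	}

	/* the skip pattern repeated through sw_init/hw_init/start/stop */
	for (int i = 0; i < NUM_INST; i++) {
		if (harvest_config & (1 << i))
			continue;
		printf("init UVD instance %d\n", i);
	}
	return 0;
}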
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 47f70827195b..7eaa54ba016b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -56,7 +56,7 @@ static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(mmVCE_RB_RPTR);
else
return RREG32(mmVCE_RB_RPTR2);
@@ -73,7 +73,7 @@ static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(mmVCE_RB_WPTR);
else
return RREG32(mmVCE_RB_WPTR2);
@@ -90,7 +90,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
@@ -439,6 +439,8 @@ static int vce_v2_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
@@ -627,8 +629,10 @@ static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
+ adev->vce.ring[i].me = i;
+ }
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 0999c843f623..c8390f9adfd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -39,6 +39,7 @@
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
@@ -86,9 +87,9 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
v = RREG32(mmVCE_RB_RPTR);
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
v = RREG32(mmVCE_RB_RPTR2);
else
v = RREG32(mmVCE_RB_RPTR3);
@@ -118,9 +119,9 @@ static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
v = RREG32(mmVCE_RB_WPTR);
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
v = RREG32(mmVCE_RB_WPTR2);
else
v = RREG32(mmVCE_RB_WPTR3);
@@ -149,9 +150,9 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
@@ -422,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
int r, i;
/* VCE */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
if (r)
return r;
@@ -447,6 +448,8 @@ static int vce_v3_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
@@ -900,7 +903,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.emit_frame_size =
4 + /* vce_v3_0_emit_pipeline_sync */
6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
- .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +927,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
6 + /* vce_v3_0_emit_vm_flush */
4 + /* vce_v3_0_emit_pipeline_sync */
6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
- .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -942,12 +945,16 @@ static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
if (adev->asic_type >= CHIP_STONEY) {
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in VM mode\n");
} else {
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in physical mode\n");
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 8fd1b742985a..2e4d1b5f6243 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -39,6 +39,8 @@
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
+
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V4_0_FW_SIZE (384 * 1024)
@@ -60,9 +62,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
@@ -82,9 +84,9 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
@@ -108,10 +110,10 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
return;
}
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
lower_32_bits(ring->wptr));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
lower_32_bits(ring->wptr));
else
@@ -417,6 +419,7 @@ static int vce_v4_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
+
unsigned size;
int r, i;
@@ -436,7 +439,7 @@ static int vce_v4_0_sw_init(void *handle)
const struct common_firmware_header *hdr;
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
- adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vce.saved_bo)
return -ENOMEM;
@@ -472,6 +475,11 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
+
+ r = amdgpu_vce_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
@@ -488,7 +496,7 @@ static int vce_v4_0_sw_fini(void *handle)
amdgpu_virt_free_mm_table(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- kfree(adev->vce.saved_bo);
+ kvfree(adev->vce.saved_bo);
adev->vce.saved_bo = NULL;
}
@@ -1088,8 +1096,10 @@ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in VM mode\n");
}
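vce_v4_0 switches the saved-firmware buffer from kmalloc()/kfree() to kvmalloc()/kvfree(): the BO copy can be large, and kvmalloc falls back to vmalloc when contiguous pages are scarce. A minimal kernel-style fragment of the pairing (not a standalone program; the buffer name is illustrative):

#include <linux/errno.h>
#include <linux/mm.h>

static void *saved_bo;	/* illustrative; stands in for adev->vce.saved_bo */

static int saved_bo_alloc(size_t size)
{
	/* may be physically contiguous or vmalloc-backed, caller cannot tell */
	saved_bo = kvmalloc(size, GFP_KERNEL);
	return saved_bo ? 0 : -ENOMEM;
}

static void saved_bo_free(void)
{
	kvfree(saved_bo);	/* kvfree() frees either backing correctly */
	saved_bo = NULL;
}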
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 29684c3ea4ef..072371ef5975 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -35,10 +35,14 @@
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
/**
* vcn_v1_0_early_init - set function pointers
@@ -55,6 +59,7 @@ static int vcn_v1_0_early_init(void *handle)
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
+ vcn_v1_0_set_jpeg_ring_funcs(adev);
vcn_v1_0_set_irq_funcs(adev);
return 0;
@@ -74,22 +79,37 @@ static int vcn_v1_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
if (r)
return r;
/* VCN ENC TRAP */
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.irq);
if (r)
return r;
}
+ /* VCN JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+ if (r)
+ return r;
+
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ DRM_INFO("PSP loading VCN firmware\n");
+ }
+
r = amdgpu_vcn_resume(adev);
if (r)
return r;
@@ -108,6 +128,12 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
}
+ ring = &adev->vcn.ring_jpeg;
+ sprintf(ring->name, "vcn_jpeg");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+ if (r)
+ return r;
+
return r;
}
@@ -162,6 +188,14 @@ static int vcn_v1_0_hw_init(void *handle)
}
}
+ ring = &adev->vcn.ring_jpeg;
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully.\n");
@@ -241,26 +275,38 @@ static int vcn_v1_0_resume(void *handle)
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
-
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ uint32_t offset;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ offset = size;
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size));
+ lower_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size));
+ upper_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
@@ -578,12 +624,12 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_v1_0_mc_resume(adev);
-
vcn_1_0_disable_static_power_gating(adev);
/* disable clock gating */
vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_mc_resume(adev);
+
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
@@ -729,6 +775,22 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
+ /* copy patch commands to the jpeg ring */
+ vcn_v1_0_jpeg_ring_set_patch_ring(ring,
+ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+
return 0;
}
@@ -1126,6 +1188,383 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address to write the fence sequence number to
+ * @seq: sequence number to write
+ * @flags: fence flags (AMDGPU_FENCE_FLAG_64BIT is not supported here)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0xffffffff);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vmid, bool ctx_switch)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[(*ptr)++] = 0;
+ ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+ } else {
+ ring->ring[(*ptr)++] = reg_offset;
+ ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+ }
+ ring->ring[(*ptr)++] = val;
+}
+
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ uint32_t reg, reg_offset, val, mask, i;
+
+ // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+ reg_offset = (reg << 2);
+ val = lower_32_bits(ring->gpu_addr);
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+ reg_offset = (reg << 2);
+ val = upper_32_bits(ring->gpu_addr);
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 3rd to 5th: issue MEM_READ commands
+ for (i = 0; i <= 2; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x13;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 7th: program mmUVD_JRBC_RB_REF_DATA
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ mask = 0x1;
+
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = 0x01400200;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = val;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[ptr++] = 0;
+ ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+ } else {
+ ring->ring[ptr++] = reg_offset;
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+ }
+ ring->ring[ptr++] = mask;
+
+ // 9th to 21st: insert no-op
+ for (i = 0; i <= 12; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 22nd: reset mmUVD_JRBC_RB_RPTR
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
+ reg_offset = (reg << 2);
+ val = 0;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x12;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
+
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1150,6 +1589,9 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
case 120:
amdgpu_fence_process(&adev->vcn.ring_enc[1]);
break;
+ case 126:
+ amdgpu_fence_process(&adev->vcn.ring_jpeg);
+ break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -1273,6 +1715,39 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
+ .extra_dw = 64,
+ .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
+ .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
+ .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
+ .emit_frame_size =
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
+ 14 + 14 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
+ .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
+ .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+ .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
+ .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
+ .insert_nop = vcn_v1_0_jpeg_ring_nop,
+ .insert_start = vcn_v1_0_jpeg_ring_insert_start,
+ .insert_end = vcn_v1_0_jpeg_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
+};
+
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
@@ -1289,6 +1764,12 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
DRM_INFO("VCN encode is enabled in VM mode\n");
}
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+ DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
+}
+
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
.set = vcn_v1_0_set_interrupt_state,
.process = vcn_v1_0_process_interrupt,
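The JPEG ring helpers above branch on two byte-offset windows (0x1e000-0x1e1ff and 0x1f800-0x21fff): a register inside those windows is carried in the PACKETJ header itself, while anything outside is reached indirectly by first writing the offset to UVD_JRBC_EXTERNAL_REG_BASE. A standalone sketch of the predicate (the window bounds are taken from the hunks above):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* windows the JRBC engine can address directly (byte offsets) */
static bool in_direct_window(uint32_t reg_offset)
{
	return (reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
	       (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff);
}

int main(void)
{
	uint32_t regs[] = { 0x1e100, 0x1f900, 0x00c7 << 2 };

	for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("0x%05x -> %s write\n", regs[i],
		       in_direct_window(regs[i]) ? "direct" : "indirect");
	return 0;
}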
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 45aafca7f315..c5c9b2bc190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -51,6 +51,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 52778de93ab0..2d4473557b0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -38,6 +38,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
@@ -46,6 +47,8 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 4ac1288ab7df..88b57a5e9489 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -112,8 +112,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_11, (reg));
- r = RREG32(mmSMC_IND_DATA_11);
+ WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+ r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -1363,11 +1363,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
- pp_support_state = AMD_CG_SUPPORT_MC_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
@@ -1382,11 +1382,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
- pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
@@ -1401,11 +1401,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 49df6c791cfc..5d2475d5392c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -25,12 +25,39 @@
#include "cik_int.h"
static bool cik_event_interrupt_isr(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
unsigned int vmid, pasid;
+ /* This workaround is due to a HW/FW limitation on Hawaii:
+ * VMID and PASID are not written into the ih_ring_entry
+ */
+ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+ dev->device_info->asic_family == CHIP_HAWAII) {
+ struct cik_ih_ring_entry *tmp_ihre =
+ (struct cik_ih_ring_entry *)patched_ihre;
+
+ *patched_flag = true;
+ *tmp_ihre = *ihre;
+
+ vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+ pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+
+ tmp_ihre->ring_id &= 0x000000ff;
+ tmp_ihre->ring_id |= vmid << 8;
+ tmp_ihre->ring_id |= pasid << 16;
+
+ return (pasid != 0) &&
+ vmid >= dev->vm_info.first_vmid_kfd &&
+ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
/* Only handle interrupts from KFD VMIDs */
vmid = (ihre->ring_id & 0x0000ff00) >> 8;
if (vmid < dev->vm_info.first_vmid_kfd ||
@@ -48,18 +75,19 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE;
+ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+ ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
}
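For reference, both the Hawaii patching above and the bottom half below rely on the same ring_id packing: bits 15:8 carry the VMID and bits 31:16 the PASID. A minimal illustrative model of that layout (helper names are ours, not the driver's):

static inline unsigned int ihre_vmid(uint32_t ring_id)
{
	return (ring_id & 0x0000ff00) >> 8;	/* bits 15:8 */
}

static inline unsigned int ihre_pasid(uint32_t ring_id)
{
	return (ring_id & 0xffff0000) >> 16;	/* bits 31:16 */
}

static inline uint32_t ihre_pack_ids(uint32_t ring_id, unsigned int vmid,
				     unsigned int pasid)
{
	return (ring_id & 0x000000ff) | (vmid << 8) | (pasid << 16);
}
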
static void cik_event_interrupt_wq(struct kfd_dev *dev,
const uint32_t *ih_ring_entry)
{
- unsigned int pasid;
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
-
- pasid = (ihre->ring_id & 0xffff0000) >> 16;
+ unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
+ unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
return;
@@ -72,6 +100,22 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
+ else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_vm_fault_info info;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+
+ memset(&info, 0, sizeof(info));
+ dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ if (!info.page_addr && !info.status)
+ return;
+
+ if (info.vmid == vmid)
+ kfd_signal_vm_fault_event(dev, pasid, &info);
+ else
+ kfd_signal_vm_fault_event(dev, pasid, NULL);
+ }
}
const struct kfd_event_interrupt_class event_interrupt_class_cik = {
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
index 109298b9d507..76f8677a7926 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
-#define HSA_RADEON_CIK_INT_H_INCLUDED
+#ifndef CIK_INT_H_INCLUDED
+#define CIK_INT_H_INCLUDED
#include <linux/types.h>
@@ -34,9 +34,10 @@ struct cik_ih_ring_entry {
#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
#define CIK_INTSRC_SDMA_TRAP 0xE0
#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+#define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+#define CIK_INTSRC_GFX_MEM_PROT_FAULT 0x93
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index f68aef02fc1f..3621efbd5759 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -21,18 +21,21 @@
*/
static const uint32_t cwsr_trap_gfx8_hex[] = {
- 0xbf820001, 0xbf820125,
+ 0xbf820001, 0xbf82012b,
0xb8f4f802, 0x89748674,
0xb8f5f803, 0x8675ff75,
- 0x00000400, 0xbf850011,
+ 0x00000400, 0xbf850017,
0xc00a1e37, 0x00000000,
0xbf8c007f, 0x87777978,
- 0xbf840002, 0xb974f802,
- 0xbe801d78, 0xb8f5f803,
- 0x8675ff75, 0x000001ff,
- 0xbf850002, 0x80708470,
- 0x82718071, 0x8671ff71,
- 0x0000ffff, 0xb974f802,
+ 0xbf840005, 0x8f728374,
+ 0xb972e0c2, 0xbf800002,
+ 0xb9740002, 0xbe801d78,
+ 0xb8f5f803, 0x8675ff75,
+ 0x000001ff, 0xbf850002,
+ 0x80708470, 0x82718071,
+ 0x8671ff71, 0x0000ffff,
+ 0x8f728374, 0xb972e0c2,
+ 0xbf800002, 0xb9740002,
0xbe801f70, 0xb8f5f803,
0x8675ff75, 0x00000100,
0xbf840006, 0xbefa0080,
@@ -168,7 +171,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x807c847c, 0x806eff6e,
0x00000400, 0xbf0a757c,
0xbf85ffef, 0xbf9c0000,
- 0xbf8200ca, 0xbef8007e,
+ 0xbf8200cd, 0xbef8007e,
0x8679ff7f, 0x0000ffff,
0x8779ff79, 0x00040000,
0xbefa0080, 0xbefb00ff,
@@ -268,16 +271,18 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x8f739773, 0xb976f807,
0x8671ff71, 0x0000ffff,
0x86fe7e7e, 0x86ea6a6a,
- 0xb974f802, 0xbf8a0000,
- 0x95807370, 0xbf810000,
+ 0x8f768374, 0xb976e0c2,
+ 0xbf800002, 0xb9740002,
+ 0xbf8a0000, 0x95807370,
+ 0xbf810000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf82015a,
+ 0xbf820001, 0xbf82015d,
0xb8f8f802, 0x89788678,
0xb8f1f803, 0x866eff71,
- 0x00000400, 0xbf850034,
+ 0x00000400, 0xbf850037,
0x866eff71, 0x00000800,
0xbf850003, 0x866eff71,
0x00000100, 0xbf840008,
@@ -303,258 +308,261 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x8f6e8b77, 0x866eff6e,
0x001f8000, 0xb96ef807,
0x86fe7e7e, 0x86ea6a6a,
- 0xb978f802, 0xbe801f6c,
- 0x866dff6d, 0x0000ffff,
- 0xbef00080, 0xb9700283,
- 0xb8f02407, 0x8e709c70,
- 0x876d706d, 0xb8f003c7,
- 0x8e709b70, 0x876d706d,
- 0xb8f0f807, 0x8670ff70,
- 0x00007fff, 0xb970f807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x87708478, 0xb970f802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8f11605,
- 0x80718171, 0x8e718671,
- 0x80707170, 0x80707e70,
- 0x8271807f, 0x8671ff71,
- 0x0000ffff, 0xc0471cb8,
- 0x00000040, 0xbf8cc07f,
- 0xc04b1d38, 0x00000048,
- 0xbf8cc07f, 0xc0431e78,
- 0x00000058, 0xbf8cc07f,
- 0xc0471eb8, 0x0000005c,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x8670ff7f,
- 0x08000000, 0x8f708370,
- 0x87777077, 0x8670ff7f,
- 0x70000000, 0x8f708170,
- 0x87777077, 0xbefb007c,
- 0xbefa0080, 0xb8fa2a05,
- 0x807a817a, 0x8e7a8a7a,
- 0xb8f01605, 0x80708170,
- 0x8e708670, 0x807a707a,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xbefe007c,
- 0xbefc007a, 0xc0611efa,
- 0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
+ 0x8f6e8378, 0xb96ee0c2,
+ 0xbf800002, 0xb9780002,
+ 0xbe801f6c, 0x866dff6d,
+ 0x0000ffff, 0xbef00080,
+ 0xb9700283, 0xb8f02407,
+ 0x8e709c70, 0x876d706d,
+ 0xb8f003c7, 0x8e709b70,
+ 0x876d706d, 0xb8f0f807,
+ 0x8670ff70, 0x00007fff,
+ 0xb970f807, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x87708478,
+ 0xb970f802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8f11605, 0x80718171,
+ 0x8e718671, 0x80707170,
+ 0x80707e70, 0x8271807f,
+ 0x8671ff71, 0x0000ffff,
+ 0xc0471cb8, 0x00000040,
+ 0xbf8cc07f, 0xc04b1d38,
+ 0x00000048, 0xbf8cc07f,
+ 0xc0431e78, 0x00000058,
+ 0xbf8cc07f, 0xc0471eb8,
+ 0x0000005c, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x8670ff7f, 0x08000000,
+ 0x8f708370, 0x87777077,
+ 0x8670ff7f, 0x70000000,
+ 0x8f708170, 0x87777077,
+ 0xbefb007c, 0xbefa0080,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f01605,
+ 0x80708170, 0x8e708670,
+ 0x807a707a, 0xbef60084,
+ 0xbef600ff, 0x01000000,
0xbefe007c, 0xbefc007a,
- 0xc0611b3a, 0x0000007c,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611b7a,
+ 0xbefc007a, 0xc0611b3a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611bba, 0x0000007c,
+ 0xc0611b7a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611bfa,
+ 0xbefc007a, 0xc0611bba,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611e3a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8f1f803,
- 0xbefe007c, 0xbefc007a,
- 0xc0611c7a, 0x0000007c,
+ 0xc0611bfa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611a3a,
+ 0xbefc007a, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8f1f803, 0xbefe007c,
+ 0xbefc007a, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611a7a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8fbf801,
- 0xbefe007c, 0xbefc007a,
- 0xc0611efa, 0x0000007c,
+ 0xc0611a3a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0x8670ff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f70, 0xb8fa2a05,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc007a, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8fbf801, 0xbefe007c,
+ 0xbefc007a, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0x8670ff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f70,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f11605,
+ 0x80718171, 0x8e718471,
+ 0x8e768271, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747a74, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a717c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbefa0080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0xe0724000,
+ 0x7a1d0000, 0xe0724100,
+ 0x7a1d0100, 0xe0724200,
+ 0x7a1d0200, 0xe0724300,
+ 0x7a1d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f14306,
+ 0x8671c171, 0xbf84002c,
+ 0xbf8a0000, 0x8670ff6f,
+ 0x04000000, 0xbf840028,
+ 0x8e718671, 0x8e718271,
+ 0xbef60071, 0xb8fa2a05,
0x807a817a, 0x8e7a8a7a,
- 0xb8f11605, 0x80718171,
- 0x8e718471, 0x8e768271,
+ 0xb8f01605, 0x80708170,
+ 0x8e708670, 0x807a707a,
+ 0x807aff7a, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747a74,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a717c, 0xbf85ffe7,
- 0xbef40172, 0xbefa0080,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x7a1d0002, 0x68040702,
+ 0xd0c9006a, 0x0000e302,
+ 0xbf87fff7, 0xbef70000,
+ 0xbefa00ff, 0x00000400,
0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
+ 0xb8f12a05, 0x80718171,
+ 0x8e718271, 0x8e768871,
0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a717c,
+ 0xbf840015, 0xbf11017c,
+ 0x8071ff71, 0x00001000,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x7a1d0000,
0xe0724100, 0x7a1d0100,
0xe0724200, 0x7a1d0200,
0xe0724300, 0x7a1d0300,
+ 0x807c847c, 0x807aff7a,
+ 0x00000400, 0xbf0a717c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200dc, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x08000000, 0x8f6e836e,
+ 0x87776e77, 0x866eff7f,
+ 0x70000000, 0x8f6e816e,
+ 0x87776e77, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
0xbefe00c1, 0xbeff00c1,
- 0xb8f14306, 0x8671c171,
- 0xbf84002c, 0xbf8a0000,
- 0x8670ff6f, 0x04000000,
- 0xbf840028, 0x8e718671,
- 0x8e718271, 0xbef60071,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8f01605,
- 0x80708170, 0x8e708670,
- 0x807a707a, 0x807aff7a,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
0x00000080, 0xbef600ff,
0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x7a1d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000e302, 0xbf87fff7,
- 0xbef70000, 0xbefa00ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8f12a05,
- 0x80718171, 0x8e718271,
- 0x8e768871, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a717c, 0xbf840015,
- 0xbf11017c, 0x8071ff71,
- 0x00001000, 0x7e000300,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbef80080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x8e76886f, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x806fff6f, 0x00008000,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x7a1d0000, 0xe0724100,
- 0x7a1d0100, 0xe0724200,
- 0x7a1d0200, 0xe0724300,
- 0x7a1d0300, 0x807c847c,
- 0x807aff7a, 0x00000400,
- 0xbf0a717c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200d9,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbef80080,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x8e76886f,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0x806fff6f,
- 0x00008000, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0xbef60084,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe007a,
- 0xbeff007b, 0x866f71ff,
- 0x000003ff, 0xb96f4803,
- 0x866f71ff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc0071cb7, 0x00000040,
- 0xc00b1d37, 0x00000048,
- 0xc0031e77, 0x00000058,
- 0xc0071eb7, 0x0000005c,
- 0xbf8cc07f, 0x866fff6d,
- 0xf0000000, 0x8f6f9c6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x08000000, 0x8f6f9b6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff70, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0xb970f802, 0xbf8a0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe007a, 0xbeff007b,
+ 0x866f71ff, 0x000003ff,
+ 0xb96f4803, 0x866f71ff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc0071cb7,
+ 0x00000040, 0xc00b1d37,
+ 0x00000048, 0xc0031e77,
+ 0x00000058, 0xc0071eb7,
+ 0x0000005c, 0xbf8cc07f,
+ 0x866fff6d, 0xf0000000,
+ 0x8f6f9c6f, 0x8e6f906f,
+ 0xbeee0080, 0x876e6f6e,
+ 0x866fff6d, 0x08000000,
+ 0x8f6f9b6f, 0x8e6f8f6f,
+ 0x876e6f6e, 0x866fff70,
+ 0x00800000, 0x8f6f976f,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8370,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9700002, 0xbf8a0000,
0x95806f6c, 0xbf810000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index a2a04bb64096..abe1a5da29fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -251,7 +255,7 @@ if (!EMU_RUN_HACK)
s_waitcnt lgkmcnt(0)
s_or_b32 ttmp7, ttmp8, ttmp9
 s_cbranch_scc0 L_NO_NEXT_TRAP //next-level trap handler has not been set
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
L_NO_NEXT_TRAP:
@@ -262,7 +266,7 @@ L_NO_NEXT_TRAP:
s_addc_u32 ttmp1, ttmp1, 0
L_EXCP_CASE:
s_and_b32 ttmp1, ttmp1, 0xFFFF
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_rfe_b64 [ttmp0, ttmp1]
end
// ********* End handling of non-CWSR traps *******************
@@ -1053,7 +1057,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1134,3 +1138,11 @@ end
function get_hwreg_size_bytes
return 128 //HWREG size 128 bytes
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
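The net effect of set_status_without_spi_prio() is to restore every STATUS field except SPI_PRIO (bits 2:1): bits 31:3 are written first, then bit 0, so the two-bit SPI_PRIO field is never overwritten. A C model of the same masking, illustrative only:

#include <stdint.h>

#define SQ_WAVE_STATUS_SPI_PRIO_MASK	0x00000006u	/* STATUS bits 2:1 */

/* Keep whatever SPI_PRIO the scheduler set; restore everything else. */
static uint32_t status_without_spi_prio(uint32_t hw_status,
					uint32_t saved_status)
{
	return (hw_status & SQ_WAVE_STATUS_SPI_PRIO_MASK) |
	       (saved_status & ~SQ_WAVE_STATUS_SPI_PRIO_MASK);
}
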
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 998be96be736..0bb9c577b3a2 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -317,7 +321,7 @@ L_EXCP_CASE:
// Restore SQ_WAVE_STATUS.
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
+ set_status_without_spi_prio(s_save_status, ttmp2)
s_rfe_b64 [ttmp0, ttmp1]
end
@@ -1120,7 +1124,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1212,3 +1216,11 @@ function ack_sqc_store_workaround
s_waitcnt lgkmcnt(0)
end
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f64c5551cdba..297b36c26a05 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -122,6 +122,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
+ if (kfd_is_locked())
+ return -EAGAIN;
+
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -389,6 +392,61 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return retval;
}
+static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ int retval;
+ const int max_num_cus = 1024;
+ struct kfd_ioctl_set_cu_mask_args *args = data;
+ struct queue_properties properties;
+ uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
+ size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
+
+ if ((args->num_cu_mask % 32) != 0) {
+ pr_debug("num_cu_mask 0x%x must be a multiple of 32",
+ args->num_cu_mask);
+ return -EINVAL;
+ }
+
+ properties.cu_mask_count = args->num_cu_mask;
+ if (properties.cu_mask_count == 0) {
+ pr_debug("CU mask cannot be 0");
+ return -EINVAL;
+ }
+
+ /* To prevent an unreasonably large CU mask size, set an arbitrary
+ * limit of max_num_cus bits. Any mask bits beyond that limit are
+ * dropped; only the first max_num_cus bits are used.
+ */
+ if (properties.cu_mask_count > max_num_cus) {
+ pr_debug("CU mask cannot be greater than 1024 bits");
+ properties.cu_mask_count = max_num_cus;
+ cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
+ }
+
+ properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
+ if (!properties.cu_mask)
+ return -ENOMEM;
+
+ retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+ if (retval) {
+ pr_debug("Could not copy CU mask from userspace");
+ kfree(properties.cu_mask);
+ return -EFAULT;
+ }
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+
+ mutex_unlock(&p->mutex);
+
+ if (retval)
+ kfree(properties.cu_mask);
+
+ return retval;
+}
+
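A hypothetical user-space sketch of driving this ioctl. The field names (queue_id, num_cu_mask, cu_mask_ptr) and AMDKFD_IOC_SET_CU_MASK appear elsewhere in this series, but the exact uapi struct layout lives in kfd_ioctl.h, outside this hunk:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "kfd_ioctl.h"		/* assumed to define AMDKFD_IOC_SET_CU_MASK */

/* Restrict a queue to the first 32 of 64 CU mask bits. */
static int set_cu_mask_example(int kfd_fd, uint32_t queue_id)
{
	uint32_t mask[2] = { 0xffffffff, 0x0 };	/* multiple of 32 bits */
	struct kfd_ioctl_set_cu_mask_args args;

	memset(&args, 0, sizeof(args));
	args.queue_id = queue_id;
	args.num_cu_mask = 64;			/* bits in mask[] */
	args.cu_mask_ptr = (uint64_t)(uintptr_t)mask;

	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
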
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -754,7 +812,6 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
{
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
- struct timespec64 time;
dev = kfd_device_by_id(args->gpu_id);
if (dev)
@@ -766,11 +823,8 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
args->gpu_clock_counter = 0;
/* No access to rdtsc. Using raw monotonic time */
- getrawmonotonic64(&time);
- args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
-
- get_monotonic_boottime64(&time);
- args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
+ args->cpu_clock_counter = ktime_get_raw_ns();
+ args->system_clock_counter = ktime_get_boot_ns();
 /* Since the counter is in nanoseconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
@@ -1558,6 +1612,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
kfd_ioctl_unmap_memory_from_gpu, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 296b3f230280..ee4996029a86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -189,6 +189,21 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
return 0;
}
+static struct kfd_mem_properties *
+find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
+ struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *props;
+
+ list_for_each_entry(props, &dev->mem_props, list) {
+ if (props->heap_type == heap_type
+ && props->flags == flags
+ && props->width == width)
+ return props;
+ }
+
+ return NULL;
+}
/* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
* topology device present in the device_list
*/
@@ -197,36 +212,56 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
{
struct kfd_mem_properties *props;
struct kfd_topology_device *dev;
+ uint32_t heap_type;
+ uint64_t size_in_bytes;
+ uint32_t flags = 0;
+ uint32_t width;
pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->proximity_domain);
list_for_each_entry(dev, device_list, list) {
if (mem->proximity_domain == dev->proximity_domain) {
- props = kfd_alloc_struct(props);
- if (!props)
- return -ENOMEM;
-
/* We're on GPU node */
if (dev->node_props.cpu_cores_count == 0) {
/* APU */
if (mem->visibility_type == 0)
- props->heap_type =
+ heap_type =
HSA_MEM_HEAP_TYPE_FB_PRIVATE;
/* dGPU */
else
- props->heap_type = mem->visibility_type;
+ heap_type = mem->visibility_type;
} else
- props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+ heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
- props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
- props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+ flags |= HSA_MEM_FLAGS_NON_VOLATILE;
- props->size_in_bytes =
+ size_in_bytes =
((uint64_t)mem->length_high << 32) +
mem->length_low;
- props->width = mem->width;
+ width = mem->width;
+
+ /* Multiple banks of the same type are aggregated into
+ * one. User mode doesn't care about multiple physical
+ * memory segments; it sees them as a single virtual
+ * heap.
+ */
+ props = find_subtype_mem(heap_type, flags, width, dev);
+ if (props) {
+ props->size_in_bytes += size_in_bytes;
+ break;
+ }
+
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->heap_type = heap_type;
+ props->flags = flags;
+ props->size_in_bytes = size_in_bytes;
+ props->width = width;
dev->node_props.mem_banks_count++;
list_add_tail(&props->list, &dev->mem_props);
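Illustration of the aggregation above: if a CRAT table reports two 4 GiB banks with identical (heap_type, flags, width), the first entry allocates the props node and the second merely adds to props->size_in_bytes, so user mode sees a single 8 GiB heap instead of two separate banks.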
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index afb26f205d29..a3441b0e385b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -38,7 +38,6 @@
#include "kfd_dbgmgr.h"
#include "kfd_dbgdev.h"
#include "kfd_device_queue_manager.h"
-#include "../../radeon/cik_reg.h"
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
index 03424c20920c..0619c777b47e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
@@ -60,6 +60,9 @@ enum {
SH_REG_SIZE = SH_REG_END - SH_REG_BASE
};
+/* SQ_CMD definitions */
+#define SQ_CMD 0x8DEC
+
enum SQ_IND_CMD_CMD {
SQ_IND_CMD_CMD_NULL = 0x00000000,
SQ_IND_CMD_CMD_HALT = 0x00000001,
@@ -190,4 +193,38 @@ union ULARGE_INTEGER {
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type);
+union TCP_WATCH_CNTL_BITS {
+ struct {
+ uint32_t mask:24;
+ uint32_t vmid:4;
+ uint32_t atc:1;
+ uint32_t mode:2;
+ uint32_t valid:1;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+enum {
+ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+ /* extend the mask to 26 bits in order to match the low address field */
+ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+enum {
+ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
+ MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+ ADDRESS_WATCH_REG_ADDR_HI = 0,
+ ADDRESS_WATCH_REG_ADDR_LO,
+ ADDRESS_WATCH_REG_CNTL,
+ ADDRESS_WATCH_REG_MAX
+};
+
#endif /* KFD_DBGDEV_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4bd6ebfaf425..ab37d36d9cd6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -21,6 +21,8 @@
*/
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
#include "kfd_priv.h"
static struct dentry *debugfs_root;
@@ -32,6 +34,38 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show, NULL);
}
+static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+ const char __user *user_buf, size_t size, loff_t *ppos)
+{
+ struct kfd_dev *dev;
+ char tmp[16];
+ uint32_t gpu_id;
+ int ret = -EINVAL;
+
+ memset(tmp, 0, 16);
+ if (size >= 16) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ if (copy_from_user(tmp, user_buf, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (kstrtouint(tmp, 10, &gpu_id)) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ dev = kfd_device_by_id(gpu_id);
+ if (dev) {
+ kfd_debugfs_hang_hws(dev);
+ ret = size;
+ } else {
+ pr_err("Cannot find device %d.\n", gpu_id);
+ }
+
+out:
+ return ret;
+}
+
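Usage note: writing a decimal gpu_id to this file, e.g. echo <gpu_id> > /sys/kernel/debug/kfd/hang_hws, calls kfd_debugfs_hang_hws() on the matching device; input that is 16 bytes or longer, not a valid decimal id, or names an unknown device is rejected with -EINVAL.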
static const struct file_operations kfd_debugfs_fops = {
.owner = THIS_MODULE,
.open = kfd_debugfs_open,
@@ -40,6 +74,15 @@ static const struct file_operations kfd_debugfs_fops = {
.release = single_release,
};
+static const struct file_operations kfd_debugfs_hang_hws_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+ .read = seq_read,
+ .write = kfd_debugfs_hang_hws_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void kfd_debugfs_init(void)
{
struct dentry *ent;
@@ -65,6 +108,11 @@ void kfd_debugfs_init(void)
ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device,
&kfd_debugfs_fops);
+
+ if (!debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ NULL, &kfd_debugfs_hang_hws_fops))
+ pr_warn("Failed to create hang_hws in kfd debugfs\n");
+
if (!ent)
pr_warn("Failed to create rls in kfd debugfs\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7ee6cec2c060..1b048715ab8a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -30,7 +30,13 @@
#include "kfd_iommu.h"
#define MQD_SIZE_ALIGNED 768
-static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
+
+/*
+ * kfd_locked is used to lock the kfd driver during suspend or reset.
+ * Once locked, the kfd driver stops any further GPU execution, and
+ * process creation (open) returns -EAGAIN.
+ */
+static atomic_t kfd_locked = ATOMIC_INIT(0);
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
@@ -46,6 +52,7 @@ static const struct kfd_device_info kaveri_device_info = {
.supports_cwsr = false,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info carrizo_device_info = {
@@ -61,6 +68,22 @@ static const struct kfd_device_info carrizo_device_info = {
.supports_cwsr = true,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+};
+
+static const struct kfd_device_info raven_device_info = {
+ .asic_family = CHIP_RAVEN,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = true,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 1,
};
#endif
@@ -77,6 +100,7 @@ static const struct kfd_device_info hawaii_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_device_info = {
@@ -91,6 +115,7 @@ static const struct kfd_device_info tonga_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_vf_device_info = {
@@ -105,6 +130,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_device_info = {
@@ -119,6 +145,7 @@ static const struct kfd_device_info fiji_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_vf_device_info = {
@@ -133,6 +160,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -148,6 +176,7 @@ static const struct kfd_device_info polaris10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris10_vf_device_info = {
@@ -162,6 +191,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris11_device_info = {
@@ -176,6 +206,7 @@ static const struct kfd_device_info polaris11_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_device_info = {
@@ -190,6 +221,7 @@ static const struct kfd_device_info vega10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_vf_device_info = {
@@ -204,6 +236,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -241,6 +274,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9875, &carrizo_device_info }, /* Carrizo */
{ 0x9876, &carrizo_device_info }, /* Carrizo */
{ 0x9877, &carrizo_device_info }, /* Carrizo */
+ { 0x15DD, &raven_device_info }, /* Raven */
#endif
{ 0x67A0, &hawaii_device_info }, /* Hawaii */
{ 0x67A1, &hawaii_device_info }, /* Hawaii */
@@ -514,13 +548,54 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfree(kfd);
}
+int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return 0;
+ kgd2kfd_suspend(kfd);
+
+ /* hold dqm->lock to prevent further execution */
+ dqm_lock(kfd->dqm);
+
+ kfd_signal_reset_event(kfd);
+ return 0;
+}
+
+/*
+ * FIXME: KFD won't be able to resume existing processes for now.
+ * All existing processes are kept in an evicted state and
+ * we wait for them to terminate.
+ */
+
+int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ int ret, count;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ dqm_unlock(kfd->dqm);
+
+ ret = kfd_resume(kfd);
+ if (ret)
+ return ret;
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count != 0, "KFD reset ref. error");
+ return 0;
+}
+
+bool kfd_is_locked(void)
+{
+ return (atomic_read(&kfd_locked) > 0);
+}
+
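A sketch of how the KGD side is expected to pair these entry points around a GPU reset; the amdgpu caller is outside this diff, so the function below is illustrative only:

/* Illustrative caller: pre_reset takes the kfd_locked reference (via
 * kgd2kfd_suspend) and leaves dqm->lock held; post_reset must always
 * follow to restart the scheduler and drop both, or KFD stays locked.
 */
static int gpu_reset_sketch(struct kfd_dev *kfd)
{
	int r = kgd2kfd_pre_reset(kfd);

	if (r)
		return r;

	/* ... the actual hardware reset happens here ... */

	return kgd2kfd_post_reset(kfd);
}
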
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return;
/* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_device_suspended) == 1)
+ if (atomic_inc_return(&kfd_locked) == 1)
kfd_suspend_all_processes();
kfd->dqm->ops.stop(kfd->dqm);
@@ -539,7 +614,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_device_suspended);
+ count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
@@ -577,14 +652,24 @@ dqm_start_error:
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
+ uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+ bool is_patched = false;
+
if (!kfd->init_complete)
return;
+ if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
+ dev_err_once(kfd_device, "Ring entry too small\n");
+ return;
+ }
+
spin_lock(&kfd->interrupt_lock);
if (kfd->interrupts_active
- && interrupt_is_wanted(kfd, ih_ring_entry)
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+ && interrupt_is_wanted(kfd, ih_ring_entry,
+ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+ is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock(&kfd->interrupt_lock);
@@ -739,8 +824,8 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
return -ENOMEM;
- *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if ((*mem_obj) == NULL)
+ *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!(*mem_obj))
return -ENOMEM;
pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
@@ -857,3 +942,26 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
kfree(mem_obj);
return 0;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* This function sends a packet to the HIQ to hang the HWS,
+ * which triggers a GPU reset and brings the HWS back to a normal state
+ */
+int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+{
+ int r = 0;
+
+ if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
+ pr_err("HWS is not enabled");
+ return -EINVAL;
+ }
+
+ r = pm_debugfs_hang_hws(&dev->dqm->packets);
+ if (!r)
+ r = dqm_debugfs_execute_queues(dev->dqm);
+
+ return r;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 668ad07ebe1f..ec0d62a16e53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -61,6 +61,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id);
+static void kfd_process_hw_exception(struct work_struct *work);
+
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
@@ -99,6 +101,17 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_pipe_per_mec;
}
+static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines;
+}
+
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines
+ * KFD_SDMA_QUEUES_PER_ENGINE;
+}
+
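Worked example of the sdma_id mapping used later in this file (sdma_queue_id = sdma_id / num_engines, sdma_engine_id = sdma_id % num_engines), assuming two engines; KFD_SDMA_QUEUES_PER_ENGINE == 2 is an assumption here, not taken from this hunk:

#include <stdio.h>

int main(void)
{
	const unsigned int engines = 2, queues_per_engine = 2;
	unsigned int id;

	for (id = 0; id < engines * queues_per_engine; id++)
		printf("sdma_id %u -> engine %u, queue %u\n",
		       id, id % engines, id / engines);
	return 0;	/* 0->e0/q0, 1->e1/q0, 2->e0/q1, 3->e1/q1 */
}
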
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -240,7 +253,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
print_queue(q);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -297,7 +310,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -346,10 +359,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_hqd(dqm, q);
@@ -360,7 +373,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_deallocate_hqd;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -374,15 +387,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (!q->properties.is_active)
return 0;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
- q->process->mm);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_hqd:
@@ -399,11 +412,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd)
+ if (!mqd_mgr)
return -ENOMEM;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -420,14 +433,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
q->pipe, q->queue);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
@@ -457,9 +470,9 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -467,19 +480,19 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
bool prev_active = false;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
pdd = kfd_get_process_device_data(q->device, q->process);
if (!pdd) {
retval = -ENODEV;
goto out_unlock;
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_unlock;
}
@@ -506,7 +519,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
} else if (prev_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
@@ -515,7 +528,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
/*
* check active state vs. the previous state and modify
@@ -533,44 +546,44 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA))
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
&q->properties, q->process->mm);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
static struct mqd_manager *get_mqd_manager(
struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
pr_debug("mqd type %d\n", type);
- mqd = dqm->mqds[type];
- if (!mqd) {
- mqd = mqd_manager_init(type, dqm->dev);
- if (!mqd)
+ mqd_mgr = dqm->mqd_mgrs[type];
+ if (!mqd_mgr) {
+ mqd_mgr = mqd_manager_init(type, dqm->dev);
+ if (!mqd_mgr)
pr_err("mqd manager is NULL");
- dqm->mqds[type] = mqd;
+ dqm->mqd_mgrs[type] = mqd_mgr;
}
- return mqd;
+ return mqd_mgr;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -582,16 +595,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_active)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot evict queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = true;
q->properties.is_active = false;
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval)
@@ -600,7 +613,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -611,7 +624,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -633,7 +646,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -641,7 +654,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
uint32_t pd_base;
int retval = 0;
@@ -650,7 +663,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -677,16 +690,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot restore queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = false;
q->properties.is_active = true;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties,
q->process->mm);
if (retval)
@@ -695,7 +708,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
}
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -711,7 +724,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -739,7 +752,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (!retval)
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -761,7 +774,7 @@ static int register_process(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_add(&n->list, &dqm->queues);
/* Update PD Base in QPD */
@@ -769,9 +782,10 @@ static int register_process(struct device_queue_manager *dqm,
retval = dqm->asic_ops.update_qpd(dqm, qpd);
- dqm->processes_count++;
+ if (dqm->processes_count++ == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -786,20 +800,22 @@ static int unregister_process(struct device_queue_manager *dqm,
list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_for_each_entry_safe(cur, next, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
kfree(cur);
- dqm->processes_count--;
+ if (--dqm->processes_count == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(
+ dqm->dev->kgd, true);
goto out;
}
}
/* qpd not found in dqm list */
retval = 1;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -838,7 +854,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
if (!dqm->allocated_queues)
return -ENOMEM;
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
@@ -853,7 +869,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
}
dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
return 0;
}
@@ -866,8 +882,8 @@ static void uninitialize(struct device_queue_manager *dqm)
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
- kfree(dqm->mqds[i]);
- mutex_destroy(&dqm->lock);
+ kfree(dqm->mqd_mgrs[i]);
+ mutex_destroy(&dqm->lock_hidden);
kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
@@ -901,7 +917,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id)
{
- if (sdma_queue_id >= CIK_SDMA_QUEUES)
+ if (sdma_queue_id >= get_num_sdma_queues(dqm))
return;
dqm->sdma_bitmap |= (1 << sdma_queue_id);
}
@@ -910,19 +926,19 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
int retval;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval)
return retval;
- q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
- q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
+ q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);
retval = allocate_doorbell(qpd, q);
if (retval)
@@ -933,19 +949,20 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
- retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
+ NULL);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
@@ -1003,12 +1020,14 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
{
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
+ INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
return 0;
}
@@ -1041,9 +1060,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
+	/* Clear the hang status when the driver tries to start the HW scheduler */
+ dqm->is_hws_hang = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
@@ -1055,9 +1076,9 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
@@ -1069,11 +1090,11 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return -EPERM;
}
@@ -1089,7 +1110,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count++;
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
}
@@ -1098,7 +1119,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
@@ -1110,18 +1131,18 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -1135,19 +1156,19 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (retval)
goto out_unlock;
q->properties.sdma_queue_id =
- q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id / get_num_sdma_engines(dqm);
q->properties.sdma_engine_id =
- q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id % get_num_sdma_engines(dqm);
}
retval = allocate_doorbell(qpd, q);
if (retval)
goto out_deallocate_sdma_queue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
@@ -1164,7 +1185,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -1188,7 +1209,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
out_deallocate_doorbell:
@@ -1197,7 +1218,8 @@ out_deallocate_sdma_queue:
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
+
return retval;
}
@@ -1210,6 +1232,13 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
while (*fence_addr != fence_value) {
if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
+			/* In the HWS case, this is used to halt the driver thread
+			 * so it does not disturb CP state before scan dumps are
+			 * taken for FW debugging.
+			 */
+ while (halt_if_hws_hang)
+ schedule();
+
return -ETIME;
}
schedule();
@@ -1254,6 +1283,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (dqm->is_hws_hang)
+ return -EIO;
if (!dqm->active_runlist)
return retval;
@@ -1292,9 +1323,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
{
int retval;
+ if (dqm->is_hws_hang)
+ return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
if (retval) {
pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dqm->is_hws_hang = true;
+ schedule_work(&dqm->hw_exception_work);
return retval;
}
@@ -1306,7 +1341,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
bool preempt_all_queues;
preempt_all_queues = false;
@@ -1314,7 +1349,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
retval = 0;
/* remove queue from list to prevent rescheduling after preemption */
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->is_debug) {
/*
@@ -1326,9 +1361,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto failed;
}
@@ -1350,7 +1385,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = true;
}
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
/*
* Unconditionally decrement this counter, regardless of the queue's
@@ -1360,14 +1395,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
failed:
failed_try_destroy_debugged_queue:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1391,7 +1426,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
if (!dqm->asic_ops.set_cache_memory_policy)
return retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (alternate_aperture_size == 0) {
/* base > limit disables APE1 */
@@ -1437,7 +1472,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1468,7 +1503,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct device_process_node *cur, *next_dpn;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clear all user mode queues */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
@@ -1489,7 +1524,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
}
}
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1500,14 +1535,14 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
int retval;
struct queue *q, *next;
struct kernel_queue *kq, *kq_next;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
enum kfd_unmap_queues_filter filter =
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
@@ -1542,7 +1577,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
retval = execute_queues_cpsch(dqm, filter, 0);
- if (retval || qpd->reset_wavefronts) {
+ if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
qpd->reset_wavefronts = false;
@@ -1550,19 +1585,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* lastly, free mqd resources */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out;
}
list_del(&q->list);
qpd->queue_count--;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1683,6 +1718,30 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
kfree(dqm);
}
+int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ unsigned int pasid)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ int ret = 0;
+
+ if (!p)
+ return -EINVAL;
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd)
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ kfd_unref_process(p);
+
+ return ret;
+}
+
+static void kfd_process_hw_exception(struct work_struct *work)
+{
+ struct device_queue_manager *dqm = container_of(work,
+ struct device_queue_manager, hw_exception_work);
+ dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+}
+
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
@@ -1746,8 +1805,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
- for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->kgd, pipe, queue, &dump, &n_regs);
if (r)
@@ -1764,4 +1823,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
return r;
}
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
+{
+ int r = 0;
+
+ dqm_lock(dqm);
+ dqm->active_runlist = true;
+ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm_unlock(dqm);
+
+ return r;
+}
+
#endif
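
The hang handling added above (is_hws_hang plus hw_exception_work feeding gpu_recover) follows the usual kernel split between detection and recovery: the context that notices the failure only sets a flag and schedules deferred work, while the heavyweight reset runs later from a workqueue in process context. A minimal sketch of that pattern with hypothetical names, not the amdkfd API:

	#include <linux/workqueue.h>

	struct engine {
		bool is_hung;
		struct work_struct recover_work;	/* INIT_WORK() at init time */
	};

	static void engine_recover_worker(struct work_struct *work)
	{
		struct engine *e = container_of(work, struct engine, recover_work);

		/* Runs in process context: may sleep, take mutexes, reset HW. */
		e->is_hung = false;
	}

	static void engine_detect_hang(struct engine *e)
	{
		/* Detection context must stay cheap: flag it and defer the rest. */
		e->is_hung = true;
		schedule_work(&e->recover_work);
	}

As in execute_queues_cpsch() above, later entry points check the flag and bail out with -EIO instead of touching hardware that is already being recovered.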
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 59a6b1956932..00da3169a004 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -26,15 +26,14 @@
#include <linux/rwsem.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#define KFD_UNMAP_LATENCY_MS (4000)
#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
-
-#define CIK_SDMA_QUEUES (4)
-#define CIK_SDMA_QUEUES_PER_ENGINE (2)
-#define CIK_SDMA_ENGINE_NUM (2)
+#define KFD_SDMA_QUEUES_PER_ENGINE (2)
struct device_process_node {
struct qcm_process_device *qpd;
@@ -170,11 +169,12 @@ struct device_queue_manager {
struct device_queue_manager_ops ops;
struct device_queue_manager_asic_ops asic_ops;
- struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
struct kfd_dev *dev;
- struct mutex lock;
+ struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
struct list_head queues;
+ unsigned int saved_flags;
unsigned int processes_count;
unsigned int queue_count;
unsigned int sdma_queue_count;
@@ -190,6 +190,10 @@ struct device_queue_manager {
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
+
+ /* hw exception */
+ bool is_hws_hang;
+ struct work_struct hw_exception_work;
};
void device_queue_manager_init_cik(
@@ -207,6 +211,7 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
@@ -219,4 +224,19 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
return (pdd->lds_base >> 60) & 0x0E;
}
+/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void dqm_lock(struct device_queue_manager *dqm)
+{
+ mutex_lock(&dqm->lock_hidden);
+ dqm->saved_flags = memalloc_nofs_save();
+}
+static inline void dqm_unlock(struct device_queue_manager *dqm)
+{
+ memalloc_nofs_restore(dqm->saved_flags);
+ mutex_unlock(&dqm->lock_hidden);
+}
+
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
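
Because dqm_lock() opens a memalloc_nofs scope, any allocation made while the lock is held is implicitly treated as GFP_NOFS, which is also why the mqd managers elsewhere in this patch can switch back to plain GFP_KERNEL. An illustrative caller-side sketch under that assumption (hypothetical helper, not code from this patch):

	static int example_add_node(struct device_queue_manager *dqm)
	{
		struct device_process_node *n;

		dqm_lock(dqm);
		/* GFP_KERNEL is safe here: the memalloc_nofs scope entered by
		 * dqm_lock() masks __GFP_FS, so this allocation cannot recurse
		 * into filesystem reclaim and deadlock with an MMU notifier.
		 */
		n = kzalloc(sizeof(*n), GFP_KERNEL);
		if (!n) {
			dqm_unlock(dqm);
			return -ENOMEM;
		}
		list_add(&n->list, &dqm->queues);
		dqm_unlock(dqm);
		return 0;
	}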
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 79e5bcf6367c..417515332c35 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (vega10_noretry &&
+ if (noretry &&
!dqm->dev->device_info->needs_iommu_device)
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index c3744d89352c..ebe79bf00145 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = kfd->doorbell_id_offset + inx;
pr_debug("Get kernel queue doorbell\n"
- " doorbell offset == 0x%08X\n"
- " kernel address == %p\n",
- *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+ " doorbell offset == 0x%08X\n"
+ " doorbell index == 0x%x\n",
+ *doorbell_off, inx);
return kfd->doorbell_kernel_ptr + inx;
}
@@ -199,7 +199,8 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
+ * sizeof(u32) / kfd->device_info->doorbell_size;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_available_index);
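
The index calculation fixed above has to undo pointer arithmetic that is in u32 units: db_addr - doorbell_kernel_ptr counts 4-byte steps, so it is scaled to bytes and divided by the per-ASIC doorbell size. A worked example assuming 8-byte doorbells (doorbell_size == 8, as on SOC15 parts):

	/* Illustration only: a doorbell at byte offset 24 from the base. */
	u32 __iomem *base = kfd->doorbell_kernel_ptr;
	u32 __iomem *db   = base + 6;				/* 6 u32s == 24 bytes */
	unsigned int inx  = (db - base) * sizeof(u32) / 8;	/* 24 / 8 == index 3 */

Before this fix the raw u32 offset (6 here) was used directly, which over-counts by a factor of two on devices with 64-bit doorbells and would clear the wrong bit in doorbell_available_index.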
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 5562e94e786a..e9f0e0a1b41c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -850,6 +850,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
ev->memory_exception_data = *ev_data;
}
+ if (type == KFD_EVENT_TYPE_MEMORY) {
+ dev_warn(kfd_device,
+ "Sending SIGSEGV to HSA Process with PID %d ",
+ p->lead_thread->pid);
+ send_sig(SIGSEGV, p->lead_thread, 0);
+ }
+
 	/* Send SIGTERM if no event of type "type" has been found */
if (send_signal) {
if (send_sigterm) {
@@ -904,34 +911,41 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NotPresent = 1;
memory_exception_data.failure.NoExecute = 0;
memory_exception_data.failure.ReadOnly = 0;
- if (vma) {
- if (vma->vm_start > address) {
- memory_exception_data.failure.NotPresent = 1;
- memory_exception_data.failure.NoExecute = 0;
+ if (vma && address >= vma->vm_start) {
+ memory_exception_data.failure.NotPresent = 0;
+
+ if (is_write_requested && !(vma->vm_flags & VM_WRITE))
+ memory_exception_data.failure.ReadOnly = 1;
+ else
memory_exception_data.failure.ReadOnly = 0;
- } else {
- memory_exception_data.failure.NotPresent = 0;
- if (is_write_requested && !(vma->vm_flags & VM_WRITE))
- memory_exception_data.failure.ReadOnly = 1;
- else
- memory_exception_data.failure.ReadOnly = 0;
- if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
- memory_exception_data.failure.NoExecute = 1;
- else
- memory_exception_data.failure.NoExecute = 0;
- }
+
+ if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
+ memory_exception_data.failure.NoExecute = 1;
+ else
+ memory_exception_data.failure.NoExecute = 0;
}
up_read(&mm->mmap_sem);
mmput(mm);
- mutex_lock(&p->event_mutex);
+ pr_debug("notpresent %d, noexecute %d, readonly %d\n",
+ memory_exception_data.failure.NotPresent,
+ memory_exception_data.failure.NoExecute,
+ memory_exception_data.failure.ReadOnly);
- /* Lookup events by type and signal them */
- lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
- &memory_exception_data);
+	/* Workaround on Raven: do not kill the process when memory is freed
+	 * before the IOMMU has finished processing all the outstanding PPRs.
+	 */
+ if (dev->device_info->asic_family != CHIP_RAVEN) {
+ mutex_lock(&p->event_mutex);
+
+ /* Lookup events by type and signal them */
+ lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
+ &memory_exception_data);
+
+ mutex_unlock(&p->event_mutex);
+ }
- mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */
@@ -956,3 +970,67 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
+
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info)
+{
+ struct kfd_event *ev;
+ uint32_t id;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+
+ if (!p)
+ return; /* Presumably process exited. */
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.failure.imprecise = 1;
+ /* Set failure reason */
+ if (info) {
+ memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
+ memory_exception_data.failure.NotPresent =
+ info->prot_valid ? 1 : 0;
+ memory_exception_data.failure.NoExecute =
+ info->prot_exec ? 1 : 0;
+ memory_exception_data.failure.ReadOnly =
+ info->prot_write ? 1 : 0;
+ memory_exception_data.failure.imprecise = 0;
+ }
+ mutex_lock(&p->event_mutex);
+
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+ ev->memory_exception_data = memory_exception_data;
+ set_event(ev);
+ }
+
+ mutex_unlock(&p->event_mutex);
+ kfd_unref_process(p);
+}
+
+void kfd_signal_reset_event(struct kfd_dev *dev)
+{
+ struct kfd_hsa_hw_exception_data hw_exception_data;
+ struct kfd_process *p;
+ struct kfd_event *ev;
+ unsigned int temp;
+ uint32_t id, idx;
+
+	/* Whole-GPU reset caused by a GPU hang; memory contents are lost */
+ memset(&hw_exception_data, 0, sizeof(hw_exception_data));
+ hw_exception_data.gpu_id = dev->id;
+ hw_exception_data.memory_lost = 1;
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->event_mutex);
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+ ev->hw_exception_data = hw_exception_data;
+ set_event(ev);
+ }
+ mutex_unlock(&p->event_mutex);
+ }
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
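
kfd_signal_reset_event() above walks every process under SRCU rather than a plain spinlock because the body takes a sleeping lock (event_mutex); srcu_read_lock() permits sleeping readers while still guarding the hash table against concurrent removal. A minimal sketch of that pattern with hypothetical names:

	#include <linux/srcu.h>
	#include <linux/hashtable.h>
	#include <linux/mutex.h>

	DEFINE_SRCU(table_srcu);
	static DEFINE_HASHTABLE(obj_table, 6);

	struct obj {
		struct hlist_node node;
		struct mutex lock;
	};

	static void signal_all(void)
	{
		struct obj *o;
		unsigned int bkt;
		int idx;

		idx = srcu_read_lock(&table_srcu);
		hash_for_each_rcu(obj_table, bkt, o, node) {
			mutex_lock(&o->lock);	/* sleeping is legal under SRCU */
			/* deliver the event here */
			mutex_unlock(&o->lock);
		}
		srcu_read_unlock(&table_srcu, idx);
	}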
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index abca5bfebbff..c7ac6c73af86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -66,6 +66,7 @@ struct kfd_event {
/* type specific data */
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct kfd_hsa_hw_exception_data hw_exception_data;
};
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 37029baa3346..f836897bbf58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -26,7 +26,9 @@
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
@@ -57,7 +59,9 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
- source_id == SOC15_INTSRC_CP_BAD_OPCODE;
+ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_UTCL2;
}
static void event_interrupt_wq_v9(struct kfd_dev *dev,
@@ -82,7 +86,19 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
kfd_signal_hw_exception_event(pasid);
else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
- /* TODO */
+ struct kfd_vm_fault_info info = {0};
+ uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+ info.vmid = vmid;
+ info.mc_id = client_id;
+ info.page_addr = ih_ring_entry[4] |
+ (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
+ info.prot_valid = ring_id & 0x08;
+ info.prot_read = ring_id & 0x10;
+ info.prot_write = ring_id & 0x20;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+ kfd_signal_vm_fault_event(dev, pasid, &info);
}
}
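
The fault address handed to kfd_signal_vm_fault_event() is assembled from two IH ring dwords: dword 4 holds the low 32 bits of the page number and the low nibble of dword 5 holds bits 35:32, giving a page-frame number that kfd_events.c later shifts by PAGE_SHIFT. A small decode sketch with example values (layout taken from the handler above, not the full SOC15 IH spec):

	#include <stdint.h>

	static uint64_t decode_fault_addr(const uint32_t *ih_ring_entry)
	{
		uint64_t page_addr = ih_ring_entry[4] |
				(uint64_t)(ih_ring_entry[5] & 0xf) << 32;

		/* e.g. entry[4] = 0x00123456, entry[5] = 0x1 -> page 0x100123456,
		 * byte address 0x100123456000 with 4 KiB pages.
		 */
		return page_addr << 12;		/* PAGE_SHIFT for 4 KiB pages */
	}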
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index db6d9336b80d..c56ac47cd318 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -151,13 +151,15 @@ static void interrupt_wq(struct work_struct *work)
ih_ring_entry);
}
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
- ih_ring_entry);
+ ih_ring_entry, patched_ihre, flag);
return wanted != 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index c71817963eea..7a61f38c09e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -190,7 +190,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
{
struct kfd_dev *dev;
- dev_warn(kfd_device,
+ dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 476951d8c91c..9f84b4d9fb88 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
case KFD_QUEUE_TYPE_HIQ:
- kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
KFD_MQD_TYPE_HIQ);
break;
default:
@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
return false;
}
- if (!kq->mqd)
+ if (!kq->mqd_mgr)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
@@ -123,6 +123,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
+ prop.cu_mask = NULL;
if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
@@ -130,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->queue->device = dev;
kq->queue->process = kfd_get_process(current);
- retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+ retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
&kq->queue->mqd_mem_obj,
&kq->queue->gart_mqd_addr,
&kq->queue->properties);
@@ -142,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
- kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
- kq->queue->queue, &kq->queue->properties,
- NULL);
+ kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->pipe, kq->queue->queue,
+ &kq->queue->properties, NULL);
} else {
/* allocate fence for DIQ */
@@ -182,7 +183,7 @@ err_get_kernel_doorbell:
static void uninitialize(struct kernel_queue *kq)
{
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
- kq->mqd->destroy_mqd(kq->mqd,
+ kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -191,7 +192,8 @@ static void uninitialize(struct kernel_queue *kq)
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
- kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+ kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->mqd_mem_obj);
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 97aff2041a5d..a7116a939029 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -70,7 +70,7 @@ struct kernel_queue {
/* data */
struct kfd_dev *dev;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct queue *queue;
uint64_t pending_wptr64;
uint32_t pending_wptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 76bf2dc8aec4..6e1f5c7c2d4b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -47,6 +47,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
.resume_mm = kgd2kfd_resume_mm,
.schedule_evict_and_restore_process =
kgd2kfd_schedule_evict_and_restore_process,
+ .pre_reset = kgd2kfd_pre_reset,
+ .post_reset = kgd2kfd_post_reset,
};
int sched_policy = KFD_SCHED_POLICY_HWS;
@@ -61,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
int cwsr_enable = 1;
module_param(cwsr_enable, int, 0444);
-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
@@ -83,13 +85,19 @@ module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
-int vega10_noretry;
-module_param_named(noretry, vega10_noretry, int, 0644);
+int noretry;
+module_param(noretry, int, 0644);
MODULE_PARM_DESC(noretry,
- "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
+ "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled (default), 1 = retry disabled)");
+
+int halt_if_hws_hang;
+module_param(halt_if_hws_hang, int, 0644);
+MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
static int amdkfd_init_completed;
+
int kgd2kfd_init(unsigned int interface_version,
const struct kgd2kfd_calls **g2f)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 4b8eb506642b..3bc25ab84f34 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -21,7 +21,7 @@
*
*/
-#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
@@ -48,3 +48,42 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
return NULL;
}
+
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask)
+{
+ struct kfd_cu_info cu_info;
+ uint32_t cu_per_sh[4] = {0};
+ int i, se, cu = 0;
+
+ mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+
+ if (cu_mask_count > cu_info.cu_active_number)
+ cu_mask_count = cu_info.cu_active_number;
+
+ for (se = 0; se < cu_info.num_shader_engines; se++)
+ for (i = 0; i < 4; i++)
+ cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
+
+ /* Symmetrically map cu_mask to all SEs:
+ * cu_mask[0] bit0 -> se_mask[0] bit0;
+ * cu_mask[0] bit1 -> se_mask[1] bit0;
+ * ... (if # SE is 4)
+ * cu_mask[0] bit4 -> se_mask[0] bit1;
+ * ...
+ */
+ se = 0;
+ for (i = 0; i < cu_mask_count; i++) {
+ if (cu_mask[i / 32] & (1 << (i % 32)))
+ se_mask[se] |= 1 << cu;
+
+ do {
+ se++;
+ if (se == cu_info.num_shader_engines) {
+ se = 0;
+ cu++;
+ }
+ } while (cu >= cu_per_sh[se] && cu < 32);
+ }
+}
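
With four shader engines, the loop above distributes consecutive cu_mask bits round-robin across SEs: bit 0 -> SE0 CU0, bit 1 -> SE1 CU0, bit 2 -> SE2 CU0, bit 3 -> SE3 CU0, bit 4 -> SE0 CU1, and so on, skipping an SE once its per-SE CU count is exhausted. A standalone sketch of the simple case, assuming four SEs and enough CUs per SE that the skip logic never triggers:

	#include <stdint.h>

	#define NUM_SE 4	/* assumption: 4 shader engines, none exhausted */

	static void map_cu_mask_simple(const uint32_t *cu_mask,
				       uint32_t cu_mask_count, uint32_t *se_mask)
	{
		uint32_t i, se = 0, cu = 0;

		for (i = 0; i < cu_mask_count; i++) {
			if (cu_mask[i / 32] & (1u << (i % 32)))
				se_mask[se] |= 1u << cu;
			if (++se == NUM_SE) {	/* wrap to the next CU position */
				se = 0;
				cu++;
			}
		}
	}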
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 8972bcfbf701..4e84052d4e21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -93,4 +93,8 @@ struct mqd_manager {
struct kfd_dev *dev;
};
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask);
+
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 06eaa218eba6..47243165a082 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -41,6 +41,31 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
return (struct cik_sdma_rlc_registers *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -408,7 +435,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 684054ff02cd..f5fc3675f21e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -41,6 +41,31 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -55,7 +80,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
* instead of sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!*mqd_mem_obj)
return -ENOMEM;
retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
@@ -198,6 +223,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -393,7 +420,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 481307b8b4db..b81fda3754da 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -43,6 +43,31 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct vi_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -394,7 +421,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c317feb43f69..1092631765cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -418,4 +418,30 @@ out:
return 0;
}
+int pm_debugfs_hang_hws(struct packet_manager *pm)
+{
+ uint32_t *buffer, size;
+ int r = 0;
+
+ size = pm->pmf->query_status_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+ if (!buffer) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(buffer, 0x55, size);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
+
+ pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[5], buffer[6]);
+out:
+ mutex_unlock(&pm->lock);
+ return r;
+}
+
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 5e3990bb4c4b..f971710f1c91 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -73,7 +73,7 @@
/*
  * When working with the CP scheduler we should assign the HIQ manually or via
- * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
+ * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
  * definitions for Kaveri. On Kaveri only the first ME's queues participate
  * in CP scheduling; with that in mind we set the HIQ slot in the
  * second ME.
@@ -142,7 +142,12 @@ extern int ignore_crat;
/*
* Set sh_mem_config.retry_disable on Vega10
*/
-extern int vega10_noretry;
+extern int noretry;
+
+/*
+ * Halt if HWS hang is detected
+ */
+extern int halt_if_hws_hang;
/**
* enum kfd_sched_policy
@@ -180,9 +185,10 @@ enum cache_policy {
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
+ bool *patched_flag);
void (*interrupt_wq)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry);
};
struct kfd_device_info {
@@ -197,6 +203,7 @@ struct kfd_device_info {
bool supports_cwsr;
bool needs_iommu_device;
bool needs_pci_atomics;
+ unsigned int num_sdma_engines;
};
struct kfd_mem_obj {
@@ -415,6 +422,9 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
+	/* Relevant for CU masking */
+ uint32_t cu_mask_count; /* Must be a multiple of 32 */
+ uint32_t *cu_mask;
};
/**
@@ -806,12 +816,18 @@ int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);
+/* GPU reset */
+int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);
@@ -838,6 +854,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
+int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
struct process_queue_node {
@@ -858,6 +875,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
@@ -964,10 +983,17 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info);
+
+void kfd_signal_reset_event(struct kfd_dev *dev);
+
void kfd_flush_tlb(struct kfd_process_device *pdd);
int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+bool kfd_is_locked(void);
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
@@ -980,6 +1006,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);
+int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+int pm_debugfs_hang_hws(struct packet_manager *pm);
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
+
#else
static inline void kfd_debugfs_init(void) {}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1d80b4f7c681..4694386cc623 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -244,6 +244,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
return ERR_PTR(-EINVAL);
process = find_process(thread);
+ if (!process)
+ return ERR_PTR(-EINVAL);
return process;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index d65ce0436b31..c8cad9c078ae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -186,8 +186,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
- if (dev->dqm->queue_count >=
- CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ if (dev->dqm->queue_count >= get_num_sdma_queues(dev->dqm)) {
pr_err("Over-subscription is not allowed for SDMA.\n");
retval = -EPERM;
goto err_create_queue;
@@ -209,7 +208,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
- pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -326,6 +325,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
+ kfree(pqn->q->properties.cu_mask);
+ pqn->q->properties.cu_mask = NULL;
uninit_queue(pqn->q);
}
@@ -366,6 +367,34 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
return 0;
}
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p)
+{
+ int retval;
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("No queue %d exists for update operation\n", qid);
+ return -EFAULT;
+ }
+
+	/* Free the old CU mask memory if it is already allocated, then
+	 * take ownership of the caller-allocated buffer for the new mask.
+	 */
+ kfree(pqn->q->properties.cu_mask);
+
+ pqn->q->properties.cu_mask_count = p->cu_mask_count;
+ pqn->q->properties.cu_mask = p->cu_mask;
+
+ retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+ pqn->q);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
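
pqm_set_cu_mask() takes ownership of the cu_mask buffer passed in through queue_properties: it frees the previous mask, stores the new pointer, and the buffer is eventually released in pqm_destroy_queue(). A hedged caller-side sketch under that assumption (hypothetical helper, not the ioctl path):

	static int example_set_cu_mask(struct process_queue_manager *pqm,
				       unsigned int qid, uint32_t count)
	{
		struct queue_properties properties = {};
		int ret;

		properties.cu_mask_count = count;		/* multiple of 32 */
		properties.cu_mask = kzalloc(count / 8, GFP_KERNEL);
		if (!properties.cu_mask)
			return -ENOMEM;

		ret = pqm_set_cu_mask(pqm, qid, &properties);
		if (ret == -EFAULT)	/* queue not found: mask was never stored */
			kfree(properties.cu_mask);
		/* On any other path pqm owns the buffer; do not free it here. */
		return ret;
	}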
struct kernel_queue *pqm_get_kernel_queue(
struct process_queue_manager *pqm,
unsigned int qid)
@@ -387,7 +416,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
struct process_queue_node *pqn;
struct queue *q;
enum KFD_MQD_TYPE mqd_type;
- struct mqd_manager *mqd_manager;
+ struct mqd_manager *mqd_mgr;
int r = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
@@ -410,11 +439,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
q->properties.type, q->device->id);
continue;
}
- mqd_manager = q->device->dqm->ops.get_mqd_manager(
+ mqd_mgr = q->device->dqm->ops.get_mqd_manager(
q->device->dqm, mqd_type);
} else if (pqn->kq) {
q = pqn->kq->queue;
- mqd_manager = pqn->kq->mqd;
+ mqd_mgr = pqn->kq->mqd_mgr;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_DIQ:
seq_printf(m, " DIQ on device %x\n",
@@ -434,7 +463,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
- r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+ r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
if (r != 0)
break;
}
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index d5d4586e6176..ed654a76c76a 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -4,27 +4,16 @@ menu "Display Engine Configuration"
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
+ select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
-config DRM_AMD_DC_FBC
- bool "AMD FBC - Enable Frame Buffer Compression"
- depends on DRM_AMD_DC
- help
- Choose this option if you want to use frame buffer compression
- support.
- This is a power optimisation feature, check its availability
- on your hardware before enabling this option.
-
-
config DRM_AMD_DC_DCN1_0
- bool "DCN 1.0 Raven family"
- depends on DRM_AMD_DC && X86
+ def_bool n
help
- Choose this option if you want to have
- RV family for display engine
+ RV family support for display engine
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
index 357d59648401..a8a6c106e8c7 100644
--- a/drivers/gpu/drm/amd/display/TODO
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -97,10 +97,10 @@ share it with drivers. But that's a very long term goal, and by far not just an
issue with DC - other drivers, especially around DP sink handling, are equally
guilty.
-19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
-stuff just isn't up to the challenges either. We need to figure out something
-that integrates better with DRM and linux debug printing, while not being
-useless with filtering output. dynamic debug printing might be an option.
+19. DONE - The DC logger is still a rather sore thing, but I know that the
+DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
+something that integrates better with DRM and linux debug printing, while not
+being useless with filtering output. dynamic debug printing might be an option.
20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index af16973f2c41..94911871eb9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -28,11 +28,11 @@
AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
ifneq ($(CONFIG_DEBUG_FS),)
-AMDGPUDM += amdgpu_dm_crc.o
+AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f9add85157e7..800f481a6995 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -39,6 +39,9 @@
#include "dm_helpers.h"
#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -55,8 +58,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
-#include "modules/inc/mod_freesync.h"
-
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"
@@ -347,7 +348,6 @@ static void hotplug_notify_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
@@ -388,7 +388,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
}
}
-#endif
/* Init display KMS
@@ -902,14 +901,14 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
(struct edid *) sink->dc_edid.raw_edid;
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
aconnector->edid);
}
amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
} else {
amdgpu_dm_remove_sink_from_freesync_module(connector);
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -1040,7 +1039,7 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
- if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
@@ -1319,7 +1318,12 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
- return bd->props.brightness;
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return bd->props.brightness;
+ return ret;
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -1334,6 +1338,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
struct backlight_properties props = { 0 };
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -1531,10 +1536,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
- /*
- * Temporary disable until pplib/smu interaction is implemented
- */
- dm->dc->debug.disable_stutter = true;
break;
#endif
default:
@@ -1542,6 +1543,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
return 0;
fail:
kfree(aencoder);
@@ -1573,18 +1577,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
-static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
- u8 level)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
-}
-
-static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
- return 0;
-}
-
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -1613,10 +1605,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
- .backlight_set_level =
- dm_set_backlight_level,/* called unconditionally */
- .backlight_get_level =
- dm_get_backlight_level,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
.hpd_sense = NULL,/* called unconditionally */
.hpd_set_polarity = NULL, /* called unconditionally */
.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
@@ -2123,13 +2113,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
- int32_t width = mode_in->crtc_hdisplay * 9;
- int32_t height = mode_in->crtc_vdisplay * 16;
-
- if ((width - height) < 10 && (width - height) > -10)
- return ASPECT_RATIO_16_9;
- else
- return ASPECT_RATIO_4_3;
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
static enum dc_color_space
@@ -2175,6 +2160,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
return color_space;
}
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
+{
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+
+ timing_out->display_color_depth--;
+}
+
+static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ int normalized_clk;
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+ do {
+ normalized_clk = timing_out->pix_clk_khz;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+		/* Adjust the pixel clock per the HDMI spec based on colour depth */
+ switch (timing_out->display_color_depth) {
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ return;
+ }
+ if (normalized_clk <= info->max_tmds_clock)
+ return;
+ reduce_mode_colour_depth(timing_out);
+
+ } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
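+/* The loop above is plain proportional arithmetic: deep colour scales the
+ * TMDS clock by bits-per-pixel relative to 24 bpp, and YCbCr 4:2:0 halves it
+ * first. Worked example (all values in kHz), assuming a hypothetical sink
+ * with max_tmds_clock == 340000 and a 594000 kHz 4:2:0 mode:
+ *
+ *	int clk_12bpc = 594000 / 2 * 36 / 24;	// 445500 > 340000: reduce depth
+ *	int clk_10bpc = 594000 / 2 * 30 / 24;	// 371250 > 340000: reduce again
+ *	int clk_8bpc  = 594000 / 2;		// 297000: COLOR_DEPTH_888, loop exits
+ */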
/*****************************************************************************/
static void
@@ -2183,6 +2208,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_connector *connector)
{
struct dc_crtc_timing *timing_out = &stream->timing;
+ const struct drm_display_info *info = &connector->display_info;
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
@@ -2191,8 +2217,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
timing_out->v_border_top = 0;
timing_out->v_border_bottom = 0;
/* TODO: un-hardcode */
-
- if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
else
@@ -2228,6 +2256,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ adjust_colour_depth_from_display_info(timing_out, info);
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -3048,14 +3078,24 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
- r = amdgpu_bo_pin(rbo, domain, &afb->address);
- amdgpu_bo_unreserve(rbo);
-
+ r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ amdgpu_bo_unreserve(rbo);
+ return r;
+ }
+
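+ /* Bind the buffer into GART before querying the GPU offset that is used
+ * as the scanout address below.
+ */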
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ DRM_ERROR("%p bind failed\n", rbo);
return r;
}
+ amdgpu_bo_unreserve(rbo);
+
+ afb->address = amdgpu_bo_gpu_offset(rbo);
amdgpu_bo_ref(rbo);
@@ -3426,12 +3466,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
struct edid *edid = amdgpu_dm_connector->edid;
encoder = helper->best_encoder(connector);
- amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (!edid || !drm_edid_is_valid(edid)) {
+ drm_add_modes_noedid(connector, 640, 480);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ }
amdgpu_dm_fbc_init(connector);
-#endif
+
return amdgpu_dm_connector->num_modes;
}
@@ -3450,7 +3493,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.stereo_allowed = false;
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
-
mutex_init(&aconnector->hpd_lock);
/* configure supported HPD hot plug: connector->polled default value is 0
@@ -3459,9 +3501,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3614,10 +3660,17 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link,
link_index);
- drm_mode_connector_attach_encoder(
+ drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
drm_connector_register(&aconnector->base);
+#if defined(CONFIG_DEBUG_FS)
+ res = connector_debugfs_init(aconnector);
+ if (res) {
+ DRM_ERROR("Failed to create debugfs for connector\n");
+ goto out_free;
+ }
+#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -3914,8 +3967,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* Flip */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* update crtc fb */
- crtc->primary->fb = fb;
WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
WARN_ON(!acrtc_state->stream);
@@ -3928,10 +3979,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
if (acrtc->base.state->event)
prepare_flip_isr(acrtc);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
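+ /* The flip itself is programmed through DC below, outside the event lock. */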
surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
surface_updates->flip_addr = &addr;
-
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
@@ -3944,9 +3996,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
__func__,
addr.address.grph.addr.high_part,
addr.address.grph.addr.low_part);
-
-
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
/*
@@ -4206,6 +4255,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
drm_atomic_helper_update_legacy_modeset_state(dev, state);
@@ -4410,6 +4460,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
bool modeset_needed;
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
modeset_needed = modeset_required(
@@ -4463,11 +4516,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* so we can put the GPU into runtime suspend if we're not driving any
* displays anymore
*/
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (old_crtc_state->active && !new_crtc_state->active)
- pm_runtime_put_autosuspend(dev->dev);
- }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d5aa89ad5571..a29dc35954c9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -72,13 +72,11 @@ struct irq_list_head {
struct work_struct work;
};
-#if defined(CONFIG_DRM_AMD_DC_FBC)
struct dm_comressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
-#endif
struct amdgpu_display_manager {
@@ -129,9 +127,8 @@ struct amdgpu_display_manager {
* Caches device atomic state for suspend/resume
*/
struct drm_atomic_state *cached_state;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
struct dm_comressor_info compressor;
-#endif
};
struct amdgpu_dm_connector {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b329393307e5..326f6fb7e0bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -231,18 +231,21 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
* preparation for hardware commit. If no lut is specified by user, we default
* to SRGB degamma.
*
- * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
- * Programmable degamma is not supported, and an attempt to do so will return
- * -EINVAL.
+ * We support degamma bypass, predefined SRGB, and custom degamma
*
* RETURNS:
- * 0 on success, -EINVAL if custom degamma curve is given.
+ * 0 on success
+ * -EINVAL if crtc_state has a degamma_lut of invalid size
+ * -ENOMEM if gamma allocation fails
*/
int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
struct dc_plane_state *dc_plane_state)
{
struct drm_property_blob *blob = crtc_state->degamma_lut;
struct drm_color_lut *lut;
+ uint32_t lut_size;
+ struct dc_gamma *gamma;
+ bool ret;
if (!blob) {
/* Default to SRGB */
@@ -258,11 +261,30 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
return 0;
}
- /* Otherwise, assume SRGB, since programmable degamma is not
- * supported.
- */
- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- return -EINVAL;
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
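+ /* Only a full-size LUT (MAX_COLOR_LUT_ENTRIES) can be passed through as
+ * a custom degamma; any other size is rejected.
+ */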
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+ gamma->type = GAMMA_CUSTOM;
+ else {
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
+
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+
+ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true);
+ dc_gamma_release(&gamma);
+ if (!ret) {
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ DRM_ERROR("Out of memory when calculating degamma params\n");
+ return -ENOMEM;
+ }
+
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 52f2c01349e3..9bfb040352e9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
*/
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
- struct dc_stream_state *stream_state = crtc_state->stream;
+ struct dm_crtc_state *crtc_state;
+ struct dc_stream_state *stream_state;
uint32_t crcs[3];
+ if (crtc == NULL)
+ return;
+
+ crtc_state = to_dm_crtc_state(crtc->state);
+ stream_state = crtc_state->stream;
+
/* Early return if CRC capture is not enabled. */
if (!crtc_state->crc_enabled)
return;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
new file mode 100644
index 000000000000..0d9e410ca01e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "dc.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_debugfs.h"
+
+/* function description
+ * get/set DP configuration: lane_count, link_rate, spread_spectrum
+ *
+ * valid lane count value: 1, 2, 4
+ * valid link rate value:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * --- to get dp configuration
+ *
+ * cat link_settings
+ *
+ * It will list the current, verified, reported, and preferred DP configuration.
+ * current --- for the current video mode
+ * verified --- maximum configuration which passed link training
+ * reported --- DP RX reported caps (DPCD registers 0, 1, 2)
+ * preferred --- user-forced settings
+ *
+ * --- set (or force) dp configuration
+ *
+ * echo <lane_count> <link_rate> > link_settings
+ *
+ * for example, to force 4 lanes at 2.7 Gbps per lane,
+ * echo 4 0xa > link_settings
+ *
+ * spread_spectrum cannot be changed dynamically.
+ *
+ * If an invalid lane count or link rate is forced, no HW programming is
+ * done. Check the link settings after a force operation to see whether the
+ * HW was programmed:
+ *
+ * cat link_settings
+ *
+ * and compare the current and preferred settings.
+ *
+ */
+static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ const uint32_t rd_buf_size = 100;
+ uint32_t result = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ rd_buf_ptr += scnprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Current: %d %d %d ",
+ link->cur_link_settings.lane_count,
+ link->cur_link_settings.link_rate,
+ link->cur_link_settings.link_spread);
+
+ rd_buf_ptr += scnprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Verified: %d %d %d ",
+ link->verified_link_cap.lane_count,
+ link->verified_link_cap.link_rate,
+ link->verified_link_cap.link_spread);
+
+ rd_buf_ptr += scnprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Reported: %d %d %d ",
+ link->reported_link_cap.lane_count,
+ link->reported_link_cap.link_rate,
+ link->reported_link_cap.link_spread);
+
+ scnprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Preferred: %d %d %d\n",
+ link->preferred_link_setting.lane_count,
+ link->preferred_link_setting.link_rate,
+ link->preferred_link_setting.link_spread);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ const uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ /* 0: lane_count; 1: link_rate */
+ uint8_t param_index = 0;
+ long param[2];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool valid_input = false;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes not copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not read\n");
+ return -EINVAL;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
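+ /* Parse up to two whitespace-separated values, interpreted as hex:
+ * lane_count first, then link_rate.
+ */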
+ while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ switch (param[1]) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_RBR2:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid input value: no HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* save user force lane_count, link_rate to preferred settings
+ * spread spectrum will not be changed
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.link_rate = param[1];
+
+ dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
+
+/* function: get current DP PHY settings: voltage swing, pre-emphasis,
+ * post-cursor2 (defined by VESA DP specification)
+ *
+ * valid values
+ * voltage swing: 0,1,2,3
+ * pre-emphasis : 0,1,2,3
+ * post cursor2 : 0,1,2,3
+ *
+ *
+ * how to use this debugfs
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * there will be directories like DP-1, DP-2, DP-3, etc., one per DP connector
+ *
+ * To figure out which DP-x corresponds to the display to be checked,
+ * cd DP-x
+ * ls -ll
+ * There should be debugfs files such as link_settings and phy_settings.
+ * cat link_settings
+ * and use the lane_count and link_rate to identify which DP-x belongs to the
+ * display to be worked on.
+ *
+ * To get the current DP PHY settings,
+ * cat phy_settings
+ *
+ * To change the DP PHY settings,
+ * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
+ * for example, to change voltage swing to 2, pre-emphasis to 3, post_cursor2
+ * to 0,
+ * echo 2 3 0 > phy_settings
+ *
+ * To check whether the change was applied, read back the current settings:
+ * cat phy_settings
+ *
+ * If invalid values are set by the user, e.g.
+ * echo 1 4 0 > phy_settings
+ *
+ * the HW will NOT be programmed with these settings.
+ * cat phy_settings will show the previous valid settings.
+ */
+static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ const uint32_t rd_buf_size = 20;
+ uint32_t result = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return -ENOMEM;
+
+ snprintf(rd_buf, rd_buf_size, " %d %d %d ",
+ link->cur_lane_setting.VOLTAGE_SWING,
+ link->cur_lane_setting.PRE_EMPHASIS,
+ link->cur_lane_setting.POST_CURSOR2);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user((*(rd_buf + result)), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ uint8_t param_index = 0;
+ long param[3];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool use_prefer_link_setting;
+ struct link_training_settings link_lane_settings;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes not copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return -EINVAL;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
+ while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
+ (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
+ (param[2] > POST_CURSOR2_MAX_LEVEL)) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid input: no HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* get link settings: lane count, link rate */
+ use_prefer_link_setting =
+ ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
+ (link->test_pattern_enabled));
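+ /* Forced (preferred) settings are honoured only while a DP test pattern
+ * is active; otherwise the current link settings are used.
+ */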
+
+ memset(&link_lane_settings, 0, sizeof(link_lane_settings));
+
+ if (use_prefer_link_setting) {
+ link_lane_settings.link_settings.lane_count =
+ link->preferred_link_setting.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->preferred_link_setting.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->preferred_link_setting.link_spread;
+ } else {
+ link_lane_settings.link_settings.lane_count =
+ link->cur_link_settings.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->cur_link_settings.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->cur_link_settings.link_spread;
+ }
+
+ /* apply phy settings from user */
+ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
+ link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+ (enum dc_voltage_swing) (param[0]);
+ link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+ (enum dc_pre_emphasis) (param[1]);
+ link_lane_settings.lane_settings[r].POST_CURSOR2 =
+ (enum dc_post_cursor2) (param[2]);
+ }
+
+ /* program ASIC registers and DPCD registers */
+ dc_link_set_drive_settings(dc, &link_lane_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
+
+/* function description
+ *
+ * set PHY layer or Link layer test pattern
+ * PHY test pattern is used for PHY SI check.
+ * Link layer test will not affect PHY SI.
+ *
+ * Reset Test Pattern:
+ * 0 = DP_TEST_PATTERN_VIDEO_MODE
+ *
+ * PHY test pattern supported:
+ * 1 = DP_TEST_PATTERN_D102
+ * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
+ * 3 = DP_TEST_PATTERN_PRBS7
+ * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
+ * 5 = DP_TEST_PATTERN_CP2520_1
+ * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
+ * 7 = DP_TEST_PATTERN_CP2520_3
+ *
+ * DP PHY Link Training Patterns
+ * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
+ * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
+ * a = DP_TEST_PATTERN_TRAINING_PATTERN3
+ * b = DP_TEST_PATTERN_TRAINING_PATTERN4
+ *
+ * DP Link Layer Test pattern
+ * c = DP_TEST_PATTERN_COLOR_SQUARES
+ * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
+ * e = DP_TEST_PATTERN_VERTICAL_BARS
+ * f = DP_TEST_PATTERN_HORIZONTAL_BARS
+ * 10= DP_TEST_PATTERN_COLOR_RAMP
+ *
+ * The test_pattern debugfs entry is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * --- set test pattern
+ * echo <test pattern #> > test_pattern
+ *
+ * If test pattern # is not supported, NO HW programming will be done.
+ * DP_TEST_PATTERN_80BIT_CUSTOM needs an extra 10 bytes of data for the
+ * user pattern; the 10 input bytes are separated by spaces:
+ *
+ * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
+ *
+ * --- reset test pattern
+ * echo 0 > test_pattern
+ *
+ * --- HPD detection is disabled while a PHY test pattern is set
+ *
+ * When a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of
+ * the HW ASIC is disabled. The user can unplug the DP display from the DP
+ * connector and plug in a scope to check the test pattern PHY SI.
+ * To unplug the scope and plug the DP display back in, do the steps below:
+ * echo 0 > test_pattern
+ * unplug scope
+ * plug DP display.
+ *
+ * "echo 0 > test_pattern" re-enables the HPD pin so that the video sw
+ * driver can detect "unplug scope" and "plug DP display"
+ */
+static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 100;
+ uint32_t wr_buf_count = 0;
+ int r;
+ int bytes_from_user;
+ char *sub_str = NULL;
+ uint8_t param_index = 0;
+ uint8_t param_nums = 0;
+ long param[11] = {0x0};
+ const char delimiter[3] = {' ', '\n', '\0'};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ bool disable_hpd = false;
+ bool valid_test_pattern = false;
+ /* init with the default 80-bit custom pattern */
+ uint8_t custom_pattern[10] = {
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07
+ };
+ struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct link_training_settings link_training_settings;
+ int i;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes not copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return -EINVAL;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ /* Count the parameters; isspace() cannot distinguish a space from '\n' */
+ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
+ /* skip space*/
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+
+ /* skip non-space*/
+ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ param_nums++;
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+ }
+
+ /* max 11 parameters */
+ if (param_nums > 11)
+ param_nums = 11;
+
+ wr_buf_ptr = wr_buf; /* reset buf pointer */
+ wr_buf_count = 0; /* number of char already checked */
+
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ while (param_index < param_nums) {
+ /* after strsep, wr_buf_ptr will be moved to after space */
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ }
+
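+ /* param[0] selects the test pattern; for DP_TEST_PATTERN_80BIT_CUSTOM,
+ * param[1..10] carry the 10 user pattern bytes.
+ */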
+ test_pattern = param[0];
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ valid_test_pattern = true;
+ break;
+
+ case DP_TEST_PATTERN_D102:
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ case DP_TEST_PATTERN_PRBS7:
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ disable_hpd = true;
+ valid_test_pattern = true;
+ break;
+
+ default:
+ valid_test_pattern = false;
+ test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ break;
+ }
+
+ if (!valid_test_pattern) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
+ return bytes_from_user;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ for (i = 0; i < 10; i++) {
+ if ((uint8_t) param[i + 1] != 0x0)
+ break;
+ }
+
+ if (i < 10) {
+ /* do not use the default pattern */
+ for (i = 0; i < 10; i++)
+ custom_pattern[i] = (uint8_t) param[i + 1];
+ }
+ }
+
+ /* Usage: set a DP physical test pattern using debugfs with a normal DP
+ * panel, then unplug the DP panel and connect a scope to measure it.
+ * Normal video mode and test patterns generated by the CRTC are
+ * visible to the user, so do not disable HPD for them.
+ * Video mode is also set to clear a test pattern, so re-enable HPD
+ * because it might have been disabled after a test pattern was set.
+ * AUX depends on HPD and the sequence is order dependent; do not move!
+ */
+ if (!disable_hpd)
+ dc_link_enable_hpd(link);
+
+ prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
+ prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
+ prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
+
+ cur_link_settings.lane_count = link->cur_link_settings.lane_count;
+ cur_link_settings.link_rate = link->cur_link_settings.link_rate;
+ cur_link_settings.link_spread = link->cur_link_settings.link_spread;
+
+ link_training_settings.link_settings = cur_link_settings;
+
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
+ prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
+ (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
+ prefer_link_settings.link_rate != cur_link_settings.link_rate))
+ link_training_settings.link_settings = prefer_link_settings;
+ }
+
+ for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
+ link_training_settings.lane_settings[i] = link->cur_lane_setting;
+
+ dc_link_set_test_pattern(
+ link,
+ test_pattern,
+ &link_training_settings,
+ custom_pattern,
+ 10);
+
+ /* Usage: Set DP physical test pattern using AMDDP with normal DP panel
+ * Then plug out DP panel and connect a scope to measure DP PHY signal.
+ * Need disable interrupt to avoid SW driver disable DP output. This is
+ * done after the test pattern is set.
+ */
+ if (valid_test_pattern && disable_hpd)
+ dc_link_disable_hpd(link);
+
+ kfree(wr_buf);
+
+ return bytes_from_user;
+}
+
+static const struct file_operations dp_link_settings_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_link_settings_read,
+ .write = dp_link_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_settings_debugfs_fop = {
+ .owner = THIS_MODULE,
+ .read = dp_phy_settings_read,
+ .write = dp_phy_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_test_pattern_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_phy_test_pattern_debugfs_write,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+} dp_debugfs_entries[] = {
+ {"link_settings", &dp_link_settings_debugfs_fops},
+ {"phy_settings", &dp_phy_settings_debugfs_fop},
+ {"test_pattern", &dp_phy_test_pattern_fops}
+};
+
+int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+{
+ int i;
+ struct dentry *ent, *dir = connector->base.debugfs_entry;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
+ ent = debugfs_create_file(dp_debugfs_entries[i].name,
+ 0644,
+ dir,
+ connector,
+ dp_debugfs_entries[i].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ }
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index b43315cc5d58..d9ed1b2aa811 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,18 +19,16 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#ifndef PP_POWERSOURCE_H
-#define PP_POWERSOURCE_H
+#ifndef __AMDGPU_DM_DEBUGFS_H__
+#define __AMDGPU_DM_DEBUGFS_H__
-enum pp_power_source {
- PP_PowerSource_AC = 0,
- PP_PowerSource_DC,
- PP_PowerSource_LimitedPower,
- PP_PowerSource_LimitedPower_2,
- PP_PowerSource_Max
-};
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+int connector_debugfs_init(struct amdgpu_dm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ec304b1a5973..8403b6a9a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -169,6 +169,11 @@ static void get_payload_table(
mutex_unlock(&mst_mgr->payload_lock);
}
+void dm_helpers_dp_update_branch_info(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -454,6 +459,22 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_is_dp_sink_present(struct dc_link *link)
+{
+ bool dp_sink_present;
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to find connector for link!\n");
+ return true;
+ }
+
+ mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ dp_sink_present = dc_link_is_dp_sink_present(link);
+ mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ return dp_sink_present;
+}
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
@@ -497,6 +518,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
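+ /* DP compliance: if the sink requested an EDID read test, write the
+ * EDID checksum back and acknowledge via TEST_RESPONSE.
+ */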
+ if (link->aux_mode) {
+ union test_request test_request = { {0} };
+ union test_response test_response = { {0} };
+
+ dm_helpers_dp_read_dpcd(ctx,
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+
+ if (!test_request.bits.EDID_READ)
+ return edid_status;
+
+ test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_EDID_CHECKSUM,
+ &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+ 1);
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+
+ }
return edid_status;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 4304d9e408b8..9a300732ba37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -80,55 +80,72 @@ static void log_dpcd(uint8_t type,
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
- enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
- I2C_MOT_TRUE : I2C_MOT_FALSE;
- enum ddc_result res;
- uint32_t read_bytes = msg->size;
+ ssize_t result = 0;
+ enum i2caux_transaction_action action;
+ enum aux_transaction_type type;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- res = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size,
- &read_bytes);
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
break;
case DP_AUX_NATIVE_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
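+ /* The write path does not report a byte count; assume the whole
+ * message was transferred.
+ */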
+ result = msg->size;
break;
case DP_AUX_I2C_READ:
- res = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size,
- &read_bytes);
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
break;
case DP_AUX_I2C_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ result = msg->size;
break;
default:
- return 0;
+ return -EINVAL;
}
#ifdef TRACE_DPCD
@@ -139,9 +156,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
- if (res != DDC_RESULT_SUCESSFULL)
- return -EIO;
- return read_bytes;
+ if (result < 0) /* DC doesn't know about kernel error codes */
+ result = -EIO;
+
+ return result;
}
static enum drm_connector_status
@@ -233,7 +251,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base,
NULL);
return ret;
@@ -261,7 +279,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
connector, aconnector->edid);
}
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base, aconnector->edid);
ret = drm_add_edid_modes(connector, aconnector->edid);
@@ -345,7 +363,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector, connector->base.id, aconnector->mst_port);
aconnector->port = port;
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
aconnector->mst_connected = true;
@@ -393,7 +411,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
dev->mode_config.tile_property,
0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
/*
* Initialize connector state before adding the connectror to drm and
@@ -441,7 +459,7 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
{
mutex_lock(&connector->dev->mode_config.mutex);
- drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
new file mode 100644
index 000000000000..fbe878ae1e8c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+#include "dm_pp_smu.h"
+
+
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ int i;
+
+ if (adev->pm.dpm_enabled) {
+
+ memset(&adev->pm.pm_display_cfg, 0,
+ sizeof(adev->pm.pm_display_cfg));
+
+ adev->pm.pm_display_cfg.cpu_cc6_disable =
+ pp_display_cfg->cpu_cc6_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_disable =
+ pp_display_cfg->cpu_pstate_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+ pp_display_cfg->cpu_pstate_separation_time;
+
+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+ pp_display_cfg->nb_pstate_switch_disable;
+
+ adev->pm.pm_display_cfg.num_display =
+ pp_display_cfg->display_count;
+ adev->pm.pm_display_cfg.num_path_including_non_display =
+ pp_display_cfg->display_count;
+
+ adev->pm.pm_display_cfg.min_core_set_clock =
+ pp_display_cfg->min_engine_clock_khz/10;
+ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
+ adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_dcef_set_clk =
+ pp_display_cfg->min_dcfclock_khz/10;
+
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+ pp_display_cfg->avail_mclk_switch_time_us;
+
+ adev->pm.pm_display_cfg.display_clk =
+ pp_display_cfg->disp_clk_khz/10;
+
+ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+ adev->pm.pm_display_cfg.line_time_in_us =
+ pp_display_cfg->line_time_in_us;
+
+ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+ for (i = 0; i < pp_display_cfg->display_count; i++) {
+ const struct dm_pp_single_disp_config *dc_cfg =
+ &pp_display_cfg->disp_configs[i];
+ adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ }
+
+ /* TODO: complete implementation of
+ * pp_display_configuration_change().
+ * Follow example of:
+ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+
+ /* TODO: replace by a separate call to 'apply display cfg'? */
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+}
+
+static void get_default_clock_levels(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clks)
+{
+ uint32_t disp_clks_in_khz[6] = {
+ 300000, 400000, 496560, 626090, 685720, 757900 };
+ uint32_t sclks_in_khz[6] = {
+ 300000, 360000, 423530, 514290, 626090, 720000 };
+ uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+ switch (clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, disp_clks_in_khz,
+ sizeof(disp_clks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, sclks_in_khz,
+ sizeof(sclks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ clks->num_levels = 2;
+ memmove(clks->clocks_in_khz, mclks_in_khz,
+ sizeof(mclks_in_khz));
+ break;
+ default:
+ clks->num_levels = 0;
+ break;
+ }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+ enum dm_pp_clock_type dm_pp_clk_type)
+{
+ enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ amd_pp_clk_type = amd_pp_disp_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ amd_pp_clk_type = amd_pp_sys_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCEFCLK:
+ amd_pp_clk_type = amd_pp_dcef_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+ amd_pp_clk_type = amd_pp_dcf_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ amd_pp_clk_type = amd_pp_pixel_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_FCLK:
+ amd_pp_clk_type = amd_pp_f_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ amd_pp_clk_type = amd_pp_phy_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+ amd_pp_clk_type = amd_pp_dpp_clock;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
+
+ return amd_pp_clk_type;
+}
+
+static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
+ enum PP_DAL_POWERLEVEL max_clocks_state)
+{
+ switch (max_clocks_state) {
+ case PP_DAL_POWERLEVEL_0:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
+ case PP_DAL_POWERLEVEL_1:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
+ case PP_DAL_POWERLEVEL_2:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
+ case PP_DAL_POWERLEVEL_3:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
+ case PP_DAL_POWERLEVEL_4:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
+ case PP_DAL_POWERLEVEL_5:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
+ case PP_DAL_POWERLEVEL_6:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
+ case PP_DAL_POWERLEVEL_7:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
+ max_clocks_state);
+ return DM_PP_CLOCKS_STATE_INVALID;
+ }
+}
+
+static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->count,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ dc_clks->num_levels = pp_clks->count;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+ dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
+ }
+}
+
+static void pp_to_dc_clock_levels_with_latency(
+ const struct pp_clock_levels_with_latency *pp_clks,
+ struct dm_pp_clock_levels_with_latency *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+ }
+}
+
+static void pp_to_dc_clock_levels_with_voltage(
+ const struct pp_clock_levels_with_voltage *pp_clks,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+ }
+}
+
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *dc_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct amd_pp_clocks pp_clks = { 0 };
+ struct amd_pp_simple_clock_info validation_clks = { 0 };
+ uint32_t i;
+
+ if (adev->powerplay.pp_funcs->get_clock_by_type) {
+ if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+ dc_to_pp_clock_type(clk_type), &pp_clks)) {
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
+ }
+
+ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+ pp_handle, &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
+ }
+
+ DRM_INFO("DM_PPLIB: Validation clocks:\n");
+ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+ validation_clks.engine_max_clock);
+ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+ validation_clks.memory_max_clock);
+ DRM_INFO("DM_PPLIB: level : %d\n",
+ validation_clks.level);
+
+ /* Translate 10 kHz to kHz. */
+ validation_clks.engine_max_clock *= 10;
+ validation_clks.memory_max_clock *= 10;
+
+ /* Determine the highest non-boosted level from the Validation Clocks */
+ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+ /* This clock is higher than the validation clock.
+ * That means the previous one is the highest
+ * non-boosted one. */
+ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_latency pp_clks = { 0 };
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+ return false;
+
+ if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks))
+ return false;
+
+ pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_voltage pp_clk_info = {0};
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info))
+ return false;
+
+ pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct pp_display_clock_request pp_clock_request = {0};
+ int ret = 0;
+
+ pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
+ pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
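+ /* dc_to_pp_clock_type() returns 0 for unrecognized clock types; treat
+ * that as a failed request.
+ */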
+ if (!pp_clock_request.clock_type)
+ return false;
+
+ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+ adev->powerplay.pp_handle,
+ &pp_clock_request);
+ if (ret)
+ return false;
+ return true;
+}
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct amd_pp_clock_info pp_clk_info = {0};
+ int ret = 0;
+
+ if (adev->powerplay.pp_funcs->get_current_clocks)
+ ret = adev->powerplay.pp_funcs->get_current_clocks(
+ adev->powerplay.pp_handle,
+ &pp_clk_info);
+ if (ret)
+ return false;
+
+ static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
+ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
+
+ return true;
+}
+
+void pp_rv_set_display_requirement(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->display_configuration_changed)
+ return;
+
+ amdgpu_dpm_display_configuration_changed(adev);
+}
+
+void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
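+ /* Map DC reader (DMIF) and writer (MCIF) watermark sets onto the pplib
+ * SOC15 clock ranges; wm_inst values above 3 fall back to set A.
+ */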
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_khz;
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_khz;
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_khz;
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_khz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_khz;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_khz;
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_khz;
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_khz;
+ }
+
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+}
+
+void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
+ return;
+
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+}
+
+void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+{
+ funcs->pp_smu.ctx = ctx;
+ funcs->set_display_requirement = pp_rv_set_display_requirement;
+ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 5a3346124a01..516795342dd2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -35,13 +35,7 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
-unsigned long long dm_get_timestamp(struct dc_context *ctx)
-{
- struct timespec64 time;
- getrawmonotonic64(&time);
- return timespec64_to_ns(&time);
-}
unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
@@ -80,326 +74,3 @@ bool dm_read_persistent_data(struct dc_context *ctx,
/**** power component interfaces ****/
-bool dm_pp_apply_display_requirements(
- const struct dc_context *ctx,
- const struct dm_pp_display_configuration *pp_display_cfg)
-{
- struct amdgpu_device *adev = ctx->driver_context;
-
- if (adev->pm.dpm_enabled) {
-
- memset(&adev->pm.pm_display_cfg, 0,
- sizeof(adev->pm.pm_display_cfg));
-
- adev->pm.pm_display_cfg.cpu_cc6_disable =
- pp_display_cfg->cpu_cc6_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_disable =
- pp_display_cfg->cpu_pstate_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_separation_time =
- pp_display_cfg->cpu_pstate_separation_time;
-
- adev->pm.pm_display_cfg.nb_pstate_switch_disable =
- pp_display_cfg->nb_pstate_switch_disable;
-
- adev->pm.pm_display_cfg.num_display =
- pp_display_cfg->display_count;
- adev->pm.pm_display_cfg.num_path_including_non_display =
- pp_display_cfg->display_count;
-
- adev->pm.pm_display_cfg.min_core_set_clock =
- pp_display_cfg->min_engine_clock_khz/10;
- adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
- pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
- adev->pm.pm_display_cfg.min_mem_set_clock =
- pp_display_cfg->min_memory_clock_khz/10;
-
- adev->pm.pm_display_cfg.multi_monitor_in_sync =
- pp_display_cfg->all_displays_in_sync;
- adev->pm.pm_display_cfg.min_vblank_time =
- pp_display_cfg->avail_mclk_switch_time_us;
-
- adev->pm.pm_display_cfg.display_clk =
- pp_display_cfg->disp_clk_khz/10;
-
- adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
-
- adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
- adev->pm.pm_display_cfg.line_time_in_us =
- pp_display_cfg->line_time_in_us;
-
- adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
- adev->pm.pm_display_cfg.crossfire_display_index = -1;
- adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
-
- /* TODO: complete implementation of
- * pp_display_configuration_change().
- * Follow example of:
- * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
- * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
-
- /* TODO: replace by a separate call to 'apply display cfg'? */
- amdgpu_pm_compute_clocks(adev);
- }
-
- return true;
-}
-
-static void get_default_clock_levels(
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *clks)
-{
- uint32_t disp_clks_in_khz[6] = {
- 300000, 400000, 496560, 626090, 685720, 757900 };
- uint32_t sclks_in_khz[6] = {
- 300000, 360000, 423530, 514290, 626090, 720000 };
- uint32_t mclks_in_khz[2] = { 333000, 800000 };
-
- switch (clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, disp_clks_in_khz,
- sizeof(disp_clks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, sclks_in_khz,
- sizeof(sclks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- clks->num_levels = 2;
- memmove(clks->clocks_in_khz, mclks_in_khz,
- sizeof(mclks_in_khz));
- break;
- default:
- clks->num_levels = 0;
- break;
- }
-}
-
-static enum amd_pp_clock_type dc_to_pp_clock_type(
- enum dm_pp_clock_type dm_pp_clk_type)
-{
- enum amd_pp_clock_type amd_pp_clk_type = 0;
-
- switch (dm_pp_clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- amd_pp_clk_type = amd_pp_disp_clock;
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- amd_pp_clk_type = amd_pp_sys_clock;
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- amd_pp_clk_type = amd_pp_mem_clock;
- break;
- default:
- DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
- dm_pp_clk_type);
- break;
- }
-
- return amd_pp_clk_type;
-}
-
-static void pp_to_dc_clock_levels(
- const struct amd_pp_clocks *pp_clks,
- struct dm_pp_clock_levels *dc_clks,
- enum dm_pp_clock_type dc_clk_type)
-{
- uint32_t i;
-
- if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
- pp_clks->count,
- DM_PP_MAX_CLOCK_LEVELS);
-
- dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
- } else
- dc_clks->num_levels = pp_clks->count;
-
- DRM_INFO("DM_PPLIB: values for %s clock\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
- for (i = 0; i < dc_clks->num_levels; i++) {
- DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
- /* translate 10kHz to kHz */
- dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
- }
-}
-
-static void pp_to_dc_clock_levels_with_latency(
- const struct pp_clock_levels_with_latency *pp_clks,
- struct dm_pp_clock_levels_with_latency *clk_level_info,
- enum dm_pp_clock_type dc_clk_type)
-{
- uint32_t i;
-
- if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
- pp_clks->num_levels,
- DM_PP_MAX_CLOCK_LEVELS);
-
- clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
- } else
- clk_level_info->num_levels = pp_clks->num_levels;
-
- DRM_DEBUG("DM_PPLIB: values for %s clock\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
- for (i = 0; i < clk_level_info->num_levels; i++) {
- DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
- clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
- }
-}
-
-bool dm_pp_get_clock_levels_by_type(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *dc_clks)
-{
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- struct amd_pp_clocks pp_clks = { 0 };
- struct amd_pp_simple_clock_info validation_clks = { 0 };
- uint32_t i;
-
- if (adev->powerplay.pp_funcs->get_clock_by_type) {
- if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
- dc_to_pp_clock_type(clk_type), &pp_clks)) {
- /* Error in pplib. Provide default values. */
- get_default_clock_levels(clk_type, dc_clks);
- return true;
- }
- }
-
- pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
-
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
- pp_handle, &validation_clks)) {
- /* Error in pplib. Provide default values. */
- DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
- validation_clks.engine_max_clock = 72000;
- validation_clks.memory_max_clock = 80000;
- validation_clks.level = 0;
- }
- }
-
- DRM_INFO("DM_PPLIB: Validation clocks:\n");
- DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
- validation_clks.engine_max_clock);
- DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
- validation_clks.memory_max_clock);
- DRM_INFO("DM_PPLIB: level : %d\n",
- validation_clks.level);
-
- /* Translate 10 kHz to kHz. */
- validation_clks.engine_max_clock *= 10;
- validation_clks.memory_max_clock *= 10;
-
- /* Determine the highest non-boosted level from the Validation Clocks */
- if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
-				/* This clock is higher than the validation
-				 * clock. That means the previous one is the
-				 * highest non-boosted one. */
- DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
- DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- }
-
- return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_latency(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_latency *clk_level_info)
-{
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- struct pp_clock_levels_with_latency pp_clks = { 0 };
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clks))
- return false;
-
- pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
-
- return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_voltage(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_voltage *clk_level_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_notify_wm_clock_changes(
- const struct dc_context *ctx,
- struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_power_level_change_request(
- const struct dc_context *ctx,
- struct dm_pp_power_level_change_request *level_change_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_clock_for_voltage_request(
- const struct dc_context *ctx,
- struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_get_static_clocks(
- const struct dc_context *ctx,
- struct dm_pp_static_clock_info *static_clk_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-void dm_pp_get_funcs_rv(
- struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs)
-{}
-
-/**** end of power component interfaces ****/
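
The removed dm_pp_get_clock_levels_by_type carried two easy-to-miss conventions: pplib reports clocks in 10 kHz units (hence the *= 10 translation), and the level list is truncated at the first entry above the validation clock while always keeping at least one level. A minimal sketch of that clamping logic, under simplified types:

#include <stdint.h>
#include <stdio.h>

#define DM_PP_MAX_CLOCK_LEVELS 8

struct clock_levels {
	uint32_t num_levels;
	uint32_t clocks_in_khz[DM_PP_MAX_CLOCK_LEVELS];
};

/* keep only levels at or below the validation clock; pplib hands the
 * validation clock back in 10 kHz units, so convert to kHz first */
static void clamp_to_validation(struct clock_levels *clks,
				uint32_t validation_clk_10khz)
{
	uint32_t max_khz = validation_clk_10khz * 10;
	uint32_t i;

	for (i = 0; i < clks->num_levels; i++) {
		if (clks->clocks_in_khz[i] > max_khz) {
			/* the previous level is the highest non-boosted
			 * one; never drop below a single level */
			clks->num_levels = i > 0 ? i : 1;
			break;
		}
	}
}

int main(void)
{
	struct clock_levels clks = {
		.num_levels = 4,
		.clocks_in_khz = { 300000, 600000, 720000, 800000 },
	};

	clamp_to_validation(&clks, 72000);	/* 72000 x 10 kHz = 720 MHz */
	printf("levels kept: %u\n", clks.num_levels);	/* prints 3 */
	return 0;
}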
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index b49ea96b5dae..a50a76471107 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
# subcomponents.
BASICS = conversion.o fixpt31_32.o \
- logger.o log_helpers.o vector.o
+ log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 021451549ff7..26583f346c39 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -28,75 +28,12 @@
#include "include/logger_interface.h"
#include "dm_helpers.h"
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-struct dc_signal_type_info {
- enum signal_type type;
- char name[MAX_NAME_LEN];
-};
-
-static const struct dc_signal_type_info signal_type_info_tbl[] = {
- {SIGNAL_TYPE_NONE, "NC"},
- {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
- {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
- {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
- {SIGNAL_TYPE_LVDS, "LVDS"},
- {SIGNAL_TYPE_RGB, "VGA"},
- {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
- {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
- {SIGNAL_TYPE_EDP, "eDP"},
- {SIGNAL_TYPE_VIRTUAL, "Virtual"}
-};
-
-void dc_conn_log(struct dc_context *ctx,
- const struct dc_link *link,
- uint8_t *hex_data,
- int hex_data_count,
- enum dc_log_type event,
- const char *msg,
- ...)
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
{
int i;
- va_list args;
- struct log_entry entry = { 0 };
- enum signal_type signal;
-
- if (link->local_sink)
- signal = link->local_sink->sink_signal;
- else
- signal = link->connector_signal;
-
- if (link->type == dc_connection_mst_branch)
- signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
-
- dm_logger_open(ctx->logger, &entry, event);
-
- for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
- if (signal == signal_type_info_tbl[i].type)
- break;
-
- if (i == NUM_ELEMENTS(signal_type_info_tbl))
- goto fail;
-
- dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
- signal_type_info_tbl[i].name,
- link->link_index);
-
- va_start(args, msg);
- dm_logger_append_va(&entry, msg, args);
-
- if (entry.buf_offset > 0 &&
- entry.buf[entry.buf_offset - 1] == '\n')
- entry.buf_offset--;
if (hex_data)
for (i = 0; i < hex_data_count; i++)
- dm_logger_append(&entry, "%2.2X ", hex_data[i]);
-
- dm_logger_append(&entry, "^\n");
-
-fail:
- dm_logger_close(&entry);
-
- va_end(args);
+ DC_LOG_DEBUG("%2.2X ", hex_data[i]);
}
+
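
The replacement dc_conn_log_hex_linux reduces connection logging to a plain hex dump of the raw bytes. A freestanding sketch of the same %2.2X formatting, with printf standing in for the DC_LOG_DEBUG macro:

#include <stdint.h>
#include <stdio.h>

/* stand-in for DC_LOG_DEBUG: print each byte as two uppercase hex digits */
static void log_hex(const uint8_t *hex_data, int hex_data_count)
{
	int i;

	if (!hex_data)
		return;
	for (i = 0; i < hex_data_count; i++)
		printf("%2.2X ", hex_data[i]);
	printf("\n");
}

int main(void)
{
	const uint8_t dpcd[] = { 0x11, 0x0a, 0x84 };

	log_hex(dpcd, (int)(sizeof(dpcd) / sizeof(dpcd[0])));
	return 0;
}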
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
deleted file mode 100644
index 0866874ae8c6..000000000000
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "dm_services.h"
-#include "include/logger_interface.h"
-#include "logger.h"
-
-
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-static const struct dc_log_type_info log_type_info_tbl[] = {
- {LOG_ERROR, "Error"},
- {LOG_WARNING, "Warning"},
- {LOG_DEBUG, "Debug"},
- {LOG_DC, "DC_Interface"},
- {LOG_SURFACE, "Surface"},
- {LOG_HW_HOTPLUG, "HW_Hotplug"},
- {LOG_HW_LINK_TRAINING, "HW_LKTN"},
- {LOG_HW_SET_MODE, "HW_Mode"},
- {LOG_HW_RESUME_S3, "HW_Resume"},
- {LOG_HW_AUDIO, "HW_Audio"},
- {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
- {LOG_MST, "MST"},
- {LOG_SCALER, "Scaler"},
- {LOG_BIOS, "BIOS"},
- {LOG_BANDWIDTH_CALCS, "BWCalcs"},
- {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
- {LOG_I2C_AUX, "I2C_AUX"},
- {LOG_SYNC, "Sync"},
- {LOG_BACKLIGHT, "Backlight"},
- {LOG_FEATURE_OVERRIDE, "Override"},
- {LOG_DETECTION_EDID_PARSER, "Edid"},
- {LOG_DETECTION_DP_CAPS, "DP_Caps"},
- {LOG_RESOURCE, "Resource"},
- {LOG_DML, "DML"},
- {LOG_EVENT_MODE_SET, "Mode"},
- {LOG_EVENT_DETECTION, "Detect"},
- {LOG_EVENT_LINK_TRAINING, "LKTN"},
- {LOG_EVENT_LINK_LOSS, "LinkLoss"},
- {LOG_EVENT_UNDERFLOW, "Underflow"},
- {LOG_IF_TRACE, "InterfaceTrace"},
- {LOG_DTN, "DTN"},
- {LOG_DISPLAYSTATS, "DisplayStats"}
-};
-
-
-/* ----------- Object init and destruction ----------- */
-static bool construct(struct dc_context *ctx, struct dal_logger *logger,
- uint32_t log_mask)
-{
- /* malloc buffer and init offsets */
- logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
- logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
- GFP_KERNEL);
- if (!logger->log_buffer)
- return false;
-
- /* Initialize both offsets to start of buffer (empty) */
- logger->buffer_read_offset = 0;
- logger->buffer_write_offset = 0;
-
- logger->open_count = 0;
-
- logger->flags.bits.ENABLE_CONSOLE = 1;
- logger->flags.bits.ENABLE_BUFFER = 0;
-
- logger->ctx = ctx;
-
- logger->mask = log_mask;
-
- return true;
-}
-
-static void destruct(struct dal_logger *logger)
-{
- if (logger->log_buffer) {
- kfree(logger->log_buffer);
- logger->log_buffer = NULL;
- }
-}
-
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
-{
- /* malloc struct */
- struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
- GFP_KERNEL);
-
- if (!logger)
- return NULL;
- if (!construct(ctx, logger, log_mask)) {
- kfree(logger);
- return NULL;
- }
-
- return logger;
-}
-
-uint32_t dal_logger_destroy(struct dal_logger **logger)
-{
- if (logger == NULL || *logger == NULL)
- return 1;
- destruct(*logger);
- kfree(*logger);
- *logger = NULL;
-
- return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-
-static bool dal_logger_should_log(
- struct dal_logger *logger,
- enum dc_log_type log_type)
-{
- if (logger->mask & (1 << log_type))
- return true;
-
- return false;
-}
-
-static void log_to_debug_console(struct log_entry *entry)
-{
- struct dal_logger *logger = entry->logger;
-
- if (logger->flags.bits.ENABLE_CONSOLE == 0)
- return;
-
- if (entry->buf_offset) {
- switch (entry->type) {
- case LOG_ERROR:
- dm_error("%s", entry->buf);
- break;
- default:
- dm_output_to_console("%s", entry->buf);
- break;
- }
- }
-}
-
-/* Print everything unread existing in log_buffer to debug console*/
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
-{
- char *string_start = &logger->log_buffer[logger->buffer_read_offset];
-
- if (should_warn)
- dm_output_to_console(
- "---------------- FLUSHING LOG BUFFER ----------------\n");
- while (logger->buffer_read_offset < logger->buffer_write_offset) {
-
- if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
- dm_output_to_console("%s", string_start);
- string_start = logger->log_buffer + logger->buffer_read_offset + 1;
- }
- logger->buffer_read_offset++;
- }
- if (should_warn)
- dm_output_to_console(
- "-------------- END FLUSHING LOG BUFFER --------------\n\n");
-}
-
-static void log_to_internal_buffer(struct log_entry *entry)
-{
-
- uint32_t size = entry->buf_offset;
- struct dal_logger *logger = entry->logger;
-
- if (logger->flags.bits.ENABLE_BUFFER == 0)
- return;
-
- if (logger->log_buffer == NULL)
- return;
-
- if (size > 0 && size < logger->log_buffer_size) {
-
- int buffer_space = logger->log_buffer_size -
- logger->buffer_write_offset;
-
- if (logger->buffer_write_offset == logger->buffer_read_offset) {
- /* Buffer is empty, start writing at beginning */
- buffer_space = logger->log_buffer_size;
- logger->buffer_write_offset = 0;
- logger->buffer_read_offset = 0;
- }
-
- if (buffer_space > size) {
- /* No wrap around, copy 'size' bytes
- * from 'entry->buf' to 'log_buffer'
- */
- memmove(logger->log_buffer +
- logger->buffer_write_offset,
- entry->buf, size);
- logger->buffer_write_offset += size;
-
- } else {
- /* Not enough room remaining, we should flush
- * existing logs */
-
- /* Flush existing unread logs to console */
- dm_logger_flush_buffer(logger, true);
-
- /* Start writing to beginning of buffer */
- memmove(logger->log_buffer, entry->buf, size);
- logger->buffer_write_offset = size;
- logger->buffer_read_offset = 0;
- }
-
- }
-}
-
-static void log_heading(struct log_entry *entry)
-{
- int j;
-
- for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
-
- const struct dc_log_type_info *info = &log_type_info_tbl[j];
-
- if (info->type == entry->type)
- dm_logger_append(entry, "[%s]\t", info->name);
- }
-}
-
-static void append_entry(
- struct log_entry *entry,
- char *buffer,
- uint32_t buf_size)
-{
- if (!entry->buf ||
- entry->buf_offset + buf_size > entry->max_buf_bytes
- ) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- /* Todo: check if off by 1 byte due to \0 anywhere */
- memmove(entry->buf + entry->buf_offset, buffer, buf_size);
- entry->buf_offset += buf_size;
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* Warning: Be careful that 'msg' is null terminated and the total size is
- * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
- */
-void dm_logger_write(
- struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- ...)
-{
- if (logger && dal_logger_should_log(logger, log_type)) {
- uint32_t size;
- va_list args;
- char buffer[LOG_MAX_LINE_SIZE];
- struct log_entry entry;
-
- va_start(args, msg);
-
- entry.logger = logger;
-
- entry.buf = buffer;
-
- entry.buf_offset = 0;
- entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
- entry.type = log_type;
-
- log_heading(&entry);
-
- size = dm_log_to_buffer(
- buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
-
- buffer[entry.buf_offset + size] = '\0';
- entry.buf_offset += size + 1;
-
- /* --Flush log_entry buffer-- */
- /* print to kernel console */
- log_to_debug_console(&entry);
- /* log internally for dsat */
- log_to_internal_buffer(&entry);
-
- va_end(args);
- }
-}
-
-/* Same as dm_logger_write, except without open() and close(), which must
- * be done separately.
- */
-void dm_logger_append(
- struct log_entry *entry,
- const char *msg,
- ...)
-{
- va_list args;
-
- va_start(args, msg);
- dm_logger_append_va(entry, msg, args);
- va_end(args);
-}
-
-void dm_logger_append_va(
- struct log_entry *entry,
- const char *msg,
- va_list args)
-{
- struct dal_logger *logger;
-
- if (!entry) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- logger = entry->logger;
-
- if (logger && logger->open_count > 0 &&
- dal_logger_should_log(logger, entry->type)) {
-
- uint32_t size;
- char buffer[LOG_MAX_LINE_SIZE];
-
- size = dm_log_to_buffer(
- buffer, LOG_MAX_LINE_SIZE, msg, args);
-
- if (size < LOG_MAX_LINE_SIZE - 1) {
- append_entry(entry, buffer, size);
- } else {
- append_entry(entry, "LOG_ERROR, line too long\n", 27);
- }
- }
-}
-
-void dm_logger_open(
- struct dal_logger *logger,
- struct log_entry *entry, /* out */
- enum dc_log_type log_type)
-{
- if (!entry) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- entry->type = log_type;
- entry->logger = logger;
-
- entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
- GFP_KERNEL);
-
- entry->buf_offset = 0;
- entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
- logger->open_count++;
-
- log_heading(entry);
-}
-
-void dm_logger_close(struct log_entry *entry)
-{
- struct dal_logger *logger = entry->logger;
-
- if (logger && logger->open_count > 0) {
- logger->open_count--;
- } else {
- BREAK_TO_DEBUGGER();
- goto cleanup;
- }
-
- /* --Flush log_entry buffer-- */
- /* print to kernel console */
- log_to_debug_console(entry);
- /* log internally for dsat */
- log_to_internal_buffer(entry);
-
- /* TODO: Write end heading */
-
-cleanup:
- if (entry->buf) {
- kfree(entry->buf);
- entry->buf = NULL;
- entry->buf_offset = 0;
- entry->max_buf_bytes = 0;
- }
-}
-
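
The deleted logger was not a true ring buffer: log_to_internal_buffer restarts both offsets when the buffer is empty, appends while there is room, and otherwise flushes everything unread before writing again from offset zero. A compact, self-contained sketch of that flush-on-wrap policy, with printf/fwrite standing in for dm_output_to_console:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 32

struct simple_logger {
	char buf[BUF_SIZE];
	unsigned int read_off, write_off;
};

static void flush(struct simple_logger *lg)
{
	fwrite(lg->buf + lg->read_off, 1, lg->write_off - lg->read_off, stdout);
	lg->read_off = lg->write_off = 0;
}

static void append(struct simple_logger *lg, const char *msg)
{
	size_t size = strlen(msg);

	if (size == 0 || size >= BUF_SIZE)
		return;
	if (lg->write_off == lg->read_off)	/* empty: restart at 0 */
		lg->read_off = lg->write_off = 0;
	if (BUF_SIZE - lg->write_off <= size)	/* no room: flush first */
		flush(lg);
	memcpy(lg->buf + lg->write_off, msg, size);
	lg->write_off += size;
}

int main(void)
{
	struct simple_logger lg = { .read_off = 0, .write_off = 0 };

	append(&lg, "first entry, ");
	append(&lg, "second entry, ");
	append(&lg, "third entry\n");	/* forces a flush of the first two */
	flush(&lg);
	return 0;
}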
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index c7f0b27e457e..be8a2494355a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -3762,6 +3762,200 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
+enum bp_result update_slot_layout_info(
+ struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info,
+ unsigned int record_offset)
+{
+ unsigned int j;
+ struct bios_parser *bp;
+ ATOM_BRACKET_LAYOUT_RECORD *record;
+ ATOM_COMMON_RECORD_HEADER *record_header;
+ enum bp_result result = BP_RESULT_NORECORD;
+
+ bp = BP_FROM_DCB(dcb);
+ record = NULL;
+ record_header = NULL;
+
+ for (;;) {
+
+ record_header = (ATOM_COMMON_RECORD_HEADER *)
+ GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
+ if (record_header == NULL) {
+ result = BP_RESULT_BADBIOSTABLE;
+ break;
+ }
+
+ /* the end of the list */
+ if (record_header->ucRecordType == 0xff ||
+ record_header->ucRecordSize == 0) {
+ break;
+ }
+
+ if (record_header->ucRecordType ==
+ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+ sizeof(ATOM_BRACKET_LAYOUT_RECORD)
+ <= record_header->ucRecordSize) {
+ record = (ATOM_BRACKET_LAYOUT_RECORD *)
+ (record_header);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ record_offset += record_header->ucRecordSize;
+ }
+
+	/* return if the record was not found */
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* get slot sizes */
+ slot_layout_info->length = record->ucLength;
+ slot_layout_info->width = record->ucWidth;
+
+ /* get info for each connector in the slot */
+ slot_layout_info->num_of_connectors = record->ucConnNum;
+ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+ slot_layout_info->connectors[j].connector_type =
+ (enum connector_layout_type)
+ (record->asConnInfo[j].ucConnectorType);
+ switch (record->asConnInfo[j].ucConnectorType) {
+ case CONNECTOR_TYPE_DVI_D:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DVI_D;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DVI;
+ break;
+
+ case CONNECTOR_TYPE_HDMI:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_HDMI;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_HDMI;
+ break;
+
+ case CONNECTOR_TYPE_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DP;
+ break;
+
+ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_MINI_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_MINI_DP;
+ break;
+
+ default:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_UNKNOWN;
+ }
+
+ slot_layout_info->connectors[j].position =
+ record->asConnInfo[j].ucPosition;
+ slot_layout_info->connectors[j].connector_id =
+ object_id_from_bios_object_id(
+ record->asConnInfo[j].usConnectorObjectId);
+ }
+ return result;
+}
+
+
+enum bp_result get_bracket_layout_record(
+ struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int i;
+ unsigned int record_offset;
+ struct bios_parser *bp;
+ enum bp_result result;
+ ATOM_OBJECT *object;
+ ATOM_OBJECT_TABLE *object_table;
+ unsigned int genericTableOffset;
+
+ bp = BP_FROM_DCB(dcb);
+ object = NULL;
+ if (slot_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+
+ genericTableOffset = bp->object_info_tbl_offset +
+ bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
+ object_table = (ATOM_OBJECT_TABLE *)
+ GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
+ if (!object_table)
+ return BP_RESULT_FAILURE;
+
+ result = BP_RESULT_NORECORD;
+ for (i = 0; i < object_table->ucNumberOfObjects; ++i) {
+
+ if (bracket_layout_id ==
+ object_table->asObjects[i].usObjectID) {
+
+ object = &object_table->asObjects[i];
+ record_offset = object->usRecordOffset +
+ bp->object_info_tbl_offset;
+
+ result = update_slot_layout_info(dcb, i,
+ slot_layout_info, record_offset);
+ break;
+ }
+ }
+ return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp;
+ enum bp_result record_result;
+
+ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+ 0, 0
+ };
+
+ bp = BP_FROM_DCB(dcb);
+ if (board_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+ board_layout_info->num_of_slots = 0;
+
+ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ record_result = get_bracket_layout_record(dcb,
+ slot_index_to_vbios_id[i],
+ &board_layout_info->slots[i]);
+
+ if (record_result == BP_RESULT_NORECORD && i > 0)
+ break; /* no more slots present in bios */
+ else if (record_result != BP_RESULT_OK)
+ return record_result; /* fail */
+
+ ++board_layout_info->num_of_slots;
+ }
+
+ /* all data is valid */
+ board_layout_info->is_number_of_slots_valid = 1;
+ board_layout_info->is_slots_size_valid = 1;
+ board_layout_info->is_connector_offsets_valid = 1;
+ board_layout_info->is_connector_lengths_valid = 1;
+
+ return BP_RESULT_OK;
+}
+
/******************************************************************************/
static const struct dc_vbios_funcs vbios_funcs = {
@@ -3836,6 +4030,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
.bios_parser_destroy = bios_parser_destroy,
+
+ .get_board_layout_info = bios_get_board_layout_info,
};
static bool bios_parser_construct(
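
Both bios parsers walk the same on-VBIOS record layout: variable-length records packed back to back, terminated by record type 0xff or a zero size, with a type and size check before casting to the concrete record. A self-contained sketch of that walk over a byte buffer; the struct layouts and the record type value here are illustrative, not the real ATOM definitions:

#include <stdint.h>
#include <stdio.h>

#define BRACKET_LAYOUT_RECORD_TYPE 21	/* illustrative value */

struct record_header {		/* simplified ATOM_COMMON_RECORD_HEADER */
	uint8_t type;
	uint8_t size;		/* includes the header itself */
};

/* scan from offset until the wanted record, the end marker, or buffer end */
static const struct record_header *find_record(const uint8_t *tbl,
					       size_t tbl_len, size_t offset,
					       uint8_t wanted)
{
	while (offset + sizeof(struct record_header) <= tbl_len) {
		const struct record_header *hdr =
			(const struct record_header *)(tbl + offset);

		if (hdr->type == 0xff || hdr->size == 0)
			break;				/* end of the list */
		if (hdr->type == wanted && offset + hdr->size <= tbl_len)
			return hdr;
		offset += hdr->size;			/* next record */
	}
	return NULL;
}

int main(void)
{
	uint8_t tbl[] = { 3, 4, 0, 0,			/* some other record */
			  BRACKET_LAYOUT_RECORD_TYPE, 6, 1, 2, 3, 4,
			  0xff, 0 };			/* terminator */
	const struct record_header *hdr =
		find_record(tbl, sizeof(tbl), 0, BRACKET_LAYOUT_RECORD_TYPE);

	if (hdr)
		printf("found bracket layout record, size %u\n", hdr->size);
	else
		printf("no record\n");
	return 0;
}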
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index b8cef7af3c4a..eab007e1793c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -43,6 +43,29 @@
#include "bios_parser_interface.h"
#include "bios_parser_common.h"
+
+/* Temporarily add these defines until the ObjectID.h patch is updated in a few days */
+#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
+#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
+
+#define DC_LOGGER \
+ bp->base.ctx->logger
+
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
@@ -86,7 +109,6 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
-
static void destruct(struct bios_parser *bp)
{
kfree(bp->base.bios_local_image);
@@ -656,7 +678,7 @@ static enum bp_result bios_parser_get_gpio_pin_info(
return BP_RESULT_BADBIOSTABLE;
if (sizeof(struct atom_common_table_header) +
- sizeof(struct atom_gpio_pin_lut_v2_1)
+ sizeof(struct atom_gpio_pin_assignment)
> le16_to_cpu(header->table_header.structuresize))
return BP_RESULT_BADBIOSTABLE;
@@ -1854,6 +1876,198 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
+static enum bp_result update_slot_layout_info(
+ struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int record_offset;
+ unsigned int j;
+ struct atom_display_object_path_v2 *object;
+ struct atom_bracket_layout_record *record;
+ struct atom_common_record_header *record_header;
+ enum bp_result result;
+ struct bios_parser *bp;
+ struct object_info_table *tbl;
+ struct display_object_info_table_v1_4 *v1_4;
+
+ record = NULL;
+ record_header = NULL;
+ result = BP_RESULT_NORECORD;
+
+ bp = BP_FROM_DCB(dcb);
+ tbl = &bp->object_info_tbl;
+ v1_4 = tbl->v1_4;
+
+ object = &v1_4->display_path[i];
+ record_offset = (unsigned int)
+ (object->disp_recordoffset) +
+ (unsigned int)(bp->object_info_tbl_offset);
+
+ for (;;) {
+
+ record_header = (struct atom_common_record_header *)
+ GET_IMAGE(struct atom_common_record_header,
+ record_offset);
+ if (record_header == NULL) {
+ result = BP_RESULT_BADBIOSTABLE;
+ break;
+ }
+
+ /* the end of the list */
+ if (record_header->record_type == 0xff ||
+ record_header->record_size == 0) {
+ break;
+ }
+
+ if (record_header->record_type ==
+ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+ sizeof(struct atom_bracket_layout_record)
+ <= record_header->record_size) {
+ record = (struct atom_bracket_layout_record *)
+ (record_header);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ record_offset += record_header->record_size;
+ }
+
+	/* return if the record was not found */
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* get slot sizes */
+ slot_layout_info->length = record->bracketlen;
+ slot_layout_info->width = record->bracketwidth;
+
+ /* get info for each connector in the slot */
+ slot_layout_info->num_of_connectors = record->conn_num;
+ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+ slot_layout_info->connectors[j].connector_type =
+ (enum connector_layout_type)
+ (record->conn_info[j].connector_type);
+ switch (record->conn_info[j].connector_type) {
+ case CONNECTOR_TYPE_DVI_D:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DVI_D;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DVI;
+ break;
+
+ case CONNECTOR_TYPE_HDMI:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_HDMI;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_HDMI;
+ break;
+
+ case CONNECTOR_TYPE_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DP;
+ break;
+
+ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_MINI_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_MINI_DP;
+ break;
+
+ default:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_UNKNOWN;
+ }
+
+ slot_layout_info->connectors[j].position =
+ record->conn_info[j].position;
+ slot_layout_info->connectors[j].connector_id =
+ object_id_from_bios_object_id(
+ record->conn_info[j].connectorobjid);
+ }
+ return result;
+}
+
+
+static enum bp_result get_bracket_layout_record(
+ struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result;
+ struct object_info_table *tbl;
+ struct display_object_info_table_v1_4 *v1_4;
+
+ if (slot_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+ tbl = &bp->object_info_tbl;
+ v1_4 = tbl->v1_4;
+
+ result = BP_RESULT_NORECORD;
+ for (i = 0; i < v1_4->number_of_path; ++i) {
+
+ if (bracket_layout_id ==
+ v1_4->display_path[i].display_objid) {
+ result = update_slot_layout_info(dcb, i,
+ slot_layout_info);
+ break;
+ }
+ }
+ return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp;
+ enum bp_result record_result;
+
+ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+ 0, 0
+ };
+
+ bp = BP_FROM_DCB(dcb);
+ if (board_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+ board_layout_info->num_of_slots = 0;
+
+ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ record_result = get_bracket_layout_record(dcb,
+ slot_index_to_vbios_id[i],
+ &board_layout_info->slots[i]);
+
+ if (record_result == BP_RESULT_NORECORD && i > 0)
+ break; /* no more slots present in bios */
+ else if (record_result != BP_RESULT_OK)
+ return record_result; /* fail */
+
+ ++board_layout_info->num_of_slots;
+ }
+
+ /* all data is valid */
+ board_layout_info->is_number_of_slots_valid = 1;
+ board_layout_info->is_slots_size_valid = 1;
+ board_layout_info->is_connector_offsets_valid = 1;
+ board_layout_info->is_connector_lengths_valid = 1;
+
+ return BP_RESULT_OK;
+}
+
static const struct dc_vbios_funcs vbios_funcs = {
.get_connectors_number = bios_parser_get_connectors_number,
@@ -1925,6 +2139,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_smu_clock_info = bios_parser_get_smu_clock_info,
+
+ .get_board_layout_info = bios_get_board_layout_info,
};
static bool bios_parser_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 651e1fd4622f..a558bfaa0c46 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
* (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
* LVDS mode: usPixelClock = pixel clock
*/
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
+ break;
+ case COLOR_DEPTH_121212:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
+ break;
+ case COLOR_DEPTH_161616:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
+ break;
+ default:
+ break;
+ }
+ }
if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
result = BP_RESULT_OK;
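
The added switch is the standard HDMI deep-color scaling: the TMDS symbol clock is the pixel clock times bits-per-component over 24, so 10, 12 and 16 bpc scale by 30/24, 36/24 and 48/24, and 8 bpc is left untouched. A small sketch of the same arithmetic on a host-endian value (the driver additionally round-trips through le16):

#include <stdint.h>
#include <stdio.h>

enum color_depth { DEPTH_888, DEPTH_101010, DEPTH_121212, DEPTH_161616 };

/* scale an HDMI symbol clock (in 10 kHz units) for deep color */
static uint32_t hdmi_sym_clock(uint32_t pix_clock_10khz, enum color_depth d)
{
	switch (d) {
	case DEPTH_101010:
		return pix_clock_10khz * 30 / 24;	/* x1.25 */
	case DEPTH_121212:
		return pix_clock_10khz * 36 / 24;	/* x1.5 */
	case DEPTH_161616:
		return pix_clock_10khz * 48 / 24;	/* x2 */
	default:
		return pix_clock_10khz;			/* 8 bpc: x1 */
	}
}

int main(void)
{
	/* 1080p60: 148.5 MHz pixel clock = 14850 x 10 kHz */
	printf("12 bpc symclk: %u x 10 kHz\n",
	       hdmi_sym_clock(14850, DEPTH_121212));	/* prints 22275 */
	return 0;
}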
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 752b08a42d3e..2b5dc499a35e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -59,36 +59,7 @@
bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname))
-static void init_dig_encoder_control(struct bios_parser *bp);
-static void init_transmitter_control(struct bios_parser *bp);
-static void init_set_pixel_clock(struct bios_parser *bp);
-static void init_set_crtc_timing(struct bios_parser *bp);
-
-static void init_select_crtc_source(struct bios_parser *bp);
-static void init_enable_crtc(struct bios_parser *bp);
-
-static void init_external_encoder_control(struct bios_parser *bp);
-static void init_enable_disp_power_gating(struct bios_parser *bp);
-static void init_set_dce_clock(struct bios_parser *bp);
-static void init_get_smu_clock_info(struct bios_parser *bp);
-
-void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
-{
- init_dig_encoder_control(bp);
- init_transmitter_control(bp);
- init_set_pixel_clock(bp);
-
- init_set_crtc_timing(bp);
-
- init_select_crtc_source(bp);
- init_enable_crtc(bp);
-
- init_external_encoder_control(bp);
- init_enable_disp_power_gating(bp);
- init_set_dce_clock(bp);
- init_get_smu_clock_info(bp);
-}
static uint32_t bios_cmd_table_para_revision(void *dev,
uint32_t index)
@@ -829,3 +800,20 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
return 0;
}
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+
+ init_set_crtc_timing(bp);
+
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+ init_get_smu_clock_info(bp);
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
index fc3f98fb09ea..62435bfc274d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
+++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
@@ -25,10 +25,9 @@
#ifndef _CALCS_CALCS_LOGGER_H_
#define _CALCS_CALCS_LOGGER_H_
-#define DC_LOGGER \
- logger
+#define DC_LOGGER ctx->logger
-static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
+static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip)
{
DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -122,7 +121,7 @@ static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calc
}
-static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
+static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios)
{
DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -181,7 +180,7 @@ static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calc
}
-static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
+static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data)
{
int i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 2c4e8f0cb2dc..160d11a15eac 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3010,9 +3010,9 @@ bool bw_calcs(struct dc_context *ctx,
struct bw_fixed low_yclk = vbios->low_yclk;
if (ctx->dc->debug.bandwidth_calcs_trace) {
- print_bw_calcs_dceip(ctx->logger, dceip);
- print_bw_calcs_vbios(ctx->logger, vbios);
- print_bw_calcs_data(ctx->logger, data);
+ print_bw_calcs_dceip(ctx, dceip);
+ print_bw_calcs_vbios(ctx, vbios);
+ print_bw_calcs_data(ctx, data);
}
calculate_bandwidth(dceip, vbios, data);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 49a4ea45466d..bd039322f697 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -31,6 +31,8 @@
#include "resource.h"
#include "dcn10/dcn10_resource.h"
+#include "dcn10/dcn10_hubbub.h"
+
#include "dcn_calc_math.h"
#define DC_LOGGER \
@@ -248,7 +250,24 @@ static void pipe_ctx_to_e2e_pipe_params (
else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
input->src.is_hsplit = true;
- input->src.dcc = pipe->plane_state->dcc.enable;
+ if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
+ /*
+	 * this method requires us to always re-calculate watermarks when DCC
+	 * changes between flips.
+ */
+ input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
+ } else {
+ /*
+	 * allows us to disable DCC on the fly without re-calculating WM
+	 *
+	 * the extra overhead for DCC is quite small: for 1080p the WM without
+	 * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
+ */
+ unsigned int bpe;
+
+ input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
+ dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
+ }
input->src.dcc_rate = 1;
input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
input->src.source_scan = dm_horz;
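
The hunk above encodes a policy choice that is repeated below in dcn_validate_bandwidth: either track the surface's actual dcc.enable bit (exact, but forces a watermark recalculation whenever DCC toggles across a flip), or assume DCC whenever the pixel format supports it (slightly pessimistic watermarks, but DCC can then be disabled between flips with no recalc). A sketch of that decision, with hypothetical stand-ins for the hubbub callback and debug flag:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for hubbub->funcs->dcc_support_pixel_format() */
static bool dcc_support_pixel_format(int format) { return format != 0; }

static int choose_dcc_for_bw_calc(bool optimized_watermark,
				  bool surface_dcc_enabled, int format)
{
	if (optimized_watermark) {
		/* exact: must re-run the watermark calc when DCC toggles */
		return surface_dcc_enabled ? 1 : 0;
	}
	/* conservative: assume DCC whenever the format could use it, so it
	 * can be turned off between flips without a WM recalc */
	return dcc_support_pixel_format(format) ? 1 : 0;
}

int main(void)
{
	printf("optimized, dcc off: %d\n",
	       choose_dcc_for_bw_calc(true, false, 1));		/* 0 */
	printf("conservative, dcc off: %d\n",
	       choose_dcc_for_bw_calc(false, false, 1));	/* 1 */
	return 0;
}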
@@ -423,6 +442,10 @@ static void dcn_bw_calc_rq_dlg_ttu(
int total_flip_bytes = 0;
int i;
+ memset(dlg_regs, 0, sizeof(*dlg_regs));
+ memset(ttu_regs, 0, sizeof(*ttu_regs));
+ memset(rq_regs, 0, sizeof(*rq_regs));
+
for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i];
@@ -501,6 +524,7 @@ static void split_stream_across_pipes(
resource_build_scaling_params(secondary_pipe);
}
+#if 0
static void calc_wm_sets_and_perf_params(
struct dc_state *context,
struct dcn_bw_internal_vars *v)
@@ -582,6 +606,7 @@ static void calc_wm_sets_and_perf_params(
if (v->voltage_level >= 3)
context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
}
+#endif
static bool dcn_bw_apply_registry_override(struct dc *dc)
{
@@ -651,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
}
static void hack_bounding_box(struct dcn_bw_internal_vars *v,
- struct dc_debug *dbg,
+ struct dc_debug_options *dbg,
struct dc_state *context)
{
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
@@ -883,7 +908,26 @@ bool dcn_validate_bandwidth(
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
|| v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
}
- v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+
+ if (dc->debug.optimized_watermark) {
+ /*
+			 * this method requires us to always re-calculate watermarks when DCC
+			 * changes between flips.
+ */
+ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+ } else {
+ /*
+			 * allows us to disable DCC on the fly without re-calculating WM
+			 *
+			 * the extra overhead for DCC is quite small: for 1080p the WM without
+			 * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
+ */
+ unsigned int bpe;
+
+ v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
+ pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no;
+ }
+
v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
pipe->plane_state->format);
v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
@@ -976,43 +1020,60 @@ bool dcn_validate_bandwidth(
bw_consumed = v->fabric_and_dram_bandwidth;
display_pipe_configuration(v);
- calc_wm_sets_and_perf_params(context, v);
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ /*calc_wm_sets_and_perf_params(context, v);*/
+	/* Only one watermark set is used by dcn since no noticeable
+	 * performance improvement was measured, and due to hw bug DEGVIDCN10-254
+ */
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.calc_clk.dispclk_khz <
+ if (context->bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.calc_clk.dispclk_khz =
+ context->bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
-
+ context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
@@ -1225,27 +1286,27 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
- struct clocks_value *clocks)
+ struct dc_clocks *clocks)
{
unsigned vdd_level, vdd_level_temp;
unsigned dcf_clk;
	/* find a common supported voltage level */
vdd_level = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+ dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
	/* find the dcfclk corresponding to that level */
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
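
dcn_find_dcfclk_suits_all keeps folding per-domain voltage levels together with dcn_bw_max: the common level is simply the maximum of the levels each clock demands, and the matching dcfclk is then looked up at that level. A reduced sketch with made-up per-level tables:

#include <stdio.h>

#define NUM_LEVELS 4

/* made-up per-level tables: max clock (kHz) each voltage level supports */
static const int dispclk_by_level[NUM_LEVELS] = { 300000, 600000, 1000000, 1200000 };
static const int dcfclk_by_level[NUM_LEVELS]  = { 300000, 500000,  600000,  654000 };

/* lowest level whose table entry covers the requested clock */
static int level_for(const int *table, int clk_khz)
{
	int level;

	for (level = 0; level < NUM_LEVELS - 1; level++)
		if (table[level] >= clk_khz)
			break;
	return level;
}

static int bw_max(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int vdd_level = level_for(dispclk_by_level, 900000);	/* needs L2 */

	vdd_level = bw_max(vdd_level, level_for(dcfclk_by_level, 400000));
	printf("common level %d -> dcfclk %d kHz\n",
	       vdd_level, dcfclk_by_level[vdd_level]);	/* L2, 600000 kHz */
	return 0;
}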
@@ -1331,21 +1392,14 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
struct pp_smu_wm_range_sets ranges = {0};
- int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
- int max_dcfclk_khz, min_dcfclk_khz;
- int socclk_khz;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
- unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
if (!pp->set_wm_ranges)
return;
kernel_fpu_begin();
- max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
- nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
- mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
kernel_fpu_end();
@@ -1353,105 +1407,46 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
- * Watermark Set
+	 * Watermark Set. Only one watermark set is used for dcn1 due to hw bug
+	 * DEGVIDCN10-254.
*/
	/* SOCCLK does not affect anything but writeback for DCN so for now we don't
	 * care what the value is, hence min to overdrive level
*/
- ranges.num_reader_wm_sets = WM_COUNT;
- ranges.num_writer_wm_sets = WM_COUNT;
+ ranges.num_reader_wm_sets = WM_SET_COUNT;
+ ranges.num_writer_wm_sets = WM_SET_COUNT;
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
- ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
- ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
-
- ranges.reader_wm_sets[1].wm_inst = WM_B;
- ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
- ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
- ranges.writer_wm_sets[1].wm_inst = WM_B;
- ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
- ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
-
-
- ranges.reader_wm_sets[2].wm_inst = WM_C;
- ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
- ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
- ranges.writer_wm_sets[2].wm_inst = WM_C;
- ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
- ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
-
- ranges.reader_wm_sets[3].wm_inst = WM_D;
- ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
- ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
- ranges.writer_wm_sets[3].wm_inst = WM_D;
- ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
- ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
- ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
- ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
-
- ranges.reader_wm_sets[1].wm_inst = WM_B;
- ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
- ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
- ranges.writer_wm_sets[1].wm_inst = WM_B;
- ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
- ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
-
-
- ranges.reader_wm_sets[2].wm_inst = WM_C;
- ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
- ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
- ranges.writer_wm_sets[2].wm_inst = WM_C;
- ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
- ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
-
- ranges.reader_wm_sets[3].wm_inst = WM_D;
- ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
- ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
- ranges.writer_wm_sets[3].wm_inst = WM_D;
- ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
- ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
}
+ ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+
+ ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+
+ ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
pp->set_wm_ranges(&pp->pp_smu, &ranges);
}
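
The assignments just above build sets B through D by whole-struct assignment from an already-filled base set: C struct assignment copies every member, so only wm_inst needs rewriting afterwards. A reduced sketch of that pattern, with simplified field names:

#include <stdio.h>

enum wm_inst { WM_A, WM_B, WM_C, WM_D };

struct wm_set {
	enum wm_inst wm_inst;
	int min_fill_clk_khz, max_fill_clk_khz;
	int min_drain_clk_khz, max_drain_clk_khz;
};

int main(void)
{
	struct wm_set sets[4];
	int i;

	/* fill in set A once... */
	sets[0] = (struct wm_set){ WM_A, 800000, 5000000, 300000, 5000000 };

	/* ...then clone it: the assignment copies all four clock fields,
	 * so only the instance id has to change */
	for (i = 1; i < 4; i++) {
		sets[i] = sets[0];
		sets[i].wm_inst = (enum wm_inst)i;
	}

	for (i = 0; i < 4; i++)
		printf("set %d: fill %d..%d kHz\n", sets[i].wm_inst,
		       sets[i].min_fill_clk_khz, sets[i].max_fill_clk_khz);
	return 0;
}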
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 644b2187507b..6ae050dc3220 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -52,6 +52,8 @@
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"
+
+#include "dc_link_dp.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -169,6 +171,22 @@ failed_alloc:
return false;
}
+/**
+ *****************************************************************************
+ * Function: dc_stream_adjust_vmin_vmax
+ *
+ * @brief
+ *  Looks up the pipe context of each dc_stream_state and updates the
+ *  vertical_total_min and vertical_total_max for DRR (Dynamic Refresh
+ *  Rate), a power-saving feature that reduces the panel refresh rate
+ *  while the screen is static.
+ *
+ *  @param [in] dc: dc reference
+ *  @param [in] streams: affected dc stream states
+ *  @param [in] num_streams: number of affected streams
+ *  @param [in] vmin, vmax: updated values for vertical_total_min and
+ *  vertical_total_max
+ *****************************************************************************
+ */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state **streams, int num_streams,
int vmin, int vmax)
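
For context on the vmin/vmax knobs documented above: refresh rate is pixel_clock / (htotal * vtotal), so stretching the vertical total lengthens the frame and lowers the rate, and DRR adjusts vtotal within [vmin, vmax]. A small worked computation:

#include <stdio.h>

/* refresh (Hz) = pixel clock / (htotal * vtotal) */
static double refresh_hz(double pix_clk_khz, int htotal, int vtotal)
{
	return pix_clk_khz * 1000.0 / ((double)htotal * vtotal);
}

int main(void)
{
	/* a 1080p timing: 148500 kHz pixel clock, htotal 2200 */
	printf("vtotal 1125 -> %.2f Hz\n", refresh_hz(148500, 2200, 1125));
	/* stretching vtotal to twice its nominal value halves the rate */
	printf("vtotal 2250 -> %.2f Hz\n", refresh_hz(148500, 2200, 2250));
	return 0;
}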
@@ -368,6 +386,80 @@ void dc_stream_set_static_screen_events(struct dc *dc,
dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ struct dc_link_settings store_settings = *link_setting;
+ struct dc_stream_state *link_stream =
+ link->dc->current_state->res_ctx.pipe_ctx[0].stream;
+
+ link->preferred_link_setting = store_settings;
+ if (link_stream)
+ decide_link_settings(link_stream, &store_settings);
+
+ if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
+ (store_settings.link_rate != LINK_RATE_UNKNOWN))
+ dp_retrain_link_dp_test(link, &store_settings, false);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -386,9 +478,6 @@ static void destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
- if (dc->ctx->logger)
- dal_logger_destroy(&dc->ctx->logger);
-
kfree(dc->ctx);
dc->ctx = NULL;
@@ -411,7 +500,6 @@ static void destruct(struct dc *dc)
static bool construct(struct dc *dc,
const struct dc_init_data *init_params)
{
- struct dal_logger *logger;
struct dc_context *dc_ctx;
struct bw_calcs_dceip *dc_dceip;
struct bw_calcs_vbios *dc_vbios;
@@ -465,6 +553,7 @@ static bool construct(struct dc *dc,
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
dc->ctx = dc_ctx;
dc->current_state = dc_create_state();
@@ -475,14 +564,7 @@ static bool construct(struct dc *dc,
}
/* Create logger */
- logger = dal_logger_create(dc_ctx, init_params->log_mask);
- if (!logger) {
- /* can *not* call logger. call base driver 'print error' */
- dm_error("%s: failed to create Logger!\n", __func__);
- goto fail;
- }
- dc_ctx->logger = logger;
dc_ctx->dce_environment = init_params->dce_environment;
dc_version = resource_parse_asic_id(init_params->asic_id);
@@ -901,9 +983,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
for (i = 0; i < context->stream_count; i++) {
struct dc_stream_state *stream = context->streams[i];
- dc_stream_log(stream,
- dc->ctx->logger,
- LOG_DC);
+ dc_stream_log(dc, stream);
}
result = dc_commit_state_no_check(dc, context);
@@ -927,12 +1007,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->optimized_required = false;
- /* 3rd param should be true, temp w/a for RV*/
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
-#else
dc->hwss.set_bandwidth(dc, context, true);
-#endif
return true;
}
@@ -1548,7 +1623,7 @@ struct dc_sink *dc_link_add_remote_sink(
struct dc_sink *dc_sink;
enum dc_edid_status edid_status;
- if (len > MAX_EDID_BUFFER_SIZE) {
+ if (len > DC_MAX_EDID_BUFFER_SIZE) {
dm_error("Max EDID buffer size breached!\n");
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 267c76766dea..e1ebdf7b5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -352,19 +352,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 2fa521812d23..567867915d32 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
#include "dc_link_dp.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
+#include "opp.h"
#include "link_encoder.h"
#include "hw_sequencer.h"
@@ -59,7 +60,14 @@
enum {
LINK_RATE_REF_FREQ_IN_MHZ = 27,
- PEAK_FACTOR_X1000 = 1006
+ PEAK_FACTOR_X1000 = 1006,
+ /*
+	 * Some receivers fail to train on the first try but succeed on
+	 * subsequent tries. Two retries should be plenty; if training
+	 * still hasn't succeeded by then, we don't expect it ever will.
+ */
+ LINK_TRAINING_MAX_VERIFY_RETRY = 2
};
/*******************************************************************************
@@ -312,7 +320,7 @@ static enum signal_type get_basic_signal_type(
* @brief
* Check whether there is a dongle on DP connector
*/
-static bool is_dp_sink_present(struct dc_link *link)
+bool dc_link_is_dp_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
@@ -405,7 +413,7 @@ static enum signal_type link_detect_sink(
* we assume signal is DVI; it could be corrected
* to HDMI after dongle detection
*/
- if (!is_dp_sink_present(link))
+ if (!dm_helpers_is_dp_sink_present(link))
result = SIGNAL_TYPE_DVI_SINGLE_LINK;
}
}
@@ -497,6 +505,10 @@ static bool detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
+ dal_ddc_service_set_transaction_type(
+ link->ddc,
+ sink_caps->transaction_type);
+
/*
* This call will initiate MST topology discovery. Which
* will detect MST ports and add new DRM connector DRM
@@ -524,6 +536,10 @@ static bool detect_dp(
if (reason == DETECT_REASON_BOOT)
boot = true;
+ dm_helpers_dp_update_branch_info(
+ link->ctx,
+ link);
+
if (!dm_helpers_dp_mst_start_top_mgr(
link->ctx,
link, boot)) {
@@ -728,6 +744,18 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
+
+ /*
+ * Abort detection for non-DP connectors if we have
+ * no EDID
+ *
+	 * DP needs to report as connected if HPD is high
+ * even if we have no EDID in order to go to
+ * fail-safe mode
+ */
+ if (dc_is_hdmi_signal(link->connector_signal) ||
+ dc_is_dvi_signal(link->connector_signal))
+ return false;
default:
break;
}
@@ -736,30 +764,41 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
- // If both edid and dpcd are the same, then discard new sink and revert back to original sink
- if ((same_edid) && (same_dpcd)) {
- link_disconnect_remap(prev_sink, link);
- sink = prev_sink;
- prev_sink = NULL;
- } else {
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- /*
- * TODO debug why Dell 2413 doesn't like
- * two link trainings
- */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
+ reason != DETECT_REASON_HPDRX) {
+ /*
+ * TODO debug why Dell 2413 doesn't like
+ * two link trainings
+ */
+
+ /* deal with non-mst cases */
+ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+ int fail_count = 0;
+
+ dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count);
- /* deal with non-mst cases */
- dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+ if (fail_count == 0)
+ break;
}
- /* HDMI-DVI Dongle */
- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ } else {
+		// If the EDID is the same, discard the new sink and revert to the original sink
+ if (same_edid) {
+ link_disconnect_remap(prev_sink, link);
+ sink = prev_sink;
+ prev_sink = NULL;
+
+ }
}
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
@@ -1007,6 +1046,9 @@ static bool construct(
goto create_fail;
}
+ if (link->dc->res_pool->funcs->link_init)
+ link->dc->res_pool->funcs->link_init(link);
+
hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
if (hpd_gpio != NULL)
@@ -1284,29 +1326,15 @@ static enum dc_status enable_link_dp(
max_link_rate = LINK_RATE_HIGH3;
if (link_settings.link_rate == max_link_rate) {
- if (state->dis_clk->funcs->set_min_clocks_state) {
- if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
- state->dis_clk->funcs->set_min_clocks_state(
- state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
- } else {
- uint32_t dp_phyclk_in_khz;
- const struct clocks_value clocks_value =
- state->dis_clk->cur_clocks_value;
-
- /* 27mhz = 27000000hz= 27000khz */
- dp_phyclk_in_khz = link_settings.link_rate * 27000;
-
- if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
- (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
- (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
- state->dis_clk->funcs->apply_clock_voltage_request(
- state->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- dp_phyclk_in_khz,
- false,
- true);
- }
- }
+ struct dc_clocks clocks = state->bw.dcn.clk;
+
+ /* dce/dcn compat, do not update dispclk */
+ clocks.dispclk_khz = 0;
+ /* 27mhz = 27000000hz= 27000khz */
+ clocks.phyclk_khz = link_settings.link_rate * 27000;
+
+ state->dis_clk->funcs->update_clocks(
+ state->dis_clk, &clocks, false);
}
dp_enable_link_phy(
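The phyclk computation above relies on DPCD link-rate codes being multiples of 27 MHz, so multiplying the raw code by 27000 yields kHz. A worked check, assuming the standard DPCD codes:

	/* Sketch: PHY clock in kHz from a DPCD link-rate code. */
	static unsigned int phyclk_khz_from_link_rate(unsigned int code)
	{
		return code * 27000;	/* one code unit = 27 MHz = 27000 kHz */
	}

	/* phyclk_khz_from_link_rate(0x0a) == 270000 kHz (2.70 GHz, HBR)
	 * phyclk_khz_from_link_rate(0x14) == 540000 kHz (5.40 GHz, HBR2)
	 * phyclk_khz_from_link_rate(0x1e) == 810000 kHz (8.10 GHz, HBR3) */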
@@ -1784,6 +1812,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_khz;
if (stream->phy_pix_clk > 340000)
is_over_340mhz = true;
@@ -1861,28 +1891,6 @@ static enum dc_status enable_link(
break;
}
- if (pipe_ctx->stream_res.audio && status == DC_OK) {
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
- /* notify audio driver for audio modes of monitor */
- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
- unsigned int i, num_audio = 1;
- for (i = 0; i < MAX_PIPES; i++) {
- /*current_state not updated yet*/
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
- num_audio++;
- }
-
- pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
-
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
- /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
- /* un-mute audio */
- /* TODO: audio should be per stream rather than per link */
- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, false);
- }
-
return status;
}
@@ -2023,6 +2031,15 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
}
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ struct abm *abm = link->ctx->dc->res_pool->abm;
+
+ if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+ return DC_ERROR_UNEXPECTED;
+
+ return (int) abm->funcs->get_current_backlight_8_bit(abm);
+}
bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream)
@@ -2415,10 +2432,13 @@ void core_link_enable_stream(
}
}
+ core_dc->hwss.enable_audio_stream(pipe_ctx);
+
/* turn off otg test pattern if enable */
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- COLOR_DEPTH_UNDEFINED);
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
core_dc->hwss.enable_stream(pipe_ctx);
@@ -2453,6 +2473,22 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
core_dc->hwss.set_avmute(pipe_ctx, enable);
}
+/**
+ *****************************************************************************
+ * Function: dc_link_enable_hpd_filter
+ *
+ * @brief
+ *	If enable is true, programs the HPD filter on the associated HPD
+ *	line using delay_on_disconnect/delay_on_connect values dependent
+ *	on link->connector_signal
+ *
+ *	If enable is false, programs the HPD filter on the associated HPD
+ *	line with no delays on connect or disconnect
+ *
+ * @param [in] link: pointer to the dc link
+ * @param [in] enable: boolean specifying whether to enable hpd
+ *****************************************************************************
+ */
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
{
struct gpio *hpd;
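A short usage sketch for dc_link_enable_hpd_filter() as documented above; the surrounding test sequence is hypothetical:

	/* Sketch: drop HPD debounce delays around an automated test. */
	dc_link_enable_hpd_filter(link, false);	/* no connect/disconnect delay */
	/* ... toggle HPD during the test ... */
	dc_link_enable_hpd_filter(link, true);	/* restore per-signal delays */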
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ae48d603ebd6..8def0d9fa0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,6 +33,7 @@
#include "include/vector.h"
#include "core_types.h"
#include "dc_link_ddc.h"
+#include "aux_engine.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -629,83 +630,61 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-enum ddc_result dal_ddc_service_read_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- uint8_t *data,
- uint32_t len,
- uint32_t *read)
+int dc_link_aux_transfer(struct ddc_service *ddc,
+ unsigned int address,
+ uint8_t *reply,
+ void *buffer,
+ unsigned int size,
+ enum aux_transaction_type type,
+ enum i2caux_transaction_action action)
{
- struct aux_payload read_payload = {
- .i2c_over_aux = i2c,
- .write = false,
- .address = address,
- .length = len,
- .data = data,
- };
- struct aux_command command = {
- .payloads = &read_payload,
- .number_of_payloads = 1,
- .defer_delay = 0,
- .max_defer_write_retry = 0,
- .mot = mot
- };
-
- *read = 0;
-
- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
- BREAK_TO_DEBUGGER();
- return DDC_RESULT_FAILED_INVALID_OPERATION;
- }
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct aux_engine *aux_engine;
+ enum aux_channel_operation_result operation_result;
+ struct aux_request_transaction_data aux_req;
+ struct aux_reply_transaction_data aux_rep;
+ uint8_t returned_bytes = 0;
+ int res = -1;
+ uint32_t status;
- if (dal_i2caux_submit_aux_command(
- ddc->ctx->i2caux,
- ddc->ddc_pin,
- &command)) {
- *read = command.payloads->length;
- return DDC_RESULT_SUCESSFULL;
- }
+ memset(&aux_req, 0, sizeof(aux_req));
+ memset(&aux_rep, 0, sizeof(aux_rep));
- return DDC_RESULT_FAILED_OPERATION;
-}
+ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ aux_engine->funcs->acquire(aux_engine, ddc_pin);
-enum ddc_result dal_ddc_service_write_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- const uint8_t *data,
- uint32_t len)
-{
- struct aux_payload write_payload = {
- .i2c_over_aux = i2c,
- .write = true,
- .address = address,
- .length = len,
- .data = (uint8_t *)data,
- };
- struct aux_command command = {
- .payloads = &write_payload,
- .number_of_payloads = 1,
- .defer_delay = 0,
- .max_defer_write_retry = 0,
- .mot = mot
- };
-
- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
- BREAK_TO_DEBUGGER();
- return DDC_RESULT_FAILED_INVALID_OPERATION;
- }
+ aux_req.type = type;
+ aux_req.action = action;
+
+ aux_req.address = address;
+ aux_req.delay = 0;
+ aux_req.length = size;
+ aux_req.data = buffer;
- if (dal_i2caux_submit_aux_command(
- ddc->ctx->i2caux,
- ddc->ddc_pin,
- &command))
- return DDC_RESULT_SUCESSFULL;
+ aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
+ operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
- return DDC_RESULT_FAILED_OPERATION;
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ res = returned_bytes;
+
+ if (res <= size && res >= 0)
+ res = aux_engine->funcs->read_channel_reply(aux_engine, size,
+ buffer, reply,
+ &status);
+
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ res = 0;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ res = -1;
+ break;
+ }
+ aux_engine->funcs->release_engine(aux_engine);
+ return res;
}
/*test only function*/
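A hedged caller sketch for the reworked dc_link_aux_transfer() above. The transaction-type and action constants are the ones the i2caux layer defines elsewhere; treat the exact names as assumptions:

	/* Sketch: read 16 bytes of DPCD starting at DP_DPCD_REV (0x000) over
	 * native AUX. Per the switch above, the return value is a byte count
	 * on success, 0 on HPD disconnect, and -1 on other failures. */
	static int example_dpcd_read(struct ddc_service *ddc, uint8_t *buf)
	{
		uint8_t reply = 0;

		return dc_link_aux_transfer(ddc, 0x000, &reply, buf, 16,
					    AUX_TRANSACTION_TYPE_DP,
					    I2CAUX_TRANSACTION_ACTION_DP_READ);
	}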
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 7857cb42b3e6..a7553b6d59c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3,6 +3,7 @@
#include "dc.h"
#include "dc_link_dp.h"
#include "dm_helpers.h"
+#include "opp.h"
#include "inc/core_types.h"
#include "link_hwss.h"
@@ -38,7 +39,7 @@ static bool decide_fallback_link_setting(
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
enum link_training_result training_result);
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
@@ -93,8 +94,8 @@ static void dpcd_set_link_settings(
uint8_t rate = (uint8_t)
(lt_settings->link_settings.link_rate);
- union down_spread_ctrl downspread = {{0}};
- union lane_count_set lane_count_set = {{0}};
+ union down_spread_ctrl downspread = { {0} };
+ union lane_count_set lane_count_set = { {0} };
uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
@@ -164,11 +165,11 @@ static void dpcd_set_lt_pattern_and_lane_settings(
const struct link_training_settings *lt_settings,
enum hw_dp_training_pattern pattern)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
const uint32_t dpcd_base_lt_offset =
DP_TRAINING_PATTERN_SET;
uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = {{0}};
+ union dpcd_training_pattern dpcd_pattern = { {0} };
uint32_t lane;
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
@@ -232,7 +233,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
link,
DP_TRAINING_PATTERN_SET,
&dpcd_pattern.raw,
- sizeof(dpcd_pattern.raw) );
+ sizeof(dpcd_pattern.raw));
core_link_write_dpcd(
link,
@@ -246,7 +247,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
link,
dpcd_base_lt_offset,
dpcd_lt_buffer,
- size_in_bytes + sizeof(dpcd_pattern.raw) );
+ size_in_bytes + sizeof(dpcd_pattern.raw));
link->cur_lane_setting = lt_settings->lane_settings[0];
}
@@ -428,8 +429,8 @@ static void get_lane_status_and_drive_settings(
struct link_training_settings *req_settings)
{
uint8_t dpcd_buf[6] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
- struct link_training_settings request_settings = {{0}};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ struct link_training_settings request_settings = { {0} };
uint32_t lane;
memset(req_settings, '\0', sizeof(struct link_training_settings));
@@ -651,7 +652,7 @@ static bool perform_post_lt_adj_req_sequence(
if (req_drv_setting_changed) {
update_drive_settings(
- lt_settings,req_settings);
+ lt_settings, req_settings);
dc_link_dp_set_drive_settings(link,
lt_settings);
@@ -724,8 +725,8 @@ static enum link_training_result perform_channel_equalization_sequence(
enum hw_dp_training_pattern hw_tr_pattern;
uint32_t retries_ch_eq;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {{0}};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
hw_tr_pattern = get_supported_tp(link);
@@ -952,7 +953,10 @@ enum link_training_result dc_link_dp_perform_link_training(
* LINK_SPREAD_05_DOWNSPREAD_30KHZ :
* LINK_SPREAD_DISABLED;
*/
- lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ if (link->dp_ss_off)
+ lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED;
+ else
+ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
/* 1. set link rate, lane count and spread*/
dpcd_set_link_settings(link, &lt_settings);
@@ -1027,6 +1031,9 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.lane_settings[0].VOLTAGE_SWING,
lt_settings.lane_settings[0].PRE_EMPHASIS);
+ if (status != LINK_TRAINING_SUCCESS)
+ link->ctx->dc->debug_data.ltFailCount++;
+
return status;
}
@@ -1082,9 +1089,10 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting)
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count)
{
struct dc_link_settings max_link_cap = {0};
struct dc_link_settings cur_link_setting = {0};
@@ -1097,6 +1105,11 @@ bool dp_hbr_verify_link_cap(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
+ if (link->dc->debug.skip_detection_link_training) {
+ link->verified_link_cap = *known_limit_link_setting;
+ return true;
+ }
+
success = false;
skip_link_training = false;
@@ -1151,6 +1164,8 @@ bool dp_hbr_verify_link_cap(
skip_video_pattern);
if (status == LINK_TRAINING_SUCCESS)
success = true;
+ else
+ (*fail_count)++;
}
if (success)
@@ -1182,7 +1197,7 @@ bool dp_hbr_verify_link_cap(
return success;
}
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
{
@@ -1428,6 +1443,7 @@ static uint32_t bandwidth_in_kbps_from_link_settings(
uint32_t lane_count = link_setting->lane_count;
uint32_t kbps = link_rate_in_kbps;
+
kbps *= lane_count;
kbps *= 8; /* 8 bits per byte*/
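The computation above amounts to kbps = link_rate_in_kbps * lane_count * 8, since each 8b/10b symbol carries one data byte. A worked check, assuming HBR2 (code 0x14) on four lanes:

	static uint32_t dp_bw_kbps(uint32_t link_rate_code, uint32_t lanes)
	{
		/* code * 27000 = k-symbols/s per lane; 8 data bits per symbol */
		return link_rate_code * 27000 * lanes * 8;
	}

	/* dp_bw_kbps(0x14, 4) == 17280000, i.e. 17.28 Gbps for HBR2 x4 */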
@@ -1445,9 +1461,9 @@ bool dp_validate_mode_timing(
const struct dc_link_settings *link_setting;
/*always DP fail safe mode*/
- if (timing->pix_clk_khz == (uint32_t)25175 &&
- timing->h_addressable == (uint32_t)640 &&
- timing->v_addressable == (uint32_t)480)
+ if (timing->pix_clk_khz == (uint32_t) 25175 &&
+ timing->h_addressable == (uint32_t) 640 &&
+ timing->v_addressable == (uint32_t) 480)
return true;
/* We always use verified link settings */
@@ -1647,22 +1663,26 @@ static enum dc_status read_hpd_rx_irq_data(
irq_data->raw,
sizeof(union hpd_irq_data));
else {
- /* Read 2 bytes at this location,... */
+		/*
+		 * Read 14 bytes in a single read and then copy only the
+		 * required fields. This is more efficient than doing it in
+		 * two separate AUX reads.
+		 */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
retval = core_link_read_dpcd(
link,
DP_SINK_COUNT_ESI,
- irq_data->raw,
- 2);
+ tmp,
+ sizeof(tmp));
if (retval != DC_OK)
return retval;
- /* ... then read remaining 4 at the other location */
- retval = core_link_read_dpcd(
- link,
- DP_LANE0_1_STATUS_ESI,
- &irq_data->raw[2],
- 4);
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
}
return retval;
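The single 14-byte read above indexes the temporary buffer by DPCD address deltas. Spelled out with the register values from drm_dp_helper.h (values listed here as assumptions):

	/* DP_SINK_COUNT_ESI                  0x2002 -> tmp[0]
	 * DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0  0x2003 -> tmp[1]
	 * DP_LANE0_1_STATUS_ESI              0x200c -> tmp[10]
	 * DP_LANE2_3_STATUS_ESI              0x200d -> tmp[11]
	 * DP_LANE_ALIGN_STATUS_UPDATED_ESI   0x200e -> tmp[12]
	 * DP_SINK_STATUS_ESI                 0x200f -> tmp[13]
	 * sizeof(tmp) = 0x200f - 0x2002 + 1 = 14 bytes, hence one read. */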
@@ -1767,12 +1787,10 @@ static void dp_test_send_link_training(struct dc_link *link)
dp_retrain_link_dp_test(link, &link_settings, false);
}
-/* TODO hbr2 compliance eye output is unstable
+/* TODO Raven hbr2 compliance eye output is unstable
* (toggling on and off) with debugger break
 * This causes intermittent PHY automation failure
* Need to look into the root cause */
-static uint8_t force_tps4_for_cp2520 = 1;
-
static void dp_test_send_phy_test_pattern(struct dc_link *link)
{
union phy_test_pattern dpcd_test_pattern;
@@ -1832,13 +1850,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
break;
case PHY_TEST_PATTERN_CP2520_1:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
case PHY_TEST_PATTERN_CP2520_2:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
@@ -1991,12 +2009,16 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
{
- union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
- enum dc_status result = DDC_RESULT_UNKNOWN;
+ enum dc_status result;
+
bool status = false;
+
+ if (out_link_loss)
+ *out_link_loss = false;
/* For use cases related to down stream connection status change,
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/
@@ -2071,6 +2093,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
true, LINK_TRAINING_ATTEMPTS);
status = false;
+ if (out_link_loss)
+ *out_link_loss = true;
}
if (link->type == dc_connection_active_dongle &&
@@ -2257,6 +2281,11 @@ static void get_active_converter_info(
link->dpcd_caps.branch_hw_revision =
dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.branch_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
}
@@ -2305,12 +2334,14 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
int i;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@@ -2391,6 +2422,36 @@ static bool retrieve_link_cap(struct dc_link *link)
&link->dpcd_caps.sink_count.raw,
sizeof(link->dpcd_caps.sink_count.raw));
+ /* read sink ieee oui */
+ core_link_read_dpcd(link,
+ DP_SINK_OUI,
+ (uint8_t *)(&sink_id),
+ sizeof(sink_id));
+
+ link->dpcd_caps.sink_dev_id =
+ (sink_id.ieee_oui[0] << 16) +
+ (sink_id.ieee_oui[1] << 8) +
+ (sink_id.ieee_oui[2]);
+
+ memmove(
+ link->dpcd_caps.sink_dev_id_str,
+ sink_id.ieee_device_id,
+ sizeof(sink_id.ieee_device_id));
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_HW_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.sink_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.sink_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -2495,8 +2556,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
}
break;
@@ -2508,8 +2569,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
color_depth);
}
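The hunks above replace the file-static force_tps4_for_cp2520 with the per-ASIC capability dc->caps.force_dp_tps4_for_cp2520 (the field is added to struct dc_caps later in this diff). A hedged sketch of how a resource file would now opt in; the function is hypothetical:

	/* Sketch: enable the CP2520 -> TPS4 workaround for one ASIC family. */
	static void example_caps_init(struct dc *dc)
	{
		dc->caps.force_dp_tps4_for_cp2520 = true;
	}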
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 751f3ac9d921..ea6beccfd89d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -268,24 +268,30 @@ bool resource_construct(
return true;
}
+static int find_matching_clock_source(
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] == clock_source)
+ return i;
+ }
+ return -1;
+}
void resource_unreference_clock_source(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
-
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]--;
- break;
- }
-
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count--;
}
@@ -295,19 +301,31 @@ void resource_reference_clock_source(
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]++;
- break;
- }
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count++;
}
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i = find_matching_clock_source(pool, clock_source);
+
+ if (i > -1)
+ return res_ctx->clock_source_ref_count[i];
+
+ if (pool->dp_clock_source == clock_source)
+ return res_ctx->dp_clock_source_ref_count;
+
+ return -1;
+}
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2)
@@ -330,6 +348,9 @@ bool resource_are_streams_timing_synchronizable(
!= stream2->timing.pix_clk_khz)
return false;
+ if (stream1->clamping.c_depth != stream2->clamping.c_depth)
+ return false;
+
if (stream1->phy_pix_clk != stream2->phy_pix_clk
&& (!dc_is_dp_signal(stream1->signal)
|| !dc_is_dp_signal(stream2->signal)))
@@ -337,6 +358,20 @@ bool resource_are_streams_timing_synchronizable(
return true;
}
+static bool is_dp_and_hdmi_sharable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+{
+ if (stream1->ctx->dc->caps.disable_dp_clk_share)
+ return false;
+
+ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
+ stream2->clamping.c_depth != COLOR_DEPTH_888)
+ return false;
+
+ return true;
+}
static bool is_sharable_clk_src(
const struct pipe_ctx *pipe_with_clk_src,
@@ -348,15 +383,18 @@ static bool is_sharable_clk_src(
if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
return false;
- if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
+ (dc_is_dp_signal(pipe->stream->signal) &&
+ !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
+ pipe->stream)))
return false;
if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
- && dc_is_dvi_signal(pipe->stream->signal))
+ && dc_is_dual_link_signal(pipe->stream->signal))
return false;
if (dc_is_hdmi_signal(pipe->stream->signal)
- && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+ && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
return false;
if (!resource_are_streams_timing_synchronizable(
@@ -522,13 +560,12 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
}
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
- int recout_full_x, recout_full_y;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
@@ -597,20 +634,22 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
}
}
/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
- * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
- * ratio)
+ * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
+ * ratio)
*/
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
surf_src.x * plane_state->dst_rect.width / surf_src.width
* stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
surf_src.y * plane_state->dst_rect.height / surf_src.height
* stream->dst.height / stream->src.height;
- recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
- recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+ recout_full->width = plane_state->dst_rect.width
+ * stream->dst.width / stream->src.width;
+ recout_full->height = plane_state->dst_rect.height
+ * stream->dst.height / stream->src.height;
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
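The recout_full math in calculate_recout() above is plain proportional mapping into stream-dst space. A worked example with round numbers, assuming stream src 1920x1080 at (0,0), stream dst 3840x2160 at (0,0), plane dst_rect 960x540 at (100,50), and surf_src at (0,0):

	/* recout_full->x      = 0 + (100 - 0) * 3840 / 1920 - 0 = 200
	 * recout_full->y      = 0 + (50  - 0) * 2160 / 1080 - 0 = 100
	 * recout_full->width  = 960 * 3840 / 1920 = 1920
	 * recout_full->height = 540 * 2160 / 1080 = 1080
	 * i.e. the plane's unclipped output rect in stream-dst coordinates. */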
@@ -662,7 +701,7 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
{
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
@@ -680,15 +719,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
flip_vert_scan_dir = true;
else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
flip_horz_scan_dir = true;
- if (pipe_ctx->plane_state->horizontal_mirror)
- flip_horz_scan_dir = !flip_horz_scan_dir;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
rect_swap_helper(&src);
rect_swap_helper(&data->viewport_c);
rect_swap_helper(&data->viewport);
- }
+ } else if (pipe_ctx->plane_state->horizontal_mirror)
+		flip_horz_scan_dir = !flip_horz_scan_dir;
/*
* Init calculated according to formula:
@@ -708,127 +746,286 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
+ if (!flip_horz_scan_dir) {
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int vp_clip = (src.x + src.width) / vpc_div -
+ data->viewport_c.width - data->viewport_c.x;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part;
+
+ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+ data->ratios.horz, data->recout.x - recout_full->x));
+ int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : data->viewport.x;
+ data->viewport.x -= int_adj;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.x += int_part - data->taps.h_taps;
+ data->viewport.width -= int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ }
+
+ if (data->viewport_c.x) {
+ int int_part;
+
+ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+ data->ratios.horz_c, data->recout.x - recout_full->x));
+ int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+ data->viewport_c.x -= int_adj;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.x += int_part - data->taps.h_taps_c;
+ data->viewport_c.width -= int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ }
+ } else {
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
+ data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
+ }
+ if (data->viewport_c.x) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ }
- /* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
- int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
- int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
- int vp_clip = (src.x + src.width) / vpc_div -
- data->viewport_c.width - data->viewport_c.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
- int vp_clip = (src.y + src.height) / vpc_div -
- data->viewport_c.height - data->viewport_c.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
- }
-
- /* Adjust for non-0 viewport offset */
- if (data->viewport.x && !flip_horz_scan_dir) {
- int int_part;
-
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, recout_skip->width));
- int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
- if (int_part < data->taps.h_taps) {
- int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : data->viewport.x;
- data->viewport.x -= int_adj;
- data->viewport.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.x += int_part - data->taps.h_taps;
- data->viewport.width -= int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int int_part;
+ int end_offset = src.x + src.width
+ - data->viewport.x - data->viewport.width;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * right side of vp due to scan direction
+ */
+ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+ data->ratios.horz, data->recout.x - recout_full->x));
+ /*
+			 * this is the difference between the first pixel of the
+			 * viewport available to read and the init position, taking
+			 * into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.h) - end_offset;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : end_offset;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.width += int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
- }
-
- if (data->viewport_c.x && !flip_horz_scan_dir) {
- int int_part;
-
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, recout_skip->width));
- int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : data->viewport_c.x;
- data->viewport_c.x -= int_adj;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.x += int_part - data->taps.h_taps_c;
- data->viewport_c.width -= int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
+
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int int_part;
+ int end_offset = (src.x + src.width) / vpc_div
+ - data->viewport_c.x - data->viewport_c.width;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * right side of vp due to scan direction
+ */
+ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+ data->ratios.horz_c, data->recout.x - recout_full->x));
+ /*
+			 * this is the difference between the first pixel of the
+			 * viewport available to read and the init position, taking
+			 * into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : end_offset;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.width += int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ }
+
+ }
+ if (!flip_vert_scan_dir) {
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
}
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
- }
-
- if (data->viewport.y && !flip_vert_scan_dir) {
- int int_part;
-
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, recout_skip->height));
- int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
- if (int_part < data->taps.v_taps) {
- int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : data->viewport.y;
- data->viewport.y -= int_adj;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.y += int_part - data->taps.v_taps;
- data->viewport.height -= int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int vp_clip = (src.y + src.height) / vpc_div -
+ data->viewport_c.height - data->viewport_c.y;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
}
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
-
- if (data->viewport_c.y && !flip_vert_scan_dir) {
- int int_part;
-
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, recout_skip->height));
- int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : data->viewport_c.y;
- data->viewport_c.y -= int_adj;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.y += int_part - data->taps.v_taps_c;
- data->viewport_c.height -= int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.y) {
+ int int_part;
+
+ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+ data->ratios.vert, data->recout.y - recout_full->y));
+ int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : data->viewport.y;
+ data->viewport.y -= int_adj;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.y += int_part - data->taps.v_taps;
+ data->viewport.height -= int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+ }
+
+ if (data->viewport_c.y) {
+ int int_part;
+
+ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+ data->ratios.vert_c, data->recout.y - recout_full->y));
+ int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+ data->viewport_c.y -= int_adj;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.y += int_part - data->taps.v_taps_c;
+ data->viewport_c.height -= int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+ }
+ } else {
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.y) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
+ data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
+ }
+ if (data->viewport_c.y) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+ data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+ }
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int int_part;
+ int end_offset = src.y + src.height
+ - data->viewport.y - data->viewport.height;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * right side of vp due to scan direction
+ */
+ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+ data->ratios.vert, data->recout.y - recout_full->y));
+ /*
+			 * this is the difference between the first pixel of the
+			 * viewport available to read and the init position, taking
+			 * into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.v) - end_offset;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : end_offset;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.height += int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+ }
+
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int int_part;
+ int end_offset = (src.y + src.height) / vpc_div
+ - data->viewport_c.y - data->viewport_c.height;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * right side of vp due to scan direction
+ */
+ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+ data->ratios.vert_c, data->recout.y - recout_full->y));
+ /*
+			 * this is the difference between the first pixel of the
+			 * viewport available to read and the init position, taking
+			 * into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : end_offset;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.height += int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
}
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
}
/* Interlaced inits based on final vert inits */
@@ -846,7 +1043,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- struct view recout_skip = { 0 };
+ struct rect recout_full = { 0 };
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
@@ -866,7 +1063,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
return false;
- calculate_recout(pipe_ctx, &recout_skip);
+ calculate_recout(pipe_ctx, &recout_full);
/**
* Setting line buffer pixel depth to 24bpp yields banding
@@ -910,7 +1107,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (res)
/* May need to re-check lb size after this in some obscure scenario */
- calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+ calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1546,8 +1743,8 @@ enum dc_status dc_add_stream_to_ctx(
struct dc_context *dc_ctx = dc->ctx;
enum dc_status res;
- if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
- DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+ if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
+ DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
return DC_ERROR_UNEXPECTED;
}
@@ -1789,7 +1986,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dis_clk = dc->res_pool->display_clock;
+ dst_ctx->dis_clk = dc->res_pool->dccg;
}
enum dc_status dc_validate_global_state(
@@ -2347,7 +2544,8 @@ static void set_hdr_static_info_packet(
{
/* HDR Static Metadata info packet for HDR10 */
- if (!stream->hdr_static_metadata.valid)
+ if (!stream->hdr_static_metadata.valid ||
+ stream->use_dynamic_meta)
return;
*info_packet = stream->hdr_static_metadata;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 25fae38409ab..9971b515c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -53,6 +53,10 @@ static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init
sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
sink->converter_disable_audio = init_params->converter_disable_audio;
sink->dc_container_id = NULL;
+ sink->sink_id = init_params->link->ctx->dc_sink_id_count;
+	// increment dc_sink_id_count because we don't want two sinks with
+	// the same ID unless they are actually the same sink
+ init_params->link->ctx->dc_sink_id_count++;
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 3732a1de9d6c..fdcc8ab19bf3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,8 @@
#include "ipp.h"
#include "timing_generator.h"
+#define DC_LOGGER dc->ctx->logger
+
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -212,6 +214,8 @@ bool dc_stream_set_cursor_attributes(
}
core_dc->hwss.set_cursor_attribute(pipe_ctx);
+ if (core_dc->hwss.set_cursor_sdr_white_level)
+ core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (pipe_to_program)
@@ -317,16 +321,10 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
return ret;
}
-
-void dc_stream_log(
- const struct dc_stream_state *stream,
- struct dal_logger *dm_logger,
- enum dc_log_type log_type)
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
-
- dm_logger_write(dm_logger,
- log_type,
- "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+ DC_LOG_DC(
+ "core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
stream,
stream->src.x,
stream->src.y,
@@ -337,21 +335,18 @@ void dc_stream_log(
stream->dst.width,
stream->dst.height,
stream->output_color_space);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
stream->timing.pix_clk_khz,
stream->timing.h_total,
stream->timing.v_total,
stream->timing.pixel_encoding,
stream->timing.display_color_depth);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tsink name: %s, serial: %d\n",
stream->sink->edid_caps.display_name,
stream->sink->edid_caps.serial_number);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tlink: %d\n",
stream->sink->link->link_index);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 68a71adeb12e..8fb3aefd195c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -84,6 +84,17 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
return plane_state;
}
+/**
+ * dc_plane_get_status() - Look up the pipe context of plane_state,
+ * update the pending status of that pipe context, and return
+ * plane_state->status.
+ *
+ * @plane_state: pointer to the plane_state to get the status of
+ */
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state)
{
@@ -181,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
kref_put(&tf->refcount, dc_transfer_func_free);
}
-struct dc_transfer_func *dc_create_transfer_func()
+struct dc_transfer_func *dc_create_transfer_func(void)
{
struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 9cfde0ccf4e9..6c9990bef267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.44"
+#define DC_VER "3.1.59"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -68,6 +68,7 @@ struct dc_caps {
uint32_t max_planes;
uint32_t max_downscale_ratio;
uint32_t i2c_speed_in_khz;
+ uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
unsigned int max_video_width;
int linear_pitch_alignment;
@@ -76,6 +77,9 @@ struct dc_caps {
bool is_apu;
bool dual_link_dvi;
bool post_blend_color_processing;
+ bool force_dp_tps4_for_cp2520;
+ bool disable_dp_clk_share;
+ bool psp_setup_panel_mode;
};
struct dc_dcc_surface_param {
@@ -168,6 +172,12 @@ struct dc_config {
bool disable_disp_pll_sharing;
};
+enum visual_confirm {
+ VISUAL_CONFIRM_DISABLE = 0,
+ VISUAL_CONFIRM_SURFACE = 1,
+ VISUAL_CONFIRM_HDR = 2,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -185,6 +195,10 @@ enum wm_report_mode {
WM_REPORT_OVERRIDE = 1,
};
+/*
+ * For any clocks that may differ per pipe,
+ * only the max is stored in this structure
+ */
struct dc_clocks {
int dispclk_khz;
int max_supported_dppclk_khz;
@@ -193,10 +207,11 @@ struct dc_clocks {
int socclk_khz;
int dcfclk_deep_sleep_khz;
int fclk_khz;
+ int phyclk_khz;
};
-struct dc_debug {
- bool surface_visual_confirm;
+struct dc_debug_options {
+ enum visual_confirm visual_confirm;
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
@@ -227,6 +242,7 @@ struct dc_debug {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
+ bool optimized_watermark;
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
@@ -242,8 +258,19 @@ struct dc_debug {
bool always_use_regamma;
bool p010_mpo_support;
bool recovery_enabled;
+ bool avoid_vbios_exec_table;
+ bool scl_reset_length10;
+ bool hdmi20_disable;
+ bool skip_detection_link_training;
+};
+struct dc_debug_data {
+ uint32_t ltFailCount;
+ uint32_t i2cErrorCount;
+ uint32_t auxErrorCount;
};
+
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -252,8 +279,7 @@ struct dc {
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
- struct dc_debug debug;
-
+ struct dc_debug_options debug;
struct dc_context *ctx;
uint8_t link_count;
@@ -288,9 +314,9 @@ struct dc {
bool apply_edp_fast_boot_optimization;
/* FBC compressor */
-#if defined(CONFIG_DRM_AMD_DC_FBC)
struct compressor *fbc_compressor;
-#endif
+
+ struct dc_debug_data debug_data;
};
enum frame_buffer_mode {
@@ -358,6 +384,7 @@ enum dc_transfer_func_type {
TF_TYPE_PREDEFINED,
TF_TYPE_DISTRIBUTED_POINTS,
TF_TYPE_BYPASS,
+ TF_TYPE_HWPWL
};
struct dc_transfer_func_distributed_points {
@@ -377,16 +404,22 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_PQ,
TRANSFER_FUNCTION_LINEAR,
TRANSFER_FUNCTION_UNITY,
+ TRANSFER_FUNCTION_HLG,
+ TRANSFER_FUNCTION_HLG12,
+ TRANSFER_FUNCTION_GAMMA22
};
struct dc_transfer_func {
struct kref refcount;
- struct dc_transfer_func_distributed_points tf_pts;
enum dc_transfer_func_type type;
enum dc_transfer_func_predefined tf;
/* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
uint32_t sdr_ref_white_level;
struct dc_context *ctx;
+ union {
+ struct pwl_params pwl;
+ struct dc_transfer_func_distributed_points tf_pts;
+ };
};
/*
@@ -616,9 +649,14 @@ struct dpcd_caps {
struct dc_dongle_caps dongle_caps;
uint32_t sink_dev_id;
+ int8_t sink_dev_id_str[6];
+ int8_t sink_hw_revision;
+ int8_t sink_fw_revision[2];
+
uint32_t branch_dev_id;
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
+ int8_t branch_fw_revision[2];
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
@@ -661,9 +699,13 @@ struct dc_sink {
struct dc_link *link;
struct dc_context *ctx;
+ uint32_t sink_id;
+
/* private to dc_sink.c */
+ // refcount must be the last member in dc_sink, since we want the
+ // sink structure to be logically cloneable up to (but not including)
+ // refcount
struct kref refcount;
-
};
void dc_sink_retain(struct dc_sink *sink);
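A note on the dc_transfer_func change above: tf_pts now shares storage with the new pwl member, so only the representation selected by type is valid at any given time. A minimal sketch of the intended discipline (caller code assumed, not part of this patch):

    struct dc_transfer_func *tf = dc_create_transfer_func();

    if (tf) {
        tf->type = TF_TYPE_DISTRIBUTED_POINTS;
        /* populate tf->tf_pts here; tf->pwl aliases the same bytes */

        /* switching to the hardware PWL representation invalidates tf_pts */
        tf->type = TF_TYPE_HWPWL;
        /* populate tf->pwl here */
    }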
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index d9b84ec7954c..90082bab71f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -198,6 +198,10 @@ struct dc_vbios_funcs {
void (*post_init)(struct dc_bios *bios);
void (*bios_parser_destroy)(struct dc_bios **dcb);
+
+ enum bp_result (*get_board_layout_info)(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info);
};
struct bios_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index e1affeb5cc51..05c8c31d8b31 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -25,6 +25,65 @@
#ifndef DC_DDC_TYPES_H_
#define DC_DDC_TYPES_H_
+enum aux_transaction_type {
+ AUX_TRANSACTION_TYPE_DP,
+ AUX_TRANSACTION_TYPE_I2C
+};
+
+
+enum i2caux_transaction_action {
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+ I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+ I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
+
+enum aux_channel_operation_result {
+ AUX_CHANNEL_OPERATION_SUCCEEDED,
+ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
+ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+};
+
+
+struct aux_request_transaction_data {
+ enum aux_transaction_type type;
+ enum i2caux_transaction_action action;
+ /* 20-bit AUX channel transaction address */
+ uint32_t address;
+ /* delay, in 100-microsecond units */
+ uint8_t delay;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_transaction_reply {
+ AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+ AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+ AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+ AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
+
+ AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+ enum aux_transaction_reply status;
+ uint32_t length;
+ uint8_t *data;
+};
+
struct i2c_payload {
bool write;
uint8_t address;
@@ -109,7 +168,7 @@ struct ddc_service {
uint32_t address;
uint32_t edid_buf_len;
- uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+ uint8_t edid_buf[DC_MAX_EDID_BUFFER_SIZE];
};
#endif /* DC_DDC_TYPES_H_ */
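For orientation, here is a minimal sketch (not part of the patch) of filling the new request structure for a native-AUX DPCD read; the helper name and the DPCD address 0x200 (SINK_COUNT) are illustrative assumptions:

    static void fill_dpcd_read_request(struct aux_request_transaction_data *req,
                                       uint8_t *buf, uint32_t len)
    {
        req->type = AUX_TRANSACTION_TYPE_DP;
        req->action = I2CAUX_TRANSACTION_ACTION_DP_READ;
        req->address = 0x200;   /* 20-bit native AUX (DPCD) address */
        req->delay = 0;         /* in 100-microsecond units */
        req->length = len;      /* bytes expected back */
        req->data = buf;        /* reply payload lands here */
    }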
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 90bccd5ccaa2..da93ab43f2d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -430,7 +430,7 @@ union test_request {
struct {
uint8_t LINK_TRAINING :1;
uint8_t LINK_TEST_PATTRN :1;
- uint8_t EDID_REAT :1;
+ uint8_t EDID_READ :1;
uint8_t PHY_TEST_PATTERN :1;
uint8_t AUDIO_TEST_PATTERN :1;
uint8_t RESERVED :1;
@@ -443,7 +443,8 @@ union test_response {
struct {
uint8_t ACK :1;
uint8_t NO_ACK :1;
- uint8_t RESERVED :6;
+ uint8_t EDID_CHECKSUM_WRITE:1;
+ uint8_t RESERVED :5;
} bits;
uint8_t raw;
};
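The new EDID_CHECKSUM_WRITE bit matches the DP automated-test flow: after servicing an EDID_READ test request, the source writes the EDID checksum back and acknowledges with this bit set. A hedged sketch of composing such a response value (usage assumed, not shown in this patch):

    union test_response resp = { .raw = 0 };

    resp.bits.ACK = 1;
    resp.bits.EDID_CHECKSUM_WRITE = 1;  /* checksum was written back */
    /* resp.raw would then be written to the TEST_RESPONSE DPCD register */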
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index bd0fda0ceb91..e68077e65565 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -255,3 +255,54 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
return reg_val;
}
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t data)
+{
+ dm_write_reg(ctx, addr_index, index);
+ dm_write_reg(ctx, addr_data, data);
+}
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index)
+{
+ uint32_t value = 0;
+
+ dm_write_reg(ctx, addr_index, index);
+ value = dm_read_reg(ctx, addr_data);
+
+ return value;
+}
+
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+ i++;
+ }
+
+ generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
+ va_end(ap);
+
+ return reg_val;
+}
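A hedged usage sketch for the new indirect-register helpers (all register and field names below are hypothetical): generic_indirect_reg_update_ex takes the current register value, the field count n, and then n (shift, mask, value) triples, the first of which is passed as named parameters:

    uint32_t val = generic_read_indirect_reg(ctx, REG_INDEX, REG_DATA, PHY_CTRL);

    val = generic_indirect_reg_update_ex(ctx, REG_INDEX, REG_DATA, PHY_CTRL,
            val, 2,                         /* update two fields */
            PHY_EN_SHIFT, PHY_EN_MASK, 1,
            PHY_PWR_SHIFT, PHY_PWR_MASK, 0);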
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index b1f70579d61b..b789cb2b354b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,13 +192,14 @@ enum surface_pixel_format {
/*swapped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
-
+ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+ SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
SURFACE_PIXEL_FORMAT_INVALID
/*grow 444 video here if necessary */
@@ -403,9 +404,11 @@ struct dc_cursor_position {
struct dc_cursor_mi_param {
unsigned int pixel_clk_khz;
unsigned int ref_clk_khz;
- unsigned int viewport_x_start;
- unsigned int viewport_width;
+ struct rect viewport;
struct fixed31_32 h_scale_ratio;
+ struct fixed31_32 v_scale_ratio;
+ enum dc_rotation_angle rotation;
+ bool mirror;
};
/* IPP related types */
@@ -414,6 +417,7 @@ enum {
GAMMA_RGB_256_ENTRIES = 256,
GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
GAMMA_CS_TFM_1D_ENTRIES = 4096,
+ GAMMA_CUSTOM_ENTRIES = 4096,
GAMMA_MAX_ENTRIES = 4096
};
@@ -421,6 +425,7 @@ enum dc_gamma_type {
GAMMA_RGB_256 = 1,
GAMMA_RGB_FLOAT_1024 = 2,
GAMMA_CS_TFM_1D = 3,
+ GAMMA_CUSTOM = 4,
};
struct dc_csc_transform {
@@ -489,6 +494,7 @@ struct dc_cursor_attributes {
uint32_t height;
enum dc_cursor_color_format color_format;
+ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* In case we support HW Cursor rotation in the future */
enum dc_rotation_angle rotation_angle;
@@ -496,6 +502,11 @@ struct dc_cursor_attributes {
union dc_cursor_attribute_flags attribute_flags;
};
+struct dpp_cursor_attributes {
+ int bias;
+ int scale;
+};
+
/* OPP */
enum dc_color_space {
@@ -567,25 +578,25 @@ struct scaling_taps {
};
enum dc_timing_standard {
- TIMING_STANDARD_UNDEFINED,
- TIMING_STANDARD_DMT,
- TIMING_STANDARD_GTF,
- TIMING_STANDARD_CVT,
- TIMING_STANDARD_CVT_RB,
- TIMING_STANDARD_CEA770,
- TIMING_STANDARD_CEA861,
- TIMING_STANDARD_HDMI,
- TIMING_STANDARD_TV_NTSC,
- TIMING_STANDARD_TV_NTSC_J,
- TIMING_STANDARD_TV_PAL,
- TIMING_STANDARD_TV_PAL_M,
- TIMING_STANDARD_TV_PAL_CN,
- TIMING_STANDARD_TV_SECAM,
- TIMING_STANDARD_EXPLICIT,
+ DC_TIMING_STANDARD_UNDEFINED,
+ DC_TIMING_STANDARD_DMT,
+ DC_TIMING_STANDARD_GTF,
+ DC_TIMING_STANDARD_CVT,
+ DC_TIMING_STANDARD_CVT_RB,
+ DC_TIMING_STANDARD_CEA770,
+ DC_TIMING_STANDARD_CEA861,
+ DC_TIMING_STANDARD_HDMI,
+ DC_TIMING_STANDARD_TV_NTSC,
+ DC_TIMING_STANDARD_TV_NTSC_J,
+ DC_TIMING_STANDARD_TV_PAL,
+ DC_TIMING_STANDARD_TV_PAL_M,
+ DC_TIMING_STANDARD_TV_PAL_CN,
+ DC_TIMING_STANDARD_TV_SECAM,
+ DC_TIMING_STANDARD_EXPLICIT,
/*!< For explicit timings from EDID, VBIOS, etc.*/
- TIMING_STANDARD_USER_OVERRIDE,
+ DC_TIMING_STANDARD_USER_OVERRIDE,
/*!< For mode timing override by user*/
- TIMING_STANDARD_MAX
+ DC_TIMING_STANDARD_MAX
};
enum dc_color_depth {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8a716baa1203..d43cefbc43d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -73,6 +73,7 @@ struct dc_link {
enum dc_irq_source irq_source_hpd;
enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
bool is_hpd_filter_disabled;
+ bool dp_ss_off;
/* caps is the same as reported_link_cap. link_training uses
* reported_link_cap. Will clean up. TODO
@@ -141,6 +142,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream);
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
@@ -172,7 +175,7 @@ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
* false - no change in Downstream port status. No further action required
* from DM. */
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data);
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
struct dc_sink_init_data;
@@ -210,10 +213,29 @@ bool dc_link_dp_set_test_pattern(
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
/*
* DPCD access interfaces
*/
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link);
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+void dc_link_enable_hpd(const struct dc_link *link);
+void dc_link_disable_hpd(const struct dc_link *link);
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d7e6d53bb383..cbfe418006cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -59,6 +59,9 @@ struct dc_stream_state {
struct freesync_context freesync_ctx;
struct dc_info_packet hdr_static_metadata;
+ PHYSICAL_ADDRESS_LOC dmdata_address;
+ bool use_dynamic_meta;
+
struct dc_transfer_func *out_transfer_func;
struct colorspace_transform gamut_remap_matrix;
struct dc_csc_transform csc_color_matrix;
@@ -97,6 +100,7 @@ struct dc_stream_state {
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
+ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* from stream struct */
struct kref refcount;
@@ -144,10 +148,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
/*
* Log the current stream state.
*/
-void dc_stream_log(
- const struct dc_stream_state *stream,
- struct dal_logger *dc_logger,
- enum dc_log_type log_type);
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream);
uint8_t dc_get_current_stream_count(struct dc *dc);
struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
@@ -255,6 +256,7 @@ bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position);
+
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state **stream,
int num_streams,
@@ -299,9 +301,4 @@ bool dc_stream_get_crtc_position(struct dc *dc,
unsigned int *v_pos,
unsigned int *nom_v_pos);
-void dc_stream_set_static_screen_events(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- const struct dc_static_screen_events *events);
-
#endif /* DC_STREAM_H_ */
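With the new prototype the caller no longer threads a logger through; dc_stream_log() picks it up from dc->ctx via the DC_LOGGER/DC_LOG_DC machinery shown in dc_stream.c above, so a call site reduces to:

    dc_stream_log(dc, stream);  /* logs via dc->ctx->logger */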
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 76df2534c4a4..8c6eb78b0c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -77,8 +77,6 @@ struct dc_context {
struct dc *dc;
void *driver_context; /* e.g. amdgpu_device */
-
- struct dal_logger *logger;
void *cgs_device;
enum dce_environment dce_environment;
@@ -92,13 +90,12 @@ struct dc_context {
bool created_bios;
struct gpio_service *gpio_service;
struct i2caux *i2caux;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint32_t dc_sink_id_count;
uint64_t fbc_gpu_addr;
-#endif
};
-#define MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 512
#define EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -137,13 +134,13 @@ enum plane_stereo_format {
*/
enum dc_edid_connector_type {
- EDID_CONNECTOR_UNKNOWN = 0,
- EDID_CONNECTOR_ANALOG = 1,
- EDID_CONNECTOR_DIGITAL = 10,
- EDID_CONNECTOR_DVI = 11,
- EDID_CONNECTOR_HDMIA = 12,
- EDID_CONNECTOR_MDDI = 14,
- EDID_CONNECTOR_DISPLAYPORT = 15
+ DC_EDID_CONNECTOR_UNKNOWN = 0,
+ DC_EDID_CONNECTOR_ANALOG = 1,
+ DC_EDID_CONNECTOR_DIGITAL = 10,
+ DC_EDID_CONNECTOR_DVI = 11,
+ DC_EDID_CONNECTOR_HDMIA = 12,
+ DC_EDID_CONNECTOR_MDDI = 14,
+ DC_EDID_CONNECTOR_DISPLAYPORT = 15
};
enum dc_edid_status {
@@ -169,7 +166,7 @@ struct dc_cea_audio_mode {
struct dc_edid {
uint32_t length;
- uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+ uint8_t raw_edid[DC_MAX_EDID_BUFFER_SIZE];
};
/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
@@ -195,6 +192,7 @@ union display_content_support {
struct dc_panel_patch {
unsigned int dppowerup_delay;
+ unsigned int extra_t12_ms;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 11401fd8e535..825537bd4545 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
new file mode 100644
index 000000000000..3f5b2e6f7553
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_aux.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+ aux110->base.ctx
+#define REG(reg_name)\
+ (aux110->regs->reg_name)
+
+#define DC_LOGGER \
+ engine->ctx->logger
+
+#include "reg_helper.h"
+
+#define FROM_AUX_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine_dce110, base)
+
+#define FROM_ENGINE(ptr) \
+ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+#define FROM_AUX_ENGINE_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine, base)
+enum {
+ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+ AUX_TIMED_OUT_RETRY_COUNTER = 2,
+ AUX_DEFER_RETRY_COUNTER = 6
+};
+static void release_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ dal_ddc_close(engine->ddc);
+
+ engine->ddc = NULL;
+
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+ if (field == DMCU_CAN_ACCESS_AUX)
+ return false;
+ /* enable AUX before requesting SW access to AUX */
+ value = REG_READ(AUX_CONTROL);
+ field = get_reg_field_value(value,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (field == 0) {
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* reset the DP_AUX block as part of the enable sequence */
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_RESET);
+ }
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* poll HW to make sure the reset is done */
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+ 1, 11);
+
+ set_reg_field_value(
+ value,
+ 0,
+ AUX_CONTROL,
+ AUX_RESET);
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+ 1, 11);
+ }
+ } /*if (field)*/
+
+ /* request SW to access AUX */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+ value = REG_READ(AUX_ARB_CONTROL);
+ field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+ ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+ ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+ (0xFF & (address))
+
+static void submit_channel_request(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t value;
+ uint32_t length;
+
+ bool is_write =
+ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+ if (REG(AUXN_IMPCAL)) {
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+ /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP toggles OVERRIDE? */
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+ }
+ /* set the delay and the number of bytes to write */
+
+ /* The length includes
+ * the 4-bit header and the 20-bit address
+ * (that is, 3 bytes).
+ * If the requested length is non-zero,
+ * an additional byte specifying the length is required.
+ */
+
+ length = request->length ? 4 : 3;
+ if (is_write)
+ length += request->length;
+
+ REG_UPDATE_2(AUX_SW_CONTROL,
+ AUX_SW_START_DELAY, request->delay,
+ AUX_SW_WR_BYTES, length);
+
+ /* program action and address and payload data (if 'is_write') */
+ value = REG_UPDATE_4(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_DATA_RW, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+ value = REG_SET_2(AUX_SW_DATA, value,
+ AUX_SW_AUTOINCREMENT_DISABLE, 0,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+ if (request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->length - 1);
+ }
+
+ if (is_write) {
+ /* Load the HW buffer with the Data to be sent.
+ * This is relevant for write operations.
+ * For read, the received data will be
+ * processed in process_channel_reply().
+ */
+ uint32_t i = 0;
+
+ while (i < request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->data[i]);
+
+ ++i;
+ }
+ }
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
+
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ uint8_t *buffer, uint8_t *reply_result,
+ uint32_t *sw_status)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t bytes_replied;
+ uint32_t reply_result_32;
+
+ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+ &bytes_replied);
+
+ /* In case HPD is LOW, exit AUX transaction */
+ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return -1;
+
+ /* Need at least the status byte */
+ if (!bytes_replied)
+ return -1;
+
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
+
+ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
+
+ /* First byte was already used to get the command status */
+ --bytes_replied;
+
+ /* Do not overflow buffer */
+ if (bytes_replied > size)
+ return -1;
+
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+ buffer[i] = aux_sw_data_val;
+ ++i;
+ }
+
+ return i;
+ }
+
+ return 0;
+}
+
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ int bytes_replied;
+ uint8_t reply_result;
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ return;
+ }
+
+ if (bytes_replied < 0) {
+ /* Need to handle an error case...
+ * Hopefully, the upper-layer function won't call this function
+ * if the number of bytes in the reply was 0, because there was
+ * surely an error that was asserted and should have been
+ * handled for the hot-plug case; this can still happen.
+ */
+ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
+ return;
+ }
+ } else {
+
+ switch (reply_result) {
+ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+ break;
+ case 2: /* DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+ break;
+ case 4: /* AUX ACK / I2C NACK */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+ break;
+ case 8: /* AUX ACK / I2C DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+ break;
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+ }
+}
+
+static enum aux_channel_operation_result get_channel_status(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value;
+
+ if (returned_bytes == NULL) {
+ /* caller passed a NULL pointer */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+ }
+ *returned_bytes = 0;
+
+ /* poll to make sure that SW_DONE is asserted */
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
+ /* Note that the following bits are set in 'status.bits'
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+ *
+ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+ * HW debugging bit and should be ignored.
+ */
+ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+ (value &
+ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+ *returned_bytes = get_reg_field_value(value,
+ AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT);
+
+ if (*returned_bytes == 0)
+ return
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+ else {
+ *returned_bytes -= 1;
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+ /* time_elapsed >= aux_engine->timeout_period;
+ * AUX_SW_STATUS__AUX_SW_HPD_DISCON is not set at this point
+ */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+}
+static void process_read_reply(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->defer_retry_aux = 0;
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_read_request(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else {
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->buffer;
+
+ process_read_reply(engine, ctx);
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool read_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct read_command_context ctx;
+
+ ctx.buffer = request->payload.data;
+ ctx.current_read_length = request->payload.length;
+ ctx.offset = 0;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.invalid_reply_retry_aux_on_ack = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ do {
+ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+ ctx.request.data = ctx.buffer + ctx.offset;
+ ctx.request.length = ctx.current_read_length;
+
+ process_read_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
+
+static void process_write_reply(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->operation_succeeded = true;
+
+ if (ctx->returned_byte) {
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ctx->current_write_length = 0;
+
+ ++ctx->ack_m_retry;
+
+ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(300);
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->defer_retry_aux = 0;
+ ctx->ack_m_retry = 0;
+ ctx->transaction_complete = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+ ctx->current_write_length = 0;
+
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_write_request(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->reply_data;
+
+ process_write_reply(engine, ctx);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool write_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct write_command_context ctx;
+
+ ctx.mot = middle_of_transaction;
+ ctx.buffer = request->payload.data;
+ ctx.current_write_length = request->payload.length;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.ack_m_retry = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ ctx.max_defer_retry =
+ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+ do {
+ ctx.request.data = ctx.buffer;
+ ctx.request.length = ctx.current_write_length;
+
+ process_write_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
+static bool end_of_transaction_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request)
+{
+ struct i2caux_transaction_request dummy_request;
+ uint8_t dummy_data;
+
+ /* [tcheng] We only need to send the stop (read with MOT = 0)
+ * for I2C-over-Aux, not native AUX
+ */
+
+ if (request->payload.address_space !=
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+ return false;
+
+ dummy_request.operation = request->operation;
+ dummy_request.payload.address_space = request->payload.address_space;
+ dummy_request.payload.address = request->payload.address;
+
+ /*
+ * Add a dummy byte due to some receiver quirk
+ * where one byte is sent along with MOT = 0.
+ * Ideally this should be 0.
+ */
+
+ dummy_request.payload.length = 0;
+ dummy_request.payload.data = &dummy_data;
+
+ if (request->operation == I2CAUX_TRANSACTION_READ)
+ return read_command(engine, &dummy_request, false);
+ else
+ return write_command(engine, &dummy_request, false);
+
+ /* according to Syed, DoDummyMOT is not needed now */
+}
+static bool submit_request(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+ result = read_command(engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+ result = write_command(engine, request, mot_used);
+ break;
+ default:
+ result = false;
+ }
+
+ /* [tcheng]
+ * need to send a stop for the last transaction to free up the AUX;
+ * if the above command fails, this would be the last transaction
+ */
+
+ if (!middle_of_transaction || !result)
+ end_of_transaction_command(engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+}
+enum i2caux_engine_type get_engine_type(
+ const struct aux_engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+static bool acquire(
+ struct aux_engine *engine,
+ struct ddc *ddc)
+{
+
+ enum gpio_result result;
+
+ if (engine->funcs->is_engine_available) {
+ /*check whether SW could use the engine*/
+ if (!engine->funcs->is_engine_available(engine))
+ return false;
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ if (!engine->funcs->acquire_engine(engine)) {
+ dal_ddc_close(ddc);
+ return false;
+ }
+
+ engine->ddc = ddc;
+
+ return true;
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .read_channel_reply = read_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+ .release_engine = release_engine,
+ .destroy_engine = dce110_engine_destroy,
+ .submit_request = submit_request,
+ .get_engine_type = get_engine_type,
+ .acquire = acquire,
+};
+
+void dce110_engine_destroy(struct aux_engine **engine)
+{
+
+ struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
+
+ kfree(engine110);
+ *engine = NULL;
+
+}
+struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs)
+{
+ aux_engine110->base.ddc = NULL;
+ aux_engine110->base.ctx = ctx;
+ aux_engine110->base.delay = 0;
+ aux_engine110->base.max_defer_write_retry = 0;
+ aux_engine110->base.funcs = &aux_engine_funcs;
+ aux_engine110->base.inst = inst;
+ aux_engine110->timeout_period = timeout_period;
+ aux_engine110->regs = regs;
+
+ return &aux_engine110->base;
+}
+
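To make the request encoding above concrete, here is the byte layout submit_channel_request() programs into AUX_SW_DATA for a native DP read of 16 bytes at DPCD address 0x00200 (values worked out from the COMPOSE_* macros above; the scenario itself is illustrative):

    uint32_t action  = I2CAUX_TRANSACTION_ACTION_DP_READ;        /* 0x90 */
    uint32_t address = 0x00200;

    uint32_t byte0 = COMPOSE_AUX_SW_DATA_16_20(action, address); /* 0x90: cmd | addr[19:16] */
    uint32_t byte1 = COMPOSE_AUX_SW_DATA_8_15(address);          /* 0x02: addr[15:8] */
    uint32_t byte2 = COMPOSE_AUX_SW_DATA_0_7(address);           /* 0x00: addr[7:0] */
    uint32_t byte3 = 16 - 1;  /* length byte, sent only when request->length != 0 */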
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
new file mode 100644
index 000000000000..f7caab85dc80
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+#include "aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_DATA, DP_AUX, id), \
+ SRI(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_STATUS, DP_AUX, id), \
+ SR(AUXN_IMPCAL), \
+ SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_ARB_CONTROL;
+ uint32_t AUX_SW_DATA;
+ uint32_t AUX_SW_CONTROL;
+ uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_SW_STATUS;
+ uint32_t AUXN_IMPCAL;
+ uint32_t AUXP_IMPCAL;
+
+ uint32_t AUX_RESET_MASK;
+};
+
+enum { /* This is the timeout as defined in DP 1.2a,
+ * 2.3.4 "Detailed uPacket TX AUX CH State Description".
+ */
+ AUX_TIMEOUT_PERIOD = 400,
+
+ /* Ideally, the SW timeout should be just above 550usec
+ * which is programmed in HW.
+ * But the SW timeout of 600usec is not reliable,
+ * because on some systems, delay_in_microseconds()
+ * returns faster than it should.
+ * EPR #379763: by trial-and-error on different systems,
+ * 700usec is the minimum reliable SW timeout for polling
+ * the AUX_SW_STATUS.AUX_SW_DONE bit.
+ * This timeout expires *only* when there are
+ * AUX Error or AUX Timeout conditions - not during normal operation.
+ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+ * at most within ~240usec. That means,
+ * increasing this timeout will not affect normal operation,
+ * and we'll timeout after
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * This timeout is especially important for
+ * resume from S3 and CTS.
+ */
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+struct aux_engine_dce110 {
+ struct aux_engine base;
+ const struct dce110_aux_registers *regs;
+ struct {
+ uint32_t aux_control;
+ uint32_t aux_arb_control;
+ uint32_t aux_sw_data;
+ uint32_t aux_sw_control;
+ uint32_t aux_interrupt_control;
+ uint32_t aux_sw_status;
+ } addr;
+ uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+ uint32_t engine_id;
+ uint32_t timeout_period;
+ struct dc_context *ctx;
+ const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dce110_aux_engine_construct(
+ struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs);
+
+void dce110_engine_destroy(struct aux_engine **engine);
+
+bool dce110_aux_engine_acquire(
+ struct aux_engine *aux_engine,
+ struct ddc *ddc);
+#endif
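A hedged construction sketch using the constants above (the surrounding objects are assumed): the software timeout passed to the engine is SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 4 * 400 = 1600 usec:

    struct aux_engine *engine = dce110_aux_engine_construct(
            &aux_engine110, ctx, inst,
            SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
            &aux_regs);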
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 88b09dd758ba..ca137757a69e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -133,7 +133,7 @@ static bool calculate_fb_and_fractional_fb_divider(
uint64_t feedback_divider;
feedback_divider =
- (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+ (uint64_t)target_pix_clk_khz * ref_divider * post_divider;
feedback_divider *= 10;
/* additional factor, since we divide by 10 afterwards */
feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
@@ -145,8 +145,8 @@ static bool calculate_fb_and_fractional_fb_divider(
* of fractional feedback decimal point and the fractional FB Divider precision
* is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
- feedback_divider += (uint64_t)
- (5 * calc_pll_cs->fract_fb_divider_precision_factor);
+ feedback_divider += 5ULL *
+ calc_pll_cs->fract_fb_divider_precision_factor;
feedback_divider =
div_u64(feedback_divider,
calc_pll_cs->fract_fb_divider_precision_factor * 10);
@@ -203,8 +203,8 @@ static bool calc_fb_divider_checking_tolerance(
&fract_feedback_divider);
/*Actual calculated value*/
- actual_calc_clk_khz = (uint64_t)(feedback_divider *
- calc_pll_cs->fract_fb_divider_factor) +
+ actual_calc_clk_khz = (uint64_t)feedback_divider *
+ calc_pll_cs->fract_fb_divider_factor +
fract_feedback_divider;
actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
actual_calc_clk_khz =
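The rounding idiom in the first hunk above ("feedback_divider += 5ULL * precision_factor" before dividing by precision_factor * 10) is round-half-up in fixed point. A self-contained illustration with made-up numbers:

    uint64_t scaled = 123556;   /* intermediate value, scaled by 10 * precision */
    uint64_t precision = 100;   /* fract_fb_divider_precision_factor */

    /* (123556 + 500) / 1000 = 124, i.e. 123.556 rounded to nearest */
    uint64_t rounded = (scaled + 5 * precision) / (10 * precision);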
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 8a581c67bf2d..fb1f373d08a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -38,7 +38,7 @@
#include "dal_asic_id.h"
#define TO_DCE_CLOCKS(clocks)\
- container_of(clocks, struct dce_disp_clk, base)
+ container_of(clocks, struct dce_dccg, base)
#define REG(reg) \
(clk_dce->regs->reg)
@@ -101,99 +101,84 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-/* Starting point for each divider range.*/
-enum dce_divider_range_start {
- DIVIDER_RANGE_01_START = 200, /* 2.00*/
- DIVIDER_RANGE_02_START = 1600, /* 16.00*/
- DIVIDER_RANGE_03_START = 3200, /* 32.00*/
- DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+ DENTIST_BASE_DID_1 = 0x08,
+ DENTIST_BASE_DID_2 = 0x40,
+ DENTIST_BASE_DID_3 = 0x60,
+ DENTIST_BASE_DID_4 = 0x7e,
+ DENTIST_MAX_DID = 0x7f
};
-/* Ranges for divider identifiers (Divider ID or DID)
- mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
-enum dce_divider_id_register_setting {
- DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
- DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
+/* Starting point and step size for each divider range.*/
+enum dentist_divider_range {
+ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
+ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
};
-/* Step size between each divider within a range.
- Incrementing the DENTIST_DISPCLK_WDIVIDER by one
- will increment the divider by this much.*/
-enum dce_divider_range_step_size {
- DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
- DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
- DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
-};
-
-static bool dce_divider_range_construct(
- struct dce_divider_range *div_range,
- int range_start,
- int range_step,
- int did_min,
- int did_max)
+static int dentist_get_divider_from_did(int did)
{
- div_range->div_range_start = range_start;
- div_range->div_range_step = range_step;
- div_range->did_min = did_min;
- div_range->did_max = did_max;
-
- if (div_range->div_range_step == 0) {
- div_range->div_range_step = 1;
- /*div_range_step cannot be zero*/
- BREAK_TO_DEBUGGER();
+ if (did < DENTIST_BASE_DID_1)
+ did = DENTIST_BASE_DID_1;
+ if (did > DENTIST_MAX_DID)
+ did = DENTIST_MAX_DID;
+
+ if (did < DENTIST_BASE_DID_2) {
+ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+ * (did - DENTIST_BASE_DID_1);
+ } else if (did < DENTIST_BASE_DID_3) {
+ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+ * (did - DENTIST_BASE_DID_2);
+ } else if (did < DENTIST_BASE_DID_4) {
+ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+ * (did - DENTIST_BASE_DID_3);
+ } else {
+ return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+ * (did - DENTIST_BASE_DID_4);
}
- /* Calculate this based on the other inputs.*/
- /* See DividerRange.h for explanation of */
- /* the relationship between divider id (DID) and a divider.*/
- /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
- /* Maximum divider identified in this range =
- * (Number of Divider IDs)*Step size between dividers
- * + The start of this range.*/
- div_range->div_range_end = (did_max - did_min) * range_step
- + range_start;
- return true;
-}
-
-static int dce_divider_range_calc_divider(
- struct dce_divider_range *div_range,
- int did)
-{
- /* Is this DID within our range?*/
- if ((did < div_range->did_min) || (did >= div_range->did_max))
- return INVALID_DIVIDER;
-
- return ((did - div_range->did_min) * div_range->div_range_step)
- + div_range->div_range_start;
-
}
-static int dce_divider_range_get_divider(
- struct dce_divider_range *div_range,
- int ranges_num,
- int did)
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC)
+ * if the clock is spread, for all cases:
+ * - if SS enabled on DP Ref clock and HW de-spreading enabled with SW
+ *   calculations for DS_INCR/DS_MODULO (this is planned to be the default case)
+ * - if SS enabled on DP Ref clock and HW de-spreading enabled with HW
+ *   calculations (not planned to be used, but the average clock should still
+ *   be valid)
+ * - if SS enabled on DP Ref clock and HW de-spreading disabled
+ *   (should not be the case with CIK) then SW should program all rates
+ *   generated according to the average value (as with previous ASICs)
+ */
+static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
{
- int div = INVALID_DIVIDER;
- int i;
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+ dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
- for (i = 0; i < ranges_num; i++) {
- /* Calculate divider with given divider ID*/
- div = dce_divider_range_calc_divider(&div_range[i], did);
- /* Found a valid return divider*/
- if (div != INVALID_DIVIDER)
- break;
+ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
}
- return div;
+ return dp_ref_clk_khz;
}
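
In plain arithmetic the adjustment above is dp_ref * (1 - pct / div / 200): dividing by dprefclk_ss_divider converts the stored integer to percent, dividing by 100 gives a fraction, and dividing by 2 accounts for a down-spread clock averaging half the spread below nominal. A sketch with assumed values (a 0.3% down-spread stored as 30 with divider 100):

	static int adjust_for_ss_example(void)
	{
		double pct  = 30.0 / 100.0;  /* stored 30, divider 100 -> 0.3 percent */
		double frac = pct / 200.0;   /* /100 for percent, /2 for down-spread  */

		return (int)(600000 * (1.0 - frac)); /* 599100 kHz */
	}
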
-static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+static int dce_get_dp_ref_freq_khz(struct dccg *clk)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
int dprefclk_wdivider;
int dprefclk_src_sel;
int dp_ref_clk_khz = 600000;
- int target_div = INVALID_DIVIDER;
+ int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
@@ -204,80 +189,27 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
/* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider*/
- target_div = dce_divider_range_get_divider(
- clk_dce->divider_ranges,
- DIVIDER_RANGE_MAX,
- dprefclk_wdivider);
-
- if (target_div != INVALID_DIVIDER) {
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
- * clk_dce->dentist_vco_freq_khz) / target_div;
- }
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
- /* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
- */
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(
- clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_dce->dentist_vco_freq_khz) / target_div;
- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
- ss_percentage);
- adj_dp_ref_clk_khz =
- dc_fixpt_mul_int(
- ss_percentage,
- dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
-
- return dp_ref_clk_khz;
+ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
}
-/* TODO: This is DCN DPREFCLK: it could be program by DENTIST by VBIOS
- * or CLK0_CLK11 by SMU. For DCE120, it is wlays 600Mhz. Will re-visit
- * clock implementation
- */
-static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
- int dp_ref_clk_khz = 600000;
-
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(
- clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
- ss_percentage);
- adj_dp_ref_clk_khz =
- dc_fixpt_mul_int(
- ss_percentage,
- dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
-
- return dp_ref_clk_khz;
+ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
}
+
static enum dm_pp_clocks_state dce_get_required_clocks_state(
- struct display_clock *clk,
- struct state_dependent_clocks *req_clocks)
+ struct dccg *clk,
+ struct dc_clocks *req_clocks)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
int i;
enum dm_pp_clocks_state low_req_clk;
@@ -286,53 +218,30 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (req_clocks->display_clk_khz >
+ if (req_clocks->dispclk_khz >
clk_dce->max_clks_by_state[i].display_clk_khz
- || req_clocks->pixel_clk_khz >
+ || req_clocks->phyclk_khz >
clk_dce->max_clks_by_state[i].pixel_clk_khz)
break;
low_req_clk = i + 1;
if (low_req_clk > clk->max_clks_state) {
- DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
- __func__,
- req_clocks->display_clk_khz,
- req_clocks->pixel_clk_khz);
- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ /* use max clock state when only phyclk is too high; invalid if dispclk exceeds the max */
+ if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
+ < req_clocks->dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+ low_req_clk = clk->max_clks_state;
}
return low_req_clk;
}
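
The loop scans from the maximum clock state downward and breaks at the first state whose limits the request exceeds; the required state is then one above the break point. A stand-alone model of the same search, with invented per-state limits:

	/* disp_limit[] values are made up for illustration. */
	static const int disp_limit[] = { 0, 150000, 300000, 600000, 1133000 };

	static int required_state(int max_state, int dispclk_khz)
	{
		int i;

		for (i = max_state; i >= 1; i--)
			if (dispclk_khz > disp_limit[i])
				break;

		return i + 1; /* e.g. required_state(4, 400000) == 3 */
	}
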
-static bool dce_clock_set_min_clocks_state(
- struct display_clock *clk,
- enum dm_pp_clocks_state clocks_state)
-{
- struct dm_pp_power_level_change_request level_change_req = {
- clocks_state };
-
- if (clocks_state > clk->max_clks_state) {
- /*Requested state exceeds max supported state.*/
- DC_LOG_WARNING("Requested state exceeds max supported state");
- return false;
- } else if (clocks_state == clk->cur_min_clks_state) {
- /*if we're trying to set the same state, we can just return
- * since nothing needs to be done*/
- return true;
- }
-
- /* get max clock state from PPLIB */
- if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
- clk->cur_min_clks_state = clocks_state;
-
- return true;
-}
-
static int dce_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
struct dc_bios *bp = clk->ctx->dc_bios;
int actual_clock = requested_clk_khz;
@@ -364,10 +273,10 @@ static int dce_set_clock(
}
static int dce_psr_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct dc_context *ctx = clk_dce->base.ctx;
struct dc *core_dc = ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
@@ -380,10 +289,10 @@ static int dce_psr_set_clock(
}
static int dce112_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk->ctx->dc_bios;
struct dc *core_dc = clk->ctx->dc;
@@ -432,9 +341,9 @@ static int dce112_set_clock(
return actual_clock;
}
-static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
{
- struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
struct integrated_info info = { { { 0 } } };
struct dc_firmware_info fw_info = { { 0 } };
@@ -488,11 +397,9 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_dce->dfs_bypass_enabled = true;
-
- clk_dce->use_max_disp_clk = debug->max_disp_clk;
}
-static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
{
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
int ss_info_num = bp->funcs->get_ss_entry_number(
@@ -548,139 +455,265 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
}
}
-static bool dce_apply_clock_voltage_request(
- struct display_clock *clk,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz,
- bool pre_mode_set,
- bool update_dp_phyclk)
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
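
should_set_clock() encodes the raise-early/lower-late policy used throughout update_clocks(): a higher target always applies, a lower one only once safe_to_lower reports the previous state has been committed. Usage sketch:

	static void should_set_clock_example(void)
	{
		bool raise = should_set_clock(false, 400000, 300000); /* true: increases always apply */
		bool early = should_set_clock(false, 200000, 300000); /* false: not yet safe to lower */
		bool later = should_set_clock(true,  200000, 300000); /* true: lowering is now safe   */

		(void)raise; (void)early; (void)later;
	}
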
+
+static void dce12_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
{
- bool send_request = false;
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- break;
- default:
- BREAK_TO_DEBUGGER();
- return false;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
}
- clock_voltage_req.clk_type = clocks_type;
- clock_voltage_req.clocks_in_khz = clocks_in_khz;
-
- /* to pplib */
- if (pre_mode_set) {
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
- clk->cur_clocks_value.dispclk_notify_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.dispclk_notify_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
- break;
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
- clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
- clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
- break;
- default:
- ASSERT(0);
- break;
- }
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+}
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
+ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+ bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
+
+ /* increasing clock: current div flag is 0, requested div flag is 1 */
+ if (dispclk_increase) {
+ /* already divided by 2, no need to reach target clk with 2 steps*/
+ if (cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* requested disp clk is lower than maximum supported dpp clk,
+ * no need to reach target clk with two steps.
+ */
+ if (new_clocks->dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* target dpp clk is not requested to be divided by 2, still within threshold */
+ if (!request_dpp_div)
+ return new_clocks->dispclk_khz;
} else {
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
- send_request = true;
- break;
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
- send_request = true;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
- send_request = true;
- break;
- default:
- ASSERT(0);
- break;
- }
+ /* decreasing clock: current dppclk is divided by 2,
+ * requested dppclk is not divided by 2.
+ */
+
+ /* current dpp clk is not divided by 2, no need to ramp */
+ if (!cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* current disp clk is lower than current maximum dpp clk,
+ * no need to ramp
+ */
+ if (dccg->clks.dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* request dpp clk need to be divided by 2 */
+ if (request_dpp_div)
+ return new_clocks->dispclk_khz;
}
- if (send_request) {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
- struct dc *core_dc = clk->ctx->dc;
- /*use dcfclk request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz =
- dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
- }
+
+ return disp_clk_threshold;
+}
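
When a dispclk change also flips the DPP divider, the helper above picks an intermediate stop so dppclk never overshoots its supported maximum: ramp to the threshold, flip the divider, then finish. A user-space model of the same decision, with made-up numbers (all kHz):

	static int ramp_threshold(int cur_disp, int cur_dpp, int new_disp, int new_dpp,
				  int max_dpp)
	{
		bool want_div = new_disp > new_dpp; /* requested dppclk divided by 2 */
		bool have_div = cur_disp > cur_dpp; /* currently divided by 2 */

		if (new_disp > cur_disp) {
			if (have_div || new_disp <= max_dpp || !want_div)
				return new_disp; /* one step suffices */
		} else {
			if (!have_div || cur_disp <= max_dpp || want_div)
				return new_disp;
		}
		return max_dpp; /* e.g. (400000, 400000, 900000, 450000, 600000) -> 600000 */
	}
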
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+ struct dc *dc = dccg->ctx->dc;
+ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ int i;
+
+ /* set disp clk to dpp clk threshold */
+ dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
+
+ /* update request dpp clk division option */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+ pipe_ctx->plane_res.dpp,
+ request_dpp_div,
+ true);
+ }
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+ dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static void dcn1_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+{
+ struct dc *dc = dccg->ctx->dc;
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+
+ if (new_clocks->phyclk_khz)
+ smu_req.display_count = 1;
+ else
+ smu_req.display_count = 0;
+
+ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+ || new_clocks->fclk_khz > dccg->clks.fclk_khz
+ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+ send_request_to_increase = true;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+ dccg->clks.fclk_khz = new_clocks->fclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+ dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+ dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+
+ send_request_to_lower = true;
+ }
+
+ /* program dcf clk before dpp clk to
+ * make sure we have enough voltage to run dpp clk
+ */
+ if (send_request_to_increase) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+ }
+
+ /* dcn1 dppclk is tied to dispclk */
+ /* program dispclk even when unchanged as a w/a for sleep/resume clock ramping issues */
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
+ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (!send_request_to_increase && send_request_to_lower) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+ }
+
+
+ *smu_req_cur = smu_req;
+}
#endif
- dm_pp_apply_clock_for_voltage_request(
- clk->ctx, &clock_voltage_req);
+
+static void dce_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+{
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > dccg->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+ dccg->cur_min_clks_state = level_change_req.power_level;
}
- if (update_dp_phyclk && (clocks_in_khz >
- clk->cur_clocks_value.max_dp_phyclk_in_khz))
- clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
- return true;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
}
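
Callers are expected to drive update_clocks() twice around a mode change: before programming with safe_to_lower = false (so only raises take effect) and again afterwards with true. A sketch of that sequencing; the actual call sites live in the hw sequencer:

	static void update_clocks_around_mode_set(struct dccg *dccg,
						  struct dc_clocks *clks)
	{
		dccg->funcs->update_clocks(dccg, clks, false); /* pre-set: raise only */
		/* ... program the new mode ... */
		dccg->funcs->update_clocks(dccg, clks, true);  /* post-set: lowering is safe */
	}
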
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+static const struct display_clock_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dcn1_update_clocks
+};
+#endif
static const struct display_clock_funcs dce120_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
- .apply_clock_voltage_request = dce_apply_clock_voltage_request,
- .set_clock = dce112_set_clock
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce12_update_clocks
};
static const struct display_clock_funcs dce112_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce112_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce_update_clocks
};
static const struct display_clock_funcs dce110_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce_psr_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_psr_set_clock,
+ .update_clocks = dce_update_clocks
};
static const struct display_clock_funcs dce_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_set_clock,
+ .update_clocks = dce_update_clocks
};
-static void dce_disp_clk_construct(
- struct dce_disp_clk *clk_dce,
+static void dce_dccg_construct(
+ struct dce_dccg *clk_dce,
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct display_clock *base = &clk_dce->base;
+ struct dccg *base = &clk_dce->base;
base->ctx = ctx;
base->funcs = &dce_funcs;
@@ -700,34 +733,15 @@ static void dce_disp_clk_construct(
dce_clock_read_integrated_info(clk_dce);
dce_clock_read_ss_info(clk_dce);
-
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_01],
- DIVIDER_RANGE_01_START,
- DIVIDER_RANGE_01_STEP_SIZE,
- DIVIDER_RANGE_01_BASE_DIVIDER_ID,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID);
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_02],
- DIVIDER_RANGE_02_START,
- DIVIDER_RANGE_02_STEP_SIZE,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID);
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_03],
- DIVIDER_RANGE_03_START,
- DIVIDER_RANGE_03_STEP_SIZE,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID,
- DIVIDER_RANGE_MAX_DIVIDER_ID);
}
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -738,19 +752,19 @@ struct display_clock *dce_disp_clk_create(
dce80_max_clks_by_state,
sizeof(dce80_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
return &clk_dce->base;
}
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -761,7 +775,7 @@ struct display_clock *dce110_disp_clk_create(
dce110_max_clks_by_state,
sizeof(dce110_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
clk_dce->base.funcs = &dce110_funcs;
@@ -769,13 +783,13 @@ struct display_clock *dce110_disp_clk_create(
return &clk_dce->base;
}
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -786,7 +800,7 @@ struct display_clock *dce112_disp_clk_create(
dce112_max_clks_by_state,
sizeof(dce112_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
clk_dce->base.funcs = &dce112_funcs;
@@ -794,10 +808,9 @@ struct display_clock *dce112_disp_clk_create(
return &clk_dce->base;
}
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+struct dccg *dce120_dccg_create(struct dc_context *ctx)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
- struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -808,28 +821,59 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
dce120_max_clks_by_state,
sizeof(dce120_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, NULL, NULL, NULL);
clk_dce->base.funcs = &dce120_funcs;
- /* new in dce120 */
- if (!ctx->dc->debug.disable_pplib_clock_request &&
- dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
- && clk_level_info.num_levels)
- clk_dce->max_displ_clk_in_khz =
- clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
- else
- clk_dce->max_displ_clk_in_khz = 1133000;
+ return &clk_dce->base;
+}
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ clk_dce->base.ctx = ctx;
+ clk_dce->base.funcs = &dcn1_funcs;
+
+ clk_dce->dfs_bypass_disp_clk = 0;
+
+ clk_dce->dprefclk_ss_percentage = 0;
+ clk_dce->dprefclk_ss_divider = 1000;
+ clk_dce->ss_on_dprefclk = false;
+
+ if (bp->integrated_info)
+ clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0)
+ clk_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+
+ dce_clock_read_ss_info(clk_dce);
return &clk_dce->base;
}
+#endif
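
dcn1_dccg_create() resolves the DENTIST VCO frequency through a fallback chain, distilled below:

	static int pick_vco_khz(int integrated_khz, int fw_pll_khz)
	{
		if (integrated_khz)
			return integrated_khz; /* 1) VBIOS integrated info */
		if (fw_pll_khz)
			return fw_pll_khz;     /* 2) firmware SMU GPU PLL output */
		return 3600000;                /* 3) hardcoded 3.6 GHz default */
	}
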
-void dce_disp_clk_destroy(struct display_clock **disp_clk)
+void dce_dccg_destroy(struct dccg **dccg)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
kfree(clk_dce);
- *disp_clk = NULL;
+ *dccg = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
index 0e717e0dc8f0..8a6b2d328467 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -33,6 +33,9 @@
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+#define CLK_COMMON_REG_LIST_DCN_BASE() \
+ SR(DENTIST_DISPCLK_CNTL)
+
#define CLK_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -40,58 +43,37 @@
CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
- type DENTIST_DPREFCLK_WDIVIDER;
+ type DENTIST_DPREFCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_CHG_DONE;
-struct dce_disp_clk_shift {
+struct dccg_shift {
CLK_REG_FIELD_LIST(uint8_t)
};
-struct dce_disp_clk_mask {
+struct dccg_mask {
CLK_REG_FIELD_LIST(uint32_t)
};
-struct dce_disp_clk_registers {
+struct dccg_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
};
-/* Array identifiers and count for the divider ranges.*/
-enum dce_divider_range_count {
- DIVIDER_RANGE_01 = 0,
- DIVIDER_RANGE_02,
- DIVIDER_RANGE_03,
- DIVIDER_RANGE_MAX /* == 3*/
-};
-
-enum dce_divider_error_types {
- INVALID_DID = 0,
- INVALID_DIVIDER = 1
-};
-
-struct dce_divider_range {
- int div_range_start;
- /* The end of this range of dividers.*/
- int div_range_end;
- /* The distance between each divider in this range.*/
- int div_range_step;
- /* The divider id for the lowest divider.*/
- int did_min;
- /* The divider id for the highest divider.*/
- int did_max;
-};
-
-struct dce_disp_clk {
- struct display_clock base;
- const struct dce_disp_clk_registers *regs;
- const struct dce_disp_clk_shift *clk_shift;
- const struct dce_disp_clk_mask *clk_mask;
+struct dce_dccg {
+ struct dccg base;
+ const struct dccg_registers *regs;
+ const struct dccg_shift *clk_shift;
+ const struct dccg_mask *clk_mask;
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
- struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
- bool use_max_disp_clk;
int dentist_vco_freq_khz;
/* Cache the status of DFS-bypass feature*/
@@ -106,32 +88,33 @@ struct dce_disp_clk {
int dprefclk_ss_percentage;
/* DPREFCLK SS percentage Divider (100 or 1000) */
int dprefclk_ss_divider;
-
- /* max disp_clk from PPLIB for max validation display clock*/
- int max_displ_clk_in_khz;
};
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
+
+struct dccg *dce120_dccg_create(struct dc_context *ctx);
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+#endif
-void dce_disp_clk_destroy(struct display_clock **disp_clk);
+void dce_dccg_destroy(struct dccg **dccg);
#endif /* _DCE_CLOCKS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a576b8bbb3cd..dea40b322191 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
}
}
-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
}
static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
* least a few frames. Should never hit the max retry assert below.
*/
if (wait == true) {
- for (retryCount = 0; retryCount <= 1000; retryCount++) {
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (enable) {
- if (psr_state != 0)
- break;
- } else {
- if (psr_state == 0)
- break;
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
}
- udelay(500);
- }
- /* assert if max retry hit */
- ASSERT(retryCount <= 1000);
+ /* assert if max retry hit */
+ if (retryCount >= 1000)
+ ASSERT(0);
}
}
-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* If microcontroller is not running, do nothing */
if (dmcu->dmcu_state != DMCU_RUNNING)
- return;
+ return false;
link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ return true;
}
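
The added REG_WAIT polls MASTER_COMM_INTERRUPT until it reads back 0, here with a 1 us delay between up to 10000 tries, so setup_psr only returns once the DMCU has consumed the command. Roughly equivalent plain C, using a hypothetical register accessor:

	static void wait_dmcu_ready(void)
	{
		int tries;

		for (tries = 0; tries < 10000; tries++) {
			if (read_master_comm_interrupt() == 0) /* hypothetical accessor */
				return;
			udelay(1);
		}
		ASSERT(0); /* DMCU never acked the command */
	}
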
static void dcn10_psr_wait_loop(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 057b8afd74bc..64dc75378541 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -147,6 +147,7 @@
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DCFCLK_CNTL), \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
/* todo: get these from GVM instead of reading registers ourselves */\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
@@ -249,7 +250,6 @@ struct dce_hwseq_registers {
uint32_t DISPCLK_FREQ_CHANGE_CNTL;
uint32_t RBBMIF_TIMEOUT_DIS;
uint32_t RBBMIF_TIMEOUT_DIS_2;
- uint32_t DENTIST_DISPCLK_CNTL;
uint32_t DCHUBBUB_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
@@ -276,6 +276,8 @@ struct dce_hwseq_registers {
uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+ uint32_t AZALIA_AUDIO_DTO;
+ uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -362,7 +364,8 @@ struct dce_hwseq_registers {
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
- HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+ HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh), \
+ HWS_SF(, DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -496,14 +499,13 @@ struct dce_hwseq_registers {
type DOMAIN7_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
- type DENTIST_DPPCLK_WDIVIDER; \
- type DENTIST_DISPCLK_WDIVIDER; \
type VGA_TEST_ENABLE; \
type VGA_TEST_RENDER_START; \
type D1VGA_MODE_ENABLE; \
type D2VGA_MODE_ENABLE; \
type D3VGA_MODE_ENABLE; \
- type D4VGA_MODE_ENABLE;
+ type D4VGA_MODE_ENABLE; \
+ type AZALIA_AUDIO_DTO_MODULE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index dbe3b26b6d9e..eff7d22d78fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -256,6 +256,11 @@ static void setup_panel_mode(
enum dp_panel_mode panel_mode)
{
uint32_t value;
+ struct dc_context *ctx = enc110->base.ctx;
+
+ /* if psp has set the panel mode, dal should not program it */
+ if (ctx->dc->caps.psp_setup_panel_mode)
+ return;
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -646,6 +651,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
adjusted_pix_clk_khz >= 300000)
return false;
+ if (enc110->base.ctx->dc->debug.hdmi20_disable &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
return true;
}
@@ -773,6 +781,9 @@ void dce110_link_encoder_construct(
__func__,
result);
}
+ if (enc110->base.ctx->dc->debug.hdmi20_disable) {
+ enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
}
bool dce110_link_encoder_validate_output_with_stream(
@@ -919,7 +930,7 @@ void dce110_link_encoder_enable_tmds_output(
enum bp_result result;
/* Enable the PHY */
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
@@ -961,7 +972,7 @@ void dce110_link_encoder_enable_dp_output(
* We need to set number of lanes manually.
*/
configure_encoder(enc110, link_settings);
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index b235a75355b8..85686d917636 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -729,7 +729,7 @@ static bool dce_mi_program_surface_flip_and_addr(
return true;
}
-static struct mem_input_funcs dce_mi_funcs = {
+static const struct mem_input_funcs dce_mi_funcs = {
.mem_input_program_display_marks = dce_mi_program_display_marks,
.allocate_mem_input = dce_mi_allocate_dmif,
.free_mem_input = dce_mi_free_dmif,
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
.mem_input_is_flip_pending = dce_mi_is_flip_pending
};
+static const struct mem_input_funcs dce112_mi_funcs = {
+ .mem_input_program_display_marks = dce112_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static const struct mem_input_funcs dce120_mi_funcs = {
+ .mem_input_program_display_marks = dce120_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
void dce_mem_input_construct(
struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
const struct dce_mem_input_mask *mi_mask)
{
dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
- dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+ dce_mi->base.funcs = &dce112_mi_funcs;
}
void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
const struct dce_mem_input_mask *mi_mask)
{
dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
- dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+ dce_mi->base.funcs = &dce120_mi_funcs;
}
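
Switching from patching the shared dce_mi_funcs at construct time to per-variant const tables avoids mutating a vtable every instance points at (the old code silently changed behavior for all mem inputs, not just the one being constructed). The pattern, reduced to a sketch:

	struct ops {
		void (*program_marks)(void);
	};

	static void generic_marks(void) { /* ... */ }
	static void dce112_marks(void)  { /* ... */ }

	static const struct ops dce_ops    = { .program_marks = generic_marks };
	static const struct ops dce112_ops = { .program_marks = dce112_marks  };

	/* each construct() now selects a whole table: mi->funcs = &dce112_ops; */
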
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c0e813c7ddd4..91642e684858 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -289,11 +289,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (REG(DP_DB_CNTL))
- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-#endif
-
/* set pixel encoding */
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index a02e719d7794..ab63d0d0304c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -155,7 +155,7 @@ static void program_overscan(
int overscan_bottom = data->v_active
- data->recout.y - data->recout.height;
- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
overscan_bottom += 2;
overscan_right += 2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 41f83ecd7469..74c05e878807 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -125,17 +125,50 @@ static void dce100_pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
+/* unit: in_khz. Before mode set, get the pixel clock from the context;
+ * the ASIC registers may not be programmed yet.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check under lay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+ return max_pix_clk;
+}
+
void dce100_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
- }
+ struct dc_clocks req_clks;
+
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
+
dce100_pplib_apply_display_requirements(dc, context);
}
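
dce100_set_bandwidth keeps the long-standing 15% dispclk headroom and now also feeds the highest pixel clock in as phyclk. The margin arithmetic, isolated:

	static int dispclk_with_headroom(int computed_khz)
	{
		return computed_khz * 115 / 100; /* e.g. 500000 -> 575000 kHz */
	}
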
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 38ec0d609297..3f76e6019546 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -52,6 +52,7 @@
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -135,15 +136,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
+struct aux_engine *dce100_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -644,8 +679,8 @@ static void destruct(struct dce110_resource_pool *pool)
dce_aud_destroy(&pool->base.audios[i]);
}
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
@@ -678,9 +713,22 @@ bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
@@ -817,11 +865,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -851,7 +899,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
struct irq_service_init_data init_data;
@@ -871,7 +919,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.disable_dp_clk_share = true;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
@@ -915,6 +963,13 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index e2994d337044..1f7f25013217 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
struct dce110_compressor *cp110,
bool enabled)
{
- uint8_t counter = 0;
+ uint32_t counter = 0;
uint32_t addr = mmFBC_STATUS;
uint32_t value;
@@ -158,7 +158,7 @@ static void wait_for_fbc_state_changed(
counter++;
}
- if (counter == 10) {
+ if (counter == 1000) {
DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
__func__);
} else {
@@ -551,9 +551,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
compressor->base.lpt_channels_num = 0;
compressor->base.attached_inst = 0;
compressor->base.is_enabled = false;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
compressor->base.funcs = &dce110_compressor_funcs;
-#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c29052b6da5a..14384d9675a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -34,9 +34,7 @@
#include "dce/dce_hwseq.h"
#include "gpio_service_interface.h"
-#if defined(CONFIG_DRM_AMD_DC_FBC)
#include "dce110_compressor.h"
-#endif
#include "bios/bios_parser_helper.h"
#include "timing_generator.h"
@@ -667,16 +665,25 @@ static enum dc_status bios_parser_crtc_source_select(
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
+ bool is_hdmi;
+ bool is_dp;
+
ASSERT(pipe_ctx->stream);
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+ is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+ if (!is_hdmi && !is_dp)
+ return;
+
+ if (is_hdmi)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ else
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -857,17 +864,22 @@ void hwss_edp_power_control(
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long duration_in_ms =
- dm_get_elapse_time_in_ns(
+ div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
+ link->link_trace.time_stamp.edp_poweroff), 1000000);
unsigned long long wait_time_ms = 0;
/* max 500ms from LCDVDD off to on */
+ unsigned long long edp_poweroff_time_ms = 500;
+
+ if (link->local_sink != NULL)
+ edp_poweroff_time_ms =
+ 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
if (link->link_trace.time_stamp.edp_poweroff == 0)
- wait_time_ms = 500;
- else if (duration_in_ms < 500)
- wait_time_ms = 500 - duration_in_ms;
+ wait_time_ms = edp_poweroff_time_ms;
+ else if (duration_in_ms < edp_poweroff_time_ms)
+ wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
if (wait_time_ms) {
msleep(wait_time_ms);
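
The power-up path now honors a per-panel extra T12 delay on top of the 500 ms baseline. The remaining-wait computation, with assumed numbers:

	static unsigned long long edp_wait_ms(unsigned long long required_ms,
					      unsigned long long elapsed_ms)
	{
		/* e.g. required 500 + 50 extra T12 = 550; panel off for 180 -> wait 370 */
		return elapsed_ms < required_ms ? required_ms - elapsed_ms : 0;
	}
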
@@ -972,19 +984,35 @@ void hwss_edp_backlight_control(
edp_receiver_ready_T9(link);
}
-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->sink->link;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ /* notify audio driver for audio modes of monitor */
+ struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ unsigned int i, num_audio = 1;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
- pipe_ctx->stream_res.stream_enc);
+ if (pipe_ctx->stream_res.audio) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ /*current_state not updated yet*/
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+ num_audio++;
+ }
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
- pipe_ctx->stream_res.stream_enc);
+ pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+ if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than per link */
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+ }
+}
+
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
@@ -1015,7 +1043,23 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
* stream->stream_engine_id);
*/
}
+}
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ dc->hwss.disable_audio_stream(pipe_ctx, option);
link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
@@ -1212,7 +1256,7 @@ static void program_scaler(const struct dc *dc,
return;
#endif
- if (dc->debug.surface_visual_confirm)
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
get_surface_visual_confirm_color(pipe_ctx, &color);
else
color_space_to_black_color(dc,
@@ -1298,6 +1342,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
pipe_ctx[pipe_ctx->pipe_idx];
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info,
+ &audio_output.crtc_info);
+
+ pipe_ctx->stream_res.audio->funcs->az_configure(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &pipe_ctx->stream->audio_info);
+ }
+
/* */
dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
@@ -1412,7 +1480,7 @@ static void power_down_controllers(struct dc *dc)
{
int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
dc->res_pool->timing_generators[i]->funcs->disable_crtc(
dc->res_pool->timing_generators[i]);
}
@@ -1441,10 +1509,8 @@ static void power_down_all_hw_blocks(struct dc *dc)
power_down_clock_sources(dc);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
}
static void disable_vga_and_power_gate_all_controllers(
@@ -1454,12 +1520,13 @@ static void disable_vga_and_power_gate_all_controllers(
struct timing_generator *tg;
struct dc_context *ctx = dc->ctx;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
tg = dc->res_pool->timing_generators[i];
if (tg->funcs->disable_vga)
tg->funcs->disable_vga(tg);
-
+ }
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
/* Enable CLOCK gating for each pipe BEFORE controller
* powergating. */
enable_display_pipe_clock_gating(ctx,
@@ -1521,7 +1588,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_eDP_fast_boot_optimize = false;
if (edp_link) {
- can_eDP_fast_boot_optimize =
+ /* this seems to cause blank screens on DCE8 */
+ if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
+ (dc->ctx->dce_version == DCE_VERSION_8_1) ||
+ (dc->ctx->dce_version == DCE_VERSION_8_3))
+ can_eDP_fast_boot_optimize = false;
+ else
+ can_eDP_fast_boot_optimize =
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
}
@@ -1602,7 +1675,7 @@ static void dce110_set_displaymarks(
}
}
-static void set_safe_displaymarks(
+void dce110_set_safe_displaymarks(
struct resource_context *res_ctx,
const struct resource_pool *pool)
{
@@ -1686,9 +1759,7 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
if (events->force_trigger)
value |= 0x1;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
value |= 0x84;
-#endif
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
@@ -1696,23 +1767,15 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
}
/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet.
- * TODO: after mode set, pre_mode_set = false,
- * may read PLL register to get pixel clock
+ * may not be programmed yet
*/
static uint32_t get_max_pixel_clock_for_all_paths(
struct dc *dc,
- struct dc_state *context,
- bool pre_mode_set)
+ struct dc_state *context)
{
uint32_t max_pix_clk = 0;
int i;
- if (!pre_mode_set) {
- /* TODO: read ASIC register to get pixel clock */
- ASSERT(0);
- }
-
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1728,97 +1791,10 @@ static uint32_t get_max_pixel_clock_for_all_paths(
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
}
- if (max_pix_clk == 0)
- ASSERT(0);
-
return max_pix_clk;
}
/*
- * Find clock state based on clock requested. if clock value is 0, simply
- * set clock state as requested without finding clock state by clock value
- */
-
-static void apply_min_clocks(
- struct dc *dc,
- struct dc_state *context,
- enum dm_pp_clocks_state *clocks_state,
- bool pre_mode_set)
-{
- struct state_dependent_clocks req_clocks = {0};
-
- if (!pre_mode_set) {
- /* set clock_state without verification */
- if (context->dis_clk->funcs->set_min_clocks_state) {
- context->dis_clk->funcs->set_min_clocks_state(
- context->dis_clk, *clocks_state);
- return;
- }
-
- /* TODO: This is incorrect. Figure out how to fix. */
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
- context->dis_clk->cur_clocks_value.dispclk_in_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_PIXELCLK,
- context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
- pre_mode_set,
- false);
- return;
- }
-
- /* get the required state based on state dependent clocks:
- * display clock and pixel clock
- */
- req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
-
- req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
- dc, context, true);
-
- if (context->dis_clk->funcs->get_required_clocks_state) {
- *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
- context->dis_clk, &req_clocks);
- context->dis_clk->funcs->set_min_clocks_state(
- context->dis_clk, *clocks_state);
- } else {
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
- req_clocks.display_clk_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_PIXELCLK,
- req_clocks.pixel_clk_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- req_clocks.pixel_clk_khz,
- pre_mode_set,
- false);
- }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
-
-/*
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
@@ -1896,7 +1872,6 @@ static void enable_fbc(struct dc *dc,
compr->funcs->enable_fbc(compr, &params);
}
}
-#endif
static void dce110_reset_hw_ctx_wrap(
struct dc *dc,
@@ -1939,7 +1914,9 @@ static void dce110_reset_hw_ctx_wrap(
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
- if (old_clk)
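+		/* Only power down the old clock source once nothing in the
+		 * new context still references it.
+		 */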
+ if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
+ dc->res_pool,
+ old_clk))
old_clk->funcs->cs_power_down(old_clk);
dc->hwss.disable_plane(dc, pipe_ctx_old);
@@ -1949,97 +1926,12 @@ static void dce110_reset_hw_ctx_wrap(
}
}
-
-enum dc_status dce110_apply_ctx_to_hw(
+static void dce110_setup_audio_dto(
struct dc *dc,
struct dc_state *context)
{
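+	/* Factored out of dce110_apply_ctx_to_hw(): selects the audio wall
+	 * clock source (HDMI if any HDMI audio is active, otherwise DP).
+	 */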
- struct dc_bios *dcb = dc->ctx->dc_bios;
- enum dc_status status;
int i;
- enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
-
- /* Reset old context */
- /* look up the targets that have been removed since last commit */
- dc->hwss.reset_hw_ctx_wrap(dc, context);
-
- /* Skip applying if no targets */
- if (context->stream_count <= 0)
- return DC_OK;
-
- /* Apply new context */
- dcb->funcs->set_scratch_critical_state(dcb, true);
- /* below is for real asic only */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx_old =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream == pipe_ctx_old->stream) {
- if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
- dce_crtc_switch_to_clk_src(dc->hwseq,
- pipe_ctx->clock_source, i);
- continue;
- }
-
- dc->hwss.enable_display_power_gating(
- dc, i, dc->ctx->dc_bios,
- PIPE_GATING_CONTROL_DISABLE);
- }
-
- set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
- if (dc->fbc_compressor)
- dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
- /*TODO: when pplib works*/
- apply_min_clocks(dc, context, &clocks_state, true);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
- if (context->bw.dcn.calc_clk.fclk_khz
- > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
- struct dm_pp_clock_for_voltage_req clock;
-
- clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
- clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
- dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
- context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
- }
- if (context->bw.dcn.calc_clk.dcfclk_khz
- > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
- struct dm_pp_clock_for_voltage_req clock;
-
- clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
- dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
- context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
- }
- if (context->bw.dcn.calc_clk.dispclk_khz
- > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- dc->current_state->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- }
- } else
-#endif
- if (context->bw.dce.dispclk_khz
- > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- }
/* program audio wall clock. use HDMI as clock source if HDMI
* audio active. Otherwise, use DP as clock source
* first, loop to find any HDMI audio, if not, loop find DP audio
@@ -2113,6 +2005,52 @@ enum dc_status dce110_apply_ctx_to_hw(
}
}
}
+}
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ dc->hwss.reset_hw_ctx_wrap(dc, context);
+
+ /* Skip applying if no targets */
+ if (context->stream_count <= 0)
+ return DC_OK;
+
+ /* Apply new context */
+ dcb->funcs->set_scratch_critical_state(dcb, true);
+
+ /* below is for real asic only */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream) {
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
+ dce_crtc_switch_to_clk_src(dc->hwseq,
+ pipe_ctx->clock_source, i);
+ continue;
+ }
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, dc->ctx->dc_bios,
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+ dce110_setup_audio_dto(dc, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
@@ -2131,31 +2069,6 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe)
continue;
- if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
-
- struct audio_output audio_output;
-
- build_audio_output(context, pipe_ctx, &audio_output);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.audio->inst,
- &pipe_ctx->stream->audio_info);
- else
- pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.audio->inst,
- &pipe_ctx->stream->audio_info,
- &audio_output.crtc_info);
-
- pipe_ctx->stream_res.audio->funcs->az_configure(
- pipe_ctx->stream_res.audio,
- pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &pipe_ctx->stream->audio_info);
- }
-
status = apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
@@ -2165,17 +2078,11 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- /* to save power */
- apply_min_clocks(dc, context, &clocks_state, false);
-
dcb->funcs->set_scratch_critical_state(dcb, false);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
if (dc->fbc_compressor)
enable_fbc(dc, context);
-#endif
-
return DC_OK;
}
@@ -2490,10 +2397,9 @@ static void init_hw(struct dc *dc)
abm->funcs->init_backlight(abm);
abm->funcs->abm_init(abm);
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
-#endif
}
@@ -2632,7 +2538,7 @@ static void pplib_apply_display_requirements(
/* TODO: dce11.2*/
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
- pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -2654,20 +2560,25 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
-static void dce110_set_bandwidth(
+void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
- dce110_set_displaymarks(dc, context);
+ struct dc_clocks req_clks;
- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
- }
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
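+	/* When lowering is not allowed, fall back to the safe watermarks
+	 * before the clocks are updated.
+	 */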
+ if (decrease_allowed)
+ dce110_set_displaymarks(dc, context);
+ else
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
pplib_apply_display_requirements(dc, context);
}
@@ -2679,9 +2590,7 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
-#endif
unsigned int i;
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2722,7 +2631,6 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
/* fbc not applicable on Underlay pipe */
if (dc->fbc_compressor && old_pipe->stream &&
pipe_ctx->pipe_idx != underlay_idx) {
@@ -2731,7 +2639,6 @@ static void dce110_program_front_end_for_pipe(
else
enable_fbc(dc, dc->current_state);
}
-#endif
mi->funcs->mem_input_program_surface_config(
mi,
@@ -2907,9 +2814,11 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
};
if (pipe_ctx->plane_state->address.type
@@ -2968,6 +2877,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
.blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
.enable_display_power_gating = dce110_enable_display_power_gating,
.disable_plane = dce110_power_down_fe,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 5d7e9f516827..e4c5db75c4c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -49,6 +49,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
+
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
@@ -56,10 +60,19 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
void dce110_power_down(struct dc *dc);
+void dce110_set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 0564c8e31252..9b9fc3d96c07 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -1011,7 +1011,7 @@ void dce110_free_mem_input_v(
{
}
-static struct mem_input_funcs dce110_mem_input_v_funcs = {
+static const struct mem_input_funcs dce110_mem_input_v_funcs = {
.mem_input_program_display_marks =
dce_mem_input_v_program_display_marks,
.mem_input_program_chroma_display_marks =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index ee33786bdef6..e5e9e92521e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -49,14 +49,14 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#define DC_LOGGER \
dc->ctx->logger
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
#include "dce110/dce110_compressor.h"
-#endif
#include "reg_helper.h"
@@ -147,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -307,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -589,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
+struct aux_engine *dce110_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
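+	/* Construct with the software AUX timeout
+	 * (SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD) and the
+	 * register block for this engine instance.
+	 */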
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -652,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -680,8 +716,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -795,43 +831,38 @@ static bool dce110_validate_bandwidth(
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
- struct log_entry log_entry;
- dm_logger_open(
- dc->ctx->logger,
- &log_entry,
- LOG_BANDWIDTH_CALCS);
- dm_logger_append(&log_entry, "%s: finish,\n"
+
+ DC_LOG_BANDWIDTH_CALCS(
+ "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
+ ,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable);
- dm_logger_append(&log_entry,
- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
@@ -841,7 +872,6 @@ static bool dce110_validate_bandwidth(
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
- dm_logger_close(&log_entry);
}
return result;
}
@@ -1180,11 +1210,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce110_disp_clk_create(ctx,
+ pool->base.dccg = dce110_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1214,7 +1244,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1265,14 +1295,18 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+				"DC: failed to create aux engine!\n");
+ goto res_create_fail;
+ }
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
dc->fbc_compressor = dce110_compressor_create(ctx);
-
-
-#endif
if (!underlay_create(ctx, &pool->base))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index a7dce060204f..aa8d6b10d2c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -235,7 +235,7 @@ static void program_overscan(
int overscan_right = data->h_active - data->recout.x - data->recout.width;
int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
overscan_bottom += 2;
overscan_right += 2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 00c0a1ef15eb..288129343c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -49,6 +49,7 @@
#include "dce112/dce112_hw_sequencer.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "reg_helper.h"
@@ -146,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
+struct aux_engine *dce112_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.opps[i] != NULL)
dce110_opp_destroy(&pool->base.opps[i]);
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
if (pool->base.transforms[i] != NULL)
dce112_transform_destroy(&pool->base.transforms[i]);
@@ -640,6 +676,7 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -668,8 +705,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -744,43 +781,38 @@ bool dce112_validate_bandwidth(
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
- struct log_entry log_entry;
- dm_logger_open(
- dc->ctx->logger,
- &log_entry,
- LOG_BANDWIDTH_CALCS);
- dm_logger_append(&log_entry, "%s: finish,\n"
+
+ DC_LOG_BANDWIDTH_CALCS(
+ "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
+ ,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable);
- dm_logger_append(&log_entry,
- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
@@ -790,7 +822,6 @@ bool dce112_validate_bandwidth(
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
- dm_logger_close(&log_entry);
}
return result;
}
@@ -1000,7 +1031,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1010,7 +1041,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1020,7 +1051,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -1030,7 +1061,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -1124,11 +1155,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce112_disp_clk_create(ctx,
+ pool->base.dccg = dce112_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1158,7 +1189,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1214,6 +1245,13 @@ static bool construct(
"DC:failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+			"DC: failed to create aux engine!\n");
+ goto res_create_fail;
+ }
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index e96ff86d2fc3..5853522a6182 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,7 +244,16 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
}
+static void dce120_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
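+	/* Nothing to program for an empty context. */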
+ if (context->stream_count <= 0)
+		return;
+
+	dce110_set_bandwidth(dc, context, decrease_allowed);
+}
void dce120_hw_sequencer_construct(struct dc *dc)
{
@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
+ dc->hwss.set_bandwidth = dce120_set_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 2d58daccc005..d43f37d99c7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_hwseq.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
+struct aux_engine *dce120_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
static const struct bios_registers bios_regs = {
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
@@ -373,7 +404,7 @@ static const struct resource_caps res_cap = {
.num_pll = 6,
};
-static const struct dc_debug debug_defaults = {
+static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -494,8 +529,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
}
static void read_dce_straps(
@@ -775,7 +810,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -785,7 +820,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -795,7 +830,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -805,7 +840,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -848,6 +883,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.psp_setup_panel_mode = true;
dc->debug = debug_defaults;
@@ -894,11 +930,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce120_disp_clk_create(ctx);
- if (pool->base.display_clock == NULL) {
+ pool->base.dccg = dce120_dccg_create(ctx);
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
- goto disp_clk_create_fail;
+ goto dccg_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
@@ -984,6 +1020,13 @@ static bool construct(
dm_error(
"DC: failed to create output pixel processor!\n");
}
+ pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+			"DC: failed to create aux engine!\n");
+ goto res_create_fail;
+ }
/* check next valid pipe */
j++;
@@ -1011,7 +1054,7 @@ static bool construct(
irqs_create_fail:
controller_create_fail:
-disp_clk_create_fail:
+dccg_create_fail:
clk_src_create_fail:
res_create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 2ea490f8482e..04b866f0fa1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -772,7 +772,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
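+	/* Enable double buffering so the blank-data update presumably latches
+	 * on a frame boundary instead of taking effect mid-scanout.
+	 */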
CRTC_REG_SET(
CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
- CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
if (enable_blanking)
CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 48a068964722..604c62969ead 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -54,6 +54,7 @@
#include "reg_helper.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
/* TODO remove this include */
@@ -153,15 +154,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
return &opp->base;
}
+struct aux_engine *dce80_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct stream_encoder *dce80_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -683,8 +719,8 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -822,11 +858,11 @@ static bool dce80_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -852,7 +888,7 @@ static bool dce80_construct(
goto res_create_fail;
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -899,9 +935,18 @@ static bool dce80_construct(
dm_error("DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+			"DC: failed to create aux engine!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1006,11 +1051,11 @@ static bool dce81_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1037,7 +1082,7 @@ static bool dce81_construct(
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1087,6 +1132,7 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1187,11 +1233,11 @@ static bool dce83_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1218,7 +1264,7 @@ static bool dce83_construct(
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1268,6 +1314,7 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index c69fa4bfab0a..bf8b68f8db4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -145,10 +145,10 @@ static bool dpp_get_optimal_number_of_taps(
pixel_width = scl_data->viewport.width;
	/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
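+	/* As written, FP16 on fixed-format DSCL hardware is rejected only when
+	 * both scaling ratios differ from 1:1.
+	 */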
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
+ if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
+ scl_data->ratios.horz.value != dc_fixpt_one.value &&
+ scl_data->ratios.vert.value != dc_fixpt_one.value)
return false;
if (scl_data->viewport.width > scl_data->h_active &&
@@ -445,10 +445,10 @@ void dpp1_set_cursor_position(
uint32_t width)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
uint32_t cur_en = pos->enable ? 1 : 0;
- if (src_x_offset >= (int)param->viewport_width)
+ if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + (int)width <= 0)
@@ -459,6 +459,18 @@ void dpp1_set_cursor_position(
}
+void dpp1_cnv_set_optional_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
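+	/* Program the FP16 cursor bias and scale only when attributes are
+	 * supplied; a NULL attr leaves the CNVC cursor registers untouched.
+	 */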
+ if (attr) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+}
+
void dpp1_dppclk_control(
struct dpp *dpp_base,
bool dppclk_div,
@@ -499,6 +511,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_full_bypass = dpp1_full_bypass,
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e862cafa6501..e2889e61b18c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -119,6 +119,7 @@
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
SRI(DPP_CONTROL, DPP_TOP, id), \
SRI(CM_HDR_MULT_COEF, CM, id)
@@ -324,6 +325,8 @@
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
@@ -1076,7 +1079,9 @@
type CUR0_COLOR1; \
type DPPCLK_RATE_CONTROL; \
type DPP_CLOCK_ENABLE; \
- type CM_HDR_MULT_COEF;
+ type CM_HDR_MULT_COEF; \
+ type CUR0_FP_BIAS; \
+ type CUR0_FP_SCALE;
struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)
@@ -1329,7 +1334,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR0; \
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
- uint32_t CM_HDR_MULT_COEF;
+ uint32_t CM_HDR_MULT_COEF; \
+ uint32_t CURSOR0_FP_SCALE_BIAS;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1370,6 +1376,10 @@ void dpp1_set_cursor_position(
const struct dc_cursor_mi_param *param,
uint32_t width);
+void dpp1_cnv_set_optional_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr);
+
bool dpp1_dscl_is_lb_conf_valid(
int ceil_vratio,
int num_partitions,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f862fd148cca..4a863a5dab41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -621,6 +621,10 @@ static void dpp1_dscl_set_manual_ratio_init(
static void dpp1_dscl_set_recout(
struct dcn10_dpp *dpp, const struct rect *recout)
{
+	int visual_confirm_on = 0;
+
+	if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+		visual_confirm_on = 1;
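+	/* With visual confirm enabled, the recout height below is reduced by
+	 * four lines per pipe instance, presumably to expose a per-pipe debug
+	 * band.
+	 */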
+
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT */
RECOUT_START_X, recout->x,
@@ -632,8 +636,7 @@ static void dpp1_dscl_set_recout(
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
- - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
- (dpp->base.inst + 1));
+ - visual_confirm_on * 4 * (dpp->base.inst + 1));
}
/* Main function to program scaler and line buffer in manual scaling mode */
@@ -655,6 +658,12 @@ void dpp1_dscl_set_scaler_manual_scale(
dpp->scl_data = *scl_data;
+ /* Autocal off */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
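+	/* Moved ahead of the 444-bypass early return below so autocal is
+	 * always forced off, even when scaling is bypassed.
+	 */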
+
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
@@ -678,12 +687,6 @@ void dpp1_dscl_set_scaler_manual_scale(
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
return;
- /* Autocal off */
- REG_SET_3(DSCL_AUTOCAL, 0,
- AUTOCAL_MODE, AUTOCAL_MODE_OFF,
- AUTOCAL_NUM_PIPE, 0,
- AUTOCAL_PIPE_ID, 0);
-
/* Black offsets */
if (ycbcr)
REG_SET_2(SCL_BLACK_OFFSET, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 943143efbb82..1ea91e153d3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -190,10 +190,17 @@ static uint32_t convert_and_clamp(
}
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
+{
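+	/* Workaround: pulse the watermark change request (0 then 1),
+	 * presumably to make the arbiter re-latch pending updates.
+	 */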
+ REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+}
+
void hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
- unsigned int refclk_mhz)
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
{
uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
/*
@@ -202,191 +209,259 @@ void hubbub1_program_watermarks(
*/
uint32_t prog_wm_value;
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
/* Repeat for water mark set A, B, C and D. */
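+	/* Each watermark register is rewritten only when the value rises, or
+	 * when safe_to_lower permits a decrease; the last programmed set is
+	 * cached in hubbub->watermarks to skip redundant writes.
+	 */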
/* clock state A */
- prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
-
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) {
+ hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
- prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) {
+ hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+ }
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.a.cstate_pstate.cstate_exit_ns =
+ watermarks->a.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.a.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.a.cstate_pstate.pstate_change_ns =
+ watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_exit_ns,
+ watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-
-
/* clock state B */
- prog_wm_value = convert_and_clamp(
- watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->b.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) {
+ hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) {
+ hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+ }
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.b.cstate_pstate.cstate_exit_ns =
+ watermarks->b.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.b.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.b.cstate_pstate.pstate_change_ns =
+ watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_exit_ns,
+ watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
-
/* clock state C */
- prog_wm_value = convert_and_clamp(
- watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->c.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) {
+ hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) {
+ hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+ }
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.c.cstate_pstate.cstate_exit_ns =
+ watermarks->c.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.c.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.c.cstate_pstate.pstate_change_ns =
+ watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_exit_ns,
+ watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
-
/* clock state D */
- prog_wm_value = convert_and_clamp(
- watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.urgent_ns, prog_wm_value);
-
- prog_wm_value = convert_and_clamp(
- watermarks->d.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.pte_meta_urgent_ns, prog_wm_value);
-
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) {
+ hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+ }
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_exit_ns,
+ if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) {
+ hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
}
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.d.cstate_pstate.cstate_exit_ns =
+ watermarks->d.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.d.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.d.cstate_pstate.pstate_change_ns =
+ watermarks->d.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
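
Every watermark above now goes through the same raise-or-hold guard. Reduced to a sketch (the helper name is illustrative, not part of the patch), the pattern is:

	/* Only program the register when raising the watermark, or when the
	 * caller has declared that lowering is safe; cache what was written. */
	static bool should_program_wm(bool safe_to_lower,
			uint32_t new_ns, uint32_t *cached_ns)
	{
		if (safe_to_lower || new_ns > *cached_ns) {
			*cached_ns = new_ns;	/* mirror of the HW state */
			return true;		/* caller converts, clamps, writes */
		}
		return false;			/* keep the higher, safer value */
	}

This is why struct hubbub gains a dcn_watermark_set member below: it caches the last programmed values so the comparison has something to compare against.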
@@ -408,6 +483,11 @@ void hubbub1_update_dchub(
struct hubbub *hubbub,
struct dchub_init_data *dh_data)
{
+ if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
+ ASSERT(false);
+ /* should never get here */
+ return;
+ }
/* TODO: port code from dal2 */
switch (dh_data->fb_mode) {
case FRAME_BUFFER_MODE_ZFB_ONLY:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 6315a0e6b0d6..d6e596eef4c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -185,6 +185,7 @@ struct hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
+ struct dcn_watermark_set watermarks;
};
void hubbub1_update_dchub(
@@ -194,10 +195,13 @@ void hubbub1_update_dchub(
bool hubbub1_verify_allow_pstate_change_high(
struct hubbub *hubbub);
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
+
void hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
- unsigned int refclk_mhz);
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
void hubbub1_toggle_watermark_change_req(
struct hubbub *hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index c28085be39ff..2138cd3c5d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -152,21 +152,19 @@ void hubp1_program_tiling(
PIPE_ALIGNED, info->gfx9.pipe_aligned);
}
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
struct hubp *hubp,
- enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
- struct dc_plane_dcc_param *dcc,
- bool horizontal_mirror)
+ struct dc_plane_dcc_param *dcc)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
/* Program data and meta surface pitch (calculation from addrlib)
* 444 or 420 luma
*/
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
ASSERT(plane_size->video.chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
@@ -192,13 +190,22 @@ void hubp1_program_size_and_rotation(
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+}
+
+void hubp1_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t mirror;
+
if (horizontal_mirror)
mirror = 1;
else
mirror = 0;
-
/* Program rotation angle and horz mirror - no mirror */
if (rotation == ROTATION_ANGLE_0)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
@@ -287,6 +294,10 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
@@ -450,9 +461,6 @@ bool hubp1_program_surface_flip_and_addr(
hubp->request_address = *address;
- if (flip_immediate)
- hubp->current_address = *address;
-
return true;
}
@@ -481,8 +489,8 @@ void hubp1_program_surface_config(
{
hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
hubp1_program_tiling(hubp, tiling_info, format);
- hubp1_program_size_and_rotation(
- hubp, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_size(hubp, format, plane_size, dcc);
+ hubp1_program_rotation(hubp, rotation, horizontal_mirror);
hubp1_program_pixel_format(hubp, format);
}
@@ -688,7 +696,6 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
- hubp->current_address = hubp->request_address;
return false;
}
@@ -1061,9 +1068,11 @@ void hubp1_cursor_set_position(
const struct dc_cursor_mi_param *param)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
- uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
/*
* Guard against cursor_set_position() being called with invalid
@@ -1075,6 +1084,18 @@ void hubp1_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
+ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+ src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
+ y_hotspot = pos->x_hotspot;
+ x_hotspot = pos->y_hotspot;
+ }
+
+ if (param->mirror) {
+ x_hotspot = param->viewport.width - x_hotspot;
+ src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ }
+
+ dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
dst_x_offset *= param->ref_clk_khz;
dst_x_offset /= param->pixel_clk_khz;
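
A worked example of the swap above (values hypothetical): with rotation = ROTATION_ANGLE_90, pos->x = 100, pos->y = 200, a hotspot of (4, 12), and viewport.x = 0, src_x_offset becomes 200 - 12 - 0 = 188 and the hotspot is programmed as (12, 4); the x/y hotspots trade places to match the rotated surface. With mirror enabled, x_hotspot is additionally reflected across the viewport width.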
@@ -1085,7 +1106,7 @@ void hubp1_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (src_x_offset >= (int)param->viewport_width)
+ if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + (int)hubp->curs_attr.width <= 0)
@@ -1102,8 +1123,8 @@ void hubp1_cursor_set_position(
CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ CURSOR_HOT_SPOT_X, x_hotspot,
+ CURSOR_HOT_SPOT_Y, y_hotspot);
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -1125,7 +1146,7 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
-static struct hubp_funcs dcn10_hubp_funcs = {
+static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
.hubp_program_surface_config =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index d901d5092969..f689feace82d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -268,8 +268,6 @@
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
@@ -388,6 +386,8 @@
#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
HUBP_MASK_SH_LIST_DCN(mask_sh),\
HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
@@ -679,12 +679,15 @@ void hubp1_program_pixel_format(
struct hubp *hubp,
enum surface_pixel_format format);
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
struct hubp *hubp,
- enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
- struct dc_plane_dcc_param *dcc,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp1_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
bool horizontal_mirror);
void hubp1_program_tiling(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f8e0576af6e0..cfcc54f2ce65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -337,13 +337,13 @@ void dcn10_log_hw_state(struct dc *dc)
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
- dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.calc_clk.dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.fclk_khz,
- dc->current_state->bw.dcn.calc_clk.socclk_khz);
+ dc->current_state->bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw.dcn.clk.fclk_khz,
+ dc->current_state->bw.dcn.clk.socclk_khz);
log_mpc_crc(dc);
@@ -415,6 +415,8 @@ static void dpp_pg_control(
if (hws->ctx->dc->debug.disable_dpp_power_gate)
return;
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
switch (dpp_inst) {
case 0: /* DPP0 */
@@ -465,6 +467,8 @@ static void hubp_pg_control(
if (hws->ctx->dc->debug.disable_hubp_power_gate)
return;
+ if (REG(DOMAIN0_PG_CONFIG) == 0)
+ return;
switch (hubp_inst) {
case 0: /* DCHUBP0 */
@@ -719,19 +723,7 @@ static void reset_back_end_for_pipe(
if (!pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
else if (pipe_ctx->stream_res.audio) {
- /*
- * if stream is already disabled outside of commit streams path,
- * audio disable was skipped. Need to do it here
- */
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
- if (dc->caps.dynamic_audio == true) {
- /*we have to dynamic arbitrate the audio endpoints*/
- pipe_ctx->stream_res.audio = NULL;
- /*we free the resource, need reset is_audio_acquired*/
- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
- }
-
+ dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
}
}
@@ -842,7 +834,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
}
-static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
static bool should_log_hw_state; /* prevent hw state log by default */
@@ -877,7 +869,8 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
return;
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
- opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ if (opp != NULL)
+ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
@@ -1022,7 +1015,7 @@ static void dcn10_init_hw(struct dc *dc)
/* Reset all MPCC muxes */
dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = dc->res_pool->hubps[i];
@@ -1096,6 +1089,8 @@ static void dcn10_init_hw(struct dc *dc)
}
enable_power_gating_plane(dc->hwseq, true);
+
+ memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
}
static void reset_hw_ctx_wrap(
@@ -1164,12 +1159,19 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
if (plane_state == NULL)
return;
+
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+
pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
pipe_ctx->plane_res.hubp,
&plane_state->address,
plane_state->flip_immediate);
+
plane_state->status.requested_address = plane_state->address;
+
+ if (plane_state->flip_immediate)
+ plane_state->status.current_address = plane_state->address;
+
if (addr_patched)
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
@@ -1213,8 +1215,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
} else if (tf->type == TF_TYPE_BYPASS) {
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
} else {
- /*TF_TYPE_DISTRIBUTED_POINTS*/
- result = false;
+ cm_helper_translate_curve_to_degamma_hw_format(tf,
+ &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
+ &dpp_base->degamma_params);
+ result = true;
}
return result;
@@ -1355,10 +1360,11 @@ static void dcn10_enable_per_frame_crtc_position_reset(
DC_SYNC_INFO("Setting up\n");
for (i = 0; i < group_size; i++)
- grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
- grouped_pipes[i]->stream_res.tg,
- grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
- &grouped_pipes[i]->stream->triggered_crtc_reset);
+ if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+ &grouped_pipes[i]->stream->triggered_crtc_reset);
DC_SYNC_INFO("Waiting for trigger\n");
@@ -1774,6 +1780,43 @@ static void dcn10_get_surface_visual_confirm_color(
}
}
+static void dcn10_get_hdr_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ // Determine the overscan color based on the top-most (desktop) plane's context
+ struct pipe_ctx *top_pipe_ctx = pipe_ctx;
+
+ while (top_pipe_ctx->top_pipe != NULL)
+ top_pipe_ctx = top_pipe_ctx->top_pipe;
+
+ switch (top_pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB2101010:
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+ /* HDR10, ARGB2101010 - set border color to red */
+ color->color_r_cr = color_value;
+ }
+ break;
+ case PIXEL_FORMAT_FP16:
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ /* HDR10, FP16 - set border color to blue */
+ color->color_b_cb = color_value;
+ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ /* FreeSync 2 HDR - set border color to green */
+ color->color_g_y = color_value;
+ }
+ break;
+ default:
+ /* SDR - set border color to gray */
+ color->color_r_cr = color_value/2;
+ color->color_b_cb = color_value/2;
+ color->color_g_y = color_value/2;
+ break;
+ }
+}
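
In short, the confirmation color encodes the output path of the top-most plane: red for HDR10 on an ARGB2101010 surface, blue for HDR10 on FP16 (PQ), green for FreeSync 2 HDR (Gamma 2.2), and mid-gray for SDR.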
+
static uint16_t fixed_point_to_int_frac(
struct fixed31_32 arg,
uint8_t integer_bits,
@@ -1854,11 +1897,10 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
-
-static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -1869,13 +1911,17 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
/* TODO: proper fix once fpga works */
- if (dc->debug.surface_visual_confirm)
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+ dcn10_get_hdr_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
- else
+ } else {
color_space_to_black_color(
- dc, pipe_ctx->stream->output_color_space,
- &blnd_cfg.black_color);
+ dc, pipe_ctx->stream->output_color_space,
+ &blnd_cfg.black_color);
+ }
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
@@ -1964,18 +2010,17 @@ static void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
- context->bw.dcn.cur_clk.dispclk_khz / 2;
+ bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ dc->res_pool->dccg->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
should_divided_by_2,
true);
- dc->current_state->bw.dcn.cur_clk.dppclk_khz =
- should_divided_by_2 ?
- context->bw.dcn.cur_clk.dispclk_khz / 2 :
- context->bw.dcn.cur_clk.dispclk_khz;
+ dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
+ dc->res_pool->dccg->clks.dispclk_khz / 2 :
+ dc->res_pool->dccg->clks.dispclk_khz;
}
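
For example, with dispclk at 600,000 kHz, a calculated dppclk of 300,000 kHz or less engages the divider and the DPP domain runs at dispclk / 2; anything higher forces the DPP clock to full dispclk.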
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2001,7 +2046,7 @@ static void update_dchubp_dpp(
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change)
- update_mpcc(dc, pipe_ctx);
+ dc->hwss.update_mpcc(dc, pipe_ctx);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2063,12 +2108,13 @@ static void update_dchubp_dpp(
static void dcn10_blank_pixel_data(
struct dc *dc,
- struct stream_resource *stream_res,
- struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
bool blank)
{
enum dc_color_space color_space;
struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
/* program otg blank color */
color_space = stream->output_color_space;
@@ -2110,6 +2156,33 @@ static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, hw_mult);
}
+void dcn10_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dcn10_enable_plane(dc, pipe_ctx, context);
+
+ update_dchubp_dpp(dc, pipe_ctx, context);
+
+ set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish,
+ * so only do gamma programming for a full update.
+ * TODO: This can be further optimized/cleaned up.
+ * Always call this for now, since it does a memcmp internally
+ * before doing the heavy calculation and programming.
+ */
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+}
+
static void program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -2127,31 +2200,12 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
- pipe_ctx->stream, blank);
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
}
if (pipe_ctx->plane_state != NULL) {
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn10_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ dcn10_program_pipe(dc, pipe_ctx, context);
}
if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
@@ -2165,12 +2219,12 @@ static void dcn10_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
@@ -2232,8 +2286,6 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
- unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
- bool program_water_mark = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2247,7 +2299,7 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0) {
/* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+ dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
}
/* Disconnect unused mpcc */
@@ -2278,11 +2330,10 @@ static void dcn10_apply_ctx_for_surface(
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
- hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
+ dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;
- DC_LOG_DC(
- "Reset mpcc for pipe %d\n",
+ DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
}
}
@@ -2295,248 +2346,41 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.full_update)
- program_water_mark = true;
-
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i])
- dcn10_disable_plane(dc, old_pipe_ctx);
- }
-
- if (program_water_mark) {
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after watermark update */
- dcn10_verify_allow_pstate_change_high(dc);
- }
-
- /* watermark is for all pipes */
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks, ref_clk_mhz);
+ dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after watermark update */
- dcn10_verify_allow_pstate_change_high(dc);
- }
- }
-/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
- "\n============== Watermark parameters ==============\n"
- "a.urgent_ns: %d \n"
- "a.cstate_enter_plus_exit: %d \n"
- "a.cstate_exit: %d \n"
- "a.pstate_change: %d \n"
- "a.pte_meta_urgent: %d \n"
- "b.urgent_ns: %d \n"
- "b.cstate_enter_plus_exit: %d \n"
- "b.cstate_exit: %d \n"
- "b.pstate_change: %d \n"
- "b.pte_meta_urgent: %d \n",
- context->bw.dcn.watermarks.a.urgent_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
- context->bw.dcn.watermarks.b.urgent_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.b.pte_meta_urgent_ns
- );
- DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
- "\nc.urgent_ns: %d \n"
- "c.cstate_enter_plus_exit: %d \n"
- "c.cstate_exit: %d \n"
- "c.pstate_change: %d \n"
- "c.pte_meta_urgent: %d \n"
- "d.urgent_ns: %d \n"
- "d.cstate_enter_plus_exit: %d \n"
- "d.cstate_exit: %d \n"
- "d.pstate_change: %d \n"
- "d.pte_meta_urgent: %d \n"
- "========================================================\n",
- context->bw.dcn.watermarks.c.urgent_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
- context->bw.dcn.watermarks.d.urgent_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.d.pte_meta_urgent_ns
- );
-*/
-}
-
-static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
-{
- return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
-{
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
- bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dispclk_khz;
- int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
- bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dppclk_khz;
-
- /* increase clock, looking for div is 0 for current, request div is 1*/
- if (dispclk_increase) {
- /* already divided by 2, no need to reach target clk with 2 steps*/
- if (cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* request disp clk is lower than maximum supported dpp clk,
- * no need to reach target clk with two steps.
- */
- if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* target dpp clk not request divided by 2, still within threshold */
- if (!request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- } else {
- /* decrease clock, looking for current dppclk divided by 2,
- * request dppclk not divided by 2.
- */
-
- /* current dpp clk not divided by 2, no need to ramp*/
- if (!cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* current disp clk is lower than current maximum dpp clk,
- * no need to ramp
- */
- if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* request dpp clk need to be divided by 2 */
- if (request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
- }
-
- return disp_clk_threshold;
-}
-
-static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
-{
- int i;
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
-
- int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
-
- /* set disp clk to dpp clk threshold */
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- dispclk_to_dpp_threshold);
-
- /* update request dpp clk division option */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state)
- continue;
-
- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
- pipe_ctx->plane_res.dpp,
- request_dpp_div,
- true);
- }
-
- /* If target clk not same as dppclk threshold, set to target clock */
- if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- }
-
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- context->bw.dcn.cur_clk.dppclk_khz =
- context->bw.dcn.calc_clk.dppclk_khz;
- context->bw.dcn.cur_clk.max_supported_dppclk_khz =
- context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+ if (dc->hwseq->wa.DEGVIDCN10_254)
+ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
static void dcn10_set_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool decrease_allowed)
+ bool safe_to_lower)
{
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
- if (dc->debug.sanity_checks) {
+ if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
- }
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- return;
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dcfclk_khz,
- dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
- context->bw.dcn.cur_clk.dcfclk_khz =
- context->bw.dcn.calc_clk.dcfclk_khz;
- smu_req.hard_min_dcefclk_khz =
- context->bw.dcn.calc_clk.dcfclk_khz;
- }
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
- context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
- }
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.fclk_khz,
- dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
- context->bw.dcn.cur_clk.fclk_khz =
- context->bw.dcn.calc_clk.fclk_khz;
- smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
- }
-
- smu_req.display_count = context->stream_count;
-
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- *smu_req_cur = smu_req;
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
- /* make sure dcf clk is before dpp clk to
- * make sure we have enough voltage to run dpp clk
- */
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &context->bw.dcn.clk,
+ safe_to_lower);
- ramp_up_dispclk_with_dpp(dc, context);
+ dcn10_pplib_apply_display_requirements(dc, context);
}
- dcn10_pplib_apply_display_requirements(dc, context);
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+ &context->bw.dcn.watermarks,
+ dc->res_pool->ref_clock_inKhz / 1000,
+ true);
- if (dc->debug.sanity_checks) {
+ if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
- }
-
- /* need to fix this function. not doing the right thing here */
}
static void set_drr(struct pipe_ctx **pipe_ctx,
@@ -2701,16 +2545,20 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ bool flip_pending;
if (plane_state == NULL)
return;
- plane_state->status.is_flip_pending =
- pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+ flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
- plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
- if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ plane_state->status.is_flip_pending = flip_pending;
+
+ if (!flip_pending)
+ plane_state->status.current_address = plane_state->status.requested_address;
+
+ if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
tg->funcs->is_stereo_left_eye) {
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
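
The net effect of this hunk and the earlier hubp changes: address tracking moves from the hubp into plane_state->status. requested_address is recorded when the flip is programmed, and current_address only advances to it once the flip is no longer pending (or immediately, for flip_immediate surfaces), so status reflects what the hardware is actually scanning out.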
@@ -2719,8 +2567,14 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
- if (hws->ctx->dc->res_pool->hubbub != NULL)
- hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+ if (hws->ctx->dc->res_pool->hubbub != NULL) {
+ struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
+
+ if (hubp->funcs->hubp_update_dchub)
+ hubp->funcs->hubp_update_dchub(hubp, dh_data);
+ else
+ hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+ }
}
static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
@@ -2731,9 +2585,11 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
};
if (pipe_ctx->plane_state->address.type
@@ -2757,6 +2613,33 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, attributes->color_format);
}
+static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
+ struct fixed31_32 multiplier;
+ struct dpp_cursor_attributes opt_attr = { 0 };
+ uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
+ return;
+
+ fmt.exponenta_bits = 5;
+ fmt.mantissa_bits = 10;
+ fmt.sign = true;
+
+ if (sdr_white_level > 80) {
+ multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
+ convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
+ }
+
+ opt_attr.scale = hw_scale;
+ opt_attr.bias = 0;
+
+ pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
+ pipe_ctx->plane_res.dpp, &opt_attr);
+}
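
For reference, the conversion uses a half-float-style layout (1 sign, 5 exponent, 10 mantissa bits), so the default hw_scale of 0x3c00 encodes 1.0. With, say, sdr_white_level = 160, the multiplier is 160/80 = 2.0, which encodes as 0x4000; levels at or below 80 nits keep the identity scale.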
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_csc_matrix = program_csc_matrix,
@@ -2764,7 +2647,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
.update_plane_addr = dcn10_update_plane_addr,
+ .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
.update_dchub = dcn10_update_dchub,
+ .update_mpcc = dcn10_update_mpcc,
.update_pending_status = dcn10_update_pending_status,
.set_input_transfer_func = dcn10_set_input_transfer_func,
.set_output_transfer_func = dcn10_set_output_transfer_func,
@@ -2778,6 +2663,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
.blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.disable_plane = dcn10_disable_plane,
.blank_pixel_data = dcn10_blank_pixel_data,
@@ -2800,7 +2687,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 44f734b73f9e..7139fb73e966 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -39,4 +39,11 @@ bool is_rgb_cspace(enum dc_color_space output_color_space);
void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+
+void dcn10_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 21fa40ac0786..6f675206a136 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -65,11 +65,6 @@ enum {
DP_MST_UPDATE_MAX_RETRY = 50
};
-
-
-static void aux_initialize(struct dcn10_link_encoder *enc10);
-
-
static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
@@ -445,12 +440,11 @@ static uint8_t get_frontend_source(
}
}
-static void configure_encoder(
+void configure_encoder(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings)
{
/* set number of lanes */
-
REG_SET(DP_CONFIG, 0,
DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
@@ -602,6 +596,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
adjusted_pix_clk_khz >= 300000)
return false;
+ if (enc10->base.ctx->dc->debug.hdmi20_disable &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
return true;
}
@@ -734,6 +731,9 @@ void dcn10_link_encoder_construct(
__func__,
result);
}
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
}
bool dcn10_link_encoder_validate_output_with_stream(
@@ -812,7 +812,7 @@ void dcn10_link_encoder_hw_init(
ASSERT(result == BP_RESULT_OK);
}
- aux_initialize(enc10);
+ dcn10_aux_initialize(enc10);
/* reinitialize HPD.
* hpd_initialize() will pass DIG_FE id to HW context.
@@ -995,6 +995,8 @@ void dcn10_link_encoder_disable_output(
if (!dcn10_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+ /* In the DP_Alt_No_Connect case the DIG is already turned off;
+ * after the PHY w/a sequence has run, the PHY must not be touched again. */
return;
}
/* Power-down RX and disable GPU PHY should be paired.
@@ -1347,8 +1349,7 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
FN(reg, f1), v1,\
FN(reg, f2), v2)
-static void aux_initialize(
- struct dcn10_link_encoder *enc10)
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
{
enum hpd_source_id hpd_source = enc10->base.hpd_source;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 2a97cdb2cfbb..49ead12b2532 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -42,6 +42,7 @@
#define LE_DCN_COMMON_REG_LIST(id) \
SRI(DIG_BE_CNTL, DIG, id), \
SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
SRI(DP_CONFIG, DP, id), \
SRI(DP_DPHY_CNTL, DP, id), \
SRI(DP_DPHY_PRBS_CNTL, DP, id), \
@@ -64,6 +65,7 @@
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
#define LE_DCN10_REG_LIST(id)\
LE_DCN_COMMON_REG_LIST(id)
@@ -100,6 +102,7 @@ struct dcn10_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+ uint32_t TMDS_CTL_BITS;
};
#define LE_SF(reg_name, field_name, post_fix)\
@@ -110,6 +113,7 @@ struct dcn10_link_enc_registers {
LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
@@ -198,10 +202,11 @@ struct dcn10_link_enc_registers {
type DP_MSE_SAT_SLOT_COUNT3;\
type DP_MSE_SAT_UPDATE;\
type DP_MSE_16_MTP_KEEPOUT;\
+ type DC_HPD_EN;\
+ type TMDS_CTL0;\
type AUX_HPD_SEL;\
type AUX_LS_READ_EN;\
- type AUX_RX_RECEIVE_WINDOW;\
- type DC_HPD_EN
+ type AUX_RX_RECEIVE_WINDOW
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
@@ -266,6 +271,10 @@ void dcn10_link_encoder_setup(
struct link_encoder *enc,
enum signal_type signal);
+void configure_encoder(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings);
+
/* enables TMDS PHY output */
/* TODO: still need depth or just pass in adjusted pixel clock? */
void dcn10_link_encoder_enable_tmds_output(
@@ -327,4 +336,6 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
bool dcn10_is_dig_enabled(struct link_encoder *enc);
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
+
#endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 9ca51ae46de7..958994edf2c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -428,7 +428,7 @@ void mpc1_read_mpcc_state(
MPCC_BUSY, &s->busy);
}
-const struct mpc_funcs dcn10_mpc_funcs = {
+static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 77a1a9d541a4..ab958cff3b76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -385,7 +385,7 @@ void opp1_destroy(struct output_pixel_processor **opp)
*opp = NULL;
}
-static struct opp_funcs dcn10_opp_funcs = {
+static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_dyn_expansion = opp1_set_dyn_expansion,
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f2fbce0e3fc5..411f89218e01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1257,6 +1257,37 @@ void optc1_read_otg_state(struct optc *optc1,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
}
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height)
+{
+ uint32_t otg_enabled;
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &otg_enabled);
+
+ if (otg_enabled == 0)
+ return false;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &v_blank_start,
+ OTG_V_BLANK_END, &v_blank_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &h_blank_start,
+ OTG_H_BLANK_END, &h_blank_end);
+
+ *otg_active_width = h_blank_start - h_blank_end;
+ *otg_active_height = v_blank_start - v_blank_end;
+ return true;
+}
+
void optc1_clear_optc_underflow(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1293,6 +1324,72 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
return (underflow_occurred == 1);
}
+bool optc1_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* Cannot configure crc on a CRTC that is disabled */
+ if (!optc1_is_tg_enabled(optc))
+ return false;
+
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable)
+ return true;
+
+ /* Program frame boundaries */
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable. Only using CRC0*/
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+
+ return true;
+}
+
+bool optc1_get_crc(struct timing_generator *optc,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+ uint32_t field = 0;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field);
+
+ /* Early return if CRC is not enabled for this CRTC */
+ if (!field)
+ return false;
+
+ REG_GET_2(OTG_CRC0_DATA_RG,
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
+
+ REG_GET(OTG_CRC0_DATA_B,
+ CRC0_B_CB, b_cb);
+
+ return true;
+}
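
A hedged usage sketch of the two CRC entry points above (the caller, window bounds, and zero-initialized selection are illustrative assumptions, not part of the patch):

	/* Enable continuous CRC capture over the full frame, then poll it. */
	static void crc_example(struct timing_generator *tg,
			uint32_t width, uint32_t height)
	{
		struct crc_params params = {0};	/* selection 0 assumed to be the default CRC source */
		uint32_t r_cr, g_y, b_cb;

		params.enable = true;
		params.continuous_mode = true;
		params.windowa_x_start = 0;
		params.windowa_x_end = width;
		params.windowa_y_start = 0;
		params.windowa_y_end = height;

		if (!optc1_configure_crc(tg, &params))
			return;	/* CRTC disabled; CRC cannot be armed */

		/* Later, after at least one frame: */
		if (optc1_get_crc(tg, &r_cr, &g_y, &b_cb)) {
			/* r_cr/g_y/b_cb now hold the per-channel CRCs */
		}
	}

Window B is left zeroed here; whether that is appropriate depends on how the hardware combines the two windows, so treat it as part of the sketch's assumptions.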
+
static const struct timing_generator_funcs dcn10_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -1305,6 +1402,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1328,6 +1426,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc1_configure_crc,
};
void dcn10_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c62052f46460..c1b114209fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -75,7 +75,14 @@
SRI(CONTROL, VTG, inst),\
SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
- SRI(OTG_GSL_CONTROL, OTG, inst)
+ SRI(OTG_GSL_CONTROL, OTG, inst),\
+ SRI(OTG_CRC_CNTL, OTG, inst),\
+ SRI(OTG_CRC0_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC0_DATA_B, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
#define TG_COMMON_REG_LIST_DCN1_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
@@ -138,6 +145,13 @@ struct dcn_optc_registers {
uint32_t OTG_GSL_WINDOW_X;
uint32_t OTG_GSL_WINDOW_Y;
uint32_t OTG_VUPDATE_KEEPOUT;
+ uint32_t OTG_CRC_CNTL;
+ uint32_t OTG_CRC0_DATA_RG;
+ uint32_t OTG_CRC0_DATA_B;
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -232,7 +246,21 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
- SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -363,7 +391,22 @@ struct dcn_optc_registers {
type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
- type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
+ type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
+ type OTG_CRC_CONT_EN;\
+ type OTG_CRC0_SELECT;\
+ type OTG_CRC_EN;\
+ type CRC0_R_CR;\
+ type CRC0_G_Y;\
+ type CRC0_B_CB;\
+ type OTG_CRC0_WINDOWA_X_START;\
+ type OTG_CRC0_WINDOWA_X_END;\
+ type OTG_CRC0_WINDOWA_Y_START;\
+ type OTG_CRC0_WINDOWA_Y_END;\
+ type OTG_CRC0_WINDOWB_X_START;\
+ type OTG_CRC0_WINDOWB_X_END;\
+ type OTG_CRC0_WINDOWB_Y_START;\
+ type OTG_CRC0_WINDOWB_Y_END;
+
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)
@@ -507,4 +550,19 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height);
+
+void optc1_enable_crtc_reset(
+ struct timing_generator *optc,
+ int source_tg_inst,
+ struct crtc_trigger_info *crtc_tp);
+
+bool optc1_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
+
+bool optc1_get_crc(struct timing_generator *optc,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index df5cb2d1d164..6b44ed3697a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -64,6 +64,69 @@
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+
+const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs = 42,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 589824,
+ .max_line_buffer_lines = 12,
+ .IsLineBufferBppFixed = 0,
+ .LineBufferFixedBpp = -1,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .max_num_dpp = 4,
+ .max_num_wb = 2,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 14,
+ .dppclk_delay_subtotal = 90,
+ .dispclk_delay_subtotal = 42,
+ .dcfclk_cstate_latency = 10,
+ .max_inter_dcn_tile_repeaters = 8,
+ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
+ .bug_forcing_LC_req_same_size_fixed = 0,
+};
+
+const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 80.0,
+ .max_request_size_bytes = 256,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 128,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 2,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 17.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+};
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -294,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN10(_MASK),
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN10(id),\
@@ -417,13 +495,14 @@ static const struct dce110_clk_src_mask cs_mask = {
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
+ .num_opp = 4,
.num_video_plane = 4,
.num_audio = 4,
.num_stream_encoder = 4,
.num_pll = 4,
};
-static const struct dc_debug debug_defaults_drv = {
+static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
.force_abm_enable = false,
@@ -436,7 +515,7 @@ static const struct dc_debug debug_defaults_drv = {
*/
.min_disp_clk_khz = 100000,
- .disable_pplib_clock_request = true,
+ .disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
@@ -451,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
.max_downscale_src_width = 3840,
};
-static const struct dc_debug debug_defaults_diags = {
+static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = true,
@@ -515,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
return &opp->base;
}
+struct aux_engine *dcn10_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@ -680,6 +776,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN10_253 = true;
hws->wa.false_optc_underflow = true;
+ hws->wa.DEGVIDCN10_254 = true;
}
return hws;
}
@@ -762,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++)
@@ -790,8 +890,8 @@ static void destruct(struct dcn10_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
kfree(pool->base.pp_smu);
}
@@ -971,11 +1071,11 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
return DC_OK;
}
-static struct dc_cap_funcs cap_funcs = {
+static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
-static struct resource_funcs dcn10_res_pool_funcs = {
+static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
.validate_bandwidth = dcn_validate_bandwidth,
@@ -1027,6 +1127,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
+ dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1070,8 +1172,8 @@ static bool construct(
}
}
- pool->base.display_clock = dce120_disp_clk_create(ctx);
- if (pool->base.display_clock == NULL) {
+ pool->base.dccg = dcn1_dccg_create(ctx);
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto fail;
@@ -1191,6 +1293,14 @@ static bool construct(
goto fail;
}
+ pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto fail;
+ }
+
/* check next valid pipe */
j++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index c928ee4cd382..6f9078f3c4d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -257,20 +257,18 @@ void enc1_stream_encoder_dp_set_stream_attribute(
uint8_t colorimetry_bpc;
uint8_t dynamic_range_rgb = 0; /*full range*/
uint8_t dynamic_range_ycbcr = 1; /*bt709*/
+ uint8_t dp_pixel_encoding = 0;
+ uint8_t dp_component_depth = 0;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-
/* set pixel encoding */
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR422);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
break;
case PIXEL_ENCODING_YCBCR444:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR444);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
if (crtc_timing->flags.Y_ONLY)
if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -278,8 +276,8 @@ void enc1_stream_encoder_dp_set_stream_attribute(
* Color depth of Y-only could be
* 8, 10, 12, 16 bits
*/
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_Y_ONLY);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;
+
/* Note: DP_MSA_MISC1 bit 7 is the indicator
* of Y-only mode.
* This bit is set in HW if register
@@ -287,48 +285,55 @@ void enc1_stream_encoder_dp_set_stream_attribute(
*/
break;
case PIXEL_ENCODING_YCBCR420:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR420);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
break;
default:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_RGB444);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
break;
}
misc1 = REG_READ(DP_MSA_MISC);
+ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
+ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+ */
+ if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+ (output_color_space == COLOR_SPACE_2020_YCBCR) ||
+ (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
+ (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+ misc1 = misc1 | 0x40;
+ else
+ misc1 = misc1 & ~0x40;
/* set color depth */
-
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_666:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- 0);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
case COLOR_DEPTH_888:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_8BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
break;
case COLOR_DEPTH_101010:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_10BPC);
-
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
break;
case COLOR_DEPTH_121212:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_12BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
break;
case COLOR_DEPTH_161616:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_16BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
break;
default:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_6BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
}
+ /* Set DP pixel encoding and component depth */
+ REG_UPDATE_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, dp_pixel_encoding,
+ DP_COMPONENT_DEPTH, dp_component_depth);
+
/* set dynamic range and YCbCr range */
switch (crtc_timing->display_color_depth) {
@@ -354,7 +359,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
switch (output_color_space) {
case COLOR_SPACE_SRGB:
- misc0 = misc0 | 0x0;
misc1 = misc1 & ~0x80; /* bit7 = 0*/
dynamic_range_rgb = 0; /*full range*/
break;
@@ -1087,27 +1091,6 @@ static union audio_cea_channels speakers_to_channels(
return cea_channels;
}
-static uint32_t calc_max_audio_packets_per_line(
- const struct audio_crtc_info *crtc_info)
-{
- uint32_t max_packets_per_line;
-
- max_packets_per_line =
- crtc_info->h_total - crtc_info->h_active;
-
- if (crtc_info->pixel_repetition)
- max_packets_per_line *= crtc_info->pixel_repetition;
-
- /* for other hdmi features */
- max_packets_per_line -= 58;
- /* for Control Period */
- max_packets_per_line -= 16;
- /* Number of Audio Packets per Line */
- max_packets_per_line /= 32;
-
- return max_packets_per_line;
-}
-
static void get_audio_clock_info(
enum dc_color_depth color_depth,
uint32_t crtc_pixel_clock_in_khz,
@@ -1201,16 +1184,9 @@ static void enc1_se_setup_hdmi_audio(
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct audio_clock_info audio_clock_info = {0};
- uint32_t max_packets_per_line;
-
- /* For now still do calculation, although this field is ignored when
- * above HDMI_PACKET_GEN_VERSION set to 1
- */
- max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
/* HDMI_AUDIO_PACKET_CONTROL */
- REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
- HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+ REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
HDMI_AUDIO_DELAY_EN, 1);
/* AFMT_AUDIO_PACKET_CONTROL */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 034369fbb9e2..5d4527d03045 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -40,6 +40,14 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps);
+
+/*
+ * Update DP branch info
+ */
+void dm_helpers_dp_update_branch_info(
+ struct dc_context *ctx,
+ const struct dc_link *link);
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -103,6 +111,9 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_is_dp_sink_present(
+ struct dc_link *link);
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index eac4bfe12257..58ed2055ef9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -40,7 +40,7 @@ enum wm_set_id {
WM_B,
WM_C,
WM_D,
- WM_COUNT,
+ WM_SET_COUNT,
};
struct pp_smu_wm_set_range {
@@ -53,10 +53,10 @@ struct pp_smu_wm_set_range {
struct pp_smu_wm_range_sets {
uint32_t num_reader_wm_sets;
- struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
+ struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT];
uint32_t num_writer_wm_sets;
- struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
+ struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT];
};
struct pp_smu_display_requirement_rv {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 4ff9b2bba178..eb5ab3978e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
#define dm_log_to_buffer(buffer, size, fmt, args)\
vsnprintf(buffer, size, fmt, args)
-unsigned long long dm_get_timestamp(struct dc_context *ctx);
+static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+ return ktime_get_raw_ns();
+}
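With the helper inlined, a timestamp is just a ktime_get_raw_ns() value, so elapsed-time math reduces to a subtraction; a hypothetical sketch:

/* Sketch: time a section of code, assuming ctx is a valid dc_context. */
unsigned long long start = dm_get_timestamp(ctx);
/* ... work to be measured ... */
unsigned long long elapsed_ns = dm_get_timestamp(ctx) - start;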
unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index ab8c77d4e6df..2b83f922ac02 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -137,7 +137,7 @@ struct dm_pp_clock_range_for_wm_set {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_eng_clk_in_khz;
uint32_t wm_max_eng_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
@@ -150,7 +150,7 @@ struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_dcfclk_clk_in_khz;
uint32_t wm_max_dcfclk_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
@@ -158,7 +158,7 @@ struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_socclk_clk_in_khz;
uint32_t wm_max_socclk_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index f83a608f93e9..d97ca6528f9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -36,11 +36,10 @@ CFLAGS_display_mode_lib.o := $(dml_ccflags)
CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_soc_bounding_box.o := $(dml_ccflags)
CFLAGS_dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
- soc_bounding_box.o dml_common_defs.o
+ dml_common_defs.o
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index fd9d97aab071..dddeb0d4db8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,67 +26,8 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = 164,
- .dpte_buffer_size_in_pte_reqs = 42,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_enable = 1,
- .pte_chunk_size_kbytes = 2,
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 2,
- .line_buffer_size_bits = 589824,
- .max_line_buffer_lines = 12,
- .IsLineBufferBppFixed = 0,
- .LineBufferFixedBpp = -1,
- .writeback_luma_buffer_size_kbytes = 12,
- .writeback_chroma_buffer_size_kbytes = 8,
- .max_num_dpp = 4,
- .max_num_wb = 2,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 4,
- .max_vscl_ratio = 4,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.10,
- .min_vblank_lines = 14,
- .dppclk_delay_subtotal = 90,
- .dispclk_delay_subtotal = 42,
- .dcfclk_cstate_latency = 10,
- .max_inter_dcn_tile_repeaters = 8,
- .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
- .bug_forcing_LC_req_same_size_fixed = 0,
-};
-
-static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .urgent_latency_us = 4.0,
- .writeback_latency_us = 12.0,
- .ideal_dram_bw_after_urgent_percent = 80.0,
- .max_request_size_bytes = 256,
- .downspread_percent = 0.5,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 128,
- .urgent_out_of_order_return_per_channel_bytes = 256,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .num_chans = 2,
- .vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 17.0,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
-};
+extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
+extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3c2abcb8a1b0..635206248889 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -27,7 +27,6 @@
#include "dml_common_defs.h"
-#include "soc_bounding_box.h"
#include "dml1_display_rq_dlg_calc.h"
enum dml_project {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 7fa0375939ae..cbafce649e33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -64,10 +64,9 @@ struct _vcs_dpi_voltage_scaling_st {
double dscclk_mhz;
double dcfclk_mhz;
double socclk_mhz;
- double dram_speed_mhz;
+ double dram_speed_mts;
double fabricclk_mhz;
double dispclk_mhz;
- double dram_bw_per_chan_gbps;
double phyclk_mhz;
double dppclk_mhz;
};
@@ -112,6 +111,8 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
+ double max_hscl_ratio;
+ double max_vscl_ratio;
struct _vcs_dpi_voltage_scaling_st clock_limits[7];
};
@@ -304,6 +305,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned char otg_inst;
unsigned char odm_split_cnt;
unsigned char odm_combine;
+ unsigned char use_maximum_vstartup;
};
struct _vcs_dpi_display_pipe_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
deleted file mode 100644
index 324239c77958..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "soc_bounding_box.h"
-#include "display_mode_lib.h"
-#include "dc_features.h"
-
-#include "dml_inline_defs.h"
-
-/*
- * NOTE:
- * This file is gcc-parseable HW gospel, coming straight from HW engineers.
- *
- * It doesn't adhere to Linux kernel style and sometimes will do things in odd
- * ways. Unless there is something clearly wrong with it the code should
- * remain as-is as it provides us with a guarantee from HW that it is correct.
- */
-
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
-{
- to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
- to_box->sr_exit_time_us = from_box->sr_exit_time_us;
- to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
- to_box->urgent_latency_us = from_box->urgent_latency_us;
- to_box->writeback_latency_us = from_box->writeback_latency_us;
-}
-
-voltage_scaling_st dml_socbb_voltage_scaling(
- const soc_bounding_box_st *soc,
- enum voltage_state voltage)
-{
- const voltage_scaling_st *voltage_state;
- const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
-
- for (voltage_state = soc->clock_limits;
- voltage_state < voltage_end && voltage_state->state != voltage;
- voltage_state++) {
- }
-
- if (voltage_state < voltage_end)
- return *voltage_state;
- return soc->clock_limits[DC__VOLTAGE_STATES - 1];
-}
-
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
-{
- double return_bw;
-
- voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
-
- return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
- state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
- * box->ideal_dram_bw_after_urgent_percent / 100.0);
-
- return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
-
- return return_bw;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 9c4a56c738c0..bf40725f982f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -82,13 +82,16 @@
DDC_GPIO_I2C_REG_LIST(cd),\
.ddc_setup = 0
-#define DDC_MASK_SH_LIST(mask_sh) \
+#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
- SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh)
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+ DDC_MASK_SH_LIST_COMMON(mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index ab5483c0c502..f20161c5706d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -375,6 +375,7 @@ struct gpio *dal_gpio_create_irq(
case GPIO_ID_GPIO_PAD:
break;
default:
+ id = GPIO_ID_HPD;
ASSERT_CRITICAL(false);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
index bb526ad326e5..0afd2fa57bbe 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -128,8 +128,20 @@ static void process_read_reply(
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
} else {
- ctx->current_read_length = ctx->returned_byte;
ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
ctx->transaction_complete = true;
ctx->operation_succeeded = true;
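The new short-read branch above covers an ACK that carries fewer bytes than requested: the context drops the bytes already received from current_read_length, advances offset by the same amount, and counts the attempt against AUX_INVALID_REPLY_RETRY_COUNTER before giving up with a protocol error. For example, a 16-byte read that is ACKed with only 4 bytes is retried as a 12-byte read at offset + 4.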
@@ -157,6 +169,10 @@ static void process_read_reply(
ctx->operation_succeeded = false;
}
break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -215,6 +231,10 @@ static void process_read_request(
* so we should not wait here */
}
break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -282,7 +302,6 @@ static bool read_command(
ctx.operation_succeeded);
}
- request->payload.length = ctx.reply.length;
return ctx.operation_succeeded;
}
@@ -370,6 +389,10 @@ static void process_write_reply(
ctx->operation_succeeded = false;
}
break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -422,6 +445,10 @@ static void process_write_request(
* so we should not wait here */
}
break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
index 8e71324ccb10..c33a2898d967 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -26,46 +26,7 @@
#ifndef __DAL_AUX_ENGINE_H__
#define __DAL_AUX_ENGINE_H__
-enum aux_transaction_type {
- AUX_TRANSACTION_TYPE_DP,
- AUX_TRANSACTION_TYPE_I2C
-};
-
-struct aux_request_transaction_data {
- enum aux_transaction_type type;
- enum i2caux_transaction_action action;
- /* 20-bit AUX channel transaction address */
- uint32_t address;
- /* delay, in 100-microsecond units */
- uint8_t delay;
- uint32_t length;
- uint8_t *data;
-};
-
-enum aux_transaction_reply {
- AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
- AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
- AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
-
- AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
- AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
- AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
-
- AUX_TRANSACTION_REPLY_INVALID = 0xFF
-};
-
-struct aux_reply_transaction_data {
- enum aux_transaction_reply status;
- uint32_t length;
- uint8_t *data;
-};
-
-enum aux_channel_operation_result {
- AUX_CHANNEL_OPERATION_SUCCEEDED,
- AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
- AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
- AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
-};
+#include "dc_ddc_types.h"
struct aux_engine;
@@ -83,6 +44,12 @@ struct aux_engine_funcs {
void (*process_channel_reply)(
struct aux_engine *engine,
struct aux_reply_transaction_data *reply);
+ int (*read_channel_reply)(
+ struct aux_engine *engine,
+ uint32_t size,
+ uint8_t *buffer,
+ uint8_t *reply_result,
+ uint32_t *sw_status);
enum aux_channel_operation_result (*get_channel_status)(
struct aux_engine *engine,
uint8_t *returned_bytes);
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
index e8d3781deaed..8b704ab0471c 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -97,6 +97,7 @@ struct i2caux *dal_i2caux_dce100_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce100_aux_regs),
dce100_aux_regs,
dce100_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
index 5f47f6c007ac..ae5caa97caca 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -198,27 +198,27 @@ static void submit_channel_request(
((request->type == AUX_TRANSACTION_TYPE_I2C) &&
((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+ if (REG(AUXN_IMPCAL)) {
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
- /* clear_aux_error */
- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
- 1,
- 0);
-
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
- 1,
- 0);
-
- /* force_default_calibrate */
- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
- AUXN_IMPCAL_ENABLE, 1,
- AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
- /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
- 1,
- 0);
+ /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+ }
/* set the delay and the number of bytes to write */
/* The length include
@@ -275,55 +275,92 @@ static void submit_channel_request(
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
}
-static void process_channel_reply(
- struct aux_engine *engine,
- struct aux_reply_transaction_data *reply)
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ uint8_t *buffer, uint8_t *reply_result,
+ uint32_t *sw_status)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t bytes_replied;
+ uint32_t reply_result_32;
- /* Need to do a read to get the number of bytes to process
- * Alternatively, this information can be passed -
- * but that causes coupling which isn't good either. */
+ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+ &bytes_replied);
- uint32_t bytes_replied;
- uint32_t value;
+ /* In case HPD is LOW, exit AUX transaction */
+ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return -1;
- value = REG_GET(AUX_SW_STATUS,
- AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+ /* Need at least the status byte */
+ if (!bytes_replied)
+ return -1;
- if (bytes_replied) {
- uint32_t reply_result;
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
- REG_UPDATE_1BY1_3(AUX_SW_DATA,
- AUX_SW_INDEX, 0,
- AUX_SW_AUTOINCREMENT_DISABLE, 1,
- AUX_SW_DATA_RW, 1);
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
- REG_GET(AUX_SW_DATA,
- AUX_SW_DATA, &reply_result);
+ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
- reply_result = reply_result >> 4;
+ /* First byte was already used to get the command status */
+ --bytes_replied;
- switch (reply_result) {
- case 0: /* ACK */ {
- uint32_t i = 0;
+ /* Do not overflow buffer */
+ if (bytes_replied > size)
+ return -1;
- /* first byte was already used
- * to get the command status */
- --bytes_replied;
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
- while (i < bytes_replied) {
- uint32_t aux_sw_data_val;
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+ buffer[i] = aux_sw_data_val;
+ ++i;
+ }
- REG_GET(AUX_SW_DATA,
- AUX_SW_DATA, &aux_sw_data_val);
+ return i;
+ }
- reply->data[i] = aux_sw_data_val;
- ++i;
- }
+ return 0;
+}
- reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ int bytes_replied;
+ uint8_t reply_result;
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
+ return;
+ }
+
+ if (bytes_replied < 0) {
+ /* Need to handle an error case...
+ * Ideally the upper-layer function will not call this function
+ * when the number of bytes in the reply is 0, because an error
+ * should already have been asserted and handled; for the
+ * hot-plug case this can still happen.
+ */
+ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
+ return;
}
+ } else {
+ switch (reply_result) {
+ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
break;
case 1: /* NACK */
reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
@@ -340,15 +377,6 @@ static void process_channel_reply(
default:
reply->status = AUX_TRANSACTION_REPLY_INVALID;
}
- } else {
- /* Need to handle an error case...
- * hopefully, upper layer function won't call this function
- * if the number of bytes in the reply was 0
- * because there was surely an error that was asserted
- * that should have been handled
- * for hot plug case, this could happens*/
- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
- ASSERT_CRITICAL(false);
}
}
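As implemented above, read_channel_reply returns -1 on HPD disconnect, on a missing status byte, or when the reply would overflow the caller's buffer; otherwise it returns the number of payload bytes copied (0 for non-ACK replies) and leaves the 4-bit AUX reply code in *reply_result. A hypothetical caller, with consume() standing in for whatever uses the data:

/* Sketch: fetch a reply after a submitted request has completed. */
uint8_t buf[16];
uint8_t reply_result;
uint32_t sw_status;
int n = engine->funcs->read_channel_reply(engine, sizeof(buf), buf,
					  &reply_result, &sw_status);

if (n < 0)
	return;                 /* HPD low or malformed reply */
if (reply_result == 0)          /* AUX ACK: n payload bytes are valid */
	consume(buf, n);        /* hypothetical consumer */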
@@ -371,6 +399,10 @@ static enum aux_channel_operation_result get_channel_status(
value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
/* Note that the following bits are set in 'status.bits'
* during CTS 4.2.1.2 (FW 3.3.1):
* AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
@@ -402,10 +434,10 @@ static enum aux_channel_operation_result get_channel_status(
return AUX_CHANNEL_OPERATION_SUCCEEDED;
}
} else {
- /*time_elapsed >= aux_engine->timeout_period */
- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
- ASSERT_CRITICAL(false);
-
+ /* time_elapsed >= aux_engine->timeout_period;
+ * AUX_SW_STATUS__AUX_SW_HPD_DISCON was already handled above,
+ * so this is a genuine timeout.
+ */
+ ASSERT_CRITICAL(false);
return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
}
}
@@ -415,6 +447,7 @@ static const struct aux_engine_funcs aux_engine_funcs = {
.acquire_engine = acquire_engine,
.submit_channel_request = submit_channel_request,
.process_channel_reply = process_channel_reply,
+ .read_channel_reply = read_channel_reply,
.get_channel_status = get_channel_status,
.is_engine_available = is_engine_available,
};
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index b7256f595052..9cbe1a7a6bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -62,12 +62,7 @@ enum dc_i2c_arbitration {
DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
};
-enum {
- /* No timeout in HW
- * (timeout implemented in SW by querying status) */
- I2C_SETUP_TIME_LIMIT = 255,
- I2C_HW_BUFFER_SIZE = 538
-};
+
/*
* @brief
@@ -152,6 +147,11 @@ static bool setup_engine(
struct i2c_engine *i2c_engine)
{
struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+ uint32_t reset_length = 0;
+
+ if (hw_engine->base.base.setup_limit != 0)
+ i2c_setup_limit = hw_engine->base.base.setup_limit;
/* Program pin select */
REG_UPDATE_6(
@@ -164,11 +164,15 @@ static bool setup_engine(
DC_I2C_DDC_SELECT, hw_engine->engine_id);
/* Program time limit */
- REG_UPDATE_N(
- SETUP, 2,
- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-
+ if (hw_engine->base.base.send_reset_length == 0) {
+ /*pre-dcn*/
+ REG_UPDATE_N(
+ SETUP, 2,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+ } else {
+ reset_length = hw_engine->base.base.send_reset_length;
+ }
/* Program HW priority
* set to High - interrupt software I2C at any time
* Enable restart of SW I2C that was interrupted by HW
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
index 5bb04085f670..fea2946906ed 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -192,6 +192,7 @@ struct i2c_hw_engine_dce110 {
/* number of pending transactions (before GO) */
uint32_t transaction_count;
uint32_t engine_keep_power_up_count;
+ uint32_t i2c_setup_time_limit;
};
struct i2c_hw_engine_dce110_create_arg {
@@ -207,4 +208,11 @@ struct i2c_hw_engine_dce110_create_arg {
struct i2c_engine *dal_i2c_hw_engine_dce110_create(
const struct i2c_hw_engine_dce110_create_arg *arg);
+enum {
+ I2C_SETUP_TIME_LIMIT_DCE = 255,
+ I2C_SETUP_TIME_LIMIT_DCN = 3,
+ I2C_HW_BUFFER_SIZE = 538,
+ I2C_SEND_RESET_LENGTH_9 = 9,
+ I2C_SEND_RESET_LENGTH_10 = 10,
+};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
index 2a047f8ca0e9..1d748ac1d6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -43,6 +43,9 @@
#include "i2c_sw_engine_dce110.h"
#include "i2c_hw_engine_dce110.h"
#include "aux_engine_dce110.h"
+#include "../../dc.h"
+#include "dc_types.h"
+
/*
* Post-requisites: headers required by this unit
@@ -199,6 +202,7 @@ static const struct dce110_i2c_hw_engine_mask i2c_mask = {
void dal_i2caux_dce110_construct(
struct i2caux_dce110 *i2caux_dce110,
struct dc_context *ctx,
+ unsigned int num_i2caux_inst,
const struct dce110_aux_registers aux_regs[],
const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
const struct dce110_i2c_hw_engine_shift *i2c_shift,
@@ -249,9 +253,22 @@ void dal_i2caux_dce110_construct(
base->i2c_hw_engines[line_id] =
dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
-
+ if (base->i2c_hw_engines[line_id] != NULL) {
+ switch (ctx->dce_version) {
+ case DCN_VERSION_1_0:
+ base->i2c_hw_engines[line_id]->setup_limit =
+ I2C_SETUP_TIME_LIMIT_DCN;
+ base->i2c_hw_engines[line_id]->send_reset_length = 0;
+ break;
+ default:
+ base->i2c_hw_engines[line_id]->setup_limit =
+ I2C_SETUP_TIME_LIMIT_DCE;
+ base->i2c_hw_engines[line_id]->send_reset_length = 0;
+ break;
+ }
+ }
++i;
- } while (i < ARRAY_SIZE(hw_ddc_lines));
+ } while (i < num_i2caux_inst);
/* Create AUX engines for all lines which has assisted HW AUX
* 'i' (loop counter) used as DDC/AUX engine_id */
@@ -272,7 +289,7 @@ void dal_i2caux_dce110_construct(
dal_aux_engine_dce110_create(&aux_init_data);
++i;
- } while (i < ARRAY_SIZE(hw_aux_lines));
+ } while (i < num_i2caux_inst);
/*TODO Generic I2C SW and HW*/
}
@@ -303,6 +320,7 @@ struct i2caux *dal_i2caux_dce110_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce110_aux_regs),
dce110_aux_regs,
i2c_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
index 1b1f71c60ac9..d3d8cc58666a 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -45,6 +45,7 @@ struct i2caux *dal_i2caux_dce110_create(
void dal_i2caux_dce110_construct(
struct i2caux_dce110 *i2caux_dce110,
struct dc_context *ctx,
+ unsigned int num_i2caux_inst,
const struct dce110_aux_registers *aux_regs,
const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
const struct dce110_i2c_hw_engine_shift *i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
index dafc1a727f7f..a9db04738724 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -93,6 +93,7 @@ static void construct(
{
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce112_aux_regs),
dce112_aux_regs,
dce112_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
index 0e7b18260027..6a4f344c1db4 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dce120_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce120_aux_regs),
dce120_aux_regs,
dce120_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
index e44a8901f38b..a59c1f50c1e8 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dcn10_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dcn10_aux_regs),
dcn10_aux_regs,
dcn10_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
index 33de8a8834dc..b16fb1ff687d 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -26,6 +26,8 @@
#ifndef __DAL_ENGINE_H__
#define __DAL_ENGINE_H__
+#include "dc_ddc_types.h"
+
enum i2caux_transaction_operation {
I2CAUX_TRANSACTION_READ,
I2CAUX_TRANSACTION_WRITE
@@ -53,7 +55,8 @@ enum i2caux_transaction_status {
I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
- I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
};
struct i2caux_transaction_request {
@@ -75,19 +78,6 @@ enum i2c_default_speed {
I2CAUX_DEFAULT_I2C_SW_SPEED = 50
};
-enum i2caux_transaction_action {
- I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
- I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
-
- I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
- I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
-
- I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
- I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
-};
-
struct engine;
struct engine_funcs {
@@ -106,6 +96,7 @@ struct engine_funcs {
struct engine {
const struct engine_funcs *funcs;
+ uint32_t inst;
struct ddc *ddc;
struct dc_context *ctx;
};
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
index 58fc0f25eceb..ded6ea34b714 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -86,6 +86,8 @@ struct i2c_engine {
struct engine base;
const struct i2c_engine_funcs *funcs;
uint32_t timeout_delay;
+ uint32_t setup_limit;
+ uint32_t send_reset_length;
};
void dal_i2c_engine_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index 14dc8c94d862..9b0bcc6b769b 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -254,7 +254,6 @@ bool dal_i2caux_submit_aux_command(
break;
}
- cmd->payloads->length = request.payload.length;
++index_of_payload;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index a94942d4e66b..c0b9ca13393b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -92,6 +92,7 @@ struct resource_context;
struct resource_funcs {
void (*destroy)(struct resource_pool **pool);
+ void (*link_init)(struct dc_link *link);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
@@ -138,7 +139,7 @@ struct resource_pool {
struct output_pixel_processor *opps[MAX_PIPES];
struct timing_generator *timing_generators[MAX_PIPES];
struct stream_encoder *stream_enc[MAX_PIPES * 2];
-
+ struct aux_engine *engines[MAX_PIPES];
struct hubbub *hubbub;
struct mpc *mpc;
struct pp_smu_funcs_rv *pp_smu;
@@ -162,7 +163,7 @@ struct resource_pool {
unsigned int audio_count;
struct audio_support audio_support;
- struct display_clock *display_clock;
+ struct dccg *dccg;
struct irq_service *irqs;
struct abm *abm;
@@ -255,8 +256,7 @@ struct dce_bw_output {
};
struct dcn_bw_output {
- struct dc_clocks cur_clk;
- struct dc_clocks calc_clk;
+ struct dc_clocks clk;
struct dcn_watermark_set watermarks;
};
@@ -281,7 +281,7 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct display_clock *dis_clk;
+ struct dccg *dis_clk;
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 30b3a08b91be..538b83303b86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,22 +102,13 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-enum ddc_result dal_ddc_service_read_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- uint8_t *data,
- uint32_t len,
- uint32_t *read);
-
-enum ddc_result dal_ddc_service_write_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- const uint8_t *data,
- uint32_t len);
+int dc_link_aux_transfer(struct ddc_service *ddc,
+ unsigned int address,
+ uint8_t *reply,
+ void *buffer,
+ unsigned int size,
+ enum aux_transaction_type type,
+ enum i2caux_transaction_action action);
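The pair of DPCD-specific read/write wrappers collapses into a single transfer routine; a hedged usage sketch, assuming the return value is the number of bytes transferred (negative on failure):

/* Sketch: native AUX read of one DPCD byte (DP_SET_POWER, 0x600). */
uint8_t reply;                  /* receives the raw AUX reply code */
uint8_t data;
int ret = dc_link_aux_transfer(ddc, 0x600, &reply, &data, sizeof(data),
			       AUX_TRANSACTION_TYPE_DP,
			       I2CAUX_TRANSACTION_ACTION_DP_READ);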
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 2f783c650084..a37255c757e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,9 +33,10 @@ struct dc_link;
struct dc_stream_state;
struct dc_link_settings;
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting);
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count);
bool dp_validate_mode_timing(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 132d18d4b293..ddbb673caa08 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -625,7 +625,7 @@ bool dcn_validate_bandwidth(
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
- struct clocks_value *clocks);
+ struct dc_clocks *clocks);
void dcn_bw_update_from_pplib(struct dc *dc);
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
new file mode 100644
index 000000000000..e79cd4e92919
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+#include "dc_ddc_types.h"
+#include "include/i2caux_interface.h"
+
+enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+ enum i2caux_transaction_address_space address_space;
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+ enum i2caux_transaction_operation operation;
+ struct i2caux_transaction_payload payload;
+ enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+ I2CAUX_ENGINE_TYPE_AUX,
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+union aux_config;
+
+struct aux_engine {
+ uint32_t inst;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+ bool acquire_reset;
+};
+
+struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+ uint32_t offset;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t invalid_reply_retry_aux_on_ack;
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+struct write_command_context {
+ bool mot;
+
+ uint8_t *buffer;
+ uint32_t current_write_length;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t max_defer_retry;
+ uint32_t ack_m_retry;
+
+ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+
+struct aux_engine_funcs {
+ void (*destroy)(
+ struct aux_engine **ptr);
+ bool (*acquire_engine)(
+ struct aux_engine *engine);
+ void (*configure)(
+ struct aux_engine *engine,
+ union aux_config cfg);
+ void (*submit_channel_request)(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
+ int (*read_channel_reply)(
+ struct aux_engine *engine,
+ uint32_t size,
+ uint8_t *buffer,
+ uint8_t *reply_result,
+ uint32_t *sw_status);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+ bool (*is_engine_available)(struct aux_engine *engine);
+ enum i2caux_engine_type (*get_engine_type)(
+ const struct aux_engine *engine);
+ bool (*acquire)(
+ struct aux_engine *engine,
+ struct ddc *ddc);
+ bool (*submit_request)(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+ void (*release_engine)(
+ struct aux_engine *engine);
+ void (*destroy_engine)(
+ struct aux_engine **engine);
+};
+#endif
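
For reference, a minimal sketch of how a caller might drive this vtable for a standalone DPCD read. The wrapper below is hypothetical; only the types and hooks declared in this header are assumed, including that an implementation records the outcome in request.status.

    /* Hypothetical caller: acquire the engine, submit one DPCD read,
     * release, and report success only on a SUCCEEDED status. */
    static bool example_dpcd_read(struct aux_engine *engine, struct ddc *ddc,
                                  uint32_t address, uint8_t *data, uint32_t length)
    {
            struct i2caux_transaction_request req = {
                    .operation = I2CAUX_TRANSACTION_READ,
                    .payload = {
                            .address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD,
                            .address = address,
                            .length = length,
                            .data = data,
                    },
            };
            bool submitted;

            if (!engine->funcs->acquire(engine, ddc))
                    return false;

            /* middle_of_transaction = false: a self-contained request */
            submitted = engine->funcs->submit_request(engine, &req, false);
            engine->funcs->release_engine(engine);

            return submitted &&
                   req.status == I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
    }
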
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
index f5f69cd81f6f..3c7ccb68ecdb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
@@ -27,23 +27,7 @@
#define __DISPLAY_CLOCK_H__
#include "dm_services_types.h"
-
-
-struct clocks_value {
- int dispclk_in_khz;
- int max_pixelclk_in_khz;
- int max_non_dp_phyclk_in_khz;
- int max_dp_phyclk_in_khz;
- bool dispclk_notify_pplib_done;
- bool pixelclk_notify_pplib_done;
- bool phyclk_notigy_pplib_done;
- int dcfclock_in_khz;
- int dppclk_in_khz;
- int mclk_in_khz;
- int phyclk_in_khz;
- int common_vdd_level;
-};
-
+#include "dc.h"
/* Structure containing all state-dependent clocks
* (dependent on "enum clocks_state") */
@@ -52,34 +36,23 @@ struct state_dependent_clocks {
int pixel_clk_khz;
};
-struct display_clock {
+struct dccg {
struct dc_context *ctx;
const struct display_clock_funcs *funcs;
enum dm_pp_clocks_state max_clks_state;
enum dm_pp_clocks_state cur_min_clks_state;
- struct clocks_value cur_clocks_value;
+ struct dc_clocks clks;
};
struct display_clock_funcs {
- int (*set_clock)(struct display_clock *disp_clk,
+ void (*update_clocks)(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower);
+ int (*set_dispclk)(struct dccg *dccg,
int requested_clock_khz);
- enum dm_pp_clocks_state (*get_required_clocks_state)(
- struct display_clock *disp_clk,
- struct state_dependent_clocks *req_clocks);
-
- bool (*set_min_clocks_state)(struct display_clock *disp_clk,
- enum dm_pp_clocks_state dm_pp_clocks_state);
-
- int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
-
- bool (*apply_clock_voltage_request)(
- struct display_clock *disp_clk,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz,
- bool pre_mode_set,
- bool update_dp_phyclk);
+ int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
};
#endif /* __DISPLAY_CLOCK_H__ */
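
The new update_clocks() hook carries a safe_to_lower flag. A plausible reading of that contract, as a sketch (the helper below is illustrative, not part of this header): a clock may always be raised to satisfy new demand, but may only be lowered once the caller confirms the context no longer needs the higher rate.

    /* Illustrative safe_to_lower test, applied per clock field before
     * programming hardware. */
    static bool should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
    {
            return new_khz > cur_khz || (safe_to_lower && new_khz < cur_khz);
    }

An update_clocks() implementation would run this test against each member of struct dc_clocks and touch only the clocks that pass.
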
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index de60f940030d..4550747fb61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -48,7 +48,7 @@ struct dmcu_funcs {
const char *src,
unsigned int bytes);
void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
- void (*setup_psr)(struct dmcu *dmcu,
+ bool (*setup_psr)(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context);
void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 582458f028f8..74ad94b0e4f0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -151,6 +151,9 @@ struct dpp_funcs {
void (*dpp_set_hdr_multiplier)(
struct dpp *dpp_base,
uint32_t multiplier);
+ void (*set_optional_cursor_attributes)(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr);
void (*dpp_dppclk_control)(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 97df82cddf82..4f3f9e68ccfa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -43,10 +43,9 @@ enum cursor_lines_per_chunk {
};
struct hubp {
- struct hubp_funcs *funcs;
+ const struct hubp_funcs *funcs;
struct dc_context *ctx;
struct dc_plane_address request_address;
- struct dc_plane_address current_address;
int inst;
/* run time states */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 47f1dc5a43b7..da89c2edb07c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -64,7 +64,7 @@ struct stutter_modes {
};
struct mem_input {
- struct mem_input_funcs *funcs;
+ const struct mem_input_funcs *funcs;
struct dc_context *ctx;
struct dc_plane_address request_address;
struct dc_plane_address current_address;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 69cb0a105300..af700c7dac50 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -156,6 +156,9 @@ struct timing_generator_funcs {
uint32_t *v_blank_end,
uint32_t *h_position,
uint32_t *v_position);
+ bool (*get_otg_active_size)(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height);
void (*set_early_control)(struct timing_generator *tg,
uint32_t early_cntl);
void (*wait_for_state)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 63fc6c499789..a14ce4de80b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -44,6 +44,7 @@ struct dce_hwseq_wa {
bool blnd_crtc_trigger;
bool DEGVIDCN10_253;
bool false_optc_underflow;
+ bool DEGVIDCN10_254;
};
struct hwseq_wa_state {
@@ -101,10 +102,18 @@ struct hw_sequencer_funcs {
const struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
void (*update_dchub)(
struct dce_hwseq *hws,
struct dchub_init_data *dh_data);
+ void (*update_mpcc)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
void (*update_pending_status)(
struct pipe_ctx *pipe_ctx);
@@ -154,20 +163,24 @@ struct hw_sequencer_funcs {
struct dc_link_settings *link_settings);
void (*blank_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx, int option);
+
void (*pipe_control_lock)(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
void (*blank_pixel_data)(
struct dc *dc,
- struct stream_resource *stream_res,
- struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
bool blank);
void (*set_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool decrease_allowed);
+ bool safe_to_lower);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
int vmin, int vmax);
@@ -210,6 +223,7 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+ void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 3306e7b0b3e3..cf5a84b9e27c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -445,4 +445,50 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
+
+
+/* indirect register access */
+
+#define IX_REG_SET_N(index_reg_name, data_reg_name, index, n, initial_val, ...) \
+ generic_indirect_reg_update_ex(CTX, \
+ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+ initial_val, \
+ n, __VA_ARGS__)
+
+#define IX_REG_SET_2(index_reg_name, data_reg_name, index, init_value, f1, v1, f2, v2) \
+ IX_REG_SET_N(index_reg_name, data_reg_name, index, 2, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+
+#define IX_REG_READ(index_reg_name, data_reg_name, index) \
+ generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+
+
+
+#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
+ generic_indirect_reg_update_ex(CTX, \
+ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+ IX_REG_READ(index_reg_name, data_reg_name, index), \
+ n, __VA_ARGS__)
+
+#define IX_REG_UPDATE_2(index_reg_name, data_reg_name, index, f1, v1, f2, v2) \
+ IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t data);
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index);
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
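
These helpers wrap the usual index/data indirect-access pattern: program the index register, then read or write through the data register, with generic_indirect_reg_update_ex() performing a shift/mask read-modify-write per (shift, mask, value) triple. A sketch of a single-field update built from the declared primitives (the wrapper itself is hypothetical):

    static uint32_t example_ix_field_update(const struct dc_context *ctx,
                    uint32_t addr_index, uint32_t addr_data, uint32_t index,
                    uint8_t shift, uint32_t mask, uint32_t value)
    {
            /* read the current value through the index/data pair */
            uint32_t reg_val = generic_read_indirect_reg(ctx, addr_index,
                                                         addr_data, index);

            /* replace one field, preserving the other bits */
            reg_val = (reg_val & ~mask) | ((value << shift) & mask);
            generic_write_indirect_reg(ctx, addr_index, addr_data,
                                       index, reg_val);
            return reg_val;
    }
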
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 640a647f4611..5b321008b0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -38,6 +38,7 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
+ int num_opp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
@@ -102,6 +103,11 @@ void resource_reference_clock_source(
const struct resource_pool *pool,
struct clock_source *clock_source);
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index dcdfa0f01551..604bea01fc13 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -78,7 +78,7 @@ const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
- if (source > DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
return NULL;
return &irq_service->info[source];
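
The comparison change above is an off-by-one fix: info[] holds DAL_IRQ_SOURCES_NUMBER entries, so DAL_IRQ_SOURCES_NUMBER itself is already past the end. The general shape of the guard, as a standalone sketch using the surrounding types:

    /* For any "T arr[N]", valid indices are 0..N-1: a check of
     * "idx > N" still admits idx == N and reads one past the end,
     * so the guard must reject idx >= N. */
    static const struct irq_source_info *example_lookup(
                    const struct irq_source_info *arr, int n, int idx)
    {
            return (idx < 0 || idx >= n) ? NULL : &arr[idx];
    }
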
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 019e7a095ea1..d968956a10cd 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -40,7 +40,8 @@ enum ddc_result {
DDC_RESULT_FAILED_INCOMPLETE,
DDC_RESULT_FAILED_OPERATION,
DDC_RESULT_FAILED_INVALID_OPERATION,
- DDC_RESULT_FAILED_BUFFER_OVERFLOW
+ DDC_RESULT_FAILED_BUFFER_OVERFLOW,
+ DDC_RESULT_FAILED_HPD_DISCON
};
enum ddc_service_type {
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index d8e52e3b8e3c..1c66166d0a94 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -27,6 +27,9 @@
#define __DAL_DPCD_DEFS_H__
#include <drm/drm_dp_helper.h>
+#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_SINK_HW_REVISION_START 0x409
+#endif
enum dpcd_revision {
DPCD_REV_10 = 0x10,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index a981b3e99ab3..52a73332befb 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -26,6 +26,13 @@
#ifndef __DAL_FIXED31_32_H__
#define __DAL_FIXED31_32_H__
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
#ifndef LLONG_MIN
#define LLONG_MIN (1LL<<63)
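
Two #ifndef-guarded definitions of LLONG_MIN now coexist in this header; the first one the preprocessor sees wins. The newly added (-LLONG_MAX - 1ll) form is the portable spelling, since the magnitude of LLONG_MIN does not fit in a signed 64-bit literal and 1LL<<63 shifts into the sign bit, which is undefined behaviour in C. The arithmetic:

    \text{LLONG\_MAX} = 2^{63} - 1, \qquad
    \text{LLONG\_MIN} = -2^{63} = -\text{LLONG\_MAX} - 1

Every intermediate value in -LLONG_MAX - 1ll is representable, which the direct literal -9223372036854775808 (parsed as negation of an out-of-range constant) cannot guarantee.
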
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 36bbad594267..f312834fef50 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -395,6 +395,8 @@ struct integrated_info {
struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
unsigned char dp3_ext_hdmi_6g_reg_num;
struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+ /* V11 */
+ uint32_t dp_ss_control;
};
/**
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
index 2941b882b0b6..58bb42ed85ca 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -37,6 +37,10 @@
* ********************************************************************
*/
+#define MAX_CONNECTOR_NUMBER_PER_SLOT (16)
+#define MAX_BOARD_SLOTS (4)
+#define INVALID_CONNECTOR_INDEX ((unsigned int)(-1))
+
/* HPD unit id - HW direct translation */
enum hpd_source_id {
HPD_SOURCEID1 = 0,
@@ -136,5 +140,47 @@ enum sync_source {
SYNC_SOURCE_DUAL_GPU_PIN
};
+/* connector sizes in millimeters - from BiosParserTypes.hpp */
+#define CONNECTOR_SIZE_DVI 40
+#define CONNECTOR_SIZE_VGA 32
+#define CONNECTOR_SIZE_HDMI 16
+#define CONNECTOR_SIZE_DP 16
+#define CONNECTOR_SIZE_MINI_DP 9
+#define CONNECTOR_SIZE_UNKNOWN 30
+
+enum connector_layout_type {
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN,
+ CONNECTOR_LAYOUT_TYPE_DVI_D,
+ CONNECTOR_LAYOUT_TYPE_DVI_I,
+ CONNECTOR_LAYOUT_TYPE_VGA,
+ CONNECTOR_LAYOUT_TYPE_HDMI,
+ CONNECTOR_LAYOUT_TYPE_DP,
+ CONNECTOR_LAYOUT_TYPE_MINI_DP,
+};
+struct connector_layout_info {
+ struct graphics_object_id connector_id;
+ enum connector_layout_type connector_type;
+ unsigned int length;
+ unsigned int position; /* offset in mm from right side of the board */
+};
+
+/* length and width in mm */
+struct slot_layout_info {
+ unsigned int length;
+ unsigned int width;
+ unsigned int num_of_connectors;
+ struct connector_layout_info connectors[MAX_CONNECTOR_NUMBER_PER_SLOT];
+};
+
+struct board_layout_info {
+ unsigned int num_of_slots;
+ /* indicates valid information in bracket layout structure. */
+ unsigned int is_number_of_slots_valid : 1;
+ unsigned int is_slots_size_valid : 1;
+ unsigned int is_connector_offsets_valid : 1;
+ unsigned int is_connector_lengths_valid : 1;
+
+ struct slot_layout_info slots[MAX_BOARD_SLOTS];
+};
#endif
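
A sketch of how a consumer might walk the new layout structures while honouring the validity bits (the traversal helper is hypothetical):

    /* Hypothetical consumer: trust a field only when the matching
     * validity bit in board_layout_info is set. */
    static void example_walk_layout(const struct board_layout_info *info)
    {
            unsigned int s, c;

            if (!info->is_number_of_slots_valid)
                    return;

            for (s = 0; s < info->num_of_slots && s < MAX_BOARD_SLOTS; s++) {
                    const struct slot_layout_info *slot = &info->slots[s];

                    for (c = 0; c < slot->num_of_connectors &&
                         c < MAX_CONNECTOR_NUMBER_PER_SLOT; c++) {
                            const struct connector_layout_info *conn =
                                            &slot->connectors[c];

                            if (info->is_connector_offsets_valid)
                                    (void)conn->position; /* mm from board right edge */
                            if (info->is_connector_lengths_valid)
                                    (void)conn->length;
                    }
            }
    }
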
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index c4197432eb7c..33b3d755fe65 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -197,6 +197,11 @@ enum transmitter_color_depth {
TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
};
+enum dp_alt_mode {
+ DP_Alt_mode__Unknown = 0,
+ DP_Alt_mode__Connect,
+ DP_Alt_mode__NoConnect,
+};
/*
*****************************************************************************
* graphics_object_id struct
@@ -287,4 +292,15 @@ static inline enum engine_id dal_graphics_object_id_get_engine_id(
return (enum engine_id) id.id;
return ENGINE_ID_UNKNOWN;
}
+
+static inline bool dal_graphics_object_id_equal(
+ struct graphics_object_id id_1,
+ struct graphics_object_id id_2)
+{
+ if ((id_1.id == id_2.id) && (id_1.enum_id == id_2.enum_id) &&
+ (id_1.type == id_2.type)) {
+ return true;
+ }
+ return false;
+}
#endif
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index dc98d6d4b2bd..e3c79616682d 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,47 +40,7 @@ struct dc_state;
*
*/
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
-
-uint32_t dal_logger_destroy(struct dal_logger **logger);
-
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
-
-void dm_logger_write(
- struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- ...);
-
-void dm_logger_append(
- struct log_entry *entry,
- const char *msg,
- ...);
-
-void dm_logger_append_va(
- struct log_entry *entry,
- const char *msg,
- va_list args);
-
-void dm_logger_open(
- struct dal_logger *logger,
- struct log_entry *entry,
- enum dc_log_type log_type);
-
-void dm_logger_close(struct log_entry *entry);
-
-void dc_conn_log(struct dc_context *ctx,
- const struct dc_link *link,
- uint8_t *hex_data,
- int hex_data_count,
- enum dc_log_type event,
- const char *msg,
- ...);
-
-void logger_write(struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- void *paralist);
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
void pre_surface_trace(
struct dc *dc,
@@ -106,28 +66,31 @@ void context_clock_trace(
* marked by this macro.
* Note that the message will be printed exactly once for every function
* it is used in order to avoid repeating of the same message. */
+
#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
-{ \
- static bool print_not_impl = true; \
-\
- if (print_not_impl == true) { \
- print_not_impl = false; \
- dm_logger_write(ctx->logger, LOG_WARNING, \
- "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
- } \
-}
+ do { \
+ static bool print_not_impl = true; \
+ if (print_not_impl == true) { \
+ print_not_impl = false; \
+ DRM_WARN("DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
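
The do { ... } while (0) wrapper adopted by the converted macros is the standard idiom for making a multi-statement macro behave as exactly one statement. A self-contained sketch of why the bare-braces alternative breaks under if/else (all names below are hypothetical):

    static void log_int(int v) { (void)v; }

    #define LOG_TWO(a, b) do { log_int(a); log_int(b); } while (0)

    static void example(int cond, int a, int b)
    {
            if (cond)
                    LOG_TWO(a, b);  /* one statement; the ';' is consumed cleanly */
            else
                    log_int(0);     /* else still binds to the if above */
            /* with a bare { ... } body, the ';' after LOG_TWO() would
             * terminate the if, leaving this else without a match */
    }
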
/******************************************************************************
* Convenience macros to save on typing.
*****************************************************************************/
#define DC_ERROR(...) \
- dm_logger_write(dc_ctx->logger, LOG_ERROR, \
- __VA_ARGS__)
+ do { \
+ (void)(dc_ctx); \
+ DC_LOG_ERROR(__VA_ARGS__); \
+ } while (0)
#define DC_SYNC_INFO(...) \
- dm_logger_write(dc_ctx->logger, LOG_SYNC, \
- __VA_ARGS__)
+ do { \
+ (void)(dc_ctx); \
+ DC_LOG_SYNC(__VA_ARGS__); \
+ } while (0)
/* Connectivity log format:
* [time stamp] [drm] [Major_minor] [connector name] message.....
@@ -137,20 +100,30 @@ void context_clock_trace(
*/
#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
- dc_conn_log(link->ctx, link, hex_data, hex_len, \
- LOG_EVENT_DETECTION, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ dc_conn_log_hex_linux(hex_data, hex_len); \
+ DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
+ } while (0)
#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
- dc_conn_log(link->ctx, link, hex_data, hex_len, \
- LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ dc_conn_log_hex_linux(hex_data, hex_len); \
+ DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
+ } while (0)
#define CONN_MSG_LT(link, ...) \
- dc_conn_log(link->ctx, link, NULL, 0, \
- LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ DC_LOG_EVENT_LINK_TRAINING(__VA_ARGS__); \
+ } while (0)
#define CONN_MSG_MODE(link, ...) \
- dc_conn_log(link->ctx, link, NULL, 0, \
- LOG_EVENT_MODE_SET, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ DC_LOG_EVENT_MODE_SET(__VA_ARGS__); \
+ } while (0)
/*
* Display Test Next logging
@@ -165,38 +138,21 @@ void context_clock_trace(
dm_dtn_log_end(dc_ctx)
#define PERFORMANCE_TRACE_START() \
- unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
- unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
- unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
- if (dc->debug.performance_trace) {\
- dm_logger_flush_buffer(dc->ctx->logger, false);\
- dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
- dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
- dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
- }
-
-#define PERFORMANCE_TRACE_END() do {\
- unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
- if (dc->debug.performance_trace) {\
- dm_logger_write(dc->ctx->logger, \
- LOG_PERF_TRACE, \
- "%s duration: %d ticks\n", __func__,\
+ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx)
+
+#define PERFORMANCE_TRACE_END() \
+ do { \
+ unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx); \
+ if (dc->debug.performance_trace) { \
+ DC_LOG_PERF_TRACE("%s duration: %lld ticks\n", __func__, \
perf_trc_end_stmp - perf_trc_start_stmp); \
- if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
- dc->ctx->logger->mask = perf_trc_start_log_msk;\
- dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
- dm_logger_flush_buffer(dc->ctx->logger, false);\
} \
- } \
-} while (0)
+ } while (0)
-#define DISPLAY_STATS_BEGIN(entry) \
- dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
+#define DISPLAY_STATS_BEGIN(entry) (void)(entry)
-#define DISPLAY_STATS(msg, ...) \
- dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
+#define DISPLAY_STATS(msg, ...) DC_LOG_PERF_TRACE(msg, ##__VA_ARGS__)
-#define DISPLAY_STATS_END(entry) \
- dm_logger_close(&entry)
+#define DISPLAY_STATS_END(entry) (void)(entry)
#endif /* __DAL_LOGGER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 0a540b9897a6..ad3695e67b76 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -138,63 +138,4 @@ enum dc_log_type {
(1 << LOG_HW_AUDIO)| \
(1 << LOG_BANDWIDTH_CALCS)*/
-union logger_flags {
- struct {
- uint32_t ENABLE_CONSOLE:1; /* Print to console */
- uint32_t ENABLE_BUFFER:1; /* Print to buffer */
- uint32_t RESERVED:30;
- } bits;
- uint32_t value;
-};
-
-struct log_entry {
- struct dal_logger *logger;
- enum dc_log_type type;
-
- char *buf;
- uint32_t buf_offset;
- uint32_t max_buf_bytes;
-};
-
-/**
-* Structure for enumerating log types
-*/
-struct dc_log_type_info {
- enum dc_log_type type;
- char name[MAX_NAME_LEN];
-};
-
-/* Structure for keeping track of offsets, buffer, etc */
-
-#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
-
-/*Connectivity log needs to output EDID, which needs at lease 256x3 bytes,
- * change log line size to 896 to meet the request.
- */
-#define LOG_MAX_LINE_SIZE 896
-
-struct dal_logger {
-
- /* How far into the circular buffer has been read by dsat
- * Read offset should never cross write offset. Write \0's to
- * read data just to be sure?
- */
- uint32_t buffer_read_offset;
-
- /* How far into the circular buffer we have written
- * Write offset should never cross read offset
- */
- uint32_t buffer_write_offset;
-
- uint32_t open_count;
-
- char *log_buffer; /* Pointer to malloc'ed buffer */
- uint32_t log_buffer_size; /* Size of circular buffer */
-
- uint32_t mask; /*array of masks for major elements*/
-
- union logger_flags flags;
- struct dc_context *ctx;
-};
-
#endif /* __DAL_LOGGER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index eee0dfad6962..bf29733958c3 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -131,6 +131,63 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
dc_fixpt_div(dc_fixpt_one, m1));
}
+
+/* degamma, nonlinear to linear */
+static void compute_hlg_oetf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+ struct fixed31_32 a;
+ struct fixed31_32 b;
+ struct fixed31_32 c;
+ struct fixed31_32 threshold;
+ struct fixed31_32 reference_white_level;
+
+ a = dc_fixpt_from_fraction(17883277, 100000000);
+ if (is_light0_12) {
+ /*light 0-12*/
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ threshold = dc_fixpt_one;
+ reference_white_level = dc_fixpt_half;
+ } else {
+ /*light 0-1*/
+ b = dc_fixpt_from_fraction(2372241, 100000000);
+ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+ threshold = dc_fixpt_from_fraction(1, 12);
+ reference_white_level = dc_fixpt_pow(dc_fixpt_from_fraction(3, 1), dc_fixpt_half);
+ }
+ if (dc_fixpt_lt(threshold, in_x))
+ *out_y = dc_fixpt_add(c, dc_fixpt_mul(a, dc_fixpt_log(dc_fixpt_sub(in_x, b))));
+ else
+ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_half), reference_white_level);
+}
+
+/* regamma, linear to nonlinear */
+static void compute_hlg_eotf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+ struct fixed31_32 a;
+ struct fixed31_32 b;
+ struct fixed31_32 c;
+ struct fixed31_32 reference_white_level;
+
+ a = dc_fixpt_from_fraction(17883277, 100000000);
+ if (is_light0_12) {
+ /*light 0-12*/
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ reference_white_level = dc_fixpt_from_fraction(4, 1);
+ } else {
+ /*light 0-1*/
+ b = dc_fixpt_from_fraction(2372241, 100000000);
+ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+ reference_white_level = dc_fixpt_from_fraction(1, 3);
+ }
+ if (dc_fixpt_lt(dc_fixpt_half, in_x))
+ *out_y = dc_fixpt_add(dc_fixpt_exp(dc_fixpt_div(dc_fixpt_sub(in_x, c), a)), b);
+ else
+ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_from_fraction(2, 1)), reference_white_level);
+}
+
+
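
The fixed-point fractions above encode the BT.2100 HLG constants: a = 0.17883277, b = 0.28466892, c = 0.55991073, matching 17883277/10^8 and friends. In the 0-12 nominal-range branch, compute_hlg_oetf() evaluates

    E' = \begin{cases}
            \tfrac{1}{2}\sqrt{E}, & 0 \le E \le 1 \\
            a \ln(E - b) + c, & 1 < E \le 12
    \end{cases}

The 0-1 branch folds the twelve-fold scaling into the constants, since a ln(12E - b) + c = a ln(E - b/12) + (a ln 12 + c): b/12 = 0.02372241 and a ln 12 + c ≈ 1.00429347 are exactly the fractions used there, with sqrt(3E) = sqrt(3) · sqrt(E) below the 1/12 threshold. compute_hlg_eotf() is the algebraic inverse of each branch.
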
/* one-time pre-compute PQ values - only for sdr_white_level 80 */
void precompute_pq(void)
{
@@ -691,6 +748,48 @@ static void build_degamma(struct pwl_float_data_ex *curve,
}
}
+static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+ uint32_t i;
+
+ struct pwl_float_data_ex *rgb = degamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ compute_hlg_oetf(coord_x->x, is_light0_12, &rgb->r);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
+static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+ uint32_t i;
+
+ struct pwl_float_data_ex *rgb = regamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ compute_hlg_eotf(coord_x->x, is_light0_12, &rgb->r);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
static void scale_gamma(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
@@ -898,7 +997,9 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
* norm_y = 4095*regamma_y, and index is just truncating to nearest integer
* lut1 = lut1D[index], lut2 = lut1D[index+1]
*
- *adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ * adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ *
+ * Custom degamma on Linux uses the same interpolation math, so it is handled here
*/
static void apply_lut_1d(
const struct dc_gamma *ramp,
@@ -919,7 +1020,7 @@ static void apply_lut_1d(
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
- if (ramp->type != GAMMA_CS_TFM_1D)
+ if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM)
return; // this is not expected
for (i = 0; i < num_hw_points; i++) {
@@ -1537,7 +1638,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axix_x, curve,
MAX_HW_POINTS, tf_pts,
- mapUserRamp);
+ mapUserRamp && ramp->type != GAMMA_CUSTOM);
+ if (ramp->type == GAMMA_CUSTOM)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
@@ -1622,6 +1725,25 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
ret = true;
kvfree(rgb_regamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+
+ build_hlg_regamma(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_HLG12 ? true:false);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_regamma[i].r;
+ points->green[i] = rgb_regamma[i].g;
+ points->blue[i] = rgb_regamma[i].b;
+ }
+ ret = true;
+ kvfree(rgb_regamma);
}
rgb_regamma_alloc_fail:
return ret;
@@ -1682,6 +1804,25 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
ret = true;
kvfree(rgb_degamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_degamma),
+ GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+
+ build_hlg_degamma(rgb_degamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_HLG12 ? true:false);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_degamma[i].r;
+ points->green[i] = rgb_degamma[i].g;
+ points->blue[i] = rgb_degamma[i].b;
+ }
+ ret = true;
+ kvfree(rgb_degamma);
}
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
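
The interpolation apply_lut_1d() performs, now also used for GAMMA_CUSTOM ramps, is the conventional 1D LUT lookup described in the comment above it: scale into index space, truncate, then linearly interpolate between the two surrounding entries. A floating-point sketch of that reading (the helper is illustrative; the real code works in fixed point):

    static float example_lut1d_sample(const float *lut, int lut_size, float y)
    {
            float norm = y * (float)(lut_size - 1); /* e.g. 4095 for 4096 entries */
            int idx = (int)norm;                    /* truncate */
            float frac = norm - (float)idx;

            if (idx >= lut_size - 1)
                    return lut[lut_size - 1];
            return lut[idx] + frac * (lut[idx + 1] - lut[idx]);
    }
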
diff --git a/drivers/gpu/drm/amd/display/modules/color/luts_1d.h b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
new file mode 100644
index 000000000000..66b1fad572ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef LUTS_1D_H
+#define LUTS_1D_H
+
+#include "hw_shared.h"
+
+struct point_config {
+ uint32_t custom_float_x;
+ uint32_t custom_float_y;
+ uint32_t custom_float_slope;
+};
+
+struct lut_point {
+ uint32_t red;
+ uint32_t green;
+ uint32_t blue;
+ uint32_t delta_red;
+ uint32_t delta_green;
+ uint32_t delta_blue;
+};
+
+struct pwl_1dlut_parameter {
+ struct gamma_curve arr_curve_points[34];
+ struct point_config arr_points[2];
+ struct lut_point rgb_resulted[256];
+ uint32_t hw_points_num;
+};
+#endif // LUTS_1D_H
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 710852ad03f3..3d4c1b1ab8c4 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -29,7 +29,7 @@
#include "core_types.h"
#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
+#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
@@ -238,7 +238,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
for (int i = 0; i < core_stats->entry_id; i++) {
if (event_index < core_stats->event_index &&
i == events[event_index].entry_id) {
- DISPLAY_STATS("%s\n", events[event_index].event_string);
+ DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
event_index++;
} else if (time_index < core_stats->index &&
i == time[time_index].entry_id) {
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 5eb895fd98bf..9cb9ceb4d74d 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -27,6 +27,7 @@
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00010000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00080000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
@@ -34,6 +35,7 @@
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00000001
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00000008
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
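
A small sketch of how these mask/shift pairs compose; the helper is illustrative, and the bit layout is as defined above (platform caps in the high half-word, ASIC caps in the low half-word):

    static uint32_t example_platform_link_speeds(uint32_t caps)
    {
            return (caps & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)
                            >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
    }

    /* Gen4 capability test:
     * caps & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 (platform side), or
     * caps & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 (ASIC side). */
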
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index b178176b72ac..265621d8945c 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -128,47 +128,57 @@ enum PP_FEATURE_MASK {
PP_OVERDRIVE_MASK = 0x4000,
PP_GFXOFF_MASK = 0x8000,
PP_ACG_MASK = 0x10000,
+ PP_STUTTER_MODE = 0x20000,
};
+/**
+ * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ */
struct amd_ip_funcs {
- /* Name of IP block */
+ /** @name: Name of IP block */
char *name;
- /* sets up early driver state (pre sw_init), does not configure hw - Optional */
+ /**
+ * @early_init:
+ *
+ * sets up early driver state (pre sw_init),
+ * does not configure hw - Optional
+ */
int (*early_init)(void *handle);
- /* sets up late driver/hw state (post hw_init) - Optional */
+ /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
int (*late_init)(void *handle);
- /* sets up driver state, does not configure hw */
+ /** @sw_init: sets up driver state, does not configure hw */
int (*sw_init)(void *handle);
- /* tears down driver state, does not configure hw */
+ /** @sw_fini: tears down driver state, does not configure hw */
int (*sw_fini)(void *handle);
- /* sets up the hw state */
+ /** @hw_init: sets up the hw state */
int (*hw_init)(void *handle);
- /* tears down the hw state */
+ /** @hw_fini: tears down the hw state */
int (*hw_fini)(void *handle);
+ /** @late_fini: final cleanup */
void (*late_fini)(void *handle);
- /* handles IP specific hw/sw changes for suspend */
+ /** @suspend: handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
- /* handles IP specific hw/sw changes for resume */
+ /** @resume: handles IP specific hw/sw changes for resume */
int (*resume)(void *handle);
- /* returns current IP block idle status */
+ /** @is_idle: returns current IP block idle status */
bool (*is_idle)(void *handle);
- /* poll for idle */
+ /** @wait_for_idle: poll for idle */
int (*wait_for_idle)(void *handle);
- /* check soft reset the IP block */
+ /** @check_soft_reset: check soft reset the IP block */
bool (*check_soft_reset)(void *handle);
- /* pre soft reset the IP block */
+ /** @pre_soft_reset: pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
- /* soft reset the IP block */
+ /** @soft_reset: soft reset the IP block */
int (*soft_reset)(void *handle);
- /* post soft reset the IP block */
+ /** @post_soft_reset: post soft reset the IP block */
int (*post_soft_reset)(void *handle);
- /* enable/disable cg for the IP block */
+ /** @set_clockgating_state: enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
- /* enable/disable pg for the IP block */
+ /** @set_powergating_state: enable/disable pg for the IP block */
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
- /* get current clockgating status */
+ /** @get_clockgating_state: get current clockgating status */
void (*get_clockgating_state)(void *handle, u32 *flags);
};
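
As a usage sketch, an IP block fills this table with its hooks; the block and hook names below are hypothetical, and hooks documented as Optional may plausibly be left NULL:

    static int example_ip_sw_init(void *handle) { return 0; }
    static int example_ip_hw_init(void *handle) { return 0; }
    static bool example_ip_is_idle(void *handle) { return true; }

    static const struct amd_ip_funcs example_ip_funcs = {
            .name = "example_ip",
            .sw_init = example_ip_sw_init,
            .hw_init = example_ip_hw_init,
            .is_idle = example_ip_is_idle,
    };
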
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 18a32477ed1d..fe0cbaade3c3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
@@ -89,6 +89,8 @@
#define mmUVD_JPEG_RB_SIZE_BASE_IDX 1
#define mmUVD_JPEG_ADDR_CONFIG 0x021f
#define mmUVD_JPEG_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_JPEG_PITCH 0x0222
+#define mmUVD_JPEG_PITCH_BASE_IDX 1
#define mmUVD_JPEG_GPCOM_CMD 0x022c
#define mmUVD_JPEG_GPCOM_CMD_BASE_IDX 1
#define mmUVD_JPEG_GPCOM_DATA0 0x022d
@@ -203,6 +205,8 @@
#define mmUVD_RB_WPTR4_BASE_IDX 1
#define mmUVD_JRBC_RB_RPTR 0x0457
#define mmUVD_JRBC_RB_RPTR_BASE_IDX 1
+#define mmUVD_LMI_JPEG_VMID 0x045d
+#define mmUVD_LMI_JPEG_VMID_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x045e
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x045f
@@ -231,6 +235,8 @@
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_JRBC_IB_VMID 0x0507
#define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_VMID 0x0508
+#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX 1
#define mmUVD_JRBC_RB_WPTR 0x0509
#define mmUVD_JRBC_RB_WPTR_BASE_IDX 1
#define mmUVD_JRBC_RB_CNTL 0x050a
@@ -239,6 +245,20 @@
#define mmUVD_JRBC_IB_SIZE_BASE_IDX 1
#define mmUVD_JRBC_LMI_SWAP_CNTL 0x050d
#define mmUVD_JRBC_LMI_SWAP_CNTL_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x050e
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x050f
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW 0x0510
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x0511
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_JRBC_RB_REF_DATA 0x0512
+#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX 1
+#define mmUVD_JRBC_RB_COND_RD_TIMER 0x0513
+#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
+#define mmUVD_JRBC_EXTERNAL_REG_BASE 0x0517
+#define mmUVD_JRBC_EXTERNAL_REG_BASE_BASE_IDX 1
#define mmUVD_JRBC_SOFT_RESET 0x0519
#define mmUVD_JRBC_SOFT_RESET_BASE_IDX 1
#define mmUVD_JRBC_STATUS 0x051a
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 092d800b703a..4bc118df3bc4 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
uint16_t lvds_misc; // enum of atom_sys_info_lvds_misc_def
uint16_t backlight_pwm_hz; // pwm frequency in hz
- uint8_t memorytype; // enum of atom_sys_mem_type
+ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
uint8_t umachannelnumber; // number of memory channels
uint8_t pwr_on_digon_to_de; /* all pwr sequence numbers below are in uint of 4ms */
uint8_t pwr_on_de_to_vary_bl;
@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
uint8_t pwr_on_vary_bl_to_blon;
uint8_t pwr_down_bloff_to_vary_bloff;
uint8_t min_allowed_bl_level;
+ uint8_t htc_hyst_limit;
+ uint8_t htc_tmp_limit;
+ uint8_t reserved1;
+ uint8_t reserved2;
struct atom_external_display_connection_info extdispconninfo;
struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
+ struct atom_14nm_dpphy_dp_tuningset dp_tuningset; // rbr 1.62G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset; // HBR3 dp tuning set
struct atom_camera_data camera_info;
struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
- uint32_t reserved[108];
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset; //hbr 2.7G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset; //hbr2 5.4G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset edp_tuningset; //edp tuning set
+ uint32_t reserved[66];
};
@@ -1433,7 +1440,10 @@ struct atom_smc_dpm_info_v4_1
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
- uint32_t boardreserved[10];
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+
+ uint32_t boardreserved[9];
};
/*
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 7852952d1fde..1d93a0c574c9 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -23,6 +23,8 @@
#ifndef _DM_PP_INTERFACE_
#define _DM_PP_INTERFACE_
+#include "dm_services_types.h"
+
#define PP_MAX_CLOCK_LEVELS 16
enum amd_pp_display_config_type{
@@ -189,39 +191,4 @@ struct pp_display_clock_request {
uint32_t clock_freq_in_khz;
};
-#define PP_MAX_WM_SETS 4
-
-enum pp_wm_set_id {
- DC_WM_SET_A = 0,
- DC_WM_SET_B,
- DC_WM_SET_C,
- DC_WM_SET_D,
- DC_WM_SET_INVALID = 0xffff,
-};
-
-struct pp_wm_set_with_dmif_clock_range_soc15 {
- enum pp_wm_set_id wm_set_id;
- uint32_t wm_min_dcefclk_in_khz;
- uint32_t wm_max_dcefclk_in_khz;
- uint32_t wm_min_memclk_in_khz;
- uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_set_with_mcif_clock_range_soc15 {
- enum pp_wm_set_id wm_set_id;
- uint32_t wm_min_socclk_in_khz;
- uint32_t wm_max_socclk_in_khz;
- uint32_t wm_min_memclk_in_khz;
- uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_sets_with_clock_ranges_soc15 {
- uint32_t num_wm_sets_dmif;
- uint32_t num_wm_sets_mcif;
- struct pp_wm_set_with_dmif_clock_range_soc15
- wm_sets_dmif[PP_MAX_WM_SETS];
- struct pp_wm_set_with_mcif_clock_range_soc15
- wm_sets_mcif[PP_MAX_WM_SETS];
-};
-
#endif /* _DM_PP_INTERFACE_ */
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
new file mode 100644
index 000000000000..36306c57a2b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_GFX_9_0_H__
+#define __IRQSRCS_GFX_9_0_H__
+
+
+#define GFX_9_0__SRCID__CP_RB_INTERRUPT_PKT 176 /* B0 CP_INTERRUPT pkt in RB */
+#define GFX_9_0__SRCID__CP_IB1_INTERRUPT_PKT 177 /* B1 CP_INTERRUPT pkt in IB1 */
+#define GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT 178 /* B2 CP_INTERRUPT pkt in IB2 */
+#define GFX_9_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 /* B4 PM4 Pkt Rsvd Bits Error */
+#define GFX_9_0__SRCID__CP_EOP_INTERRUPT 181 /* B5 End-of-Pipe Interrupt */
+#define GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR 183 /* B7 Bad Opcode Error */
+#define GFX_9_0__SRCID__CP_PRIV_REG_FAULT 184 /* B8 Privileged Register Fault */
+#define GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT 185 /* B9 Privileged Instr Fault */
+#define GFX_9_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 /* BA Wait Memory Semaphore Fault (Synchronization Object Fault) */
+#define GFX_9_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 /* BB Context Empty Interrupt */
+#define GFX_9_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 /* BC Context Busy Interrupt */
+#define GFX_9_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 /* C0 CP.ME Wait_Reg_Mem Poll Timeout */
+#define GFX_9_0__SRCID__CP_SIG_INCOMPLETE 193 /* C1 "Surface Probe Fault Signal Incomplete" */
+#define GFX_9_0__SRCID__CP_PREEMPT_ACK 194 /* C2 Preemption Acknowledge */
+#define GFX_9_0__SRCID__CP_GPF 195 /* C3 General Protection Fault (GPF) */
+#define GFX_9_0__SRCID__CP_GDS_ALLOC_ERROR 196 /* C4 GDS Alloc Error */
+#define GFX_9_0__SRCID__CP_ECC_ERROR 197 /* C5 ECC Error */
+#define GFX_9_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 /* C7 Compute query status */
+#define GFX_9_0__SRCID__CP_VM_DOORBELL 200 /* C8 Unattached VM Doorbell Received */
+#define GFX_9_0__SRCID__CP_FUE_ERROR 201 /* C9 ECC FUE Error */
+#define GFX_9_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 /* CA Streaming Perf Monitor Interrupt */
+#define GFX_9_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 /* E8 GRBM read timeout error */
+#define GFX_9_0__SRCID__GRBM_REG_GUI_IDLE 233 /* E9 Register GUI Idle */
+#define GFX_9_0__SRCID__SQ_INTERRUPT_ID 239 /* EF SQ Interrupt (ttrace wrap, errors) */
+
+#endif /* __IRQSRCS_GFX_9_0_H__ */
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
index c6b6f97de9de..aaed7f59e0e2 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
@@ -198,4 +198,102 @@
#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a
#define VISLANDS30_IV_EXTID_HPD_RX_F 11
+#define VISLANDS30_IV_SRCID_GPIO_19 0x00000053 /* 83 */
+
+#define VISLANDS30_IV_SRCID_SRBM_READ_TIMEOUT_ERR 0x00000060 /* 96 */
+#define VISLANDS30_IV_SRCID_SRBM_CTX_SWITCH 0x00000061 /* 97 */
+
+#define VISLANDS30_IV_SRBM_REG_ACCESS_ERROR 0x00000062 /* 98 */
+
+
+#define VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP 0x00000077 /* 119 */
+#define VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE 0x0000007c /* 124 */
+
+#define VISLANDS30_IV_SRCID_BIF_PF_VF_MSGBUF_VALID 0x00000087 /* 135 */
+
+#define VISLANDS30_IV_SRCID_BIF_VF_PF_MSGBUF_ACK 0x0000008a /* 138 */
+
+#define VISLANDS30_IV_SRCID_SYS_PAGE_INV_FAULT 0x0000008c /* 140 */
+#define VISLANDS30_IV_SRCID_SYS_MEM_PROT_FAULT 0x0000008d /* 141 */
+
+#define VISLANDS30_IV_SRCID_SEM_PAGE_INV_FAULT 0x00000090 /* 144 */
+#define VISLANDS30_IV_SRCID_SEM_MEM_PROT_FAULT 0x00000091 /* 145 */
+
+#define VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT 0x00000092 /* 146 */
+#define VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT 0x00000093 /* 147 */
+
+#define VISLANDS30_IV_SRCID_ACP 0x000000a2 /* 162 */
+
+#define VISLANDS30_IV_SRCID_VCE_TRAP 0x000000a7 /* 167 */
+#define VISLANDS30_IV_EXTID_VCE_TRAP_GENERAL_PURPOSE 0
+#define VISLANDS30_IV_EXTID_VCE_TRAP_LOW_LATENCY 1
+#define VISLANDS30_IV_EXTID_VCE_TRAP_REAL_TIME 2
+
+#define VISLANDS30_IV_SRCID_CP_INT_RB 0x000000b0 /* 176 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB1 0x000000b1 /* 177 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB2 0x000000b2 /* 178 */
+#define VISLANDS30_IV_SRCID_CP_PM4_RES_BITS_ERR 0x000000b4 /* 180 */
+#define VISLANDS30_IV_SRCID_CP_END_OF_PIPE 0x000000b5 /* 181 */
+#define VISLANDS30_IV_SRCID_CP_BAD_OPCODE 0x000000b7 /* 183 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT 0x000000b8 /* 184 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT 0x000000b9 /* 185 */
+#define VISLANDS30_IV_SRCID_CP_WAIT_MEM_SEM_FAULT 0x000000ba /* 186 */
+#define VISLANDS30_IV_SRCID_CP_GUI_IDLE 0x000000bb /* 187 */
+#define VISLANDS30_IV_SRCID_CP_GUI_BUSY 0x000000bc /* 188 */
+
+#define VISLANDS30_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000bf /* 191 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
+
+#define CARRIZO_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000c7 /* 199 */
+
+#define VISLANDS30_IV_SRCID_CP_WAIT_REG_MEM_POLL_TIMEOUT 0x000000c0 /* 192 */
+#define VISLANDS30_IV_SRCID_CP_SEM_SIG_INCOMPL 0x000000c1 /* 193 */
+#define VISLANDS30_IV_SRCID_CP_PREEMPT_ACK 0x000000c2 /* 194 */
+#define VISLANDS30_IV_SRCID_CP_GENERAL_PROT_FAULT 0x000000c3 /* 195 */
+#define VISLANDS30_IV_SRCID_CP_GDS_ALLOC_ERROR 0x000000c4 /* 196 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
+
+#define VISLANDS30_IV_SRCID_RLC_STRM_PERF_MONITOR 0x000000ca /* 202 */
+
+#define VISLANDS30_IV_SDMA_ATOMIC_SRC_ID 0x000000da /* 218 */
+
+#define VISLANDS30_IV_SRCID_SDMA_ECC_ERROR 0x000000dc /* 220 */
+
+#define VISLANDS30_IV_SRCID_SDMA_TRAP 0x000000e0 /* 224 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_INCOMPLETE 0x000000e1 /* 225 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_WAIT 0x000000e2 /* 226 */
+
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER 0x000000e5 /* 229 */
+
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH 0x000000e6 /* 230 */
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW 0x000000e7 /* 231 */
+
+#define VISLANDS30_IV_SRCID_GRBM_READ_TIMEOUT_ERR 0x000000e8 /* 232 */
+#define VISLANDS30_IV_SRCID_GRBM_REG_GUI_IDLE 0x000000e9 /* 233 */
+
+#define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG 0x000000ef /* 239 */
+
+#define VISLANDS30_IV_SRCID_SDMA_PREEMPT 0x000000f0 /* 240 */
+#define VISLANDS30_IV_SRCID_SDMA_VM_HOLE 0x000000f2 /* 242 */
+#define VISLANDS30_IV_SRCID_SDMA_CTXEMPTY 0x000000f3 /* 243 */
+#define VISLANDS30_IV_SRCID_SDMA_DOORBELL_INVALID 0x000000f4 /* 244 */
+#define VISLANDS30_IV_SRCID_SDMA_FROZEN 0x000000f5 /* 245 */
+#define VISLANDS30_IV_SRCID_SDMA_POLL_TIMEOUT 0x000000f6 /* 246 */
+#define VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE 0x000000f7 /* 247 */
+
+#define VISLANDS30_IV_SRCID_CG_THERMAL_TRIG 0x000000f8 /* 248 */
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER_TRIGGER 0x000000fd /* 253 */
+
+/* These are not "real" source ids defined by HW */
+#define VISLANDS30_IV_SRCID_VM_CONTEXT_ALL 0x00000100 /* 256 */
+#define VISLANDS30_IV_EXTID_VM_CONTEXT0_ALL 0
+#define VISLANDS30_IV_EXTID_VM_CONTEXT1_ALL 1
+
+
+/* IV Extended IDs */
+#define VISLANDS30_IV_EXTID_NONE 0x00000000
+#define VISLANDS30_IV_EXTID_INVALID 0xffffffff
+
#endif // _IVSRCID_VISLANDS30_H_
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
new file mode 100644
index 000000000000..802413832fe8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA0_4_0_H__
+#define __IRQSRCS_SDMA0_4_0_H__
+
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
+#define SDMA0_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
+#define SDMA0_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
+#define SDMA0_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
+#define SDMA0_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
+#define SDMA0_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
+#define SDMA0_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
+#define SDMA0_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
+#define SDMA0_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
+#define SDMA0_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
+#define SDMA0_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
+#define SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
+#define SDMA0_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
+#define SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
+#define SDMA0_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
+
+#endif /* __IRQSRCS_SDMA0_4_0_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
new file mode 100644
index 000000000000..d12a35619f9a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA1_4_0_H__
+#define __IRQSRCS_SDMA1_4_0_H__
+
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
+#define SDMA1_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
+#define SDMA1_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
+#define SDMA1_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
+#define SDMA1_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
+#define SDMA1_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
+#define SDMA1_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
+#define SDMA1_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
+#define SDMA1_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
+#define SDMA1_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
+#define SDMA1_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
+#define SDMA1_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
+#define SDMA1_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
+#define SDMA1_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
+#define SDMA1_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
+
+#endif /* __IRQSRCS_SDMA1_4_0_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
index 7a65206a6d21..02bab4673cd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
@@ -23,13 +23,10 @@
*
*/
-#ifndef __SOC_BOUNDING_BOX_H__
-#define __SOC_BOUNDING_BOX_H__
+#ifndef __IRQSRCS_SMUIO_9_0_H__
+#define __IRQSRCS_SMUIO_9_0_H__
-#include "dml_common_defs.h"
+#define SMUIO_9_0__SRCID__SMUIO_GPIO19 83 /* GPIO19 interrupt */
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
-voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+#endif /* __IRQSRCS_SMUIO_9_0_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
new file mode 100644
index 000000000000..5218bc53fb2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_THM_9_0_H__
+#define __IRQSRCS_THM_9_0_H__
+
+#define THM_9_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_9_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
+#endif /* __IRQSRCS_THM_9_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
new file mode 100644
index 000000000000..fb041aee6c66
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_UVD_7_0_H__
+#define __IRQSRCS_UVD_7_0_H__
+
+#define UVD_7_0__SRCID__UVD_ENC_GEN_PURP 119
+#define UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY 120
+#define UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* UVD system message interrupt */
+
+#endif /* __IRQSRCS_UVD_7_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
new file mode 100644
index 000000000000..3440bab565af
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCE_4_0_H__
+#define __IRQSRCS_VCE_4_0_H__
+
+#define VCE_4_0__CTXID__VCE_TRAP_GENERAL_PURPOSE 0
+#define VCE_4_0__CTXID__VCE_TRAP_LOW_LATENCY 1
+#define VCE_4_0__CTXID__VCE_TRAP_REAL_TIME 2
+
+#endif /* __IRQSRCS_VCE_4_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
new file mode 100644
index 000000000000..e5951709bfc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCN_1_0_H__
+#define __IRQSRCS_VCN_1_0_H__
+
+#define VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 /* 0x77 Encoder General Purpose */
+#define VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY 120 /* 0x78 Encoder Low Latency */
+#define VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* 0x7c UVD system message interrupt */
+
+#endif /* __IRQSRCS_VCN_1_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
new file mode 100644
index 000000000000..d130936c9989
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VMC_1_0_H__
+#define __IRQSRCS_VMC_1_0_H__
+
+
+#define VMC_1_0__SRCID__VM_FAULT 0
+#define VMC_1_0__SRCID__VM_CONTEXT0_ALL 256
+#define VMC_1_0__SRCID__VM_CONTEXT1_ALL 257
+
+#define UTCL2_1_0__SRCID__FAULT 0 /* UTC L2 has encountered a fault or retry scenario */
+
+
+#endif /* __IRQSRCS_VMC_1_0_H__ */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5733fbee07f7..14391b06080c 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -47,6 +47,17 @@ enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
+struct kfd_vm_fault_info {
+ uint64_t page_addr;
+ uint32_t vmid;
+ uint32_t mc_id;
+ uint32_t status;
+ bool prot_valid;
+ bool prot_read;
+ bool prot_write;
+ bool prot_exec;
+};
+
struct kfd_cu_info {
uint32_t num_shader_engines;
uint32_t num_shader_arrays_per_engine;
@@ -259,6 +270,21 @@ struct tile_config {
* IB to the corresponding ring (ring type). The IB is executed with the
* specified VMID in a user mode context.
*
+ * @get_vm_fault_info: Return information about a recent VM fault on
+ * GFXv7 and v8. If multiple VM faults occurred since the last call to
+ * this function, it returns information about the first of those
+ * faults. On GFXv9, VM fault information is fully contained in the IH
+ * packet and this function is not needed.
+ *
+ * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
+ * IH ring entry. This function allows the KFD ISR to get the VMID
+ * from the fault status register as early as possible.
+ *
+ * @gpu_recover: lets kgd reset the gpu after kfd detects a CPC hang
+ *
+ * @set_compute_idle: Indicates whether compute is idle on a device. This
+ * can be used to change power profiles depending on compute activity.
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -374,6 +400,14 @@ struct kfd2kgd_calls {
int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+
+ int (*get_vm_fault_info)(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+ uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+
+ void (*gpu_recover)(struct kgd_dev *kgd);
+
+ void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
};
/**
@@ -399,6 +433,10 @@ struct kfd2kgd_calls {
* @schedule_evict_and_restore_process: Schedules work queue that will prepare
* for safe eviction of KFD BOs that belong to the specified process.
*
+ * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu
+ *
+ * @post_reset: Notifies amdkfd that amdgpu successfully reset the gpu
+ *
* This structure contains function callback pointers so the kgd driver
* will notify to the amdkfd about certain status changes.
*
@@ -417,6 +455,8 @@ struct kgd2kfd_calls {
int (*resume_mm)(struct mm_struct *mm);
int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
struct dma_fence *fence);
+ int (*pre_reset)(struct kfd_dev *kfd);
+ int (*post_reset)(struct kfd_dev *kfd);
};
int kgd2kfd_init(unsigned interface_version,
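The @pre_reset/@post_reset comments above describe a bracketing handshake around a GPU reset. A minimal sketch of that flow, assuming kgd2kfd points at the kgd2kfd_calls table and using a hypothetical do_asic_reset() for the actual reset path:

	/* Sketch of the reset handshake; do_asic_reset() is hypothetical. */
	static int example_gpu_reset(struct kfd_dev *kfd,
				     const struct kgd2kfd_calls *kgd2kfd)
	{
		int r;

		r = kgd2kfd->pre_reset(kfd);	/* quiesce amdkfd first */
		if (r)
			return r;

		do_asic_reset();		/* hypothetical ASIC reset path */

		return kgd2kfd->post_reset(kfd); /* let amdkfd resume */
	}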
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 06f08f34a110..6a41b81c7325 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -192,7 +192,6 @@ struct amd_pp_simple_clock_info;
struct amd_pp_display_configuration;
struct amd_pp_clock_info;
struct pp_display_clock_request;
-struct pp_wm_sets_with_clock_ranges_soc15;
struct pp_clock_levels_with_voltage;
struct pp_clock_levels_with_latency;
struct amd_pp_clocks;
@@ -232,16 +231,19 @@ struct amd_pm_funcs {
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
/* export to amdgpu */
- void (*powergate_uvd)(void *handle, bool gate);
- void (*powergate_vce)(void *handle, bool gate);
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
int (*load_firmware)(void *handle);
int (*wait_for_fw_loading_complete)(void *handle);
+ int (*set_powergating_by_smu)(void *handle,
+ uint32_t block_type, bool gate);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
int (*set_power_limit)(void *handle, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
+ int (*get_power_profile_mode)(void *handle, char *buf);
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
/* export to DC */
u32 (*get_sclk)(void *handle, bool low);
u32 (*get_mclk)(void *handle, bool low);
@@ -261,15 +263,12 @@ struct amd_pm_funcs {
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
int (*set_watermarks_for_clocks_ranges)(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ void *clock_ranges);
int (*display_clock_voltage_request)(void *handle,
struct pp_display_clock_request *clock);
int (*get_display_mode_validation_clocks)(void *handle,
struct amd_pp_simple_clock_info *clocks);
- int (*get_power_profile_mode)(void *handle, char *buf);
- int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
- int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
- int (*set_mmhub_powergating_by_smu)(void *handle);
+ int (*notify_smu_enable_pwe)(void *handle);
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d567be49c31b..7a646f94b478 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -221,29 +221,7 @@ static int pp_sw_reset(void *handle)
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret;
-
- if (!hwmgr || !hwmgr->pm_en)
- return 0;
-
- if (hwmgr->hwmgr_func->gfx_off_control) {
- /* Enable/disable GFX off through SMU */
- ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
- state == AMD_PG_STATE_GATE);
- if (ret)
- pr_err("gfx off control failed!\n");
- }
-
- if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
- pr_debug("%s was not implemented.\n", __func__);
- return 0;
- }
-
- /* Enable/disable GFX per cu powergating through SMU */
- return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
- state == AMD_PG_STATE_GATE);
+ return 0;
}
static int pp_suspend(void *handle)
@@ -1020,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
- struct amd_pp_simple_clock_info simple_clocks;
+ struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -1056,7 +1034,10 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
- clocks->max_clocks_state = simple_clocks.level;
+ if (simple_clocks.level == 0)
+ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+ else
+ clocks->max_clocks_state = simple_clocks.level;
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1118,17 +1099,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+ if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
mutex_unlock(&hwmgr->smu_lock);
return ret;
@@ -1159,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
+ clocks->level = PP_DAL_POWERLEVEL_7;
+
mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
@@ -1168,19 +1151,78 @@ static int pp_get_display_mode_validation_clocks(void *handle,
return ret;
}
-static int pp_set_mmhub_powergating_by_smu(void *handle)
+static int pp_dpm_powergate_mmhub(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+ if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+}
+
+static int pp_dpm_powergate_gfx(void *handle, bool gate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
+
+ if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+ return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+}
+
+static int pp_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ int ret = 0;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ case AMD_IP_BLOCK_TYPE_VCN:
+ pp_dpm_powergate_uvd(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ pp_dpm_powergate_vce(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_GMC:
+ pp_dpm_powergate_mmhub(handle);
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = pp_dpm_powergate_gfx(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int pp_notify_smu_enable_pwe(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
}
static const struct amd_pm_funcs pp_dpm_funcs = {
@@ -1189,8 +1231,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.force_performance_level = pp_dpm_force_performance_level,
.get_performance_level = pp_dpm_get_performance_level,
.get_current_power_state = pp_dpm_get_current_power_state,
- .powergate_vce = pp_dpm_powergate_vce,
- .powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
@@ -1210,6 +1250,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
+ .set_powergating_by_smu = pp_set_powergating_by_smu,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
@@ -1227,5 +1268,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = pp_display_clock_voltage_request,
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
- .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+ .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
};
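With powergate_uvd/powergate_vce folded into a single hook, callers now go through set_powergating_by_smu with an IP block type and the dispatch above routes to the right backend. An illustrative caller; the pp_funcs/pp_handle naming mirrors how amdgpu reaches the amd_pm_funcs table, but the wrapper itself is hypothetical:

	/* Illustrative wrapper only; the surrounding plumbing is assumed. */
	static int example_gate_uvd(const struct amd_pm_funcs *pp_funcs,
				    void *pp_handle, bool gate)
	{
		if (!pp_funcs->set_powergating_by_smu)
			return -EOPNOTSUPP;

		return pp_funcs->set_powergating_by_smu(pp_handle,
					AMD_IP_BLOCK_TYPE_UVD, gate);
	}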
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index a0bb921fac22..6ef3c875fedd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -75,10 +75,12 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = NULL;
int ret = -EINVAL;
PHM_FUNC_CHECK(hwmgr);
+ adev = hwmgr->adev;
- if (smum_is_dpm_running(hwmgr)) {
+ if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
pr_info("dpm has been enabled\n");
return 0;
}
@@ -435,7 +437,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
PHM_FUNC_CHECK(hwmgr);
@@ -443,7 +445,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
return -EINVAL;
return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
}
int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index e63bc47dc715..8994aa5c8cf8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,7 +81,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
return -EINVAL;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
- hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -148,10 +147,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_AI:
- hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
@@ -236,6 +235,11 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
goto err1;
+ /* make sure dc limits are valid */
+ if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ hwmgr->dyn_state.max_clock_voltage_on_dc =
+ hwmgr->dyn_state.max_clock_voltage_on_ac;
ret = psm_init_power_state_table(hwmgr);
if (ret)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 7047e29755c3..01dc46dc9c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1544,14 +1544,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
switch (hwmgr->chip_id) {
case CHIP_TONGA:
case CHIP_FIJI:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
return;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
return;
default:
break;
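The hunk above fixes an operand-order bug: the raw little-endian VBIOS word was divided before being byte-swapped, which corrupts the result on big-endian hosts (on little-endian ones le32_to_cpu() is a no-op, masking the bug). A worked illustration with made-up values:

	__le32 raw  = cpu_to_le32(4000);	/* VBIOS stores little-endian */
	u32    bad  = le32_to_cpu(raw / 4);	/* BE host: divides the unswapped bits */
	u32    good = le32_to_cpu(raw) / 4;	/* swap first, then scale: 1000 */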
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 5325661fedff..d27c1c9df286 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
return 0;
}
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_2 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_1 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
- struct atom_firmware_info_v3_1 *info = NULL;
+ struct atom_firmware_info_v3_2 *fwinfo_3_2;
+ struct atom_firmware_info_v3_1 *fwinfo_3_1;
+ struct atom_common_table_header *info = NULL;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
- info = (struct atom_firmware_info_v3_1 *)
+ info = (struct atom_common_table_header *)
smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- boot_values->ulRevision = info->firmware_revision;
- boot_values->ulGfxClk = info->bootup_sclk_in10khz;
- boot_values->ulUClk = info->bootup_mclk_in10khz;
- boot_values->usVddc = info->bootup_vddc_mv;
- boot_values->usVddci = info->bootup_vddci_mv;
- boot_values->usMvddc = info->bootup_mvddc_mv;
- boot_values->usVddGfx = info->bootup_vddgfx_mv;
- boot_values->ucCoolingID = info->coolingsolution_id;
- boot_values->ulSocClk = 0;
- boot_values->ulDCEFClk = 0;
+ if ((info->format_revision == 3) && (info->content_revision == 2)) {
+ fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+ boot_values, fwinfo_3_2);
+ } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+ fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+ boot_values, fwinfo_3_1);
+ } else {
+ pr_info("Fw info table revision does not match!");
+ return -EINVAL;
+ }
return 0;
}
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
+ param->Vr2_I2C_address = info->Vr2_I2C_address;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe10aa4db5e6..22e21668c93a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulUClk;
uint32_t ulSocClk;
uint32_t ulDCEFClk;
+ uint32_t ulEClk;
+ uint32_t ulVClk;
+ uint32_t ulDClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t acggfxclkspreadenabled;
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
+
+ uint8_t Vr2_I2C_address;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 35bd9870ab10..4e1fd5393845 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -183,10 +183,10 @@ static int get_vddc_lookup_table(
ATOM_Tonga_Voltage_Lookup_Record,
entries, vddc_lookup_pp_tables, i);
record->us_calculated = 0;
- record->us_vdd = atom_record->usVdd;
- record->us_cac_low = atom_record->usCACLow;
- record->us_cac_mid = atom_record->usCACMid;
- record->us_cac_high = atom_record->usCACHigh;
+ record->us_vdd = le16_to_cpu(atom_record->usVdd);
+ record->us_cac_low = le16_to_cpu(atom_record->usCACLow);
+ record->us_cac_mid = le16_to_cpu(atom_record->usCACMid);
+ record->us_cac_high = le16_to_cpu(atom_record->usCACHigh);
}
*lookup_table = table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index d4bc83e81389..a63e00653324 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -993,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = latency_required ?
smu10_get_mem_latency(hwmgr,
pclk_vol_table->entries[i].clk) :
@@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
clocks->num_levels++;
}
@@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct smu10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
int result = 0;
@@ -1126,7 +1127,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
}
-static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}
@@ -1182,10 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.asic_setup = smu10_setup_asic_task,
.power_state_set = smu10_set_power_state_tasks,
.dynamic_state_management_disable = smu10_disable_dpm_tasks,
- .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+ .powergate_mmhub = smu10_powergate_mmhub,
.smus_notify_pwe = smu10_smus_notify_pwe,
.gfx_off_control = smu10_gfx_off_control,
.display_clock_voltage_request = smu10_display_clock_voltage_request,
+ .powergate_gfx = smu10_gfx_off_control,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
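The *10 scaling above converts the pptable's clock entries, which the factor implies are stored in 10 kHz units, into the kHz that the clocks_in_khz fields advertise (an inference from the conversion itself, not a documented contract):

	/* Assumed unit convention: pptable clk entries are in 10 kHz units. */
	static inline u32 pp_clk_10khz_to_khz(u32 clk_10khz)
	{
		return clk_10khz * 10;
	}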
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6d72a5600917..683b29a99366 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
PPSMC_MSG_VCEDPM_Disable);
}
-static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr, enable ?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
if (!bgate)
@@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
}
-static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- if (!bgate)
- smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
- return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
@@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SAMPowerOFF);
- return 0;
-}
-
-static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SAMPowerON);
- return 0;
-}
-
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
- data->samu_power_gated = false;
smu7_powerup_uvd(hwmgr);
smu7_powerup_vce(hwmgr);
- smu7_powerup_samu(hwmgr);
return 0;
}
@@ -195,26 +161,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
}
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->samu_power_gated == bgate)
- return 0;
-
- data->samu_power_gated = bgate;
-
- if (bgate) {
- smu7_update_samu_dpm(hwmgr, true);
- smu7_powerdown_samu(hwmgr);
- } else {
- smu7_powerup_samu(hwmgr);
- smu7_update_samu_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
@@ -470,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
* Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx.
*/
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
{
struct amdgpu_device *adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 1ddce023218a..fc8f8a6acc72 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -29,11 +29,10 @@
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id);
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index f8e866ceda02..052e60dfaf9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,8 @@
#include "processpptables.h"
#include "pp_thermal.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -885,6 +887,60 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
data->odn_dpm_table.max_vddc = max_vddc;
}
+static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+
+ if (table_info == NULL)
+ return;
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.sclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ break;
+ }
+ }
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.mclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ break;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
+ }
+ }
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ }
+}
+
static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -904,10 +960,13 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
/* initialize ODN table */
if (hwmgr->od_enabled) {
- smu7_setup_voltage_range_from_vbios(hwmgr);
- smu7_odn_initial_default_setting(hwmgr);
+ if (data->odn_dpm_table.max_vddc) {
+ smu7_check_dpm_table_updated(hwmgr);
+ } else {
+ smu7_setup_voltage_range_from_vbios(hwmgr);
+ smu7_odn_initial_default_setting(hwmgr);
+ }
}
-
return 0;
}
@@ -1521,7 +1580,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.sclk_up_hyst = 0;
data->current_profile_setting.sclk_down_hyst = 100;
data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
- data->current_profile_setting.bupdate_sclk = 1;
+ data->current_profile_setting.bupdate_mclk = 1;
data->current_profile_setting.mclk_up_hyst = 0;
data->current_profile_setting.mclk_down_hyst = 100;
data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
@@ -2820,7 +2879,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
const struct pp_power_state *current_ps)
{
-
+ struct amdgpu_device *adev = hwmgr->adev;
struct smu7_power_state *smu7_ps =
cast_phw_smu7_power_state(&request_ps->hardware);
uint32_t sclk;
@@ -2843,12 +2902,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
"VI should always have 2 performance levels",
);
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
/* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
+ if (!adev->pm.ac_power) {
for (i = 0; i < smu7_ps->performance_level_count; i++) {
if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
@@ -3126,7 +3185,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenLow);
performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
+ state_entry->ucPCIELaneLow);
performance_level = &(smu7_power_state->performance_levels
[smu7_power_state->performance_level_count++]);
@@ -3717,8 +3776,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit)
- || (dpm_table->dpm_levels[i].value > high_limit))
+ /* skip the trim if OD is enabled */
+ if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+ || dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
dpm_table->dpm_levels[i].enabled = true;
@@ -3762,10 +3822,8 @@ static int smu7_generate_dpm_level_enable_mask(
const struct smu7_power_state *smu7_ps =
cast_const_phw_smu7_power_state(states->pnew_state);
- /*skip the trim if od is enabled*/
- if (!hwmgr->od_enabled)
- result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
if (result)
return result;
@@ -4049,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 230,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 231,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 83,
+ VISLANDS30_IV_SRCID_GPIO_19,
source);
return 0;
@@ -4244,7 +4302,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
data->uvd_power_gated = false;
data->vce_power_gated = false;
- data->samu_power_gated = false;
return 0;
}
@@ -4555,12 +4612,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
return -EINVAL;
dep_sclk_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < dep_sclk_table->count; i++)
- clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
clocks->count = dep_sclk_table->count;
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
for (i = 0; i < sclk_table->count; i++)
- clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->clock[i] = sclk_table->entries[i].clk * 10;
clocks->count = sclk_table->count;
}
@@ -4592,7 +4649,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
return -EINVAL;
dep_mclk_table = table_info->vdd_dep_on_mclk;
for (i = 0; i < dep_mclk_table->count; i++) {
- clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
clocks->latency[i] = smu7_get_mem_latency(hwmgr,
dep_mclk_table->entries[i].clk);
}
@@ -4600,7 +4657,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
for (i = 0; i < mclk_table->count; i++)
- clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->clock[i] = mclk_table->entries[i].clk * 10;
clocks->count = mclk_table->count;
}
return 0;
@@ -4739,60 +4796,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
-static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
-
- if (table_info == NULL)
- return;
-
- for (i=0; i<data->dpm_table.sclk_table.count; i++) {
- if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
- data->dpm_table.sclk_table.dpm_levels[i].value) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- break;
- }
- }
-
- for (i=0; i<data->dpm_table.mclk_table.count; i++) {
- if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
- data->dpm_table.mclk_table.dpm_levels[i].value) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- break;
- }
- }
-
- dep_table = table_info->vdd_dep_on_mclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
-
- for (i=0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
- return;
- }
- }
-
- dep_table = table_info->vdd_dep_on_sclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
- for (i=0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
- return;
- }
- }
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
-}
-
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
@@ -5043,7 +5046,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_fan_control_mode = smu7_get_fan_control_mode,
.force_clock_level = smu7_force_clock_level,
.print_clock_levels = smu7_print_clock_levels,
- .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+ .powergate_gfx = smu7_powergate_gfx,
.get_sclk_od = smu7_get_sclk_od,
.set_sclk_od = smu7_set_sclk_od,
.get_mclk_od = smu7_get_mclk_od,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index c91e75db6a8e..3784ce6e50ab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -310,7 +310,6 @@ struct smu7_hwmgr {
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
- bool samu_power_gated;
bool need_long_memory_training;
/* Application power optimization parameters */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index c952845833d7..5e19f5977eb1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 50690c72b2ea..0adfc5392cd3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
return 0;
}
+/* convert from 8-bit VID to real voltage in mV*4 */
static uint32_t smu8_convert_8Bit_index_to_voltage(
struct pp_hwmgr *hwmgr, uint16_t voltage)
{
@@ -1604,17 +1605,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
switch (type) {
case amd_pp_disp_clock:
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = data->sys_info.display_clock[i];
+ clocks->clock[i] = data->sys_info.display_clock[i] * 10;
break;
case amd_pp_sys_clock:
table = hwmgr->dyn_state.vddc_dependency_on_sclk;
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = table->entries[i].clk;
+ clocks->clock[i] = table->entries[i].clk * 10;
break;
case amd_pp_mem_clock:
clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
+ clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
break;
default:
return -1;
@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
*((uint32_t *)value) = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
*((uint32_t *)value) = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
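
The new /4 in both sensor paths matches the comment added above: the helper returns voltage in quarter-millivolt units. A worked sketch, assuming the usual SVI2 decode this helper implements (1550 mV base, 6.25 mV per VID step, i.e. 6200 - 25*vid in mV*4):

/* Sketch only, not the driver function itself: SVI2 VID decode in
 * mV*4 units, matching the "real voltage in mV*4" comment above. */
static uint32_t vid_to_mv4(uint16_t vid)
{
	return 6200 - (vid * 25);	/* 4*1550 mV - vid * 4*6.25 mV */
}

/* e.g. vid = 100: 6200 - 2500 = 3700 (mV*4), so 3700 / 4 = 925 mV */
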
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 93a3d022ba47..2aab1b475945 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -25,6 +25,9 @@
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
+#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
+#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
uint8_t convert_to_vid(uint16_t vddc)
{
@@ -543,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
- if (src_id == 230)
+ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
- else if (src_id == 231)
+ else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
- else if (src_id == 83)
+ else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
@@ -594,17 +597,17 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
- 0,
+ THM_9_0__SRCID__THM_DIG_THERM_L2H,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
- 1,
+ THM_9_0__SRCID__THM_DIG_THERM_H2L,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_ROM_SMUIO,
- 83,
+ SMUIO_9_0__SRCID__SMUIO_GPIO19,
source);
return 0;
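
The literals 230, 231 and 83 above are replaced one-for-one by named interrupt source IDs, so the values below follow directly from the substitution (the authoritative definitions live in the ivsrcid headers now included at the top of the file):

/* Values implied by the one-for-one replacements above: */
#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH	230	/* over temp */
#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW	231	/* under temp */
#define VISLANDS30_IV_SRCID_GPIO_19			 83	/* CTF */
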
@@ -652,7 +655,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
}
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
uint32_t i;
struct watermarks *table = wt_table;
@@ -660,49 +663,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
if (!table || !wm_with_clock_ranges)
return -EINVAL;
- if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+ if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
return -EINVAL;
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
100);
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+ wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+ wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
return 0;
}
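
The renamed dm_pp_* fields carry clocks in kHz, while the 16-bit watermark rows take MHz, hence the divisor change from 100 to 1000 on these paths. A quick sanity check with an illustrative value:

/* Illustrative: a 900,000 kHz DCF clock becomes 900 MHz, which fits
 * comfortably in the uint16_t watermark row. */
uint32_t wm_min_dcfclk_clk_in_khz = 900000;
uint16_t min_clock_mhz = (uint16_t)(wm_min_dcfclk_clk_in_khz / 1000);	/* 900 */
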
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index 916cc01e7652..5454289d5226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 05e680d55dbb..fb86c24394ff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -55,12 +55,6 @@
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
-#define MEM_FREQ_LOW_LATENCY 25000
-#define MEM_FREQ_HIGH_LATENCY 80000
-#define MEM_LATENCY_HIGH 245
-#define MEM_LATENCY_LOW 35
-#define MEM_LATENCY_ERR 0xFFFF
-
#define mmDF_CS_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
@@ -295,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+ struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
uint32_t i;
+ int result;
+
+ result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
+ if (!result) {
+ data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+ data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+ }
od_lookup_table = &odn_table->vddc_lookup_table;
vddc_lookup_table = table_info->vddc_lookup_table;
@@ -2078,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_AVFS].supported) {
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
- data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
- data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
-
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->MaxVoltageVid = (uint8_t)
@@ -2414,6 +2413,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
return result;
}
+static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+ uint32_t i;
+
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
+ }
+ }
+
+ if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ }
+}
+
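
The helper moved up here promotes a voltage-only overdrive edit into full clock-table updates, since VDDC rides on the clock dependency tables. A compressed sketch of that flag algebra (the DPMTABLE_* values below are placeholders, not the driver's actual definitions):

/* Placeholder flag values, for illustration only: */
#define OD_UPDATE_SCLK	0x1
#define OD_UPDATE_MCLK	0x2
#define OD_UPDATE_VDDC	0x4

uint32_t need = OD_UPDATE_VDDC;
if (need & OD_UPDATE_VDDC) {	/* a VDDC change forces both clock rebuilds */
	need &= ~OD_UPDATE_VDDC;
	need |= OD_UPDATE_SCLK | OD_UPDATE_MCLK;
}
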
/**
* Initializes the SMC table and uploads it
*
@@ -2430,6 +2463,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table voltage_table;
struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
result = vega10_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@@ -2437,8 +2471,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
return result);
/* initialize ODN table */
- if (hwmgr->od_enabled)
- vega10_odn_initial_default_setting(hwmgr);
+ if (hwmgr->od_enabled) {
+ if (odn_table->max_vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ vega10_check_dpm_table_updated(hwmgr);
+ } else {
+ vega10_odn_initial_default_setting(hwmgr);
+ }
+ }
pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_SVID2, &voltage_table);
@@ -2861,11 +2901,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
vega10_enable_disable_PCC_limit_feature(hwmgr, true);
- if ((hwmgr->smu_version == 0x001c2c00) ||
- (hwmgr->smu_version == 0x001c2d00))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
-
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
@@ -3061,6 +3096,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
const struct pp_power_state *current_ps)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct vega10_power_state *vega10_ps =
cast_phw_vega10_power_state(&request_ps->hardware);
uint32_t sclk;
@@ -3086,12 +3122,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (vega10_ps->performance_level_count != 2)
pr_info("VI should always have 2 performance levels");
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
/* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
+ if (!adev->pm.ac_power) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
if (vega10_ps->performance_levels[i].mem_clock >
max_limits->mclk)
@@ -3181,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
/* Find the lowest MCLK frequency that is within
* the tolerable latency defined in DAL
*/
- latency = 0;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
for (i = 0; i < data->mclk_latency_table.count; i++) {
if ((data->mclk_latency_table.entries[i].latency <= latency) &&
(data->mclk_latency_table.entries[i].frequency >=
@@ -3223,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
{
int result = 0;
struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_dpm_table *dpm_table = &data->dpm_table;
+ struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
+ struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
+ int count;
if (!data->need_update_dpm_table)
return 0;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ for (count = 0; count < dpm_table->gfx_table.count; count++)
+ dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
+ odn_clk_table = &odn_table->vdd_dep_on_mclk;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ for (count = 0; count < dpm_table->mem_table.count; count++)
+ dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
if (data->need_update_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
@@ -3674,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3749,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
uint32_t i;
struct pp_display_clock_request clock_req;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega10_notify_smc_display_change(hwmgr, false);
else
vega10_notify_smc_display_change(hwmgr, true);
@@ -3765,7 +3818,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (i < dpm_table->count) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
+ clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -4022,28 +4075,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
table_info->vdd_dep_on_sclk;
uint32_t i;
+ clocks->num_levels = 0;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz =
- dep_table->entries[i].clk;
+ dep_table->entries[i].clk * 10;
clocks->num_levels++;
}
}
}
-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
- uint32_t clock)
-{
- if (clock >= MEM_FREQ_LOW_LATENCY &&
- clock < MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_HIGH;
- else if (clock >= MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_LOW;
- else
- return MEM_LATENCY_ERR;
-}
-
static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
@@ -4052,26 +4094,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_mclk;
struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t j = 0;
uint32_t i;
- clocks->num_levels = 0;
- data->mclk_latency_table.count = 0;
-
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
- clocks->data[clocks->num_levels].clocks_in_khz =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].frequency =
- dep_table->entries[i].clk;
- clocks->data[clocks->num_levels].latency_in_us =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].latency =
- vega10_get_mem_latency(hwmgr,
- dep_table->entries[i].clk);
- clocks->num_levels++;
- data->mclk_latency_table.count++;
+
+ clocks->data[j].clocks_in_khz =
+ dep_table->entries[i].clk * 10;
+ data->mclk_latency_table.entries[j].frequency =
+ dep_table->entries[i].clk;
+ clocks->data[j].latency_in_us =
+ data->mclk_latency_table.entries[j].latency = 25;
+ j++;
}
}
+ clocks->num_levels = data->mclk_latency_table.count = j;
}
static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
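
Throughout these hunks the pptable clock entries are stored in 10 kHz units, so reporting to DC in kHz multiplies by 10; the memory-latency table keeps the raw 10 kHz value and now uses a flat 25 us latency. With illustrative numbers:

/* Illustrative: a pptable entry of 94500 (10 kHz units) reports as
 * 945,000 kHz (945 MHz), with latency fixed at 25 us. */
uint32_t clk = 94500;
uint32_t clocks_in_khz = clk * 10;	/* 945000 */
uint32_t latency_in_us = 25;
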
@@ -4084,7 +4122,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4100,7 +4138,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4160,7 +4198,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
entries[dep_table->entries[i].vddInd].us_vdd);
clocks->num_levels++;
@@ -4173,9 +4211,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_range)
{
struct vega10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
int result = 0;
@@ -4695,40 +4734,6 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
-static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
- struct vega10_hwmgr *data = hwmgr->backend;
- struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
- struct phm_ppt_v2_information *table_info = hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
- uint32_t i;
-
- dep_table = table_info->vdd_dep_on_mclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
-
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
- return;
- }
- }
-
- dep_table = table_info->vdd_dep_on_sclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
- return;
- }
- }
-
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
-}
-
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index aadd6cbc7e85..339820da9e6a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -370,7 +370,6 @@ struct vega10_hwmgr {
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
- bool samu_power_gated;
bool need_long_memory_training;
/* Internal settings to apply the application power optimization parameters */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e2098824d..0789d64246ca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disallowed_features = 0x0;
data->registry_data.od_state_in_dc_support = 0;
+ data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
data->registry_data.log_avfs_param = 0;
@@ -422,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ if (hwmgr->feature_mask & PP_GFXOFF_MASK)
+ data->gfxoff_controlled_by_driver = true;
+ else
+ data->gfxoff_controlled_by_driver = false;
+
return result;
}
@@ -453,43 +459,36 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
*/
static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
{
- dpm_state->soft_min_level = 0xff;
- dpm_state->soft_max_level = 0xff;
- dpm_state->hard_min_level = 0xff;
- dpm_state->hard_max_level = 0xff;
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
}
-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
- PPCLK_e clkID, uint32_t *num_dpm_level)
+static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ PPCLK_e clk_id, uint32_t *num_of_levels)
{
- int result;
- /*
- * SMU expects the Clock ID to be in the top 16 bits.
- * Lower 16 bits specify the level however 0xFF is a
- * special argument the returns the total number of levels
- */
- PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
- "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
- return -EINVAL);
-
- result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+ int ret = 0;
- PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
- "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
- return -EINVAL);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ PP_ASSERT_WITH_CODE(!ret,
+ "[GetNumOfDpmLevel] failed to get dpm levels!",
+ return ret);
- PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
- "[GetNumberDPMLevel] Number of CLK Levels is zero!",
- return -EINVAL);
+ *num_of_levels = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+ "[GetNumOfDpmLevel] number of clk levels is invalid!",
+ return -EINVAL);
- return result;
+ return ret;
}
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
- int result;
+ int result = 0;
/*
*SMU expects the Clock ID to be in the top 16 bits.
@@ -500,15 +499,36 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
return -EINVAL);
- result = vega12_read_arg_from_smc(hwmgr, clock);
-
- PP_ASSERT_WITH_CODE(*clock != 0,
- "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
- return -EINVAL);
+ *clock = smum_get_argument(hwmgr);
return result;
}
+static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels, clk;
+
+ ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk levels!",
+ return ret);
+
+ dpm_table->count = num_of_levels;
+
+ for (i = 0; i < num_of_levels; i++) {
+ ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk of specific level!",
+ return ret);
+ dpm_table->dpm_levels[i].value = clk;
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ return ret;
+}
+
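
The new helper relies on the GetDpmFreqByIndex argument encoding that the deleted comment described: clock ID in the upper 16 bits, level index in the lower 16, with 0xFF as the "how many levels" query. For example:

/* Argument encoding for PPSMC_MSG_GetDpmFreqByIndex: */
uint32_t count_query = (PPCLK_SOCCLK << 16) | 0xFF;	/* number of levels */
uint32_t level_query = (PPCLK_SOCCLK << 16) | 3;	/* frequency of level 3 */
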
/*
* This function is to initialize all DPM state tables
* for SMU based on the dependency table.
@@ -519,224 +539,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*/
static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
- uint32_t num_levels, i, clock;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
-
struct vega12_single_dpm_table *dpm_table;
+ int ret = 0;
memset(&data->dpm_table, 0, sizeof(data->dpm_table));
- /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+ /* socclk */
dpm_table = &(data->dpm_table.soc_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_SOCCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_GFXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Initialize Mclk DPM table based on allow Mclk values */
- dpm_table = &(data->dpm_table.mem_table);
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_UCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* eclk */
dpm_table = &(data->dpm_table.eclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_ECLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* vclk */
dpm_table = &(data->dpm_table.vclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_VCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dclk */
dpm_table = &(data->dpm_table.dclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Assume there is no headless Vega12 for now */
+ /* dcefclk */
dpm_table = &(data->dpm_table.dcef_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DCEFCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCEFCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* pixclk */
dpm_table = &(data->dpm_table.pixel_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PIXCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PIXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dispclk */
dpm_table = &(data->dpm_table.display_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DISPCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DISPCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* phyclk */
dpm_table = &(data->dpm_table.phy_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PHYCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PHYCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* save a copy of the default DPM table */
@@ -803,6 +735,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+ data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+ data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
@@ -844,6 +779,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
return 0;
}
+static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = true;
+ data->vce_power_gated = true;
+
+ if (data->smu_features[GNLD_DPM_UVD].enabled)
+ data->uvd_power_gated = false;
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled)
+ data->vce_power_gated = false;
+}
+
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
@@ -862,12 +812,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
data->smu_features[i].enabled = enabled;
data->smu_features[i].supported = enabled;
- PP_ASSERT(
- !data->smu_features[i].allowed || enabled,
- "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
}
}
+ vega12_init_powergate_state(hwmgr);
+
return 0;
}
@@ -923,6 +872,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
return result;
}
+static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+ PPCLK_e clkid, struct vega12_clock_range *clock)
+{
+ /* AC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max ac clock from SMC!",
+ return -EINVAL);
+ clock->ACMax = smum_get_argument(hwmgr);
+
+ /* AC Min */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get min ac clock from SMC!",
+ return -EINVAL);
+ clock->ACMin = smum_get_argument(hwmgr);
+
+ /* DC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max dc clock from SMC!",
+ return -EINVAL);
+ clock->DCMax = smum_get_argument(hwmgr);
+
+ return 0;
+}
+
+static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t i;
+
+ for (i = 0; i < PPCLK_COUNT; i++)
+ PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
+ i, &(data->clk_range[i])),
+ "Failed to get clk range from SMC!",
+ return -EINVAL);
+
+ return 0;
+}
+
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -950,6 +941,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to power control set level!",
result = tmp_result);
+ result = vega12_get_all_clock_ranges(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to get all clock ranges!",
+ return result);
+
result = vega12_odn_initialize_default_settings(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to power control set level!",
@@ -978,76 +974,172 @@ static uint32_t vega12_find_lowest_dpm_level(
break;
}
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
return i;
}
static uint32_t vega12_find_highest_dpm_level(
struct vega12_single_dpm_table *table)
{
- uint32_t i = 0;
+ int32_t i = 0;
+ PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
+ "[FindHighestDPMLevel] DPM Table has too many entries!",
+ return MAX_REGULAR_DPM_NUMBER - 1);
- if (table->count <= MAX_REGULAR_DPM_NUMBER) {
- for (i = table->count; i > 0; i--) {
- if (table->dpm_levels[i - 1].enabled)
- return i - 1;
- }
- } else {
- pr_info("DPM Table Has Too Many Entries!");
- return MAX_REGULAR_DPM_NUMBER - 1;
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
}
- return i;
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return (uint32_t)i;
}
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_boot_level !=
- data->dpm_table.gfx_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
- data->dpm_table.gfx_table.dpm_state.soft_min_level =
- data->smc_state_table.gfx_boot_level;
+ uint32_t min_freq;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min gfxclk !",
+ return ret);
}
- if (data->smc_state_table.mem_boot_level !=
- data->dpm_table.mem_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
- data->dpm_table.mem_table.dpm_state.soft_min_level =
- data->smc_state_table.mem_boot_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min memclk !",
+ return ret);
+
+ min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set hard min memclk !",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min vclk!",
+ return ret);
+
+ min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min socclk!",
+ return ret);
+ }
+
+ return ret;
}
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_max_level !=
- data->dpm_table.gfx_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
- data->dpm_table.gfx_table.dpm_state.soft_max_level =
- data->smc_state_table.gfx_max_level;
+ uint32_t max_freq;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max gfxclk!",
+ return ret);
}
- if (data->smc_state_table.mem_max_level !=
- data->dpm_table.mem_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
- data->dpm_table.mem_table.dpm_state.soft_max_level =
- data->smc_state_table.mem_max_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max memclk!",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max vclk!",
+ return ret);
+
+ max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max socclk!",
+ return ret);
+ }
+
+ return ret;
}
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
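
Every soft/hard limit message in the two functions above packs the clock ID into the upper halfword and the target frequency into the lower one; the 0x0/0xffff defaults set in vega12_init_dpm_state() are what make the & 0xffff masking safe. Illustratively (frequency apparently in MHz, per this file's conventions):

/* Argument encoding for PPSMC_MSG_SetSoftMinByFreq and friends: */
uint32_t min_freq = 1600;	/* illustrative value, MHz */
uint32_t arg = (PPCLK_GFXCLK << 16) | (min_freq & 0xffff);
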
@@ -1123,7 +1215,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
"Failed to get current package power!",
return -EINVAL);
- vega12_read_arg_from_smc(hwmgr, &value);
+ value = smum_get_argument(hwmgr);
/* power value is an integer */
*query = value << 8;
#endif
@@ -1136,14 +1228,11 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
*gfx_freq = 0;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(
- vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
- "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
- return -1);
+ return -EINVAL);
+ gfx_clk = smum_get_argument(hwmgr);
*gfx_freq = gfx_clk * 100;
@@ -1159,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(
- vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
- "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
- return -1);
+ return -EINVAL);
+ mem_clk = smum_get_argument(hwmgr);
*mclk_freq = mem_clk * 100;
@@ -1180,16 +1266,12 @@ static int vega12_get_current_activity_percent(
#if 0
ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
if (!ret) {
- ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
- if (!ret) {
- if (current_activity > 100) {
- PP_ASSERT(false,
- "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
- current_activity = 100;
- }
- } else
+ current_activity = smum_get_argument(hwmgr);
+ if (current_activity > 100) {
PP_ASSERT(false,
- "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
+ "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
+ current_activity = 100;
+ }
} else
PP_ASSERT(false,
"[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
@@ -1252,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
return 0;
}
@@ -1270,7 +1352,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
switch (clk_type) {
case amd_pp_dcef_clock:
- clk_freq = clock_req->clock_freq_in_khz / 100;
clk_select = PPCLK_DCEFCLK;
break;
case amd_pp_disp_clock:
@@ -1306,9 +1387,10 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
(struct vega12_hwmgr *)(hwmgr->backend);
struct PP_Clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
- uint32_t clk_request;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);
@@ -1319,7 +1401,7 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+ clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE(
@@ -1333,15 +1415,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
}
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
- clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
- "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
- return -1);
- data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
- }
-
return 0;
}
@@ -1350,12 +1423,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+ uint32_t soft_level;
+
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1372,13 +1452,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t soft_level;
+
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1394,17 +1480,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- data->smc_state_table.gfx_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
-
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload DPM Bootup Levels!",
return -1);
@@ -1412,22 +1487,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
"Failed to upload DPM Max Levels!",
return -1);
+
return 0;
}
-#if 0
static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+ struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
- if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
- table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
- table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
- *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -1435,13 +1516,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
- *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
- *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
}
+
return 0;
}
-#endif
static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
@@ -1465,11 +1546,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
-#if 0
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
-#endif
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1485,27 +1564,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-#if 0
ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
- vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
- vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
-#endif
+ vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+ vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
-#if 0
- if (!ret) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
- }
-#endif
+
return ret;
}
@@ -1539,24 +1609,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
PPCLK_e clock_select,
bool max)
{
- int result;
- *clock = 0;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
- if (max) {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get max clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- } else {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get min clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- }
+ if (max)
+ *clock = data->clk_range[clock_select].ACMax;
+ else
+ *clock = data->clk_range[clock_select].ACMin;
- return result;
+ return 0;
}
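
The rewrite above swaps a per-call SMC query for a lookup in data->clk_range. This hunk does not show where that cache is filled; a plausible initialization sketch, assuming the same PPSMC_MSG_GetMaxDpmFreq/GetMinDpmFreq messages the removed code used plus a smum_get_argument() readback (the function name and call site are hypothetical; the real population happens elsewhere in this series):

/* Hypothetical sketch: fill data->clk_range once at setup time using
 * the SMC queries the removed per-call path issued. */
static int vega12_init_clock_ranges(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t clk;

	for (clk = 0; clk < PPCLK_COUNT; clk++) {
		if (smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetMaxDpmFreq, clk << 16))
			return -EINVAL;
		data->clk_range[clk].ACMax = smum_get_argument(hwmgr);

		if (smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetMinDpmFreq, clk << 16))
			return -EINVAL;
		data->clk_range[clk].ACMin = smum_get_argument(hwmgr);
	}
	return 0;
}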
static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
@@ -1571,12 +1631,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.gfx_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1603,13 +1663,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.mem_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
- clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
-
+ clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
+ data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
clocks->data[i].latency_in_us =
data->mclk_latency_table.entries[i].latency =
vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -1633,12 +1692,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.dcef_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1661,12 +1720,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.soc_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1713,99 +1772,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
- int result = 0;
- uint32_t i;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
if (!data->registry_data.disable_water_mark &&
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported) {
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
- table->WatermarkRow[WM_DCEFCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
- }
-
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
- }
+ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap |= WaterMarksExist;
data->water_marks_bitmap &= ~WaterMarksLoaded;
}
- return result;
+ return 0;
}
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
- return -EINVAL;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
switch (type) {
case PP_SCLK:
- data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
case PP_MCLK:
- data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
@@ -1838,8 +1867,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_MCLK:
@@ -1854,8 +1883,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_PCIE:
@@ -1867,6 +1896,205 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
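
The *100 to *1000 change corrects the unit conversion: DPM level values are stored in MHz, so kilohertz needs a factor of 1000, and the sysfs printer divides by 1000 on the way back out. The comparison divides now by 100, which implies the current-clock readback is in 10 kHz units; under that assumption, a worked example for an 800 MHz level:

#include <stdio.h>

int main(void)
{
	unsigned int level_mhz = 800;			/* dpm_levels[i].value */
	unsigned int clocks_in_khz = level_mhz * 1000;	/* 800000 kHz */
	unsigned int now = 80000;			/* readback in 10 kHz units (assumed) */

	/* Both sides of the comparison reduce to MHz. */
	printf("%uMhz %s\n", clocks_in_khz / 1000,
	       (clocks_in_khz / 1000 == now / 100) ? "*" : "");
	return 0;	/* prints: 800Mhz * */
}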
+static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *dpm_table;
+ bool vblank_too_short = false;
+ bool disable_mclk_switching;
+ uint32_t i, latency;
+
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ vblank_too_short;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+
+ /* gfxclk */
+ dpm_table = &(data->dpm_table.gfx_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* honour DAL's UCLK Hardmin */
+ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
+ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+ if (data->mclk_latency_table.entries[i].latency <= latency) {
+ if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+ break;
+ }
+ }
+ }
+ }
+
+ if (hwmgr->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ /* vclk */
+ dpm_table = &(data->dpm_table.vclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* dclk */
+ dpm_table = &(data->dpm_table.dclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* socclk */
+ dpm_table = &(data->dpm_table.soc_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* eclk */
+ dpm_table = &(data->dpm_table.eclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ return 0;
+}
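
Every table above gets the same treatment: reset the soft and hard windows to the full range, then pin the soft window to a fixed UMD pstate level when PHM_PlatformCaps_UMDPState is set. A hypothetical helper, not part of this patch, that would factor out the repetition:

/* Hypothetical refactor, not in the patch: reset a table's DPM window
 * and optionally pin the soft window to a single UMD pstate level. */
static void vega12_clamp_dpm_window(struct vega12_single_dpm_table *t,
				    bool umd_pstate, uint32_t umd_level)
{
	uint32_t lo = t->dpm_levels[0].value;
	uint32_t hi = t->dpm_levels[t->count - 1].value;

	t->dpm_state.soft_min_level = lo;
	t->dpm_state.soft_max_level = hi;
	t->dpm_state.hard_min_level = lo;
	t->dpm_state.hard_max_level = hi;

	if (umd_pstate && umd_level < t->count) {
		t->dpm_state.soft_min_level = t->dpm_levels[umd_level].value;
		t->dpm_state.soft_max_level = t->dpm_levels[umd_level].value;
	}
}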
+
+static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+ "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
+ "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
+ return -EINVAL);
+
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
+ "[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
+ return ret);
+ }
+
+ return ret;
+}
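
The SMC parameter packs the clock selector into the high 16 bits and the frequency into the low 16, the same (clock << 16) | freq encoding used by the min/max frequency queries earlier. A small runnable check of the packing (the PPCLK_UCLK value here is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ppclk_uclk = 2;	/* illustrative selector value */
	uint32_t freq_mhz = 1000;	/* hard_min_level from the DPM table */
	uint32_t param = (ppclk_uclk << 16) | freq_mhz;

	printf("param=0x%08x clk=%u freq=%u\n",
	       param, param >> 16, param & 0xffff);
	return 0;	/* param=0x000203e8 clk=2 freq=1000 */
}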
+
+static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays, 0);
+
+ ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
+ &data->dpm_table.mem_table);
+
+ return ret;
+}
+
static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
@@ -1911,6 +2139,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->vce_power_gated == bgate)
+ return;
+
data->vce_power_gated = bgate;
vega12_enable_disable_vce_dpm(hwmgr, !bgate);
}
@@ -1919,6 +2150,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->uvd_power_gated == bgate)
+ return;
+
data->uvd_power_gated = bgate;
vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
}
@@ -2086,6 +2320,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->gfxoff_controlled_by_driver)
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+
+ return ret;
+}
+
+static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->gfxoff_controlled_by_driver)
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+
+ return ret;
+}
+
+static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+ if (enable)
+ return vega12_enable_gfx_off(hwmgr);
+ else
+ return vega12_disable_gfx_off(hwmgr);
+}
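
vega12_gfx_off_control() is registered below as the powergate_gfx hook, and both paths quietly no-op unless gfxoff_controlled_by_driver is set. A sketch of how a caller plausibly reaches it through the hwmgr function table (the guard logic is an assumption about the amdgpu glue, not shown in this patch):

/* Sketch only: how the hook is plausibly invoked via the function
 * table; the actual call site lives in the powerplay/amdgpu glue. */
static int pp_gfx_off(struct pp_hwmgr *hwmgr, bool enable)
{
	if (!hwmgr || !hwmgr->hwmgr_func ||
	    !hwmgr->hwmgr_func->powergate_gfx)
		return 0;	/* the hook is optional */

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, enable);
}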
+
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
@@ -2113,6 +2379,10 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.display_clock_voltage_request = vega12_display_clock_voltage_request,
.force_clock_level = vega12_force_clock_level,
.print_clock_levels = vega12_print_clock_levels,
+ .apply_clocks_adjust_rules =
+ vega12_apply_clocks_adjust_rules,
+ .pre_display_config_changed =
+ vega12_pre_display_configuration_changed_task,
.display_config_changed = vega12_display_configuration_changed_task,
.powergate_uvd = vega12_power_gate_uvd,
.powergate_vce = vega12_power_gate_vce,
@@ -2131,6 +2401,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.get_thermal_temperature_range = vega12_get_thermal_temperature_range,
.register_irq_handlers = smu9_register_irq_handlers,
.start_thermal_controller = vega12_start_thermal_controller,
+ .powergate_gfx = vega12_gfx_off_control,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e81ded1ec198..b3e424d28994 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
uint32_t mem_clock;
uint32_t soc_clock;
uint32_t dcef_clock;
+ uint32_t eclock;
+ uint32_t dclock;
+ uint32_t vclock;
};
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
@@ -301,6 +304,12 @@ struct vega12_odn_fan_table {
bool force_fan_pwm;
};
+struct vega12_clock_range {
+ uint32_t ACMax;
+ uint32_t ACMin;
+ uint32_t DCMax;
+};
+
struct vega12_hwmgr {
struct vega12_dpm_table dpm_table;
struct vega12_dpm_table golden_dpm_table;
@@ -382,6 +391,11 @@ struct vega12_hwmgr {
uint32_t smu_version;
struct smu_features smu_features[GNLD_FEATURES_MAX];
struct vega12_smc_state_table smc_state_table;
+
+ struct vega12_clock_range clk_range[PPCLK_COUNT];
+
+ /* ---- Gfxoff ---- */
+ bool gfxoff_controlled_by_driver;
};
#define VEGA12_DPM2_NEAR_TDP_DEC 10
@@ -432,6 +446,8 @@ struct vega12_hwmgr {
#define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
+#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3
+#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 888ddca902d8..cb3a5b1737c8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -224,11 +224,9 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
- /* 0xFFFF will disable the ACG feature */
- if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
- ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
- ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
- }
+ ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index cfd9e6ccb790..904eb2c9155b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm),
"Attempt to get current RPM from SMC Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr,
- current_rpm),
- "Attempt to read current RPM from SMC Failed!",
- return -1);
+ return -EINVAL);
+ *current_rpm = smum_get_argument(hwmgr);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a202247c9894..429c9c4322da 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ void *clock_ranges);
extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index b99fb8ac822c..d3d96260f440 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -26,7 +26,6 @@
#include <linux/seq_file.h>
#include "amd_powerplay.h"
#include "hardwaremanager.h"
-#include "pp_power_source.h"
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
@@ -195,7 +194,7 @@ struct pp_smumgr_func {
int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
uint32_t firmware);
- int (*get_argument)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_argument)(struct pp_hwmgr *hwmgr);
int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
@@ -294,8 +293,7 @@ struct pp_hwmgr_func {
int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
- int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges);
int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
@@ -303,7 +301,7 @@ struct pp_hwmgr_func {
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
- int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
@@ -328,7 +326,7 @@ struct pp_hwmgr_func {
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
- int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+ int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
};
@@ -741,7 +739,6 @@ struct pp_hwmgr {
const struct pp_table_func *pptable_func;
struct pp_power_state *ps;
- enum pp_power_source power_source;
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 6c22ed9249bf..82550a8a3a3f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -29,7 +29,6 @@
enum SMU_TABLE {
SMU_UVD_TABLE = 0,
SMU_VCE_TABLE,
- SMU_SAMU_TABLE,
SMU_BIF_TABLE,
};
@@ -47,7 +46,6 @@ enum SMU_MEMBER {
UcodeLoadStatus,
UvdBootLevel,
VceBootLevel,
- SamuBootLevel,
LowSclkInterruptThreshold,
DRAM_LOG_ADDR_H,
DRAM_LOG_ADDR_L,
@@ -82,7 +80,7 @@ enum SMU10_TABLE_ID {
SMU10_CLOCKTABLE,
};
-extern int smum_get_argument(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index 2f8a3b983cce..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,10 +412,10 @@ typedef struct {
QuadraticInt_t ReservedEquation2;
QuadraticInt_t ReservedEquation3;
- uint16_t MinVoltageUlvGfx;
- uint16_t MinVoltageUlvSoc;
+ uint16_t MinVoltageUlvGfx;
+ uint16_t MinVoltageUlvSoc;
- uint32_t Reserved[14];
+ uint32_t Reserved[14];
@@ -483,9 +483,9 @@ typedef struct {
uint8_t padding8_4;
- uint8_t PllGfxclkSpreadEnabled;
- uint8_t PllGfxclkSpreadPercent;
- uint16_t PllGfxclkSpreadFreq;
+ uint8_t PllGfxclkSpreadEnabled;
+ uint8_t PllGfxclkSpreadPercent;
+ uint16_t PllGfxclkSpreadFreq;
uint8_t UclkSpreadEnabled;
uint8_t UclkSpreadPercent;
@@ -495,11 +495,14 @@ typedef struct {
uint8_t SocclkSpreadPercent;
uint16_t SocclkSpreadFreq;
- uint8_t AcgGfxclkSpreadEnabled;
- uint8_t AcgGfxclkSpreadPercent;
- uint16_t AcgGfxclkSpreadFreq;
+ uint8_t AcgGfxclkSpreadEnabled;
+ uint8_t AcgGfxclkSpreadPercent;
+ uint16_t AcgGfxclkSpreadFreq;
- uint32_t BoardReserved[10];
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+
+ uint32_t BoardReserved[9];
uint32_t MmHubPadding[7];
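
The BoardReserved shrink from 10 to 9 words balances the four bytes added by Vr2_I2C_address plus its three padding bytes, keeping the PPTable layout the SMC firmware expects at the same size. A standalone compile-time check of that bookkeeping, with stand-in structs for just the affected tail:

#include <stdint.h>

/* Stand-in fragments of the old and new tails of the board section. */
struct old_tail { uint32_t BoardReserved[10]; };
struct new_tail {
	uint8_t  Vr2_I2C_address;
	uint8_t  padding_vr2[3];
	uint32_t BoardReserved[9];
};

/* 1 + 3 + 9*4 == 10*4, so the table size is unchanged. */
_Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
	       "Vr2_I2C_address must not change the PPTable size");

int main(void) { return 0; }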
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 0a200406a1ec..8d557accaef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -26,7 +26,7 @@
SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
polaris10_smumgr.o iceland_smumgr.o \
smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
- vega12_smumgr.o vegam_smumgr.o
+ vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 2d4ec8ac3a08..fbe3ef4ee45c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -1614,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU7_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_samu_clock_voltage_dependency_table *samu_table =
- hwmgr->dyn_state.samu_clock_voltage_dependency_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(samu_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
- table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
- table->SamuLevel[count].MinPhases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int ci_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -2026,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result);
- result = ci_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
result = ci_program_memory_timing_parameters(hwmgr);
@@ -2881,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
return 0;
}
+static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = hwmgr->backend;
+ struct ci_smumgr *smu_data = hwmgr->smu_backend;
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+ int32_t i;
+
+ if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ else
+ smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+ UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
+
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+ for (i = uvd_table->count - 1; i >= 0; i--) {
+ if (uvd_table->entries[i].v <= max_vddc)
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
+ break;
+ }
+ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+ return 0;
+}
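
The loop walks the UVD dependency table from the top down, setting an enable bit for each level whose voltage fits under the current AC or DC limit, and breaks after the first iteration when a profiling mode is active or UVD DPM is unsupported, so at most the highest level stays enabled. A runnable model of the mask construction (the voltages are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t volt[4] = { 800, 900, 950, 1000 };	/* mV per level, made up */
	uint32_t max_vddc = 1050, mask = 0;
	int profile_mode = 1;	/* e.g. PROFILE_PEAK in effect */
	int i;

	for (i = 3; i >= 0; i--) {
		if (volt[i] <= max_vddc)
			mask |= 1u << i;
		if (profile_mode)
			break;	/* keep only the highest valid level */
	}
	printf("uvd_dpm_enable_mask=0x%x\n", mask);	/* prints 0x8 */
	return 0;
}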
+
+static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = hwmgr->backend;
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+ int32_t i;
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+ VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk */
+
+ data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+
+ for (i = vce_table->count - 1; i >= 0; i--) {
+ if (vce_table->entries[i].v <= max_vddc)
+ data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
+ break;
+ }
+ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.vce_dpm_enable_mask);
+
+ return 0;
+}
+
+static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ ci_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ ci_update_vce_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.smu_init = ci_smu_init,
.smu_fini = ci_smu_fini,
@@ -2903,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.initialize_mc_reg_table = ci_initialize_mc_reg_table,
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
+ .update_smc_table = ci_update_smc_table,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 53df9405f43a..18048f8e2f13 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -1503,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2028,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result);
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2378,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
@@ -2478,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
switch (type) {
@@ -2514,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
fiji_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- fiji_update_samu_smc_table(hwmgr);
- break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 415f691c3fa9..9299b93aa09a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -1578,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- return 0;
-}
-
static int iceland_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -1992,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result;);
- result = iceland_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
-
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
result = iceland_program_memory_timing_parameters(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index a8c6524f07e4..1276f168ff68 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v1_information *)(hwmgr->pptable);
SMIO_Pattern vol_level;
uint32_t mvdd;
- uint16_t us_mvdd;
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
"in Clock Dependency Table",
);
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
+ if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled)))
+ polaris10_populate_mvdd_value(hwmgr,
data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
+ &vol_level);
if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
@@ -1337,55 +1331,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
return result;
}
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -1566,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1617,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
+ if (stretch_amount == 0 || stretch_amount > 5) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
@@ -1865,10 +1806,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize VCE Level!", return result);
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2222,34 +2159,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
@@ -2276,9 +2185,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
polaris10_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- polaris10_update_samu_smc_table(hwmgr);
- break;
case SMU_BIF_TABLE:
polaris10_update_bif_smc_table(hwmgr);
default:
@@ -2357,8 +2263,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 0a563f6fe9ea..bb07d43f3874 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -68,7 +68,7 @@ static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
return 0;
}
-static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
+static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index d644a9bb9078..a029e47c2319 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -379,8 +379,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_to_load;
- int result = 0;
- struct SMU_DRAMData_TOC *toc;
+ int r = 0;
if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
@@ -421,49 +420,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ UCODE_ID_CP_MEC_JT2_MASK;
}
- toc = (struct SMU_DRAMData_TOC *)smu_data->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
+ if (!smu_data->toc) {
+ struct SMU_DRAMData_TOC *toc;
+
+ smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
+ if (!smu_data->toc)
+ return -ENOMEM;
+ toc = smu_data->toc;
+ toc->num_entries = 0;
+ toc->structure_version = 1;
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- if (!hwmgr->not_vf)
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ if (!hwmgr->not_vf)
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
-
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ }
+ memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
+ sizeof(struct SMU_DRAMData_TOC));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
- return result;
+ return r;
+
+failed:
+ kfree(smu_data->toc);
+ smu_data->toc = NULL;
+ return r;
}
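
Net effect of this hunk: the TOC is built and populated once, cached on smu_data->toc, and later firmware-load requests only pay for the memcpy_toio() into the VRAM header buffer; the failed label frees and clears the cache so a subsequent call can rebuild it. The skeleton, condensed from the hunk above (error handling trimmed, not a standalone function):

/* Skeleton of the caching added above; error handling trimmed. */
if (!smu_data->toc) {
	smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
	if (!smu_data->toc)
		return -ENOMEM;
	/* populate toc->entry[] once; on failure, free and NULL the cache */
}
memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
	    sizeof(struct SMU_DRAMData_TOC));	/* cheap per-call copy */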
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
@@ -570,7 +582,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
- uint64_t mc_addr = 0;
int r;
/* Allocate memory for backend private data */
smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
@@ -584,15 +595,12 @@ int smu7_init(struct pp_hwmgr *hwmgr)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&smu_data->header_buffer.handle,
- &mc_addr,
+ &smu_data->header_buffer.mc_addr,
&smu_data->header_buffer.kaddr);
if (r)
return -EINVAL;
- smu_data->header = smu_data->header_buffer.kaddr;
- smu_data->header_buffer.mc_addr = mc_addr;
-
if (!hwmgr->not_vf)
return 0;
@@ -602,7 +610,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&smu_data->smu_buffer.handle,
- &mc_addr,
+ &smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr);
if (r) {
@@ -611,7 +619,6 @@ int smu7_init(struct pp_hwmgr *hwmgr)
&smu_data->header_buffer.kaddr);
return -EINVAL;
}
- smu_data->smu_buffer.mc_addr = mc_addr;
if (smum_is_hw_avfs_present(hwmgr))
hwmgr->avfs_supported = true;
@@ -633,6 +640,9 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
&smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr);
+
+ kfree(smu_data->toc);
+ smu_data->toc = NULL;
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 39c9bfda0ab4..01f0538fba6b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -37,10 +37,9 @@ struct smu7_buffer_entry {
};
struct smu7_smumgr {
- uint8_t *header;
- uint8_t *mec_image;
struct smu7_buffer_entry smu_buffer;
struct smu7_buffer_entry header_buffer;
+ struct SMU_DRAMData_TOC *toc;
uint32_t soft_regs_start;
uint32_t dpm_table_start;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index c861d3023474..f7e3bc22bb93 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = {
SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
-static int smu8_get_argument(struct pp_hwmgr *hwmgr)
+static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
{
if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
+ return 0;
return cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_ARG_0);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
new file mode 100644
index 000000000000..079fc8e8f709
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "vega10_inc.h"
+#include "soc15_common.h"
+#include "pp_debug.h"
+
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t mp1_fw_flags;
+
+ WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
+ (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+
+ mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+
+ if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if SMC has responded to previous message.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return the contents of the SMC response register; nonzero means the SMC has responded.
+ */
+static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg;
+ uint32_t ret;
+
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ if (ret)
+ pr_err("No response from smu\n");
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+/*
+ * Send a message to the SMC, and do not wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always returns 0.
+ */
+static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC, and wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always returns 0.
+ */
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t ret;
+
+ smu9_wait_for_response(hwmgr);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ ret = smu9_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC with a parameter.
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send.
+ * @return Always returns 0.
+ */
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t ret;
+
+ smu9_wait_for_response(hwmgr);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+
+ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ ret = smu9_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
+
+ return 0;
+}
+
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+}
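
The helpers above implement the standard SMU9 mailbox handshake: the driver
clears response register C2PMSG_90, posts the message ID to C2PMSG_66 (for
parameterised messages, the argument goes to C2PMSG_82 first), polls
C2PMSG_90 until it goes non-zero (1 on success), then reads any reply back
out of C2PMSG_82. A minimal caller sketch under those assumptions, modelled
on the vega10_get_smc_features() conversion later in this patch (the
function name is illustrative):

	/* Usage sketch for the shared smu9 helpers introduced above;
	 * error handling elided for brevity.
	 */
	static int example_get_enabled_features(struct pp_hwmgr *hwmgr,
			uint32_t *features)
	{
		if (!smu9_is_smc_ram_running(hwmgr))
			return -EINVAL;

		/* Posts the message and polls C2PMSG_90 for the ack. */
		smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);

		/* The SMU leaves its 32-bit reply in C2PMSG_82. */
		*features = smu9_get_argument(hwmgr);

		return 0;
	}
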
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
new file mode 100644
index 000000000000..1462279ca128
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU9_SMUMANAGER_H_
+#define _SMU9_SMUMANAGER_H_
+
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c9837935f0f5..99d5e4f98f49 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -96,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
return 0;
}
-int smum_get_argument(struct pp_hwmgr *hwmgr)
+uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr_funcs->get_argument)
return hwmgr->smumgr_funcs->get_argument(hwmgr);
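
With the backend accessors retyped, the smum wrapper now hands back the raw
argument-register contents instead of an int status; this is also why
smu8_get_argument() above returns 0 rather than -EINVAL on a NULL hwmgr. A
sketch of the resulting caller pattern (function name illustrative):

	/* Illustrative caller: the wrapper dispatches through
	 * hwmgr->smumgr_funcs->get_argument (e.g. smu9_get_argument),
	 * so a failure is no longer distinguishable from a zero reply.
	 */
	static uint32_t example_last_smu_reply(struct pp_hwmgr *hwmgr)
	{
		return smum_get_argument(hwmgr);
	}
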
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 782b19fc2e70..7dabc6c456e1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -1443,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
static int tonga_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -2323,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize ACP Level !", return result);
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level !", return result);
-
/* Since only the initial state is completely set up at this
* point (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2673,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
@@ -2773,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
switch (type) {
@@ -2808,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
tonga_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- tonga_update_samu_smc_table(hwmgr);
- break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index e84669c448a3..5d19115f410c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -28,142 +28,11 @@
#include "vega10_hwmgr.h"
#include "vega10_ppsmc.h"
#include "smu9_driver_if.h"
+#include "smu9_smumgr.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
-#define AVFS_EN_MSB 1568
-#define AVFS_EN_LSB 1568
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072 /* 128 *1024 */
-
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-#define smnMP0_FW_INTF 0x3010104
-#define smnMP1_PUB_CTRL 0x3010b14
-
-static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t mp1_fw_flags;
-
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
- if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
- return true;
-
- return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @return TRUE SMC has responded, FALSE otherwise.
- */
-static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
- uint32_t ret;
-
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
- ret = phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- if (ret)
- pr_err("No response from smu\n");
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t ret;
-
- vega10_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- ret = vega10_wait_for_response(hwmgr);
- if (ret != 1)
- pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t ret;
-
- vega10_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- ret = vega10_wait_for_response(hwmgr);
- if (ret != 1)
- pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
-
- return 0;
-}
-
-static int vega10_get_argument(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-}
-
static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
@@ -175,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
@@ -206,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id);
@@ -225,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
- *features_enabled = vega10_get_argument(hwmgr);
+ smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
+ *features_enabled = smu9_get_argument(hwmgr);
return 0;
}
@@ -248,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
struct vega10_smumgr *priv = hwmgr->smu_backend;
if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
}
@@ -265,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
+ PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- smc_driver_if_version = vega10_get_argument(hwmgr);
+ smc_driver_if_version = smu9_get_argument(hwmgr);
dev_id = adev->pdev->device;
rev_id = adev->pdev->revision;
@@ -441,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
- if (!vega10_is_smc_ram_running(hwmgr))
+ if (!smu9_is_smc_ram_running(hwmgr))
return -EINVAL;
PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
@@ -453,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+ uint16_t table_id, bool rw)
{
int ret;
@@ -470,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = {
.smu_fini = &vega10_smu_fini,
.start_smu = &vega10_start_smu,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &vega10_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = &smu9_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.is_dpm_running = vega10_is_dpm_running,
- .get_argument = vega10_get_argument,
+ .get_argument = smu9_get_argument,
.smc_table_manager = vega10_smc_table_manager,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 7d9b40e8b1bf..7f0e2109f40d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -24,157 +24,14 @@
#include "smumgr.h"
#include "vega12_inc.h"
#include "soc15_common.h"
+#include "smu9_smumgr.h"
#include "vega12_smumgr.h"
#include "vega12_ppsmc.h"
#include "vega12/smu9_driver_if.h"
-
#include "ppatomctrl.h"
#include "pp_debug.h"
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-#define smnMP0_FW_INTF 0x3010104
-#define smnMP1_PUB_CTRL 0x3010b14
-
-static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t mp1_fw_flags;
-
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
- if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return true;
-
- return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @return TRUE SMC has responded, FALSE otherwise.
- */
-static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
- phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- vega12_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (vega12_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- vega12_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (vega12_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
-
- return 0;
-}
-
-
-/*
- * Send a message to the SMC with parameter, do not wait for response
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return The response that came from the SMC.
- */
-int vega12_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
-
- return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-}
-
-/*
- * Retrieve an argument from SMC.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param arg pointer to store the argument from SMC.
- * @return Always return 0.
- */
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-
- return 0;
-}
-
/*
* Copy table from SMC into driver FB
* @param hwmgr the address of the HW manager
@@ -192,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -234,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -262,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
return -EINVAL);
} else {
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
return -EINVAL);
@@ -292,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
- &smc_features_low) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+ smc_features_low = smu9_get_argument(hwmgr);
+
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
- &smc_features_high) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
- return -EINVAL);
+ smc_features_high = smu9_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
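
The two GetEnabledSmuFeatures replies are the 32-bit halves of a single
64-bit feature mask. SMU_FEATURES_HIGH_SHIFT/MASK come from
vega12_smumgr.h; the LOW shift of 0 and mask of 0xFFFFFFFF are assumed here
for the standalone sketch of the packing arithmetic below:

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone sketch of the feature-mask packing done in
	 * vega12_get_enabled_smc_features(); LOW values assumed.
	 */
	#define SMU_FEATURES_LOW_SHIFT   0
	#define SMU_FEATURES_LOW_MASK    0x00000000FFFFFFFFULL
	#define SMU_FEATURES_HIGH_SHIFT  32
	#define SMU_FEATURES_HIGH_MASK   0xFFFFFFFF00000000ULL

	int main(void)
	{
		uint32_t lo = 0x0000ABCD; /* GetEnabledSmuFeaturesLow reply */
		uint32_t hi = 0x00000012; /* GetEnabledSmuFeaturesHigh reply */

		uint64_t mask =
			(((uint64_t)lo << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
			(((uint64_t)hi << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK);

		printf("0x%016llx\n", (unsigned long long)mask); /* 0x000000120000abcd */
		return 0;
	}
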
@@ -333,39 +185,16 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
(struct vega12_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- if (!vega12_send_msg_to_smc_with_parameter(hwmgr,
+ if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
- vega12_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
}
return 0;
}
-#if 0 /* tentatively remove */
-static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
- uint32_t smc_driver_if_version;
-
- PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
- "Attempt to get SMC IF Version Number Failed!",
- return -EINVAL);
- vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
-
- if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
- pr_err("Your firmware(0x%x) doesn't match \
- SMU9_DRIVER_IF_VERSION(0x%x). \
- Please update your firmware!\n",
- smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
- return -EINVAL;
- }
-
- return 0;
-}
-#endif
-
static int vega12_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega12_smumgr *priv;
@@ -513,16 +342,10 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
static int vega12_start_smu(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr),
+ PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr),
"SMC is not running!",
return -EINVAL);
-#if 0 /* tentatively remove */
- PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
- "Failed to verify SMC interface!",
- return -EINVAL);
-#endif
-
vega12_set_tools_address(hwmgr);
return 0;
@@ -533,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = {
.smu_fini = &vega12_smu_fini,
.start_smu = &vega12_start_smu,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &vega12_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = &smu9_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.is_dpm_running = vega12_is_dpm_running,
+ .get_argument = smu9_get_argument,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
index 2810d387b611..b285cbc04019 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -48,7 +48,6 @@ struct vega12_smumgr {
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 2de48959ac93..57420d7caa4e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -393,34 +393,6 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
{
struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
@@ -447,9 +419,6 @@ static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
vegam_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- vegam_update_samu_smc_table(hwmgr);
- break;
case SMU_BIF_TABLE:
vegam_update_bif_smc_table(hwmgr);
break;
@@ -1281,54 +1250,6 @@ static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU75_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2062,10 +1983,6 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize VCE Level!", return result);
- result = vegam_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2273,8 +2190,6 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 16903dc7fe0d..965cda48dc13 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -136,9 +136,6 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- if (!crtc->primary->fb)
- return;
-
clk_disable_unprepare(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
@@ -189,7 +186,7 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
static void arc_pgu_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
index b8f6f9a5dfbe..68629e614990 100644
--- a/drivers/gpu/drm/arc/arcpgu_sim.c
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -99,7 +99,7 @@ int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
goto error_encoder_cleanup;
}
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
dev_err(drm->dev, "could not attach connector to encoder\n");
drm_connector_unregister(connector);
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
index bb8b158ff90d..3bf31d1a4722 100644
--- a/drivers/gpu/drm/arm/Makefile
+++ b/drivers/gpu/drm/arm/Makefile
@@ -1,4 +1,5 @@
hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+mali-dp-y += malidp_mw.o
obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index cf5cbd63ecdf..e4d67b70244d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -229,6 +229,8 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ int i;
+ struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
u32 src_h = state->src_h >> 16;
@@ -238,20 +240,17 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- if (!state->fb || !state->crtc)
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
- if (!crtc_state) {
- DRM_DEBUG_KMS("Invalid crtc state\n");
- return -EINVAL;
+ for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
+ /* we cannot disable the plane while the CRTC is active */
+ if (!state->fb && crtc_state->active)
+ return -EINVAL;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
- return drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ return 0;
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -280,16 +279,10 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
.atomic_update = hdlcd_plane_atomic_update,
};
-static void hdlcd_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_helper_disable(plane);
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs hdlcd_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = hdlcd_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -334,10 +327,8 @@ int hdlcd_setup_crtc(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
&hdlcd_crtc_funcs, NULL);
- if (ret) {
- hdlcd_plane_destroy(primary);
+ if (ret)
return ret;
- }
drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
return 0;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index feaa8bc3d7b7..0ed1cde98cf8 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -27,6 +27,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include "hdlcd_drv.h"
@@ -100,16 +101,9 @@ setup_fail:
return ret;
}
-static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = hdlcd_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -124,13 +118,6 @@ static void hdlcd_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
}
-static void hdlcd_lastclose(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(hdlcd->fbdev);
-}
-
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
struct drm_device *drm = arg;
@@ -246,7 +233,7 @@ static struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = hdlcd_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = hdlcd_irq,
.irq_preinstall = hdlcd_irq_preinstall,
.irq_postinstall = hdlcd_irq_postinstall,
@@ -321,14 +308,9 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
-
- if (IS_ERR(hdlcd->fbdev)) {
- ret = PTR_ERR(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret)
goto err_fbdev;
- }
ret = drm_dev_register(drm, 0);
if (ret)
@@ -337,15 +319,13 @@ static int hdlcd_drm_bind(struct device *dev)
return 0;
err_register:
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
err_fbdev:
drm_kms_helper_poll_fini(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
err_unload:
of_node_put(hdlcd->crtc.port);
@@ -366,23 +346,23 @@ static void hdlcd_drm_unbind(struct device *dev)
struct hdlcd_drm_private *hdlcd = drm->dev_private;
drm_dev_unregister(drm);
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- pm_runtime_get_sync(drm->dev);
+ pm_runtime_get_sync(dev);
+ drm_crtc_vblank_off(&hdlcd->crtc);
drm_irq_uninstall(drm);
- pm_runtime_put_sync(drm->dev);
- pm_runtime_disable(drm->dev);
- of_reserved_mem_device_release(drm->dev);
+ drm_atomic_helper_shutdown(drm);
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ of_reserved_mem_device_release(dev);
drm_mode_config_cleanup(drm);
- drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
+ drm_dev_put(drm);
}
static const struct component_master_ops hdlcd_master_ops = {
@@ -427,35 +407,15 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_kms_helper_poll_disable(drm);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
-
- hdlcd->state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(hdlcd->state)) {
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(hdlcd->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_atomic_helper_resume(drm, hdlcd->state);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
+ drm_mode_config_helper_resume(drm);
return 0;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index 56f34dfff640..fd438d177b64 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,8 @@
struct hdlcd_drm_private {
void __iomem *mmio;
struct clk *clk;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
struct drm_plane *plane;
- struct drm_atomic_state *state;
#ifdef CONFIG_DEBUG_FS
atomic_t buffer_underrun_count;
atomic_t bus_error_count;
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index fcc62bc60f6a..ef44202fb43f 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -411,6 +411,16 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ /* If only the writeback routing has changed, we don't need a modeset */
+ if (state->connectors_changed) {
+ u32 old_mask = crtc->state->connector_mask;
+ u32 new_mask = state->connector_mask;
+
+ if ((old_mask ^ new_mask) ==
+ (1 << drm_connector_index(&malidp->mw_connector.base)))
+ state->connectors_changed = false;
+ }
+
ret = malidp_crtc_atomic_check_gamma(crtc, state);
ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
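
The XOR test above treats the connector mask as a bitset: when the only bit
that differs between the old and new state belongs to the writeback
connector, the change is purely writeback routing and no full modeset is
forced. A small standalone illustration (connector indices hypothetical):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Standalone illustration of the connector-mask test in
	 * malidp_crtc_atomic_check(); indices are hypothetical.
	 */
	int main(void)
	{
		uint32_t mw_index = 1;       /* writeback connector index */
		uint32_t old_mask = 1 << 0;  /* panel only */
		uint32_t new_mask = (1 << 0) | (1 << mw_index); /* + writeback */

		bool only_writeback_changed =
			(old_mask ^ new_mask) == (1u << mw_index);

		printf("%s\n", only_writeback_changed ? "skip modeset" : "modeset");
		return 0;
	}
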
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 8d20faa198cf..08b5bb219816 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -17,6 +17,7 @@
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
#include <drm/drm_of.h>
#include "malidp_drv.h"
+#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"
@@ -170,14 +172,15 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
- atomic_set(&malidp->config_valid, 0);
- hwdev->hw->set_config_valid(hwdev);
+ hwdev->hw->set_config_valid(hwdev, 1);
/* don't wait for config_valid flag if we are in config mode */
- if (hwdev->hw->in_config_mode(hwdev))
+ if (hwdev->hw->in_config_mode(hwdev)) {
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
return 0;
+ }
ret = wait_event_interruptible_timeout(malidp->wq,
- atomic_read(&malidp->config_valid) == 1,
+ atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
return (ret > 0) ? 0 : -ETIMEDOUT;
@@ -216,12 +219,20 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
+ struct malidp_drm *malidp = drm->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
pm_runtime_get_sync(drm->dev);
+ /*
+ * set config_valid to a special value to let IRQ handlers
+ * know that we are updating registers
+ */
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
+ malidp->dev->hw->set_config_valid(malidp->dev, 0);
+
drm_atomic_helper_commit_modeset_disables(drm, state);
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
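
The MALIDP_CONFIG_START value parks the flag in a state the interrupt
handlers recognise as "registers in flux", so a stray config-valid
interrupt cannot prematurely report completion. The IRQ-side counterpart is
not part of this excerpt; the sketch below illustrates the implied
protocol, not the driver's literal handler:

	/* Illustrative IRQ-side half of the config_valid handshake.
	 * The real handler lives in malidp_hw.c and is not shown in
	 * this patch; the transition condition here is an assumption.
	 */
	static void example_cval_irq(struct malidp_drm *malidp)
	{
		/* Ignore the interrupt while commit_tail is still
		 * rewriting registers (config_valid == MALIDP_CONFIG_START).
		 */
		if (atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START) {
			atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
			wake_up(&malidp->wq);
		}
	}
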
@@ -230,7 +241,9 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
malidp_atomic_commit_se_config(crtc, old_crtc_state);
}
- drm_atomic_helper_commit_planes(drm, state, 0);
+ drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ malidp_mw_atomic_commit(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
@@ -268,17 +281,22 @@ static int malidp_init(struct drm_device *drm)
drm->mode_config.helper_private = &malidp_mode_config_helpers;
ret = malidp_crtc_init(drm);
- if (ret) {
- drm_mode_config_cleanup(drm);
- return ret;
- }
+ if (ret)
+ goto crtc_fail;
+
+ ret = malidp_mw_connector_init(drm);
+ if (ret)
+ goto crtc_fail;
return 0;
+
+crtc_fail:
+ drm_mode_config_cleanup(drm);
+ return ret;
}
static void malidp_fini(struct drm_device *drm)
{
- drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
}
@@ -286,6 +304,8 @@ static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
/* fetch the interrupts from DT */
irq_de = platform_get_irq_byname(pdev, "DE");
@@ -305,7 +325,7 @@ static int malidp_irq_init(struct platform_device *pdev)
ret = malidp_se_irq_init(drm, irq_se);
if (ret) {
- malidp_de_irq_fini(drm);
+ malidp_de_irq_fini(hwdev);
return ret;
}
@@ -327,6 +347,106 @@ static int malidp_dumb_create(struct drm_file *file_priv,
return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}
+#ifdef CONFIG_DEBUG_FS
+
+static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
+{
+ error_stats->num_errors = 0;
+ error_stats->last_error_status = 0;
+ error_stats->last_error_vblank = -1;
+}
+
+void malidp_error(struct malidp_drm *malidp,
+ struct malidp_error_stats *error_stats, u32 status,
+ u64 vblank)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ error_stats->last_error_status = status;
+ error_stats->last_error_vblank = vblank;
+ error_stats->num_errors++;
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+}
+
+void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
+{
+ seq_printf(m, "[%s] num_errors : %d\n", prefix,
+ error_stats.num_errors);
+ seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix,
+ error_stats.last_error_status);
+ seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
+ error_stats.last_error_vblank);
+}
+
+static int malidp_show_stats(struct seq_file *m, void *arg)
+{
+ struct drm_device *drm = m->private;
+ struct malidp_drm *malidp = drm->dev_private;
+ unsigned long irqflags;
+ struct malidp_error_stats de_errors, se_errors;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ de_errors = malidp->de_errors;
+ se_errors = malidp->se_errors;
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+ malidp_error_stats_dump("DE", de_errors, m);
+ malidp_error_stats_dump("SE", se_errors, m);
+ return 0;
+}
+
+static int malidp_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, malidp_show_stats, inode->i_private);
+}
+
+static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *drm = m->private;
+ struct malidp_drm *malidp = drm->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ malidp_error_stats_init(&malidp->de_errors);
+ malidp_error_stats_init(&malidp->se_errors);
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+ return len;
+}
+
+static const struct file_operations malidp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = malidp_debugfs_open,
+ .read = seq_read,
+ .write = malidp_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int malidp_debugfs_init(struct drm_minor *minor)
+{
+ struct malidp_drm *malidp = minor->dev->dev_private;
+ struct dentry *dentry = NULL;
+
+ malidp_error_stats_init(&malidp->de_errors);
+ malidp_error_stats_init(&malidp->se_errors);
+ spin_lock_init(&malidp->errors_lock);
+ dentry = debugfs_create_file("debug",
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root, minor->dev,
+ &malidp_debugfs_fops);
+ if (!dentry) {
+ DRM_ERROR("Cannot create debug file\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME,
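
The new node appears under the DRM debugfs directory, typically
/sys/kernel/debug/dri/<minor>/debug (path assumed below): a read dumps the
DE and SE error blocks via malidp_show_stats(), and any write resets both
counters. A userspace sketch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Userspace sketch: dump and then reset the malidp error
	 * counters. The debugfs path assumes minor 0 and the default
	 * debugfs mount point.
	 */
	int main(void)
	{
		char buf[256];
		ssize_t n;
		int fd = open("/sys/kernel/debug/dri/0/debug", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);  /* [DE]/[SE] stat lines */

		write(fd, "0", 1);                  /* any write resets stats */
		close(fd);
		return 0;
	}
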
@@ -343,6 +463,9 @@ static struct drm_driver malidp_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = malidp_debugfs_init,
+#endif
.fops = &fops,
.name = "mali-dp",
.desc = "ARM Mali Display Processor driver",
@@ -459,6 +582,8 @@ static int malidp_runtime_pm_suspend(struct device *dev)
/* we can only suspend if the hardware is in config mode */
WARN_ON(!hwdev->hw->in_config_mode(hwdev));
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
hwdev->pm_suspended = true;
clk_disable_unprepare(hwdev->mclk);
clk_disable_unprepare(hwdev->aclk);
@@ -477,6 +602,8 @@ static int malidp_runtime_pm_resume(struct device *dev)
clk_prepare_enable(hwdev->aclk);
clk_prepare_enable(hwdev->mclk);
hwdev->pm_suspended = false;
+ malidp_de_irq_hw_init(hwdev);
+ malidp_se_irq_hw_init(hwdev);
return 0;
}
@@ -489,6 +616,7 @@ static int malidp_bind(struct device *dev)
struct malidp_hw_device *hwdev;
struct platform_device *pdev = to_platform_device(dev);
struct of_device_id const *dev_id;
+ struct drm_encoder *encoder;
/* number of lines for the R, G and B output */
u8 output_width[MAX_OUTPUT_CHANNELS];
int ret = 0, i;
@@ -588,8 +716,9 @@ static int malidp_bind(struct device *dev)
for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
+ hwdev->output_color_depth = out_depth;
- atomic_set(&malidp->config_valid, 0);
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
init_waitqueue_head(&malidp->wq);
ret = malidp_init(drm);
@@ -609,6 +738,15 @@ static int malidp_bind(struct device *dev)
goto bind_fail;
}
+ /* We expect to have a maximum of two encoders: one for the actual
+ * display and a virtual one for the writeback connector.
+ */
+ WARN_ON(drm->mode_config.num_encoder > 2);
+ list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
+ encoder->possible_clones =
+ (1 << drm->mode_config.num_encoder) - 1;
+ }
+
ret = malidp_irq_init(pdev);
if (ret < 0)
goto irq_init_fail;
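
Since (1 << num_encoder) - 1 sets a bit for every registered encoder, each
encoder's possible_clones mask covers both itself and its peer, which is
what allows the display and writeback encoders to be active on the same
CRTC at once. A worked instance of the mask arithmetic:

	#include <stdio.h>

	/* Worked instance of the possible_clones computation above
	 * for num_encoder == 2 (display + writeback).
	 */
	int main(void)
	{
		int num_encoder = 2;
		unsigned int possible_clones = (1u << num_encoder) - 1;

		printf("possible_clones = 0x%x\n", possible_clones); /* 0x3 */
		return 0;
	}
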
@@ -642,10 +780,11 @@ register_fail:
fbdev_fail:
pm_runtime_get_sync(dev);
vblank_fail:
- malidp_se_irq_fini(drm);
- malidp_de_irq_fini(drm);
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
irq_init_fail:
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
bind_fail:
of_node_put(malidp->crtc.port);
@@ -672,15 +811,17 @@ static void malidp_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
drm_dev_unregister(drm);
drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
drm_crtc_vblank_off(&malidp->crtc);
- malidp_se_irq_fini(drm);
- malidp_de_irq_fini(drm);
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
@@ -751,8 +892,25 @@ static int __maybe_unused malidp_pm_resume(struct device *dev)
return 0;
}
+static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
+{
+ if (!pm_runtime_status_suspended(dev)) {
+ malidp_runtime_pm_suspend(dev);
+ pm_runtime_set_suspended(dev);
+ }
+ return 0;
+}
+
+static int __maybe_unused malidp_pm_resume_early(struct device *dev)
+{
+ malidp_runtime_pm_resume(dev);
+ pm_runtime_set_active(dev);
+ return 0;
+}
+
static const struct dev_pm_ops malidp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index c70989b93387..e3eb0cb1f385 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -13,18 +13,38 @@
#ifndef __MALIDP_DRV_H__
#define __MALIDP_DRV_H__
+#include <drm/drm_writeback.h>
+#include <drm/drm_encoder.h>
#include <linux/mutex.h>
#include <linux/wait.h>
+#include <linux/spinlock.h>
#include <drm/drmP.h>
#include "malidp_hw.h"
+#define MALIDP_CONFIG_VALID_INIT 0
+#define MALIDP_CONFIG_VALID_DONE 1
+#define MALIDP_CONFIG_START 0xd0
+
+struct malidp_error_stats {
+ s32 num_errors;
+ u32 last_error_status;
+ s64 last_error_vblank;
+};
+
struct malidp_drm {
struct malidp_hw_device *dev;
struct drm_crtc crtc;
+ struct drm_writeback_connector mw_connector;
wait_queue_head_t wq;
struct drm_pending_vblank_event *event;
atomic_t config_valid;
u32 core_id;
+#ifdef CONFIG_DEBUG_FS
+ struct malidp_error_stats de_errors;
+ struct malidp_error_stats se_errors;
+ /* Protects the error stats */
+ spinlock_t errors_lock;
+#endif
};
#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
@@ -62,6 +82,12 @@ struct malidp_crtc_state {
int malidp_de_planes_init(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
+#ifdef CONFIG_DEBUG_FS
+void malidp_error(struct malidp_drm *malidp,
+ struct malidp_error_stats *error_stats, u32 status,
+ u64 vblank);
+#endif
+
/* often used combination of rotational bits */
#define MALIDP_ROTATED_MASK (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index d789b46dc817..c94a4422e0e9 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -21,15 +21,24 @@
#include "malidp_drv.h"
#include "malidp_hw.h"
+#include "malidp_mw.h"
+
+enum {
+ MW_NOT_ENABLED = 0, /* SE writeback not enabled */
+ MW_ONESHOT, /* SE in one-shot mode for writeback */
+ MW_START, /* SE started writeback */
+ MW_RESTART, /* SE will start another writeback after this one */
+ MW_STOP, /* SE needs to stop after this writeback */
+};
static const struct malidp_format_id malidp500_de_formats[] = {
/* fourcc, layers supporting the format, internal id */
- { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
- { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 0 },
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 1 },
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
- { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 },
- { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 },
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 4 },
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 5 },
{ DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
{ DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
@@ -38,7 +47,7 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
{ DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
- { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
+ { DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
};
@@ -47,27 +56,27 @@ static const struct malidp_format_id malidp500_de_formats[] = {
#define MALIDP_COMMON_FORMATS \
/* fourcc, layers supporting the format, internal id */ \
- { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
- { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
- { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
- { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 1) }, \
+ { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 2) }, \
+ { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 3) }, \
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
{ DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
{ DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
- { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
- { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
- { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
- { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
- { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
- { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 0) }, \
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 1) }, \
+ { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 2) }, \
+ { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 3) }, \
+ { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 0) }, \
+ { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 1) }, \
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
- { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
+ { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
static const struct malidp_format_id malidp550_de_formats[] = {
@@ -223,15 +232,20 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
return false;
}
-static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp500_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
- malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+ if (value)
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
}
static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = 0;
+ malidp_hw_write(hwdev, hwdev->output_color_depth,
+ hwdev->hw->map.out_depth_base);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
val |= MALIDP500_HSYNCPOL;
@@ -368,6 +382,55 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
return ret;
}
+static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
+ dma_addr_t *addrs, s32 *pitches,
+ int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+ u32 base = MALIDP500_SE_MEMWRITE_BASE;
+ u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ /* enable the scaling engine block */
+ malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+ /* restart the writeback if already enabled */
+ if (hwdev->mw_state != MW_NOT_ENABLED)
+ hwdev->mw_state = MW_RESTART;
+ else
+ hwdev->mw_state = MW_START;
+
+ malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+ switch (num_planes) {
+ case 2:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+ /* fall through */
+ case 1:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+ break;
+ default:
+ WARN(1, "Invalid number of planes");
+ }
+
+ malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+ MALIDP500_SE_MEMWRITE_OUT_SIZE);
+ malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+
+ return 0;
+}
+
+static void malidp500_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+ u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ if (hwdev->mw_state == MW_START || hwdev->mw_state == MW_RESTART)
+ hwdev->mw_state = MW_STOP;
+ malidp_hw_clearbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+ malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
+
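/*
 * Editorial note: the switch statements in the enable_memwrite helpers
 * intentionally fall through from case 2 to case 1, so the plane-1
 * pointers are always programmed. An equivalent unrolled form of the
 * same logic (helper name is illustrative, not from the patch):
 */
static void write_mw_pointers(struct malidp_hw_device *hwdev, u32 base,
			      dma_addr_t *addrs, s32 *pitches,
			      int num_planes)
{
	if (num_planes == 2) {
		malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
		malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
		malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
	}
	malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
	malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
	malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
}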
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -447,15 +510,20 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
return false;
}
-static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp550_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
- malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+ if (value)
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
}
static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
+ malidp_hw_write(hwdev, hwdev->output_color_depth,
+ hwdev->hw->map.out_depth_base);
malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
/*
* Mali-DP550 and Mali-DP650 encode the background color like this:
@@ -588,6 +656,51 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
return ret;
}
+static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
+ dma_addr_t *addrs, s32 *pitches,
+ int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+ u32 base = MALIDP550_SE_MEMWRITE_BASE;
+ u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ /* enable the scaling engine block */
+ malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+ hwdev->mw_state = MW_ONESHOT;
+
+ malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+ switch (num_planes) {
+ case 2:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+ /* fall through */
+ case 1:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+ break;
+ default:
+ WARN(1, "Invalid number of planes");
+ }
+
+ malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+ MALIDP550_SE_MEMWRITE_OUT_SIZE);
+ malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+ MALIDP550_SE_CONTROL);
+
+ return 0;
+}
+
+static void malidp550_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+ u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ malidp_hw_clearbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+ MALIDP550_SE_CONTROL);
+ malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
+
static int malidp650_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -632,10 +745,18 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP500_DE_IRQ_VSYNC |
MALIDP500_DE_IRQ_GLOBAL,
.vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP500_DE_IRQ_AXI_ERR |
+ MALIDP500_DE_IRQ_SATURATION,
},
.se_irq_map = {
- .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
- .vsync_irq = 0,
+ .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+ MALIDP500_SE_IRQ_CONF_VALID |
+ MALIDP500_SE_IRQ_GLOBAL,
+ .vsync_irq = MALIDP500_SE_IRQ_CONF_VALID,
+ .err_mask = MALIDP500_SE_IRQ_INIT_BUSY |
+ MALIDP500_SE_IRQ_AXI_ERROR |
+ MALIDP500_SE_IRQ_OVERRUN,
},
.dc_irq_map = {
.irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
@@ -654,6 +775,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp500_rotmem_required,
.se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
.se_calc_mclk = malidp500_se_calc_mclk,
+ .enable_memwrite = malidp500_enable_memwrite,
+ .disable_memwrite = malidp500_disable_memwrite,
.features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
},
[MALIDP_550] = {
@@ -669,13 +792,20 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.irq_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP550_DE_IRQ_SATURATION |
+ MALIDP550_DE_IRQ_AXI_ERR,
},
.se_irq_map = {
- .irq_mask = MALIDP550_SE_IRQ_EOW |
- MALIDP550_SE_IRQ_AXI_ERR,
+ .irq_mask = MALIDP550_SE_IRQ_EOW,
+ .vsync_irq = MALIDP550_SE_IRQ_EOW,
+ .err_mask = MALIDP550_SE_IRQ_AXI_ERR |
+ MALIDP550_SE_IRQ_OVR |
+ MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
- .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+ MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp550_de_formats,
@@ -691,6 +821,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp550_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
+ .enable_memwrite = malidp550_enable_memwrite,
+ .disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
[MALIDP_650] = {
@@ -707,13 +839,25 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP650_DE_IRQ_DRIFT |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP650_DE_IRQ_DRIFT |
+ MALIDP550_DE_IRQ_SATURATION |
+ MALIDP550_DE_IRQ_AXI_ERR |
+ MALIDP650_DE_IRQ_ACEV1 |
+ MALIDP650_DE_IRQ_ACEV2 |
+ MALIDP650_DE_IRQ_ACEG |
+ MALIDP650_DE_IRQ_AXIEP,
},
.se_irq_map = {
- .irq_mask = MALIDP550_SE_IRQ_EOW |
- MALIDP550_SE_IRQ_AXI_ERR,
+ .irq_mask = MALIDP550_SE_IRQ_EOW,
+ .vsync_irq = MALIDP550_SE_IRQ_EOW,
+ .err_mask = MALIDP550_SE_IRQ_AXI_ERR |
+ MALIDP550_SE_IRQ_OVR |
+ MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
- .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+ MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp550_de_formats,
@@ -729,6 +873,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp550_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
+ .enable_memwrite = malidp550_enable_memwrite,
+ .disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
};
@@ -790,7 +936,7 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
malidp->event = NULL;
spin_unlock(&drm->event_lock);
}
- atomic_set(&malidp->config_valid, 1);
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
ret = IRQ_WAKE_THREAD;
}
@@ -799,10 +945,17 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
return ret;
mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
- status &= mask;
+ /* keep the status of the enabled interrupts, plus the error bits */
+ status &= (mask | de->err_mask);
if ((status & de->vsync_irq) && malidp->crtc.enabled)
drm_crtc_handle_vblank(&malidp->crtc);
+#ifdef CONFIG_DEBUG_FS
+ if (status & de->err_mask) {
+ malidp_error(malidp, &malidp->de_errors, status,
+ drm_crtc_vblank_count(&malidp->crtc));
+ }
+#endif
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
@@ -818,6 +971,23 @@ static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+
+ /* first enable the DC block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
+ hwdev->hw->map.dc_irq_map.irq_mask);
+
+ /* now enable the DE block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->hw->map.de_irq_map.irq_mask);
+}
+
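/*
 * Editorial usage sketch: splitting the hardware (re)arming out of
 * malidp_de_irq_init() means a PM resume path can re-enable the
 * interrupts without re-requesting the IRQ lines. A hypothetical
 * resume helper (name assumed, not from the patch) could simply do:
 */
static void foo_restore_irqs(struct malidp_hw_device *hwdev)
{
	malidp_de_irq_hw_init(hwdev);	/* DE and DC blocks */
	malidp_se_irq_hw_init(hwdev);	/* SE block */
}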
int malidp_de_irq_init(struct drm_device *drm, int irq)
{
struct malidp_drm *malidp = drm->dev_private;
@@ -838,22 +1008,13 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
return ret;
}
- /* first enable the DC block IRQs */
- malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->hw->map.dc_irq_map.irq_mask);
-
- /* now enable the DE block IRQs */
- malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->hw->map.de_irq_map.irq_mask);
+ malidp_de_irq_hw_init(hwdev);
return 0;
}
-void malidp_de_irq_fini(struct drm_device *drm)
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev)
{
- struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
-
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
hwdev->hw->map.de_irq_map.irq_mask);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
@@ -878,19 +1039,61 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
return IRQ_NONE;
status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
- if (!(status & se->irq_mask))
+ if (!(status & (se->irq_mask | se->err_mask)))
return IRQ_NONE;
+#ifdef CONFIG_DEBUG_FS
+ if (status & se->err_mask)
+ malidp_error(malidp, &malidp->se_errors, status,
+ drm_crtc_vblank_count(&malidp->crtc));
+#endif
mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
- status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
status &= mask;
- /* ToDo: status decoding and firing up of VSYNC and page flip events */
+
+ if (status & se->vsync_irq) {
+ switch (hwdev->mw_state) {
+ case MW_ONESHOT:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ break;
+ case MW_STOP:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ /* disable writeback after stop */
+ hwdev->mw_state = MW_NOT_ENABLED;
+ break;
+ case MW_RESTART:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ /* fall through to a new start */
+ case MW_START:
+ /* writeback started, need to emulate one-shot mode */
+ hw->disable_memwrite(hwdev);
+ /*
+ * Only set the config_valid HW bit if there is no other update
+ * in progress, or if we raced ahead of the DE IRQ handler and
+ * the config_valid flag will not be updated until later
+ */
+ status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+ if ((atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START) ||
+ (status & hw->map.dc_irq_map.vsync_irq))
+ hw->set_config_valid(hwdev, 1);
+ break;
+ }
+ }
malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
return IRQ_HANDLED;
}
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+
+ malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
+ hwdev->hw->map.se_irq_map.irq_mask);
+}
+
static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
{
return IRQ_HANDLED;
@@ -914,17 +1117,14 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
return ret;
}
- malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->hw->map.se_irq_map.irq_mask);
+ hwdev->mw_state = MW_NOT_ENABLED;
+ malidp_se_irq_hw_init(hwdev);
return 0;
}
-void malidp_se_irq_fini(struct drm_device *drm)
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev)
{
- struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
-
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
hwdev->hw->map.se_irq_map.irq_mask);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index b5dd6c73ec9f..ad2e96915d44 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -33,6 +33,7 @@ enum {
DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
DE_VIDEO2 = BIT(3),
DE_SMART = BIT(4),
+ SE_MEMWRITE = BIT(5),
};
struct malidp_format_id {
@@ -52,6 +53,7 @@ struct malidp_format_id {
struct malidp_irq_map {
u32 irq_mask; /* mask of IRQs that can be enabled in the block */
u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */
+ u32 err_mask; /* mask of bits that represent errors */
};
struct malidp_layer {
@@ -151,12 +153,13 @@ struct malidp_hw {
bool (*in_config_mode)(struct malidp_hw_device *hwdev);
/*
- * Set configuration valid flag for hardware parameters that can
- * be changed outside the configuration mode. Hardware will use
- * the new settings when config valid is set after the end of the
- * current buffer scanout
+ * Set/clear, to the given value, the configuration valid flag for
+ * hardware parameters that can be changed outside the configuration
+ * mode. Hardware will use the new settings when config valid is set,
+ * after the end of the current buffer scanout, and will ignore any
+ * new values for those parameters while the config valid flag is cleared.
*/
- void (*set_config_valid)(struct malidp_hw_device *hwdev);
+ void (*set_config_valid)(struct malidp_hw_device *hwdev, u8 value);
/*
* Set a new mode in hardware. Requires the hardware to be in
@@ -177,6 +180,23 @@ struct malidp_hw {
long (*se_calc_mclk)(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
struct videomode *vm);
+ /*
+ * Enable writing to memory the content of the next frame
+ * @param hwdev - malidp_hw_device structure containing the HW description
+ * @param addrs - array of addresses for each plane
+ * @param pitches - array of pitches for each plane
+ * @param num_planes - number of planes to be written
+ * @param w - width of the output frame
+ * @param h - height of the output frame
+ * @param fmt_id - internal format ID of output buffer
+ */
+ int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
+ s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+
+ /*
+ * Disable writing the next frame's content to memory.
+ */
+ void (*disable_memwrite)(struct malidp_hw_device *hwdev);
u8 features;
};
@@ -210,10 +230,14 @@ struct malidp_hw_device {
u8 min_line_size;
u16 max_line_size;
+ u32 output_color_depth;
/* track the device PM state */
bool pm_suspended;
+ /* track the SE memory writeback state */
+ u8 mw_state;
+
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
};
@@ -279,9 +303,11 @@ static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
}
int malidp_de_irq_init(struct drm_device *drm, int irq);
-void malidp_de_irq_fini(struct drm_device *drm);
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev);
int malidp_se_irq_init(struct drm_device *drm, int irq);
-void malidp_se_irq_fini(struct drm_device *drm);
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
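/*
 * Editorial usage sketch for the new memwrite hooks: a caller with a
 * single-plane output buffer (helper name and error code are assumed,
 * not part of the patch):
 */
static int start_writeback(struct malidp_hw_device *hwdev,
			   dma_addr_t paddr, s32 pitch,
			   u16 w, u16 h, u32 fmt_id)
{
	dma_addr_t addrs[1] = { paddr };
	s32 pitches[1] = { pitch };

	if (!hwdev->hw->enable_memwrite)
		return -ENODEV;	/* hardware without SE writeback */

	return hwdev->hw->enable_memwrite(hwdev, addrs, pitches, 1,
					  w, h, fmt_id);
}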
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
new file mode 100644
index 000000000000..ba6ae66387c9
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * ARM Mali DP Writeback connector implementation
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drmP.h>
+#include <drm/drm_writeback.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+#include "malidp_mw.h"
+
+#define to_mw_state(_state) (struct malidp_mw_connector_state *)(_state)
+
+struct malidp_mw_connector_state {
+ struct drm_connector_state base;
+ dma_addr_t addrs[2];
+ s32 pitches[2];
+ u8 format;
+ u8 n_planes;
+};
+
+static int malidp_mw_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+malidp_mw_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int w = mode->hdisplay, h = mode->vdisplay;
+
+ if ((w < mode_config->min_width) || (w > mode_config->max_width))
+ return MODE_BAD_HVALUE;
+
+ if ((h < mode_config->min_height) || (h > mode_config->max_height))
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
+const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+ .get_modes = malidp_mw_connector_get_modes,
+ .mode_valid = malidp_mw_connector_mode_valid,
+};
+
+static void malidp_mw_connector_reset(struct drm_connector *connector)
+{
+ struct malidp_mw_connector_state *mw_state =
+ kzalloc(sizeof(*mw_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(connector->state);
+ __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+}
+
+static enum drm_connector_status
+malidp_mw_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void malidp_mw_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_state *
+malidp_mw_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct malidp_mw_connector_state *mw_state;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ mw_state = kzalloc(sizeof(*mw_state), GFP_KERNEL);
+ if (!mw_state)
+ return NULL;
+
+ /* No need to preserve any of our driver-local data */
+ __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
+
+ return &mw_state->base;
+}
+
+static const struct drm_connector_funcs malidp_mw_connector_funcs = {
+ .reset = malidp_mw_connector_reset,
+ .detect = malidp_mw_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = malidp_mw_connector_destroy,
+ .atomic_duplicate_state = malidp_mw_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int
+malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct malidp_mw_connector_state *mw_state = to_mw_state(conn_state);
+ struct malidp_drm *malidp = encoder->dev->dev_private;
+ struct drm_framebuffer *fb;
+ int i, n_planes;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+ if ((fb->width != crtc_state->mode.hdisplay) ||
+ (fb->height != crtc_state->mode.vdisplay)) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ mw_state->format =
+ malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
+ fb->format->format);
+ if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ return -EINVAL;
+ }
+
+ n_planes = drm_format_num_planes(fb->format->format);
+ for (i = 0; i < n_planes; i++) {
+ struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i);
+ /* memory write buffers are never rotated */
+ u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
+
+ if (fb->pitches[i] & (alignment - 1)) {
+ DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+ fb->pitches[i], i);
+ return -EINVAL;
+ }
+ mw_state->pitches[i] = fb->pitches[i];
+ mw_state->addrs[i] = obj->paddr + fb->offsets[i];
+ }
+ mw_state->n_planes = n_planes;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs malidp_mw_encoder_helper_funcs = {
+ .atomic_check = malidp_mw_encoder_atomic_check,
+};
+
+static u32 *get_writeback_formats(struct malidp_drm *malidp, int *n_formats)
+{
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+ u32 *formats;
+ int n, i;
+
+ formats = kcalloc(map->n_pixel_formats, sizeof(*formats),
+ GFP_KERNEL);
+ if (!formats)
+ return NULL;
+
+ for (n = 0, i = 0; i < map->n_pixel_formats; i++) {
+ if (map->pixel_formats[i].layer & SE_MEMWRITE)
+ formats[n++] = map->pixel_formats[i].format;
+ }
+
+ *n_formats = n;
+
+ return formats;
+}
+
+int malidp_mw_connector_init(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ u32 *formats;
+ int ret, n_formats;
+
+ if (!malidp->dev->hw->enable_memwrite)
+ return 0;
+
+ malidp->mw_connector.encoder.possible_crtcs = 1 << drm_crtc_index(&malidp->crtc);
+ drm_connector_helper_add(&malidp->mw_connector.base,
+ &malidp_mw_connector_helper_funcs);
+
+ formats = get_writeback_formats(malidp, &n_formats);
+ if (!formats)
+ return -ENOMEM;
+
+ ret = drm_writeback_connector_init(drm, &malidp->mw_connector,
+ &malidp_mw_connector_funcs,
+ &malidp_mw_encoder_helper_funcs,
+ formats, n_formats);
+ kfree(formats);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void malidp_mw_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct drm_writeback_connector *mw_conn = &malidp->mw_connector;
+ struct drm_connector_state *conn_state = mw_conn->base.state;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_mw_connector_state *mw_state;
+
+ if (!conn_state)
+ return;
+
+ mw_state = to_mw_state(conn_state);
+
+ if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+ struct drm_framebuffer *fb = conn_state->writeback_job->fb;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev,
+ "Enable memwrite %ux%u:%d %pad fmt: %u\n",
+ fb->width, fb->height,
+ mw_state->pitches[0],
+ &mw_state->addrs[0],
+ mw_state->format);
+
+ drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
+ conn_state->writeback_job = NULL;
+
+ hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
+ mw_state->pitches, mw_state->n_planes,
+ fb->width, fb->height, mw_state->format);
+ } else {
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
+ hwdev->hw->disable_memwrite(hwdev);
+ }
+}
diff --git a/drivers/gpu/drm/arm/malidp_mw.h b/drivers/gpu/drm/arm/malidp_mw.h
new file mode 100644
index 000000000000..19a007676a1d
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_mw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ */
+
+#ifndef __MALIDP_MW_H__
+#define __MALIDP_MW_H__
+
+int malidp_mw_connector_init(struct drm_device *drm);
+void malidp_mw_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state);
+#endif
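/*
 * Editorial sketch: malidp_mw_atomic_commit() is meant to run from the
 * driver's atomic commit tail, after the CRTC and plane phases. The
 * exact call site is in malidp_drv.c, outside this diff; the helper
 * below only illustrates the expected ordering:
 */
static void example_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);
	/* queue the writeback job after the scanout state is final */
	malidp_mw_atomic_commit(drm, state);
	drm_atomic_helper_commit_hw_done(state);
}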
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 7a44897c50fe..29409a65d864 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -23,6 +23,7 @@
/* Layer specific register offsets */
#define MALIDP_LAYER_FORMAT 0x000
+#define LAYER_FORMAT_MASK 0x3f
#define MALIDP_LAYER_CONTROL 0x004
#define LAYER_ENABLE (1 << 0)
#define LAYER_FLOWCFG_MASK 7
@@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
- val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
- state->crtc_w,
+ val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
+ state->crtc_h,
fb->format->format);
if (val < 0)
return val;
@@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
dest_w = plane->state->crtc_w;
dest_h = plane->state->crtc_h;
- malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
+ val = malidp_hw_read(mp->hwdev, mp->layer->base);
+ val = (val & ~LAYER_FORMAT_MASK) | ms->format;
+ malidp_hw_write(mp->hwdev, val, mp->layer->base);
for (i = 0; i < ms->n_planes; i++) {
/* calculate the offset for the layer's plane registers */
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 149024fb4432..3579d36b2a71 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -53,6 +53,8 @@
#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16)
#define MALIDP550_SE_IRQ_EOW (1 << 0)
#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16)
+#define MALIDP550_SE_IRQ_OVR (1 << 17)
+#define MALIDP550_SE_IRQ_IBSY (1 << 18)
#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0)
#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4)
#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16)
@@ -60,12 +62,18 @@
#define MALIDP550_DC_IRQ_SE (1 << 24)
#define MALIDP650_DE_IRQ_DRIFT (1 << 4)
+#define MALIDP650_DE_IRQ_ACEV1 (1 << 17)
+#define MALIDP650_DE_IRQ_ACEV2 (1 << 18)
+#define MALIDP650_DE_IRQ_ACEG (1 << 19)
+#define MALIDP650_DE_IRQ_AXIEP (1 << 28)
/* bit masks that are common between products */
#define MALIDP_CFG_VALID (1 << 0)
#define MALIDP_DISP_FUNC_GAMMA (1 << 0)
#define MALIDP_DISP_FUNC_CADJ (1 << 4)
#define MALIDP_DISP_FUNC_ILACED (1 << 8)
+#define MALIDP_SCALE_ENGINE_EN (1 << 16)
+#define MALIDP_SE_MEMWRITE_EN (2 << 5)
/* register offsets for IRQ management */
#define MALIDP_REG_STATUS 0x00000
@@ -153,6 +161,16 @@
(((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
#define MALIDP_SE_ENH_COEFF0 0x04
+
+/* register offsets relative to MALIDP5x0_SE_MEMWRITE_BASE */
+#define MALIDP_MW_FORMAT 0x00000
+#define MALIDP_MW_P1_STRIDE 0x00004
+#define MALIDP_MW_P2_STRIDE 0x00008
+#define MALIDP_MW_P1_PTR_LOW 0x0000c
+#define MALIDP_MW_P1_PTR_HIGH 0x00010
+#define MALIDP_MW_P2_PTR_LOW 0x0002c
+#define MALIDP_MW_P2_PTR_HIGH 0x00030
+
/* register offsets and bits specific to DP500 */
#define MALIDP500_ADDR_SPACE_SIZE 0x01000
#define MALIDP500_DC_BASE 0x00000
@@ -186,7 +204,8 @@
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
-#define MALIDP500_SE_PTR_BASE 0x00e0c
+#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
@@ -217,6 +236,9 @@
#define MALIDP550_DE_PERF_BASE 0x00500
#define MALIDP550_SE_BASE 0x08000
#define MALIDP550_SE_CONTROL 0x08010
+#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
+#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_MEMWRITE_BASE 0x08100
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
#define MALIDP550_DC_CONFIG_REQ (1 << 16)
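/*
 * Editorial example: the MALIDP_MW_* offsets above are relative to the
 * per-SoC MEMWRITE base, so on DP550 the plane-2 low pointer register
 * lands at 0x08100 + 0x2c = 0x0812c (helper name is illustrative):
 */
static inline u32 malidp550_mw_reg(u32 mw_offset)
{
	return MALIDP550_SE_MEMWRITE_BASE + mw_offset;
}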
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ecf25cf9f9f5..9bc3c3213724 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o armada_trace.o
+ armada_gem.o armada_overlay.o armada_plane.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 41a784f5a5e6..2f7c048c5361 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -27,6 +27,10 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
/* Lower the watermark so as to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+ /* Initialise SPU register */
+ writel_relaxed(ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+ dcrtc->base + LCD_SPU_ADV_REG);
+
return 0;
}
@@ -75,9 +79,27 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
return 0;
}
+static void armada510_crtc_disable(struct armada_crtc *dcrtc)
+{
+ if (!IS_ERR(dcrtc->clk)) {
+ clk_disable_unprepare(dcrtc->clk);
+ dcrtc->clk = ERR_PTR(-EINVAL);
+ }
+}
+
+static void armada510_crtc_enable(struct armada_crtc *dcrtc,
+ const struct drm_display_mode *mode)
+{
+ if (IS_ERR(dcrtc->clk)) {
+ dcrtc->clk = dcrtc->extclk[0];
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ }
+}
+
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
- .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
.init = armada510_crtc_init,
.compute_clock = armada510_crtc_compute_clock,
+ .disable = armada510_crtc_disable,
+ .enable = armada510_crtc_enable,
};
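/*
 * Editorial note: dcrtc->clk doubles as an "is the clock prepared?"
 * flag via the ERR_PTR sentinel, which makes both hooks above
 * idempotent. A caller-side check (illustrative only):
 */
static bool armada510_clk_running(struct armada_crtc *dcrtc)
{
	return !IS_ERR(dcrtc->clk);	/* ERR_PTR(-EINVAL) means disabled */
}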
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 03eeee11dd5b..da9360688b55 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -19,33 +20,9 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-enum csc_mode {
- CSC_AUTO = 0,
- CSC_YUV_CCIR601 = 1,
- CSC_YUV_CCIR709 = 2,
- CSC_RGB_COMPUTER = 1,
- CSC_RGB_STUDIO = 2,
-};
-
-static const uint32_t armada_primary_formats[] = {
- DRM_FORMAT_UYVY,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
-};
-
/*
* A note about interlacing. Let's consider HDMI 1920x1080i.
* The timing parameters we have from X are:
@@ -115,15 +92,13 @@ armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
}
}
-#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
-
-static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc, bool enable)
{
uint32_t dumb_ctrl;
dumb_ctrl = dcrtc->cfg_dumb_ctrl;
- if (!dpms_blanked(dcrtc->dpms))
+ if (enable)
dumb_ctrl |= CFG_DUMB_ENA;
/*
@@ -132,295 +107,26 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
* force LCD_D[23:0] to output blank color, overriding the GPIO or
* SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
*/
- if (dpms_blanked(dcrtc->dpms) &&
- (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+ if (!enable && (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
dumb_ctrl &= ~DUMB_MASK;
dumb_ctrl |= DUMB_BLANK;
}
- /*
- * The documentation doesn't indicate what the normal state of
- * the sync signals are. Sebastian Hesselbart kindly probed
- * these signals on his board to determine their state.
- *
- * The non-inverted state of the sync signals is active high.
- * Setting these bits makes the appropriate signal active low.
- */
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
- dumb_ctrl |= CFG_INV_CSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
- dumb_ctrl |= CFG_INV_HSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
- dumb_ctrl |= CFG_INV_VSYNC;
-
- if (dcrtc->dumb_ctrl != dumb_ctrl) {
- dcrtc->dumb_ctrl = dumb_ctrl;
- writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-}
-
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y)
-{
- const struct drm_format_info *format = fb->format;
- unsigned int num_planes = format->num_planes;
- u32 addr = drm_fb_obj(fb)->dev_addr;
- int i;
-
- if (num_planes > 3)
- num_planes = 3;
-
- addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
- x * format->cpp[0];
-
- y /= format->vsub;
- x /= format->hsub;
-
- for (i = 1; i < num_planes; i++)
- addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * format->cpp[i];
- for (; i < 3; i++)
- addrs[i] = 0;
-}
-
-static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
- int x, int y, struct armada_regs *regs, bool interlaced)
-{
- unsigned pitch = fb->pitches[0];
- u32 addrs[3], addr_odd, addr_even;
- unsigned i = 0;
-
- DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
- pitch, x, y, fb->format->cpp[0] * 8);
-
- armada_drm_plane_calc_addrs(addrs, fb, x, y);
-
- addr_odd = addr_even = addrs[0];
-
- if (interlaced) {
- addr_even += pitch;
- pitch *= 2;
- }
-
- /* write offset, base, and pitch */
- armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
- armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
- armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
-
- return i;
-}
-
-static void armada_drm_plane_work_call(struct armada_crtc *dcrtc,
- struct armada_plane_work *work,
- void (*fn)(struct armada_crtc *, struct armada_plane_work *))
-{
- struct armada_plane *dplane = drm_to_armada_plane(work->plane);
- struct drm_pending_vblank_event *event;
- struct drm_framebuffer *fb;
-
- if (fn)
- fn(dcrtc, work);
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- event = work->event;
- fb = work->old_fb;
- if (event || fb) {
- struct drm_device *dev = dcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (event)
- drm_crtc_send_vblank_event(&dcrtc->crtc, event);
- if (fb)
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- if (work->need_kfree)
- kfree(work);
-
- wake_up(&dplane->frame_wait);
+ armada_updatel(dumb_ctrl,
+ ~(CFG_INV_CSYNC | CFG_INV_HSYNC | CFG_INV_VSYNC),
+ dcrtc->base + LCD_SPU_DUMB_CTRL);
}
-static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct drm_plane *plane)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- /* Handle any pending frame work. */
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->fn);
-}
-
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- struct armada_plane *plane = drm_to_armada_plane(work->plane);
- int ret;
-
- ret = drm_crtc_vblank_get(&dcrtc->crtc);
- if (ret)
- return ret;
-
- ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
- if (ret)
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- return ret;
-}
-
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
-{
- return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
-}
-
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *dplane)
-{
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->cancel);
-}
-
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_drm_crtc_complete_disable_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- if (dcrtc->plane == work->plane)
- dcrtc->plane = NULL;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static struct armada_plane_work *
-armada_drm_crtc_alloc_plane_work(struct drm_plane *plane)
-{
- struct armada_plane_work *work;
- int i = 0;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- return NULL;
-
- work->plane = plane;
- work->fn = armada_drm_crtc_complete_frame_work;
- work->need_kfree = true;
- armada_reg_queue_end(work->regs, i);
-
- return work;
-}
-
-static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
- struct drm_framebuffer *fb, bool force)
-{
- struct armada_plane_work *work;
-
- if (!fb)
- return;
-
- if (force) {
- /* Display is disabled, so just drop the old fb */
- drm_framebuffer_put(fb);
- return;
- }
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (work) {
- work->old_fb = fb;
-
- if (armada_drm_plane_work_queue(dcrtc, work) == 0)
- return;
-
- kfree(work);
- }
-
- /*
- * Oops - just drop the reference immediately and hope for
- * the best. The worst that will happen is the buffer gets
- * reused before it has finished being displayed.
- */
- drm_framebuffer_put(fb);
-}
-
-static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
-{
- /*
- * Tell the DRM core that vblank IRQs aren't going to happen for
- * a while. This cleans up any pending vblank events for us.
- */
- drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-
- if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
- if (dpms_blanked(dpms))
- armada_drm_vblank_off(dcrtc);
- else if (!IS_ERR(dcrtc->clk))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- dcrtc->dpms = dpms;
- armada_drm_crtc_update(dcrtc);
- if (!dpms_blanked(dpms))
- drm_crtc_vblank_on(&dcrtc->crtc);
- else if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
- } else if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- }
-}
-
-/*
- * Prepare for a mode set. Turn off overlay to ensure that we don't end
- * up with the overlay size being bigger than the active screen size.
- * We rely upon X refreshing this state after the mode set has completed.
- *
- * The mode_config.mutex will be held for this call
- */
-static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct drm_plane *plane;
-
- /*
- * If we have an overlay plane associated with this CRTC, disable
- * it before the modeset to avoid its coordinates being outside
- * the new mode parameters.
- */
- plane = dcrtc->plane;
- if (plane) {
- drm_plane_force_disable(plane);
- WARN_ON(!armada_drm_plane_work_wait(drm_to_armada_plane(plane),
- HZ));
- }
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+static void armada_drm_crtc_queue_state_event(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
- dcrtc->dpms = DRM_MODE_DPMS_ON;
- armada_drm_crtc_update(dcrtc);
+ /* If we have an event, we need vblank events enabled */
+ event = xchg(&crtc->state->event, NULL);
+ if (event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ dcrtc->event = event;
}
}
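/*
 * Editorial sketch of the xchg() handoff used above and in the IRQ
 * handler: each side atomically swaps the pointer, so exactly one of
 * them ever owns the event, without a dedicated lock (function name is
 * illustrative):
 */
static void example_consume_event(struct armada_crtc *dcrtc)
{
	/* take-and-clear is atomic with respect to the commit path */
	struct drm_pending_vblank_event *event = xchg(&dcrtc->event, NULL);

	if (event) {
		spin_lock(&dcrtc->crtc.dev->event_lock);
		drm_crtc_send_vblank_event(&dcrtc->crtc, event);
		spin_unlock(&dcrtc->crtc.dev->event_lock);
		drm_crtc_vblank_put(&dcrtc->crtc);
	}
}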
@@ -465,8 +171,8 @@ static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
+ struct drm_pending_vblank_event *event;
void __iomem *base = dcrtc->base;
- struct drm_plane *ovl_plane;
if (stat & DMA_FF_UNDERFLOW)
DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
@@ -476,10 +182,6 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
if (stat & VSYNC_IRQ)
drm_crtc_handle_vblank(&dcrtc->crtc);
- ovl_plane = dcrtc->plane;
- if (ovl_plane)
- armada_drm_plane_work_run(dcrtc, ovl_plane);
-
spin_lock(&dcrtc->irq_lock);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -495,22 +197,35 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
writel_relaxed(val, base + LCD_SPU_ADV_REG);
}
- if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
- writel_relaxed(dcrtc->cursor_hw_pos,
- base + LCD_SPU_HWC_OVSA_HPXL_VLN);
- writel_relaxed(dcrtc->cursor_hw_sz,
- base + LCD_SPU_HWC_HPXL_VLN);
- armada_updatel(CFG_HWC_ENA,
- CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
- base + LCD_SPU_DMA_CTRL0);
- dcrtc->cursor_update = false;
+ if (stat & dcrtc->irq_ena & DUMB_FRAMEDONE) {
+ if (dcrtc->update_pending) {
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ dcrtc->update_pending = false;
+ }
+ if (dcrtc->cursor_update) {
+ writel_relaxed(dcrtc->cursor_hw_pos,
+ base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->cursor_hw_sz,
+ base + LCD_SPU_HWC_HPXL_VLN);
+ armada_updatel(CFG_HWC_ENA,
+ CFG_HWC_ENA | CFG_HWC_1BITMOD |
+ CFG_HWC_1BITENA,
+ base + LCD_SPU_DMA_CTRL0);
+ dcrtc->cursor_update = false;
+ }
armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
}
-
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ)
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
+ if (stat & VSYNC_IRQ && !dcrtc->update_pending) {
+ event = xchg(&dcrtc->event, NULL);
+ if (event) {
+ spin_lock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_send_vblank_event(&dcrtc->crtc, event);
+ spin_unlock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_vblank_put(&dcrtc->crtc);
+ }
+ }
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -519,8 +234,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
/*
- * This is rediculous - rather than writing bits to clear, we
- * have to set the actual status register value. This is racy.
+ * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+ * is set. Writing has some other effect to acknowledge the IRQ -
+ * without this, we only get a single IRQ.
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
@@ -536,107 +252,16 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
return IRQ_NONE;
}
-static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
-{
- struct drm_display_mode *adj = &dcrtc->crtc.mode;
- uint32_t val = 0;
-
- if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
- val |= CFG_CSC_YUV_CCIR709;
- if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
- val |= CFG_CSC_RGB_STUDIO;
-
- /*
- * In auto mode, set the colorimetry, based upon the HDMI spec.
- * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
- * ITU601. It may be more appropriate to set this depending on
- * the source - but what if the graphic frame is YUV and the
- * video frame is RGB?
- */
- if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
- !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
- (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
- if (dcrtc->csc_yuv_mode == CSC_AUTO)
- val |= CFG_CSC_YUV_CCIR709;
- }
-
- /*
- * We assume we're connected to a TV-like device, so the YUV->RGB
- * conversion should produce a limited range. We should set this
- * depending on the connectors attached to this CRTC, and what
- * kind of device they report being connected.
- */
- if (dcrtc->csc_rgb_mode == CSC_AUTO)
- val |= CFG_CSC_RGB_STUDIO;
-
- return val;
-}
-
-static void armada_drm_gra_plane_regs(struct armada_regs *regs,
- struct drm_framebuffer *fb, struct armada_plane_state *state,
- int x, int y, bool interlaced)
-{
- unsigned int i;
- u32 ctrl0;
-
- i = armada_drm_crtc_calc_fb(fb, x, y, regs, interlaced);
- armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
-
- ctrl0 = state->ctrl0;
- if (interlaced)
- ctrl0 |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
- CFG_GRA_HSMOOTH | CFG_GRA_ENA,
- LCD_SPU_DMA_CTRL0);
- armada_reg_queue_end(regs, i);
-}
-
-static void armada_drm_primary_set(struct drm_crtc *crtc,
- struct drm_plane *plane, int x, int y)
-{
- struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[8];
- bool interlaced = dcrtc->interlaced;
-
- armada_drm_gra_plane_regs(regs, plane->fb, state, x, y, interlaced);
- armada_drm_crtc_update_regs(dcrtc, regs);
-}
-
/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode, struct drm_display_mode *adj,
- int x, int y, struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
+ struct drm_display_mode *adj = &crtc->state->adjusted_mode;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
unsigned long flags;
unsigned i;
- bool interlaced;
-
- drm_framebuffer_get(crtc->primary->fb);
-
- interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
-
- val = CFG_GRA_ENA;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
- drm_to_armada_plane(crtc->primary)->state.src_hw =
- drm_to_armada_plane(crtc->primary)->state.dst_hw =
- adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
- drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ bool interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
@@ -644,35 +269,15 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
tm = adj->crtc_vtotal - adj->crtc_vsync_end;
- DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
- adj->crtc_hdisplay,
- adj->crtc_hsync_start,
- adj->crtc_hsync_end,
- adj->crtc_htotal, lm, rm);
- DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
- adj->crtc_vdisplay,
- adj->crtc_vsync_start,
- adj->crtc_vsync_end,
- adj->crtc_vtotal, tm, bm);
-
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
-
- drm_crtc_vblank_off(crtc);
-
- val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
- if (val != dcrtc->dumb_ctrl) {
- dcrtc->dumb_ctrl = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-
- /*
- * If we are blanked, we would have disabled the clock. Re-enable
- * it so that compute_clock() does the right thing.
- */
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
+ DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
+ crtc->base.id, crtc->name,
+ adj->base.id, adj->name, adj->vrefresh, adj->clock,
+ adj->crtc_hdisplay, adj->crtc_hsync_start,
+ adj->crtc_hsync_end, adj->crtc_htotal,
+ adj->crtc_vdisplay, adj->crtc_vsync_start,
+ adj->crtc_vsync_end, adj->crtc_vtotal,
+ adj->type, adj->flags);
+ DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
/* Now compute the divider for real */
dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
@@ -689,25 +294,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
spin_lock_irqsave(&dcrtc->irq_lock, flags);
- /* Ensure graphic fifo is enabled */
- armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
/* Even interlaced/progressive frame */
dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
adj->crtc_htotal;
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
- dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
+ dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
if (interlaced) {
/* Odd interlaced frame */
+ val -= adj->crtc_htotal / 2;
+ dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
(1 << 16);
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
- val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
- dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
@@ -720,77 +320,136 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
- if (dcrtc->variant->has_spu_adv_reg) {
+ if (dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
- }
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
- val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
- armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+ /*
+ * The documentation doesn't indicate what the normal state of
+ * the sync signals are. Sebastian Hesselbart kindly probed
+ * these signals on his board to determine their state.
+ *
+ * The non-inverted state of the sync signals is active high.
+ * Setting these bits makes the appropriate signal active low.
+ */
+ val = 0;
+ if (adj->flags & DRM_MODE_FLAG_NCSYNC)
+ val |= CFG_INV_CSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= CFG_INV_HSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= CFG_INV_VSYNC;
+ armada_reg_queue_mod(regs, i, val, CFG_INV_CSYNC | CFG_INV_HSYNC |
+ CFG_INV_VSYNC, LCD_SPU_DUMB_CTRL);
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
-
- armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
- armada_drm_crtc_update(dcrtc);
+static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- drm_crtc_vblank_on(crtc);
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- return 0;
+ dcrtc->regs_idx = 0;
+ dcrtc->regs = dcrtc->atomic_regs;
}
-/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[4];
- unsigned i;
- i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
- dcrtc->interlaced);
- armada_reg_queue_end(regs, i);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+ armada_reg_queue_end(dcrtc->regs, dcrtc->regs_idx);
+
+ /*
+ * If we aren't doing a full modeset, then we need to queue
+ * the event here.
+ */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
+ dcrtc->update_pending = true;
+ armada_drm_crtc_queue_state_event(crtc);
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ } else {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ }
+}
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
+static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- /* Take a reference to the new fb as we're using it */
- drm_framebuffer_get(crtc->primary->fb);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- /* Update the base in the CRTC */
- armada_drm_crtc_update_regs(dcrtc, regs);
+ drm_crtc_vblank_off(crtc);
+ armada_drm_crtc_update(dcrtc, false);
- /* Drop our previously held reference */
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ if (!crtc->state->active) {
+ /*
+ * This modeset will be leaving the CRTC disabled, so
+ * call the backend to disable upstream clocks etc.
+ */
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
- return 0;
+ /*
+ * We will not receive any further vblank events.
+ * Send the flip_done event manually.
+ */
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+ if (event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+ }
}
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
- armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+ if (!old_state->active) {
+ /*
+ * This modeset is enabling the CRTC after it has
+ * been disabled. Reverse the ->disable call made in
+ * atomic_disable().
+ */
+ if (dcrtc->variant->enable)
+ dcrtc->variant->enable(dcrtc, &crtc->state->adjusted_mode);
+ }
+ armada_drm_crtc_update(dcrtc, true);
+ drm_crtc_vblank_on(crtc);
- /* Disable our primary plane when we disable the CRTC. */
- crtc->primary->funcs->disable_plane(crtc->primary, NULL);
+ armada_drm_crtc_queue_state_event(crtc);
}
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
- .dpms = armada_drm_crtc_dpms,
- .prepare = armada_drm_crtc_prepare,
- .commit = armada_drm_crtc_commit,
.mode_fixup = armada_drm_crtc_mode_fixup,
- .mode_set = armada_drm_crtc_mode_set,
- .mode_set_base = armada_drm_crtc_mode_set_base,
- .disable = armada_drm_crtc_disable,
+ .mode_set_nofb = armada_drm_crtc_mode_set_nofb,
+ .atomic_begin = armada_drm_crtc_atomic_begin,
+ .atomic_flush = armada_drm_crtc_atomic_flush,
+ .atomic_disable = armada_drm_crtc_atomic_disable,
+ .atomic_enable = armada_drm_crtc_atomic_enable,
};
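All of these callbacks funnel hardware access through the armada_regs queue:
armada_reg_queue_set() and _mod() append {offset, mask, val} entries,
armada_reg_queue_end() appends a terminator with offset ~0 (see the macro kept
in armada_crtc.h further down), and armada_drm_crtc_update_regs() replays the
entries under irq_lock. A minimal sketch of the replay loop, assuming that
entry layout (the real implementation is outside this diff):

	/* Assumed replay: a zero mask means a plain register write,
	 * a non-zero mask means read-modify-write keeping those bits. */
	while (regs->offset != ~0) {
		void __iomem *reg = dcrtc->base + regs->offset;
		u32 val = regs->mask;

		if (val)
			val &= readl_relaxed(reg);
		writel_relaxed(val | regs->val, reg);
		++regs;
	}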
static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
@@ -883,7 +542,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (!dcrtc->cursor_obj || !h || !w) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -907,7 +565,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -1015,8 +672,8 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
- if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
@@ -1025,361 +682,51 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(dcrtc);
}
-/*
- * The mode_config lock is held here, to prevent races between this
- * and a mode_set.
- */
-static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- unsigned i;
- int ret;
-
- /* We don't support changing the pixel format */
- if (fb->format != crtc->primary->fb->format)
- return -EINVAL;
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (!work)
- return -ENOMEM;
-
- work->event = event;
- work->old_fb = dcrtc->crtc.primary->fb;
-
- i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
- dcrtc->interlaced);
- armada_reg_queue_end(work->regs, i);
-
- /*
- * Ensure that we hold a reference on the new framebuffer.
- * This has to match the behaviour in mode_set.
- */
- drm_framebuffer_get(fb);
-
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- /* Undo our reference above */
- drm_framebuffer_put(fb);
- kfree(work);
- return ret;
- }
-
- /*
- * Don't take a reference on the new framebuffer;
- * drm_mode_page_flip_ioctl() has already grabbed a reference and
- * will _not_ drop that reference on successful return from this
- * function. Simply mark this new framebuffer as the current one.
- */
- dcrtc->crtc.primary->fb = fb;
-
- /*
- * Finally, if the display is blanked, we won't receive an
- * interrupt, so complete it now.
- */
- if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-
- return 0;
-}
-
-static int
-armada_drm_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property, uint64_t val)
-{
- struct armada_private *priv = crtc->dev->dev_private;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- bool update_csc = false;
-
- if (property == priv->csc_yuv_prop) {
- dcrtc->csc_yuv_mode = val;
- update_csc = true;
- } else if (property == priv->csc_rgb_prop) {
- dcrtc->csc_rgb_mode = val;
- update_csc = true;
- }
-
- if (update_csc) {
- uint32_t val;
-
- val = dcrtc->spu_iopad_ctrl |
- armada_drm_crtc_calculate_csc(dcrtc);
- writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
- }
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
return 0;
}
static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
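The new locking matters because irq_ena is a software cache of
LCD_SPU_IRQ_ENA that is also updated from other paths; without irq_lock,
concurrent enable/disable calls could lose bits. A sketch of what the
(unshown) enable helper presumably does with the shared field:

	/* Assumed shape of the helper these hunks now serialize. */
	static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
	{
		if ((dcrtc->irq_ena & mask) != mask) {
			dcrtc->irq_ena |= mask;
			writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
		}
	}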
static const struct drm_crtc_funcs armada_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = armada_drm_crtc_page_flip,
- .set_property = armada_drm_crtc_set_property,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = armada_drm_crtc_enable_vblank,
.disable_vblank = armada_drm_crtc_disable_vblank,
};
-static void armada_drm_primary_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
-{
- struct armada_plane *dplane = drm_to_armada_plane(state->plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(state->crtc);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- bool was_disabled;
- unsigned int idx = 0;
- u32 val;
-
- val = CFG_GRA_FMT(dfb->fmt) | CFG_GRA_MOD(dfb->mod);
- if (dfb->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
- if (state->visible)
- val |= CFG_GRA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- val |= CFG_GRA_HSMOOTH;
-
- was_disabled = !(dplane->state.ctrl0 & CFG_GRA_ENA);
- if (was_disabled)
- armada_reg_queue_mod(regs, idx,
- 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
- dplane->state.ctrl0 = val;
- dplane->state.src_hw = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- dplane->state.dst_hw = drm_rect_height(&state->dst) << 16 |
- drm_rect_width(&state->dst);
- dplane->state.dst_yx = state->dst.y1 << 16 | state->dst.x1;
-
- armada_drm_gra_plane_regs(regs + idx, &dfb->fb, &dplane->state,
- state->src.x1 >> 16, state->src.y1 >> 16,
- dcrtc->interlaced);
-
- dplane->state.vsync_update = !was_disabled;
- dplane->state.changed = true;
-}
-
-static int armada_drm_primary_update(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
-
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_frame_work;
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
-
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
- }
-
- armada_drm_primary_update_state(&state, work->regs);
-
- if (!dplane->state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(dplane, HZ / 10) == 0)
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (!dplane->state.vsync_update) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- return 0;
- }
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc;
- struct armada_plane_work *work;
- unsigned int idx = 0;
- u32 sram_para1, enable_mask;
-
- if (!plane->crtc)
- return 0;
-
- /*
- * Arrange to power down most RAMs and FIFOs if this is the primary
- * plane, otherwise just the YUV FIFOs for the overlay plane.
- */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
- enable_mask = CFG_GRA_ENA;
- } else {
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
- enable_mask = CFG_DMA_ENA;
- }
-
- dplane->state.ctrl0 &= ~enable_mask;
-
- dcrtc = drm_to_armada_crtc(plane->crtc);
-
- /*
- * Try to disable the plane and drop our ref on the framebuffer
- * at the next frame update. If we fail for any reason, disable
- * the plane immediately.
- */
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_disable_work;
- work->cancel = armada_drm_crtc_complete_disable_work;
- work->old_fb = plane->fb;
-
- armada_reg_queue_mod(work->regs, idx,
- 0, enable_mask, LCD_SPU_DMA_CTRL0);
- armada_reg_queue_mod(work->regs, idx,
- sram_para1, 0, LCD_SPU_SRAM_PARA1);
- armada_reg_queue_end(work->regs, idx);
-
- /* Wait for any preceding work to complete, but don't wedge */
- if (WARN_ON(!armada_drm_plane_work_wait(dplane, HZ)))
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (armada_drm_plane_work_queue(dcrtc, work)) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-static const struct drm_plane_funcs armada_primary_plane_funcs = {
- .update_plane = armada_drm_primary_update,
- .disable_plane = armada_drm_plane_disable,
- .destroy = drm_primary_helper_destroy,
-};
-
-int armada_drm_plane_init(struct armada_plane *plane)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(plane->works); i++)
- plane->works[i].plane = &plane->base;
-
- init_waitqueue_head(&plane->frame_wait);
-
- return 0;
-}
-
-static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_YUV_CCIR601, "CCIR601" },
- { CSC_YUV_CCIR709, "CCIR709" },
-};
-
-static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_RGB_COMPUTER, "Computer system" },
- { CSC_RGB_STUDIO, "Studio" },
-};
-
-static int armada_drm_crtc_create_properties(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- if (priv->csc_yuv_prop)
- return 0;
-
- priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
- "CSC_YUV", armada_drm_csc_yuv_enum_list,
- ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
- priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
- "CSC_RGB", armada_drm_csc_rgb_enum_list,
- ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
-
- if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
- return -ENOMEM;
-
- return 0;
-}
-
static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
struct resource *res, int irq, const struct armada_variant *variant,
struct device_node *port)
{
struct armada_private *priv = drm->dev_private;
struct armada_crtc *dcrtc;
- struct armada_plane *primary;
+ struct drm_plane *primary;
void __iomem *base;
int ret;
- ret = armada_drm_crtc_create_properties(drm);
- if (ret)
- return ret;
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1397,8 +744,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->base = base;
dcrtc->num = drm->mode_config.num_crtc;
dcrtc->clk = ERR_PTR(-EINVAL);
- dcrtc->csc_yuv_mode = CSC_AUTO;
- dcrtc->csc_rgb_mode = CSC_AUTO;
dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
spin_lock_init(&dcrtc->irq_lock);
@@ -1415,6 +760,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
@@ -1441,39 +787,23 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
goto err_crtc;
}
- ret = armada_drm_plane_init(primary);
- if (ret) {
- kfree(primary);
- goto err_crtc;
- }
-
- ret = drm_universal_plane_init(drm, &primary->base, 0,
- &armada_primary_plane_funcs,
- armada_primary_formats,
- ARRAY_SIZE(armada_primary_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ ret = armada_drm_primary_plane_init(drm, primary);
if (ret) {
kfree(primary);
goto err_crtc;
}
- ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
+ ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, primary, NULL,
&armada_crtc_funcs, NULL);
if (ret)
goto err_crtc_init;
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
- dcrtc->csc_yuv_mode);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
- dcrtc->csc_rgb_mode);
-
return armada_overlay_plane_create(drm, 1 << dcrtc->num);
err_crtc_init:
- primary->base.funcs->destroy(&primary->base);
+ primary->funcs->destroy(primary);
err_crtc:
kfree(dcrtc);
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 445829b8877a..7ebd337b60af 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,49 +32,8 @@ struct armada_regs {
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
struct armada_crtc;
-struct armada_plane;
struct armada_variant;
-struct armada_plane_work {
- void (*fn)(struct armada_crtc *, struct armada_plane_work *);
- void (*cancel)(struct armada_crtc *, struct armada_plane_work *);
- bool need_kfree;
- struct drm_plane *plane;
- struct drm_framebuffer *old_fb;
- struct drm_pending_vblank_event *event;
- struct armada_regs regs[14];
-};
-
-struct armada_plane_state {
- u16 src_x;
- u16 src_y;
- u32 src_hw;
- u32 dst_hw;
- u32 dst_yx;
- u32 ctrl0;
- bool changed;
- bool vsync_update;
-};
-
-struct armada_plane {
- struct drm_plane base;
- wait_queue_head_t frame_wait;
- bool next_work;
- struct armada_plane_work works[2];
- struct armada_plane_work *work;
- struct armada_plane_state state;
-};
-#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
-
-int armada_drm_plane_init(struct armada_plane *plane);
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work);
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *plane);
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y);
-
struct armada_crtc {
struct drm_crtc crtc;
const struct armada_variant *variant;
@@ -89,10 +48,6 @@ struct armada_crtc {
} v[2];
bool interlaced;
bool cursor_update;
- uint8_t csc_yuv_mode;
- uint8_t csc_rgb_mode;
-
- struct drm_plane *plane;
struct armada_gem_object *cursor_obj;
int cursor_x;
@@ -102,21 +57,22 @@ struct armada_crtc {
uint32_t cursor_w;
uint32_t cursor_h;
- int dpms;
uint32_t cfg_dumb_ctrl;
- uint32_t dumb_ctrl;
uint32_t spu_iopad_ctrl;
spinlock_t irq_lock;
uint32_t irq_ena;
+
+ bool update_pending;
+ struct drm_pending_vblank_event *event;
+ struct armada_regs atomic_regs[32];
+ struct armada_regs *regs;
+ unsigned int regs_idx;
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
extern struct platform_driver armada_lcd_platform_driver;
#endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index cc4c557c9f66..f09083ff15d3 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -42,11 +42,12 @@ struct armada_private;
struct armada_variant {
bool has_spu_adv_reg;
- uint32_t spu_adv_reg;
int (*init)(struct armada_crtc *, struct device *);
int (*compute_clock)(struct armada_crtc *,
const struct drm_display_mode *,
uint32_t *);
+ void (*disable)(struct armada_crtc *);
+ void (*enable)(struct armada_crtc *, const struct drm_display_mode *);
};
/* Variant ops */
@@ -54,14 +55,10 @@ extern const struct armada_variant armada510_ops;
struct armada_private {
struct drm_device drm;
- struct work_struct fb_unref_work;
- DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear; /* protected by linear_lock */
struct mutex linear_lock;
- struct drm_property *csc_yuv_prop;
- struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
struct drm_property *colorkey_min_prop;
struct drm_property *colorkey_max_prop;
@@ -76,13 +73,6 @@ struct armada_private {
#endif
};
-void __armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-void armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-
-extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
-
int armada_fbdev_init(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4b11b6b52f1d..fa31589b4fc0 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -9,46 +9,18 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
+#include "armada_fb.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static void armada_drm_unref_work(struct work_struct *work)
-{
- struct armada_private *priv =
- container_of(work, struct armada_private, fb_unref_work);
- struct drm_framebuffer *fb;
-
- while (kfifo_get(&priv->fb_unref, &fb))
- drm_framebuffer_put(fb);
-}
-
-/* Must be called with dev->event_lock held */
-void __armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- struct armada_private *priv = dev->dev_private;
-
- WARN_ON(!kfifo_put(&priv->fb_unref, fb));
- schedule_work(&priv->fb_unref_work);
-}
-
-void armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
@@ -72,11 +44,18 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
+static const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+ .fb_create = armada_fb_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static int armada_drm_bind(struct device *dev)
{
struct armada_private *priv;
@@ -109,7 +88,7 @@ static int armada_drm_bind(struct device *dev)
/*
* The drm_device structure must be at the start of
- * armada_private for drm_dev_unref() to work correctly.
+ * armada_private for drm_dev_put() to work correctly.
*/
BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
@@ -125,9 +104,6 @@ static int armada_drm_bind(struct device *dev)
dev_set_drvdata(dev, &priv->drm);
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
/* Mode setting support */
drm_mode_config_init(&priv->drm);
priv->drm.mode_config.min_width = 320;
@@ -155,6 +131,8 @@ static int armada_drm_bind(struct device *dev)
priv->drm.irq_enabled = true;
+ drm_mode_config_reset(&priv->drm);
+
ret = armada_fbdev_init(&priv->drm);
if (ret)
goto err_comp;
@@ -179,8 +157,7 @@ static int armada_drm_bind(struct device *dev)
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
return ret;
}
@@ -198,9 +175,8 @@ static void armada_drm_unbind(struct device *dev)
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index ac92bce07ecd..6bd638a54579 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -7,30 +7,15 @@
*/
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-static void armada_fb_destroy(struct drm_framebuffer *fb)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-
- drm_framebuffer_cleanup(&dfb->fb);
- drm_gem_object_put_unlocked(&dfb->obj->obj);
- kfree(dfb);
-}
-
-static int armada_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *dfile, unsigned int *handle)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
- return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
-}
-
static const struct drm_framebuffer_funcs armada_fb_funcs = {
- .destroy = armada_fb_destroy,
- .create_handle = armada_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
@@ -78,7 +63,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->fmt = format;
dfb->mod = config;
- dfb->obj = obj;
+ dfb->fb.obj[0] = &obj->obj;
drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
@@ -99,7 +84,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
return dfb;
}
-static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
{
struct armada_gem_object *obj;
@@ -153,8 +138,3 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
}
-
-const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
- .fb_create = armada_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
-};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index 48073c4f54d8..476daad0a36a 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -10,15 +10,15 @@
struct armada_framebuffer {
struct drm_framebuffer fb;
- struct armada_gem_object *obj;
uint8_t fmt;
uint8_t mod;
};
#define drm_fb_to_armada_fb(dfb) \
container_of(dfb, struct armada_framebuffer, fb)
-#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
-
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+ struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 2a59db0994b2..8d23700848df 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -24,7 +24,7 @@ static /*const*/ struct fb_ops armada_fb_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
};
-static int armada_fb_create(struct drm_fb_helper *fbh,
+static int armada_fbdev_create(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
@@ -108,7 +108,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
int ret = 0;
if (!fbh->fb) {
- ret = armada_fb_create(fbh, sizes);
+ ret = armada_fbdev_create(fbh, sizes);
if (ret == 0)
ret = 1;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a97f509743a5..892c1d9304bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -13,25 +13,14 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static int armada_gem_vm_fault(struct vm_fault *vmf)
+static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
struct drm_gem_object *gobj = vmf->vma->vm_private_data;
struct armada_gem_object *obj = drm_to_armada_gem(gobj);
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
- int ret;
pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
-
- switch (ret) {
- case 0:
- case -EBUSY:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
const struct vm_operations_struct armada_gem_vm_ops = {
@@ -490,8 +479,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map_atomic = armada_gem_dmabuf_no_kmap,
- .unmap_atomic = armada_gem_dmabuf_no_kunmap,
.map = armada_gem_dmabuf_no_kmap,
.unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8335e2..277580b36758 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@ enum {
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
+#define CFG_ALPHA(x) ((x) << 8)
CFG_PIXCMD_MASK = 0xff,
};
@@ -315,4 +316,19 @@ enum {
PWRDN_IRQ_LEVEL = 1 << 0,
};
+static inline u32 armada_rect_hw_fp(struct drm_rect *r)
+{
+ return (drm_rect_height(r) & 0xffff0000) | drm_rect_width(r) >> 16;
+}
+
+static inline u32 armada_rect_hw(struct drm_rect *r)
+{
+ return drm_rect_height(r) << 16 | (drm_rect_width(r) & 0x0000ffff);
+}
+
+static inline u32 armada_rect_yx(struct drm_rect *r)
+{
+ return (r)->y1 << 16 | ((r)->x1 & 0x0000ffff);
+}
+
#endif
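As a quick sanity check of the three packing helpers added above: destination
rectangles are in whole pixels, while source rectangles are 16.16 fixed
point, which is what the _fp variant accounts for. With a made-up 1920x1080
destination placed at (0, 32):

	struct drm_rect dst = { .x1 = 0, .y1 = 32, .x2 = 1920, .y2 = 1112 };

	u32 hw = armada_rect_hw(&dst);	/* 1080 << 16 | 1920 == 0x04380780 */
	u32 yx = armada_rect_yx(&dst);	/*   32 << 16 |    0 == 0x00200000 */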
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c391955009d6..eb7dfb65ef47 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,346 +7,468 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/armada_drm.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-struct armada_ovl_plane_properties {
- uint32_t colorkey_yr;
- uint32_t colorkey_ug;
- uint32_t colorkey_vb;
-#define K2R(val) (((val) >> 0) & 0xff)
-#define K2G(val) (((val) >> 8) & 0xff)
-#define K2B(val) (((val) >> 16) & 0xff)
- int16_t brightness;
- uint16_t contrast;
- uint16_t saturation;
- uint32_t colorkey_mode;
-};
-
-struct armada_ovl_plane {
- struct armada_plane base;
- struct armada_ovl_plane_properties prop;
+#define DEFAULT_BRIGHTNESS 0
+#define DEFAULT_CONTRAST 0x4000
+#define DEFAULT_SATURATION 0x4000
+#define DEFAULT_ENCODING DRM_COLOR_YCBCR_BT601
+
+struct armada_overlay_state {
+ struct drm_plane_state base;
+ u32 colorkey_yr;
+ u32 colorkey_ug;
+ u32 colorkey_vb;
+ u32 colorkey_mode;
+ u32 colorkey_enable;
+ s16 brightness;
+ u16 contrast;
+ u16 saturation;
};
-#define drm_to_armada_ovl_plane(p) \
- container_of(p, struct armada_ovl_plane, base.base)
-
+#define drm_to_overlay_state(s) \
+ container_of(s, struct armada_overlay_state, base)
-static void
-armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
- struct armada_crtc *dcrtc)
+static inline u32 armada_spu_contrast(struct drm_plane_state *state)
{
- writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
- writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
- writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+ return drm_to_overlay_state(state)->brightness << 16 |
+ drm_to_overlay_state(state)->contrast;
+}
- writel_relaxed(prop->brightness << 16 | prop->contrast,
- dcrtc->base + LCD_SPU_CONTRAST);
+static inline u32 armada_spu_saturation(struct drm_plane_state *state)
+{
/* Docs say 15:0, but it actually seems to be 31:16 on Armada 510 */
- writel_relaxed(prop->saturation << 16,
- dcrtc->base + LCD_SPU_SATURATION);
- writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
-
- spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
- dcrtc->base + LCD_SPU_DMA_CTRL1);
+ return drm_to_overlay_state(state)->saturation << 16;
+}
- armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
- spin_unlock_irq(&dcrtc->irq_lock);
+static inline u32 armada_csc(struct drm_plane_state *state)
+{
+ /*
+ * The CFG_CSC_RGB_* settings control the output of the colour space
+ * converter, setting the range of output values it produces. Since
+ * we will be blending with the full-range graphics, we need to
+ * produce full-range RGB output from the conversion.
+ */
+ return CFG_CSC_RGB_COMPUTER |
+ (state->color_encoding == DRM_COLOR_YCBCR_BT709 ?
+ CFG_CSC_YUV_CCIR709 : CFG_CSC_YUV_CCIR601);
}
/* === Plane support === */
-static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
+static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- unsigned long flags;
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx;
+ u32 cfg, cfg_mask, val;
- trace_armada_ovl_plane_work(&dcrtc->crtc, work->plane);
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_ovl_plane_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
-{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(state->plane);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- const struct drm_format_info *format;
- unsigned int idx = 0;
- bool fb_changed;
- u32 val, ctrl0;
- u16 src_x, src_y;
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
- ctrl0 = CFG_DMA_FMT(dfb->fmt) | CFG_DMA_MOD(dfb->mod) | CFG_CBSH_ENA;
- if (state->visible)
- ctrl0 |= CFG_DMA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- ctrl0 |= CFG_DMA_HSMOOTH;
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
- /*
- * Shifting a YUV packed format image by one pixel causes the U/V
- * planes to swap. Compensate for it by also toggling the UV swap.
- */
- format = dfb->fb.format;
- if (format->num_planes == 1 && state->src.x1 >> 16 & (format->hsub - 1))
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
- if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
- /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+ idx = 0;
+ if (!old_state->visible && state->visible)
armada_reg_queue_mod(regs, idx,
0, CFG_PDWN16x66 | CFG_PDWN32x66,
LCD_SPU_SRAM_PARA1);
- }
-
- fb_changed = dplane->base.base.fb != &dfb->fb ||
- dplane->base.state.src_x != state->src.x1 >> 16 ||
- dplane->base.state.src_y != state->src.y1 >> 16;
-
- dplane->base.state.vsync_update = fb_changed;
-
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
/* FIXME: overlay on an interlaced display */
- if (fb_changed) {
- u32 addrs[3];
-
- dplane->base.state.src_y = src_y = state->src.y1 >> 16;
- dplane->base.state.src_x = src_x = state->src.x1 >> 16;
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb) {
+ const struct drm_format_info *format;
+ u16 src_x, pitches[3];
+ u32 addrs[2][3];
- armada_drm_plane_calc_addrs(addrs, &dfb->fb, src_x, src_y);
+ armada_drm_plane_calc(state, addrs, pitches, false);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0][0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[0][1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[0][2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[1][0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1][1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[1][2],
LCD_SPU_DMA_START_ADDR_V1);
- val = dfb->fb.pitches[0] << 16 | dfb->fb.pitches[0];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_YC);
- val = dfb->fb.pitches[1] << 16 | dfb->fb.pitches[2];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_UV);
- }
+ val = pitches[0] << 16 | pitches[0];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
+ val = pitches[1] << 16 | pitches[2];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
- val = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- if (dplane->base.state.src_hw != val) {
- dplane->base.state.src_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_HPXL_VLN);
- }
+ cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) |
+ CFG_CBSH_ENA;
+ if (state->visible)
+ cfg |= CFG_DMA_ENA;
- val = drm_rect_height(&state->dst) << 16 | drm_rect_width(&state->dst);
- if (dplane->base.state.dst_hw != val) {
- dplane->base.state.dst_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DZM_HPXL_VLN);
+ /*
+ * Shifting a YUV packed format image by one pixel causes the
+ * U/V planes to swap. Compensate for it by also toggling
+ * the UV swap.
+ */
+ format = state->fb->format;
+ src_x = state->src.x1 >> 16;
+ if (format->num_planes == 1 && src_x & (format->hsub - 1))
+ cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
+ cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
+ CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
+ CFG_DMA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_DMA_ENA : 0;
+ cfg_mask = CFG_DMA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
}
-
- val = state->dst.y1 << 16 | state->dst.x1;
- if (dplane->base.state.dst_yx != val) {
- dplane->base.state.dst_yx = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_OVSA_HPXL_VLN);
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_DMA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_DMA_HSMOOTH;
}
- if (dplane->base.state.ctrl0 != ctrl0) {
- dplane->base.state.ctrl0 = ctrl0;
- armada_reg_queue_mod(regs, idx, ctrl0,
- CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
- CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
- CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
- CFG_YUV2RGB) | CFG_DMA_ENA,
- LCD_SPU_DMA_CTRL0);
- dplane->base.state.vsync_update = true;
- }
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ val = armada_spu_contrast(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_contrast(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
+ val = armada_spu_saturation(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_saturation(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
+ if (!old_state->visible && state->visible)
+ armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
+ val = armada_csc(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_csc(old_state) != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
+ LCD_SPU_IOPAD_CONTROL);
+ val = drm_to_overlay_state(state)->colorkey_yr;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_yr != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
+ val = drm_to_overlay_state(state)->colorkey_ug;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_ug != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
+ val = drm_to_overlay_state(state)->colorkey_vb;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_vb != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
+ val = drm_to_overlay_state(state)->colorkey_mode;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_mode != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
+ CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ LCD_SPU_DMA_CTRL1);
+ val = drm_to_overlay_state(state)->colorkey_enable;
+ if (((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_enable != val) &&
+ dcrtc->variant->has_spu_adv_reg)
+ armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
+ ADV_VIDCOLORKEY, LCD_SPU_ADV_REG);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
- dplane->base.state.changed = idx != 0;
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
- armada_reg_queue_end(regs, idx);
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down the YUV FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_DMA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+ LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
}
+static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_overlay_plane_atomic_update,
+ .atomic_disable = armada_drm_overlay_plane_atomic_disable,
+};
+
static int
-armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+armada_overlay_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
trace_armada_ovl_plane_update(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->base.works[dplane->base.next_work];
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- armada_ovl_plane_update_state(&state, work->regs);
-
- if (!dplane->base.state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
- /* Just updating the position/size? */
- if (!dplane->base.state.vsync_update) {
- armada_ovl_plane_work(dcrtc, work);
- return 0;
- }
+static void armada_ovl_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
- if (!dcrtc->plane) {
- dcrtc->plane = plane;
- armada_ovl_update_attr(&dplane->prop, dcrtc);
+static void armada_overlay_reset(struct drm_plane *plane)
+{
+ struct armada_overlay_state *state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+ kfree(plane->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.plane = plane;
+ state->base.color_encoding = DEFAULT_ENCODING;
+ state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+ state->base.rotation = DRM_MODE_ROTATE_0;
+ state->colorkey_yr = 0xfefefe00;
+ state->colorkey_ug = 0x01010100;
+ state->colorkey_vb = 0x01010100;
+ state->colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ state->colorkey_enable = ADV_GRACOLORKEY;
+ state->brightness = DEFAULT_BRIGHTNESS;
+ state->contrast = DEFAULT_CONTRAST;
+ state->saturation = DEFAULT_SATURATION;
}
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret)
- DRM_ERROR("failed to queue plane work: %d\n", ret);
-
- dplane->base.next_work = !dplane->base.next_work;
-
- return 0;
+ plane->state = &state->base;
}
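This follows the standard pattern for subclassing drm_plane_state: free
whatever state exists, then allocate the driver's larger structure with its
defaults. It only works because base is the first member, so the generic
drm_atomic_helper_plane_destroy_state() used below can kfree() the enclosing
structure through the base pointer; a guard like the following (illustrative,
not part of the patch) makes the layout requirement explicit:

	BUILD_BUG_ON(offsetof(struct armada_overlay_state, base) != 0);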
-static void armada_ovl_plane_destroy(struct drm_plane *plane)
+struct drm_plane_state *
+armada_overlay_duplicate_state(struct drm_plane *plane)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
+ struct armada_overlay_state *state;
- drm_plane_cleanup(plane);
+ if (WARN_ON(!plane->state))
+ return NULL;
- kfree(dplane);
+ state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+ return &state->base;
}
-static int armada_ovl_plane_set_property(struct drm_plane *plane,
- struct drm_property *property, uint64_t val)
+static int armada_overlay_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
{
struct armada_private *priv = plane->dev->dev_private;
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- bool update_attr = false;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
if (property == priv->colorkey_prop) {
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
- dplane->prop.colorkey_yr = CCC(K2R(val));
- dplane->prop.colorkey_ug = CCC(K2G(val));
- dplane->prop.colorkey_vb = CCC(K2B(val));
+ drm_to_overlay_state(state)->colorkey_yr = CCC(K2R(val));
+ drm_to_overlay_state(state)->colorkey_ug = CCC(K2G(val));
+ drm_to_overlay_state(state)->colorkey_vb = CCC(K2B(val));
#undef CCC
- update_attr = true;
} else if (property == priv->colorkey_min_prop) {
- dplane->prop.colorkey_yr &= ~0x00ff0000;
- dplane->prop.colorkey_yr |= K2R(val) << 16;
- dplane->prop.colorkey_ug &= ~0x00ff0000;
- dplane->prop.colorkey_ug |= K2G(val) << 16;
- dplane->prop.colorkey_vb &= ~0x00ff0000;
- dplane->prop.colorkey_vb |= K2B(val) << 16;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 16;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 16;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 16;
} else if (property == priv->colorkey_max_prop) {
- dplane->prop.colorkey_yr &= ~0xff000000;
- dplane->prop.colorkey_yr |= K2R(val) << 24;
- dplane->prop.colorkey_ug &= ~0xff000000;
- dplane->prop.colorkey_ug |= K2G(val) << 24;
- dplane->prop.colorkey_vb &= ~0xff000000;
- dplane->prop.colorkey_vb |= K2B(val) << 24;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 24;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 24;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 24;
} else if (property == priv->colorkey_val_prop) {
- dplane->prop.colorkey_yr &= ~0x0000ff00;
- dplane->prop.colorkey_yr |= K2R(val) << 8;
- dplane->prop.colorkey_ug &= ~0x0000ff00;
- dplane->prop.colorkey_ug |= K2G(val) << 8;
- dplane->prop.colorkey_vb &= ~0x0000ff00;
- dplane->prop.colorkey_vb |= K2B(val) << 8;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 8;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 8;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 8;
} else if (property == priv->colorkey_alpha_prop) {
- dplane->prop.colorkey_yr &= ~0x000000ff;
- dplane->prop.colorkey_yr |= K2R(val);
- dplane->prop.colorkey_ug &= ~0x000000ff;
- dplane->prop.colorkey_ug |= K2G(val);
- dplane->prop.colorkey_vb &= ~0x000000ff;
- dplane->prop.colorkey_vb |= K2B(val);
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val);
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val);
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val);
} else if (property == priv->colorkey_mode_prop) {
- dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
- dplane->prop.colorkey_mode |= CFG_CKMODE(val);
- update_attr = true;
+ if (val == CKMODE_DISABLE) {
+ drm_to_overlay_state(state)->colorkey_mode =
+ CFG_CKMODE(CKMODE_DISABLE) |
+ CFG_ALPHAM_CFG | CFG_ALPHA(255);
+ drm_to_overlay_state(state)->colorkey_enable = 0;
+ } else {
+ drm_to_overlay_state(state)->colorkey_mode =
+ CFG_CKMODE(val) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ drm_to_overlay_state(state)->colorkey_enable =
+ ADV_GRACOLORKEY;
+ }
} else if (property == priv->brightness_prop) {
- dplane->prop.brightness = val - 256;
- update_attr = true;
+ drm_to_overlay_state(state)->brightness = val - 256;
} else if (property == priv->contrast_prop) {
- dplane->prop.contrast = val;
- update_attr = true;
+ drm_to_overlay_state(state)->contrast = val;
} else if (property == priv->saturation_prop) {
- dplane->prop.saturation = val;
- update_attr = true;
+ drm_to_overlay_state(state)->saturation = val;
+ } else {
+ return -EINVAL;
}
+ return 0;
+}
- if (update_attr && dplane->base.base.crtc)
- armada_ovl_update_attr(&dplane->prop,
- drm_to_armada_crtc(dplane->base.base.crtc));
+static int armada_overlay_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state, struct drm_property *property,
+ uint64_t *val)
+{
+ struct armada_private *priv = plane->dev->dev_private;
+#define C2K(c,s) (((c) >> (s)) & 0xff)
+#define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
+ if (property == priv->colorkey_prop) {
+ /* Best effort: recover the key only if min/max/val agree */
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ /* If min != max, or min != val, error out */
+ if (*val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24) ||
+ *val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8))
+ return -EINVAL;
+ } else if (property == priv->colorkey_min_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ } else if (property == priv->colorkey_max_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24);
+ } else if (property == priv->colorkey_val_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8);
+ } else if (property == priv->colorkey_alpha_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 0);
+ } else if (property == priv->colorkey_mode_prop) {
+ *val = (drm_to_overlay_state(state)->colorkey_mode &
+ CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+ } else if (property == priv->brightness_prop) {
+ *val = drm_to_overlay_state(state)->brightness + 256;
+ } else if (property == priv->contrast_prop) {
+ *val = drm_to_overlay_state(state)->contrast;
+ } else if (property == priv->saturation_prop) {
+ *val = drm_to_overlay_state(state)->saturation;
+ } else {
+ return -EINVAL;
+ }
return 0;
}
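The C2K()/R2BGR() pair reverses the CCC()/K2R() packing performed in
set_property above; byte lanes 24, 16, 8 and 0 of each channel register hold
the max, min, val and alpha components respectively. A round trip with the
default key 0x0101fe (the value attached to colorkey_prop below):

	u32 yr = 0xfefefe00, ug = 0x01010100, vb = 0x01010100;
	u32 key = R2BGR(yr, ug, vb, 16);	/* == 0x0101fe */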
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
- .update_plane = armada_ovl_plane_update,
- .disable_plane = armada_drm_plane_disable,
+ .update_plane = armada_overlay_plane_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = armada_ovl_plane_destroy,
- .set_property = armada_ovl_plane_set_property,
+ .reset = armada_overlay_reset,
+ .atomic_duplicate_state = armada_overlay_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_set_property = armada_overlay_set_property,
+ .atomic_get_property = armada_overlay_get_property,
};
static const uint32_t armada_ovl_formats[] = {
@@ -419,46 +541,31 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
struct armada_private *priv = dev->dev_private;
struct drm_mode_object *mobj;
- struct armada_ovl_plane *dplane;
+ struct drm_plane *overlay;
int ret;
ret = armada_overlay_create_properties(dev);
if (ret)
return ret;
- dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
- if (!dplane)
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
return -ENOMEM;
- ret = armada_drm_plane_init(&dplane->base);
- if (ret) {
- kfree(dplane);
- return ret;
- }
-
- dplane->base.works[0].fn = armada_ovl_plane_work;
- dplane->base.works[1].fn = armada_ovl_plane_work;
+ drm_plane_helper_add(overlay, &armada_overlay_plane_helper_funcs);
- ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
+ ret = drm_universal_plane_init(dev, overlay, crtcs,
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
- kfree(dplane);
+ kfree(overlay);
return ret;
}
- dplane->prop.colorkey_yr = 0xfefefe00;
- dplane->prop.colorkey_ug = 0x01010100;
- dplane->prop.colorkey_vb = 0x01010100;
- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
- dplane->prop.brightness = 0;
- dplane->prop.contrast = 0x4000;
- dplane->prop.saturation = 0x4000;
-
- mobj = &dplane->base.base.base;
+ mobj = &overlay->base;
drm_object_attach_property(mobj, priv->colorkey_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_min_prop,
@@ -471,11 +578,19 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
0x000000);
drm_object_attach_property(mobj, priv->colorkey_mode_prop,
CKMODE_RGB);
- drm_object_attach_property(mobj, priv->brightness_prop, 256);
+ drm_object_attach_property(mobj, priv->brightness_prop,
+ 256 + DEFAULT_BRIGHTNESS);
drm_object_attach_property(mobj, priv->contrast_prop,
- dplane->prop.contrast);
+ DEFAULT_CONTRAST);
drm_object_attach_property(mobj, priv->saturation_prop,
- dplane->prop.saturation);
+ DEFAULT_SATURATION);
- return 0;
+ ret = drm_plane_create_color_properties(overlay,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DEFAULT_ENCODING,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
new file mode 100644
index 000000000000..9f36423dd394
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include "armada_plane.h"
+#include "armada_trace.h"
+
+static const uint32_t armada_primary_formats[] = {
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced)
+{
+ struct drm_framebuffer *fb = state->fb;
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
+ unsigned int x = state->src.x1 >> 16;
+ unsigned int y = state->src.y1 >> 16;
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ int i;
+
+ DRM_DEBUG_KMS("pitch %u x %d y %d bpp %d\n",
+ fb->pitches[0], x, y, format->cpp[0] * 8);
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ addrs[0][0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+ pitches[0] = fb->pitches[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++) {
+ addrs[0][i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * format->cpp[i];
+ pitches[i] = fb->pitches[i];
+ }
+ for (; i < 3; i++) {
+ addrs[0][i] = 0;
+ pitches[i] = 0;
+ }
+ if (interlaced) {
+ for (i = 0; i < 3; i++) {
+ addrs[1][i] = addrs[0][i] + pitches[i];
+ pitches[i] *= 2;
+ }
+ } else {
+ for (i = 0; i < 3; i++)
+ addrs[1][i] = addrs[0][i];
+ }
+}
+
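/*
 * How to read armada_drm_plane_calc(): addrs[0][i] is the start address
 * of colour plane i for the first field, addrs[1][i] for the second.
 * Interlaced scanout offsets the second field by one line and doubles
 * the pitch so each field skips the other's lines; progressive scanout
 * leaves both field entries identical. A usage sketch:
 *
 *	u32 addrs[2][3];
 *	u16 pitches[3];
 *
 *	armada_drm_plane_calc(plane->state, addrs, pitches, interlaced);
 */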
+static unsigned armada_drm_crtc_calc_fb(struct drm_plane_state *state,
+ struct armada_regs *regs, bool interlaced)
+{
+ u16 pitches[3];
+ u32 addrs[2][3];
+ unsigned i = 0;
+
+ armada_drm_plane_calc(state, addrs, pitches, interlaced);
+
+ /* write offset, base, and pitch */
+ armada_reg_queue_set(regs, i, addrs[0][0], LCD_CFG_GRA_START_ADDR0);
+ armada_reg_queue_set(regs, i, addrs[1][0], LCD_CFG_GRA_START_ADDR1);
+ armada_reg_queue_mod(regs, i, pitches[0], 0xffff, LCD_CFG_GRA_PITCH);
+
+ return i;
+}
+
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ state->fb ? state->fb->base.id : 0);
+
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ if (state->fb)
+ drm_framebuffer_get(state->fb);
+ return 0;
+}
+
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->fb ? old_state->fb->base.id : 0);
+
+ if (old_state->fb)
+ drm_framebuffer_put(old_state->fb);
+}
+
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb && !WARN_ON(!state->crtc)) {
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ else
+ crtc_state = crtc->state;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ 0, INT_MAX,
+ true, false);
+ } else {
+ state->visible = false;
+ }
+ return 0;
+}
+
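/*
 * Helper argument order above: min_scale, max_scale, can_position,
 * can_update_disabled. The 0/INT_MAX pair allows arbitrary scaling,
 * true lets the plane sit anywhere inside the CRTC, and false rejects
 * updates while the CRTC is disabled.
 */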
+static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ u32 cfg, cfg_mask, val;
+ unsigned int idx;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
+
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ idx = 0;
+ if (!old_state->visible && state->visible) {
+ val = CFG_PDWN64x66;
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ val |= CFG_PDWN256x24;
+ armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
+ }
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ idx += armada_drm_crtc_calc_fb(state, regs + idx,
+ dcrtc->interlaced);
+ }
+ if (old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod);
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ cfg |= CFG_PALETTE_ENA;
+ if (state->visible)
+ cfg |= CFG_GRA_ENA;
+ if (dcrtc->interlaced)
+ cfg |= CFG_GRA_FTOGGLE;
+ cfg_mask = CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
+ CFG_GRA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_GRA_ENA : 0;
+ cfg_mask = CFG_GRA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
+ }
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_GRA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_GRA_HSMOOTH;
+ }
+
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
+
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down most RAMs and FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_GRA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN256x32 | CFG_PDWN256x24 |
+ CFG_PDWN256x8 | CFG_PDWN32x32 | CFG_PDWN64x66,
+ 0, LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
+}
+
+static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_primary_plane_atomic_update,
+ .atomic_disable = armada_drm_primary_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs armada_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary)
+{
+ int ret;
+
+ drm_plane_helper_add(primary, &armada_primary_plane_helper_funcs);
+
+ ret = drm_universal_plane_init(drm, primary, 0,
+ &armada_primary_plane_funcs,
+ armada_primary_formats,
+ ARRAY_SIZE(armada_primary_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+
+ return ret;
+}
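armada_drm_primary_plane_init() leaves allocation to the caller; a plausible call site looks like this (a sketch only — the surrounding CRTC code and error handling are assumed):

struct drm_plane *primary;
int ret;

primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (!primary)
	return -ENOMEM;

ret = armada_drm_primary_plane_init(drm, primary);
if (ret) {
	kfree(primary);
	return ret;
}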
diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h
new file mode 100644
index 000000000000..ff4281ba7fad
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.h
@@ -0,0 +1,15 @@
+#ifndef ARMADA_PLANE_H
+#define ARMADA_PLANE_H
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced);
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state);
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary);
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 036dff8a1f33..5e77d456d9bb 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -790,12 +790,12 @@ static int ast_get_modes(struct drm_connector *connector)
if (!flags)
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+ drm_connector_update_edid_property(&ast_connector->base, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
} else
- drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ drm_connector_update_edid_property(&ast_connector->base, NULL);
return 0;
}
@@ -900,7 +900,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ast_connector->i2c = ast_i2c_create(dev);
if (!ast_connector->i2c)
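The two ast hunks above are instances of a mechanical, tree-wide rename that recurs throughout the rest of this diff; only the function names change, not the signatures:

/* before */
drm_mode_connector_update_edid_property(connector, edid);
drm_mode_connector_attach_encoder(connector, encoder);

/* after */
drm_connector_update_edid_property(connector, edid);
drm_connector_attach_encoder(connector, encoder);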
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c1ea5c36b006..843cac222e60 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -681,6 +681,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
+ drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
pm_runtime_get_sync(dev->dev);
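The added drm_atomic_helper_shutdown() call commits a state with every CRTC disabled before the mode configuration is freed, so unload never tears down objects that are still live. The resulting ordering is the standard atomic-driver pattern (sketch):

drm_kms_helper_poll_fini(dev);   /* stop connector polling first */
drm_atomic_helper_shutdown(dev); /* switch all outputs off */
drm_mode_config_cleanup(dev);    /* now safe to free mode objects */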
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 47e0992f3908..04440064b9b7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -412,9 +412,10 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}
-static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_state *state)
{
- struct drm_crtc *crtc = plane->base.crtc;
+ struct drm_crtc *crtc = state->base.crtc;
struct drm_color_lut *lut;
int idx;
@@ -779,7 +780,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
- atmel_hlcdc_plane_update_clut(plane);
+ atmel_hlcdc_plane_update_clut(plane, state);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
@@ -816,16 +817,6 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
-static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
-{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-
- if (plane->base.fb)
- drm_framebuffer_put(plane->base.fb);
-
- drm_plane_cleanup(p);
-}
-
static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@@ -1002,7 +993,7 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
static const struct drm_plane_funcs layer_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = atmel_hlcdc_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = atmel_hlcdc_plane_reset,
.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
.atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 233980a78591..ca5a9afdd5cf 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -259,7 +259,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs_crtc_init(bochs->dev);
bochs_encoder_init(bochs->dev);
bochs_connector_init(bochs->dev);
- drm_mode_connector_attach_encoder(&bochs->connector,
+ drm_connector_attach_encoder(&bochs->connector,
&bochs->encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index fa2c7997e2fd..bf6cad6c9178 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -82,9 +82,11 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
- depends on OF && RC_CORE
+ depends on OF
select DRM_KMS_HELPER
imply EXTCON
+ select INPUT
+ select RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 73021b388e12..85c2d407a52e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work)
else
status = connector_status_disconnected;
+ /*
+ * The bridge resets its registers on unplug. So when we get a plug
+ * event and we're already supposed to be powered, cycle the bridge to
+ * restore its state.
+ */
+ if (status == connector_status_connected &&
+ adv7511->connector.status == connector_status_disconnected &&
+ adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ }
+
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
if (status == connector_status_disconnected)
@@ -601,7 +613,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
__adv7511_power_off(adv7511);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
@@ -860,7 +872,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
}
drm_connector_helper_add(&adv->connector,
&adv7511_connector_helper_funcs);
- drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+ drm_connector_attach_encoder(&adv->connector, bridge->encoder);
if (adv->type == ADV7533)
ret = adv7533_attach_dsi(adv);
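The hpd_work change above leans on the regmap cache: marking the cache dirty makes the next sync rewrite every cached register, which is how the power-on path restores the bridge after it lost its state on unplug. The generic regmap idiom (a sketch):

regcache_mark_dirty(map); /* device lost its register contents */
/* ... restore power to the device ... */
regcache_sync(map);       /* replay all cached values to hardware */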
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index b49043866be6..f8433c93f463 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -969,8 +969,8 @@ static int anx78xx_get_modes(struct drm_connector *connector)
goto unlock;
}
- err = drm_mode_connector_update_edid_property(connector,
- anx78xx->edid);
+ err = drm_connector_update_edid_property(connector,
+ anx78xx->edid);
if (err) {
DRM_ERROR("Failed to update EDID property: %d\n", err);
goto unlock;
@@ -1048,8 +1048,8 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge)
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
- err = drm_mode_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ err = drm_connector_attach_encoder(&anx78xx->connector,
+ bridge->encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 2bcbfadb6ac5..d68986cea132 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1119,8 +1119,8 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &dp->aux.ddc);
pm_runtime_put(dp->dev);
if (edid) {
- drm_mode_connector_update_edid_property(&dp->connector,
- edid);
+ drm_connector_update_edid_property(&dp->connector,
+ edid);
num_modes += drm_add_edid_modes(&dp->connector, edid);
kfree(edid);
}
@@ -1210,7 +1210,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
drm_connector_helper_add(connector,
&analogix_dp_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
}
/*
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index c255fc3e1be5..ce9496d13986 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1152,7 +1152,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
np = of_node_get(dev->dev.of_node);
panel = of_drm_find_panel(np);
- if (panel) {
+ if (!IS_ERR(panel)) {
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
@@ -1337,7 +1337,7 @@ static const struct mipi_dsi_host_ops cdns_dsi_ops = {
.transfer = cdns_dsi_transfer,
};
-static int cdns_dsi_resume(struct device *dev)
+static int __maybe_unused cdns_dsi_resume(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
@@ -1350,7 +1350,7 @@ static int cdns_dsi_resume(struct device *dev)
return 0;
}
-static int cdns_dsi_suspend(struct device *dev)
+static int __maybe_unused cdns_dsi_suspend(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
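Tagging the PM callbacks __maybe_unused silences -Wunused-function in builds where the PM config is disabled, because the SET_*_PM_OPS wiring then compiles away its references. A sketch of that wiring (the ops struct shown here is illustrative, not necessarily what cdns-dsi actually uses):

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdns_dsi_suspend, cdns_dsi_resume)
	/* expands to nothing when CONFIG_PM_SLEEP=n, hence __maybe_unused */
};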
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 9837c8d69e69..9b706789a341 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -55,7 +55,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
goto fallback;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
@@ -122,7 +122,7 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&vga->connector,
+ drm_connector_attach_encoder(&vga->connector,
bridge->encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 75b0d3f6e4de..f56c92f7af7c 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -68,9 +68,9 @@ static int lvds_encoder_probe(struct platform_device *pdev)
panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- if (!panel) {
+ if (IS_ERR(panel)) {
dev_dbg(&pdev->dev, "panel not found, deferring probe\n");
- return -EPROBE_DEFER;
+ return PTR_ERR(panel);
}
lvds_encoder->panel_bridge =
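This hunk and the cdns-dsi one earlier adapt to of_drm_find_panel() returning an ERR_PTR, typically ERR_PTR(-EPROBE_DEFER), instead of NULL. Callers now propagate the encoded error (sketch):

panel = of_drm_find_panel(panel_node);
if (IS_ERR(panel))
	return PTR_ERR(panel); /* usually -EPROBE_DEFER until the panel probes */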
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 7ccadba7c98c..2136c97aeb8e 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -152,7 +152,7 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
ge_b850v3_lvds_ptr->edid = (struct edid *)stdp2690_get_edid(client);
if (ge_b850v3_lvds_ptr->edid) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
ge_b850v3_lvds_ptr->edid);
num_modes = drm_add_edid_modes(connector,
ge_b850v3_lvds_ptr->edid);
@@ -241,7 +241,7 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
return ret;
}
- ret = drm_mode_connector_attach_encoder(connector, bridge->encoder);
+ ret = drm_connector_attach_encoder(connector, bridge->encoder);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d64a3283822a..a3e817abace1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -222,7 +222,7 @@ static int ptn3460_get_modes(struct drm_connector *connector)
}
ptn_bridge->edid = (struct edid *)edid;
- drm_mode_connector_update_edid_property(connector, ptn_bridge->edid);
+ drm_connector_update_edid_property(connector, ptn_bridge->edid);
num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
@@ -265,7 +265,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&ptn_bridge->connector,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
- drm_mode_connector_attach_encoder(&ptn_bridge->connector,
+ drm_connector_attach_encoder(&ptn_bridge->connector,
bridge->encoder);
if (ptn_bridge->panel)
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 6d99d4a3beb3..7cbaba213ef6 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -79,7 +79,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&panel_bridge->connector,
+ drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 81198f5e9afa..7334d1b62b71 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -503,7 +503,7 @@ static int ps8622_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&ps8622->connector,
&ps8622_connector_helper_funcs);
drm_connector_register(&ps8622->connector);
- drm_mode_connector_attach_encoder(&ps8622->connector,
+ drm_connector_attach_encoder(&ps8622->connector,
bridge->encoder);
if (ps8622->panel)
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 60373d7eb220..e59a13542333 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -170,7 +170,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
return ret;
edid = drm_get_edid(connector, sii902x->i2c->adapter);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
num = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -324,7 +324,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge)
else
sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
- drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 250effa0e6b8..a6e8f4591e63 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -14,6 +14,7 @@
#include <drm/bridge/mhl.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -72,9 +73,7 @@ struct sii8620 {
struct regulator_bulk_data supplies[2];
struct mutex lock; /* context lock, protects fields below */
int error;
- int pixel_clock;
unsigned int use_packed_pixel:1;
- int video_code;
enum sii8620_mode mode;
enum sii8620_sink_type sink_type;
u8 cbus_status;
@@ -82,7 +81,6 @@ struct sii8620 {
u8 xstat[MHL_XDS_SIZE];
u8 devcap[MHL_DCAP_SIZE];
u8 xdevcap[MHL_XDC_SIZE];
- u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
bool feature_complete;
bool devcap_read;
bool sink_detected;
@@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx)
static void sii8620_set_format(struct sii8620 *ctx)
{
+ u8 out_fmt;
+
if (sii8620_is_mhl3(ctx)) {
sii8620_setbits(ctx, REG_M3_P0CTRL,
BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
ctx->use_packed_pixel ? ~0 : 0);
} else {
+ if (ctx->use_packed_pixel) {
+ sii8620_write_seq_static(ctx,
+ REG_VID_MODE, BIT_VID_MODE_M1080P,
+ REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+ REG_MHLTX_CTL6, 0x60
+ );
+ } else {
sii8620_write_seq_static(ctx,
REG_VID_MODE, 0,
REG_MHL_TOP_CTL, 1,
REG_MHLTX_CTL6, 0xa0
);
+ }
}
+ if (ctx->use_packed_pixel)
+ out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
+ else
+ out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
sii8620_write_seq(ctx,
REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
- REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, out_fmt,
);
}
@@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
return frm_len;
}
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+ struct drm_display_mode *mode)
{
struct mhl3_infoframe mhl_frm;
union hdmi_infoframe frm;
u8 buf[31];
int ret;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+ mode,
+ true);
+ if (ctx->use_packed_pixel)
+ frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+ if (!ret)
+ ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+ if (ret > 0)
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
sii8620_write(ctx, REG_TPI_SC,
BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
- ARRAY_SIZE(ctx->avif) - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
return;
}
- ret = hdmi_avi_infoframe_init(&frm.avi);
- frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
- frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
- frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
- frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
- frm.avi.video_code = ctx->video_code;
- if (!ret)
- ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
- if (ret > 0)
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
static void sii8620_start_video(struct sii8620 *ctx)
{
+ struct drm_display_mode *mode =
+ &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
if (!sii8620_is_mhl3(ctx))
sii8620_stop_video(ctx);
@@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
sii8620_set_format(ctx);
if (!sii8620_is_mhl3(ctx)) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+ u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+ if (ctx->use_packed_pixel)
+ link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
sii8620_set_auto_zone(ctx);
} else {
static const struct {
@@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
};
u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
- int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+ int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
int i;
for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
@@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
clk_spec[i].link_rate);
}
- sii8620_set_infoframes(ctx);
+ sii8620_set_infoframes(ctx, mode);
}
static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
static void sii8620_status_changed_path(struct sii8620 *ctx)
{
- if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL
- | MHL_DST_LM_PATH_ENABLED);
- } else {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL);
- }
+ u8 link_mode;
+
+ if (ctx->use_packed_pixel)
+ link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ link_mode);
}
static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
mutex_lock(&ctx->lock);
ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
- ctx->video_code = drm_match_cea_mode(adjusted_mode);
- ctx->pixel_clock = adjusted_mode->clock;
mutex_unlock(&ctx->lock);
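With pixel_clock and video_code no longer cached at mode_fixup time, everything is derived from the adjusted mode when video actually starts. The MHL3 link-clock computation then reduces to bytes-per-pixel times pixel clock; distilled (the helper name is hypothetical):

static int sii8620_link_clk_khz(const struct drm_display_mode *mode,
				bool packed_pixel)
{
	/* 2 bytes/pixel for packed YCbCr 4:2:2, 3 for RGB888 */
	return mode->clock * (packed_pixel ? 2 : 3);
}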
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 3c136f2b954f..5971976284bf 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1922,7 +1922,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1974,7 +1974,7 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 0fd9cf27542c..8e28e738cb52 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1140,7 +1140,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
if (!edid)
return 0;
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
return count;
@@ -1195,7 +1195,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
- drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+ drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index acb857030951..c3e32138c6bb 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -62,7 +62,7 @@ static int tfp410_get_modes(struct drm_connector *connector)
goto fallback;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
fallback:
@@ -132,7 +132,7 @@ static int tfp410_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&dvi->connector,
+ drm_connector_attach_encoder(&dvi->connector,
bridge->encoder);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index be2d7e488062..ce9db7aab225 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -92,7 +92,6 @@
#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
struct cirrus_crtc {
struct drm_crtc base;
@@ -117,11 +116,6 @@ struct cirrus_connector {
struct drm_connector base;
};
-struct cirrus_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct cirrus_mc {
resource_size_t vram_size;
resource_size_t vram_base;
@@ -152,7 +146,7 @@ struct cirrus_device {
struct cirrus_fbdev {
struct drm_fb_helper helper;
- struct cirrus_framebuffer gfb;
+ struct drm_framebuffer gfb;
void *sysram;
int size;
int x1, y1, x2, y2; /* dirty rect */
@@ -198,7 +192,7 @@ int cirrus_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args);
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32fbfba2c623..b643ac92801c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
- int bpp = afbdev->gfb.base.format->cpp[0];
+ int bpp = afbdev->gfb.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
- obj = afbdev->gfb.obj;
+ obj = afbdev->gfb.obj[0];
bo = gem_to_cirrus_bo(obj);
/*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
}
for (i = y; i < y + height; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
}
@@ -204,7 +204,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
gfbdev->sysram = sysram;
gfbdev->size = size;
- fb = &gfbdev->gfb.base;
+ fb = &gfbdev->gfb;
if (!fb) {
DRM_INFO("fb is NULL\n");
return -EINVAL;
@@ -246,19 +246,19 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+ struct drm_framebuffer *gfb = &gfbdev->gfb;
drm_fb_helper_unregister_fbi(&gfbdev->helper);
- if (gfb->obj) {
- drm_gem_object_put_unlocked(gfb->obj);
- gfb->obj = NULL;
+ if (gfb->obj[0]) {
+ drm_gem_object_put_unlocked(gfb->obj[0]);
+ gfb->obj[0] = NULL;
}
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
- drm_framebuffer_unregister_private(&gfb->base);
- drm_framebuffer_cleanup(&gfb->base);
+ drm_framebuffer_unregister_private(gfb);
+ drm_framebuffer_cleanup(gfb);
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 26df1e8cd490..60d54e10a34d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -10,42 +10,25 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "cirrus_drv.h"
-static int cirrus_create_handle(struct drm_framebuffer *fb,
- struct drm_file* file_priv,
- unsigned int* handle)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
-}
-
-static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- drm_gem_object_put_unlocked(cirrus_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
- .create_handle = cirrus_create_handle,
- .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
- drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
- gfb->obj = obj;
- ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
+ gfb->obj[0] = obj;
+ ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
@@ -60,7 +43,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
{
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
+ struct drm_framebuffer *fb;
u32 bpp;
int ret;
@@ -74,19 +57,19 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
if (obj == NULL)
return ERR_PTR(-ENOENT);
- cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
- if (!cirrus_fb) {
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb) {
drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
- ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
drm_gem_object_put_unlocked(obj);
- kfree(cirrus_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return &cirrus_fb->base;
+ return fb;
}
static const struct drm_mode_config_funcs cirrus_mode_funcs = {
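With the GEM object held in the core framebuffer's obj[] array, cirrus can drop its private wrapper struct and use the generic handle/destroy helpers; fetching the object back is one helper call (sketch):

#include <drm/drm_gem_framebuffer_helper.h>

struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, 0); /* same as fb->obj[0] */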
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index c91b9b054e3f..336bfda40125 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -101,17 +101,13 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
int x, int y, int atomic)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
struct cirrus_bo *bo;
int ret;
u64 gpu_addr;
/* push the previous fb to system ram */
if (!atomic && fb) {
- cirrus_fb = to_cirrus_framebuffer(fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
return ret;
@@ -119,9 +115,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
cirrus_bo_unreserve(bo);
}
- cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
@@ -133,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
return ret;
}
- if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+ if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)
@@ -536,7 +530,7 @@ int cirrus_modeset_init(struct cirrus_device *cdev)
return -1;
}
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ret = cirrus_fbdev_init(cdev);
if (ret) {
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 895741e9cd7d..3eb061e11e2e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -325,6 +326,35 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
return fence_ptr;
}
+static int set_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ s32 __user *fence_ptr)
+{
+ unsigned int index = drm_connector_index(connector);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ state->connectors[index].out_fence_ptr = fence_ptr;
+
+ return 0;
+}
+
+static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ unsigned int index = drm_connector_index(connector);
+ s32 __user *fence_ptr;
+
+ fence_ptr = state->connectors[index].out_fence_ptr;
+ state->connectors[index].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
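/*
 * Userspace view of the out-fence plumbing above (a sketch; the
 * property-id variable name is hypothetical): the property value is a
 * user pointer to an s32, pre-written with -1 by put_user() above and
 * filled with a sync_file fd once the commit has been queued.
 *
 *	s32 out_fence = -1;
 *
 *	drmModeAtomicAddProperty(req, conn_id, writeback_out_fence_ptr,
 *				 (uint64_t)(uintptr_t)&out_fence);
 */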
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -339,6 +369,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_crtc *crtc = state->crtc;
struct drm_mode_modeinfo umode;
/* Early return for no change. */
@@ -359,13 +390,13 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
drm_mode_copy(&state->mode, mode);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- mode->name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ mode->name, crtc->base.id, crtc->name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -388,6 +419,8 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob)
{
+ struct drm_crtc *crtc = state->crtc;
+
if (blob == state->mode_blob)
return 0;
@@ -397,19 +430,34 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
memset(&state->mode, 0, sizeof(state->mode));
if (blob) {
- if (blob->length != sizeof(struct drm_mode_modeinfo) ||
- drm_mode_convert_umode(state->crtc->dev, &state->mode,
- blob->data))
+ int ret;
+
+ if (blob->length != sizeof(struct drm_mode_modeinfo)) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
+ crtc->base.id, crtc->name,
+ blob->length);
return -EINVAL;
+ }
+
+ ret = drm_mode_convert_umode(crtc->dev,
+ &state->mode, blob->data);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+ crtc->base.id, crtc->name,
+ ret, drm_get_mode_status_name(state->mode.status));
+ drm_mode_debug_printmodeline(&state->mode);
+ return -EINVAL;
+ }
state->mode_blob = drm_property_blob_get(blob);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- state->mode.name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ state->mode.name, crtc->base.id, crtc->name,
+ state);
} else {
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -539,10 +587,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
return -EFAULT;
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
- } else if (crtc->funcs->atomic_set_property)
+ } else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
- else
+ } else {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
return -EINVAL;
+ }
return 0;
}
@@ -677,6 +729,51 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_connector_check - check connector state
+ * @connector: connector to check
+ * @state: connector state to check
+ *
+ * Provides core sanity checks for connector state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_connector_check(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct drm_crtc_state *crtc_state = NULL;
+ struct drm_writeback_job *writeback_job = state->writeback_job;
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
+ return 0;
+
+ if (writeback_job->fb && !state->crtc) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ if (state->crtc)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+
+ if (writeback_job->fb && !crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
+ connector->base.id, connector->name,
+ state->crtc->base.id);
+ return -EINVAL;
+ }
+
+ if (writeback_job->out_fence && !writeback_job->fb) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
@@ -700,6 +797,11 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
WARN_ON(!state->acquire_ctx);
+ /* the legacy pointers should never be set */
+ WARN_ON(plane->fb);
+ WARN_ON(plane->old_fb);
+ WARN_ON(plane->crtc);
+
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -794,8 +896,11 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == plane->alpha_property) {
state->alpha = val;
} else if (property == plane->rotation_property) {
- if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
+ if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
+ plane->base.id, plane->name, val);
return -EINVAL;
+ }
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -807,6 +912,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -914,10 +1022,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* either *both* CRTC and FB must be set, or neither */
if (state->crtc && !state->fb) {
- DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
+ plane->base.id, plane->name);
return -EINVAL;
} else if (state->fb && !state->crtc) {
- DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -927,7 +1037,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
- DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
+ DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
+ state->crtc->base.id, state->crtc->name,
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -936,7 +1048,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
+ plane->base.id, plane->name,
drm_get_format_name(state->fb->format->format,
&format_name),
state->fb->modifier);
@@ -948,7 +1061,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
- DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane->base.id, plane->name,
state->crtc_w, state->crtc_h,
state->crtc_x, state->crtc_y);
return -ERANGE;
@@ -962,8 +1076,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
- DRM_DEBUG_ATOMIC("Invalid source coordinates "
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ plane->base.id, plane->name,
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
@@ -996,6 +1111,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
drm_printf(p, "\trotation=%x\n", state->rotation);
+ drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
drm_printf(p, "\tcolor-encoding=%s\n",
drm_get_color_encoding_name(state->color_encoding));
drm_printf(p, "\tcolor-range=%s\n",
@@ -1120,6 +1236,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
+ obj_state->state = state;
state->num_private_objs = num_objs;
@@ -1278,6 +1395,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->link_status = val;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
+ } else if (property == config->content_type_property) {
+ state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
} else if (property == connector->content_protection_property) {
@@ -1286,10 +1405,24 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == config->writeback_fb_id_property) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+
+ if (fb)
+ drm_framebuffer_put(fb);
+ return ret;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+ return set_out_fence_for_connector(state->state, connector,
+ fence_ptr);
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
+ connector->base.id, connector->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -1304,6 +1437,10 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (state->writeback_job && state->writeback_job->fb)
+ drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
+
if (connector->funcs->atomic_print_state)
connector->funcs->atomic_print_state(p, state);
}
@@ -1363,10 +1500,17 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->link_status;
} else if (property == config->aspect_ratio_property) {
*val = state->picture_aspect_ratio;
+ } else if (property == config->content_type_property) {
+ *val = state->content_type;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
*val = state->content_protection;
+ } else if (property == config->writeback_fb_id_property) {
+ /* Writeback framebuffer is one-shot, write and forget */
+ *val = 0;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ *val = 0;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1442,7 +1586,7 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
- crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
+ crtc_state->plane_mask &= ~drm_plane_mask(plane);
}
plane_state->crtc = crtc;
@@ -1452,15 +1596,16 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- crtc_state->plane_mask |= (1 << drm_plane_index(plane));
+ crtc_state->plane_mask |= drm_plane_mask(plane);
}
if (crtc)
- DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
- plane_state, crtc->base.id, crtc->name);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
+ plane->base.id, plane->name, plane_state,
+ crtc->base.id, crtc->name);
else
- DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
- plane_state);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
+ plane->base.id, plane->name, plane_state);
return 0;
}
@@ -1480,12 +1625,15 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
+ struct drm_plane *plane = plane_state->plane;
+
if (fb)
- DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
- fb->base.id, plane_state);
- else
- DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+ fb->base.id, plane->base.id, plane->name,
plane_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+ plane->base.id, plane->name, plane_state);
drm_framebuffer_assign(&plane_state->fb, fb);
}
@@ -1546,6 +1694,7 @@ int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc)
{
+ struct drm_connector *connector = conn_state->connector;
struct drm_crtc_state *crtc_state;
if (conn_state->crtc == crtc)
@@ -1556,7 +1705,7 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
conn_state->crtc);
crtc_state->connector_mask &=
- ~(1 << drm_connector_index(conn_state->connector));
+ ~drm_connector_mask(conn_state->connector);
drm_connector_put(conn_state->connector);
conn_state->crtc = NULL;
@@ -1568,15 +1717,17 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
return PTR_ERR(crtc_state);
crtc_state->connector_mask |=
- 1 << drm_connector_index(conn_state->connector);
+ drm_connector_mask(conn_state->connector);
drm_connector_get(conn_state->connector);
conn_state->crtc = crtc;
- DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
conn_state, crtc->base.id, crtc->name);
} else {
- DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
+ connector->base.id, connector->name,
conn_state);
}
@@ -1584,6 +1735,70 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
+/*
+ * drm_atomic_get_writeback_job - return or allocate a writeback job
+ * @conn_state: Connector state to get the job for
+ *
+ * Writeback jobs have a different lifetime to the atomic state they are
+ * associated with. This convenience function takes care of allocating a job
+ * if there isn't yet one associated with the connector state, otherwise
+ * it just returns the existing job.
+ *
+ * Returns: The writeback job for the given connector state
+ */
+static struct drm_writeback_job *
+drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job)
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+
+ return conn_state->writeback_job;
+}
+
+/**
+ * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
+ * @conn_state: atomic state object for the connector
+ * @fb: fb to use for the connector
+ *
+ * This is used to set the framebuffer for a writeback connector, which outputs
+ * to a buffer instead of an actual physical connector.
+ * Changing the assigned framebuffer requires us to grab a reference to the new
+ * fb and drop the reference to the old fb, if there is one. This function
+ * takes care of all these details besides updating the pointer in the
+ * state object itself.
+ *
+ * Note: The only way conn_state can already have an fb set is if the commit
+ * sets the property more than once.
+ *
+ * See also: drm_writeback_connector_init()
+ *
+ * Returns: 0 on success
+ */
+int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ struct drm_writeback_job *job =
+ drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ drm_framebuffer_assign(&job->fb, fb);
+
+ if (fb)
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
+ fb->base.id, conn_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
+ conn_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
+
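/*
 * Driver-side counterpart, not part of this patch (a sketch assuming
 * the drm_writeback_queue_job(connector, job) API introduced alongside
 * writeback connectors): the encoder's commit path takes the job
 * recorded in the connector state and hands it to the writeback core.
 */
static void example_wb_atomic_commit(struct drm_writeback_connector *wb_conn,
				     struct drm_connector_state *conn_state)
{
	struct drm_writeback_job *job = conn_state->writeback_job;

	if (job && job->fb)
		drm_writeback_queue_job(wb_conn, job);
}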
/**
* drm_atomic_add_affected_connectors - add connectors for crtc
* @state: atomic state
@@ -1629,7 +1844,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
*/
drm_connector_list_iter_begin(state->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
+ if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
continue;
conn_state = drm_atomic_get_connector_state(state, connector);
@@ -1672,6 +1887,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+ DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
+ crtc->base.id, crtc->name, state);
+
drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@@ -1702,6 +1920,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
@@ -1724,6 +1944,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ ret = drm_atomic_connector_check(conn, conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
+ conn->base.id, conn->name);
+ return ret;
+ }
+ }
+
if (config->funcs->atomic_check) {
ret = config->funcs->atomic_check(state->dev, state);
@@ -2048,45 +2277,6 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
/**
- * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
- *
- * @dev: drm device to check.
- * @plane_mask: plane mask for planes that were updated.
- * @ret: return value, can be -EDEADLK for a retry.
- *
- * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
- * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
- * is a common operation for each atomic update, so this call is split off as a
- * helper.
- */
-void drm_atomic_clean_old_fb(struct drm_device *dev,
- unsigned plane_mask,
- int ret)
-{
- struct drm_plane *plane;
-
- /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
- * locks (ie. while it is still safe to deref plane->state). We
- * need to do this here because the driver entry points cannot
- * distinguish between legacy and atomic ioctls.
- */
- drm_for_each_plane_mask(plane, dev, plane_mask) {
- if (ret == 0) {
- struct drm_framebuffer *new_fb = plane->state->fb;
- if (new_fb)
- drm_framebuffer_get(new_fb);
- plane->fb = new_fb;
- plane->crtc = plane->state->crtc;
-
- if (plane->old_fb)
- drm_framebuffer_put(plane->old_fb);
- }
- plane->old_fb = NULL;
- }
-}
-EXPORT_SYMBOL(drm_atomic_clean_old_fb);
-
-/**
* DOC: explicit fencing properties
*
* Explicit fencing allows userspace to control the buffer synchronization
@@ -2161,7 +2351,7 @@ static int setup_out_fence(struct drm_out_fence_state *fence_state,
return 0;
}
-static int prepare_crtc_signaling(struct drm_device *dev,
+static int prepare_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_mode_atomic *arg,
struct drm_file *file_priv,
@@ -2170,6 +2360,8 @@ static int prepare_crtc_signaling(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
@@ -2235,6 +2427,45 @@ static int prepare_crtc_signaling(struct drm_device *dev,
c++;
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *job;
+ struct drm_out_fence_state *f;
+ struct dma_fence *fence;
+ s32 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_connector(state, conn);
+ if (!fence_ptr)
+ continue;
+
+ job = drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ wb_conn = drm_connector_to_writeback(conn);
+ fence = drm_writeback_get_out_fence(wb_conn);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ job->out_fence = fence;
+ }
+
/*
* Having this flag means user mode is waiting on an event that will never
* arrive, due to the lack of at least one CRTC for signaling.
@@ -2245,11 +2476,11 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
}
-static void complete_crtc_signaling(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_out_fence_state *fence_state,
- unsigned int num_fences,
- bool install_fds)
+static void complete_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -2306,9 +2537,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
unsigned int copied_objs, copied_props;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
- struct drm_plane *plane;
struct drm_out_fence_state *fence_state;
- unsigned plane_mask;
int ret = 0;
unsigned int i, j, num_fences;
@@ -2348,7 +2577,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
- plane_mask = 0;
copied_objs = 0;
copied_props = 0;
fence_state = NULL;
@@ -2419,17 +2647,11 @@ retry:
copied_props++;
}
- if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
- !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
- plane = obj_to_plane(obj);
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
- }
drm_mode_object_put(obj);
}
- ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
- &num_fences);
+ ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
if (ret)
goto out;
@@ -2445,9 +2667,7 @@ retry:
}
out:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
- complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
+ complete_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 130da5195f3b..80be74df7ba6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,6 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_writeback.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -120,7 +121,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
new_encoder = drm_atomic_helper_best_encoder(connector);
if (new_encoder) {
- if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+ if (encoder_mask & drm_encoder_mask(new_encoder)) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
new_encoder->base.id, new_encoder->name,
connector->base.id, connector->name);
@@ -128,7 +129,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
return -EINVAL;
}
- encoder_mask |= 1 << drm_encoder_index(new_encoder);
+ encoder_mask |= drm_encoder_mask(new_encoder);
}
}
@@ -154,7 +155,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
continue;
encoder = connector->state->best_encoder;
- if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
+ if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
continue;
if (!disable_conflicting_encoders) {
@@ -222,7 +223,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask &=
- ~(1 << drm_encoder_index(conn_state->best_encoder));
+ ~drm_encoder_mask(conn_state->best_encoder);
}
}
@@ -233,7 +234,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask |=
- 1 << drm_encoder_index(encoder);
+ drm_encoder_mask(encoder);
}
}
@@ -644,7 +645,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (ret)
return ret;
- connectors_mask += BIT(i);
+ connectors_mask |= BIT(i);
}
/*
@@ -1172,6 +1173,27 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
+static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ const struct drm_connector_helper_funcs *funcs;
+
+ funcs = connector->helper_private;
+ if (!funcs->atomic_commit)
+ continue;
+
+ if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
+ WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+ funcs->atomic_commit(connector, new_conn_state);
+ }
+ }
+}
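[Editor's note] For context, a minimal sketch of the &drm_connector_helper_funcs.atomic_commit hook that the helper above invokes. The foo_* names, including foo_hw_start_writeback(), are hypothetical driver code; the helper only calls the hook when a writeback job with a framebuffer is present, so the dereference below is safe:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>

static void foo_wb_atomic_commit(struct drm_connector *connector,
				 struct drm_connector_state *conn_state)
{
	struct drm_framebuffer *fb = conn_state->writeback_job->fb;

	/* foo_hw_start_writeback() is hypothetical: program the write-back
	 * engine to capture the next composed frame into @fb.
	 */
	foo_hw_start_writeback(connector, fb);
}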
+
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
@@ -1251,6 +1273,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_bridge_enable(encoder->bridge);
}
+
+ drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -1426,6 +1450,8 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_fake_vblank(old_state);
+
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -1455,6 +1481,8 @@ void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_fake_vblank(old_state);
+
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -1510,8 +1538,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane *plane = NULL;
+ struct drm_plane_state *old_plane_state = NULL;
+ struct drm_plane_state *new_plane_state = NULL;
const struct drm_plane_helper_funcs *funcs;
int i, n_planes = 0;
@@ -1527,7 +1556,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
if (n_planes != 1)
return -EINVAL;
- if (!new_plane_state->crtc)
+ if (!new_plane_state->crtc ||
+ old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
funcs = plane->helper_private;
@@ -2030,6 +2060,45 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
+ * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
+ * @old_state: atomic state object with old state structures
+ *
+ * This function walks all CRTCs and fakes VBLANK events on those with
+ * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
+ * The primary users of this function are writeback connectors working in
+ * oneshot mode, which only fake a VBLANK event when a job is queued; any
+ * change to the pipeline that does not touch the connector would otherwise
+ * lead to timeouts when calling drm_atomic_helper_wait_for_vblanks() or
+ * drm_atomic_helper_wait_for_flip_done().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ unsigned long flags;
+
+ if (!new_crtc_state->no_vblank)
+ continue;
+
+ spin_lock_irqsave(&old_state->dev->event_lock, flags);
+ if (new_crtc_state->event) {
+ drm_crtc_send_vblank_event(crtc,
+ new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
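[Editor's note] A minimal sketch of how a CRTC without real scanout hardware might opt in, assuming a hypothetical foo_ driver: setting &drm_crtc_state.no_vblank in atomic_check makes this helper deliver the pending pageflip event on commit:

#include <drm/drm_crtc.h>

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *state)
{
	/* No scanout hardware, hence no real VBLANK interrupt: let
	 * drm_atomic_helper_fake_vblank() deliver the pageflip event.
	 */
	state->no_vblank = true;

	return 0;
}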
+
+/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
* @old_state: atomic state object with old state structures
*
@@ -2320,11 +2389,13 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_crtc *crtc = old_crtc_state->crtc;
struct drm_atomic_state *old_state = old_crtc_state->state;
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(old_state, crtc);
struct drm_plane *plane;
unsigned plane_mask;
plane_mask = old_crtc_state->plane_mask;
- plane_mask |= crtc->state->plane_mask;
+ plane_mask |= new_crtc_state->plane_mask;
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
@@ -2333,6 +2404,8 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(old_state, plane);
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
plane_funcs = plane->helper_private;
@@ -2340,13 +2413,14 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
if (!old_plane_state || !plane_funcs)
continue;
- WARN_ON(plane->state->crtc && plane->state->crtc != crtc);
+ WARN_ON(new_plane_state->crtc &&
+ new_plane_state->crtc != crtc);
- if (drm_atomic_plane_disabling(old_plane_state, plane->state) &&
+ if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, old_plane_state);
- else if (plane->state->crtc ||
- drm_atomic_plane_disabling(old_plane_state, plane->state))
+ else if (new_plane_state->crtc ||
+ drm_atomic_plane_disabling(old_plane_state, new_plane_state))
plane_funcs->atomic_update(plane, old_plane_state);
}
@@ -2795,7 +2869,7 @@ static int update_output_state(struct drm_atomic_state *state,
* resets the "link-status" property to GOOD, to force any link
* re-training. The SETCRTC ioctl does not define whether an update does
* need a full modeset or just a plane update, hence we're allowed to do
- * that. See also drm_mode_connector_set_link_status_property().
+ * that. See also drm_connector_set_link_status_property().
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -2914,7 +2988,6 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
@@ -2957,17 +3030,10 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
-
- if (clean_old_fbs) {
- plane->old_fb = plane->fb;
- plane_mask |= BIT(drm_plane_index(plane));
- }
}
ret = drm_atomic_commit(state);
free:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
drm_atomic_state_put(state);
return ret;
}
@@ -3129,13 +3195,8 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
state->acquire_ctx = ctx;
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- WARN_ON(plane->crtc != new_plane_state->crtc);
- WARN_ON(plane->fb != new_plane_state->fb);
- WARN_ON(plane->old_fb);
-
+ for_each_new_plane_in_state(state, plane, new_plane_state, i)
state->planes[i].old_state = plane->state;
- }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
@@ -3660,6 +3721,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
if (state->crtc)
drm_connector_get(connector);
state->commit = NULL;
+
+ /* Don't copy over a writeback job, they are used only once */
+ state->writeback_job = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
new file mode 100644
index 000000000000..baff50a4c234
--- /dev/null
+++ b/drivers/gpu/drm/drm_client.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This library provides support for clients running in the kernel like fbdev and bootsplash.
+ * Currently it's only partially implemented, just enough to support fbdev.
+ *
+ * GEM drivers which provide a GEM based dumb buffer with a virtual address are supported.
+ */
+
+static int drm_client_open(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_file *file;
+
+ file = drm_file_alloc(dev->primary);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev->filelist_mutex);
+ list_add(&file->lhead, &dev->filelist_internal);
+ mutex_unlock(&dev->filelist_mutex);
+
+ client->file = file;
+
+ return 0;
+}
+
+static void drm_client_close(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ mutex_lock(&dev->filelist_mutex);
+ list_del(&client->file->lhead);
+ mutex_unlock(&dev->filelist_mutex);
+
+ drm_file_free(client->file);
+}
+EXPORT_SYMBOL(drm_client_close);
+
+/**
+ * drm_client_new - Create a DRM client
+ * @dev: DRM device
+ * @client: DRM client
+ * @name: Client name
+ * @funcs: DRM client functions (optional)
+ *
+ * The caller needs to hold a reference on @dev before calling this function.
+ * The client is freed when the &drm_device is unregistered. See drm_client_release().
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs)
+{
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+ return -ENOTSUPP;
+
+ if (funcs && !try_module_get(funcs->owner))
+ return -ENODEV;
+
+ client->dev = dev;
+ client->name = name;
+ client->funcs = funcs;
+
+ ret = drm_client_open(client);
+ if (ret)
+ goto err_put_module;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_add(&client->list, &dev->clientlist);
+ mutex_unlock(&dev->clientlist_mutex);
+
+ drm_dev_get(dev);
+
+ return 0;
+
+err_put_module:
+ if (funcs)
+ module_put(funcs->owner);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_new);
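[Editor's note] A minimal registration sketch; the foo_* names are hypothetical, and all callbacks in &drm_client_funcs are optional (funcs may even be NULL, in which case the client is released automatically on device unregistration):

#include <linux/module.h>
#include <drm/drm_client.h>

static const struct drm_client_funcs foo_client_funcs = {
	.owner = THIS_MODULE,
	/* .unregister, .restore and .hotplug are optional callbacks */
};

static int foo_client_register(struct drm_device *dev,
			       struct drm_client_dev *client)
{
	return drm_client_new(dev, client, "foo-client", &foo_client_funcs);
}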
+
+/**
+ * drm_client_release - Release DRM client resources
+ * @client: DRM client
+ *
+ * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
+ *
+ * This function should only be called from the unregister callback. An exception
+ * is fbdev which cannot free the buffer if userspace has open file descriptors.
+ *
+ * Note:
+ * Clients cannot initiate a release by themselves. This is done to keep the code simple.
+ * The driver has to be unloaded before the client can be unloaded.
+ */
+void drm_client_release(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+
+ drm_client_close(client);
+ drm_dev_put(dev);
+ if (client->funcs)
+ module_put(client->funcs->owner);
+}
+EXPORT_SYMBOL(drm_client_release);
+
+void drm_client_dev_unregister(struct drm_device *dev)
+{
+ struct drm_client_dev *client, *tmp;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
+ list_del(&client->list);
+ if (client->funcs && client->funcs->unregister) {
+ client->funcs->unregister(client);
+ } else {
+ drm_client_release(client);
+ kfree(client);
+ }
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+
+/**
+ * drm_client_dev_hotplug - Send hotplug event to clients
+ * @dev: DRM device
+ *
+ * This function calls the &drm_client_funcs.hotplug callback on the attached clients.
+ *
+ * drm_kms_helper_hotplug_event() calls this function, so drivers that use it
+ * don't need to call this function themselves.
+ */
+void drm_client_dev_hotplug(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->hotplug)
+ continue;
+
+ ret = client->funcs->hotplug(client);
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_hotplug);
+
+void drm_client_dev_restore(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->restore)
+ continue;
+
+ ret = client->funcs->restore(client);
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ if (!ret) /* The first one to return zero gets the privilege to restore */
+ break;
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+
+static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+{
+ struct drm_device *dev = buffer->client->dev;
+
+ if (buffer->vaddr && dev->driver->gem_prime_vunmap)
+ dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+
+ if (buffer->gem)
+ drm_gem_object_put_unlocked(buffer->gem);
+
+ if (buffer->handle)
+ drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
+
+ kfree(buffer);
+}
+
+static struct drm_client_buffer *
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+ struct drm_mode_create_dumb dumb_args = { };
+ struct drm_device *dev = client->dev;
+ struct drm_client_buffer *buffer;
+ struct drm_gem_object *obj;
+ void *vaddr;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->client = client;
+
+ dumb_args.width = width;
+ dumb_args.height = height;
+ dumb_args.bpp = drm_format_plane_cpp(format, 0) * 8;
+ ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
+ if (ret)
+ goto err_delete;
+
+ buffer->handle = dumb_args.handle;
+ buffer->pitch = dumb_args.pitch;
+
+ obj = drm_gem_object_lookup(client->file, dumb_args.handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto err_delete;
+ }
+
+ buffer->gem = obj;
+
+ /*
+ * FIXME: The dependency on GEM here isn't required, we could
+ * convert the driver handle to a dma-buf instead and use the
+ * backend-agnostic dma-buf vmap support instead. This would
+ * require that the handle2fd prime ioctl is reworked to pull the
+ * fd_install step out of the driver backend hooks, to make that
+ * final step optional for internal users.
+ */
+ vaddr = dev->driver->gem_prime_vmap(obj);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err_delete;
+ }
+
+ buffer->vaddr = vaddr;
+
+ return buffer;
+
+err_delete:
+ drm_client_buffer_delete(buffer);
+
+ return ERR_PTR(ret);
+}
+
+static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
+{
+ int ret;
+
+ if (!buffer->fb)
+ return;
+
+ ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
+ if (ret)
+ DRM_DEV_ERROR(buffer->client->dev->dev,
+ "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+
+ buffer->fb = NULL;
+}
+
+static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
+ u32 width, u32 height, u32 format)
+{
+ struct drm_client_dev *client = buffer->client;
+ struct drm_mode_fb_cmd fb_req = { };
+ const struct drm_format_info *info;
+ int ret;
+
+ info = drm_format_info(format);
+ fb_req.bpp = info->cpp[0] * 8;
+ fb_req.depth = info->depth;
+ fb_req.width = width;
+ fb_req.height = height;
+ fb_req.handle = buffer->handle;
+ fb_req.pitch = buffer->pitch;
+
+ ret = drm_mode_addfb(client->dev, &fb_req, client->file);
+ if (ret)
+ return ret;
+
+ buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id);
+ if (WARN_ON(!buffer->fb))
+ return -ENOENT;
+
+ /* drop the reference we picked up in framebuffer lookup */
+ drm_framebuffer_put(buffer->fb);
+
+ strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN);
+
+ return 0;
+}
+
+/**
+ * drm_client_framebuffer_create - Create a client framebuffer
+ * @client: DRM client
+ * @width: Framebuffer width
+ * @height: Framebuffer height
+ * @format: Buffer format
+ *
+ * This function creates a &drm_client_buffer which consists of a
+ * &drm_framebuffer backed by a dumb buffer.
+ * Call drm_client_framebuffer_delete() to free the buffer.
+ *
+ * Returns:
+ * Pointer to a client buffer or an error pointer on failure.
+ */
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+ struct drm_client_buffer *buffer;
+ int ret;
+
+ buffer = drm_client_buffer_create(client, width, height, format);
+ if (IS_ERR(buffer))
+ return buffer;
+
+ ret = drm_client_buffer_addfb(buffer, width, height, format);
+ if (ret) {
+ drm_client_buffer_delete(buffer);
+ return ERR_PTR(ret);
+ }
+
+ return buffer;
+}
+EXPORT_SYMBOL(drm_client_framebuffer_create);
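[Editor's note] A minimal usage sketch, assuming a client registered as above; DRM_FORMAT_XRGB8888 and the 1024x768 size are just example values:

#include <linux/err.h>
#include <drm/drm_client.h>
#include <drm/drm_fourcc.h>

static int foo_client_alloc_fb(struct drm_client_dev *client)
{
	struct drm_client_buffer *buffer;

	buffer = drm_client_framebuffer_create(client, 1024, 768,
					       DRM_FORMAT_XRGB8888);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* ... draw into buffer->vaddr, scan out buffer->fb ... */

	drm_client_framebuffer_delete(buffer);

	return 0;
}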
+
+/**
+ * drm_client_framebuffer_delete - Delete a client framebuffer
+ * @buffer: DRM client buffer (can be NULL)
+ */
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
+{
+ if (!buffer)
+ return;
+
+ drm_client_buffer_rmfb(buffer);
+ drm_client_buffer_delete(buffer);
+}
+EXPORT_SYMBOL(drm_client_framebuffer_delete);
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_client_dev *client;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list)
+ drm_printf(&p, "%s\n", client->name);
+ mutex_unlock(&dev->clientlist_mutex);
+
+ return 0;
+}
+
+static const struct drm_info_list drm_client_debugfs_list[] = {
+ { "internal_clients", drm_client_debugfs_internal_clients, 0 },
+};
+
+int drm_client_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9b9ba5d5ec0c..6011d769d50b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -48,7 +48,7 @@
*
* Connectors must be attached to an encoder to be used. For devices that map
* connectors to encoders 1:1, the connector should be attached at
- * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * initialization time with a call to drm_connector_attach_encoder(). The
* driver must also set the &drm_connector.encoder field to point to the
* attached encoder.
*
@@ -87,6 +87,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
+ { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
};
void drm_connector_ida_init(void)
@@ -195,6 +196,10 @@ int drm_connector_init(struct drm_device *dev,
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = __drm_mode_object_add(dev, &connector->base,
DRM_MODE_OBJECT_CONNECTOR,
false, drm_connector_free);
@@ -249,7 +254,8 @@ int drm_connector_init(struct drm_device *dev,
config->num_connector++;
spin_unlock_irq(&config->connector_list_lock);
- if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
+ connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_object_attach_property(&connector->base,
config->edid_property,
0);
@@ -285,7 +291,7 @@ out_put:
EXPORT_SYMBOL(drm_connector_init);
/**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * drm_connector_attach_encoder - attach a connector to an encoder
* @connector: connector to attach
* @encoder: encoder to attach @connector to
*
@@ -296,8 +302,8 @@ EXPORT_SYMBOL(drm_connector_init);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
+int drm_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
{
int i;
@@ -315,7 +321,7 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
if (WARN_ON(connector->encoder))
return -EINVAL;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
if (connector->encoder_ids[i] == 0) {
connector->encoder_ids[i] = encoder->base.id;
return 0;
@@ -323,7 +329,30 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
}
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+EXPORT_SYMBOL(drm_connector_attach_encoder);
+
+/**
+ * drm_connector_has_possible_encoder - check if the connector and encoder are associated with each other
+ * @connector: the connector
+ * @encoder: the encoder
+ *
+ * Returns:
+ * True if @encoder is one of the possible encoders for @connector.
+ */
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ struct drm_encoder *enc;
+ int i;
+
+ drm_connector_for_each_possible_encoder(connector, enc, i) {
+ if (enc == encoder)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_connector_has_possible_encoder);
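[Editor's note] A minimal sketch of the intended use, with a hypothetical foo_validate_encoder() wrapper: reject encoder/connector pairs that are not wired together before programming them:

#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>

static int foo_validate_encoder(struct drm_connector *connector,
				struct drm_encoder *encoder)
{
	/* Only accept encoders listed in connector->encoder_ids */
	if (!drm_connector_has_possible_encoder(connector, encoder))
		return -EINVAL;

	return 0;
}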
static void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -577,7 +606,7 @@ __drm_connector_put_safe(struct drm_connector *conn)
/**
* drm_connector_list_iter_next - return next connector
- * @iter: connectr_list iterator
+ * @iter: connector_list iterator
*
* Returns the next connector for @iter, or NULL when the list walk has
* completed.
@@ -720,6 +749,14 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
+static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
+ { DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
+ { DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
+ { DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
+ { DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
+ { DRM_MODE_CONTENT_TYPE_GAME, "Game" },
+};
+
static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
{ DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" },
{ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" },
@@ -777,7 +814,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* Blob property which contains the current EDID read from the sink. This
* is useful to parse sink identification information like vendor, model
* and serial. Drivers should update this property by calling
- * drm_mode_connector_update_edid_property(), usually after having parsed
+ * drm_connector_update_edid_property(), usually after having parsed
* the EDID using drm_add_edid_modes(). Userspace cannot change this
* property.
* DPMS:
@@ -815,7 +852,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
- * drm_mode_connector_set_path_property(), in the case of DP MST with the
+ * drm_connector_set_path_property(), in the case of DP MST with the
* path property the MST manager created. Userspace cannot change this
* property.
* TILE:
@@ -826,14 +863,14 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* are not gen-locked. Note that for tiled panels which are genlocked, like
* dual-link LVDS or dual-link DSI, the driver should try to not expose the
* tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers
- * should update this value using drm_mode_connector_set_tile_property().
+ * should update this value using drm_connector_set_tile_property().
* Userspace cannot change this property.
* link-status:
* Connector link-status property to indicate the status of link. The
* default value of link-status is "GOOD". If something fails during or
* after modeset, the kernel driver may set this to "BAD" and issue a
* hotplug uevent. Drivers should update this value using
- * drm_mode_connector_set_link_status_property().
+ * drm_connector_set_link_status_property().
* non_desktop:
* Indicates the output should be ignored for purposes of displaying a
* standard desktop environment or console. This is most likely because
@@ -997,6 +1034,82 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
+ * DOC: HDMI connector properties
+ *
+ * content type (HDMI specific):
+ * Indicates the content type setting to be used in HDMI infoframes to signal
+ * the content type to the external device, so that it adjusts its display
+ * settings accordingly.
+ *
+ * The value of this property can be one of the following:
+ *
+ * No Data:
+ * Content type is unknown
+ * Graphics:
+ * Content type is graphics
+ * Photo:
+ * Content type is photo
+ * Cinema:
+ * Content type is cinema
+ * Game:
+ * Content type is game
+ *
+ * Drivers can set up this property by calling
+ * drm_connector_attach_content_type_property(). Decoding to
+ * infoframe values is done through drm_hdmi_avi_infoframe_content_type().
+ */
+
+/**
+ * drm_connector_attach_content_type_property - attach content-type property
+ * @connector: connector to attach content type property on.
+ *
+ * Called by a driver the first time an HDMI connector is created.
+ */
+int drm_connector_attach_content_type_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_content_type_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.content_type_property,
+ DRM_MODE_CONTENT_TYPE_NO_DATA);
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_type_property);
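[Editor's note] A minimal sketch of attaching the property at connector-creation time; foo_hdmi_connector_init is a hypothetical name:

#include <drm/drm_connector.h>

static int foo_hdmi_connector_init(struct drm_connector *connector)
{
	/* Creates the enum property on first use and attaches it with the
	 * "No Data" default; later connectors reuse the same property.
	 */
	return drm_connector_attach_content_type_property(connector);
}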
+
+/**
+ * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
+ * content type information, based on the corresponding DRM property.
+ * @frame: HDMI AVI infoframe
+ * @conn_state: DRM display connector state
+ */
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ switch (conn_state->content_type) {
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ break;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
+ break;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ frame->content_type = HDMI_CONTENT_TYPE_GAME;
+ break;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
+ break;
+ default:
+ /* Graphics is the default (0) */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ }
+
+ frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
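[Editor's note] A minimal sketch of the decode side in a driver's infoframe path; foo_fill_avi_infoframe is hypothetical, and hdmi_avi_infoframe_init() comes from <linux/hdmi.h> (the drm_connector.h include is assumed to carry the new declaration):

#include <linux/hdmi.h>
#include <drm/drm_connector.h>

static void foo_fill_avi_infoframe(struct hdmi_avi_infoframe *frame,
				   const struct drm_connector_state *conn_state)
{
	hdmi_avi_infoframe_init(frame);

	/* Sets frame->content_type and frame->itc from the DRM property */
	drm_hdmi_avi_infoframe_content_type(frame, conn_state);
}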
+
+/**
* drm_create_tv_properties - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
@@ -1261,6 +1374,33 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
+ * drm_mode_create_content_type_property - create content type property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed. The property must then be
+ * attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_content_type_property(struct drm_device *dev)
+{
+ if (dev->mode_config.content_type_property)
+ return 0;
+
+ dev->mode_config.content_type_property =
+ drm_property_create_enum(dev, 0, "content type",
+ drm_content_type_enum_list,
+ ARRAY_SIZE(drm_content_type_enum_list));
+
+ if (dev->mode_config.content_type_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_content_type_property);
+
+/**
* drm_mode_create_suggested_offset_properties - create suggests offset properties
* @dev: DRM device
*
@@ -1285,7 +1425,7 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
/**
- * drm_mode_connector_set_path_property - set tile property on connector
+ * drm_connector_set_path_property - set tile property on connector
* @connector: connector to set property on.
* @path: path to use for property; must not be NULL.
*
@@ -1297,8 +1437,8 @@ EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path)
+int drm_connector_set_path_property(struct drm_connector *connector,
+ const char *path)
{
struct drm_device *dev = connector->dev;
int ret;
@@ -1311,10 +1451,10 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
dev->mode_config.path_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+EXPORT_SYMBOL(drm_connector_set_path_property);
/**
- * drm_mode_connector_set_tile_property - set tile property on connector
+ * drm_connector_set_tile_property - set tile property on connector
* @connector: connector to set property on.
*
* This looks up the tile information for a connector, and creates a
@@ -1324,7 +1464,7 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
* Returns:
* Zero on success, errno on failure.
*/
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+int drm_connector_set_tile_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
char tile[256];
@@ -1354,10 +1494,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector)
dev->mode_config.tile_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+EXPORT_SYMBOL(drm_connector_set_tile_property);
/**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * drm_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
*
@@ -1367,8 +1507,8 @@ EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid)
+int drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid)
{
struct drm_device *dev = connector->dev;
size_t size = 0;
@@ -1406,10 +1546,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
dev->mode_config.edid_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+EXPORT_SYMBOL(drm_connector_update_edid_property);
/**
- * drm_mode_connector_set_link_status_property - Set link status property of a connector
+ * drm_connector_set_link_status_property - Set link status property of a connector
* @connector: drm connector
* @link_status: new value of link status property (0: Good, 1: Bad)
*
@@ -1427,8 +1567,8 @@ EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
* it is not limited to DP or link training. For example, if we implement
* asynchronous setcrtc, this property can be used to report any failures in that.
*/
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
- uint64_t link_status)
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status)
{
struct drm_device *dev = connector->dev;
@@ -1436,7 +1576,7 @@ void drm_mode_connector_set_link_status_property(struct drm_connector *connector
connector->state->link_status = link_status;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-EXPORT_SYMBOL(drm_mode_connector_set_link_status_property);
+EXPORT_SYMBOL(drm_connector_set_link_status_property);
/**
* drm_connector_init_panel_orientation_property -
@@ -1489,7 +1629,7 @@ int drm_connector_init_panel_orientation_property(
}
EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
@@ -1507,8 +1647,8 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
struct drm_mode_connector_set_property *conn_set_prop = data;
struct drm_mode_obj_set_property obj_set_prop = {
@@ -1589,22 +1729,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
- if (connector->encoder_ids[i] != 0)
- encoders_count++;
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ encoders_count++;
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ if (put_user(encoder->base.id, encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_encoders = encoders_count;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 3c4000facb36..f973d287696a 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
ctx->handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle == -1) {
+ if (ctx->handle < 0) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 98a36e6c69ad..bae43938c8f6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -225,16 +225,9 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
return crtc->timeline_name;
}
-static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static const struct dma_fence_ops drm_crtc_fence_ops = {
.get_driver_name = drm_crtc_fence_get_driver_name,
.get_timeline_name = drm_crtc_fence_get_timeline_name,
- .enable_signaling = drm_crtc_fence_enable_signaling,
- .wait = dma_fence_default_wait,
};
struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
@@ -286,6 +279,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (WARN_ON(config->num_crtc >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
crtc->dev = dev;
crtc->funcs = funcs;
@@ -325,9 +322,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->primary = primary;
crtc->cursor = cursor;
if (primary && !primary->possible_crtcs)
- primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+ primary->possible_crtcs = drm_crtc_mask(crtc);
if (cursor && !cursor->possible_crtcs)
- cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ cursor->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_crtc_crc_init(crtc);
if (ret) {
@@ -464,32 +461,42 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
struct drm_crtc *tmp;
int ret;
+ WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
+
/*
* NOTE: ->set_config can also disable other crtcs (if we steal all
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
- drm_for_each_crtc(tmp, crtc->dev)
- tmp->primary->old_fb = tmp->primary->fb;
+ drm_for_each_crtc(tmp, crtc->dev) {
+ struct drm_plane *plane = tmp->primary;
+
+ plane->old_fb = plane->fb;
+ }
fb = set->fb;
ret = crtc->funcs->set_config(set, ctx);
if (ret == 0) {
- crtc->primary->crtc = fb ? crtc : NULL;
- crtc->primary->fb = fb;
+ struct drm_plane *plane = crtc->primary;
+
+ plane->crtc = fb ? crtc : NULL;
+ plane->fb = fb;
}
drm_for_each_crtc(tmp, crtc->dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
+ struct drm_plane *plane = tmp->primary;
+
+ if (plane->fb)
+ drm_framebuffer_get(plane->fb);
+ if (plane->old_fb)
+ drm_framebuffer_put(plane->old_fb);
+ plane->old_fb = NULL;
}
return ret;
}
+
/**
* drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config
* @set: modeset config to set
@@ -640,7 +647,9 @@ retry:
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
- DRM_DEBUG_KMS("Invalid mode\n");
+ DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
+ ret, drm_get_mode_status_name(mode->status));
+ drm_mode_debug_printmodeline(mode);
goto out;
}
@@ -732,7 +741,11 @@ retry:
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
- ret = __drm_mode_set_config_internal(&set, &ctx);
+
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = crtc->funcs->set_config(&set, &ctx);
+ else
+ ret = __drm_mode_set_config_internal(&set, &ctx);
out:
if (fb)
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 5d307b23a4e6..b61322763394 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -56,12 +56,21 @@ int drm_mode_setcrtc(struct drm_device *dev,
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
+/* drm_modes.c */
+const char *drm_get_mode_status_name(enum drm_mode_status status);
+
/* IOCTLs */
int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
/* drm_dumb_buffers.c */
+int drm_mode_create_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ struct drm_file *file_priv);
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+ struct drm_file *file_priv);
+
/* IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -139,7 +148,7 @@ void drm_connector_ida_init(void);
void drm_connector_ida_destroy(void);
void drm_connector_unregister_all(struct drm_device *dev);
int drm_connector_register_all(struct drm_device *dev);
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value);
int drm_connector_create_standard_properties(struct drm_device *dev);
@@ -147,8 +156,8 @@ const char *drm_get_connector_force_name(enum drm_connector_force force);
void drm_connector_free_work_fn(struct work_struct *work);
/* IOCTL */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getconnector(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -163,14 +172,19 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
const struct drm_framebuffer *fb);
void drm_fb_release(struct drm_file *file_priv);
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+ struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+ struct drm_file *file_priv);
+
/* IOCTL */
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b2482818fee8..6f28fe58f169 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_atomic.h>
@@ -164,6 +165,12 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
DRM_ERROR("Failed to create framebuffer debugfs file\n");
return ret;
}
+
+ ret = drm_client_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create client debugfs file\n");
+ return ret;
+ }
}
if (dev->driver->debugfs_init) {
@@ -307,13 +314,13 @@ static ssize_t edid_write(struct file *file, const char __user *ubuf,
if (len == 5 && !strncmp(buf, "reset", 5)) {
connector->override_edid = false;
- ret = drm_mode_connector_update_edid_property(connector, NULL);
+ ret = drm_connector_update_edid_property(connector, NULL);
} else if (len < EDID_LENGTH ||
EDID_LENGTH * (1 + edid->extensions) > len)
ret = -EINVAL;
else {
connector->override_edid = false;
- ret = drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_connector_update_edid_property(connector, edid);
if (!ret)
connector->override_edid = true;
}
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 9f8312137cad..99961192bf03 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -139,6 +139,7 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc)
static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
{
kfree(crc->entries);
+ crc->overflow = false;
crc->entries = NULL;
crc->head = 0;
crc->tail = 0;
@@ -391,8 +392,14 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
tail = crc->tail;
if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+ bool was_overflow = crc->overflow;
+
+ crc->overflow = true;
spin_unlock(&crc->lock);
- DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
+ if (!was_overflow)
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
return -ENOBUFS;
}
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
new file mode 100644
index 000000000000..988513346e9c
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DisplayPort CEC-Tunneling-over-AUX support
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <drm/drm_dp_helper.h>
+#include <media/cec.h>
+
+/*
+ * Unfortunately it turns out that we have a chicken-and-egg situation
+ * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
+ * have a converter chip that supports CEC-Tunneling-over-AUX (usually the
+ * Parade PS176), but they do not wire up the CEC pin, thus making CEC
+ * useless.
+ *
+ * Sadly there is no way for this driver to know this. What happens is
+ * that a /dev/cecX device is created that is isolated and unable to see
+ * any of the other CEC devices. Quite literally the CEC wire is cut
+ * (or in this case, never connected in the first place).
+ *
+ * The reason so few adapters support this is that this tunneling protocol
+ * was never supported by any OS. So there was no easy way of testing it,
+ * and no incentive to correctly wire up the CEC pin.
+ *
+ * Hopefully by creating this driver it will be easier for vendors to
+ * finally fix their adapters and test the CEC functionality.
+ *
+ * I keep a list of known working adapters here:
+ *
+ * https://hverkuil.home.xs4all.nl/cec-status.txt
+ *
+ * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * and is not yet listed there.
+ *
+ * Note that the current implementation does not support CEC over an MST hub.
+ * As far as I can see there is no mechanism defined in the DisplayPort
+ * standard to transport CEC interrupts over an MST device. It might be
+ * possible to do this through polling, but I have not been able to get that
+ * to work.
+ */
+
+/**
+ * DOC: dp cec helpers
+ *
+ * These functions take care of supporting the CEC-Tunneling-over-AUX
+ * feature of DisplayPort-to-HDMI adapters.
+ */
+
+/*
+ * When the EDID is unset because the HPD went low, then the CEC DPCD registers
+ * typically can no longer be read (true for a DP-to-HDMI adapter since it is
+ * powered by the HPD). However, some displays toggle the HPD off and on for a
+ * short period for one reason or another, and that would cause the CEC adapter
+ * to be removed and added again, even though nothing else changed.
+ *
+ * This module parameter sets a delay in seconds before the CEC adapter is
+ * actually unregistered. Only if the HPD does not return within that time will
+ * the CEC adapter be unregistered.
+ *
+ * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
+ * be unregistered for as long as the connector remains registered.
+ *
+ * If it is set to 0, then the CEC adapter will be unregistered immediately as
+ * soon as the HPD disappears.
+ *
+ * The default is one second to prevent short HPD glitches from unregistering
+ * the CEC adapter.
+ *
+ * Note that for integrated HDMI branch devices that support CEC the DPCD
+ * registers remain available even if the HPD goes low since it is not powered
+ * by the HPD. In that case the CEC adapter will never be unregistered during
+ * the life time of the connector. At least, this is the theory since I do not
+ * have hardware with an integrated HDMI branch device that supports CEC.
+ */
+#define NEVER_UNREG_DELAY 1000
+static unsigned int drm_dp_cec_unregister_delay = 1;
+module_param(drm_dp_cec_unregister_delay, uint, 0600);
+MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
+ "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
+
+static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
+ ssize_t err = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ return (enable && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ /* Bit 15 (logical address 15) should always be set */
+ u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
+ u8 mask[2];
+ ssize_t err;
+
+ if (addr != CEC_LOG_ADDR_INVALID)
+ la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
+ mask[0] = la_mask & 0xff;
+ mask[1] = la_mask >> 8;
+ err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ unsigned int retries = min(5, attempts - 1);
+ ssize_t err;
+
+ err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
+ if (err < 0)
+ return err;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
+ return err < 0 ? err : 0;
+}
+
+static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ ssize_t err;
+ u8 val;
+
+ if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (err >= 0) {
+ if (enable)
+ val |= DP_CEC_SNOOPING_ENABLE;
+ else
+ val &= ~DP_CEC_SNOOPING_ENABLE;
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ }
+ return (enable && err < 0) ? err : 0;
+}
+
+static void drm_dp_cec_adap_status(struct cec_adapter *adap,
+ struct seq_file *file)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ struct drm_dp_desc desc;
+ struct drm_dp_dpcd_ident *id = &desc.ident;
+
+ if (drm_dp_read_desc(aux, &desc, true))
+ return;
+ seq_printf(file, "OUI: %*phD\n",
+ (int)sizeof(id->oui), id->oui);
+ seq_printf(file, "ID: %*pE\n",
+ (int)strnlen(id->device_id, sizeof(id->device_id)),
+ id->device_id);
+ seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
+ /*
+ * Show this both in decimal and hex: at least one vendor
+ * always reports this in hex.
+ */
+ seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
+ id->sw_major_rev, id->sw_minor_rev,
+ id->sw_major_rev, id->sw_minor_rev);
+}
+
+static const struct cec_adap_ops drm_dp_cec_adap_ops = {
+ .adap_enable = drm_dp_cec_adap_enable,
+ .adap_log_addr = drm_dp_cec_adap_log_addr,
+ .adap_transmit = drm_dp_cec_adap_transmit,
+ .adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
+ .adap_status = drm_dp_cec_adap_status,
+};
+
+static int drm_dp_cec_received(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ struct cec_msg msg;
+ u8 rx_msg_info;
+ ssize_t err;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ if (err < 0)
+ return err;
+
+ if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
+ return 0;
+
+ msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
+ err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ if (err < 0)
+ return err;
+
+ cec_received_msg(adap, &msg);
+ return 0;
+}
+
+static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ u8 flags;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ return;
+
+ if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
+ drm_dp_cec_received(aux);
+
+ if (flags & DP_CEC_TX_MESSAGE_SENT)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
+ else if (flags & DP_CEC_TX_LINE_ERROR)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES);
+ else if (flags &
+ (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES);
+ drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+}
+
+/**
+ * drm_dp_cec_irq() - handle CEC interrupt, if any
+ * @aux: DisplayPort AUX channel
+ *
+ * Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX
+ * is present, then it will check for a CEC_IRQ and handle it accordingly.
+ */
+void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+ u8 cec_irq;
+ int ret;
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
+ if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
+ goto unlock;
+
+ drm_dp_cec_handle_irq(aux);
+ drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_irq);
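
A minimal caller sketch (hypothetical driver code, not part of this patch): a driver that already decodes short HPD pulses simply chains the CEC check into its IRQ path:

    /* In a driver's IRQ_HPD / short-pulse handler: */
    static void my_dp_hpd_irq(struct my_dp *dp)
    {
        /* ... handle link status and other sink IRQs ... */

        /* Checks DP_CEC_IRQ and processes any pending CEC traffic. */
        drm_dp_cec_irq(&dp->aux);
    }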
+
+static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
+{
+ u8 cap = 0;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ !(cap & DP_CEC_TUNNELING_CAPABLE))
+ return false;
+ if (cec_cap)
+ *cec_cap = cap;
+ return true;
+}
+
+/*
+ * Called if the HPD was low for more than drm_dp_cec_unregister_delay
+ * seconds. This unregisters the CEC adapter.
+ */
+static void drm_dp_cec_unregister_work(struct work_struct *work)
+{
+ struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
+ cec.unregister_work.work);
+
+ mutex_lock(&aux->cec.lock);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ mutex_unlock(&aux->cec.lock);
+}
+
+/*
+ * A new EDID is set. If there is no CEC adapter, then create one. If
+ * there already is a CEC adapter, then check whether its properties are
+ * unchanged and, if so, just update the CEC physical address. Otherwise
+ * unregister the old CEC adapter and create a new one.
+ */
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
+{
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ unsigned int num_las = 1;
+ u8 cap;
+
+#ifndef CONFIG_MEDIA_CEC_RC
+ /*
+ * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+ * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+ *
+ * Do this here as well to ensure the tests against cec_caps are
+ * correct.
+ */
+ cec_caps &= ~CEC_CAP_RC;
+#endif
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!drm_dp_cec_cap(aux, &cap)) {
+ /* CEC is not supported, unregister any existing adapter */
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+
+ if (cap & DP_CEC_SNOOPING_CAPABLE)
+ cec_caps |= CEC_CAP_MONITOR_ALL;
+ if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
+ num_las = CEC_MAX_LOG_ADDRS;
+
+ if (aux->cec.adap) {
+ if (aux->cec.adap->capabilities == cec_caps &&
+ aux->cec.adap->available_log_addrs == num_las) {
+ /* Unchanged, so just set the phys addr */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ goto unlock;
+ }
+ /*
+ * The capabilities changed, so unregister the old
+ * adapter first.
+ */
+ cec_unregister_adapter(aux->cec.adap);
+ }
+
+ /* Create a new adapter */
+ aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
+ aux, aux->cec.name, cec_caps,
+ num_las);
+ if (IS_ERR(aux->cec.adap)) {
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+ if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+ cec_delete_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ } else {
+ /*
+ * Update the phys addr for the new CEC adapter. When called
+ * from drm_dp_cec_register_connector(), edid == NULL, so in
+ * that case the phys addr is just invalidated.
+ */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_set_edid);
+
+/*
+ * The EDID disappeared (likely because of the HPD going down).
+ */
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ cec_phys_addr_invalidate(aux->cec.adap);
+ /*
+ * We're done if we want to keep the CEC device
+ * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
+ * DPCD still indicates the CEC capability (expected for an integrated
+ * HDMI branch device).
+ */
+ if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
+ !drm_dp_cec_cap(aux, NULL)) {
+ /*
+ * Unregister the CEC adapter after drm_dp_cec_unregister_delay
+		 * seconds. This is to debounce short HPD off-and-on cycles from
+ * displays.
+ */
+ schedule_delayed_work(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_delay * HZ);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_unset_edid);
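
Taken together, a connector driver would typically call these from its detect/EDID path (a sketch; my_conn and its aux field are hypothetical):

    struct edid *edid = drm_get_edid(connector, &my_conn->aux.ddc);

    if (edid)
        drm_dp_cec_set_edid(&my_conn->aux, edid);
    else    /* HPD gone or EDID unreadable */
        drm_dp_cec_unset_edid(&my_conn->aux);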
+
+/**
+ * drm_dp_cec_register_connector() - register a new connector
+ * @aux: DisplayPort AUX channel
+ * @name: name of the CEC device
+ * @parent: parent device
+ *
+ * A new connector was registered with associated CEC adapter name and
+ * CEC adapter parent device. After registering the name and parent,
+ * drm_dp_cec_set_edid() is called to check if the connector supports
+ * CEC and to register a CEC adapter if that is the case.
+ */
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent)
+{
+ WARN_ON(aux->cec.adap);
+ aux->cec.name = name;
+ aux->cec.parent = parent;
+ INIT_DELAYED_WORK(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_work);
+
+ drm_dp_cec_set_edid(aux, NULL);
+}
+EXPORT_SYMBOL(drm_dp_cec_register_connector);
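
Connector lifetime handling then reduces to two calls (sketch, hypothetical names): register when the connector is created, unregister from its destroy path:

    /* At connector creation time: */
    drm_dp_cec_register_connector(&my_conn->aux, connector->name, dev->dev);

    /* In the connector's .destroy hook: */
    drm_dp_cec_unregister_connector(&my_conn->aux);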
+
+/**
+ * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+ if (!aux->cec.adap)
+ return;
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+}
+EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a7ba602a43a8..0cccbcb2d03e 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -185,6 +185,20 @@ EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
#define AUX_RETRY_INTERVAL 500 /* us */
+static inline void
+drm_dp_dump_access(const struct drm_dp_aux *aux,
+ u8 request, uint offset, void *buffer, int ret)
+{
+ const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-";
+
+ if (ret > 0)
+ drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
+ aux->name, offset, arrow, ret, min(ret, 20), buffer);
+ else
+ drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d)\n",
+ aux->name, offset, arrow, ret);
+}
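
Given the format strings above, a successful one-byte native read of DPCD offset 0x600 would produce a log line of roughly this shape (illustrative; the prefix depends on the aux channel name and the kernel's debug formatting):

    <aux->name>: 0x00600 AUX -> (ret=  1) 01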
+
/**
* DOC: dp helpers
*
@@ -288,10 +302,14 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
1);
if (ret != 1)
- return ret;
+ goto out;
- return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
- size);
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
+ size);
+
+out:
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
+ return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_read);
@@ -312,8 +330,12 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
- return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
- size);
+ int ret;
+
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
+ size);
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
+ return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_write);
@@ -1087,6 +1109,7 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
void drm_dp_aux_init(struct drm_dp_aux *aux)
{
mutex_init(&aux->hw_mutex);
+ mutex_init(&aux->cec.lock);
INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
aux->ddc.algo = &drm_dp_i2c_algo;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 658830620ca3..7780567aa669 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1215,7 +1215,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(port->connector);
+ drm_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
}
@@ -2559,7 +2559,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
edid = drm_edid_duplicate(port->cached_edid);
else {
edid = drm_get_edid(connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(connector);
+ drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7af748ed1c58..ea4941da9b27 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/srcu.h>
+#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drmP.h>
@@ -53,13 +54,14 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)");
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
module_param_named(debug, drm_debug, int, 0600);
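
With the new bit in place, AUX/DPCD traffic can be traced by enabling bit 8, e.g. via the module parameter registered above:

    # at boot:
    drm.debug=0x100
    # or at runtime:
    echo 0x100 > /sys/module/drm/parameters/debug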
static DEFINE_SPINLOCK(drm_minor_lock);
@@ -505,6 +507,8 @@ int drm_dev_init(struct drm_device *dev,
dev->driver = driver;
INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->filelist_internal);
+ INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
@@ -514,6 +518,7 @@ int drm_dev_init(struct drm_device *dev,
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
+ mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
@@ -569,6 +574,7 @@ err_minors:
err_free:
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
return ret;
@@ -603,6 +609,7 @@ void drm_dev_fini(struct drm_device *dev)
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
kfree(dev->unique);
@@ -858,6 +865,8 @@ void drm_dev_unregister(struct drm_device *dev)
dev->registered = false;
+ drm_client_dev_unregister(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 9e2ae02f31e0..81dfdd33753a 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -53,10 +53,10 @@
* a hardware-specific ioctl to allocate suitable buffer objects.
*/
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_create_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ struct drm_file *file_priv)
{
- struct drm_mode_create_dumb *args = data;
u32 cpp, stride, size;
if (!dev->driver->dumb_create)
@@ -92,6 +92,12 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_create(file_priv, dev, args);
}
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ return drm_mode_create_dumb(dev, data, file_priv);
+}
+
/**
* drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
* @dev: DRM device
@@ -123,17 +129,22 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
&args->offset);
}
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+ struct drm_file *file_priv)
{
- struct drm_mode_destroy_dumb *args = data;
-
if (!dev->driver->dumb_create)
return -ENOSYS;
if (dev->driver->dumb_destroy)
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ return dev->driver->dumb_destroy(file_priv, dev, handle);
else
- return drm_gem_dumb_destroy(file_priv, dev, args->handle);
+ return drm_gem_dumb_destroy(file_priv, dev, handle);
}
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ return drm_mode_destroy_dumb(dev, args->handle, file_priv);
+}
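
The split gives in-kernel callers (such as the drm_client infrastructure added elsewhere in this series) an entry point that bypasses the ioctl plumbing. A usage sketch (hypothetical, error handling trimmed):

    struct drm_mode_create_dumb args = {
        .width  = 1024,
        .height = 768,
        .bpp    = 32,
    };
    int ret;

    ret = drm_mode_create_dumb(dev, &args, file);
    if (ret)
        return ret;

    /* ... use args.handle, args.pitch and args.size ... */

    drm_mode_destroy_dumb(dev, args.handle, file);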
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a5808382bdf0..3c9fc99648b7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -116,6 +116,9 @@ static const struct edid_quirk {
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+ /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
+ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -163,8 +166,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
- /* HTC Vive VR Headset */
+ /* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+ { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
@@ -687,562 +691,562 @@ static const struct minimode extra_modes[] = {
static const struct drm_display_mode edid_cea_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
- /* 1 - 640x480@60Hz */
+ /* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 2 - 720x480@60Hz */
+ /* 2 - 720x480@60Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 3 - 720x480@60Hz */
+ /* 3 - 720x480@60Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 4 - 1280x720@60Hz */
+ /* 4 - 1280x720@60Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 5 - 1920x1080i@60Hz */
+ /* 5 - 1920x1080i@60Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 6 - 720(1440)x480i@60Hz */
+ /* 6 - 720(1440)x480i@60Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 7 - 720(1440)x480i@60Hz */
+ /* 7 - 720(1440)x480i@60Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 8 - 720(1440)x240@60Hz */
+ /* 8 - 720(1440)x240@60Hz 4:3 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 9 - 720(1440)x240@60Hz */
+ /* 9 - 720(1440)x240@60Hz 16:9 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 10 - 2880x480i@60Hz */
+ /* 10 - 2880x480i@60Hz 4:3 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 11 - 2880x480i@60Hz */
+ /* 11 - 2880x480i@60Hz 16:9 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 12 - 2880x240@60Hz */
+ /* 12 - 2880x240@60Hz 4:3 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 13 - 2880x240@60Hz */
+ /* 13 - 2880x240@60Hz 16:9 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 14 - 1440x480@60Hz */
+ /* 14 - 1440x480@60Hz 4:3 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 15 - 1440x480@60Hz */
+ /* 15 - 1440x480@60Hz 16:9 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 16 - 1920x1080@60Hz */
+ /* 16 - 1920x1080@60Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 17 - 720x576@50Hz */
+ /* 17 - 720x576@50Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 18 - 720x576@50Hz */
+ /* 18 - 720x576@50Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 19 - 1280x720@50Hz */
+ /* 19 - 1280x720@50Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 20 - 1920x1080i@50Hz */
+ /* 20 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 21 - 720(1440)x576i@50Hz */
+ /* 21 - 720(1440)x576i@50Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 22 - 720(1440)x576i@50Hz */
+ /* 22 - 720(1440)x576i@50Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 23 - 720(1440)x288@50Hz */
+ /* 23 - 720(1440)x288@50Hz 4:3 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 24 - 720(1440)x288@50Hz */
+ /* 24 - 720(1440)x288@50Hz 16:9 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 25 - 2880x576i@50Hz */
+ /* 25 - 2880x576i@50Hz 4:3 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 26 - 2880x576i@50Hz */
+ /* 26 - 2880x576i@50Hz 16:9 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 27 - 2880x288@50Hz */
+ /* 27 - 2880x288@50Hz 4:3 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 28 - 2880x288@50Hz */
+ /* 28 - 2880x288@50Hz 16:9 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 29 - 1440x576@50Hz */
+ /* 29 - 1440x576@50Hz 4:3 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 30 - 1440x576@50Hz */
+ /* 30 - 1440x576@50Hz 16:9 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 31 - 1920x1080@50Hz */
+ /* 31 - 1920x1080@50Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 32 - 1920x1080@24Hz */
+ /* 32 - 1920x1080@24Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 33 - 1920x1080@25Hz */
+ /* 33 - 1920x1080@25Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 34 - 1920x1080@30Hz */
+ /* 34 - 1920x1080@30Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 35 - 2880x480@60Hz */
+ /* 35 - 2880x480@60Hz 4:3 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 36 - 2880x480@60Hz */
+ /* 36 - 2880x480@60Hz 16:9 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 37 - 2880x576@50Hz */
+ /* 37 - 2880x576@50Hz 4:3 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 38 - 2880x576@50Hz */
+ /* 38 - 2880x576@50Hz 16:9 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 39 - 1920x1080i@50Hz */
+ /* 39 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 40 - 1920x1080i@100Hz */
+ /* 40 - 1920x1080i@100Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 41 - 1280x720@100Hz */
+ /* 41 - 1280x720@100Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 42 - 720x576@100Hz */
+ /* 42 - 720x576@100Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 43 - 720x576@100Hz */
+ /* 43 - 720x576@100Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 44 - 720(1440)x576i@100Hz */
+ /* 44 - 720(1440)x576i@100Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 45 - 720(1440)x576i@100Hz */
+ /* 45 - 720(1440)x576i@100Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 46 - 1920x1080i@120Hz */
+ /* 46 - 1920x1080i@120Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 47 - 1280x720@120Hz */
+ /* 47 - 1280x720@120Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 48 - 720x480@120Hz */
+ /* 48 - 720x480@120Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 49 - 720x480@120Hz */
+ /* 49 - 720x480@120Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 50 - 720(1440)x480i@120Hz */
+ /* 50 - 720(1440)x480i@120Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 51 - 720(1440)x480i@120Hz */
+ /* 51 - 720(1440)x480i@120Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 52 - 720x576@200Hz */
+ /* 52 - 720x576@200Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 53 - 720x576@200Hz */
+ /* 53 - 720x576@200Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 54 - 720(1440)x576i@200Hz */
+ /* 54 - 720(1440)x576i@200Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 55 - 720(1440)x576i@200Hz */
+ /* 55 - 720(1440)x576i@200Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 56 - 720x480@240Hz */
+ /* 56 - 720x480@240Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 57 - 720x480@240Hz */
+ /* 57 - 720x480@240Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240Hz */
+ /* 58 - 720(1440)x480i@240Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240Hz */
+ /* 59 - 720(1440)x480i@240Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 60 - 1280x720@24Hz */
+ /* 60 - 1280x720@24Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 61 - 1280x720@25Hz */
+ /* 61 - 1280x720@25Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 62 - 1280x720@30Hz */
+ /* 62 - 1280x720@30Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 63 - 1920x1080@120Hz */
+ /* 63 - 1920x1080@120Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 64 - 1920x1080@100Hz */
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 64 - 1920x1080@100Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 65 - 1280x720@24Hz */
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 65 - 1280x720@24Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 66 - 1280x720@25Hz */
+ /* 66 - 1280x720@25Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 67 - 1280x720@30Hz */
+ /* 67 - 1280x720@30Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 68 - 1280x720@50Hz */
+ /* 68 - 1280x720@50Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 69 - 1280x720@60Hz */
+ /* 69 - 1280x720@60Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 70 - 1280x720@100Hz */
+ /* 70 - 1280x720@100Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 71 - 1280x720@120Hz */
+ /* 71 - 1280x720@120Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 72 - 1920x1080@24Hz */
+ /* 72 - 1920x1080@24Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 73 - 1920x1080@25Hz */
+ /* 73 - 1920x1080@25Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 74 - 1920x1080@30Hz */
+ /* 74 - 1920x1080@30Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 75 - 1920x1080@50Hz */
+ /* 75 - 1920x1080@50Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 76 - 1920x1080@60Hz */
+ /* 76 - 1920x1080@60Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 77 - 1920x1080@100Hz */
+ /* 77 - 1920x1080@100Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 78 - 1920x1080@120Hz */
+ /* 78 - 1920x1080@120Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 79 - 1680x720@24Hz */
+ /* 79 - 1680x720@24Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 80 - 1680x720@25Hz */
+ /* 80 - 1680x720@25Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
2948, 3168, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 81 - 1680x720@30Hz */
+ /* 81 - 1680x720@30Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
2420, 2640, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 82 - 1680x720@50Hz */
+ /* 82 - 1680x720@50Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 83 - 1680x720@60Hz */
+ /* 83 - 1680x720@60Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 84 - 1680x720@100Hz */
+ /* 84 - 1680x720@100Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 85 - 1680x720@120Hz */
+ /* 85 - 1680x720@120Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 86 - 2560x1080@24Hz */
+ /* 86 - 2560x1080@24Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 87 - 2560x1080@25Hz */
+ /* 87 - 2560x1080@25Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 88 - 2560x1080@30Hz */
+ /* 88 - 2560x1080@30Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 89 - 2560x1080@50Hz */
+ /* 89 - 2560x1080@50Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 90 - 2560x1080@60Hz */
+ /* 90 - 2560x1080@60Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 91 - 2560x1080@100Hz */
+ /* 91 - 2560x1080@100Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 92 - 2560x1080@120Hz */
+ /* 92 - 2560x1080@120Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 93 - 3840x2160p@24Hz 16:9 */
+ /* 93 - 3840x2160@24Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 94 - 3840x2160p@25Hz 16:9 */
+ /* 94 - 3840x2160@25Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 95 - 3840x2160p@30Hz 16:9 */
+ /* 95 - 3840x2160@30Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 96 - 3840x2160p@50Hz 16:9 */
+ /* 96 - 3840x2160@50Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 97 - 3840x2160p@60Hz 16:9 */
+ /* 97 - 3840x2160@60Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 98 - 4096x2160p@24Hz 256:135 */
+ /* 98 - 4096x2160@24Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 99 - 4096x2160p@25Hz 256:135 */
+ /* 99 - 4096x2160@25Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 100 - 4096x2160p@30Hz 256:135 */
+ /* 100 - 4096x2160@30Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 101 - 4096x2160p@50Hz 256:135 */
+ /* 101 - 4096x2160@50Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 102 - 4096x2160p@60Hz 256:135 */
+ /* 102 - 4096x2160@60Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 103 - 3840x2160p@24Hz 64:27 */
+ /* 103 - 3840x2160@24Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 104 - 3840x2160p@25Hz 64:27 */
+ /* 104 - 3840x2160@25Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 105 - 3840x2160p@30Hz 64:27 */
+ /* 105 - 3840x2160@30Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 106 - 3840x2160p@50Hz 64:27 */
+ /* 106 - 3840x2160@50Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 107 - 3840x2160p@60Hz 64:27 */
+ /* 107 - 3840x2160@60Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
@@ -4874,6 +4878,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
+ * As some drivers don't support atomic, we can't use connector state.
+ * So just initialize the frame with default values, the same way
+ * as is done with the other properties here.
+ */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ frame->itc = 0;
+
+ /*
* Populate picture aspect ratio from either
* user input (if specified) or from the CEA mode list.
*/
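
A driver that does know the content type could still override these defaults after calling the helper (sketch; assumes the enum values from linux/hdmi.h):

    drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, is_hdmi2_sink);
    /* Driver-specific override: flag IT content of type "game". */
    frame.itc = 1;
    frame.content_type = HDMI_CONTENT_TYPE_GAME;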
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 186d00adfb5f..9da36a6271d3 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,6 +18,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_client.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
@@ -26,11 +27,8 @@
#include <drm/drm_print.h>
#include <linux/module.h>
-#define DEFAULT_FBDEFIO_DELAY_MS 50
-
struct drm_fbdev_cma {
struct drm_fb_helper fb_helper;
- const struct drm_framebuffer_funcs *fb_funcs;
};
/**
@@ -44,36 +42,6 @@ struct drm_fbdev_cma {
*
* An fbdev framebuffer backed by cma is also available by calling
* drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down.
- * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
- * set up automatically. &drm_framebuffer_funcs.dirty is called by
- * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
- *
- * Example fbdev deferred io code::
- *
- * static int driver_fb_dirty(struct drm_framebuffer *fb,
- * struct drm_file *file_priv,
- * unsigned flags, unsigned color,
- * struct drm_clip_rect *clips,
- * unsigned num_clips)
- * {
- * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
- * ... push changes ...
- * return 0;
- * }
- *
- * static struct drm_framebuffer_funcs driver_fb_funcs = {
- * .destroy = drm_gem_fb_destroy,
- * .create_handle = drm_gem_fb_create_handle,
- * .dirty = driver_fb_dirty,
- * };
- *
- * Initialize::
- *
- * fbdev = drm_fb_cma_fbdev_init_with_funcs(dev, 16,
- * dev->mode_config.num_crtc,
- * dev->mode_config.num_connector,
- * &driver_fb_funcs);
- *
*/
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
@@ -131,236 +99,28 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
-static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(info->device, vma, info->screen_base,
- info->fix.smem_start, info->fix.smem_len);
-}
-
-static struct fb_ops drm_fbdev_cma_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_mmap = drm_fb_cma_mmap,
-};
-
-static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- fb_deferred_io_mmap(info, vma);
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- return 0;
-}
-
-static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
- struct drm_gem_cma_object *cma_obj)
-{
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- /*
- * Per device structures are needed because:
- * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
- * fbdefio: individual delays
- */
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- /* can't be offset from vaddr since dirty() uses cma_obj */
- fbi->screen_buffer = cma_obj->vaddr;
- /* fb_deferred_io_fault() needs a physical address */
- fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
-
- *fbops = *fbi->fbops;
- fbi->fbops = fbops;
-
- fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
- fbi->fbdefio = fbdefio;
- fb_deferred_io_init(fbi);
- fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
-
- return 0;
-}
-
-static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
-{
- if (!fbi->fbdefio)
- return;
-
- fb_deferred_io_cleanup(fbi);
- kfree(fbi->fbdefio);
- kfree(fbi->fbops);
-}
-
-static int
-drm_fbdev_cma_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
- struct drm_device *dev = helper->dev;
- struct drm_gem_cma_object *obj;
- struct drm_framebuffer *fb;
- unsigned int bytes_per_pixel;
- unsigned long offset;
- struct fb_info *fbi;
- size_t size;
- int ret;
-
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
-
- bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
- size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
- obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(obj))
- return -ENOMEM;
-
- fbi = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(fbi)) {
- ret = PTR_ERR(fbi);
- goto err_gem_free_object;
- }
-
- fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
- fbdev_cma->fb_funcs);
- if (IS_ERR(fb)) {
- dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(fb);
- goto err_fb_info_destroy;
- }
-
- helper->fb = fb;
-
- fbi->par = helper;
- fbi->flags = FBINFO_FLAG_DEFAULT;
- fbi->fbops = &drm_fbdev_cma_ops;
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
-
- offset = fbi->var.xoffset * bytes_per_pixel;
- offset += fbi->var.yoffset * fb->pitches[0];
-
- dev->mode_config.fb_base = (resource_size_t)obj->paddr;
- fbi->screen_base = obj->vaddr + offset;
- fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
- fbi->screen_size = size;
- fbi->fix.smem_len = size;
-
- if (fb->funcs->dirty) {
- ret = drm_fbdev_cma_defio_init(fbi, obj);
- if (ret)
- goto err_cma_destroy;
- }
-
- return 0;
-
-err_cma_destroy:
- drm_framebuffer_remove(fb);
-err_fb_info_destroy:
- drm_fb_helper_fini(helper);
-err_gem_free_object:
- drm_gem_object_put_unlocked(&obj->base);
- return ret;
-}
-
-static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
- .fb_probe = drm_fbdev_cma_create,
-};
-
/**
- * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation
+ * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
* @dev->mode_config.preferred_depth is used if this is zero.
* @max_conn_count: Maximum number of connectors.
* @dev->mode_config.num_connector is used if this is zero.
- * @funcs: Framebuffer functions, in particular a custom dirty() callback.
- * Can be NULL.
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs)
+int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
+ unsigned int max_conn_count)
{
struct drm_fbdev_cma *fbdev_cma;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
-
- if (!max_conn_count)
- max_conn_count = dev->mode_config.num_connector;
-
- fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
- if (!fbdev_cma)
- return -ENOMEM;
- fbdev_cma->fb_funcs = funcs;
- fb_helper = &fbdev_cma->fb_helper;
-
- drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n");
- goto err_free;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n");
- goto err_drm_fb_helper_fini;
- }
-
- ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n");
- goto err_drm_fb_helper_fini;
- }
+ /* dev->fb_helper will indirectly point to fbdev_cma after this call */
+ fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
+ if (IS_ERR(fbdev_cma))
+ return PTR_ERR(fbdev_cma);
return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_free:
- kfree(fbdev_cma);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs);
-
-/**
- * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors.
- * @dev->mode_config.num_connector is used if this is zero.
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
- unsigned int max_conn_count)
-{
- return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp,
- max_conn_count, NULL);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
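For context, a minimal sketch of how a converted CMA driver might use the simplified entry point; the foo_ names and the bind flow are assumptions, not taken from this patch:

	/* Hypothetical bind-path helper: enable fbdev emulation once the
	 * DRM device is registered. Passing 0 for max_conn_count means
	 * "use dev->mode_config.num_connector". A failure is logged but
	 * not treated as fatal, since fbdev emulation is optional. */
	static void foo_setup_fbdev(struct drm_device *drm)
	{
		int ret = drm_fb_cma_fbdev_init(drm, 16, 0);

		if (ret)
			DRM_DEV_ERROR(drm->dev, "fbdev init failed: %d\n", ret);
	}

The matching teardown in the driver's unbind path is a single drm_fb_cma_fbdev_fini(drm) call, shown below.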
@@ -370,104 +130,54 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
*/
void drm_fb_cma_fbdev_fini(struct drm_device *dev)
{
- struct drm_fb_helper *fb_helper = dev->fb_helper;
-
- if (!fb_helper)
- return;
-
- /* Unregister if it hasn't been done already */
- if (fb_helper->fbdev && fb_helper->fbdev->dev)
- drm_fb_helper_unregister_fbi(fb_helper);
-
- if (fb_helper->fbdev)
- drm_fbdev_cma_defio_fini(fb_helper->fbdev);
-
- if (fb_helper->fb)
- drm_framebuffer_remove(fb_helper->fb);
-
- drm_fb_helper_fini(fb_helper);
- kfree(to_fbdev_cma(fb_helper));
+ if (dev->fb_helper)
+ drm_fbdev_cma_fini(to_fbdev_cma(dev->fb_helper));
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+ .fb_probe = drm_fb_helper_generic_probe,
+};
+
/**
- * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
+ * drm_fbdev_cma_init() - Allocate and initialize a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
* @max_conn_count: Maximum number of connectors
- * @funcs: fb helper functions, in particular a custom dirty() callback
*
* Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
*/
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs)
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int max_conn_count)
{
struct drm_fbdev_cma *fbdev_cma;
- struct drm_fb_helper *helper;
+ struct drm_fb_helper *fb_helper;
int ret;
fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
- if (!fbdev_cma) {
- dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+ if (!fbdev_cma)
return ERR_PTR(-ENOMEM);
- }
- fbdev_cma->fb_funcs = funcs;
- helper = &fbdev_cma->fb_helper;
-
- drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+ fb_helper = &fbdev_cma->fb_helper;
- ret = drm_fb_helper_init(dev, helper, max_conn_count);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+ ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+ if (ret)
goto err_free;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to add connectors.\n");
- goto err_drm_fb_helper_fini;
-
- }
- ret = drm_fb_helper_initial_config(helper, preferred_bpp);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to set initial hw configuration.\n");
- goto err_drm_fb_helper_fini;
- }
+ ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_cma_helper_funcs,
+ preferred_bpp, max_conn_count);
+ if (ret)
+ goto err_client_put;
return fbdev_cma;
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(helper);
+err_client_put:
+ drm_client_release(&fb_helper->client);
err_free:
kfree(fbdev_cma);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
-
-static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
-};
-
-/**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device
- * @max_conn_count: Maximum number of connectors
- *
- * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
- */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count)
-{
- return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
- max_conn_count,
- &drm_fb_cma_funcs);
-}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
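A short sketch of the init/fini pairing for drivers that keep the returned pointer themselves; struct foo_priv is an assumed driver-private structure:

	/* Hypothetical: stash the handle at bind time... */
	static void foo_fbdev_init(struct foo_priv *priv, struct drm_device *drm)
	{
		priv->fbdev_cma = drm_fbdev_cma_init(drm, 32, 0);
		if (IS_ERR(priv->fbdev_cma))
			priv->fbdev_cma = NULL;	/* run without fbdev emulation */
	}

	/* ...and hand it back at unbind time. */
	static void foo_fbdev_fini(struct foo_priv *priv)
	{
		if (priv->fbdev_cma)
			drm_fbdev_cma_fini(priv->fbdev_cma);
	}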
/**
@@ -477,14 +187,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
- if (fbdev_cma->fb_helper.fbdev)
- drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
-
- if (fbdev_cma->fb_helper.fb)
- drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
-
- drm_fb_helper_fini(&fbdev_cma->fb_helper);
- kfree(fbdev_cma);
+ /* All resources have now been freed by drm_fbdev_fb_destroy() */
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2ee1eaa66188..4b0dd20bccb8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/dma-buf.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -66,6 +67,9 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
+ * Drivers that support a dumb buffer with a virtual address and mmap support
+ * should try out the generic fbdev emulation using drm_fbdev_generic_setup().
+ *
* Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
* down by calling drm_fb_helper_fbdev_teardown().
*
@@ -368,7 +372,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
- unsigned int plane_mask;
struct drm_modeset_acquire_ctx ctx;
drm_modeset_acquire_init(&ctx, 0);
@@ -381,7 +384,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
state->acquire_ctx = &ctx;
retry:
- plane_mask = 0;
drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -391,9 +393,6 @@ retry:
plane_state->rotation = DRM_MODE_ROTATE_0;
- plane->old_fb = plane->fb;
- plane_mask |= 1 << drm_plane_index(plane);
-
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
continue;
@@ -430,8 +429,6 @@ retry:
ret = drm_atomic_commit(state);
out_state:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK)
goto backoff;
@@ -745,6 +742,24 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
console_unlock();
}
+static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
+ void *src = fb_helper->fbdev->screen_buffer + offset;
+ void *dst = fb_helper->buffer->vaddr + offset;
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ memcpy(dst, src, len);
+ src += fb->pitches[0];
+ dst += fb->pitches[0];
+ }
+}
+
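To make the blit arithmetic above concrete: for an XRGB8888 framebuffer (cpp = 4) with pitches[0] = 4096 and a clip of x1 = 10, y1 = 20, x2 = 110, y2 = 24, the starting offset is 20 * 4096 + 10 * 4 = 81960 bytes, each memcpy moves (110 - 10) * 4 = 400 bytes, and the loop runs four rows, advancing source and destination by one pitch per iteration (values illustrative only).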
static void drm_fb_helper_dirty_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -760,8 +775,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
spin_unlock_irqrestore(&helper->dirty_lock, flags);
/* call dirty callback only when it has been really touched */
- if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+ /* Generic fbdev uses a shadow buffer */
+ if (helper->buffer)
+ drm_fb_helper_dirty_blit_real(helper, &clip_copy);
helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ }
}
/**
@@ -1164,7 +1183,7 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
* @info: fbdev registered by the helper
* @rect: info about rectangle to fill
*
- * A wrapper around cfb_imageblit implemented by fbdev core
+ * A wrapper around cfb_fillrect implemented by fbdev core
*/
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
@@ -2330,6 +2349,20 @@ retry:
return true;
}
+static bool connector_has_possible_crtc(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ int i;
+
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ if (encoder->possible_crtcs & drm_crtc_mask(crtc))
+ return true;
+ }
+
+ return false;
+}
+
static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **best_crtcs,
struct drm_display_mode **modes,
@@ -2338,7 +2371,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int c, o;
struct drm_connector *connector;
const struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -2370,27 +2402,14 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
connector_funcs = connector->helper_private;
/*
- * If the DRM device implements atomic hooks and ->best_encoder() is
- * NULL we fallback to the default drm_atomic_helper_best_encoder()
- * helper.
- */
- if (drm_drv_uses_atomic_modeset(fb_helper->dev) &&
- !connector_funcs->best_encoder)
- encoder = drm_atomic_helper_best_encoder(connector);
- else
- encoder = connector_funcs->best_encoder(connector);
-
- if (!encoder)
- goto out;
-
- /*
* select a crtc for this connector and then attempt to configure
* remaining connectors
*/
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0)
+ if (!connector_has_possible_crtc(connector,
+ crtc->mode_set.crtc))
continue;
for (o = 0; o < n; o++)
@@ -2417,7 +2436,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
sizeof(struct drm_fb_helper_crtc *));
}
}
-out:
+
kfree(crtcs);
return best_score;
}
@@ -2928,6 +2947,294 @@ void drm_fb_helper_output_poll_changed(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
+/* @user: 1=userspace, 0=fbcon */
+static int drm_fbdev_fb_open(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (!try_module_get(fb_helper->dev->driver->fops->owner))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int drm_fbdev_fb_release(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ module_put(fb_helper->dev->driver->fops->owner);
+
+ return 0;
+}
+
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+ * unregister_framebuffer() or fb_release().
+ */
+static void drm_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct fb_info *fbi = fb_helper->fbdev;
+ struct fb_ops *fbops = NULL;
+ void *shadow = NULL;
+
+ if (fbi->fbdefio) {
+ fb_deferred_io_cleanup(fbi);
+ shadow = fbi->screen_buffer;
+ fbops = fbi->fbops;
+ }
+
+ drm_fb_helper_fini(fb_helper);
+
+ if (shadow) {
+ vfree(shadow);
+ kfree(fbops);
+ }
+
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ /*
+ * FIXME:
+ * Remove conditional when all CMA drivers have been moved over to using
+ * drm_fbdev_generic_setup().
+ */
+ if (fb_helper->client.funcs) {
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (fb_helper->dev->driver->gem_prime_mmap)
+ return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+ else
+ return -ENODEV;
+}
+
+static struct fb_ops drm_fbdev_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_open = drm_fbdev_fb_open,
+ .fb_release = drm_fbdev_fb_release,
+ .fb_destroy = drm_fbdev_fb_destroy,
+ .fb_mmap = drm_fbdev_fb_mmap,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+};
+
+static struct fb_deferred_io drm_fbdev_defio = {
+ .delay = HZ / 20,
+ .deferred_io = drm_fb_helper_deferred_io,
+};
+
+/**
+ * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper
+ * @fb_helper: fbdev helper structure
+ * @sizes: describes fbdev size and scanout surface size
+ *
+ * This function uses the client API to create a framebuffer backed by a dumb buffer.
+ *
+ * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
+ * fb_copyarea, fb_imageblit.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_client_buffer *buffer;
+ struct drm_framebuffer *fb;
+ struct fb_info *fbi;
+ u32 format;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ sizes->surface_height, format);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
+ fb = buffer->fb;
+
+ fbi = drm_fb_helper_alloc_fbi(fb_helper);
+ if (IS_ERR(fbi)) {
+ ret = PTR_ERR(fbi);
+ goto err_free_buffer;
+ }
+
+ fbi->par = fb_helper;
+ fbi->fbops = &drm_fbdev_fb_ops;
+ fbi->screen_size = fb->height * fb->pitches[0];
+ fbi->fix.smem_len = fbi->screen_size;
+ fbi->screen_buffer = buffer->vaddr;
+ strcpy(fbi->fix.id, "DRM emulated");
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
+
+ if (fb->funcs->dirty) {
+ struct fb_ops *fbops;
+ void *shadow;
+
+ /*
+ * fb_deferred_io_cleanup() clears &fbops->fb_mmap so a
+ * per-instance version is necessary.
+ */
+ fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+ shadow = vzalloc(fbi->screen_size);
+ if (!fbops || !shadow) {
+ kfree(fbops);
+ vfree(shadow);
+ ret = -ENOMEM;
+ goto err_fb_info_destroy;
+ }
+
+ *fbops = *fbi->fbops;
+ fbi->fbops = fbops;
+ fbi->screen_buffer = shadow;
+ fbi->fbdefio = &drm_fbdev_defio;
+
+ fb_deferred_io_init(fbi);
+ }
+
+ return 0;
+
+err_fb_info_destroy:
+ drm_fb_helper_fini(fb_helper);
+err_free_buffer:
+ drm_client_framebuffer_delete(buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_generic_probe);
+
+static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
+ .fb_probe = drm_fb_helper_generic_probe,
+};
+
+static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->fbdev) {
+ drm_fb_helper_unregister_fbi(fb_helper);
+ /* drm_fbdev_fb_destroy() takes care of cleanup */
+ return;
+ }
+
+ /* Did drm_fb_helper_fbdev_setup() run? */
+ if (fb_helper->dev)
+ drm_fb_helper_fini(fb_helper);
+
+ drm_client_release(client);
+ kfree(fb_helper);
+}
+
+static int drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+
+ return 0;
+}
+
+static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ /* If drm_fb_helper_fbdev_setup() failed, we only try once */
+ if (!fb_helper->dev && fb_helper->funcs)
+ return 0;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ if (!dev->mode_config.num_connector)
+ return 0;
+
+ ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
+ fb_helper->preferred_bpp, 0);
+ if (ret) {
+ fb_helper->dev = NULL;
+ fb_helper->fbdev = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_client_funcs drm_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_fbdev_client_unregister,
+ .restore = drm_fbdev_client_restore,
+ .hotplug = drm_fbdev_client_hotplug,
+};
+
+/**
+ * drm_fbdev_generic_setup() - Setup generic fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ *
+ * This function sets up generic fbdev emulation for drivers that support
+ * dumb buffers with a virtual address that can be mmap'ed.
+ *
+ * Restore, hotplug events and teardown are all taken care of. Drivers that do
+ * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
+ * Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * Drivers that set the dirty callback on their framebuffer will get a shadow
+ * fbdev buffer that is blitted onto the real buffer. This is done in order to
+ * make deferred I/O work with all kinds of buffers.
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ if (!drm_fbdev_emulation)
+ return 0;
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return -ENOMEM;
+
+ ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ if (ret) {
+ kfree(fb_helper);
+ return ret;
+ }
+
+ fb_helper->preferred_bpp = preferred_bpp;
+
+ drm_fbdev_client_hotplug(&fb_helper->client);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fbdev_generic_setup);
+
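As a usage sketch (the foo_ bind function is hypothetical), a driver that supports mmap'able dumb buffers would call this once after registering the device; since setup is retried on the next hotplug event, the return value can normally be ignored:

	/* Hypothetical driver bind path. */
	static int foo_drm_bind(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);
		int ret;

		ret = drm_dev_register(drm, 0);
		if (ret)
			return ret;

		/* 32 bpp preferred; preferred_depth is used if 0 */
		drm_fbdev_generic_setup(drm, 32);

		return 0;
	}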
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 6d9b9453707c..ffa8dc35515f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <drm/drm_client.h>
#include <drm/drm_file.h>
#include <drm/drmP.h>
@@ -101,6 +102,166 @@ DEFINE_MUTEX(drm_global_mutex);
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
+/**
+ * drm_file_alloc - allocate file context
+ * @minor: minor to allocate on
+ *
+ * This allocates a new DRM file context. It is not linked into any context and
+ * can be used by the caller freely. Note that the context keeps a pointer to
+ * @minor, so it must be freed before @minor is.
+ *
+ * RETURNS:
+ * Pointer to newly allocated context, ERR_PTR on failure.
+ */
+struct drm_file *drm_file_alloc(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_file *file;
+ int ret;
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return ERR_PTR(-ENOMEM);
+
+ file->pid = get_pid(task_pid(current));
+ file->minor = minor;
+
+ /* for compatibility root is always authenticated */
+ file->authenticated = capable(CAP_SYS_ADMIN);
+ file->lock_count = 0;
+
+ INIT_LIST_HEAD(&file->lhead);
+ INIT_LIST_HEAD(&file->fbs);
+ mutex_init(&file->fbs_lock);
+ INIT_LIST_HEAD(&file->blobs);
+ INIT_LIST_HEAD(&file->pending_event_list);
+ INIT_LIST_HEAD(&file->event_list);
+ init_waitqueue_head(&file->event_wait);
+ file->event_space = 4096; /* set aside 4k for event buffer */
+
+ mutex_init(&file->event_read_lock);
+
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_open(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_open(file);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_init_file_private(&file->prime);
+
+ if (dev->driver->open) {
+ ret = dev->driver->open(dev, file);
+ if (ret < 0)
+ goto out_prime_destroy;
+ }
+
+ return file;
+
+out_prime_destroy:
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file->prime);
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(file);
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file);
+ put_pid(file->pid);
+ kfree(file);
+
+ return ERR_PTR(ret);
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Unlink pending events */
+ list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+ pending_link) {
+ list_del(&e->pending_link);
+ e->file_priv = NULL;
+ }
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+ list_del(&e->link);
+ kfree(e);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * drm_file_free - free file context
+ * @file: context to free, or NULL
+ *
+ * This destroys and deallocates a DRM file context previously allocated via
+ * drm_file_alloc(). The caller must make sure to unlink it from any contexts
+ * before calling this.
+ *
+ * If NULL is passed, this is a no-op.
+ */
+void drm_file_free(struct drm_file *file)
+{
+ struct drm_device *dev;
+
+ if (!file)
+ return;
+
+ dev = file->minor->dev;
+
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file->minor->kdev->devt),
+ dev->open_count);
+
+ if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
+ dev->driver->preclose)
+ dev->driver->preclose(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ drm_legacy_lock_release(dev, file->filp);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_legacy_reclaim_buffers(dev, file);
+
+ drm_events_release(file);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_fb_release(file);
+ drm_property_destroy_user_blobs(dev, file);
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(file);
+
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file);
+
+ drm_legacy_ctxbitmap_flush(dev, file);
+
+ if (drm_is_primary_client(file))
+ drm_master_release(file);
+
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file->prime);
+
+ WARN_ON(!list_empty(&file->event_list));
+
+ put_pid(file->pid);
+ kfree(file);
+}
+
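A minimal sketch of the intended pairing for in-kernel callers; foo_link_file() stands in for whatever caller-side bookkeeping links the context somewhere, and is purely an assumption:

	/* Hypothetical: allocate a file context on a minor and undo the
	 * allocation with drm_file_free() on any failure path. */
	static struct drm_file *foo_open_file(struct drm_minor *minor)
	{
		struct drm_file *file = drm_file_alloc(minor);
		int ret;

		if (IS_ERR(file))
			return file;

		ret = foo_link_file(file);	/* assumed bookkeeping hook */
		if (ret) {
			/* safe: the context is not linked anywhere yet */
			drm_file_free(file);
			return ERR_PTR(ret);
		}

		return file;
	}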
static int drm_setup(struct drm_device * dev)
{
int ret;
@@ -207,52 +368,22 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- filp->private_data = priv;
- filp->f_mode |= FMODE_UNSIGNED_OFFSET;
- priv->filp = filp;
- priv->pid = get_pid(task_pid(current));
- priv->minor = minor;
-
- /* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
- priv->lock_count = 0;
-
- INIT_LIST_HEAD(&priv->lhead);
- INIT_LIST_HEAD(&priv->fbs);
- mutex_init(&priv->fbs_lock);
- INIT_LIST_HEAD(&priv->blobs);
- INIT_LIST_HEAD(&priv->pending_event_list);
- INIT_LIST_HEAD(&priv->event_list);
- init_waitqueue_head(&priv->event_wait);
- priv->event_space = 4096; /* set aside 4k for event buffer */
-
- mutex_init(&priv->event_read_lock);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_open(dev, priv);
-
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_open(priv);
-
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_init_file_private(&priv->prime);
-
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
- goto out_prime_destroy;
- }
+ priv = drm_file_alloc(minor);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
if (drm_is_primary_client(priv)) {
ret = drm_master_open(priv);
- if (ret)
- goto out_close;
+ if (ret) {
+ drm_file_free(priv);
+ return ret;
+ }
}
+ filp->private_data = priv;
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+ priv->filp = filp;
+
mutex_lock(&dev->filelist_mutex);
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
@@ -278,45 +409,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
#endif
return 0;
-
-out_close:
- if (dev->driver->postclose)
- dev->driver->postclose(dev, priv);
-out_prime_destroy:
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&priv->prime);
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_release(priv);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_release(dev, priv);
- put_pid(priv->pid);
- kfree(priv);
- filp->private_data = NULL;
- return ret;
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e, *et;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- /* Unlink pending events */
- list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
- pending_link) {
- list_del(&e->pending_link);
- e->file_priv = NULL;
- }
-
- /* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
- list_del(&e->link);
- kfree(e);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void drm_legacy_dev_reinit(struct drm_device *dev)
@@ -353,6 +445,8 @@ void drm_lastclose(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
+
+ drm_client_dev_restore(dev);
}
/**
@@ -383,57 +477,7 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&file_priv->lhead);
mutex_unlock(&dev->filelist_mutex);
- if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
- dev->driver->preclose)
- dev->driver->preclose(dev, file_priv);
-
- /* ========================================================
- * Begin inline drm_release
- */
-
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- dev->open_count);
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_lock_release(dev, filp);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- drm_legacy_reclaim_buffers(dev, file_priv);
-
- drm_events_release(file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_fb_release(file_priv);
- drm_property_destroy_user_blobs(dev, file_priv);
- }
-
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_release(file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_release(dev, file_priv);
-
- drm_legacy_ctxbitmap_flush(dev, file_priv);
-
- if (drm_is_primary_client(file_priv))
- drm_master_release(file_priv);
-
- if (dev->driver->postclose)
- dev->driver->postclose(dev, file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&file_priv->prime);
-
- WARN_ON(!list_empty(&file_priv->event_list));
-
- put_pid(file_priv->pid);
- kfree(file_priv);
-
- /* ========================================================
- * End inline drm_release
- */
+ drm_file_free(file_priv);
if (!--dev->open_count) {
drm_lastclose(dev);
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 5ca6395cd4d3..35c1e2742c27 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -152,27 +152,27 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
- { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
- { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
- { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
- { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
};
unsigned int i;
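The new flag lets format-agnostic code branch on YUV-ness without maintaining its own format lists. An illustrative sketch (foo_setup_yuv() is a hypothetical driver hook):

	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	if (info && info->is_yuv)
		foo_setup_yuv(info);	/* e.g. program CSC for num_planes planes */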
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index bfedceff87bb..781af1d42d76 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -95,21 +95,20 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @or: pointer to request structure
+ * @file_priv: drm file
*
* Add a new FB to the specified CRTC, given a user request. This is the
* original addfb ioctl which only supported RGB formats.
*
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+ struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
int ret;
@@ -134,6 +133,12 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ return drm_mode_addfb(dev, data, file_priv);
+}
+
static int fb_plane_width(int width,
const struct drm_format_info *format, int plane)
{
@@ -367,29 +372,28 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
/**
* drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @dev: drm device
+ * @fb_id: id of framebuffer to remove
+ * @file_priv: drm file
*
- * Remove the FB specified by the user.
+ * Remove the specified FB.
*
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+ struct drm_file *file_priv)
{
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
- uint32_t *id = data;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, file_priv, *id);
+ fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
if (!fb)
return -ENOENT;
@@ -435,6 +439,14 @@ fail_unref:
return -ENOENT;
}
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ uint32_t *fb_id = data;
+
+ return drm_mode_rmfb(dev, *fb_id, file_priv);
+}
+
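With the ioctl wrappers split out, an in-kernel client such as the fbdev emulation can call the core functions directly. A hedged sketch, assuming the legacy behaviour of writing the new id back into the request:

	/* Hypothetical: add a legacy RGB framebuffer for an existing
	 * dumb-buffer handle, then drop it again by id. */
	struct drm_mode_fb_cmd cmd = {
		.width = 1024, .height = 768,
		.pitch = 1024 * 4, .bpp = 32, .depth = 24,
		.handle = buffer_handle,	/* assumed GEM handle */
	};
	int ret = drm_mode_addfb(dev, &cmd, file_priv);

	if (!ret)
		drm_mode_rmfb(dev, cmd.fb_id, file_priv);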
/**
* drm_mode_getfb - get FB info
* @dev: drm device for the ioctl
@@ -835,9 +847,7 @@ retry:
if (ret)
goto unlock;
- plane_mask |= BIT(drm_plane_index(plane));
-
- plane->old_fb = plane->fb;
+ plane_mask |= drm_plane_mask(plane);
}
/* This list is only filled when disable_crtcs is set. */
@@ -852,9 +862,6 @@ retry:
ret = drm_atomic_commit(state);
unlock:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4a16d7b26c89..bf90625df3c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}
+ if (node->readonly) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
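Seen from userspace (illustrative only), a writable shared mapping of a read-only node now fails up front, and because VM_MAYWRITE is cleared, a later mprotect(PROT_WRITE) on a read-only mapping is refused as well:

	/* offset obtained from the driver's usual mmap-offset query */
	void *ok  = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, offset);
	void *bad = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, offset);	/* MAP_FAILED, errno == EINVAL */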
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index acfbc0641a06..2810d4131411 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -253,7 +253,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct dma_buf *dma_buf;
struct dma_fence *fence;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return 0;
dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index b2dc21e33ae0..5799e2782dd1 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b72242e93ea4..40179c5fc6b8 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -26,6 +26,8 @@
/* drm_file.c */
extern struct mutex drm_global_mutex;
+struct drm_file *drm_file_alloc(struct drm_minor *minor);
+void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0d4cfb232576..ea10e9a26aad 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -334,6 +334,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->aspect_ratio_allowed = req->value;
break;
+ case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->writeback_connectors = req->value;
+ break;
default:
return -EINVAL;
}
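From userspace the new capability is opt-in via the existing SET_CLIENT_CAP ioctl; as the code above shows, the atomic cap must be enabled first. A libdrm-based sketch:

	/* Illustrative: without these two calls, writeback connectors
	 * stay hidden from GETRESOURCES. */
	drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
	drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);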
@@ -634,12 +641,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 50c73c0a20b9..b54fb78a283c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Clone the lessor file to create a new file for us */
DRM_DEBUG_LEASE("Allocating lease file\n");
- path_get(&lessor_file->f_path);
- lessee_file = alloc_file(&lessor_file->f_path,
- lessor_file->f_mode,
- fops_get(lessor_file->f_inode->i_fop));
-
+ lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
goto out_lessee;
}
- /* Initialize the new file for DRM */
- DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
- ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
- if (ret)
- goto out_lessee_file;
-
lessee_priv = lessee_file->private_data;
-
/* Change the file to a master one */
drm_master_put(&lessee_priv->master);
lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
return 0;
-out_lessee_file:
- fput(lessee_file);
-
out_lessee:
drm_master_put(&lessee);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index bc73b7f5b9fc..80b75501f5c6 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -392,6 +392,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_DCS_COMPRESSION_MODE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
@@ -410,6 +411,7 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
+ case MIPI_DSI_PPS_LONG_WRITE:
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 3166026a1874..3cc5fbd78ee2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+static u64 rb_to_hole_size(struct rb_node *rb)
+{
+ return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static void insert_hole_size(struct rb_root_cached *root,
+ struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
+ u64 x = node->hole_size;
+ bool first = true;
+
+ while (*link) {
+ rb = *link;
+ if (x > rb_to_hole_size(rb)) {
+ link = &rb->rb_left;
+ } else {
+ link = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb_hole_size, rb, link);
+ rb_insert_color_cached(&node->rb_hole_size, root, first);
+}
+
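Note the comparison direction above: larger holes go left, so the size tree is kept in descending order and the cached leftmost node is always the largest free hole. With holes of 8, 4 and 2 pages, rb_first_cached() yields the 8-page hole, which is what allows drm_mm_insert_node_in_range() further down to reject an oversized request in O(1).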
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
@@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
- RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+ insert_hole_size(&mm->holes_size, node);
RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
list_add(&node->hole_stack, &mm->hole_stack);
@@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
list_del(&node->hole_stack);
- rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+ rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
node->hole_size = 0;
@@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
- struct rb_node *best = NULL;
- struct rb_node **link = &mm->holes_size.rb_node;
+ struct rb_node *rb = mm->holes_size.rb_root.rb_node;
+ struct drm_mm_node *best = NULL;
- while (*link) {
- struct rb_node *rb = *link;
+ do {
+ struct drm_mm_node *node =
+ rb_entry(rb, struct drm_mm_node, rb_hole_size);
- if (size <= rb_hole_size(rb)) {
- link = &rb->rb_left;
- best = rb;
+ if (size <= node->hole_size) {
+ best = node;
+ rb = rb->rb_right;
} else {
- link = &rb->rb_right;
+ rb = rb->rb_left;
}
- }
+ } while (rb);
- return rb_hole_size_to_node(best);
+ return best;
}
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
+ struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
- struct rb_node **link = &mm->holes_addr.rb_node;
- while (*link) {
+ while (rb) {
u64 hole_start;
- node = rb_hole_addr_to_node(*link);
+ node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
if (addr < hole_start)
- link = &node->rb_hole_addr.rb_left;
+ rb = node->rb_hole_addr.rb_left;
else if (addr > hole_start + node->hole_size)
- link = &node->rb_hole_addr.rb_right;
+ rb = node->rb_hole_addr.rb_right;
else
break;
}
@@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
u64 start, u64 end, u64 size,
enum drm_mm_insert_mode mode)
{
- if (RB_EMPTY_ROOT(&mm->holes_size))
- return NULL;
-
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
- return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+ return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_reserve_node);
+static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
+{
+ return rb ? rb_to_hole_size(rb) : 0;
+}
+
/**
* drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
@@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
{
struct drm_mm_node *hole;
u64 remainder_mask;
+ bool once;
DRM_MM_BUG_ON(range_start >= range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
+ if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
+ return -ENOSPC;
+
if (alignment <= 1)
alignment = 0;
+ once = mode & DRM_MM_INSERT_ONCE;
+ mode &= ~DRM_MM_INSERT_ONCE;
+
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
- for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
- hole = next_hole(mm, hole, mode)) {
+ for (hole = first_hole(mm, range_start, range_end, size, mode);
+ hole;
+ hole = once ? NULL : next_hole(mm, hole, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
@@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
- rb_replace_node(&old->rb_hole_size,
- &new->rb_hole_size,
- &mm->holes_size);
+ rb_replace_node_cached(&old->rb_hole_size,
+ &new->rb_hole_size,
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
&mm->holes_addr);
@@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
INIT_LIST_HEAD(&mm->hole_stack);
mm->interval_tree = RB_ROOT_CACHED;
- mm->holes_size = RB_ROOT;
+ mm->holes_size = RB_ROOT_CACHED;
mm->holes_addr = RB_ROOT;
/* Clever trick to avoid a special case in the free hole tracking. */
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index e5c653357024..21e353bd3948 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -145,6 +145,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
+ /* only expose writeback connectors if userspace understands them */
+ if (!file_priv->writeback_connectors &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
+ continue;
+
if (drm_lease_held(file_priv, connector->base.id)) {
if (count < card_res->count_connectors &&
put_user(connector->base.id, connector_id + count)) {
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ce4d2fb32810..fcb0ab0abb75 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -433,8 +433,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
drm_modeset_lock_all(dev);
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
- ret = drm_mode_connector_set_obj_prop(obj, prop,
- prop_value);
+ ret = drm_connector_set_obj_prop(obj, prop, prop_value);
break;
case DRM_MODE_OBJECT_CRTC:
ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c78ca0e84ffd..02db9ac82d7a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -659,10 +659,12 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
* drm_bus_flags_from_videomode - extract information about pixelclk and
* DE polarity from videomode and store it in a separate variable
* @vm: videomode structure to use
- * @bus_flags: information about pixelclk and DE polarity will be stored here
+ * @bus_flags: information about pixelclk, sync and DE polarity will be stored
+ * here
*
- * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
- * in @bus_flags according to DISPLAY_FLAGS found in @vm
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and
+ * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
+ * found in @vm
*/
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
{
@@ -672,6 +674,11 @@ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
*bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
+ *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE;
+ if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
+ *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE;
+
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
*bus_flags |= DRM_BUS_FLAG_DE_LOW;
if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
@@ -684,7 +691,7 @@ EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
* of_get_drm_display_mode - get a drm_display_mode from devicetree
* @np: device_node with the timing specification
* @dmode: will be set to the return value
- * @bus_flags: information about pixelclk and DE polarity
+ * @bus_flags: information about pixelclk, sync and DE polarity
* @index: index into the list of display timings in devicetree
*
* This function is expensive and should only be used if only one mode is to be
@@ -1257,7 +1264,7 @@ static const char * const drm_mode_status_names[] = {
#undef MODE_STATUS
-static const char *drm_get_mode_status_name(enum drm_mode_status status)
+const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;
@@ -1346,7 +1353,7 @@ void drm_mode_sort(struct list_head *mode_list)
EXPORT_SYMBOL(drm_mode_sort);
/**
- * drm_mode_connector_list_update - update the mode list for the connector
+ * drm_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
* This moves the modes from the @connector probed_modes list
@@ -1356,7 +1363,7 @@ EXPORT_SYMBOL(drm_mode_sort);
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
-void drm_mode_connector_list_update(struct drm_connector *connector)
+void drm_connector_list_update(struct drm_connector *connector)
{
struct drm_display_mode *pmode, *pt;
@@ -1405,7 +1412,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
}
}
}
-EXPORT_SYMBOL(drm_mode_connector_list_update);
+EXPORT_SYMBOL(drm_connector_list_update);
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 1fe122461298..2763a5ec845b 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -9,21 +9,28 @@
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
+/**
+ * DOC: overview
+ *
+ * A set of helper functions to aid DRM drivers in parsing standard DT
+ * properties.
+ */
+
static void drm_release_of(struct device *dev, void *data)
{
of_node_put(data);
}
/**
- * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * drm_of_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
* @port: port OF node
*
* Given a port OF node, return the possible mask of the corresponding
* CRTC within a device's list of CRTCs. Returns zero if not found.
*/
-static uint32_t drm_crtc_port_mask(struct drm_device *dev,
- struct device_node *port)
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
{
unsigned int index = 0;
struct drm_crtc *tmp;
@@ -37,6 +44,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
return 0;
}
+EXPORT_SYMBOL(drm_of_crtc_port_mask);
/**
* drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
@@ -62,7 +70,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
return 0;
}
- possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+ possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
of_node_put(remote_port);
}
@@ -93,7 +101,7 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
- * @master_ops: component master ops to be used
+ * @m_ops: component master ops to be used
*
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
@@ -238,10 +246,17 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!remote)
return -ENODEV;
+ if (!of_device_is_available(remote)) {
+ of_node_put(remote);
+ return -ENODEV;
+ }
+
if (panel) {
*panel = of_drm_find_panel(remote);
- if (*panel)
+ if (!IS_ERR(*panel))
ret = 0;
+ else
+ *panel = NULL;
}
/* No panel found yet, check for a bridge next. */
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 308d442a531b..b902361dee6e 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -94,6 +95,9 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* An error is returned if the panel is already attached to another connector.
*
+ * When unloading, the driver should detach from the panel by calling
+ * drm_panel_detach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
@@ -101,6 +105,13 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
+ panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
+ if (!panel->link) {
+ dev_err(panel->dev, "failed to link panel to %s\n",
+ dev_name(connector->dev->dev));
+ return -EINVAL;
+ }
+
panel->connector = connector;
panel->drm = connector->dev;
@@ -115,10 +126,15 @@ EXPORT_SYMBOL(drm_panel_attach);
* Detaches a panel from the connector it is attached to. If a panel is not
* attached to any connector this is effectively a no-op.
*
+ * This function should not be called by the panel device itself. It
+ * is only for the drm device that called drm_panel_attach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_detach(struct drm_panel *panel)
{
+ device_link_del(panel->link);
+
panel->connector = NULL;
panel->drm = NULL;
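The device link makes the pairing explicit; a short sketch of what this implies for the DRM device that owns the connector (the bind/unbind split is illustrative):

	/* bind: creates a device link from the DRM device to the panel */
	ret = drm_panel_attach(panel, connector);
	if (ret)
		return ret;	/* -EBUSY if attached, -EINVAL if the link failed */

	/* unbind: the same DRM device detaches, which drops the device link */
	drm_panel_detach(panel);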
@@ -135,12 +151,19 @@ EXPORT_SYMBOL(drm_panel_detach);
* tree node. If a matching panel is found, return a pointer to it.
*
* Return: A pointer to the panel registered for the specified device tree
- * node or NULL if no panel matching the device tree node can be found.
+ * node or an ERR_PTR() if no panel matching the device tree node can be found.
+ * Possible error codes returned by this function:
+ * - EPROBE_DEFER: the panel device has not been probed yet, and the caller
+ * should retry later
+ * - ENODEV: the device is not available (status != "okay" or "ok")
*/
struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
struct drm_panel *panel;
+ if (!of_device_is_available(np))
+ return ERR_PTR(-ENODEV);
+
mutex_lock(&panel_lock);
list_for_each_entry(panel, &panel_list, list) {
@@ -151,7 +174,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
}
mutex_unlock(&panel_lock);
- return NULL;
+ return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(of_drm_find_panel);
#endif
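Callers now have to tell the two error cases apart; a minimal sketch of the expected pattern in a probe path, where treating -ENODEV as "no panel" is a policy assumption rather than something the API mandates:

	struct drm_panel *panel;

	panel = of_drm_find_panel(panel_node);
	if (IS_ERR(panel)) {
		if (PTR_ERR(panel) == -ENODEV)
			panel = NULL;		/* node disabled: run without a panel */
		else
			return PTR_ERR(panel);	/* -EPROBE_DEFER: retry later */
	}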
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 4db9c515b74f..896e42a34895 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -326,64 +326,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
EXPORT_SYMBOL(drm_legacy_pci_init);
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
-{
- struct pci_dev *root;
- u32 lnkcap, lnkcap2;
-
- *mask = 0;
- if (!dev->pdev)
- return -EINVAL;
-
- root = dev->pdev->bus->self;
-
- /* we've been informed via and serverworks don't make the cut */
- if (root->vendor == PCI_VENDOR_ID_VIA ||
- root->vendor == PCI_VENDOR_ID_SERVERWORKS)
- return -EINVAL;
-
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
-
- if (lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- *mask |= DRM_PCIE_SPEED_25;
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- *mask |= DRM_PCIE_SPEED_50;
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- *mask |= DRM_PCIE_SPEED_80;
- } else { /* pre-r3.0 */
- if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
- *mask |= DRM_PCIE_SPEED_25;
- if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
- *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
- }
-
- DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
- return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
-
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
-{
- struct pci_dev *root;
- u32 lnkcap;
-
- *mlw = 0;
- if (!dev->pdev)
- return -EINVAL;
-
- root = dev->pdev->bus->self;
-
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
-
- *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
-
- DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
- return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_max_link_width);
-
#else
void drm_pci_agp_destroy(struct drm_device *dev) {}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 035054455301..6153cbda239f 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -177,6 +177,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
@@ -561,19 +565,66 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
if (i == plane->format_count)
return -EINVAL;
- if (!plane->modifier_count)
- return 0;
+ if (plane->funcs->format_mod_supported) {
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return -EINVAL;
+ } else {
+ if (!plane->modifier_count)
+ return 0;
- for (i = 0; i < plane->modifier_count; i++) {
- if (modifier == plane->modifiers[i])
- break;
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
+ return -EINVAL;
}
- if (i == plane->modifier_count)
- return -EINVAL;
- if (plane->funcs->format_mod_supported &&
- !plane->funcs->format_mod_supported(plane, format, modifier))
+ return 0;
+}
+
+static int __setplane_check(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ int ret;
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
return -EINVAL;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ ret = drm_plane_check_pixel_format(plane, fb->format->format,
+ fb->modifier);
+ if (ret) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
+ drm_get_format_name(fb->format->format,
+ &format_name),
+ fb->modifier);
+ return ret;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (crtc_w > INT_MAX ||
+ crtc_x > INT_MAX - (int32_t) crtc_w ||
+ crtc_h > INT_MAX ||
+ crtc_y > INT_MAX - (int32_t) crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ crtc_w, crtc_h, crtc_x, crtc_y);
+ return -ERANGE;
+ }
+
+ ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+ if (ret)
+ return ret;
return 0;
}
@@ -598,6 +649,8 @@ static int __setplane_internal(struct drm_plane *plane,
{
int ret = 0;
+ WARN_ON(drm_drv_uses_atomic_modeset(plane->dev));
+
/* No fb means shut it down */
if (!fb) {
plane->old_fb = plane->fb;
@@ -611,37 +664,9 @@ static int __setplane_internal(struct drm_plane *plane,
goto out;
}
- /* Check whether this plane is usable on this CRTC */
- if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
- DRM_DEBUG_KMS("Invalid crtc for plane\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, fb->format->format,
- fb->modifier);
- if (ret) {
- struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
- drm_get_format_name(fb->format->format,
- &format_name),
- fb->modifier);
- goto out;
- }
-
- /* Give drivers some help against integer overflows */
- if (crtc_w > INT_MAX ||
- crtc_x > INT_MAX - (int32_t) crtc_w ||
- crtc_h > INT_MAX ||
- crtc_y > INT_MAX - (int32_t) crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- crtc_w, crtc_h, crtc_x, crtc_y);
- ret = -ERANGE;
- goto out;
- }
-
- ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+ ret = __setplane_check(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
if (ret)
goto out;
@@ -665,6 +690,41 @@ out:
return ret;
}
+static int __setplane_atomic(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ int ret;
+
+ WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev));
+
+ /* No fb means shut it down */
+ if (!fb)
+ return plane->funcs->disable_plane(plane, ctx);
+
+ /*
+ * FIXME: This is redundant with drm_atomic_plane_check(),
+ * but the legacy cursor/"async" .update_plane() tricks
+ * don't call that so we still need this here. Should remove
+ * this when all .update_plane() implementations have been
+ * fixed to call drm_atomic_plane_check().
+ */
+ ret = __setplane_check(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ if (ret)
+ return ret;
+
+ return plane->funcs->update_plane(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
+
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -682,9 +742,15 @@ retry:
ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
if (ret)
goto fail;
- ret = __setplane_internal(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, &ctx);
+
+ if (drm_drv_uses_atomic_modeset(plane->dev))
+ ret = __setplane_atomic(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, &ctx);
+ else
+ ret = __setplane_internal(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, &ctx);
fail:
if (ret == -EDEADLK) {
@@ -816,9 +882,14 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
src_h = fb->height << 16;
}
- ret = __setplane_internal(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- 0, 0, src_w, src_h, ctx);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = __setplane_atomic(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
+ else
+ ret = __setplane_internal(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
if (fb)
drm_framebuffer_put(fb);
@@ -1092,8 +1163,10 @@ retry:
/* Keep the old fb, don't unref it. */
plane->old_fb = NULL;
} else {
- plane->fb = fb;
- drm_framebuffer_get(fb);
+ if (!plane->state) {
+ plane->fb = fb;
+ drm_framebuffer_get(fb);
+ }
}
out:
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index f88f68161519..621f17643bb0 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -440,6 +440,7 @@ out:
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
* @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context, not used here
*
* Provides a default plane update handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
@@ -455,7 +456,8 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state;
@@ -489,6 +491,7 @@ EXPORT_SYMBOL(drm_plane_helper_update);
/**
* drm_plane_helper_disable() - Transitional helper for plane disable
* @plane: plane to disable
+ * @ctx: lock acquire context, not used here
*
* Provides a default plane disable handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
@@ -499,9 +502,11 @@ EXPORT_SYMBOL(drm_plane_helper_update);
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_plane_helper_disable(struct drm_plane *plane)
+int drm_plane_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state;
+ struct drm_framebuffer *old_fb;
/* crtc helpers love to call disable functions for already disabled hw
* functions. So cope with that. */
@@ -521,8 +526,9 @@ int drm_plane_helper_disable(struct drm_plane *plane)
plane_state->plane = plane;
plane_state->crtc = NULL;
+ old_fb = plane_state->fb;
drm_atomic_set_fb_for_plane(plane_state, NULL);
- return drm_plane_helper_commit(plane, plane_state, plane->fb);
+ return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_plane_helper_disable);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 397b46b33739..186db2e4c57a 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
/**
* drm_gem_map_attach - dma_buf attach implementation for GEM
* @dma_buf: buffer to attach device to
- * @target_dev: not used
* @attach: buffer attachment data
*
* Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
*
* Returns 0 on success, negative error code on failure.
*/
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
@@ -435,35 +434,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
-/**
* drm_gem_dmabuf_kmap - map implementation for GEM
* @dma_buf: buffer to be mapped
* @page_num: page number within the buffer
@@ -520,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index b25f98f33f6c..0e7fc3e7dfb4 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -30,6 +30,100 @@
#include <drm/drmP.h>
#include <drm/drm_print.h>
+void __drm_puts_coredump(struct drm_printer *p, const char *str)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ ssize_t len;
+
+ if (!iterator->remain)
+ return;
+
+ if (iterator->offset < iterator->start) {
+ ssize_t copy;
+
+ len = strlen(str);
+
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ copy = len - (iterator->start - iterator->offset);
+
+ if (copy > iterator->remain)
+ copy = iterator->remain;
+
+ /* Copy out the bit of the string that we need */
+ memcpy(iterator->data,
+ str + (iterator->start - iterator->offset), copy);
+
+ iterator->offset = iterator->start + copy;
+ iterator->remain -= copy;
+ } else {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ len = min_t(ssize_t, strlen(str), iterator->remain);
+
+ memcpy(iterator->data + pos, str, len);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+ }
+}
+EXPORT_SYMBOL(__drm_puts_coredump);
+
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ size_t len;
+ char *buf;
+
+ if (!iterator->remain)
+ return;
+
+ /* Figure out how big the string will be */
+ len = snprintf(NULL, 0, "%pV", vaf);
+
+ /* This is the easiest path, we've already advanced beyond the offset */
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ /* Then check if we can directly copy into the target buffer */
+ if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ snprintf(((char *) iterator->data) + pos,
+ iterator->remain, "%pV", vaf);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+
+ return;
+ }
+
+ /*
+ * Finally, hit the slow path and make a temporary string to copy over
+ * using __drm_puts_coredump()
+ */
+ buf = kmalloc(len + 1, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!buf)
+ return;
+
+ snprintf(buf, len + 1, "%pV", vaf);
+ __drm_puts_coredump(p, (const char *) buf);
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(__drm_printfn_coredump);
+
+void __drm_puts_seq_file(struct drm_printer *p, const char *str)
+{
+ seq_puts(p->arg, str);
+}
+EXPORT_SYMBOL(__drm_puts_seq_file);
+
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
{
seq_printf(p->arg, "%pV", vaf);
@@ -49,6 +143,23 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
EXPORT_SYMBOL(__drm_printfn_debug);
/**
+ * drm_puts - print a const string to a &drm_printer stream
+ * @p: the &drm printer
+ * @str: const string
+ *
+ * Allow &drm_printer types that have a dedicated callback for
+ * constant strings to make use of it.
+ */
+void drm_puts(struct drm_printer *p, const char *str)
+{
+ if (p->puts)
+ p->puts(p, str);
+ else
+ drm_printf(p, "%s", str);
+}
+EXPORT_SYMBOL(drm_puts);
+
+/**
* drm_printf - print to a &drm_printer stream
* @p: the &drm_printer
* @f: format string
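For context, a hedged sketch of how the coredump printer is meant to be driven, assuming the matching drm_coredump_printer() constructor in drm_print.h (not visible in this hunk); buffer, offset and count would come from a devcoredump read callback:

	struct drm_print_iterator iter = {
		.data   = buffer,	/* output window supplied by the reader */
		.start  = offset,	/* bytes of the virtual stream to skip */
		.remain = count,	/* capacity of the output window */
	};
	struct drm_printer p = drm_coredump_printer(&iter);

	drm_puts(&p, "---\n");			/* fast path, no formatting */
	drm_printf(&p, "module: %s\n", name);	/* formatted via %pV */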
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 527743394150..a1bb157bfdfa 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -33,6 +33,7 @@
#include <linux/moduleparam.h>
#include <drm/drmP.h>
+#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
@@ -88,9 +89,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- uint32_t *ids = connector->encoder_ids;
enum drm_mode_status ret = MODE_OK;
- unsigned int i;
+ struct drm_encoder *encoder;
+ int i;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode);
@@ -98,13 +99,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
return ret;
/* Step 2: Validate against encoders and crtcs */
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
struct drm_crtc *crtc;
- if (!encoder)
- continue;
-
ret = drm_encoder_mode_valid(encoder, mode);
if (ret != MODE_OK) {
/* No point in continuing for crtc check as this encoder
@@ -363,7 +360,7 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
* using the VESA GTF/CVT formulas.
*
* 3. Modes are moved from the probed_modes list to the modes list. Potential
- * duplicates are merged together (see drm_mode_connector_list_update()).
+ * duplicates are merged together (see drm_connector_list_update()).
* After this step the probed_modes list will be empty again.
*
* 4. Any non-stale mode on the modes list then undergoes validation
@@ -475,7 +472,7 @@ retry:
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, connector->name);
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
}
@@ -488,7 +485,7 @@ retry:
if (count == 0)
goto prune;
- drm_mode_connector_list_update(connector);
+ drm_connector_list_update(connector);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -563,6 +560,8 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
+
+ drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 1f8031e30f53..cdb10f885a4f 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
drm_mode_object_unregister(blob->dev, &blob->base);
- kfree(blob);
+ kvfree(blob);
}
/**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
if (!blob)
return ERR_PTR(-ENOMEM);
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
true, drm_property_free_blob);
if (ret) {
- kfree(blob);
+ kvfree(blob);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 7a00455ca568..51fa978f0d23 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -52,7 +52,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
bool has_primary = state->plane_mask &
- BIT(drm_plane_index(crtc->primary));
+ drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != state->enable)
@@ -281,13 +281,13 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
if (ret)
return ret;
- encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret || !connector)
return ret;
- return drm_mode_connector_attach_encoder(connector, encoder);
+ return drm_connector_attach_encoder(connector, encoder);
}
EXPORT_SYMBOL(drm_simple_display_pipe_init);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index d4f4ce484529..adb3cb27d31e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -207,7 +207,6 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
.get_driver_name = drm_syncobj_null_fence_get_name,
.get_timeline_name = drm_syncobj_null_fence_get_name,
.enable_signaling = drm_syncobj_null_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = NULL,
};
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2660543ad86a..c3301046dfaa 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -100,7 +100,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -173,7 +173,7 @@ vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
@@ -189,7 +189,7 @@ static int drm_vm_fault(struct vm_fault *vmf)
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
-static int drm_vm_shm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
@@ -291,7 +291,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
-static int drm_vm_dma_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -326,7 +326,7 @@ static int drm_vm_dma_fault(struct vm_fault *vmf)
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
-static int drm_vm_sg_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 23c749c05b5a..a6b2fe36b025 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2012 David Airlie <airlied@linux.ie>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
new file mode 100644
index 000000000000..c20e6fe00cb3
--- /dev/null
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_property.h>
+#include <drm/drm_writeback.h>
+#include <drm/drmP.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: overview
+ *
+ * Writeback connectors are used to expose hardware which can write the output
+ * from a CRTC to a memory buffer. They are used and act similarly to other
+ * types of connectors, with some important differences:
+ *
+ * * Writeback connectors don't provide a way to output visually to the user.
+ *
+ * * Writeback connectors are visible to userspace only when the client sets
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS.
+ *
+ * * Writeback connectors don't have EDID.
+ *
+ * A framebuffer may only be attached to a writeback connector when the
+ * connector is attached to a CRTC. The WRITEBACK_FB_ID property, which sets
+ * the framebuffer, applies only to a single commit (see below). A framebuffer may
+ * not be attached while the CRTC is off.
+ *
+ * Unlike with planes, when a writeback framebuffer is removed by userspace,
+ * DRM makes no attempt to remove it from active use by the connector. This is
+ * because no method is provided to abort a writeback operation, and in any
+ * case making a new commit whilst a writeback is ongoing is undefined (see
+ * WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
+ * the framebuffer will automatically no longer be in active use. As it will
+ * also have already been removed from the framebuffer list, there will be no
+ * way for any userspace application to retrieve a reference to it in the
+ * intervening period.
+ *
+ * Writeback connectors have some additional properties, which userspace
+ * can use to query and control them:
+ *
+ * "WRITEBACK_FB_ID":
+ * Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
+ * framebuffer to be written by the writeback connector. This property is
+ * similar to the FB_ID property on planes, but will always read as zero
+ * and is not preserved across commits.
+ * Userspace must set this property to an output buffer every time it
+ * wishes the buffer to get filled.
+ *
+ * "WRITEBACK_PIXEL_FORMATS":
+ * Immutable blob property to store the supported pixel formats table. The
+ * data is an array of u32 DRM_FORMAT_* fourcc values.
+ * Userspace can use this blob to find out what pixel formats are supported
+ * by the connector's writeback engine.
+ *
+ * "WRITEBACK_OUT_FENCE_PTR":
+ * Userspace can use this property to provide a pointer for the kernel to
+ * fill with a sync_file file descriptor, which will signal once the
+ * writeback is finished. The value should be the address of a 32-bit
+ * signed integer, cast to a u64.
+ * Userspace should wait for this fence to signal before making another
+ * commit affecting any of the same CRTCs, Planes or Connectors.
+ * **Failure to do so will result in undefined behaviour.**
+ * For this reason it is strongly recommended that all userspace
+ * applications making use of writeback connectors *always* retrieve an
+ * out-fence for the commit and use it appropriately.
+ * From userspace, this property will always read as zero.
+ */
+
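To make the property contract concrete, a sketch of the userspace side using libdrm's atomic API; the property IDs are assumed to have been looked up by name beforehand, sync_wait() is from libsync, and error handling is trimmed:

	int32_t out_fence = -1;

	drmModeAtomicAddProperty(req, wb_conn_id, fb_id_prop, fb_id);
	drmModeAtomicAddProperty(req, wb_conn_id, out_fence_ptr_prop,
				 (uint64_t)(uintptr_t)&out_fence);

	if (!drmModeAtomicCommit(fd, req, 0, NULL) && out_fence >= 0) {
		sync_wait(out_fence, -1);	/* writeback finished */
		close(out_fence);		/* buffer is safe to reuse */
	}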
+#define fence_to_wb_connector(x) container_of(x->lock, \
+ struct drm_writeback_connector, \
+ fence_lock)
+
+static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->base.dev->driver->name;
+}
+
+static const char *
+drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->timeline_name;
+}
+
+static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static const struct dma_fence_ops drm_writeback_fence_ops = {
+ .get_driver_name = drm_writeback_fence_get_driver_name,
+ .get_timeline_name = drm_writeback_fence_get_timeline_name,
+ .enable_signaling = drm_writeback_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
+
+static int create_writeback_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+
+ if (!dev->mode_config.writeback_fb_id_property) {
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_FB_ID",
+ DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_fb_id_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_pixel_formats_property) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_ATOMIC |
+ DRM_MODE_PROP_IMMUTABLE,
+ "WRITEBACK_PIXEL_FORMATS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_pixel_formats_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_out_fence_ptr_property) {
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_OUT_FENCE_PTR", 0,
+ U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_out_fence_ptr_property = prop;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/**
+ * drm_writeback_connector_init - Initialize a writeback connector and its properties
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values. It will also create an internal encoder associated with the
+ * drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
+ * the encoder helper.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ const struct drm_encoder_helper_funcs *enc_helper_funcs,
+ const u32 *formats, int n_formats)
+{
+ struct drm_property_blob *blob;
+ struct drm_connector *connector = &wb_connector->base;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret = create_writeback_properties(dev);
+
+ if (ret != 0)
+ return ret;
+
+ blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+ formats);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
+ ret = drm_encoder_init(dev, &wb_connector->encoder,
+ &drm_writeback_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ goto fail;
+
+ connector->interlace_allowed = 0;
+
+ ret = drm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK);
+ if (ret)
+ goto connector_fail;
+
+ ret = drm_connector_attach_encoder(connector,
+ &wb_connector->encoder);
+ if (ret)
+ goto attach_fail;
+
+ INIT_LIST_HEAD(&wb_connector->job_queue);
+ spin_lock_init(&wb_connector->job_lock);
+
+ wb_connector->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&wb_connector->fence_lock);
+ snprintf(wb_connector->timeline_name,
+ sizeof(wb_connector->timeline_name),
+ "CONNECTOR:%d-%s", connector->base.id, connector->name);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_out_fence_ptr_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_fb_id_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_pixel_formats_property,
+ blob->base.id);
+ wb_connector->pixel_formats_blob_ptr = blob;
+
+ return 0;
+
+attach_fail:
+ drm_connector_cleanup(connector);
+connector_fail:
+ drm_encoder_cleanup(&wb_connector->encoder);
+fail:
+ drm_property_blob_put(blob);
+ return ret;
+}
+EXPORT_SYMBOL(drm_writeback_connector_init);
+
+/**
+ * drm_writeback_queue_job - Queue a writeback job for later signalling
+ * @wb_connector: The writeback connector to queue a job on
+ * @job: The job to queue
+ *
+ * This function adds a job to the job_queue for a writeback connector. It
+ * takes ownership of the writeback job, so any other references to the job
+ * must be cleared after calling this function.
+ *
+ * Drivers must ensure that for a given writeback connector, jobs are queued in
+ * exactly the same order as they will be completed by the hardware (and
+ * signaled via drm_writeback_signal_completion).
+ *
+ * For every call to drm_writeback_queue_job() there must be exactly one call
+ * to drm_writeback_signal_completion().
+ *
+ * See also: drm_writeback_signal_completion()
+ */
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ list_add_tail(&job->list_entry, &wb_connector->job_queue);
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+EXPORT_SYMBOL(drm_writeback_queue_job);
+
+/*
+ * @cleanup_work: deferred cleanup of a writeback job
+ *
+ * The job cannot be cleaned up directly in drm_writeback_signal_completion,
+ * because it may be called in interrupt context. Dropping the framebuffer
+ * reference can sleep, and so the cleanup is deferred to a workqueue.
+ */
+static void cleanup_work(struct work_struct *work)
+{
+ struct drm_writeback_job *job = container_of(work,
+ struct drm_writeback_job,
+ cleanup_work);
+ drm_framebuffer_put(job->fb);
+ kfree(job);
+}
+
+
+/**
+ * drm_writeback_signal_completion - Signal the completion of a writeback job
+ * @wb_connector: The writeback connector whose job is complete
+ * @status: Status code to set in the writeback out_fence (0 for success)
+ *
+ * Drivers should call this to signal the completion of a previously queued
+ * writeback job. It should be called as soon as possible after the hardware
+ * has finished writing, and may be called from interrupt context.
+ * It is the driver's responsibility to ensure that for a given connector, the
+ * hardware completes writeback jobs in the same order as they are queued.
+ *
+ * Unless the driver is holding its own reference to the framebuffer, it must
+ * not be accessed after calling this function.
+ *
+ * See also: drm_writeback_queue_job()
+ */
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ int status)
+{
+ unsigned long flags;
+ struct drm_writeback_job *job;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ job = list_first_entry_or_null(&wb_connector->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ if (job) {
+ list_del(&job->list_entry);
+ if (job->out_fence) {
+ if (status)
+ dma_fence_set_error(job->out_fence, status);
+ dma_fence_signal(job->out_fence);
+ dma_fence_put(job->out_fence);
+ }
+ }
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+
+ if (WARN_ON(!job))
+ return;
+
+ INIT_WORK(&job->cleanup_work, cleanup_work);
+ queue_work(system_long_wq, &job->cleanup_work);
+}
+EXPORT_SYMBOL(drm_writeback_signal_completion);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
+{
+ struct dma_fence *fence;
+
+ if (WARN_ON(wb_connector->base.connector_type !=
+ DRM_MODE_CONNECTOR_WRITEBACK))
+ return NULL;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_writeback_fence_ops,
+ &wb_connector->fence_lock, wb_connector->fence_context,
+ ++wb_connector->fence_seqno);
+
+ return fence;
+}
+EXPORT_SYMBOL(drm_writeback_get_out_fence);
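A condensed sketch of the driver-side flow these helpers expect, assuming the job arrives via the connector state as elsewhere in this series; the hardware start and the interrupt handler are hypothetical:

	/* atomic commit path, after programming the writeback hardware */
	drm_writeback_queue_job(wb_conn, conn_state->writeback_job);
	conn_state->writeback_job = NULL;	/* the core owns the job now */
	hw_start_writeback(priv, fb);

	/* writeback-done interrupt handler */
	drm_writeback_signal_completion(wb_conn, 0);	/* 0 = success */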
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e5013a999147..9b2720b41571 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
+ struct drm_sched_rq *rq;
if (gpu) {
- drm_sched_entity_init(&gpu->sched,
- &ctx->sched_entity[i],
- &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&ctx->sched_entity[i],
+ &rq, 1, NULL);
}
}
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
- drm_sched_entity_fini(&gpu->sched,
- &ctx->sched_entity[i]);
+ drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
}
@@ -631,8 +630,11 @@ static struct platform_driver etnaviv_platform_driver = {
},
};
+static struct platform_device *etnaviv_drm;
+
static int __init etnaviv_init(void)
{
+ struct platform_device *pdev;
int ret;
struct device_node *np;
@@ -644,7 +646,7 @@ static int __init etnaviv_init(void)
ret = platform_driver_register(&etnaviv_platform_driver);
if (ret != 0)
- platform_driver_unregister(&etnaviv_gpu_driver);
+ goto unregister_gpu_driver;
/*
* If the DT contains at least one available GPU device, instantiate
@@ -653,20 +655,33 @@ static int __init etnaviv_init(void)
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
-
- platform_device_register_simple("etnaviv", -1, NULL, 0);
+ pdev = platform_device_register_simple("etnaviv", -1,
+ NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+ etnaviv_drm = pdev;
of_node_put(np);
break;
}
+ return 0;
+
+unregister_platform_driver:
+ platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+ platform_driver_unregister(&etnaviv_gpu_driver);
return ret;
}
module_init(etnaviv_init);
static void __exit etnaviv_exit(void)
{
- platform_driver_unregister(&etnaviv_gpu_driver);
+ platform_device_unregister(etnaviv_drm);
platform_driver_unregister(&etnaviv_platform_driver);
+ platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d36c7bbe66db..8d02d1b7dcf5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -18,6 +18,7 @@
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/sizes.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -53,7 +54,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_fault *vmf);
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 209ef1274b80..1fa74226db91 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -169,31 +169,30 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return obj->ops->mmap(obj, vma);
}
-int etnaviv_gem_fault(struct vm_fault *vmf)
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct page **pages, *page;
pgoff_t pgoff;
- int ret;
+ int err;
/*
* Make sure we don't parallel update on a fault, nor move or remove
- * something from beneath our feet. Note that vm_insert_page() is
+ * something from beneath our feet. Note that vmf_insert_page() is
* specifically coded to take care of this, so we don't have to.
*/
- ret = mutex_lock_interruptible(&etnaviv_obj->lock);
- if (ret)
- goto out;
-
+ err = mutex_lock_interruptible(&etnaviv_obj->lock);
+ if (err)
+ return VM_FAULT_NOPAGE;
/* make sure we have pages attached now */
pages = etnaviv_gem_get_pages(etnaviv_obj);
mutex_unlock(&etnaviv_obj->lock);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto out;
+ err = PTR_ERR(pages);
+ return vmf_error(err);
}
/* We don't use vmf->pgoff since that has the fake offset: */
@@ -204,25 +203,7 @@ int etnaviv_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
- ret = vm_insert_page(vma, vmf->address, page);
-
-out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46ecd3e66ac9..983e67f19e45 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
dma_fence_put(submit->in_fence);
if (submit->out_fence) {
/* first remove from IDR, so fence can not be found anymore */
- mutex_lock(&submit->gpu->fence_idr_lock);
+ mutex_lock(&submit->gpu->fence_lock);
idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_idr_lock);
+ mutex_unlock(&submit->gpu->fence_lock);
dma_fence_put(submit->out_fence);
}
kfree(submit->pmrs);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 686f6552db48..f225fbc6edd2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_cmdbuf_free(&gpu->buffer);
+ gpu->buffer.suballoc = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
@@ -1027,11 +1028,6 @@ static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
@@ -1049,9 +1045,7 @@ static void etnaviv_fence_release(struct dma_fence *fence)
static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
- .enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
@@ -1733,7 +1727,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- mutex_init(&gpu->fence_idr_lock);
+ mutex_init(&gpu->fence_lock);
/* Map registers: */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index dd430f0f8ff5..9a75a6937268 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
u32 idle_mask;
/* Fencing support */
- struct mutex fence_idr_lock;
+ struct mutex fence_lock;
struct idr fence_idr;
u32 next_fence;
u32 active_fence;
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
struct work_struct sync_point_work;
int sync_point_event;
+ /* hang detection */
+ u32 hangcheck_dma_addr;
+
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 71fbc1f96cb6..f1c88d8ad5ba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -119,8 +119,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
- u32 *p;
- int ret, i;
+ int ret;
/* allocate scratch page */
etnaviv_domain->base.bad_page_cpu =
@@ -131,9 +130,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
ret = -ENOMEM;
goto fail_mem;
}
- p = etnaviv_domain->base.bad_page_cpu;
- for (i = 0; i < SZ_4K / 4; i++)
- *p++ = 0xdead55aa;
+
+ memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
+ SZ_4K / sizeof(u32));
etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
SZ_4K, &etnaviv_domain->pta_dma,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..69e9b431bf1f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -10,6 +10,7 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
+#include "state.xml.h"
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
+ u32 dma_addr;
+ int change;
+
+ /*
+	 * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (fence_completed(gpu, submit->out_fence->seqno))
+ return;
+
+ /*
+ * If the GPU is still making forward progress on the front-end (which
+ * should never loop) we shift out the timeout to give it a chance to
+ * finish the job.
+ */
+ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ change = dma_addr - gpu->hangcheck_dma_addr;
+ if (change < 0 || change > 16) {
+ gpu->hangcheck_dma_addr = dma_addr;
+ schedule_delayed_work(&sched_job->work_tdr,
+ sched_job->sched->timeout);
+ return;
+ }
/* block scheduler */
kthread_park(gpu->sched.thread);
@@ -116,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit)
{
- int ret;
+ int ret = 0;
- ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
- sched_entity, submit->cmdbuf.ctx);
+ /*
+ * Hold the fence lock across the whole operation to avoid jobs being
+ * pushed out of order with regard to their sched fence seqnos as
+ * allocated in drm_sched_job_init.
+ */
+ mutex_lock(&submit->gpu->fence_lock);
+
+ ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+ submit->cmdbuf.ctx);
if (ret)
- return ret;
+ goto out_unlock;
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- mutex_lock(&submit->gpu->fence_idr_lock);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
- mutex_unlock(&submit->gpu->fence_idr_lock);
- if (submit->out_fence_id < 0)
- return -ENOMEM;
+ if (submit->out_fence_id < 0) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
drm_sched_entity_push_job(&submit->sched_job, sched_entity);
- return 0;
+out_unlock:
+ mutex_unlock(&submit->gpu->fence_lock);
+
+ return ret;
}
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 3b323f1e0475..2ad146bbf4f5 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 82c95c34447f..94529aa82339 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
- val &= ~WINCONx_BPPMODE_MASK;
+ val &= WINCONx_ENWIN_F;
switch (fb->format->format) {
case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDOSDxB(win));
}
- val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
- VIDOSD_Wx_ALPHA_B_F(0x0);
+ val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+ VIDOSD_Wx_ALPHA_B_F(0xff);
writel(val, ctx->addr + DECON_VIDOSDxC(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -673,6 +673,8 @@ err:
static const struct dev_pm_ops exynos5433_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3931d5e33fe0..88cbd000eb09 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -832,6 +832,8 @@ static int exynos7_decon_resume(struct device *dev)
static const struct dev_pm_ops exynos7_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver decon_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 86330f396784..c8449ae4f4fe 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -232,9 +233,11 @@ static int exynos_dp_probe(struct platform_device *pdev)
np = of_parse_phandle(dev->of_node, "panel", 0);
if (np) {
dp->plat_data.panel = of_drm_find_panel(np);
+
of_node_put(np);
- if (!dp->plat_data.panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(dp->plat_data.panel))
+ return PTR_ERR(dp->plat_data.panel);
+
goto out;
}
@@ -276,6 +279,8 @@ static int exynos_dp_resume(struct device *dev)
static const struct dev_pm_ops exynos_dp_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos_dp_match[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
deleted file mode 100644
index b0c0621fcdf7..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/* exynos_drm_core.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_crtc.h"
-
-static LIST_HEAD(exynos_drm_subdrv_list);
-
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
-
- return 0;
-}
-
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_del(&subdrv->list);
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv, *n;
- int err;
-
- if (!dev)
- return -EINVAL;
-
- list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
- if (subdrv->probe) {
- subdrv->drm_dev = dev;
-
- /*
- * this probe callback would be called by sub driver
- * after setting of all resources to this sub driver,
- * such as clock, irq and register map are done.
- */
- err = subdrv->probe(dev, subdrv->dev);
- if (err) {
- DRM_DEBUG("exynos drm subdrv probe failed.\n");
- list_del(&subdrv->list);
- continue;
- }
- }
- }
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_remove(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv;
-
- if (!dev) {
- WARN(1, "Unexpected drm device unregister!\n");
- return -EINVAL;
- }
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->remove)
- subdrv->remove(dev, subdrv->dev);
- }
-
- return 0;
-}
-
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
- int ret;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->open) {
- ret = subdrv->open(dev, subdrv->dev, file);
- if (ret)
- goto err;
- }
- }
-
- return 0;
-
-err:
- list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
- return ret;
-}
-
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 66945e0dc57f..2f0babb67c51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -113,7 +113,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -240,8 +240,8 @@ struct drm_encoder *exynos_dpi_probe(struct device *dev)
if (ctx->panel_node) {
ctx->panel = of_drm_find_panel(ctx->panel_node);
- if (!ctx->panel)
- return ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ctx->panel))
+ return ERR_CAST(ctx->panel);
}
return &ctx->encoder;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a81b4a5e24a7..b599f74692e5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -55,8 +55,7 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
-
- ret = exynos_drm_subdrv_open(dev, file);
+ ret = g2d_open(dev, file);
if (ret)
goto err_file_priv_free;
@@ -70,7 +69,7 @@ err_file_priv_free:
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- exynos_drm_subdrv_close(dev, file);
+ g2d_close(dev, file);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@@ -147,13 +146,12 @@ static struct drm_driver exynos_drm_driver = {
.minor = DRIVER_MINOR,
};
-#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
+ if (!drm_dev)
return 0;
private = drm_dev->dev_private;
@@ -170,25 +168,23 @@ static int exynos_drm_suspend(struct device *dev)
return 0;
}
-static int exynos_drm_resume(struct device *dev)
+static void exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
- return 0;
+ if (!drm_dev)
+ return;
private = drm_dev->dev_private;
drm_atomic_helper_resume(drm_dev, private->suspend_state);
exynos_drm_fbdev_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
}
-#endif
static const struct dev_pm_ops exynos_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
+ .prepare = exynos_drm_suspend,
+ .complete = exynos_drm_resume,
};
/* forward declaration */
@@ -240,6 +236,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
}, {
DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
@@ -376,11 +373,6 @@ static int exynos_drm_bind(struct device *dev)
if (ret)
goto err_unbind_all;
- /* Probe non kms sub drivers and virtual display driver. */
- ret = exynos_drm_device_subdrv_probe(drm);
- if (ret)
- goto err_unbind_all;
-
drm_mode_config_reset(drm);
/*
@@ -411,7 +403,6 @@ err_cleanup_fbdev:
exynos_drm_fbdev_fini(drm);
err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
- exynos_drm_device_subdrv_remove(drm);
err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
@@ -420,7 +411,7 @@ err_mode_config_cleanup:
err_free_private:
kfree(private);
err_free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -431,8 +422,6 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
- exynos_drm_device_subdrv_remove(drm);
-
exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
@@ -444,7 +433,7 @@ static void exynos_drm_unbind(struct device *dev)
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops exynos_drm_ops = {
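The PM rework above moves system sleep handling from SET_SYSTEM_SLEEP_PM_OPS to the .prepare/.complete phases, which bracket every other device's suspend and resume callbacks; since .complete returns void, exynos_drm_resume() loses its return value. A minimal sketch of the shape of such ops (names hypothetical):

    #include <linux/pm.h>

    static int example_prepare(struct device *dev)
    {
        /* runs before any ->suspend(); returning an error aborts sleep */
        return 0;
    }

    static void example_complete(struct device *dev)
    {
        /* runs after every ->resume(); cannot fail */
    }

    static const struct dev_pm_ops example_pm_ops = {
        .prepare  = example_prepare,
        .complete = example_complete,
    };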
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 0f6d079a55c9..c737c4bd2c19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -179,17 +179,13 @@ static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
crtc->pipe_clk->enable(crtc->pipe_clk, enable);
}
-struct exynos_drm_g2d_private {
- struct device *dev;
+struct drm_exynos_file_private {
+ /* for g2d api */
struct list_head inuse_cmdlist;
struct list_head event_list;
struct list_head userptr_list;
};
-struct drm_exynos_file_private {
- struct exynos_drm_g2d_private *g2d_priv;
-};
-
/*
* Exynos drm private structure.
*
@@ -201,6 +197,7 @@ struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
struct drm_atomic_state *suspend_state;
+ struct device *g2d_dev;
struct device *dma_dev;
void *mapping;
@@ -217,44 +214,6 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
-/*
- * Exynos drm sub driver structure.
- *
- * @list: sub driver has its own list object to register to exynos drm driver.
- * @dev: pointer to device object for subdrv device driver.
- * @drm_dev: pointer to drm_device and this pointer would be set
- * when sub driver calls exynos_drm_subdrv_register().
- * @probe: this callback would be called by exynos drm driver after
- * subdrv is registered to it.
- * @remove: this callback is used to release resources created
- * by probe callback.
- * @open: this would be called with drm device file open.
- * @close: this would be called with drm device file close.
- */
-struct exynos_drm_subdrv {
- struct list_head list;
- struct device *dev;
- struct drm_device *drm_dev;
-
- int (*probe)(struct drm_device *drm_dev, struct device *dev);
- void (*remove)(struct drm_device *drm_dev, struct device *dev);
- int (*open)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
- void (*close)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
-};
-
- /* This function would be called by non kms drivers such as g2d and ipp. */
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
-
-/* this function removes subdrv list from exynos drm driver */
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev);
-int exynos_drm_device_subdrv_remove(struct drm_device *dev);
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
-
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
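The exynos_drm_subdrv registry removed above is superseded by the kernel's generic component framework: instead of a driver-private list with probe/remove/open/close hooks, each sub-device publishes component_ops and the DRM master binds them collectively (the g2d conversion appears later in this patch). A minimal sketch of the pattern, with hypothetical names:

    #include <linux/component.h>

    static int example_bind(struct device *dev, struct device *master,
                            void *data)
    {
        struct drm_device *drm = data; /* supplied by the master */

        /* acquire per-device resources against @drm here */
        return 0;
    }

    static void example_unbind(struct device *dev, struct device *master,
                               void *data)
    {
        /* release whatever example_bind() acquired */
    }

    static const struct component_ops example_component_ops = {
        .bind   = example_bind,
        .unbind = example_unbind,
    };

The platform driver then registers with component_add(&pdev->dev, &example_component_ops) in probe and component_del() in remove.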
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6d29777884f9..781b82c2c579 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1479,7 +1479,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -1519,6 +1519,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(dsi->panel))
+ dsi->panel = NULL;
+
if (dsi->panel) {
drm_panel_attach(dsi->panel, &dsi->connector);
dsi->connector.status = connector_status_connected;
@@ -1860,6 +1863,8 @@ err_clk:
static const struct dev_pm_ops exynos_dsi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver dsi_driver = {
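The SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) addition, repeated below for FIMD, MIC, HDMI and the mixer, reuses a driver's runtime-PM callbacks for system sleep instead of duplicating the power-down logic. A minimal sketch, assuming hypothetical runtime callbacks:

    #include <linux/pm_runtime.h>

    static int example_runtime_suspend(struct device *dev)
    {
        /* gate clocks, quiesce the block */
        return 0;
    }

    static int example_runtime_resume(struct device *dev)
    {
        /* ungate clocks, reprogram the block */
        return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
        SET_RUNTIME_PM_OPS(example_runtime_suspend,
                           example_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
    };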
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 7fcc1a7ab1a0..9f52382e19ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -101,7 +101,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
- struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
@@ -112,15 +111,14 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
unsigned long size = height * mode_cmd->pitches[i] +
mode_cmd->offsets[i];
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
+ exynos_gem[i] = exynos_drm_gem_get(file_priv,
+ mode_cmd->handles[i]);
+ if (!exynos_gem[i]) {
DRM_ERROR("failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
- exynos_gem[i] = to_exynos_gem(obj);
-
if (size > exynos_gem[i]->size) {
i++;
ret = -EINVAL;
@@ -138,7 +136,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
err:
while (i--)
- drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
+ exynos_drm_gem_put(exynos_gem[i]);
return ERR_PTR(ret);
}
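The framebuffer path above now uses exynos_drm_gem_get(), which wraps drm_gem_object_lookup() and returns the exynos object with a reference held, balanced by exynos_drm_gem_put(). A short usage sketch of the pattern (helper signatures as introduced later in this patch):

    static int example_use_gem(struct drm_file *filp, unsigned int handle)
    {
        struct exynos_drm_gem *gem = exynos_drm_gem_get(filp, handle);

        if (!gem)
            return -ENOENT; /* stale or foreign handle */

        /* ... use gem->dma_addr, gem->size ... */

        exynos_drm_gem_put(gem); /* drop the lookup reference */
        return 0;
    }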
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6127ef25acd6..e8d0670bb5f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
static void fimc_set_window(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, h1, h2, v1, v2;
/* cropped image */
h1 = buf->rect.x;
- h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+ h2 = real_width - buf->rect.w - buf->rect.x;
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
- buf->buf.width, buf->buf.height);
+ real_width, buf->buf.height);
DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
/*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
static void fimc_src_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
* for now, we support only ITU601 8 bit mode
*/
cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
- EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
static void fimc_dst_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
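The FIMC hunks derive the hardware "original size" from the first plane's pitch rather than the user-supplied width, since the pitch is what actually describes the memory layout. For a packed format this is simply pitch divided by bytes per pixel; illustrative values:

    /* e.g. XRGB8888: pitch[0] = 4096 bytes, cpp[0] = 4 bytes/pixel */
    unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; /* 1024 */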
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 01b1570d0c3a..b7f56935a46b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1192,6 +1192,8 @@ static int exynos_fimd_resume(struct device *dev)
static const struct dev_pm_ops exynos_fimd_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver fimd_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f68ef1b3a28c..f2481a2014bb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -190,7 +191,7 @@ struct g2d_buf_desc {
struct g2d_buf_info {
unsigned int map_nr;
enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
- unsigned long handles[MAX_REG_TYPE_NR];
+ void *obj[MAX_REG_TYPE_NR];
unsigned int types[MAX_REG_TYPE_NR];
struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
@@ -237,7 +238,7 @@ struct g2d_data {
int irq;
struct workqueue_struct *g2d_workq;
struct work_struct runqueue_work;
- struct exynos_drm_subdrv subdrv;
+ struct drm_device *drm_dev;
unsigned long flags;
/* cmdlist */
@@ -268,14 +269,13 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
struct g2d_buf_info *buf_info;
g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
- g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
g2d->cmdlist_dma_attrs);
@@ -308,7 +308,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
+ dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
@@ -316,12 +316,10 @@ err:
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
-
kfree(g2d->cmdlist_node);
if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
- dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+ dma_free_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
@@ -355,32 +353,31 @@ static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
mutex_unlock(&g2d->cmdlist_mutex);
}
-static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
struct g2d_cmdlist_node *node)
{
struct g2d_cmdlist_node *lnode;
- if (list_empty(&g2d_priv->inuse_cmdlist))
+ if (list_empty(&file_priv->inuse_cmdlist))
goto add_to_list;
/* this links to base address of new cmdlist */
- lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ lnode = list_entry(file_priv->inuse_cmdlist.prev,
struct g2d_cmdlist_node, list);
lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
add_to_list:
- list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+ list_add_tail(&node->list, &file_priv->inuse_cmdlist);
if (node->event)
- list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+ list_add_tail(&node->event->base.link, &file_priv->event_list);
}
-static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
- unsigned long obj,
+static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
+ void *obj,
bool force)
{
- struct g2d_cmdlist_userptr *g2d_userptr =
- (struct g2d_cmdlist_userptr *)obj;
+ struct g2d_cmdlist_userptr *g2d_userptr = obj;
struct page **pages;
if (!obj)
@@ -398,7 +395,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
return;
out:
- dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
+ dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
pages = frame_vector_pages(g2d_userptr->vec);
@@ -419,16 +416,14 @@ out:
kfree(g2d_userptr);
}
-static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
- unsigned long *obj)
+ void **obj)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
- struct g2d_data *g2d;
struct sg_table *sgt;
unsigned long start, end;
unsigned int npages, offset;
@@ -439,10 +434,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return ERR_PTR(-EINVAL);
}
- g2d = dev_get_drvdata(g2d_priv->dev);
-
/* check if userptr already exists in userptr_list. */
- list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
if (g2d_userptr->userptr == userptr) {
/*
* also check size because there could be same address
@@ -450,7 +443,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
*/
if (g2d_userptr->size == size) {
atomic_inc(&g2d_userptr->refcount);
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
}
@@ -517,7 +510,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->sgt = sgt;
- if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
+ if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL)) {
DRM_ERROR("failed to map sgt with dma region.\n");
ret = -ENOMEM;
@@ -527,14 +520,14 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
g2d_userptr->userptr = userptr;
- list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+ list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);
if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
g2d->current_pool += npages << PAGE_SHIFT;
g2d_userptr->in_pool = true;
}
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
@@ -556,19 +549,14 @@ err_free:
return ERR_PTR(ret);
}
-static void g2d_userptr_free_all(struct drm_device *drm_dev,
- struct g2d_data *g2d,
- struct drm_file *filp)
+static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
- list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
if (g2d_userptr->in_pool)
- g2d_userptr_put_dma_addr(drm_dev,
- (unsigned long)g2d_userptr,
- true);
+ g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
g2d->current_pool = 0;
}
@@ -723,26 +711,23 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
buf_desc = &buf_info->descs[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
- unsigned long size;
+ struct exynos_drm_gem *exynos_gem;
- size = exynos_drm_gem_get_size(drm_dev, handle, file);
- if (!size) {
+ exynos_gem = exynos_drm_gem_get(file, handle);
+ if (!exynos_gem) {
ret = -EFAULT;
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
- size)) {
+ if (!g2d_check_buf_desc_is_valid(buf_desc,
+ reg_type, exynos_gem->size)) {
+ exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
goto err;
}
- addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
- file);
- if (IS_ERR(addr)) {
- ret = -EFAULT;
- goto err;
- }
+ addr = &exynos_gem->dma_addr;
+ buf_info->obj[reg_type] = exynos_gem;
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
@@ -758,11 +743,11 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- addr = g2d_userptr_get_dma_addr(drm_dev,
+ addr = g2d_userptr_get_dma_addr(g2d,
g2d_userptr.userptr,
g2d_userptr.size,
file,
- &handle);
+ &buf_info->obj[reg_type]);
if (IS_ERR(addr)) {
ret = -EFAULT;
goto err;
@@ -771,7 +756,6 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
cmdlist->data[reg_pos + 1] = *addr;
buf_info->reg_types[i] = reg_type;
- buf_info->handles[reg_type] = handle;
}
return 0;
@@ -785,29 +769,26 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_file *filp)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
struct g2d_buf_info *buf_info = &node->buf_info;
int i;
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
- unsigned long handle;
+ void *obj;
reg_type = buf_info->reg_types[i];
buf_desc = &buf_info->descs[reg_type];
- handle = buf_info->handles[reg_type];
+ obj = buf_info->obj[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM)
- exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
- filp);
+ exynos_drm_gem_put(obj);
else
- g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
- false);
+ g2d_userptr_put_dma_addr(g2d, obj, false);
buf_info->reg_types[i] = REG_TYPE_NONE;
- buf_info->handles[reg_type] = 0;
+ buf_info->obj[reg_type] = NULL;
buf_info->types[reg_type] = 0;
memset(buf_desc, 0x00, sizeof(*buf_desc));
}
@@ -922,7 +903,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
- struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct drm_device *drm_dev = g2d->drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
struct timespec64 now;
@@ -1031,7 +1012,7 @@ out:
mutex_unlock(&g2d->runqueue_mutex);
}
-static int g2d_check_reg_offset(struct device *dev,
+static int g2d_check_reg_offset(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
@@ -1131,7 +1112,7 @@ static int g2d_check_reg_offset(struct device *dev,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+ dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -1139,23 +1120,8 @@ err:
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
struct drm_exynos_g2d_get_ver *ver = data;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
@@ -1166,9 +1132,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
struct drm_exynos_pending_g2d_event *e;
@@ -1177,17 +1142,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
int size;
int ret;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
node = g2d_get_cmdlist(g2d);
if (!node)
return -ENOMEM;
@@ -1199,7 +1153,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+ dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
return -EINVAL;
}
@@ -1267,7 +1221,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "cmdlist size is too big\n");
+ dev_err(g2d->dev, "cmdlist size is too big\n");
ret = -EINVAL;
goto err_free_event;
}
@@ -1282,7 +1236,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
@@ -1301,7 +1255,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
@@ -1319,7 +1273,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
/* tail */
cmdlist->data[cmdlist->last] = 0;
- g2d_add_cmdlist_to_inuse(g2d_priv, node);
+ g2d_add_cmdlist_to_inuse(file_priv, node);
return 0;
@@ -1337,25 +1291,13 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
if (!runqueue_node)
return -ENOMEM;
@@ -1367,11 +1309,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
init_completion(&runqueue_node->complete);
runqueue_node->async = req->async;
- list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
- list_splice_init(&g2d_priv->event_list, event_list);
+ list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&file_priv->event_list, event_list);
if (list_empty(run_cmdlist)) {
- dev_err(dev, "there is no inuse cmdlist\n");
+ dev_err(g2d->dev, "there is no inuse cmdlist\n");
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
return -EPERM;
}
@@ -1395,71 +1337,28 @@ out:
return 0;
}
-static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- struct g2d_data *g2d;
- int ret;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
- /* allocate dma-aware cmdlist buffer. */
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0) {
- dev_err(dev, "cmdlist init failed\n");
- return ret;
- }
-
- ret = drm_iommu_attach_device(drm_dev, dev);
- if (ret < 0) {
- dev_err(dev, "failed to enable iommu.\n");
- g2d_fini_cmdlist(g2d);
- }
-
- return ret;
-
-}
-
-static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- drm_iommu_detach_device(drm_dev, dev);
-}
-
-static int g2d_open(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv;
-
- g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
- if (!g2d_priv)
- return -ENOMEM;
- g2d_priv->dev = dev;
- file_priv->g2d_priv = g2d_priv;
-
- INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
- INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->userptr_list);
+ INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&file_priv->event_list);
+ INIT_LIST_HEAD(&file_priv->userptr_list);
return 0;
}
-static void g2d_close(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d;
struct g2d_cmdlist_node *node, *n;
- if (!dev)
+ if (!priv->g2d_dev)
return;
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return;
+ g2d = dev_get_drvdata(priv->g2d_dev);
/* Remove the runqueue nodes that belong to us. */
mutex_lock(&g2d->runqueue_mutex);
@@ -1480,24 +1379,70 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
* Properly unmap these buffers here.
*/
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
}
mutex_unlock(&g2d->cmdlist_mutex);
/* release all g2d_userptr in pool. */
- g2d_userptr_free_all(drm_dev, g2d, file);
+ g2d_userptr_free_all(g2d, file);
+}
+
+static int g2d_bind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ g2d->drm_dev = drm_dev;
- kfree(file_priv->g2d_priv);
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ return ret;
+ }
+ priv->g2d_dev = dev;
+
+ dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
+ G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+ return 0;
+}
+
+static void g2d_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ /* Suspend operation and wait for engine idle. */
+ set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ g2d_wait_finish(g2d, NULL);
+ priv->g2d_dev = NULL;
+
+ cancel_work_sync(&g2d->runqueue_work);
+ drm_iommu_detach_device(g2d->drm_dev, dev);
}
+static const struct component_ops g2d_component_ops = {
+ .bind = g2d_bind,
+ .unbind = g2d_unbind,
+};
+
static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct g2d_data *g2d;
- struct exynos_drm_subdrv *subdrv;
int ret;
g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
@@ -1564,22 +1509,12 @@ static int g2d_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, g2d);
- subdrv = &g2d->subdrv;
- subdrv->dev = dev;
- subdrv->probe = g2d_subdrv_probe;
- subdrv->remove = g2d_subdrv_remove;
- subdrv->open = g2d_open;
- subdrv->close = g2d_close;
-
- ret = exynos_drm_subdrv_register(subdrv);
+ ret = component_add(dev, &g2d_component_ops);
if (ret < 0) {
dev_err(dev, "failed to register drm g2d device\n");
goto err_put_clk;
}
- dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n",
- G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
-
return 0;
err_put_clk:
@@ -1595,12 +1530,7 @@ static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
- /* Suspend operation and wait for engine idle. */
- set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
- g2d_wait_finish(g2d, NULL);
-
- cancel_work_sync(&g2d->runqueue_work);
- exynos_drm_subdrv_unregister(&g2d->subdrv);
+ component_del(&pdev->dev, &g2d_component_ops);
/* There should be no locking needed here. */
g2d_remove_runqueue_nodes(g2d, NULL);
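With the component conversion above, per-file G2D state lives directly in drm_exynos_file_private and the device is reached through exynos_drm_private::g2d_dev, set in g2d_bind() and cleared in g2d_unbind(); the repetitive NULL-chasing through g2d_priv->dev in each ioctl disappears. A sketch of the lookup now implied at the top of the ioctls (structures as modified by this patch; helper name hypothetical):

    static struct g2d_data *example_get_g2d(struct drm_device *drm_dev)
    {
        struct exynos_drm_private *priv = drm_dev->dev_private;

        if (!priv->g2d_dev) /* g2d not bound (or already unbound) */
            return NULL;

        return dev_get_drvdata(priv->g2d_dev);
    }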
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
index 1a9c7ca8c15b..287b2ed8f178 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -14,6 +14,9 @@ extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern int g2d_open(struct drm_device *drm_dev, struct drm_file *file);
+extern void g2d_close(struct drm_device *drm_dev, struct drm_file *file);
#else
static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -33,4 +36,12 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
{
return -ENODEV;
}
+
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+ return 0;
+}
+
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+{ }
#endif
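When CONFIG_DRM_EXYNOS_G2D is disabled, the header supplies no-op stubs; declaring them static inline keeps each definition local to its translation unit, so including the header from several files cannot produce duplicate-symbol link errors. The general gating pattern, with hypothetical names:

    #ifdef CONFIG_EXAMPLE_FEATURE
    int example_open(struct drm_device *drm, struct drm_file *file);
    #else
    static inline int example_open(struct drm_device *drm,
                                   struct drm_file *file)
    {
        return 0; /* feature compiled out */
    }
    #endif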
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 6e1494fa71b4..34ace85feb68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
@@ -171,26 +171,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
kfree(exynos_gem);
}
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
-{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return 0;
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- drm_gem_object_unreference_unlocked(obj);
-
- return exynos_gem->size;
-}
-
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -299,43 +279,15 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
-{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return ERR_PTR(-EINVAL);
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- return &exynos_gem->dma_addr;
-}
-
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle)
{
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return;
- }
-
- drm_gem_object_unreference_unlocked(obj);
-
- /*
- * decrease obj->refcount one more time because we has already
- * increased it at exynos_drm_gem_get_dma_addr().
- */
- drm_gem_object_unreference_unlocked(obj);
+ if (!obj)
+ return NULL;
+ return to_exynos_gem(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +335,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 9057d7f1d6ed..d46a62c30812 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -77,32 +77,26 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
- * get dma address from gem handle and this function could be used for
+ * get exynos drm object from gem handle; this function can be used by
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle);
/*
- * put dma address from gem handle and this function could be used for
- * other drivers such as 2d/3d acceleration drivers.
- * with this function call, gem object reference count would be decreased.
+ * put exynos drm object acquired from exynos_drm_gem_get();
+ * the gem object reference count will be decreased.
*/
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+static inline void exynos_drm_gem_put(struct exynos_drm_gem *exynos_gem)
+{
+ drm_gem_object_put_unlocked(&exynos_gem->base);
+}
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* get buffer size to gem handle. */
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv);
-
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 35ac66730563..7ba414b52faa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV61:
- cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_IN_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_IN_YUV420_3P;
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
break;
}
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
switch (degree) {
case DRM_MODE_ROTATE_0:
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_90:
cfg |= GSC_IN_ROT_90;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_180:
cfg |= GSC_IN_ROT_180;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_270:
cfg |= GSC_IN_ROT_270;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
}
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
GSC_SRCIMG_WIDTH_MASK);
- cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+ cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_SRCIMG_HEIGHT(buf->buf.height));
gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
break;
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+ break;
case DRM_FORMAT_YUV422:
+ cfg |= GSC_OUT_YUV422_3P;
+ break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_OUT_YUV420_3P;
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
- GSC_OUT_YUV420_2P);
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
break;
}
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
/* original size */
cfg = gsc_read(GSC_DSTIMG_SIZE);
cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
- cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+ cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_DSTIMG_HEIGHT(buf->buf.height);
gsc_write(cfg, GSC_DSTIMG_SIZE);
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
};
static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
- { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+ { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
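The GSC hunks above fix several things at once: 4:2:2 formats (NV16/NV61 and 3-plane YUV422) were being programmed as 4:2:0, chroma order is now set per format, source and destination widths come from the pitch, the 5433 buffer-width alignment becomes 16, and the DRM_MODE_REFLECT_X/Y flags were mapped to the hardware flip bits the wrong way round. The corrected reflection mapping, as it now reads in gsc_src_set_transf():

    /* map DRM reflect flags to the matching GSC flip bits
     * (previously X and Y were swapped) */
    if (rotation & DRM_MODE_REFLECT_X)
        cfg |= GSC_IN_ROT_XFLIP;
    if (rotation & DRM_MODE_REFLECT_Y)
        cfg |= GSC_IN_ROT_YFLIP;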
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 26374e58c557..23226a0212e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -345,39 +345,18 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
int ret = 0;
int i;
- /* basic checks */
- if (buf->buf.width == 0 || buf->buf.height == 0)
- return -EINVAL;
- buf->format = drm_format_info(buf->buf.fourcc);
- for (i = 0; i < buf->format->num_planes; i++) {
- unsigned int width = (i == 0) ? buf->buf.width :
- DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
-
- if (buf->buf.pitch[i] == 0)
- buf->buf.pitch[i] = width * buf->format->cpp[i];
- if (buf->buf.pitch[i] < width * buf->format->cpp[i])
- return -EINVAL;
- if (!buf->buf.gem_id[i])
- return -ENOENT;
- }
-
- /* pitch for additional planes must match */
- if (buf->format->num_planes > 2 &&
- buf->buf.pitch[1] != buf->buf.pitch[2])
- return -EINVAL;
-
/* get GEM buffers and check their size */
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int height = (i == 0) ? buf->buf.height :
DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
unsigned long size = height * buf->buf.pitch[i];
- struct drm_gem_object *obj = drm_gem_object_lookup(filp,
+ struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
buf->buf.gem_id[i]);
- if (!obj) {
+ if (!gem) {
ret = -ENOENT;
goto gem_free;
}
- buf->exynos_gem[i] = to_exynos_gem(obj);
+ buf->exynos_gem[i] = gem;
if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
i++;
@@ -391,7 +370,7 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
return 0;
gem_free:
while (i--) {
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
buf->exynos_gem[i] = NULL;
}
return ret;
@@ -404,7 +383,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
if (!buf->exynos_gem[0])
return;
for (i = 0; i < buf->format->num_planes; i++)
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
}
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
-static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
[IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
struct drm_ipp_limit l;
struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+ int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
if (!limits)
return 0;
__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
- if (!__size_limit_check(buf->buf.width, &l.h) ||
+ if (!__size_limit_check(real_width, &l.h) ||
!__size_limit_check(buf->buf.height, &l.v))
return -EINVAL;
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
return 0;
}
+static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
+ struct exynos_drm_ipp_buffer *buf,
+ struct exynos_drm_ipp_buffer *src,
+ struct exynos_drm_ipp_buffer *dst,
+ bool rotate, bool swap)
+{
+ const struct exynos_drm_ipp_formats *fmt;
+ int ret, i;
+
+ fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
+ buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
+ DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+ if (!fmt) {
+ DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
+ buf == src ? "src" : "dst");
+ return -EINVAL;
+ }
+
+ /* basic checks */
+ if (buf->buf.width == 0 || buf->buf.height == 0)
+ return -EINVAL;
+
+ buf->format = drm_format_info(buf->buf.fourcc);
+ for (i = 0; i < buf->format->num_planes; i++) {
+ unsigned int width = (i == 0) ? buf->buf.width :
+ DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+ if (buf->buf.pitch[i] == 0)
+ buf->buf.pitch[i] = width * buf->format->cpp[i];
+ if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+ return -EINVAL;
+ if (!buf->buf.gem_id[i])
+ return -ENOENT;
+ }
+
+ /* pitch for additional planes must match */
+ if (buf->format->num_planes > 2 &&
+ buf->buf.pitch[1] != buf->buf.pitch[2])
+ return -EINVAL;
+
+ /* check driver limits */
+ ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
+ fmt->num_limits,
+ rotate,
+ buf == dst ? swap : false);
+ if (ret)
+ return ret;
+ ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+ fmt->limits,
+ fmt->num_limits, swap);
+ return ret;
+}
+
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
struct exynos_drm_ipp *ipp = task->ipp;
- const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
unsigned int rotation = task->transform.rotation;
int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
return -EINVAL;
}
- src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_SOURCE);
- if (!src_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
- src_fmt->num_limits,
- rotate, false);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- src_fmt->limits,
- src_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
if (ret)
return ret;
- dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_DESTINATION);
- if (!dst_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
- dst_fmt->num_limits,
- false, swap);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- dst_fmt->limits,
- dst_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 2174814273e2..2fd299a58297 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -367,6 +367,8 @@ static int exynos_mic_resume(struct device *dev)
static const struct dev_pm_ops exynos_mic_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static int exynos_mic_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 38a2a7f1204b..dba29aec59b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
if (plane->state) {
exynos_state = to_exynos_plane_state(plane->state);
if (exynos_state->base.fb)
- drm_framebuffer_unreference(exynos_state->base.fb);
+ drm_framebuffer_put(exynos_state->base.fb);
kfree(exynos_state);
plane->state = NULL;
}
@@ -263,8 +263,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
if (!state->crtc)
return;
- plane->crtc = state->crtc;
-
if (exynos_crtc->ops->update_plane)
exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 1a76dd3d52e1..a820a68429b9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
val &= ~ROT_CONTROL_FLIP_MASK;
if (rotation & DRM_MODE_REFLECT_X)
- val |= ROT_CONTROL_FLIP_HORIZONTAL;
- if (rotation & DRM_MODE_REFLECT_Y)
val |= ROT_CONTROL_FLIP_VERTICAL;
+ if (rotation & DRM_MODE_REFLECT_Y)
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
val &= ~ROT_CONTROL_ROT_MASK;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 91d4382343d0..0ddb6eec7b11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -30,6 +30,7 @@
#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
#define SCALER_MAX_CLK 4
#define SCALER_AUTOSUSPEND_DELAY 2000
+#define SCALER_RESET_WAIT_RETRIES 100
struct scaler_data {
const char *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
static u32 scaler_get_format(u32 drm_fmt)
{
switch (drm_fmt) {
- case DRM_FORMAT_NV21:
- return SCALER_YUV420_2P_UV;
case DRM_FORMAT_NV12:
+ return SCALER_YUV420_2P_UV;
+ case DRM_FORMAT_NV21:
return SCALER_YUV420_2P_VU;
case DRM_FORMAT_YUV420:
return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
return SCALER_YUV422_1P_UYVY;
case DRM_FORMAT_YVYU:
return SCALER_YUV422_1P_YVYU;
- case DRM_FORMAT_NV61:
- return SCALER_YUV422_2P_UV;
case DRM_FORMAT_NV16:
+ return SCALER_YUV422_2P_UV;
+ case DRM_FORMAT_NV61:
return SCALER_YUV422_2P_VU;
case DRM_FORMAT_YUV422:
return SCALER_YUV422_3P;
- case DRM_FORMAT_NV42:
- return SCALER_YUV444_2P_UV;
case DRM_FORMAT_NV24:
+ return SCALER_YUV444_2P_UV;
+ case DRM_FORMAT_NV42:
return SCALER_YUV444_2P_VU;
case DRM_FORMAT_YUV444:
return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
return 0;
}
+static inline int scaler_reset(struct scaler_context *scaler)
+{
+ int retry = SCALER_RESET_WAIT_RETRIES;
+
+ scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+ do {
+ cpu_relax();
+ } while (--retry > 1 &&
+ scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+ do {
+ cpu_relax();
+ scaler_write(1, SCALER_INT_EN);
+ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+ return retry ? 0 : -EIO;
+}
+
static inline void scaler_enable_int(struct scaler_context *scaler)
{
u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
- scaler->task = task;
-
pm_runtime_get_sync(scaler->dev);
+ if (scaler_reset(scaler)) {
+ pm_runtime_put(scaler->dev);
+ return -EIO;
+ }
+
+ scaler->task = task;
scaler_set_src_fmt(scaler, src_fmt);
scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
static inline u32 scaler_get_int_status(struct scaler_context *scaler)
{
- return scaler_read(SCALER_INT_STATUS);
+ u32 val = scaler_read(SCALER_INT_STATUS);
+
+ scaler_write(val, SCALER_INT_STATUS);
+
+ return val;
}
static inline int scaler_task_done(u32 val)
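scaler_reset() above asserts SCALER_CFG_SOFT_RESET, waits for the bit to self-clear, then confirms the block is accepting writes by re-arming SCALER_INT_EN until it reads back as 1; the retry counter is decremented in both loop conditions so the wait is bounded and -EIO can actually be reported. The same bounded-poll idea as a generic sketch (helper names hypothetical):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/processor.h> /* cpu_relax() */

    /* poll done(ctx) at most @retries times; 0 on success, -EIO on timeout */
    static int example_poll(bool (*done)(void *ctx), void *ctx, int retries)
    {
        while (retries-- > 0) {
            if (done(ctx))
                return 0;
            cpu_relax();
        }
        return -EIO;
    }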
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e6b0940b1ac2..19697c1362d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -319,7 +319,7 @@ static int vidi_get_modes(struct drm_connector *connector)
return -ENOMEM;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
}
@@ -344,7 +344,7 @@ static int vidi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index db91932550cf..2092a650df7d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -888,7 +888,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
edid->width_cm, edid->height_cm);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -951,7 +951,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
@@ -2093,6 +2093,8 @@ static int __maybe_unused exynos_hdmi_resume(struct device *dev)
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver hdmi_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 272c79f5f5bf..ffbf4a950f69 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -837,8 +837,6 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
struct drm_device *drm_dev)
{
int ret;
- struct exynos_drm_private *priv;
- priv = drm_dev->dev_private;
mixer_ctx->drm_dev = drm_dev;
@@ -1271,6 +1269,8 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
static const struct dev_pm_ops exynos_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver mixer_driver = {
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 4704a993cbb7..16b39734115c 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P (3 << 4)
#define GSC_OUT_YUV422_1P (4 << 4)
#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV422_3P (6 << 4)
#define GSC_OUT_YUV444 (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
#define GSC_OUT_TILE_C_16x8 (0 << 2)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c54806d08dd7..2298ed2a9e1c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -117,7 +117,7 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_cleanup;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
goto err_sysfs;
@@ -148,8 +148,9 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
if (panel_node) {
fsl_dev->connector.panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- if (!fsl_dev->connector.panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(fsl_dev->connector.panel))
+ return PTR_ERR(fsl_dev->connector.panel);
+
return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
}
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index c51d9259c7a7..204c8e452eb7 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -251,7 +251,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
if (!fb)
return;
- offset = psbfb->gtt->offset;
+ offset = to_gtt_range(fb->obj[0])->offset;
stride = fb->pitches[0];
switch (fb->format->depth) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 5ea785f07ba8..90ed20083009 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1770,7 +1770,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index f0878998526a..4e4e4a66eaee 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -216,7 +216,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index cb0a2ae916e0..adefae58b5fc 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -33,6 +33,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -40,14 +41,9 @@
#include "framebuffer.h"
#include "gtt.h"
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle);
-
static const struct drm_framebuffer_funcs psb_fb_funcs = {
- .destroy = psb_user_framebuffer_destroy,
- .create_handle = psb_user_framebuffer_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
@@ -96,34 +92,36 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
/*
* We have to poke our nose in here. The core fb code assumes
* panning is part of the hardware that can be invoked before
* the actual fb is mapped. In our case that isn't quite true.
*/
- if (psbfb->gtt->npage) {
+ if (gtt->npage) {
/* GTT roll shifts in 4K pages, we need to shift the right
number of pages */
int pages = info->fix.line_length >> 12;
- psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ psb_gtt_roll(dev, gtt, var->yoffset * pages);
}
return 0;
}
-static int psbfb_vm_fault(struct vm_fault *vmf)
+static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
int page_num;
int i;
unsigned long address;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
unsigned long pfn;
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
- psbfb->gtt->offset;
+ gtt->offset;
page_num = vma_pages(vma);
address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@@ -133,18 +131,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
for (i = 0; i < page_num; i++) {
pfn = (phys_addr >> PAGE_SHIFT);
- ret = vm_insert_mixed(vma, address,
+ ret = vmf_insert_mixed(vma, address,
__pfn_to_pfn_t(pfn, PFN_DEV));
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ if (unlikely(ret & VM_FAULT_ERROR))
break;
- else if (unlikely(ret != 0)) {
- ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
- return ret;
- }
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
}
- return VM_FAULT_NOPAGE;
+ return ret;
}
static void psbfb_vm_open(struct vm_area_struct *vma)
@@ -246,7 +240,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
return -EINVAL;
drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->gtt = gt;
+ fb->base.obj[0] = &gt->gem;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
@@ -518,8 +512,8 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
- if (psbfb->gtt)
- drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
+ if (psbfb->base.obj[0])
+ drm_gem_object_put_unlocked(psbfb->base.obj[0]);
return 0;
}
@@ -576,44 +570,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
-/**
- * psb_user_framebuffer_create_handle - add handle to a framebuffer
- * @fb: framebuffer
- * @file_priv: our DRM file
- * @handle: returned handle
- *
- * Our framebuffer object is a GTT range which also contains a GEM
- * object. We need to turn it into a handle for userspace. GEM will do
- * the work for us
- */
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
- return drm_gem_handle_create(file_priv, &r->gem, handle);
-}
-
-/**
- * psb_user_framebuffer_destroy - destruct user created fb
- * @fb: framebuffer
- *
- * User framebuffers are backed by GEM objects so all we have to do is
- * clean up a bit and drop the reference, GEM will handle the fallout
- */
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
-
- /* Let DRM do its clean up */
- drm_framebuffer_cleanup(fb);
- /* We are no longer using the resource in GEM */
- drm_gem_object_unreference_unlocked(&r->gem);
- kfree(fb);
-}
-
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
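
The framebuffer.c hunks above replace the hand-rolled destroy/create_handle callbacks with the generic GEM framebuffer helpers; those helpers only work once the backing GEM object is published in fb->obj[0], which is why psb_framebuffer_init() now stores &gt->gem there. A minimal sketch of the resulting pattern (sketch_* names are illustrative; the driver's real table is psb_fb_funcs):

	#include <drm/drm_gem_framebuffer_helper.h>

	static const struct drm_framebuffer_funcs sketch_fb_funcs = {
		.destroy       = drm_gem_fb_destroy,	   /* drops obj[] refs, frees fb */
		.create_handle = drm_gem_fb_create_handle, /* GEM handle for fb->obj[0] */
	};

	static int sketch_fb_init(struct drm_device *dev,
				  struct psb_framebuffer *fb,
				  struct gtt_range *gt)
	{
		fb->base.obj[0] = &gt->gem;	/* helpers key off fb->obj[0] */
		return drm_framebuffer_init(dev, &fb->base, &sketch_fb_funcs);
	}
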
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 395f20b07aab..23dc3c5f8f0d 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -31,7 +31,6 @@ struct psb_framebuffer {
struct drm_framebuffer base;
struct address_space *addr_space;
struct fb_info *fbdev;
- struct gtt_range *gtt;
};
struct psb_fbdev {
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 131239759a75..576f1b272f23 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -93,7 +93,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_unreference_unlocked(&r->gem);
+ drm_gem_object_put_unlocked(&r->gem);
*handlep = handle;
return 0;
}
@@ -134,12 +134,13 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-int psb_gem_fault(struct vm_fault *vmf)
+vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;
struct gtt_range *r;
- int ret;
+ int err;
+ vm_fault_t ret;
unsigned long pfn;
pgoff_t page_offset;
struct drm_device *dev;
@@ -158,9 +159,10 @@ int psb_gem_fault(struct vm_fault *vmf)
/* For now the mmap pins the object and it stays pinned. As things
stand that will do us no harm */
if (r->mmapping == 0) {
- ret = psb_gtt_pin(r);
- if (ret < 0) {
- dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+ err = psb_gtt_pin(r);
+ if (err < 0) {
+ dev_err(dev->dev, "gma500: pin failed: %d\n", err);
+ ret = vmf_error(err);
goto fail;
}
r->mmapping = 1;
@@ -175,18 +177,9 @@ int psb_gem_fault(struct vm_fault *vmf)
pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
else
pfn = page_to_pfn(r->pages[page_offset]);
- ret = vm_insert_pfn(vma, vmf->address, pfn);
-
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);
- switch (ret) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+
+ return ret;
}
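
The psb_gem_fault() conversion shows the vm_fault_t idiom used throughout this series: vmf_insert_pfn()/vmf_insert_mixed() return VM_FAULT_* codes directly, and vmf_error() translates a -errno from any other helper, so the old switch on errno disappears. A hedged sketch (sketch_pin() is a stand-in for an -errno-returning helper such as psb_gtt_pin()):

	static int sketch_pin(void);	/* stand-in: returns 0 or -errno */

	static vm_fault_t sketch_fault(struct vm_fault *vmf, unsigned long pfn)
	{
		int err = sketch_pin();

		if (err < 0)
			return vmf_error(err);	/* -ENOMEM -> VM_FAULT_OOM, ... */

		/* already a vm_fault_t: no errno-to-VM_FAULT switch needed */
		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
	}
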
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index f3c48a2be71b..09c1161a7ac6 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -60,7 +60,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *gtt;
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -76,12 +76,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
goto gma_pipe_cleaner;
}
+ gtt = to_gtt_range(fb->obj[0]);
+
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
+ ret = psb_gtt_pin(gtt);
if (ret < 0)
goto gma_pipe_set_base_exit;
- start = psbfb->gtt->offset;
+ start = gtt->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
@@ -129,7 +131,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
gma_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+ psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
gma_pipe_set_base_exit:
gma_power_end(dev);
@@ -353,7 +355,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gt = container_of(gma_crtc->cursor_obj,
struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
gma_crtc->cursor_obj = NULL;
}
return 0;
@@ -429,7 +431,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
if (gma_crtc->cursor_obj) {
gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
}
gma_crtc->cursor_obj = obj;
@@ -437,7 +439,7 @@ unlock:
return ret;
unref_cursor:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -491,7 +493,7 @@ void gma_crtc_disable(struct drm_crtc *crtc)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- gt = to_psb_fb(crtc->primary->fb)->gtt;
+ gt = to_gtt_range(crtc->primary->fb->obj[0]);
psb_gtt_unpin(gt);
}
}
@@ -663,7 +665,7 @@ void gma_connector_attach_encoder(struct gma_connector *connector,
struct gma_encoder *encoder)
{
connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
+ drm_connector_attach_encoder(&connector->base,
&encoder->base);
}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index cdbb350c9d5d..cb0c3a2a1fd4 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -53,6 +53,8 @@ struct gtt_range {
int roll; /* Roll applied to the GTT entries */
};
+#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
+
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed,
u32 align);
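
to_gtt_range() is the usual container_of() downcast: because struct gtt_range embeds its struct drm_gem_object by value as .gem, code holding only the GEM pointer can recover the wrapper. Illustration (fb is any framebuffer whose obj[0] was set by psb_framebuffer_init()):

	struct drm_gem_object *obj = fb->obj[0];
	struct gtt_range *gt = to_gtt_range(obj);
	/* expands to container_of(obj, struct gtt_range, gem):
	 * pure pointer arithmetic, no reference is taken */
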
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 978ae4b25e82..e0ccf1d19a4d 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -34,7 +34,7 @@ struct vbt_header {
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
struct bdb_header {
@@ -61,7 +61,7 @@ struct vbios_data {
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
/*
* There are several types of BIOS data blocks (BDBs), each block has
@@ -133,7 +133,7 @@ struct bdb_general_features {
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -213,7 +213,7 @@ struct child_device_config {
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
-} __attribute__((packed));
+} __packed;
struct bdb_general_definitions {
@@ -256,7 +256,7 @@ struct bdb_lvds_options {
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_backlight {
u8 type:2;
@@ -268,7 +268,7 @@ struct bdb_lvds_backlight {
u8 i2caddr;
u8 brightnesscmd;
/*FIXME: more...*/
-} __attribute__((packed));
+} __packed;
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
@@ -278,12 +278,12 @@ struct bdb_lvds_lfp_data_ptr {
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
@@ -300,7 +300,7 @@ struct lvds_fp_timing {
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
-} __attribute__((packed));
+} __packed;
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
@@ -328,7 +328,7 @@ struct lvds_dvo_timing {
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
struct lvds_pnp_id {
u16 mfg_name;
@@ -336,17 +336,17 @@ struct lvds_pnp_id {
u32 serial;
u8 mfg_week;
u8 mfg_year;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
struct aimdb_header {
char signature[16];
@@ -354,12 +354,12 @@ struct aimdb_header {
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct vch_panel_data {
u16 fp_timing_offset;
@@ -370,12 +370,12 @@ struct vch_panel_data {
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
@@ -391,7 +391,7 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
@@ -436,7 +436,7 @@ struct bdb_driver_features {
u8 hdmi_termination;
u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
#define EDP_18BPP 0
#define EDP_24BPP 1
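
The __packed conversions above are purely cosmetic: it is the kernel's shorthand macro for the attribute the old code spelled out, so the two forms compile identically and only the style changes:

	/* kernel shorthand, e.g. from <linux/compiler-gcc.h> in this era: */
	#define __packed __attribute__((__packed__))
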
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index a05c020602bd..d0bf5a1e94e8 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -999,7 +999,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
p_funcs->encoder_helper_funcs);
/*attach to given connector*/
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
/*set possible crtcs and clones*/
if (dsi_connector->pipe) {
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 5c066448be5b..2b9fa0163dea 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -167,7 +167,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -196,7 +195,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 0fff269d3fe6..1b7fd6a9d8a5 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -600,7 +600,6 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -617,7 +616,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 78566a80ad25..c6d72de1c054 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -578,7 +578,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
}
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
return ret;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index e6943fef0611..83babb815a5d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -376,7 +376,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
* preferred mode is the right one.
*/
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
kfree(edid);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index e8300f509023..93d2f4000d2f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -21,6 +21,7 @@
#define _PSB_DRV_H_
#include <linux/kref.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_global.h>
@@ -749,7 +750,7 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_fault(struct vm_fault *vmf);
+extern vm_fault_t psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index e5360726d80b..fb4da3cd6681 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -66,7 +66,7 @@ int psb_intel_ddc_get_modes(struct drm_connector *connector,
edid = drm_get_edid(connector, adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index f2ee6aa10afa..dd3cec0e3190 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -429,13 +429,20 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
+#define MAX_ARG_LEN 32
+
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
+ u8 buf[MAX_ARG_LEN*2 + 2], status;
+ struct i2c_msg msgs[MAX_ARG_LEN + 3];
int i, ret;
+ if (args_len > MAX_ARG_LEN) {
+ DRM_ERROR("Need to increase arg length\n");
+ return false;
+ }
+
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
@@ -1465,7 +1472,7 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
if (connector_is_digital == monitor_is_digital) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
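
This hunk, and the tda9950/tda998x hunks that follow, remove variable-length stack arrays with the same recipe: size the buffer to a documented protocol maximum and reject oversized requests before touching it. A minimal sketch of the shape (SKETCH_MAX is an assumed bound, not a value from any of these drivers):

	#define SKETCH_MAX 32

	static bool sketch_write(u8 addr, const u8 *p, int cnt)
	{
		u8 buf[SKETCH_MAX + 1];	/* was the VLA: u8 buf[cnt + 1] */

		if (cnt > SKETCH_MAX)	/* fail loudly rather than overflow */
			return false;
		buf[0] = addr;
		memcpy(&buf[1], p, cnt);
		return true;		/* i2c transfer elided */
	}
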
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index d2f4749ebf8d..744956cea749 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -133,7 +133,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2269be91f3e1..bb774202a5a1 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -859,7 +859,6 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
if (src_w != crtc_w || src_h != crtc_h) {
- DRM_ERROR("Scale not support!!!\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 3f7396caad48..5d2f0d548469 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -76,9 +76,12 @@ struct tda9950_priv {
static int tda9950_write_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
{
struct i2c_msg msg;
- u8 buf[cnt + 1];
+ u8 buf[CEC_MAX_MSG_SIZE + 3];
int ret;
+ if (WARN_ON(cnt > sizeof(buf) - 1))
+ return -EINVAL;
+
buf[0] = addr;
memcpy(buf + 1, p, cnt);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 6ebd8842dbcc..a7c39f39793f 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -69,6 +69,7 @@ struct tda998x_priv {
bool edid_delay_active;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct drm_connector connector;
struct tda998x_audio_port audio_port[2];
@@ -79,9 +80,10 @@ struct tda998x_priv {
#define conn_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, connector)
-
#define enc_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, encoder)
+#define bridge_to_tda998x_priv(x) \
+ container_of(x, struct tda998x_priv, bridge)
/* The TDA9988 series of devices use a paged register scheme.. to simplify
* things we encode the page # in upper bits of the register #. To read/
@@ -589,13 +591,22 @@ out:
return ret;
}
+#define MAX_WRITE_RANGE_BUF 32
+
static void
reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
{
struct i2c_client *client = priv->hdmi;
- u8 buf[cnt+1];
+ /* This is the maximum size of the buffer passed in */
+ u8 buf[MAX_WRITE_RANGE_BUF + 1];
int ret;
+ if (cnt > MAX_WRITE_RANGE_BUF) {
+ dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
+ MAX_WRITE_RANGE_BUF);
+ return;
+ }
+
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
@@ -753,7 +764,7 @@ static void tda998x_detect_work(struct work_struct *work)
{
struct tda998x_priv *priv =
container_of(work, struct tda998x_priv, detect_work);
- struct drm_device *dev = priv->encoder.dev;
+ struct drm_device *dev = priv->connector.dev;
if (dev)
drm_kms_helper_hotplug_event(dev);
@@ -805,7 +816,7 @@ static void
tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
union hdmi_infoframe *frame)
{
- u8 buf[32];
+ u8 buf[MAX_WRITE_RANGE_BUF];
ssize_t len;
len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
@@ -1095,29 +1106,6 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
/* DRM connector functions */
-static int tda998x_connector_fill_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- int ret;
-
- mutex_lock(&priv->audio_mutex);
- ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-
- if (connector->edid_blob_ptr) {
- struct edid *edid = (void *)connector->edid_blob_ptr->data;
-
- cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
-
- priv->sink_has_audio = drm_detect_monitor_audio(edid);
- } else {
- priv->sink_has_audio = false;
- }
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
static enum drm_connector_status
tda998x_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1136,7 +1124,7 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs tda998x_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .fill_modes = tda998x_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
.destroy = tda998x_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1234,41 +1222,30 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
return 0;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
+
+ mutex_lock(&priv->audio_mutex);
n = drm_add_edid_modes(connector, edid);
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ mutex_unlock(&priv->audio_mutex);
kfree(edid);
return n;
}
-static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TDA19988 dotclock can go up to 165MHz */
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
- return MODE_CLOCK_HIGH;
- if (mode->htotal >= BIT(13))
- return MODE_BAD_HVALUE;
- if (mode->vtotal >= BIT(11))
- return MODE_BAD_VVALUE;
- return MODE_OK;
-}
-
static struct drm_encoder *
tda998x_connector_best_encoder(struct drm_connector *connector)
{
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- return &priv->encoder;
+ return priv->bridge.encoder;
}
static
const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
.get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
.best_encoder = tda998x_connector_best_encoder,
};
@@ -1292,25 +1269,48 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
if (ret)
return ret;
- drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+ drm_connector_attach_encoder(&priv->connector,
+ priv->bridge.encoder);
return 0;
}
-/* DRM encoder functions */
+/* DRM bridge functions */
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static int tda998x_bridge_attach(struct drm_bridge *bridge)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
- bool on;
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- /* we only care about on or off: */
- on = mode == DRM_MODE_DPMS_ON;
+ return tda998x_connector_init(priv, bridge->dev);
+}
- if (on == priv->is_on)
- return;
+static void tda998x_bridge_detach(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- if (on) {
+ drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ /* TDA19988 dotclock can go up to 165MHz */
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void tda998x_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (!priv->is_on) {
/* enable video ports, audio will be enabled later */
reg_write(priv, REG_ENA_VP_0, 0xff);
reg_write(priv, REG_ENA_VP_1, 0xff);
@@ -1321,7 +1321,14 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
priv->is_on = true;
- } else {
+ }
+}
+
+static void tda998x_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (priv->is_on) {
/* disable video ports */
reg_write(priv, REG_ENA_VP_0, 0x00);
reg_write(priv, REG_ENA_VP_1, 0x00);
@@ -1331,12 +1338,12 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ unsigned long tmds_clock;
u16 ref_pix, ref_line, n_pix, n_line;
u16 hs_pix_s, hs_pix_e;
u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1407,12 +1414,19 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
(mode->vsync_end - mode->vsync_start)/2;
}
- div = 148500 / mode->clock;
- if (div != 0) {
- div--;
- if (div > 3)
- div = 3;
- }
+ tmds_clock = mode->clock;
+
+ /*
+ * The divisor is power-of-2. The TDA9983B datasheet gives
+ * this as ranges of Msample/s, which is 10x the TMDS clock:
+ * 0 - 800 to 1500 Msample/s
+ * 1 - 400 to 800 Msample/s
+ * 2 - 200 to 400 Msample/s
+ * 3 - as 2 above
+ */
+ for (div = 0; div < 3; div++)
+ if (80000 >> div <= tmds_clock)
+ break;
mutex_lock(&priv->audio_mutex);
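
The open-coded divide is replaced by a search over the datasheet ranges; since (80000 >> div) halves per step, the loop picks the first range whose floor fits under the TMDS clock. Worked values (all in kHz, as in struct drm_display_mode):

	/*  148500 (1080p60): 80000 <= 148500                 -> div = 0
	 *   40000:           80000 >  40000, 40000 <= 40000  -> div = 1
	 *   25175 (VGA):     ...,           20000 <= 25175   -> div = 2
	 *  below 20000: no range matches, loop exits with div = 3
	 *               (per the comment above: "3 - as 2 above")
	 */
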
@@ -1543,26 +1557,14 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
mutex_unlock(&priv->audio_mutex);
}
-static void tda998x_destroy(struct tda998x_priv *priv)
-{
- /* disable all IRQs and free the IRQ handler */
- cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
- reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
- if (priv->audio_pdev)
- platform_device_unregister(priv->audio_pdev);
-
- if (priv->hdmi->irq)
- free_irq(priv->hdmi->irq, priv);
-
- del_timer_sync(&priv->edid_delay_timer);
- cancel_work_sync(&priv->detect_work);
-
- i2c_unregister_device(priv->cec);
-
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
-}
+static const struct drm_bridge_funcs tda998x_bridge_funcs = {
+ .attach = tda998x_bridge_attach,
+ .detach = tda998x_bridge_detach,
+ .mode_valid = tda998x_bridge_mode_valid,
+ .disable = tda998x_bridge_disable,
+ .mode_set = tda998x_bridge_mode_set,
+ .enable = tda998x_bridge_enable,
+};
/* I2C driver functions */
@@ -1608,16 +1610,69 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
-static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->audio_params = p->audio_params;
+}
+
+static void tda998x_destroy(struct device *dev)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&priv->bridge);
+
+ /* disable all IRQs and free the IRQ handler */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+ reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ if (priv->audio_pdev)
+ platform_device_unregister(priv->audio_pdev);
+
+ if (priv->hdmi->irq)
+ free_irq(priv->hdmi->irq, priv);
+
+ del_timer_sync(&priv->edid_delay_timer);
+ cancel_work_sync(&priv->detect_work);
+
+ i2c_unregister_device(priv->cec);
+
+ if (priv->cec_notify)
+ cec_notifier_put(priv->cec_notify);
+}
+
+static int tda998x_create(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
struct device_node *np = client->dev.of_node;
struct i2c_board_info cec_info;
+ struct tda998x_priv *priv;
u32 video;
int rev_lo, rev_hi, ret;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
mutex_init(&priv->mutex); /* protect the page access */
mutex_init(&priv->audio_mutex); /* protect access from audio thread */
mutex_init(&priv->edid_mutex);
+ INIT_LIST_HEAD(&priv->bridge.list);
init_waitqueue_head(&priv->edid_delay_waitq);
timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
@@ -1640,13 +1695,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* read version: */
rev_lo = reg_read(priv, REG_VERSION_LSB);
if (rev_lo < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_lo);
+ dev_err(dev, "failed to read version: %d\n", rev_lo);
return rev_lo;
}
rev_hi = reg_read(priv, REG_VERSION_MSB);
if (rev_hi < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_hi);
+ dev_err(dev, "failed to read version: %d\n", rev_hi);
return rev_hi;
}
@@ -1657,20 +1712,19 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
switch (priv->rev) {
case TDA9989N2:
- dev_info(&client->dev, "found TDA9989 n2");
+ dev_info(dev, "found TDA9989 n2");
break;
case TDA19989:
- dev_info(&client->dev, "found TDA19989");
+ dev_info(dev, "found TDA19989");
break;
case TDA19989N2:
- dev_info(&client->dev, "found TDA19989 n2");
+ dev_info(dev, "found TDA19989 n2");
break;
case TDA19988:
- dev_info(&client->dev, "found TDA19988");
+ dev_info(dev, "found TDA19988");
break;
default:
- dev_err(&client->dev, "found unsupported device: %04x\n",
- priv->rev);
+ dev_err(dev, "found unsupported device: %04x\n", priv->rev);
return -ENXIO;
}
@@ -1713,8 +1767,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
- dev_err(&client->dev,
- "failed to request IRQ#%u: %d\n",
+ dev_err(dev, "failed to request IRQ#%u: %d\n",
client->irq, ret);
goto err_irq;
}
@@ -1723,13 +1776,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(&client->dev);
+ priv->cec_notify = cec_notifier_get(dev);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
}
- priv->cec_glue.parent = &client->dev;
+ priv->cec_glue.parent = dev;
priv->cec_glue.data = priv;
priv->cec_glue.init = tda998x_cec_hook_init;
priv->cec_glue.exit = tda998x_cec_hook_exit;
@@ -1759,61 +1812,44 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* enable EDID read irq: */
reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (!np)
- return 0; /* non-DT */
+ if (np) {
+ /* get the device tree parameters */
+ ret = of_property_read_u32(np, "video-ports", &video);
+ if (ret == 0) {
+ priv->vip_cntrl_0 = video >> 16;
+ priv->vip_cntrl_1 = video >> 8;
+ priv->vip_cntrl_2 = video;
+ }
+
+ ret = tda998x_get_audio_ports(priv, np);
+ if (ret)
+ goto fail;
- /* get the device tree parameters */
- ret = of_property_read_u32(np, "video-ports", &video);
- if (ret == 0) {
- priv->vip_cntrl_0 = video >> 16;
- priv->vip_cntrl_1 = video >> 8;
- priv->vip_cntrl_2 = video;
+ if (priv->audio_port[0].format != AFMT_UNUSED)
+ tda998x_audio_codec_init(priv, &client->dev);
+ } else if (dev->platform_data) {
+ tda998x_set_config(priv, dev->platform_data);
}
- ret = tda998x_get_audio_ports(priv, np);
- if (ret)
- goto fail;
+ priv->bridge.funcs = &tda998x_bridge_funcs;
+#ifdef CONFIG_OF
+ priv->bridge.of_node = dev->of_node;
+#endif
- if (priv->audio_port[0].format != AFMT_UNUSED)
- tda998x_audio_codec_init(priv, &client->dev);
+ drm_bridge_add(&priv->bridge);
return 0;
fail:
- /* if encoder_init fails, the encoder slave is never registered,
- * so cleanup here:
- */
- i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
- if (client->irq)
- free_irq(client->irq, priv);
+ tda998x_destroy(dev);
err_irq:
return ret;
}
-static void tda998x_encoder_prepare(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tda998x_encoder_commit(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
- .dpms = tda998x_encoder_dpms,
- .prepare = tda998x_encoder_prepare,
- .commit = tda998x_encoder_commit,
- .mode_set = tda998x_encoder_mode_set,
-};
+/* DRM encoder functions */
static void tda998x_encoder_destroy(struct drm_encoder *encoder)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
-
- tda998x_destroy(priv);
drm_encoder_cleanup(encoder);
}
@@ -1821,40 +1857,12 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static void tda998x_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
-{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
- priv->audio_params = p->audio_params;
-}
-
-static int tda998x_bind(struct device *dev, struct device *master, void *data)
+static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
- struct tda998x_encoder_params *params = dev->platform_data;
- struct i2c_client *client = to_i2c_client(dev);
- struct drm_device *drm = data;
- struct tda998x_priv *priv;
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
u32 crtcs = 0;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- dev_set_drvdata(dev, priv);
-
if (dev->of_node)
crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -1866,40 +1874,36 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
priv->encoder.possible_crtcs = crtcs;
- ret = tda998x_create(client, priv);
- if (ret)
- return ret;
-
- if (!dev->of_node && params)
- tda998x_set_config(priv, params);
-
- drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
goto err_encoder;
- ret = tda998x_connector_init(priv, drm);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
if (ret)
- goto err_connector;
+ goto err_bridge;
return 0;
-err_connector:
+err_bridge:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
- tda998x_destroy(priv);
return ret;
}
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+
+ return tda998x_encoder_init(dev, drm);
+}
+
static void tda998x_unbind(struct device *dev, struct device *master,
void *data)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
- tda998x_destroy(priv);
}
static const struct component_ops tda998x_ops = {
@@ -1910,16 +1914,27 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ int ret;
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_warn(&client->dev, "adapter does not support I2C\n");
return -EIO;
}
- return component_add(&client->dev, &tda998x_ops);
+
+ ret = tda998x_create(&client->dev);
+ if (ret)
+ return ret;
+
+ ret = component_add(&client->dev, &tda998x_ops);
+ if (ret)
+ tda998x_destroy(&client->dev);
+ return ret;
}
static int tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
+ tda998x_destroy(&client->dev);
return 0;
}
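
Taken together, the tda998x hunks are the standard conversion from a bare slave encoder to a struct drm_bridge: the I2C device now owns the bridge for its whole lifetime, and the component bind step shrinks to creating the encoder and attaching the bridge, whose .attach callback then creates the connector. A compressed lifecycle sketch using the names from the hunks above (fragments, not a complete function):

	/* i2c probe (tda998x_create): no DRM device exists yet */
	priv->bridge.funcs = &tda998x_bridge_funcs;
	drm_bridge_add(&priv->bridge);

	/* component bind (tda998x_encoder_init): */
	drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS, NULL);
	drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);

	/* tda998x_bridge_attach() -> tda998x_connector_init(priv, bridge->dev) */
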
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 576a417690d4..3b378936f575 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -934,7 +934,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
- if (vertex->idx < 0 || vertex->idx > dma->buf_count)
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
return -EINVAL;
i810_dma_dispatch_vertex(dev,
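
The guard change above is a classic off-by-one: the DMA buffer list holds dma->buf_count entries, so the one-past-the-end index must be rejected too, hence `>=` rather than `>` (the GVT fence check later in this section gets the same treatment):

	/* valid indices are 0 .. buf_count - 1 */
	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
		return -EINVAL;	/* idx == buf_count would read past the array */
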
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index dfd95889f4b7..33a458b7f1fc 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -23,6 +23,8 @@ config DRM_I915
select SYNC_FILE
select IOSF_MBI
select CRC32
+ select SND_HDA_I915 if SND_HDA_CORE
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 9de8b1c51a5c..459f8f88a34c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_ERRLOG_GEM
+ bool "Insert extra logging (very verbose) for common GEM errors"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+ Enable additional logging that may help track down the cause of
+ principally userspace errors.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_TRACE_GEM
bool "Insert extra ftrace output from the GEM internals"
depends on DRM_I915_DEBUG_GEM
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4c6adae23e18..5794f102f9b8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
+ icl_dsi.o \
intel_crt.o \
intel_ddi.o \
intel_dp_aux_backlight.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
- intel_dsi.o \
intel_dsi_dcs_backlight.o \
- intel_dsi_pll.o \
intel_dsi_vbt.o \
intel_dvo.o \
intel_hdmi.o \
@@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
- intel_tv.o
+ intel_tv.o \
+ vlv_dsi.o \
+ vlv_dsi_pll.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 80b3e16cf48c..caac9942e1e3 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,7 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t dummy;
+ u8 dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -186,7 +186,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- uint8_t buf[2] = { addr, val };
+ u8 buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -258,11 +258,11 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
- uint8_t outputs_enable, lvds_control_2, lvds_power_down;
- uint8_t horizontal_active_pixel_input;
- uint8_t horizontal_active_pixel_output, vertical_active_line_output;
- uint8_t active_input_line_output;
+ u8 lvds_pll_feedback_div, lvds_pll_vco_control;
+ u8 outputs_enable, lvds_control_2, lvds_power_down;
+ u8 horizontal_active_pixel_input;
+ u8 horizontal_active_pixel_output, vertical_active_line_output;
+ u8 active_input_line_output;
DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
@@ -333,7 +333,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -361,7 +361,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -373,7 +373,7 @@ static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
#define DUMP(reg) \
do { \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 7aeeffd2428b..397ac5233726 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -85,7 +85,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
static struct ch7xxx_id_struct {
- uint8_t vid;
+ u8 vid;
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
@@ -96,7 +96,7 @@ static struct ch7xxx_id_struct {
};
static struct ch7xxx_did_struct {
- uint8_t did;
+ u8 did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
@@ -107,7 +107,7 @@ struct ch7xxx_priv {
bool quiet;
};
-static char *ch7xxx_get_id(uint8_t vid)
+static char *ch7xxx_get_id(u8 vid)
{
int i;
@@ -119,7 +119,7 @@ static char *ch7xxx_get_id(uint8_t vid)
return NULL;
}
-static char *ch7xxx_get_did(uint8_t did)
+static char *ch7xxx_get_did(u8 did)
{
int i;
@@ -132,7 +132,7 @@ static char *ch7xxx_get_did(uint8_t did)
}
/** Reads an 8 bit register */
-static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -170,11 +170,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
}
/** Writes an 8 bit register */
-static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -201,7 +201,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
- uint8_t vendor, device;
+ u8 vendor, device;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
@@ -244,7 +244,7 @@ out:
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
{
- uint8_t cdet, orig_pm, pm;
+ u8 cdet, orig_pm, pm;
ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
@@ -276,7 +276,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t tvco, tpcp, tpd, tlpf, idf;
+ u8 tvco, tpcp, tpd, tlpf, idf;
if (mode->clock <= 65000) {
tvco = 0x23;
@@ -336,7 +336,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
- uint8_t val;
+ u8 val;
if ((i % 8) == 0)
DRM_DEBUG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index c73aff163908..24278cc49090 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -161,7 +161,7 @@
* instead. The following list contains all registers that
* require saving.
*/
-static const uint16_t backup_addresses[] = {
+static const u16 backup_addresses[] = {
0x11, 0x12,
0x18, 0x19, 0x1a, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
@@ -174,11 +174,11 @@ static const uint16_t backup_addresses[] = {
struct ivch_priv {
bool quiet;
- uint16_t width, height;
+ u16 width, height;
/* Register backup */
- uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
+ u16 reg_backup[ARRAY_SIZE(backup_addresses)];
};
@@ -188,7 +188,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
*
* Each of the 256 registers are 16 bits long.
*/
-static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -231,7 +231,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
}
/* Writes a 16-bit register on the ivch */
-static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -263,7 +263,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
- uint16_t temp;
+ u16 temp;
int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
@@ -342,7 +342,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
- uint16_t vr01, vr30, backlight;
+ u16 vr01, vr30, backlight;
ivch_reset(dvo);
@@ -379,7 +379,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
- uint16_t vr01;
+ u16 vr01;
ivch_reset(dvo);
@@ -398,9 +398,9 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *adjusted_mode)
{
struct ivch_priv *priv = dvo->dev_priv;
- uint16_t vr40 = 0;
- uint16_t vr01 = 0;
- uint16_t vr10;
+ u16 vr40 = 0;
+ u16 vr01 = 0;
+ u16 vr10;
ivch_reset(dvo);
@@ -416,7 +416,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
mode->vdisplay != adjusted_mode->crtc_vdisplay) {
- uint16_t x_ratio, y_ratio;
+ u16 x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
@@ -438,7 +438,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
static void ivch_dump_regs(struct intel_dvo_device *dvo)
{
- uint16_t val;
+ u16 val;
ivch_read(dvo, VR00, &val);
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 2379c33cfe51..c584e01dc8dc 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -191,8 +191,8 @@ enum {
};
struct ns2501_reg {
- uint8_t offset;
- uint8_t value;
+ u8 offset;
+ u8 value;
};
/*
@@ -202,23 +202,23 @@ struct ns2501_reg {
* read all this with a grain of salt.
*/
struct ns2501_configuration {
- uint8_t sync; /* configuration of the C0 register */
- uint8_t conf; /* configuration register 8 */
- uint8_t syncb; /* configuration register 41 */
- uint8_t dither; /* configuration of the dithering */
- uint8_t pll_a; /* PLL configuration, register A, 1B */
- uint16_t pll_b; /* PLL configuration, register B, 1C/1D */
- uint16_t hstart; /* horizontal start, registers C1/C2 */
- uint16_t hstop; /* horizontal total, registers C3/C4 */
- uint16_t vstart; /* vertical start, registers C5/C6 */
- uint16_t vstop; /* vertical total, registers C7/C8 */
- uint16_t vsync; /* manual vertical sync start, 80/81 */
- uint16_t vtotal; /* number of lines generated, 82/83 */
- uint16_t hpos; /* horizontal position + 256, 98/99 */
- uint16_t vpos; /* vertical position, 8e/8f */
- uint16_t voffs; /* vertical output offset, 9c/9d */
- uint16_t hscale; /* horizontal scaling factor, b8/b9 */
- uint16_t vscale; /* vertical scaling factor, 10/11 */
+ u8 sync; /* configuration of the C0 register */
+ u8 conf; /* configuration register 8 */
+ u8 syncb; /* configuration register 41 */
+ u8 dither; /* configuration of the dithering */
+ u8 pll_a; /* PLL configuration, register A, 1B */
+ u16 pll_b; /* PLL configuration, register B, 1C/1D */
+ u16 hstart; /* horizontal start, registers C1/C2 */
+ u16 hstop; /* horizontal total, registers C3/C4 */
+ u16 vstart; /* vertical start, registers C5/C6 */
+ u16 vstop; /* vertical total, registers C7/C8 */
+ u16 vsync; /* manual vertical sync start, 80/81 */
+ u16 vtotal; /* number of lines generated, 82/83 */
+ u16 hpos; /* horizontal position + 256, 98/99 */
+ u16 vpos; /* vertical position, 8e/8f */
+ u16 voffs; /* vertical output offset, 9c/9d */
+ u16 hscale; /* horizontal scaling factor, b8/b9 */
+ u16 vscale; /* vertical scaling factor, 10/11 */
};
/*
@@ -389,7 +389,7 @@ struct ns2501_priv {
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -434,11 +434,11 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 1c1a0674dbab..4ae5d8fd9ff0 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -65,7 +65,7 @@ struct sil164_priv {
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
-static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -102,11 +102,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -173,7 +173,7 @@ out:
static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
{
- uint8_t reg9;
+ u8 reg9;
sil164_readb(dvo, SIL164_REG9, &reg9);
@@ -243,7 +243,7 @@ static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 31e181da93db..d603bc2f2506 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -90,7 +90,7 @@ struct tfp410_priv {
bool quiet;
};
-static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -127,11 +127,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -155,7 +155,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
{
- uint8_t ch1, ch2;
+ u8 ch1, ch2;
if (tfp410_readb(dvo, addr+0, &ch1) &&
tfp410_readb(dvo, addr+1, &ch2))
@@ -203,7 +203,7 @@ out:
static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
{
enum drm_connector_status ret = connector_status_disconnected;
- uint8_t ctl2;
+ u8 ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
if (ctl2 & TFP410_CTL_2_RSEN)
@@ -236,7 +236,7 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
/* set the tfp410 power state */
static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
@@ -251,7 +251,7 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return false;
@@ -264,7 +264,7 @@ static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val, val2;
+ u8 val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7c9ec4f4f36c..fe754022e356 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+ ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
assert_rpm_wakelock_held(dev_priv);
- if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+ if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b51c05d03f14..a614db310ea2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -172,6 +172,7 @@ struct decode_info {
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5)
#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
@@ -862,6 +863,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ u32 ctx_sr_ctl;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -872,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
- return 0;
+ return -EBADRQC;
}
if (is_shadowed_mmio(offset)) {
@@ -894,6 +896,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ /* TODO
+ * Right now we only scan LRI commands on KBL, and only in the
+ * inhibit context. That is good enough to support initializing
+ * MMIO by LRI commands in a vGPU inhibit context on KBL.
+ */
+ if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
+ intel_gvt_hypervisor_read_gpa(s->vgpu,
+ s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+ /* check inhibit context */
+ if (ctx_sr_ctl & 1) {
+ u32 data = cmd_val(s, index + 1);
+
+ if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+ intel_vgpu_mask_mmio_write(vgpu,
+ offset, &data, 4);
+ else
+ vgpu_vreg(vgpu, offset) = data;
+ }
+ }
+
/* TODO: Update the global mask if this MMIO is a masked-MMIO */
intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
@@ -1256,7 +1280,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1284,7 +1310,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1308,7 +1336,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1317,26 +1347,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
static int check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_check_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_check_mi_display_flip(s, info);
}
static int update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_update_plane_mmio_from_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@ -1615,15 +1633,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
*/
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
- struct intel_gvt *gvt = s->vgpu->gvt;
-
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- /* BDW decides privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
+ /* Decide privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8) &&
!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
- return 0;
- }
+ return 0;
return 1;
}
@@ -2349,6 +2362,9 @@ static struct cmd_info cmd_info[] = {
{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
+ {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8180e8d1e2..3019dbc39aef 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
+ if (IS_BROXTON(dev_priv)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
+ BXT_DE_PORT_HP_DDIB |
+ BXT_DE_PORT_HP_DDIC);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIA;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIB;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIC;
+ }
+
+ return;
+ }
+
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
@@ -196,7 +219,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +239,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +259,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -273,8 +296,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
for_each_pipe(dev_priv, pipe) {
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
@@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
int pipe, id;
+ int found = false;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- if (pipe_is_enabled(vgpu, pipe))
- goto out;
+ if (pipe_is_enabled(vgpu, pipe)) {
+ found = true;
+ break;
+ }
}
+ if (found)
+ break;
}
/* all the pipes are disabled */
- hrtimer_cancel(&irq->vblank_timer.timer);
- return;
-
-out:
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
-
+ if (!found)
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ else
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+ mutex_unlock(&gvt->lock);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
+ mutex_lock(&vgpu->vgpu_lock);
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
struct intel_vgpu *vgpu;
int id;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
+ mutex_unlock(&gvt->lock);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6f4f8e941fc2..6e3f56684f4e 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
@@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
return obj;
}
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+ return c && c->x_hot <= c->width && c->y_hot <= c->height;
+}
+
static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *info,
@@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_pos = c.x_pos;
info->y_pos = c.y_pos;
- /* The invalid cursor hotspot value is delivered to host
- * until we find a way to get the cursor hotspot info of
- * guest OS.
- */
- info->x_hot = UINT_MAX;
- info->y_hot = UINT_MAX;
+ if (validate_hotspot(&c)) {
+ info->x_hot = c.x_hot;
+ info->y_hot = c.y_hot;
+ } else {
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ }
+
info->size = (((info->stride * c.height * c.bpp) / 8)
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
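The validate_hotspot() path added above replaces the old always-UINT_MAX behaviour: a guest-supplied cursor hotspot is forwarded only when it lies inside the cursor plane, and UINT_MAX still signals "invalid" to the host. A small sketch of that decision, with a simplified struct standing in for intel_vgpu_cursor_plane_format:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct cursor { unsigned int width, height, x_hot, y_hot; };

    static bool hotspot_valid(const struct cursor *c)
    {
        return c && c->x_hot <= c->width && c->y_hot <= c->height;
    }

    int main(void)
    {
        struct cursor ok  = { 64, 64, 10, 12 };
        struct cursor bad = { 64, 64, 200, 12 };

        printf("ok:  x_hot=%u\n", hotspot_valid(&ok) ? ok.x_hot : UINT_MAX);
        printf("bad: x_hot=%u\n", hotspot_valid(&bad) ? bad.x_hot : UINT_MAX);
        return 0;
    }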
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index f61337632969..4b98539025c5 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
return chr;
}
+static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 1)
+ port = PORT_B;
+ else if (port_select == 2)
+ port = PORT_C;
+ else if (port_select == 3)
+ port = PORT_D;
+ return port;
+}
+
static inline int get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
@@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- port = get_port_from_gmbus0(pin_select);
+ if (IS_BROXTON(dev_priv))
+ port = bxt_get_port_from_gmbus0(pin_select);
+ else
+ port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
return 0;
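The Broxton GMBUS0 decode added above maps pin-select values 1/2/3 to ports B/C/D and rejects anything else. A compact sketch of the same mapping (the 0x7 mask and port numbers are illustrative stand-ins for _GMBUS_PIN_SEL_MASK and the PORT_* enum):

    #include <stdio.h>

    enum demo_port { DEMO_PORT_B = 1, DEMO_PORT_C, DEMO_PORT_D };

    static int bxt_port_from_gmbus0(unsigned int gmbus0)
    {
        switch (gmbus0 & 0x7) {   /* pin-select field */
        case 1: return DEMO_PORT_B;
        case 2: return DEMO_PORT_C;
        case 3: return DEMO_PORT_D;
        default: return -1;       /* -EINVAL in the driver */
        }
    }

    int main(void)
    {
        for (unsigned int pin = 0; pin <= 4; pin++)
            printf("pin %u -> port %d\n", pin, bxt_port_from_gmbus0(pin));
        return 0;
    }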
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 427e40e64d41..714d709829a2 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -146,14 +146,11 @@ struct execlist_ring_context {
u32 nop4;
u32 lri_cmd_2;
struct execlist_mmio_pair ctx_timestamp;
- struct execlist_mmio_pair pdp3_UDW;
- struct execlist_mmio_pair pdp3_LDW;
- struct execlist_mmio_pair pdp2_UDW;
- struct execlist_mmio_pair pdp2_LDW;
- struct execlist_mmio_pair pdp1_UDW;
- struct execlist_mmio_pair pdp1_LDW;
- struct execlist_mmio_pair pdp0_UDW;
- struct execlist_mmio_pair pdp0_LDW;
+ /*
+ * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW,
+ * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW}
+ */
+ struct execlist_mmio_pair pdps[8];
};
struct intel_vgpu_elsp_dwords {
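With the execlist_ring_context change above, the eight named pdpN_UDW/LDW pairs collapse into pdps[8], laid out high-to-low as the comment shows. A tiny sketch of how a named slot maps to an array index (these helpers are hypothetical, not part of the driver):

    #include <stdio.h>

    /* pdps[0]=pdp3_UDW, pdps[1]=pdp3_LDW, ..., pdps[7]=pdp0_LDW */
    static int pdp_udw_index(int n) { return (3 - n) * 2; }
    static int pdp_ldw_index(int n) { return (3 - n) * 2 + 1; }

    int main(void)
    {
        for (int n = 3; n >= 0; n--)
            printf("pdp%d: UDW=pdps[%d] LDW=pdps[%d]\n",
                   n, pdp_udw_index(n), pdp_ldw_index(n));
        return 0;
    }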
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 1c120683e958..face664be3e8 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -36,6 +36,7 @@
#include <uapi/drm/drm_fourcc.h>
#include "i915_drv.h"
#include "gvt.h"
+#include "i915_pvinfo.h"
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
@@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
_PLANE_CTL_TILED_SHIFT;
fmt = skl_format_to_drm(
@@ -256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
- (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+ (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
@@ -300,16 +307,16 @@ static int cursor_mode_to_drm(int mode)
int cursor_pixel_formats_index = 4;
switch (mode) {
- case CURSOR_MODE_128_ARGB_AX:
+ case MCURSOR_MODE_128_ARGB_AX:
cursor_pixel_formats_index = 0;
break;
- case CURSOR_MODE_256_ARGB_AX:
+ case MCURSOR_MODE_256_ARGB_AX:
cursor_pixel_formats_index = 1;
break;
- case CURSOR_MODE_64_ARGB_AX:
+ case MCURSOR_MODE_64_ARGB_AX:
cursor_pixel_formats_index = 2;
break;
- case CURSOR_MODE_64_32B_AX:
+ case MCURSOR_MODE_64_32B_AX:
cursor_pixel_formats_index = 3;
break;
@@ -342,8 +349,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -ENODEV;
val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
- mode = val & CURSOR_MODE;
- plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ mode = val & MCURSOR_MODE;
+ plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
return -ENODEV;
@@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+ plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+ plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a73e1d418c22..4ac18b447247 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
h = (struct gvt_firmware_header *)fw->data;
- crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ crc32_start = offsetofend(struct gvt_firmware_header, crc32);
mem = fw->data + crc32_start;
#define VERIFY(s, a, b) do { \
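The firmware.c change swaps a hand-coded "offsetof + 4" for offsetofend(), which keeps the CRC region start correct even if the crc32 member's type ever changes. A sketch using the kernel's definition of the macro against a simplified stand-in header layout:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    struct fw_header_demo {       /* simplified stand-in layout */
        uint64_t magic;
        uint32_t crc32;           /* CRC covers everything after this */
    };

    int main(void)
    {
        printf("crc region starts at byte %zu\n",
               offsetofend(struct fw_header_demo, crc32));
        return 0;
    }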
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 23296547da95..00aad8164dec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ /* We treat the IPS bit as 'PSE' at the PTE level. */
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
@@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt,
#define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
+#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
+#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
+
+#define GTT_64K_PTE_STRIDE 16
+
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
unsigned long pfn;
@@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+ pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
else
pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
return pfn;
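Note how the pfn accessors above differ only in mask width: whatever the entry size (4K, 64K, 2M, 1G), the shift is always PAGE_SHIFT, so the result is a 4K-granular pfn. A self-contained sketch of the mask arithmetic (GTT_HAW of 46 matches the driver; the entry values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define GTT_HAW 46
    #define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
    #define ADDR_2M_MASK  GENMASK_ULL(GTT_HAW - 1, 21)
    #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
    #define ADDR_4K_MASK  GENMASK_ULL(GTT_HAW - 1, 12)
    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t pte   = 0x0000000123456000ULL | 1;        /* present 4K */
        uint64_t pde2m = 0x0000000123400000ULL | 0x80 | 1; /* PSE + present */

        printf("4K pfn = 0x%llx\n",
               (unsigned long long)((pte & ADDR_4K_MASK) >> PAGE_SHIFT));
        printf("2M pfn = 0x%llx\n",
               (unsigned long long)((pde2m & ADDR_2M_MASK) >> PAGE_SHIFT));
        return 0;
    }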
@@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e->val64 &= ~ADDR_2M_MASK;
pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
+ e->val64 &= ~ADDR_64K_MASK;
+ pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
} else {
e->val64 &= ~ADDR_4K_MASK;
pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
@@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
- /* Entry doesn't have PSE bit. */
- if (get_pse_type(e->type) == GTT_TYPE_INVALID)
- return false;
+ return !!(e->val64 & _PAGE_PSE);
+}
- e->type = get_entry_type(e->type);
- if (!(e->val64 & _PAGE_PSE))
+static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
+{
+ if (gen8_gtt_test_pse(e)) {
+ switch (e->type) {
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ e->val64 &= ~_PAGE_PSE;
+ e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
+ break;
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
+ e->val64 &= ~_PAGE_PSE;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
+}
+
+static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
return false;
- e->type = get_pse_type(e->type);
- return true;
+ return !!(e->val64 & GEN8_PDE_IPS_64K);
+}
+
+static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+ return;
+
+ e->val64 &= ~GEN8_PDE_IPS_64K;
}
static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
@@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
e->val64 |= _PAGE_PRESENT;
}
+static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
+}
+
+static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
+}
+
+static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
+}
+
/*
* Per-platform GMA routines.
*/
@@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
+ .clear_pse = gen8_gtt_clear_pse,
+ .clear_ips = gen8_gtt_clear_ips,
+ .test_ips = gen8_gtt_test_ips,
+ .clear_64k_splited = gen8_gtt_clear_64k_splited,
+ .set_64k_splited = gen8_gtt_set_64k_splited,
+ .test_64k_splited = gen8_gtt_test_64k_splited,
.get_pfn = gen8_gtt_get_pfn,
.set_pfn = gen8_gtt_set_pfn,
};
@@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
+/* Update the entry type according to the PSE and IPS bits. */
+static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+ struct intel_gvt_gtt_entry *entry, bool ips)
+{
+ switch (entry->type) {
+ case GTT_TYPE_PPGTT_PDE_ENTRY:
+ case GTT_TYPE_PPGTT_PDP_ENTRY:
+ if (pte_ops->test_pse(entry))
+ entry->type = get_pse_type(entry->type);
+ break;
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ if (ips)
+ entry->type = get_pse_type(entry->type);
+ break;
+ default:
+ GEM_BUG_ON(!gtt_type_is_entry(entry->type));
+ }
+
+ GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
+}
+
/*
* MM helpers.
*/
@@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
mm->ppgtt_mm.shadow_pdps,
entry, index, false, 0, mm->vgpu);
-
- pte_ops->test_pse(entry);
+ update_entry_type_for_real(pte_ops, entry, false);
}
static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry(
if (ret)
return ret;
- ops->test_pse(e);
+ update_entry_type_for_real(ops, e, guest ?
+ spt->guest_page.pde_ips : false);
gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
type, e->type, index, e->val64);
@@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
- if (spt->guest_page.oos_page)
- detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+ if (spt->guest_page.gfn) {
+ if (spt->guest_page.oos_page)
+ detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
- intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ }
list_del_init(&spt->post_shadow_list);
free_spt(spt);
@@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
+/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, int type, unsigned long gfn)
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -753,26 +840,12 @@ retry:
spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
- /*
- * Init guest_page.
- */
- spt->guest_page.type = type;
- spt->guest_page.gfn = gfn;
-
- ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
- ppgtt_write_protection_handler, spt);
- if (ret)
- goto err_unmap_dma;
-
ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
if (ret)
- goto err_unreg_page_track;
+ goto err_unmap_dma;
- trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
return spt;
-err_unreg_page_track:
- intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
@@ -780,6 +853,37 @@ err_free_spt:
return ERR_PTR(ret);
}
+/* Allocate shadow page table associated with specific gfn. */
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ unsigned long gfn, bool guest_pde_ips)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+ int ret;
+
+ spt = ppgtt_alloc_spt(vgpu, type);
+ if (IS_ERR(spt))
+ return spt;
+
+ /*
+ * Init guest_page.
+ */
+ ret = intel_vgpu_register_page_track(vgpu, gfn,
+ ppgtt_write_protection_handler, spt);
+ if (ret) {
+ ppgtt_free_spt(spt);
+ return ERR_PTR(ret);
+ }
+
+ spt->guest_page.type = type;
+ spt->guest_page.gfn = gfn;
+ spt->guest_page.pde_ips = guest_pde_ips;
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+
+ return spt;
+}
+
#define pt_entry_size_shift(spt) \
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
@@ -787,24 +891,38 @@ err_free_spt:
(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_guest_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
#define for_each_present_shadow_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_shadow_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
-static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
+#define for_each_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); \
+ i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+ if (!ppgtt_get_shadow_entry(spt, e, i))
+
+static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
int v = atomic_read(&spt->refcount);
trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
-
atomic_inc(&spt->refcount);
}
+static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+ return atomic_dec_return(&spt->refcount);
+}
+
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
pfn = ops->get_pfn(entry);
type = spt->shadow_page.type;
- if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+ /* Uninitialized spte or unshadowed spte. */
+ if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
- int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
spt->guest_page.gfn, spt->shadow_page.type);
- trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
-
- if (atomic_dec_return(&spt->refcount) > 0)
+ if (ppgtt_put_spt(spt) > 0)
return 0;
for_each_present_shadow_entry(spt, &e, index) {
@@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(spt, &e);
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ /* We don't set up 64K shadow entries so far. */
+ WARN(1, "suspicious 64K gtt entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("invalidate 2M entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- WARN(1, "GVT doesn't support 2M/1GB page\n");
+ WARN(1, "GVT doesn't support 1GB page\n");
continue;
case GTT_TYPE_PPGTT_PML4_ENTRY:
case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -899,6 +1021,22 @@ fail:
return ret;
}
+static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+ u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
+ GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ /* 64K paging is only controlled by the IPS bit in the PTE now. */
+ return true;
+ } else {
+ return false;
+ }
+}
+
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
@@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *spt = NULL;
+ bool ips = false;
int ret;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
+ if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
+
spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
- if (spt)
+ if (spt) {
ppgtt_get_spt(spt);
- else {
+
+ if (ips != spt->guest_page.pde_ips) {
+ spt->guest_page.pde_ips = ips;
+
+ gvt_dbg_mm("reshadow PDE since ips changed\n");
+ clear_page(spt->shadow_page.vaddr);
+ ret = ppgtt_populate_spt(spt);
+ if (ret) {
+ ppgtt_put_spt(spt);
+ goto err;
+ }
+ }
+ } else {
int type = get_next_pt_type(we->type);
- spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+ spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
- goto fail;
+ goto err;
}
ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
if (ret)
- goto fail;
+ goto err_free_spt;
ret = ppgtt_populate_spt(spt);
if (ret)
- goto fail;
+ goto err_free_spt;
trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
spt->shadow_page.type);
}
return spt;
-fail:
+
+err_free_spt:
+ ppgtt_free_spt(spt);
+err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
return ERR_PTR(ret);
@@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
se->type = ge->type;
se->val64 = ge->val64;
+ /* Because we always split 64KB pages, clear IPS in the shadow PDE. */
+ if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ops->clear_ips(se);
+
ops->set_pfn(se, s->shadow_page.mfn);
}
+/*
+ * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
+ * are not met, negative on error.
+ */
+static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+
+ if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ return 0;
+
+ pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
+ if (pfn == INTEL_GVT_INVALID_ADDR)
+ return -EINVAL;
+
+ return PageTransHuge(pfn_to_page(pfn));
+}
+
+static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *sub_spt;
+ struct intel_gvt_gtt_entry sub_se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ unsigned long sub_index;
+ int ret;
+
+ gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+
+ start_gfn = ops->get_pfn(se);
+
+ sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+ if (IS_ERR(sub_spt))
+ return PTR_ERR(sub_spt);
+
+ for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+ if (ret) {
+ ppgtt_invalidate_spt(spt);
+ return ret;
+ }
+ sub_se.val64 = se->val64;
+
+ /* Copy the PAT field from PDE. */
+ sub_se.val64 &= ~_PAGE_PAT;
+ sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+
+ ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+ }
+
+ /* Clear dirty field. */
+ se->val64 &= ~_PAGE_DIRTY;
+
+ ops->clear_pse(se);
+ ops->clear_ips(se);
+ ops->set_pfn(se, sub_spt->shadow_page.mfn);
+ ppgtt_set_shadow_entry(spt, se, index);
+ return 0;
+}
+
+static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry entry = *se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ int i, ret;
+
+ gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
+
+ GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
+
+ start_gfn = ops->get_pfn(se);
+
+ entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
+ ops->set_64k_splited(&entry);
+
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + i, PAGE_SIZE, &dma_addr);
+ if (ret)
+ return ret;
+
+ ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(spt, &entry, index + i);
+ }
+ return 0;
+}
+
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
struct intel_gvt_gtt_entry *ge)
{
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry se = *ge;
- unsigned long gfn;
+ unsigned long gfn, page_size = PAGE_SIZE;
dma_addr_t dma_addr;
int ret;
@@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
gvt_vdbg_mm("shadow 4K gtt entry\n");
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ gvt_vdbg_mm("shadow 64K gtt entry\n");
+ /*
+ * The layout of a 64K page is special: the page size is
+ * controlled by the upper PDE. To keep things simple, we
+ * always split a 64K page into smaller 4K pages in the
+ * shadow PT.
+ */
+ return split_64KB_gtt_entry(vgpu, spt, index, &se);
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("shadow 2M gtt entry\n");
+ ret = is_2MB_gtt_possible(vgpu, ge);
+ if (ret == 0)
+ return split_2MB_gtt_entry(vgpu, spt, index, &se);
+ else if (ret < 0)
+ return ret;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+ break;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+ gvt_vgpu_err("GVT doesn't support 1GB entry\n");
return -EINVAL;
default:
GEM_BUG_ON(1);
};
/* direct shadow */
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
+ &dma_addr);
if (ret)
return -ENXIO;
@@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
ret = ppgtt_invalidate_spt(s);
if (ret)
goto fail;
- } else
+ } else {
+ /* We don't set up 64K shadow entries so far. */
+ WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ "suspicious 64K entry\n");
ppgtt_invalidate_pte(spt, se);
+ }
return 0;
fail:
@@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table(
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry old_se;
int new_present;
- int ret;
+ int i, ret;
new_present = ops->test_present(we);
@@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table(
goto fail;
if (!new_present) {
- ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
- ppgtt_set_shadow_entry(spt, &old_se, index);
+ /* For split 64KB entries, we need to clear them all. */
+ if (ops->test_64k_splited(&old_se) &&
+ !(index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("remove splited 64K shadow entries\n");
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ops->clear_64k_splited(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index + i);
+ }
+ } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+ old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ ops->clear_pse(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ } else {
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ }
}
return 0;
@@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes(
ppgtt_get_guest_entry(spt, &we, index);
- ops->test_pse(&we);
+ /*
+ * For a page table that has 64K gtt entries, only PTE#0,
+ * PTE#16, PTE#32, ... PTE#496 are used. Writes to the
+ * unused PTEs should be ignored.
+ */
+ if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
+ (index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
+ index);
+ return 0;
+ }
if (bytes == info->gtt_entry_size) {
ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
@@ -1592,6 +1901,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
+ mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1616,6 +1926,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
+ mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -1868,6 +2179,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
+ /* If the ggtt entry size is 8 bytes and it is split into two
+ * 4-byte writes, we assume the two writes are consecutive.
+ * Otherwise, we abort and report an error.
+ */
+ if (bytes < info->gtt_entry_size) {
+ if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+ /* the first partial write */
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ return 0;
+ } else if ((g_gtt_index ==
+ (ggtt_mm->ggtt_mm.last_partial_off >>
+ info->gtt_entry_size_shift)) &&
+ (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+ /* the second partial write */
+
+ int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+ last_off, bytes);
+
+ ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+ } else {
+ int last_offset;
+
+ gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+ ggtt_mm->ggtt_mm.last_partial_off, off,
+ bytes, info->gtt_entry_size);
+
+ /* point the host ggtt entry at the scratch page and
+ * mark the virtual ggtt entry as not present for the
+ * last partially written offset
+ */
+ last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+ (~(info->gtt_entry_size - 1));
+
+ ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate_pte(vgpu, &m);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate(gvt->dev_priv);
+
+ ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+ ops->clear_present(&e);
+ ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+ return 0;
+ }
+ }
+
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
m = e;
@@ -1881,7 +2248,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
}
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
- &dma_addr);
+ PAGE_SIZE, &dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
@@ -1973,7 +2340,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
* GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
* is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
*/
- if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ if (type > GTT_TYPE_PPGTT_PTE_PT) {
struct intel_gvt_gtt_entry se;
memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@ -2257,13 +2624,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_dbg_core("init gtt\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
- gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
- } else {
- return -ENODEV;
- }
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
page = (void *)get_zeroed_page(GFP_KERNEL);
if (!page) {
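Among the gtt.c changes above is the split-GGTT-write handling: an 8-byte entry that arrives as two consecutive 4-byte writes is stashed on the first half and merged when the second half lands. A simplified sketch of that reassembly (it drops the index checks and the error-recovery branch the driver has, and assumes a little-endian host):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ENTRY_SIZE 8

    struct ggtt_demo {
        uint64_t entry;
        unsigned long last_partial_off; /* -1UL == no half-write pending */
        uint64_t last_partial_data;
    };

    static void ggtt_write(struct ggtt_demo *g, unsigned int off,
                           const void *data, unsigned int bytes)
    {
        uint64_t e = g->entry;

        memcpy((char *)&e + (off & (ENTRY_SIZE - 1)), data, bytes);

        if (bytes < ENTRY_SIZE) {
            if (g->last_partial_off == -1UL) {
                g->last_partial_off = off;  /* first half: stash, don't commit */
                g->last_partial_data = e;
                return;
            }
            /* second half: merge the stashed bytes back in */
            unsigned int last = g->last_partial_off & (ENTRY_SIZE - 1);

            memcpy((char *)&e + last,
                   (char *)&g->last_partial_data + last, bytes);
            g->last_partial_off = -1UL;
        }
        g->entry = e;                       /* complete entry: commit */
    }

    int main(void)
    {
        struct ggtt_demo g = { 0, -1UL, 0 };
        uint32_t lo = 0xdeadbeef, hi = 0x00c0ffee;

        ggtt_write(&g, 0, &lo, 4);
        ggtt_write(&g, 4, &hi, 4);
        printf("entry = 0x%016llx\n", (unsigned long long)g.entry);
        return 0;
    }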
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 3792f2b7f4ff..7a9b36176efb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops {
void (*clear_present)(struct intel_gvt_gtt_entry *e);
void (*set_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*clear_pse)(struct intel_gvt_gtt_entry *e);
+ bool (*test_ips)(struct intel_gvt_gtt_entry *e);
+ void (*clear_ips)(struct intel_gvt_gtt_entry *e);
+ bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
@@ -95,6 +101,7 @@ typedef enum {
GTT_TYPE_GGTT_PTE,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY,
GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PTE_1G_ENTRY,
@@ -150,6 +157,8 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
+ unsigned long last_partial_off;
+ u64 last_partial_data;
} ggtt_mm;
};
};
@@ -220,6 +229,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
unsigned long mfn;
@@ -227,6 +237,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 61bd14fcb649..46c8b720e336 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
@@ -238,18 +239,15 @@ static void init_device_info(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- info->max_support_vgpus = 8;
- info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
- info->mmio_size = 2 * 1024 * 1024;
- info->mmio_bar = 0;
- info->gtt_start_offset = 8 * 1024 * 1024;
- info->gtt_entry_size = 8;
- info->gtt_entry_size_shift = 3;
- info->gmadr_bytes_in_cmd = 8;
- info->max_surface_size = 36 * 1024 * 1024;
- }
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
info->msi_cap_offset = pdev->msi_cap;
}
@@ -271,11 +269,8 @@ static int gvt_service_thread(void *data)
continue;
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
- (void *)&gvt->service_request)) {
- mutex_lock(&gvt->lock);
+ (void *)&gvt->service_request))
intel_gvt_emulate_vblank(gvt);
- mutex_unlock(&gvt->lock);
- }
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
@@ -321,6 +316,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!gvt))
return;
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ intel_gvt_clean_vgpu_types(gvt);
+
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
@@ -328,17 +328,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
- intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
-
- intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
- intel_gvt_clean_vgpu_types(gvt);
-
+ intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -379,6 +372,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
idr_init(&gvt->vgpu_idr);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
+ mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
@@ -473,3 +467,7 @@ out_clean_idr:
kfree(gvt);
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+MODULE_SOFTDEP("pre: kvmgt");
+#endif
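The gvt.c changes also move locking into the vblank helpers themselves: instead of asserting that the caller already holds gvt->lock, intel_gvt_emulate_vblank() now takes and drops the lock internally. A minimal pthread sketch of that pattern shift (all names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Old style: WARN_ON(!mutex_is_locked(&gvt->lock)) and bail out.
     * New style: the helper is self-locking, callers stay lock-free. */
    static void emulate_vblank_all(void)
    {
        pthread_mutex_lock(&gvt_lock);
        /* ... iterate active vGPUs, emulate vblank per pipe ... */
        pthread_mutex_unlock(&gvt_lock);
    }

    int main(void)
    {
        emulate_vblank_all(); /* safe to call without holding gvt_lock */
        printf("done\n");
        return 0;
    }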
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 05d15a095310..31f6cdbe5c42 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
struct intel_vgpu {
struct intel_gvt *gvt;
+ struct mutex vgpu_lock;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
+
+ /* Both sched_data and sched_ctl can be seen as part of the global
+ * gvt scheduler structure, so the two vgpu fields below are
+ * protected by sched_lock, not vgpu_lock.
+ */
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -268,6 +274,8 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX (1 << 7)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -294,7 +302,13 @@ struct intel_vgpu_type {
};
struct intel_gvt {
+ /* GVT scope lock: protects GVT itself and all resources not yet
+ * protected by more specific locks (the vgpu and scheduler locks).
+ */
struct mutex lock;
+ /* scheduler scope lock: protects gvt and vgpu scheduling-related data */
+ struct mutex sched_lock;
+
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
@@ -314,6 +328,10 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
+
+ /* service_request is always accessed with bit operations; using
+ * atomic bit ops means the big gvt lock is not needed for it.
+ */
unsigned long service_request;
struct {
@@ -361,9 +379,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+ ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
@@ -468,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -545,7 +564,8 @@ struct intel_gvt_ops {
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
- void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+ void (*vgpu_release)(struct intel_vgpu *vgpu);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
@@ -639,6 +659,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
+/**
+ * intel_gvt_mmio_is_in_ctx - check if an MMIO has the in-context flag
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has the in-context flag, false otherwise.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mark an MMIO as saved/restored in context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
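The F_IN_CTX flag added above lives in the same per-register attribute bitmap as the other F_* flags: one byte of flags per 4-byte MMIO offset, indexed by offset >> 2. A small sketch of that get/set pattern (the 2MB MMIO-space size mirrors init_device_info() earlier in this series):

    #include <stdint.h>
    #include <stdio.h>

    #define F_IN_CTX  (1 << 7)
    #define MMIO_SIZE (2 * 1024 * 1024)

    /* One attribute byte per 4-byte register, as in struct intel_gvt_mmio. */
    static uint8_t mmio_attribute[MMIO_SIZE >> 2];

    static void set_in_ctx(unsigned int offset)
    {
        mmio_attribute[offset >> 2] |= F_IN_CTX;
    }

    static int is_in_ctx(unsigned int offset)
    {
        return !!(mmio_attribute[offset >> 2] & F_IN_CTX);
    }

    int main(void)
    {
        set_in_ctx(0x2244);
        printf("0x2244: %d, 0x2248: %d\n",
               is_in_ctx(0x2244), is_in_ctx(0x2248));
        return 0;
    }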
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bcbc47a88a70..7a58ca555197 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_SKL;
else if (IS_KABYLAKE(gvt->dev_priv))
return D_KBL;
+ else if (IS_BROXTON(gvt->dev_priv))
+ return D_BXT;
return 0;
}
@@ -208,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
return 0;
}
+static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
+ gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
+ else if (!ips)
+ gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
+ else {
+ /* All engines must be enabled together for the vGPU,
+ * since we don't know which engine the ppgtt will
+ * bind to when shadowing.
+ */
+ gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
+ ips);
+ return -EINVAL;
+ }
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
@@ -255,7 +282,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -316,6 +344,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
}
+ /* vgpu_lock is already held by the mmio r/w emulation path */
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
@@ -420,7 +449,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
else
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ /* vgpu_lock is already held by the mmio r/w emulation path */
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_check_vblank_emulation(vgpu->gvt);
+ mutex_lock(&vgpu->vgpu_lock);
return 0;
}
@@ -857,7 +889,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
data = vgpu_vreg(vgpu, offset);
if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv))
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -1209,8 +1242,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
ret = handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
- case 0x78830:
- case 0x78834:
+ case _vgtif_reg(cursor_x_hot):
+ case _vgtif_reg(cursor_y_hot):
case _vgtif_reg(pdp[0].lo):
case _vgtif_reg(pdp[0].hi):
case _vgtif_reg(pdp[1].lo):
@@ -1369,6 +1402,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
+ } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below were read
+ * from a Broxton MRB.
+ */
+ if (!*data0)
+ *data0 = 0x16080707;
+ else
+ *data0 = 0x16161616;
}
break;
case SKL_PCODE_CDCLK_CONTROL:
@@ -1426,8 +1469,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- v &= (1 << 31) | (1 << 29) | (1 << 9) |
- (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ if (IS_BROXTON(vgpu->gvt->dev_priv))
+ v &= (1 << 31) | (1 << 29);
+ else
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
v |= (v >> 1);
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@ -1447,6 +1493,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
+static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & BXT_DE_PLL_PLL_ENABLE)
+ v |= BXT_DE_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & PORT_PLL_ENABLE)
+ v |= PORT_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+ u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
+
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = vgpu_vreg(vgpu, offset);
+
+ v &= ~UNIQUE_TRANGE_EN_METHOD;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ vgpu_vreg(vgpu, offset - 0x800) = v;
+ } else {
+ vgpu_vreg(vgpu, offset - 0x400) = v;
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & BIT(0)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+ PHY_POWER_GOOD;
+ }
+
+ if (v & BIT(1)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+ PHY_POWER_GOOD;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+}
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -1657,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
+ gamw_echo_dev_rw_ia_write);
+
MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
@@ -2670,17 +2821,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
- MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
+ MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
+ MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2805,53 +2956,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
+ MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ NULL, gen9_trtte_write);
+ MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
- MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
@@ -2869,11 +3024,188 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
+ MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+
+ return 0;
+}
+
+static int init_bxt_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
+ MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
+ MMIO_D(ERROR_GEN6, D_BXT);
+ MMIO_D(DONE_REG, D_BXT);
+ MMIO_D(EIR, D_BXT);
+ MMIO_D(PGTBL_ER, D_BXT);
+ MMIO_D(_MMIO(0x4194), D_BXT);
+ MMIO_D(_MMIO(0x4294), D_BXT);
+ MMIO_D(_MMIO(0x4494), D_BXT);
+
+ MMIO_RING_D(RING_PSMI_CTL, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
+ MMIO_RING_D(RING_IPEHR, D_BXT);
+ MMIO_RING_D(RING_INSTPS, D_BXT);
+ MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
+ MMIO_RING_D(RING_BBSTATE, D_BXT);
+ MMIO_RING_D(RING_IPEIR, D_BXT);
+
+ MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
+ MMIO_D(BXT_RP_STATE_CAP, D_BXT);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
+ bxt_port_pll_enable_write);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_DE_PLL_CTL, D_BXT);
+ MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
+ MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
+ MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
+
+ MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
+
+ MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+
+ MMIO_D(RC6_CTX_BASE, D_BXT);
+
+ MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
+ MMIO_D(GEN6_GFXPAUSE, D_BXT);
+ MMIO_D(GEN8_L3SQCREG1, D_BXT);
+
+ MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2965,6 +3297,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
+ } else if (IS_BROXTON(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_bxt_mmio_info(gvt);
+ if (ret)
+ goto err;
}
gvt->mmio.mmio_block = mmio_blocks;
@@ -3046,6 +3388,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mask, old_vreg;
+
+ old_vreg = vgpu_vreg(vgpu, offset);
+ write_vreg(vgpu, offset, p_data, bytes);
+ mask = vgpu_vreg(vgpu, offset) >> 16;
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+ (vgpu_vreg(vgpu, offset) & mask);
+
+ return 0;
+}
+
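
The masked-register convention implemented above (bits 31:16 of the written value select which of bits 15:0 take effect) is easy to misread. A minimal standalone sketch of the same update rule, with hypothetical register values, shows the effect:

	#include <stdint.h>
	#include <stdio.h>

	/* Same update rule as intel_vgpu_mask_mmio_write(): bits 31:16 of
	 * the written value are a mask selecting which of bits 15:0 take
	 * effect; unmasked bits keep their old value.
	 */
	static uint32_t masked_write(uint32_t old, uint32_t wr)
	{
		uint32_t mask = wr >> 16;

		return (old & ~mask) | (wr & mask);
	}

	int main(void)
	{
		uint32_t reg = 0x0000000f;		/* hypothetical vreg value */

		reg = masked_write(reg, 0x00010000);	/* clear bit 0 only */
		printf("0x%08x\n", reg);		/* 0x0000000e */
		reg = masked_write(reg, 0x00100010);	/* set bit 4 only */
		printf("0x%08x\n", reg);		/* 0x0000001e */
		return 0;
	}
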
+/**
* intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
* force-nopriv register
*
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index f6dd9f717888..5af11cf1b482 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -53,7 +53,7 @@ struct intel_gvt_mpt {
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr);
+ unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 7a041b368f68..5daa23ae566b 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- WARN_ON(!up_irq_info);
+ if (WARN_ON(!up_irq_info))
+ return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
@@ -580,7 +581,9 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+ } else if (IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- irq->ops = &gen8_irq_ops;
- irq->irq_map = gen8_irq_map;
- } else {
- WARN_ON(1);
- return -ENODEV;
- }
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
/* common event initialization */
init_events(irq);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index df4e4a07db3d..a45f46d8537f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -43,6 +43,8 @@
#include <linux/mdev.h>
#include <linux/debugfs.h>
+#include <linux/nospec.h>
+
#include "i915_drv.h"
#include "gvt.h"
@@ -94,6 +96,7 @@ struct gvt_dma {
struct rb_node dma_addr_node;
gfn_t gfn;
dma_addr_t dma_addr;
+ unsigned long size;
struct kref ref;
};
@@ -106,36 +109,90 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
-static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t *dma_addr)
+static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- struct page *page;
- unsigned long pfn;
+ int total_pages;
+ int npage;
int ret;
- /* Pin the page first. */
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
- if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
- gfn, ret);
- return -EINVAL;
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+
+ ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+ WARN_ON(ret != 1);
}
+}
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- return -EINVAL;
+/* Pin a normal or compound guest page for dma. */
+static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size, struct page **page)
+{
+ unsigned long base_pfn = 0;
+ int total_pages;
+ int npage;
+ int ret;
+
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ /*
+ * We pin the pages one-by-one to avoid allocating a big array
+ * on the stack to hold pfns.
+ */
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+ unsigned long pfn;
+
+ ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (ret != 1) {
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
+ cur_gfn, ret);
+ goto err;
+ }
+
+ if (!pfn_valid(pfn)) {
+ gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+ npage++;
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (npage == 0)
+ base_pfn = pfn;
+ else if (base_pfn + npage != pfn) {
+ gvt_vgpu_err("The pages are not continuous\n");
+ ret = -EINVAL;
+ npage++;
+ goto err;
+ }
}
+ *page = pfn_to_page(base_pfn);
+ return 0;
+err:
+ gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+ return ret;
+}
+
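
The helper above pins guest pages one at a time and unwinds on any failure, including a non-contiguous pfn. A minimal user-space sketch of the same accumulate-and-unwind pattern follows; pin_one() and unpin_one() are hypothetical stand-ins for the vfio_pin_pages()/vfio_unpin_pages() calls:

	#include <errno.h>

	#define PAGE_SIZE 4096UL

	/* Hypothetical stand-ins for vfio_pin_pages()/vfio_unpin_pages(). */
	int pin_one(unsigned long gfn, unsigned long *pfn);
	void unpin_one(unsigned long gfn);

	/* Pin size bytes starting at gfn, requiring a physically contiguous
	 * pfn range; on failure, unpin exactly what was already pinned.
	 */
	static int pin_guest_range(unsigned long gfn, unsigned long size,
				   unsigned long *base_pfn)
	{
		unsigned long total = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		unsigned long npage, pfn;
		int ret;

		for (npage = 0; npage < total; npage++) {
			ret = pin_one(gfn + npage, &pfn);
			if (ret)
				goto unwind;

			if (npage == 0) {
				*base_pfn = pfn;
			} else if (*base_pfn + npage != pfn) {
				npage++;	/* current page is pinned too */
				ret = -EINVAL;
				goto unwind;
			}
		}
		return 0;

	unwind:
		while (npage--)
			unpin_one(gfn + npage);
		return ret;
	}
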
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr, unsigned long size)
+{
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct page *page = NULL;
+ int ret;
+
+ ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
+ if (ret)
+ return ret;
+
/* Setup DMA mapping. */
- page = pfn_to_page(pfn);
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr)) {
- gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
+ gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
+ page_to_pfn(page), ret);
+ gvt_unpin_guest_page(vgpu, gfn, size);
return -ENOMEM;
}
@@ -143,14 +200,12 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- int ret;
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- WARN_ON(ret != 1);
+ dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ gvt_unpin_guest_page(vgpu, gfn, size);
}
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
@@ -191,7 +246,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
}
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
@@ -203,6 +258,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
new->vgpu = vgpu;
new->gfn = gfn;
new->dma_addr = dma_addr;
+ new->size = size;
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
@@ -260,7 +316,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
- gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
+ gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
mutex_unlock(&vgpu->vdev.cache_lock);
}
@@ -515,7 +571,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
if (!entry)
continue;
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
mutex_unlock(&vgpu->vdev.cache_lock);
@@ -611,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
- intel_gvt_ops->vgpu_deactivate(vgpu);
+ intel_gvt_ops->vgpu_release(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
@@ -1084,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
+ unsigned int i;
+ int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
size_t size;
int nr_areas = 1;
@@ -1169,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.index >= VFIO_PCI_NUM_REGIONS +
vgpu->vdev.num_regions)
return -EINVAL;
+ info.index =
+ array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
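
The array_index_nospec() added above follows the standard Spectre-v1 hardening recipe from <linux/nospec.h>: bounds-check the user-controlled index architecturally, then clamp it so a mispredicted branch cannot index out of bounds speculatively. A minimal sketch of the pattern, assuming a caller-supplied table:

	#include <linux/errno.h>
	#include <linux/nospec.h>
	#include <linux/types.h>

	/* Validate, then clamp under speculation, then dereference. */
	static int table_lookup(const u32 *table, unsigned int nr,
				unsigned int idx, u32 *out)
	{
		if (idx >= nr)
			return -EINVAL;

		idx = array_index_nospec(idx, nr);
		*out = table[idx];
		return 0;
	}
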
@@ -1195,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
&sparse->header, sizeof(*sparse) +
(sparse->nr_areas *
sizeof(*sparse->areas)));
- kfree(sparse);
- if (ret)
+ if (ret) {
+ kfree(sparse);
return ret;
+ }
break;
default:
+ kfree(sparse);
return -EINVAL;
}
}
@@ -1215,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
+ kfree(sparse);
return -EFAULT;
}
info.cap_offset = sizeof(info);
@@ -1223,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
kfree(caps.buf);
}
+ kfree(sparse);
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
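
The kfree(sparse) additions above plug a leak caused by several exit paths each owning the allocation. A simplified sketch of the single-exit idiom such code usually converges to; the names are illustrative, not the driver's:

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	/* Allocate once, free at one point, so no exit path can leak. */
	static long fill_and_copy(void __user *uptr, size_t len)
	{
		void *buf;
		long ret = 0;

		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... populate buf ... */

		if (copy_to_user(uptr, buf, len))
			ret = -EFAULT;

		kfree(buf);
		return ret;
	}
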
@@ -1560,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- mutex_init(&vgpu->dmabuf_lock);
init_completion(&vgpu->vblank_done);
info->track_node.track_write = kvmgt_page_track_write;
@@ -1648,7 +1713,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
}
int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr)
+ unsigned long size, dma_addr_t *dma_addr)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
@@ -1665,11 +1730,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
entry = __gvt_cache_find_gfn(info->vgpu, gfn);
if (!entry) {
- ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1681,7 +1746,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
return 0;
err_unmap:
- gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+ gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
mutex_unlock(&info->vgpu->vdev.cache_lock);
return ret;
@@ -1691,7 +1756,8 @@ static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
- gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(entry->vgpu, entry);
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index b31eb36fc102..994366035364 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
return;
gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (reg_is_mmio(gvt, offset)) {
if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
memcpy(pt, p_data, bytes);
}
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -156,7 +156,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -220,7 +220,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 71b620875943..1ffc69eba30e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -42,15 +42,16 @@ struct intel_vgpu;
#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
+#define D_BXT (1 << 3)
-#define D_GEN9PLUS (D_SKL | D_KBL)
-#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
-#define D_SKL_PLUS (D_SKL | D_KBL)
-#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
#define D_PRE_SKL (D_BDW)
-#define D_ALL (D_BDW | D_SKL | D_KBL)
+#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
@@ -98,4 +99,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
#endif
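
With D_BXT folded into the compound masks, every entry previously registered under D_SKL_PLUS, D_BDW_PLUS, or D_ALL now also matches Broxton. A tiny standalone sketch of the matching rule these masks imply (the helper is hypothetical, not a gvt API):

	#include <stdbool.h>
	#include <stdint.h>

	#define D_BDW		(1u << 0)
	#define D_SKL		(1u << 1)
	#define D_KBL		(1u << 2)
	#define D_BXT		(1u << 3)

	#define D_SKL_PLUS	(D_SKL | D_KBL | D_BXT)

	/* An MMIO entry applies to the running platform iff the platform's
	 * device bit is set in the entry's mask.
	 */
	static bool entry_applies(uint32_t entry_mask, uint32_t platform_bit)
	{
		return entry_mask & platform_bit;
	}
	/* entry_applies(D_SKL_PLUS, D_BXT) is true after this patch. */
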
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 0f949554d118..42e1e6bdcc2c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -446,9 +447,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
#define CTX_CONTEXT_CONTROL_VAL 0x03
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
{
- u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+ const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre,
u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on kabylake, it's initialized by lri command and
* save or restore with context together.
*/
- if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ && mmio->in_context)
continue;
// save
@@ -501,7 +505,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow_ctx, ring_id))
+ !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
continue;
if (mmio->mask)
@@ -574,14 +578,18 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ if (IS_SKYLAKE(gvt->dev_priv) ||
+ IS_KABYLAKE(gvt->dev_priv) ||
+ IS_BROXTON(gvt->dev_priv))
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->in_context)
+ if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 0439eb8057a8..5c3b9ff9f96a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 32ffcd566cdd..67f19992b226 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
/**
* intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
* @vgpu: a vGPU
- * @gpfn: guest pfn
+ * @gfn: guest pfn
+ * @size: page size
* @dma_addr: retrieve allocated dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_dma_map_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn,
+ struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
dma_addr_t *dma_addr)
{
- return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+ return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
dma_addr);
}
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 53e2bd79c97d..256d0db8bbb1 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
- struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
}
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d053cbe1dc94..09d7bb72b4ff 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
ktime_t cur_time;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
cur_time = ktime_get();
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
tbs_sched_func(sched_data);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
+ int ret;
+
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops = &tbs_schedule_ops;
+ ret = gvt->scheduler.sched_ops->init(gvt);
+ mutex_unlock(&gvt->sched_lock);
- return gvt->scheduler.sched_ops->init(gvt);
+ return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops->clean(gvt);
+ mutex_unlock(&gvt->sched_lock);
}
+/* For the per-vgpu scheduler policy there are two pieces of per-vgpu
+ * data: sched_data and sched_ctl. We treat them as part of the global
+ * scheduler, protected by gvt->sched_lock. Callers decide whether
+ * vgpu_lock should be held outside.
+ */
+
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
- return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ int ret;
+
+ mutex_lock(&vgpu->gvt->sched_lock);
+ ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
+
+ return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->sched_lock);
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ mutex_lock(&vgpu->gvt->sched_lock);
if (!vgpu_data->active) {
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+ mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+ mutex_lock(&vgpu->gvt->sched_lock);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
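
Taken together with the vgpu_lock changes elsewhere in this series, the sched_lock wrappers above imply a nesting order: vgpu->vgpu_lock outside gvt->sched_lock, as in complete_current_workload(). A sketch of that ordering modelled with pthread mutexes; this illustrates the implied hierarchy under those assumptions, not a definitive lock-ordering document:

	#include <pthread.h>

	struct gvt { pthread_mutex_t sched_lock; };
	struct vgpu { pthread_mutex_t vgpu_lock; struct gvt *gvt; };

	/* Per-vGPU work that also touches scheduler state takes the
	 * per-vGPU lock first, then the scheduler lock, and releases
	 * in reverse order.
	 */
	static void complete_one(struct vgpu *v)
	{
		pthread_mutex_lock(&v->vgpu_lock);
		pthread_mutex_lock(&v->gvt->sched_lock);
		/* ... per-vGPU completion and scheduler bookkeeping ... */
		pthread_mutex_unlock(&v->gvt->sched_lock);
		pthread_mutex_unlock(&v->vgpu_lock);
	}
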
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c2d183b91500..43aa058e29fc 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -45,20 +45,16 @@ static void set_context_pdp_root_pointer(
struct execlist_ring_context *ring_context,
u32 pdp[8])
{
- struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
int i;
for (i = 0; i < 8; i++)
- pdp_pair[i].val = pdp[7 - i];
+ ring_context->pdps[i].val = pdp[7 - i];
}
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -128,9 +124,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -205,7 +200,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static inline bool is_gvt_request(struct i915_request *req)
{
- return i915_gem_context_force_single_submission(req->ctx);
+ return i915_gem_context_force_single_submission(req->gem_context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +275,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc = 0;
desc = ce->lrc_desc;
@@ -292,7 +285,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
- desc |= ctx->desc_template & ((1ULL << 12) - 1);
+ desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
ce->lrc_desc = desc;
}
@@ -300,12 +293,12 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_request *req = workload->req;
void *shadow_ring_buffer_va;
u32 *cs;
- struct i915_request *req = workload->req;
- if (IS_KABYLAKE(req->i915) &&
- is_inhibit_context(req->ctx, req->engine->id))
+ if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+ && is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_ring *ring;
+ struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+ struct intel_context *ce;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (workload->shadowed)
+ if (workload->req)
return 0;
+ /* GVT pins the shadow context itself even though it will also be
+ * pinned when i915 allocates a request. This is because GVT updates
+ * the guest context from the shadow context when the workload
+ * completes, and by then i915 may already have unpinned the shadow
+ * context, invalidating the shadow_ctx pages. After updating the
+ * guest context, GVT can unpin shadow_ctx safely.
+ */
+ ce = intel_context_pin(shadow_ctx, engine);
+ if (IS_ERR(ce)) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ return PTR_ERR(ce);
+ }
+
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(shadow_ctx,
- dev_priv->engine[ring_id]);
+ if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(ce);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto err_scan;
+ goto err_unpin;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto err_scan;
+ goto err_shadow;
}
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
+ rq = i915_request_alloc(engine, shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
goto err_shadow;
}
+ workload->req = i915_request_get(rq);
ret = populate_shadow_context(workload);
if (ret)
- goto err_unpin;
- workload->shadowed = true;
- return 0;
+ goto err_req;
-err_unpin:
- intel_context_unpin(shadow_ctx, engine);
+ return 0;
+err_req:
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
- return ret;
-}
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
- int ring_id = workload->ring_id;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct i915_request *rq;
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ret;
-
- rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_unpin;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_request_get(rq);
- ret = copy_workload_to_ring_buffer(workload);
- if (ret)
- goto err_unpin;
- return 0;
-
err_unpin:
- intel_context_unpin(shadow_ctx, engine);
- release_shadow_wa_ctx(&workload->wa_ctx);
+ intel_context_unpin(ce);
return ret;
}
@@ -508,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
i915_gem_obj_finish_shmem_access(bb->obj);
bb->accessing = false;
- i915_vma_move_to_active(bb->vma, workload->req, 0);
+ ret = i915_vma_move_to_active(bb->vma,
+ workload->req,
+ 0);
+ if (ret)
+ goto err;
}
}
return 0;
@@ -517,21 +489,13 @@ err:
return ret;
}
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- struct intel_vgpu_workload *workload = container_of(wa_ctx,
- struct intel_vgpu_workload,
- wa_ctx);
- int ring_id = workload->ring_id;
- struct intel_vgpu_submission *s = &workload->vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
- struct execlist_ring_context *shadow_ring_context;
- struct page *page;
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ struct intel_vgpu_workload *workload =
+ container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+ struct i915_request *rq = workload->req;
+ struct execlist_ring_context *shadow_ring_context =
+ (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +503,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
- kunmap_atomic(shadow_ring_context);
- return 0;
}
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -633,7 +594,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
- ret = intel_gvt_generate_request(workload);
+ ret = copy_workload_to_ring_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to generate request\n");
goto err_unpin_mm;
@@ -670,16 +631,14 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- int ret = 0;
+ int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
+ mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -687,10 +646,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
goto out;
ret = prepare_workload(workload);
- if (ret) {
- intel_context_unpin(shadow_ctx, engine);
- goto out;
- }
out:
if (ret)
@@ -704,6 +659,7 @@ out:
}
mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -713,7 +669,7 @@ static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
/*
* no current vgpu / will be scheduled out / no workload
@@ -759,33 +715,29 @@ static struct intel_vgpu_workload *pick_next_workload(
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
return workload;
}
static void update_guest_context(struct intel_vgpu_workload *workload)
{
+ struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ring_id = workload->ring_id;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+ workload->ctx_desc.lrca);
+ context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
context_page_num = 19;
i = 2;
@@ -832,7 +784,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
kunmap(page);
}
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -858,19 +811,17 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *rq = workload->req;
int event;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->sched_lock);
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
*/
- if (workload->req) {
- struct drm_i915_private *dev_priv =
- workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine =
- dev_priv->engine[workload->ring_id];
+ if (rq) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -886,8 +837,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- i915_request_put(fetch_and_zero(&workload->req));
-
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
update_guest_context(workload);
@@ -896,10 +845,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+
/* unpin shadow ctx as the shadow_ctx update is done */
- intel_context_unpin(s->shadow_ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&rq->i915->drm.struct_mutex);
+ intel_context_unpin(rq->hw_context);
+ mutex_unlock(&rq->i915->drm.struct_mutex);
+
+ i915_request_put(fetch_and_zero(&workload->req));
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -928,7 +880,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
}
workload->complete(workload);
@@ -939,7 +891,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
struct workload_thread_param {
@@ -957,7 +910,8 @@ static int workload_thread(void *priv)
struct intel_vgpu *vgpu = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv);
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@@ -991,9 +945,7 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_lock(&gvt->lock);
ret = dispatch_workload(workload);
- mutex_unlock(&gvt->lock);
if (ret) {
vgpu = workload->vgpu;
@@ -1130,7 +1082,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
if (!s->active)
return;
- clean_workloads(vgpu, engine_mask);
+ intel_vgpu_clean_workloads(vgpu, engine_mask);
s->ops->reset(vgpu, engine_mask);
}
@@ -1270,7 +1222,6 @@ alloc_workload(struct intel_vgpu *vgpu)
atomic_set(&workload->shadow_ctx_active, 0);
workload->status = -EINPROGRESS;
- workload->shadowed = false;
workload->vgpu = vgpu;
return workload;
@@ -1285,7 +1236,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 gpa;
int i;
- gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+ gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 6c644782193e..ca5529d0e48e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
- bool shadowed;
int status;
struct intel_vgpu_mm *shadow_mm;
@@ -159,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 572a18c2bfb5..a4e8e3cf74fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
@@ -58,6 +59,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -218,27 +222,43 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
* @vgpu: virtual GPU
*
* This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
*
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
- struct intel_gvt *gvt = vgpu->gvt;
-
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = false;
if (atomic_read(&vgpu->submission.running_workload_num)) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_stop_schedule(vgpu);
- intel_vgpu_dmabuf_cleanup(vgpu);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
+}
+
+/**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_gvt_deactivate_vgpu(vgpu);
+
+ mutex_lock(&vgpu->vgpu_lock);
+ intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+ intel_vgpu_dmabuf_cleanup(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -252,14 +272,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
WARN(vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
- idr_remove(&gvt->vgpu_idr, vgpu->id);
- if (idr_is_empty(&gvt->vgpu_idr))
- intel_gvt_clean_irq(gvt);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
@@ -269,10 +286,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- vfree(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->lock);
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+ if (idr_is_empty(&gvt->vgpu_idr))
+ intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
+
+ vfree(vgpu);
}
#define IDLE_VGPU_IDR 0
@@ -298,6 +321,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->id = IDLE_VGPU_IDR;
vgpu->gvt = gvt;
+ mutex_init(&vgpu->vgpu_lock);
for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +348,10 @@ out_free_vgpu:
*/
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->vgpu_lock);
intel_vgpu_clean_sched_policy(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+
vfree(vgpu);
}
@@ -342,8 +369,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (!vgpu)
return ERR_PTR(-ENOMEM);
- mutex_lock(&gvt->lock);
-
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
@@ -353,6 +378,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
+ mutex_init(&vgpu->vgpu_lock);
+ mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
@@ -400,8 +427,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- mutex_unlock(&gvt->lock);
-
return vgpu;
out_clean_sched_policy:
@@ -424,7 +449,6 @@ out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
- mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
@@ -456,12 +480,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+ mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (IS_ERR(vgpu))
- return vgpu;
-
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
+ if (!IS_ERR(vgpu))
+ /* calculate left instance change for types */
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
return vgpu;
}
@@ -473,7 +497,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* @engine_mask: engines to reset for GT reset
*
* This function is called when user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
*
* vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
* the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +537,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +579,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
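The hunks above split the old single gvt->lock into a per-vGPU vgpu_lock for device state, keeping gvt->lock only for structures shared across vGPUs (the idr and the type bookkeeping). A minimal userspace sketch of that lock-splitting pattern, with the same teardown ordering as intel_gvt_destroy_vgpu() (names hypothetical, not the GVT API):

#include <pthread.h>
#include <stdlib.h>

struct registry {
    pthread_mutex_t lock;       /* guards the shared object table */
};

struct object {
    pthread_mutex_t lock;       /* guards this object's own state */
    struct registry *reg;
    int id;
};

/*
 * Tear down per-object state under the object lock, then drop it and
 * take the registry lock only for the shared bookkeeping, so teardown
 * of one object never serializes against unrelated objects.
 */
static void object_destroy(struct object *obj)
{
    pthread_mutex_lock(&obj->lock);
    /* ... per-object cleanup (display, mmio, dmabuf, ...) ... */
    pthread_mutex_unlock(&obj->lock);

    pthread_mutex_lock(&obj->reg->lock);
    /* ... remove obj->id from the table, update shared counters ... */
    pthread_mutex_unlock(&obj->reg->lock);

    free(obj);                  /* mirrors the vfree() after unlock */
}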
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 13e7b9e4a6e6..f9ce35da4123 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
- if (ppgtt->base.file != stats->file_priv)
+ if (ppgtt->vm.file != stats->file_priv)
continue;
}
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%pa] gtt total\n",
- ggtt->base.total, &ggtt->mappable_end);
+ ggtt->vm.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct i915_request,
client_link);
rcu_read_lock();
- task = pid_task(request && request->ctx->pid ?
- request->ctx->pid : file->pid,
+ task = pid_task(request && request->gem_context->pid ?
+ request->gem_context->pid : file->pid,
PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
- pm_mask = I915_READ(GEN6_PMINTRMSK);
- } else {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalent of the PM ISR & IIR cannot be read
+ * without affecting the current state of the system
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
pm_ier = I915_READ(GEN8_GT_IER(2));
pm_imr = I915_READ(GEN8_GT_IMR(2));
pm_isr = I915_READ(GEN8_GT_ISR(2));
pm_iir = I915_READ(GEN8_GT_IIR(2));
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ } else {
+ pm_ier = I915_READ(GEN6_PMIER);
+ pm_imr = I915_READ(GEN6_PMIMR);
+ pm_isr = I915_READ(GEN6_PMISR);
+ pm_iir = I915_READ(GEN6_PMIIR);
}
+ pm_mask = I915_READ(GEN6_PMINTRMSK);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
- pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(dev_priv) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1205,7 +1218,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
+ seq_printf(m, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1213,7 +1227,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
+ seq_printf(m, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -1346,11 +1361,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
- seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
+ seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)),
- yesno(engine->hangcheck.stalled));
+ yesno(engine->hangcheck.stalled),
+ yesno(engine->hangcheck.wedged));
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -1645,11 +1661,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
else
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
- if (fbc->work.scheduled)
- seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
- fbc->work.scheduled_vblank,
- drm_crtc_vblank_count(&fbc->crtc->base));
-
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1895,7 +1906,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, fbdev_fb->obj);
+ describe_obj(m, intel_fb_obj(&fbdev_fb->base));
seq_putc(m, '\n');
}
#endif
@@ -1913,7 +1924,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, fb->obj);
+ describe_obj(m, intel_fb_obj(&fb->base));
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -2209,6 +2220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2252,13 +2264,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power));
+ rps_power_to_str(rps->power.mode));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->up_threshold);
+ rps->power.up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->down_threshold);
+ rps->power.down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
}
@@ -2523,7 +2535,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- *val = intel_guc_log_level_get(&dev_priv->guc.log);
+ *val = intel_guc_log_get_level(&dev_priv->guc.log);
return 0;
}
@@ -2535,7 +2547,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_level_set(&dev_priv->guc.log, val);
+ return intel_guc_log_set_level(&dev_priv->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2583,31 +2595,9 @@ static const struct file_operations i915_guc_log_relay_fops = {
.release = i915_guc_log_relay_release,
};
-static const char *psr2_live_status(u32 val)
-{
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
-
- val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
- if (val < ARRAY_SIZE(live_status))
- return live_status[val];
-
- return "unknown";
-}
-
-static const char *psr_sink_status(u8 val)
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
+ u8 val;
static const char * const sink_status[] = {
"inactive",
"transition to active, capture and display",
@@ -2616,22 +2606,94 @@ static const char *psr_sink_status(u8 val)
"transition to inactive, capture and display, timing re-sync",
"reserved",
"reserved",
- "sink internal error"
+ "sink internal error",
};
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ int ret;
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- return sink_status[val];
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
- return "unknown";
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
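DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status) generates the debugfs glue around the _show() routine above; simplified, it roughly expands to:

static int i915_psr_sink_status_open(struct inode *inode, struct file *file)
{
    /* single_open() stashes the seq_file and binds the show callback */
    return single_open(file, i915_psr_sink_status_show, inode->i_private);
}

static const struct file_operations i915_psr_sink_status_fops = {
    .owner   = THIS_MODULE,
    .open    = i915_psr_sink_status_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

The inode->i_private here is the connector pointer handed to debugfs_create_file() further down, which is how m->private in the show routine ends up being a struct drm_connector *.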
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, psr_status;
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ psr_status = I915_READ(EDP_PSR2_STATUS);
+ val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ psr_status = I915_READ(EDP_PSR_STATUS);
+ val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ }
+
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}
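Both branches of psr_source_status() decode the same way: mask, shift, bounds-check against a string table, and fall back to "unknown". That pattern as a standalone, compilable sketch (mask/shift values invented):

#include <stdio.h>

#define STATE_MASK  0x7u
#define STATE_SHIFT 0

static const char * const names[] = { "IDLE", "CAPTURE", "SLEEP" };

static void print_status(unsigned int raw)
{
    unsigned int idx = (raw & STATE_MASK) >> STATE_SHIFT;
    const char *str = "unknown";        /* fallback for new hardware states */

    if (idx < sizeof(names) / sizeof(names[0]))
        str = names[idx];
    printf("status: 0x%x [%s]\n", raw, str);
}

int main(void)
{
    print_status(0x2);  /* -> status: 0x2 [SLEEP] */
    return 0;
}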
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
- u32 stat[3];
- enum pipe pipe;
bool enabled = false;
bool sink_support;
@@ -2649,50 +2711,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
- seq_printf(m, "Re-enable work scheduled: %s\n",
- yesno(work_busy(&dev_priv->psr.work.work)));
-
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled)
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
- else
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- } else {
- for_each_pipe(dev_priv, pipe) {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain))
- continue;
-
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
-
- intel_display_power_put(dev_priv, power_domain);
- }
- }
+ if (dev_priv->psr.psr2_enabled)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
seq_printf(m, "Main link in standby mode: %s\n",
yesno(dev_priv->psr.link_standby));
- seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
- if (!HAS_DDI(dev_priv))
- for_each_pipe(dev_priv, pipe) {
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- seq_printf(m, " pipe %c", pipe_name(pipe));
- }
- seq_puts(m, "\n");
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
/*
- * VLV/CHV PSR has no kind of performance counter
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2701,21 +2731,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
- if (dev_priv->psr.psr2_enabled) {
- u32 psr2 = I915_READ(EDP_PSR2_STATUS);
-
- seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
- psr2, psr2_live_status(psr2));
- }
- if (dev_priv->psr.enabled) {
- struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
- u8 val;
-
- if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
- psr_sink_status(val));
- }
+ psr_source_status(dev_priv, m);
mutex_unlock(&dev_priv->psr.lock);
if (READ_ONCE(dev_priv->psr.debug)) {
@@ -2762,86 +2779,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
-static int i915_sink_crc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp = NULL;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- u8 crc[6];
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
-
- for_each_intel_connector_iter(connector, &conn_iter) {
- struct drm_crtc *crtc;
- struct drm_connector_state *state;
- struct intel_crtc_state *crtc_state;
-
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
-retry:
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
- if (ret)
- goto err;
-
- state = connector->base.state;
- if (!state->best_encoder)
- continue;
-
- crtc = state->crtc;
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret)
- goto err;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- if (!crtc_state->base.active)
- continue;
-
- /*
- * We need to wait for all crtc updates to complete, to make
- * sure any pending modesets and plane updates are completed.
- */
- if (crtc_state->base.commit) {
- ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
-
- if (ret)
- goto err;
- }
-
- intel_dp = enc_to_intel_dp(state->best_encoder);
-
- ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
- if (ret)
- goto err;
-
- seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
- crc[0], crc[1], crc[2],
- crc[3], crc[4], crc[5]);
- goto out;
-
-err:
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- goto out;
- }
- ret = -ENODEV;
-out:
- drm_connector_list_iter_end(&conn_iter);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3398,28 +3335,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_workarounds *workarounds = &dev_priv->workarounds;
+ struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
int i;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for (i = 0; i < workarounds->count; ++i) {
- i915_reg_t addr;
- u32 mask, value, read;
- bool ok;
-
- addr = workarounds->reg[i].addr;
- mask = workarounds->reg[i].mask;
- value = workarounds->reg[i].value;
- read = I915_READ(addr);
- ok = (value & mask) == (read & mask);
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
- i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
- }
-
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "Workarounds applied: %d\n", wa->count);
+ for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
return 0;
}
@@ -4121,7 +4043,8 @@ fault_irq_set(struct drm_i915_private *i915,
err = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE);
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unlock;
@@ -4226,7 +4149,8 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (val & DROP_RETIRE)
i915_retire_requests(dev_priv);
@@ -4245,8 +4169,13 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE)
- drain_delayed_work(&dev_priv->gt.idle_work);
+ if (val & DROP_IDLE) {
+ do {
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ flush_delayed_work(&dev_priv->gt.retire_work);
+ drain_delayed_work(&dev_priv->gt.idle_work);
+ } while (READ_ONCE(dev_priv->gt.awake));
+ }
if (val & DROP_FREED)
i915_gem_drain_freed_objects(dev_priv);
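DROP_IDLE becomes a loop rather than a single drain: flush retirement while requests are still in flight, drain the idle worker, and repeat until the GT reports !awake, because the idle worker can re-arm itself. A userspace model of that quiesce loop with C11 atomics (the work bodies are stand-ins; drain_idle_work() is assumed to eventually park the device and clear `awake`):

#include <stdatomic.h>

static atomic_int  active_requests;
static atomic_bool awake;

static void flush_retire_work(void) { /* retire completed requests */ }
static void drain_idle_work(void)   { /* run idle handler to completion */ }

static void drop_idle(void)
{
    do {
        if (atomic_load(&active_requests))
            flush_retire_work();
        drain_idle_work();
        /* the idle handler may have queued more work; re-check */
    } while (atomic_load(&awake));
}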
@@ -4795,7 +4724,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
- {"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
@@ -4829,7 +4757,6 @@ static const struct i915_debugfs_files {
#endif
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
{"i915_next_seqno", &i915_next_seqno_fops},
- {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -4849,7 +4776,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
struct dentry *ent;
- int ret, i;
+ int i;
ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
minor->debugfs_root, to_i915(minor->dev),
@@ -4857,10 +4784,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
if (!ent)
return -ENOMEM;
- ret = intel_pipe_crc_create(minor);
- if (ret)
- return ret;
-
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ent = debugfs_create_file(i915_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
@@ -4982,9 +4905,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
debugfs_create_file("i915_dpcd", S_IRUGO, root,
connector, &i915_dpcd_fops);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9c449b8d8eab..f8cfd16be534 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,18 @@ bool __i915_inject_load_failure(const char *func, int line)
if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915_modparams.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure = 0;
return true;
}
return false;
}
+
+bool i915_error_injected(void)
+{
+ return i915_load_fail_count && !i915_modparams.inject_load_failure;
+}
+
#endif
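The injection trigger is now one-shot: when the chosen checkpoint fires, inject_load_failure is cleared, so i915_error_injected() can report "a failure was injected" exactly when checkpoints were counted but the trigger has been consumed. A compact userspace model of that state machine (variable names are stand-ins for the module parameter and counter):

#include <stdbool.h>

static unsigned int inject_at;   /* which checkpoint should fail; 0 = off */
static unsigned int fail_count;  /* checkpoints seen while armed */

static bool inject_failure(void)
{
    if (!inject_at)
        return false;
    if (++fail_count == inject_at) {
        inject_at = 0;           /* disarm after firing once */
        return true;
    }
    return false;
}

static bool error_injected(void)
{
    /* armed-and-fired: checkpoints were counted but the trigger is gone */
    return fail_count && !inject_at;
}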
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
@@ -97,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
+ if (is_error)
+ dev_printk(level, kdev, "%pV", &vaf);
+ else
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
if (is_error && !shown_bug_once) {
/*
@@ -110,25 +122,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
dev_notice(kdev, "%s", FDO_BUG_MSG);
shown_bug_once = true;
}
-
- va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
- return i915_modparams.inject_load_failure &&
- i915_load_fail_count == i915_modparams.inject_load_failure;
-#else
- return false;
-#endif
}
-#define i915_load_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, \
- i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
-
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
@@ -233,6 +228,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -246,14 +243,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
- /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
- * (which really amounts to a PCH but no South Display).
- */
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
- dev_priv->pch_type = PCH_NOP;
- return;
- }
-
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
@@ -282,18 +271,28 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
id = intel_virt_detect_pch(dev_priv);
- if (id) {
- pch_type = intel_pch_type(dev_priv, id);
- if (WARN_ON(pch_type == PCH_NONE))
- pch_type = PCH_NOP;
- } else {
- pch_type = PCH_NOP;
- }
+ pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (WARN_ON(id && pch_type == PCH_NONE))
+ id = 0;
+
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
}
}
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ }
+
if (!pch)
DRM_DEBUG_KMS("No PCH found.\n");
@@ -634,26 +633,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
- /* Flush any outstanding unpin_work. */
- i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_uc_fini_misc(dev_priv);
- i915_gem_cleanup_userptr(dev_priv);
-
- i915_gem_drain_freed_objects(dev_priv);
-
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -703,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_irq;
+ goto cleanup_modeset;
intel_setup_overlay(dev_priv);
@@ -723,6 +702,8 @@ cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
+cleanup_modeset:
+ intel_modeset_cleanup(dev);
cleanup_irq:
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
@@ -919,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
@@ -1173,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
- intel_opregion_setup(dev_priv);
-
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1189,6 +1167,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* get lost on g4x as well, and interrupt delivery seems to stay
* properly dead afterwards. So we'll just disable them for all
* pre-gen5 chipsets.
+ *
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy
+ * interrupts even when in MSI mode. This results in spurious
+ * interrupt warnings if the legacy irq no. is shared with another
+ * device. The kernel then disables that interrupt source and so
+ * prevents the other device from working properly.
*/
if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
@@ -1197,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = intel_gvt_init(dev_priv);
if (ret)
- goto err_ggtt;
+ goto err_msi;
+
+ intel_opregion_setup(dev_priv);
return 0;
+err_msi:
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
i915_ggtt_cleanup_hw(dev_priv);
err_perf:
@@ -1433,6 +1423,7 @@ out_fini:
drm_dev_fini(&dev_priv->drm);
out_free:
kfree(dev_priv);
+ pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -1553,17 +1544,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static int i915_drm_prepare(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ int err;
+
+ /*
+ * NB intel_display_suspend() may issue new requests after we've
+ * ostensibly marked the GPU as ready-to-sleep here. We need to
+ * split out that work and pull it forward so that after this point,
+ * the GPU is not woken again.
+ */
+ err = i915_gem_suspend(i915);
+ if (err)
+ dev_err(&i915->drm.pdev->dev,
+ "GEM idle failed, suspend/resume might fail\n");
+
+ return err;
+}
+
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
- int error;
-
- /* ignore lid events during suspend */
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_SUSPENDED;
- mutex_unlock(&dev_priv->modeset_restore_lock);
disable_rpm_wakeref_asserts(dev_priv);
@@ -1575,16 +1579,9 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- error = i915_gem_suspend(dev_priv);
- if (error) {
- dev_err(&pdev->dev,
- "GEM idle failed, resume might fail\n");
- goto out;
- }
-
intel_display_suspend(dev);
- intel_dp_mst_suspend(dev);
+ intel_dp_mst_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
@@ -1600,7 +1597,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1605,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_csr_ucode_suspend(dev_priv);
-out:
enable_rpm_wakeref_asserts(dev_priv);
- return error;
+ return 0;
}
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1618,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
+ i915_gem_suspend_late(dev_priv);
+
intel_display_set_init_power(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
/*
* In case of firmware assisted context save/restore don't manually
@@ -1710,6 +1708,8 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(dev_priv);
intel_sanitize_gt_powersave(dev_priv);
+ i915_gem_sanitize(dev_priv);
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
@@ -1746,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_dp_mst_resume(dev);
+ intel_dp_mst_resume(dev_priv);
intel_display_resume(dev);
@@ -1764,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_DONE;
- mutex_unlock(&dev_priv->modeset_restore_lock);
-
intel_opregion_notify_adapter(dev_priv, PCI_D0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1851,7 +1847,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
else
intel_display_set_init_power(dev_priv, true);
- i915_gem_sanitize(dev_priv);
+ intel_engines_sanitize(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2081,6 +2077,22 @@ out:
return ret;
}
+static int i915_pm_prepare(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_prepare(dev);
+}
+
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2743,7 @@ const struct dev_pm_ops i915_pm_ops = {
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
+ .prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
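Idling the GPU moves out of the suspend callback into the new ->prepare hook; the PM core runs every device's ->prepare before any ->suspend, so by the time intel_display_suspend() runs the GPU is already quiesced. A skeleton of wiring such a hook (driver name hypothetical):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_pm_prepare(struct device *dev)
{
    /*
     * Quiesce the hardware so the later suspend phases cannot be
     * raced by new work. Returning a negative error here aborts
     * the whole system suspend.
     */
    return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
    .prepare = foo_pm_prepare,
    /* .suspend, .suspend_late, .resume_early, .resume, ... */
};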
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7014a96546f4..4aca5344863d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
@@ -85,8 +86,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180514"
-#define DRIVER_TIMESTAMP 1526300884
+#define DRIVER_DATE "20180719"
+#define DRIVER_TIMESTAMP 1532015279
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -107,13 +108,24 @@
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+
+bool i915_error_injected(void);
+
#else
+
#define i915_inject_load_failure() false
+#define i915_error_injected() false
+
#endif
+#define i915_load_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
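The relocated i915_load_error() now selects its severity from the global i915_error_injected() instead of threading dev_priv through the check. The same severity-selecting macro shape in plain C (the logging sink and predicate are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool error_injected(void) { return false; }  /* stand-in */

#define load_error(fmt, ...) \
    fprintf(stderr, "[%s] " fmt, \
            error_injected() ? "debug" : "error", ##__VA_ARGS__)

int main(void)
{
    load_error("GuC firmware load failed with %d\n", -5);
    return 0;
}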
typedef struct {
uint32_t val;
} uint_fixed_16_16_t;
@@ -287,7 +299,6 @@ struct i915_hotplug {
u32 event_bits;
struct delayed_work reenable_work;
- struct intel_digital_port *irq_port[I915_MAX_PORTS];
u32 long_port_mask;
u32 short_port_mask;
struct work_struct dig_port_work;
@@ -500,6 +511,7 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool flip_pending;
bool underrun_detected;
struct work_struct underrun_work;
@@ -567,12 +579,6 @@ struct intel_fbc {
unsigned int gen9_wa_cfb_stride;
} params;
- struct intel_fbc_work {
- bool scheduled;
- u64 scheduled_vblank;
- struct work_struct work;
- } work;
-
const char *no_fbc_reason;
};
@@ -608,26 +614,17 @@ struct i915_psr {
bool sink_support;
struct intel_dp *enabled;
bool active;
- struct delayed_work work;
+ struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
bool alpm;
- bool has_hw_tracking;
bool psr2_enabled;
u8 sink_sync_latency;
bool debug;
ktime_t last_entry_attempt;
ktime_t last_exit;
-
- void (*enable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*disable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*enable_sink)(struct intel_dp *);
- void (*activate)(struct intel_dp *);
- void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
enum intel_pch {
@@ -639,7 +636,7 @@ enum intel_pch {
PCH_KBP, /* Kaby Lake PCH */
PCH_CNP, /* Cannon Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP,
+ PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -652,6 +649,7 @@ enum intel_sbi_destination {
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
struct intel_fbdev;
struct intel_fbc_work;
@@ -781,11 +779,17 @@ struct intel_rps {
u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
- u8 up_threshold; /* Current %busy required to uplock */
- u8 down_threshold; /* Current %busy required to downclock */
-
int last_adj;
- enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
bool enabled;
atomic_t num_waiters;
@@ -954,7 +958,7 @@ struct i915_gem_mm {
/**
* Small stash of WC pages
*/
- struct pagevec wc_stash;
+ struct pagestash wc_stash;
/**
* tmpfs instance used for shmem backed objects
@@ -1002,16 +1006,13 @@ struct i915_gem_mm {
#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
-enum modeset_restore {
- MODESET_ON_LID_OPEN,
- MODESET_DONE,
- MODESET_SUSPENDED,
-};
+#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
#define DDC_PIN_B 0x05
@@ -1056,9 +1057,9 @@ struct intel_vbt_data {
/* Feature bits */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int panel_type:4;
@@ -1074,7 +1075,6 @@ struct intel_vbt_data {
int vswing;
bool low_vswing;
bool initialized;
- bool support;
int bpp;
struct edp_power_seq pps;
} edp;
@@ -1085,8 +1085,8 @@ struct intel_vbt_data {
bool require_aux_wakeup;
int idle_frames;
enum psr_lines_to_wait lines_to_wait;
- int tp1_wakeup_time;
- int tp2_tp3_wakeup_time;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1271,20 +1271,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_MAX,
};
-struct intel_pipe_crc_entry {
- uint32_t frame;
- uint32_t crc[5];
-};
-
#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
spinlock_t lock;
- bool opened; /* exclusive access to the result file */
- struct intel_pipe_crc_entry *entries;
- enum intel_pipe_crc_source source;
- int head, tail;
- wait_queue_head_t wq;
int skipped;
+ enum intel_pipe_crc_source source;
};
struct i915_frontbuffer_tracking {
@@ -1299,7 +1290,7 @@ struct i915_frontbuffer_tracking {
};
struct i915_wa_reg {
- i915_reg_t addr;
+ u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
@@ -1739,12 +1730,9 @@ struct drm_i915_private {
unsigned long quirks;
- enum modeset_restore modeset_restore;
- struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
- struct list_head vm_list; /* Global list of all address spaces */
struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
@@ -1850,6 +1838,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
@@ -1957,7 +1946,9 @@ struct drm_i915_private {
*/
struct i915_perf_stream *exclusive_stream;
+ struct intel_context *pinned_ctx;
u32 specific_ctx_id;
+ u32 specific_ctx_id_mask;
struct hrtimer poll_check_timer;
wait_queue_head_t poll_wq;
@@ -2245,9 +2236,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
**/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
@@ -2313,6 +2301,7 @@ intel_info(const struct drm_i915_private *dev_priv)
}
#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
@@ -2565,17 +2554,10 @@ intel_info(const struct drm_i915_private *dev_priv)
(IS_CANNONLAKE(dev_priv) || \
IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
-/*
- * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
- * even when in MSI mode. This results in spurious interrupt warnings if the
- * legacy irq no. is shared with another device. The kernel then disables that
- * interrupt source and so prevents the other device from working properly.
- *
- * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
- * interrupts.
- */
-#define HAS_AUX_IRQ(dev_priv) true
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
+#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+ IS_GEMINILAKE(dev_priv) || \
+ IS_KABYLAKE(dev_priv))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -2750,14 +2732,14 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
- enum hpd_pin pin);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3104,9 +3086,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -3171,12 +3150,14 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags);
+ unsigned int flags, long timeout);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
@@ -3215,7 +3196,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
- return container_of(vm, struct i915_hw_ppgtt, base);
+ return container_of(vm, struct i915_hw_ppgtt, vm);
}
/* i915_gem_fence_reg.c */
@@ -3322,7 +3303,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3447,6 +3428,8 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
+ bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
@@ -3680,14 +3663,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
-static inline unsigned long
-timespec_to_jiffies_timeout(const struct timespec *value)
-{
- unsigned long j = timespec_to_jiffies(value);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
/*
* If you need to wait X milliseconds between events A and B, but event B
* doesn't happen exactly after event A, you record the timestamp (jiffies) of
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d44ad7bc1e94..fcc73a6ab503 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+ return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
void i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
void i915_gem_unpark(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
@@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- pinned = ggtt->base.reserved;
+ pinned = ggtt->vm.reserved;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = ggtt->base.total;
+ args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -796,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
* that was!).
*/
- wmb();
+ i915_gem_chipset_flush(dev_priv);
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
@@ -831,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
}
break;
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
case I915_GEM_DOMAIN_CPU:
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
@@ -1217,9 +1227,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
@@ -1240,8 +1250,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1420,9 +1429,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
@@ -1449,8 +1458,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1619,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto err;
}
+ /* Writes not allowed into this read-only object */
+ if (i915_gem_object_is_readonly(obj)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -ENODEV;
@@ -1991,9 +2005,9 @@ compute_partial_view(struct drm_i915_gem_object *obj,
* The current feature set supported by i915_gem_fault() and thus GTT mmaps
* is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
*/
-int i915_gem_fault(struct vm_fault *vmf)
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
-#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
@@ -2002,9 +2016,12 @@ int i915_gem_fault(struct vm_fault *vmf)
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
struct i915_vma *vma;
pgoff_t page_offset;
- unsigned int flags;
int ret;
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
@@ -2038,27 +2055,34 @@ int i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
- /* If the object is smaller than a couple of partial vma, it is
- * not worth only creating a single partial vma - we may as well
- * clear enough space for the full object.
- */
- flags = PIN_MAPPABLE;
- if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
- flags |= PIN_NONBLOCK | PIN_NONFAULT;
/* Now pin it into the GTT as needed */
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK |
+ PIN_NONFAULT);
if (IS_ERR(vma)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+ unsigned int flags;
+
+ flags = PIN_MAPPABLE;
+ if (view.type == I915_GGTT_VIEW_NORMAL)
+ flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
- /* Userspace is now writing through an untracked VMA, abandon
+ /*
+ * Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ if (IS_ERR(vma) && !view.type) {
+ flags = PIN_MAPPABLE;
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ }
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
@@ -2108,10 +2132,9 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = VM_FAULT_SIGBUS;
- break;
- }
+ if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ return VM_FAULT_SIGBUS;
+ /* else: fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
@@ -2126,21 +2149,16 @@ err:
* EBUSY is ok: this just means that another thread
* already did the job.
*/
- ret = VM_FAULT_NOPAGE;
- break;
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
+ return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
}
- return ret;
}
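With the vm_fault_t conversion the error switch returns a fault code per arm instead of accumulating one in a local. The errno-to-fault mapping, standalone (fault codes reduced to an enum; the kernel's WARN_ONCE in the default arm elided):

#include <errno.h>

enum fault { FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

static enum fault errno_to_fault(int err)
{
    switch (err) {
    case -EAGAIN:       /* GPU hung: page in now, retry the access later */
    case 0:
    case -ERESTARTSYS:
    case -EINTR:
    case -EBUSY:
        return FAULT_NOPAGE;
    case -ENOMEM:
        return FAULT_OOM;
    case -ENOSPC:
    case -EFAULT:
    default:
        return FAULT_SIGBUS;
    }
}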
static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
@@ -2259,7 +2277,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
/* Attempt to reap some mmap space from dead objects */
do {
- err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+ err = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
break;
@@ -2404,29 +2424,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
- if (i915_gem_object_has_pinned_pages(obj))
- return;
-
- GEM_BUG_ON(obj->bind_count);
- if (!i915_gem_object_has_pages(obj))
- return;
-
- /* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
- goto unlock;
-
- /* ->put_pages might need to allocate memory for the bit17 swizzle
- * array, hence protect them from being reaped by removing them from gtt
- * lists early. */
pages = fetch_and_zero(&obj->mm.pages);
- GEM_BUG_ON(!pages);
+ if (!pages)
+ return NULL;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
@@ -2445,12 +2451,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
}
__i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+ return pages;
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
+
+ GEM_BUG_ON(obj->bind_count);
+ if (!i915_gem_object_has_pages(obj))
+ return;
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
+
+ /*
+ * ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early.
+ */
+ pages = __i915_gem_object_unset_pages(obj);
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
- obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
unlock:
mutex_unlock(&obj->mm.lock);
}
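The refactor isolates "detach the pages from the object" (__i915_gem_object_unset_pages(), which steals obj->mm.pages and unlinks the bookkeeping) from "release them", so a later caller can take ownership without freeing. The steal-then-teardown idiom in miniature (types and helpers invented):

#include <stddef.h>
#include <stdlib.h>

struct pages  { int nents; };
struct object { struct pages *pages; };

/* Take ownership of obj->pages and leave the object fully detached. */
static struct pages *unset_pages(struct object *obj)
{
    struct pages *p = obj->pages;   /* fetch_and_zero() equivalent */

    obj->pages = NULL;
    if (!p)
        return NULL;
    /* ... unlink from shrinker/bound lists, reset page iterators ... */
    return p;
}

static void put_pages(struct object *obj)
{
    struct pages *p = unset_pages(obj);

    if (p)
        free(p);                    /* stand-in for obj->ops->put_pages() */
}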
@@ -2968,16 +2999,16 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score, yesno(banned && bannable));
-
/* Cool contexts don't accumulate client ban score */
if (!bannable)
return;
- if (banned)
+ if (banned) {
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score);
i915_gem_context_set_banned(ctx);
+ }
if (!IS_ERR_OR_NULL(ctx->file_priv))
i915_gem_client_mark_guilty(ctx->file_priv, ctx);
@@ -3025,7 +3056,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct i915_request *request = NULL;
+ struct i915_request *request;
/*
* During the reset sequence, we must prevent the engine from
@@ -3036,52 +3067,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
*/
intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
- /*
- * Prevent the signaler thread from updating the request
- * state (by calling dma_fence_signal) as we are processing
- * the reset. The write from the GPU of the seqno is
- * asynchronous and the signaler thread may see a different
- * value to us and declare the request complete, even though
- * the reset routine have picked that request as the active
- * (incomplete) request. This conflict is not handled
- * gracefully!
- */
- kthread_park(engine->breadcrumbs.signaler);
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- *
- * Note that this needs to be a single atomic operation on the
- * tasklet (flush existing tasks, prevent new tasks) to prevent
- * a race between reset and set-wedged. It is not, so we do the best
- * we can atm and make sure we don't lock the machine up in the more
- * common case of recursively being called from set-wedged from inside
- * i915_reset.
- */
- if (!atomic_read(&engine->execlists.tasklet.count))
- tasklet_kill(&engine->execlists.tasklet);
- tasklet_disable(&engine->execlists.tasklet);
-
- /*
- * We're using worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- request = i915_gem_find_active_request(engine);
+ request = engine->reset.prepare(engine);
if (request && request->fence.error == -EIO)
request = ERR_PTR(-EIO); /* Previous reset failed! */
@@ -3111,43 +3097,24 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
return err;
}
-static void skip_request(struct i915_request *request)
-{
- void *vaddr = request->ring->vaddr;
- u32 head;
-
- /* As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = request->head;
- if (request->postfix < head) {
- memset(vaddr + head, 0, request->ring->size - head);
- head = 0;
- }
- memset(vaddr + head, 0, request->postfix - head);
-
- dma_fence_set_error(&request->fence, -EIO);
-}
-
static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct i915_gem_context *hung_ctx = request->ctx;
+ struct i915_gem_context *hung_ctx = request->gem_context;
struct i915_timeline *timeline = request->timeline;
unsigned long flags;
GEM_BUG_ON(timeline == &engine->timeline);
spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline.requests, link)
- if (request->ctx == hung_ctx)
- skip_request(request);
+ if (request->gem_context == hung_ctx)
+ i915_request_skip(request, -EIO);
list_for_each_entry(request, &timeline->requests, link)
- skip_request(request);
+ i915_request_skip(request, -EIO);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
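
The deleted skip_request() is replaced by i915_request_skip(request, -EIO), which presumably keeps the same ring-clearing behaviour: the user payload between head and postfix may wrap past the end of the ring buffer, so it is cleared in up to two memset() spans while the breadcrumb at postfix is left intact for fence signalling. The wrap logic, restated from the removed code:

static void clear_ring_payload(void *vaddr, u32 size, u32 head, u32 postfix)
{
	/* If the payload wraps, first clear to the end of the ring... */
	if (postfix < head) {
		memset(vaddr + head, 0, size - head);
		head = 0;
	}

	/* ...then clear up to, but not including, the breadcrumb. */
	memset(vaddr + head, 0, postfix - head);
}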
@@ -3189,11 +3156,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
if (stalled) {
- i915_gem_context_mark_guilty(request->ctx);
- skip_request(request);
+ i915_gem_context_mark_guilty(request->gem_context);
+ i915_request_skip(request, -EIO);
/* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->ctx))
+ if (i915_gem_context_is_banned(request->gem_context))
engine_skip_context(request);
} else {
/*
@@ -3203,15 +3170,17 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
*/
request = i915_gem_find_active_request(engine);
if (request) {
- i915_gem_context_mark_innocent(request->ctx);
+ unsigned long flags;
+
+ i915_gem_context_mark_innocent(request->gem_context);
dma_fence_set_error(&request->fence, -EAGAIN);
/* Rewind the engine to replay the incomplete rq */
- spin_lock_irq(&engine->timeline.lock);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
request = list_prev_entry(request, link);
if (&request->link == &engine->timeline.requests)
request = NULL;
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
}
@@ -3232,13 +3201,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
if (request)
request = i915_gem_reset_request(engine, request, stalled);
- if (request) {
- DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->global_seqno);
- }
-
/* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset_hw(engine, request);
+ engine->reset.reset(engine, request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3252,14 +3216,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
i915_gem_reset_engine(engine,
engine->hangcheck.active_request,
stalled_mask & ENGINE_MASK(id));
- ctx = fetch_and_zero(&engine->last_retired_context);
- if (ctx)
- intel_context_unpin(ctx, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
/*
* Ostensibly, we always want a context loaded for powersaving,
@@ -3277,7 +3241,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
rq = i915_request_alloc(engine,
dev_priv->kernel_context);
if (!IS_ERR(rq))
- __i915_request_add(rq, false);
+ i915_request_add(rq);
}
}
@@ -3286,8 +3250,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->execlists.tasklet);
- kthread_unpark(engine->breadcrumbs.signaler);
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
@@ -3565,6 +3528,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
work_pending(&i915->gt.idle_work.work));
}
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+ GEM_BUG_ON(engine->last_retired_context !=
+ to_intel_context(i915->kernel_context, engine));
+ }
+}
+
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
@@ -3576,6 +3555,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return;
+
+ /*
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. When we are idling on the kernel_context,
+ * no more new requests (with a context switch) are emitted and we
+ * can finally rest. A consequence is that the idle work handler is
+ * always called at least twice before idling (and if the system is
+ * idle that implies a round trip through the retire worker).
+ */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_switch_to_kernel_context(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+ READ_ONCE(dev_priv->gt.active_requests));
+
/*
* Wait for last execlists context complete, but bail out in case a
* new request is submitted. As we don't trust the hardware, we
@@ -3609,6 +3606,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
epoch = __i915_gem_park(dev_priv);
+ assert_kernel_context_is_current(dev_priv);
+
rearm_hangcheck = false;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3755,9 +3754,31 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
+static long wait_for_timeline(struct i915_timeline *tl,
+ unsigned int flags, long timeout)
{
- return i915_gem_active_wait(&tl->last_request, flags);
+ struct i915_request *rq;
+
+ rq = i915_gem_active_get_unlocked(&tl->last_request);
+ if (!rq)
+ return timeout;
+
+ /*
+ * "Race-to-idle".
+ *
+ * Switching to the kernel context is often used as a synchronous
+ * step prior to idling, e.g. in suspend for flushing all
+ * current operations to memory before sleeping. These we
+ * want to complete as quickly as possible to avoid prolonged
+ * stalls, so allow the gpu to boost to maximum clocks.
+ */
+ if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ gen6_rps_boost(rq, NULL);
+
+ timeout = i915_request_wait(rq, flags, timeout);
+ i915_request_put(rq);
+
+ return timeout;
}
static int wait_for_engines(struct drm_i915_private *i915)
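
i915_gem_wait_for_idle() now threads one timeout budget through every timeline: each wait_for_timeline() call consumes jiffies and returns the remainder (or a negative error), so the total wait is bounded across all timelines rather than per timeline. Both usage patterns from this diff, side by side:

/* Bounded: give the whole GPU at most 200ms to settle. */
err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5);

/* Unbounded, with RPS boost, as used on the suspend path. */
err = i915_gem_wait_for_idle(i915,
			     I915_WAIT_INTERRUPTIBLE |
			     I915_WAIT_LOCKED |
			     I915_WAIT_FOR_IDLE_BOOST,
			     MAX_SCHEDULE_TIMEOUT);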
@@ -3773,8 +3794,13 @@ static int wait_for_engines(struct drm_i915_private *i915)
return 0;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915,
+ unsigned int flags, long timeout)
{
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
return 0;
@@ -3786,26 +3812,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
lockdep_assert_held(&i915->drm.struct_mutex);
list_for_each_entry(tl, &i915->gt.timelines, link) {
- err = wait_for_timeline(tl, flags);
- if (err)
- return err;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
}
- i915_retire_requests(i915);
- return wait_for_engines(i915);
+ err = wait_for_engines(i915);
+ if (err)
+ return err;
+
+ i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
} else {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
for_each_engine(engine, i915, id) {
- err = wait_for_timeline(&engine->timeline, flags);
- if (err)
- return err;
- }
+ struct i915_timeline *tl = &engine->timeline;
- return 0;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
+ }
}
+
+ return 0;
}
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
@@ -4379,7 +4410,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma;
int ret;
@@ -4967,25 +4998,25 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+void i915_gem_sanitize(struct drm_i915_private *i915)
{
- struct i915_gem_context *kernel_context = i915->kernel_context;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ int err;
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context != kernel_context);
- }
-}
+ GEM_TRACE("\n");
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- if (i915_terminally_wedged(&i915->gpu_error)) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_get(i915);
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in and so worth trying to use the device once more.
+ */
+ if (i915_terminally_wedged(&i915->gpu_error))
i915_gem_unset_wedged(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -4995,60 +5026,94 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
+ err = -ENODEV;
if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
- WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ if (!err)
+ intel_engines_sanitize(i915);
+
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_runtime_pm_put(i915);
+
+ i915_gem_contexts_lost(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
}
-int i915_gem_suspend(struct drm_i915_private *dev_priv)
+int i915_gem_suspend(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
int ret;
- intel_runtime_pm_get(dev_priv);
- intel_suspend_gt_powersave(dev_priv);
+ GEM_TRACE("\n");
- mutex_lock(&dev->struct_mutex);
+ intel_runtime_pm_get(i915);
+ intel_suspend_gt_powersave(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
- /* We have to flush all the executing contexts to main memory so
+ /*
+ * We have to flush all the executing contexts to main memory so
* that they can be saved in the hibernation image. To ensure the last
* context image is coherent, we have to switch away from it. That
- * leaves the dev_priv->kernel_context still active when
+ * leaves the i915->kernel_context still active when
* we actually suspend, and its image in memory may not match the GPU
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = i915_gem_switch_to_kernel_context(dev_priv);
+ if (!i915_terminally_wedged(&i915->gpu_error)) {
+ ret = i915_gem_switch_to_kernel_context(i915);
if (ret)
goto err_unlock;
- ret = i915_gem_wait_for_idle(dev_priv,
+ ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ MAX_SCHEDULE_TIMEOUT);
if (ret && ret != -EIO)
goto err_unlock;
- assert_kernel_context_is_current(dev_priv);
+ assert_kernel_context_is_current(i915);
}
- i915_gem_contexts_lost(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_retire_requests(i915); /* ensure we flush after wedging */
+
+ mutex_unlock(&i915->drm.struct_mutex);
- intel_uc_suspend(dev_priv);
+ intel_uc_suspend(i915);
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
- /* As the idle_work is rearming if it detects a race, play safe and
+ /*
+ * As the idle_work re-arms itself if it detects a race, play safe and
* repeat the flush until it is definitely idle.
*/
- drain_delayed_work(&dev_priv->gt.idle_work);
+ drain_delayed_work(&i915->gt.idle_work);
- /* Assert that we sucessfully flushed all the work and
+ /*
+ * Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(dev_priv->gt.awake);
- if (WARN_ON(!intel_engines_are_idle(dev_priv)))
- i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+ WARN_ON(i915->gt.awake);
+ if (WARN_ON(!intel_engines_are_idle(i915)))
+ i915_gem_set_wedged(i915); /* no hope, discard everything */
+
+ intel_runtime_pm_put(i915);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ intel_runtime_pm_put(i915);
+ return ret;
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
+ NULL
+ }, **phase;
/*
* Neither the BIOS, ourselves nor any other kernel
@@ -5069,20 +5134,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- intel_uc_sanitize(dev_priv);
- i915_gem_sanitize(dev_priv);
- intel_runtime_pm_put(dev_priv);
- return 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
- return ret;
+ intel_uc_sanitize(i915);
+ i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
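
Both i915_gem_suspend_late() above and i915_gem_freeze_late() further down walk a NULL-terminated array of object lists, so the per-object body is written once and applied to the unbound and bound lists in turn. The idiom in isolation:

struct list_head *phases[] = {
	&i915->mm.unbound_list,
	&i915->mm.bound_list,
	NULL	/* sentinel terminates the walk */
}, **phase;

for (phase = phases; *phase; phase++) {
	list_for_each_entry(obj, *phase, mm.link)
		WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}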
@@ -5256,8 +5323,18 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
/* Only when the HW is re-initialised, can we replay the requests */
ret = __i915_gem_restart_engines(dev_priv);
+ if (ret)
+ goto cleanup_uc;
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return 0;
+
+cleanup_uc:
+ intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
return ret;
}
@@ -5294,7 +5371,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (engine->init_context)
err = engine->init_context(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (err)
goto err_active;
}
@@ -5303,9 +5380,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_active;
- err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
- if (err)
+ if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
+ i915_gem_set_wedged(i915);
+ err = -EIO; /* Caller will declare us wedged */
goto err_active;
+ }
assert_kernel_context_is_current(i915);
@@ -5368,7 +5447,9 @@ err_active:
if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
goto out_ctx;
- if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+ if (WARN_ON(i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT)))
goto out_ctx;
i915_gem_contexts_lost(i915);
@@ -5379,12 +5460,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
- /*
- * We need to fallback to 4K pages since gvt gtt handling doesn't
- * support huge page entries - we will need to check either hypervisor
- * mm can support huge guest page or just do emulation in gvt.
- */
- if (intel_vgpu_active(dev_priv))
+ /* We need to fall back to 4K pages if the host doesn't support huge GTT. */
+ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
@@ -5402,13 +5479,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- ret = intel_wopcm_init(&dev_priv->wopcm);
+ ret = intel_uc_init_misc(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
+ ret = intel_wopcm_init(&dev_priv->wopcm);
if (ret)
- return ret;
+ goto err_uc_misc;
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -5484,8 +5561,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* driver doesn't explode during runtime.
*/
err_init_hw:
- i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
- i915_gem_contexts_lost(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ WARN_ON(i915_gem_suspend(dev_priv));
+ i915_gem_suspend_late(dev_priv);
+
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
@@ -5502,6 +5585,7 @@ err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
+err_uc_misc:
intel_uc_fini_misc(dev_priv);
if (ret != -EIO)
@@ -5514,7 +5598,8 @@ err_unlock:
* for all other failure, such as an allocation failure, bail.
*/
if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ i915_load_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
}
ret = 0;
@@ -5524,6 +5609,28 @@ err_unlock:
return ret;
}
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+ i915_gem_suspend_late(dev_priv);
+
+ /* Flush any outstanding unpin_work. */
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_uc_fini_misc(dev_priv);
+ i915_gem_cleanup_userptr(dev_priv);
+
+ i915_gem_drain_freed_objects(dev_priv);
+
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
+}
+
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
i915_gem_sanitize(i915);
@@ -5688,16 +5795,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
return 0;
}
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
- &dev_priv->mm.unbound_list,
- &dev_priv->mm.bound_list,
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
NULL
- }, **p;
+ }, **phase;
- /* Called just before we write the hibernation image.
+ /*
+ * Called just before we write the hibernation image.
*
* We need to update the domain tracking to reflect that the CPU
* will be accessing all the pages to create and restore from the
@@ -5711,15 +5819,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
- i915_gem_drain_freed_objects(dev_priv);
+ i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+ i915_gem_drain_freed_objects(i915);
- spin_lock(&dev_priv->mm.obj_lock);
- for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, mm.link)
- __start_cpu_write(obj);
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -6039,16 +6147,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- pages = fetch_and_zero(&obj->mm.pages);
- if (pages) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- __i915_gem_object_reset_page_iter(obj);
-
- spin_lock(&i915->mm.obj_lock);
- list_del(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
- }
+ pages = __i915_gem_object_unset_pages(obj);
obj->ops = &i915_gem_phys_ops;
@@ -6066,7 +6165,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
err_xfer:
obj->ops = &i915_gem_object_ops;
- obj->mm.pages = pages;
+ if (!IS_ERR_OR_NULL(pages)) {
+ unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+ }
err_unlock:
mutex_unlock(&obj->mm.lock);
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 525920404ede..e46592956872 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,6 +26,7 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/interrupt.h>
struct drm_i915_private;
@@ -62,9 +63,12 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+ do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
#define I915_NUM_ENGINES 8
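
GEM_TRACE_DUMP_ON() dumps the ftrace buffer when its condition holds in CONFIG_DRM_I915_TRACE_GEM builds; otherwise it expands to BUILD_BUG_ON_INVALID(), which type-checks the expression without generating any code, so a trace-only condition cannot silently bit-rot. An illustrative call site (not taken from this diff):

err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ);
GEM_TRACE_DUMP_ON(err);	/* capture the trace if the wait failed */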
@@ -72,4 +76,21 @@ struct drm_i915_private;
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_inc_return(&t->count) == 1)
+ tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_dec_return(&t->count) == 0)
+ tasklet_kill(t);
+}
+
+static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
+{
+ return !atomic_read(&t->count);
+}
+
#endif /* __I915_GEM_H__ */
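
The helpers above make the disable/enable pair from the old reset path safe to nest: only the first disable waits for a currently-running tasklet, and only the final enable kills anything queued in between. A sketch of how per-engine reset callbacks might use them; the function names are illustrative, not taken from this diff:

static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
	/* First caller waits for the tasklet to finish running. */
	__tasklet_disable_sync_once(&engine->execlists.tasklet);
}

static void execlists_reset_finish(struct intel_engine_cs *engine)
{
	/* Last caller flushes anything queued while we were disabled. */
	__tasklet_enable_sync_once(&engine->execlists.tasklet);
	GEM_BUG_ON(!__tasklet_is_enabled(&engine->execlists.tasklet));
}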
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 060335d3d9e0..b10770cfccd2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
- if (!ce->state)
- continue;
-
- WARN_ON(ce->pin_count);
- if (ce->ring)
- intel_ring_free(ce->ring);
-
- __i915_gem_object_release_unless_active(ce->state->obj);
+ if (ce->ops)
+ ce->ops->destroy(ce);
}
kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->base);
+ i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
int ret;
unsigned int max;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
max = GEN11_MAX_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
+ } else {
+ /*
+ * When using GuC in proxy submission, GuC consumes the
+ * highest bit in the context id to indicate proxy submission.
+ */
+ if (USES_GUC_SUBMISSION(dev_priv))
+ max = MAX_GUC_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
+ }
+
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, max, GFP_KERNEL);
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_PRIORITY_NORMAL;
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -364,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
+ ppgtt = i915_ppgtt_create(dev_priv, file_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -502,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
- dev_priv->engine[RCS]->context_size ? "logical" :
- "fake");
+ DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ "logical" : "fake");
return 0;
}
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv, id) {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
-
- if (!engine->last_retired_context)
- continue;
-
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = NULL;
- }
+ for_each_engine(engine, dev_priv, id)
+ intel_engine_lost_context(engine);
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,68 +585,122 @@ last_request_on_engine(struct i915_timeline *timeline,
{
struct i915_request *rq;
- if (timeline == &engine->timeline)
- return NULL;
+ GEM_BUG_ON(timeline == &engine->timeline);
rq = i915_gem_active_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine)
+ if (rq && rq->engine == engine) {
+ GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+ timeline->name, engine->name,
+ rq->fence.context, rq->fence.seqno);
+ GEM_BUG_ON(rq->timeline != timeline);
return rq;
+ }
return NULL;
}
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
- struct i915_timeline *timeline;
+ struct drm_i915_private *i915 = engine->i915;
+ const struct intel_context * const ce =
+ to_intel_context(i915->kernel_context, engine);
+ struct i915_timeline *barrier = ce->ring->timeline;
+ struct intel_ring *ring;
+ bool any_active = false;
- list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
- if (last_request_on_engine(timeline, engine))
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+ struct i915_request *rq;
+
+ rq = last_request_on_engine(ring->timeline, engine);
+ if (!rq)
+ continue;
+
+ any_active = true;
+
+ if (rq->hw_context == ce)
+ continue;
+
+ /*
+ * Was this request submitted after the previous
+ * switch-to-kernel-context?
+ */
+ if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+ GEM_TRACE("%s needs barrier for %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
return false;
+ }
+
+ GEM_TRACE("%s has barrier after %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
}
- return intel_engine_has_kernel_context(engine);
+ /*
+ * If any other timeline was still active and behind the last barrier,
+ * then our last switch-to-kernel-context must still be queued and
+ * will run last (leaving the engine in the kernel context when it
+ * eventually idles).
+ */
+ if (any_active)
+ return true;
+
+ /* The engine is idle; check that it is idling in the kernel context. */
+ return engine->last_retired_context == ce;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
- i915_retire_requests(dev_priv);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915->kernel_context);
+
+ i915_retire_requests(i915);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
+ struct intel_ring *ring;
struct i915_request *rq;
- if (engine_has_idle_kernel_context(engine))
+ GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+ if (engine_has_kernel_context_barrier(engine))
continue;
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ GEM_TRACE("emit barrier on %s\n", engine->name);
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
/* Queue this switch after all other activity */
- list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
struct i915_request *prev;
- prev = last_request_on_engine(timeline, engine);
- if (prev)
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- I915_FENCE_GFP);
+ prev = last_request_on_engine(ring->timeline, engine);
+ if (!prev)
+ continue;
+
+ if (prev->gem_context == i915->kernel_context)
+ continue;
+
+ GEM_TRACE("add barrier on %s for %llx:%d\n",
+ engine->name,
+ prev->fence.context,
+ prev->fence.seqno);
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &prev->submit,
+ I915_FENCE_GFP);
+ i915_timeline_sync_set(rq->timeline, &prev->fence);
}
- /*
- * Force a flush after the switch to ensure that all rendering
- * and operations prior to switching to the kernel context hits
- * memory. This should be guaranteed by the previous request,
- * but an extra layer of paranoia before we declare the system
- * idle (on suspend etc) is advisable!
- */
- __i915_request_add(rq, true);
+ i915_request_add(rq);
}
return 0;
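
The barrier bookkeeping above rests on the timeline sync map: when the switch request is emitted, i915_timeline_sync_set() records every fence it was ordered after, and engine_has_kernel_context_barrier() later asks i915_timeline_sync_is_later() whether a given request predates that barrier. The protocol reduced to its two halves:

/* Emitting the switch: order after prev and remember that we did. */
i915_sw_fence_await_sw_fence_gfp(&rq->submit, &prev->submit,
				 I915_FENCE_GFP);
i915_timeline_sync_set(rq->timeline, &prev->fence);

/* Checking idleness: anything not covered needs a fresh barrier. */
if (!i915_timeline_sync_is_later(barrier, &rq->fence))
	return false;	/* rq was submitted after the last switch */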
@@ -664,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *ctx;
int ret;
- if (!dev_priv->engine[RCS]->context_size)
+ if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
return -ENODEV;
if (args->pad != 0)
@@ -747,11 +803,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
- args->value = ctx->ppgtt->base.total;
+ args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
- args->value = to_i915(dev)->ggtt.base.total;
+ args->value = to_i915(dev)->ggtt.vm.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = i915_gem_context_no_error_capture(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ace3b129c189..b116e4942c10 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -30,6 +30,7 @@
#include <linux/radix-tree.h>
#include "i915_gem.h"
+#include "i915_scheduler.h"
struct pid;
@@ -45,6 +46,13 @@ struct intel_ring;
#define DEFAULT_CONTEXT_HANDLE 0
+struct intel_context;
+
+struct intel_context_ops {
+ void (*unpin)(struct intel_context *ce);
+ void (*destroy)(struct intel_context *ce);
+};
+
/**
* struct i915_gem_context - client state
*
@@ -144,11 +152,14 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
+ struct i915_gem_context *gem_context;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+
+ const struct intel_context_ops *ops;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
return &ctx->__engine[engine->id];
}
-static inline struct intel_ring *
+static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
return engine->context_pin(engine, ctx);
}
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
GEM_BUG_ON(!ce->pin_count);
ce->pin_count++;
}
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
{
- engine->context_unpin(engine, ctx);
+ GEM_BUG_ON(!ce->pin_count);
+ if (--ce->pin_count)
+ return;
+
+ GEM_BUG_ON(!ce->ops);
+ ce->ops->unpin(ce);
}
/* i915_gem_context.c */
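
The pin interface now works on the intel_context itself: intel_context_pin() resolves and pins (returning the ce), __intel_context_pin() takes a cheap extra reference on an already-pinned context, and intel_context_unpin() only calls through ce->ops once the count drops to zero. A hedged usage sketch:

struct intel_context *ce;

ce = intel_context_pin(ctx, engine);	/* first pin resolves ce */
if (IS_ERR(ce))
	return PTR_ERR(ce);

__intel_context_pin(ce);	/* nested pin: just bumps pin_count */

intel_context_unpin(ce);	/* drops the nested pin */
intel_context_unpin(ce);	/* count hits zero: ce->ops->unpin(ce) */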
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 69a7aec49e84..82e2ca17a441 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = i915_gem_dmabuf_kmap,
- .map_atomic = i915_gem_dmabuf_kmap_atomic,
.unmap = i915_gem_dmabuf_kunmap,
- .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 54814a196ee4..02b83a5ed96c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915)
err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 22df17c8ca9b..3f0c612d42e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -66,6 +66,15 @@ enum {
#define __I915_EXEC_ILLEGAL_FLAGS \
(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
+/* Catch emission of unexpected errors for CI! */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#undef EINVAL
+#define EINVAL ({ \
+ DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
+ 22; \
+})
+#endif
+
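
With this override, every -EINVAL in the file expands to a statement expression that logs the exact function and line before evaluating to 22, so a CI log pinpoints which validation check rejected the execbuf while userspace still sees -22. Illustrative effect on a check using the flag mask defined just above:

if (args->flags & __I915_EXEC_ILLEGAL_FLAGS)
	return -EINVAL;
/* Debug builds print "EINVAL at <function>:<line>" via
 * DRM_DEBUG_DRIVER and then return -22 as before.
 */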
/**
* DOC: User command execution
*
@@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb,
* paranoia do it everywhere.
*/
if (i == batch_idx) {
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ if (entry->relocation_count &&
+ !(eb->flags[i] & EXEC_OBJECT_PINNED))
eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
@@ -723,7 +733,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
- eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+ eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -921,7 +931,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
- __i915_request_add(cache->rq, true);
+ i915_request_add(cache->rq);
cache->rq = NULL;
}
@@ -948,9 +958,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
- ggtt->base.clear_range(&ggtt->base,
- cache->node.start,
- cache->node.size);
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1021,7 +1031,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
- (&ggtt->base.mm, &cache->node,
+ (&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -1042,9 +1052,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
} else {
offset += page << PAGE_SHIFT;
}
@@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_request;
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- i915_vma_move_to_active(batch, rq, 0);
- reservation_object_lock(batch->resv, NULL);
- reservation_object_add_excl_fence(batch->resv, &rq->fence);
- reservation_object_unlock(batch->resv);
- i915_vma_unpin(batch);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
rq->batch = batch;
+ i915_vma_unpin(batch);
cache->rq = rq;
cache->rq_cmd = cmd;
@@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
/* Return with batch mapping (cmd) still pinned */
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1761,25 +1771,6 @@ slow:
return eb_relocate_slow(eb);
}
-static void eb_export_fence(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct reservation_object *resv = vma->resv;
-
- /*
- * Ignore errors from failing to allocate the new fence, we can't
- * handle an error right now. Worst case should be missed
- * synchronisation leading to rendering corruption.
- */
- reservation_object_lock(resv, NULL);
- if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &rq->fence);
- reservation_object_unlock(resv);
-}
-
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
@@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i];
- i915_vma_move_to_active(vma, eb->request, flags);
- eb_export_fence(vma, eb->request, flags);
+ err = i915_vma_move_to_active(vma, eb->request, flags);
+ if (unlikely(err)) {
+ i915_request_skip(eb->request, err);
+ return err;
+ }
__eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL;
@@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
return true;
}
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- const unsigned int idx = rq->engine->id;
-
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- if (!i915_vma_is_active(vma))
- obj->active_count++;
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], rq);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
- obj->write_domain = 0;
- if (flags & EXEC_OBJECT_WRITE) {
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, rq);
-
- obj->read_domains = 0;
- }
- obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, rq);
-}
-
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
u32 *cs;
@@ -2438,7 +2393,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- __i915_request_add(eb.request, err == 0);
+ i915_request_add(eb.request);
add_to_client(eb.request, file);
if (fences)
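
The deleted i915_vma_move_to_active() comment spells out the ordering invariant the relocated version must keep: mark_active prepends to the retirement callback list, so the object reference taken first is dropped last. The function can now also fail, presumably because the reservation-object bookkeeping from the deleted eb_export_fence() (which previously had to ignore fence-allocation errors) moved inside it, and every call site in this diff handles that by poisoning the request rather than submitting an unsynchronised payload:

err = i915_vma_move_to_active(vma, rq, flags);
if (unlikely(err)) {
	/* Cancel the payload; keep the breadcrumb for ordering. */
	i915_request_skip(rq, err);
	return err;
}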
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 996ab2ad6c45..f00c7fbef79e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -42,7 +42,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
-#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/**
* DOC: Global GTT views
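
I915_GFP_ALLOW_FAIL swaps the old __GFP_NORETRY at the call sites below for __GFP_RETRY_MAYFAIL: the allocator tries harder before giving up but returns NULL instead of invoking the OOM killer, and __GFP_NOWARN suppresses the allocation-failure splat since every caller checks the result and unwinds. E.g.:

pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
if (unlikely(!pt))
	return ERR_PTR(-ENOMEM);	/* caller unwinds gracefully */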
@@ -195,18 +195,18 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
u32 unused)
{
u32 pte_flags;
- int ret;
+ int err;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
- vma->size);
- if (ret)
- return ret;
+ err = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (err)
+ return err;
}
- /* Currently applicable only to VLV */
+ /* Applicable to VLV, and gen8+ */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma)
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ u32 flags)
{
- gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
- pte |= addr;
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
switch (level) {
case I915_CACHE_NONE:
@@ -375,37 +378,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+static void stash_init(struct pagestash *stash)
+{
+ pagevec_init(&stash->pvec);
+ spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+ struct page *page = NULL;
+
+ spin_lock(&stash->lock);
+ if (likely(stash->pvec.nr))
+ page = stash->pvec.pages[--stash->pvec.nr];
+ spin_unlock(&stash->lock);
+
+ return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+ int nr;
+
+ spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+ nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+ memcpy(stash->pvec.pages + stash->pvec.nr,
+ pvec->pages + pvec->nr - nr,
+ sizeof(pvec->pages[0]) * nr);
+ stash->pvec.nr += nr;
+
+ spin_unlock(&stash->lock);
+
+ pvec->nr -= nr;
+}
+
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct pagevec *pvec = &vm->free_pages;
- struct pagevec stash;
+ struct pagevec stack;
+ struct page *page;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->free_pages);
+ if (page)
+ return page;
if (!vm->pt_kmap_wc)
return alloc_page(gfp);
- /* A placeholder for a specific mutex to guard the WC stash */
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
-
/* Look in our global stash of WC pages... */
- pvec = &vm->i915->mm.wc_stash;
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->i915->mm.wc_stash);
+ if (page)
+ return page;
/*
- * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
*
* We have to be careful as page allocation may trigger the shrinker
* (via direct reclaim) which will fill up the WC stash underneath us.
* So we add our WB pages into a temporary pvec on the stack and merge
* them into the WC stash after all the allocations are complete.
*/
- pagevec_init(&stash);
+ pagevec_init(&stack);
do {
struct page *page;
@@ -413,59 +449,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
break;
- stash.pages[stash.nr++] = page;
- } while (stash.nr < pagevec_space(pvec));
+ stack.pages[stack.nr++] = page;
+ } while (pagevec_space(&stack));
- if (stash.nr) {
- int nr = min_t(int, stash.nr, pagevec_space(pvec));
- struct page **pages = stash.pages + stash.nr - nr;
+ if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+ page = stack.pages[--stack.nr];
- if (nr && !set_pages_array_wc(pages, nr)) {
- memcpy(pvec->pages + pvec->nr,
- pages, sizeof(pages[0]) * nr);
- pvec->nr += nr;
- stash.nr -= nr;
- }
+ /* Merge spare WC pages to the global stash */
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
- pagevec_release(&stash);
+ /* Push any surplus WC pages onto the local VM stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->free_pages, &stack);
}
- return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
+ /* Return unwanted leftovers */
+ if (unlikely(stack.nr)) {
+ WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+ __pagevec_release(&stack);
+ }
+
+ return page;
}
static void vm_free_pages_release(struct i915_address_space *vm,
bool immediate)
{
- struct pagevec *pvec = &vm->free_pages;
+ struct pagevec *pvec = &vm->free_pages.pvec;
+ struct pagevec stack;
+ lockdep_assert_held(&vm->free_pages.lock);
GEM_BUG_ON(!pagevec_count(pvec));
if (vm->pt_kmap_wc) {
- struct pagevec *stash = &vm->i915->mm.wc_stash;
-
- /* When we use WC, first fill up the global stash and then
+ /*
+ * When we use WC, first fill up the global stash and then
* only if full, immediately free the overflow.
*/
+ stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
- if (pagevec_space(stash)) {
- do {
- stash->pages[stash->nr++] =
- pvec->pages[--pvec->nr];
- if (!pvec->nr)
- return;
- } while (pagevec_space(stash));
-
- /* As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (!immediate)
- return;
- }
+ /*
+ * As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+ return;
+
+ /*
+ * We have to drop the lock to allow ourselves to sleep,
+ * so take a copy of the pvec and clear the stash so
+ * others can use it while we sleep.
+ */
+ stack = *pvec;
+ pagevec_reinit(pvec);
+ spin_unlock(&vm->free_pages.lock);
+ pvec = &stack;
set_pages_array_wb(pvec->pages, pvec->nr);
+
+ spin_lock(&vm->free_pages.lock);
}
__pagevec_release(pvec);
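
The pagestash converts the struct_mutex-guarded pagevec into a spinlock-protected one, so WC pages can be recycled from any context. stash_push_pagevec() takes its lock with SINGLE_DEPTH_NESTING because vm_free_pages_release() pushes to the global wc_stash while still holding the per-VM stash lock: two locks of the same class, so lockdep needs the nesting annotation. Usage boils down to:

struct page *page;

page = stash_pop_page(&vm->free_pages);	/* NULL if empty */
if (!page)
	page = alloc_page(GFP_KERNEL);

/* ...later, recycle a batch; the stash takes only what fits. */
stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);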
@@ -481,20 +525,60 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
* unconditional might_sleep() for everybody.
*/
might_sleep();
- if (!pagevec_add(&vm->free_pages, page))
+ spin_lock(&vm->free_pages.lock);
+ if (!pagevec_add(&vm->free_pages.pvec, page))
vm_free_pages_release(vm, false);
+ spin_unlock(&vm->free_pages.lock);
+}
+
+static void i915_address_space_init(struct i915_address_space *vm,
+ struct drm_i915_private *dev_priv)
+{
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ i915_gem_shrinker_taints_mutex(&vm->mutex);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ stash_init(&vm->free_pages);
+
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
+}
+
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
}
static int __setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
gfp_t gfp)
{
- p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
if (unlikely(!p->page))
return -ENOMEM;
- p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ p->daddr = dma_map_page_attrs(vm->dma,
+ p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
vm_free_page(vm, p->page);
return -ENOMEM;
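
i915_gem_shrinker_taints_mutex() is not part of this diff; per the comment above, it presumably primes lockdep by acquiring the new vm->mutex once inside a fake fs_reclaim section, so any later allocation attempted while holding the mutex is reported as a potential reclaim deadlock. A sketch under that assumption:

static void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);	/* pretend to be in reclaim */
	mutex_lock(mutex);		/* teach lockdep: mutex vs reclaim */
	mutex_unlock(mutex);
	fs_reclaim_release(GFP_KERNEL);
}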
@@ -506,7 +590,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
static int setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p)
{
- return __setup_page_dma(vm, p, I915_GFP_DMA);
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}
static void cleanup_page_dma(struct i915_address_space *vm,
@@ -520,8 +604,8 @@ static void cleanup_page_dma(struct i915_address_space *vm,
#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
static void fill_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
@@ -575,8 +659,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
goto skip;
- addr = dma_map_page(vm->dma, page, 0, size,
- PCI_DMA_BIDIRECTIONAL);
+ addr = dma_map_page_attrs(vm->dma,
+ page, 0, size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, addr)))
goto free_page;
@@ -614,7 +701,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
- pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
@@ -637,21 +724,20 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}
-static void gen6_initialize_pt(struct i915_address_space *vm,
+static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
struct i915_page_table *pt)
{
- fill32_px(vm, pt,
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+ fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
@@ -685,7 +771,7 @@ static int __pdp_init(struct i915_address_space *vm,
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- GFP_KERNEL | __GFP_NOWARN);
+ I915_GFP_ALLOW_FAIL);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
@@ -765,53 +851,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct i915_request *rq,
- unsigned entry,
- dma_addr_t addr)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- BUG_ON(entry >= 4);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
- *cs++ = upper_32_bits(addr);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
- *cs++ = lower_32_bits(addr);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- int i, ret;
-
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- ret = gen8_write_pdp(rq, i, pd_daddr);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
-}
-
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
@@ -819,7 +858,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -833,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -1005,14 +1044,15 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
struct sgt_dma *iter,
struct gen8_insert_pte *idx,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
gen8_pte_t *vaddr;
bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
do {
@@ -1043,7 +1083,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
break;
}
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
}
@@ -1059,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
- cache_level);
+ cache_level, flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -1074,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
struct i915_page_directory_pointer **pdps,
struct sgt_dma *iter,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
u64 start = vma->node.start;
dma_addr_t rem = iter->sg->length;
@@ -1192,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+ gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+ flags);
} else {
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
- &iter, &idx, cache_level))
+ &iter, &idx, cache_level,
+ flags))
GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1229,7 +1272,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
@@ -1272,7 +1315,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
enum vgt_g2v_type msg;
int i;
@@ -1333,13 +1376,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+ if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
}
- cleanup_px(&ppgtt->base, &ppgtt->pml4);
+ cleanup_px(&ppgtt->vm, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1353,7 +1396,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (use_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
gen8_free_scratch(vm);
}
@@ -1489,7 +1532,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory *pd;
u32 pdpe;
@@ -1499,7 +1542,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u64 pd_start = start;
u32 pde;
- if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+ if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
continue;
seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1507,7 +1550,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u32 pte;
gen8_pte_t *pt_vaddr;
- if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+ if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
continue;
pt_vaddr = kmap_atomic_px(pt);
@@ -1540,10 +1583,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- u64 start = 0, length = ppgtt->base.total;
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
u64 pml4e;
@@ -1551,7 +1594,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+ if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
continue;
seq_printf(m, " PML4E #%llu\n", pml4e);
@@ -1564,10 +1607,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->base.total;
+ u64 start = 0, length = ppgtt->vm.total;
u64 from = start;
unsigned int pdpe;
@@ -1601,211 +1644,153 @@ unwind:
* space.
*
*/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = vm->i915;
- int ret;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->ref);
+
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
+ /*
+ * From bdw, there is support for read-only pages in the PPGTT.
+ *
+ * XXX GVT is not honouring the lack of RW in the PTE bits.
+ */
+ ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
+ i915_address_space_init(&ppgtt->vm, i915);
+
/* There are only a few exceptions for gen >= 6: chv and bxt.
 * And we are not sure about the latter, so play safe for now.
*/
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
- ppgtt->base.pt_kmap_wc = true;
+ if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
+ ppgtt->vm.pt_kmap_wc = true;
- ret = gen8_init_scratch(&ppgtt->base);
- if (ret) {
- ppgtt->base.total = 0;
- return ret;
- }
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_free;
- if (use_4lvl(vm)) {
- ret = setup_px(&ppgtt->base, &ppgtt->pml4);
- if (ret)
- goto free_scratch;
+ if (use_4lvl(&ppgtt->vm)) {
+ err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+ if (err)
+ goto err_scratch;
- gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+ gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
- ppgtt->switch_mm = gen8_mm_switch_4lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
- if (ret)
- goto free_scratch;
+ err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
+ if (err)
+ goto err_scratch;
- if (intel_vgpu_active(dev_priv)) {
- ret = gen8_preallocate_top_level_pdp(ppgtt);
- if (ret) {
+ if (intel_vgpu_active(i915)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err) {
__pdp_fini(&ppgtt->pdp);
- goto free_scratch;
+ goto err_scratch;
}
}
- ppgtt->switch_mm = gen8_mm_switch_3lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
}
- if (intel_vgpu_active(dev_priv))
+ if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
ppgtt->debug_dump = gen8_dump_ppgtt;
- return 0;
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
-free_scratch:
- gen8_free_scratch(&ppgtt->base);
- return ret;
+ return ppgtt;
+
+err_scratch:
+ gen8_free_scratch(&ppgtt->vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
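The new constructor reports errors through the returned pointer itself (the kernel's ERR_PTR/IS_ERR convention) instead of an int plus pre-allocated structure. A minimal userspace re-implementation of that convention, using only standard C; the ppgtt_create() body is a stand-in for the real unwind paths:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct ppgtt { int dummy; };

static struct ppgtt *ppgtt_create(int fail_scratch)
{
	struct ppgtt *p = calloc(1, sizeof(*p));

	if (!p)
		return ERR_PTR(-ENOMEM);
	if (fail_scratch) {	/* e.g. scratch setup failed: unwind, then report */
		free(p);
		return ERR_PTR(-ENODEV);
	}
	return p;
}

int main(void)
{
	struct ppgtt *p = ppgtt_create(1);

	if (IS_ERR(p)) {
		printf("create failed: %ld\n", PTR_ERR(p));
		return 1;
	}
	free(p);
	return 0;
}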
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct i915_page_table *unused;
- gen6_pte_t scratch_pte;
- u32 pd_entry, pte, pde;
- u32 start = 0, length = ppgtt->base.total;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ struct i915_page_table *pt;
+ u32 pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ gen6_for_all_pdes(pt, &base->pd, pde) {
+ gen6_pte_t *vaddr;
+
+ if (pt == base->vm.scratch_pt)
+ continue;
+
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ u32 expected =
+ GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
+ GEN6_PDE_VALID;
+ u32 pd_entry = readl(ppgtt->pd_addr + pde);
+
+ if (pd_entry != expected)
+ seq_printf(m,
+ "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+ pde,
+ pd_entry,
+ expected);
+
+ seq_printf(m, "\tPDE: %x\n", pd_entry);
+ }
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
- u32 expected;
- gen6_pte_t *pt_vaddr;
- const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
- pd_entry = readl(ppgtt->pd_addr + pde);
- expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
-
- if (pd_entry != expected)
- seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
- pde,
- pd_entry,
- expected);
- seq_printf(m, "\tPDE: %x\n", pd_entry);
-
- pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
-
- for (pte = 0; pte < GEN6_PTES; pte+=4) {
- unsigned long va =
- (pde * PAGE_SIZE * GEN6_PTES) +
- (pte * PAGE_SIZE);
+ vaddr = kmap_atomic_px(base->pd.page_table[pde]);
+ for (pte = 0; pte < GEN6_PTES; pte += 4) {
int i;
- bool found = false;
+
for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
+ if (vaddr[pte + i] != scratch_pte)
+ break;
+ if (i == 4)
continue;
- seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+ seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ pde, pte,
+ (pde * GEN6_PTES + pte) * PAGE_SIZE);
for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %08x", pt_vaddr[pte + i]);
+ if (vaddr[pte + i] != scratch_pte)
+ seq_printf(m, " %08x", vaddr[pte + i]);
else
- seq_puts(m, " SCRATCH ");
+ seq_puts(m, " SCRATCH");
}
seq_puts(m, "\n");
}
- kunmap_atomic(pt_vaddr);
+ kunmap_atomic(vaddr);
}
}
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
- writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
-}
-
-/* Write all the page tables found in the ppgtt structure to incrementing page
- * directories. */
-static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
- u32 start, u32 length)
-{
- struct i915_page_table *pt;
- unsigned int pde;
-
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- mark_tlbs_dirty(ppgtt);
- wmb();
-}
-
-static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
-{
- GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
- return ppgtt->pd.base.ggtt_offset << 10;
-}
-
-static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- struct drm_i915_private *dev_priv = rq->i915;
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- return 0;
+ iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
}
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
@@ -1867,22 +1852,30 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
unsigned int first_entry = start >> PAGE_SHIFT;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length >> PAGE_SHIFT;
- gen6_pte_t scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
while (num_entries) {
- struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
- unsigned int end = min(pte + num_entries, GEN6_PTES);
+ struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+ const unsigned int end = min(pte + num_entries, GEN6_PTES);
+ const unsigned int count = end - pte;
gen6_pte_t *vaddr;
- num_entries -= end - pte;
+ GEM_BUG_ON(pt == vm->scratch_pt);
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > pt->used_ptes);
+ pt->used_ptes -= count;
+ if (!pt->used_ptes)
+ ppgtt->scan_for_unused_pt = true;
- /* Note that the hw doesn't support removing PDE on the fly
+ /*
+	 * Note that the hw doesn't support removing PDEs on the fly
* (they are cached inside the context with no means to
* invalidate the cache), so we can only reset the PTE
* entries back to scratch.
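Because the PDEs are cached in the context, a clear can only rewrite PTEs to the scratch value; the hunk above additionally tracks used_ptes so a table that empties out can be reaped later. A toy model of that bookkeeping, with sizes invented for the demo:

#include <assert.h>
#include <stdio.h>

#define PTES_PER_PT 8
#define SCRATCH_PTE 0xdeadu

struct pt {
	unsigned int pte[PTES_PER_PT];
	unsigned int used_ptes;
};

static void clear_range(struct pt *pt, unsigned int first, unsigned int count)
{
	assert(count <= pt->used_ptes);
	pt->used_ptes -= count;			/* bookkeeping, as in the patch */
	for (unsigned int i = first; i < first + count; i++)
		pt->pte[i] = SCRATCH_PTE;	/* PTEs can only go back to scratch */
}

int main(void)
{
	struct pt pt = { .used_ptes = PTES_PER_PT };

	clear_range(&pt, 0, PTES_PER_PT);
	if (!pt.used_ptes)
		printf("table empty: flag it for the unused-pt scan\n");
	return 0;
}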
@@ -1911,6 +1904,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
+ GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1939,218 +1934,280 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
static int gen6_alloc_va_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_table *pt;
u64 from = start;
unsigned int pde;
bool flush = false;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
+ gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind_out;
- gen6_initialize_pt(vm, pt);
- ppgtt->pd.page_table[pde] = pt;
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
+ gen6_initialize_pt(ppgtt, pt);
+ ppgtt->base.pd.page_table[pde] = pt;
+
+ if (i915_vma_is_bound(ppgtt->vma,
+ I915_VMA_GLOBAL_BIND)) {
+ gen6_write_pde(ppgtt, pde, pt);
+ flush = true;
+ }
+
+ GEM_BUG_ON(pt->used_ptes);
}
+
+ pt->used_ptes += count;
}
if (flush) {
- mark_tlbs_dirty(ppgtt);
- wmb();
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
}
return 0;
unwind_out:
- gen6_ppgtt_clear_range(vm, from, start);
+ gen6_ppgtt_clear_range(vm, from, start - from);
return -ENOMEM;
}
-static int gen6_init_scratch(struct i915_address_space *vm)
+static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_table *unused;
+ u32 pde;
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
+ ppgtt->scratch_pte =
+ vm->pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
cleanup_scratch_page(vm);
return PTR_ERR(vm->scratch_pt);
}
- gen6_initialize_pt(vm, vm->scratch_pt);
+ gen6_initialize_pt(ppgtt, vm->scratch_pt);
+ gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
+ ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
return 0;
}
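After building the scratch page table, every PDE starts out pointing at it, and gen6_alloc_va_range() only swaps in a real table on first use. A compact model of that lazy-fill scheme (the int pointers stand in for page-table objects):

#include <stdio.h>

#define PDES 4

int main(void)
{
	int scratch = -1;		/* stands in for vm->scratch_pt */
	int *pd[PDES];

	for (unsigned int pde = 0; pde < PDES; pde++)
		pd[pde] = &scratch;	/* the gen6_for_all_pdes() fill above */

	/* later, on first allocation in that range, a real table replaces it */
	int real = 0;
	pd[1] = &real;

	printf("pde1 %s scratch\n", pd[1] == &scratch ? "is" : "is not");
	return 0;
}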
-static void gen6_free_scratch(struct i915_address_space *vm)
+static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
{
free_pt(vm, vm->scratch_pt);
cleanup_scratch_page(vm);
}
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory *pd = &ppgtt->pd;
struct i915_page_table *pt;
u32 pde;
- drm_mm_remove_node(&ppgtt->node);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ if (pt != ppgtt->base.vm.scratch_pt)
+ free_pt(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- gen6_for_all_pdes(pt, pd, pde)
- if (pt != vm->scratch_pt)
- free_pt(vm, pt);
+ i915_vma_destroy(ppgtt->vma);
- gen6_free_scratch(vm);
+ gen6_ppgtt_free_pd(ppgtt);
+ gen6_ppgtt_free_scratch(vm);
}
-static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+static int pd_vma_set_pages(struct i915_vma *vma)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+ vma->pages = ERR_PTR(-ENODEV);
+ return 0;
+}
- /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
- BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
- ret = gen6_init_scratch(vm);
- if (ret)
- return ret;
+ vma->pages = NULL;
+}
- ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
- GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->base.total,
- PIN_HIGH);
- if (ret)
- goto err_out;
+static int pd_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+ struct i915_page_table *pt;
+ unsigned int pde;
- if (ppgtt->node.start < ggtt->mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
- ppgtt->pd.base.ggtt_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_write_pde(ppgtt, pde, pt);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
- ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
return 0;
-
-err_out:
- gen6_free_scratch(vm);
- return ret;
}
-static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+static void pd_vma_unbind(struct i915_vma *vma)
{
- return gen6_ppgtt_allocate_page_directories(ppgtt);
-}
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+ struct i915_page_table *pt;
+ unsigned int pde;
-static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
- u64 start, u64 length)
-{
- struct i915_page_table *unused;
- u32 pde;
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+ /* Free all no longer used page tables */
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
+ if (pt->used_ptes || pt == scratch_pt)
+ continue;
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
- ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+ free_pt(&ppgtt->base.vm, pt);
+ ppgtt->base.pd.page_table[pde] = scratch_pt;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
-{
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+static const struct i915_vma_ops pd_vma_ops = {
+ .set_pages = pd_vma_set_pages,
+ .clear_pages = pd_vma_clear_pages,
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
- ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
- ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev_priv))
- ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev_priv))
- ppgtt->switch_mm = gen7_mm_switch;
- else
- BUG();
+static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+{
+ struct drm_i915_private *i915 = ppgtt->base.vm.i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_vma *vma;
- ret = gen6_ppgtt_alloc(ppgtt);
- if (ret)
- return ret;
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(size > ggtt->vm.total);
- ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+ vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+ init_request_active(&vma->last_fence, NULL);
- ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
- if (ret) {
- gen6_ppgtt_cleanup(&ppgtt->base);
- return ret;
- }
+ vma->vm = &ggtt->vm;
+ vma->ops = &pd_vma_ops;
+ vma->private = ppgtt;
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->debug_dump = gen6_dump_ppgtt;
+ vma->active = RB_ROOT;
- DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
- ppgtt->node.size >> 20,
- ppgtt->node.start / PAGE_SIZE);
+ vma->size = size;
+ vma->fence_size = size;
+ vma->flags = I915_VMA_GGTT;
+ vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
- DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
- ppgtt->pd.base.ggtt_offset << 10);
+ INIT_LIST_HEAD(&vma->obj_link);
+ list_add(&vma->vm_link, &vma->vm->unbound_list);
- return 0;
+ return vma;
}
-static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_private *dev_priv)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
- ppgtt->base.i915 = dev_priv;
- ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- if (INTEL_GEN(dev_priv) < 8)
- return gen6_ppgtt_init(ppgtt);
- else
- return gen8_ppgtt_init(ppgtt);
+ /*
+	 * Work around the limited maximum vma->pin_count and the
+	 * aliasing_ppgtt, which will be pinned into every active context.
+	 * (When vma->pin_count becomes atomic, I expect we will naturally
+	 * need a larger, unpacked type and kill this redundancy.)
+ */
+ if (ppgtt->pin_count++)
+ return 0;
+
+ /*
+	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ return i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
}
-static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv,
- const char *name)
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
{
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- INIT_LIST_HEAD(&vm->active_list);
- INIT_LIST_HEAD(&vm->inactive_list);
- INIT_LIST_HEAD(&vm->unbound_list);
+ GEM_BUG_ON(!ppgtt->pin_count);
+ if (--ppgtt->pin_count)
+ return;
- list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages);
+ i915_vma_unpin(ppgtt->vma);
}
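gen6_ppgtt_pin()/gen6_ppgtt_unpin() use a plain pin_count so only the first pin does the real binding work and only the last unpin releases it. The same pattern in isolation; the printf calls stand in for the actual bind/release:

#include <assert.h>
#include <stdio.h>

struct pinned { unsigned int pin_count; };

static void do_pin(struct pinned *p)
{
	if (p->pin_count++)	/* already pinned: just take another reference */
		return;
	printf("first pin: bind the page directory\n");
}

static void do_unpin(struct pinned *p)
{
	assert(p->pin_count);
	if (--p->pin_count)	/* still pinned by someone else */
		return;
	printf("last unpin: release the page directory\n");
}

int main(void)
{
	struct pinned p = { 0 };

	do_pin(&p);
	do_pin(&p);
	do_unpin(&p);
	do_unpin(&p);
	return 0;
}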
-static void i915_address_space_fini(struct i915_address_space *vm)
+static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
- if (pagevec_count(&vm->free_pages))
- vm_free_pages_release(vm, true);
+ struct i915_ggtt * const ggtt = &i915->ggtt;
+ struct gen6_hw_ppgtt *ppgtt;
+ int err;
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->base.ref);
+
+ ppgtt->base.vm.i915 = i915;
+ ppgtt->base.vm.dma = &i915->drm.pdev->dev;
+
+ ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+
+ i915_address_space_init(&ppgtt->base.vm, i915);
+
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.debug_dump = gen6_dump_ppgtt;
+
+ ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
+
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_free;
+
+ ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ goto err_scratch;
+ }
+
+ return &ppgtt->base;
+
+err_scratch:
+ gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
@@ -2212,29 +2269,28 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
}
+static struct i915_hw_ppgtt *
+__hw_ppgtt_create(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) < 8)
+ return gen6_ppgtt_create(i915);
+ else
+ return gen8_ppgtt_create(i915);
+}
+
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name)
+i915_ppgtt_create(struct drm_i915_private *i915,
+ struct drm_i915_file_private *fpriv)
{
struct i915_hw_ppgtt *ppgtt;
- int ret;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
- ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ERR_PTR(ret);
- }
+ ppgtt = __hw_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
- kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv, name);
- ppgtt->base.file = fpriv;
+ ppgtt->vm.file = fpriv;
- trace_i915_ppgtt_create(&ppgtt->base);
+ trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
@@ -2268,16 +2324,16 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
- trace_i915_ppgtt_release(&ppgtt->base);
+ trace_i915_ppgtt_release(&ppgtt->vm);
- ppgtt_destroy_vma(&ppgtt->base);
+ ppgtt_destroy_vma(&ppgtt->vm);
- GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
- ppgtt->base.cleanup(&ppgtt->base);
- i915_address_space_fini(&ppgtt->base);
+ ppgtt->vm.cleanup(&ppgtt->vm);
+ i915_address_space_fini(&ppgtt->vm);
kfree(ppgtt);
}
@@ -2373,7 +2429,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
i915_ggtt_invalidate(dev_priv);
}
@@ -2419,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
- gen8_set_pte(pte, gen8_pte_encode(addr, level));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
ggtt->invalidate(vm->i915);
}
@@ -2427,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
dma_addr_t addr;
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+	 * not to allow the user to override access to a read-only page.
+ */
+
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start >> PAGE_SHIFT;
for_each_sgt_dma(addr, sgt_iter, vma->pages)
@@ -2500,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2561,13 +2622,14 @@ struct insert_entries {
struct i915_address_space *vm;
struct i915_vma *vma;
enum i915_cache_level level;
+ u32 flags;
};
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
@@ -2576,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
- struct insert_entries arg = { vm, vma, level };
+ struct insert_entries arg = { vm, vma, level, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
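stop_machine() only passes a single void * to its callback, so the BXT VT-d workaround above packs vm/vma/level/flags into struct insert_entries first. The argument-packing pattern on its own, with a stand-in runner; the names here are invented for the demo:

#include <stdio.h>

struct insert_args {
	const char *what;
	int level;
	unsigned int flags;
};

static int insert_cb(void *_arg)	/* signature imposed by the runner */
{
	struct insert_args *arg = _arg;

	printf("insert %s level=%d flags=%#x\n",
	       arg->what, arg->level, arg->flags);
	return 0;
}

/* stand-in for stop_machine(): runs the callback with one void * argument */
static int run_exclusive(int (*fn)(void *), void *arg)
{
	return fn(arg);
}

int main(void)
{
	struct insert_args arg = { "vma", 1, 0x2 };

	return run_exclusive(insert_cb, &arg);
}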
@@ -2669,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- /* Currently applicable only to VLV */
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (obj->gt_ro)
+ if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
@@ -2709,23 +2771,22 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
- appgtt->base.allocate_va_range) {
- ret = appgtt->base.allocate_va_range(&appgtt->base,
- vma->node.start,
- vma->size);
+ if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+ vma->node.start,
+ vma->size);
if (ret)
return ret;
}
- appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
- pte_flags);
+ appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+ pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2748,7 +2809,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+ struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2762,7 +2823,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0)) {
+ if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2811,34 +2872,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
+ ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
- if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
err = -ENODEV;
goto err_ppgtt;
}
- if (ppgtt->base.allocate_va_range) {
- /* Note we only pre-allocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference is very,
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- 0, ggtt->base.total);
- if (err)
- goto err_ppgtt;
- }
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
i915->mm.aliasing_ppgtt = ppgtt;
- GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
- ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
- GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
- ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2858,8 +2917,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
i915_ppgtt_put(ppgtt);
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2883,7 +2942,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -2891,16 +2950,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
- ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2925,30 +2983,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
struct i915_vma *vma, *vn;
struct pagevec *pvec;
- ggtt->base.closed = true;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_gem_cleanup_stolen(&dev_priv->drm);
+ ggtt->vm.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_fini_aliasing_ppgtt(dev_priv);
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+ WARN_ON(i915_vma_unbind(vma));
+
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
- if (drm_mm_initialized(&ggtt->base.mm)) {
+ if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
- pvec = &dev_priv->mm.wc_stash;
+ pvec = &dev_priv->mm.wc_stash.pvec;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
@@ -2958,6 +3012,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
arch_phys_wc_del(ggtt->mtrr);
io_mapping_fini(&ggtt->iomap);
+
+ i915_gem_cleanup_stolen(&dev_priv->drm);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2996,7 +3052,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
@@ -3020,7 +3076,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -3326,7 +3382,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3350,29 +3406,30 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- ggtt->base.cleanup = gen6_gmch_remove;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.insert_page = gen8_ggtt_insert_page;
- ggtt->base.clear_range = nop_clear_range;
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->base.clear_range = gen8_ggtt_clear_range;
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
- ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->base.clear_range != nop_clear_range)
- ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
}
ggtt->invalidate = gen6_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@@ -3380,7 +3437,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3407,29 +3464,30 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- ggtt->base.clear_range = gen6_ggtt_clear_range;
- ggtt->base.insert_page = gen6_ggtt_insert_page;
- ggtt->base.insert_entries = gen6_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(dev_priv))
- ggtt->base.pte_encode = iris_pte_encode;
+ ggtt->vm.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
- ggtt->base.pte_encode = hsw_pte_encode;
+ ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev_priv))
- ggtt->base.pte_encode = byt_pte_encode;
+ ggtt->vm.pte_encode = byt_pte_encode;
else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
+ ggtt->vm.pte_encode = ivb_pte_encode;
else
- ggtt->base.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
@@ -3441,7 +3499,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
phys_addr_t gmadr_base;
int ret;
@@ -3451,26 +3509,25 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->base.total,
- &gmadr_base,
- &ggtt->mappable_end);
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr =
(struct resource) DEFINE_RES_MEM(gmadr_base,
ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->base.insert_page = i915_ggtt_insert_page;
- ggtt->base.insert_entries = i915_ggtt_insert_entries;
- ggtt->base.clear_range = i915_ggtt_clear_range;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = i915_gmch_remove;
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3486,8 +3543,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ggtt->base.i915 = dev_priv;
- ggtt->base.dma = &dev_priv->drm.pdev->dev;
+ ggtt->vm.i915 = dev_priv;
+ ggtt->vm.dma = &dev_priv->drm.pdev->dev;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3504,27 +3561,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* restriction!
*/
if (USES_GUC(dev_priv)) {
- ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if ((ggtt->base.total - 1) >> 32) {
+ if ((ggtt->vm.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
- ggtt->base.total >> 20);
- ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if (ggtt->mappable_end > ggtt->base.total) {
+ if (ggtt->mappable_end > ggtt->vm.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
" aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->base.total);
- ggtt->mappable_end = ggtt->base.total;
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3543,7 +3602,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- INIT_LIST_HEAD(&dev_priv->vm_list);
+ stash_init(&dev_priv->mm.wc_stash);
/* Note that we use page colouring to enforce a guard page at the
* end of the address space. This is required as the CS may prefetch
@@ -3551,9 +3610,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 * beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+ i915_address_space_init(&ggtt->vm, dev_priv);
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
- ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3576,7 +3639,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt_cleanup:
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
return ret;
}
@@ -3610,34 +3673,35 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *obj, *on;
+ struct i915_vma *vma, *vn;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+ ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
- bool ggtt_bound = false;
- struct i915_vma *vma;
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- for_each_ggtt_vma(vma, obj) {
- if (!i915_vma_unbind(vma))
- continue;
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ continue;
- WARN_ON(i915_vma_bind(vma, obj->cache_level,
- PIN_UPDATE));
- ggtt_bound = true;
- }
+ if (!i915_vma_unbind(vma))
+ continue;
- if (ggtt_bound)
+ WARN_ON(i915_vma_bind(vma,
+ obj ? obj->cache_level : 0,
+ PIN_UPDATE));
+ if (obj)
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
- ggtt->base.closed = false;
+ ggtt->vm.closed = false;
+ i915_ggtt_invalidate(dev_priv);
if (INTEL_GEN(dev_priv) >= 8) {
struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3646,23 +3710,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
dev_priv->ppat.update_hw(dev_priv);
return;
}
-
- if (USES_PPGTT(dev_priv)) {
- struct i915_address_space *vm;
-
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- struct i915_hw_ppgtt *ppgtt;
-
- if (i915_is_ggtt(vm))
- ppgtt = dev_priv->mm.aliasing_ppgtt;
- else
- ppgtt = i915_vm_to_ppgtt(vm);
-
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
- }
- }
-
- i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
@@ -3880,7 +3927,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -3977,7 +4024,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -3988,7 +4035,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
mode = DRM_MM_INSERT_BEST;
if (flags & PIN_HIGH)
- mode = DRM_MM_INSERT_HIGH;
+ mode = DRM_MM_INSERT_HIGHEST;
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
@@ -4008,6 +4055,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (mode & DRM_MM_INSERT_ONCE) {
+ err = drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end,
+ DRM_MM_INSERT_BEST);
+ if (err != -ENOSPC)
+ return err;
+ }
+
if (flags & PIN_NOEVICT)
return -ENOSPC;
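The new DRM_MM_INSERT_ONCE branch above retries the search in a different mode before declaring the address space full. A toy two-pass allocator showing the same try-preferred-then-fall-back shape; the free map and sizes are made up:

#include <stdio.h>
#include <string.h>

#define SLOTS 8

static int find(const char *map, int size, int topdown)
{
	if (topdown) {
		for (int i = SLOTS - size; i >= 0; i--)
			if (!memchr(map + i, 0, size))	/* no busy slot inside */
				return i;
	} else {
		for (int i = 0; i + size <= SLOTS; i++)
			if (!memchr(map + i, 0, size))
				return i;
	}
	return -1;
}

int main(void)
{
	char map[SLOTS] = { 1, 1, 0, 1, 1, 1, 0, 0 };	/* 1 = free slot */
	int pos = find(map, 2, 1);	/* pass 1: preferred, top-down */

	if (pos < 0)
		pos = find(map, 2, 0);	/* pass 2: best-effort rescan */
	printf("placed at %d\n", pos);
	return 0;
}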
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aec4f73574f4..2a116a91420b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -58,6 +58,7 @@
struct drm_i915_file_private;
struct drm_i915_fence_reg;
+struct i915_vma;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
@@ -65,7 +66,7 @@ typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -254,6 +255,26 @@ struct i915_pml4 {
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_vma *vma);
+
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
+};
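Collecting the four callbacks into struct i915_vma_ops lets each address space install a single table instead of four separate pointers. A self-contained sketch of dispatching through such an ops table:

#include <stdio.h>

struct vma;

struct vma_ops {
	int (*bind)(struct vma *vma, unsigned int flags);
	void (*unbind)(struct vma *vma);
};

struct vma {
	const struct vma_ops *ops;
	const char *name;
};

static int ggtt_bind(struct vma *vma, unsigned int flags)
{
	printf("bind %s flags=%#x\n", vma->name, flags);
	return 0;
}

static void ggtt_unbind(struct vma *vma)
{
	printf("unbind %s\n", vma->name);
}

static const struct vma_ops ggtt_ops = {
	.bind = ggtt_bind,
	.unbind = ggtt_unbind,
};

int main(void)
{
	struct vma v = { .ops = &ggtt_ops, .name = "scanout" };

	v.ops->bind(&v, 0x1);
	v.ops->unbind(&v);
	return 0;
}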
+
+struct pagestash {
+ spinlock_t lock;
+ struct pagevec pvec;
+};
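struct pagestash pairs the free-page cache with its own lock rather than relying on an outer mutex. A userspace sketch of the same small-locked-cache idea, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

#define STASH_MAX 15	/* roughly a pagevec's worth */

struct pagestash {
	pthread_mutex_t lock;
	void *pages[STASH_MAX];
	unsigned int nr;
};

static void stash_init(struct pagestash *s)
{
	pthread_mutex_init(&s->lock, NULL);
	s->nr = 0;
}

static int stash_push(struct pagestash *s, void *page)
{
	int ok = 0;

	pthread_mutex_lock(&s->lock);
	if (s->nr < STASH_MAX) {
		s->pages[s->nr++] = page;
		ok = 1;
	}
	pthread_mutex_unlock(&s->lock);
	return ok;
}

static void *stash_pop(struct pagestash *s)
{
	void *page = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->nr)
		page = s->pages[--s->nr];
	pthread_mutex_unlock(&s->lock);
	return page;
}

int main(void)
{
	struct pagestash s;
	int x;

	stash_init(&s);
	stash_push(&s, &x);
	printf("popped %p\n", stash_pop(&s));
	return 0;
}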
+
struct i915_address_space {
struct drm_mm mm;
struct drm_i915_private *i915;
@@ -267,12 +288,13 @@ struct i915_address_space {
* assign blame.
*/
struct drm_i915_file_private *file;
- struct list_head global_link;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
bool closed;
+ struct mutex mutex; /* protects vma and our lists */
+
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -308,8 +330,13 @@ struct i915_address_space {
*/
struct list_head unbound_list;
- struct pagevec free_pages;
- bool pt_kmap_wc;
+ struct pagestash free_pages;
+
+ /* Some systems require uncached updates of the page directories */
+ bool pt_kmap_wc:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
@@ -331,15 +358,8 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
- /** Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page. */
- void (*unbind_vma)(struct i915_vma *vma);
- /* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
+
+ struct i915_vma_ops vma_ops;
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
I915_SELFTEST_DECLARE(bool scrub_64K);
@@ -367,7 +387,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
* the spec.
*/
struct i915_ggtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct io_mapping iomap; /* Mapping to our CPU mappable region */
struct resource gmadr; /* GMADR resource */
@@ -385,9 +405,9 @@ struct i915_ggtt {
};
struct i915_hw_ppgtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct kref ref;
- struct drm_mm_node node;
+
unsigned long pd_dirty_rings;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
@@ -395,13 +415,28 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd; /* GEN6-7 */
};
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+struct gen6_hw_ppgtt {
+ struct i915_hw_ppgtt base;
+
+ struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
- int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq);
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+ unsigned int pin_count;
+ bool scan_for_unused_pt;
};
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+
+static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
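to_gen6_ppgtt() is a container_of() downcast, and the BUILD_BUG_ON pins base as the first member so the cast is zero-cost. The same idiom stands alone like this, with _Static_assert playing the BUILD_BUG_ON role:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

struct derived {
	struct base base;	/* must stay first, see the assert below */
	int extra;
};

_Static_assert(offsetof(struct derived, base) == 0, "base must be first");

static struct derived *to_derived(struct base *b)
{
	return container_of(b, struct derived, base);
}

int main(void)
{
	struct derived d = { .base = { .id = 1 }, .extra = 2 };
	struct base *b = &d.base;

	printf("extra=%d\n", to_derived(b)->extra);
	return 0;
}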
+
/*
* gen6_for_each_pde() iterates over every pde from start until start+length.
* If start and start+length are not perfectly divisible, the macro will round
@@ -440,8 +475,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
const u64 mask = ~((1ULL << pde_shift) - 1);
u64 end;
- WARN_ON(length == 0);
- WARN_ON(offset_in_page(addr|length));
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
end = addr + length;
@@ -543,7 +578,7 @@ static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
+ return container_of(vm, struct i915_ggtt, vm);
}
#define INTEL_MAX_PPAT_ENTRIES 8
@@ -591,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name);
+ struct drm_i915_file_private *fpriv);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
@@ -605,6 +639,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 54f00b350779..83e5e01fa9ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
*/
- unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -268,7 +267,6 @@ struct drm_i915_gem_object {
union {
struct i915_gem_userptr {
uintptr_t ptr;
- unsigned read_only :1;
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
@@ -337,26 +335,17 @@ __attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
- drm_gem_object_reference(&obj->base);
+ drm_gem_object_get(&obj->base);
return obj;
}
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
- __drm_gem_object_unreference(&obj->base);
+ __drm_gem_object_put(&obj->base);
}
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
reservation_object_lock(obj->resv, NULL);
@@ -367,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
reservation_object_unlock(obj->resv);
}
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
+
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 1036e8686916..90baf9086d0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, rq, 0);
+ err = i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5757fb7c4b5a..ea90d3a0d511 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -23,6 +23,7 @@
*/
#include <linux/oom.h>
+#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -172,7 +173,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
* we will free as much as we can and hope to get a second chance.
*/
if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
trace_i915_gem_shrink(i915, target, flags);
i915_retire_requests(i915);
@@ -392,7 +395,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
- if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+ if (i915_gem_wait_for_idle(i915,
+ 0, MAX_SCHEDULE_TIMEOUT) == 0 &&
shrinker_lock(i915, unlock))
break;
@@ -466,7 +470,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -480,7 +486,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
- &i915->ggtt.base.inactive_list, vm_link) {
+ &i915->ggtt.vm.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
@@ -526,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&i915->mm.shrinker);
}
+
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ mutex_lock(mutex);
+ mutex_unlock(mutex);
+ fs_reclaim_release(GFP_KERNEL);
+}
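i915_gem_shrinker_taints_mutex() primes lockdep rather than doing any real work: taking the mutex once inside an fs_reclaim acquire/release pair records that the lock may be needed during memory reclaim, so a later allocation made while holding it is flagged as an inversion immediately instead of only when reclaim actually recurses. A kernel-context sketch of tainting a hypothetical lock at init time:

    /* Sketch only; my_cache_lock is a hypothetical mutex. */
    static DEFINE_MUTEX(my_cache_lock);

    static void my_cache_init(void)
    {
            /* Teach lockdep that my_cache_lock may be taken under reclaim. */
            i915_gem_shrinker_taints_mutex(&my_cache_lock);
    }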
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ad949cc30928..53440bf87650 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
default:
MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+ /* fall through */
case GEN7_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
@@ -343,6 +344,35 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
+static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ resource_size_t *base,
+ resource_size_t *size)
+{
+ u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+
+ *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN8_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_2M:
+ *size = 2 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_4M:
+ *size = 4 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_8M:
+ *size = 8 * 1024 * 1024;
+ break;
+ default:
+ *size = 8 * 1024 * 1024;
+ MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+ }
+}
+
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
@@ -399,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
gen7_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
- default:
+ case 8:
+ case 9:
+ case 10:
if (IS_LP(dev_priv))
chv_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
@@ -407,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
+ case 11:
+ default:
+ icl_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
}
/*
@@ -642,7 +679,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret)
goto err;
- vma = i915_vma_instance(obj, &ggtt->base, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -653,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
- ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
@@ -666,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
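icl_get_stolen_reserved() above decodes a packed register: the address bits are masked out directly, while a 2-bit size field selects a fixed size, and the default case still returns a sane 8M before warning via MISSING_CASE(). A standalone model of that mask-and-switch decode (the field layout here is illustrative, not the real register spec):

    #include <stdint.h>
    #include <stdio.h>

    #define SIZE_MASK (3u << 5)   /* hypothetical 2-bit size field */
    #define SIZE_1M   (0u << 5)
    #define SIZE_2M   (1u << 5)
    #define SIZE_4M   (2u << 5)

    static uint64_t decode_size(uint64_t reg)
    {
            switch (reg & SIZE_MASK) {
            case SIZE_1M: return 1ull << 20;
            case SIZE_2M: return 2ull << 20;
            case SIZE_4M: return 4ull << 20;
            default:      return 8ull << 20; /* unknown encodings fall back */
            }
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)decode_size(1u << 5)); /* 2097152 */
            return 0;
    }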
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 854bd51b9478..2c9b284036d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -112,10 +112,11 @@ static void del_object(struct i915_mmu_object *mo)
mo->attached = false;
}
-static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
@@ -124,7 +125,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
LIST_HEAD(cancelled);
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
- return;
+ return 0;
/* interval ranges are inclusive, but invalidate range is exclusive */
end--;
@@ -132,6 +133,10 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, start, end);
while (it) {
+ if (!blockable) {
+ spin_unlock(&mn->lock);
+ return -EAGAIN;
+ }
/* The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
@@ -154,6 +159,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
if (!list_empty(&cancelled))
flush_workqueue(mn->wq);
+
+ return 0;
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -507,7 +514,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
- if (!obj->userptr.read_only)
+ if (!i915_gem_object_is_readonly(obj))
flags |= FOLL_WRITE;
ret = -EFAULT;
@@ -643,7 +650,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (pvec) /* defer to worker if malloc fails */
pinned = __get_user_pages_fast(obj->userptr.ptr,
num_pages,
- !obj->userptr.read_only,
+ !i915_gem_object_is_readonly(obj),
pvec);
}
@@ -789,10 +796,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- /* On almost all of the current hw, we cannot tell the GPU that a
- * page is readonly, so this is just a placeholder in the uAPI.
+ struct i915_hw_ppgtt *ppgtt;
+
+ /*
+ * On almost all of the older hw, we cannot tell the GPU that
+ * a page is readonly.
*/
- return -ENODEV;
+ ppgtt = dev_priv->kernel_context->ppgtt;
+ if (!ppgtt || !ppgtt->vm.has_read_only)
+ return -ENODEV;
}
obj = i915_gem_object_alloc(dev_priv);
@@ -806,7 +818,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
- obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+ if (args->flags & I915_USERPTR_READ_ONLY)
+ i915_gem_object_set_readonly(obj);
/* And keep a pointer to the current->mm for resolving the user pages
* at binding. This means that we need to hook into the mmu_notifier
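The invalidate_range_start() change above follows the non-blocking mmu-notifier contract of this kernel era: when called with blockable == false (e.g. from the OOM reaper), the callback must not sleep and may instead bail out with -EAGAIN, which i915 does as soon as it finds any object it would have to wait on. A hedged kernel-context sketch of the pattern:

    /* Sketch of a non-blockable-aware notifier; my_* are hypothetical. */
    static int my_mn_invalidate_range_start(struct mmu_notifier *mn,
                                            struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end,
                                            bool blockable)
    {
            if (!my_has_overlapping_objects(mn, start, end))
                    return 0;       /* nothing to do, always safe */

            if (!blockable)
                    return -EAGAIN; /* caller cannot wait; retry later */

            my_cancel_and_flush(mn, start, end); /* may sleep */
            return 0;
    }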
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index df234dc23274..f7f2aa71d8d9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -31,6 +31,7 @@
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
+#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -335,21 +336,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
struct drm_i915_error_buffer *err,
int count)
{
- int i;
-
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x [ ",
+ err_printf(m, " %08x_%08x %8u %02x %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err_printf(m, "%02x ", err->rseqno[i]);
-
- err_printf(m, "] %02x", err->wseqno);
+ err->write_domain,
+ err->wseqno);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
@@ -522,35 +518,12 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static int
-ascii85_encode_len(int len)
-{
- return DIV_ROUND_UP(len, 4);
-}
-
-static bool
-ascii85_encode(u32 in, char *out)
-{
- int i;
-
- if (in == 0)
- return false;
-
- out[5] = '\0';
- for (i = 5; i--; ) {
- out[i] = '!' + in % 85;
- in /= 85;
- }
-
- return true;
-}
-
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct intel_engine_cs *engine,
const char *name,
struct drm_i915_error_object *obj)
{
- char out[6];
+ char out[ASCII85_BUFSZ];
int page;
if (!obj)
@@ -572,12 +545,8 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
len -= obj->unused;
len = ascii85_encode_len(len);
- for (i = 0; i < len; i++) {
- if (ascii85_encode(obj->pages[page][i], out))
- err_puts(m, out);
- else
- err_puts(m, "z");
- }
+ for (i = 0; i < len; i++)
+ err_puts(m, ascii85_encode(obj->pages[page][i], out));
}
err_puts(m, "\n");
}
@@ -973,8 +942,7 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
int ret;
- ggtt->base.insert_page(&ggtt->base, dma, slot,
- I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
@@ -993,7 +961,7 @@ unwind:
out:
compress_fini(&compress, dst);
- ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1022,13 +990,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- int i;
err->size = obj->base.size;
err->name = obj->base.name;
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
@@ -1051,6 +1016,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(vma, head, vm_link) {
+ if (!vma->obj)
+ continue;
+
if (pinned_only && !i915_vma_is_pinned(vma))
continue;
@@ -1287,9 +1255,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
- erq->context = request->ctx->hw_id;
+ struct i915_gem_context *ctx = request->gem_context;
+
+ erq->context = ctx->hw_id;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&request->ctx->ban_score);
+ erq->ban_score = atomic_read(&ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1267,7 @@ static void record_request(struct i915_request *request,
erq->tail = request->tail;
rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
rcu_read_unlock();
}
@@ -1461,12 +1431,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
request = i915_gem_find_active_request(engine);
if (request) {
+ struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = request->ctx->ppgtt ?
- &request->ctx->ppgtt->base : &ggtt->base;
+ ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
- record_context(&ee->context, request->ctx);
+ record_context(&ee->context, ctx);
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1483,11 +1453,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->ctx =
i915_error_object_create(i915,
- to_intel_context(request->ctx,
- engine)->state);
+ request->hw_context->state);
error->simulated |=
- i915_gem_context_no_error_capture(request->ctx);
+ i915_gem_context_no_error_capture(ctx);
ee->rq_head = request->head;
ee->rq_post = request->postfix;
@@ -1563,17 +1532,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
static void capture_pinned_buffers(struct i915_gpu_state *error)
{
- struct i915_address_space *vm = &error->i915->ggtt.base;
+ struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
count_inactive = 0;
- list_for_each_entry(vma, &vm->active_list, vm_link)
+ list_for_each_entry(vma, &vm->inactive_list, vm_link)
count_inactive++;
count_active = 0;
- list_for_each_entry(vma, &vm->inactive_list, vm_link)
+ list_for_each_entry(vma, &vm->active_list, vm_link)
count_active++;
bo = NULL;
@@ -1667,7 +1636,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
/* 4: Everything else */
- if (INTEL_GEN(dev_priv) >= 8) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ error->ier = I915_READ(GEN8_DE_MISC_IER);
+ error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+ error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+ error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+ error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+ error->ngtier = 6;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
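The open-coded encoder removed earlier in this file is replaced by the <linux/ascii85.h> helper, which keeps the same btoa-style semantics: each 32-bit word becomes five characters offset from '!', and an all-zero word collapses to the single character 'z'; ascii85_encode() fills a caller buffer of ASCII85_BUFSZ bytes and returns the string to print. A standalone model of that word encoding:

    #include <stdio.h>
    #include <stdint.h>

    /* Standalone model of the btoa-style word encoder. */
    static const char *ascii85_word(uint32_t in, char out[6])
    {
            if (in == 0)
                    return "z";             /* zero words compress to 'z' */

            out[5] = '\0';
            for (int i = 5; i--; ) {
                    out[i] = '!' + in % 85; /* base-85 digits, offset from '!' */
                    in /= 85;
            }
            return out;
    }

    int main(void)
    {
            char buf[6];
            printf("%s\n", ascii85_word(0, buf));          /* z */
            printf("%s\n", ascii85_word(0x12345678, buf)); /* five characters */
            return 0;
    }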
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index dac0f8c4c1cf..f893a4e8b783 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -58,7 +58,7 @@ struct i915_gpu_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
- u32 gtier[4], ngtier;
+ u32 gtier[6], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -177,7 +177,7 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno[I915_NUM_ENGINES], wseqno;
+ u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4a02747ac658..90628a47ae17 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -115,6 +115,22 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
+static const u32 hpd_gen11[HPD_NUM_PINS] = {
+ [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+ [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+ [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+ [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+};
+
+static const u32 hpd_icp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
+ [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
+ [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
+ [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
+};
+
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@ -247,9 +263,9 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit);
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit)
+static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int bit)
{
void __iomem * const regs = i915->regs;
u32 dw;
@@ -1138,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
+ const u32 seqno = intel_engine_get_seqno(engine);
struct i915_request *rq = NULL;
+ struct task_struct *tsk = NULL;
struct intel_wait *wait;
- if (!engine->breadcrumbs.irq_armed)
+ if (unlikely(!engine->breadcrumbs.irq_armed))
return;
- atomic_inc(&engine->irq_count);
- set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ rcu_read_lock();
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
- bool wakeup = engine->irq_seqno_barrier;
-
- /* We use a callback from the dma-fence to submit
+ /*
+ * We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
* hardware, signal the fence now rather than wait for
@@ -1163,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine)
* and to handle coalescing of multiple seqno updates
* and many waiters.
*/
- if (i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno)) {
+ if (i915_seqno_passed(seqno, wait->seqno)) {
struct i915_request *waiter = wait->request;
- wakeup = true;
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ if (waiter &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
intel_wait_check_request(wait, waiter))
rq = i915_request_get(waiter);
+
+ tsk = wait->tsk;
+ } else {
+ if (engine->irq_seqno_barrier &&
+ i915_seqno_passed(seqno, wait->seqno - 1)) {
+ set_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted);
+ tsk = wait->tsk;
+ }
}
- if (wakeup)
- wake_up_process(wait->tsk);
+ engine->breadcrumbs.irq_count++;
} else {
if (engine->breadcrumbs.irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
@@ -1183,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine)
spin_unlock(&engine->breadcrumbs.irq_lock);
if (rq) {
- dma_fence_signal(&rq->fence);
+ spin_lock(&rq->lock);
+ dma_fence_signal_locked(&rq->fence);
GEM_BUG_ON(!i915_request_completed(rq));
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
+ if (tsk && tsk->state & TASK_NORMAL)
+ wake_up_process(tsk);
+
+ rcu_read_unlock();
+
trace_intel_engine_notify(engine, wait);
}
@@ -1234,9 +1265,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
c0 = max(render, media);
c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
- if (c0 > time * rps->up_threshold)
+ if (c0 > time * rps->power.up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->down_threshold)
+ else if (c0 < time * rps->power.down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
}
@@ -1462,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
- if (READ_ONCE(engine->execlists.active))
- tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted);
- }
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
notify_ring(engine);
@@ -1477,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
}
if (tasklet)
- tasklet_hi_schedule(&execlists->tasklet);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
@@ -1549,78 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
}
}
-static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
+static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ return val & ICP_DDIA_HPD_LONG_DETECT;
+ case HPD_PORT_B:
+ return val & ICP_DDIB_HPD_LONG_DETECT;
+ default:
+ return false;
+ }
+}
+
+static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_E:
+ switch (pin) {
+ case HPD_PORT_E:
return val & PORTE_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool pch_port_hotplug_long_detect(enum port port, u32 val)
+static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
+static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_INT_LONG_PULSE;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_INT_LONG_PULSE;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_INT_LONG_PULSE;
default:
return false;
@@ -1638,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
- bool long_pulse_detect(enum port port, u32 val))
+ bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
- enum port port;
- int i;
+ enum hpd_pin pin;
- for_each_hpd_pin(i) {
- if ((hpd[i] & hotplug_trigger) == 0)
+ for_each_hpd_pin(pin) {
+ if ((hpd[pin] & hotplug_trigger) == 0)
continue;
- *pin_mask |= BIT(i);
-
- port = intel_hpd_pin_to_port(dev_priv, i);
- if (port == PORT_NONE)
- continue;
+ *pin_mask |= BIT(pin);
- if (long_pulse_detect(port, dig_hotplug_reg))
- *long_mask |= BIT(i);
+ if (long_pulse_detect(pin, dig_hotplug_reg))
+ *long_mask |= BIT(pin);
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
@@ -1680,69 +1746,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
uint32_t crc4)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_pipe_crc_entry *entry;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct drm_driver *driver = dev_priv->drm.driver;
uint32_t crcs[5];
- int head, tail;
spin_lock(&pipe_crc->lock);
- if (pipe_crc->source && !crtc->base.crc.opened) {
- if (!pipe_crc->entries) {
- spin_unlock(&pipe_crc->lock);
- DRM_DEBUG_KMS("spurious interrupt\n");
- return;
- }
-
- head = pipe_crc->head;
- tail = pipe_crc->tail;
-
- if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
- spin_unlock(&pipe_crc->lock);
- DRM_ERROR("CRC buffer overflowing\n");
- return;
- }
-
- entry = &pipe_crc->entries[head];
-
- entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
- entry->crc[0] = crc0;
- entry->crc[1] = crc1;
- entry->crc[2] = crc2;
- entry->crc[3] = crc3;
- entry->crc[4] = crc4;
-
- head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- pipe_crc->head = head;
-
- spin_unlock(&pipe_crc->lock);
-
- wake_up_interruptible(&pipe_crc->wq);
- } else {
- /*
- * For some not yet identified reason, the first CRC is
- * bonkers. So let's just wait for the next vblank and read
- * out the buggy result.
- *
- * On GEN8+ sometimes the second CRC is bonkers as well, so
- * don't trust that one either.
- */
- if (pipe_crc->skipped <= 0 ||
- (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
- pipe_crc->skipped++;
- spin_unlock(&pipe_crc->lock);
- return;
- }
+ /*
+ * For some not yet identified reason, the first CRC is
+ * bonkers. So let's just wait for the next vblank and read
+ * out the buggy result.
+ *
+ * On GEN8+ sometimes the second CRC is bonkers as well, so
+ * don't trust that one either.
+ */
+ if (pipe_crc->skipped <= 0 ||
+ (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
- drm_crtc_add_crc_entry(&crtc->base, true,
- drm_crtc_accurate_vblank_count(&crtc->base),
- crcs);
+ return;
}
+ spin_unlock(&pipe_crc->lock);
+
+ crcs[0] = crc0;
+ crcs[1] = crc1;
+ crcs[2] = crc2;
+ crcs[3] = crc3;
+ crcs[4] = crc4;
+ drm_crtc_add_crc_entry(&crtc->base, true,
+ drm_crtc_accurate_vblank_count(&crtc->base),
+ crcs);
}
#else
static inline void
@@ -1998,10 +2029,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_status = 0, hotplug_status_mask;
+ int i;
+
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+ DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+ else
+ hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+
+ /*
+ * We absolutely have to clear all the pending interrupt
+ * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+ * interrupt bit won't have an edge, and the i965/g4x
+ * edge triggered IIR will not notice that an interrupt
+ * is still pending. We can't use PORT_HOTPLUG_EN to
+ * guarantee the edge as the act of toggling the enable
+ * bits can itself generate a new hotplug interrupt :(
+ */
+ for (i = 0; i < 10; i++) {
+ u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+ if (tmp == 0)
+ return hotplug_status;
- if (hotplug_status)
+ hotplug_status |= tmp;
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ }
+
+ WARN_ONCE(1,
+ "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+ I915_READ(PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -2108,7 +2167,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
if (gt_iir)
snb_gt_irq_handler(dev_priv, gt_iir);
@@ -2193,7 +2251,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
@@ -2362,6 +2419,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+{
+ u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ u32 pin_mask = 0, long_mask = 0;
+
+ if (ddi_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
+ I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ ddi_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_ddi_port_hotplug_long_detect);
+ }
+
+ if (tc_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
+ I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ tc_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_tc_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+
+ if (pch_iir & SDE_GMBUS_ICP)
+ gmbus_irq_handler(dev_priv);
+}
+
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
@@ -2525,7 +2619,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
@@ -2535,7 +2628,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (!HAS_PCH_NOP(dev_priv)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
}
/* Find, clear, then process each source of interrupt */
@@ -2570,11 +2662,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}
I915_WRITE(DEIER, de_ier);
- POSTING_READ(DEIER);
- if (!HAS_PCH_NOP(dev_priv)) {
+ if (!HAS_PCH_NOP(dev_priv))
I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
- }
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(dev_priv);
@@ -2598,6 +2687,40 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
+static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+ u32 pin_mask = 0, long_mask = 0;
+ u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
+ u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+
+ if (trigger_tc) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (trigger_tbt) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ else
+ DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2633,6 +2756,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
+ if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ iir = I915_READ(GEN11_DE_HPD_IIR);
+ if (iir) {
+ I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ ret = IRQ_HANDLED;
+ gen11_hpd_irq_handler(dev_priv, iir);
+ } else {
+ DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+ }
+ }
+
if (master_ctl & GEN8_DE_PORT_IRQ) {
iir = I915_READ(GEN8_DE_PORT_IIR);
if (iir) {
@@ -2648,7 +2782,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ tmp_mask |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 11)
tmp_mask |= CNL_AUX_CHANNEL_F;
if (iir & tmp_mask) {
@@ -2732,8 +2870,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_handler(dev_priv, iir);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) ||
+ HAS_PCH_CNP(dev_priv))
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2950,11 +3091,44 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
spin_unlock(&i915->irq_lock);
}
+static void
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
+ u32 *iir)
+{
+ void __iomem * const regs = dev_priv->regs;
+
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+ if (likely(*iir))
+ raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+}
+
+static void
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
+ const u32 master_ctl, const u32 iir)
+{
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ if (unlikely(!iir)) {
+ DRM_ERROR("GU_MISC iir blank!\n");
+ return;
+ }
+
+ if (iir & GEN11_GU_MISC_GSE)
+ intel_opregion_asle_intr(dev_priv);
+ else
+ DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
+}
+
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
void __iomem * const regs = i915->regs;
u32 master_ctl;
+ u32 gu_misc_iir;
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
@@ -2983,9 +3157,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(i915);
}
+ gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+
/* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+ gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+
return IRQ_HANDLED;
}
@@ -3061,7 +3239,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
}
}
@@ -3472,7 +3650,12 @@ static void gen11_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(GEN8_DE_PORT_);
GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(GEN11_GU_MISC_);
GEN3_IRQ_RESET(GEN8_PCU_);
+
+ if (HAS_PCH_ICP(dev_priv))
+ GEN3_IRQ_RESET(SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3589,6 +3772,73 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug |= ICP_DDIA_HPD_ENABLE |
+ ICP_DDIB_HPD_ENABLE;
+ I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
+ ICP_TC_HPD_ENABLE(PORT_TC2) |
+ ICP_TC_HPD_ENABLE(PORT_TC3) |
+ ICP_TC_HPD_ENABLE(PORT_TC4);
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+}
+
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
+static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+
+ hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+}
+
+static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+ u32 val;
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+ hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+
+ val = I915_READ(GEN11_DE_HPD_IMR);
+ val &= ~hotplug_irqs;
+ I915_WRITE(GEN11_DE_HPD_IMR, val);
+ POSTING_READ(GEN11_DE_HPD_IMR);
+
+ gen11_hpd_detection_setup(dev_priv);
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_hpd_irq_setup(dev_priv);
+}
+
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 val, hotplug;
@@ -3915,9 +4165,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
- u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
+ u32 de_misc_masked = GEN8_DE_EDP_PSR;
enum pipe pipe;
+ if (INTEL_GEN(dev_priv) <= 10)
+ de_misc_masked |= GEN8_DE_MISC_GSE;
+
if (INTEL_GEN(dev_priv) >= 9) {
de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
@@ -3928,7 +4181,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ de_port_masked |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
de_port_masked |= CNL_AUX_CHANNEL_F;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3956,10 +4212,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11) {
+ u32 de_hpd_masked = 0;
+ u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
+ GEN11_DE_TBT_HOTPLUG_MASK;
+
+ GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ gen11_hpd_detection_setup(dev_priv);
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
- else if (IS_BROADWELL(dev_priv))
+ } else if (IS_BROADWELL(dev_priv)) {
ilk_hpd_detection_setup(dev_priv);
+ }
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4008,13 +4272,34 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
+static void icp_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask = SDE_GMBUS_ICP;
+
+ WARN_ON(I915_READ(SDEIER) != 0);
+ I915_WRITE(SDEIER, 0xffffffff);
+ POSTING_READ(SDEIER);
+
+ gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ I915_WRITE(SDEIMR, ~mask);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
+ GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
@@ -4062,11 +4347,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
@@ -4081,6 +4368,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u16 *eir, u16 *eir_stuck)
+{
+ u16 emr;
+
+ *eir = I915_READ16(EIR);
+
+ if (*eir)
+ I915_WRITE16(EIR, *eir);
+
+ *eir_stuck = I915_READ16(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ16(EMR);
+ I915_WRITE16(EMR, 0xffff);
+ I915_WRITE16(EMR, emr | *eir_stuck);
+}
+
+static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u16 eir, u16 eir_stuck)
+{
+ DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+}
+
+static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u32 *eir, u32 *eir_stuck)
+{
+ u32 emr;
+
+ *eir = I915_READ(EIR);
+
+ I915_WRITE(EIR, *eir);
+
+ *eir_stuck = I915_READ(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ(EMR);
+ I915_WRITE(EMR, 0xffffffff);
+ I915_WRITE(EMR, emr | *eir_stuck);
+}
+
+static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u32 eir, u32 eir_stuck)
+{
+ DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+}
+
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -4095,6 +4457,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u16 eir = 0, eir_stuck = 0;
u16 iir;
iir = I915_READ16(IIR);
@@ -4107,13 +4470,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE16(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
@@ -4151,12 +4517,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4194,6 +4562,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4211,13 +4580,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4271,14 +4643,14 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (IS_G4X(dev_priv))
@@ -4338,6 +4710,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4354,6 +4727,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
@@ -4362,8 +4738,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev_priv->engine[VCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4478,7 +4854,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen11_irq_reset;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
} else if (INTEL_GEN(dev_priv) >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
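Throughout the gen11 handler changes above, the ordering is: disable the master interrupt, read and ack the per-bank IIRs (including the new GU_MISC ack helper), re-enable the master, and only then run the slower handlers. Acking while the master is disabled preserves the edge for any interrupt that arrives mid-handler. A compressed sketch of that top half, using the raw register helpers the file already relies on:

    /* Kernel-context sketch of the gen11 top-half ordering. */
    master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
    raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);            /* 1: mask master */

    gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); /* 2: ack banks */

    raw_reg_write(regs, GEN11_GFX_MSTR_IRQ,
                  GEN11_MASTER_IRQ | master_ctl);          /* 3: re-enable */

    gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); /* 4: handle */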
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 66ea3552c63e..295e981e4a39 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-i915_param_named_unsafe(panel_ignore_lid, int, 0600,
- "Override lid status (0=autodetect, 1=autodetect disabled [default], "
- "-1=force lid closed, -2=force lid open)");
-
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
@@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
- "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
i915_param_named_unsafe(alpha_support, bool, 0400,
@@ -130,9 +126,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
- "Enable command parsing (true=enabled [default], false=disabled)");
-
i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6684025b7af8..6c4d4a21474b 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,7 +36,6 @@ struct drm_printer;
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
- param(int, panel_ignore_lid, 1) \
param(int, lvds_channel_mode, 0) \
param(int, panel_use_ssc, -1) \
param(int, vbt_sdvo_panel_type, -1) \
@@ -58,7 +57,6 @@ struct drm_printer;
param(unsigned int, inject_load_failure, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_cmd_parser, true) \
param(bool, enable_hangcheck, true) \
param(bool, fastboot, false) \
param(bool, prefault_disable, false) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4364922e935d..6a4d1388ad2d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
GEN(7),
.is_lp = 1,
.num_pipes = 2,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_gmch_display = 1,
@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
@@ -659,12 +657,15 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+ INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
@@ -673,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static void i915_pci_remove(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev) /* driver load aborted, nothing to clean up */
+ return;
i915_driver_unload(dev);
drm_dev_put(dev);
+
+ pci_set_drvdata(pdev, NULL);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -711,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
+ if (i915_inject_load_failure()) {
+ i915_pci_remove(pdev);
+ return -ENODEV;
+ }
+
err = i915_live_selftests(pdev);
if (err) {
i915_pci_remove(pdev);
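i915_pci_remove() now tolerates being called when probe never completed: pci_get_drvdata() returns NULL in that case, and clearing it again on the way out makes the function safe to call twice, which the selftest and injected-failure paths above do. A hedged sketch of the same guard pattern for any PCI driver:

    /* Sketch of an idempotent PCI .remove(), independent of i915. */
    static void my_pci_remove(struct pci_dev *pdev)
    {
            struct my_dev *md = pci_get_drvdata(pdev);

            if (!md)                 /* probe aborted or already removed */
                    return;

            my_dev_teardown(md);     /* hypothetical teardown helper */
            pci_set_drvdata(pdev, NULL);
    }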
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 019bd2d073ad..6bf10952c724 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -315,7 +315,7 @@ static u32 i915_oa_max_sample_rate = 100000;
* code assumes all reports have a power-of-two size and ~(size - 1) can
* be used as a mask to align the OA tail pointer.
*/
-static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A13] = { 0, 64 },
[I915_OA_FORMAT_A29] = { 1, 128 },
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
@@ -326,7 +326,7 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
-static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
continue;
}
- /*
- * XXX: Just keep the lower 21 bits for now since I'm not
- * entirely sure if the HW touches any of the higher bits in
- * this field
- */
- ctx_id = report32[2] & 0x1fffff;
+ ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_context *ce;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&i915->drm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * As the ID is the gtt offset of the context's vma we
+ * pin the vma to ensure the ID remains fixed.
+ *
+ * NB: implied RCS engine...
+ */
+ ce = intel_context_pin(ctx, engine);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ce))
+ return ce;
+
+ i915->perf.oa.pinned_ctx = ce;
+
+ return ce;
+}
+
/**
* oa_get_render_ctx_id - determine and hold ctx hw id
* @stream: An i915-perf stream opened for OA metrics
@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
- struct intel_ring *ring;
- int ret;
-
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
+ ce = oa_pin_context(i915, stream->ctx);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+ switch (INTEL_GEN(i915)) {
+ case 7: {
/*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
+ * On Haswell we don't do any post processing of the reports
+ * and don't need to use the mask.
*/
- ring = intel_context_pin(stream->ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+ i915->perf.oa.specific_ctx_id_mask = 0;
+ break;
+ }
+ case 8:
+ case 9:
+ case 10:
+ if (USES_GUC_SUBMISSION(i915)) {
+ /*
+ * When using GuC, the context descriptor we write in
+ * i915 is read by GuC and rewritten before it's
+ * actually written into the hardware. The LRCA is
+ * what is put into the context id field of the
+ * context descriptor by GuC. Because it's aligned to
+ * a page, the lower 12bits are always at 0 and
+ * dropped by GuC. They won't be part of the context
+ * ID in the OA reports, so squash those lower bits.
+ */
+ i915->perf.oa.specific_ctx_id =
+ lower_32_bits(ce->lrc_desc) >> 12;
- /*
- * Explicitly track the ID (instead of calling
- * i915_ggtt_offset() on the fly) considering the difference
- * with gen8+ and execlists
- */
- dev_priv->perf.oa.specific_ctx_id =
- i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+ /*
+ * GuC uses the top bit to signal proxy submission, so
+ * ignore that bit.
+ */
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+ } else {
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ i915->perf.oa.specific_ctx_id =
+ upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ }
+ break;
+
+ case 11: {
+ i915->perf.oa.specific_ctx_id_mask =
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+ ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+ ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ break;
+ }
+
+ default:
+ MISSING_CASE(INTEL_GEN(i915));
}
+ DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ i915->perf.oa.specific_ctx_id,
+ i915->perf.oa.specific_ctx_id_mask);
+
return 0;
}
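The id/mask pair computed above is what gen8_append_oa_reports() uses earlier in this file: the CTX_ID field of each OA report is masked before comparing, so bits the hardware or GuC repurposes (proxy-submission bit, engine class/instance fields) never cause a spurious mismatch. A standalone model of the filter check, with a hypothetical field width:

    #include <stdint.h>
    #include <stdio.h>

    /* Standalone model of the masked context-id comparison. */
    static int report_matches(uint32_t report_ctx_id,
                              uint32_t specific_ctx_id, uint32_t mask)
    {
            /* specific_ctx_id is stored pre-masked, as in the driver. */
            return (report_ctx_id & mask) == specific_ctx_id;
    }

    int main(void)
    {
            uint32_t mask = (1u << 20) - 1;  /* hypothetical 20-bit field */
            uint32_t id   = 0x12345 & mask;

            printf("%d\n", report_matches(0x80012345, id, mask)); /* 1: top bits ignored */
            return 0;
    }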
@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+ dev_priv->perf.oa.specific_ctx_id_mask = 0;
+ ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ if (ce) {
mutex_lock(&dev_priv->drm.struct_mutex);
-
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- intel_context_unpin(stream->ctx, engine);
-
+ intel_context_unpin(ce);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
}
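[editor's note] fetch_and_zero() reads the pinned_ctx pointer and clears it in one expression, so teardown unpins at most once even if reached twice. Its i915 definition is roughly the GNU statement expression below; note this is not an SMP-atomic exchange, the caller relies on struct_mutex (or the single-stream nature of i915-perf) for exclusion:

	#include <assert.h>
	#include <stddef.h>

	/* Close to the i915 helper: swap *ptr with zero, return the old value. */
	#define fetch_and_zero(ptr) ({			\
		typeof(*(ptr)) __T = *(ptr);		\
		*(ptr) = (typeof(*(ptr)))0;		\
		__T;					\
	})

	int main(void)
	{
		int x = 5, *p = &x;
		int *old = fetch_and_zero(&p);

		assert(old == &x && p == NULL);
		/* A second call now yields NULL, making teardown idempotent. */
		assert(fetch_and_zero(&p) == NULL);
		return 0;
	}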
@@ -1780,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* So far the best way to work around this issue seems to be draining
* the GPU from any submitted work.
*/
- ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
+ ret = i915_gem_wait_for_idle(dev_priv,
+ wait_flags,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index dc87797db500..d6c8f8fdfda5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -4,6 +4,7 @@
* Copyright © 2017-2018 Intel Corporation
*/
+#include <linux/irq.h>
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
@@ -127,6 +128,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
i915->pmu.timer_enabled = true;
+ i915->pmu.timer_last = ktime_get();
hrtimer_start_range_ns(&i915->pmu.timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
@@ -155,12 +157,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
}
static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
{
- sample->cur += mul_u32_u32(val, unit);
+ sample->cur += val;
}
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -182,8 +185,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = !i915_seqno_passed(current_seqno, last_seqno);
- update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- PERIOD, val);
+ if (val)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ period_ns);
if (val && (engine->pmu.enable &
(BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
@@ -194,11 +198,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = 0;
}
- update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- PERIOD, !!(val & RING_WAIT));
+ if (val & RING_WAIT)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ period_ns);
- update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ period_ns);
}
if (fw)
@@ -207,7 +213,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+ sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
@@ -221,15 +234,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- 1, intel_gpu_freq(dev_priv, val));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(dev_priv, val),
+ period_ns / 1000);
}
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq),
+ period_ns / 1000);
}
}
@@ -237,14 +252,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ unsigned int period_ns;
+ ktime_t now;
if (!READ_ONCE(i915->pmu.timer_enabled))
return HRTIMER_NORESTART;
- engines_sample(i915);
- frequency_sample(i915);
+ now = ktime_get();
+ period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+ i915->pmu.timer_last = now;
+
+ /*
+ * Strictly speaking the passed in period may not be 100% accurate for
+ * all internal calculation, since some amount of time can be spent on
+ * grabbing the forcewake. However the potential error from timer call-
+ * back delay greatly dominates this so we keep it simple.
+ */
+ engines_sample(i915, period_ns);
+ frequency_sample(i915, period_ns);
+
+ hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
- hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
return HRTIMER_RESTART;
}
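[editor's note] The rework above replaces the assumption of a fixed PERIOD per tick with the measured wall time between callbacks: busy counters gain period_ns per busy tick, and frequency counters accumulate freq_MHz * period_us. Dividing the running sum by USEC_PER_SEC on read (next hunk) yields MHz*seconds, which perf tooling then divides by elapsed time to report an average frequency. A worked sketch of the arithmetic, with invented sample values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Two timer ticks of uneven length (values invented):
		 * 5.1 ms at 300 MHz, then 4.9 ms at 1000 MHz.
		 */
		uint64_t cur = 0;

		cur += 300ULL  * (5100000 / 1000);	/* freq_MHz * period_us */
		cur += 1000ULL * (4900000 / 1000);

		/* Average over the 10 ms window: 6430000 MHz*us / 10000 us.
		 * The driver instead scales by USEC_PER_SEC on read and
		 * lets perf divide by the elapsed wall time.
		 */
		printf("average frequency: %llu MHz\n",
		       (unsigned long long)(cur / 10000));	/* 643 */
		return 0;
	}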
@@ -519,12 +547,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_ACTUAL_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 2ba735299f7c..7f164ca3db12 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -65,6 +65,14 @@ struct i915_pmu {
* event types.
*/
u64 enable;
+
+ /**
+ * @timer_last:
+ *
+ * Timestamp of the previous timer invocation.
+ */
+ ktime_t timer_last;
+
/**
* @enable_count: Reference counts for the enabled events.
*
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 195203f298df..eeaa3d506d95 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -54,6 +54,7 @@ enum vgt_g2v_type {
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
+#define VGT_CAPS_HUGE_GTT BIT(4)
struct vgt_if {
u64 magic; /* VGT_MAGIC */
@@ -93,7 +94,10 @@ struct vgt_if {
u32 rsv5[4];
u32 g2v_notify;
- u32 rsv6[7];
+ u32 rsv6[5];
+
+ u32 cursor_x_hot;
+ u32 cursor_y_hot;
struct {
u32 lo;
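[editor's note] Shrinking rsv6 from 7 to 5 slots exactly pays for the two new u32 cursor hotspot fields, so the overall vgt_if layout, and every later member's offset, is unchanged. A hypothetical compile-time guard for that kind of shared-ABI invariant, using a toy struct rather than the real one:

	#include <stdint.h>

	/* Toy before/after versions of a reserved-field carve-out. */
	struct before { uint32_t g2v_notify; uint32_t rsv6[7]; };
	struct after  { uint32_t g2v_notify; uint32_t rsv6[5];
			uint32_t cursor_x_hot; uint32_t cursor_y_hot; };

	/* C11 static assert; the kernel itself would use BUILD_BUG_ON(). */
	_Static_assert(sizeof(struct before) == sizeof(struct after),
		       "guest/host ABI layout must not change size");

	int main(void) { return 0; }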
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7720569f2024..08ec7446282e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,23 +139,40 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
-#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+/*
+ * Named helper wrappers around _PICK_EVEN() and _PICK().
+ */
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PIPE(plane, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
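[editor's note] _PICK_EVEN() exploits the even spacing to compute the instance address arithmetically, while _PICK() materializes a compound-literal u32 array and indexes it, which is why the comments above prefer the former when spacing allows. A standalone comparison using invented register offsets:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;

	#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
	#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])

	int main(void)
	{
		/* Hypothetical per-pipe register: pipe A at 0x70000, stride 0x1000. */
		printf("pipe B: 0x%x\n", _PICK_EVEN(1, 0x70000, 0x71000)); /* 0x71000 */
		printf("pipe C: 0x%x\n", _PICK_EVEN(2, 0x70000, 0x71000)); /* 0x72000 */

		/* Unevenly spaced instances need the array form. */
		printf("inst 2: 0x%x\n", _PICK(2, 0x6c000, 0x162000, 0x6d000));
		return 0;
	}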
+#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
@@ -164,7 +181,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
BUILD_BUG_ON_MSG((value) & ~(mask), \
"Incorrect value for mask"); \
- (mask) << 16 | (value); })
+ __MASKED_FIELD(mask, value); })
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
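[editor's note] Masked registers carry a write-enable mask in their upper 16 bits: the hardware updates only the bits whose mask bit is set, so no read-modify-write cycle is needed. __MASKED_FIELD() is the new unchecked core that _MASKED_FIELD() wraps with build-time checks. A sketch of how a device-side register would apply such a write; the hardware model here is an assumption for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
	#define _MASKED_BIT_ENABLE(a)  __MASKED_FIELD((a), (a))
	#define _MASKED_BIT_DISABLE(a) __MASKED_FIELD((a), 0)

	/* Model of the hardware's side: merge only the enabled bits. */
	static uint32_t apply_masked_write(uint32_t reg, uint32_t wr)
	{
		uint32_t mask = wr >> 16;

		return (reg & ~mask) | (wr & mask & 0xffff);
	}

	int main(void)
	{
		uint32_t reg = 0x0005;

		reg = apply_masked_write(reg, _MASKED_BIT_ENABLE(1 << 3));
		reg = apply_masked_write(reg, _MASKED_BIT_DISABLE(1 << 0));
		printf("reg = 0x%04x\n", reg);	/* 0x000c: bit 3 set, bit 0 cleared */
		return 0;
	}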
@@ -270,19 +287,19 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define ILK_GRDOM_FULL (0<<1)
-#define ILK_GRDOM_RENDER (1<<1)
-#define ILK_GRDOM_MEDIA (3<<1)
-#define ILK_GRDOM_MASK (3<<1)
-#define ILK_GRDOM_RESET_ENABLE (1<<0)
+#define ILK_GRDOM_FULL (0 << 1)
+#define ILK_GRDOM_RENDER (1 << 1)
+#define ILK_GRDOM_MEDIA (3 << 1)
+#define ILK_GRDOM_MASK (3 << 1)
+#define ILK_GRDOM_RESET_ENABLE (1 << 0)
#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
-#define GEN6_MBC_SNPCR_MASK (3<<21)
-#define GEN6_MBC_SNPCR_MAX (0<<21)
-#define GEN6_MBC_SNPCR_MED (1<<21)
-#define GEN6_MBC_SNPCR_LOW (2<<21)
-#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define GEN6_MBC_SNPCR_MASK (3 << 21)
+#define GEN6_MBC_SNPCR_MAX (0 << 21)
+#define GEN6_MBC_SNPCR_MED (1 << 21)
+#define GEN6_MBC_SNPCR_LOW (2 << 21)
+#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
@@ -314,13 +331,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GRDOM_VECS (1 << 13)
#define GEN11_GRDOM_VECS2 (1 << 14)
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
+#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
+#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -358,25 +375,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
#define GAM_ECOCHK _MMIO(0x4090)
-#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
-#define ECOCHK_SNB_BIT (1<<10)
-#define ECOCHK_DIS_TLB (1<<8)
-#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
-#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
-#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
-#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
-#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
-#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
-#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
-#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define ECOCHK_SNB_BIT (1 << 10)
+#define ECOCHK_DIS_TLB (1 << 8)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
+#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
+#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
+#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
+#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
#define GAC_ECO_BITS _MMIO(0x14090)
-#define ECOBITS_SNB_BIT (1<<13)
-#define ECOBITS_PPGTT_CACHE64B (3<<8)
-#define ECOBITS_PPGTT_CACHE4B (0<<8)
+#define ECOBITS_SNB_BIT (1 << 13)
+#define ECOBITS_PPGTT_CACHE64B (3 << 8)
+#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GAB_CTL _MMIO(0x24000)
-#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
@@ -395,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
+#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
/* VGA stuff */
@@ -404,15 +422,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _VGA_MSR_WRITE _MMIO(0x3c2)
#define VGA_MSR_WRITE 0x3c2
#define VGA_MSR_READ 0x3cc
-#define VGA_MSR_MEM_EN (1<<1)
-#define VGA_MSR_CGA_MODE (1<<0)
+#define VGA_MSR_MEM_EN (1 << 1)
+#define VGA_MSR_CGA_MODE (1 << 0)
#define VGA_SR_INDEX 0x3c4
#define SR01 1
#define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0
-#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_VID_EN (1 << 5)
#define VGA_AR_DATA_WRITE 0x3c0
#define VGA_AR_DATA_READ 0x3c1
@@ -445,8 +463,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
-#define LOWER_SLICE_ENABLED (1<<0)
-#define LOWER_SLICE_DISABLED (0<<0)
+#define LOWER_SLICE_ENABLED (1 << 0)
+#define LOWER_SLICE_DISABLED (0 << 0)
/*
* Registers used only by the command parser
@@ -504,47 +522,47 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000
#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F
#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6
-#define GEN7_OACONTROL_TIMER_ENABLE (1<<5)
-#define GEN7_OACONTROL_FORMAT_A13 (0<<2)
-#define GEN7_OACONTROL_FORMAT_A29 (1<<2)
-#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2)
-#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2)
-#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2)
-#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2)
+#define GEN7_OACONTROL_TIMER_ENABLE (1 << 5)
+#define GEN7_OACONTROL_FORMAT_A13 (0 << 2)
+#define GEN7_OACONTROL_FORMAT_A29 (1 << 2)
+#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2 << 2)
+#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8 (4 << 2)
+#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6 << 2)
+#define GEN7_OACONTROL_FORMAT_C4_B8 (7 << 2)
#define GEN7_OACONTROL_FORMAT_SHIFT 2
-#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1)
-#define GEN7_OACONTROL_ENABLE (1<<0)
+#define GEN7_OACONTROL_PER_CTX_ENABLE (1 << 1)
+#define GEN7_OACONTROL_ENABLE (1 << 0)
#define GEN8_OACTXID _MMIO(0x2364)
#define GEN8_OA_DEBUG _MMIO(0x2B04)
-#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1<<5)
-#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1<<6)
-#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1<<2)
-#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1<<1)
+#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
#define GEN8_OACONTROL _MMIO(0x2B00)
-#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
-#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
-#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2)
-#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2)
+#define GEN8_OA_REPORT_FORMAT_A12 (0 << 2)
+#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2 << 2)
+#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5 << 2)
+#define GEN8_OA_REPORT_FORMAT_C4_B8 (7 << 2)
#define GEN8_OA_REPORT_FORMAT_SHIFT 2
-#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_ENABLE (1<<0)
+#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_ENABLE (1 << 0)
#define GEN8_OACTXCONTROL _MMIO(0x2360)
#define GEN8_OA_TIMER_PERIOD_MASK 0x3F
#define GEN8_OA_TIMER_PERIOD_SHIFT 2
-#define GEN8_OA_TIMER_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_RESUME (1<<0)
+#define GEN8_OA_TIMER_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_RESUME (1 << 0)
#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
-#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3)
-#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2)
-#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
-#define GEN7_OABUFFER_RESUME (1<<0)
+#define GEN7_OABUFFER_OVERRUN_DISABLE (1 << 3)
+#define GEN7_OABUFFER_EDGE_TRIGGER (1 << 2)
+#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1 << 1)
+#define GEN7_OABUFFER_RESUME (1 << 0)
#define GEN8_OABUFFER_UDW _MMIO(0x23b4)
#define GEN8_OABUFFER _MMIO(0x2b14)
@@ -552,33 +570,33 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OASTATUS1 _MMIO(0x2364)
#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
-#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2)
-#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1)
-#define GEN7_OASTATUS1_REPORT_LOST (1<<0)
+#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1 << 2)
+#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1 << 1)
+#define GEN7_OASTATUS1_REPORT_LOST (1 << 0)
#define GEN7_OASTATUS2 _MMIO(0x2368)
#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN8_OASTATUS _MMIO(0x2b08)
-#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
-#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2)
-#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1)
-#define GEN8_OASTATUS_REPORT_LOST (1<<0)
+#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
+#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
+#define GEN8_OASTATUS_REPORT_LOST (1 << 0)
#define GEN8_OAHEADPTR _MMIO(0x2B0C)
#define GEN8_OAHEADPTR_MASK 0xffffffc0
#define GEN8_OATAILPTR _MMIO(0x2B10)
#define GEN8_OATAILPTR_MASK 0xffffffc0
-#define OABUFFER_SIZE_128K (0<<3)
-#define OABUFFER_SIZE_256K (1<<3)
-#define OABUFFER_SIZE_512K (2<<3)
-#define OABUFFER_SIZE_1M (3<<3)
-#define OABUFFER_SIZE_2M (4<<3)
-#define OABUFFER_SIZE_4M (5<<3)
-#define OABUFFER_SIZE_8M (6<<3)
-#define OABUFFER_SIZE_16M (7<<3)
+#define OABUFFER_SIZE_128K (0 << 3)
+#define OABUFFER_SIZE_256K (1 << 3)
+#define OABUFFER_SIZE_512K (2 << 3)
+#define OABUFFER_SIZE_1M (3 << 3)
+#define OABUFFER_SIZE_2M (4 << 3)
+#define OABUFFER_SIZE_4M (5 << 3)
+#define OABUFFER_SIZE_8M (6 << 3)
+#define OABUFFER_SIZE_16M (7 << 3)
/*
* Flexible, Aggregate EU Counter Registers.
@@ -601,35 +619,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
#define OASTARTTRIG2 _MMIO(0x2714)
-#define OASTARTTRIG2_INVERT_A_0 (1<<0)
-#define OASTARTTRIG2_INVERT_A_1 (1<<1)
-#define OASTARTTRIG2_INVERT_A_2 (1<<2)
-#define OASTARTTRIG2_INVERT_A_3 (1<<3)
-#define OASTARTTRIG2_INVERT_A_4 (1<<4)
-#define OASTARTTRIG2_INVERT_A_5 (1<<5)
-#define OASTARTTRIG2_INVERT_A_6 (1<<6)
-#define OASTARTTRIG2_INVERT_A_7 (1<<7)
-#define OASTARTTRIG2_INVERT_A_8 (1<<8)
-#define OASTARTTRIG2_INVERT_A_9 (1<<9)
-#define OASTARTTRIG2_INVERT_A_10 (1<<10)
-#define OASTARTTRIG2_INVERT_A_11 (1<<11)
-#define OASTARTTRIG2_INVERT_A_12 (1<<12)
-#define OASTARTTRIG2_INVERT_A_13 (1<<13)
-#define OASTARTTRIG2_INVERT_A_14 (1<<14)
-#define OASTARTTRIG2_INVERT_A_15 (1<<15)
-#define OASTARTTRIG2_INVERT_B_0 (1<<16)
-#define OASTARTTRIG2_INVERT_B_1 (1<<17)
-#define OASTARTTRIG2_INVERT_B_2 (1<<18)
-#define OASTARTTRIG2_INVERT_B_3 (1<<19)
-#define OASTARTTRIG2_INVERT_C_0 (1<<20)
-#define OASTARTTRIG2_INVERT_C_1 (1<<21)
-#define OASTARTTRIG2_INVERT_D_0 (1<<22)
-#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28)
-#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29)
-#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30)
-#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31)
+#define OASTARTTRIG2_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG2_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG2_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG2_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG2_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG2_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG2_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG2_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG2_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG2_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG2_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG2_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG2_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG2_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG2_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG2_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG2_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG2_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG2_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG2_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG2_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG2_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG2_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG2_EVENT_SELECT_0 (1 << 28)
+#define OASTARTTRIG2_EVENT_SELECT_1 (1 << 29)
+#define OASTARTTRIG2_EVENT_SELECT_2 (1 << 30)
+#define OASTARTTRIG2_EVENT_SELECT_3 (1 << 31)
#define OASTARTTRIG3 _MMIO(0x2718)
#define OASTARTTRIG3_NOA_SELECT_MASK 0xf
@@ -658,35 +676,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG5_THRESHOLD_MASK 0xffff
#define OASTARTTRIG6 _MMIO(0x2724)
-#define OASTARTTRIG6_INVERT_A_0 (1<<0)
-#define OASTARTTRIG6_INVERT_A_1 (1<<1)
-#define OASTARTTRIG6_INVERT_A_2 (1<<2)
-#define OASTARTTRIG6_INVERT_A_3 (1<<3)
-#define OASTARTTRIG6_INVERT_A_4 (1<<4)
-#define OASTARTTRIG6_INVERT_A_5 (1<<5)
-#define OASTARTTRIG6_INVERT_A_6 (1<<6)
-#define OASTARTTRIG6_INVERT_A_7 (1<<7)
-#define OASTARTTRIG6_INVERT_A_8 (1<<8)
-#define OASTARTTRIG6_INVERT_A_9 (1<<9)
-#define OASTARTTRIG6_INVERT_A_10 (1<<10)
-#define OASTARTTRIG6_INVERT_A_11 (1<<11)
-#define OASTARTTRIG6_INVERT_A_12 (1<<12)
-#define OASTARTTRIG6_INVERT_A_13 (1<<13)
-#define OASTARTTRIG6_INVERT_A_14 (1<<14)
-#define OASTARTTRIG6_INVERT_A_15 (1<<15)
-#define OASTARTTRIG6_INVERT_B_0 (1<<16)
-#define OASTARTTRIG6_INVERT_B_1 (1<<17)
-#define OASTARTTRIG6_INVERT_B_2 (1<<18)
-#define OASTARTTRIG6_INVERT_B_3 (1<<19)
-#define OASTARTTRIG6_INVERT_C_0 (1<<20)
-#define OASTARTTRIG6_INVERT_C_1 (1<<21)
-#define OASTARTTRIG6_INVERT_D_0 (1<<22)
-#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28)
-#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29)
-#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30)
-#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31)
+#define OASTARTTRIG6_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG6_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG6_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG6_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG6_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG6_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG6_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG6_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG6_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG6_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG6_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG6_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG6_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG6_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG6_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG6_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG6_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG6_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG6_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG6_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG6_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG6_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG6_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG6_EVENT_SELECT_4 (1 << 28)
+#define OASTARTTRIG6_EVENT_SELECT_5 (1 << 29)
+#define OASTARTTRIG6_EVENT_SELECT_6 (1 << 30)
+#define OASTARTTRIG6_EVENT_SELECT_7 (1 << 31)
#define OASTARTTRIG7 _MMIO(0x2728)
#define OASTARTTRIG7_NOA_SELECT_MASK 0xf
@@ -715,31 +733,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG2 _MMIO(0x2744)
-#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG2_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG2_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG2_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG2_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG2_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG2_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG2_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG2_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG2_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG2_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG2_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG2_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG2_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG2_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG2_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG2_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG2_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG2_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG2_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG2_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG2_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG2_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG2_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG3 _MMIO(0x2748)
#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
@@ -768,31 +786,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG6 _MMIO(0x2754)
-#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG6_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG6_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG6_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG6_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG6_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG6_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG6_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG6_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG6_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG6_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG6_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG6_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG6_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG6_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG6_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG6_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG6_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG6_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG6_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG6_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG6_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG6_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG6_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG7 _MMIO(0x2758)
#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
@@ -828,9 +846,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_COMPARE_VALUE_MASK 0xffff
#define OACEC_COMPARE_VALUE_SHIFT 3
-#define OACEC_SELECT_NOA (0<<19)
-#define OACEC_SELECT_PREV (1<<19)
-#define OACEC_SELECT_BOOLEAN (2<<19)
+#define OACEC_SELECT_NOA (0 << 19)
+#define OACEC_SELECT_PREV (1 << 19)
+#define OACEC_SELECT_BOOLEAN (2 << 19)
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
@@ -948,9 +966,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Reset registers
*/
#define DEBUG_RESET_I830 _MMIO(0x6070)
-#define DEBUG_RESET_FULL (1<<7)
-#define DEBUG_RESET_RENDER (1<<8)
-#define DEBUG_RESET_DISPLAY (1<<9)
+#define DEBUG_RESET_FULL (1 << 7)
+#define DEBUG_RESET_RENDER (1 << 8)
+#define DEBUG_RESET_DISPLAY (1 << 9)
/*
* IOSF sideband
@@ -961,7 +979,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_SHIFT 8
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
-#define IOSF_SB_BUSY (1<<0)
+#define IOSF_SB_BUSY (1 << 0)
#define IOSF_PORT_BUNIT 0x03
#define IOSF_PORT_PUNIT 0x04
#define IOSF_PORT_NC 0x11
@@ -1044,13 +1062,13 @@ enum i915_power_well_id {
/*
* HSW/BDW
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
HSW_DISP_PW_GLOBAL = 15,
/*
* GEN9+
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
SKL_DISP_PW_MISC_IO = 0,
SKL_DISP_PW_DDI_A_E,
@@ -1074,17 +1092,54 @@ enum i915_power_well_id {
SKL_DISP_PW_2,
/* - custom power wells */
- SKL_DISP_PW_DC_OFF,
BXT_DPIO_CMN_A,
BXT_DPIO_CMN_BC,
- GLK_DPIO_CMN_C, /* 19 */
+ GLK_DPIO_CMN_C, /* 18 */
+
+ /*
+ * GEN11+
+ * - _HSW_PWR_WELL_CTL1-4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_1 = 0,
+ ICL_DISP_PW_2,
+ ICL_DISP_PW_3,
+ ICL_DISP_PW_4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_AUX1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_AUX_A = 16,
+ ICL_DISP_PW_AUX_B,
+ ICL_DISP_PW_AUX_C,
+ ICL_DISP_PW_AUX_D,
+ ICL_DISP_PW_AUX_E,
+ ICL_DISP_PW_AUX_F,
+
+ ICL_DISP_PW_AUX_TBT1 = 24,
+ ICL_DISP_PW_AUX_TBT2,
+ ICL_DISP_PW_AUX_TBT3,
+ ICL_DISP_PW_AUX_TBT4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_DDI1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_DDI_A = 32,
+ ICL_DISP_PW_DDI_B,
+ ICL_DISP_PW_DDI_C,
+ ICL_DISP_PW_DDI_D,
+ ICL_DISP_PW_DDI_E,
+ ICL_DISP_PW_DDI_F, /* 37 */
/*
* Multiple platforms.
* Must start following the highest ID of any platform.
* - custom power wells
*/
- I915_DISP_PW_ALWAYS_ON = 20,
+ SKL_DISP_PW_DC_OFF = 38,
+ I915_DISP_PW_ALWAYS_ON,
};
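[editor's note] Each GEN11 power well ID above encodes its control-register bit pair as (id & 15) * 2 for the status bit and (id & 15) * 2 + 1 for the request bit, with the higher ID bits selecting which _HSW_PWR_WELL_CTL_* register group applies. A quick check of that arithmetic:

	#include <stdio.h>

	/* Values copied from the enum above. */
	enum { ICL_DISP_PW_AUX_A = 16, ICL_DISP_PW_AUX_B, ICL_DISP_PW_DDI_B = 33 };

	static void pw_bits(const char *name, int id)
	{
		printf("%-18s status bit %2d, request bit %2d\n",
		       name, (id & 15) * 2, (id & 15) * 2 + 1);
	}

	int main(void)
	{
		pw_bits("ICL_DISP_PW_AUX_A", ICL_DISP_PW_AUX_A); /* bits 0/1 */
		pw_bits("ICL_DISP_PW_AUX_B", ICL_DISP_PW_AUX_B); /* bits 2/3 */
		pw_bits("ICL_DISP_PW_DDI_B", ICL_DISP_PW_DDI_B); /* bits 2/3 */
		return 0;
	}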
#define PUNIT_REG_PWRGT_CTRL 0x60
@@ -1098,8 +1153,8 @@ enum i915_power_well_id {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
-#define GPLLENABLE (1<<4)
-#define GENFREQSTATUS (1<<0)
+#define GPLLENABLE (1 << 4)
+#define GENFREQSTATUS (1 << 0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -1141,11 +1196,11 @@ enum i915_power_well_id {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
-#define VLV_TURBO_SOC_OVERRIDE 0x04
-#define VLV_OVERRIDE_EN 1
-#define VLV_SOC_TDP_EN (1 << 1)
-#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
-#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+#define VLV_TURBO_SOC_OVERRIDE 0x04
+#define VLV_OVERRIDE_EN 1
+#define VLV_SOC_TDP_EN (1 << 1)
+#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
+#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1194,10 +1249,10 @@ enum i915_power_well_id {
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
-#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
-#define DPIO_SFR_BYPASS (1<<1)
-#define DPIO_CMNRST (1<<0)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
#define DPIO_PHY(pipe) ((pipe) >> 1)
#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
@@ -1215,7 +1270,7 @@ enum i915_power_well_id {
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
#define DPIO_N_SHIFT (12) /* 4 bits */
-#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_ENABLE_CALIBRATION (1 << 11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
#define _VLV_PLL_DW3_CH1 0x802c
@@ -1264,10 +1319,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW0_CH0 0x8200
#define _VLV_PCS_DW0_CH1 0x8400
-#define DPIO_PCS_TX_LANE2_RESET (1<<16)
-#define DPIO_PCS_TX_LANE1_RESET (1<<7)
-#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
+#define DPIO_PCS_TX_LANE2_RESET (1 << 16)
+#define DPIO_PCS_TX_LANE1_RESET (1 << 7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
#define _VLV_PCS01_DW0_CH0 0x200
@@ -1279,11 +1334,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
-#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
-#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
-#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
+#define DPIO_PCS_CLK_SOFT_RESET (1 << 5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
#define _VLV_PCS01_DW1_CH0 0x204
@@ -1308,12 +1363,12 @@ enum i915_power_well_id {
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
-#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
-#define DPIO_PCS_TX2MARGIN_000 (0<<13)
-#define DPIO_PCS_TX2MARGIN_101 (1<<13)
-#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
-#define DPIO_PCS_TX1MARGIN_000 (0<<10)
-#define DPIO_PCS_TX1MARGIN_101 (1<<10)
+#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13)
+#define DPIO_PCS_TX2MARGIN_000 (0 << 13)
+#define DPIO_PCS_TX2MARGIN_101 (1 << 13)
+#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10)
+#define DPIO_PCS_TX1MARGIN_000 (0 << 10)
+#define DPIO_PCS_TX1MARGIN_101 (1 << 10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
#define _VLV_PCS01_DW9_CH0 0x224
@@ -1325,14 +1380,14 @@ enum i915_power_well_id {
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
-#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
-#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
-#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
-#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
-#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
-#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
-#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
-#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
+#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31)
+#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24)
+#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24)
+#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24)
+#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16)
+#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16)
+#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
#define _VLV_PCS01_DW10_CH0 0x0228
@@ -1344,10 +1399,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
-#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
-#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
-#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
+#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24)
+#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
#define _VLV_PCS01_DW11_CH0 0x022c
@@ -1366,11 +1421,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
-#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
-#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
-#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
-#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
-#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
+#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20)
+#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16)
+#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8)
+#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6)
+#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
#define _VLV_PCS_DW14_CH0 0x8238
@@ -1391,7 +1446,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
/* The following bit for CHV phy */
-#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27)
#define DPIO_SWING_MARGIN101_SHIFT 16
#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
@@ -1410,7 +1465,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW5_CH0 0x8294
#define _VLV_TX_DW5_CH1 0x8494
-#define DPIO_TX_OCALINIT_EN (1<<31)
+#define DPIO_TX_OCALINIT_EN (1 << 31)
#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
#define _VLV_TX_DW11_CH0 0x82ac
@@ -1640,10 +1695,10 @@ enum i915_power_well_id {
#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
+#define PORT_PLL_DCO_AMP_OVR_EN_H (1 << 27)
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
-#define PORT_PLL_DCO_AMP(x) ((x)<<10)
+#define PORT_PLL_DCO_AMP(x) ((x) << 10)
#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
@@ -1666,6 +1721,26 @@ enum i915_power_well_id {
#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
_ICL_PORT_CL_DW5_B)
+#define _CNL_PORT_CL_DW10_A 0x162028
+#define _ICL_PORT_CL_DW10_B 0x6c028
+#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
+ _CNL_PORT_CL_DW10_A, \
+ _ICL_PORT_CL_DW10_B)
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1678,6 +1753,13 @@ enum i915_power_well_id {
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+#define _ICL_PORT_CL_DW12_A 0x162030
+#define _ICL_PORT_CL_DW12_B 0x6C030
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
+ _ICL_PORT_CL_DW12_A, \
+ _ICL_PORT_CL_DW12_B)
+
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1715,16 +1797,22 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_LN0_D, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F))
+
#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
+#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
+#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
_ICL_PORT_PCS_DW1_GRP_A, \
_ICL_PORT_PCS_DW1_GRP_B)
#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_PCS_DW1_LN0_A, \
_ICL_PORT_PCS_DW1_LN0_B)
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_PCS_DW1_AUX_A, \
+ _ICL_PORT_PCS_DW1_AUX_B)
#define COMMON_KEEPER_EN (1 << 26)
/* CNL Port TX registers */
@@ -1745,7 +1833,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_GRP_OFFSET, \
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_F_GRP_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1753,7 +1841,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_LN0_OFFSET, \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_F_LN0_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
@@ -1761,16 +1849,23 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
#define _ICL_PORT_TX_DW2_LN0_A 0x162888
#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
+#define _ICL_PORT_TX_DW2_AUX_A 0x162388
+#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_GRP_A, \
_ICL_PORT_TX_DW2_GRP_B)
#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_LN0_A, \
_ICL_PORT_TX_DW2_LN0_B)
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW2_AUX_A, \
+ _ICL_PORT_TX_DW2_AUX_B)
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
#define SWING_SEL_LOWER_MASK (0x7 << 11)
+#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
+#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
#define RCOMP_SCALAR(x) ((x) << 0)
#define RCOMP_SCALAR_MASK (0xFF << 0)
@@ -1779,21 +1874,26 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
- (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
+ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define _ICL_PORT_TX_DW4_GRP_A 0x162690
#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
#define _ICL_PORT_TX_DW4_LN0_A 0x162890
#define _ICL_PORT_TX_DW4_LN1_A 0x162990
#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
+#define _ICL_PORT_TX_DW4_AUX_A 0x162390
+#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW4_GRP_A, \
_ICL_PORT_TX_DW4_GRP_B)
#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
_ICL_PORT_TX_DW4_LN0_A, \
_ICL_PORT_TX_DW4_LN0_B) + \
- (ln * (_ICL_PORT_TX_DW4_LN1_A - \
- _ICL_PORT_TX_DW4_LN0_A)))
+ ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
+ _ICL_PORT_TX_DW4_LN0_A)))
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW4_AUX_A, \
+ _ICL_PORT_TX_DW4_AUX_B)
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1808,12 +1908,17 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
#define _ICL_PORT_TX_DW5_LN0_A 0x162894
#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
+#define _ICL_PORT_TX_DW5_AUX_A 0x162394
+#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_GRP_A, \
_ICL_PORT_TX_DW5_GRP_B)
#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_LN0_A, \
_ICL_PORT_TX_DW5_LN0_B)
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW5_AUX_A, \
+ _ICL_PORT_TX_DW5_AUX_B)
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -1990,6 +2095,11 @@ enum i915_power_well_id {
_ICL_PORT_COMP_DW10_A, \
_ICL_PORT_COMP_DW10_B)
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+
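[editor's note] Each type-C connector n owns one 4-bit lane-assignment nibble in DFLEXDPMLE1, and the two macros simply scale a nibble into place: worked by hand, DFLEXDPMLE1_DPMLETC_MASK(1) = 0xf << 4 = 0xf0, and DFLEXDPMLE1_DPMLETC(1, 0x3) = 0x3 << 4 = 0x30.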
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@@ -2134,8 +2244,8 @@ enum i915_power_well_id {
/* SKL balance leg register */
#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
/* I_boost values */
-#define BALANCE_LEG_SHIFT(port) (8+3*(port))
-#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
@@ -2155,10 +2265,10 @@ enum i915_power_well_id {
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
-#define I830_FENCE_REG_VALID (1<<0)
+#define I830_FENCE_REG_VALID (1 << 0)
#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
-#define I830_FENCE_MAX_SIZE_VAL (1<<8)
+#define I830_FENCE_MAX_SIZE_VAL (1 << 8)
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
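[editor's note] I915_FENCE_SIZE_BITS() turns a power-of-two object size into the register's encoded size field: for a 2 MB fence, size >> 20 = 2, ffs(2) = 2, so the field is (2 - 1) << 8 = 0x100. A worked check:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)

	int main(void)
	{
		printf("1MB -> 0x%03x\n", I915_FENCE_SIZE_BITS(1 << 20)); /* 0x000 */
		printf("2MB -> 0x%03x\n", I915_FENCE_SIZE_BITS(2 << 20)); /* 0x100 */
		printf("4MB -> 0x%03x\n", I915_FENCE_SIZE_BITS(4 << 20)); /* 0x200 */
		return 0;
	}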
@@ -2167,7 +2277,7 @@ enum i915_power_well_id {
#define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4)
#define I965_FENCE_PITCH_SHIFT 2
#define I965_FENCE_TILING_Y_SHIFT 1
-#define I965_FENCE_REG_VALID (1<<0)
+#define I965_FENCE_REG_VALID (1 << 0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8)
@@ -2190,13 +2300,13 @@ enum i915_power_well_id {
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER _MMIO(0x02024)
-#define PRB0_BASE (0x2030-0x30)
-#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
-#define PRB2_BASE (0x2050-0x30) /* gen3 */
-#define SRB0_BASE (0x2100-0x30) /* gen2 */
-#define SRB1_BASE (0x2110-0x30) /* gen2 */
-#define SRB2_BASE (0x2120-0x30) /* 830 */
-#define SRB3_BASE (0x2130-0x30) /* 830 */
+#define PRB0_BASE (0x2030 - 0x30)
+#define PRB1_BASE (0x2040 - 0x30) /* 830,gen3 */
+#define PRB2_BASE (0x2050 - 0x30) /* gen3 */
+#define SRB0_BASE (0x2100 - 0x30) /* gen2 */
+#define SRB1_BASE (0x2110 - 0x30) /* gen2 */
+#define SRB2_BASE (0x2120 - 0x30) /* 830 */
+#define SRB3_BASE (0x2130 - 0x30) /* 830 */
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -2209,14 +2319,14 @@ enum i915_power_well_id {
#define GEN11_VEBOX_RING_BASE 0x1c8000
#define GEN11_VEBOX2_RING_BASE 0x1d8000
#define BLT_RING_BASE 0x22000
-#define RING_TAIL(base) _MMIO((base)+0x30)
-#define RING_HEAD(base) _MMIO((base)+0x34)
-#define RING_START(base) _MMIO((base)+0x38)
-#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_TAIL(base) _MMIO((base) + 0x30)
+#define RING_HEAD(base) _MMIO((base) + 0x34)
+#define RING_START(base) _MMIO((base) + 0x38)
+#define RING_CTL(base) _MMIO((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_SYNC_0(base) _MMIO((base)+0x40)
-#define RING_SYNC_1(base) _MMIO((base)+0x44)
-#define RING_SYNC_2(base) _MMIO((base)+0x48)
+#define RING_SYNC_0(base) _MMIO((base) + 0x40)
+#define RING_SYNC_1(base) _MMIO((base) + 0x44)
+#define RING_SYNC_2(base) _MMIO((base) + 0x48)
#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
@@ -2230,21 +2340,22 @@ enum i915_power_well_id {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC INVALID_MMIO_REG
-#define RING_PSMI_CTL(base) _MMIO((base)+0x50)
-#define RING_MAX_IDLE(base) _MMIO((base)+0x54)
-#define RING_HWS_PGA(base) _MMIO((base)+0x80)
-#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080)
-#define RING_RESET_CTL(base) _MMIO((base)+0xd0)
+#define RING_PSMI_CTL(base) _MMIO((base) + 0x50)
+#define RING_MAX_IDLE(base) _MMIO((base) + 0x54)
+#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
+#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
#define RESET_CTL_REQUEST_RESET (1 << 0)
#define RESET_CTL_READY_TO_RESET (1 << 1)
+#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
#define GTT_CACHE_EN_ALL 0xF0007FFF
#define GEN7_WR_WATERMARK _MMIO(0x4028)
#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C)
#define ARB_MODE _MMIO(0x4030)
-#define ARB_MODE_SWIZZLE_SNB (1<<4)
-#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define ARB_MODE_SWIZZLE_SNB (1 << 4)
+#define ARB_MODE_SWIZZLE_IVB (1 << 5)
#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034)
#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038)
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
@@ -2254,30 +2365,30 @@ enum i915_power_well_id {
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
#define GAMTARBMODE _MMIO(0x04a08)
-#define ARB_MODE_BWGTLB_DISABLE (1<<9)
-#define ARB_MODE_SWIZZLE_BDW (1<<1)
+#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define ARB_MODE_SWIZZLE_BDW (1 << 1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
-#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define RING_FAULT_GTTSEL_MASK (1<<11)
+#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define RING_FAULT_VALID (1<<0)
+#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
-#define RING_ACTHD(base) _MMIO((base)+0x74)
-#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c)
-#define RING_NOPID(base) _MMIO((base)+0x94)
-#define RING_IMR(base) _MMIO((base)+0xa8)
-#define RING_HWSTAM(base) _MMIO((base)+0x98)
-#define RING_TIMESTAMP(base) _MMIO((base)+0x358)
-#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4)
+#define RING_ACTHD(base) _MMIO((base) + 0x74)
+#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c)
+#define RING_NOPID(base) _MMIO((base) + 0x94)
+#define RING_IMR(base) _MMIO((base) + 0xa8)
+#define RING_HWSTAM(base) _MMIO((base) + 0x98)
+#define RING_TIMESTAMP(base) _MMIO((base) + 0x358)
+#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -2290,24 +2401,25 @@ enum i915_power_well_id {
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
-#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
-#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
-#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
-#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
-#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@@ -2333,19 +2445,19 @@ enum i915_power_well_id {
#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
-#define RING_IPEIR(base) _MMIO((base)+0x64)
-#define RING_IPEHR(base) _MMIO((base)+0x68)
+#define RING_IPEIR(base) _MMIO((base) + 0x64)
+#define RING_IPEHR(base) _MMIO((base) + 0x68)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
-#define RING_INSTDONE(base) _MMIO((base)+0x6c)
-#define RING_INSTPS(base) _MMIO((base)+0x70)
-#define RING_DMA_FADD(base) _MMIO((base)+0x78)
-#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */
-#define RING_INSTPM(base) _MMIO((base)+0xc0)
-#define RING_MI_MODE(base) _MMIO((base)+0x9c)
+#define RING_INSTDONE(base) _MMIO((base) + 0x6c)
+#define RING_INSTPS(base) _MMIO((base) + 0x70)
+#define RING_DMA_FADD(base) _MMIO((base) + 0x78)
+#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */
+#define RING_INSTPM(base) _MMIO((base) + 0xc0)
+#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
#define INSTPS _MMIO(0x2070) /* 965+ only */
#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 _MMIO(0x2074)
@@ -2353,37 +2465,37 @@ enum i915_power_well_id {
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
-#define PWRCTX_EN (1<<0)
+#define PWRCTX_EN (1 << 0)
#define IPEIR _MMIO(0x2088)
#define IPEHR _MMIO(0x208c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
#define DMA_FADD_I8XX _MMIO(0x20d0)
-#define RING_BBSTATE(base) _MMIO((base)+0x110)
+#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
-#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */
-#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */
-#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
-#define RING_BBADDR(base) _MMIO((base)+0x140)
-#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */
-#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */
-#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */
-#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
-#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */
+#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
+#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
+#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
+#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
+#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
-#define ERR_INT_POISON (1<<31)
-#define ERR_INT_MMIO_UNCLAIMED (1<<13)
-#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
-#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
-#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
-#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
-#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
-#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3))
-#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
-#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define ERR_INT_POISON (1 << 31)
+#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
+#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
+#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
+#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
+#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
+#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
+#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
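[editor's note] The parameterized forms reproduce the explicit per-pipe bits: pipes A/B/C map to underrun bits 0/3/6 and CRC-done bits 2/5/8, matching the fixed defines above. Verified quickly:

	#include <stdio.h>

	#define ERR_INT_PIPE_CRC_DONE(pipe)  (1 << (2 + (pipe) * 3))
	#define ERR_INT_FIFO_UNDERRUN(pipe)  (1 << ((pipe) * 3))

	int main(void)
	{
		for (int pipe = 0; pipe < 3; pipe++)
			printf("pipe %c: underrun 0x%03x, crc done 0x%03x\n",
			       'A' + pipe,
			       ERR_INT_FIFO_UNDERRUN(pipe),
			       ERR_INT_PIPE_CRC_DONE(pipe));
		return 0;
	}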
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
@@ -2391,7 +2503,7 @@ enum i915_power_well_id {
#define FAULT_GTT_SEL (1 << 4)
#define FPGA_DBG _MMIO(0x42300)
-#define FPGA_DBG_RM_NOCLAIM (1<<31)
+#define FPGA_DBG_RM_NOCLAIM (1 << 31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
@@ -2400,22 +2512,22 @@ enum i915_power_well_id {
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
-#define DERRMR_PIPEA_SCANLINE (1<<0)
-#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
-#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
-#define DERRMR_PIPEA_VBLANK (1<<3)
-#define DERRMR_PIPEA_HBLANK (1<<5)
-#define DERRMR_PIPEB_SCANLINE (1<<8)
-#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
-#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
-#define DERRMR_PIPEB_VBLANK (1<<11)
-#define DERRMR_PIPEB_HBLANK (1<<13)
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define DERRMR_PIPEC_SCANLINE (1<<14)
-#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
-#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
-#define DERRMR_PIPEC_VBLANK (1<<21)
-#define DERRMR_PIPEC_HBLANK (1<<22)
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
/* GM45+ chicken bits -- debug workaround bits that may be required
@@ -2439,7 +2551,7 @@ enum i915_power_well_id {
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
-#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
#define MI_MODE _MMIO(0x209c)
@@ -2478,22 +2590,22 @@ enum i915_power_well_id {
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
-#define GFX_RUN_LIST_ENABLE (1<<15)
-#define GFX_INTERRUPT_STEERING (1<<14)
-#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
-#define GFX_SURFACE_FAULT_ENABLE (1<<12)
-#define GFX_REPLAY_MODE (1<<11)
-#define GFX_PSMI_GRANULARITY (1<<10)
-#define GFX_PPGTT_ENABLE (1<<9)
-#define GEN8_GFX_PPGTT_48B (1<<7)
-
-#define GFX_FORWARD_VBLANK_MASK (3<<5)
-#define GFX_FORWARD_VBLANK_NEVER (0<<5)
-#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
-#define GFX_FORWARD_VBLANK_COND (2<<5)
-
-#define GEN11_GFX_DISABLE_LEGACY_MODE (1<<3)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define GFX_RUN_LIST_ENABLE (1 << 15)
+#define GFX_INTERRUPT_STEERING (1 << 14)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13)
+#define GFX_SURFACE_FAULT_ENABLE (1 << 12)
+#define GFX_REPLAY_MODE (1 << 11)
+#define GFX_PSMI_GRANULARITY (1 << 10)
+#define GFX_PPGTT_ENABLE (1 << 9)
+#define GEN8_GFX_PPGTT_48B (1 << 7)
+
+#define GFX_FORWARD_VBLANK_MASK (3 << 5)
+#define GFX_FORWARD_VBLANK_NEVER (0 << 5)
+#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5)
+#define GFX_FORWARD_VBLANK_COND (2 << 5)
+
+#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3)
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -2507,8 +2619,8 @@ enum i915_power_well_id {
#define IMR _MMIO(0x20a8)
#define ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
-#define GINT_DIS (1<<22)
-#define GCFG_DIS (1<<8)
+#define GINT_DIS (1 << 22)
+#define GCFG_DIS (1 << 8)
#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
@@ -2518,35 +2630,35 @@ enum i915_power_well_id {
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
-#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
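
[Editor's note] DISPLAY_PLANE_FLIP_PENDING() counts down from bit 11 rather than up, which is easy to misread; a minimal illustrative check (macro restated so it compiles standalone, not part of the patch):

#define DISPLAY_PLANE_FLIP_PENDING(plane)	(1 << (11 - (plane)))
_Static_assert(DISPLAY_PLANE_FLIP_PENDING(0) == (1 << 11), "plane A is bit 11");
_Static_assert(DISPLAY_PLANE_FLIP_PENDING(1) == (1 << 10), "plane B is bit 10");
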
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
-#define GM45_ERROR_PAGE_TABLE (1<<5)
-#define GM45_ERROR_MEM_PRIV (1<<4)
-#define I915_ERROR_PAGE_TABLE (1<<4)
-#define GM45_ERROR_CP_PRIV (1<<3)
-#define I915_ERROR_MEMORY_REFRESH (1<<1)
-#define I915_ERROR_INSTRUCTION (1<<0)
+#define GM45_ERROR_PAGE_TABLE (1 << 5)
+#define GM45_ERROR_MEM_PRIV (1 << 4)
+#define I915_ERROR_PAGE_TABLE (1 << 4)
+#define GM45_ERROR_CP_PRIV (1 << 3)
+#define I915_ERROR_MEMORY_REFRESH (1 << 1)
+#define I915_ERROR_INSTRUCTION (1 << 0)
#define INSTPM _MMIO(0x20c0)
-#define INSTPM_SELF_EN (1<<12) /* 915GM only */
-#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
-#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
-#define INSTPM_TLB_INVALIDATE (1<<9)
-#define INSTPM_SYNC_FLUSH (1<<5)
+#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1 << 9)
+#define INSTPM_SYNC_FLUSH (1 << 5)
#define ACTHD _MMIO(0x20c8)
#define MEM_MODE _MMIO(0x20cc)
-#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
-#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
-#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
+#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
+#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
+#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK (1<<31)
-#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
-#define FW_BLC_SELF_EN (1<<15) /* 945 only */
+#define FW_BLC_SELF_EN_MASK (1 << 31)
+#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */
+#define FW_BLC_SELF_EN (1 << 15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -2645,37 +2757,37 @@ enum i915_power_well_id {
#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
-#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
-#define CM0_IZ_OPT_DISABLE (1<<6)
-#define CM0_ZR_OPT_DISABLE (1<<5)
-#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
-#define CM0_DEPTH_EVICT_DISABLE (1<<4)
-#define CM0_COLOR_EVICT_DISABLE (1<<3)
-#define CM0_DEPTH_WRITE_DISABLE (1<<1)
-#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define CM0_IZ_OPT_DISABLE (1 << 6)
+#define CM0_ZR_OPT_DISABLE (1 << 5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
-#define GFX_FLSH_CNTL_EN (1<<0)
+#define GFX_FLSH_CNTL_EN (1 << 0)
#define ECOSKPD _MMIO(0x21d0)
-#define ECO_GATING_CX_ONLY (1<<3)
-#define ECO_FLIP_DONE (1<<0)
+#define ECO_GATING_CX_ONLY (1 << 3)
+#define ECO_FLIP_DONE (1 << 0)
#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1<<0)
-#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
#define GEN6_BLITTER_LOCK_SHIFT 16
-#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+#define GEN6_BLITTER_FBC_NOTIFY (1 << 3)
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
-#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
@@ -2714,6 +2826,10 @@ enum i915_power_well_id {
#define GEN10_F2_SS_DIS_SHIFT 18
#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -2727,7 +2843,7 @@ enum i915_power_well_id {
#define GEN8_EU_DISABLE2 _MMIO(0x913c)
#define GEN8_EU_DIS2_S2_MASK 0xff
-#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
#define GEN10_EU_DISABLE3 _MMIO(0x9140)
#define GEN10_EU_DIS_SS_MASK 0xff
@@ -2784,44 +2900,43 @@ enum i915_power_well_id {
(IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
-#define ILK_BSD_USER_INTERRUPT (1<<5)
-
-#define I915_PM_INTERRUPT (1<<31)
-#define I915_ISP_INTERRUPT (1<<22)
-#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
-#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
-#define I915_MIPIC_INTERRUPT (1<<19)
-#define I915_MIPIA_INTERRUPT (1<<18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
-#define I915_MASTER_ERROR_INTERRUPT (1<<15)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
-#define I915_HWB_OOM_INTERRUPT (1<<13)
-#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
-#define I915_SYNC_STATUS_INTERRUPT (1<<12)
-#define I915_MISC_INTERRUPT (1<<11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
-#define I915_DEBUG_INTERRUPT (1<<2)
-#define I915_WINVALID_INTERRUPT (1<<1)
-#define I915_USER_INTERRUPT (1<<1)
-#define I915_ASLE_INTERRUPT (1<<0)
-#define I915_BSD_USER_INTERRUPT (1<<25)
+#define ILK_BSD_USER_INTERRUPT (1 << 5)
+
+#define I915_PM_INTERRUPT (1 << 31)
+#define I915_ISP_INTERRUPT (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
+#define I915_MIPIC_INTERRUPT (1 << 19)
+#define I915_MIPIA_INTERRUPT (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
+#define I915_HWB_OOM_INTERRUPT (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
+#define I915_MISC_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
+#define I915_DEBUG_INTERRUPT (1 << 2)
+#define I915_WINVALID_INTERRUPT (1 << 1)
+#define I915_USER_INTERRUPT (1 << 1)
+#define I915_ASLE_INTERRUPT (1 << 0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
@@ -2844,19 +2959,19 @@ enum i915_power_well_id {
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
-#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
-#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
-#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
-#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16)
+#define GEN7_FF_TS_SCHED_HW (0x0 << 16) /* Default */
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
-#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
-#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
-#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
-#define GEN7_FF_VS_SCHED_HW (0x0<<12)
-#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
-#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
-#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
-#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+#define GEN7_FF_VS_SCHED_HS1 (0x5 << 12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3 << 12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1 << 12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0 << 12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5 << 4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3 << 4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0 << 4)
/*
* Framebuffer compression (915+ only)
@@ -2865,51 +2980,51 @@ enum i915_power_well_id {
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN (1<<31)
-#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_EN (1 << 31)
+#define FBC_CTL_PERIODIC (1 << 30)
#define FBC_CTL_INTERVAL_SHIFT (16)
-#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define FBC_CTL_C3_IDLE (1<<13)
+#define FBC_CTL_UNCOMPRESSIBLE (1 << 14)
+#define FBC_CTL_C3_IDLE (1 << 13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_CMD_COMPRESS (1 << 0)
#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING (1<<31)
-#define FBC_STAT_COMPRESSED (1<<30)
-#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_COMPRESSING (1 << 31)
+#define FBC_STAT_COMPRESSED (1 << 30)
+#define FBC_STAT_MODIFIED (1 << 29)
#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 _MMIO(0x3214)
-#define FBC_CTL_FENCE_DBL (0<<4)
-#define FBC_CTL_IDLE_IMM (0<<2)
-#define FBC_CTL_IDLE_FULL (1<<2)
-#define FBC_CTL_IDLE_LINE (2<<2)
-#define FBC_CTL_IDLE_DEBUG (3<<2)
-#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_CTL_FENCE_DBL (0 << 4)
+#define FBC_CTL_IDLE_IMM (0 << 2)
+#define FBC_CTL_IDLE_FULL (1 << 2)
+#define FBC_CTL_IDLE_LINE (2 << 2)
+#define FBC_CTL_IDLE_DEBUG (3 << 2)
+#define FBC_CTL_CPU_FENCE (1 << 1)
+#define FBC_CTL_PLANE(plane) ((plane) << 0)
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
#define FBC_LL_SIZE (1536)
#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN (1<<30)
+#define FBC_LLC_FULLY_OPEN (1 << 30)
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
-#define DPFC_CTL_EN (1<<31)
-#define DPFC_CTL_PLANE(plane) ((plane)<<30)
-#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
-#define DPFC_CTL_FENCE_EN (1<<29)
-#define IVB_DPFC_CTL_FENCE_EN (1<<28)
-#define DPFC_CTL_PERSISTENT_MODE (1<<25)
-#define DPFC_SR_EN (1<<10)
-#define DPFC_CTL_LIMIT_1X (0<<6)
-#define DPFC_CTL_LIMIT_2X (1<<6)
-#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_CTL_EN (1 << 31)
+#define DPFC_CTL_PLANE(plane) ((plane) << 30)
+#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)
+#define DPFC_CTL_FENCE_EN (1 << 29)
+#define IVB_DPFC_CTL_FENCE_EN (1 << 28)
+#define DPFC_CTL_PERSISTENT_MODE (1 << 25)
+#define DPFC_SR_EN (1 << 10)
+#define DPFC_CTL_LIMIT_1X (0 << 6)
+#define DPFC_CTL_LIMIT_2X (1 << 6)
+#define DPFC_CTL_LIMIT_4X (2 << 6)
#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_EN (1 << 27)
#define DPFC_RECOMP_STALL_WM_SHIFT (16)
#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
@@ -2922,12 +3037,12 @@ enum i915_power_well_id {
#define DPFC_STATUS2 _MMIO(0x3214)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define DPFC_CHICKEN _MMIO(0x3224)
-#define DPFC_HT_MODIFY (1<<31)
+#define DPFC_HT_MODIFY (1 << 31)
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define ILK_DPFC_CONTROL _MMIO(0x43208)
-#define FBC_CTL_FALSE_COLOR (1<<10)
+#define FBC_CTL_FALSE_COLOR (1 << 10)
/* Bits 28:8 are reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
@@ -2938,15 +3053,15 @@ enum i915_power_well_id {
#define BDW_FBC_COMP_SEG_MASK 0xfff
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
-#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
-#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
+#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID (1<<0)
-#define SNB_FBC_FRONT_BUFFER (1<<1)
+#define ILK_FBC_RT_VALID (1 << 0)
+#define SNB_FBC_FRONT_BUFFER (1 << 1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
-#define ILK_FBCQ_DIS (1<<22)
-#define ILK_PABSTRETCH_DIS (1<<21)
+#define ILK_FBCQ_DIS (1 << 22)
+#define ILK_PABSTRETCH_DIS (1 << 21)
/*
@@ -2955,7 +3070,7 @@ enum i915_power_well_id {
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define SNB_CPU_FENCE_ENABLE (1 << 29)
#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
/* Framebuffer compression for Ivybridge */
@@ -2965,8 +3080,8 @@ enum i915_power_well_id {
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE _MMIO(0x50380)
-#define FBC_REND_NUKE (1<<2)
-#define FBC_REND_CACHE_CLEAN (1<<1)
+#define FBC_REND_NUKE (1 << 2)
+#define FBC_REND_CACHE_CLEAN (1 << 1)
/*
* GPIO regs
@@ -2979,6 +3094,10 @@ enum i915_power_well_id {
#define GPIOF _MMIO(0x5024)
#define GPIOG _MMIO(0x5028)
#define GPIOH _MMIO(0x502c)
+#define GPIOJ _MMIO(0x5034)
+#define GPIOK _MMIO(0x5038)
+#define GPIOL _MMIO(0x503C)
+#define GPIOM _MMIO(0x5040)
# define GPIO_CLOCK_DIR_MASK (1 << 0)
# define GPIO_CLOCK_DIR_IN (0 << 1)
# define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -2995,12 +3114,13 @@ enum i915_power_well_id {
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define GMBUS_AKSV_SELECT (1<<11)
-#define GMBUS_RATE_100KHZ (0<<8)
-#define GMBUS_RATE_50KHZ (1<<8)
-#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
-#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
-#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
#define GMBUS_PIN_DISABLED 0
#define GMBUS_PIN_SSC 1
#define GMBUS_PIN_VGADDC 2
@@ -3021,36 +3141,37 @@ enum i915_power_well_id {
#define GMBUS_NUM_PINS 13 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define GMBUS_SW_CLR_INT (1<<31)
-#define GMBUS_SW_RDY (1<<30)
-#define GMBUS_ENT (1<<29) /* enable timeout */
-#define GMBUS_CYCLE_NONE (0<<25)
-#define GMBUS_CYCLE_WAIT (1<<25)
-#define GMBUS_CYCLE_INDEX (2<<25)
-#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
#define GMBUS_BYTE_COUNT_SHIFT 16
#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
-#define GMBUS_SLAVE_READ (1<<0)
-#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define GMBUS_INUSE (1<<15)
-#define GMBUS_HW_WAIT_PHASE (1<<14)
-#define GMBUS_STALL_TIMEOUT (1<<13)
-#define GMBUS_INT (1<<12)
-#define GMBUS_HW_RDY (1<<11)
-#define GMBUS_SATOER (1<<10)
-#define GMBUS_ACTIVE (1<<9)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
-#define GMBUS_NAK_EN (1<<3)
-#define GMBUS_IDLE_EN (1<<2)
-#define GMBUS_HW_WAIT_EN (1<<1)
-#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define GMBUS_2BYTE_INDEX_EN (1<<31)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
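
[Editor's note] Taken together, the GMBUS1 fields describe one complete bus transaction. A hedged sketch of how a read command word could be assembled from them (the helper name is an assumption, not part of this header; len must stay within GMBUS_BYTE_COUNT_MAX):

/* Illustrative only: build a GMBUS1 command word for reading `len'
 * bytes from 7-bit slave address `addr', using a wait cycle. */
static inline unsigned int gmbus1_read_cmd(unsigned int addr, unsigned int len)
{
	return GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
	       (len << GMBUS_BYTE_COUNT_SHIFT) |
	       (addr << GMBUS_SLAVE_ADDR_SHIFT) |
	       GMBUS_SLAVE_READ;
}
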
/*
* Clock control & power management
@@ -3088,10 +3209,10 @@ enum i915_power_well_id {
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define DPLL_LOCK_VLV (1<<15)
-#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
-#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
-#define DPLL_SSC_REF_CLK_CHV (1<<13)
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
@@ -3101,20 +3222,20 @@ enum i915_power_well_id {
#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
#define PHY_LDO_DELAY_600NS 0x2
-#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
-#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
-#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
-#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
-#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
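
[Editor's note] The DISPLAY_PHY_CONTROL helpers pack per-PHY, per-channel fields at computed offsets, so the arithmetic is worth spelling out once; a standalone sketch (macros restated verbatim, the checks are illustrative):

#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)	(1 << (2 * (phy) + (ch) + 27))
#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)	((mask) << (8 * (phy) + 4 * (ch) + 11))

/* For PHY1/CH0: the override mask starts at 8*1 + 4*0 + 11 = bit 19,
 * and its enable bit sits at 2*1 + 0 + 27 = bit 29. */
_Static_assert(PHY_CH_POWER_DOWN_OVRD(0xf, 1, 0) == (0xf << 19), "PHY1/CH0 mask");
_Static_assert(PHY_CH_POWER_DOWN_OVRD_EN(1, 0) == (1 << 29), "PHY1/CH0 enable");
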
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -3135,7 +3256,7 @@ enum i915_power_well_id {
/* Ironlake */
# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
@@ -3224,10 +3345,10 @@ enum i915_power_well_id {
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE _MMIO(0x6104)
-#define DSTATE_GFX_RESET_I830 (1<<6)
-#define DSTATE_PLL_D3_OFF (1<<3)
-#define DSTATE_GFX_CLOCK_GATING (1<<1)
-#define DSTATE_DOT_CLOCK_GATING (1<<0)
+#define DSTATE_GFX_RESET_I830 (1 << 6)
+#define DSTATE_PLL_D3_OFF (1 << 3)
+#define DSTATE_GFX_CLOCK_GATING (1 << 1)
+#define DSTATE_DOT_CLOCK_GATING (1 << 0)
#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -3344,7 +3465,7 @@ enum i915_power_well_id {
#define DEUC _MMIO(0x6214) /* CRL only */
#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define FW_CSPWRDWNEN (1<<15)
+#define FW_CSPWRDWNEN (1 << 15)
#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
@@ -3469,7 +3590,7 @@ enum i915_power_well_id {
#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
#define TSC1 _MMIO(0x11001)
-#define TSE (1<<0)
+#define TSE (1 << 0)
#define TR1 _MMIO(0x11006)
#define TSFS _MMIO(0x11020)
#define TSFS_SLOPE_MASK 0x0000ff00
@@ -3515,23 +3636,23 @@ enum i915_power_well_id {
#define MEMCTL_CMD_CHVID 3
#define MEMCTL_CMD_VMMOFF 4
#define MEMCTL_CMD_VMMON 5
-#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
when command complete */
#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
#define MEMCTL_FREQ_SHIFT 8
-#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_SFCAVM (1 << 7)
#define MEMCTL_TGT_VID_MASK 0x007f
#define MEMIHYST _MMIO(0x1117c)
#define MEMINTREN _MMIO(0x11180) /* 16 bits */
-#define MEMINT_RSEXIT_EN (1<<8)
-#define MEMINT_CX_SUPR_EN (1<<7)
-#define MEMINT_CONT_BUSY_EN (1<<6)
-#define MEMINT_AVG_BUSY_EN (1<<5)
-#define MEMINT_EVAL_CHG_EN (1<<4)
-#define MEMINT_MON_IDLE_EN (1<<3)
-#define MEMINT_UP_EVAL_EN (1<<2)
-#define MEMINT_DOWN_EVAL_EN (1<<1)
-#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINT_RSEXIT_EN (1 << 8)
+#define MEMINT_CX_SUPR_EN (1 << 7)
+#define MEMINT_CONT_BUSY_EN (1 << 6)
+#define MEMINT_AVG_BUSY_EN (1 << 5)
+#define MEMINT_EVAL_CHG_EN (1 << 4)
+#define MEMINT_MON_IDLE_EN (1 << 3)
+#define MEMINT_UP_EVAL_EN (1 << 2)
+#define MEMINT_DOWN_EVAL_EN (1 << 1)
+#define MEMINT_SW_CMD_EN (1 << 0)
#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
#define MEM_RSEXIT_MASK 0xc000
#define MEM_RSEXIT_SHIFT 14
@@ -3553,26 +3674,26 @@ enum i915_power_well_id {
#define MEM_INT_STEER_SMI 2
#define MEM_INT_STEER_SCI 3
#define MEMINTRSTS _MMIO(0x11184)
-#define MEMINT_RSEXIT (1<<7)
-#define MEMINT_CONT_BUSY (1<<6)
-#define MEMINT_AVG_BUSY (1<<5)
-#define MEMINT_EVAL_CHG (1<<4)
-#define MEMINT_MON_IDLE (1<<3)
-#define MEMINT_UP_EVAL (1<<2)
-#define MEMINT_DOWN_EVAL (1<<1)
-#define MEMINT_SW_CMD (1<<0)
+#define MEMINT_RSEXIT (1 << 7)
+#define MEMINT_CONT_BUSY (1 << 6)
+#define MEMINT_AVG_BUSY (1 << 5)
+#define MEMINT_EVAL_CHG (1 << 4)
+#define MEMINT_MON_IDLE (1 << 3)
+#define MEMINT_UP_EVAL (1 << 2)
+#define MEMINT_DOWN_EVAL (1 << 1)
+#define MEMINT_SW_CMD (1 << 0)
#define MEMMODECTL _MMIO(0x11190)
-#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_EN (1 << 31)
#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
#define MEMMODE_BOOST_FREQ_SHIFT 24
#define MEMMODE_IDLE_MODE_MASK 0x00030000
#define MEMMODE_IDLE_MODE_SHIFT 16
#define MEMMODE_IDLE_MODE_EVAL 0
#define MEMMODE_IDLE_MODE_CONT 1
-#define MEMMODE_HWIDLE_EN (1<<15)
-#define MEMMODE_SWMODE_EN (1<<14)
-#define MEMMODE_RCLK_GATE (1<<13)
-#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_HWIDLE_EN (1 << 15)
+#define MEMMODE_SWMODE_EN (1 << 14)
+#define MEMMODE_RCLK_GATE (1 << 13)
+#define MEMMODE_HW_UPDATE (1 << 12)
#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
#define MEMMODE_FSTART_SHIFT 8
#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
@@ -3586,8 +3707,8 @@ enum i915_power_well_id {
#define SWMEMCMD_TARVID (3 << 13)
#define SWMEMCMD_VRM_OFF (4 << 13)
#define SWMEMCMD_VRM_ON (5 << 13)
-#define CMDSTS (1<<12)
-#define SFCAVM (1<<11)
+#define CMDSTS (1 << 12)
+#define SFCAVM (1 << 11)
#define SWFREQ_MASK 0x0380 /* P0-7 */
#define SWFREQ_SHIFT 7
#define TARVID_MASK 0x001f
@@ -3596,49 +3717,49 @@ enum i915_power_well_id {
#define RCUPEI _MMIO(0x111b0)
#define RCDNEI _MMIO(0x111b4)
#define RSTDBYCTL _MMIO(0x111b8)
-#define RS1EN (1<<31)
-#define RS2EN (1<<30)
-#define RS3EN (1<<29)
-#define D3RS3EN (1<<28) /* Display D3 imlies RS3 */
-#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
-#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
-#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
-#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
-#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
-#define RSX_STATUS_MASK (7<<20)
-#define RSX_STATUS_ON (0<<20)
-#define RSX_STATUS_RC1 (1<<20)
-#define RSX_STATUS_RC1E (2<<20)
-#define RSX_STATUS_RS1 (3<<20)
-#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
-#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
-#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
-#define RSX_STATUS_RSVD2 (7<<20)
-#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
-#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
-#define JRSC (1<<17) /* rsx coupled to cpu c-state */
-#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
-#define RS1CONTSAV_MASK (3<<14)
-#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
-#define RS1CONTSAV_RSVD (1<<14)
-#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
-#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
-#define NORMSLEXLAT_MASK (3<<12)
-#define SLOW_RS123 (0<<12)
-#define SLOW_RS23 (1<<12)
-#define SLOW_RS3 (2<<12)
-#define NORMAL_RS123 (3<<12)
-#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
-#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
-#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
-#define RS_CSTATE_MASK (3<<4)
-#define RS_CSTATE_C367_RS1 (0<<4)
-#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
-#define RS_CSTATE_RSVD (2<<4)
-#define RS_CSTATE_C367_RS2 (3<<4)
-#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
-#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
+#define RS1EN (1 << 31)
+#define RS2EN (1 << 30)
+#define RS3EN (1 << 29)
+#define D3RS3EN (1 << 28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
+#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7 << 20)
+#define RSX_STATUS_ON (0 << 20)
+#define RSX_STATUS_RC1 (1 << 20)
+#define RSX_STATUS_RC1E (2 << 20)
+#define RSX_STATUS_RS1 (3 << 20)
+#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7 << 20)
+#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
+#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3 << 14)
+#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1 << 14)
+#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3 << 12)
+#define SLOW_RS123 (0 << 12)
+#define SLOW_RS23 (1 << 12)
+#define SLOW_RS3 (2 << 12)
+#define NORMAL_RS123 (3 << 12)
+#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
+#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3 << 4)
+#define RS_CSTATE_C367_RS1 (0 << 4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define RS_CSTATE_RSVD (2 << 4)
+#define RS_CSTATE_C367_RS2 (3 << 4)
+#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
#define VIDCTL _MMIO(0x111c0)
#define VIDSTS _MMIO(0x111c8)
#define VIDSTART _MMIO(0x111cc) /* 8 bits */
@@ -3647,7 +3768,7 @@ enum i915_power_well_id {
#define MEMSTAT_VID_SHIFT 8
#define MEMSTAT_PSTATE_MASK 0x00f8
#define MEMSTAT_PSTATE_SHIFT 3
-#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_MON_ACTV (1 << 2)
#define MEMSTAT_SRC_CTL_MASK 0x0003
#define MEMSTAT_SRC_CTL_CORE 0
#define MEMSTAT_SRC_CTL_TRB 1
@@ -3656,7 +3777,7 @@ enum i915_power_well_id {
#define RCPREVBSYTUPAVG _MMIO(0x113b8)
#define RCPREVBSYTDNAVG _MMIO(0x113bc)
#define PMMISC _MMIO(0x11214)
-#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
#define SDEW _MMIO(0x1124c)
#define CSIEW0 _MMIO(0x11250)
#define CSIEW1 _MMIO(0x11254)
@@ -3673,8 +3794,8 @@ enum i915_power_well_id {
#define RPPREVBSYTUPAVG _MMIO(0x113b8)
#define RPPREVBSYTDNAVG _MMIO(0x113bc)
#define ECR _MMIO(0x11600)
-#define ECR_GPFE (1<<31)
-#define ECR_IMONE (1<<30)
+#define ECR_GPFE (1 << 31)
+#define ECR_IMONE (1 << 30)
#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
#define OGW0 _MMIO(0x11608)
#define OGW1 _MMIO(0x1160c)
@@ -3781,11 +3902,11 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
-#define GEN8_CTX_VALID (1<<0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
-#define GEN8_CTX_FORCE_RESTORE (1<<2)
-#define GEN8_CTX_L3LLC_COHERENT (1<<5)
-#define GEN8_CTX_PRIVILEGE (1<<8)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ID_SHIFT 32
@@ -3807,7 +3928,7 @@ enum {
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
-#define OC_BUF (0x3<<20)
+#define OC_BUF (0x3 << 20)
#define OGAMC5 _MMIO(0x30010)
#define OGAMC4 _MMIO(0x30014)
#define OGAMC3 _MMIO(0x30018)
@@ -3975,64 +4096,65 @@ enum {
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
-#define VLV_EDP_PSR_ENABLE (1<<0)
-#define VLV_EDP_PSR_RESET (1<<1)
-#define VLV_EDP_PSR_MODE_MASK (7<<2)
-#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
-#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
-#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
-#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
-#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
-#define VLV_EDP_PSR_DBL_FRAME (1<<10)
-#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_ENABLE (1 << 0)
+#define VLV_EDP_PSR_RESET (1 << 1)
+#define VLV_EDP_PSR_MODE_MASK (7 << 2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
+#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
-#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
-#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
-#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
-#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
#define VLV_EDP_PSR_CURR_STATE_MASK 7
-#define VLV_EDP_PSR_DISABLED (0<<0)
-#define VLV_EDP_PSR_INACTIVE (1<<0)
-#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
-#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
-#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
-#define VLV_EDP_PSR_EXIT (5<<0)
-#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_EDP_PSR_DISABLED (0 << 0)
+#define VLV_EDP_PSR_INACTIVE (1 << 0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
+#define VLV_EDP_PSR_EXIT (5 << 0)
+#define VLV_EDP_PSR_IN_TRANS (1 << 7)
#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
-#define EDP_PSR_ENABLE (1<<31)
-#define BDW_PSR_SINGLE_FRAME (1<<30)
-#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */
-#define EDP_PSR_LINK_STANDBY (1<<27)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_ENABLE (1 << 31)
+#define BDW_PSR_SINGLE_FRAME (1 << 30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
+#define EDP_PSR_LINK_STANDBY (1 << 27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
-#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
-#define EDP_PSR_TP1_TP2_SEL (0<<11)
-#define EDP_PSR_TP1_TP3_SEL (1<<11)
-#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
-#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
-#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
-#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
-#define EDP_PSR_TP1_TIME_500us (0<<4)
-#define EDP_PSR_TP1_TIME_100us (1<<4)
-#define EDP_PSR_TP1_TIME_2500us (2<<4)
-#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_SKIP_AUX_EXIT (1 << 12)
+#define EDP_PSR_TP1_TP2_SEL (0 << 11)
+#define EDP_PSR_TP1_TP3_SEL (1 << 11)
+#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */
+#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP1_TIME_500us (0 << 4)
+#define EDP_PSR_TP1_TIME_100us (1 << 4)
+#define EDP_PSR_TP1_TIME_2500us (2 << 4)
+#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
/* Bspec claims those aren't shifted but stay at 0x64800 */
@@ -4052,55 +4174,56 @@ enum {
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
-#define EDP_PSR_STATUS_STATE_MASK (7<<29)
-#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
-#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
-#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
-#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
-#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
-#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
-#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
-#define EDP_PSR_STATUS_LINK_MASK (3<<26)
-#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
-#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
-#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
+#define EDP_PSR_STATUS_STATE_SHIFT 29
+#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29)
+#define EDP_PSR_STATUS_STATE_BUFON (4 << 29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29)
+#define EDP_PSR_STATUS_LINK_MASK (3 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26)
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
#define EDP_PSR_STATUS_COUNT_SHIFT 16
#define EDP_PSR_STATUS_COUNT_MASK 0xf
-#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
-#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
-#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
-#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
-#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_AUX_ERROR (1 << 15)
+#define EDP_PSR_STATUS_AUX_SENDING (1 << 12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
-#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
-#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
-#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
-#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
-#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
+#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
+#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
-#define EDP_PSR2_ENABLE (1<<31)
-#define EDP_SU_TRACK_ENABLE (1<<30)
-#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */
-#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */
-#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
-#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
-#define EDP_PSR2_TP2_TIME_500 (0<<8)
-#define EDP_PSR2_TP2_TIME_100 (1<<8)
-#define EDP_PSR2_TP2_TIME_2500 (2<<8)
-#define EDP_PSR2_TP2_TIME_50 (3<<8)
-#define EDP_PSR2_TP2_TIME_MASK (3<<8)
+#define EDP_PSR2_ENABLE (1 << 31)
+#define EDP_SU_TRACK_ENABLE (1 << 30)
+#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_PSR2_TP2_TIME_500us (0 << 8)
+#define EDP_PSR2_TP2_TIME_100us (1 << 8)
+#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
+#define EDP_PSR2_TP2_TIME_50us (3 << 8)
+#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
-#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
#define EDP_PSR2_IDLE_FRAME_MASK 0xf
#define EDP_PSR2_IDLE_FRAME_SHIFT 0
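
[Editor's note] The newly added EDP_PSR_STATUS_STATE_SHIFT makes decoding the PSR state a plain mask-and-shift. A minimal sketch (the helper is illustrative; how the register value is read is out of scope here):

static inline unsigned int edp_psr_status_state(unsigned int val)
{
	return (val & EDP_PSR_STATUS_STATE_MASK) >> EDP_PSR_STATUS_STATE_SHIFT;
}
/* e.g. a return value of 2 corresponds to EDP_PSR_STATUS_STATE_SRDENT. */
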
@@ -4128,7 +4251,7 @@ enum {
#define PSR_EVENT_PSR_DISABLE (1 << 0)
#define EDP_PSR2_STATUS _MMIO(0x6f940)
-#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
/* VGA port control */
@@ -4136,47 +4259,48 @@ enum {
#define PCH_ADPA _MMIO(0xe1100)
#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
-#define ADPA_DAC_ENABLE (1<<31)
+#define ADPA_DAC_ENABLE (1 << 31)
#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SELECT_MASK (1<<30)
-#define ADPA_PIPE_A_SELECT 0
-#define ADPA_PIPE_B_SELECT (1<<30)
-#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_PIPE_SEL_SHIFT 30
+#define ADPA_PIPE_SEL_MASK (1 << 30)
+#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
+#define ADPA_PIPE_SEL_SHIFT_CPT 29
+#define ADPA_PIPE_SEL_MASK_CPT (3 << 29)
+#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bits 25:16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 10)
#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 11)
#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
#define ADPA_HSYNC_ACTIVE_LOW 0
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
+#define ADPA_DPMS_MASK (~(3 << 10))
+#define ADPA_DPMS_ON (0 << 10)
+#define ADPA_DPMS_SUSPEND (1 << 10)
+#define ADPA_DPMS_STANDBY (2 << 10)
+#define ADPA_DPMS_OFF (3 << 10)
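
[Editor's note] One subtlety: ADPA_DPMS_MASK is stored pre-inverted (~(3 << 10)), so callers AND it in directly rather than writing `val &= ~MASK'. A minimal illustrative helper (not part of the patch):

static inline unsigned int adpa_set_dpms_suspend(unsigned int adpa)
{
	/* The mask already has bits 11:10 cleared; no extra ~ needed. */
	return (adpa & ADPA_DPMS_MASK) | ADPA_DPMS_SUSPEND;
}
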
/* Hotplug control (945+ only) */
@@ -4301,9 +4425,9 @@ enum {
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_SHIFT 30
#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/*
@@ -4343,12 +4467,14 @@ enum {
#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
/* DVO port control */
@@ -4359,7 +4485,9 @@ enum {
#define _DVOC 0x61160
#define DVOC _MMIO(_DVOC)
#define DVO_ENABLE (1 << 31)
-#define DVO_PIPE_B_SELECT (1 << 30)
+#define DVO_PIPE_SEL_SHIFT 30
+#define DVO_PIPE_SEL_MASK (1 << 30)
+#define DVO_PIPE_SEL(pipe) ((pipe) << 30)
#define DVO_PIPE_STALL_UNUSED (0 << 28)
#define DVO_PIPE_STALL (1 << 28)
#define DVO_PIPE_STALL_TV (2 << 28)
@@ -4381,7 +4509,7 @@ enum {
#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
-#define DVO_PRESERVE_MASK (0x7<<24)
+#define DVO_PRESERVE_MASK (0x7 << 24)
#define DVOA_SRCDIM _MMIO(0x61124)
#define DVOB_SRCDIM _MMIO(0x61144)
#define DVOC_SRCDIM _MMIO(0x61164)
@@ -4396,9 +4524,12 @@ enum {
*/
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPEB_SELECT (1 << 30)
-#define LVDS_PIPE_MASK (1 << 30)
-#define LVDS_PIPE(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT 30
+#define LVDS_PIPE_SEL_MASK (1 << 30)
+#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT_CPT 29
+#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
+#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -4471,6 +4602,16 @@ enum {
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+#define DRM_DIP_ENABLE (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 26)
+#define VSC_SELECT_SHIFT 26
+#define VSC_DIP_HW_HEA_DATA (0 << 26)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
+#define VSC_DIP_SW_HEA_DATA (3 << 26)
+#define VDIP_ENABLE_PPS (1 << 24)
+
/* Panel power sequencing */
#define PPS_BASE 0x61200
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4695,7 +4836,9 @@ enum {
/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
/* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT (1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT 30
+# define TV_ENC_PIPE_SEL_MASK (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30)
/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
/* Outputs SVideo video (DAC B/C) */
@@ -5177,10 +5320,15 @@ enum {
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
#define DP_PORT_EN (1 << 31)
-#define DP_PIPEB_SELECT (1 << 30)
-#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
-#define DP_PIPE_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_SHIFT 30
+#define DP_PIPE_SEL_MASK (1 << 30)
+#define DP_PIPE_SEL(pipe) ((pipe) << 30)
+#define DP_PIPE_SEL_SHIFT_IVB 29
+#define DP_PIPE_SEL_MASK_IVB (3 << 29)
+#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
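
[Editor's note] With this change every DP pipe-select variant follows the same SHIFT/MASK/SEL pattern, so retargeting a port becomes a uniform read-modify-write. A hedged sketch for the non-IVB/CHV layout, with I915_READ/I915_WRITE as assumed accessor names and the types as used elsewhere in the driver:

/* Illustrative only: steer a DP port to `pipe' via the common pattern. */
static void dp_port_set_pipe(struct drm_i915_private *dev_priv,
			     i915_reg_t reg, enum pipe pipe)
{
	u32 val = I915_READ(reg);

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(pipe);
	I915_WRITE(reg, val);
}
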
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -5287,6 +5435,13 @@ enum {
#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
+#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410)
+#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414)
+#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418)
+#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c)
+#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420)
+#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424)
+
#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
@@ -5342,7 +5497,7 @@ enum {
#define _PIPEB_DATA_M_G4X 0x71050
/* Transfer unit size for display port: the field holds TU size - 1; default is 0x3f (TU size 64) */
-#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE(x) (((x) - 1) << 25) /* default size 64 */
#define TU_SIZE_SHIFT 25
#define TU_SIZE_MASK (0x3f << 25)
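
[Editor's note] TU_SIZE() stores the transfer-unit size minus one in bits 30:25, hence the 0x3f default for a TU of 64; restating the macro, the identity can be checked standalone (the assert is illustrative):

#define TU_SIZE(x)	(((x) - 1) << 25)
_Static_assert(TU_SIZE(64) == (0x3f << 25), "default TU size encodes as 0x3f");
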
@@ -5384,18 +5539,18 @@ enum {
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
-#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_ENABLE (1 << 31)
#define PIPECONF_DISABLE 0
-#define PIPECONF_DOUBLE_WIDE (1<<30)
-#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define PIPECONF_DOUBLE_WIDE (1 << 30)
+#define I965_PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */
+#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
-#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PIPE_LOCKED (1 << 25)
#define PIPECONF_PALETTE 0
-#define PIPECONF_GAMMA (1<<24)
-#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_GAMMA (1 << 24)
+#define PIPECONF_FORCE_BORDER (1 << 25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5414,67 +5569,67 @@ enum {
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
-#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
-#define PIPECONF_8BPC (0<<5)
-#define PIPECONF_10BPC (1<<5)
-#define PIPECONF_6BPC (2<<5)
-#define PIPECONF_12BPC (3<<5)
-#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_8BPC (0 << 5)
+#define PIPECONF_10BPC (1 << 5)
+#define PIPECONF_6BPC (2 << 5)
+#define PIPECONF_12BPC (3 << 5)
+#define PIPECONF_DITHER_EN (1 << 4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
-#define PIPECONF_DITHER_TYPE_SP (0<<2)
-#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
-#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
-#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define PIPECONF_DITHER_TYPE_SP (0 << 2)
+#define PIPECONF_DITHER_TYPE_ST1 (1 << 2)
+#define PIPECONF_DITHER_TYPE_ST2 (2 << 2)
+#define PIPECONF_DITHER_TYPE_TEMP (3 << 2)
#define _PIPEASTAT 0x70024
-#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
-#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
-#define PIPE_CRC_DONE_ENABLE (1UL<<28)
-#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
-#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
-#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
-#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
-#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
-#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
-#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
-#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
-#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
-#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
-#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
-#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define PIPE_DPST_EVENT_STATUS (1UL<<7)
-#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
-#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
-#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_HBLANK_INT_STATUS (1UL<<0)
-#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
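
The enable/status split above lends itself to a compact interrupt helper: enable bits occupy the high word, sticky status bits the low word. A minimal sketch, not part of this patch; the helper name is hypothetical, and it assumes the PIPESTAT(pipe) register macro and the driver's I915_READ()/I915_WRITE() accessors.

static void example_enable_pipe_vblank(struct drm_i915_private *dev_priv,
                                       enum pipe pipe)
{
        u32 val = I915_READ(PIPESTAT(pipe));

        /* The enable bit lives in the high word (PIPESTAT_INT_ENABLE_MASK);
         * writing the matching low-word status bit as 1 acks any stale
         * event in the same register write. */
        val |= PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_VBLANK_INTERRUPT_STATUS;
        I915_WRITE(PIPESTAT(pipe), val);
}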
@@ -5503,67 +5658,67 @@ enum {
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1<<27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
-#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1<<11)
-#define PIPEMISC_DITHER_BPC_MASK (7<<5)
-#define PIPEMISC_DITHER_8_BPC (0<<5)
-#define PIPEMISC_DITHER_10_BPC (1<<5)
-#define PIPEMISC_DITHER_6_BPC (2<<5)
-#define PIPEMISC_DITHER_12_BPC (3<<5)
-#define PIPEMISC_DITHER_ENABLE (1<<4)
-#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
-#define PIPEMISC_DITHER_TYPE_SP (0<<2)
+#define PIPEMISC_YUV420_ENABLE (1 << 27)
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
+#define PIPEMISC_DITHER_8_BPC (0 << 5)
+#define PIPEMISC_DITHER_10_BPC (1 << 5)
+#define PIPEMISC_DITHER_6_BPC (2 << 5)
+#define PIPEMISC_DITHER_12_BPC (3 << 5)
+#define PIPEMISC_DITHER_ENABLE (1 << 4)
+#define PIPEMISC_DITHER_TYPE_MASK (3 << 2)
+#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
-#define PIPEB_HLINE_INT_EN (1<<28)
-#define PIPEB_VBLANK_INT_EN (1<<27)
-#define SPRITED_FLIP_DONE_INT_EN (1<<26)
-#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
-#define PLANEB_FLIP_DONE_INT_EN (1<<24)
-#define PIPE_PSR_INT_EN (1<<22)
-#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
-#define PIPEA_HLINE_INT_EN (1<<20)
-#define PIPEA_VBLANK_INT_EN (1<<19)
-#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
-#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
-#define PLANEA_FLIPDONE_INT_EN (1<<16)
-#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
-#define PIPEC_HLINE_INT_EN (1<<12)
-#define PIPEC_VBLANK_INT_EN (1<<11)
-#define SPRITEF_FLIPDONE_INT_EN (1<<10)
-#define SPRITEE_FLIPDONE_INT_EN (1<<9)
-#define PLANEC_FLIPDONE_INT_EN (1<<8)
+#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
+#define PIPEB_HLINE_INT_EN (1 << 28)
+#define PIPEB_VBLANK_INT_EN (1 << 27)
+#define SPRITED_FLIP_DONE_INT_EN (1 << 26)
+#define SPRITEC_FLIP_DONE_INT_EN (1 << 25)
+#define PLANEB_FLIP_DONE_INT_EN (1 << 24)
+#define PIPE_PSR_INT_EN (1 << 22)
+#define PIPEA_LINE_COMPARE_INT_EN (1 << 21)
+#define PIPEA_HLINE_INT_EN (1 << 20)
+#define PIPEA_VBLANK_INT_EN (1 << 19)
+#define SPRITEB_FLIP_DONE_INT_EN (1 << 18)
+#define SPRITEA_FLIP_DONE_INT_EN (1 << 17)
+#define PLANEA_FLIPDONE_INT_EN (1 << 16)
+#define PIPEC_LINE_COMPARE_INT_EN (1 << 13)
+#define PIPEC_HLINE_INT_EN (1 << 12)
+#define PIPEC_VBLANK_INT_EN (1 << 11)
+#define SPRITEF_FLIPDONE_INT_EN (1 << 10)
+#define SPRITEE_FLIPDONE_INT_EN (1 << 9)
+#define PLANEC_FLIPDONE_INT_EN (1 << 8)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
-#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
-#define PLANEC_INVALID_GTT_INT_EN (1<<25)
-#define CURSORC_INVALID_GTT_INT_EN (1<<24)
-#define CURSORB_INVALID_GTT_INT_EN (1<<23)
-#define CURSORA_INVALID_GTT_INT_EN (1<<22)
-#define SPRITED_INVALID_GTT_INT_EN (1<<21)
-#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
-#define PLANEB_INVALID_GTT_INT_EN (1<<19)
-#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
-#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
-#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define SPRITEF_INVALID_GTT_INT_EN (1 << 27)
+#define SPRITEE_INVALID_GTT_INT_EN (1 << 26)
+#define PLANEC_INVALID_GTT_INT_EN (1 << 25)
+#define CURSORC_INVALID_GTT_INT_EN (1 << 24)
+#define CURSORB_INVALID_GTT_INT_EN (1 << 23)
+#define CURSORA_INVALID_GTT_INT_EN (1 << 22)
+#define SPRITED_INVALID_GTT_INT_EN (1 << 21)
+#define SPRITEC_INVALID_GTT_INT_EN (1 << 20)
+#define PLANEB_INVALID_GTT_INT_EN (1 << 19)
+#define SPRITEB_INVALID_GTT_INT_EN (1 << 18)
+#define SPRITEA_INVALID_GTT_INT_EN (1 << 17)
+#define PLANEA_INVALID_GTT_INT_EN (1 << 16)
#define DPINVGTT_EN_MASK 0xff0000
#define DPINVGTT_EN_MASK_CHV 0xfff0000
-#define SPRITEF_INVALID_GTT_STATUS (1<<11)
-#define SPRITEE_INVALID_GTT_STATUS (1<<10)
-#define PLANEC_INVALID_GTT_STATUS (1<<9)
-#define CURSORC_INVALID_GTT_STATUS (1<<8)
-#define CURSORB_INVALID_GTT_STATUS (1<<7)
-#define CURSORA_INVALID_GTT_STATUS (1<<6)
-#define SPRITED_INVALID_GTT_STATUS (1<<5)
-#define SPRITEC_INVALID_GTT_STATUS (1<<4)
-#define PLANEB_INVALID_GTT_STATUS (1<<3)
-#define SPRITEB_INVALID_GTT_STATUS (1<<2)
-#define SPRITEA_INVALID_GTT_STATUS (1<<1)
-#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define SPRITEF_INVALID_GTT_STATUS (1 << 11)
+#define SPRITEE_INVALID_GTT_STATUS (1 << 10)
+#define PLANEC_INVALID_GTT_STATUS (1 << 9)
+#define CURSORC_INVALID_GTT_STATUS (1 << 8)
+#define CURSORB_INVALID_GTT_STATUS (1 << 7)
+#define CURSORA_INVALID_GTT_STATUS (1 << 6)
+#define SPRITED_INVALID_GTT_STATUS (1 << 5)
+#define SPRITEC_INVALID_GTT_STATUS (1 << 4)
+#define PLANEB_INVALID_GTT_STATUS (1 << 3)
+#define SPRITEB_INVALID_GTT_STATUS (1 << 2)
+#define SPRITEA_INVALID_GTT_STATUS (1 << 1)
+#define PLANEA_INVALID_GTT_STATUS (1 << 0)
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
@@ -5604,149 +5759,149 @@ enum {
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_SR_MASK (0x1ff << 23)
#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f<<16)
+#define DSPFW_CURSORB_MASK (0x3f << 16)
#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f<<8)
-#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
#define DSPFW_PLANEA_SHIFT 0
-#define DSPFW_PLANEA_MASK (0x7f<<0)
-#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
-#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
-#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
#define DSPFW_FBC_HPLL_SR_SHIFT 24
-#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
#define DSPFW_SPRITEB_SHIFT (16)
-#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
-#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_CURSORA_MASK (0x3f<<8)
+#define DSPFW_CURSORA_MASK (0x3f << 8)
#define DSPFW_PLANEC_OLD_SHIFT 0
-#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
-#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
-#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1<<31)
-#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
#define DSPFW_CURSOR_SR_SHIFT 24
-#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
#define DSPFW_HPLL_CURSOR_SHIFT 16
-#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
#define DSPFW_HPLL_SR_SHIFT 0
-#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
/* vlv/chv */
#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
#define DSPFW_SPRITEB_WM1_SHIFT 16
-#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
#define DSPFW_CURSORA_WM1_SHIFT 8
-#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
#define DSPFW_SPRITEA_WM1_SHIFT 0
-#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
#define DSPFW_PLANEB_WM1_SHIFT 24
-#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
#define DSPFW_PLANEA_WM1_SHIFT 16
-#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
#define DSPFW_CURSORB_WM1_SHIFT 8
-#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
#define DSPFW_CURSOR_SR_WM1_SHIFT 0
-#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
#define DSPFW_SR_WM1_SHIFT 0
-#define DSPFW_SR_WM1_MASK (0x1ff<<0)
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
#define DSPFW_SPRITED_WM1_SHIFT 24
-#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
-#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
-#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
-#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
-#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
#define DSPFW_CURSORC_WM1_SHIFT 8
-#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 8)
#define DSPFW_CURSORC_SHIFT 0
-#define DSPFW_CURSORC_MASK (0x3f<<0)
+#define DSPFW_CURSORC_MASK (0x3f << 0)
/* vlv/chv high order bits */
#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
-#define DSPFW_SPRITEF_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_HI_SHIFT 22
-#define DSPFW_SPRITEE_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
#define DSPFW_PLANEC_HI_SHIFT 21
-#define DSPFW_PLANEC_HI_MASK (1<<21)
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
#define DSPFW_SPRITED_HI_SHIFT 20
-#define DSPFW_SPRITED_HI_MASK (1<<20)
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_HI_SHIFT 16
-#define DSPFW_SPRITEC_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
#define DSPFW_PLANEB_HI_SHIFT 12
-#define DSPFW_PLANEB_HI_MASK (1<<12)
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_HI_SHIFT 8
-#define DSPFW_SPRITEB_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_HI_SHIFT 4
-#define DSPFW_SPRITEA_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
#define DSPFW_PLANEA_HI_SHIFT 0
-#define DSPFW_PLANEA_HI_MASK (1<<0)
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
-#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
-#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
#define DSPFW_PLANEC_WM1_HI_SHIFT 21
-#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
#define DSPFW_SPRITED_WM1_HI_SHIFT 20
-#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
-#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
#define DSPFW_PLANEB_WM1_HI_SHIFT 12
-#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
-#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
-#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
#define DSPFW_PLANEA_WM1_HI_SHIFT 0
-#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
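
Reading a watermark back on vlv/chv means stitching the DSPFW low bits together with the corresponding DSPHOWM high-order bit(s). A sketch of that arithmetic for the self-refresh value, with a hypothetical helper name and the I915_READ() accessor assumed:

static u32 example_read_sr_wm(struct drm_i915_private *dev_priv)
{
        u32 lo = (I915_READ(DSPFW1) & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT;
        u32 hi = (I915_READ(DSPHOWM) & DSPFW_SR_HI_MASK) >> DSPFW_SR_HI_SHIFT;

        /* DSPFW1 carries the 9 low-order bits of the SR watermark. */
        return (hi << 9) | lo;
}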
/* drain latency register values */
#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
#define DDL_PLANE_SHIFT 0
-#define DDL_PRECISION_HIGH (1<<7)
-#define DDL_PRECISION_LOW (0<<7)
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
#define DRAIN_LATENCY_MASK 0x7f
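
Each VLV_DDL register packs one 8-bit {precision, 7-bit drain latency} field per plane, with DDL_SPRITE_SHIFT(sprite) expanding to 8 + 8 * sprite. A sketch of building one field (helper name hypothetical, not part of this patch):

static u32 example_ddl_sprite_val(unsigned int sprite, bool high_precision,
                                  u32 drain_latency)
{
        u32 field = (high_precision ? DDL_PRECISION_HIGH : DDL_PRECISION_LOW) |
                    (drain_latency & DRAIN_LATENCY_MASK);

        /* Sprite 0 sits at bit 8, sprite 1 at bit 16, and so on. */
        return field << DDL_SPRITE_SHIFT(sprite);
}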
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define CBR_PND_DEADLINE_DISABLE (1<<31)
-#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -5818,32 +5973,32 @@ enum {
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK _MMIO(0x45100)
-#define WM0_PIPE_PLANE_MASK (0xffff<<16)
+#define WM0_PIPE_PLANE_MASK (0xffff << 16)
#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0xff<<8)
+#define WM0_PIPE_SPRITE_MASK (0xff << 8)
#define WM0_PIPE_SPRITE_SHIFT 8
#define WM0_PIPE_CURSOR_MASK (0xff)
#define WM0_PIPEB_ILK _MMIO(0x45104)
#define WM0_PIPEC_IVB _MMIO(0x45200)
#define WM1_LP_ILK _MMIO(0x45108)
-#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_SR_EN (1 << 31)
#define WM1_LP_LATENCY_SHIFT 24
-#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_LATENCY_MASK (0x7f << 24)
+#define WM1_LP_FBC_MASK (0xf << 20)
#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_FBC_SHIFT_BDW 19
-#define WM1_LP_SR_MASK (0x7ff<<8)
+#define WM1_LP_SR_MASK (0x7ff << 8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM2_LP_EN (1<<31)
+#define WM2_LP_EN (1 << 31)
#define WM3_LP_ILK _MMIO(0x45110)
-#define WM3_LP_EN (1<<31)
+#define WM3_LP_EN (1 << 31)
#define WM1S_LP_ILK _MMIO(0x45120)
#define WM2S_LP_IVB _MMIO(0x45124)
#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM1S_LP_EN (1<<31)
+#define WM1S_LP_EN (1 << 31)
#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
@@ -5900,8 +6055,7 @@ enum {
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_SHIFT 28
-#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define CURSOR_STRIDE(x) ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
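
The ffs()-based encoding above maps the four legal cursor strides onto a 2-bit field; ffs() returns the 1-based index of the least significant set bit, so the arithmetic works out as follows (illustrative only):

        CURSOR_STRIDE(256)  = (ffs(256)  - 9) << 28 = 0 << 28
        CURSOR_STRIDE(512)  = (ffs(512)  - 9) << 28 = 1 << 28
        CURSOR_STRIDE(1024) = (ffs(1024) - 9) << 28 = 2 << 28
        CURSOR_STRIDE(2048) = (ffs(2048) - 9) << 28 = 3 << 28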
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -5910,18 +6064,21 @@ enum {
#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
/* New style CUR*CNTR flags */
-#define CURSOR_MODE 0x27
-#define CURSOR_MODE_DISABLE 0x00
-#define CURSOR_MODE_128_32B_AX 0x02
-#define CURSOR_MODE_256_32B_AX 0x03
-#define CURSOR_MODE_64_32B_AX 0x07
-#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_MODE 0x27
+#define MCURSOR_MODE_DISABLE 0x00
+#define MCURSOR_MODE_128_32B_AX 0x02
+#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_32B_AX 0x07
+#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28)
+#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURSOR_ROTATE_180 (1<<15)
-#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
+#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define MCURSOR_ROTATE_180 (1 << 15)
+#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
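
Taken together, the MCURSOR_* mode, gamma and pipe-select fields compose a complete new-style cursor control value. A minimal sketch (hypothetical helper, not part of this patch):

static u32 example_cursor_ctl(enum pipe pipe)
{
        /* 64x64 ARGB cursor with gamma, routed to the given pipe. */
        return MCURSOR_MODE_64_ARGB_AX |
               MCURSOR_GAMMA_ENABLE |
               MCURSOR_PIPE_SELECT(pipe);
}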
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
@@ -5958,41 +6115,41 @@ enum {
/* Display A control */
#define _DSPACNTR 0x70180
-#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
-#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
#define DISPPLANE_GAMMA_DISABLE 0
-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_YUV422 (0x0<<26)
-#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_BGRA555 (0x3<<26)
-#define DISPPLANE_BGRX555 (0x4<<26)
-#define DISPPLANE_BGRX565 (0x5<<26)
-#define DISPPLANE_BGRX888 (0x6<<26)
-#define DISPPLANE_BGRA888 (0x7<<26)
-#define DISPPLANE_RGBX101010 (0x8<<26)
-#define DISPPLANE_RGBA101010 (0x9<<26)
-#define DISPPLANE_BGRX101010 (0xa<<26)
-#define DISPPLANE_RGBX161616 (0xc<<26)
-#define DISPPLANE_RGBX888 (0xe<<26)
-#define DISPPLANE_RGBA888 (0xf<<26)
-#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_YUV422 (0x0 << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_BGRA555 (0x3 << 26)
+#define DISPPLANE_BGRX555 (0x4 << 26)
+#define DISPPLANE_BGRX565 (0x5 << 26)
+#define DISPPLANE_BGRX888 (0x6 << 26)
+#define DISPPLANE_BGRA888 (0x7 << 26)
+#define DISPPLANE_RGBX101010 (0x8 << 26)
+#define DISPPLANE_RGBA101010 (0x9 << 26)
+#define DISPPLANE_BGRX101010 (0xa << 26)
+#define DISPPLANE_RGBX161616 (0xc << 26)
+#define DISPPLANE_RGBX888 (0xe << 26)
+#define DISPPLANE_RGBA888 (0xf << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
-#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SEL_PIPE(pipe) ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
#define DISPPLANE_SRC_KEY_DISABLE 0
-#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
-#define DISPPLANE_ROTATE_180 (1<<15)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
-#define DISPPLANE_TILED (1<<10)
-#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
+#define DISPPLANE_ALPHA_PREMULTIPLY (1 << 16) /* CHV pipe B */
+#define DISPPLANE_ROTATE_180 (1 << 15)
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
+#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
@@ -6015,15 +6172,15 @@ enum {
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
-#define CHV_BLEND_LEGACY (0<<30)
-#define CHV_BLEND_ANDROID (1<<30)
-#define CHV_BLEND_MPO (2<<30)
-#define CHV_BLEND_MASK (3<<30)
+#define CHV_BLEND_LEGACY (0 << 30)
+#define CHV_BLEND_ANDROID (1 << 30)
+#define CHV_BLEND_MPO (2 << 30)
+#define CHV_BLEND_MASK (3 << 30)
#define _CHV_CANVAS_A 0x60a04
#define _PRIMPOS_A 0x60a08
#define _PRIMSIZE_A 0x60a0c
#define _PRIMCNSTALPHA_A 0x60a10
-#define PRIM_CONST_ALPHA_ENABLE (1<<31)
+#define PRIM_CONST_ALPHA_ENABLE (1 << 31)
#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
@@ -6033,8 +6190,8 @@ enum {
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
-#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
/*
* VBIOS flags
@@ -6064,7 +6221,7 @@ enum {
/* Display B control */
#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
@@ -6079,27 +6236,27 @@ enum {
/* Sprite A control */
#define _DVSACNTR 0x72180
-#define DVS_ENABLE (1<<31)
-#define DVS_GAMMA_ENABLE (1<<30)
-#define DVS_YUV_RANGE_CORRECTION_DISABLE (1<<27)
-#define DVS_PIXFORMAT_MASK (3<<25)
-#define DVS_FORMAT_YUV422 (0<<25)
-#define DVS_FORMAT_RGBX101010 (1<<25)
-#define DVS_FORMAT_RGBX888 (2<<25)
-#define DVS_FORMAT_RGBX161616 (3<<25)
-#define DVS_PIPE_CSC_ENABLE (1<<24)
-#define DVS_SOURCE_KEY (1<<22)
-#define DVS_RGB_ORDER_XBGR (1<<20)
-#define DVS_YUV_FORMAT_BT709 (1<<18)
-#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
-#define DVS_YUV_ORDER_YUYV (0<<16)
-#define DVS_YUV_ORDER_UYVY (1<<16)
-#define DVS_YUV_ORDER_YVYU (2<<16)
-#define DVS_YUV_ORDER_VYUY (3<<16)
-#define DVS_ROTATE_180 (1<<15)
-#define DVS_DEST_KEY (1<<2)
-#define DVS_TRICKLE_FEED_DISABLE (1<<14)
-#define DVS_TILED (1<<10)
+#define DVS_ENABLE (1 << 31)
+#define DVS_GAMMA_ENABLE (1 << 30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE (1 << 27)
+#define DVS_PIXFORMAT_MASK (3 << 25)
+#define DVS_FORMAT_YUV422 (0 << 25)
+#define DVS_FORMAT_RGBX101010 (1 << 25)
+#define DVS_FORMAT_RGBX888 (2 << 25)
+#define DVS_FORMAT_RGBX161616 (3 << 25)
+#define DVS_PIPE_CSC_ENABLE (1 << 24)
+#define DVS_SOURCE_KEY (1 << 22)
+#define DVS_RGB_ORDER_XBGR (1 << 20)
+#define DVS_YUV_FORMAT_BT709 (1 << 18)
+#define DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define DVS_YUV_ORDER_YUYV (0 << 16)
+#define DVS_YUV_ORDER_UYVY (1 << 16)
+#define DVS_YUV_ORDER_YVYU (2 << 16)
+#define DVS_YUV_ORDER_VYUY (3 << 16)
+#define DVS_ROTATE_180 (1 << 15)
+#define DVS_DEST_KEY (1 << 2)
+#define DVS_TRICKLE_FEED_DISABLE (1 << 14)
+#define DVS_TILED (1 << 10)
#define _DVSALINOFF 0x72184
#define _DVSASTRIDE 0x72188
#define _DVSAPOS 0x7218c
@@ -6111,13 +6268,13 @@ enum {
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE (1<<31)
-#define DVS_FILTER_MASK (3<<29)
-#define DVS_FILTER_MEDIUM (0<<29)
-#define DVS_FILTER_ENHANCING (1<<29)
-#define DVS_FILTER_SOFTENING (2<<29)
-#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define DVS_SCALE_ENABLE (1 << 31)
+#define DVS_FILTER_MASK (3 << 29)
+#define DVS_FILTER_MEDIUM (0 << 29)
+#define DVS_FILTER_ENHANCING (1 << 29)
+#define DVS_FILTER_SOFTENING (2 << 29)
+#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _DVSAGAMC 0x72300
#define _DVSBCNTR 0x73180
@@ -6148,31 +6305,31 @@ enum {
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE (1<<31)
-#define SPRITE_GAMMA_ENABLE (1<<30)
-#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1<<28)
-#define SPRITE_PIXFORMAT_MASK (7<<25)
-#define SPRITE_FORMAT_YUV422 (0<<25)
-#define SPRITE_FORMAT_RGBX101010 (1<<25)
-#define SPRITE_FORMAT_RGBX888 (2<<25)
-#define SPRITE_FORMAT_RGBX161616 (3<<25)
-#define SPRITE_FORMAT_YUV444 (4<<25)
-#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_PIPE_CSC_ENABLE (1<<24)
-#define SPRITE_SOURCE_KEY (1<<22)
-#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
-#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
-#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
-#define SPRITE_YUV_ORDER_YUYV (0<<16)
-#define SPRITE_YUV_ORDER_UYVY (1<<16)
-#define SPRITE_YUV_ORDER_YVYU (2<<16)
-#define SPRITE_YUV_ORDER_VYUY (3<<16)
-#define SPRITE_ROTATE_180 (1<<15)
-#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
-#define SPRITE_INT_GAMMA_ENABLE (1<<13)
-#define SPRITE_TILED (1<<10)
-#define SPRITE_DEST_KEY (1<<2)
+#define SPRITE_ENABLE (1 << 31)
+#define SPRITE_GAMMA_ENABLE (1 << 30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define SPRITE_PIXFORMAT_MASK (7 << 25)
+#define SPRITE_FORMAT_YUV422 (0 << 25)
+#define SPRITE_FORMAT_RGBX101010 (1 << 25)
+#define SPRITE_FORMAT_RGBX888 (2 << 25)
+#define SPRITE_FORMAT_RGBX161616 (3 << 25)
+#define SPRITE_FORMAT_YUV444 (4 << 25)
+#define SPRITE_FORMAT_XR_BGR101010 (5 << 25) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE (1 << 24)
+#define SPRITE_SOURCE_KEY (1 << 22)
+#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19)
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SPRITE_YUV_ORDER_YUYV (0 << 16)
+#define SPRITE_YUV_ORDER_UYVY (1 << 16)
+#define SPRITE_YUV_ORDER_YVYU (2 << 16)
+#define SPRITE_YUV_ORDER_VYUY (3 << 16)
+#define SPRITE_ROTATE_180 (1 << 15)
+#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14)
+#define SPRITE_INT_GAMMA_ENABLE (1 << 13)
+#define SPRITE_TILED (1 << 10)
+#define SPRITE_DEST_KEY (1 << 2)
#define _SPRA_LINOFF 0x70284
#define _SPRA_STRIDE 0x70288
#define _SPRA_POS 0x7028c
@@ -6185,13 +6342,13 @@ enum {
#define _SPRA_OFFSET 0x702a4
#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE (1<<31)
-#define SPRITE_FILTER_MASK (3<<29)
-#define SPRITE_FILTER_MEDIUM (0<<29)
-#define SPRITE_FILTER_ENHANCING (1<<29)
-#define SPRITE_FILTER_SOFTENING (2<<29)
-#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define SPRITE_SCALE_ENABLE (1 << 31)
+#define SPRITE_FILTER_MASK (3 << 29)
+#define SPRITE_FILTER_MEDIUM (0 << 29)
+#define SPRITE_FILTER_ENHANCING (1 << 29)
+#define SPRITE_FILTER_SOFTENING (2 << 29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _SPRA_GAMC 0x70400
#define _SPRB_CTL 0x71280
@@ -6225,28 +6382,28 @@ enum {
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
-#define SP_ENABLE (1<<31)
-#define SP_GAMMA_ENABLE (1<<30)
-#define SP_PIXFORMAT_MASK (0xf<<26)
-#define SP_FORMAT_YUV422 (0<<26)
-#define SP_FORMAT_BGR565 (5<<26)
-#define SP_FORMAT_BGRX8888 (6<<26)
-#define SP_FORMAT_BGRA8888 (7<<26)
-#define SP_FORMAT_RGBX1010102 (8<<26)
-#define SP_FORMAT_RGBA1010102 (9<<26)
-#define SP_FORMAT_RGBX8888 (0xe<<26)
-#define SP_FORMAT_RGBA8888 (0xf<<26)
-#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
-#define SP_SOURCE_KEY (1<<22)
-#define SP_YUV_FORMAT_BT709 (1<<18)
-#define SP_YUV_BYTE_ORDER_MASK (3<<16)
-#define SP_YUV_ORDER_YUYV (0<<16)
-#define SP_YUV_ORDER_UYVY (1<<16)
-#define SP_YUV_ORDER_YVYU (2<<16)
-#define SP_YUV_ORDER_VYUY (3<<16)
-#define SP_ROTATE_180 (1<<15)
-#define SP_TILED (1<<10)
-#define SP_MIRROR (1<<8) /* CHV pipe B */
+#define SP_ENABLE (1 << 31)
+#define SP_GAMMA_ENABLE (1 << 30)
+#define SP_PIXFORMAT_MASK (0xf << 26)
+#define SP_FORMAT_YUV422 (0 << 26)
+#define SP_FORMAT_BGR565 (5 << 26)
+#define SP_FORMAT_BGRX8888 (6 << 26)
+#define SP_FORMAT_BGRA8888 (7 << 26)
+#define SP_FORMAT_RGBX1010102 (8 << 26)
+#define SP_FORMAT_RGBA1010102 (9 << 26)
+#define SP_FORMAT_RGBX8888 (0xe << 26)
+#define SP_FORMAT_RGBA8888 (0xf << 26)
+#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
+#define SP_SOURCE_KEY (1 << 22)
+#define SP_YUV_FORMAT_BT709 (1 << 18)
+#define SP_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SP_YUV_ORDER_YUYV (0 << 16)
+#define SP_YUV_ORDER_UYVY (1 << 16)
+#define SP_YUV_ORDER_YVYU (2 << 16)
+#define SP_YUV_ORDER_VYUY (3 << 16)
+#define SP_ROTATE_180 (1 << 15)
+#define SP_TILED (1 << 10)
+#define SP_MIRROR (1 << 8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -6257,7 +6414,7 @@ enum {
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
-#define SP_CONST_ALPHA_ENABLE (1<<31)
+#define SP_CONST_ALPHA_ENABLE (1 << 31)
#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */
#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */
@@ -6349,40 +6506,40 @@ enum {
* correctly map to the same formats in ICL, as long as bit 23 is set to 0
*/
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
-#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
-#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
-#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
-#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
-#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
-#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
-#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
-#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
+#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
+#define PLANE_CTL_FORMAT_NV12 (1 << 24)
+#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
+#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
+#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
+#define PLANE_CTL_FORMAT_AYUV (8 << 24)
+#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
+#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
-#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
-#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
+#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
+#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
-#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
-#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
-#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
-#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
+#define PLANE_CTL_YUV422_YUYV (0 << 16)
+#define PLANE_CTL_YUV422_UYVY (1 << 16)
+#define PLANE_CTL_YUV422_YVYU (2 << 16)
+#define PLANE_CTL_YUV422_VYUY (3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
-#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
-#define PLANE_CTL_TILED_X ( 1 << 10)
-#define PLANE_CTL_TILED_Y ( 4 << 10)
-#define PLANE_CTL_TILED_YF ( 5 << 10)
-#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
+#define PLANE_CTL_TILED_LINEAR (0 << 10)
+#define PLANE_CTL_TILED_X (1 << 10)
+#define PLANE_CTL_TILED_Y (4 << 10)
+#define PLANE_CTL_TILED_YF (5 << 10)
+#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
-#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
-#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
-#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
+#define PLANE_CTL_ALPHA_DISABLE (0 << 4)
+#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4)
+#define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
#define PLANE_CTL_ROTATE_90 0x1
@@ -6610,7 +6767,7 @@ enum {
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
-#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
@@ -6659,14 +6816,14 @@ enum {
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
-#define PF_ENABLE (1<<31)
-#define PF_PIPE_SEL_MASK_IVB (3<<29)
-#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
-#define PF_FILTER_MASK (3<<23)
-#define PF_FILTER_PROGRAMMED (0<<23)
-#define PF_FILTER_MED_3x3 (1<<23)
-#define PF_FILTER_EDGE_ENHANCE (2<<23)
-#define PF_FILTER_EDGE_SOFTEN (3<<23)
+#define PF_ENABLE (1 << 31)
+#define PF_PIPE_SEL_MASK_IVB (3 << 29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define PF_FILTER_MASK (3 << 23)
+#define PF_FILTER_PROGRAMMED (0 << 23)
+#define PF_FILTER_MED_3x3 (1 << 23)
+#define PF_FILTER_EDGE_ENHANCE (2 << 23)
+#define PF_FILTER_EDGE_SOFTEN (3 << 23)
#define _PFA_WIN_SZ 0x68074
#define _PFB_WIN_SZ 0x68874
#define _PFA_WIN_POS 0x68070
@@ -6684,7 +6841,7 @@ enum {
#define _PSA_CTL 0x68180
#define _PSB_CTL 0x68980
-#define PS_ENABLE (1<<31)
+#define PS_ENABLE (1 << 31)
#define _PSA_WIN_SZ 0x68174
#define _PSB_WIN_SZ 0x68974
#define _PSA_WIN_POS 0x68170
@@ -6769,6 +6926,10 @@ enum {
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
+#define PS_Y_PHASE(x) ((x) << 16)
+#define PS_UV_RGB_PHASE(x) ((x) << 0)
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
#define _PS_HPHASE_1A 0x68194
#define _PS_HPHASE_2A 0x68294
@@ -6782,7 +6943,7 @@ enum {
#define _PS_ECC_STAT_2B 0x68AD0
#define _PS_ECC_STAT_1C 0x691D0
-#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
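
_PICK_EVEN() preserves the arithmetic of the old _ID() body shown in the removed line, extrapolating evenly spaced register instances from the first two addresses. Worked out for the second scaler on pipe A (illustrative only):

        _ID(1, _PS_1A_CTRL, _PS_2A_CTRL)
          = _PS_1A_CTRL + 1 * (_PS_2A_CTRL - _PS_1A_CTRL)
          = _PS_2A_CTRL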
@@ -6863,37 +7024,37 @@ enum {
#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
-#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
#define DE_PIPEA_CRC_DONE (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
/* More Ivybridge lolz */
-#define DE_ERR_INT_IVB (1<<30)
-#define DE_GSE_IVB (1<<29)
-#define DE_PCH_EVENT_IVB (1<<28)
-#define DE_DP_A_HOTPLUG_IVB (1<<27)
-#define DE_AUX_CHANNEL_A_IVB (1<<26)
-#define DE_EDP_PSR_INT_HSW (1<<19)
-#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
-#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
-#define DE_PIPEC_VBLANK_IVB (1<<10)
-#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PIPEB_VBLANK_IVB (1<<5)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
-#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
-#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
-#define MASTER_INTERRUPT_ENABLE (1<<31)
+#define MASTER_INTERRUPT_ENABLE (1 << 31)
#define DEISR _MMIO(0x44000)
#define DEIMR _MMIO(0x44004)
@@ -6906,37 +7067,37 @@ enum {
#define GTIER _MMIO(0x4401c)
#define GEN8_MASTER_IRQ _MMIO(0x44200)
-#define GEN8_MASTER_IRQ_CONTROL (1<<31)
-#define GEN8_PCU_IRQ (1<<30)
-#define GEN8_DE_PCH_IRQ (1<<23)
-#define GEN8_DE_MISC_IRQ (1<<22)
-#define GEN8_DE_PORT_IRQ (1<<20)
-#define GEN8_DE_PIPE_C_IRQ (1<<18)
-#define GEN8_DE_PIPE_B_IRQ (1<<17)
-#define GEN8_DE_PIPE_A_IRQ (1<<16)
-#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
-#define GEN8_GT_VECS_IRQ (1<<6)
-#define GEN8_GT_GUC_IRQ (1<<5)
-#define GEN8_GT_PM_IRQ (1<<4)
-#define GEN8_GT_VCS2_IRQ (1<<3)
-#define GEN8_GT_VCS1_IRQ (1<<2)
-#define GEN8_GT_BCS_IRQ (1<<1)
-#define GEN8_GT_RCS_IRQ (1<<0)
+#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
+#define GEN8_PCU_IRQ (1 << 30)
+#define GEN8_DE_PCH_IRQ (1 << 23)
+#define GEN8_DE_MISC_IRQ (1 << 22)
+#define GEN8_DE_PORT_IRQ (1 << 20)
+#define GEN8_DE_PIPE_C_IRQ (1 << 18)
+#define GEN8_DE_PIPE_B_IRQ (1 << 17)
+#define GEN8_DE_PIPE_A_IRQ (1 << 16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
+#define GEN8_GT_VECS_IRQ (1 << 6)
+#define GEN8_GT_GUC_IRQ (1 << 5)
+#define GEN8_GT_PM_IRQ (1 << 4)
+#define GEN8_GT_VCS2_IRQ (1 << 3)
+#define GEN8_GT_VCS1_IRQ (1 << 2)
+#define GEN8_GT_BCS_IRQ (1 << 1)
+#define GEN8_GT_RCS_IRQ (1 << 0)
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
-#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
-#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
-#define GEN9_GUC_DISPLAY_EVENT (1<<29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
-#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
-#define GEN9_GUC_DB_RING_EVENT (1<<26)
-#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
-#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
-#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30)
+#define GEN9_GUC_DISPLAY_EVENT (1 << 29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27)
+#define GEN9_GUC_DB_RING_EVENT (1 << 26)
+#define GEN9_GUC_DMA_DONE_EVENT (1 << 25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23)
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
@@ -6985,6 +7146,7 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define ICL_AUX_CHANNEL_E (1 << 29)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
@@ -7011,9 +7173,16 @@ enum {
#define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec)
+#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER _MMIO(0x444fc)
+#define GEN11_GU_MISC_GSE (1 << 27)
+
#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
#define GEN11_MASTER_IRQ (1 << 31)
#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_GU_MISC_IRQ (1 << 29)
#define GEN11_DISPLAY_IRQ (1 << 16)
#define GEN11_GT_DW_IRQ(x) (1 << (x))
#define GEN11_GT_DW1_IRQ (1 << 1)
@@ -7024,11 +7193,40 @@ enum {
#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
#define GEN11_DE_PCH_IRQ (1 << 23)
#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
#define GEN11_DE_PORT_IRQ (1 << 20)
#define GEN11_DE_PIPE_C (1 << 18)
#define GEN11_DE_PIPE_B (1 << 17)
#define GEN11_DE_PIPE_A (1 << 16)
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC4_HOTPLUG (1 << 19)
+#define GEN11_TC3_HOTPLUG (1 << 18)
+#define GEN11_TC2_HOTPLUG (1 << 17)
+#define GEN11_TC1_HOTPLUG (1 << 16)
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
+ GEN11_TC3_HOTPLUG | \
+ GEN11_TC2_HOTPLUG | \
+ GEN11_TC1_HOTPLUG)
+#define GEN11_TBT4_HOTPLUG (1 << 3)
+#define GEN11_TBT3_HOTPLUG (1 << 2)
+#define GEN11_TBT2_HOTPLUG (1 << 1)
+#define GEN11_TBT1_HOTPLUG (1 << 0)
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
+ GEN11_TBT3_HOTPLUG | \
+ GEN11_TBT2_HOTPLUG | \
+ GEN11_TBT1_HOTPLUG)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
+#define GEN11_HOTPLUG_CTL_ENABLE(tc_port) (8 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_NO_DETECT(tc_port) (0 << (tc_port) * 4)
+
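Each TC port owns a 4-bit field in GEN11_TC_HOTPLUG_CTL (and likewise in GEN11_TBT_HOTPLUG_CTL), with bit 3 of the field acting as the enable. A sketch of enabling all four ports (hypothetical helper, not part of this patch):

static u32 example_tc_hotplug_enable_all(void)
{
        u32 val = 0;
        int tc_port;

        for (tc_port = 0; tc_port < 4; tc_port++)
                val |= GEN11_HOTPLUG_CTL_ENABLE(tc_port);

        return val; /* 0x8888: one enable bit per 4-bit field */
}
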
#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
#define GEN11_CSME (31)
#define GEN11_GUNIT (28)
@@ -7043,7 +7241,7 @@ enum {
#define GEN11_VECS(x) (31 - (x))
#define GEN11_VCS(x) (x)
-#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + (x * 4))
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
@@ -7052,12 +7250,12 @@ enum {
#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
-#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + (x * 4))
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
-#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + (x * 4))
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
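
The identity register fields decode with plain mask-and-shift, as the two extractor macros above show. A hedged sketch using only the fields defined here (hypothetical helper):

static void example_decode_intr_identity(u32 ident)
{
        u32 instance = GEN11_INTR_ENGINE_INSTANCE(ident); /* bits 25:20 */
        u32 intr = GEN11_INTR_ENGINE_INTR(ident);         /* bits 15:0 */

        /* A real handler would dispatch on these; they are extracted
         * here only to show the field layout. */
        (void)instance;
        (void)intr;
}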
#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
@@ -7079,8 +7277,8 @@ enum {
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
-#define ILK_DPARB_GATE (1<<22)
-#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DPARB_GATE (1 << 22)
+#define ILK_VSDPFD_FULL (1 << 21)
#define FUSE_STRAP _MMIO(0x42014)
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
@@ -7130,31 +7328,31 @@ enum {
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
-#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
-#define DDI_TRAINING_OVERRIDE_ENABLE (1<<19)
-#define DDI_TRAINING_OVERRIDE_VALUE (1<<18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE (1<<16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
-#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
+#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
+#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE (1 << 16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT (1 << 15)
+#define PSR2_VSC_ENABLE_PROG_HEADER (1 << 12)
#define DISP_ARB_CTL _MMIO(0x45000)
-#define DISP_FBC_MEMORY_WAKE (1<<31)
-#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
-#define DISP_FBC_WM_DIS (1<<15)
+#define DISP_FBC_MEMORY_WAKE (1 << 31)
+#define DISP_TILE_SURFACE_SWIZZLING (1 << 13)
+#define DISP_FBC_WM_DIS (1 << 15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 (1<<6)
-#define DISP_IPC_ENABLE (1<<3)
+#define DISP_DATA_PARTITION_5_6 (1 << 6)
+#define DISP_IPC_ENABLE (1 << 3)
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_CTL_S1 _MMIO(0x45008)
#define DBUF_CTL_S2 _MMIO(0x44FE8)
-#define DBUF_POWER_REQUEST (1<<31)
-#define DBUF_POWER_STATE (1<<30)
+#define DBUF_POWER_REQUEST (1 << 31)
+#define DBUF_POWER_STATE (1 << 30)
#define GEN7_MSG_CTL _MMIO(0x45010)
-#define WAIT_FOR_PCH_RESET_ACK (1<<1)
-#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
@@ -7179,16 +7377,16 @@ enum {
#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
-#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
-#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
-#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
@@ -7197,22 +7395,27 @@ enum {
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
-#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+ #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26))
+ #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+ #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+ #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+ #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+ #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+ #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
#define HIZ_CHICKEN _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
+# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1 << 3)
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
-#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
+#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7230,7 +7433,7 @@ enum {
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
-#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3AGDIS (1 << 19)
#define GEN7_L3CNTLREG2 _MMIO(0xB020)
#define GEN7_L3CNTLREG3 _MMIO(0xB024)
@@ -7240,7 +7443,7 @@ enum {
#define GEN11_I2M_WRITE_DISABLE (1 << 28)
#define GEN7_L3SQCREG4 _MMIO(0xb034)
-#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
@@ -7251,12 +7454,12 @@ enum {
#define HDC_CHICKEN0 _MMIO(0x7300)
#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define ICL_HDC_MODE _MMIO(0xE5F4)
-#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
-#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
-#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
-#define HDC_FORCE_NON_COHERENT (1<<4)
-#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
+#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
+#define HDC_FORCE_NON_COHERENT (1 << 4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
@@ -7269,13 +7472,21 @@ enum {
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
-#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
#define HSW_SCRATCH1 _MMIO(0xb038)
-#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
#define BDW_SCRATCH1 _MMIO(0xb11c)
-#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
/* PCH */
@@ -7320,7 +7531,7 @@ enum {
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* south display engine interrupt: CPT/PPT */
+/* south display engine interrupt: CPT - CNP */
#define SDE_AUDIO_POWER_D_CPT (1 << 31)
#define SDE_AUDIO_POWER_C_CPT (1 << 30)
#define SDE_AUDIO_POWER_B_CPT (1 << 29)
@@ -7368,14 +7579,29 @@ enum {
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
+/* south display engine interrupt: ICP */
+#define SDE_TC4_HOTPLUG_ICP (1 << 27)
+#define SDE_TC3_HOTPLUG_ICP (1 << 26)
+#define SDE_TC2_HOTPLUG_ICP (1 << 25)
+#define SDE_TC1_HOTPLUG_ICP (1 << 24)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
+#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
+#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
+ SDE_DDIA_HOTPLUG_ICP)
+#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
+ SDE_TC3_HOTPLUG_ICP | \
+ SDE_TC2_HOTPLUG_ICP | \
+ SDE_TC1_HOTPLUG_ICP)
+
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
#define SERR_INT _MMIO(0xc4040)
-#define SERR_INT_POISON (1<<31)
-#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
/* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
@@ -7428,6 +7654,134 @@ enum {
#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+/* This register is a reuse of the PCH_PORT_HOTPLUG register. The
+ * functionality covered by PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define ICP_DDIB_HPD_ENABLE (1 << 7)
+#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
+#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
+#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
+#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
+#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
+#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
+#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
+#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
+#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
+#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
+
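Each DDI gets an enable bit plus a 2-bit sticky status field in SHOTPLUG_CTL_DDI, and both status encodings can latch at once (the SHORT_LONG value). A sketch of testing for a long pulse on DDI A (hypothetical helper, I915_READ() assumed):

static bool example_ddia_long_pulse(struct drm_i915_private *dev_priv)
{
        u32 val = I915_READ(SHOTPLUG_CTL_DDI);

        /* Test the bit rather than compare the whole field, so a
         * combined short+long latch (3 << 0) still reads as long. */
        return val & ICP_DDIA_HPD_LONG_DETECT;
}
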
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
+#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
+
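Each DSC rate-control range entry packs {min_qp, max_qp, bpg_offset} with the three shifts above. A minimal packing sketch (hypothetical helper; the 6-bit two's-complement width of bpg_offset follows the DSC spec and is assumed here):

static u16 example_rc_range_param(u8 min_qp, u8 max_qp, s8 bpg_offset)
{
        return (min_qp << RC_MIN_QP_SHIFT) |
               (max_qp << RC_MAX_QP_SHIFT) |
               ((bpg_offset & 0x3f) << RC_BPG_OFFSET_SHIFT);
}
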
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
#define PCH_GPIOA _MMIO(0xc5010)
#define PCH_GPIOB _MMIO(0xc5014)
#define PCH_GPIOC _MMIO(0xc5018)
@@ -7444,46 +7798,46 @@ enum {
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
-#define FP_CB_TUNE (0x3<<22)
+#define FP_CB_TUNE (0x3 << 22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST _MMIO(0xc606c)
#define PCH_DREF_CONTROL _MMIO(0xC6200)
#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
-#define DREF_SSC_SOURCE_DISABLE (0<<11)
-#define DREF_SSC_SOURCE_ENABLE (2<<11)
-#define DREF_SSC_SOURCE_MASK (3<<11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
-#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
-#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
-#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
-#define DREF_SSC4_DOWNSPREAD (0<<6)
-#define DREF_SSC4_CENTERSPREAD (1<<6)
-#define DREF_SSC1_DISABLE (0<<1)
-#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
#define DREF_SSC4_DISABLE (0)
#define DREF_SSC4_ENABLE (1)
#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP1_TIMER_MASK (3 << 12)
#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3<<10)
+#define FDL_TP2_TIMER_MASK (3 << 10)
#define RAWCLK_FREQ_MASK 0x3ff
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
@@ -7520,7 +7874,7 @@ enum {
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define _PCH_TRANS_VSYNC_A 0xe0014
-#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
@@ -7600,15 +7954,28 @@ enum {
#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define _HSW_VIDEO_DIP_GCP_B 0x61210
+/* Icelake PPS_DATA and _ECC DIP Registers.
+ * These are available for transcoders B, C and eDP.
+ * The _A variant is defined only so that the _MMIO_TRANS2
+ * macro can offset from it to the right per-transcoder location.
+ */
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
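+/* For example, ICL_VIDEO_DIP_PPS_DATA(TRANSCODER_B, 0) resolves to
+ * 0x61350 (i.e. _ICL_VIDEO_DIP_PPS_DATA_B), since _MMIO_TRANS2 adds
+ * transcoder B's 0x1000 mmio offset to the _A base address.
+ */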
#define _HSW_STEREO_3D_CTL_A 0x70020
-#define S3D_ENABLE (1<<31)
+#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
@@ -7651,156 +8018,156 @@ enum {
#define _PCH_TRANSBCONF 0xf1008
#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define TRANS_DISABLE (0<<31)
-#define TRANS_ENABLE (1<<31)
-#define TRANS_STATE_MASK (1<<30)
-#define TRANS_STATE_DISABLE (0<<30)
-#define TRANS_STATE_ENABLE (1<<30)
-#define TRANS_FSYNC_DELAY_HB1 (0<<27)
-#define TRANS_FSYNC_DELAY_HB2 (1<<27)
-#define TRANS_FSYNC_DELAY_HB3 (2<<27)
-#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_INTERLACE_MASK (7<<21)
-#define TRANS_PROGRESSIVE (0<<21)
-#define TRANS_INTERLACED (3<<21)
-#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
-#define TRANS_8BPC (0<<5)
-#define TRANS_10BPC (1<<5)
-#define TRANS_6BPC (2<<5)
-#define TRANS_12BPC (3<<5)
+#define TRANS_DISABLE (0 << 31)
+#define TRANS_ENABLE (1 << 31)
+#define TRANS_STATE_MASK (1 << 30)
+#define TRANS_STATE_DISABLE (0 << 30)
+#define TRANS_STATE_ENABLE (1 << 30)
+#define TRANS_FSYNC_DELAY_HB1 (0 << 27)
+#define TRANS_FSYNC_DELAY_HB2 (1 << 27)
+#define TRANS_FSYNC_DELAY_HB3 (2 << 27)
+#define TRANS_FSYNC_DELAY_HB4 (3 << 27)
+#define TRANS_INTERLACE_MASK (7 << 21)
+#define TRANS_PROGRESSIVE (0 << 21)
+#define TRANS_INTERLACED (3 << 21)
+#define TRANS_LEGACY_INTERLACED_ILK (2 << 21)
+#define TRANS_8BPC (0 << 5)
+#define TRANS_10BPC (1 << 5)
+#define TRANS_6BPC (2 << 5)
+#define TRANS_12BPC (3 << 5)
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1 << 10)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1 << 4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
-#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
#define SOUTH_CHICKEN1 _MMIO(0xc2000)
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
-#define SPT_PWM_GRANULARITY (1<<0)
+#define SPT_PWM_GRANULARITY (1 << 0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
-#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
-#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
-#define LPT_PWM_GRANULARITY (1<<5)
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
+#define LPT_PWM_GRANULARITY (1 << 5)
+#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
-#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1 << 0)
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
-#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
-#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
-#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
-#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
-#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
-#define FDI_TX_DISABLE (0<<31)
-#define FDI_TX_ENABLE (1<<31)
-#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
-#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
-#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
-#define FDI_LINK_TRAIN_NONE (3<<28)
-#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+#define FDI_TX_DISABLE (0 << 31)
+#define FDI_TX_ENABLE (1 << 31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0 << 28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1 << 28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28)
+#define FDI_LINK_TRAIN_NONE (3 << 28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22)
/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
 * SNB has different settings. */
/* SNB A-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
-#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22)
#define FDI_DP_PORT_WIDTH_SHIFT 19
#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
-#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18)
/* Ironlake: hardwired to 1 */
-#define FDI_TX_PLL_ENABLE (1<<14)
+#define FDI_TX_PLL_ENABLE (1 << 14)
/* Ivybridge has different bits for lolz */
-#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
-#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8)
+#define FDI_LINK_TRAIN_NONE_IVB (3 << 8)
/* both Tx and Rx */
-#define FDI_COMPOSITE_SYNC (1<<11)
-#define FDI_LINK_TRAIN_AUTO (1<<10)
-#define FDI_SCRAMBLING_ENABLE (0<<7)
-#define FDI_SCRAMBLING_DISABLE (1<<7)
+#define FDI_COMPOSITE_SYNC (1 << 11)
+#define FDI_LINK_TRAIN_AUTO (1 << 10)
+#define FDI_SCRAMBLING_ENABLE (0 << 7)
+#define FDI_SCRAMBLING_DISABLE (1 << 7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define _FDI_RXA_CTL 0xf000c
#define _FDI_RXB_CTL 0xf100c
#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
-#define FDI_RX_ENABLE (1<<31)
+#define FDI_RX_ENABLE (1 << 31)
/* train, dp width same as FDI_TX */
-#define FDI_FS_ERRC_ENABLE (1<<27)
-#define FDI_FE_ERRC_ENABLE (1<<26)
-#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
-#define FDI_8BPC (0<<16)
-#define FDI_10BPC (1<<16)
-#define FDI_6BPC (2<<16)
-#define FDI_12BPC (3<<16)
-#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
-#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
-#define FDI_RX_PLL_ENABLE (1<<13)
-#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
-#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
-#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
-#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
-#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_PCDCLK (1<<4)
+#define FDI_FS_ERRC_ENABLE (1 << 27)
+#define FDI_FE_ERRC_ENABLE (1 << 26)
+#define FDI_RX_POLARITY_REVERSED_LPT (1 << 16)
+#define FDI_8BPC (0 << 16)
+#define FDI_10BPC (1 << 16)
+#define FDI_6BPC (2 << 16)
+#define FDI_12BPC (3 << 16)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15)
+#define FDI_DMI_LINK_REVERSE_MASK (1 << 14)
+#define FDI_RX_PLL_ENABLE (1 << 13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1 << 11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1 << 10)
+#define FDI_FS_ERR_REPORT_ENABLE (1 << 9)
+#define FDI_FE_ERR_REPORT_ENABLE (1 << 8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6)
+#define FDI_PCDCLK (1 << 4)
/* CPT */
-#define FDI_AUTO_TRAINING (1<<10)
-#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
-#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
-#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+#define FDI_AUTO_TRAINING (1 << 10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
-#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
-#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
-#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
-#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_PWRDN_LANE1_MASK (3 << 26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26)
+#define FDI_RX_PWRDN_LANE0_MASK (3 << 24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24)
+#define FDI_RX_TP1_TO_TP2_48 (2 << 20)
+#define FDI_RX_TP1_TO_TP2_64 (3 << 20)
+#define FDI_RX_FDI_DELAY_90 (0x90 << 0)
#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define _FDI_RXA_TUSIZE1 0xf0030
@@ -7811,17 +8178,17 @@ enum {
#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
-#define FDI_RX_INTER_LANE_ALIGN (1<<10)
-#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
-#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
-#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
-#define FDI_RX_FS_CODE_ERR (1<<6)
-#define FDI_RX_FE_CODE_ERR (1<<5)
-#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
-#define FDI_RX_HDCP_LINK_FAIL (1<<3)
-#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
-#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
-#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+#define FDI_RX_INTER_LANE_ALIGN (1 << 10)
+#define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7)
+#define FDI_RX_FS_CODE_ERR (1 << 6)
+#define FDI_RX_FE_CODE_ERR (1 << 5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 4)
+#define FDI_RX_HDCP_LINK_FAIL (1 << 3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1 << 1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0)
#define _FDI_RXA_IIR 0xf0014
#define _FDI_RXA_IMR 0xf0018
@@ -7867,71 +8234,58 @@ enum {
#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
/* CPT */
-#define PORT_TRANS_A_SEL_CPT 0
-#define PORT_TRANS_B_SEL_CPT (1<<29)
-#define PORT_TRANS_C_SEL_CPT (2<<29)
-#define PORT_TRANS_SEL_MASK (3<<29)
-#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
-#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
-#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
-#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
-
#define _TRANS_DP_CTL_A 0xe0300
#define _TRANS_DP_CTL_B 0xe1300
#define _TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define TRANS_DP_OUTPUT_ENABLE (1<<31)
-#define TRANS_DP_PORT_SEL_B (0<<29)
-#define TRANS_DP_PORT_SEL_C (1<<29)
-#define TRANS_DP_PORT_SEL_D (2<<29)
-#define TRANS_DP_PORT_SEL_NONE (3<<29)
-#define TRANS_DP_PORT_SEL_MASK (3<<29)
-#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_ENH_FRAMING (1<<18)
-#define TRANS_DP_8BPC (0<<9)
-#define TRANS_DP_10BPC (1<<9)
-#define TRANS_DP_6BPC (2<<9)
-#define TRANS_DP_12BPC (3<<9)
-#define TRANS_DP_BPC_MASK (3<<9)
-#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_OUTPUT_ENABLE (1 << 31)
+#define TRANS_DP_PORT_SEL_MASK (3 << 29)
+#define TRANS_DP_PORT_SEL_NONE (3 << 29)
+#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29)
+#define TRANS_DP_AUDIO_ONLY (1 << 26)
+#define TRANS_DP_ENH_FRAMING (1 << 18)
+#define TRANS_DP_8BPC (0 << 9)
+#define TRANS_DP_10BPC (1 << 9)
+#define TRANS_DP_6BPC (2 << 9)
+#define TRANS_DP_12BPC (3 << 9)
+#define TRANS_DP_BPC_MASK (3 << 9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1 << 4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
-#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1 << 3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
-#define TRANS_DP_SYNC_MASK (3<<3)
+#define TRANS_DP_SYNC_MASK (3 << 3)
/* SNB eDP training params */
/* SNB A-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
-#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
-#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
-#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
#define VLV_PMWGICZ _MMIO(0x1300a4)
@@ -7978,7 +8332,7 @@ enum {
#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
-#define FORCEWAKE_MT_ENABLE (1<<5)
+#define FORCEWAKE_MT_ENABLE (1 << 5)
#define VLV_SPAREG2H _MMIO(0xA194)
#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
@@ -7987,13 +8341,13 @@ enum {
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
-#define GT_FIFO_SBDROPERR (1<<6)
-#define GT_FIFO_BLOBDROPERR (1<<5)
-#define GT_FIFO_SB_READ_ABORTERR (1<<4)
-#define GT_FIFO_DROPERR (1<<3)
-#define GT_FIFO_OVFERR (1<<2)
-#define GT_FIFO_IAWRERR (1<<1)
-#define GT_FIFO_IARDERR (1<<0)
+#define GT_FIFO_SBDROPERR (1 << 6)
+#define GT_FIFO_BLOBDROPERR (1 << 5)
+#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
+#define GT_FIFO_DROPERR (1 << 3)
+#define GT_FIFO_OVFERR (1 << 2)
+#define GT_FIFO_IAWRERR (1 << 1)
+#define GT_FIFO_IARDERR (1 << 0)
#define GTFIFOCTL _MMIO(0x120008)
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
@@ -8027,37 +8381,37 @@ enum {
# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
-#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
-#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
#define GEN6_RSTCTL _MMIO(0x9420)
#define GEN8_UCGCTL6 _MMIO(0x9430)
-#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
-#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24)
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
#define GEN6_GFXPAUSE _MMIO(0xA000)
#define GEN6_RPNSWREQ _MMIO(0xA008)
-#define GEN6_TURBO_DISABLE (1<<31)
-#define GEN6_FREQUENCY(x) ((x)<<25)
-#define HSW_FREQUENCY(x) ((x)<<24)
-#define GEN9_FREQUENCY(x) ((x)<<23)
-#define GEN6_OFFSET(x) ((x)<<19)
-#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_TURBO_DISABLE (1 << 31)
+#define GEN6_FREQUENCY(x) ((x) << 25)
+#define HSW_FREQUENCY(x) ((x) << 24)
+#define GEN9_FREQUENCY(x) ((x) << 23)
+#define GEN6_OFFSET(x) ((x) << 19)
+#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
-#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
-#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
-#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
-#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
-#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
-#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
-#define GEN7_RC_CTL_TO_MODE (1<<28)
-#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
-#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17)
+#define GEN6_RC_CTL_RC6_ENABLE (1 << 18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20)
+#define GEN6_RC_CTL_RC7_ENABLE (1 << 22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24)
+#define GEN7_RC_CTL_TO_MODE (1 << 28)
+#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27)
+#define GEN6_RC_CTL_HW_ENABLE (1 << 31)
#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
#define GEN6_RPSTAT1 _MMIO(0xA01C)
@@ -8068,19 +8422,19 @@ enum {
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL _MMIO(0xA024)
-#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
-#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
-#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
-#define GEN6_RP_MEDIA_HW_MODE (1<<9)
-#define GEN6_RP_MEDIA_SW_MODE (0<<9)
-#define GEN6_RP_MEDIA_IS_GFX (1<<8)
-#define GEN6_RP_ENABLE (1<<7)
-#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
-#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
-#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
-#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
-#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
+#define GEN6_RP_MEDIA_TURBO (1 << 11)
+#define GEN6_RP_MEDIA_MODE_MASK (3 << 9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9)
+#define GEN6_RP_MEDIA_HW_MODE (1 << 9)
+#define GEN6_RP_MEDIA_SW_MODE (0 << 9)
+#define GEN6_RP_MEDIA_IS_GFX (1 << 8)
+#define GEN6_RP_ENABLE (1 << 7)
+#define GEN6_RP_UP_IDLE_MIN (0x1 << 3)
+#define GEN6_RP_UP_BUSY_AVG (0x2 << 3)
+#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0)
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
@@ -8116,15 +8470,15 @@ enum {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1<<31)
-#define ARAT_EXPIRED_INTRMSK (1<<9)
+#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31)
+#define ARAT_EXPIRED_INTRMSK (1 << 9)
#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE (1<<0)
-#define GEN9_MEDIA_PG_ENABLE (1<<1)
+#define GEN9_RENDER_PG_ENABLE (1 << 0)
+#define GEN9_MEDIA_PG_ENABLE (1 << 1)
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
@@ -8137,13 +8491,13 @@ enum {
#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
#define GEN6_PMIIR _MMIO(0x44028)
#define GEN6_PMIER _MMIO(0x4402C)
-#define GEN6_PM_MBOX_EVENT (1<<25)
-#define GEN6_PM_THERMAL_EVENT (1<<24)
-#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
-#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
-#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
-#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
-#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+#define GEN6_PM_MBOX_EVENT (1 << 25)
+#define GEN6_PM_THERMAL_EVENT (1 << 24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
+#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
@@ -8152,16 +8506,16 @@ enum {
#define GEN7_GT_SCRATCH_REG_NUM 8
#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
-#define VLV_GFX_CLK_STATUS_BIT (1<<3)
-#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
+#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
#define VLV_COUNTER_CONTROL _MMIO(0x138104)
-#define VLV_COUNT_RANGE_HIGH (1<<15)
-#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
-#define VLV_RENDER_RC0_COUNT_EN (1<<4)
-#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
-#define VLV_RENDER_RC6_COUNT_EN (1<<0)
+#define VLV_COUNT_RANGE_HIGH (1 << 15)
+#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
+#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
+#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
+#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C)
@@ -8172,7 +8526,7 @@ enum {
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
-#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_READY (1 << 31)
#define GEN6_PCODE_ERROR_MASK 0xFF
#define GEN6_PCODE_SUCCESS 0x0
#define GEN6_PCODE_ILLEGAL_CMD 0x1
@@ -8216,7 +8570,7 @@ enum {
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
-#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
#define GEN6_RCn_MASK 7
#define GEN6_RC0 0
#define GEN6_RC3 2
@@ -8228,26 +8582,26 @@ enum {
#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
-#define CHV_SS_PG_ENABLE (1<<1)
-#define CHV_EU08_PG_ENABLE (1<<9)
-#define CHV_EU19_PG_ENABLE (1<<17)
-#define CHV_EU210_PG_ENABLE (1<<25)
+#define CHV_SS_PG_ENABLE (1 << 1)
+#define CHV_EU08_PG_ENABLE (1 << 9)
+#define CHV_EU19_PG_ENABLE (1 << 17)
+#define CHV_EU210_PG_ENABLE (1 << 25)
#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
-#define CHV_EU311_PG_ENABLE (1<<1)
+#define CHV_EU311_PG_ENABLE (1 << 1)
-#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
+#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
((slice) % 3) * 0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
-#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
+#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
-#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
+#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
@@ -8260,10 +8614,10 @@ enum {
#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
#define GEN7_MISCCPCTL _MMIO(0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
-#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
-#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
-#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
+#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
+#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
#define GEN8_GARBCNTL _MMIO(0xB004)
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
@@ -8292,61 +8646,62 @@ enum {
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
-#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
-#define GEN7_PARITY_ERROR_VALID (1<<13)
-#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
-#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)
+#define GEN7_PARITY_ERROR_VALID (1 << 13)
+#define GEN7_L3CDERRST1_BANK_MASK (3 << 11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7 << 8)
#define GEN7_PARITY_ERROR_ROW(reg) \
- ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+ (((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
#define GEN7_PARITY_ERROR_BANK(reg) \
- ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+ (((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
#define GEN7_PARITY_ERROR_SUBBANK(reg) \
- ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
-#define GEN7_L3CDERRST1_ENABLE (1<<7)
+ (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1 << 7)
#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
#define GEN7_L3LOG_SIZE 0x80
#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
-#define GEN7_MAX_PS_THREAD_DEP (8<<12)
-#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
-#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
-#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
+#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10)
+#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
-#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
-#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
+#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
-#define FLOW_CONTROL_ENABLE (1<<15)
-#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
-#define STALL_DOP_GATING_DISABLE (1<<5)
-#define THROTTLE_12_5 (7<<2)
-#define DISABLE_EARLY_EOT (1<<1)
+#define FLOW_CONTROL_ENABLE (1 << 15)
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8)
+#define STALL_DOP_GATING_DISABLE (1 << 5)
+#define THROTTLE_12_5 (7 << 2)
+#define DISABLE_EARLY_EOT (1 << 1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
-#define DOP_CLOCK_GATING_DISABLE (1<<0)
-#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
+#define DOP_CLOCK_GATING_DISABLE (1 << 0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
+#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
-#define GEN8_ST_PO_DISABLE (1<<13)
+#define GEN8_ST_PO_DISABLE (1 << 13)
#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
-#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
-#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
-#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
-#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4)
-#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
+#define CNL_FAST_ANISO_L1_BANKING_FIX (1 << 4)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
-#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8)
-#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
-#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8)
+#define GEN9_ENABLE_YV12_BUGFIX (1 << 4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -8478,6 +8833,14 @@ enum {
#define _HSW_PWR_WELL_CTL3 0x45408
#define _HSW_PWR_WELL_CTL4 0x4540C
+#define _ICL_PWR_WELL_CTL_AUX1 0x45440
+#define _ICL_PWR_WELL_CTL_AUX2 0x45444
+#define _ICL_PWR_WELL_CTL_AUX4 0x4544C
+
+#define _ICL_PWR_WELL_CTL_DDI1 0x45450
+#define _ICL_PWR_WELL_CTL_DDI2 0x45454
+#define _ICL_PWR_WELL_CTL_DDI4 0x4545C
+
/*
* Each power well control register contains up to 16 (request, status) HW
* flag tuples. The register index and HW flag shift are determined by the
@@ -8487,21 +8850,27 @@ enum {
*/
#define _HSW_PW_REG_IDX(pw) ((pw) >> 4)
#define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2)
-/* TODO: Add all PWR_WELL_CTL registers below for new platforms */
#define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL1))
+ _HSW_PWR_WELL_CTL1, \
+ _ICL_PWR_WELL_CTL_AUX1, \
+ _ICL_PWR_WELL_CTL_DDI1))
#define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL2))
+ _HSW_PWR_WELL_CTL2, \
+ _ICL_PWR_WELL_CTL_AUX2, \
+ _ICL_PWR_WELL_CTL_DDI2))
+/* KVMR doesn't have a reg for AUX or DDI power well control */
#define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3)
#define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL4))
+ _HSW_PWR_WELL_CTL4, \
+ _ICL_PWR_WELL_CTL_AUX4, \
+ _ICL_PWR_WELL_CTL_DDI4))
#define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1))
#define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw))
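+/* Worked example (power well id chosen purely for illustration): for
+ * pw == 0x12, _HSW_PW_REG_IDX(0x12) == 1 picks the second register in
+ * each _PICK list (the AUX control register), and _HSW_PW_SHIFT(0x12) == 4,
+ * so the status flag is bit 4 and the request flag bit 5 of that register.
+ */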
#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
-#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
-#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
-#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
+#define HSW_PWR_WELL_FORCE_ON (1 << 19)
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
/* SKL Fuse Status */
@@ -8512,9 +8881,11 @@ enum skl_power_gate {
};
#define SKL_FUSE_STATUS _MMIO(0x42000)
-#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
+#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */
#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1)
+/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */
+#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
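+/* e.g. SKL_PW_TO_PG(SKL_DISP_PW_1) yields SKL_PG1, and, assuming
+ * SKL_PG1 == 1 in the enum above, SKL_FUSE_PG_DIST_STATUS(SKL_PG1)
+ * is bit 26 (27 - 1).
+ */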
#define _CNL_AUX_REG_IDX(pw) ((pw) - 9)
@@ -8527,8 +8898,8 @@ enum skl_power_gate {
_CNL_AUX_ANAOVRD1_C, \
_CNL_AUX_ANAOVRD1_D, \
_CNL_AUX_ANAOVRD1_F))
-#define CNL_AUX_ANAOVRD1_ENABLE (1<<16)
-#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1<<23)
+#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
+#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
@@ -8573,7 +8944,7 @@ enum skl_power_gate {
#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4))
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
#define HDCP_SHA_TEXT _MMIO(0x66d18)
/* HDCP Auth Registers */
@@ -8589,7 +8960,7 @@ enum skl_power_gate {
_PORTC_HDCP_AUTHENC, \
_PORTD_HDCP_AUTHENC, \
_PORTE_HDCP_AUTHENC, \
- _PORTF_HDCP_AUTHENC) + x)
+ _PORTF_HDCP_AUTHENC) + (x))
#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
#define HDCP_CONF_CAPTURE_AN BIT(0)
#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
@@ -8610,7 +8981,7 @@ enum skl_power_gate {
#define HDCP_STATUS_R0_READY BIT(18)
#define HDCP_STATUS_AN_READY BIT(17)
#define HDCP_STATUS_CIPHER BIT(16)
-#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff)
+#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
@@ -8619,37 +8990,37 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
-#define TRANS_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_PORT_MASK (7 << 28)
#define TRANS_DDI_PORT_SHIFT 28
-#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
-#define TRANS_DDI_PORT_NONE (0<<28)
-#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
-#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
-#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
-#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
-#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
-#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
-#define TRANS_DDI_BPC_MASK (7<<20)
-#define TRANS_DDI_BPC_8 (0<<20)
-#define TRANS_DDI_BPC_10 (1<<20)
-#define TRANS_DDI_BPC_6 (2<<20)
-#define TRANS_DDI_BPC_12 (3<<20)
-#define TRANS_DDI_PVSYNC (1<<17)
-#define TRANS_DDI_PHSYNC (1<<16)
-#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
-#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
-#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
-#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
-#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
-#define TRANS_DDI_HDCP_SIGNALLING (1<<9)
-#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
-#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
-#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
-#define TRANS_DDI_BFI_ENABLE (1<<4)
-#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
-#define TRANS_DDI_HDMI_SCRAMBLING (1<<0)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << 28)
+#define TRANS_DDI_PORT_NONE (0 << 28)
+#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
+#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI (4 << 24)
+#define TRANS_DDI_BPC_MASK (7 << 20)
+#define TRANS_DDI_BPC_8 (0 << 20)
+#define TRANS_DDI_BPC_10 (1 << 20)
+#define TRANS_DDI_BPC_6 (2 << 20)
+#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PVSYNC (1 << 17)
+#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_BFI_ENABLE (1 << 4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
| TRANS_DDI_HDMI_SCRAMBLING)
@@ -8658,28 +9029,29 @@ enum skl_power_gate {
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define DP_TP_CTL_ENABLE (1<<31)
-#define DP_TP_CTL_MODE_SST (0<<27)
-#define DP_TP_CTL_MODE_MST (1<<27)
-#define DP_TP_CTL_FORCE_ACT (1<<25)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
-#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
-#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
-#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
+#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_MODE_SST (0 << 27)
+#define DP_TP_CTL_MODE_MST (1 << 27)
+#define DP_TP_CTL_FORCE_ACT (1 << 25)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 (5 << 8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2 << 8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3 << 8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1 << 7)
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_ACT_SENT (1<<24)
-#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_IDLE_DONE (1 << 25)
+#define DP_TP_STATUS_ACT_SENT (1 << 24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1 << 12)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
@@ -8688,16 +9060,16 @@ enum skl_power_gate {
#define _DDI_BUF_CTL_A 0x64000
#define _DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_CTL_ENABLE (1 << 31)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
-#define DDI_BUF_EMP_MASK (0xf<<24)
-#define DDI_BUF_PORT_REVERSAL (1<<16)
-#define DDI_BUF_IS_IDLE (1<<7)
-#define DDI_A_4_LANES (1<<4)
+#define DDI_BUF_EMP_MASK (0xf << 24)
+#define DDI_BUF_PORT_REVERSAL (1 << 16)
+#define DDI_BUF_IS_IDLE (1 << 7)
+#define DDI_A_4_LANES (1 << 4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
-#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
/* DDI Buffer Translations */
#define _DDI_BUF_TRANS_A 0x64E00
@@ -8712,95 +9084,99 @@ enum skl_power_gate {
#define SBI_ADDR _MMIO(0xC6000)
#define SBI_DATA _MMIO(0xC6004)
#define SBI_CTL_STAT _MMIO(0xC6008)
-#define SBI_CTL_DEST_ICLK (0x0<<16)
-#define SBI_CTL_DEST_MPHY (0x1<<16)
-#define SBI_CTL_OP_IORD (0x2<<8)
-#define SBI_CTL_OP_IOWR (0x3<<8)
-#define SBI_CTL_OP_CRRD (0x6<<8)
-#define SBI_CTL_OP_CRWR (0x7<<8)
-#define SBI_RESPONSE_FAIL (0x1<<1)
-#define SBI_RESPONSE_SUCCESS (0x0<<1)
-#define SBI_BUSY (0x1<<0)
-#define SBI_READY (0x0<<0)
+#define SBI_CTL_DEST_ICLK (0x0 << 16)
+#define SBI_CTL_DEST_MPHY (0x1 << 16)
+#define SBI_CTL_OP_IORD (0x2 << 8)
+#define SBI_CTL_OP_IOWR (0x3 << 8)
+#define SBI_CTL_OP_CRRD (0x6 << 8)
+#define SBI_CTL_OP_CRWR (0x7 << 8)
+#define SBI_RESPONSE_FAIL (0x1 << 1)
+#define SBI_RESPONSE_SUCCESS (0x0 << 1)
+#define SBI_BUSY (0x1 << 0)
+#define SBI_READY (0x0 << 0)
/* SBI offsets */
#define SBI_SSCDIVINTPHASE 0x0200
#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1)
-#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8)
-#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
-#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
#define SBI_SSCDITHPHASE 0x0204
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_PATHALT (1<<3)
-#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCCTL_PATHALT (1 << 3)
+#define SBI_SSCCTL_DISABLE (1 << 0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
-#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4)
-#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
#define SBI_DBUFF0 0x2a00
#define SBI_GEN0 0x1f00
-#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE _MMIO(0xC6020)
-#define PIXCLK_GATE_UNGATE (1<<0)
-#define PIXCLK_GATE_GATE (0<<0)
+#define PIXCLK_GATE_UNGATE (1 << 0)
+#define PIXCLK_GATE_GATE (0 << 0)
/* SPLL */
#define SPLL_CTL _MMIO(0x46020)
-#define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SSC (1<<28)
-#define SPLL_PLL_NON_SSC (2<<28)
-#define SPLL_PLL_LCPLL (3<<28)
-#define SPLL_PLL_REF_MASK (3<<28)
-#define SPLL_PLL_FREQ_810MHz (0<<26)
-#define SPLL_PLL_FREQ_1350MHz (1<<26)
-#define SPLL_PLL_FREQ_2700MHz (2<<26)
-#define SPLL_PLL_FREQ_MASK (3<<26)
+#define SPLL_PLL_ENABLE (1 << 31)
+#define SPLL_PLL_SSC (1 << 28)
+#define SPLL_PLL_NON_SSC (2 << 28)
+#define SPLL_PLL_LCPLL (3 << 28)
+#define SPLL_PLL_REF_MASK (3 << 28)
+#define SPLL_PLL_FREQ_810MHz (0 << 26)
+#define SPLL_PLL_FREQ_1350MHz (1 << 26)
+#define SPLL_PLL_FREQ_2700MHz (2 << 26)
+#define SPLL_PLL_FREQ_MASK (3 << 26)
/* WRPLL */
#define _WRPLL_CTL1 0x46040
#define _WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SSC (1<<28)
-#define WRPLL_PLL_NON_SSC (2<<28)
-#define WRPLL_PLL_LCPLL (3<<28)
-#define WRPLL_PLL_REF_MASK (3<<28)
+#define WRPLL_PLL_ENABLE (1 << 31)
+#define WRPLL_PLL_SSC (1 << 28)
+#define WRPLL_PLL_NON_SSC (2 << 28)
+#define WRPLL_PLL_LCPLL (3 << 28)
+#define WRPLL_PLL_REF_MASK (3 << 28)
/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
-#define WRPLL_DIVIDER_POST(x) ((x)<<8)
-#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
+#define WRPLL_DIVIDER_POST(x) ((x) << 8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
#define WRPLL_DIVIDER_POST_SHIFT 8
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
#define WRPLL_DIVIDER_FB_SHIFT 16
-#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
+#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
/* Port clock selection */
#define _PORT_CLK_SEL_A 0x46100
#define _PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
-#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
-#define PORT_CLK_SEL_LCPLL_810 (2<<29)
-#define PORT_CLK_SEL_SPLL (3<<29)
-#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
-#define PORT_CLK_SEL_WRPLL1 (4<<29)
-#define PORT_CLK_SEL_WRPLL2 (5<<29)
-#define PORT_CLK_SEL_NONE (7<<29)
-#define PORT_CLK_SEL_MASK (7<<29)
+#define PORT_CLK_SEL_LCPLL_2700 (0 << 29)
+#define PORT_CLK_SEL_LCPLL_1350 (1 << 29)
+#define PORT_CLK_SEL_LCPLL_810 (2 << 29)
+#define PORT_CLK_SEL_SPLL (3 << 29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4) << 29)
+#define PORT_CLK_SEL_WRPLL1 (4 << 29)
+#define PORT_CLK_SEL_WRPLL2 (5 << 29)
+#define PORT_CLK_SEL_NONE (7 << 29)
+#define PORT_CLK_SEL_MASK (7 << 29)
/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
#define DDI_CLK_SEL_NONE (0x0 << 28)
#define DDI_CLK_SEL_MG (0x8 << 28)
+#define DDI_CLK_SEL_TBT_162 (0xC << 28)
+#define DDI_CLK_SEL_TBT_270 (0xD << 28)
+#define DDI_CLK_SEL_TBT_540 (0xE << 28)
+#define DDI_CLK_SEL_TBT_810 (0xF << 28)
#define DDI_CLK_SEL_MASK (0xF << 28)
/* Transcoder clock selection */
@@ -8808,8 +9184,8 @@ enum skl_power_gate {
#define _TRANS_CLK_SEL_B 0x46144
#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
/* For each transcoder, we need to select the corresponding port clock */
-#define TRANS_CLK_SEL_DISABLED (0x0<<29)
-#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
+#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
+#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
#define CDCLK_FREQ _MMIO(0x46200)
@@ -8819,28 +9195,29 @@ enum skl_power_gate {
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
-#define TRANS_MSA_SYNC_CLK (1<<0)
-#define TRANS_MSA_6_BPC (0<<5)
-#define TRANS_MSA_8_BPC (1<<5)
-#define TRANS_MSA_10_BPC (2<<5)
-#define TRANS_MSA_12_BPC (3<<5)
-#define TRANS_MSA_16_BPC (4<<5)
+#define TRANS_MSA_SYNC_CLK (1 << 0)
+#define TRANS_MSA_6_BPC (0 << 5)
+#define TRANS_MSA_8_BPC (1 << 5)
+#define TRANS_MSA_10_BPC (2 << 5)
+#define TRANS_MSA_12_BPC (3 << 5)
+#define TRANS_MSA_16_BPC (4 << 5)
+#define TRANS_MSA_CEA_RANGE (1 << 3)
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
-#define LCPLL_PLL_DISABLE (1<<31)
-#define LCPLL_PLL_LOCK (1<<30)
-#define LCPLL_CLK_FREQ_MASK (3<<26)
-#define LCPLL_CLK_FREQ_450 (0<<26)
-#define LCPLL_CLK_FREQ_54O_BDW (1<<26)
-#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
-#define LCPLL_CLK_FREQ_675_BDW (3<<26)
-#define LCPLL_CD_CLOCK_DISABLE (1<<25)
-#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24)
-#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
-#define LCPLL_POWER_DOWN_ALLOW (1<<22)
-#define LCPLL_CD_SOURCE_FCLK (1<<21)
-#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+#define LCPLL_PLL_DISABLE (1 << 31)
+#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_CLK_FREQ_MASK (3 << 26)
+#define LCPLL_CLK_FREQ_450 (0 << 26)
+#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
+#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
+#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
+#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
+#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
+#define LCPLL_CD_SOURCE_FCLK (1 << 21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
/*
* SKL Clocks
@@ -8868,16 +9245,16 @@ enum skl_power_gate {
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
#define LCPLL2_CTL _MMIO(0x46014)
-#define LCPLL_PLL_ENABLE (1<<31)
+#define LCPLL_PLL_ENABLE (1 << 31)
/* DPLL control1 */
#define DPLL_CTRL1 _MMIO(0x6C058)
-#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
-#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
-#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
-#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
-#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
-#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
+#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
+#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
+#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
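+/* Each DPLL id thus owns a 6-bit field in DPLL_CTRL1: for id == 1, for
+ * instance, the link rate mask is (7 << 7), i.e. bits 9:7, with SSC at
+ * bit 10, HDMI mode at bit 11 and override at bit 6.
+ */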
#define DPLL_CTRL1_LINK_RATE_2700 0
#define DPLL_CTRL1_LINK_RATE_1350 1
#define DPLL_CTRL1_LINK_RATE_810 2
@@ -8887,43 +9264,43 @@ enum skl_power_gate {
/* DPLL control2 */
#define DPLL_CTRL2 _MMIO(0x6C05C)
-#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
-#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
-#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
-#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
-#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
/* DPLL Status */
#define DPLL_STATUS _MMIO(0x6C060)
-#define DPLL_LOCK(id) (1<<((id)*8))
+#define DPLL_LOCK(id) (1 << ((id) * 8))
/* DPLL cfg */
#define _DPLL1_CFGCR1 0x6C040
#define _DPLL2_CFGCR1 0x6C048
#define _DPLL3_CFGCR1 0x6C050
-#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
-#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
-#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
+#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
#define _DPLL1_CFGCR2 0x6C044
#define _DPLL2_CFGCR2 0x6C04C
#define _DPLL3_CFGCR2 0x6C054
-#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
-#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
-#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
-#define DPLL_CFGCR2_KDIV_MASK (3<<5)
-#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
-#define DPLL_CFGCR2_KDIV_5 (0<<5)
-#define DPLL_CFGCR2_KDIV_2 (1<<5)
-#define DPLL_CFGCR2_KDIV_3 (2<<5)
-#define DPLL_CFGCR2_KDIV_1 (3<<5)
-#define DPLL_CFGCR2_PDIV_MASK (7<<2)
-#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
-#define DPLL_CFGCR2_PDIV_1 (0<<2)
-#define DPLL_CFGCR2_PDIV_2 (1<<2)
-#define DPLL_CFGCR2_PDIV_3 (2<<2)
-#define DPLL_CFGCR2_PDIV_7 (4<<2)
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
+#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
+#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
+#define DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
+#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define DPLL_CFGCR2_PDIV_7 (4 << 2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
@@ -8935,9 +9312,9 @@ enum skl_power_gate {
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
- (port)+10))
+ (port) + 10))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
- (port)*2)
+ (port) * 2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
@@ -8950,6 +9327,8 @@ enum skl_power_gate {
#define PLL_POWER_STATE (1 << 26)
#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
+#define TBT_PLL_ENABLE _MMIO(0x46020)
+
#define _MG_PLL1_ENABLE 0x46030
#define _MG_PLL2_ENABLE 0x46034
#define _MG_PLL3_ENABLE 0x46038
@@ -8963,6 +9342,7 @@ enum skl_power_gate {
#define _MG_REFCLKIN_CTL_PORT3 0x16A92C
#define _MG_REFCLKIN_CTL_PORT4 0x16B92C
#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8)
+#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8)
#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
_MG_REFCLKIN_CTL_PORT1, \
_MG_REFCLKIN_CTL_PORT2)
@@ -8972,7 +9352,9 @@ enum skl_power_gate {
#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8
#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8
#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16)
+#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16)
#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8)
+#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8)
#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
_MG_CLKTOP2_CORECLKCTL1_PORT1, \
_MG_CLKTOP2_CORECLKCTL1_PORT2)
@@ -8982,9 +9364,13 @@ enum skl_power_gate {
#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4
#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4
#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16)
+#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16)
#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14)
+#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14)
#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12)
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8)
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8)
#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
_MG_CLKTOP2_HSCLKCTL_PORT1, \
_MG_CLKTOP2_HSCLKCTL_PORT2)
@@ -9058,12 +9444,18 @@ enum skl_power_gate {
#define _MG_PLL_BIAS_PORT3 0x16AA14
#define _MG_PLL_BIAS_PORT4 0x16BA14
#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30)
+#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30)
#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24)
+#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24)
#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16)
+#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16)
#define MG_PLL_BIAS_BIASCAL_EN (1 << 15)
#define MG_PLL_BIAS_CTRIM(x) ((x) << 8)
+#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8)
#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5)
+#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5)
#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0)
+#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0)
#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
_MG_PLL_BIAS_PORT2)
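(The new *_MASK companions pair with the existing setter macros for read-modify-write updates; a hedged sketch, where `val`, `port` and the CTRIM field value are illustrative locals:

	val = I915_READ(MG_PLL_BIAS(port));
	val &= ~MG_PLL_BIAS_CTRIM_MASK;
	val |= MG_PLL_BIAS_CTRIM(ctrim);
	I915_WRITE(MG_PLL_BIAS(port), val);
)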
@@ -9105,13 +9497,16 @@ enum skl_power_gate {
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
#define DPLL_CFGCR1_KDIV_4 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
#define DPLL_CFGCR1_PDIV_2 (1 << 2)
#define DPLL_CFGCR1_PDIV_3 (2 << 2)
@@ -9145,22 +9540,22 @@ enum skl_power_gate {
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
-#define DC_STATE_EN_UPTO_DC5 (1<<0)
-#define DC_STATE_EN_DC9 (1<<3)
-#define DC_STATE_EN_UPTO_DC6 (2<<0)
+#define DC_STATE_EN_UPTO_DC5 (1 << 0)
+#define DC_STATE_EN_DC9 (1 << 3)
+#define DC_STATE_EN_UPTO_DC6 (2 << 0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
#define DC_STATE_DEBUG _MMIO(0x45520)
-#define DC_STATE_DEBUG_MASK_CORES (1<<0)
-#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
+#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
-#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
-#define D_COMP_COMP_FORCE (1<<8)
-#define D_COMP_COMP_DISABLE (1<<0)
+#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
+#define D_COMP_COMP_FORCE (1 << 8)
+#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
#define _PIPE_WM_LINETIME_A 0x45270
@@ -9168,27 +9563,27 @@ enum skl_power_gate {
#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
-#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16)
/* SFUSE_STRAP */
#define SFUSE_STRAP _MMIO(0xc2014)
-#define SFUSE_STRAP_FUSE_LOCK (1<<13)
-#define SFUSE_STRAP_RAW_FREQUENCY (1<<8)
-#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
-#define SFUSE_STRAP_CRT_DISABLED (1<<6)
-#define SFUSE_STRAP_DDIF_DETECTED (1<<3)
-#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
-#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
-#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
+#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
+#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
+#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
+#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
+#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
+#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
#define WM_MISC _MMIO(0x45260)
#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
#define WM_DBG _MMIO(0x45280)
-#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
-#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
-#define WM_DBG_DISALLOW_SPRITE (1<<2)
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
+#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
+#define WM_DBG_DISALLOW_SPRITE (1 << 2)
/* pipe CSC */
#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
@@ -9314,6 +9709,22 @@ enum skl_power_gate {
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+#define _ICL_DSI_ESC_CLK_DIV0 0x6b090
+#define _ICL_DSI_ESC_CLK_DIV1 0x6b890
+#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DSI_ESC_CLK_DIV0, \
+ _ICL_DSI_ESC_CLK_DIV1)
+#define _ICL_DPHY_ESC_CLK_DIV0 0x162190
+#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190
+#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DPHY_ESC_CLK_DIV0, \
+ _ICL_DPHY_ESC_CLK_DIV1)
+#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16)
+#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16
+#define ICL_ESC_CLK_DIV_MASK 0x1ff
+#define ICL_ESC_CLK_DIV_SHIFT 0
+#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+
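(A hedged sketch of programming the new escape-clock divider; the divider derivation and the `afe_clk` input in kHz are assumptions inferred from the field layout, not taken from this patch:

	u32 esc_clk_div = DIV_ROUND_UP(afe_clk, DSI_MAX_ESC_CLK) &
			  ICL_ESC_CLK_DIV_MASK;

	I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
		   esc_clk_div << ICL_ESC_CLK_DIV_SHIFT);
)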
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -9351,7 +9762,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
- ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+ (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
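(The added parentheses matter whenever the macro argument is an expression: without them, BXT_MIPI_TX_ESCLK_DIVIDER(port, a | b) would expand to ((a | b & 0x3F) << ...), masking only b because & binds tighter than |; with (val) the whole argument is masked as intended. The same fix is applied to the three divider macros below.)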
/* RX upper control divider to select actual RX clock output from 8x */
#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
@@ -9364,7 +9775,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
/* 8/3X divider to select the actual 8/3X clock output from 8x */
#define BXT_MIPI1_8X_BY3_SHIFT 19
#define BXT_MIPI2_8X_BY3_SHIFT 3
@@ -9377,7 +9788,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
BXT_MIPI2_8X_BY3_DIVIDER_MASK)
#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
/* RX lower control divider to select actual RX clock output from 8x */
#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
@@ -9390,7 +9801,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
#define RX_DIVIDER_BIT_1_2 0x3
#define RX_DIVIDER_BIT_3_4 0xC
@@ -9448,6 +9859,14 @@ enum skl_power_gate {
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+/* ICL DSI MODE control */
+#define _ICL_DSI_IO_MODECTL_0 0x6B094
+#define _ICL_DSI_IO_MODECTL_1 0x6B894
+#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \
+ _ICL_DSI_IO_MODECTL_0, \
+ _ICL_DSI_IO_MODECTL_1)
+#define COMBO_PHY_MODE_DSI (1 << 0)
+
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -9927,4 +10346,310 @@ enum skl_power_gate {
_ICL_PHY_MISC_B)
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200
+#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_VBR_ENABLE (1 << 19)
+#define DSC_422_ENABLE (1 << 18)
+#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
+#define DSC_BLOCK_PREDICTION (1 << 16)
+#define DSC_LINE_BUF_DEPTH_SHIFT 12
+#define DSC_BPC_SHIFT 8
+#define DSC_VER_MIN_SHIFT 4
+#define DSC_VER_MAJ (0x1 << 0)
+
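(A hedged sketch of packing PPS0 from these fields; `pps0` and the `vdsc_cfg` member names are illustrative assumptions, not defined by this patch:

	u32 pps0 = DSC_VER_MAJ |
		   (vdsc_cfg->dsc_version_minor << DSC_VER_MIN_SHIFT) |
		   (vdsc_cfg->bits_per_component << DSC_BPC_SHIFT) |
		   (vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT);
)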
+#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204
+#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define DSC_BPP(bpp) ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208
+#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
+#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C
+#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
+#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210
+#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
+#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214
+#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218
+#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
+#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
+#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C
+#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
+#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220
+#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
+#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224
+#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
+#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228
+#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
+#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
+#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
+#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C
+#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260
+#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264
+#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268
+#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C
+#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270
+#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
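(The `_UDW` (upper dword) registers indicate each rate-control buffer threshold group spans 64 bits across two 32-bit registers; a hedged write sketch, where the u64 `thresh` is an illustrative local:

	I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe), lower_32_bits(thresh));
	I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe), upper_32_bits(thresh));
)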
+#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8928894dd9c7..5c2c93cbab12 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Carefully retire all requests without writing to the rings */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -320,6 +321,7 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
+ GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -383,8 +385,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
* the subsequent request.
*/
if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = rq->ctx;
+ intel_context_unpin(engine->last_retired_context);
+ engine->last_retired_context = rq->hw_context;
}
static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +457,8 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->ctx->ban_score);
- intel_context_unpin(request->ctx, request->engine);
+ atomic_dec_if_positive(&request->gem_context->ban_score);
+ intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
@@ -502,7 +504,7 @@ static void move_to_timeline(struct i915_request *request,
GEM_BUG_ON(request->timeline == &request->engine->timeline);
lockdep_assert_held(&request->engine->timeline.lock);
- spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&request->timeline->lock);
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
}
@@ -657,7 +659,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
- struct intel_ring *ring;
+ struct intel_context *ce;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -681,22 +683,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ring = intel_context_pin(ctx, engine);
- if (IS_ERR(ring))
- return ERR_CAST(ring);
- GEM_BUG_ON(!ring);
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
ret = reserve_gt(i915);
if (ret)
goto err_unpin;
- ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+ ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
if (ret)
goto err_unreserve;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+ if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
i915_request_completed(rq))
i915_request_retire(rq);
@@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
/* Ratelimit ourselves to prevent oom from malicious clients */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE);
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err_unreserve;
@@ -760,9 +762,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
INIT_LIST_HEAD(&rq->active_list);
rq->i915 = i915;
rq->engine = engine;
- rq->ctx = ctx;
- rq->ring = ring;
- rq->timeline = ring->timeline;
+ rq->gem_context = ctx;
+ rq->hw_context = ce;
+ rq->ring = ce->ring;
+ rq->timeline = ce->ring->timeline;
GEM_BUG_ON(rq->timeline == &engine->timeline);
spin_lock_init(&rq->lock);
@@ -814,14 +817,16 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
goto err_unwind;
/* Keep a second pin for the dual retirement along engine and ring */
- __intel_context_pin(rq->ctx, engine);
+ __intel_context_pin(ce);
+
+ rq->infix = rq->ring->emit; /* end of header; start of user payload */
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
return rq;
err_unwind:
- rq->ring->emit = rq->head;
+ ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
GEM_BUG_ON(!list_empty(&rq->active_list));
@@ -832,7 +837,7 @@ err_unwind:
err_unreserve:
unreserve_gt(i915);
err_unpin:
- intel_context_unpin(ctx, engine);
+ intel_context_unpin(ce);
return ERR_PTR(ret);
}
@@ -1010,19 +1015,39 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
+void i915_request_skip(struct i915_request *rq, int error)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+ dma_fence_set_error(&rq->fence, error);
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset(vaddr + head, 0, rq->ring->size - head);
+ head = 0;
+ }
+ memset(vaddr + head, 0, rq->postfix - head);
+}
+
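(A hypothetical caller sketch for the new helper; the banned-context check is an assumption about intended use during reset handling, not taken from this patch:

	if (i915_gem_context_is_banned(rq->gem_context))
		i915_request_skip(rq, -EIO);
)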
/*
 * NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_request_add(struct i915_request *request, bool flush_caches)
+void i915_request_add(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct intel_ring *ring = request->ring;
struct i915_timeline *timeline = request->timeline;
+ struct intel_ring *ring = request->ring;
struct i915_request *prev;
u32 *cs;
- int err;
GEM_TRACE("%s fence %llx:%d\n",
engine->name, request->fence.context, request->fence.seqno);
@@ -1043,20 +1068,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
* know that it is time to use that space up.
*/
request->reserved_space = 0;
-
- /*
- * Emit any outstanding flushes - execbuf can fail to emit the flush
- * after having emitted the batchbuffer command. Hence we need to fix
- * things up similar to emitting the lazy request. The difference here
- * is that the flush _must_ happen before the next request, no matter
- * what.
- */
- if (flush_caches) {
- err = engine->emit_flush(request, EMIT_FLUSH);
-
- /* Not allowed to fail! */
- WARN(err, "engine->emit_flush() failed: %d!\n", err);
- }
+ engine->emit_flush(request, EMIT_FLUSH);
/*
* Record the position of the start of the breadcrumb so that
@@ -1095,8 +1107,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list))
+ if (list_is_first(&request->ring_link, &ring->request_list)) {
+ GEM_TRACE("marking %s as active\n", ring->timeline->name);
list_add(&ring->active_link, &request->i915->gt.active_rings);
+ }
request->emitted_jiffies = jiffies;
/*
@@ -1113,7 +1127,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
- engine->schedule(request, &request->ctx->sched);
+ engine->schedule(request, &request->gem_context->sched);
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1205,7 +1219,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- irq = atomic_read(&engine->irq_count);
+ irq = READ_ONCE(engine->breadcrumbs.irq_count);
timeout_us += local_clock_us(&cpu);
do {
if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
@@ -1217,7 +1231,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
* assume we won't see one in the near future but require
* the engine->seqno_barrier() to fixup coherency.
*/
- if (atomic_read(&engine->irq_count) != irq)
+ if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
break;
if (signal_pending_state(state, current))
@@ -1294,7 +1308,7 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
- intel_wait_init(&wait, rq);
+ intel_wait_init(&wait);
restart:
do {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index eddbd4245cb3..e1c9365dfefb 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -93,8 +93,9 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct i915_gem_context *ctx;
+ struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
+ struct intel_context *hw_context;
struct intel_ring *ring;
struct i915_timeline *timeline;
struct intel_signal_node signaling;
@@ -133,6 +134,9 @@ struct i915_request {
/** Position in the ring of the start of the request */
u32 head;
+ /** Position in the ring of the start of the user packets */
+ u32 infix;
+
/**
* Position in the ring of the start of the postfix.
* This is required to calculate the maximum available ring space
@@ -249,13 +253,13 @@ int i915_request_await_object(struct i915_request *to,
int i915_request_await_dma_fence(struct i915_request *rq,
struct dma_fence *fence);
-void __i915_request_add(struct i915_request *rq, bool flush_caches);
-#define i915_request_add(rq) \
- __i915_request_add(rq, false)
+void i915_request_add(struct i915_request *rq);
void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
+void i915_request_skip(struct i915_request *request, int error);
+
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -266,6 +270,7 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -375,6 +380,7 @@ static inline void
init_request_active(struct i915_gem_active *active,
i915_gem_retire_fn retire)
{
+ RCU_INIT_POINTER(active->request, NULL);
INIT_LIST_HEAD(&active->link);
active->retire = retire ?: i915_gem_retire_noop;
}
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 9766e806dce6..a73472dd12fd 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -99,6 +99,6 @@ __printf(2, 3)
bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
- __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ __igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index dc2a4632faa7..a2c2c3ab5fb0 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -37,6 +37,8 @@ struct i915_timeline {
u32 seqno;
spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
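(A hedged illustration of how the two lock subclasses would be applied so that nesting a client timeline lock under an engine timeline lock does not trip lockdep; the exact call site is an assumption:

	lockdep_set_subclass(&timeline->lock,
			     engine ? TIMELINE_ENGINE : TIMELINE_CLIENT);
)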
/**
* List of breadcrumbs associated with GPU requests currently
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8cc3a256f29d..b50c6b829715 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, sync_from)
- __field(u32, sync_to)
+ __field(u32, from_class)
+ __field(u32, from_instance)
+ __field(u32, to_class)
+ __field(u32, to_instance)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->i915->drm.primary->index;
- __entry->sync_from = from->engine->id;
- __entry->sync_to = to->engine->id;
+ __entry->from_class = from->engine->uabi_class;
+ __entry->from_instance = from->engine->instance;
+ __entry->to_class = to->engine->uabi_class;
+ __entry->to_instance = to->engine->instance;
__entry->seqno = from->global_seqno;
),
- TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+ TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
__entry->dev,
- __entry->sync_from, __entry->sync_to,
+ __entry->from_class, __entry->from_instance,
+ __entry->to_class, __entry->to_instance,
__entry->seqno)
);
@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, flags)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->flags)
);
DECLARE_EVENT_CLASS(i915_request,
@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, port)
@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->prio, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->prio, __entry->global_seqno, __entry->port)
);
TRACE_EVENT(i915_request_out,
@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, completed)
@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
- __entry->dev, __entry->hw_id, __entry->ring,
- __entry->ctx, __entry->seqno,
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global_seqno, __entry->completed)
);
@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ring)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(bool, waiters)
),
TP_fast_assign(
__entry->dev = engine->i915->drm.primary->index;
- __entry->ring = engine->id;
+ __entry->class = engine->uabi_class;
+ __entry->instance = engine->instance;
__entry->seqno = intel_engine_get_seqno(engine);
__entry->waiters = waiters;
),
- TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
- __entry->dev, __entry->ring, __entry->seqno,
- __entry->waiters)
+ TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->seqno, __entry->waiters)
);
DEFINE_EVENT(i915_request, i915_request_retire,
@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
__field(unsigned int, flags)
@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global,
- !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ __entry->flags)
);
DEFINE_EVENT(i915_request, i915_request_wait_end,
@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+ __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -953,36 +973,6 @@ DEFINE_EVENT(i915_context, i915_context_free,
TP_ARGS(ctx)
);
-/**
- * DOC: switch_mm tracepoint
- *
- * This tracepoint allows tracking of the mm switch, which is an important point
- * in the lifetime of the vm in the legacy submission path. This tracepoint is
- * called only if full ppgtt is enabled.
- */
-TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
-
- TP_ARGS(engine, to),
-
- TP_STRUCT__entry(
- __field(u32, ring)
- __field(struct i915_gem_context *, to)
- __field(struct i915_address_space *, vm)
- __field(u32, dev)
- ),
-
- TP_fast_assign(
- __entry->ring = engine->id;
- __entry->to = to;
- __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = engine->i915->drm.primary->index;
- ),
-
- TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
- __entry->dev, __entry->ring, __entry->to, __entry->vm)
-);
-
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 00165ad55fb3..395dd2511568 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -43,7 +43,7 @@
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
-#if GCC_VERSION >= 70000
+#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows(A, B) \
__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
#else
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 5fe9f3f39467..869cf4a3b6de 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
node->start + node->size,
node->size / 1024);
- ggtt->base.reserved -= node->size;
+ ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
}
@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
- ret = i915_gem_gtt_reserve(&ggtt->base, node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
- ggtt->base.reserved += size;
+ ggtt->vm.reserved += size;
return ret;
}
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long ggtt_end = ggtt->base.total;
+ unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index bb8338450dc1..551acc390046 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
}
+static inline bool
+intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9324d476e0a7..11d834f94220 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -21,7 +21,7 @@
* IN THE SOFTWARE.
*
*/
-
+
#include "i915_vma.h"
#include "i915_drv.h"
@@ -30,18 +30,53 @@
#include <drm/drm_gem.h>
+#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
+
+#include <linux/stackdepot.h>
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+ unsigned long entries[12];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ };
+ char buf[512];
+
+ if (!vma->node.stack) {
+ DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
+ vma->node.start, vma->node.size, reason);
+ return;
+ }
+
+ depot_fetch_stack(vma->node.stack, &trace);
+ snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+ DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
+ vma->node.start, vma->node.size, reason, buf);
+}
+
+#else
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+}
+
+#endif
+
+struct i915_vma_active {
+ struct i915_gem_active base;
+ struct i915_vma *vma;
+ struct rb_node node;
+ u64 timeline;
+};
+
static void
-i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
+__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
struct drm_i915_gem_object *obj = vma->obj;
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
+ GEM_BUG_ON(!i915_vma_is_active(vma));
+ if (--vma->active_count)
return;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -75,6 +110,21 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
}
}
+static void
+i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+ struct i915_vma_active *active =
+ container_of(base, typeof(*active), base);
+
+ __i915_vma_retire(active->vma, rq);
+}
+
+static void
+i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+ __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
+}
+
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -82,19 +132,20 @@ vma_create(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
struct rb_node *rb, **p;
- int i;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
+ vma->active = RB_ROOT;
+
+ init_request_active(&vma->last_active, i915_vma_last_retire);
init_request_active(&vma->last_fence, NULL);
vma->vm = vm;
+ vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->resv = obj->resv;
vma->size = obj->base.size;
@@ -109,7 +160,7 @@ vma_create(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT));
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
- GEM_BUG_ON(vma->size >= obj->base.size);
+ GEM_BUG_ON(vma->size > obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
@@ -280,7 +331,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
@@ -345,7 +396,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
@@ -365,6 +416,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
return;
obj = vma->obj;
+ GEM_BUG_ON(!obj);
i915_vma_unpin(vma);
i915_vma_close(vma);
@@ -459,6 +511,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
return true;
}
+static void assert_bind_count(const struct drm_i915_gem_object *obj)
+{
+ /*
+ * Combine the assertion that the object is bound and that we have
+ * pinned its pages. But we should never have bound the object
+ * more than we have pinned its pages. (For complete accuracy, we
+ * assume that no one else is pinning the pages, but as a rough assertion
+ * that we will not run into problems later, this will do!)
+ */
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
@@ -477,7 +541,7 @@ static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *dev_priv = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
+ unsigned int cache_level;
u64 start, end;
int ret;
@@ -512,20 +576,25 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
* attempt to find space.
*/
if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+ size, flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return -ENOSPC;
}
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
+ if (vma->obj) {
+ ret = i915_gem_object_pin_pages(vma->obj);
+ if (ret)
+ return ret;
+
+ cache_level = vma->obj->cache_level;
+ } else {
+ cache_level = 0;
+ }
GEM_BUG_ON(vma->pages);
- ret = vma->vm->set_pages(vma);
+ ret = vma->ops->set_pages(vma);
if (ret)
goto err_unpin;
@@ -538,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, obj->cache_level,
+ size, offset, cache_level,
flags);
if (ret)
goto err_clear;
@@ -577,7 +646,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, obj->cache_level,
+ size, alignment, cache_level,
start, end, flags);
if (ret)
goto err_clear;
@@ -586,23 +655,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- spin_lock(&dev_priv->mm.obj_lock);
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
- obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
+ if (vma->obj) {
+ struct drm_i915_gem_object *obj = vma->obj;
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+ obj->bind_count++;
+ spin_unlock(&dev_priv->mm.obj_lock);
+
+ assert_bind_count(obj);
+ }
return 0;
err_clear:
- vma->vm->clear_pages(vma);
+ vma->ops->clear_pages(vma);
err_unpin:
- i915_gem_object_unpin_pages(obj);
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
return ret;
}
@@ -610,30 +684,35 @@ static void
i915_vma_remove(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- vma->vm->clear_pages(vma);
+ vma->ops->clear_pages(vma);
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- /* Since the unbound list is global, only move to that list if
+ /*
+ * Since the unbound list is global, only move to that list if
* no more VMAs exist.
*/
- spin_lock(&i915->mm.obj_lock);
- if (--obj->bind_count == 0)
- list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ if (vma->obj) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ spin_lock(&i915->mm.obj_lock);
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+ spin_unlock(&i915->mm.obj_lock);
+
+ /*
+ * And finally, now that the object is completely decoupled from this
+ * vma, we can drop its hold on the backing storage and allow
+ * it to be reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+ assert_bind_count(obj);
+ }
}
int __i915_vma_do_pin(struct i915_vma *vma,
@@ -658,7 +737,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
if (ret)
goto err_remove;
@@ -715,23 +794,28 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
- int i;
+ struct drm_i915_private *i915 = vma->vm->i915;
+ struct i915_vma_active *iter, *n;
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
list_del(&vma->obj_link);
list_del(&vma->vm_link);
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+ if (vma->obj)
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+ rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
+ GEM_BUG_ON(i915_gem_active_isset(&iter->base));
+ kfree(iter);
+ }
+
+ kmem_cache_free(i915->vmas, vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -795,23 +879,173 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
-int i915_vma_unbind(struct i915_vma *vma)
+static void export_fence(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ struct reservation_object *resv = vma->resv;
+
+ /*
+ * Ignore errors from failing to allocate the new fence; we can't
+ * handle an error right now. Worst case should be missed
+ * synchronisation leading to rendering corruption.
+ */
+ reservation_object_lock(resv, NULL);
+ if (flags & EXEC_OBJECT_WRITE)
+ reservation_object_add_excl_fence(resv, &rq->fence);
+ else if (reservation_object_reserve_shared(resv) == 0)
+ reservation_object_add_shared_fence(resv, &rq->fence);
+ reservation_object_unlock(resv);
+}
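
export_fence() follows the usual reservation-object rule: a write installs an exclusive fence that supersedes all readers, while a read is appended to the shared list only if reserving a slot succeeds. A simplified userspace model of that rule, with invented types rather than the kernel's reservation_object API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fence { unsigned long ctx; };

struct resv {
    struct fence *excl;       /* last writer, if any */
    struct fence **shared;    /* readers since the last writer */
    int shared_count, shared_max;
};

static int resv_reserve_shared(struct resv *r)
{
    if (r->shared_count < r->shared_max)
        return 0;
    int max = r->shared_max ? 2 * r->shared_max : 4;
    struct fence **tmp = realloc(r->shared, max * sizeof(*tmp));
    if (!tmp)
        return -ENOMEM;
    r->shared = tmp;
    r->shared_max = max;
    return 0;
}

static void resv_add_fence(struct resv *r, struct fence *f, int write)
{
    if (write) {
        r->excl = f;          /* a writer supersedes all readers */
        r->shared_count = 0;
    } else if (resv_reserve_shared(r) == 0) {
        r->shared[r->shared_count++] = f;
    }
    /* else: allocation failed, fence dropped -- missed sync only */
}

int main(void)
{
    struct resv r = { 0 };
    struct fence read1 = { 1 }, read2 = { 2 }, write1 = { 3 };

    resv_add_fence(&r, &read1, 0);
    resv_add_fence(&r, &read2, 0);
    resv_add_fence(&r, &write1, 1);
    printf("excl ctx=%lu, shared readers=%d\n",
           r.excl ? r.excl->ctx : 0UL, r.shared_count);
    free(r.shared);
    return 0;
}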
+
+static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
+{
+ struct i915_vma_active *active;
+ struct rb_node **p, *parent;
+ struct i915_request *old;
+
+ /*
+ * We track the most recently used timeline to skip an rbtree search
+ * for the common case; under typical loads we never need the rbtree
+ * at all. We can reuse the last_active slot if it is empty, that is,
+ * after the previous activity has been retired, or if the active
+ * matches the current timeline.
+ *
+ * Note that we allow the timeline to be active simultaneously in
+ * the rbtree and the last_active cache. We do this to avoid having
+ * to search and replace the rbtree element for a new timeline, with
+ * the cost being that we must be aware that the vma may be retired
+ * twice for the same timeline (as the older rbtree element will be
+ * retired before the new request added to last_active).
+ */
+ old = i915_gem_active_raw(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex);
+ if (!old || old->fence.context == idx)
+ goto out;
+
+ /* Move the currently active fence into the rbtree */
+ idx = old->fence.context;
+
+ parent = NULL;
+ p = &vma->active.rb_node;
+ while (*p) {
+ parent = *p;
+
+ active = rb_entry(parent, struct i915_vma_active, node);
+ if (active->timeline == idx)
+ goto replace;
+
+ if (active->timeline < idx)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ active = kmalloc(sizeof(*active), GFP_KERNEL);
+
+ /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
+ if (unlikely(!i915_gem_active_raw(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex))) {
+ kfree(active);
+ goto out;
+ }
+
+ if (unlikely(!active))
+ return ERR_PTR(-ENOMEM);
+
+ init_request_active(&active->base, i915_vma_retire);
+ active->vma = vma;
+ active->timeline = idx;
+
+ rb_link_node(&active->node, parent, p);
+ rb_insert_color(&active->node, &vma->active);
+
+replace:
+ /*
+ * Overwrite the previous active slot in the rbtree with last_active,
+ * leaving last_active zeroed. If the previous slot is still active,
+ * we must be careful as we now only expect to receive one retire
+ * callback not two, and so must undo the active counting for the
+ * overwritten slot.
+ */
+ if (i915_gem_active_isset(&active->base)) {
+ /* Retire ourselves from the old rq->active_list */
+ __list_del_entry(&active->base.link);
+ vma->active_count--;
+ GEM_BUG_ON(!vma->active_count);
+ }
+ GEM_BUG_ON(list_empty(&vma->last_active.link));
+ list_replace_init(&vma->last_active.link, &active->base.link);
+ active->base.request = fetch_and_zero(&vma->last_active.request);
+
+out:
+ return &vma->last_active;
+}
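
active_instance() keeps a one-entry MRU cache (last_active) in front of a tree keyed by timeline id, so the common single-timeline workload never walks the tree. A userspace sketch of the same caching scheme, using a plain unbalanced BST in place of the kernel rbtree and omitting the retire bookkeeping (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct node {
    unsigned long timeline;
    struct node *l, *r;
};

struct tracker {
    unsigned long last_timeline;   /* one-entry MRU cache, 0 == empty */
    struct node *root;             /* all other timelines seen so far */
};

static struct node **find_slot(struct node **p, unsigned long idx)
{
    while (*p && (*p)->timeline != idx)
        p = idx > (*p)->timeline ? &(*p)->r : &(*p)->l;
    return p;
}

/* Returns 1 on a cache hit, 0 when the tree had to be consulted. */
static int track(struct tracker *t, unsigned long idx)
{
    if (!t->last_timeline || t->last_timeline == idx) {
        t->last_timeline = idx;    /* fast path, no tree walk */
        return 1;
    }

    /* Demote the cached timeline into the tree, then cache idx. */
    struct node **slot = find_slot(&t->root, t->last_timeline);
    if (!*slot) {
        *slot = calloc(1, sizeof(**slot));
        if (!*slot)
            abort();               /* demo only; the driver backs off */
        (*slot)->timeline = t->last_timeline;
    }
    t->last_timeline = idx;
    return 0;
}

int main(void)
{
    struct tracker t = { 0, NULL };
    unsigned long use[] = { 1, 1, 1, 2, 2, 1, 3 };

    for (unsigned int i = 0; i < sizeof(use) / sizeof(use[0]); i++)
        printf("timeline %lu: %s\n", use[i],
               track(&t, use[i]) ? "cache hit" : "tree path");
    return 0;
}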
+
+int i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
+ struct i915_gem_active *active;
+
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+ active = active_instance(vma, rq->fence.context);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
+
+ /*
+ * Add a reference if we're newly entering the active list.
+ * The order in which we add operations to the retirement queue is
+ * vital here: mark_active adds to the start of the callback list,
+ * such that subsequent callbacks are called first. Therefore we
+ * add the active reference first and queue for it to be dropped
+ * *last*.
+ */
+ if (!i915_gem_active_isset(active) && !vma->active_count++) {
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
+ obj->active_count++;
+ }
+ i915_gem_active_set(active, rq);
+ GEM_BUG_ON(!i915_vma_is_active(vma));
+ GEM_BUG_ON(!obj->active_count);
+
+ obj->write_domain = 0;
+ if (flags & EXEC_OBJECT_WRITE) {
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
+
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, rq);
+
+ obj->read_domains = 0;
+ }
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ i915_gem_active_set(&vma->last_fence, rq);
+
+ export_fence(vma, rq, flags);
+ return 0;
+}
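
The ordering argument in the comment above relies on retirement callbacks being pushed onto the front of a list and run front-to-back, so the first callback queued is the last to run. A tiny sketch of that LIFO property (hypothetical names, not the kernel API):

#include <stdio.h>

struct cb {
    const char *what;
    struct cb *next;
};

static struct cb *retire_list;

static void queue_retire(struct cb *cb)
{
    cb->next = retire_list;    /* push front, like mark_active */
    retire_list = cb;
}

static void retire_all(void)
{
    for (struct cb *cb = retire_list; cb; cb = cb->next)
        printf("retire: %s\n", cb->what);
    retire_list = NULL;
}

int main(void)
{
    struct cb drop_ref = { "drop object reference", NULL };
    struct cb unpin = { "unpin from GTT", NULL };

    queue_retire(&drop_ref);    /* queued first... */
    queue_retire(&unpin);       /* ...so it runs first */
    retire_all();               /* the reference drop prints last */
    return 0;
}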
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- /* First wait upon any activity as retiring the request may
+ /*
+ * First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
might_sleep();
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
+ if (i915_vma_is_active(vma)) {
+ struct i915_vma_active *active, *n;
- /* When a closed VMA is retired, it is unbound - eek.
+ /*
+ * When a closed VMA is retired, it is unbound - eek.
* In order to prevent it from being recursively closed,
* take a pin on the vma so that the second unbind is
* aborted.
@@ -825,33 +1059,36 @@ int i915_vma_unbind(struct i915_vma *vma)
*/
__i915_vma_pin(vma);
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->i915->drm.struct_mutex);
- if (ret)
- break;
- }
+ ret = i915_gem_active_retire(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex);
+ if (ret)
+ goto unpin;
- if (!ret) {
- ret = i915_gem_active_retire(&vma->last_fence,
+ rbtree_postorder_for_each_entry_safe(active, n,
+ &vma->active, node) {
+ ret = i915_gem_active_retire(&active->base,
&vma->vm->i915->drm.struct_mutex);
+ if (ret)
+ goto unpin;
}
+ ret = i915_gem_active_retire(&vma->last_fence,
+ &vma->vm->i915->drm.struct_mutex);
+unpin:
__i915_vma_unpin(vma);
if (ret)
return ret;
}
GEM_BUG_ON(i915_vma_is_active(vma));
- if (i915_vma_is_pinned(vma))
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
return -EBUSY;
+ }
if (!drm_mm_node_allocated(&vma->node))
return 0;
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
@@ -878,7 +1115,7 @@ int i915_vma_unbind(struct i915_vma *vma)
if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
+ vma->ops->unbind_vma(vma);
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index fc4294cfaa91..f06d66377107 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -26,6 +26,7 @@
#define __I915_VMA_H__
#include <linux/io-mapping.h>
+#include <linux/rbtree.h>
#include <drm/drm_mm.h>
@@ -49,10 +50,12 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ const struct i915_vma_ops *ops;
struct drm_i915_fence_reg *fence;
struct reservation_object *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
+ void *private; /* owned by creator */
u64 size;
u64 display_alignment;
struct i915_page_sizes page_sizes;
@@ -92,8 +95,9 @@ struct i915_vma {
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
#define I915_VMA_GGTT_WRITE BIT(12)
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
+ unsigned int active_count;
+ struct rb_root active;
+ struct i915_gem_active last_active;
struct i915_gem_active last_fence;
/**
@@ -136,6 +140,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+static inline bool i915_vma_is_active(struct i915_vma *vma)
+{
+ return vma->active_count;
+}
+
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags);
+
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
return vma->flags & I915_VMA_GGTT;
@@ -185,34 +198,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -339,6 +324,12 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
__i915_vma_unpin(vma);
}
+static inline bool i915_vma_is_bound(const struct i915_vma *vma,
+ unsigned int where)
+{
+ return vma->flags & where;
+}
+
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
@@ -407,7 +398,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+ /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
if (vma->fence)
__i915_vma_unpin_fence(vma);
}
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
new file mode 100644
index 000000000000..13830e43a4d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Madhav Chauhan <madhav.chauhan@intel.com>
+ * Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include "intel_dsi.h"
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 afe_clk_khz; /* 8X Clock */
+ u32 esc_clk_div_m;
+
+ afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
+ intel_dsi->lane_count);
+
+ esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+ }
+}
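
The divider computed above is plain integer arithmetic: the 8X AFE clock is pclk * bpp / lane_count, and the divider is the smallest value that keeps the escape clock at or below the DPHY maximum. A standalone check, assuming DSI_MAX_ESC_CLK is 20000 kHz (an assumed value):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))
#define DSI_MAX_ESC_CLK         20000   /* kHz, assumed */

int main(void)
{
    unsigned int pclk = 148500;    /* kHz, e.g. a 1080p panel */
    unsigned int bpp = 24, lanes = 4;

    unsigned int afe_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lanes);
    unsigned int esc_div = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);

    /* prints afe=891000 kHz, divider=45, esc=19800 kHz */
    printf("afe=%u kHz, divider=%u, esc=%u kHz\n",
           afe_clk_khz, esc_div, afe_clk_khz / esc_div);
    return 0;
}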
+
+static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp |= COMBO_PHY_MODE_DSI;
+ I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_display_power_get(dev_priv, port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
+ }
+}
+
+static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ u32 lane_mask;
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ lane_mask = PWR_DOWN_LN_3_1_0;
+ break;
+ case 2:
+ lane_mask = PWR_DOWN_LN_3_1;
+ break;
+ case 3:
+ lane_mask = PWR_DOWN_LN_3;
+ break;
+ case 4:
+ default:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_CL_DW10(port));
+ tmp &= ~PWR_DOWN_LN_MASK;
+ I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask);
+ }
+}
+
+static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+{
+ /* step 4a: power up all lanes of the DDI used by DSI */
+ gen11_dsi_power_up_lanes(encoder);
+}
+
+static void __attribute__((unused))
+gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ /* step2: enable IO power */
+ gen11_dsi_enable_io_power(encoder);
+
+ /* step3: enable DSI PLL */
+ gen11_dsi_program_esc_clk_div(encoder);
+
+ /* step4: enable DSI port and DPHY */
+ gen11_dsi_enable_port_and_phy(encoder);
+}
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index d1abf4bb7c81..6ba478e57b9b 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -12,10 +12,6 @@
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
-static struct intel_dsm_priv {
- acpi_handle dhandle;
-} intel_dsm_priv;
-
static const guid_t intel_dsm_guid =
GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
@@ -72,12 +68,12 @@ static char *intel_dsm_mux_type(u8 type)
}
}
-static void intel_dsm_platform_mux_info(void)
+static void intel_dsm_platform_mux_info(acpi_handle dhandle)
{
int i;
union acpi_object *pkg, *connector_count;
- pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
+ pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
NULL, ACPI_TYPE_PACKAGE);
if (!pkg) {
@@ -107,41 +103,40 @@ static void intel_dsm_platform_mux_info(void)
ACPI_FREE(pkg);
}
-static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
- return false;
+ return NULL;
if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
- return false;
+ return NULL;
}
- intel_dsm_priv.dhandle = dhandle;
- intel_dsm_platform_mux_info();
+ intel_dsm_platform_mux_info(dhandle);
- return true;
+ return dhandle;
}
static bool intel_dsm_detect(void)
{
+ acpi_handle dhandle = NULL;
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
- bool has_dsm = false;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- has_dsm |= intel_dsm_pci_probe(pdev);
+ dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
}
- if (vga_count == 2 && has_dsm) {
- acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ if (vga_count == 2 && dhandle) {
+ acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
return true;
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 40285d1b91b7..b04952bacf77 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -59,7 +59,8 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
- DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+ DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -95,7 +96,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return 0;
}
- DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+ DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -124,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+ new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
crtc_state->mode_changed = true;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 6d068786eb41..dcba645cabb8 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
&crtc_state->base.adjusted_mode;
int ret;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
@@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!crtc)
return 0;
@@ -277,7 +265,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t *val)
{
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -299,6 +288,7 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 3ea566f99450..b725835b47ef 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -59,6 +59,7 @@
*/
/* DP N/M table */
+#define LC_810M 810000
#define LC_540M 540000
#define LC_270M 270000
#define LC_162M 162000
@@ -99,6 +100,15 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 128000, LC_540M, 4096, 33750 },
{ 176400, LC_540M, 3136, 18750 },
{ 192000, LC_540M, 2048, 11250 },
+ { 32000, LC_810M, 1024, 50625 },
+ { 44100, LC_810M, 784, 28125 },
+ { 48000, LC_810M, 512, 16875 },
+ { 64000, LC_810M, 2048, 50625 },
+ { 88200, LC_810M, 1568, 28125 },
+ { 96000, LC_810M, 1024, 16875 },
+ { 128000, LC_810M, 4096, 50625 },
+ { 176400, LC_810M, 3136, 28125 },
+ { 192000, LC_810M, 2048, 16875 },
};
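
The new LC_810M (HBR3) rows can be derived from the DP audio relation Maud/Naud = 512 * fs / f_LS_Clk, reduced to lowest terms; assuming the columns above are {sample rate, link clock, M, N} with the link rate in kHz, this sketch reproduces the first three entries:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
    while (b) {
        unsigned long t = a % b;
        a = b;
        b = t;
    }
    return a;
}

int main(void)
{
    const unsigned long f_ls_hz = 810000000UL;    /* LC_810M */
    const unsigned long rates[] = { 32000, 44100, 48000 };

    for (int i = 0; i < 3; i++) {
        unsigned long m = 512UL * rates[i];
        unsigned long n = f_ls_hz;
        unsigned long g = gcd(m, n);

        printf("fs=%lu -> m=%lu n=%lu\n", rates[i], m / g, n / g);
    }
    return 0;    /* prints 1024/50625, 784/28125, 512/16875 */
}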
static const struct dp_aud_n_m *
@@ -198,13 +208,13 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
}
static bool intel_eld_uptodate(struct drm_connector *connector,
- i915_reg_t reg_eldv, uint32_t bits_eldv,
- i915_reg_t reg_elda, uint32_t bits_elda,
+ i915_reg_t reg_eldv, u32 bits_eldv,
+ i915_reg_t reg_elda, u32 bits_elda,
i915_reg_t reg_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- uint8_t *eld = connector->eld;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 tmp;
int i;
tmp = I915_READ(reg_eldv);
@@ -218,7 +228,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
I915_WRITE(reg_elda, tmp);
for (i = 0; i < drm_eld_size(eld) / 4; i++)
- if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ if (I915_READ(reg_edid) != *((const u32 *)eld + i))
return false;
return true;
@@ -229,7 +239,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- uint32_t eldv, tmp;
+ u32 eldv, tmp;
DRM_DEBUG_KMS("Disable audio codec\n");
@@ -251,12 +261,12 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_connector *connector = conn_state->connector;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 eldv;
+ u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+ DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
tmp = I915_READ(G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
@@ -278,7 +288,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
len = min(drm_eld_size(eld) / 4, len);
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
tmp = I915_READ(G4X_AUD_CNTL_ST);
tmp |= eldv;
@@ -393,7 +403,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
- uint32_t tmp;
+ u32 tmp;
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
@@ -426,8 +436,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
- const uint8_t *eld = connector->eld;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 tmp;
int len, i;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
@@ -456,7 +466,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+ I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -477,7 +487,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
- uint32_t tmp, eldv;
+ u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
@@ -524,8 +534,8 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
- uint8_t *eld = connector->eld;
- uint32_t tmp, eldv;
+ const u8 *eld = connector->eld;
+ u32 tmp, eldv;
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
@@ -575,7 +585,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+ I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(aud_cntrl_st2);
@@ -639,11 +649,12 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -681,11 +692,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -880,7 +892,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
return ret;
}
-static const struct i915_audio_component_ops i915_audio_component_ops = {
+static const struct drm_audio_component_ops i915_audio_component_ops = {
.owner = THIS_MODULE,
.get_power = i915_audio_component_get_power,
.put_power = i915_audio_component_put_power,
@@ -897,12 +909,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
- if (WARN_ON(acomp->ops || acomp->dev))
+ if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = &i915_audio_component_ops;
- acomp->dev = i915_kdev;
+ acomp->base.ops = &i915_audio_component_ops;
+ acomp->base.dev = i915_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -919,8 +931,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = NULL;
- acomp->dev = NULL;
+ acomp->base.ops = NULL;
+ acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 54270bdde100..1faa494e2bc9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs)
return;
- dev_priv->vbt.lvds_vbt = 1;
-
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
panel_type);
@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
- dev_priv->vbt.edp.support = 1;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ /*
+ * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+ * to mean "eDP". The VBT spec doesn't agree with that
+ * interpretation, but real world VBTs seem to.
+ */
+ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ } else {
+ /*
+ * FIXME it's not clear which BDB version has the LVDS config
+ * bits defined. Revision history in the VBT spec says:
+ * "0.92 | Add two definitions for VBT value of LVDS Active
+ * Config (00b and 11b values defined) | 06/13/2005"
+ * but does not specify the BDB version.
+ *
+ * So far version 134 (on i945gm) is the oldest VBT observed
+ * in the wild with the bits correctly populated. Version
+ * 108 (on i85x) does not have the bits correctly populated.
+ */
+ if (bdb->version >= 134 &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ }
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/*
@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
- if (!edp) {
- if (dev_priv->vbt.edp.support)
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ if (!edp)
return;
- }
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
@@ -634,7 +652,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}
if (bdb->version >= 173) {
- uint8_t vswing;
+ u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
if (i915_modparams.edp_vswing) {
@@ -688,8 +706,54 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
break;
}
- dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
- dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+ /*
+ * New PSR options: 0=500us, 1=100us, 2=2500us, 3=0us.
+ * The old decimal value is the wakeup time in multiples of 100us.
+ */
+ if (bdb->version >= 205 &&
+ (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10)) {
+ switch (psr_table->tp1_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+ break;
+ }
+
+ switch (psr_table->tp2_tp3_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ break;
+ }
+ } else {
+ dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ }
}
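
The decode above compresses to a small helper: new VBTs store an enum with 2500us as the conservative out-of-range fallback, while old VBTs store the time directly in multiples of 100us. A standalone sketch, gating only on BDB version and ignoring the platform checks (hypothetical helper, not the driver's code):

#include <stdio.h>

static int psr_wakeup_us(int bdb_version, int raw)
{
    if (bdb_version < 205)
        return raw * 100;    /* old scheme: direct decimal value */

    switch (raw) {
    case 0: return 500;
    case 1: return 100;
    case 3: return 0;
    default:                 /* out of range: be conservative */
    case 2: return 2500;
    }
}

int main(void)
{
    printf("%d\n", psr_wakeup_us(210, 1));    /* 100 */
    printf("%d\n", psr_wakeup_us(210, 7));    /* 2500, clamped */
    printf("%d\n", psr_wakeup_us(200, 5));    /* 500, old scheme */
    return 0;
}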
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -902,7 +966,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
* byte.
*/
- size_of_sequence = *((const uint32_t *)(data + index));
+ size_of_sequence = *((const u32 *)(data + index));
index += 4;
seq_end = index + size_of_sequence;
@@ -1197,18 +1261,37 @@ static const u8 cnp_ddc_pin_map[] = {
[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
};
+static const u8 icp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
- if (HAS_PCH_CNP(dev_priv)) {
- if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
- return cnp_ddc_pin_map[vbt_pin];
- } else {
- DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
- return 0;
- }
+ const u8 *ddc_pin_map;
+ int n_entries;
+
+ if (HAS_PCH_ICP(dev_priv)) {
+ ddc_pin_map = icp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+ } else if (HAS_PCH_CNP(dev_priv)) {
+ ddc_pin_map = cnp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+ } else {
+ /* Assuming direct map */
+ return vbt_pin;
}
- return vbt_pin;
+ if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+ return ddc_pin_map[vbt_pin];
+
+ DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
+ return 0;
}
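
The rewritten helper is table-driven: a VBT pin is accepted only if it indexes inside the selected map and hits a non-zero entry, so out-of-range values and gaps in the map are rejected identically. A userspace sketch with made-up pin numbers:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned char map_pin(const unsigned char *map, int n,
                             unsigned char vbt_pin)
{
    if (vbt_pin < n && map[vbt_pin] != 0)
        return map[vbt_pin];
    fprintf(stderr, "ignoring invalid VBT DDC pin %d\n", vbt_pin);
    return 0;
}

int main(void)
{
    /* hypothetical map: indices 1 and 2 are wired, 0 and 3 are gaps */
    static const unsigned char map[] = { 0, 5, 6, 0 };

    printf("%d\n", map_pin(map, ARRAY_SIZE(map), 2));    /* 6 */
    printf("%d\n", map_pin(map, ARRAY_SIZE(map), 3));    /* 0, gap */
    printf("%d\n", map_pin(map, ARRAY_SIZE(map), 9));    /* 0, range */
    return 0;
}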
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
@@ -1504,7 +1587,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
- dev_priv->vbt.lvds_vbt = 0;
/* SDVO panel data */
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
@@ -1513,6 +1595,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1;
+ /* driver features */
+ dev_priv->vbt.int_lvds_support = 1;
+
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
/*
@@ -1636,7 +1721,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (HAS_PCH_NOP(dev_priv)) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 18e643df523e..1db6ba7d926e 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
struct intel_engine_cs *engine =
from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned int irq_count;
if (!b->irq_armed)
return;
- if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
- b->hangcheck_interrupts = atomic_read(&engine->irq_count);
+ irq_count = READ_ONCE(b->irq_count);
+ if (b->hangcheck_interrupts != irq_count) {
+ b->hangcheck_interrupts = irq_count;
mod_timer(&b->hangcheck, wait_timeout());
return;
}
@@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b)
if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
return false;
- /* Only start with the heavy weight fake irq timer if we have not
+ /*
+ * Only start with the heavyweight fake irq timer if we have not
* seen any interrupts since enabling it the first time. If the
* interrupts are still arriving, it means we made a mistake in our
* engine->seqno_barrier(), a timing error that should be transient
* and unlikely to reoccur.
*/
- return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
+ return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
}
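
Replacing the two atomic_read() calls with one READ_ONCE() snapshot means the compare and the store see the same value even if the IRQ handler increments the counter in between. A sketch of that snapshot pattern, approximating READ_ONCE with a volatile access (uses GCC's typeof extension):

#include <stdio.h>

#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

static unsigned int irq_count;    /* bumped by the IRQ handler */
static unsigned int hangcheck_interrupts;

static int seen_new_interrupts(void)
{
    unsigned int snapshot = READ_ONCE(irq_count);    /* one read */

    if (hangcheck_interrupts != snapshot) {
        hangcheck_interrupts = snapshot;    /* stores the same value */
        return 1;
    }
    return 0;
}

int main(void)
{
    irq_count = 3;
    printf("%d\n", seen_new_interrupts());    /* 1: progress seen */
    printf("%d\n", seen_new_interrupts());    /* 0: no new irqs */
    return 0;
}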
static void enable_fake_irq(struct intel_breadcrumbs *b)
@@ -846,8 +849,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
/*
* Leave the fake_irq timer enabled (if it is running), but clear the
@@ -871,7 +875,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
*/
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 704ddb4d3ca7..29075c763428 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -316,6 +316,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
break;
default:
DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ /* fall through */
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
cdclk_state->cdclk = 133333;
break;
@@ -991,6 +992,16 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
u32 freq_select, cdclk_ctl;
int ret;
+ /*
+ * Based on WA#1183, the 308 and 617 MHz CDCLK rates are
+ * unsupported on SKL. In theory this should never happen since only
+ * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not
+ * supported on SKL either, see the above WA. WARN whenever trying to
+ * use the corresponding VCO freq as that always leads to using the
+ * minimum 308MHz CDCLK.
+ */
+ WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+
mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -1787,6 +1798,7 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
switch (ref) {
default:
MISSING_CASE(ref);
+ /* fall through */
case 24000:
ranges = ranges_24;
break;
@@ -1814,6 +1826,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 307200:
case 556800:
case 652800:
@@ -1861,11 +1874,36 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
skl_cdclk_decimal(cdclk));
mutex_lock(&dev_priv->pcu_lock);
- /* TODO: add proper DVFS support. */
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
+
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+}
+
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ case 50000:
+ case 307200:
+ case 312000:
+ return 0;
+ case 556800:
+ case 552000:
+ return 1;
+ default:
+ MISSING_CASE(cdclk);
+ /* fall through */
+ case 652800:
+ case 648000:
+ return 2;
+ }
}
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1879,6 +1917,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
default:
MISSING_CASE(val);
+ /* fall through */
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
cdclk_state->ref = 24000;
break;
@@ -1899,7 +1938,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
*/
cdclk_state->vco = 0;
cdclk_state->cdclk = cdclk_state->bypass;
- return;
+ goto out;
}
cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
@@ -1908,6 +1947,14 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
cdclk_state->cdclk = cdclk_state->vco / 2;
+
+out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ icl_calc_voltage_level(cdclk_state->cdclk);
}
/**
@@ -1950,6 +1997,8 @@ sanitize:
sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
sanitized_state.cdclk);
+ sanitized_state.voltage_level =
+ icl_calc_voltage_level(sanitized_state.cdclk);
icl_set_cdclk(dev_priv, &sanitized_state);
}
@@ -1967,6 +2016,7 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
icl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -2470,6 +2520,9 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ max(icl_calc_voltage_level(cdclk),
+ cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = icl_calc_cdclk(0, ref);
@@ -2477,6 +2530,8 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ icl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual = intel_state->cdclk.logical;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 072b326d5ee0..0c6bf82bb059 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(adpa_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+
+ return val & ADPA_DAC_ENABLE;
+}
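
The helper above is a plain mask-and-shift decode: one register read yields both the enable bit and the pipe-select field, with the mask and shift chosen per platform. A minimal model with invented bit positions (not the real ADPA layout):

#include <stdio.h>
#include <stdint.h>

#define DAC_ENABLE      (1u << 31)
#define PIPE_SEL_MASK   (3u << 29)
#define PIPE_SEL_SHIFT  29

static int port_enabled(uint32_t val, int *pipe)
{
    /* report the pipe even when the port is disabled */
    *pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
    return !!(val & DAC_ENABLE);
}

int main(void)
{
    int pipe;
    uint32_t val = DAC_ENABLE | (1u << PIPE_SEL_SHIFT);

    printf("enabled=%d pipe=%d\n", port_enabled(val, &pipe), pipe);
    return 0;
}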
+
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(crt->adpa_reg);
+ ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
- if (!(tmp & ADPA_DAC_ENABLE))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev_priv))
- adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
- else if (crtc->pipe == 0)
- adpa |= ADPA_PIPE_A_SELECT;
+ adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
- adpa |= ADPA_PIPE_B_SELECT;
+ adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
@@ -232,6 +232,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
lpt_disable_pch_transcoder(dev_priv);
@@ -268,6 +270,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
dev_priv->display.fdi_link_train(crtc, crtc_state);
+
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void hsw_enable_crt(struct intel_encoder *encoder,
@@ -333,6 +337,10 @@ intel_crt_mode_valid(struct drm_connector *connector,
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
+ /* HSW/BDW FDI limited to 4k */
+ if (mode->hdisplay > 4096)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -375,6 +383,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ /* HSW/BDW FDI limited to 4k */
+ if (adjusted_mode->crtc_hdisplay > 4096 ||
+ adjusted_mode->crtc_hblank_start > 4096)
+ return false;
+
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
@@ -513,7 +526,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
+ if (IS_G45(dev_priv))
tries = 2;
else
tries = 1;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f4a8598a2d39..8761513f3532 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -915,7 +915,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ if (port == PORT_A || port == PORT_B)
+ icl_get_combo_buf_trans(dev_priv, port,
+ INTEL_OUTPUT_HDMI, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ default_entry = n_entries - 1;
+ } else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
default_entry = n_entries - 1;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -1055,14 +1062,31 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
{
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int clock = crtc->config->port_clock;
const enum intel_dpll_id id = pll->info->id;
switch (id) {
default:
MISSING_CASE(id);
+ /* fall through */
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
return DDI_CLK_SEL_NONE;
+ case DPLL_ID_ICL_TBTPLL:
+ switch (clock) {
+ case 162000:
+ return DDI_CLK_SEL_TBT_162;
+ case 270000:
+ return DDI_CLK_SEL_TBT_270;
+ case 540000:
+ return DDI_CLK_SEL_TBT_540;
+ case 810000:
+ return DDI_CLK_SEL_TBT_810;
+ default:
+ MISSING_CASE(clock);
+ break;
+ }
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
@@ -1243,35 +1267,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-/* Finds the only possible encoder associated with the given CRTC. */
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_encoder *ret = NULL;
- struct drm_atomic_state *state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int num_encoders = 0;
- int i;
-
- state = crtc_state->base.state;
-
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- ret = to_intel_encoder(connector_state->best_encoder);
- num_encoders++;
- }
-
- WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
-
- BUG_ON(ret == NULL);
- return ret;
-}
-
#define LC_FREQ 2700
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
@@ -1374,8 +1369,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ if (INTEL_GEN(dev_priv) >= 11) {
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+ } else {
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ }
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
@@ -1451,6 +1451,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
+static void icl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ int link_clock = 0;
+ uint32_t pll_id;
+
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ if (port == PORT_A || port == PORT_B) {
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ else
+ link_clock = icl_calc_dp_combo_pll_link(dev_priv,
+ pll_id);
+ } else {
+ /* FIXME - Add for MG PLL */
+ WARN(1, "MG PLL clock_get code not implemented yet\n");
+ }
+
+ pipe_config->port_clock = link_clock;
+ ddi_dotclock_get(pipe_config);
+}
+
static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1644,6 +1668,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_ICELAKE(dev_priv))
+ icl_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1659,6 +1685,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
+
+ if (crtc_state->limited_color_range)
+ temp |= TRANS_MSA_CEA_RANGE;
+
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
@@ -1782,15 +1812,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
+
+ if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+ /* Quirk time at 100ms for reliable operation */
+ msleep(100);
+ }
}
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
@@ -1958,15 +1997,50 @@ out:
return ret;
}
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder)
+static inline enum intel_display_power_domain
+intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+{
+ /* CNL HW requires the corresponding AUX IOs to be powered up for PSR
+ * while DC states stay enabled, whereas driver-initiated AUX
+ * transfers need the same AUX IOs powered but with DC states
+ * disabled. Accordingly use the AUX power domain here, which leaves DC
+ * states enabled.
+ * However, for non-A AUX ports the corresponding non-EDP transcoders
+ * would have already enabled power well 2 and DC_OFF. This means we can
+ * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+ * specific AUX_IO reference without powering up any extra wells.
+ * Note that PSR is enabled only on Port A even though this function
+ * returns the correct domain for other ports too.
+ */
+ return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_dp->aux_power_domain;
+}
+
+static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum pipe pipe;
+ struct intel_digital_port *dig_port;
+ u64 domains;
- if (intel_ddi_get_hw_state(encoder, &pipe))
- return BIT_ULL(dig_port->ddi_io_power_domain);
+ /*
+ * TODO: Add support for MST encoders. Atm, the following should never
+ * happen since fake-MST encoders don't set their get_power_domains()
+ * hook.
+ */
+ if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ return 0;
- return 0;
+ dig_port = enc_to_dig_port(&encoder->base);
+ domains = BIT_ULL(dig_port->ddi_io_power_domain);
+
+ /* AUX power is only needed for (e)DP mode, not for HDMI. */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
+ }
+
+ return domains;
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2115,6 +2189,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
+/*
+ * We assume that the full set of pre-emphasis values can be
+ * used on all DDI platforms. Should that change we need to
+ * rethink this code.
+ */
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+}
+
static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
@@ -2586,6 +2680,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(intel_dp));
+
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -2610,6 +2707,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2640,6 +2739,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
+ intel_ddi_enable_pipe_clock(crtc_state);
+
intel_dig_port->set_infoframes(&encoder->base,
crtc_state->has_infoframe,
crtc_state, conn_state);
@@ -2709,6 +2810,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
/*
* Power down sink before disabling the port, otherwise we end
* up getting interrupts from the sink on detecting link loss.
@@ -2724,6 +2827,9 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
+
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(intel_dp));
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -2734,11 +2840,13 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- intel_disable_ddi_buf(encoder);
-
dig_port->set_infoframes(&encoder->base, false,
old_crtc_state, old_conn_state);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ intel_disable_ddi_buf(encoder);
+
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@@ -3025,6 +3133,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
{
if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
+ else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 1;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3533,7 +3643,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0fd13df424cf..0ef0c6448d53 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -858,6 +858,8 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p)
{
+ drm_printf(p, "Has logical contexts? %s\n",
+ yesno(caps->has_logical_contexts));
drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 933e31669557..633f9fbf72ea 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -186,6 +186,7 @@ struct intel_device_info {
struct intel_driver_caps {
unsigned int scheduler;
+ bool has_logical_contexts:1;
};
static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2cc6faa1daa8..ed3fa1c8a983 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc)
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
- * We can ditch the crtc->primary->fb check as soon as we can
+ * We can ditch the crtc->primary->state->fb check as soon as we can
* properly reconstruct framebuffers.
*
* FIXME: The intel_crtc->active here should be switched to
@@ -1202,7 +1202,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
u32 val;
- enum pipe panel_pipe = PIPE_A;
+ enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
if (WARN_ON(HAS_DDI(dev_priv)))
@@ -1214,18 +1214,35 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pp_reg = PP_CONTROL(0);
port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- if (port_sel == PANEL_PORT_SELECT_LVDS &&
- I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
- /* XXX: else fix for eDP */
+ switch (port_sel) {
+ case PANEL_PORT_SELECT_LVDS:
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPA:
+ intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPC:
+ intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPD:
+ intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ break;
+ default:
+ MISSING_CASE(port_sel);
+ break;
+ }
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
} else {
+ u32 port_sel;
+
pp_reg = PP_CONTROL(0);
- if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
+ port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
val = I915_READ(pp_reg);
@@ -1267,7 +1284,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
static void assert_plane(struct intel_plane *plane, bool state)
{
- bool cur_state = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool cur_state;
+
+ cur_state = plane->get_hw_state(plane, &pipe);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
@@ -1305,125 +1325,64 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 port_sel, u32 val)
-{
- if ((val & DP_PORT_EN) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv)) {
- u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
- if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
- return false;
- } else {
- if ((val & DP_PIPE_MASK) != (pipe << 30))
- return false;
- }
- return true;
-}
-
-static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t dp_reg)
{
- if ((val & SDVO_ENABLE) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
- return false;
- } else {
- if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
- return false;
- }
- return true;
-}
+ enum pipe port_pipe;
+ bool state;
-static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & LVDS_PORT_EN) == 0)
- return false;
+ state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
- return false;
- }
- return true;
-}
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH DP %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
-static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & ADPA_DAC_ENABLE) == 0)
- return false;
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
- return false;
- }
- return true;
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH DP %c still using transcoder B\n",
+ port_name(port));
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg,
- u32 port_sel)
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t hdmi_reg)
{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ enum pipe port_pipe;
+ bool state;
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
- && (val & DP_PIPEB_SELECT),
- "IBX PCH dp port still using transcoder B\n");
-}
+ state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg)
-{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
- "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
- && (val & SDVO_PIPE_B_SELECT),
- "IBX PCH hdmi port still using transcoder B\n");
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH HDMI %c still using transcoder B\n",
+ port_name(port));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 val;
+ enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
- val = I915_READ(PCH_ADPA);
- I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
- "PCH VGA enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- val = I915_READ(PCH_LVDS);
- I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
- "PCH LVDS enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
static void _vlv_enable_pll(struct intel_crtc *crtc,
@@ -2521,6 +2480,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
int i, num_planes = fb->format->num_planes;
@@ -2585,7 +2545,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* fb layout agrees with the fence layout. We already check that the
* fb stride matches the fence stride elsewhere.
*/
- if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
+ if (i == 0 && i915_gem_object_is_tiled(obj) &&
(x + width) * cpp > fb->pitches[i]) {
DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
i, fb->offsets[i]);
@@ -2670,9 +2630,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
max_size = max(max_size, offset + size);
}
- if (max_size * tile_size > intel_fb->obj->base.size) {
+ if (max_size * tile_size > obj->base.size) {
DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
- max_size * tile_size, intel_fb->obj->base.size);
+ max_size * tile_size, obj->base.size);
return -EINVAL;
}
@@ -2796,10 +2756,10 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
/* FIXME pre-g4x don't work like this */
if (visible) {
- crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
+ crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
crtc_state->active_planes |= BIT(plane->id);
} else {
- crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
+ crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
crtc_state->active_planes &= ~BIT(plane->id);
}
@@ -2922,9 +2882,8 @@ valid_fb:
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
- drm_framebuffer_get(fb);
- primary->fb = primary->state->fb = fb;
- primary->crtc = primary->state->crtc = &intel_crtc->base;
+ plane_state->fb = fb;
+ plane_state->crtc = &intel_crtc->base;
intel_set_plane_visible(to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state),
@@ -3430,24 +3389,33 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-4 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
+ val = I915_READ(DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -3689,7 +3657,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
- if (intel_format_is_yuv(fb->format->format)) {
+ if (fb->format->is_yuv) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -4631,20 +4599,33 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
}
}
-/* Return which DP Port should be selected for Transcoder DP control */
-static enum port
-intel_trans_dp_port_sel(struct intel_crtc *crtc)
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+static struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_connector_state *connector_state;
+ const struct drm_connector *connector;
+ struct intel_encoder *encoder = NULL;
+ int num_encoders = 0;
+ int i;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP)
- return encoder->port;
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ num_encoders++;
}
- return -1;
+ WARN(num_encoders != 1, "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(crtc->pipe));
+
+ return encoder;
}
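
Typical call sites (both appear later in this patch) fetch the single
encoder and use one of its fields directly, e.g.:

	enum port port = intel_get_crtc_new_encoder(state, crtc_state)->port;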
/*
@@ -4655,7 +4636,8 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
+static void ironlake_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
@@ -4714,6 +4696,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
&crtc_state->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
+ enum port port;
+
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
@@ -4726,19 +4710,9 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
- switch (intel_trans_dp_port_sel(crtc)) {
- case PORT_B:
- temp |= TRANS_DP_PORT_SEL_B;
- break;
- case PORT_C:
- temp |= TRANS_DP_PORT_SEL_C;
- break;
- case PORT_D:
- temp |= TRANS_DP_PORT_SEL_D;
- break;
- default:
- BUG();
- }
+ port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+ WARN_ON(port < PORT_B || port > PORT_D);
+ temp |= TRANS_DP_PORT_SEL(port);
I915_WRITE(reg, temp);
}
@@ -4746,7 +4720,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
ironlake_enable_pch_transcoder(dev_priv, pipe);
}
-static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
+static void lpt_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -4776,6 +4751,39 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ */
+u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+{
+ int phase = -0x8000;
+ u16 trip = 0;
+
+ if (chroma_cosited)
+ phase += (sub - 1) * 0x8000 / sub;
+
+ if (phase < 0)
+ phase = 0x10000 + phase;
+ else
+ trip = PS_PHASE_TRIP;
+
+ return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
+
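
Working the arithmetic for the two common cases (derived from the function
above; the PS_PHASE_MASK step only strips bits that are already zero here):

	skl_scaler_calc_phase(1, false); /* luma/RGB: phase = -0x8000, wraps to
					  * 0x10000 - 0x8000 = 0x8000, register
					  * field 0x2000, PS_PHASE_TRIP not set */
	skl_scaler_calc_phase(2, true);  /* 4:2:0 cosited chroma: -0x8000 + 0x4000
					  * = -0x4000, wraps to 0xC000, field 0x3000 */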
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -4975,14 +4983,22 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
&crtc->config->scaler_state;
if (crtc->config->pch_pfit.enabled) {
+ u16 uv_rgb_hphase, uv_rgb_vphase;
int id;
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
}
@@ -5501,10 +5517,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*
* Spurious PCH underruns also occur during PCH enabling.
*/
- if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
@@ -5549,7 +5563,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- ironlake_pch_enable(pipe_config);
+ ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5559,9 +5573,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
- /* Must wait for vblank to avoid spurious PCH FIFO underruns */
- if (intel_crtc->config->has_pch_encoder)
+ /*
+ * Must wait for vblank to avoid spurious PCH FIFO underruns.
+ * And a second vblank wait is needed at least on ILK with
+ * some interlaced HDMI modes. Let's do the double wait always
+ * in case there are more corner cases we don't know about.
+ */
+ if (intel_crtc->config->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ }
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5611,6 +5632,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
+ u32 pipe_chicken;
if (WARN_ON(intel_crtc->active))
return;
@@ -5623,6 +5645,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5651,11 +5675,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_pipe_clock(pipe_config);
-
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
intel_crtc->config->pch_pfit.enabled;
@@ -5673,6 +5692,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(&pipe_config->base);
+ /*
+ * Display WA #1153: enable hardware to bypass the alpha math
+ * and rounding for per-pixel values 00 and 0xff
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
+ if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
+ I915_WRITE_FW(PIPE_CHICKEN(pipe),
+ pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
+ }
+
intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(pipe_config);
@@ -5688,7 +5718,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- lpt_pch_enable(pipe_config);
+ lpt_pch_enable(old_intel_state, pipe_config);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -5741,10 +5771,8 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
- if (intel_crtc->config->has_pch_encoder) {
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- }
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5794,7 +5822,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5805,20 +5833,17 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(old_crtc_state);
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_pipe_clock(intel_crtc->config);
-
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (INTEL_GEN(dev_priv) >= 11)
@@ -5849,6 +5874,22 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (IS_ICELAKE(dev_priv))
+ return port >= PORT_C && port <= PORT_F;
+
+ return false;
+}
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (!intel_port_is_tc(dev_priv, port))
+ return PORT_TC_NONE;
+
+ return port - PORT_C;
+}
+
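
A quick sanity check of the mapping, for illustration only (enum tc_port is
added to intel_display.h later in this patch):

	/* On ICL, PORT_C..PORT_F are Type-C and map to PORT_TC1..PORT_TC4. */
	WARN_ON(intel_port_to_tc(dev_priv, PORT_C) != PORT_TC1);
	WARN_ON(intel_port_to_tc(dev_priv, PORT_F) != PORT_TC4);
	WARN_ON(intel_port_to_tc(dev_priv, PORT_A) != PORT_TC_NONE);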
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
switch (port) {
@@ -7675,16 +7716,18 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8705,16 +8748,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum plane_id plane_id = plane->id;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset, stride_mult, tiling, alpha;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -9142,9 +9187,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
struct intel_encoder *encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
+ intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
@@ -9172,6 +9220,44 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
+static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_state *pipe_config)
+{
+ enum intel_dpll_id id;
+ u32 temp;
+
+ /* TODO: TBT pll not implemented. */
+ switch (port) {
+ case PORT_A:
+ case PORT_B:
+ temp = I915_READ(DPCLKA_CFGCR0_ICL) &
+ DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+
+ if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+ return;
+ break;
+ case PORT_C:
+ id = DPLL_ID_ICL_MGPLL1;
+ break;
+ case PORT_D:
+ id = DPLL_ID_ICL_MGPLL2;
+ break;
+ case PORT_E:
+ id = DPLL_ID_ICL_MGPLL3;
+ break;
+ case PORT_F:
+ id = DPLL_ID_ICL_MGPLL4;
+ break;
+ default:
+ MISSING_CASE(port);
+ return;
+ }
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
@@ -9273,6 +9359,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
WARN(1, "unknown pipe linked to edp transcoder\n");
+ /* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
trans_edp_pipe = PIPE_A;
@@ -9328,7 +9415,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!intel_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(dev_priv))
break;
/* XXX: this works for video mode only */
@@ -9359,7 +9446,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icelake_get_ddi_pll(dev_priv, port, pipe_config);
+ else if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_BC(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9692,7 +9781,8 @@ static void i845_disable_cursor(struct intel_plane *plane,
i845_update_cursor(plane, NULL, NULL);
}
-static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@@ -9704,6 +9794,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane)
ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+ *pipe = PIPE_A;
+
intel_display_power_put(dev_priv, power_domain);
return ret;
@@ -9715,25 +9807,30 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- u32 cntl;
+ u32 cntl = 0;
- cntl = MCURSOR_GAMMA_ENABLE;
+ if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- if (HAS_DDI(dev_priv))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
+ if (INTEL_GEN(dev_priv) <= 10) {
+ cntl |= MCURSOR_GAMMA_ENABLE;
+
+ if (HAS_DDI(dev_priv))
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+ }
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
switch (plane_state->base.crtc_w) {
case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
break;
case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
MISSING_CASE(plane_state->base.crtc_w);
@@ -9741,7 +9838,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
}
if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
- cntl |= CURSOR_ROTATE_180;
+ cntl |= MCURSOR_ROTATE_180;
return cntl;
}
@@ -9903,23 +10000,32 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
i9xx_update_cursor(plane, NULL, NULL);
}
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-3 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ val = I915_READ(CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -10631,7 +10737,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.state->crtc)
- drm_connector_unreference(&connector->base);
+ drm_connector_put(&connector->base);
if (connector->base.encoder) {
connector->base.state->best_encoder =
@@ -10639,7 +10745,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
connector->base.state->crtc =
connector->base.encoder->crtc;
- drm_connector_reference(&connector->base);
+ drm_connector_get(&connector->base);
} else {
connector->base.state->best_encoder = NULL;
connector->base.state->crtc = NULL;
@@ -10918,6 +11024,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
+ /* else: fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -11791,7 +11898,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct drm_crtc_state *new_state)
{
struct intel_dpll_hw_state dpll_hw_state;
- unsigned crtc_mask;
+ unsigned int crtc_mask;
bool active;
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
@@ -11818,7 +11925,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
return;
}
- crtc_mask = 1 << drm_crtc_index(crtc);
+ crtc_mask = drm_crtc_mask(crtc);
if (new_state->active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
@@ -11853,7 +11960,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
if (old_state->shared_dpll &&
old_state->shared_dpll != new_state->shared_dpll) {
- unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+ unsigned int crtc_mask = drm_crtc_mask(crtc);
struct intel_shared_dpll *pll = old_state->shared_dpll;
I915_STATE_WARN(pll->active_mask & crtc_mask,
@@ -12449,6 +12556,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
+static void intel_atomic_cleanup_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, struct drm_atomic_state, commit_work);
+ struct drm_i915_private *i915 = to_i915(state->dev);
+
+ drm_atomic_helper_cleanup_planes(&i915->drm, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+
+ intel_atomic_helper_free_state(i915);
+}
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -12609,13 +12729,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_helper_commit_cleanup_done(state);
-
- drm_atomic_state_put(state);
-
- intel_atomic_helper_free_state(dev_priv);
+ /*
+ * Defer the cleanup of the old state to a separate worker to avoid
+ * impeding the current task (userspace for blocking modesets) that
+ * is executed inline. For out-of-line asynchronous modesets/flips,
+ * deferring to a new worker seems overkill, but we would place a
+ * schedule point (cond_resched()) here anyway to keep latencies
+ * down.
+ */
+ INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
+ schedule_work(&state->commit_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -12981,6 +13104,19 @@ intel_prepare_plane_fb(struct drm_plane *plane,
add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
}
+ /*
+ * We declare pageflips to be interactive and so merit a small bias
+ * towards upclocking to deliver the frame on time. By only changing
+ * the RPS thresholds to sample more regularly and aim for higher
+ * clocks, we can hopefully deliver low power workloads (like kodi)
+ * that are not quite steady state without resorting to forcing
+ * maximum clocks following a vblank miss (see do_rps_boost()).
+ */
+ if (!intel_state->rps_interactive) {
+ intel_rps_mark_interactive(dev_priv, true);
+ intel_state->rps_interactive = true;
+ }
+
return 0;
}
@@ -12997,8 +13133,15 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(old_state->state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ if (intel_state->rps_interactive) {
+ intel_rps_mark_interactive(dev_priv, false);
+ intel_state->rps_interactive = false;
+ }
+
/* Should only be called after a successful intel_prepare_plane_fb()! */
mutex_lock(&dev_priv->drm.struct_mutex);
intel_plane_unpin_fb(to_intel_plane_state(old_state));
@@ -13181,8 +13324,17 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13195,8 +13347,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13211,8 +13372,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -13244,38 +13423,36 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
-
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 4)
- return i965_mod_supported(format, modifier);
- else
- return i8xx_mod_supported(format, modifier);
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
}
-static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
+};
- return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
-}
+static struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
-static struct drm_plane_funcs intel_plane_funcs = {
+static struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13283,7 +13460,7 @@ static struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_primary_plane_format_mod_supported,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
};
static int
@@ -13408,7 +13585,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_plane_format_mod_supported,
+ .format_mod_supported = intel_cursor_format_mod_supported,
};
static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@@ -13466,6 +13643,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
@@ -13521,6 +13699,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->check_plane = intel_check_primary_plane;
if (INTEL_GEN(dev_priv) >= 9) {
+ primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_PRIMARY);
+
if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
intel_primary_formats = skl_pri_planar_formats;
num_formats = ARRAY_SIZE(skl_pri_planar_formats);
@@ -13529,7 +13710,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
+ if (primary->has_ccs)
modifiers = skl_format_modifiers_ccs;
else
modifiers = skl_format_modifiers_noccs;
@@ -13537,6 +13718,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
primary->get_hw_state = skl_plane_get_hw_state;
+
+ plane_funcs = &skl_plane_funcs;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13545,6 +13728,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i965_plane_funcs;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13553,25 +13738,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i8xx_plane_funcs;
}
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -13951,7 +14138,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (intel_crt_present(dev_priv))
intel_crt_init(dev_priv);
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ } else if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -13961,7 +14155,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
- intel_dsi_init(dev_priv);
+ vlv_dsi_init(dev_priv);
} else if (HAS_DDI(dev_priv)) {
int found;
@@ -14067,7 +14261,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
- intel_dsi_init(dev_priv);
+ vlv_dsi_init(dev_priv);
} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
@@ -14124,14 +14318,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
drm_framebuffer_cleanup(fb);
- i915_gem_object_lock(intel_fb->obj);
- WARN_ON(!intel_fb->obj->framebuffer_references--);
- i915_gem_object_unlock(intel_fb->obj);
+ i915_gem_object_lock(obj);
+ WARN_ON(!obj->framebuffer_references--);
+ i915_gem_object_unlock(obj);
- i915_gem_object_put(intel_fb->obj);
+ i915_gem_object_put(obj);
kfree(intel_fb);
}
@@ -14140,8 +14335,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int *handle)
{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
if (obj->userptr.mm) {
DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
@@ -14349,11 +14543,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
break;
case DRM_FORMAT_NV12:
- if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS ||
- mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) {
- DRM_DEBUG_KMS("RC not to be enabled with NV12\n");
- goto err;
- }
if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("unsupported pixel format: %s\n",
@@ -14411,9 +14600,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
i, fb->pitches[i], stride_alignment);
goto err;
}
- }
- intel_fb->obj = obj;
+ fb->obj[i] = &obj->base;
+ }
ret = intel_fill_fb_info(dev_priv, fb);
if (ret)
@@ -14469,6 +14658,10 @@ static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int hdisplay_max, htotal_max;
+ int vdisplay_max, vtotal_max;
+
/*
* Can't reject DBLSCAN here because Xorg ddxen can add piles
* of DBLSCAN modes to the output's mode list when they detect
@@ -14498,6 +14691,36 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else if (INTEL_GEN(dev_priv) >= 3) {
+ hdisplay_max = 4096;
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else {
+ hdisplay_max = 2048;
+ vdisplay_max = 2048;
+ htotal_max = 4096;
+ vtotal_max = 4096;
+ }
+
+ if (mode->hdisplay > hdisplay_max ||
+ mode->hsync_start > htotal_max ||
+ mode->hsync_end > htotal_max ||
+ mode->htotal > htotal_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > vdisplay_max ||
+ mode->vsync_start > vtotal_max ||
+ mode->vsync_end > vtotal_max ||
+ mode->vtotal > vtotal_max)
+ return MODE_V_ILLEGAL;
+
return MODE_OK;
}
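
As a worked example against the limits above (standard CTA-861 4K@60
timings; illustrative only, not part of the patch):

	const struct drm_display_mode mode_4k = {
		.hdisplay = 3840, .hsync_start = 4016, .hsync_end = 4104, .htotal = 4400,
		.vdisplay = 2160, .vsync_start = 2168, .vsync_end = 2178, .vtotal = 2250,
	};
	/* gen3+: 3840 <= 4096 and 4400 <= 8192 -> MODE_OK (flags permitting).
	 * gen2: 3840 > hdisplay_max (2048) and 4400 > htotal_max (4096)
	 *       -> MODE_H_ILLEGAL at the first check. */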
@@ -14646,6 +14869,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
DRM_INFO("Applying T12 delay quirk\n");
}
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -14732,6 +14967,13 @@ static struct intel_quirk intel_quirks[] = {
/* Toshiba Satellite P50-C-18C */
{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+ /* GeminiLake NUC */
+ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ /* ASRock ITX */
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -14936,6 +15178,7 @@ int intel_modeset_init(struct drm_device *dev)
}
}
+ /* maximum framebuffer dimensions */
if (IS_GEN2(dev_priv)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
@@ -14951,11 +15194,11 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
} else if (IS_GEN2(dev_priv)) {
- dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
- dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
} else {
- dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
- dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
}
dev->mode_config.fb_base = ggtt->gmadr.start;
@@ -15105,8 +15348,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
- WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
@@ -15120,12 +15363,12 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 val = I915_READ(DSPCNTR(i9xx_plane));
+ enum pipe pipe;
- return (val & DISPLAY_PLANE_ENABLE) == 0 ||
- (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+ if (!plane->get_hw_state(plane, &pipe))
+ return true;
+
+ return pipe == crtc->pipe;
}
static void
@@ -15284,6 +15527,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ /* notify opregion of the sanitized encoder state */
+ intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15324,7 +15570,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- bool visible = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool visible;
+
+ visible = plane->get_hw_state(plane, &pipe);
intel_set_plane_visible(crtc_state, plane_state, visible);
}
@@ -15423,9 +15672,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* rely on the connector_mask being accurate.
*/
encoder->base.crtc->state->connector_mask |=
- 1 << drm_connector_index(&connector->base);
+ drm_connector_mask(&connector->base);
encoder->base.crtc->state->encoder_mask |=
- 1 << drm_encoder_index(&encoder->base);
+ drm_encoder_mask(&encoder->base);
}
} else {
@@ -15491,11 +15740,20 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
for_each_intel_encoder(&dev_priv->drm, encoder) {
u64 get_domains;
enum intel_display_power_domain domain;
+ struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
continue;
- get_domains = encoder->get_power_domains(encoder);
+ /*
+ * MST-primary and inactive encoders don't have a crtc state
+ * and neither of these requires any power domain references.
+ */
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+ get_domains = encoder->get_power_domains(encoder, crtc_state);
for_each_power_domain(domain, get_domains)
intel_display_power_get(dev_priv, domain);
}
@@ -15671,6 +15929,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_workqueue(dev_priv->modeset_wq);
+
flush_work(&dev_priv->atomic_helper.free_work);
WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
@@ -15714,8 +15974,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder)
{
connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
- &encoder->base);
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 2ef31617614a..138a1bc1818c 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -126,6 +126,41 @@ enum port {
#define port_name(p) ((p) + 'A')
+/*
+ * Port identifiers referenced from other drivers.
+ * Expected to remain stable over time.
+ */
+static inline const char *port_identifier(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return "Port A";
+ case PORT_B:
+ return "Port B";
+ case PORT_C:
+ return "Port C";
+ case PORT_D:
+ return "Port D";
+ case PORT_E:
+ return "Port E";
+ case PORT_F:
+ return "Port F";
+ default:
+ return "<invalid>";
+ }
+}
+
+enum tc_port {
+ PORT_TC_NONE = -1,
+
+ PORT_TC1 = 0,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+
+ I915_MAX_TC_PORTS
+};
+
enum dpio_channel {
DPIO_CH0,
DPIO_CH1
@@ -144,7 +179,7 @@ enum aux_ch {
AUX_CH_B,
AUX_CH_C,
AUX_CH_D,
- _AUX_CH_E, /* does not exist */
+ AUX_CH_E, /* ICL+ */
AUX_CH_F,
};
@@ -185,8 +220,13 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
@@ -249,7 +289,7 @@ struct intel_link_m_n {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- BIT(drm_plane_index(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base)))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -266,13 +306,17 @@ struct intel_link_m_n {
list_for_each_entry(intel_crtc, \
&(dev)->mode_config.crtc_list, \
base.head) \
- for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+ for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_dp(dev, intel_encoder) \
+ for_each_intel_encoder(dev, intel_encoder) \
+ for_each_if(intel_encoder_is_dp(intel_encoder))
+
#define for_each_intel_connector_iter(intel_connector, iter) \
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 16faea30114a..cd0f649b57a5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -56,7 +56,7 @@ struct dp_link_dpll {
struct dpll dpll;
};
-static const struct dp_link_dpll gen4_dpll[] = {
+static const struct dp_link_dpll g4x_dpll[] = {
{ 162000,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
{ 270000,
@@ -256,6 +256,17 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
return 810000;
}
+static int icl_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ if (port == PORT_B)
+ return 540000;
+
+ return 810000;
+}
+
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
@@ -285,10 +296,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* This should only be done once */
WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
- if (IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- max_rate = cnl_max_source_rate(intel_dp);
+ if (INTEL_GEN(dev_priv) == 10)
+ max_rate = cnl_max_source_rate(intel_dp);
+ else
+ max_rate = icl_max_source_rate(intel_dp);
} else if (IS_GEN9_LP(dev_priv)) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
@@ -516,7 +530,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
uint32_t DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+ "skipping pipe %c power sequencer kick due to port %c being active\n",
pipe_name(pipe), port_name(intel_dig_port->base.port)))
return;
@@ -532,9 +546,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_LINK_TRAIN_PAT_1;
if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SELECT_CHV(pipe);
- else if (pipe == PIPE_B)
- DP |= DP_PIPEB_SELECT;
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
@@ -557,7 +571,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
/*
* Similar magic as in intel_dp_enable_port().
* We _must_ do this port enable + disable trick
- * to make this power seqeuencer lock onto the port.
+ * to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
I915_WRITE(intel_dp->output_reg, DP);
@@ -586,14 +600,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* We don't have power sequencer currently.
* Pick one that's not used by other ports.
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -785,19 +793,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP &&
- encoder->type != INTEL_OUTPUT_DDI)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
-
- /* Skip pure DVI/HDMI DDI encoders */
- if (!i915_mmio_reg_valid(intel_dp->output_reg))
- continue;
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -939,7 +936,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -947,14 +944,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
bool done;
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- if (has_aux_irq)
- done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(10));
- else
- done = wait_for(C, 10) == 0;
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(10));
if (!done)
- DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
- has_aux_irq);
+ DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C
return status;
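
The wait is now unconditionally interrupt-driven. For reference,
wait_event_timeout() re-evaluates the condition on every wakeup and returns 0
only if the timeout elapses with the condition still false, which is why the
!done branch above reports an error; a minimal sketch with placeholder names:

	/* Sketch: remaining jiffies on success, 0 on timeout. */
	long remaining = wait_event_timeout(wq, condition_expr,
					    msecs_to_jiffies(10));
	if (!remaining)
		pr_err("hardware did not signal completion\n");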
@@ -1019,7 +1012,6 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
}
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider)
{
@@ -1040,7 +1032,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_INTERRUPT |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
@@ -1050,13 +1042,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
}
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
int send_bytes,
uint32_t unused)
{
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_INTERRUPT |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
@@ -1079,7 +1070,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -1134,7 +1124,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- has_aux_irq,
send_bytes,
aux_clock_divider);
@@ -1151,7 +1140,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
- status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+ status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
@@ -1350,6 +1339,9 @@ static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
case DP_AUX_D:
aux_ch = AUX_CH_D;
break;
+ case DP_AUX_E:
+ aux_ch = AUX_CH_E;
+ break;
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
@@ -1377,6 +1369,8 @@ intel_aux_power_domain(struct intel_dp *intel_dp)
return POWER_DOMAIN_AUX_C;
case AUX_CH_D:
return POWER_DOMAIN_AUX_D;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_E;
case AUX_CH_F:
return POWER_DOMAIN_AUX_F;
default:
@@ -1463,6 +1457,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_B:
case AUX_CH_C:
case AUX_CH_D:
+ case AUX_CH_E:
case AUX_CH_F:
return DP_AUX_CH_CTL(aux_ch);
default:
@@ -1481,6 +1476,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_B:
case AUX_CH_C:
case AUX_CH_D:
+ case AUX_CH_E:
case AUX_CH_F:
return DP_AUX_CH_DATA(aux_ch, index);
default:
@@ -1544,6 +1540,13 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
return max_rate >= 540000;
}
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
+{
+ int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
+
+ return max_rate >= 810000;
+}
+
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
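
Since source_rates[] is kept sorted in ascending order, the last entry is the
source's maximum rate; both HBR checks above reduce to a threshold test on
that entry (sketch):

	/* Sketch: rates[] is sorted ascending, so max is the last entry. */
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
	bool hbr2 = max_rate >= 540000; /* 5.4 Gbps per lane */
	bool hbr3 = max_rate >= 810000; /* 8.1 Gbps per lane */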
@@ -1553,8 +1556,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
int i, count = 0;
if (IS_G4X(dev_priv)) {
- divisor = gen4_dpll;
- count = ARRAY_SIZE(gen4_dpll);
+ divisor = g4x_dpll;
+ count = ARRAY_SIZE(g4x_dpll);
} else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -1970,7 +1973,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev_priv) && port == PORT_A) {
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1980,7 +1983,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- intel_dp->DP |= crtc->pipe << 29;
+ intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
@@ -2006,9 +2009,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (IS_CHERRYVIEW(dev_priv))
- intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
- else if (crtc->pipe == PIPE_B)
- intel_dp->DP |= DP_PIPEB_SELECT;
+ intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+ else
+ intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
}
}
@@ -2630,52 +2633,66 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+ enum port port, enum pipe *pipe)
+{
+ enum pipe p;
+
+ for_each_pipe(dev_priv, p) {
+ u32 val = I915_READ(TRANS_DP_CTL(p));
+
+ if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+ *pipe = p;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+
+ /* must initialize pipe to something for the asserts */
+ *pipe = PIPE_A;
+
+ return false;
+}
+
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe)
+{
+ bool ret;
+ u32 val;
+
+ val = I915_READ(dp_reg);
+
+ ret = val & DP_PORT_EN;
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+ return ret;
+}
+
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
+ ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, pipe);
- tmp = I915_READ(intel_dp->output_reg);
-
- if (!(tmp & DP_PORT_EN))
- goto out;
-
- if (IS_GEN7(dev_priv) && port == PORT_A) {
- *pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- enum pipe p;
-
- for_each_pipe(dev_priv, p) {
- u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
- if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
- *pipe = p;
- ret = true;
-
- goto out;
- }
- }
-
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
- i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev_priv)) {
- *pipe = DP_PORT_TO_PIPE_CHV(tmp);
- } else {
- *pipe = PORT_TO_PIPE(tmp);
- }
-
- ret = true;
-
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
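
intel_dp_port_enabled() reports the selected pipe even when the port is
disabled, so state asserts can name the pipe unconditionally. A hypothetical
assert built on it:

	/* Hypothetical assert; the pipe out-parameter is always filled in. */
	static void assert_dp_port_disabled(struct drm_i915_private *dev_priv,
					    i915_reg_t dp_reg, enum port port)
	{
		enum pipe pipe;

		WARN(intel_dp_port_enabled(dev_priv, dp_reg, port, &pipe),
		     "DP port %c enabled on pipe %c, expected disabled\n",
		     port_name(port), pipe_name(pipe));
	}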
@@ -2796,10 +2813,6 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
- intel_psr_disable(intel_dp, old_crtc_state);
-
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
@@ -2854,10 +2867,11 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
+ uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
- if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+ if (dp_train_pat & train_pat_mask)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
- dp_train_pat & DP_TRAINING_PATTERN_MASK);
+ dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2868,7 +2882,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ switch (dp_train_pat & train_pat_mask) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
@@ -2882,10 +2896,13 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
+ case DP_TRAINING_PATTERN_4:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+ break;
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -3008,10 +3025,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
intel_edp_backlight_on(pipe_config, conn_state);
- intel_psr_enable(intel_dp, pipe_config);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
@@ -3043,11 +3057,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
edp_panel_vdd_off_sync(intel_dp);
/*
- * VLV seems to get confused when multiple power seqeuencers
+ * VLV seems to get confused when multiple power sequencers
* have the same port selected (even if only one has power/vdd
* enabled). The failure manifests as vlv_wait_port_ready() failing
* CHV on the other hand doesn't seem to mind having the same port
- * selected in multiple power seqeuencers, but let's clear the
+ * selected in multiple power sequencers, but let's clear the
* port select always when logically disconnecting a power sequencer
* from a port.
*/
@@ -3066,16 +3080,9 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->pps_mutex);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
- enum port port;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
- port = dp_to_dig_port(intel_dp)->base.port;
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = encoder->port;
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active (e)DP port %c\n",
@@ -3197,14 +3204,14 @@ uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ if (HAS_DDI(dev_priv))
return intel_ddi_dp_voltage_max(encoder);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev_priv) && port == PORT_A)
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
@@ -3216,33 +3223,11 @@ uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
+ if (HAS_DDI(dev_priv)) {
+ return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3255,7 +3240,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3450,7 +3435,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
}
static uint32_t
-gen4_signal_levels(uint8_t train_set)
+g4x_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -3487,9 +3472,9 @@ gen4_signal_levels(uint8_t train_set)
return signal_levels;
}
-/* Gen6's DP voltage swing and pre-emphasis control */
+/* SNB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen6_edp_signal_levels(uint8_t train_set)
+snb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3515,9 +3500,9 @@ gen6_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7's DP voltage swing and pre-emphasis control */
+/* IVB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen7_edp_signal_levels(uint8_t train_set)
+ivb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3564,14 +3549,14 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
- signal_levels = gen7_edp_signal_levels(train_set);
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev_priv) && port == PORT_A) {
- signal_levels = gen6_edp_signal_levels(train_set);
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
- signal_levels = gen4_signal_levels(train_set);
+ signal_levels = g4x_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
@@ -3654,7 +3639,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
@@ -3683,8 +3668,9 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
- DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
- DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+ DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ DP_LINK_TRAIN_PAT_1;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
@@ -3739,8 +3725,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- intel_psr_init_dpcd(intel_dp);
-
/*
* Read the eDP display control registers.
*
@@ -3756,6 +3740,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ /*
+	 * This has to be called after intel_dp->edp_dpcd is filled; PSR checks
+	 * for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
+ */
+ intel_psr_init_dpcd(intel_dp);
+
/* Read the eDP 1.4+ supported link rates. */
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -3884,129 +3874,6 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
intel_dp->is_mst);
}
-static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state, bool disable_wa)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int ret = 0;
- int count = 0;
- int attempts = 10;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
- DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- ret = -EIO;
- goto out;
- }
-
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf & ~DP_TEST_SINK_START) < 0) {
- DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- ret = -EIO;
- goto out;
- }
-
- do {
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto out;
- }
- count = buf & DP_TEST_COUNT_MASK;
- } while (--attempts && count);
-
- if (attempts == 0) {
- DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
- ret = -ETIMEDOUT;
- }
-
- out:
- if (disable_wa)
- hsw_enable_ips(crtc_state);
- return ret;
-}
-
-static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int ret;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
- return -EIO;
-
- if (!(buf & DP_TEST_CRC_SUPPORTED))
- return -ENOTTY;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
- return -EIO;
-
- if (buf & DP_TEST_SINK_START) {
- ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
- if (ret)
- return ret;
- }
-
- hsw_disable_ips(crtc_state);
-
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf | DP_TEST_SINK_START) < 0) {
- hsw_enable_ips(crtc_state);
- return -EIO;
- }
-
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
- return 0;
-}
-
-int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int count, ret;
- int attempts = 6;
-
- ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
- if (ret)
- return ret;
-
- do {
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto stop;
- }
- count = buf & DP_TEST_COUNT_MASK;
-
- } while (--attempts && count == 0);
-
- if (attempts == 0) {
- DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
- ret = -ETIMEDOUT;
- goto stop;
- }
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
- ret = -EIO;
- goto stop;
- }
-
-stop:
- intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
- return ret;
-}
-
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@@ -4466,10 +4333,15 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
+ /* Handle CEC interrupts, if any */
+ drm_dp_cec_irq(&intel_dp->aux);
+
/* defer to the hotplug work for link retraining if needed */
if (intel_dp_needs_link_retrain(intel_dp))
return false;
+ intel_psr_short_pulse(intel_dp);
+
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
@@ -4537,14 +4409,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum drm_connector_status status;
-
- status = intel_panel_detect(dev_priv);
- if (status == connector_status_unknown)
- status = connector_status_connected;
-
- return status;
+ return connector_status_connected;
}
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
@@ -4780,6 +4645,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = edid;
intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ drm_dp_cec_set_edid(&intel_dp->aux, edid);
}
static void
@@ -4787,6 +4653,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ drm_dp_cec_unset_edid(&intel_dp->aux);
kfree(intel_connector->detect_edid);
intel_connector->detect_edid = NULL;
@@ -4805,7 +4672,7 @@ intel_dp_long_pulse(struct intel_connector *connector)
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
- /* Can't disconnect eDP, but you can close the lid... */
+ /* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
@@ -4975,6 +4842,7 @@ static int
intel_dp_connector_register(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = connector->dev;
int ret;
ret = intel_connector_register(connector);
@@ -4987,13 +4855,20 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
- return drm_dp_aux_register(&intel_dp->aux);
+ ret = drm_dp_aux_register(&intel_dp->aux);
+ if (!ret)
+ drm_dp_cec_register_connector(&intel_dp->aux,
+ connector->name, dev->dev);
+ return ret;
}
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
- drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ drm_dp_cec_unregister_connector(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
intel_connector_unregister(connector);
}
@@ -5319,14 +5194,14 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe;
- if ((intel_dp->DP & DP_PORT_EN) == 0)
- return INVALID_PIPE;
+ if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, &pipe))
+ return pipe;
- if (IS_CHERRYVIEW(dev_priv))
- return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
- else
- return PORT_TO_PIPE(intel_dp->DP);
+ return INVALID_PIPE;
}
void intel_dp_encoder_reset(struct drm_encoder *encoder)
@@ -5675,7 +5550,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
/*
* On some VLV machines the BIOS can leave the VDD
- * enabled even on power seqeuencers which aren't
+ * enabled even on power sequencers which aren't
* hooked up to any port. This would mess up the
* power domain tracking the first time we pick
* one of these power sequencers for use since
@@ -5683,7 +5558,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
* already on and therefore wouldn't grab the power
* domain reference. Disable VDD first to avoid this.
* This also avoids spuriously turning the VDD on as
- * soon as the new power seqeuencer gets initialized.
+ * soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
u32 pp = ironlake_get_pp_control(intel_dp);
@@ -5721,10 +5596,20 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- if (port == PORT_A)
+ switch (port) {
+ case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
- else
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
}
pp_on |= port_sel;
@@ -6179,7 +6064,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
} else {
kfree(edid);
@@ -6268,8 +6153,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
/* Set connector link status to BAD and send a Uevent to notify
* userspace to do a modeset.
*/
- drm_mode_connector_set_link_status_property(connector,
- DRM_MODE_LINK_STATUS_BAD);
+ drm_connector_set_link_status_property(connector,
+ DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
drm_kms_helper_hotplug_event(connector->dev);
@@ -6382,7 +6267,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+ if (IS_G45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -6462,7 +6347,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
@@ -6481,37 +6365,44 @@ err_connector_alloc:
return false;
}
-void intel_dp_mst_suspend(struct drm_device *dev)
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ struct intel_encoder *encoder;
- /* disable MST */
- for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
- if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- if (intel_dig_port->dp.is_mst)
- drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!intel_dp->can_mst)
+ continue;
+
+ if (intel_dp->is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
}
}
-void intel_dp_mst_resume(struct drm_device *dev)
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ struct intel_encoder *encoder;
- for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
int ret;
- if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
if (ret)
- intel_dp_check_mst_status(&intel_dig_port->dp);
+ intel_dp_check_mst_status(intel_dp);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 2bb2ceb9d463..357136f17f85 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -26,7 +26,7 @@
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
- uint8_t reg_val = 0;
+ u8 reg_val = 0;
/* Early return when display use other mechanism to enable backlight. */
if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
@@ -54,11 +54,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
-static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t read_val[2] = { 0x0 };
- uint16_t level = 0;
+ u8 read_val[2] = { 0x0 };
+ u16 level = 0;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
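
A sketch of the MSB/LSB combination this helper performs, assuming the eDP
byte-count capability bit selects 16-bit brightness:

	/* Sketch: read_val[0] holds the MSB register, read_val[1] the LSB. */
	u16 level = read_val[0];
	if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
		level = (read_val[0] << 8) | read_val[1];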
@@ -82,7 +82,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t vals[2] = { 0x0 };
+ u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+ u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 3fcaa98b9055..4da6e33c7fa1 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -219,32 +219,47 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
}
/*
- * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * Pick training pattern for channel equalization. Training Pattern 4 for HBR3
+ * or for 1.4 devices that support it, Training Pattern 3 for HBR2
* or 1.2 devices that support it, Training Pattern 2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
{
- u32 training_pattern = DP_TRAINING_PATTERN_2;
- bool source_tps3, sink_tps3;
+ bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/*
+ * Intel platforms that support HBR3 also support TPS4. It is mandatory
+ * for all downstream devices that support HBR3. There are no known eDP
+	 * panels that support TPS4 as of Feb 2018, per the VESA eDP_v1.4b_E1
+	 * specification.
+ */
+ source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
+ sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd);
+ if (source_tps4 && sink_tps4) {
+ return DP_TRAINING_PATTERN_4;
+ } else if (intel_dp->link_rate == 810000) {
+ if (!source_tps4)
+ DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ if (!sink_tps4)
+ DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+ }
+ /*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
-
if (source_tps3 && sink_tps3) {
- training_pattern = DP_TRAINING_PATTERN_3;
- } else if (intel_dp->link_rate == 540000) {
+ return DP_TRAINING_PATTERN_3;
+ } else if (intel_dp->link_rate >= 540000) {
if (!source_tps3)
- DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+ DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
- DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+ DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
- return training_pattern;
+ return DP_TRAINING_PATTERN_2;
}
static bool
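
The selection order above is strictly TPS4 > TPS3 > TPS2, each requiring both
source and sink support; distilled into a hypothetical helper:

	/* Sketch of the precedence, not the driver's code. */
	static u32 pick_training_pattern(bool src_tps4, bool snk_tps4,
					 bool src_tps3, bool snk_tps3)
	{
		if (src_tps4 && snk_tps4)
			return DP_TRAINING_PATTERN_4; /* HBR3-capable pair */
		if (src_tps3 && snk_tps3)
			return DP_TRAINING_PATTERN_3; /* HBR2-capable pair */
		return DP_TRAINING_PATTERN_2; /* always-supported fallback */
	}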
@@ -256,11 +271,13 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);
+ /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
+ if (training_pattern != DP_TRAINING_PATTERN_4)
+ training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
- training_pattern |
- DP_LINK_SCRAMBLING_DISABLE)) {
+ training_pattern)) {
DRM_ERROR("failed to start channel equalization\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 5890500a3a8b..7e3e01607643 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -403,20 +403,10 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
-static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_dp)
- return NULL;
- return &intel_dp->mst_encoders[0]->base.base;
-}
-
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
- .best_encoder = intel_mst_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
};
@@ -476,8 +466,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
- ret = drm_mode_connector_attach_encoder(&intel_connector->base,
- enc);
+ ret = drm_connector_attach_encoder(&intel_connector->base, enc);
if (ret)
goto err;
}
@@ -485,7 +474,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- ret = drm_mode_connector_set_path_property(connector, pathprop);
+ ret = drm_connector_set_path_property(connector, pathprop);
if (ret)
goto err;
@@ -524,7 +513,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
intel_connector->mst_port = NULL;
drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
- drm_connector_unreference(connector);
+ drm_connector_put(connector);
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 383fbc15113d..b51ad2917dbe 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -163,8 +163,8 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
- unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
- unsigned old_mask;
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ unsigned int old_mask;
if (WARN_ON(pll == NULL))
return;
@@ -207,7 +207,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
- unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
if (INTEL_GEN(dev_priv) < 5)
@@ -2525,6 +2525,77 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
return true;
}
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id)
+{
+ uint32_t cfgcr0, cfgcr1;
+ uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+ const struct skl_wrpll_params *params;
+ int index, n_entries, link_clock;
+
+ /* Read back values from DPLL CFGCR registers */
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+
+ dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
+ dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+ pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
+ kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
+ qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
+ DPLL_CFGCR1_QDIV_MODE_SHIFT;
+ qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+
+ params = dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
+
+ for (index = 0; index < n_entries; index++) {
+ if (dco_integer == params[index].dco_integer &&
+ dco_fraction == params[index].dco_fraction &&
+ pdiv == params[index].pdiv &&
+ kdiv == params[index].kdiv &&
+ qdiv_mode == params[index].qdiv_mode &&
+ qdiv_ratio == params[index].qdiv_ratio)
+ break;
+ }
+
+ /* Map PLL Index to Link Clock */
+ switch (index) {
+ default:
+ MISSING_CASE(index);
+ /* fall through */
+ case 0:
+ link_clock = 540000;
+ break;
+ case 1:
+ link_clock = 270000;
+ break;
+ case 2:
+ link_clock = 162000;
+ break;
+ case 3:
+ link_clock = 324000;
+ break;
+ case 4:
+ link_clock = 216000;
+ break;
+ case 5:
+ link_clock = 432000;
+ break;
+ case 6:
+ link_clock = 648000;
+ break;
+ case 7:
+ link_clock = 810000;
+ break;
+ }
+
+ return link_clock;
+}
+
static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
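
icl_calc_dp_combo_pll_link() above inverts the PLL programming: it matches the
read-back dividers against the reference WRPLL tables and maps the table index
to a link clock (index 0 -> 540000, 1 -> 270000, 2 -> 162000, ..., 7 -> 810000
kHz). A hypothetical readout call:

	/* Hypothetical use: decode DPLL 0's programmed dividers. */
	int link_clock =
		icl_calc_dp_combo_pll_link(dev_priv, DPLL_ID_ICL_DPLL0);
	/* e.g. dividers matching table index 2 yield 162000 (RBR) */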
@@ -2569,6 +2640,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
switch (div1) {
default:
MISSING_CASE(div1);
+ /* fall through */
case 2:
hsdiv = 0;
break;
@@ -2742,25 +2814,31 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
- pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART;
-
- if (refclk_khz != 38400) {
- pll_state->mg_pll_tdc_coldst_bias |=
- MG_PLL_TDC_COLDST_IREFINT_EN |
- MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
- MG_PLL_TDC_COLDST_COLDSTART |
- MG_PLL_TDC_TDCOVCCORR_EN |
- MG_PLL_TDC_TDCSEL(3);
-
- pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
- MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
- MG_PLL_BIAS_BIAS_BONUS(10) |
- MG_PLL_BIAS_BIASCAL_EN |
- MG_PLL_BIAS_CTRIM(12) |
- MG_PLL_BIAS_VREF_RDAC(4) |
- MG_PLL_BIAS_IREFTRIM(iref_trim);
+ pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
}
+ pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+
return true;
}
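
The *_mask fields record which bits software owns for the current refclk: with
a 38.4 MHz reference only the COLDSTART bit is owned (and the bias value is
unused), otherwise every bit is (-1U). Masking both the computed and the
read-back values means reserved bits never cause a spurious state mismatch;
the idea in isolation:

	/* Sketch: compare only the bits software owns. */
	u32 sw_bits = computed_value & owned_mask; /* what we program */
	u32 hw_bits = I915_READ(reg) & owned_mask; /* what we read back */
	bool matches = sw_bits == hw_bits; /* reserved bits ignored */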
@@ -2787,10 +2865,17 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
case PORT_D:
case PORT_E:
case PORT_F:
- min = icl_port_to_mg_pll_id(port);
- max = min;
- ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
- &pll_state);
+ if (0 /* TODO: TBT PLLs */) {
+ min = DPLL_ID_ICL_TBTPLL;
+ max = min;
+ ret = icl_calc_dpll_state(crtc_state, encoder, clock,
+ &pll_state);
+ } else {
+ min = icl_port_to_mg_pll_id(port);
+ max = min;
+ ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
+ &pll_state);
+ }
break;
default:
MISSING_CASE(port);
@@ -2820,9 +2905,12 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
switch (id) {
default:
MISSING_CASE(id);
+ /* fall through */
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
return CNL_DPLL_ENABLE(id);
+ case DPLL_ID_ICL_TBTPLL:
+ return TBT_PLL_ENABLE;
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
@@ -2850,6 +2938,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
+ case DPLL_ID_ICL_TBTPLL:
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
break;
@@ -2859,18 +2948,41 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
case DPLL_ID_ICL_MGPLL4:
port = icl_mg_pll_id_to_port(id);
hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
hw_state->mg_clktop2_coreclkctl1 =
I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
hw_state->mg_clktop2_hsclkctl =
I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+
hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
hw_state->mg_pll_tdc_coldst_bias =
I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+
+ if (dev_priv->cdclk.hw.ref == 38400) {
+ hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ hw_state->mg_pll_bias_mask = 0;
+ } else {
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
+ }
+
+ hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
break;
default:
MISSING_CASE(id);
@@ -2898,19 +3010,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum port port = icl_mg_pll_id_to_port(pll->info->id);
+ u32 val;
+
+ /*
+ * Some of the following registers have reserved fields, so program
+ * these with RMW based on a mask. The mask can be fixed or generated
+ * during the calc/readout phase if the mask depends on some other HW
+ * state like refclk, see icl_calc_mg_pll_state().
+ */
+ val = I915_READ(MG_REFCLKIN_CTL(port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ I915_WRITE(MG_REFCLKIN_CTL(port), val);
+
+ val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val);
+
+ val = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val);
- I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl);
- I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port),
- hw_state->mg_clktop2_coreclkctl1);
- I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl);
I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
- I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias);
- I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port),
- hw_state->mg_pll_tdc_coldst_bias);
+
+ val = I915_READ(MG_PLL_BIAS(port));
+ val &= ~hw_state->mg_pll_bias_mask;
+ val |= hw_state->mg_pll_bias;
+ I915_WRITE(MG_PLL_BIAS(port), val);
+
+ val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+ val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val);
+
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
}
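
The read-modify-write pattern used throughout the function above, in isolation
(register and mask names are placeholders):

	/* RMW: update only the owned field, pass reserved bits through. */
	u32 val = I915_READ(EXAMPLE_REG);
	val &= ~EXAMPLE_FIELD_MASK; /* clear the field we own */
	val |= new_field_bits; /* other bits stay untouched */
	I915_WRITE(EXAMPLE_REG, val);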
@@ -2936,6 +3077,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
+ case DPLL_ID_ICL_TBTPLL:
icl_dpll_write(dev_priv, pll);
break;
case DPLL_ID_ICL_MGPLL1:
@@ -3034,6 +3176,7 @@ static const struct intel_shared_dpll_funcs icl_pll_funcs = {
static const struct dpll_info icl_plls[] = {
{ "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
{ "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
{ "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
{ "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 7a0cd564a9ee..7e522cf4f13f 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -114,23 +114,27 @@ enum intel_dpll_id {
*/
DPLL_ID_ICL_DPLL1 = 1,
/**
+ * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+ */
+ DPLL_ID_ICL_TBTPLL = 2,
+ /**
* @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
*/
- DPLL_ID_ICL_MGPLL1 = 2,
+ DPLL_ID_ICL_MGPLL1 = 3,
/**
* @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
*/
- DPLL_ID_ICL_MGPLL2 = 3,
+ DPLL_ID_ICL_MGPLL2 = 4,
/**
* @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
*/
- DPLL_ID_ICL_MGPLL3 = 4,
+ DPLL_ID_ICL_MGPLL3 = 5,
/**
* @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
*/
- DPLL_ID_ICL_MGPLL4 = 5,
+ DPLL_ID_ICL_MGPLL4 = 6,
};
-#define I915_NUM_PLLS 6
+#define I915_NUM_PLLS 7
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -176,6 +180,8 @@ struct intel_dpll_hw_state {
uint32_t mg_pll_ssc;
uint32_t mg_pll_bias;
uint32_t mg_pll_tdc_coldst_bias;
+ uint32_t mg_pll_bias_mask;
+ uint32_t mg_pll_tdc_coldst_bias_mask;
};
/**
@@ -336,5 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0361130500a6..8fc61e96754f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -39,6 +39,7 @@
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
+#include <media/cec-notifier.h>
/**
* __wait_for - magic wait macro
@@ -158,12 +159,6 @@
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
-/* Maximum cursor sizes */
-#define GEN2_CURSOR_WIDTH 64
-#define GEN2_CURSOR_HEIGHT 64
-#define MAX_CURSOR_WIDTH 256
-#define MAX_CURSOR_HEIGHT 256
-
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -194,7 +189,6 @@ enum intel_output_type {
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_i915_gem_object *obj;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
@@ -261,7 +255,8 @@ struct intel_encoder {
struct intel_crtc_state *pipe_config);
/* Returns a mask of power domains that need to be referenced as part
* of the hardware state readout code. */
- u64 (*get_power_domains)(struct intel_encoder *encoder);
+ u64 (*get_power_domains)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -310,6 +305,8 @@ struct intel_panel {
} backlight;
};
+struct intel_digital_port;
+
/*
* This structure serves as a translation layer between the generic HDCP code
* and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -488,6 +485,8 @@ struct intel_atomic_state {
*/
bool skip_intermediate_wm;
+ bool rps_interactive;
+
/* Gen9+ only */
struct skl_ddb_values wm_results;
@@ -953,6 +952,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
bool has_fbc;
+ bool has_ccs;
int max_downscale;
uint32_t frontbuffer_bit;
@@ -971,7 +971,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
- bool (*get_hw_state)(struct intel_plane *plane);
+ bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
@@ -1004,7 +1004,7 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
-#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
+#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
i915_reg_t hdmi_reg;
@@ -1017,6 +1017,7 @@ struct intel_hdmi {
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
+ struct cec_notifier *cec_notifier;
};
struct intel_dp_mst_encoder;
@@ -1139,7 +1140,6 @@ struct intel_dp {
* register with to kick off an AUX transaction.
*/
uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
- bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
@@ -1252,22 +1252,29 @@ intel_attached_encoder(struct drm_connector *connector)
return to_intel_connector(connector)->encoder;
}
-static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_DDI:
- WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (intel_encoder_is_dig_port(intel_encoder))
return container_of(encoder, struct intel_digital_port,
base.base);
- default:
+ else
return NULL;
- }
}
static inline struct intel_dp_mst_encoder *
@@ -1281,6 +1288,20 @@ static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
return &enc_to_dig_port(encoder)->dp;
}
+static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ return true;
+ case INTEL_OUTPUT_DDI:
+ /* Skip pure HDMI/DVI DDI encoders */
+ return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+ default:
+ return false;
+ }
+}
+
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@@ -1337,9 +1358,6 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
@@ -1376,6 +1394,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
@@ -1388,12 +1408,9 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1407,6 +1424,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+ u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_map_plls_to_ports(struct drm_crtc *crtc,
@@ -1488,6 +1507,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
@@ -1615,6 +1637,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
+u16 skl_scaler_calc_phase(int sub, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
uint32_t pixel_format);
@@ -1644,6 +1667,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1661,8 +1687,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-int intel_dp_sink_crc(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@@ -1676,8 +1700,8 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_device *dev);
-void intel_dp_mst_resume(struct drm_device *dev);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
@@ -1707,6 +1731,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
@@ -1726,8 +1751,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-/* intel_dsi.c */
-void intel_dsi_init(struct drm_i915_private *dev_priv);
+/* vlv_dsi.c */
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1821,6 +1846,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1867,7 +1894,6 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
@@ -1911,12 +1937,12 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -2058,12 +2084,13 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
/* intel_sdvo.c */
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t reg, enum port port);
/* intel_sprite.c */
-bool intel_format_is_yuv(u32 format);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -2076,10 +2103,9 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
-bool intel_format_is_yuv(uint32_t format);
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
@@ -2145,7 +2171,6 @@ void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
/* intel_pipe_crc.c */
-int intel_pipe_crc_create(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt);
@@ -2161,5 +2186,4 @@ static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
}
#endif
-extern const struct file_operations i915_display_crc_ctl_fops;
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 7afeb9580f41..ad7c1cb32983 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -129,21 +129,29 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
-/* intel_dsi.c */
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+/* vlv_dsi.c */
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
-/* intel_dsi_pll.c */
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config);
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config);
-void intel_disable_dsi_pll(struct intel_encoder *encoder);
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config);
-void intel_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port);
+/* vlv_dsi_pll.c */
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void vlv_dsi_pll_disable(struct intel_encoder *encoder);
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void bxt_dsi_pll_disable(struct intel_encoder *encoder);
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 4d6ffa7b3e7b..ac83d6b89ae0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -181,7 +181,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- wait_for_dsi_fifo_empty(intel_dsi, port);
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
data += len;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 61d908e0df0e..4e142ff49708 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
- if (!(tmp & DVO_ENABLE))
- return false;
-
- *pipe = PORT_TO_PIPE(tmp);
+ *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
- return true;
+ return tmp & DVO_ENABLE;
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
@@ -282,8 +278,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
- if (pipe == 1)
- dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_SEL(pipe);
dvo_val |= DVO_PIPE_STALL;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
@@ -443,7 +438,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
int gpio;
bool dvoinit;
enum pipe pipe;
- uint32_t dpll[I915_MAX_PIPES];
+ u32 dpll[I915_MAX_PIPES];
enum port port;
/*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 1590375f31cb..2d1952849d69 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -25,7 +25,6 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
-#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@@ -230,6 +229,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
break;
default:
MISSING_CASE(class);
+ /* fall through */
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
@@ -302,6 +302,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
+ if (engine->context_size)
+ DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -456,28 +458,16 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
- return true;
-
- return false;
-}
-
static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- execlists->csb_use_mmio = csb_force_mmio(engine->i915);
-
execlists->port_mask = 1;
BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority = INT_MIN;
- execlists->queue = RB_ROOT;
- execlists->first = NULL;
+ execlists->queue = RB_ROOT_CACHED;
}
/**
@@ -492,6 +482,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+ lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
@@ -499,7 +490,8 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+ unsigned int size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -515,7 +507,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
@@ -533,7 +525,7 @@ err_unref:
return ret;
}
-static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->scratch);
}
@@ -585,7 +577,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -645,6 +637,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
+static void __intel_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_context_unpin(to_intel_context(ctx, engine));
+}
+
/**
 * intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -658,7 +656,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
- struct intel_ring *ring;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
@@ -670,18 +669,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ring = intel_context_pin(engine->i915->kernel_context, engine);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ ce = intel_context_pin(i915->kernel_context, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
/*
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (engine->i915->preempt_context) {
- ring = intel_context_pin(engine->i915->preempt_context, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
+ if (i915->preempt_context) {
+ ce = intel_context_pin(i915->preempt_context, engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
goto err_unpin_kernel;
}
}
@@ -690,7 +689,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
goto err_unpin_preempt;
- if (HWS_NEEDS_PHYSICAL(engine->i915))
+ if (HWS_NEEDS_PHYSICAL(i915))
ret = init_phys_status_page(engine);
else
ret = init_status_page(engine);
@@ -702,10 +701,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+
err_unpin_kernel:
- intel_context_unpin(engine->i915->kernel_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
return ret;
}
@@ -718,6 +718,8 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+
intel_engine_cleanup_scratch(engine);
if (HWS_NEEDS_PHYSICAL(engine->i915))
@@ -732,9 +734,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
- intel_context_unpin(engine->i915->kernel_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
}
@@ -769,6 +771,35 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+ const i915_reg_t mode = RING_MI_MODE(base);
+ int err;
+
+ if (INTEL_GEN(dev_priv) < 3)
+ return -ENODEV;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+ err = 0;
+ if (__intel_wait_for_register_fw(dev_priv,
+ mode, MODE_IDLE, MODE_IDLE,
+ 1000, 0,
+ NULL)) {
+ GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ err = -ETIMEDOUT;
+ }
+
+ /* A final mmio read to hopefully flush GPU writes to memory */
+ POSTING_READ_FW(mode);
+
+ return err;
+}
+
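intel_engine_stop_cs() returns -ENODEV on hardware without the MI_MODE stop bit and -ETIMEDOUT when the ring never reports idle. A hedged sketch of how a reset-preparation caller might consume those results (the function name and call site below are assumptions, not part of this patch):

/*
 * Hypothetical reset-prepare style caller: stop the command streamer
 * before touching engine state, tolerating old hardware and timeouts.
 */
static void example_quiesce_engine(struct intel_engine_cs *engine)
{
	switch (intel_engine_stop_cs(engine)) {
	case -ENODEV:
		break;	/* no MI_MODE stop bit before gen3; nothing to do */
	case -ETIMEDOUT:
		/* ring did not report idle; a full reset still follows */
		break;
	default:
		break;	/* command streamer stopped */
	}
}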
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
@@ -780,12 +811,32 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
}
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr_s_ss_select;
+ u32 slice = fls(sseu->slice_mask);
+ u32 subslice = fls(sseu->subslice_mask[slice]);
+
+ if (INTEL_GEN(dev_priv) == 10)
+ mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
+ GEN8_MCR_SUBSLICE(subslice);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
+ GEN11_MCR_SUBSLICE(subslice);
+ else
+ mcr_s_ss_select = 0;
+
+ return mcr_s_ss_select;
+}
+
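intel_calculate_mcr_s_ss_select() relies on fls() picking the highest enabled slice and subslice out of the fuse masks; fls() returns the 1-based index of the highest set bit, or 0 for an empty mask. A minimal standalone illustration, with assumed fuse values:

#include <stdio.h>

/* Local fls(): 1-based index of the highest set bit, 0 for an empty
 * mask, matching the kernel helper used above. */
static int fls_u32(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int slice_mask = 0x1;    /* assumed fuse: slice 0 only */
	unsigned int subslice_mask = 0x7; /* assumed: subslices 0-2 present */

	printf("slice select = %d, subslice select = %d\n",
	       fls_u32(slice_mask), fls_u32(subslice_mask));
	/* prints: slice select = 1, subslice select = 3 */
	return 0;
}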
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
uint32_t mcr_slice_subslice_mask;
uint32_t mcr_slice_subslice_select;
+ uint32_t default_mcr_s_ss_select;
uint32_t mcr;
uint32_t ret;
enum forcewake_domains fw_domains;
@@ -802,6 +853,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
GEN8_MCR_SUBSLICE(subslice);
}
+ default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -812,11 +865,10 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
- /*
- * The HW expects the slice and sublice selectors to be reset to 0
- * after reading out the registers.
- */
- WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+
+ WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
+ default_mcr_s_ss_select);
+
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
@@ -824,6 +876,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
ret = I915_READ_FW(reg);
mcr &= ~mcr_slice_subslice_mask;
+ mcr |= default_mcr_s_ss_select;
+
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
@@ -934,11 +988,24 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- if (READ_ONCE(engine->execlists.active))
- return false;
+ if (READ_ONCE(engine->execlists.active)) {
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
- /* ELSP is empty, but there are ready requests? */
- if (READ_ONCE(engine->execlists.first))
+ if (READ_ONCE(engine->execlists.active))
+ return false;
+ }
+
+ /* ELSP is empty, but there are ready requests? E.g. after reset */
+ if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
return false;
/* Ring stopped? */
@@ -978,8 +1045,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
*/
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
- const struct i915_gem_context * const kernel_context =
- engine->i915->kernel_context;
+ const struct intel_context *kernel_context =
+ to_intel_context(engine->i915->kernel_context, engine);
struct i915_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -991,7 +1058,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
*/
rq = __i915_gem_active_peek(&engine->timeline.last_request);
if (rq)
- return rq->ctx == kernel_context;
+ return rq->hw_context == kernel_context;
else
return engine->last_retired_context == kernel_context;
}
@@ -1006,6 +1073,28 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
}
/**
+ * intel_engines_sanitize: called after the GPU has lost power
+ * @i915: the i915 device
+ *
+ * Anytime we reset the GPU, either with an explicit GPU reset or through a
+ * PCI power cycle, the GPU loses state and we must reset our state tracking
+ * to match. Note that calling intel_engines_sanitize() if the GPU has not
+ * been reset results in much confusion!
+ */
+void intel_engines_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ GEM_TRACE("\n");
+
+ for_each_engine(engine, i915, id) {
+ if (engine->reset.reset)
+ engine->reset.reset(engine, NULL);
+ }
+}
+
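A hedged sketch of where intel_engines_sanitize() plausibly fits (the call site below is an assumption; this hunk only adds the helper):

/*
 * Hypothetical resume-path caller: after a PCI power cycle or explicit
 * reset, drop stale engine state before reinitialising the GT.
 */
static void example_gt_resume(struct drm_i915_private *i915)
{
	intel_engines_sanitize(i915);	/* engines lost power: reset tracking */
	/* ...regular GEM/engine re-initialisation follows... */
}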
+/**
* intel_engines_park: called when the GT is transitioning from busy->idle
* @i915: the i915 device
*
@@ -1043,6 +1132,11 @@ void intel_engines_park(struct drm_i915_private *i915)
if (engine->park)
engine->park(engine);
+ if (engine->pinned_default_state) {
+ i915_gem_object_unpin_map(engine->default_state);
+ engine->pinned_default_state = NULL;
+ }
+
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
@@ -1060,6 +1154,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
+ void *map;
+
+ /* Pin the default state for fast resets from atomic context. */
+ map = NULL;
+ if (engine->default_state)
+ map = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (!IS_ERR_OR_NULL(map))
+ engine->pinned_default_state = map;
+
if (engine->unpark)
engine->unpark(engine);
@@ -1067,6 +1171,26 @@ void intel_engines_unpark(struct drm_i915_private *i915)
}
}
+/**
+ * intel_engine_lost_context: called when the GPU is reset into unknown state
+ * @engine: the engine
+ *
+ * We have either reset the GPU or are otherwise about to lose state tracking of
+ * the current GPU logical state (e.g. suspend). On next use, it is therefore
+ * imperative that we make no presumptions about the current state and load
+ * from scratch.
+ */
+void intel_engine_lost_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+}
+
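A hedged sketch of a caller looping over all engines (the call site is an assumption; this hunk only adds the helper):

/*
 * Hypothetical caller: invalidate per-engine context tracking for every
 * engine once the GPU state is unknown.
 */
static void example_mark_contexts_lost(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id)
		intel_engine_lost_context(engine);
}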
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@@ -1151,7 +1275,7 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
rowsize, sizeof(u32),
line, sizeof(line),
false) >= sizeof(line));
- drm_printf(m, "%08zx %s\n", pos, line);
+ drm_printf(m, "[%04zx] %s\n", pos, line);
prev = buf + pos;
skip = false;
@@ -1166,6 +1290,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
+ if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+ drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
I915_READ(RING_START(engine->mmio_base)));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
@@ -1232,12 +1358,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
- drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
+ drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n",
read, execlists->csb_head,
write,
intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted)),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1287,6 +1411,39 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
}
}
+static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
+{
+ void *ring;
+ int size;
+
+ drm_printf(m,
+ "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
+ size = rq->tail - rq->head;
+ if (rq->tail < rq->head)
+ size += rq->ring->size;
+
+ ring = kmalloc(size, GFP_ATOMIC);
+ if (ring) {
+ const void *vaddr = rq->ring->vaddr;
+ unsigned int head = rq->head;
+ unsigned int len = 0;
+
+ if (rq->tail < head) {
+ len = rq->ring->size - head;
+ memcpy(ring, vaddr + head, len);
+ head = 0;
+ }
+ memcpy(ring + len, vaddr + head, size - len);
+
+ hexdump(m, ring, size);
+ kfree(ring);
+ }
+}
+
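print_request_ring() snapshots the range [head, tail) out of a circular ring, copying in two pieces when the range wraps past the end of the buffer. A standalone (userspace) illustration of the same two-memcpy pattern:

#include <stdio.h>
#include <string.h>

/* Copy [head, tail) out of a circular buffer, splitting the copy when
 * the range wraps, exactly as print_request_ring() does above. */
static void snapshot(const unsigned char *ring, unsigned int ring_size,
		     unsigned int head, unsigned int tail,
		     unsigned char *out)
{
	unsigned int len = 0;

	if (tail < head) {		/* wrapped: copy head..end first */
		len = ring_size - head;
		memcpy(out, ring + head, len);
		head = 0;
	}
	memcpy(out + len, ring + head, tail - head);
}

int main(void)
{
	unsigned char ring[8] = "ABCDEFGH";
	unsigned char out[8];

	snapshot(ring, sizeof(ring), 6, 3, out);	/* "GH" then "ABC" */
	printf("%.5s\n", out);				/* prints "GHABC" */
	return 0;
}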
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@@ -1296,6 +1453,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq, *last;
+ unsigned long flags;
struct rb_node *rb;
int count;
@@ -1336,11 +1494,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq = i915_gem_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
- drm_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
drm_printf(m, "\t\tring->head: 0x%08x\n",
@@ -1351,6 +1505,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
+
+ print_request_ring(m, rq);
}
rcu_read_unlock();
@@ -1362,7 +1518,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- spin_lock_irq(&engine->timeline.lock);
+ local_irq_save(flags);
+ spin_lock(&engine->timeline.lock);
last = NULL;
count = 0;
@@ -1384,7 +1541,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
last = NULL;
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
- for (rb = execlists->first; rb; rb = rb_next(rb)) {
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
@@ -1404,22 +1561,21 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request(m, last, "\t\tQ ");
}
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock(&engine->timeline.lock);
- spin_lock_irq(&b->rb_lock);
+ spin_lock(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
drm_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->rb_lock);
+ spin_unlock(&b->rb_lock);
+ local_irq_restore(flags);
- drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
+ drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
engine->irq_posted,
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)),
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
&engine->irq_posted)));
drm_printf(m, "HWSP:\n");
@@ -1468,8 +1624,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
- tasklet_disable(&execlists->tasklet);
- write_seqlock_irqsave(&engine->stats.lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ write_seqlock(&engine->stats.lock);
if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY;
@@ -1493,8 +1649,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
unlock:
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
- tasklet_enable(&execlists->tasklet);
+ write_sequnlock(&engine->stats.lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index b431b6733cc1..01d1d2088f04 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
return dev_priv->fbc.active;
}
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct drm_i915_private *dev_priv =
- container_of(__work, struct drm_i915_private, fbc.work.work);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_work *work = &fbc->work;
- struct intel_crtc *crtc = fbc->crtc;
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
-
- if (drm_crtc_vblank_get(&crtc->base)) {
- /* CRTC is now off, leave FBC deactivated */
- mutex_lock(&fbc->lock);
- work->scheduled = false;
- mutex_unlock(&fbc->lock);
- return;
- }
-
-retry:
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * WaFbcWaitForVBlankBeforeEnable:ilk,snb
- *
- * It is also worth mentioning that since work->scheduled_vblank can be
- * updated multiple times by the other threads, hitting the timeout is
- * not an error condition. We'll just end up hitting the "goto retry"
- * case below.
- */
- wait_event_timeout(vblank->queue,
- drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
- msecs_to_jiffies(50));
-
- mutex_lock(&fbc->lock);
-
- /* Were we cancelled? */
- if (!work->scheduled)
- goto out;
-
- /* Were we delayed again while this function was sleeping? */
- if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
- mutex_unlock(&fbc->lock);
- goto retry;
- }
-
- intel_fbc_hw_activate(dev_priv);
-
- work->scheduled = false;
-
-out:
- mutex_unlock(&fbc->lock);
- drm_crtc_vblank_put(&crtc->base);
-}
-
-static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_work *work = &fbc->work;
-
- WARN_ON(!mutex_is_locked(&fbc->lock));
- if (WARN_ON(!fbc->enabled))
- return;
-
- if (drm_crtc_vblank_get(&crtc->base)) {
- DRM_ERROR("vblank not available for FBC on pipe %c\n",
- pipe_name(crtc->pipe));
- return;
- }
-
- /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
- * this function since we're not releasing fbc.lock, so it won't have an
- * opportunity to grab it to discover that it was cancelled. So we just
- * update the expected jiffy count. */
- work->scheduled = true;
- work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
- drm_crtc_vblank_put(&crtc->base);
-
- schedule_work(&work->work);
-}
-
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
const char *reason)
{
@@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
WARN_ON(!mutex_is_locked(&fbc->lock));
- /* Calling cancel_work() here won't help due to the fact that the work
- * function grabs fbc->lock. Just set scheduled to false so the work
- * function can know it was cancelled. */
- fbc->work.scheduled = false;
-
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
@@ -924,13 +836,6 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
32 * fbc->threshold) * 8;
}
-static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
- struct intel_fbc_reg_params *params2)
-{
- /* We can use this since intel_fbc_get_reg_params() does a memset. */
- return memcmp(params1, params2, sizeof(*params1)) == 0;
-}
-
void intel_fbc_pre_update(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -953,6 +858,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
goto unlock;
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+ fbc->flip_pending = true;
deactivate:
intel_fbc_deactivate(dev_priv, reason);
@@ -988,13 +894,15 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_reg_params old_params;
WARN_ON(!mutex_is_locked(&fbc->lock));
if (!fbc->enabled || fbc->crtc != crtc)
return;
+ fbc->flip_pending = false;
+ WARN_ON(fbc->active);
+
if (!i915_modparams.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
__intel_fbc_disable(dev_priv);
@@ -1002,25 +910,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
return;
}
- if (!intel_fbc_can_activate(crtc)) {
- WARN_ON(fbc->active);
- return;
- }
-
- old_params = fbc->params;
intel_fbc_get_reg_params(crtc, &fbc->params);
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (fbc->active &&
- intel_fbc_reg_params_equal(&old_params, &fbc->params))
+ if (!intel_fbc_can_activate(crtc))
return;
- intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
- intel_fbc_schedule_activation(crtc);
+ if (!fbc->busy_bits) {
+ intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+ intel_fbc_hw_activate(dev_priv);
+ } else
+ intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
void intel_fbc_post_update(struct intel_crtc *crtc)
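The removed delayed-work path is replaced by a synchronous handshake around the new flip_pending flag. A minimal userspace model of that handshake, assuming the simplified names below (the real driver also checks intel_fbc_can_activate() and holds fbc->lock):

#include <stdbool.h>
#include <stdio.h>

struct fbc_state { bool flip_pending, active; unsigned int busy_bits; };

static void pre_update(struct fbc_state *f)
{
	f->flip_pending = true;
	f->active = false;		/* intel_fbc_deactivate() */
}

static void post_update(struct fbc_state *f)
{
	f->flip_pending = false;
	if (!f->busy_bits)
		f->active = true;	/* intel_fbc_hw_activate() */
	else
		f->active = false;	/* "frontbuffer write" */
}

int main(void)
{
	struct fbc_state f = { .busy_bits = 0 };

	pre_update(&f);
	post_update(&f);	/* flush with !flip_pending would also land here */
	printf("FBC active: %d\n", f.active);	/* prints 1 */
	return 0;
}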
@@ -1085,7 +984,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
intel_fbc_recompress(dev_priv);
- else
+ else if (!fbc->flip_pending)
__intel_fbc_post_update(fbc->crtc);
}
@@ -1225,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
mutex_unlock(&fbc->lock);
-
- cancel_work_sync(&fbc->work.work);
}
/**
@@ -1248,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&fbc->lock);
-
- cancel_work_sync(&fbc->work.work);
}
static void intel_fbc_underrun_work_fn(struct work_struct *work)
@@ -1400,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
- fbc->work.scheduled = false;
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->has_fbc = false;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e9e02b58b7be..fb2f9fce34cd 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -47,7 +47,7 @@
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
- struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
unsigned int origin =
ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
- if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+ if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (intel_fb->obj->stolen && !prealloc)
+ if (intel_fb_obj(fb)->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
*/
- if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+ if (state == FBINFO_STATE_RUNNING &&
+ intel_fb_obj(&ifbdev->fb->base)->stolen)
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 7fff0a0eceb4..c3379bde266f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
-
- intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 116f4ccf1bbd..560c7406ae40 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -27,6 +27,8 @@
#include "intel_guc_submission.h"
#include "i915_drv.h"
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc);
+
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -73,7 +75,7 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->notify = gen8_guc_raise_irq;
}
-int intel_guc_init_wq(struct intel_guc *guc)
+static int guc_init_wq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -124,7 +126,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
return 0;
}
-void intel_guc_fini_wq(struct intel_guc *guc)
+static void guc_fini_wq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -135,6 +137,28 @@ void intel_guc_fini_wq(struct intel_guc *guc)
destroy_workqueue(guc->log.relay.flush_wq);
}
+int intel_guc_init_misc(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ int ret;
+
+ guc_init_ggtt_pin_bias(guc);
+
+ ret = guc_init_wq(guc);
+ if (ret)
+ return ret;
+
+ intel_uc_fw_fetch(i915, &guc->fw);
+
+ return 0;
+}
+
+void intel_guc_fini_misc(struct intel_guc *guc)
+{
+ intel_uc_fw_fini(&guc->fw);
+ guc_fini_wq(guc);
+}
+
static int guc_shared_data_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -169,7 +193,7 @@ int intel_guc_init(struct intel_guc *guc)
ret = guc_shared_data_create(guc);
if (ret)
- return ret;
+ goto err_fetch;
GEM_BUG_ON(!guc->shared_data);
ret = intel_guc_log_create(&guc->log);
@@ -190,6 +214,8 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
guc_shared_data_destroy(guc);
+err_fetch:
+ intel_uc_fw_fini(&guc->fw);
return ret;
}
@@ -201,14 +227,17 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
+ intel_uc_fw_fini(&guc->fw);
}
-static u32 get_log_control_flags(void)
+static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
- u32 level = i915_modparams.guc_log_level;
- u32 flags = 0;
+ u32 level = intel_guc_log_get_level(&guc->log);
+ u32 flags;
+ u32 ads;
- GEM_BUG_ON(level < 0);
+ ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
+ flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
if (!GUC_LOG_LEVEL_IS_ENABLED(level))
flags |= GUC_LOG_DEFAULT_DISABLED;
@@ -222,6 +251,78 @@ static u32 get_log_control_flags(void)
return flags;
}
+static u32 guc_ctl_feature_flags(struct intel_guc *guc)
+{
+ u32 flags = 0;
+
+ flags |= GUC_CTL_VCS2_ENABLED;
+
+ if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
+ flags |= GUC_CTL_KERNEL_SUBMISSIONS;
+ else
+ flags |= GUC_CTL_DISABLE_SCHEDULER;
+
+ return flags;
+}
+
+static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
+{
+ u32 flags = 0;
+
+ if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+ u32 ctxnum, base;
+
+ base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
+ ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+ base >>= PAGE_SHIFT;
+ flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
+ (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
+ }
+ return flags;
+}
+
+static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
+{
+ u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
+ u32 flags;
+
+ #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
+ #define UNIT SZ_1M
+ #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+ #else
+ #define UNIT SZ_4K
+ #define FLAG 0
+ #endif
+
+ BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!DPC_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!ISR_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));
+
+ BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
+ BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
+ BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));
+
+ flags = GUC_LOG_VALID |
+ GUC_LOG_NOTIFY_ON_HALF_FULL |
+ FLAG |
+ ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+ ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
+ ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
+ (offset << GUC_LOG_BUF_ADDR_SHIFT);
+
+ #undef UNIT
+ #undef FLAG
+
+ return flags;
+}
+
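The unit selection and packing above can be checked in isolation. A standalone sketch mirroring the two Kconfig variants added in intel_guc_log.h (both variants happen to encode to the same 1/7/7 field values; only the megabyte flag differs):

#include <stdio.h>

#define SZ_4K	(4u << 10)
#define SZ_1M	(1u << 20)

/* Buffer-count packing rule from guc_ctl_log_params_flags(): sizes that
 * are a whole number of MiB use MiB units (plus the megabyte flag),
 * everything else uses 4 KiB pages. */
static void show(const char *name, unsigned int crash, unsigned int dpc,
		 unsigned int isr)
{
	unsigned int unit = (crash % SZ_1M == 0) ? SZ_1M : SZ_4K;

	printf("%s: unit=%uK crash=%u dpc=%u isr=%u\n", name, unit >> 10,
	       crash / unit - 1, dpc / unit - 1, isr / unit - 1);
}

int main(void)
{
	show("debug  ", 2u << 20, 8u << 20, 8u << 20);	 /* 1, 7, 7 in MiB */
	show("default", 8u << 10, 32u << 10, 32u << 10); /* 1, 7, 7 in 4K  */
	return 0;
}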
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -245,32 +346,13 @@ void intel_guc_init_params(struct intel_guc *guc)
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
- params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
- GUC_CTL_VCS2_ENABLED;
-
- params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
- params[GUC_CTL_DEBUG] = get_log_control_flags();
+ params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+ params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+ params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
- /* If GuC submission is enabled, set up additional parameters here */
- if (USES_GUC_SUBMISSION(dev_priv)) {
- u32 ads = intel_guc_ggtt_offset(guc,
- guc->ads_vma) >> PAGE_SHIFT;
- u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
- u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
- params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
- params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
- pgs >>= PAGE_SHIFT;
- params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
- (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
-
- params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
-
- /* Unmask this bit to enable the GuC's internal scheduler */
- params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
- }
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
@@ -346,10 +428,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = -EIO;
if (ret) {
- DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
- " ret=%d status=0x%08X response=0x%08X\n",
- action[0], ret, status,
- I915_READ(SOFT_SCRATCH(15)));
+ DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
+ action[0], ret, status);
goto out;
}
@@ -386,11 +466,13 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
* could happen that GuC sets the bit for 2nd interrupt but Host
* clears out the bit on handling the 1st interrupt.
*/
+ disable_rpm_wakeref_asserts(dev_priv);
spin_lock(&guc->irq_lock);
val = I915_READ(SOFT_SCRATCH(15));
msg = val & guc->msg_enabled_mask;
I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
spin_unlock(&guc->irq_lock);
+ enable_rpm_wakeref_asserts(dev_priv);
intel_guc_to_host_process_recv_msg(guc, msg);
}
@@ -532,13 +614,13 @@ int intel_guc_resume(struct intel_guc *guc)
*/
/**
- * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
+ * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
* @guc: intel_guc structure.
*
* This function will calculate and initialize the ggtt_pin_bias value based on
* overall WOPCM size and GuC WOPCM size.
*/
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc)
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_i915(guc);
@@ -572,7 +654,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index f1265e122d30..4121928a495e 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -151,11 +151,10 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc);
-int intel_guc_init_wq(struct intel_guc *guc);
-void intel_guc_fini_wq(struct intel_guc *guc);
+int intel_guc_init_misc(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
+void intel_guc_fini_misc(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 0867ba76d445..1a0f2a39cef9 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -84,12 +84,12 @@
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
-#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_DPC_PAGES 7
+#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DPC_SHIFT 6
-#define GUC_LOG_ISR_PAGES 7
+#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT)
#define GUC_LOG_ISR_SHIFT 9
+#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_PAGE_FAULT_CONTROL 5
@@ -532,20 +532,6 @@ enum guc_log_buffer_type {
};
/**
- * DOC: GuC Log buffer Layout
- *
- * Page0 +-------------------------------+
- * | ISR state header (32 bytes) |
- * | DPC state header |
- * | Crash dump state header |
- * Page1 +-------------------------------+
- * | ISR logs |
- * Page9 +-------------------------------+
- * | DPC logs |
- * Page17 +-------------------------------+
- * | Crash Dump logs |
- * +-------------------------------+
- *
 * The state structure below is used to coordinate retrieval of GuC firmware
 * logs. Separate state is maintained for each log buffer type.
* read_ptr points to the location where i915 read last in log buffer and
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 401e1704d61e..6da61a71d28f 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -215,11 +215,11 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_ISR_LOG_BUFFER:
- return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ return ISR_BUFFER_SIZE;
case GUC_DPC_LOG_BUFFER:
- return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ return DPC_BUFFER_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
- return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ return CRASH_BUFFER_SIZE;
default:
MISSING_CASE(type);
}
@@ -397,7 +397,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
lockdep_assert_held(&log->relay.lock);
/* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = GUC_LOG_SIZE;
+ subbuf_size = log->vma->size;
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -452,13 +452,34 @@ int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct i915_vma *vma;
- unsigned long offset;
- u32 flags;
+ u32 guc_log_size;
int ret;
GEM_BUG_ON(log->vma);
- vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
+ /*
+ * GuC Log buffer Layout
+ *
+ * +===============================+ 00B
+ * | Crash dump state header |
+ * +-------------------------------+ 32B
+ * | DPC state header |
+ * +-------------------------------+ 64B
+ * | ISR state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | DPC logs |
+ * +===============================+ + DPC_SIZE
+ * | ISR logs |
+ * +===============================+ + ISR_SIZE
+ */
+ guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
+ ISR_BUFFER_SIZE;
+
+ vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
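Quick arithmetic for the allocation above, using the two size sets defined in intel_guc_log.h later in this series:

  guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE + ISR_BUFFER_SIZE
    default: 4K + 8K + 32K + 32K = 76 KiB
    debug:   4K + 2M + 8M  + 8M  = 18 MiB + 4 KiB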
@@ -466,20 +487,12 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->vma = vma;
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
- offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
- log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ log->level = i915_modparams.guc_log_level;
return 0;
err:
- /* logging will be off */
- i915_modparams.guc_log_level = 0;
+ DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
return ret;
}
@@ -488,15 +501,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
i915_vma_unpin_and_release(&log->vma);
}
-int intel_guc_log_level_get(struct intel_guc_log *log)
-{
- GEM_BUG_ON(!log->vma);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-
- return i915_modparams.guc_log_level;
-}
-
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -504,33 +509,32 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
GEM_BUG_ON(!log->vma);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
/*
 * GuC recognizes log levels from 0 to max; we use 0 to indicate that
 * logging should be disabled.
*/
- if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX)
+ if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
mutex_lock(&dev_priv->drm.struct_mutex);
- if (i915_modparams.guc_log_level == val) {
+ if (log->level == level) {
ret = 0;
goto out_unlock;
}
intel_runtime_pm_get(dev_priv);
- ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
- GUC_LOG_LEVEL_IS_ENABLED(val),
- GUC_LOG_LEVEL_TO_VERBOSITY(val));
+ ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
+ GUC_LOG_LEVEL_IS_ENABLED(level),
+ GUC_LOG_LEVEL_TO_VERBOSITY(level));
intel_runtime_pm_put(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
}
- i915_modparams.guc_log_level = val;
+ log->level = level;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index fa80535a6f9d..7bc763f10c03 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -30,15 +30,19 @@
#include <linux/workqueue.h>
#include "intel_guc_fwif.h"
+#include "i915_gem.h"
struct intel_guc;
-/*
- * The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap
- */
-#define GUC_LOG_SIZE ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
- 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CRASH_BUFFER_SIZE SZ_2M
+#define DPC_BUFFER_SIZE SZ_8M
+#define ISR_BUFFER_SIZE SZ_8M
+#else
+#define CRASH_BUFFER_SIZE SZ_8K
+#define DPC_BUFFER_SIZE SZ_32K
+#define ISR_BUFFER_SIZE SZ_32K
+#endif
/*
* While we're using plain log level in i915, GuC controls are much more...
@@ -58,7 +62,7 @@ struct intel_guc;
#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
struct intel_guc_log {
- u32 flags;
+ u32 level;
struct i915_vma *vma;
struct {
void *buf_addr;
@@ -80,8 +84,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
-int intel_guc_log_level_get(struct intel_guc_log *log);
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
int intel_guc_log_relay_open(struct intel_guc_log *log);
void intel_guc_log_relay_flush(struct intel_guc_log *log);
@@ -89,4 +92,9 @@ void intel_guc_log_relay_close(struct intel_guc_log *log);
void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
+static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
+{
+ return log->level;
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 2feb65096966..4aa5e6463e7b 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -513,8 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
- engine));
+ u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
spin_lock(&client->wq_lock);
@@ -537,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *dev_priv = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma))
POSTING_READ_FW(GUC_STATUS);
@@ -552,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
- engine));
+ u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
+ engine)->lrc_desc);
u32 data[7];
/*
@@ -623,6 +622,22 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
}
+static void complete_preempt_context(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ if (inject_preempt_hang(execlists))
+ return;
+
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
+
+ wait_for_guc_preempt_report(engine);
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+}
+
/**
* guc_submit() - Submit commands through GuC
* @engine: engine associated with the commands
@@ -681,9 +696,6 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
lockdep_assert_held(&engine->timeline.lock);
- rb = execlists->first;
- GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
if (port_isset(port)) {
if (intel_engine_has_preemption(engine)) {
struct guc_preempt_work *preempt_work =
@@ -705,12 +717,12 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
}
GEM_BUG_ON(port_isset(port));
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- if (last && rq->ctx != last->ctx) {
+ if (last && rq->hw_context != last->hw_context) {
if (port == last_port) {
__list_del_many(&p->requests,
&rq->sched.link);
@@ -730,15 +742,13 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
submit = true;
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
done:
execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
- execlists->first = rb;
if (submit)
port_assign(port, last);
if (last)
@@ -747,7 +757,8 @@ done:
/* We must always keep the beast fed if we have work piled up */
GEM_BUG_ON(port_isset(execlists->port) &&
!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+ !port_isset(execlists->port));
return submit;
}
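[Note: the dequeue paths in this patch move to the kernel's cached rbtree, which tracks the leftmost node so the highest-priority list is found in O(1) without a separate execlists->first pointer. A self-contained sketch of the pattern — types hypothetical, API from <linux/rbtree.h>:]

	#include <linux/rbtree.h>

	struct prio_node {
		struct rb_node node;
		int prio;
	};

	/* Insert, remembering whether the new node became the leftmost. */
	static void prio_insert(struct rb_root_cached *root, struct prio_node *p)
	{
		struct rb_node **parent = &root->rb_root.rb_node, *rb = NULL;
		bool first = true;

		while (*parent) {
			rb = *parent;
			if (p->prio > rb_entry(rb, struct prio_node, node)->prio) {
				parent = &rb->rb_left;
			} else {
				parent = &rb->rb_right;
				first = false;
			}
		}
		rb_link_node(&p->node, rb, parent);
		rb_insert_color_cached(&p->node, root, first);
	}

	/* Pop the cached leftmost (highest priority) entry. */
	static struct prio_node *prio_pop(struct rb_root_cached *root)
	{
		struct rb_node *rb = rb_first_cached(root);

		if (!rb)
			return NULL;
		rb_erase_cached(rb, root);
		return rb_entry(rb, struct prio_node, node);
	}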
@@ -793,20 +804,44 @@ static void guc_submission_tasklet(unsigned long data)
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
- GUC_PREEMPT_FINISHED) {
- execlists_cancel_port_requests(&engine->execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- wait_for_guc_preempt_report(engine);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
- }
+ GUC_PREEMPT_FINISHED)
+ complete_preempt_context(engine);
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
}
+static struct i915_request *
+guc_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+
+ /*
+ * In GuC submission mode we use a worker to queue preemption requests
+ * from the tasklet. Even though the tasklet was disabled, we may still
+ * have a worker queued. Make sure that all workers scheduled before
+ * disabling the tasklet have completed before continuing with the
+ * reset.
+ */
+ if (engine->i915->guc.preempt_wq)
+ flush_workqueue(engine->i915->guc.preempt_wq);
+
+ return i915_gem_find_active_request(engine);
+}
+
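[Note: the ordering in guc_reset_prepare() — disable the tasklet first, then drain the workqueue it feeds — is the generic quiesce-before-reset shape. A sketch with the stock primitives; i915's __tasklet_disable_sync_once wrapper additionally makes the disable idempotent across repeated reset attempts:]

	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	static void quiesce_for_reset(struct tasklet_struct *t,
				      struct workqueue_struct *wq)
	{
		tasklet_disable(t);		/* also waits for a running tasklet */
		if (wq)
			flush_workqueue(wq);	/* drain work queued before disable */
	}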
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -876,8 +911,12 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc)
__update_doorbell_desc(guc->preempt_client,
GUC_DOORBELL_INVALID);
}
- __destroy_doorbell(guc->execbuf_client);
- __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+
+ if (guc->execbuf_client) {
+ __destroy_doorbell(guc->execbuf_client);
+ __update_doorbell_desc(guc->execbuf_client,
+ GUC_DOORBELL_INVALID);
+ }
}
/**
@@ -1090,7 +1129,8 @@ static void guc_clients_destroy(struct intel_guc *guc)
guc_client_free(client);
client = fetch_and_zero(&guc->execbuf_client);
- guc_client_free(client);
+ if (client)
+ guc_client_free(client);
}
/*
@@ -1119,7 +1159,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
WARN_ON(!guc_verify_doorbells(guc));
ret = guc_clients_create(guc);
if (ret)
- return ret;
+ goto err_pool;
for_each_engine(engine, dev_priv, id) {
guc->preempt_work[id].engine = engine;
@@ -1128,6 +1168,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
return 0;
+err_pool:
+ guc_stage_desc_pool_destroy(guc);
+ return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1142,7 +1185,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_clients_destroy(guc);
WARN_ON(!guc_verify_doorbells(guc));
- guc_stage_desc_pool_destroy(guc);
+ if (guc->stage_desc_pool)
+ guc_stage_desc_pool_destroy(guc);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1225,6 +1269,31 @@ static void guc_submission_unpark(struct intel_engine_cs *engine)
intel_engine_pin_breadcrumbs_irq(engine);
}
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ /*
+ * We inherit a bunch of functions from execlists that we'd like
+ * to keep using:
+ *
+ * engine->submit_request = execlists_submit_request;
+ * engine->cancel_requests = execlists_cancel_requests;
+ * engine->schedule = execlists_schedule;
+ *
+ * But we need to override the actual submission backend in order
+ * to talk to the GuC.
+ */
+ intel_execlists_set_default_submission(engine);
+
+ engine->execlists.tasklet.func = guc_submission_tasklet;
+
+ engine->park = guc_submission_park;
+ engine->unpark = guc_submission_unpark;
+
+ engine->reset.prepare = guc_reset_prepare;
+
+ engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+}
+
int intel_guc_submission_enable(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1263,14 +1332,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_execlists * const execlists =
- &engine->execlists;
-
- execlists->tasklet.func = guc_submission_tasklet;
- engine->park = guc_submission_park;
- engine->unpark = guc_submission_unpark;
-
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+ engine->set_default_submission = guc_set_default_submission;
+ engine->set_default_submission(engine);
}
return 0;
@@ -1284,9 +1347,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_interrupts_release(dev_priv);
guc_clients_doorbell_fini(guc);
-
- /* Revert back to manual ELSP submission */
- intel_engines_reset_default_submission(dev_priv);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index a2fe7c8d4477..c22b3e18a0f5 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_KABYLAKE(dev_priv))
return true;
+ if (IS_BROXTON(dev_priv))
+ return true;
return false;
}
@@ -90,6 +92,9 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
if (!i915_modparams.enable_gvt) {
DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
return 0;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index d47e346bd49e..2fc7a0dd0df9 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -294,6 +294,7 @@ static void hangcheck_store_sample(struct intel_engine_cs *engine,
engine->hangcheck.seqno = hc->seqno;
engine->hangcheck.action = hc->action;
engine->hangcheck.stalled = hc->stalled;
+ engine->hangcheck.wedged = hc->wedged;
}
static enum intel_engine_hangcheck_action
@@ -368,6 +369,9 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
hc->stalled = time_after(jiffies,
engine->hangcheck.action_timestamp + timeout);
+ hc->wedged = time_after(jiffies,
+ engine->hangcheck.action_timestamp +
+ I915_ENGINE_WEDGED_TIMEOUT);
}
static void hangcheck_declare_hang(struct drm_i915_private *i915,
@@ -409,7 +413,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int hung = 0, stuck = 0;
+ unsigned int hung = 0, stuck = 0, wedged = 0;
if (!i915_modparams.enable_hangcheck)
return;
@@ -440,6 +444,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
+
+ if (engine->hangcheck.wedged)
+ wedged |= intel_engine_flag(engine);
+ }
+
+ if (wedged) {
+ dev_err(dev_priv->drm.dev,
+ "GPU recovery timed out,"
+ " cancelling all in-flight rendering.\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(dev_priv);
}
if (hung)
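[Note: both the stall test and the new wedged test rely on time_after(), which stays correct across jiffies wraparound; it is defined in <linux/jiffies.h> roughly as:]

	/* a is "after" b when the signed difference b - a has gone negative,
	 * so unsigned wraparound of jiffies does not break the comparison. */
	#define time_after(a, b)	((long)((b) - (a)) < 0)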
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d8cb53ef4351..a9076402dcb0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -51,7 +51,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t enabled_bits;
+ u32 enabled_bits;
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -59,6 +59,15 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
"HDMI port enabled, expecting disabled\n");
}
+static void
+assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+ TRANS_DDI_FUNC_ENABLE,
+ "HDMI transcoder function enabled, expecting disabled\n");
+}
+
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
@@ -144,7 +153,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -199,7 +208,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -259,7 +268,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -317,7 +326,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -376,19 +385,16 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- i915_reg_t data_reg;
int data_size = type == DP_SDP_VSC ?
VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
int i;
u32 val = I915_READ(ctl_reg);
- data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
-
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -442,7 +448,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
/* see comment above for the reason for this offset */
@@ -461,7 +467,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@@ -491,6 +498,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_hdmi->rgb_quant_range_selectable,
is_hdmi2_sink);
+ drm_hdmi_avi_infoframe_content_type(&frame.avi,
+ conn_state);
+
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
}
@@ -586,7 +596,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -727,7 +737,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -770,7 +780,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -823,7 +833,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -834,11 +844,11 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
- assert_hdmi_port_disabled(intel_hdmi);
+ assert_hdmi_transcoder_func_disabled(dev_priv,
+ crtc_state->cpu_transcoder);
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -856,7 +866,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -1161,33 +1171,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(intel_hdmi->hdmi_reg);
-
- if (!(tmp & SDVO_ENABLE))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev_priv))
- *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
+ ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -1421,8 +1414,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
@@ -1577,14 +1570,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
- /* if we can't do 8bpc we may still be able to do 12bpc */
- if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi)
- status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi);
+ if (hdmi->has_hdmi_sink && !force_dvi) {
+ /* if we can't do 8bpc we may still be able to do 12bpc */
+ if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv))
+ status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
+ true, force_dvi);
+
+ /* if we can't do 8,12bpc we may still be able to do 10bpc */
+ if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
+ status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
+ true, force_dvi);
+ }
return status;
}
-static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
+static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+ int bpc)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
@@ -1596,6 +1598,9 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (HAS_GMCH_DISPLAY(dev_priv))
return false;
+ if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+ return false;
+
if (crtc_state->pipe_bpp <= 8*3)
return false;
@@ -1603,7 +1608,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
return false;
/*
- * HDMI 12bpc affects the clocks, so it's only possible
+ * HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
@@ -1618,16 +1623,24 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (crtc_state->ycbcr420) {
const struct drm_hdmi_info *hdmi = &info->hdmi;
- if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ if (bpc == 12 && !(hdmi->y420_dc_modes &
+ DRM_EDID_YCBCR420_DC_36))
+ return false;
+ else if (bpc == 10 && !(hdmi->y420_dc_modes &
+ DRM_EDID_YCBCR420_DC_30))
return false;
} else {
- if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
+ if (bpc == 12 && !(info->edid_hdmi_dc_modes &
+ DRM_EDID_HDMI_DC_36))
+ return false;
+ else if (bpc == 10 && !(info->edid_hdmi_dc_modes &
+ DRM_EDID_HDMI_DC_30))
return false;
}
}
/* Display WA #1139: glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+ if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
crtc_state->base.adjusted_mode.htotal > 5460)
return false;
@@ -1637,7 +1650,8 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config,
- int *clock_12bpc, int *clock_8bpc)
+ int *clock_12bpc, int *clock_10bpc,
+ int *clock_8bpc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
@@ -1649,6 +1663,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
/* YCBCR420 TMDS rate requirement is half the pixel clock */
config->port_clock /= 2;
*clock_12bpc /= 2;
+ *clock_10bpc /= 2;
*clock_8bpc /= 2;
config->ycbcr420 = true;
@@ -1676,6 +1691,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+ int clock_10bpc = clock_8bpc * 5 / 4;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
@@ -1702,12 +1718,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
pipe_config->pixel_multiplier = 2;
clock_8bpc *= 2;
+ clock_10bpc *= 2;
clock_12bpc *= 2;
}
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
- &clock_12bpc, &clock_8bpc)) {
+ &clock_12bpc, &clock_10bpc,
+ &clock_8bpc)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return false;
}
@@ -1725,18 +1743,25 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
}
/*
- * HDMI is either 12 or 8, so if the display lets 10bpc sneak
- * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
- * outputs. We also need to check that the higher clock still fits
- * within limits.
+ * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
+ * to check that the higher clock still fits within limits.
*/
- if (hdmi_12bpc_possible(pipe_config) &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
+ if (hdmi_deep_color_possible(pipe_config, 12) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
+ true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
/* Need to adjust the port link by 1.5x for 12bpc. */
pipe_config->port_clock = clock_12bpc;
+ } else if (hdmi_deep_color_possible(pipe_config, 10) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
+ true, force_dvi) == MODE_OK) {
+ DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
+ desired_bpp = 10 * 3;
+
+ /* Need to adjust the port link by 1.25x for 10bpc. */
+ pipe_config->port_clock = clock_10bpc;
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
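[Note: the 3/2 and 5/4 clock factors fall out of TMDS rate scaling with bits per channel — the link runs at bpc/8 times the 8bpc pixel clock. As a quick check:]

	/* clock_8bpc * bpc / 8: 12bpc -> 1.5x (3/2), 10bpc -> 1.25x (5/4) */
	static int deep_color_clock(int clock_8bpc, int bpc)
	{
		return clock_8bpc * bpc / 8;
	}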
@@ -1874,6 +1899,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
connected = true;
}
+ cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
+
return connected;
}
@@ -1882,6 +1909,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -1897,6 +1925,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+
return status;
}
@@ -2037,6 +2068,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
static void intel_hdmi_destroy(struct drm_connector *connector)
{
+ if (intel_attached_hdmi(connector)->cec_notifier)
+ cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
kfree(to_intel_connector(connector)->detect_edid);
drm_connector_cleanup(connector);
kfree(connector);
@@ -2071,6 +2104,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
+ drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
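[Note: the two content-type additions in this patch pair up — the property is attached once when the connector is created, and every modeset then folds the committed state into the AVI infoframe. A sketch of the two call sites (both helpers are core DRM API, from drm_connector.h and drm_edid.h):]

	static void hdmi_content_type_sketch(struct drm_connector *connector,
					     struct hdmi_avi_infoframe *avi,
					     const struct drm_connector_state *conn_state)
	{
		/* init time: expose the property to userspace */
		drm_connector_attach_content_type_property(connector);

		/* modeset time: forward the committed value to the sink */
		drm_hdmi_avi_infoframe_content_type(avi, conn_state);
	}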
@@ -2257,7 +2291,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
- else if (IS_ICELAKE(dev_priv))
+ else if (HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2352,10 +2386,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+ if (IS_G45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+
+ intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
+ port_identifier(port));
+ if (!intel_hdmi->cec_notifier)
+ DRM_DEBUG_KMS("CEC notifier get failed\n");
}
void intel_hdmi_init(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 43aa92beff2a..648a13c6043c 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -77,37 +77,6 @@
*/
/**
- * intel_hpd_port - return port hard associated with certain pin.
- * @dev_priv: private driver data pointer
- * @pin: the hpd pin to get associated port
- *
- * Return port that is associated with @pin and PORT_NONE if no port is
- * hard associated with that @pin.
- */
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
- enum hpd_pin pin)
-{
- switch (pin) {
- case HPD_PORT_A:
- return PORT_A;
- case HPD_PORT_B:
- return PORT_B;
- case HPD_PORT_C:
- return PORT_C;
- case HPD_PORT_D:
- return PORT_D;
- case HPD_PORT_E:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return PORT_F;
- return PORT_E;
- case HPD_PORT_F:
- return PORT_F;
- default:
- return PORT_NONE; /* no port for this pin */
- }
-}
-
-/**
* intel_hpd_pin_default - return default pin associated with certain port.
* @dev_priv: private driver data pointer
* @port: the hpd port to get associated pin
@@ -241,25 +210,25 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
- int i;
+ enum hpd_pin pin;
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_hpd_pin(i) {
+ for_each_hpd_pin(pin) {
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+ if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
continue;
- dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector = to_intel_connector(connector);
- if (intel_connector->encoder->hpd_pin == i) {
+ if (intel_connector->encoder->hpd_pin == pin) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
connector->name);
@@ -301,13 +270,18 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
return true;
}
+static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
+{
+ return intel_encoder_is_dig_port(encoder) &&
+ enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+}
+
static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug.dig_port_work);
u32 long_port_mask, short_port_mask;
- struct intel_digital_port *intel_dig_port;
- int i;
+ struct intel_encoder *encoder;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
@@ -317,27 +291,27 @@ static void i915_digport_work_func(struct work_struct *work)
dev_priv->hotplug.short_port_mask = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- for (i = 0; i < I915_MAX_PORTS; i++) {
- bool valid = false;
- bool long_hpd = false;
- intel_dig_port = dev_priv->hotplug.irq_port[i];
- if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_digital_port *dig_port;
+ enum port port = encoder->port;
+ bool long_hpd, short_hpd;
+ enum irqreturn ret;
+
+ if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- if (long_port_mask & (1 << i)) {
- valid = true;
- long_hpd = true;
- } else if (short_port_mask & (1 << i))
- valid = true;
+ long_hpd = long_port_mask & BIT(port);
+ short_hpd = short_port_mask & BIT(port);
- if (valid) {
- enum irqreturn ret;
+ if (!long_hpd && !short_hpd)
+ continue;
- ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == IRQ_NONE) {
- /* fall back to old school hpd */
- old_bits |= (1 << intel_dig_port->base.hpd_pin);
- }
+ dig_port = enc_to_dig_port(&encoder->base);
+
+ ret = dig_port->hpd_pulse(dig_port, long_hpd);
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
+ old_bits |= BIT(encoder->hpd_pin);
}
}
@@ -418,26 +392,24 @@ static void i915_hotplug_work_func(struct work_struct *work)
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
- int i;
- enum port port;
+ struct intel_encoder *encoder;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
- bool is_dig_port;
if (!pin_mask)
return;
spin_lock(&dev_priv->irq_lock);
- for_each_hpd_pin(i) {
- if (!(BIT(i) & pin_mask))
- continue;
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ enum hpd_pin pin = encoder->hpd_pin;
+ bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
- port = intel_hpd_pin_to_port(dev_priv, i);
- is_dig_port = port != PORT_NONE &&
- dev_priv->hotplug.irq_port[port];
+ if (!(BIT(pin) & pin_mask))
+ continue;
- if (is_dig_port) {
- bool long_hpd = long_mask & BIT(i);
+ if (has_hpd_pulse) {
+ bool long_hpd = long_mask & BIT(pin);
+ enum port port = encoder->port;
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
long_hpd ? "long" : "short");
@@ -455,7 +427,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}
- if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+ if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
@@ -463,20 +435,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* interrupts on saner platforms.
*/
WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
- "Received HPD interrupt on pin %d although disabled\n", i);
+ "Received HPD interrupt on pin %d although disabled\n", pin);
continue;
}
- if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+ if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
continue;
- if (!is_dig_port) {
- dev_priv->hotplug.event_bits |= BIT(i);
+ if (!has_hpd_pulse) {
+ dev_priv->hotplug.event_bits |= BIT(pin);
queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, i)) {
- dev_priv->hotplug.event_bits &= ~BIT(i);
+ if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+ dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
}
}
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 291285277403..ffcad5fad6a7 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -32,6 +32,14 @@ void intel_huc_init_early(struct intel_huc *huc)
intel_huc_fw_init_early(huc);
}
+int intel_huc_init_misc(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+
+ intel_uc_fw_fetch(i915, &huc->fw);
+ return 0;
+}
+
/**
* intel_huc_auth() - Authenticate HuC uCode
* @huc: intel_huc structure
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index aa854907abac..7e41d870b509 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -36,9 +36,15 @@ struct intel_huc {
};
void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init_misc(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
+static inline void intel_huc_fini_misc(struct intel_huc *huc)
+{
+ intel_uc_fw_fini(&huc->fw);
+}
+
static inline int intel_huc_sanitize(struct intel_huc *huc)
{
intel_uc_fw_sanitize(&huc->fw);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index e6875509bcd9..bef32b7c248e 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -77,12 +77,12 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
};
static const struct gmbus_pin gmbus_pins_icp[] = {
- [GMBUS_PIN_1_BXT] = { "dpa", GPIOA },
- [GMBUS_PIN_2_BXT] = { "dpb", GPIOB },
- [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC },
- [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD },
- [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE },
- [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF },
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};
/* pin is expected to be valid */
@@ -361,15 +361,39 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return ret;
}
+static inline
+unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ GMBUS_BYTE_COUNT_MAX;
+}
+
static int
gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len,
- u32 gmbus1_index)
+ u32 gmbus0_reg, u32 gmbus1_index)
{
+ unsigned int size = len;
+ bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+ bool extra_byte_added = false;
+
+ if (burst_read) {
+ /*
+ * As per the HW spec, a 512-byte read needs to fetch one extra
+ * byte, which is then ignored.
+ */
+ if (len == 512) {
+ extra_byte_added = true;
+ len++;
+ }
+ size = len % 256 + 256;
+ I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+ }
+
I915_WRITE_FW(GMBUS1,
gmbus1_index |
GMBUS_CYCLE_WAIT |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
@@ -382,17 +406,34 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
val = I915_READ_FW(GMBUS3);
do {
+ if (extra_byte_added && len == 1)
+ break;
+
*buf++ = val & 0xff;
val >>= 8;
} while (--len && ++loop < 4);
+
+ if (burst_read && len == size - 4)
+ /* Reset the override bit */
+ I915_WRITE_FW(GMBUS0, gmbus0_reg);
}
return 0;
}
+/*
+ * The HW spec says that 512-byte burst reads need special treatment, but it
+ * doesn't mention other multiples of 256 bytes, and we couldn't locate an
+ * I2C slave that supports such lengthy burst reads for experiments.
+ *
+ * So, until HW support is clarified, avoid burst read lengths that are a
+ * multiple of 256 bytes (other than 512) by capping the maximum burst read
+ * length at 767 bytes.
+ */
+#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
+
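[Note: a worked example of the length handling above — this just restates the patch's arithmetic, not the HW documentation:]

	static unsigned int burst_size(unsigned int len) /* len > max xfer size */
	{
		if (len == 512)
			len++;			/* extra byte is read and dropped */
		return len % 256 + 256;		/* 767 -> 511, 513 -> 257, 300 -> 300 */
	}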
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- u32 gmbus1_index)
+ u32 gmbus0_reg, u32 gmbus1_index)
{
u8 *buf = msg->buf;
unsigned int rx_size = msg->len;
@@ -400,10 +441,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+ if (HAS_GMBUS_BURST_READ(dev_priv))
+ len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
+ else
+ len = min(rx_size, gmbus_max_xfer_size(dev_priv));
- ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
- buf, len, gmbus1_index);
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+ gmbus0_reg, gmbus1_index);
if (ret)
return ret;
@@ -462,7 +506,7 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+ len = min(tx_size, gmbus_max_xfer_size(dev_priv));
ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
gmbus1_index);
@@ -491,7 +535,8 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
}
static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+ u32 gmbus0_reg)
{
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
@@ -509,7 +554,8 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
I915_WRITE_FW(GMBUS5, gmbus5);
if (msgs[1].flags & I2C_M_RD)
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+ gmbus1_index);
else
ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
@@ -544,10 +590,12 @@ retry:
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_xfer(msgs, i, num)) {
- ret = gmbus_index_xfer(dev_priv, &msgs[i]);
+ ret = gmbus_index_xfer(dev_priv, &msgs[i],
+ gmbus0_source | bus->reg0);
inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
- ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ ret = gmbus_xfer_read(dev_priv, &msgs[i],
+ gmbus0_source | bus->reg0, 0);
} else {
ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
}
@@ -771,7 +819,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (HAS_PCH_NOP(dev_priv))
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 6269750e2b54..cdf19553ffac 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -62,6 +62,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
@@ -126,9 +127,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return platdev;
}
- pm_runtime_forbid(&platdev->dev);
- pm_runtime_set_active(&platdev->dev);
- pm_runtime_enable(&platdev->dev);
+ pm_runtime_no_callbacks(&platdev->dev);
return platdev;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7c4c8fb1dae4..174479232e94 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -137,6 +137,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
+#include "i915_vgpu.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
@@ -164,7 +165,8 @@
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
static void execlists_init_reg_state(u32 *reg_state,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
@@ -189,12 +191,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
!i915_request_completed(last));
}
-/**
- * intel_lr_context_descriptor_update() - calculate & cache the descriptor
- * descriptor for a pinned context
- * @ctx: Context to work on
- * @engine: Engine the descriptor will be used with
- *
+/*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
* expensive to calculate, we'll just do it once and cache the result,
@@ -204,7 +201,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*
* bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
* bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0
*
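[Note: putting the bit layout above into code, the update presumably packs the descriptor along these lines — field names hypothetical, shifts taken from the comment:]

	u64 desc;

	desc  = ctx_desc_template;	/* bits  0-11: GEN8_CTX_* flags */
	desc |= lrca_ggtt_offset;	/* bits 12-31: page-aligned LRCA */
	desc |= (u64)ctx_hw_id << 32;	/* bits 32-52: unique ctx ID */
	/* bits 53-63: mbz / group ID, left clear */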
@@ -222,9 +219,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -237,6 +234,11 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
/* bits 12-31 */
GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
+ /*
+ * The following 32bits are copied into the OA reports (dword 2).
+ * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
+ * anything below.
+ */
if (INTEL_GEN(ctx->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
@@ -271,7 +273,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
find_priolist:
/* most positive priority is scheduled first, equal priorities fifo */
rb = NULL;
- parent = &execlists->queue.rb_node;
+ parent = &execlists->queue.rb_root.rb_node;
while (*parent) {
rb = *parent;
p = to_priolist(rb);
@@ -309,10 +311,7 @@ find_priolist:
p->priority = prio;
INIT_LIST_HEAD(&p->requests);
rb_link_node(&p->node, rb, parent);
- rb_insert_color(&p->node, &execlists->queue);
-
- if (first)
- execlists->first = &p->node;
+ rb_insert_color_cached(&p->node, &execlists->queue, first);
return p;
}
@@ -418,9 +417,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+ struct intel_context *ce = rq->hw_context;
struct i915_hw_ppgtt *ppgtt =
- rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +429,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
return ce->lrc_desc;
@@ -454,6 +453,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
unsigned int n;
/*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!engine->i915->gt.awake);
+
+ /*
* ELSQ note: the submit queue is not cleared after being submitted
* to the HW so we need to make sure we always clean it up. This is
* currently ensured by the fact that we always write the same number
@@ -495,14 +504,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ctx));
+ i915_gem_context_force_single_submission(ce->gem_context));
}
-static bool can_merge_ctx(const struct i915_gem_context *prev,
- const struct i915_gem_context *next)
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
{
if (prev != next)
return false;
@@ -552,11 +561,24 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
- execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
- execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
+static void complete_preempt_context(struct intel_engine_execlists *execlists)
+{
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ if (inject_preempt_hang(execlists))
+ return;
+
+ execlists_cancel_port_requests(execlists);
+ __unwind_incomplete_requests(container_of(execlists,
+ struct intel_engine_cs,
+ execlists));
}
-static bool __execlists_dequeue(struct intel_engine_cs *engine)
+static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
@@ -566,9 +588,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
struct rb_node *rb;
bool submit = false;
- lockdep_assert_held(&engine->timeline.lock);
-
- /* Hardware submission is through 2 ports. Conceptually each port
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
* static for a context, and unique to each, so we only execute
* requests belonging to a single context from each ring. RING_HEAD
@@ -589,9 +610,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- rb = execlists->first;
- GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -602,8 +620,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
- if (port_count(&port[0]) > 1)
- return false;
/*
* If we write to ELSP a second time before the HW has had
@@ -613,11 +629,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the HW to indicate that it has had a chance to respond.
*/
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
- return false;
+ return;
if (need_preempt(engine, last, execlists->queue_priority)) {
inject_preempt_context(engine);
- return false;
+ return;
}
/*
@@ -642,7 +658,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* priorities of the ports haven't been switch.
*/
if (port_count(&port[1]))
- return false;
+ return;
/*
* WaIdleLiteRestore:bdw,skl
@@ -655,7 +671,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
last->tail = last->wa_tail;
}
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -671,7 +687,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+ if (last &&
+ !can_merge_ctx(rq->hw_context, last->hw_context)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -690,14 +707,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the same context (even though a different
* request) to the second port.
*/
- if (ctx_single_port_submission(last->ctx) ||
- ctx_single_port_submission(rq->ctx)) {
+ if (ctx_single_port_submission(last->hw_context) ||
+ ctx_single_port_submission(rq->hw_context)) {
__list_del_many(&p->requests,
&rq->sched.link);
goto done;
}
- GEM_BUG_ON(last->ctx == rq->ctx);
+ GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@@ -713,8 +730,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
submit = true;
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
@@ -740,35 +756,23 @@ done:
execlists->queue_priority =
port != execlists->port ? rq_prio(last) : INT_MIN;
- execlists->first = rb;
- if (submit)
+ if (submit) {
port_assign(port, last);
+ execlists_submit_ports(engine);
+ }
/* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+ !port_isset(execlists->port));
/* Re-evaluate the executing context setup after each preemptive kick */
if (last)
execlists_user_begin(execlists, execlists->port);
- return submit;
-}
-
-static void execlists_dequeue(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned long flags;
- bool submit;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
- submit = __execlists_dequeue(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- if (submit)
- execlists_submit_ports(engine);
-
- GEM_BUG_ON(port_isset(execlists->port) &&
- !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+ /* If the engine is now idle, so should be the flag; and vice versa. */
+ GEM_BUG_ON(execlists_is_active(&engine->execlists,
+ EXECLISTS_ACTIVE_USER) ==
+ !port_isset(engine->execlists.port));
}
void
@@ -799,82 +803,27 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
port++;
}
- execlists_user_end(execlists);
+ execlists_clear_all_active(execlists);
}
-static void clear_gtiir(struct intel_engine_cs *engine)
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
- struct drm_i915_private *dev_priv = engine->i915;
- int i;
-
/*
- * Clear any pending interrupt state.
- *
- * We do it twice out of paranoia that some of the IIR are
- * double buffered, and so if we only reset it once there may
- * still be an interrupt pending.
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
*/
- if (INTEL_GEN(dev_priv) >= 11) {
- static const struct {
- u8 bank;
- u8 bit;
- } gen11_gtiir[] = {
- [RCS] = {0, GEN11_RCS0},
- [BCS] = {0, GEN11_BCS},
- [_VCS(0)] = {1, GEN11_VCS(0)},
- [_VCS(1)] = {1, GEN11_VCS(1)},
- [_VCS(2)] = {1, GEN11_VCS(2)},
- [_VCS(3)] = {1, GEN11_VCS(3)},
- [_VECS(0)] = {1, GEN11_VECS(0)},
- [_VECS(1)] = {1, GEN11_VECS(1)},
- };
- unsigned long irqflags;
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- for (i = 0; i < 2; i++) {
- gen11_reset_one_iir(dev_priv,
- gen11_gtiir[engine->id].bank,
- gen11_gtiir[engine->id].bit);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- } else {
- static const u8 gtiir[] = {
- [RCS] = 0,
- [BCS] = 0,
- [VCS] = 1,
- [VCS2] = 1,
- [VECS] = 3,
- };
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
- for (i = 0; i < 2; i++) {
- I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
- engine->irq_keep_mask);
- POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
- }
- GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
- engine->irq_keep_mask);
- }
+ execlists->csb_head = execlists->csb_write_reset;
+ WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
}
-static void reset_irq(struct intel_engine_cs *engine)
+static void nop_submission_tasklet(unsigned long data)
{
- /* Mark all CS interrupts as complete */
- smp_store_mb(engine->execlists.active, 0);
- synchronize_hardirq(engine->i915->drm.irq);
-
- clear_gtiir(engine);
-
- /*
- * The port is checked prior to scheduling a tasklet, but
- * just in case we have suspended the tasklet to do the
- * wedging make sure that when it wakes, it decides there
- * is no work to do by clearing the irq_posted bit.
- */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* The driver is wedged; don't process any more events. */
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
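[Note: the csb_write_reset trick in reset_csb_pointers() relies on the consumer in process_csb() pre-incrementing its head before reading an entry; parking both pointers on the last slot therefore makes entry 0 the first one examined after a reset. Sketched with an assumed entry count:]

	#define CSB_ENTRIES	6		/* GEN8_CSB_ENTRIES (assumed) */

	u8 head = CSB_ENTRIES - 1;		/* csb_write_reset value */

	if (++head == CSB_ENTRIES)
		head = 0;			/* first entry consumed is CSB[0] */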
@@ -901,13 +850,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- local_irq_save(flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
- reset_irq(engine);
-
- spin_lock(&engine->timeline.lock);
+ execlists_user_end(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
@@ -917,8 +864,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
}
/* Flush the queued requests to the timeline list (for retiring). */
- rb = execlists->first;
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
@@ -928,8 +874,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
__i915_request_submit(rq);
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
@@ -938,221 +883,198 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
- execlists->queue = RB_ROOT;
- execlists->first = NULL;
+ execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(port_isset(execlists->port));
- spin_unlock(&engine->timeline.lock);
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static void process_csb(struct intel_engine_cs *engine)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_private *dev_priv = engine->i915;
- bool fw = false;
+ const u32 * const buf = execlists->csb_status;
+ u8 head, tail;
/*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4bits. As it happens we know the next 4bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
*/
- GEM_BUG_ON(!dev_priv->gt.awake);
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+ if (unlikely(head == tail))
+ return;
/*
- * Prefer doing test_and_clear_bit() as a two stage operation to avoid
- * imposing the cost of a locked atomic transaction when submitting a
- * new request (outside of the context-switch interrupt).
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
*/
- while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
- /* The HWSP contains a (cacheable) mirror of the CSB */
- const u32 *buf =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- unsigned int head, tail;
+ rmb();
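
Schematically, the load ordering this enforces (an illustration of the pattern above, not new code):

	tail = READ_ONCE(*execlists->csb_write);	/* 1: sample the write ptr */
	rmb();						/* 2: fence the reads */
	status = buf[2 * head];				/* 3: only now read the CSB */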
- if (unlikely(execlists->csb_use_mmio)) {
- buf = (u32 * __force)
- (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
- execlists->csb_head = -1; /* force mmio read of CSB ptrs */
- }
+ do {
+ struct i915_request *rq;
+ unsigned int status;
+ unsigned int count;
- /* Clear before reading to catch new interrupts */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- smp_mb__after_atomic();
+ if (++head == GEN8_CSB_ENTRIES)
+ head = 0;
- if (unlikely(execlists->csb_head == -1)) { /* following a reset */
- if (!fw) {
- intel_uncore_forcewake_get(dev_priv,
- execlists->fw_domains);
- fw = true;
- }
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. We
+ * therefore cannot stop the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
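
The bookkeeping lives in the low bits of the request pointer itself; port_pack()/port_unpack() used below are roughly equivalent to this sketch (two count bits assumed, as elsewhere in i915):

	static inline unsigned long pack(struct i915_request *rq, unsigned int count)
	{
		/* rq is at least 4-byte aligned, freeing the low bits */
		return (unsigned long)rq | count;
	}

	static inline struct i915_request *unpack(unsigned long p, unsigned int *count)
	{
		*count = p & 3;
		return (struct i915_request *)(p & ~3ul);
	}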
- head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
- tail = GEN8_CSB_WRITE_PTR(head);
- head = GEN8_CSB_READ_PTR(head);
- execlists->csb_head = head;
- } else {
- const int write_idx =
- intel_hws_csb_write_index(dev_priv) -
- I915_HWS_CSB_BUF0_INDEX;
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+ engine->name, head,
+ buf[2 * head + 0], buf[2 * head + 1],
+ execlists->active);
+
+ status = buf[2 * head];
+ if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+ GEN8_CTX_STATUS_PREEMPTED))
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+
+ if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+ continue;
+
+ /* We should never get a COMPLETED | IDLE_ACTIVE! */
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
- head = execlists->csb_head;
- tail = READ_ONCE(buf[write_idx]);
- rmb(); /* Hopefully paired with a wmb() in HW */
+ if (status & GEN8_CTX_STATUS_COMPLETE &&
+ buf[2 * head + 1] == execlists->preempt_complete_status) {
+ GEM_TRACE("%s preempt-idle\n", engine->name);
+ complete_preempt_context(execlists);
+ continue;
}
- GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
- engine->name,
- head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
- tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
- while (head != tail) {
- struct i915_request *rq;
- unsigned int status;
- unsigned int count;
+ if (status & GEN8_CTX_STATUS_PREEMPTED &&
+ execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT))
+ continue;
- if (++head == GEN8_CSB_ENTRIES)
- head = 0;
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_USER));
- /* We are flying near dragons again.
- *
- * We hold a reference to the request in execlist_port[]
- * but no more than that. We are operating in softirq
- * context and so cannot hold any mutex or sleep. That
- * prevents us stopping the requests we are processing
- * in port[] from being retired simultaneously (the
- * breadcrumb will be complete before we see the
- * context-switch). As we only hold the reference to the
- * request, any pointer chasing underneath the request
- * is subject to a potential use-after-free. Thus we
- * store all of the bookkeeping within port[] as
- * required, and avoid using unguarded pointers beneath
- * request itself. The same applies to the atomic
- * status notifier.
+ rq = port_unpack(port, &count);
+ GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+ engine->name,
+ port->context_id, count,
+ rq ? rq->global_seqno : 0,
+ rq ? rq->fence.context : 0,
+ rq ? rq->fence.seqno : 0,
+ intel_engine_get_seqno(engine),
+ rq ? rq_prio(rq) : 0);
+
+ /* Check the context/desc id for this event matches */
+ GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+
+ GEM_BUG_ON(count == 0);
+ if (--count == 0) {
+ /*
+ * On the final event corresponding to the
+ * submission of this context, we expect either
+ * an element-switch event or a completion
+ * event (and on completion, the active-idle
+ * marker). No more preemptions, lite-restore
+ * or otherwise.
*/
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+ GEM_BUG_ON(port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+ GEM_BUG_ON(!port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
- engine->name, head,
- status, buf[2*head + 1],
- execlists->active);
-
- if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
- GEN8_CTX_STATUS_PREEMPTED))
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
-
- if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
- continue;
-
- /* We should never get a COMPLETED | IDLE_ACTIVE! */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
-
- if (status & GEN8_CTX_STATUS_COMPLETE &&
- buf[2*head + 1] == execlists->preempt_complete_status) {
- GEM_TRACE("%s preempt-idle\n", engine->name);
-
- execlists_cancel_port_requests(execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT));
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- continue;
- }
-
- if (status & GEN8_CTX_STATUS_PREEMPTED &&
- execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT))
- continue;
+ /*
+ * We rely on the hardware being strongly
+ * ordered: the breadcrumb write is coherent
+ * (visible from the CPU) before the user
+ * interrupt is raised and the CSB is processed.
+ */
+ GEM_BUG_ON(!i915_request_completed(rq));
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
+ execlists_context_schedule_out(rq,
+ INTEL_CONTEXT_SCHEDULE_OUT);
+ i915_request_put(rq);
- rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
- engine->name,
- port->context_id, count,
- rq ? rq->global_seqno : 0,
- rq ? rq->fence.context : 0,
- rq ? rq->fence.seqno : 0,
- intel_engine_get_seqno(engine),
- rq ? rq_prio(rq) : 0);
+ GEM_TRACE("%s completed ctx=%d\n",
+ engine->name, port->context_id);
- /* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
-
- GEM_BUG_ON(count == 0);
- if (--count == 0) {
- /*
- * On the final event corresponding to the
- * submission of this context, we expect either
- * an element-switch event or a completion
- * event (and on completion, the active-idle
- * marker). No more preemptions, lite-restore
- * or otherwise.
- */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
-
- /*
- * We rely on the hardware being strongly
- * ordered, that the breadcrumb write is
- * coherent (visible from the CPU) before the
- * user interrupt and CSB is processed.
- */
- GEM_BUG_ON(!i915_request_completed(rq));
-
- execlists_context_schedule_out(rq,
- INTEL_CONTEXT_SCHEDULE_OUT);
- i915_request_put(rq);
-
- GEM_TRACE("%s completed ctx=%d\n",
- engine->name, port->context_id);
-
- port = execlists_port_complete(execlists, port);
- if (port_isset(port))
- execlists_user_begin(execlists, port);
- else
- execlists_user_end(execlists);
- } else {
- port_set(port, port_pack(rq, count));
- }
+ port = execlists_port_complete(execlists, port);
+ if (port_isset(port))
+ execlists_user_begin(execlists, port);
+ else
+ execlists_user_end(execlists);
+ } else {
+ port_set(port, port_pack(rq, count));
}
+ } while (head != tail);
- if (head != execlists->csb_head) {
- execlists->csb_head = head;
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
- }
- }
+ execlists->csb_head = head;
+}
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
+static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
+{
+ lockdep_assert_held(&engine->timeline.lock);
+
+ process_csb(engine);
+ if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
execlists_dequeue(engine);
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ unsigned long flags;
- if (fw)
- intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+ GEM_TRACE("%s awake?=%d, active=%x\n",
+ engine->name,
+ engine->i915->gt.awake,
+ engine->execlists.active);
- /* If the engine is now idle, so should be the flag; and vice versa. */
- GEM_BUG_ON(execlists_is_active(&engine->execlists,
- EXECLISTS_ACTIVE_USER) ==
- !port_isset(engine->execlists.port));
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ __execlists_submission_tasklet(engine);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static void queue_request(struct intel_engine_cs *engine,
@@ -1163,16 +1085,30 @@ static void queue_request(struct intel_engine_cs *engine,
&lookup_priolist(engine, prio)->requests);
}
-static void __submit_queue(struct intel_engine_cs *engine, int prio)
+static void __update_queue(struct intel_engine_cs *engine, int prio)
{
engine->execlists.queue_priority = prio;
- tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static void __submit_queue_imm(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ if (reset_in_progress(execlists))
+ return; /* defer until we restart the engine following reset */
+
+ if (execlists->tasklet.func == execlists_submission_tasklet)
+ __execlists_submission_tasklet(engine);
+ else
+ tasklet_hi_schedule(&execlists->tasklet);
}
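
The three-way dispatch above, summarised as a comment sketch:

	/*
	 * reset in progress          => defer; replayed on engine restart
	 * func == submission tasklet => run it inline (lock already held)
	 * any other func installed   => schedule it to run from softirq
	 */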
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
- if (prio > engine->execlists.queue_priority)
- __submit_queue(engine, prio);
+ if (prio > engine->execlists.queue_priority) {
+ __update_queue(engine, prio);
+ __submit_queue_imm(engine);
+ }
}
static void execlists_submit_request(struct i915_request *request)
@@ -1184,11 +1120,12 @@ static void execlists_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
queue_request(engine, &request->sched, rq_prio(request));
- submit_queue(engine, rq_prio(request));
- GEM_BUG_ON(!engine->execlists.first);
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
+ submit_queue(engine, rq_prio(request));
+
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
@@ -1315,13 +1252,40 @@ static void execlists_schedule(struct i915_request *request,
}
if (prio > engine->execlists.queue_priority &&
- i915_sw_fence_done(&sched_to_request(node)->submit))
- __submit_queue(engine, prio);
+ i915_sw_fence_done(&sched_to_request(node)->submit)) {
+ /* defer submission until after all of our updates */
+ __update_queue(engine, prio);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
}
spin_unlock_irq(&engine->timeline.lock);
}
+static void execlists_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
+
+ if (!ce->state)
+ return;
+
+ intel_ring_free(ce->ring);
+
+ GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+ i915_gem_object_put(ce->state->obj);
+}
+
+static void execlists_context_unpin(struct intel_context *ce)
+{
+ intel_ring_unpin(ce->ring);
+
+ ce->state->obj->pin_global--;
+ i915_gem_object_unpin_map(ce->state->obj);
+ i915_vma_unpin(ce->state);
+
+ i915_gem_context_put(ce->gem_context);
+}
+
static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
{
unsigned int flags;
@@ -1345,21 +1309,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
}
-static struct intel_ring *
-execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
void *vaddr;
int ret;
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
- ret = execlists_context_deferred_alloc(ctx, engine);
+ ret = execlists_context_deferred_alloc(ctx, engine, ce);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
@@ -1378,17 +1336,17 @@ execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
- intel_lr_context_descriptor_update(ctx, engine);
+ intel_lr_context_descriptor_update(ctx, engine, ce);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
+ GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
-out:
- return ce->ring;
+ return ce;
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
@@ -1399,33 +1357,33 @@ err:
return ERR_PTR(ret);
}
-static void execlists_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops execlists_context_ops = {
+ .unpin = execlists_context_unpin,
+ .destroy = execlists_context_destroy,
+};
+
+static struct intel_context *
+execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
-
- intel_ring_unpin(ce->ring);
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- ce->state->obj->pin_global--;
- i915_gem_object_unpin_map(ce->state->obj);
- i915_vma_unpin(ce->state);
+ ce->ops = &execlists_context_ops;
- i915_gem_context_put(ctx);
+ return __execlists_context_pin(engine, ctx, ce);
}
static int execlists_request_alloc(struct i915_request *request)
{
- struct intel_context *ce =
- to_intel_context(request->ctx, request->engine);
int ret;
- GEM_BUG_ON(!ce->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -1538,29 +1496,56 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+struct lri {
+ i915_reg_t reg;
+ u32 value;
+};
+
+static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
{
- *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ GEM_BUG_ON(!count || count > 63);
- /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
- batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ *batch++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *batch++ = i915_mmio_reg_offset(lri->reg);
+ *batch++ = lri->value;
+ } while (lri++, --count);
+ *batch++ = MI_NOOP;
- *batch++ = MI_LOAD_REGISTER_IMM(3);
+ return batch;
+}
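
For count == 2 the dword stream emitted by emit_lri() looks like this (illustrative layout only):

	batch[0] = MI_LOAD_REGISTER_IMM(2);
	batch[1] = i915_mmio_reg_offset(reg0);	batch[2] = value0;
	batch[3] = i915_mmio_reg_offset(reg1);	batch[4] = value1;
	batch[5] = MI_NOOP;			/* trailing NOOP, as emitted above */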
- /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
- *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
- *batch++ = _MASKED_BIT_DISABLE(
- GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+{
+ static const struct lri lri[] = {
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+ {
+ COMMON_SLICE_CHICKEN2,
+ __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
+ 0),
+ },
+
+ /* BSpec: 11391 */
+ {
+ FF_SLICE_CHICKEN,
+ __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
+ FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+ },
+
+ /* BSpec: 11299 */
+ {
+ _3D_CHICKEN3,
+ __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
+ _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+ }
+ };
- /* BSpec: 11391 */
- *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
- *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- /* BSpec: 11299 */
- *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
- *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
- *batch++ = MI_NOOP;
+ batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaClearSlmSpaceAtContextSwitch:kbl */
/* Actual scratch location is at 128 bytes offset */
@@ -1652,7 +1637,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1767,17 +1752,29 @@ static void enable_execlists(struct intel_engine_cs *engine)
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ I915_WRITE(RING_MI_MODE(engine->mmio_base),
+ _MASKED_BIT_DISABLE(STOP_RING));
+
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
engine->status_page.ggtt_offset);
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
- /* Following the reset, we need to reload the CSB read/write pointers */
- engine->execlists.csb_head = -1;
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ bool unexpected = false;
+
+ if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+ DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
}
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_mocs_init_engine(engine);
@@ -1785,13 +1782,14 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
return ret;
intel_engine_reset_breadcrumbs(engine);
- intel_engine_init_hangcheck(engine);
- enable_execlists(engine);
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
- /* After a GPU reset, we may have requests to replay */
- if (execlists->first)
- tasklet_schedule(&execlists->tasklet);
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ enable_execlists(engine);
return 0;
}
@@ -1833,8 +1831,69 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
return 0;
}
-static void reset_common_ring(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *request, *active;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
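
A sketch of that helper, assuming the i915 tasklet wrappers of this era; only the first nesting pays for synchronisation against a concurrently running tasklet:

	static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
	{
		if (atomic_inc_return(&t->count) == 1)
			tasklet_unlock_wait(t);	/* wait out a running tasklet */
	}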
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /*
+ * We want to flush the pending context switches: having disabled
+ * the tasklet above, we can assume exclusive access to the execlists.
+ * This allows us to catch up with an in-flight preemption event,
+ * and avoid blaming an innocent request if the stall was due to the
+ * preemption itself.
+ */
+ process_csb(engine);
+
+ /*
+ * The last active request can then be no later than the last request
+ * now in ELSP[0]. Search backwards from there so that, if the GPU
+ * has advanced beyond the last CSB update, the already-completed
+ * request is pardoned rather than blamed.
+ */
+ active = NULL;
+ request = port_request(execlists->port);
+ if (request) {
+ /*
+ * Prevent the breadcrumb from advancing before we decide
+ * which request is currently active.
+ */
+ intel_engine_stop_cs(engine);
+
+ list_for_each_entry_from_reverse(request,
+ &engine->timeline.requests,
+ link) {
+ if (__i915_request_completed(request,
+ request->global_seqno))
+ break;
+
+ active = request;
+ }
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ return active;
+}
+
+static void execlists_reset(struct intel_engine_cs *engine,
+ struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
@@ -1844,8 +1903,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
engine->name, request ? request->global_seqno : 0,
intel_engine_get_seqno(engine));
- /* See execlists_cancel_requests() for the irq/spinlock split. */
- local_irq_save(flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
/*
* Catch up with any missed context-switch interrupts.
@@ -1857,14 +1915,14 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* requests were completed.
*/
execlists_cancel_port_requests(execlists);
- reset_irq(engine);
/* Push back any incomplete requests for replay after the reset. */
- spin_lock(&engine->timeline.lock);
__unwind_incomplete_requests(engine);
- spin_unlock(&engine->timeline.lock);
- local_irq_restore(flags);
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(&engine->execlists);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
/*
* If the request was innocent, we leave the request in the ELSP
@@ -1888,35 +1946,52 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
- if (engine->default_state) {
- void *defaults;
-
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR(defaults)) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- defaults + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- i915_gem_object_unpin_map(engine->default_state);
- }
+ regs = request->hw_context->lrc_reg_state;
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
}
- execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+ execlists_init_reg_state(regs,
+ request->gem_context, engine, request->ring);
/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
- regs[CTX_RING_HEAD + 1] = request->postfix;
- request->ring->head = request->postfix;
+ request->ring->head = intel_ring_wrap(request->ring, request->postfix);
+ regs[CTX_RING_HEAD + 1] = request->ring->head;
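
intel_ring_wrap() keeps the restored head inside the (power-of-two sized) ring; a sketch, assuming the intel_ringbuffer.h helper:

	static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
	{
		return pos & (ring->size - 1);
	}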
+
intel_ring_update_space(request->ring);
/* Reset WaIdleLiteRestore:bdw,skl as well */
unwind_wa_tail(request);
}
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /* After a GPU reset, we may have requests to replay */
+ if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
+ tasklet_schedule(&execlists->tasklet);
+
+ /*
+ * Flush the tasklet while we still have the forcewake to be sure
+ * that it is not allowed to sleep before we restart and reload a
+ * context.
+ *
+ * As before (with execlists_reset_prepare) we rely on the caller
+ * serialising multiple attempts to reset so that we know that we
+ * are the only one manipulating tasklet state.
+ */
+ __tasklet_enable_sync_once(&execlists->tasklet);
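
And its counterpart, again a sketch under the same assumption; the final unnesting flushes any pending run before the tasklet may be scheduled again:

	static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
	{
		if (atomic_dec_return(&t->count) == 0)
			tasklet_kill(t);
	}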
+
+ GEM_TRACE("%s\n", engine->name);
+}
+
static int intel_logical_ring_emit_pdps(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_engine_cs *engine = rq->engine;
const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
u32 *cs;
@@ -1955,15 +2030,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit. */
- if (rq->ctx->ppgtt &&
- (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+ if (rq->gem_context->ppgtt &&
+ (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
if (ret)
return ret;
- rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+ rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
cs = intel_ring_begin(rq, 6);
@@ -2217,13 +2292,15 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
kfree(engine);
}
-static void execlists_set_default_submission(struct intel_engine_cs *engine)
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
+ engine->reset.prepare = execlists_reset_prepare;
+
engine->park = NULL;
engine->unpark = NULL;
@@ -2243,18 +2320,19 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
- engine->reset_hw = reset_common_ring;
- engine->context_pin = execlists_context_pin;
- engine->context_unpin = execlists_context_unpin;
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.reset = execlists_reset;
+ engine->reset.finish = execlists_reset_finish;
+ engine->context_pin = execlists_context_pin;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
- engine->set_default_submission = execlists_set_default_submission;
+ engine->set_default_submission = intel_execlists_set_default_submission;
if (INTEL_GEN(engine->i915) < 11) {
engine->irq_enable = gen8_logical_ring_enable_irq;
@@ -2294,28 +2372,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
static void
logical_ring_setup(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- enum forcewake_domains fw_domains;
-
intel_engine_setup_common(engine);
/* Intentionally left blank. */
engine->buffer = NULL;
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
- RING_ELSP(engine),
- FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_PTR(engine),
- FW_REG_READ | FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_BUF_BASE(engine),
- FW_REG_READ);
-
- engine->execlists.fw_domains = fw_domains;
-
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
@@ -2323,33 +2384,61 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
+static bool csb_force_mmio(struct drm_i915_private *i915)
+{
+ /* Older GVT emulation depends upon intercepting CSB mmio */
+ return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
+}
+
static int logical_ring_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_engine_init_common(engine);
if (ret)
goto error;
- if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
- engine->execlists.submit_reg = engine->i915->regs +
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
- engine->execlists.ctrl_reg = engine->i915->regs +
+ execlists->ctrl_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
} else {
- engine->execlists.submit_reg = engine->i915->regs +
+ execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_ELSP(engine));
}
- engine->execlists.preempt_complete_status = ~0u;
- if (engine->i915->preempt_context) {
+ execlists->preempt_complete_status = ~0u;
+ if (i915->preempt_context) {
struct intel_context *ce =
- to_intel_context(engine->i915->preempt_context, engine);
+ to_intel_context(i915->preempt_context, engine);
- engine->execlists.preempt_complete_status =
+ execlists->preempt_complete_status =
upper_32_bits(ce->lrc_desc);
}
+ execlists->csb_read =
+ i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+ if (csb_force_mmio(i915)) {
+ execlists->csb_status = (u32 __force *)
+ (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+
+ execlists->csb_write = (u32 __force *)execlists->csb_read;
+ execlists->csb_write_reset =
+ _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
+ GEN8_CSB_ENTRIES - 1);
+ } else {
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
+ execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
+ }
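
The two configurations chosen above, side by side (a summary of the branches, not new code):

	/*
	 *			HWSP mode		mmio mode (old GVT)
	 * csb_status		HWSP CSB mirror		RING_CONTEXT_STATUS_BUF
	 * csb_write		HWSP write index	RING_CONTEXT_STATUS_PTR reg
	 * csb_write_reset	GEN8_CSB_ENTRIES - 1	masked-field register write
	 */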
+ reset_csb_pointers(execlists);
+
return 0;
error:
@@ -2482,7 +2571,7 @@ static void execlists_init_reg_state(u32 *regs,
struct drm_i915_private *dev_priv = engine->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
u32 base = engine->mmio_base;
- bool rcs = engine->id == RCS;
+ bool rcs = engine->class == RENDER_CLASS;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2550,7 +2639,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
@@ -2629,10 +2718,10 @@ err_unpin_ctx:
}
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
struct drm_i915_gem_object *ctx_obj;
- struct intel_context *ce = to_intel_context(ctx, engine);
struct i915_vma *vma;
uint32_t context_size;
struct intel_ring *ring;
@@ -2654,7 +2743,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4ec7d8dd13c8..4dfb78e3ec7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -104,11 +104,6 @@ struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-static inline uint64_t
-intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- return to_intel_context(ctx, engine)->lrc_desc;
-}
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 8ae8f42f430a..5dae16ccd9f1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -116,7 +116,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
{
- uint8_t rev;
+ u8 rev;
if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
&rev) != 1) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 48f618dc9abb..f9f3b0885ba5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -44,8 +44,6 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds_connector {
struct intel_connector base;
-
- struct notifier_block lid_notifier;
};
struct intel_lvds_pps {
@@ -85,34 +83,35 @@ static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *conn
return container_of(connector, struct intel_lvds_connector, base.base);
}
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(lvds_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+
+ return val & LVDS_PORT_EN;
+}
+
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(lvds_encoder->reg);
-
- if (!(tmp & LVDS_PORT_EN))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
+ ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -255,14 +254,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
+ temp &= ~LVDS_PIPE_SEL_MASK_CPT;
+ temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
+ temp &= ~LVDS_PIPE_SEL_MASK;
+ temp |= LVDS_PIPE_SEL(pipe);
}
/* set the corresponding LVDS_BORDER bit */
@@ -454,26 +450,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-/*
- * Detect the LVDS connection.
- *
- * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means
- * connected and closed means disconnected. We also send hotplug events as
- * needed, using lid status notification from the input layer.
- */
static enum drm_connector_status
intel_lvds_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- enum drm_connector_status status;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- status = intel_panel_detect(dev_priv);
- if (status != connector_status_unknown)
- return status;
-
return connector_status_connected;
}
@@ -498,117 +477,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 1;
}
-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Skipping forced modeset for %s\n", id->ident);
- return 1;
-}
-
-/* The GPU hangs up on these systems if modeset is performed on LID open */
-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
- {
- .callback = intel_no_modeset_on_lid_dmi_callback,
- .ident = "Toshiba Tecra A11",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
- },
- },
-
- { } /* terminating entry */
-};
-
-/*
- * Lid events. Note the use of 'modeset':
- * - we set it to MODESET_ON_LID_OPEN on lid close,
- * and set it to MODESET_DONE on open
- * - we use it as a "only once" bit (ie we ignore
- * duplicate events where it was already properly set)
- * - the suspend/resume paths will set it to
- * MODESET_SUSPENDED and ignore the lid open event,
- * because they restore the mode ("lid open").
- */
-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
- void *unused)
-{
- struct intel_lvds_connector *lvds_connector =
- container_of(nb, struct intel_lvds_connector, lid_notifier);
- struct drm_connector *connector = &lvds_connector->base.base;
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
- return NOTIFY_OK;
-
- mutex_lock(&dev_priv->modeset_restore_lock);
- if (dev_priv->modeset_restore == MODESET_SUSPENDED)
- goto exit;
- /*
- * check and update the status of LVDS connector after receiving
- * the LID nofication event.
- */
- connector->status = connector->funcs->detect(connector, false);
-
- /* Don't force modeset on machines where it causes a GPU lockup */
- if (dmi_check_system(intel_no_modeset_on_lid))
- goto exit;
- if (!acpi_lid_open()) {
- /* do modeset on next lid open event */
- dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
- goto exit;
- }
-
- if (dev_priv->modeset_restore == MODESET_DONE)
- goto exit;
-
- /*
- * Some old platform's BIOS love to wreak havoc while the lid is closed.
- * We try to detect this here and undo any damage. The split for PCH
- * platforms is rather conservative and a bit arbitrary expect that on
- * those platforms VGA disabling requires actual legacy VGA I/O access,
- * and as part of the cleanup in the hw state restore we also redisable
- * the vga plane.
- */
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_display_resume(dev);
-
- dev_priv->modeset_restore = MODESET_DONE;
-
-exit:
- mutex_unlock(&dev_priv->modeset_restore_lock);
- return NOTIFY_OK;
-}
-
-static int
-intel_lvds_connector_register(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
- int ret;
-
- ret = intel_connector_register(connector);
- if (ret)
- return ret;
-
- lvds->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
- DRM_DEBUG_KMS("lid notifier registration failed\n");
- lvds->lid_notifier.notifier_call = NULL;
- }
-
- return 0;
-}
-
-static void
-intel_lvds_connector_unregister(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-
- if (lvds->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&lvds->lid_notifier);
-
- intel_connector_unregister(connector);
-}
-
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@@ -641,8 +509,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_lvds_connector_register,
- .early_unregister = intel_lvds_connector_unregister,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -948,7 +816,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* register is uninitialized.
*/
val = I915_READ(lvds_encoder->reg);
- if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ if (HAS_PCH_CPT(dev_priv))
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
+ else
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+ if (val == 0)
val = dev_priv->vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
@@ -1003,8 +875,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
/* Skip init on machines we know falsely report LVDS */
- if (dmi_check_system(intel_no_lvds))
+ if (dmi_check_system(intel_no_lvds)) {
+ WARN(!dev_priv->vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
+ return;
+ }
+
+ if (!dev_priv->vbt.int_lvds_support) {
+ DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
return;
+ }
if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
@@ -1016,10 +896,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
- if (dev_priv->vbt.edp.support) {
- DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
- }
}
pin = GMBUS_PIN_PANEL;
@@ -1108,8 +984,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* 2) check for VBT data
* 3) check to see if LVDS is already on
* if none of the above, no panel
- * 4) make sure lid is open
- * if closed, act like it's not there for now
*/
/*
@@ -1125,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
} else {
kfree(edid);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b39846613e3c..ca44bf368e24 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -40,7 +40,7 @@ int intel_connector_update_modes(struct drm_connector *connector,
{
int ret;
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index c58e5f53bab0..e034b4166d32 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -608,16 +608,16 @@ void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
-static struct intel_opregion *system_opregion;
-
+/*
+ * The only video events relevant to opregion are 0x80. These indicate either a
+ * docking event, lid switch or display switch request. In Linux, these are
+ * handled by the dock, button and video drivers.
+ */
static int intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
{
- /* The only video events relevant to opregion are 0x80. These indicate
- either a docking event, lid switch or display switch request. In
- Linux, these are handled by the dock, button and video drivers.
- */
-
+ struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
+ acpi_notifier);
struct acpi_bus_event *event = data;
struct opregion_acpi *acpi;
int ret = NOTIFY_OK;
@@ -625,10 +625,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
- if (!system_opregion)
- return NOTIFY_DONE;
-
- acpi = system_opregion->acpi;
+ acpi = opregion->acpi;
if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
ret = NOTIFY_BAD;
@@ -638,10 +635,6 @@ static int intel_opregion_video_event(struct notifier_block *nb,
return ret;
}
-static struct notifier_block intel_opregion_notifier = {
- .notifier_call = intel_opregion_video_event,
-};
-
/*
* Initialise the DIDL field in opregion. This passes a list of devices to
* the firmware. Values are defined by section B.4.2 of the ACPI specification
@@ -797,8 +790,8 @@ void intel_opregion_register(struct drm_i915_private *dev_priv)
opregion->acpi->csts = 0;
opregion->acpi->drdy = 1;
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
+ opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
+ register_acpi_notifier(&opregion->acpi_notifier);
}
if (opregion->asle) {
@@ -822,8 +815,8 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv)
if (opregion->acpi) {
opregion->acpi->drdy = 0;
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
+ unregister_acpi_notifier(&opregion->acpi_notifier);
+ opregion->acpi_notifier.notifier_call = NULL;
}
/* just clear all opregion memory pointers now */
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e0e437ba9e51..e8498a8cda3d 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -49,6 +49,7 @@ struct intel_opregion {
u32 vbt_size;
u32 *lid_state;
struct work_struct asle_work;
+ struct notifier_block acpi_notifier;
};
#define OPREGION_SIZE (8 * 1024)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index b443278e569c..4a9f139e7b73 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -375,26 +375,6 @@ out:
pipe_config->gmch_pfit.lvds_border_bits = border;
}
-enum drm_connector_status
-intel_panel_detect(struct drm_i915_private *dev_priv)
-{
- /* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
- return *dev_priv->opregion.lid_state & 0x1 ?
- connector_status_connected :
- connector_status_disconnected;
- }
-
- switch (i915_modparams.panel_ignore_lid) {
- case -2:
- return connector_status_connected;
- case -1:
- return connector_status_disconnected;
- default:
- return connector_status_unknown;
- }
-}
-
/**
* scale - scale values from one range to another
* @source_val: value in range [@source_min..@source_max]
@@ -406,11 +386,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
* Return @source_val in range [@source_min..@source_max] scaled to range
* [@target_min..@target_max].
*/
-static uint32_t scale(uint32_t source_val,
- uint32_t source_min, uint32_t source_max,
- uint32_t target_min, uint32_t target_max)
+static u32 scale(u32 source_val,
+ u32 source_min, u32 source_max,
+ u32 target_min, u32 target_max)
{
- uint64_t target_val;
+ u64 target_val;
WARN_ON(source_min > source_max);
WARN_ON(target_min > target_max);
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 39a4e4edda07..849e1b69ba73 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -30,160 +30,6 @@
#include <linux/debugfs.h>
#include "intel_drv.h"
-struct pipe_crc_info {
- const char *name;
- struct drm_i915_private *dev_priv;
- enum pipe pipe;
-};
-
-static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
- return -ENODEV;
-
- spin_lock_irq(&pipe_crc->lock);
-
- if (pipe_crc->opened) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EBUSY; /* already open */
- }
-
- pipe_crc->opened = true;
- filep->private_data = inode->i_private;
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->opened = false;
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-/* (6 fields, 8 chars each, space separated (5) + '\n') */
-#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
-/* account for \'0' */
-#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
-
-static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
-{
- lockdep_assert_held(&pipe_crc->lock);
- return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR);
-}
-
-static ssize_t
-i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
- loff_t *pos)
-{
- struct pipe_crc_info *info = filep->private_data;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
- char buf[PIPE_CRC_BUFFER_LEN];
- int n_entries;
- ssize_t bytes_read;
-
- /*
- * Don't allow user space to provide buffers not big enough to hold
- * a line of data.
- */
- if (count < PIPE_CRC_LINE_LEN)
- return -EINVAL;
-
- if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
- return 0;
-
- /* nothing to read */
- spin_lock_irq(&pipe_crc->lock);
- while (pipe_crc_data_count(pipe_crc) == 0) {
- int ret;
-
- if (filep->f_flags & O_NONBLOCK) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
- pipe_crc_data_count(pipe_crc), pipe_crc->lock);
- if (ret) {
- spin_unlock_irq(&pipe_crc->lock);
- return ret;
- }
- }
-
- /* We now have one or more entries to read */
- n_entries = count / PIPE_CRC_LINE_LEN;
-
- bytes_read = 0;
- while (n_entries > 0) {
- struct intel_pipe_crc_entry *entry =
- &pipe_crc->entries[pipe_crc->tail];
-
- if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR) < 1)
- break;
-
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- pipe_crc->tail = (pipe_crc->tail + 1) &
- (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-
- bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
- "%8u %8x %8x %8x %8x %8x\n",
- entry->frame, entry->crc[0],
- entry->crc[1], entry->crc[2],
- entry->crc[3], entry->crc[4]);
-
- spin_unlock_irq(&pipe_crc->lock);
-
- if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
- return -EFAULT;
-
- user_buf += PIPE_CRC_LINE_LEN;
- n_entries--;
-
- spin_lock_irq(&pipe_crc->lock);
- }
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return bytes_read;
-}
-
-static const struct file_operations i915_pipe_crc_fops = {
- .owner = THIS_MODULE,
- .open = i915_pipe_crc_open,
- .read = i915_pipe_crc_read,
- .release = i915_pipe_crc_release,
-};
-
-static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
- {
- .name = "i915_pipe_A_crc",
- .pipe = PIPE_A,
- },
- {
- .name = "i915_pipe_B_crc",
- .pipe = PIPE_B,
- },
- {
- .name = "i915_pipe_C_crc",
- .pipe = PIPE_C,
- },
-};
-
static const char * const pipe_crc_sources[] = {
"none",
"plane1",
@@ -197,29 +43,6 @@ static const char * const pipe_crc_sources[] = {
"auto",
};
-static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
-{
- BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
- return pipe_crc_sources[source];
-}
-
-static int display_crc_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- seq_printf(m, "%c %s\n", pipe_name(pipe),
- pipe_crc_source_name(dev_priv->pipe_crc[pipe].source));
-
- return 0;
-}
-
-static int display_crc_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, display_crc_ctl_show, inode->i_private);
-}
-
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
uint32_t *val)
{
@@ -616,177 +439,6 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
}
-static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source source)
-{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- enum intel_display_power_domain power_domain;
- u32 val = 0; /* shut up gcc */
- int ret;
-
- if (pipe_crc->source == source)
- return 0;
-
- /* forbid changing the source without going back to 'none' */
- if (pipe_crc->source && source)
- return -EINVAL;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
- return -EIO;
- }
-
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true);
- if (ret != 0)
- goto out;
-
- /* none -> real source transition */
- if (source) {
- struct intel_pipe_crc_entry *entries;
-
- DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
- pipe_name(pipe), pipe_crc_source_name(source));
-
- entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
- sizeof(pipe_crc->entries[0]),
- GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto out;
- }
-
- spin_lock_irq(&pipe_crc->lock);
- kfree(pipe_crc->entries);
- pipe_crc->entries = entries;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
- }
-
- pipe_crc->source = source;
-
- I915_WRITE(PIPE_CRC_CTL(pipe), val);
- POSTING_READ(PIPE_CRC_CTL(pipe));
-
- /* real source -> none transition */
- if (!source) {
- struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
-
- DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
- pipe_name(pipe));
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->base.state->active)
- intel_wait_for_vblank(dev_priv, pipe);
- drm_modeset_unlock(&crtc->base.mutex);
-
- spin_lock_irq(&pipe_crc->lock);
- entries = pipe_crc->entries;
- pipe_crc->entries = NULL;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
-
- kfree(entries);
-
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
- else if ((IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, false);
- }
-
- ret = 0;
-
-out:
- intel_display_power_put(dev_priv, power_domain);
-
- return ret;
-}
-
-/*
- * Parse pipe CRC command strings:
- * command: wsp* object wsp+ name wsp+ source wsp*
- * object: 'pipe'
- * name: (A | B | C)
- * source: (none | plane1 | plane2 | pf)
- * wsp: (#0x20 | #0x9 | #0xA)+
- *
- * eg.:
- * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
- * "pipe A none" -> Stop CRC
- */
-static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
-{
- int n_words = 0;
-
- while (*buf) {
- char *end;
-
- /* skip leading white space */
- buf = skip_spaces(buf);
- if (!*buf)
- break; /* end of buffer */
-
- /* find end of word */
- for (end = buf; *end && !isspace(*end); end++)
- ;
-
- if (n_words == max_words) {
- DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
- max_words);
- return -EINVAL; /* ran out of words[] before bytes */
- }
-
- if (*end)
- *end++ = '\0';
- words[n_words++] = buf;
- buf = end;
- }
-
- return n_words;
-}
-
-enum intel_pipe_crc_object {
- PIPE_CRC_OBJECT_PIPE,
-};
-
-static const char * const pipe_crc_objects[] = {
- "pipe",
-};
-
-static int
-display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
-{
- int i;
-
- i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf);
- if (i < 0)
- return i;
-
- *o = i;
- return 0;
-}
-
-static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
- const char *buf, enum pipe *pipe)
-{
- const char name = buf[0];
-
- if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes))
- return -EINVAL;
-
- *pipe = name - 'A';
-
- return 0;
-}
-
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
@@ -805,81 +457,6 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
- char *buf, size_t len)
-{
-#define N_WORDS 3
- int n_words;
- char *words[N_WORDS];
- enum pipe pipe;
- enum intel_pipe_crc_object object;
- enum intel_pipe_crc_source source;
-
- n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
- if (n_words != N_WORDS) {
- DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
- N_WORDS);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_object(words[0], &object) < 0) {
- DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) {
- DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_source(words[2], &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
- return -EINVAL;
- }
-
- return pipe_crc_set_source(dev_priv, pipe, source);
-}
-
-static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- char *tmpbuf;
- int ret;
-
- if (len == 0)
- return 0;
-
- if (len > PAGE_SIZE - 1) {
- DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
- PAGE_SIZE);
- return -E2BIG;
- }
-
- tmpbuf = memdup_user_nul(ubuf, len);
- if (IS_ERR(tmpbuf))
- return PTR_ERR(tmpbuf);
-
- ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
-
- kfree(tmpbuf);
- if (ret < 0)
- return ret;
-
- *offp += len;
- return len;
-}
-
-const struct file_operations i915_display_crc_ctl_fops = {
- .owner = THIS_MODULE,
- .open = display_crc_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = display_crc_ctl_write
-};
-
void intel_display_crc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -887,30 +464,8 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv)
for_each_pipe(dev_priv, pipe) {
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- pipe_crc->opened = false;
spin_lock_init(&pipe_crc->lock);
- init_waitqueue_head(&pipe_crc->wq);
- }
-}
-
-int intel_pipe_crc_create(struct drm_minor *minor)
-{
- struct drm_i915_private *dev_priv = to_i915(minor->dev);
- struct dentry *ent;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
- struct pipe_crc_info *info = &i915_pipe_crc_data[i];
-
- info->dev_priv = dev_priv;
- ent = debugfs_create_file(info->name, S_IRUGO,
- minor->debugfs_root, info,
- &i915_pipe_crc_fops);
- if (!ent)
- return -ENOMEM;
}
-
- return 0;
}
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 53aaaa3e6886..43ae9de12ba3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6264,42 +6264,15 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
return limits;
}
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int new_power;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
- new_power = rps->power;
- switch (rps->power) {
- case LOW_POWER:
- if (val > rps->efficient_freq + 1 &&
- val > rps->cur_freq)
- new_power = BETWEEN;
- break;
-
- case BETWEEN:
- if (val <= rps->efficient_freq &&
- val < rps->cur_freq)
- new_power = LOW_POWER;
- else if (val >= rps->rp0_freq &&
- val > rps->cur_freq)
- new_power = HIGH_POWER;
- break;
+ lockdep_assert_held(&rps->power.mutex);
- case HIGH_POWER:
- if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
- val < rps->cur_freq)
- new_power = BETWEEN;
- break;
- }
- /* Max/min bins are special */
- if (val <= rps->min_freq_softlimit)
- new_power = LOW_POWER;
- if (val >= rps->max_freq_softlimit)
- new_power = HIGH_POWER;
- if (new_power == rps->power)
+ if (new_power == rps->power.mode)
return;
/* Note the units here are not exactly 1us, but 1280ns. */
@@ -6362,12 +6335,71 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
- rps->power = new_power;
- rps->up_threshold = threshold_up;
- rps->down_threshold = threshold_down;
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(dev_priv, new_power);
+ mutex_unlock(&rps->power.mutex);
rps->last_adj = 0;
}
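
The switch retained in gen6_set_rps_thresholds() is a three-bin hysteresis: the threshold for leaving a bin is deliberately different from the one for entering it, so the controller does not flap between power modes at a boundary frequency. A standalone sketch of that decision logic follows, with illustrative struct and field names rather than the driver's actual types:

    enum rps_power_mode { LOW_POWER, BETWEEN, HIGH_POWER };

    struct rps_sketch {
            int cur_freq, efficient_freq, rp0_freq, rp1_freq;
            int min_freq_softlimit, max_freq_softlimit;
            enum rps_power_mode mode;
    };

    static enum rps_power_mode pick_power_bin(const struct rps_sketch *rps,
                                              int val)
    {
            enum rps_power_mode new_power = rps->mode;

            switch (rps->mode) {
            case LOW_POWER:
                    /* leave LOW only when clearly above the efficient point */
                    if (val > rps->efficient_freq + 1 && val > rps->cur_freq)
                            new_power = BETWEEN;
                    break;
            case BETWEEN:
                    if (val <= rps->efficient_freq && val < rps->cur_freq)
                            new_power = LOW_POWER;
                    else if (val >= rps->rp0_freq && val > rps->cur_freq)
                            new_power = HIGH_POWER;
                    break;
            case HIGH_POWER:
                    /* drop out of HIGH only below the rp0/rp1 midpoint */
                    if (val < (rps->rp1_freq + rps->rp0_freq) / 2 &&
                        val < rps->cur_freq)
                            new_power = BETWEEN;
                    break;
            }

            /* Max/min bins are special: softlimits override the hysteresis */
            if (val <= rps->min_freq_softlimit)
                    new_power = LOW_POWER;
            if (val >= rps->max_freq_softlimit)
                    new_power = HIGH_POWER;

            return new_power;
    }
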
+void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
+{
+ struct intel_rps *rps = &i915->gt_pm.rps;
+
+ if (INTEL_GEN(i915) < 6)
+ return;
+
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
+ rps_set_power(i915, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
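
intel_rps_mark_interactive() keeps a nesting count rather than a bool so that overlapping interactive phases compose: the boost holds until every mark(true) has been balanced by a mark(false). A simplified userspace sketch of the pattern, with a pthread mutex standing in for the kernel mutex; unlike the driver, this version also drops the boost on the final release instead of deferring that to the next frequency update:

    #include <pthread.h>
    #include <stdbool.h>

    struct power_sketch {
            pthread_mutex_t lock;
            unsigned int interactive;       /* nesting count, not a flag */
            bool boosted;
    };

    static void mark_interactive(struct power_sketch *pc, bool interactive)
    {
            pthread_mutex_lock(&pc->lock);
            if (interactive) {
                    /* the first taker applies the boost */
                    if (!pc->interactive++)
                            pc->boosted = true;
            } else {
                    /* callers must balance; the driver asserts this
                     * with GEM_BUG_ON */
                    if (!--pc->interactive)
                            pc->boosted = false;
            }
            pthread_mutex_unlock(&pc->lock);
    }
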
+
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -6780,7 +6812,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
u8 freq = rps->cur_freq;
/* force a reset */
- rps->power = -1;
+ rps->power.mode = -1;
rps->cur_freq = -1;
if (set(dev_priv, freq))
@@ -7347,11 +7379,11 @@ out:
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
- if (WARN_ON(!dev_priv->vlv_pctx))
- return;
+ struct drm_i915_gem_object *pctx;
- i915_gem_object_put(dev_priv->vlv_pctx);
- dev_priv->vlv_pctx = NULL;
+ pctx = fetch_and_zero(&dev_priv->vlv_pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
}
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
@@ -9604,6 +9636,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->pcu_lock);
+ mutex_init(&dev_priv->gt_pm.rps.power.mutex);
atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index db27f2faa1de..4bd5768731ee 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,51 +56,10 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static inline enum intel_display_power_domain
-psr_aux_domain(struct intel_dp *intel_dp)
-{
- /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
- * However, for non-A AUX ports the corresponding non-EDP transcoders
- * would have already enabled power well 2 and DC_OFF. This means we can
- * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
- * specific AUX_IO reference without powering up any extra wells.
- * Note that PSR is enabled only on Port A even though this function
- * returns the correct domain for other ports too.
- */
- return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
- intel_dp->aux_power_domain;
-}
-
-static void psr_aux_io_power_get(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
- if (INTEL_GEN(dev_priv) < 10)
- return;
-
- intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
-}
-
-static void psr_aux_io_power_put(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
- if (INTEL_GEN(dev_priv) < 10)
- return;
-
- intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
-}
-
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
u32 debug_mask, mask;
- /* No PSR interrupts on VLV/CHV */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return;
-
mask = EDP_PSR_ERROR(TRANSCODER_EDP);
debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
@@ -201,15 +160,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
-static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
-{
- uint8_t psr_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
- return false;
- return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
uint8_t dprx = 0;
@@ -232,13 +182,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
- u8 val = 0;
+ u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- DRM_ERROR("Unable to get sink synchronization latency\n");
+ DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
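
The reworked intel_dp_get_sink_sync_latency() seeds the result with the most pessimistic legal value so a failed DPCD read degrades safely instead of under-waiting. A sketch of the pattern, with an illustrative mask value and a read callback standing in for the AUX accessor (which, like drm_dp_dpcd_readb(), returns 1 on success):

    #include <stdint.h>

    #define RESYNC_FRAME_COUNT_MASK 0x0f    /* illustrative, not the DPCD value */

    static uint8_t sink_sync_latency(int (*read_byte)(uint8_t *out))
    {
            uint8_t tmp;
            uint8_t val = 8;        /* assume the worst: 8 frames */

            if (read_byte(&tmp) == 1)
                    val = tmp & RESYNC_FRAME_COUNT_MASK;

            return val;
    }
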
@@ -250,13 +200,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0]) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ if (!intel_dp->psr_dpcd[0])
+ return;
+ DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
+
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ return;
}
+ dev_priv->psr.sink_support = true;
+ dev_priv->psr.sink_sync_latency =
+ intel_dp_get_sink_sync_latency(intel_dp);
if (INTEL_GEN(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ bool y_req = intel_dp->psr_dpcd[1] &
+ DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ bool alpm = intel_dp_get_alpm_status(intel_dp);
+
/*
* All panels that support PSR version 03h (PSR2 +
* Y-coordinate) can handle Y-coordinates in VSC but we are
@@ -268,49 +230,19 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- dev_priv->psr.sink_psr2_support =
- intel_dp_get_y_coord_required(intel_dp);
- DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
- ? "supported" : "not supported");
+ dev_priv->psr.sink_psr2_support = y_req && alpm;
+ DRM_DEBUG_KMS("PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.alpm =
- intel_dp_get_alpm_status(intel_dp);
- dev_priv->psr.sink_sync_latency =
- intel_dp_get_sink_sync_latency(intel_dp);
}
}
}
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t val;
-
- val = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t val;
-
- /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
- val = I915_READ(VLV_VSCSDP(crtc->pipe));
- val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
- val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
- I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
-}
-
-static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
@@ -341,12 +273,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -373,7 +299,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
/* Start with bits set for DDI_AUX_CTL register */
- aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
aux_clock_divider);
/* Select only valid bits for SRD_AUX_CTL */
@@ -381,7 +307,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}
-static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -389,95 +315,64 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
-
- if (dev_priv->psr.psr2_enabled)
+ if (dev_priv->psr.psr2_enabled) {
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2;
+ }
+
if (dev_priv->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+ if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void vlv_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
- /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
- I915_WRITE(VLV_PSRCTL(crtc->pipe),
- VLV_EDP_PSR_MODE_SW_TIMER |
- VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
- VLV_EDP_PSR_ENABLE);
-}
-
-static void vlv_psr_activate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- /*
- * Let's do the transition from PSR_state 1 (inactive) to
- * PSR_state 2 (transition to active - static frame transmission).
- * Then Hardware is responsible for the transition to
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
- */
- I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
- VLV_EDP_PSR_ACTIVE_ENTRY);
-}
-
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
- uint32_t max_sleep_time = 0x1f;
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val = EDP_PSR_ENABLE;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ /* sink_sync_latency of 8 means source has to wait for more than 8
+ * frames; we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
- if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
- val |= EDP_PSR_TP1_TIME_2500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
- val |= EDP_PSR_TP1_TIME_500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+ val |= EDP_PSR_TP1_TIME_0us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP1_TIME_500us;
else
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_2500us;
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR_TP2_TP3_TIME_2500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR_TP2_TP3_TIME_500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
else
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
@@ -485,6 +380,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ if (INTEL_GEN(dev_priv) >= 8)
+ val |= EDP_PSR_CRC_ENABLE;
+
val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
I915_WRITE(EDP_PSR_CTL, val);
}
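
The TP1/TP2_TP3 rework above switches the VBT field to plain microseconds and rounds it up to the nearest of the four wakeup times the hardware can express. A sketch of that bucketing; the bit encodings below are illustrative placeholders, not the real EDP_PSR_CTL field values:

    #include <stdint.h>

    #define TP1_TIME_0us    (0u << 4)       /* illustrative encodings */
    #define TP1_TIME_100us  (1u << 4)
    #define TP1_TIME_500us  (2u << 4)
    #define TP1_TIME_2500us (3u << 4)

    static uint32_t tp1_time_bits(unsigned int wakeup_time_us)
    {
            if (wakeup_time_us == 0)
                    return TP1_TIME_0us;
            else if (wakeup_time_us <= 100) /* round up to the next bucket */
                    return TP1_TIME_100us;
            else if (wakeup_time_us <= 500)
                    return TP1_TIME_500us;
            else
                    return TP1_TIME_2500us;
    }
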
@@ -494,15 +392,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ u32 val;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -513,36 +411,19 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR2_TP2_TIME_2500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR2_TP2_TIME_500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
- val |= EDP_PSR2_TP2_TIME_100;
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ val |= EDP_PSR2_TP2_TIME_50us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR2_TP2_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR2_TP2_TIME_500us;
else
- val |= EDP_PSR2_TP2_TIME_50;
+ val |= EDP_PSR2_TP2_TIME_2500us;
I915_WRITE(EDP_PSR2_CTL, val);
}
-static void hsw_psr_activate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /* On HSW+ after we enable PSR on source it will activate it
- * as soon as it match configure idle_frame count. So
- * we just actually enable it here on activation time.
- */
-
- /* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_enabled)
- hsw_activate_psr2(intel_dp);
- else
- hsw_activate_psr1(intel_dp);
-}
-
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -602,17 +483,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
- if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
+ if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return;
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->psr.link_standby) {
- DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
- return;
- }
-
if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) {
@@ -640,11 +515,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
- return;
- }
-
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
@@ -656,27 +526,29 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (dev_priv->psr.psr2_enabled)
+ if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- else
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
- dev_priv->psr.activate(intel_dp);
+ /* psr1 and psr2 are mutually exclusive.*/
+ if (dev_priv->psr.psr2_enabled)
+ hsw_activate_psr2(intel_dp);
+ else
+ hsw_activate_psr1(intel_dp);
+
dev_priv->psr.active = true;
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- psr_aux_io_power_get(intel_dp);
-
/* Only HSW and BDW have PSR AUX registers that need to be set up. SKL+
* use hardcoded values for PSR AUX transactions
*/
@@ -712,7 +584,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP);
}
}
@@ -746,64 +619,19 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.setup_vsc(intel_dp, crtc_state);
- dev_priv->psr.enable_sink(intel_dp);
- dev_priv->psr.enable_source(intel_dp, crtc_state);
+ intel_psr_setup_vsc(intel_dp, crtc_state);
+ intel_psr_enable_sink(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
dev_priv->psr.enabled = intel_dp;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_psr_activate(intel_dp);
- } else {
- /*
- * FIXME: Activation should happen immediately since this
- * function is just called after pipe is fully trained and
- * enabled.
- * However on some platforms we face issues when first
- * activation follows a modeset so quickly.
- * - On VLV/CHV we get bank screen on first activation
- * - On HSW/BDW we get a recoverable frozen screen until
- * next exit-activate sequence.
- */
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
- }
+ intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void vlv_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- uint32_t val;
-
- if (dev_priv->psr.active) {
- /* Put VLV PSR back to PSR_state 0 (disabled). */
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(crtc->pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1))
- WARN(1, "PSR transition took longer than expected\n");
-
- val = I915_READ(VLV_PSRCTL(crtc->pipe));
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- val &= ~VLV_EDP_PSR_ENABLE;
- val &= ~VLV_EDP_PSR_MODE_MASK;
- I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
- }
-}
-
-static void hsw_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
+static void
+intel_psr_disable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -842,8 +670,25 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.enabled)
+ return;
+
+ intel_psr_disable_source(intel_dp);
- psr_aux_io_power_put(intel_dp);
+ /* Disable PSR on Sink */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
+ dev_priv->psr.enabled = NULL;
}
/**
@@ -867,23 +712,49 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ intel_psr_disable_locked(intel_dp);
+ mutex_unlock(&dev_priv->psr.lock);
+ cancel_work_sync(&dev_priv->psr.work);
+}
- dev_priv->psr.disable_source(intel_dp, old_crtc_state);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg;
+ u32 mask;
- /* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ if (!new_crtc_state->has_psr)
+ return 0;
- dev_priv->psr.enabled = NULL;
- mutex_unlock(&dev_priv->psr.lock);
+ /*
+ * The sole user right now is intel_pipe_update_start(),
+ * which won't race with psr_enable/disable, which is
+ * where psr2_enabled is written to. So, we don't need
+ * to acquire the psr.lock. More importantly, we want the
+ * latency inside intel_pipe_update_start() to be as low
+ * as possible, so no need to acquire psr.lock when it is
+ * not needed and will induce latencies in the atomic
+ * update path.
+ */
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
+ }
- cancel_delayed_work_sync(&dev_priv->psr.work);
+ /*
+ * Max time for PSR to idle = Inverse of the refresh rate +
+ * 6 ms of exit training time + 1.5 ms of aux channel
+ * handshake. 50 msec is defensive enough to cover everything.
+ */
+ return intel_wait_for_register(dev_priv, reg, mask,
+ EDP_PSR_STATUS_STATE_IDLE, 50);
}
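
intel_wait_for_register() is essentially a masked poll with a timeout. A userspace sketch of the equivalent loop under stated assumptions: read_reg is passed as a callback, CLOCK_MONOTONIC stands in for kernel timekeeping, and the kernel helper's mix of busy-waiting and sleeping is omitted:

    #include <stdint.h>
    #include <time.h>

    static long elapsed_ms(const struct timespec *a, const struct timespec *b)
    {
            return (b->tv_sec - a->tv_sec) * 1000 +
                   (b->tv_nsec - a->tv_nsec) / 1000000;
    }

    static int wait_for_value(uint32_t (*read_reg)(void), uint32_t mask,
                              uint32_t value, long timeout_ms)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            do {
                    if ((read_reg() & mask) == value)
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while (elapsed_ms(&start, &now) < timeout_ms);

            /* one final read in case we were scheduled out at the deadline */
            return (read_reg() & mask) == value ? 0 : -1;
    }
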
-static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
struct intel_dp *intel_dp;
i915_reg_t reg;
@@ -894,21 +765,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
if (!intel_dp)
return false;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
- mask = EDP_PSR2_STATUS_STATE_MASK;
- } else {
- reg = EDP_PSR_STATUS;
- mask = EDP_PSR_STATUS_STATE_MASK;
- }
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- struct drm_crtc *crtc =
- dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- reg = VLV_PSRSTAT(pipe);
- mask = VLV_EDP_PSR_IN_TRANS;
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&dev_priv->psr.lock);
@@ -925,17 +787,20 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
+ container_of(work, typeof(*dev_priv), psr.work);
mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled)
+ goto unlock;
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (!psr_wait_for_idle(dev_priv))
+ if (!__psr_wait_for_idle_locked(dev_priv))
goto unlock;
/*
@@ -943,7 +808,7 @@ static void intel_psr_work(struct work_struct *work)
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
- if (dev_priv->psr.busy_frontbuffer_bits)
+ if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
goto unlock;
intel_psr_activate(dev_priv->psr.enabled);
@@ -953,103 +818,24 @@ unlock:
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 val;
if (!dev_priv->psr.active)
return;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * Here we do the transition drirectly from
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
- * PSR_state 5 (exit).
- * PSR State 4 (active with single frame update) can be skipped.
- * On PSR_state 5 (exit) Hardware is responsible to transition
- * back to PSR_state 1 (inactive).
- * Now we are at Same state after vlv_psr_enable_source.
- */
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- I915_WRITE(VLV_PSRCTL(pipe), val);
-
- /*
- * Send AUX wake up - Spec says after transitioning to PSR
- * active we have to send AUX wake up by writing 01h in DPCD
- * 600h of sink device.
- * XXX: This might slow down the transition, but without this
- * HW doesn't complete the transition to PSR_state 1 and we
- * never get the screen updated.
- */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
- DP_SET_POWER_D0);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
-
dev_priv->psr.active = false;
}
/**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits)
-{
- struct drm_crtc *crtc;
- enum pipe pipe;
- u32 val;
-
- if (!CAN_PSR(dev_priv))
- return;
-
- /*
- * Single frame update is already supported on BDW+ but it requires
- * many W/A and it isn't really needed.
- */
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- return;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * We need to set this bit before writing registers for a flip.
- * This bit will be self-clear when it gets to the PSR active state.
- */
- I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
- }
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
* intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
@@ -1071,7 +857,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1114,7 +900,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1131,8 +917,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
- if (dev_priv->psr.psr2_enabled ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (dev_priv->psr.psr2_enabled) {
intel_psr_exit(dev_priv);
} else {
/*
@@ -1149,9 +934,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- if (!work_busy(&dev_priv->psr.work.work))
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
+ schedule_work(&dev_priv->psr.work);
mutex_unlock(&dev_priv->psr.lock);
}
@@ -1184,38 +967,64 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- /* On VLV and CHV only standby mode is supported. */
- dev_priv->psr.link_standby = true;
else
/* For new platforms let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
- /* Override link_standby x link_off defaults */
- if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing link standby\n");
- dev_priv->psr.link_standby = true;
- }
- if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing main link off\n");
- dev_priv->psr.link_standby = false;
+ INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 val;
+ const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
+ DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+ DP_PSR_LINK_CRC_ERROR;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled != intel_dp)
+ goto exit;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_STATUS dpcd read failed\n");
+ goto exit;
}
- INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
- mutex_init(&dev_priv->psr.lock);
+ if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
+ DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ intel_psr_disable_locked(intel_dp);
+ }
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->psr.enable_source = vlv_psr_enable_source;
- dev_priv->psr.disable_source = vlv_psr_disable;
- dev_priv->psr.enable_sink = vlv_psr_enable_sink;
- dev_priv->psr.activate = vlv_psr_activate;
- dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
- } else {
- dev_priv->psr.has_hw_tracking = true;
- dev_priv->psr.enable_source = hsw_psr_enable_source;
- dev_priv->psr.disable_source = hsw_psr_disable;
- dev_priv->psr.enable_sink = hsw_psr_enable_sink;
- dev_priv->psr.activate = hsw_psr_activate;
- dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
+ goto exit;
}
+
+ if (val & DP_PSR_RFB_STORAGE_ERROR)
+ DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+ if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+ if (val & DP_PSR_LINK_CRC_ERROR)
+ DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+
+ if (val & ~errors)
+ DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
+ val & ~errors);
+ if (val & errors)
+ intel_psr_disable_locked(intel_dp);
+ /* clear status register */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+
+ /* TODO: handle PSR2 errors */
+exit:
+ mutex_unlock(&psr->lock);
}
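
The tail of the new intel_psr_short_pulse() is a small error-triage routine: each known error bit is logged individually, unknown bits are reported as unhandled, any known error disables PSR, and the register is written back to clear it. A sketch of the triage step with illustrative bit values (the real ones are the DP_PSR_* DPCD definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define ERR_RFB_STORAGE (1u << 1)       /* illustrative bit values */
    #define ERR_VSC_SDP     (1u << 2)
    #define ERR_LINK_CRC    (1u << 3)

    /* Returns nonzero when PSR should be disabled. */
    static int triage_psr_errors(uint8_t val)
    {
            const uint8_t errors = ERR_RFB_STORAGE | ERR_VSC_SDP |
                                   ERR_LINK_CRC;

            if (val & ERR_RFB_STORAGE)
                    fprintf(stderr, "PSR RFB storage error\n");
            if (val & ERR_VSC_SDP)
                    fprintf(stderr, "PSR VSC SDP uncorrectable error\n");
            if (val & ERR_LINK_CRC)
                    fprintf(stderr, "PSR link CRC error\n");

            if (val & ~errors)
                    fprintf(stderr, "unhandled PSR errors %#x\n",
                            val & ~errors);

            return val & errors;
    }
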
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8f19349a6055..6a8f27d0a742 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -387,8 +387,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = RING_HWS_PGA(engine->mmio_base);
}
- if (INTEL_GEN(dev_priv) >= 6)
- I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+ if (INTEL_GEN(dev_priv) >= 6) {
+ u32 mask = ~0u;
+
+ /*
+ * Keep the render interrupt unmasked as this papers over
+ * lost interrupts following a reset.
+ */
+ if (engine->id == RCS)
+ mask &= ~BIT(0);
+
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
+ }
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
@@ -496,6 +506,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
engine->name, I915_READ_HEAD(engine));
+ /* Check that the ring offsets point within the ring! */
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
intel_ring_update_space(ring);
I915_WRITE_HEAD(engine, ring->head);
I915_WRITE_TAIL(engine, ring->tail);
@@ -520,8 +534,6 @@ static int init_ring_common(struct intel_engine_cs *engine)
goto out;
}
- intel_engine_init_hangcheck(engine);
-
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
@@ -531,16 +543,33 @@ out:
return ret;
}
-static void reset_ring_common(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
{
- /*
- * RC6 must be prevented until the reset is complete and the engine
- * reinitialised. If it occurs in the middle of this sequence, the
- * state written to/loaded from the power context is ill-defined (e.g.
- * the PP_BASE_DIR may be lost).
- */
- assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+ intel_engine_stop_cs(engine);
+
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ return i915_gem_find_active_request(engine);
+}
+
+static void skip_request(struct i915_request *rq)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset32(vaddr + head, MI_NOOP,
+ (rq->ring->size - head) / sizeof(u32));
+ head = 0;
+ }
+ memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
+}
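
skip_request() neuters the span between a hung request's infix and its postfix by overwriting it with MI_NOOPs, splitting the fill in two when the span wraps past the end of the ring. A sketch of that wrap handling, using byte offsets like the original and open-coding memset32() for a userspace build:

    #include <stddef.h>
    #include <stdint.h>

    #define MI_NOOP 0u

    static void memset32_sketch(void *p, uint32_t v, size_t n)
    {
            uint32_t *dw = p;

            while (n--)
                    *dw++ = v;
    }

    /* head/tail are byte offsets into the ring, both already wrapped */
    static void skip_span(void *vaddr, size_t ring_size,
                          size_t head, size_t tail)
    {
            if (tail < head) {      /* span wraps: fill to the end first */
                    memset32_sketch((char *)vaddr + head, MI_NOOP,
                                    (ring_size - head) / sizeof(uint32_t));
                    head = 0;
            }
            memset32_sketch((char *)vaddr + head, MI_NOOP,
                            (tail - head) / sizeof(uint32_t));
    }
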
+
+static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
/*
* Try to restore the logical GPU state to match the continuation
@@ -556,47 +585,18 @@ static void reset_ring_common(struct intel_engine_cs *engine,
* If the request was innocent, we try to replay the request with
* the restored context.
*/
- if (request) {
- struct drm_i915_private *dev_priv = request->i915;
- struct intel_context *ce = to_intel_context(request->ctx,
- engine);
- struct i915_hw_ppgtt *ppgtt;
-
- if (ce->state) {
- I915_WRITE(CCID,
- i915_ggtt_offset(ce->state) |
- BIT(8) /* must be set! */ |
- CCID_EXTENDED_STATE_SAVE |
- CCID_EXTENDED_STATE_RESTORE |
- CCID_EN);
- }
-
- ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
- if (ppgtt) {
- u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
-
- /* Wait for the PD reload to complete */
- if (intel_wait_for_register(dev_priv,
- RING_PP_DIR_BASE(engine),
- BIT(0), 0,
- 10))
- DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
-
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
- }
-
+ if (rq) {
/* If the rq hung, jump to its breadcrumb and skip the batch */
- if (request->fence.error == -EIO)
- request->ring->head = request->postfix;
- } else {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
+ rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
+ if (rq->fence.error == -EIO)
+ skip_request(rq);
}
}
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
@@ -1033,6 +1033,8 @@ int intel_ring_pin(struct intel_ring *ring,
flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
@@ -1066,6 +1068,8 @@ err:
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
@@ -1093,6 +1097,7 @@ void intel_ring_unpin(struct intel_ring *ring)
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -1102,10 +1107,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- /* mark ring buffers as read-only from GPU side by default */
- obj->gt_ro = 1;
+ /*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
goto err;
@@ -1169,10 +1178,46 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static int context_pin(struct intel_context *ce)
+static void intel_ring_context_destroy(struct intel_context *ce)
{
- struct i915_vma *vma = ce->state;
- int ret;
+ GEM_BUG_ON(ce->pin_count);
+
+ if (!ce->state)
+ return;
+
+ GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+ i915_gem_object_put(ce->state->obj);
+}
+
+static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int err = 0;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ err = gen6_ppgtt_pin(ppgtt);
+
+ return err;
+}
+
+static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ gen6_ppgtt_unpin(ppgtt);
+}
+
+static int __context_pin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = ce->state;
+ if (!vma)
+ return 0;
/*
* Clear this page out of any CPU caches for coherent swap-in/out.
@@ -1180,13 +1225,43 @@ static int context_pin(struct intel_context *ce)
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (ret)
- return ret;
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
}
- return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+ PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ return err;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ vma->obj->pin_global++;
+
+ return 0;
+}
+
+static void __context_unpin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+
+ vma = ce->state;
+ if (!vma)
+ return;
+
+ vma->obj->pin_global--;
+ i915_vma_unpin(vma);
+}
+
+static void intel_ring_context_unpin(struct intel_context *ce)
+{
+ __context_unpin_ppgtt(ce->gem_context);
+ __context_unpin(ce);
+
+ i915_gem_context_put(ce->gem_context);
}
static struct i915_vma *
@@ -1243,7 +1318,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
}
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1258,81 +1333,79 @@ err_obj:
return ERR_PTR(err);
}
-static struct intel_ring *
-intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int ret;
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ int err;
if (!ce->state && engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ err = PTR_ERR(vma);
goto err;
}
ce->state = vma;
}
- if (ce->state) {
- ret = context_pin(ce);
- if (ret)
- goto err;
+ err = __context_pin(ce);
+ if (err)
+ goto err;
- ce->state->obj->pin_global++;
- }
+ err = __context_pin_ppgtt(ce->gem_context);
+ if (err)
+ goto err_unpin;
i915_gem_context_get(ctx);
-out:
/* One ringbuffer to rule them all */
- return engine->buffer;
+ GEM_BUG_ON(!engine->buffer);
+ ce->ring = engine->buffer;
+ return ce;
+
+err_unpin:
+ __context_unpin(ce);
err:
ce->pin_count = 0;
- return ERR_PTR(ret);
+ return ERR_PTR(err);
}
-static void intel_ring_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops ring_context_ops = {
+ .unpin = intel_ring_context_unpin,
+ .destroy = intel_ring_context_destroy,
+};
+
+static struct intel_context *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- if (ce->state) {
- ce->state->obj->pin_global--;
- i915_vma_unpin(ce->state);
- }
+ ce->ops = &ring_context_ops;
- i915_gem_context_put(ctx);
+ return __ring_context_pin(engine, ctx, ce);
}
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
- struct intel_ring *ring;
struct i915_timeline *timeline;
+ struct intel_ring *ring;
+ unsigned int size;
int err;
intel_engine_setup_common(engine);
- err = intel_engine_init_common(engine);
- if (err)
- goto err;
-
timeline = i915_timeline_create(engine->i915, engine->name);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
@@ -1354,8 +1427,23 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
+ size = PAGE_SIZE;
+ if (HAS_BROKEN_CS_TLB(engine->i915))
+ size = I830_WA_SIZE;
+ err = intel_engine_create_scratch(engine, size);
+ if (err)
+ goto err_unpin;
+
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err_scratch;
+
return 0;
+err_scratch:
+ intel_engine_cleanup_scratch(engine);
+err_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_free(ring);
err:
@@ -1392,6 +1480,48 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
intel_ring_reset(engine->buffer, 0);
}
+static int load_pd_dir(struct i915_request *rq,
+ const struct i915_hw_ppgtt *ppgtt)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = PP_DIR_DCLV_2G;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = ppgtt->pd.base.ggtt_offset << 10;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int flush_pd_dir(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Stall until the page table load is complete */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
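
load_pd_dir()/flush_pd_dir() show a common command-streamer idiom: write a register with MI_LOAD_REGISTER_IMM, then force serialization by reading it back to scratch memory with MI_STORE_REGISTER_MEM. A sketch of the two emitters; the opcode encodings are simplified assumptions (the LRI shape roughly follows i915's macro, the SRM flag bits are elided):

    #include <stdint.h>

    /* Simplified MI encodings: opcode in bits 28:23, DWORD length below */
    #define MI_LOAD_REGISTER_IMM(n) ((0x22u << 23) | (2u * (n) - 1))
    #define MI_STORE_REGISTER_MEM   ((0x24u << 23) | 1u)

    static uint32_t *emit_reg_write(uint32_t *cs, uint32_t reg, uint32_t val)
    {
            *cs++ = MI_LOAD_REGISTER_IMM(1);
            *cs++ = reg;
            *cs++ = val;
            return cs;
    }

    /* The read-back stalls the CS until the preceding write has landed */
    static uint32_t *emit_reg_readback(uint32_t *cs, uint32_t reg,
                                       uint32_t scratch_addr)
    {
            *cs++ = MI_STORE_REGISTER_MEM;
            *cs++ = reg;
            *cs++ = scratch_addr;
            return cs;
    }
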
+
static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
@@ -1402,6 +1532,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
INTEL_INFO(i915)->num_rings - 1 :
0;
+ bool force_restore = false;
int len;
u32 *cs;
@@ -1415,6 +1546,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
len = 4;
if (IS_GEN7(i915))
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+ if (flags & MI_FORCE_RESTORE) {
+ GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
+ flags &= ~MI_FORCE_RESTORE;
+ force_restore = true;
+ len += 2;
+ }
cs = intel_ring_begin(rq, len);
if (IS_ERR(cs))
@@ -1439,9 +1576,29 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
}
}
+ if (force_restore) {
+ /*
+ * The HW doesn't handle being told to restore the current
+ * context very well. Quite often it likes goes to go off and
+ * sulk, especially when it is meant to be reloading PP_DIR.
+ * A very simple fix to force the reload is to simply switch
+ * away from the current context and back again.
+ *
+ * Note that the kernel_context will contain random state
+ * following the INHIBIT_RESTORE. We accept this since we
+ * never use the kernel_context state; it is merely a
+ * placeholder we use to flush other contexts.
+ */
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
+ engine)->state) |
+ MI_MM_SPACE_GTT |
+ MI_RESTORE_INHIBIT;
+ }
+
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+ *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1509,31 +1666,28 @@ static int remap_l3(struct i915_request *rq, int slice)
static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *to_ctx = rq->ctx;
- struct i915_hw_ppgtt *to_mm =
- to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
- struct i915_gem_context *from_ctx = engine->legacy_active_context;
- struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+ struct i915_gem_context *ctx = rq->gem_context;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ unsigned int unwind_mm = 0;
u32 hw_flags = 0;
int ret, i;
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- if (to_mm != from_mm ||
- (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
- trace_switch_mm(engine, to_ctx);
- ret = to_mm->switch_mm(to_mm, rq);
+ if (ppgtt) {
+ ret = load_pd_dir(rq, ppgtt);
if (ret)
goto err;
- to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
- engine->legacy_active_ppgtt = to_mm;
- hw_flags = MI_FORCE_RESTORE;
+ if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
+ unwind_mm = intel_engine_flag(engine);
+ ppgtt->pd_dirty_rings &= ~unwind_mm;
+ hw_flags = MI_FORCE_RESTORE;
+ }
}
- if (to_intel_context(to_ctx, engine)->state &&
- (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+ if (rq->hw_context->state) {
GEM_BUG_ON(engine->id != RCS);
/*
@@ -1543,35 +1697,38 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
- if (i915_gem_context_is_kernel(to_ctx))
+ if (i915_gem_context_is_kernel(ctx))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
if (ret)
goto err_mm;
+ }
- engine->legacy_active_context = to_ctx;
+ if (ppgtt) {
+ ret = flush_pd_dir(rq);
+ if (ret)
+ goto err_mm;
}
- if (to_ctx->remap_slice) {
+ if (ctx->remap_slice) {
for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(to_ctx->remap_slice & BIT(i)))
+ if (!(ctx->remap_slice & BIT(i)))
continue;
ret = remap_l3(rq, i);
if (ret)
- goto err_ctx;
+ goto err_mm;
}
- to_ctx->remap_slice = 0;
+ ctx->remap_slice = 0;
}
return 0;
-err_ctx:
- engine->legacy_active_context = from_ctx;
err_mm:
- engine->legacy_active_ppgtt = from_mm;
+ if (unwind_mm)
+ ppgtt->pd_dirty_rings |= unwind_mm;
err:
return ret;
}
@@ -1580,7 +1737,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -2006,11 +2163,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
intel_ring_init_semaphores(dev_priv, engine);
engine->init_hw = init_ring_common;
- engine->reset_hw = reset_ring_common;
+ engine->reset.prepare = reset_prepare;
+ engine->reset.reset = reset_ring;
+ engine->reset.finish = reset_finish;
engine->context_pin = intel_ring_context_pin;
- engine->context_unpin = intel_ring_context_unpin;
-
engine->request_alloc = ring_request_alloc;
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
@@ -2074,16 +2231,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (ret)
return ret;
- if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- return ret;
- } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
- ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
- if (ret)
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 010750e8ee44..f5ffa6d31e82 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -122,7 +122,8 @@ struct intel_engine_hangcheck {
int deadlock;
struct intel_instdone instdone;
struct i915_request *active_request;
- bool stalled;
+ bool stalled:1;
+ bool wedged:1;
};
struct intel_ring {
@@ -192,6 +193,11 @@ struct i915_priolist {
int priority;
};
+struct st_preempt_hang {
+ struct completion completion;
+ bool inject_hang;
+};
+
/**
* struct intel_engine_execlists - execlist submission queue and port state
*
@@ -291,32 +297,49 @@ struct intel_engine_execlists {
/**
* @queue: queue of requests, in priority lists
*/
- struct rb_root queue;
+ struct rb_root_cached queue;
/**
- * @first: leftmost level in priority @queue
+ * @csb_read: control register for Context Switch buffer
+ *
+ * Note this register is always in mmio.
*/
- struct rb_node *first;
+ u32 __iomem *csb_read;
/**
- * @fw_domains: forcewake domains for irq tasklet
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
*/
- unsigned int fw_domains;
+ u32 *csb_write;
/**
- * @csb_head: context status buffer head
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these registers may be either mmio or HWSP shadow.
*/
- unsigned int csb_head;
+ u32 *csb_status;
/**
- * @csb_use_mmio: access csb through mmio, instead of hwsp
+ * @preempt_complete_status: expected CSB upon completing preemption
*/
- bool csb_use_mmio;
+ u32 preempt_complete_status;
/**
- * @preempt_complete_status: expected CSB upon completing preemption
+ * @csb_write_reset: reset value for CSB write pointer
+ *
+ * As the CSB write pointer may be either in HWSP or as a field
+ * inside an mmio register, we want to reprogram it slightly
+ * differently to avoid later confusion.
*/
- u32 preempt_complete_status;
+ u32 csb_write_reset;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
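With CSB access now abstracted behind csb_read/csb_write/csb_status, an event-drain loop no longer needs to care whether the buffer is backed by mmio or the HWSP. A minimal sketch, assuming GEN8_CSB_ENTRIES (6) and a hypothetical handle_csb_event() consumer:

static void drain_csb(struct intel_engine_execlists *execlists)
{
	u8 head = execlists->csb_head;
	u8 tail = READ_ONCE(*execlists->csb_write); /* HWSP shadow or mmio alias */

	while (head != tail) {
		if (++head == GEN8_CSB_ENTRIES)
			head = 0;
		/* each entry is a (status, context-id) dword pair */
		handle_csb_event(execlists->csb_status[2 * head],
				 execlists->csb_status[2 * head + 1]);
	}
	execlists->csb_head = head;
}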
#define INTEL_ENGINE_CS_MAX_NAME 8
@@ -342,11 +365,10 @@ struct intel_engine_cs {
struct i915_timeline timeline;
struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
- atomic_t irq_count;
unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
-#define ENGINE_IRQ_EXECLIST 1
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -378,6 +400,7 @@ struct intel_engine_cs {
unsigned int hangcheck_interrupts;
unsigned int irq_enabled;
+ unsigned int irq_count;
bool irq_armed : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
@@ -423,18 +446,22 @@ struct intel_engine_cs {
void (*irq_disable)(struct intel_engine_cs *engine);
int (*init_hw)(struct intel_engine_cs *engine);
- void (*reset_hw)(struct intel_engine_cs *engine,
- struct i915_request *rq);
+
+ struct {
+ struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine,
+ struct i915_request *rq);
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
void (*park)(struct intel_engine_cs *engine);
void (*unpark)(struct intel_engine_cs *engine);
void (*set_default_submission)(struct intel_engine_cs *engine);
- struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
- void (*context_unpin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+ struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+
int (*request_alloc)(struct i915_request *rq);
int (*init_context)(struct i915_request *rq);
@@ -550,16 +577,7 @@ struct intel_engine_cs {
* to the kernel context and trash it as the save may not happen
* before the hardware is powered down.
*/
- struct i915_gem_context *last_retired_context;
-
- /* We track the current MI_SET_CONTEXT in order to eliminate
- * redudant context switches. This presumes that requests are not
- * reordered! Or when they are the tracking is updated along with
- * the emission of individual requests into the legacy command
- * stream (ring).
- */
- struct i915_gem_context *legacy_active_context;
- struct i915_hw_ppgtt *legacy_active_ppgtt;
+ struct intel_context *last_retired_context;
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
@@ -672,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists,
__clear_bit(bit, (unsigned long *)&execlists->active);
}
+static inline void
+execlists_clear_all_active(struct intel_engine_execlists *execlists)
+{
+ execlists->active = 0;
+}
+
static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -809,6 +833,19 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
return pos & (ring->size - 1);
}
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
+
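For reference, a worked instance of the intel_ring_offset_valid() test above, assuming the usual power-of-two ring size of 4096 bytes:

/*
 * -4096 as a u32 is 0xfffff000, a mask of every bit at or above the
 * ring size, so (pos & -ring->size) is zero exactly when pos < size:
 *
 *   0x0ff8 & 0xfffff000 == 0      -> in range, and qword aligned: valid
 *   0x0ffc & 0xfffff000 == 0      -> in range, but not 8-byte aligned
 *   0x1234 & 0xfffff000 == 0x1000 -> outside the ring: rejected
 */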
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
@@ -820,12 +857,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
- /* We could combine these into a single tail operation, but keeping
- * them as seperate tests will help identify the cause should one
- * ever fire.
- */
- GEM_BUG_ON(!IS_ALIGNED(tail, 8));
- GEM_BUG_ON(tail >= ring->size);
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
/*
* "Ring Buffer Use"
@@ -865,14 +897,19 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+ unsigned int size);
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
+
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
@@ -918,11 +955,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-static inline void intel_wait_init(struct intel_wait *wait,
- struct i915_request *rq)
+static inline void intel_wait_init(struct intel_wait *wait)
{
wait->tsk = current;
- wait->request = rq;
+ wait->request = NULL;
}
static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
@@ -1042,10 +1078,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
return cs;
}
+void intel_engines_sanitize(struct drm_i915_private *i915);
+
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);
@@ -1123,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+ if (!execlists->preempt_hang.inject_hang)
+ return false;
+
+ complete(&execlists->preempt_hang.completion);
+ return true;
+}
+
+#else
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+ return false;
+}
+
+#endif
+
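A hedged sketch of how a selftest might drive the inject_preempt_hang() hook; igt_preempt_hang_smoke() and trigger_preemption() are hypothetical names:

static int igt_preempt_hang_smoke(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;

	init_completion(&execlists->preempt_hang.completion);
	execlists->preempt_hang.inject_hang = true;

	trigger_preemption(engine); /* hypothetical: queue a preempting request */

	/* inject_preempt_hang() completes this from the submission path */
	if (!wait_for_completion_timeout(&execlists->preempt_hang.completion,
					 HZ))
		return -ETIME;

	execlists->preempt_hang.inject_hang = false;
	return 0;
}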
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53a6eaa9671a..6b5aa3b074ec 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -128,10 +128,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
+ case POWER_DOMAIN_AUX_E:
+ return "AUX_E";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -382,7 +392,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
u32 val;
if (wait_fuses) {
- pg = SKL_PW_TO_PG(id);
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
+ SKL_PW_TO_PG(id);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
* before enabling the power well and PW1/PG1's own fuse
@@ -428,6 +439,43 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A)
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ enum port port = ICL_AUX_PW_TO_PORT(id);
+ u32 val;
+
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ enum port port = ICL_AUX_PW_TO_PORT(id);
+ u32 val;
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
+ val & ~HSW_PWR_WELL_CTL_REQ(id));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -1822,6 +1870,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -1894,6 +1943,105 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /* VDSC/joining */
+#define ICL_PW_3_POWER_DOMAINS ( \
+ ICL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+#define ICL_PW_2_POWER_DOMAINS ( \
+ ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - eDP/DSI VDSC
+ * - KVMR (HW control)
+ */
+#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ ICL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define ICL_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define ICL_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define ICL_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define ICL_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define ICL_DDI_IO_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define ICL_DDI_IO_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+
+#define ICL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define ICL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1))
+#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2))
+#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3))
+#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -2451,6 +2599,157 @@ static struct i915_power_well cnl_power_wells[] = {
},
};
+static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_combo_phy_aux_power_well_enable,
+ .disable = icl_combo_phy_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well icl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_1,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "power well 2",
+ .domains = ICL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_2,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_PW_DC_OFF,
+ },
+ {
+ .name = "power well 3",
+ .domains = ICL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_A,
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_C,
+ },
+ {
+ .name = "DDI D IO",
+ .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_D,
+ },
+ {
+ .name = "DDI E IO",
+ .domains = ICL_DDI_IO_E_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_E,
+ },
+ {
+ .name = "DDI F IO",
+ .domains = ICL_DDI_IO_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_F,
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = ICL_DISP_PW_AUX_A,
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = ICL_DISP_PW_AUX_B,
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_C,
+ },
+ {
+ .name = "AUX D",
+ .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_D,
+ },
+ {
+ .name = "AUX E",
+ .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_E,
+ },
+ {
+ .name = "AUX F",
+ .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_F,
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT1,
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT2,
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT3,
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT4,
+ },
+ {
+ .name = "power well 4",
+ .domains = ICL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -2468,7 +2767,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -2556,7 +2855,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_HASWELL(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ set_power_wells(power_domains, icl_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
@@ -2911,6 +3212,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
+ /* fall through */
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
break;
@@ -3023,6 +3325,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
enum port port;
u32 val;
@@ -3051,8 +3355,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
I915_WRITE(ICL_PORT_CL_DW5(port), val);
}
- /* 4. Enable power well 1 (PG1) and aux IO power. */
- /* FIXME: ICL power wells code not here yet. */
+ /*
+ * 4. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
/* 5. Enable CDCLK. */
icl_init_cdclk(dev_priv);
@@ -3070,6 +3380,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
enum port port;
u32 val;
@@ -3083,8 +3395,15 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
icl_uninit_cdclk(dev_priv);
- /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
- /* FIXME: ICL power wells code not here yet. */
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
/* 5. Disable Comp */
for (port = PORT_A; port <= PORT_B; port++) {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26975df4e593..812fe7b06f87 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1340,6 +1340,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
switch (crtc_state->pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
+ /* fall through */
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1400,33 +1401,40 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (active_outputs & intel_sdvo_connector->output_flag)
- return true;
+ return active_outputs & intel_sdvo_connector->output_flag;
+}
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(sdvo_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
else
- return false;
+ *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
+
+ return val & SDVO_ENABLE;
}
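Writing *pipe even for a disabled port lets state asserts report where a stuck port is routed; a hypothetical assert-style caller, sketched under that assumption:

static void assert_sdvo_disabled(struct drm_i915_private *dev_priv,
				 i915_reg_t sdvo_reg)
{
	enum pipe pipe;

	/* the helper fills in the pipe even when SDVO_ENABLE is clear */
	WARN(intel_sdvo_port_enabled(dev_priv, sdvo_reg, &pipe),
	     "SDVO port still enabled on pipe %c\n", pipe_name(pipe));
}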
static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
- u32 tmp;
+ bool ret;
- tmp = I915_READ(intel_sdvo->sdvo_reg);
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
- return false;
+ ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- return true;
+ return ret || active_outputs;
}
static void intel_sdvo_get_config(struct intel_encoder *encoder,
@@ -1553,8 +1561,8 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
intel_sdvo_write_sdvox(intel_sdvo, temp);
temp &= ~SDVO_ENABLE;
@@ -1903,7 +1911,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
if (edid != NULL) {
if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
edid)) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
@@ -2306,14 +2314,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
switch (sdvo->controlled_output) {
case SDVO_OUTPUT_LVDS1:
mask |= SDVO_OUTPUT_LVDS1;
+ /* fall through */
case SDVO_OUTPUT_LVDS0:
mask |= SDVO_OUTPUT_LVDS0;
+ /* fall through */
case SDVO_OUTPUT_TMDS1:
mask |= SDVO_OUTPUT_TMDS1;
+ /* fall through */
case SDVO_OUTPUT_TMDS0:
mask |= SDVO_OUTPUT_TMDS0;
+ /* fall through */
case SDVO_OUTPUT_RGB1:
mask |= SDVO_OUTPUT_RGB1;
+ /* fall through */
case SDVO_OUTPUT_RGB0:
mask |= SDVO_OUTPUT_RGB0;
break;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ee23613f9fd4..f7026e887fa9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -41,20 +41,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-bool intel_format_is_yuv(u32 format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_NV12:
- return true;
- default:
- return false;
- }
-}
-
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -107,13 +93,21 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
- local_irq_disable();
-
if (min <= 0 || max <= 0)
- return;
+ goto irq_disable;
if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
- return;
+ goto irq_disable;
+
+ /*
+ * Wait for PSR to idle out after enabling the VBL interrupts.
+ * VBL interrupts will start the PSR exit and prevent a PSR
+ * re-entry as well.
+ */
+ if (intel_psr_wait_for_idle(new_crtc_state))
+ DRM_ERROR("PSR idle timed out, atomic update may fail\n");
+
+ local_irq_disable();
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
@@ -171,6 +165,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
trace_i915_pipe_update_vblank_evaded(crtc);
+ return;
+
+irq_disable:
+ local_irq_disable();
}
/**
@@ -284,13 +282,35 @@ skl_update_plane(struct intel_plane *plane,
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+
+ /* TODO: handle sub-pixel coordinates */
+ if (fb->format->format == DRM_FORMAT_NV12) {
+ y_hphase = skl_scaler_calc_phase(1, false);
+ y_vphase = skl_scaler_calc_phase(1, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
- scaler = &crtc_state->scaler_state.scalers[scaler_id];
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+ }
I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));
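For context on the phase arguments above (assumed semantics of skl_scaler_calc_phase(chroma_pitch, chroma_cosited)):

/*
 * NV12 chroma is subsampled 2x2. The MPEG2 siting convention puts a
 * chroma sample horizontally co-sited with the left luma sample
 * (cosited = true) but vertically centered between luma rows
 * (cosited = false), which is why the hphase and vphase arguments
 * differ for the UV plane while the Y plane uses unity pitch.
 */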
@@ -327,19 +347,21 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
bool
-skl_plane_get_hw_state(struct intel_plane *plane)
+skl_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -380,7 +402,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
const s16 *csc = csc_matrix[plane_state->base.color_encoding];
/* Seems RGB data bypasses the CSC always */
- if (!intel_format_is_yuv(fb->format->format))
+ if (!fb->format->is_yuv)
return;
I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
@@ -415,7 +437,7 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
enum plane_id plane_id = plane->id;
int contrast, brightness, sh_scale, sh_sin, sh_cos;
- if (intel_format_is_yuv(fb->format->format) &&
+ if (fb->format->is_yuv &&
plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
/*
* Expand limited range to full range:
@@ -588,19 +610,21 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-vlv_plane_get_hw_state(struct intel_plane *plane)
+vlv_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+ ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -754,18 +778,20 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-ivb_plane_get_hw_state(struct intel_plane *plane)
+ivb_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+ ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -910,18 +936,20 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-g4x_plane_get_hw_state(struct intel_plane *plane)
+g4x_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+ ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -1010,7 +1038,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
- if (intel_format_is_yuv(fb->format->format) &&
+ if (fb->format->is_yuv &&
fb->format->format != DRM_FORMAT_NV12 &&
(src_x % 2 || src_w % 2)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
@@ -1071,6 +1099,37 @@ intel_check_sprite_plane(struct intel_plane *plane,
return 0;
}
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+ const struct drm_intel_sprite_colorkey *set)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ *key = *set;
+
+ /*
+ * We want src key enabled on the
+ * sprite and not on the primary.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_SOURCE)
+ key->flags = 0;
+
+ /*
+ * On SKL+ we want dst key enabled on
+ * the primary and not on the sprite.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ key->flags = 0;
+}
+
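The net effect of intel_plane_set_ckey() on SKL+ can be summarized as follows (an illustration of the assumed routing, not driver output):

/*
 *   requested flags                 sprite key->flags  primary key->flags
 *   I915_SET_COLORKEY_SOURCE        kept               cleared
 *   I915_SET_COLORKEY_DESTINATION   cleared            kept
 */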
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1100,6 +1159,16 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
+ /*
+ * On SKL+, only plane 2 can do destination keying against plane 1.
+ * Also multiple planes can't do destination keying on the same
+ * pipe simultaneously.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(plane->dev);
@@ -1112,11 +1181,28 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
while (1) {
plane_state = drm_atomic_get_plane_state(state, plane);
ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret) {
- to_intel_plane_state(plane_state)->ckey = *set;
- ret = drm_atomic_commit(state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+ /*
+ * On some platforms we have to configure
+ * the dst colorkey on the primary plane.
+ */
+ if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
+
+ plane_state = drm_atomic_get_plane_state(state,
+ crtc->base.primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
}
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
if (ret != -EDEADLK)
break;
@@ -1211,8 +1297,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_YUYV:
@@ -1228,8 +1323,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool snb_mod_supported(uint32_t format, uint64_t modifier)
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1246,8 +1350,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
@@ -1269,8 +1382,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1302,38 +1433,48 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = g4x_sprite_format_mod_supported,
+};
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
+static const struct drm_plane_funcs snb_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = snb_sprite_format_mod_supported,
+};
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 6)
- return snb_mod_supported(format, modifier);
- else
- return g4x_mod_supported(format, modifier);
-}
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = vlv_sprite_format_mod_supported,
+};
-static const struct drm_plane_funcs intel_sprite_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_sprite_plane_format_mod_supported,
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
};
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@@ -1359,6 +1500,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
const uint64_t *modifiers;
@@ -1383,6 +1525,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->can_scale = true;
state->scaler_id = -1;
+ intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_SPRITE0 + plane);
+
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
@@ -1396,10 +1541,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
+ if (intel_plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
+
+ plane_funcs = &skl_plane_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
@@ -1411,6 +1558,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
@@ -1427,6 +1576,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &snb_sprite_funcs;
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
@@ -1439,9 +1590,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+ plane_funcs = &snb_sprite_funcs;
} else {
plane_formats = g4x_plane_formats;
num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+
+ plane_funcs = &g4x_sprite_funcs;
}
}
@@ -1468,14 +1623,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b55b5c157e38..b5b04cb892e9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -798,16 +798,12 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp = I915_READ(TV_CTL);
- if (!(tmp & TV_ENC_ENABLE))
- return false;
+ *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
- *pipe = PORT_TO_PIPE(tmp);
-
- return true;
+ return tmp & TV_ENC_ENABLE;
}
static void
@@ -1032,8 +1028,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
break;
}
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_ctl |= tv_mode->oversample;
if (tv_mode->progressive)
@@ -1157,12 +1152,9 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
/* Poll for TV detection */
- tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
- else
- tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1355,8 +1347,7 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr = drm_mode_create(connector->dev);
if (!mode_ptr)
continue;
- strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
- mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
+ strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
mode_ptr->hdisplay = hactive_s;
mode_ptr->hsync_start = hactive_s + 1;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 1cffaf7b5dbe..7c95697e1a35 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -50,10 +50,10 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
return ret;
}
-static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
int enable_guc = 0;
/* Default is to enable GuC/HuC if we know their firmwares */
@@ -67,11 +67,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
return enable_guc;
}
-static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
+static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
int guc_log_level;
- if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+ if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
guc_log_level = GUC_LOG_LEVEL_DISABLED;
else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -86,7 +86,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
/**
* sanitize_options_early - sanitize uC related modparam options
- * @dev_priv: device private
+ * @i915: device private
*
* In case of "enable_guc" option this function will attempt to modify
* it only if it was initially set to "auto(-1)". Default value for this
@@ -101,14 +101,14 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
* unless GuC is enabled on given platform and the driver is compiled with
* debug config when this modparam will default to "enable(1..4)".
*/
-static void sanitize_options_early(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
/* A negative value means "use platform default" */
if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+ i915_modparams.enable_guc = __get_platform_enable_guc(i915);
DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
i915_modparams.enable_guc,
@@ -119,28 +119,28 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "no GuC firmware");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "no GuC firmware");
}
/* Verify HuC firmware availability */
if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_HUC(dev_priv) ? "no HuC hardware" :
- "no HuC firmware");
+ !HAS_HUC(i915) ? "no HuC hardware" :
+ "no HuC firmware");
}
/* A negative value means "use platform/config default" */
if (i915_modparams.guc_log_level < 0)
i915_modparams.guc_log_level =
- __get_default_guc_log_level(dev_priv);
+ __get_default_guc_log_level(i915);
if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"guc_log_level", i915_modparams.guc_log_level,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "GuC not enabled");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "GuC not enabled");
i915_modparams.guc_log_level = 0;
}
@@ -171,44 +171,30 @@ void intel_uc_init_early(struct drm_i915_private *i915)
intel_huc_init_early(huc);
sanitize_options_early(i915);
-
- if (USES_GUC(i915))
- intel_uc_fw_fetch(i915, &guc->fw);
-
- if (USES_HUC(i915))
- intel_uc_fw_fetch(i915, &huc->fw);
}
void intel_uc_cleanup_early(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- if (USES_HUC(i915))
- intel_uc_fw_fini(&huc->fw);
-
- if (USES_GUC(i915))
- intel_uc_fw_fini(&guc->fw);
guc_free_load_err_log(guc);
}
/**
* intel_uc_init_mmio - setup uC MMIO access
- *
- * @dev_priv: device private
+ * @i915: device private
*
* Setup minimal state necessary for MMIO accesses later in the
* initialization sequence.
*/
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
+void intel_uc_init_mmio(struct drm_i915_private *i915)
{
- intel_guc_init_send_regs(&dev_priv->guc);
+ intel_guc_init_send_regs(&i915->guc);
}
static void guc_capture_load_err_log(struct intel_guc *guc)
{
- if (!guc->log.vma || !i915_modparams.guc_log_level)
+ if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
return;
if (!guc->load_err_log)
@@ -225,11 +211,11 @@ static void guc_free_load_err_log(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- gen9_enable_guc_interrupts(dev_priv);
+ gen9_enable_guc_interrupts(i915);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
return intel_guc_ct_enable(&guc->ct);
guc->send = intel_guc_send_mmio;
@@ -239,60 +225,73 @@ static int guc_enable_communication(struct intel_guc *guc)
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
intel_guc_ct_disable(&guc->ct);
- gen9_disable_guc_interrupts(dev_priv);
+ gen9_disable_guc_interrupts(i915);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
}
-int intel_uc_init_misc(struct drm_i915_private *dev_priv)
+int intel_uc_init_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- intel_guc_init_ggtt_pin_bias(guc);
-
- ret = intel_guc_init_wq(guc);
+ ret = intel_guc_init_misc(guc);
if (ret)
return ret;
+ if (USES_HUC(i915)) {
+ ret = intel_huc_init_misc(huc);
+ if (ret)
+ goto err_guc;
+ }
+
return 0;
+
+err_guc:
+ intel_guc_fini_misc(guc);
+ return ret;
}
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
+void intel_uc_fini_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- intel_guc_fini_wq(guc);
+ if (USES_HUC(i915))
+ intel_huc_fini_misc(huc);
+
+ intel_guc_fini_misc(guc);
}
-int intel_uc_init(struct drm_i915_private *dev_priv)
+int intel_uc_init(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GUC(i915))
return -ENODEV;
ret = intel_guc_init(guc);
if (ret)
return ret;
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -307,16 +306,16 @@ int intel_uc_init(struct drm_i915_private *dev_priv)
return 0;
}
-void intel_uc_fini(struct drm_i915_private *dev_priv)
+void intel_uc_fini(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_fini(guc);
intel_guc_fini(guc);
@@ -340,22 +339,22 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
__intel_uc_reset_hw(i915);
}
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct intel_huc *huc = &dev_priv->huc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret, attempts;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- gen9_reset_guc_interrupts(dev_priv);
+ gen9_reset_guc_interrupts(i915);
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN9(dev_priv))
+ if (IS_GEN9(i915))
attempts = 3;
else
attempts = 1;
@@ -365,11 +364,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
- ret = __intel_uc_reset_hw(dev_priv);
+ ret = __intel_uc_reset_hw(i915);
if (ret)
goto err_out;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_fw_upload(huc);
if (ret)
goto err_out;
@@ -392,24 +391,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_auth(huc);
if (ret)
goto err_communication;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
}
- dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
+ dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(dev_priv->drm.dev, "GuC submission %s\n",
- enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
- dev_info(dev_priv->drm.dev, "HuC %s\n",
- enableddisabled(USES_HUC(dev_priv)));
+ dev_info(i915->drm.dev, "GuC submission %s\n",
+ enableddisabled(USES_GUC_SUBMISSION(i915)));
+ dev_info(i915->drm.dev, "HuC %s\n",
+ enableddisabled(USES_HUC(i915)));
return 0;
@@ -428,20 +427,20 @@ err_out:
if (GEM_WARN_ON(ret == -EIO))
ret = -EINVAL;
- dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
+ dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
return ret;
}
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+void intel_uc_fini_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 448293eb638d..50b39aa4ffb8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -359,8 +359,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
-static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
- bool restore)
+static unsigned int
+intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
@@ -412,20 +412,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-
- if (restore) { /* If reset with a user forcewake, try to restore */
- if (fw)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- dev_priv->uncore.fifo_count =
- fifo_free_entries(dev_priv);
- }
-
- if (!restore)
- assert_forcewakes_inactive(dev_priv);
+ assert_forcewakes_inactive(dev_priv);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return fw; /* track the lost user forcewake domains */
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
@@ -534,7 +525,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
- bool restore_forcewake)
+ unsigned int restore_forcewake)
{
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
@@ -549,7 +540,17 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
}
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+ intel_uncore_forcewake_reset(dev_priv);
+ if (restore_forcewake) {
+ spin_lock_irq(&dev_priv->uncore.lock);
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ restore_forcewake);
+
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ dev_priv->uncore.fifo_count =
+ fifo_free_entries(dev_priv);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ }
iosf_mbi_punit_release();
}
@@ -558,13 +559,18 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv, false);
+ dev_priv->uncore.fw_domains_saved =
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
}
void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
- __intel_uncore_early_sanitize(dev_priv, true);
+ unsigned int restore_forcewake;
+
+ restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
+ __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+
iosf_mbi_register_pmic_bus_access_notifier(
&dev_priv->uncore.pmic_bus_access_nb);
i915_check_and_clear_faults(dev_priv);
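fetch_and_zero() consumes the saved forcewake domains exactly once across suspend/resume; a sketch of the helper, modelled on the driver's utility macro (treat as illustrative):

#define fetch_and_zero(ptr) ({			\
	typeof(*ptr) __T = *(ptr);		\
	*(ptr) = (typeof(*ptr))0;		\
	__T;					\
})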
@@ -1545,7 +1551,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
intel_uncore_edram_detect(dev_priv);
intel_uncore_fw_domains_init(dev_priv);
- __intel_uncore_early_sanitize(dev_priv, false);
+ __intel_uncore_early_sanitize(dev_priv, 0);
dev_priv->uncore.unclaimed_mmio_check = 1;
dev_priv->uncore.pmic_bus_access_nb.notifier_call =
@@ -1632,7 +1638,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
}
@@ -1702,15 +1708,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
-
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (__intel_wait_for_register_fw(dev_priv,
- mode, MODE_IDLE, MODE_IDLE,
- 500, 0,
- NULL))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
- engine->name);
+
+ if (intel_engine_stop_cs(engine))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
@@ -2099,21 +2099,25 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
{
struct intel_engine_cs *engine;
unsigned int tmp;
+ int ret;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- if (gen8_reset_engine_start(engine))
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ if (gen8_reset_engine_start(engine)) {
+ ret = -EIO;
goto not_ready;
+ }
+ }
if (INTEL_GEN(dev_priv) >= 11)
- return gen11_reset_engines(dev_priv, engine_mask);
+ ret = gen11_reset_engines(dev_priv, engine_mask);
else
- return gen6_reset_engines(dev_priv, engine_mask);
+ ret = gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
gen8_reset_engine_cancel(engine);
- return -EIO;
+ return ret;
}
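The hunk above converts gen8_reset_engines() from flattening every failure to -EIO into a single unwind path that preserves whichever error actually occurred. A condensed sketch of the control-flow pattern, with placeholder engine functions:

	#include <errno.h>
	#include <stdio.h>

	static int start_engine(int id)   { return id == 2 ? -1 : 0; }
	static void cancel_engine(int id) { printf("cancel engine %d\n", id); }
	static int do_reset(void)         { return -ETIMEDOUT; }

	static int reset_engines(int nengines)
	{
		int id, ret;

		for (id = 0; id < nengines; id++) {
			if (start_engine(id)) {
				ret = -EIO;	/* could not even prepare */
				goto not_ready;
			}
		}

		ret = do_reset();	/* keep the real reset result, not -EIO */

	not_ready:
		/* The unwind runs on success and failure alike. */
		for (id = 0; id < nengines; id++)
			cancel_engine(id);

		return ret;
	}

	int main(void)
	{
		printf("reset: %d\n", reset_engines(4)); /* -EIO: engine 2 fails */
		return 0;
	}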
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
@@ -2176,6 +2180,8 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
* Thus assume it is best to stop engines on all gens
* where we have a gpu reset.
*
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
*
* FIXME: Wa for more modern gens needs to be validated
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 47478d609630..e5e157d288de 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -67,21 +67,21 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
- uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
+ u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
void (*mmio_writeb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint8_t val, bool trace);
+ i915_reg_t r, u8 val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint16_t val, bool trace);
+ i915_reg_t r, u16 val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint32_t val, bool trace);
+ i915_reg_t r, u32 val, bool trace);
};
struct intel_forcewake_range {
@@ -104,6 +104,7 @@ struct intel_uncore {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
u32 fw_set;
u32 fw_clear;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 458468237b5f..bba98cf83cbd 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -318,6 +318,12 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_C,
DDC_BUS_DDI_D,
DDC_BUS_DDI_F,
+ ICL_DDC_BUS_DDI_A = 0x1,
+ ICL_DDC_BUS_DDI_B,
+ ICL_DDC_BUS_PORT_1 = 0x4,
+ ICL_DDC_BUS_PORT_2,
+ ICL_DDC_BUS_PORT_3,
+ ICL_DDC_BUS_PORT_4,
};
#define VBT_DP_MAX_LINK_RATE_HBR3 0
@@ -414,7 +420,9 @@ struct child_device_config {
u16 extended_type;
u8 dvo_function;
u8 dp_usb_type_c:1; /* 195 */
- u8 flags2_reserved:7; /* 195 */
+ u8 tbt:1; /* 209 */
+ u8 flags2_reserved:2; /* 195 */
+ u8 dp_port_trace_length:4; /* 209 */
u8 dp_gpio_index; /* 195 */
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
@@ -448,7 +456,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* defs->child_dev_size;
*/
- uint8_t devices[0];
+ u8 devices[0];
} __packed;
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
@@ -635,7 +643,7 @@ struct bdb_sdvo_lvds_options {
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 2df3538ceba5..4bcdeaf8d98f 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,29 +48,58 @@
* - Public functions to init or apply the given workaround type.
*/
-static int wa_add(struct drm_i915_private *dev_priv,
- i915_reg_t addr,
- const u32 mask, const u32 val)
+static void wa_add(struct drm_i915_private *i915,
+ i915_reg_t reg, const u32 mask, const u32 val)
{
- const unsigned int idx = dev_priv->workarounds.count;
+ struct i915_workarounds *wa = &i915->workarounds;
+ unsigned int start = 0, end = wa->count;
+ unsigned int addr = i915_mmio_reg_offset(reg);
+ struct i915_wa_reg *r;
+
+ while (start < end) {
+ unsigned int mid = start + (end - start) / 2;
+
+ if (wa->reg[mid].addr < addr) {
+ start = mid + 1;
+ } else if (wa->reg[mid].addr > addr) {
+ end = mid;
+ } else {
+ r = &wa->reg[mid];
+
+ if ((mask & ~r->mask) == 0) {
+ DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+ addr, r->mask, r->value);
+
+ r->value &= ~mask;
+ }
+
+ r->value |= val;
+ r->mask |= mask;
+ return;
+ }
+ }
- if (WARN_ON(idx >= I915_MAX_WA_REGS))
- return -ENOSPC;
+ if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
+ DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
+ addr, mask, val);
+ return;
+ }
- dev_priv->workarounds.reg[idx].addr = addr;
- dev_priv->workarounds.reg[idx].value = val;
- dev_priv->workarounds.reg[idx].mask = mask;
+ r = &wa->reg[wa->count++];
+ r->addr = addr;
+ r->value = val;
+ r->mask = mask;
- dev_priv->workarounds.count++;
+ while (r-- > wa->reg) {
+ GEM_BUG_ON(r[0].addr == r[1].addr);
+ if (r[1].addr > r[0].addr)
+ break;
- return 0;
+ swap(r[1], r[0]);
+ }
}
-#define WA_REG(addr, mask, val) do { \
- const int r = wa_add(dev_priv, (addr), (mask), (val)); \
- if (r) \
- return r; \
- } while (0)
+#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
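wa_add() now keeps the workaround table sorted by register offset: a binary search either merges the new mask/value into an existing entry or appends at the end and bubbles the entry back into sorted position. A standalone sketch of that insert, simplified (no overwrite warning) and using hypothetical fixed-size storage:

	#include <stdio.h>

	struct wa_reg { unsigned int addr, mask, value; };

	static struct wa_reg regs[16];
	static unsigned int count;

	static void wa_add(unsigned int addr, unsigned int mask, unsigned int val)
	{
		unsigned int start = 0, end = count;
		struct wa_reg *r;

		while (start < end) {		/* binary search for addr */
			unsigned int mid = start + (end - start) / 2;

			if (regs[mid].addr < addr) {
				start = mid + 1;
			} else if (regs[mid].addr > addr) {
				end = mid;
			} else {		/* merge into the existing entry */
				r = &regs[mid];
				r->value = (r->value & ~mask) | val;
				r->mask |= mask;
				return;
			}
		}

		if (count == sizeof(regs) / sizeof(regs[0]))
			return;			/* table full: drop the w/a */

		r = &regs[count++];		/* append, then bubble into place */
		r->addr = addr;
		r->value = val;
		r->mask = mask;
		while (r-- > regs) {
			if (r[1].addr > r[0].addr)
				break;
			struct wa_reg tmp = r[0];
			r[0] = r[1];
			r[1] = tmp;
		}
	}

	int main(void)
	{
		wa_add(0x7004, 0xf, 0x3);
		wa_add(0x2084, 0x1, 0x1);
		wa_add(0x7004, 0x10, 0x10);	/* merges with the 0x7004 entry */
		for (unsigned int i = 0; i < count; i++)
			printf("%04x mask=%02x value=%02x\n",
			       regs[i].addr, regs[i].mask, regs[i].value);
		return 0;
	}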
@@ -463,6 +492,22 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
*/
WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+ /* Wa_2006611047:icl (pre-prod)
+ * Formerly known as WaDisableImprovedTdlClkGating
+ */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
+ GEN11_STATE_CACHE_REDIRECT_TO_CS);
+
+ /* Wa_2006665173:icl (pre-prod) */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
return 0;
}
@@ -521,7 +566,7 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(w->count);
for (i = 0; i < w->count; i++) {
- *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].addr;
*cs++ = w->reg[i].value;
}
*cs++ = MI_NOOP;
@@ -647,6 +692,19 @@ static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ for_each_engine(engine, dev_priv, tmp) {
+ if (engine->id == RCS)
+ continue;
+
+ I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
+ }
+ }
}
static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
@@ -672,8 +730,74 @@ static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
+static void wa_init_mcr(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr;
+ u32 mcr_slice_subslice_mask;
+
+ /*
+ * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
+	 * L3Banks could be fused off in a single-slice scenario. If that is
+ * the case, we might need to program MCR select to a valid L3Bank
+ * by default, to make sure we correctly read certain registers
+ * later on (in the range 0xB100 - 0xB3FF).
+ * This might be incompatible with
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads.
+ * Fortunately, this should not happen in production hardware, so
+ * we only assert that this is the case (instead of implementing
+ * something more complex that requires checking the range of every
+ * MMIO read).
+ */
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ is_power_of_2(sseu->slice_mask)) {
+ /*
+		 * Read FUSE3 for the enabled L3 Bank IDs; if an L3 Bank
+		 * matches an enabled subslice, there is no need to redirect
+		 * the MCR packet.
+ */
+ u32 slice = fls(sseu->slice_mask);
+ u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+ u8 ss_mask = sseu->subslice_mask[slice];
+
+ u8 enabled_mask = (ss_mask | ss_mask >>
+ GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
+ u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
+
+ /*
+		 * Production silicon should have a matched L3Bank and
+		 * subslice enabled.
+ */
+ WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
+ }
+
+ mcr = I915_READ(GEN8_MCR_SELECTOR);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+ GEN11_MCR_SUBSLICE_MASK;
+ else
+ mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+ GEN8_MCR_SUBSLICE_MASK;
+ /*
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
+	 * Before any MMIO read into slice/subslice specific registers, the
+	 * MCR packet control register needs to be programmed to point to
+	 * any enabled s/ss pair. Otherwise, incorrect values will be
+	 * returned. This means each subsequent MMIO read will be forwarded
+	 * to a specific s/ss combination, but this is OK since these
+	 * registers are consistent across s/ss in almost all cases. On the
+	 * rare occasions, such as INSTDONE, where this value depends on the
+	 * s/ss combo, the read should be done with read_subslice_reg.
+ */
+ mcr &= ~mcr_slice_subslice_mask;
+ mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
+ I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+}
+
static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG,
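wa_init_mcr() above is a read-modify-write of the MCR selector: clear the slice/subslice select fields, then OR in one enabled s/ss pair. A minimal sketch of that bitfield steering, with a made-up field layout (the real masks differ per gen):

	#include <stdio.h>

	/* Hypothetical layout: slice select in bits 27:26, subslice in 25:24. */
	#define MCR_SLICE_MASK		(0x3u << 26)
	#define MCR_SLICE(s)		((unsigned int)(s) << 26)
	#define MCR_SUBSLICE_MASK	(0x3u << 24)
	#define MCR_SUBSLICE(ss)	((unsigned int)(ss) << 24)

	static unsigned int mcr_select(unsigned int mcr, unsigned int slice,
				       unsigned int subslice)
	{
		/* Clear both select fields, then steer reads to one s/ss pair. */
		mcr &= ~(MCR_SLICE_MASK | MCR_SUBSLICE_MASK);
		mcr |= MCR_SLICE(slice) | MCR_SUBSLICE(subslice);
		return mcr;
	}

	int main(void)
	{
		unsigned int mcr = 0x0c000010;	/* pretend register readback */

		printf("%#010x\n", mcr_select(mcr, 0, 2));	/* 0x02000010 */
		return 0;
	}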
@@ -692,6 +816,8 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
 	/* This is not a Wa. Enable for better image quality */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
@@ -772,6 +898,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
PMFLUSHDONE_LNICRSDROP |
PMFLUSH_GAPL3UNBLOCK |
PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406463099:icl
+ * Formerly known as WaGamTlbPendError
+ */
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 91c72911be3c..7efb326badcd 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err = 0;
@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_put;
}
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -458,7 +458,7 @@ out_device:
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
/* Force the page size for this object */
obj->mm.page_sizes.sg = page_size;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_unpin;
@@ -570,6 +570,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -591,12 +592,13 @@ static void close_object_list(struct list_head *objects,
list_for_each_entry_safe(obj, on, objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (!IS_ERR(vma))
i915_vma_close(vma);
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -604,8 +606,8 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
- unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num;
bool single = false;
LIST_HEAD(objects);
@@ -641,7 +643,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
break;
@@ -725,7 +727,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj;
const struct object_info {
unsigned int size;
@@ -819,7 +821,7 @@ static int igt_mock_ppgtt_64K(void *arg)
*/
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_object_unpin;
@@ -866,6 +868,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -887,8 +890,8 @@ out_object_put:
static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
- const int gen = INTEL_GEN(vma->vm->i915);
+ struct drm_i915_private *i915 = vma->vm->i915;
+ const int gen = INTEL_GEN(i915);
unsigned int count = vma->size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj;
struct i915_vma *batch;
@@ -919,12 +922,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
*cmd++ = val;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? 1 << 22 : 0);
+ (gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = val;
} else {
- *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = val;
}
@@ -985,7 +988,10 @@ static int gpu_write(struct i915_vma *vma,
goto err_request;
}
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto err_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
@@ -996,14 +1002,12 @@ static int gpu_write(struct i915_vma *vma,
if (err)
goto err_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ i915_request_skip(rq, err);
err_request:
- __i915_request_add(rq, err == 0);
+ i915_request_add(rq);
return err;
}
@@ -1047,7 +1051,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1100,7 +1105,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
@@ -1262,6 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -1323,6 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -1391,6 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -1439,7 +1448,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1493,7 +1502,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1531,7 +1540,8 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1587,7 +1597,8 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
@@ -1690,20 +1701,20 @@ int i915_gem_huge_page_mock_selftests(void)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+ ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->base)) {
+ if (!i915_vm_is_48bit(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
}
 	/* If we ever hit this then it's time to mock the 64K scratch */
- if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+ if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL;
goto out_close;
@@ -1712,7 +1723,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1720,7 +1731,7 @@ out_unlock:
i915_modparams.enable_ppgtt = saved_ppgtt;
- drm_dev_unref(&dev_priv->drm);
+ drm_dev_put(&dev_priv->drm);
return err;
}
@@ -1744,6 +1755,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return 0;
}
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return 0;
+
file = mock_file(dev_priv);
if (IS_ERR(file))
return PTR_ERR(file);
@@ -1758,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
}
if (ctx->ppgtt)
- ctx->ppgtt->base.scrub_64K = true;
+ ctx->ppgtt->vm.scrub_64K = true;
err = i915_subtests(tests, ctx);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 340a98c0c804..3a095c37c120 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
- if (needs_clflush & CLFLUSH_BEFORE)
+
+ if (needs_clflush & CLFLUSH_BEFORE) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
map[offset_in_page(offset) / sizeof(*map)] = v;
- if (needs_clflush & CLFLUSH_AFTER)
+
+ if (needs_clflush & CLFLUSH_AFTER) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
kunmap_atomic(map);
i915_gem_obj_finish_shmem_access(obj);
@@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
- if (needs_clflush & CLFLUSH_BEFORE)
+
+ if (needs_clflush & CLFLUSH_BEFORE) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
*v = map[offset_in_page(offset) / sizeof(*map)];
kunmap_atomic(map);
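The hunk above brackets each clflush with mb() so the flush is ordered against the access it protects. A userspace x86 analogue using compiler intrinsics, where _mm_mfence() stands in for mb() (assumes an x86 target; other architectures need different primitives):

	#include <stdio.h>
	#include <immintrin.h>

	static unsigned int buffer[16];

	static void coherent_store(unsigned int *p, unsigned int v)
	{
		_mm_mfence();	/* order earlier accesses before the flush */
		_mm_clflush(p);	/* CLFLUSH_BEFORE: drop the stale line */
		_mm_mfence();	/* flush completes before the store below */

		*p = v;

		_mm_mfence();
		_mm_clflush(p);	/* CLFLUSH_AFTER: push the new value to memory */
		_mm_mfence();
	}

	int main(void)
	{
		coherent_store(&buffer[0], 0xdeadbeef);
		printf("%#x\n", buffer[0]);
		return 0;
	}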
@@ -199,7 +214,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
i915_vma_unpin(vma);
return PTR_ERR(cs);
}
@@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj,
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
} else if (INTEL_GEN(i915) >= 4) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
} else {
- *cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
*cs++ = MI_NOOP;
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &rq->fence);
- reservation_object_unlock(obj->resv);
-
- __i915_request_add(rq, true);
+ i915_request_add(rq);
- return 0;
+ return err;
}
static bool always_valid(struct drm_i915_private *i915)
@@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915)
return true;
}
+static bool needs_fence_registers(struct drm_i915_private *i915)
+{
+ return !i915_terminally_wedged(&i915->gpu_error);
+}
+
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return false;
+
return intel_engine_can_store_dword(i915->engine[RCS]);
}
@@ -251,7 +270,7 @@ static const struct igt_coherency_mode {
bool (*valid)(struct drm_i915_private *i915);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
- { "gtt", gtt_set, gtt_get, always_valid },
+ { "gtt", gtt_set, gtt_get, needs_fence_registers },
{ "wc", wc_set, wc_get, always_valid },
{ "gpu", gpu_set, NULL, needs_mi_store_dword },
{ },
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index ddb03f009232..1c92560d35da 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -23,9 +23,11 @@
*/
#include "../i915_selftest.h"
+#include "i915_random.h"
#include "igt_flush_test.h"
#include "mock_drm.h"
+#include "mock_gem_device.h"
#include "huge_gem_object.h"
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -62,12 +64,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
*cmd++ = value;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? 1 << 22 : 0);
+ (gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = value;
} else {
- *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = value;
}
@@ -114,7 +116,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -169,24 +171,28 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
goto err_request;
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
- i915_vma_move_to_active(vma, rq, 0);
i915_vma_unpin(vma);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &rq->fence);
- reservation_object_unlock(obj->resv);
-
- __i915_request_add(rq, true);
+ i915_request_add(rq);
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
- __i915_request_add(rq, false);
+ i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
err_vma:
@@ -247,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
}
for (; m < DW_PER_PAGE; m++) {
- if (map[m] != 0xdeadbeef) {
+ if (map[m] != STACK_MAGIC) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], 0xdeadbeef);
+ n, m, map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -289,7 +295,7 @@ create_test_object(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *obj;
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
u64 size;
int err;
@@ -305,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx,
if (err)
return ERR_PTR(err);
- err = cpu_fill(obj, 0xdeadbeef);
+ err = cpu_fill(obj, STACK_MAGIC);
if (err) {
pr_err("Failed to fill object with cpu, err=%d\n",
err);
@@ -335,11 +341,15 @@ static int igt_ctx_exec(void *arg)
bool first_shared_gtt = true;
int err = -ENODEV;
- /* Create a few different contexts (with different mm) and write
+ /*
+ * Create a few different contexts (with different mm) and write
 	 * through each ctx/mm using the GPU, making sure those writes end
* up in the expected pages of our obj.
*/
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
@@ -366,6 +376,9 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
+ if (!engine->context_size)
+ continue; /* No logical context support in HW */
+
if (!intel_engine_can_store_dword(engine))
continue;
@@ -420,6 +433,237 @@ out_unlock:
return err;
}
+static int igt_ctx_readonly(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ unsigned long ndwords, dw;
+ int err = -ENODEV;
+
+ /*
+ * Create a few read-only objects (with the occasional writable object)
+	 * and try to write into these objects, checking that the GPU discards
+ * any write to a read-only object.
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
+ if (!ppgtt || !ppgtt->vm.has_read_only) {
+ err = 0;
+ goto out_unlock;
+ }
+
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct intel_engine_cs *engine;
+ unsigned int id;
+
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ if (prandom_u32_state(&prng) & 1)
+ i915_gem_object_set_readonly(obj);
+ }
+
+ intel_runtime_pm_get(i915);
+ err = gpu_fill(obj, ctx, engine, dw);
+ intel_runtime_pm_put(i915);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+ ndwords++;
+ }
+ }
+ pr_info("Submitted %lu dwords (across %u engines)\n",
+ ndwords, INTEL_INFO(i915)->num_rings);
+
+ dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+ unsigned int num_writes;
+
+ num_writes = rem;
+ if (i915_gem_object_is_readonly(obj))
+ num_writes = 0;
+
+ err = cpu_check(obj, num_writes);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
+
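igt_ctx_readonly() above flips a coin per object with the seeded prandom state and later expects zero visible writes on the read-only ones. A standalone sketch of that deterministic selection and expectation setup, with hypothetical types:

	#include <stdio.h>
	#include <stdlib.h>

	struct test_obj {
		int readonly;
		unsigned int writes_submitted;
	};

	int main(void)
	{
		struct test_obj objs[8];
		int i;

		srand(42);	/* deterministic, like the seeded prandom state */

		for (i = 0; i < 8; i++) {
			objs[i].readonly = rand() & 1;	/* coin flip per object */
			objs[i].writes_submitted = 16;
		}

		/* Read back expecting zero visible writes on read-only objects. */
		for (i = 0; i < 8; i++) {
			unsigned int expected =
				objs[i].readonly ? 0 : objs[i].writes_submitted;

			printf("obj %d: expect %u visible writes\n", i, expected);
		}
		return 0;
	}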
+static __maybe_unused const char *
+__engine_name(struct drm_i915_private *i915, unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ if (engines == ALL_ENGINES)
+ return "all";
+
+ for_each_engine_masked(engine, i915, engines, tmp)
+ return engine->name;
+
+ return "none";
+}
+
+static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx,
+ unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+ int err;
+
+ GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!engine_has_kernel_context_barrier(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (engine->last_retired_context->gem_context != i915->kernel_context) {
+ pr_err("engine %s not idling in kernel context!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ if (i915->gt.active_requests) {
+ pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
+ i915->gt.active_requests);
+ return -EINVAL;
+ }
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!intel_engine_has_kernel_context(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int igt_switch_to_kernel_context(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ int err;
+
+ /*
+ * A core premise of switching to the kernel context is that
+ * if an engine is already idling in the kernel context, we
+	 * do not emit another request and wake it up. The other premise
+	 * is that we do indeed end up idling in the kernel context.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&i915->drm.struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ /* First check idling each individual engine */
+ for_each_engine(engine, i915, id) {
+ err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
+ if (err)
+ goto out_unlock;
+ }
+
+ /* Now en masse */
+ err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
+ if (err)
+ goto out_unlock;
+
+out_unlock:
+ GEM_TRACE_DUMP_ON(err);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kernel_context_close(ctx);
+ return err;
+}
+
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -432,7 +676,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
continue;
@@ -447,14 +691,37 @@ static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
i915_gem_fini_aliasing_ppgtt(i915);
}
+int i915_gem_context_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
SUBTEST(igt_ctx_exec),
+ SUBTEST(igt_ctx_readonly),
};
bool fake_alias = false;
int err;
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return 0;
+
/* Install a fake aliasing gtt for exercise */
if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 89dc25a5a53b..a7055b12e53c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index ab9d7bee0aae..128ad1cf0647 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
u64 size;
for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
return -EINVAL;
}
- if (list_empty(&i915->ggtt.base.inactive_list)) {
+ if (list_empty(&i915->ggtt.vm.inactive_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+ list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
i915_vma_unpin(vma);
}
@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict something */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict the node */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
* i915_gtt_color_adjust throughout our driver, so using a mock color
* adjust will work just fine for our purposes.
*/
- ggtt->base.mm.color_adjust = mock_color_adjust;
+ ggtt->vm.mm.color_adjust = mock_color_adjust;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup:
unpin_ggtt(i915);
cleanup_objects(i915);
- ggtt->base.mm.color_adjust = NULL;
+ ggtt->vm.mm.color_adjust = NULL;
return err;
}
@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(i915);
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+ if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
@@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f7dc926f4ef1..8e2e269db97e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -32,6 +32,20 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+static void cleanup_freed_objects(struct drm_i915_private *i915)
+{
+ /*
+ * As we may hold onto the struct_mutex for inordinate lengths of
+	 * time, the khungtaskd hung-task detector may fire for the free objects
+ * worker.
+ */
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+}
+
static void fake_free_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -134,31 +148,34 @@ static int igt_ppgtt_alloc(void *arg)
{
struct drm_i915_private *dev_priv = arg;
struct i915_hw_ppgtt *ppgtt;
- u64 size, last;
- int err;
+ u64 size, last, limit;
+ int err = 0;
 	/* Allocate a ppgtt and try to fill the entire range */
if (!USES_PPGTT(dev_priv))
return 0;
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ ppgtt = __hw_ppgtt_create(dev_priv);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- mutex_lock(&dev_priv->drm.struct_mutex);
- err = __hw_ppgtt_init(ppgtt, dev_priv);
- if (err)
- goto err_ppgtt;
-
- if (!ppgtt->base.allocate_va_range)
+ if (!ppgtt->vm.allocate_va_range)
goto err_ppgtt_cleanup;
+ /*
+	 * While we only allocate the page tables here (and so could
+	 * address a much larger GTT than would actually fit into RAM),
+	 * a practical limit is the number of physical pages in the
+	 * system. Capping the range there should ensure we do not run
+	 * into the oom-killer during the test and wilfully take down
+	 * the machine.
+ */
+ limit = totalram_pages << PAGE_SHIFT;
+ limit = min(ppgtt->vm.total, limit);
+
/* Check we can allocate the entire range */
- for (size = 4096;
- size <= ppgtt->base.total;
- size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+ for (size = 4096; size <= limit; size <<= 2) {
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
if (err) {
if (err == -ENOMEM) {
pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
@@ -168,15 +185,15 @@ static int igt_ppgtt_alloc(void *arg)
goto err_ppgtt_cleanup;
}
- ppgtt->base.clear_range(&ppgtt->base, 0, size);
+ cond_resched();
+
+ ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
}
/* Check we can incrementally allocate the entire range */
- for (last = 0, size = 4096;
- size <= ppgtt->base.total;
- last = size, size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- last, size - last);
+ for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+ last, size - last);
if (err) {
if (err == -ENOMEM) {
pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
@@ -185,13 +202,14 @@ static int igt_ppgtt_alloc(void *arg)
}
goto err_ppgtt_cleanup;
}
+
+ cond_resched();
}
err_ppgtt_cleanup:
- ppgtt->base.cleanup(&ppgtt->base);
-err_ppgtt:
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_ppgtt_put(ppgtt);
mutex_unlock(&dev_priv->drm.struct_mutex);
- kfree(ppgtt);
return err;
}
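The limit computed above clamps the exercised range to totalram_pages << PAGE_SHIFT so that allocating page tables cannot drive the machine into the OOM killer. The same clamp in a userspace sketch via sysconf() (where _SC_PHYS_PAGES is a glibc extension):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long long vm_total = 1ULL << 48;	/* pretend 48b GTT */
		unsigned long long ram =
			(unsigned long long)sysconf(_SC_PHYS_PAGES) *
			(unsigned long long)sysconf(_SC_PAGE_SIZE);
		unsigned long long limit = vm_total < ram ? vm_total : ram;
		unsigned long long size;

		/* Walk power-of-4 sizes up to the clamped limit, as the test does. */
		for (size = 4096; size <= limit; size <<= 2)
			printf("would allocate va range [0, %#llx)\n", size);

		return 0;
	}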
@@ -293,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
i915_gem_object_put(obj);
kfree(order);
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -521,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
+ cleanup_freed_objects(i915);
}
return 0;
@@ -607,6 +628,8 @@ err_put:
i915_gem_object_put(obj);
if (err)
return err;
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -791,6 +814,8 @@ err_obj:
kfree(order);
if (err)
return err;
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -859,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
+ cleanup_freed_objects(i915);
return err;
}
@@ -951,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915,
i915_gem_object_put(explode);
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+ cleanup_freed_objects(i915);
}
return 0;
@@ -982,17 +1009,17 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+ ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- GEM_BUG_ON(offset_in_page(ppgtt->base.total));
- GEM_BUG_ON(ppgtt->base.closed);
+ GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+ GEM_BUG_ON(ppgtt->vm.closed);
- err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1061,18 +1088,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
restart:
- list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
- drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+ list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+ drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
if (hole_start < last)
continue;
- if (ggtt->base.mm.color_adjust)
- ggtt->base.mm.color_adjust(node, 0,
- &hole_start, &hole_end);
+ if (ggtt->vm.mm.color_adjust)
+ ggtt->vm.mm.color_adjust(node, 0,
+ &hole_start, &hole_end);
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+ err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
if (err)
break;
@@ -1134,7 +1161,7 @@ static int igt_ggtt_page(void *arg)
goto out_free;
memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
@@ -1147,9 +1174,9 @@ static int igt_ggtt_page(void *arg)
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, 0),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
}
order = i915_random_order(count, &prng);
@@ -1188,7 +1215,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
- ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+ ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(i915);
drm_mm_remove_node(&tmp);
out_unpin:
@@ -1217,6 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
+ const u64 limit = totalram_pages << PAGE_SHIFT;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
@@ -1229,7 +1257,7 @@ static int exercise_mock(struct drm_i915_private *i915,
ppgtt = ctx->ppgtt;
GEM_BUG_ON(!ppgtt);
- err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
mock_context_close(ctx);
return err;
@@ -1270,7 +1298,7 @@ static int igt_gtt_reserve(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1288,20 +1316,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1319,7 +1347,7 @@ static int igt_gtt_reserve(void *arg)
/* Now we start forcing evictions */
for (total = I915_GTT_PAGE_SIZE;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1337,20 +1365,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1371,7 +1399,7 @@ static int igt_gtt_reserve(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1383,18 +1411,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, i915->ggtt.base.total,
+ offset = random_offset(0, i915->ggtt.vm.total,
2*I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1429,8 +1457,8 @@ static int igt_gtt_insert(void *arg)
u64 start, end;
} invalid_insert[] = {
{
- i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
- 0, i915->ggtt.base.total,
+ i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
+ 0, i915->ggtt.vm.total,
},
{
2*I915_GTT_PAGE_SIZE, 0,
@@ -1460,7 +1488,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
- err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1475,7 +1503,7 @@ static int igt_gtt_insert(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1493,15 +1521,15 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
@@ -1510,7 +1538,7 @@ static int igt_gtt_insert(void *arg)
}
if (err) {
pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1522,7 +1550,7 @@ static int igt_gtt_insert(void *arg)
list_for_each_entry(obj, &objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1542,7 +1570,7 @@ static int igt_gtt_insert(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1557,13 +1585,13 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1579,7 +1607,7 @@ static int igt_gtt_insert(void *arg)
/* And then force evictions */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1597,19 +1625,19 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1646,7 +1674,7 @@ int i915_gem_gtt_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -1669,7 +1697,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_page),
};
- GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+ GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index fbdb2419d418..ba4f322d56b8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915,
nreal * PAGE_SIZE,
- i915->ggtt.base.total + PAGE_SIZE);
+ i915->ggtt.vm.total + PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
v += y * tile->width;
v += div64_u64_rem(x, tile->width, &x) << tile->size;
v += x;
- } else {
+ } else if (tile->width == 128) {
const unsigned int ytile_span = 16;
- const unsigned int ytile_height = 32 * ytile_span;
+ const unsigned int ytile_height = 512;
+
+ v += y * ytile_span;
+ v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+ v += x;
+ } else {
+ const unsigned int ytile_span = 32;
+ const unsigned int ytile_height = 256;
v += y * ytile_span;
v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
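tiled_offset() now distinguishes two Y-tile geometries: a 16-byte span with a 512-byte column step for 128-byte-pitch tiles, and a 32-byte span with a 256-byte step otherwise. A standalone sketch of the remap for the first case, with the (x, y) split done against a caller-supplied pitch rather than the tile state used by the surrounding function:

	#include <stdio.h>

	/* Remap a linear offset into the 128-byte-pitch Y-tile layout above. */
	static unsigned long long tiled_offset(unsigned long long v,
					       unsigned long long pitch)
	{
		const unsigned int ytile_span = 16;	/* bytes per row chunk */
		const unsigned int ytile_height = 512;	/* byte step per column */
		unsigned long long x = v % pitch;	/* split into (x, y) */
		unsigned long long y = v / pitch;

		v = y * ytile_span;			/* row inside the column */
		v += (x / ytile_span) * ytile_height;	/* which column */
		v += x % ytile_span;			/* byte inside the span */

		return v;
	}

	int main(void)
	{
		/* e.g. linear offset 4096 in a 128-byte-pitch surface -> 512 */
		printf("%llu\n", tiled_offset(4096, 128));
		return 0;
	}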
@@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
if (err)
return err;
+
+ i915_vma_destroy(vma);
}
return 0;
@@ -311,7 +320,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg)
unsigned int pitch;
struct tile tile;
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ /*
+ * The swizzling pattern is actually unknown as it
+			 * varies based on the physical address of each page.
+ * See i915_gem_detect_bit_6_swizzle().
+ */
+ break;
+
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
@@ -357,7 +374,8 @@ static int igt_partial_tiling(void *arg)
break;
}
- if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
@@ -440,7 +458,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -454,12 +472,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
return PTR_ERR(rq);
}
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
i915_request_add(rq);
- i915_gem_object_set_active_reference(obj);
+ __i915_gem_object_release_unless_active(obj);
i915_vma_unpin(vma);
- return 0;
+
+ return err;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -479,6 +499,19 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
return err == expected;
}
+static void disable_retire_worker(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->drm.struct_mutex);
+ if (!i915->gt.active_requests++) {
+ intel_runtime_pm_get(i915);
+ i915_gem_unpark(i915);
+ intel_runtime_pm_put(i915);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gt.idle_work);
+}
+
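disable_retire_worker() fakes an active request so the GT cannot park, then flushes the retire/idle workers. It has to be paired with the unwind performed under out_park later in this hunk; pulled out as a helper, that unwind would look like the following sketch (enable_retire_worker() is a hypothetical name, the body mirrors the out_park code below):

	/* Sketch: re-arm the workers, mirroring the out_park unwind. */
	static void enable_retire_worker(struct drm_i915_private *i915)
	{
		mutex_lock(&i915->drm.struct_mutex);
		if (--i915->gt.active_requests)
			queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
		else
			queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
		mutex_unlock(&i915->drm.struct_mutex);
	}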
static int igt_mmap_offset_exhaustion(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -488,6 +521,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
u64 hole_start, hole_end;
int loop, err;
+ /* Disable background reaper */
+ disable_retire_worker(i915);
+ GEM_BUG_ON(!i915->gt.awake);
+
/* Trim the device mmap space to only a page */
memset(&resv, 0, sizeof(resv));
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
@@ -496,7 +533,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
err = drm_mm_reserve_node(mm, &resv);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
- return err;
+ goto out_park;
}
break;
}
@@ -538,6 +575,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
+ if (i915_terminally_wedged(&i915->gpu_error))
+ break;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -554,6 +594,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto err_obj;
}
+ /* NB we rely on the _active_ reference to access obj now */
GEM_BUG_ON(!i915_gem_object_is_active(obj));
err = i915_gem_object_create_mmap_offset(obj);
if (err) {
@@ -565,6 +606,13 @@ static int igt_mmap_offset_exhaustion(void *arg)
out:
drm_mm_remove_node(&resv);
+out_park:
+ mutex_lock(&i915->drm.struct_mutex);
+ if (--i915->gt.active_requests)
+ queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
+ else
+ queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
+ mutex_unlock(&i915->drm.struct_mutex);
return err;
err_obj:
i915_gem_object_put(obj);
@@ -586,7 +634,7 @@ int i915_gem_object_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index d16d74178e9d..1b70208eeea7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -24,3 +24,4 @@ selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
+selftest(contexts, i915_gem_context_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 94bc2e1898a4..c4aac6141e04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -262,7 +262,7 @@ int i915_request_mock_selftests(void)
return -ENOMEM;
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t,
t->func = func;
t->name = name;
- err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
@@ -342,9 +344,9 @@ static int live_nop_request(void *arg)
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
- struct i915_request *request;
+ struct i915_request *request = NULL;
unsigned long n, prime;
+ IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = begin_live_test(&t, i915, __func__, engine->name);
@@ -430,7 +432,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -466,7 +468,7 @@ empty_request(struct intel_engine_cs *engine,
goto out_request;
out_request:
- __i915_request_add(request, err == 0);
+ i915_request_add(request);
return err ? ERR_PTR(err) : request;
}
@@ -555,7 +557,8 @@ out_unlock:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -593,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
} else if (gen >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
*cmd++ = lower_32_bits(vma->node.start);
- } else if (gen >= 4) {
- *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cmd++ = lower_32_bits(vma->node.start);
} else {
- *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
@@ -677,7 +677,9 @@ static int live_all_engines(void *arg)
i915_gem_object_set_active_reference(batch->obj);
}
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_request_get(request[id]);
i915_request_add(request[id]);
}
@@ -787,7 +789,9 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_get(batch);
@@ -861,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sequential_engines),
SUBTEST(live_empty_request),
};
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index addc5a599c4a..86c54ea37f48 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -210,6 +210,8 @@ int __i915_subtests(const char *caller,
return -EINTR;
pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ GEM_TRACE("Running %s/%s\n", caller, st->name);
+
err = st->func(data);
if (err && err != -EINTR) {
pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index e90f97236e50..ffa74290e054 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != &ctx->ppgtt->base) {
+ if (vma->vm != &ctx->ppgtt->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm =
- &ctx->ppgtt->base;
+ struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_vma *vma;
int err;
@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL),
@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
- VALID(i915->ggtt.base.total, PIN_GLOBAL),
- NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total, PIN_GLOBAL),
+ NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
* variable start, end and size.
*/
NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
- NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
#endif
{ },
#undef NOSPACE
@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions.
*/
- GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+ GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
goto out;
@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj;
const struct phase {
@@ -734,7 +733,7 @@ int i915_vma_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 0d06f559243f..af66e3d4e23a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -9,52 +9,8 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
-struct wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
- struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
-
- GEM_TRACE("%pS timed out.\n", w->symbol);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const void *symbol)
-{
- w->i915 = i915;
- w->symbol = symbol;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
- (W)->i915; \
- __fini_wedge((W)))
-
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
- struct wedge_me w;
-
cond_resched();
if (flags & I915_WAIT_LOCKED &&
@@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
i915_gem_set_wedged(i915);
}
- wedge_on_timeout(&w, i915, HZ)
- i915_gem_wait_for_idle(i915, flags);
+ if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
+
+ GEM_TRACE("%pS timed out.\n", __builtin_return_address(0));
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(i915);
+ }
return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
new file mode 100644
index 000000000000..08e5ff11bbd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_WEDGE_ME_H
+#define IGT_WEDGE_ME_H
+
+#include <linux/workqueue.h>
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const char *name;
+};
+
+static void __igt_wedge_me(struct work_struct *work)
+{
+ struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ pr_err("%s timed out, cancelling test.\n", w->name);
+
+ GEM_TRACE("%s timed out.\n", w->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(w->i915);
+}
+
+static void __igt_init_wedge(struct igt_wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name)
+{
+ w->i915 = i915;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+static void __igt_fini_wedge(struct igt_wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
+
+#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
+ (W)->i915; \
+ __igt_fini_wedge((W)))
+
+#endif /* IGT_WEDGE_ME_H */
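igt_wedge_on_timeout() is a for-loop macro: it arms a delayed worker, runs its body exactly once, then cancels the worker; if the body blocks past the timeout, the worker wedges the GPU so any stuck waiters bail out. A minimal usage sketch, assuming struct_mutex is held as I915_WAIT_LOCKED requires (guarded_wait() is a hypothetical caller):

	/* Sketch: wedge the device if the wait blocks for longer than 1s. */
	static long guarded_wait(struct drm_i915_private *i915,
				 struct i915_request *rq)
	{
		struct igt_wedge_me w;
		long ret = 0;

		igt_wedge_on_timeout(&w, i915, HZ) /* body must finish in HZ */
			ret = i915_request_wait(rq, I915_WAIT_LOCKED,
						MAX_SCHEDULE_TIMEOUT);

		return ret;
	}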
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index d6926e7820e5..f03b407fdbe2 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void)
return -ENOMEM;
err = i915_subtests(tests, i915->engine[RCS]);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index fb74e2cf8a0a..407c98fb9170 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -196,19 +196,23 @@ static int igt_guc_clients(void *args)
}
unreserve_doorbell(guc->execbuf_client);
- err = guc_clients_doorbell_init(guc);
+
+ __create_doorbell(guc->execbuf_client);
+ err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
if (err != -EIO) {
pr_err("unexpected (err = %d)", err);
- goto out;
+ goto out_db;
}
if (!available_dbs(guc, guc->execbuf_client->priority)) {
pr_err("doorbell not available when it should\n");
err = -EIO;
- goto out;
+ goto out_db;
}
+out_db:
/* clean after test */
+ __destroy_doorbell(guc->execbuf_client);
err = reserve_doorbell(guc->execbuf_client);
if (err) {
pr_err("failed to reserve back the doorbell back\n");
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 438e0b045a2c..65d66cdedd26 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_wedge_me.h"
#include "mock_context.h"
#include "mock_drm.h"
@@ -105,7 +106,10 @@ static int emit_recurse_batch(struct hang *h,
struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ rq->gem_context->ppgtt ?
+ &rq->gem_context->ppgtt->vm :
+ &i915->ggtt.vm;
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
@@ -127,13 +131,19 @@ static int emit_recurse_batch(struct hang *h,
if (err)
goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
@@ -168,7 +178,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(i915) >= 4) {
- *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
@@ -181,7 +191,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
- *batch++ = MI_STORE_DWORD_IMM;
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_ARB_CHECK;
@@ -190,7 +200,7 @@ static int emit_recurse_batch(struct hang *h,
batch += 1024 / sizeof(*batch);
*batch++ = MI_ARB_CHECK;
- *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
@@ -202,6 +212,7 @@ static int emit_recurse_batch(struct hang *h,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
@@ -242,7 +253,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
err = emit_recurse_batch(h, rq);
if (err) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
return ERR_PTR(err);
}
@@ -315,7 +326,7 @@ static int igt_hang_sanitycheck(void *arg)
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
@@ -461,7 +472,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
@@ -560,6 +571,30 @@ struct active_engine {
#define TEST_SELF BIT(2)
#define TEST_PRIORITY BIT(3)
+static int active_request_put(struct i915_request *rq)
+{
+ int err = 0;
+
+ if (!rq)
+ return 0;
+
+ if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_global_seqno(rq));
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(rq->i915);
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+
+ return err;
+}
+
static int active_engine(void *data)
{
I915_RND_STATE(prng);
@@ -608,24 +643,20 @@ static int active_engine(void *data)
i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
- if (old) {
- if (i915_request_wait(old, 0, HZ) < 0) {
- GEM_TRACE("%s timed out.\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(engine->i915);
- i915_request_put(old);
- err = -EIO;
- break;
- }
- i915_request_put(old);
- }
+ err = active_request_put(old);
+ if (err)
+ break;
cond_resched();
}
- for (count = 0; count < ARRAY_SIZE(rq); count++)
- i915_request_put(rq[count]);
+ for (count = 0; count < ARRAY_SIZE(rq); count++) {
+ int err__ = active_request_put(rq[count]);
+
+ /* Keep the first error */
+ if (!err)
+ err = err__;
+ }
err_file:
mock_file_free(engine->i915, file);
@@ -719,7 +750,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
@@ -891,7 +922,7 @@ static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
return reset_count;
}
-static int igt_wait_reset(void *arg)
+static int igt_reset_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *rq;
@@ -919,7 +950,7 @@ static int igt_wait_reset(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -965,6 +996,170 @@ unlock:
return err;
}
+struct evict_vma {
+ struct completion completion;
+ struct i915_vma *vma;
+};
+
+static int evict_vma(void *data)
+{
+ struct evict_vma *arg = data;
+ struct i915_address_space *vm = arg->vma->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_mm_node evict = arg->vma->node;
+ int err;
+
+ complete(&arg->completion);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_evict_for_node(vm, &evict, 0);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_gem_object *obj;
+ struct task_struct *tsk = NULL;
+ struct i915_request *rq;
+ struct evict_vma arg;
+ struct hang h;
+ int err;
+
+ if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ return 0;
+
+ /* Check that we can recover an unbind stuck on a hanging request */
+
+ global_reset_lock(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fini;
+ }
+
+ arg.vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(arg.vma)) {
+ err = PTR_ERR(arg.vma);
+ goto out_obj;
+ }
+
+ rq = hang_create_request(&h, i915->engine[RCS]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_obj;
+ }
+
+ err = i915_vma_pin(arg.vma, 0, 0,
+ i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto out_obj;
+
+ err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unpin(arg.vma);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ goto out_rq;
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ i915_gem_set_wedged(i915);
+ goto out_reset;
+ }
+
+ init_completion(&arg.completion);
+
+ tsk = kthread_run(evict_vma, &arg, "igt/evict_vma");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ tsk = NULL;
+ goto out_reset;
+ }
+
+ wait_for_completion(&arg.completion);
+
+ if (wait_for(waitqueue_active(&rq->execute), 10)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("igt/evict_vma kthread did not wait\n");
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ i915_gem_set_wedged(i915);
+ goto out_reset;
+ }
+
+out_reset:
+ fake_hangcheck(rq, intel_engine_flag(rq->engine));
+
+ if (tsk) {
+ struct igt_wedge_me w;
+
+ /* The reset, even indirectly, should take less than 10ms. */
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ err = kthread_stop(tsk);
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+out_rq:
+ i915_request_put(rq);
+out_obj:
+ i915_gem_object_put(obj);
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ global_reset_unlock(i915);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
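The evict_vma() thread above uses the usual completion + kthread handshake: the child signals arg->completion before it can block on struct_mutex, and kthread_stop() both joins the thread and returns its exit code. The bare pattern, as a sketch independent of i915 (worker_fn, run_worker and "igt/worker" are hypothetical names):

	#include <linux/completion.h>
	#include <linux/kthread.h>

	struct worker_arg {
		struct completion started;
	};

	static int worker_fn(void *data)
	{
		struct worker_arg *arg = data;

		complete(&arg->started);	/* parent may proceed */
		/* ... the blocking work happens here ... */
		return 0;			/* harvested by kthread_stop() */
	}

	static int run_worker(struct worker_arg *arg)
	{
		struct task_struct *tsk;

		init_completion(&arg->started);
		tsk = kthread_run(worker_fn, arg, "igt/worker");
		if (IS_ERR(tsk))
			return PTR_ERR(tsk);

		wait_for_completion(&arg->started);
		return kthread_stop(tsk);	/* join + exit code */
	}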
+
+static int igt_reset_evict_ggtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+
+ return __igt_reset_evict_vma(i915, &i915->ggtt.vm);
+}
+
+static int igt_reset_evict_ppgtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ int err;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = kernel_context(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ err = 0;
+ if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm);
+
+ kernel_context_close(ctx);
+ return err;
+}
+
static int wait_for_others(struct drm_i915_private *i915,
struct intel_engine_cs *exclude)
{
@@ -1014,7 +1209,7 @@ static int igt_reset_queue(void *arg)
}
i915_request_get(prev);
- __i915_request_add(prev, true);
+ i915_request_add(prev);
count = 0;
do {
@@ -1028,7 +1223,7 @@ static int igt_reset_queue(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
/*
* XXX We don't handle resetting the kernel context
@@ -1161,7 +1356,7 @@ static int igt_handle_error(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -1210,8 +1405,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
- SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
+ SUBTEST(igt_reset_wait),
+ SUBTEST(igt_reset_evict_ggtt),
+ SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_handle_error),
};
bool saved_hangcheck;
@@ -1220,6 +1417,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (!intel_has_gpu_reset(i915))
return 0;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO; /* we're long past hope of a successful reset */
+
intel_runtime_pm_get(i915);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1b8a07125150..582566faef09 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
struct i915_request *rq,
u32 arbitration_command)
{
- struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
if (err)
goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
@@ -155,7 +162,7 @@ spinner_create_request(struct spinner *spin,
err = emit_recurse_batch(spin, rq, arbitration_command);
if (err) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
return ERR_PTR(err);
}
@@ -444,16 +451,134 @@ err_wedged:
goto err_ctx_lo;
}
+static int live_preempt_hang(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (spinner_init(&spin_hi, i915))
+ goto err_unlock;
+
+ if (spinner_init(&spin_lo, i915))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ init_completion(&engine->execlists.preempt_hang.completion);
+ engine->execlists.preempt_hang.inject_hang = true;
+
+ i915_request_add(rq);
+
+ if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
+ HZ / 10)) {
+ pr_err("Preemption did not occur within timeout!");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ i915_reset_engine(engine, NULL);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ engine->execlists.preempt_hang.inject_hang = false;
+
+ if (!wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ spinner_end(&spin_hi);
+ spinner_end(&spin_lo);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ spinner_fini(&spin_lo);
+err_spin_hi:
+ spinner_fini(&spin_hi);
+err_unlock:
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_preempt_hang),
};
if (!HAS_EXECLISTS(i915))
return 0;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 47bc5b2ddb56..81d9d31042a9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -160,7 +160,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
check_for_unclaimed_mmio(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 17444a3abbb9..0d39b3bf0c0d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,7 @@
#include "../i915_selftest.h"
+#include "igt_wedge_me.h"
#include "mock_context.h"
static struct drm_i915_gem_object *
@@ -33,7 +34,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
memset(cs, 0xc5, PAGE_SIZE);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -49,6 +50,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_pin;
}
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_req;
+
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
@@ -67,15 +72,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
-
i915_gem_object_get(result);
i915_gem_object_set_active_reference(result);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
i915_vma_unpin(vma);
return result;
@@ -112,6 +112,7 @@ static int check_whitelist(const struct whitelist *w,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
+ struct igt_wedge_me wedge;
u32 *vaddr;
int err;
int i;
@@ -120,7 +121,11 @@ static int check_whitelist(const struct whitelist *w,
if (IS_ERR(results))
return PTR_ERR(results);
- err = i915_gem_object_set_to_cpu_domain(results, false);
+ err = 0;
+ igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+ err = i915_gem_object_set_to_cpu_domain(results, false);
+ if (i915_terminally_wedged(&ctx->i915->gpu_error))
+ err = -EIO;
if (err)
goto out_put;
@@ -283,6 +288,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
};
int err;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 501becc47c0c..8904f1ce64e3 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
index 302f7d103635..ca682caf1062 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vm_unmap_ram(vaddr, mock->npages);
}
-static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kmap_atomic(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- kunmap_atomic(addr);
-}
-
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
@@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
.map = mock_dmabuf_kmap,
- .map_atomic = mock_dmabuf_kmap_atomic,
.unmap = mock_dmabuf_kunmap,
- .unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 26bf29d97007..22a73da45ad5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -72,25 +72,34 @@ static void hw_delay_complete(struct timer_list *t)
spin_unlock(&engine->hw_lock);
}
-static struct intel_ring *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static void mock_context_unpin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- if (!ce->pin_count++)
- i915_gem_context_get(ctx);
+ i915_gem_context_put(ce->gem_context);
+}
- return engine->buffer;
+static void mock_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
}
-static void mock_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops mock_context_ops = {
+ .unpin = mock_context_unpin,
+ .destroy = mock_context_destroy,
+};
+
+static struct intel_context *
+mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
- if (!--ce->pin_count)
- i915_gem_context_put(ctx);
+ if (!ce->pin_count++) {
+ i915_gem_context_get(ctx);
+ ce->ring = engine->buffer;
+ ce->ops = &mock_context_ops;
+ }
+
+ return ce;
}
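mock_context_pin() follows the counted-pin idiom: the context reference, ring and ops are taken only on the 0 -> 1 transition, and the intel_context core invokes ops->unpin() only when the count drops back to zero. The idiom in isolation, as a sketch with hypothetical names:

	/* Sketch: acquire on first pin, release on last unpin. */
	struct pinnable {
		unsigned int pin_count;
	};

	static void acquire_resources(struct pinnable *p) { /* take refs */ }
	static void release_resources(struct pinnable *p) { /* drop refs */ }

	static void obj_pin(struct pinnable *p)
	{
		if (!p->pin_count++)
			acquire_resources(p);	/* only on 0 -> 1 */
	}

	static void obj_unpin(struct pinnable *p)
	{
		if (!--p->pin_count)
			release_resources(p);	/* only on 1 -> 0 */
	}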
static int mock_request_alloc(struct i915_request *request)
@@ -185,13 +194,14 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.status_page.page_addr = (void *)(engine + 1);
engine->base.context_pin = mock_context_pin;
- engine->base.context_unpin = mock_context_unpin;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+ lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+
intel_engine_init_breadcrumbs(&engine->base);
engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
@@ -204,8 +214,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
if (!engine->base.buffer)
goto err_breadcrumbs;
+ if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ goto err_ring;
+
return &engine->base;
+err_ring:
+ mock_ring_free(engine->base.buffer);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(&engine->base);
i915_timeline_fini(&engine->base.timeline);
@@ -238,11 +253,15 @@ void mock_engine_free(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
+ struct intel_context *ce;
GEM_BUG_ON(timer_pending(&mock->hw_delay));
- if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+
+ __intel_context_unpin(engine->i915->kernel_context, engine);
mock_ring_free(engine->buffer);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 94baedfa0f74..43ed8b28aeaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -136,8 +136,6 @@ static struct dev_pm_domain pm_domain = {
struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct pci_dev *pdev;
int err;
@@ -159,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void)
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- WARN_ON(pm_runtime_get_sync(&pdev->dev));
+ if (pm_runtime_enabled(&pdev->dev))
+ WARN_ON(pm_runtime_get_sync(&pdev->dev));
i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
@@ -233,13 +232,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915);
mkwrite_device_info(i915)->ring_mask = BIT(0);
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
- goto err_unlock;
-
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
- goto err_engine;
+ goto err_unlock;
+
+ i915->engine[RCS] = mock_engine(i915, "mock", RCS);
+ if (!i915->engine[RCS])
+ goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -247,9 +246,8 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
-err_engine:
- for_each_engine(engine, i915, id)
- mock_engine_free(engine);
+err_context:
+ i915_gem_contexts_fini(i915);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
kmem_cache_destroy(i915->priorities);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 36c112088940..a140ea5c3a7c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -66,25 +66,21 @@ mock_ppgtt(struct drm_i915_private *i915,
return NULL;
kref_init(&ppgtt->ref);
- ppgtt->base.i915 = i915;
- ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
- ppgtt->base.file = ERR_PTR(-ENODEV);
-
- INIT_LIST_HEAD(&ppgtt->base.active_list);
- INIT_LIST_HEAD(&ppgtt->base.inactive_list);
- INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
- INIT_LIST_HEAD(&ppgtt->base.global_link);
- drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
- ppgtt->base.clear_range = nop_clear_range;
- ppgtt->base.insert_page = mock_insert_page;
- ppgtt->base.insert_entries = mock_insert_entries;
- ppgtt->base.bind_vma = mock_bind_ppgtt;
- ppgtt->base.unbind_vma = mock_unbind_ppgtt;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = mock_cleanup;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+ ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+ i915_address_space_init(&ppgtt->vm, i915);
+
+ ppgtt->vm.clear_range = nop_clear_range;
+ ppgtt->vm.insert_page = mock_insert_page;
+ ppgtt->vm.insert_entries = mock_insert_entries;
+ ppgtt->vm.cleanup = mock_cleanup;
+
+ ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
+ ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
return ppgtt;
}
@@ -105,29 +101,28 @@ void mock_init_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- INIT_LIST_HEAD(&i915->vm_list);
-
- ggtt->base.i915 = i915;
+ ggtt->vm.i915 = i915;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
- ggtt->base.total = 4096 * PAGE_SIZE;
-
- ggtt->base.clear_range = nop_clear_range;
- ggtt->base.insert_page = mock_insert_page;
- ggtt->base.insert_entries = mock_insert_entries;
- ggtt->base.bind_vma = mock_bind_ggtt;
- ggtt->base.unbind_vma = mock_unbind_ggtt;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = mock_cleanup;
-
- i915_address_space_init(&ggtt->base, i915, "global");
+ ggtt->vm.total = 4096 * PAGE_SIZE;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ ggtt->vm.insert_page = mock_insert_page;
+ ggtt->vm.insert_entries = mock_insert_entries;
+ ggtt->vm.cleanup = mock_cleanup;
+
+ ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
+ ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ i915_address_space_init(&ggtt->vm, i915);
}
void mock_fini_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index f349b3920199..435a2c35ee8c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -69,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
}
}
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
@@ -342,11 +342,15 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
- }
- ret = intel_compute_dsi_pll(encoder, pipe_config);
- if (ret)
- return false;
+ ret = bxt_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return false;
+ } else {
+ ret = vlv_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return false;
+ }
pipe_config->clock_set = true;
@@ -546,12 +550,12 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev_priv))
- bxt_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_dsi_device_ready(encoder);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_dsi_device_ready(encoder);
+ else
+ vlv_dsi_device_ready(encoder);
}
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
@@ -810,8 +814,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- intel_disable_dsi_pll(encoder);
- intel_enable_dsi_pll(encoder, pipe_config);
+ if (IS_GEN9_LP(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ bxt_dsi_pll_enable(encoder, pipe_config);
+ } else {
+ vlv_dsi_pll_disable(encoder);
+ vlv_dsi_pll_enable(encoder, pipe_config);
+ }
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
@@ -929,11 +938,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
- IS_BROXTON(dev_priv))
- vlv_dsi_clear_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_dsi_clear_device_ready(encoder);
+ else
+ vlv_dsi_clear_device_ready(encoder);
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -949,7 +957,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- wait_for_dsi_fifo_empty(intel_dsi, port);
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
intel_dsi_port_disable(encoder);
usleep_range(2000, 5000);
@@ -979,11 +987,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
val & ~MIPIO_RST_CTRL);
}
- intel_disable_dsi_pll(encoder);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ } else {
u32 val;
+ vlv_dsi_pll_disable(encoder);
+
val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
@@ -1024,7 +1034,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+ if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
goto out_put_power;
/* XXX: this only works for one DSI output */
@@ -1247,16 +1257,19 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(dev_priv)) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
+ pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
+ } else {
+ pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
+ }
- pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
- pipe_config);
- if (!pclk)
- return;
-
- pipe_config->base.adjusted_mode.crtc_clock = pclk;
- pipe_config->port_clock = pclk;
+ if (pclk) {
+ pipe_config->base.adjusted_mode.crtc_clock = pclk;
+ pipe_config->port_clock = pclk;
+ }
}
static enum drm_mode_status
@@ -1585,20 +1598,24 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
enum port port;
u32 val;
- if (!IS_GEMINILAKE(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- /* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+ if (IS_GEMINILAKE(dev_priv))
+ return;
- intel_dsi_reset_clocks(encoder, port);
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
- val = I915_READ(MIPI_DSI_FUNC_PRG(port));
- val &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ if (IS_GEN9_LP(dev_priv))
+ bxt_dsi_reset_clocks(encoder, port);
+ else
+ vlv_dsi_reset_clocks(encoder, port);
+ I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
- }
+ val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ val &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
}
}
@@ -1671,16 +1688,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- enum i9xx_plane_id plane;
+ enum i9xx_plane_id i9xx_plane;
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->encoder->crtc_mask == BIT(PIPE_B))
- plane = PLANE_B;
+ i9xx_plane = PLANE_B;
else
- plane = PLANE_A;
+ i9xx_plane = PLANE_A;
- val = I915_READ(DSPCNTR(plane));
+ val = I915_READ(DSPCNTR(i9xx_plane));
if (val & DISPPLANE_ROTATE_180)
orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
}
@@ -1713,7 +1730,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
}
}
-void intel_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_dsi *intel_dsi;
@@ -1730,14 +1747,10 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv))
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
- } else {
- DRM_ERROR("Unsupported Mipi device to reg base");
- return;
- }
+ else
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 2ff2ee7f3b78..a132a8037ecc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -111,8 +111,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
* XXX: The muxing and gating are hard-coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
-static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -142,8 +142,8 @@ static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
return 0;
}
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -175,7 +175,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
@@ -192,7 +192,7 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
bool enabled;
u32 val;
@@ -229,7 +229,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
return enabled;
}
-static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
+void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
@@ -261,8 +261,8 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
bpp, pipe_bpp);
}
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -327,8 +327,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
return pclk;
}
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
u32 pclk;
u32 dsi_clk;
@@ -357,16 +357,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
return pclk;
}
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
-{
- if (IS_GEN9_LP(to_i915(encoder->base.dev)))
- return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
- else
- return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
-}
-
-static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -480,8 +471,8 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
-static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -528,8 +519,8 @@ static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
return 0;
}
-static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -568,52 +559,7 @@ static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
-{
- if (IS_GEN9_LP(dev_priv))
- return bxt_dsi_pll_is_enabled(dev_priv);
-
- MISSING_CASE(INTEL_DEVID(dev_priv));
-
- return false;
-}
-
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_compute_dsi_pll(encoder, config);
- else if (IS_GEN9_LP(dev_priv))
- return gen9lp_compute_dsi_pll(encoder, config);
-
- return -ENODEV;
-}
-
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_enable_dsi_pll(encoder, config);
- else if (IS_GEN9_LP(dev_priv))
- gen9lp_enable_dsi_pll(encoder, config);
-}
-
-void intel_disable_dsi_pll(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_disable_dsi_pll(encoder);
- else if (IS_GEN9_LP(dev_priv))
- bxt_disable_dsi_pll(encoder);
-}
-
-static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port)
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
@@ -638,13 +584,3 @@ static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
}
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
-
-void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_GEN9_LP(dev_priv))
- gen9lp_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_dsi_reset_clocks(encoder, port);
-}
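[Editorial note] The hunks above delete the intel_* indirection layer for the DSI PLL: the formerly static vlv_/bxt_/gen9lp_ helpers are renamed and exported, and the platform check moves to the call sites. A minimal sketch of what a caller-side dispatch looks like after this change; the vlv_dsi_pll_enable name is an assumption here, since only the bxt_ renames are visible in this hunk and the real call sites live in intel_dsi.c elsewhere in the series:

/* Sketch only: platform dispatch hoisted out of the PLL file. */
static void intel_dsi_pll_enable(struct intel_encoder *encoder,
				 const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (IS_GEN9_LP(dev_priv))
		bxt_dsi_pll_enable(encoder, config);
	else
		vlv_dsi_pll_enable(encoder, config);
}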
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1d053bbefc02..5ea0c82f9957 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -35,12 +35,6 @@
#define MAX_CRTC 4
-struct imx_drm_device {
- struct drm_device *drm;
- unsigned int pipes;
- struct drm_atomic_state *state;
-};
-
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
@@ -219,22 +213,12 @@ static int compare_of(struct device *dev, void *data)
static int imx_drm_bind(struct device *dev)
{
struct drm_device *drm;
- struct imx_drm_device *imxdrm;
int ret;
drm = drm_dev_alloc(&imx_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
- if (!imxdrm) {
- ret = -ENOMEM;
- goto err_unref;
- }
-
- imxdrm->drm = drm;
- drm->dev_private = imxdrm;
-
/*
* enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
@@ -306,8 +290,7 @@ err_unbind:
component_unbind_all(drm->dev, drm);
err_kms:
drm_mode_config_cleanup(drm);
-err_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -327,7 +310,7 @@ static void imx_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops imx_drm_ops = {
@@ -355,37 +338,15 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
static int imx_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imxdrm;
-
- /* The drm_dev is NULL before .load hook is called */
- if (drm_dev == NULL)
- return 0;
-
- drm_kms_helper_poll_disable(drm_dev);
- imxdrm = drm_dev->dev_private;
- imxdrm->state = drm_atomic_helper_suspend(drm_dev);
- if (IS_ERR(imxdrm->state)) {
- drm_kms_helper_poll_enable(drm_dev);
- return PTR_ERR(imxdrm->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static int imx_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imx_drm;
- if (drm_dev == NULL)
- return 0;
-
- imx_drm = drm_dev->dev_private;
- drm_atomic_helper_resume(drm_dev, imx_drm->state);
- drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
+ return drm_mode_config_helper_resume(drm_dev);
}
#endif
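[Editorial note] The open-coded suspend/resume above is replaced by the generic mode-config helpers, which stash the atomic state in struct drm_device itself and make the driver-private imx_drm_device struct unnecessary. A simplified sketch of what drm_mode_config_helper_suspend() does internally (the real helper in drm_modeset_helper.c also toggles fbdev suspend):

int drm_mode_config_helper_suspend(struct drm_device *dev)
{
	struct drm_atomic_state *state;

	if (!dev)
		return 0;

	drm_kms_helper_poll_disable(dev);
	state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(state)) {
		drm_kms_helper_poll_enable(dev);
		return PTR_ERR(state);
	}

	/* Stashed here so drm_mode_config_helper_resume() can find it. */
	dev->mode_config.suspend_state = state;

	return 0;
}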
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 15c2bec47a04..ab9c6f706eb3 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -10,7 +10,6 @@ struct drm_display_mode;
struct drm_encoder;
struct drm_framebuffer;
struct drm_plane;
-struct imx_drm_crtc;
struct platform_device;
struct imx_crtc_state {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 56dd7a9a8e25..3bd0f8a18e74 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -143,7 +143,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
if (imx_ldb_ch->edid) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
imx_ldb_ch->edid);
num_modes = drm_add_edid_modes(connector, imx_ldb_ch->edid);
}
@@ -471,8 +471,7 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
- encoder);
+ drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
}
if (imx_ldb_ch->panel) {
@@ -612,6 +611,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
imx_ldb->dev = dev;
if (of_id)
@@ -652,14 +654,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;
+ if (!of_device_is_available(child))
+ continue;
+
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
- if (!of_device_is_available(child))
- continue;
-
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index bc27c2699464..cffd3310240e 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -235,7 +235,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, tve->ddc);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -493,7 +493,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
- drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
+ drm_connector_attach_encoder(&tve->connector, &tve->encoder);
return 0;
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index e83af0f2be86..7d4b710b837a 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -35,7 +35,6 @@
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
- struct imx_drm_crtc *imx_crtc;
/* plane[0] is the full plane, plane[1] is the partial plane */
struct ipu_plane *plane[2];
@@ -213,7 +212,7 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
+ u32 primary_plane_mask = drm_plane_mask(crtc->primary);
if (state->active && (primary_plane_mask & state->plane_mask) == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aedecda9728a..aefd04e18f93 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -63,7 +63,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
}
if (imxpd->edid) {
- drm_mode_connector_update_edid_property(connector, imxpd->edid);
+ drm_connector_update_edid_property(connector, imxpd->edid);
num_modes = drm_add_edid_modes(connector, imxpd->edid);
}
@@ -197,7 +197,7 @@ static int imx_pd_register(struct drm_device *drm,
return ret;
}
} else {
- drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+ drm_connector_attach_encoder(&imxpd->connector, encoder);
}
return 0;
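[Editorial note] The drm_mode_connector_* calls in the imx hunks above (and in the mediatek hunks below) are updated to the shorter drm_connector_* names; apart from the rename the signatures are unchanged:

int drm_connector_attach_encoder(struct drm_connector *connector,
				 struct drm_encoder *encoder);
int drm_connector_update_edid_property(struct drm_connector *connector,
				       const struct edid *edid);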
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index 7a3eb8c17ef9..5ce84d0dbf81 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "mtk_cec.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 658b8dd45b83..2d6aa150a9ff 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -539,6 +539,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
int ret;
int i;
+ if (!path)
+ return 0;
+
for (i = 0; i < path_len; i++) {
enum mtk_ddp_comp_id comp_id = path[i];
struct device_node *node;
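[Editorial note] The NULL check added to mtk_drm_crtc_create() makes the new third display path optional: driver data that defines no such path simply skips CRTC creation. A hypothetical driver-data entry relying on this, with field names as in the mtk_drm_drv.h hunk further below:

static const struct mtk_mmsys_driver_data example_mmsys_driver_data = {
	.main_path = mt2712_mtk_ddp_main,
	.main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
	/* .third_path left NULL: mtk_drm_crtc_create() returns 0 for it */
};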
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8130f3dab661..87e4191c250e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -28,8 +28,12 @@
#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
@@ -41,45 +45,89 @@
#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
-#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11)
-#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12)
-#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13)
-#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14)
-#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15)
-#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16)
-#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17)
-#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18)
-#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19)
-#define MT8173_MUTEX_MOD_DISP_AAL BIT(20)
-#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21)
-#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22)
-#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23)
-#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24)
-#define MT8173_MUTEX_MOD_DISP_OD BIT(25)
-
-#define MT2701_MUTEX_MOD_DISP_OVL BIT(3)
-#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6)
-#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7)
-#define MT2701_MUTEX_MOD_DISP_BLS BIT(9)
-#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10)
-#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12)
+#define MT8173_MUTEX_MOD_DISP_OVL0 11
+#define MT8173_MUTEX_MOD_DISP_OVL1 12
+#define MT8173_MUTEX_MOD_DISP_RDMA0 13
+#define MT8173_MUTEX_MOD_DISP_RDMA1 14
+#define MT8173_MUTEX_MOD_DISP_RDMA2 15
+#define MT8173_MUTEX_MOD_DISP_WDMA0 16
+#define MT8173_MUTEX_MOD_DISP_WDMA1 17
+#define MT8173_MUTEX_MOD_DISP_COLOR0 18
+#define MT8173_MUTEX_MOD_DISP_COLOR1 19
+#define MT8173_MUTEX_MOD_DISP_AAL 20
+#define MT8173_MUTEX_MOD_DISP_GAMMA 21
+#define MT8173_MUTEX_MOD_DISP_UFOE 22
+#define MT8173_MUTEX_MOD_DISP_PWM0 23
+#define MT8173_MUTEX_MOD_DISP_PWM1 24
+#define MT8173_MUTEX_MOD_DISP_OD 25
+
+#define MT2712_MUTEX_MOD_DISP_PWM2 10
+#define MT2712_MUTEX_MOD_DISP_OVL0 11
+#define MT2712_MUTEX_MOD_DISP_OVL1 12
+#define MT2712_MUTEX_MOD_DISP_RDMA0 13
+#define MT2712_MUTEX_MOD_DISP_RDMA1 14
+#define MT2712_MUTEX_MOD_DISP_RDMA2 15
+#define MT2712_MUTEX_MOD_DISP_WDMA0 16
+#define MT2712_MUTEX_MOD_DISP_WDMA1 17
+#define MT2712_MUTEX_MOD_DISP_COLOR0 18
+#define MT2712_MUTEX_MOD_DISP_COLOR1 19
+#define MT2712_MUTEX_MOD_DISP_AAL0 20
+#define MT2712_MUTEX_MOD_DISP_UFOE 22
+#define MT2712_MUTEX_MOD_DISP_PWM0 23
+#define MT2712_MUTEX_MOD_DISP_PWM1 24
+#define MT2712_MUTEX_MOD_DISP_OD0 25
+#define MT2712_MUTEX_MOD2_DISP_AAL1 33
+#define MT2712_MUTEX_MOD2_DISP_OD1 34
+
+#define MT2701_MUTEX_MOD_DISP_OVL 3
+#define MT2701_MUTEX_MOD_DISP_WDMA 6
+#define MT2701_MUTEX_MOD_DISP_COLOR 7
+#define MT2701_MUTEX_MOD_DISP_BLS 9
+#define MT2701_MUTEX_MOD_DISP_RDMA0 10
+#define MT2701_MUTEX_MOD_DISP_RDMA1 12
#define MUTEX_SOF_SINGLE_MODE 0
#define MUTEX_SOF_DSI0 1
#define MUTEX_SOF_DSI1 2
#define MUTEX_SOF_DPI0 3
+#define MUTEX_SOF_DPI1 4
+#define MUTEX_SOF_DSI2 5
+#define MUTEX_SOF_DSI3 6
#define OVL0_MOUT_EN_COLOR0 0x1
#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
#define UFOE_MOUT_EN_DSI0 0x1
#define COLOR0_SEL_IN_OVL0 0x1
#define OVL1_MOUT_EN_COLOR1 0x1
#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA1_MOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
#define COLOR1_SEL_IN_OVL1 0x1
#define OVL_MOUT_EN_RDMA 0x1
@@ -108,12 +156,32 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
+static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1,
+ [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT2712_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_OD0] = MT2712_MUTEX_MOD_DISP_OD0,
+ [DDP_COMPONENT_OD1] = MT2712_MUTEX_MOD2_DISP_OD1,
+ [DDP_COMPONENT_OVL0] = MT2712_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT2712_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT2712_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT2712_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_PWM2] = MT2712_MUTEX_MOD_DISP_PWM2,
+ [DDP_COMPONENT_RDMA0] = MT2712_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2712_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT2712_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT2712_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT2712_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
+};
+
static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
[DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
[DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
- [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OD0] = MT8173_MUTEX_MOD_DISP_OD,
[DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
[DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
@@ -138,7 +206,7 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
+ } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
value = OD_MOUT_EN_RDMA0;
} else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
@@ -150,9 +218,48 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
*addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
value = GAMMA_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD1_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI3;
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN;
- value = RDMA1_MOUT_DPI0;
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI3;
} else {
value = 0;
}
@@ -172,6 +279,33 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI3_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI3_SEL_IN_RDMA2;
} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
*addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
value = COLOR1_SEL_IN_OVL1;
@@ -278,6 +412,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
@@ -288,13 +423,30 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DSI1:
reg = MUTEX_SOF_DSI0;
break;
+ case DDP_COMPONENT_DSI2:
+ reg = MUTEX_SOF_DSI2;
+ break;
+ case DDP_COMPONENT_DSI3:
+ reg = MUTEX_SOF_DSI3;
+ break;
case DDP_COMPONENT_DPI0:
reg = MUTEX_SOF_DPI0;
break;
+ case DDP_COMPONENT_DPI1:
+ reg = MUTEX_SOF_DPI1;
+ break;
default:
- reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg |= ddp->mutex_mod[id];
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ if (ddp->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg |= 1 << ddp->mutex_mod[id];
+ writel_relaxed(reg, ddp->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg |= 1 << (ddp->mutex_mod[id] - 32);
+ writel_relaxed(reg, ddp->regs + offset);
+ }
return;
}
@@ -307,20 +459,32 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
case DDP_COMPONENT_DSI1:
+ case DDP_COMPONENT_DSI2:
+ case DDP_COMPONENT_DSI3:
case DDP_COMPONENT_DPI0:
+ case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
break;
default:
- reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg &= ~(ddp->mutex_mod[id]);
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ if (ddp->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg &= ~(1 << ddp->mutex_mod[id]);
+ writel_relaxed(reg, ddp->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ writel_relaxed(reg, ddp->regs + offset);
+ }
break;
}
}
@@ -407,6 +571,7 @@ static int mtk_ddp_remove(struct platform_device *pdev)
static const struct of_device_id ddp_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
+ { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
{ .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
{},
};
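[Editorial note] The mutex tables above now store bit positions instead of BIT() masks so that components whose index is 32 or higher can land in the new MUTEX_MOD2 register. A sketch of the selection rule used in both add_comp and remove_comp, factored into one hypothetical helper:

static void mtk_mutex_mod_update(void __iomem *regs, unsigned int mutex_id,
				 unsigned int mod_bit, bool set)
{
	unsigned int offset = mod_bit < 32 ? DISP_REG_MUTEX_MOD(mutex_id)
					   : DISP_REG_MUTEX_MOD2(mutex_id);
	u32 mask = 1 << (mod_bit & 31);	/* (mod_bit - 32) in the MOD2 bank */
	u32 reg = readl_relaxed(regs + offset);

	if (set)
		reg |= mask;
	else
		reg &= ~mask;
	writel_relaxed(reg, regs + offset);
}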
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 4672317e3ad1..ff974d82a4a6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -218,18 +218,25 @@ struct mtk_ddp_comp_match {
};
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
+ [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
+ [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, NULL },
+ [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, NULL },
[DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
- [DDP_COMPONENT_OD] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
+ [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
+ [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
[DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, NULL },
[DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, NULL },
[DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, NULL },
@@ -271,7 +278,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
if (comp_id == DDP_COMPONENT_BLS ||
comp_id == DDP_COMPONENT_DPI0 ||
+ comp_id == DDP_COMPONENT_DPI1 ||
comp_id == DDP_COMPONENT_DSI0 ||
+ comp_id == DDP_COMPONENT_DSI1 ||
+ comp_id == DDP_COMPONENT_DSI2 ||
+ comp_id == DDP_COMPONENT_DSI3 ||
comp_id == DDP_COMPONENT_PWM0) {
comp->regs = NULL;
comp->clk = NULL;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0828cf8bf85c..7413ffeb3c9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -41,19 +41,25 @@ enum mtk_ddp_comp_type {
};
enum mtk_ddp_comp_id {
- DDP_COMPONENT_AAL,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
DDP_COMPONENT_DSI1,
+ DDP_COMPONENT_DSI2,
+ DDP_COMPONENT_DSI3,
DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_OD,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
+ DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index a2ca90fc403c..39721119713b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -146,11 +146,37 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_PWM0,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_AAL1,
+ DDP_COMPONENT_OD1,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI1,
+ DDP_COMPONENT_PWM1,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_third[] = {
+ DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_PWM2,
+};
+
static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
- DDP_COMPONENT_AAL,
- DDP_COMPONENT_OD,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_OD0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_DSI0,
@@ -173,6 +199,15 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.shadow_register = true,
};
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+ .main_path = mt2712_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
+ .ext_path = mt2712_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext),
+ .third_path = mt2712_mtk_ddp_third,
+ .third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
+};
+
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.main_path = mt8173_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
@@ -232,6 +267,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
if (ret < 0)
goto err_component_unbind;
+ ret = mtk_drm_crtc_create(drm, private->data->third_path,
+ private->data->third_len);
+ if (ret < 0)
+ goto err_component_unbind;
+
/* Use OVL device for all DMA memory allocations */
np = private->comp_node[private->data->main_path[0]] ?:
private->comp_node[private->data->ext_path[0]];
@@ -360,24 +400,44 @@ static const struct component_master_ops mtk_drm_ops = {
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
- { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
- { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
- { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
- { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
- { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
- { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
- { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
- { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS },
- { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
- { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-wdma",
+ .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8173-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8173-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8173-disp-ufoe",
+ .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2701-disp-pwm",
+ .data = (void *)MTK_DISP_BLS },
+ { .compatible = "mediatek,mt8173-disp-pwm",
+ .data = (void *)MTK_DISP_PWM },
+ { .compatible = "mediatek,mt8173-disp-od",
+ .data = (void *)MTK_DISP_OD },
{ }
};
@@ -552,6 +612,8 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
.data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data},
{ }
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index c3378c452c0a..ecc00ca3221d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -17,7 +17,7 @@
#include <linux/io.h>
#include "mtk_drm_ddp_comp.h"
-#define MAX_CRTC 2
+#define MAX_CRTC 3
#define MAX_CONNECTOR 2
struct device;
@@ -33,6 +33,9 @@ struct mtk_mmsys_driver_data {
unsigned int main_len;
const enum mtk_ddp_comp_id *ext_path;
unsigned int ext_len;
+ const enum mtk_ddp_comp_id *third_path;
+ unsigned int third_len;
+
bool shadow_register;
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index 0d8d506695f9..be5f6f1daf55 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
@@ -22,78 +23,37 @@
#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
-/*
- * mtk specific framebuffer structure.
- *
- * @fb: drm framebuffer object.
- * @gem_obj: array of gem objects.
- */
-struct mtk_drm_fb {
- struct drm_framebuffer base;
- /* For now we only support a single plane */
- struct drm_gem_object *gem_obj;
-};
-
-#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
-
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return mtk_fb->gem_obj;
-}
-
-static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
-}
-
-static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- drm_framebuffer_cleanup(fb);
-
- drm_gem_object_put_unlocked(mtk_fb->gem_obj);
-
- kfree(mtk_fb);
-}
-
static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
- .create_handle = mtk_drm_fb_create_handle,
- .destroy = mtk_drm_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode,
struct drm_gem_object *obj)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
int ret;
if (drm_format_num_planes(mode->pixel_format) != 1)
return ERR_PTR(-EINVAL);
- mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
- if (!mtk_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode);
- mtk_fb->gem_obj = obj;
+ fb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
- kfree(mtk_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return mtk_fb;
+ return fb;
}
/*
@@ -110,7 +70,7 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
if (!fb)
return 0;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
return 0;
@@ -128,7 +88,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *gem;
unsigned int width = cmd->width;
unsigned int height = cmd->height;
@@ -151,13 +111,13 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
goto unreference;
}
- mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
- if (IS_ERR(mtk_fb)) {
- ret = PTR_ERR(mtk_fb);
+ fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto unreference;
}
- return &mtk_fb->base;
+ return fb;
unreference:
drm_gem_object_put_unlocked(gem);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
index 9b2ae345a4e9..7f976b196a15 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
@@ -14,7 +14,6 @@
#ifndef MTK_DRM_FB_H
#define MTK_DRM_FB_H
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
int mtk_fb_wait(struct drm_framebuffer *fb);
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
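[Editorial note] With fb->obj[0] holding the GEM object, the driver-private mtk_drm_fb wrapper and its accessor become redundant; the generic helpers wired into mtk_drm_fb_funcs do the same work. Simplified sketch of those helpers, based on drm_gem_framebuffer_helper.c:

int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
			     unsigned int *handle)
{
	return drm_gem_handle_create(file, fb->obj[0], handle);
}

void drm_gem_fb_destroy(struct drm_framebuffer *fb)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (fb->obj[i])
			drm_gem_object_put_unlocked(fb->obj[i]);
	}

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}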
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 2f4b0ffee598..f7e6aa1b5b7d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -95,11 +95,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!mtk_fb_get_gem_obj(fb)) {
- DRM_DEBUG_KMS("buffer is null\n");
- return -EFAULT;
- }
-
if (!state->crtc)
return 0;
@@ -127,7 +122,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
pitch = fb->pitches[0];
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index aa0943ec32b0..66df1b177959 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -782,7 +782,7 @@ static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
dsi->conn.dpms = DRM_MODE_DPMS_OFF;
- drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);
+ drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);
if (dsi->panel) {
ret = drm_panel_attach(dsi->panel, &dsi->conn);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 59a11026dceb..2d45d1dd9554 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1220,7 +1220,7 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(conn, edid);
+ drm_connector_update_edid_property(conn, edid);
ret = drm_add_edid_modes(conn, edid);
kfree(edid);
@@ -1306,7 +1306,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
hdmi->conn.interlace_allowed = true;
hdmi->conn.doublescan_allowed = false;
- ret = drm_mode_connector_attach_encoder(&hdmi->conn,
+ ret = drm_connector_attach_encoder(&hdmi->conn,
bridge->encoder);
if (ret) {
dev_err(hdmi->dev,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 32b1a6cdecfc..d3443125e661 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
priv->io_base = regs;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
- if (!res)
- return -EINVAL;
+ if (!res) {
+ ret = -EINVAL;
+ goto free_drm;
+ }
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
if (!regs) {
@@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res)
- return -EINVAL;
+ if (!res) {
+ ret = -EINVAL;
+ goto free_drm;
+ }
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
if (!regs) {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index c9ad45686e7a..df7247cd93f9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -329,6 +329,12 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
vclk_freq = mode->clock;
+ if (!vic) {
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
+ vclk_freq, vclk_freq, false);
+ return;
+ }
+
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -542,10 +548,12 @@ static enum drm_mode_status
dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
+ struct meson_drm *priv = connector->dev->dev_private;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
int vic = drm_match_cea_mode(mode);
+ enum drm_mode_status status;
DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
@@ -556,8 +564,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
/* Check against non-VIC supported modes */
if (!vic) {
- if (!meson_venc_hdmi_supported_mode(mode))
- return MODE_BAD;
+ status = meson_venc_hdmi_supported_mode(mode);
+ if (status != MODE_OK)
+ return status;
+
+ return meson_vclk_dmt_supported_freq(priv, mode->clock);
/* Check against supported VIC modes */
} else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
@@ -583,16 +594,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
vclk_freq, venc_freq, hdmi_freq);
- /* Finally filter by configurable vclk frequencies */
+ /* Finally filter by configurable vclk frequencies for VIC modes */
switch (vclk_freq) {
- case 25175:
- case 40000:
case 54000:
- case 65000:
case 74250:
- case 108000:
case 148500:
- case 162000:
case 297000:
case 594000:
return MODE_OK;
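[Editorial note] Non-VIC (DMT) modes are no longer limited to the hard-coded frequency list trimmed above: dw_hdmi_mode_valid() now defers to meson_vclk_dmt_supported_freq(), which searches for usable PLL parameters (see the meson_vclk.c hunks below). A sketch of that validation for one mode, assuming a GXL SoC; meson_hdmi_pll_find_params() is static in meson_vclk.c, so this only illustrates the call order:

static enum drm_mode_status check_dmt_65mhz(struct meson_drm *priv)
{
	unsigned int m, frac, od;

	/* The DMT path after the PLL is always /10, so aim at 650000 kHz;
	 * on GXL the search settles on od=8, m=0xd8. */
	if (meson_hdmi_pll_find_params(priv, 65000 * 10, &m, &frac, &od))
		return MODE_OK;

	return MODE_CLOCK_RANGE;
}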
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f0511220317f..ae5473257f72 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -320,32 +320,23 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
CTS_VDAC_EN, CTS_VDAC_EN);
}
-
+enum {
/* PLL O1 O2 O3 VP DV EN TX */
/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
-#define MESON_VCLK_HDMI_ENCI_54000 1
+ MESON_VCLK_HDMI_ENCI_54000 = 1,
/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_54000 2
+ MESON_VCLK_HDMI_DDR_54000,
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_148500 3
-/* 4028 /4 /4 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_25175 4
-/* 3200 /4 /2 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_40000 5
-/* 5200 /4 /2 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_65000 6
+ MESON_VCLK_HDMI_DDR_148500,
/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_74250 7
-/* 4320 /4 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_108000 8
+ MESON_VCLK_HDMI_74250,
/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_148500 9
-/* 3240 /2 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_162000 10
+ MESON_VCLK_HDMI_148500,
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_297000 11
+ MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_594000 12
+ MESON_VCLK_HDMI_594000
+};
struct meson_vclk_params {
unsigned int pll_base_freq;
@@ -411,46 +402,6 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
- [MESON_VCLK_HDMI_25175] = {
- .pll_base_freq = 4028000,
- .pll_od1 = 4,
- .pll_od2 = 4,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_40000] = {
- .pll_base_freq = 3200000,
- .pll_od1 = 4,
- .pll_od2 = 2,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_65000] = {
- .pll_base_freq = 5200000,
- .pll_od1 = 4,
- .pll_od2 = 2,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_108000] = {
- .pll_base_freq = 4320000,
- .pll_od1 = 4,
- .pll_od2 = 1,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_162000] = {
- .pll_base_freq = 3240000,
- .pll_od1 = 2,
- .pll_od2 = 1,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -470,358 +421,217 @@ static inline unsigned int pll_od_to_reg(unsigned int od)
return 0;
}
-void meson_hdmi_pll_set(struct meson_drm *priv,
- unsigned int base,
- unsigned int od1,
- unsigned int od2,
- unsigned int od3)
+void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
+ unsigned int frac, unsigned int od1,
+ unsigned int od2, unsigned int od3)
{
unsigned int val;
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
- switch (base) {
- case 2970000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* Enable and unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- 0x7 << 28, 0x4 << 28);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4e00);
- break;
-
- case 3200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000242);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4aab);
- break;
-
- case 3240000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000243);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4800);
- break;
-
- case 3865000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000250);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4855);
- break;
-
- case 4028000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000253);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4eab);
- break;
-
- case 4320000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m);
+ if (frac)
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0x00004000 | frac);
+ else
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
- case 5940000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800027b);
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4c00);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
+ /* Enable and unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 0x7 << 28, 0x4 << 28);
- case 5200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800026c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
- };
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
- switch (base) {
- case 2970000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000285);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb155);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3240000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000287);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3865000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a1);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb02b);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 4028000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a7);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb355);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 4320000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 5940000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002f7);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb200);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 5200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002d8);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb2ab);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- };
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
/* Reset PLL */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- HDMI_PLL_RESET, HDMI_PLL_RESET);
+ HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- HDMI_PLL_RESET, 0);
+ HDMI_PLL_RESET, 0);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- };
+ }
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 16, pll_od_to_reg(od1) << 16);
+ 3 << 16, pll_od_to_reg(od1) << 16);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 21, pll_od_to_reg(od1) << 21);
+ 3 << 21, pll_od_to_reg(od1) << 21);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 22, pll_od_to_reg(od2) << 22);
+ 3 << 22, pll_od_to_reg(od2) << 22);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 23, pll_od_to_reg(od2) << 23);
+ 3 << 23, pll_od_to_reg(od2) << 23);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 18, pll_od_to_reg(od3) << 18);
+ 3 << 18, pll_od_to_reg(od3) << 18);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 19, pll_od_to_reg(od3) << 19);
+ 3 << 19, pll_od_to_reg(od3) << 19);
+
}
-void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci)
+#define XTAL_FREQ 24000
+
+static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
+ unsigned int pll_freq)
{
- unsigned int freq;
- unsigned int hdmi_tx_div;
- unsigned int venc_div;
+ /* The GXBB PLL has a /2 pre-multiplier */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ pll_freq /= 2;
- if (target == MESON_VCLK_TARGET_CVBS) {
- meson_venci_cvbs_clock_config(priv);
- return;
+ return pll_freq / XTAL_FREQ;
+}
+
+#define HDMI_FRAC_MAX_GXBB 4096
+#define HDMI_FRAC_MAX_GXL 1024
+
+static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
+ unsigned int m,
+ unsigned int pll_freq)
+{
+ unsigned int parent_freq = XTAL_FREQ;
+ unsigned int frac_max = HDMI_FRAC_MAX_GXL;
+ unsigned int frac_m;
+ unsigned int frac;
+
+ /* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ frac_max = HDMI_FRAC_MAX_GXBB;
+ parent_freq *= 2;
}
- hdmi_tx_div = vclk_freq / dac_freq;
+ /* We can have a perfect match! */
+ if (pll_freq / m == parent_freq &&
+ pll_freq % m == 0)
+ return 0;
- if (hdmi_tx_div == 0) {
- pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
- dac_freq);
- return;
+ frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
+ frac_m = m * frac_max;
+ if (frac_m > frac)
+ return frac_max;
+ frac -= frac_m;
+
+ return min((u16)frac, (u16)(frac_max - 1));
+}
+
+static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
+ unsigned int m,
+ unsigned int frac)
+{
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ /* Empiric supported min/max dividers */
+ if (m < 53 || m > 123)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_GXBB)
+ return false;
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_GXL)
+ return false;
}
- venc_div = vclk_freq / venc_freq;
+ return true;
+}
- if (venc_div == 0) {
- pr_err("Fatal Error, invalid HDMI venc freq %d\n",
- venc_freq);
- return;
+static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
+ unsigned int freq,
+ unsigned int *m,
+ unsigned int *frac,
+ unsigned int *od)
+{
+ /* Cycle from /16 to /2 */
+ for (*od = 16 ; *od > 1 ; *od >>= 1) {
+ *m = meson_hdmi_pll_get_m(priv, freq * *od);
+ if (!*m)
+ continue;
+ *frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
+
+ DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
+ freq, *m, *frac, *od);
+
+ if (meson_hdmi_pll_validate_params(priv, *m, *frac))
+ return true;
}
- switch (vclk_freq) {
- case 54000:
- if (hdmi_use_enci)
- freq = MESON_VCLK_HDMI_ENCI_54000;
- else
- freq = MESON_VCLK_HDMI_DDR_54000;
- break;
- case 25175:
- freq = MESON_VCLK_HDMI_25175;
- break;
- case 40000:
- freq = MESON_VCLK_HDMI_40000;
- break;
- case 65000:
- freq = MESON_VCLK_HDMI_65000;
- break;
- case 74250:
- freq = MESON_VCLK_HDMI_74250;
- break;
- case 108000:
- freq = MESON_VCLK_HDMI_108000;
- break;
- case 148500:
- if (dac_freq != 148500)
- freq = MESON_VCLK_HDMI_DDR_148500;
- else
- freq = MESON_VCLK_HDMI_148500;
- break;
- case 162000:
- freq = MESON_VCLK_HDMI_162000;
- break;
- case 297000:
- freq = MESON_VCLK_HDMI_297000;
- break;
- case 594000:
- freq = MESON_VCLK_HDMI_594000;
- break;
- default:
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
- vclk_freq);
+ return false;
+}
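To see the search in action, take the 640x480 DMT pixel clock of 25175 kHz: the DMT path below multiplies it by 10, so the target is 251750 kHz. The first divider tried, od = 16, gives a 4028000 kHz PLL rate and m = 4028000 / 24000 = 167 (0xa7), which already falls inside the empirical 106-247 window on GXL, so the loop succeeds on its first iteration.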
+
+/* freq is the requested pixel clock; the DMT path divides the PLL output (after the OD dividers) by a fixed 10 */
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
+{
+ unsigned int od, m, frac;
+
+ /* In DMT mode, path after PLL is always /10 */
+ freq *= 10;
+
+ if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
+ return MODE_OK;
+
+ return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
+
+/* pll_freq is the frequency after the OD dividers */
+static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
+ unsigned int pll_freq)
+{
+ unsigned int od, m, frac, od1, od2, od3;
+
+ if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+ od3 = 1;
+ if (od < 4) {
+ od1 = 2;
+ od2 = 1;
+ } else {
+ od2 = od / 4;
+ od1 = od / od2;
+ }
+
+ DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
+ pll_freq, m, frac, od1, od2, od3);
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+
return;
}
+ DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
+ pll_freq);
+}
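The split keeps each stage within its hardware range: od = 16 becomes od2 = 16 / 4 = 4 and od1 = 16 / 4 = 4 (4 x 4 x 1), od = 8 maps to 4 x 2 x 1, od = 4 to 4 x 1 x 1, and the od < 4 branch only ever sees od = 2 from the search loop, giving 2 x 1 x 1.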
+
+static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+ unsigned int od1, unsigned int od2, unsigned int od3,
+ unsigned int vid_pll_div, unsigned int vclk_div,
+ unsigned int hdmi_tx_div, unsigned int venc_div,
+ bool hdmi_use_enci)
+{
/* Set HDMI-TX sys clock */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_SEL_MASK, 0);
@@ -831,19 +641,49 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN);
/* Set HDMI PLL rate */
- meson_hdmi_pll_set(priv, params[freq].pll_base_freq,
- params[freq].pll_od1,
- params[freq].pll_od2,
- params[freq].pll_od3);
+ if (!od1 && !od2 && !od3) {
+ meson_hdmi_pll_generic_set(priv, pll_base_freq);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
+ od1, od2, od3);
+ break;
+ case 4320000:
+ meson_hdmi_pll_set_params(priv, 0x5a, 0,
+ od1, od2, od3);
+ break;
+ case 5940000:
+ meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
+ od1, od2, od3);
+ break;
+ }
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
+ od1, od2, od3);
+ break;
+ case 4320000:
+ meson_hdmi_pll_set_params(priv, 0xb4, 0,
+ od1, od2, od3);
+ break;
+ case 5940000:
+ meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
+ od1, od2, od3);
+ break;
+ }
+ }
/* Setup vid_pll divider */
- meson_vid_pll_set(priv, params[freq].vid_pll_div);
+ meson_vid_pll_set(priv, vid_pll_div);
/* Set VCLK div */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_SEL_MASK, 0);
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
- VCLK_DIV_MASK, params[freq].vclk_div - 1);
+ VCLK_DIV_MASK, vclk_div - 1);
/* Set HDMI-TX source */
switch (hdmi_tx_div) {
@@ -981,4 +821,80 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
}
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ unsigned int vclk_freq, unsigned int venc_freq,
+ unsigned int dac_freq, bool hdmi_use_enci)
+{
+ unsigned int freq;
+ unsigned int hdmi_tx_div;
+ unsigned int venc_div;
+
+ if (target == MESON_VCLK_TARGET_CVBS) {
+ meson_venci_cvbs_clock_config(priv);
+ return;
+ } else if (target == MESON_VCLK_TARGET_DMT) {
+ /* The DMT clock path is fixed after the PLL:
+ * - automatic PLL freq + OD management
+ * - vid_pll_div = VID_PLL_DIV_5
+ * - vclk_div = 2
+ * - hdmi_tx_div = 1
+ * - venc_div = 1
+ * - encp encoder
+ */
+ meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+ VID_PLL_DIV_5, 2, 1, 1, false);
+ return;
+ }
+
+ hdmi_tx_div = vclk_freq / dac_freq;
+
+ if (hdmi_tx_div == 0) {
+ pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+ dac_freq);
+ return;
+ }
+
+ venc_div = vclk_freq / venc_freq;
+
+ if (venc_div == 0) {
+ pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+ venc_freq);
+ return;
+ }
+
+ switch (vclk_freq) {
+ case 54000:
+ if (hdmi_use_enci)
+ freq = MESON_VCLK_HDMI_ENCI_54000;
+ else
+ freq = MESON_VCLK_HDMI_DDR_54000;
+ break;
+ case 74250:
+ freq = MESON_VCLK_HDMI_74250;
+ break;
+ case 148500:
+ if (dac_freq != 148500)
+ freq = MESON_VCLK_HDMI_DDR_148500;
+ else
+ freq = MESON_VCLK_HDMI_148500;
+ break;
+ case 297000:
+ freq = MESON_VCLK_HDMI_297000;
+ break;
+ case 594000:
+ freq = MESON_VCLK_HDMI_594000;
+ break;
+ default:
+ pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
+ vclk_freq);
+ return;
+ }
+
+ meson_vclk_set(priv, params[freq].pll_base_freq,
+ params[freq].pll_od1, params[freq].pll_od2,
+ params[freq].pll_od3, params[freq].vid_pll_div,
+ params[freq].vclk_div, hdmi_tx_div, venc_div,
+ hdmi_use_enci);
+}
EXPORT_SYMBOL_GPL(meson_vclk_setup);
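As a cross-check of the hard-coded M/FRAC pairs used above: on GXL the 5940000 kHz entry decodes to m = 0xf7 = 247, i.e. 247 * 24000 = 5928000 kHz, plus a fractional 0x200 / 1024 * 24000 = 12000 kHz, which is exactly 5940000 kHz; on GXBB the same base uses m = 0x7b = 123 against the doubled 48000 kHz parent, 123 * 48000 = 5904000 kHz plus 0xc00 / 4096 * 48000 = 36000 kHz, again 5940000 kHz.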
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 0401b5213471..869fa3a3073e 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -24,11 +24,15 @@
enum {
MESON_VCLK_TARGET_CVBS = 0,
MESON_VCLK_TARGET_HDMI = 1,
+ MESON_VCLK_TARGET_DMT = 2,
};
/* 27MHz is the CVBS Pixel Clock */
#define MESON_VCLK_CVBS 27000
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
unsigned int dac_freq, bool hdmi_use_enci);
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 6e2701389801..514245e69b38 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -697,314 +697,6 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_640x480_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x31f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x90,
- .havon_end = 0x30f,
- .vavon_bline = 0x23,
- .vavon_eline = 0x202,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x60,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 2,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x20c,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_800x600_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x41f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0xD8,
- .havon_end = 0x3f7,
- .vavon_bline = 0x1b,
- .vavon_eline = 0x272,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x80,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 4,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x273,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1024x768_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 1343,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 296,
- .havon_end = 1319,
- .vavon_bline = 35,
- .vavon_eline = 802,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 136,
- .vso_begin = 30,
- .vso_end = 50,
- .vso_bline = 0,
- .vso_eline = 6,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 805,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1152x864_75 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x63f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x180,
- .havon_end = 0x5ff,
- .vavon_bline = 0x23,
- .vavon_eline = 0x382,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x80,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x383,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1280x1024_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x697,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x168,
- .havon_end = 0x667,
- .vavon_bline = 0x29,
- .vavon_eline = 0x428,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x70,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x429,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1600x1200_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x86f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x1f0,
- .havon_end = 0x82f,
- .vavon_bline = 0x31,
- .vavon_eline = 0x4e0,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0xc0,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x4e1,
- },
-};
-
-struct meson_hdmi_venc_dmt_mode {
- struct drm_display_mode drm_mode;
- union meson_hdmi_venc_mode *mode;
-} meson_hdmi_venc_dmt_modes[] = {
- /* 640x480@60Hz */
- {
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_640x480_60,
- },
- /* 800x600@60Hz */
- {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_800x600_60,
- },
- /* 1024x768@60Hz */
- {
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024,
- 1048, 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_1024x768_60,
- },
- /* 1152x864@75Hz */
- {
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152,
- 1216, 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1152x864_75,
- },
- /* 1280x1024@60Hz */
- {
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280,
- 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1280x1024_60,
- },
- /* 1600x1200@60Hz */
- {
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600,
- 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1600x1200_60,
- },
- /* 1920x1080@60Hz */
- {
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920,
- 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_1080p60
- },
- { }, /* sentinel */
-};
-
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -1044,17 +736,20 @@ static unsigned long modulo(unsigned long a, unsigned long b)
return a;
}
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
{
- struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+ if (mode->flags & ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
+ return MODE_BAD;
- while (vmode->mode) {
- if (drm_mode_equal(&vmode->drm_mode, mode))
- return true;
- vmode++;
- }
+ if (mode->hdisplay < 640 || mode->hdisplay > 1920)
+ return MODE_BAD_HVALUE;
- return false;
+ if (mode->vdisplay < 480 || mode->vdisplay > 1200)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode);
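The validation is deliberately loosened here: instead of matching against a fixed table of DMT modes, any mode with plain sync flags and a resolution inside the 640x480 to 1920x1200 envelope is accepted, and whether the pixel clock is actually achievable is deferred to meson_vclk_dmt_supported_freq(), which runs the PLL parameter search added earlier in this patch.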
@@ -1072,18 +767,29 @@ bool meson_venc_hdmi_supported_vic(int vic)
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
-static union meson_hdmi_venc_mode
-*meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode)
+void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode,
+ union meson_hdmi_venc_mode *dmt_mode)
{
- struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
-
- while (vmode->mode) {
- if (drm_mode_equal(&vmode->drm_mode, mode))
- return vmode->mode;
- vmode++;
- }
-
- return NULL;
+ memset(dmt_mode, 0, sizeof(*dmt_mode));
+
+ dmt_mode->encp.dvi_settings = 0x21;
+ dmt_mode->encp.video_mode = 0x4040;
+ dmt_mode->encp.video_mode_adv = 0x18;
+ dmt_mode->encp.max_pxcnt = mode->htotal - 1;
+ dmt_mode->encp.havon_begin = mode->htotal - mode->hsync_start;
+ dmt_mode->encp.havon_end = dmt_mode->encp.havon_begin +
+ mode->hdisplay - 1;
+ dmt_mode->encp.vavon_bline = mode->vtotal - mode->vsync_start;
+ dmt_mode->encp.vavon_eline = dmt_mode->encp.vavon_bline +
+ mode->vdisplay - 1;
+ dmt_mode->encp.hso_begin = 0;
+ dmt_mode->encp.hso_end = mode->hsync_end - mode->hsync_start;
+ dmt_mode->encp.vso_begin = 30;
+ dmt_mode->encp.vso_end = 50;
+ dmt_mode->encp.vso_bline = 0;
+ dmt_mode->encp.vso_eline = mode->vsync_end - mode->vsync_start;
+ dmt_mode->encp.vso_eline_present = true;
+ dmt_mode->encp.max_lncnt = mode->vtotal - 1;
}
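With the static encp tables gone, it is worth confirming that this generic arithmetic reproduces them. The hypothetical stand-alone harness below (not part of the driver) feeds the 1024x768@60 DMT timings through the same formulas and lands on every value of the removed meson_hdmi_encp_mode_1024x768_60 entry:

	#include <stdio.h>

	int main(void)
	{
		/* 1024x768@60: htotal 1344, hsync 1048-1184, vtotal 806, vsync 771-777 */
		unsigned int hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, htotal = 1344;
		unsigned int vdisplay = 768, vsync_start = 771, vsync_end = 777, vtotal = 806;

		printf("max_pxcnt   = %u\n", htotal - 1);                          /* 1343 */
		printf("havon_begin = %u\n", htotal - hsync_start);                /* 296  */
		printf("havon_end   = %u\n", htotal - hsync_start + hdisplay - 1); /* 1319 */
		printf("vavon_bline = %u\n", vtotal - vsync_start);                /* 35   */
		printf("vavon_eline = %u\n", vtotal - vsync_start + vdisplay - 1); /* 802  */
		printf("hso_end     = %u\n", hsync_end - hsync_start);             /* 136  */
		printf("vso_eline   = %u\n", vsync_end - vsync_start);             /* 6    */
		printf("max_lncnt   = %u\n", vtotal - 1);                          /* 805  */
		return 0;
	}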
static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
@@ -1120,6 +826,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
+ union meson_hdmi_venc_mode vmode_dmt;
bool use_enci = false;
bool venc_repeat = false;
bool hdmi_repeat = false;
@@ -1147,14 +854,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int sof_lines;
unsigned int vsync_lines;
- if (meson_venc_hdmi_supported_vic(vic))
+ if (meson_venc_hdmi_supported_vic(vic)) {
vmode = meson_venc_hdmi_get_vic_vmode(vic);
- else
- vmode = meson_venc_hdmi_get_dmt_vmode(mode);
- if (!vmode) {
- dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
- DRM_MODE_FMT "\n", __func__, DRM_MODE_ARG(mode));
- return;
+ if (!vmode) {
+ dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
+ DRM_MODE_FMT "\n", __func__,
+ DRM_MODE_ARG(mode));
+ return;
+ }
+ } else {
+ meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
+ vmode = &vmode_dmt;
}
/* Use VENCI for 480i and 576i and double HDMI pixels */
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 7c18a36a0dd0..97eaebbfa0c4 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -58,7 +58,8 @@ struct meson_cvbs_enci_mode {
};
/* HDMI Clock parameters */
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
bool meson_venc_hdmi_supported_vic(int vic);
bool meson_venc_hdmi_venc_repeat(int vic);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 79d95ca8a0c0..f7945bae3b4a 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -282,7 +282,7 @@ int meson_venc_cvbs_create(struct meson_drm *priv)
encoder->possible_crtcs = BIT(0);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 8918539a19aa..acf7bfe68454 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1553,7 +1553,7 @@ static int mga_vga_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -1747,7 +1747,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
return -1;
}
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ret = mgag200_fbdev_init(mdev);
if (ret) {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 38cbde971b48..843a9d40c05e 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index cd40c050b2d7..261fa79d456d 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
@@ -10,6 +11,9 @@ msm-y := \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
+ adreno/a6xx_gpu.o \
+ adreno/a6xx_gmu.o \
+ adreno/a6xx_hfi.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -45,6 +49,33 @@ msm-y := \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
+ disp/dpu1/dpu_core_irq.o \
+ disp/dpu1/dpu_core_perf.o \
+ disp/dpu1/dpu_crtc.o \
+ disp/dpu1/dpu_encoder.o \
+ disp/dpu1/dpu_encoder_phys_cmd.o \
+ disp/dpu1/dpu_encoder_phys_vid.o \
+ disp/dpu1/dpu_formats.o \
+ disp/dpu1/dpu_hw_blk.o \
+ disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_cdm.o \
+ disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_interrupts.o \
+ disp/dpu1/dpu_hw_intf.o \
+ disp/dpu1/dpu_hw_lm.o \
+ disp/dpu1/dpu_hw_pingpong.o \
+ disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_top.o \
+ disp/dpu1/dpu_hw_util.o \
+ disp/dpu1/dpu_hw_vbif.o \
+ disp/dpu1/dpu_io_util.o \
+ disp/dpu1/dpu_irq.o \
+ disp/dpu1/dpu_kms.o \
+ disp/dpu1/dpu_mdss.o \
+ disp/dpu1/dpu_plane.o \
+ disp/dpu1/dpu_power_handle.o \
+ disp/dpu1/dpu_rm.o \
+ disp/dpu1/dpu_vbif.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,7 +93,8 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+ disp/dpu1/dpu_dbg.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 644374c7b3e0..4bff0a740c7d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -84,13 +86,12 @@ enum a2xx_sq_surfaceformat {
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
- FMT_10_11_11 = 16,
- FMT_11_11_10 = 17,
+ FMT_8_8_8 = 16,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
+ FMT_10_10_10_2 = 21,
FMT_24_8 = 22,
- FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
@@ -106,29 +107,23 @@ enum a2xx_sq_surfaceformat {
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
- FMT_32_AS_8 = 39,
- FMT_32_AS_8_8 = 40,
- FMT_16_MPEG = 41,
- FMT_16_16_MPEG = 42,
- FMT_8_INTERLACED = 43,
- FMT_32_AS_8_INTERLACED = 44,
- FMT_32_AS_8_8_INTERLACED = 45,
- FMT_16_INTERLACED = 46,
- FMT_16_MPEG_INTERLACED = 47,
- FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_ATI_TC_RGB = 39,
+ FMT_ATI_TC_RGBA = 40,
+ FMT_ATI_TC_555_565_RGB = 41,
+ FMT_ATI_TC_555_565_RGBA = 42,
+ FMT_ATI_TC_RGBA_INTERP = 43,
+ FMT_ATI_TC_555_565_RGBA_INTERP = 44,
+ FMT_ETC1_RGBA_INTERP = 46,
+ FMT_ETC1_RGB = 47,
+ FMT_ETC1_RGBA = 48,
FMT_DXN = 49,
- FMT_8_8_8_8_AS_16_16_16_16 = 50,
- FMT_DXT1_AS_16_16_16_16 = 51,
- FMT_DXT2_3_AS_16_16_16_16 = 52,
- FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_3_3 = 51,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
- FMT_10_11_11_AS_16_16_16_16 = 55,
- FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_10_10_10_2_AS_16_16_16_16 = 55,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
- FMT_DXT3A_AS_1_1_1_1 = 61,
};
enum a2xx_sq_ps_vtx_mode {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 663a73216926..645a19aef399 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 3ebbeb3a9b68..669c2d4b070d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -411,15 +411,6 @@ static const unsigned int a3xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
-{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- adreno_show(gpu, m);
-}
-#endif
-
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
@@ -427,6 +418,21 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -450,9 +456,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a3xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
};
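The pattern here, repeated for a4xx below, is that the per-generation hook now only allocates an msm_gpu_state, lets the common adreno_gpu_state_get() capture the shared state, and appends its chip-specific registers (just RBBM_STATUS for a3xx); the shared adreno_show() then backs both the debugfs file and the new devcoredump path, which is why the guard widens to CONFIG_DEV_COREDUMP.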
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 1a14f4a40b9c..19565e87aa7b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -263,12 +265,6 @@ enum a4xx_depth_format {
DEPTH4_32 = 3,
};
-enum a4xx_tess_spacing {
- EQUAL_SPACING = 0,
- ODD_SPACING = 2,
- EVEN_SPACING = 3,
-};
-
enum a4xx_ccu_perfcounter_select {
CCU_BUSY_CYCLES = 0,
CCU_RB_DEPTH_RETURN_STALL = 2,
@@ -3544,12 +3540,13 @@ static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3571,12 +3568,13 @@ static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3598,12 +3596,13 @@ static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3625,12 +3624,13 @@ static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3652,12 +3652,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3672,23 +3673,103 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
}
-#define REG_A4XX_HLSQ_CS_CONTROL 0x000023ca
+#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x000000ff
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
+}
#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
@@ -4087,5 +4168,71 @@ static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
#define REG_A4XX_TEX_CONST_7 0x00000007
+#define REG_A4XX_SSBO_0_0 0x00000000
+#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0
+#define A4XX_SSBO_0_0_BASE__SHIFT 5
+static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
+}
+
+#define REG_A4XX_SSBO_0_1 0x00000001
+#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A4XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_2 0x00000002
+#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_3 0x00000003
+#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A4XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A4XX_SSBO_1_0 0x00000000
+#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f
+#define A4XX_SSBO_1_0_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK;
+}
+#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A4XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK;
+}
+#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A4XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A4XX_SSBO_1_1 0x00000001
+#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A4XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
+}
+
#endif /* A4XX_XML */
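A note on the generated SSBO helpers above: A4XX_SSBO_0_0_BASE() shifts val right by 5 before applying the 0xffffffe0 mask, so the encoded value is effectively val & ~0x1f, meaning the buffer base must be 32-byte aligned; A4XX_SSBO_0_2_ARRAY_PITCH() applies the same pattern with 12 bits, requiring 4096-byte alignment of the array pitch.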
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 16d3d596638e..7c4e6dc1ed59 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -455,15 +455,19 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- adreno_show(gpu, m);
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
}
-#endif
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
@@ -538,9 +542,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a4xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
.get_timestamp = a4xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index e0e6711f4f78..182d37ff3794 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -119,6 +121,11 @@ enum a5xx_vtx_fmt {
VFMT5_8_8_8_8_SNORM = 50,
VFMT5_8_8_8_8_UINT = 51,
VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_10_10_10_2_UNORM = 54,
+ VFMT5_10_10_10_2_SNORM = 57,
+ VFMT5_10_10_10_2_UINT = 58,
+ VFMT5_10_10_10_2_SINT = 59,
+ VFMT5_11_11_10_FLOAT = 66,
VFMT5_16_16_UNORM = 67,
VFMT5_16_16_SNORM = 68,
VFMT5_16_16_FLOAT = 69,
@@ -204,14 +211,45 @@ enum a5xx_tex_fmt {
TFMT5_32_32_FLOAT = 103,
TFMT5_32_32_UINT = 104,
TFMT5_32_32_SINT = 105,
+ TFMT5_32_32_32_UINT = 114,
+ TFMT5_32_32_32_SINT = 115,
+ TFMT5_32_32_32_FLOAT = 116,
TFMT5_32_32_32_32_FLOAT = 130,
TFMT5_32_32_32_32_UINT = 131,
TFMT5_32_32_32_32_SINT = 132,
TFMT5_X8Z24_UNORM = 160,
+ TFMT5_ETC2_RG11_UNORM = 171,
+ TFMT5_ETC2_RG11_SNORM = 172,
+ TFMT5_ETC2_R11_UNORM = 173,
+ TFMT5_ETC2_R11_SNORM = 174,
+ TFMT5_ETC1 = 175,
+ TFMT5_ETC2_RGB8 = 176,
+ TFMT5_ETC2_RGBA8 = 177,
+ TFMT5_ETC2_RGB8A1 = 178,
+ TFMT5_DXT1 = 179,
+ TFMT5_DXT3 = 180,
+ TFMT5_DXT5 = 181,
TFMT5_RGTC1_UNORM = 183,
TFMT5_RGTC1_SNORM = 184,
TFMT5_RGTC2_UNORM = 187,
TFMT5_RGTC2_SNORM = 188,
+ TFMT5_BPTC_UFLOAT = 190,
+ TFMT5_BPTC_FLOAT = 191,
+ TFMT5_BPTC = 192,
+ TFMT5_ASTC_4x4 = 193,
+ TFMT5_ASTC_5x4 = 194,
+ TFMT5_ASTC_5x5 = 195,
+ TFMT5_ASTC_6x5 = 196,
+ TFMT5_ASTC_6x6 = 197,
+ TFMT5_ASTC_8x5 = 198,
+ TFMT5_ASTC_8x6 = 199,
+ TFMT5_ASTC_8x8 = 200,
+ TFMT5_ASTC_10x5 = 201,
+ TFMT5_ASTC_10x6 = 202,
+ TFMT5_ASTC_10x8 = 203,
+ TFMT5_ASTC_10x10 = 204,
+ TFMT5_ASTC_12x10 = 205,
+ TFMT5_ASTC_12x12 = 206,
};
enum a5xx_tex_fetchsize {
@@ -239,7 +277,7 @@ enum a5xx_blit_buf {
BLIT_MRT6 = 6,
BLIT_MRT7 = 7,
BLIT_ZS = 8,
- BLIT_Z32 = 9,
+ BLIT_S = 9,
};
enum a5xx_cp_perfcounter_select {
@@ -899,6 +937,12 @@ enum a5xx_tex_type {
#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
+
+#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
+
+#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
+
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
@@ -2072,9 +2116,17 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_PC_MODE_CNTL 0x00000d02
-#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04
+
+#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05
+
+#define REG_A5XX_PC_START_INDEX 0x00000d06
-#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+#define REG_A5XX_PC_MAX_INDEX 0x00000d07
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09
#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -2327,6 +2379,14 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
+#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
+
#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -2590,6 +2650,7 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
#define REG_A5XX_UNKNOWN_E001 0x0000e001
@@ -2700,7 +2761,7 @@ static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
}
-#define REG_A5XX_UNKNOWN_E093 0x0000e093
+#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
@@ -2936,7 +2997,9 @@ static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -3002,6 +3065,13 @@ static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0
static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -3060,6 +3130,12 @@ static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode
{
return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
}
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
@@ -3223,6 +3299,7 @@ static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3369,7 +3446,25 @@ static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
}
-#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
@@ -3428,6 +3523,7 @@ static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
}
#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
@@ -3459,6 +3555,7 @@ static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
@@ -3627,22 +3724,69 @@ static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
{
return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
}
+#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
+#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
#define REG_A5XX_UNKNOWN_E389 0x0000e389
#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
-#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+#define REG_A5XX_PC_GS_LAYERED 0x0000e38d
#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A5XX_PC_HS_PARAM_CW 0x00800000
+#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
@@ -3667,10 +3811,40 @@ static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
{
return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
}
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
#define REG_A5XX_VFD_CONTROL_4 0x0000e404
@@ -3700,12 +3874,18 @@ static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
}
#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
-#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
{
return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
@@ -3960,6 +4140,7 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
#define A5XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -4001,16 +4182,12 @@ static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
}
+#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
-#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
-
-#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
-
-#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
-
#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
@@ -4039,7 +4216,39 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define REG_A5XX_UNKNOWN_E600 0x0000e600
+#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
+
+#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
+
+#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
+
+#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E602 0x0000e602
@@ -4047,13 +4256,67 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
+#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
-#define REG_A5XX_UNKNOWN_E640 0x0000e640
+#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
@@ -4173,6 +4436,18 @@ static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
{
return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
@@ -4375,34 +4650,52 @@ static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
}
#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_SIZE_X(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
@@ -4468,6 +4761,8 @@ static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
+#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100
+
#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
@@ -4483,12 +4778,19 @@ static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_SRC_LO 0x00002108
@@ -4515,12 +4817,19 @@ static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_DST_LO 0x00002111
@@ -4548,6 +4857,8 @@ static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
+
#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -4555,12 +4866,19 @@ static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4569,12 +4887,19 @@ static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_UNKNOWN_2100 0x00002100
@@ -4698,6 +5023,12 @@ static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A5XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
@@ -4788,5 +5119,81 @@ static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
#define REG_A5XX_TEX_CONST_11 0x0000000b
+#define REG_A5XX_SSBO_0_0 0x00000000
+#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0
+#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
+}
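+/* Note: the base address is evidently expected to be 32-byte aligned;
+ * the helper drops the low five bits before packing the field.
+ */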
+
+#define REG_A5XX_SSBO_0_1 0x00000001
+#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A5XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_2 0x00000002
+#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_3 0x00000003
+#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A5XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A5XX_SSBO_1_0 0x00000000
+#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A5XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK;
+}
+#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A5XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A5XX_SSBO_1_1 0x00000001
+#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A5XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK;
+}
+
+#define REG_A5XX_SSBO_2_0 0x00000000
+#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff
+#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_2_1 0x00000001
+#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
+#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
+}
+
#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d39400e5bc42..ab1d9308c311 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
@@ -19,6 +20,8 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
@@ -91,12 +94,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
} else {
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
}
if (ret)
goto out;
@@ -1123,8 +1127,9 @@ static const u32 a5xx_registers[] = {
0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
- 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
- 0xB9A0, 0xB9BF, ~0
+ 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+ 0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+ 0xAC60, 0xAC60, ~0,
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1195,19 +1200,231 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
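+/*
+ * The CP crashdumper is a small hardware engine that executes a script
+ * of register read/write operations and copies the results into GPU
+ * memory. This lets us sample registers that the CPU cannot read
+ * directly (see the HLSQ aperture list below).
+ */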
+struct a5xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a5xx_gpu_state {
+ struct msm_gpu_state base;
+ u32 *hlsqregs;
+};
+
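+/*
+ * Poll a GPU register until the condition is met or the timeout (in
+ * microseconds) expires. GPU registers are indexed by 32-bit dword, so
+ * the index is shifted left by two to form the byte offset expected by
+ * readl_poll_timeout().
+ */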
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
- /*
- * Temporarily disable hardware clock gating before going into
- * adreno_show to avoid issues while reading the registers
- */
+ if (IS_ERR(dumper->ptr))
+ return PTR_ERR(dumper->ptr);
+
+ return 0;
+}
+
+static void a5xx_crashdumper_free(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ msm_gem_put_iova(dumper->bo, gpu->aspace);
+ msm_gem_put_vaddr(dumper->bo);
+
+ drm_gem_object_unreference(dumper->bo);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
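+ /* Writing 1 kicks off the dump; bit 2 of the same register is then
+ * polled as what appears to be the completion flag.
+ */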
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+ val & 0x04, 100, 10000);
+}
+
+/*
+ * This is a list of the register ranges that need to be read through the
+ * HLSQ aperture by the crashdumper. These registers are not normally
+ * accessible from the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+ { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+ { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
+};
+
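+/*
+ * Rough sizing note (back-of-envelope, not from the original code): the
+ * generated script is under 1 KiB and the registers read back total only
+ * a few KiB, so both fit comfortably in the SZ_1M buffer with the data
+ * region starting at 256 KiB.
+ */
+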
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+ struct a5xx_gpu_state *a5xx_state)
+{
+ struct a5xx_crashdumper dumper = { 0 };
+ u32 offset, count = 0;
+ u64 *ptr;
+ int i;
+
+ if (a5xx_crashdumper_init(gpu, &dumper))
+ return;
+
+ /* The script will be written at offset 0 */
+ ptr = dumper.ptr;
+
+ /* Start writing the data at offset 256k */
+ offset = dumper.iova + (256 * SZ_1K);
+
+ /* Count how many additional registers to get from the HLSQ aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ count += a5xx_hlsq_aperture_regs[i].count;
+
+ a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ /* Build the crashdump script */
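+ /*
+ * Each script operation appears to occupy a pair of 64-bit words: the
+ * first holds the value to write (or the destination IOVA for a
+ * read-back), and the second packs the register offset into the upper
+ * bits together with a dword count and, for writes, a control bit.
+ * This layout is inferred from the code below, not from documentation.
+ */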
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 type = a5xx_hlsq_aperture_regs[i].type;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ /* Write the register to select the desired bank */
+ *ptr++ = ((u64) type << 8);
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+ (1 << 21) | 1;
+
+ *ptr++ = offset;
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+ | c;
+
+ offset += c * sizeof(u32);
+ }
+
+ /* Write two zeros to close off the script */
+ *ptr++ = 0;
+ *ptr++ = 0;
+
+ if (a5xx_crashdumper_run(gpu, &dumper)) {
+ kfree(a5xx_state->hlsqregs);
+ a5xx_state->hlsqregs = NULL;
+ a5xx_crashdumper_free(gpu, &dumper);
+ return;
+ }
+
+ /* Copy the data from the crashdumper to the state */
+ memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+ count * sizeof(u32));
+
+ a5xx_crashdumper_free(gpu, &dumper);
+}
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+ GFP_KERNEL);
+
+ if (!a5xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Temporarily disable hardware clock gating before reading the hw */
a5xx_set_hwcg(gpu, false);
- adreno_show(gpu, m);
+
+ /* First get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+ a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ /* Get the HLSQ regs with the help of the crashdumper */
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
a5xx_set_hwcg(gpu, true);
+
+ return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ kfree(a5xx_state->hlsqregs);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a5xx_state);
+}
+
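+/*
+ * Drop a reference to a previously captured state. Returns nonzero if
+ * there was no state to release, or once the last reference is dropped,
+ * matching kref_put() semantics.
+ */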
+int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ int i, j;
+ u32 pos = 0;
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ /* Dump the additional a5xx HLSQ registers */
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ drm_printf(p, "registers-hlsq:\n");
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ for (j = 0; j < c; j++, pos++, o++) {
+ /*
+ * To keep the crashdump simple we pull the entire range
+ * for each register type, but not all of the registers
+ * in the range are valid. Fortunately the invalid ones
+ * stick out like a sore thumb with a value of
+ * 0xdeadbeef.
+ */
+ if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ o << 2, a5xx_state->hlsqregs[pos]);
+ }
+ }
}
#endif
@@ -1239,11 +1456,15 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
.debugfs_init = a5xx_debugfs_init,
#endif
.gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
new file mode 100644
index 000000000000..87eab51f7000
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -0,0 +1,4562 @@
+#ifndef A6XX_XML
+#define A6XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a6xx_color_fmt {
+ RB6_A8_UNORM = 2,
+ RB6_R8_UNORM = 3,
+ RB6_R8_SNORM = 4,
+ RB6_R8_UINT = 5,
+ RB6_R8_SINT = 6,
+ RB6_R4G4B4A4_UNORM = 8,
+ RB6_R5G5B5A1_UNORM = 10,
+ RB6_R5G6B5_UNORM = 14,
+ RB6_R8G8_UNORM = 15,
+ RB6_R8G8_SNORM = 16,
+ RB6_R8G8_UINT = 17,
+ RB6_R8G8_SINT = 18,
+ RB6_R16_UNORM = 21,
+ RB6_R16_SNORM = 22,
+ RB6_R16_FLOAT = 23,
+ RB6_R16_UINT = 24,
+ RB6_R16_SINT = 25,
+ RB6_R8G8B8A8_UNORM = 48,
+ RB6_R8G8B8_UNORM = 49,
+ RB6_R8G8B8A8_SNORM = 50,
+ RB6_R8G8B8A8_UINT = 51,
+ RB6_R8G8B8A8_SINT = 52,
+ RB6_R10G10B10A2_UNORM = 55,
+ RB6_R10G10B10A2_UINT = 58,
+ RB6_R11G11B10_FLOAT = 66,
+ RB6_R16G16_UNORM = 67,
+ RB6_R16G16_SNORM = 68,
+ RB6_R16G16_FLOAT = 69,
+ RB6_R16G16_UINT = 70,
+ RB6_R16G16_SINT = 71,
+ RB6_R32_FLOAT = 74,
+ RB6_R32_UINT = 75,
+ RB6_R32_SINT = 76,
+ RB6_R16G16B16A16_UNORM = 96,
+ RB6_R16G16B16A16_SNORM = 97,
+ RB6_R16G16B16A16_FLOAT = 98,
+ RB6_R16G16B16A16_UINT = 99,
+ RB6_R16G16B16A16_SINT = 100,
+ RB6_R32G32_FLOAT = 103,
+ RB6_R32G32_UINT = 104,
+ RB6_R32G32_SINT = 105,
+ RB6_R32G32B32A32_FLOAT = 130,
+ RB6_R32G32B32A32_UINT = 131,
+ RB6_R32G32B32A32_SINT = 132,
+ RB6_X8Z24_UNORM = 160,
+};
+
+enum a6xx_tile_mode {
+ TILE6_LINEAR = 0,
+ TILE6_2 = 2,
+ TILE6_3 = 3,
+};
+
+enum a6xx_vtx_fmt {
+ VFMT6_8_UNORM = 3,
+ VFMT6_8_SNORM = 4,
+ VFMT6_8_UINT = 5,
+ VFMT6_8_SINT = 6,
+ VFMT6_8_8_UNORM = 15,
+ VFMT6_8_8_SNORM = 16,
+ VFMT6_8_8_UINT = 17,
+ VFMT6_8_8_SINT = 18,
+ VFMT6_16_UNORM = 21,
+ VFMT6_16_SNORM = 22,
+ VFMT6_16_FLOAT = 23,
+ VFMT6_16_UINT = 24,
+ VFMT6_16_SINT = 25,
+ VFMT6_8_8_8_UNORM = 33,
+ VFMT6_8_8_8_SNORM = 34,
+ VFMT6_8_8_8_UINT = 35,
+ VFMT6_8_8_8_SINT = 36,
+ VFMT6_8_8_8_8_UNORM = 48,
+ VFMT6_8_8_8_8_SNORM = 50,
+ VFMT6_8_8_8_8_UINT = 51,
+ VFMT6_8_8_8_8_SINT = 52,
+ VFMT6_10_10_10_2_UNORM = 54,
+ VFMT6_10_10_10_2_SNORM = 57,
+ VFMT6_10_10_10_2_UINT = 58,
+ VFMT6_10_10_10_2_SINT = 59,
+ VFMT6_11_11_10_FLOAT = 66,
+ VFMT6_16_16_UNORM = 67,
+ VFMT6_16_16_SNORM = 68,
+ VFMT6_16_16_FLOAT = 69,
+ VFMT6_16_16_UINT = 70,
+ VFMT6_16_16_SINT = 71,
+ VFMT6_32_UNORM = 72,
+ VFMT6_32_SNORM = 73,
+ VFMT6_32_FLOAT = 74,
+ VFMT6_32_UINT = 75,
+ VFMT6_32_SINT = 76,
+ VFMT6_32_FIXED = 77,
+ VFMT6_16_16_16_UNORM = 88,
+ VFMT6_16_16_16_SNORM = 89,
+ VFMT6_16_16_16_FLOAT = 90,
+ VFMT6_16_16_16_UINT = 91,
+ VFMT6_16_16_16_SINT = 92,
+ VFMT6_16_16_16_16_UNORM = 96,
+ VFMT6_16_16_16_16_SNORM = 97,
+ VFMT6_16_16_16_16_FLOAT = 98,
+ VFMT6_16_16_16_16_UINT = 99,
+ VFMT6_16_16_16_16_SINT = 100,
+ VFMT6_32_32_UNORM = 101,
+ VFMT6_32_32_SNORM = 102,
+ VFMT6_32_32_FLOAT = 103,
+ VFMT6_32_32_UINT = 104,
+ VFMT6_32_32_SINT = 105,
+ VFMT6_32_32_FIXED = 106,
+ VFMT6_32_32_32_UNORM = 112,
+ VFMT6_32_32_32_SNORM = 113,
+ VFMT6_32_32_32_UINT = 114,
+ VFMT6_32_32_32_SINT = 115,
+ VFMT6_32_32_32_FLOAT = 116,
+ VFMT6_32_32_32_FIXED = 117,
+ VFMT6_32_32_32_32_UNORM = 128,
+ VFMT6_32_32_32_32_SNORM = 129,
+ VFMT6_32_32_32_32_FLOAT = 130,
+ VFMT6_32_32_32_32_UINT = 131,
+ VFMT6_32_32_32_32_SINT = 132,
+ VFMT6_32_32_32_32_FIXED = 133,
+};
+
+enum a6xx_tex_fmt {
+ TFMT6_A8_UNORM = 2,
+ TFMT6_8_UNORM = 3,
+ TFMT6_8_SNORM = 4,
+ TFMT6_8_UINT = 5,
+ TFMT6_8_SINT = 6,
+ TFMT6_4_4_4_4_UNORM = 8,
+ TFMT6_5_5_5_1_UNORM = 10,
+ TFMT6_5_6_5_UNORM = 14,
+ TFMT6_8_8_UNORM = 15,
+ TFMT6_8_8_SNORM = 16,
+ TFMT6_8_8_UINT = 17,
+ TFMT6_8_8_SINT = 18,
+ TFMT6_L8_A8_UNORM = 19,
+ TFMT6_16_UNORM = 21,
+ TFMT6_16_SNORM = 22,
+ TFMT6_16_FLOAT = 23,
+ TFMT6_16_UINT = 24,
+ TFMT6_16_SINT = 25,
+ TFMT6_8_8_8_8_UNORM = 48,
+ TFMT6_8_8_8_UNORM = 49,
+ TFMT6_8_8_8_8_SNORM = 50,
+ TFMT6_8_8_8_8_UINT = 51,
+ TFMT6_8_8_8_8_SINT = 52,
+ TFMT6_9_9_9_E5_FLOAT = 53,
+ TFMT6_10_10_10_2_UNORM = 54,
+ TFMT6_10_10_10_2_UINT = 58,
+ TFMT6_11_11_10_FLOAT = 66,
+ TFMT6_16_16_UNORM = 67,
+ TFMT6_16_16_SNORM = 68,
+ TFMT6_16_16_FLOAT = 69,
+ TFMT6_16_16_UINT = 70,
+ TFMT6_16_16_SINT = 71,
+ TFMT6_32_FLOAT = 74,
+ TFMT6_32_UINT = 75,
+ TFMT6_32_SINT = 76,
+ TFMT6_16_16_16_16_UNORM = 96,
+ TFMT6_16_16_16_16_SNORM = 97,
+ TFMT6_16_16_16_16_FLOAT = 98,
+ TFMT6_16_16_16_16_UINT = 99,
+ TFMT6_16_16_16_16_SINT = 100,
+ TFMT6_32_32_FLOAT = 103,
+ TFMT6_32_32_UINT = 104,
+ TFMT6_32_32_SINT = 105,
+ TFMT6_32_32_32_UINT = 114,
+ TFMT6_32_32_32_SINT = 115,
+ TFMT6_32_32_32_FLOAT = 116,
+ TFMT6_32_32_32_32_FLOAT = 130,
+ TFMT6_32_32_32_32_UINT = 131,
+ TFMT6_32_32_32_32_SINT = 132,
+ TFMT6_X8Z24_UNORM = 160,
+ TFMT6_ETC2_RG11_UNORM = 171,
+ TFMT6_ETC2_RG11_SNORM = 172,
+ TFMT6_ETC2_R11_UNORM = 173,
+ TFMT6_ETC2_R11_SNORM = 174,
+ TFMT6_ETC1 = 175,
+ TFMT6_ETC2_RGB8 = 176,
+ TFMT6_ETC2_RGBA8 = 177,
+ TFMT6_ETC2_RGB8A1 = 178,
+ TFMT6_DXT1 = 179,
+ TFMT6_DXT3 = 180,
+ TFMT6_DXT5 = 181,
+ TFMT6_RGTC1_UNORM = 183,
+ TFMT6_RGTC1_SNORM = 184,
+ TFMT6_RGTC2_UNORM = 187,
+ TFMT6_RGTC2_SNORM = 188,
+ TFMT6_BPTC_UFLOAT = 190,
+ TFMT6_BPTC_FLOAT = 191,
+ TFMT6_BPTC = 192,
+ TFMT6_ASTC_4x4 = 193,
+ TFMT6_ASTC_5x4 = 194,
+ TFMT6_ASTC_5x5 = 195,
+ TFMT6_ASTC_6x5 = 196,
+ TFMT6_ASTC_6x6 = 197,
+ TFMT6_ASTC_8x5 = 198,
+ TFMT6_ASTC_8x6 = 199,
+ TFMT6_ASTC_8x8 = 200,
+ TFMT6_ASTC_10x5 = 201,
+ TFMT6_ASTC_10x6 = 202,
+ TFMT6_ASTC_10x8 = 203,
+ TFMT6_ASTC_10x10 = 204,
+ TFMT6_ASTC_12x10 = 205,
+ TFMT6_ASTC_12x12 = 206,
+};
+
+enum a6xx_tex_fetchsize {
+ TFETCH6_1_BYTE = 0,
+ TFETCH6_2_BYTE = 1,
+ TFETCH6_4_BYTE = 2,
+ TFETCH6_8_BYTE = 3,
+ TFETCH6_16_BYTE = 4,
+};
+
+enum a6xx_depth_format {
+ DEPTH6_NONE = 0,
+ DEPTH6_16 = 1,
+ DEPTH6_24_8 = 2,
+ DEPTH6_32 = 4,
+};
+
+enum a6xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+};
+
+enum a6xx_tex_filter {
+ A6XX_TEX_NEAREST = 0,
+ A6XX_TEX_LINEAR = 1,
+ A6XX_TEX_ANISO = 2,
+};
+
+enum a6xx_tex_clamp {
+ A6XX_TEX_REPEAT = 0,
+ A6XX_TEX_CLAMP_TO_EDGE = 1,
+ A6XX_TEX_MIRROR_REPEAT = 2,
+ A6XX_TEX_CLAMP_TO_BORDER = 3,
+ A6XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a6xx_tex_aniso {
+ A6XX_TEX_ANISO_1 = 0,
+ A6XX_TEX_ANISO_2 = 1,
+ A6XX_TEX_ANISO_4 = 2,
+ A6XX_TEX_ANISO_8 = 3,
+ A6XX_TEX_ANISO_16 = 4,
+};
+
+enum a6xx_tex_swiz {
+ A6XX_TEX_X = 0,
+ A6XX_TEX_Y = 1,
+ A6XX_TEX_Z = 2,
+ A6XX_TEX_W = 3,
+ A6XX_TEX_ZERO = 4,
+ A6XX_TEX_ONE = 5,
+};
+
+enum a6xx_tex_type {
+ A6XX_TEX_1D = 0,
+ A6XX_TEX_2D = 1,
+ A6XX_TEX_CUBE = 2,
+ A6XX_TEX_3D = 3,
+};
+
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
+#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
+#define REG_A6XX_CP_RB_BASE 0x00000800
+
+#define REG_A6XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A6XX_CP_RB_CNTL 0x00000802
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_LO 0x00000804
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A6XX_CP_RB_RPTR 0x00000806
+
+#define REG_A6XX_CP_RB_WPTR 0x00000807
+
+#define REG_A6XX_CP_SQE_CNTL 0x00000808
+
+#define REG_A6XX_CP_HW_FAULT 0x00000821
+
+#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
+
+#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_LO 0x00000830
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_HI 0x00000831
+
+#define REG_A6XX_CP_MISC_CNTL 0x00000840
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+
+#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
+
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
+#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
+
+#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
+
+#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+
+static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
+#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
+static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A6XX_CP_PROTECT_REG_READ 0x80000000
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x000008a1
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x000008a2
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x000008a3
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x000008a4
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x000008a5
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x000008a6
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x000008a7
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_0 0x000008d0
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_1 0x000008d1
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_2 0x000008d2
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_3 0x000008d3
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_4 0x000008d4
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_5 0x000008d5
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_6 0x000008d6
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_7 0x000008d7
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_8 0x000008d8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_9 0x000008d9
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_10 0x000008da
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_11 0x000008db
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_12 0x000008dc
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_13 0x000008dd
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_HI 0x00000901
+
+#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
+
+#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
+
+#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
+
+#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
+
+#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
+
+#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
+
+#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
+
+#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
+
+#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
+
+#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
+
+#define REG_A6XX_CP_IB1_BASE 0x00000928
+
+#define REG_A6XX_CP_IB1_BASE_HI 0x00000929
+
+#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
+
+#define REG_A6XX_CP_IB2_BASE 0x0000092b
+
+#define REG_A6XX_CP_IB2_BASE_HI 0x0000092c
+
+#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
+
+#define REG_A6XX_CP_AHB_CNTL 0x0000098d
+
+#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+
+#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+
+#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
+
+#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
+
+#define REG_A6XX_RBBM_STATUS 0x00000210
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
+#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
+#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
+#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
+#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
+#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
+#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
+#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
+#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
+#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
+#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
+#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
+#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
+#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
+#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
+#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
+#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
+#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
+#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
+#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
+#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
+
+#define REG_A6XX_RBBM_STATUS3 0x00000213
+
+#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_LO 0x00000400
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_HI 0x00000401
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_LO 0x00000402
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_HI 0x00000403
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_LO 0x00000404
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_HI 0x00000405
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_LO 0x00000406
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_HI 0x00000407
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_LO 0x00000408
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_HI 0x00000409
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_LO 0x0000040a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_HI 0x0000040b
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_LO 0x0000040c
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_HI 0x0000040d
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_LO 0x0000040e
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_HI 0x0000040f
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_LO 0x00000410
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_HI 0x00000411
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_LO 0x00000412
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_HI 0x00000413
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_LO 0x00000414
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_HI 0x00000415
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_LO 0x00000416
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_HI 0x00000417
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_LO 0x00000418
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_HI 0x00000419
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_LO 0x0000041a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_HI 0x0000041b
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_LO 0x0000041c
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_HI 0x0000041d
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_LO 0x0000041e
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_HI 0x0000041f
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_LO 0x00000420
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_HI 0x00000421
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_LO 0x00000422
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_HI 0x00000423
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_LO 0x00000424
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_HI 0x00000425
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_LO 0x00000426
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_HI 0x00000427
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_LO 0x00000428
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_HI 0x00000429
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_LO 0x0000042a
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_HI 0x0000042b
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_LO 0x0000042c
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_HI 0x0000042d
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_LO 0x0000042e
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_HI 0x0000042f
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_LO 0x00000430
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_HI 0x00000431
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_LO 0x00000432
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_HI 0x00000433
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_LO 0x00000434
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_HI 0x00000435
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_LO 0x00000436
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_HI 0x00000437
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_LO 0x00000438
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_HI 0x00000439
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_LO 0x0000043a
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_HI 0x0000043b
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_LO 0x0000043c
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_HI 0x0000043d
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_LO 0x0000043e
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_HI 0x0000043f
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_LO 0x00000440
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_HI 0x00000441
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_LO 0x00000442
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_HI 0x00000443
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_LO 0x00000444
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_HI 0x00000445
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_LO 0x00000446
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_HI 0x00000447
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_LO 0x00000448
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_HI 0x00000449
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_LO 0x0000044a
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_HI 0x0000044b
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_LO 0x0000044c
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_HI 0x0000044d
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_LO 0x0000044e
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_HI 0x0000044f
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_LO 0x00000450
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_HI 0x00000451
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_LO 0x00000452
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_HI 0x00000453
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_LO 0x00000454
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_HI 0x00000455
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_LO 0x00000456
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_HI 0x00000457
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_LO 0x00000458
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_HI 0x00000459
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_LO 0x0000045a
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_HI 0x0000045b
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_LO 0x0000045c
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_HI 0x0000045d
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_LO 0x0000045e
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_HI 0x0000045f
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_LO 0x00000460
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_HI 0x00000461
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_LO 0x00000462
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_HI 0x00000463
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_LO 0x00000464
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_HI 0x00000465
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_LO 0x00000466
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_HI 0x00000467
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_LO 0x00000468
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_HI 0x00000469
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO 0x0000046a
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_HI 0x0000046b
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_LO 0x0000046c
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_HI 0x0000046d
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_LO 0x0000046e
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_HI 0x0000046f
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_LO 0x00000470
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_HI 0x00000471
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_LO 0x00000472
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_HI 0x00000473
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_LO 0x00000474
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_HI 0x00000475
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_LO 0x00000476
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_HI 0x00000477
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_LO 0x00000478
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_HI 0x00000479
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_LO 0x0000047a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_HI 0x0000047b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_LO 0x0000047c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_HI 0x0000047d
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_LO 0x0000047e
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_HI 0x0000047f
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_LO 0x00000480
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_HI 0x00000481
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_LO 0x00000482
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_HI 0x00000483
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_LO 0x00000484
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_HI 0x00000485
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_LO 0x00000486
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_HI 0x00000487
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_LO 0x00000488
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_HI 0x00000489
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_LO 0x0000048a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_HI 0x0000048b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_LO 0x0000048c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_HI 0x0000048d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_LO 0x0000048e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_HI 0x0000048f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_LO 0x00000490
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_HI 0x00000491
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_LO 0x00000492
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_HI 0x00000493
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_LO 0x00000494
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_HI 0x00000495
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_LO 0x00000496
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_HI 0x00000497
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_LO 0x00000498
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_HI 0x00000499
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_LO 0x0000049a
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_HI 0x0000049b
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_LO 0x0000049c
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_HI 0x0000049d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_LO 0x0000049e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_HI 0x0000049f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_LO 0x000004a0
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_HI 0x000004a1
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_LO 0x000004a2
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_HI 0x000004a3
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_LO 0x000004a4
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_HI 0x000004a5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_LO 0x000004a6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_HI 0x000004a7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_LO 0x000004a8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_HI 0x000004a9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_LO 0x000004aa
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_HI 0x000004ab
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_LO 0x000004ac
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_HI 0x000004ad
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_LO 0x000004ae
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_HI 0x000004af
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_LO 0x000004b0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_HI 0x000004b1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_LO 0x000004b2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_HI 0x000004b3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_LO 0x000004b4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_HI 0x000004b5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_LO 0x000004b6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_HI 0x000004b7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_LO 0x000004b8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_HI 0x000004b9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_LO 0x000004ba
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_HI 0x000004bb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_LO 0x000004bc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_HI 0x000004bd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_LO 0x000004be
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_HI 0x000004bf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_LO 0x000004c0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_HI 0x000004c1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_LO 0x000004c2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_HI 0x000004c3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_LO 0x000004c4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_HI 0x000004c5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_LO 0x000004c6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_HI 0x000004c7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_LO 0x000004c8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_HI 0x000004c9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_LO 0x000004ca
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_HI 0x000004cb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_LO 0x000004cc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_HI 0x000004cd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_LO 0x000004ce
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_HI 0x000004cf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_LO 0x000004d0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_HI 0x000004d1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_LO 0x000004d2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_HI 0x000004d3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_LO 0x000004d4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_HI 0x000004d5
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_LO 0x000004d6
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_HI 0x000004d7
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_LO 0x000004d8
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_HI 0x000004d9
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_LO 0x000004da
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_HI 0x000004db
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_LO 0x000004dc
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_HI 0x000004dd
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_LO 0x000004de
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_HI 0x000004df
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_LO 0x000004e0
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_HI 0x000004e1
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_LO 0x000004e2
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_HI 0x000004e3
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_LO 0x000004e4
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_HI 0x000004e5
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_LO 0x000004e6
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_HI 0x000004e7
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_LO 0x000004e8
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_HI 0x000004e9
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_LO 0x000004ea
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_HI 0x000004eb
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_LO 0x000004ec
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_HI 0x000004ed
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_LO 0x000004ee
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_HI 0x000004ef
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_LO 0x000004f0
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_HI 0x000004f1
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_LO 0x000004f2
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_HI 0x000004f3
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_LO 0x000004f4
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_HI 0x000004f5
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_LO 0x000004f6
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_HI 0x000004f7
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_LO 0x000004f8
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_HI 0x000004f9
+
+#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000507
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000508
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000509
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000050a
+
+#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
+
+#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+
+#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
+
+#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
+
+#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
+
+#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
+
+#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
+
+#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
+
+#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
+
+#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
+
+#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
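+
+/* Illustrative sketch only, not part of the generated register database: the
+ * pack helpers above are typically OR'd together to build a register value,
+ * e.g. to point the debug bus at a block/index pair:
+ *
+ *   gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D,
+ *             A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(index) |
+ *             A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block));
+ *
+ * gpu_write() is the driver's usual MMIO write helper; 'index' and 'block'
+ * are hypothetical values chosen by the caller.
+ */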
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
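+
+/* Illustrative sketch only: BYTEL_0/BYTEL_1 hold one 4-bit lane select per
+ * output byte (bytes 0..7 in BYTEL_0, bytes 8..15 in BYTEL_1). An identity
+ * mapping of the low lanes might look like:
+ *
+ *   gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ *             A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(0) |
+ *             A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(1) |
+ *             A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(2) |
+ *             A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(3));
+ *
+ * The lane-per-byte interpretation is inferred from the field layout, not
+ * documented here.
+ */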
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_0 0x00000cd8
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_1 0x00000cd9
+
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0 0x00008610
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1 0x00008611
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2 0x00008612
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3 0x00008613
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0 0x00008614
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1 0x00008615
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2 0x00008616
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3 0x00008617
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0 0x00008618
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1 0x00008619
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2 0x0000861a
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3 0x0000861b
+
+#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+
+#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_0 0x00008e10
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_1 0x00008e11
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_2 0x00008e12
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_3 0x00008e13
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_4 0x00008e14
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_5 0x00008e15
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_6 0x00008e16
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_7 0x00008e17
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_0 0x00008e18
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_1 0x00008e19
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_2 0x00008e1a
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_3 0x00008e1b
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_4 0x00008e1c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_0 0x00008e2c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_1 0x00008e2d
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_2 0x00008e2e
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_3 0x00008e2f
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
+
+#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_0 0x00009e34
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_1 0x00009e35
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_2 0x00009e36
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_3 0x00009e37
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_4 0x00009e38
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_5 0x00009e39
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_6 0x00009e3a
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_7 0x00009e3b
+
+#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x0000be10
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x0000be11
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x0000be12
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x0000be13
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x0000be14
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x0000be15
+
+#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
+
+#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
+
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_0 0x0000a610
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_1 0x0000a611
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_2 0x0000a612
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_3 0x0000a613
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_4 0x0000a614
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_5 0x0000a615
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_6 0x0000a616
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_7 0x0000a617
+
+#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0 0x00009604
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1 0x00009605
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2 0x00009606
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3 0x00009607
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4 0x00009608
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5 0x00009609
+
+#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
+
+#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_LO 0x00000e05
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_HI 0x00000e06
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_LO 0x00000e07
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_HI 0x00000e08
+
+#define REG_A6XX_UCHE_TRAP_BASE_LO 0x00000e09
+
+#define REG_A6XX_UCHE_TRAP_BASE_HI 0x00000e0a
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e0b
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e0c
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e0d
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e0e
+
+#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
+
+#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
+
+#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
+static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
+{
+ return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
+}
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e1c
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e1d
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e1e
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e1f
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e20
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e21
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e22
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e23
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_8 0x00000e24
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_9 0x00000e25
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_10 0x00000e26
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_11 0x00000e27
+
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_0 0x0000ae10
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_1 0x0000ae11
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_2 0x0000ae12
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_3 0x0000ae13
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_4 0x0000ae14
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_5 0x0000ae15
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_6 0x0000ae16
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_7 0x0000ae17
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_8 0x0000ae18
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_9 0x0000ae19
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_10 0x0000ae1a
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_11 0x0000ae1b
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_12 0x0000ae1c
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_13 0x0000ae1d
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_14 0x0000ae1e
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_15 0x0000ae1f
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_16 0x0000ae20
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_17 0x0000ae21
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_18 0x0000ae22
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_19 0x0000ae23
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_20 0x0000ae24
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_21 0x0000ae25
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_22 0x0000ae26
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_23 0x0000ae27
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_2 0x0000b612
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_3 0x0000b613
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_4 0x0000b614
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_5 0x0000b615
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_6 0x0000b616
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_7 0x0000b617
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_8 0x0000b618
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_9 0x0000b619
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_10 0x0000b61a
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_11 0x0000b61b
+
+#define REG_A6XX_VBIF_VERSION 0x00003000
+
+#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430
+
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000
+
+#define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4
+#define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK;
+}
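+
+/* Illustrative sketch only: the WINDOW_OFFSET registers pack a 15-bit X in
+ * the low half and a 15-bit Y in the high half, with bit 31 acting as a
+ * disable flag, e.g.:
+ *
+ *   uint32_t wo = A6XX_X1_WINDOW_OFFSET_X(x) | A6XX_X1_WINDOW_OFFSET_Y(y);
+ *
+ * or A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE to ignore the offset.
+ */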
+
+#define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307
+#define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X1_BIN_SIZE 0x000080a1
+#define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK;
+}
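+
+/* Note on units (inferred from the helpers above, not from documentation):
+ * the '>> 5' and '>> 4' mean callers pass bin dimensions in pixels, while
+ * the hardware field stores width in 32-pixel units and height in 16-pixel
+ * units, so inputs are expected to be multiples of 32 and 16 respectively:
+ *
+ *   // 64x32-pixel bins -> WIDTH field = 2, HEIGHT field = 2
+ *   uint32_t bs = A6XX_X1_BIN_SIZE_WIDTH(64) | A6XX_X1_BIN_SIZE_HEIGHT(32);
+ */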
+
+#define REG_A6XX_X2_BIN_SIZE 0x00008800
+#define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_X3_BIN_SIZE 0x000088d3
+#define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
+#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_SIZE_ADDRESS_LO 0x00000c03
+
+#define REG_A6XX_VSC_SIZE_ADDRESS_HI 0x00000c04
+
+#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
+#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
+#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
+static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
+}
+#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
+#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
+static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
+}
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
+#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
+#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
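+
+/* Illustrative sketch only: REG_A6XX_VSC_PIPE_CONFIG_REG(i) indexes one
+ * 32-bit config word per visibility-stream pipe, packing the pipe's origin
+ * and extent (the W/H units appear to be bins, going by the equivalent
+ * earlier-generation registers rather than anything stated here):
+ *
+ *   gpu_write(gpu, REG_A6XX_VSC_PIPE_CONFIG_REG(i),
+ *             A6XX_VSC_PIPE_CONFIG_REG_X(x) | A6XX_VSC_PIPE_CONFIG_REG_Y(y) |
+ *             A6XX_VSC_PIPE_CONFIG_REG_W(w) | A6XX_VSC_PIPE_CONFIG_REG_H(h));
+ */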
+
+#define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30
+
+#define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31
+
+#define REG_A6XX_VSC_XXX_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_HI 0x00000c35
+
+#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+
+static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+
+#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
+
+#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
+
+#define REG_A6XX_GRAS_CNTL 0x00008005
+#define A6XX_GRAS_CNTL_VARYING 0x00000001
+#define A6XX_GRAS_CNTL_XCOORD 0x00000040
+#define A6XX_GRAS_CNTL_YCOORD 0x00000080
+#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
+#define A6XX_GRAS_CNTL_WCOORD 0x00000200
+
+#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_XOFFSET_0 0x00008010
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_XSCALE_0 0x00008011
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YOFFSET_0 0x00008012
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YSCALE_0 0x00008013
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZOFFSET_0 0x00008014
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZSCALE_0 0x00008015
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
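+
+/* Illustrative sketch only: the viewport registers take raw IEEE-754 floats,
+ * so the pack helpers run the value through fui() (a float-to-uint32 bit
+ * cast assumed to be provided alongside these generated headers):
+ *
+ *   gpu_write(gpu, REG_A6XX_GRAS_CL_VPORT_XSCALE_0,
+ *             A6XX_GRAS_CL_VPORT_XSCALE_0(width / 2.0f));
+ */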
+
+#define REG_A6XX_GRAS_SU_CNTL 0x00008090
+#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
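+
+/* Illustrative sketch only: LINEHALFWIDTH is a fixed-point field with two
+ * fractional bits (hence the 'val * 4.0' above), so a one-pixel-wide line
+ * (half-width 0.5) packs as raw value 2 in the field:
+ *
+ *   uint32_t su = A6XX_GRAS_SU_CNTL_CULL_BACK |
+ *                 A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(0.5f);
+ */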
+
+#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8099 0x00008099
+
+#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+
+#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_GRAS_UNKNOWN_80A4 0x000080a4
+
+#define REG_A6XX_GRAS_UNKNOWN_80A5 0x000080a5
+
+#define REG_A6XX_GRAS_UNKNOWN_80A6 0x000080a6
+
+#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x000080b0
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x000080b1
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x000080d0
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x000080d1
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
+#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+
+#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO 0x00008103
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_HI 0x00008104
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x00008106
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
+
+#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+
+#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_X_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+#define A6XX_GRAS_2D_SRC_BR_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_X_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
+#define A6XX_GRAS_2D_DST_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
+#define A6XX_GRAS_2D_DST_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_1 0x0000840a
+#define A6XX_GRAS_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_2 0x0000840b
+#define A6XX_GRAS_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+
+#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_RB_UNKNOWN_8804 0x00008804
+
+#define REG_A6XX_RB_UNKNOWN_8805 0x00008805
+
+#define REG_A6XX_RB_UNKNOWN_8806 0x00008806
+
+#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
+#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A6XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
+
+#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d
+#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
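+/*
+ * RB_RENDER_COMPONENTS carries one 4-bit component-enable nibble per render
+ * target.  Sketch enabling RGBA on RT0 and RGB on RT1 (mask values are
+ * illustrative):
+ *
+ *   uint32_t comps = A6XX_RB_RENDER_COMPONENTS_RT0(0xf) |
+ *                    A6XX_RB_RENDER_COMPONENTS_RT1(0x7);
+ */
+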
+#define REG_A6XX_RB_DITHER_CNTL 0x0000880e
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK;
+}
+
+#define REG_A6XX_RB_SRGB_CNTL 0x0000880f
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
+
+#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
+
+#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a
+
+#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b
+
+#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c
+
+#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d
+
+#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
+
+static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; }
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
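+/*
+ * Sketch of a conventional alpha blend for one MRT; 'sfac', 'dfac' and 'op'
+ * stand in for values of the adreno_rb_blend_factor and a3xx_rb_blend_opcode
+ * enums declared elsewhere in this header:
+ *
+ *   uint32_t bc = A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(sfac) |
+ *                 A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(op) |
+ *                 A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(dfac) |
+ *                 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(sfac) |
+ *                 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(op) |
+ *                 A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(dfac);
+ */
+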
+static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
+#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x00008826 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
+
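+/*
+ * The REG_A6XX_RB_MRT_*(i0) helpers above compute per-render-target register
+ * offsets with a stride of 8 dwords.  Sketch for MRT 1, with placeholder
+ * format/tile/swap enum values:
+ *
+ *   uint32_t reg  = REG_A6XX_RB_MRT_BUF_INFO(1);      (0x8822 + 0x8 = 0x882a)
+ *   uint32_t info = A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(fmt) |
+ *                   A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile) |
+ *                   A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap);
+ */
+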
+#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
+#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861
+#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862
+#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863
+#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK;
+}
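+/*
+ * The four F32 helpers above rely on fui(), presumably defined elsewhere in
+ * the driver, to reinterpret the float's IEEE-754 bit pattern as a uint32_t;
+ * the blend color is thus programmed as raw float bits, e.g.:
+ *
+ *   uint32_t red = A6XX_RB_BLEND_RED_F32(1.0f);   (0x3f800000)
+ */
+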
+
+#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_CNTL 0x00008865
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
+#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_LO 0x00008875
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_HI 0x00008876
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
+
+#define REG_A6XX_RB_UNKNOWN_8878 0x00008878
+
+#define REG_A6XX_RB_UNKNOWN_8879 0x00008879
+
+#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
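+/*
+ * Sketch of a complete front-face stencil setup in one RB_STENCIL_CONTROL
+ * write; 'func' and the *_op names stand in for adreno_compare_func and
+ * adreno_stencil_op enum values (the _BF variants cover the back face):
+ *
+ *   uint32_t sc = A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
+ *                 A6XX_RB_STENCIL_CONTROL_FUNC(func) |
+ *                 A6XX_RB_STENCIL_CONTROL_FAIL(fail_op) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZPASS(zpass_op) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZFAIL(zfail_op);
+ */
+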
+#define REG_A6XX_RB_STENCIL_INFO 0x00008881
+#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_LO 0x00008884
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_HI 0x00008885
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
+
+#define REG_A6XX_RB_STENCILREF 0x00008887
+#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
+#define A6XX_RB_STENCILREF_REF__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
+}
+
+#define REG_A6XX_RB_STENCILMASK 0x00008888
+#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
+#define A6XX_RB_STENCILMASK_MASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
+}
+
+#define REG_A6XX_RB_STENCILWRMASK 0x00008889
+#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
+#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
+#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
+
+#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
+#define A6XX_RB_BLIT_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
+#define A6XX_RB_BLIT_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
+
+#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_LO 0x000088d8
+
+#define REG_A6XX_RB_BLIT_DST_HI 0x000088d9
+
+#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
+#define A6XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_LO 0x000088dc
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_HI 0x000088dd
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2
+
+#define REG_A6XX_RB_BLIT_INFO 0x000088e3
+#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
+#define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002
+#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
+#define A6XX_RB_BLIT_INFO_UNK3 0x00000008
+#define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x00008900
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x00008901
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x00008904 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO 0x00008927
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_HI 0x00008928
+
+#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_RB_2D_DST_LO 0x00008c18
+
+#define REG_A6XX_RB_2D_DST_HI 0x00008c19
+
+#define REG_A6XX_RB_2D_DST_SIZE 0x00008c1a
+#define A6XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A6XX_RB_2D_DST_SIZE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_LO 0x00008c20
+
+#define REG_A6XX_RB_2D_DST_FLAGS_HI 0x00008c21
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f
+
+#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+
+#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+
+#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
+
+#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+
+#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210
+
+#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
+
+static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+#define REG_A6XX_VPC_SO_CNTL 0x00009216
+#define A6XX_VPC_SO_CNTL_ENABLE 0x00010000
+
+#define REG_A6XX_VPC_SO_PROG 0x00009217
+#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_EN 0x00000800
+#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_EN 0x00800000
+
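+/*
+ * VPC_SO_PROG packs two stream-out program entries (A and B) per dword; the
+ * *_OFF helpers drop the low two bits of the byte offset, so offsets must be
+ * dword aligned.  Sketch with made-up buffer indices and offsets:
+ *
+ *   uint32_t prog = A6XX_VPC_SO_PROG_A_BUF(0) | A6XX_VPC_SO_PROG_A_OFF(16) |
+ *                   A6XX_VPC_SO_PROG_A_EN |
+ *                   A6XX_VPC_SO_PROG_B_BUF(1) | A6XX_VPC_SO_PROG_B_OFF(32) |
+ *                   A6XX_VPC_SO_PROG_B_EN;
+ */
+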
+static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000921b + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x00009220 + 0x7*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9236 0x00009236
+
+#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
+
+#define REG_A6XX_VPC_PACK 0x00009301
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_PACK_NUMNONPOSVAR__MASK 0x0000ff00
+#define A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT 8
+static inline uint32_t A6XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A6XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_PSIZELOC__SHIFT) & A6XX_VPC_PACK_PSIZELOC__MASK;
+}
+
+#define REG_A6XX_VPC_CNTL_0 0x00009304
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_CNTL_0_VARYING 0x00010000
+
+#define REG_A6XX_VPC_SO_BUF_CNTL 0x00009305
+#define A6XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
+#define A6XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
+#define A6XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
+#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
+#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+
+#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
+
+#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
+
+#define REG_A6XX_PC_UNKNOWN_9801 0x00009801
+
+#define REG_A6XX_PC_RESTART_INDEX 0x00009803
+
+#define REG_A6XX_PC_MODE_CNTL 0x00009804
+
+#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
+
+#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
+#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
+#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_1 0x00009b01
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK 0x0000007f
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
+
+#define REG_A6XX_PC_UNKNOWN_9B07 0x00009b07
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_LO 0x00009e08
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_HI 0x00009e09
+
+#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
+
+#define REG_A6XX_VFD_CONTROL_0 0x0000a000
+#define A6XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A6XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A6XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_1 0x0000a001
+#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_2 0x0000a002
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_3 0x0000a003
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_4 0x0000a004
+
+#define REG_A6XX_VFD_CONTROL_5 0x0000a005
+
+#define REG_A6XX_VFD_CONTROL_6 0x0000a006
+
+#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
+#define A6XX_VFD_MODE_CNTL_BINNING_PASS 0x00000001
+
+#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+
+#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
+
+#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
+
+static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000a011 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_vtx_fmt val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000
+
+static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
+
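+/*
+ * Sketch of one vertex-fetch decode entry for slot 0, written to
+ * REG_A6XX_VFD_DECODE_INSTR(0); 'fmt' and 'swap' stand in for a6xx_vtx_fmt
+ * and a3xx_color_swap enum values.  When INSTANCED is set, the per-instance
+ * step rate presumably goes in the matching REG_A6XX_VFD_DECODE_STEP_RATE(0).
+ *
+ *   uint32_t instr = A6XX_VFD_DECODE_INSTR_IDX(0) |
+ *                    A6XX_VFD_DECODE_INSTR_FORMAT(fmt) |
+ *                    A6XX_VFD_DECODE_INSTR_SWAP(swap) |
+ *                    A6XX_VFD_DECODE_INSTR_FLOAT;
+ */
+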
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_A0F8 0x0000a0f8
+
+#define REG_A6XX_SP_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
+static inline uint32_t A6XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
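+/*
+ * Each SP_VS_OUT_REG appears to hold two VS outputs (the A and B halves).
+ * Sketch with hypothetical register ids r0/r1:
+ *
+ *   uint32_t out = A6XX_SP_VS_OUT_REG_A_REGID(r0) |
+ *                  A6XX_SP_VS_OUT_REG_A_COMPMASK(0xf) |
+ *                  A6XX_SP_VS_OUT_REG_B_REGID(r1) |
+ *                  A6XX_SP_VS_OUT_REG_B_COMPMASK(0xf);
+ */
+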
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
+
+#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
+
+#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
+
+#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+
+#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_HS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_HS_UNKNOWN_A831 0x0000a831
+
+#define REG_A6XX_SP_HS_OBJ_START_LO 0x0000a834
+
+#define REG_A6XX_SP_HS_OBJ_START_HI 0x0000a835
+
+#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
+
+#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+
+#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_DS_OBJ_START_LO 0x0000a85c
+
+#define REG_A6XX_SP_DS_OBJ_START_HI 0x0000a85d
+
+#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
+
+#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+
+#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_GS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_GS_UNKNOWN_A871 0x0000a871
+
+#define REG_A6XX_SP_GS_OBJ_START_LO 0x0000a88d
+
+#define REG_A6XX_SP_GS_OBJ_START_HI 0x0000a88e
+
+#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
+
+#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
+
+#define REG_A6XX_SP_VS_TEX_SAMP_LO 0x0000a8a0
+
+#define REG_A6XX_SP_VS_TEX_SAMP_HI 0x0000a8a1
+
+#define REG_A6XX_SP_HS_TEX_SAMP_LO 0x0000a8a2
+
+#define REG_A6XX_SP_HS_TEX_SAMP_HI 0x0000a8a3
+
+#define REG_A6XX_SP_DS_TEX_SAMP_LO 0x0000a8a4
+
+#define REG_A6XX_SP_DS_TEX_SAMP_HI 0x0000a8a5
+
+#define REG_A6XX_SP_GS_TEX_SAMP_LO 0x0000a8a6
+
+#define REG_A6XX_SP_GS_TEX_SAMP_HI 0x0000a8a7
+
+#define REG_A6XX_SP_VS_TEX_CONST_LO 0x0000a8a8
+
+#define REG_A6XX_SP_VS_TEX_CONST_HI 0x0000a8a9
+
+#define REG_A6XX_SP_HS_TEX_CONST_LO 0x0000a8aa
+
+#define REG_A6XX_SP_HS_TEX_CONST_HI 0x0000a8ab
+
+#define REG_A6XX_SP_DS_TEX_CONST_LO 0x0000a8ac
+
+#define REG_A6XX_SP_DS_TEX_CONST_HI 0x0000a8ad
+
+#define REG_A6XX_SP_GS_TEX_CONST_LO 0x0000a8ae
+
+#define REG_A6XX_SP_GS_TEX_CONST_HI 0x0000a8af
+
+#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
+
+#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
+
+#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
+#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
+#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+
+#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
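+
+/* The per-render-target registers are contiguous, one dword apart, so e.g.
+ * REG_A6XX_SP_FS_MRT_REG(3) resolves to 0x0000a996 + 3 = 0x0000a999, the
+ * descriptor for MRT3.
+ */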
+
+#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
+
+#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+
+#define REG_A6XX_SP_FS_TEX_SAMP_LO 0x0000a9e0
+
+#define REG_A6XX_SP_FS_TEX_SAMP_HI 0x0000a9e1
+
+#define REG_A6XX_SP_CS_TEX_SAMP_LO 0x0000a9e2
+
+#define REG_A6XX_SP_CS_TEX_SAMP_HI 0x0000a9e3
+
+#define REG_A6XX_SP_FS_TEX_CONST_LO 0x0000a9e4
+
+#define REG_A6XX_SP_FS_TEX_CONST_HI 0x0000a9e5
+
+#define REG_A6XX_SP_CS_TEX_CONST_LO 0x0000a9e6
+
+#define REG_A6XX_SP_CS_TEX_CONST_HI 0x0000a9e7
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_CS_OBJ_START_LO 0x0000a9b4
+
+#define REG_A6XX_SP_CS_OBJ_START_HI 0x0000a9b5
+
+#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+
+#define REG_A6XX_SP_UNKNOWN_AB00 0x0000ab00
+
+#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+
+#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+
+#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
+
+#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
+
+#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+
+#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000b302
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000b303
+
+#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+
+#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
+
+#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
+
+#define REG_A6XX_SP_UNKNOWN_B600 0x0000b600
+
+#define REG_A6XX_SP_UNKNOWN_B605 0x0000b605
+
+#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+}
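+
+/* Note: the *_CNTL_CONSTLEN() helpers above shift 'val' right by 2 before
+ * packing, so callers pass a constant-file length in dwords while the
+ * hardware field appears to be expressed in units of 4 dwords (one vec4).
+ */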
+
+#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
+
+#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
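+
+/* A minimal sketch of packing NDRANGE_0 for a 3-D dispatch with an 8x8x1
+ * workgroup (the -1 bias on the LOCALSIZE fields is an assumption borrowed
+ * from the userspace driver, not something this header guarantees):
+ *
+ *   A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(3) |
+ *   A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(8 - 1) |
+ *   A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(8 - 1) |
+ *   A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(1 - 1)
+ */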
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
+
+#define REG_A6XX_HLSQ_UPDATE_CNTL 0x0000bb08
+
+#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_BB11 0x0000bb11
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04
+
+#define REG_A6XX_TEX_SAMP_0 0x00000000
+#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
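+/* the float bias above is converted to signed fixed point with 8 fractional
+ * bits before being packed into the 13-bit LOD_BIAS field
+ */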
+
+#define REG_A6XX_TEX_SAMP_1 0x00000001
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_2 0x00000002
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xfffffff0
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 4
+static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_3 0x00000003
+
+#define REG_A6XX_TEX_CONST_0 0x00000000
+#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A6XX_TEX_CONST_0_SRGB 0x00000004
+#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A6XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
+{
+ return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
+}
+#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A6XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_1 0x00000001
+#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_2 0x00000002
+#define A6XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A6XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_2_FETCHSIZE(enum a6xx_tex_fetchsize val)
+{
+ return ((val) << A6XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A6XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A6XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
+{
+ return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_3 0x00000003
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A6XX_TEX_CONST_4 0x00000004
+#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_5 0x00000005
+#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_6 0x00000006
+
+#define REG_A6XX_TEX_CONST_7 0x00000007
+#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_8 0x00000008
+#define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_9 0x00000009
+
+#define REG_A6XX_TEX_CONST_10 0x0000000a
+
+#define REG_A6XX_TEX_CONST_11 0x0000000b
+
+#define REG_A6XX_TEX_CONST_12 0x0000000c
+
+#define REG_A6XX_TEX_CONST_13 0x0000000d
+
+#define REG_A6XX_TEX_CONST_14 0x0000000e
+
+#define REG_A6XX_TEX_CONST_15 0x0000000f
+
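+/* Texture descriptor dwords are built by OR'ing the helpers above together.
+ * A minimal sketch for dword 0 (the TFMT6_ and A6XX_TEX_ enum value names
+ * are assumed from the a6xx_tex_fmt and a6xx_tex_swiz enums defined earlier
+ * in this header):
+ *
+ *   uint32_t desc0 = A6XX_TEX_CONST_0_FMT(TFMT6_8_8_8_8_UNORM) |
+ *                    A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_X) |
+ *                    A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_Y) |
+ *                    A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_Z) |
+ *                    A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_W);
+ */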
+
+#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
new file mode 100644
index 000000000000..bbb8126ec5c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -0,0 +1,1207 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static irqreturn_t a6xx_gmu_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
+ dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
+ dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+ dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a6xx_hfi_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
+ tasklet_schedule(&gmu->hfi_tasklet);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
+ dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check to see if the GX rail is still powered */
+static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+}
+
+static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
+ ((3 & 0xf) << 28) | index);
+
+ /*
+ * Send an invalid index as a vote for the bus bandwidth and let the
+ * firmware decide on the right vote
+ */
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
+
+ /* Set and clear the OOB for DCVS to trigger the GMU */
+ a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+
+ return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+}
+
+static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int local = gmu->idle_level;
+
+ /* SPTP and IFPC both report as IFPC */
+ if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
+ local = GMU_IDLE_STATE_IFPC;
+
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val == local) {
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
+ !a6xx_gmu_gx_is_on(gmu))
+ return true;
+ }
+
+ return false;
+}
+
+/* Wait for the GMU to get to its most idle state */
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return spin_until(a6xx_gmu_check_idle_level(gmu));
+}
+
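+/*
+ * gmu_read()/gmu_write()/gmu_rmw() and gmu_poll_timeout() are the register
+ * helpers declared alongside struct a6xx_gmu; gmu_poll_timeout(gmu, reg, val,
+ * cond, sleep_us, timeout_us) is assumed to wrap readl_poll_timeout(),
+ * re-reading 'reg' into 'val' every sleep_us microseconds until 'cond' holds
+ * or timeout_us microseconds have elapsed (returning -ETIMEDOUT on failure).
+ */
+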
+static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+ val == 0xbabeface, 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+
+ return ret;
+}
+
+static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
+ val & 1, 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to start the HFI queues\n");
+
+ return ret;
+}
+
+/* Trigger an OOB (out of band) request to the GMU */
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int ret;
+ u32 val;
+ int request, ack;
+ const char *name;
+
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ request = GMU_OOB_GPU_SET_REQUEST;
+ ack = GMU_OOB_GPU_SET_ACK;
+ name = "GPU_SET";
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ request = GMU_OOB_BOOT_SLUMBER_REQUEST;
+ ack = GMU_OOB_BOOT_SLUMBER_ACK;
+ name = "BOOT_SLUMBER";
+ break;
+ case GMU_OOB_DCVS_SET:
+ request = GMU_OOB_DCVS_REQUEST;
+ ack = GMU_OOB_DCVS_ACK;
+ name = "GPU_DCVS";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Trigger the requested OOB operation */
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Timeout waiting for GMU OOB set %s: 0x%x\n",
+ name,
+ gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
+
+ /* Clear the acknowledge interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+
+ return ret;
+}
+
+/* Clear a pending OOB state in the GMU */
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR);
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
+ break;
+ case GMU_OOB_DCVS_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_DCVS_CLEAR);
+ break;
+ }
+}
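+
+/*
+ * The usual OOB flow is a set/clear pair bracketing the protected operation,
+ * as a6xx_gmu_set_freq() above does for DCVS. A minimal sketch for the GPU
+ * register access handshake:
+ *
+ *   ret = a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET);
+ *   if (!ret) {
+ *       ... access GX domain registers ...
+ *       a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);
+ *   }
+ */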
+
+/* Enable CPU control of SPTP power collapse */
+static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x38) == 0x28, 1, 100);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+ }
+
+ return ret;
+}
+
+/* Disable CPU control of SPTP power collapse */
+static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ /* Make sure retention is on */
+ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x04), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+}
+
+/* Let the GMU know we are starting a boot sequence */
+static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
+{
+ u32 vote;
+
+ /* Let the GMU know we are getting ready for boot */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
+
+ /* Choose the "default" power level as the highest available */
+ vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
+ gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
+
+ /* Let the GMU know the boot sequence has started */
+ return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+}
+
+/* Let the GMU know that we are about to go into slumber */
+static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /* Disable the power counter so the GMU isn't busy */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
+ /* Disable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
+ a6xx_sptprac_disable(gmu);
+
+ /* Tell the GMU to get ready to slumber */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
+
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ if (!ret) {
+ /* Check to see if the GMU really did slumber */
+ if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
+ != 0x0f) {
+ dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ /* Put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ return ret;
+}
+
+static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
+ /* Wait for the register to finish posting */
+ wmb();
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+ val & (1 << 1), 100, 10000);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ return ret;
+ }
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ !val, 100, 10000);
+
+ if (!ret) {
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Re-enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
+ }
+
+ dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
+}
+
+static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ val, val & (1 << 16), 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+}
+
+static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+{
+ /* Disable SDE clock gating */
+ gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
+ /* Setup RSC PDC handshake for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+
+ /* Load RSC sequencer uCode for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+
+ /* Load PDC sequencer uCode for power up and power down sequence */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+
+ /* Set TCS commands used by PDC sequence for low power modes */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+
+ /* Setup GPU PDC */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+
+ /* ensure no writes happen before the uCode is fully written */
+ wmb();
+}
+
+/*
+ * The lowest 16 bits of this value are the number of XO clock cycles for the
+ * main hysteresis, which is set at 0x1680 (5760) cycles: 300 us at the
+ * 19.2 MHz XO. The upper 16 bits are for the shorter hysteresis that happens
+ * after the main one - this is 0xa (10) cycles, roughly 0.5 us
+ */
+
+#define GMU_PWR_COL_HYST 0x000a1680
+
+/* Set up the idle state for the GMU */
+static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
+{
+ /* Disable GMU WB/RB buffer */
+ gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
+
+ switch (gmu->idle_level) {
+ case GMU_IDLE_STATE_IFPC:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
+ /* Fall through */
+ case GMU_IDLE_STATE_SPTP:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
+ }
+
+ /* Enable RPMh GPU client */
+ gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
+ A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
+}
+
+static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
+{
+ static bool rpmh_init;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ int i, ret;
+ u32 chipid;
+ u32 *image;
+
+ if (state == GMU_WARM_BOOT) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ } else {
+ if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ "GMU firmware is not loaded\n"))
+ return -ENOENT;
+
+ /* Sanity check the size of the firmware that was loaded */
+ if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
+ dev_err(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ /* Turn on register retention */
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
+ /* We only need to load the RPMh microcode once */
+ if (!rpmh_init) {
+ a6xx_gmu_rpmh_init(gmu);
+ rpmh_init = true;
+ } else if (state != GMU_RESET) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ }
+
+ image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
+
+ for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
+ gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
+ image[i]);
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
+
+ /* Write the iova of the HFI table */
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
+ (1 << 31) | (0xa << 18) | (0xa0));
+
+ chipid = adreno_gpu->rev.core << 24;
+ chipid |= adreno_gpu->rev.major << 16;
+ chipid |= adreno_gpu->rev.minor << 12;
+ chipid |= adreno_gpu->rev.patchid << 8;
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+
+ /* Set up the lowest idle level on the GMU */
+ a6xx_gmu_power_config(gmu);
+
+ ret = a6xx_gmu_start(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+
+ /* Enable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
+ ret = a6xx_sptprac_enable(gmu);
+ if (ret)
+ return ret;
+ }
+
+ ret = a6xx_gmu_hfi_start(gmu);
+ if (ret)
+ return ret;
+
+ /* FIXME: Do we need this wmb() here? */
+ wmb();
+
+ return 0;
+}
+
+#define A6XX_HFI_IRQ_MASK \
+ (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
+ A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+
+#define A6XX_GMU_IRQ_MASK \
+ (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+
+static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
+{
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
+ ~A6XX_GMU_IRQ_MASK);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ ~A6XX_HFI_IRQ_MASK);
+
+ enable_irq(gmu->gmu_irq);
+ enable_irq(gmu->hfi_irq);
+}
+
+static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
+{
+ disable_irq(gmu->gmu_irq);
+ disable_irq(gmu->hfi_irq);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
+}
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int ret;
+ u32 val;
+
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
+
+ /* Make sure there are no outstanding RPMh votes */
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ (val & 1), 100, 1000);
+
+ /* Force off the GX GDSC */
+ regulator_force_disable(gmu->gx);
+
+ /* Disable the resources */
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+ pm_runtime_put_sync(gmu->dev);
+
+ /* Re-enable the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
+ if (!ret)
+ ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
+
+ /* Set the GPU back to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int status, ret;
+
+ if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
+ return 0;
+
+ /* Turn on the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ /* Check to see if we are doing a cold or warm boot */
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+
+ ret = a6xx_gmu_fw_start(gmu, status);
+ if (ret)
+ goto out;
+
+ ret = a6xx_hfi_start(gmu, status);
+
+ /* Set the GPU to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ /* Make sure to turn off the boot OOB request on error */
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
+{
+ u32 reg;
+
+ if (!gmu->mmio)
+ return true;
+
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
+
+ if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
+ return false;
+
+ return true;
+}
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 val;
+
+ /*
+ * The GMU may still be in slumber if the GPU never started, so check
+ * and skip putting it back into slumber in that case
+ */
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val != 0xf) {
+ int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Temporary until we can recover safely */
+ BUG_ON(ret);
+
+ /* tell the GMU we want to slumber */
+ a6xx_gmu_notify_slumber(gmu);
+
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
+
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+ }
+
+ /* Turn off HFI */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts and mask the hardware */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Tell RPMh to power off the GPU */
+ a6xx_rpmh_stop(gmu);
+
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+
+ pm_runtime_put_sync(gmu->dev);
+
+ return 0;
+}
+
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+{
+ int count, i;
+ u64 iova;
+
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ count = bo->size >> PAGE_SHIFT;
+ iova = bo->iova;
+
+ for (i = 0; i < count; i++, iova += PAGE_SIZE) {
+ iommu_unmap(gmu->domain, iova, PAGE_SIZE);
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+}
+
+static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
+ size_t size)
+{
+ struct a6xx_gmu_bo *bo;
+ int ret, count, i;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ bo->size = PAGE_ALIGN(size);
+
+ count = bo->size >> PAGE_SHIFT;
+
+ bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
+ if (!bo->pages) {
+ kfree(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < count; i++) {
+ bo->pages[i] = alloc_page(GFP_KERNEL);
+ if (!bo->pages[i])
+ goto err;
+ }
+
+ bo->iova = gmu->uncached_iova_base;
+
+ for (i = 0; i < count; i++) {
+ ret = iommu_map(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ page_to_phys(bo->pages[i]), PAGE_SIZE,
+ IOMMU_READ | IOMMU_WRITE);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+
+ for (i = i - 1; i >= 0; i--)
+ iommu_unmap(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ PAGE_SIZE);
+
+ goto err;
+ }
+ }
+
+ bo->virt = vmap(bo->pages, count, VM_IOREMAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!bo->virt)
+ goto err;
+
+ /* Align future IOVA addresses on 1MB boundaries */
+ gmu->uncached_iova_base += ALIGN(size, SZ_1M);
+
+ return bo;
+
+err:
+ for (i = 0; i < count; i++) {
+ if (bo->pages[i])
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /*
+ * The GMU address space is hardcoded to treat the range
+ * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
+ * between the GMU and the CPU will live in this space
+ */
+ gmu->uncached_iova_base = 0x60000000;
+
+ gmu->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!gmu->domain)
+ return -ENODEV;
+
+ ret = iommu_attach_device(gmu->domain, gmu->dev);
+
+ if (ret) {
+ iommu_domain_free(gmu->domain);
+ gmu->domain = NULL;
+ }
+
+ return ret;
+}
+
+/* Get the list of RPMh voltage levels from cmd-db */
+static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
+{
+ u32 len = cmd_db_read_aux_data_len(id);
+
+ if (!len)
+ return 0;
+
+ if (WARN_ON(len > size))
+ return -EINVAL;
+
+ cmd_db_read_aux_data(id, vals, len);
+
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ return len >> 1;
+}
+
+/* Return the 'arc-level' for the given frequency */
+static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ u32 val = 0;
+
+ if (!freq)
+ return 0;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp))
+ return 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+
+ if (np) {
+ of_property_read_u32(np, "qcom,level", &val);
+ of_node_put(np);
+ }
+
+ dev_pm_opp_put(opp);
+
+ return val;
+}
+
+static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count,
+ u16 *pri, int pri_count,
+ u16 *sec, int sec_count)
+{
+ int i, j;
+
+ /* Construct a vote for each frequency */
+ for (i = 0; i < freqs_count; i++) {
+ u8 pindex = 0, sindex = 0;
+ u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+
+ /* Get the primary index that matches the arc level */
+ for (j = 0; j < pri_count; j++) {
+ if (pri[j] >= level) {
+ pindex = j;
+ break;
+ }
+ }
+
+ if (j == pri_count) {
+ dev_err(dev,
+ "Level %u not found in in the RPMh list\n",
+ level);
+ dev_err(dev, "Available levels:\n");
+ for (j = 0; j < pri_count; j++)
+ dev_err(dev, " %u\n", pri[j]);
+
+ return -EINVAL;
+ }
+
+ /*
+ * Look for a level in the secondary list that matches. If
+ * nothing fits, use the maximum non-zero vote
+ */
+
+ for (j = 0; j < sec_count; j++) {
+ if (sec[j] >= level) {
+ sindex = j;
+ break;
+ } else if (sec[j]) {
+ sindex = j;
+ }
+ }
+
+ /*
+ * Construct the vote: the primary arc level value goes in bits
+ * [31:16], the secondary table index in [15:8] and the primary
+ * table index in [7:0]
+ */
+ votes[i] = ((pri[pindex] & 0xffff) << 16) |
+ (sindex << 8) | pindex;
+ }
+
+ return 0;
+}
+
+/*
+ * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
+ * to construct the list of votes on the CPU and send it over. Query the RPMh
+ * voltage levels and build the votes
+ */
+
+static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ u16 gx[16], cx[16], mx[16];
+ u32 gxcount, cxcount, mxcount;
+ int ret;
+
+ /* Get the list of available voltage levels for each component */
+ gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
+ cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
+ mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
+
+ /* Build the GX votes */
+ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs,
+ gx, gxcount, mx, mxcount);
+
+ /* Build the CX votes */
+ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
+ gmu->gmu_freqs, gmu->nr_gmu_freqs,
+ cx, cxcount, mx, mxcount);
+
+ return ret;
+}
+
+static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned long freq = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" frequency level so we need to
+ * add 1 to the table size to account for it
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU frequency table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" frequency */
+ freqs[index++] = 0;
+
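+ /*
+ * dev_pm_opp_find_freq_ceil() rounds the request up to the nearest
+ * available OPP and writes the exact rate back through 'freq', so
+ * bumping it by 1 Hz each iteration walks the table in ascending order
+ */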
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ freqs[index++] = freq++;
+ }
+
+ return index;
+}
+
+static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ int ret = 0;
+
+ /*
+ * The GMU handles its own frequency switching so build a list of
+ * available frequencies to send during initialization
+ */
+ ret = dev_pm_opp_of_add_table(gmu->dev);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ return ret;
+ }
+
+ gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
+ gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
+
+ /*
+ * The GMU also handles GPU frequency switching so build a list
+ * from the GPU OPP table
+ */
+ gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
+ gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+
+ /* Build the list of RPMh votes that we'll send to the GMU */
+ return a6xx_gmu_rpmh_votes_init(gmu);
+}
+
+static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
+{
+ int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
+
+ if (ret < 1)
+ return ret;
+
+ gmu->nr_clocks = ret;
+
+ gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "gmu");
+
+ return 0;
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ret) {
+ dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ret;
+}
+
+static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, name);
+
+ ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
+ name, gmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ return ret;
+ }
+
+ disable_irq(irq);
+
+ return irq;
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return;
+
+ pm_runtime_disable(gmu->dev);
+ a6xx_gmu_stop(a6xx_gpu);
+
+ a6xx_gmu_irq_disable(gmu);
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+}
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = of_find_device_by_node(node);
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ gmu->dev = &pdev->dev;
+
+ of_dma_configure(gmu->dev, node, false);
+
+ /* For now, don't do anything fancy until we get our feet under us */
+ gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+
+ pm_runtime_enable(gmu->dev);
+ gmu->gx = devm_regulator_get(gmu->dev, "vdd");
+
+ /* Get the list of clocks */
+ ret = a6xx_gmu_clocks_probe(gmu);
+ if (ret)
+ return ret;
+
+ /* Set up the IOMMU context bank */
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ return ret;
+
+ /* Allocate memory for the HFI queues */
+ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->hfi))
+ goto err;
+
+ /* Allocate memory for the GMU debug region */
+ gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->debug))
+ goto err;
+
+ /* Map the GMU registers */
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+
+ /* Map the GPU power domain controller registers */
+ gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+
+ if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
+ goto err;
+
+ /* Get the HFI and GMU interrupts */
+ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
+ gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
+
+ if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+ goto err;
+
+ /* Set up a tasklet to handle GMU HFI responses */
+ tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
+
+ /* Get the power levels for the GMU and GPU */
+ a6xx_gmu_pwrlevels_probe(gmu);
+
+ /* Set up the HFI queues */
+ a6xx_hfi_init(gmu);
+
+ return 0;
+err:
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ if (gmu->domain) {
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
new file mode 100644
index 000000000000..d9a386c18799
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_GMU_H_
+#define _A6XX_GMU_H_
+
+#include <linux/interrupt.h>
+#include "msm_drv.h"
+#include "a6xx_hfi.h"
+
+struct a6xx_gmu_bo {
+ void *virt;
+ size_t size;
+ u64 iova;
+ struct page **pages;
+};
+
+/*
+ * These define the different GMU wake up options - these define how both the
+ * CPU and the GMU bring up the hardware
+ */
+
+/* The GMU has already been booted and the retention registers are active */
+#define GMU_WARM_BOOT 0
+
+/* The GMU is coming up for the first time or back from a power collapse */
+#define GMU_COLD_BOOT 1
+
+/* The GMU is being soft reset after a fault */
+#define GMU_RESET 2
+
+/*
+ * These define the level of control that the GMU has - the higher the number
+ * the more things that the GMU hardware controls on its own.
+ */
+
+/* The GMU does not do any idle state management */
+#define GMU_IDLE_STATE_ACTIVE 0
+
+/* The GMU manages SPTP power collapse */
+#define GMU_IDLE_STATE_SPTP 2
+
+/* The GMU does automatic IFPC (intra-frame power collapse) */
+#define GMU_IDLE_STATE_IFPC 3
+
+struct a6xx_gmu {
+ struct device *dev;
+
+ void __iomem *mmio;
+ void __iomem *pdc_mmio;
+
+ int hfi_irq;
+ int gmu_irq;
+
+ struct regulator *gx;
+
+ struct iommu_domain *domain;
+ u64 uncached_iova_base;
+
+ int idle_level;
+
+ struct a6xx_gmu_bo *hfi;
+ struct a6xx_gmu_bo *debug;
+
+ int nr_clocks;
+ struct clk_bulk_data *clocks;
+ struct clk *core_clk;
+
+ int nr_gpu_freqs;
+ unsigned long gpu_freqs[16];
+ u32 gx_arc_votes[16];
+
+ int nr_gmu_freqs;
+ unsigned long gmu_freqs[4];
+ u32 cx_arc_votes[4];
+
+ struct a6xx_hfi_queue queues[2];
+
+ struct tasklet_struct hfi_tasklet;
+};
+
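+/* GMU register offsets are in dwords; the << 2 below converts to byte offsets */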
+static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->mmio + (offset << 2));
+}
+
+static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ return msm_writel(value, gmu->mmio + (offset << 2));
+}
+
+static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ return msm_writel(value, gmu->pdc_mmio + (offset << 2));
+}
+
+static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
+{
+ u32 val = gmu_read(gmu, reg);
+
+ val &= ~mask;
+
+ gmu_write(gmu, reg, val | or);
+}
+
+#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
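+
+/*
+ * Usage sketch (illustrative, not part of this patch): poll a GMU register
+ * until a bit comes up, checking every 100us and giving up after 5ms. The
+ * register/bit pair here was picked just for the example:
+ *
+ *	u32 val;
+ *
+ *	if (gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ *		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000))
+ *		return -ETIMEDOUT;	// condition never became true
+ */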
+
+/*
+ * These are the available OOB (out of band) requests to the GMU, where "out
+ * of band" means that the CPU talks to the GMU directly and not through HFI.
+ * Normally this works by writing an ITCM/DTCM register and then triggering an
+ * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
+ * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
+ *
+ * These are used to force the GMU/GPU to stay on during a critical sequence or
+ * for hardware workarounds.
+ */
+
+enum a6xx_gmu_oob_state {
+ GMU_OOB_BOOT_SLUMBER = 0,
+ GMU_OOB_GPU_SET,
+ GMU_OOB_DCVS_SET,
+};
+
+/*
+ * These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob() and a6xx_gmu_clear_oob()
+ */
+
+/*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
+#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
+#define GMU_OOB_BOOT_SLUMBER_ACK 30
+#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
+
+/*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
+#define GMU_OOB_DCVS_REQUEST 23
+#define GMU_OOB_DCVS_ACK 31
+#define GMU_OOB_DCVS_CLEAR 31
+
+/*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
+#define GMU_OOB_GPU_SET_REQUEST 16
+#define GMU_OOB_GPU_SET_ACK 24
+#define GMU_OOB_GPU_SET_CLEAR 24
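+
+/*
+ * Illustrative sketch (not part of this patch) of the handshake these bit
+ * numbers drive; the helper name below is an assumption - the real code is
+ * a6xx_gmu_set_oob()/a6xx_gmu_clear_oob() in a6xx_gmu.c:
+ *
+ *	static int oob_handshake(struct a6xx_gmu *gmu, u32 request, u32 ack)
+ *	{
+ *		u32 val;
+ *		int ret;
+ *
+ *		// Ring the GMU doorbell with the "request" bit
+ *		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+ *
+ *		// Wait for the GMU to raise the matching "ack" bit
+ *		ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO,
+ *			val, val & (1 << ack), 100, 10000);
+ *
+ *		// Clear the state by writing the "clear" bit (== ack here)
+ *		gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+ *		return ret;
+ *	}
+ */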
+
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu);
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+
+void a6xx_hfi_task(unsigned long data);
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
new file mode 100644
index 000000000000..ef68098d2adc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -0,0 +1,382 @@
+#ifndef A6XX_GMU_XML
+#define A6XX_GMU_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
+#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
+#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
+#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
+#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
+static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000
+#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24
+static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
+}
+#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
+#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
+
+#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
+
+#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00
+
+#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00
+
+#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0
+
+#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8
+
+#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9
+
+#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa
+
+#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc
+
+#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd
+
+#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe
+
+#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+
+#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
+
+#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
+
+#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001
+
+#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a
+
+#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c
+
+#define REG_A6XX_GMU_CM3_CFG 0x0000502d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK;
+}
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK;
+}
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1
+
+#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2
+
+#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080
+
+#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4
+#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001
+#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0
+#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4
+static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK;
+}
+
+#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8
+#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001
+#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010
+#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100
+#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200
+#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400
+#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800
+#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000
+#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000
+#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000
+#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000
+
+#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9
+
+#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+
+#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089
+
+#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3
+
+#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180
+
+#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181
+
+#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182
+
+#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183
+
+#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184
+
+#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185
+
+#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192
+#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001
+#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
+
+#define REG_A6XX_GMU_GENERAL_1 0x000051c6
+
+#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+
+#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
+
+#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d
+
+#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e
+
+#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310
+
+#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
+
+#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
+
+#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
+
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04
+
+#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
+
+#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312
+
+#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03
+
+#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a
+
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c
+
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00
+
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01
+
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80
+
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46
+
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae
+
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216
+
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e
+
+
+#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
new file mode 100644
index 000000000000..c629f742a1d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+ /* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
+ ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a6xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+}
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ /* Invalidate CCU depth and color */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
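+ /* fall through - submit as an ordinary command buffer */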
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ break;
+ }
+ }
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to memory and then trigger the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ a6xx_flush(gpu, ring);
+}
+
+static const struct {
+ u32 offset;
+ u32 value;
+} a6xx_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+};
+
+static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int i;
+ u32 val;
+
+ val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
+
+ /* Don't re-program the registers if they are already correct */
+ if ((!state && !val) || (state && (val == 0x8aa8aa02)))
+ return;
+
+ /* Disable SP clock before programming HWCG registers */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
+ gpu_write(gpu, a6xx_hwcg[i].offset,
+ state ? a6xx_hwcg[i].value : 0);
+
+ /* Enable SP clock */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
+}
+
+static int a6xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002f);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+
+ /* Pad rest of the cmds with 0's */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a6xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (!a6xx_gpu->sqe_bo) {
+ a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
+
+ if (IS_ERR(a6xx_gpu->sqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate SQE ucode: %d\n", ret);
+
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
+ REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);
+
+ return 0;
+}
+
+#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ /* Make sure the GMU keeps the GPU on while we set it up */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* enable hardware clockgating */
+ a6xx_set_hwcg(gpu, true);
+
+ /* VBIF start */
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
+
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+
+ /* Setting the mem pool size */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
+
+ /* Setting the primFifo thresholds default values */
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+
+ /* Set the AHB default slave response to "ERROR" */
+ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* FIXME: not sure if this should live here or in a6xx_gmu.c */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
+ 0xff000000);
+ gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
+ 0xff, 0x20);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
+ 0x01);
+
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0x1fffff);
+
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
+
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
+ A6XX_PROTECT_RDONLY(0x600, 0x51));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
+ A6XX_PROTECT_RDONLY(0xfc00, 0x3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
+ A6XX_PROTECT_RDONLY(0x0, 0x4f9));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
+ A6XX_PROTECT_RDONLY(0x501, 0xa));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
+ A6XX_PROTECT_RDONLY(0x511, 0x44));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
+ A6XX_PROTECT_RW(0xbe20, 0x11f3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
+ A6XX_PROTECT_RDONLY(0x8d0, 0x23));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
+ A6XX_PROTECT_RDONLY(0x980, 0x4));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ ret = a6xx_ucode_init(gpu);
+ if (ret)
+ goto out;
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+ /* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+
+ ret = a6xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+out:
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+static void a6xx_dump(struct msm_gpu *gpu)
+{
+ dev_info(&gpu->pdev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+#define VBIF_RESET_ACK_TIMEOUT 100
+#define VBIF_RESET_ACK_MASK 0x00f0
+
+static void a6xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++)
+ dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+
+ if (hang_debug)
+ a6xx_dump(gpu);
+
+ /*
+ * Turn off keep alive that might have been enabled by the hang
+ * interrupt
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->pm_resume(gpu);
+
+ msm_gpu_hw_init(gpu);
+}
+
+static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+ struct msm_gpu *gpu = arg;
+
+ pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
+
+ return -EFAULT;
+}
+
+static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
+
+ if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
+ val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A6XX_CP_INT_CP_UCODE_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP ucode error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
+
+ if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 20) ? "READ" : "WRITE",
+ (val & 0x3ffff), val);
+ }
+
+ if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
+
+ if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
+
+}
+
+static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->seqno : 0,
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a6xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a6xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_A6XX_CP_RB_RPTR_ADDR_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A6XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
+};
+
+static const u32 a6xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
+ 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
+ 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
+ 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
+ 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
+ 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
+ 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
+ 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
+ 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
+ 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
+ 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
+ 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
+ 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
+ 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
+ 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
+ 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
+ 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
+ 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
+ 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
+ 0xa610, 0xa617, 0xa630, 0xa630,
+ ~0
+};
+
+static int a6xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ ret = a6xx_gmu_resume(a6xx_gpu);
+
+ gpu->needs_hw_init = true;
+
+ return ret;
+}
+
+static int a6xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /*
+ * Make sure the GMU is idle before continuing (because some transitions
+ * may use VBIF)
+ */
+ a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Clear the VBIF pipe before shutting down */
+ /* FIXME: This accesses the GPU - do we need to make sure it is on? */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return a6xx_gmu_stop(a6xx_gpu);
+}
+
+static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ adreno_show(gpu, state, p);
+}
+#endif
+
+static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ return a6xx_gpu->cur_ring;
+}
+
+static void a6xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (a6xx_gpu->sqe_bo) {
+ if (a6xx_gpu->sqe_iova)
+ msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
+ }
+
+ a6xx_gmu_remove(a6xx_gpu);
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a6xx_gpu);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a6xx_hw_init,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .flush = a6xx_flush,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a6xx_show,
+#endif
+ },
+ .get_timestamp = a6xx_get_timestamp,
+};
+
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->reg_offsets = a6xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
+
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ ret = a6xx_gmu_probe(a6xx_gpu, node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
+ a6xx_fault_handler);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
new file mode 100644
index 000000000000..dd69e5b0e692
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef __A6XX_GPU_H__
+#define __A6XX_GPU_H__
+
+
+#include "adreno_gpu.h"
+#include "a6xx.xml.h"
+
+#include "a6xx_gmu.h"
+
+extern bool hang_debug;
+
+struct a6xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *sqe_bo;
+ uint64_t sqe_iova;
+
+ struct msm_ringbuffer *cur_ring;
+
+ struct a6xx_gmu gmu;
+};
+
+#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ */
+#define A6XX_PROTECT_RW(_reg, _len) \
+ ((1 << 31) | \
+ (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define A6XX_PROTECT_RDONLY(_reg, _len) \
+ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
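+
+/*
+ * Worked example (illustrative): A6XX_PROTECT_RW(0xae50, 0x2) expands to
+ * (1 << 31) | (0x2 << 18) | 0xae50 = 0x8008ae50 - block reads and writes
+ * for two registers starting at 0xae50.
+ */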
+
+
+int a6xx_gmu_resume(struct a6xx_gpu *gpu);
+int a6xx_gmu_stop(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
+
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
+#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
new file mode 100644
index 000000000000..f19ef4cb6ea4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/list.h>
+
+#include "a6xx_gmu.h"
+#include "a6xx_gmu.xml.h"
+
+#define HFI_MSG_ID(val) [val] = #val
+
+static const char * const a6xx_hfi_msg_id[] = {
+ HFI_MSG_ID(HFI_H2F_MSG_INIT),
+ HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
+ HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_TEST),
+};
+
+static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
+ u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, hdr, index = header->read_index;
+
+ if (header->read_index == header->write_index) {
+ header->rx_request = 1;
+ return 0;
+ }
+
+ hdr = queue->data[index];
+
+ /*
+ * If we are to assume that the GMU firmware is in fact a rational actor
+ * and is programmed to not send us a larger response than we expect
+ * then we can also assume that if the header size is unexpectedly large
+ * that it is due to memory corruption and/or hardware failure. In this
+ * case the only reasonable course of action is to BUG() to help harden
+ * the failure.
+ */
+
+ BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
+
+ for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
+ data[i] = queue->data[index];
+ index = (index + 1) % header->size;
+ }
+
+ header->read_index = index;
+ return HFI_HEADER_SIZE(hdr);
+}
+
+static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, space, index = header->write_index;
+
+ spin_lock(&queue->lock);
+
+ space = CIRC_SPACE(header->write_index, header->read_index,
+ header->size);
+ if (space < dwords) {
+ header->dropped++;
+ spin_unlock(&queue->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < dwords; i++) {
+ queue->data[index] = data[i];
+ index = (index + 1) % header->size;
+ }
+
+ header->write_index = index;
+ spin_unlock(&queue->lock);
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
+ return 0;
+}
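+
+/*
+ * Worked example (illustrative): with size = 64, write_index = 62 and
+ * read_index = 4, CIRC_SPACE(62, 4, 64) = 5 free slots, so a 4-dword
+ * message fits and lands at indices 62, 63, 0 and 1 via the modulo wrap
+ * above.
+ */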
+
+struct a6xx_hfi_response {
+ u32 id;
+ u32 seqnum;
+ struct list_head node;
+ struct completion complete;
+
+ u32 error;
+ u32 payload[16];
+};
+
+/*
+ * Incoming HFI ack messages can come in out of order so we need to store all
+ * the pending messages on a list until they are handled.
+ */
+static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
+static LIST_HEAD(hfi_ack_list);
+
+static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_response *resp;
+ u32 id, seqnum;
+
+ /* msg->ret_header contains the header of the message being acked */
+ id = HFI_HEADER_ID(msg->ret_header);
+ seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
+
+ spin_lock(&hfi_ack_lock);
+ list_for_each_entry(resp, &hfi_ack_list, node) {
+ if (resp->id == id && resp->seqnum == seqnum) {
+ resp->error = msg->error;
+ memcpy(resp->payload, msg->payload,
+ sizeof(resp->payload));
+
+ complete(&resp->complete);
+ spin_unlock(&hfi_ack_lock);
+ return;
+ }
+ }
+ spin_unlock(&hfi_ack_lock);
+
+ dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
+}
+
+static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
+
+ dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
+}
+
+void a6xx_hfi_task(unsigned long data)
+{
+ struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ struct a6xx_hfi_msg_response resp;
+
+ for (;;) {
+ u32 id;
+ int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
+
+ /* Returns the number of dwords copied or negative on error */
+ if (ret <= 0) {
+ if (ret < 0)
+ dev_err(gmu->dev,
+ "Unable to read the HFI message queue\n");
+ break;
+ }
+
+ id = HFI_HEADER_ID(resp.header);
+
+ if (id == HFI_F2H_MSG_ACK)
+ a6xx_hfi_handle_ack(gmu, &resp);
+ else if (id == HFI_F2H_MSG_ERROR)
+ a6xx_hfi_handle_error(gmu, &resp);
+ }
+}
+
+static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
+ void *data, u32 size, u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
+ struct a6xx_hfi_response resp = { 0 };
+ int ret, dwords = size >> 2;
+ u32 seqnum;
+
+ seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
+
+ /* First dword of the message is the message header - fill it in */
+ *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
+ (dwords << 8) | id;
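+
+ /*
+ * Field layout implied by the shifts above (widths are inferred from
+ * this file, not from a documented spec):
+ *
+ *	bits  0-7	message id
+ *	bits  8-15	size in dwords
+ *	bits 16-19	message type (HFI_MSG_CMD)
+ *	bits 20-31	sequence number
+ */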
+
+ init_completion(&resp.complete);
+ resp.id = id;
+ resp.seqnum = seqnum;
+
+ spin_lock_bh(&hfi_ack_lock);
+ list_add_tail(&resp.node, &hfi_ack_list);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ goto out;
+ }
+
+ /* Wait up to 5 seconds for the response */
+ ret = wait_for_completion_timeout(&resp.complete,
+ msecs_to_jiffies(5000));
+ if (!ret) {
+ dev_err(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ ret = -ETIMEDOUT;
+ } else
+ ret = 0;
+
+out:
+ spin_lock_bh(&hfi_ack_lock);
+ list_del(&resp.node);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ if (ret)
+ return ret;
+
+ if (resp.error) {
+ dev_err(gmu->dev, "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
+ }
+
+ if (payload && payload_size) {
+ int copy = min_t(u32, payload_size, sizeof(resp.payload));
+
+ memcpy(payload, resp.payload, copy);
+ }
+
+ return 0;
+}
+
+static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
+{
+ struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
+
+ msg.dbg_buffer_addr = (u32) gmu->debug->iova;
+ msg.dbg_buffer_size = (u32) gmu->debug->size;
+ msg.boot_state = boot_state;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
+{
+ struct a6xx_hfi_msg_fw_version msg = { 0 };
+
+ /* Currently supporting version 1.1 */
+ msg.supported_version = (1 << 28) | (1 << 16);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
+ version, sizeof(*version));
+}
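
Given the "version 1.1" encoding above, the major number appears to live at bit 28 and the minor at bit 16. A hypothetical decode of the version dword returned by the GMU, under that assumption only:

/* Hypothetical field layout inferred from the "(1 << 28) | (1 << 16)"
 * encoding above -- not taken from GMU documentation */
#define HFI_VERSION_MAJOR(v) (((v) >> 28) & 0xf)
#define HFI_VERSION_MINOR(v) (((v) >> 16) & 0xfff)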
+
+static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+
+ /*
+ * The sdm845 GMU doesn't do bus frequency scaling on its own but it
+ * does need at least one entry in the list because it might be accessed
+ * when the GMU is shutting down. Send a single "off" entry.
+ */
+
+ msg.bw_level_num = 1;
+
+ msg.ddr_cmds_num = 3;
+ msg.ddr_wait_bitmask = 0x07;
+
+ msg.ddr_cmds_addrs[0] = 0x50000;
+ msg.ddr_cmds_addrs[1] = 0x5005c;
+ msg.ddr_cmds_addrs[2] = 0x5000c;
+
+ msg.ddr_cmds_data[0][0] = 0x40000000;
+ msg.ddr_cmds_data[0][1] = 0x40000000;
+ msg.ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes. They are used, but the values for the
+ * sdm845 GMU are known and fixed, so we can hard-code them.
+ */
+
+ msg.cnoc_cmds_num = 3;
+ msg.cnoc_wait_bitmask = 0x05;
+
+ msg.cnoc_cmds_addrs[0] = 0x50034;
+ msg.cnoc_cmds_addrs[1] = 0x5007c;
+ msg.cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg.cnoc_cmds_data[0][0] = 0x40000000;
+ msg.cnoc_cmds_data[0][1] = 0x00000000;
+ msg.cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg.cnoc_cmds_data[1][0] = 0x60000001;
+ msg.cnoc_cmds_data[1][1] = 0x20000001;
+ msg.cnoc_cmds_data[1][2] = 0x60000001;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_test msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_get_fw_version(gmu, NULL);
+ if (ret)
+ return ret;
+
+ /*
+ * We have to exchange version numbers per the sequence, but at this
+ * point the kernel driver doesn't need to know the exact version of
+ * the GMU firmware.
+ */
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * Let the GMU know that there won't be any more HFI messages until the
+ * next boot.
+ */
+ a6xx_hfi_send_test(gmu);
+
+ return 0;
+}
+
+void a6xx_hfi_stop(struct a6xx_gmu *gmu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+
+ if (!queue->header)
+ continue;
+
+ if (queue->header->read_index != queue->header->write_index)
+ dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+
+ queue->header->read_index = 0;
+ queue->header->write_index = 0;
+ }
+}
+
+static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
+ struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
+ u32 id)
+{
+ spin_lock_init(&queue->lock);
+ queue->header = header;
+ queue->data = virt;
+ atomic_set(&queue->seqnum, 0);
+
+ /* Set up the shared memory header */
+ header->iova = iova;
+ header->type = 10 << 8 | id;
+ header->status = 1;
+ header->size = SZ_4K >> 2;
+ header->msg_size = 0;
+ header->dropped = 0;
+ header->rx_watermark = 1;
+ header->tx_watermark = 1;
+ header->rx_request = 1;
+ header->tx_request = 0;
+ header->read_index = 0;
+ header->write_index = 0;
+}
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gmu_bo *hfi = gmu->hfi;
+ struct a6xx_hfi_queue_table_header *table = hfi->virt;
+ struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
+ u64 offset;
+ int table_size;
+
+ /*
+ * The table size is the size of the table header plus all of the queue
+ * headers
+ */
+ table_size = sizeof(*table);
+ table_size += (ARRAY_SIZE(gmu->queues) *
+ sizeof(struct a6xx_hfi_queue_header));
+
+ table->version = 0;
+ table->size = table_size;
+ /* First queue header is located immediately after the table header */
+ table->qhdr0_offset = sizeof(*table) >> 2;
+ table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
+ table->num_queues = ARRAY_SIZE(gmu->queues);
+ table->active_queues = ARRAY_SIZE(gmu->queues);
+
+ /* Command queue */
+ offset = SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
+ hfi->iova + offset, 0);
+
+ /* GMU response queue */
+ offset += SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
+ hfi->iova + offset, 4);
+}
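
Taken together with a6xx_hfi_queue_init() above, the single GMU-shared buffer ends up laid out as follows. This is a summary of the offsets used in this function, assuming the hfi buffer spans at least three 4K pages:

/*
 * HFI buffer layout implied by a6xx_hfi_init():
 *
 *   0x0000  struct a6xx_hfi_queue_table_header
 *           struct a6xx_hfi_queue_header[2]  (command, response)
 *   0x1000  command queue data   (SZ_4K = 1024 dwords)
 *   0x2000  response queue data  (SZ_4K = 1024 dwords)
 */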
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
new file mode 100644
index 000000000000..60d1319fa44f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_HFI_H_
+#define _A6XX_HFI_H_
+
+struct a6xx_hfi_queue_table_header {
+ u32 version;
+ u32 size; /* Size of the queue table in dwords */
+ u32 qhdr0_offset; /* Offset of the first queue header */
+ u32 qhdr_size; /* Size of the queue headers */
+ u32 num_queues; /* Number of total queues */
+ u32 active_queues; /* Number of active queues */
+};
+
+struct a6xx_hfi_queue_header {
+ u32 status;
+ u32 iova;
+ u32 type;
+ u32 size;
+ u32 msg_size;
+ u32 dropped;
+ u32 rx_watermark;
+ u32 tx_watermark;
+ u32 rx_request;
+ u32 tx_request;
+ u32 read_index;
+ u32 write_index;
+};
+
+struct a6xx_hfi_queue {
+ struct a6xx_hfi_queue_header *header;
+ spinlock_t lock;
+ u32 *data;
+ atomic_t seqnum;
+};
+
+/* This is the outgoing queue to the GMU */
+#define HFI_COMMAND_QUEUE 0
+
+/* This is the incoming response queue from the GMU */
+#define HFI_RESPONSE_QUEUE 1
+
+#define HFI_HEADER_ID(msg) ((msg) & 0xff)
+#define HFI_HEADER_SIZE(msg) (((msg) >> 8) & 0xff)
+#define HFI_HEADER_SEQNUM(msg) (((msg) >> 20) & 0xfff)
+
+/* FIXME: Do we need this or can we use ARRAY_SIZE? */
+#define HFI_RESPONSE_PAYLOAD_SIZE 16
+
+/* HFI message types */
+
+#define HFI_MSG_CMD 0
+#define HFI_MSG_ACK 2
+
+#define HFI_F2H_MSG_ACK 126
+
+struct a6xx_hfi_msg_response {
+ u32 header;
+ u32 ret_header;
+ u32 error;
+ u32 payload[HFI_RESPONSE_PAYLOAD_SIZE];
+};
+
+#define HFI_F2H_MSG_ERROR 100
+
+struct a6xx_hfi_msg_error {
+ u32 header;
+ u32 code;
+ u32 payload[2];
+};
+
+#define HFI_H2F_MSG_INIT 0
+
+struct a6xx_hfi_msg_gmu_init_cmd {
+ u32 header;
+ u32 seg_id;
+ u32 dbg_buffer_addr;
+ u32 dbg_buffer_size;
+ u32 boot_state;
+};
+
+#define HFI_H2F_MSG_FW_VERSION 1
+
+struct a6xx_hfi_msg_fw_version {
+ u32 header;
+ u32 supported_version;
+};
+
+#define HFI_H2F_MSG_PERF_TABLE 4
+
+struct perf_level {
+ u32 vote;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+#define HFI_H2F_MSG_BW_TABLE 3
+
+struct a6xx_hfi_msg_bw_table {
+ u32 header;
+ u32 bw_level_num;
+ u32 cnoc_cmds_num;
+ u32 ddr_cmds_num;
+ u32 cnoc_wait_bitmask;
+ u32 ddr_wait_bitmask;
+ u32 cnoc_cmds_addrs[6];
+ u32 cnoc_cmds_data[2][6];
+ u32 ddr_cmds_addrs[8];
+ u32 ddr_cmds_data[16][8];
+};
+
+#define HFI_H2F_MSG_TEST 5
+
+struct a6xx_hfi_msg_test {
+ u32 header;
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index b634cf71352b..5dace1350810 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -44,6 +46,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+enum chip {
+ A2XX = 0,
+ A3XX = 0,
+ A4XX = 0,
+ A5XX = 0,
+ A6XX = 0,
+};
+
enum adreno_pa_su_sc_draw {
PC_DRAW_POINTS = 0,
PC_DRAW_LINES = 1,
@@ -181,6 +191,12 @@ enum a3xx_rb_blend_opcode {
BLEND_MAX_DST_SRC = 4,
};
+enum a4xx_tess_spacing {
+ EQUAL_SPACING = 0,
+ ODD_SPACING = 2,
+ EVEN_SPACING = 3,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 0ae5ace65462..7d3e9a129ac7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -35,6 +35,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 6, 0),
@@ -45,6 +46,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
@@ -55,6 +57,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 3, 0, ANY_ID),
@@ -65,6 +68,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
@@ -75,6 +79,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 3, 0, ANY_ID),
@@ -85,6 +90,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
@@ -96,10 +102,25 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
},
.gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 3, 0, ANY_ID),
+ .revn = 630,
+ .name = "A630",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .init = a6xx_gpu_init,
},
};
@@ -116,6 +137,8 @@ MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a630_sqe.fw");
+MODULE_FIRMWARE("qcom/a630_gmu.bin");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -144,6 +167,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu *gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
int ret;
if (pdev)
@@ -154,11 +178,31 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
- pm_runtime_get_sync(&pdev->dev);
+ adreno_gpu = to_adreno_gpu(gpu);
+
+ /*
+ * The number one reason for HW init to fail is the firmware not being
+ * loaded yet. Try loading it first and don't bother continuing
+ * otherwise.
+ */
+
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
+ return NULL;
+
+ /* Make sure pm runtime is active and reset any previous errors */
+ pm_runtime_set_active(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ return NULL;
+ }
+
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
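
The reordering above follows the usual runtime-PM bring-up shape: resolve anything that can fail cheaply first, then hold a usage-count reference for the duration of the hardware access. A minimal sketch of that pairing, assuming a device already configured for autosuspend; the put_noidle on failure is the conventional balance, not something this hunk adds:

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* get_sync() bumps the usage count even on failure */
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... program the hardware ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);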
@@ -316,6 +360,7 @@ static int adreno_suspend(struct device *dev)
#endif
static const struct dev_pm_ops adreno_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 17d0506d058c..da1363a0c54d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,7 +17,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/ascii85.h>
+#include <linux/kernel.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -70,10 +73,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
struct drm_device *drm = adreno_gpu->base.dev;
const struct firmware *fw = NULL;
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
int ret;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname)
+ return ERR_PTR(-ENOMEM);
/*
* Try first to load from qcom/$fwfile using a direct load (to avoid
@@ -87,11 +92,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -106,11 +112,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
fwname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -126,19 +133,23 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
dev_err(drm->dev, "failed to load %s\n", fwname);
- return ERR_PTR(-ENOENT);
+ fw = ERR_PTR(-ENOENT);
+out:
+ kfree(newname);
+ return fw;
}
-static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
int i;
@@ -368,40 +379,185 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return false;
}
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count = 0;
+
+ kref_init(&state->ref);
+
+ ktime_get_real_ts64(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int size = 0, j;
+
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].iova = gpu->rb[i]->iova;
+ state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+ /* Copy at least 'wptr' dwords of the data */
+ size = state->ring[i].wptr;
+
+ /* After wptr, find the last non-zero dword to save space */
+ for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+ if (gpu->rb[i]->start[j])
+ size = j + 1;
+
+ if (size) {
+ state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ if (state->ring[i].data) {
+ memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data_size = size << 2;
+ }
+ }
+ }
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 start = adreno_gpu->registers[i];
+ u32 end = adreno_gpu->registers[i + 1];
+ u32 addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
int i;
- seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+ kfree(state->ring[i].data);
+
+ for (i = 0; state->bos && i < state->nr_bos; i++)
+ kvfree(state->bos[i].data);
+
+ kfree(state->bos);
+ kfree(state->comm);
+ kfree(state->cmd);
+ kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+
+ adreno_gpu_state_destroy(state);
+ kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
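
The state object is therefore reference counted from birth: adreno_gpu_state_get() leaves it at refcount one via kref_init(), and the final adreno_gpu_state_put() triggers the kref destroy path. A usage sketch, assuming a gpu pointer in scope and a heap-allocated state (the destroy path kfree()s it):

        struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        adreno_gpu_state_get(gpu, state);   /* refcount == 1 */
        kref_get(&state->ref);              /* share with a second user */

        adreno_gpu_state_put(state);        /* first user done, state lives on */
        adreno_gpu_state_put(state);        /* last put frees everything */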
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+{
+ char out[ASCII85_BUFSZ];
+ long l, datalen, i;
+
+ if (!ptr || !len)
+ return;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will any data
+ * completely fill the entire allocated size of the buffer
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++) {
+ if (ptr[i])
+ datalen = (i << 2) + 1;
+ }
+
+ /* Skip printing the object if it is empty */
+ if (datalen == 0)
+ return;
+
+ l = ascii85_encode_len(datalen);
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(ptr[i], out));
+
+ drm_puts(p, "\n");
+}
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- for (i = 0; i < gpu->nr_rings; i++) {
- struct msm_ringbuffer *ring = gpu->rb[i];
+ drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
- seq_printf(m, "rb %d: fence: %d/%d\n", i,
- ring->memptrs->fence, ring->seqno);
+ drm_puts(p, "ringbuffer:\n");
- seq_printf(m, " rptr: %d\n",
- get_rptr(adreno_gpu, ring));
- seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ drm_printf(p, " - id: %d\n", i);
+ drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
+ drm_printf(p, " last-fence: %d\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %d\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %d\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
+ drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
+
+ adreno_show_object(p, state->ring[i].data,
+ state->ring[i].data_size);
}
- /* dump these out in a form that can be parsed by demsm: */
- seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
- for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
- uint32_t start = adreno_gpu->registers[i];
- uint32_t end = adreno_gpu->registers[i+1];
- uint32_t addr;
+ if (state->bos) {
+ drm_puts(p, "bos:\n");
- for (addr = start; addr <= end; addr++) {
- uint32_t val = gpu_read(gpu, addr);
- seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ for (i = 0; i < state->nr_bos; i++) {
+ drm_printf(p, " - iova: 0x%016llx\n",
+ state->bos[i].iova);
+ drm_printf(p, " size: %zd\n", state->bos[i].size);
+
+ adreno_show_object(p, state->bos[i].data,
+ state->bos[i].size);
}
}
+
+ drm_puts(p, "registers:\n");
+
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
#endif
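
The dump format above is deliberately YAML-friendly: each buffer becomes a "data: !!ascii85" block. ascii85_encode() comes from the new <linux/ascii85.h> helper, which emits up to five characters per 32-bit word and compresses an all-zero word to a single 'z'. A hypothetical user-space decoder for one encoded group, assuming standard Ascii85 with base-85 digits offset from '!':

#include <stdint.h>

static uint32_t ascii85_decode_word(const char *s)
{
        uint32_t v = 0;
        int i;

        /* 'z' is shorthand for an all-zero word */
        if (s[0] == 'z')
                return 0;

        for (i = 0; i < 5; i++)
                v = v * 85 + (uint32_t)(s[i] - '!');

        return v;
}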
@@ -565,7 +721,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_get_pwrlevels(&pdev->dev, gpu);
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index d6b0e7b813f4..de6e6ee42fba 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -50,7 +50,9 @@ enum adreno_regs {
enum {
ADRENO_FW_PM4 = 0,
+ ADRENO_FW_SQE = 0, /* a6xx */
ADRENO_FW_PFP = 1,
+ ADRENO_FW_GMU = 1, /* a6xx */
ADRENO_FW_GPMU = 2,
ADRENO_FW_MAX,
};
@@ -84,6 +86,7 @@ struct adreno_info {
enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
+ u32 inactive_period;
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -214,8 +217,9 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
@@ -226,7 +230,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+int adreno_load_fw(struct adreno_gpu *adreno_gpu);
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
/* ringbuffer helpers (the parts that are adreno specific) */
@@ -328,6 +337,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
enum adreno_regs lo, enum adreno_regs hi, u64 data)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index fb605a3534cf..03a91e10b310 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -71,7 +73,8 @@ enum vgt_event_type {
FLUSH_SO_1 = 18,
FLUSH_SO_2 = 19,
FLUSH_SO_3 = 20,
- UNK_19 = 25,
+ PC_CCU_INVALIDATE_DEPTH = 24,
+ PC_CCU_INVALIDATE_COLOR = 25,
UNK_1C = 28,
UNK_1D = 29,
BLIT = 30,
@@ -199,9 +202,12 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS_INDIRECT = 65,
CP_EXEC_CS = 51,
CP_PERFCOUNTER_ACTION = 80,
CP_SMMU_TABLE_UPDATE = 83,
+ CP_SET_MARKER = 101,
+ CP_SET_PSEUDO_REG = 86,
CP_CONTEXT_REG_BUNCH = 92,
CP_YIELD_ENABLE = 28,
CP_SKIP_IB2_ENABLE_GLOBAL = 29,
@@ -215,7 +221,10 @@ enum adreno_pm4_type3_packets {
CP_COMPUTE_CHECKPOINT = 110,
CP_MEM_TO_MEM = 115,
CP_BLIT = 44,
- CP_UNK_39 = 57,
+ CP_REG_TEST = 57,
+ CP_SET_MODE = 99,
+ CP_LOAD_STATE6_GEOM = 50,
+ CP_LOAD_STATE6_FRAG = 52,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -224,6 +233,11 @@ enum adreno_pm4_type3_packets {
IN_INCR_UPDT_STATE = 85,
IN_INCR_UPDT_CONST = 86,
IN_INCR_UPDT_INSTR = 87,
+ PKT4 = 4,
+ CP_UNK_A6XX_14 = 20,
+ CP_UNK_A6XX_36 = 54,
+ CP_UNK_A6XX_55 = 85,
+ UNK_A6XX_6D = 109,
};
enum adreno_state_block {
@@ -278,6 +292,33 @@ enum a4xx_state_src {
SS4_INDIRECT = 2,
};
+enum a6xx_state_block {
+ SB6_VS_TEX = 0,
+ SB6_HS_TEX = 1,
+ SB6_DS_TEX = 2,
+ SB6_GS_TEX = 3,
+ SB6_FS_TEX = 4,
+ SB6_CS_TEX = 5,
+ SB6_VS_SHADER = 8,
+ SB6_HS_SHADER = 9,
+ SB6_DS_SHADER = 10,
+ SB6_GS_SHADER = 11,
+ SB6_FS_SHADER = 12,
+ SB6_CS_SHADER = 13,
+ SB6_SSBO = 14,
+ SB6_CS_SSBO = 15,
+};
+
+enum a6xx_state_type {
+ ST6_SHADER = 0,
+ ST6_CONSTANTS = 1,
+};
+
+enum a6xx_state_src {
+ SS6_DIRECT = 0,
+ SS6_INDIRECT = 2,
+};
+
enum a4xx_index_size {
INDEX4_SIZE_8_BIT = 0,
INDEX4_SIZE_16_BIT = 1,
@@ -300,6 +341,7 @@ enum render_mode_cmd {
GMEM = 3,
BLIT2D = 5,
BLIT2DSCALE = 7,
+ END2D = 8,
};
enum cp_blit_cmd {
@@ -308,6 +350,22 @@ enum cp_blit_cmd {
BLIT_OP_SCALE = 3,
};
+enum a6xx_render_mode {
+ RM6_BYPASS = 1,
+ RM6_BINNING = 2,
+ RM6_GMEM = 4,
+ RM6_BLIT2D = 5,
+ RM6_RESOLVE = 6,
+};
+
+enum pseudo_reg {
+ SMMU_INFO = 0,
+ NON_SECURE_SAVE_ADDR = 1,
+ SECURE_SAVE_ADDR = 2,
+ NON_PRIV_SAVE_ADDR = 3,
+ COUNTER = 4,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -349,7 +407,7 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
}
#define REG_CP_LOAD_STATE4_0 0x00000000
-#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff
#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
{
@@ -396,6 +454,54 @@ static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
}
+#define REG_CP_LOAD_STATE6_0 0x00000000
+#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x00004000
+#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
+static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_1 0x00000001
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_2 0x00000002
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
+}
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -580,6 +686,153 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
+#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK;
+}
+
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
+}
+
static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
@@ -593,6 +846,12 @@ static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK 0x00f00000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT 20
+static inline uint32_t CP_SET_DRAW_STATE__0_ENABLE_MASK(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT) & CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK;
+}
#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
@@ -708,6 +967,22 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
}
+#define REG_CP_SET_BIN_DATA5_5 0x00000005
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_6 0x00000006
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
+}
+
#define REG_CP_REG_TO_MEM_0 0x00000000
#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
#define CP_REG_TO_MEM_0_REG__SHIFT 0
@@ -732,6 +1007,46 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_REG_TO_MEM_2 0x00000002
+#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_0 0x00000000
+#define CP_MEM_TO_REG_0_REG__MASK 0x0000ffff
+#define CP_MEM_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK;
+}
+#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000
+#define CP_MEM_TO_REG_0_CNT__SHIFT 19
+static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
+}
+#define CP_MEM_TO_REG_0_64B 0x40000000
+#define CP_MEM_TO_REG_0_ACCUMULATE 0x80000000
+
+#define REG_CP_MEM_TO_REG_1 0x00000001
+#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
+#define CP_MEM_TO_REG_1_SRC__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_2 0x00000002
+#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff
+#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK;
+}
+
#define REG_CP_MEM_TO_MEM_0 0x00000000
#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
@@ -953,15 +1268,15 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
-
-#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val)
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val)
{
- return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK;
+ return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
+
#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
@@ -978,6 +1293,8 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007
+
#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
@@ -1032,13 +1349,13 @@ static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
}
#define REG_CP_BLIT_1 0x00000001
-#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff
+#define CP_BLIT_1_SRC_X1__MASK 0x00003fff
#define CP_BLIT_1_SRC_X1__SHIFT 0
static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
{
return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
}
-#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000
+#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000
#define CP_BLIT_1_SRC_Y1__SHIFT 16
static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
{
@@ -1046,13 +1363,13 @@ static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
}
#define REG_CP_BLIT_2 0x00000002
-#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff
+#define CP_BLIT_2_SRC_X2__MASK 0x00003fff
#define CP_BLIT_2_SRC_X2__SHIFT 0
static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
{
return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
}
-#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000
+#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000
#define CP_BLIT_2_SRC_Y2__SHIFT 16
static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
{
@@ -1060,13 +1377,13 @@ static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
}
#define REG_CP_BLIT_3 0x00000003
-#define CP_BLIT_3_DST_X1__MASK 0x0000ffff
+#define CP_BLIT_3_DST_X1__MASK 0x00003fff
#define CP_BLIT_3_DST_X1__SHIFT 0
static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
{
return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
}
-#define CP_BLIT_3_DST_Y1__MASK 0xffff0000
+#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000
#define CP_BLIT_3_DST_Y1__SHIFT 16
static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
{
@@ -1074,13 +1391,13 @@ static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
}
#define REG_CP_BLIT_4 0x00000004
-#define CP_BLIT_4_DST_X2__MASK 0x0000ffff
+#define CP_BLIT_4_DST_X2__MASK 0x00003fff
#define CP_BLIT_4_DST_X2__SHIFT 0
static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
{
return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
}
-#define CP_BLIT_4_DST_Y2__MASK 0xffff0000
+#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000
#define CP_BLIT_4_DST_Y2__SHIFT 16
static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
{
@@ -1113,5 +1430,129 @@ static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
}
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
+
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
+}
+
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
+}
+
+#define REG_A2XX_CP_SET_MARKER_0 0x00000000
+#define A2XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MARKER__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MARKER(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MARKER__SHIFT) & A2XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_MODE__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MODE__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MODE__SHIFT) & A2XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_IFPC 0x00000100
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A2XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A2XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
+
+#define REG_A2XX_CP_REG_TEST_0 0x00000000
+#define A2XX_CP_REG_TEST_0_REG__MASK 0x00000fff
+#define A2XX_CP_REG_TEST_0_REG__SHIFT 0
+static inline uint32_t A2XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_REG__SHIFT) & A2XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A2XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
+#define A2XX_CP_REG_TEST_0_BIT__SHIFT 20
+static inline uint32_t A2XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_BIT__SHIFT) & A2XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A2XX_CP_REG_TEST_0_UNK25 0x02000000
+
#endif /* ADRENO_PM4_XML */
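
The generated helpers compose packet dwords by shifting each value into its masked field and OR-ing the results together. For example, the first dword of a hypothetical CP_LOAD_STATE6 packet that loads four units of fragment shader state from an indirect buffer would be built as:

        uint32_t dword0 =
                CP_LOAD_STATE6_0_DST_OFF(0) |
                CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
                CP_LOAD_STATE6_0_NUM_UNIT(4);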
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 000000000000..879c13fe74e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+ struct dpu_kms *dpu_kms = arg;
+ struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+
+ pr_debug("irq_idx=%d\n", irq_idx);
+
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+ DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+ }
+
+ atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+ /*
+ * Invoke the registered callbacks for this interrupt
+ */
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ /*
+ * Clear pending interrupt status in HW.
+ * NOTE: dpu_core_irq_callback_handler is protected by top-level
+ * spinlock, so it is safe to clear any interrupt status here.
+ */
+ dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+ dpu_kms->hw_intr,
+ irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type, u32 instance_idx)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->irq_obj.enable_counts ||
+ !dpu_kms->irq_obj.irq_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+ if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+ ret = dpu_kms->hw_intr->ops.enable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ /* empty callback list but interrupt is enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n",
+ irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+ if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+ ret = dpu_kms->hw_intr->ops.disable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts == 2)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.get_interrupt_status)
+ return 0;
+
+ if (irq_idx < 0) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
+ return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+ irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
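+
+/*
+ * Example (illustrative sketch; the client structure and handler are
+ * hypothetical): callbacks are embedded list nodes owned by the caller
+ * and must stay valid until unregistered.
+ */
+struct example_client {
+ struct dpu_irq_callback irq_cb; /* must outlive registration */
+ int irq_idx;
+};
+
+static void example_irq_cb(void *arg, int irq_idx)
+{
+ struct example_client *client = arg;
+
+ /* invoked from dpu_core_irq_callback_handler with cb_lock held */
+ (void)client;
+}
+
+static int __maybe_unused example_register(struct dpu_kms *dpu_kms,
+ struct example_client *client)
+{
+ client->irq_cb.func = example_irq_cb;
+ client->irq_cb.arg = client;
+ INIT_LIST_HEAD(&client->irq_cb.list);
+
+ return dpu_core_irq_register_callback(dpu_kms, client->irq_idx,
+ &client->irq_cb);
+}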
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
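+
+/*
+ * For reference, the invocation DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq)
+ * below expands to roughly the following:
+ *
+ *	static int dpu_debugfs_core_irq_open(struct inode *inode,
+ *			struct file *file)
+ *	{
+ *		return single_open(file, dpu_debugfs_core_irq_show,
+ *				inode->i_private);
+ *	}
+ *	static const struct file_operations dpu_debugfs_core_irq_fops = {
+ *		.owner = THIS_MODULE,
+ *		.open = dpu_debugfs_core_irq_open,
+ *		.release = single_release,
+ *		.read = seq_read,
+ *		.llseek = seq_lseek,
+ *	};
+ */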
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_irq *irq_obj = s->private;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, enable_count, cb_count;
+
+ if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ for (i = 0; i < irq_obj->total_irqs; i++) {
+ spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&irq_obj->irq_counts[i]);
+ enable_count = atomic_read(&irq_obj->enable_counts[i]);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+ if (irq_count || enable_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+ i, irq_count, enable_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+ parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
+
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+ dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+ /* Create irq callbacks for all possible irq_idx */
+ dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+ dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+ INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+ atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+ atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+ }
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+ return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+ if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+ !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ kfree(dpu_kms->irq_obj.irq_cb_tbl);
+ kfree(dpu_kms->irq_obj.enable_counts);
+ kfree(dpu_kms->irq_obj.irq_counts);
+ dpu_kms->irq_obj.irq_cb_tbl = NULL;
+ dpu_kms->irq_obj.enable_counts = NULL;
+ dpu_kms->irq_obj.irq_counts = NULL;
+ dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+ /*
+ * Read interrupt status from all sources. Interrupt status is
+ * stored within hw_intr.
+ * This call also clears the interrupt status after reading.
+ * An individual interrupt status bit is only stored if that
+ * interrupt is enabled.
+ */
+ dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+ /*
+ * Dispatch to the HW driver to look up the interrupt that has
+ * fired. When a matching interrupt is located, the HW driver calls
+ * dpu_core_irq_callback_handler with the irq_idx from the lookup table.
+ * dpu_core_irq_callback_handler invokes the registered function
+ * callbacks, and clears the interrupt status once the registered
+ * callbacks have finished.
+ */
+ dpu_kms->hw_intr->ops.dispatch_irqs(
+ dpu_kms->hw_intr,
+ dpu_core_irq_callback_handler,
+ dpu_kms);
+
+ return IRQ_HANDLED;
+}
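
dpu_core_irq() above is intended to be called from the device's top-level
interrupt handler. A minimal sketch of that wiring, with a hypothetical
handler name (in this driver the actual hook lives in the kms layer):

	static irqreturn_t example_top_level_isr(int irq, void *data)
	{
		struct dpu_kms *dpu_kms = data;

		/* reads, dispatches, and clears all enabled IRQ sources */
		return dpu_core_irq(dpu_kms);
	}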
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000000..5e98bba46af5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function for looking up irq_idx in
+ * the HW interrupt mapping table.
+ * @dpu_kms: DPU handle
+ * @intr_type: DPU HW interrupt type for lookup
+ * @instance_idx: DPU HW block instance defined in dpu_hw_mdss.h
+ * @return: irq_idx, or -EINVAL when the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+ struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success enabling the IRQs, otherwise failure
+ *
+ * This function increments a count on each enable and decrements it on each
+ * disable. The interrupt is enabled in HW if the count is 0 before the increment.
+ */
+int dpu_core_irq_enable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success disabling the IRQs, otherwise failure
+ *
+ * This function increments a count on each enable and decrements it on each
+ * disable. The interrupt is disabled in HW if the count is 0 after the decrement.
+ */
+int dpu_core_irq_disable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if an irq is detected; zero if no irq is detected
+ */
+u32 dpu_core_irq_read(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ bool clear);
+
+/**
+ * dpu_core_irq_register_callback - register a callback function for the
+ * given IRQ interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb is rejected with
+ * -EINVAL; the structure must remain valid until it is
+ * unregistered.
+ * @return: 0 on success registering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - unregister a callback function for the
+ * given IRQ interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb is rejected with
+ * -EINVAL; this must match the structure passed at
+ * registration.
+ * @return: 0 on success unregistering the callback, otherwise failure
+ *
+ * Any one of the multiple callbacks registered for an interrupt may be
+ * unregistered independently.
+ */
+int dpu_core_irq_unregister_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
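
Putting the header's API together: a typical client looks up the index,
registers its callback, then enables the interrupt, unwinding in reverse
order on failure. A sketch under those assumptions (function name
hypothetical):

	static int example_attach_irq(struct dpu_kms *dpu_kms,
			enum dpu_intr_type type, u32 instance_idx,
			struct dpu_irq_callback *cb)
	{
		int irq_idx, ret;

		irq_idx = dpu_core_irq_idx_lookup(dpu_kms, type, instance_idx);
		if (irq_idx < 0)
			return irq_idx;

		ret = dpu_core_irq_register_callback(dpu_kms, irq_idx, cb);
		if (ret)
			return ret;

		ret = dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
		if (ret)
			dpu_core_irq_unregister_callback(dpu_kms, irq_idx, cb);

		return ret;
	}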
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000000..41c5191f9056
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE 128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+ DPU_PERF_MODE_NORMAL,
+ DPU_PERF_MODE_MINIMUM,
+ DPU_PERF_MODE_FIXED,
+ DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return NULL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+ return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ bool intf_connected = false;
+
+ if (!crtc)
+ goto end;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+ _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ DPU_DEBUG("video interface connected crtc:%d\n",
+ tmp_crtc->base.id);
+ intf_connected = true;
+ goto end;
+ }
+ }
+
+end:
+ return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+
+ if (!kms || !kms->catalog || !crtc || !state || !perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_cstate = to_dpu_crtc_state(state);
+ memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+ if (!dpu_cstate->bw_control) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+ 1000ULL;
+ perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+ }
+ perf->core_clk_rate = kms->perf.max_core_clk_rate;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = 0;
+ perf->max_per_pipe_ib[i] = 0;
+ }
+ perf->core_clk_rate = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+ }
+ perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ }
+
+ DPU_DEBUG(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ enum dpu_crtc_client_type curr_client_type;
+ bool is_video_mode;
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+ return 0;
+
+ dpu_cstate = to_dpu_crtc_state(state);
+
+ /* obtain new values */
+ _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+ curr_client_type = dpu_crtc_get_client_type(crtc);
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (dpu_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) &&
+ (tmp_crtc != crtc)) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
+
+ DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id,
+ tmp_cstate->new_perf.bw_ctl[i],
+ tmp_cstate->bw_control);
+ /*
+ * For bw check only use the bw if the
+ * atomic property has already been set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
+ }
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+ is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+ threshold = (is_video_mode ||
+ _dpu_core_video_mode_intf_connected(crtc)) ?
+ kms->catalog->perf.max_bw_low :
+ kms->catalog->perf.max_bw_high;
+
+ DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+ if (!dpu_cstate->bw_control) {
+ DPU_DEBUG("bypass bandwidth check\n");
+ } else if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
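+
+/*
+ * Worked instance of the threshold check above, with illustrative
+ * numbers only: two command-mode crtcs of the same client type voting
+ * 1.5 GB/s and 0.9 GB/s on one bus sum to 2.4 GB/s, i.e. 2400000 kB/s.
+ */
+static int __maybe_unused example_bw_check(void)
+{
+ u64 bw_sum_of_intfs = 1500000000ULL + 900000000ULL; /* bytes/sec */
+ u32 bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000); /* 2400000 kB/s */
+ u32 threshold = 6800000; /* e.g. catalog max_bw_high, in kB/s */
+
+ if (bw > threshold) /* 2400000 <= 6800000, so this example passes */
+ return -E2BIG;
+
+ return 0;
+}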
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+ struct drm_crtc *crtc, u32 bus_id)
+{
+ struct dpu_core_perf_params perf = { { 0 } };
+ enum dpu_crtc_client_type curr_client_type
+ = dpu_crtc_get_client_type(crtc);
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int ret = 0;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ curr_client_type ==
+ dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf.max_per_pipe_ib[bus_id] =
+ max(perf.max_per_pipe_ib[bus_id],
+ dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+ DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+ tmp_crtc->base.id, bus_id,
+ dpu_cstate->new_perf.bw_ctl[bus_id]);
+ }
+ }
+ return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Function checks a state variable for the crtc; if all pending commit
+ * requests are done, meaning no more bandwidth is needed, the bandwidth
+ * request is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ /* only do this for command mode rt client */
+ if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ return;
+
+ /*
+ * If a video interface is present, cmd panel bandwidth cannot be
+ * released. The early return above already guarantees this crtc
+ * is in command mode, so only scan the other crtcs for video mode.
+ */
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ dpu_crtc_get_intf_mode(tmp_crtc) ==
+ INTF_MODE_VIDEO)
+ return;
+ }
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_dpu_cmd_release_bw(crtc->base.id);
+ DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ dpu_crtc->cur_perf.bw_ctl[i] = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ }
+ }
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+ struct dss_clk *core_clk = kms->perf.core_clk;
+
+ if (core_clk->max_rate && (rate > core_clk->max_rate))
+ rate = core_clk->max_rate;
+
+ core_clk->rate = rate;
+ return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+ u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+ struct drm_crtc *crtc;
+ struct dpu_crtc_state *dpu_cstate;
+
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+ clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+ clk_rate);
+ }
+ }
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ clk_rate = kms->perf.fix_core_clk_rate;
+
+ DPU_DEBUG("clk:%llu\n", clk_rate);
+
+ return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req)
+{
+ struct dpu_core_perf_params *new, *old;
+ int update_bus = 0, update_clk = 0;
+ u64 clk_rate = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ priv = kms->dev->dev_private;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+ crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+ old = &dpu_crtc->cur_perf;
+ new = &dpu_cstate->new_perf;
+
+ if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ /*
+ * Cases for a bus bandwidth update:
+ * 1. new bandwidth vote - the "ab or ib vote" is higher
+ * than the current vote, for an update request.
+ * 2. new bandwidth vote - the "ab or ib vote" is lower
+ * than the current vote, at the end of commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl[i] >
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] >
+ old->max_per_pipe_ib[i]))) ||
+ (!params_changed && ((new->bw_ctl[i] <
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] <
+ old->max_per_pipe_ib[i])))) {
+ DPU_DEBUG(
+ "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl[i], old->bw_ctl[i]);
+ old->bw_ctl[i] = new->bw_ctl[i];
+ old->max_per_pipe_ib[i] =
+ new->max_per_pipe_ib[i];
+ update_bus |= BIT(i);
+ }
+ }
+
+ if ((params_changed &&
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = 1;
+ }
+ } else {
+ DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+ memset(old, 0, sizeof(*old));
+ memset(new, 0, sizeof(*new));
+ update_bus = ~0;
+ update_clk = 1;
+ }
+ trace_dpu_perf_crtc_update(crtc->base.id,
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->core_clk_rate, stop_req,
+ update_bus, update_clk);
+
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (update_bus & BIT(i)) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+ crtc->base.id, i);
+ return ret;
+ }
+ }
+ }
+
+ /*
+ * Update the clock after bandwidth vote to ensure
+ * bandwidth is available before clock rate is increased.
+ */
+ if (update_clk) {
+ clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+ trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+ ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+ if (ret) {
+ DPU_ERROR("failed to set %s clock rate %llu\n",
+ kms->perf.core_clk->clk_name, clk_rate);
+ return ret;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+ }
+ return 0;
+}
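+
+/*
+ * The bandwidth-vote condition in the loop above reduces to: vote
+ * immediately when a parameter change raises the ab/ib request, and
+ * only after the commit completes when it lowers them. The same
+ * predicate, factored for readability (illustrative sketch only):
+ */
+static bool __maybe_unused example_bw_vote_needed(bool params_changed,
+ u64 new_bw, u64 old_bw, u64 new_ib, u64 old_ib)
+{
+ if (params_changed) /* pre-commit: raise votes up front */
+ return (new_bw > old_bw) || (new_ib > old_ib);
+
+ /* end of commit or stop: drop votes only once it is safe */
+ return (new_bw < old_bw) || (new_ib < old_ib);
+}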
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ struct dpu_perf_cfg *cfg;
+ u32 perf_mode = 0;
+ char buf[10];
+
+ if (!perf)
+ return -ENODEV;
+
+ /* only dereference perf after the NULL check above */
+ cfg = &perf->catalog->perf;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtouint(buf, 0, &perf_mode))
+ return -EINVAL;
+
+ if (perf_mode >= DPU_PERF_MODE_MAX)
+ return -EINVAL;
+
+ if (perf_mode == DPU_PERF_MODE_FIXED) {
+ DRM_INFO("fix performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+ /* run the driver with max clk and BW vote */
+ perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+ perf->perf_tune.min_bus_vote =
+ (u64) cfg->max_bw_high * 1000;
+ DRM_INFO("minimum performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+ /* reset the perf tune params to 0 */
+ perf->perf_tune.min_core_clk = 0;
+ perf->perf_tune.min_bus_vote = 0;
+ DRM_INFO("normal performance mode\n");
+ }
+ perf->perf_tune.mode = perf_mode;
+
+ return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ int len = 0;
+ char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+ if (!perf)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf),
+ "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+ perf->perf_tune.mode,
+ perf->perf_tune.min_core_clk,
+ perf->perf_tune.min_bus_vote);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _dpu_core_perf_mode_read,
+ .write = _dpu_core_perf_mode_write,
+};
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+ debugfs_remove_recursive(perf->debugfs_root);
+ perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ struct dpu_mdss_cfg *catalog = perf->catalog;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ priv = perf->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+ if (!perf->debugfs_root) {
+ DPU_ERROR("failed to create core perf debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->max_core_clk_rate);
+ debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_low);
+ debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_high);
+ debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_core_ib);
+ debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_llcc_ib);
+ debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+ debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->fix_core_clk_rate);
+ debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ib_vote);
+ debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ab_vote);
+
+ return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+ if (!perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_perf_debugfs_destroy(perf);
+ perf->max_core_clk_rate = 0;
+ perf->core_clk = NULL;
+ perf->phandle = NULL;
+ perf->catalog = NULL;
+ perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk)
+{
+ perf->dev = dev;
+ perf->catalog = catalog;
+ perf->phandle = phandle;
+ perf->core_clk = core_clk;
+
+ perf->max_core_clk_rate = core_clk->max_rate;
+ if (!perf->max_core_clk_rate) {
+ DPU_DEBUG("optional max core clk rate, use default\n");
+ perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ return 0;
+}
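
A minimal setup/teardown pairing for the perf context above, assuming the
caller (dpu_kms in practice) owns the catalog, power handle, and core
clock; the wrapper name is hypothetical:

	static int example_perf_setup(struct dpu_core_perf *perf,
			struct drm_device *dev,
			struct dpu_mdss_cfg *catalog,
			struct dpu_power_handle *phandle,
			struct dss_clk *core_clk,
			struct dentry *debugfs_root)
	{
		int ret;

		ret = dpu_core_perf_init(perf, dev, catalog, phandle,
				core_clk);
		if (ret)
			return ret;

		ret = dpu_core_perf_debugfs_init(perf, debugfs_root);
		if (ret)
			dpu_core_perf_destroy(perf);

		return ret;
	}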
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000000..fbcbe0c7527a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+ u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+ u32 mode;
+ u64 min_core_clk;
+ u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+ struct drm_device *dev;
+ struct dentry *debugfs_root;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_power_handle *phandle;
+ struct dss_clk *core_clk;
+ u64 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct dpu_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+ u64 fix_core_clk_rate;
+ u64 fix_core_ib_vote;
+ u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: Pointer to core clock
+ * @return: 0 on success
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @parent: Pointer to parent debugfs
+ * @return: 0 on success
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000000..80cbf75bc2ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,2138 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_rect.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_power_handle.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
+#define DPU_DRM_BLEND_OP_OPAQUE 1
+#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
+#define DPU_DRM_BLEND_OP_COVERAGE 3
+#define DPU_DRM_BLEND_OP_MAX 4
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+#define MISR_BUFF_SIZE 256
+
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return NULL;
+ }
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid dpu crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
+{
+ if (!rp)
+ return NULL;
+
+ return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+{
+ struct dpu_crtc_res *res, *next;
+ struct drm_crtc *crtc;
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+ force ? "destroy" : "free_unused");
+
+ list_for_each_entry_safe(res, next, &rp->res_list, list) {
+ if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+ continue;
+ DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ list_del(&res->list);
+ if (res->ops.put)
+ res->ops.put(res->val);
+ kfree(res);
+ }
+}
+
+/**
+ * _dpu_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ _dpu_crtc_rp_reclaim(rp, false);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ list_del_init(&rp->rp_list);
+ _dpu_crtc_rp_reclaim(rp, true);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+ DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+ return dpu_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _dpu_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _dpu_crtc_hw_blk_put(void *val)
+{
+ DPU_DEBUG("res://%pK\n", val);
+ dpu_hw_blk_put(val);
+}
+
+/**
+ * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
+ struct dpu_crtc_respool *dup_rp)
+{
+ struct dpu_crtc_res *res, *dup_res;
+ struct drm_crtc *crtc;
+
+ if (!rp || !dup_rp || !rp->rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+ mutex_lock(rp->rp_lock);
+ dup_rp->sequence_id = rp->sequence_id + 1;
+ INIT_LIST_HEAD(&dup_rp->res_list);
+ dup_rp->ops = rp->ops;
+ list_for_each_entry(res, &rp->res_list, list) {
+ dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+ if (!dup_res) {
+ mutex_unlock(rp->rp_lock);
+ return;
+ }
+ INIT_LIST_HEAD(&dup_res->list);
+ atomic_set(&dup_res->refcount, 0);
+ dup_res->type = res->type;
+ dup_res->tag = res->tag;
+ dup_res->val = res->val;
+ dup_res->ops = res->ops;
+ dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
+ DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, dup_rp->sequence_id,
+ dup_res->type, dup_res->tag, dup_res->val,
+ atomic_read(&dup_res->refcount));
+ list_add_tail(&dup_res->list, &dup_rp->res_list);
+ if (dup_res->ops.get)
+ dup_res->ops.get(dup_res->val, 0, -1);
+ }
+
+ dup_rp->rp_lock = rp->rp_lock;
+ dup_rp->rp_head = rp->rp_head;
+ INIT_LIST_HEAD(&dup_rp->rp_list);
+ list_add_tail(&dup_rp->rp_list, rp->rp_head);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * @rp_lock: Pointer to serialization resource pool lock
+ * @rp_head: Pointer to crtc resource pool head
+ * return: None
+ */
+static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
+ struct mutex *rp_lock, struct list_head *rp_head)
+{
+ if (!rp || !rp_lock || !rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ mutex_lock(rp_lock);
+ rp->rp_lock = rp_lock;
+ rp->rp_head = rp_head;
+ INIT_LIST_HEAD(&rp->rp_list);
+ rp->sequence_id = 0;
+ INIT_LIST_HEAD(&rp->res_list);
+ rp->ops.get = _dpu_crtc_hw_blk_get;
+ rp->ops.put = _dpu_crtc_hw_blk_put;
+ list_add_tail(&rp->rp_list, rp->rp_head);
+ mutex_unlock(rp_lock);
+}
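+
+/*
+ * The respool helpers above track hardware blocks across atomic state
+ * duplication: a duplicated state inherits every resource (flagged FREE
+ * until re-acquired), and unused entries are reclaimed when a state
+ * retires. Compressed sketch of that lifecycle (illustrative only; the
+ * real call sites are the crtc state duplicate/destroy paths in this
+ * file):
+ */
+static void __maybe_unused example_respool_lifecycle(
+ struct dpu_crtc_respool *old_rp, struct dpu_crtc_respool *new_rp)
+{
+ /* new state inherits the resources, each marked FREE */
+ _dpu_crtc_rp_duplicate(old_rp, new_rp);
+
+ /* drop whatever the old state no longer holds */
+ _dpu_crtc_rp_free_unused(old_rp);
+
+ /* final teardown reclaims everything, FREE or not */
+ _dpu_crtc_rp_destroy(old_rp);
+}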
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ DPU_DEBUG("\n");
+
+ if (!crtc)
+ return;
+
+ dpu_crtc->phandle = NULL;
+
+ drm_crtc_cleanup(crtc);
+ mutex_destroy(&dpu_crtc->crtc_lock);
+ kfree(dpu_crtc);
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+ struct dpu_plane_state *pstate)
+{
+ struct dpu_hw_mixer *lm = mixer->hw_lm;
+
+ /* default to opaque blending */
+ lm->ops.setup_blend_config(lm, pstate->stage, 0xFF, 0,
+ DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST);
+}
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *crtc_state;
+ int lm_idx, lm_horiz_position;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+ struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer_cfg cfg;
+
+ if (!lm_roi || !drm_rect_visible(lm_roi))
+ continue;
+
+ cfg.out_width = drm_rect_width(lm_roi);
+ cfg.out_height = drm_rect_height(lm_roi);
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+{
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_format *format;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg *stage_cfg;
+
+ u32 flush_mask;
+ uint32_t stage_idx, lm_idx;
+ int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+ bool bg_alpha_enable = false;
+
+ if (!dpu_crtc || !mixer) {
+ DPU_ERROR("invalid dpu_crtc or mixer\n");
+ return;
+ }
+
+ ctl = mixer->hw_ctl;
+ lm = mixer->hw_lm;
+ stage_cfg = &dpu_crtc->stage_cfg;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ state = plane->state;
+ if (!state)
+ continue;
+
+ pstate = to_dpu_plane_state(state);
+ fb = state->fb;
+
+ dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
+
+ DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+ crtc->base.id,
+ pstate->stage,
+ plane->base.id,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ state->fb ? state->fb->base.id : -1);
+
+ format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+ if (!format) {
+ DPU_ERROR("invalid format\n");
+ return;
+ }
+
+ if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+ bg_alpha_enable = true;
+
+ stage_idx = zpos_cnt[pstate->stage]++;
+ stage_cfg->stage[pstate->stage][stage_idx] =
+ dpu_plane_pipe(plane);
+ stage_cfg->multirect_index[pstate->stage][stage_idx] =
+ pstate->multirect_index;
+
+ trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+ state, pstate, stage_idx,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ format->base.pixel_format,
+ fb ? fb->modifier : 0);
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+
+ mixer[lm_idx].flush_mask |= flush_mask;
+
+ if (bg_alpha_enable && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ }
+ }
+
+ _dpu_crtc_program_lm_output_roi(crtc);
+}
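+
+/*
+ * The zpos_cnt bookkeeping above assigns planes that share a blend
+ * stage to consecutive slots in stage_cfg. Tracing the indexing for
+ * three hypothetical planes at stages {2, 2, 3}:
+ */
+static void __maybe_unused example_stage_slots(void)
+{
+ int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+ int stage_idx;
+
+ stage_idx = zpos_cnt[2]++; /* first plane at stage 2 -> slot 0 */
+ stage_idx = zpos_cnt[3]++; /* plane at stage 3 -> slot 0 */
+ stage_idx = zpos_cnt[2]++; /* second plane at stage 2 -> slot 1 */
+ (void)stage_idx;
+}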
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_crtc_state;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+
+ int i;
+
+ if (!crtc)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_crtc_state = to_dpu_crtc_state(crtc->state);
+ mixer = dpu_crtc->mixers;
+
+ DPU_DEBUG("%s\n", dpu_crtc->name);
+
+ if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+ DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
+ return;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ DPU_ERROR("invalid lm or ctl assigned to mixer\n");
+ return;
+ }
+ mixer[i].mixer_op_mode = 0;
+ mixer[i].flush_mask = 0;
+ if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+ mixer[i].hw_ctl->ops.clear_all_blendstages(
+ mixer[i].hw_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ ctl = mixer[i].hw_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &dpu_crtc->stage_cfg);
+ }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However, PAGE_FLIP events are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events that were requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP and are cached in dpu_crtc->event.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ dpu_crtc->event);
+ trace_dpu_crtc_complete_flip(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ if (!crtc || !crtc->dev) {
+ DPU_ERROR("invalid crtc\n");
+ return INTF_MODE_NONE;
+ }
+
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ return dpu_encoder_get_intf_mode(encoder);
+
+ return INTF_MODE_NONE;
+}
+
+static void dpu_crtc_vblank_cb(void *data)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ /* keep statistics on vblank callback - with auto reset via debugfs */
+ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+ dpu_crtc->vblank_cb_time = ktime_get();
+ else
+ dpu_crtc->vblank_cb_count++;
+ _dpu_crtc_complete_flip(crtc);
+ drm_crtc_handle_vblank(crtc);
+ trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ struct drm_crtc *crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+ unsigned long flags;
+ bool frame_done = false;
+
+ if (!work) {
+ DPU_ERROR("invalid work handle\n");
+ return;
+ }
+
+ fevent = container_of(work, struct dpu_crtc_frame_event, work);
+ if (!fevent->crtc || !fevent->crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = fevent->crtc;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid kms handle\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+ DPU_ATRACE_BEGIN("crtc_frame_event");
+
+ DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+ /* this should not happen */
+ DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
+ crtc->base.id,
+ fevent->event,
+ ktime_to_ns(fevent->ts),
+ atomic_read(&dpu_crtc->frame_pending));
+ } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ trace_dpu_crtc_frame_event_done(DRMID(crtc),
+ fevent->event);
+ dpu_core_perf_crtc_release_bw(crtc);
+ } else {
+ trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+ fevent->event);
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
+ dpu_core_perf_crtc_update(crtc, 0, false);
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+ DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+ crtc->base.id, ktime_to_ns(fevent->ts));
+
+ if (frame_done)
+ complete_all(&dpu_crtc->frame_done_comp);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+ DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
+ * registers this API to encoder for all frame event callbacks like
+ * frame_error, frame_done, idle_timeout, etc. The encoder may raise these
+ * events from different contexts - IRQ, user thread, commit_thread, etc.
+ * Each event should be carefully reviewed and processed in the proper task
+ * context to avoid scheduling delay or to properly manage the irq context's
+ * bottom half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ unsigned long flags;
+ u32 crtc_id;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ /* Nothing to do on idle event */
+ if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
+
+ trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+ struct dpu_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ trace_dpu_crtc_complete_commit(DRMID(crtc));
+}
+
+static void _dpu_crtc_setup_mixer_for_encoder(
+ struct drm_crtc *crtc,
+ struct drm_encoder *enc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_rm *rm = &dpu_kms->rm;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *last_valid_ctl = NULL;
+ int i;
+ struct dpu_rm_hw_iter lm_iter, ctl_iter;
+
+ dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
+ dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+
+ /* Set up all the mixers and ctls reserved by this encoder */
+ for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
+ mixer = &dpu_crtc->mixers[i];
+
+ if (!dpu_rm_get_hw(rm, &lm_iter))
+ break;
+ mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+ /* There may be fewer CTLs than LMs; if so, one CTL controls multiple LMs */
+ if (!dpu_rm_get_hw(rm, &ctl_iter)) {
+ DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
+ mixer->hw_lm->idx - LM_0);
+ mixer->hw_ctl = last_valid_ctl;
+ } else {
+ mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->hw_ctl;
+ }
+
+ /* Shouldn't happen, mixers are always >= ctls */
+ if (!mixer->hw_ctl) {
+ DPU_ERROR("no valid ctls found for lm %d\n",
+ mixer->hw_lm->idx - LM_0);
+ return;
+ }
+
+ mixer->encoder = enc;
+
+ dpu_crtc->num_mixers++;
+ DPU_DEBUG("setup mixer %d: lm %d\n",
+ i, mixer->hw_lm->idx - LM_0);
+ DPU_DEBUG("setup mixer %d: ctl %d\n",
+ i, mixer->hw_ctl->idx - CTL_0);
+ }
+}
+
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
+
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ /* Check for mixers on all encoders attached to this crtc */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *adj_mode;
+ u32 crtc_split_width;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ adj_mode = &state->adjusted_mode;
+ crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
+
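+	/*
+	 * Each mixer covers an equal horizontal slice of the adjusted mode:
+	 * mixer i spans [i * split_width, (i + 1) * split_width).
+	 */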
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ struct drm_rect *r = &cstate->lm_bounds[i];
+ r->x1 = crtc_split_width * i;
+ r->y1 = 0;
+ r->x2 = r->x1 + crtc_split_width;
+ r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+ }
+
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ unsigned long flags;
+ struct dpu_crtc_smmu_state_data *smmu_state;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dev = crtc->dev;
+ smmu_state = &dpu_crtc->smmu_state;
+
+ if (!dpu_crtc->num_mixers) {
+ _dpu_crtc_setup_mixers(crtc);
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+ }
+
+ if (dpu_crtc->event) {
+ WARN_ON(dpu_crtc->event);
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ /* encoder will trigger pending mask now */
+ dpu_encoder_trigger_kickoff_pending(encoder);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ _dpu_crtc_blend_setup(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+ * It is better to request pending before FLUSH and START trigger
+ * to make sure no pp_done irq missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+ unsigned long flags;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+ return;
+ }
+
+ event_thread = &priv->event_thread[crtc->index];
+
+ if (dpu_crtc->event) {
+ DPU_DEBUG("already received dpu_crtc->event\n");
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ /*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ /*
+ * For planes without commit update, drm framework will not add
+ * those planes to current state since hardware update is not
+ * required. However, if those planes were power collapsed since
+ * last commit cycle, driver has to restore the hardware state
+ * of those planes explicitly here prior to plane flush.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc)
+ dpu_plane_restore(plane);
+
+ /* update performance setting before crtc kickoff */
+ dpu_core_perf_crtc_update(crtc, 1, false);
+
+ /*
+ * Final plane updates: Give each plane a chance to complete all
+ * required writes/flushing before crtc's "flush
+ * everything" call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (dpu_crtc->smmu_state.transition_error)
+ dpu_plane_set_error(plane, true);
+ dpu_plane_flush(plane);
+ }
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ _dpu_crtc_rp_destroy(&cstate->rp);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret, rc = 0;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ if (!atomic_read(&dpu_crtc->frame_pending)) {
+ DPU_DEBUG("no frames pending\n");
+ return 0;
+ }
+
+ DPU_ATRACE_BEGIN("frame done completion wait");
+ ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+ msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ if (!ret) {
+ DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+ rc = -ETIMEDOUT;
+ }
+ DPU_ATRACE_END("frame done completion wait");
+
+ return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_crtc_state *cstate;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+ dev = crtc->dev;
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ priv = dpu_kms->dev->dev_private;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ /*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to start a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ DPU_ATRACE_BEGIN("crtc_commit");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct dpu_encoder_kickoff_params params = { 0 };
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ dpu_encoder_prepare_for_kickoff(encoder, &params);
+ }
+
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
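+	/*
+	 * frame_pending counts commits in flight; the transition from 0 to 1
+	 * marks the first commit after enable, where bandwidth and other
+	 * resources are acquired.
+	 */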
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+
+ dpu_crtc->play_count++;
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_kickoff(encoder);
+ }
+
+end:
+ reinit_completion(&dpu_crtc->frame_done_comp);
+ DPU_ATRACE_END("crtc_commit");
+}
+
+/**
+ * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * @Return: error code
+ */
+static int _dpu_crtc_vblank_enable_no_lock(
+ struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *enc;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ dev = crtc->dev;
+
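+	/*
+	 * Enable: take a power reference first, then register the vblank
+	 * callback on each encoder. Disable: unregister the callbacks first,
+	 * then drop the power reference, so callbacks never run on
+	 * powered-down hardware.
+	 */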
+ if (enable) {
+ int ret;
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ ret = _dpu_crtc_power_enable(dpu_crtc, true);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc,
+ dpu_crtc_vblank_cb, (void *)crtc);
+ }
+ } else {
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc, NULL, NULL);
+ }
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int ret = 0;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+
+ if (!priv->kms) {
+ DPU_ERROR("invalid crtc kms\n");
+ return;
+ }
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /*
+ * If the vblank is enabled, release a power reference on suspend
+ * and take it back during resume (if it is still enabled).
+ */
+ trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
+ if (dpu_crtc->suspend == enable)
+ DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+ crtc->base.id, enable);
+ else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+
+ dpu_crtc->suspend = enable;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * @Returns: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate, *old_cstate;
+
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ old_cstate = to_dpu_crtc_state(crtc->state);
+ cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+ _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
+ return &cstate->base;
+}
+
+/**
+ * dpu_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ /* revert suspend actions, if necessary */
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, false);
+
+ /* remove previous state, if present */
+ if (crtc->state) {
+ dpu_crtc_destroy_state(crtc, crtc->state);
+		crtc->state = NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return;
+ }
+
+ _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
+ &dpu_crtc->rp_head);
+
+ cstate->base.crtc = crtc;
+ crtc->state = &cstate->base;
+}
+
+static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct dpu_crtc_mixer *m;
+ u32 i, misr_status;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
+
+ switch (event_type) {
+ case DPU_POWER_EVENT_POST_ENABLE:
+ /* restore encoder; crtc will be programmed during commit */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_virt_restore(encoder);
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, true,
+ dpu_crtc->misr_frame_count);
+ }
+ break;
+ case DPU_POWER_EVENT_PRE_DISABLE:
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ }
+ break;
+ case DPU_POWER_EVENT_POST_DISABLE:
+		/*
+		 * Nothing to do. All the planes on the CRTC will be
+		 * programmed for every frame.
+		 */
+ break;
+ default:
+ DPU_DEBUG("event:%d not handled\n", event_type);
+ break;
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+ unsigned long flags;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ mode = &cstate->base.adjusted_mode;
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, true);
+
+ /* Disable/save vblank irq handling */
+ drm_crtc_vblank_off(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /* wait for frame_event_done completion */
+ if (_dpu_crtc_wait_for_frame_done(crtc))
+		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+
+ trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = false;
+
+ if (atomic_read(&dpu_crtc->frame_pending)) {
+ trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+ atomic_read(&dpu_crtc->frame_pending));
+ dpu_core_perf_crtc_release_bw(crtc);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+ }
+
+ dpu_core_perf_crtc_update(crtc, 0, true);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+ }
+
+ if (dpu_crtc->power_event)
+ dpu_power_handle_unregister_event(dpu_crtc->phandle,
+ dpu_crtc->power_event);
+
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder,
+ dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+ if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = true;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ /* Enable/restore vblank irq handling */
+ drm_crtc_vblank_on(crtc);
+
+ dpu_crtc->power_event = dpu_power_handle_register_event(
+ dpu_crtc->phandle,
+ DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
+ DPU_POWER_EVENT_PRE_DISABLE,
+ dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
+
+}
+
+struct plane_state {
+ struct dpu_plane_state *dpu_pstate;
+ const struct drm_plane_state *drm_pstate;
+ int stage;
+ u32 pipe_id;
+};
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct plane_state *pstates;
+ struct dpu_crtc_state *cstate;
+
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+ int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
+ struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ int multirect_count = 0;
+ const struct drm_plane_state *pipe_staged[SSPP_MAX];
+ int left_zpos_cnt = 0, right_zpos_cnt = 0;
+ struct drm_rect crtc_rect = { 0 };
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
+	if (!pstates)
+		return -ENOMEM;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ if (!state->enable || !state->active) {
+ DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, state->enable, state->active);
+ goto end;
+ }
+
+ mode = &state->adjusted_mode;
+	DPU_DEBUG("%s: check\n", dpu_crtc->name);
+
+ /* force a full mode set if active state changed */
+ if (state->active_changed)
+ state->mode_changed = true;
+
+ memset(pipe_staged, 0, sizeof(pipe_staged));
+
+ mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ _dpu_crtc_setup_lm_bounds(crtc, state);
+
+ crtc_rect.x2 = mode->hdisplay;
+ crtc_rect.y2 = mode->vdisplay;
+
+ /* get plane state for all drm planes associated with crtc state */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct drm_rect dst, clip = crtc_rect;
+
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = PTR_ERR(pstate);
+ DPU_ERROR("%s: failed to get plane%d state, %d\n",
+ dpu_crtc->name, plane->base.id, rc);
+ goto end;
+ }
+ if (cnt >= DPU_STAGE_MAX * 4)
+ continue;
+
+ pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
+ pstates[cnt].drm_pstate = pstate;
+ pstates[cnt].stage = pstate->normalized_zpos;
+ pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
+ if (pipe_staged[pstates[cnt].pipe_id]) {
+ multirect_plane[multirect_count].r0 =
+ pipe_staged[pstates[cnt].pipe_id];
+ multirect_plane[multirect_count].r1 = pstate;
+ multirect_count++;
+
+ pipe_staged[pstates[cnt].pipe_id] = NULL;
+ } else {
+ pipe_staged[pstates[cnt].pipe_id] = pstate;
+ }
+
+ cnt++;
+
+ dst = drm_plane_state_dest(pstate);
+ if (!drm_rect_intersect(&clip, &dst) ||
+ !drm_rect_equals(&clip, &dst)) {
+ DPU_ERROR("invalid vertical/horizontal destination\n");
+ DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+ DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+ DRM_RECT_ARG(&dst));
+ rc = -E2BIG;
+ goto end;
+ }
+ }
+
+ for (i = 1; i < SSPP_MAX; i++) {
+ if (pipe_staged[i]) {
+ dpu_plane_clear_multirect(pipe_staged[i]);
+
+ if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
+ DPU_ERROR(
+ "r1 only virt plane:%d not supported\n",
+ pipe_staged[i]->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+
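+	/*
+	 * Validate blend stage usage: no more than two planes may occupy the
+	 * same stage on either side of the mixer split.
+	 */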
+ z_pos = -1;
+ for (i = 0; i < cnt; i++) {
+ /* reset counts at every new blend stage */
+ if (pstates[i].stage != z_pos) {
+ left_zpos_cnt = 0;
+ right_zpos_cnt = 0;
+ z_pos = pstates[i].stage;
+ }
+
+ /* verify z_pos setting before using it */
+ if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+ DPU_ERROR("> %d plane stages assigned\n",
+ DPU_STAGE_MAX - DPU_STAGE_0);
+ rc = -EINVAL;
+ goto end;
+ } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+ if (left_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on left\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ left_zpos_cnt++;
+
+ } else {
+ if (right_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on right\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ right_zpos_cnt++;
+ }
+
+ pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
+ }
+
+ for (i = 0; i < multirect_count; i++) {
+ if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+ DPU_ERROR(
+ "multirect validation failed for planes (%d - %d)\n",
+ multirect_plane[i].r0->plane->base.id,
+ multirect_plane[i].r1->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ rc = dpu_core_perf_crtc_check(crtc, state);
+ if (rc) {
+ DPU_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ goto end;
+ }
+
+ /* validate source split:
+ * use pstates sorted by stage to check planes on same stage
+ * we assume that all pipes are in source split so its valid to compare
+ * without taking into account left/right mixer placement
+ */
+ for (i = 1; i < cnt; i++) {
+ struct plane_state *prv_pstate, *cur_pstate;
+ struct drm_rect left_rect, right_rect;
+ int32_t left_pid, right_pid;
+ int32_t stage;
+
+ prv_pstate = &pstates[i - 1];
+ cur_pstate = &pstates[i];
+ if (prv_pstate->stage != cur_pstate->stage)
+ continue;
+
+ stage = cur_pstate->stage;
+
+ left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+ left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+ right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+ right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+ if (right_rect.x1 < left_rect.x1) {
+ swap(left_pid, right_pid);
+ swap(left_rect, right_rect);
+ }
+
+		/*
+ * - planes are enumerated in pipe-priority order such that
+ * planes with lower drm_id must be left-most in a shared
+ * blend-stage when using source split.
+ * - planes in source split must be contiguous in width
+ * - planes in source split must have same dest yoff and height
+ */
+ if (right_pid < left_pid) {
+ DPU_ERROR(
+ "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+ stage, left_pid, right_pid);
+ rc = -EINVAL;
+ goto end;
+ } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+ DPU_ERROR("non-contiguous coordinates for src split. "
+ "stage: %d left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ } else if (left_rect.y1 != right_rect.y1 ||
+ drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+ DPU_ERROR("source split at stage: %d. invalid "
+ "yoff/height: left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ _dpu_crtc_rp_free_unused(&cstate->rp);
+ kfree(pstates);
+ return rc;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->vblank_requested = en;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_crtc_mixer *m;
+
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+
+ int i, out_width;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_crtc = s->private;
+ crtc = &dpu_crtc->base;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ mode = &crtc->state->adjusted_mode;
+ out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+ mode->hdisplay, mode->vdisplay);
+
+ seq_puts(s, "\n");
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm)
+ seq_printf(s, "\tmixer[%d] has no lm\n", i);
+ else if (!m->hw_ctl)
+ seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+ else
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
+ }
+
+ seq_puts(s, "\n");
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ state = plane->state;
+
+ if (!pstate || !state)
+ continue;
+
+ seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ if (plane->state->fb) {
+ fb = plane->state->fb;
+
+ seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+ fb->base.id, (char *) &fb->format->format,
+ fb->width, fb->height);
+ for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+ seq_printf(s, "cpp[%d]:%u ",
+ i, fb->format->cpp[i]);
+ seq_puts(s, "\n\t");
+
+ seq_printf(s, "modifier:%8llu ", fb->modifier);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+ seq_printf(s, "pitches[%d]:%8u ", i,
+ fb->pitches[i]);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+ seq_printf(s, "offsets[%d]:%8u ", i,
+ fb->offsets[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+ state->src_x, state->src_y, state->src_w, state->src_h);
+
+ seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+ state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h);
+ seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+ pstate->multirect_mode, pstate->multirect_index);
+
+ seq_puts(s, "\n");
+ }
+ if (dpu_crtc->vblank_cb_count) {
+ ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+ s64 diff_ms = ktime_to_ms(diff);
+ s64 fps = diff_ms ? div_s64(
+ dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+ seq_printf(s,
+ "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+ fps, dpu_crtc->vblank_cb_count,
+ ktime_to_ms(diff), dpu_crtc->play_count);
+
+ /* reset time & count for next measurement */
+ dpu_crtc->vblank_cb_count = 0;
+ dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+ }
+
+ seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _dpu_debugfs_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_crtc_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ u32 frame_count, enable;
+ size_t buff_copy;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy)) {
+ DPU_ERROR("buffer copy failed\n");
+ return -EINVAL;
+ }
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ dpu_crtc->misr_enable = enable;
+ dpu_crtc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ dpu_crtc->misr_data[i] = 0;
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+ }
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_crtc_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ u32 misr_status;
+ ssize_t len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (!dpu_crtc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+ m->hw_lm->idx - LM_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ dpu_crtc->misr_data[i]);
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ return len;
+}
+
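+/*
+ * Helper macro: given a <prefix>_show() seq_file function, generate the
+ * matching single_open() wrapper and file_operations table.
+ */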
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_res *res;
+ struct dpu_crtc_respool *rp;
+ int i;
+
+ seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+ seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+ seq_printf(s, "core_clk_rate: %llu\n",
+ dpu_crtc->cur_perf.core_clk_rate);
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.bw_ctl[i]);
+ seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.max_per_pipe_ib[i]);
+ }
+
+ mutex_lock(&dpu_crtc->rp_lock);
+ list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
+ seq_printf(s, "rp.%d: ", rp->sequence_id);
+ list_for_each_entry(res, &rp->res_list, list)
+ seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dpu_crtc->rp_lock);
+
+ return 0;
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_crtc_misr_read,
+ .write = _dpu_crtc_misr_setup,
+ };
+
+ if (!crtc)
+ return -EINVAL;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms)
+ return -EINVAL;
+
+ dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ crtc->dev->primary->debugfs_root);
+ if (!dpu_crtc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0400,
+ dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_status_fops);
+ debugfs_create_file("state", 0600,
+ dpu_crtc->debugfs_root,
+ &dpu_crtc->base,
+ &dpu_crtc_debugfs_state_fops);
+ debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_misr_fops);
+
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return;
+ dpu_crtc = to_dpu_crtc(crtc);
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+ return _dpu_crtc_init_debugfs(crtc);
+}
+
+static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
+{
+ _dpu_crtc_destroy_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = dpu_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_crtc_reset,
+ .atomic_duplicate_state = dpu_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_crtc_destroy_state,
+ .late_register = dpu_crtc_late_register,
+ .early_unregister = dpu_crtc_early_unregister,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ .disable = dpu_crtc_disable,
+ .atomic_enable = dpu_crtc_enable,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct dpu_kms *kms = NULL;
+ int i;
+
+ priv = dev->dev_private;
+ kms = to_dpu_kms(priv->kms);
+
+ dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &dpu_crtc->base;
+ crtc->dev = dev;
+
+ mutex_init(&dpu_crtc->crtc_lock);
+ spin_lock_init(&dpu_crtc->spin_lock);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+
+ mutex_init(&dpu_crtc->rp_lock);
+ INIT_LIST_HEAD(&dpu_crtc->rp_head);
+
+ init_completion(&dpu_crtc->frame_done_comp);
+
+ INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
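+	/* pre-populate the free pool with all statically allocated frame events */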
+ for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+ list_add(&dpu_crtc->frame_events[i].list,
+ &dpu_crtc->frame_event_list);
+ kthread_init_work(&dpu_crtc->frame_events[i].work,
+ dpu_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ NULL);
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ /* save user friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize event handling */
+ spin_lock_init(&dpu_crtc->event_lock);
+
+ dpu_crtc->phandle = &kms->phandle;
+
+ DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 000000000000..e87109e608e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE 4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ * voting through apps rsc
+ * @NRT_CLIENT: Non-RealTime client like WB display
+ * voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state: smmu state
+ * @ATTACHED: all the context banks are attached.
+ * @DETACHED: all the context banks are detached.
+ * @ATTACH_ALL_REQ: transient state of attaching context banks.
+ * @DETACH_ALL_REQ: transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+ ATTACHED = 0,
+ DETACHED,
+ ATTACH_ALL_REQ,
+ DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+ NONE,
+ PRE_COMMIT,
+ POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+ uint32_t state;
+ uint32_t transition_type;
+ uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm: LM HW Driver context
+ * @hw_ctl: CTL Path HW driver context
+ * @encoder: Encoder attached to this lm & ctl
+ * @mixer_op_mode: mixer blending operation mode
+ * @flush_mask: mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+ struct dpu_hw_mixer *hw_lm;
+ struct dpu_hw_ctl *hw_ctl;
+ struct drm_encoder *encoder;
+ u32 mixer_op_mode;
+ u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct dpu_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT 16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @num_ctls : Number of ctl paths in use
+ * @num_mixers : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ * especially in the case of DSC Merge.
+ * @mixers : List of active mixers
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @drm_requested_vblank : Whether vblanks have been enabled in the encoder
+ * @property_info : Opaque structure for generic property support
+ * @property_defaults : Array of default values for generic property support
+ * @stage_cfg : H/w mixer stage configuration
+ * @debugfs_root : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count : frame count between crtc enable and disable
+ * @vblank_cb_time : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend : whether or not a suspend operation is in progress
+ * @enabled : whether the DPU CRTC is currently enabled. updated in the
+ * commit-thread, not state-swap time which is earlier, so
+ * safe to make decisions on during VBLANK on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features are active
+ * @dirty_list : list of color processing features are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
+ * @event_thread : Pointer to event handler thread
+ * @event_worker : Event worker queue
+ * @event_lock : Spinlock around event handling code
+ * @misr_enable : boolean entry indicates misr enable/disable status.
+ * @misr_frame_count : misr frame count provided by client
+ * @misr_data : store misr data before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event : registered power event handle
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @rp_lock : serialization lock for resource pool
+ * @rp_head : list of active resource pool
+ * @scl3_cfg_lut : qseed3 lut config
+ */
+struct dpu_crtc {
+ struct drm_crtc base;
+ char name[DPU_CRTC_NAME_SIZE];
+
+ /* HW Resources reserved for the crtc */
+ u32 num_ctls;
+ u32 num_mixers;
+ bool mixers_swapped;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+ struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ struct dpu_hw_stage_cfg stage_cfg;
+ struct dentry *debugfs_root;
+
+ u32 vblank_cb_count;
+ u64 play_count;
+ ktime_t vblank_cb_time;
+ bool vblank_requested;
+ bool suspend;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ struct list_head ad_dirty;
+ struct list_head ad_active;
+
+ struct mutex crtc_lock;
+
+ atomic_t frame_pending;
+ struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+ struct completion frame_done_comp;
+
+ /* for handling internal event thread */
+ spinlock_t event_lock;
+ bool misr_enable;
+ u32 misr_frame_count;
+ u32 misr_data[CRTC_DUAL_MIXERS];
+
+ struct dpu_power_handle *phandle;
+ struct dpu_power_event *power_event;
+
+ struct dpu_core_perf_params cur_perf;
+
+ struct mutex rp_lock;
+ struct list_head rp_head;
+
+ struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+ void *(*get)(void *val, u32 type, u64 tag);
+ void (*put)(void *val);
+};
+
+#define DPU_CRTC_RES_FLAG_FREE BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+ struct list_head list;
+ u32 type;
+ u64 tag;
+ atomic_t refcount;
+ struct dpu_crtc_res_ops ops;
+ void *val;
+ u32 flags;
+};
+
+/**
+ * dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+ struct mutex *rp_lock;
+ struct list_head *rp_head;
+ struct list_head rp_list;
+ u32 sequence_id;
+ struct list_head res_list;
+ struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit : Whether current topology requires PPSplit special handling
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @property_state: Local storage for msm_prop properties
+ * @property_values: Current crtc property values
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ */
+struct dpu_crtc_state {
+ struct drm_crtc_state base;
+
+ bool bw_control;
+ bool bw_split_vote;
+
+ bool is_ppsplit;
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+ uint64_t input_fence_timeout_ns;
+
+ struct dpu_core_perf_params new_perf;
+ struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+ container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (/2 for split)
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ u32 mixer_width;
+
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+ mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+ return mixer_width;
+}
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ return mode->vdisplay;
+}
+
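+/*
+ * Example: a 1920x1080 mode on dual mixers yields a mixer width of
+ * 1920 / 2 = 960 and a mixer height of 1080.
+ */
+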
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return -EINVAL;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc type- rt, nrt etc.
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+ struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate =
+ crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+ if (!cstate)
+ return NRT_CLIENT;
+
+ return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+ return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 000000000000..ae2aee7ed9e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN 80
+
+#define DBGBUS_FLAGS_DSPP BIT(0)
+#define DBGBUS_DSPP_STATUS 0x34C
+
+#define DBGBUS_NAME_DPU "dpu"
+#define DBGBUS_NAME_VBIF_RT "vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0 0x188
+#define DBGBUS_AXI_INTF 0x194
+#define DBGBUS_SSPP1 0x298
+#define DBGBUS_DSPP 0x348
+#define DBGBUS_PERIPH 0x418
+
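+/* compose a debug bus test value: (block id << 4) | (test point << 1) | enable */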
+#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR 0x190
+#define MMSS_VBIF_SRC_ERR 0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_ERR_INFO 0x1a0
+#define MMSS_VBIF_ERR_INFO_1 0x1a4
+#define MMSS_VBIF_CLIENT_NUM 14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ * It may have sub-ranges, in which case those are used for dumping,
+ * or no sub-ranges, in which case dumping covers base -> max_offset.
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len: buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+ struct list_head reg_base_head;
+ char name[REG_BASE_NAME_LEN];
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ void (*cb)(void *ptr);
+ void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+ void (*analyzer)(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+ u32 disable_bus_addr;
+ u32 block_bus_addr;
+ u32 bit_offset;
+ u32 block_cnt;
+ u32 test_pnt_start;
+ u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+ char *name;
+ u32 enable_mask;
+ bool include_in_deferred_work;
+ u32 flags;
+ u32 entries_size;
+ u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct dpu_debug_bus_entry *entries;
+ u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+ struct list_head reg_base_list;
+ struct device *dev;
+
+ struct work_struct dump_work;
+
+ struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+ struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & 0xFFF000))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
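+/*
+ * Debug bus test points for 8998. Entries initialized with only three
+ * values leave the analyzer callback NULL.
+ */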
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+	/* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+ { DBGBUS_SSPP0, 85, 2 },
+
+	/* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+ { DBGBUS_SSPP1, 85, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* cursor 1 */
+ { DBGBUS_SSPP1, 80, 0 },
+ { DBGBUS_SSPP1, 80, 1 },
+ { DBGBUS_SSPP1, 80, 2 },
+ { DBGBUS_SSPP1, 80, 3 },
+ { DBGBUS_SSPP1, 80, 4 },
+ { DBGBUS_SSPP1, 80, 5 },
+ { DBGBUS_SSPP1, 80, 6 },
+ { DBGBUS_SSPP1, 80, 7 },
+
+ { DBGBUS_SSPP1, 81, 0 },
+ { DBGBUS_SSPP1, 81, 1 },
+ { DBGBUS_SSPP1, 81, 2 },
+ { DBGBUS_SSPP1, 81, 3 },
+ { DBGBUS_SSPP1, 81, 4 },
+ { DBGBUS_SSPP1, 81, 5 },
+ { DBGBUS_SSPP1, 81, 6 },
+ { DBGBUS_SSPP1, 81, 7 },
+
+ { DBGBUS_SSPP1, 82, 0 },
+ { DBGBUS_SSPP1, 82, 1 },
+ { DBGBUS_SSPP1, 82, 2 },
+ { DBGBUS_SSPP1, 82, 3 },
+ { DBGBUS_SSPP1, 82, 4 },
+ { DBGBUS_SSPP1, 82, 5 },
+ { DBGBUS_SSPP1, 82, 6 },
+ { DBGBUS_SSPP1, 82, 7 },
+
+ { DBGBUS_SSPP1, 83, 0 },
+ { DBGBUS_SSPP1, 83, 1 },
+ { DBGBUS_SSPP1, 83, 2 },
+ { DBGBUS_SSPP1, 83, 3 },
+ { DBGBUS_SSPP1, 83, 4 },
+ { DBGBUS_SSPP1, 83, 5 },
+ { DBGBUS_SSPP1, 83, 6 },
+ { DBGBUS_SSPP1, 83, 7 },
+
+ { DBGBUS_SSPP1, 84, 0 },
+ { DBGBUS_SSPP1, 84, 1 },
+ { DBGBUS_SSPP1, 84, 2 },
+ { DBGBUS_SSPP1, 84, 3 },
+ { DBGBUS_SSPP1, 84, 4 },
+ { DBGBUS_SSPP1, 84, 5 },
+ { DBGBUS_SSPP1, 84, 6 },
+ { DBGBUS_SSPP1, 84, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 0},
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 0},
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 0},
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 0},
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 0},
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 0},
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 0},
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 0},
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 0},
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 0},
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 0},
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 0},
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 0},
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 0},
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 9, 0},
+ { DBGBUS_SSPP0, 9, 1},
+ { DBGBUS_SSPP0, 9, 3},
+ { DBGBUS_SSPP0, 29, 0},
+ { DBGBUS_SSPP0, 29, 1},
+ { DBGBUS_SSPP0, 29, 3},
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 9, 0},
+ { DBGBUS_SSPP1, 9, 1},
+ { DBGBUS_SSPP1, 9, 3},
+ { DBGBUS_SSPP1, 29, 0},
+ { DBGBUS_SSPP1, 29, 1},
+ { DBGBUS_SSPP1, 29, 3},
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ { DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+	/* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+
+	/* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 84, 1},
+ { DBGBUS_DSPP, 84, 2},
+ { DBGBUS_DSPP, 84, 3},
+ { DBGBUS_DSPP, 84, 4},
+ { DBGBUS_DSPP, 84, 5},
+ { DBGBUS_DSPP, 84, 6},
+ { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 85, 1},
+ { DBGBUS_DSPP, 85, 2},
+ { DBGBUS_DSPP, 85, 3},
+ { DBGBUS_DSPP, 85, 4},
+ { DBGBUS_DSPP, 85, 5},
+ { DBGBUS_DSPP, 85, 6},
+ { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 86, 1},
+ { DBGBUS_DSPP, 86, 2},
+ { DBGBUS_DSPP, 86, 3},
+ { DBGBUS_DSPP, 86, 4},
+ { DBGBUS_DSPP, 86, 5},
+ { DBGBUS_DSPP, 86, 6},
+ { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 87, 1},
+ { DBGBUS_DSPP, 87, 2},
+ { DBGBUS_DSPP, 87, 3},
+ { DBGBUS_DSPP, 87, 4},
+ { DBGBUS_DSPP, 87, 5},
+ { DBGBUS_DSPP, 87, 6},
+ { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 88, 1},
+ { DBGBUS_DSPP, 88, 2},
+ { DBGBUS_DSPP, 88, 3},
+ { DBGBUS_DSPP, 88, 4},
+ { DBGBUS_DSPP, 88, 5},
+ { DBGBUS_DSPP, 88, 6},
+ { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 89, 1},
+ { DBGBUS_DSPP, 89, 2},
+ { DBGBUS_DSPP, 89, 3},
+ { DBGBUS_DSPP, 89, 4},
+ { DBGBUS_DSPP, 89, 5},
+ { DBGBUS_DSPP, 89, 6},
+ { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 90, 1},
+ { DBGBUS_DSPP, 90, 2},
+ { DBGBUS_DSPP, 90, 3},
+ { DBGBUS_DSPP, 90, 4},
+ { DBGBUS_DSPP, 90, 5},
+ { DBGBUS_DSPP, 90, 6},
+ { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 91, 1},
+ { DBGBUS_DSPP, 91, 2},
+ { DBGBUS_DSPP, 91, 3},
+ { DBGBUS_DSPP, 91, 4},
+ { DBGBUS_DSPP, 91, 5},
+ { DBGBUS_DSPP, 91, 6},
+ { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 92, 1},
+ { DBGBUS_DSPP, 92, 2},
+ { DBGBUS_DSPP, 92, 3},
+ { DBGBUS_DSPP, 92, 4},
+ { DBGBUS_DSPP, 92, 5},
+ { DBGBUS_DSPP, 92, 6},
+ { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 93, 1},
+ { DBGBUS_DSPP, 93, 2},
+ { DBGBUS_DSPP, 93, 3},
+ { DBGBUS_DSPP, 93, 4},
+ { DBGBUS_DSPP, 93, 5},
+ { DBGBUS_DSPP, 93, 6},
+ { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 94, 1},
+ { DBGBUS_DSPP, 94, 2},
+ { DBGBUS_DSPP, 94, 3},
+ { DBGBUS_DSPP, 94, 4},
+ { DBGBUS_DSPP, 94, 5},
+ { DBGBUS_DSPP, 94, 6},
+ { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 95, 1},
+ { DBGBUS_DSPP, 95, 2},
+ { DBGBUS_DSPP, 95, 3},
+ { DBGBUS_DSPP, 95, 4},
+ { DBGBUS_DSPP, 95, 5},
+ { DBGBUS_DSPP, 95, 6},
+ { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM5 */
+ { DBGBUS_DSPP, 110, 1},
+ { DBGBUS_DSPP, 110, 2},
+ { DBGBUS_DSPP, 110, 3},
+ { DBGBUS_DSPP, 110, 4},
+ { DBGBUS_DSPP, 110, 5},
+ { DBGBUS_DSPP, 110, 6},
+ { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 111, 1},
+ { DBGBUS_DSPP, 111, 2},
+ { DBGBUS_DSPP, 111, 3},
+ { DBGBUS_DSPP, 111, 4},
+ { DBGBUS_DSPP, 111, 5},
+ { DBGBUS_DSPP, 111, 6},
+ { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 112, 1},
+ { DBGBUS_DSPP, 112, 2},
+ { DBGBUS_DSPP, 112, 3},
+ { DBGBUS_DSPP, 112, 4},
+ { DBGBUS_DSPP, 112, 5},
+ { DBGBUS_DSPP, 112, 6},
+ { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 113, 1},
+ { DBGBUS_DSPP, 113, 2},
+ { DBGBUS_DSPP, 113, 3},
+ { DBGBUS_DSPP, 113, 4},
+ { DBGBUS_DSPP, 113, 5},
+ { DBGBUS_DSPP, 113, 6},
+ { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 114, 1},
+ { DBGBUS_DSPP, 114, 2},
+ { DBGBUS_DSPP, 114, 3},
+ { DBGBUS_DSPP, 114, 4},
+ { DBGBUS_DSPP, 114, 5},
+ { DBGBUS_DSPP, 114, 6},
+ { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 115, 1},
+ { DBGBUS_DSPP, 115, 2},
+ { DBGBUS_DSPP, 115, 3},
+ { DBGBUS_DSPP, 115, 4},
+ { DBGBUS_DSPP, 115, 5},
+ { DBGBUS_DSPP, 115, 6},
+ { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 116, 1},
+ { DBGBUS_DSPP, 116, 2},
+ { DBGBUS_DSPP, 116, 3},
+ { DBGBUS_DSPP, 116, 4},
+ { DBGBUS_DSPP, 116, 5},
+ { DBGBUS_DSPP, 116, 6},
+ { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 117, 1},
+ { DBGBUS_DSPP, 117, 2},
+ { DBGBUS_DSPP, 117, 3},
+ { DBGBUS_DSPP, 117, 4},
+ { DBGBUS_DSPP, 117, 5},
+ { DBGBUS_DSPP, 117, 6},
+ { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 118, 1},
+ { DBGBUS_DSPP, 118, 2},
+ { DBGBUS_DSPP, 118, 3},
+ { DBGBUS_DSPP, 118, 4},
+ { DBGBUS_DSPP, 118, 5},
+ { DBGBUS_DSPP, 118, 6},
+ { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 119, 1},
+ { DBGBUS_DSPP, 119, 2},
+ { DBGBUS_DSPP, 119, 3},
+ { DBGBUS_DSPP, 119, 4},
+ { DBGBUS_DSPP, 119, 5},
+ { DBGBUS_DSPP, 119, 6},
+ { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 120, 1},
+ { DBGBUS_DSPP, 120, 2},
+ { DBGBUS_DSPP, 120, 3},
+ { DBGBUS_DSPP, 120, 4},
+ { DBGBUS_DSPP, 120, 5},
+ { DBGBUS_DSPP, 120, 6},
+ { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ /* intf0-3 */
+ { DBGBUS_PERIPH, 0, 0},
+ { DBGBUS_PERIPH, 1, 0},
+ { DBGBUS_PERIPH, 2, 0},
+ { DBGBUS_PERIPH, 3, 0},
+
+ /* te counter wrapper */
+ { DBGBUS_PERIPH, 60, 0},
+
+ /* dsc0 */
+ { DBGBUS_PERIPH, 47, 0},
+ { DBGBUS_PERIPH, 47, 1},
+ { DBGBUS_PERIPH, 47, 2},
+ { DBGBUS_PERIPH, 47, 3},
+ { DBGBUS_PERIPH, 47, 4},
+ { DBGBUS_PERIPH, 47, 5},
+ { DBGBUS_PERIPH, 47, 6},
+ { DBGBUS_PERIPH, 47, 7},
+
+ /* dsc1 */
+ { DBGBUS_PERIPH, 48, 0},
+ { DBGBUS_PERIPH, 48, 1},
+ { DBGBUS_PERIPH, 48, 2},
+ { DBGBUS_PERIPH, 48, 3},
+ { DBGBUS_PERIPH, 48, 4},
+ { DBGBUS_PERIPH, 48, 5},
+ { DBGBUS_PERIPH, 48, 6},
+ { DBGBUS_PERIPH, 48, 7},
+
+ /* dsc2 */
+ { DBGBUS_PERIPH, 51, 0},
+ { DBGBUS_PERIPH, 51, 1},
+ { DBGBUS_PERIPH, 51, 2},
+ { DBGBUS_PERIPH, 51, 3},
+ { DBGBUS_PERIPH, 51, 4},
+ { DBGBUS_PERIPH, 51, 5},
+ { DBGBUS_PERIPH, 51, 6},
+ { DBGBUS_PERIPH, 51, 7},
+
+ /* dsc3 */
+ { DBGBUS_PERIPH, 52, 0},
+ { DBGBUS_PERIPH, 52, 1},
+ { DBGBUS_PERIPH, 52, 2},
+ { DBGBUS_PERIPH, 52, 3},
+ { DBGBUS_PERIPH, 52, 4},
+ { DBGBUS_PERIPH, 52, 5},
+ { DBGBUS_PERIPH, 52, 6},
+ { DBGBUS_PERIPH, 52, 7},
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* cdwn */
+ { DBGBUS_PERIPH, 80, 0},
+ { DBGBUS_PERIPH, 80, 1},
+ { DBGBUS_PERIPH, 80, 2},
+
+ { DBGBUS_PERIPH, 81, 0},
+ { DBGBUS_PERIPH, 81, 1},
+ { DBGBUS_PERIPH, 81, 2},
+
+ { DBGBUS_PERIPH, 82, 0},
+ { DBGBUS_PERIPH, 82, 1},
+ { DBGBUS_PERIPH, 82, 2},
+ { DBGBUS_PERIPH, 82, 3},
+ { DBGBUS_PERIPH, 82, 4},
+ { DBGBUS_PERIPH, 82, 5},
+ { DBGBUS_PERIPH, 82, 6},
+ { DBGBUS_PERIPH, 82, 7},
+
+ /* hdmi */
+ { DBGBUS_PERIPH, 68, 0},
+ { DBGBUS_PERIPH, 68, 1},
+ { DBGBUS_PERIPH, 68, 2},
+ { DBGBUS_PERIPH, 68, 3},
+ { DBGBUS_PERIPH, 68, 4},
+ { DBGBUS_PERIPH, 68, 5},
+
+ /* edp */
+ { DBGBUS_PERIPH, 69, 0},
+ { DBGBUS_PERIPH, 69, 1},
+ { DBGBUS_PERIPH, 69, 2},
+ { DBGBUS_PERIPH, 69, 3},
+ { DBGBUS_PERIPH, 69, 4},
+ { DBGBUS_PERIPH, 69, 5},
+
+ /* dsi0 */
+ { DBGBUS_PERIPH, 70, 0},
+ { DBGBUS_PERIPH, 70, 1},
+ { DBGBUS_PERIPH, 70, 2},
+ { DBGBUS_PERIPH, 70, 3},
+ { DBGBUS_PERIPH, 70, 4},
+ { DBGBUS_PERIPH, 70, 5},
+
+ /* dsi1 */
+ { DBGBUS_PERIPH, 71, 0},
+ { DBGBUS_PERIPH, 71, 1},
+ { DBGBUS_PERIPH, 71, 2},
+ { DBGBUS_PERIPH, 71, 3},
+ { DBGBUS_PERIPH, 71, 4},
+ { DBGBUS_PERIPH, 71, 5},
+};
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
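+	/*
+	 * Positional fields are { disable_bus_addr, block_bus_addr,
+	 * bit_offset, block_cnt, test_pnt_start, test_pnt_cnt }.
+	 */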
+ {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+ {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+ {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * _dpu_dbg_enable_power - toggle power for hw register access via runtime PM
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+ if (enable)
+ pm_runtime_get_sync(dpu_dbg_base.dev);
+ else
+ pm_runtime_put_sync(dpu_dbg_base.dev);
+}
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct dpu_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int list_size;
+ int i;
+ u32 offset;
+ void __iomem *mem_base = NULL;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base + bus->top_blk_off;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dump_mem = &bus->cmn.dumped_content;
+
+	/* keep four u32 values (wr_addr, block_id, test_id, status) per entry */
+ list_size = (bus->cmn.entries_size * 4 * 4);
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+ for (i = 0; i < bus->cmn.entries_size; i++) {
+ head = bus->entries + i;
+ writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+ mem_base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+ offset = DBGBUS_DSPP_STATUS;
+ /* keep DSPP test point enabled */
+ if (head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+ } else {
+ offset = head->wr_addr + 0x4;
+ }
+
+ status = readl_relaxed(mem_base + offset);
+
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id,
+ head->test_id, status);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ if (head->analyzer)
+ head->analyzer(mem_base, head, status);
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, mem_base + head->wr_addr);
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+ head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+ }
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+ struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+ u32 *dump_addr, bool in_log)
+{
+ int i, j;
+ u32 val;
+
+ if (!dump_addr && !in_log)
+ return;
+
+ for (i = 0; i < head->block_cnt; i++) {
+ writel_relaxed(1 << (i + head->bit_offset),
+ mem_base + head->block_bus_addr);
+		/* make sure the current bus block is enabled */
+ wmb();
+ for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+ writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+ /* make sure that test point is enabled */
+ wmb();
+ val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+ if (dump_addr) {
+ *dump_addr++ = head->block_bus_addr;
+ *dump_addr++ = i;
+ *dump_addr++ = j;
+ *dump_addr++ = val;
+ }
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+ head->block_bus_addr, i, j, val);
+ }
+ }
+}
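+
+/*
+ * Each test point read above contributes four u32 values to the dump
+ * buffer: { block_bus_addr, block index, test point index, value }.
+ */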
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 value, d0, d1;
+ unsigned long reg, reg1, reg2;
+ struct vbif_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int i, list_size = 0;
+ void __iomem *mem_base = NULL;
+ struct vbif_debug_bus_entry *dbg_bus;
+ u32 bus_size;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dbg_bus = bus->entries;
+ bus_size = bus->cmn.entries_size;
+ list_size = bus->cmn.entries_size;
+ dump_mem = &bus->cmn.dumped_content;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+ return;
+
+ /* allocate memory for each test point */
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+ list_size += (head->block_cnt * head->test_pnt_cnt);
+ }
+
+	/* 4 bytes * 4 entries for each test point */
+ list_size *= 16;
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+
+ value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+ writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+ /* make sure that vbif core is on */
+ wmb();
+
+	/*
+	 * Extract VBIF error info based on XIN halt and error status.
+	 * If the XIN client is not in HALT state, or an error is detected,
+	 * then retrieve the VBIF error info for it.
+	 */
+ reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+ reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+ reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+ dev_err(dpu_dbg_base.dev,
+ "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+ reg, reg1, reg2);
+ reg >>= 16;
+ reg &= ~(reg1 | reg2);
+ for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+ if (!test_bit(0, &reg)) {
+ writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+ /* make sure reg write goes through */
+ wmb();
+
+ d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+ d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+ dev_err(dpu_dbg_base.dev,
+ "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+ i, d0, d1);
+ }
+ reg >>= 1;
+ }
+
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+
+ writel_relaxed(0, mem_base + head->disable_bus_addr);
+ writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+ /* make sure that other bus is off */
+ wmb();
+
+ _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+ in_log);
+ if (dump_addr)
+ dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+ }
+
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (dump_dbgbus_dpu)
+ _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+ if (dump_dbgbus_vbif_rt)
+ _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+ _dpu_dump_array("dpudump_workitem",
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+ return;
+
+ if (!queue_work) {
+ _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+ return;
+ }
+
+ /* schedule work to dump later */
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+ dump_dbgbus_vbif_rt;
+ schedule_work(&dpu_dbg_base.dump_work);
+}
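+
+/*
+ * Example, illustrative only (the calling sites and dump names here are
+ * hypothetical): an error handler in interrupt context would defer the
+ * dump to the work item, while a process-context caller may dump
+ * synchronously:
+ *
+ *	dpu_dbg_dump(true, "underrun_irq", true, true);     deferred
+ *	dpu_dbg_dump(false, "manual_trigger", true, true);  immediate
+ */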
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handle
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ _dpu_dump_array("dump_debugfs", true, true);
+ return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+ .open = dpu_dbg_debugfs_open,
+ .write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+ char debug_name[80] = "";
+
+ if (!debugfs_root)
+ return -EINVAL;
+
+ debugfs_create_file("dump", 0600, debugfs_root, NULL,
+ &dpu_dbg_dump_fops);
+
+ if (dbg->dbgbus_dpu.entries) {
+ dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_dpu.cmn.name);
+ dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_dpu.cmn.enable_mask);
+ }
+
+ if (dbg->dbgbus_vbif_rt.entries) {
+ dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_vbif_rt.cmn.name);
+ dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_vbif_rt.cmn.enable_mask);
+ }
+
+ return 0;
+}
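+
+/*
+ * With the "dump" entry created above, a register/bus dump can be
+ * triggered by writing to it from userspace. A sketch (the exact path
+ * depends on the debugfs_root passed in by the caller):
+ *
+ *	echo 1 > /sys/kernel/debug/dri/0/dump
+ */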
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+ memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+ memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+ if (IS_MSM8998_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+ dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+ dbg->dbgbus_dpu.cmn.entries_size =
+ ARRAY_SIZE(dbg_bus_dpu_sdm845);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ /* vbif is unchanged vs 8998 */
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else {
+ pr_err("unsupported chipset id %X\n", hwversion);
+ }
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+ if (!dev) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+ dpu_dbg_base.dev = dev;
+
+ INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+ return 0;
+}
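+
+/*
+ * A minimal bring-up sequence, as a sketch (the caller context and the
+ * core_rev/top_off values are assumptions, not part of this file):
+ *
+ *	if (dpu_dbg_init(dev))
+ *		return -EINVAL;
+ *	dpu_dbg_init_dbg_buses(core_rev);
+ *	dpu_dbg_set_dpu_top_offset(top_off);
+ *	dpu_dbg_debugfs_register(debugfs_root);
+ */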
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+ _dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+ dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 000000000000..1e6fa945f98b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+ DPU_DBG_DUMP_IN_LOG = BIT(0),
+ DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion: Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev: device handle
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root: debugfs root in which to create dpu debug entries
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns: none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work: whether to queue the dumping work to the work_struct
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ * Returns: none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt);
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ * address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+ bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 000000000000..1b4de3486ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching;
+ * the plan is to create all possible physical encoder types and
+ * switch between them at runtime.
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define MISR_BUFF_SIZE 256
+
+#define IDLE_SHORT_TIMEOUT 1
+
+#define MAX_VDISPLAY_SPLIT 1080
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks. Regardless of the previous
+ * state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ * This event happens at NORMAL priority.
+ * This event, when received during the ON state, leaves the RC STATE
+ * in the PRE_OFF state. It should be followed by the STOP event as
+ * part of encoder disable.
+ * If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks, and
+ * disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ * PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ * Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and change the resource state
+ * to IDLE.
+ */
+enum dpu_enc_rc_events {
+ DPU_ENC_RC_EVENT_KICKOFF = 1,
+ DPU_ENC_RC_EVENT_FRAME_DONE,
+ DPU_ENC_RC_EVENT_PRE_STOP,
+ DPU_ENC_RC_EVENT_STOP,
+ DPU_ENC_RC_EVENT_ENTER_IDLE
+};
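+
+/*
+ * Illustrative summary of the resulting state machine (derived from the
+ * event descriptions above; dpu_encoder_resource_control() is the
+ * authoritative implementation):
+ *
+ *	OFF/IDLE     --KICKOFF-->    ON
+ *	ON           --FRAME_DONE--> ON (schedules delayed ENTER_IDLE work)
+ *	ON           --ENTER_IDLE--> IDLE
+ *	ON           --PRE_STOP-->   PRE_OFF
+ *	PRE_OFF/IDLE --STOP-->       OFF
+ */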
+
+/**
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+ DPU_ENC_RC_STATE_OFF,
+ DPU_ENC_RC_STATE_PRE_OFF,
+ DPU_ENC_RC_STATE_ON,
+ DPU_ENC_RC_STATE_IDLE
+};
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client: Client handle to the bus scaling interface
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode; an optimization.
+ * Only valid after enable, cleared at disable.
+ * @hw_pp: Handle to the pingpong blocks used for the display. The number
+ * of pingpong blocks can differ from num_phys_encs.
+ * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
+ * for partial update right-only cases, such as pingpong
+ * split where virtual pingpong does not generate IRQs
+ * @crtc_vblank_cb: Callback into the upper layer / CRTC for
+ * notification of the VBLANK
+ * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc_kickoff_cb: Callback into CRTC that will flush & start
+ * all CTL paths
+ * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder create/destroy and
+ * access.
+ * @frame_busy_mask: Bitmask tracking which phys_encs are still busy
+ * processing the current command.
+ * Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timer: watchdog timer for frame done event
+ * @vsync_event_timer: vsync timer
+ * @disp_info: local copy of msm_display_info struct
+ * @misr_enable: misr enable/disable status
+ * @misr_frame_count: misr frame count before start capturing the data
+ * @idle_pc_supported: indicate if idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work: worker to handle vsync event for autorefresh
+ * @topology: topology of the display
+ * @mode_set_complete: flag to indicate modeset completion
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+struct dpu_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+ uint32_t bus_scaling_client;
+
+ uint32_t display_num_of_h_tiles;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct dpu_encoder_phys *cur_master;
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+ bool intfs_swapped;
+
+ void (*crtc_vblank_cb)(void *);
+ void *crtc_vblank_cb_data;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+
+ atomic_t frame_done_timeout;
+ struct timer_list frame_done_timer;
+ struct timer_list vsync_event_timer;
+
+ struct msm_display_info disp_info;
+ bool misr_enable;
+ u32 misr_frame_count;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum dpu_enc_rc_states rc_state;
+ struct kthread_delayed_work delayed_off_work;
+ struct kthread_work vsync_event_work;
+ struct msm_display_topology topology;
+ bool mode_set_complete;
+
+ u32 idle_timeout;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
+ bool enable)
+{
+ struct drm_encoder *drm_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu enc\n");
+ return -EINVAL;
+ }
+
+ drm_enc = &dpu_enc->base;
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
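+
+/*
+ * Note: _dpu_encoder_power_enable() is a thin wrapper around runtime PM;
+ * every enable=true call takes a pm_runtime reference on the DPU platform
+ * device and must therefore be balanced by a later enable=false call, as
+ * the debugfs MISR handlers below do around their register accesses.
+ */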
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ int32_t hw_id, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info)
+{
+ struct dpu_encoder_irq *irq;
+ u32 irq_status;
+ int ret;
+
+ if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* note: do master / slave checking outside */
+
+ /* return EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return -EWOULDBLOCK;
+ }
+
+ if (irq->irq_idx < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->name);
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+ irq->hw_idx,
+ wait_info);
+
+ if (ret <= 0) {
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
+ irq->irq_idx, true);
+ if (irq_status) {
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+ irq->cb.func(phys_enc, irq->irq_idx);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ intr_idx, irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+
+ return ret;
+}
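+
+/*
+ * The timeout path above deliberately re-reads the interrupt status: if the
+ * interrupt fired but the wait still timed out (e.g. the callback lost the
+ * race with the timeout), the handler is invoked manually with local irqs
+ * disabled so the frame is not stuck waiting on an event that already
+ * happened.
+ */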
+
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret = 0;
+
+ if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ if (irq->irq_idx >= 0) {
+ DPU_DEBUG_PHYS(phys_enc,
+ "skipping already registered irq %s type %d\n",
+ irq->name, irq->intr_type);
+ return 0;
+ }
+
+ irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
+ irq->intr_type, irq->hw_idx);
+ if (irq->irq_idx < 0) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to lookup IRQ index for %s type:%d\n",
+ irq->name, irq->intr_type);
+ return -EINVAL;
+ }
+
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to register IRQ callback for %s\n",
+ irq->name);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ irq->irq_idx, &irq->cb);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ return ret;
+}
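+
+/*
+ * Registration above is ordered lookup -> register callback -> enable, and
+ * each failure unwinds the previous step, leaving irq_idx at -EINVAL so a
+ * later dpu_encoder_helper_unregister_irq() treats the irq as unregistered.
+ */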
+
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* skip irqs that weren't registered, but warn about the duplicate unregister */
+ if (irq->irq_idx < 0) {
+ DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return 0;
+ }
+
+ ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ irq->irq_idx = -EINVAL;
+
+ return 0;
+}
+
+void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!hw_res || !drm_enc || !conn_state) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+ drm_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.get_hw_resources)
+ phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ }
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --dpu_enc->num_phys_encs;
+ dpu_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (dpu_enc->num_phys_encs)
+ DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+ dpu_enc->num_phys_encs);
+ dpu_enc->num_phys_encs = 0;
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ mutex_destroy(&dpu_enc->enc_lock);
+
+ kfree(dpu_enc);
+}
+
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct dpu_hw_mdp *hw_mdptop;
+ struct msm_display_info *disp_info;
+
+ if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ disp_info = &dpu_enc->disp_info;
+
+ if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ return;
+
+ /*
+ * Disable split modes since the encoder will be operating as the only
+ * encoder, either for the entire use case (for example, single DSI),
+ * or for this frame in the case of left/right-only partial update.
+ */
+ if (phys_enc->split_role == ENC_ROLE_SOLO) {
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ return;
+ }
+
+ cfg.en = true;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ if (phys_enc->split_role == ENC_ROLE_MASTER) {
+ DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ }
+}
+
+static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode)
+{
+ struct drm_display_mode *cur_mode;
+
+ if (!connector || !adj_mode)
+ return;
+
+ list_for_each_entry(cur_mode, &connector->modes, head) {
+ if (cur_mode->vdisplay == adj_mode->vdisplay &&
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ cur_mode->vrefresh == adj_mode->vrefresh) {
+ adj_mode->private = cur_mode->private;
+ adj_mode->private_flags |= cur_mode->private_flags;
+ }
+ }
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct drm_display_mode *mode)
+{
+ struct msm_display_topology topology;
+ int i, intf_count = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* Use split topology (two layer mixers) when vdisplay > MAX_VDISPLAY_SPLIT */
+ topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ topology.num_enc = 0;
+ topology.num_intf = intf_count;
+
+ return topology;
+}
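+
+/*
+ * For illustration: a 1080p mode (vdisplay == 1080) stays on a single layer
+ * mixer, while a 4k mode (vdisplay == 2160 > MAX_VDISPLAY_SPLIT) is split
+ * across two layer mixers; num_intf simply reflects how many physical
+ * encoders were created for the display.
+ */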
+
+static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ struct msm_display_topology topology;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != 0, crtc_state != 0, conn_state != 0);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ mode = &crtc_state->mode;
+ adj_mode = &crtc_state->adjusted_mode;
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+ /*
+ * display drivers may populate private fields of the drm display mode
+ * structure while registering possible modes of a connector with DRM.
+ * These private fields are not carried over when DRM invokes the
+ * mode_set callbacks, so retrieve and populate the private fields of
+ * the given mode here.
+ */
+ _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ else if (phys && phys->ops.mode_fixup)
+ if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+ ret = -EINVAL;
+
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ if (!ret) {
+ /*
+ * Avoid reserving resources when mode set is pending. Topology
+ * info may not be available to complete reservation.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)
+ && dpu_enc->mode_set_complete) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+ conn_state, topology, true);
+ dpu_enc->mode_set_complete = false;
+ }
+ }
+
+ if (!ret)
+ drm_mode_set_crtcinfo(adj_mode, 0);
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+ adj_mode->private_flags);
+
+ return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_info *disp_info)
+{
+ struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ int i;
+
+ if (!dpu_enc || !disp_info) {
+ DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+ dpu_enc != NULL, disp_info != NULL);
+ return;
+ } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+ DPU_ERROR("invalid num phys enc %d/%d\n",
+ dpu_enc->num_phys_encs,
+ (int) ARRAY_SIZE(dpu_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &dpu_enc->base;
+ /* these pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ hw_mdptop = dpu_kms->hw_mdp;
+ if (!hw_mdptop) {
+ DPU_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+ if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
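+
+/*
+ * A note on the selection above: for command-mode panels the tear-check
+ * vsync source is normally the panel TE signal (DPU_VSYNC0_SOURCE_GPIO);
+ * the watchdog-timer source is the fallback when the panel cannot supply
+ * a usable TE, as flagged by is_te_using_watchdog_timer.
+ */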
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.irq_control)
+ phys->ops.irq_control(phys, enable);
+ }
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* enable all the irq */
+ _dpu_encoder_irq_control(drm_enc, true);
+
+ } else {
+ /* disable all the irq */
+ _dpu_encoder_irq_control(drm_enc, false);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *disp_thread;
+ bool is_vid_mode = false;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ is_vid_mode = dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return -EINVAL;
+ }
+ disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+
+ /*
+ * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+ * STOP events and return early for other events (i.e. wb display).
+ */
+ if (!dpu_enc->idle_pc_supported &&
+ (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+ sw_event != DPU_ENC_RC_EVENT_STOP &&
+ sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+ return 0;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
+
+ switch (sw_event) {
+ case DPU_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+ dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+ _dpu_encoder_irq_control(drm_enc, true);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, true);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_FRAME_DONE:
+ /*
+ * mutex lock is not used as this event happens at interrupt
+ * context. And locking is not required as the other events
+ * like KICKOFF and STOP do a wait-for-idle before executing
+ * the resource_control
+ */
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
+ return 0;
+ }
+
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
+ break;
+
+ case DPU_ENC_RC_EVENT_PRE_STOP:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (is_vid_mode &&
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ _dpu_encoder_irq_control(drm_enc, true);
+ }
+ /* skip if is already OFF or IDLE, resources are off already */
+ else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_STOP:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * expect to arrive here only from the PRE_OFF or IDLE states,
+ * and in the IDLE state the resources are already disabled
+ */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (dpu_enc->frame_busy_mask[0]) {
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ if (is_vid_mode)
+ _dpu_encoder_irq_control(drm_enc, false);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ default:
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
+ break;
+ }
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
+ return 0;
+}
+
+static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct list_head *connector_list;
+ struct drm_connector *conn = NULL, *conn_iter;
+ struct dpu_rm_hw_iter pp_iter;
+ struct msm_display_topology topology;
+ enum dpu_rm_topology_name topology_name;
+ int i = 0, ret;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ connector_list = &dpu_kms->dev->mode_config.connector_list;
+
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+ list_for_each_entry(conn_iter, connector_list, head)
+ if (conn_iter->encoder == drm_enc)
+ conn = conn_iter;
+
+ if (!conn) {
+ DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
+ return;
+ } else if (!conn->state) {
+ DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
+ return;
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ conn->state, topology, false);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return;
+ }
+
+ dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ dpu_enc->hw_pp[i] = NULL;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ }
+
+ topology_name = dpu_rm_get_topology_name(topology);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid pingpong block for the encoder\n");
+ return;
+ }
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->connector = conn->state->connector;
+ phys->topology_name = topology_name;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
+ }
+ }
+
+ dpu_enc->mode_set_complete = true;
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ if (!dpu_enc || !dpu_enc->cur_master) {
+ DPU_ERROR("invalid dpu encoder/master\n");
+ return;
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
+ if (dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+ dpu_enc->cur_master->hw_mdptop,
+ dpu_kms->catalog);
+
+ _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+}
+
+void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
+ phys->ops.restore(phys);
+ }
+
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+ dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+ struct drm_display_mode *cur_mode = NULL;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
+
+ dpu_enc->cur_master = NULL;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+ DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
+ dpu_enc->cur_master = phys;
+ break;
+ }
+ }
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
+ return;
+ }
+
+ ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+ ret);
+ return;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ if (phys != dpu_enc->cur_master) {
+ if (phys->ops.enable)
+ phys->ops.enable(phys);
+ }
+
+ if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
+ phys->ops.setup_misr(phys, true,
+ dpu_enc->misr_frame_count);
+ }
+
+ if (dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
+ /* wait for idle */
+ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+
+ /* after phys waits for frame-done, should be no more frames pending */
+ if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&dpu_enc->frame_done_timer);
+ }
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i])
+ dpu_enc->phys_encs[i]->connector = NULL;
+ }
+
+ dpu_enc->cur_master = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
+}
+
+static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_vblank_callback");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc_vblank_cb)
+ dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+ DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*vbl_cb)(void *), void *vbl_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+ int i;
+
+ enable = vbl_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_vblank_cb = vbl_cb;
+ dpu_enc->crtc_vblank_cb_data = vbl_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+
+ enable = frame_event_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_frame_event_cb = frame_event_cb;
+ dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned int i;
+
+ if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ /*
+ * suppress frame_done without waiter,
+ * likely autorefresh
+ */
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+ event, ready_phys->intf_idx);
+ return;
+ }
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, dpu_enc->frame_busy_mask);
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
+ }
+ }
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ del_timer(&dpu_enc->frame_done_timer);
+
+ dpu_encoder_resource_control(drm_enc,
+ DPU_ENC_RC_EVENT_FRAME_DONE);
+
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data, event);
+ }
+}
+
+static void dpu_encoder_off_work(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, delayed_off_work.work);
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ dpu_encoder_resource_control(&dpu_enc->base,
+ DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+ dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+ DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct dpu_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
+
+ if (!drm_enc || !phys) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+ drm_enc != 0, phys != 0);
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl || !ctl->ops.trigger_flush) {
+ DPU_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+
+ if (ctl->ops.get_pending_flush)
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+ pending_kickoff_cnt, ctl->idx, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+ if (!phys) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl && ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+ }
+}
+
+static int dpu_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ struct dpu_encoder_wait_info *info)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+ s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
+ s64 time;
+
+ do {
+ rc = wait_event_timeout(*(info->wq),
+ atomic_read(info->atomic_cnt) == 0, wait_jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
+ /* if we timed out but the count is still pending and the deadline has not passed, wait again */
+ } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+ (time < expected_time));
+
+ return rc;
+}
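+
+/*
+ * The retry loop above exists because the jiffies-based timeout can expire
+ * before the full timeout_ms has elapsed: when wait_event_timeout() reports
+ * a timeout (rc == 0) but the pending count is still nonzero and the
+ * ktime-based deadline has not passed, the wait is simply re-armed.
+ */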
+
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ ctl = phys_enc->hw_ctl;
+
+ if (!ctl || !ctl->ops.reset)
+ return;
+
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+ ctl->idx);
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * @dpu_enc: Pointer to virtual encoder structure
+ *
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ pending_flush = 0x0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+ if (!ctl)
+ continue;
+
+ if (phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && dpu_enc->cur_master) {
+ _dpu_encoder_trigger_flush(
+ &dpu_enc->base,
+ dpu_enc->cur_master,
+ pending_flush);
+ }
+
+ _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
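+
+/*
+ * For example, in a dual-DSI split with a single flush: the slave's pending
+ * flush bits are collected via get_pending_flush() instead of being
+ * triggered directly, then merged into one trigger_flush() on the master so
+ * both pipes start scanning out the same frame together.
+ */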
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned int i;
+ struct dpu_hw_ctl *ctl;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->hw_ctl) {
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
+ }
+ }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+ struct drm_display_mode *mode)
+{
+ u64 pclk_rate;
+ u32 pclk_period;
+ u32 line_time;
+
+ /*
+ * For linetime calculation, only operate on master encoder.
+ */
+ if (!dpu_enc->cur_master)
+ return 0;
+
+ if (!dpu_enc->cur_master->ops.get_line_count) {
+ DPU_ERROR("get_line_count function not defined\n");
+ return 0;
+ }
+
+ pclk_rate = mode->clock; /* pixel clock in kHz */
+ if (pclk_rate == 0) {
+ DPU_ERROR("pclk is 0, cannot calculate line time\n");
+ return 0;
+ }
+
+ pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+ if (pclk_period == 0) {
+ DPU_ERROR("pclk period is 0\n");
+ return 0;
+ }
+
+ /*
+ * Line time calculation based on Pixel clock and HTOTAL.
+ * Final unit is in ns.
+ */
+ line_time = (pclk_period * mode->htotal) / 1000;
+ if (line_time == 0) {
+ DPU_ERROR("line time calculation is 0\n");
+ return 0;
+ }
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+ pclk_rate, pclk_period, line_time);
+
+ return line_time;
+}
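+
+/*
+ * Worked example (illustrative numbers, not from this patch): for a
+ * 1920x1080@60 mode with mode->clock = 148500 kHz and htotal = 2200,
+ * pclk_period = DIV_ROUND_UP_ULL(10^9, 148500) = 6735. Because mode->clock
+ * is in kHz, pclk_period is effectively in picoseconds, and the final
+ * division by 1000 yields line_time = 6735 * 2200 / 1000 = 14817 ns.
+ */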
+
+static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
+ ktime_t *wakeup_time)
+{
+ struct drm_display_mode *mode;
+ struct dpu_encoder_virt *dpu_enc;
+ u32 cur_line;
+ u32 line_time;
+ u32 vtotal, time_to_vsync;
+ ktime_t cur_time;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (!drm_enc->crtc || !drm_enc->crtc->state) {
+ DPU_ERROR("crtc/crtc state object is NULL\n");
+ return -EINVAL;
+ }
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+ if (!line_time)
+ return -EINVAL;
+
+ cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+ vtotal = mode->vtotal;
+ if (cur_line >= vtotal)
+ time_to_vsync = line_time * vtotal;
+ else
+ time_to_vsync = line_time * (vtotal - cur_line);
+
+ if (time_to_vsync == 0) {
+ DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+ vtotal);
+ return -EINVAL;
+ }
+
+ cur_time = ktime_get();
+ *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+ cur_line, vtotal, time_to_vsync,
+ ktime_to_ms(cur_time),
+ ktime_to_ms(*wakeup_time));
+ return 0;
+}
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ vsync_event_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return;
+ }
+ event_thread = &priv->event_thread[drm_enc->crtc->index];
+ if (!event_thread) {
+ DPU_ERROR("event_thread not found for crtc:%d\n",
+ drm_enc->crtc->index);
+ return;
+ }
+
+ kthread_queue_work(&event_thread->worker, &dpu_enc->vsync_event_work);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, vsync_event_work);
+ ktime_t wakeup_time;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+ return;
+
+ trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ bool needs_hw_reset = false;
+ unsigned int i;
+
+ if (!drm_enc || !params) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys) {
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys, params);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
+ }
+ }
+ DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+ /* if any phys needs reset, reset all phys, in-order */
+ if (needs_hw_reset) {
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.hw_reset)
+ phys->ops.hw_reset(phys);
+ }
+ }
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ ktime_t wakeup_time;
+ unsigned int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DPU_ATRACE_BEGIN("encoder_kickoff");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
+
+ atomic_set(&dpu_enc->frame_done_timeout,
+ DPU_FRAME_DONE_TIMEOUT * 1000 /
+ drm_enc->crtc->state->adjusted_mode.vrefresh);
+ mod_timer(&dpu_enc->frame_done_timer, jiffies +
+ ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _dpu_encoder_kickoff_phys(dpu_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+ !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+ trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+ ktime_to_ms(wakeup_time));
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+ }
+
+ DPU_ATRACE_END("encoder_kickoff");
+}
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.prepare_commit)
+ phys->ops.prepare_commit(phys);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_enc = s->private;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ switch (phys->intf_mode) {
+ case INTF_MODE_VIDEO:
+ seq_puts(s, "mode: video\n");
+ break;
+ case INTF_MODE_CMD:
+ seq_puts(s, "mode: command\n");
+ break;
+ default:
+ seq_puts(s, "mode: ???\n");
+ break;
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return 0;
+}
+
+static int _dpu_encoder_debugfs_status_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_encoder_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_encoder_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ size_t buff_copy;
+ u32 frame_count, enable;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy))
+ return -EINVAL;
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->misr_enable = enable;
+ dpu_enc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.setup_misr)
+ continue;
+
+ phys->ops.setup_misr(phys, enable, frame_count);
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_encoder_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+ int rc;
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ if (!dpu_enc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ } else if (dpu_enc->disp_info.capabilities &
+ ~MSM_DISPLAY_CAP_VID_MODE) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "unsupported\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.collect_misr)
+ continue;
+
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "Intf idx:%d\n", phys->intf_idx - INTF_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ phys->ops.collect_misr(phys));
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+ return len;
+}
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int i;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_encoder_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_encoder_misr_read,
+ .write = _dpu_encoder_misr_setup,
+ };
+
+ char name[DPU_NAME_SIZE];
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid encoder or kms\n");
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ dpu_enc->debugfs_root = debugfs_create_dir(name,
+ drm_enc->dev->primary->debugfs_root);
+ if (!dpu_enc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+
+ debugfs_create_file("misr_data", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ if (dpu_enc->phys_encs[i] &&
+ dpu_enc->phys_encs[i]->ops.late_register)
+ dpu_enc->phys_encs[i]->ops.late_register(
+ dpu_enc->phys_encs[i],
+ dpu_enc->debugfs_root);
+
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ if (!drm_enc)
+ return;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+ return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+ _dpu_encoder_destroy_debugfs(encoder);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+ u32 display_caps,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+{
+ struct dpu_encoder_phys *enc = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /*
+ * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders (one
+ * per supported type) in this function, so check up-front that the
+ * phys_encs array has room for all of them.
+ */
+ if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(dpu_enc->phys_encs)) {
+ DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+ dpu_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ enc = dpu_encoder_phys_vid_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return enc ? PTR_ERR(enc) : -EINVAL;
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ enc = dpu_encoder_phys_cmd_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return enc ? PTR_ERR(enc) : -EINVAL;
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ return 0;
+}
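+
+/*
+ * Note (illustrative): a display advertising both MSM_DISPLAY_CAP_VID_MODE
+ * and MSM_DISPLAY_CAP_CMD_MODE gets one phys encoder of each type from a
+ * single call, which is why the bounds check above reserves
+ * NUM_PHYS_ENCODER_TYPES slots:
+ *
+ * dpu_encoder_virt_add_phys_encs(MSM_DISPLAY_CAP_VID_MODE |
+ *                                MSM_DISPLAY_CAP_CMD_MODE,
+ *                                dpu_enc, &phys_params);
+ */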
+
+static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
+ .handle_vblank_virt = dpu_encoder_vblank_callback,
+ .handle_underrun_virt = dpu_encoder_underrun_callback,
+ .handle_frame_done = dpu_encoder_frame_done_callback,
+};
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct msm_display_info *disp_info,
+ int *drm_enc_mode)
+{
+ int ret = 0;
+ int i = 0;
+ enum dpu_intf_type intf_type;
+ struct dpu_enc_phys_init_params phys_params;
+
+ if (!dpu_enc || !dpu_kms) {
+ DPU_ERROR("invalid arg(s), enc %d kms %d\n",
+ dpu_enc != NULL, dpu_kms != NULL);
+ return -EINVAL;
+ }
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.dpu_kms = dpu_kms;
+ phys_params.parent = &dpu_enc->base;
+ phys_params.parent_ops = &dpu_encoder_parent_ops;
+ phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+ DPU_DEBUG("\n");
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ intf_type = INTF_DSI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_HDMI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_DP;
+ } else {
+ DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+ dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+ DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
+ (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+ dpu_enc->idle_pc_supported =
+ dpu_kms->catalog->caps->has_idle_pc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+ intf_type,
+ controller_id);
+ if (phys_params.intf_idx == INTF_MAX) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
+ dpu_enc,
+ &phys_params);
+ if (ret)
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+ }
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ frame_done_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ u32 event;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+ priv = drm_enc->dev->dev_private;
+
+ if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+ return;
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+ return;
+ }
+
+ DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+ dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+ .mode_set = dpu_encoder_virt_mode_set,
+ .disable = dpu_encoder_virt_disable,
+ .enable = dpu_kms_encoder_enable,
+ .atomic_check = dpu_encoder_virt_atomic_check,
+
+ /* This is called by dpu_kms_encoder_enable */
+ .commit = dpu_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+ .destroy = dpu_encoder_destroy,
+ .late_register = dpu_encoder_late_register,
+ .early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+ int ret = 0;
+
+ dpu_enc = to_dpu_encoder_virt(enc);
+
+ mutex_init(&dpu_enc->enc_lock);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
+ &drm_enc_mode);
+ if (ret)
+ goto fail;
+
+ dpu_enc->cur_master = NULL;
+ spin_lock_init(&dpu_enc->enc_spinlock);
+
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ timer_setup(&dpu_enc->vsync_event_timer,
+ dpu_encoder_vsync_event_handler,
+ 0);
+
+ mutex_init(&dpu_enc->rc_lock);
+ kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ dpu_encoder_off_work);
+ dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+ kthread_init_work(&dpu_enc->vsync_event_work,
+ dpu_encoder_vsync_event_work_handler);
+
+ memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+ DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+ return ret;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (drm_enc)
+ dpu_encoder_destroy(drm_enc);
+
+ return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int rc = 0;
+
+ dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ if (!dpu_enc)
+ return ERR_PTR(-ENOMEM);
+
+ rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ drm_enc_mode, NULL);
+ if (rc) {
+ devm_kfree(dev->dev, dpu_enc);
+ return ERR_PTR(rc);
+ }
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+ return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ enum msm_event_wait event)
+{
+ int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ if (!phys)
+ continue;
+
+ switch (event) {
+ case MSM_ENC_COMMIT_DONE:
+ fn_wait = phys->ops.wait_for_commit_done;
+ break;
+ case MSM_ENC_TX_COMPLETE:
+ fn_wait = phys->ops.wait_for_tx_complete;
+ break;
+ case MSM_ENC_VBLANK:
+ fn_wait = phys->ops.wait_for_vblank;
+ break;
+ default:
+ DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+ event);
+ return -EINVAL;
+ }
+
+ if (fn_wait) {
+ DPU_ATRACE_BEGIN("wait_for_completion_event");
+ ret = fn_wait(phys);
+ DPU_ATRACE_END("wait_for_completion_event");
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
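+
+/*
+ * Example call site (a sketch, not part of this patch): block until the
+ * current commit has been flushed to hardware before disabling the
+ * encoder; -EWOULDBLOCK means the event was already signaled:
+ *
+ * ret = dpu_encoder_wait_for_event(drm_enc, MSM_ENC_COMMIT_DONE);
+ * if (ret && ret != -EWOULDBLOCK)
+ *         DPU_ERROR("wait for commit done failed, ret %d\n", ret);
+ */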
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!encoder) {
+ DPU_ERROR("invalid encoder\n");
+ return INTF_MODE_NONE;
+ }
+ dpu_enc = to_dpu_encoder_virt(encoder);
+
+ if (dpu_enc->cur_master)
+ return dpu_enc->cur_master->intf_mode;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys)
+ return phys->intf_mode;
+ }
+
+ return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 000000000000..60f809fc7c13
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
+
+#define IDLE_TIMEOUT (66 - 16/2)
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources used by the encoder
+ * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ * interface
+ */
+struct dpu_encoder_hw_resources {
+ enum dpu_intf_mode intfs[INTF_MAX];
+ bool needs_cdm;
+ u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays: bitmask, bit set means the ROI of the commit lies within
+ * the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+ unsigned long affected_displays;
+};
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder: encoder pointer
+ * @hw_res: resource table to populate with encoder required resources
+ * @conn_state: report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ * will be called on the next vblank.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister and disable IRQs
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ * @params: kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+ struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder: encoder pointer
+ * @event: event to wait for
+ * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
+ * frames to hardware at a vblank or ctl_start
+ * Encoders will map this differently depending on the
+ * panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+ * the panel. Encoders will map this differently
+ * depending on the panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+ enum msm_event_wait event);
+
+/*
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* mode for this encoder
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+ struct drm_device *dev,
+ int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev: Pointer to drm device structure
+ * @enc: Pointer to the drm_encoder
+ * @disp_info: Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ * atomic commit, before any registers are written
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ * and command mode encoders.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+ u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 000000000000..c7df8aad6613
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX 16
+
+/*
+ * Wait for at most 2 vsyncs at the lowest supported refresh rate
+ * (24 Hz): 2 * 1000 / 24 ≈ 83.3 ms, rounded up to 84 ms.
+ */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is the master and the others are slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED: Encoder is disabled
+ * @DPU_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED: Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
+ * to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+ DPU_ENC_DISABLING,
+ DPU_ENC_DISABLED,
+ DPU_ENC_ENABLING,
+ DPU_ENC_ENABLED,
+ DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder
+ * completes last request frame.
+ */
+struct dpu_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @late_register: DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit: MSM Atomic Call, start of atomic commit sequence
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @mode_fixup: DRM Call. Fixup a DRM mode.
+ * @mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @get_hw_resources: Populate the structure with the hardware
+ * resources that this phys_enc is using.
+ * Expect no overlap between phys_encs.
+ * @control_vblank_irq: Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @wait_for_tx_complete: Wait for hardware to transfer the pixels
+ * to the panel
+ * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff: Do any work necessary post-kickoff
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @setup_misr: Sets up MISR, enable and disables based on sysfs
+ * @collect_misr: Collects MISR data on frame update
+ * @hw_reset: Issue HW recovery such as CTL reset and clear
+ * DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc: phys encoder can update the vsync_enable status
+ * on idle power collapse prepare
+ * @restore: Restore all the encoder configs.
+ * @get_line_count: Obtain current vertical line count
+ */
+struct dpu_encoder_phys_ops {
+ int (*late_register)(struct dpu_encoder_phys *encoder,
+ struct dentry *debugfs_root);
+ void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+ bool (*is_master)(struct dpu_encoder_phys *encoder);
+ bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct dpu_encoder_phys *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*enable)(struct dpu_encoder_phys *encoder);
+ void (*disable)(struct dpu_encoder_phys *encoder);
+ int (*atomic_check)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct dpu_encoder_phys *encoder);
+ void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+ void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
+ bool enable, u32 frame_count);
+ u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+ void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+ void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+ void (*restore)(struct dpu_encoder_phys *phys);
+ int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_CTL_START,
+ INTR_IDX_RDPTR,
+ INTR_IDX_MAX,
+};
+
+/**
+ * dpu_encoder_irq - tracking structure for interrupts
+ * @name: string name of interrupt
+ * @intr_type: Encoder interrupt type
+ * @intr_idx: Encoder interrupt enumeration
+ * @hw_idx: HW Block ID
+ * @irq_idx: IRQ interface lookup index from DPU IRQ framework
+ * will be -EINVAL if IRQ is not registered
+ * @cb: interrupt callback
+ */
+struct dpu_encoder_irq {
+ const char *name;
+ enum dpu_intr_type intr_type;
+ enum dpu_intr_idx intr_idx;
+ int hw_idx;
+ int irq_idx;
+ struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @ops: Operations exposed to the virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_cdm: Hardware interface to the cdm registers
+ * @cdm_cfg: Chroma-down hardware configuration
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @enabled: Whether the encoder has enabled and running a mode
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @intf_idx: Interface index on dpu hardware
+ * @topology_name: topology selected for the display
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ * vs. the number of done/vblank irqs. Should hover
+ * between 0-2. Incremented when a new kickoff is
+ * scheduled. Decremented in irq handler
+ * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
+ * pending.
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @irq: IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+ struct drm_encoder *parent;
+ struct drm_connector *connector;
+ struct dpu_encoder_phys_ops ops;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode cached_mode;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf_mode intf_mode;
+ enum dpu_intf intf_idx;
+ enum dpu_rm_topology_name topology_name;
+ spinlock_t *enc_spinlock;
+ enum dpu_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_ctlstart_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+ struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+ atomic_inc_return(&phys->pending_ctlstart_cnt);
+ return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
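+
+/*
+ * A minimal sketch of the intended pairing (illustrative only): the
+ * kickoff path bumps both counters via dpu_encoder_phys_inc_pending(),
+ * and the ctl_start/pp_done IRQ handlers later drain them and wake any
+ * waiter blocked on pending_kickoff_wq:
+ *
+ * dpu_encoder_phys_inc_pending(phys_enc);
+ * ... frame is kicked off ...
+ * atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ * wake_up_all(&phys_enc->pending_kickoff_wq);
+ */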
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @hw_intf: Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+ struct dpu_encoder_phys base;
+ struct dpu_hw_intf *hw_intf;
+ struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
+ * after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+ struct dpu_encoder_phys base;
+ int stream_sel;
+ bool serialize_wait4pp;
+ int pp_timeout_report_cnt;
+ atomic_t pending_vblank_cnt;
+ wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+ struct dpu_kms *dpu_kms;
+ struct drm_encoder *parent;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf intf_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+ wait_queue_head_t *wq;
+ atomic_t *atomic_cnt;
+ s64 timeout_ms;
+};
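+
+/*
+ * A minimal sketch (assumed semantics, not the driver's exact
+ * implementation) of how a helper might consume dpu_encoder_wait_info:
+ * sleep on @wq until @atomic_cnt drains to zero or @timeout_ms expires:
+ *
+ * static int example_wait(struct dpu_encoder_wait_info *info)
+ * {
+ *         long rc = wait_event_timeout(*info->wq,
+ *                         atomic_read(info->atomic_cnt) == 0,
+ *                         msecs_to_jiffies(info->timeout_ms));
+ *
+ *         return rc ? 0 : -ETIMEDOUT;
+ * }
+ */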
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl hw reset. If state is currently
+ * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+ return BLEND_3D_NONE;
+
+ if (phys_enc->split_role == ENC_ROLE_SOLO &&
+ phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ return BLEND_3D_H_ROW_INT;
+
+ return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ * note: will call dpu_encoder_helper_report_irq_timeout on timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+#endif /* __DPU_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 000000000000..3084675ed425
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+ container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS 10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels
+ * to override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+ struct dpu_encoder_phys_cmd *cmd_enc)
+{
+ return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc)
+ return;
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.setup_intf_cfg)
+ return;
+
+ intf_cfg.intf = phys_enc->intf_idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+ int new_cnt;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("pp_done_irq");
+ /* notify all synchronous clients first, then asynchronous clients */
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ new_cnt, event);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+ wake_up_all(&cmd_enc->pending_vblank_wq);
+ DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("ctl_start_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+ /* Signal any waiting ctl start interrupt */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->hw_idx = phys_enc->hw_ctl->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->hw_idx = phys_enc->intf_idx;
+ irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+
+ if (!phys_enc || !mode || !adj_mode) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+
+ /* only dereference phys_enc after the NULL check above */
+ rm = &phys_enc->dpu_kms->rm;
+ phys_enc->cached_mode = *adj_mode;
+ DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ bool do_log = false;
+
+ if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ return -EINVAL;
+
+ cmd_enc->pp_timeout_report_cnt++;
+ if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+ frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+ do_log = true;
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ do_log = true;
+ }
+
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
+
+ /* to avoid flooding, only log first time, and "dead" time */
+ if (do_log) {
+ DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->hw_ctl->idx - CTL_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+
+ return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+ else if (!ret)
+ cmd_enc->pp_timeout_report_cnt = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable ? "true" : "false", refcount);
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+
+end:
+ if (ret) {
+ DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret,
+ enable ? "true" : "false", refcount);
+ }
+
+ return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_register_irq(phys_enc,
+ INTR_IDX_CTL_START);
+ } else {
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_CTL_START);
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+ }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ mode = &phys_enc->cached_mode;
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ /*
+ * TE default: dsi byte clock calculated based on 70 fps;
+ * around 14 ms to complete a kickoff cycle if te disabled;
+ * vclk_line based on 60 fps; write is faster than read;
+ * init == start == rdptr;
+ *
+ * vsync_count is the ratio of the MDP VSYNC clock frequency to the
+ * LCD panel frequency, divided by the number of rows (lines) in
+ * the LCD panel.
+ */
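+
+ /*
+ * Worked example with illustrative numbers only: for a 19.2 MHz
+ * vsync clock, vtotal = 2000 lines and vrefresh = 60 Hz, this
+ * gives vsync_count = 19200000 / (2000 * 60) = 160.
+ */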
+ vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+ if (!vsync_hz) {
+ DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+ vsync_hz);
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+
+ /* enable external TE after kickoff to avoid premature autorefresh */
+ tc_cfg.hw_vsync_mode = 0;
+
+ /*
+ * By setting sync_cfg_height to near max register value, we essentially
+ * disable dpu hw generated TE signal, since hw TE will arrive first.
+ * Only caveat is if due to error, we hit wrap-around.
+ */
+ tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+ mode->vtotal, mode->vrefresh);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+ tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+ || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+ dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ /*
+ * we do separate flush for each CTL and let
+ * CTL_START synchronize them
+ */
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid phys encoder\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+ DPU_ERROR("already enabled\n");
+ return;
+ }
+
+ dpu_encoder_phys_cmd_enable_helper(phys_enc);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+ struct dpu_encoder_phys *phys_enc, bool enable)
+{
+ if (!phys_enc || !phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+ struct dpu_encoder_phys *phys_enc)
+{
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pingpong *hw_pp;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_line_count)
+ return -EINVAL;
+
+ return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ if (phys_enc->hw_pp->ops.enable_tearcheck)
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+ DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "\n");
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ /*
+ * Mark the kickoff request as outstanding. If there is more than
+ * one outstanding request, we have to wait for the previous one
+ * to complete.
+ */
+ ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+ DRMID(phys_enc->parent), ret,
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+ &wait_info);
+ if (ret == -ETIMEDOUT) {
+ DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (rc) {
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+ DRMID(phys_enc->parent), rc,
+ phys_enc->intf_idx - INTF_0);
+ }
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+ /* required for both controllers */
+ if (!rc && cmd_enc->serialize_wait4pp)
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+ struct dpu_encoder_wait_info wait_info;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return rc;
+
+ wait_info.wq = &cmd_enc->pending_vblank_wq;
+ wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+ wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+ atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+ &wait_info);
+
+ return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ /*
+ * Re-enable external TE, either for the first time after enabling
+ * or if it was disabled for autorefresh.
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+ struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_cmd_is_master;
+ ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+ ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+ ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->restore = dpu_encoder_phys_cmd_enable_helper;
+ ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+ ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ ret = -ENOMEM;
+ DPU_ERROR("failed to allocate\n");
+ goto fail;
+ }
+ phys_enc = &cmd_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp);
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail_mdp_init;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ cmd_enc->stream_sel = 0;
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->name = "ctl_start";
+ irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+ irq->intr_idx = INTR_IDX_CTL_START;
+ irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->name = "pp_done";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+ irq->intr_idx = INTR_IDX_PINGPONG;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->name = "pp_rd_ptr";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+ irq->intr_idx = INTR_IDX_RDPTR;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+ return phys_enc;
+
+fail_mdp_init:
+ kfree(cmd_enc);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 000000000000..14fc7c2a6bb7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+ container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ bool ret = false;
+
+ if (phys_enc->split_role != ENC_ROLE_SLAVE)
+ ret = true;
+
+ return ret;
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct dpu_encoder_phys_vid *vid_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ DPU_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
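+ /*
+ * Illustrative example (numbers from a standard 1080p60 CEA mode,
+ * for reference only): hdisplay=1920, hsync_start=2008,
+ * hsync_end=2052, htotal=2200 gives
+ * h_front_porch = 2008 - 1920 = 88,
+ * hsync_pulse_width = 2052 - 2008 = 44 and
+ * h_back_porch = 2200 - 2052 = 148; the vertical fields follow the
+ * same pattern.
+ */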
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ /*
+ * if (vid_enc->hw->cap->type == INTF_EDP) {
+ * display_v_start += mode->htotal - mode->hsync_start;
+ * display_v_end -= mode->hsync_start - mode->hdisplay;
+ * }
+ */
+}
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/**
+ * programmable_fetch_get_num_lines - number of fetch lines in vertical
+ * front porch
+ * @vid_enc: Pointer to the physical video encoder
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have very large VFP, however we only need a total number of
+ * lines based on the chip worst case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct dpu_encoder_phys_vid *vid_enc,
+ const struct intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+ u32 actual_vfp_lines = 0;
+
+ /* Fetch must be outside active lines, otherwise undefined. */
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DPU_DEBUG_VIDENC(vid_enc,
+ "prog fetch is not needed, large vbp+vsw\n");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+ pr_warn_once
+ ("low vbp+vfp may lead to perf issues in some cases\n");
+ DPU_DEBUG_VIDENC(vid_enc,
+ "less vfp than fetch req, using entire vfp\n");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DPU_DEBUG_VIDENC(vid_enc,
+ "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
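+
+/*
+ * Worked example with hypothetical numbers: if
+ * prog_fetch_lines_worst_case were 25 and the mode had
+ * v_back_porch = 36 and vsync_pulse_width = 5, then
+ * start_of_frame_lines = 41 >= 25 and no prefetch is needed. With a
+ * worst case of 60 instead, needed_vfp_lines = 60 - 41 = 19; a panel
+ * with v_front_porch = 4 would then fall back to using the entire
+ * front porch (4 lines).
+ */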
+
+/**
+ * programmable_fetch_config - program HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time is insufficient
+ * @phys_enc: Pointer to the physical encoder
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Gets the number of lines to prefetch, then calculates the VSYNC counter
+ * value. The HW layer requires the VSYNC counter of the first pixel of
+ * the target VFP line.
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct dpu_encoder_phys_vid *vid_enc =
+ to_dpu_encoder_phys_vid(phys_enc);
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
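+
+/*
+ * Example of the counter math above, with assumed values: for a mode
+ * with vert_total = 1125 and horiz_total = 2200, fetching 4 lines into
+ * the front porch gives fetch_start = (1125 - 4) * 2200 + 1 = 2466201,
+ * the vsync counter value of the first pixel of the target VFP line.
+ */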
+
+static bool dpu_encoder_phys_vid_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+ /*
+ * Leave the mode untouched: modifying it here has consequences
+ * when the adjusted mode comes back to us on the next modeset.
+ */
+ return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct drm_display_mode mode;
+ struct intf_timing_params timing_params = { 0 };
+ const struct dpu_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ mode = phys_enc->cached_mode;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ DPU_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
+
+ drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+ fmt = dpu_get_dpu_format(fmt_fourcc);
+ DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+
+ vid_enc->timing_params = timing_params;
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+ unsigned long lock_flags;
+ u32 flush_register = 0;
+ int new_cnt = -1, old_cnt = -1;
+
+ if (!phys_enc)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+ if (!hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("vblank_irq");
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+ /*
+ * only decrement the pending flush count if we've actually flushed
+ * hardware. due to sw irq latency, vblank may have already happened
+ * so we need to double-check with hw that it accepted the flush bits
+ */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ if (hw_ctl && hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (flush_register == 0)
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+ -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return false;
+
+ if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
+ return true;
+
+ return false;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ /*
+ * Initialize irq->hw_idx only when irq is not registered.
+ * Prevent invalidating irq->irq_idx as modeset may be
+ * called many times during dfps.
+ */
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !phys_enc->dpu_kms) {
+ DPU_ERROR("invalid encoder/kms\n");
+ return;
+ }
+
+ rm = &phys_enc->dpu_kms->rm;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (adj_mode) {
+ phys_enc->cached_mode = *adj_mode;
+ DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+ }
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ struct dpu_encoder_phys_vid *vid_enc;
+ int refcount;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_VSYNC);
+
+end:
+ if (ret) {
+ DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ refcount);
+ }
+ return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_intf *intf;
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ intf = vid_enc->hw_intf;
+ ctl = phys_enc->hw_ctl;
+ if (!intf || !ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ intf != NULL, ctl != NULL);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+ dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ /*
+ * For single flush cases (dual-ctl or pp-split), skip setting the
+ * flush bit for the slave intf, since both intfs use same ctl
+ * and HW will only flush the master.
+ */
+ if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+ !dpu_encoder_phys_vid_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !hw_res) {
+ DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+ phys_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf) {
+ DPU_ERROR("invalid arg(s), hw_intf\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc, bool notify)
+{
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ pr_err("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+ if (notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+ return 0;
+ }
+
+ /* Wait for kickoff to complete */
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+ &wait_info);
+
+ if (ret == -ETIMEDOUT) {
+ dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+ } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+
+ return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc || !params) {
+ DPU_ERROR("invalid encoder/parameters\n");
+ return;
+ }
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.wait_reset_status)
+ return;
+
+ /*
+ * hw supports hardware initiated ctl reset, so before we kickoff a new
+ * frame, need to check and wait for hw initiated ctl reset completion
+ */
+ rc = ctl->ops.wait_reset_status(ctl);
+ if (rc) {
+ DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ ctl->idx, rc);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ unsigned long lock_flags;
+ int ret;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("already disabled\n");
+ return;
+ }
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ if (dpu_encoder_phys_vid_is_master(phys_enc))
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /*
+ * Wait for a vsync so we know ENABLE=0 has latched before the
+ * (connector) source of the vsyncs gets disabled. Otherwise we
+ * end up in a funny state if we re-enable before the disable
+ * latches, with the result that some of the settings for the new
+ * modeset (like the new scanout buffer) don't latch properly.
+ */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+ ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+ /*
+ * Video mode encoders must flush CTL before enabling the timing
+ * engine, so this is the point where they turn on their
+ * interfaces.
+ */
+ if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+ trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0);
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+ }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0,
+ enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (ret)
+ return;
+
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ } else {
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ }
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+ bool enable, u32 frame_count)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+ vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+ enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return 0;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+ vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ return -EINVAL;
+
+ return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_vid_is_master;
+ ops->mode_set = dpu_encoder_phys_vid_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+ ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+ ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+ ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_vid *vid_enc = NULL;
+ struct dpu_rm_hw_iter iter;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ if (!p) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+ if (!vid_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phys_enc = &vid_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp);
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ /*
+ * The hw_intf resource is permanently assigned to this encoder;
+ * other resources are allocated at atomic commit time by use case.
+ */
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+ while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+ struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+ if (hw_intf->idx == p->intf_idx) {
+ vid_enc->hw_intf = hw_intf;
+ break;
+ }
+ }
+
+ if (!vid_enc->hw_intf) {
+ ret = -EINVAL;
+ DPU_ERROR("failed to get hw_intf\n");
+ goto fail;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ irq->name = "vsync_irq";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+ irq->intr_idx = INTR_IDX_VSYNC;
+ irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+ return phys_enc;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (vid_enc)
+ dpu_encoder_phys_vid_destroy(phys_enc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000000..bfcd165e96df
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format information.
+ * DPU currently only supports interleaved RGB formats. UBWC support
+ * for a pixel format is indicated by the flag; such formats carry an
+ * additional metadata plane.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
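+
+/*
+ * For illustration, the ARGB8888 entry in the format table below
+ * expands to roughly:
+ *
+ * {
+ * .base.pixel_format = DRM_FORMAT_ARGB8888,
+ * .fetch_planes = DPU_PLANE_INTERLEAVED,
+ * .alpha_enable = true,
+ * .element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA },
+ * .unpack_count = 4, .bpp = 4, .num_planes = 1,
+ * .fetch_mode = DPU_FETCH_LINEAR,
+ * }
+ *
+ * i.e. a single-plane, 4-bytes-per-pixel linear format with alpha.
+ */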
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+/**
+ * struct dpu_media_color_map - maps DRM format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+ uint32_t format;
+ uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format table:
+ * This table holds the A5x tile formats supported.
+ */
+static const struct dpu_format dpu_format_map_tile[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010[] = {
+ PSEUDO_YUV_FMT_LOOSE(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+ DPU_FETCH_LINEAR, 2),
+};
+
+static const struct dpu_format dpu_format_map_p010_ubwc[] = {
+ PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we
+ * support.
+ * Note: not using the drm_format_*_subsampling helpers since we have
+ * formats they do not cover.
+ */
+static void _dpu_get_v_h_subsample_rate(
+ enum dpu_chroma_samp_type chroma_sample,
+ uint32_t *v_sample,
+ uint32_t *h_sample)
+{
+ if (!v_sample || !h_sample)
+ return;
+
+ switch (chroma_sample) {
+ case DPU_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case DPU_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case DPU_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
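+
+/*
+ * For example, NV12 uses DPU_CHROMA_420, so both rates are 2 and the
+ * chroma plane covers width/2 x height/2 samples, while NV16
+ * (DPU_CHROMA_H2V1) subsamples horizontally only.
+ */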
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+ static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+ };
+ int color_fmt = -1;
+ int i;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+ if (DPU_FORMAT_IS_DX(fmt)) {
+ if (fmt->unpack_tight)
+ color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+ else
+ color_fmt = COLOR_FMT_P010_UBWC;
+ } else
+ color_fmt = COLOR_FMT_NV12_UBWC;
+ return color_fmt;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+ if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ color_fmt = dpu_media_ubwc_map[i].color;
+ break;
+ }
+ return color_fmt;
+}
+
+static int _dpu_format_get_plane_sizes_ubwc(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int i;
+ int color;
+ bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ color = _dpu_format_get_media_color_ubwc(fmt);
+ if (color < 0) {
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ uint32_t y_sclines, uv_sclines;
+ uint32_t y_meta_scanlines = 0;
+ uint32_t uv_meta_scanlines = 0;
+
+ layout->num_planes = 2;
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+ y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+ uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+ uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+ uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ } else {
+ uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+ layout->num_planes = 1;
+
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+ }
+
+done:
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (DPU_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ /*
+ * linear format: allow user allocated pitches if they are greater than
+ * the requirement.
+ * ubwc format: pitch values are computed uniformly across
+ * all the components based on ubwc specifications.
+ */
+ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
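+
+/*
+ * Worked example of the linear path above for a 1920x1080 NV12 buffer
+ * (pseudo-planar 4:2:0, bpp = 1): plane_pitch[0] = 1920 and
+ * plane_size[0] = 1920 * 1080 = 2073600 for luma; the chroma plane
+ * starts as pitch 960 and size 960 * 540 = 518400, then both are
+ * doubled for the interleaved CbCr plane, giving pitch 1920 and size
+ * 1036800, for a total of 3110400 bytes (1.5 bytes per pixel).
+ */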
+
+static int dpu_format_get_plane_sizes(
+ const struct dpu_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t base_addr = 0;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (aspace)
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Cbcr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ goto done;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ goto done;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+done:
+ return 0;
+}
+
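+/*
+ * Offset summary for the metadata-carrying (UBWC) YUV case above, with
+ * size[] denoting the plane_size[] entries computed earlier:
+ *
+ *	Y meta plane:          base_addr
+ *	Y bitstream plane:     base_addr + size[2]
+ *	CbCr meta plane:       base_addr + size[2] + size[0]
+ *	CbCr bitstream plane:  base_addr + size[2] + size[0] + size[3]
+ */
+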
+static int _dpu_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (aspace)
+ layout->plane_addr[i] =
+ msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+ (fb->height > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout, fb->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DPU_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (DPU_FORMAT_IS_UBWC(layout->format) ||
+ DPU_FORMAT_IS_TILE(layout->format))
+ ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+ /* check if anything changed */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
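+/*
+ * Example usage (illustrative sketch, not part of this patch): a plane
+ * update path would typically call dpu_format_populate_layout() on each
+ * commit and treat -EAGAIN as "addresses unchanged":
+ *
+ *	ret = dpu_format_populate_layout(aspace, fb, &layout);
+ *	if (ret == -EAGAIN)
+ *		return 0;
+ *	if (ret)
+ *		return ret;
+ *
+ * where -EAGAIN means the plane addresses did not change and hardware
+ * programming can be skipped.
+ */
+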
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ int ret, i, num_base_fmt_planes;
+ const struct dpu_format *fmt;
+ struct dpu_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_dpu_format(msm_fmt);
+ num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+ ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout, cmd->pitches);
+ if (ret)
+ return ret;
+
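+	/*
+	 * Note: planes sharing one gem buffer are only counted once below,
+	 * e.g. NV12 with both planes in a single buffer has
+	 * bos[1] == bos[0], so the shared buffer contributes its size to
+	 * bos_total_size exactly once.
+	 */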
+ for (i = 0; i < num_base_fmt_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier)
+{
+ uint32_t i = 0;
+ const struct dpu_format *fmt = NULL;
+ const struct dpu_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently only support exactly zero or one modifier.
+ * All planes use the same modifier.
+ */
+ DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
+
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max)
+{
+ uint32_t i, fourcc_format;
+
+ if (!format_list || !pixel_formats)
+ return 0;
+
+ for (i = 0, fourcc_format = 0;
+ format_list->fourcc_format && i < pixel_formats_max;
+ ++format_list) {
+		/* TODO: verify that the listed format is in dpu_format_map */
+
+ /* optionally return modified formats */
+ if (pixel_modifiers) {
+ /* assume same modifier for all fb planes */
+ pixel_formats[i] = format_list->fourcc_format;
+ pixel_modifiers[i++] = format_list->modifier;
+ } else {
+ /* assume base formats grouped together */
+ if (fourcc_format != format_list->fourcc_format) {
+ fourcc_format = format_list->fourcc_format;
+ pixel_formats[i++] = fourcc_format;
+ }
+ }
+ }
+
+ return i;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000000..a54451d8d011
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: format modifier from client; all planes use the same modifier
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_get_msm_format - get a dpu_format via its msm_format base;
+ * callback registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_populate_formats - populate the given array with fourcc codes supported
+ * @format_list: pointer to list of possible formats
+ * @pixel_formats: array to populate with fourcc codes
+ * @pixel_modifiers: array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max);
+
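+/*
+ * Example usage (illustrative sketch, not part of this patch), with a
+ * hypothetical array length of 64:
+ *
+ *	uint32_t formats[64];
+ *	uint64_t modifiers[64];
+ *	uint32_t n;
+ *
+ *	n = dpu_populate_formats(sblk->format_list, formats, modifiers,
+ *				 ARRAY_SIZE(formats));
+ *
+ * populates up to 64 fourcc/modifier pairs from a catalog format list
+ * such as the per-SSPP format_list.
+ */
+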
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * dpu non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_format base of a dpu_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ * are the same as before or 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 000000000000..58d29e43faef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&hw_blk->list);
+ hw_blk->type = type;
+ hw_blk->id = id;
+ atomic_set(&hw_blk->refcount, 0);
+
+ if (ops)
+ hw_blk->ops = *ops;
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_add(&hw_blk->list, &dpu_hw_blk_list);
+ mutex_unlock(&dpu_hw_blk_lock);
+
+ return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk: pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ if (atomic_read(&hw_blk->refcount))
+ pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+ hw_blk->id);
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_del(&hw_blk->list);
+ mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+ struct dpu_hw_blk *curr;
+ int rc, refcount;
+
+ if (!hw_blk) {
+ mutex_lock(&dpu_hw_blk_lock);
+ list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+ if ((curr->type != type) ||
+ (id >= 0 && curr->id != id) ||
+ (id < 0 &&
+ atomic_read(&curr->refcount)))
+ continue;
+
+ hw_blk = curr;
+ break;
+ }
+ mutex_unlock(&dpu_hw_blk_lock);
+ }
+
+ if (!hw_blk) {
+ pr_debug("no hw_blk:%d\n", type);
+ return NULL;
+ }
+
+ refcount = atomic_inc_return(&hw_blk->refcount);
+
+ if (refcount == 1 && hw_blk->ops.start) {
+ rc = hw_blk->ops.start(hw_blk);
+ if (rc) {
+ pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
+ goto error_start;
+ }
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+ hw_blk->id, refcount);
+ return hw_blk;
+
+error_start:
+ dpu_hw_blk_put(hw_blk);
+ return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+ atomic_read(&hw_blk->refcount));
+
+ if (!atomic_read(&hw_blk->refcount)) {
+ pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+ return;
+ }
+
+ if (atomic_dec_return(&hw_blk->refcount))
+ return;
+
+ if (hw_blk->ops.stop)
+ hw_blk->ops.stop(hw_blk);
+}
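+
+/*
+ * Typical lifecycle (illustrative sketch, not part of this patch):
+ * a driver embeds struct dpu_hw_blk in its own context, registers it
+ * with dpu_hw_blk_init(), and clients acquire/release it by refcount;
+ * ctx and TYPE below are hypothetical names:
+ *
+ *	dpu_hw_blk_init(&ctx->base, TYPE, idx, &ops);
+ *	...
+ *	blk = dpu_hw_blk_get(NULL, TYPE, -1);
+ *	...
+ *	dpu_hw_blk_put(blk);
+ *	...
+ *	dpu_hw_blk_destroy(&ctx->base);
+ *
+ * The first get on a block invokes ops.start, the last put invokes
+ * ops.stop.
+ */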
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 000000000000..0f4ca8af1ec5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+ int (*start)(struct dpu_hw_blk *);
+ void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+ struct list_head list;
+ u32 type;
+ int id;
+ atomic_t refcount;
+ struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000000..44ee06398b1d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH 2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+#define SSPP_UNITY_SCALE 1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .has_dest_scaler = true,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xE4,
+ .features = 0
+ },
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+ .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .maxhdeciexp = MAX_HORZ_DECIMATION,
+ .maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .id = DPU_SSPP_SCALER_QSEED3, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .virt_format_list = plane_formats, \
+ }
+
+#define _DMA_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .format_list = plane_formats, \
+ .virt_format_list = plane_formats, \
+ }
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = VIG_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_VIG, \
+ .clk_ctrl = _clkctrl \
+ }
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = DMA_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_DMA, \
+ .clk_ctrl = _clkctrl \
+ }
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+ SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+ sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+ SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+ sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+ SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+ sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+ SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+ sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+ SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+ sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+ SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+ sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+ SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+ sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+ SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+ sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+ 0xb0, 0xc8, 0xe0, 0xf8, 0x110
+ },
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x320, \
+ .features = MIXER_SDM845_MASK, \
+ .sblk = &sdm845_lm_sblk, \
+ .ds = _ds, \
+ .pingpong = _pp, \
+ .lm_pair_mask = (1 << _lmpair) \
+ }
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+ .name = "ds_top_0", .id = DS_TOP,
+ .base = 0x60000, .len = 0xc,
+ .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+ .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x800, \
+ .features = DPU_SSPP_SCALER_QSEED3, \
+ .top = &sdm845_ds_top \
+ }
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+ DS_BLK("ds_0", DS_0, 0x800),
+ DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+ .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_SPLIT_MASK, \
+ .sblk = &sdm845_pp_sblk_te \
+ }
+#define PP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_MASK, \
+ .sblk = &sdm845_pp_sblk \
+ }
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x280, \
+ .type = _type, \
+ .controller_id = _ctrl_id, \
+ .prog_fetch_lines_worst_case = 24 \
+ }
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+ {
+ .name = "cdm_0", .id = CDM_0,
+ .base = 0x79200, .len = 0x224,
+ .features = 0,
+ .intf_connect = BIT(INTF_3),
+ },
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+ {
+ .name = "vbif_0", .id = VBIF_0,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+ .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+ {.fl = 4, .lut = 0x357},
+ {.fl = 5, .lut = 0x3357},
+ {.fl = 6, .lut = 0x23357},
+ {.fl = 7, .lut = 0x223357},
+ {.fl = 8, .lut = 0x2223357},
+ {.fl = 9, .lut = 0x22223357},
+ {.fl = 10, .lut = 0x222223357},
+ {.fl = 11, .lut = 0x2222223357},
+ {.fl = 12, .lut = 0x22222223357},
+ {.fl = 13, .lut = 0x222222223357},
+ {.fl = 14, .lut = 0x1222222223357},
+ {.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x344556677},
+ {.fl = 11, .lut = 0x3344556677},
+ {.fl = 12, .lut = 0x23344556677},
+ {.fl = 13, .lut = 0x223344556677},
+ {.fl = 14, .lut = 0x1223344556677},
+ {.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .core_ib_ff = "6.0",
+ .core_clk_ff = "1.0",
+ .comp_ratio_rt =
+ "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+ .comp_ratio_nrt =
+ "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+ .entries = sdm845_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+ .entries = sdm845_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+ .entries = sdm845_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sdm845_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .ds_count = ARRAY_SIZE(sdm845_ds),
+ .ds = sdm845_ds,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .cdm_count = ARRAY_SIZE(sdm845_cdm),
+ .cdm = sdm845_cdm,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sdm845_perf_data,
+ };
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+ kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+ int i;
+ struct dpu_mdss_cfg *dpu_cfg;
+
+ dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+ if (!dpu_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ cfg_handler[i].cfg_init(dpu_cfg);
+ dpu_cfg->hwversion = hw_rev;
+ return dpu_cfg;
+ }
+ }
+
+ DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+ dpu_hw_catalog_deinit(dpu_cfg);
+ return ERR_PTR(-ENODEV);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 000000000000..f0cb0d4fc80e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Maximum hardware block count: e.g. at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases the current design supports at most
+ * 12 hardware blocks of any one type.
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev) ((rev) >> 28)
+#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
+ (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
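+/*
+ * Example: DPU_HW_VER(4, 0, 1) packs to 0x40000001, from which
+ * DPU_HW_MAJOR() recovers 4, DPU_HW_MINOR() 0 and DPU_HW_STEP() 1.
+ */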
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+ DPU_HW_UBWC_VER_10 = 0x100,
+ DPU_HW_UBWC_VER_20 = 0x200,
+ DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev) ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
+ * compression initial revision
+ * @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX Maximum value
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_BWC,
+ DPU_MDP_UBWC_1_0,
+ DPU_MDP_UBWC_1_5,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support for color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support for 10-bit color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SRC = 0x1,
+ DPU_SSPP_SCALER_QSEED2,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ * framebuffer planes
+ */
+struct dpu_format_extended {
+ uint32_t fourcc_format;
+ uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ struct dpu_qos_lut_entry *entries;
+};
+
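+/*
+ * Illustrative lookup sketch (hypothetical helper, not part of this
+ * patch): callers scan a table in order and take the first entry whose
+ * fill level covers the request, with fl == 0 marking the default
+ * entry at the end of the table:
+ *
+ *	static u64 dpu_qos_lut_lookup(const struct dpu_qos_lut_tbl *tbl,
+ *				      u32 fl)
+ *	{
+ *		u32 i;
+ *
+ *		for (i = 0; i < tbl->nentry; i++)
+ *			if (!tbl->entries[i].fl || fl <= tbl->entries[i].fl)
+ *				return tbl->entries[i].lut;
+ *		return 0;
+ *	}
+ */
+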
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width supported.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @smart_dma_rev Supported version of SmartDMA feature.
+ * @ubwc_version UBWC feature version (0x0 for not supported)
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ u32 smart_dma_rev;
+ u32 ubwc_version;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max line width supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ * (max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ * (max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+ u32 maxlinewidth;
+ u32 pixel_ram_size;
+ u32 maxhdeciexp;
+ u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported(without DECIMATION)
+ * @maxupscale: maxupscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic_blk:
+ * @memcolor_blk:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+ const struct dpu_sspp_blks_common *common;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ struct dpu_src_blk src_blk;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+ struct dpu_pp_blk hsic_blk;
+ struct dpu_pp_blk memcolor_blk;
+ struct dpu_pp_blk pcc_blk;
+ struct dpu_pp_blk igc_blk;
+
+ const struct dpu_format_extended *format_list;
+ const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+ struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @ubwc_static: ubwc static configuration
+ * @ubwc_swizzle: ubwc default swizzle setting
+ * @has_dest_scaler: indicates support of destination scaler
+ * @clk_ctrls clock control register definition
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ u32 highest_bank_bit;
+ u32 ubwc_static;
+ u32 ubwc_swizzle;
+ bool has_dest_scaler;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl clock control identifier
+ * @type sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds: ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 ds;
+ unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying features
+ * @version hw version of dest scaler
+ * @maxinputwidth maximum input line width
+ * @maxoutputwidth maximum output line width
+ * @maxupscale maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 maxinputwidth;
+ u32 maxoutputwidth;
+ u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id enum identifying this block
+ * @base register offset wrt DS top offset
+ * @features bit mask identifying features
+ * @version hw version of the qseed block
+ * @top DS top information
+ */
+struct dpu_ds_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @intf_connect Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg {
+ DPU_HW_BLK_INFO;
+ unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @type: Interface type (DSI, DP, HDMI)
+ * @controller_id: Controller instance ID when there are multiple
+ *                 interfaces of the same type
+ * @prog_fetch_lines_worst_case Worst-case number of latency lines needed
+ *                              to prefetch
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+	u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps pixels per second
+ * @ot_limit OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count length of cfg
+ * @cfg pointer to array of configuration settings with
+ * ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl number of priority levels
+ * @priority_lvl pointer to array of priority levels in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit default OT read limit
+ * @default_ot_wr_limit default OT write limit
+ * @xin_halt_timeout maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl dynamic OT write configuration table
+ * @qos_rt_tbl real-time QoS priority table
+ * @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @version version of lutdma hw block
+ * @trigger_sel_off offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low low threshold of maximum bandwidth (kbps)
+ * @max_bw_high high threshold of maximum bandwidth (kbps)
+ * @min_core_ib minimum mnoc ib vote in kbps
+ * @min_llcc_ib minimum llcc ib vote in kbps
+ * @min_dram_ib minimum dram ib vote in kbps
+ * @core_ib_ff core instantaneous bandwidth fudge factor
+ * @core_clk_ff core clock fudge factor
+ * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines undersized prefill in lines
+ * @xtra_prefill_lines extra prefill latency in lines
+ * @dest_scale_prefill_lines destination scaler latency in lines
+ * @macrotile_prefill_lines macrotile latency in lines
+ * @yuv_nv12_prefill_lines yuv_nv12 latency in lines
+ * @linear_prefill_lines linear latency in lines
+ * @downscaling_prefill_lines downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ const char *core_ib_ff;
+ const char *core_clk_ff;
+ const char *comp_ratio_rt;
+ const char *comp_ratio_nrt;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats Supported formats for dma pipe
+ * @cursor_formats Supported formats for cursor pipe
+ * @vig_formats Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+ u32 hwversion;
+
+ const struct dpu_caps *caps;
+
+ u32 mdp_count;
+ struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ struct dpu_lm_cfg *mixer;
+
+ u32 ds_count;
+ struct dpu_ds_cfg *ds;
+
+ u32 pingpong_count;
+ struct dpu_pingpong_cfg *pingpong;
+
+ u32 cdm_count;
+ struct dpu_cdm_cfg *cdm;
+
+ u32 intf_count;
+ struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ struct dpu_vbif_cfg *vbif;
+
+ u32 reg_dma_count;
+ struct dpu_reg_dma_cfg dma_cfg;
+
+ u32 ad_count;
+
+ /* Add additional block data structures here */
+
+ struct dpu_perf_cfg perf;
+ struct dpu_format_extended *dma_formats;
+ struct dpu_format_extended *cursor_formats;
+ struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+ u32 hw_rev;
+ void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves the
+ * hardcoded, target-specific catalog information in a config structure
+ * @hw_rev: caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg: pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
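+/*
+ * Example usage (illustrative sketch, not part of this patch):
+ *
+ *	struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(DPU_HW_VER_400);
+ *
+ *	if (IS_ERR(cfg))
+ *		return PTR_ERR(cfg);
+ *	...
+ *	dpu_hw_catalog_deinit(cfg);
+ *
+ * dpu_hw_catalog_init() returns an ERR_PTR() on allocation failure or
+ * for an unsupported hardware revision.
+ */
+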
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg: pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+ return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 000000000000..3c9f028628ef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV21, 0},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_NV61, 0},
+ {DRM_FORMAT_VYUY, 0},
+ {DRM_FORMAT_UYVY, 0},
+ {DRM_FORMAT_YUYV, 0},
+ {DRM_FORMAT_YVYU, 0},
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_YVU420, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_YUYV, 0},
+
+ {0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+ {DRM_FORMAT_BGRA1010102, 0},
+ {DRM_FORMAT_BGRX1010102, 0},
+ {DRM_FORMAT_RGBA1010102, 0},
+ {DRM_FORMAT_RGBX1010102, 0},
+ {DRM_FORMAT_ABGR2101010, 0},
+ {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR2101010, 0},
+ {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB2101010, 0},
+ {DRM_FORMAT_XRGB2101010, 0},
+};
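These tables are {0, 0}-terminated so consumers can walk them without a separate length; note that rgb_10bit_formats above has no sentinel, which suggests it is meant to be appended to a terminated list rather than walked directly. A minimal counting helper, assuming the fourcc member is named fourcc_format as in dpu_hw_mdss.h:

static u32 dpu_format_table_len(const struct dpu_format_extended *tbl)
{
	u32 n = 0;

	/* walk until the {0, 0} sentinel entry */
	while (tbl[n].fourcc_format)
		n++;

	return n;
}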
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000000..554874ba0c3b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static const u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static const u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static const u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static const u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
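The 3x3 coefficients above read as signed fixed point with 9 fractional bits: 0x0083 = 131/512 ~= 0.257 and 0x1fb5 = -(0x2000 - 0x1fb5)/512 ~= -0.147, which match the BT.601 limited-range RGB-to-YUV terms. A hedged decoding helper; the 13-bit width is inferred from the values, not documented in this patch:

#include <linux/bitops.h>

/* Interpret a CSC matrix entry as 13-bit two's complement with
 * 9 fractional bits (inferred format). */
static inline s32 dpu_csc_coeff_raw(u32 coeff)
{
	return sign_extend32(coeff, 12);	/* bit 12 is the sign bit */
}

/* e.g. dpu_csc_coeff_raw(0x0083) == 131 -> 131/512 ~= 0.257 (Y from R) */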
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->cdm_count; i++) {
+ if (cdm == m->cdm[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->cdm[i].base;
+ b->length = m->cdm[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CDM;
+ return &m->cdm[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+ struct dpu_csc_cfg *data)
+{
+ dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+ return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode = 0;
+ u32 out_size = 0;
+
+ if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+ opmode &= ~BIT(7);
+ else
+ opmode |= BIT(7);
+
+ /* ENABLE DWNS_H bit */
+ opmode |= BIT(1);
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_H field */
+ opmode &= ~(0x18);
+ /* CLEAR DWNS_H bit */
+ opmode &= ~BIT(1);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_H field (pixel drop is 0) */
+ opmode &= ~(0x18);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_H field (Average is 0x1) */
+ opmode &= ~(0x18);
+ opmode |= (0x1 << 0x3);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_H field (Cosite is 0x2) */
+ opmode &= ~(0x18);
+ opmode |= (0x2 << 0x3);
+ /* Co-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_H field (Offsite is 0x3) */
+ opmode &= ~(0x18);
+ opmode |= (0x3 << 0x3);
+
+ /* Off-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ pr_err("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* ENABLE DWNS_V bit */
+ opmode |= BIT(2);
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_V field */
+ opmode &= ~(0x60);
+ /* CLEAR DWNS_V bit */
+ opmode &= ~BIT(2);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_V field (pixel drop is 0) */
+ opmode &= ~(0x60);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_V field (Average is 0x1) */
+ opmode &= ~(0x60);
+ opmode |= (0x1 << 0x5);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_V field (Cosite is 0x2) */
+ opmode &= ~(0x60);
+ opmode |= (0x2 << 0x5);
+ /* Co-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_V field (Offsite is 0x3) */
+ opmode &= ~(0x60);
+ opmode |= (0x3 << 0x5);
+
+ /* Off-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= BIT(0); /* EN CDWN module */
+ else
+ opmode &= ~BIT(0);
+
+ out_size = (cfg->output_width & 0xFFFF) |
+ ((cfg->output_height & 0xFFFF) << 16);
+ DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+ DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+ ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
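Reading the register writes above back out, CDM_CDWN2_OP_MODE appears to pack: bit 0 module enable, bit 1 DWNS_H, bit 2 DWNS_V, bits [4:3] METHOD_H, bits [6:5] METHOD_V and bit 7 the 8-bit (vs 10-bit) output select. A sketch with the implied masks; the names are ours, not from a register spec:

#define CDWN2_EN		BIT(0)
#define CDWN2_DWNS_H		BIT(1)
#define CDWN2_DWNS_V		BIT(2)
#define CDWN2_METHOD_H_MASK	GENMASK(4, 3)	/* 0: drop 1: avg 2: cosite 3: offsite */
#define CDWN2_METHOD_V_MASK	GENMASK(6, 5)
#define CDWN2_OUT_8BIT		BIT(7)

/* e.g. cosite H + offsite V, 10-bit output:
 *	opmode = CDWN2_EN | CDWN2_DWNS_H | CDWN2_DWNS_V |
 *		 (0x2 << 3) | (0x3 << 5);
 */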
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cdm)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = cdm->output_fmt;
+ struct cdm_output_cfg cdm_cfg = { 0 };
+ u32 opmode = 0;
+ u32 csc = 0;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return -EINVAL;
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = BIT(0);
+ opmode |= (fmt->chroma_sample << 1);
+ cdm_cfg.intf_en = true;
+ }
+
+ csc |= BIT(2);
+ csc &= ~BIT(1);
+ csc |= BIT(0);
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+ DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+ struct cdm_output_cfg cdm_cfg = { 0 };
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+ ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+ ops->enable = dpu_hw_cdm_enable;
+ ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp)
+{
+ struct dpu_hw_cdm *c;
+ struct dpu_cdm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _cdm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_cdm_ops(&c->ops, c->caps->features);
+ c->hw_mdp = hw_mdp;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ /*
+ * Perform any default initialization for the chroma down module:
+ * set up the default CSC coefficients.
+ */
+ dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+ if (cdm)
+ dpu_hw_blk_destroy(&cdm->base);
+ kfree(cdm);
+}
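A lifecycle sketch for the init/enable/destroy path implemented above; mmio, catalog, top and cfg are assumed to be supplied by the caller:

	struct dpu_hw_cdm *cdm;

	cdm = dpu_hw_cdm_init(CDM_0, mmio, catalog, top);
	if (IS_ERR(cdm))
		return PTR_ERR(cdm);

	/* cfg.output_fmt must be YUV or enable() returns -EINVAL */
	if (cdm->ops.enable(cdm, &cfg))
		pr_warn("cdm enable failed\n");

	cdm->ops.disable(cdm);
	dpu_hw_cdm_destroy(cdm);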
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000000..5cceb1ecb8e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ const struct dpu_format *output_fmt;
+ u32 output_type;
+ int flags;
+};
+
+enum dpu_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down HW driver functions.
+ * The assumption is that these functions will be called after
+ * clocks are enabled.
+ * @setup_csc: Programs the csc matrix
+ * @setup_cdwn: Sets up the chroma down sub module
+ * @enable: Enables the output to interface and programs the
+ * output packer
+ * @disable: Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+ /**
+ * Programs the CSC matrix for conversion from RGB space to YUV space.
+ * Calling this function is optional, as the matrix is automatically
+ * set during initialization; call it only to program a matrix
+ * different from the default one.
+ * @cdm: Pointer to the chroma down context structure
+ * @data: Pointer to CSC configuration data
+ * return: 0 if success; error code otherwise
+ */
+ int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+ struct dpu_csc_cfg *data);
+
+ /**
+ * Programs the Chroma downsample part.
+ * @cdm Pointer to chroma down context
+ */
+ int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ int (*enable)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Disable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct dpu_cdm_cfg *caps;
+ enum dpu_cdm idx;
+
+ /* mdp top hw driver */
+ struct dpu_hw_mdp *hw_mdp;
+
+ /* ops */
+ struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * Should be called once before accessing any cdm.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm: pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
+
+#endif /*_DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000000..06be7cf7ce50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PREPARE 0x0d0
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+#define CTL_FLUSH_MASK_CTL BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US 2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->ctl_count; i++) {
+ if (ctl == m->ctl[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->ctl[i].base;
+ b->length = m->ctl[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CTL;
+ return &m->ctl[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+ enum dpu_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ if (!ctx)
+ return 0x0;
+
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
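The flush path above is a cache-and-commit scheme: callers OR per-block bits into the cached pending_flush_mask, and a single CTL_FLUSH write later commits them all. A hedged usage sketch through the ops table (ctl and the chosen blocks are placeholders):

	/* accumulate flush bits for the blocks touched this frame... */
	ctl->ops.update_pending_flush(ctl,
			ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0));
	ctl->ops.update_pending_flush(ctl,
			ctl->ops.get_bitmask_mixer(ctl, LM_0));

	/* ...then commit them in one CTL_FLUSH write and kick the frame */
	ctl->ops.trigger_flush(ctl);
	ctl->ops.trigger_start(ctl);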
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (sspp) {
+ case SSPP_VIG0:
+ flushbits = BIT(0);
+ break;
+ case SSPP_VIG1:
+ flushbits = BIT(1);
+ break;
+ case SSPP_VIG2:
+ flushbits = BIT(2);
+ break;
+ case SSPP_VIG3:
+ flushbits = BIT(18);
+ break;
+ case SSPP_RGB0:
+ flushbits = BIT(3);
+ break;
+ case SSPP_RGB1:
+ flushbits = BIT(4);
+ break;
+ case SSPP_RGB2:
+ flushbits = BIT(5);
+ break;
+ case SSPP_RGB3:
+ flushbits = BIT(19);
+ break;
+ case SSPP_DMA0:
+ flushbits = BIT(11);
+ break;
+ case SSPP_DMA1:
+ flushbits = BIT(12);
+ break;
+ case SSPP_DMA2:
+ flushbits = BIT(24);
+ break;
+ case SSPP_DMA3:
+ flushbits = BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ flushbits = BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ flushbits = BIT(23);
+ break;
+ default:
+ break;
+ }
+
+ return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ uint32_t flushbits = 0;
+
+ switch (lm) {
+ case LM_0:
+ flushbits = BIT(6);
+ break;
+ case LM_1:
+ flushbits = BIT(7);
+ break;
+ case LM_2:
+ flushbits = BIT(8);
+ break;
+ case LM_3:
+ flushbits = BIT(9);
+ break;
+ case LM_4:
+ flushbits = BIT(10);
+ break;
+ case LM_5:
+ flushbits = BIT(20);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ flushbits |= CTL_FLUSH_MASK_CTL;
+
+ return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(31);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(30);
+ break;
+ case INTF_2:
+ *flushbits |= BIT(29);
+ break;
+ case INTF_3:
+ *flushbits |= BIT(28);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_cdm cdm)
+{
+ switch (cdm) {
+ case CDM_0:
+ *flushbits |= BIT(26);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * it takes around 30us for the mdp to finish resetting its ctl path;
+ * poll every 20-50us so that the reset should be complete by the 1st poll
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ }
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+
+ switch (stage_cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+ } else {
+ mixercfg |= mix << 0;
+ mixercfg_ext |= ext << 0;
+ }
+ break;
+ case SSPP_VIG1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+ } else {
+ mixercfg |= mix << 3;
+ mixercfg_ext |= ext << 2;
+ }
+ break;
+ case SSPP_VIG2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 6;
+ mixercfg_ext |= ext << 4;
+ }
+ break;
+ case SSPP_VIG3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 26;
+ mixercfg_ext |= ext << 6;
+ }
+ break;
+ case SSPP_RGB0:
+ mixercfg |= mix << 9;
+ mixercfg_ext |= ext << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= mix << 12;
+ mixercfg_ext |= ext << 10;
+ break;
+ case SSPP_RGB2:
+ mixercfg |= mix << 15;
+ mixercfg_ext |= ext << 12;
+ break;
+ case SSPP_RGB3:
+ mixercfg |= mix << 29;
+ mixercfg_ext |= ext << 14;
+ break;
+ case SSPP_DMA0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 18;
+ mixercfg_ext |= ext << 16;
+ }
+ break;
+ case SSPP_DMA1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 21;
+ mixercfg_ext |= ext << 18;
+ }
+ break;
+ case SSPP_DMA2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 0;
+ }
+ break;
+ case SSPP_DMA3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 4;
+ }
+ break;
+ case SSPP_CURSOR0:
+ mixercfg_ext |= ((i + 1) & 0xF) << 20;
+ break;
+ case SSPP_CURSOR1:
+ mixercfg_ext |= ((i + 1) & 0xF) << 26;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+exit:
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
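The packing above splits each pipe's stage number across two registers: the low three bits ((i + 1) & 0x7) land in CTL_LAYER and an overflow bit (i >= 7) in CTL_LAYER_EXT, so stages 0-6 encode as 1-7 in the base register and higher stages set the ext bit. A worked example, assuming SSPP_VIG0 at blend stage 0 and SSPP_RGB0 at stage 1, both on rect 0:

	/* stage 0, VIG0: mix = 1, ext = 0 -> CTL_LAYER bits [2:0]  = 0x1 */
	/* stage 1, RGB0: mix = 2, ext = 0 -> CTL_LAYER bits [11:9] = 0x2 */
	u32 mixercfg = CTL_MIXER_BORDER_OUT | (1 << 0) | (2 << 9);
	u32 mixercfg_ext = 0;	/* no stage reached index 7 */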
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ switch (cfg->intf_mode_sel) {
+ case DPU_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case DPU_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
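As a worked reading of the CTL_TOP encoding above: video mode clears BIT(17) and bits [16:15], so for an interface id value n the register ends up as just (n & 0xF) << 4, plus the 3D mux bits when mode_3d is set; command mode sets BIT(17) and stream_sel in bits [16:15] instead. A hedged sketch (assuming 0 in mode_3d means no 3D mux):

	struct dpu_hw_intf_cfg cfg = {
		.intf = INTF_1,
		.mode_3d = 0,			/* assumed: no 3D mux */
		.intf_mode_sel = DPU_CTL_MODE_SEL_VID,
	};

	/* writes ((INTF_1 & 0xF) << 4) to CTL_TOP */
	ctl->ops.setup_intf_cfg(ctl, &cfg);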
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+ ops->trigger_start = dpu_hw_ctl_trigger_start;
+ ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->reset = dpu_hw_ctl_reset_control;
+ ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+ ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_ctl *c;
+ struct dpu_ctl_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_ctl %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000000..c66a71f8b839
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg - Describes how the DPU writes data to the output interface
+ * @intf : Interface id
+ * @mode_3d: 3d mux configuration
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the ctl path HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+ * kickoff hw operation for Sw controlled interfaces
+ * DSI cmd mode and WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * kickoff prepare is in progress hw operation for sw
+ * controlled interfaces: DSI cmd mode and WB interface
+ * are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * @Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ int (*reset)(struct dpu_hw_ctl *c);
+
+ /*
+ * wait_reset_status - checks ctl reset status
+ * @ctx : ctl path ctx pointer
+ *
+ * This function checks the ctl reset status bit.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete.
+ * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+ uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_intf blk);
+
+ int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_cdm blk);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ */
+ void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct dpu_ctl_cfg *caps;
+ int mixer_count;
+ const struct dpu_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+
+ /* ops */
+ struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path register.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys ctl driver context.
+ * Should be called to free the context.
+ * @ctx: pointer to ctl path driver context
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 000000000000..c0b7f0049365
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF 0x0
+#define MDP_INTF_0_OFF 0x6A000
+#define MDP_INTF_1_OFF 0x6A800
+#define MDP_INTF_2_OFF 0x6B000
+#define MDP_INTF_3_OFF 0x6B800
+#define MDP_INTF_4_OFF 0x6C000
+#define MDP_AD4_0_OFF 0x7C000
+#define MDP_AD4_1_OFF 0x7D000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq with i/f
+ * @intr_type: type of interrupt listed in dpu_intr_type
+ * @instance_idx: instance index of the associated HW block in DPU
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct dpu_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+ }
+};
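Each entry of the dpu_irq_map table that follows pairs an (interrupt type, instance) tuple with a reg_idx into dpu_intr_set above, so a lookup is a linear scan and the handler then uses dpu_intr_set[reg_idx].clr_off to ack. A minimal lookup sketch, assuming the enum behind intr_type is dpu_intr_type as the struct comment says:

static int dpu_irq_idx_lookup(enum dpu_intr_type intr_type, u32 instance_idx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++)
		if (dpu_irq_map[i].intr_type == intr_type &&
		    dpu_irq_map[i].instance_idx == instance_idx)
			return i;	/* i is the irq_idx */

	return -EINVAL;
}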
+
+/**
+ * IRQ mapping table - used to look up an irq_idx in this table that has
+ * a matching interrupt type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+ /* BEGIN MAP_RANGE: 32-63, INTR2 */
+ /* irq_idx: 32-35 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+ /* irq_idx: 41-45 */
+ { DPU_IRQ_TYPE_CTL_START, CTL_0,
+ DPU_INTR_CTL_0_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_1,
+ DPU_INTR_CTL_1_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_2,
+ DPU_INTR_CTL_2_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_3,
+ DPU_INTR_CTL_3_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_4,
+ DPU_INTR_CTL_4_START, 1},
+ /* irq_idx: 46-47 */
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 72-75 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 76-79 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 84-87 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 88-91 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ DPU_INTR_VIDEO_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ DPU_INTR_VIDEO_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ DPU_INTR_VIDEO_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ DPU_INTR_VIDEO_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ DPU_INTR_VIDEO_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+ /* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+ /* irq_idx: 256-259 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 260-263 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 264-267 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 268-271 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 272-275 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 276-279 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 280-283 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 284-287 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+ /* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+ /* irq_idx: 288-291 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 292-295 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 296-299 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 300-303 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 304-307 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 308-311 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 312-315 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 316-319 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+ if (intr_type == dpu_irq_map[i].intr_type &&
+ instance_idx == dpu_irq_map[i].instance_idx)
+ return i;
+ }
+
+ pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+ intr_type, instance_idx);
+ return -EINVAL;
+}
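+
+/*
+ * Illustrative usage sketch (not part of this patch): a caller would
+ * typically resolve an irq_idx once and reuse it with the other ops:
+ *
+ *	int irq_idx = dpu_hw_intr_irqidx_lookup(DPU_IRQ_TYPE_PROG_LINE,
+ *						INTF_1);
+ *	if (irq_idx >= 0)
+ *		intr->ops.enable_irq(intr, irq_idx);
+ */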
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+ uint32_t mask)
+{
+ if (!intr)
+ return;
+
+ DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *, int),
+ void *arg)
+{
+ int reg_idx;
+ int irq_idx;
+ int start_idx;
+ int end_idx;
+ u32 irq_status;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ /*
+ * The dispatcher will save the IRQ status before calling here.
+ * Now walk each saved IRQ status and find the matching irq
+ * lookup index.
+ */
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+ irq_status = intr->save_irq_status[reg_idx];
+
+ /*
+ * Each interrupt register covers a range of 32 indexes, and
+ * that mapping is static in dpu_irq_map.
+ */
+ start_idx = reg_idx * 32;
+ end_idx = start_idx + 32;
+
+ if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+ end_idx > ARRAY_SIZE(dpu_irq_map))
+ continue;
+
+ /*
+ * Search the irq map for entries matching this interrupt
+ * status. start_idx and end_idx define the search range
+ * within dpu_irq_map.
+ */
+ for (irq_idx = start_idx;
+ (irq_idx < end_idx) && irq_status;
+ irq_idx++)
+ if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+ (dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+ /*
+ * On an irq mask match, invoke the given
+ * cbfunc. cbfunc is expected to clear the
+ * interrupt status. If no cbfunc is
+ * provided, the interrupt is cleared
+ * here instead.
+ */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+ else
+ intr->ops.clear_intr_status_nolock(
+ intr, irq_idx);
+
+ /*
+ * When the callback finishes, clear the
+ * matching mask from irq_status. Once
+ * irq_status is fully cleared, the search
+ * can stop early.
+ */
+ irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+ }
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
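+
+/*
+ * Worked example of the fixed mapping (illustrative): each status
+ * register covers 32 consecutive irq_idx entries, so
+ * reg_idx == irq_idx / 32. The INTF_2 DPU_IRQ_TYPE_PROG_LINE entry at
+ * irq_idx 168 is therefore matched against save_irq_status[5],
+ * consistent with its reg_idx of 5 in dpu_irq_map.
+ */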
+
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "DPU IRQ already set:";
+ } else {
+ dbgstr = "DPU IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+ /* Clear any pending interrupts */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* Enabling interrupts with the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
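+
+/*
+ * Note on the flow above: cache_irq_mask mirrors the enable register
+ * for each reg_idx, so enabling an already-enabled irq_idx performs
+ * no register writes; the clear-then-enable sequence only runs on the
+ * first enable.
+ */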
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "DPU IRQ is already cleared:";
+ } else {
+ dbgstr = "DPU IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* Clear any pending interrupts */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+ uint32_t *mask)
+{
+ if (!intr || !mask)
+ return -EINVAL;
+
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+ return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+ int i;
+ u32 enable_mask;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ /* Read interrupt status */
+ intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[i].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+ /* and clear the interrupt */
+ if (intr->save_irq_status[i])
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+ intr->save_irq_status[i]);
+
+ /* Finally update IRQ status based on enable mask */
+ intr->save_irq_status[i] &= enable_mask;
+ }
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+
+ if (!intr)
+ return;
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ dpu_irq_map[irq_idx].irq_mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ intr_status = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[reg_idx].status_off) &
+ dpu_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+ ops->set_mask = dpu_hw_intr_set_mask;
+ ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+ ops->enable_irq = dpu_hw_intr_enable_irq;
+ ops->disable_irq = dpu_hw_intr_disable_irq;
+ ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+ ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+ ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+ ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+ void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+ hw->base_off = addr;
+ hw->blk_off = m->mdp[0].base;
+ hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intr *intr;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ __intr_offset(m, addr, &intr->hw);
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->irq_lock);
+
+ return intr;
+}
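+
+/*
+ * Typical lifetime (hypothetical sketch, error handling elided;
+ * mmio and catalog are assumed caller-provided values):
+ *
+ *	struct dpu_hw_intr *intr = dpu_hw_intr_init(mmio, catalog);
+ *
+ *	intr->ops.disable_all_irqs(intr);
+ *	intr->ops.clear_all_irqs(intr);
+ *
+ *	dpu_hw_intr_destroy(intr);
+ */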
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000000..61e4cba36562
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * dpu_intr_type - HW Interrupt Type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE: AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START: Control start
+ * @DPU_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum dpu_intr_type {
+ DPU_IRQ_TYPE_WB_ROT_COMP,
+ DPU_IRQ_TYPE_WB_WFD_COMP,
+ DPU_IRQ_TYPE_PING_PONG_COMP,
+ DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+ DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+ DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+ DPU_IRQ_TYPE_INTF_UNDER_RUN,
+ DPU_IRQ_TYPE_INTF_VSYNC,
+ DPU_IRQ_TYPE_CWB_OVERFLOW,
+ DPU_IRQ_TYPE_HIST_VIG_DONE,
+ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ DPU_IRQ_TYPE_HIST_DSPP_DONE,
+ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ DPU_IRQ_TYPE_WD_TIMER,
+ DPU_IRQ_TYPE_SFI_VIDEO_IN,
+ DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_0_IN,
+ DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_1_IN,
+ DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_2_IN,
+ DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+ DPU_IRQ_TYPE_PROG_LINE,
+ DPU_IRQ_TYPE_AD4_BL_DONE,
+ DPU_IRQ_TYPE_CTL_START,
+ DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct dpu_hw_intr *intr,
+ uint32_t reg_off,
+ uint32_t irqmask);
+
+ /**
+ * irq_idx_lookup - Look up the IRQ index for a HW interrupt type.
+ * Used by all other irq-related ops.
+ * @intr_type: Interrupt type defined in dpu_intr_type
+ * @instance_idx: HW interrupt block instance
+ * @return: irq_idx, or -EINVAL on lookup failure
+ */
+ int (*irq_idx_lookup)(
+ enum dpu_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+ * get_interrupt_statuses - Gets and stores the values of all
+ * interrupt status registers that are currently fired.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_intr_status_nolock() - clears the HW interrupts without lock
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ */
+ void (*clear_intr_status_nolock)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * get_interrupt_status - Gets the HW interrupt status, clearing it
+ * if set and requested, based on the given lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
+ * get_valid_interrupts - Gets a mask of all valid interrupt sources
+ * within DPU. These are status bits within the
+ * interrupt registers that identify the source
+ * of an IRQ, e.g. MDP, DSI or HDMI.
+ * @intr: HW interrupt handle
+ * @mask: Returned interrupt source mask
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct dpu_hw_intr *intr,
+ uint32_t *mask);
+};
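+
+/*
+ * A typical top-level ISR built on these ops might look as follows
+ * (hypothetical sketch; my_irq_cb and priv are assumed names):
+ *
+ *	intr->ops.get_interrupt_statuses(intr);
+ *	intr->ops.dispatch_irqs(intr, my_irq_cb, priv);
+ *
+ * my_irq_cb() is expected to clear the status of each irq_idx it
+ * handles, e.g. via clear_interrupt_status().
+ */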
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx entries in dpu_irq_map
+ * @irq_lock: spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000000..d280df5613c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+#define INTF_PROG_ROT_START 0x174
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->intf_count; i++) {
+ if ((intf == m->intf[i].id) &&
+ (m->intf[i].type != INTF_NONE)) {
+ b->base_off = addr;
+ b->blk_off = m->intf[i].base;
+ b->length = m->intf[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_INTF;
+ return &m->intf[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+ u32 panel_format;
+ u32 intf_cfg;
+
+ /* read interface_cfg */
+ intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+ }
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) {
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ den_polarity = 0;
+ if (ctx->cap->type == INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+ DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+ DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+ DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+ DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
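+
+/*
+ * Worked example of the arithmetic above, assuming standard CEA-861
+ * 1080p60 timings (values are illustrative, not from this patch):
+ * hsync_pulse_width = 44, h_back_porch = 148, width = 1920 and
+ * h_front_porch = 88 give hsync_period = 44 + 148 + 1920 + 88 = 2200
+ * pixels; vsync_pulse_width = 5, v_back_porch = 36, height = 1080 and
+ * v_front_porch = 4 give vsync_period = 1125 lines. With zero
+ * hsync_skew, display_v_start = (5 + 36) * 2200 = 90200 and
+ * display_v_end = (1125 - 4) * 2200 - 1 = 2466199, both in pixel
+ * clock cycles.
+ */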
+
+static void dpu_hw_intf_enable_timing_engine(
+ struct dpu_hw_intf *intf,
+ u8 enable)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ /* Note: Display interface select is handled in top block hw layer */
+ DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+ struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+ /*
+ * Fetch should always be outside the active lines. If fetching
+ * is programmed within the active region, hardware behavior is
+ * undefined.
+ */
+
+ fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
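+
+/*
+ * Illustrative caller-side use (fetch_start_counter is a hypothetical
+ * value; it must correspond to a point outside the active lines):
+ *
+ *	struct intf_prog_fetch fetch = {
+ *		.enable = 1,
+ *		.fetch_start = fetch_start_counter,
+ *	};
+ *	intf->ops.setup_prg_fetch(intf, &fetch);
+ */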
+
+static void dpu_hw_intf_get_status(
+ struct dpu_hw_intf *intf,
+ struct intf_status *s)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
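+
+/*
+ * Hypothetical CRC-test flow built on the two MISR ops (assumed
+ * usage, not part of this patch): enable MISR for a single frame,
+ * wait for that frame to complete, then read back the signature:
+ *
+ *	intf->ops.setup_misr(intf, true, 1);
+ *	...wait for frame done...
+ *	crc = intf->ops.collect_misr(intf);
+ */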
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return 0;
+
+ c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ ops->get_status = dpu_hw_intf_get_status;
+ ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
+ ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intf *c;
+ struct dpu_intf_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _intf_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_intf %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ c->mdss = m;
+ _setup_intf_ops(&c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+ if (intf)
+ dpu_hw_blk_destroy(&intf->base);
+ kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 000000000000..a79d735da68d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns if timing engine is enabled or not
+ * @setup_misr: enables/disables MISR in HW register
+ * @collect_misr: reads and stores MISR data from HW register
+ * @get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+ void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt);
+
+ void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct dpu_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct dpu_hw_intf *intf,
+ struct intf_status *status);
+
+ void (*setup_misr)(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count);
+
+ u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+ u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* intf */
+ enum dpu_intf idx;
+ const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_cfg *mdss;
+
+ /* ops */
+ struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 000000000000..4ab72b0f07a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These registers are offsets from mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_FG_COLOR_FILL_COLOR_0 0x08
+#define LM_FG_COLOR_FILL_COLOR_1 0x0C
+#define LM_FG_COLOR_FILL_SIZE 0x10
+#define LM_FG_COLOR_FILL_XY 0x14
+
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mixer_count; i++) {
+ if (mixer == m->mixer[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mixer[i].base;
+ b->length = m->mixer[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_LM;
+ return &m->mixer[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @c: mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+ const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+ int rc;
+
+ if (stage == DPU_STAGE_BASE)
+ rc = -EINVAL;
+ else if (stage <= sblk->maxblendstages)
+ rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+ else
+ rc = -EINVAL;
+
+ return rc;
+}
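+
+/*
+ * Example (illustrative): DPU_STAGE_2 maps to array index 2, so
+ * _stage_offset() returns sblk->blendstage_base[2] and the LM_BLEND*
+ * registers for that stage sit at mixer base + blendstage_base[2].
+ */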
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *mixer)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 op_mode;
+
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ if (mixer->right_mixer)
+ op_mode |= BIT(31);
+ else
+ op_mode &= ~BIT(31);
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 0x10));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 0x10));
+ }
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+ void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+ struct dpu_hw_lm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_mixer_out = dpu_hw_lm_setup_out;
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+ else
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+ ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+ ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_gc = dpu_hw_lm_gc;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mixer *c;
+ struct dpu_lm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _lm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_mixer_ops(m, &c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+ if (lm)
+ dpu_hw_blk_destroy(&lm->base);
+ kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 000000000000..e29e5dab31bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct dpu_hw_color3_cfg {
+ u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en);
+ /**
+ * setup_gc : enable/disable gamma correction feature
+ */
+ void (*setup_gc)(struct dpu_hw_mixer *mixer,
+ void *cfg);
+
+ /* setup_misr: enables/disables MISR in HW register */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count);
+
+ /* collect_misr: reads and stores MISR data from HW register */
+ u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
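+
+/*
+ * Typical programming order (hypothetical sketch; blend_op would be
+ * built from the DPU_BLEND_* flags in dpu_hw_mdss.h):
+ *
+ *	struct dpu_hw_mixer_cfg cfg = {
+ *		.out_width = 1920,
+ *		.out_height = 1080,
+ *	};
+ *	mixer->ops.setup_mixer_out(mixer, &cfg);
+ *	mixer->ops.setup_blend_config(mixer, DPU_STAGE_0, fg_alpha,
+ *				      bg_alpha, blend_op);
+ */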
+
+struct dpu_hw_mixer {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* lm */
+ enum dpu_lm idx;
+ const struct dpu_lm_cfg *cap;
+ const struct dpu_mdp_cfg *mdp;
+ const struct dpu_ctl_cfg *ctl;
+
+ /* ops */
+ struct dpu_hw_lm_ops ops;
+
+ /* store mixer info specific to display */
+ struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once per mixer before accessing it.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 000000000000..35e6bf930924
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
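+
+/*
+ * Example blend_op combinations built from the flags above
+ * (illustrative; they mirror common premultiplied and coverage alpha
+ * setups, actual usage is up to the caller):
+ *
+ *	premultiplied: DPU_BLEND_FG_ALPHA_FG_CONST |
+ *		       DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ *		       DPU_BLEND_BG_INV_ALPHA
+ *	coverage:      DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ *		       DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ *		       DPU_BLEND_BG_INV_ALPHA
+ */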
+
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_CDM,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum dpu_ds {
+ DS_TOP,
+ DS_0,
+ DS_1,
+ DS_MAX
+};
+
+enum dpu_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum dpu_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum dpu_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum dpu_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum dpu_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_DP = 0xa,
+ INTF_TYPE_MAX,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum dpu_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum dpu_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum dpu_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum dpu_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+ VBIF_0,
+ VBIF_1,
+ VBIF_MAX,
+ VBIF_RT = VBIF_0,
+ VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+ DPU_IOMMU_DOMAIN_UNSECURE,
+ DPU_IOMMU_DOMAIN_SECURE,
+ DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines how the color components are packed in a pixel
+ * @DPU_PLANE_INTERLEAVED : Color components in single plane
+ * @DPU_PLANE_PLANAR : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+ DPU_PLANE_INTERLEAVED,
+ DPU_PLANE_PLANAR,
+ DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB : No chroma subsampling
+ * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420 : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+ DPU_CHROMA_RGB,
+ DPU_CHROMA_H2V1,
+ DPU_CHROMA_H1V2,
+ DPU_CHROMA_420
+};
+
+/**
+ * dpu_fetch_type - Defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR : fetch is line by line
+ * @DPU_FETCH_TILE : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC : fetch and decompress data
+ */
+enum dpu_fetch_type {
+ DPU_FETCH_LINEAR,
+ DPU_FETCH_TILE,
+ DPU_FETCH_UBWC
+};
+
+/**
+ * Enum values are chosen to match the bit encoding
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT : column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum dpu_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/** struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing the fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-samplng type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @is_yuv: is format a yuv variant
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+ struct msm_format base;
+ enum dpu_plane_type fetch_planes;
+ u8 element[DPU_MAX_PLANES];
+ u8 bits[DPU_MAX_PLANES];
+ enum dpu_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum dpu_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+ const struct dpu_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ uint32_t plane_size[DPU_MAX_PLANES];
+ uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE (1 << 0)
+#define DPU_DBG_MASK_CDM (1 << 1)
+#define DPU_DBG_MASK_INTF (1 << 2)
+#define DPU_DBG_MASK_LM (1 << 3)
+#define DPU_DBG_MASK_CTL (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP (1 << 6)
+#define DPU_DBG_MASK_WB (1 << 7)
+#define DPU_DBG_MASK_TOP (1 << 8)
+#define DPU_DBG_MASK_VBIF (1 << 9)
+#define DPU_DBG_MASK_ROT (1 << 10)
+
+#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 000000000000..cc3a623903f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ if (pp == m->pingpong[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->pingpong[i].base;
+ b->length = m->pingpong[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_PINGPONG;
+ return &m->pingpong[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int cfg;
+
+ if (!pp || !te)
+ return -EINVAL;
+ c = &pp->hw;
+
+ cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
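+ /* initialize the write pointer just beyond start_pos + sync_threshold_start */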
+ DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+ u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+ int rc;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+ val, (val & 0xffff) >= 1, 10, timeout_us);
+
+ return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!pp)
+ return -EINVAL;
+ c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+ bool enable_external_te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+ int orig;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!pp || !info)
+ return -EINVAL;
+ c = &pp->hw;
+
+ val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 height, init;
+ u32 line = 0xFFFF;
+
+ if (!pp)
+ return 0;
+ c = &pp->hw;
+
+ init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+ height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+ if (height < init)
+ goto line_count_exit;
+
+ line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
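+ /* the counter runs relative to the vsync init value; unwrap it here */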
+ if (line < init)
+ line += (0xFFFF - init);
+ else
+ line -= init;
+
+line_count_exit:
+ return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+ const struct dpu_pingpong_cfg *hw_cap)
+{
+ ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+ ops->enable_tearcheck = dpu_hw_pp_enable_te;
+ ops->connect_external_te = dpu_hw_pp_connect_external_te;
+ ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+ ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+ ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_pingpong *c;
+ struct dpu_pingpong_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_pingpong_ops(&c->ops, c->caps);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+ if (pp)
+ dpu_hw_blk_destroy(&pp->base);
+ kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 000000000000..3caccd7d6a3e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+ /*
+ * Ratio of MDP VSYNC clock frequency (Hz) to the panel
+ * refresh rate, divided by the number of display lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
+
+struct dpu_hw_pp_vsync_info {
+ u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
+ u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
+ u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
+ u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck : program tear check values
+ * @enable_tearcheck : enables tear check
+ * @connect_external_te : connect/disconnect the external TE signal
+ * @get_vsync_info : retrieves timing info of the panel
+ * @poll_timeout_wr_ptr : poll until write pointer transmission starts
+ * @get_line_count : obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+ /**
+  * enables vsync generation, sets up the initial value of the
+  * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * read, modify, write to either set or clear listening to external TE
+ * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ */
+ int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+ bool enable_external_te);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info);
+
+ /**
+ * poll until write pointer transmission starts
+ * @Return: 0 on success, -ETIMEDOUT on timeout
+ */
+ int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+ /**
+ * Obtain current vertical line counter
+ */
+ u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_pingpong {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum dpu_pingpong idx;
+ const struct dpu_pingpong_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 000000000000..c25b52a6b219
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1 0x16C
+#define SSPP_SRC_XY_REC1 0x168
+#define SSPP_OUT_SIZE_REC1 0x160
+#define SSPP_OUT_XY_REC1 0x164
+#define SSPP_SRC_FORMAT_REC1 0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define SSPP_SRC_OP_MODE_REC1 0x17C
+#define SSPP_MULTIRECT_OPMODE 0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+#define SSPP_EXCL_REC_SIZE_REC1 0x184
+#define SSPP_EXCL_REC_XY_REC1 0x188
+
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_EXCL_REC_CTL 0x40
+#define SSPP_UBWC_STATIC_CTRL 0x44
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+#define SSPP_TRAFFIC_SHAPER_REC1 0x158
+#define SSPP_EXCL_REC_SIZE 0x1B4
+#define SSPP_EXCL_REC_XY 0x1B8
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK 19200000
+
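+/*
+ * _sspp_subblk_offset - look up the register offset of an SSPP sub-block
+ * relative to the pipe base, using the catalog sub-block description
+ */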
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+ int rc = 0;
+ const struct dpu_sspp_sub_blks *sblk;
+
+ if (!ctx)
+  return -EINVAL;
+
+ sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case DPU_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case DPU_SSPP_SCALER_QSEED2:
+ case DPU_SSPP_SCALER_QSEED3:
+ case DPU_SSPP_SCALER_RGB:
+ *idx = sblk->scaler_blk.base;
+ break;
+ case DPU_SSPP_CSC:
+ case DPU_SSPP_CSC_10BIT:
+ *idx = sblk->csc_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode)
+{
+ u32 mode_mask;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (index == DPU_SSPP_RECT_SOLO) {
+ /**
+ * if rect index is RECT_SOLO, we cannot expect a
+ * virtual plane sharing the same SSPP id. So we go
+ * and disable multirect
+ */
+ mode_mask = 0;
+ } else {
+ mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+ mode_mask |= index;
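+  /* BIT(2) selects time-multiplexed fetch; clear it for parallel mode */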
+ if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+ !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/**
+ * Setup source pixel format and flip
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 opmode = 0;
+ u32 fast_clear = 0;
+ u32 op_mode_off, unpack_pat_off, format_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+ op_mode_off = SSPP_SRC_OP_MODE;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+ format_off = SSPP_SRC_FORMAT;
+ } else {
+ op_mode_off = SSPP_SRC_OP_MODE_REC1;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+ format_off = SSPP_SRC_FORMAT_REC1;
+ }
+
+ c = &ctx->hw;
+ opmode = DPU_REG_READ(c, op_mode_off + idx);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & DPU_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & DPU_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == DPU_CHROMA_H2V1)
+ chroma_samp = DPU_CHROMA_H1V2;
+ else if (chroma_samp == DPU_CHROMA_H1V2)
+ chroma_samp = DPU_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & DPU_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & DPU_SSPP_SOLID_FILL)
+ src_format |= BIT(22);
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ DPU_FETCH_CONFIG_RESET_VALUE |
+ ctx->mdp->highest_bank_bit << 18);
+ if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ }
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (DPU_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+ else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+
+ DPU_REG_WRITE(c, format_off + idx, src_format);
+ DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+ DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+ /* clear previous UBWC error */
+ DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+ /* program SW pixel extension override for all pipes*/
+ for (color = 0; color < DPU_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
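+  /* each LR/TB register packs four 8-bit fetch/repeat counts */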
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *sspp,
+ struct dpu_hw_pixel_ext *pe,
+ void *scaler_cfg)
+{
+ u32 idx;
+ struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+ (void)pe;
+ if (!ctx || !ctx->cap || !ctx->cap->sblk || !sspp || !scaler3_cfg
+   || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+ return;
+
+ dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+ ctx->cap->sblk->scaler_blk.version,
+ sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+ u32 idx;
+
+ if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+ return 0;
+
+ return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/**
+ * dpu_hw_sspp_setup_rects() - program src/dst rectangles and plane strides
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_index)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ src_size_off = SSPP_SRC_SIZE;
+ src_xy_off = SSPP_SRC_XY;
+ out_size_off = SSPP_OUT_SIZE;
+ out_xy_off = SSPP_OUT_XY;
+ } else {
+ src_size_off = SSPP_SRC_SIZE_REC1;
+ src_xy_off = SSPP_SRC_XY_REC1;
+ out_size_off = SSPP_OUT_SIZE_REC1;
+ out_xy_off = SSPP_OUT_XY_REC1;
+ }
+
+
+ /* src and dest rect programming */
+ src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+ src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+ drm_rect_width(&cfg->src_rect);
+ dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+ dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+ drm_rect_width(&cfg->dst_rect);
+
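+ /*
+  * Each YSTRIDE register packs two 16-bit pitches: two planes in
+  * solo mode, or the same plane for rect0 (low half) and rect1
+  * (high half) in multirect mode.
+  */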
+ if (rect_index == DPU_SSPP_RECT_SOLO) {
+ ystride0 = (cfg->layout.plane_pitch[0]) |
+ (cfg->layout.plane_pitch[1] << 16);
+ ystride1 = (cfg->layout.plane_pitch[2]) |
+ (cfg->layout.plane_pitch[3] << 16);
+ } else {
+ ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+ ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+ if (rect_index == DPU_SSPP_RECT_0) {
+ ystride0 = (ystride0 & 0xFFFF0000) |
+ (cfg->layout.plane_pitch[0] & 0x0000FFFF);
+ ystride1 = (ystride1 & 0xFFFF0000)|
+ (cfg->layout.plane_pitch[2] & 0x0000FFFF);
+ } else {
+ ystride0 = (ystride0 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[0] << 16) &
+ 0xFFFF0000);
+ ystride1 = (ystride1 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[2] << 16) &
+ 0xFFFF0000);
+ }
+ }
+
+ /* rectangle register programming */
+ DPU_REG_WRITE(c, src_size_off + idx, src_size);
+ DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+ DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+ DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ int i;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO) {
+ for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+ cfg->layout.plane_addr[i]);
+ } else if (rect_mode == DPU_SSPP_RECT_0) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ }
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+ struct dpu_csc_cfg *data)
+{
+ u32 idx;
+ bool csc10 = false;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+ return;
+
+ if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+ idx += CSC_10BIT_OFFSET;
+ csc10 = true;
+ }
+
+ dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+  enum dpu_sspp_multirect_index rect_index)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+ else
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+ color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+ cfg->creq_lut >> 32);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ }
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+ u32 qos_ctrl = 0;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->vblank_en) {
+ qos_ctrl |= ((cfg->creq_vblank &
+ SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+ qos_ctrl |= ((cfg->danger_vblank &
+ SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+ qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_SSPP_SRC, &features)) {
+ c->ops.setup_format = dpu_hw_sspp_setup_format;
+ c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+ c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+ }
+
+ if (test_bit(DPU_SSPP_QOS, &features)) {
+ c->ops.setup_danger_safe_lut =
+ dpu_hw_sspp_setup_danger_safe_lut;
+ c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+ c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+ }
+
+ if (test_bit(DPU_SSPP_CSC, &features) ||
+ test_bit(DPU_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+ if (dpu_hw_sspp_multirect_enabled(c->cap))
+ c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+ }
+
+ if (test_bit(DPU_SSPP_CDP, &features))
+ c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if ((sspp < SSPP_MAX) && catalog && addr && b) {
+ for (i = 0; i < catalog->sspp_count; i++) {
+ if (sspp == catalog->sspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = catalog->sspp[i].base;
+ b->length = catalog->sspp[i].len;
+ b->hwversion = catalog->hwversion;
+ b->log_mask = DPU_DBG_MASK_SSPP;
+ return &catalog->sspp[i];
+ }
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe)
+{
+ struct dpu_hw_pipe *hw_pipe;
+ struct dpu_sspp_cfg *cfg;
+ int rc;
+
+ if (!addr || !catalog)
+ return ERR_PTR(-EINVAL);
+
+ hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ if (!hw_pipe)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(hw_pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ hw_pipe->catalog = catalog;
+ hw_pipe->mdp = &catalog->mdp[0];
+ hw_pipe->idx = idx;
+ hw_pipe->cap = cfg;
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+ rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return hw_pipe;
+
+blk_init_error:
+ kzfree(hw_pipe);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 000000000000..4d81e5f5ce1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR BIT(0)
+#define DPU_SSPP_FLIP_UD BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
+#define DPU_SSPP_ROT_90 BIT(3)
+#define DPU_SSPP_SOLID_FILL BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+ (1UL << DPU_SSPP_SCALER_QSEED2) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+ DPU_SSPP_COMP_0,
+ DPU_SSPP_COMP_1_2,
+ DPU_SSPP_COMP_2,
+ DPU_SSPP_COMP_3,
+
+ DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single-rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+ DPU_SSPP_RECT_SOLO = 0,
+ DPU_SSPP_RECT_0,
+ DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+ DPU_SSPP_MULTIRECT_NONE = 0,
+ DPU_SSPP_MULTIRECT_PARALLEL,
+ DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+ DPU_FRAME_LINEAR,
+ DPU_FRAME_TILE_A4X,
+ DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+ DPU_SCALE_FILTER_NEAREST = 0,
+ DPU_SCALE_FILTER_BIL,
+ DPU_SCALE_FILTER_PCMN,
+ DPU_SCALE_FILTER_CA,
+ DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+ DPU_SCALE_ALPHA_PIXEL_REP,
+ DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+ DPU_SCALE_2D_4X4,
+ DPU_SCALE_2D_CIR,
+ DPU_SCALE_1D_SEP,
+ DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[DPU_MAX_PLANES];
+ int phase_step_x[DPU_MAX_PLANES];
+ int init_phase_y[DPU_MAX_PLANES];
+ int phase_step_y[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels extension in left, right, top and bottom direction
+ * for all color components. This pixel value for each color component
+ * should be sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[DPU_MAX_PLANES];
+ int num_ext_pxls_right[DPU_MAX_PLANES];
+ int num_ext_pxls_top[DPU_MAX_PLANES];
+ int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be overfetched in left, right, top and
+ * bottom directions from source image for scaling.
+ */
+ int left_ftch[DPU_MAX_PLANES];
+ int right_ftch[DPU_MAX_PLANES];
+ int top_ftch[DPU_MAX_PLANES];
+ int btm_ftch[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int left_rpt[DPU_MAX_PLANES];
+ int right_rpt[DPU_MAX_PLANES];
+ int top_rpt[DPU_MAX_PLANES];
+ int btm_rpt[DPU_MAX_PLANES];
+
+ uint32_t roi_w[DPU_MAX_PLANES];
+ uint32_t roi_h[DPU_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+ enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: src ROI; the caller accounts for operations such as
+ *            decimation and flip when programming this field
+ * @dst_rect: destination ROI.
+ * @index: index of the rectangle of SSPP
+ * @mode: parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+ struct dpu_hw_fmt_layout layout;
+ struct drm_rect src_rect;
+ struct drm_rect dst_rect;
+ enum dpu_sspp_multirect_index index;
+ enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
+ * @creq_lut: LUT for generating the creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/**
+ * CDP preload-ahead address sizes
+ */
+enum {
+ DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+ u64 size;
+ u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+ /**
+  * setup_format - setup pixel format, cropping rectangle and flip
+  * @ctx: Pointer to pipe context
+  * @fmt: Pointer to dpu format structure
+ * @flags: Extra flags for format config
+ * @index: rectangle index in multirect
+ */
+ void (*setup_format)(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_rects)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_pe - setup pipe pixel extension
+ * @ctx: Pointer to pipe context
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_pe)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+  * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+  * @color: Fill color value
+ * @index: rectangle index in multirect
+ */
+ void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_multirect - setup multirect configuration
+ * @ctx: Pointer to pipe context
+ * @index: rectangle index in multirect
+ * @mode: parallel fetch / time multiplex multirect mode
+ */
+ void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_sharp_cfg *cfg);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @pipe_cfg: Pointer to pipe configuration
+ * @pe_cfg: Pointer to pixel extension configuration
+ * @scaler_cfg: Pointer to scaler configuration
+ */
+ void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct dpu_hw_pixel_ext *pe_cfg,
+ void *scaler_cfg);
+
+ /**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ */
+ void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_mdp_cfg *mdp;
+
+ /* Pipe */
+ enum dpu_sspp idx;
+ const struct dpu_sspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_pipe - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once before accessing every pipe.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: is this a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe);
+
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context
+ * should be called during Hw pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
+#endif /*_DPU_HW_SSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 000000000000..db2798e862fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE 0x28
+#define UBWC_STATIC 0x144
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+ ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE) / (MDP_TICK_COUNT * (fps))))
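+/*
+ * Example: a 60 fps display yields a load value of
+ * (1000 * 19200) / (16 * 60) = 20000 timer ticks.
+ */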
+
+#define DCE_SEL 0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = lower_pipe;
+ } else {
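+   /* video mode: upper and lower pipes take opposite trigger muxes */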
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 out_ctl = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->intf_en)
+ out_ctl |= BIT(19);
+
+ DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
+ if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+ return false;
+
+ reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = DPU_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ DPU_REG_WRITE(c, reg_off, new_val);
+
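+ /* return true if the clock was not already forced on before this call */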
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, DANGER_STATUS);
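+ /* danger level is reported as a 2-bit value per client */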
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
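+ /* bit offset of each ping-pong's 4-bit vsync source select field */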
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+ return;
+
+ c = &mdp->hw;
+ reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
+
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+ }
+ DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case DPU_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ DPU_REG_WRITE(c, wd_load_value,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = DPU_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ DPU_REG_WRITE(c, wd_ctl2, reg);
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
+ }
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, SAFE_STATUS);
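+ /* safe status is a single bit per client */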
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_blk_reg_map c;
+
+ if (!mdp || !m)
+ return;
+
+ if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+ return;
+
+ /* force blk offset to zero to access beginning of register region */
+ c = mdp->hw;
+ c.blk_off = 0x0;
+ DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
+
+ DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+ ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+ ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = dpu_hw_get_danger_status;
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+ ops->get_safe_status = dpu_hw_get_safe_status;
+ ops->reset_ubwc = dpu_hw_reset_ubwc;
+ ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->mdp_count; i++) {
+ if (mdp == m->mdp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mdp[i].base;
+ b->length = m->mdp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_TOP;
+ return &m->mdp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mdp *mdp;
+ const struct dpu_mdp_cfg *cfg;
+ int rc;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &mdp->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(mdp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ mdp->idx = idx;
+ mdp->caps = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+ rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+ return mdp;
+
+blk_init_error:
+ kfree(mdp);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+ if (mdp)
+ dpu_hw_blk_destroy(&mdp->base);
+ kfree(mdp);
+}
+
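+/*
+ * Usage sketch (editor's illustration, hypothetical caller): once the
+ * register space is mapped, the KMS layer can bring up the TOP block and
+ * sample status through the ops table. MDP_TOP and the catalog pointer
+ * are assumed to come from dpu_hw_mdss.h / dpu_hw_catalog.h.
+ *
+ *	struct dpu_hw_mdp *mdp;
+ *	struct dpu_danger_safe_status status = { 0 };
+ *
+ *	mdp = dpu_hw_mdptop_init(MDP_TOP, mmio_base, catalog);
+ *	if (IS_ERR(mdp))
+ *		return PTR_ERR(mdp);
+ *
+ *	if (mdp->ops.get_safe_status)
+ *		mdp->ops.get_safe_status(mdp, &status);
+ *	dpu_hw_mdp_destroy(mdp);
+ */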
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 000000000000..899925aaa6d7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum dpu_intf_mode mode;
+ enum dpu_intf intf;
+ bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en : enable/disable interface output
+ */
+struct cdm_output_cfg {
+ bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and configure the
+ * watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+ /** setup_split_pipe() : Registers are not double buffered, this
+ * function should be called before timing control enable
+ * @mdp : mdp top context driver
+ * @cfg : upper and lower part of pipe configuration
+ */
+ void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *p);
+
+ /**
+ * setup_cdm_output() : Setup selection control of the cdm data path
+ * @mdp : mdp top context driver
+ * @cfg : cdm output configuration
+ */
+ void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * setup_vsync_source - setup vsync source configuration details
+ * @mdp: mdp top context driver
+ * @cfg: vsync source selection configuration
+ */
+ void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * reset_ubwc - reset top level UBWC configuration
+ * @mdp: mdp top context driver
+ * @m: pointer to mdss catalog data
+ */
+ void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+ /**
+ * intf_audio_select - select the external interface for audio
+ * @mdp: mdp top context driver
+ */
+ void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* top */
+ enum dpu_mdp idx;
+ const struct dpu_mdp_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 000000000000..4cabae480a7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,368 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* using a file static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_MISR_CTRL 0x70
+#define QSEED3_MISR_SIGNATURE_0 0x74
+#define QSEED3_MISR_SIGNATURE_1 0x78
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name)
+{
+ /* don't need to mutex protect this */
+ if (c->log_mask & dpu_hw_util_log_mask)
+ DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+ name, c->blk_off + reg_off, val);
+ writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+ return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+ return &dpu_hw_util_log_mask;
+}
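+
+/*
+ * Example (editor's note): register tracing is gated on the AND of the
+ * per-block log_mask in the reg map and this global mask, so a debug
+ * path can trace one block class at a time, e.g.
+ *
+ *	*dpu_hw_util_get_log_mask_ptr() = DPU_DBG_MASK_VBIF;
+ *
+ * after which every dpu_reg_write() on a map whose log_mask contains
+ * DPU_DBG_MASK_VBIF is printed via DPU_DEBUG_DRIVER(). The same mask is
+ * exposed at runtime through the hw_log_mask debugfs file in dpu_kms.c.
+ */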
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + offset
+ + off_tbl[filter][i][1];
+ lut_len = off_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+
+ if (!de_cfg->enable)
+ return;
+
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format)
+{
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+ if (!scaler3_cfg->enable)
+ goto end;
+
+ op_mode |= BIT(0);
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (format && DPU_FORMAT_IS_YUV(format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+ op_mode |= BIT(8);
+ }
+
+ if (scaler3_cfg->lut_flag)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
+ scaler_offset);
+
+ if (scaler_version == 0x1002) {
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+ } else {
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+ DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+ if (format && !DPU_FORMAT_IS_DX(format))
+ op_mode |= BIT(14);
+
+ if (format && format->alpha_enable) {
+ op_mode |= BIT(10);
+ if (scaler_version == 0x1002)
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ else
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+
+ DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
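+
+/*
+ * Worked example (editor's note): the phase step registers hold a 24-bit
+ * fixed-point src/dst ratio; assuming the 3.21 format used by the plane
+ * code, a 2:1 horizontal downscale of the Y/RGB plane would program
+ *
+ *	scaler3_cfg->phase_step_x[0] = mult_frac(1 << 21, src_w, dst_w);
+ *
+ * which evaluates to 2 << 21 = 0x400000 when src_w == 2 * dst_w, and
+ * lands in QSEED3_PHASE_STEP_Y_H after the 0xFFFFFF mask above.
+ */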
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset)
+{
+ return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10)
+{
+ static const u32 matrix_shift = 7;
+ u32 clamp_shift = csc10 ? 16 : 8;
+ u32 val;
+
+ /* matrix coeff - convert S15.16 to S4.9 */
+ val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off, val);
+ val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+ DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
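+
+/*
+ * Worked example (editor's note): the matrix path above drops 7
+ * fractional bits to convert S15.16 coefficients into the S4.9 register
+ * format, so unity gain converts as
+ *
+ *	0x00010000 (1.0 in S15.16) >> 7 == 0x200 (1.0 in S4.9)
+ *
+ * and an identity setup would pass csc_mv[] = { 0x10000, 0, 0, 0,
+ * 0x10000, 0, 0, 0, 0x10000 } with zeroed bias vectors and full-range
+ * clamp values.
+ */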
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 000000000000..1240f505ca53
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,348 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+struct dpu_format_extended;
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length: length of register block
+ * @xin_id: xin id
+ * @hwversion: mdss hw version number
+ * @log_mask: log mask used to filter register trace output
+ */
+struct dpu_hw_blk_reg_map {
+ void __iomem *base_off;
+ u32 blk_off;
+ u32 length;
+ u32 xin_id;
+ u32 hwversion;
+ u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip: clip shift
+ * @limit: limit value
+ * @thr_quiet: quiet threshold
+ * @thr_dieout: dieout threshold
+ * @thr_low: low threshold
+ * @thr_high: high threshold
+ * @prec_shift: precision shift
+ * @adjust_a: A-coefficients for mapping curve
+ * @adjust_b: B-coefficients for mapping curve
+ * @adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @init_phase_x: horizontal initial phase
+ * @phase_step_x: horizontal phase step
+ * @init_phase_y: vertical initial phase
+ * @phase_step_y: vertical phase step
+ * @preload_x: horizontal preload value
+ * @preload_y: vertical preload value
+ * @src_width: source width
+ * @src_height: source height
+ * @dst_width: destination width
+ * @dst_height: destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg: blend coefficients configuration
+ * @lut_flag: scaler LUT update flags
+ * 0x1 swap LUT bank
+ * 0x2 update 2D filter LUT
+ * 0x4 update y circular filter LUT
+ * 0x8 update uv circular filter LUT
+ * 0x10 update y separable filter LUT
+ * 0x20 update uv separable filter LUT
+ * @dir_lut_idx: 2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut: pointer to 2D LUT
+ * @cir_lut: pointer to circular filter LUT
+ * @sep_lut: pointer to separable filter LUT
+ * @de: detail enhancer configuration
+ */
+struct dpu_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ u32 preload_x[DPU_MAX_PLANES];
+ u32 preload_y[DPU_MAX_PLANES];
+ u32 src_width[DPU_MAX_PLANES];
+ u32 src_height[DPU_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_hw_scaler3_de_cfg de;
+};
+
+struct dpu_hw_scaler3_lut_cfg {
+ bool is_configured;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+ /*
+ * Number of pixels ext in left, right, top and bottom direction
+ * for all color components.
+ */
+ int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+ int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be overfetched from the source
+ * image in the left, right, top and bottom directions for scaling.
+ */
+ int32_t left_ftch[DPU_MAX_PLANES];
+ int32_t right_ftch[DPU_MAX_PLANES];
+ int32_t top_ftch[DPU_MAX_PLANES];
+ int32_t btm_ftch[DPU_MAX_PLANES];
+ /*
+ * Number of pixels that need to be repeated in the left, right,
+ * top and bottom directions for scaling.
+ */
+ int32_t left_rpt[DPU_MAX_PLANES];
+ int32_t right_rpt[DPU_MAX_PLANES];
+ int32_t top_rpt[DPU_MAX_PLANES];
+ int32_t btm_rpt[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct dpu_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ uint32_t preload_x[DPU_MAX_PLANES];
+ uint32_t preload_y[DPU_MAX_PLANES];
+ uint32_t src_width[DPU_MAX_PLANES];
+ uint32_t src_height[DPU_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+ /* for Y(RGB) and UV planes */
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_drm_de_v1 de;
+};
+
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
+
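+/*
+ * Editor's note: the #off stringification means log output carries the
+ * register name for free, e.g.
+ *
+ *	DPU_REG_WRITE(c, QSEED3_OP_MODE + offset, op_mode);
+ *
+ * expands to dpu_reg_write(c, QSEED3_OP_MODE + offset, op_mode,
+ * "QSEED3_OP_MODE + offset") and is reported under that label whenever
+ * the block's log mask is enabled.
+ */
+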
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 000000000000..d43905525f92
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
+#define VBIF_XIN_PND_ERR 0x0190
+#define VBIF_XIN_SRC_ERR 0x0194
+#define VBIF_XIN_CLR_ERR 0x019C
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 pnd, src;
+
+ if (!vbif)
+ return;
+ c = &vbif->hw;
+ pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+ src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+ if (pnd_errors)
+ *pnd_errors = pnd;
+ if (src_errors)
+ *src_errors = src;
+
+ DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register so
+ * 16 bit fields maximum across two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0xFF << bit_off);
+ reg_val |= (limit) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+ u32 limit;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ limit = (reg_val >> bit_off) & 0xFF;
+
+ return limit;
+}
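+
+/*
+ * Worked example (editor's note): limits are packed one byte per client,
+ * four clients per register, so for xin_id = 6 on the read path
+ *
+ *	reg_off = VBIF_IN_RD_LIM_CONF0 + (6 / 4) * 4 -> VBIF_IN_RD_LIM_CONF1
+ *	bit_off = (6 % 4) * 8                        -> 16
+ *
+ * i.e. the limit occupies bits [23:16] of VBIF_IN_RD_LIM_CONF1.
+ */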
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+ if (enable)
+ reg_val |= BIT(xin_id);
+ else
+ reg_val &= ~BIT(xin_id);
+
+ DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+ return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+ if (!vbif)
+ return;
+
+ c = &vbif->hw;
+
+ reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+ reg_shift = (xin_id & 0x7) * 4;
+
+ reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+ mask = 0x7 << reg_shift;
+
+ reg_val &= ~mask;
+ reg_val |= (remap_level << reg_shift) & mask;
+
+ reg_val_lvl &= ~mask;
+ reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
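+
+/*
+ * Worked example (editor's note): the remap registers are banked by
+ * priority level and by the high/low half of the xin space. For
+ * xin_id = 9 and level = 2:
+ *
+ *	reg_high  = ((9 & 0x8) >> 3) * 4 + (2 * 8) = 20 (0x14)
+ *	reg_shift = (9 & 0x7) * 4                  = 4
+ *
+ * so the 3-bit remap value is written to bits [6:4] of
+ * VBIF_XINL_QOS_RP_REMAP_000 + 0x14 and the matching LVL register.
+ */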
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+ unsigned long cap)
+{
+ ops->set_limit_conf = dpu_hw_set_limit_conf;
+ ops->get_limit_conf = dpu_hw_get_limit_conf;
+ ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+ ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+ if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+ ops->set_qos_remap = dpu_hw_set_qos_remap;
+ ops->set_mem_type = dpu_hw_set_mem_type;
+ ops->clear_errors = dpu_hw_clear_errors;
+ ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->vbif_count; i++) {
+ if (vbif == m->vbif[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->vbif[i].base;
+ b->length = m->vbif[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_VBIF;
+ return &m->vbif[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_vbif *c;
+ const struct dpu_vbif_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_vbif_ops(&c->ops, c->cap->features);
+
+ /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+ return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+ kfree(vbif);
+}
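+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller): one handle
+ * is created per VBIF instance in the catalog and driven through the ops
+ * table, e.g. capping outstanding reads for xin client 7 (VBIF_RT is
+ * assumed to be the realtime instance id from dpu_hw_mdss.h):
+ *
+ *	struct dpu_hw_vbif *vbif;
+ *
+ *	vbif = dpu_hw_vbif_init(VBIF_RT, mmio_base, catalog);
+ *	if (IS_ERR(vbif))
+ *		return PTR_ERR(vbif);
+ *
+ *	vbif->ops.set_limit_conf(vbif, 7, true, 0x10);
+ *	dpu_hw_vbif_destroy(vbif);
+ */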
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 000000000000..471ff673c045
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id);
+
+ /**
+ * set_qos_remap - set QoS priority remap
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @level: priority level
+ * @remap_level: remapped level
+ */
+ void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value);
+
+ /**
+ * clear_errors - clear any vbif errors
+ * This function clears any detected pending/source errors
+ * on the VBIF interface, and optionally returns the detected
+ * error mask(s).
+ * @vbif: vbif context driver
+ * @pnd_errors: pointer to pending error reporting variable
+ * @src_errors: pointer to source error reporting variable
+ */
+ void (*clear_errors)(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+ /* base */
+ struct dpu_hw_blk_reg_map hw;
+
+ /* vbif */
+ enum dpu_vbif idx;
+ const struct dpu_vbif_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 000000000000..5b2bc9b65b15
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register offsets, bit fields and defines
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define HDMI_DP_CORE_SELECT 0x408
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 000000000000..790d39f816dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ for (i--; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+
+ return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+}
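+
+/*
+ * Usage sketch (editor's illustration): the expected call sequence for a
+ * dss_module_power consumer is get -> set_rate -> enable, with the
+ * inverse order on teardown:
+ *
+ *	rc = msm_dss_get_clk(dev, mp->clk_config, mp->num_clk);
+ *	if (!rc)
+ *		rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ *	if (!rc)
+ *		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ *	...
+ *	msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ *	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ */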
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ int i, rc = 0;
+ const char *clock_name;
+ int num_clk = 0;
+
+ if (!pdev || !mp)
+ return -EINVAL;
+
+ mp->num_clk = 0;
+ num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (num_clk <= 0) {
+ pr_debug("clocks are not defined\n");
+ return 0;
+ }
+
+ mp->clk_config = devm_kcalloc(&pdev->dev, num_clk,
+ sizeof(struct dss_clk), GFP_KERNEL);
+ if (!mp->clk_config)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i,
+ &clock_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ i);
+ break;
+ }
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ goto err;
+ }
+
+ rc = of_clk_set_defaults(pdev->dev.of_node, false);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ goto err;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ u32 rate = clk_get_rate(mp->clk_config[i].clk);
+
+ if (!rate)
+ continue;
+ mp->clk_config[i].rate = rate;
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+ }
+
+ mp->num_clk = num_clk;
+ return 0;
+
+err:
+ msm_dss_put_clk(mp->clk_config, num_clk);
+ return rc;
+}
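+
+/*
+ * Example (editor's sketch, hypothetical DT node): msm_dss_parse_clock()
+ * consumes a standard clock-names property, e.g.
+ *
+ *	mdss: mdss@ae00000 {
+ *		clocks = <&dispcc IFACE_CLK>, <&dispcc CORE_CLK>;
+ *		clock-names = "iface", "core";
+ *	};
+ *
+ * Every clock starts out as DSS_CLK_AHB; any clock that already reports
+ * a non-zero rate after of_clk_set_defaults() is reclassified as
+ * DSS_CLK_PCLK so that later msm_dss_clk_set_rate() calls program it.
+ */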
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 000000000000..bc07381d7429
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct dss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum dss_clk_type {
+ DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+ DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum dss_clk_type type;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+struct dss_module_power {
+ unsigned int num_gpio;
+ struct dss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct dss_clk *clk_config;
+};
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 000000000000..d5e6ce0140cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return;
+ }
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int rc;
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ rc = dpu_core_irq_postinstall(dpu_kms);
+
+ return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 000000000000..3e147f7176e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * struct dpu_irq_controller - MDSS level interrupt controller context
+ * @enabled_mask: enable status of MDSS level interrupt
+ * @domain: interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms: pointer to kms context
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 000000000000..7dd6bd2d6d37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+ "mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DRM_ERROR("failed to get memory resource: %s\n", name);
+ return 0;
+ }
+
+ return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+ bool danger_status)
+{
+ struct dpu_kms *kms = (struct dpu_kms *)s->private;
+ struct msm_drm_private *priv;
+ struct dpu_danger_safe_status status;
+ int i;
+
+ if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ DPU_ERROR("invalid arg(s)\n");
+ return 0;
+ }
+
+ priv = kms->dev->dev_private;
+ memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+ pm_runtime_get_sync(&kms->pdev->dev);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ if (kms->hw_mdp->ops.get_danger_status)
+ kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ &status);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+ &status);
+ }
+ pm_runtime_put_sync(&kms->pdev->dev);
+
+ seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+ for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+ seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+ status.sspp[i]);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_danger);
+ dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+ parent);
+ if (!dpu_kms->debugfs_danger) {
+ DPU_ERROR("failed to create danger debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+ return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+ struct dpu_debugfs_regset32 *regset;
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ void __iomem *base;
+ uint32_t i, addr;
+
+ if (!s || !s->private)
+ return 0;
+
+ regset = s->private;
+
+ dpu_kms = regset->dpu_kms;
+ if (!dpu_kms || !dpu_kms->mmio)
+ return 0;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return 0;
+
+ priv = dev->dev_private;
+ if (!priv)
+ return 0;
+
+ base = dpu_kms->mmio + regset->offset;
+
+ /* insert padding spaces, if needed */
+ if (regset->offset & 0xF) {
+ seq_printf(s, "[%x]", regset->offset & ~0xF);
+ for (i = 0; i < (regset->offset & 0xF); i += 4)
+ seq_puts(s, " ");
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* main register output */
+ for (i = 0; i < regset->blk_len; i += 4) {
+ addr = regset->offset + i;
+ if ((addr & 0xF) == 0x0)
+ seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+ seq_printf(s, " %08x", readl_relaxed(base + i));
+ }
+ seq_puts(s, "\n");
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+ .open = dpu_debugfs_open_regset32,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+ if (regset) {
+ regset->offset = offset;
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
+ }
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset)
+{
+ if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+ return NULL;
+
+ /* make sure offset is a multiple of 4 */
+ regset->offset = round_down(regset->offset, 4);
+
+ return debugfs_create_file(name, mode, parent,
+ regset, &dpu_fops_regset32);
+}
+
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+ void *p;
+ int rc;
+
+ p = dpu_hw_util_get_log_mask_ptr();
+
+ if (!dpu_kms || !p)
+ return -EINVAL;
+
+ dpu_kms->debugfs_root = debugfs_create_dir("debug",
+ dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+ DRM_ERROR("debugfs create_dir failed %ld\n",
+ PTR_ERR(dpu_kms->debugfs_root));
+ return dpu_kms->debugfs_root ?
+ PTR_ERR(dpu_kms->debugfs_root) : -EINVAL;
+ }
+
+ rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+ if (rc) {
+ DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+ return rc;
+ }
+
+ /* allow root to be NULL */
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+ (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+ rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+ /* don't need to NULL check debugfs_root */
+ if (dpu_kms) {
+ dpu_debugfs_vbif_destroy(dpu_kms);
+ dpu_debugfs_danger_destroy(dpu_kms);
+ dpu_debugfs_core_irq_destroy(dpu_kms);
+ debugfs_remove_recursive(dpu_kms->debugfs_root);
+ }
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_device *dev;
+ struct drm_encoder *encoder;
+
+ if (!kms)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+
+ if (!dev || !dev->dev_private)
+ return;
+ priv = dev->dev_private;
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc != NULL)
+ dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+ const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+ struct drm_crtc *crtc = encoder->crtc;
+
+ /* Forward this enable call to the commit hook */
+ if (funcs && funcs->commit)
+ funcs->commit(encoder);
+
+ if (crtc && crtc->state->active) {
+ trace_dpu_kms_enc_enable(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ /* If modeset is required, kickoff is run in encoder_enable */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ continue;
+
+ if (crtc->state->active) {
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+ }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ if (!kms || !old_state)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+ return;
+ priv = dpu_kms->dev->dev_private;
+
+ DPU_ATRACE_BEGIN("kms_complete_commit");
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !crtc || !crtc->state) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ dev = crtc->dev;
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+ ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ int i, rc;
+
+ /*TODO: Support two independent DSI connectors */
+ encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR_OR_NULL(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return;
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i]) {
+ DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+ return;
+ }
+
+ rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ i, rc);
+ continue;
+ }
+ }
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+ /*
+ * Extend this function to initialize other
+ * types of displays
+ */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+ priv->num_crtcs = 0;
+
+ for (i = 0; i < priv->num_planes; i++)
+ priv->planes[i]->funcs->destroy(priv->planes[i]);
+ priv->num_planes = 0;
+
+ for (i = 0; i < priv->num_connectors; i++)
+ priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+ priv->num_connectors = 0;
+
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+ priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_crtc *crtc;
+
+ struct msm_drm_private *priv;
+ struct dpu_mdss_cfg *catalog;
+
+ int primary_planes_idx = 0, i, ret;
+ int max_crtc_count;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+ catalog = dpu_kms->catalog;
+
+ /*
+ * Create encoder and query display drivers to create
+ * bridges and connectors
+ */
+ _dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+ max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+ /* Create the planes */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ bool primary = true;
+
+ if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+ || primary_planes_idx >= max_crtc_count)
+ primary = false;
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+ (1UL << max_crtc_count) - 1, 0);
+ if (IS_ERR(plane)) {
+ DPU_ERROR("dpu_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary)
+ primary_planes[primary_planes_idx++] = plane;
+ }
+
+ max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+ /* Create one CRTC per encoder */
+ for (i = 0; i < max_crtc_count; i++) {
+ crtc = dpu_crtc_init(dev, primary_planes[i]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* All CRTCs are compatible with all encoders */
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+fail:
+ _dpu_kms_drm_obj_destroy(dpu_kms);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_device *dev;
+ int rc;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+
+ rc = _dpu_debugfs_init(dpu_kms);
+ if (rc)
+ DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+ return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ int i;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return;
+
+ if (dpu_kms->hw_intr)
+ dpu_hw_intr_destroy(dpu_kms->hw_intr);
+ dpu_kms->hw_intr = NULL;
+
+ if (dpu_kms->power_event)
+ dpu_power_handle_unregister_event(
+ &dpu_kms->phandle, dpu_kms->power_event);
+
+ /* safe to call these more than once during shutdown */
+ _dpu_debugfs_destroy(dpu_kms);
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ if (dpu_kms->catalog) {
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ }
+ }
+
+ if (dpu_kms->rm_init)
+ dpu_rm_destroy(&dpu_kms->rm);
+ dpu_kms->rm_init = false;
+
+ if (dpu_kms->catalog)
+ dpu_hw_catalog_deinit(dpu_kms->catalog);
+ dpu_kms->catalog = NULL;
+
+ if (dpu_kms->core_client)
+ dpu_power_client_destroy(&dpu_kms->phandle,
+ dpu_kms->core_client);
+ dpu_kms->core_client = NULL;
+
+ if (dpu_kms->vbif[VBIF_NRT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+
+ if (dpu_kms->vbif[VBIF_RT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+
+ if (dpu_kms->mmio)
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+ dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+
+ dpu_dbg_destroy();
+ _dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct dpu_kms *dpu_kms;
+ int ret = 0, num_crtcs = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ /* disable hot-plug polling */
+ drm_kms_helper_poll_disable(ddev);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+ ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+ if (ret)
+ goto unlock;
+
+ /* save current state for resume */
+ if (dpu_kms->suspend_state)
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+ if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ dpu_kms->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+ /* check for nothing to do */
+ if (num_crtcs == 0) {
+ DRM_DEBUG("all crtcs are already in the off state\n");
+ drm_atomic_state_put(state);
+ goto suspended;
+ }
+
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_put(state);
+ goto unlock;
+ }
+
+suspended:
+ dpu_kms->suspend_block = true;
+
+unlock:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ DPU_ATRACE_END("kms_pm_suspend");
+ return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct dpu_kms *dpu_kms;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ DPU_ATRACE_BEGIN("kms_pm_resume");
+
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ dpu_kms->suspend_block = false;
+
+ if (dpu_kms->suspend_state) {
+ dpu_kms->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(dpu_kms->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ }
+ dpu_kms->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+
+ /* enable hot-plug polling */
+ drm_kms_helper_poll_enable(ddev);
+
+ DPU_ATRACE_END("kms_pm_resume");
+ return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ bool cmd_mode)
+{
+ struct msm_display_info info;
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ int i, rc = 0;
+
+ memset(&info, 0, sizeof(info));
+
+ info.intf_type = encoder->encoder_type;
+ info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ /* TODO: No support for DSI swap */
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (priv->dsi[i]) {
+ info.h_tile_instance[info.num_of_h_tiles] = i;
+ info.num_of_h_tiles++;
+ }
+ }
+
+ rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = dpu_kms_hw_init,
+ .irq_preinstall = dpu_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
+ .irq_uninstall = dpu_irq_uninstall,
+ .irq = dpu_irq,
+ .prepare_commit = dpu_kms_prepare_commit,
+ .commit = dpu_kms_commit,
+ .complete_commit = dpu_kms_complete_commit,
+ .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+ .enable_vblank = dpu_kms_enable_vblank,
+ .disable_vblank = dpu_kms_disable_vblank,
+ .check_modified_format = dpu_format_check_modified_format,
+ .get_format = dpu_get_msm_format,
+ .round_pixclk = dpu_kms_round_pixclk,
+ .pm_suspend = dpu_kms_pm_suspend,
+ .pm_resume = dpu_kms_pm_resume,
+ .destroy = dpu_kms_destroy,
+ .set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller needs to turn on the clocks before calling this function */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_mmu *mmu;
+
+ mmu = dpu_kms->base.aspace->mmu;
+
+ mmu->funcs->detach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(dpu_kms->base.aspace);
+
+ return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+ struct iommu_domain *domain;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return 0;
+
+ aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+ domain, "dpu1");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ dpu_kms->base.aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ DPU_ERROR("failed to attach iommu %d\n", ret);
+ msm_gem_address_space_put(aspace);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+ char *clock_name)
+{
+ struct dss_module_power *mp = &dpu_kms->mp;
+ int i;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+ return &mp->clk_config[i];
+ }
+
+ return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+ struct dss_clk *clk;
+
+ clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+ if (!clk)
+ return -EINVAL;
+
+ return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+ struct dpu_kms *dpu_kms = usr;
+
+ if (!dpu_kms)
+ return;
+
+ if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+ dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int i, rc = -EINVAL;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ goto end;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+ if (!dev) {
+ DPU_ERROR("invalid device\n");
+ goto end;
+ }
+
+ rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+ if (rc) {
+ DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+ goto end;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("invalid private data\n");
+ goto dbg_destroy;
+ }
+
+ dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ rc = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", rc);
+ dpu_kms->mmio = NULL;
+ goto error;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", rc);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ goto error;
+ }
+ dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ } else {
+ dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+ "vbif_nrt");
+ }
+
+ dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+ if (IS_ERR(dpu_kms->reg_dma)) {
+ dpu_kms->reg_dma = NULL;
+ DPU_DEBUG("REG_DMA is not defined");
+ } else {
+ dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+ }
+
+ dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+ "core");
+ if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+ rc = PTR_ERR(dpu_kms->core_client);
+ if (!dpu_kms->core_client)
+ rc = -EINVAL;
+ DPU_ERROR("dpu power client create failed: %d\n", rc);
+ dpu_kms->core_client = NULL;
+ goto error;
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ _dpu_kms_core_hw_rev_init(dpu_kms);
+
+ pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+ dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+ if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+ rc = PTR_ERR(dpu_kms->catalog);
+ if (!dpu_kms->catalog)
+ rc = -EINVAL;
+ DPU_ERROR("catalog init failed: %d\n", rc);
+ dpu_kms->catalog = NULL;
+ goto power_error;
+ }
+
+ dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+ /*
+ * Now we need to read the HW catalog and initialize resources such as
+ * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+ */
+ rc = _dpu_kms_mmu_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+ dpu_kms->dev);
+ if (rc) {
+ DPU_ERROR("rm init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->rm_init = true;
+
+ dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ rc = PTR_ERR(dpu_kms->hw_mdp);
+ if (!dpu_kms->hw_mdp)
+ rc = -EINVAL;
+ DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+ dpu_kms->hw_mdp = NULL;
+ goto power_error;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+ if (!dpu_kms->hw_vbif[vbif_idx])
+ rc = -EINVAL;
+ DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ goto power_error;
+ }
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+ &dpu_kms->phandle,
+ _dpu_kms_get_clk(dpu_kms, "core"));
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ goto perf_err;
+ }
+
+ dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ rc = PTR_ERR(dpu_kms->hw_intr);
+ DPU_ERROR("hw_intr init failed: %d\n", rc);
+ dpu_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max height
+ * is 4K
+ */
+ dev->mode_config.max_width =
+ dpu_kms->catalog->caps->max_mixer_width * 2;
+ dev->mode_config.max_height = 4096;
+
+ /*
+ * Support format modifiers for compression etc.
+ */
+ dev->mode_config.allow_fb_modifiers = true;
+
+ /*
+ * Handle (re)initializations during power enable
+ */
+ dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+ dpu_kms->power_event = dpu_power_handle_register_event(
+ &dpu_kms->phandle,
+ DPU_POWER_EVENT_POST_ENABLE,
+ dpu_kms_handle_power_event, dpu_kms, "kms");
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+
+drm_obj_init_err:
+ dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+ _dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+ dpu_dbg_destroy();
+end:
+ return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int irq;
+
+ if (!dev || !dev->dev_private) {
+ DPU_ERROR("drm device node invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv = dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ DPU_ERROR("failed to get irq\n");
+ return ERR_PTR(-EINVAL);
+ }
+ dpu_kms->base.irq = irq;
+
+ return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct dpu_kms *dpu_kms;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ mp = &dpu_kms->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ return ret;
+ }
+
+ dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+ platform_set_drvdata(pdev, dpu_kms);
+
+ msm_kms_init(&dpu_kms->base, &kms_funcs);
+ dpu_kms->dev = ddev;
+ dpu_kms->pdev = pdev;
+
+ pm_runtime_enable(&pdev->dev);
+ dpu_kms->rpm_enabled = true;
+
+ priv->kms = &dpu_kms->base;
+ return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+ mp->num_clk = 0;
+
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+ .bind = dpu_bind,
+ .unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_ops);
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+ int rc = -EINVAL;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, false);
+ if (rc)
+ DPU_ERROR("resource disable failed: %d\n", rc);
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (rc)
+ DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+ return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+ int rc = -EINVAL;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (rc) {
+ DPU_ERROR("clock enable failed rc:%d\n", rc);
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, true);
+ if (rc)
+ DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+ return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,sdm845-dpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+ .probe = dpu_dev_probe,
+ .remove = dpu_dev_remove,
+ .driver = {
+ .name = "msm_dpu",
+ .of_match_table = dpu_dt_match,
+ .pm = &dpu_pm_ops,
+ },
+};
+
+void __init msm_dpu_register(void)
+{
+ platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+ platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000000..66d466628e2b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ DRM_DEBUG(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
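+
+/*
+ * For example (hypothetical values): if A has wrapped past the ktime
+ * range while B has not, ktime_sub(A, B) still yields a small signed
+ * delta, so ktime_compare_safe(A, B) reports the intuitive ordering
+ * where a plain ktime_compare(A, B) on the raw values would not.
+ */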
+
+#define DPU_NAME_SIZE 12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT 60
+
+/*
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct dpu_irq_callback {
+ struct list_head list;
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct dpu_irq: IRQ structure contains callback registration info
+ * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl: array of IRQ callback lists
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts: array of IRQ trigger counts
+ * @cb_lock: callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+ u32 total_irqs;
+ struct list_head *irq_cb_tbl;
+ atomic_t *enable_counts;
+ atomic_t *irq_counts;
+ spinlock_t cb_lock;
+ struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ int core_rev;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_power_handle phandle;
+ struct dpu_power_client *core_client;
+ struct dpu_power_event *power_event;
+
+ /* directory entry for debugfs */
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_danger;
+ struct dentry *debugfs_vbif;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+ unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct dpu_hw_intr *hw_intr;
+ struct dpu_irq irq_obj;
+
+ struct dpu_core_perf perf;
+
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+ bool suspend_block;
+
+ struct dpu_rm rm;
+ bool rm_init;
+
+ struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+
+ bool has_danger_ctrl;
+
+ struct platform_device *pdev;
+ bool rpm_enabled;
+ struct dss_module_power mp;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+ ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+ if (!ddev_to_msm_kms(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ * suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+ if (!dpu_kms_is_suspend_state(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets does not need to be provided. The 'read' function simply
+ * outputs sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ * or debugfs_remove_recursive() (on a parent directory) to remove the
+ * file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset);
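+
+/*
+ * Usage sketch (illustrative only; "my_blk" and the offset/length
+ * values are made up):
+ *
+ * static struct dpu_debugfs_regset32 my_blk_regset;
+ *
+ * dpu_debugfs_setup_regset32(&my_blk_regset, 0x1000, 0x100, dpu_kms);
+ * dpu_debugfs_create_regset32("my_blk", 0400,
+ * dpu_debugfs_get_root(dpu_kms), &my_blk_regset);
+ *
+ * The regset must outlive the created file, hence the static storage
+ * assumed above.
+ */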
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE 4096
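+
+/*
+ * e.g. a built-up info buffer might contain "core_clk=300000000\n"
+ * followed by further "key=value\n" pairs (illustrative keys only;
+ * the actual entries depend on the caller).
+ */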
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 000000000000..9e533b86682c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,245 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS 0x0010
+
+struct dpu_mdss {
+ struct msm_mdss base;
+ void __iomem *mmio;
+ unsigned long mmio_len;
+ u32 hwversion;
+ struct dss_module_power mp;
+ struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+ struct dpu_mdss *dpu_mdss = arg;
+ u32 interrupts;
+
+ interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+ unsigned int mapping;
+ int rc;
+
+ mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+ hwirq);
+ if (mapping == 0) {
+ DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+ return IRQ_NONE;
+ }
+
+ rc = generic_handle_irq(mapping);
+ if (rc < 0) {
+ DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+ hwirq, mapping, rc);
+ return IRQ_NONE;
+ }
+
+ interrupts &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* clear_bit() is atomic but unordered; add explicit barriers */
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* set_bit() is atomic but unordered; add explicit barriers */
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+ .name = "dpu_mdss",
+ .irq_mask = dpu_mdss_irq_mask,
+ .irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct dpu_mdss *dpu_mdss = domain->host_data;
+ int ret;
+
+ irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+ ret = irq_set_chip_data(irq, dpu_mdss);
+
+ return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+ .map = dpu_mdss_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ dev = dpu_mdss->base.dev->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &dpu_mdss_irqdomain_ops, dpu_mdss);
+ if (!domain) {
+ DPU_ERROR("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ dpu_mdss->irq_controller.enabled_mask = 0;
+ dpu_mdss->irq_controller.domain = domain;
+
+ return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+ if (dpu_mdss->irq_controller.domain) {
+ irq_domain_remove(dpu_mdss->irq_controller.domain);
+ dpu_mdss->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (ret)
+ DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (ret)
+ DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+
+ pm_runtime_disable(dev->dev);
+ priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = dpu_mdss_enable,
+ .disable = dpu_mdss_disable,
+ .destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct resource *res;
+ struct dpu_mdss *dpu_mdss;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+ if (!dpu_mdss)
+ return -ENOMEM;
+
+ dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+ if (IS_ERR(dpu_mdss->mmio))
+ return PTR_ERR(dpu_mdss->mmio);
+
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+ if (!res) {
+ DRM_ERROR("failed to get memory resource for mdss\n");
+ return -ENOMEM;
+ }
+ dpu_mdss->mmio_len = resource_size(res);
+
+ mp = &dpu_mdss->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ goto clk_parse_err;
+ }
+
+ dpu_mdss->base.dev = dev;
+ dpu_mdss->base.funcs = &mdss_funcs;
+
+ ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+ if (ret)
+ goto irq_domain_error;
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+ if (ret) {
+ DPU_ERROR("failed to init irq: %d\n", ret);
+ goto irq_error;
+ }
+
+ pm_runtime_enable(dev->dev);
+
+ pm_runtime_get_sync(dev->dev);
+ dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+ pm_runtime_put_sync(dev->dev);
+
+ priv->mdss = &dpu_mdss->base;
+
+ return ret;
+
+irq_error:
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+ devm_kfree(&pdev->dev, mp->clk_config);
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000000..b640e39ebaca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define DPU_NAME_SIZE 12
+
+#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+ R0,
+ R1,
+ R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE 60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within pipe.
+ * This configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+ DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @aspace: address space pointer
+ * @csc_ptr: Points to the dpu_csc_cfg structure currently in use
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ * @revalidate: force revalidation of all the plane properties
+ */
+struct dpu_plane {
+ struct drm_plane base;
+
+ struct mutex lock;
+
+ enum dpu_sspp pipe;
+ uint32_t features; /* capabilities from catalog */
+ uint32_t nformats;
+ uint32_t formats[64];
+
+ struct dpu_hw_pipe *pipe_hw;
+ struct dpu_hw_pipe_cfg pipe_cfg;
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+ uint32_t color_fill;
+ bool is_error;
+ bool is_rt_pipe;
+ bool is_virtual;
+ struct list_head mplane_list;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_csc_cfg *csc_ptr;
+
+ const struct dpu_sspp_sub_blks *pipe_sblk;
+ char pipe_name[DPU_NAME_SIZE];
+
+ /* debugfs related stuff */
+ struct dentry *debugfs_root;
+ struct dpu_debugfs_regset32 debugfs_src;
+ struct dpu_debugfs_regset32 debugfs_scaler;
+ struct dpu_debugfs_regset32 debugfs_csc;
+ bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv;
+
+ if (!plane || !plane->dev)
+ return NULL;
+ priv = plane->dev->dev_private;
+ if (!priv)
+ return NULL;
+ return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+ return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+ return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane: Pointer to drm plane
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+ const struct dpu_format *fmt, u32 src_width)
+{
+ struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane_state *pstate;
+ u32 fixed_buff_size;
+ u32 total_fl;
+
+ if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ DPU_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+ fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+ list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ if (!dpu_plane_enabled(tmp->base.state))
+ continue;
+ DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+ pdpu->base.base.id, tmp->base.base.id,
+ src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ src_width = max_t(u32, src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ }
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == DPU_CHROMA_420) {
+ /* NV12 */
+ total_fl = (fixed_buff_size / 2) /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ /* non NV12 */
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ } else {
+ if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ total_fl = (fixed_buff_size) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ plane->base.id, pdpu->pipe - SSPP_VIG0,
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
+
+ return total_fl;
+}
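+
+/*
+ * Worked example with hypothetical numbers: for a pseudo-planar
+ * DPU_CHROMA_420 source with fmt->bpp == 1, src_width == 1920 and
+ * pixel_ram_size == 50176 bytes, the fill level above evaluates to
+ * (50176 / 2) / ((1920 + 32) * 1) = 12.
+ */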
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
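+
+/*
+ * Example with hypothetical table contents: given entries
+ * {.fl = 10, .lut = A} and {.fl = 0, .lut = B}, total_fl == 7 matches
+ * the first entry and returns A, while total_fl == 20 falls through
+ * the loop and, because the last .fl is zero, returns the default B.
+ */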
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u64 qos_lut;
+ u32 total_fl = 0, lut_usage;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments plane %d fb %d\n",
+ plane != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ lut_usage = DPU_QOS_LUT_USAGE_NRT;
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+ total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+ else
+ lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+ }
+
+ qos_lut = _dpu_plane_get_qos_lut(
+ &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+ pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+ trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ pdpu->is_rt_pipe, total_fl, qos_lut);
+
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u32 danger_lut, safe_lut;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ } else {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ }
+ }
+
+ pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+ pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+ trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->fetch_mode : 0,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane: Pointer to drm plane
+ * @enable: true to enable QoS control
+ * @flags: QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+ bool enable, u32 flags)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+ return;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+ pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+ pdpu->pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_sblk->danger_vblank;
+ pdpu->pipe_qos_cfg.vblank_en = enable;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+ /* this feature overrules previous VBLANK_CTRL */
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ }
+
+ if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+ pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+ if (!pdpu->is_rt_pipe) {
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.danger_safe_en = false;
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ pdpu->pipe_qos_cfg.danger_safe_en,
+ pdpu->pipe_qos_cfg.vblank_en,
+ pdpu->pipe_qos_cfg.creq_vblank,
+ pdpu->pipe_qos_cfg.danger_vblank,
+ pdpu->is_rt_pipe);
+
+ pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->is_rt_pipe)
+ goto end;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+ return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane: Pointer to drm plane
+ * @crtc: Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+ struct drm_crtc *crtc)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_ot_params ot_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev || !crtc) {
+ DPU_ERROR("invalid arguments plane %d crtc %d\n",
+ plane != 0, crtc != 0);
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+ ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+ ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.is_wfd = !pdpu->is_rt_pipe;
+ ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.vbif_idx = VBIF_RT;
+ ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ ot_params.rd = true;
+
+ dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS remap for the given plane
+ * @plane: Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_qos_params qos_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = VBIF_RT;
+ qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+ qos_params.is_rt = pdpu->is_rt_pipe;
+
+ DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ plane->base.id, qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt,
+ qos_params.clk_ctrl);
+
+ dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - get the address space for the given plane
+ * @pdpu: Pointer to DPU plane object
+ * @pstate: Pointer to DPU plane state object
+ * @aspace: Output pointer for the GEM address space
+ */
+static int _dpu_plane_get_aspace(
+ struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ struct msm_gem_address_space **aspace)
+{
+ struct dpu_kms *kms;
+
+ if (!pdpu || !pstate || !aspace) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_plane_get_kms(&pdpu->base);
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ *aspace = kms->base.aspace;
+
+ return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+ struct dpu_plane_state *pstate,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ struct msm_gem_address_space *aspace = NULL;
+ int ret;
+
+ if (!plane || !pstate || !pipe_cfg || !fb) {
+ DPU_ERROR(
+ "invalid arg(s), plane %d state %d cfg %d fb %d\n",
+ plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+ return;
+ }
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+ if (ret == -EAGAIN)
+ DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+ else if (ret)
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+ trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+ &pipe_cfg->layout,
+ pstate->multirect_index);
+ pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+ pstate->multirect_index);
+ }
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+ struct dpu_hw_scaler3_cfg *scale_cfg,
+ const struct dpu_format *fmt,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ uint32_t i;
+
+ if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+ !chroma_subsmpl_v) {
+ DPU_ERROR(
+ "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+ !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+ chroma_subsmpl_v);
+ return;
+ }
+
+ memset(scale_cfg, 0, sizeof(*scale_cfg));
+ memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
+
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ scale_cfg->src_width[i] = src_w;
+ scale_cfg->src_height[i] = src_h;
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ scale_cfg->src_width[i] /= chroma_subsmpl_h;
+ scale_cfg->src_height[i] /= chroma_subsmpl_v;
+ }
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ pstate->pixel_ext.num_ext_pxls_top[i] =
+ scale_cfg->src_height[i];
+ pstate->pixel_ext.num_ext_pxls_left[i] =
+ scale_cfg->src_width[i];
+ }
+ if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ && (src_w == dst_w))
+ return;
+
+ scale_cfg->dst_width = dst_w;
+ scale_cfg->dst_height = dst_h;
+ scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+ scale_cfg->lut_flag = 0;
+ scale_cfg->blend_cfg = 1;
+ scale_cfg->enable = 1;
+}
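+
+/*
+ * Worked example for the phase step math above (illustrative; assumes
+ * this driver's PHASE_STEP_SHIFT of 21): downscaling a 1920-wide
+ * source to a 960-wide destination gives
+ *
+ *   phase_step_x = mult_frac(1 << 21, 1920, 960) = 0x400000
+ *
+ * i.e. 2.0 in the 21-bit fractional fixed-point format, so the scaler
+ * walks two source pixels per destination pixel; for a 4:2:0 format
+ * (chroma_subsmpl_h == 2) the chroma step is halved to 0x200000 (1.0).
+ */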
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+ static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+ };
+ static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+ };
+
+ if (!pdpu) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ else
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+ DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+ pdpu->csc_ptr->csc_mv[0],
+ pdpu->csc_ptr->csc_mv[1],
+ pdpu->csc_ptr->csc_mv[2]);
+}
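+
+/*
+ * Decoding the S15.16 tables in _dpu_plane_setup_csc() (illustrative):
+ * each word is coefficient * 65536, so 0x00012A00 -> 1.164,
+ * 0x00019880 -> 1.596, 0xFFFF9B80 -> -0.392, 0xFFFF3000 -> -0.813 and
+ * 0x00020480 -> 2.017, the standard BT.601 limited-range
+ * YCbCr -> RGB coefficients.
+ */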
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ const struct dpu_format *fmt, bool color_fill)
+{
+ struct dpu_hw_pixel_ext *pe;
+ uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+ if (!pdpu || !fmt || !pstate) {
+ DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+ pdpu != 0, fmt != 0, pstate != 0);
+ return;
+ }
+
+ pe = &pstate->pixel_ext;
+
+ /* don't chroma subsample if decimating */
+ chroma_subsmpl_h =
+ drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+ chroma_subsmpl_v =
+ drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+ /* update scaler. calculate default config for QSEED3 */
+ _dpu_plane_setup_scaler3(pdpu, pstate,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect),
+ drm_rect_height(&pdpu->pipe_cfg.src_rect),
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+ &pstate->scaler3_cfg, fmt,
+ chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu: Pointer to DPU plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+ uint32_t color, uint32_t alpha)
+{
+ const struct dpu_format *fmt;
+ const struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+
+ if (!pdpu || !pdpu->base.state) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ }
+
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+ return -EINVAL;
+ }
+
+ plane = &pdpu->base;
+ pstate = to_dpu_plane_state(plane->state);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+ /* update sspp */
+ if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+ pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+ (color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+ pstate->multirect_index);
+
+ /* override scaler/decimation if solid fill */
+ pdpu->pipe_cfg.src_rect.x1 = 0;
+ pdpu->pipe_cfg.src_rect.y1 = 0;
+ pdpu->pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+ pdpu->pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+ if (pdpu->pipe_hw->ops.setup_format)
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+ fmt, DPU_SSPP_SOLID_FILL,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_rects)
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+ }
+
+ return 0;
+}
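+
+/*
+ * Packing example for the solid-fill word above (illustrative): with
+ * color == 0x0000FF (red, per the [7..0] Red layout) and alpha == 0xFF,
+ *
+ *   (0x0000FF & 0xFFFFFF) | ((0xFF & 0xFF) << 24) == 0xFF0000FF
+ *
+ * i.e. fully opaque red in the ABGR8888 fill format.
+ */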
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!drm_state)
+ return;
+
+ pstate = to_dpu_plane_state(drm_state);
+
+ pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+ pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+ struct dpu_plane_state *pstate[R_MAX];
+ const struct drm_plane_state *drm_state[R_MAX];
+ struct drm_rect src[R_MAX], dst[R_MAX];
+ struct dpu_plane *dpu_plane[R_MAX];
+ const struct dpu_format *fmt[R_MAX];
+ int i, buffer_lines;
+ unsigned int max_tile_height = 1;
+ bool parallel_fetch_qualified = true;
+ bool has_tiled_rect = false;
+
+ for (i = 0; i < R_MAX; i++) {
+ const struct msm_format *msm_fmt;
+
+ drm_state[i] = i ? plane->r1 : plane->r0;
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ fmt[i] = to_dpu_format(msm_fmt);
+
+ if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+ has_tiled_rect = true;
+ if (fmt[i]->tile_height > max_tile_height)
+ max_tile_height = fmt[i]->tile_height;
+ }
+ }
+
+ for (i = 0; i < R_MAX; i++) {
+ int width_threshold;
+
+ pstate[i] = to_dpu_plane_state(drm_state[i]);
+ dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+ if (pstate[i] == NULL) {
+ DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+ drm_state[i]->plane->base.id);
+ return -EINVAL;
+ }
+
+ src[i].x1 = drm_state[i]->src_x >> 16;
+ src[i].y1 = drm_state[i]->src_y >> 16;
+ src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+ src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+ dst[i] = drm_plane_state_dest(drm_state[i]);
+
+ if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+ drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "scaling is not supported in multirect mode\n");
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(fmt[i])) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "Unsupported format for multirect mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * SSPP PD_MEM is split in half - one half for each RECT.
+ * Tiled formats need 5 lines of buffering while fetching,
+ * whereas linear formats need only 2 lines, so we cannot
+ * support more than half of the supported SSPP width for
+ * tiled formats.
+ */
+ width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+ if (has_tiled_rect)
+ width_threshold /= 2;
+
+ if (parallel_fetch_qualified &&
+ drm_rect_width(&src[i]) > width_threshold)
+ parallel_fetch_qualified = false;
+
+ }
+
+ /* Validate RECTs and set the mode */
+
+ /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+ if (parallel_fetch_qualified) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ goto done;
+ }
+
+ /* TIME_MX Mode */
+ buffer_lines = 2 * max_tile_height;
+
+ if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+ dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ } else {
+ DPU_ERROR(
+ "No multirect mode possible for the planes (%d - %d)\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ return -EINVAL;
+ }
+
+done:
+ if (dpu_plane[R0]->is_virtual) {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+ } else {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+ }
+
+ DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+ pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+ DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+ pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+ return 0;
+}
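+
+/*
+ * Decision sketch for the validation above (illustrative; 2560 is a
+ * hypothetical maxlinewidth): two linear rects up to 2560 pixels wide
+ * fetch in PARALLEL; with a UBWC-tiled rect the threshold halves to
+ * 1280, so a pair of 1920-wide tiled rects falls back to TIME_MX,
+ * which additionally requires the destination rects to be separated
+ * vertically by at least 2 * max_tile_height lines.
+ */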
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !flush_sspp) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ pstate = to_dpu_plane_state(plane->state);
+
+ *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+ struct dpu_hw_fmt_layout layout;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ struct dma_fence *fence;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+ return ret;
+ }
+
+ /* cache aspace */
+ pstate->aspace = aspace;
+
+ /*
+ * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+ * we can use msm_atomic_prepare_fb() instead of doing the
+ * implicit fence and fb prepare by hand here.
+ */
+ obj = msm_framebuffer_bo(new_state->fb, 0);
+ msm_obj = to_msm_bo(obj);
+ fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ if (fence)
+ drm_atomic_set_fence_for_plane(new_state, fence);
+
+ if (pstate->aspace) {
+ ret = msm_framebuffer_prepare(new_state->fb,
+ pstate->aspace);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
+ }
+ }
+
+ /* validate framebuffer layout before commit */
+ ret = dpu_format_populate_layout(pstate->aspace,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *old_pstate;
+
+ if (!old_state || !old_state->fb)
+ return;
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+ struct drm_rect *fb_rect,
+ uint32_t min_src_size)
+{
+ /* Ensure fb size is supported */
+ if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+ drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+ return false;
+
+ /* Ensure src rect is above the minimum size */
+ if (drm_rect_width(src) < min_src_size ||
+ drm_rect_height(src) < min_src_size)
+ return false;
+
+ /* Ensure src is fully encapsulated in fb */
+ return drm_rect_intersect(fb_rect, src) &&
+ drm_rect_equals(fb_rect, src);
+}
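+
+/*
+ * Containment sketch (illustrative): for a 1920x1080 fb and a src rect
+ * of (100, 100)-(740, 580), drm_rect_intersect() leaves the clipped
+ * fb_rect equal to src, so the check passes; a src rect reaching past
+ * any fb edge shrinks under intersection and is rejected.
+ */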
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int ret = 0;
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ const struct dpu_format *fmt;
+ struct drm_rect src, dst, fb_rect = { 0 };
+ uint32_t max_upscale = 1, max_downscale = 1;
+ uint32_t min_src_size, max_linewidth;
+ int hscale = 1, vscale = 1;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(state);
+
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ fb_rect.x2 = state->fb->width;
+ fb_rect.y2 = state->fb->height;
+
+ max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+ if (pdpu->features & DPU_SSPP_SCALER) {
+ max_downscale = pdpu->pipe_sblk->maxdwnscale;
+ max_upscale = pdpu->pipe_sblk->maxupscale;
+ }
+ if (drm_rect_width(&src) < drm_rect_width(&dst))
+ hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+ else
+ hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+ if (drm_rect_height(&src) < drm_rect_height(&dst))
+ vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+ else
+ vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
+
+ DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+ dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+ if (!dpu_plane_enabled(state))
+ goto exit;
+
+ fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+ min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (!(pdpu->features & DPU_SSPP_SCALER) ||
+ !(pdpu->features & (BIT(DPU_SSPP_CSC)
+ | BIT(DPU_SSPP_CSC_10BIT))))) {
+ DPU_ERROR_PLANE(pdpu,
+ "plane doesn't have scaler/csc for yuv\n");
+ ret = -EINVAL;
+
+ /* check src bounds */
+ } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+ DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -E2BIG;
+
+ /* valid yuv image */
+ } else if (DPU_FORMAT_IS_YUV(fmt) &&
+ (src.x1 & 0x1 || src.y1 & 0x1 ||
+ drm_rect_width(&src) & 0x1 ||
+ drm_rect_height(&src) & 0x1)) {
+ DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -EINVAL;
+
+ /* min dst support */
+ } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+ DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&dst));
+ ret = -EINVAL;
+
+ /* check decimated source width */
+ } else if (drm_rect_width(&src) > max_linewidth) {
+ DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&src), max_linewidth);
+ ret = -E2BIG;
+
+ /* check scaler capability */
+ } else if (hscale < 0 || vscale < 0) {
+ DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+ DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+ ret = -E2BIG;
+ }
+
+exit:
+ return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (!state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+ return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (pdpu->is_error)
+ /* force white frame with 100% alpha pipe output on error */
+ _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+ else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: true to put the plane into the error state
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ uint32_t nplanes, src_flags;
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_pstate;
+ const struct dpu_format *fmt;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src, dst;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return -EINVAL;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return -EINVAL;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+
+ pstate = to_dpu_plane_state(state);
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ crtc = state->crtc;
+ fb = state->fb;
+ if (!crtc || !fb) {
+ DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+ crtc != 0, fb != 0);
+ return -EINVAL;
+ }
+ fmt = to_dpu_format(msm_framebuffer_format(fb));
+ nplanes = fmt->num_planes;
+
+ memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+ _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+ pstate->pending = true;
+
+ pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
+ crtc->base.id, DRM_RECT_ARG(&dst),
+ (char *)&fmt->base.pixel_format,
+ DPU_FORMAT_IS_UBWC(fmt));
+
+ pdpu->pipe_cfg.src_rect = src;
+ pdpu->pipe_cfg.dst_rect = dst;
+
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+ /* override for color fill */
+ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ return 0;
+ }
+
+ if (pdpu->pipe_hw->ops.setup_rects) {
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+ }
+
+ if (pdpu->pipe_hw->ops.setup_pe &&
+ (pstate->multirect_index != DPU_SSPP_RECT_1))
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ /*
+ * When programmed in multirect mode, the scaler block is
+ * bypassed. Alpha and bitwidth still need to be updated,
+ * but ONLY for RECT0.
+ */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+
+ if (pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(
+ pdpu->pipe_hw,
+ pstate->multirect_index,
+ pstate->multirect_mode);
+
+ if (pdpu->pipe_hw->ops.setup_format) {
+ src_flags = 0x0;
+
+ /* update format */
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_cdp) {
+ struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+ memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+ cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ [DPU_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg->ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(fmt);
+ cdp_cfg->tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(fmt) ||
+ DPU_FORMAT_IS_TILE(fmt);
+ cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ }
+
+ /* update csc */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ _dpu_plane_setup_csc(pdpu);
+ else
+ pdpu->csc_ptr = NULL;
+ }
+
+ _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_danger_lut(plane, fb);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+ _dpu_plane_set_ot_limit(plane, crtc);
+ }
+
+ _dpu_plane_set_qos_remap(plane);
+ return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+ pstate = to_dpu_plane_state(state);
+
+ trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ pstate->multirect_mode);
+
+ pstate->pending = true;
+
+ if (is_dpu_plane_virtual(plane) &&
+ pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+ DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = false;
+ state = plane->state;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (!dpu_plane_sspp_enabled(state)) {
+ _dpu_plane_atomic_disable(plane, old_state);
+ } else {
+ int ret;
+
+ ret = dpu_plane_sspp_atomic_update(plane, old_state);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* last plane state is same as current state */
+ dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (pdpu) {
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ mutex_destroy(&pdpu->lock);
+
+ drm_plane_helper_disable(plane, NULL);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ if (pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+ kfree(pdpu);
+ }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ return;
+ }
+
+ pstate = to_dpu_plane_state(state);
+
+ /* remove ref count for frame buffers */
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_dpu_plane_state(plane->state);
+ pdpu = to_dpu_plane(plane);
+ pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return NULL;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ pstate->pending = false;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+ return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ dpu_plane_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return;
+ }
+
+ pstate->base.plane = plane;
+
+ plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int len = 0;
+ char buf[40] = {'\0'};
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int disable_panic;
+ char buf[10];
+
+ if (!cfg)
+ return -EFAULT;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &disable_panic))
+ return -EFAULT;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
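+
+/*
+ * Debugfs usage sketch (illustrative; the dri minor and pipe directory
+ * name depend on the system):
+ *
+ *   # reads 1 when the danger/panic signal is disabled
+ *   cat /sys/kernel/debug/dri/0/<pipe_name>/disable_danger
+ *   # disable the panic signal on all active pipes
+ *   echo 1 > /sys/kernel/debug/dri/0/<pipe_name>/disable_danger
+ */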
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_kms *kms;
+ struct msm_drm_private *priv;
+ const struct dpu_sspp_sub_blks *sblk = NULL;
+ const struct dpu_sspp_cfg *cfg = NULL;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (pdpu && pdpu->pipe_hw)
+ cfg = pdpu->pipe_hw->cap;
+ if (cfg)
+ sblk = cfg->sblk;
+
+ if (!sblk)
+ return 0;
+
+ /* create overall sub-directory for the pipe */
+ pdpu->debugfs_root =
+ debugfs_create_dir(pdpu->pipe_name,
+ plane->dev->primary->debugfs_root);
+
+ if (!pdpu->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_x32("features", 0600,
+ pdpu->debugfs_root, &pdpu->features);
+
+ /* add register dump support */
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_src);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_scaler);
+ debugfs_create_bool("default_scaling",
+ 0600,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_default_scale);
+ }
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_csc);
+ }
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ debugfs_create_file("disable_danger",
+ 0600,
+ pdpu->debugfs_root,
+ kms, &dpu_plane_danger_enable);
+
+ return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+ pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+ return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+ _dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_plane_duplicate_state,
+ .atomic_destroy_state = dpu_plane_destroy_state,
+ .late_register = dpu_plane_late_register,
+ .early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id)
+{
+ struct drm_plane *plane = NULL, *master_plane = NULL;
+ const struct dpu_format_extended *format_list;
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ enum drm_plane_type type;
+ int zpos_max = DPU_ZPOS_MAX;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ DPU_ERROR("[%u]device is NULL\n", pipe);
+ goto exit;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("[%u]private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_dpu_kms(priv->kms);
+
+ if (!kms->catalog) {
+ DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+ goto exit;
+ }
+
+ /* create and zero local structure */
+ pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+ if (!pdpu) {
+ DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
+ pdpu->is_virtual = (master_plane_id != 0);
+ INIT_LIST_HEAD(&pdpu->mplane_list);
+ master_plane = drm_plane_find(dev, NULL, master_plane_id);
+ if (master_plane) {
+ struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+ list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+ }
+
+ /* initialize underlying h/w driver */
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+ master_plane_id != 0);
+ if (IS_ERR(pdpu->pipe_hw)) {
+ DPU_ERROR("[%u]SSPP init failed\n", pipe);
+ ret = PTR_ERR(pdpu->pipe_hw);
+ goto clean_plane;
+ } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+ goto clean_sspp;
+ }
+
+ /* cache features mask for later */
+ pdpu->features = pdpu->pipe_hw->cap->features;
+ pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR("[%u]invalid sblk\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (!master_plane_id)
+ format_list = pdpu->pipe_sblk->format_list;
+ else
+ format_list = pdpu->pipe_sblk->virt_format_list;
+
+ pdpu->nformats = dpu_populate_formats(format_list,
+ pdpu->formats,
+ 0,
+ ARRAY_SIZE(pdpu->formats));
+
+ if (!pdpu->nformats) {
+ DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu->formats, pdpu->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto clean_sspp;
+
+ pdpu->catalog = kms->catalog;
+
+ if (kms->catalog->mixer_count &&
+ kms->catalog->mixer[0].sblk->maxblendstages) {
+ zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+ if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+ zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+ }
+
+ ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ if (ret)
+ DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+ /* save user friendly pipe name for later */
+ snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+ mutex_init(&pdpu->lock);
+
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ pipe, plane->base.id, master_plane_id);
+ return plane;
+
+clean_sspp:
+ if (pdpu && pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+ kfree(pdpu);
+exit:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 000000000000..f6fe6ddc7a3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base: base drm plane state object
+ * @aspace: pointer to address space for input/output buffers
+ * @input_fence: dereferenced input fence pointer
+ * @stage: assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending: whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg: CDP configuration
+ */
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ struct msm_gem_address_space *aspace;
+ void *input_fence;
+ enum dpu_stage stage;
+ uint32_t multirect_index;
+ uint32_t multirect_mode;
+ bool pending;
+
+ /* scaler configuration */
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane state configured on rect 0
+ * @r1: drm plane state configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+ const struct drm_plane_state *r0;
+ const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+ container_of(x, struct dpu_plane_state, base)
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * Return: true if the plane is virtual, false if the plane is primary
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane: Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: true to put the plane into the error state
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ * a regular plane initialization. A non-zero primary plane
+ * id will be passed for a virtual pipe initialization.
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id);
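+
+/*
+ * Usage sketch (illustrative; SSPP_VIG0 stands in for a real pipe id
+ * taken from the hardware catalog):
+ *
+ *   struct drm_plane *plane;
+ *
+ *   plane = dpu_plane_init(dev, SSPP_VIG0, true, 0xff, 0);
+ *   if (IS_ERR(plane))
+ *           return PTR_ERR(plane);
+ */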
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ * against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane: Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ * validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 000000000000..a75eebca2f37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+ [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+ if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+ return data_bus_name[bus_id];
+
+ return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+ u32 event_type)
+{
+ struct dpu_power_event *event;
+
+ list_for_each_entry(event, &phandle->event_list, list) {
+ if (event->event_type & event_type)
+ event->cb_fnc(event_type, event->usr);
+ }
+}
+
+struct dpu_power_client *dpu_power_client_create(
+ struct dpu_power_handle *phandle, char *client_name)
+{
+ struct dpu_power_client *client;
+ static u32 id;
+
+ if (!client_name || !phandle) {
+ pr_err("client name is null or invalid power data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&phandle->phandle_lock);
+ strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+ client->usecase_ndx = VOTE_INDEX_DISABLE;
+ client->id = id;
+ client->active = true;
+ pr_debug("client %s created:%pK id :%d\n", client_name,
+ client, id);
+ id++;
+ list_add(&client->list, &phandle->power_client_clist);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client)
+{
+ if (!client || !phandle) {
+ pr_err("reg bus vote: invalid client handle\n");
+ } else if (!client->active) {
+ pr_err("dpu power deinit already done\n");
+ kfree(client);
+ } else {
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+ client->name, client, client->id);
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(client);
+ }
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ phandle->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&phandle->power_client_clist);
+ INIT_LIST_HEAD(&phandle->event_list);
+
+ mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ struct dpu_power_client *curr_client, *next_client;
+ struct dpu_power_event *curr_event, *next_event;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ return;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ list_for_each_entry_safe(curr_client, next_client,
+ &phandle->power_client_clist, list) {
+ pr_err("client:%s-%d still registered with refcount:%d\n",
+ curr_client->name, curr_client->id,
+ curr_client->refcount);
+ curr_client->active = false;
+ list_del(&curr_client->list);
+ }
+
+ list_for_each_entry_safe(curr_event, next_event,
+ &phandle->event_list, list) {
+ pr_err("event:%d, client:%s still registered\n",
+ curr_event->event_type,
+ curr_event->client_name);
+ curr_event->active = false;
+ list_del(&curr_event->list);
+ }
+ mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, bool enable)
+{
+ bool changed = false;
+ u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+ struct dpu_power_client *client;
+
+ if (!phandle || !pclient) {
+ pr_err("invalid input argument\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ if (enable)
+ pclient->refcount++;
+ else if (pclient->refcount)
+ pclient->refcount--;
+
+ if (pclient->refcount)
+ pclient->usecase_ndx = VOTE_INDEX_LOW;
+ else
+ pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+ list_for_each_entry(client, &phandle->power_client_clist, list) {
+ if (client->usecase_ndx < VOTE_INDEX_MAX &&
+ client->usecase_ndx > max_usecase_ndx)
+ max_usecase_ndx = client->usecase_ndx;
+ }
+
+ if (phandle->current_usecase_ndx != max_usecase_ndx) {
+ changed = true;
+ prev_usecase_ndx = phandle->current_usecase_ndx;
+ phandle->current_usecase_ndx = max_usecase_ndx;
+ }
+
+ pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+ __builtin_return_address(0), changed, max_usecase_ndx,
+ pclient->name, pclient->id, enable, pclient->refcount);
+
+ if (!changed)
+ goto end;
+
+ if (enable) {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_ENABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_ENABLE);
+
+ } else {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_DISABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_DISABLE);
+ }
+
+end:
+ mutex_unlock(&phandle->phandle_lock);
+ return 0;
+}
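+
+/*
+ * Vote aggregation sketch (illustrative): enable/disable calls are
+ * refcounted per client and the handle takes the maximum vote across
+ * all clients, so events fire only on aggregate transitions:
+ *
+ *   dpu_power_resource_enable(phandle, client, true);  // 0->1, *_ENABLE events
+ *   dpu_power_resource_enable(phandle, client, true);  // 1->2, no event
+ *   dpu_power_resource_enable(phandle, client, false); // 2->1, no event
+ *   dpu_power_resource_enable(phandle, client, false); // 1->0, *_DISABLE events
+ */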
+
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name)
+{
+ struct dpu_power_event *event;
+
+ if (!phandle) {
+ pr_err("invalid power handle\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!cb_fnc || !event_type) {
+ pr_err("no callback fnc or event type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ event->event_type = event_type;
+ event->cb_fnc = cb_fnc;
+ event->usr = usr;
+ strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+ event->active = true;
+
+ mutex_lock(&phandle->phandle_lock);
+ list_add(&event->list, &phandle->event_list);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return event;
+}
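+
+/*
+ * Registration sketch (illustrative; the callback and the "kms"
+ * client name are made up for the example). Event types may be OR'ed
+ * to register for several notifications at once:
+ *
+ *   static void _example_cb(u32 event_type, void *usr) { ... }
+ *
+ *   event = dpu_power_handle_register_event(phandle,
+ *                   DPU_POWER_EVENT_POST_ENABLE |
+ *                   DPU_POWER_EVENT_POST_DISABLE,
+ *                   _example_cb, kms, "kms");
+ *   if (IS_ERR(event))
+ *           return PTR_ERR(event);
+ */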
+
+void dpu_power_handle_unregister_event(
+ struct dpu_power_handle *phandle,
+ struct dpu_power_event *event)
+{
+ if (!phandle || !event) {
+ pr_err("invalid phandle or event\n");
+ } else if (!event->active) {
+ pr_err("power handle deinit already done\n");
+ kfree(event);
+ } else {
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&event->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(event);
+ }
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 000000000000..344f74464eca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE 0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE 0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE 0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE 0x8
+
+/**
+ * enum mdss_bus_vote_type - register bus vote type
+ * @VOTE_INDEX_DISABLE: removes the client vote
+ * @VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * @VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+ DPU_POWER_HANDLE_DBUS_ID_MNOC,
+ DPU_POWER_HANDLE_DBUS_ID_LLCC,
+ DPU_POWER_HANDLE_DBUS_ID_EBI,
+ DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name: name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount: current refcount when multiple modules use the same
+ * client for enable/disable. Power module will aggregate
+ * the refcount and vote accordingly for this client.
+ * @id: assigned during create. helps for debugging.
+ * @list: list to attach power handle master list
+ * @ab: arbitrated bandwidth for each bus client
+ * @ib: instantaneous bandwidth for each bus client
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ short refcount;
+ u32 id;
+ struct list_head list;
+ u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ bool active;
+};
+
+/**
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+ char client_name[MAX_CLIENT_NAME_LEN];
+ void (*cb_fnc)(u32 event_type, void *usr);
+ void *usr;
+ u32 event_type;
+ struct list_head list;
+ bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all power clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+ struct list_head power_client_clist;
+ struct mutex phandle_lock;
+ struct device *dev;
+ u32 current_usecase_ndx;
+ struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev: platform device to search the power resources
+ * @pdata: power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev: platform device for power resources
+ * @pdata: power handle containing the resources
+ *
+ * Return: none
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to client on success, NULL on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+ char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle: power handle containing the resources
+ * @client: client to be destroyed
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata: power handle containing the resources
+ * @pclient: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+ struct dpu_power_client *pclient, bool enable);
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth voting
+ * @phandle: power handle containing the resources
+ * @pclient: client information for bandwidth control
+ * @enable: true to enable bandwidth voting for the data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ * Clients can register for multiple events with a single register.
+ * Any block with access to phandle can register for the event
+ * notification.
+ * @phandle: power handle containing the resources
+ * @event_type: event type to register; refer to DPU_POWER_EVENT_*
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ * @client_name: name of the client registering
+ *
+ * Return: event pointer if success, or error code otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name);
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle: power handle containing the resources
+ * @event: event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+ struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id: data bus identifier
+ * Return: Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
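For reference, a typical client lifecycle against this interface might look as follows; an illustrative sketch only (error handling simplified, phandle assumed valid):

    struct dpu_power_client *client;

    client = dpu_power_client_create(phandle, "dpu_core");
    if (!client)
    	return -EINVAL;

    /* refcounted vote on; multiple modules may share one client */
    dpu_power_resource_enable(phandle, client, true);
    /* ... program hardware ... */
    dpu_power_resource_enable(phandle, client, false);

    dpu_power_client_destroy(phandle, client);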
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 000000000000..13c0a36d4ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+ (t).num_comp_enc == (r).num_enc && \
+ (t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+ enum dpu_rm_topology_name top_name;
+ int num_lm;
+ int num_comp_enc;
+ int num_intf;
+ int num_ctl;
+ int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+ { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
+ { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
+ { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
+ { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
+};
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl: topology control preference from kernel client
+ * @topology: selected topology for the display
+ * @hw_res: Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+ uint64_t top_ctrl;
+ const struct dpu_rm_topology_def *topology;
+ struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ * By using a tag, rather than lists of pointers to the HW blocks used,
+ * we can avoid some list management, since we don't know how many blocks
+ * of each type a given use case may require.
+ * @list: List head for list of all reservations
+ * @seq: Global RSVP sequence number for debugging, especially for
+ * differentiating different allocations for the same encoder.
+ * @enc_id: Reservations are tracked by Encoder DRM object ID.
+ * CRTCs may be connected to multiple Encoders.
+ * An encoder or connector id identifies the display path.
+ * @topology: DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+ struct list_head list;
+ uint32_t seq;
+ uint32_t enc_id;
+ enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list: List head for list of all hardware blocks tracking items
+ * @rsvp: Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt: Temporary pointer used during reservation to the incoming
+ * request. Will be swapped into rsvp if proposal is accepted
+ * @type: Type of hardware block this structure tracks
+ * @id: Hardware ID number, within its own space, ie. LM_X
+ * @hw: Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+ struct list_head list;
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_rsvp *rsvp_nxt;
+ enum dpu_hw_blk_type type;
+ uint32_t id;
+ struct dpu_hw_blk *hw;
+};
+
+/**
+ * enum dpu_rm_dbg_rsvp_stage - steps in making a reservation, for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+ DPU_RM_STAGE_BEGIN,
+ DPU_RM_STAGE_AFTER_CLEAR,
+ DPU_RM_STAGE_AFTER_RSVPNEXT,
+ DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+ struct dpu_rm *rm,
+ enum dpu_rm_dbg_rsvp_stage stage)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ DPU_DEBUG("%d\n", stage);
+
+ list_for_each_entry(rsvp, &rm->rsvps, list) {
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+ rsvp->enc_id, rsvp->topology);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (!blk->rsvp && !blk->rsvp_nxt)
+ continue;
+
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+ (blk->rsvp) ? blk->rsvp->seq : 0,
+ (blk->rsvp) ? blk->rsvp->enc_id : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+ blk->type, blk->id);
+ }
+ }
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+ return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+ int i;
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+ return g_top_table[i].top_name;
+
+ return DPU_RM_TOPOLOGY_NONE;
+}
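Topology matching compares num_lm/num_enc/num_intf against g_top_table via RM_IS_TOPOLOGY_MATCH. As a worked example (illustrative only), a two-mixer display driving one interface with no compression encoders resolves to the 3D-merge entry:

    struct msm_display_topology topology = {
    	.num_lm = 2,
    	.num_enc = 0,
    	.num_intf = 1,
    };

    /* matches { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false } */
    WARN_ON(dpu_rm_get_topology_name(topology) !=
    	DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE);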
+
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type)
+{
+ memset(iter, 0, sizeof(*iter));
+ iter->enc_id = enc_id;
+ iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ struct list_head *blk_list;
+
+ if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+ DPU_ERROR("invalid rm\n");
+ return false;
+ }
+
+ i->hw = NULL;
+ blk_list = &rm->hw_blks[i->type];
+
+ if (i->blk && (&i->blk->list == blk_list)) {
+ DPU_DEBUG("attempt resume iteration past last\n");
+ return false;
+ }
+
+ i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+ list_for_each_entry_continue(i->blk, blk_list, list) {
+ struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+ if (i->blk->type != i->type) {
+ DPU_ERROR("found incorrect block type %d on %d list\n",
+ i->blk->type, i->type);
+ return false;
+ }
+
+ if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ i->hw = i->blk->hw;
+ DPU_DEBUG("found type %d id %d for enc %d\n",
+ i->type, i->blk->id, i->enc_id);
+ return true;
+ }
+ }
+
+ DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+ return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _dpu_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
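The iterator is a single-pass cursor: initialize it, then call dpu_rm_get_hw() until it returns false. A sketch of walking the CTL blocks reserved for one encoder (enc_id assumed known to the caller):

    struct dpu_rm_hw_iter iter;

    dpu_rm_init_hw_iter(&iter, enc_id, DPU_HW_BLK_CTL);
    while (dpu_rm_get_hw(rm, &iter)) {
    	struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.hw);

    	/* iter.hw is valid here; program or inspect the CTL */
    }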
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ dpu_hw_lm_destroy(hw);
+ break;
+ case DPU_HW_BLK_CTL:
+ dpu_hw_ctl_destroy(hw);
+ break;
+ case DPU_HW_BLK_CDM:
+ dpu_hw_cdm_destroy(hw);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ dpu_hw_pingpong_destroy(hw);
+ break;
+ case DPU_HW_BLK_INTF:
+ dpu_hw_intf_destroy(hw);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ break;
+ }
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+ enum dpu_hw_blk_type type;
+
+ if (!rm) {
+ DPU_ERROR("invalid rm\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+ list_del(&rsvp_cur->list);
+ kfree(rsvp_cur);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+ list) {
+ list_del(&hw_cur->list);
+ _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ kfree(hw_cur);
+ }
+ }
+
+ dpu_hw_mdp_destroy(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+
+ mutex_destroy(&rm->rm_lock);
+
+ return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+ struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ enum dpu_hw_blk_type type,
+ uint32_t id,
+ void *hw_catalog_info)
+{
+ struct dpu_rm_hw_blk *blk;
+ struct dpu_hw_mdp *hw_mdp;
+ void *hw;
+
+ hw_mdp = rm->hw_mdp;
+
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ hw = dpu_hw_lm_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw = dpu_hw_ctl_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CDM:
+ hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ hw = dpu_hw_pingpong_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw = dpu_hw_intf_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(hw)) {
+ DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+ type, PTR_ERR(hw));
+ return -EFAULT;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+ _dpu_rm_hw_destroy(type, hw);
+ return -ENOMEM;
+ }
+
+ blk->type = type;
+ blk->id = id;
+ blk->hw = hw;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev)
+{
+ int rc, i;
+ enum dpu_hw_blk_type type;
+
+ if (!rm || !cat || !mmio || !dev) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
+ INIT_LIST_HEAD(&rm->rsvps);
+ for (type = 0; type < DPU_HW_BLK_MAX; type++)
+ INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+ rm->dev = dev;
+
+ /* Some of the sub-blocks require an mdptop to be created */
+ rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+ if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+ rc = PTR_ERR(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+ DPU_ERROR("failed: mdp hw not available\n");
+ goto fail;
+ }
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+ if (lm->pingpong == PINGPONG_MAX) {
+ DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+ cat->mixer[i].id, &cat->mixer[i]);
+ if (rc) {
+ DPU_ERROR("failed: lm hw not available\n");
+ goto fail;
+ }
+
+ if (!rm->lm_max_width) {
+ rm->lm_max_width = lm->sblk->maxwidth;
+ } else if (rm->lm_max_width != lm->sblk->maxwidth) {
+ /*
+ * Don't expect to have hw where lm max widths differ.
+ * If found, take the min.
+ */
+ DPU_ERROR("unsupported: lm maxwidth differs\n");
+ if (rm->lm_max_width > lm->sblk->maxwidth)
+ rm->lm_max_width = lm->sblk->maxwidth;
+ }
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+ cat->pingpong[i].id, &cat->pingpong[i]);
+ if (rc) {
+ DPU_ERROR("failed: pp hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ if (cat->intf[i].type == INTF_NONE) {
+ DPU_DEBUG("skip intf %d with type none\n", i);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+ cat->intf[i].id, &cat->intf[i]);
+ if (rc) {
+ DPU_ERROR("failed: intf hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+ cat->ctl[i].id, &cat->ctl[i]);
+ if (rc) {
+ DPU_ERROR("failed: ctl hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->cdm_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+ cat->cdm[i].id, &cat->cdm[i]);
+ if (rc) {
+ DPU_ERROR("failed: cdm hw not available\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ dpu_rm_destroy(rm);
+
+ return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer, function checks if lm, and all other hardwired
+ * blocks connected to the lm (pp), are available and appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ * NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks if lm is compatible with
+ * primary_lm, as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs,
+ struct dpu_rm_hw_blk *lm,
+ struct dpu_rm_hw_blk **pp,
+ struct dpu_rm_hw_blk *primary_lm)
+{
+ const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+ struct dpu_rm_hw_iter iter;
+
+ *pp = NULL;
+
+ DPU_DEBUG("check lm %d pp %d\n",
+ lm_cfg->id, lm_cfg->pingpong);
+
+ /* Check if this layer mixer is a peer of the proposed primary LM */
+ if (primary_lm) {
+ const struct dpu_lm_cfg *prim_lm_cfg =
+ to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+ if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ }
+
+ /* Already reserved? */
+ if (RESERVED_BY_OTHER(lm, rsvp)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ return false;
+ }
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->pingpong) {
+ *pp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*pp) {
+ DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+ (*pp)->id);
+ return false;
+ }
+
+ return true;
+}
+
+static int _dpu_rm_reserve_lms(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+ struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter_i, iter_j;
+ int lm_count = 0;
+ int i, rc = 0;
+
+ if (!reqs->topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_i)) {
+ memset(&lm, 0, sizeof(lm));
+ memset(&pp, 0, sizeof(pp));
+
+ lm_count = 0;
+ lm[lm_count] = iter_i.blk;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, lm[lm_count],
+ &pp[lm_count], NULL))
+ continue;
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_j)) {
+ if (iter_i.blk == iter_j.blk)
+ continue;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, iter_j.blk,
+ &pp[lm_count], iter_i.blk))
+ continue;
+
+ lm[lm_count] = iter_j.blk;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->topology->num_lm) {
+ DPU_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lm); i++) {
+ if (!lm[i])
+ break;
+
+ lm[i]->rsvp_nxt = rsvp;
+ pp[i]->rsvp_nxt = rsvp;
+
+ trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+ pp[i]->id);
+ }
+
+ return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ const struct dpu_rm_topology_def *top)
+{
+ struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter;
+ int i = 0;
+
+ memset(&ctls, 0, sizeof(ctls));
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+ unsigned long features = ctl->caps->features;
+ bool has_split_display;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+ DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+ if (top->needs_split_display != has_split_display)
+ continue;
+
+ ctls[i] = iter.blk;
+ DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+ if (++i == top->num_ctl)
+ break;
+ }
+
+ if (i != top->num_ctl)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+ ctls[i]->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+ rsvp->enc_id);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type)
+{
+ struct dpu_rm_hw_iter iter;
+
+ DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+ const struct dpu_cdm_cfg *caps = cdm->caps;
+ bool match = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+ match = test_bit(id, &caps->intf_connect);
+
+ DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+ iter.blk->type, iter.blk->id, rsvp->enc_id,
+ caps->intf_connect, match);
+
+ if (!match)
+ continue;
+
+ trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ iter.blk->rsvp_nxt = rsvp;
+ break;
+ }
+
+ if (!iter.hw) {
+ DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type,
+ bool needs_cdm)
+{
+ struct dpu_rm_hw_iter iter;
+ int ret = 0;
+
+ /* Find the block entry in the rm, and note the reservation */
+ dpu_rm_init_hw_iter(&iter, 0, type);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id != id)
+ continue;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ DPU_ERROR("type %d id %d already reserved\n", type, id);
+ return -ENAVAIL;
+ }
+
+ iter.blk->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ break;
+ }
+
+ /* Shouldn't happen since intfs are fixed at probe */
+ if (!iter.hw) {
+ DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ return -EINVAL;
+ }
+
+ if (needs_cdm)
+ ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+ return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_encoder_hw_resources *hw_res)
+{
+ int i, ret = 0;
+ u32 id;
+
+ for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+ if (hw_res->intfs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + INTF_0;
+ ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+ DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ int ret;
+ struct dpu_rm_topology_def topology;
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->topology->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+ if (ret && !reqs->topology->needs_split_display) {
+ memcpy(&topology, reqs->topology, sizeof(topology));
+ topology.needs_split_display = true;
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+ }
+ if (ret) {
+ DPU_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+ ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_requirements *reqs,
+ struct msm_display_topology req_topology)
+{
+ int i;
+
+ memset(reqs, 0, sizeof(*reqs));
+
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+ req_topology)) {
+ reqs->topology = &g_top_table[i];
+ break;
+ }
+ }
+
+ if (!reqs->topology) {
+ DPU_ERROR("invalid topology for the display\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the requirement based on caps if not set from user space.
+ * This ensures an LM tied to DS blocks is selected.
+ * Currently, DS blocks are tied to LM 0 and LM 1 (primary display).
+ */
+ if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+ conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+ reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+ DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+ reqs->hw_res.display_num_of_h_tiles);
+ DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+ reqs->topology->num_lm, reqs->topology->num_ctl,
+ reqs->topology->top_name,
+ reqs->topology->needs_split_display);
+
+ return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *i;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ if (list_empty(&rm->rsvps))
+ return NULL;
+
+ list_for_each_entry(i, &rm->rsvps, list)
+ if (i->enc_id == enc->base.id)
+ return i;
+
+ return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+ struct drm_encoder *enc)
+{
+ struct drm_connector *conn = NULL;
+ struct list_head *connector_list =
+ &enc->dev->mode_config.connector_list;
+
+ list_for_each_entry(conn, connector_list, head)
+ if (conn->encoder == enc)
+ return conn;
+
+ return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release resources and remove a reservation
+ * @rm: KMS handle
+ * @rsvp: RSVP pointer to remove and release resources for
+ * @conn: connector associated with the reservation
+ */
+static void _dpu_rm_release_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector *conn)
+{
+ struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ if (!rsvp)
+ return;
+
+ DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+ list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+ if (rsvp == rsvp_c) {
+ list_del(&rsvp_c->list);
+ break;
+ }
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ blk->rsvp = NULL;
+ DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ if (blk->rsvp_nxt == rsvp) {
+ blk->rsvp_nxt = NULL;
+ DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ }
+ }
+
+ kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct drm_connector *conn;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _dpu_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ conn = _dpu_rm_get_connector(enc);
+ if (!conn) {
+ DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ _dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+ mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+ int ret = 0;
+
+ /* Swap next rsvp to be the active */
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp_nxt) {
+ blk->rsvp = blk->rsvp_nxt;
+ blk->rsvp_nxt = NULL;
+ }
+ }
+ }
+
+ if (!ret)
+ DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+ rsvp->topology);
+
+ return ret;
+}
+
+int dpu_rm_reserve(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_requirements reqs;
+ int ret;
+
+ if (!rm || !enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+ conn_state->connector->base.id, enc->base.id,
+ crtc_state->crtc->base.id, test_only);
+
+ mutex_lock(&rm->rm_lock);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+ conn_state, &reqs, topology);
+ if (ret) {
+ DPU_ERROR("failed to populate hw requirements\n");
+ goto end;
+ }
+
+ /*
+ * We only support one active reservation per-hw-block. But to implement
+ * transactional semantics for test-only, and for allowing failure while
+ * modifying your existing reservation, over the course of this
+ * function we can have two reservations:
+ * Current: Existing reservation
+ * Next: Proposed reservation. The proposed reservation may fail, or may
+ * be discarded if in test-only mode.
+ * If reservation is successful, and we're not in test-only, then we
+ * replace the current with the next.
+ */
+ rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+ /*
+ * User can request that we clear out any reservation during the
+ * atomic_check phase by using this CLEAR bit
+ */
+ if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+ DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+ rsvp_cur->seq, rsvp_cur->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ rsvp_cur = NULL;
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+ }
+
+ /* Check the proposed reservation, store it in hw's "next" field */
+ ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+ if (ret) {
+ DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ /*
+ * Normally, if test_only, test the reservation and then undo
+ * However, if the user requests LOCK, then keep the reservation
+ * made during the atomic_check phase.
+ */
+ DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else {
+ if (test_only && RM_RQ_LOCK(&reqs))
+ DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+ ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ }
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
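Putting the pieces together, the reserve/release entry points map onto the DRM atomic phases roughly as below; a sketch with hypothetical call sites and simplified error handling, not part of the patch:

    /* atomic_check: trial reservation, discarded unless LOCK is set */
    ret = dpu_rm_reserve(rm, enc, crtc_state, conn_state, topology, true);
    if (ret)
    	return ret;

    /* mode_set/commit: make the reservation stick */
    ret = dpu_rm_reserve(rm, enc, crtc_state, conn_state, topology, false);

    /* encoder disable: hand all blocks back to the pool */
    dpu_rm_release(rm, enc);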
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000000..ffd1841a6067
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+ DPU_RM_TOPOLOGY_NONE = 0,
+ DPU_RM_TOPOLOGY_SINGLEPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+ DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - topology control preferences set by the client
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ * test, reserve the resources for this display.
+ * Normal behavior would not impact the reservation
+ * list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ * release any reservation held by this display.
+ * Normal behavior would not impact the
+ * reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS: Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+ DPU_RM_TOPCTL_RESERVE_LOCK,
+ DPU_RM_TOPCTL_RESERVE_CLEAR,
+ DPU_RM_TOPCTL_DS,
+};
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ * list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+ struct drm_device *dev;
+ struct list_head rsvps;
+ struct list_head hw_blks[DPU_HW_BLK_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+ uint32_t lm_max_width;
+ uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - resource manager internal structure
+ * forward declaration for single iterator definition without void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+ void *hw;
+ struct dpu_rm_hw_blk *blk;
+ uint32_t enc_id;
+ enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the display's connections and user requirements, specified through
+ * related topology control properties, and reserve hardware blocks for
+ * that display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW reservations should be released via dpu_rm_release.
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: topology info for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only);
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ * @Return: none
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ * This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ * using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type);
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type for a given encoder.
+ * Initialize an iterator object.
+ * Set hw block type of interest. Set encoder id of interest, 0 for any.
+ * Function returns first hw of type for that encoder.
+ * Subsequent calls will return the next reserved hw of that type in-order.
+ * Iterator HW pointer will be null on failure to find hw.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ * definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000000..ae0ca5076238
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u64, lut)
+ __field(u32, lut_usage)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->lut_usage = lut_usage;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->lut_usage)
+);
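Each TRACE_EVENT() above generates a trace_<name>() helper that the driver calls at the instrumented site, with arguments following TP_PROTO. A hypothetical call site (variable names are assumptions, not from this patch):

    trace_dpu_perf_set_qos_luts(pnum, fmt, is_rt, fill_level,
    			    qos_lut, lut_usage);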
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_perf_update_bus,
+ TP_PROTO(int client, unsigned long long ab_quota,
+ unsigned long long ib_quota),
+ TP_ARGS(client, ab_quota, ib_quota),
+ TP_STRUCT__entry(
+ __field(int, client)
+ __field(u64, ab_quota)
+ __field(u64, ib_quota)
+ ),
+ TP_fast_assign(
+ __entry->client = client;
+ __entry->ab_quota = ab_quota;
+ __entry->ib_quota = ib_quota;
+ ),
+ TP_printk("Request client:%d ab=%llu ib=%llu",
+ __entry->client,
+ __entry->ab_quota,
+ __entry->ib_quota)
+)
+
+
+TRACE_EVENT(dpu_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(dpu_trace_counter,
+ TP_PROTO(int pid, char *name, int value),
+ TP_ARGS(pid, name, value),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(counter_name, name)
+ __field(int, value)
+ ),
+ TP_fast_assign(
+ __entry->pid = current->tgid;
+ __assign_str(counter_name, name);
+ __entry->value = value;
+ ),
+ TP_printk("%d|%s|%d", __entry->pid,
+ __get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+ u64 bw_ctl_ebi, u32 core_clk_rate,
+ bool stop_req, u32 update_bus, u32 update_clk),
+ TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+ stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl_mnoc)
+ __field(u64, bw_ctl_llcc)
+ __field(u64, bw_ctl_ebi)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(u32, update_bus)
+ __field(u32, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl_mnoc = bw_ctl_mnoc;
+ __entry->bw_ctl_llcc = bw_ctl_llcc;
+ __entry->bw_ctl_ebi = bw_ctl_ebi;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk(
+ "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl_mnoc,
+ __entry->bw_ctl_llcc,
+ __entry->bw_ctl_ebi,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+ TP_ARGS(drm_id, flags, private_flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ __field( int, private_flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ __entry->private_flags = private_flags;
+ ),
+ TP_printk("id=%u, flags=%u, private_flags=%d",
+ __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, event, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+ __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "pending_flush_ret=%u", __entry->drm_id,
+ __entry->intf_idx, __entry->pending_kickoff_cnt,
+ __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( ktime_t, time )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->time = time;
+ ),
+ TP_printk("id=%u, time=%lld", __entry->drm_id,
+ ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int32_t, hw_id )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hw_id = hw_id;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+ __entry->pp, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+ u32 event),
+ TP_ARGS(drm_id, pp, new_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, new_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->new_count = new_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+ __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+ int kickoff_count, u32 event),
+ TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, timeout_count )
+ __field( int, kickoff_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->timeout_count = timeout_count;
+ __entry->kickoff_count = kickoff_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+ __entry->drm_id, __entry->pp, __entry->timeout_count,
+ __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+ __entry->intf_idx, __entry->enable ? "true" : "false",
+ __entry->drm_id)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+ TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+ struct drm_plane_state *state, struct dpu_plane_state *pstate,
+ uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+ uint64_t modifier),
+ TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+ pixel_format, modifier),
+ TP_STRUCT__entry(
+ __field( uint32_t, crtc_id )
+ __field( uint32_t, plane_id )
+ __field( struct drm_plane_state*,state )
+ __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, stage_idx )
+ __field( enum dpu_sspp, sspp )
+ __field( uint32_t, pixel_format )
+ __field( uint64_t, modifier )
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __entry->plane_id = plane_id;
+ __entry->state = state;
+ __entry->pstate = pstate;
+ __entry->stage_idx = stage_idx;
+ __entry->sspp = sspp;
+ __entry->pixel_format = pixel_format;
+ __entry->modifier = modifier;
+ ),
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+ "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ "multirect_index:%d multirect_mode:%u pix_format:%u "
+ "modifier:%llu",
+ __entry->crtc_id, __entry->plane_id,
+ __entry->state->fb ? __entry->state->fb->base.id : -1,
+ __entry->state->src_w >> 16, __entry->state->src_h >> 16,
+ __entry->state->src_x >> 16, __entry->state->src_y >> 16,
+ __entry->state->crtc_w, __entry->state->crtc_h,
+ __entry->state->crtc_x, __entry->state->crtc_y,
+ __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+ __entry->pstate->multirect_index,
+ __entry->pstate->multirect_mode, __entry->pixel_format,
+ __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+ TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+ TP_ARGS(drm_id, mixer, bounds),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, mixer )
+ __field( struct drm_rect *, bounds )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->mixer = mixer;
+ __entry->bounds = bounds;
+ ),
+ TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+ __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+ TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+ struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enc_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( uint32_t, enc_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enc_id = enc_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+ "vblank_req:%s}",
+ __entry->drm_id, __entry->enc_id,
+ __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ __entry->drm_id, __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+ TP_PROTO(uint32_t drm_id, int frame_pending),
+ TP_ARGS(drm_id, frame_pending),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, frame_pending )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->frame_pending = frame_pending;
+ ),
+ TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+ __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+ TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+ enum dpu_sspp_multirect_index multirect_index),
+ TP_ARGS(index, layout, multirect_index),
+ TP_STRUCT__entry(
+ __field( enum dpu_sspp, index )
+ __field( struct dpu_hw_fmt_layout*, layout )
+ __field( enum dpu_sspp_multirect_index, multirect_index)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->layout = layout;
+ __entry->multirect_index = multirect_index;
+ ),
+ TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+ "multirect_index:%d", __entry->index, __entry->layout->width,
+ __entry->layout->height, __entry->layout->plane_addr[0],
+ __entry->layout->plane_size[0],
+ __entry->layout->plane_addr[1],
+ __entry->layout->plane_size[1],
+ __entry->layout->plane_addr[2],
+ __entry->layout->plane_size[2],
+ __entry->layout->plane_addr[3],
+ __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+ TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+ TP_ARGS(drm_id, is_virtual, multirect_mode),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, is_virtual )
+ __field( uint32_t, multirect_mode )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->is_virtual = is_virtual;
+ __entry->multirect_mode = multirect_mode;
+ ),
+ TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+ __entry->is_virtual ? "true" : "false",
+ __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+ __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+ uint32_t pp_id),
+ TP_ARGS(id, type, enc_id, pp_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ __field( uint32_t, pp_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ __entry->pp_id = pp_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+ TP_PROTO(enum dpu_vbif index, u32 xin_id),
+ TP_ARGS(index, xin_id),
+ TP_STRUCT__entry(
+ __field( enum dpu_vbif, index )
+ __field( u32, xin_id )
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->xin_id = xin_id;
+ ),
+ TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+ TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+ TP_ARGS(pp, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_pingpong, pp )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->pp = pp;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( int, enable_count )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->enable_count = enable_count;
+ ),
+ TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+ __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( struct dpu_irq_callback *, callback)
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->callback = callback;
+ ),
+ TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+ TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+ TP_ARGS(dev, stop_req, clk_rate),
+ TP_STRUCT__entry(
+ __field( struct drm_device *, dev )
+ __field( bool, stop_req )
+ __field( u64, clk_rate )
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->stop_req = stop_req;
+ __entry->clk_rate = clk_rate;
+ ),
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+ trace_dpu_trace_counter(current->tgid, name, value)
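
These ATRACE helpers wrap the tracing_mark_write/dpu_trace_counter tracepoints so systrace-style tools can render named slices and counter tracks. A usage sketch (function and counter names hypothetical):

static void dpu_hypothetical_commit_work(void)
{
	DPU_ATRACE_BEGIN("commit_work");	/* open a named slice */
	/* ... perform the commit ... */
	DPU_ATRACE_INT("commit_pending", 0);	/* emit a counter sample */
	DPU_ATRACE_END("commit_work");		/* close the slice */
}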
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 000000000000..295528292296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * @return: 0 on success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ ktime_t timeout;
+ bool status;
+ int rc;
+
+ if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ return -EINVAL;
+ }
+
+ timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+ for (;;) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ if (status)
+ break;
+ if (ktime_compare_safe(ktime_get(), timeout) > 0) {
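+ /* sample the status once more past the deadline so a thread
+ * preempted between the poll and the deadline check does not
+ * report a spurious timeout
+ */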
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ break;
+ }
+ usleep_range(501, 1000);
+ }
+
+ if (!status) {
+ rc = -ETIMEDOUT;
+ DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+ vbif->idx - VBIF_0, xin_id);
+ } else {
+ rc = 0;
+ DPU_DEBUG("VBIF %d client %d is halted\n",
+ vbif->idx - VBIF_0, xin_id);
+ }
+
+ return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+ u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+ u64 pps;
+ const struct dpu_vbif_dynamic_ot_tbl *tbl;
+ u32 i;
+
+ if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+ return;
+
+ /* Dynamic OT setting done only for WFD */
+ if (!params->is_wfd)
+ return;
+
+ pps = params->frame_rate;
+ pps *= params->width;
+ pps *= params->height;
+
+ tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+ &vbif->cap->dynamic_ot_wr_tbl;
+
+ for (i = 0; i < tbl->count; i++) {
+ if (pps <= tbl->cfg[i].pps) {
+ *ot_lim = tbl->cfg[i].ot_limit;
+ break;
+ }
+ }
+
+ DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ vbif->idx - VBIF_0, params->xin_id,
+ params->width, params->height, params->frame_rate,
+ pps, *ot_lim);
+}
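
To make the lookup concrete: a 1920x1080 WFD stream at 60 fps yields pps = 60 * 1920 * 1080 = 124,416,000, so the first table row whose cfg[i].pps threshold is at or above that value supplies the OT limit (numbers illustrative; the real thresholds come from the per-target catalog).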
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * @return: OT limit to program; 0 when no limit needs to be set
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+ struct dpu_vbif_set_ot_params *params)
+{
+ u32 ot_lim = 0;
+ u32 val;
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ return -EINVAL;
+ }
+
+ if (vbif->cap->default_ot_wr_limit && !params->rd)
+ ot_lim = vbif->cap->default_ot_wr_limit;
+ else if (vbif->cap->default_ot_rd_limit && params->rd)
+ ot_lim = vbif->cap->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt/catalog,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limit if the target and the use case require it */
+ _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+ if (vbif && vbif->ops.get_limit_conf) {
+ val = vbif->ops.get_limit_conf(vbif,
+ params->xin_id, params->rd);
+ if (val == ot_lim)
+ ot_lim = 0;
+ }
+
+exit:
+ DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+ vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms: DPU handler
+ * @params: Pointer to usecase parameters
+ *
+ * Note: this function blocks while waiting for the bus to halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ u32 ot_lim;
+ int ret, i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+ vbif = dpu_kms->hw_vbif[i];
+ }
+
+ if (!vbif || !mdp) {
+ DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+ vbif != 0, mdp != 0);
+ return;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_limit_conf ||
+ !vbif->ops.set_halt_ctrl)
+ return;
+
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+ ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+ if (ot_lim == 0)
+ goto exit;
+
+ trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+ params->vbif_idx);
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+ return;
+}
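
A minimal caller sketch, with illustrative field values (VBIF_RT and DPU_CLK_CTRL_VIG0 are assumed here from the accompanying DPU catalog headers):

struct dpu_vbif_set_ot_params ot_params = {
	.xin_id     = 0,
	.num        = 0,
	.width      = 1920,
	.height     = 1080,
	.frame_rate = 60,
	.rd         = true,		/* read client */
	.is_wfd     = false,
	.vbif_idx   = VBIF_RT,
	.clk_ctrl   = DPU_CLK_CTRL_VIG0,
};

dpu_vbif_set_ot_limit(dpu_kms, &ot_params);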
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ const struct dpu_vbif_qos_tbl *qos_tbl;
+ int i;
+
+ if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+ vbif = dpu_kms->hw_vbif[i];
+ break;
+ }
+ }
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+ return;
+ }
+
+ if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ DPU_DEBUG("qos remap not supported\n");
+ return;
+ }
+
+ qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+ &vbif->cap->qos_nrt_tbl;
+
+ if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+ DPU_DEBUG("qos tbl not defined\n");
+ return;
+ }
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+ DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+ params->vbif_idx, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ }
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
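
The QoS path follows the same shape; a hedged usage sketch (values illustrative, identifiers again assumed from the catalog headers):

struct dpu_vbif_set_qos_params qos_params = {
	.vbif_idx = VBIF_RT,
	.xin_id   = 0,
	.clk_ctrl = DPU_CLK_CTRL_VIG0,
	.num      = 0,
	.is_rt    = true,	/* select the real-time priority table */
};

dpu_vbif_set_qos_remap(dpu_kms, &qos_params);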
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ u32 i, pnd, src;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->ops.clear_errors) {
+ vbif->ops.clear_errors(vbif, &pnd, &src);
+ if (pnd || src) {
+ DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+ vbif->idx - VBIF_0, pnd, src);
+ }
+ }
+ }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ int i, j;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+ dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ char vbif_name[32];
+ struct dentry *debugfs_vbif;
+ int i, j;
+
+ dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+ if (!dpu_kms->debugfs_vbif) {
+ DPU_ERROR("failed to create vbif debugfs\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+ debugfs_vbif = debugfs_create_dir(vbif_name,
+ dpu_kms->debugfs_vbif);
+
+ debugfs_create_u32("features", 0600, debugfs_vbif,
+ (u32 *)&vbif->features);
+
+ debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+ (u32 *)&vbif->xin_halt_timeout);
+
+ debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_rd_limit);
+
+ debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_wr_limit);
+
+ for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+
+ for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+ }
+
+ return 0;
+}
+#endif
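
For reference, with a single VBIF (id 0) carrying one dynamic read-OT entry, the resulting tree under the supplied debugfs root would look roughly like this (a sketch, not captured output):

vbif/
    0/
        features
        xin_halt_timeout
        default_rd_ot_limit
        default_wr_ot_limit
        dynamic_ot_rd_0_pps
        dynamic_ot_rd_0_ot_limit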
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 000000000000..f17af52dbbd5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
+struct dpu_vbif_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ bool rd;
+ bool is_wfd;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ u32 num;
+ bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms: DPU handler
+ * @params: Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+ struct dentry *debugfs_root)
+{
+ return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 000000000000..4f12e5c534c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
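
MSM_MEDIA_ALIGN takes the mask path when __align is a power of two and the divide/multiply path otherwise; two worked values (illustrative, not part of the header):

/*
 * 32 is a power of two, so the mask path runs:
 *	MSM_MEDIA_ALIGN(1080, 32)  == 1088
 * 192 is not, so the divide/multiply path runs:
 *	MSM_MEDIA_ALIGN(1000, 192) == 1152
 */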
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
+
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV12,
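
Plugging a 1920x1080 frame into the NV12 rules above makes the arithmetic concrete (a worked example, not part of the header; Extradata taken as the 16K from VENUS_EXTRADATA_SIZE):

/*
 * Y_Stride     = align(1920, 128) = 1920
 * UV_Stride    = align(1920, 128) = 1920
 * Y_Scanlines  = align(1080, 32)  = 1088
 * UV_Scanlines = align(540, 16)   = 544
 * Total size   = align(1920*1088 + 1920*544
 *                      + max(16384, 1920*8), 4096)
 *              = align(3149824, 4096) = 3149824 bytes (~3.0 MiB)
 */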
+
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV21,
+ /* Venus NV12_MVTB:
+ * Two YUV 4:2:0 images/views one after the other
+ * in a top-bottom layout, same as NV12
+ * with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_1
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_2
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * View_1 begin at: 0 (zero)
+ * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align((2*(Y_Stride * Y_Scanlines)
+ * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+ */
+ COLOR_FMT_NV12_MVTB,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size
+ * + max(Extradata, Y_TF_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 4/3, 128)
+ * UV_Stride = align(Width * 4/3, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Plane_size + Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888,
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888_UBWC,
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 128)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* P010 UBWC:
+ * Compressed macro-tile format for P010.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 128
+ * UV_Stride : Width * 2 aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_P010,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010 COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+ (void)height;
+ (void)width;
+
+ /*
+ * Hardcode the size for now, since 16K satisfies all current use
+ * cases; eventually it should be calculated from the width/height.
+ */
+ return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width*2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
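+
+/*
+ * Worked example (illustrative, hypothetical widths):
+ * VENUS_Y_STRIDE(COLOR_FMT_NV12, 1080) = MSM_MEDIA_ALIGN(1080, 128) = 1152.
+ * For COLOR_FMT_NV12_BPP10_UBWC at width 1920: align(1920, 192) = 1920,
+ * then align(1920 * 4/3, 256) = align(2560, 256) = 2560 bytes per row.
+ */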
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width*2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ alignment = 16;
+ break;
+ default:
+ return 0;
+ }
+ sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+ return sclines;
+}
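+
+/*
+ * e.g. with a hypothetical 720-line frame: NV12 gives
+ * MSM_MEDIA_ALIGN(720, 32) = 736 scanlines, while the 10 bit UBWC
+ * formats give MSM_MEDIA_ALIGN(720, 16) = 720.
+ */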
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 16;
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 32;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+ int y_tile_width = 0, y_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+ return y_meta_stride;
+}
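+
+/*
+ * Illustration (hypothetical width): COLOR_FMT_NV12_UBWC at width 1920
+ * spans MSM_MEDIA_ROUNDUP(1920, 32) = 60 tile columns, which
+ * MSM_MEDIA_ALIGN(60, 64) then pads to a 64 byte meta stride.
+ */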
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+ int y_tile_height = 0, y_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+ return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+ int uv_tile_width = 0, uv_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+ return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+ return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment = 0, stride = 0, bpp = 4;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+ return stride;
+}
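+
+/*
+ * e.g. with a hypothetical width of 1080: RGBA8888 gives
+ * MSM_MEDIA_ALIGN(1080 * 4, 128) = 4352, while RGB565_UBWC (bpp = 2)
+ * gives MSM_MEDIA_ALIGN(1080 * 2, 256) = 2304.
+ */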
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment = 0, scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+ return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+ int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_width = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+ rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+ return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+ int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+ rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+ return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+ int color_fmt, int width, int height)
+{
+ const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+ unsigned int uv_alignment = 0, size = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+ unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+ unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+ unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+ unsigned int rgb_stride = 0, rgb_scanlines = 0;
+ unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+ unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_P010:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane +
+ MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_MVTB:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane;
+ size = 2 * size + extra_size;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane)*2 +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888:
+ rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+ size = rgb_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+ 4096);
+ rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+ height);
+ rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+ rgb_meta_scanlines, 4096);
+ size = rgb_ubwc_plane + rgb_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return size;
+}
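+
+/*
+ * Worked example (a sketch for a hypothetical 1920x1080 NV12 buffer):
+ * y_stride = 1920, y_sclines = align(1080, 32) = 1088,
+ * y_plane = 1920 * 1088 = 2088960;
+ * uv_stride = 1920, uv_sclines = align((1080 + 1) >> 1, 16) = 544,
+ * uv_plane = 1920 * 544 + 4096 = 1048576;
+ * extra = max(16384, 8 * 1920) = 16384;
+ * total = align(2088960 + 1048576 + 16384, 4096) = 3153920 bytes.
+ */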
+
+static inline unsigned int VENUS_VIEW2_OFFSET(
+ int color_fmt, int width, int height)
+{
+ unsigned int offset = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_MVTB:
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines;
+ offset = y_plane + uv_plane;
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return offset;
+}
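+
+/*
+ * For COLOR_FMT_NV12_MVTB the second view thus starts immediately after
+ * the first view's Y and UV planes; every other format returns offset 0.
+ */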
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index 576cea30d391..4b36b8954bae 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b001699297c4..457c29dba4a1 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(plane->fb));
+ to_mdp_format(msm_framebuffer_format(plane->state->fb));
alpha[idx-1] = format->alpha_enable;
}
}
@@ -665,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4b646bf9c214..44d1cda56974 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -125,6 +125,8 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
+
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 4a645926edb7..2bfb39082f54 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -341,7 +341,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_disable(panel);
drm_panel_unprepare(panel);
}
@@ -410,7 +410,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_prepare(panel);
drm_panel_enable(panel);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index e3b1c86b7aae..5368e621999c 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -34,9 +34,12 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
- if (!mdp4_lvds_connector->panel)
+ if (!mdp4_lvds_connector->panel) {
mdp4_lvds_connector->panel =
of_drm_find_panel(mdp4_lvds_connector->panel_node);
+ if (IS_ERR(mdp4_lvds_connector->panel))
+ mdp4_lvds_connector->panel = NULL;
+ }
return mdp4_lvds_connector->panel ?
connector_status_connected :
@@ -129,7 +132,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 20e956e14c21..79ff653d8081 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -68,7 +68,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -167,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
-
- plane->fb = fb;
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index d9c10e02ee41..784d98989e3a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 10271359789e..b1da9ce54379 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -65,7 +65,7 @@ struct mdp5_crtc {
struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height;
- uint32_t x, y;
+ int x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -760,20 +760,31 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
* Cursor Region Of Interest (ROI) is a plane read from cursor
* buffer to render. The ROI region is determined by the visibility of
* the cursor point. In the default Cursor image the cursor point will
- * be at the top left of the cursor image, unless it is specified
- * otherwise using hotspot feature.
+ * be at the top left of the cursor image.
*
+ * Without rotation:
* If the cursor point reaches the right (xres - x < cursor.width) or
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
* width and ROI height need to be evaluated to crop the cursor image
* accordingly.
* (xres-x) will be new cursor width when x > (xres - cursor.width)
* (yres-y) will be new cursor height when y > (yres - cursor.height)
+ *
+ * With rotation:
+ * We get negative x and/or y coordinates.
+ * (cursor.width - abs(x)) will be new cursor width when x < 0
+ * (cursor.height - abs(y)) will be new cursor height when y < 0
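+ *
+ * For instance (illustrative numbers): a 64x64 cursor at x = -16
+ * gives roi_w = 64 - abs(-16) = 48.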
*/
- *roi_w = min(mdp5_crtc->cursor.width, xres -
+ if (mdp5_crtc->cursor.x >= 0)
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
mdp5_crtc->cursor.x);
- *roi_h = min(mdp5_crtc->cursor.height, yres -
+ else
+ *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+ if (mdp5_crtc->cursor.y >= 0)
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
mdp5_crtc->cursor.y);
+ else
+ *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
@@ -783,7 +794,7 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
- uint32_t x, y, width, height;
+ uint32_t x, y, src_x, src_y, width, height;
uint32_t roi_w, roi_h;
int lm;
@@ -800,6 +811,26 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
get_roi(crtc, &roi_w, &roi_h);
+ /* If the cursor buffer overlaps the upper or left screen
+ * border due to rotation, the pixel offset of the ROI inside
+ * the cursor buffer is the positive overlap distance.
+ */
+ if (mdp5_crtc->cursor.x < 0) {
+ src_x = abs(mdp5_crtc->cursor.x);
+ x = 0;
+ } else {
+ src_x = 0;
+ }
+ if (mdp5_crtc->cursor.y < 0) {
+ src_y = abs(mdp5_crtc->cursor.y);
+ y = 0;
+ } else {
+ src_y = 0;
+ }
+ DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+ crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -812,6 +843,9 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+ MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+ MDP5_LM_CURSOR_XY_SRC_X(src_x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
@@ -932,8 +966,9 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
if (unlikely(!crtc->state->enable))
return 0;
- mdp5_crtc->cursor.x = x = max(x, 0);
- mdp5_crtc->cursor.y = y = max(y, 0);
+ /* accept negative x/y coordinates up to maximum cursor overlap */
+ mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+ mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
get_roi(crtc, &roi_w, &roi_h);
@@ -1207,7 +1242,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
"unref cursor", unref_cursor_worker);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 9af94e35f678..fcd44d1d1068 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -319,7 +319,17 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
- mdp5_cstate->defer_start = true;
+
+ /*
+ * This is a bit awkward, but we want to flush the CTL and hit the
+ * START bit at most once for an atomic update. In the non-full-
+ * modeset case, this is done from crtc->atomic_flush(), but that
+ * is too early in the case of full modeset, in which case we
+ * defer to encoder->enable(). But we need to *know* whether
+ * encoder->enable() will be called to do this:
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ mdp5_cstate->defer_start = true;
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6e12e275deba..bddd625ab91b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -170,6 +170,8 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct device *dev = &mdp5_kms->pdev->dev;
struct mdp5_global_state *global_state;
+ drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index f2a0db7a8a03..1cc4e57f0226 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -20,12 +20,10 @@
#include "msm_drv.h"
#include "mdp5_kms.h"
-/*
- * If needed, this can become more specific: something like struct mdp5_mdss,
- * which contains a 'struct msm_mdss base' member.
- */
-struct msm_mdss {
- struct drm_device *dev;
+#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
+
+struct mdp5_mdss {
+ struct msm_mdss base;
void __iomem *mmio, *vbif;
@@ -41,22 +39,22 @@ struct msm_mdss {
} irqcontroller;
};
-static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
- msm_writel(data, mdss->mmio + reg);
+ msm_writel(data, mdp5_mdss->mmio + reg);
}
-static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
- return msm_readl(mdss->mmio + reg);
+ return msm_readl(mdp5_mdss->mmio + reg);
}
static irqreturn_t mdss_irq(int irq, void *arg)
{
- struct msm_mdss *mdss = arg;
+ struct mdp5_mdss *mdp5_mdss = arg;
u32 intr;
- intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+ intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
@@ -64,7 +62,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
- mdss->irqcontroller.domain, hwirq));
+ mdp5_mdss->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
@@ -84,19 +82,19 @@ static irqreturn_t mdss_irq(int irq, void *arg)
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
@@ -109,13 +107,13 @@ static struct irq_chip mdss_hw_irq_chip = {
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct msm_mdss *mdss = d->host_data;
+ struct mdp5_mdss *mdp5_mdss = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdss);
+ irq_set_chip_data(irq, mdp5_mdss);
return 0;
}
@@ -126,90 +124,99 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
};
-static int mdss_irq_domain_init(struct msm_mdss *mdss)
+static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
- struct device *dev = mdss->dev->dev;
+ struct device *dev = mdp5_mdss->base.dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
- mdss);
+ mdp5_mdss);
if (!d) {
dev_err(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
- mdss->irqcontroller.enabled_mask = 0;
- mdss->irqcontroller.domain = d;
+ mdp5_mdss->irqcontroller.enabled_mask = 0;
+ mdp5_mdss->irqcontroller.domain = d;
return 0;
}
-int msm_mdss_enable(struct msm_mdss *mdss)
+static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- clk_prepare_enable(mdss->ahb_clk);
- if (mdss->axi_clk)
- clk_prepare_enable(mdss->axi_clk);
- if (mdss->vsync_clk)
- clk_prepare_enable(mdss->vsync_clk);
+ clk_prepare_enable(mdp5_mdss->ahb_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_prepare_enable(mdp5_mdss->axi_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_prepare_enable(mdp5_mdss->vsync_clk);
return 0;
}
-int msm_mdss_disable(struct msm_mdss *mdss)
+static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- if (mdss->vsync_clk)
- clk_disable_unprepare(mdss->vsync_clk);
- if (mdss->axi_clk)
- clk_disable_unprepare(mdss->axi_clk);
- clk_disable_unprepare(mdss->ahb_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_disable_unprepare(mdp5_mdss->vsync_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_disable_unprepare(mdp5_mdss->axi_clk);
+ clk_disable_unprepare(mdp5_mdss->ahb_clk);
return 0;
}
-static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
- struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+ struct platform_device *pdev =
+ to_platform_device(mdp5_mdss->base.dev->dev);
- mdss->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(mdss->ahb_clk))
- mdss->ahb_clk = NULL;
+ mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdp5_mdss->ahb_clk))
+ mdp5_mdss->ahb_clk = NULL;
- mdss->axi_clk = msm_clk_get(pdev, "bus");
- if (IS_ERR(mdss->axi_clk))
- mdss->axi_clk = NULL;
+ mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdp5_mdss->axi_clk))
+ mdp5_mdss->axi_clk = NULL;
- mdss->vsync_clk = msm_clk_get(pdev, "vsync");
- if (IS_ERR(mdss->vsync_clk))
- mdss->vsync_clk = NULL;
+ mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdp5_mdss->vsync_clk))
+ mdp5_mdss->vsync_clk = NULL;
return 0;
}
-void msm_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss = priv->mdss;
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
- if (!mdss)
+ if (!mdp5_mdss)
return;
- irq_domain_remove(mdss->irqcontroller.domain);
- mdss->irqcontroller.domain = NULL;
+ irq_domain_remove(mdp5_mdss->irqcontroller.domain);
+ mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
pm_runtime_disable(dev->dev);
}
-int msm_mdss_init(struct drm_device *dev)
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = mdp5_mdss_enable,
+ .disable = mdp5_mdss_disable,
+ .destroy = mdp5_mdss_destroy,
+};
+
+int mdp5_mdss_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss;
+ struct mdp5_mdss *mdp5_mdss;
int ret;
DBG("");
@@ -217,40 +224,40 @@ int msm_mdss_init(struct drm_device *dev)
if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
return 0;
- mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
- if (!mdss) {
+ mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+ if (!mdp5_mdss) {
ret = -ENOMEM;
goto fail;
}
- mdss->dev = dev;
+ mdp5_mdss->base.dev = dev;
- mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
- if (IS_ERR(mdss->mmio)) {
- ret = PTR_ERR(mdss->mmio);
+ mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdp5_mdss->mmio)) {
+ ret = PTR_ERR(mdp5_mdss->mmio);
goto fail;
}
- mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdss->vbif)) {
- ret = PTR_ERR(mdss->vbif);
+ mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_mdss->vbif)) {
+ ret = PTR_ERR(mdp5_mdss->vbif);
goto fail;
}
- ret = msm_mdss_get_clocks(mdss);
+ ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
/* Regulator to enable GDSCs in downstream kernels */
- mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdss->vdd)) {
- ret = PTR_ERR(mdss->vdd);
+ mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdp5_mdss->vdd)) {
+ ret = PTR_ERR(mdp5_mdss->vdd);
goto fail;
}
- ret = regulator_enable(mdss->vdd);
+ ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
@@ -258,25 +265,26 @@ int msm_mdss_init(struct drm_device *dev)
}
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
- mdss_irq, 0, "mdss_isr", mdss);
+ mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
- ret = mdss_irq_domain_init(mdss);
+ ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
- priv->mdss = mdss;
+ mdp5_mdss->base.funcs = &mdss_funcs;
+ priv->mdss = &mdp5_mdss->base;
pm_runtime_enable(dev->dev);
return 0;
fail_irq:
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index e09bc53a0e65..7d306c5acd09 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -46,7 +46,7 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -512,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
- mdp5_crtc_get_pipeline(plane->crtc);
+ mdp5_crtc_get_pipeline(new_state->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@@ -1029,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_img_w, src_img_h,
src_x + src_w, src_y, src_w, src_h);
- plane->fb = fb;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index 1494c407be44..d420c8044e23 100644
--- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index b744bcc7d8ad..ff8164cc6738 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -208,6 +208,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ goto fail;
+
msm_dsi->encoder = encoder;
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 70d9a9a47acd..08f3fc6771b7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -100,6 +100,7 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+bool msm_dsi_manager_validate_current_config(u8 id);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -149,6 +150,7 @@ static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
#endif
/* dsi host */
+struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
@@ -162,7 +164,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings);
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
@@ -175,13 +178,29 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req);
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index f6a9471b70c8..21f489a737d7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,8 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-01-12 09:09:22)
-- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 0327bb54b01b..dcdfb1bb54f9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -136,20 +136,58 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_v2,
+ .link_clk_disable = dsi_link_clk_disable_v2,
+ .clk_init_ver = dsi_clk_init_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_v2,
+ .tx_buf_get = dsi_tx_buf_get_v2,
+ .tx_buf_put = NULL,
+ .dma_base_get = dsi_dma_base_get_v2,
+ .calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = NULL,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
- {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+ &apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
- &msm8974_apq8084_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+ &msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+ &msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+ &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 9cfdcf1c95d5..16c507911110 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -40,10 +40,22 @@ struct msm_dsi_config {
const int num_dsi;
};
+struct msm_dsi_host_cfg_ops {
+ int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+ void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+ int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+ int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+ void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
+ void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+ int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+};
+
struct msm_dsi_cfg_handler {
u32 major;
u32 minor;
const struct msm_dsi_config *cfg;
+ const struct msm_dsi_host_cfg_ops *ops;
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 2f1a2780658a..96fb5f635314 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -118,6 +118,7 @@ struct msm_dsi_host {
struct clk *byte_intf_clk;
u32 byte_clk_rate;
+ u32 pixel_clk_rate;
u32 esc_clk_rate;
/* DSI v2 specific clocks */
@@ -332,6 +333,54 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->src_clk = msm_clk_get(pdev, "src");
+
+ if (IS_ERR(msm_host->src_clk)) {
+ ret = PTR_ERR(msm_host->src_clk);
+ pr_err("%s: can't find src clock. ret=%d\n",
+ __func__, ret);
+ msm_host->src_clk = NULL;
+ return ret;
+ }
+
+ msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+ if (!msm_host->esc_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+ if (!msm_host->dsi_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get src clock parent. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -379,19 +428,6 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
- cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) {
- msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
- if (IS_ERR(msm_host->byte_intf_clk)) {
- ret = PTR_ERR(msm_host->byte_intf_clk);
- pr_err("%s: can't find byte_intf clock. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- } else {
- msm_host->byte_intf_clk = NULL;
- }
-
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
@@ -406,31 +442,8 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- msm_host->src_clk = msm_clk_get(pdev, "src");
- if (IS_ERR(msm_host->src_clk)) {
- ret = PTR_ERR(msm_host->src_clk);
- pr_err("%s: can't find src clock. ret=%d\n",
- __func__, ret);
- msm_host->src_clk = NULL;
- goto exit;
- }
-
- msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
- if (!msm_host->esc_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get esc clock parent. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
- if (!msm_host->dsi_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get src clock parent. ret=%d\n",
- __func__, ret);
- }
- }
+ if (cfg_hnd->ops->clk_init_ver)
+ ret = cfg_hnd->ops->clk_init_ver(msm_host);
exit:
return ret;
}
@@ -498,7 +511,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -511,7 +524,7 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -566,7 +579,7 @@ error:
return ret;
}
-static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -592,7 +605,7 @@ static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -634,98 +647,121 @@ error:
return ret;
}
-static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ if (msm_host->byte_intf_clk)
+ clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- return dsi_link_clk_enable_6g(msm_host);
- else
- return dsi_link_clk_enable_v2(msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->src_clk);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
}
-static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 pclk_rate;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->pixel_clk);
- if (msm_host->byte_intf_clk)
- clk_disable_unprepare(msm_host->byte_intf_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- } else {
- clk_disable_unprepare(msm_host->pixel_clk);
- clk_disable_unprepare(msm_host->src_clk);
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- }
+ pclk_rate = mode->clock * 1000;
+
+	/*
+	 * For dual DSI mode, the current DRM mode has the complete width of
+	 * the panel. Since the complete panel is driven by two DSI
+	 * controllers, the clock rates have to be split between them.
+	 * Adjust the byte and pixel clock rates for each DSI host
+	 * accordingly.
+	 */
+ if (is_dual_dsi)
+ pclk_rate /= 2;
+
+ return pclk_rate;
}
-static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- struct drm_display_mode *mode = msm_host->mode;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate;
+ u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+ u64 pclk_bpp = (u64)pclk_rate * bpp;
- if (!mode) {
- pr_err("%s: mode not set\n", __func__);
- return -EINVAL;
- }
-
- pclk_rate = mode->clock * 1000;
- if (lanes > 0) {
- msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
- } else {
+ if (lanes == 0) {
pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
- msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ lanes = 1;
}
- DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+ do_div(pclk_bpp, (8 * lanes));
- msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ msm_host->pixel_clk_rate = pclk_rate;
+ msm_host->byte_clk_rate = pclk_bpp;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- unsigned int esc_mhz, esc_div;
- unsigned long byte_mhz;
+ DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+ msm_host->byte_clk_rate);
- msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
+}
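
A worked pass through dsi_calc_pclk() with hypothetical numbers (a 1080p mode at 148500 kHz, RGB888, four lanes, dual DSI enabled) shows how the split and the byte-clock derivation combine; none of these figures come from the patch itself:

	/* Worked example only -- all numbers are hypothetical. */
	static void example_calc_pclk(void)
	{
		u32 pclk_rate = 148500 * 1000;	/* mode->clock is in kHz */
		u64 pclk_bpp;

		pclk_rate /= 2;			/* dual DSI: 74250000 Hz per host */
		pclk_bpp = (u64)pclk_rate * 24;	/* 24 bpp for RGB888 */
		do_div(pclk_bpp, 8 * 4);	/* 4 lanes -> byte clock */
		/* pclk_bpp == 55687500, i.e. a ~55.7 MHz byte clock */
	}
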
- /*
- * esc clock is byte clock followed by a 4 bit divider,
- * we need to find an escape clock frequency within the
- * mipi DSI spec range within the maximum divider limit
- * We iterate here between an escape clock frequencey
- * between 20 Mhz to 5 Mhz and pick up the first one
- * that can be supported by our divider
- */
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ if (!msm_host->mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_calc_pclk(msm_host, is_dual_dsi);
+ msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ return 0;
+}
- byte_mhz = msm_host->byte_clk_rate / 1000000;
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u64 pclk_bpp;
+ unsigned int esc_mhz, esc_div;
+ unsigned long byte_mhz;
- for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
- esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+ dsi_calc_pclk(msm_host, is_dual_dsi);
- /*
- * TODO: Ideally, we shouldn't know what sort of divider
- * is available in mmss_cc, we're just assuming that
- * it'll always be a 4 bit divider. Need to come up with
- * a better way here.
- */
- if (esc_div >= 1 && esc_div <= 16)
- break;
- }
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+ do_div(pclk_bpp, 8);
+ msm_host->src_clk_rate = pclk_bpp;
- if (esc_mhz < 5)
- return -EINVAL;
+	/*
+	 * The escape clock is the byte clock followed by a 4-bit divider,
+	 * so we need to find an escape clock frequency that is within the
+	 * MIPI DSI spec range and reachable within the divider limit.
+	 * We iterate over escape clock frequencies from 20 MHz down to
+	 * 5 MHz and pick the first one our divider can support.
+	 */
+
+ byte_mhz = msm_host->byte_clk_rate / 1000000;
- msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+ for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+ esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
- DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
- msm_host->src_clk_rate);
+ /*
+ * TODO: Ideally, we shouldn't know what sort of divider
+ * is available in mmss_cc, we're just assuming that
+ * it'll always be a 4 bit divider. Need to come up with
+ * a better way here.
+ */
+ if (esc_div >= 1 && esc_div <= 16)
+ break;
}
+ if (esc_mhz < 5)
+ return -EINVAL;
+
+ msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+ DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ msm_host->src_clk_rate);
+
return 0;
}
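
A concrete pass through that loop, assuming the ~55.7 MHz byte clock from the earlier worked example (the value is hypothetical):

	/* Hypothetical byte clock of 55687500 Hz: */
	static void example_esc_div(void)
	{
		unsigned long byte_mhz = 55687500 / 1000000;	   /* 55 */
		unsigned int esc_div = DIV_ROUND_UP(byte_mhz, 20); /* 3 */

		/* 3 fits the 1..16 divider range, so the loop stops at
		 * esc_mhz == 20 and esc_clk_rate = 55687500 / 3 = 18562500 Hz,
		 * inside the 5-20 MHz window.
		 */
		(void)esc_div;
	}
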
@@ -885,7 +921,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_CTRL, data);
}
-static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -897,10 +933,26 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
u32 ha_end = ha_start + mode->hdisplay;
u32 va_start = v_total - mode->vsync_start;
u32 va_end = va_start + mode->vdisplay;
+ u32 hdisplay = mode->hdisplay;
u32 wc;
DBG("");
+	/*
+	 * For dual DSI mode, the current DRM mode has
+	 * the complete width of the panel. Since the complete
+	 * panel is driven by two DSI controllers, the horizontal
+	 * timings have to be split between them. Adjust the DSI
+	 * host timing values accordingly.
+	 */
+ if (is_dual_dsi) {
+ h_total /= 2;
+ hs_end /= 2;
+ ha_start /= 2;
+ ha_end /= 2;
+ hdisplay /= 2;
+ }
+
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -921,7 +973,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
/* image data and 1 byte write_memory_start cmd */
- wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
@@ -931,7 +983,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
MIPI_DSI_DCS_LONG_WRITE));
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
- DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
}
}
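
For a hypothetical 3840x2160 dual-DSI command-mode panel in RGB888, the halving above programs each controller with half the width, which in turn halves the write_memory_start word count:

	/* Hypothetical dual-DSI command-mode numbers: */
	static void example_timing_split(void)
	{
		u32 hdisplay = 3840 / 2;	/* 1920 per DSI host */
		u32 wc = hdisplay * 24 / 8 + 1;	/* 5761 bytes, not 11521 */

		(void)wc;
	}
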
@@ -1015,50 +1067,37 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
}
}
-/* dsi_cmd */
-static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- int ret;
uint64_t iova;
+ u8 *data;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
- if (IS_ERR(msm_host->tx_gem_obj)) {
- ret = PTR_ERR(msm_host->tx_gem_obj);
- pr_err("%s: failed to allocate gem, %d\n",
- __func__, ret);
- msm_host->tx_gem_obj = NULL;
- return ret;
- }
+ data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+ priv->kms->aspace,
+ &msm_host->tx_gem_obj, &iova);
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &iova);
- if (ret) {
- pr_err("%s: failed to get iova, %d\n", __func__, ret);
- return ret;
- }
+ if (IS_ERR(data)) {
+ msm_host->tx_gem_obj = NULL;
+ return PTR_ERR(data);
+ }
- if (iova & 0x07) {
- pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
- return -EINVAL;
- }
+ msm_host->tx_size = msm_host->tx_gem_obj->size;
- msm_host->tx_size = msm_host->tx_gem_obj->size;
- } else {
- msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+ return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
&msm_host->tx_buf_paddr, GFP_KERNEL);
- if (!msm_host->tx_buf) {
- ret = -ENOMEM;
- pr_err("%s: failed to allocate tx buf, %d\n",
- __func__, ret);
- return ret;
- }
+ if (!msm_host->tx_buf)
+ return -ENOMEM;
- msm_host->tx_size = size;
- }
+ msm_host->tx_size = size;
return 0;
}
@@ -1089,6 +1128,21 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
msm_host->tx_buf_paddr);
}
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+ return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+ return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
/*
* prepare cmd buffer to be txed
*/
@@ -1113,15 +1167,11 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
return -EINVAL;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- pr_err("%s: get vaddr failed, %d\n", __func__, ret);
- return ret;
- }
- } else {
- data = msm_host->tx_buf;
+ data = cfg_hnd->ops->tx_buf_get(msm_host);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
}
/* MSM specific command format in memory */
@@ -1142,8 +1192,8 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- msm_gem_put_vaddr(msm_host->tx_gem_obj);
+ if (cfg_hnd->ops->tx_buf_put)
+ cfg_hnd->ops->tx_buf_put(msm_host);
return len;
}
@@ -1190,24 +1240,38 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
return msg->rx_len;
}
-static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
+
+ if (!dma_base)
+ return -EINVAL;
+
+ return msm_gem_get_iova(msm_host->tx_gem_obj,
+ priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+ if (!dma_base)
+ return -EINVAL;
+
+ *dma_base = msm_host->tx_buf_paddr;
+ return 0;
+}
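
All of the per-version helpers un-staticked above are reached through cfg_hnd->ops. The real table lives in dsi_cfg.h/dsi_cfg.c, which is not part of these hunks, so the following is only a sketch of its plausible shape, inferred from the call sites in this patch:

	/* Sketch only -- the authoritative definition is in dsi_cfg.h. */
	struct msm_dsi_host_cfg_ops {
		int (*link_clk_enable)(struct msm_dsi_host *msm_host);
		void (*link_clk_disable)(struct msm_dsi_host *msm_host);
		int (*clk_init_ver)(struct msm_dsi_host *msm_host);
		int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
		void *(*tx_buf_get)(struct msm_dsi_host *msm_host);
		void (*tx_buf_put)(struct msm_dsi_host *msm_host);
		int (*dma_base_get)(struct msm_dsi_host *msm_host,
				    uint64_t *dma_base);
		int (*calc_clk_rate)(struct msm_dsi_host *msm_host,
				     bool is_dual_dsi);
	};

	/* The 6G handler would then wire in the _6g variants; the v2 table
	 * is analogous, with .tx_buf_put left NULL since dsi_cmd_dma_add()
	 * checks for it before calling.
	 */
	static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
		.link_clk_enable = dsi_link_clk_enable_6g,
		.link_clk_disable = dsi_link_clk_disable_6g,
		.tx_buf_alloc = dsi_tx_buf_alloc_6g,
		.tx_buf_get = dsi_tx_buf_get_6g,
		.tx_buf_put = dsi_tx_buf_put_6g,
		.dma_base_get = dsi_dma_base_get_6g,
		.calc_clk_rate = dsi_calc_clk_rate_6g,
	};
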
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
uint64_t dma_base;
bool triggered;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &dma_base);
- if (ret) {
- pr_err("%s: failed to get iova: %d\n", __func__, ret);
- return ret;
- }
- } else {
- dma_base = msm_host->tx_buf_paddr;
+ ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
}
reinit_completion(&msm_host->dma_comp);
@@ -1845,6 +1909,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct platform_device *pdev = msm_host->pdev;
int ret;
@@ -1865,7 +1930,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
}
msm_host->dev = dev;
- ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
return ret;
@@ -1898,7 +1963,7 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
* output
*/
if (check_defer && msm_host->device_node) {
- if (!of_drm_find_panel(msm_host->device_node))
+ if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
if (!of_drm_find_bridge(msm_host->device_node))
return -EPROBE_DEFER;
}
@@ -1923,6 +1988,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
/* TODO: make sure dsi_cmd_mdp is idle.
* Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
@@ -1935,7 +2001,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
- dsi_link_clk_enable(msm_host);
+ cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1956,6 +2022,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
@@ -1965,7 +2032,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
@@ -2129,7 +2196,6 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
@@ -2155,14 +2221,16 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ if (msm_host->dsi_clk_src) {
ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
if (ret) {
pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
+ }
+ if (msm_host->esc_clk_src) {
ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
if (ret) {
pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
@@ -2189,12 +2257,14 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
}
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req)
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- ret = dsi_calc_clk_rate(msm_host);
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
@@ -2256,9 +2326,11 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
}
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret = 0;
mutex_lock(&msm_host->dev_mutex);
@@ -2277,7 +2349,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = dsi_link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
@@ -2291,7 +2363,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto fail_disable_clk;
}
- dsi_timing_setup(msm_host);
+ dsi_timing_setup(msm_host, is_dual_dsi);
dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, phy_shared_timings);
@@ -2304,7 +2376,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
@@ -2316,6 +2388,7 @@ unlock_ret:
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
mutex_lock(&msm_host->dev_mutex);
if (!msm_host->power_on) {
@@ -2330,7 +2403,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4cb1cb68878b..5224010d90e4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -134,8 +134,9 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
+ bool is_dual_dsi = IS_DUAL_DSI();
- msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
@@ -305,102 +306,25 @@ static void dsi_mgr_connector_destroy(struct drm_connector *connector)
kfree(dsi_connector);
}
-static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
-{
- struct drm_display_mode *mode, *m;
-
- /* Only support left-right mode */
- list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
- mode->clock >>= 1;
- mode->hdisplay >>= 1;
- mode->hsync_start >>= 1;
- mode->hsync_end >>= 1;
- mode->htotal >>= 1;
- drm_mode_set_name(mode);
- }
-}
-
-static int dsi_dual_connector_tile_init(
- struct drm_connector *connector, int id)
-{
- struct drm_display_mode *mode;
- /* Fake topology id */
- char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
-
- if (connector->tile_group) {
- DBG("Tile property has been initialized");
- return 0;
- }
-
- /* Use the first mode only for now */
- mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode,
- head);
- if (!mode)
- return -EINVAL;
-
- connector->tile_group = drm_mode_get_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group)
- connector->tile_group = drm_mode_create_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group) {
- pr_err("%s: failed to create tile group\n", __func__);
- return -ENOMEM;
- }
-
- connector->has_tile = true;
- connector->tile_is_single_monitor = true;
-
- /* mode has been fixed */
- connector->tile_h_size = mode->hdisplay;
- connector->tile_v_size = mode->vdisplay;
-
- /* Only support left-right mode */
- connector->num_h_tile = 2;
- connector->num_v_tile = 1;
-
- connector->tile_v_loc = 0;
- connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
-
- return 0;
-}
-
static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- int ret, num;
+ int num;
if (!panel)
return 0;
- /* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode,
- * panel should not attach to any connector.
- * Only temporarily attach panel to the current connector here,
- * to let panel set mode to this connector.
+ /*
+ * In dual DSI mode, we have one connector that can be
+ * attached to the drm_panel.
*/
drm_panel_attach(panel, connector);
num = drm_panel_get_modes(panel);
- drm_panel_detach(panel);
if (!num)
return 0;
- if (IS_DUAL_DSI()) {
- /* report half resolution to user */
- dsi_dual_connector_fix_modes(connector);
- ret = dsi_dual_connector_tile_init(connector, id);
- if (ret)
- return ret;
- ret = drm_mode_connector_set_tile_property(connector);
- if (ret) {
- pr_err("%s: set tile property failed, %d\n",
- __func__, ret);
- return ret;
- }
- }
-
return num;
}
@@ -454,11 +378,11 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (ret)
goto phy_en_fail;
- /* Do nothing with the host if it is DSI 1 in case of dual DSI */
- if (is_dual_dsi && (DSI_1 == id))
+	/* Do nothing with the host if it is the slave DSI in a dual DSI setup */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
- ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
@@ -466,7 +390,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
- &phy_shared_timings[DSI_1]);
+ &phy_shared_timings[DSI_1], is_dual_dsi);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -556,11 +480,11 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
return;
/*
- * Do nothing with the host if it is DSI 1 in case of dual DSI.
+	 * Do nothing with the host if it is the slave DSI in a dual DSI setup.
 	 * It is safe to call dsi_mgr_phy_disable() here because a single PHY
 	 * won't be disabled until both PHYs request disable.
*/
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
if (panel) {
@@ -621,7 +545,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
@@ -684,11 +608,28 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
+ drm_connector_attach_encoder(connector, msm_dsi->encoder);
return connector;
}
+bool msm_dsi_manager_validate_current_config(u8 id)
+{
+ bool is_dual_dsi = IS_DUAL_DSI();
+
+ /*
+	 * For dual DSI, we only have one drm panel. For this
+	 * use case, we register only one bridge/connector.
+	 * Skip bridge/connector initialisation if this is the
+	 * slave DSI in a dual DSI configuration.
+ */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+ DBG("Skip bridge registration for slave DSI->id: %d\n", id);
+ return false;
+ }
+ return true;
+}
+
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -751,12 +692,8 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
connector_list = &dev->mode_config.connector_list;
list_for_each_entry(connector, connector_list, head) {
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id)
- return connector;
- }
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
}
return ERR_PTR(-ENODEV);
@@ -836,6 +773,7 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
struct msm_drm_private *priv;
struct msm_kms *kms;
struct drm_encoder *encoder;
+ bool cmd_mode;
/*
* drm_device pointer is assigned to msm_dsi only in the modeset_init
@@ -850,10 +788,11 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
priv = dev->dev_private;
kms = priv->kms;
encoder = msm_dsi_get_encoder(msm_dsi);
+	cmd_mode = !(device_flags & MIPI_DSI_MODE_VIDEO);
if (encoder && kms->funcs->set_encoder_mode)
- if (!(device_flags & MIPI_DSI_MODE_VIDEO))
- kms->funcs->set_encoder_mode(kms, encoder, true);
+ kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 57cf7fa7f1c4..874265314413 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index c4c37a7df637..4c03f0b7343e 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -798,6 +798,8 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENOMEM);
}
+ spin_lock_init(&pll_10nm->postdiv_lock);
+
pll = &pll_10nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 9d4d1feaefd7..07c48ddb5301 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f150d4a47707..9cb6e6fe9810 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 6f3fc6b0f0a3..058ff92a0207 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -56,7 +56,7 @@ static int edp_connector_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- drm_mode_connector_update_edid_property(connector, drm_edid);
+ drm_connector_update_edid_property(connector, drm_edid);
if (drm_edid)
ret = drm_add_edid_modes(connector, drm_edid);
@@ -134,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_mode_connector_attach_encoder(connector, edp->encoder);
+ drm_connector_attach_encoder(connector, edp->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index ecebf8b623ab..3eff3ea3b271 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index c0848dfedd50..e9c9a0af508e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -392,7 +392,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
ret = drm_add_edid_modes(connector, edid);
@@ -477,7 +477,7 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
return ERR_PTR(ret);
}
- drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+ drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index da646deedf4b..7717d4269662 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0635c3da7f4..c1f1779c980f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -71,12 +71,15 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
+ if (kms->funcs->commit) {
+ DRM_DEBUG_ATOMIC("triggering commit\n");
+ kms->funcs->commit(kms, state);
+ }
+
msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1ff3fda245d1..f0da0d3c8a80 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -16,26 +16,101 @@
*/
#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+struct msm_gpu_show_priv {
+ struct msm_gpu_state *state;
+ struct drm_device *dev;
+};
+
+static int msm_gpu_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ drm_printf(&p, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, show_priv->state, &p);
+
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ return 0;
+}
+
+static int msm_gpu_release(struct inode *inode, struct file *file)
{
+ struct seq_file *m = file->private_data;
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ gpu->funcs->gpu_state_put(show_priv->state);
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ kfree(show_priv);
+
+ return single_release(inode, file);
+}
+
+static int msm_gpu_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_show_priv *show_priv;
+ int ret;
- if (gpu) {
- seq_printf(m, "%s Status:\n", gpu->name);
- pm_runtime_get_sync(&gpu->pdev->dev);
- gpu->funcs->show(gpu, m);
- pm_runtime_put_sync(&gpu->pdev->dev);
+ if (!gpu)
+ return -ENODEV;
+
+ show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
+ if (!show_priv)
+ return -ENOMEM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ show_priv->state = gpu->funcs->gpu_state_get(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(show_priv->state)) {
+ ret = PTR_ERR(show_priv->state);
+ kfree(show_priv);
+ return ret;
}
- return 0;
+ show_priv->dev = dev;
+
+ return single_open(file, msm_gpu_show, show_priv);
}
+static const struct file_operations msm_gpu_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_gpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_gpu_release,
+};
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -105,7 +180,6 @@ static int show_locked(struct seq_file *m, void *arg)
}
static struct drm_info_list msm_debugfs_list[] = {
- {"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
@@ -158,6 +232,9 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
+ debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
+ dev, &msm_gpu_fops);
+
if (priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 021a0b6f9a59..c1abad8a8612 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -15,6 +16,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>
#include "msm_drv.h"
@@ -78,6 +81,63 @@ module_param(modeset, bool, 0600);
* Util/helpers:
*/
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
+{
+ struct property *prop;
+ const char *name;
+ struct clk_bulk_data *local;
+ int i = 0, ret, count;
+
+ count = of_property_count_strings(dev->of_node, "clock-names");
+ if (count < 1)
+ return 0;
+
+	local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
+			GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!local[i].id) {
+ devm_kfree(dev, local);
+ return -ENOMEM;
+ }
+
+ i++;
+ }
+
+ ret = devm_clk_bulk_get(dev, count, local);
+
+ if (ret) {
+ for (i = 0; i < count; i++)
+ devm_kfree(dev, (void *) local[i].id);
+ devm_kfree(dev, local);
+
+ return ret;
+ }
+
+ *bulk = local;
+ return count;
+}
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name)
+{
+ int i;
+ char n[32];
+
+ snprintf(n, sizeof(n), "%s_clk", name);
+
+ for (i = 0; bulk && i < count; i++) {
+ if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
+ return bulk[i].clk;
+ }
+
+ return NULL;
+}
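
A hypothetical caller pairs the two helpers like this (the function and the "core" clock name are invented for illustration; msm_clk_bulk_get_clock() accepts both the bare name and the legacy "_clk"-suffixed form):

	/* Hypothetical usage -- "core" is an assumed clock-names entry. */
	static int example_enable_core_clk(struct platform_device *pdev)
	{
		struct clk_bulk_data *bulk;
		struct clk *core;
		int num;

		num = msm_clk_bulk_get(&pdev->dev, &bulk);
		if (num <= 0)
			return num;	/* no clocks, or error */

		/* matches either a "core" or a legacy "core_clk" entry */
		core = msm_clk_bulk_get_clock(bulk, num, "core");
		if (!core)
			return -ENODEV;

		return clk_prepare_enable(core);
	}
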
+
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
@@ -149,7 +209,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -197,7 +257,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- queue_work(priv->wq, &vbl_ctrl->work);
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+ &vbl_ctrl->work);
return 0;
}
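
Since vblank work is now funneled to the per-CRTC commit thread, the producer/consumer shape reduces to the standard kthread-worker pattern. A minimal sketch, reusing the structs from this patch but with an invented init function:

	/* Minimal sketch of the kthread-worker pattern this patch adopts;
	 * example_vblank_init() is invented for illustration.
	 */
	static int example_vblank_init(struct msm_vblank_ctrl *vbl_ctrl,
				       struct msm_drm_thread *thr)
	{
		kthread_init_worker(&thr->worker);
		kthread_init_work(&vbl_ctrl->work, vblank_ctrl_worker);
		thr->thread = kthread_run(kthread_worker_fn, &thr->worker,
					  "crtc_commit:%d", thr->crtc_id);
		if (IS_ERR(thr->thread))
			return PTR_ERR(thr->thread);

		/* producers then hand off work from any context: */
		kthread_queue_work(&thr->worker, &vbl_ctrl->work);
		return 0;
	}
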
@@ -208,19 +269,36 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct msm_mdss *mdss = priv->mdss;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
+ int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- cancel_work_sync(&vbl_ctrl->work);
+ kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
+ /* clean up display commit/event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ kthread_flush_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_flush_worker(&priv->event_thread[i].worker);
+ kthread_stop(priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
@@ -243,9 +321,6 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
- flush_workqueue(priv->atomic_wq);
- destroy_workqueue(priv->atomic_wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -258,7 +333,8 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
- msm_mdss_destroy(ddev);
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
drm_dev_unref(ddev);
@@ -268,6 +344,10 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
+#define KMS_MDP4 4
+#define KMS_MDP5 5
+#define KMS_DPU 3
+
static int get_mdp_ver(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -357,7 +437,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
struct drm_device *ddev;
struct msm_drm_private *priv;
struct msm_kms *kms;
- int ret;
+ struct msm_mdss *mdss;
+ int ret, i;
+ struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
@@ -369,53 +451,61 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- drm_dev_unref(ddev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unref_drm_dev;
}
ddev->dev_private = priv;
priv->dev = ddev;
- ret = msm_mdss_init(ddev);
- if (ret) {
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
+ switch (get_mdp_ver(pdev)) {
+ case KMS_MDP5:
+ ret = mdp5_mdss_init(ddev);
+ break;
+ case KMS_DPU:
+ ret = dpu_mdss_init(ddev);
+ break;
+ default:
+ ret = 0;
+ break;
}
+ if (ret)
+ goto err_free_priv;
+
+ mdss = priv->mdss;
priv->wq = alloc_ordered_workqueue("msm", 0);
- priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
- if (ret) {
- msm_mdss_destroy(ddev);
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
- }
+ if (ret)
+ goto err_destroy_mdss;
ret = msm_init_vram(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(ddev);
priv->kms = kms;
break;
- case 5:
+ case KMS_MDP5:
kms = mdp5_kms_init(ddev);
break;
+ case KMS_DPU:
+ kms = dpu_kms_init(ddev);
+ priv->kms = kms;
+ break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -430,24 +520,100 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/
dev_err(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
- goto fail;
+ goto err_msm_uninit;
}
+ /* Enable normalization of plane zpos */
+ ddev->mode_config.normalize_zpos = true;
+
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
- goto fail;
+ goto err_msm_uninit;
}
}
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
+	/*
+	 * This priority was found during empirical testing to give the
+	 * display threads appropriate realtime scheduling for processing
+	 * display updates while still interacting well with other realtime
+	 * and normal-priority tasks.
+	 */
+ param.sched_priority = 16;
+ for (i = 0; i < priv->num_crtcs; i++) {
+
+ /* initialize display thread */
+ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->disp_thread[i].worker);
+ priv->disp_thread[i].dev = ddev;
+ priv->disp_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->disp_thread[i].worker,
+ "crtc_commit:%d", priv->disp_thread[i].crtc_id);
+		if (IS_ERR(priv->disp_thread[i].thread)) {
+			dev_err(dev, "failed to create crtc_commit kthread\n");
+			priv->disp_thread[i].thread = NULL;
+		} else {
+			ret = sched_setscheduler(priv->disp_thread[i].thread,
+						 SCHED_FIFO, &param);
+			if (ret)
+				pr_warn("display thread priority update failed: %d\n",
+					ret);
+		}
+
+ /* initialize event thread */
+ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->event_thread[i].worker);
+ priv->event_thread[i].dev = ddev;
+ priv->event_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->event_thread[i].worker,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+		/*
+		 * The event thread should run at the same priority as
+		 * disp_thread, because it handles frame_done events. A
+		 * lower-priority event thread combined with a higher-priority
+		 * disp_thread can push the frame_pending counter beyond 2,
+		 * which can lead to commit failures at the crtc commit level.
+		 */
+		if (IS_ERR(priv->event_thread[i].thread)) {
+			dev_err(dev, "failed to create crtc_event kthread\n");
+			priv->event_thread[i].thread = NULL;
+		} else {
+			ret = sched_setscheduler(priv->event_thread[i].thread,
+						 SCHED_FIFO, &param);
+			if (ret)
+				pr_warn("display event thread priority update failed: %d\n",
+					ret);
+		}
+
+ if ((!priv->disp_thread[i].thread) ||
+ !priv->event_thread[i].thread) {
+ /* clean up previously created threads if any */
+ for ( ; i >= 0; i--) {
+ if (priv->disp_thread[i].thread) {
+ kthread_stop(
+ priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_stop(
+ priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+ goto err_msm_uninit;
+ }
+ }
+
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
dev_err(dev, "failed to initialize vblank\n");
- goto fail;
+ goto err_msm_uninit;
}
if (kms) {
@@ -456,13 +622,13 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
pm_runtime_put_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ goto err_msm_uninit;
}
}
ret = drm_dev_register(ddev, 0);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_mode_config_reset(ddev);
@@ -473,15 +639,23 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = msm_debugfs_late_init(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
return 0;
-fail:
+err_msm_uninit:
msm_drm_uninit(dev);
return ret;
+err_destroy_mdss:
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
+err_free_priv:
+ kfree(priv);
+err_unref_drm_dev:
+ drm_dev_unref(ddev);
+ return ret;
}
/*
@@ -894,16 +1068,35 @@ static struct drm_driver msm_driver = {
static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_suspend)
+ return kms->funcs->pm_suspend(dev);
drm_kms_helper_poll_disable(ddev);
+ priv->pm_state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(priv->pm_state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(priv->pm_state);
+ }
+
return 0;
}
static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_resume)
+ return kms->funcs->pm_resume(dev);
+ drm_atomic_helper_resume(ddev, priv->pm_state);
drm_kms_helper_poll_enable(ddev);
return 0;
@@ -915,11 +1108,12 @@ static int msm_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_disable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->disable(mdss);
return 0;
}
@@ -928,11 +1122,12 @@ static int msm_runtime_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_enable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->enable(mdss);
return 0;
}
@@ -1031,12 +1226,13 @@ static int add_display_components(struct device *dev,
int ret;
/*
- * MDP5 based devices don't have a flat hierarchy. There is a top level
- * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
- * children devices, find the MDP5 node, and then add the interfaces
- * to our components list.
+ * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+ * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+ * Populate the children devices, find the MDP5/DPU node, and then add
+ * the interfaces to our components list.
*/
- if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate children devices\n");
@@ -1146,8 +1342,9 @@ static int msm_pdev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
- { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
+ { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
+ { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
+ { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1169,6 +1366,7 @@ static int __init msm_drm_register(void)
DBG("init");
msm_mdp_register();
+ msm_dpu_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -1185,6 +1383,7 @@ static void __exit msm_drm_unregister(void)
msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
+ msm_dpu_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b2da1fbf81e0..8e510d5c758a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -33,6 +34,7 @@
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
+#include <linux/kthread.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -54,6 +56,12 @@ struct msm_fence_context;
struct msm_gem_address_space;
struct msm_gem_vma;
+#define MAX_CRTCS 8
+#define MAX_PLANES 20
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
+
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
@@ -68,12 +76,77 @@ enum msm_mdp_plane_property {
};
struct msm_vblank_ctrl {
- struct work_struct work;
+ struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID: EDID supported
+ */
+enum msm_display_caps {
+ MSM_DISPLAY_CAP_VID_MODE = BIT(0),
+ MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
+ MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
+ MSM_DISPLAY_CAP_EDID = BIT(3),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE: wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK: wait for the HW VBLANK event (for driver-internal waiters)
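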
+ */
+enum msm_event_wait {
+ MSM_ENC_COMMIT_DONE = 0,
+ MSM_ENC_TX_COMPLETE,
+ MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_enc: number of compression encoder blocks used
+ * @num_intf: number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_enc;
+ u32 num_intf;
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @capabilities: Bitmask of display flags
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t capabilities;
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_te_using_watchdog_timer;
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct task_struct *thread;
+ unsigned int crtc_id;
+ struct kthread_worker worker;
+};
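
struct msm_drm_thread bundles a kthread_worker with its backing task, one per CRTC. The usual lifecycle of the kthread_work API it relies on looks roughly like this (a generic sketch, not the msm init code itself; the thread name is illustrative):

    #include <linux/kthread.h>

    static void example_start(struct msm_drm_thread *t, int crtc_id)
    {
        kthread_init_worker(&t->worker);
        t->crtc_id = crtc_id;
        t->thread = kthread_run(kthread_worker_fn, &t->worker,
                                "crtc_commit:%d", crtc_id);
    }

    /* work items are prepared once with kthread_init_work() and then
     * queued onto a worker, e.g. from an interrupt handler:
     */
    static void example_queue(struct msm_drm_thread *t,
                              struct kthread_work *work)
    {
        kthread_queue_work(&t->worker, work);
    }

    static void example_stop(struct msm_drm_thread *t)
    {
        kthread_flush_worker(&t->worker);
        kthread_stop(t->thread);
    }
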
struct msm_drm_private {
@@ -84,7 +157,7 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* top level MDSS wrapper device (for MDP5 only) */
+ /* top level MDSS wrapper device (for MDP5/DPU only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is
@@ -115,22 +188,24 @@ struct msm_drm_private {
struct list_head inactive_list;
struct workqueue_struct *wq;
- struct workqueue_struct *atomic_wq;
unsigned int num_planes;
- struct drm_plane *planes[16];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_thread disp_thread[MAX_CRTCS];
+ struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
@@ -150,6 +225,7 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
+ struct drm_atomic_state *pm_state;
};
struct msm_format {
@@ -174,6 +250,9 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -184,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
@@ -285,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
@@ -306,6 +387,10 @@ static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 7a16242bf8bf..2a7348aeb38d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -25,49 +26,20 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[MAX_PLANE];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- msm_fb->planes[0], handle);
-}
-
-static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct drm_gem_object *bo = msm_fb->planes[i];
-
- drm_gem_object_put_unlocked(bo);
- }
-
- kfree(msm_fb);
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
- .create_handle = msm_framebuffer_create_handle,
- .destroy = msm_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- msm_gem_describe(msm_fb->planes[i], m);
+ msm_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], aspace);
+ msm_gem_put_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- if (!msm_fb->planes[plane])
+ if (!fb->obj[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+ return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return msm_fb->planes[plane];
+ return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@@ -202,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
- if (n > ARRAY_SIZE(msm_fb->planes)) {
+ if (n > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
@@ -221,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- msm_fb->planes[i] = bos[i];
+ msm_fb->base.obj[i] = bos[i];
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f583bb4222f9..f59ca27a4a35 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -227,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
- int ret;
+ int err;
+ vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we dont need to hold one here.
*/
- ret = mutex_lock_interruptible(&msm_obj->lock);
- if (ret)
+ err = mutex_lock_interruptible(&msm_obj->lock);
+ if (err) {
+ ret = VM_FAULT_NOPAGE;
goto out;
+ }
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
mutex_unlock(&msm_obj->lock);
@@ -245,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
+ ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
@@ -257,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
+ ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&msm_obj->lock);
out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return ret;
}
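
The switch statement removed above is subsumed by vmf_error(), which maps a negative errno onto a vm_fault_t; its behaviour is roughly:

    /* roughly what vmf_error() does (see include/linux/mm.h) */
    static vm_fault_t example_vmf_error(int err)
    {
        if (err == -ENOMEM)
            return VM_FAULT_OOM;
        return VM_FAULT_SIGBUS;
    }

The -EBUSY case from the old switch no longer needs special handling here because vmf_insert_mixed() already folds it into VM_FAULT_NOPAGE, and the interrupted-lock case is handled explicitly above.
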
/** get mmap offset */
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 1c09acfb4028..5e808cfec345 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,10 +20,11 @@
#include "msm_mmu.h"
#include "msm_fence.h"
+#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
-
+#include <linux/devcoredump.h>
/*
* Power Management:
@@ -87,7 +88,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy)
+ if (!gpu->funcs->gpu_busy || !gpu->core_clk)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -141,8 +142,6 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- int i;
-
if (gpu->core_clk && gpu->fast_rate)
clk_set_rate(gpu->core_clk, gpu->fast_rate);
@@ -150,28 +149,12 @@ static int enable_clk(struct msm_gpu *gpu)
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 19200000);
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_prepare(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_enable(gpu->grp_clks[i]);
-
- return 0;
+ return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
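
clk_bulk_prepare_enable() and its clk_bulk_disable_unprepare() counterpart operate on an array of struct clk_bulk_data, replacing the open-coded prepare/enable loops removed in these hunks. A minimal consumer sketch (the clock names are placeholders):

    #include <linux/clk.h>

    static struct clk_bulk_data example_clks[] = {
        { .id = "core" },
        { .id = "rbbmtimer" },
    };

    static int example_enable(struct device *dev)
    {
        int ret;

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks),
                                example_clks);
        if (ret)
            return ret;

        /* prepares then enables each clock, unwinding on failure */
        return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks),
                                       example_clks);
    }

    static void example_disable(void)
    {
        clk_bulk_disable_unprepare(ARRAY_SIZE(example_clks),
                                   example_clks);
    }
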
static int disable_clk(struct msm_gpu *gpu)
{
- int i;
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_disable(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_unprepare(gpu->grp_clks[i]);
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
/*
* Set the clock to a deliberately low rate. On older targets the clock
@@ -273,6 +256,123 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct msm_gpu *gpu = data;
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_gpu_state *state;
+
+ state = msm_gpu_crashstate_get(gpu);
+ if (!state)
+ return 0;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "---\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+ if (state->comm)
+ drm_printf(&p, "comm: %s\n", state->comm);
+ if (state->cmd)
+ drm_printf(&p, "cmdline: %s\n", state->cmd);
+
+ gpu->funcs->show(gpu, state, &p);
+
+ msm_gpu_crashstate_put(gpu);
+
+ return count - iter.remain;
+}
+
+static void msm_gpu_devcoredump_free(void *data)
+{
+ struct msm_gpu *gpu = data;
+
+ msm_gpu_crashstate_put(gpu);
+}
+
+static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
+ struct msm_gem_object *obj, u64 iova, u32 flags)
+{
+ struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+
+ /* Always record the object's iova and size; contents are only copied for readable BOs below */
+
+ state_bo->size = obj->base.size;
+ state_bo->iova = iova;
+
+ /* Only store the data for buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ)) {
+ void *ptr;
+
+ state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ if (!state_bo->data)
+ return;
+
+ ptr = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(ptr)) {
+ kvfree(state_bo->data);
+ return;
+ }
+
+ memcpy(state_bo->data, ptr, obj->base.size);
+ msm_gem_put_vaddr(&obj->base);
+ }
+
+ state->nr_bos++;
+}
+
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+ struct msm_gpu_state *state;
+
+ /* Only save one crash state at a time */
+ if (gpu->crashstate)
+ return;
+
+ state = gpu->funcs->gpu_state_get(gpu);
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ /* Fill in the additional crash state information */
+ state->comm = kstrdup(comm, GFP_KERNEL);
+ state->cmd = kstrdup(cmd, GFP_KERNEL);
+
+ if (submit) {
+ int i;
+
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (i = 0; state->bos && i < submit->nr_bos; i++)
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+
+ /* Set the active crash state to be dumped on failure */
+ gpu->crashstate = state;
+
+ /* FIXME: Release the crashstate if this errors out? */
+ dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
+}
+#else
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+}
+#endif
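
The capture path assumes each target implements the new gpu_state_get()/gpu_state_put() hooks with a refcounted state object. A hypothetical skeleton of the get side (field names from struct msm_gpu_state; register and ring capture elided):

    static struct msm_gpu_state *example_gpu_state_get(struct msm_gpu *gpu)
    {
        struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state)
            return ERR_PTR(-ENOMEM);

        kref_init(&state->ref);
        ktime_get_real_ts64(&state->time);
        /* ... capture registers, ring contents, rbbm_status ... */

        return state;
    }
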
+
/*
* Hangcheck detection for locked gpu:
*/
@@ -314,6 +414,7 @@ static void recover_worker(struct work_struct *work)
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
int i;
mutex_lock(&dev->struct_mutex);
@@ -327,7 +428,7 @@ static void recover_worker(struct work_struct *work)
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- char *cmd;
+ comm = kstrdup(task->comm, GFP_ATOMIC);
/*
* So slightly annoying, in other paths like
@@ -340,22 +441,28 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
mutex_lock(&dev->struct_mutex);
+ }
+ rcu_read_unlock();
+ if (comm && cmd) {
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, task->comm, cmd);
+ gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", task->comm, cmd);
-
- kfree(cmd);
- } else {
+ "offending task: %s (%s)", comm, cmd);
+ } else
msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
- rcu_read_unlock();
}
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ kfree(cmd);
+ kfree(comm);
/*
* Update all the rings with the latest and greatest fence.. this
@@ -660,44 +767,22 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static struct clk *get_clock(struct device *dev, const char *name)
-{
- struct clk *clk = devm_clk_get(dev, name);
-
- return IS_ERR(clk) ? NULL : clk;
-}
-
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
- struct device *dev = &pdev->dev;
- struct property *prop;
- const char *name;
- int i = 0;
+ int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
- gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
- if (gpu->nr_clocks < 1) {
+ if (ret < 1) {
gpu->nr_clocks = 0;
- return 0;
- }
-
- gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
- GFP_KERNEL);
- if (!gpu->grp_clks) {
- gpu->nr_clocks = 0;
- return -ENOMEM;
+ return ret;
}
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
- gpu->grp_clks[i] = get_clock(dev, name);
+ gpu->nr_clocks = ret;
- /* Remember the key clocks that we need to control later */
- if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
- gpu->core_clk = gpu->grp_clks[i];
- else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
- gpu->rbbmtimer_clk = gpu->grp_clks[i];
+ gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "core");
- ++i;
- }
+ gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "rbbmtimer");
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index b8241179175a..9122ee6e55e4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -27,6 +27,7 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
@@ -64,11 +65,14 @@ struct msm_gpu_funcs {
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
- void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+ int (*gpu_state_put)(struct msm_gpu_state *state);
};
struct msm_gpu {
@@ -108,7 +112,7 @@ struct msm_gpu {
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk **grp_clks;
+ struct clk_bulk_data *grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
@@ -129,6 +133,8 @@ struct msm_gpu {
u64 busy_cycles;
ktime_t time;
} devfreq;
+
+ struct msm_gpu_state *crashstate;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -175,6 +181,38 @@ struct msm_gpu_submitqueue {
struct kref ref;
};
+struct msm_gpu_state_bo {
+ u64 iova;
+ size_t size;
+ void *data;
+};
+
+struct msm_gpu_state {
+ struct kref ref;
+ struct timespec64 time;
+
+ struct {
+ u64 iova;
+ u32 fence;
+ u32 seqno;
+ u32 rptr;
+ u32 wptr;
+ void *data;
+ int data_size;
+ } ring[MSM_GPU_MAX_RINGS];
+
+ int nr_registers;
+ u32 *registers;
+
+ u32 rbbm_status;
+
+ char *comm;
+ char *cmd;
+
+ int nr_bos;
+ struct msm_gpu_state_bo *bos;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -254,4 +292,32 @@ static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
kref_put(&queue->ref, msm_submitqueue_destroy);
}
+static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = NULL;
+
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ kref_get(&gpu->crashstate->ref);
+ state = gpu->crashstate;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+
+ return state;
+}
+
+static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
+{
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ if (gpu->funcs->gpu_state_put(gpu->crashstate))
+ gpu->crashstate = NULL;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+}
+
#endif /* __MSM_GPU_H__ */
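
The put hook these helpers call is expected to pair kref_put() with a release callback that frees everything the capture allocated; a hypothetical counterpart to the get-side skeleton earlier:

    static void example_gpu_state_destroy(struct kref *kref)
    {
        struct msm_gpu_state *state =
            container_of(kref, struct msm_gpu_state, ref);
        int i;

        for (i = 0; i < state->nr_bos; i++)
            kvfree(state->bos[i].data);  /* kvmalloc'ed BO snapshots */

        kfree(state->bos);
        kfree(state->comm);
        kfree(state->cmd);
        kfree(state);
    }

    static int example_gpu_state_put(struct msm_gpu_state *state)
    {
        if (IS_ERR_OR_NULL(state))
            return 1;

        /* kref_put() returns 1 once the last reference is gone, which
         * is what lets msm_gpu_crashstate_put() above clear
         * gpu->crashstate */
        return kref_put(&state->ref, example_gpu_state_destroy);
    }
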
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index dfd92947de2c..fd88cebb6adb 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -42,6 +43,7 @@ struct msm_kms_funcs {
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
@@ -50,6 +52,11 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
const uint64_t modifiers);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -60,6 +67,9 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
+ /* pm suspend/resume hooks */
+ int (*pm_suspend)(struct device *dev);
+ int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
@@ -86,9 +96,20 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_init(struct drm_device *dev);
-void msm_mdss_destroy(struct drm_device *dev);
-int msm_mdss_enable(struct msm_mdss *mdss);
-int msm_mdss_disable(struct msm_mdss *mdss);
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
+
+struct msm_mdss_funcs {
+ int (*enable)(struct msm_mdss *mdss);
+ int (*disable)(struct msm_mdss *mdss);
+ void (*destroy)(struct drm_device *dev);
+};
+
+struct msm_mdss {
+ struct drm_device *dev;
+ const struct msm_mdss_funcs *funcs;
+};
+
+int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
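
Each MDSS flavour now supplies its own ops through msm_mdss_funcs; a hypothetical minimal backend wiring up the table declared above (the bodies are placeholders for clock/GDSC management):

    static int example_mdss_enable(struct msm_mdss *mdss)
    {
        /* power up the wrapper block's clocks here */
        return 0;
    }

    static int example_mdss_disable(struct msm_mdss *mdss)
    {
        /* power it back down */
        return 0;
    }

    static void example_mdss_destroy(struct drm_device *dev)
    {
        /* release resources and detach from msm_drm_private */
    }

    static const struct msm_mdss_funcs example_mdss_funcs = {
        .enable = example_mdss_enable,
        .disable = example_mdss_disable,
        .destroy = example_mdss_destroy,
    };
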
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6aa6ee16dcbd..2c569e264df3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1017,7 +1017,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 4feab0a5419d..e7af95d37ddb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -556,6 +556,6 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 9805d2cdc1a1..73d41abbb510 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -716,6 +716,6 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
entry->location != DCB_LOC_ON_CHIP)
nv04_tmds_slave_init(encoder);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 501d2d290e9c..70dce544984e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
nouveau_display(dev)->init = nv04_display_init;
nouveau_display(dev)->fini = nv04_display_fini;
+ /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
+ dev->driver->driver_features &= ~DRIVER_ATOMIC;
+
nouveau_hw_save_vga_fonts(dev, 1);
nv04_crtc_create(dev, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 01664357d3e1..de4490b4ed30 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -244,7 +244,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
/* Attach it to the specified connector. */
get_slave_funcs(encoder)->create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 6d99f11fee4e..6a4ca139cf5d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -821,6 +821,6 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
encoder->possible_clones = 0;
nv17_tv_create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index b83465ae7c1b..8412119bd940 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -136,12 +136,24 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
{
struct nouveau_cli *cli = (void *)device->object.client;
struct nv50_disp_core_channel_dma_v0 *args = data;
+ u8 type = NVIF_MEM_COHERENT;
int ret;
mutex_init(&dmac->lock);
- ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
- &dmac->push);
+ /* Pascal added support for 47-bit physical addresses, but some
+ * parts of EVO still only accept 40-bit PAs.
+ *
+ * To avoid issues on systems with large amounts of RAM, and on
+ * systems where an IOMMU maps pages at a high address, we need
+ * to allocate push buffers in VRAM instead.
+ *
+ * This appears to match NVIDIA's behaviour on Pascal.
+ */
+ if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+ type |= NVIF_MEM_VRAM;
+
+ ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
if (ret)
return ret;
@@ -216,6 +228,19 @@ void
evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
+
+ /* Push buffer fetches are not coherent with BAR1; we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
}
@@ -424,7 +449,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_dac_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -850,7 +875,7 @@ nv50_mstc_get_modes(struct drm_connector *connector)
int ret = 0;
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
- drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+ drm_connector_update_edid_property(&mstc->connector, mstc->edid);
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
@@ -927,11 +952,11 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
nouveau_conn_attach_properties(&mstc->connector);
for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
- drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+ drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(&mstc->connector, path);
+ drm_connector_set_path_property(&mstc->connector, path);
return 0;
}
@@ -1007,7 +1032,7 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
mstc->port = NULL;
drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
- drm_connector_unreference(&mstc->connector);
+ drm_connector_put(&mstc->connector);
}
static void
@@ -1418,7 +1443,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_sor_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nv50_disp *disp = nv50_disp(encoder->dev);
@@ -1576,7 +1601,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_pior_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -1585,8 +1610,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
*****************************************************************************/
static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
+ struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
@@ -1618,6 +1644,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
}
static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+ if (wndw->func->update)
+ wndw->func->update(wndw, interlock);
+ }
+ }
+}
+
+static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -1684,7 +1726,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
help->disable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1693,15 +1736,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
/* Flush disable. */
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (atom->flush_disable) {
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
-
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1762,18 +1798,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Flush update. */
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
+ nv50_disp_atomic_commit_wndw(state, interlock);
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+ interlock[NV50_DISP_INTERLOCK_OVLY] ||
+ interlock[NV50_DISP_INTERLOCK_WNDW] ||
!atom->state.legacy_cursor_update)
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
else
disp->core->func->update(disp->core, interlock, false);
}
@@ -1871,7 +1903,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
nv50_disp_atomic_commit_tail(state);
drm_for_each_crtc(crtc, dev) {
- if (crtc->state->enable) {
+ if (crtc->state->active) {
if (!drm->have_disp_power_ref) {
drm->have_disp_power_ref = true;
return 0;
@@ -2119,10 +2151,6 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
-static int nouveau_atomic = 0;
-module_param_named(atomic, nouveau_atomic, int, 0400);
-
int
nv50_display_create(struct drm_device *dev)
{
@@ -2147,8 +2175,6 @@ nv50_display_create(struct drm_device *dev)
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
- if (nouveau_atomic)
- dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2231,6 +2257,9 @@ nv50_display_create(struct drm_device *dev)
connector->funcs->destroy(connector);
}
+ /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
+ dev->vblank_disable_immediate = true;
+
out:
if (ret)
nv50_display_destroy(dev);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index c5a9bc1af5af..2187922e8dc2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -586,7 +586,6 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
wndw->id = index;
wndw->interlock.type = interlock_type;
wndw->interlock.data = interlock_data;
- wndw->ctxdma.parent = &wndw->wndw.base.user;
wndw->ctxdma.parent = &wndw->wndw.base.user;
INIT_LIST_HEAD(&wndw->ctxdma.list);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 20754d9e6883..8407651f6ac6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -78,7 +78,7 @@ struct nvif_mclass {
#define nvif_mclass(o,m) ({ \
struct nvif_object *object = (o); \
struct nvif_sclass *sclass; \
- const typeof(m[0]) *mclass = (m); \
+ typeof(m[0]) *mclass = (m); \
int ret = -ENODEV; \
int cnt, i, j; \
\
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index e2211bb2cf79..e67a471331b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
+ drm_gem_object_put_unlocked(&chan->ntfy->gem);
}
if (chan->heap.block_size)
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index debbbf0fd4bd..408b955e5c39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
INIT_LIST_HEAD(&drm->bl_connectors);
@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
return 0;
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
break;
}
}
-
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
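
This and the following hunks all apply the same conversion: open-coded walks of mode_config.connector_list become drm_connector_list_iter walks, which take their own connector references and so are safe against concurrent hotplug. The generic shape:

    static void example_walk(struct drm_device *dev)
    {
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
            /* connector is referenced for the body of the loop */
        }
        drm_connector_list_iter_end(&conn_iter);
    }
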
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7b557c354307..51932c72334e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -363,19 +363,11 @@ module_param_named(hdmimhz, nouveau_hdmimhz, int, 0400);
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
- struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
- int i, id;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- id = connector->encoder_ids[i];
- if (!id)
- break;
+ int i;
- enc = drm_encoder_find(dev, NULL, id);
- if (!enc)
- continue;
+ drm_connector_for_each_possible_encoder(connector, enc, i) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
@@ -420,7 +412,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
- struct nouveau_encoder *nv_encoder;
+ struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
int i, panel = -ENODEV;
@@ -436,14 +428,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
}
}
- for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- int id = connector->encoder_ids[i];
- if (id == 0)
- break;
-
- encoder = drm_encoder_find(dev, NULL, id);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
@@ -565,7 +550,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
@@ -590,7 +575,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
else
nv_connector->edid = drm_get_edid(connector, i2c);
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
@@ -672,7 +657,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
@@ -736,7 +721,7 @@ out:
status = connector_status_unknown;
#endif
- drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+ drm_connector_update_edid_property(connector, nv_connector->edid);
nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
@@ -1208,14 +1193,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int type, ret = 0;
bool dummy;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
nv_connector = nouveau_connector(connector);
- if (nv_connector->index == index)
+ if (nv_connector->index == index) {
+ drm_connector_list_iter_end(&conn_iter);
return connector;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index a4d1a059bd3d..dc7454e7f19a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -33,6 +33,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
struct nvkm_i2c_port;
@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline bool
+nouveau_connector_is_mst(struct drm_connector *connector)
+{
+ const struct nouveau_encoder *nv_encoder;
+ const struct drm_encoder *encoder;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
+ if (!nv_encoder)
+ return false;
+
+ encoder = &nv_encoder->base.base;
+ return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
+}
+
+#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
+ drm_for_each_connector_iter(connector, iter) \
+ for_each_if(!nouveau_connector_is_mst(connector))
+
static inline struct nouveau_connector *
nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct nouveau_connector *nv_connector = NULL;
struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder && connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ if (connector->encoder && connector->encoder->crtc == crtc) {
+ nv_connector = nouveau_connector(connector);
+ break;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
- return NULL;
+ return nv_connector;
}
struct drm_connector *
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 963a4dba8213..9109b69cd052 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -160,7 +160,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
args.ustate = value;
}
+ ret = pm_runtime_get_sync(drm->dev);
+ if (IS_ERR_VALUE(ret) && ret != -EACCES)
+ return ret;
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 774b429142bc..139368b31916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -205,7 +205,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
+ drm_gem_object_put_unlocked(&fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -287,7 +287,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (ret == 0)
return &fb->base;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
}
@@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int ret;
ret = disp->init(dev);
@@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_get(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
/* enable flip completion events */
nvif_notify_get(&drm->flip);
@@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
nvif_notify_put(&drm->flip);
/* disable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_put(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
@@ -939,7 +945,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
return ret;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return ret;
}
@@ -954,7 +960,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 775443c9af94..c7ec86d6c3c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+ if (nouveau_atomic)
+ driver_pci.driver_features |= DRIVER_ATOMIC;
+
ret = drm_get_pci_dev(pdev, pent, &driver_pci);
if (ret) {
nvkm_device_del(&device);
@@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
static int
nouveau_pmops_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct drm_crtc *crtc;
-
if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
- }
- }
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
@@ -912,8 +908,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
- if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
- return ret;
+ if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
ret = nouveau_cli_init(drm, name, cli);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 85c1f10bc2b6..844498c4267c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_framebuffer_unreference(&nouveau_fb->base);
+ drm_framebuffer_put(&nouveau_fb->base);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 300daee74209..b56524d343c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -274,7 +274,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->gem);
return ret;
}
@@ -354,7 +354,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_unreference_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->gem);
}
}
@@ -400,14 +400,14 @@ retry:
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
continue;
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_PRINTK(err, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
ret = -EINVAL;
break;
}
@@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct nouveau_bo *nvbo;
uint32_t data;
- if (unlikely(r->bo_index > req->nr_buffers)) {
+ if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
@@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (b->presumed.valid)
continue;
- if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
@@ -894,7 +894,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
ret = lret;
nouveau_bo_sync_for_cpu(nvbo);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
@@ -913,7 +913,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
nvbo = nouveau_gem_object(gem);
nouveau_bo_sync_for_device(nvbo);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -930,7 +930,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = nouveau_gem_info(file_priv, gem, req);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 44178b4c3599..08a1ab6b150d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -69,8 +69,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return count;
+ if (kstrtol(buf, 10, &value))
+ return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST,
value / 1000);
@@ -102,8 +102,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return count;
+ if (kstrtol(buf, 10, &value))
+ return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST,
value / 1000);
@@ -156,7 +156,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
long value;
int ret;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, value);
@@ -179,7 +179,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
long value;
int ret;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY, value);
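
All four hwmon hunks correct the same mistake: kstrtol() returns 0 on success and a negative errno (-EINVAL or -ERANGE) on failure, so comparing the result against -EINVAL alone lets range errors through as if the parse succeeded. The checked form (the patch returns -EINVAL for any failure; propagating the errno, as below, is the other common choice):

    static ssize_t example_store(const char *buf, size_t count)
    {
        long value;
        int ret;

        ret = kstrtol(buf, 10, &value);
        if (ret)        /* catches both -EINVAL and -ERANGE */
            return ret;

        /* ... apply value ... */
        return count;
    }
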
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 1ada186fab77..039e23548e08 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -36,7 +36,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(drm, 0);
if (ret < 0) {
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 8c093ca4222e..8edb9f2a4269 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
- * All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
- * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index d0322ce85172..1a47c40e171b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -87,11 +87,12 @@ nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->info) {
- if ((engine = nvkm_engine_ref(engine))) {
+ if (!IS_ERR((engine = nvkm_engine_ref(engine)))) {
int ret = engine->func->info(engine, mthd, data);
nvkm_engine_unref(&engine);
return ret;
}
+ return PTR_ERR(engine);
}
return -ENOSYS;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78597da6313a..0e372a190d3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -23,6 +23,10 @@
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
@@ -105,6 +109,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
unsigned long pgsize_bitmap;
int ret;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+
if (!tdev->func->iommu_bit)
return;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
index 29e6dd58ac48..525f95d06429 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -52,7 +52,7 @@ void
gf119_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->base.engine.subdev.device;
- const u64 mask = 0x00000001 << chan->chid.user;
+ const u32 mask = 0x00000001 << chan->chid.user;
if (!en) {
nvkm_mask(device, 0x610090, mask, 0x00000000);
nvkm_mask(device, 0x6100a0, mask, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 57719f675eec..bcf32d92ee5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -166,8 +166,8 @@ void
nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->base.engine.subdev.device;
- const u64 mask = 0x00010001 << chan->chid.user;
- const u64 data = en ? 0x00010000 : 0x00000000;
+ const u32 mask = 0x00010001 << chan->chid.user;
+ const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
nvkm_mask(device, 0x610028, mask, data);
}
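Two bugs are fixed in these disp hunks: the interrupt masks were declared u64 even though nvkm_mask() takes u32 arguments, and on nv50 the enable value was never shifted to the channel's bit position. A worked example for chid.user == 3, assuming nvkm_mask() performs a read-modify-write of the form (reg & ~mask) | data:

```c
const u32 mask = 0x00010001 << 3;	/* 0x00080008: channel 3's ctrl bits   */
const u32 data = 0x00010000 << 3;	/* 0x00080000: channel 3's enable bit  */
/*
 * The old unshifted 'data = 0x00010000' cleared channel 3's bits and
 * set channel 0's enable bit instead, so for any chid.user > 0 the
 * intended channel's interrupt was never enabled.
 */
```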
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 19173ea19096..3b3327789ae7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -25,24 +25,31 @@
#include <nvif/class.h>
static void
-gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
{
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730));
- u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734));
+ u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730 + (sm * 0x80)));
+ u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734 + (sm * 0x80)));
const struct nvkm_enum *warp;
char glob[128];
nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
- nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+ nvkm_error(subdev, "GPC%i/TPC%i/SM%d trap: "
"global %08x [%s] warp %04x [%s]\n",
- gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+ gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : "");
+
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730 + sm * 0x80), 0x00000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
+}
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730), 0x00000000);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734), gerr);
+static void
+gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+{
+ gv100_gr_trap_sm(gr, gpc, tpc, 0);
+ gv100_gr_trap_sm(gr, gpc, tpc, 1);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
index 20b6fc8243e0..71524548de32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -58,8 +58,14 @@ nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
h->ecount = nvbios_rd08(b, h->offset + 0x5);
h->base_id = nvbios_rd08(b, h->offset + 0x0f);
- h->boost_id = nvbios_rd08(b, h->offset + 0x10);
- h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ if (h->hlen > 0x10)
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ else
+ h->boost_id = 0xff;
+ if (h->hlen > 0x11)
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ else
+ h->tdp_id = 0xff;
return 0;
default:
return -EINVAL;
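Older vpstate tables ship a header shorter than 0x12 bytes, so unconditionally reading the boost and TDP entry IDs walked past the header. The fix keys each optional field off the advertised header length and substitutes 0xff (treated as "no entry") when the field is absent — the usual pattern for parsing versioned, variable-length firmware tables. The guards reduce to (a condensed restatement of the hunk, not new code):

```c
/* Only read optional fields that the advertised header length covers. */
h->boost_id = (h->hlen > 0x10) ? nvbios_rd08(b, h->offset + 0x10) : 0xff;
h->tdp_id   = (h->hlen > 0x11) ? nvbios_rd08(b, h->offset + 0x11) : 0xff;
```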
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 007bf4af33b9..16ad91c91a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -133,8 +133,14 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
}
}
- return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
- &fault->event);
+ ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
+ &fault->event);
+ if (ret)
+ return ret;
+
+ if (fault->func->oneinit)
+ ret = fault->func->oneinit(fault);
+ return ret;
}
static void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 73c7728b5969..3cd610d7deb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -176,8 +176,17 @@ gv100_fault_init(struct nvkm_fault *fault)
nvkm_notify_get(&fault->nrpfb);
}
+static int
+gv100_fault_oneinit(struct nvkm_fault *fault)
+{
+ return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
+ gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+ &fault->nrpfb);
+}
+
static const struct nvkm_fault_func
gv100_fault = {
+ .oneinit = gv100_fault_oneinit,
.init = gv100_fault_init,
.fini = gv100_fault_fini,
.intr = gv100_fault_intr,
@@ -192,15 +201,5 @@ int
gv100_fault_new(struct nvkm_device *device, int index,
struct nvkm_fault **pfault)
{
- struct nvkm_fault *fault;
- int ret;
-
- ret = nvkm_fault_new_(&gv100_fault, device, index, &fault);
- *pfault = fault;
- if (ret)
- return ret;
-
- return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
- gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
- &fault->nrpfb);
+ return nvkm_fault_new_(&gv100_fault, device, index, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 44843ecf12b0..e4d2f5234fd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -20,6 +20,7 @@ int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
int index, struct nvkm_fault **);
struct nvkm_fault_func {
+ int (*oneinit)(struct nvkm_fault *);
void (*init)(struct nvkm_fault *);
void (*fini)(struct nvkm_fault *);
void (*intr)(struct nvkm_fault *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 73b5d46104bd..434d2fc5bb1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
+ if (fb->func->init_remapper)
+ fb->func->init_remapper(fb);
+
if (fb->func->init_page) {
ret = fb->func->init_page(fb);
if (WARN_ON(ret))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index dffe1f5e1071..8205ce436b3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base)
}
void
+gp100_fb_init_remapper(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ /* Disable address remapper. */
+ nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
+}
+
+void
gp100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
@@ -56,6 +64,7 @@ gp100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b84b9861ef26..b4d74e815674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -31,6 +31,7 @@ gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 2857f31466bf..1e4ad61c19e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -11,6 +11,7 @@ struct nvkm_fb_func {
u32 (*tags)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
+ void (*init_remapper)(struct nvkm_fb *);
int (*init_page)(struct nvkm_fb *);
void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
@@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *);
int gm200_fb_init_page(struct nvkm_fb *);
+void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index a721354249ce..d02e183717dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -414,6 +414,20 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
{
struct ls_ucode_img *_img;
u32 pos = 0;
+ u32 max_desc_size = 0;
+ u8 *gdesc;
+
+ /* Figure out how large we need gdesc to be. */
+ list_for_each_entry(_img, imgs, node) {
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+
+ max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+ }
+
+ gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+ if (!gdesc)
+ return -ENOMEM;
nvkm_kmap(wpr_blob);
@@ -421,7 +435,6 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
const struct acr_r352_ls_func *ls_func =
acr->func->ls_func[_img->falcon_id];
- u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -447,6 +460,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
nvkm_done(wpr_blob);
+ kfree(gdesc);
+
return 0;
}
@@ -771,7 +786,11 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
struct fw_bl_desc *hsbl_desc;
void *bl, *blob_data, *hsbl_code, *hsbl_data;
u32 code_size;
- u8 bl_desc[bl_desc_size];
+ u8 *bl_desc;
+
+ bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
+ if (!bl_desc)
+ return -ENOMEM;
/* Find the bootloader descriptor for our blob and copy it */
if (blob == acr->load_blob) {
@@ -802,7 +821,6 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
code_size, hsbl_desc->start_tag, 0, false);
/* Generate the BL header */
- memset(bl_desc, 0, bl_desc_size);
acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
/*
@@ -811,6 +829,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
bl_desc_size, 0);
+ kfree(bl_desc);
return hsbl_desc->start_tag << 8;
}
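Both hunks in this file remove variable-length arrays (u8 gdesc[ls_func->bl_desc_size] and u8 bl_desc[bl_desc_size]) from the stack, part of the kernel-wide -Wvla cleanup: kernel stacks are small and runtime-sized stack allocations are a hazard. The replacement sizes one heap buffer to the worst case up front; acr_r367.c below gets the same treatment. A generic sketch of the pattern, with hypothetical types:

```c
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
	struct list_head node;
	u32 desc_size;
};

static int build_all(struct list_head *items)
{
	struct item *it;
	u32 max_size = 0;
	u8 *buf;

	list_for_each_entry(it, items, node)	/* pass 1: worst-case size */
		max_size = max(max_size, it->desc_size);

	buf = kmalloc(max_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	list_for_each_entry(it, items, node) {
		/* pass 2: build each descriptor in buf, then consume it */
	}

	kfree(buf);
	return 0;
}
```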
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index 866877b88797..978ad0790367 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -265,6 +265,19 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
{
struct ls_ucode_img *_img;
u32 pos = 0;
+ u32 max_desc_size = 0;
+ u8 *gdesc;
+
+ list_for_each_entry(_img, imgs, node) {
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+
+ max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+ }
+
+ gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+ if (!gdesc)
+ return -ENOMEM;
nvkm_kmap(wpr_blob);
@@ -272,7 +285,6 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
const struct acr_r352_ls_func *ls_func =
acr->func->ls_func[_img->falcon_id];
- u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -298,6 +310,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
nvkm_done(wpr_blob);
+ kfree(gdesc);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 30491d132d59..df8b919dcf09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -129,6 +129,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
@@ -144,3 +145,4 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
index 632e9545e292..28ca29d0eeee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
@@ -74,6 +74,7 @@ gp10b_secboot_new(struct nvkm_device *device, int index,
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
@@ -91,3 +92,4 @@ MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 92fe125ce22e..f34c06bb5bd7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -4,7 +4,7 @@
* Copyright (C) 2010 Nokia Corporation
*
* Original Driver Author: Imre Deak <imre.deak@nokia.com>
- * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@ti.com>
* Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index b5d8a00df811..a1f1dc18407a 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -2,7 +2,7 @@
* Toppoly TD028TTEC1 panel support
*
* Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Neo 1973 code (jbt6k74.c):
* Copyright (C) 2006-2007 by OpenMoko, Inc.
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index acef7ece5783..07d00a186f15 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
@@ -82,7 +82,7 @@ static void __exit omap_dss_exit(void)
module_init(omap_dss_init);
module_exit(omap_dss_exit);
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 7f3ac6b13b56..84f274c4a4cb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 424143128cd4..9e7fcbd57e52 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 3d662e6805eb..9fcc50217133 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index d4a680629825..74467b308721 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 0b908e9de792..cb80ddaa19d2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 847c78ade024..38302631b64b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
@@ -180,6 +180,9 @@ struct dss_pll_hw {
/* DRA7 errata i886: use high N & M to avoid jitter */
bool errata_i886;
+
+ /* DRA7 errata i932: retry pll lock on failure */
+ bool errata_i932;
};
struct dss_pll {
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 078b0e8216c3..ff362b38bf0d 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -16,6 +16,7 @@
#define DSS_SUBSYS_NAME "PLL"
+#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -381,6 +382,22 @@ static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
return -ETIMEDOUT;
}
+static bool pll_is_locked(u32 stat)
+{
+ /*
+ * Required value for each bitfield listed below
+ *
+ * PLL_STATUS[6] = 0 PLL_BYPASS
+ * PLL_STATUS[5] = 0 PLL_HIGHJITTER
+ *
+ * PLL_STATUS[3] = 0 PLL_LOSSREF
+ * PLL_STATUS[2] = 0 PLL_RECAL
+ * PLL_STATUS[1] = 1 PLL_LOCK
+ * PLL_STATUS[0] = 1 PLL_CTRL_RESET_DONE
+ */
+ return ((stat & 0x6f) == 0x3);
+}
+
int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo)
{
@@ -436,18 +453,54 @@ int dss_pll_write_config_type_a(struct dss_pll *pll,
l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */
writel_relaxed(l, base + PLL_CONFIGURATION2);
- writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+ if (hw->errata_i932) {
+ int cnt = 0;
+ u32 sleep_time;
+ const u32 max_lock_retries = 20;
- if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
- DSSERR("DSS DPLL GO bit not going down.\n");
- r = -EIO;
- goto err;
- }
+ /*
+ * Calculate wait time for PLL LOCK
+ * 1000 REFCLK cycles in us.
+ */
+ sleep_time = DIV_ROUND_UP(1000*1000*1000, cinfo->fint);
- if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
- DSSERR("cannot lock DSS DPLL\n");
- r = -EIO;
- goto err;
+ for (cnt = 0; cnt < max_lock_retries; cnt++) {
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ /**
+ * read the register back to ensure the write is
+ * flushed
+ */
+ readl_relaxed(base + PLL_GO);
+
+ usleep_range(sleep_time, sleep_time + 5);
+ l = readl_relaxed(base + PLL_STATUS);
+
+ if (pll_is_locked(l) &&
+ !(readl_relaxed(base + PLL_GO) & 0x1))
+ break;
+
+ }
+
+ if (cnt == max_lock_retries) {
+ DSSERR("cannot lock PLL\n");
+ r = -EIO;
+ goto err;
+ }
+ } else {
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
+ DSSERR("DSS DPLL GO bit not going down.\n");
+ r = -EIO;
+ goto err;
+ }
+
+ if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
+ DSSERR("cannot lock DSS DPLL\n");
+ r = -EIO;
+ goto err;
+ }
}
l = readl_relaxed(base + PLL_CONFIGURATION2);
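Errata i932 makes the DRA7 DSS PLL occasionally fail to lock, so the write-config path now retries PLL_GO up to 20 times and sleeps roughly 1000 REFCLK cycles between attempts. The sleep computation converts cycles to microseconds: 1000 cycles at fint Hz last 1000/fint seconds, i.e. 10^9/fint µs. A sketch of the arithmetic:

```c
#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* 1000 REFCLK cycles at fint Hz last 1000/fint s = 10^9/fint microseconds. */
static u32 i932_lock_wait_us(u32 fint)
{
	return DIV_ROUND_UP(1000 * 1000 * 1000, fint);
}
/* i932_lock_wait_us(20000000) == 50; i932_lock_wait_us(1000000) == 1000 */
```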
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 1e2c931f6acf..69c3b7a3d5c7 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 24d1ced210bd..ac01907dcc34 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* VENC settings from TI's DSS driver
*
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index 585ed94ccf17..cb46311f92c9 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -134,6 +134,7 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.has_refsel = true,
.errata_i886 = true,
+ .errata_i932 = true,
};
struct dss_pll *dss_video_pll_init(struct dss_device *dss,
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 5cde26ac937b..2ddb856666c4 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -126,14 +126,14 @@ static int omap_connector_get_modes(struct drm_connector *connector)
if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
drm_edid_is_valid(edid)) {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
connector, edid);
n = drm_add_edid_modes(connector, edid);
omap_connector->hdmi_mode =
drm_detect_hdmi_monitor(edid);
} else {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
connector, NULL);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index b42e286616b0..91cf043f2b6b 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -30,16 +30,11 @@ static int gem_show(struct seq_file *m, void *arg)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct omap_drm_private *priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(m, "All Objects:\n");
+ mutex_lock(&priv->list_lock);
omap_gem_describe_objects(&priv->obj_list, m);
-
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->list_lock);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index ef3b0e3571ec..1b6601e9b107 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -274,7 +274,7 @@ static int omap_modeset_init(struct drm_device *dev)
if (IS_ERR(crtc))
return PTR_ERR(crtc);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << crtc_idx);
priv->crtcs[priv->num_crtcs++] = crtc;
@@ -493,7 +493,7 @@ static struct drm_driver omap_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = omap_gem_prime_export,
.gem_prime_import = omap_gem_prime_import,
- .gem_free_object = omap_gem_free_object,
+ .gem_free_object_unlocked = omap_gem_free_object,
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
@@ -540,7 +540,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
- spin_lock_init(&priv->list_lock);
+ mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
/* Allocate and initialize the DRM device. */
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 6eaee4df4559..f27c8e216adf 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -71,7 +71,7 @@ struct omap_drm_private {
struct workqueue_struct *wq;
/* lock for obj_list below */
- spinlock_t list_lock;
+ struct mutex list_lock;
/* list of GEM objects: */
struct list_head obj_list;
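list_lock changes from a spinlock to a mutex because its holders may now sleep: the debugfs and resume paths take the new per-object omap_obj->lock (itself a mutex) while walking obj_list, and the alloc/free paths run in process context anyway. A sleeping lock can never be acquired while holding a spinlock, so the outer lock must become a mutex too — a sketch of the resulting nesting:

```c
static void describe_all(struct omap_drm_private *priv, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		mutex_lock(&omap_obj->lock);	/* sleeps: illegal under a spinlock */
		/* ... print the object's state ... */
		mutex_unlock(&omap_obj->lock);
	}
	mutex_unlock(&priv->list_lock);
}
```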
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 5fd22ca73913..9f1e3d8f8488 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -51,9 +52,6 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
- struct drm_gem_object *bo;
- u32 pitch;
- u32 offset;
dma_addr_t dma_addr;
};
@@ -68,56 +66,28 @@ struct omap_framebuffer {
struct mutex lock;
};
-static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- omap_fb->planes[0].bo, handle);
-}
-
-static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
-
- drm_gem_object_unreference_unlocked(plane->bo);
- }
-
- kfree(omap_fb);
-}
-
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
- .create_handle = omap_framebuffer_create_handle,
- .destroy = omap_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static u32 get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct drm_framebuffer *fb,
const struct drm_format_info *format, int n, int x, int y)
{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ struct plane *plane = &omap_fb->planes[n];
u32 offset;
- offset = plane->offset
+ offset = fb->offsets[n]
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
- + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
+ + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- struct plane *plane = &omap_fb->planes[0];
-
- return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -176,7 +146,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -201,12 +171,12 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x += w - 1;
/* Note: x and y are in TILER units, not pixels */
- omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
+ omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
&info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
/* Note: stride in TILER units, not pixels */
- info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
+ info->screen_width = omap_gem_tiled_stride(fb->obj[0], orient);
} else {
switch (state->rotation & DRM_MODE_ROTATE_MASK) {
case 0:
@@ -221,10 +191,10 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
break;
}
- info->paddr = get_linear_addr(plane, format, 0, x, y);
+ info->paddr = get_linear_addr(fb, format, 0, x, y);
info->rotation_type = OMAP_DSS_ROT_NONE;
info->rotation = DRM_MODE_ROTATE_0;
- info->screen_width = plane->pitch;
+ info->screen_width = fb->pitches[0];
}
/* convert to pixels: */
@@ -234,11 +204,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
- omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
- info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+ info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
}
} else {
info->p_uv_addr = 0;
@@ -261,10 +231,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_pin(plane->bo, &plane->dma_addr);
+ ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
if (ret)
goto fail;
- omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
+ omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
}
omap_fb->pin_count++;
@@ -276,7 +246,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
fail:
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
@@ -302,54 +272,25 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
}
-/* iterate thru all the connectors, returning ones that are attached
- * to the same fb..
- */
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from)
-{
- struct drm_device *dev = fb->dev;
- struct list_head *connector_list = &dev->mode_config.connector_list;
- struct drm_connector *connector = from;
-
- if (!from)
- return list_first_entry_or_null(connector_list, typeof(*from),
- head);
-
- list_for_each_entry_from(connector, connector_list, head) {
- if (connector != from) {
- struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- if (crtc && crtc->primary->fb == fb)
- return connector;
-
- }
- }
-
- return NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->format->format);
for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
- i, plane->offset, plane->pitch);
- omap_gem_describe(plane->bo, m);
+				i, fb->offsets[i], fb->pitches[i]);
+ omap_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -454,9 +395,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
- plane->bo = bos[i];
- plane->offset = mode_cmd->offsets[i];
- plane->pitch = pitch;
+ fb->obj[i] = bos[i];
plane->dma_addr = 0;
}
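With the private per-plane bo/offset/pitch duplicates gone, scanout addressing uses the core drm_framebuffer fields (fb->obj[], fb->offsets[], fb->pitches[]) directly, and the create_handle/destroy callbacks fall through to the drm_gem_fb_* helpers. A condensed sketch of the linear-address math now in get_linear_addr(), with chroma planes (n > 0) scaled down by the format's subsampling factors:

```c
static u32 plane_linear_addr(struct drm_framebuffer *fb, dma_addr_t base,
			     int n, int x, int y)
{
	const struct drm_format_info *fmt = fb->format;
	u32 offset = fb->offsets[n]
		   + x * fmt->cpp[n] / (n ? fmt->hsub : 1)
		   + y * fb->pitches[n] / (n ? fmt->vsub : 1);

	return base + offset;	/* base is the plane's pinned DMA address */
}
```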
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index 94ad5f9e4404..c20cb4bc714d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -38,8 +38,6 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 0f66c74a54b0..d958cc813a94 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -170,13 +170,11 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
goto fail;
}
- mutex_lock(&dev->struct_mutex);
-
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
- goto fail_unlock;
+ goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
@@ -212,12 +210,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
fail:
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 17a53d207978..4ba5d035c590 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -47,6 +47,9 @@ struct omap_gem_object {
/** roll applied when mapping to DMM */
u32 roll;
+ /** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
+ struct mutex lock;
+
/**
* dma_addr contains the buffer DMA address. It is valid for
*
@@ -137,14 +140,12 @@ struct omap_drm_usergart {
*/
/** get mmap offset */
-static u64 mmap_offset(struct drm_gem_object *obj)
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
size_t size;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
/* Make it mmapable */
size = omap_gem_mmap_size(obj);
ret = drm_gem_create_mmap_offset_size(obj, size);
@@ -156,7 +157,7 @@ static u64 mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node);
}
-static bool is_contiguous(struct omap_gem_object *omap_obj)
+static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
@@ -171,14 +172,14 @@ static bool is_contiguous(struct omap_gem_object *omap_obj)
* Eviction
*/
-static void evict_entry(struct drm_gem_object *obj,
+static void omap_gem_evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
int n = priv->usergart[fmt].height;
size_t size = PAGE_SIZE * n;
- loff_t off = mmap_offset(obj) +
+ loff_t off = omap_gem_mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
@@ -199,7 +200,7 @@ static void evict_entry(struct drm_gem_object *obj,
}
/* Evict a buffer from usergart, if it is mapped there */
-static void evict(struct drm_gem_object *obj)
+static void omap_gem_evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
@@ -213,7 +214,7 @@ static void evict(struct drm_gem_object *obj)
&priv->usergart[fmt].entry[i];
if (entry->obj == obj)
- evict_entry(obj, fmt, entry);
+ omap_gem_evict_entry(obj, fmt, entry);
}
}
}
@@ -222,7 +223,10 @@ static void evict(struct drm_gem_object *obj)
* Page Management
*/
-/** ensure backing pages are allocated */
+/*
+ * Ensure backing pages are allocated. Must be called with the omap_obj.lock
+ * held.
+ */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -232,7 +236,14 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
int i, ret;
dma_addr_t *addrs;
- WARN_ON(omap_obj->pages);
+ lockdep_assert_held(&omap_obj->lock);
+
+ /*
+ * If not using shmem (in which case backing pages don't need to be
+ * allocated) or if pages are already allocated we're done.
+ */
+ if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
+ return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
@@ -288,35 +299,15 @@ free_pages:
return ret;
}
-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
-
- if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
- ret = omap_gem_attach_pages(obj);
- if (ret) {
- dev_err(obj->dev->dev, "could not attach pages\n");
- return ret;
- }
- }
-
- /* TODO: even phys-contig.. we should have a list of pages? */
- *pages = omap_obj->pages;
-
- return 0;
-}
-
-/** release backing pages */
+/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
unsigned int npages = obj->size >> PAGE_SHIFT;
unsigned int i;
+ lockdep_assert_held(&omap_obj->lock);
+
for (i = 0; i < npages; i++) {
if (omap_obj->dma_addrs[i])
dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
@@ -336,16 +327,6 @@ u32 omap_gem_flags(struct drm_gem_object *obj)
return to_omap_bo(obj)->flags;
}
-u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
-{
- u64 offset;
-
- mutex_lock(&obj->dev->struct_mutex);
- offset = mmap_offset(obj);
- mutex_unlock(&obj->dev->struct_mutex);
- return offset;
-}
-
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
@@ -371,7 +352,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
*/
/* Normal handling for the case of faulting in non-tiled buffers */
-static int fault_1d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -385,18 +366,19 @@ static int fault_1d(struct drm_gem_object *obj,
omap_gem_cpu_sync_page(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
- BUG_ON(!is_contiguous(omap_obj));
+ BUG_ON(!omap_gem_is_contiguous(omap_obj));
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
}
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vmf_insert_mixed(vma, vmf->address,
+ __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
-static int fault_2d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -407,7 +389,8 @@ static int fault_2d(struct drm_gem_object *obj,
unsigned long pfn;
pgoff_t pgoff, base_pgoff;
unsigned long vaddr;
- int i, ret, slots;
+ int i, err, slots;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
/*
* Note the height of the slot is also equal to the number of pages
@@ -443,7 +426,7 @@ static int fault_2d(struct drm_gem_object *obj,
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
- evict_entry(entry->obj, fmt, entry);
+ omap_gem_evict_entry(entry->obj, fmt, entry);
entry->obj = obj;
entry->obj_pgoff = base_pgoff;
@@ -473,9 +456,10 @@ static int fault_2d(struct drm_gem_object *obj,
memset(pages + slots, 0,
sizeof(struct page *) * (n - slots));
- ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
- if (ret) {
- dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+ err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+ if (err) {
+ ret = vmf_error(err);
+ dev_err(obj->dev->dev, "failed to pin: %d\n", err);
return ret;
}
@@ -485,7 +469,10 @@ static int fault_2d(struct drm_gem_object *obj,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_mixed(vma,
+ vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ if (ret & VM_FAULT_ERROR)
+ break;
pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
@@ -494,7 +481,7 @@ static int fault_2d(struct drm_gem_object *obj,
priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
% NUM_USERGART_ENTRIES;
- return 0;
+ return ret;
}
/**
@@ -509,24 +496,25 @@ static int fault_2d(struct drm_gem_object *obj,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-int omap_gem_fault(struct vm_fault *vmf)
+vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- struct drm_device *dev = obj->dev;
- struct page **pages;
- int ret;
+ int err;
+ vm_fault_t ret;
/* Make sure we don't parallel update on a fault, nor move or remove
* something from beneath our feet
*/
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
/* if a shmem backed object, make sure we have pages attached now */
- ret = get_pages(obj, &pages);
- if (ret)
+ err = omap_gem_attach_pages(obj);
+ if (err) {
+ ret = vmf_error(err);
goto fail;
+ }
/* where should we do corresponding put_pages().. we are mapping
* the original page, rather than thru a GART, so we can't rely
@@ -535,28 +523,14 @@ int omap_gem_fault(struct vm_fault *vmf)
*/
if (omap_obj->flags & OMAP_BO_TILED)
- ret = fault_2d(obj, vma, vmf);
+ ret = omap_gem_fault_2d(obj, vma, vmf);
else
- ret = fault_1d(obj, vma, vmf);
+ ret = omap_gem_fault_1d(obj, vma, vmf);
fail:
- mutex_unlock(&dev->struct_mutex);
- switch (ret) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ mutex_unlock(&omap_obj->lock);
+ return ret;
}
/** We override mainly to fix up some of the vm mapping flags.. */
@@ -689,21 +663,22 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
omap_obj->roll = roll;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
/* if we aren't mapped yet, we don't need to do anything */
if (omap_obj->block) {
- struct page **pages;
- ret = get_pages(obj, &pages);
+ ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+
+ ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
+ roll, true);
if (ret)
dev_err(obj->dev->dev, "could not repin: %d\n", ret);
}
fail:
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return ret;
}
@@ -722,7 +697,7 @@ fail:
* the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
* unmapped from the CPU.
*/
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
+static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -738,7 +713,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
if (omap_obj->dma_addrs[pgoff]) {
@@ -758,7 +733,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
struct page **pages = omap_obj->pages;
bool dirty = false;
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
for (i = 0; i < npages; i++) {
@@ -804,18 +779,17 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
- if (!is_contiguous(omap_obj) && priv->has_dmm) {
+ if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) {
- struct page **pages;
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
- ret = get_pages(obj, &pages);
+ ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
@@ -835,7 +809,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
}
/* TODO: enable async refill.. */
- ret = tiler_pin(block, pages, npages,
+ ret = tiler_pin(block, omap_obj->pages, npages,
omap_obj->roll, true);
if (ret) {
tiler_release(block);
@@ -853,7 +827,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->dma_addr_cnt++;
*dma_addr = omap_obj->dma_addr;
- } else if (is_contiguous(omap_obj)) {
+ } else if (omap_gem_is_contiguous(omap_obj)) {
*dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
@@ -861,7 +835,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
}
fail:
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return ret;
}
@@ -879,7 +853,8 @@ void omap_gem_unpin(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
+
if (omap_obj->dma_addr_cnt > 0) {
omap_obj->dma_addr_cnt--;
if (omap_obj->dma_addr_cnt == 0) {
@@ -898,7 +873,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
}
}
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
}
/* Get rotated scanout address (only valid if already pinned), at the
@@ -911,13 +886,16 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
+
if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
(omap_obj->flags & OMAP_BO_TILED)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
- mutex_unlock(&obj->dev->struct_mutex);
+
+ mutex_unlock(&omap_obj->lock);
+
return ret;
}
@@ -944,17 +922,27 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap)
{
- int ret;
- if (!remap) {
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (!omap_obj->pages)
- return -ENOMEM;
- *pages = omap_obj->pages;
- return 0;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&omap_obj->lock);
+
+ if (remap) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret)
+ goto unlock;
}
- mutex_lock(&obj->dev->struct_mutex);
- ret = get_pages(obj, pages);
- mutex_unlock(&obj->dev->struct_mutex);
+
+ if (!omap_obj->pages) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ *pages = omap_obj->pages;
+
+unlock:
+ mutex_unlock(&omap_obj->lock);
+
return ret;
}
@@ -969,23 +957,34 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
-/* Get kernel virtual address for CPU access.. this more or less only
- * exists for omap_fbdev. This should be called with struct_mutex
- * held.
+/*
+ * Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev.
*/
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ void *vaddr;
+ int ret;
+
+ mutex_lock(&omap_obj->lock);
+
if (!omap_obj->vaddr) {
- struct page **pages;
- int ret = get_pages(obj, &pages);
- if (ret)
- return ERR_PTR(ret);
- omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ vaddr = ERR_PTR(ret);
+ goto unlock;
+ }
+
+ omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
- return omap_obj->vaddr;
+
+ vaddr = omap_obj->vaddr;
+
+unlock:
+ mutex_unlock(&omap_obj->lock);
+ return vaddr;
}
#endif
@@ -1001,6 +1000,7 @@ int omap_gem_resume(struct drm_device *dev)
struct omap_gem_object *omap_obj;
int ret = 0;
+ mutex_lock(&priv->list_lock);
list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
if (omap_obj->block) {
struct drm_gem_object *obj = &omap_obj->base;
@@ -1012,12 +1012,14 @@ int omap_gem_resume(struct drm_device *dev)
omap_obj->roll, true);
if (ret) {
dev_err(dev->dev, "could not repin: %d\n", ret);
- return ret;
+ goto done;
}
}
}
- return 0;
+done:
+ mutex_unlock(&priv->list_lock);
+ return ret;
}
#endif
@@ -1033,6 +1035,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
+ mutex_lock(&omap_obj->lock);
+
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
@@ -1050,6 +1054,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, " %zu", obj->size);
}
+ mutex_unlock(&omap_obj->lock);
+
seq_printf(m, "\n");
}
@@ -1081,17 +1087,21 @@ void omap_gem_free_object(struct drm_gem_object *obj)
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- evict(obj);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ omap_gem_evict(obj);
- spin_lock(&priv->list_lock);
+ mutex_lock(&priv->list_lock);
list_del(&omap_obj->mm_list);
- spin_unlock(&priv->list_lock);
+ mutex_unlock(&priv->list_lock);
- /* this means the object is still pinned.. which really should
- * not happen. I think..
+ /*
+ * We own the sole reference to the object at this point, but to keep
+	 * lockdep happy, we must still take the omap_obj->lock to call
+ * omap_gem_detach_pages(). This should hardly make any difference as
+ * there can't be any lock contention.
*/
+ mutex_lock(&omap_obj->lock);
+
+ /* The object should not be pinned. */
WARN_ON(omap_obj->dma_addr_cnt > 0);
if (omap_obj->pages) {
@@ -1110,8 +1120,12 @@ void omap_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, omap_obj->sgt);
}
+ mutex_unlock(&omap_obj->lock);
+
drm_gem_object_release(obj);
+ mutex_destroy(&omap_obj->lock);
+
kfree(omap_obj);
}
@@ -1167,6 +1181,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
obj = &omap_obj->base;
omap_obj->flags = flags;
+ mutex_init(&omap_obj->lock);
if (flags & OMAP_BO_TILED) {
/*
@@ -1206,9 +1221,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
goto err_release;
}
- spin_lock(&priv->list_lock);
+ mutex_lock(&priv->list_lock);
list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
+ mutex_unlock(&priv->list_lock);
return obj;
@@ -1231,16 +1246,15 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
if (sgt->orig_nents != 1 && !priv->has_dmm)
return ERR_PTR(-EINVAL);
- mutex_lock(&dev->struct_mutex);
-
gsize.bytes = PAGE_ALIGN(size);
obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
- if (!obj) {
- obj = ERR_PTR(-ENOMEM);
- goto done;
- }
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
omap_obj = to_omap_bo(obj);
+
+ mutex_lock(&omap_obj->lock);
+
omap_obj->sgt = sgt;
if (sgt->orig_nents == 1) {
@@ -1276,7 +1290,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
}
done:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return obj;
}
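This file is the core of the omapdrm series: every path that used the DRM-global dev->struct_mutex now takes a per-object omap_obj->lock instead, and the old get_pages() wrapper is folded into omap_gem_attach_pages(), which is idempotent under the lock (it returns immediately for non-shmem objects or when pages already exist). The resulting call pattern, reduced to a sketch with hypothetical names:

```c
struct obj {
	struct mutex lock;	/* protects pages, vaddr, pin count, block */
	struct page **pages;
};

static int obj_attach_pages(struct obj *o);	/* no-op if already attached */

static int obj_operate(struct obj *o)
{
	int ret;

	mutex_lock(&o->lock);
	ret = obj_attach_pages(o);
	if (ret)
		goto unlock;

	/* ... operate on o->pages ... */
unlock:
	mutex_unlock(&o->lock);
	return ret;
}
```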
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index a78bde05193a..c1c45fbde155 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -21,6 +21,7 @@
#define __OMAPDRM_GEM_H__
#include <linux/types.h>
+#include <linux/mm_types.h>
enum dma_data_direction;
@@ -80,7 +81,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
-int omap_gem_fault(struct vm_fault *vmf);
+vm_fault_t omap_gem_fault(struct vm_fault *vmf);
int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 8e41d649e248..ec04a69ade46 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -93,23 +93,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
return 0;
}
-
-static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
- unsigned long page_num)
-{
- struct drm_gem_object *obj = buffer->priv;
- struct page **pages;
- omap_gem_get_pages(obj, &pages, false);
- omap_gem_cpu_sync_page(obj, page_num);
- return kmap_atomic(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
- unsigned long page_num, void *addr)
-{
- kunmap_atomic(addr);
-}
-
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
unsigned long page_num)
{
@@ -148,8 +131,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .map_atomic = omap_gem_dmabuf_kmap_atomic,
- .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
.map = omap_gem_dmabuf_kmap,
.unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 25682ff3449a..6020c30a33b3 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -46,6 +46,15 @@ config DRM_PANEL_ILITEK_IL9322
Say Y here if you want to enable support for Ilitek IL9322
QVGA (320x240) RGB, YUV and ITU-T BT.656 panels.
+config DRM_PANEL_ILITEK_ILI9881C
+ tristate "Ilitek ILI9881C-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+	  Ilitek ILI9881C controller.
+
config DRM_PANEL_INNOLUX_P079ZCA
tristate "Innolux P079ZCA panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f26efc11d746..5ccaaa9d13af 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
new file mode 100644
index 000000000000..3ad4a46c4e94
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Bootlin
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct ili9881c {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *power;
+ struct gpio_desc *reset;
+};
+
+enum ili9881c_op {
+ ILI9881C_SWITCH_PAGE,
+ ILI9881C_COMMAND,
+};
+
+struct ili9881c_instr {
+ enum ili9881c_op op;
+
+ union arg {
+ struct cmd {
+ u8 cmd;
+ u8 data;
+ } cmd;
+ u8 page;
+ } arg;
+};
+
+#define ILI9881C_SWITCH_PAGE_INSTR(_page) \
+ { \
+ .op = ILI9881C_SWITCH_PAGE, \
+ .arg = { \
+ .page = (_page), \
+ }, \
+ }
+
+#define ILI9881C_COMMAND_INSTR(_cmd, _data) \
+ { \
+ .op = ILI9881C_COMMAND, \
+ .arg = { \
+ .cmd = { \
+ .cmd = (_cmd), \
+ .data = (_data), \
+ }, \
+ }, \
+ }
+
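+/*
+ * The table below is the panel's power-on sequence: the driver replays
+ * it in order, switching register pages via ILI9881C_SWITCH_PAGE_INSTR()
+ * and issuing one command/data write per ILI9881C_COMMAND_INSTR(). The
+ * consuming loop lives further down in this file.
+ */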
+static const struct ili9881c_instr ili9881c_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x03),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x06),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x18),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x04),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x03),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x25),
+ ILI9881C_COMMAND_INSTR(0x10, 0x25),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x02),
+ ILI9881C_COMMAND_INSTR(0x61, 0x02),
+ ILI9881C_COMMAND_INSTR(0x62, 0x02),
+ ILI9881C_COMMAND_INSTR(0x63, 0x02),
+ ILI9881C_COMMAND_INSTR(0x64, 0x02),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x06),
+ ILI9881C_COMMAND_INSTR(0x70, 0x07),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x02),
+ ILI9881C_COMMAND_INSTR(0x77, 0x02),
+ ILI9881C_COMMAND_INSTR(0x78, 0x02),
+ ILI9881C_COMMAND_INSTR(0x79, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x85, 0x06),
+ ILI9881C_COMMAND_INSTR(0x86, 0x07),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x22),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3A, 0xA4),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x53, 0xDC),
+ ILI9881C_COMMAND_INSTR(0x55, 0xA7),
+ ILI9881C_COMMAND_INSTR(0x50, 0x78),
+ ILI9881C_COMMAND_INSTR(0x51, 0x78),
+ ILI9881C_COMMAND_INSTR(0x31, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x39),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x46),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x12),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x19),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA8, 0xa6),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x85),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x51),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x22),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x4f),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x3F),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x3a),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x45),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x11),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x24),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xC8, 0xaa),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x96),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x51),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x22),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
+};
+
+static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
+{
+ return container_of(panel, struct ili9881c, panel);
+}
+
+/*
+ * The panel seems to accept some private DCS commands that map
+ * directly to registers.
+ *
+ * It is organised by page, with each page having its own set of
+ * registers, and the first page appears to hold the standard
+ * DCS commands.
+ *
+ * So before any attempt at sending a command or data, we have to
+ * make sure we are on the right page.
+ */
+static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+{
+ u8 buf[4] = { 0xff, 0x98, 0x81, page };
+ int ret;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+{
+ u8 buf[2] = { cmd, data };
+ int ret;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_prepare(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+ unsigned int i;
+ int ret;
+
+ /* Power the panel */
+ ret = regulator_enable(ctx->power);
+ if (ret)
+ return ret;
+ msleep(5);
+
+ /* And reset it */
+ gpiod_set_value(ctx->reset, 1);
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(20);
+
+ for (i = 0; i < ARRAY_SIZE(ili9881c_init); i++) {
+ const struct ili9881c_instr *instr = &ili9881c_init[i];
+
+ if (instr->op == ILI9881C_SWITCH_PAGE)
+ ret = ili9881c_switch_page(ctx, instr->arg.page);
+ else if (instr->op == ILI9881C_COMMAND)
+ ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
+ instr->arg.cmd.data);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = ili9881c_switch_page(ctx, 0);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_enable(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ msleep(120);
+
+ mipi_dsi_dcs_set_display_on(ctx->dsi);
+ backlight_enable(ctx->backlight);
+
+ return 0;
+}
+
+static int ili9881c_disable(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int ili9881c_unprepare(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ regulator_disable(ctx->power);
+ gpiod_set_value(ctx->reset, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode bananapi_default_mode = {
+ .clock = 62000,
+ .vrefresh = 60,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 10,
+ .hsync_end = 720 + 10 + 20,
+ .htotal = 720 + 10 + 20 + 30,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 10,
+ .vsync_end = 1280 + 10 + 10,
+ .vtotal = 1280 + 10 + 10 + 20,
+};
+
+static int ili9881c_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+ if (!mode) {
+ dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ bananapi_default_mode.hdisplay,
+ bananapi_default_mode.vdisplay,
+ bananapi_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.width_mm = 62;
+ panel->connector->display_info.height_mm = 110;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ili9881c_funcs = {
+ .prepare = ili9881c_prepare,
+ .unprepare = ili9881c_unprepare,
+ .enable = ili9881c_enable,
+ .disable = ili9881c_disable,
+ .get_modes = ili9881c_get_modes,
+};
+
+static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct device_node *np;
+ struct ili9881c *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &ili9881c_funcs;
+
+ ctx->power = devm_regulator_get(&dsi->dev, "power");
+ if (IS_ERR(ctx->power)) {
+ dev_err(&dsi->dev, "Couldn't get our power regulator\n");
+ return PTR_ERR(ctx->power);
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
+ if (np) {
+ ctx->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!ctx->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ if (ctx->backlight)
+ put_device(&ctx->backlight->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ili9881c_of_match[] = {
+ { .compatible = "bananapi,lhr050h41" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9881c_of_match);
+
+static struct mipi_dsi_driver ili9881c_dsi_driver = {
+ .probe = ili9881c_dsi_probe,
+ .remove = ili9881c_dsi_remove,
+ .driver = {
+ .name = "ili9881c-dsi",
+ .of_match_table = ili9881c_of_match,
+ },
+};
+module_mipi_dsi_driver(ili9881c_dsi_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Ilitek ILI9881C Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 57df39b5c589..72edb334d997 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
@@ -20,12 +21,41 @@
#include <video/mipi_display.h>
+struct panel_init_cmd {
+ size_t len;
+ const char *data;
+};
+
+#define _INIT_CMD(...) { \
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+struct panel_desc {
+ const struct drm_display_mode *mode;
+ unsigned int bpc;
+ struct {
+ unsigned int width;
+ unsigned int height;
+ } size;
+
+ unsigned long flags;
+ enum mipi_dsi_pixel_format format;
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ unsigned int sleep_mode_delay;
+ unsigned int power_down_delay;
+};
+
struct innolux_panel {
struct drm_panel base;
struct mipi_dsi_device *link;
+ const struct panel_desc *desc;
struct backlight_device *backlight;
- struct regulator *supply;
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
struct gpio_desc *enable_gpio;
bool prepared;
@@ -72,12 +102,16 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
return err;
}
+ if (innolux->desc->sleep_mode_delay)
+ msleep(innolux->desc->sleep_mode_delay);
+
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
- /* T8: 80ms - 1000ms */
- msleep(80);
+ if (innolux->desc->power_down_delay)
+ msleep(innolux->desc->power_down_delay);
- err = regulator_disable(innolux->supply);
+ err = regulator_bulk_disable(innolux->desc->num_supplies,
+ innolux->supplies);
if (err < 0)
return err;
@@ -89,24 +123,55 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
static int innolux_panel_prepare(struct drm_panel *panel)
{
struct innolux_panel *innolux = to_innolux_panel(panel);
- int err, regulator_err;
+ int err;
if (innolux->prepared)
return 0;
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
- err = regulator_enable(innolux->supply);
+ err = regulator_bulk_enable(innolux->desc->num_supplies,
+ innolux->supplies);
if (err < 0)
return err;
- /* T2: 15ms - 1000ms */
- usleep_range(15000, 16000);
+ /* p079zca: t2 (20ms), p097pfg: t4 (15ms) */
+ usleep_range(20000, 21000);
gpiod_set_value_cansleep(innolux->enable_gpio, 1);
- /* T4: 15ms - 1000ms */
- usleep_range(15000, 16000);
+ /* p079zca: t4, p097pfg: t5 */
+ usleep_range(20000, 21000);
+
+ if (innolux->desc->init_cmds) {
+ const struct panel_init_cmd *cmds =
+ innolux->desc->init_cmds;
+ unsigned int i;
+
+ for (i = 0; cmds[i].len != 0; i++) {
+ const struct panel_init_cmd *cmd = &cmds[i];
+
+ err = mipi_dsi_generic_write(innolux->link, cmd->data,
+ cmd->len);
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to write command %u\n", i);
+ goto poweroff;
+ }
+
+ /*
+ * Included by trial and error, because without this
+ * (or at least some delay) the panel sometimes did
+ * not pick up the command sequence.
+ */
+ err = mipi_dsi_dcs_nop(innolux->link);
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to send DCS nop: %d\n", err);
+ goto poweroff;
+ }
+ }
+ }
err = mipi_dsi_dcs_exit_sleep_mode(innolux->link);
if (err < 0) {
@@ -133,12 +198,9 @@ static int innolux_panel_prepare(struct drm_panel *panel)
return 0;
poweroff:
- regulator_err = regulator_disable(innolux->supply);
- if (regulator_err)
- DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n",
- regulator_err);
-
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
+ regulator_bulk_disable(innolux->desc->num_supplies, innolux->supplies);
+
return err;
}
@@ -162,7 +224,11 @@ static int innolux_panel_enable(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode default_mode = {
+static const char * const innolux_p079zca_supply_names[] = {
+ "power",
+};
+
+static const struct drm_display_mode innolux_p079zca_mode = {
.clock = 56900,
.hdisplay = 768,
.hsync_start = 768 + 40,
@@ -175,15 +241,181 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
+static const struct panel_desc innolux_p079zca_panel_desc = {
+ .mode = &innolux_p079zca_mode,
+ .bpc = 8,
+ .size = {
+ .width = 120,
+ .height = 160,
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .supply_names = innolux_p079zca_supply_names,
+ .num_supplies = ARRAY_SIZE(innolux_p079zca_supply_names),
+ .power_down_delay = 80, /* T8: 80ms - 1000ms */
+};
+
+static const char * const innolux_p097pfg_supply_names[] = {
+ "avdd",
+ "avee",
+};
+
+static const struct drm_display_mode innolux_p097pfg_mode = {
+ .clock = 229000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 100,
+ .hsync_end = 1536 + 100 + 24,
+ .htotal = 1536 + 100 + 24 + 100,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 100,
+ .vsync_end = 2048 + 100 + 2,
+ .vtotal = 2048 + 100 + 2 + 18,
+ .vrefresh = 60,
+};
+
+/*
+ * The display manufacturer failed to provide init sequencing (see
+ * https://chromium-review.googlesource.com/c/chromiumos/third_party/coreboot/+/892065/),
+ * so the init sequence here stems from a register dump of a working panel.
+ */
+static const struct panel_init_cmd innolux_p097pfg_init_cmds[] = {
+ /* page 0 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00),
+ _INIT_CMD(0xB1, 0xE8, 0x11),
+ _INIT_CMD(0xB2, 0x25, 0x02),
+ _INIT_CMD(0xB5, 0x08, 0x00),
+ _INIT_CMD(0xBC, 0x0F, 0x00),
+ _INIT_CMD(0xB8, 0x03, 0x06, 0x00, 0x00),
+ _INIT_CMD(0xBD, 0x01, 0x90, 0x14, 0x14),
+ _INIT_CMD(0x6F, 0x01),
+ _INIT_CMD(0xC0, 0x03),
+ _INIT_CMD(0x6F, 0x02),
+ _INIT_CMD(0xC1, 0x0D),
+ _INIT_CMD(0xD9, 0x01, 0x09, 0x70),
+ _INIT_CMD(0xC5, 0x12, 0x21, 0x00),
+ _INIT_CMD(0xBB, 0x93, 0x93),
+
+ /* page 1 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x01),
+ _INIT_CMD(0xB3, 0x3C, 0x3C),
+ _INIT_CMD(0xB4, 0x0F, 0x0F),
+ _INIT_CMD(0xB9, 0x45, 0x45),
+ _INIT_CMD(0xBA, 0x14, 0x14),
+ _INIT_CMD(0xCA, 0x02),
+ _INIT_CMD(0xCE, 0x04),
+ _INIT_CMD(0xC3, 0x9B, 0x9B),
+ _INIT_CMD(0xD8, 0xC0, 0x03),
+ _INIT_CMD(0xBC, 0x82, 0x01),
+ _INIT_CMD(0xBD, 0x9E, 0x01),
+
+ /* page 2 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x02),
+ _INIT_CMD(0xB0, 0x82),
+ _INIT_CMD(0xD1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x82, 0x00, 0xA5,
+ 0x00, 0xC1, 0x00, 0xEA, 0x01, 0x0D, 0x01, 0x40),
+ _INIT_CMD(0xD2, 0x01, 0x6A, 0x01, 0xA8, 0x01, 0xDC, 0x02, 0x29,
+ 0x02, 0x67, 0x02, 0x68, 0x02, 0xA8, 0x02, 0xF0),
+ _INIT_CMD(0xD3, 0x03, 0x19, 0x03, 0x49, 0x03, 0x67, 0x03, 0x8C,
+ 0x03, 0xA6, 0x03, 0xC7, 0x03, 0xDE, 0x03, 0xEC),
+ _INIT_CMD(0xD4, 0x03, 0xFF, 0x03, 0xFF),
+ _INIT_CMD(0xE0, 0x00, 0x00, 0x00, 0x86, 0x00, 0xC5, 0x00, 0xE5,
+ 0x00, 0xFF, 0x01, 0x26, 0x01, 0x45, 0x01, 0x75),
+ _INIT_CMD(0xE1, 0x01, 0x9C, 0x01, 0xD5, 0x02, 0x05, 0x02, 0x4D,
+ 0x02, 0x86, 0x02, 0x87, 0x02, 0xC3, 0x03, 0x03),
+ _INIT_CMD(0xE2, 0x03, 0x2A, 0x03, 0x56, 0x03, 0x72, 0x03, 0x94,
+ 0x03, 0xAC, 0x03, 0xCB, 0x03, 0xE0, 0x03, 0xED),
+ _INIT_CMD(0xE3, 0x03, 0xFF, 0x03, 0xFF),
+
+ /* page 3 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x03),
+ _INIT_CMD(0xB0, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xB1, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xB2, 0x00, 0x00, 0x06, 0x04, 0x01, 0x40, 0x85),
+ _INIT_CMD(0xB3, 0x10, 0x07, 0xFC, 0x04, 0x01, 0x40, 0x80),
+ _INIT_CMD(0xB6, 0xF0, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01,
+ 0x40, 0x80),
+ _INIT_CMD(0xBA, 0xC5, 0x07, 0x00, 0x04, 0x11, 0x25, 0x8C),
+ _INIT_CMD(0xBB, 0xC5, 0x07, 0x00, 0x03, 0x11, 0x25, 0x8C),
+ _INIT_CMD(0xC0, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+ _INIT_CMD(0xC1, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+ _INIT_CMD(0xC4, 0x00, 0x00),
+ _INIT_CMD(0xEF, 0x41),
+
+ /* page 4 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x04),
+ _INIT_CMD(0xEC, 0x4C),
+
+ /* page 5 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x05),
+ _INIT_CMD(0xB0, 0x13, 0x03, 0x03, 0x01),
+ _INIT_CMD(0xB1, 0x30, 0x00),
+ _INIT_CMD(0xB2, 0x02, 0x02, 0x00),
+ _INIT_CMD(0xB3, 0x82, 0x23, 0x82, 0x9D),
+ _INIT_CMD(0xB4, 0xC5, 0x75, 0x24, 0x57),
+ _INIT_CMD(0xB5, 0x00, 0xD4, 0x72, 0x11, 0x11, 0xAB, 0x0A),
+ _INIT_CMD(0xB6, 0x00, 0x00, 0xD5, 0x72, 0x24, 0x56),
+ _INIT_CMD(0xB7, 0x5C, 0xDC, 0x5C, 0x5C),
+ _INIT_CMD(0xB9, 0x0C, 0x00, 0x00, 0x01, 0x00),
+ _INIT_CMD(0xC0, 0x75, 0x11, 0x11, 0x54, 0x05),
+ _INIT_CMD(0xC6, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xD0, 0x00, 0x48, 0x08, 0x00, 0x00),
+ _INIT_CMD(0xD1, 0x00, 0x48, 0x09, 0x00, 0x00),
+
+ /* page 6 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x06),
+ _INIT_CMD(0xB0, 0x02, 0x32, 0x32, 0x08, 0x2F),
+ _INIT_CMD(0xB1, 0x2E, 0x15, 0x14, 0x13, 0x12),
+ _INIT_CMD(0xB2, 0x11, 0x10, 0x00, 0x3D, 0x3D),
+ _INIT_CMD(0xB3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xB4, 0x3D, 0x32),
+ _INIT_CMD(0xB5, 0x03, 0x32, 0x32, 0x09, 0x2F),
+ _INIT_CMD(0xB6, 0x2E, 0x1B, 0x1A, 0x19, 0x18),
+ _INIT_CMD(0xB7, 0x17, 0x16, 0x01, 0x3D, 0x3D),
+ _INIT_CMD(0xB8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xB9, 0x3D, 0x32),
+ _INIT_CMD(0xC0, 0x01, 0x32, 0x32, 0x09, 0x2F),
+ _INIT_CMD(0xC1, 0x2E, 0x1A, 0x1B, 0x16, 0x17),
+ _INIT_CMD(0xC2, 0x18, 0x19, 0x03, 0x3D, 0x3D),
+ _INIT_CMD(0xC3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xC4, 0x3D, 0x32),
+ _INIT_CMD(0xC5, 0x00, 0x32, 0x32, 0x08, 0x2F),
+ _INIT_CMD(0xC6, 0x2E, 0x14, 0x15, 0x10, 0x11),
+ _INIT_CMD(0xC7, 0x12, 0x13, 0x02, 0x3D, 0x3D),
+ _INIT_CMD(0xC8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xC9, 0x3D, 0x32),
+
+ {},
+};
+
+static const struct panel_desc innolux_p097pfg_panel_desc = {
+ .mode = &innolux_p097pfg_mode,
+ .bpc = 8,
+ .size = {
+ .width = 147,
+ .height = 196,
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_cmds = innolux_p097pfg_init_cmds,
+ .lanes = 4,
+ .supply_names = innolux_p097pfg_supply_names,
+ .num_supplies = ARRAY_SIZE(innolux_p097pfg_supply_names),
+ .sleep_mode_delay = 100, /* T15 */
+};
+
static int innolux_panel_get_modes(struct drm_panel *panel)
{
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+ const struct drm_display_mode *m = innolux->desc->mode;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(panel->drm, m);
if (!mode) {
DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ m->hdisplay, m->vdisplay, m->vrefresh);
return -ENOMEM;
}
@@ -191,9 +423,11 @@ static int innolux_panel_get_modes(struct drm_panel *panel)
drm_mode_probed_add(panel->connector, mode);
- panel->connector->display_info.width_mm = 120;
- panel->connector->display_info.height_mm = 160;
- panel->connector->display_info.bpc = 8;
+ panel->connector->display_info.width_mm =
+ innolux->desc->size.width;
+ panel->connector->display_info.height_mm =
+ innolux->desc->size.height;
+ panel->connector->display_info.bpc = innolux->desc->bpc;
return 1;
}
@@ -207,19 +441,42 @@ static const struct drm_panel_funcs innolux_panel_funcs = {
};
static const struct of_device_id innolux_of_match[] = {
- { .compatible = "innolux,p079zca", },
+ { .compatible = "innolux,p079zca",
+ .data = &innolux_p079zca_panel_desc
+ },
+ { .compatible = "innolux,p097pfg",
+ .data = &innolux_p097pfg_panel_desc
+ },
{ }
};
MODULE_DEVICE_TABLE(of, innolux_of_match);
-static int innolux_panel_add(struct innolux_panel *innolux)
+static int innolux_panel_add(struct mipi_dsi_device *dsi,
+ const struct panel_desc *desc)
{
- struct device *dev = &innolux->link->dev;
- int err;
+ struct innolux_panel *innolux;
+ struct device *dev = &dsi->dev;
+ int err, i;
+
+ innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
+ if (!innolux)
+ return -ENOMEM;
+
+ innolux->desc = desc;
+
+ innolux->supplies = devm_kcalloc(dev, desc->num_supplies,
+ sizeof(*innolux->supplies),
+ GFP_KERNEL);
+ if (!innolux->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < desc->num_supplies; i++)
+ innolux->supplies[i].supply = desc->supply_names[i];
- innolux->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(innolux->supply))
- return PTR_ERR(innolux->supply);
+ err = devm_regulator_bulk_get(dev, desc->num_supplies,
+ innolux->supplies);
+ if (err < 0)
+ return err;
innolux->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
@@ -230,15 +487,21 @@ static int innolux_panel_add(struct innolux_panel *innolux)
}
innolux->backlight = devm_of_find_backlight(dev);
-
if (IS_ERR(innolux->backlight))
return PTR_ERR(innolux->backlight);
drm_panel_init(&innolux->base);
innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = &innolux->link->dev;
+ innolux->base.dev = dev;
+
+ err = drm_panel_add(&innolux->base);
+ if (err < 0)
+ return err;
+
+ mipi_dsi_set_drvdata(dsi, innolux);
+ innolux->link = dsi;
- return drm_panel_add(&innolux->base);
+ return 0;
}
static void innolux_panel_del(struct innolux_panel *innolux)
@@ -249,28 +512,19 @@ static void innolux_panel_del(struct innolux_panel *innolux)
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
{
- struct innolux_panel *innolux;
+ const struct panel_desc *desc;
int err;
- dsi->lanes = 4;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_LPM;
-
- innolux = devm_kzalloc(&dsi->dev, sizeof(*innolux), GFP_KERNEL);
- if (!innolux)
- return -ENOMEM;
-
- mipi_dsi_set_drvdata(dsi, innolux);
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->mode_flags = desc->flags;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
- innolux->link = dsi;
-
- err = innolux_panel_add(innolux);
+ err = innolux_panel_add(dsi, desc);
if (err < 0)
return err;
- err = mipi_dsi_attach(dsi);
- return err;
+ return mipi_dsi_attach(dsi);
}
static int innolux_panel_remove(struct mipi_dsi_device *dsi)
@@ -292,7 +546,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
err);
- drm_panel_detach(&innolux->base);
innolux_panel_del(innolux);
return 0;
@@ -318,5 +571,6 @@ static struct mipi_dsi_driver innolux_panel_driver = {
module_mipi_dsi_driver(innolux_panel_driver);
MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
MODULE_DESCRIPTION("Innolux P079ZCA panel driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 0a94ab79a6c0..99caa7835e7b 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -500,7 +500,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
ret);
- drm_panel_detach(&jdi->base);
jdi_panel_del(jdi);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 5185819c5b79..8a1687887ae9 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -282,7 +282,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
{
struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&lvds->panel);
drm_panel_remove(&lvds->panel);
panel_lvds_disable(&lvds->panel);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 90f1ae4af93c..87fa316e1d7b 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -14,8 +14,6 @@
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
-#define DRV_NAME "orisetech_otm8009a"
-
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255
@@ -98,6 +96,20 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
DRM_WARN("mipi dsi dcs write buffer failed\n");
}
+static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* data will be sent in DSI HS mode (i.e. no LPM) */
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ otm8009a_dcs_write_buf(ctx, data, len);
+
+ /* restore the DSI LPM mode */
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+}
+
#define dcs_write_seq(ctx, seq...) \
({ \
static const u8 d[] = { seq }; \
@@ -248,11 +260,7 @@ static int otm8009a_disable(struct drm_panel *panel)
if (!ctx->enabled)
return 0; /* This is not an issue so we return 0 here */
- /* Power off the backlight. Note: end-user still controls brightness */
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- ret = backlight_update_status(ctx->bl_dev);
- if (ret)
- return ret;
+ backlight_disable(ctx->bl_dev);
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret)
@@ -316,13 +324,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
ctx->prepared = true;
- /*
- * Power on the backlight. Note: end-user still controls brightness
- * Note: ctx->prepared must be true before updating the backlight.
- */
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(ctx->bl_dev);
-
return 0;
}
@@ -330,6 +331,11 @@ static int otm8009a_enable(struct drm_panel *panel)
{
struct otm8009a *ctx = panel_to_otm8009a(panel);
+ if (ctx->enabled)
+ return 0;
+
+ backlight_enable(ctx->bl_dev);
+
ctx->enabled = true;
return 0;
@@ -387,7 +393,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
*/
data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
data[1] = bd->props.brightness;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
/* set Brightness Control & Backlight on */
data[1] = 0x24;
@@ -399,7 +405,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
/* Update Brightness Control & Backlight */
data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
return 0;
}
@@ -444,11 +450,14 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->panel.dev = dev;
ctx->panel.funcs = &otm8009a_drm_funcs;
- ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
- &otm8009a_backlight_ops, NULL);
+ ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+ dsi->host->dev, ctx,
+ &otm8009a_backlight_ops,
+ NULL);
if (IS_ERR(ctx->bl_dev)) {
- dev_err(dev, "failed to register backlight device\n");
- return PTR_ERR(ctx->bl_dev);
+ ret = PTR_ERR(ctx->bl_dev);
+ dev_err(dev, "failed to register backlight: %d\n", ret);
+ return ret;
}
ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
@@ -466,11 +475,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return ret;
}
- DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh,
- mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
-
return 0;
}
@@ -481,8 +485,6 @@ static int otm8009a_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
- backlight_device_unregister(ctx->bl_dev);
-
return 0;
}
@@ -496,7 +498,7 @@ static struct mipi_dsi_driver orisetech_otm8009a_driver = {
.probe = otm8009a_probe,
.remove = otm8009a_remove,
.driver = {
- .name = DRV_NAME "_panel",
+ .name = "panel-orisetech-otm8009a",
.of_match_table = orisetech_otm8009a_of_match,
},
};
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 74a806121f80..cb4dfb98be0f 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -299,7 +299,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&wuxga_nt->base);
wuxga_nt_panel_del(wuxga_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index a188a3959f1a..6ad827b93ae1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -823,7 +823,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
int ret, i;
ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
- if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+ if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
dev_err(ctx->dev, "read id failed\n");
ctx->error = -EIO;
return;
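
The extra ret < 0 test matters because ret is a signed int while ARRAY_SIZE() yields a size_t: in the old ret < ARRAY_SIZE(id) comparison a negative error code was converted to a huge unsigned value and sailed past the check. A stand-alone illustration (numbers are made up):

	#include <stdio.h>

	int main(void)
	{
		int ret = -5;		/* stands in for an -EIO style error */
		size_t want = 3;	/* stands in for ARRAY_SIZE(id) */

		/* the unsigned comparison alone misses the error... */
		if (!((size_t)ret < want))
			printf("-5 compares as %zu, check passes\n", (size_t)ret);

		/* ...so the signed test has to come first */
		if (ret < 0 || (size_t)ret < want)
			printf("error caught\n");
		return 0;
	}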
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 71c09ed436ae..75f925390551 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -292,7 +292,6 @@ static int seiko_panel_remove(struct platform_device *pdev)
{
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
seiko_panel_disable(&panel->base);
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 6bf8730f1a21..02fc0f5423d4 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -418,7 +418,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
if (err < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
- drm_panel_detach(&sharp->base);
sharp_panel_del(sharp);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 494aa9b1628a..e5cae0050f52 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -327,7 +327,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&sharp_nt->base);
sharp_nt_panel_del(sharp_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index cbf1ab404ee7..97964f7f2ace 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -252,7 +252,7 @@ static int panel_simple_get_modes(struct drm_panel *panel)
/* probe EDID if a DDC bus is available */
if (p->ddc) {
struct edid *edid = drm_get_edid(panel->connector, p->ddc);
- drm_mode_connector_update_edid_property(panel->connector, edid);
+ drm_connector_update_edid_property(panel->connector, edid);
if (edid) {
num += drm_add_edid_modes(panel->connector, edid);
kfree(edid);
@@ -364,7 +364,6 @@ static int panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
@@ -581,6 +580,34 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct display_timing auo_g070vvn01_timings = {
+ .pixelclock = { 33300000, 34209000, 45000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 20, 40, 200 },
+ .hback_porch = { 87, 40, 1 },
+ .hsync_len = { 1, 48, 87 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 5, 13, 200 },
+ .vback_porch = { 31, 31, 29 },
+ .vsync_len = { 1, 1, 3 },
+};
+
+static const struct panel_desc auo_g070vvn01 = {
+ .timings = &auo_g070vvn01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 50,
+ .disable = 50,
+ .unprepare = 1000,
+ },
+};
+
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@@ -687,7 +714,7 @@ static const struct panel_desc auo_p320hvn03 = {
.enable = 450,
.unprepare = 500,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -745,6 +772,28 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode boe_hv070wsa_mode = {
+ .clock = 40800,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 90,
+ .hsync_end = 1024 + 90 + 90,
+ .htotal = 1024 + 90 + 90 + 90,
+ .vdisplay = 600,
+ .vsync_start = 600 + 3,
+ .vsync_end = 600 + 3 + 4,
+ .vtotal = 600 + 3 + 4 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc boe_hv070wsa = {
+ .modes = &boe_hv070wsa_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 90,
+ },
+};
+
static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
{
.clock = 71900,
@@ -857,6 +906,61 @@ static const struct panel_desc chunghwa_claa101wb01 = {
},
};
+static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = {
+ .clock = 33260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 128,
+ .htotal = 800 + 40 + 128 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 2,
+ .vtotal = 480 + 10 + 2 + 33,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc dataimage_scf0700c48ggu18 = {
+ .modes = &dataimage_scf0700c48ggu18_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct display_timing dlc_dlc0700yzg_1_timing = {
+ .pixelclock = { 45000000, 51200000, 57000000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 100, 106, 113 },
+ .hback_porch = { 100, 106, 113 },
+ .hsync_len = { 100, 108, 114 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 8, 11, 15 },
+ .vback_porch = { 8, 11, 15 },
+ .vsync_len = { 9, 13, 15 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc dlc_dlc0700yzg_1 = {
+ .timings = &dlc_dlc0700yzg_1_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 30,
+ .enable = 200,
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
static const struct drm_display_mode edt_et057090dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -909,6 +1013,18 @@ static const struct panel_desc edt_etm0700g0dh6 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
+static const struct panel_desc edt_etm0700g0bdh6 = {
+ .modes = &edt_etm0700g0dh6_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
.clock = 32260,
.hdisplay = 800,
@@ -1086,6 +1202,36 @@ static const struct panel_desc innolux_at070tn92 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing innolux_g070y2_l01_timing = {
+ .pixelclock = { 28000000, 29500000, 32000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 61, 91, 141 },
+ .hback_porch = { 60, 90, 140 },
+ .hsync_len = { 12, 12, 12 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 4, 9, 30 },
+ .vback_porch = { 4, 8, 28 },
+ .vsync_len = { 2, 2, 2 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g070y2_l01 = {
+ .timings = &innolux_g070y2_l01_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 10,
+ .enable = 100,
+ .disable = 100,
+ .unprepare = 800,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 },
@@ -1217,6 +1363,33 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
+static const struct drm_display_mode innolux_tv123wam_mode = {
+ .clock = 206016,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 80,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 10,
+ .vtotal = 1440 + 3 + 10 + 27,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_tv123wam = {
+ .modes = &innolux_tv123wam_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+ .delay = {
+ .unprepare = 500,
+ },
+};
+
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@@ -1247,8 +1420,8 @@ static const struct display_timing koe_tx31d200vm0baa_timing = {
.hback_porch = { 16, 36, 56 },
.hsync_len = { 8, 8, 8 },
.vactive = { 480, 480, 480 },
- .vfront_porch = { 6, 21, 33.5 },
- .vback_porch = { 6, 21, 33.5 },
+ .vfront_porch = { 6, 21, 33 },
+ .vback_porch = { 6, 21, 33 },
.vsync_len = { 8, 8, 8 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
@@ -1511,6 +1684,33 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
+ .modes = &newhaven_nhd_43_480272ef_atxl_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+ DRM_BUS_FLAG_SYNC_POSEDGE,
+};
+
static const struct display_timing nlt_nl192108ac18_02d_timing = {
.pixelclock = { 130000000, 148350000, 163000000 },
.hactive = { 1920, 1920, 1920 },
@@ -1696,6 +1896,36 @@ static const struct panel_desc qd43003c0_40 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing rocktech_rk070er9427_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc rocktech_rk070er9427 = {
+ .timings = &rocktech_rk070er9427_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 41,
+ .enable = 50,
+ .unprepare = 41,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.clock = 271560,
.hdisplay = 2560,
@@ -1764,6 +1994,30 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct drm_display_mode sharp_lq035q7db03_mode = {
+ .clock = 5500,
+ .hdisplay = 240,
+ .hsync_start = 240 + 16,
+ .hsync_end = 240 + 16 + 7,
+ .htotal = 240 + 16 + 7 + 5,
+ .vdisplay = 320,
+ .vsync_start = 320 + 9,
+ .vsync_end = 320 + 9 + 1,
+ .vtotal = 320 + 9 + 1 + 7,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc sharp_lq035q7db03 = {
+ .modes = &sharp_lq035q7db03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 54,
+ .height = 72,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct display_timing sharp_lq101k1ly04_timing = {
.pixelclock = { 60000000, 65000000, 80000000 },
.hactive = { 1280, 1280, 1280 },
@@ -2095,6 +2349,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g070vvn01",
+ .data = &auo_g070vvn01,
+ }, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
@@ -2113,6 +2370,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "boe,hv070wsa-100",
+ .data = &boe_hv070wsa
+ }, {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
@@ -2125,6 +2385,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "chunghwa,claa101wb01",
.data = &chunghwa_claa101wb01
}, {
+ .compatible = "dataimage,scf0700c48ggu18",
+ .data = &dataimage_scf0700c48ggu18,
+ }, {
+ .compatible = "dlc,dlc0700yzg-1",
+ .data = &dlc_dlc0700yzg_1,
+ }, {
.compatible = "edt,et057090dhu",
.data = &edt_et057090dhu,
}, {
@@ -2134,6 +2400,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0dh6",
.data = &edt_etm0700g0dh6,
}, {
+ .compatible = "edt,etm0700g0bdh6",
+ .data = &edt_etm0700g0bdh6,
+ }, {
+ .compatible = "edt,etm0700g0edh6",
+ .data = &edt_etm0700g0bdh6,
+ }, {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
@@ -2155,10 +2427,13 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,at070tn92",
.data = &innolux_at070tn92,
}, {
- .compatible ="innolux,g101ice-l01",
+ .compatible = "innolux,g070y2-l01",
+ .data = &innolux_g070y2_l01,
+ }, {
+ .compatible = "innolux,g101ice-l01",
.data = &innolux_g101ice_l01
}, {
- .compatible ="innolux,g121i1-l01",
+ .compatible = "innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
.compatible = "innolux,g121x1-l03",
@@ -2170,6 +2445,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
+ .compatible = "innolux,tv123wam",
+ .data = &innolux_tv123wam,
+ }, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
@@ -2206,6 +2484,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "newhaven,nhd-4.3-480272ef-atxl",
+ .data = &newhaven_nhd_43_480272ef_atxl,
+ }, {
.compatible = "nlt,nl192108ac18-02d",
.data = &nlt_nl192108ac18_02d,
}, {
@@ -2227,6 +2508,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
}, {
+ .compatible = "rocktech,rk070er9427",
+ .data = &rocktech_rk070er9427,
+ }, {
.compatible = "samsung,lsn122dl01-c01",
.data = &samsung_lsn122dl01_c01,
}, {
@@ -2236,6 +2520,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "sharp,lq035q7db03",
+ .data = &sharp_lq035q7db03,
+ }, {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 358c64ef1922..74284e5afc5d 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -419,7 +419,6 @@ static int st7789v_remove(struct spi_device *spi)
{
struct st7789v *ctx = spi_get_drvdata(spi);
- drm_panel_detach(&ctx->panel);
drm_panel_remove(&ctx->panel);
if (ctx->backlight)
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 19a8189dc54f..0c70f0e91d21 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -4,6 +4,7 @@ pl111_drm-y += pl111_display.o \
pl111_drv.o
pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
+pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
obj-$(CONFIG_DRM_PL111) += pl111_drm.o
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 19b0d006a54a..754f6b25f265 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -63,7 +63,7 @@ pl111_mode_valid(struct drm_crtc *crtc,
* We use the pixelclock to also account for interlaced modes, the
* resulting bandwidth is in bytes per second.
*/
- bw = mode->clock * 1000; /* In Hz */
+ bw = mode->clock * 1000ULL; /* In Hz */
bw = bw * mode->hdisplay * mode->vdisplay * cpp;
bw = div_u64(bw, mode->htotal * mode->vtotal);
@@ -223,48 +223,84 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
/* Hard-code TFT panel */
cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+ /* On the ST Micro variant, assume all 24 bits are connected */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_CDWID_24;
- /* Note that the the hardware's format reader takes 'r' from
+ /*
+ * Note that the ARM hardware's format reader takes 'r' from
* the low bit, while DRM formats list channels from high bit
- * to low bit as you read left to right.
+ * to low bit as you read left to right. The ST Micro version of
+ * the PL110 (LCDC), however, uses the standard DRM format.
*/
switch (fb->format->format) {
+ case DRM_FORMAT_BGR888:
+ /* Only supported on the ST Micro variant */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_LCDBPP24_PACKED | CNTL_BGR;
+ break;
+ case DRM_FORMAT_RGB888:
+ /* Only supported on the ST Micro variant */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_LCDBPP24_PACKED;
+ break;
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
- cntl |= CNTL_LCDBPP24;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+ else
+ cntl |= CNTL_LCDBPP24;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
- cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP24;
+ else
+ cntl |= CNTL_LCDBPP24 | CNTL_BGR;
break;
case DRM_FORMAT_BGR565:
if (priv->variant->is_pl110)
cntl |= CNTL_LCDBPP16;
+ else if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565 | CNTL_BGR;
else
cntl |= CNTL_LCDBPP16_565;
break;
case DRM_FORMAT_RGB565:
if (priv->variant->is_pl110)
- cntl |= CNTL_LCDBPP16;
+ cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+ else if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565;
else
- cntl |= CNTL_LCDBPP16_565;
- cntl |= CNTL_BGR;
+ cntl |= CNTL_LCDBPP16_565 | CNTL_BGR;
break;
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_XBGR1555:
cntl |= CNTL_LCDBPP16;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_5551 | CNTL_BGR;
break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+ cntl |= CNTL_LCDBPP16;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_5551;
+ else
+ cntl |= CNTL_BGR;
break;
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_XBGR4444:
cntl |= CNTL_LCDBPP16_444;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_444 | CNTL_BGR;
break;
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XRGB4444:
- cntl |= CNTL_LCDBPP16_444 | CNTL_BGR;
+ cntl |= CNTL_LCDBPP16_444;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_444;
+ else
+ cntl |= CNTL_BGR;
break;
default:
WARN_ONCE(true, "Unknown FB format 0x%08x\n",
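
The 1000ULL change in the first hunk promotes the whole product to 64-bit arithmetic, so an unusually large pixel clock cannot wrap a plain int before the assignment, and the later per-pixel products, which easily exceed 32 bits, stay in u64. A user-space sketch of the same arithmetic with made-up mode numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 148.5 MHz clock, 1920x1080 active, 2200x1125 total, 4 Bpp */
		uint64_t bw = 148500 * 1000ULL;	/* ULL keeps this in 64 bits */

		bw = bw * 1920 * 1080 * 4;	/* ~1.2e15, far beyond 32 bits */
		bw /= (uint64_t)2200 * 1125;
		printf("%llu bytes/s\n", (unsigned long long)bw); /* ~497 MB/s */
		return 0;
	}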
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index ce4501d0ab48..1aa015ccacef 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -36,11 +36,14 @@ struct drm_minor;
* struct pl111_variant_data - encodes IP differences
* @name: the name of this variant
* @is_pl110: this is the early PL110 variant
+ * @is_lcdc: this is the ST Microelectronics Nomadik LCDC variant
* @external_bgr: this is the Versatile Pl110 variant with external
* BGR/RGB routing
* @broken_clockdivider: the clock divider is broken and we need to
* use the supplied clock directly
* @broken_vblank: the vblank IRQ is broken on this variant
+ * @st_bitmux_control: this variant is using the ST Micro bitmux
+ * extensions to the control register
* @formats: array of supported pixel formats on this variant
* @nformats: the length of the array of supported pixel formats
* @fb_bpp: desired bits per pixel on the default framebuffer
@@ -48,9 +51,11 @@ struct drm_minor;
struct pl111_variant_data {
const char *name;
bool is_pl110;
+ bool is_lcdc;
bool external_bgr;
bool broken_clockdivider;
bool broken_vblank;
+ bool st_bitmux_control;
const u32 *formats;
unsigned int nformats;
unsigned int fb_bpp;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 454ff0804642..47fe30223444 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -75,6 +75,7 @@
#include "pl111_drm.h"
#include "pl111_versatile.h"
+#include "pl111_nomadik.h"
#define DRIVER_DESC "DRM module for PL111"
@@ -249,6 +250,8 @@ static struct drm_driver pl111_drm_driver = {
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
@@ -288,8 +291,8 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
priv->memory_bw = 0;
}
- /* The two variants swap this register */
- if (variant->is_pl110) {
+ /* The two main variants swap this register */
+ if (variant->is_pl110 || variant->is_lcdc) {
priv->ienb = CLCD_PL110_IENB;
priv->ctrl = CLCD_PL110_CNTL;
} else {
@@ -301,13 +304,15 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
if (IS_ERR(priv->regs)) {
dev_err(dev, "%s failed mmio\n", __func__);
ret = PTR_ERR(priv->regs);
- goto dev_unref;
+ goto dev_put;
}
/* This may override some variant settings */
ret = pl111_versatile_init(dev, priv);
if (ret)
- goto dev_unref;
+ goto dev_put;
+
+ pl111_nomadik_init(dev);
/* turn off interrupts before requesting the irq */
writel(0, priv->regs + priv->ienb);
@@ -321,16 +326,16 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
ret = pl111_modeset_init(drm);
if (ret != 0)
- goto dev_unref;
+ goto dev_put;
ret = drm_dev_register(drm, 0);
if (ret < 0)
- goto dev_unref;
+ goto dev_put;
return 0;
-dev_unref:
- drm_dev_unref(drm);
+dev_put:
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
return ret;
@@ -347,7 +352,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
return 0;
@@ -400,16 +405,50 @@ static const struct pl111_variant_data pl111_variant = {
.fb_bpp = 32,
};
+static const u32 pl110_nomadik_pixel_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+};
+
+static const struct pl111_variant_data pl110_nomadik_variant = {
+ .name = "LCDC (PL110 Nomadik)",
+ .formats = pl110_nomadik_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_nomadik_pixel_formats),
+ .is_lcdc = true,
+ .st_bitmux_control = true,
+ .broken_vblank = true,
+ .fb_bpp = 16,
+};
+
static const struct amba_id pl111_id_table[] = {
{
.id = 0x00041110,
.mask = 0x000fffff,
- .data = (void*)&pl110_variant,
+ .data = (void *)&pl110_variant,
+ },
+ {
+ .id = 0x00180110,
+ .mask = 0x00fffffe,
+ .data = (void *)&pl110_nomadik_variant,
},
{
.id = 0x00041111,
.mask = 0x000fffff,
- .data = (void*)&pl111_variant,
+ .data = (void *)&pl111_variant,
},
{0, 0},
};
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.c b/drivers/gpu/drm/pl111/pl111_nomadik.c
new file mode 100644
index 000000000000..6f385e59be22
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include "pl111_nomadik.h"
+
+#define PMU_CTRL_OFFSET 0x0000
+#define PMU_CTRL_LCDNDIF BIT(26)
+
+void pl111_nomadik_init(struct device *dev)
+{
+ struct regmap *pmu_regmap;
+
+ /*
+ * Just bail out if this is not found; we could be running
+ * multiplatform on something other than Nomadik.
+ */
+ pmu_regmap =
+ syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
+ if (IS_ERR(pmu_regmap))
+ return;
+
+ /*
+ * This bit in the PMU controller multiplexes the two graphics
+ * blocks found in the Nomadik STn8815. The other one is called
+ * MDIF (Master Display Interface) and gets muxed out here.
+ */
+ regmap_update_bits(pmu_regmap,
+ PMU_CTRL_OFFSET,
+ PMU_CTRL_LCDNDIF,
+ 0);
+ dev_info(dev, "set Nomadik PMU mux to CLCD mode\n");
+}
+EXPORT_SYMBOL_GPL(pl111_nomadik_init);
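regmap_update_bits() above performs a read-modify-write, clearing only PMU_CTRL_LCDNDIF while leaving the other PMU_CTRL bits intact. Roughly the equivalent open-coded sequence, ignoring regmap's locking and caching:

/* What regmap_update_bits(map, reg, mask, val) boils down to, minus
 * locking and cache handling: new = (old & ~mask) | (val & mask).
 */
unsigned int old, new;

regmap_read(pmu_regmap, PMU_CTRL_OFFSET, &old);
new = (old & ~PMU_CTRL_LCDNDIF) | (0 & PMU_CTRL_LCDNDIF);
if (new != old)
	regmap_write(pmu_regmap, PMU_CTRL_OFFSET, new);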
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.h b/drivers/gpu/drm/pl111/pl111_nomadik.h
new file mode 100644
index 000000000000..19d663d46353
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/device.h>
+
+#ifndef PL111_NOMADIK_H
+#define PL111_NOMADIK_H
+
+#ifdef CONFIG_ARCH_NOMADIK
+
+void pl111_nomadik_init(struct device *dev);
+
+#else
+
+static inline void pl111_nomadik_init(struct device *dev)
+{
+}
+
+#endif
+
+#endif /* PL111_NOMADIK_H */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 768207fbbae3..0570c6826bff 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1086,7 +1086,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
- drm_mode_connector_attach_encoder(&qxl_output->base,
+ drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 7cb214577275..e37f0097f744 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence)
return "release";
}
-static bool qxl_nop_signaling(struct dma_fence *fence)
-{
- /* fences are always automatically signaled, so just pretend we did this.. */
- return true;
-}
-
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
@@ -119,7 +113,6 @@ signaled:
static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
- .enable_signaling = qxl_nop_signaling,
.wait = qxl_fence_wait,
};
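The removal works because the dma-fence core treats a missing .enable_signaling callback as "signaling is always enabled". Conceptually the core-side guard looks like this (a sketch of the idea, not the exact upstream code):

/* Sketch: a NULL .enable_signaling is treated as "nothing to arm",
 * which is exactly what the removed qxl nop implementation returned.
 */
static bool fence_enable_signaling(struct dma_fence *fence)
{
	if (!fence->ops->enable_signaling)
		return true;	/* fence signals on its own */
	return fence->ops->enable_signaling(fence);
}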
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index b9302c918271..d587779a80b4 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,19 +5676,29 @@ int ci_dpm_init(struct radeon_device *rdev)
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
+ enum pci_bus_speed speed_cap;
+ struct pci_dev *root = rdev->pdev->bus->self;
int ret;
- u32 mask;
pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
if (pi == NULL)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
- else
- pi->sys_pcie_mask = mask;
+ } else {
+ if (speed_cap == PCIE_SPEED_8_0GT)
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50 |
+ RADEON_PCIE_SPEED_80;
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50;
+ else
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+ }
pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
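The same speed_cap-to-mask mapping is repeated in si_dpm_init() further down; were it factored out, a hypothetical helper (not part of this patch; RADEON_PCIE_SPEED_* come from the radeon.h hunk below) could read:

/* Hypothetical helper collecting the duplicated mapping from
 * ci_dpm_init() and si_dpm_init().
 */
static u32 radeon_pcie_mask_from_cap(enum pci_bus_speed speed_cap)
{
	if (speed_cap == PCI_SPEED_UNKNOWN)
		return 0;
	if (speed_cap == PCIE_SPEED_8_0GT)
		return RADEON_PCIE_SPEED_25 | RADEON_PCIE_SPEED_50 |
		       RADEON_PCIE_SPEED_80;
	if (speed_cap == PCIE_SPEED_5_0GT)
		return RADEON_PCIE_SPEED_25 | RADEON_PCIE_SPEED_50;
	return RADEON_PCIE_SPEED_25;
}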
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 7c73bc7e2f85..ebce4601a305 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9499,9 +9499,10 @@ int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
+ enum pci_bus_speed speed_cap;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(rdev->pdev->bus))
@@ -9516,23 +9517,24 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
if (!(rdev->flags & RADEON_IS_PCIE))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN)
return;
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if ((speed_cap != PCIE_SPEED_8_0GT) &&
+ (speed_cap != PCIE_SPEED_5_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (speed_cap == PCIE_SPEED_5_0GT) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -9548,7 +9550,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
@@ -9636,9 +9638,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= 3; /* gen3 */
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 73d4c5348116..5e044c98fca2 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1327,9 +1327,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
case RADEON_PCIE_GEN3:
return RADEON_PCIE_GEN3;
default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
+ if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
return RADEON_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
+ else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
return RADEON_PCIE_GEN2;
else
return RADEON_PCIE_GEN1;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a2eb409aacc..1a6f6edb3515 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1653,6 +1653,10 @@ struct radeon_pm {
struct radeon_dpm dpm;
};
+#define RADEON_PCIE_SPEED_25 1
+#define RADEON_PCIE_SPEED_50 2
+#define RADEON_PCIE_SPEED_80 4
+
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2aea2bdff99b..414642e5b7a3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -244,23 +244,15 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *best_encoder = NULL;
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *best_encoder;
+ struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
best_encoder = connector_funcs->best_encoder(connector);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -270,7 +262,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
radeon_atombios_connected_scratch_regs(connector, encoder, connected);
else
radeon_combios_connected_scratch_regs(connector, encoder, connected);
-
}
}
@@ -279,17 +270,11 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
+
return NULL;
}
@@ -383,20 +368,23 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
int ret;
if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+ drm_connector_update_edid_property(connector, radeon_connector->edid);
ret = drm_add_edid_modes(connector, radeon_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ struct drm_encoder *encoder;
+ int i;
+
+ /* pick the first one */
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -436,19 +424,19 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
struct radeon_connector *radeon_conflict;
- int i;
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+ struct drm_encoder *enc;
+ int i;
+
if (conflict == connector)
continue;
radeon_conflict = to_radeon_connector(conflict);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (conflict->encoder_ids[i] == 0)
- break;
+ drm_connector_for_each_possible_encoder(conflict, enc, i) {
/* if the IDs match */
- if (conflict->encoder_ids[i] == encoder->base.id) {
+ if (enc == encoder) {
if (conflict->status != connector_status_connected)
continue;
@@ -1256,7 +1244,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1374,15 +1362,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ int i;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1458,18 +1440,11 @@ exit:
/* okay need to be smart in here about which encoder to pick */
static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1484,8 +1459,9 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -1628,14 +1604,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
struct radeon_encoder *radeon_encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1657,14 +1626,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
int i;
bool found = false;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
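Every loop converted in this file follows the same shape: the open-coded walk over connector->encoder_ids[], with its zero-slot break and NULL check, becomes a single iterator. The pattern in isolation (a sketch; the function name is ours):

/* Sketch of the recurring conversion: the macro hands back only valid
 * encoders, so the manual lookup and NULL check disappear.
 */
static struct drm_encoder *find_tmds_encoder(struct drm_connector *connector)
{
	struct drm_encoder *enc;
	int i;

	drm_connector_for_each_possible_encoder(connector, enc, i)
		if (enc->encoder_type == DRM_MODE_ENCODER_TMDS)
			return enc;

	return NULL;
}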
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index cd8a3ee16649..f920be236cc9 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -195,11 +195,11 @@ static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
radeon_connector->edid = edid;
DRM_DEBUG_KMS("edid retrieved %p\n", edid);
if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+ drm_connector_update_edid_property(&radeon_connector->base, NULL);
return ret;
}
@@ -290,7 +290,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
return connector;
}
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c6ee80216cf4..c341fb2a5b56 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -211,7 +211,7 @@ radeon_link_encoder_connector(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices) {
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
radeon_encoder_add_backlight(radeon_encoder, connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index abd24975c9b1..f8b35df44c60 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -118,19 +118,27 @@ static void radeon_mn_release(struct mmu_notifier *mn,
* We block for all BOs between start and end to be idle and
* unmap them by move them into system domain again.
*/
-static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
+ int ret = 0;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- mutex_lock(&rmn->lock);
+ /* TODO we should be able to split locking for interval tree and
+ * the teardown.
+ */
+ if (blockable)
+ mutex_lock(&rmn->lock);
+ else if (!mutex_trylock(&rmn->lock))
+ return -EAGAIN;
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@@ -138,6 +146,11 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct radeon_bo *bo;
long r;
+ if (!blockable) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, start, end);
@@ -166,7 +179,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
}
}
+out_unlock:
mutex_unlock(&rmn->lock);
+
+ return ret;
}
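The new blockable parameter lets the oom reaper invoke notifiers from a context that must not sleep. The locking shape used above, reduced to its core (a sketch, not driver code):

/* The non-blocking pattern in isolation: in atomic context a failed
 * trylock bails out with -EAGAIN instead of sleeping on the mutex.
 */
static int range_start_locked(struct mutex *lock, bool blockable)
{
	if (blockable)
		mutex_lock(lock);
	else if (!mutex_trylock(lock))
		return -EAGAIN;

	/* ... walk the interval tree, still honoring !blockable ... */

	mutex_unlock(lock);
	return 0;
}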
static const struct mmu_notifier_ops radeon_mn_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index edbb4cd519fd..ba2fd295697f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -307,7 +307,7 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
@@ -320,9 +320,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
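ttm_bo_unref() used to clear the caller's pointer as a side effect; the kref-style ttm_bo_get()/ttm_bo_put() leave pointer hygiene to the caller, which is why the NULL assignment above is now explicit. In sketch form:

/* Sketch of the semantic shift: the old API NULLed the pointer for us;
 * with get/put naming the caller forgets the pointer explicitly.
 */
struct ttm_buffer_object *tbo = &(*bo)->tbo;

ttm_bo_put(tbo);	/* drop the reference */
*bo = NULL;		/* forget our pointer ourselves */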
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 48f4b273e316..0c7f228db6e3 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2009 VMware, Inc.
*
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8689fcca051c..cbb67e9ffb3a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -947,11 +947,11 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
-static int radeon_ttm_fault(struct vm_fault *vmf)
+static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
struct radeon_device *rdev;
- int r;
+ vm_fault_t ret;
bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
if (bo == NULL) {
@@ -959,9 +959,9 @@ static int radeon_ttm_fault(struct vm_fault *vmf)
}
rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- r = ttm_vm_ops->fault(vmf);
+ ret = ttm_vm_ops->fault(vmf);
up_read(&rdev->pm.mclk_lock);
- return r;
+ return ret;
}
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 1907c950d76f..85c604d29235 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7082,9 +7082,10 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
+ enum pci_bus_speed speed_cap;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(rdev->pdev->bus))
@@ -7099,23 +7100,24 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
if (!(rdev->flags & RADEON_IS_PCIE))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN)
return;
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if ((speed_cap != PCIE_SPEED_8_0GT) &&
+ (speed_cap != PCIE_SPEED_5_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (speed_cap == PCIE_SPEED_5_0GT) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -7131,7 +7133,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
@@ -7219,9 +7221,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= 3; /* gen3 */
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index fea88078cf8e..8fb60b3af015 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,8 +6899,9 @@ int si_dpm_init(struct radeon_device *rdev)
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
+ enum pci_bus_speed speed_cap;
+ struct pci_dev *root = rdev->pdev->bus->self;
int ret;
- u32 mask;
si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
@@ -6910,11 +6911,20 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
- else
- si_pi->sys_pcie_mask = mask;
+ } else {
+ if (speed_cap == PCIE_SPEED_8_0GT)
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50 |
+ RADEON_PCIE_SPEED_80;
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50;
+ else
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+ }
si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 155ad840f3c5..4c39de3f4f0f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -353,7 +353,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
@@ -434,8 +434,8 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
ret = -EPROBE_DEFER;
} else {
lvds->panel = of_drm_find_panel(remote);
- if (!lvds->panel)
- ret = -EPROBE_DEFER;
+ if (IS_ERR(lvds->panel))
+ ret = PTR_ERR(lvds->panel);
}
done:
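of_drm_find_panel() now returns an ERR_PTR instead of NULL on failure, so callers propagate the helper's own error code (typically -EPROBE_DEFER when the panel has not registered yet, an assumption based on this hunk). The new calling convention:

/* Calling convention after the API change: never NULL, so use
 * IS_ERR()/PTR_ERR() rather than a NULL check.
 */
struct drm_panel *panel = of_drm_find_panel(remote);

if (IS_ERR(panel))
	return PTR_ERR(panel);	/* e.g. -EPROBE_DEFER */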
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c6fbdcd87c16..8ad0d773dc33 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -275,7 +275,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
dp->sink_has_audio = drm_detect_monitor_audio(edid);
ret = drm_add_edid_modes(connector, edid);
if (ret)
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
}
mutex_unlock(&dp->lock);
@@ -1062,7 +1062,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_ERROR("failed to attach connector and encoder\n");
goto err_free_connector;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index eb3042c6d1b2..3105965fc260 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -792,7 +792,6 @@ err_config_video:
int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
{
- u32 val;
int ret;
ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
@@ -801,11 +800,7 @@ int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
return ret;
}
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
+ writel(0, dp->regs + SPDIF_CTRL_ADDR);
/* clear the audio config and reset */
writel(0, dp->regs + AUDIO_SRC_CNTL);
@@ -929,12 +924,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
{
u32 val;
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
-
writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
@@ -942,9 +931,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
writel(val, dp->regs + SPDIF_CTRL_ADDR);
clk_prepare_enable(dp->spdif_clk);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index d53d5a09547f..662b6cb5d3f0 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -595,7 +595,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (dsi->panel)
+ if (!IS_ERR(dsi->panel))
return drm_panel_attach(dsi->panel, &dsi->connector);
return -EINVAL;
@@ -1129,7 +1129,7 @@ static int dw_mipi_dsi_register(struct drm_device *drm,
&dw_mipi_dsi_atomic_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 88d0774c97bd..1c02b3e61299 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -565,7 +565,7 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
if (edid) {
hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -634,7 +634,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index d4f4118b482d..ea18cb2a76c0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -18,52 +18,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_psr.h"
-#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
-
-struct rockchip_drm_fb {
- struct drm_framebuffer fb;
- struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
-};
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane)
-{
- struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
-
- if (plane >= ROCKCHIP_MAX_FB_BUFFER)
- return NULL;
-
- return rk_fb->obj[plane];
-}
-
-static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
- int i;
-
- for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
- drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
-
- drm_framebuffer_cleanup(fb);
- kfree(rockchip_fb);
-}
-
-static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-
- return drm_gem_handle_create(file_priv,
- rockchip_fb->obj[0], handle);
-}
-
static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int flags, unsigned int color,
@@ -75,46 +36,45 @@ static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
- .destroy = rockchip_drm_fb_destroy,
- .create_handle = rockchip_drm_fb_create_handle,
- .dirty = rockchip_drm_fb_dirty,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = rockchip_drm_fb_dirty,
};
-static struct rockchip_drm_fb *
+static struct drm_framebuffer *
rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
int ret;
int i;
- rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
- if (!rockchip_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
- rockchip_fb->obj[i] = obj[i];
+ fb->obj[i] = obj[i];
- ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
- &rockchip_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize framebuffer: %d\n",
ret);
- kfree(rockchip_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return rockchip_fb;
+ return fb;
}
static struct drm_framebuffer *
rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
struct drm_gem_object *obj;
unsigned int hsub;
@@ -153,13 +113,13 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
objs[i] = obj;
}
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
- if (IS_ERR(rockchip_fb)) {
- ret = PTR_ERR(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto err_gem_object_unreference;
}
- return &rockchip_fb->fb;
+ return fb;
err_gem_object_unreference:
for (i--; i >= 0; i--)
@@ -242,13 +202,13 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
- if (IS_ERR(rockchip_fb))
- return ERR_CAST(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+ if (IS_ERR(fb))
+ return ERR_CAST(fb);
- return &rockchip_fb->fb;
+ return fb;
}
void rockchip_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index 2fe47f1ee98f..f1265cb1aee8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -22,7 +22,4 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
void rockchip_drm_mode_config_init(struct drm_device *dev);
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane);
#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 2121345a61af..1359e5c773e4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -243,18 +243,6 @@ static enum vop_data_format vop_convert_format(uint32_t format)
}
}
-static bool is_yuv_support(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV24:
- return true;
- default:
- return false;
- }
-}
-
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
uint32_t dst, bool is_horizontal,
int vsu_mode, int *vskiplines)
@@ -298,7 +286,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint16_t cbcr_ver_scl_mode = SCALE_NONE;
int hsub = drm_format_horz_chroma_subsampling(pixel_format);
int vsub = drm_format_vert_chroma_subsampling(pixel_format);
- bool is_yuv = is_yuv_support(pixel_format);
+ const struct drm_format_info *info;
+ bool is_yuv = false;
uint16_t cbcr_src_w = src_w / hsub;
uint16_t cbcr_src_h = src_h / vsub;
uint16_t vsu_mode;
@@ -306,6 +295,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint32_t val;
int vskiplines;
+ info = drm_format_info(pixel_format);
+
+ if (info->is_yuv)
+ is_yuv = true;
+
if (dst_w > 3840) {
DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
return;
@@ -486,6 +480,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
+static int vop_core_clks_enable(struct vop *vop)
+{
+ int ret;
+
+ ret = clk_enable(vop->hclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(vop->aclk);
+ if (ret < 0)
+ goto err_disable_hclk;
+
+ return 0;
+
+err_disable_hclk:
+ clk_disable(vop->hclk);
+ return ret;
+}
+
+static void vop_core_clks_disable(struct vop *vop)
+{
+ clk_disable(vop->aclk);
+ clk_disable(vop->hclk);
+}
+
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -497,17 +516,13 @@ static int vop_enable(struct drm_crtc *crtc)
return ret;
}
- ret = clk_enable(vop->hclk);
+ ret = vop_core_clks_enable(vop);
if (WARN_ON(ret < 0))
goto err_put_pm_runtime;
ret = clk_enable(vop->dclk);
if (WARN_ON(ret < 0))
- goto err_disable_hclk;
-
- ret = clk_enable(vop->aclk);
- if (WARN_ON(ret < 0))
- goto err_disable_dclk;
+ goto err_disable_core;
/*
* Slave iommu shares power, irq and clock with vop. It was associated
@@ -519,7 +534,7 @@ static int vop_enable(struct drm_crtc *crtc)
if (ret) {
DRM_DEV_ERROR(vop->dev,
"failed to attach dma mapping, %d\n", ret);
- goto err_disable_aclk;
+ goto err_disable_dclk;
}
spin_lock(&vop->reg_lock);
@@ -552,18 +567,14 @@ static int vop_enable(struct drm_crtc *crtc)
spin_unlock(&vop->reg_lock);
- enable_irq(vop->irq);
-
drm_crtc_vblank_on(crtc);
return 0;
-err_disable_aclk:
- clk_disable(vop->aclk);
err_disable_dclk:
clk_disable(vop->dclk);
-err_disable_hclk:
- clk_disable(vop->hclk);
+err_disable_core:
+ vop_core_clks_disable(vop);
err_put_pm_runtime:
pm_runtime_put_sync(vop->dev);
return ret;
@@ -599,8 +610,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
vop_dsp_hold_valid_irq_disable(vop);
- disable_irq(vop->irq);
-
vop->is_enabled = false;
/*
@@ -609,8 +618,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
clk_disable(vop->dclk);
- clk_disable(vop->aclk);
- clk_disable(vop->hclk);
+ vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
mutex_unlock(&vop->vop_lock);
@@ -666,7 +674,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd when clipping, but the YUV plane start point
* needs to be aligned to 2 pixels.
*/
- if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+ if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
@@ -728,7 +736,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
return;
}
- obj = rockchip_fb_get_gem_obj(fb, 0);
+ obj = fb->obj[0];
rk_obj = to_rockchip_obj(obj);
actual_w = drm_rect_width(src) >> 16;
@@ -753,12 +761,12 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
- if (is_yuv_support(fb->format->format)) {
+ if (fb->format->is_yuv) {
int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
int bpp = fb->format->cpp[1];
- uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+ uv_obj = fb->obj[1];
rk_uv_obj = to_rockchip_obj(uv_obj);
offset = (src->x1 >> 16) * bpp / hsub;
@@ -1178,6 +1186,18 @@ static irqreturn_t vop_isr(int irq, void *data)
int ret = IRQ_NONE;
/*
+ * The irq is shared with the iommu. If the runtime-pm state of the
+ * vop-device is disabled the irq has to be targeted at the iommu.
+ */
+ if (!pm_runtime_get_if_in_use(vop->dev))
+ return IRQ_NONE;
+
+ if (vop_core_clks_enable(vop)) {
+ DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
+ goto out;
+ }
+
+ /*
* interrupt register has interrupt status, enable and clear bits, we
* must hold irq_lock to avoid a race with enable/disable_vblank().
*/
@@ -1192,7 +1212,7 @@ static irqreturn_t vop_isr(int irq, void *data)
/* This is expected for vop iommu irqs, since the irq is shared */
if (!active_irqs)
- return IRQ_NONE;
+ goto out_disable;
if (active_irqs & DSP_HOLD_VALID_INTR) {
complete(&vop->dsp_hold_completion);
@@ -1218,6 +1238,10 @@ static irqreturn_t vop_isr(int irq, void *data)
DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
active_irqs);
+out_disable:
+ vop_core_clks_disable(vop);
+out:
+ pm_runtime_put(vop->dev);
return ret;
}
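Since the VOP shares its interrupt line with the IOMMU, the handler must not touch VOP registers while the device is runtime-suspended; pm_runtime_get_if_in_use() only takes a reference if the device is already active. The guard in isolation (a sketch; names are ours):

/* Sketch of the shared-irq guard: service the interrupt only when the
 * device already holds an active runtime-PM reference.
 */
static irqreturn_t shared_isr(int irq, void *data)
{
	struct device *dev = data;

	if (!pm_runtime_get_if_in_use(dev))
		return IRQ_NONE;	/* suspended: the irq is the iommu's */

	/* ... read and clear the interrupt status ... */

	pm_runtime_put(dev);
	return IRQ_HANDLED;
}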
@@ -1278,7 +1302,7 @@ static int vop_create_crtc(struct vop *vop)
for (i = 0; i < vop_data->win_size; i++) {
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win_data = vop_win->data;
- unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+ unsigned long possible_crtcs = drm_crtc_mask(crtc);
if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
continue;
@@ -1596,9 +1620,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_disable_pm_runtime;
- /* IRQ is initially disabled; it gets enabled in power_on */
- disable_irq(vop->irq);
-
return 0;
err_disable_pm_runtime:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 084acdd0019a..fcb91041a666 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -331,16 +331,19 @@ static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
{
int lb_mode;
- if (width > 2560)
- lb_mode = LB_RGB_3840X2;
- else if (width > 1920)
- lb_mode = LB_RGB_2560X4;
- else if (!is_yuv)
- lb_mode = LB_RGB_1920X5;
- else if (width > 1280)
- lb_mode = LB_YUV_3840X5;
- else
- lb_mode = LB_YUV_2560X8;
+ if (is_yuv) {
+ if (width > 1280)
+ lb_mode = LB_YUV_3840X5;
+ else
+ lb_mode = LB_YUV_2560X8;
+ } else {
+ if (width > 2560)
+ lb_mode = LB_RGB_3840X2;
+ else if (width > 1920)
+ lb_mode = LB_RGB_2560X4;
+ else
+ lb_mode = LB_RGB_1920X5;
+ }
return lb_mode;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index e67f4ea28c0e..456bd9f13bae 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
of_property_read_u32(endpoint, "reg", &endpoint_id);
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
&lvds->panel, &lvds->bridge);
- if (!ret)
+ if (!ret) {
+ of_node_put(endpoint);
break;
+ }
}
if (!child_count) {
DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
@@ -432,7 +434,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
drm_connector_helper_add(connector,
&rockchip_lvds_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach encoder: %d\n", ret);
@@ -446,14 +448,12 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- lvds->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
goto err_free_encoder;
}
- encoder->bridge = lvds->bridge;
}
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 2db89bed52e8..7559a820bd43 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -971,7 +971,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dma && dma->buflist) {
- if (cmdbuf->dma_idx > dma->buf_count) {
+ if (cmdbuf->dma_idx >= dma->buf_count) {
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
cmdbuf->dma_idx, dma->buf_count - 1);
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index bd0377c0d2ee..7665883f81d4 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -20,7 +20,6 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
#
-ccflags-y := -Iinclude/drm
gpu-sched-y := gpu_scheduler.o sched_fence.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 44d480768dfe..4fc211e19d6e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -21,6 +21,29 @@
*
*/
+/**
+ * DOC: Overview
+ *
+ * The GPU scheduler provides entities which allow userspace to push jobs
+ * into software queues which are then scheduled on a hardware run queue.
+ * The software queues have a priority among them. The scheduler selects entities
+ * from the run queue using a FIFO. The scheduler provides dependency handling
+ * features among jobs. The driver is expected to provide callback functions to
+ * the scheduler for backend operations, such as submitting a job to the hardware
+ * run queue, returning the dependencies of a job, etc.
+ *
+ * The organisation of the scheduler is the following:
+ *
+ * 1. Each hw run queue has one scheduler
+ * 2. Each scheduler has multiple run queues with different priorities
+ * (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
+ * 3. Each scheduler run queue has a queue of entities to schedule
+ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
+ * the hardware.
+ *
+ * The jobs in an entity are always scheduled in the order in which they were pushed.
+ */
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
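Pulling the reworked signatures in this patch together, the driver-side flow described in the overview above looks roughly like this (a hypothetical sketch; error handling trimmed, and sched/entity/job/owner stand in for the driver's own objects):

/* Hypothetical submit flow under the reworked API: the entity is bound
 * to a run-queue list at init time, and the job derives its scheduler
 * from the entity instead of taking one explicitly.
 */
static int example_submit(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_job *job, void *owner)
{
	struct drm_sched_rq *rq_list[] = {
		&sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
	};
	int ret;

	ret = drm_sched_entity_init(entity, rq_list, ARRAY_SIZE(rq_list), NULL);
	if (ret)
		return ret;

	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ret;

	drm_sched_entity_push_job(job, entity);
	return 0;
}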
@@ -39,14 +62,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-/* Initialize a given run queue struct */
-static void drm_sched_rq_init(struct drm_sched_rq *rq)
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @rq: scheduler run queue
+ *
+ * Initializes a scheduler runqueue.
+ */
+static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->current_entity = NULL;
+ rq->sched = sched;
}
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Adds a scheduler entity to the run queue.
+ */
static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
@@ -57,6 +96,14 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
spin_unlock(&rq->lock);
}
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Removes a scheduler entity from the run queue.
+ */
static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
@@ -70,9 +117,9 @@ static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
}
/**
- * Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
*
- * @rq The run queue to check.
+ * @rq: scheduler run queue to check.
*
* Try to find a ready entity, returns NULL if none found.
*/
@@ -112,30 +159,33 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
- * Init a context entity used by scheduler when submit to HW ring.
+ * drm_sched_entity_init - Init a context entity used by the scheduler
+ * when submitting to the HW ring.
+ *
+ * @entity: scheduler entity to init
+ * @rq_list: the list of run queues on which jobs from this
+ * entity can be submitted
+ * @num_rq_list: number of run queues in rq_list
+ * @guilty: atomic_t set to 1 when a job on this queue
+ * is found to be guilty causing a timeout
*
- * @sched The pointer to the scheduler
- * @entity The pointer to a valid drm_sched_entity
- * @rq The run queue this entity belongs
- * @guilty atomic_t set to 1 when a job on this queue
- * is found to be guilty causing a timeout
+ * Note: the rq_list should have at least one element to schedule
+ * the entity
*
- * return 0 if succeed. negative error code on failure
+ * Returns 0 on success or a negative error code on failure.
*/
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty)
{
- if (!(sched && entity && rq))
+ if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
- entity->rq = rq;
- entity->sched = sched;
+ entity->rq = rq_list[0];
entity->guilty = guilty;
- entity->fini_status = 0;
entity->last_scheduled = NULL;
spin_lock_init(&entity->rq_lock);
@@ -149,40 +199,27 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
- * Query if entity is initialized
+ * drm_sched_entity_is_idle - Check if entity is idle
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
- *
- * return true if entity is initialized, false otherwise
-*/
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
-{
- return entity->sched == sched &&
- entity->rq != NULL;
-}
-
-/**
- * Check if entity is idle
+ * @entity: scheduler entity
*
- * @entity The pointer to a valid scheduler entity
- *
- * Return true if entity don't has any unscheduled jobs.
+ * Returns true if the entity does not have any unscheduled jobs.
*/
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
- if (spsc_queue_peek(&entity->job_queue) == NULL)
+
+ if (list_empty(&entity->list) ||
+ spsc_queue_peek(&entity->job_queue) == NULL)
return true;
return false;
}
/**
- * Check if entity is ready
+ * drm_sched_entity_is_ready - Check if entity is ready
*
- * @entity The pointer to a valid scheduler entity
+ * @entity: scheduler entity
*
* Return true if entity could provide a job.
*/
@@ -210,44 +247,67 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
/**
- * Destroy a context entity
+ * drm_sched_entity_flush - Flush a context entity
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
+ * @entity: scheduler entity
+ * @timeout: time to wait, in jiffies, for the queue to become empty.
*
- * Splitting drm_sched_entity_fini() into two functions, The first one is does the waiting,
+ * Splitting drm_sched_entity_fini() into two functions: the first one does the waiting,
* removes the entity from the runqueue and returns an error when the process was killed.
+ *
+ * Returns the remaining time, in jiffies, left from the input timeout.
*/
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
- if (!drm_sched_entity_is_initialized(sched, entity))
- return;
+ struct drm_gpu_scheduler *sched;
+ struct task_struct *last_user;
+ long ret = timeout;
+
+ sched = entity->rq->sched;
/**
* The client will not queue more IBs during this fini; consume existing
* queued IBs or discard them on SIGKILL
*/
- if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
- entity->fini_status = -ERESTARTSYS;
- else
- entity->fini_status = wait_event_killable(sched->job_scheduled,
- drm_sched_entity_is_idle(entity));
- drm_sched_entity_set_rq(entity, NULL);
+ if (current->flags & PF_EXITING) {
+ if (timeout)
+ ret = wait_event_timeout(
+ sched->job_scheduled,
+ drm_sched_entity_is_idle(entity),
+ timeout);
+ } else
+ wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity));
+
+
+ /* For a killed process, disable any further IB enqueues right now */
+ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+ if ((!last_user || last_user == current->group_leader) &&
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_rq_remove_entity(entity->rq, entity);
+
+ return ret;
}
-EXPORT_SYMBOL(drm_sched_entity_do_release);
+EXPORT_SYMBOL(drm_sched_entity_flush);
/**
- * Destroy a context entity
+ * drm_sched_entity_cleanup - Destroy a context entity
+ *
+ * @entity: scheduler entity
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
+ * This should be called after drm_sched_entity_flush(). It goes over the
+ * entity and signals all jobs with an error code if the process was killed.
*
- * The second one then goes over the entity and signals all jobs with an error code.
*/
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
- if (entity->fini_status) {
+ struct drm_gpu_scheduler *sched;
+
+ sched = entity->rq->sched;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+
+ /* Consumption of existing IBs wasn't completed. Forcefully
+ * remove them here.
+ */
+ if (spsc_queue_peek(&entity->job_queue)) {
struct drm_sched_job *job;
int r;
@@ -267,27 +327,43 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
struct drm_sched_fence *s_fence = job->s_fence;
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
- r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb);
- if (r == -ENOENT)
+
+ /*
+ * When the pipe is hung by an older entity, the new entity might
+ * not even have had a chance to submit its first job to the HW,
+ * and so entity->last_scheduled will remain NULL
+ */
+ if (!entity->last_scheduled) {
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
- else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
+ } else {
+ r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb);
+ if (r == -ENOENT)
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n", r);
+ }
}
}
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = NULL;
}
-EXPORT_SYMBOL(drm_sched_entity_cleanup);
+EXPORT_SYMBOL(drm_sched_entity_fini);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+/**
+ * drm_sched_entity_destroy - Destroy a context entity
+ *
+ * @entity: scheduler entity
+ *
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
+ */
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
- drm_sched_entity_do_release(sched, entity);
- drm_sched_entity_cleanup(sched, entity);
+ drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_fini(entity);
}
-EXPORT_SYMBOL(drm_sched_entity_fini);
+EXPORT_SYMBOL(drm_sched_entity_destroy);
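The rename makes the teardown ordering explicit: flush first (bounded waiting), then fini (forcibly killing whatever is still queued). Drivers that need a custom timeout call the two halves themselves; the combined helper above is exactly this pair:

/* Equivalent open-coded teardown; a driver may substitute its own
 * timeout for the default used by drm_sched_entity_destroy().
 */
drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
drm_sched_entity_fini(entity);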
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
@@ -295,7 +371,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- drm_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->rq->sched);
}
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -306,29 +382,43 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
dma_fence_put(f);
}
+/**
+ * drm_sched_entity_set_rq - Sets the run queue for an entity
+ *
+ * @entity: scheduler entity
+ * @rq: scheduler run queue
+ *
+ * Sets the run queue for an entity and removes the entity from the previous
+ * run queue in which was present.
+ */
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
struct drm_sched_rq *rq)
{
if (entity->rq == rq)
return;
- spin_lock(&entity->rq_lock);
-
- if (entity->rq)
- drm_sched_rq_remove_entity(entity->rq, entity);
+ BUG_ON(!rq);
+ spin_lock(&entity->rq_lock);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
- if (rq)
- drm_sched_rq_add_entity(rq, entity);
-
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);
+/**
+ * drm_sched_dependency_optimized
+ *
+ * @fence: the dependency fence
+ * @entity: the entity which depends on the above fence
+ *
+ * Returns true if the dependency can be optimized and false otherwise
+ */
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
@@ -345,7 +435,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence * fence = entity->dependency;
struct drm_sched_fence *s_fence;
@@ -390,7 +480,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
@@ -413,9 +503,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
}
/**
- * Submit a job to the job queue
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
*
- * @sched_job The pointer to job required to submit
+ * @sched_job: job to submit
+ * @entity: scheduler entity
*
* Note: To guarantee that the order of insertion to queue matches
* the job's fence sequence number this function should be
@@ -431,12 +522,18 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
trace_drm_sched_job(sched_job, entity);
+ WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
/* first job wakes up scheduler */
if (first) {
/* Add the entity to the run queue */
spin_lock(&entity->rq_lock);
+ if (!entity->rq) {
+ DRM_ERROR("Trying to push to a killed entity\n");
+ spin_unlock(&entity->rq_lock);
+ return;
+ }
drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
drm_sched_wakeup(sched);
@@ -452,24 +549,28 @@ static void drm_sched_job_finish(struct work_struct *work)
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
- /* remove job from ring_mirror_list */
- spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- struct drm_sched_job *next;
-
- spin_unlock(&sched->job_list_lock);
- cancel_delayed_work_sync(&s_job->work_tdr);
- spin_lock(&sched->job_list_lock);
+ /*
+ * Canceling the timeout without removing our job from the ring mirror
+ * list is safe, as we will only end up in this worker if our job's
+ * finished fence has been signaled. So even if another worker
+ * manages to find this job as the next job in the list, the fence-
+ * signaled check below will prevent the timeout from being restarted.
+ */
+ cancel_delayed_work_sync(&s_job->work_tdr);
- /* queue TDR for next job */
- next = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ spin_lock(&sched->job_list_lock);
+ /* queue TDR for next job */
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
+ struct drm_sched_job *next = list_next_entry(s_job, node);
- if (next)
+ if (!dma_fence_is_signaled(&next->s_fence->finished))
schedule_delayed_work(&next->work_tdr, sched->timeout);
}
+ /* remove job from ring_mirror_list */
+ list_del(&s_job->node);
spin_unlock(&sched->job_list_lock);
+
dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -506,6 +607,13 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->timedout_job(job);
}
+/**
+ * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ *
+ * @sched: scheduler instance
+ * @bad: bad scheduler job
+ *
+ */
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job;
@@ -550,6 +658,12 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);
+/**
+ * drm_sched_job_recovery - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ *
+ */
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
@@ -599,16 +713,23 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
EXPORT_SYMBOL(drm_sched_job_recovery);
/**
- * Init a sched_job with basic field
+ * drm_sched_job_init - init a scheduler job
+ *
+ * @job: scheduler job to init
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
*
- * Note: Refer to drm_sched_entity_push_job documentation
+ * Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
+ *
+ * Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner)
{
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+
job->sched = sched;
job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
@@ -626,7 +747,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_init);
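For existing callers the conversion is mechanical: the explicit scheduler argument disappears and is derived from the entity instead. A hedged sketch (my_job and entity are placeholders):

/* Before this change: drm_sched_job_init(&my_job->base, sched, &entity, owner); */
/* After: the scheduler is taken from entity->rq->sched internally. */
ret = drm_sched_job_init(&my_job->base, &entity, owner);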
/**
- * Return ture if we can push more jobs to the hw.
+ * drm_sched_ready - is the scheduler ready
+ *
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
*/
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
@@ -635,7 +760,10 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
}
/**
- * Wake up the scheduler when it is ready
+ * drm_sched_wakeup - Wake up the scheduler when it is ready
+ *
+ * @sched: scheduler instance
+ *
*/
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
@@ -644,8 +772,12 @@ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
}
/**
- * Select next entity to process
-*/
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
@@ -665,6 +797,14 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
return entity;
}
+/**
+ * drm_sched_process_job - process a job
+ *
+ * @f: fence
+ * @cb: fence callbacks
+ *
+ * Called after job has finished execution.
+ */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct drm_sched_fence *s_fence =
@@ -680,6 +820,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
wake_up_interruptible(&sched->wake_up_worker);
}
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
if (kthread_should_park()) {
@@ -690,6 +837,13 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
return false;
}
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
static int drm_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
@@ -744,15 +898,17 @@ static int drm_sched_main(void *param)
}
/**
- * Init a gpu scheduler instance
+ * drm_sched_init - Init a gpu scheduler instance
*
- * @sched The pointer to the scheduler
- * @ops The backend operations for this scheduler.
- * @hw_submissions Number of hw submissions to do.
- * @name Name used for debugging
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @name: name used for debugging
*
* Return 0 on success, otherwise error code.
-*/
+ */
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
unsigned hw_submission,
@@ -767,7 +923,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->timeout = timeout;
sched->hang_limit = hang_limit;
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
- drm_sched_rq_init(&sched->sched_rq[i]);
+ drm_sched_rq_init(sched, &sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
@@ -788,9 +944,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
EXPORT_SYMBOL(drm_sched_init);
/**
- * Destroy a gpu scheduler
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
*
- * @sched The pointer to the scheduler
+ * Tears down and cleans up the scheduler.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index df4461648e3f..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -81,11 +81,6 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
return (const char *)fence->sched->name;
}
-static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
-{
- return true;
-}
-
/**
* drm_sched_fence_free - free up the fence memory
*
@@ -134,18 +129,12 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
- .enable_signaling = drm_sched_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = drm_sched_fence_release_scheduled,
};
const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
- .enable_signaling = drm_sched_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = drm_sched_fence_release_finished,
};
@@ -172,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
- fence->sched = entity->sched;
+ fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 54acc117550c..6b943ea1c57d 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -19,7 +19,9 @@ selftest(align64, igt_align64)
selftest(evict, igt_evict)
selftest(evict_range, igt_evict_range)
selftest(bottomup, igt_bottomup)
+selftest(lowest, igt_lowest)
selftest(topdown, igt_topdown)
+selftest(highest, igt_highest)
selftest(color, igt_color)
selftest(color_evict, igt_color_evict)
selftest(color_evict_range, igt_color_evict_range)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 933af1c25387..fbed2c90fd51 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1825,6 +1825,77 @@ err:
return ret;
}
+static int __igt_once(unsigned int mode)
+{
+ struct drm_mm mm;
+ struct drm_mm_node rsvd_lo, rsvd_hi, node;
+ int err;
+
+ drm_mm_init(&mm, 0, 7);
+
+ memset(&rsvd_lo, 0, sizeof(rsvd_lo));
+ rsvd_lo.start = 1;
+ rsvd_lo.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_lo);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err;
+ }
+
+ memset(&rsvd_hi, 0, sizeof(rsvd_hi));
+ rsvd_hi.start = 5;
+ rsvd_hi.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_hi);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err_lo;
+ }
+
+ if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
+ pr_err("Expected a hole after lo and high nodes!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+ memset(&node, 0, sizeof(node));
+ err = drm_mm_insert_node_generic(&mm, &node,
+ 2, 0, 0,
+ mode | DRM_MM_INSERT_ONCE);
+ if (!err) {
+ pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
+ node.start);
+ err = -EINVAL;
+ goto err_node;
+ }
+
+ err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
+ if (err) {
+ pr_err("Could not insert the node into the available hole!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+err_node:
+ drm_mm_remove_node(&node);
+err_hi:
+ drm_mm_remove_node(&rsvd_hi);
+err_lo:
+ drm_mm_remove_node(&rsvd_lo);
+err:
+ drm_mm_takedown(&mm);
+ return err;
+}
+
+static int igt_lowest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_LOW);
+}
+
+static int igt_highest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_HIGH);
+}
+
static void separate_adjacent_colors(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 40df8887fc17..fc66167b0641 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -675,7 +675,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
if (ret < 0)
goto err_cleanup;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
goto err_backlight;
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index df0a282b9615..57b870e1e696 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -332,7 +332,7 @@ static void sti_cursor_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 90c46b49c931..832fc43960ee 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -224,7 +224,7 @@ static int sti_bind(struct device *dev)
ret = sti_init(ddev);
if (ret)
- goto err_drm_dev_unref;
+ goto err_drm_dev_put;
ret = component_bind_all(ddev->dev, ddev);
if (ret)
@@ -248,8 +248,8 @@ err_register:
drm_mode_config_cleanup(ddev);
err_cleanup:
sti_cleanup(ddev);
-err_drm_dev_unref:
- drm_dev_unref(ddev);
+err_drm_dev_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -259,7 +259,7 @@ static void sti_unbind(struct device *dev)
drm_dev_unregister(ddev);
sti_cleanup(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
}
static const struct component_master_ops sti_ops = {
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index a5979cd25cc7..b08376b7611b 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -387,7 +387,9 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
if (!dvo->panel) {
dvo->panel = of_drm_find_panel(dvo->panel_node);
- if (dvo->panel)
+ if (IS_ERR(dvo->panel))
+ dvo->panel = NULL;
+ else
drm_panel_attach(dvo->panel, connector);
}
@@ -484,7 +486,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_dvo_connector_helper_funcs);
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 9b2c47051b51..c32de6cbf061 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -211,7 +211,11 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
- struct drm_crtc *crtc = drm_plane->crtc;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock(&drm_plane->mutex, NULL);
+ crtc = drm_plane->state->crtc;
+ drm_modeset_unlock(&drm_plane->mutex);
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -879,7 +883,7 @@ static void sti_gdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 67bbdb49fffc..49438337f70d 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -709,7 +709,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_hda_connector_helper_funcs);
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 58f431102512..34cdc4644435 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -977,7 +977,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
count = drm_add_edid_modes(connector, edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
kfree(edid);
return count;
@@ -1290,7 +1290,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi->drm_connector = drm_connector;
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 106be8c4e58b..03ac3b4a4469 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1260,7 +1260,7 @@ static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 8698e08313e1..f2021b23554d 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -148,16 +148,16 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drv_load(ddev);
if (ret)
- goto err_unref;
+ goto err_put;
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_unref;
+ goto err_put;
return 0;
-err_unref:
- drm_dev_unref(ddev);
+err_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -170,7 +170,7 @@ static int stm_drm_platform_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drv_unload(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index d997a6014d6c..808d9fb627e9 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -457,6 +457,14 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
int target_max = target + CLK_TOLERANCE_HZ;
int result;
+ result = clk_round_rate(ldev->pixel_clk, target);
+
+ DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+
+ /* Filter modes according to the max frequency supported by the pads */
+ if (result > ldev->caps.pad_max_freq_hz)
+ return MODE_CLOCK_HIGH;
+
/*
* Accept all "preferred" modes:
* - this is important for panels because panel clock tolerances are
@@ -468,10 +476,6 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return MODE_OK;
- result = clk_round_rate(ldev->pixel_clk, target);
-
- DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
-
/*
* Filter modes according to the clock value, particularly useful for
* hdmi modes that require precise pixel clocks.
@@ -991,11 +995,15 @@ static int ltdc_get_caps(struct drm_device *ddev)
* does not work on 2nd layer.
*/
ldev->caps.non_alpha_only_l1 = true;
+ ldev->caps.pad_max_freq_hz = 90000000;
+ if (ldev->caps.hw_version == HWVER_10200)
+ ldev->caps.pad_max_freq_hz = 65000000;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
+ ldev->caps.pad_max_freq_hz = 150000000;
break;
default:
return -ENODEV;
@@ -1074,8 +1082,11 @@ int ltdc_load(struct drm_device *ddev)
}
}
- if (!IS_ERR(rstc))
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ usleep_range(10, 20);
reset_control_deassert(rstc);
+ }
/* Disable interrupts */
reg_clear(ldev->regs, LTDC_IER,
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 1e16d6afb0d2..d5afb8960867 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -18,6 +18,7 @@ struct ltdc_caps {
u32 bus_width; /* bus width (32 or 64 bits) */
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
+ int pad_max_freq_hz; /* max frequency supported by pad */
};
#define LTDC_MAX_LAYER 4
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 156a865c3e6d..c2c042287c19 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -68,4 +68,11 @@ config DRM_SUN8I_MIXER
graphics mixture and feed graphics to TCON. If M is
selected, the module will be called sun8i-mixer.
+config DRM_SUN8I_TCON_TOP
+ tristate
+ default DRM_SUN4I if DRM_SUN8I_MIXER!=n
+ help
+ TCON TOP is responsible for configuring the display pipeline for
+ HDMI, TVE and LCD.
+
endif
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 2589f4acd5ae..0eb38ac8e86e 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -32,8 +32,12 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
-obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o
+endif
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o
obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
+obj-$(CONFIG_DRM_SUN8I_TCON_TOP) += sun8i_tcon_top.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index de0a76dfa1a2..d7950b52a1fd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -86,12 +86,6 @@ static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
}
}
-static inline bool sun4i_backend_format_is_yuv(uint32_t format)
-{
- return sun4i_backend_format_is_planar_yuv(format) ||
- sun4i_backend_format_is_packed_yuv422(format);
-}
-
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
@@ -304,7 +298,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
val);
- if (sun4i_backend_format_is_yuv(fb->format->format))
+ if (fb->format->is_yuv)
return sun4i_backend_update_yuv_format(backend, layer, plane);
ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
@@ -384,7 +378,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
*/
paddr -= PHYS_OFFSET;
- if (sun4i_backend_format_is_yuv(fb->format->format))
+ if (fb->format->is_yuv)
return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
/* Write the 32 lower bits of the address (in bits) */
@@ -502,7 +496,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
- if (sun4i_backend_format_is_yuv(fb->format->format)) {
+ if (fb->format->is_yuv) {
DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
num_yuv_planes++;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 2d7c57406715..3eedf335a935 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -242,7 +242,7 @@ struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
/* Set possible_crtcs to this crtc for overlay planes */
for (i = 0; planes[i]; i++) {
- uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc));
+ uint32_t possible_crtcs = drm_crtc_mask(&scrtc->crtc);
struct drm_plane *plane = planes[i];
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 50d19605c38f..dd19d674055c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -26,6 +26,7 @@
#include "sun4i_frontend.h"
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
+#include "sun8i_tcon_top.h"
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
@@ -143,7 +144,7 @@ cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -156,7 +157,7 @@ static void sun4i_drv_unbind(struct device *dev)
sun4i_framebuffer_free(drm);
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops sun4i_drv_master_ops = {
@@ -197,6 +198,28 @@ static bool sun4i_drv_node_is_tcon(struct device_node *node)
return !!of_match_node(sun4i_tcon_of_table, node);
}
+static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node)
+{
+ const struct of_device_id *match;
+
+ match = of_match_node(sun4i_tcon_of_table, node);
+ if (match) {
+ struct sun4i_tcon_quirks *quirks;
+
+ quirks = (struct sun4i_tcon_quirks *)match->data;
+
+ return quirks->has_channel_0;
+ }
+
+ return false;
+}
+
+static bool sun4i_drv_node_is_tcon_top(struct device_node *node)
+{
+ return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
static int compare_of(struct device *dev, void *data)
{
DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -231,12 +254,69 @@ struct endpoint_list {
DECLARE_KFIFO(fifo, struct device_node *, 16);
};
+static void sun4i_drv_traverse_endpoints(struct endpoint_list *list,
+ struct device_node *node,
+ int port_id)
+{
+ struct device_node *ep, *remote, *port;
+
+ port = of_graph_get_port_by_id(node, port_id);
+ if (!port) {
+ DRM_DEBUG_DRIVER("No output to bind on port %d\n", port_id);
+ return;
+ }
+
+ for_each_available_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ DRM_DEBUG_DRIVER("Error retrieving the output node\n");
+ continue;
+ }
+
+ if (sun4i_drv_node_is_tcon(node)) {
+ /*
+ * TCON TOP is always probed before TCON. However, TCON
+ * points back to TCON TOP when it is the source for HDMI.
+ * We have to skip it here to prevent infinite looping
+ * between TCON TOP and TCON.
+ */
+ if (sun4i_drv_node_is_tcon_top(remote)) {
+ DRM_DEBUG_DRIVER("TCON output endpoint is TCON TOP... skipping\n");
+ of_node_put(remote);
+ continue;
+ }
+
+ /*
+ * If the node is our TCON with channel 0, the first
+ * port is used for panel or bridges, and will not be
+ * part of the component framework.
+ */
+ if (sun4i_drv_node_is_tcon_with_ch0(node)) {
+ struct of_endpoint endpoint;
+
+ if (of_graph_parse_endpoint(ep, &endpoint)) {
+ DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+ of_node_put(remote);
+ continue;
+ }
+
+ if (!endpoint.id) {
+ DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+ of_node_put(remote);
+ continue;
+ }
+ }
+ }
+
+ kfifo_put(&list->fifo, remote);
+ }
+}
+
static int sun4i_drv_add_endpoints(struct device *dev,
struct endpoint_list *list,
struct component_match **match,
struct device_node *node)
{
- struct device_node *port, *ep, *remote;
int count = 0;
/*
@@ -272,41 +352,13 @@ static int sun4i_drv_add_endpoints(struct device *dev,
count++;
}
- /* Inputs are listed first, then outputs */
- port = of_graph_get_port_by_id(node, 1);
- if (!port) {
- DRM_DEBUG_DRIVER("No output to bind\n");
- return count;
- }
+ /* each node has at least one output */
+ sun4i_drv_traverse_endpoints(list, node, 1);
- for_each_available_child_of_node(port, ep) {
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- DRM_DEBUG_DRIVER("Error retrieving the output node\n");
- of_node_put(remote);
- continue;
- }
-
- /*
- * If the node is our TCON, the first port is used for
- * panel or bridges, and will not be part of the
- * component framework.
- */
- if (sun4i_drv_node_is_tcon(node)) {
- struct of_endpoint endpoint;
-
- if (of_graph_parse_endpoint(ep, &endpoint)) {
- DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
- continue;
- }
-
- if (!endpoint.id) {
- DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
- continue;
- }
- }
-
- kfifo_put(&list->fifo, remote);
+ /* TCON TOP has a second and a third output */
+ if (sun4i_drv_node_is_tcon_top(node)) {
+ sun4i_drv_traverse_endpoints(list, node, 3);
+ sun4i_drv_traverse_endpoints(list, node, 5);
}
return count;
@@ -366,6 +418,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
+ { .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index fa4bcd092eaf..061d2e0d9011 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -220,7 +220,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
DRM_DEBUG_DRIVER("Monitor is %s monitor\n",
hdmi->hdmi_monitor ? "an HDMI" : "a DVI");
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -623,7 +623,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
ret = cec_register_adapter(hdmi->cec_adap, dev);
if (ret < 0)
goto err_cleanup_connector;
- drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+ drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index be3f14d7746d..af7dcb6da351 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -136,7 +136,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
/* The LVDS encoder can only work with the TCON channel 0 */
- lvds->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+ lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (tcon->panel) {
drm_connector_helper_add(&lvds->connector,
@@ -149,7 +149,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&lvds->connector,
+ drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
ret = drm_panel_attach(tcon->panel, &lvds->connector);
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index f2fa1f210509..bf068da6b12e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -202,7 +202,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
/* The RGB encoder can only work with the TCON channel 0 */
- rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+ rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (tcon->panel) {
drm_connector_helper_add(&rgb->connector,
@@ -215,7 +215,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&rgb->connector,
+ drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
ret = drm_panel_attach(tcon->panel, &rgb->connector);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 8232b39e16ca..3fb084f802e2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -766,12 +766,14 @@ static int sun4i_tcon_init_regmap(struct device *dev,
*/
static struct sunxi_engine *
sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
- struct device_node *node)
+ struct device_node *node,
+ u32 port_id)
{
struct device_node *port, *ep, *remote;
struct sunxi_engine *engine = ERR_PTR(-EINVAL);
+ u32 reg = 0;
- port = of_graph_get_port_by_id(node, 0);
+ port = of_graph_get_port_by_id(node, port_id);
if (!port)
return ERR_PTR(-EINVAL);
@@ -801,8 +803,21 @@ sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
if (remote == engine->node)
goto out_put_remote;
+ /*
+ * According to the device tree binding, input ports have even id
+ * numbers and output ports have odd ones. Since a component with
+ * more than one input and one output (TCON TOP) exists, the correct
+ * remote input id has to be calculated by subtracting 1 from the
+ * remote output id. If this for some reason can't be done, 0 is
+ * used as the input port id.
+ */
+ of_node_put(port);
+ port = of_graph_get_remote_port(ep);
+ if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
+ reg -= 1;
+
/* keep looking through upstream ports */
- engine = sun4i_tcon_find_engine_traverse(drv, remote);
+ engine = sun4i_tcon_find_engine_traverse(drv, remote, reg);
out_put_remote:
of_node_put(remote);
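A worked example of the id arithmetic above, with illustrative values:

/*
 * remote output port reg = 3  ->  continue from remote input port 3 - 1 = 2
 * remote output port reg = 1  ->  continue from remote input port 0
 * reg unreadable              ->  fall back to input port 0
 */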
@@ -925,7 +940,7 @@ static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
/* Fallback to old method by traversing input endpoints */
of_node_put(port);
- return sun4i_tcon_find_engine_traverse(drv, node);
+ return sun4i_tcon_find_engine_traverse(drv, node, 0);
}
static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -1067,23 +1082,25 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
goto err_free_dotclock;
}
- /*
- * If we have an LVDS panel connected to the TCON, we should
- * just probe the LVDS connector. Otherwise, just probe RGB as
- * we used to.
- */
- remote = of_graph_get_remote_node(dev->of_node, 1, 0);
- if (of_device_is_compatible(remote, "panel-lvds"))
- if (can_lvds)
- ret = sun4i_lvds_init(drm, tcon);
+ if (tcon->quirks->has_channel_0) {
+ /*
+ * If we have an LVDS panel connected to the TCON, we should
+ * just probe the LVDS connector. Otherwise, just probe RGB as
+ * we used to.
+ */
+ remote = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (of_device_is_compatible(remote, "panel-lvds"))
+ if (can_lvds)
+ ret = sun4i_lvds_init(drm, tcon);
+ else
+ ret = -EINVAL;
else
- ret = -EINVAL;
- else
- ret = sun4i_rgb_init(drm, tcon);
- of_node_put(remote);
+ ret = sun4i_rgb_init(drm, tcon);
+ of_node_put(remote);
- if (ret < 0)
- goto err_free_dotclock;
+ if (ret < 0)
+ goto err_free_dotclock;
+ }
if (tcon->quirks->needs_de_be_mux) {
/*
@@ -1137,13 +1154,19 @@ static const struct component_ops sun4i_tcon_ops = {
static int sun4i_tcon_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ const struct sun4i_tcon_quirks *quirks;
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
- if (ret == -EPROBE_DEFER)
- return ret;
+ quirks = of_device_get_match_data(&pdev->dev);
+
+ /* panels and bridges are present only on TCONs with channel 0 */
+ if (quirks->has_channel_0) {
+ ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ }
return component_add(&pdev->dev, &sun4i_tcon_ops);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index b070d522ed8d..1a838d208211 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -623,7 +623,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
}
tv->connector.interlace_allowed = true;
- drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder);
+ drm_connector_attach_encoder(&tv->connector, &tv->encoder);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index b5e071a49045..88eb268fdf73 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index bfbf761f0c1d..e3b34a345546 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -13,6 +13,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <linux/phy/phy.h>
@@ -247,10 +248,8 @@ static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
return crc_ccitt(0xffff, buffer, len);
}
-static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len)
+static u16 sun6i_dsi_crc_repeat(u8 pd, u8 *buffer, size_t len)
{
- u8 buffer[len];
-
memset(buffer, pd, len);
return sun6i_dsi_crc_compute(buffer, len);
@@ -274,11 +273,11 @@ static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
wc & 0xff, wc >> 8);
}
-static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len)
+static u32 sun6i_dsi_build_blk1_pkt(u16 pd, u8 *buffer, size_t len)
{
u32 val = SUN6I_DSI_BLK_PD(pd);
- return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len));
+ return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat(pd, buffer, len));
}
static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
@@ -452,6 +451,54 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp, hfp, hsa, hblk, vblk;
+ size_t bytes;
+ u8 *buffer;
+
+ /* Do all timing calculations up front to allocate buffer space */
+
+ /*
+ * A sync period is composed of a blanking packet (4 bytes +
+ * payload + 2 bytes) and a sync event packet (4 bytes). Its
+ * minimal size is therefore 10 bytes
+ */
+#define HSA_PACKET_OVERHEAD 10
+ hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+
+ /*
+ * The backporch is set using a blanking packet (4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore 6 bytes
+ */
+#define HBP_PACKET_OVERHEAD 6
+ hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+
+ /*
+ * The frontporch is set using a blanking packet (4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore 6 bytes
+ */
+#define HFP_PACKET_OVERHEAD 6
+ hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+
+ /*
+ * hblk seems to be the line + porches length.
+ */
+ hblk = mode->htotal * Bpp - hsa;
+
+ /*
+ * And I'm not entirely sure what vblk is about. The driver in
+ * Allwinner BSP is using a rather convoluted calculation
+ * there only for 4 lanes. However, using 0 (the !4 lanes
+ * case) even with a 4 lanes screen seems to work...
+ */
+ vblk = 0;
+
+ /* How many bytes do we need to send all payloads? */
+ bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
+ buffer = kmalloc(bytes, GFP_KERNEL);
+ if (WARN_ON(!buffer))
+ return;
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
@@ -485,63 +532,37 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));
- /*
- * A sync period is composed of a blanking packet (4 bytes +
- * payload + 2 bytes) and a sync event packet (4 bytes). Its
- * minimal size is therefore 10 bytes
- */
-#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
- (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+ /* sync */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hsa));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
- sun6i_dsi_build_blk1_pkt(0, hsa));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hsa));
- /*
- * The backporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
-#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
- (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+ /* backporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hbp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
- sun6i_dsi_build_blk1_pkt(0, hbp));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hbp));
- /*
- * The frontporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
-#define HFP_PACKET_OVERHEAD 6
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
- (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+ /* frontporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hfp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
- sun6i_dsi_build_blk1_pkt(0, hfp));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hfp));
- /*
- * hblk seems to be the line + porches length.
- */
- hblk = mode->htotal * Bpp - hsa;
+ /* hblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
- sun6i_dsi_build_blk1_pkt(0, hblk));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hblk));
- /*
- * And I'm not entirely sure what vblk is about. The driver in
- * Allwinner BSP is using a rather convoluted calculation
- * there only for 4 lanes. However, using 0 (the !4 lanes
- * case) even with a 4 lanes screen seems to work...
- */
- vblk = 0;
+ /* vblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, vblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
- sun6i_dsi_build_blk1_pkt(0, vblk));
+ sun6i_dsi_build_blk1_pkt(0, buffer, vblk));
+
+ kfree(buffer);
}
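To make the buffer sizing concrete, a worked example under assumed timings (a common 1024x768 mode at 24 bpp, so Bpp = 3; the numbers are purely illustrative):

/*
 * hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, htotal = 1344
 * hsa   = max(10, (1184 - 1048) * 3 - 10) = 398
 * hbp   = max(6,  (1048 - 1024) * 3 - 6)  = 66
 * hfp   = max(6,  (1344 - 1184) * 3 - 6)  = 474
 * hblk  = 1344 * 3 - 398                  = 3634
 * vblk  = 0
 * bytes = max(hsa, hbp, hfp, hblk, vblk)  = 3634, i.e. one 3634-byte buffer
 */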
static int sun6i_dsi_start(struct sun6i_dsi *dsi,
@@ -812,8 +833,8 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
dsi->device = device;
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (!dsi->panel)
- return -EINVAL;
+ if (IS_ERR(dsi->panel))
+ return PTR_ERR(dsi->panel);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -920,7 +941,7 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
+ drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
drm_panel_attach(dsi->panel, &dsi->connector);
return 0;
@@ -1040,7 +1061,7 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int sun6i_dsi_runtime_resume(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
@@ -1069,7 +1090,7 @@ static int sun6i_dsi_runtime_resume(struct device *dev)
return 0;
}
-static int sun6i_dsi_runtime_suspend(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 9f40a44b456b..31875b636434 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -12,6 +12,7 @@
#include <drm/drm_crtc_helper.h>
#include "sun8i_dw_hdmi.h"
+#include "sun8i_tcon_top.h"
static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -41,6 +42,44 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
+{
+ return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm,
+ struct device_node *node)
+{
+ struct device_node *port, *ep, *remote, *remote_port;
+ u32 crtcs = 0;
+
+ remote = of_graph_get_remote_node(node, 0, -1);
+ if (!remote)
+ return 0;
+
+ if (sun8i_dw_hdmi_node_is_tcon_top(remote)) {
+ port = of_graph_get_port_by_id(remote, 4);
+ if (!port)
+ goto crtcs_exit;
+
+ for_each_child_of_node(port, ep) {
+ remote_port = of_graph_get_remote_port(ep);
+ if (remote_port) {
+ crtcs |= drm_of_crtc_port_mask(drm, remote_port);
+ of_node_put(remote_port);
+ }
+ }
+ } else {
+ crtcs = drm_of_find_possible_crtcs(drm, node);
+ }
+
+crtcs_exit:
+ of_node_put(remote);
+
+ return crtcs;
+}
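A short illustration of the traversal above, assuming TCON TOP port 4 is the HDMI output feeding the TCON TVs (the topology is illustrative):

/*
 * hdmi (port 0) -> tcon_top (port 4) -> { tcon_tv0, tcon_tv1 }
 * Each connected remote port contributes a CRTC bit via
 * drm_of_crtc_port_mask(), so with both TCON TVs wired up the
 * encoder ends up with possible_crtcs = BIT(0) | BIT(1).
 */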
+
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -63,7 +102,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
- encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+ encoder->possible_crtcs =
+ sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
* supposed to be connected to, it's because the CRTC has
@@ -181,7 +221,7 @@ static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
-struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.probe = sun8i_dw_hdmi_probe,
.remove = sun8i_dw_hdmi_remove,
.driver = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 79154f0f674a..aadbe0a10b0c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -98,7 +98,8 @@
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN BIT(29)
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN BIT(28)
#define SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 BIT(27)
-#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT 26
#define SUN8I_HDMI_PHY_PLL_CFG1_PLLEN BIT(25)
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(x) ((x) << 22)
#define SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(x) ((x) << 20)
@@ -147,6 +148,7 @@ struct sun8i_hdmi_phy;
struct sun8i_hdmi_phy_variant {
bool has_phy_clk;
+ bool has_second_pll;
void (*phy_init)(struct sun8i_hdmi_phy *phy);
void (*phy_disable)(struct dw_hdmi *hdmi,
struct sun8i_hdmi_phy *phy);
@@ -160,6 +162,7 @@ struct sun8i_hdmi_phy {
struct clk *clk_mod;
struct clk *clk_phy;
struct clk *clk_pll0;
+ struct clk *clk_pll1;
unsigned int rcal;
struct regmap *regs;
struct reset_control *rst_phy;
@@ -188,6 +191,7 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev);
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+ bool second_parent);
#endif /* _SUN8I_DW_HDMI_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 5a52fc489a9d..82502b351aec 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -183,7 +183,13 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK, 0);
- regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, pll_cfg1_init);
+ /*
+ * NOTE: We have to be careful not to overwrite PHY parent
+ * clock selection bit and clock divider.
+ */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ (u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+ pll_cfg1_init);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
(u32)~SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
pll_cfg2_init);
@@ -352,6 +358,10 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
SUN8I_HDMI_PHY_ANA_CFG3_SDAEN);
+ /* reset PHY PLL clock parent */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK, 0);
+
/* set HW control of CEC pins */
regmap_write(phy->regs, SUN8I_HDMI_PHY_CEC_REG, 0);
@@ -386,6 +396,14 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
.name = "phy"
};
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+ .has_phy_clk = true,
+ .has_second_pll = true,
+ .phy_init = &sun8i_hdmi_phy_init_h3,
+ .phy_disable = &sun8i_hdmi_phy_disable_h3,
+ .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
.phy_init = &sun8i_hdmi_phy_init_a83t,
.phy_disable = &sun8i_hdmi_phy_disable_a83t,
@@ -401,6 +419,10 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{
+ .compatible = "allwinner,sun50i-a64-hdmi-phy",
+ .data = &sun50i_a64_hdmi_phy,
+ },
+ {
.compatible = "allwinner,sun8i-a83t-hdmi-phy",
.data = &sun8i_a83t_hdmi_phy,
},
@@ -472,18 +494,30 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
goto err_put_clk_mod;
}
- ret = sun8i_phy_clk_create(phy, dev);
+ if (phy->variant->has_second_pll) {
+ phy->clk_pll1 = of_clk_get_by_name(node, "pll-1");
+ if (IS_ERR(phy->clk_pll1)) {
+ dev_err(dev, "Could not get pll-1 clock\n");
+ ret = PTR_ERR(phy->clk_pll1);
+ goto err_put_clk_pll0;
+ }
+ }
+
+ ret = sun8i_phy_clk_create(phy, dev,
+ phy->variant->has_second_pll);
if (ret) {
dev_err(dev, "Couldn't create the PHY clock\n");
- goto err_put_clk_pll0;
+ goto err_put_clk_pll1;
}
+
+ clk_prepare_enable(phy->clk_phy);
}
phy->rst_phy = of_reset_control_get_shared(node, "phy");
if (IS_ERR(phy->rst_phy)) {
dev_err(dev, "Could not get phy reset control\n");
ret = PTR_ERR(phy->rst_phy);
- goto err_put_clk_pll0;
+ goto err_disable_clk_phy;
}
ret = reset_control_deassert(phy->rst_phy);
@@ -514,9 +548,12 @@ err_deassert_rst_phy:
reset_control_assert(phy->rst_phy);
err_put_rst_phy:
reset_control_put(phy->rst_phy);
+err_disable_clk_phy:
+ clk_disable_unprepare(phy->clk_phy);
+err_put_clk_pll1:
+ clk_put(phy->clk_pll1);
err_put_clk_pll0:
- if (phy->variant->has_phy_clk)
- clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll0);
err_put_clk_mod:
clk_put(phy->clk_mod);
err_put_clk_bus:
@@ -531,13 +568,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
+ clk_disable_unprepare(phy->clk_phy);
reset_control_assert(phy->rst_phy);
reset_control_put(phy->rst_phy);
- if (phy->variant->has_phy_clk)
- clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll1);
clk_put(phy->clk_mod);
clk_put(phy->clk_bus);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
index faea449812f8..a4d31fe3abff 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
@@ -22,35 +22,45 @@ static int sun8i_phy_clk_determine_rate(struct clk_hw *hw,
{
unsigned long rate = req->rate;
unsigned long best_rate = 0;
+ struct clk_hw *best_parent = NULL;
struct clk_hw *parent;
int best_div = 1;
- int i;
+ int i, p;
- parent = clk_hw_get_parent(hw);
-
- for (i = 1; i <= 16; i++) {
- unsigned long ideal = rate * i;
- unsigned long rounded;
-
- rounded = clk_hw_round_rate(parent, ideal);
+ for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
+ parent = clk_hw_get_parent_by_index(hw, p);
+ if (!parent)
+ continue;
- if (rounded == ideal) {
- best_rate = rounded;
- best_div = i;
- break;
+ for (i = 1; i <= 16; i++) {
+ unsigned long ideal = rate * i;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(parent, ideal);
+
+ if (rounded == ideal) {
+ best_rate = rounded;
+ best_div = i;
+ best_parent = parent;
+ break;
+ }
+
+ if (!best_rate ||
+ abs(rate - rounded / i) <
+ abs(rate - best_rate / best_div)) {
+ best_rate = rounded;
+ best_div = i;
+ best_parent = parent;
+ }
}
- if (!best_rate ||
- abs(rate - rounded / i) <
- abs(rate - best_rate / best_div)) {
- best_rate = rounded;
- best_div = i;
- }
+ if (best_rate / best_div == rate)
+ break;
}
req->rate = best_rate / best_div;
req->best_parent_rate = best_rate;
- req->best_parent_hw = parent;
+ req->best_parent_hw = best_parent;
return 0;
}
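A worked example of the extended search, assuming two parents that round to 297 MHz and 270 MHz respectively (the rates are illustrative):

/*
 * Requested rate: 148500000 Hz.
 * pll-0: clk_hw_round_rate(pll0, 148500000 * 2) == 297000000, an exact
 * match, so best_parent = pll-0, best_div = 2, req->rate = 148500000.
 * The parent loop then exits early because best_rate / best_div == rate;
 * pll-1 would only be consulted if pll-0 could not hit the target exactly.
 */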
@@ -95,22 +105,58 @@ static int sun8i_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static u8 sun8i_phy_clk_get_parent(struct clk_hw *hw)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+ u32 reg;
+
+ regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, &reg);
+ reg = (reg & SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK) >>
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT;
+
+ return reg;
+}
+
+static int sun8i_phy_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+
+ if (index > 1)
+ return -EINVAL;
+
+ regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+ index << SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT);
+
+ return 0;
+}
+
static const struct clk_ops sun8i_phy_clk_ops = {
.determine_rate = sun8i_phy_clk_determine_rate,
.recalc_rate = sun8i_phy_clk_recalc_rate,
.set_rate = sun8i_phy_clk_set_rate,
+
+ .get_parent = sun8i_phy_clk_get_parent,
+ .set_parent = sun8i_phy_clk_set_parent,
};
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+ bool second_parent)
{
struct clk_init_data init;
struct sun8i_phy_clk *priv;
- const char *parents[1];
+ const char *parents[2];
parents[0] = __clk_get_name(phy->clk_pll0);
if (!parents[0])
return -ENODEV;
+ if (second_parent) {
+ parents[1] = __clk_get_name(phy->clk_pll1);
+ if (!parents[1])
+ return -ENODEV;
+ }
+
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -118,7 +164,7 @@ int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
init.name = "hdmi-phy-clk";
init.ops = &sun8i_phy_clk_ops;
init.parent_names = parents;
- init.num_parents = 1;
+ init.num_parents = second_parent ? 2 : 1;
init.flags = CLK_SET_RATE_PARENT;
priv->phy = phy;
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 126899d6f0d3..fc3713608f78 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -21,8 +21,9 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
-#include <linux/reset.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/reset.h>
#include "sun4i_drv.h"
#include "sun8i_mixer.h"
@@ -322,6 +323,42 @@ static struct regmap_config sun8i_mixer_regmap_config = {
.max_register = 0xbfffc, /* guessed */
};
+static int sun8i_mixer_of_get_id(struct device_node *node)
+{
+ struct device_node *port, *ep;
+ int ret = -EINVAL;
+
+ /* output is port 1 */
+ port = of_graph_get_port_by_id(node, 1);
+ if (!port)
+ return -EINVAL;
+
+ /* try to find downstream endpoint */
+ for_each_available_child_of_node(port, ep) {
+ struct device_node *remote;
+ u32 reg;
+
+ remote = of_graph_get_remote_endpoint(ep);
+ if (!remote)
+ continue;
+
+ ret = of_property_read_u32(remote, "reg", &reg);
+ if (!ret) {
+ of_node_put(remote);
+ of_node_put(ep);
+ of_node_put(port);
+
+ return reg;
+ }
+
+ of_node_put(remote);
+ }
+
+ of_node_put(port);
+
+ return ret;
+}
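A sketch of the lookup above under an assumed device tree topology (node and label names are illustrative):

/*
 * mixer1: port@1 -> endpoint -> remote endpoint in tcon_top's input
 * port, declared there as endpoint@1 { reg = <1>; ... };
 * sun8i_mixer_of_get_id() reads that remote endpoint's "reg" and
 * returns 1, which becomes mixer->engine.id.
 */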
+
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
@@ -353,8 +390,16 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
dev_set_drvdata(dev, mixer);
mixer->engine.ops = &sun8i_engine_ops;
mixer->engine.node = dev->of_node;
- /* The ID of the mixer currently doesn't matter */
- mixer->engine.id = -1;
+
+ /*
+ * While this function can fail, its failure is not fatal here.
+ * Some early DE2 DT entries don't provide a mixer id but work
+ * nevertheless because matching between TCON and mixer is done
+ * by comparing node pointers (the old way) instead of comparing
+ * ids. If this function fails and an id is needed, it will fail
+ * during id matching anyway.
+ */
+ mixer->engine.id = sun8i_mixer_of_get_id(dev->of_node);
mixer->cfg = of_device_get_match_data(dev);
if (!mixer->cfg)
@@ -432,14 +477,14 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
SUN8I_MIXER_BLEND_COLOR_BLACK);
- /* Fixed zpos for now */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE, 0x43210);
-
plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
for (i = 0; i < plane_cnt; i++)
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
SUN8I_MIXER_BLEND_MODE_DEF);
+ regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+
return 0;
err_disable_bus_clk:
@@ -500,6 +545,22 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
+ .ccsc = 0,
+ .mod_rate = 297000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
+static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
+ .ccsc = 1,
+ .mod_rate = 297000000,
+ .scaler_mask = 0x3,
+ .ui_num = 1,
+ .vi_num = 1,
+};
+
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
@@ -522,6 +583,14 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.data = &sun8i_h3_mixer0_cfg,
},
{
+ .compatible = "allwinner,sun8i-r40-de2-mixer-0",
+ .data = &sun8i_r40_mixer0_cfg,
+ },
+ {
+ .compatible = "allwinner,sun8i-r40-de2-mixer-1",
+ .data = &sun8i_r40_mixer1_cfg,
+ },
+ {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index f34e70c42adf..406c42e752d7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -44,6 +44,7 @@
#define SUN8I_MIXER_BLEND_CK_MIN(x) (0x10e0 + 0x04 * (x))
#define SUN8I_MIXER_BLEND_OUTCTL 0x10fc
+#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
#define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe)
/* colors are always in AARRGGBB format */
@@ -51,6 +52,9 @@
/* The following numbers are some still unknown magic numbers */
#define SUN8I_MIXER_BLEND_MODE_DEF 0x03010301
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(n) (0xf << ((n) << 2))
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(n) ((n) << 2)
+
#define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED BIT(1)
#define SUN8I_MIXER_FBFMT_ARGB8888 0
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
new file mode 100644
index 000000000000..55fe398d8290
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#include <drm/drmP.h>
+
+#include <dt-bindings/clock/sun8i-tcon-top.h>
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "sun8i_tcon_top.h"
+
+static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
+{
+ return !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 val;
+
+ if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+ dev_err(dev, "Device is not TCON TOP!\n");
+ return -EINVAL;
+ }
+
+ if (tcon < 2 || tcon > 3) {
+ dev_err(dev, "TCON index must be 2 or 3!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+ val = readl(tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+ val &= ~TCON_TOP_HDMI_SRC_MSK;
+ val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1);
+ writel(val, tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+
+ spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_set_hdmi_src);
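FIELD_PREP() from <linux/bitfield.h> shifts a value into the field described by a compile-time mask. With TCON_TOP_HDMI_SRC_MSK = GENMASK(29, 28) and tcon restricted to 2 or 3 above, the update is equivalent to this open-coded form:

	val &= ~TCON_TOP_HDMI_SRC_MSK;		/* clear bits 29:28 */
	val |= ((u32)(tcon - 1) << 28) & TCON_TOP_HDMI_SRC_MSK;
	/* field value 1 selects TCON TV0, 2 selects TCON TV1 */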
+
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 reg;
+
+ if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+ dev_err(dev, "Device is not TCON TOP!\n");
+ return -EINVAL;
+ }
+
+ if (mixer > 1) {
+ dev_err(dev, "Mixer index is too high!\n");
+ return -EINVAL;
+ }
+
+ if (tcon > 3) {
+ dev_err(dev, "TCON index is too high!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+ reg = readl(tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+ if (mixer == 0) {
+ reg &= ~TCON_TOP_PORT_DE0_MSK;
+ reg |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, tcon);
+ } else {
+ reg &= ~TCON_TOP_PORT_DE1_MSK;
+ reg |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, tcon);
+ }
+ writel(reg, tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+
+ spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_de_config);
+
+static struct clk_hw *sun8i_tcon_top_register_gate(struct device *dev,
+ const char *parent,
+ void __iomem *regs,
+ spinlock_t *lock,
+ u8 bit, int name_index)
+{
+ const char *clk_name, *parent_name;
+ int ret, index;
+
+ index = of_property_match_string(dev->of_node, "clock-names", parent);
+ if (index < 0)
+ return ERR_PTR(index);
+
+ parent_name = of_clk_get_parent_name(dev->of_node, index);
+
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", name_index,
+ &clk_name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw_register_gate(dev, clk_name, parent_name,
+ CLK_SET_RATE_PARENT,
+ regs + TCON_TOP_GATE_SRC_REG,
+ bit, 0, lock);
+}
+
+static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct clk_hw_onecell_data *clk_data;
+ struct sun8i_tcon_top *tcon_top;
+ struct resource *res;
+ void __iomem *regs;
+ int ret, i;
+
+ tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
+ if (!tcon_top)
+ return -ENOMEM;
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * CLK_NUM,
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+ tcon_top->clk_data = clk_data;
+
+ spin_lock_init(&tcon_top->reg_lock);
+
+ tcon_top->rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(tcon_top->rst)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tcon_top->rst);
+ }
+
+ tcon_top->bus = devm_clk_get(dev, "bus");
+ if (IS_ERR(tcon_top->bus)) {
+ dev_err(dev, "Couldn't get the bus clock\n");
+ return PTR_ERR(tcon_top->bus);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ tcon_top->regs = regs;
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ret = reset_control_deassert(tcon_top->rst);
+ if (ret) {
+ dev_err(dev, "Could not deassert ctrl reset control\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tcon_top->bus);
+ if (ret) {
+ dev_err(dev, "Could not enable bus clock\n");
+ goto err_assert_reset;
+ }
+
+ /*
+ * TCON TOP has two muxes, which select the parent clock for each TCON TV
+ * channel clock. The parent can be either the TCON TV or the TVE clock.
+ * For now we leave this fixed to TCON TV, since the TVE driver for R40 is
+ * not yet implemented. Once it is, the graph needs to be traversed to
+ * determine whether TVE is active on each TCON TV and, if so, the mux
+ * should be switched to the TVE clock parent.
+ */
+ clk_data->hws[CLK_TCON_TOP_TV0] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV0_GATE, 0);
+
+ clk_data->hws[CLK_TCON_TOP_TV1] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV1_GATE, 1);
+
+ clk_data->hws[CLK_TCON_TOP_DSI] =
+ sun8i_tcon_top_register_gate(dev, "dsi", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_DSI_GATE, 2);
+
+ for (i = 0; i < CLK_NUM; i++)
+ if (IS_ERR(clk_data->hws[i])) {
+ ret = PTR_ERR(clk_data->hws[i]);
+ goto err_unregister_gates;
+ }
+
+ clk_data->num = CLK_NUM;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ clk_data);
+ if (ret)
+ goto err_unregister_gates;
+
+ dev_set_drvdata(dev, tcon_top);
+
+ return 0;
+
+err_unregister_gates:
+ for (i = 0; i < CLK_NUM; i++)
+ if (clk_data->hws[i])
+ clk_hw_unregister_gate(clk_data->hws[i]);
+ clk_disable_unprepare(tcon_top->bus);
+err_assert_reset:
+ reset_control_assert(tcon_top->rst);
+
+ return ret;
+}
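The gates are published through of_clk_add_hw_provider() with of_clk_hw_onecell_get(), so consumers address them by cell index (CLK_TCON_TOP_TV0 and friends from the dt-bindings header) and use the ordinary clk API. A minimal consumer-side sketch; the "tcon-ch1" clock name is an assumption for illustration only:

	/* Hypothetical consumer (e.g. a TCON) grabbing its channel gate. */
	struct clk *gate = devm_clk_get(&pdev->dev, "tcon-ch1");	/* name assumed */

	if (IS_ERR(gate))
		return PTR_ERR(gate);
	ret = clk_prepare_enable(gate);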
+
+static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ struct clk_hw_onecell_data *clk_data = tcon_top->clk_data;
+ int i;
+
+ of_clk_del_provider(dev->of_node);
+ for (i = 0; i < CLK_NUM; i++)
+ clk_hw_unregister_gate(clk_data->hws[i]);
+
+ clk_disable_unprepare(tcon_top->bus);
+ reset_control_assert(tcon_top->rst);
+}
+
+static const struct component_ops sun8i_tcon_top_ops = {
+ .bind = sun8i_tcon_top_bind,
+ .unbind = sun8i_tcon_top_unbind,
+};
+
+static int sun8i_tcon_top_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun8i_tcon_top_ops);
+}
+
+static int sun8i_tcon_top_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun8i_tcon_top_ops);
+
+ return 0;
+}
+
+/* sun4i_drv uses this list to check if a device node is a TCON TOP */
+const struct of_device_id sun8i_tcon_top_of_table[] = {
+ { .compatible = "allwinner,sun8i-r40-tcon-top" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
+EXPORT_SYMBOL(sun8i_tcon_top_of_table);
+
+static struct platform_driver sun8i_tcon_top_platform_driver = {
+ .probe = sun8i_tcon_top_probe,
+ .remove = sun8i_tcon_top_remove,
+ .driver = {
+ .name = "sun8i-tcon-top",
+ .of_match_table = sun8i_tcon_top_of_table,
+ },
+};
+module_platform_driver(sun8i_tcon_top_platform_driver);
+
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner R40 TCON TOP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.h b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
new file mode 100644
index 000000000000..0390584a330e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _SUN8I_TCON_TOP_H_
+#define _SUN8I_TCON_TOP_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define TCON_TOP_TCON_TV_SETUP_REG 0x00
+
+#define TCON_TOP_PORT_SEL_REG 0x1C
+#define TCON_TOP_PORT_DE0_MSK GENMASK(1, 0)
+#define TCON_TOP_PORT_DE1_MSK GENMASK(5, 4)
+
+#define TCON_TOP_GATE_SRC_REG 0x20
+#define TCON_TOP_HDMI_SRC_MSK GENMASK(29, 28)
+#define TCON_TOP_TCON_TV1_GATE 24
+#define TCON_TOP_TCON_TV0_GATE 20
+#define TCON_TOP_TCON_DSI_GATE 16
+
+#define CLK_NUM 3
+
+struct sun8i_tcon_top {
+ struct clk *bus;
+ struct clk_hw_onecell_data *clk_data;
+ void __iomem *regs;
+ struct reset_control *rst;
+
+ /*
+ * The spinlock synchronizes access to the register in which
+ * multiple clock gates can be set.
+ */
+ spinlock_t reg_lock;
+};
+
+extern const struct of_device_id sun8i_tcon_top_of_table[];
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon);
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon);
+
+#endif /* _SUN8I_TCON_TOP_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 9a540330cb79..28c15c6ef1ef 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -27,7 +27,8 @@
#include "sun8i_ui_scaler.h"
static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
- int overlay, bool enable)
+ int overlay, bool enable, unsigned int zpos,
+ unsigned int old_zpos)
{
u32 val;
@@ -43,18 +44,36 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
- if (enable)
- val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
- else
- val = 0;
+ if (!enable || zpos != old_zpos) {
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+ 0);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
- SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+ 0);
+ }
+
+ if (enable) {
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+ val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+ val);
+ }
}
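This enable path (mirrored for VI layers further down) is what makes zpos mutable: the pipe at the old zpos is torn down first, then the pipe at the new zpos is enabled and the plane's channel routed into it. A worked example with concrete values, assuming channel 0 moves from zpos 2 to zpos 0:

	/* EN(2) = BIT(10) cleared, ROUTE bits 11:8 (pipe 2 field) cleared */
	regmap_update_bits(regs, SUN8I_MIXER_BLEND_PIPE_CTL,
			   SUN8I_MIXER_BLEND_PIPE_CTL_EN(2), 0);
	regmap_update_bits(regs, SUN8I_MIXER_BLEND_ROUTE,
			   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(2), 0);

	/* EN(0) = BIT(8) set; routing channel 0 to pipe 0 writes a zero
	 * field value, so clearing ROUTE bits 3:0 already does the job */
	regmap_update_bits(regs, SUN8I_MIXER_BLEND_PIPE_CTL,
			   SUN8I_MIXER_BLEND_PIPE_CTL_EN(0),
			   SUN8I_MIXER_BLEND_PIPE_CTL_EN(0));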
static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+ int overlay, struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
@@ -137,10 +156,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
outsize);
return 0;
@@ -236,30 +255,35 @@ static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false);
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+ old_zpos);
}
static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!plane->state->visible) {
sun8i_ui_layer_enable(mixer, layer->channel,
- layer->overlay, false);
+ layer->overlay, false, 0, old_zpos);
return;
}
sun8i_ui_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane);
+ layer->overlay, plane, zpos);
sun8i_ui_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_ui_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
- sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, true);
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay,
+ true, zpos, old_zpos);
}
static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
@@ -307,6 +331,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
int channel = mixer->cfg->vi_num + index;
struct sun8i_ui_layer *layer;
+ unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -327,8 +352,10 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- /* fixed zpos for now */
- ret = drm_plane_create_zpos_immutable_property(&layer->plane, channel);
+ plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+ ret = drm_plane_create_zpos_property(&layer->plane, channel,
+ 0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 5877f8ef5895..f4fe97813f94 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -21,7 +21,8 @@
#include "sun8i_vi_scaler.h"
static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
- int overlay, bool enable)
+ int overlay, bool enable, unsigned int zpos,
+ unsigned int old_zpos)
{
u32 val;
@@ -37,18 +38,36 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
- if (enable)
- val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
- else
- val = 0;
+ if (!enable || zpos != old_zpos) {
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+ 0);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
- SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+ 0);
+ }
+
+ if (enable) {
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+ val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+ val);
+ }
}
static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+ int overlay, struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
@@ -130,10 +149,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
outsize);
return 0;
@@ -264,30 +283,35 @@ static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false);
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+ old_zpos);
}
static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!plane->state->visible) {
sun8i_vi_layer_enable(mixer, layer->channel,
- layer->overlay, false);
+ layer->overlay, false, 0, old_zpos);
return;
}
sun8i_vi_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane);
+ layer->overlay, plane, zpos);
sun8i_vi_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_vi_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
- sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, true);
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay,
+ true, zpos, old_zpos);
}
static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
@@ -351,6 +375,7 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
struct sun8i_vi_layer *layer;
+ unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -368,8 +393,10 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- /* fixed zpos for now */
- ret = drm_plane_create_zpos_immutable_property(&layer->plane, index);
+ plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+ ret = drm_plane_create_zpos_property(&layer->plane, index,
+ 0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c3afe7b2237e..965088afcfad 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2312,7 +2312,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
* POWER_CONTROL registers during CRTC enabling.
*/
if (dc->soc->coupled_pm && dc->pipe == 1) {
- u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE;
+ u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
struct device_link *link;
struct device *partner;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 776c1513e582..a2bd5876c633 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
* an unaligned offset is malformed and causes command stream
* corruption during buffer address relocation.
*/
- if (offset & 3 || offset >= obj->gem.size) {
+ if (offset & 3 || offset > obj->gem.size) {
err = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 87c5d89bc9ba..ee6ca8fa1c65 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1052,7 +1052,7 @@ static int tegra_dsi_init(struct host1x_client *client)
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&dsi->output.connector,
+ drm_connector_attach_encoder(&dsi->output.connector,
&dsi->output.encoder);
drm_connector_register(&dsi->output.connector);
@@ -1411,6 +1411,9 @@ static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
struct tegra_output *output = &dsi->output;
output->panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(output->panel))
+ output->panel = NULL;
+
if (output->panel && output->connector.dev) {
drm_panel_attach(output->panel, &output->connector);
drm_helper_hpd_irq_event(output->connector.dev);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 00a5c9f32254..4f80100ff5f3 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -582,18 +582,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
return 0;
}
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
- unsigned long page)
-{
- return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
- unsigned long page,
- void *addr)
-{
-}
-
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
return NULL;
@@ -634,8 +622,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
- .map_atomic = tegra_gem_prime_kmap_atomic,
- .unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
.unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 784739a9f497..0082468f703c 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1488,7 +1488,7 @@ static int tegra_hdmi_init(struct host1x_client *client)
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&hdmi->output.connector,
+ drm_connector_attach_encoder(&hdmi->output.connector,
&hdmi->output.encoder);
drm_connector_register(&hdmi->output.connector);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index ffe34bd0bb9d..c662efc7e413 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -37,7 +37,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, output->ddc);
cec_notifier_set_phys_addr_from_edid(output->notifier, edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
err = drm_add_edid_modes(connector, edid);
@@ -110,8 +110,8 @@ int tegra_output_probe(struct tegra_output *output)
panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
if (panel) {
output->panel = of_drm_find_panel(panel);
- if (!output->panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(output->panel))
+ return PTR_ERR(output->panel);
of_node_put(panel);
}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 78ec5193741d..28a78d3120bc 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -289,7 +289,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&output->connector,
+ drm_connector_attach_encoder(&output->connector,
&output->encoder);
drm_connector_register(&output->connector);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7d2a955fc515..d7fe9f15def1 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2622,7 +2622,7 @@ static int tegra_sor_init(struct host1x_client *client)
encoder, NULL);
drm_encoder_helper_add(&sor->output.encoder, helpers);
- drm_mode_connector_attach_encoder(&sor->output.connector,
+ drm_connector_attach_encoder(&sor->output.connector,
&sor->output.encoder);
drm_connector_register(&sor->output.connector);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b8a5e4ed22e6..0fb300d41a09 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -378,7 +378,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
if (!priv->external_connector &&
((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
dev_err(dev, "no encoders/connectors found\n");
- ret = -ENXIO;
+ ret = -EPROBE_DEFER;
goto init_failed;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index d651bdd6597e..b4eaf9bc87f8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -103,12 +103,11 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
struct drm_encoder *encoder)
{
struct drm_connector *connector;
- int i;
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
- if (connector->encoder_ids[i] == encoder->base.id)
- return connector;
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
+ }
dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
encoder->name, encoder->base.id);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index d616d64a6725..a1acab39d87f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -223,7 +223,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c45cabb38db0..daebf1aa6b0a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -173,7 +173,7 @@ static int tfp410_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
ret = drm_add_edid_modes(connector, edid);
@@ -240,7 +240,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 4592a5e3f20b..16f4b5c91f1b 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -20,6 +20,17 @@ config TINYDRM_ILI9225
If M is selected the module will be called ili9225.
+config TINYDRM_ILI9341
+ tristate "DRM support for ILI9341 display panels"
+ depends on DRM_TINYDRM && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following Ilitek ILI9341 panels:
+ * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
+
+ If M is selected the module will be called ili9341.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM_TINYDRM && SPI
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 49a111929724..14d99080665a 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
+obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 24a33bf862fa..19c7f70adfa5 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -204,7 +204,7 @@ static int tinydrm_register(struct tinydrm_device *tdev)
if (ret)
return ret;
- ret = drm_fb_cma_fbdev_init_with_funcs(drm, 0, 0, tdev->fb_funcs);
+ ret = drm_fbdev_generic_setup(drm, 0);
if (ret)
DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
@@ -214,7 +214,6 @@ static int tinydrm_register(struct tinydrm_device *tdev)
static void tinydrm_unregister(struct tinydrm_device *tdev)
{
drm_atomic_helper_shutdown(tdev->drm);
- drm_fb_cma_fbdev_fini(tdev->drm);
drm_dev_unregister(tdev->drm);
}
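drm_fbdev_generic_setup() replaces the hand-rolled fbdev init/fini pair: it registers a DRM client whose lifetime follows the device, which is also why the explicit fini call and the per-driver .lastclose hooks (removed from the tinydrm drivers below) are no longer needed. Typical call order, sketched:

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* second argument is the preferred bpp; 0 picks the driver default */
	ret = drm_fbdev_generic_setup(drm, 0);
	if (ret)
		DRM_ERROR("Failed to initialize fbdev: %d\n", ret);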
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 841c69aba059..455fefe012f5 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -368,7 +368,6 @@ static struct drm_driver ili9225_driver = {
DRIVER_ATOMIC,
.fops = &ili9225_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
new file mode 100644
index 000000000000..6701037749a7
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9341 panels
+ *
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ *
+ * Based on mi0283qt.c:
+ * Copyright 2016 Noralf Trønnes
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define ILI9341_FRMCTR1 0xb1
+#define ILI9341_DISCTRL 0xb6
+#define ILI9341_ETMOD 0xb7
+
+#define ILI9341_PWCTRL1 0xc0
+#define ILI9341_PWCTRL2 0xc1
+#define ILI9341_VMCTRL1 0xc5
+#define ILI9341_VMCTRL2 0xc7
+#define ILI9341_PWCTRLA 0xcb
+#define ILI9341_PWCTRLB 0xcf
+
+#define ILI9341_PGAMCTRL 0xe0
+#define ILI9341_NGAMCTRL 0xe1
+#define ILI9341_DTCTRLA 0xe8
+#define ILI9341_DTCTRLB 0xea
+#define ILI9341_PWRSEQ 0xed
+
+#define ILI9341_EN3GAM 0xf2
+#define ILI9341_PUMPCTRL 0xf7
+
+#define ILI9341_MADCTL_BGR BIT(3)
+#define ILI9341_MADCTL_MV BIT(5)
+#define ILI9341_MADCTL_MX BIT(6)
+#define ILI9341_MADCTL_MY BIT(7)
+
+static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ u8 addr_mode;
+ int ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(mipi);
+ if (ret < 0)
+ return;
+ if (ret == 1)
+ goto out_enable;
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+ mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
+ mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
+ mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+
+ /* Power Control */
+ mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x23);
+ mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x10);
+ /* VCOM */
+ mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x3e, 0x28);
+ mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0x86);
+
+ /* Memory Access Control */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+ /* Frame Rate */
+ mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+
+ /* Gamma */
+ mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x00);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
+ 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
+ mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
+ 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
+
+ /* DDRAM */
+ mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+
+ /* Display */
+ mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+out_enable:
+ switch (mipi->rotation) {
+ default:
+ addr_mode = ILI9341_MADCTL_MX;
+ break;
+ case 90:
+ addr_mode = ILI9341_MADCTL_MV;
+ break;
+ case 180:
+ addr_mode = ILI9341_MADCTL_MY;
+ break;
+ case 270:
+ addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
+ ILI9341_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9341_MADCTL_BGR;
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
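mipi_dbi_command() used throughout this init sequence is a variadic convenience macro: each invocation sends one command byte plus its parameter bytes over the DBI bus. Conceptually, a call like the frame-rate setup above expands to something along these lines:

	/* equivalent of mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b) */
	u8 par[] = { 0x00, 0x1b };

	mipi_dbi_command_buf(mipi, ILI9341_FRMCTR1, par, ARRAY_SIZE(par));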
+
+static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
+ .enable = yx240qv29_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx240qv29_mode = {
+ TINYDRM_MODE(240, 320, 37, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+
+static struct drm_driver ili9341_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .fops = &ili9341_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9341",
+ .desc = "Ilitek ILI9341",
+ .date = "20180514",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9341_of_match[] = {
+ { .compatible = "adafruit,yx240qv29" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9341_of_match);
+
+static const struct spi_device_id ili9341_id[] = {
+ { "yx240qv29", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9341_id);
+
+static int ili9341_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
+ &ili9341_driver, &yx240qv29_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver ili9341_spi_driver = {
+ .driver = {
+ .name = "ili9341",
+ .of_match_table = ili9341_of_match,
+ },
+ .id_table = ili9341_id,
+ .probe = ili9341_probe,
+ .shutdown = ili9341_shutdown,
+};
+module_spi_driver(ili9341_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9341 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 015d03f2acba..d7bb4c5e6657 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -154,7 +154,6 @@ static struct drm_driver mi0283qt_driver = {
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 4d1fb31a781f..cb3441e51d5f 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -260,6 +260,8 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
/**
* mipi_dbi_enable_flush - MIPI DBI enable helper
* @mipi: MIPI DBI structure
+ * @crtc_state: CRTC state
+ * @plane_state: Plane state
*
* This function sets &mipi_dbi->enabled, flushes the whole framebuffer and
* enables the backlight. Drivers can use this in their
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 5c29e3803ecb..2fcbc3067d71 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -304,7 +304,6 @@ static struct drm_driver st7586_driver = {
DRIVER_ATOMIC,
.fops = &st7586_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 6c7b15c9da4f..3081bc57c116 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -120,7 +120,6 @@ static struct drm_driver st7735r_driver = {
DRIVER_ATOMIC,
.fops = &st7735r_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5d8688e522d1..7c484729f9b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -287,12 +287,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (ret) {
if (bdev->driver->move_notify) {
- struct ttm_mem_reg tmp_mem = *mem;
- *mem = bo->mem;
- bo->mem = tmp_mem;
+ swap(*mem, bo->mem);
bdev->driver->move_notify(bo, false, mem);
- bo->mem = *mem;
- *mem = tmp_mem;
+ swap(*mem, bo->mem);
}
goto out_err;
@@ -590,12 +587,18 @@ static void ttm_bo_release(struct kref *kref)
kref_put(&bo->list_kref, ttm_bo_release_list);
}
+void ttm_bo_put(struct ttm_buffer_object *bo)
+{
+ kref_put(&bo->kref, ttm_bo_release);
+}
+EXPORT_SYMBOL(ttm_bo_put);
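ttm_bo_put() pairs with ttm_bo_get() (used by the call sites converted below), moving TTM to the kernel-wide get/put naming for kref-counted objects; ttm_bo_unref() stays behind as a thin wrapper. The acquire side is just a kref_get(), sketched to match how the converted code uses it:

	static inline void ttm_bo_get(struct ttm_buffer_object *bo)
	{
		kref_get(&bo->kref);
	}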
+
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
*p_bo = NULL;
- kref_put(&bo->kref, ttm_bo_release);
+ ttm_bo_put(bo);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -1201,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
if (!resv)
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f2c167702eef..046a6dda690a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -463,7 +463,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
- ttm_bo_unref(&fbo->bo);
+ ttm_bo_put(fbo->bo);
kfree(fbo);
}
@@ -492,8 +492,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
+ ttm_bo_get(bo);
fbo->base = *bo;
- fbo->bo = ttm_bo_reference(bo);
+ fbo->bo = bo;
/**
* Fix up members that we shouldn't copy directly:
@@ -730,7 +731,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
- ttm_bo_unref(&ghost_obj);
+ ttm_bo_put(ghost_obj);
}
*old_mem = *new_mem;
@@ -786,7 +787,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
- ttm_bo_unref(&ghost_obj);
+ ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -851,7 +852,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->ttm = NULL;
ttm_bo_unreserve(ghost);
- ttm_bo_unref(&ghost);
+ ttm_bo_put(ghost);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c7ece7613a6a..6fe91c1b692d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -44,10 +44,11 @@
#define TTM_BO_VM_NUM_PREFAULT 16
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- int ret = 0;
+ vm_fault_t ret = 0;
+ int err = 0;
if (likely(!bo->moving))
goto out_unlock;
@@ -67,20 +68,20 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
- ttm_bo_reference(bo);
+ ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = dma_fence_wait(bo->moving, true);
- if (unlikely(ret != 0)) {
- ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ err = dma_fence_wait(bo->moving, true);
+ if (unlikely(err != 0)) {
+ ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
goto out_unlock;
}
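Splitting int err from vm_fault_t ret lets the type checker keep errno values and fault codes apart. The open-coded -ERESTARTSYS test above is a special case; for handlers without that requirement the generic translator is vmf_error(), roughly:

	err = dma_fence_wait(bo->moving, true);
	if (err)
		return vmf_error(err);	/* -ENOMEM -> VM_FAULT_OOM, else SIGBUS */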
@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
unsigned long pfn;
struct ttm_tt *ttm = NULL;
struct page *page;
- int ret;
+ int err;
int i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
@@ -129,17 +131,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
- ret = ttm_bo_reserve(bo, true, true, NULL);
- if (unlikely(ret != 0)) {
- if (ret != -EBUSY)
+ err = ttm_bo_reserve(bo, true, true, NULL);
+ if (unlikely(err != 0)) {
+ if (err != -EBUSY)
return VM_FAULT_NOPAGE;
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_reference(bo);
+ ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
}
return VM_FAULT_RETRY;
@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (bdev->driver->fault_reserve_notify) {
- ret = bdev->driver->fault_reserve_notify(bo);
- switch (ret) {
+ err = bdev->driver->fault_reserve_notify(bo);
+ switch (err) {
case 0:
break;
case -EBUSY:
@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_unlock;
}
- ret = ttm_mem_io_lock(man, true);
- if (unlikely(ret != 0)) {
+ err = ttm_mem_io_lock(man, true);
+ if (unlikely(err != 0)) {
ret = VM_FAULT_NOPAGE;
goto out_unlock;
}
- ret = ttm_mem_io_reserve_vm(bo);
- if (unlikely(ret != 0)) {
+ err = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
goto out_io_unlock;
}
@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (vma->vm_flags & VM_MIXEDMAP)
- ret = vm_insert_mixed(&cvma, address,
+ ret = vmf_insert_mixed(&cvma, address,
__pfn_to_pfn_t(pfn, PFN_DEV));
else
- ret = vm_insert_pfn(&cvma, address, pfn);
+ ret = vmf_insert_pfn(&cvma, address, pfn);
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
*/
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ if (unlikely(ret == VM_FAULT_NOPAGE && i > 0))
break;
- else if (unlikely(ret != 0)) {
- ret =
- (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ else if (unlikely(ret & VM_FAULT_ERROR))
goto out_io_unlock;
- }
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
@@ -303,14 +302,14 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
- (void)ttm_bo_reference(bo);
+ ttm_bo_get(bo);
}
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
vma->vm_private_data = NULL;
}
@@ -462,7 +461,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
@@ -472,8 +471,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
if (vma->vm_pgoff != 0)
return -EACCES;
+ ttm_bo_get(bo);
+
vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = ttm_bo_reference(bo);
+ vma->vm_private_data = bo;
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6e2d1300b457..f841accc2c00 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,13 +47,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
static struct ttm_pool_manager *_manager;
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
-#endif
- return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif
-
/**
* Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
unsigned int i, pages_nr = (1 << order);
if (order == 0) {
- if (set_pages_array_wb(pages, npages))
+ if (ttm_set_pages_array_wb(pages, npages))
pr_err("Failed to set %d pages to wb!\n", npages);
}
for (i = 0; i < npages; ++i) {
if (order > 0) {
- if (set_pages_wb(pages[i], pages_nr))
+ if (ttm_set_pages_wb(pages[i], pages_nr))
pr_err("Failed to set %d pages to wb!\n", pages_nr);
}
__free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
/* Set page caching */
switch (cstate) {
case tt_uncached:
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to wc!\n", cpages);
break;
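Both page-pool allocators (and ttm_tt.c below) previously open-coded these AGP-based fallbacks for non-x86 builds; they are now consolidated behind <drm/ttm/ttm_set_memory.h>. A sketch of the presumed wrapper shape, mirroring the code removed above (on x86 the helpers would forward to <asm/set_memory.h>); this is an assumption, not the verbatim header:

	static inline int ttm_set_pages_array_wb(struct page **pages,
						 int addrinarray)
	{
	#ifdef CONFIG_X86
		return set_pages_array_wb(pages, addrinarray);
	#else
		/* non-x86: unmap from AGP when enabled, otherwise a no-op */
		return 0;
	#endif
	}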
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3f14c1cc0789..507be7ac1165 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,12 +50,7 @@
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
.default_attrs = ttm_pool_attrs,
};
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif /* for !CONFIG_X86 */
-
static int ttm_set_pages_caching(struct dma_pool *pool,
struct page **pages, unsigned cpages)
{
int r = 0;
/* Set page caching */
if (pool->type & IS_UC) {
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages);
}
if (pool->type & IS_WC) {
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
struct page *page = d_page->p;
- unsigned i, num_pages;
+ unsigned num_pages;
/* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED)) {
num_pages = pool->size / PAGE_SIZE;
- for (i = 0; i < num_pages; ++i, ++page) {
- if (set_pages_array_wb(&page, 1)) {
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
- }
- }
+ if (ttm_set_pages_wb(page, num_pages))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, num_pages);
}
list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
- set_pages_array_wb(pages, npages))
+ ttm_set_pages_array_wb(pages, npages))
pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, npages);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a1e543972ca7..e3a0691582ff 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,9 +38,7 @@
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
/**
* Allocates a ttm structure for the given BO.
@@ -115,10 +113,9 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
return 0;
}
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
+static int ttm_tt_set_page_caching(struct page *p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
int ret = 0;
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
- ret = set_pages_wb(p, 1);
+ ret = ttm_set_pages_wb(p, 1);
if (ret)
return ret;
}
if (c_new == tt_wc)
- ret = set_memory_wc((unsigned long) page_address(p), 1);
+ ret = ttm_set_pages_wc(p, 1);
else if (c_new == tt_uncached)
- ret = set_pages_uc(p, 1);
+ ret = ttm_set_pages_uc(p, 1);
return ret;
}
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- return 0;
-}
-#endif /* CONFIG_X86 */
/*
* Change caching policy for the linear kernel map
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 09dc585aa46f..68e88bed77ca 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -99,7 +99,7 @@ static int udl_get_modes(struct drm_connector *connector)
struct udl_drm_connector,
connector);
- drm_mode_connector_update_edid_property(connector, udl_connector->edid);
+ drm_connector_update_edid_property(connector, udl_connector->edid);
if (udl_connector->edid)
return drm_add_edid_modes(connector, udl_connector->edid);
return 0;
@@ -200,7 +200,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
drm_connector_register(connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
connector->polled = DRM_CONNECTOR_POLL_HPD |
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index 0a20695eb120..556f62662aa9 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
};
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
- struct device *dev,
struct dma_buf_attachment *attach)
{
struct udl_drm_dmabuf_attachment *udl_attach;
@@ -158,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
return NULL;
}
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
/* TODO */
}
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num,
- void *addr)
-{
- /* TODO */
-}
-
static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
@@ -193,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
.map = udl_dmabuf_kmap,
- .map_atomic = udl_dmabuf_kmap_atomic,
.unmap = udl_dmabuf_kunmap,
- .unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 55c0cc309198..e9e9b1ff678e 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -16,6 +16,7 @@
#include <linux/usb.h>
#include <drm/drm_gem.h>
+#include <linux/mm_types.h>
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
@@ -112,7 +113,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd);
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr);
@@ -136,7 +137,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_fault *vmf);
+vm_fault_t udl_gem_fault(struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 2ebdc6d5a76e..dbb62f6eb48a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int bytes_identical = 0;
struct urb *urb;
int aligned_x;
- int bpp = fb->base.format->cpp[0];
+ int log_bpp;
+
+ BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
+ log_bpp = __ffs(fb->base.format->cpp[0]);
if (!fb->active_16)
return 0;
@@ -125,19 +128,22 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
- const int byte_offset = line_offset + (x * bpp);
- const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
- if (udl_render_hline(dev, bpp, &urb,
+ const int byte_offset = line_offset + (x << log_bpp);
+ const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
+ if (udl_render_hline(dev, log_bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- width * bpp,
+ width << log_bpp,
&bytes_identical, &bytes_sent))
goto error;
}
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
- int len = cmd - (char *) urb->transfer_buffer;
+ int len;
+ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
+ len = cmd - (char *) urb->transfer_buffer;
ret = udl_submit_urb(dev, urb, len);
bytes_sent += len;
} else
@@ -146,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
error:
atomic_add(bytes_sent, &udl->bytes_sent);
atomic_add(bytes_identical, &udl->bytes_identical);
- atomic_add(width*height*bpp, &udl->bytes_rendered);
+ atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
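Since a framebuffer's bytes-per-pixel is a power of two (enforced by the BUG_ON() above), __ffs(cpp) yields log2(cpp), turning every multiplication by bpp in the damage path into a shift:

	int log_bpp = __ffs(cpp);	/* cpp = 2 -> 1, cpp = 4 -> 2 */
	u32 byte_offset = x << log_bpp;	/* identical to x * cpp */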
@@ -172,7 +178,7 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
pos = (unsigned long)info->fix.smem_start + offset;
- pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
+ pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
pos, size);
/* We don't want the framebuffer to be mapped encrypted */
@@ -218,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
@@ -230,7 +236,7 @@ static int udl_fb_open(struct fb_info *info, int user)
}
#endif
- pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
+ pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
return 0;
@@ -255,7 +261,7 @@ static int udl_fb_release(struct fb_info *info, int user)
}
#endif
- pr_warn("released /dev/fb%d user=%d count=%d\n",
+ pr_debug("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
return 0;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 9a15cce22cce..d5a23295dd80 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -100,13 +100,12 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-int udl_gem_fault(struct vm_fault *vmf)
+vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
struct page *page;
unsigned int page_offset;
- int ret = 0;
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
@@ -114,17 +113,7 @@ int udl_gem_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
- ret = vm_insert_page(vma, vmf->address, page);
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int udl_gem_get_pages(struct udl_gem_object *obj)
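
vmf_insert_page() moves the errno-to-VM_FAULT_* translation that the deleted switch did by hand into core MM. A simplified restatement of that mapping (not the actual mm/memory.c implementation):

    static vm_fault_t errno_to_vm_fault(int err)
    {
            switch (err) {
            case 0:
            case -EAGAIN:
            case -ERESTARTSYS:
                    return VM_FAULT_NOPAGE;
            case -ENOMEM:
                    return VM_FAULT_OOM;
            default:
                    return VM_FAULT_SIGBUS;
            }
    }
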
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index d518de8f496b..f455f095a146 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -170,25 +170,19 @@ static void udl_free_urb_list(struct drm_device *dev)
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
- int ret;
- unsigned long flags;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
+ down(&udl->urbs.limit_sem);
- /* Getting interrupted means a leak, but ok at shutdown*/
- ret = down_interruptible(&udl->urbs.limit_sem);
- if (ret)
- break;
-
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
node = udl->urbs.list.next; /* have reserved one with sem */
list_del_init(node);
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(node, struct urb_node, entry);
urb = unode->urb;
@@ -205,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
struct udl_device *udl = dev->dev_private;
- int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
+ size_t wanted_size = count * size;
spin_lock_init(&udl->urbs.lock);
+retry:
udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
- while (i < count) {
+ sema_init(&udl->urbs.limit_sem, 0);
+ udl->urbs.count = 0;
+ udl->urbs.available = 0;
+
+ while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
@@ -231,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
+ if (size > PAGE_SIZE) {
+ size /= 2;
+ udl_free_urb_list(dev);
+ goto retry;
+ }
break;
}
@@ -246,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
list_add_tail(&unode->entry, &udl->urbs.list);
- i++;
+ up(&udl->urbs.limit_sem);
+ udl->urbs.count++;
+ udl->urbs.available++;
}
- sema_init(&udl->urbs.limit_sem, i);
- udl->urbs.count = i;
- udl->urbs.available = i;
-
- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
- return i;
+ return udl->urbs.count;
}
struct urb *udl_get_urb(struct drm_device *dev)
@@ -265,7 +267,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
struct list_head *entry;
struct urb_node *unode;
struct urb *urb = NULL;
- unsigned long flags;
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
@@ -276,14 +277,14 @@ struct urb *udl_get_urb(struct drm_device *dev)
goto error;
}
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
entry = udl->urbs.list.next;
list_del_init(entry);
udl->urbs.available--;
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(entry, struct urb_node, entry);
urb = unode->urb;
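
The reworked allocator now degrades gracefully: when a coherent buffer of the requested size cannot be had, it halves the per-URB size (down to PAGE_SIZE) and starts over instead of failing outright. The control flow, reduced to a standalone sketch with a fake budget standing in for usb_alloc_coherent():

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    static size_t budget = 8 * PAGE_SIZE; /* pretend DMA pool, illustration only */

    static bool alloc_one(size_t size)
    {
            if (budget < size)
                    return false;
            budget -= size;
            return true;
    }

    static void free_all(size_t freed)
    {
            budget += freed;
    }

    static size_t alloc_urbs(size_t count, size_t size)
    {
            size_t wanted = count * size;
            size_t allocated = 0;

    retry:
            while (allocated * size < wanted) {
                    if (!alloc_one(size)) {
                            if (size > PAGE_SIZE) {
                                    free_all(allocated * size);
                                    allocated = 0;
                                    size /= 2; /* halve and start over */
                                    goto retry;
                            }
                            break; /* at minimum size; keep what we got */
                    }
                    allocated++;
            }
            return allocated;
    }
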
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 5bcae7649795..7e37765cf5ac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -243,7 +243,7 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
memcpy(buf, udl->mode_buf, udl->mode_buf_len);
retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
- DRM_INFO("write mode info %d\n", udl->mode_buf_len);
+ DRM_DEBUG("write mode info %d\n", udl->mode_buf_len);
return retval;
}
@@ -366,7 +366,6 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
struct drm_device *dev = crtc->dev;
- unsigned long flags;
struct drm_framebuffer *old_fb = crtc->primary->fb;
if (old_fb) {
@@ -377,10 +376,10 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (event)
drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
crtc->primary->fb = fb;
return 0;
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 0c87b1ac6b68..ce87661e544f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
-#include <linux/prefetch.h>
#include <asm/unaligned.h>
#include <drm/drmP.h>
@@ -51,9 +50,6 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
int start = width;
int end = width;
- prefetch((void *) front);
- prefetch((void *) back);
-
for (j = 0; j < width; j++) {
if (back[j] != front[j]) {
start = j;
@@ -83,12 +79,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
((pixel >> 8) & 0xf800));
}
-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
+static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
{
- u16 pixel_val16 = 0;
- if (bpp == 2)
+ u16 pixel_val16;
+ if (log_bpp == 1)
pixel_val16 = *(const uint16_t *)pixel;
- else if (bpp == 4)
+ else
pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
return pixel_val16;
}
@@ -125,8 +121,9 @@ static void udl_compress_hline16(
const u8 *const pixel_end,
uint32_t *device_address_ptr,
uint8_t **command_buffer_ptr,
- const uint8_t *const cmd_buffer_end, int bpp)
+ const uint8_t *const cmd_buffer_end, int log_bpp)
{
+ const int bpp = 1 << log_bpp;
const u8 *pixel = *pixel_start_ptr;
uint32_t dev_addr = *device_address_ptr;
uint8_t *cmd = *command_buffer_ptr;
@@ -139,8 +136,6 @@ static void udl_compress_hline16(
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
uint16_t pixel_val16;
- prefetchw((void *) cmd); /* pull in one cache line at least */
-
*cmd++ = 0xaf;
*cmd++ = 0x6b;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
@@ -153,12 +148,11 @@ static void udl_compress_hline16(
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
- cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
- min((int)(pixel_end - pixel) / bpp,
- (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+ cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
+ (unsigned long)(pixel_end - pixel) >> log_bpp,
+ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
- prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
while (pixel < cmd_pixel_end) {
const u8 *const start = pixel;
@@ -170,7 +164,7 @@ static void udl_compress_hline16(
pixel += bpp;
while (pixel < cmd_pixel_end) {
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
if (pixel_val16 != repeating_pixel_val16)
break;
pixel += bpp;
@@ -179,10 +173,10 @@ static void udl_compress_hline16(
if (unlikely(pixel > start + bpp)) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = (((start -
- raw_pixel_start) / bpp) + 1) & 0xFF;
+ raw_pixel_start) >> log_bpp) + 1) & 0xFF;
/* immediately after raw data is repeat byte */
- *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
+ *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
@@ -192,11 +186,14 @@ static void udl_compress_hline16(
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
- *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+ *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
+ } else {
+ /* undo unused byte */
+ cmd--;
}
- *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
- dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
+ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
+ dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
}
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
@@ -219,19 +216,19 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
- u32 base16 = 0 + (device_byte_offset / bpp) * 2;
+ u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
- BUG_ON(!(bpp == 2 || bpp == 4));
+ BUG_ON(!(log_bpp == 1 || log_bpp == 2));
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
@@ -241,7 +238,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
udl_compress_hline16(&next_pixel,
line_end, &base16,
- (u8 **) &cmd, (u8 *) cmd_end, bpp);
+ (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
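
The min3() expression above clamps each RLX command by three limits at once: the protocol's maximum pixel count, the pixels remaining on the input line, and the command-buffer bytes left (two per raw pixel, reserving one byte for the trailing repeat count). Restated as a hypothetical helper in the same file's terms:

    /* Sketch: end of the pixel span one RLX command may cover. */
    static const u8 *cmd_span_end(const u8 *pixel, const u8 *pixel_end,
                                  const u8 *cmd, const u8 *cmd_buffer_end,
                                  int log_bpp)
    {
            unsigned long by_protocol = MAX_CMD_PIXELS + 1UL;
            unsigned long by_input = (unsigned long)(pixel_end - pixel) >> log_bpp;
            unsigned long by_output = (unsigned long)(cmd_buffer_end - 1 - cmd) / 2;

            return pixel + (min3(by_protocol, by_input, by_output) << log_bpp);
    }
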
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 7b1e2a549a71..54d96518a131 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -227,37 +227,19 @@ v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}
-int v3d_gem_fault(struct vm_fault *vmf)
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct v3d_bo *bo = to_v3d_bo(obj);
- unsigned long pfn;
+ pfn_t pfn;
pgoff_t pgoff;
- int ret;
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = page_to_pfn(bo->pages[pgoff]);
-
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+
+ return vmf_insert_mixed(vma, vmf->address, pfn);
}
int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index cdb582043b4f..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
+ struct drm_sched_rq *rq;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- drm_sched_entity_init(&v3d->queue[i].sched,
- &v3d_priv->sched_entity[i],
- &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
}
file->driver_priv = v3d_priv;
@@ -146,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_entity_fini(&v3d->queue[q].sched,
- &v3d_priv->sched_entity[q]);
+ drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
}
kfree(v3d_priv);
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a043ac3aae98..e6fed696ad86 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -2,6 +2,7 @@
/* Copyright (C) 2015-2018 Broadcom */
#include <linux/reservation.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
@@ -25,7 +26,6 @@ struct v3d_queue_state {
u64 fence_context;
u64 emit_seqno;
- u64 finished_seqno;
};
struct v3d_dev {
@@ -85,6 +85,11 @@ struct v3d_dev {
*/
struct mutex reset_lock;
+ /* Lock taken when creating and pushing the GPU scheduler
+ * jobs, to keep the sched-fence seqnos in order.
+ */
+ struct mutex sched_lock;
+
struct {
u32 num_allocated;
u32 pages_allocated;
@@ -179,6 +184,8 @@ struct v3d_job {
/* GPU virtual addresses of the start/end of the CL job. */
u32 start, end;
+
+ u32 timedout_ctca, timedout_ctra;
};
struct v3d_exec_info {
@@ -248,7 +255,7 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int v3d_gem_fault(struct vm_fault *vmf);
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 087d49c8cb12..50bfcf9a8a1a 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -35,24 +35,7 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
return "v3d-render";
}
-static bool v3d_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
-static bool v3d_fence_signaled(struct dma_fence *fence)
-{
- struct v3d_fence *f = to_v3d_fence(fence);
- struct v3d_dev *v3d = to_v3d_dev(f->dev);
-
- return v3d->queue[f->queue].finished_seqno >= f->seqno;
-}
-
const struct dma_fence_ops v3d_fence_ops = {
.get_driver_name = v3d_fence_get_driver_name,
.get_timeline_name = v3d_fence_get_timeline_name,
- .enable_signaling = v3d_fence_enable_signaling,
- .signaled = v3d_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..5ce24098a5fd 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,9 +550,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
+ mutex_lock(&v3d->sched_lock);
if (exec->bin.start != exec->bin.end) {
ret = drm_sched_job_init(&exec->bin.base,
- &v3d->queue[V3D_BIN].sched,
&v3d_priv->sched_entity[V3D_BIN],
v3d_priv);
if (ret)
@@ -567,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
ret = drm_sched_job_init(&exec->render.base,
- &v3d->queue[V3D_RENDER].sched,
&v3d_priv->sched_entity[V3D_RENDER],
v3d_priv);
if (ret)
@@ -576,6 +575,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
kref_get(&exec->refcount); /* put by scheduler job completion */
drm_sched_entity_push_job(&exec->render.base,
&v3d_priv->sched_entity[V3D_RENDER]);
+ mutex_unlock(&v3d->sched_lock);
v3d_attach_object_fences(exec);
@@ -594,6 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
return 0;
fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
v3d_exec_put(exec);
@@ -615,6 +616,7 @@ v3d_gem_init(struct drm_device *dev)
spin_lock_init(&v3d->job_lock);
mutex_init(&v3d->bo_lock);
mutex_init(&v3d->reset_lock);
+ mutex_init(&v3d->sched_lock);
/* Note: We don't allocate address 0. Various bits of HW
* treat 0 as special, such as the occlusion query counters
@@ -650,17 +652,14 @@ void
v3d_gem_destroy(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
- enum v3d_queue q;
v3d_sched_fini(v3d);
/* Waiting for exec to finish would need to be done before
* unregistering V3D.
*/
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- WARN_ON(v3d->queue[q].emit_seqno !=
- v3d->queue[q].finished_seqno);
- }
+ WARN_ON(v3d->bin_job);
+ WARN_ON(v3d->render_job);
drm_mm_takedown(&v3d->mm);
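
The new sched_lock matters because drm_sched_job_init() assigns the job's fence seqno: if two submitters interleave init and push on the same entity, fences reach the queue out of seqno order. A sketch of the invariant on the submit path (error handling elided):

    mutex_lock(&v3d->sched_lock);

    ret = drm_sched_job_init(&exec->render.base,
                             &v3d_priv->sched_entity[V3D_RENDER], v3d_priv);
    if (!ret)
            drm_sched_entity_push_job(&exec->render.base,
                                      &v3d_priv->sched_entity[V3D_RENDER]);

    mutex_unlock(&v3d->sched_lock);
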
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 77e1fa046c10..e07514eb11b5 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -87,15 +87,12 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- v3d->queue[V3D_BIN].finished_seqno++;
dma_fence_signal(v3d->bin_job->bin.done_fence);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- v3d->queue[V3D_RENDER].finished_seqno++;
dma_fence_signal(v3d->render_job->render.done_fence);
-
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index fc13282dfc2f..854046565989 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -222,6 +222,7 @@
#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n)
#define V3D_CLE_CT0RA 0x00118
#define V3D_CLE_CT1RA 0x0011c
+#define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n)
#define V3D_CLE_CT0LC 0x00120
#define V3D_CLE_CT1LC 0x00124
#define V3D_CLE_CT0PC 0x00128
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index b07bece9417d..a5501581d96b 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -14,8 +14,8 @@
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
* v3d_job_dependency() to manage the dependency between bin and
- * render, instead of having the clients submit jobs with using the
- * HW's semaphores to interlock between them.
+ * render, instead of having the clients submit jobs using the HW's
+ * semaphores to interlock between them.
*/
#include <linux/kthread.h>
@@ -114,8 +114,8 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, q);
- if (!fence)
- return fence;
+ if (IS_ERR(fence))
+ return NULL;
if (job->done_fence)
dma_fence_put(job->done_fence);
@@ -153,7 +153,25 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_exec_info *exec = job->exec;
struct v3d_dev *v3d = exec->v3d;
+ enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
enum v3d_queue q;
+ u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+ u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+ /* If the current address or return address have changed, then
+ * the GPU has probably made progress and we should delay the
+ * reset. This could fail if the GPU got in an infinite loop
+ * in the CL, but that is pretty unlikely outside of an i-g-t
+ * testcase.
+ */
+ if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+ job->timedout_ctca = ctca;
+ job->timedout_ctra = ctra;
+
+ schedule_delayed_work(&job->base.work_tdr,
+ job->base.sched->timeout);
+ return;
+ }
mutex_lock(&v3d->reset_lock);
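
The progress test could equally be factored into a small predicate; a hypothetical refactoring:

    /* Sample the CL current/return addresses at each timeout; report
     * progress (and remember the new values) so the caller can re-arm
     * the timer instead of resetting the GPU. */
    static bool v3d_job_made_progress(struct v3d_job *job, u32 ctca, u32 ctra)
    {
            bool moved = job->timedout_ctca != ctca ||
                         job->timedout_ctra != ctra;

            job->timedout_ctca = ctca;
            job->timedout_ctra = ctra;
            return moved;
    }
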
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 4a3a868235f8..b303703bc7f3 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -19,6 +19,7 @@ vc4-y := \
vc4_plane.o \
vc4_render_cl.o \
vc4_trace_points.o \
+ vc4_txp.o \
vc4_v3d.o \
vc4_validate.o \
vc4_validate_shaders.o
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index add9cc97a3b6..8dcce7182bb7 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -721,7 +721,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
return dmabuf;
}
-int vc4_fault(struct vm_fault *vmf)
+vm_fault_t vc4_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index c8650bbcbcb3..0e6a121858d1 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -46,6 +46,8 @@ struct vc4_crtc_state {
struct drm_crtc_state base;
/* Dlist area for this CRTC configuration. */
struct drm_mm_node mm;
+ bool feed_txp;
+ bool txp_armed;
};
static inline struct vc4_crtc_state *
@@ -324,10 +326,8 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
return NULL;
}
-static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void vc4_crtc_config_pv(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
@@ -338,12 +338,6 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
- bool debug_dump_regs = false;
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
- }
/* Reset the PV fifo. */
CRTC_WRITE(PV_CONTROL, 0);
@@ -419,6 +413,49 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
PV_CONTROL_CLK_SELECT) |
PV_CONTROL_FIFO_CLR |
PV_CONTROL_EN);
+}
+
+static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ bool debug_dump_regs = false;
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
+ vc4_crtc_dump_regs(vc4_crtc);
+ }
+
+ if (vc4_crtc->channel == 2) {
+ u32 dispctrl;
+ u32 dsp3_mux;
+
+ /*
+ * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
+ * FIFO X'.
+ * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
+ *
+ * DSP3 is connected to FIFO2 unless the transposer is
+ * enabled. In this case, FIFO 2 is directly accessed by the
+ * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
+ * route.
+ */
+ if (vc4_state->feed_txp)
+ dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
+ else
+ dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL) &
+ ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
+ }
+
+ if (!vc4_state->feed_txp)
+ vc4_crtc_config_pv(crtc);
HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
SCALER_DISPBKGND_AUTOHS |
@@ -499,6 +536,13 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
}
}
+void vc4_crtc_txp_armed(struct drm_crtc_state *state)
+{
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+
+ vc4_state->txp_armed = true;
+}
+
static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -514,8 +558,11 @@ static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
- vc4_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
+
+ if (!vc4_state->feed_txp || vc4_state->txp_armed) {
+ vc4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
vc4_state->mm.start);
@@ -533,8 +580,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- struct drm_crtc_state *state = crtc->state;
- struct drm_display_mode *mode = &state->adjusted_mode;
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
require_hvs_enabled(dev);
@@ -546,15 +593,21 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
/* Turn on the scaler, which will wait for vstart to start
* compositing.
+ * When feeding the transposer, we should operate in oneshot
+ * mode.
*/
HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel),
VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) |
- SCALER_DISPCTRLX_ENABLE);
+ SCALER_DISPCTRLX_ENABLE |
+ (vc4_state->feed_txp ? SCALER_DISPCTRLX_ONESHOT : 0));
- /* Turn on the pixel valve, which will emit the vstart signal. */
- CRTC_WRITE(PV_V_CONTROL,
- CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+ /* When feeding the transposer block the pixelvalve is unneeded and
+ * should not be enabled.
+ */
+ if (!vc4_state->feed_txp)
+ CRTC_WRITE(PV_V_CONTROL,
+ CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -579,8 +632,10 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
unsigned long flags;
const struct drm_plane_state *plane_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
u32 dlist_count = 0;
- int ret;
+ int ret, i;
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
@@ -600,6 +655,24 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
+ for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+ if (conn_state->crtc != crtc)
+ continue;
+
+ /* The writeback connector is implemented using the transposer
+ * block, which takes its data directly from the HVS FIFO.
+ */
+ if (conn->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) {
+ state->no_vblank = true;
+ vc4_state->feed_txp = true;
+ } else {
+ state->no_vblank = false;
+ vc4_state->feed_txp = false;
+ }
+
+ break;
+ }
+
return 0;
}
@@ -713,7 +786,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (vc4_crtc->event &&
- (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) ||
+ vc4_state->feed_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -721,6 +795,13 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc)
+{
+ crtc->t_vblank = ktime_get();
+ drm_crtc_handle_vblank(&crtc->base);
+ vc4_crtc_handle_page_flip(crtc);
+}
+
static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
{
struct vc4_crtc *vc4_crtc = data;
@@ -728,10 +809,8 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
if (stat & PV_INT_VFP_START) {
- vc4_crtc->t_vblank = ktime_get();
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
- drm_crtc_handle_vblank(&vc4_crtc->base);
- vc4_crtc_handle_page_flip(vc4_crtc);
+ vc4_crtc_handle_vblank(vc4_crtc);
ret = IRQ_HANDLED;
}
@@ -862,7 +941,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
* is released.
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
- plane->fb = fb;
vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
vc4_async_page_flip_complete);
@@ -885,12 +963,15 @@ static int vc4_page_flip(struct drm_crtc *crtc,
static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct vc4_crtc_state *vc4_state;
+ struct vc4_crtc_state *vc4_state, *old_vc4_state;
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
return NULL;
+ old_vc4_state = to_vc4_crtc_state(crtc->state);
+ vc4_state->feed_txp = old_vc4_state->feed_txp;
+
__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
return &vc4_state->base;
}
@@ -988,9 +1069,17 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, drm) {
- struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ struct vc4_encoder *vc4_encoder;
int i;
+ /* HVS FIFO2 can feed the TXP IP. */
+ if (crtc_data->hvs_channel == 2 &&
+ encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+ encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ continue;
+ }
+
+ vc4_encoder = to_vc4_encoder(encoder);
for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
if (vc4_encoder->type == encoder_types[i]) {
vc4_encoder->clock_select = i;
@@ -1057,7 +1146,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
&vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
- primary_plane->crtc = crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
@@ -1083,7 +1171,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(plane))
continue;
- plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+ plane->possible_crtcs = drm_crtc_mask(crtc);
}
/* Set up the legacy cursor after overlay initialization,
@@ -1092,8 +1180,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
*/
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
if (!IS_ERR(cursor_plane)) {
- cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
- cursor_plane->crtc = crtc;
+ cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
crtc->cursor = cursor_plane;
}
@@ -1121,7 +1208,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
err_destroy_planes:
list_for_each_entry_safe(destroy_plane, temp,
&drm->mode_config.plane_list, head) {
- if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc))
+ if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc))
destroy_plane->funcs->destroy(destroy_plane);
}
err:
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 5db06bdb5f27..7a0003de71ab 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -21,6 +21,7 @@ static const struct drm_info_list vc4_debugfs_list[] = {
{"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
{"vec_regs", vc4_vec_debugfs_regs, 0},
+ {"txp_regs", vc4_txp_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
{"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 466d0a27b415..04270a14fcaa 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -288,7 +288,7 @@ static int vc4_drm_bind(struct device *dev)
ret = vc4_bo_cache_init(drm);
if (ret)
- goto dev_unref;
+ goto dev_put;
drm_mode_config_init(drm);
@@ -313,8 +313,8 @@ unbind_all:
gem_destroy:
vc4_gem_destroy(drm);
vc4_bo_cache_destroy(drm);
-dev_unref:
- drm_dev_unref(drm);
+dev_put:
+ drm_dev_put(drm);
return ret;
}
@@ -331,7 +331,7 @@ static void vc4_drm_unbind(struct device *dev)
drm_atomic_private_obj_fini(&vc4->ctm_manager);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops vc4_drm_ops = {
@@ -344,6 +344,7 @@ static struct platform_driver *const component_drivers[] = {
&vc4_vec_driver,
&vc4_dpi_driver,
&vc4_dsi_driver,
+ &vc4_txp_driver,
&vc4_hvs_driver,
&vc4_crtc_driver,
&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 554a4e810d5b..bd6ef1f31822 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/mm_types.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
@@ -72,6 +73,7 @@ struct vc4_dev {
struct vc4_dpi *dpi;
struct vc4_dsi *dsi1;
struct vc4_vec *vec;
+ struct vc4_txp *txp;
struct vc4_hang_state *hang_state;
@@ -674,7 +676,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vc4_fault(struct vm_fault *vmf);
+vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -697,6 +699,8 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+void vc4_crtc_txp_armed(struct drm_crtc_state *state);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
@@ -744,6 +748,10 @@ int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
+/* vc4_txp.c */
+extern struct platform_driver vc4_txp_driver;
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);
+
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 8aa897835118..0c607eb33d7e 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -814,7 +814,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
+ drm_bridge_disable(dsi->bridge);
vc4_dsi_ulps(dsi, true);
+ drm_bridge_post_disable(dsi->bridge);
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -1089,21 +1091,6 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
/* Display reset sequence timeout */
DSI_PORT_WRITE(PR_TO_CNT, 100000);
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- DSI_PORT_WRITE(DISP0_CTRL,
- VC4_SET_FIELD(dsi->divider,
- DSI_DISP0_PIX_CLK_DIV) |
- VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
- VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
- DSI_DISP0_LP_STOP_CTRL) |
- DSI_DISP0_ST_END |
- DSI_DISP0_ENABLE);
- } else {
- DSI_PORT_WRITE(DISP0_CTRL,
- DSI_DISP0_COMMAND_MODE |
- DSI_DISP0_ENABLE);
- }
-
/* Set up DISP1 for transferring long command payloads through
* the pixfifo.
*/
@@ -1128,6 +1115,25 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
+ drm_bridge_pre_enable(dsi->bridge);
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ VC4_SET_FIELD(dsi->divider,
+ DSI_DISP0_PIX_CLK_DIV) |
+ VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
+ VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
+ DSI_DISP0_LP_STOP_CTRL) |
+ DSI_DISP0_ST_END |
+ DSI_DISP0_ENABLE);
+ } else {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ DSI_DISP0_COMMAND_MODE |
+ DSI_DISP0_ENABLE);
+ }
+
+ drm_bridge_enable(dsi->bridge);
+
if (debug_dump_regs) {
DRM_INFO("DSI regs after:\n");
vc4_dsi_dump_regs(dsi);
@@ -1606,8 +1612,18 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&panel, &dsi->bridge);
- if (ret)
+ if (ret) {
+ /* If the bridge or panel pointed to by dev->of_node is not
+ * enabled, just return 0 here so that we don't prevent the DRM
+ * dev from being registered. Of course that means the DSI
+ * encoder won't be exposed, but that's not a problem since
+ * nothing is connected to it.
+ */
+ if (ret == -ENODEV)
+ return 0;
+
return ret;
+ }
if (panel) {
dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
@@ -1639,6 +1655,12 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
dev_err(dev, "bridge attach failed: %d\n", ret);
return ret;
}
+ /* Disable the atomic helper calls into the bridge. We
+ * call the bridge pre_enable / enable / etc. hooks manually
+ * from our driver, since we need to sequence them within the
+ * encoder's enable/disable paths.
+ */
+ dsi->encoder->bridge = NULL;
pm_runtime_enable(dev);
@@ -1652,7 +1674,8 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- pm_runtime_disable(dev);
+ if (dsi->bridge)
+ pm_runtime_disable(dev);
vc4_dsi_encoder_destroy(dsi->encoder);
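
Clearing encoder->bridge keeps the atomic helpers from driving the bridge, so the encoder paths can interleave the bridge hooks with ULPS transitions and DISP0 programming themselves. The enable side in outline (a sketch, not the full function):

    vc4_dsi_ulps(dsi, false);            /* leave ULPS before touching the bridge */
    drm_bridge_pre_enable(dsi->bridge);  /* bridge ready before video starts */
    /* ... program DISP0_CTRL and start the video stream here ... */
    drm_bridge_enable(dsi->bridge);      /* bridge now sees a live stream */
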
diff --git a/drivers/gpu/drm/vc4/vc4_fence.c b/drivers/gpu/drm/vc4/vc4_fence.c
index dbf5a5a5d5f5..580214e2158c 100644
--- a/drivers/gpu/drm/vc4/vc4_fence.c
+++ b/drivers/gpu/drm/vc4/vc4_fence.c
@@ -33,11 +33,6 @@ static const char *vc4_fence_get_timeline_name(struct dma_fence *fence)
return "vc4-v3d";
}
-static bool vc4_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool vc4_fence_signaled(struct dma_fence *fence)
{
struct vc4_fence *f = to_vc4_fence(fence);
@@ -49,8 +44,5 @@ static bool vc4_fence_signaled(struct dma_fence *fence)
const struct dma_fence_ops vc4_fence_ops = {
.get_driver_name = vc4_fence_get_driver_name,
.get_timeline_name = vc4_fence_get_timeline_name,
- .enable_signaling = vc4_fence_enable_signaling,
.signaled = vc4_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b8d50533e2bb..fd5522fd179e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -285,7 +285,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
drm_rgb_quant_range_selectable(edid);
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -329,7 +329,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 8a411e5f8776..ca5aa7fba769 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -153,18 +153,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
- /* Make sure that drm_atomic_helper_wait_for_vblanks()
- * actually waits for vblank. If we're doing a full atomic
- * modeset (as opposed to a vc4_update_plane() short circuit),
- * then we need to wait for scanout to be done with our
- * display lists before we free it and potentially reallocate
- * and overwrite the dlist memory with a new modeset.
- */
- state->legacy_cursor_update = false;
+ drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 1d34619eb3fe..cfb50fedfa2b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ } else {
+ vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+ vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
@@ -467,12 +470,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+ u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = drm_format_num_planes(format->drm);
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
u32 lbm_size, tiling;
unsigned long irqflags;
+ u32 hvs_format = format->hvs;
int ret, i;
ret = vc4_plane_setup_clipping_and_scaling(state);
@@ -512,7 +517,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
- switch (fb->modifier) {
+ switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
@@ -535,6 +540,49 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
break;
}
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+ uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+
+ /* Column-based NV12 or RGBA.
+ */
+ if (fb->format->num_planes > 1) {
+ if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
+ DRM_DEBUG_KMS("SAND format only valid for NV12/21");
+ return -EINVAL;
+ }
+ hvs_format = HVS_PIXEL_FORMAT_H264;
+ } else {
+ if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
+ DRM_DEBUG_KMS("SAND256 format only valid for H.264");
+ return -EINVAL;
+ }
+ }
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tiling = SCALER_CTL0_TILING_64B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER_CTL0_TILING_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER_CTL0_TILING_256B_OR_T;
+ break;
+ default:
+ break;
+ }
+
+ if (param > SCALER_TILE_HEIGHT_MASK) {
+ DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+ return -EINVAL;
+ }
+
+ pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
+ break;
+ }
+
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
@@ -544,8 +592,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
+ VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
- (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+ (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
@@ -607,8 +656,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+ if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i],
+ SCALER_SRC_PITCH));
+ } else {
+ vc4_dlist_write(vc4_state, pitch0);
+ }
}
/* Colorspace conversion words */
@@ -810,18 +864,21 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct dma_fence *fence;
int ret;
- if ((plane->state->fb == state->fb) || !state->fb)
+ if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ fence = reservation_object_get_excl_rcu(bo->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+
+ if (plane->state->fb == state->fb)
+ return 0;
+
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
- fence = reservation_object_get_excl_rcu(bo->resv);
- drm_atomic_set_fence_for_plane(state, fence);
-
return 0;
}
@@ -848,7 +905,7 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
static void vc4_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
@@ -866,13 +923,32 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
case DRM_FORMAT_BGR565:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- return true;
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ return true;
+ default:
+ return false;
+ }
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ return true;
+ default:
+ return false;
+ }
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
default:
return (modifier == DRM_FORMAT_MOD_LINEAR);
}
@@ -900,6 +976,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+ DRM_FORMAT_MOD_BROADCOM_SAND128,
+ DRM_FORMAT_MOD_BROADCOM_SAND64,
+ DRM_FORMAT_MOD_BROADCOM_SAND256,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
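
The SAND modifiers carry the column height in the modifier's parameter bits, which is why the plane code splits fb->modifier with fourcc_mod_broadcom_mod() and fourcc_mod_broadcom_param(). A userspace-side sketch (96 is an arbitrary example column height):

    #include <stdint.h>
    #include <drm/drm_fourcc.h>

    /* Encode: a SAND128 buffer whose columns are 96 lines tall. */
    uint64_t mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

    /* Decode, as vc4_plane_mode_set() does: */
    uint64_t base = fourcc_mod_broadcom_mod(mod);  /* DRM_FORMAT_MOD_BROADCOM_SAND128 */
    int height = fourcc_mod_broadcom_param(mod);   /* 96 */
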
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d1fb6fec46eb..d6864fa4bd14 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -1031,6 +1031,12 @@ enum hvs_pixel_format {
#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
#define SCALER_SRC_PITCH_SHIFT 0
+/* PITCH0/1/2 fields for tiled (SAND). */
+#define SCALER_TILE_SKIP_0_MASK VC4_MASK(18, 16)
+#define SCALER_TILE_SKIP_0_SHIFT 16
+#define SCALER_TILE_HEIGHT_MASK VC4_MASK(15, 0)
+#define SCALER_TILE_HEIGHT_SHIFT 0
+
/* PITCH0 fields for T-tiled. */
#define SCALER_PITCH0_TILE_WIDTH_L_MASK VC4_MASK(22, 16)
#define SCALER_PITCH0_TILE_WIDTH_L_SHIFT 16
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
new file mode 100644
index 000000000000..6e23c50168f9
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Broadcom
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_writeback.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+/* Base address of the output. Raster formats must be 4-byte aligned,
+ * T and LT must be 16-byte aligned or maybe utile-aligned (docs are
+ * inconsistent, but probably utile).
+ */
+#define TXP_DST_PTR 0x00
+
+/* Pitch in bytes for raster images, 16-byte aligned. For tiled, it's
+ * the width in tiles.
+ */
+#define TXP_DST_PITCH 0x04
+/* For T-tiled images, DST_PITCH should be the number of tiles wide,
+ * shifted up.
+ */
+# define TXP_T_TILE_WIDTH_SHIFT 7
+/* For LT-tiled images, DST_PITCH should be the number of utiles wide,
+ * shifted up.
+ */
+# define TXP_LT_TILE_WIDTH_SHIFT 4
+
+/* Pre-rotation width/height of the image. Must match HVS config.
+ *
+ * If TFORMAT is set, the width limit is 1920 for 32-bit and 3840 for 16-bit,
+ * and width/height must be tile or utile-aligned as appropriate. If
+ * transposing (rotating), width is limited to 1920.
+ *
+ * Height is limited to various numbers between 4088 and 4095. I'd
+ * just use 4088 to be safe.
+ */
+#define TXP_DIM 0x08
+# define TXP_HEIGHT_SHIFT 16
+# define TXP_HEIGHT_MASK GENMASK(31, 16)
+# define TXP_WIDTH_SHIFT 0
+# define TXP_WIDTH_MASK GENMASK(15, 0)
+
+#define TXP_DST_CTRL 0x0c
+/* These bits are set to 0x54 */
+#define TXP_PILOT_SHIFT 24
+#define TXP_PILOT_MASK GENMASK(31, 24)
+/* Bits 22-23 are set to 0x01 */
+#define TXP_VERSION_SHIFT 22
+#define TXP_VERSION_MASK GENMASK(23, 22)
+
+/* Powers down the internal memory. */
+# define TXP_POWERDOWN BIT(21)
+
+/* Enables storing the alpha component in 8888/4444, instead of
+ * filling with ~ALPHA_INVERT.
+ */
+# define TXP_ALPHA_ENABLE BIT(20)
+
+/* 4 bits, each enables stores for a channel in each set of 4 bytes.
+ * Set to 0xf for normal operation.
+ */
+# define TXP_BYTE_ENABLE_SHIFT 16
+# define TXP_BYTE_ENABLE_MASK GENMASK(19, 16)
+
+/* Debug: Generate VSTART again at EOF. */
+# define TXP_VSTART_AT_EOF BIT(15)
+
+/* Debug: Terminate the current frame immediately. Stops AXI
+ * writes.
+ */
+# define TXP_ABORT BIT(14)
+
+# define TXP_DITHER BIT(13)
+
+/* Inverts alpha if TXP_ALPHA_ENABLE, chooses fill value for
+ * !TXP_ALPHA_ENABLE.
+ */
+# define TXP_ALPHA_INVERT BIT(12)
+
+/* Note: I've listed the channels here in high bit (in byte 3/2/1) to
+ * low bit (in byte 0) order.
+ */
+# define TXP_FORMAT_SHIFT 8
+# define TXP_FORMAT_MASK GENMASK(11, 8)
+# define TXP_FORMAT_ABGR4444 0
+# define TXP_FORMAT_ARGB4444 1
+# define TXP_FORMAT_BGRA4444 2
+# define TXP_FORMAT_RGBA4444 3
+# define TXP_FORMAT_BGR565 6
+# define TXP_FORMAT_RGB565 7
+/* 888s are non-rotated, raster-only */
+# define TXP_FORMAT_BGR888 8
+# define TXP_FORMAT_RGB888 9
+# define TXP_FORMAT_ABGR8888 12
+# define TXP_FORMAT_ARGB8888 13
+# define TXP_FORMAT_BGRA8888 14
+# define TXP_FORMAT_RGBA8888 15
+
+/* If TFORMAT is set, generates LT instead of T format. */
+# define TXP_LINEAR_UTILE BIT(7)
+
+/* Rotate output by 90 degrees. */
+# define TXP_TRANSPOSE BIT(6)
+
+/* Generate a tiled format for V3D. */
+# define TXP_TFORMAT BIT(5)
+
+/* Generates some undefined test mode output. */
+# define TXP_TEST_MODE BIT(4)
+
+/* Request odd field from HVS. */
+# define TXP_FIELD BIT(3)
+
+/* Raise interrupt when idle. */
+# define TXP_EI BIT(2)
+
+/* Set when generating a frame, clears when idle. */
+# define TXP_BUSY BIT(1)
+
+/* Starts a frame. Self-clearing. */
+# define TXP_GO BIT(0)
+
+/* Number of lines received and committed to memory. */
+#define TXP_PROGRESS 0x10
+
+#define TXP_READ(offset) readl(txp->regs + (offset))
+#define TXP_WRITE(offset, val) writel(val, txp->regs + (offset))
+
+struct vc4_txp {
+ struct platform_device *pdev;
+
+ struct drm_writeback_connector connector;
+
+ void __iomem *regs;
+};
+
+static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_txp, connector.encoder);
+}
+
+static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
+{
+ return container_of(conn, struct vc4_txp, connector.base);
+}
+
+#define TXP_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} txp_regs[] = {
+ TXP_REG(TXP_DST_PTR),
+ TXP_REG(TXP_DST_PITCH),
+ TXP_REG(TXP_DIM),
+ TXP_REG(TXP_DST_CTRL),
+ TXP_REG(TXP_PROGRESS),
+};
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_txp *txp = vc4->txp;
+ int i;
+
+ if (!txp)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(txp_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ txp_regs[i].name, txp_regs[i].reg,
+ TXP_READ(txp_regs[i].reg));
+ }
+
+ return 0;
+}
+#endif
+
+static int vc4_txp_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+vc4_txp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int w = mode->hdisplay, h = mode->vdisplay;
+
+ if (w < mode_config->min_width || w > mode_config->max_width)
+ return MODE_BAD_HVALUE;
+
+ if (h < mode_config->min_height || h > mode_config->max_height)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
+static const u32 drm_fmts[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+};
+
+static const u32 txp_fmts[] = {
+ TXP_FORMAT_RGB888,
+ TXP_FORMAT_BGR888,
+ TXP_FORMAT_ARGB8888,
+ TXP_FORMAT_ABGR8888,
+ TXP_FORMAT_ARGB8888,
+ TXP_FORMAT_ABGR8888,
+ TXP_FORMAT_RGBA8888,
+ TXP_FORMAT_BGRA8888,
+ TXP_FORMAT_RGBA8888,
+ TXP_FORMAT_BGRA8888,
+};
+
+static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_gem_cma_object *gem;
+ struct drm_framebuffer *fb;
+ int i;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
+ conn_state->crtc);
+
+ fb = conn_state->writeback_job->fb;
+ if (fb->width != crtc_state->mode.hdisplay ||
+ fb->height != crtc_state->mode.vdisplay) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+ if (fb->format->format == drm_fmts[i])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(drm_fmts))
+ return -EINVAL;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ /* Pitch must be aligned to 16 bytes. */
+ if (fb->pitches[0] & GENMASK(3, 0))
+ return -EINVAL;
+
+ vc4_crtc_txp_armed(crtc_state);
+
+ return 0;
+}
+
+static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
+ struct drm_connector_state *conn_state)
+{
+ struct vc4_txp *txp = connector_to_vc4_txp(conn);
+ struct drm_gem_cma_object *gem;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ u32 ctrl;
+ int i;
+
+ if (WARN_ON(!conn_state->writeback_job ||
+ !conn_state->writeback_job->fb))
+ return;
+
+ mode = &conn_state->crtc->state->adjusted_mode;
+ fb = conn_state->writeback_job->fb;
+
+ for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+ if (fb->format->format == drm_fmts[i])
+ break;
+ }
+
+ if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
+ return;
+
+ ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
+ VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
+
+ if (fb->format->has_alpha)
+ ctrl |= TXP_ALPHA_ENABLE;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+ TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
+ TXP_WRITE(TXP_DIM,
+ VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
+ VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
+
+ TXP_WRITE(TXP_DST_CTRL, ctrl);
+
+ drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
+}
+
+static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
+ .get_modes = vc4_txp_connector_get_modes,
+ .mode_valid = vc4_txp_connector_mode_valid,
+ .atomic_check = vc4_txp_connector_atomic_check,
+ .atomic_commit = vc4_txp_connector_atomic_commit,
+};
+
+static enum drm_connector_status
+vc4_txp_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void vc4_txp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vc4_txp_connector_funcs = {
+ .detect = vc4_txp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vc4_txp_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
+{
+ struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
+
+ if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_ABORT);
+
+ while (TXP_READ(TXP_DST_CTRL) & TXP_BUSY &&
+ time_before(jiffies, timeout))
+ ;
+
+ WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
+ }
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+}
+
+static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
+ .disable = vc4_txp_encoder_disable,
+};
+
+static irqreturn_t vc4_txp_interrupt(int irq, void *data)
+{
+ struct vc4_txp *txp = data;
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
+ vc4_crtc_handle_vblank(to_vc4_crtc(txp->connector.base.state->crtc));
+ drm_writeback_signal_completion(&txp->connector, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_txp *txp;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ txp = devm_kzalloc(dev, sizeof(*txp), GFP_KERNEL);
+ if (!txp)
+ return -ENOMEM;
+
+ txp->pdev = pdev;
+
+ txp->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(txp->regs))
+ return PTR_ERR(txp->regs);
+
+ drm_connector_helper_add(&txp->connector.base,
+ &vc4_txp_connector_helper_funcs);
+ ret = drm_writeback_connector_init(drm, &txp->connector,
+ &vc4_txp_connector_funcs,
+ &vc4_txp_encoder_helper_funcs,
+ drm_fmts, ARRAY_SIZE(drm_fmts));
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
+ dev_name(dev), txp);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, txp);
+ vc4->txp = txp;
+
+ return 0;
+}
+
+static void vc4_txp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_txp *txp = dev_get_drvdata(dev);
+
+ vc4_txp_connector_destroy(&txp->connector.base);
+
+ vc4->txp = NULL;
+}
+
+static const struct component_ops vc4_txp_ops = {
+ .bind = vc4_txp_bind,
+ .unbind = vc4_txp_unbind,
+};
+
+static int vc4_txp_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_txp_ops);
+}
+
+static int vc4_txp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_txp_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_txp_dt_match[] = {
+ { .compatible = "brcm,bcm2835-txp" },
+ { /* sentinel */ },
+};
+
+struct platform_driver vc4_txp_driver = {
+ .probe = vc4_txp_probe,
+ .remove = vc4_txp_remove,
+ .driver = {
+ .name = "vc4_txp",
+ .of_match_table = vc4_txp_dt_match,
+ },
+};
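
The abort path in vc4_txp_encoder_disable() above open-codes a bounded busy-wait on TXP_DST_CTRL. The same wait could also be expressed with the readl_poll_timeout() helper from <linux/iopoll.h>; a minimal sketch, reusing the register macros from the driver above and assuming txp->regs is the ioremapped register base (vc4_txp_wait_idle is a hypothetical helper, not part of the patch):

#include <linux/iopoll.h>

/* Poll TXP_DST_CTRL until TXP_BUSY clears, sleeping ~1us between reads,
 * for at most one second. Returns -ETIMEDOUT if the block stays busy.
 * Only usable in sleepable context, which encoder .disable is.
 */
static int vc4_txp_wait_idle(struct vc4_txp *txp)
{
	u32 ctrl;

	return readl_poll_timeout(txp->regs + TXP_DST_CTRL, ctrl,
				  !(ctrl & TXP_BUSY), 1, 1000000);
}
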
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 3a9a302247a2..8e7facb6514e 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -404,7 +404,7 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
VC4_VEC_TV_MODE_NTSC);
vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
- drm_mode_connector_attach_encoder(connector, vec->encoder);
+ drm_connector_attach_encoder(connector, vec->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 2524ff116f00..0e5620f76ee0 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -61,23 +61,22 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
kfree(vgem_obj);
}
-static int vgem_gem_fault(struct vm_fault *vmf)
+static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
loff_t num_pages;
pgoff_t page_offset;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
- if (page_offset > num_pages)
+ if (page_offset >= num_pages)
return VM_FAULT_SIGBUS;
- ret = -ENOENT;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
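
Two separate fixes land in the vgem hunk above: the fault handler's return type becomes the dedicated vm_fault_t (so errno values like -ENOENT no longer masquerade as fault codes), and the bounds check tightens from > to >=. The second change matters because page_offset is a zero-based index into obj->pages, so the valid range is [0, num_pages); with the old >, a fault landing exactly at offset num_pages would have read one entry past the end of the array.
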
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a5edd86603d9..25503b933599 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -28,6 +28,7 @@
#include "virtgpu_drv.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#define XRES_MIN 32
#define YRES_MIN 32
@@ -48,16 +49,6 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb
- = to_virtio_gpu_framebuffer(fb);
-
- drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(virtio_gpu_fb);
-}
-
static int
virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -71,20 +62,9 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
}
-static int
-virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb =
- to_virtio_gpu_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
-}
-
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
- .create_handle = virtio_gpu_framebuffer_create_handle,
- .destroy = virtio_gpu_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
.dirty = virtio_gpu_framebuffer_surface_dirty,
};
@@ -97,7 +77,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
int ret;
struct virtio_gpu_object *bo;
- vgfb->obj = obj;
+ vgfb->base.obj[0] = obj;
bo = gem_to_virtio_gpu_obj(obj);
@@ -105,7 +85,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
- vgfb->obj = NULL;
+ vgfb->base.obj[0] = NULL;
return ret;
}
@@ -302,8 +282,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
- primary->crtc = crtc;
- cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -314,7 +292,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
drm_connector_register(connector);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d25c8ca224aa..65605e207bbe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -124,7 +124,6 @@ struct virtio_gpu_output {
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 8af69ab58b89..a121b1c79522 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -46,7 +46,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
int bpp = fb->base.format->cpp[0];
int x2, y2;
unsigned long flags;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
if ((width <= 0) ||
(x + width > fb->base.width) ||
@@ -121,7 +121,7 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
unsigned int num_clips)
{
struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
struct drm_clip_rect norect;
struct drm_clip_rect *clips_ptr;
int left, right, top, bottom;
@@ -305,8 +305,8 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&vgfbdev->helper);
- if (vgfb->obj)
- vgfb->obj = NULL;
+ if (vgfb->base.obj[0])
+ vgfb->base.obj[0] = NULL;
drm_fb_helper_fini(&vgfbdev->helper);
drm_framebuffer_cleanup(&vgfb->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 23353521f903..00c742a441bf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -36,11 +36,6 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-static bool virtio_enable_signaling(struct dma_fence *f)
-{
- return true;
-}
-
static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -67,9 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
- .enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 71ba455af915..dc5b5b2b7aab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -154,7 +154,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
@@ -208,7 +208,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
handle = 0;
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
new file mode 100644
index 000000000000..986297da51bf
--- /dev/null
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -0,0 +1,3 @@
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o
+
+obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
new file mode 100644
index 000000000000..875fca662ac0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vkms_output *output = container_of(timer, struct vkms_output,
+ vblank_hrtimer);
+ struct drm_crtc *crtc = &output->crtc;
+ int ret_overrun;
+ bool ret;
+
+ ret = drm_crtc_handle_vblank(crtc);
+ if (!ret)
+ DRM_ERROR("vkms failure on handling vblank");
+
+ ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ output->period_ns);
+
+ return HRTIMER_RESTART;
+}
+
+static int vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ out->period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+static void vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ hrtimer_cancel(&out->vblank_hrtimer);
+}
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ struct vkms_output *output = &vkmsdev->output;
+
+ *vblank_time = output->vblank_hrtimer.node.expires;
+
+ return true;
+}
+
+static const struct drm_crtc_funcs vkms_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = vkms_enable_vblank,
+ .disable_vblank = vkms_disable_vblank,
+};
+
+static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+
+static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_flush = vkms_crtc_atomic_flush,
+ .atomic_enable = vkms_crtc_atomic_enable,
+ .atomic_disable = vkms_crtc_atomic_disable,
+};
+
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &vkms_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+
+ return ret;
+}
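
The period_ns programmed into the hrtimer above comes from vblank->framedur_ns, which drm_calc_timestamping_constants() derives from the active mode. The arithmetic reduces to pixels-per-frame divided by the pixel clock; a sketch of the same computation (frame_duration_ns is illustrative only, the driver correctly relies on the helper):

#include <linux/math64.h>
#include <drm/drm_modes.h>

/* ns per frame = (crtc_htotal * crtc_vtotal) * 1e9 / (crtc_clock * 1e3),
 * with crtc_clock in kHz, i.e. pixels-per-frame * 1e6 / clock_khz.
 */
static u64 frame_duration_ns(const struct drm_display_mode *mode)
{
	u64 pixels = (u64)mode->crtc_htotal * mode->crtc_vtotal;

	return div_u64(pixels * 1000000ULL, mode->crtc_clock);
}

For a 1920x1080@60 mode (htotal 2200, vtotal 1125, clock 148500 kHz) this yields roughly 16,666,666 ns, i.e. the expected ~16.7 ms frame period.
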
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
new file mode 100644
index 000000000000..6e728b825259
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -0,0 +1,156 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "vkms_drv.h"
+
+#define DRIVER_NAME "vkms"
+#define DRIVER_DESC "Virtual Kernel Mode Setting"
+#define DRIVER_DATE "20180514"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static struct vkms_device *vkms_device;
+
+static const struct file_operations vkms_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = drm_gem_mmap,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .release = drm_release,
+};
+
+static const struct vm_operations_struct vkms_gem_vm_ops = {
+ .fault = vkms_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static void vkms_release(struct drm_device *dev)
+{
+ struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
+
+ platform_device_unregister(vkms->platform);
+ drm_atomic_helper_shutdown(&vkms->drm);
+ drm_mode_config_cleanup(&vkms->drm);
+ drm_dev_fini(&vkms->drm);
+}
+
+static struct drm_driver vkms_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
+ .release = vkms_release,
+ .fops = &vkms_driver_fops,
+ .dumb_create = vkms_dumb_create,
+ .dumb_map_offset = vkms_dumb_map,
+ .gem_vm_ops = &vkms_gem_vm_ops,
+ .gem_free_object_unlocked = vkms_gem_free_object,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static const struct drm_mode_config_funcs vkms_mode_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int vkms_modeset_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = &vkms_mode_funcs;
+ dev->mode_config.min_width = XRES_MIN;
+ dev->mode_config.min_height = YRES_MIN;
+ dev->mode_config.max_width = XRES_MAX;
+ dev->mode_config.max_height = YRES_MAX;
+
+ return vkms_output_init(vkmsdev);
+}
+
+static int __init vkms_init(void)
+{
+ int ret;
+
+ vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL);
+ if (!vkms_device)
+ return -ENOMEM;
+
+ ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL);
+ if (ret)
+ goto out_free;
+
+ vkms_device->platform =
+ platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ if (IS_ERR(vkms_device->platform)) {
+ ret = PTR_ERR(vkms_device->platform);
+ goto out_fini;
+ }
+
+ vkms_device->drm.irq_enabled = true;
+
+ ret = drm_vblank_init(&vkms_device->drm, 1);
+ if (ret) {
+ DRM_ERROR("Failed to vblank\n");
+ goto out_fini;
+ }
+
+ ret = vkms_modeset_init(vkms_device);
+ if (ret)
+ goto out_unregister;
+
+ ret = drm_dev_register(&vkms_device->drm, 0);
+ if (ret)
+ goto out_unregister;
+
+ return 0;
+
+out_unregister:
+ platform_device_unregister(vkms_device->platform);
+
+out_fini:
+ drm_dev_fini(&vkms_device->drm);
+
+out_free:
+ kfree(vkms_device);
+ return ret;
+}
+
+static void __exit vkms_exit(void)
+{
+ if (!vkms_device) {
+ DRM_INFO("vkms_device is NULL.\n");
+ return;
+ }
+
+ drm_dev_unregister(&vkms_device->drm);
+ drm_dev_put(&vkms_device->drm);
+
+ kfree(vkms_device);
+}
+
+module_init(vkms_init);
+module_exit(vkms_exit);
+
+MODULE_AUTHOR("Haneen Mohammed <hamohammed.sa@gmail.com>");
+MODULE_AUTHOR("Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
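
Once loaded, vkms registers an ordinary DRM node that generic userspace can exercise without any real display hardware. A minimal libdrm probe, as a sketch (it assumes vkms comes up as /dev/dri/card0; the actual index depends on what other DRM drivers are bound):

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	drmModeRes *res;

	if (fd < 0)
		return 1;

	res = drmModeGetResources(fd);
	if (!res)
		return 1;

	/* vkms should report one CRTC, one encoder, one connector. */
	printf("crtcs=%d encoders=%d connectors=%d\n",
	       res->count_crtcs, res->count_encoders, res->count_connectors);
	drmModeFreeResources(res);
	return 0;
}
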
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
new file mode 100644
index 000000000000..07be29f2dc44
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -0,0 +1,78 @@
+#ifndef _VKMS_DRV_H_
+#define _VKMS_DRV_H_
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_encoder.h>
+#include <linux/hrtimer.h>
+
+#define XRES_MIN 32
+#define YRES_MIN 32
+
+#define XRES_DEF 1024
+#define YRES_DEF 768
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
+static const u32 vkms_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+struct vkms_output {
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct hrtimer vblank_hrtimer;
+ ktime_t period_ns;
+ struct drm_pending_vblank_event *event;
+};
+
+struct vkms_device {
+ struct drm_device drm;
+ struct platform_device *platform;
+ struct vkms_output output;
+};
+
+struct vkms_gem_object {
+ struct drm_gem_object gem;
+ struct mutex pages_lock; /* Page lock used in page fault handler */
+ struct page **pages;
+};
+
+#define drm_crtc_to_vkms_output(target) \
+ container_of(target, struct vkms_output, crtc)
+
+#define drm_device_to_vkms_device(target) \
+ container_of(target, struct vkms_device, drm)
+
+/* CRTC */
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor);
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+int vkms_output_init(struct vkms_device *vkmsdev);
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev);
+
+/* Gem stuff */
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size);
+
+int vkms_gem_fault(struct vm_fault *vmf);
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+void vkms_gem_free_object(struct drm_gem_object *obj);
+
+#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
new file mode 100644
index 000000000000..c7e38368602b
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/shmem_fs.h>
+
+#include "vkms_drv.h"
+
+static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
+ u64 size)
+{
+ struct vkms_gem_object *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ size = roundup(size, PAGE_SIZE);
+ ret = drm_gem_object_init(dev, &obj->gem, size);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ mutex_init(&obj->pages_lock);
+
+ return obj;
+}
+
+void vkms_gem_free_object(struct drm_gem_object *obj)
+{
+ struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
+ gem);
+
+ kvfree(gem->pages);
+ mutex_destroy(&gem->pages_lock);
+ drm_gem_object_release(obj);
+ kfree(gem);
+}
+
+int vkms_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vkms_gem_object *obj = vma->vm_private_data;
+ unsigned long vaddr = vmf->address;
+ pgoff_t page_offset;
+ loff_t num_pages;
+ int ret;
+
+ page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
+ num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
+
+ if (page_offset >= num_pages)
+ return VM_FAULT_SIGBUS;
+
+ ret = -ENOENT;
+ mutex_lock(&obj->pages_lock);
+ if (obj->pages) {
+ get_page(obj->pages[page_offset]);
+ vmf->page = obj->pages[page_offset];
+ ret = 0;
+ }
+ mutex_unlock(&obj->pages_lock);
+ if (ret) {
+ struct page *page;
+ struct address_space *mapping;
+
+ mapping = file_inode(obj->gem.filp)->i_mapping;
+ page = shmem_read_mapping_page(mapping, page_offset);
+
+ if (!IS_ERR(page)) {
+ vmf->page = page;
+ ret = 0;
+ } else {
+ switch (PTR_ERR(page)) {
+ case -ENOSPC:
+ case -ENOMEM:
+ ret = VM_FAULT_OOM;
+ break;
+ case -EBUSY:
+ ret = VM_FAULT_RETRY;
+ break;
+ case -EFAULT:
+ case -EINVAL:
+ ret = VM_FAULT_SIGBUS;
+ break;
+ default:
+ WARN_ON(PTR_ERR(page));
+ ret = VM_FAULT_SIGBUS;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size)
+{
+ struct vkms_gem_object *obj;
+ int ret;
+
+ if (!file || !dev || !handle)
+ return ERR_PTR(-EINVAL);
+
+ obj = __vkms_gem_create(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ ret = drm_gem_handle_create(file, &obj->gem, handle);
+ drm_gem_object_put_unlocked(&obj->gem);
+ if (ret) {
+ drm_gem_object_release(&obj->gem);
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return &obj->gem;
+}
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gem_obj;
+ u64 pitch, size;
+
+ if (!args || !dev || !file)
+ return -EINVAL;
+
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ size = pitch * args->height;
+
+ if (!size)
+ return -EINVAL;
+
+ gem_obj = vkms_gem_create(dev, file, &args->handle, size);
+ if (IS_ERR(gem_obj))
+ return PTR_ERR(gem_obj);
+
+ args->size = gem_obj->size;
+ args->pitch = pitch;
+
+ DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+ return 0;
+}
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (!obj->filp) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto unref;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+unref:
+ drm_gem_object_put_unlocked(obj);
+
+ return ret;
+}
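
The errno-to-fault-code switch in vkms_gem_fault() is the same ladder vgem carried before its vm_fault_t conversion earlier in this series. Kernels from v4.17 on also provide vmf_error() in <linux/mm.h> for this mapping; a sketch of what the shmem error path could collapse to, assuming the handler were converted to return vm_fault_t (note vmf_error() only treats -ENOMEM specially, so it is not a drop-in for the -EBUSY/VM_FAULT_RETRY case above):

#include <linux/err.h>
#include <linux/mm.h>

/* Map the errno embedded in an ERR_PTR page to a fault code, or
 * install the page on success.
 */
static vm_fault_t vkms_page_to_fault(struct vm_fault *vmf, struct page *page)
{
	if (IS_ERR(page))
		return vmf_error(PTR_ERR(page));

	vmf->page = page;
	return 0;
}
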
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
new file mode 100644
index 000000000000..901012cb1af1
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static void vkms_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vkms_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_funcs vkms_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+};
+
+int vkms_output_init(struct vkms_device *vkmsdev)
+{
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_device *dev = &vkmsdev->drm;
+ struct drm_connector *connector = &output->connector;
+ struct drm_encoder *encoder = &output->encoder;
+ struct drm_crtc *crtc = &output->crtc;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = vkms_plane_init(vkmsdev);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = vkms_crtc_init(dev, crtc, primary, NULL);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to init connector\n");
+ goto err_connector;
+ }
+
+ drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+
+ ret = drm_connector_register(connector);
+ if (ret) {
+ DRM_ERROR("Failed to register connector\n");
+ goto err_connector_register;
+ }
+
+ ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ goto err_encoder;
+ }
+ encoder->possible_crtcs = 1;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+
+err_attach:
+ drm_encoder_cleanup(encoder);
+
+err_encoder:
+ drm_connector_unregister(connector);
+
+err_connector_register:
+ drm_connector_cleanup(connector);
+
+err_connector:
+ drm_crtc_cleanup(crtc);
+
+err_crtc:
+ drm_plane_cleanup(primary);
+ return ret;
+}
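
encoder->possible_crtcs = 1 above hard-codes the bit for the single CRTC registered at index 0. The generic form, which would keep working if vkms ever grew more pipes, is the drm_crtc_mask() helper; a short sketch (vkms_encoder_set_possible_crtcs is hypothetical):

#include <drm/drm_crtc.h>

static void vkms_encoder_set_possible_crtcs(struct drm_encoder *encoder,
					    struct drm_crtc *crtc)
{
	/* Equivalent to BIT(drm_crtc_index(crtc)). */
	encoder->possible_crtcs = drm_crtc_mask(crtc);
}
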
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
new file mode 100644
index 000000000000..9f75b1e2c1c4
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static const struct drm_plane_funcs vkms_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void vkms_primary_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
+static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
+ .atomic_update = vkms_primary_plane_update,
+};
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct drm_plane *plane;
+ const u32 *formats;
+ int ret, nformats;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ formats = vkms_formats;
+ nformats = ARRAY_SIZE(vkms_formats);
+
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &vkms_plane_funcs,
+ formats, nformats,
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ kfree(plane);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &vkms_primary_helper_funcs);
+
+ return plane;
+}
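
vkms_primary_plane_update() above is deliberately empty, and no .atomic_check is wired up yet. If one were added, the usual 4.18-era shape is drm_atomic_helper_check_plane_state(); a sketch under that assumption (upstream vkms later grew a very similar check):

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int vkms_plane_atomic_check(struct drm_plane *plane,
				   struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;

	if (!state->crtc || !state->fb)
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Primary plane: no scaling, must cover the CRTC exactly. */
	return drm_atomic_helper_check_plane_state(state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   false, false);
}

It would then be referenced from vkms_primary_helper_funcs as .atomic_check.
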
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 8c308dac99c5..6b28a326f8bb 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && X86 && MMU
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 794cc9d5c9b0..09b2aa08363e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
- vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
index 9ce2466a5d00..69c4253fbfbb 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 2dfd57c5f463..9cbba0e8ce6a 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -46,10 +47,10 @@
* the SVGA3D protocol and remain reserved; they should not be used in the
* future.
*
- * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * IDs between 1040 and 2999 (inclusive) are available for use by the
* current SVGA3D protocol.
*
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * FIFO clients other than SVGA3D should stay below 1000, or at 3000
* and up.
*/
@@ -89,19 +90,19 @@ typedef enum {
SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
- SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
- SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
- SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
- SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
- SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
- SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
- SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
- SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
+ SVGA_3D_CMD_DEAD4 = 1072,
+ SVGA_3D_CMD_DEAD5 = 1073,
+ SVGA_3D_CMD_DEAD6 = 1074,
+ SVGA_3D_CMD_DEAD7 = 1075,
+ SVGA_3D_CMD_DEAD8 = 1076,
+ SVGA_3D_CMD_DEAD9 = 1077,
+ SVGA_3D_CMD_DEAD10 = 1078,
+ SVGA_3D_CMD_DEAD11 = 1079,
SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
SVGA_3D_CMD_SCREEN_DMA = 1082,
- SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
- SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
+ SVGA_3D_CMD_DEAD1 = 1083,
+ SVGA_3D_CMD_DEAD2 = 1084,
SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
@@ -217,7 +218,7 @@ typedef enum {
SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
SVGA_3D_CMD_DX_PRED_COPY = 1179,
- SVGA_3D_CMD_DX_STRETCHBLT = 1180,
+ SVGA_3D_CMD_DX_PRESENTBLT = 1180,
SVGA_3D_CMD_DX_GENMIPS = 1181,
SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
@@ -254,7 +255,7 @@ typedef enum {
SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
- SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
+ SVGA_3D_CMD_DX_BIND_ALL_SHADER = 1217,
SVGA_3D_CMD_DX_HINT = 1218,
SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
@@ -262,17 +263,47 @@ typedef enum {
SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
/*
- * Reserve some IDs to be used for the DX11 shader types.
+ * Reserve some IDs to be used for the SM5 shader types.
*/
SVGA_3D_CMD_DX_RESERVED1 = 1223,
SVGA_3D_CMD_DX_RESERVED2 = 1224,
SVGA_3D_CMD_DX_RESERVED3 = 1225,
- SVGA_3D_CMD_DX_MAX = 1226,
- SVGA_3D_CMD_MAX = 1226,
+ SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226,
+ SVGA_3D_CMD_DX_MAX = 1227,
+
+ SVGA_3D_CMD_SCREEN_COPY = 1227,
+
+ /*
+ * Reserve some IDs to be used for video.
+ */
+ SVGA_3D_CMD_VIDEO_RESERVED1 = 1228,
+ SVGA_3D_CMD_VIDEO_RESERVED2 = 1229,
+ SVGA_3D_CMD_VIDEO_RESERVED3 = 1230,
+ SVGA_3D_CMD_VIDEO_RESERVED4 = 1231,
+ SVGA_3D_CMD_VIDEO_RESERVED5 = 1232,
+ SVGA_3D_CMD_VIDEO_RESERVED6 = 1233,
+ SVGA_3D_CMD_VIDEO_RESERVED7 = 1234,
+ SVGA_3D_CMD_VIDEO_RESERVED8 = 1235,
+
+ SVGA_3D_CMD_GROW_OTABLE = 1236,
+ SVGA_3D_CMD_DX_GROW_COTABLE = 1237,
+ SVGA_3D_CMD_INTRA_SURFACE_COPY = 1238,
+
+ SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 = 1239,
+
+ SVGA_3D_CMD_DX_RESOLVE_COPY = 1240,
+ SVGA_3D_CMD_DX_PRED_RESOLVE_COPY = 1241,
+ SVGA_3D_CMD_DX_PRED_CONVERT_REGION = 1242,
+ SVGA_3D_CMD_DX_PRED_CONVERT = 1243,
+ SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244,
+
+ SVGA_3D_CMD_MAX = 1245,
SVGA_3D_CMD_FUTURE_MAX = 3000
} SVGAFifo3dCmdId;
+#define SVGA_NUM_3D_CMD (SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
+
/*
* FIFO command format definitions:
*/
@@ -301,7 +332,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -327,7 +358,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -459,6 +490,28 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+/*
+ * Perform a surface copy within the same image.
+ * The src/dest boxes are allowed to overlap.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId surface;
+ SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdIntraSurfaceCopy; /* SVGA_3D_CMD_INTRA_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 srcSid;
+ uint32 destSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWholeSurfaceCopy; /* SVGA_3D_CMD_WHOLE_SURFACE_COPY */
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -772,6 +825,17 @@ struct {
#include "vmware_pack_end.h"
SVGA3dVertexElement;
+/*
+ * Should the vertex element respect the stream value? The high bit of the
+ * stream should be set to indicate that the stream should be respected. If
+ * the high bit is not set, the stream will be ignored and replaced by the index
+ * of the position of the currently considered vertex element.
+ *
+ * All guests should set this bit and correctly specify the stream going
+ * forward.
+ */
+#define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7)
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1102,8 +1166,6 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1147,38 +1209,6 @@ struct SVGA3dCmdScreenDMA {
SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
/*
- * Set Unity Surface Cookie
- *
- * Associates the supplied cookie with the surface id for use with
- * Unity. This cookie is a hint from guest to host, there is no way
- * for the guest to readback the cookie and the host is free to drop
- * the cookie association at will. The default value for the cookie
- * on all surfaces is 0.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdSetUnitySurfaceCookie {
- uint32 sid;
- uint64 cookie;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
-
-/*
- * Open a context-specific surface in a non-context-specific manner.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdOpenContextSurface {
- uint32 sid;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
-
-
-/*
* Logic ops
*/
@@ -1324,7 +1354,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceFormat format;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surface1Flags;
uint32 numMipLevels;
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
@@ -1332,7 +1362,11 @@ struct {
SVGAMobId mobid;
uint32 arraySize;
uint32 mobPitch;
- uint32 pad[5];
+ SVGA3dSurface2Flags surface2Flags;
+ uint8 multisamplePattern;
+ uint8 qualityLevel;
+ uint8 pad0[2];
+ uint32 pad1[3];
}
#include "vmware_pack_end.h"
SVGAOTableSurfaceEntry;
@@ -1360,7 +1394,8 @@ struct {
SVGAOTableShaderEntry;
#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
-#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_RESERVED (1 << 1) /* Added with cap SVGA_CAP_HP_CMD_QUEUE */
typedef uint32 SVGAScreenTargetFlags;
typedef
@@ -1528,6 +1563,25 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+/*
+ * Guests using SVGA_3D_CMD_GROW_OTABLE are promising that
+ * the new OTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * (Otherwise, guests should use one of the SetOTableBase commands.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGrowOTable; /* SVGA_3D_CMD_GROW_OTABLE */
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1615,7 +1669,7 @@ typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBSurface {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
uint32 numMipLevels;
uint32 multisampleCount;
@@ -1626,6 +1680,45 @@ struct SVGA3dCmdDefineGBSurface {
SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
/*
+ * Defines a guest-backed surface, adding the arraySize field.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+ uint32 sid;
+ SVGA3dSurface1Flags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Defines a guest-backed surface, adding the larger flags.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v3 {
+ uint32 sid;
+ SVGA3dSurfaceAllFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dMSPattern multisamplePattern;
+ SVGA3dMSQualityLevel qualityLevel;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v3; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
+
+/*
* Destroy a guest-backed surface.
*/
@@ -1672,7 +1765,7 @@ SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
typedef
#include "vmware_pack_begin.h"
-struct{
+struct SVGA3dCmdCondBindGBSurface {
uint32 sid;
SVGAMobId testMobid;
SVGAMobId mobid;
@@ -2066,6 +2159,26 @@ struct {
uint32 mobOffset;
}
#include "vmware_pack_end.h"
-SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE*/
+SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId dest;
+
+ uint32 statusMobId;
+ uint32 statusMobOffset;
+
+ /* Reserved fields */
+ uint32 mustBeInvalidId;
+ uint32 mustBeZero;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenCopy; /* SVGA_3D_CMD_SCREEN_COPY */
+
+#define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00
+#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
+#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index c18b663f360f..f256560049bf 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -229,9 +230,9 @@ typedef enum {
SVGA3D_DEVCAP_DEAD2 = 94,
/*
- * Does the device support the DX commands?
+ * Does the device support DXContexts?
*/
- SVGA3D_DEVCAP_DX = 95,
+ SVGA3D_DEVCAP_DXCONTEXT = 95,
/*
* What is the maximum size of a texture array?
@@ -241,21 +242,47 @@ typedef enum {
SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
/*
- * What is the maximum number of vertex buffers that can
- * be used in the DXContext inputAssembly?
+ * What is the maximum number of vertex buffers or vertex input registers
+ * that can be expected to work correctly with a DXContext?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+ * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
+ * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1 cap,
+ * but only the registers up to this cap value are guaranteed to render
+ * correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it is recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed it.
*/
SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
/*
- * What is the maximum number of constant buffers
- * that can be expected to work correctly with a
- * DX context?
+ * What is the maximum number of constant buffers that can be expected to
+ * work correctly with a DX context?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it is recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed it.
*/
SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
/*
* Does the device support provoking vertex control?
- * If zero, the first vertex will always be the provoking vertex.
+ *
+ * If this cap is present, the provokingVertexLast field in the
+ * rasterizer state is enabled. (Guests can then set it to FALSE,
+ * meaning that the first vertex is the provoking vertex, or TRUE,
+ * meaning that the last vertex is the provoking vertex.)
+ *
+ * If this cap is FALSE, then guests should set the provokingVertexLast
+ * to FALSE, otherwise rendering behavior is undefined.
*/
SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
@@ -281,7 +308,7 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
- SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
+ SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 = 122,
SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
@@ -320,8 +347,8 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
- SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
- SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
+ SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 = 161,
+ SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT = 162,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
@@ -339,8 +366,8 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
- SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
- SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
+ SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 = 180,
+ SVGA3D_DEVCAP_DXFMT_X24_G8_UINT = 181,
SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
@@ -404,6 +431,17 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
+ /*
+ * Advertises shaderModel 4.1 support, independent blend-states,
+ * cube-map arrays, and a higher vertex input registers limit.
+ *
+ * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+ */
+ SVGA3D_DEVCAP_SM41 = 244,
+
+ SVGA3D_DEVCAP_MULTISAMPLE_2X = 245,
+ SVGA3D_DEVCAP_MULTISAMPLE_4X = 246,
+
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
@@ -419,9 +457,7 @@ typedef enum {
* MIPS: Does the format support mip levels?
* ARRAY: Does the format support texture arrays?
* VOLUME: Does the format support having volume?
- * MULTISAMPLE_2: Does the format support 2x multisample?
- * MULTISAMPLE_4: Does the format support 4x multisample?
- * MULTISAMPLE_8: Does the format support 8x multisample?
+ * MULTISAMPLE: Does the format support multisample?
*/
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
@@ -432,20 +468,8 @@ typedef enum {
#define SVGA3D_DXFMT_ARRAY (1 << 6)
#define SVGA3D_DXFMT_VOLUME (1 << 7)
#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
-#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
-#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
-#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
-#define SVGADX_DXFMT_MAX (1 << 12)
-
-/*
- * Convenience mask for any multisample capability.
- *
- * The multisample bits imply both load and render capability.
- */
-#define SVGA3D_DXFMT_MULTISAMPLE ( \
- SVGADX_DXFMT_MULTISAMPLE_2 | \
- SVGADX_DXFMT_MULTISAMPLE_4 | \
- SVGADX_DXFMT_MULTISAMPLE_8 )
+#define SVGA3D_DXFMT_MULTISAMPLE (1 << 9)
+#define SVGA3D_DXFMT_MAX (1 << 10)
typedef union {
Bool b;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 8c5ae608cfb4..7a49c94df221 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -56,6 +57,16 @@ typedef uint32 SVGA3dInputClassification;
#define SVGA3D_RESOURCE_TYPE_MAX 7
typedef uint32 SVGA3dResourceType;
+#define SVGA3D_COLOR_WRITE_ENABLE_RED (1 << 0)
+#define SVGA3D_COLOR_WRITE_ENABLE_GREEN (1 << 1)
+#define SVGA3D_COLOR_WRITE_ENABLE_BLUE (1 << 2)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA (1 << 3)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALL (SVGA3D_COLOR_WRITE_ENABLE_RED | \
+ SVGA3D_COLOR_WRITE_ENABLE_GREEN | \
+ SVGA3D_COLOR_WRITE_ENABLE_BLUE | \
+ SVGA3D_COLOR_WRITE_ENABLE_ALPHA)
+typedef uint8 SVGA3dColorWriteEnable;
+
#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
typedef uint8 SVGA3dDepthWriteMask;
@@ -88,17 +99,28 @@ typedef uint8 SVGA3dCullMode;
#define SVGA3D_COMPARISON_MAX 9
typedef uint8 SVGA3dComparisonFunc;
+/*
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE disables MSAA for all primitives.
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE, which is supported in SM41,
+ * disables MSAA for lines only.
+ */
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE 0
+#define SVGA3D_MULTISAMPLE_RAST_ENABLE 1
+#define SVGA3D_MULTISAMPLE_RAST_DX_MAX 1
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE 2
+#define SVGA3D_MULTISAMPLE_RAST_MAX 2
+typedef uint8 SVGA3dMultisampleRastEnable;
+
#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_VERTEXINPUTREGISTERS 16
+#define SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS 32
#define SVGA3D_DX_MAX_SOTARGETS 4
#define SVGA3D_DX_MAX_SRVIEWS 128
#define SVGA3D_DX_MAX_CONSTBUFFERS 16
#define SVGA3D_DX_MAX_SAMPLERS 16
-/* Id limits */
-static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
-static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
-typedef uint32 SVGA3dSurfaceId;
typedef uint32 SVGA3dShaderResourceViewId;
typedef uint32 SVGA3dRenderTargetViewId;
typedef uint32 SVGA3dDepthStencilViewId;
@@ -194,20 +216,6 @@ SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
typedef
#include "vmware_pack_begin.h"
-struct SVGA3dReplyFormatData {
- uint32 formatSupport;
- uint32 msaa2xQualityLevels:5;
- uint32 msaa4xQualityLevels:5;
- uint32 msaa8xQualityLevels:5;
- uint32 msaa16xQualityLevels:5;
- uint32 msaa32xQualityLevels:5;
- uint32 pad:7;
-}
-#include "vmware_pack_end.h"
-SVGA3dReplyFormatData;
-
-typedef
-#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSingleConstantBuffer {
uint32 slot;
SVGA3dShaderType type;
@@ -624,6 +632,28 @@ SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvertRegion {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dBox destBox;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dBox srcBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvertRegion; /* SVGA_3D_CMD_DX_PRED_CONVERT_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvert {
+ SVGA3dSurfaceId dstSid;
+ SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvert; /* SVGA_3D_CMD_DX_PRED_CONVERT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBufferCopy {
SVGA3dSurfaceId dest;
SVGA3dSurfaceId src;
@@ -635,23 +665,57 @@ struct SVGA3dCmdDXBufferCopy {
SVGA3dCmdDXBufferCopy;
/* SVGA_3D_CMD_DX_BUFFER_COPY */
-typedef uint32 SVGA3dDXStretchBltMode;
-#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
-#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+/*
+ * Perform a surface copy between a multisample, and a non-multisampled
+ * surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXResolveCopy; /* SVGA_3D_CMD_DX_RESOLVE_COPY */
+
+/*
+ * Perform a predicated surface copy between a multisample, and a
+ * non-multisampled surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredResolveCopy; /* SVGA_3D_CMD_DX_PRED_RESOLVE_COPY */
+
+typedef uint32 SVGA3dDXPresentBltMode;
+#define SVGADX_PRESENTBLT_LINEAR (1 << 0)
+#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB (1 << 1)
+#define SVGADX_PRESENTBLT_FORCE_SRC_XRBIAS (1 << 2)
+#define SVGADX_PRESENTBLT_MODE_MAX (1 << 3)
typedef
#include "vmware_pack_begin.h"
-struct SVGA3dCmdDXStretchBlt {
+struct SVGA3dCmdDXPresentBlt {
SVGA3dSurfaceId srcSid;
uint32 srcSubResource;
SVGA3dSurfaceId dstSid;
uint32 destSubResource;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
- SVGA3dDXStretchBltMode mode;
+ SVGA3dDXPresentBltMode mode;
}
#include "vmware_pack_end.h"
-SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+SVGA3dCmdDXPresentBlt; /* SVGA_3D_CMD_DX_PRESENTBLT */
typedef
#include "vmware_pack_begin.h"
@@ -662,26 +726,6 @@ struct SVGA3dCmdDXGenMips {
SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
/*
- * Defines a resource/DX surface. Resources share the surfaceId namespace.
- *
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdDefineGBSurface_v2 {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- uint32 numMipLevels;
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- SVGA3dSize size;
- uint32 arraySize;
- uint32 pad;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
-
-/*
* Update a sub-resource in a guest-backed resource.
* (Inform the device that the guest-contents have been updated.)
*/
@@ -724,7 +768,8 @@ SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
/*
* Raw byte wise transfer from a buffer surface into another surface
- * of the requested box.
+ * of the requested box. Supported if 3d is enabled and SVGA_CAP_DX
+ * is set. This command does not take a context.
*/
typedef
#include "vmware_pack_begin.h"
@@ -773,6 +818,93 @@ struct SVGA3dCmdDXSurfaceCopyAndReadback {
SVGA3dCmdDXSurfaceCopyAndReadback;
/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+/*
+ * SVGA_DX_HINT_NONE: Does nothing.
+ *
+ * SVGA_DX_HINT_PREFETCH_OBJECT:
+ * SVGA_DX_HINT_PREEVICT_OBJECT:
+ * Consumes a SVGAObjectRef, and hints that the host should consider
+ * fetching/evicting the specified object.
+ *
+ * An id of SVGA3D_INVALID_ID can be used if the guest isn't sure
+ * what object was affected. (For instance, if the guest knows that
+ * it is about to evict a DXShader, but doesn't know precisely which one,
+ * the device can still use this to help limit its search, or track
+ * how many page-outs have happened.)
+ *
+ * SVGA_DX_HINT_PREFETCH_COBJECT:
+ * SVGA_DX_HINT_PREEVICT_COBJECT:
+ * Same as the above, except they consume an SVGACObjectRef.
+ */
+typedef uint32 SVGADXHintId;
+#define SVGA_DX_HINT_NONE 0
+#define SVGA_DX_HINT_PREFETCH_OBJECT 1
+#define SVGA_DX_HINT_PREEVICT_OBJECT 2
+#define SVGA_DX_HINT_PREFETCH_COBJECT 3
+#define SVGA_DX_HINT_PREEVICT_COBJECT 4
+#define SVGA_DX_HINT_MAX 5
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAObjectRef {
+ SVGAOTableType type;
+ uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGAObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACObjectRef {
+ SVGACOTableType type;
+ uint32 cid;
+ uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGACObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXHint {
+ SVGADXHintId hintId;
+
+ /*
+ * Followed by variable sized data depending on the hintId.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXHint;
+/* SVGA_3D_CMD_DX_HINT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferUpdate {
+ SVGA3dSurfaceId sid;
+ uint32 x;
+ uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferUpdate;
+/* SVGA_3D_CMD_DX_BUFFER_UPDATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetConstantBufferOffset {
+ uint32 slot;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetConstantBufferOffset;
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetVSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+
typedef
#include "vmware_pack_begin.h"
@@ -789,7 +921,7 @@ struct {
uint32 firstArraySlice;
uint32 mipLevels;
uint32 arraySize;
- } tex;
+ } tex; /* 1d, 2d, 3d, cube */
struct {
uint32 firstElement;
uint32 numElements;
@@ -844,6 +976,7 @@ struct SVGA3dRenderTargetViewDesc {
struct {
uint32 firstElement;
uint32 numElements;
+ uint32 padding0;
} buffer;
struct {
uint32 mipSlice;
@@ -964,9 +1097,6 @@ SVGA3dInputElementDesc;
typedef
#include "vmware_pack_begin.h"
struct {
- /*
- * XXX: How many of these can there be?
- */
uint32 elid;
uint32 numDescs;
SVGA3dInputElementDesc desc[32];
@@ -1007,7 +1137,7 @@ struct SVGA3dDXBlendStatePerRT {
uint8 srcBlendAlpha;
uint8 destBlendAlpha;
uint8 blendOpAlpha;
- uint8 renderTargetWriteMask;
+ SVGA3dColorWriteEnable renderTargetWriteMask;
uint8 logicOpEnable;
uint8 logicOp;
uint16 pad0;
@@ -1125,7 +1255,7 @@ struct {
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
- uint8 multisampleEnable;
+ SVGA3dMultisampleRastEnable multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
@@ -1152,7 +1282,7 @@ struct SVGA3dCmdDXDefineRasterizerState {
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
- uint8 multisampleEnable;
+ SVGA3dMultisampleRastEnable multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
@@ -1222,21 +1352,6 @@ struct SVGA3dCmdDXDestroySamplerState {
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
-/*
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dSignatureEntry {
- uint8 systemValue;
- uint8 reg; /* register is a reserved word */
- uint16 mask;
- uint8 registerComponentType;
- uint8 minPrecision;
- uint16 pad0;
-}
-#include "vmware_pack_end.h"
-SVGA3dSignatureEntry;
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShader {
@@ -1254,12 +1369,7 @@ struct SVGACOTableDXShaderEntry {
uint32 sizeInBytes;
uint32 offsetInBytes;
SVGAMobId mobid;
- uint32 numInputSignatureEntries;
- uint32 numOutputSignatureEntries;
-
- uint32 numPatchConstantSignatureEntries;
-
- uint32 pad;
+ uint32 pad[4];
}
#include "vmware_pack_end.h"
SVGACOTableDXShaderEntry;
@@ -1283,6 +1393,25 @@ struct SVGA3dCmdDXBindShader {
#include "vmware_pack_end.h"
SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllShader {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllShader; /* SVGA_3D_CMD_DX_BIND_ALL_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCondBindAllShader {
+ uint32 cid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCondBindAllShader; /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
+
/*
* The maximum number of streamout decl's in each streamout entry.
*/
@@ -1356,7 +1485,6 @@ SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
*
* This command allows the guest to bind a mob to a context-object table.
*/
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetCOTable {
@@ -1368,6 +1496,26 @@ struct SVGA3dCmdDXSetCOTable {
#include "vmware_pack_end.h"
SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
+/*
+ * Guests using SVGA_3D_CMD_DX_GROW_COTABLE are promising that
+ * the new COTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * If there is an old cotable mob bound, it also has to still be valid.
+ *
+ * (Otherwise, guests should use the DXSetCOTableBase command.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGrowCOTable {
+ uint32 cid;
+ uint32 mobid;
+ SVGACOTableType type;
+ uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGrowCOTable; /* SVGA_3D_CMD_DX_GROW_COTABLE */
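(A minimal sketch, assuming the guest has already copied the old COTable contents into the new, larger mob; the helper name is hypothetical.)

    /* Hypothetical: validSizeInBytes covers the entries carried over. */
    static void fill_grow_cotable(SVGA3dCmdDXGrowCOTable *cmd, uint32 cid,
                                  uint32 new_mobid, SVGACOTableType type,
                                  uint32 valid_bytes)
    {
            cmd->cid = cid;
            cmd->mobid = new_mobid;
            cmd->type = type;
            cmd->validSizeInBytes = valid_bytes;
    }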
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackCOTable {
@@ -1471,7 +1619,7 @@ struct SVGADXContextMobFormat {
SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
- uint32 pad7[381];
+ uint32 pad7[380];
}
#include "vmware_pack_end.h"
SVGADXContextMobFormat;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index a1c36877ad55..b22a67f15660 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -62,7 +63,9 @@
* Maximum size in dwords of shader text the SVGA device will allow.
* Currently 8 MB.
*/
-#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
+#define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024)
+#define SVGA3D_MAX_SHADER_MEMORY (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
+ sizeof(uint32))
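(Since sizeof(uint32) is 4, the dword limit is unchanged by the split: 8 * 1024 * 1024 / 4 = 2,097,152 dwords. A hypothetical compile-time check, which would have to live inside a function, could pin that down:)

    /* Hypothetical check, not in this patch. */
    BUILD_BUG_ON(SVGA3D_MAX_SHADER_MEMORY != 2 * 1024 * 1024);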
#define SVGA3D_MAX_CLIP_PLANES 6
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
index b44ce648f592..bdfc404c91e3 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index babe7cb84fc2..f2bfd3d80598 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2008-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,189 +25,355 @@
*
**************************************************************************/
-#include <linux/kernel.h>
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
+/*
+ * svga3d_surfacedefs.h --
+ *
+ * Surface definitions and inlineable utilities for SVGA3d.
+ */
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
+#ifndef _SVGA3D_SURFACEDEFS_H_
+#define _SVGA3D_SURFACEDEFS_H_
-#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_MODULE
+#include "includeCheck.h"
-#endif /* __KERNEL__ */
+#include <linux/kernel.h>
+#include <drm/vmwgfx_drm.h>
#include "svga3d_reg.h"
+#define surf_size_struct struct drm_vmw_size
+
/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at-most four active channels in a block:
- * 1. Red, bump W, luminance and depth are stored in the first channel.
- * 2. Green, bump V and stencil are stored in the second channel.
- * 3. Blue and bump U are stored in the third channel.
- * 4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- * 1. For compressed formats, only the data channel is used and its size
- * is equal to that of a singular block in the compression scheme.
- * 2. For buffer formats, only the data channel is used and its size is
- * exactly one byte in length.
- * 3. In each case the bit depth represent the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
+ * enum svga3d_block_desc - describes generic properties about formats.
*/
-
enum svga3d_block_desc {
- SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
- SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel
- data */
- SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
- data */
- SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
- U and V */
- SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
- data */
- SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
- data */
- SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
- channel */
- SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel
- data */
- SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
- data */
- SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
- data */
- SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
- data */
- SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
- SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
- channel */
- SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
- data */
- SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
- data */
- SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
- data depending on the
- compression method used */
- SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
- floating point
- representation in
- all channels */
- SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
- data. */
- SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
- SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
- SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
- SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
- SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
- e.g., NV12. */
- SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
- Y, U, V, e.g., YV12. */
-
- SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_GREEN,
- SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
- SVGA3DBLOCKDESC_SRGB,
+ /* Nothing special can be said about this format. */
+ SVGA3DBLOCKDESC_NONE = 0,
+
+ /* Format contains Blue/U data */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0,
+ SVGA3DBLOCKDESC_W = 1 << 0,
+ SVGA3DBLOCKDESC_BUMP_L = 1 << 0,
+
+ /* Format contains Green/V data */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1,
+ SVGA3DBLOCKDESC_V = 1 << 1,
+
+ /* Format contains Red/W/Luminance data */
+ SVGA3DBLOCKDESC_RED = 1 << 2,
+ SVGA3DBLOCKDESC_U = 1 << 2,
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2,
+
+ /* Format contains Alpha/Q data */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3,
+ SVGA3DBLOCKDESC_Q = 1 << 3,
+
+ /* Format is a buffer */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4,
+
+ /* Format is compressed */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5,
+
+ /* Format uses IEEE floating point */
+ SVGA3DBLOCKDESC_FP = 1 << 6,
+
+ /* Three separate blocks store data. */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 7,
+
+ /* 2 planes of Y, UV, e.g., NV12. */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 8,
+
+ /* 3 planes of separate Y, U, V, e.g., YV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 9,
+
+ /* Block with a stencil channel */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 11,
+
+ /* Typeless format */
+ SVGA3DBLOCKDESC_TYPELESS = 1 << 12,
+
+ /* Channels are signed integers */
+ SVGA3DBLOCKDESC_SINT = 1 << 13,
+
+ /* Channels are unsigned integers */
+ SVGA3DBLOCKDESC_UINT = 1 << 14,
+
+ /* Channels are normalized (when sampling) */
+ SVGA3DBLOCKDESC_NORM = 1 << 15,
+
+ /* Channels are in SRGB */
+ SVGA3DBLOCKDESC_SRGB = 1 << 16,
+
+ /* Shared exponent */
+ SVGA3DBLOCKDESC_EXP = 1 << 17,
+
+ /* Format contains color data. */
+ SVGA3DBLOCKDESC_COLOR = 1 << 18,
+ /* Format contains depth data. */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 19,
+ /* Format contains bump data. */
+ SVGA3DBLOCKDESC_BUMP = 1 << 20,
+
+ /* Format contains YUV video data. */
+ SVGA3DBLOCKDESC_YUV_VIDEO = 1 << 21,
+
+ /* For mixed unsigned/signed formats. */
+ SVGA3DBLOCKDESC_MIXED = 1 << 22,
+
+	/* For distinguishing CxV8U8. */
+ SVGA3DBLOCKDESC_CX = 1 << 23,
+
+ /* Different compressed format groups. */
+ SVGA3DBLOCKDESC_BC1 = 1 << 24,
+ SVGA3DBLOCKDESC_BC2 = 1 << 25,
+ SVGA3DBLOCKDESC_BC3 = 1 << 26,
+ SVGA3DBLOCKDESC_BC4 = 1 << 27,
+ SVGA3DBLOCKDESC_BC5 = 1 << 28,
+
+ SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_A_UNORM = SVGA3DBLOCKDESC_A_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_R_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_R_UNORM = SVGA3DBLOCKDESC_R_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_R_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_R_SNORM = SVGA3DBLOCKDESC_R_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_G_UINT = SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_UNORM = SVGA3DBLOCKDESC_RG_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RG_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_SNORM = SVGA3DBLOCKDESC_RG_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGB_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_UNORM = SVGA3DBLOCKDESC_RGB_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGB_UNORM_SRGB = SVGA3DBLOCKDESC_RGB_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGBA_UNORM = SVGA3DBLOCKDESC_RGBA_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = SVGA3DBLOCKDESC_RGBA_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGBA_SNORM = SVGA3DBLOCKDESC_RGBA_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_FP |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_BUMP_L |
+ SVGA3DBLOCKDESC_MIXED |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
- SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_MIXED |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V |
- SVGA3DBLOCKDESC_W |
- SVGA3DBLOCKDESC_Q,
- SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
- SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q |
+ SVGA3DBLOCKDESC_BUMP,
+ SVGA3DBLOCKDESC_L_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_LA_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_FP |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
- SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
- SVGA3DBLOCKDESC_STENCIL,
- SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
- SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
- SVGA3DBLOCKDESC_Y |
- SVGA3DBLOCKDESC_U_VIDEO |
- SVGA3DBLOCKDESC_V_VIDEO,
- SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_EXP,
- SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_2PLANAR_YUV,
- SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_3PLANAR_YUV,
+ SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_EXP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_EXP |
+ SVGA3DBLOCKDESC_COLOR,
+
+ SVGA3DBLOCKDESC_COMP_TYPELESS = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_TYPELESS,
+ SVGA3DBLOCKDESC_COMP_UNORM = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_COMP_SNORM = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = SVGA3DBLOCKDESC_BC1 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC1_COMP_UNORM = SVGA3DBLOCKDESC_BC1 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC1_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = SVGA3DBLOCKDESC_BC2 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC2_COMP_UNORM = SVGA3DBLOCKDESC_BC2 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC2_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = SVGA3DBLOCKDESC_BC3 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC3_COMP_UNORM = SVGA3DBLOCKDESC_BC3 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC3_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC4_COMP_UNORM = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC4_COMP_SNORM = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_SNORM,
+ SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC5_COMP_UNORM = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_SNORM,
+
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV |
+ SVGA3DBLOCKDESC_COLOR,
+
+ SVGA3DBLOCKDESC_DEPTH_UINT = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_DEPTH_UNORM = SVGA3DBLOCKDESC_DEPTH_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_DS_UINT = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_DS_UNORM = SVGA3DBLOCKDESC_DS_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_DEPTH_FP = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_FP,
+
+ SVGA3DBLOCKDESC_UV_UINT = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_UV_SNORM = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_UVCX_SNORM = SVGA3DBLOCKDESC_UV_SNORM |
+ SVGA3DBLOCKDESC_CX,
+ SVGA3DBLOCKDESC_UVWQ_SNORM = SVGA3DBLOCKDESC_UVWQ |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM,
};
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- * 1. Block description.
- * 2. Dimensions of a block in the surface.
- * 3. Size of block in bytes.
- * 4. Bit depth of the pixel data.
- * 5. Channel bit depths and masks (if applicable).
- */
struct svga3d_channel_def {
union {
u8 blue;
- u8 u;
+ u8 w_bump;
+ u8 l_bump;
u8 uv_video;
u8 u_video;
};
union {
u8 green;
- u8 v;
u8 stencil;
+ u8 v_bump;
u8 v_video;
};
union {
u8 red;
- u8 w;
+ u8 u_bump;
u8 luminance;
- u8 y;
+ u8 y_video;
u8 depth;
u8 data;
};
union {
u8 alpha;
- u8 q;
+ u8 q_bump;
u8 exp;
};
};
+/*
+ * struct svga3d_surface_desc - describes the actual pixel data.
+ *
+ * @format: Format
+ * @block_desc: Block description
+ * @block_size: Dimensions in pixels of a block
+ * @bytes_per_block: Size of block in bytes
+ * @pitch_bytes_per_block: Size of a block in bytes for purposes of pitch
+ * @bit_depth: Channel bit depths
+ * @bit_offset: Channel bit masks (in bits offset from the start of the pointer)
+ */
struct svga3d_surface_desc {
SVGA3dSurfaceFormat format;
enum svga3d_block_desc block_desc;
+
surf_size_struct block_size;
u32 bytes_per_block;
u32 pitch_bytes_per_block;
- u32 total_bit_depth;
struct svga3d_channel_def bit_depth;
struct svga3d_channel_def bit_offset;
};
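(For illustration only: deriving the bytes needed for one row of blocks from a descriptor looks roughly like the sketch below, which mirrors what svga3dsurface_calculate_pitch does further down; the helper name is hypothetical.)

    /* Hypothetical: bytes per row of blocks for a w-pixel-wide image. */
    static inline u32 row_bytes(const struct svga3d_surface_desc *desc, u32 w)
    {
            u32 blocks = DIV_ROUND_UP(w, desc->block_size.width);

            return blocks * desc->pitch_bytes_per_block;
    }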
@@ -215,729 +381,728 @@ struct svga3d_surface_desc {
static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 0, 0,
- 0, {{0}, {0}, {0}, {0}},
+ {{0}, {0}, {0}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {6}, {5}, {0}},
+ {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
- {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 15, {{5}, {5}, {5}, {0}},
+ {{5}, {5}, {5}, {0}},
{{0}, {5}, {10}, {0}}},
- {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {5}, {1}},
+ {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
- {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{4}, {4}, {4}, {4}},
+ {{4}, {4}, {4}, {4}},
{{0}, {4}, {8}, {12}}},
- {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {8}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {1}, {15}, {0}},
- {{0}, {15}, {0}, {0}}},
+ {{0}, {1}, {15}, {0}},
+ {{0}, {0}, {1}, {0}}},
- {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+ {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_L_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
- {1 , 1, 1}, 1, 1,
- 8, {{0}, {0}, {4}, {4}},
+ {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA_UNORM,
+ {1, 1, 1}, 1, 1,
+ {{0}, {0}, {4}, {4}},
{{0}, {0}, {0}, {4}}},
- {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+ {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_L_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+ {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {8}, {8}},
+ {{0}, {0}, {8}, {8}},
{{0}, {0}, {0}, {8}}},
- {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT1, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT2, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT3, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT4, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT5, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {8}, {8}},
- {{0}, {0}, {0}, {8}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {6}, {0}},
- {{11}, {6}, {0}, {0}}},
+ {{6}, {5}, {5}, {0}},
+ {{10}, {5}, {0}, {0}}},
{SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
- {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+ {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 3, 3,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
{SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
+ {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
- {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
- {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
- {{24}, {16}, {8}, {0}}},
+ {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
- {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UVCX_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
{SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+ {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_A_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
{SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
{SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{16}, {16}, {0}, {0}},
- {{16}, {0}, {0}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
- {{0}, {0}, {16}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2,
- 16, {{8}, {0}, {8}, {0}},
+ {2, 1, 1}, 4, 4,
+ {{8}, {0}, {8}, {0}},
{{0}, {0}, {8}, {0}}},
{SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2,
- 16, {{8}, {0}, {8}, {0}},
+ {2, 1, 1}, 4, 4,
+ {{8}, {0}, {8}, {0}},
{{8}, {0}, {0}, {0}}},
{SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
{2, 2, 1}, 6, 2,
- 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
{SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB_UINT,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+ {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_RGB_SINT,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {32}, {0}},
+ {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
{SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {32}, {0}},
+ {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+ {SVGA3D_R32_FLOAT_X8X24, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+ {SVGA3D_X32_G8X24_UINT, SVGA3DBLOCKDESC_G_UINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {0}, {0}},
+ {{0}, {8}, {0}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
{SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 4, 4,
- 32, {{10}, {11}, {11}, {0}},
- {{0}, {10}, {21}, {0}}},
+ {{10}, {11}, {11}, {0}},
+ {{22}, {11}, {0}, {0}}},
- {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+ {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
+ {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
+ {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R24_UNORM_X8, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {24}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+ {SVGA3D_X24_G8_UINT, SVGA3DBLOCKDESC_G_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {0}, {0}},
+ {{0}, {8}, {0}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_R_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_R_SNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_P8, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+ {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGB_EXP,
{1, 1, 1}, 4, 4,
- 32, {{9}, {9}, {9}, {5}},
+ {{9}, {9}, {9}, {5}},
{{18}, {9}, {0}, {27}}},
- {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
- {{0}, {8}, {0}, {0}}},
+ {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_NONE,
+ {2, 1, 1}, 4, 4,
+ {{0}, {8}, {8}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_NONE,
+ {2, 1, 1}, 4, 4,
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_BC1_COMP_TYPELESS,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_BC2_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_BC3_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_BC4_COMP_TYPELESS,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_ATI1, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_BC4_COMP_SNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_BC5_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_ATI2, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_BC5_COMP_SNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+ {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {8}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
{SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
{2, 2, 1}, 6, 2,
- 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
{SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
- {{24}, {16}, {8}, {0}}},
+ {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
{SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
- {{0}, {0}, {16}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{16}, {16}, {0}, {0}},
- {{16}, {0}, {0}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
{SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+ {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_A_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {6}, {5}, {0}},
+ {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
- {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {5}, {1}},
+ {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
- {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
-
};
static inline u32 clamped_umul32(u32 a, u32 b)
@@ -946,6 +1111,10 @@ static inline u32 clamped_umul32(u32 a, u32 b)
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
}
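(A usage sketch: because the multiply saturates at 0xffffffff instead of wrapping, an untrusted pitch * height can be range-checked directly afterwards. The caller below is hypothetical.)

    /* Hypothetical caller: saturation guarantees overflow fails the check. */
    static inline int check_image_bytes(u32 pitch, u32 height, u32 limit)
    {
            u32 bytes = clamped_umul32(pitch, height);

            return (bytes > limit) ? -EINVAL : 0;
    }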
+/**
+ * svga3dsurface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the
+ * given format.
+ */
static inline const struct svga3d_surface_desc *
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
{
@@ -955,23 +1124,10 @@ svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
}
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- * Given a base level size and the mip level, compute the size of
- * the mip level.
- *
- * Results:
- * See above.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
+/**
+ * svga3dsurface_get_mip_size - Given a base level size and the mip level,
+ * compute the size of the mip level.
*/
-
static inline surf_size_struct
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
{
@@ -1018,28 +1174,17 @@ svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
return pitch;
}
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- * Return the number of bytes of buffer space required to store
- * one image of a surface, optionally using the specified pitch.
- *
- * If pitch is zero, it is assumed that rows are tightly packed.
+/**
+ * svga3dsurface_get_image_buffer_size - Calculates image buffer size.
*
- * This function is overflow-safe. If the result would have
- * overflowed, instead we return MAX_UINT32.
+ * Return the number of bytes of buffer space required to store one image of a
+ * surface, optionally using the specified pitch.
*
- * Results:
- * Byte count.
+ * If pitch is zero, it is assumed that rows are tightly packed.
*
- * Side effects:
- * None.
- *
- *-----------------------------------------------------------------------------
+ * This function is overflow-safe. If the result would have overflowed, instead
+ * we return MAX_UINT32.
*/
-
static inline u32
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
const surf_size_struct *size,
@@ -1067,6 +1212,9 @@ svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
return total_size;
}
+/**
+ * svga3dsurface_get_serialized_size - Get the serialized size for the image.
+ */
static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
surf_size_struct base_level_size,
@@ -1087,6 +1235,26 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
return total_size * num_layers;
}
+/**
+ * svga3dsurface_get_serialized_size_extended - Returns the number of bytes
+ * required for a surface with the given parameters, including sample count.
+ */
+static inline u32
+svga3dsurface_get_serialized_size_extended(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ u32 num_layers,
+ u32 num_samples)
+{
+ uint64_t total_size =
+ svga3dsurface_get_serialized_size(format,
+ base_level_size,
+ num_mip_levels,
+ num_layers);
+ total_size *= max_t(u32, 1, num_samples);
+
+ return min_t(uint64_t, total_size, (uint64_t)U32_MAX);
+}
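(A usage sketch with assumed surface parameters; the wrapper is hypothetical.)

    /* Hypothetical: size the backing store for a 4x-multisampled surface. */
    static inline u32 example_msaa_size(void)
    {
            surf_size_struct base = { .width = 1024, .height = 768, .depth = 1 };

            /* One mip level, one layer, four samples. */
            return svga3dsurface_get_serialized_size_extended(
                            SVGA3D_R8G8B8A8_UNORM, base, 1, 1, 4);
    }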
/**
* svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
@@ -1206,3 +1374,5 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
}
return svga3dsurface_is_dx_screen_target_format(format);
}
+
+#endif /* _SVGA3D_SURFACEDEFS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 27b33ba88430..308370665a8e 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -44,9 +45,21 @@
#define SVGA3D_INVALID_ID ((uint32)-1)
+typedef uint8 SVGABool8; /* 8-bit Bool definition */
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
typedef uint32 SVGA3dColor; /* a, r, g, b */
+typedef uint32 SVGA3dSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 numerator;
+ uint32 denominator;
+}
+#include "vmware_pack_end.h"
+SVGA3dFraction64;
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCopyRect {
@@ -145,7 +158,7 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_BUMPU8V8 = 20,
SVGA3D_BUMPL6V5U5 = 21,
SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
+ SVGA3D_FORMAT_DEAD1 = 23,
SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
@@ -204,8 +217,8 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_R32G32_SINT = 59,
SVGA3D_R32G8X24_TYPELESS = 60,
SVGA3D_D32_FLOAT_S8X24_UINT = 61,
- SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
- SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R32_FLOAT_X8X24 = 62,
+ SVGA3D_X32_G8X24_UINT = 63,
SVGA3D_R10G10B10A2_TYPELESS = 64,
SVGA3D_R10G10B10A2_UINT = 65,
SVGA3D_R11G11B10_FLOAT = 66,
@@ -223,8 +236,8 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_R32_SINT = 78,
SVGA3D_R24G8_TYPELESS = 79,
SVGA3D_D24_UNORM_S8_UINT = 80,
- SVGA3D_R24_UNORM_X8_TYPELESS = 81,
- SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R24_UNORM_X8 = 81,
+ SVGA3D_X24_G8_UINT = 82,
SVGA3D_R8G8_TYPELESS = 83,
SVGA3D_R8G8_UNORM = 84,
SVGA3D_R8G8_UINT = 85,
@@ -296,92 +309,114 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
-typedef enum SVGA3dSurfaceFlags {
- SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+/*
+ * SVGA3d Surface Flags --
+ */
+#define SVGA3D_SURFACE_CUBEMAP (1 << 0)
- /*
- * HINT flags are not enforced by the device but are useful for
- * performance.
- */
- SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
- SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
- SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
- SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
- SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
- SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
- SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
- SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
- SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
- SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
+/*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+#define SVGA3D_SURFACE_HINT_STATIC (CONST64U(1) << 1)
+#define SVGA3D_SURFACE_HINT_DYNAMIC (CONST64U(1) << 2)
+#define SVGA3D_SURFACE_HINT_INDEXBUFFER (CONST64U(1) << 3)
+#define SVGA3D_SURFACE_HINT_VERTEXBUFFER (CONST64U(1) << 4)
+#define SVGA3D_SURFACE_HINT_TEXTURE (CONST64U(1) << 5)
+#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6)
+#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7)
+#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8)
+#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10)
+
+#define SVGA3D_SURFACE_DECODE_RENDERTARGET (CONST64U(1) << 11)
- /*
- * Is this surface using a base-level pitch for it's mob backing?
- *
- * This flag is not intended to be set by guest-drivers, but is instead
- * set by the device when the surface is bound to a mob with a specified
- * pitch.
- */
- SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
+/*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+#define SVGA3D_SURFACE_MOB_PITCH (CONST64U(1) << 12)
- SVGA3D_SURFACE_INACTIVE = (1 << 13),
- SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
- SVGA3D_SURFACE_VOLUME = (1 << 15),
+#define SVGA3D_SURFACE_INACTIVE (CONST64U(1) << 13)
+#define SVGA3D_SURFACE_HINT_RT_LOCKABLE (CONST64U(1) << 14)
+#define SVGA3D_SURFACE_VOLUME (CONST64U(1) << 15)
- /*
- * Required to be set on a surface to bind it to a screen target.
- */
- SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
+/*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+#define SVGA3D_SURFACE_SCREENTARGET (CONST64U(1) << 16)
- /*
- * Align images in the guest-backing mob to 16-bytes.
- */
- SVGA3D_SURFACE_ALIGN16 = (1 << 17),
+/*
+ * Align images in the guest-backing mob to 16-bytes.
+ */
+#define SVGA3D_SURFACE_ALIGN16 (CONST64U(1) << 17)
- SVGA3D_SURFACE_1D = (1 << 18),
- SVGA3D_SURFACE_ARRAY = (1 << 19),
+#define SVGA3D_SURFACE_1D (CONST64U(1) << 18)
+#define SVGA3D_SURFACE_ARRAY (CONST64U(1) << 19)
- /*
- * Bind flags.
- * These are enforced for any surface defined with DefineGBSurface_v2.
- */
- SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
- SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
- SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
- SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
- SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
- SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
- SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
+/*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER (CONST64U(1) << 20)
+#define SVGA3D_SURFACE_BIND_INDEX_BUFFER (CONST64U(1) << 21)
+#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER (CONST64U(1) << 22)
+#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE (CONST64U(1) << 23)
+#define SVGA3D_SURFACE_BIND_RENDER_TARGET (CONST64U(1) << 24)
+#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL (CONST64U(1) << 25)
+#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT (CONST64U(1) << 26)
- /*
- * A note on staging flags:
- *
- * The STAGING flags notes that the surface will not be used directly by the
- * drawing pipeline, i.e. that it will not be bound to any bind point.
- * Staging surfaces may be used by copy operations to move data in and out
- * of other surfaces.
- *
- * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
- * updates indirectly, i.e. the surface will not be updated directly, but
- * will receive copies from staging surfaces.
- */
- SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
- SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
- SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
+/*
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces. No bind flags may be set on surfaces with this flag.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+#define SVGA3D_SURFACE_STAGING_UPLOAD (CONST64U(1) << 27)
+#define SVGA3D_SURFACE_STAGING_DOWNLOAD (CONST64U(1) << 28)
+#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE (CONST64U(1) << 29)
- /*
- * Setting this flag allow this surface to be used with the
- * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
- * buffer surfaces, an no bind flags are allowed to be set on surfaces
- * with this flag.
- */
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
- /*
- * Marker for the last defined bit.
- */
- SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
-} SVGA3dSurfaceFlags;
+/*
+ * Reserved for video operations.
+ */
+#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31)
+
+/*
+ * Specifies that a surface is multisample, and therefore requires the full
+ * mob-backing to store all the samples.
+ */
+#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
+
+#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 33)
+
+/*
+ * Surface flags types:
+ *
+ * SVGA3dSurface1Flags: Lower 32-bits of flags.
+ * SVGA3dSurface2Flags: Upper 32-bits of flags.
+ * SVGA3dSurfaceAllFlags: Full 64-bits of flags.
+ */
+typedef uint32 SVGA3dSurface1Flags;
+typedef uint32 SVGA3dSurface2Flags;
+typedef uint64 SVGA3dSurfaceAllFlags;
+
+#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64_t)MAX_UINT32)
+#define SVGA3D_SURFACE_FLAGS2_MASK (MAX_UINT64 & ~SVGA3D_SURFACE_FLAGS1_MASK)
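
A minimal sketch (not part of the patch) of how the full 64-bit flag set can be split into its two register-sized halves using the masks above; the helper names are illustrative:

static inline SVGA3dSurface1Flags
svga3d_flags_lower_32_bits(SVGA3dSurfaceAllFlags flags)
{
	/* Lower half maps directly onto SVGA3dSurface1Flags. */
	return (SVGA3dSurface1Flags)(flags & SVGA3D_SURFACE_FLAGS1_MASK);
}

static inline SVGA3dSurface2Flags
svga3d_flags_upper_32_bits(SVGA3dSurfaceAllFlags flags)
{
	/* Upper half is shifted down into SVGA3dSurface2Flags. */
	return (SVGA3dSurface2Flags)((flags & SVGA3D_SURFACE_FLAGS2_MASK) >> 32);
}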
#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
( SVGA3D_SURFACE_MOB_PITCH | \
@@ -392,29 +427,41 @@ typedef enum SVGA3dSurfaceFlags {
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
- SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
- SVGA3D_SURFACE_ARRAY | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
- SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
@@ -426,12 +473,36 @@ typedef enum SVGA3dSurfaceFlags {
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
+ SVGA3D_SURFACE_ARRAY | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_MOB_PITCH \
+ )
+
+#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_MOB_PITCH \
)
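
A sketch of how such a disallowed-mask would be used to validate a requested flag set (hypothetical helper, not from the patch):

static inline bool svga3d_multisample_flags_ok(SVGA3dSurfaceAllFlags flags)
{
	if (!(flags & SVGA3D_SURFACE_MULTISAMPLE))
		return true;

	/* A multisampled surface may not carry any disallowed flag. */
	return (flags & SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK) == 0;
}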
#define SVGA3D_SURFACE_DX_ONLY_MASK \
( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
#define SVGA3D_SURFACE_STAGING_MASK \
( SVGA3D_SURFACE_STAGING_UPLOAD | \
@@ -487,7 +558,7 @@ typedef enum {
/*
* Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
*/
SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
@@ -498,22 +569,22 @@ typedef enum {
/*
* Indicated that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked up data)
+ * sampler will linearize the looked up data).
*/
SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
/*
- * Indicates that this format can be used in the bumpmap instructions
+ * Indicates that this format can be used in the bumpmap instructions.
*/
SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
/*
- * Indicates that this format can be sampled by the displacement map sampler
+ * Indicates that this format can be sampled by the displacement map sampler.
*/
SVGA3DFORMAT_OP_DMAP = 0x00020000,
/*
- * Indicates that this format cannot be used with texture filtering
+ * Indicates that this format cannot be used with texture filtering.
*/
SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
@@ -530,18 +601,18 @@ typedef enum {
SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
/*
- * Indicates that this format cannot be used with alpha blending
+ * Indicates that this format cannot be used with alpha blending.
*/
SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
/*
 * Indicates that the device can auto-generate sublevels for resources
- * of this format
+ * of this format.
*/
SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
/*
- * Indicates that this format can be used by vertex texture sampler
+ * Indicates that this format can be used by vertex texture sampler.
*/
SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
@@ -1501,7 +1572,6 @@ union SVGADXQueryResultUnion {
#include "vmware_pack_end.h"
SVGADXQueryResultUnion;
-
typedef enum {
SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
@@ -1533,9 +1603,9 @@ typedef
struct {
union {
struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
+ uint16 function; /* SVGA3dFogFunction */
+ uint8 type; /* SVGA3dFogType */
+ uint8 base; /* SVGA3dFogBase */
};
uint32 uintValue;
};
@@ -1547,19 +1617,27 @@ SVGA3dFogMode;
* Uniquely identify one image (a 1D/2D/3D array) from a surface. This
* is a surface ID as well as face/mipmap indices.
*/
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dSurfaceImageId {
- uint32 sid;
- uint32 face;
- uint32 mipmap;
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
}
#include "vmware_pack_end.h"
SVGA3dSurfaceImageId;
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dSubSurfaceId {
+ uint32 sid;
+ uint32 subResourceId;
+}
+#include "vmware_pack_end.h"
+SVGA3dSubSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
struct {
uint32 width;
uint32 height;
@@ -1582,13 +1660,18 @@ typedef enum {
SVGA_OTABLE_DX9_MAX = 5,
SVGA_OTABLE_DXCONTEXT = 5,
- SVGA_OTABLE_MAX = 6
-} SVGAOTableType;
+ SVGA_OTABLE_DX_MAX = 6,
-/*
- * Deprecated.
- */
-#define SVGA_OTABLE_COUNT 4
+ SVGA_OTABLE_RESERVED1 = 6,
+ SVGA_OTABLE_RESERVED2 = 7,
+
+ /*
+ * Additions to this table need to be tied to HW-version features and
+ * checkpointed accordingly.
+ */
+ SVGA_OTABLE_DEVEL_MAX = 8,
+ SVGA_OTABLE_MAX = 8
+} SVGAOTableType;
typedef enum {
SVGA_COTABLE_MIN = 0,
@@ -1605,7 +1688,7 @@ typedef enum {
SVGA_COTABLE_DXSHADER = 10,
SVGA_COTABLE_DX10_MAX = 11,
SVGA_COTABLE_UAVIEW = 11,
- SVGA_COTABLE_MAX
+ SVGA_COTABLE_MAX = 12,
} SVGACOTableType;
/*
@@ -1626,8 +1709,37 @@ typedef enum SVGAMobFormat {
SVGA3D_MOBFMT_PREDX_MAX = 7,
SVGA3D_MOBFMT_EMPTY = 7,
SVGA3D_MOBFMT_MAX,
+
+ /*
+ * This isn't actually used by the guest, but is a mob-format used
+ * internally by the SVGA device (and is therefore not binary compatible).
+ */
+ SVGA3D_MOBFMT_HB,
} SVGAMobFormat;
#define SVGA3D_MOB_EMPTY_BASE 1
+/*
+ * Multisample pattern types.
+ */
+
+typedef enum SVGA3dMSPattern {
+ SVGA3D_MS_PATTERN_NONE = 0,
+ SVGA3D_MS_PATTERN_MIN = 0,
+ SVGA3D_MS_PATTERN_STANDARD = 1,
+ SVGA3D_MS_PATTERN_CENTER = 2,
+ SVGA3D_MS_PATTERN_MAX = 3,
+} SVGA3dMSPattern;
+
+/*
+ * Precision settings for each sample.
+ */
+
+typedef enum SVGA3dMSQualityLevel {
+ SVGA3D_MS_QUALITY_NONE = 0,
+ SVGA3D_MS_QUALITY_MIN = 0,
+ SVGA3D_MS_QUALITY_FULL = 1,
+ SVGA3D_MS_QUALITY_MAX = 2,
+} SVGA3dMSQualityLevel;
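
A minimal range-check sketch for the two enums above (hypothetical helpers, not from the patch):

static inline bool svga3d_ms_pattern_valid(SVGA3dMSPattern pattern)
{
	/* SVGA3D_MS_PATTERN_MIN is 0, so only the upper bound matters. */
	return pattern < SVGA3D_MS_PATTERN_MAX;
}

static inline bool svga3d_ms_quality_valid(SVGA3dMSQualityLevel quality)
{
	/* Likewise, SVGA3D_MS_QUALITY_MIN is 0. */
	return quality < SVGA3D_MS_QUALITY_MAX;
}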
+
#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 884b1d1fb85f..acb41e28e46f 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index faf6d9b2b891..e5385146e7fc 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 88e72bf9a534..056f54b35d73 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -63,16 +64,26 @@ typedef uint32 SVGAMobId;
#define SVGA_MAX_BITS_PER_PIXEL 32
#define SVGA_MAX_DEPTH 24
#define SVGA_MAX_DISPLAYS 10
+#define SVGA_MAX_SCREEN_SIZE 8192
+#define SVGA_SCREEN_ROOT_LIMIT (SVGA_MAX_SCREEN_SIZE * SVGA_MAX_DISPLAYS)
+
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
*/
-#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
-#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_HIDE 0x0
+#define SVGA_CURSOR_ON_SHOW 0x1
+
+/*
+ * Remove the cursor from the framebuffer
+ * because we need to see what's under it
+ */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2
+
+/* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3
/*
 * The maximum framebuffer size that can be traced for guests unless the
@@ -101,7 +112,10 @@ typedef uint32 SVGAMobId;
#define SVGA_VERSION_0 0
#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
-/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+/*
+ * "Invalid" value for all SVGA IDs.
+ * (Version ID, screen object ID, surface ID...)
+ */
#define SVGA_ID_INVALID 0xFFFFFFFF
/* Port offsets, relative to BAR0 */
@@ -154,7 +168,7 @@ enum {
SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
- SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
+ SVGA_REG_GUEST_ID = 23, /* (Deprecated) */
SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
@@ -186,7 +200,14 @@ enum {
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
- SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
+
+ /*
+ * Max primary memory.
+ * See SVGA_CAP_NO_BB_RESTRICTION.
+ */
+ SVGA_REG_MAX_PRIMARY_MEM = 50,
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
+
 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
@@ -194,7 +215,10 @@ enum {
SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
SVGA_REG_MOB_MAX_SIZE = 57,
- SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
+ SVGA_REG_BLANK_SCREEN_TARGETS = 58,
+ SVGA_REG_CAP2 = 59,
+ SVGA_REG_DEVEL_CAP = 60,
+ SVGA_REG_TOP = 61, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -392,6 +416,7 @@ typedef enum {
SVGA_CB_CONTEXT_0 = 0x0,
SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
SVGA_CB_CONTEXT_MAX = 0x2,
+ SVGA_CB_CONTEXT_HP_MAX = 0x2,
} SVGACBContext;
@@ -448,6 +473,18 @@ typedef enum {
* due to an error. No IRQ is raised.
*/
SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+
+ /*
+ * Written by the host when it has finished a
+ * SVGA_DC_CMD_ASYNC_STOP_QUEUE request for this command buffer
+ * queue. The offset of the first byte not processed is stored in
+ * the errorOffset field of the command buffer header. All guest-
+ * visible side effects of commands up to that point are guaranteed
+ * to be finished before this is written. The
+ * SVGA_IRQFLAG_COMMAND_BUFFER IRQ is raised as long as
+ * SVGA_CB_FLAG_NO_IRQ is not set.
+ */
+ SVGA_CB_STATUS_PARTIAL_COMPLETE = 7,
} SVGACBStatus;
typedef enum {
@@ -460,8 +497,8 @@ typedef enum {
typedef
#include "vmware_pack_begin.h"
struct {
- volatile SVGACBStatus status;
- volatile uint32 errorOffset;
+ volatile SVGACBStatus status; /* Modified by device. */
+ volatile uint32 errorOffset; /* Modified by device. */
uint64 id;
SVGACBFlags flags;
uint32 length;
@@ -472,7 +509,9 @@ struct {
uint32 mobOffset;
} mob;
} ptr;
- uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+ uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise,
+ * modified by device.
+ */
uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
uint32 mustBeZero[6];
}
@@ -483,20 +522,26 @@ typedef enum {
SVGA_DC_CMD_NOP = 0,
SVGA_DC_CMD_START_STOP_CONTEXT = 1,
SVGA_DC_CMD_PREEMPT = 2,
- SVGA_DC_CMD_MAX = 3,
- SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
+ SVGA_DC_CMD_START_QUEUE = 3, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_ASYNC_STOP_QUEUE = 4, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE = 5, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_MAX = 6,
} SVGADeviceContextCmdId;
-typedef struct {
+/*
+ * Starts or stops both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1.
+ */
+
+typedef struct SVGADCCmdStartStop {
uint32 enable;
- SVGACBContext context;
+ SVGACBContext context; /* Must be zero */
} SVGADCCmdStartStop;
/*
* SVGADCCmdPreempt --
*
* This command allows the guest to request that all command buffers
- * on the specified context be preempted that can be. After execution
+ * on SVGA_CB_CONTEXT_0 be preempted where possible. After execution
* of this command all command buffers that were preempted will
* already have SVGA_CB_STATUS_PREEMPTED written into the status
* field. The device might still be processing a command buffer,
@@ -506,12 +551,69 @@ typedef struct {
* command buffer header set to zero.
*/
-typedef struct {
- SVGACBContext context;
+typedef struct SVGADCCmdPreempt {
+ SVGACBContext context; /* Must be zero */
uint32 ignoreIDZero;
} SVGADCCmdPreempt;
/*
+ * Starts the requested command buffer processing queue. Valid only
+ * if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ *
+ * For a command queue to be considered runnable it must be enabled
+ * and any corresponding higher priority queues must also be enabled.
+ * For example, in order for command buffers to be processed on
+ * SVGA_CB_CONTEXT_0 both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1 must
+ * be enabled. But for commands to be runnable on SVGA_CB_CONTEXT_1
+ * only that queue must be enabled.
+ */
+
+typedef struct SVGADCCmdStartQueue {
+ SVGACBContext context;
+} SVGADCCmdStartQueue;
+
+/*
+ * Requests the SVGA device to stop processing the requested command
+ * buffer queue as soon as possible. The guest knows the stop has
+ * completed when one of the following happens.
+ *
+ * 1) A command buffer status of SVGA_CB_STATUS_PARTIAL_COMPLETE is returned
+ * 2) A command buffer error is encountered which would stop the queue
+ * regardless of the async stop request.
+ * 3) All command buffers that have been submitted complete successfully.
+ * 4) The stop completes synchronously if no command buffers are
+ * active on the queue when it is issued.
+ *
+ * If the command queue is not in a runnable state there is no
+ * guarantee this async stop will finish. For instance, if the high
+ * priority queue is not enabled and a stop is requested on the low
+ * priority queue, the high priority queue must be reenabled to
+ * guarantee that the async stop will finish.
+ *
+ * This command along with SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE can be used
+ * to implement mid command buffer preemption.
+ *
+ * Valid only if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ */
+
+typedef struct SVGADCCmdAsyncStopQueue {
+ SVGACBContext context;
+} SVGADCCmdAsyncStopQueue;
+
+/*
+ * Requests the SVGA device to throw away any full command buffers on
+ * the requested command queue that have not been started. For a
+ * driver to know which command buffers were thrown away, it
+ * should only issue this command while the queue is stopped, for
+ * whatever reason.
+ */
+
+typedef struct SVGADCCmdEmptyQueue {
+ SVGACBContext context;
+} SVGADCCmdEmptyQueue;
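
The mid-command-buffer preemption sequence the comments above describe might look like the sketch below; vmw_send_dc_cmd() is a hypothetical helper (not in this patch) that submits one device-context command, and the wait step is elided:

static void example_mid_cb_preemption(SVGACBContext ctx)
{
	SVGADCCmdAsyncStopQueue stop = { .context = ctx };
	SVGADCCmdEmptyQueue empty = { .context = ctx };
	SVGADCCmdStartQueue start = { .context = ctx };

	/* 1) Ask the device to stop the queue as soon as possible. */
	vmw_send_dc_cmd(SVGA_DC_CMD_ASYNC_STOP_QUEUE, &stop, sizeof(stop));

	/*
	 * 2) Wait until a buffer reports SVGA_CB_STATUS_PARTIAL_COMPLETE,
	 *    an error stops the queue, or all submitted buffers complete.
	 */

	/* 3) Throw away full buffers that were never started. */
	vmw_send_dc_cmd(SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE, &empty, sizeof(empty));

	/* 4) Re-enable processing when ready. */
	vmw_send_dc_cmd(SVGA_DC_CMD_START_QUEUE, &start, sizeof(start));
}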
+
+
+/*
* SVGAGMRImageFormat --
*
* This is a packed representation of the source 2D image format
@@ -536,7 +638,7 @@ typedef struct SVGAGMRImageFormat {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; /* Must be zero */
+ uint32 reserved : 16; /* Must be zero */
};
uint32 value;
@@ -672,8 +774,36 @@ SVGASignedPoint;
* SVGA_CAP_GBOBJECTS --
* Enable guest-backed objects and surfaces.
*
- * SVGA_CAP_CMD_BUFFERS_3 --
- * Enable support for command buffers in a mob.
+ * SVGA_CAP_DX --
+ * Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ * Enable support for the high priority command queue, and the
+ * ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ * Allow ScreenTargets to be defined without regard to the 32-bpp
+ * bounding-box memory restrictions, i.e.:
+ *
+ * The summed memory usage of all screens (assuming they were defined as
+ * 32-bpp) must always be less than the value of the
+ * SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ * If this cap is not present, the 32-bpp bounding box around all screens
+ * must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ * register.
+ *
+ * If the cap is present, the bounding box restriction is lifted (and only
+ * the screen-sum limit applies).
+ *
+ * (Note that this is a slight lie... there is still a sanity limit on any
+ * dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
+ * when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ * large enough to express any possible topology without holes between
+ * monitors.)
+ *
+ * SVGA_CAP_CAP2_REGISTER --
+ * If this cap is present, the SVGA_REG_CAP2 register is supported.
*/
#define SVGA_CAP_NONE 0x00000000
@@ -699,8 +829,30 @@ SVGASignedPoint;
#define SVGA_CAP_GBOBJECTS 0x08000000
#define SVGA_CAP_DX 0x10000000
#define SVGA_CAP_HP_CMD_QUEUE 0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION 0x40000000
+#define SVGA_CAP_CAP2_REGISTER 0x80000000
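
A sketch of the two checks SVGA_CAP_NO_BB_RESTRICTION describes, assuming the caller has already computed the summed 32-bpp memory usage of all screens and the 32-bpp bounding-box size (illustrative helper, not part of the patch):

static bool example_topology_fits(uint64 screen_sum_bytes,
				  uint64 bound_box_bytes,
				  uint64 max_primary_mem, uint32 caps)
{
	/* The screen-sum limit always applies. */
	if (screen_sum_bytes > max_primary_mem)
		return false;

	/*
	 * Without SVGA_CAP_NO_BB_RESTRICTION, the 32-bpp bounding box
	 * around all screens must also fit in max primary memory.
	 */
	if (!(caps & SVGA_CAP_NO_BB_RESTRICTION) &&
	    bound_box_bytes > max_primary_mem)
		return false;

	return true;
}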
-#define SVGA_CAP_CMD_RESERVED 0x80000000
+/*
+ * The SVGA_REG_CAP2 register is an additional set of SVGA capability bits.
+ *
+ * SVGA_CAP2_GROW_OTABLE --
+ * Allow the GrowOTable/DXGrowCOTable commands.
+ *
+ * SVGA_CAP2_INTRA_SURFACE_COPY --
+ * Allow the IntraSurfaceCopy command.
+ *
+ * SVGA_CAP2_DX2 --
+ * Allow the DefineGBSurface_v3 and WholeSurfaceCopy commands.
+ *
+ * SVGA_CAP2_RESERVED --
+ * Reserve the last bit for extending the SVGA capabilities to some
+ * future mechanisms.
+ */
+#define SVGA_CAP2_NONE 0x00000000
+#define SVGA_CAP2_GROW_OTABLE 0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2 0x00000004
+#define SVGA_CAP2_RESERVED 0x80000000
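
A sketch of how the new capability set would be probed; vmw_read_reg() stands in for whatever register accessor the driver uses and is not defined by this patch:

static uint32 example_read_caps2(uint32 caps)
{
	/* SVGA_REG_CAP2 is only defined when CAP2_REGISTER is advertised. */
	if (!(caps & SVGA_CAP_CAP2_REGISTER))
		return SVGA_CAP2_NONE;

	return vmw_read_reg(SVGA_REG_CAP2);
}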
/*
@@ -722,7 +874,8 @@ typedef enum {
SVGABackdoorCapDeviceCaps = 0,
SVGABackdoorCapFifoCaps = 1,
SVGABackdoorCap3dHWVersion = 2,
- SVGABackdoorCapMax = 3,
+ SVGABackdoorCapDeviceCaps2 = 3,
+ SVGABackdoorCapMax = 4,
} SVGABackdoorCapType;
@@ -1914,16 +2067,6 @@ SVGAFifoCmdRemapGMR2;
#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
-/*
- * To simplify autoDetect display configuration, support a minimum of
- * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
- * numDisplays = 2
- * maxWidth = numDisplay * 1920 = 3840
- * maxHeight = rotated width of single monitor = 1920
- * vramSize = maxWidth * maxHeight * 4 = 29491200
- */
-#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
-
#if defined(VMX86_SERVER)
#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
index 2e8ba4df8de9..350bbc6fab02 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
+ * Copyright 2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -40,7 +41,10 @@ typedef uint64 PPN64;
typedef bool Bool;
+#define MAX_UINT64 U64_MAX
#define MAX_UINT32 U32_MAX
#define MAX_UINT16 U16_MAX
+#define CONST64U(x) x##ULL
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
index 7e7b0ce34aa2..75308bd0d970 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
index e2e440ed3d44..e93d6f28b68c 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
__packed
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 55d32ae43aa4..0b9ee7fb45d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index bf2e77ad5a20..6a2a9d69043b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index e8c94b19db7b..fc6673cde289 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
new file mode 100644
index 000000000000..2dda03345761
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -0,0 +1,1123 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+#include "drm/ttm/ttm_object.h"
+
+
+/**
+ * struct vmw_user_buffer_object - User-space-visible buffer object
+ *
+ * @prime: The prime object providing user visibility.
+ * @vbo: The struct vmw_buffer_object
+ */
+struct vmw_user_buffer_object {
+ struct ttm_prime_object prime;
+ struct vmw_buffer_object vbo;
+};
+
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_buffer_object, base);
+}
+
+
+/**
+ * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_user_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
+ * object.
+ */
+static struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+ return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+}
+
+
+/**
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @placement: The placement to pin it.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = {interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+ uint32_t new_flags;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, placement, &ctx);
+
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = {interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+ uint32_t new_flags;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (buf->pin_count > 0) {
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ goto out_unreserve;
+ }
+
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ if (likely(ret == 0) || ret == -ERESTARTSYS)
+ goto out_unreserve;
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+
+out_unreserve:
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+err:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+ interruptible);
+}
+
+
+/**
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = {interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement placement;
+ struct ttm_place place;
+ int ret = 0;
+ uint32_t new_flags;
+
+ place = vmw_vram_placement.placement[0];
+ place.lpfn = bo->num_pages;
+ placement.num_placement = 1;
+ placement.placement = &place;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &place;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ /*
+ * Is this buffer already in vram but not at the start of it?
+ * In that case, evict it first because TTM isn't good at handling
+ * that situation.
+ */
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0 &&
+ buf->pin_count == 0) {
+ ctx.interruptible = false;
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ }
+
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, &placement, &ctx);
+
+ /* For some reason we didn't end up at the start of vram */
+ WARN_ON(ret == 0 && bo->offset != 0);
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+
+/**
+ * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
+ *
+ * This function takes the reservation_sem in write mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to unpin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(buf, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+ SVGAGuestPtr *ptr)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+ ptr->offset = bo->offset;
+ } else {
+ ptr->gmrId = bo->mem.start;
+ ptr->offset = 0;
+ }
+}
+
+
+/**
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+ *
+ * @vbo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+{
+ struct ttm_operation_ctx ctx = { false, true };
+ struct ttm_place pl;
+ struct ttm_placement placement;
+ struct ttm_buffer_object *bo = &vbo->base;
+ uint32_t old_mem_type = bo->mem.mem_type;
+ int ret;
+
+ lockdep_assert_held(&bo->resv->lock.base);
+
+ if (pin) {
+ if (vbo->pin_count++ > 0)
+ return;
+ } else {
+ WARN_ON(vbo->pin_count <= 0);
+ if (--vbo->pin_count > 0)
+ return;
+ }
+
+ pl.fpfn = 0;
+ pl.lpfn = 0;
+ pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+ | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ if (pin)
+ pl.flags |= TTM_PL_FLAG_NO_EVICT;
+
+ memset(&placement, 0, sizeof(placement));
+ placement.num_placement = 1;
+ placement.placement = &pl;
+
+ ret = ttm_bo_validate(bo, &placement, &ctx);
+
+ BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
+
+
+/**
+ * vmw_bo_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+{
+ struct ttm_buffer_object *bo = &vbo->base;
+ bool not_used;
+ void *virtual;
+ int ret;
+
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ if (ret)
+ DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+ return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
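
A usage sketch (illustrative only): because the map is cached, repeated calls are cheap, and no explicit unmap is needed while the object stays pinned or reserved:

static int example_fill_bo(struct vmw_buffer_object *vbo, u8 value)
{
	void *virtual = vmw_bo_map_and_cache(vbo);

	if (!virtual)
		return -ENOMEM;

	memset(virtual, value, vbo->base.num_pages << PAGE_SHIFT);
	/* The cached map is torn down on move, swapout or destruction. */
	return 0;
}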
+
+
+/**
+ * vmw_bo_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_bo_map_and_cache().
+ */
+void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+{
+ if (vbo->map.bo == NULL)
+ return;
+
+ ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+ bool user)
+{
+ static size_t struct_size, user_struct_size;
+ size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+ if (unlikely(struct_size == 0)) {
+ size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+ struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_buffer_object));
+ user_struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+ }
+
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ page_array_size +=
+ ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+ return ((user) ? user_struct_size : struct_size) +
+ page_array_size;
+}
+
+
+/**
+ * vmw_bo_bo_free - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+ vmw_bo_unmap(vmw_bo);
+ kfree(vmw_bo);
+}
+
+
+/**
+ * vmw_user_bo_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+
+ vmw_bo_unmap(&vmw_user_bo->vbo);
+ ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
+
+/**
+ * vmw_bo_init - Initialize a vmw buffer object
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @size: Buffer object size in bytes.
+ * @placement: Initial placement.
+ * @interruptible: Whether waits should be performed interruptible.
+ * @bo_free: The buffer object destructor.
+ * Returns: Zero on success, negative error code on error.
+ *
+ * Note that on error, the code will free the buffer object.
+ */
+int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free)(struct ttm_buffer_object *bo))
+{
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ size_t acc_size;
+ int ret;
+ bool user = (bo_free == &vmw_user_bo_destroy);
+
+ WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
+
+ acc_size = vmw_bo_acc_size(dev_priv, size, user);
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+ INIT_LIST_HEAD(&vmw_bo->res_list);
+
+ ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, interruptible, acc_size,
+ NULL, NULL, bo_free);
+ return ret;
+}
+
+
+/**
+ * vmw_user_bo_release - TTM reference base object release callback for
+ * vmw user buffer objects
+ *
+ * @p_base: The TTM base object pointer about to be unreferenced.
+ *
+ * Clears the TTM base object pointer and drops the reference the
+ * base object has on the underlying struct vmw_buffer_object.
+ */
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base = *p_base;
+ struct ttm_buffer_object *bo;
+
+ *p_base = NULL;
+
+ if (unlikely(base == NULL))
+ return;
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ bo = &vmw_user_bo->vbo.base;
+ ttm_bo_unref(&bo);
+}
+
+
+/**
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+ * for vmw user buffer objects
+ *
+ * @base: Pointer to the TTM base object
+ * @ref_type: Reference type of the reference reaching zero.
+ *
+ * Called when user-space drops its last synccpu reference on the buffer
+ * object, either explicitly or as part of a cleanup file close.
+ */
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
+{
+ struct vmw_user_buffer_object *user_bo;
+
+ user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ break;
+ default:
+ WARN_ONCE(true, "Undefined buffer object reference release.\n");
+ }
+}
+
+
+/**
+ * vmw_user_bo_alloc - Allocate a user buffer object
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the buffer object.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+ * should be assigned.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * assigned, or NULL if no such pointer is required.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo,
+ struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(!user_bo)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement, true,
+ &vmw_user_bo_destroy);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = ttm_bo_reference(&user_bo->vbo.base);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_bo_release,
+ &vmw_user_bo_ref_obj_release);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
+ }
+
+ *p_vbo = &user_bo->vbo;
+ if (p_base) {
+ *p_base = &user_bo->prime.base;
+ kref_get(&(*p_base)->refcount);
+ }
+ *handle = user_bo->prime.base.hash.key;
+
+out_no_base_object:
+ return ret;
+}
+
+
+/**
+ * vmw_user_bo_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+
+ if (unlikely(bo->destroy != vmw_user_bo_destroy))
+ return -EPERM;
+
+ vmw_user_bo = vmw_user_buffer_object(bo);
+
+ /* Check that the caller has opened the object. */
+ if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+ return 0;
+
+ DRM_ERROR("Could not grant buffer access.\n");
+ return -EPERM;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ * Return: Zero on success, negative error code on error. In particular,
+ * -EBUSY will be returned if a dontblock operation is requested and the
+ * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
+ * interrupted by a signal.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ struct ttm_buffer_object *bo = &user_bo->vbo.base;
+ bool existed;
+ int ret;
+
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+ long lret;
+
+ lret = reservation_object_wait_timeout_rcu
+ (bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ if (!lret)
+ return -EBUSY;
+ else if (lret < 0)
+ return lret;
+ return 0;
+ }
+
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+
+ return ret;
+}
+
+/**
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_bo_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
+
+ return 0;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_buffer_object *vbo;
+ struct vmw_user_buffer_object *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_base_object *buffer_base;
+ int ret;
+
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
+
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+ &buffer_base);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(vbo, struct vmw_user_buffer_object,
+ vbo);
+ ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_bo_unreference(&vbo);
+ ttm_base_object_unref(&buffer_base);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/**
+ * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
+ * allocation functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and allocates a
+ * struct vmw_user_buffer_object bo.
+ */
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_buffer_object *vbo;
+ uint32_t handle;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &vbo,
+ NULL);
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ rep->handle = handle;
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+
+ vmw_bo_unreference(&vbo);
+
+out_no_bo:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+
+/**
+ * vmw_bo_unref_ioctl - Generic handle close ioctl.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and closes a
+ * handle to a TTM base object, optionally freeing the object.
+ */
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_unref_dmabuf_arg *arg =
+ (struct drm_vmw_unref_dmabuf_arg *)data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+ *
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle
+ * @out: Pointer to where a pointer to the embedded
+ * struct vmw_buffer_object should be placed.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, negative error code on error.
+ *
+ * Both the output base object pointer and the vmw buffer object pointer
+ * will be refcounted.
+ */
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_buffer_object **out,
+ struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -ESRCH;
+ }
+
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+ ttm_base_object_unref(&base);
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -EINVAL;
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+ if (p_base)
+ *p_base = base;
+ else
+ ttm_base_object_unref(&base);
+ *out = &vmw_user_bo->vbo;
+
+ return 0;
+}
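
Usage sketch: both returned references are refcounted and must be dropped by the caller (illustrative only):

static int example_with_handle(struct ttm_object_file *tfile, uint32_t handle)
{
	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
	if (ret)
		return ret;

	/* ... operate on vbo ... */

	vmw_bo_unreference(&vbo);
	return 0;
}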
+
+
+/**
+ * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+ *
+ * @tfile: The TTM object file to register the handle with.
+ * @vbo: The embedded vmw buffer object.
+ * @handle: Pointer to where the new handle should be placed.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+ struct vmw_buffer_object *vbo,
+ uint32_t *handle)
+{
+ struct vmw_user_buffer_object *user_bo;
+
+ if (vbo->base.destroy != vmw_user_bo_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+
+ *handle = user_bo->prime.base.hash.key;
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL, false);
+}
+
+
+/**
+ * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Unlike the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL) {
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ reservation_object_add_excl_fence(bo->resv, &fence->base);
+ dma_fence_put(&fence->base);
+ } else {
+ reservation_object_add_excl_fence(bo->resv, &fence->base);
+ }
+}
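+
+/*
+ * Illustrative sketch of the usual calling pattern; the
+ * reserve/unreserve bracketing is the caller's responsibility, as in
+ * the unbind paths later in this patch:
+ *
+ *   ttm_bo_reserve(bo, false, false, NULL);
+ *   (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ *   vmw_bo_fence_single(bo, fence);
+ *   if (likely(fence != NULL))
+ *       vmw_fence_obj_unreference(&fence);
+ *   ttm_bo_unreserve(bo);
+ */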
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_buffer_object *vbo;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ args->size, false, &args->handle,
+ &vbo, NULL);
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ vmw_bo_unreference(&vbo);
+out_no_bo:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
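+
+/*
+ * Worked example: a 1920x1080 dumb buffer at bpp == 32 gives
+ * pitch = 1920 * ((32 + 7) / 8) = 7680 bytes and
+ * size = 7680 * 1080 = 8294400 bytes. The (bpp + 7) / 8 term rounds
+ * up to whole bytes, so e.g. bpp == 15 still yields two bytes per
+ * pixel.
+ */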
+
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_buffer_object *out_buf;
+ int ret;
+
+ ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+ if (ret != 0)
+ return -EINVAL;
+
+ *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+ vmw_bo_unreference(&out_buf);
+ return 0;
+}
+
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ handle, TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+{
+ /* Is @bo embedded in a struct vmw_buffer_object? */
+ if (bo->destroy != vmw_bo_bo_free &&
+ bo->destroy != vmw_user_bo_destroy)
+ return;
+
+ /* Kill any cached kernel maps before swapout */
+ vmw_bo_unmap(vmw_buffer_object(bo));
+}
+
+
+/**
+ * vmw_bo_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Detaches cached maps and device bindings that require that the
+ * buffer doesn't move.
+ */
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_buffer_object *vbo;
+
+ if (mem == NULL)
+ return;
+
+ /* Make sure @bo is embedded in a struct vmw_buffer_object. */
+ if (bo->destroy != vmw_bo_bo_free &&
+ bo->destroy != vmw_user_bo_destroy)
+ return;
+
+ vbo = container_of(bo, struct vmw_buffer_object, base);
+
+ /*
+ * Kill any cached kernel maps before move to or from VRAM.
+ * With other types of moves, the underlying pages stay the same,
+ * and the map can be kept.
+ */
+ if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+ vmw_bo_unmap(vbo);
+
+ /*
+ * If we're moving a backup MOB out of MOB placement, then make sure we
+ * read back all resource content first, and unbind the MOB from
+ * the resource.
+ */
+ if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+ vmw_resource_unbind_list(vbo);
+}
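+
+/*
+ * In short: a cached kernel map survives a move only when neither the
+ * old nor the new placement is VRAM, since GMR/MOB/system moves keep
+ * the same backing pages, and a move out of MOB placement additionally
+ * reads back resource content and unbinds the MOB.
+ */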
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 9f45d5004cae..e7e4655d3f36 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 36c7b6c839c0..3b75af9bf85f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3767ac335aca..7c3cb8efd11a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@ struct vmw_user_context {
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
spinlock_t cotable_lock;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
};
static void vmw_user_context_free(struct vmw_resource *res);
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_dma_buffer *mob)
+ struct vmw_buffer_object *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
if (mob == NULL) {
if (uctx->dx_query_mob) {
uctx->dx_query_mob->dx_query_ctx = NULL;
- vmw_dmabuf_unreference(&uctx->dx_query_mob);
+ vmw_bo_unreference(&uctx->dx_query_mob);
uctx->dx_query_mob = NULL;
}
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
mob->dx_query_ctx = ctx_res;
if (!uctx->dx_query_mob)
- uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+ uctx->dx_query_mob = vmw_bo_reference(mob);
return 0;
}
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
*
* @ctx_res: The context resource
*/
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index cbf54ea7b4c0..1d45714e1d5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
mutex_unlock(&dev_priv->binding_mutex);
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(&res->backup->base, fence);
+ vmw_bo_fence_single(&res->backup->base, fence);
vmw_fence_obj_unreference(&fence);
return 0;
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
- struct vmw_dma_buffer *buf, *old_buf = res->backup;
+ struct vmw_buffer_object *buf, *old_buf = res->backup;
struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
size_t old_size = res->backup_size;
size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (!buf)
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
- true, vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+ true, vmw_bo_bo_free);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
/* Let go of the old mob. */
list_del(&res->mob_head);
list_add_tail(&res->mob_head, &buf->res_list);
- vmw_dmabuf_unreference(&old_buf);
+ vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
return 0;
@@ -491,7 +491,7 @@ out_map_new:
ttm_bo_kunmap(&old_map);
out_wait:
ttm_bo_unreserve(bo);
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
deleted file mode 100644
index d59d9dd16ebc..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <drm/ttm/ttm_placement.h>
-
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-
-
-/**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @placement: The placement to pin it.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- struct ttm_placement *placement,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
- uint32_t new_flags;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- if (buf->pin_count > 0)
- ret = ttm_bo_mem_compat(placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, placement, &ctx);
-
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_write_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
- uint32_t new_flags;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- if (buf->pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- goto out_unreserve;
- }
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
- if (likely(ret == 0) || ret == -ERESTARTSYS)
- goto out_unreserve;
-
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
-
-out_unreserve:
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-err:
- ttm_write_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
- interruptible);
-}
-
-/**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to pin.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement placement;
- struct ttm_place place;
- int ret = 0;
- uint32_t new_flags;
-
- place = vmw_vram_placement.placement[0];
- place.lpfn = bo->num_pages;
- placement.num_placement = 1;
- placement.placement = &place;
- placement.num_busy_placement = 1;
- placement.busy_placement = &place;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err_unlock;
-
- /*
- * Is this buffer already in vram but not at the start of it?
- * In that case, evict it first because TTM isn't good at handling
- * that situation.
- */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
- bo->mem.start > 0 &&
- buf->pin_count == 0) {
- ctx.interruptible = false;
- (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
- }
-
- if (buf->pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, &placement, &ctx);
-
- /* For some reason we didn't end up at the start of vram */
- WARN_ON(ret == 0 && bo->offset != 0);
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-err_unlock:
- ttm_write_unlock(&dev_priv->reservation_sem);
-
- return ret;
-}
-
-/**
- * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
- *
- * This function takes the reservation_sem in write mode.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to unpin.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- vmw_bo_pin_reserved(buf, false);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
- * of a buffer.
- *
- * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
- * @ptr: SVGAGuestPtr returning the result.
- */
-void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
- SVGAGuestPtr *ptr)
-{
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->offset;
- } else {
- ptr->gmrId = bo->mem.start;
- ptr->offset = 0;
- }
-}
-
-
-/**
- * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
- *
- * @vbo: The buffer object. Must be reserved.
- * @pin: Whether to pin or unpin.
- *
- */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
-{
- struct ttm_operation_ctx ctx = { false, true };
- struct ttm_place pl;
- struct ttm_placement placement;
- struct ttm_buffer_object *bo = &vbo->base;
- uint32_t old_mem_type = bo->mem.mem_type;
- int ret;
-
- lockdep_assert_held(&bo->resv->lock.base);
-
- if (pin) {
- if (vbo->pin_count++ > 0)
- return;
- } else {
- WARN_ON(vbo->pin_count <= 0);
- if (--vbo->pin_count > 0)
- return;
- }
-
- pl.fpfn = 0;
- pl.lpfn = 0;
- pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
- | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
- if (pin)
- pl.flags |= TTM_PL_FLAG_NO_EVICT;
-
- memset(&placement, 0, sizeof(placement));
- placement.num_placement = 1;
- placement.placement = &pl;
-
- ret = ttm_bo_validate(bo, &placement, &ctx);
-
- BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
-
-
-/*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
- *
- * @vbo: The buffer object whose map we are tearing down.
- *
- * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
- */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
-{
- if (vbo->map.bo == NULL)
- return;
-
- ttm_bo_kunmap(&vbo->map);
-}
-
-
-/*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
- *
- * @vbo: The buffer object to map
- * Return: A kernel virtual address or NULL if mapping failed.
- *
- * This function maps a buffer object into the kernel address space, or
- * returns the virtual kernel address of an already existing map. The virtual
- * address remains valid as long as the buffer object is pinned or reserved.
- * The cached map is torn down on either
- * 1) Buffer object move
- * 2) Buffer object swapout
- * 3) Buffer object destruction
- *
- */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
-{
- struct ttm_buffer_object *bo = &vbo->base;
- bool not_used;
- void *virtual;
- int ret;
-
- virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
- if (virtual)
- return virtual;
-
- ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
- if (ret)
- DRM_ERROR("Buffer object map failed: %d.\n", ret);
-
- return ttm_kmap_obj_virtual(&vbo->map, &not_used);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 09cc721160c4..bb6dbbe18835 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -137,6 +137,12 @@
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
+ union drm_vmw_gb_surface_create_ext_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
+ union drm_vmw_gb_surface_reference_ext_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -153,9 +159,9 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
@@ -219,11 +225,17 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
- vmw_user_dmabuf_synccpu_ioctl,
+ vmw_user_bo_synccpu_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
+ vmw_gb_surface_define_ext_ioctl,
+ DRM_AUTH | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
+ vmw_gb_surface_reference_ext_ioctl,
+ DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
@@ -258,6 +270,15 @@ MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
+static void vmw_print_capabilities2(uint32_t capabilities2)
+{
+ DRM_INFO("Capabilities2:\n");
+ if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
+ DRM_INFO(" Grow oTable.\n");
+ if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
+ DRM_INFO(" IntraSurface copy.\n");
+}
+
static void vmw_print_capabilities(uint32_t capabilities)
{
DRM_INFO("Capabilities:\n");
@@ -321,7 +342,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct vmw_dma_buffer *vbo;
+ struct vmw_buffer_object *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
@@ -335,9 +356,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (!vbo)
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_ne_placement, false,
- &vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+ &vmw_sys_ne_placement, false,
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
return ret;
@@ -358,7 +379,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
- vmw_dmabuf_unreference(&vbo);
+ vmw_bo_unreference(&vbo);
} else
dev_priv->dummy_query_bo = vbo;
@@ -460,7 +481,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
- vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+ vmw_bo_unreference(&dev_priv->dummy_query_bo);
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
@@ -644,6 +665,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
+ mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
@@ -683,6 +705,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+ if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+ dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
+
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
@@ -751,6 +779,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
vmw_print_capabilities(dev_priv->capabilities);
+ if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+ vmw_print_capabilities2(dev_priv->capabilities2);
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
@@ -883,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->has_mob) {
spin_lock(&dev_priv->cap_lock);
- vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
spin_unlock(&dev_priv->cap_lock);
}
@@ -898,9 +928,23 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
+ if (dev_priv->has_dx) {
+ /*
+ * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
+ * support.
+ */
+ if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_SM41);
+ dev_priv->has_sm4_1 = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ }
+ }
+
DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
- DRM_INFO("Atomic: %s\n",
- (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");
+ DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+ ? "yes." : "no.");
+ DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
VMWGFX_REPO, VMWGFX_GIT_VERSION);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fcbe1620d50..1abe21758b0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -43,10 +43,10 @@
#include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180322"
+#define VMWGFX_DRIVER_DATE "20180704"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 14
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -83,10 +83,10 @@
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
- bool gb_aware;
+ bool gb_aware; /* user-space is guest-backed aware */
};
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
unsigned long backup_size;
bool res_dirty; /* Protected by backup buffer reserved */
bool backup_dirty; /* Protected by backup buffer reserved */
- struct vmw_dma_buffer *backup;
+ struct vmw_buffer_object *backup;
unsigned long backup_offset;
unsigned long pin_count; /* Protected by resource reserved */
const struct vmw_res_func *func;
@@ -166,7 +166,7 @@ struct vmw_surface_offset;
struct vmw_surface {
struct vmw_resource res;
- uint32_t flags;
+ SVGA3dSurfaceAllFlags flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
struct drm_vmw_size base_size;
@@ -180,6 +180,8 @@ struct vmw_surface {
SVGA3dTextureFilter autogen_filter;
uint32_t multisample_count;
struct list_head view_list;
+ SVGA3dMSPattern multisample_pattern;
+ SVGA3dMSQualityLevel quality_level;
};
struct vmw_marker_queue {
@@ -304,7 +306,7 @@ struct vmw_sw_context{
uint32_t cmd_bounce_size;
struct list_head resource_list;
struct list_head ctx_resource_list; /* For contexts and cotables */
- struct vmw_dma_buffer *cur_query_bo;
+ struct vmw_buffer_object *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +317,7 @@ struct vmw_sw_context{
bool staged_bindings_inuse;
struct list_head staged_cmd_res;
struct vmw_resource_val_node *dx_ctx_node;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
};
@@ -386,6 +388,7 @@ struct vmw_private {
uint32_t initial_height;
u32 *mmio_virt;
uint32_t capabilities;
+ uint32_t capabilities2;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
@@ -397,6 +400,7 @@ struct vmw_private {
spinlock_t cap_lock;
bool has_dx;
bool assume_16bpp;
+ bool has_sm4_1;
/*
* VGA registers.
@@ -412,6 +416,15 @@ struct vmw_private {
uint32_t num_displays;
/*
+ * Currently, requested_layout_mutex protects the GUI positioning
+ * state in the display unit. With that use case, this mutex is only
+ * taken during the layout ioctl and atomic check_modeset. Other
+ * display unit state can be protected with this mutex, but that
+ * needs careful consideration.
+ */
+ struct mutex requested_layout_mutex;
+
+ /*
* Framebuffer info.
*/
@@ -513,8 +526,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
- struct vmw_dma_buffer *dummy_query_bo;
- struct vmw_dma_buffer *pinned_bo;
+ struct vmw_buffer_object *dummy_query_bo;
+ struct vmw_buffer_object *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -623,43 +636,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf);
+ struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interuptable,
- void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf,
- struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf,
- uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_dma_buffer **out,
- struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +653,70 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
- struct vmw_dma_buffer *new_backup,
+ struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
-extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
-extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
/**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
*/
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- struct ttm_placement *placement,
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
+ bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interuptable,
+ void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_dma_buf,
+ struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+ struct vmw_buffer_object *dma_buf,
+ uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+ uint32_t id, struct vmw_buffer_object **out,
+ struct ttm_base_object **base);
+extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +768,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
extern const size_t vmw_tt_size;
@@ -1041,8 +1051,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+ struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
@@ -1070,14 +1080,22 @@ extern int vmw_surface_validate(struct vmw_private *dev_priv,
struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
- uint32_t svga3d_flags,
+ SVGA3dSurfaceAllFlags svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
+ SVGA3dMSPattern multisample_pattern,
+ SVGA3dMSQualityLevel quality_level,
struct vmw_surface **srf_out);
+extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv);
/*
* Shader management - vmwgfx_shader.c
@@ -1224,6 +1242,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
u32 w, u32 h,
struct vmw_diff_cpy *diff);
+/* Host messaging - vmwgfx_msg.c: */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+ char *buffer, size_t *length);
+int vmw_host_log(const char *log);
+
/**
* Inline helper functions
*/
@@ -1243,9 +1266,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
return srf;
}
-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
- struct vmw_dma_buffer *tmp_buf = *buf;
+ struct vmw_buffer_object *tmp_buf = *buf;
*buf = NULL;
if (tmp_buf != NULL) {
@@ -1255,7 +1278,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
}
}
-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
{
if (ttm_bo_reference(&buf->base))
return buf;
@@ -1302,10 +1326,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
{
WRITE_ONCE(*addr, value);
}
-
-/**
- * Add vmw_msg module function
- */
-extern int vmw_host_log(const char *log);
-
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c9d5cc237124..1f134570b759 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
- struct vmw_dma_buffer *new_backup;
+ struct vmw_buffer_object *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_dma_buffer **vmw_bo_p);
+ struct vmw_buffer_object **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_dma_buffer *vbo,
+ struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
/**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
}
vmw_resource_unreserve(res, switch_backup, val->new_backup,
val->new_backup_offset);
- vmw_dmabuf_unreference(&val->new_backup);
+ vmw_bo_unreference(&val->new_backup);
}
}
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
}
if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_dma_buffer *vbo,
+ struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret;
if (res->backup) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
if (sw_context->dx_query_mob) {
- struct vmw_dma_buffer *expected_dx_query_mob;
+ struct vmw_buffer_object *expected_dx_query_mob;
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
- struct vmw_dma_buffer *backup = res->backup;
+ struct vmw_buffer_object *backup = res->backup;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
/* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *new_query_bo,
+ struct vmw_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
- vmw_dmabuf_reference(sw_context->cur_query_bo);
+ vmw_bo_reference(sw_context->cur_query_bo);
}
}
}
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_dma_buffer **vmw_bo_p)
+ struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
- NULL);
+ ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
return 0;
out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
@@ -1343,15 +1342,14 @@ out_no_reloc:
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
- struct vmw_dma_buffer **vmw_bo_p)
+ struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
- NULL);
+ ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0;
out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
SVGA3dCmdDXBindQuery q;
} *cmd;
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
header);
out_no_surface:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_dma_buffer *dma_buf;
+ struct vmw_buffer_object *dma_buf;
int ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
if (val_node->first_usage)
val_node->no_buffer_needed = true;
- vmw_dmabuf_unreference(&val_node->new_backup);
+ vmw_bo_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset;
@@ -3118,6 +3116,32 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
&cmd->body.destSid, NULL);
}
+/**
+ * vmw_cmd_intra_surface_copy - Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdIntraSurfaceCopy body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.surface.sid, NULL);
+}
+
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3232,9 +3256,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
false, false, false),
@@ -3473,6 +3497,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
&vmw_cmd_dx_transfer_from_buffer,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
+ true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3701,8 +3727,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
bool interruptible,
bool validate_as_mob)
{
- struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
- base);
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
struct ttm_operation_ctx ctx = { interruptible, true };
int ret;
@@ -4423,7 +4449,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
return;
@@ -4432,7 +4458,7 @@ out_no_emit:
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 54e300365a5c..b913a56f3426 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -42,7 +42,7 @@ struct vmw_fb_par {
void *vmalloc;
struct mutex bo_mutex;
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
- struct vmw_dma_buffer *vbo = par->vmw_bo;
+ struct vmw_buffer_object *vbo = par->vmw_bo;
void *virtual;
if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
- virtual = vmw_dma_buffer_map_and_cache(vbo);
+ virtual = vmw_bo_map_and_cache(vbo);
if (!virtual)
goto out_unreserve;
@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
*/
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
- size_t size, struct vmw_dma_buffer **out)
+ size_t size, struct vmw_buffer_object **out)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
goto err_unlock;
}
- ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+ ret = vmw_bo_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement,
false,
- &vmw_dmabuf_bo_free);
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
@@ -439,38 +439,13 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
struct drm_crtc *crtc = set->crtc;
- struct drm_framebuffer *fb;
- struct drm_crtc *tmp;
- struct drm_device *dev = set->crtc->dev;
struct drm_modeset_acquire_ctx ctx;
int ret;
drm_modeset_acquire_init(&ctx, 0);
restart:
- /*
- * NOTE: ->set_config can also disable other crtcs (if we steal all
- * connectors from it), hence we need to refcount the fbs across all
- * crtcs. Atomic modeset will have saner semantics ...
- */
- drm_for_each_crtc(tmp, dev)
- tmp->primary->old_fb = tmp->primary->fb;
-
- fb = set->fb;
-
ret = crtc->funcs->set_config(set, &ctx);
- if (ret == 0) {
- crtc->primary->crtc = crtc;
- crtc->primary->fb = fb;
- }
-
- drm_for_each_crtc(tmp, dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
- }
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
@@ -516,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
}
if (par->vmw_bo && detach_bo && unref_bo)
- vmw_dmabuf_unreference(&par->vmw_bo);
+ vmw_bo_unreference(&par->vmw_bo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 9ed544f8958f..3d546d409334 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
struct vmw_private *dev_priv = fman->dev_priv;
struct vmwgfx_wait_cb cb;
long ret = timeout;
- unsigned long irq_flags;
if (likely(vmw_fence_obj_signaled(fence)))
return timeout;
@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
- spin_lock_irqsave(f->lock, irq_flags);
+ spin_lock(f->lock);
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
cb.task = current;
list_add(&cb.base.node, &f->cb_list);
- while (ret > 0) {
+ for (;;) {
__vmw_fences_update(fman);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- break;
+ /*
+ * We can use the barrier-free __set_current_state() since
+ * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
+ * fence spinlock.
+ */
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irqrestore(f->lock, irq_flags);
- ret = schedule_timeout(ret);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
+ if (ret == 0 && timeout > 0)
+ ret = 1;
+ break;
+ }
- spin_lock_irqsave(f->lock, irq_flags);
- if (ret > 0 && intr && signal_pending(current))
+ if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
- }
+ break;
+ }
+ if (ret == 0)
+ break;
+
+ spin_unlock(f->lock);
+
+ ret = schedule_timeout(ret);
+
+ spin_lock(f->lock);
+ }
+ __set_current_state(TASK_RUNNING);
if (!list_empty(&cb.base.node))
list_del(&cb.base.node);
- __set_current_state(TASK_RUNNING);
out:
- spin_unlock_irqrestore(f->lock, irq_flags);
+ spin_unlock(f->lock);
vmw_seqno_waiter_remove(dev_priv);
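The rewritten wait loop above follows the standard prepare-to-wait pattern: the task state is set before the signaled bit is tested, so a wakeup landing between the test and schedule_timeout() cannot be lost, and holding the fence spinlock across the state change is what makes the barrier-free __set_current_state() safe. A minimal sketch of the same pattern, using hypothetical names (cond, lock) rather than vmwgfx symbols:

    /* Sketch of the wait-loop pattern above; cond/lock are illustrative only. */
    static long wait_for_cond(bool *cond, spinlock_t *lock, long timeout)
    {
            long ret = timeout;

            spin_lock(lock);
            for (;;) {
                    /* Set state before testing so a concurrent wakeup is not lost. */
                    __set_current_state(TASK_UNINTERRUPTIBLE);
                    if (*cond) {
                            if (ret == 0 && timeout > 0)
                                    ret = 1; /* signaled exactly at expiry */
                            break;
                    }
                    if (ret == 0)
                            break; /* timed out */
                    spin_unlock(lock);
                    ret = schedule_timeout(ret);
                    spin_lock(lock);
            }
            __set_current_state(TASK_RUNNING);
            spin_unlock(lock);
            return ret;
    }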
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 20224dba9d8e..c9382933c2b9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2012 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a1c68e6a689e..d0fd147ef75f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 66ffa1d4759c..007a0cc7f232 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index f2f9d88131f2..ddb1e9365a3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c5e8eae0dbe2..172a6ba6539c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -56,6 +56,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
break;
+ case DRM_VMW_PARAM_HW_CAPS2:
+ param->value = dev_priv->capabilities2;
+ break;
case DRM_VMW_PARAM_FIFO_CAPS:
param->value = dev_priv->fifo.capabilities;
break;
@@ -113,6 +116,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_DX:
param->value = dev_priv->has_dx;
break;
+ case DRM_VMW_PARAM_SM4_1:
+ param->value = dev_priv->has_sm4_1;
+ break;
default:
return -EINVAL;
}
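For reference, user space reads these parameters through the existing vmwgfx getparam ioctl; a minimal sketch, assuming libdrm's drmCommandWriteRead() and the definitions from the vmwgfx_drm.h UAPI header:

    /* User-space sketch; assumes libdrm and the vmwgfx_drm.h UAPI header. */
    #include <stdint.h>
    #include <xf86drm.h>
    #include "vmwgfx_drm.h"

    static int vmw_get_param(int fd, uint32_t param, uint64_t *value)
    {
            struct drm_vmw_getparam_arg arg = { .param = param };
            int ret;

            ret = drmCommandWriteRead(fd, DRM_VMW_GETPARAM, &arg, sizeof(arg));
            if (ret == 0)
                    *value = arg.value;
            return ret;
    }

For example, vmw_get_param(fd, DRM_VMW_PARAM_SM4_1, &v) would report whether the device supports SM4.1.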
@@ -122,15 +128,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
- /* If the header is updated, update the format test as well! */
- BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);
-
- if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
- cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
- fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
- SVGADX_DXFMT_MULTISAMPLE_4 |
- SVGADX_DXFMT_MULTISAMPLE_8);
- else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+ /*
+ * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
+ * to check the sample count supported by the virtual device. Since there
+ * never was support for a multisample count for the backing MOB, return 0.
+ */
+ if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
return 0;
return fmt_value;
@@ -377,8 +380,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
}
vfb = vmw_framebuffer_to_vfb(fb);
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
+ if (!vfb->bo) {
+ DRM_ERROR("Framebuffer not buffer backed.\n");
ret = -EINVAL;
goto out_no_ttm_lock;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index b9239ba067c4..c3ad4478266b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 01f2dc9e6f52..23beff5d8e3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
-static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
+static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *bo,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
+ ret = ttm_bo_reserve(&bo->base, true, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
ttm_bo_kunmap(&map);
err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
+ ttm_bo_unreserve(&bo->base);
return ret;
}
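As a worked example of the mapping size above, with 4 KiB pages (PAGE_SHIFT = 12): a 64x64 ARGB cursor needs width * height * 4 = 16384 bytes, so kmap_num = (16384 + 4095) >> 12 = 4 pages are mapped before the image is handed to vmw_cursor_update_image().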
@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
if (vps->surf)
vmw_surface_unreference(&vps->surf);
- if (vps->dmabuf)
- vmw_dmabuf_unreference(&vps->dmabuf);
+ if (vps->bo)
+ vmw_bo_unreference(&vps->bo);
if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
- vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
- vmw_dmabuf_reference(vps->dmabuf);
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vmw_bo_reference(vps->bo);
} else {
vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
vmw_surface_reference(vps->surf);
@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
}
du->cursor_surface = vps->surf;
- du->cursor_dmabuf = vps->dmabuf;
+ du->cursor_bo = vps->bo;
if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age;
@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vps->surf->snooper.image,
64, 64, hotspot_x,
hotspot_y);
- } else if (vps->dmabuf) {
- ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
- plane->state->crtc_w,
- plane->state->crtc_h,
- hotspot_x, hotspot_y);
+ } else if (vps->bo) {
+ ret = vmw_cursor_update_bo(dev_priv, vps->bo,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ hotspot_x, hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return;
@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
ret = -EINVAL;
}
- if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
+ if (!vmw_framebuffer_to_vfb(fb)->bo)
surface = vmw_framebuffer_to_vfbs(fb)->surface;
if (surface && !surface->snooper.image) {
@@ -535,9 +535,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
- int connector_mask = 1 << drm_connector_index(&du->connector);
+ int connector_mask = drm_connector_mask(&du->connector);
bool has_primary = new_state->plane_mask &
- BIT(drm_plane_index(crtc->primary));
+ drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != new_state->enable)
@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
if (vps->surf)
(void) vmw_surface_reference(vps->surf);
- if (vps->dmabuf)
- (void) vmw_dmabuf_reference(vps->dmabuf);
+ if (vps->bo)
+ (void) vmw_bo_reference(vps->bo);
state = &vps->base;
@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
if (vps->surf)
vmw_surface_unreference(&vps->surf);
- if (vps->dmabuf)
- vmw_dmabuf_unreference(&vps->dmabuf);
+ if (vps->bo)
+ vmw_bo_unreference(&vps->bo);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
/**
* vmw_kms_readback - Perform a readback from the screen system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd,
- bool is_dmabuf_proxy)
+ bool is_bo_proxy)
{
struct drm_device *dev = dev_priv->dev;
@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handles[0];
- vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
+ vfbs->is_bo_proxy = is_bo_proxy;
*out = &vfbs->base;
@@ -1038,30 +1038,30 @@ out_err1:
}
/*
- * Dmabuf framebuffer code
+ * Buffer-object framebuffer code
*/
-static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
- struct vmw_framebuffer_dmabuf *vfbd =
+ struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
drm_framebuffer_cleanup(framebuffer);
- vmw_dmabuf_unreference(&vfbd->buffer);
+ vmw_bo_unreference(&vfbd->buffer);
if (vfbd->base.user_obj)
ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
+static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_framebuffer_dmabuf *vfbd =
+ struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
int ret, increment = 1;
@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
true, true, NULL);
break;
case vmw_du_screen_object:
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
- clips, NULL, num_clips,
- increment, true, NULL, NULL);
+ ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
+ clips, NULL, num_clips,
+ increment, true, NULL, NULL);
break;
case vmw_du_legacy:
- ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
- clips, num_clips, increment);
+ ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+ clips, num_clips, increment);
break;
default:
ret = -EINVAL;
@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
-static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
- .destroy = vmw_framebuffer_dmabuf_destroy,
- .dirty = vmw_framebuffer_dmabuf_dirty,
+static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ .destroy = vmw_framebuffer_bo_destroy,
+ .dirty = vmw_framebuffer_bo_dirty,
};
/**
- * Pin the dmabuffer in a location suitable for access by the
+ * Pin the buffer object in a location suitable for access by the
* display system.
*/
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct ttm_placement *placement;
int ret;
- buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (!buf)
@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
switch (dev_priv->active_display_unit) {
case vmw_du_legacy:
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+ ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv);
break;
case vmw_du_screen_object:
case vmw_du_screen_target:
- if (vfb->dmabuf) {
+ if (vfb->bo) {
if (dev_priv->capabilities & SVGA_CAP_3D) {
/*
* Use surface DMA to get content to
@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
placement = &vmw_mob_placement;
}
- return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
- false);
+ return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
default:
return -EINVAL;
}
@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
- buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (WARN_ON(!buf))
return 0;
- return vmw_dmabuf_unpin(dev_priv, buf, false);
+ return vmw_bo_unpin(dev_priv, buf, false);
}
/**
- * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ * vmw_create_bo_proxy - create a proxy surface for the buffer object
*
* @dev: DRM device
* @mode_cmd: parameters for the new surface
- * @dmabuf_mob: MOB backing the DMA buf
+ * @bo_mob: MOB backing the buffer object
* @srf_out: newly created surface
*
- * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * When the content FB is a buffer object, we create a surface as a proxy to the
* same buffer. This way we can do a surface copy rather than a surface DMA.
* This is a more efficient approach.
*
* RETURNS:
* 0 on success, error code otherwise
*/
-static int vmw_create_dmabuf_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_dma_buffer *dmabuf_mob,
- struct vmw_surface **srf_out)
+static int vmw_create_bo_proxy(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct vmw_buffer_object *bo_mob,
+ struct vmw_surface **srf_out)
{
uint32_t format;
struct drm_vmw_size content_base_size = {0};
@@ -1239,15 +1238,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
content_base_size.depth = 1;
ret = vmw_surface_gb_priv_define(dev,
- 0, /* kernel visible only */
- 0, /* flags */
- format,
- true, /* can be a scanout buffer */
- 1, /* num of mip levels */
- 0,
- 0,
- content_base_size,
- srf_out);
+ 0, /* kernel visible only */
+ 0, /* flags */
+ format,
+ true, /* can be a scanout buffer */
+ 1, /* num of mip levels */
+ 0,
+ 0,
+ content_base_size,
+ SVGA3D_MS_PATTERN_NONE,
+ SVGA3D_MS_QUALITY_NONE,
+ srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1258,8 +1259,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
- vmw_dmabuf_unreference(&res->backup);
- res->backup = vmw_dmabuf_reference(dmabuf_mob);
+ vmw_bo_unreference(&res->backup);
+ res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1269,21 +1270,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
-static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd2
- *mode_cmd)
+static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *bo,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd2
+ *mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
- struct vmw_framebuffer_dmabuf *vfbd;
+ struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
struct drm_format_name_buf format_name;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+ if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1312,20 +1313,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
- vfbd->base.dmabuf = true;
- vfbd->buffer = vmw_dmabuf_reference(dmabuf);
+ vfbd->base.bo = true;
+ vfbd->buffer = vmw_bo_reference(bo);
vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
- &vmw_framebuffer_dmabuf_funcs);
+ &vmw_framebuffer_bo_funcs);
if (ret)
goto out_err2;
return 0;
out_err2:
- vmw_dmabuf_unreference(&dmabuf);
+ vmw_bo_unreference(&bo);
kfree(vfbd);
out_err1:
return ret;
@@ -1354,57 +1355,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
- * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
+ * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
* @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
- * @only_2d: No presents will occur to this dma buffer based framebuffer. This
- * Helps the code to do some important optimizations.
+ * Either @bo or @surface must be NULL.
+ * @only_2d: No presents will occur to this buffer object based framebuffer.
+ * This helps the code to do some important optimizations.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
+ struct vmw_buffer_object *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
- bool is_dmabuf_proxy = false;
+ bool is_bo_proxy = false;
int ret;
/*
* We cannot use the SurfaceDMA command in a non-accelerated VM,
- * therefore, wrap the DMA buf in a surface so we can use the
+ * therefore, wrap the buffer object in a surface so we can use the
* SurfaceCopy command.
*/
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
- dmabuf && only_2d &&
+ bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
- dmabuf, &surface);
+ ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+ bo, &surface);
if (ret)
return ERR_PTR(ret);
- is_dmabuf_proxy = true;
+ is_bo_proxy = true;
}
/* Create the new framebuffer depending on what we have */
if (surface) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd,
- is_dmabuf_proxy);
+ is_bo_proxy);
/*
- * vmw_create_dmabuf_proxy() adds a reference that is no longer
+ * vmw_create_bo_proxy() adds a reference that is no longer
* needed
*/
- if (is_dmabuf_proxy)
+ if (is_bo_proxy)
vmw_surface_unreference(&surface);
- } else if (dmabuf) {
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
- mode_cmd);
+ } else if (bo) {
+ ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+ mode_cmd);
} else {
BUG();
}
@@ -1430,23 +1431,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
- struct vmw_dma_buffer *bo = NULL;
+ struct vmw_buffer_object *bo = NULL;
struct ttm_base_object *user_obj;
int ret;
- /**
- * This code should be conditioned on Screen Objects not being used.
- * If screen objects are used, we can allocate a GMR to hold the
- * requested framebuffer.
- */
-
- if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd->pitches[0],
- mode_cmd->height)) {
- DRM_ERROR("Requested mode exceed bounding box limit.\n");
- return ERR_PTR(-ENOMEM);
- }
-
/*
* Take a reference on the user object of the resource
* backing the kms fb. This ensures that user-space handle
@@ -1466,7 +1454,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* End conditioned code.
*/
- /* returns either a dmabuf or surface */
+ /* returns either a bo or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
mode_cmd->handles[0],
&surface, &bo);
@@ -1494,7 +1482,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
- vmw_dmabuf_unreference(&bo);
+ vmw_bo_unreference(&bo);
if (surface)
vmw_surface_unreference(&surface);
@@ -1508,7 +1496,168 @@ err_out:
return &vfb->base;
}
+/**
+ * vmw_kms_check_display_memory - Validates display memory required for a
+ * topology
+ * @dev: DRM device
+ * @num_rects: number of drm_rect in rects
+ * @rects: array of drm_rect representing the topology to validate indexed by
+ * crtc index.
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code
+ */
+static int vmw_kms_check_display_memory(struct drm_device *dev,
+ uint32_t num_rects,
+ struct drm_rect *rects)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_rect bounding_box = {0};
+ u64 total_pixels = 0, pixel_mem, bb_mem;
+ int i;
+
+ for (i = 0; i < num_rects; i++) {
+ /*
+ * Currently this check limits the topology to the maximum
+ * texture/screentarget size. This should change in the future when
+ * user-space supports multiple framebuffers per topology.
+ */
+ if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
+ rects[i].x2 > mode_config->max_width ||
+ rects[i].y2 > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ return -EINVAL;
+ }
+ /* Bounding box upper left is at (0,0). */
+ if (rects[i].x2 > bounding_box.x2)
+ bounding_box.x2 = rects[i].x2;
+
+ if (rects[i].y2 > bounding_box.y2)
+ bounding_box.y2 = rects[i].y2;
+
+ total_pixels += (u64) drm_rect_width(&rects[i]) *
+ (u64) drm_rect_height(&rects[i]);
+ }
+
+ /* Virtual svga device primary limits are always in 32-bpp. */
+ pixel_mem = total_pixels * 4;
+
+ /*
+ * For HV10 and below, prim_bb_mem is the vram size. When
+ * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size is
+ * the limit on the primary bounding box.
+ */
+ if (pixel_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Combined output size too large.\n");
+ return -EINVAL;
+ }
+
+ /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
+ if (dev_priv->active_display_unit != vmw_du_screen_target ||
+ !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
+ bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
+
+ if (bb_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Topology is beyond supported limits.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
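As a worked example of these limits, take two 1920x1080 outputs placed diagonally at (0,0) and (1920,1080), with a hypothetical prim_bb_mem of 512 MiB:

    total_pixels = 2 * 1920 * 1080   = 4,147,200
    pixel_mem    = total_pixels * 4  = 16,588,800 bytes (~15.8 MiB, passes)
    bounding box = 3840 x 2160 (upper left pinned at (0,0))
    bb_mem       = 3840 * 2160 * 4   = 33,177,600 bytes (~31.6 MiB, passes)

Both quantities must stay below prim_bb_mem; only a screen-target unit on a device advertising SVGA_CAP_NO_BB_RESTRICTION skips the bounding-box half of the check.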
+/**
+ * vmw_kms_check_topology - Validates topology in drm_atomic_state
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code
+ */
+static int vmw_kms_check_topology(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_rect *rects;
+ struct drm_crtc *crtc;
+ uint32_t i;
+ int ret = 0;
+
+ rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
+ GFP_KERNEL);
+ if (!rects)
+ return -ENOMEM;
+
+ mutex_lock(&dev_priv->requested_layout_mutex);
+
+ drm_for_each_crtc(crtc, dev) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_crtc_state *crtc_state = crtc->state;
+
+ i = drm_crtc_index(crtc);
+
+ if (crtc_state && crtc_state->enable) {
+ rects[i].x1 = du->gui_x;
+ rects[i].y1 = du->gui_y;
+ rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
+ rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+ }
+ }
+
+ /* Determine change to topology due to new atomic state */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+
+ if (!new_crtc_state->enable && old_crtc_state->enable) {
+ rects[i].x1 = 0;
+ rects[i].y1 = 0;
+ rects[i].x2 = 0;
+ rects[i].y2 = 0;
+ continue;
+ }
+
+ if (!du->pref_active) {
+ ret = -EINVAL;
+ goto clean;
+ }
+
+ /*
+ * For vmwgfx each crtc has only one connector attached and it does
+ * not change, so there is no real need to check crtc->connector_mask
+ * and iterate over it.
+ */
+ connector = &du->connector;
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ goto clean;
+ }
+
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+ vmw_conn_state->gui_x = du->gui_x;
+ vmw_conn_state->gui_y = du->gui_y;
+
+ rects[i].x1 = du->gui_x;
+ rects[i].y1 = du->gui_y;
+ rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
+ rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
+ }
+
+ ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
+ rects);
+
+clean:
+ mutex_unlock(&dev_priv->requested_layout_mutex);
+ kfree(rects);
+ return ret;
+}
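To make the two passes above concrete: suppose CRTC 0 currently scans out 1024x768 at (0,0) and the new atomic state merely disables CRTC 1. The first loop snapshots the rectangles of every currently-enabled CRTC, and the second loop then zeroes the CRTC 1 entry (copying gui_x/gui_y into the connector state for the CRTCs it does touch), so vmw_kms_check_display_memory() validates the complete post-commit topology rather than only the outputs the commit changes.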
/**
* vmw_kms_atomic_check_modeset- validate state object for modeset changes
@@ -1520,36 +1669,39 @@ err_out:
* us to assign a value to mode->crtc_clock so that
* drm_calc_timestamping_constants() won't throw an error message
*
- * RETURNS
+ * Returns:
* Zero for success or -errno
*/
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- struct vmw_private *dev_priv = vmw_priv(dev);
- int i;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- unsigned long requested_bb_mem = 0;
+ struct drm_crtc_state *crtc_state;
+ bool need_modeset = false;
+ int i, ret;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- if (crtc->primary->fb) {
- int cpp = crtc->primary->fb->pitches[0] /
- crtc->primary->fb->width;
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
- requested_bb_mem += crtc->mode.hdisplay * cpp *
- crtc->mode.vdisplay;
- }
+ if (!state->allow_modeset)
+ return ret;
- if (requested_bb_mem > dev_priv->prim_bb_mem)
- return -EINVAL;
- }
+ /*
+ * The legacy path does not set allow_modeset properly (e.g.
+ * @drm_atomic_helper_update_plane), which would result in unnecessary
+ * calls to vmw_kms_check_topology, hence the extra check here.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ need_modeset = true;
}
- return drm_atomic_helper_check(dev, state);
+ if (need_modeset)
+ return vmw_kms_check_topology(dev, state);
+
+ return ret;
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1841,40 +1993,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}
-
-/*
- * Small shared kms functions.
+/**
+ * vmw_du_update_layout - Update the display units with the topology from the
+ * resolution plugin and generate a DRM uevent
+ * @dev_priv: device private
+ * @num_rects: number of drm_rect in rects
+ * @rects: topology to update
*/
-
-static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects)
+static int vmw_du_update_layout(struct vmw_private *dev_priv,
+ unsigned int num_rects, struct drm_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_display_unit *du;
struct drm_connector *con;
+ struct drm_connector_list_iter conn_iter;
- mutex_lock(&dev->mode_config.mutex);
-
-#if 0
- {
- unsigned int i;
-
- DRM_INFO("%s: new layout ", __func__);
- for (i = 0; i < num; i++)
- DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
- rects[i].w, rects[i].h);
- DRM_INFO("\n");
+ /*
+ * Currently only gui_x/y are protected by requested_layout_mutex.
+ */
+ mutex_lock(&dev_priv->requested_layout_mutex);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(con, &conn_iter) {
+ du = vmw_connector_to_du(con);
+ if (num_rects > du->unit) {
+ du->pref_width = drm_rect_width(&rects[du->unit]);
+ du->pref_height = drm_rect_height(&rects[du->unit]);
+ du->pref_active = true;
+ du->gui_x = rects[du->unit].x1;
+ du->gui_y = rects[du->unit].y1;
+ } else {
+ du->pref_width = 800;
+ du->pref_height = 600;
+ du->pref_active = false;
+ du->gui_x = 0;
+ du->gui_y = 0;
+ }
}
-#endif
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev_priv->requested_layout_mutex);
+ mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
- if (num > du->unit) {
- du->pref_width = rects[du->unit].w;
- du->pref_height = rects[du->unit].h;
- du->pref_active = true;
- du->gui_x = rects[du->unit].x;
- du->gui_y = rects[du->unit].y;
+ if (num_rects > du->unit) {
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
du->gui_x);
@@ -1882,9 +2043,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
(&con->base, dev->mode_config.suggested_y_property,
du->gui_y);
} else {
- du->pref_width = 800;
- du->pref_height = 600;
- du->pref_active = false;
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
0);
@@ -1894,8 +2052,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
}
con->status = vmw_du_connector_detect(con, true);
}
-
mutex_unlock(&dev->mode_config.mutex);
+
drm_sysfs_hotplug_event(dev);
return 0;
@@ -2110,7 +2268,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
drm_mode_probed_add(connector, mode);
}
- drm_mode_connector_list_update(connector);
+ drm_connector_list_update(connector);
/* Move the preferred mode first, to help apps pick the right mode. */
drm_mode_sort(&connector->modes);
@@ -2195,7 +2353,25 @@ vmw_du_connector_atomic_get_property(struct drm_connector *connector,
return 0;
}
-
+/**
+ * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Update the preferred topology of the display units as per the ioctl request.
+ * The topology is expressed as an array of drm_vmw_rect, e.g.
+ * [0 0 640 480] [640 0 800 600] [0 480 640 480]
+ *
+ * NOTE:
+ * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
+ * Besides the device limit on topology, x + w and y + h (lower right) cannot
+ * be greater than INT_MAX, so a topology beyond these limits will fail with
+ * an error.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -2204,15 +2380,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
struct drm_vmw_rect *rects;
+ struct drm_rect *drm_rects;
unsigned rects_size;
- int ret;
- int i;
- u64 total_pixels = 0;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_vmw_rect bounding_box = {0};
+ int ret, i;
if (!arg->num_outputs) {
- struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ struct drm_rect def_rect = {0, 0, 800, 600};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
}
@@ -2231,52 +2404,29 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
goto out_free;
}
- for (i = 0; i < arg->num_outputs; ++i) {
- if (rects[i].x < 0 ||
- rects[i].y < 0 ||
- rects[i].x + rects[i].w > mode_config->max_width ||
- rects[i].y + rects[i].h > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
- ret = -EINVAL;
- goto out_free;
- }
-
- /*
- * bounding_box.w and bunding_box.h are used as
- * lower-right coordinates
- */
- if (rects[i].x + rects[i].w > bounding_box.w)
- bounding_box.w = rects[i].x + rects[i].w;
-
- if (rects[i].y + rects[i].h > bounding_box.h)
- bounding_box.h = rects[i].y + rects[i].h;
+ drm_rects = (struct drm_rect *)rects;
- total_pixels += (u64) rects[i].w * (u64) rects[i].h;
- }
-
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- /*
- * For Screen Targets, the limits for a toplogy are:
- * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
- * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
- */
- u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
- u64 pixel_mem = total_pixels * 4;
+ for (i = 0; i < arg->num_outputs; i++) {
+ struct drm_vmw_rect curr_rect;
- if (bb_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Topology is beyond supported limits.\n");
- ret = -EINVAL;
+ /* Verify the user-space rects for overflow as the kernel uses drm_rect */
+ if ((rects[i].x + rects[i].w > INT_MAX) ||
+ (rects[i].y + rects[i].h > INT_MAX)) {
+ ret = -ERANGE;
goto out_free;
}
- if (pixel_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Combined output size too large\n");
- ret = -EINVAL;
- goto out_free;
- }
+ curr_rect = rects[i];
+ drm_rects[i].x1 = curr_rect.x;
+ drm_rects[i].y1 = curr_rect.y;
+ drm_rects[i].x2 = curr_rect.x + curr_rect.w;
+ drm_rects[i].y2 = curr_rect.y + curr_rect.h;
}
- vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+ ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
+
+ if (ret == 0)
+ vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
out_free:
kfree(rects);
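Since user space passes (x, y, w, h) in drm_vmw_rect while the kernel-side check works on (x1, y1, x2, y2) in drm_rect, the documented example topology converts as:

    {0, 0, 640, 480}   -> { .x1 = 0,   .y1 = 0,   .x2 = 640,  .y2 = 480 }
    {640, 0, 800, 600} -> { .x1 = 640, .y1 = 0,   .x2 = 1440, .y2 = 600 }
    {0, 480, 640, 480} -> { .x1 = 0,   .y1 = 480, .x2 = 640,  .y2 = 960 }

Any rectangle whose lower-right corner would exceed INT_MAX fails the overflow check with -ERANGE before the converted rectangles are used.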
@@ -2322,9 +2472,10 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
} else {
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
+
+ if (plane->state->fb == &framebuffer->base)
+ units[num_units++] = vmw_crtc_to_du(crtc);
}
}
@@ -2422,7 +2573,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
* interrupted by a signal.
*/
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit)
@@ -2454,7 +2605,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_buffer_prepare.
*/
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
{
if (buf)
ttm_bo_unreserve(&buf->base);
@@ -2477,7 +2628,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
*/
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep)
@@ -2489,7 +2640,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
file_priv ? &handle : NULL);
if (buf)
- vmw_fence_single_bo(&buf->base, fence);
+ vmw_bo_fence_single(&buf->base, fence);
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
@@ -2517,7 +2668,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
struct vmw_resource *res = ctx->res;
vmw_kms_helper_buffer_revert(ctx->buf);
- vmw_dmabuf_unreference(&ctx->buf);
+ vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2562,7 +2713,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
if (ret)
goto out_unreserve;
- ctx->buf = vmw_dmabuf_reference(res->backup);
+ ctx->buf = vmw_bo_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
@@ -2595,7 +2746,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
- vmw_dmabuf_unreference(&ctx->buf);
+ vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2806,6 +2957,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
struct vmw_framebuffer *vfb;
mutex_lock(&dev_priv->global_kms_state_mutex);
@@ -2813,7 +2965,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
if (!du->is_implicit)
goto out_unlock;
- vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+ vfb = vmw_framebuffer_to_vfb(plane->state->fb);
WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
dev_priv->implicit_fb != vfb);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6b7c012719f1..31311298ec0b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
#define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base)
#define vmw_framebuffer_to_vfbd(x) \
- container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+ container_of(x, struct vmw_framebuffer_bo, base.base)
/**
* Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
- bool dmabuf;
+ bool bo;
struct ttm_base_object *user_obj;
uint32_t user_handle;
};
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
- struct vmw_dma_buffer *buffer;
+ struct vmw_buffer_object *buffer;
struct list_head head;
- bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */
+ bool is_bo_proxy; /* true if this is a proxy surface for a buffer object */
};
-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
- struct vmw_dma_buffer *buffer;
+ struct vmw_buffer_object *buffer;
};
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
*
* @base DRM plane object
* @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
* @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
+ * @bo_size Size of the bo, used by Screen Object Display Unit
* @pinned pin count for STDU display surface
*/
struct vmw_plane_state {
struct drm_plane_state base;
struct vmw_surface *surf;
- struct vmw_dma_buffer *dmabuf;
+ struct vmw_buffer_object *bo;
int content_fb_type;
- unsigned long dmabuf_size;
+ unsigned long bo_size;
int pinned;
@@ -192,6 +192,24 @@ struct vmw_connector_state {
struct drm_connector_state base;
bool is_implicit;
+
+ /**
+ * @gui_x:
+ *
+ * vmwgfx connector property representing the x position of this display
+ * unit (a connector is synonymous with a display unit) in the overall
+ * topology. This is what the device expects as xRoot when creating the
+ * screen.
+ */
+ int gui_x;
+
+ /**
+ * @gui_y:
+ *
+ * vmwgfx connector property representing the y position of this display
+ * unit (a connector is synonymous with a display unit) in the overall
+ * topology. This is what the device expects as yRoot when creating the
+ * screen.
+ */
+ int gui_y;
};
/**
@@ -209,7 +227,7 @@ struct vmw_display_unit {
struct drm_plane cursor;
struct vmw_surface *cursor_surface;
- struct vmw_dma_buffer *cursor_dmabuf;
+ struct vmw_buffer_object *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -243,7 +261,7 @@ struct vmw_display_unit {
struct vmw_validation_ctx {
struct vmw_resource *res;
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
};
#define vmw_crtc_to_du(x) \
@@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_kms_dirty *dirty);
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep);
@@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
+ struct vmw_buffer_object *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
*/
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment);
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips, int increment);
int vmw_kms_update_proxy(struct vmw_resource *res,
const struct drm_clip_rect *clips,
unsigned num_clips,
@@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc);
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- unsigned num_clips, int increment,
- bool interruptible,
- struct vmw_fence_obj **out_fence,
- struct drm_crtc *crtc);
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ unsigned int num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 4a5907e3f560..723578117191 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -438,7 +438,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
}
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips, int increment)
{
size_t fifo_size;
int i;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index efd1ffd68185..e53bc639a754 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d07c585e3c1d..7ed179d30ec5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
}
@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
ttm_bo_unref(&batch->otable_bo);
@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
}
vmw_fifo_resource_dec(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 21d746bdc922..8b9270f31409 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -31,6 +31,7 @@
#include <linux/frame.h>
#include <asm/hypervisor.h>
#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
#include "vmwgfx_msg.h"
@@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
(HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
- DRM_ERROR("Failed to get reply size\n");
+ DRM_ERROR("Failed to get reply size for host message.\n");
return -EINVAL;
}
@@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
if (!reply) {
- DRM_ERROR("Cannot allocate memory for reply\n");
+ DRM_ERROR("Cannot allocate memory for host message reply.\n");
return -ENOMEM;
}
@@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
if (!msg) {
- DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+ DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
+ guest_info_param);
return -ENOMEM;
}
@@ -374,7 +376,7 @@ out_msg:
out_open:
*length = 0;
kfree(msg);
- DRM_ERROR("Failed to get %s", guest_info_param);
+ DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
return -EINVAL;
}
@@ -403,7 +405,7 @@ int vmw_host_log(const char *log)
msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
- DRM_ERROR("Cannot allocate memory for log message\n");
+ DRM_ERROR("Cannot allocate memory for host log message.\n");
return -ENOMEM;
}
@@ -422,7 +424,7 @@ out_msg:
vmw_close_channel(&channel);
out_open:
kfree(msg);
- DRM_ERROR("Failed to send log\n");
+ DRM_ERROR("Failed to send host log message.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 8545488aa0cf..4907e50fb20a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -1,16 +1,29 @@
-/*
- * Copyright (C) 2016, VMware, Inc.
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
+ **************************************************************************
*
* Based on code from vmware.c and vmmouse.c.
* Author:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 222c9c2123a1..9f1b9d289bec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
bool claimed;
bool paused;
struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
* used with GMRs instead of being locked to vram.
*/
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool pin, bool inter)
{
if (!pin)
- return vmw_dmabuf_unpin(dev_priv, buf, inter);
+ return vmw_bo_unpin(dev_priv, buf, inter);
if (dev_priv->active_display_unit == vmw_du_legacy)
- return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
+ return vmw_bo_pin_in_vram(dev_priv, buf, inter);
- return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
+ return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
/**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
}
if (!pause) {
- vmw_dmabuf_unreference(&stream->buf);
+ vmw_bo_unreference(&stream->buf);
stream->paused = false;
} else {
stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
}
if (stream->buf != buf)
- stream->buf = vmw_dmabuf_reference(buf);
+ stream->buf = vmw_bo_reference(buf);
stream->saved = *arg;
/* stream is no longer stopped/paused */
stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data;
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct vmw_resource *res;
int ret;
@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+ ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
out_unlock:
mutex_unlock(&overlay->mutex);
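Note: the overlay hunks above are part of a mechanical rename — struct vmw_dma_buffer becomes struct vmw_buffer_object, and every vmw_dmabuf_* helper becomes vmw_bo_*. As a quick orientation, here is a minimal sketch of the renamed lookup/use/unreference pattern from the ioctl above (error paths trimmed; illustrative only, not the full ioctl body):

	struct vmw_buffer_object *buf;
	int ret;

	/* Look up the buffer object behind the user-space handle. */
	ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
	if (ret)
		return ret;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	/* Drop the reference taken by the lookup. */
	vmw_bo_unreference(&buf);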
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 0d42a46521fc..0861c821a7fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -40,7 +40,6 @@
*/
static int vmw_prime_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
return -ENOSYS;
@@ -72,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
}
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -109,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
.map = vmw_prime_dmabuf_kmap,
- .map_atomic = vmw_prime_dmabuf_kmap_atomic,
.unmap = vmw_prime_dmabuf_kunmap,
- .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
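Note: the prime hunks track two core dma-buf API changes — the attach callback lost its struct device argument, and the map_atomic/unmap_atomic hooks were removed from struct dma_buf_ops, so the corresponding no-op stubs are deleted from the ops table. Since vmwgfx only stubs these entry points, the updated attach stub reduces to the shape below (a sketch mirroring the resulting code above):

	static int vmw_prime_map_attach(struct dma_buf *dma_buf,
					struct dma_buf_attachment *attach)
	{
		/* vmwgfx does not support dma-buf attach. */
		return -ENOSYS;
	}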
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index dce798053a96..e99f6cdbb091 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6b3a942b18df..92003ea5a219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,7 +27,6 @@
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
-#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
@@ -35,29 +34,6 @@
#define VMW_RES_EVICT_ERR_COUNT 10
-struct vmw_user_dma_buffer {
- struct ttm_prime_object prime;
- struct vmw_dma_buffer dma;
-};
-
-struct vmw_bo_user_rep {
- uint32_t handle;
- uint64_t map_handle;
-};
-
-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct vmw_dma_buffer, base);
-}
-
-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
-}
-
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
kref_get(&res->kref);
@@ -116,7 +92,7 @@ static void vmw_resource_release(struct kref *kref)
res->backup_dirty = false;
list_del_init(&res->mob_head);
ttm_bo_unreserve(bo);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +263,7 @@ out_bad_resource:
}
/**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks up either a surface or a bo.
*
 * The pointers pointed at by out_surf and out_buf need to be NULL.
*/
@@ -295,7 +271,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
+ struct vmw_buffer_object **out_buf)
{
struct vmw_resource *res;
int ret;
@@ -311,513 +287,11 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+ ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
return ret;
}
/**
- * Buffer management.
- */
-
-/**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
- bool user)
-{
- static size_t struct_size, user_struct_size;
- size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
- if (unlikely(struct_size == 0)) {
- size_t backend_size = ttm_round_pot(vmw_tt_size);
-
- struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_dma_buffer));
- user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
- }
-
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
- page_array_size +=
- ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
- return ((user) ? user_struct_size : struct_size) +
- page_array_size;
-}
-
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_dma_buffer_unmap(vmw_bo);
- kfree(vmw_bo);
-}
-
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
- vmw_dma_buffer_unmap(&vmw_user_bo->dma);
- ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible,
- void (*bo_free) (struct ttm_buffer_object *bo))
-{
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- size_t acc_size;
- int ret;
- bool user = (bo_free == &vmw_user_dmabuf_destroy);
-
- BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
-
- acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
- memset(vmw_bo, 0, sizeof(*vmw_bo));
-
- INIT_LIST_HEAD(&vmw_bo->res_list);
-
- ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
- 0, interruptible, acc_size,
- NULL, NULL, bo_free);
- return ret;
-}
-
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_base_object *base = *p_base;
- struct ttm_buffer_object *bo;
-
- *p_base = NULL;
-
- if (unlikely(base == NULL))
- return;
-
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
- prime.base);
- bo = &vmw_user_bo->dma.base;
- ttm_bo_unref(&bo);
-}
-
-static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
- enum ttm_ref_type ref_type)
-{
- struct vmw_user_dma_buffer *user_bo;
- user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
-
- switch (ref_type) {
- case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->dma.base);
- break;
- default:
- BUG();
- }
-}
-
-/**
- * vmw_user_dmabuf_alloc - Allocate a user dma buffer
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the dma buffer.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
- * should be assigned.
- */
-int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *user_bo;
- struct ttm_buffer_object *tmp;
- int ret;
-
- user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(!user_bo)) {
- DRM_ERROR("Failed to allocate a buffer.\n");
- return -ENOMEM;
- }
-
- ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
- if (unlikely(ret != 0))
- return ret;
-
- tmp = ttm_bo_reference(&user_bo->dma.base);
- ret = ttm_prime_object_init(tfile,
- size,
- &user_bo->prime,
- shareable,
- ttm_buffer_type,
- &vmw_user_dmabuf_release,
- &vmw_user_dmabuf_ref_obj_release);
- if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
- goto out_no_base_object;
- }
-
- *p_dma_buf = &user_bo->dma;
- if (p_base) {
- *p_base = &user_bo->prime.base;
- kref_get(&(*p_base)->refcount);
- }
- *handle = user_bo->prime.base.hash.key;
-
-out_no_base_object:
- return ret;
-}
-
-/**
- * vmw_user_dmabuf_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
-
- if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
- return -EPERM;
-
- vmw_user_bo = vmw_user_dma_buffer(bo);
-
- /* Check that the caller has opened the object. */
- if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
- return 0;
-
- DRM_ERROR("Could not grant buffer access.\n");
- return -EPERM;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
- * access, idling previous GPU operations on the buffer and optionally
- * blocking it for further command submissions.
- *
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
- * @flags: Flags indicating how the grab should be performed.
- *
- * A blocking grab will be automatically released when @tfile is closed.
- */
-static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
- struct ttm_object_file *tfile,
- uint32_t flags)
-{
- struct ttm_buffer_object *bo = &user_bo->dma.base;
- bool existed;
- int ret;
-
- if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
- if (!lret)
- return -EBUSY;
- else if (lret < 0)
- return lret;
- return 0;
- }
-
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed, false);
- if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->dma.base);
-
- return ret;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
- * and unblock command submission on the buffer if blocked.
- *
- * @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
- * @flags: Flags indicating the type of release.
- */
-static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
- struct ttm_object_file *tfile,
- uint32_t flags)
-{
- if (!(flags & drm_vmw_synccpu_allow_cs))
- return ttm_ref_object_base_unref(tfile, handle,
- TTM_REF_SYNCCPU_WRITE);
-
- return 0;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
- * functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- *
- * This function checks the ioctl arguments for validity and calls the
- * relevant synccpu functions.
- */
-int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_synccpu_arg *arg =
- (struct drm_vmw_synccpu_arg *) data;
- struct vmw_dma_buffer *dma_buf;
- struct vmw_user_dma_buffer *user_bo;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_base_object *buffer_base;
- int ret;
-
- if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
- || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
- drm_vmw_synccpu_dontblock |
- drm_vmw_synccpu_allow_cs)) != 0) {
- DRM_ERROR("Illegal synccpu flags.\n");
- return -EINVAL;
- }
-
- switch (arg->op) {
- case drm_vmw_synccpu_grab:
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
- &buffer_base);
- if (unlikely(ret != 0))
- return ret;
-
- user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
- dma);
- ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
- vmw_dmabuf_unreference(&dma_buf);
- ttm_base_object_unref(&buffer_base);
- if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
- ret != -EBUSY)) {
- DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
- (unsigned int) arg->handle);
- return ret;
- }
- break;
- case drm_vmw_synccpu_release:
- ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
- arg->flags);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
- (unsigned int) arg->handle);
- return ret;
- }
- break;
- default:
- DRM_ERROR("Invalid synccpu operation.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- union drm_vmw_alloc_dmabuf_arg *arg =
- (union drm_vmw_alloc_dmabuf_arg *)data;
- struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
- struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_dma_buffer *dma_buf;
- uint32_t handle;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &dma_buf,
- NULL);
- if (unlikely(ret != 0))
- goto out_no_dmabuf;
-
- rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
- rep->cur_gmr_id = handle;
- rep->cur_gmr_offset = 0;
-
- vmw_dmabuf_unreference(&dma_buf);
-
-out_no_dmabuf:
- ttm_read_unlock(&dev_priv->reservation_sem);
-
- return ret;
-}
-
-int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_unref_dmabuf_arg *arg =
- (struct drm_vmw_unref_dmabuf_arg *)data;
-
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
-}
-
-int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_dma_buffer **out,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_base_object *base;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL)) {
- pr_err("Invalid buffer object handle 0x%08lx\n",
- (unsigned long)handle);
- return -ESRCH;
- }
-
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_unref(&base);
- pr_err("Invalid buffer object handle 0x%08lx\n",
- (unsigned long)handle);
- return -EINVAL;
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
- prime.base);
- (void)ttm_bo_reference(&vmw_user_bo->dma.base);
- if (p_base)
- *p_base = base;
- else
- ttm_base_object_unref(&base);
- *out = &vmw_user_bo->dma;
-
- return 0;
-}
-
-int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf,
- uint32_t *handle)
-{
- struct vmw_user_dma_buffer *user_bo;
-
- if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
- return -EINVAL;
-
- user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-
- *handle = user_bo->prime.base.hash.key;
- return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL, false);
-}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_dma_buffer *dma_buf;
- int ret;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- args->size, false, &args->handle,
- &dma_buf, NULL);
- if (unlikely(ret != 0))
- goto out_no_dmabuf;
-
- vmw_dmabuf_unreference(&dma_buf);
-out_no_dmabuf:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_dma_buffer *out_buf;
- int ret;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
- if (ret != 0)
- return -EINVAL;
-
- *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
- vmw_dmabuf_unreference(&out_buf);
- return 0;
-}
-
-/**
- * vmw_dumb_destroy - Destroy a dumb boffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
-}
-
-/**
* vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
*
* @res: The resource for which to allocate a backup buffer.
@@ -829,7 +303,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
{
unsigned long size =
(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
- struct vmw_dma_buffer *backup;
+ struct vmw_buffer_object *backup;
int ret;
if (likely(res->backup)) {
@@ -841,16 +315,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
if (unlikely(!backup))
return -ENOMEM;
- ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement,
interruptible,
- &vmw_dmabuf_bo_free);
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
- goto out_no_dmabuf;
+ goto out_no_bo;
res->backup = backup;
-out_no_dmabuf:
+out_no_bo:
return ret;
}
@@ -919,7 +393,7 @@ out_bind_failed:
*/
void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
- struct vmw_dma_buffer *new_backup,
+ struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +405,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
if (new_backup) {
- res->backup = vmw_dmabuf_reference(new_backup);
+ res->backup = vmw_bo_reference(new_backup);
lockdep_assert_held(&new_backup->base.resv->lock.base);
list_add_tail(&res->mob_head, &new_backup->res_list);
} else {
@@ -959,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* for a resource and in that case, allocate
* one, reserve and validate it.
*
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
@@ -966,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* reserved and validated backup buffer.
*/
static int
-vmw_resource_check_buffer(struct vmw_resource *res,
+vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
+ struct vmw_resource *res,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@@ -985,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1003,11 +479,11 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
- ttm_eu_backoff_reservation(NULL, &val_list);
+ ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
return ret;
}
@@ -1050,10 +526,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
* vmw_resource_backoff_reservation - Unreserve and unreference a
* backup buffer
 *
+ * @ticket: The ww acquire ctx used for reservation.
* @val_buf: Backup buffer information.
*/
static void
-vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@@ -1062,7 +540,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
- ttm_eu_backoff_reservation(NULL, &val_list);
+ ttm_eu_backoff_reservation(ticket, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@@ -1070,10 +548,12 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
* vmw_resource_do_evict - Evict a resource, and transfer its data
* to a backup buffer.
*
+ * @ticket: The ww acquire ticket to use, or NULL if trylocking.
* @res: The resource to evict.
* @interruptible: Whether to wait interruptible.
*/
-static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
+ struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
@@ -1083,7 +563,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
val_buf.bo = NULL;
val_buf.shared = false;
- ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
+ ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1098,7 +578,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
- vmw_resource_backoff_reservation(&val_buf);
+ vmw_resource_backoff_reservation(ticket, &val_buf);
return ret;
}
@@ -1152,7 +632,8 @@ int vmw_resource_validate(struct vmw_resource *res)
write_unlock(&dev_priv->resource_lock);
- ret = vmw_resource_do_evict(evict_res, true);
+ /* Trylock backup buffers with a NULL ticket. */
+ ret = vmw_resource_do_evict(NULL, evict_res, true);
if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
@@ -1171,7 +652,7 @@ int vmw_resource_validate(struct vmw_resource *res)
goto out_no_validate;
else if (!res->func->needs_backup && res->backup) {
list_del_init(&res->mob_head);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
return 0;
@@ -1180,109 +661,39 @@ out_no_validate:
return ret;
}
-/**
- * vmw_fence_single_bo - Utility function to fence a single TTM buffer
- * object without unreserving it.
- *
- * @bo: Pointer to the struct ttm_buffer_object to fence.
- * @fence: Pointer to the fence. If NULL, this function will
- * insert a fence into the command stream..
- *
- * Contrary to the ttm_eu version of this function, it takes only
- * a single buffer object instead of a list, and it also doesn't
- * unreserve the buffer object, which needs to be done separately.
- */
-void vmw_fence_single_bo(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence)
-{
- struct ttm_bo_device *bdev = bo->bdev;
-
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
- if (fence == NULL) {
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- reservation_object_add_excl_fence(bo->resv, &fence->base);
- dma_fence_put(&fence->base);
- } else
- reservation_object_add_excl_fence(bo->resv, &fence->base);
-}
/**
- * vmw_resource_move_notify - TTM move_notify_callback
+ * vmw_resource_unbind_list - Unbind all resources attached to @vbo.
*
- * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @vbo: Pointer to the current backing MOB.
*
* Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory.
- * Note that this function should not race with the resource
- * validation code as long as it accesses only members of struct
- * resource that remain static while bo::res is !NULL and
- * while we have @bo reserved. struct resource::backup is *not* a
- * static member. The resource validation code will take care
- * to set @bo::res to NULL, while having @bo reserved when the
- * buffer is no longer bound to the resource, so @bo:res can be
- * used to determine whether there is a need to unbind and whether
- * it is safe to unbind.
+ * Note that this function will not race with the resource
+ * validation code, since resource validation and eviction
+ * both require the backup buffer to be reserved.
*/
-void vmw_resource_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
- struct vmw_dma_buffer *dma_buf;
-
- if (mem == NULL)
- return;
-
- if (bo->destroy != vmw_dmabuf_bo_free &&
- bo->destroy != vmw_user_dmabuf_destroy)
- return;
-
- dma_buf = container_of(bo, struct vmw_dma_buffer, base);
-
- /*
- * Kill any cached kernel maps before move. An optimization could
- * be to do this iff source or destination memory type is VRAM.
- */
- vmw_dma_buffer_unmap(dma_buf);
- if (mem->mem_type != VMW_PL_MOB) {
- struct vmw_resource *res, *n;
- struct ttm_validate_buffer val_buf;
+ struct vmw_resource *res, *next;
+ struct ttm_validate_buffer val_buf = {
+ .bo = &vbo->base,
+ .shared = false
+ };
- val_buf.bo = bo;
- val_buf.shared = false;
+ lockdep_assert_held(&vbo->base.resv->lock.base);
+ list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
+ if (!res->func->unbind)
+ continue;
- list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
-
- if (unlikely(res->func->unbind == NULL))
- continue;
-
- (void) res->func->unbind(res, true, &val_buf);
- res->backup_dirty = true;
- res->res_dirty = false;
- list_del_init(&res->mob_head);
- }
-
- (void) ttm_bo_wait(bo, false, false);
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
}
-}
-
-
-/**
- * vmw_resource_swap_notify - swapout notify callback.
- *
- * @bo: The buffer object to be swapped out.
- */
-void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
-{
- if (bo->destroy != vmw_dmabuf_bo_free &&
- bo->destroy != vmw_user_dmabuf_destroy)
- return;
- /* Kill any cached kernel maps before swapout */
- vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+ (void) ttm_bo_wait(&vbo->base, false, false);
}
@@ -1294,7 +705,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
* Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
*/
-int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
@@ -1344,7 +755,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct ttm_bo_device *bdev = bo->bdev;
struct vmw_private *dev_priv;
@@ -1353,7 +764,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
mutex_lock(&dev_priv->binding_mutex);
- dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+ dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
@@ -1368,7 +779,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
/* Create a fence and attach the BO to it */
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (fence != NULL)
vmw_fence_obj_unreference(&fence);
@@ -1405,6 +816,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct vmw_resource *evict_res;
unsigned err_count = 0;
int ret;
+ struct ww_acquire_ctx ticket;
do {
write_lock(&dev_priv->resource_lock);
@@ -1418,7 +830,8 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
- ret = vmw_resource_do_evict(evict_res, false);
+ /* Wait-lock backup buffers with a ticket. */
+ ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
@@ -1481,7 +894,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
goto out_no_reserve;
if (res->pin_count == 0) {
- struct vmw_dma_buffer *vbo = NULL;
+ struct vmw_buffer_object *vbo = NULL;
if (res->backup) {
vbo = res->backup;
@@ -1539,7 +952,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
WARN_ON(res->pin_count == 0);
if (--res->pin_count == 0 && res->backup) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
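Note: taken together, the resource.c hunks rework eviction locking around a ww_acquire_ctx ticket — vmw_resource_validate() trylocks backup buffers by passing a NULL ticket, while vmw_resource_evict_type() supplies a real ticket so ttm_eu_reserve_buffers() can wait-lock with deadlock avoidance. A sketch of the two call shapes, using only names from the hunks above (error handling elided):

	struct ww_acquire_ctx ticket;
	int ret;

	/* Validation path: trylock the backup buffers (NULL ticket). */
	ret = vmw_resource_do_evict(NULL, evict_res, true);

	/* Eviction path: wait-lock the backup buffers with a real ticket;
	 * ttm_eu_reserve_buffers() handles ticket initialization internally.
	 */
	ret = vmw_resource_do_evict(&ticket, evict_res, false);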
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index ac05968a832b..a8c1c5ebd71d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 3d667e903beb..ad0de7f0cd60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
SVGAFifoCmdBlitScreenToGMRFB body;
};
-struct vmw_kms_sou_dmabuf_blit {
+struct vmw_kms_sou_bo_blit {
uint32 header;
SVGAFifoCmdBlitGMRFBToScreen body;
};
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
struct vmw_display_unit base;
unsigned long buffer_size; /**< Size of allocated buffer */
- struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+ struct vmw_buffer_object *buffer; /**< Backing store buffer */
bool defined;
};
@@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
*/
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
- uint32_t x, uint32_t y,
+ int x, int y,
struct drm_display_mode *mode)
{
size_t fifo_size;
@@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
cmd->obj.size.width = mode->hdisplay;
cmd->obj.size.height = mode->vdisplay;
- if (sou->base.is_implicit) {
- cmd->obj.root.x = x;
- cmd->obj.root.y = y;
- } else {
- cmd->obj.root.x = sou->base.gui_x;
- cmd->obj.root.y = sou->base.gui_y;
- }
+ cmd->obj.root.x = x;
+ cmd->obj.root.y = y;
sou->base.set_gui_x = cmd->obj.root.x;
sou->base.set_gui_y = cmd->obj.root.y;
@@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_plane_state *vps;
int ret;
-
- sou = vmw_crtc_to_sou(crtc);
+ sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev);
- ps = crtc->primary->state;
- fb = ps->fb;
- vps = vmw_plane_state_to_vps(ps);
+ ps = crtc->primary->state;
+ fb = ps->fb;
+ vps = vmw_plane_state_to_vps(ps);
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
if (vfb) {
- sou->buffer = vps->dmabuf;
- sou->buffer_size = vps->dmabuf_size;
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+ int x, y;
+
+ sou->buffer = vps->bo;
+ sou->buffer_size = vps->bo_size;
+
+ if (sou->base.is_implicit) {
+ x = crtc->x;
+ y = crtc->y;
+ } else {
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
+ }
- ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
- &crtc->mode);
+ ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
@@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc;
- if (vps->dmabuf)
- vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ if (vps->bo)
+ vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state);
}
@@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
if (!new_fb) {
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
return 0;
}
@@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
- if (vps->dmabuf) {
- if (vps->dmabuf_size == size) {
+ if (vps->bo) {
+ if (vps->bo_size == size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
*/
- return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+ return vmw_bo_pin_in_vram(dev_priv, vps->bo,
true);
}
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
}
- vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
- if (!vps->dmabuf)
+ vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+ if (!vps->bo)
return -ENOMEM;
vmw_svga_enable(dev_priv);
@@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
+ ret = vmw_bo_init(dev_priv, vps->bo, size,
&vmw_vram_ne_placement,
- false, &vmw_dmabuf_bo_free);
+ false, &vmw_bo_bo_free);
vmw_overlay_resume_all(dev_priv);
if (ret) {
- vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+ vps->bo = NULL; /* vmw_bo_init frees on error */
return ret;
}
- vps->dmabuf_size = size;
+ vps->bo_size = size;
/*
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
*/
- return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+ return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
@@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
vclips.w = crtc->mode.hdisplay;
vclips.h = crtc->mode.vdisplay;
- if (vfb->dmabuf)
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
- &vclips, 1, 1, true,
- &fence, crtc);
+ if (vfb->bo)
+ ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
+ &vclips, 1, 1, true,
+ &fence, crtc);
else
ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
&vclips, NULL, 0, 0,
@@ -527,8 +535,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
*/
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
/*
* When disabling a plane, CRTC and FB should always be NULL
@@ -697,7 +703,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
@@ -777,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
return 0;
}
-static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
- struct vmw_dma_buffer *buf =
- container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ struct vmw_buffer_object *buf =
+ container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
int depth = framebuffer->base.format->depth;
struct {
@@ -972,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
}
/**
- * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
*
* @dirty: The closure structure.
*
* Commits a previously built command buffer of readback clips.
*/
-static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
vmw_fifo_commit(dirty->dev_priv, 0);
@@ -986,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
}
vmw_fifo_commit(dirty->dev_priv,
- sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ sizeof(struct vmw_kms_sou_bo_blit) *
dirty->num_hits);
}
/**
- * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ * vmw_sou_bo_clip - Callback to encode a readback cliprect.
*
* @dirty: The closure structure
*
* Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
*/
-static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
{
- struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+ struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
blit += dirty->num_hits;
blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1014,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
- * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
@@ -1027,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
- * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
+ * @crtc: If crtc is passed, perform bo dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
@@ -1041,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ struct vmw_buffer_object *buf =
+ container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
@@ -1052,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
if (ret)
return ret;
- ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+ ret = do_bo_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0))
goto out_revert;
dirty.crtc = crtc;
- dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
- dirty.clip = vmw_sou_dmabuf_clip;
- dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+ dirty.clip = vmw_sou_bo_clip;
+ dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty);
@@ -1118,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
/**
* vmw_kms_sou_readback - Perform a readback from the screen object system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
@@ -1141,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
uint32_t num_clips,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_buffer_object *buf =
+ container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
@@ -1151,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
if (ret)
return ret;
- ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+ ret = do_bo_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0))
goto out_revert;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 73b8e9a16368..fe4842ca3b6e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
SVGA3dShaderType type,
uint8_t num_input_sig,
uint8_t num_output_sig,
- struct vmw_dma_buffer *byte_code,
+ struct vmw_buffer_object *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
res->backup_size = size;
if (byte_code) {
- res->backup = vmw_dmabuf_reference(byte_code);
+ res->backup = vmw_bo_reference(byte_code);
res->backup_offset = offset;
}
shader->size = size;
@@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
+ struct vmw_buffer_object *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
@@ -801,7 +801,7 @@ out:
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
+ struct vmw_buffer_object *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type)
@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_dma_buffer *buffer = NULL;
+ struct vmw_buffer_object *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
if (buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
+ ret = vmw_user_bo_lookup(tfile, buffer_handle,
&buffer, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
- vmw_dmabuf_unreference(&buffer);
+ vmw_bo_unreference(&buffer);
return ret;
}
@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct list_head *list)
{
struct ttm_operation_ctx ctx = { false, true };
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(!buf))
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
- true, vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_bo_bo_free);
if (unlikely(ret != 0))
goto out;
@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
res, list);
vmw_resource_unreference(&res);
no_reserve:
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
out:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index a0cb310665cc..6ebc5affde14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index d3573c37c436..e9b6b7baa009 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index 268738387b5e..b80c7252f2fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 67331f01ef32..93f6b96ca7bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * COPYRIGHT (C) 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -44,7 +44,7 @@
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
SEPARATE_SURFACE,
- SEPARATE_DMA
+ SEPARATE_BO
};
/**
@@ -58,7 +58,7 @@ enum stdu_content_type {
* @bottom: Bottom side of bounding box.
* @fb_left: Left side of the framebuffer/content bounding box
* @fb_top: Top of the framebuffer/content bounding box
- * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @buf: buffer object when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets.
*/
struct vmw_stdu_dirty {
@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
s32 fb_left, fb_top;
u32 pitch;
union {
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
u32 sid;
};
};
@@ -178,13 +178,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
cmd->body.height = mode->vdisplay;
cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
cmd->body.dpi = 0;
- if (stdu->base.is_implicit) {
- cmd->body.xRoot = crtc_x;
- cmd->body.yRoot = crtc_y;
- } else {
- cmd->body.xRoot = stdu->base.gui_x;
- cmd->body.yRoot = stdu->base.gui_y;
- }
+ cmd->body.xRoot = crtc_x;
+ cmd->body.yRoot = crtc_y;
+
stdu->base.set_gui_x = cmd->body.xRoot;
stdu->base.set_gui_y = cmd->body.yRoot;
@@ -374,11 +370,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
- int ret;
-
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+ int x, y, ret;
- stdu = vmw_crtc_to_stdu(crtc);
+ stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ conn_state = stdu->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
if (stdu->defined) {
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
@@ -397,8 +396,16 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable)
return;
+ if (stdu->base.is_implicit) {
+ x = crtc->x;
+ y = crtc->y;
+ } else {
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
+ }
+
vmw_svga_enable(dev_priv);
- ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, crtc->x, crtc->y);
+ ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
if (ret)
DRM_ERROR("Failed to define Screen Target of size %dx%d\n",
@@ -414,6 +421,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_plane_state *plane_state = crtc->primary->state;
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
struct vmw_framebuffer *vfb;
@@ -422,7 +430,7 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- fb = crtc->primary->fb;
+ fb = plane_state->fb;
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -507,14 +515,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
/**
- * vmw_stdu_dmabuf_clip - Callback to encode a suface DMA command cliprect
+ * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
*
* @dirty: The closure structure.
*
* Encodes a surface DMA command cliprect and updates the bounding box
* for the DMA.
*/
-static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -542,14 +550,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
*
* @dirty: The closure structure.
*
* Fills in the missing fields in a DMA command, and optionally encodes
* a screen target update command, depending on transfer direction.
*/
-static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -593,13 +601,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
/**
- * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit
+ * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
*
* @dirty: The closure structure.
*
* This function calculates the bounding box for all the incoming clips.
*/
-static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -623,14 +631,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
/**
- * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf
+ * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from buffer object
*
* @dirty: The closure structure.
*
* For the special case when we cannot create a proxy surface in a
* 2D VM, we have to do a CPU blit ourselves.
*/
-static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -651,7 +659,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
- /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+ /* Assume we are blitting from Guest (bo) to Host (display_srf) */
dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base;
dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -711,13 +719,13 @@ out_cleanup:
}
/**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
* framebuffer and the screen target system.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm-file identifying the caller. May be
* set to NULL, but then @user_fence_rep must also be set to NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
@@ -746,8 +754,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_buffer_object *buf =
+ container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
@@ -769,8 +777,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
- ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
- ddirty.base.clip = vmw_stdu_dmabuf_clip;
+ ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
+ ddirty.base.clip = vmw_stdu_bo_clip;
ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
num_clips * sizeof(SVGA3dCopyBox) +
sizeof(SVGA3dCmdSurfaceDMASuffix);
@@ -779,8 +787,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
if (cpu_blit) {
- ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
- ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
+ ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+ ddirty.base.clip = vmw_stdu_bo_cpu_clip;
ddirty.base.fifo_reserve_size = 0;
}
@@ -926,7 +934,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (ret)
return ret;
- if (vfbs->is_dmabuf_proxy) {
+ if (vfbs->is_bo_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret)
goto out_finish;
@@ -1074,7 +1082,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
* @new_state: info on the new plane state, including the FB
*
* This function allocates a new display surface if the content is
- * backed by a DMA. The display surface is pinned here, and it'll
+ * backed by a buffer object. The display surface is pinned here, and it'll
* be unpinned in .cleanup_fb()
*
* Returns 0 on success
@@ -1104,13 +1112,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
vfb = vmw_framebuffer_to_vfb(new_fb);
- new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+ new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
new_vfbs->surface->base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
- else if (vfb->dmabuf)
- new_content_type = SEPARATE_DMA;
+ else if (vfb->bo)
+ new_content_type = SEPARATE_BO;
else
new_content_type = SEPARATE_SURFACE;
@@ -1123,10 +1131,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
display_base_size.depth = 1;
/*
- * If content buffer is a DMA buf, then we have to construct
- * surface info
+ * If content buffer is a buffer object, then we have to
+ * construct surface info
*/
- if (new_content_type == SEPARATE_DMA) {
+ if (new_content_type == SEPARATE_BO) {
switch (new_fb->format->cpp[0]*8) {
case 32:
@@ -1149,6 +1157,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
content_srf.flags = 0;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
+ content_srf.multisample_pattern =
+ SVGA3D_MS_PATTERN_NONE;
+ content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
} else {
content_srf = *new_vfbs->surface;
}
@@ -1177,6 +1188,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
content_srf.multisample_count,
0,
display_base_size,
+ content_srf.multisample_pattern,
+ content_srf.quality_level,
&vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
@@ -1211,12 +1224,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vps->content_fb_type = new_content_type;
/*
- * This should only happen if the DMA buf is too large to create a
+ * This should only happen if the buffer object is too large to create a
* proxy surface for.
- * If we are a 2D VM with a DMA buffer then we have to use CPU blit
+ * If we are a 2D VM with a buffer object then we have to use CPU blit
* so cache these mappings
*/
- if (vps->content_fb_type == SEPARATE_DMA &&
+ if (vps->content_fb_type == SEPARATE_BO &&
!(dev_priv->capabilities & SVGA_CAP_3D))
vps->cpp = new_fb->pitches[0] / new_fb->width;
@@ -1275,7 +1288,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n");
- if (vfb->dmabuf)
+ if (vfb->bo)
ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
&vclips, 1, 1, true, false,
crtc);
@@ -1285,8 +1298,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1, 1, NULL, crtc);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
crtc = old_state->crtc;
stdu = vmw_crtc_to_stdu(crtc);
@@ -1487,7 +1498,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b236c48bf265..e125233e074b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -33,6 +33,10 @@
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"
+#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+ (svga3d_flags & ((uint64_t)U32_MAX))
/**
* struct vmw_user_surface - User-space visible surface resource
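The three SVGA3D_FLAGS_* helpers added above split the driver's 64-bit surface flags into the two 32-bit halves carried by the ioctl ABI, and recombine them on the way in. A minimal sketch of the round trip, using the macros exactly as defined in this patch (the flag values are illustrative):

    /* Illustrative round trip for the 64-bit flag helpers. */
    uint64_t flags = SVGA3D_FLAGS_64(0x1, 0x80000000); /* 0x0000000180000000 */
    uint32_t upper = SVGA3D_FLAGS_UPPER_32(flags);     /* 0x00000001 */
    uint32_t lower = SVGA3D_FLAGS_LOWER_32(flags);     /* 0x80000000 */

    /* Reassembling the halves recovers the original value. */
    WARN_ON(SVGA3D_FLAGS_64(upper, lower) != flags);

This is the same split that vmw_gb_surface_define_internal() performs on req->svga3d_flags_upper_32_bits / req->base.svga3d_flags further down.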
@@ -81,7 +85,16 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
-
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+ struct drm_vmw_gb_surface_create_ext_req *req,
+ struct drm_vmw_gb_surface_create_rep *rep,
+ struct drm_file *file_priv);
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+ struct drm_vmw_surface_arg *req,
+ struct drm_vmw_gb_surface_ref_ext_rep *rep,
+ struct drm_file *file_priv);
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
@@ -224,7 +237,12 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
+ /*
+ * Downcast surfaceFlags: it was upcast when received from user-space,
+ * since the driver internally stores it as 64 bit.
+ * The legacy surface define supports only 32-bit flags.
+ */
+ cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
cmd->body.format = srf->format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -468,7 +486,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -760,7 +778,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf = &user_srf->srf;
res = &srf->res;
- srf->flags = req->flags;
+ /* Driver internally stores as 64-bit flags */
+ srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
srf->format = req->format;
srf->scanout = req->scanout;
@@ -785,6 +804,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
+ srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ srf->quality_level = SVGA3D_MS_QUALITY_NONE;
cur_bo_offset = 0;
cur_offset = srf->offsets;
@@ -842,12 +863,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
- ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
- res->backup_size,
- true,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_user_bo_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup,
+ &user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -990,7 +1011,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- rep->flags = srf->flags;
+ /* Downcast of flags when sending back to user space */
+ rep->flags = (uint32_t)srf->flags;
rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
@@ -1031,6 +1053,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v2 body;
} *cmd2;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface_v3 body;
+ } *cmd3;
if (likely(res->id != -1))
return 0;
@@ -1047,7 +1073,11 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (srf->array_size > 0) {
+ if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
+ cmd_len = sizeof(cmd3->body);
+ submit_len = sizeof(*cmd3);
+ } else if (srf->array_size > 0) {
/* has_dx checked on creation time. */
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
cmd_len = sizeof(cmd2->body);
@@ -1060,6 +1090,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
+ cmd3 = (typeof(cmd3))cmd;
if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
@@ -1067,12 +1098,27 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (srf->array_size > 0) {
+ if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ cmd3->header.id = cmd_id;
+ cmd3->header.size = cmd_len;
+ cmd3->body.sid = srf->res.id;
+ cmd3->body.surfaceFlags = srf->flags;
+ cmd3->body.format = srf->format;
+ cmd3->body.numMipLevels = srf->mip_levels[0];
+ cmd3->body.multisampleCount = srf->multisample_count;
+ cmd3->body.multisamplePattern = srf->multisample_pattern;
+ cmd3->body.qualityLevel = srf->quality_level;
+ cmd3->body.autogenFilter = srf->autogen_filter;
+ cmd3->body.size.width = srf->base_size.width;
+ cmd3->body.size.height = srf->base_size.height;
+ cmd3->body.size.depth = srf->base_size.depth;
+ cmd3->body.arraySize = srf->array_size;
+ } else if (srf->array_size > 0) {
cmd2->header.id = cmd_id;
cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id;
cmd2->body.surfaceFlags = srf->flags;
- cmd2->body.format = cpu_to_le32(srf->format);
+ cmd2->body.format = srf->format;
cmd2->body.numMipLevels = srf->mip_levels[0];
cmd2->body.multisampleCount = srf->multisample_count;
cmd2->body.autogenFilter = srf->autogen_filter;
@@ -1085,7 +1131,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.format = srf->format;
cmd->body.numMipLevels = srf->mip_levels[0];
cmd->body.multisampleCount = srf->multisample_count;
cmd->body.autogenFilter = srf->autogen_filter;
@@ -1210,7 +1256,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -1256,194 +1302,55 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
- * the user surface define functionality.
+ * the user surface define functionality.
*
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct vmw_surface *srf;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
union drm_vmw_gb_surface_create_arg *arg =
(union drm_vmw_gb_surface_create_arg *)data;
- struct drm_vmw_gb_surface_create_req *req = &arg->req;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret;
- uint32_t size;
- uint32_t backup_handle = 0;
-
- if (req->multisample_count != 0)
- return -EINVAL;
-
- if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
+ struct drm_vmw_gb_surface_create_ext_req req_ext;
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- size = vmw_user_surface_size + 128;
-
- /* Define a surface based on the parameters. */
- ret = vmw_surface_gb_priv_define(dev,
- size,
- req->svga3d_flags,
- req->format,
- req->drm_surface_flags & drm_vmw_surface_flag_scanout,
- req->mip_levels,
- req->multisample_count,
- req->array_size,
- req->base_size,
- &srf);
- if (unlikely(ret != 0))
- return ret;
-
- user_srf = container_of(srf, struct vmw_user_surface, srf);
- if (drm_is_primary_client(file_priv))
- user_srf->master = drm_master_get(file_priv->master);
+ req_ext.base = arg->req;
+ req_ext.version = drm_vmw_gb_surface_v1;
+ req_ext.svga3d_flags_upper_32_bits = 0;
+ req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+ req_ext.must_be_zero = 0;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- res = &user_srf->srf.res;
-
-
- if (req->buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
- &res->backup,
- &user_srf->backup_base);
- if (ret == 0) {
- if (res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
- DRM_ERROR("Surface backup buffer is too small.\n");
- vmw_dmabuf_unreference(&res->backup);
- ret = -EINVAL;
- goto out_unlock;
- } else {
- backup_handle = req->buffer_handle;
- }
- }
- } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
- ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
- res->backup_size,
- req->drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- tmp = vmw_resource_reference(res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
- req->drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- rep->handle = user_srf->prime.base.hash.key;
- rep->backup_size = res->backup_size;
- if (res->backup) {
- rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.vma_node);
- rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
- rep->buffer_handle = backup_handle;
- } else {
- rep->buffer_map_handle = 0;
- rep->buffer_size = 0;
- rep->buffer_handle = SVGA3D_INVALID_ID;
- }
-
- vmw_resource_unreference(&res);
-
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
+ return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}
/**
* vmw_gb_surface_reference_ioctl - Ioctl function implementing
- * the user surface reference functionality.
+ * the user surface reference functionality.
*
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_gb_surface_reference_arg *arg =
(union drm_vmw_gb_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct ttm_base_object *base;
- uint32_t backup_handle;
- int ret = -EINVAL;
+ struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
+ int ret;
+
+ ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
- ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
- req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
- user_srf = container_of(base, struct vmw_user_surface, prime.base);
- srf = &user_srf->srf;
- if (!srf->res.backup) {
- DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
- goto out_bad_resource;
- }
-
- mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
- &backup_handle);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a GB surface "
- "backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->hash.key,
- TTM_REF_USAGE);
- goto out_bad_resource;
- }
-
- rep->creq.svga3d_flags = srf->flags;
- rep->creq.format = srf->format;
- rep->creq.mip_levels = srf->mip_levels[0];
- rep->creq.drm_surface_flags = 0;
- rep->creq.multisample_count = srf->multisample_count;
- rep->creq.autogen_filter = srf->autogen_filter;
- rep->creq.array_size = srf->array_size;
- rep->creq.buffer_handle = backup_handle;
- rep->creq.base_size = srf->base_size;
- rep->crep.handle = user_srf->prime.base.hash.key;
- rep->crep.backup_size = srf->res.backup_size;
- rep->crep.buffer_handle = backup_handle;
- rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
-
-out_bad_resource:
- ttm_base_object_unref(&base);
+ rep->creq = rep_ext.creq.base;
+ rep->crep = rep_ext.crep;
return ret;
}
@@ -1461,6 +1368,8 @@ out_bad_resource:
* @multisample_count:
* @array_size: Surface array size.
* @size: width, height, depth of the surface requested
+ * @multisample_pattern: Multisampling pattern when msaa is supported
+ * @quality_level: Precision settings
* @user_srf_out: allocated user_srf. Set to NULL on failure.
*
* GB surfaces allocated by this function will not have a user mode handle, and
@@ -1470,13 +1379,15 @@ out_bad_resource:
*/
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
- uint32_t svga3d_flags,
+ SVGA3dSurfaceAllFlags svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
+ SVGA3dMSPattern multisample_pattern,
+ SVGA3dMSQualityLevel quality_level,
struct vmw_surface **srf_out)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -1487,7 +1398,8 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
};
struct vmw_surface *srf;
int ret;
- u32 num_layers;
+ u32 num_layers = 1;
+ u32 sample_count = 1;
*srf_out = NULL;
@@ -1562,19 +1474,23 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->array_size = array_size;
srf->multisample_count = multisample_count;
+ srf->multisample_pattern = multisample_pattern;
+ srf->quality_level = quality_level;
if (array_size)
num_layers = array_size;
else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
num_layers = SVGA3D_MAX_SURFACE_FACES;
- else
- num_layers = 1;
+
+ if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
+ sample_count = srf->multisample_count;
srf->res.backup_size =
- svga3dsurface_get_serialized_size(srf->format,
- srf->base_size,
- srf->mip_levels[0],
- num_layers);
+ svga3dsurface_get_serialized_size_extended(srf->format,
+ srf->base_size,
+ srf->mip_levels[0],
+ num_layers,
+ sample_count);
if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.backup_size += sizeof(SVGA3dDXSOState);
@@ -1599,3 +1515,266 @@ out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
+
+/**
+ * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_gb_surface_create_ext_arg *arg =
+ (union drm_vmw_gb_surface_create_ext_arg *)data;
+ struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
+ struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+
+ return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_gb_surface_reference_ext_arg *arg =
+ (union drm_vmw_gb_surface_reference_ext_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
+
+ return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_define_internal - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Request argument from user-space.
+ * @rep: Response argument to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+ struct drm_vmw_gb_surface_create_ext_req *req,
+ struct drm_vmw_gb_surface_create_rep *rep,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+ uint32_t size;
+ uint32_t backup_handle = 0;
+ SVGA3dSurfaceAllFlags svga3d_flags_64 =
+ SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
+ req->base.svga3d_flags);
+
+ if (!dev_priv->has_sm4_1) {
+ /*
+ * If SM4_1 is not supported then we cannot send 64-bit
+ * flags to the device.
+ */
+ if (req->svga3d_flags_upper_32_bits != 0)
+ return -EINVAL;
+
+ if (req->base.multisample_count != 0)
+ return -EINVAL;
+
+ if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
+ return -EINVAL;
+
+ if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
+ return -EINVAL;
+ }
+
+ if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
+ req->base.multisample_count == 0)
+ return -EINVAL;
+
+ if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ size = vmw_user_surface_size + 128;
+
+ /* Define a surface based on the parameters. */
+ ret = vmw_surface_gb_priv_define(dev,
+ size,
+ svga3d_flags_64,
+ req->base.format,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_scanout,
+ req->base.mip_levels,
+ req->base.multisample_count,
+ req->base.array_size,
+ req->base.base_size,
+ req->multisample_pattern,
+ req->quality_level,
+ &srf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_srf = container_of(srf, struct vmw_user_surface, srf);
+ if (drm_is_primary_client(file_priv))
+ user_srf->master = drm_master_get(file_priv->master);
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ res = &user_srf->srf.res;
+
+ if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
+ &res->backup,
+ &user_srf->backup_base);
+ if (ret == 0) {
+ if (res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer too small.\n");
+ vmw_bo_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ backup_handle = req->base.buffer_handle;
+ }
+ }
+ } else if (req->base.drm_surface_flags &
+ drm_vmw_surface_flag_create_buffer)
+ ret = vmw_user_bo_alloc(dev_priv, tfile,
+ res->backup_size,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ &backup_handle,
+ &res->backup,
+ &user_srf->backup_base);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->handle = user_srf->prime.base.hash.key;
+ rep->backup_size = res->backup_size;
+ if (res->backup) {
+ rep->buffer_map_handle =
+ drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_handle = backup_handle;
+ } else {
+ rep->buffer_map_handle = 0;
+ rep->buffer_size = 0;
+ rep->buffer_handle = SVGA3D_INVALID_ID;
+ }
+
+ vmw_resource_unreference(&res);
+
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_internal - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Pointer to user-space request surface arg.
+ * @rep: Pointer to response to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+ struct drm_vmw_surface_arg *req,
+ struct drm_vmw_gb_surface_ref_ext_rep *rep,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ uint32_t backup_handle;
+ int ret = -EINVAL;
+
+ ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+ req->handle_type, &base);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
+ srf = &user_srf->srf;
+ if (!srf->res.backup) {
+ DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+ goto out_bad_resource;
+ }
+
+ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+ ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface "
+ "backup buffer.\n");
+ (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+ TTM_REF_USAGE);
+ goto out_bad_resource;
+ }
+
+ rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
+ rep->creq.base.format = srf->format;
+ rep->creq.base.mip_levels = srf->mip_levels[0];
+ rep->creq.base.drm_surface_flags = 0;
+ rep->creq.base.multisample_count = srf->multisample_count;
+ rep->creq.base.autogen_filter = srf->autogen_filter;
+ rep->creq.base.array_size = srf->array_size;
+ rep->creq.base.buffer_handle = backup_handle;
+ rep->creq.base.base_size = srf->base_size;
+ rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.buffer_handle = backup_handle;
+ rep->crep.buffer_map_handle =
+ drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+ rep->creq.version = drm_vmw_gb_surface_v1;
+ rep->creq.svga3d_flags_upper_32_bits =
+ SVGA3D_FLAGS_UPPER_32(srf->flags);
+ rep->creq.multisample_pattern = srf->multisample_pattern;
+ rep->creq.quality_level = srf->quality_level;
+ rep->creq.must_be_zero = 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 21111fd091f9..31786b200afc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
struct ttm_object_file *tfile =
vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
- return vmw_user_dmabuf_verify_access(bo, tfile);
+ return vmw_user_bo_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *mem)
{
- vmw_resource_move_notify(bo, mem);
+ vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
}
@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
- vmw_resource_swap_notify(bo);
+ vmw_bo_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e771091d2cd3..7b1e5a5cbd2c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index b4162fd78600..ebc1d83c34b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2012-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index b3786c1a4e80..6b6d5ab82ec3 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -623,7 +623,7 @@ static int displback_initwait(struct xen_drm_front_info *front_info)
if (ret < 0)
return ret;
- DRM_INFO("Have %d conector(s)\n", cfg->num_connectors);
+ DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
/* Create event channels for all connectors and publish */
ret = xen_drm_front_evtchnl_create_all(front_info);
if (ret < 0)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 2c2479b571ae..5693b4a4b02b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -126,12 +126,12 @@ struct xen_drm_front_drm_info {
static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
{
- return (u64)fb;
+ return (uintptr_t)fb;
}
static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
{
- return (u64)gem_obj;
+ return (uintptr_t)gem_obj;
}
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
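The switch from a plain (u64) cast to (uintptr_t) above is a 32-bit portability fix: casting a 32-bit pointer directly to u64 triggers a "cast from pointer to integer of different size" warning, while pointer to uintptr_t is always an exact-width cast that then widens cleanly. A hedged sketch of the contrast (cookie_of is a hypothetical helper):

    static u64 cookie_of(struct drm_framebuffer *fb)
    {
            /* (u64)fb would warn on 32-bit builds, where pointer and
             * u64 differ in size. */
            return (uintptr_t)fb;  /* exact cast, then an implicit,
                                    * clean widening to 64 bits */
    }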
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
index 8099cb343ae3..d333b67cc1a0 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -122,7 +122,7 @@ static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
}
#define xen_page_to_vaddr(page) \
- ((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+ ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index 13ea90f7a185..78655269d843 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -272,7 +272,7 @@ static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -326,7 +326,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&zx_hdmi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index d1931f5ea0b2..ae8c53b4b261 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -446,7 +446,7 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
static void zx_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index 0de1a71ca4e0..b73afb212fb2 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -297,7 +297,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
DRM_MODE_CONNECTOR_Composite);
drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 3e7e33cd3dfa..23d1ff4355a0 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -109,7 +109,7 @@ static int zx_vga_connector_get_modes(struct drm_connector *connector)
*/
zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -175,7 +175,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret);
goto clean_connector;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f1d5f76e9c33..d88073e7d22d 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
return err;
}
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ goto skip_iommu;
+
host->group = iommu_group_get(&pdev->dev);
if (host->group) {
struct iommu_domain_geometry *geometry;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index e2f4a4d93d20..527a1cddb14f 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+ if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+ unpin->size && host->domain) {
iommu_unmap(host->domain, job->addr_phys[i],
unpin->size);
free_iova(&host->iova,
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 48685cddbad1..474b00e19697 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -122,6 +122,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
return IPUV3_COLORSPACE_YUV;
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB24:
@@ -190,6 +192,8 @@ int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
return (24 * pixel_stride) >> 3;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_XRGB32:
return (32 * pixel_stride) >> 3;
default:
break;
@@ -1401,6 +1405,8 @@ static int ipu_probe(struct platform_device *pdev)
return -ENODEV;
ipu->id = of_alias_get_id(np, "ipu");
+ if (ipu->id < 0)
+ ipu->id = 0;
if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
IS_ENABLED(CONFIG_DRM)) {
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 9f2d9ec42add..a9d2501500a1 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -188,6 +188,12 @@ static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
case V4L2_PIX_FMT_RGB32:
/* R G B A <=> [32:0] A:B:G:R */
return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_XBGR32:
+ /* B G R X <=> [32:0] X:R:G:B */
+ return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_XRGB32:
+ /* X R G B <=> [32:0] B:G:R:X */
+ return DRM_FORMAT_BGRX8888;
case V4L2_PIX_FMT_UYVY:
return DRM_FORMAT_UYVY;
case V4L2_PIX_FMT_YUYV:
@@ -269,9 +275,20 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
{
+ u32 ilo, sly;
+
+ if (stride < 0) {
+ stride = -stride;
+ ilo = 0x100000 - (stride / 8);
+ } else {
+ ilo = stride / 8;
+ }
+
+ sly = (stride * 2) - 1;
+
ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_ILO, stride / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_SLY, (stride * 2) - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
};
EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
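The reworked ilo computation leans on the ILO field being 20 bits wide: 0x100000 is 2^20, so 0x100000 - n encodes -n in 20-bit two's complement. A worked example for a caller passing stride = -1920 (values illustrative):

    stride = -stride;                 /* 1920 */
    ilo    = 0x100000 - (stride / 8); /* 0x100000 - 240 = 0xfff10,
                                       * i.e. -240 in 8-byte units:
                                       * one full line backwards */
    sly    = (stride * 2) - 1;        /* 3839 = 2*1920 - 1: field lines
                                       * are two strides apart, stored
                                       * minus one per IPU convention */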
@@ -530,17 +547,17 @@ static const struct ipu_rgb def_bgra_16 = {
#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 4) + (x) / 2)
+ (pix->width * ((y) / 2) / 2) + (x) / 2)
#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * pix->height / 4) + \
- (pix->width * (y) / 4) + (x) / 2)
+ (pix->width * ((y) / 2) / 2) + (x) / 2)
#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * (y) / 2) + (x) / 2)
#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * pix->height / 2) + \
(pix->width * (y) / 2) + (x) / 2)
#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 2) + (x))
+ (pix->width * ((y) / 2)) + (x))
#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * y) + (x))
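The U_OFFSET/V_OFFSET/UV_OFFSET change matters for odd luma rows: with 4:2:0 subsampling, the old pix->width * (y) / 4 only lands on a chroma-line boundary when y is even, whereas the new pix->width * ((y) / 2) / 2 first maps the luma row to its chroma row and then applies the width scaling. A worked comparison for a hypothetical 16x16 frame at x = 0, y = 3:

    old: U_OFFSET = 16*16 + (16 * 3) / 4       = 256 + 12 /* mid chroma row */
    new: U_OFFSET = 16*16 + (16 * (3 / 2)) / 2 = 256 + 8  /* start of chroma
                                                           * row 1: correct */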
@@ -776,6 +793,8 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
offset = image->rect.left * 4 +
image->rect.top * pix->bytesperline;
break;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index caa05b0702e1..954eefe144e2 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -224,14 +224,18 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
* Find the CSI data format and data width for the given V4L2 media
* bus pixel format code.
*/
-static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
+static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
+ enum v4l2_mbus_type mbus_type)
{
switch (mbus_code) {
case MEDIA_BUS_FMT_BGR565_2X8_BE:
case MEDIA_BUS_FMT_BGR565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ if (mbus_type == V4L2_MBUS_CSI2)
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ else
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RGB565;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
@@ -247,6 +251,12 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
cfg->mipi_dt = MIPI_DT_RGB555;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
+ cfg->mipi_dt = MIPI_DT_RGB888;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
case MEDIA_BUS_FMT_UYVY8_2X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
cfg->mipi_dt = MIPI_DT_YUV422;
@@ -318,13 +328,17 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
/*
* Fill a CSI bus config struct from mbus_config and mbus_framefmt.
*/
-static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
struct v4l2_mbus_config *mbus_cfg,
struct v4l2_mbus_framefmt *mbus_fmt)
{
+ int ret;
+
memset(csicfg, 0, sizeof(*csicfg));
- mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+ ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code, mbus_cfg->type);
+ if (ret < 0)
+ return ret;
switch (mbus_cfg->type) {
case V4L2_MBUS_PARALLEL:
@@ -339,7 +353,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
- if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+ if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+ mbus_fmt->field == V4L2_FIELD_ALTERNATE)
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
@@ -355,6 +370,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
/* will never get here, keep compiler quiet */
break;
}
+
+ return 0;
}
int ipu_csi_init_interface(struct ipu_csi *csi,
@@ -364,8 +381,11 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 width, height, data = 0;
+ int ret;
- fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ if (ret < 0)
+ return ret;
/* set default sensor frame width and height */
width = mbus_fmt->width;
@@ -586,11 +606,14 @@ int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 temp;
+ int ret;
if (vc > 3)
return -EINVAL;
- mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+ ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code, V4L2_MBUS_CSI2);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&csi->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 524a717ab28e..f4081962784c 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -227,6 +227,12 @@ static const struct ipu_image_pixfmt image_convert_formats[] = {
.fourcc = V4L2_PIX_FMT_BGR32,
.bpp = 32,
}, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .bpp = 32,
+ }, {
.fourcc = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.uv_width_dec = 2,
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 0f70e8847540..2f8db9d62551 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -128,7 +128,8 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
list_for_each_entry(pre, &ipu_pre_list, list) {
if (pre_node == pre->dev->of_node) {
mutex_unlock(&ipu_pre_list_mutex);
- device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, pre->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
of_node_put(pre_node);
return pre;
}
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 83f9dd934a5d..38a3a9764e49 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -100,7 +100,8 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
list_for_each_entry(prg, &ipu_prg_list, list) {
if (prg_node == prg->dev->of_node) {
mutex_unlock(&ipu_prg_list_mutex);
- device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, prg->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
prg->id = ipu_id;
of_node_put(prg_node);
return prg;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fc4adf3d34e8..a96bf46bc483 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -103,9 +103,11 @@
* runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
* interface is a no-op so as not to interfere with runtime pm
* @list: client list
+ * @vga_dev: pci device of the GPU that the current audio client is bound to
*
* Registered client. A client can be either a GPU or an audio device on a GPU.
- * For audio clients, the @fb_info and @active members are bogus.
+ * For audio clients, the @fb_info and @active members are bogus. For GPU
+ * clients, the @vga_dev member is bogus.
*/
struct vga_switcheroo_client {
struct pci_dev *pdev;
@@ -116,6 +118,7 @@ struct vga_switcheroo_client {
bool active;
bool driver_power_control;
struct list_head list;
+ struct pci_dev *vga_dev;
};
/*
@@ -161,9 +164,8 @@ struct vgasr_priv {
};
#define ID_BIT_AUDIO 0x100
-#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
-#define client_is_vga(c) ((c)->id == VGA_SWITCHEROO_UNKNOWN_ID || \
- !client_is_audio(c))
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) (!client_is_audio(c))
#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
@@ -192,14 +194,29 @@ static void vga_switcheroo_enable(void)
vgasr_priv.handler->init();
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (client->id != VGA_SWITCHEROO_UNKNOWN_ID)
+ if (!client_is_vga(client) ||
+ client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
continue;
+
ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
client->id = ret;
}
+
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (!client_is_audio(client) ||
+ client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
+ continue;
+
+ ret = vgasr_priv.handler->get_client_id(client->vga_dev);
+ if (ret < 0)
+ return;
+
+ client->id = ret | ID_BIT_AUDIO;
+ }
+
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
@@ -272,7 +289,9 @@ EXPORT_SYMBOL(vga_switcheroo_handler_flags);
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id, bool active,
+ enum vga_switcheroo_client_id id,
+ struct pci_dev *vga_dev,
+ bool active,
bool driver_power_control)
{
struct vga_switcheroo_client *client;
@@ -287,6 +306,7 @@ static int register_client(struct pci_dev *pdev,
client->id = id;
client->active = active;
client->driver_power_control = driver_power_control;
+ client->vga_dev = vga_dev;
mutex_lock(&vgasr_mutex);
list_add_tail(&client->list, &vgasr_priv.clients);
@@ -319,7 +339,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
bool driver_power_control)
{
- return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID,
+ return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID, NULL,
pdev == vga_default_device(),
driver_power_control);
}
@@ -329,19 +349,40 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* vga_switcheroo_register_audio_client - register audio client
* @pdev: client pci device
* @ops: client callbacks
- * @id: client identifier
+ * @vga_dev: pci device which is bound to the current audio client
*
* Register audio client (audio device on a GPU). The client is assumed
* to use runtime PM. Beforehand, vga_switcheroo_client_probe_defer()
* shall be called to ensure that all prerequisites are met.
*
- * Return: 0 on success, -ENOMEM on memory allocation error.
+ * Return: 0 on success, -ENOMEM on memory allocation error, -EINVAL if the
+ * client id could not be determined.
*/
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id)
+ struct pci_dev *vga_dev)
{
- return register_client(pdev, ops, id | ID_BIT_AUDIO, false, true);
+ enum vga_switcheroo_client_id id = VGA_SWITCHEROO_UNKNOWN_ID;
+
+ /*
+ * If vga_switcheroo is enabled, both GPU clients and the handler
+ * are already registered, so get the audio client id directly from
+ * the bound GPU client id. Otherwise leave it as
+ * VGA_SWITCHEROO_UNKNOWN_ID; it will be set to the correct id
+ * later, when vga_switcheroo_enable() is called.
+ */
+ mutex_lock(&vgasr_mutex);
+ if (vgasr_priv.active) {
+ id = vgasr_priv.handler->get_client_id(vga_dev);
+ if (id < 0) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&vgasr_mutex);
+
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, vga_dev,
+ false, true);
}
EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
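From a driver's point of view, the new contract is to hand over the GPU's pci_dev instead of guessing a client id. A hedged usage sketch; audio_pdev, gpu_pdev and audio_ops are hypothetical names:

    /* In the audio function's probe path, after locating its GPU: */
    if (vga_switcheroo_client_probe_defer(audio_pdev))
            return -EPROBE_DEFER;

    err = vga_switcheroo_register_audio_client(audio_pdev, &audio_ops,
                                               gpu_pdev);
    if (err) /* -ENOMEM, or -EINVAL if the id lookup failed */
            return err;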
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a49a10437c40..61e1953ff921 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -207,6 +207,16 @@ config HID_CORSAIR
- Vengeance K90
- Scimitar PRO RGB
+config HID_COUGAR
+ tristate "Cougar devices"
+ depends on HID
+ help
+ Support for Cougar devices that are not fully compliant with the
+ HID standard.
+
+ Supported devices:
+ - Cougar 500k Gaming Keyboard
+
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 511e1cbff768..bd7ac53b75c5 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
obj-$(CONFIG_HID_CORSAIR) += hid-corsair.o
+obj-$(CONFIG_HID_COUGAR) += hid-cougar.o
obj-$(CONFIG_HID_CP2112) += hid-cp2112.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-dr.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f858cc72011d..3da354af7a0a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -128,9 +128,19 @@ static int open_collection(struct hid_parser *parser, unsigned type)
usage = parser->local.usage[0];
- if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
- hid_err(parser->device, "collection stack overflow\n");
- return -EINVAL;
+ if (parser->collection_stack_ptr == parser->collection_stack_size) {
+ unsigned int *collection_stack;
+ unsigned int new_size = parser->collection_stack_size +
+ HID_COLLECTION_STACK_SIZE;
+
+ collection_stack = krealloc(parser->collection_stack,
+ new_size * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!collection_stack)
+ return -ENOMEM;
+
+ parser->collection_stack = collection_stack;
+ parser->collection_stack_size = new_size;
}
if (parser->device->maxcollection == parser->device->collection_size) {
@@ -840,6 +850,7 @@ static int hid_scan_report(struct hid_device *hid)
break;
}
+ kfree(parser->collection_stack);
vfree(parser);
return 0;
}
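The krealloc() change above turns the fixed-size collection stack into one that grows in HID_COLLECTION_STACK_SIZE chunks, paired with the kfree() once parsing is done. Since the parser is zero-allocated, the stack starts out NULL with size 0, and krealloc(NULL, ...) behaves like kmalloc(), so the first open_collection() allocates the initial chunk. A sketch of the growth, assuming HID_COLLECTION_STACK_SIZE is 4 (treated here as an assumption about the surrounding headers):

    /* depth 1: stack_ptr == stack_size (0 == 0) -> grow to 4 entries
     * depth 5: stack_ptr == stack_size (4 == 4) -> grow to 8 entries
     * report scanned: kfree(parser->collection_stack)               */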
@@ -1939,6 +1950,29 @@ static int hid_bus_match(struct device *dev, struct device_driver *drv)
return hid_match_device(hdev, hdrv) != NULL;
}
+/**
+ * hid_compare_device_paths - check if both devices share the same path
+ * @hdev_a: hid device
+ * @hdev_b: hid device
+ * @separator: char to use as separator
+ *
+ * Check if two devices share the same path up to the last occurrence of
+ * the separator char. Both paths must exist (i.e., zero-length paths
+ * don't match).
+ */
+bool hid_compare_device_paths(struct hid_device *hdev_a,
+ struct hid_device *hdev_b, char separator)
+{
+ int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
+ int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
+
+ if (n1 != n2 || n1 <= 0 || n2 <= 0)
+ return false;
+
+ return !strncmp(hdev_a->phys, hdev_b->phys, n1);
+}
+EXPORT_SYMBOL_GPL(hid_compare_device_paths);
+
static int hid_device_probe(struct device *dev)
{
struct hid_driver *hdrv = to_hid_driver(dev->driver);
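hid_compare_device_paths() pairs up interfaces of one physical device by comparing hdev->phys up to the last separator; the new hid-cougar driver below relies on it to find its sibling interface. A small illustration with hypothetical phys strings:

    /* hdev_a->phys = "usb-0000:00:14.0-3/input0"
     * hdev_b->phys = "usb-0000:00:14.0-3/input1"
     * With separator '/', both prefixes are "usb-0000:00:14.0-3",
     * so hid_compare_device_paths(hdev_a, hdev_b, '/') is true.  */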
@@ -1952,6 +1986,8 @@ static int hid_device_probe(struct device *dev)
}
hdev->io_started = false;
+ clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
+
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
if (id == NULL) {
@@ -2215,7 +2251,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
struct hid_device *hdev = to_hid_device(dev);
if (hdev->driver == hdrv &&
- !hdrv->match(hdev, hid_ignore_special_drivers))
+ !hdrv->match(hdev, hid_ignore_special_drivers) &&
+ !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
return device_reprobe(dev);
return 0;
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
new file mode 100644
index 000000000000..ad2e87de7dc5
--- /dev/null
+++ b/drivers/hid/hid-cougar.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for Cougar 500k Gaming Keyboard
+ *
+ * Copyright (c) 2018 Daniel M. Lambea <dmlambea@gmail.com>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Daniel M. Lambea <dmlambea@gmail.com>");
+MODULE_DESCRIPTION("Cougar 500k Gaming Keyboard");
+MODULE_LICENSE("GPL");
+MODULE_INFO(key_mappings, "G1-G6 are mapped to F13-F18");
+
+static int cougar_g6_is_space = 1;
+module_param_named(g6_is_space, cougar_g6_is_space, int, 0600);
+MODULE_PARM_DESC(g6_is_space,
+ "If set, G6 programmable key sends SPACE instead of F18 (0=off, 1=on) (default=1)");
+
+
+#define COUGAR_VENDOR_USAGE 0xff00ff00
+
+#define COUGAR_FIELD_CODE 1
+#define COUGAR_FIELD_ACTION 2
+
+#define COUGAR_KEY_G1 0x83
+#define COUGAR_KEY_G2 0x84
+#define COUGAR_KEY_G3 0x85
+#define COUGAR_KEY_G4 0x86
+#define COUGAR_KEY_G5 0x87
+#define COUGAR_KEY_G6 0x78
+#define COUGAR_KEY_FN 0x0d
+#define COUGAR_KEY_MR 0x6f
+#define COUGAR_KEY_M1 0x80
+#define COUGAR_KEY_M2 0x81
+#define COUGAR_KEY_M3 0x82
+#define COUGAR_KEY_LEDS 0x67
+#define COUGAR_KEY_LOCK 0x6e
+
+
+/* Default key mappings. The special key COUGAR_KEY_G6 is defined first
+ * because the spacebar is used more frequently than any other special
+ * key. Depending on the value of the parameter 'g6_is_space', the
+ * mapping is updated in the probe function.
+ */
+static unsigned char cougar_mapping[][2] = {
+ { COUGAR_KEY_G6, KEY_SPACE },
+ { COUGAR_KEY_G1, KEY_F13 },
+ { COUGAR_KEY_G2, KEY_F14 },
+ { COUGAR_KEY_G3, KEY_F15 },
+ { COUGAR_KEY_G4, KEY_F16 },
+ { COUGAR_KEY_G5, KEY_F17 },
+ { COUGAR_KEY_LOCK, KEY_SCREENLOCK },
+/* The following keys are handled by the hardware itself, so no special
+ * treatment is required:
+ { COUGAR_KEY_FN, KEY_RESERVED },
+ { COUGAR_KEY_MR, KEY_RESERVED },
+ { COUGAR_KEY_M1, KEY_RESERVED },
+ { COUGAR_KEY_M2, KEY_RESERVED },
+ { COUGAR_KEY_M3, KEY_RESERVED },
+ { COUGAR_KEY_LEDS, KEY_RESERVED },
+*/
+ { 0, 0 },
+};
+
+struct cougar_shared {
+ struct list_head list;
+ struct kref kref;
+ bool enabled;
+ struct hid_device *dev;
+ struct input_dev *input;
+};
+
+struct cougar {
+ bool special_intf;
+ struct cougar_shared *shared;
+};
+
+static LIST_HEAD(cougar_udev_list);
+static DEFINE_MUTEX(cougar_udev_list_lock);
+
+static void cougar_fix_g6_mapping(struct hid_device *hdev)
+{
+ int i;
+
+ for (i = 0; cougar_mapping[i][0]; i++) {
+ if (cougar_mapping[i][0] == COUGAR_KEY_G6) {
+ cougar_mapping[i][1] =
+ cougar_g6_is_space ? KEY_SPACE : KEY_F18;
+ hid_info(hdev, "G6 mapped to %s\n",
+ cougar_g6_is_space ? "space" : "F18");
+ return;
+ }
+ }
+ hid_warn(hdev, "no mapping defined for G6/spacebar");
+}
+
+/*
+ * Constant-friendly rdesc fixup for mouse interface
+ */
+static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
+ hid_info(hdev,
+ "usage count exceeds max: fixing up report descriptor\n");
+ rdesc[115] = ((HID_MAX_USAGES-1) & 0xff);
+ rdesc[116] = ((HID_MAX_USAGES-1) >> 8);
+ }
+ return rdesc;
+}
+
+static struct cougar_shared *cougar_get_shared_data(struct hid_device *hdev)
+{
+ struct cougar_shared *shared;
+
+ /* Try to find an already-probed interface from the same device */
+ list_for_each_entry(shared, &cougar_udev_list, list) {
+ if (hid_compare_device_paths(hdev, shared->dev, '/')) {
+ kref_get(&shared->kref);
+ return shared;
+ }
+ }
+ return NULL;
+}
+
+static void cougar_release_shared_data(struct kref *kref)
+{
+ struct cougar_shared *shared = container_of(kref,
+ struct cougar_shared, kref);
+
+ mutex_lock(&cougar_udev_list_lock);
+ list_del(&shared->list);
+ mutex_unlock(&cougar_udev_list_lock);
+
+ kfree(shared);
+}
+
+static void cougar_remove_shared_data(void *resource)
+{
+ struct cougar *cougar = resource;
+
+ if (cougar->shared) {
+ kref_put(&cougar->shared->kref, cougar_release_shared_data);
+ cougar->shared = NULL;
+ }
+}
+
+/*
+ * Bind the device group's shared data to this cougar struct.
+ * If no shared data exists for this group, create and initialize it.
+ */
+static int cougar_bind_shared_data(struct hid_device *hdev, struct cougar *cougar)
+{
+ struct cougar_shared *shared;
+ int error = 0;
+
+ mutex_lock(&cougar_udev_list_lock);
+
+ shared = cougar_get_shared_data(hdev);
+ if (!shared) {
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&shared->kref);
+ shared->dev = hdev;
+ list_add_tail(&shared->list, &cougar_udev_list);
+ }
+
+ cougar->shared = shared;
+
+ error = devm_add_action(&hdev->dev, cougar_remove_shared_data, cougar);
+ if (error) {
+ mutex_unlock(&cougar_udev_list_lock);
+ cougar_remove_shared_data(cougar);
+ return error;
+ }
+
+out:
+ mutex_unlock(&cougar_udev_list_lock);
+ return error;
+}
+
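
The shared-data plumbing above is the usual get-or-create-under-a-lock pattern: look up an existing object while holding the list lock and take a reference if found, otherwise allocate one, init its refcount and link it in; the last put unlinks and frees. A compact userspace analogue (a plain counter stands in for struct kref, locking elided; struct shared, shared_get and shared_put are hypothetical names):

    #include <stdlib.h>
    #include <string.h>

    struct shared {
            struct shared *next;
            int refcount;
            char key[32];
    };

    static struct shared *shared_list;

    /* lookup-or-create; caller is assumed to hold the list lock */
    static struct shared *shared_get(const char *key)
    {
            struct shared *s;

            for (s = shared_list; s; s = s->next) {
                    if (!strcmp(s->key, key)) {
                            s->refcount++;              /* kref_get() */
                            return s;
                    }
            }

            s = calloc(1, sizeof(*s));
            if (!s)
                    return NULL;
            s->refcount = 1;                            /* kref_init() */
            strncpy(s->key, key, sizeof(s->key) - 1);
            s->next = shared_list;                      /* list_add_tail() */
            shared_list = s;
            return s;
    }

    static void shared_put(struct shared *s)
    {
            struct shared **pp;

            if (--s->refcount)                          /* kref_put() */
                    return;
            for (pp = &shared_list; *pp; pp = &(*pp)->next) {
                    if (*pp == s) {
                            *pp = s->next;              /* list_del() */
                            break;
                    }
            }
            free(s);
    }
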
+static int cougar_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct cougar *cougar;
+ struct hid_input *next, *hidinput = NULL;
+ unsigned int connect_mask;
+ int error;
+
+ cougar = devm_kzalloc(&hdev->dev, sizeof(*cougar), GFP_KERNEL);
+ if (!cougar)
+ return -ENOMEM;
+ hid_set_drvdata(hdev, cougar);
+
+ error = hid_parse(hdev);
+ if (error) {
+ hid_err(hdev, "parse failed\n");
+ goto fail;
+ }
+
+ if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
+ cougar->special_intf = true;
+ connect_mask = HID_CONNECT_HIDRAW;
+ } else
+ connect_mask = HID_CONNECT_DEFAULT;
+
+ error = hid_hw_start(hdev, connect_mask);
+ if (error) {
+ hid_err(hdev, "hw start failed\n");
+ goto fail;
+ }
+
+ error = cougar_bind_shared_data(hdev, cougar);
+ if (error)
+ goto fail_stop_and_cleanup;
+
+ /* The custom vendor interface will use the hid_input registered
+ * for the keyboard interface, in order to send translated key codes
+ * to it.
+ */
+ if (hdev->collection->usage == HID_GD_KEYBOARD) {
+ cougar_fix_g6_mapping(hdev);
+ list_for_each_entry_safe(hidinput, next, &hdev->inputs, list) {
+ if (hidinput->registered && hidinput->input != NULL) {
+ cougar->shared->input = hidinput->input;
+ cougar->shared->enabled = true;
+ break;
+ }
+ }
+ } else if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
+ error = hid_hw_open(hdev);
+ if (error)
+ goto fail_stop_and_cleanup;
+ }
+ return 0;
+
+fail_stop_and_cleanup:
+ hid_hw_stop(hdev);
+fail:
+ hid_set_drvdata(hdev, NULL);
+ return error;
+}
+
+/*
+ * Convert events from vendor intf to input key events
+ */
+static int cougar_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct cougar *cougar;
+ unsigned char code, action;
+ int i;
+
+ cougar = hid_get_drvdata(hdev);
+ if (!cougar->special_intf || !cougar->shared ||
+ !cougar->shared->input || !cougar->shared->enabled)
+ return 0;
+
+ code = data[COUGAR_FIELD_CODE];
+ action = data[COUGAR_FIELD_ACTION];
+ for (i = 0; cougar_mapping[i][0]; i++) {
+ if (code == cougar_mapping[i][0]) {
+ input_event(cougar->shared->input, EV_KEY,
+ cougar_mapping[i][1], action);
+ input_sync(cougar->shared->input);
+ return 0;
+ }
+ }
+ hid_warn(hdev, "unmapped special key code %x: ignoring\n", code);
+ return 0;
+}
+
+static void cougar_remove(struct hid_device *hdev)
+{
+ struct cougar *cougar = hid_get_drvdata(hdev);
+
+ if (cougar) {
+ /* Stop the vendor intf from processing more events */
+ if (cougar->shared)
+ cougar->shared->enabled = false;
+ if (cougar->special_intf)
+ hid_hw_close(hdev);
+ }
+ hid_hw_stop(hdev);
+}
+
+static struct hid_device_id cougar_id_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
+ USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) },
+ {}
+};
+MODULE_DEVICE_TABLE(hid, cougar_id_table);
+
+static struct hid_driver cougar_driver = {
+ .name = "cougar",
+ .id_table = cougar_id_table,
+ .report_fixup = cougar_report_fixup,
+ .probe = cougar_probe,
+ .remove = cougar_remove,
+ .raw_event = cougar_raw_event,
+};
+
+module_hid_driver(cougar_driver);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8469b6964ff6..b48100236df8 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1154,6 +1154,8 @@ copy_rest:
goto out;
if (list->tail > list->head) {
len = list->tail - list->head;
+ if (len > count)
+ len = count;
if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
list->head += len;
} else {
len = HID_DEBUG_BUFSIZE - list->head;
+ if (len > count)
+ len = count;
if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
}
list->head = 0;
ret += len;
- goto copy_rest;
+ count -= len;
+ if (count > 0)
+ goto copy_rest;
}
}
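
The two added clamps bound each copy_to_user() chunk by the caller's remaining buffer, and the rewritten tail only loops back to copy_rest while room remains. The shape of the fix, reduced to a sketch over a plain ring buffer (ring_read is a hypothetical helper; memcpy stands in for copy_to_user):

    #include <stddef.h>
    #include <string.h>

    #define BUFSIZE 512

    /* copy at most count bytes out of a ring buffer; returns bytes copied */
    static size_t ring_read(const char *buf, size_t *head, size_t tail,
                            char *out, size_t count)
    {
            size_t ret = 0, len;

            if (*head == tail)                    /* empty */
                    return 0;

            if (tail > *head) {                   /* contiguous region */
                    len = tail - *head;
                    if (len > count)
                            len = count;          /* the added clamp */
                    memcpy(out, buf + *head, len);
                    *head += len;
                    ret += len;
            } else {                              /* wrapped: copy to end first */
                    len = BUFSIZE - *head;
                    if (len > count)
                            len = count;          /* the added clamp */
                    memcpy(out, buf + *head, len);
                    *head = (*head + len) % BUFSIZE;
                    ret += len;
                    count -= len;
                    if (count > 0)                /* then the front part */
                            ret += ring_read(buf, head, tail, out + ret, count);
            }
            return ret;
    }

Note the sketch advances head only by the clamped amount, so a short user buffer never discards unread bytes.
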
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 803a725785cf..07e26c3567eb 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -19,38 +19,49 @@
#include "hid-ids.h"
+#define ELAN_MT_I2C 0x5d
#define ELAN_SINGLE_FINGER 0x81
#define ELAN_MT_FIRST_FINGER 0x82
#define ELAN_MT_SECOND_FINGER 0x83
#define ELAN_INPUT_REPORT_SIZE 8
+#define ELAN_I2C_REPORT_SIZE 32
+#define ELAN_FINGER_DATA_LEN 5
+#define ELAN_MAX_FINGERS 5
+#define ELAN_MAX_PRESSURE 255
+#define ELAN_TP_USB_INTF 1
+
+#define ELAN_FEATURE_REPORT 0x0d
+#define ELAN_FEATURE_SIZE 5
+#define ELAN_PARAM_MAX_X 6
+#define ELAN_PARAM_MAX_Y 7
+#define ELAN_PARAM_RES 8
#define ELAN_MUTE_LED_REPORT 0xBC
#define ELAN_LED_REPORT_SIZE 8
-struct elan_touchpad_settings {
- u8 max_fingers;
- u16 max_x;
- u16 max_y;
- u8 max_area_x;
- u8 max_area_y;
- u8 max_w;
- int usb_bInterfaceNumber;
-};
+#define ELAN_HAS_LED BIT(0)
struct elan_drvdata {
struct input_dev *input;
u8 prev_report[ELAN_INPUT_REPORT_SIZE];
struct led_classdev mute_led;
u8 mute_led_state;
- struct elan_touchpad_settings *settings;
+ u16 max_x;
+ u16 max_y;
+ u16 res_x;
+ u16 res_y;
};
static int is_not_elan_touchpad(struct hid_device *hdev)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct elan_drvdata *drvdata = hid_get_drvdata(hdev);
+ if (hdev->bus == BUS_USB) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ return (intf->altsetting->desc.bInterfaceNumber !=
+ ELAN_TP_USB_INTF);
+ }
- return (intf->altsetting->desc.bInterfaceNumber != drvdata->settings->usb_bInterfaceNumber);
+ return 0;
}
static int elan_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -62,12 +73,86 @@ static int elan_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (field->report->id == ELAN_SINGLE_FINGER ||
field->report->id == ELAN_MT_FIRST_FINGER ||
- field->report->id == ELAN_MT_SECOND_FINGER)
+ field->report->id == ELAN_MT_SECOND_FINGER ||
+ field->report->id == ELAN_MT_I2C)
return -1;
return 0;
}
+static int elan_get_device_param(struct hid_device *hdev,
+ unsigned char *dmabuf, unsigned char param)
+{
+ int ret;
+
+ dmabuf[0] = ELAN_FEATURE_REPORT;
+ dmabuf[1] = 0x05;
+ dmabuf[2] = 0x03;
+ dmabuf[3] = param;
+ dmabuf[4] = 0x01;
+
+ ret = hid_hw_raw_request(hdev, ELAN_FEATURE_REPORT, dmabuf,
+ ELAN_FEATURE_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
+ if (ret != ELAN_FEATURE_SIZE) {
+ hid_err(hdev, "Set report error for param %d: %d\n", param, ret);
+ return ret;
+ }
+
+ ret = hid_hw_raw_request(hdev, ELAN_FEATURE_REPORT, dmabuf,
+ ELAN_FEATURE_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != ELAN_FEATURE_SIZE) {
+ hid_err(hdev, "Get report error for param %d: %d\n", param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned int elan_convert_res(char val)
+{
+ /*
+ * (value from firmware) * 10 + 790 = dpi
+ * dpi * 10 / 254 = dots/mm
+ */
+ return (val * 10 + 790) * 10 / 254;
+}
+
+static int elan_get_device_params(struct hid_device *hdev)
+{
+ struct elan_drvdata *drvdata = hid_get_drvdata(hdev);
+ unsigned char *dmabuf;
+ int ret;
+
+ dmabuf = kmalloc(ELAN_FEATURE_SIZE, GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = elan_get_device_param(hdev, dmabuf, ELAN_PARAM_MAX_X);
+ if (ret)
+ goto err;
+
+ drvdata->max_x = (dmabuf[4] << 8) | dmabuf[3];
+
+ ret = elan_get_device_param(hdev, dmabuf, ELAN_PARAM_MAX_Y);
+ if (ret)
+ goto err;
+
+ drvdata->max_y = (dmabuf[4] << 8) | dmabuf[3];
+
+ ret = elan_get_device_param(hdev, dmabuf, ELAN_PARAM_RES);
+ if (ret)
+ goto err;
+
+ drvdata->res_x = elan_convert_res(dmabuf[3]);
+ drvdata->res_y = elan_convert_res(dmabuf[4]);
+
+err:
+ kfree(dmabuf);
+ return ret;
+}
+
static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
int ret;
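
A worked check of the conversion in elan_convert_res() above: for a hypothetical firmware value of 31, (31 * 10 + 790) = 1100 dpi, and 1100 * 10 / 254 = 43 units/mm (multiplying by 10/254 is dividing by 25.4, i.e. dots per inch to dots per mm):

    #include <assert.h>

    static unsigned int elan_convert_res(char val)
    {
            /* (value from firmware) * 10 + 790 = dpi; dpi / 25.4 = dots/mm */
            return (val * 10 + 790) * 10 / 254;
    }

    int main(void)
    {
            assert(elan_convert_res(31) == 43);   /* 1100 dpi -> 43 units/mm */
            assert(elan_convert_res(0) == 31);    /*  790 dpi -> 31 units/mm */
            return 0;
    }
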
@@ -77,6 +162,10 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (is_not_elan_touchpad(hdev))
return 0;
+ ret = elan_get_device_params(hdev);
+ if (ret)
+ return ret;
+
input = devm_input_allocate_device(&hdev->dev);
if (!input)
return -ENOMEM;
@@ -90,25 +179,25 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
input->id.version = hdev->version;
input->dev.parent = &hdev->dev;
- input_set_abs_params(input, ABS_MT_POSITION_X, 0,
- drvdata->settings->max_x, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
- drvdata->settings->max_y, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
- drvdata->settings->max_fingers, 0, 0);
- input_set_abs_params(input, ABS_TOOL_WIDTH, 0,
- drvdata->settings->max_w, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, drvdata->max_x,
+ 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, drvdata->max_y,
+ 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, ELAN_MAX_PRESSURE,
+ 0, 0);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- ret = input_mt_init_slots(input, drvdata->settings->max_fingers,
- INPUT_MT_POINTER);
+ ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
return ret;
}
+ input_abs_set_res(input, ABS_X, drvdata->res_x);
+ input_abs_set_res(input, ABS_Y, drvdata->res_y);
+
ret = input_register_device(input);
if (ret) {
hid_err(hdev, "Failed to register elan input device: %d\n",
@@ -126,7 +215,7 @@ static void elan_report_mt_slot(struct elan_drvdata *drvdata, u8 *data,
unsigned int slot_num)
{
struct input_dev *input = drvdata->input;
- int x, y, w;
+ int x, y, p;
bool active = !!data;
@@ -134,17 +223,17 @@ static void elan_report_mt_slot(struct elan_drvdata *drvdata, u8 *data,
input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
if (active) {
x = ((data[0] & 0xF0) << 4) | data[1];
- y = drvdata->settings->max_y -
+ y = drvdata->max_y -
(((data[0] & 0x07) << 8) | data[2]);
- w = data[4];
+ p = data[4];
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
- input_report_abs(input, ABS_TOOL_WIDTH, w);
+ input_report_abs(input, ABS_MT_PRESSURE, p);
}
}
-static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
+static void elan_usb_report_input(struct elan_drvdata *drvdata, u8 *data)
{
int i;
struct input_dev *input = drvdata->input;
@@ -162,7 +251,7 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
* byte 5: x8 x7 x6 x5 x4 x3 x2 x1
* byte 6: y8 y7 y6 y5 y4 y3 y2 y1
* byte 7: sy4 sy3 sy2 sy1 sx4 sx3 sx2 sx1
- * byte 8: w8 w7 w6 w5 w4 w3 w2 w1
+ * byte 8: p8 p7 p6 p5 p4 p3 p2 p1
*
* packet structure for ELAN_MT_SECOND_FINGER:
*
@@ -171,19 +260,21 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
* byte 3: x8 x7 x6 x5 x4 x3 x2 x1
* byte 4: y8 y7 y6 y5 y4 y3 y2 y1
* byte 5: sy4 sy3 sy2 sy1 sx4 sx3 sx2 sx1
- * byte 6: w8 w7 w6 w5 w4 w3 w2 w1
+ * byte 6: p8 p7 p6 p5 p4 p3 p2 p1
* byte 7: 0 0 0 0 0 0 0 0
* byte 8: 0 0 0 0 0 0 0 0
*
* f5-f1: finger touch bits
* L: clickpad button
- * sy / sx: not sure yet, but this looks like rectangular
- * area for finger
- * w: looks like finger width
+ * sy / sx: finger width / height expressed in traces; the total number
+ * of traces can be queried by doing a HID_REQ_SET_REPORT of
+ * { 0x0d, 0x05, 0x03, 0x05, 0x01 } followed by a GET; in the
+ * returned buf, buf[3] = no-x-traces and buf[4] = no-y-traces.
+ * p: pressure
*/
if (data[0] == ELAN_SINGLE_FINGER) {
- for (i = 0; i < drvdata->settings->max_fingers; i++) {
+ for (i = 0; i < ELAN_MAX_FINGERS; i++) {
if (data[2] & BIT(i + 3))
elan_report_mt_slot(drvdata, data + 3, i);
else
@@ -210,7 +301,7 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
if (prev_report[0] != ELAN_MT_FIRST_FINGER)
return;
- for (i = 0; i < drvdata->settings->max_fingers; i++) {
+ for (i = 0; i < ELAN_MAX_FINGERS; i++) {
if (prev_report[2] & BIT(i + 3)) {
if (!first) {
first = 1;
@@ -229,6 +320,46 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
input_sync(input);
}
+static void elan_i2c_report_input(struct elan_drvdata *drvdata, u8 *data)
+{
+ struct input_dev *input = drvdata->input;
+ u8 *finger_data;
+ int i;
+
+ /*
+ * Elan MT touchpads in i2c mode send finger data in the same format
+ * as in USB mode, but then with all fingers in a single packet.
+ *
+ * packet structure for ELAN_MT_I2C:
+ *
+ * byte 1: 0 1 0 1 1 1 0 1 // 0x5d
+ * byte 2: f5 f4 f3 f2 f1 0 0 L
+ * byte 3: x12 x11 x10 x9 0? y11 y10 y9
+ * byte 4: x8 x7 x6 x5 x4 x3 x2 x1
+ * byte 5: y8 y7 y6 y5 y4 y3 y2 y1
+ * byte 6: sy4 sy3 sy2 sy1 sx4 sx3 sx2 sx1
+ * byte 7: p8 p7 p6 p5 p4 p3 p2 p1
+ * byte 8-12: Same as byte 3-7 for second finger down
+ * byte 13-17: Same as byte 3-7 for third finger down
+ * byte 18-22: Same as byte 3-7 for fourth finger down
+ * byte 23-27: Same as byte 3-7 for fifth finger down
+ */
+
+ finger_data = data + 2;
+ for (i = 0; i < ELAN_MAX_FINGERS; i++) {
+ if (data[1] & BIT(i + 3)) {
+ elan_report_mt_slot(drvdata, finger_data, i);
+ finger_data += ELAN_FINGER_DATA_LEN;
+ } else {
+ elan_report_mt_slot(drvdata, NULL, i);
+ }
+ }
+
+ input_report_key(input, BTN_LEFT, data[1] & 0x01);
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
static int elan_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
@@ -241,11 +372,16 @@ static int elan_raw_event(struct hid_device *hdev,
data[0] == ELAN_MT_FIRST_FINGER ||
data[0] == ELAN_MT_SECOND_FINGER) {
if (size == ELAN_INPUT_REPORT_SIZE) {
- elan_report_input(drvdata, data);
+ elan_usb_report_input(drvdata, data);
return 1;
}
}
+ if (data[0] == ELAN_MT_I2C && size == ELAN_I2C_REPORT_SIZE) {
+ elan_i2c_report_input(drvdata, data);
+ return 1;
+ }
+
return 0;
}
@@ -343,7 +479,6 @@ static int elan_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!drvdata)
return -ENOMEM;
- drvdata->settings = (struct elan_touchpad_settings *)id->driver_data;
hid_set_drvdata(hdev, drvdata);
ret = hid_parse(hdev);
@@ -371,9 +506,11 @@ static int elan_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto err;
- ret = elan_init_mute_led(hdev);
- if (ret)
- goto err;
+ if (id->driver_data & ELAN_HAS_LED) {
+ ret = elan_init_mute_led(hdev);
+ if (ret)
+ goto err;
+ }
return 0;
err:
@@ -386,22 +523,14 @@ static void elan_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static const struct elan_touchpad_settings hp_x2_10_touchpad_data = {
- .max_fingers = 5,
- .max_x = 2930,
- .max_y = 1250,
- .max_area_x = 15,
- .max_area_y = 15,
- .max_w = 255,
- .usb_bInterfaceNumber = 1,
-};
-
static const struct hid_device_id elan_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_HP_X2),
+ .driver_data = ELAN_HAS_LED },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_HP_X2_10_COVER),
- (kernel_ulong_t)&hp_x2_10_touchpad_data},
+ .driver_data = ELAN_HAS_LED },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_TOSHIBA_CLICK_L9W) },
{ }
};
-
MODULE_DEVICE_TABLE(hid, elan_devices);
static struct hid_driver elan_driver = {
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3aa2bb9f0f81..b372854cf38d 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -598,6 +598,9 @@ static struct hv_driver mousevsc_drv = {
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init mousevsc_init(void)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c7981ddd8776..79bdf0c7e351 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -369,6 +369,8 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
#define USB_VENDOR_ID_ELAN 0x04f3
+#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
+#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
#define USB_VENDOR_ID_ELECOM 0x056e
@@ -1001,6 +1003,9 @@
#define USB_VENDOR_ID_SINO_LITE 0x1345
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
+#define USB_VENDOR_ID_SOLID_YEAR 0x060b
+#define USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD 0x500a
+
#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index ab93dd5927c3..4e94ea3e280a 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1550,6 +1550,9 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
case HID_GD_WIRELESS_RADIO_CTLS:
suffix = "Wireless Radio Control";
break;
+ case HID_GD_SYSTEM_MULTIAXIS:
+ suffix = "System Multi Axis";
+ break;
default:
break;
}
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 96e7d3231d2f..72d983626afd 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -22,12 +22,13 @@
#include "hid-ids.h"
-#define MS_HIDINPUT 0x01
-#define MS_ERGONOMY 0x02
-#define MS_PRESENTER 0x04
-#define MS_RDESC 0x08
-#define MS_NOGET 0x10
-#define MS_DUPLICATE_USAGES 0x20
+#define MS_HIDINPUT BIT(0)
+#define MS_ERGONOMY BIT(1)
+#define MS_PRESENTER BIT(2)
+#define MS_RDESC BIT(3)
+#define MS_NOGET BIT(4)
+#define MS_DUPLICATE_USAGES BIT(5)
+#define MS_SURFACE_DIAL BIT(6)
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
@@ -130,6 +131,30 @@ static int ms_presenter_8k_quirk(struct hid_input *hi, struct hid_usage *usage,
return 1;
}
+static int ms_surface_dial_quirk(struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+ case 0xff070000:
+ /* fall-through */
+ case HID_UP_DIGITIZER:
+ /* ignore these axes */
+ return -1;
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ /* fall-through */
+ case HID_GD_Y:
+ /* fall-through */
+ case HID_GD_RFKILL_BTN:
+ /* ignore these axes */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -146,6 +171,13 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
ms_presenter_8k_quirk(hi, usage, bit, max))
return 1;
+ if (quirks & MS_SURFACE_DIAL) {
+ int ret = ms_surface_dial_quirk(hi, field, usage, bit, max);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -229,6 +261,9 @@ static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (quirks & MS_NOGET)
hdev->quirks |= HID_QUIRK_NOGET;
+ if (quirks & MS_SURFACE_DIAL)
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
@@ -281,6 +316,8 @@ static const struct hid_device_id ms_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, 0x091B),
+ .driver_data = MS_SURFACE_DIAL },
{ }
};
MODULE_DEVICE_TABLE(hid, ms_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 45968f7970f8..40fbb7c52723 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -28,14 +28,11 @@
*/
/*
- * This driver is regularly tested thanks to the tool hid-test[1].
- * This tool relies on hid-replay[2] and a database of hid devices[3].
+ * This driver is regularly tested thanks to the test suite in hid-tools[1].
* Please run these regression tests before patching this module so that
* your patch won't break existing known devices.
*
- * [1] https://github.com/bentiss/hid-test
- * [2] https://github.com/bentiss/hid-replay
- * [3] https://github.com/bentiss/hid-devices
+ * [1] https://gitlab.freedesktop.org/libevdev/hid-tools
*/
#include <linux/device.h>
@@ -90,13 +87,54 @@ enum latency_mode {
#define MT_IO_FLAGS_ACTIVE_SLOTS 1
#define MT_IO_FLAGS_PENDING_SLOTS 2
-struct mt_slot {
- __s32 x, y, cx, cy, p, w, h, a;
- __s32 contactid; /* the device ContactID assigned to this slot */
- bool touch_state; /* is the touch valid? */
- bool inrange_state; /* is the finger in proximity of the sensor? */
- bool confidence_state; /* is the touch made by a finger? */
- bool has_azimuth; /* the contact reports azimuth */
+static const bool mtrue = true; /* default for true */
+static const bool mfalse; /* default for false */
+static const __s32 mzero; /* default for 0 */
+
+#define DEFAULT_TRUE ((void *)&mtrue)
+#define DEFAULT_FALSE ((void *)&mfalse)
+#define DEFAULT_ZERO ((void *)&mzero)
+
+struct mt_usages {
+ struct list_head list;
+ __s32 *x, *y, *cx, *cy, *p, *w, *h, *a;
+ __s32 *contactid; /* the device ContactID assigned to this slot */
+ bool *tip_state; /* is the touch valid? */
+ bool *inrange_state; /* is the finger in proximity of the sensor? */
+ bool *confidence_state; /* is the touch made by a finger? */
+};
+
+struct mt_application {
+ struct list_head list;
+ unsigned int application;
+ struct list_head mt_usages; /* mt usages list */
+
+ __s32 quirks;
+
+ __s32 *scantime; /* scantime reported */
+ __s32 scantime_logical_max; /* max value for raw scantime */
+
+ __s32 *raw_cc; /* contact count in the report */
+ int left_button_state; /* left button state */
+ unsigned int mt_flags; /* flags to pass to input-mt */
+
+ unsigned long *pending_palm_slots; /* slots where we reported palm
+ * and need to release */
+
+ __u8 num_received; /* how many contacts we received */
+ __u8 num_expected; /* expected last contact index */
+ __u8 buttons_count; /* number of physical buttons per touchpad */
+ __u8 touches_by_report; /* how many touches are present in one report:
+ * 1 means we should use a serial protocol
+ * > 1 means hybrid (multitouch) protocol
+ */
+
+ __s32 dev_time; /* the scan time provided by the device */
+ unsigned long jiffies; /* the frame's jiffies */
+ int timestamp; /* the timestamp to be sent */
+ int prev_scantime; /* scantime reported previously */
+
+ bool have_contact_count;
};
struct mt_class {
@@ -111,46 +149,30 @@ struct mt_class {
bool export_all_inputs; /* do not ignore mouse, keyboards, etc... */
};
-struct mt_fields {
- unsigned usages[HID_MAX_FIELDS];
- unsigned int length;
+struct mt_report_data {
+ struct list_head list;
+ struct hid_report *report;
+ struct mt_application *application;
+ bool is_mt_collection;
};
struct mt_device {
- struct mt_slot curdata; /* placeholder of incoming data */
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
struct hid_device *hdev; /* hid_device we're attached to */
- struct mt_fields *fields; /* temporary placeholder for storing the
- multitouch fields */
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
- int cc_index; /* contact count field index in the report */
- int cc_value_index; /* contact count value index in the field */
- int scantime_index; /* scantime field index in the report */
- int scantime_val_index; /* scantime value index in the field */
- int prev_scantime; /* scantime reported in the previous packet */
- int left_button_state; /* left button state */
- unsigned last_slot_field; /* the last field of a slot */
- unsigned mt_report_id; /* the report ID of the multitouch device */
__u8 inputmode_value; /* InputMode HID feature value */
- __u8 num_received; /* how many contacts we received */
- __u8 num_expected; /* expected last contact index */
__u8 maxcontacts;
- __u8 touches_by_report; /* how many touches are present in one report:
- * 1 means we should use a serial protocol
- * > 1 means hybrid (multitouch) protocol */
- __u8 buttons_count; /* number of physical buttons per touchpad */
bool is_buttonpad; /* is this device a button pad? */
bool serial_maybe; /* need to check for serial protocol */
- bool curvalid; /* is the current contact valid? */
- unsigned mt_flags; /* flags to pass to input-mt */
- __s32 dev_time; /* the scan time provided by the device */
- unsigned long jiffies; /* the frame's jiffies */
- int timestamp; /* the timestamp to be sent */
+
+ struct list_head applications;
+ struct list_head reports;
};
-static void mt_post_parse_default_settings(struct mt_device *td);
-static void mt_post_parse(struct mt_device *td);
+static void mt_post_parse_default_settings(struct mt_device *td,
+ struct mt_application *app);
+static void mt_post_parse(struct mt_device *td, struct mt_application *app);
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
@@ -203,15 +225,16 @@ static void mt_post_parse(struct mt_device *td);
* to a valid contact that was just read.
*/
-static int cypress_compute_slot(struct mt_device *td)
+static int cypress_compute_slot(struct mt_application *application,
+ struct mt_usages *slot)
{
- if (td->curdata.contactid != 0 || td->num_received == 0)
- return td->curdata.contactid;
+ if (*slot->contactid != 0 || application->num_received == 0)
+ return *slot->contactid;
else
return -1;
}
-static struct mt_class mt_classes[] = {
+static const struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE },
@@ -353,6 +376,7 @@ static ssize_t mt_set_quirks(struct device *dev,
{
struct hid_device *hdev = to_hid_device(dev);
struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_application *application;
unsigned long val;
@@ -361,8 +385,11 @@ static ssize_t mt_set_quirks(struct device *dev,
td->mtclass.quirks = val;
- if (td->cc_index < 0)
- td->mtclass.quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+ list_for_each_entry(application, &td->applications, list) {
+ application->quirks = val;
+ if (!application->have_contact_count)
+ application->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+ }
return count;
}
@@ -457,41 +484,199 @@ static void set_abs(struct input_dev *input, unsigned int code,
input_abs_set_res(input, code, hidinput_calc_abs_res(field, code));
}
-static void mt_store_field(struct hid_usage *usage, struct mt_device *td,
- struct hid_input *hi)
+static struct mt_usages *mt_allocate_usage(struct hid_device *hdev,
+ struct mt_application *application)
+{
+ struct mt_usages *usage;
+
+ usage = devm_kzalloc(&hdev->dev, sizeof(*usage), GFP_KERNEL);
+ if (!usage)
+ return NULL;
+
+ /* set some defaults so we do not need to check for null pointers */
+ usage->x = DEFAULT_ZERO;
+ usage->y = DEFAULT_ZERO;
+ usage->cx = DEFAULT_ZERO;
+ usage->cy = DEFAULT_ZERO;
+ usage->p = DEFAULT_ZERO;
+ usage->w = DEFAULT_ZERO;
+ usage->h = DEFAULT_ZERO;
+ usage->a = DEFAULT_ZERO;
+ usage->contactid = DEFAULT_ZERO;
+ usage->tip_state = DEFAULT_FALSE;
+ usage->inrange_state = DEFAULT_FALSE;
+ usage->confidence_state = DEFAULT_TRUE;
+
+ list_add_tail(&usage->list, &application->mt_usages);
+
+ return usage;
+}
+
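
Pointing every per-contact field at DEFAULT_ZERO / DEFAULT_FALSE / DEFAULT_TRUE up front means the report-processing path can dereference unconditionally: fields the descriptor never provided simply read as their default. A minimal illustration of the sentinel-pointer idea (struct contact and its members are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static const bool sfalse;       /* shared default: false */
    static const int szero;         /* shared default: 0 */

    struct contact {
            const int *pressure;    /* may point at live report data... */
            const bool *tip;        /* ...or at the shared defaults */
    };

    int main(void)
    {
            int live_pressure = 42;
            struct contact c = { .pressure = &szero, .tip = &sfalse };

            /* no NULL checks needed on the consuming side */
            printf("p=%d tip=%d\n", *c.pressure, *c.tip);  /* p=0 tip=0 */

            c.pressure = &live_pressure;  /* descriptor provided this field */
            printf("p=%d tip=%d\n", *c.pressure, *c.tip);  /* p=42 tip=0 */
            return 0;
    }
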
+static struct mt_application *mt_allocate_application(struct mt_device *td,
+ unsigned int application)
+{
+ struct mt_application *mt_application;
+
+ mt_application = devm_kzalloc(&td->hdev->dev, sizeof(*mt_application),
+ GFP_KERNEL);
+ if (!mt_application)
+ return NULL;
+
+ mt_application->application = application;
+ INIT_LIST_HEAD(&mt_application->mt_usages);
+
+ if (application == HID_DG_TOUCHSCREEN)
+ mt_application->mt_flags |= INPUT_MT_DIRECT;
+
+ /*
+ * Model touchscreens providing buttons as touchpads.
+ */
+ if (application == HID_DG_TOUCHPAD) {
+ mt_application->mt_flags |= INPUT_MT_POINTER;
+ td->inputmode_value = MT_INPUTMODE_TOUCHPAD;
+ }
+
+ mt_application->scantime = DEFAULT_ZERO;
+ mt_application->raw_cc = DEFAULT_ZERO;
+ mt_application->quirks = td->mtclass.quirks;
+
+ list_add_tail(&mt_application->list, &td->applications);
+
+ return mt_application;
+}
+
+static struct mt_application *mt_find_application(struct mt_device *td,
+ unsigned int application)
+{
+ struct mt_application *tmp, *mt_application = NULL;
+
+ list_for_each_entry(tmp, &td->applications, list) {
+ if (application == tmp->application) {
+ mt_application = tmp;
+ break;
+ }
+ }
+
+ if (!mt_application)
+ mt_application = mt_allocate_application(td, application);
+
+ return mt_application;
+}
+
+static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ struct hid_report *report)
+{
+ struct mt_report_data *rdata;
+ struct hid_field *field;
+ int r, n;
+
+ rdata = devm_kzalloc(&td->hdev->dev, sizeof(*rdata), GFP_KERNEL);
+ if (!rdata)
+ return NULL;
+
+ rdata->report = report;
+ rdata->application = mt_find_application(td, report->application);
+
+ if (!rdata->application) {
+ devm_kfree(&td->hdev->dev, rdata);
+ return NULL;
+ }
+
+ for (r = 0; r < report->maxfield; r++) {
+ field = report->field[r];
+
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+ for (n = 0; n < field->report_count; n++) {
+ if (field->usage[n].hid == HID_DG_CONTACTID)
+ rdata->is_mt_collection = true;
+ }
+ }
+
+ list_add_tail(&rdata->list, &td->reports);
+
+ return rdata;
+}
+
+static struct mt_report_data *mt_find_report_data(struct mt_device *td,
+ struct hid_report *report)
{
- struct mt_fields *f = td->fields;
+ struct mt_report_data *tmp, *rdata = NULL;
- if (f->length >= HID_MAX_FIELDS)
+ list_for_each_entry(tmp, &td->reports, list) {
+ if (report == tmp->report) {
+ rdata = tmp;
+ break;
+ }
+ }
+
+ if (!rdata)
+ rdata = mt_allocate_report_data(td, report);
+
+ return rdata;
+}
+
+static void mt_store_field(struct hid_device *hdev,
+ struct mt_application *application,
+ __s32 *value,
+ size_t offset)
+{
+ struct mt_usages *usage;
+ __s32 **target;
+
+ if (list_empty(&application->mt_usages))
+ usage = mt_allocate_usage(hdev, application);
+ else
+ usage = list_last_entry(&application->mt_usages,
+ struct mt_usages,
+ list);
+
+ if (!usage)
return;
- f->usages[f->length++] = usage->hid;
+ target = (__s32 **)((char *)usage + offset);
+
+ /* the value has already been filled, create a new slot */
+ if (*target != DEFAULT_TRUE &&
+ *target != DEFAULT_FALSE &&
+ *target != DEFAULT_ZERO) {
+ usage = mt_allocate_usage(hdev, application);
+ if (!usage)
+ return;
+
+ target = (__s32 **)((char *)usage + offset);
+ }
+
+ *target = value;
}
+#define MT_STORE_FIELD(__name) \
+ mt_store_field(hdev, app, \
+ &field->value[usage->usage_index], \
+ offsetof(struct mt_usages, __name))
+
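
MT_STORE_FIELD() works because every member of struct mt_usages it targets is a pointer: mt_store_field() computes the member's address from its offsetof() and writes the report-value pointer through it, allocating a fresh slot when the member has already left its DEFAULT_* sentinel. The core pointer arithmetic, reduced to a standalone sketch (struct usages and store_field are hypothetical names):

    #include <assert.h>
    #include <stddef.h>

    struct usages {
            int *x;
            int *y;
    };

    /* write 'value' into the pointer member that lives at 'offset' */
    static void store_field(struct usages *u, int *value, size_t offset)
    {
            int **target = (int **)((char *)u + offset);

            *target = value;
    }

    int main(void)
    {
            struct usages u = { 0 };
            int raw_x = 123;

            store_field(&u, &raw_x, offsetof(struct usages, x));
            assert(u.x == &raw_x && *u.x == 123);
            return 0;
    }
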
static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
+ unsigned long **bit, int *max, struct mt_application *app)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
int code;
struct hid_usage *prev_usage = NULL;
- if (field->application == HID_DG_TOUCHSCREEN)
- td->mt_flags |= INPUT_MT_DIRECT;
-
/*
* Model touchscreens providing buttons as touchpads.
*/
- if (field->application == HID_DG_TOUCHPAD ||
+ if (field->application == HID_DG_TOUCHSCREEN &&
(usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
- td->mt_flags |= INPUT_MT_POINTER;
+ app->mt_flags |= INPUT_MT_POINTER;
td->inputmode_value = MT_INPUTMODE_TOUCHPAD;
}
/* count the buttons on touchpads */
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
- td->buttons_count++;
+ app->buttons_count++;
if (usage->usage_index)
prev_usage = &field->usage[usage->usage_index - 1];
@@ -502,33 +687,40 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
switch (usage->hid) {
case HID_GD_X:
if (prev_usage && (prev_usage->hid == usage->hid)) {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOOL_X);
- set_abs(hi->input, ABS_MT_TOOL_X, field,
- cls->sn_move);
+ code = ABS_MT_TOOL_X;
+ MT_STORE_FIELD(cx);
} else {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_X);
- set_abs(hi->input, ABS_MT_POSITION_X, field,
- cls->sn_move);
+ code = ABS_MT_POSITION_X;
+ MT_STORE_FIELD(x);
+ }
+
+ set_abs(hi->input, code, field, cls->sn_move);
+
+ /*
+ * A system multi-axis that exports X and Y has a high
+ * chance of being used directly on a surface
+ */
+ if (field->application == HID_GD_SYSTEM_MULTIAXIS) {
+ __set_bit(INPUT_PROP_DIRECT,
+ hi->input->propbit);
+ input_set_abs_params(hi->input,
+ ABS_MT_TOOL_TYPE,
+ MT_TOOL_DIAL,
+ MT_TOOL_DIAL, 0, 0);
}
- mt_store_field(usage, td, hi);
return 1;
case HID_GD_Y:
if (prev_usage && (prev_usage->hid == usage->hid)) {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOOL_Y);
- set_abs(hi->input, ABS_MT_TOOL_Y, field,
- cls->sn_move);
+ code = ABS_MT_TOOL_Y;
+ MT_STORE_FIELD(cy);
} else {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_Y);
- set_abs(hi->input, ABS_MT_POSITION_Y, field,
- cls->sn_move);
+ code = ABS_MT_POSITION_Y;
+ MT_STORE_FIELD(y);
}
- mt_store_field(usage, td, hi);
+ set_abs(hi->input, code, field, cls->sn_move);
+
return 1;
}
return 0;
@@ -536,43 +728,45 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_DIGITIZER:
switch (usage->hid) {
case HID_DG_INRANGE:
- if (cls->quirks & MT_QUIRK_HOVERING) {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_DISTANCE);
+ if (app->quirks & MT_QUIRK_HOVERING) {
input_set_abs_params(hi->input,
ABS_MT_DISTANCE, 0, 1, 0, 0);
}
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
if ((cls->name == MT_CLS_WIN_8 ||
cls->name == MT_CLS_WIN_8_DUAL) &&
- field->application == HID_DG_TOUCHPAD)
- cls->quirks |= MT_QUIRK_CONFIDENCE;
- mt_store_field(usage, td, hi);
+ (field->application == HID_DG_TOUCHPAD ||
+ field->application == HID_DG_TOUCHSCREEN))
+ app->quirks |= MT_QUIRK_CONFIDENCE;
+
+ if (app->quirks & MT_QUIRK_CONFIDENCE)
+ input_set_abs_params(hi->input,
+ ABS_MT_TOOL_TYPE,
+ MT_TOOL_FINGER,
+ MT_TOOL_PALM, 0, 0);
+
+ MT_STORE_FIELD(confidence_state);
return 1;
case HID_DG_TIPSWITCH:
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
- mt_store_field(usage, td, hi);
+ if (field->application != HID_GD_SYSTEM_MULTIAXIS)
+ input_set_capability(hi->input,
+ EV_KEY, BTN_TOUCH);
+ MT_STORE_FIELD(tip_state);
return 1;
case HID_DG_CONTACTID:
- mt_store_field(usage, td, hi);
- td->touches_by_report++;
- td->mt_report_id = field->report->id;
+ MT_STORE_FIELD(contactid);
+ app->touches_by_report++;
return 1;
case HID_DG_WIDTH:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOUCH_MAJOR);
- if (!(cls->quirks & MT_QUIRK_NO_AREA))
+ if (!(app->quirks & MT_QUIRK_NO_AREA))
set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
cls->sn_width);
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(w);
return 1;
case HID_DG_HEIGHT:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOUCH_MINOR);
- if (!(cls->quirks & MT_QUIRK_NO_AREA)) {
+ if (!(app->quirks & MT_QUIRK_NO_AREA)) {
set_abs(hi->input, ABS_MT_TOUCH_MINOR, field,
cls->sn_height);
@@ -585,41 +779,23 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input,
ABS_MT_ORIENTATION, 0, 1, 0, 0);
}
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(h);
return 1;
case HID_DG_TIPPRESSURE:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_PRESSURE);
set_abs(hi->input, ABS_MT_PRESSURE, field,
cls->sn_pressure);
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(p);
return 1;
case HID_DG_SCANTIME:
- hid_map_usage(hi, usage, bit, max,
- EV_MSC, MSC_TIMESTAMP);
input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
- /* Ignore if indexes are out of bounds. */
- if (field->index >= field->report->maxfield ||
- usage->usage_index >= field->report_count)
- return 1;
- td->scantime_index = field->index;
- td->scantime_val_index = usage->usage_index;
- /*
- * We don't set td->last_slot_field as scan time is
- * global to the report.
- */
+ app->scantime = &field->value[usage->usage_index];
+ app->scantime_logical_max = field->logical_maximum;
return 1;
case HID_DG_CONTACTCOUNT:
- /* Ignore if indexes are out of bounds. */
- if (field->index >= field->report->maxfield ||
- usage->usage_index >= field->report_count)
- return 1;
- td->cc_index = field->index;
- td->cc_value_index = usage->usage_index;
+ app->have_contact_count = true;
+ app->raw_cc = &field->value[usage->usage_index];
return 1;
case HID_DG_AZIMUTH:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_ORIENTATION);
/*
* Azimuth has the range of [0, MAX) representing a full
* revolution. Set ABS_MT_ORIENTATION to a quarter of
@@ -630,11 +806,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->logical_maximum / 4,
cls->sn_move ?
field->logical_maximum / cls->sn_move : 0, 0);
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(a);
return 1;
case HID_DG_CONTACTMAX:
- /* we don't set td->last_slot_field as contactcount and
- * contact max are global to the report */
+ /* contact max is global to the report */
return -1;
case HID_DG_TOUCH:
/* Legacy devices use TIPSWITCH and not TOUCH.
@@ -650,10 +825,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
* MS PTP spec says that external buttons left and right have
* usages 2 and 3.
*/
- if ((cls->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
+ if ((app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
field->application == HID_DG_TOUCHPAD &&
(usage->hid & HID_USAGE) > 1)
code--;
+
+ if (field->application == HID_GD_SYSTEM_MULTIAXIS)
+ code = BTN_0 + ((usage->hid - 1) & HID_USAGE);
+
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
input_set_capability(hi->input, EV_KEY, code);
return 1;
@@ -666,110 +845,68 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
+static int mt_compute_slot(struct mt_device *td, struct mt_application *app,
+ struct mt_usages *slot,
+ struct input_dev *input)
{
- __s32 quirks = td->mtclass.quirks;
+ __s32 quirks = app->quirks;
if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
- return td->curdata.contactid;
+ return *slot->contactid;
if (quirks & MT_QUIRK_CYPRESS)
- return cypress_compute_slot(td);
+ return cypress_compute_slot(app, slot);
if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER)
- return td->num_received;
+ return app->num_received;
if (quirks & MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE)
- return td->curdata.contactid - 1;
+ return *slot->contactid - 1;
- return input_mt_get_slot_by_key(input, td->curdata.contactid);
+ return input_mt_get_slot_by_key(input, *slot->contactid);
}
-/*
- * this function is called when a whole contact has been processed,
- * so that it can assign it to a slot and store the data there
- */
-static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
+static void mt_release_pending_palms(struct mt_device *td,
+ struct mt_application *app,
+ struct input_dev *input)
{
- if ((td->mtclass.quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) &&
- td->num_received >= td->num_expected)
- return;
-
- if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
- int active;
- int slotnum = mt_compute_slot(td, input);
- struct mt_slot *s = &td->curdata;
- struct input_mt *mt = input->mt;
+ int slotnum;
+ bool need_sync = false;
- if (slotnum < 0 || slotnum >= td->maxcontacts)
- return;
-
- if ((td->mtclass.quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) {
- struct input_mt_slot *slot = &mt->slots[slotnum];
- if (input_mt_is_active(slot) &&
- input_mt_is_used(mt, slot))
- return;
- }
-
- if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
- s->confidence_state = true;
- active = (s->touch_state || s->inrange_state) &&
- s->confidence_state;
+ for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) {
+ clear_bit(slotnum, app->pending_palm_slots);
input_mt_slot(input, slotnum);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
- if (active) {
- /* this finger is in proximity of the sensor */
- int wide = (s->w > s->h);
- int major = max(s->w, s->h);
- int minor = min(s->w, s->h);
- int orientation = wide;
+ input_mt_report_slot_state(input, MT_TOOL_PALM, false);
- if (s->has_azimuth)
- orientation = s->a;
-
- /*
- * divided by two to match visual scale of touch
- * for devices with this quirk
- */
- if (td->mtclass.quirks & MT_QUIRK_TOUCH_SIZE_SCALING) {
- major = major >> 1;
- minor = minor >> 1;
- }
-
- input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
- input_event(input, EV_ABS, ABS_MT_TOOL_X, s->cx);
- input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
- input_event(input, EV_ABS, ABS_MT_DISTANCE,
- !s->touch_state);
- input_event(input, EV_ABS, ABS_MT_ORIENTATION,
- orientation);
- input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
- input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
- input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
-
- set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
- }
+ need_sync = true;
}
- td->num_received++;
+ if (need_sync) {
+ input_mt_sync_frame(input);
+ input_sync(input);
+ }
}
/*
* this function is called when a whole packet has been received and processed,
* so that it can decide what to send to the input layer.
*/
-static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
+static void mt_sync_frame(struct mt_device *td, struct mt_application *app,
+ struct input_dev *input)
{
- if (td->mtclass.quirks & MT_QUIRK_WIN8_PTP_BUTTONS)
- input_event(input, EV_KEY, BTN_LEFT, td->left_button_state);
+ if (app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS)
+ input_event(input, EV_KEY, BTN_LEFT, app->left_button_state);
input_mt_sync_frame(input);
- input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
+ input_event(input, EV_MSC, MSC_TIMESTAMP, app->timestamp);
input_sync(input);
- td->num_received = 0;
- td->left_button_state = 0;
+
+ mt_release_pending_palms(td, app, input);
+
+ app->num_received = 0;
+ app->left_button_state = 0;
+
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
else
@@ -777,17 +914,15 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
}
-static int mt_compute_timestamp(struct mt_device *td, struct hid_field *field,
- __s32 value)
+static int mt_compute_timestamp(struct mt_application *app, __s32 value)
{
- long delta = value - td->dev_time;
- unsigned long jdelta = jiffies_to_usecs(jiffies - td->jiffies);
+ long delta = value - app->prev_scantime;
+ unsigned long jdelta = jiffies_to_usecs(jiffies - app->jiffies);
- td->jiffies = jiffies;
- td->dev_time = value;
+ app->jiffies = jiffies;
if (delta < 0)
- delta += field->logical_maximum;
+ delta += app->scantime_logical_max;
/* HID_DG_SCANTIME is expressed in 100us, we want it in us. */
delta *= 100;
@@ -796,7 +931,7 @@ static int mt_compute_timestamp(struct mt_device *td, struct hid_field *field,
/* No data received for a while, resync the timestamp. */
return 0;
else
- return td->timestamp + delta;
+ return app->timestamp + delta;
}
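
A worked example of the per-application timestamp logic above, assuming a hypothetical scantime_logical_max of 2048: with prev_scantime = 2040 and a new value of 8, delta = 8 - 2040 = -2032, so the wraparound correction adds 2048 to give 16 ticks; at 100us per HID_DG_SCANTIME tick that is 1600us appended to the previous timestamp (unless the jiffies delta says the device has been idle, in which case the timestamp resyncs to 0):

    #include <stdio.h>

    int main(void)
    {
            const long max = 2048;          /* hypothetical scantime_logical_max */
            long prev = 2040, now = 8;
            long delta = now - prev;        /* -2032: the counter wrapped */

            if (delta < 0)
                    delta += max;           /* 16 ticks */
            delta *= 100;                   /* HID_DG_SCANTIME ticks are 100us */
            printf("%ld us\n", delta);      /* 1600 us */
            return 0;
    }
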
static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
@@ -809,63 +944,90 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
return 1;
}
-static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value,
- bool first_packet)
+static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ struct mt_application *app,
+ struct mt_usages *slot)
{
- struct mt_device *td = hid_get_drvdata(hid);
- __s32 quirks = td->mtclass.quirks;
- struct input_dev *input = field->hidinput->input;
+ struct input_mt *mt = input->mt;
+ __s32 quirks = app->quirks;
+ bool valid = true;
+ bool confidence_state = true;
+ bool inrange_state = false;
+ int active;
+ int slotnum;
+ int tool = MT_TOOL_FINGER;
+
+ if (!slot)
+ return -EINVAL;
- if (hid->claimed & HID_CLAIMED_INPUT) {
- switch (usage->hid) {
- case HID_DG_INRANGE:
- if (quirks & MT_QUIRK_VALID_IS_INRANGE)
- td->curvalid = value;
- if (quirks & MT_QUIRK_HOVERING)
- td->curdata.inrange_state = value;
- break;
- case HID_DG_TIPSWITCH:
- if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
- td->curvalid = value;
- td->curdata.touch_state = value;
- break;
- case HID_DG_CONFIDENCE:
- if (quirks & MT_QUIRK_CONFIDENCE)
- td->curdata.confidence_state = value;
- if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
- td->curvalid = value;
- break;
- case HID_DG_CONTACTID:
- td->curdata.contactid = value;
- break;
- case HID_DG_TIPPRESSURE:
- td->curdata.p = value;
- break;
- case HID_GD_X:
- if (usage->code == ABS_MT_TOOL_X)
- td->curdata.cx = value;
- else
- td->curdata.x = value;
- break;
- case HID_GD_Y:
- if (usage->code == ABS_MT_TOOL_Y)
- td->curdata.cy = value;
- else
- td->curdata.y = value;
- break;
- case HID_DG_WIDTH:
- td->curdata.w = value;
- break;
- case HID_DG_HEIGHT:
- td->curdata.h = value;
- break;
- case HID_DG_SCANTIME:
- td->timestamp = mt_compute_timestamp(td, field, value);
- break;
- case HID_DG_CONTACTCOUNT:
- break;
- case HID_DG_AZIMUTH:
+ if ((quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) &&
+ app->num_received >= app->num_expected)
+ return -EAGAIN;
+
+ if (!(quirks & MT_QUIRK_ALWAYS_VALID)) {
+ if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ valid = *slot->inrange_state;
+ if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+ valid = *slot->tip_state;
+ if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+ valid = *slot->confidence_state;
+
+ if (!valid)
+ return 0;
+ }
+
+ slotnum = mt_compute_slot(td, app, slot, input);
+ if (slotnum < 0 || slotnum >= td->maxcontacts)
+ return 0;
+
+ if ((quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) {
+ struct input_mt_slot *i_slot = &mt->slots[slotnum];
+
+ if (input_mt_is_active(i_slot) &&
+ input_mt_is_used(mt, i_slot))
+ return -EAGAIN;
+ }
+
+ if (quirks & MT_QUIRK_CONFIDENCE)
+ confidence_state = *slot->confidence_state;
+
+ if (quirks & MT_QUIRK_HOVERING)
+ inrange_state = *slot->inrange_state;
+
+ active = *slot->tip_state || inrange_state;
+
+ if (app->application == HID_GD_SYSTEM_MULTIAXIS)
+ tool = MT_TOOL_DIAL;
+ else if (unlikely(!confidence_state)) {
+ tool = MT_TOOL_PALM;
+ if (!active &&
+ input_mt_is_active(&mt->slots[slotnum])) {
+ /*
+ * Non-confidence was reported for a
+ * previously valid contact that is also no
+ * longer valid. We can't simply report
+ * lift-off as userspace will not be aware
+ * of non-confidence, so we need to split
+ * it into 2 events: active MT_TOOL_PALM
+ * and a separate liftoff.
+ */
+ active = true;
+ set_bit(slotnum, app->pending_palm_slots);
+ }
+ }
+
+ input_mt_slot(input, slotnum);
+ input_mt_report_slot_state(input, tool, active);
+ if (active) {
+ /* this finger is in proximity of the sensor */
+ int wide = (*slot->w > *slot->h);
+ int major = max(*slot->w, *slot->h);
+ int minor = min(*slot->w, *slot->h);
+ int orientation = wide;
+ int max_azimuth;
+ int azimuth;
+
+ if (slot->a != DEFAULT_ZERO) {
/*
* Azimuth is counter-clockwise and ranges from [0, MAX)
* (a full revolution). Convert it to clockwise ranging
@@ -876,77 +1038,107 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
* out of range to [-MAX/2, MAX/2] to report an upside
* down ellipsis.
*/
- if (value > field->logical_maximum / 2)
- value -= field->logical_maximum;
- td->curdata.a = -value;
- td->curdata.has_azimuth = true;
- break;
- case HID_DG_TOUCH:
- /* do nothing */
- break;
+ azimuth = *slot->a;
+ max_azimuth = input_abs_get_max(input,
+ ABS_MT_ORIENTATION);
+ if (azimuth > max_azimuth * 2)
+ azimuth -= max_azimuth * 4;
+ orientation = -azimuth;
+ }
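
Worked example for the conversion above, with a hypothetical device whose azimuth logical maximum is 360: ABS_MT_ORIENTATION was set up with max_azimuth = 360 / 4 = 90, so a counter-clockwise azimuth of 270 exceeds max_azimuth * 2 = 180 and is folded to 270 - 360 = -90; negating yields orientation = 90, the same ellipse reported within the clockwise [-2*max, 2*max] range:

    #include <stdio.h>

    int main(void)
    {
            const int max_azimuth = 360 / 4;   /* hypothetical logical max 360 */
            int azimuth = 270;                 /* counter-clockwise, from device */
            int orientation;

            if (azimuth > max_azimuth * 2)     /* 270 > 180: fold the range */
                    azimuth -= max_azimuth * 4; /* 270 - 360 = -90 */
            orientation = -azimuth;            /* 90: clockwise for userspace */
            printf("%d\n", orientation);
            return 0;
    }
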
- default:
+ if (quirks & MT_QUIRK_TOUCH_SIZE_SCALING) {
/*
- * For Win8 PTP touchpads we should only look at
- * non finger/touch events in the first_packet of
- * a (possible) multi-packet frame.
+ * divided by two to match visual scale of touch
+ * for devices with this quirk
*/
- if ((quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
- !first_packet)
- return;
+ major = major >> 1;
+ minor = minor >> 1;
+ }
- /*
- * For Win8 PTP touchpads we map both the clickpad click
- * and any "external" left buttons to BTN_LEFT if a
- * device claims to have both we need to report 1 for
- * BTN_LEFT if either is pressed, so we or all values
- * together and report the result in mt_sync_frame().
- */
- if ((quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
- usage->type == EV_KEY && usage->code == BTN_LEFT) {
- td->left_button_state |= value;
- return;
- }
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, *slot->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, *slot->y);
+ input_event(input, EV_ABS, ABS_MT_TOOL_X, *slot->cx);
+ input_event(input, EV_ABS, ABS_MT_TOOL_Y, *slot->cy);
+ input_event(input, EV_ABS, ABS_MT_DISTANCE, !*slot->tip_state);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION, orientation);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, *slot->p);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
+
+ set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
+ }
- if (usage->type)
- input_event(input, usage->type, usage->code,
- value);
+ return 0;
+}
+
+static void mt_process_mt_event(struct hid_device *hid,
+ struct mt_application *app,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ __s32 value,
+ bool first_packet)
+{
+ __s32 quirks = app->quirks;
+ struct input_dev *input = field->hidinput->input;
+
+ if (!usage->type || !(hid->claimed & HID_CLAIMED_INPUT))
+ return;
+
+ if (quirks & MT_QUIRK_WIN8_PTP_BUTTONS) {
+
+ /*
+ * For Win8 PTP touchpads we should only look at
+ * non finger/touch events in the first_packet of a
+ * (possible) multi-packet frame.
+ */
+ if (!first_packet)
return;
- }
- if (usage->usage_index + 1 == field->report_count) {
- /* we only take into account the last report. */
- if (usage->hid == td->last_slot_field)
- mt_complete_slot(td, field->hidinput->input);
+ /*
+ * For Win8 PTP touchpads we map both the clickpad click
+ * and any "external" left buttons to BTN_LEFT. If a
+ * device claims to have both, we need to report 1 for
+ * BTN_LEFT if either is pressed, so we OR all values
+ * together and report the result in mt_sync_frame().
+ */
+ if (usage->type == EV_KEY && usage->code == BTN_LEFT) {
+ app->left_button_state |= value;
+ return;
}
-
}
+
+ input_event(input, usage->type, usage->code, value);
}
-static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
+static void mt_touch_report(struct hid_device *hid,
+ struct mt_report_data *rdata)
{
struct mt_device *td = hid_get_drvdata(hid);
+ struct hid_report *report = rdata->report;
+ struct mt_application *app = rdata->application;
struct hid_field *field;
+ struct input_dev *input;
+ struct mt_usages *slot;
bool first_packet;
unsigned count;
- int r, n, scantime = 0;
+ int r, n;
+ int scantime = 0;
+ int contact_count = -1;
/* sticky fingers release in progress, abort */
if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
+ scantime = *app->scantime;
+ app->timestamp = mt_compute_timestamp(app, scantime);
+ if (app->raw_cc != DEFAULT_ZERO)
+ contact_count = *app->raw_cc;
+
/*
* Includes multi-packet support where subsequent
* packets are sent with zero contactcount.
*/
- if (td->scantime_index >= 0) {
- field = report->field[td->scantime_index];
- scantime = field->value[td->scantime_val_index];
- }
- if (td->cc_index >= 0) {
- struct hid_field *field = report->field[td->cc_index];
- int value = field->value[td->cc_value_index];
-
+ if (contact_count >= 0) {
/*
* For Win8 PTPs the first packet (td->num_received == 0) may
* have a contactcount of 0 if there only is a button event.
@@ -954,16 +1146,25 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* of a possible multi-packet frame by checking that the
* timestamp has changed.
*/
- if ((td->mtclass.quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
- td->num_received == 0 && td->prev_scantime != scantime)
- td->num_expected = value;
+ if ((app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
+ app->num_received == 0 &&
+ app->prev_scantime != scantime)
+ app->num_expected = contact_count;
/* A non 0 contact count always indicates a first packet */
- else if (value)
- td->num_expected = value;
+ else if (contact_count)
+ app->num_expected = contact_count;
+ }
+ app->prev_scantime = scantime;
+
+ first_packet = app->num_received == 0;
+
+ input = report->field[0]->hidinput->input;
+
+ list_for_each_entry(slot, &app->mt_usages, list) {
+ if (!mt_process_slot(td, input, app, slot))
+ app->num_received++;
}
- td->prev_scantime = scantime;
- first_packet = td->num_received == 0;
for (r = 0; r < report->maxfield; r++) {
field = report->field[r];
count = field->report_count;
@@ -972,12 +1173,13 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
continue;
for (n = 0; n < count; n++)
- mt_process_mt_event(hid, field, &field->usage[n],
- field->value[n], first_packet);
+ mt_process_mt_event(hid, app, field,
+ &field->usage[n], field->value[n],
+ first_packet);
}
- if (td->num_received >= td->num_expected)
- mt_sync_frame(td, report->field[0]->hidinput->input);
+ if (app->num_received >= app->num_expected)
+ mt_sync_frame(td, app, input);
/*
* Windows 8 specs says 2 things:
@@ -997,7 +1199,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* only affect laggish machines and the ones that have a firmware
* defect.
*/
- if (td->mtclass.quirks & MT_QUIRK_STICKY_FINGERS) {
+ if (app->quirks & MT_QUIRK_STICKY_FINGERS) {
if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
mod_timer(&td->release_timer,
jiffies + msecs_to_jiffies(100));
@@ -1009,7 +1211,8 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
}
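The frame accounting above reduces to a few rules: a non-zero contact count (re)arms num_expected, each processed slot bumps num_received, and the frame is synced once enough slots have arrived. A toy standalone model of that flow, with names chosen purely for illustration:

#include <stdio.h>

/*
 * Toy model of multi-packet frame accounting: the first packet of a
 * frame carries the total contact count; follow-up packets carry zero.
 */
struct app_state {
	int num_expected;
	int num_received;
};

static void receive_packet(struct app_state *app, int contact_count,
			   int slots_in_packet)
{
	if (contact_count > 0)		/* non-zero count: first packet */
		app->num_expected = contact_count;

	app->num_received += slots_in_packet;

	if (app->num_received >= app->num_expected) {
		printf("frame complete: %d contacts\n", app->num_received);
		app->num_received = 0;
	}
}

int main(void)
{
	struct app_state app = { 0, 0 };

	receive_packet(&app, 7, 5);	/* first packet: 5 of 7 contacts */
	receive_packet(&app, 0, 2);	/* continuation: remaining 2 */
	return 0;
}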
static int mt_touch_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
+ struct hid_input *hi,
+ struct mt_application *app)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
@@ -1019,28 +1222,36 @@ static int mt_touch_input_configured(struct hid_device *hdev,
if (!td->maxcontacts)
td->maxcontacts = MT_DEFAULT_MAXCONTACT;
- mt_post_parse(td);
+ mt_post_parse(td, app);
if (td->serial_maybe)
- mt_post_parse_default_settings(td);
+ mt_post_parse_default_settings(td, app);
if (cls->is_indirect)
- td->mt_flags |= INPUT_MT_POINTER;
+ app->mt_flags |= INPUT_MT_POINTER;
- if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
- td->mt_flags |= INPUT_MT_DROP_UNUSED;
+ if (app->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+ app->mt_flags |= INPUT_MT_DROP_UNUSED;
/* check for clickpads */
- if ((td->mt_flags & INPUT_MT_POINTER) && (td->buttons_count == 1))
+ if ((app->mt_flags & INPUT_MT_POINTER) &&
+ (app->buttons_count == 1))
td->is_buttonpad = true;
if (td->is_buttonpad)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- ret = input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+ app->pending_palm_slots = devm_kcalloc(&hi->input->dev,
+ BITS_TO_LONGS(td->maxcontacts),
+ sizeof(long),
+ GFP_KERNEL);
+ if (!app->pending_palm_slots)
+ return -ENOMEM;
+
+ ret = input_mt_init_slots(input, td->maxcontacts, app->mt_flags);
if (ret)
return ret;
- td->mt_flags = 0;
+ app->mt_flags = 0;
return 0;
}
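The pending_palm_slots buffer above is sized with the kernel's usual one-bit-per-slot bitmap idiom. A userspace sketch of that sizing, with BITS_TO_LONGS reimplemented here only for illustration:

#include <limits.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's BITS_TO_LONGS() macro. */
#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	int maxcontacts = 10;

	/* One bit per slot; calloc zeroes it, as devm_kcalloc does. */
	unsigned long *slots = calloc(BITS_TO_LONGS(maxcontacts),
				      sizeof(long));
	if (!slots)
		return 1;

	slots[3 / BITS_PER_LONG] |= 1UL << (3 % BITS_PER_LONG); /* mark slot 3 */
	free(slots);
	return 0;
}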
@@ -1051,6 +1262,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_application *application;
+ struct mt_report_data *rdata;
+
+ rdata = mt_find_report_data(td, field->report);
+ if (!rdata) {
+ hid_err(hdev, "failed to allocate data for report\n");
+ return 0;
+ }
+
+ application = rdata->application;
/*
* If mtclass.export_all_inputs is not set, only map fields from
@@ -1066,8 +1287,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->application != HID_GD_SYSTEM_CONTROL &&
field->application != HID_CP_CONSUMER_CONTROL &&
field->application != HID_GD_WIRELESS_RADIO_CTLS &&
+ field->application != HID_GD_SYSTEM_MULTIAXIS &&
!(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
- td->mtclass.quirks & MT_QUIRK_ASUS_CUSTOM_UP))
+ application->quirks & MT_QUIRK_ASUS_CUSTOM_UP))
return -1;
/*
@@ -1076,7 +1298,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
* map usages to input keys.
*/
if (field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
- td->mtclass.quirks & MT_QUIRK_ASUS_CUSTOM_UP &&
+ application->quirks & MT_QUIRK_ASUS_CUSTOM_UP &&
(usage->hid & HID_USAGE_PAGE) == HID_UP_CUSTOM) {
set_bit(EV_REP, hi->input->evbit);
if (field->flags & HID_MAIN_ITEM_VARIABLE)
@@ -1093,23 +1315,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
- /*
- * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
- * for the stylus.
- * The check for mt_report_id ensures we don't process
- * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
- * collection, but within the report ID.
- */
- if (field->physical == HID_DG_STYLUS)
- return 0;
- else if ((field->physical == 0) &&
- (field->report->id != td->mt_report_id) &&
- (td->mt_report_id != -1))
- return 0;
-
- if (field->application == HID_DG_TOUCHSCREEN ||
- field->application == HID_DG_TOUCHPAD)
- return mt_touch_input_mapping(hdev, hi, field, usage, bit, max);
+ if (rdata->is_mt_collection)
+ return mt_touch_input_mapping(hdev, hi, field, usage, bit, max,
+ application);
/* let hid-core decide for the others */
return 0;
@@ -1119,15 +1327,11 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- /*
- * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
- * for the stylus.
- */
- if (field->physical == HID_DG_STYLUS)
- return 0;
+ struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_report_data *rdata;
- if (field->application == HID_DG_TOUCHSCREEN ||
- field->application == HID_DG_TOUCHPAD) {
+ rdata = mt_find_report_data(td, field->report);
+ if (rdata && rdata->is_mt_collection) {
/* We own these mappings, tell hid-input to ignore them */
return -1;
}
@@ -1140,8 +1344,10 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct mt_device *td = hid_get_drvdata(hid);
+ struct mt_report_data *rdata;
- if (field->report->id == td->mt_report_id)
+ rdata = mt_find_report_data(td, field->report);
+ if (rdata && rdata->is_mt_collection)
return mt_touch_event(hid, field, usage, value);
return 0;
@@ -1151,12 +1357,14 @@ static void mt_report(struct hid_device *hid, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hid);
struct hid_field *field = report->field[0];
+ struct mt_report_data *rdata;
if (!(hid->claimed & HID_CLAIMED_INPUT))
return;
- if (report->id == td->mt_report_id)
- return mt_touch_report(hid, report);
+ rdata = mt_find_report_data(td, report);
+ if (rdata && rdata->is_mt_collection)
+ return mt_touch_report(hid, rdata);
if (field && field->hidinput && field->hidinput->input)
input_sync(field->hidinput->input);
@@ -1197,9 +1405,9 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
return true;
case HID_DG_CONTACTMAX:
- if (td->mtclass.maxcontacts) {
+ if (cls->maxcontacts) {
max = min_t(int, field->logical_maximum,
- td->mtclass.maxcontacts);
+ cls->maxcontacts);
if (field->value[index] != max) {
field->value[index] = max;
return true;
@@ -1259,12 +1467,13 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
}
}
-static void mt_post_parse_default_settings(struct mt_device *td)
+static void mt_post_parse_default_settings(struct mt_device *td,
+ struct mt_application *app)
{
- __s32 quirks = td->mtclass.quirks;
+ __s32 quirks = app->quirks;
/* unknown serial device needs special quirks */
- if (td->touches_by_report == 1) {
+ if (list_is_singular(&app->mt_usages)) {
quirks |= MT_QUIRK_ALWAYS_VALID;
quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
@@ -1272,21 +1481,13 @@ static void mt_post_parse_default_settings(struct mt_device *td)
quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
- td->mtclass.quirks = quirks;
+ app->quirks = quirks;
}
-static void mt_post_parse(struct mt_device *td)
+static void mt_post_parse(struct mt_device *td, struct mt_application *app)
{
- struct mt_fields *f = td->fields;
- struct mt_class *cls = &td->mtclass;
-
- if (td->touches_by_report > 0) {
- int field_count_per_touch = f->length / td->touches_by_report;
- td->last_slot_field = f->usages[field_count_per_touch - 1];
- }
-
- if (td->cc_index < 0)
- cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+ if (!app->have_contact_count)
+ app->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
@@ -1295,13 +1496,24 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
char *name;
const char *suffix = NULL;
unsigned int application = 0;
+ struct mt_report_data *rdata;
+ struct mt_application *mt_application = NULL;
struct hid_report *report;
int ret;
list_for_each_entry(report, &hi->reports, hidinput_list) {
application = report->application;
- if (report->id == td->mt_report_id) {
- ret = mt_touch_input_configured(hdev, hi);
+ rdata = mt_find_report_data(td, report);
+ if (!rdata) {
+ hid_err(hdev, "failed to allocate data for report\n");
+ return -ENOMEM;
+ }
+
+ mt_application = rdata->application;
+
+ if (rdata->is_mt_collection) {
+ ret = mt_touch_input_configured(hdev, hi,
+ mt_application);
if (ret)
return ret;
}
@@ -1327,6 +1539,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
case HID_GD_SYSTEM_CONTROL:
case HID_CP_CONSUMER_CONTROL:
case HID_GD_WIRELESS_RADIO_CTLS:
+ case HID_GD_SYSTEM_MULTIAXIS:
/* already handled by hid core */
break;
case HID_DG_TOUCHSCREEN:
@@ -1390,6 +1603,7 @@ static void mt_fix_const_fields(struct hid_device *hdev, unsigned int usage)
static void mt_release_contacts(struct hid_device *hid)
{
struct hid_input *hidinput;
+ struct mt_application *application;
struct mt_device *td = hid_get_drvdata(hid);
list_for_each_entry(hidinput, &hid->inputs, list) {
@@ -1409,7 +1623,9 @@ static void mt_release_contacts(struct hid_device *hid)
}
}
- td->num_received = 0;
+ list_for_each_entry(application, &td->applications, list) {
+ application->num_received = 0;
+ }
}
static void mt_expired_timeout(struct timer_list *t)
@@ -1432,7 +1648,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret, i;
struct mt_device *td;
- struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+ const struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
for (i = 0; mt_classes[i].name ; i++) {
if (id->driver_data == mt_classes[i].name) {
@@ -1449,17 +1665,10 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->hdev = hdev;
td->mtclass = *mtclass;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
- td->cc_index = -1;
- td->scantime_index = -1;
- td->mt_report_id = -1;
hid_set_drvdata(hdev, td);
- td->fields = devm_kzalloc(&hdev->dev, sizeof(struct mt_fields),
- GFP_KERNEL);
- if (!td->fields) {
- dev_err(&hdev->dev, "cannot allocate multitouch fields data\n");
- return -ENOMEM;
- }
+ INIT_LIST_HEAD(&td->applications);
+ INIT_LIST_HEAD(&td->reports);
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
@@ -1496,10 +1705,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
- /* release .fields memory as it is not used anymore */
- devm_kfree(&hdev->dev, td->fields);
- td->fields = NULL;
-
return 0;
}
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 43b1c7234316..9bc6f4867cb3 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = sysfs_create_group(&hdev->dev.kobj,
&ntrig_attribute_group);
+ if (ret)
+ hid_err(hdev, "cannot create sysfs group\n");
return 0;
err_free:
diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c
index daf59578bf93..73c9d4c4fa34 100644
--- a/drivers/hid/hid-redragon.c
+++ b/drivers/hid/hid-redragon.c
@@ -44,29 +44,6 @@ static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
-static int redragon_probe(struct hid_device *dev,
- const struct hid_device_id *id)
-{
- int ret;
-
- ret = hid_parse(dev);
- if (ret) {
- hid_err(dev, "parse failed\n");
- return ret;
- }
-
- /* do not register unused input device */
- if (dev->maxapplication == 1)
- return 0;
-
- ret = hid_hw_start(dev, HID_CONNECT_DEFAULT);
- if (ret) {
- hid_err(dev, "hw start failed\n");
- return ret;
- }
-
- return 0;
-}
static const struct hid_device_id redragon_devices[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_REDRAGON_ASURA)},
{}
@@ -77,8 +54,7 @@ MODULE_DEVICE_TABLE(hid, redragon_devices);
static struct hid_driver redragon_driver = {
.name = "redragon",
.id_table = redragon_devices,
- .report_fixup = redragon_report_fixup,
- .probe = redragon_probe
+ .report_fixup = redragon_report_fixup
};
module_hid_driver(redragon_driver);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index e475c5073c99..9671a4bad643 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1353,7 +1353,7 @@ static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
char *name;
int ret;
- sc->touchpad = input_allocate_device();
+ sc->touchpad = devm_input_allocate_device(&sc->hdev->dev);
if (!sc->touchpad)
return -ENOMEM;
@@ -1370,11 +1370,9 @@ static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
* DS4 compatible non-Sony devices with different names.
*/
name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
- name = kzalloc(name_sz, GFP_KERNEL);
- if (!name) {
- ret = -ENOMEM;
- goto err;
- }
+ name = devm_kzalloc(&sc->hdev->dev, name_sz, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
sc->touchpad->name = name;
@@ -1403,34 +1401,13 @@ static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
ret = input_mt_init_slots(sc->touchpad, touch_count, INPUT_MT_POINTER);
if (ret < 0)
- goto err;
+ return ret;
ret = input_register_device(sc->touchpad);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-
-err:
- kfree(sc->touchpad->name);
- sc->touchpad->name = NULL;
-
- input_free_device(sc->touchpad);
- sc->touchpad = NULL;
-
- return ret;
-}
-
-static void sony_unregister_touchpad(struct sony_sc *sc)
-{
- if (!sc->touchpad)
- return;
-
- kfree(sc->touchpad->name);
- sc->touchpad->name = NULL;
-
- input_unregister_device(sc->touchpad);
- sc->touchpad = NULL;
}
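This hunk, like the rest of the conversion in this file, relies on the devres lifetime rules: anything obtained through a devm_* call against the HID device is released by the driver core after remove(), which is what makes the err: unwind path and the matching unregister helper removable. A kernel-style sketch of the pattern; the function is illustrative, not the sony driver itself:

#include <linux/hid.h>

static int example_register_name(struct hid_device *hdev)
{
	/* Owned by devres: freed automatically when the driver detaches. */
	char *name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s Touchpad",
				    hdev->name);
	if (!name)
		return -ENOMEM;	/* nothing to unwind by hand */

	/* ... register input devices; later failures can simply return ... */
	return 0;
}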
static int sony_register_sensors(struct sony_sc *sc)
@@ -1440,7 +1417,7 @@ static int sony_register_sensors(struct sony_sc *sc)
int ret;
int range;
- sc->sensor_dev = input_allocate_device();
+ sc->sensor_dev = devm_input_allocate_device(&sc->hdev->dev);
if (!sc->sensor_dev)
return -ENOMEM;
@@ -1457,11 +1434,9 @@ static int sony_register_sensors(struct sony_sc *sc)
* DS4 compatible non-Sony devices with different names.
*/
name_sz = strlen(sc->hdev->name) + sizeof(SENSOR_SUFFIX);
- name = kzalloc(name_sz, GFP_KERNEL);
- if (!name) {
- ret = -ENOMEM;
- goto err;
- }
+ name = devm_kzalloc(&sc->hdev->dev, name_sz, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
snprintf(name, name_sz, "%s" SENSOR_SUFFIX, sc->hdev->name);
sc->sensor_dev->name = name;
@@ -1503,33 +1478,11 @@ static int sony_register_sensors(struct sony_sc *sc)
ret = input_register_device(sc->sensor_dev);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-
-err:
- kfree(sc->sensor_dev->name);
- sc->sensor_dev->name = NULL;
-
- input_free_device(sc->sensor_dev);
- sc->sensor_dev = NULL;
-
- return ret;
}
-static void sony_unregister_sensors(struct sony_sc *sc)
-{
- if (!sc->sensor_dev)
- return;
-
- kfree(sc->sensor_dev->name);
- sc->sensor_dev->name = NULL;
-
- input_unregister_device(sc->sensor_dev);
- sc->sensor_dev = NULL;
-}
-
-
/*
* Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
* to "operational". Without this, the ps3 controller will not report any
@@ -1987,25 +1940,6 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
return 0;
}
-static void sony_leds_remove(struct sony_sc *sc)
-{
- struct led_classdev *led;
- int n;
-
- BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
-
- for (n = 0; n < sc->led_count; n++) {
- led = sc->leds[n];
- sc->leds[n] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
-
- sc->led_count = 0;
-}
-
static int sony_leds_init(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
@@ -2078,11 +2012,10 @@ static int sony_leds_init(struct sony_sc *sc)
if (use_ds4_names)
name_sz = strlen(dev_name(&hdev->dev)) + strlen(ds4_name_str[n]) + 2;
- led = kzalloc(sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "Couldn't allocate memory for LED %d\n", n);
- ret = -ENOMEM;
- goto error_leds;
+ return -ENOMEM;
}
name = (void *)(&led[1]);
@@ -2103,21 +2036,14 @@ static int sony_leds_init(struct sony_sc *sc)
sc->leds[n] = led;
- ret = led_classdev_register(&hdev->dev, led);
+ ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "Failed to register LED %d\n", n);
- sc->leds[n] = NULL;
- kfree(led);
- goto error_leds;
+ return ret;
}
}
- return ret;
-
-error_leds:
- sony_leds_remove(sc);
-
- return ret;
+ return 0;
}
static void sixaxis_send_output_report(struct sony_sc *sc)
@@ -2276,16 +2202,20 @@ static int sony_allocate_output_report(struct sony_sc *sc)
if ((sc->quirks & SIXAXIS_CONTROLLER) ||
(sc->quirks & NAVIGATION_CONTROLLER))
sc->output_report_dmabuf =
- kmalloc(sizeof(union sixaxis_output_report_01),
+ devm_kmalloc(&sc->hdev->dev,
+ sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
+ sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
+ DS4_OUTPUT_REPORT_0x11_SIZE,
GFP_KERNEL);
else if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE))
- sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
+ sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
+ DS4_OUTPUT_REPORT_0x05_SIZE,
GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
- sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
+ sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
+ MOTION_REPORT_0x02_SIZE,
GFP_KERNEL);
else
return 0;
@@ -2392,36 +2322,21 @@ static int sony_battery_probe(struct sony_sc *sc, int append_dev_id)
sc->battery_desc.get_property = sony_battery_get_property;
sc->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
sc->battery_desc.use_for_apm = 0;
- sc->battery_desc.name = kasprintf(GFP_KERNEL, battery_str_fmt,
- sc->mac_address, sc->device_id);
+ sc->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ battery_str_fmt, sc->mac_address, sc->device_id);
if (!sc->battery_desc.name)
return -ENOMEM;
- sc->battery = power_supply_register(&hdev->dev, &sc->battery_desc,
+ sc->battery = devm_power_supply_register(&hdev->dev, &sc->battery_desc,
&psy_cfg);
if (IS_ERR(sc->battery)) {
ret = PTR_ERR(sc->battery);
hid_err(hdev, "Unable to register battery device\n");
- goto err_free;
+ return ret;
}
power_supply_powers(sc->battery, &hdev->dev);
return 0;
-
-err_free:
- kfree(sc->battery_desc.name);
- sc->battery_desc.name = NULL;
- return ret;
-}
-
-static void sony_battery_remove(struct sony_sc *sc)
-{
- if (!sc->battery_desc.name)
- return;
-
- power_supply_unregister(sc->battery);
- kfree(sc->battery_desc.name);
- sc->battery_desc.name = NULL;
}
/*
@@ -2879,16 +2794,7 @@ err_stop:
device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
if (sc->hw_version)
device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
- if (sc->quirks & SONY_LED_SUPPORT)
- sony_leds_remove(sc);
- if (sc->quirks & SONY_BATTERY_SUPPORT)
- sony_battery_remove(sc);
- if (sc->touchpad)
- sony_unregister_touchpad(sc);
- if (sc->sensor_dev)
- sony_unregister_sensors(sc);
sony_cancel_work_sync(sc);
- kfree(sc->output_report_dmabuf);
sony_remove_dev_list(sc);
sony_release_device_id(sc);
hid_hw_stop(hdev);
@@ -2965,18 +2871,6 @@ static void sony_remove(struct hid_device *hdev)
hid_hw_close(hdev);
- if (sc->quirks & SONY_LED_SUPPORT)
- sony_leds_remove(sc);
-
- if (sc->quirks & SONY_BATTERY_SUPPORT)
- sony_battery_remove(sc);
-
- if (sc->touchpad)
- sony_unregister_touchpad(sc);
-
- if (sc->sensor_dev)
- sony_unregister_sensors(sc);
-
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
@@ -2988,8 +2882,6 @@ static void sony_remove(struct hid_device *hdev)
sony_cancel_work_sync(sc);
- kfree(sc->output_report_dmabuf);
-
sony_remove_dev_list(sc);
sony_release_device_id(sc);
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 579884ebd94d..7780da4fe897 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -455,6 +455,12 @@ static __u8 wiimote_cmd_read_ext(struct wiimote_data *wdata, __u8 *rmem)
return WIIMOTE_EXT_BALANCE_BOARD;
if (rmem[4] == 0x01 && rmem[5] == 0x20)
return WIIMOTE_EXT_PRO_CONTROLLER;
+ if (rmem[0] == 0x01 && rmem[1] == 0x00 &&
+ rmem[4] == 0x01 && rmem[5] == 0x03)
+ return WIIMOTE_EXT_DRUMS;
+ if (rmem[0] == 0x00 && rmem[1] == 0x00 &&
+ rmem[4] == 0x01 && rmem[5] == 0x03)
+ return WIIMOTE_EXT_GUITAR;
return WIIMOTE_EXT_UNKNOWN;
}
@@ -488,6 +494,8 @@ static bool wiimote_cmd_map_mp(struct wiimote_data *wdata, __u8 exttype)
/* map MP with correct pass-through mode */
switch (exttype) {
case WIIMOTE_EXT_CLASSIC_CONTROLLER:
+ case WIIMOTE_EXT_DRUMS:
+ case WIIMOTE_EXT_GUITAR:
wmem = 0x07;
break;
case WIIMOTE_EXT_NUNCHUK:
@@ -1075,6 +1083,8 @@ static const char *wiimote_exttype_names[WIIMOTE_EXT_NUM] = {
[WIIMOTE_EXT_CLASSIC_CONTROLLER] = "Nintendo Wii Classic Controller",
[WIIMOTE_EXT_BALANCE_BOARD] = "Nintendo Wii Balance Board",
[WIIMOTE_EXT_PRO_CONTROLLER] = "Nintendo Wii U Pro Controller",
+ [WIIMOTE_EXT_DRUMS] = "Nintendo Wii Drums",
+ [WIIMOTE_EXT_GUITAR] = "Nintendo Wii Guitar",
};
/*
@@ -1660,6 +1670,10 @@ static ssize_t wiimote_ext_show(struct device *dev,
return sprintf(buf, "balanceboard\n");
case WIIMOTE_EXT_PRO_CONTROLLER:
return sprintf(buf, "procontroller\n");
+ case WIIMOTE_EXT_DRUMS:
+ return sprintf(buf, "drums\n");
+ case WIIMOTE_EXT_GUITAR:
+ return sprintf(buf, "guitar\n");
case WIIMOTE_EXT_UNKNOWN:
/* fallthrough */
default:
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index c830ed39348f..aa72eb9a8e2f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -1950,6 +1950,444 @@ static const struct wiimod_ops wiimod_pro = {
};
/*
+ * Drums
+ * Guitar-Hero, Rock-Band and other games came bundled with drums which can
+ * be plugged as an extension to a Wiimote. Drum reports are still not
+ * entirely figured out, but the most important information is known.
+ * We create a separate device for drums and report all information via this
+ * input device.
+ */
+
+static inline void wiimod_drums_report_pressure(struct wiimote_data *wdata,
+ __u8 none, __u8 which,
+ __u8 pressure, __u8 onoff,
+ __u8 *store, __u16 code,
+ __u8 which_code)
+{
+ static const __u8 default_pressure = 3;
+
+ if (!none && which == which_code) {
+ *store = pressure;
+ input_report_abs(wdata->extension.input, code, *store);
+ } else if (onoff != !!*store) {
+ *store = onoff ? default_pressure : 0;
+ input_report_abs(wdata->extension.input, code, *store);
+ }
+}
+
+static void wiimod_drums_in_ext(struct wiimote_data *wdata, const __u8 *ext)
+{
+ __u8 pressure, which, none, hhp, sx, sy;
+ __u8 o, r, y, g, b, bass, bm, bp;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:0> |
+ * 2 | 0 | 0 | SY <5:0> |
+ * -----+-----+-----+-----------------------------+-----+
+ * 3 | HPP | NON | WHICH <5:1> | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 4 | SOFT <7:5> | 0 | 1 | 1 | 0 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | ? | 1 | 1 | B- | 1 | B+ | 1 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | O | R | Y | G | B | BSS | 1 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ *
+ * With Motion+ enabled, the following bits will get invalid:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:1> |XXXXX|
+ * 2 | 0 | 0 | SY <5:1> |XXXXX|
+ * -----+-----+-----+-----------------------------+-----+
+ * 3 | HPP | NON | WHICH <5:1> | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 4 | SOFT <7:5> | 0 | 1 | 1 | 0 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | ? | 1 | 1 | B- | 1 | B+ | 1 |XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | O | R | Y | G | B | BSS |XXXXX|XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+
+ pressure = 7 - (ext[3] >> 5);
+ which = (ext[2] >> 1) & 0x1f;
+ none = !!(ext[2] & 0x40);
+ hhp = !(ext[2] & 0x80);
+ sx = ext[0] & 0x3f;
+ sy = ext[1] & 0x3f;
+ o = !(ext[5] & 0x80);
+ r = !(ext[5] & 0x40);
+ y = !(ext[5] & 0x20);
+ g = !(ext[5] & 0x10);
+ b = !(ext[5] & 0x08);
+ bass = !(ext[5] & 0x04);
+ bm = !(ext[4] & 0x10);
+ bp = !(ext[4] & 0x04);
+
+ if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
+ sx &= 0x3e;
+ sy &= 0x3e;
+ }
+
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ o, &wdata->state.pressure_drums[0],
+ ABS_HAT2Y, 0x0e);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ r, &wdata->state.pressure_drums[1],
+ ABS_HAT0X, 0x19);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ y, &wdata->state.pressure_drums[2],
+ ABS_HAT2X, 0x11);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ g, &wdata->state.pressure_drums[3],
+ ABS_HAT1X, 0x12);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ b, &wdata->state.pressure_drums[4],
+ ABS_HAT0Y, 0x0f);
+
+ /* Bass shares pressure with hi-hat (set via hhp) */
+ wiimod_drums_report_pressure(wdata, none, hhp ? 0xff : which, pressure,
+ bass, &wdata->state.pressure_drums[5],
+ ABS_HAT3X, 0x1b);
+ /* Hi-hat has no on/off values, just pressure. Force to off/0. */
+ wiimod_drums_report_pressure(wdata, none, hhp ? which : 0xff, pressure,
+ 0, &wdata->state.pressure_drums[6],
+ ABS_HAT3Y, 0x0e);
+
+ input_report_abs(wdata->extension.input, ABS_X, sx - 0x20);
+ input_report_abs(wdata->extension.input, ABS_Y, sy - 0x20);
+
+ input_report_key(wdata->extension.input, BTN_START, bp);
+ input_report_key(wdata->extension.input, BTN_SELECT, bm);
+
+ input_sync(wdata->extension.input);
+}
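As a cross-check of the byte table above, here is a small standalone decoder for the six extension bytes. It mirrors the extraction in wiimod_drums_in_ext(); the sample report bytes are made up for illustration:

#include <stdio.h>

static void decode_drums(const unsigned char ext[6])
{
	unsigned int pressure = 7 - (ext[3] >> 5);	/* SOFT<7:5>, inverted */
	unsigned int which = (ext[2] >> 1) & 0x1f;
	unsigned int none = !!(ext[2] & 0x40);		/* NON: no pad hit */
	unsigned int hhp = !(ext[2] & 0x80);		/* hi-hat pedal flag */
	int sx = (ext[0] & 0x3f) - 0x20;		/* centre the stick */
	int sy = (ext[1] & 0x3f) - 0x20;

	printf("stick=(%d,%d) which=%#x none=%u hhp=%u pressure=%u\n",
	       sx, sy, which, none, hhp, pressure);
	/* Pad buttons in byte 5 are active-low. */
	printf("orange=%d red=%d bass=%d\n",
	       !(ext[5] & 0x80), !(ext[5] & 0x40), !(ext[5] & 0x04));
}

int main(void)
{
	const unsigned char sample[6] = { 0x20, 0x20, 0x32, 0x60, 0xff, 0xbf };

	decode_drums(sample);	/* red pad hit with pressure 4 */
	return 0;
}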
+
+static int wiimod_drums_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimod_drums_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+}
+
+static int wiimod_drums_probe(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ int ret;
+
+ wdata->extension.input = input_allocate_device();
+ if (!wdata->extension.input)
+ return -ENOMEM;
+
+ input_set_drvdata(wdata->extension.input, wdata);
+ wdata->extension.input->open = wiimod_drums_open;
+ wdata->extension.input->close = wiimod_drums_close;
+ wdata->extension.input->dev.parent = &wdata->hdev->dev;
+ wdata->extension.input->id.bustype = wdata->hdev->bus;
+ wdata->extension.input->id.vendor = wdata->hdev->vendor;
+ wdata->extension.input->id.product = wdata->hdev->product;
+ wdata->extension.input->id.version = wdata->hdev->version;
+ wdata->extension.input->name = WIIMOTE_NAME " Drums";
+
+ set_bit(EV_KEY, wdata->extension.input->evbit);
+ set_bit(BTN_START, wdata->extension.input->keybit);
+ set_bit(BTN_SELECT, wdata->extension.input->keybit);
+
+ set_bit(EV_ABS, wdata->extension.input->evbit);
+ set_bit(ABS_X, wdata->extension.input->absbit);
+ set_bit(ABS_Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT0X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT0Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT1X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT2X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT2Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT3X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT3Y, wdata->extension.input->absbit);
+ input_set_abs_params(wdata->extension.input,
+ ABS_X, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_Y, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT0X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT0Y, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT1X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT2X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT2Y, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT3X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT3Y, 0, 7, 0, 0);
+
+ ret = input_register_device(wdata->extension.input);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ input_free_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+ return ret;
+}
+
+static void wiimod_drums_remove(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ if (!wdata->extension.input)
+ return;
+
+ input_unregister_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+}
+
+static const struct wiimod_ops wiimod_drums = {
+ .flags = 0,
+ .arg = 0,
+ .probe = wiimod_drums_probe,
+ .remove = wiimod_drums_remove,
+ .in_ext = wiimod_drums_in_ext,
+};
+
+/*
+ * Guitar
+ * Guitar-Hero, Rock-Band and other games came bundled with guitars which can
+ * be plugged as an extension to a Wiimote.
+ * We create a separate device for guitars and report all information via this
+ * input device.
+ */
+
+enum wiimod_guitar_keys {
+ WIIMOD_GUITAR_KEY_G,
+ WIIMOD_GUITAR_KEY_R,
+ WIIMOD_GUITAR_KEY_Y,
+ WIIMOD_GUITAR_KEY_B,
+ WIIMOD_GUITAR_KEY_O,
+ WIIMOD_GUITAR_KEY_UP,
+ WIIMOD_GUITAR_KEY_DOWN,
+ WIIMOD_GUITAR_KEY_PLUS,
+ WIIMOD_GUITAR_KEY_MINUS,
+ WIIMOD_GUITAR_KEY_NUM,
+};
+
+static const __u16 wiimod_guitar_map[] = {
+ BTN_1, /* WIIMOD_GUITAR_KEY_G */
+ BTN_2, /* WIIMOD_GUITAR_KEY_R */
+ BTN_3, /* WIIMOD_GUITAR_KEY_Y */
+ BTN_4, /* WIIMOD_GUITAR_KEY_B */
+ BTN_5, /* WIIMOD_GUITAR_KEY_O */
+ BTN_DPAD_UP, /* WIIMOD_GUITAR_KEY_UP */
+ BTN_DPAD_DOWN, /* WIIMOD_GUITAR_KEY_DOWN */
+ BTN_START, /* WIIMOD_GUITAR_KEY_PLUS */
+ BTN_SELECT, /* WIIMOD_GUITAR_KEY_MINUS */
+};
+
+static void wiimod_guitar_in_ext(struct wiimote_data *wdata, const __u8 *ext)
+{
+ __u8 sx, sy, tb, wb, bd, bm, bp, bo, br, bb, bg, by, bu;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:0> |
+ * 2 | 0 | 0 | SY <5:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 3 | 0 | 0 | 0 | TB <4:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 4 | 0 | 0 | 0 | WB <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | 1 | BD | 1 | B- | 1 | B+ | 1 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BO | BR | BB | BG | BY | 1 | 1 | BU |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ *
+ * With Motion+ enabled, it will look like this:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:1> | BU |
+ * 2 | 0 | 0 | SY <5:1> | 1 |
+ * -----+-----+-----+-----+-----------------------+-----+
+ * 3 | 0 | 0 | 0 | TB <4:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 4 | 0 | 0 | 0 | WB <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | 1 | BD | 1 | B- | 1 | B+ | 1 |XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BO | BR | BB | BG | BY | 1 |XXXXX|XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+
+ sx = ext[0] & 0x3f;
+ sy = ext[1] & 0x3f;
+ tb = ext[2] & 0x1f;
+ wb = ext[3] & 0x1f;
+ bd = !(ext[4] & 0x40);
+ bm = !(ext[4] & 0x10);
+ bp = !(ext[4] & 0x04);
+ bo = !(ext[5] & 0x80);
+ br = !(ext[5] & 0x40);
+ bb = !(ext[5] & 0x20);
+ bg = !(ext[5] & 0x10);
+ by = !(ext[5] & 0x08);
+ bu = !(ext[5] & 0x01);
+
+ if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
+ bu = !(ext[0] & 0x01);
+ sx &= 0x3e;
+ sy &= 0x3e;
+ }
+
+ input_report_abs(wdata->extension.input, ABS_X, sx - 0x20);
+ input_report_abs(wdata->extension.input, ABS_Y, sy - 0x20);
+ input_report_abs(wdata->extension.input, ABS_HAT0X, tb);
+ input_report_abs(wdata->extension.input, ABS_HAT1X, wb - 0x10);
+
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_G],
+ bg);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_R],
+ br);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_Y],
+ by);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_B],
+ bb);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_O],
+ bo);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_UP],
+ bu);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_DOWN],
+ bd);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_PLUS],
+ bp);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_MINUS],
+ bm);
+
+ input_sync(wdata->extension.input);
+}
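The Motion+ pass-through case in the second table costs the low bit of the stick axes and relocates BU, which is why the decode above re-reads bu and masks sx/sy when WIIPROTO_FLAG_MP_ACTIVE is set. A standalone sketch of just that corner; the sample bytes are invented:

#include <stdio.h>

static void decode_guitar_sticks(const unsigned char ext[6], int mp_active)
{
	unsigned int sx = ext[0] & 0x3f;
	unsigned int sy = ext[1] & 0x3f;
	unsigned int bu = !(ext[5] & 0x01);	/* strum-up, active low */

	if (mp_active) {
		bu = !(ext[0] & 0x01);	/* BU relocated by pass-through */
		sx &= 0x3e;		/* drop the invalid low bit */
		sy &= 0x3e;
	}
	printf("sx=%d sy=%d up=%u\n", (int)sx - 0x20, (int)sy - 0x20, bu);
}

int main(void)
{
	const unsigned char sample[6] = { 0x21, 0x20, 0x10, 0x08, 0xff, 0xfe };

	decode_guitar_sticks(sample, 1);
	return 0;
}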
+
+static int wiimod_guitar_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimod_guitar_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+}
+
+static int wiimod_guitar_probe(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ int ret, i;
+
+ wdata->extension.input = input_allocate_device();
+ if (!wdata->extension.input)
+ return -ENOMEM;
+
+ input_set_drvdata(wdata->extension.input, wdata);
+ wdata->extension.input->open = wiimod_guitar_open;
+ wdata->extension.input->close = wiimod_guitar_close;
+ wdata->extension.input->dev.parent = &wdata->hdev->dev;
+ wdata->extension.input->id.bustype = wdata->hdev->bus;
+ wdata->extension.input->id.vendor = wdata->hdev->vendor;
+ wdata->extension.input->id.product = wdata->hdev->product;
+ wdata->extension.input->id.version = wdata->hdev->version;
+ wdata->extension.input->name = WIIMOTE_NAME " Guitar";
+
+ set_bit(EV_KEY, wdata->extension.input->evbit);
+ for (i = 0; i < WIIMOD_GUITAR_KEY_NUM; ++i)
+ set_bit(wiimod_guitar_map[i],
+ wdata->extension.input->keybit);
+
+ set_bit(EV_ABS, wdata->extension.input->evbit);
+ set_bit(ABS_X, wdata->extension.input->absbit);
+ set_bit(ABS_Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT0X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT1X, wdata->extension.input->absbit);
+ input_set_abs_params(wdata->extension.input,
+ ABS_X, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_Y, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT0X, 0, 0x1f, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT1X, 0, 0x0f, 1, 1);
+
+ ret = input_register_device(wdata->extension.input);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ input_free_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+ return ret;
+}
+
+static void wiimod_guitar_remove(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ if (!wdata->extension.input)
+ return;
+
+ input_unregister_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+}
+
+static const struct wiimod_ops wiimod_guitar = {
+ .flags = 0,
+ .arg = 0,
+ .probe = wiimod_guitar_probe,
+ .remove = wiimod_guitar_remove,
+ .in_ext = wiimod_guitar_in_ext,
+};
+
+/*
* Builtin Motion Plus
* This module simply sets the WIIPROTO_FLAG_BUILTIN_MP protocol flag which
* disables polling for Motion-Plus. This should be set only for devices which
@@ -2201,4 +2639,6 @@ const struct wiimod_ops *wiimod_ext_table[WIIMOTE_EXT_NUM] = {
[WIIMOTE_EXT_CLASSIC_CONTROLLER] = &wiimod_classic,
[WIIMOTE_EXT_BALANCE_BOARD] = &wiimod_bboard,
[WIIMOTE_EXT_PRO_CONTROLLER] = &wiimod_pro,
+ [WIIMOTE_EXT_DRUMS] = &wiimod_drums,
+ [WIIMOTE_EXT_GUITAR] = &wiimod_guitar,
};
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index 510ca77fe14e..3bf3d3cc1c38 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -89,6 +89,8 @@ enum wiimote_exttype {
WIIMOTE_EXT_CLASSIC_CONTROLLER,
WIIMOTE_EXT_BALANCE_BOARD,
WIIMOTE_EXT_PRO_CONTROLLER,
+ WIIMOTE_EXT_DRUMS,
+ WIIMOTE_EXT_GUITAR,
WIIMOTE_EXT_NUM,
};
@@ -137,6 +139,7 @@ struct wiimote_state {
/* calibration/cache data */
__u16 calib_bboard[4][3];
__s16 calib_pro_sticks[4];
+ __u8 pressure_drums[7];
__u8 cache_rumble;
};
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1652bb7bd15..2ce194a84868 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
- if ((ret_size > size) || (ret_size <= 2)) {
+ if ((ret_size > size) || (ret_size < 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;
@@ -1002,18 +1002,18 @@ static int i2c_hid_probe(struct i2c_client *client,
return client->irq;
}
- ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
+ ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
if (!ihid)
return -ENOMEM;
if (client->dev.of_node) {
ret = i2c_hid_of_probe(client, &ihid->pdata);
if (ret)
- goto err;
+ return ret;
} else if (!platform_data) {
ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
if (ret)
- goto err;
+ return ret;
} else {
ihid->pdata = *platform_data;
}
@@ -1021,21 +1021,20 @@ static int i2c_hid_probe(struct i2c_client *client,
/* Parse platform agnostic common properties from ACPI / device tree */
i2c_hid_fwnode_probe(client, &ihid->pdata);
- ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(ihid->pdata.supply)) {
- ret = PTR_ERR(ihid->pdata.supply);
- if (ret != -EPROBE_DEFER)
- dev_err(&client->dev, "Failed to get regulator: %d\n",
- ret);
- goto err;
- }
+ ihid->pdata.supplies[0].supply = "vdd";
+ ihid->pdata.supplies[1].supply = "vddl";
+
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
+ if (ret < 0)
+ return ret;
- ret = regulator_enable(ihid->pdata.supply);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to enable regulator: %d\n",
- ret);
- goto err;
- }
if (ihid->pdata.post_power_delay_ms)
msleep(ihid->pdata.post_power_delay_ms);
@@ -1122,11 +1121,9 @@ err_pm:
pm_runtime_disable(&client->dev);
err_regulator:
- regulator_disable(ihid->pdata.supply);
-
-err:
+ regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
i2c_hid_free_buffers(ihid);
- kfree(ihid);
return ret;
}
@@ -1148,9 +1145,8 @@ static int i2c_hid_remove(struct i2c_client *client)
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- regulator_disable(ihid->pdata.supply);
-
- kfree(ihid);
+ regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
return 0;
}
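The regulator changes in this file follow the regulator_bulk pattern: declare every supply once in an array, then get, enable and disable them as a group. A kernel-style sketch of the pattern with the two rails this driver names ("vdd" and "vddl"); the function and array names are illustrative:

#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdd" },	/* main rail */
	{ .supply = "vddl" },	/* low-voltage IO rail */
};

static int example_power_on(struct device *dev)
{
	int ret;

	/* One call fetches every named supply; devres frees them later. */
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				      example_supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				     example_supplies);
}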
@@ -1201,9 +1197,8 @@ static int i2c_hid_suspend(struct device *dev)
hid_warn(hid, "Failed to enable irq wake: %d\n",
wake_status);
} else {
- ret = regulator_disable(ihid->pdata.supply);
- if (ret < 0)
- hid_warn(hid, "Failed to disable supply: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
}
return 0;
@@ -1218,9 +1213,11 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (!device_may_wakeup(&client->dev)) {
- ret = regulator_enable(ihid->pdata.supply);
- if (ret < 0)
- hid_warn(hid, "Failed to enable supply: %d\n", ret);
+ ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
+ if (ret)
+ hid_warn(hid, "Failed to enable supplies: %d\n", ret);
+
if (ihid->pdata.post_power_delay_ms)
msleep(ihid->pdata.post_power_delay_ms);
} else if (ihid->irq_wake_enabled) {
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 9a60ec13cb10..bfbca7ec54ce 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -907,8 +907,9 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
struct ishtp_device *dev;
int i;
- dev = kzalloc(sizeof(struct ishtp_device) + sizeof(struct ish_hw),
- GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev,
+ sizeof(struct ishtp_device) + sizeof(struct ish_hw),
+ GFP_KERNEL);
if (!dev)
return NULL;
@@ -925,7 +926,9 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
for (i = 0; i < IPC_TX_FIFO_SIZE; ++i) {
struct wr_msg_ctl_info *tx_buf;
- tx_buf = kzalloc(sizeof(struct wr_msg_ctl_info), GFP_KERNEL);
+ tx_buf = devm_kzalloc(&pdev->dev,
+ sizeof(struct wr_msg_ctl_info),
+ GFP_KERNEL);
if (!tx_buf) {
/*
* IPC buffers may be limited or not available
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a2c53ea3b5ed..050f9872f5c0 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -95,6 +95,13 @@ static int ish_init(struct ishtp_device *dev)
return 0;
}
+static const struct pci_device_id ish_invalid_pci_ids[] = {
+ /* Mehlow platform special pci ids */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA309)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA30A)},
+ {}
+};
+
/**
* ish_probe() - PCI driver probe callback
* @pdev: pci device
@@ -110,6 +117,10 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ish_hw *hw;
int ret;
+ /* Check for invalid platforms for ISH support */
+ if (pci_dev_present(ish_invalid_pci_ids))
+ return -ENODEV;
+
/* enable pci dev */
ret = pci_enable_device(pdev);
if (ret) {
@@ -172,7 +183,6 @@ free_irq:
free_irq(pdev->irq, dev);
free_device:
pci_iounmap(pdev, hw->mem_addr);
- kfree(dev);
release_regions:
pci_release_regions(pdev);
disable_device:
@@ -202,7 +212,6 @@ static void ish_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_master(pdev);
pci_disable_device(pdev);
- kfree(ishtp_dev);
}
static struct device __maybe_unused *ish_resume_device;
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index ae4a69f7f2f4..8b5dd580ceec 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -298,7 +298,6 @@ int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
struct ishtp_msg_hdr *ishtp_hdr = &hdr;
const size_t len = sizeof(struct hbm_flow_control);
int rv;
- unsigned int num_frags;
unsigned long flags;
spin_lock_irqsave(&cl->fc_spinlock, flags);
@@ -314,7 +313,6 @@ int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
return 0;
}
- num_frags = cl->recv_msg_num_frags;
cl->recv_msg_num_frags = 0;
rv = ishtp_write_message(dev, ishtp_hdr, data);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index af0e0d061b15..11103efebbaa 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -480,6 +480,7 @@ static void hid_ctrl(struct urb *urb)
{
struct hid_device *hid = urb->context;
struct usbhid_device *usbhid = hid->driver_data;
+ unsigned long flags;
int unplug = 0, status = urb->status;
switch (status) {
@@ -501,7 +502,7 @@ static void hid_ctrl(struct urb *urb)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
- spin_lock(&usbhid->lock);
+ spin_lock_irqsave(&usbhid->lock, flags);
if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
@@ -511,13 +512,13 @@ static void hid_ctrl(struct urb *urb)
if (usbhid->ctrlhead != usbhid->ctrltail &&
hid_submit_ctrl(hid) == 0) {
/* Successfully submitted next urb in queue */
- spin_unlock(&usbhid->lock);
+ spin_unlock_irqrestore(&usbhid->lock, flags);
return;
}
}
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- spin_unlock(&usbhid->lock);
+ spin_unlock_irqrestore(&usbhid->lock, flags);
usb_autopm_put_interface_async(usbhid->intf);
wake_up(&usbhid->wait);
}
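The switch to spin_lock_irqsave() above reflects that URB completion handlers are not guaranteed to run with interrupts disabled, so the handler must save and restore the interrupt state itself rather than assume hard-IRQ context. The shape of the pattern, on a hypothetical structure rather than the usbhid one:

#include <linux/spinlock.h>

/* Hypothetical illustration, not the usbhid structures. */
struct example {
	spinlock_t lock;
};

static void completion_handler(struct example *ex)
{
	unsigned long flags;

	spin_lock_irqsave(&ex->lock, flags);
	/* ... walk and update the ctrl queue ... */
	spin_unlock_irqrestore(&ex->lock, flags);
}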
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e3ce233f8bdc..23872d08308c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -36,6 +36,7 @@
#include <linux/hiddev.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#include "usbhid.h"
#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index = array_index_nospec(uref->usage_index,
+ field->maxusage);
uref->usage_code = field->usage[uref->usage_index].hid;
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (finfo.field_index >= report->maxfield)
break;
+ finfo.field_index = array_index_nospec(finfo.field_index,
+ report->maxfield);
field = report->field[finfo.field_index];
memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cinfo.index >= hid->maxcollection)
break;
+ cinfo.index = array_index_nospec(cinfo.index,
+ hid->maxcollection);
cinfo.type = hid->collection[cinfo.index].type;
cinfo.usage = hid->collection[cinfo.index].usage;
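Every hunk in this file applies the same Spectre-v1 hardening pattern: bounds-check the user-supplied index first, then clamp it with array_index_nospec() before it is used to address memory, so a mispredicted branch cannot speculatively read out of bounds. A kernel-style sketch with an illustrative function and table:

#include <linux/errno.h>
#include <linux/nospec.h>

static int example_lookup(const int *table, unsigned int nr,
			  unsigned int index)
{
	if (index >= nr)
		return -EINVAL;

	/* Clamp the index so it is in range even under speculation. */
	index = array_index_nospec(index, nr);
	return table[index];
}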
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index d6797535fff9..0bdd85d486fe 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -210,6 +210,57 @@ static int wacom_calc_hid_res(int logical_extents, int physical_extents,
return hidinput_calc_abs_res(&field, ABS_X);
}
+static void wacom_hid_usage_quirk(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_features *features = &wacom->wacom_wac.features;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ /*
+ * The Dell Canvas 27 needs to be switched to its vendor-defined
+ * report to provide the best resolution.
+ */
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x4200 &&
+ field->application == HID_UP_MSVENDOR) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ }
+
+ /*
+ * ISDv4 devices which predate HID's adoption of the
+ * HID_DG_BARRELSWITCH2 usage use 0x000D0000 in its
+ * position instead. We can accurately detect if a
+ * usage with that value should be HID_DG_BARRELSWITCH2
+ * based on the surrounding usages, which have remained
+ * constant across generations.
+ */
+ if (features->type == HID_GENERIC &&
+ usage->hid == 0x000D0000 &&
+ field->application == HID_DG_PEN &&
+ field->physical == HID_DG_STYLUS) {
+ int i = usage->usage_index;
+
+ if (i-4 >= 0 && i+1 < field->maxusage &&
+ field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+ field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+ field->usage[i-2].hid == HID_DG_ERASER &&
+ field->usage[i-1].hid == HID_DG_INVERT &&
+ field->usage[i+1].hid == HID_DG_INRANGE) {
+ usage->hid = HID_DG_BARRELSWITCH2;
+ }
+ }
+
+ /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x0358 &&
+ WACOM_PEN_FIELD(field) &&
+ equivalent_usage == HID_GD_Y) {
+ field->logical_maximum = 43200;
+ }
+}
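The BARRELSWITCH2 detection above fingerprints the suspect usage by its neighbours rather than by value alone. A condensed sketch of that test; the hex constants are the digitizer-page usage codes the driver compares against (HID_DG_TIPSWITCH and friends), while the helper name and flat array are made up for illustration:

static int looks_like_barrelswitch2(const unsigned int *usages, int i,
				    int maxusage)
{
	return i - 4 >= 0 && i + 1 < maxusage &&
	       usages[i] == 0x000D0000 &&
	       usages[i - 4] == 0x000d0042 &&	/* HID_DG_TIPSWITCH */
	       usages[i - 3] == 0x000d0044 &&	/* HID_DG_BARRELSWITCH */
	       usages[i - 2] == 0x000d0045 &&	/* HID_DG_ERASER */
	       usages[i - 1] == 0x000d003c &&	/* HID_DG_INVERT */
	       usages[i + 1] == 0x000d0032;	/* HID_DG_INRANGE */
}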
+
static void wacom_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -221,6 +272,8 @@ static void wacom_feature_mapping(struct hid_device *hdev,
int ret;
u32 n;
+ wacom_hid_usage_quirk(hdev, field, usage);
+
switch (equivalent_usage) {
case HID_DG_CONTACTMAX:
/* leave touch_max as is if predefined */
@@ -300,13 +353,6 @@ static void wacom_feature_mapping(struct hid_device *hdev,
kfree(data);
break;
}
-
- if (hdev->vendor == USB_VENDOR_ID_WACOM &&
- hdev->product == 0x4200 /* Dell Canvas 27 */ &&
- field->application == HID_UP_MSVENDOR) {
- wacom->wacom_wac.mode_report = field->report->id;
- wacom->wacom_wac.mode_value = 2;
- }
}
/*
@@ -348,6 +394,7 @@ static void wacom_usage_mapping(struct hid_device *hdev,
struct wacom_features *features = &wacom->wacom_wac.features;
bool finger = WACOM_FINGER_FIELD(field);
bool pen = WACOM_PEN_FIELD(field);
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
/*
* Requiring Stylus Usage will ignore boot mouse
@@ -361,49 +408,9 @@ static void wacom_usage_mapping(struct hid_device *hdev,
else
return;
- /*
- * Bamboo models do not support HID_DG_CONTACTMAX.
- * And, Bamboo Pen only descriptor contains touch.
- */
- if (features->type > BAMBOO_PT) {
- /* ISDv4 touch devices at least supports one touch point */
- if (finger && !features->touch_max)
- features->touch_max = 1;
- }
-
- /*
- * ISDv4 devices which predate HID's adoption of the
- * HID_DG_BARELSWITCH2 usage use 0x000D0000 in its
- * position instead. We can accurately detect if a
- * usage with that value should be HID_DG_BARRELSWITCH2
- * based on the surrounding usages, which have remained
- * constant across generations.
- */
- if (features->type == HID_GENERIC &&
- usage->hid == 0x000D0000 &&
- field->application == HID_DG_PEN &&
- field->physical == HID_DG_STYLUS) {
- int i = usage->usage_index;
+ wacom_hid_usage_quirk(hdev, field, usage);
- if (i-4 >= 0 && i+1 < field->maxusage &&
- field->usage[i-4].hid == HID_DG_TIPSWITCH &&
- field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
- field->usage[i-2].hid == HID_DG_ERASER &&
- field->usage[i-1].hid == HID_DG_INVERT &&
- field->usage[i+1].hid == HID_DG_INRANGE) {
- usage->hid = HID_DG_BARRELSWITCH2;
- }
- }
-
- /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
- if (hdev->vendor == USB_VENDOR_ID_WACOM &&
- hdev->product == 0x0358 &&
- WACOM_PEN_FIELD(field) &&
- wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
- field->logical_maximum = 43200;
- }
-
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
features->x_max = field->logical_maximum;
if (finger) {
@@ -703,18 +710,6 @@ struct wacom_hdev_data {
static LIST_HEAD(wacom_udev_list);
static DEFINE_MUTEX(wacom_udev_list_lock);
-static bool compare_device_paths(struct hid_device *hdev_a,
- struct hid_device *hdev_b, char separator)
-{
- int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
- int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
-
- if (n1 != n2 || n1 <= 0 || n2 <= 0)
- return false;
-
- return !strncmp(hdev_a->phys, hdev_b->phys, n1);
-}
-
static bool wacom_are_sibling(struct hid_device *hdev,
struct hid_device *sibling)
{
@@ -737,10 +732,10 @@ static bool wacom_are_sibling(struct hid_device *hdev,
* the same physical parent device path.
*/
if (hdev->vendor == sibling->vendor && hdev->product == sibling->product) {
- if (!compare_device_paths(hdev, sibling, '/'))
+ if (!hid_compare_device_paths(hdev, sibling, '/'))
return false;
} else {
- if (!compare_device_paths(hdev, sibling, '.'))
+ if (!hid_compare_device_paths(hdev, sibling, '.'))
return false;
}
@@ -787,7 +782,7 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
/* Try to find an already-probed interface from the same device */
list_for_each_entry(data, &wacom_udev_list, list) {
- if (compare_device_paths(hdev, data->dev, '/')) {
+ if (hid_compare_device_paths(hdev, data->dev, '/')) {
kref_get(&data->kref);
return data;
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0bb44d0088ed..e0a06be5ef5c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
features->device_type |= WACOM_DEVICETYPE_PAD;
- features->x_max = 4096;
- features->y_max = 4096;
+ if (features->type == INTUOSHT2) {
+ features->x_max /= 10;
+ features->y_max /= 10;
+ } else {
+ features->x_max = 4096;
+ features->y_max = 4096;
+ }
}
else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->device_type |= WACOM_DEVICETYPE_PAD;
@@ -4351,19 +4357,19 @@ static const struct wacom_features wacom_features_0x5E =
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x90 =
{ "Wacom ISDv4 90", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x93 =
{ "Wacom ISDv4 93", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0x97 =
{ "Wacom ISDv4 97", 26202, 16325, 511, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x9A =
{ "Wacom ISDv4 9A", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0x9F =
{ "Wacom ISDv4 9F", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0xE2 =
{ "Wacom ISDv4 E2", 26202, 16325, 255, 0,
TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
@@ -4378,13 +4384,13 @@ static const struct wacom_features wacom_features_0xE6 =
TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xEC =
{ "Wacom ISDv4 EC", 25710, 14500, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0xED =
{ "Wacom ISDv4 ED", 26202, 16325, 255, 0,
- TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0xEF =
{ "Wacom ISDv4 EF", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x100 =
{ "Wacom ISDv4 100", 26202, 16325, 255, 0,
MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -4402,10 +4408,10 @@ static const struct wacom_features wacom_features_0x10F =
MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x116 =
{ "Wacom ISDv4 116", 26202, 16325, 255, 0,
- TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0x12C =
{ "Wacom ISDv4 12C", 27848, 15752, 2047, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x4001 =
{ "Wacom ISDv4 4001", 26202, 16325, 255, 0,
MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 7765de2f1ef1..2ada82d2ec8c 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -20,6 +20,7 @@
* 02110-1301 USA
*/
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ba0a092ae085..741857d80da1 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -29,12 +29,26 @@
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
+#include <asm/page.h>
#include "hyperv_vmbus.h"
#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
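+/*
+ * Convert a guest virtual address to a Hyper-V page frame number.
+ * vmalloc() memory is virtually contiguous but not physically
+ * contiguous, so __pa() is only valid for direct-map addresses;
+ * for vmalloc addresses the page must be looked up with
+ * vmalloc_to_page() instead.
+ */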
+static unsigned long virt_to_hvpfn(void *addr)
+{
+ unsigned long paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> PAGE_SHIFT;
+}
+
/*
* vmbus_setevent- Trigger an event notification on the specified
* channel.
@@ -298,8 +312,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
+ gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * i);
*msginfo = msgheader;
pfnsum = pfncount;
@@ -350,9 +364,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
* so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * (pfnsum + i)) >>
- PAGE_SHIFT;
+ gpadl_body->pfn[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * (pfnsum + i));
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -380,8 +393,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
+ gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * i);
*msginfo = msgheader;
}
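
Aside: the new virt_to_hvpfn() helper and the existing NUM_PAGES_SPANNED()
macro both reduce to page-frame arithmetic. A stand-alone userspace sketch of
that arithmetic, with a hypothetical physical address (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define NUM_PAGES_SPANNED(addr, len) \
	((PAGE_ALIGN((addr) + (len)) >> PAGE_SHIFT) - ((addr) >> PAGE_SHIFT))

int main(void)
{
	unsigned long paddr = 0x12345678;	/* hypothetical address */

	printf("pfn=0x%lx offset=0x%lx\n",
	       paddr >> PAGE_SHIFT, paddr & (PAGE_SIZE - 1));
	/* A 3-page buffer starting mid-page spans 4 page frames. */
	printf("pages spanned=%lu\n", NUM_PAGES_SPANNED(paddr, 3 * PAGE_SIZE));
	return 0;
}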
@@ -558,11 +571,8 @@ static void reset_channel_cb(void *arg)
channel->onchannel_callback = NULL;
}
-static int vmbus_close_internal(struct vmbus_channel *channel)
+void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
- struct vmbus_channel_close_channel *msg;
- int ret;
-
/*
* vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
@@ -572,6 +582,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
*/
tasklet_disable(&channel->callback_event);
+ channel->sc_creation_callback = NULL;
+
+ /* Stop the callback asap */
+ if (channel->target_cpu != get_cpu()) {
+ put_cpu();
+ smp_call_function_single(channel->target_cpu, reset_channel_cb,
+ channel, true);
+ } else {
+ reset_channel_cb(channel);
+ put_cpu();
+ }
+
+ /* Re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
+}
+
+static int vmbus_close_internal(struct vmbus_channel *channel)
+{
+ struct vmbus_channel_close_channel *msg;
+ int ret;
+
+ vmbus_reset_channel_cb(channel);
+
/*
* In case a device driver's probe() fails (e.g.,
* util_probe() -> vmbus_open() returns -ENOMEM) and the device is
@@ -585,16 +618,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
}
channel->state = CHANNEL_OPEN_STATE;
- channel->sc_creation_callback = NULL;
- /* Stop callback and cancel the timer asap */
- if (channel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(channel->target_cpu, reset_channel_cb,
- channel, true);
- } else {
- reset_channel_cb(channel);
- put_cpu();
- }
/* Send a closing message */
@@ -639,8 +662,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
- /* re-enable tasklet for use on re-open */
- tasklet_enable(&channel->callback_event);
return ret;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ecc2bd275a73..0f0e091c117c 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -527,10 +527,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
struct hv_device *dev
= newchannel->primary_channel->device_obj;
- if (vmbus_add_channel_kobj(dev, newchannel)) {
- atomic_dec(&vmbus_connection.offer_in_progress);
+ if (vmbus_add_channel_kobj(dev, newchannel))
goto err_free_chan;
- }
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
@@ -895,6 +893,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
}
/*
+ * Before setting channel->rescind in vmbus_rescind_cleanup(), we
+ * should make sure the channel callback is not running any more.
+ */
+ vmbus_reset_channel_cb(channel);
+
+ /*
* Now wait for offer handling to complete.
*/
vmbus_rescind_cleanup(channel);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 658dc765753b..748a1c4172a6 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -64,7 +64,7 @@ int hv_init(void)
return -ENOMEM;
direct_mode_enabled = ms_hyperv.misc_features &
- HV_X64_STIMER_DIRECT_MODE_AVAILABLE;
+ HV_STIMER_DIRECT_MODE_AVAILABLE;
return 0;
}
@@ -127,14 +127,14 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hyperv_cs->read(NULL);
current_tick += delta;
- hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
+ hv_init_timer(0, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
- hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);
+ hv_init_timer(0, 0);
+ hv_init_timer_config(0, 0);
if (direct_mode_enabled)
hv_disable_stimer0_percpu_irq(stimer0_irq);
@@ -164,7 +164,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = VMBUS_MESSAGE_SINT;
}
- hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+ hv_init_timer_config(0, timer_cfg.as_uint64);
return 0;
}
@@ -242,6 +242,10 @@ int hv_synic_alloc(void)
return 0;
err:
+ /*
+ * Any memory allocations that succeeded will be freed when
+ * the caller cleans up by calling hv_synic_free()
+ */
return -ENOMEM;
}
@@ -254,12 +258,10 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
- if (hv_cpu->synic_event_page)
- free_page((unsigned long)hv_cpu->synic_event_page);
- if (hv_cpu->synic_message_page)
- free_page((unsigned long)hv_cpu->synic_message_page);
- if (hv_cpu->post_msg_page)
- free_page((unsigned long)hv_cpu->post_msg_page);
+ kfree(hv_cpu->clk_evt);
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->post_msg_page);
}
kfree(hv_context.hv_numa_map);
@@ -298,18 +300,16 @@ int hv_synic_init(unsigned int cpu)
hv_set_siefp(siefp.as_uint64);
/* Setup the shared SINT. */
- hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
shared_sint.masked = false;
- if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
+ if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
shared_sint.auto_eoi = false;
else
shared_sint.auto_eoi = true;
- hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
hv_get_synic_state(sctrl.as_uint64);
@@ -322,7 +322,7 @@ int hv_synic_init(unsigned int cpu)
/*
* Register the per-cpu clockevent source.
*/
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE)
clockevents_config_and_register(hv_cpu->clk_evt,
HV_TIMER_FREQUENCY,
HV_MIN_DELTA_TICKS,
@@ -337,7 +337,7 @@ void hv_synic_clockevents_cleanup(void)
{
int cpu;
- if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
+ if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
return;
if (direct_mode_enabled)
@@ -396,7 +396,7 @@ int hv_synic_cleanup(unsigned int cpu)
return -EBUSY;
/* Turn off clockevent device */
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
struct hv_per_cpu_context *hv_cpu
= this_cpu_ptr(hv_context.cpu_context);
@@ -405,15 +405,13 @@ int hv_synic_cleanup(unsigned int cpu)
put_cpu_ptr(hv_cpu);
}
- hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
hv_get_simp(simp.as_uint64);
simp.simp_enabled = 0;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b3e9f13f8bc3..b1b788082793 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1765,6 +1765,9 @@ static struct hv_driver balloon_drv = {
.id_table = id_table,
.probe = balloon_probe,
.remove = balloon_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init init_balloon_drv(void)
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 14dce25c104f..423205077bf6 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -487,6 +487,9 @@ static struct hv_driver util_drv = {
.id_table = id_table,
.probe = util_probe,
.remove = util_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int hv_ptp_enable(struct ptp_clock_info *info,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index be3c8b10b84a..3e90eb91db45 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -431,7 +431,24 @@ static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
}
/*
- * Update host ring buffer after iterating over packets.
+ * Update host ring buffer after iterating over packets. If the host has
+ * stopped queuing new entries because it found the ring buffer full, and
+ * sufficient space is being freed up, signal the host. But be careful to
+ * only signal the host when necessary, both for performance reasons and
+ * because Hyper-V protects itself by throttling guests that signal
+ * inappropriately.
+ *
+ * Determining when to signal is tricky. There are three key data inputs
+ * that must be handled in this order to avoid race conditions:
+ *
+ * 1. Update the read_index
+ * 2. Read the pending_send_sz
+ * 3. Read the current write_index
+ *
+ * The interrupt_mask is not used to determine when to signal. The
+ * interrupt_mask is used only on the guest->host ring buffer when
+ * sending requests to the host. The host does not use it on the host->
+ * guest ring buffer to indicate whether it should be signaled.
*/
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
@@ -447,22 +464,30 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
start_read_index = rbi->ring_buffer->read_index;
rbi->ring_buffer->read_index = rbi->priv_read_index;
+ /*
+	 * Older versions of Hyper-V (before WS2012 and Win8) do not
+ * implement pending_send_sz and simply poll if the host->guest
+ * ring buffer is full. No signaling is needed or expected.
+ */
if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
return;
/*
* Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
+ * If reading pending_send_sz were to be reordered and happen
+ * before we commit the new read_index, a race could occur. If the
+ * host were to set the pending_send_sz after we have sampled
+ * pending_send_sz, and the ring buffer blocks before we commit the
* read index, we could miss sending the interrupt. Issue a full
* memory barrier to address this.
*/
virt_mb();
+ /*
+ * If the pending_send_sz is zero, then the ring buffer is not
+	 * blocked and there is no need to signal. This is by far the
+	 * most common case, so exit quickly for best performance.
+ */
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
if (!pending_sz)
return;
@@ -476,14 +501,32 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
/*
- * If there was space before we began iteration,
- * then host was not blocked.
+ * We want to signal the host only if we're transitioning
+ * from a "not enough free space" state to a "enough free
+ * space" state. For example, it's possible that this function
+ * could run and free up enough space to signal the host, and then
+ * run again and free up additional space before the host has a
+ * chance to clear the pending_send_sz. The 2nd invocation would
+ * be a null transition from "enough free space" to "enough free
+ * space", which doesn't warrant a signal.
+ *
+ * Exactly filling the ring buffer is treated as "not enough
+ * space". The ring buffer always must have at least one byte
+ * empty so the empty and full conditions are distinguishable.
+ * hv_get_bytes_to_write() doesn't fully tell the truth in
+ * this regard.
+ *
+ * So first check if we were in the "enough free space" state
+ * before we began the iteration. If so, the host was not
+ * blocked, and there's no need to signal.
*/
-
if (curr_write_sz - bytes_read > pending_sz)
return;
- /* If pending write will not fit, don't give false hope. */
+ /*
+ * Similarly, if the new state is "not enough space", then
+ * there's no need to signal.
+ */
if (curr_write_sz <= pending_sz)
return;
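
Aside: the whole signaling decision above boils down to a threshold-crossing
test. A minimal userspace sketch of that predicate, with hypothetical byte
counts (free_now corresponds to hv_get_bytes_to_write() after the read_index
has been committed):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool needs_signal(uint32_t free_now, uint32_t bytes_read,
			 uint32_t pending_sz)
{
	uint32_t free_before = free_now - bytes_read;

	if (free_before > pending_sz)	/* host was never blocked */
		return false;
	if (free_now <= pending_sz)	/* still not enough space */
		return false;
	return true;			/* crossed the threshold: signal */
}

int main(void)
{
	/* 100 bytes free after reading 60 more; host asked for 80. */
	printf("signal=%d\n", needs_signal(100, 60, 80));
	return 0;
}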
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b10fe26c4891..b1b548a21f91 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -56,6 +56,8 @@ static struct completion probe_event;
static int hyperv_cpuhp_online;
+static void *hv_panic_page;
+
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
void *args)
{
@@ -208,6 +210,20 @@ static ssize_t modalias_show(struct device *dev,
}
static DEVICE_ATTR_RO(modalias);
+#ifdef CONFIG_NUMA
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
+}
+static DEVICE_ATTR_RO(numa_node);
+#endif
+
static ssize_t server_monitor_pending_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
@@ -490,6 +506,9 @@ static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_class_id.attr,
&dev_attr_device_id.attr,
&dev_attr_modalias.attr,
+#ifdef CONFIG_NUMA
+ &dev_attr_numa_node.attr,
+#endif
&dev_attr_server_monitor_pending.attr,
&dev_attr_client_monitor_pending.attr,
&dev_attr_server_monitor_latency.attr,
@@ -1018,6 +1037,72 @@ static void vmbus_isr(void)
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
+/*
+ * Boolean to control whether to report panic messages over Hyper-V.
+ *
+ * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
+ */
+static int sysctl_record_panic_msg = 1;
+
+/*
+ * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
+ * buffer and call into Hyper-V to transfer the data.
+ */
+static void hv_kmsg_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ size_t bytes_written;
+ phys_addr_t panic_pa;
+
+ /* We are only interested in panics. */
+ if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
+ return;
+
+ panic_pa = virt_to_phys(hv_panic_page);
+
+ /*
+ * Write dump contents to the page. No need to synchronize; panic should
+ * be single-threaded.
+ */
+ kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
+ &bytes_written);
+ if (bytes_written)
+ hyperv_report_panic_msg(panic_pa, bytes_written);
+}
+
+static struct kmsg_dumper hv_kmsg_dumper = {
+ .dump = hv_kmsg_dump,
+};
+
+static struct ctl_table_header *hv_ctl_table_hdr;
+static int zero;
+static int one = 1;
+
+/*
+ * sysctl option to allow the user to control whether kmsg data should be
+ * reported to Hyper-V on panic.
+ */
+static struct ctl_table hv_ctl_table[] = {
+ {
+ .procname = "hyperv_record_panic_msg",
+ .data = &sysctl_record_panic_msg,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one
+ },
+ {}
+};
+
+static struct ctl_table hv_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = hv_ctl_table
+ },
+ {}
+};
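
Aside: with the table pair above, the knob lands at
/proc/sys/kernel/hyperv_record_panic_msg, and proc_dointvec_minmax bounds
writes to 0..1. A hypothetical userspace read of it:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/hyperv_record_panic_msg", "r");
	int val;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("record_panic_msg = %d\n", val);
	fclose(f);
	return 0;
}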
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
@@ -1065,6 +1150,32 @@ static int vmbus_bus_init(void)
* Only register if the crash MSRs are available
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ u64 hyperv_crash_ctl;
+ /*
+ * Sysctl registration is not fatal, since by default
+ * reporting is enabled.
+ */
+ hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
+ if (!hv_ctl_table_hdr)
+ pr_err("Hyper-V: sysctl table register error");
+
+ /*
+ * Register for panic kmsg callback only if the right
+ * capability is supported by the hypervisor.
+ */
+ hv_get_crash_ctl(hyperv_crash_ctl);
+ if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
+ hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (hv_panic_page) {
+ ret = kmsg_dump_register(&hv_kmsg_dumper);
+ if (ret)
+ pr_err("Hyper-V: kmsg dump register "
+ "error 0x%x\n", ret);
+ } else
+ pr_err("Hyper-V: panic message page memory "
+ "allocation failed");
+ }
+
register_die_notifier(&hyperv_die_block);
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_block);
@@ -1081,7 +1192,9 @@ err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
-
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
return ret;
}
@@ -1785,10 +1898,15 @@ static void __exit vmbus_exit(void)
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ kmsg_dump_unregister(&hv_kmsg_dumper);
unregister_die_notifier(&hyperv_die_block);
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_block);
}
+
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
bus_unregister(&hv_bus);
cpuhp_remove_state(hyperv_cpuhp_online);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f10840ad465c..81da17a42dc9 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -937,6 +937,18 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_MLXREG_FAN
+ tristate "Mellanox Mellanox FAN driver"
+ depends on MELLANOX_PLATFORM
+ imply THERMAL
+ select REGMAP
+ help
+ This option enables support for the FAN control on the Mellanox
+ Ethernet and InfiniBand switches. The driver can be activated by the
+	  platform device add call. Say Y to enable it. To compile this
+ driver as a module, choose 'M' here: the module will be called
+ mlxreg-fan.
+
config SENSORS_TC654
tristate "Microchip TC654/TC655 and compatibles"
depends on I2C
@@ -1256,6 +1268,16 @@ config SENSORS_NCT7904
This driver can also be built as a module. If so, the module
will be called nct7904.
+config SENSORS_NPCM7XX
+ tristate "Nuvoton NPCM750 and compatible PWM and Fan controllers"
+ imply THERMAL
+ help
+ This driver provides support for Nuvoton NPCM750/730/715/705 PWM
+ and Fan controllers.
+
+ This driver can also be built as a module. If so, the module
+ will be called npcm750-pwm-fan.
+
config SENSORS_NSA320
tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
depends on GPIOLIB && OF
@@ -1298,6 +1320,16 @@ config SENSORS_PWM_FAN
This driver can also be built as a module. If so, the module
will be called pwm-fan.
+config SENSORS_RASPBERRYPI_HWMON
+ tristate "Raspberry Pi voltage monitor"
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
+ help
+	  If you say yes here you get support for the voltage sensor on
+	  the Raspberry Pi.
+
+ This driver can also be built as a module. If so, the module
+ will be called raspberrypi-hwmon.
+
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e7d52a36e6c4..93f7f41ea4ad 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -129,11 +129,13 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
+obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o
+obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o
obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
@@ -141,6 +143,7 @@ obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
+obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o
obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o
obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 9ef84998c7f3..90837f7c7d0f 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -194,8 +194,7 @@ struct adt7475_data {
struct mutex lock;
unsigned long measure_updated;
- unsigned long limits_updated;
- char valid;
+ bool valid;
u8 config4;
u8 config5;
@@ -326,6 +325,9 @@ static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
unsigned short val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case ALARM:
return sprintf(buf, "%d\n",
@@ -381,6 +383,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case HYSTERSIS:
mutex_lock(&data->lock);
@@ -625,6 +630,9 @@ static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out, val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
mutex_lock(&data->lock);
out = (data->range[sattr->index] >> 4) & 0x0F;
val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
@@ -683,6 +691,9 @@ static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (sattr->nr == ALARM)
out = (data->alarms >> (sattr->index + 10)) & 1;
else
@@ -720,6 +731,9 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
}
@@ -729,6 +743,9 @@ static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
}
@@ -738,6 +755,9 @@ static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
}
@@ -945,6 +965,9 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
-	int i = clamp_val(data->range[sattr->index] & 0xf, 0,
-			  ARRAY_SIZE(pwmfreq_table) - 1);
+	int i;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	i = clamp_val(data->range[sattr->index] & 0xf, 0,
+		      ARRAY_SIZE(pwmfreq_table) - 1);
return sprintf(buf, "%d\n", pwmfreq_table[i]);
}
@@ -1035,6 +1058,10 @@ static ssize_t cpu0_vid_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
@@ -1385,6 +1412,121 @@ static void adt7475_remove_files(struct i2c_client *client,
sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
}
+static int adt7475_update_limits(struct i2c_client *client)
+{
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int i;
+ int ret;
+
+ ret = adt7475_read(REG_CONFIG4);
+ if (ret < 0)
+ return ret;
+ data->config4 = ret;
+
+ ret = adt7475_read(REG_CONFIG5);
+ if (ret < 0)
+ return ret;
+ data->config5 = ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(VOLTAGE_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][i] = ret << 2;
+
+ ret = adt7475_read(VOLTAGE_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][i] = ret << 2;
+ }
+
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_VTT_MIN);
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][5] = ret << 2;
+
+ ret = adt7475_read(REG_VTT_MAX);
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][5] = ret << 2;
+ }
+
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(TEMP_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MAX][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_TMIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[AUTOMIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_THERM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[THERM][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_OFFSET_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[OFFSET][i] = ret;
+ }
+ adt7475_read_hystersis(client);
+
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[MIN][i] = ret;
+ }
+
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MAX][i] = ret;
+
+ ret = adt7475_read(PWM_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MIN][i] = ret;
+ /* Set the channel and control information */
+ adt7475_read_pwm(client, i);
+ }
+
+ ret = adt7475_read(TEMP_TRANGE_REG(0));
+ if (ret < 0)
+ return ret;
+ data->range[0] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(1));
+ if (ret < 0)
+ return ret;
+ data->range[1] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(2));
+ if (ret < 0)
+ return ret;
+ data->range[2] = ret;
+
+ return 0;
+}
+
static int adt7475_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1562,6 +1704,11 @@ static int adt7475_probe(struct i2c_client *client,
(data->bypass_attn & (1 << 3)) ? " in3" : "",
(data->bypass_attn & (1 << 4)) ? " in4" : "");
+	/* Limits and settings should never change, so update them only once */
+ ret = adt7475_update_limits(client);
+ if (ret)
+ goto eremove;
+
return 0;
eremove:
@@ -1658,121 +1805,122 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
}
}
-static struct adt7475_data *adt7475_update_device(struct device *dev)
+static int adt7475_update_measure(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7475_data *data = i2c_get_clientdata(client);
u16 ext;
int i;
+ int ret;
- mutex_lock(&data->lock);
+ ret = adt7475_read(REG_STATUS2);
+ if (ret < 0)
+ return ret;
+ data->alarms = ret << 8;
- /* Measurement values update every 2 seconds */
- if (time_after(jiffies, data->measure_updated + HZ * 2) ||
- !data->valid) {
- data->alarms = adt7475_read(REG_STATUS2) << 8;
- data->alarms |= adt7475_read(REG_STATUS1);
-
- ext = (adt7475_read(REG_EXTEND2) << 8) |
- adt7475_read(REG_EXTEND1);
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- data->voltage[INPUT][i] =
- (adt7475_read(VOLTAGE_REG(i)) << 2) |
- ((ext >> (i * 2)) & 3);
- }
+ ret = adt7475_read(REG_STATUS1);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret;
- for (i = 0; i < ADT7475_TEMP_COUNT; i++)
- data->temp[INPUT][i] =
- (adt7475_read(TEMP_REG(i)) << 2) |
- ((ext >> ((i + 5) * 2)) & 3);
+ ret = adt7475_read(REG_EXTEND2);
+ if (ret < 0)
+ return ret;
- if (data->has_voltage & (1 << 5)) {
- data->alarms |= adt7475_read(REG_STATUS4) << 24;
- ext = adt7475_read(REG_EXTEND3);
- data->voltage[INPUT][5] = adt7475_read(REG_VTT) << 2 |
- ((ext >> 4) & 3);
- }
+ ext = (ret << 8);
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[INPUT][i] =
- adt7475_read_word(client, TACH_REG(i));
- }
+ ret = adt7475_read(REG_EXTEND1);
+ if (ret < 0)
+ return ret;
- /* Updated by hw when in auto mode */
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
- }
+ ext |= ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ ret = adt7475_read(VOLTAGE_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][i] =
+ (ret << 2) |
+ ((ext >> (i * 2)) & 3);
+ }
- if (data->has_vid)
- data->vid = adt7475_read(REG_VID) & 0x3f;
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ ret = adt7475_read(TEMP_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[INPUT][i] =
+ (ret << 2) |
+ ((ext >> ((i + 5) * 2)) & 3);
+ }
- data->measure_updated = jiffies;
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_STATUS4);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret << 24;
+
+ ret = adt7475_read(REG_EXTEND3);
+ if (ret < 0)
+ return ret;
+ ext = ret;
+
+ ret = adt7475_read(REG_VTT);
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][5] = ret << 2 |
+ ((ext >> 4) & 3);
}
- /* Limits and settings, should never change update every 60 seconds */
- if (time_after(jiffies, data->limits_updated + HZ * 60) ||
- !data->valid) {
- data->config4 = adt7475_read(REG_CONFIG4);
- data->config5 = adt7475_read(REG_CONFIG5);
-
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- /* Adjust values so they match the input precision */
- data->voltage[MIN][i] =
- adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
- data->voltage[MAX][i] =
- adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
- }
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[INPUT][i] = ret;
+ }
- if (data->has_voltage & (1 << 5)) {
- data->voltage[MIN][5] = adt7475_read(REG_VTT_MIN) << 2;
- data->voltage[MAX][5] = adt7475_read(REG_VTT_MAX) << 2;
- }
+ /* Updated by hw when in auto mode */
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[INPUT][i] = ret;
+ }
- for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
- /* Adjust values so they match the input precision */
- data->temp[MIN][i] =
- adt7475_read(TEMP_MIN_REG(i)) << 2;
- data->temp[MAX][i] =
- adt7475_read(TEMP_MAX_REG(i)) << 2;
- data->temp[AUTOMIN][i] =
- adt7475_read(TEMP_TMIN_REG(i)) << 2;
- data->temp[THERM][i] =
- adt7475_read(TEMP_THERM_REG(i)) << 2;
- data->temp[OFFSET][i] =
- adt7475_read(TEMP_OFFSET_REG(i));
- }
- adt7475_read_hystersis(client);
+ if (data->has_vid) {
+ ret = adt7475_read(REG_VID);
+ if (ret < 0)
+ return ret;
+ data->vid = ret & 0x3f;
+ }
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[MIN][i] =
- adt7475_read_word(client, TACH_MIN_REG(i));
- }
+ return 0;
+}
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
- data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
- /* Set the channel and control information */
- adt7475_read_pwm(client, i);
- }
+static struct adt7475_data *adt7475_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int ret;
- data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
- data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
- data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));
+ mutex_lock(&data->lock);
- data->limits_updated = jiffies;
- data->valid = 1;
+ /* Measurement values update every 2 seconds */
+ if (time_after(jiffies, data->measure_updated + HZ * 2) ||
+ !data->valid) {
+ ret = adt7475_update_measure(dev);
+ if (ret) {
+ data->valid = false;
+ mutex_unlock(&data->lock);
+ return ERR_PTR(ret);
+ }
+ data->measure_updated = jiffies;
+ data->valid = true;
}
mutex_unlock(&data->lock);
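
Aside: the reworked adt7475_update_device() now reports failures through the
kernel's ERR_PTR convention, which is why every show routine above gained an
IS_ERR() check. A userspace re-implementation of those macros, for
illustration only:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR(ptr) ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr) ((long)(ptr))

static int sensor_value = 42;

static void *update_device(int fail)
{
	if (fail)
		return ERR_PTR(-5);	/* e.g. -EIO from a failed bus read */
	return &sensor_value;
}

int main(void)
{
	void *data = update_device(1);

	if (IS_ERR(data))
		printf("error: %ld\n", PTR_ERR(data));
	else
		printf("value: %d\n", *(int *)data);
	return 0;
}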
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 1ea7ca510f84..aaebeb726d6a 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -443,8 +443,10 @@ static int emc1403_probe(struct i2c_client *client,
switch (id->driver_data) {
case emc1404:
data->groups[2] = &emc1404_group;
+ /* fall through */
case emc1403:
data->groups[1] = &emc1403_group;
+ /* fall through */
case emc1402:
data->groups[0] = &emc1402_group;
}
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index e88c01961948..33d51281272b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -394,12 +394,16 @@ static const char * const hwmon_power_attr_templates[] = {
[hwmon_power_cap_hyst] = "power%d_cap_hyst",
[hwmon_power_cap_max] = "power%d_cap_max",
[hwmon_power_cap_min] = "power%d_cap_min",
+ [hwmon_power_min] = "power%d_min",
[hwmon_power_max] = "power%d_max",
+ [hwmon_power_lcrit] = "power%d_lcrit",
[hwmon_power_crit] = "power%d_crit",
[hwmon_power_label] = "power%d_label",
[hwmon_power_alarm] = "power%d_alarm",
[hwmon_power_cap_alarm] = "power%d_cap_alarm",
+ [hwmon_power_min_alarm] = "power%d_min_alarm",
[hwmon_power_max_alarm] = "power%d_max_alarm",
+ [hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm",
[hwmon_power_crit_alarm] = "power%d_crit_alarm",
};
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index f829dadfd5a0..83472808c816 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -90,11 +90,20 @@ struct sensor_data {
char label[MAX_LABEL_LEN];
char name[MAX_ATTR_LEN];
struct device_attribute dev_attr;
+ struct sensor_group_data *sgrp_data;
+};
+
+struct sensor_group_data {
+ struct mutex mutex;
+ u32 gid;
+ bool enable;
};
struct platform_data {
const struct attribute_group *attr_groups[MAX_SENSOR_TYPE + 1];
+ struct sensor_group_data *sgrp_data;
u32 sensors_count; /* Total count of sensors from each group */
+ u32 nr_sensor_groups; /* Total number of sensor groups */
};
static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
@@ -105,6 +114,9 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
ssize_t ret;
u64 x;
+ if (sdata->sgrp_data && !sdata->sgrp_data->enable)
+ return -ENODATA;
+
ret = opal_get_sensor_data_u64(sdata->id, &x);
if (ret)
@@ -120,6 +132,46 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%llu\n", x);
}
+static ssize_t show_enable(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_data *sdata = container_of(devattr, struct sensor_data,
+ dev_attr);
+
+ return sprintf(buf, "%u\n", sdata->sgrp_data->enable);
+}
+
+static ssize_t store_enable(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_data *sdata = container_of(devattr, struct sensor_data,
+ dev_attr);
+ struct sensor_group_data *sgrp_data = sdata->sgrp_data;
+ int ret;
+ bool data;
+
+ ret = kstrtobool(buf, &data);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&sgrp_data->mutex);
+ if (ret)
+ return ret;
+
+ if (data != sgrp_data->enable) {
+ ret = sensor_group_enable(sgrp_data->gid, data);
+ if (!ret)
+ sgrp_data->enable = data;
+ }
+
+ if (!ret)
+ ret = count;
+
+ mutex_unlock(&sgrp_data->mutex);
+ return ret;
+}
+
static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
char *buf)
{
@@ -292,12 +344,115 @@ static u32 get_sensor_hwmon_index(struct sensor_data *sdata,
return ++sensor_groups[sdata->type].hwmon_index;
}
+static int init_sensor_group_data(struct platform_device *pdev,
+ struct platform_data *pdata)
+{
+ struct sensor_group_data *sgrp_data;
+ struct device_node *groups, *sgrp;
+ int count = 0, ret = 0;
+ enum sensors type;
+
+ groups = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group");
+ if (!groups)
+ return ret;
+
+ for_each_child_of_node(groups, sgrp) {
+ type = get_sensor_type(sgrp);
+ if (type != MAX_SENSOR_TYPE)
+ pdata->nr_sensor_groups++;
+ }
+
+ if (!pdata->nr_sensor_groups)
+ goto out;
+
+ sgrp_data = devm_kcalloc(&pdev->dev, pdata->nr_sensor_groups,
+ sizeof(*sgrp_data), GFP_KERNEL);
+ if (!sgrp_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for_each_child_of_node(groups, sgrp) {
+ u32 gid;
+
+ type = get_sensor_type(sgrp);
+ if (type == MAX_SENSOR_TYPE)
+ continue;
+
+ if (of_property_read_u32(sgrp, "sensor-group-id", &gid))
+ continue;
+
+ if (of_count_phandle_with_args(sgrp, "sensors", NULL) <= 0)
+ continue;
+
+ sensor_groups[type].attr_count++;
+ sgrp_data[count].gid = gid;
+ mutex_init(&sgrp_data[count].mutex);
+ sgrp_data[count++].enable = false;
+ }
+
+ pdata->sgrp_data = sgrp_data;
+out:
+ of_node_put(groups);
+ return ret;
+}
+
+static struct sensor_group_data *get_sensor_group(struct platform_data *pdata,
+ struct device_node *node,
+ enum sensors gtype)
+{
+ struct sensor_group_data *sgrp_data = pdata->sgrp_data;
+ struct device_node *groups, *sgrp;
+
+ groups = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group");
+ if (!groups)
+ return NULL;
+
+ for_each_child_of_node(groups, sgrp) {
+ struct of_phandle_iterator it;
+ u32 gid;
+ int rc, i;
+ enum sensors type;
+
+ type = get_sensor_type(sgrp);
+ if (type != gtype)
+ continue;
+
+ if (of_property_read_u32(sgrp, "sensor-group-id", &gid))
+ continue;
+
+ of_for_each_phandle(&it, rc, sgrp, "sensors", NULL, 0)
+ if (it.phandle == node->phandle) {
+ of_node_put(it.node);
+ break;
+ }
+
+ if (rc)
+ continue;
+
+ for (i = 0; i < pdata->nr_sensor_groups; i++)
+ if (gid == sgrp_data[i].gid) {
+ of_node_put(sgrp);
+ of_node_put(groups);
+ return &sgrp_data[i];
+ }
+ }
+
+ of_node_put(groups);
+ return NULL;
+}
+
static int populate_attr_groups(struct platform_device *pdev)
{
struct platform_data *pdata = platform_get_drvdata(pdev);
const struct attribute_group **pgroups = pdata->attr_groups;
struct device_node *opal, *np;
enum sensors type;
+ int ret;
+
+ ret = init_sensor_group_data(pdev, pdata);
+ if (ret)
+ return ret;
opal = of_find_node_by_path("/ibm,opal/sensors");
for_each_child_of_node(opal, np) {
@@ -344,7 +499,10 @@ static int populate_attr_groups(struct platform_device *pdev)
static void create_hwmon_attr(struct sensor_data *sdata, const char *attr_name,
ssize_t (*show)(struct device *dev,
struct device_attribute *attr,
- char *buf))
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
{
snprintf(sdata->name, MAX_ATTR_LEN, "%s%d_%s",
sensor_groups[sdata->type].name, sdata->hwmon_index,
@@ -352,23 +510,33 @@ static void create_hwmon_attr(struct sensor_data *sdata, const char *attr_name,
sysfs_attr_init(&sdata->dev_attr.attr);
sdata->dev_attr.attr.name = sdata->name;
- sdata->dev_attr.attr.mode = S_IRUGO;
sdata->dev_attr.show = show;
+ if (store) {
+ sdata->dev_attr.store = store;
+ sdata->dev_attr.attr.mode = 0664;
+ } else {
+ sdata->dev_attr.attr.mode = 0444;
+ }
}
static void populate_sensor(struct sensor_data *sdata, int od, int hd, int sid,
const char *attr_name, enum sensors type,
const struct attribute_group *pgroup,
+ struct sensor_group_data *sgrp_data,
ssize_t (*show)(struct device *dev,
struct device_attribute *attr,
- char *buf))
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
{
sdata->id = sid;
sdata->type = type;
sdata->opal_index = od;
sdata->hwmon_index = hd;
- create_hwmon_attr(sdata, attr_name, show);
+ create_hwmon_attr(sdata, attr_name, show, store);
pgroup->attrs[sensor_groups[type].attr_count++] = &sdata->dev_attr.attr;
+ sdata->sgrp_data = sgrp_data;
}
static char *get_max_attr(enum sensors type)
@@ -403,24 +571,23 @@ static int create_device_attrs(struct platform_device *pdev)
const struct attribute_group **pgroups = pdata->attr_groups;
struct device_node *opal, *np;
struct sensor_data *sdata;
- u32 sensor_id;
- enum sensors type;
u32 count = 0;
- int err = 0;
+ u32 group_attr_id[MAX_SENSOR_TYPE] = {0};
- opal = of_find_node_by_path("/ibm,opal/sensors");
sdata = devm_kcalloc(&pdev->dev,
pdata->sensors_count, sizeof(*sdata),
GFP_KERNEL);
- if (!sdata) {
- err = -ENOMEM;
- goto exit_put_node;
- }
+ if (!sdata)
+ return -ENOMEM;
+ opal = of_find_node_by_path("/ibm,opal/sensors");
for_each_child_of_node(opal, np) {
+ struct sensor_group_data *sgrp_data;
const char *attr_name;
- u32 opal_index;
+ u32 opal_index, hw_id;
+ u32 sensor_id;
const char *label;
+ enum sensors type;
if (np->name == NULL)
continue;
@@ -456,14 +623,12 @@ static int create_device_attrs(struct platform_device *pdev)
opal_index = INVALID_INDEX;
}
- sdata[count].opal_index = opal_index;
- sdata[count].hwmon_index =
- get_sensor_hwmon_index(&sdata[count], sdata, count);
-
- create_hwmon_attr(&sdata[count], attr_name, show_sensor);
-
- pgroups[type]->attrs[sensor_groups[type].attr_count++] =
- &sdata[count++].dev_attr.attr;
+ hw_id = get_sensor_hwmon_index(&sdata[count], sdata, count);
+ sgrp_data = get_sensor_group(pdata, np, type);
+ populate_sensor(&sdata[count], opal_index, hw_id, sensor_id,
+ attr_name, type, pgroups[type], sgrp_data,
+ show_sensor, NULL);
+ count++;
if (!of_property_read_string(np, "label", &label)) {
/*
@@ -474,35 +639,43 @@ static int create_device_attrs(struct platform_device *pdev)
*/
make_sensor_label(np, &sdata[count], label);
- populate_sensor(&sdata[count], opal_index,
- sdata[count - 1].hwmon_index,
+ populate_sensor(&sdata[count], opal_index, hw_id,
sensor_id, "label", type, pgroups[type],
- show_label);
+ NULL, show_label, NULL);
count++;
}
if (!of_property_read_u32(np, "sensor-data-max", &sensor_id)) {
attr_name = get_max_attr(type);
- populate_sensor(&sdata[count], opal_index,
- sdata[count - 1].hwmon_index,
+ populate_sensor(&sdata[count], opal_index, hw_id,
sensor_id, attr_name, type,
- pgroups[type], show_sensor);
+ pgroups[type], sgrp_data, show_sensor,
+ NULL);
count++;
}
if (!of_property_read_u32(np, "sensor-data-min", &sensor_id)) {
attr_name = get_min_attr(type);
- populate_sensor(&sdata[count], opal_index,
- sdata[count - 1].hwmon_index,
+ populate_sensor(&sdata[count], opal_index, hw_id,
sensor_id, attr_name, type,
- pgroups[type], show_sensor);
+ pgroups[type], sgrp_data, show_sensor,
+ NULL);
+ count++;
+ }
+
+ if (sgrp_data && !sgrp_data->enable) {
+ sgrp_data->enable = true;
+ hw_id = ++group_attr_id[type];
+ populate_sensor(&sdata[count], opal_index, hw_id,
+ sgrp_data->gid, "enable", type,
+ pgroups[type], sgrp_data, show_enable,
+ store_enable);
count++;
}
}
-exit_put_node:
of_node_put(opal);
- return err;
+ return 0;
}
static int ibmpowernv_probe(struct platform_device *pdev)
@@ -517,6 +690,7 @@ static int ibmpowernv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
pdata->sensors_count = 0;
+ pdata->nr_sensor_groups = 0;
err = populate_attr_groups(pdev);
if (err)
return err;
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 69031a0f7ed2..2f3f875c06ac 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -22,7 +22,6 @@
* struct iio_hwmon_state - device instance state
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
- * @hwmon_dev: associated hwmon device
* @attr_group: the group of attributes
* @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
@@ -30,7 +29,6 @@
struct iio_hwmon_state {
struct iio_channel *channels;
int num_channels;
- struct device *hwmon_dev;
struct attribute_group attr_group;
const struct attribute_group *groups[2];
struct attribute **attrs;
@@ -68,12 +66,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
enum iio_chan_type type;
struct iio_channel *channels;
const char *name = "iio_hwmon";
+ struct device *hwmon_dev;
char *sname;
if (dev->of_node && dev->of_node->name)
name = dev->of_node->name;
- channels = iio_channel_get_all(dev);
+ channels = devm_iio_channel_get_all(dev);
if (IS_ERR(channels)) {
if (PTR_ERR(channels) == -ENODEV)
return -EPROBE_DEFER;
@@ -81,10 +80,8 @@ static int iio_hwmon_probe(struct platform_device *pdev)
}
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st == NULL)
+ return -ENOMEM;
st->channels = channels;
@@ -95,22 +92,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attrs = devm_kcalloc(dev,
st->num_channels + 1, sizeof(*st->attrs),
GFP_KERNEL);
- if (st->attrs == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st->attrs == NULL)
+ return -ENOMEM;
for (i = 0; i < st->num_channels; i++) {
a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
- if (a == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (a == NULL)
+ return -ENOMEM;
sysfs_attr_init(&a->dev_attr.attr);
ret = iio_get_channel_type(&st->channels[i], &type);
if (ret < 0)
- goto error_release_channels;
+ return ret;
switch (type) {
case IIO_VOLTAGE:
@@ -134,13 +127,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
humidity_i++);
break;
default:
- ret = -EINVAL;
- goto error_release_channels;
- }
- if (a->dev_attr.attr.name == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
+ return -EINVAL;
}
+ if (a->dev_attr.attr.name == NULL)
+ return -ENOMEM;
+
a->dev_attr.show = iio_hwmon_read_val;
a->dev_attr.attr.mode = S_IRUGO;
a->index = i;
@@ -151,34 +142,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->groups[0] = &st->attr_group;
sname = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!sname) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (!sname)
+ return -ENOMEM;
strreplace(sname, '-', '_');
- st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
- st->groups);
- if (IS_ERR(st->hwmon_dev)) {
- ret = PTR_ERR(st->hwmon_dev);
- goto error_release_channels;
- }
- platform_set_drvdata(pdev, st);
- return 0;
-
-error_release_channels:
- iio_channel_release_all(channels);
- return ret;
-}
-
-static int iio_hwmon_remove(struct platform_device *pdev)
-{
- struct iio_hwmon_state *st = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(st->hwmon_dev);
- iio_channel_release_all(st->channels);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, sname, st,
+ st->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct of_device_id iio_hwmon_of_match[] = {
@@ -193,7 +163,6 @@ static struct platform_driver __refdata iio_hwmon_driver = {
.of_match_table = iio_hwmon_of_match,
},
.probe = iio_hwmon_probe,
- .remove = iio_hwmon_remove,
};
module_platform_driver(iio_hwmon_driver);
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 17c6460ae351..bb15d7816a29 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -99,12 +99,8 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1700X", 20000 },
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen 7 2700X", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
+ { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
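
Aside: the two prefix entries work because k10temp matches the model string
with strstr() (an assumption about the unchanged matching code, which this
hunk does not show). A minimal sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *model = "AMD Ryzen Threadripper 2950X";	/* hypothetical */

	if (strstr(model, "AMD Ryzen Threadripper 29"))
		printf("27000 milli-degree offset applies to %s\n", model);
	return 0;
}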
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 638567fb7cd8..3d9e210beedf 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 67860ad2e3d9..78fe8759d2a9 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -23,6 +23,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/hwmon.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
new file mode 100644
index 000000000000..de46577c7d5a
--- /dev/null
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+// Copyright (c) 2018 Vadim Pasternak <vadimp@mellanox.com>
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#define MLXREG_FAN_MAX_TACHO 12
+#define MLXREG_FAN_MAX_STATE 10
+#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
+#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
+/*
+ * Minimum and maximum FAN allowed speed in percent: from 20% to 100%. Values
+ * MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the FAN speed dynamic minimum. For example, if the value is set to
+ * 14 (40%), the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7,
+ * 8, 9, 10 to introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60, 70,
+ * 80, 90, 100.
+ */
+#define MLXREG_FAN_SPEED_MIN (MLXREG_FAN_MAX_STATE + 2)
+#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
+#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
+#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
+#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132
+/*
+ * FAN datasheet defines the formula for RPM calculations as RPM = 15/t-high.
+ * The logic in a programmable device measures the time t-high by sampling the
+ * tachometer every t-sample (with the default value 11.32 uS) and increments
+ * a counter (N) as long as the pulse has not changed:
+ * RPM = 15 / (t-sample * (K + Regval)), where:
+ * Regval: is the value read from the programmable device register;
+ * - 0xff - represents tachometer fault;
+ * - 0xfe - represents tachometer minimum value, which is 4444 RPM;
+ * - 0x00 - represents tachometer maximum value, which is 300000 RPM;
+ * K: is 44 and it represents the minimum allowed samples per pulse;
+ * N: is equal to K + Regval;
+ * In order to calculate RPM from the register value the following formula is
+ * used: RPM = 15 / ((Regval + K) * 11.32 * 10^(-6)), which in the
+ * default case is modified to:
+ * RPM = 15000000 * 100 / ((Regval + 44) * 1132);
+ * - for Regval 0x00, RPM will be 15000000 * 100 / (44 * 1132) = 30115;
+ * - for Regval 0xfe, RPM will be 15000000 * 100 / ((254 + 44) * 1132) = 4446;
+ * In the common case the formula is modified to:
+ * RPM = 15000000 * 100 / ((Regval + samples) * divider).
+ */
+#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
+ ((rval) + (s)) * (d)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
+ MLXREG_FAN_MAX_STATE, \
+ MLXREG_FAN_MAX_DUTY))
+#define MLXREG_FAN_PWM_STATE2DUTY(stat) (DIV_ROUND_CLOSEST((stat) * \
+ MLXREG_FAN_MAX_DUTY, \
+ MLXREG_FAN_MAX_STATE))
+
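
Aside: a stand-alone userspace sketch of the conversion macros above, using
the default samples (44) and divider (1132); the printed values should land
near the 30115/4446 RPM figures quoted in the comment:

#include <stdio.h>

#define MAX_STATE 10
#define MAX_DUTY  255
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	printf("regval 0x00 -> %ld RPM\n",
	       DIV_ROUND_CLOSEST(15000000L * 100, (0L + 44) * 1132));
	printf("regval 0xfe -> %ld RPM\n",
	       DIV_ROUND_CLOSEST(15000000L * 100, (254L + 44) * 1132));
	/* PWM duty 128 (~50%) maps to cooling state 5 and back to 128. */
	printf("duty 128 -> state %ld -> duty %ld\n",
	       DIV_ROUND_CLOSEST(128L * MAX_STATE, MAX_DUTY),
	       DIV_ROUND_CLOSEST(5L * MAX_DUTY, MAX_STATE));
	return 0;
}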
+/*
+ * struct mlxreg_fan_tacho - tachometer data (internal use):
+ *
+ * @connected: indicates if tachometer is connected;
+ * @reg: register offset;
+ * @mask: fault mask;
+ */
+struct mlxreg_fan_tacho {
+ bool connected;
+ u32 reg;
+ u32 mask;
+};
+
+/*
+ * struct mlxreg_fan_pwm - PWM data (internal use):
+ *
+ * @connected: indicates if PWM is connected;
+ * @reg: register offset;
+ */
+struct mlxreg_fan_pwm {
+ bool connected;
+ u32 reg;
+};
+
+/*
+ * struct mlxreg_fan - private data (internal use):
+ *
+ * @dev: basic device;
+ * @regmap: register map of parent device;
+ * @pdata: platform data;
+ * @tacho: tachometer data;
+ * @pwm: PWM data;
+ * @samples: minimum allowed samples per pulse;
+ * @divider: divider value for tachometer RPM calculation;
+ * @cooling_levels: cooling device levels;
+ * @cdev: cooling device;
+ */
+struct mlxreg_fan {
+ struct device *dev;
+ void *regmap;
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO];
+ struct mlxreg_fan_pwm pwm;
+ int samples;
+ int divider;
+ u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ struct thermal_cooling_device *cdev;
+};
+
+static int
+mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+ struct mlxreg_fan_tacho *tacho;
+ u32 regval;
+ int err;
+
+ switch (type) {
+ case hwmon_fan:
+ tacho = &fan->tacho[channel];
+ switch (attr) {
+ case hwmon_fan_input:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_RPM(regval, fan->divider,
+ fan->samples);
+ break;
+
+ case hwmon_fan_fault:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_FAULT(regval, tacho->mask);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err)
+ return err;
+
+ *val = regval;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < MLXREG_FAN_MIN_DUTY ||
+ val > MLXREG_FAN_MAX_DUTY)
+ return -EINVAL;
+ return regmap_write(fan->regmap, fan->pwm.reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t
+mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ if (!(((struct mlxreg_fan *)data)->tacho[channel].connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+
+ case hwmon_pwm:
+ if (!(((struct mlxreg_fan *)data)->pwm.connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const u32 mlxreg_fan_hwmon_fan_config[] = {
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
+ .type = hwmon_fan,
+ .config = mlxreg_fan_hwmon_fan_config,
+};
+
+static const u32 mlxreg_fan_hwmon_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
+ .type = hwmon_pwm,
+ .config = mlxreg_fan_hwmon_pwm_config,
+};
+
+static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
+ &mlxreg_fan_hwmon_fan,
+ &mlxreg_fan_hwmon_pwm,
+ NULL
+};
+
+static const struct hwmon_ops mlxreg_fan_hwmon_hwmon_ops = {
+ .is_visible = mlxreg_fan_is_visible,
+ .read = mlxreg_fan_read,
+ .write = mlxreg_fan_write,
+};
+
+static const struct hwmon_chip_info mlxreg_fan_hwmon_chip_info = {
+ .ops = &mlxreg_fan_hwmon_hwmon_ops,
+ .info = mlxreg_fan_hwmon_info,
+};
+
+static int mlxreg_fan_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MLXREG_FAN_MAX_STATE;
+ return 0;
+}
+
+static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ *state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+
+ return 0;
+}
+
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ unsigned long cur_state;
+ u32 regval;
+ int i;
+ int err;
+
+	/*
+	 * Check whether this request changes the allowed dynamic fan
+	 * minimum. If so, update the cooling levels accordingly and, if
+	 * the current state is below the newly requested minimum, raise
+	 * it to that minimum. For example, if the current state is 5 and
+	 * the minimum is to be raised from 4 to 6, cooling_levels[0..5]
+	 * are all set to 6, so state 5 (fan->cooling_levels[5]) is
+	 * overwritten as well.
+	 */
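+	/*
+	 * Worked example (assuming the definitions earlier in this file,
+	 * where MLXREG_FAN_MAX_STATE = 10 and dynamic-minimum requests
+	 * span 12..20): a request for state 16 becomes 16 - 10 = 6, so
+	 * cooling_levels[0..5] are raised to 6 while cooling_levels[6..10]
+	 * keep their index values.
+	 */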
+ if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+ state -= MLXREG_FAN_MAX_STATE;
+ for (i = 0; i < state; i++)
+ fan->cooling_levels[i] = state;
+ for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+ if (state < cur_state)
+ return 0;
+
+ state = cur_state;
+ }
+
+ if (state > MLXREG_FAN_MAX_STATE)
+ return -EINVAL;
+
+ /* Normalize the state to the valid speed range. */
+ state = fan->cooling_levels[state];
+ err = regmap_write(fan->regmap, fan->pwm.reg,
+ MLXREG_FAN_PWM_STATE2DUTY(state));
+ if (err) {
+ dev_err(fan->dev, "Failed to write PWM duty\n");
+ return err;
+ }
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+ .get_max_state = mlxreg_fan_get_max_state,
+ .get_cur_state = mlxreg_fan_get_cur_state,
+ .set_cur_state = mlxreg_fan_set_cur_state,
+};
+
+static int mlxreg_fan_config(struct mlxreg_fan *fan,
+ struct mlxreg_core_platform_data *pdata)
+{
+ struct mlxreg_core_data *data = pdata->data;
+ bool configured = false;
+ int tacho_num = 0, i;
+
+ fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
+ fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
+ for (i = 0; i < pdata->counter; i++, data++) {
+ if (strnstr(data->label, "tacho", sizeof(data->label))) {
+ if (tacho_num == MLXREG_FAN_MAX_TACHO) {
+ dev_err(fan->dev, "too many tacho entries: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->tacho[tacho_num].reg = data->reg;
+ fan->tacho[tacho_num].mask = data->mask;
+ fan->tacho[tacho_num++].connected = true;
+ } else if (strnstr(data->label, "pwm", sizeof(data->label))) {
+ if (fan->pwm.connected) {
+ dev_err(fan->dev, "duplicate pwm entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->pwm.reg = data->reg;
+ fan->pwm.connected = true;
+ } else if (strnstr(data->label, "conf", sizeof(data->label))) {
+ if (configured) {
+ dev_err(fan->dev, "duplicate conf entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ /* Validate that conf parameters are not zeros. */
+ if (!data->mask || !data->bit) {
+ dev_err(fan->dev, "invalid conf entry params: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->samples = data->mask;
+ fan->divider = data->bit;
+ configured = true;
+ } else {
+ dev_err(fan->dev, "invalid label: %s\n", data->label);
+ return -EINVAL;
+ }
+ }
+
+ /* Init cooling levels per PWM state. */
+ for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++)
+ fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL;
+ for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ return 0;
+}
+
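+/*
+ * Example platform data layout (illustrative only; register addresses and
+ * values are hypothetical):
+ *
+ *	{ .label = "tacho1", .reg = 0xe4, .mask = GENMASK(7, 0) },
+ *	{ .label = "pwm1",   .reg = 0xe3 },
+ *	{ .label = "conf",   .mask = 44, .bit = 1440 },
+ *
+ * where the "conf" entry reuses .mask as the tachometer samples-per-pulse
+ * count and .bit as the tachometer frequency divider.
+ */
+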
+static int mlxreg_fan_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan *fan;
+ struct device *hwm;
+ int err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ if (!fan)
+ return -ENOMEM;
+
+ fan->dev = &pdev->dev;
+ fan->regmap = pdata->regmap;
+ platform_set_drvdata(pdev, fan);
+
+ err = mlxreg_fan_config(fan, pdata);
+ if (err)
+ return err;
+
+ hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ fan,
+ &mlxreg_fan_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwm)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwm);
+ }
+
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
+ &mlxreg_fan_cooling_ops);
+ if (IS_ERR(fan->cdev)) {
+ dev_err(&pdev->dev, "Failed to register cooling device\n");
+ return PTR_ERR(fan->cdev);
+ }
+ }
+
+ return 0;
+}
+
+static int mlxreg_fan_remove(struct platform_device *pdev)
+{
+ struct mlxreg_fan *fan = platform_get_drvdata(pdev);
+
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ thermal_cooling_device_unregister(fan->cdev);
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_fan_driver = {
+ .driver = {
+ .name = "mlxreg-fan",
+ },
+ .probe = mlxreg_fan_probe,
+ .remove = mlxreg_fan_remove,
+};
+
+module_platform_driver(mlxreg_fan_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox FAN driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlxreg-fan");
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index f9d1349c3286..c6bd61e4695a 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1050,8 +1050,8 @@ struct nct6775_data {
u64 beeps;
u8 pwm_num; /* number of pwm */
- u8 pwm_mode[NUM_FAN]; /* 1->DC variable voltage,
- * 0->PWM variable duty cycle
+ u8 pwm_mode[NUM_FAN]; /* 0->DC variable voltage,
+ * 1->PWM variable duty cycle
*/
enum pwm_enable pwm_enable[NUM_FAN];
/* 0->off
@@ -2541,7 +2541,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
case thermal_cruise:
nct6775_write_value(data, data->REG_TARGET[nr],
data->target_temp[nr]);
- /* intentional */
+ /* fall through */
default:
reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 95a68ab175c7..7815ddf149f6 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -77,7 +77,7 @@ struct nct7904_data {
};
/* Access functions */
-static int nct7904_bank_lock(struct nct7904_data *data, unsigned bank)
+static int nct7904_bank_lock(struct nct7904_data *data, unsigned int bank)
{
int ret;
@@ -99,7 +99,7 @@ static inline void nct7904_bank_release(struct nct7904_data *data)
/* Read 1-byte register. Returns unsigned reg or -ERRNO on error. */
static int nct7904_read_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret;
@@ -117,7 +117,7 @@ static int nct7904_read_reg(struct nct7904_data *data,
* -ERRNO on error.
*/
static int nct7904_read_reg16(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret, hi;
@@ -139,7 +139,7 @@ static int nct7904_read_reg16(struct nct7904_data *data,
/* Write 1-byte register. Returns 0 or -ERRNO on error. */
static int nct7904_write_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg, u8 val)
+ unsigned int bank, unsigned int reg, u8 val)
{
struct i2c_client *client = data->client;
int ret;
@@ -159,7 +159,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
unsigned int cnt, rpm;
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_fan_input:
ret = nct7904_read_reg16(data, BANK_0,
FANIN1_HV_REG + channel * 2);
@@ -200,7 +200,7 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
index = nct7904_chan_to_index[channel];
- switch(attr) {
+ switch (attr) {
case hwmon_in_input:
ret = nct7904_read_reg16(data, BANK_0,
VSEN1_HV_REG + index * 2);
@@ -236,7 +236,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
- switch(attr) {
+ switch (attr) {
case hwmon_temp_input:
if (channel == 0)
ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
@@ -276,7 +276,7 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
ret = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + channel);
if (ret < 0)
@@ -301,7 +301,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
if (val < 0 || val > 255)
return -EINVAL;
@@ -322,7 +322,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel)
{
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
case hwmon_pwm_enable:
return S_IRUGO | S_IWUSR;
@@ -431,15 +431,15 @@ static const struct hwmon_channel_info nct7904_in = {
};
static const u32 nct7904_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_fan = {
@@ -448,11 +448,11 @@ static const struct hwmon_channel_info nct7904_fan = {
};
static const u32 nct7904_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ 0
};
static const struct hwmon_channel_info nct7904_pwm = {
@@ -461,16 +461,16 @@ static const struct hwmon_channel_info nct7904_pwm = {
};
static const u32 nct7904_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- 0
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_temp = {
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
new file mode 100644
index 000000000000..8474d601aa63
--- /dev/null
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -0,0 +1,1057 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 Nuvoton Technology corporation.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+
+/* NPCM7XX PWM registers */
+#define NPCM7XX_PWM_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_PWM_REG_PR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_PWM_REG_CSR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_PWM_REG_CR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_PWM_REG_CNRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x0C + (12 * (ch)))
+#define NPCM7XX_PWM_REG_CMRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x10 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PDRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x14 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PIER(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x3C)
+#define NPCM7XX_PWM_REG_PIIR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x40)
+
+#define NPCM7XX_PWM_CTRL_CH0_MODE_BIT BIT(3)
+#define NPCM7XX_PWM_CTRL_CH1_MODE_BIT BIT(11)
+#define NPCM7XX_PWM_CTRL_CH2_MODE_BIT BIT(15)
+#define NPCM7XX_PWM_CTRL_CH3_MODE_BIT BIT(19)
+
+#define NPCM7XX_PWM_CTRL_CH0_INV_BIT BIT(2)
+#define NPCM7XX_PWM_CTRL_CH1_INV_BIT BIT(10)
+#define NPCM7XX_PWM_CTRL_CH2_INV_BIT BIT(14)
+#define NPCM7XX_PWM_CTRL_CH3_INV_BIT BIT(18)
+
+#define NPCM7XX_PWM_CTRL_CH0_EN_BIT BIT(0)
+#define NPCM7XX_PWM_CTRL_CH1_EN_BIT BIT(8)
+#define NPCM7XX_PWM_CTRL_CH2_EN_BIT BIT(12)
+#define NPCM7XX_PWM_CTRL_CH3_EN_BIT BIT(16)
+
+/* Define the maximum PWM channel number */
+#define NPCM7XX_PWM_MAX_CHN_NUM 8
+#define NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE 4
+#define NPCM7XX_PWM_MAX_MODULES 2
+
+/* Counter register default (CNR = 255); the CMR compare value sets the duty cycle */
+#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255
+#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127
+#define NPCM7XX_PWM_CMR_MAX 255
+
+/* default all PWM channels PRESCALE2 = 1 */
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 0x4
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 0x40
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 0x400
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3 0x4000
+
+#define PWM_OUTPUT_FREQ_25KHZ 25000
+#define PWN_CNT_DEFAULT 256
+#define MIN_PRESCALE1 2
+#define NPCM7XX_PWM_PRESCALE_SHIFT_CH01 8
+
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT (NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3)
+
+#define NPCM7XX_PWM_CTRL_MODE_DEFAULT (NPCM7XX_PWM_CTRL_CH0_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH1_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH2_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH3_MODE_BIT)
+
+/* NPCM7XX FAN Tacho registers */
+#define NPCM7XX_FAN_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_FAN_REG_TCNT1(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_FAN_REG_TCRA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x02)
+#define NPCM7XX_FAN_REG_TCRB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_FAN_REG_TCNT2(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x06)
+#define NPCM7XX_FAN_REG_TPRSC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_FAN_REG_TCKC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0A)
+#define NPCM7XX_FAN_REG_TMCTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0C)
+#define NPCM7XX_FAN_REG_TICTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0E)
+#define NPCM7XX_FAN_REG_TICLR(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x10)
+#define NPCM7XX_FAN_REG_TIEN(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x12)
+#define NPCM7XX_FAN_REG_TCPA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x14)
+#define NPCM7XX_FAN_REG_TCPB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x16)
+#define NPCM7XX_FAN_REG_TCPCFG(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x18)
+#define NPCM7XX_FAN_REG_TINASEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1A)
+#define NPCM7XX_FAN_REG_TINBSEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1C)
+
+#define NPCM7XX_FAN_TCKC_CLKX_NONE 0
+#define NPCM7XX_FAN_TCKC_CLK1_APB BIT(0)
+#define NPCM7XX_FAN_TCKC_CLK2_APB BIT(3)
+
+#define NPCM7XX_FAN_TMCTRL_TBEN BIT(6)
+#define NPCM7XX_FAN_TMCTRL_TAEN BIT(5)
+#define NPCM7XX_FAN_TMCTRL_TBEDG BIT(4)
+#define NPCM7XX_FAN_TMCTRL_TAEDG BIT(3)
+#define NPCM7XX_FAN_TMCTRL_MODE_5 BIT(2)
+
+#define NPCM7XX_FAN_TICLR_CLEAR_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TICLR_TFCLR BIT(5)
+#define NPCM7XX_FAN_TICLR_TECLR BIT(4)
+#define NPCM7XX_FAN_TICLR_TDCLR BIT(3)
+#define NPCM7XX_FAN_TICLR_TCCLR BIT(2)
+#define NPCM7XX_FAN_TICLR_TBCLR BIT(1)
+#define NPCM7XX_FAN_TICLR_TACLR BIT(0)
+
+#define NPCM7XX_FAN_TIEN_ENABLE_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TIEN_TFIEN BIT(5)
+#define NPCM7XX_FAN_TIEN_TEIEN BIT(4)
+#define NPCM7XX_FAN_TIEN_TDIEN BIT(3)
+#define NPCM7XX_FAN_TIEN_TCIEN BIT(2)
+#define NPCM7XX_FAN_TIEN_TBIEN BIT(1)
+#define NPCM7XX_FAN_TIEN_TAIEN BIT(0)
+
+#define NPCM7XX_FAN_TICTRL_TFPND BIT(5)
+#define NPCM7XX_FAN_TICTRL_TEPND BIT(4)
+#define NPCM7XX_FAN_TICTRL_TDPND BIT(3)
+#define NPCM7XX_FAN_TICTRL_TCPND BIT(2)
+#define NPCM7XX_FAN_TICTRL_TBPND BIT(1)
+#define NPCM7XX_FAN_TICTRL_TAPND BIT(0)
+
+#define NPCM7XX_FAN_TCPCFG_HIBEN BIT(7)
+#define NPCM7XX_FAN_TCPCFG_EQBEN BIT(6)
+#define NPCM7XX_FAN_TCPCFG_LOBEN BIT(5)
+#define NPCM7XX_FAN_TCPCFG_CPBSEL BIT(4)
+#define NPCM7XX_FAN_TCPCFG_HIAEN BIT(3)
+#define NPCM7XX_FAN_TCPCFG_EQAEN BIT(2)
+#define NPCM7XX_FAN_TCPCFG_LOAEN BIT(1)
+#define NPCM7XX_FAN_TCPCFG_CPASEL BIT(0)
+
+/* FAN General Definition */
+/* Define the maximum FAN channel number */
+#define NPCM7XX_FAN_MAX_MODULE 8
+#define NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE 2
+#define NPCM7XX_FAN_MAX_CHN_NUM 16
+
+/*
+ * Fan tach timeout (based on a 214843.75 Hz clock, 1 count = 4.654 us):
+ * 0x5000 counts * 4.654 us ~= 95 ms.
+ * The minimum detectable fan speed is therefore ~10.6 Hz, i.e. ~640 RPM
+ * with 1 pulse per revolution, ~320 RPM with 2 pulses, and so on.
+ */
+#define NPCM7XX_FAN_TIMEOUT 0x5000
+#define NPCM7XX_FAN_TCNT 0xFFFF
+#define NPCM7XX_FAN_TCPA (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+#define NPCM7XX_FAN_TCPB (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+
+#define NPCM7XX_FAN_POLL_TIMER_200MS 200
+#define NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION 2
+#define NPCM7XX_FAN_TINASEL_FANIN_DEFAULT 0
+#define NPCM7XX_FAN_CLK_PRESCALE 255
+
+#define NPCM7XX_FAN_CMPA 0
+#define NPCM7XX_FAN_CMPB 1
+
+/* Obtain the fan number */
+#define NPCM7XX_FAN_INPUT(fan, cmp) (((fan) << 1) + (cmp))
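+/*
+ * Worked example: fan module 3, comparator B (cmp = 1) maps to fan input
+ * NPCM7XX_FAN_INPUT(3, 1) = (3 << 1) + 1 = 7, i.e. data->fan_dev[7].
+ */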
+
+/* fan sample status */
+#define FAN_DISABLE 0xFF
+#define FAN_INIT 0x00
+#define FAN_PREPARE_TO_GET_FIRST_CAPTURE 0x01
+#define FAN_ENOUGH_SAMPLE 0x02
+
+struct npcm7xx_fan_dev {
+ u8 fan_st_flg;
+ u8 fan_pls_per_rev;
+ u16 fan_cnt;
+ u32 fan_cnt_tmp;
+};
+
+struct npcm7xx_cooling_device {
+ char name[THERMAL_NAME_LENGTH];
+ struct npcm7xx_pwm_fan_data *data;
+ struct thermal_cooling_device *tcdev;
+ int pwm_port;
+ u8 *cooling_levels;
+ u8 max_state;
+ u8 cur_state;
+};
+
+struct npcm7xx_pwm_fan_data {
+ void __iomem *pwm_base;
+ void __iomem *fan_base;
+ unsigned long pwm_clk_freq;
+ unsigned long fan_clk_freq;
+ struct clk *pwm_clk;
+ struct clk *fan_clk;
+ struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES];
+ spinlock_t fan_lock[NPCM7XX_FAN_MAX_MODULE];
+ int fan_irq[NPCM7XX_FAN_MAX_MODULE];
+ bool pwm_present[NPCM7XX_PWM_MAX_CHN_NUM];
+ bool fan_present[NPCM7XX_FAN_MAX_CHN_NUM];
+ u32 input_clk_freq;
+ struct timer_list fan_timer;
+ struct npcm7xx_fan_dev fan_dev[NPCM7XX_FAN_MAX_CHN_NUM];
+ struct npcm7xx_cooling_device *cdev[NPCM7XX_PWM_MAX_CHN_NUM];
+ u8 fan_select;
+};
+
+static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
+ int channel, u16 val)
+{
+ u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 tmp_buf, ctrl_en_bit, env_bit;
+
+ /*
+ * Config PWM Comparator register for setting duty cycle
+ */
+ mutex_lock(&data->pwm_lock[module]);
+
+ /* write new CMR value */
+ iowrite32(val, NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
+ tmp_buf = ioread32(NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+
+ switch (pwm_ch) {
+ case 0:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH0_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH0_INV_BIT;
+ break;
+ case 1:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH1_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH1_INV_BIT;
+ break;
+ case 2:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH2_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH2_INV_BIT;
+ break;
+ case 3:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH3_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH3_INV_BIT;
+ break;
+ default:
+ mutex_unlock(&data->pwm_lock[module]);
+ return -ENODEV;
+ }
+
+ if (val == 0) {
+ /* Disable PWM */
+ tmp_buf &= ~ctrl_en_bit;
+ tmp_buf |= env_bit;
+ } else {
+ /* Enable PWM */
+ tmp_buf |= ctrl_en_bit;
+ tmp_buf &= ~env_bit;
+ }
+
+ iowrite32(tmp_buf, NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+ mutex_unlock(&data->pwm_lock[module]);
+
+ return 0;
+}
+
+static inline void npcm7xx_fan_start_capture(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp)
+{
+ u8 fan_id;
+ u8 reg_mode;
+ u8 reg_int;
+ unsigned long flags;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+	/* Only start capture if this fan tach input is enabled */
+ if (data->fan_dev[fan_id].fan_st_flg != FAN_DISABLE) {
+ /* reset status */
+ spin_lock_irqsave(&data->fan_lock[fan], flags);
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+		/*
+		 * The interrupt enable bits do not need to be cleared before
+		 * being set; they are cleared only on reset. The clock unit
+		 * control register behaves in the same way as the interrupt
+		 * enable register.
+		 */
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TAIEN |
+ NPCM7XX_FAN_TIEN_TEIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode = NPCM7XX_FAN_TCKC_CLK1_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+			/* start capture */
+ iowrite8(reg_mode, NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+ } else {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TBIEN |
+ NPCM7XX_FAN_TIEN_TFIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode =
+ NPCM7XX_FAN_TCKC_CLK2_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+			/* start capture */
+ iowrite8(reg_mode,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[fan], flags);
+ }
+}
+
+/*
+ * Background timer to poll the fan tach values: each 200 ms round
+ * services two of the eight modules, so a full sweep of all fans
+ * takes 4 rounds (800 ms).
+ */
+static void npcm7xx_fan_polling(struct timer_list *t)
+{
+ struct npcm7xx_pwm_fan_data *data;
+ int i;
+
+ data = from_timer(data, t, fan_timer);
+
+	/*
+	 * Poll two modules per round:
+	 * FAN01 & FAN89 / FAN23 & FAN1011 / FAN45 & FAN1213 / FAN67 & FAN1415
+	 */
+ for (i = data->fan_select; i < NPCM7XX_FAN_MAX_MODULE;
+ i = i + 4) {
+ /* clear the flag and reset the counter (TCNT) */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, i));
+
+ if (data->fan_present[i * 2]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPA);
+ }
+ if (data->fan_present[(i * 2) + 1]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPB);
+ }
+ }
+
+ data->fan_select++;
+ data->fan_select &= 0x3;
+
+ /* reset the timer interval */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ add_timer(&data->fan_timer);
+}
+
+static inline void npcm7xx_fan_compute(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 fan_id, u8 flag_int,
+ u8 flag_mode, u8 flag_clear)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u16 fan_cap;
+
+ if (cmp == NPCM7XX_FAN_CMPA)
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRA(data->fan_base, fan));
+ else
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRB(data->fan_base, fan));
+
+	/* Clear the capture flag; H/W will auto-reset NPCM7XX_FAN_TCNTx */
+ iowrite8(flag_clear, NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_INIT) {
+ /* First capture, drop it */
+ data->fan_dev[fan_id].fan_st_flg =
+ FAN_PREPARE_TO_GET_FIRST_CAPTURE;
+
+ /* reset counter */
+ data->fan_dev[fan_id].fan_cnt_tmp = 0;
+ } else if (data->fan_dev[fan_id].fan_st_flg < FAN_ENOUGH_SAMPLE) {
+		/*
+		 * Collect enough samples
+		 * (e.g. a 2-pulse fan needs 2 samples).
+		 */
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+ data->fan_dev[fan_id].fan_st_flg++;
+ } else {
+		/* Enough samples collected, or the fan is disabled */
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_ENOUGH_SAMPLE) {
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+			/* compute the final average count per pulse */
+ data->fan_dev[fan_id].fan_cnt =
+ data->fan_dev[fan_id].fan_cnt_tmp /
+ FAN_ENOUGH_SAMPLE;
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ }
+
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+}
+
+static inline void npcm7xx_check_cmp(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 flag)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u8 flag_timeout;
+ u8 flag_cap;
+ u8 flag_clear;
+ u8 flag_int;
+ u8 flag_mode;
+ u8 fan_id;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ flag_cap = NPCM7XX_FAN_TICTRL_TAPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TEPND;
+ flag_int = NPCM7XX_FAN_TIEN_TAIEN | NPCM7XX_FAN_TIEN_TEIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK1_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TACLR | NPCM7XX_FAN_TICLR_TECLR;
+ } else {
+ flag_cap = NPCM7XX_FAN_TICTRL_TBPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TFPND;
+ flag_int = NPCM7XX_FAN_TIEN_TBIEN | NPCM7XX_FAN_TIEN_TFIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK2_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TBCLR | NPCM7XX_FAN_TICLR_TFCLR;
+ }
+
+ if (flag & flag_timeout) {
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* clear interrupt flag */
+ iowrite8(flag_clear,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+		/*
+		 * If a timeout occurs (NPCM7XX_FAN_TIMEOUT), the fan is either
+		 * not connected or spinning slower than 10.6 Hz (320 RPM at
+		 * 2 pulses per revolution). In either case the reported RPM
+		 * should be zero.
+		 */
+ data->fan_dev[fan_id].fan_cnt = 0;
+ } else {
+		/* an input capture occurred */
+ if (flag & flag_cap)
+ npcm7xx_fan_compute(data, fan, cmp, fan_id, flag_int,
+ flag_mode, flag_clear);
+ }
+}
+
+static irqreturn_t npcm7xx_fan_isr(int irq, void *dev_id)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_id;
+ unsigned long flags;
+ int module;
+ u8 flag;
+
+ module = irq - data->fan_irq[0];
+ spin_lock_irqsave(&data->fan_lock[module], flags);
+
+ flag = ioread8(NPCM7XX_FAN_REG_TICTRL(data->fan_base, module));
+ if (flag > 0) {
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPA, flag);
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPB, flag);
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+
+ return IRQ_NONE;
+}
+
+static int npcm7xx_read_pwm(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+	u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+		*val = ioread32(NPCM7XX_PWM_REG_CMRx(data->pwm_base, module,
+						     pwm_ch));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > NPCM7XX_PWM_CMR_MAX)
+ return -EINVAL;
+ err = npcm7xx_pwm_config_set(data, channel, (u16)val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static umode_t npcm7xx_pwm_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->pwm_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read_fan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = 0;
+ if (data->fan_dev[channel].fan_cnt <= 0)
+ return data->fan_dev[channel].fan_cnt;
+
+ /* Convert the raw reading to RPM */
+ if (data->fan_dev[channel].fan_cnt > 0 &&
+ data->fan_dev[channel].fan_pls_per_rev > 0)
+ *val = ((data->input_clk_freq * 60) /
+ (data->fan_dev[channel].fan_cnt *
+ data->fan_dev[channel].fan_pls_per_rev));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
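+/*
+ * Worked example (assuming the 55 MHz APB clock implied by the tach
+ * timeout comment above): input_clk_freq = 55000000 / 256 ~= 214843 Hz,
+ * so a fan reporting fan_cnt = 3222 with 2 pulses per revolution reads
+ * (214843 * 60) / (3222 * 2) ~= 2000 RPM.
+ */
+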
+static umode_t npcm7xx_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->fan_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_read_pwm(dev, attr, channel, val);
+ case hwmon_fan:
+ return npcm7xx_read_fan(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_write_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t npcm7xx_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_pwm_is_visible(data, attr, channel);
+ case hwmon_fan:
+ return npcm7xx_fan_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
+
+static const u32 npcm7xx_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_pwm = {
+ .type = hwmon_pwm,
+ .config = npcm7xx_pwm_config,
+};
+
+static const u32 npcm7xx_fan_config[] = {
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_fan = {
+ .type = hwmon_fan,
+ .config = npcm7xx_fan_config,
+};
+
+static const struct hwmon_channel_info *npcm7xx_info[] = {
+ &npcm7xx_pwm,
+ &npcm7xx_fan,
+ NULL
+};
+
+static const struct hwmon_ops npcm7xx_hwmon_ops = {
+ .is_visible = npcm7xx_is_visible,
+ .read = npcm7xx_read,
+ .write = npcm7xx_write,
+};
+
+static const struct hwmon_chip_info npcm7xx_chip_info = {
+ .ops = &npcm7xx_hwmon_ops,
+ .info = npcm7xx_info,
+};
+
+static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int m, ch;
+ u32 prescale_val, output_freq;
+
+ data->pwm_clk_freq = clk_get_rate(data->pwm_clk);
+
+	/* Adjust the NPCM7xx PWM output frequency to ~25 kHz */
+ output_freq = data->pwm_clk_freq / PWN_CNT_DEFAULT;
+ prescale_val = DIV_ROUND_CLOSEST(output_freq, PWM_OUTPUT_FREQ_25KHZ);
+
+ /* If prescale_val = 0, then the prescale output clock is stopped */
+ if (prescale_val < MIN_PRESCALE1)
+ prescale_val = MIN_PRESCALE1;
+	/*
+	 * prescale_val must be decremented by one because the hardware
+	 * increments the PWM Prescale register value by one internally.
+	 */
+ prescale_val--;
+
+ /* Setting PWM Prescale Register value register to both modules */
+ prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01);
+
+ for (m = 0; m < NPCM7XX_PWM_MAX_MODULES ; m++) {
+ iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT,
+ NPCM7XX_PWM_REG_CSR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_CTRL_MODE_DEFAULT,
+ NPCM7XX_PWM_REG_CR(data->pwm_base, m));
+
+ for (ch = 0; ch < NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE; ch++) {
+ iowrite32(NPCM7XX_PWM_COUNTER_DEFAULT_NUM,
+ NPCM7XX_PWM_REG_CNRx(data->pwm_base, m, ch));
+ }
+ }
+
+ return output_freq / ((prescale_val & 0xf) + 1);
+}
+
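+/*
+ * Worked example (illustrative): with a 25 MHz PWM clock,
+ * output_freq = 25000000 / 256 ~= 97656 Hz and
+ * DIV_ROUND_CLOSEST(97656, 25000) = 4; the register is programmed with
+ * 4 - 1 = 3 and the effective output is 97656 / 4 ~= 24.4 kHz, close to
+ * the 25 kHz target.
+ */
+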
+static void npcm7xx_fan_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int md;
+ int ch;
+ int i;
+ u32 apb_clk_freq;
+
+ for (md = 0; md < NPCM7XX_FAN_MAX_MODULE; md++) {
+ /* stop FAN0~7 clock */
+ iowrite8(NPCM7XX_FAN_TCKC_CLKX_NONE,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, md));
+
+ /* disable all interrupt */
+ iowrite8(0x00, NPCM7XX_FAN_REG_TIEN(data->fan_base, md));
+
+ /* clear all interrupt */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, md));
+
+ /* set FAN0~7 clock prescaler */
+ iowrite8(NPCM7XX_FAN_CLK_PRESCALE,
+ NPCM7XX_FAN_REG_TPRSC(data->fan_base, md));
+
+ /* set FAN0~7 mode (high-to-low transition) */
+ iowrite8((NPCM7XX_FAN_TMCTRL_MODE_5 | NPCM7XX_FAN_TMCTRL_TBEN |
+ NPCM7XX_FAN_TMCTRL_TAEN),
+ NPCM7XX_FAN_REG_TMCTRL(data->fan_base, md));
+
+ /* set FAN0~7 Initial Count/Cap */
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, md));
+
+ /* set FAN0~7 compare (equal to count) */
+ iowrite8((NPCM7XX_FAN_TCPCFG_EQAEN | NPCM7XX_FAN_TCPCFG_EQBEN),
+ NPCM7XX_FAN_REG_TCPCFG(data->fan_base, md));
+
+ /* set FAN0~7 compare value */
+ iowrite16(NPCM7XX_FAN_TCPA,
+ NPCM7XX_FAN_REG_TCPA(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCPB,
+ NPCM7XX_FAN_REG_TCPB(data->fan_base, md));
+
+ /* set FAN0~7 fan input FANIN 0~15 */
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINASEL(data->fan_base, md));
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINBSEL(data->fan_base, md));
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE; i++) {
+ ch = md * NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE + i;
+ data->fan_dev[ch].fan_st_flg = FAN_DISABLE;
+ data->fan_dev[ch].fan_pls_per_rev =
+ NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION;
+ data->fan_dev[ch].fan_cnt = 0;
+ }
+ }
+
+ apb_clk_freq = clk_get_rate(data->fan_clk);
+
+	/* Fan tach input clock = APB clock / prescaler; the default prescaler is 255. */
+ data->input_clk_freq = apb_clk_freq / (NPCM7XX_FAN_CLK_PRESCALE + 1);
+}
+
+static int
+npcm7xx_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->max_state;
+
+ return 0;
+}
+
+static int
+npcm7xx_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->cur_state;
+
+ return 0;
+}
+
+static int
+npcm7xx_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+ int ret;
+
+ if (state > cdev->max_state)
+ return -EINVAL;
+
+ cdev->cur_state = state;
+ ret = npcm7xx_pwm_config_set(cdev->data, cdev->pwm_port,
+ cdev->cooling_levels[cdev->cur_state]);
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops npcm7xx_pwm_cool_ops = {
+ .get_max_state = npcm7xx_pwm_cz_get_max_state,
+ .get_cur_state = npcm7xx_pwm_cz_get_cur_state,
+ .set_cur_state = npcm7xx_pwm_cz_set_cur_state,
+};
+
+static int npcm7xx_create_pwm_cooling(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data,
+ u32 pwm_port, u8 num_levels)
+{
+ int ret;
+ struct npcm7xx_cooling_device *cdev;
+
+ cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL);
+ if (!cdev->cooling_levels)
+ return -ENOMEM;
+
+ cdev->max_state = num_levels - 1;
+ ret = of_property_read_u8_array(child, "cooling-levels",
+ cdev->cooling_levels,
+ num_levels);
+ if (ret) {
+ dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
+ return ret;
+ }
+ snprintf(cdev->name, THERMAL_NAME_LENGTH, "%s%d", child->name,
+ pwm_port);
+
+ cdev->tcdev = thermal_of_cooling_device_register(child,
+ cdev->name,
+ cdev,
+ &npcm7xx_pwm_cool_ops);
+ if (IS_ERR(cdev->tcdev))
+ return PTR_ERR(cdev->tcdev);
+
+ cdev->data = data;
+ cdev->pwm_port = pwm_port;
+
+ data->cdev[pwm_port] = cdev;
+
+ return 0;
+}
+
+static int npcm7xx_en_pwm_fan(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data)
+{
+ u8 *fan_ch;
+ u32 pwm_port;
+ int ret, fan_cnt;
+ u8 index, ch;
+
+ ret = of_property_read_u32(child, "reg", &pwm_port);
+ if (ret)
+ return ret;
+
+ data->pwm_present[pwm_port] = true;
+	ret = npcm7xx_pwm_config_set(data, pwm_port,
+				     NPCM7XX_PWM_CMR_DEFAULT_NUM);
+	if (ret)
+		return ret;
+
+ ret = of_property_count_u8_elems(child, "cooling-levels");
+ if (ret > 0) {
+ ret = npcm7xx_create_pwm_cooling(dev, child, data, pwm_port,
+ ret);
+ if (ret)
+ return ret;
+ }
+
+ fan_cnt = of_property_count_u8_elems(child, "fan-tach-ch");
+ if (fan_cnt < 1)
+ return -EINVAL;
+
+ fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+ if (!fan_ch)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(child, "fan-tach-ch", fan_ch, fan_cnt);
+ if (ret)
+ return ret;
+
+ for (ch = 0; ch < fan_cnt; ch++) {
+ index = fan_ch[ch];
+ data->fan_present[index] = true;
+ data->fan_dev[index].fan_st_flg = FAN_INIT;
+ }
+
+ return 0;
+}
+
+static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np, *child;
+ struct npcm7xx_pwm_fan_data *data;
+ struct resource *res;
+ struct device *hwmon;
+ char name[20];
+ int ret, cnt;
+ u32 output_freq;
+ u32 i;
+
+ np = dev->of_node;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
+ if (!res) {
+ dev_err(dev, "pwm resource not found\n");
+ return -ENODEV;
+ }
+
+ data->pwm_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "pwm base resource is %pR\n", res);
+ if (IS_ERR(data->pwm_base))
+ return PTR_ERR(data->pwm_base);
+
+ data->pwm_clk = devm_clk_get(dev, "pwm");
+ if (IS_ERR(data->pwm_clk)) {
+ dev_err(dev, "couldn't get pwm clock\n");
+ return PTR_ERR(data->pwm_clk);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fan");
+ if (!res) {
+ dev_err(dev, "fan resource not found\n");
+ return -ENODEV;
+ }
+
+ data->fan_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "fan base resource is %pR\n", res);
+ if (IS_ERR(data->fan_base))
+ return PTR_ERR(data->fan_base);
+
+ data->fan_clk = devm_clk_get(dev, "fan");
+ if (IS_ERR(data->fan_clk)) {
+ dev_err(dev, "couldn't get fan clock\n");
+ return PTR_ERR(data->fan_clk);
+ }
+
+ output_freq = npcm7xx_pwm_init(data);
+ npcm7xx_fan_init(data);
+
+ for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES ; cnt++)
+ mutex_init(&data->pwm_lock[cnt]);
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
+ spin_lock_init(&data->fan_lock[i]);
+
+ data->fan_irq[i] = platform_get_irq(pdev, i);
+ if (data->fan_irq[i] < 0) {
+ dev_err(dev, "get IRQ fan%d failed\n", i);
+ return data->fan_irq[i];
+ }
+
+ sprintf(name, "NPCM7XX-FAN-MD%d", i);
+ ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
+ 0, name, (void *)data);
+ if (ret) {
+ dev_err(dev, "register IRQ fan%d failed\n", i);
+ return ret;
+ }
+ }
+
+ for_each_child_of_node(np, child) {
+ ret = npcm7xx_en_pwm_fan(dev, child, data);
+ if (ret) {
+ dev_err(dev, "enable pwm and fan failed\n");
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "npcm7xx_pwm_fan",
+ data, &npcm7xx_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_err(dev, "unable to register hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM; i++) {
+ if (data->fan_present[i]) {
+ /* fan timer initialization */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ timer_setup(&data->fan_timer,
+ npcm7xx_fan_polling, 0);
+ add_timer(&data->fan_timer);
+ break;
+ }
+ }
+
+ pr_info("NPCM7XX PWM-FAN Driver probed, output Freq %dHz[PWM], input Freq %dHz[FAN]\n",
+ output_freq, data->input_clk_freq);
+
+ return 0;
+}
+
+static const struct of_device_id of_pwm_fan_match_table[] = {
+ { .compatible = "nuvoton,npcm750-pwm-fan", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match_table);
+
+static struct platform_driver npcm7xx_pwm_fan_driver = {
+ .probe = npcm7xx_pwm_fan_probe,
+ .driver = {
+ .name = "npcm7xx_pwm_fan",
+ .of_match_table = of_pwm_fan_match_table,
+ },
+};
+
+module_platform_driver(npcm7xx_pwm_fan_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM7XX PWM and Fan Tacho driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index e71aec69e76e..a82018aaf473 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -130,7 +130,7 @@ config SENSORS_MAX34440
default n
help
If you say yes here you get hardware monitoring support for Maxim
- MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
+ MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461.
This driver can also be built as a module. If so, the module will
be called max34440.
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 74a1f6f68fb3..47576c460010 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34460, max34461 };
+enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -44,6 +44,9 @@ enum chips { max34440, max34441, max34446, max34460, max34461 };
#define MAX34440_STATUS_OT_FAULT BIT(5)
#define MAX34440_STATUS_OT_WARN BIT(6)
+#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
+#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
+
struct max34440_data {
int id;
struct pmbus_driver_info info;
@@ -67,7 +70,7 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446)
+ if (data->id != max34446 && data->id != max34451)
return -ENXIO;
ret = pmbus_read_word_data(client, page,
MAX34446_MFR_IOUT_AVG);
@@ -143,7 +146,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && data->id == max34446)
+ if (!ret && (data->id == max34446 || data->id == max34451))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -202,6 +205,58 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static int max34451_set_supported_funcs(struct i2c_client *client,
+ struct max34440_data *data)
+{
+ /*
+	 * Each of channels 0-15 can be configured to monitor one of the
+	 * following functions, based on MFR_CHANNEL_CONFIG[5:0]:
+ * 0x10: Sequencing + voltage monitoring (only valid for PAGES 0–11)
+ * 0x20: Voltage monitoring (no sequencing)
+ * 0x21: Voltage read only
+ * 0x22: Current monitoring
+ * 0x23: Current read only
+ * 0x30: General-purpose input active low
+ * 0x34: General-purpose input active high
+ * 0x00: Disabled
+ */
+
+ int page, rv;
+
+ for (page = 0; page < 16; page++) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_word_data(client,
+ MAX34451_MFR_CHANNEL_CONFIG);
+ if (rv < 0)
+ return rv;
+
+ switch (rv & MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK) {
+ case 0x10:
+ case 0x20:
+ data->info.func[page] = PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_STATUS_VOUT;
+ break;
+ case 0x21:
+ data->info.func[page] = PMBUS_HAVE_VOUT;
+ break;
+ case 0x22:
+ data->info.func[page] = PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT;
+ break;
+ case 0x23:
+ data->info.func[page] = PMBUS_HAVE_IOUT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
static struct pmbus_driver_info max34440_info[] = {
[max34440] = {
.pages = 14,
@@ -325,6 +380,30 @@ static struct pmbus_driver_info max34440_info[] = {
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
+ [max34451] = {
+ .pages = 21,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+		/* func[0-15] entries are set dynamically before probing */
+ .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34460] = {
.pages = 18,
.format[PSC_VOLTAGE_OUT] = direct,
@@ -398,6 +477,7 @@ static int max34440_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max34440_data *data;
+ int rv;
data = devm_kzalloc(&client->dev, sizeof(struct max34440_data),
GFP_KERNEL);
@@ -406,6 +486,12 @@ static int max34440_probe(struct i2c_client *client,
data->id = id->driver_data;
data->info = max34440_info[id->driver_data];
+ if (data->id == max34451) {
+ rv = max34451_set_supported_funcs(client, data);
+ if (rv)
+ return rv;
+ }
+
return pmbus_do_probe(client, id, &data->info);
}
@@ -413,6 +499,7 @@ static const struct i2c_device_id max34440_id[] = {
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
+ {"max34451", max34451},
{"max34460", max34460},
{"max34461", max34461},
{}
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
new file mode 100644
index 000000000000..fb4e4a6bb1f6
--- /dev/null
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Raspberry Pi voltage sensor driver
+ *
+ * Based on firmware/raspberrypi.c by Noralf Trønnes
+ *
+ * Copyright (C) 2018 Stefan Wahren <stefan.wahren@i2se.com>
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+#define UNDERVOLTAGE_STICKY_BIT BIT(16)
+
+struct rpi_hwmon_data {
+ struct device *hwmon_dev;
+ struct rpi_firmware *fw;
+ u32 last_throttled;
+ struct delayed_work get_values_poll_work;
+};
+
+static void rpi_firmware_get_throttled(struct rpi_hwmon_data *data)
+{
+ u32 new_uv, old_uv, value;
+ int ret;
+
+ /* Request firmware to clear sticky bits */
+ value = 0xffff;
+
+ ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
+ &value, sizeof(value));
+ if (ret) {
+ dev_err_once(data->hwmon_dev, "Failed to get throttled (%d)\n",
+ ret);
+ return;
+ }
+
+ new_uv = value & UNDERVOLTAGE_STICKY_BIT;
+ old_uv = data->last_throttled & UNDERVOLTAGE_STICKY_BIT;
+ data->last_throttled = value;
+
+ if (new_uv == old_uv)
+ return;
+
+ if (new_uv)
+ dev_crit(data->hwmon_dev, "Undervoltage detected!\n");
+ else
+ dev_info(data->hwmon_dev, "Voltage normalised\n");
+
+ sysfs_notify(&data->hwmon_dev->kobj, NULL, "in0_lcrit_alarm");
+}
+
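+/*
+ * Example: a RPI_FIRMWARE_GET_THROTTLED value with bit 16 set (e.g.
+ * 0x10000) means an undervoltage event was latched since the sticky bits
+ * were last cleared; rpi_read() below reports it as in0_lcrit_alarm.
+ */
+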
+static void get_values_poll(struct work_struct *work)
+{
+ struct rpi_hwmon_data *data;
+
+ data = container_of(work, struct rpi_hwmon_data,
+ get_values_poll_work.work);
+
+ rpi_firmware_get_throttled(data);
+
+	/*
+	 * We can't poll faster than the sticky shift interval (100 ms),
+	 * since the sticky bits that are cleared would otherwise appear
+	 * to flip.
+	 */
+ schedule_delayed_work(&data->get_values_poll_work, 2 * HZ);
+}
+
+static int rpi_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct rpi_hwmon_data *data = dev_get_drvdata(dev);
+
+ *val = !!(data->last_throttled & UNDERVOLTAGE_STICKY_BIT);
+ return 0;
+}
+
+static umode_t rpi_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static const u32 rpi_in_config[] = {
+ HWMON_I_LCRIT_ALARM,
+ 0
+};
+
+static const struct hwmon_channel_info rpi_in = {
+ .type = hwmon_in,
+ .config = rpi_in_config,
+};
+
+static const struct hwmon_channel_info *rpi_info[] = {
+ &rpi_in,
+ NULL
+};
+
+static const struct hwmon_ops rpi_hwmon_ops = {
+ .is_visible = rpi_is_visible,
+ .read = rpi_read,
+};
+
+static const struct hwmon_chip_info rpi_chip_info = {
+ .ops = &rpi_hwmon_ops,
+ .info = rpi_info,
+};
+
+static int rpi_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpi_hwmon_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* The parent driver ensures the firmware handle is valid */
+ data->fw = dev_get_drvdata(dev->parent);
+
+ /* Init throttled */
+ ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
+ &data->last_throttled,
+ sizeof(data->last_throttled));
+
+ data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
+ data,
+ &rpi_chip_info,
+ NULL);
+
+ INIT_DELAYED_WORK(&data->get_values_poll_work, get_values_poll);
+ platform_set_drvdata(pdev, data);
+
+ if (!PTR_ERR_OR_ZERO(data->hwmon_dev))
+ schedule_delayed_work(&data->get_values_poll_work, 2 * HZ);
+
+ return PTR_ERR_OR_ZERO(data->hwmon_dev);
+}
+
+static int rpi_hwmon_remove(struct platform_device *pdev)
+{
+ struct rpi_hwmon_data *data = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&data->get_values_poll_work);
+
+ return 0;
+}
+
+static struct platform_driver rpi_hwmon_driver = {
+ .probe = rpi_hwmon_probe,
+ .remove = rpi_hwmon_remove,
+ .driver = {
+ .name = "raspberrypi-hwmon",
+ },
+};
+module_platform_driver(rpi_hwmon_driver);
+
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index d16e6a3d38e8..2bad40d42210 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -367,6 +367,35 @@ out:
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
+/**
+ * of_hwspin_lock_get_id_byname() - get the lock id for a specified hwlock name
+ * @np: device node from which to request the specific hwlock
+ * @name: hwlock name
+ *
+ * This function provides a means for DT users of the hwspinlock module to
+ * get the global lock id of a specific hwspinlock using the specified name of
+ * the hwspinlock device, so that it can be requested using the normal
+ * hwspin_lock_request_specific() API.
+ *
+ * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
+ * device is not yet registered, -EINVAL on invalid arguments, or an
+ * appropriate error as returned from the OF parsing of the DT client node.
+ */
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
+{
+ int index;
+
+ if (!name)
+ return -EINVAL;
+
+ index = of_property_match_string(np, "hwlock-names", name);
+ if (index < 0)
+ return index;
+
+ return of_hwspin_lock_get_id(np, index);
+}
+EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
+
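+/*
+ * Usage sketch (hypothetical DT client, not mandated by this patch): with
+ *
+ *	hwlocks = <&hwlock 0>, <&hwlock 1>;
+ *	hwlock-names = "rx", "tx";
+ *
+ * in the client node, the "tx" lock can be resolved and claimed with:
+ *
+ *	id = of_hwspin_lock_get_id_byname(np, "tx");
+ *	if (id < 0)
+ *		return id;
+ *	hwlock = hwspin_lock_request_specific(id);
+ */
+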
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
struct hwspinlock *tmp;
@@ -500,6 +529,88 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank)
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
+static void devm_hwspin_lock_unreg(struct device *dev, void *res)
+{
+ hwspin_lock_unregister(*(struct hwspinlock_device **)res);
+}
+
+static int devm_hwspin_lock_device_match(struct device *dev, void *res,
+ void *data)
+{
+ struct hwspinlock_device **bank = res;
+
+ if (WARN_ON(!bank || !*bank))
+ return 0;
+
+ return *bank == data;
+}
+
+/**
+ * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
+ * a managed device
+ * @dev: the backing device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int devm_hwspin_lock_unregister(struct device *dev,
+ struct hwspinlock_device *bank)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_hwspin_lock_unreg,
+ devm_hwspin_lock_device_match, bank);
+ WARN_ON(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);
+
+/**
+ * devm_hwspin_lock_register() - register a new hw spinlock device for
+ * a managed device
+ * @dev: the backing device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ * @ops: hwspinlock handlers for this device
+ * @base_id: id of the first hardware spinlock in this bank
+ * @num_locks: number of hwspinlocks provided by this device
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock device instance.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int devm_hwspin_lock_register(struct device *dev,
+ struct hwspinlock_device *bank,
+ const struct hwspinlock_ops *ops,
+ int base_id, int num_locks)
+{
+ struct hwspinlock_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
+ if (!ret) {
+ *ptr = bank;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
+
/**
* __hwspin_lock_request() - tag an hwspinlock as used and power it up
*
@@ -656,7 +767,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
*
* This function mark @hwlock as free again.
* Should only be called with an @hwlock that was retrieved from
- * an earlier call to omap_hwspin_lock_request{_specific}.
+ * an earlier call to hwspin_lock_request{_specific}.
*
* Should be called from a process context (might sleep)
*
@@ -706,6 +817,116 @@ out:
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
+static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
+{
+ struct hwspinlock **hwlock = res;
+
+ if (WARN_ON(!hwlock || !*hwlock))
+ return 0;
+
+ return *hwlock == data;
+}
+
+static void devm_hwspin_lock_release(struct device *dev, void *res)
+{
+ hwspin_lock_free(*(struct hwspinlock **)res);
+}
+
+/**
+ * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
+ * @dev: the device to free the specific hwspinlock
+ * @hwlock: the specific hwspinlock to free
+ *
+ * This function marks @hwlock as free again.
+ * Should only be called with an @hwlock that was retrieved from
+ * an earlier call to hwspin_lock_request{_specific}.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_hwspin_lock_release,
+ devm_hwspin_lock_match, hwlock);
+ WARN_ON(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);
+
+/**
+ * devm_hwspin_lock_request() - request an hwspinlock for a managed device
+ * @dev: the device to request an hwspinlock
+ *
+ * This function should be called by users of the hwspinlock device,
+ * in order to dynamically assign them an unused hwspinlock.
+ * Usually the user of this lock will then have to communicate the lock's id
+ * to the remote core before it can be used for synchronization (to get the
+ * id of a given hwlock, use hwspin_lock_get_id()).
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
+{
+ struct hwspinlock **ptr, *hwlock;
+
+ ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ hwlock = hwspin_lock_request();
+ if (hwlock) {
+ *ptr = hwlock;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);
+
+/**
+ * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
+ * a managed device
+ * @dev: the device to request the specific hwspinlock
+ * @id: index of the specific hwspinlock that is requested
+ *
+ * This function should be called by users of the hwspinlock module,
+ * in order to assign them a specific hwspinlock.
+ * Usually early board code will be calling this function in order to
+ * reserve specific hwspinlock ids for predefined purposes.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id)
+{
+ struct hwspinlock **ptr, *hwlock;
+
+ ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ hwlock = hwspin_lock_request_specific(id);
+ if (hwlock) {
+ *ptr = hwlock;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
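A matching consumer-side sketch, assuming a hypothetical client driver that needs lock id 7; since the request returns NULL on error without reporting a cause, -EBUSY is chosen here purely for illustration:

/* Hypothetical consumer: request a specific lock, released by devres. */
static int bar_probe(struct platform_device *pdev)
{
	struct hwspinlock *hwlock;

	hwlock = devm_hwspin_lock_request_specific(&pdev->dev, 7);
	if (!hwlock)
		return -EBUSY;

	dev_info(&pdev->dev, "using hwspinlock id %d\n",
		 hwspin_lock_get_id(hwlock));
	return 0;	/* devres frees the lock on driver detach */
}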
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ef9cb3c164e1..ad34380cac49 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -31,6 +31,17 @@ config CORESIGHT_LINK_AND_SINK_TMC
complies with the generic implementation of the component without
special enhancement or added features.
+config CORESIGHT_CATU
+ bool "Coresight Address Translation Unit (CATU) driver"
+ depends on CORESIGHT_LINK_AND_SINK_TMC
+ help
+ Enable support for the Coresight Address Translation Unit (CATU).
+ CATU supports a scatter-gather table of 4K pages, with forward and
+ backward lookup. CATU helps the TMC ETR use a large, physically
+ non-contiguous trace buffer by translating the addresses used by the
+ ETR into physical addresses, via lookups in the provided table. CATU
+ can also be used in pass-through mode, where addresses are not
+ translated.
+
config CORESIGHT_SINK_TPIU
bool "Coresight generic TPIU driver"
depends on CORESIGHT_LINKS_AND_SINKS
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 61db9dd0d571..41870ded51a3 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
+obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
new file mode 100644
index 000000000000..ff94e58845b7
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Coresight Address Translation Unit support
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "coresight-catu.h"
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+#define csdev_to_catu_drvdata(csdev) \
+ dev_get_drvdata(csdev->dev.parent)
+
+/* Verbose output for CATU table contents */
+#ifdef CATU_DEBUG
+#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
+#else
+#define catu_dbg(x, ...) do {} while (0)
+#endif
+
+struct catu_etr_buf {
+ struct tmc_sg_table *catu_table;
+ dma_addr_t sladdr;
+};
+
+/*
+ * CATU uses a page size of 4KB for page tables as well as data pages.
+ * Each 64-bit entry in the table has the following format:
+ *
+ * 63 12 1 0
+ * ------------------------------------
+ * | Address [63-12] | SBZ | V|
+ * ------------------------------------
+ *
+ * Where bit[0] V indicates if the address is valid or not.
+ * Each 4K table page holds up to 256 data page pointers, taking up to
+ * 2K. There are two link pointers, pointing to the previous and next
+ * table pages respectively, at the end of the 4K page (i.e., entries
+ * 510 and 511).
+ * E.g., a table of two pages could look like:
+ *
+ * Table Page 0 Table Page 1
+ * SLADDR ===> x------------------x x--> x-----------------x
+ * INADDR ->| Page 0 | V | | | Page 256 | V | <- INADDR+1M
+ * |------------------| | |-----------------|
+ * INADDR+4K ->| Page 1 | V | | | |
+ * |------------------| | |-----------------|
+ * | Page 2 | V | | | |
+ * |------------------| | |-----------------|
+ * | ... | V | | | ... |
+ * |------------------| | |-----------------|
+ * INADDR+1020K| Page 255 | V | | | Page 511 | V |
+ * SLADDR+2K==>|------------------| | |-----------------|
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | ... | | | | |
+ * |------------------| | |-----------------|
+ * | IGNORED | 0 | | | Table Page 0| 1 |
+ * |------------------| | |-----------------|
+ * | Table Page 1| 1 |--x | IGNORED | 0 |
+ * x------------------x x-----------------x
+ * SLADDR+4K==>
+ *
+ * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
+ * must be aligned to 1MB (the size addressable by a single page table).
+ * The CATU maps INADDR{LO:HI} to the first page in the table pointed
+ * to by SLADDR{LO:HI} and so on.
+ *
+ */
+typedef u64 cate_t;
+
+#define CATU_PAGE_SHIFT 12
+#define CATU_PAGE_SIZE (1UL << CATU_PAGE_SHIFT)
+#define CATU_PAGES_PER_SYSPAGE (PAGE_SIZE / CATU_PAGE_SIZE)
+
+/* Page pointers are only allocated in the first 2K half */
+#define CATU_PTRS_PER_PAGE ((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
+#define CATU_PTRS_PER_SYSPAGE (CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
+#define CATU_LINK_PREV ((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
+#define CATU_LINK_NEXT ((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)
+
+#define CATU_ADDR_SHIFT 12
+#define CATU_ADDR_MASK ~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
+#define CATU_ENTRY_VALID ((cate_t)0x1)
+#define CATU_VALID_ENTRY(addr) \
+ (((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
+#define CATU_ENTRY_ADDR(entry) ((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))
+
+/* CATU expects the INADDR to be aligned to 1M. */
+#define CATU_DEFAULT_INADDR (1ULL << 20)
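As a worked example of the encoding above (illustrative only), a 4K-aligned data page at physical address 0x80001000 round-trips through the helpers as follows:

cate_t entry = CATU_VALID_ENTRY(0x80001000ULL);	/* == 0x80001001 */
dma_addr_t addr = CATU_ENTRY_ADDR(entry);	/* == 0x80001000 */
bool valid = entry & CATU_ENTRY_VALID;		/* true */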
+
+/*
+ * catu_get_table : Retrieve the table pointers for the given @offset
+ * within the buffer. The @offset is wrapped around to a valid offset.
+ *
+ * Returns : The CPU virtual address for the beginning of the table
+ * containing the data page pointer for @offset. If @daddrp is not NULL,
+ * it is set to the DMA address of the beginning of the table.
+ */
+static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
+ unsigned long offset,
+ dma_addr_t *daddrp)
+{
+ unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
+ unsigned int table_nr, pg_idx, pg_offset;
+ struct tmc_pages *table_pages = &catu_table->table_pages;
+ void *ptr;
+
+ /* Make sure offset is within the range */
+ offset %= buf_size;
+
+ /*
+ * Each table can address 1MB and a single kernel page can
+ * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
+ */
+ table_nr = offset >> 20;
+ /* Find the table page in which the table_nr lies */
+ pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
+ pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
+ if (daddrp)
+ *daddrp = table_pages->daddrs[pg_idx] + pg_offset;
+ ptr = page_address(table_pages->pages[pg_idx]);
+ return (cate_t *)((unsigned long)ptr + pg_offset);
+}
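To make the index arithmetic concrete, a worked example (illustrative, for two assumed kernel page sizes, with a buffer larger than 5MB):

/*
 * With 64K kernel pages, CATU_PAGES_PER_SYSPAGE = 16. For offset = 5MB:
 *   table_nr  = 5MB >> 20     = 5
 *   pg_idx    = 5 / 16        = 0      (first system page)
 *   pg_offset = (5 % 16) * 4K = 0x5000
 * With 4K kernel pages the same offset gives pg_idx = 5, pg_offset = 0.
 */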
+
+#ifdef CATU_DEBUG
+static void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ cate_t *table;
+ unsigned long table_end, buf_size, offset = 0;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ dev_dbg(catu_table->dev,
+ "Dump table %p, tdaddr: %llx\n",
+ catu_table, catu_table->table_daddr);
+
+ while (offset < buf_size) {
+ table_end = offset + SZ_1M < buf_size ?
+ offset + SZ_1M : buf_size;
+ table = catu_get_table(catu_table, offset, NULL);
+ for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
+ dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
+ dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
+ table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
+ dev_dbg(catu_table->dev, "== End of sub-table ===");
+ }
+ dev_dbg(catu_table->dev, "== End of Table ===");
+}
+
+#else
+static inline void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+}
+#endif
+
+static inline cate_t catu_make_entry(dma_addr_t addr)
+{
+ return addr ? CATU_VALID_ENTRY(addr) : 0;
+}
+
+/*
+ * catu_populate_table : Populate the given CATU table.
+ * The table is always populated as a circular table.
+ * i.e, the "prev" link of the "first" table points to the "last"
+ * table and the "next" link of the "last" table points to the
+ * "first" table. The buffer should be made linear by calling
+ * catu_set_table().
+ */
+static void
+catu_populate_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ int sys_pidx; /* Index to current system data page */
+ int catu_pidx; /* Index of CATU page within the system data page */
+ unsigned long offset, buf_size, table_end;
+ dma_addr_t data_daddr;
+ dma_addr_t prev_taddr, next_taddr, cur_taddr;
+ cate_t *table_ptr, *next_table;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ sys_pidx = catu_pidx = 0;
+ offset = 0;
+
+ table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
+ prev_taddr = 0; /* Prev link for the first table */
+
+ while (offset < buf_size) {
+ /*
+ * The @offset is always 1M aligned here and we have an
+ * empty table @table_ptr to fill. Each table can address
+ * up to 1MB of the data buffer. The last table may have fewer
+ * entries if the buffer size is not aligned.
+ */
+ table_end = (offset + SZ_1M) < buf_size ?
+ (offset + SZ_1M) : buf_size;
+ for (i = 0; offset < table_end;
+ i++, offset += CATU_PAGE_SIZE) {
+
+ data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
+ catu_pidx * CATU_PAGE_SIZE;
+ catu_dbg(catu_table->dev,
+ "[table %5ld:%03d] 0x%llx\n",
+ (offset >> 20), i, data_daddr);
+ table_ptr[i] = catu_make_entry(data_daddr);
+ /* Move the pointers for data pages */
+ catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
+ if (catu_pidx == 0)
+ sys_pidx++;
+ }
+
+ /*
+ * If we have finished all the valid entries, fill the rest of
+ * the table (i.e., the last table page) with invalid entries,
+ * to fail the lookups.
+ */
+ if (offset == buf_size) {
+ memset(&table_ptr[i], 0,
+ sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
+ next_taddr = 0;
+ } else {
+ next_table = catu_get_table(catu_table,
+ offset, &next_taddr);
+ }
+
+ table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
+ table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);
+
+ catu_dbg(catu_table->dev,
+ "[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
+ (offset >> 20) - 1, cur_taddr, prev_taddr, next_taddr);
+
+ /* Update the prev/next addresses */
+ if (next_taddr) {
+ prev_taddr = cur_taddr;
+ cur_taddr = next_taddr;
+ table_ptr = next_table;
+ }
+ }
+
+ /* Sync the table for device */
+ tmc_sg_table_sync_table(catu_table);
+}
+
+static struct tmc_sg_table *
+catu_init_sg_table(struct device *catu_dev, int node,
+ ssize_t size, void **pages)
+{
+ int nr_tpages;
+ struct tmc_sg_table *catu_table;
+
+ /*
+ * Each table can address up to 1MB and we can have
+ * CATU_PAGES_PER_SYSPAGE tables in a system page.
+ */
+ nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
+ catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
+ size >> PAGE_SHIFT, pages);
+ if (IS_ERR(catu_table))
+ return catu_table;
+
+ catu_populate_table(catu_table);
+ dev_dbg(catu_dev,
+ "Setup table %p, size %ldKB, %d table pages\n",
+ catu_table, (unsigned long)size >> 10, nr_tpages);
+ catu_dump_table(catu_table);
+ return catu_table;
+}
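A quick sizing example for the allocation above (illustrative, assuming a 4K kernel page size):

/*
 * On a 4K-page kernel, CATU_PAGES_PER_SYSPAGE = 1, so a 4MB buffer
 * needs nr_tpages = DIV_ROUND_UP(4MB, 1MB) / 1 = 4 table pages and
 * 4MB >> PAGE_SHIFT = 1024 data pages.
 */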
+
+static void catu_free_etr_buf(struct etr_buf *etr_buf)
+{
+ struct catu_etr_buf *catu_buf;
+
+ if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
+ return;
+
+ catu_buf = etr_buf->private;
+ tmc_free_sg_table(catu_buf->catu_table);
+ kfree(catu_buf);
+}
+
+static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
+}
+
+static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+ struct tmc_sg_table *catu_table = catu_buf->catu_table;
+ u64 r_offset, w_offset;
+
+ /*
+ * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
+ * offsets within the trace buffer.
+ */
+ r_offset = rrp - etr_buf->hwaddr;
+ w_offset = rwp - etr_buf->hwaddr;
+
+ if (!etr_buf->full) {
+ etr_buf->len = w_offset - r_offset;
+ if (w_offset < r_offset)
+ etr_buf->len += etr_buf->size;
+ } else {
+ etr_buf->len = etr_buf->size;
+ }
+
+ etr_buf->offset = r_offset;
+ tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
+}
+
+static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
+ struct etr_buf *etr_buf, int node, void **pages)
+{
+ struct coresight_device *csdev;
+ struct device *catu_dev;
+ struct tmc_sg_table *catu_table;
+ struct catu_etr_buf *catu_buf;
+
+ csdev = tmc_etr_get_catu_device(tmc_drvdata);
+ if (!csdev)
+ return -ENODEV;
+ catu_dev = csdev->dev.parent;
+ catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
+ if (!catu_buf)
+ return -ENOMEM;
+
+ catu_table = catu_init_sg_table(catu_dev, node, etr_buf->size, pages);
+ if (IS_ERR(catu_table)) {
+ kfree(catu_buf);
+ return PTR_ERR(catu_table);
+ }
+
+ etr_buf->mode = ETR_MODE_CATU;
+ etr_buf->private = catu_buf;
+ etr_buf->hwaddr = CATU_DEFAULT_INADDR;
+
+ catu_buf->catu_table = catu_table;
+ /* Get the table base address */
+ catu_buf->sladdr = catu_table->table_daddr;
+
+ return 0;
+}
+
+const struct etr_buf_operations etr_catu_buf_ops = {
+ .alloc = catu_alloc_etr_buf,
+ .free = catu_free_etr_buf,
+ .sync = catu_sync_etr_buf,
+ .get_data = catu_get_data_etr_buf,
+};
+
+coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
+coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
+coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
+coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
+coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
+coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
+coresight_simple_reg64(struct catu_drvdata, sladdr,
+ CATU_SLADDRLO, CATU_SLADDRHI);
+coresight_simple_reg64(struct catu_drvdata, inaddr,
+ CATU_INADDRLO, CATU_INADDRHI);
+
+static struct attribute *catu_mgmt_attrs[] = {
+ &dev_attr_devid.attr,
+ &dev_attr_control.attr,
+ &dev_attr_status.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_axictrl.attr,
+ &dev_attr_irqen.attr,
+ &dev_attr_sladdr.attr,
+ &dev_attr_inaddr.attr,
+ NULL,
+};
+
+static const struct attribute_group catu_mgmt_group = {
+ .attrs = catu_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *catu_groups[] = {
+ &catu_mgmt_group,
+ NULL,
+};
+
+
+static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
+{
+ return coresight_timeout(drvdata->base,
+ CATU_STATUS, CATU_STATUS_READY, 1);
+}
+
+static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
+{
+ u32 control, mode;
+ struct etr_buf *etr_buf = data;
+
+ if (catu_wait_for_ready(drvdata))
+ dev_warn(drvdata->dev, "Timeout while waiting for READY\n");
+
+ control = catu_read_control(drvdata);
+ if (control & BIT(CATU_CONTROL_ENABLE)) {
+ dev_warn(drvdata->dev, "CATU is already enabled\n");
+ return -EBUSY;
+ }
+
+ control |= BIT(CATU_CONTROL_ENABLE);
+
+ if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ mode = CATU_MODE_TRANSLATE;
+ catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
+ catu_write_sladdr(drvdata, catu_buf->sladdr);
+ catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
+ } else {
+ mode = CATU_MODE_PASS_THROUGH;
+ catu_write_sladdr(drvdata, 0);
+ catu_write_inaddr(drvdata, 0);
+ }
+
+ catu_write_irqen(drvdata, 0);
+ catu_write_mode(drvdata, mode);
+ catu_write_control(drvdata, control);
+ dev_dbg(drvdata->dev, "Enabled in %s mode\n",
+ (mode == CATU_MODE_PASS_THROUGH) ?
+ "Pass through" :
+ "Translate");
+ return 0;
+}
+
+static int catu_enable(struct coresight_device *csdev, void *data)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_enable_hw(catu_drvdata, data);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+static int catu_disable_hw(struct catu_drvdata *drvdata)
+{
+ int rc = 0;
+
+ catu_write_control(drvdata, 0);
+ if (catu_wait_for_ready(drvdata)) {
+ dev_info(drvdata->dev, "Timeout while waiting for READY\n");
+ rc = -EAGAIN;
+ }
+
+ dev_dbg(drvdata->dev, "Disabled\n");
+ return rc;
+}
+
+static int catu_disable(struct coresight_device *csdev, void *__unused)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_disable_hw(catu_drvdata);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+const struct coresight_ops_helper catu_helper_ops = {
+ .enable = catu_enable,
+ .disable = catu_disable,
+};
+
+const struct coresight_ops catu_ops = {
+ .helper_ops = &catu_helper_ops,
+};
+
+static int catu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret = 0;
+ u32 dma_mask;
+ struct catu_drvdata *drvdata;
+ struct coresight_desc catu_desc;
+ struct coresight_platform_data *pdata = NULL;
+ struct device *dev = &adev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
+ }
+ dev->platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ drvdata->dev = dev;
+ dev_set_drvdata(dev, drvdata);
+ base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out;
+ }
+
+ /* Setup dma mask for the device */
+ dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
+ switch (dma_mask) {
+ case 32:
+ case 40:
+ case 44:
+ case 48:
+ case 52:
+ case 56:
+ case 64:
+ break;
+ default:
+ /* Default to 40 bits as supported by TMC-ETR */
+ dma_mask = 40;
+ }
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
+ if (ret)
+ goto out;
+
+ drvdata->base = base;
+ catu_desc.pdata = pdata;
+ catu_desc.dev = dev;
+ catu_desc.groups = catu_groups;
+ catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
+ catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
+ catu_desc.ops = &catu_ops;
+ drvdata->csdev = coresight_register(&catu_desc);
+ if (IS_ERR(drvdata->csdev))
+ ret = PTR_ERR(drvdata->csdev);
+out:
+ pm_runtime_put(&adev->dev);
+ return ret;
+}
+
+static struct amba_id catu_ids[] = {
+ {
+ .id = 0x000bb9ee,
+ .mask = 0x000fffff,
+ },
+ {},
+};
+
+static struct amba_driver catu_driver = {
+ .drv = {
+ .name = "coresight-catu",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = catu_probe,
+ .id_table = catu_ids,
+};
+
+builtin_amba_driver(catu_driver);
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
new file mode 100644
index 000000000000..1b281f0dcccc
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#ifndef _CORESIGHT_CATU_H
+#define _CORESIGHT_CATU_H
+
+#include "coresight-priv.h"
+
+/* Register offset from base */
+#define CATU_CONTROL 0x000
+#define CATU_MODE 0x004
+#define CATU_AXICTRL 0x008
+#define CATU_IRQEN 0x00c
+#define CATU_SLADDRLO 0x020
+#define CATU_SLADDRHI 0x024
+#define CATU_INADDRLO 0x028
+#define CATU_INADDRHI 0x02c
+#define CATU_STATUS 0x100
+#define CATU_DEVARCH 0xfbc
+
+#define CATU_CONTROL_ENABLE 0
+
+#define CATU_MODE_PASS_THROUGH 0U
+#define CATU_MODE_TRANSLATE 1U
+
+#define CATU_AXICTRL_ARCACHE_SHIFT 4
+#define CATU_AXICTRL_ARCACHE_MASK 0xf
+#define CATU_AXICTRL_ARPROT_MASK 0x3
+#define CATU_AXICTRL_ARCACHE(arcache) \
+ (((arcache) & CATU_AXICTRL_ARCACHE_MASK) << CATU_AXICTRL_ARCACHE_SHIFT)
+
+#define CATU_AXICTRL_VAL(arcache, arprot) \
+ (CATU_AXICTRL_ARCACHE(arcache) | ((arprot) & CATU_AXICTRL_ARPROT_MASK))
+
+#define AXI3_AxCACHE_WB_READ_ALLOC 0x7
+/*
+ * AXI - ARPROT bits:
+ * See AMBA AXI & ACE Protocol specification (ARM IHI 0022E)
+ * section A4.7 Access Permissions.
+ *
+ * Bit 0: 0 - Unprivileged access, 1 - Privileged access
+ * Bit 1: 0 - Secure access, 1 - Non-secure access.
+ * Bit 2: 0 - Data access, 1 - instruction access.
+ *
+ * CATU AXICTRL:ARPROT[2] is res0 as we always access data.
+ */
+#define CATU_OS_ARPROT 0x2
+
+#define CATU_OS_AXICTRL \
+ CATU_AXICTRL_VAL(AXI3_AxCACHE_WB_READ_ALLOC, CATU_OS_ARPROT)
+
+#define CATU_STATUS_READY 8
+#define CATU_STATUS_ADRERR 0
+#define CATU_STATUS_AXIERR 4
+
+#define CATU_IRQEN_ON 0x1
+#define CATU_IRQEN_OFF 0x0
+
+struct catu_drvdata {
+ struct device *dev;
+ void __iomem *base;
+ struct coresight_device *csdev;
+ int irq;
+};
+
+#define CATU_REG32(name, offset) \
+static inline u32 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, offset, -1); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u32 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, offset, -1); \
+}
+
+#define CATU_REG_PAIR(name, lo_off, hi_off) \
+static inline u64 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u64 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+}
+
+CATU_REG32(control, CATU_CONTROL);
+CATU_REG32(mode, CATU_MODE);
+CATU_REG32(irqen, CATU_IRQEN);
+CATU_REG32(axictrl, CATU_AXICTRL);
+CATU_REG_PAIR(sladdr, CATU_SLADDRLO, CATU_SLADDRHI)
+CATU_REG_PAIR(inaddr, CATU_INADDRLO, CATU_INADDRHI)
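For reference, the sladdr pair accessors generated by CATU_REG_PAIR() above expand to the equivalent of the following (illustrative expansion, not additional code):

static inline u64 catu_read_sladdr(struct catu_drvdata *drvdata)
{
	return coresight_read_reg_pair(drvdata->base,
				       CATU_SLADDRLO, CATU_SLADDRHI);
}

static inline void catu_write_sladdr(struct catu_drvdata *drvdata, u64 val)
{
	coresight_write_reg_pair(drvdata->base, val,
				 CATU_SLADDRLO, CATU_SLADDRHI);
}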
+
+static inline bool coresight_is_catu_device(struct coresight_device *csdev)
+{
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return false;
+ if (csdev->type != CORESIGHT_DEV_TYPE_HELPER)
+ return false;
+ if (csdev->subtype.helper_subtype != CORESIGHT_DEV_SUBTYPE_HELPER_CATU)
+ return false;
+ return true;
+}
+
+#ifdef CONFIG_CORESIGHT_CATU
+extern const struct etr_buf_operations etr_catu_buf_ops;
+#else
+/* Dummy declaration for the CATU ops */
+static const struct etr_buf_operations etr_catu_buf_ops;
+#endif
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 320d29df17e1..306119eaf16a 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -195,7 +195,6 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
bool lost = false;
int i;
u8 *buf_ptr;
- const u32 *barrier;
u32 read_data, depth;
u32 read_ptr, write_ptr;
u32 frame_off, frame_endoff;
@@ -226,19 +225,16 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
depth = drvdata->buffer_depth;
buf_ptr = drvdata->buf;
- barrier = barrier_pkt;
for (i = 0; i < depth; i++) {
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
*(u32 *)buf_ptr = read_data;
buf_ptr += 4;
}
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+
if (frame_off) {
buf_ptr -= (frame_endoff * 4);
for (i = 0; i < frame_endoff; i++) {
@@ -447,7 +443,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
buf_ptr = buf->data_pages[cur] + offset;
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
+ if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
read_data = *barrier;
barrier++;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index e8b4549e30e2..79e1ad860d8a 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -168,8 +168,6 @@
* @seq_curr_state: current value of the sequencer register.
* @ctxid_idx: index for the context ID registers.
* @ctxid_pid: value for the context ID to trigger on.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask: mask applicable to all the context IDs.
* @sync_freq: Synchronisation frequency.
* @timestamp_event: Defines an event that requests the insertion
@@ -202,7 +200,6 @@ struct etm_config {
u32 seq_curr_state;
u8 ctxid_idx;
u32 ctxid_pid[ETM_MAX_CTXID_CMP];
- u32 ctxid_vpid[ETM_MAX_CTXID_CMP];
u32 ctxid_mask;
u32 sync_freq;
u32 timestamp_event;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 9435c1481f61..75487b3fad86 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
@@ -1025,8 +1026,15 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
- val = config->ctxid_vpid[config->ctxid_idx];
+ val = config->ctxid_pid[config->ctxid_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
@@ -1037,19 +1045,28 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
int ret;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- ret = kstrtoul(buf, 16, &vpid);
+ /*
+ * When contextID tracing is enabled the tracers will insert the
+ * value found in the contextID register in the trace stream. But if
+ * a process is in a namespace the PID of that process as seen from the
+ * namespace won't be what the kernel sees, something that makes the
+ * feature confusing and can potentially leak kernel only information.
+ * As such refuse to use the feature if @current is not in the initial
+ * PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 16, &pid);
if (ret)
return ret;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
config->ctxid_pid[config->ctxid_idx] = pid;
- config->ctxid_vpid[config->ctxid_idx] = vpid;
spin_unlock(&drvdata->spinlock);
return size;
@@ -1063,6 +1080,13 @@ static ssize_t ctxid_mask_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
val = config->ctxid_mask;
return sprintf(buf, "%#lx\n", val);
}
@@ -1076,6 +1100,13 @@ static ssize_t ctxid_mask_store(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 15ed64d51a5b..7c74263c333d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -230,10 +230,8 @@ void etm_set_default(struct etm_config *config)
config->seq_curr_state = 0x0;
config->ctxid_idx = 0x0;
- for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+ for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask = 0x0;
/* Setting default to 1024 as per TRM recommendation */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 4eb8da785ce0..a0365e23678e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
@@ -250,10 +251,8 @@ static ssize_t reset_store(struct device *dev,
}
config->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->numcidc; i++) {
+ for (i = 0; i < drvdata->numcidc; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask0 = 0x0;
config->ctxid_mask1 = 0x0;
@@ -1637,9 +1636,16 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
- val = (unsigned long)config->ctxid_vpid[idx];
+ val = (unsigned long)config->ctxid_pid[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1649,26 +1655,35 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
u8 idx;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
/*
+ * When contextID tracing is enabled the tracers will insert the
+ * value found in the contextID register in the trace stream. But if
+ * a process is in a namespace the PID of that process as seen from the
+ * namespace won't be what the kernel sees, something that makes the
+ * feature confusing and can potentially leak kernel only information.
+ * As such refuse to use the feature if @current is not in the initial
+ * PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (kstrtoul(buf, 16, &vpid))
+ if (kstrtoul(buf, 16, &pid))
return -EINVAL;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
config->ctxid_pid[idx] = (u64)pid;
- config->ctxid_vpid[idx] = (u64)vpid;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1682,6 +1697,13 @@ static ssize_t ctxid_masks_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
val1 = config->ctxid_mask0;
val2 = config->ctxid_mask1;
@@ -1699,6 +1721,13 @@ static ssize_t ctxid_masks_store(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 9bc04c50d45b..1d94ebec027b 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1027,7 +1027,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
}
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
+ dev_info(dev, "CPU%d: ETM v%d.%d initialized\n",
+ drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1045,23 +1046,19 @@ err_arch_supported:
return ret;
}
+#define ETM4x_AMBA_ID(pid) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ }
+
static const struct amba_id etm4_ids[] = {
- { /* ETM 4.0 - Cortex-A53 */
- .id = 0x000bb95d,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - Cortex-A57 */
- .id = 0x000bb95e,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - A72, Maia, HiSilicon */
- .id = 0x000bb95a,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { 0, 0},
+ ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */
+ ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */
+ ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */
+ ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */
+ ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */
+ {},
};
static struct amba_driver etm4x_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index b7c4a6f6c6b9..52786e9d8926 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -230,8 +230,6 @@
* @addr_type: Current status of the comparator register.
* @ctxid_idx: Context ID index selector.
* @ctxid_pid: Value of the context ID comparator.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask0:Context ID comparator mask for comparator 0-3.
* @ctxid_mask1:Context ID comparator mask for comparator 4-7.
* @vmid_idx: VM ID index selector.
@@ -274,7 +272,6 @@ struct etmv4_config {
u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
u8 ctxid_idx;
u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
- u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
u32 ctxid_mask0;
u32 ctxid_mask1;
u8 vmid_idx;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 0e5a74dae6a6..1a6cf3589866 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -57,7 +57,8 @@ static DEVICE_ATTR_RO(name)
#define coresight_simple_reg64(type, name, lo_off, hi_off) \
__coresight_simple_func(type, NULL, name, lo_off, hi_off)
-extern const u32 barrier_pkt[5];
+extern const u32 barrier_pkt[4];
+#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(barrier_pkt))
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
@@ -91,6 +92,13 @@ struct cs_buffers {
void **data_pages;
};
+static inline void coresight_insert_barrier_packet(void *buf)
+{
+ if (buf)
+ memcpy(buf, barrier_pkt, CORESIGHT_BARRIER_PKT_SIZE);
+}
+
static inline void CS_LOCK(void __iomem *addr)
{
do {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 61d849b11c26..0549249f4b39 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -32,39 +32,28 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
- bool lost = false;
char *bufp;
- const u32 *barrier;
- u32 read_data, status;
+ u32 read_data, lost;
int i;
- /*
- * Get a hold of the status register and see if a wrap around
- * has occurred.
- */
- status = readl_relaxed(drvdata->base + TMC_STS);
- if (status & TMC_STS_FULL)
- lost = true;
-
+ /* Check if the buffer wrapped around. */
+ lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
bufp = drvdata->buf;
drvdata->len = 0;
- barrier = barrier_pkt;
while (1) {
for (i = 0; i < drvdata->memwidth; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
- return;
-
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
+ goto done;
memcpy(bufp, &read_data, 4);
bufp += 4;
drvdata->len += 4;
}
}
+done:
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+ return;
}
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
@@ -109,6 +98,24 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+/*
+ * Return the available trace data in the buffer from @pos, with
+ * a maximum limit of @len, updating @bufpp to point to where it
+ * can be found.
+ */
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ ssize_t actual = len;
+
+ /* Adjust the len to the available size at @pos */
+ if (pos + actual > drvdata->len)
+ actual = drvdata->len - pos;
+ if (actual > 0)
+ *bufpp = drvdata->buf + pos;
+ return actual;
+}
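A caller-side sketch of draining the ETB buffer through this helper; consume_trace() is a hypothetical stand-in for whatever consumes the data:

static void drain_etb(struct tmc_drvdata *drvdata)
{
	loff_t pos = 0;
	ssize_t n;
	char *buf;

	while ((n = tmc_etb_get_sysfs_trace(drvdata, pos, SZ_4K, &buf)) > 0) {
		consume_trace(buf, n);	/* hypothetical consumer */
		pos += n;
	}
}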
+
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 02f747afa2ba..2eda5de304c2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -6,22 +6,912 @@
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
+struct etr_flat_buf {
+ struct device *dev;
+ dma_addr_t daddr;
+ void *vaddr;
+ size_t size;
+};
+
+/*
+ * The TMC ETR SG has a page size of 4K. The SG table contains pointers
+ * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
+ * 4K (e.g., 16KB or 64KB). This implies that a single OS page could
+ * contain more than one SG buffer and tables.
+ *
+ * A table entry has the following format:
+ *
+ * ---Bit31------------Bit4-------Bit1-----Bit0--
+ * | Address[39:12] | SBZ | Entry Type |
+ * ----------------------------------------------
+ *
+ * Address: Bits [39:12] of a physical page address. Bits [11:0] are
+ * always zero.
+ *
+ * Entry type:
+ * b00 - Reserved.
+ * b01 - Last entry in the tables, points to 4K page buffer.
+ * b10 - Normal entry, points to 4K page buffer.
+ * b11 - Link. The address points to the base of next table.
+ */
+
+typedef u32 sgte_t;
+
+#define ETR_SG_PAGE_SHIFT 12
+#define ETR_SG_PAGE_SIZE (1UL << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_PAGES_PER_SYSPAGE (PAGE_SIZE / ETR_SG_PAGE_SIZE)
+#define ETR_SG_PTRS_PER_PAGE (ETR_SG_PAGE_SIZE / sizeof(sgte_t))
+#define ETR_SG_PTRS_PER_SYSPAGE (PAGE_SIZE / sizeof(sgte_t))
+
+#define ETR_SG_ET_MASK 0x3
+#define ETR_SG_ET_LAST 0x1
+#define ETR_SG_ET_NORMAL 0x2
+#define ETR_SG_ET_LINK 0x3
+
+#define ETR_SG_ADDR_SHIFT 4
+
+#define ETR_SG_ENTRY(addr, type) \
+ (sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
+ (type & ETR_SG_ET_MASK))
+
+#define ETR_SG_ADDR(entry) \
+ (((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_ET(entry) ((entry) & ETR_SG_ET_MASK)
+
+/*
+ * struct etr_sg_table : ETR SG Table
+ * @sg_table: Generic SG Table holding the data/table pages.
+ * @hwaddr: hwaddress used by the TMC, which is the base
+ * address of the table.
+ */
+struct etr_sg_table {
+ struct tmc_sg_table *sg_table;
+ dma_addr_t hwaddr;
+};
+
+/*
+ * tmc_etr_sg_table_entries: Total number of table entries required to map
+ * @nr_pages system pages.
+ *
+ * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
+ * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
+ * with the last entry pointing to another page of table entries.
+ * If we spill over to a new page for mapping 1 entry, we could as
+ * well replace the link entry of the previous page with the last entry.
+ */
+static inline unsigned long __attribute_const__
+tmc_etr_sg_table_entries(int nr_pages)
+{
+ unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
+ unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
+ /*
+ * If we spill over to a new page for 1 entry, we could as well
+ * make it the LAST entry in the previous page, skipping the Link
+ * address.
+ */
+ if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
+ nr_sglinks--;
+ return nr_sgpages + nr_sglinks;
+}
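Two worked examples of the entry count (illustrative, assuming 4K kernel pages so that ETR_SG_PAGES_PER_SYSPAGE = 1 and ETR_SG_PTRS_PER_PAGE = 1024):

/*
 * nr_pages = 1024 (a 4MB buffer):
 *   nr_sgpages = 1024, nr_sglinks = 1024 / 1023 = 1, but
 *   1024 % 1023 = 1 < 2, so the spilled entry becomes the LAST
 *   entry of the previous page: 1024 entries in total.
 *
 * nr_pages = 2048 (an 8MB buffer):
 *   nr_sgpages = 2048, nr_sglinks = 2048 / 1023 = 2, and
 *   2048 % 1023 = 2, so both links stay: 2050 entries in total.
 */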
+
+/*
+ * tmc_pages_get_offset: Go through all the pages in the tmc_pages
+ * and map the device address @addr to an offset within the virtually
+ * contiguous buffer.
+ */
+static long
+tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
+{
+ int i;
+ dma_addr_t page_start;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ page_start = tmc_pages->daddrs[i];
+ if (addr >= page_start && addr < (page_start + PAGE_SIZE))
+ return i * PAGE_SIZE + (addr - page_start);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * tmc_pages_free : Unmap and free the pages used by tmc_pages.
+ * If the pages were not allocated in tmc_pages_alloc(), we simply
+ * drop the refcount.
+ */
+static void tmc_pages_free(struct tmc_pages *tmc_pages,
+ struct device *dev, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ if (tmc_pages->daddrs && tmc_pages->daddrs[i])
+ dma_unmap_page(dev, tmc_pages->daddrs[i],
+ PAGE_SIZE, dir);
+ if (tmc_pages->pages && tmc_pages->pages[i])
+ __free_page(tmc_pages->pages[i]);
+ }
+
+ kfree(tmc_pages->pages);
+ kfree(tmc_pages->daddrs);
+ tmc_pages->pages = NULL;
+ tmc_pages->daddrs = NULL;
+ tmc_pages->nr_pages = 0;
+}
+
+/*
+ * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
+ * If @pages is not NULL, the list of page virtual addresses is
+ * used as the data pages. The pages are then dma_map'ed for @dev
+ * with dma_direction @dir.
+ *
+ * Returns 0 upon success, else the error number.
+ */
+static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
+ struct device *dev, int node,
+ enum dma_data_direction dir, void **pages)
+{
+ int i, nr_pages;
+ dma_addr_t paddr;
+ struct page *page;
+
+ nr_pages = tmc_pages->nr_pages;
+ tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
+ GFP_KERNEL);
+ if (!tmc_pages->daddrs)
+ return -ENOMEM;
+ tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
+ GFP_KERNEL);
+ if (!tmc_pages->pages) {
+ kfree(tmc_pages->daddrs);
+ tmc_pages->daddrs = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pages && pages[i]) {
+ page = virt_to_page(pages[i]);
+ /* Hold a refcount on the page */
+ get_page(page);
+ } else {
+ page = alloc_pages_node(node,
+ GFP_KERNEL | __GFP_ZERO, 0);
+ }
+ /* Guard against a failed page allocation before mapping */
+ if (!page)
+ goto err;
+ paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, paddr))
+ goto err;
+ tmc_pages->daddrs[i] = paddr;
+ tmc_pages->pages[i] = page;
+ }
+ return 0;
+err:
+ tmc_pages_free(tmc_pages, dev, dir);
+ return -ENOMEM;
+}
+
+static inline long
+tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
+{
+ return tmc_pages_get_offset(&sg_table->data_pages, addr);
+}
+
+static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->table_vaddr)
+ vunmap(sg_table->table_vaddr);
+ tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
+}
+
+static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->data_vaddr)
+ vunmap(sg_table->data_vaddr);
+ tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
+}
+
+void tmc_free_sg_table(struct tmc_sg_table *sg_table)
+{
+ tmc_free_table_pages(sg_table);
+ tmc_free_data_pages(sg_table);
+}
+
+/*
+ * Alloc pages for the table. Since this will be used by the device,
+ * allocate the pages closer to the device (i.e, dev_to_node(dev)
+ * rather than the CPU node).
+ */
+static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
+{
+ int rc;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ rc = tmc_pages_alloc(table_pages, sg_table->dev,
+ dev_to_node(sg_table->dev),
+ DMA_TO_DEVICE, NULL);
+ if (rc)
+ return rc;
+ sg_table->table_vaddr = vmap(table_pages->pages,
+ table_pages->nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->table_vaddr)
+ rc = -ENOMEM;
+ else
+ sg_table->table_daddr = table_pages->daddrs[0];
+ return rc;
+}
+
+static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
+{
+ int rc;
+
+ /* Allocate data pages on the node requested by the caller */
+ rc = tmc_pages_alloc(&sg_table->data_pages,
+ sg_table->dev, sg_table->node,
+ DMA_FROM_DEVICE, pages);
+ if (!rc) {
+ sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
+ sg_table->data_pages.nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->data_vaddr)
+ rc = -ENOMEM;
+ }
+ return rc;
+}
+
+/*
+ * tmc_alloc_sg_table: Allocate and set up DMA pages for the TMC SG table
+ * and data buffers. TMC writes to the data buffers and reads from the SG
+ * Table pages.
+ *
+ * @dev - Device to which page should be DMA mapped.
+ * @node - Numa node for mem allocations
+ * @nr_tpages - Number of pages for the table entries.
+ * @nr_dpages - Number of pages for Data buffer.
+ * @pages - Optional list of virtual address of pages.
+ */
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages)
+{
+ long rc;
+ struct tmc_sg_table *sg_table;
+
+ sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
+ if (!sg_table)
+ return ERR_PTR(-ENOMEM);
+ sg_table->data_pages.nr_pages = nr_dpages;
+ sg_table->table_pages.nr_pages = nr_tpages;
+ sg_table->node = node;
+ sg_table->dev = dev;
+
+ rc = tmc_alloc_data_pages(sg_table, pages);
+ if (!rc)
+ rc = tmc_alloc_table_pages(sg_table);
+ if (rc) {
+ tmc_free_sg_table(sg_table);
+ kfree(sg_table);
+ return ERR_PTR(rc);
+ }
+
+ return sg_table;
+}
+
+/*
+ * tmc_sg_table_sync_data_range: Sync the data buffer written
+ * by the device from @offset, up to @size bytes.
+ */
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size)
+{
+ int i, index, start;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct device *dev = table->dev;
+ struct tmc_pages *data = &table->data_pages;
+
+ start = offset >> PAGE_SHIFT;
+ for (i = start; i < (start + npages); i++) {
+ index = i % data->nr_pages;
+ dma_sync_single_for_cpu(dev, data->daddrs[index],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+}
+
+/* tmc_sg_table_sync_table: Sync the page table */
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
+{
+ int i;
+ struct device *dev = sg_table->dev;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ for (i = 0; i < table_pages->nr_pages; i++)
+ dma_sync_single_for_device(dev, table_pages->daddrs[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+/*
+ * tmc_sg_table_get_data: Get the buffer pointer for data at @offset
+ * in the SG buffer. The @bufpp is updated to point to the buffer.
+ * Returns :
+ * the length of linear data available at @offset.
+ * or
+ * <= 0 if no data is available.
+ */
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp)
+{
+ size_t size;
+ int pg_idx = offset >> PAGE_SHIFT;
+ int pg_offset = offset & (PAGE_SIZE - 1);
+ struct tmc_pages *data_pages = &sg_table->data_pages;
+
+ size = tmc_sg_table_buf_size(sg_table);
+ if (offset >= size)
+ return -EINVAL;
+
+ /* Make sure we don't go beyond the end */
+ len = (len < (size - offset)) ? len : size - offset;
+ /* Respect the page boundaries */
+ len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
+ if (len > 0)
+ *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
+ return len;
+}
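A caller-side sketch of walking the whole SG buffer in page-bounded chunks via this helper; consume_trace() is again a hypothetical stand-in:

static void walk_sg_buf(struct tmc_sg_table *sg_table)
{
	u64 offset = 0;
	ssize_t len;
	char *buf;

	while ((len = tmc_sg_table_get_data(sg_table, offset,
					    PAGE_SIZE, &buf)) > 0) {
		consume_trace(buf, len);	/* hypothetical consumer */
		offset += len;
	}
}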
+
+#ifdef ETR_SG_DEBUG
+/* Map a dma address to virtual address */
+static unsigned long
+tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
+ dma_addr_t addr, bool table)
+{
+ long offset;
+ unsigned long base;
+ struct tmc_pages *tmc_pages;
+
+ if (table) {
+ tmc_pages = &sg_table->table_pages;
+ base = (unsigned long)sg_table->table_vaddr;
+ } else {
+ tmc_pages = &sg_table->data_pages;
+ base = (unsigned long)sg_table->data_vaddr;
+ }
+
+ offset = tmc_pages_get_offset(tmc_pages, addr);
+ if (offset < 0)
+ return 0;
+ return base + offset;
+}
+
+/* Dump the given sg_table */
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
+{
+ sgte_t *ptr;
+ int i = 0;
+ dma_addr_t addr;
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ etr_table->hwaddr, true);
+ while (ptr) {
+ addr = ETR_SG_ADDR(*ptr);
+ switch (ETR_SG_ET(*ptr)) {
+ case ETR_SG_ET_NORMAL:
+ dev_dbg(sg_table->dev,
+ "%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
+ ptr++;
+ break;
+ case ETR_SG_ET_LINK:
+ dev_dbg(sg_table->dev,
+ "%05d: *** %p\t:{L} 0x%llx ***\n",
+ i, ptr, addr);
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ addr, true);
+ break;
+ case ETR_SG_ET_LAST:
+ dev_dbg(sg_table->dev,
+ "%05d: ### %p\t:[L] 0x%llx ###\n",
+ i, ptr, addr);
+ return;
+ default:
+ dev_dbg(sg_table->dev,
+ "%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
+ i, ptr, addr);
+ return;
+ }
+ i++;
+ }
+ dev_dbg(sg_table->dev, "******* End of Table *****\n");
+}
+#else
+static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+#endif
+
+/*
+ * Populate the SG Table page table entries from table/data
+ * pages allocated. Each data page holds ETR_SG_PAGES_PER_SYSPAGE SG pages,
+ * and so does each table page. We therefore keep track of the indices of
+ * the tables in each system page and move the pointers accordingly.
+ */
+#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
+static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
+{
+ dma_addr_t paddr;
+ int i, type, nr_entries;
+ int tpidx = 0; /* index to the current system table_page */
+ int sgtidx = 0; /* index to the sg_table within the current syspage */
+ int sgtentry = 0; /* the entry within the sg_table */
+ int dpidx = 0; /* index to the current system data_page */
+ int spidx = 0; /* index to the SG page within the current data page */
+ sgte_t *ptr; /* pointer to the table entry to fill */
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+ dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
+ dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;
+
+ nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
+ /*
+ * Use the contiguous virtual address of the table to update entries.
+ */
+ ptr = sg_table->table_vaddr;
+ /*
+ * Fill all the entries, except the last entry to avoid special
+ * checks within the loop.
+ */
+ for (i = 0; i < nr_entries - 1; i++) {
+ if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
+ /*
+ * Last entry in a sg_table page is a link address to
+ * the next table page. If this sg_table is the last
+ * one in the system page, it links to the first
+ * sg_table in the next system page. Otherwise, it
+ * links to the next sg_table page within the system
+ * page.
+ */
+ if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
+ paddr = table_daddrs[tpidx + 1];
+ } else {
+ paddr = table_daddrs[tpidx] +
+ (ETR_SG_PAGE_SIZE * (sgtidx + 1));
+ }
+ type = ETR_SG_ET_LINK;
+ } else {
+ /*
+ * Update the indices to the data_pages to point to the
+ * next sg_page in the data buffer.
+ */
+ type = ETR_SG_ET_NORMAL;
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
+ dpidx++;
+ }
+ *ptr++ = ETR_SG_ENTRY(paddr, type);
+ /*
+ * Move to the next table pointer, moving the table page index
+ * if necessary
+ */
+ if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
+ if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
+ tpidx++;
+ }
+ }
+
+ /* Set up the last entry, which is always a data pointer */
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ *ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
+}
+
+/*
+ * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
+ * populate the table.
+ *
+ * @dev - Device pointer for the TMC
+ * @node - NUMA node where the memory should be allocated
+ * @size - Total size of the data buffer
+ * @pages - Optional list of page virtual address
+ */
+static struct etr_sg_table *
+tmc_init_etr_sg_table(struct device *dev, int node,
+ unsigned long size, void **pages)
+{
+ int nr_entries, nr_tpages;
+ int nr_dpages = size >> PAGE_SHIFT;
+ struct tmc_sg_table *sg_table;
+ struct etr_sg_table *etr_table;
+
+ etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
+ if (!etr_table)
+ return ERR_PTR(-ENOMEM);
+ nr_entries = tmc_etr_sg_table_entries(nr_dpages);
+ nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);
+
+ sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
+ if (IS_ERR(sg_table)) {
+ kfree(etr_table);
+ return ERR_CAST(sg_table);
+ }
+
+ etr_table->sg_table = sg_table;
+ /* TMC should use table base address for DBA */
+ etr_table->hwaddr = sg_table->table_daddr;
+ tmc_etr_sg_table_populate(etr_table);
+ /* Sync the table pages for the HW */
+ tmc_sg_table_sync_table(sg_table);
+ tmc_etr_sg_table_dump(etr_table);
+
+ return etr_table;
+}
+
+/*
+ * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
+ */
+static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_flat_buf *flat_buf;
+
+ /* We cannot reuse existing pages for flat buf */
+ if (pages)
+ return -EINVAL;
+
+ flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
+ if (!flat_buf)
+ return -ENOMEM;
+
+ flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
+ &flat_buf->daddr, GFP_KERNEL);
+ if (!flat_buf->vaddr) {
+ kfree(flat_buf);
+ return -ENOMEM;
+ }
+
+ flat_buf->size = etr_buf->size;
+ flat_buf->dev = drvdata->dev;
+ etr_buf->hwaddr = flat_buf->daddr;
+ etr_buf->mode = ETR_MODE_FLAT;
+ etr_buf->private = flat_buf;
+ return 0;
+}
+
+static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ if (flat_buf && flat_buf->daddr)
+ dma_free_coherent(flat_buf->dev, flat_buf->size,
+ flat_buf->vaddr, flat_buf->daddr);
+ kfree(flat_buf);
+}
+
+static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ etr_buf->offset = rrp - etr_buf->hwaddr;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = rwp - rrp;
+}
+
+static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ *bufpp = (char *)flat_buf->vaddr + offset;
+ /*
+ * tmc_etr_buf_get_data already adjusts the length to handle
+ * buffer wrapping around.
+ */
+ return len;
+}
+
+static const struct etr_buf_operations etr_flat_buf_ops = {
+ .alloc = tmc_etr_alloc_flat_buf,
+ .free = tmc_etr_free_flat_buf,
+ .sync = tmc_etr_sync_flat_buf,
+ .get_data = tmc_etr_get_data_flat_buf,
+};
+
+/*
+ * tmc_etr_alloc_sg_buf: Allocate an SG buffer for @etr_buf and set up
+ * the parameters appropriately.
+ */
+static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_sg_table *etr_table;
+
+ etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
+ etr_buf->size, pages);
+ if (IS_ERR(etr_table))
+ return PTR_ERR(etr_table);
+ etr_buf->hwaddr = etr_table->hwaddr;
+ etr_buf->mode = ETR_MODE_ETR_SG;
+ etr_buf->private = etr_table;
+ return 0;
+}
+
+static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ if (etr_table) {
+ tmc_free_sg_table(etr_table->sg_table);
+ kfree(etr_table);
+ }
+}
+
+static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
+}
+
+static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ long r_offset, w_offset;
+ struct etr_sg_table *etr_table = etr_buf->private;
+ struct tmc_sg_table *table = etr_table->sg_table;
+
+ /* Convert hw address to offset in the buffer */
+ r_offset = tmc_sg_get_data_page_offset(table, rrp);
+ if (r_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RRP %llx to offset\n", rrp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ w_offset = tmc_sg_get_data_page_offset(table, rwp);
+ if (w_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RWP %llx to offset\n", rwp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ etr_buf->offset = r_offset;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
+ w_offset - r_offset;
+ tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
+}
+
+static const struct etr_buf_operations etr_sg_buf_ops = {
+ .alloc = tmc_etr_alloc_sg_buf,
+ .free = tmc_etr_free_sg_buf,
+ .sync = tmc_etr_sync_sg_buf,
+ .get_data = tmc_etr_get_data_sg_buf,
+};
+
+/*
+ * A TMC ETR could be connected to a CATU device, which can provide an
+ * address translation service. This is represented by the output port
+ * of the TMC (ETR) connected to the input port of the CATU.
+ *
+ * Returns : coresight_device ptr for the CATU device if a CATU is found.
+ * : NULL otherwise.
+ */
+struct coresight_device *
+tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
+{
+ int i;
+ struct coresight_device *tmp, *etr = drvdata->csdev;
+
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return NULL;
+
+ for (i = 0; i < etr->nr_outport; i++) {
+ tmp = etr->conns[i].child_dev;
+ if (tmp && coresight_is_catu_device(tmp))
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->enable)
+ helper_ops(catu)->enable(catu, drvdata->etr_buf);
+}
+
+static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->disable)
+ helper_ops(catu)->disable(catu, drvdata->etr_buf);
+}
+
+static const struct etr_buf_operations *etr_buf_ops[] = {
+ [ETR_MODE_FLAT] = &etr_flat_buf_ops,
+ [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
+ [ETR_MODE_CATU] = &etr_catu_buf_ops,
+};
+
+static inline int tmc_etr_mode_alloc_buf(int mode,
+ struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ int rc = -EINVAL;
+
+ switch (mode) {
+ case ETR_MODE_FLAT:
+ case ETR_MODE_ETR_SG:
+ case ETR_MODE_CATU:
+ if (etr_buf_ops[mode]->alloc)
+ rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
+ node, pages);
+ if (!rc)
+ etr_buf->ops = etr_buf_ops[mode];
+ return rc;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * tmc_alloc_etr_buf: Allocate a buffer to be used by the ETR.
+ * @drvdata : ETR device details.
+ * @size : size of the requested buffer.
+ * @flags : Required properties for the buffer.
+ * @node : Node for memory allocations.
+ * @pages : An optional list of pages.
+ */
+static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
+ ssize_t size, int flags,
+ int node, void **pages)
+{
+ int rc = -ENOMEM;
+ bool has_etr_sg, has_iommu;
+ bool has_sg, has_catu;
+ struct etr_buf *etr_buf;
+
+ has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
+ has_iommu = iommu_get_domain_for_dev(drvdata->dev);
+ has_catu = !!tmc_etr_get_catu_device(drvdata);
+
+ has_sg = has_catu || has_etr_sg;
+
+ etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
+ if (!etr_buf)
+ return ERR_PTR(-ENOMEM);
+
+ etr_buf->size = size;
+
+ /*
+ * If we have to use an existing list of pages, we cannot reliably
+ * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
+ * we use contiguous DMA memory if at least one of the following
+ * conditions is true:
+ * a) The ETR cannot use Scatter-Gather.
+ * b) We have a backing IOMMU.
+ * c) The requested memory size is small (< 1M).
+ *
+ * If that fails, fall back to the remaining mechanisms in order.
+ */
+ if (!pages &&
+ (!has_sg || has_iommu || size < SZ_1M))
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_etr_sg)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_catu)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
+ etr_buf, node, pages);
+ if (rc) {
+ kfree(etr_buf);
+ return ERR_PTR(rc);
+ }
+
+ dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
+ (unsigned long)size >> 10, etr_buf->mode);
+ return etr_buf;
+}
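
The allocation policy above is a preference-ordered fallback: flat buffer when the conditions allow it, then the ETR's built-in scatter-gather, then CATU. A hypothetical sketch of the same pattern in isolation (the types and names are illustrative, not driver API):

	#include <errno.h>

	typedef int (*alloc_fn)(void *ctx);

	/* Try allocation backends in order of preference until one
	 * succeeds, mirroring the FLAT -> ETR_SG -> CATU fallback above. */
	static int try_alloc_in_order(const alloc_fn *fns, int nr_fns, void *ctx)
	{
		int i, rc = -EINVAL;

		for (i = 0; i < nr_fns; i++) {
			rc = fns[i](ctx);
			if (!rc)
				break;	/* first backend that works wins */
		}
		return rc;
	}
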
+
+static void tmc_free_etr_buf(struct etr_buf *etr_buf)
+{
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
+ etr_buf->ops->free(etr_buf);
+ kfree(etr_buf);
+}
+
+/*
+ * tmc_etr_buf_get_data: Get a pointer to the trace data at @offset,
+ * limited to a maximum of @len bytes.
+ * Returns: The size of the linear data available at @offset, with *bufpp
+ * updated to point to the buffer.
+ */
+static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ /* Adjust the length to limit this transaction to end of buffer */
+ len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;
+
+ return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
+}
+
+static inline s64
+tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
+{
+ ssize_t len;
+ char *bufp;
+
+ len = tmc_etr_buf_get_data(etr_buf, offset,
+ CORESIGHT_BARRIER_PKT_SIZE, &bufp);
+ if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+ return -EINVAL;
+ coresight_insert_barrier_packet(bufp);
+ return offset + CORESIGHT_BARRIER_PKT_SIZE;
+}
+
+/*
+ * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
+ * Makes sure the trace data is synced to the memory for consumption.
+ * @etr_buf->offset will hold the offset to the beginning of the trace data
+ * within the buffer, with @etr_buf->len bytes to consume.
+ */
+static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
+{
+ struct etr_buf *etr_buf = drvdata->etr_buf;
+ u64 rrp, rwp;
+ u32 status;
+
+ rrp = tmc_read_rrp(drvdata);
+ rwp = tmc_read_rwp(drvdata);
+ status = readl_relaxed(drvdata->base + TMC_STS);
+ etr_buf->full = status & TMC_STS_FULL;
+
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
+
+ etr_buf->ops->sync(etr_buf, rrp, rwp);
+
+ /* Insert barrier packets at the beginning, if there was an overflow */
+ if (etr_buf->full)
+ tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+}
+
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- /* Zero out the memory to help with debug */
- memset(drvdata->vaddr, 0, drvdata->size);
+ /*
+ * If this ETR is connected to a CATU, enable it before we turn
+ * this on
+ */
+ tmc_etr_enable_catu(drvdata);
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
- writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
+ writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
@@ -34,16 +924,22 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl |= TMC_AXICTL_ARCACHE_OS;
}
+ if (etr_buf->mode == ETR_MODE_ETR_SG) {
+ if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
+ return;
+ axictl |= TMC_AXICTL_SCT_GAT_MODE;
+ }
+
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- tmc_write_dba(drvdata, drvdata->paddr);
+ tmc_write_dba(drvdata, etr_buf->hwaddr);
/*
* If the TMC pointers must be programmed before the session,
* we have to set it properly (i.e, RRP/RWP to base address and
* STS to "not full").
*/
if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
- tmc_write_rrp(drvdata, drvdata->paddr);
- tmc_write_rwp(drvdata, drvdata->paddr);
+ tmc_write_rrp(drvdata, etr_buf->hwaddr);
+ tmc_write_rwp(drvdata, etr_buf->hwaddr);
sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
writel_relaxed(sts, drvdata->base + TMC_STS);
}
@@ -58,37 +954,49 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+/*
+ * Return the available trace data in the buffer (starting at
+ * etr_buf->offset, limited by etr_buf->len) from @pos, with a maximum
+ * limit of @len, updating @bufpp to point to where the data can be
+ * found. Since the trace data can start anywhere in the buffer,
+ * depending on the RRP, the length returned is adjusted to handle
+ * buffer wrap-around.
+ */
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
{
- const u32 *barrier;
- u32 val;
- u32 *temp;
- u64 rwp;
+ s64 offset;
+ ssize_t actual = len;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- rwp = tmc_read_rwp(drvdata);
- val = readl_relaxed(drvdata->base + TMC_STS);
+ if (pos + actual > etr_buf->len)
+ actual = etr_buf->len - pos;
+ if (actual <= 0)
+ return actual;
- /*
- * Adjust the buffer to point to the beginning of the trace data
- * and update the available trace data.
- */
- if (val & TMC_STS_FULL) {
- drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
- drvdata->len = drvdata->size;
+ /* Compute the offset from which we read the data */
+ offset = etr_buf->offset + pos;
+ if (offset >= etr_buf->size)
+ offset -= etr_buf->size;
+ return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
+}
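
Since the chunk returned by tmc_etr_get_sysfs_trace() is always linear and never crosses the end of the buffer, a caller that wants all available data has to loop. A sketch of such a consumer (copy_all_trace() is hypothetical and assumes the caller already holds whatever locking is required):

	#include <linux/string.h>

	static ssize_t copy_all_trace(struct tmc_drvdata *drvdata,
				      char *out, size_t out_len)
	{
		loff_t pos = 0;
		ssize_t n;
		char *chunk;

		/* Each call yields at most one linear chunk; keep going
		 * until the available trace data is exhausted. */
		while ((size_t)pos < out_len) {
			n = tmc_etr_get_sysfs_trace(drvdata, pos,
						    out_len - pos, &chunk);
			if (n <= 0)
				break;
			memcpy(out + pos, chunk, n);
			pos += n;
		}
		return pos;
	}
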
- barrier = barrier_pkt;
- temp = (u32 *)drvdata->buf;
+static struct etr_buf *
+tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ return tmc_alloc_etr_buf(drvdata, drvdata->size,
+ 0, cpu_to_node(0), NULL);
+}
- while (*barrier) {
- *temp = *barrier;
- temp++;
- barrier++;
- }
+static void
+tmc_etr_free_sysfs_buf(struct etr_buf *buf)
+{
+ if (buf)
+ tmc_free_etr_buf(buf);
+}
- } else {
- drvdata->buf = drvdata->vaddr;
- drvdata->len = rwp - drvdata->paddr;
- }
+static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ tmc_sync_etr_buf(drvdata);
}
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
@@ -101,44 +1009,45 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* read before the TMC is disabled.
*/
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etr_dump_hw(drvdata);
+ tmc_etr_sync_sysfs_buf(drvdata);
+
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
+
+ /* Disable CATU device if this ETR is connected to one */
+ tmc_etr_disable_catu(drvdata);
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
- bool used = false;
unsigned long flags;
- void __iomem *vaddr = NULL;
- dma_addr_t paddr = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etr_buf *new_buf = NULL, *free_buf = NULL;
/*
- * If we don't have a buffer release the lock and allocate memory.
- * Otherwise keep the lock and move along.
+ * If we are enabling the ETR from a disabled state, we need to make
+ * sure we have a buffer with the right size. The etr_buf is not reset
+ * immediately after we stop the tracing in SYSFS mode as we wait for
+ * the user to collect the data. We may be able to reuse the existing
+ * buffer, provided the size matches. Any allocation has to be done
+ * with the lock released.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->vaddr) {
+ if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- /*
- * Contiguous memory can't be allocated while a spinlock is
- * held. As such allocate memory here and free it if a buffer
- * has already been allocated (from a previous session).
- */
- vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
- &paddr, GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
+ /* Allocate memory with the locks released */
+ free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
+ if (IS_ERR(new_buf))
+ return PTR_ERR(new_buf);
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
}
- if (drvdata->reading) {
+ if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
@@ -146,21 +1055,19 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
- * touched.
+ * touched, even if the buffer size has changed.
*/
if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
- * If drvdata::vaddr == NULL, use the memory allocated above.
- * Otherwise a buffer still exists from a previous session, so
- * simply use that.
+ * If we don't have a buffer or it doesn't match the requested size,
+ * use the buffer allocated above. Otherwise reuse the existing buffer.
*/
- if (drvdata->vaddr == NULL) {
- used = true;
- drvdata->vaddr = vaddr;
- drvdata->paddr = paddr;
- drvdata->buf = drvdata->vaddr;
+ if (!drvdata->etr_buf ||
+ (new_buf && drvdata->etr_buf->size != new_buf->size)) {
+ free_buf = drvdata->etr_buf;
+ drvdata->etr_buf = new_buf;
}
drvdata->mode = CS_MODE_SYSFS;
@@ -169,8 +1076,8 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
- if (!used && vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (free_buf)
+ tmc_etr_free_sysfs_buf(free_buf);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -180,32 +1087,8 @@ out:
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
- int ret = 0;
- unsigned long flags;
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETR is already operated
- * from sysFS.
- */
- if (drvdata->mode != CS_MODE_DISABLED) {
- ret = -EINVAL;
- goto out;
- }
-
- drvdata->mode = CS_MODE_PERF;
- tmc_etr_enable_hw(drvdata);
-out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- return ret;
+ /* We don't support perf mode yet! */
+ return -EINVAL;
}
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
@@ -273,8 +1156,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- /* If drvdata::buf is NULL the trace data has been read already */
- if (drvdata->buf == NULL) {
+ /* If drvdata::etr_buf is NULL the trace data has been read already */
+ if (drvdata->etr_buf == NULL) {
ret = -EINVAL;
goto out;
}
@@ -293,8 +1176,7 @@ out:
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
unsigned long flags;
- dma_addr_t paddr;
- void __iomem *vaddr = NULL;
+ struct etr_buf *etr_buf = NULL;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -306,9 +1188,8 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
- * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
- * so we don't have to explicitly clear it. Also, since the
- * tracer is still enabled drvdata::buf can't be NULL.
+ * buffer. Since the tracer is still enabled, drvdata::etr_buf
+ * can't be NULL.
*/
tmc_etr_enable_hw(drvdata);
} else {
@@ -316,17 +1197,16 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
* The ETR is not tracing and the buffer was just read.
* As such prepare to free the trace buffer.
*/
- vaddr = drvdata->vaddr;
- paddr = drvdata->paddr;
- drvdata->buf = drvdata->vaddr = NULL;
+ etr_buf = drvdata->etr_buf;
+ drvdata->etr_buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free allocated memory out side of the spinlock */
- if (vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (etr_buf)
+ tmc_free_etr_buf(etr_buf);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 456f122df74f..1b817ec1192c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -123,35 +124,40 @@ static int tmc_open(struct inode *inode, struct file *file)
return 0;
}
+static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
+ case TMC_CONFIG_TYPE_ETF:
+ return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
+ case TMC_CONFIG_TYPE_ETR:
+ return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
+ }
+
+ return -EINVAL;
+}
+
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
loff_t *ppos)
{
+ char *bufp;
+ ssize_t actual;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
+ if (actual <= 0)
+ return 0;
- if (*ppos + len > drvdata->len)
- len = drvdata->len - *ppos;
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (bufp == (char *)(drvdata->vaddr + drvdata->size))
- bufp = drvdata->vaddr;
- else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
- bufp -= drvdata->size;
- if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
- len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
- }
-
- if (copy_to_user(data, bufp, len)) {
+ if (copy_to_user(data, bufp, actual)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
- *ppos += len;
+ *ppos += actual;
+ dev_dbg(drvdata->dev, "%zu bytes copied\n", actual);
- dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
- __func__, len, (int)(drvdata->len - *ppos));
- return len;
+ return actual;
}
static int tmc_release(struct inode *inode, struct file *file)
@@ -271,8 +277,41 @@ static ssize_t trigger_cntr_store(struct device *dev,
}
static DEVICE_ATTR_RW(trigger_cntr);
+static ssize_t buffer_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%#x\n", drvdata->size);
+}
+
+static ssize_t buffer_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /* Only permitted for TMC-ETRs */
+ if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
+ return -EPERM;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+ /* The buffer size should be page aligned */
+ if (val & (PAGE_SIZE - 1))
+ return -EINVAL;
+ drvdata->size = val;
+ return size;
+}
+
+static DEVICE_ATTR_RW(buffer_size);
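
The alignment check works because PAGE_SIZE is a power of two, so masking with PAGE_SIZE - 1 isolates the offset within a page. A tiny worked example (assuming a 4K page size):

	#include <assert.h>

	#define PAGE_SIZE 4096UL	/* assumed for the example; arch-specific */

	int main(void)
	{
		/* 1MB is a multiple of 4K: the store accepts it */
		assert((0x100000UL & (PAGE_SIZE - 1)) == 0);
		/* 1MB + 512 bytes is not page aligned: store returns -EINVAL */
		assert((0x100200UL & (PAGE_SIZE - 1)) == 0x200);
		return 0;
	}
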
+
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
+ &dev_attr_buffer_size.attr,
NULL,
};
@@ -291,6 +330,12 @@ const struct attribute_group *coresight_tmc_groups[] = {
NULL,
};
+static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
+{
+ return fwnode_property_present(drvdata->dev->fwnode,
+ "arm,scatter-gather");
+}
+
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
u32 devid, void *dev_caps)
@@ -300,7 +345,7 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
- if (!(devid & TMC_DEVID_NOSCAT))
+ if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(drvdata))
tmc_etr_set_cap(drvdata, TMC_ETR_SG);
/* Check if the AXI address width is available */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index dfaff077a7fc..7027bd60c4cc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -7,6 +7,7 @@
#ifndef _CORESIGHT_TMC_H
#define _CORESIGHT_TMC_H
+#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#define TMC_RSZ 0x004
@@ -122,6 +123,36 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+enum etr_mode {
+ ETR_MODE_FLAT, /* Uses contiguous flat buffer */
+ ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
+ ETR_MODE_CATU, /* Use SG mechanism in CATU */
+};
+
+struct etr_buf_operations;
+
+/**
+ * struct etr_buf - Details of the buffer used by ETR
+ * @mode : Mode of the ETR buffer: contiguous, scatter-gather, etc.
+ * @full : Trace data overflow
+ * @size : Size of the buffer.
+ * @hwaddr : Address to be programmed in the TMC:DBA{LO,HI}
+ * @offset : Offset of the trace data in the buffer for consumption.
+ * @len : Available trace data in @buf (may wrap around to the beginning).
+ * @ops : ETR buffer operations for the mode.
+ * @private : Backend-specific information for the buffer.
+ */
+struct etr_buf {
+ enum etr_mode mode;
+ bool full;
+ ssize_t size;
+ dma_addr_t hwaddr;
+ unsigned long offset;
+ s64 len;
+ const struct etr_buf_operations *ops;
+ void *private;
+};
+
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
@@ -129,11 +160,10 @@ enum tmc_mem_intf_width {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
- * @buf: area of memory where trace data get sent.
- * @paddr: DMA start location in RAM.
- * @vaddr: virtual representation of @paddr.
- * @size: trace buffer size.
- * @len: size of the available trace.
+ * @buf: Snapshot of the trace data for ETF/ETB.
+ * @etr_buf: details of buffer used in TMC-ETR
+ * @len: size of the available trace for ETF/ETB.
+ * @size: trace buffer size for this TMC (common for all modes).
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -148,11 +178,12 @@ struct tmc_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
bool reading;
- char *buf;
- dma_addr_t paddr;
- void __iomem *vaddr;
- u32 size;
+ union {
+ char *buf; /* TMC ETB */
+ struct etr_buf *etr_buf; /* TMC ETR */
+ };
u32 len;
+ u32 size;
u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
@@ -160,6 +191,47 @@ struct tmc_drvdata {
u32 etr_caps;
};
+struct etr_buf_operations {
+ int (*alloc)(struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
+ int node, void **pages);
+ void (*sync)(struct etr_buf *etr_buf, u64 rrp, u64 rwp);
+ ssize_t (*get_data)(struct etr_buf *etr_buf, u64 offset, size_t len,
+ char **bufpp);
+ void (*free)(struct etr_buf *etr_buf);
+};
+
+/**
+ * struct tmc_pages - Collection of pages used for SG.
+ * @nr_pages: Number of pages in the list.
+ * @daddrs: Array of DMA'able page addresses.
+ * @pages: Array of pages for the buffer.
+ */
+struct tmc_pages {
+ int nr_pages;
+ dma_addr_t *daddrs;
+ struct page **pages;
+};
+
+/*
+ * struct tmc_sg_table - Generic SG table for TMC
+ * @dev: Device for DMA allocations
+ * @table_vaddr: Contiguous Virtual address for PageTable
+ * @data_vaddr: Contiguous Virtual address for Data Buffer
+ * @table_daddr: DMA address of the PageTable base
+ * @node: Node for Page allocations
+ * @table_pages: List of pages & dma address for Table
+ * @data_pages: List of pages & dma address for Data
+ */
+struct tmc_sg_table {
+ struct device *dev;
+ void *table_vaddr;
+ void *data_vaddr;
+ dma_addr_t table_daddr;
+ int node;
+ struct tmc_pages table_pages;
+ struct tmc_pages data_pages;
+};
+
/* Generic functions */
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
@@ -172,10 +244,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etb_cs_ops;
extern const struct coresight_ops tmc_etf_cs_ops;
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
#define TMC_REG_PAIR(name, lo_off, hi_off) \
@@ -211,4 +287,23 @@ static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
return !!(drvdata->etr_caps & cap);
}
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages);
+void tmc_free_sg_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size);
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp);
+static inline unsigned long
+tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
+{
+ return sg_table->data_pages.nr_pages << PAGE_SHIFT;
+}
+
+struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 01b7457fe8fc..459ef930d98c 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -40,8 +40,9 @@
/** register definition **/
/* FFSR - 0x300 */
-#define FFSR_FT_STOPPED BIT(1)
+#define FFSR_FT_STOPPED_BIT 1
/* FFCR - 0x304 */
+#define FFCR_FON_MAN_BIT 6
#define FFCR_FON_MAN BIT(6)
#define FFCR_STOP_FI BIT(12)
@@ -86,9 +87,9 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
/* Generate manual flush */
writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 29e834aab539..3e07fd335f8c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -51,8 +51,7 @@ static struct list_head *stm_path;
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
*/
-const u32 barrier_pkt[5] = {0x7fffffff, 0x7fffffff,
- 0x7fffffff, 0x7fffffff, 0x0};
+const u32 barrier_pkt[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
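
With the sentinel word gone, consumers are expected to copy the barrier packet by its fixed size rather than scan for a terminating zero. A small sketch of that size-based copy (the byte-order check assumes a little-endian host; everything here is illustrative):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	static const uint32_t barrier[4] = {
		0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
	};
	#define BARRIER_PKT_SIZE sizeof(barrier)	/* 16 bytes */

	int main(void)
	{
		uint8_t buf[64] = { 0 };

		memcpy(buf, barrier, BARRIER_PKT_SIZE);	/* no sentinel needed */
		assert(buf[0] == 0xff && buf[3] == 0x7f);	/* little-endian */
		return 0;
	}
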
static int coresight_id_match(struct device *dev, void *data)
{
@@ -108,7 +107,7 @@ static int coresight_find_link_inport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
dev_name(&parent->dev), dev_name(&csdev->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_find_link_outport(struct coresight_device *csdev,
@@ -126,7 +125,7 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
dev_name(&csdev->dev), dev_name(&child->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
@@ -179,6 +178,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
else
refport = 0;
+ if (refport < 0)
+ return refport;
+
if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
if (link_ops(csdev)->enable) {
ret = link_ops(csdev)->enable(csdev, inport, outport);
@@ -423,6 +425,42 @@ struct coresight_device *coresight_get_enabled_sink(bool deactivate)
return dev ? to_coresight_device(dev) : NULL;
}
+/*
+ * coresight_grab_device - Power up this device and any of the helper
+ * devices connected to it for trace operation. Since the helper devices
+ * don't appear on the trace path, they should be handled along with
+ * the master device.
+ */
+static void coresight_grab_device(struct coresight_device *csdev)
+{
+ int i;
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_get_sync(child->dev.parent);
+ }
+ pm_runtime_get_sync(csdev->dev.parent);
+}
+
+/*
+ * coresight_drop_device - Release this device and any of the helper
+ * devices connected to it.
+ */
+static void coresight_drop_device(struct coresight_device *csdev)
+{
+ int i;
+
+ pm_runtime_put(csdev->dev.parent);
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_put(child->dev.parent);
+ }
+}
+
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -471,9 +509,9 @@ out:
if (!node)
return -ENOMEM;
+ coresight_grab_device(csdev);
node->csdev = csdev;
list_add(&node->link, path);
- pm_runtime_get_sync(csdev->dev.parent);
return 0;
}
@@ -517,7 +555,7 @@ void coresight_release_path(struct list_head *path)
list_for_each_entry_safe(nd, next, path, link) {
csdev = nd->csdev;
- pm_runtime_put_sync(csdev->dev.parent);
+ coresight_drop_device(csdev);
list_del(&nd->link);
kfree(nd);
}
@@ -768,6 +806,9 @@ static struct device_type coresight_dev_type[] = {
.name = "source",
.groups = coresight_source_groups,
},
+ {
+ .name = "helper",
+ },
};
static void coresight_device_release(struct device *dev)
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 634f58042c77..d293e55553bd 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1182,7 +1182,7 @@ static void msc_mmap_close(struct vm_area_struct *vma)
mutex_unlock(&msc->buf_mutex);
}
-static int msc_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
struct msc_iter *iter = vmf->vma->vm_file->private_data;
struct msc *msc = iter->msc;
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index 72c94c60fdd1..bed6ba63c983 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -18,4 +18,3 @@ obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_I2C_SLAVE_EEPROM) += i2c-slave-eeprom.o
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
-CFLAGS_i2c-core-base.o := -Wno-deprecated-declarations
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 4a34f311e1ff..6ec65adaba49 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
if (bit_adap->getscl == NULL)
adap->quirks = &i2c_bit_quirk_no_clk_stretch;
- /* Bring bus to a known state. Looks like STOP if bus is not free yet */
- setscl(bit_adap, 1);
- udelay(bit_adap->udelay);
- setsda(bit_adap, 1);
+ /*
+ * We tried forcing SCL/SDA to an initial state here. But that caused a
+ * regression, sadly. Check Bugzilla #200045 for details.
+ */
ret = add_adapter(adap);
if (ret < 0)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 4f8df2ec87b1..451d4ae50e66 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -116,24 +116,21 @@ config I2C_I801
DH89xxCC (PCH)
Panther Point (PCH)
Lynx Point (PCH)
- Lynx Point-LP (PCH)
Avoton (SOC)
Wellsburg (PCH)
Coleto Creek (PCH)
Wildcat Point (PCH)
- Wildcat Point-LP (PCH)
BayTrail (SOC)
Braswell (SOC)
- Sunrise Point-H (PCH)
- Sunrise Point-LP (PCH)
- Kaby Lake-H (PCH)
+ Sunrise Point (PCH)
+ Kaby Lake (PCH)
DNV (SOC)
Broxton (SOC)
Lewisburg (PCH)
Gemini Lake (SOC)
- Cannon Lake-H (PCH)
- Cannon Lake-LP (PCH)
+ Cannon Lake (PCH)
Cedar Fork (PCH)
+ Ice Lake (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -762,6 +759,13 @@ config I2C_OMAP
Like OMAP1510/1610/1710/5912 and OMAP242x.
For details see http://www.ti.com/omap.
+config I2C_OWL
+ tristate "Actions Semiconductor Owl I2C Controller"
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ help
+ Say Y here if you want to use the I2C bus controller on
+ the Actions Semiconductor Owl SoCs.
+
config I2C_PASEMI
tristate "PA Semi SMBus interface"
depends on PPC_PASEMI && PCI
@@ -828,6 +832,19 @@ config I2C_PXA_SLAVE
is necessary for systems where the PXA may be a target on the
I2C bus.
+config I2C_QCOM_GENI
+ tristate "Qualcomm Technologies Inc.'s GENI based I2C controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_GENI_SE
+ help
+ This driver supports the GENI serial engine based I2C controller in
+ master mode on Qualcomm Technologies Inc. SoCs. If you say yes to
+ this option, support will be included for the built-in I2C interface
+ on Qualcomm Technologies Inc. SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-qcom-geni.
+
config I2C_QUP
tristate "Qualcomm QUP based I2C controller"
depends on ARCH_QCOM
@@ -1330,4 +1347,15 @@ config I2C_ZX2967
This driver can also be built as a module. If so, the module will be
called i2c-zx2967.
+config I2C_FSI
+ tristate "FSI I2C driver"
+ depends on FSI
+ help
+ Driver for FSI bus attached I2C masters. These are I2C masters that
+ are connected to the system over an FSI bus, instead of the more
+ common PCI or MMIO interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-fsi.
+
endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 5a869144a0c5..18b26af82b1c 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
+obj-$(CONFIG_I2C_OWL) += i2c-owl.o
obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
@@ -83,6 +84,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
+obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o
obj-$(CONFIG_I2C_QUP) += i2c-qup.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
@@ -137,5 +139,6 @@ obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_I2C_XGENE_SLIMPRO) += i2c-xgene-slimpro.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
+obj-$(CONFIG_I2C_FSI) += i2c-fsi.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 95a80a8f81b5..134567f3019f 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -384,6 +384,7 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
if (status)
return status;
len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
+ /* fall through */
case I2C_SMBUS_I2C_BLOCK_DATA:
for (i = 0; i < len; i++) {
status = amd_ec_read(smbus, AMD_SMB_DATA + i,
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 60e4d0e939a3..a4f956c6d567 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -111,22 +111,22 @@
#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0)
enum aspeed_i2c_master_state {
+ ASPEED_I2C_MASTER_INACTIVE,
ASPEED_I2C_MASTER_START,
ASPEED_I2C_MASTER_TX_FIRST,
ASPEED_I2C_MASTER_TX,
ASPEED_I2C_MASTER_RX_FIRST,
ASPEED_I2C_MASTER_RX,
ASPEED_I2C_MASTER_STOP,
- ASPEED_I2C_MASTER_INACTIVE,
};
enum aspeed_i2c_slave_state {
+ ASPEED_I2C_SLAVE_STOP,
ASPEED_I2C_SLAVE_START,
ASPEED_I2C_SLAVE_READ_REQUESTED,
ASPEED_I2C_SLAVE_READ_PROCESSED,
ASPEED_I2C_SLAVE_WRITE_REQUESTED,
ASPEED_I2C_SLAVE_WRITE_RECEIVED,
- ASPEED_I2C_SLAVE_STOP,
};
struct aspeed_i2c_bus {
@@ -234,7 +234,6 @@ static bool aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus)
bool irq_handled = true;
u8 value;
- spin_lock(&bus->lock);
if (!slave) {
irq_handled = false;
goto out;
@@ -325,7 +324,6 @@ static bool aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus)
writel(status_ack, bus->base + ASPEED_I2C_INTR_STS_REG);
out:
- spin_unlock(&bus->lock);
return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */
@@ -389,7 +387,6 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
u8 recv_byte;
int ret;
- spin_lock(&bus->lock);
irq_status = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
/* Ack all interrupt bits. */
writel(irq_status, bus->base + ASPEED_I2C_INTR_STS_REG);
@@ -407,7 +404,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
*/
ret = aspeed_i2c_is_irq_error(irq_status);
if (ret < 0) {
- dev_dbg(bus->dev, "received error interrupt: 0x%08x",
+ dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
irq_status);
bus->cmd_err = ret;
bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
@@ -416,7 +413,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
/* We are in an invalid state; reset bus to a known state. */
if (!bus->msgs) {
- dev_err(bus->dev, "bus in unknown state");
+ dev_err(bus->dev, "bus in unknown state\n");
bus->cmd_err = -EIO;
if (bus->master_state != ASPEED_I2C_MASTER_STOP)
aspeed_i2c_do_stop(bus);
@@ -431,7 +428,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
*/
if (bus->master_state == ASPEED_I2C_MASTER_START) {
if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
- pr_devel("no slave present at %02x", msg->addr);
+ pr_devel("no slave present at %02x\n", msg->addr);
status_ack |= ASPEED_I2CD_INTR_TX_NAK;
bus->cmd_err = -ENXIO;
aspeed_i2c_do_stop(bus);
@@ -451,11 +448,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
switch (bus->master_state) {
case ASPEED_I2C_MASTER_TX:
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
- dev_dbg(bus->dev, "slave NACKed TX");
+ dev_dbg(bus->dev, "slave NACKed TX\n");
status_ack |= ASPEED_I2CD_INTR_TX_NAK;
goto error_and_stop;
} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
- dev_err(bus->dev, "slave failed to ACK TX");
+ dev_err(bus->dev, "slave failed to ACK TX\n");
goto error_and_stop;
}
status_ack |= ASPEED_I2CD_INTR_TX_ACK;
@@ -478,7 +475,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
/* fallthrough intended */
case ASPEED_I2C_MASTER_RX:
if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
- dev_err(bus->dev, "master failed to RX");
+ dev_err(bus->dev, "master failed to RX\n");
goto error_and_stop;
}
status_ack |= ASPEED_I2CD_INTR_RX_DONE;
@@ -509,7 +506,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
goto out_no_complete;
case ASPEED_I2C_MASTER_STOP:
if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
- dev_err(bus->dev, "master failed to STOP");
+ dev_err(bus->dev, "master failed to STOP\n");
bus->cmd_err = -EIO;
/* Do not STOP as we have already tried. */
} else {
@@ -520,7 +517,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
goto out_complete;
case ASPEED_I2C_MASTER_INACTIVE:
dev_err(bus->dev,
- "master received interrupt 0x%08x, but is inactive",
+ "master received interrupt 0x%08x, but is inactive\n",
irq_status);
bus->cmd_err = -EIO;
/* Do not STOP as we should be inactive. */
@@ -547,22 +544,29 @@ out_no_complete:
dev_err(bus->dev,
"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
irq_status, status_ack);
- spin_unlock(&bus->lock);
return !!irq_status;
}
static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
struct aspeed_i2c_bus *bus = dev_id;
+ bool ret;
+
+ spin_lock(&bus->lock);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
if (aspeed_i2c_slave_irq(bus)) {
dev_dbg(bus->dev, "irq handled by slave.\n");
- return IRQ_HANDLED;
+ ret = true;
+ goto out;
}
#endif /* CONFIG_I2C_SLAVE */
- return aspeed_i2c_master_irq(bus) ? IRQ_HANDLED : IRQ_NONE;
+ ret = aspeed_i2c_master_irq(bus);
+
+out:
+ spin_unlock(&bus->lock);
+ return ret ? IRQ_HANDLED : IRQ_NONE;
}
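
The reworked handler now takes the bus lock exactly once at the top level instead of separately in the slave and master paths. A user-space analogue of the pattern (a pthread mutex standing in for the spinlock, handler bodies stubbed out):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;

	static bool slave_irq(void)  { /* ... */ return false; }
	static bool master_irq(void) { /* ... */ return true; }

	/* One lock acquisition covers both sub-handlers, so the slave and
	 * master state machines can never race on the shared bus state. */
	static bool bus_irq(void)
	{
		bool handled;

		pthread_mutex_lock(&bus_lock);
		handled = slave_irq();
		if (!handled)
			handled = master_irq();
		pthread_mutex_unlock(&bus_lock);
		return handled;
	}
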
static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
@@ -851,7 +855,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
if (IS_ERR(bus->rst)) {
dev_err(&pdev->dev,
- "missing or invalid reset controller device tree entry");
+ "missing or invalid reset controller device tree entry\n");
return PTR_ERR(bus->rst);
}
reset_control_deassert(bus->rst);
@@ -868,7 +872,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
if (!match)
bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
else
- bus->get_clk_reg_val = match->data;
+ bus->get_clk_reg_val = (u32 (*)(u32))match->data;
/* Initialize the I2C adapter */
spin_lock_init(&bus->lock);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 78792b4d6437..826d32049996 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -689,9 +689,9 @@ static int brcmstb_i2c_suspend(struct device *dev)
{
struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_lock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
i2c_dev->is_suspended = true;
- i2c_unlock_adapter(&i2c_dev->adapter);
+ i2c_unlock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
return 0;
}
@@ -700,10 +700,10 @@ static int brcmstb_i2c_resume(struct device *dev)
{
struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_lock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
brcmstb_i2c_set_bsc_reg_defaults(i2c_dev);
i2c_dev->is_suspended = false;
- i2c_unlock_adapter(&i2c_dev->adapter);
+ i2c_unlock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 44cffad43701..c4d176f5ed79 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
.name = "cht_wc_ext_chrg_irq_chip",
};
-static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+static const char * const bq24190_suppliers[] = {
+ "tcpm-source-psy-i2c-fusb302" };
static const struct property_entry bq24190_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 75d6ab177055..11caafa0e050 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
/*
* It's not always possible to have 1 to 2 ratio when d=7, so fall back
* to minimal possible clkh in this case.
+ *
+ * Note:
+ * CLKH is not allowed to be 0; in that case the I2C clock is not
+ * generated at all.
*/
- if (clk >= clkl + d) {
+ if (clk > clkl + d) {
clkh = clk - clkl - d;
clkl -= d;
} else {
- clkh = 0;
+ clkh = 1;
clkl = clk - (d << 1);
}
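
A quick worked example with illustrative values shows why the old fallback could stop the clock entirely:

	#include <assert.h>

	int main(void)
	{
		int clk = 16, clkl = 12, d = 7, clkh;

		if (clk > clkl + d) {		/* 16 > 19 is false */
			clkh = clk - clkl - d;
			clkl -= d;
		} else {
			clkh = 1;		/* old code set 0: no SCL at all */
			clkl = clk - (d << 1);
		}
		assert(clkh == 1 && clkl == 2);
		return 0;
	}
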
@@ -714,14 +718,14 @@ static int i2c_davinci_cpufreq_transition(struct notifier_block *nb,
dev = container_of(nb, struct davinci_i2c_dev, freq_transition);
- i2c_lock_adapter(&dev->adapter);
+ i2c_lock_bus(&dev->adapter, I2C_LOCK_ROOT_ADAPTER);
if (val == CPUFREQ_PRECHANGE) {
davinci_i2c_reset_ctrl(dev, 0);
} else if (val == CPUFREQ_POSTCHANGE) {
i2c_davinci_calc_clk_dividers(dev);
davinci_i2c_reset_ctrl(dev, 1);
}
- i2c_unlock_adapter(&dev->adapter);
+ i2c_unlock_bus(&dev->adapter, I2C_LOCK_ROOT_ADAPTER);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index dbda8c9c8a1c..a2a275cfc1f6 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel BayTrail PMIC I2C bus semaphore implementaion
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 48914dfc8ce8..69ec4a791f23 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver.
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/clk.h>
#include <linux/delay.h>
@@ -31,6 +18,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/swab.h>
#include "i2c-designware-core.h"
@@ -94,6 +82,40 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
}
}
+/**
+ * i2c_dw_set_reg_access() - Set register access flags
+ * @dev: device private data
+ *
+ * Autodetects needed register access mode and sets access flags accordingly.
+ * This must be called before doing any other register access.
+ */
+int i2c_dw_set_reg_access(struct dw_i2c_dev *dev)
+{
+ u32 reg;
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
+
+ reg = dw_readl(dev, DW_IC_COMP_TYPE);
+ i2c_dw_release_lock(dev);
+
+ if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) {
+ /* Configure register endianness access */
+ dev->flags |= ACCESS_SWAP;
+ } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+ /* Configure 16-bit register access mode */
+ dev->flags |= ACCESS_16BIT;
+ } else if (reg != DW_IC_COMP_TYPE_VALUE) {
+ dev_err(dev->dev,
+ "Unknown Synopsys component type: 0x%08x\n", reg);
+ return -ENODEV;
+ }
+
+ return 0;
+}
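
The probe works because DW_IC_COMP_TYPE holds a fixed signature, so the value read back reveals how the registers are wired. A standalone illustration (the magic constant is assumed for the example, not quoted from the driver):

	#include <assert.h>
	#include <stdint.h>

	#define COMP_TYPE_MAGIC 0x44570140u	/* assumed signature value */

	static uint32_t swab32_demo(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0xff00u) |
		       ((x << 8) & 0xff0000u) | (x << 24);
	}

	int main(void)
	{
		/* Byte-swapped bus: the signature reads back reversed */
		assert(swab32_demo(COMP_TYPE_MAGIC) == 0x40015744u);
		/* 16-bit bus: only the low half of the signature is visible */
		assert((COMP_TYPE_MAGIC & 0x0000ffffu) == 0x0140u);
		return 0;
	}
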
+
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
{
/*
@@ -149,6 +171,47 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset;
}
+int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
+{
+ u32 reg;
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
+
+ /* Configure SDA Hold Time if required */
+ reg = dw_readl(dev, DW_IC_COMP_VERSION);
+ if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+ if (!dev->sda_hold_time) {
+ /* Keep previous hold time setting if no one set it */
+ dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+ }
+
+ /*
+ * Workaround to avoid losing TX arbitration when an I2C slave
+ * pulls SDA down "too quickly" after the falling edge of SCL,
+ * by enabling a non-zero SDA RX hold. The specification says it
+ * extends the incoming SDA low-to-high transition while SCL is
+ * high, but it appears to help with the above issue as well.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+
+ dev_dbg(dev->dev, "SDA Hold Time TX:RX = %d:%d\n",
+ dev->sda_hold_time & ~(u32)DW_IC_SDA_HOLD_RX_MASK,
+ dev->sda_hold_time >> DW_IC_SDA_HOLD_RX_SHIFT);
+ } else if (dev->sda_hold_time) {
+ dev_warn(dev->dev,
+ "Hardware too old to adjust SDA hold time.\n");
+ dev->sda_hold_time = 0;
+ }
+
+ i2c_dw_release_lock(dev);
+
+ return 0;
+}
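
The SDA hold register packs the TX hold value in the low bits with the RX hold field above it, so the workaround only needs to OR in one bit at the RX shift. A sketch with assumed field positions:

	#include <assert.h>
	#include <stdint.h>

	#define SDA_HOLD_RX_SHIFT 16			/* assumed */
	#define SDA_HOLD_RX_MASK  (0xffu << SDA_HOLD_RX_SHIFT)

	int main(void)
	{
		uint32_t hold = 0x001e;			/* TX hold only */

		/* Force a non-zero RX hold if none has been set */
		if (!(hold & SDA_HOLD_RX_MASK))
			hold |= 1u << SDA_HOLD_RX_SHIFT;

		assert((hold & ~SDA_HOLD_RX_MASK) == 0x001e);	/* TX untouched */
		assert((hold >> SDA_HOLD_RX_SHIFT) == 1);	/* RX hold = 1 */
		return 0;
	}
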
+
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
int timeout = 100;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index d690e648bc01..e367b1af4ab2 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Synopsys DesignWare I2C adapter driver.
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/i2c.h>
@@ -212,7 +199,8 @@
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
* @rx_outstanding: current master-rx elements in tx fifo
- * @clk_freq: bus clock frequency
+ * @timings: bus clock frequency, SDA hold and other timings
+ * @sda_hold_time: SDA hold value
* @ss_hcnt: standard speed HCNT value
* @ss_lcnt: standard speed LCNT value
* @fs_hcnt: fast speed HCNT value
@@ -264,10 +252,8 @@ struct dw_i2c_dev {
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
int rx_outstanding;
- u32 clk_freq;
+ struct i2c_timings timings;
u32 sda_hold_time;
- u32 sda_falling_time;
- u32 scl_falling_time;
u16 ss_hcnt;
u16 ss_lcnt;
u16 fs_hcnt;
@@ -295,8 +281,10 @@ struct dw_i2c_dev {
u32 dw_readl(struct dw_i2c_dev *dev, int offset);
void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
+int i2c_dw_set_reg_access(struct dw_i2c_dev *dev);
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
+int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev);
int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare);
int i2c_dw_acquire_lock(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 27436a937492..e18442b9973a 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver (master only).
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/delay.h>
#include <linux/err.h>
@@ -45,90 +32,79 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
dw_writel(dev, dev->master_cfg, DW_IC_CON);
}
-/**
- * i2c_dw_init() - Initialize the designware I2C master hardware
- * @dev: device private data
- *
- * This functions configures and enables the I2C master.
- * This function is called during I2C init function, and in case of timeout at
- * run time.
- */
-static int i2c_dw_init_master(struct dw_i2c_dev *dev)
+static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- u32 hcnt, lcnt;
- u32 reg, comp_param1;
+ u32 ic_clk = i2c_dw_clk_rate(dev);
+ const char *mode_str, *fp_str = "";
+ u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
+ struct i2c_timings *t = &dev->timings;
int ret;
ret = i2c_dw_acquire_lock(dev);
if (ret)
return ret;
-
- reg = dw_readl(dev, DW_IC_COMP_TYPE);
- if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- /* Configure register endianess access */
- dev->flags |= ACCESS_SWAP;
- } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
- /* Configure register access mode 16bit */
- dev->flags |= ACCESS_16BIT;
- } else if (reg != DW_IC_COMP_TYPE_VALUE) {
- dev_err(dev->dev,
- "Unknown Synopsys component type: 0x%08x\n", reg);
- i2c_dw_release_lock(dev);
- return -ENODEV;
- }
-
comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
+ i2c_dw_release_lock(dev);
- /* Disable the adapter */
- __i2c_dw_disable(dev);
-
- /* Set standard and fast speed deviders for high/low periods */
-
- sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
- scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
+ /* Set standard and fast speed dividers for high/low periods */
+ sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
+ scl_falling_time = t->scl_fall_ns ?: 300; /* ns */
- /* Set SCL timing parameters for standard-mode */
- if (dev->ss_hcnt && dev->ss_lcnt) {
- hcnt = dev->ss_hcnt;
- lcnt = dev->ss_lcnt;
- } else {
- hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev),
+ /* Calculate SCL timing parameters for standard mode if not set */
+ if (!dev->ss_hcnt || !dev->ss_lcnt) {
+ dev->ss_hcnt =
+ i2c_dw_scl_hcnt(ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev),
+ dev->ss_lcnt =
+ i2c_dw_scl_lcnt(ic_clk,
4700, /* tLOW = 4.7 us */
scl_falling_time,
0); /* No offset */
}
- dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
- dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
-
- /* Set SCL timing parameters for fast-mode or fast-mode plus */
- if ((dev->clk_freq == 1000000) && dev->fp_hcnt && dev->fp_lcnt) {
- hcnt = dev->fp_hcnt;
- lcnt = dev->fp_lcnt;
- } else if (dev->fs_hcnt && dev->fs_lcnt) {
- hcnt = dev->fs_hcnt;
- lcnt = dev->fs_lcnt;
- } else {
- hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev),
+ dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
+ dev->ss_hcnt, dev->ss_lcnt);
+
+ /*
+ * Set SCL timing parameters for fast mode or fast mode plus. The
+ * only difference is the timing parameter values, since the
+ * registers are the same.
+ */
+ if (t->bus_freq_hz == 1000000) {
+ /*
+ * Check whether fast mode plus parameters are available,
+ * and use fast mode if not.
+ */
+ if (dev->fp_hcnt && dev->fp_lcnt) {
+ dev->fs_hcnt = dev->fp_hcnt;
+ dev->fs_lcnt = dev->fp_lcnt;
+ fp_str = " Plus";
+ }
+ }
+ /*
+ * Calculate SCL timing parameters for fast mode if not set. They
+ * are also needed in high speed mode.
+ */
+ if (!dev->fs_hcnt || !dev->fs_lcnt) {
+ dev->fs_hcnt =
+ i2c_dw_scl_hcnt(ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev),
+ dev->fs_lcnt =
+ i2c_dw_scl_lcnt(ic_clk,
1300, /* tLOW = 1.3 us */
scl_falling_time,
0); /* No offset */
}
- dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
- dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+ dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
+ fp_str, dev->fs_hcnt, dev->fs_lcnt);
+ /* Check whether high speed is possible and fall back to fast mode if not */
if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
DW_IC_CON_SPEED_HIGH) {
if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
@@ -136,37 +112,70 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev)
dev_err(dev->dev, "High Speed not supported!\n");
dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ dev->hs_hcnt = 0;
+ dev->hs_lcnt = 0;
} else if (dev->hs_hcnt && dev->hs_lcnt) {
- hcnt = dev->hs_hcnt;
- lcnt = dev->hs_lcnt;
- dw_writel(dev, hcnt, DW_IC_HS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_HS_SCL_LCNT);
- dev_dbg(dev->dev, "HighSpeed-mode HCNT:LCNT = %d:%d\n",
- hcnt, lcnt);
+ dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
+ dev->hs_hcnt, dev->hs_lcnt);
}
}
- /* Configure SDA Hold Time if required */
- reg = dw_readl(dev, DW_IC_COMP_VERSION);
- if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (!dev->sda_hold_time) {
- /* Keep previous hold time setting if no one set it */
- dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
- }
- /*
- * Workaround for avoiding TX arbitration lost in case I2C
- * slave pulls SDA down "too quickly" after falling egde of
- * SCL by enabling non-zero SDA RX hold. Specification says it
- * extends incoming SDA low to high transition while SCL is
- * high but it apprears to help also above issue.
- */
- if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
- dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else if (dev->sda_hold_time) {
- dev_warn(dev->dev,
- "Hardware too old to adjust SDA hold time.\n");
+ ret = i2c_dw_set_sda_hold(dev);
+ if (ret)
+ goto out;
+
+ switch (dev->master_cfg & DW_IC_CON_SPEED_MASK) {
+ case DW_IC_CON_SPEED_STD:
+ mode_str = "Standard Mode";
+ break;
+ case DW_IC_CON_SPEED_HIGH:
+ mode_str = "High Speed Mode";
+ break;
+ default:
+ mode_str = "Fast Mode";
}
+ dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str);
+
+out:
+ return ret;
+}
+
+/**
+ * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
+ * @dev: device private data
+ *
+ * This function configures and enables the I2C master.
+ * It is called during I2C initialization and again at run time if a
+ * transfer times out.
+ */
+static int i2c_dw_init_master(struct dw_i2c_dev *dev)
+{
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
+
+ /* Disable the adapter */
+ __i2c_dw_disable(dev);
+
+ /* Write standard speed timing parameters */
+ dw_writel(dev, dev->ss_hcnt, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, dev->ss_lcnt, DW_IC_SS_SCL_LCNT);
+
+ /* Write fast mode/fast mode plus timing parameters */
+ dw_writel(dev, dev->fs_hcnt, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, dev->fs_lcnt, DW_IC_FS_SCL_LCNT);
+
+ /* Write high speed timing parameters if supported */
+ if (dev->hs_hcnt && dev->hs_lcnt) {
+ dw_writel(dev, dev->hs_hcnt, DW_IC_HS_SCL_HCNT);
+ dw_writel(dev, dev->hs_lcnt, DW_IC_HS_SCL_LCNT);
+ }
+
+ /* Write SDA hold time if supported */
+ if (dev->sda_hold_time)
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
i2c_dw_configure_fifo_master(dev);
i2c_dw_release_lock(dev);
@@ -253,13 +262,6 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
break;
}
- if (msgs[dev->msg_write_idx].len == 0) {
- dev_err(dev->dev,
- "%s: invalid message length\n", __func__);
- dev->msg_err = -EINVAL;
- break;
- }
-
if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
/* new i2c_msg */
buf = msgs[dev->msg_write_idx].buf;
@@ -502,6 +504,10 @@ static const struct i2c_algorithm i2c_dw_algo = {
.functionality = i2c_dw_func,
};
+static const struct i2c_adapter_quirks i2c_dw_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
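The I2C_AQ_NO_ZERO_LEN quirk replaces the open-coded zero-length check deleted from i2c_dw_xfer_msg() above: the core now filters such messages before the driver ever sees them. A hedged sketch of a transfer this adapter would reject (example_zero_len_xfer is hypothetical, and the core failing the transfer itself is an assumption about its quirk handling):

static int example_zero_len_xfer(struct i2c_client *client)
{
	struct i2c_msg msg = {
		.addr = client->addr,
		.len = 0,	/* zero-length payload */
	};

	/* expected to fail in the core, not in the driver */
	return i2c_transfer(client->adapter, &msg, 1);
}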
+
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
u32 stat;
@@ -681,6 +687,14 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
dev->disable = i2c_dw_disable;
dev->disable_int = i2c_dw_disable_int;
+ ret = i2c_dw_set_reg_access(dev);
+ if (ret)
+ return ret;
+
+ ret = i2c_dw_set_timings_master(dev);
+ if (ret)
+ return ret;
+
ret = dev->init(dev);
if (ret)
return ret;
@@ -689,6 +703,7 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
adap->algo = &i2c_dw_algo;
+ adap->quirks = &i2c_dw_quirks;
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 86e1bd0b82e9..d50f80487214 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver (master only).
*
@@ -7,22 +8,7 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
* Copyright (C) 2011, 2015, 2016 Intel Corporation.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
-
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -105,6 +91,7 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
case 0x0817:
c->bus_cfg &= ~DW_IC_CON_SPEED_MASK;
c->bus_cfg |= DW_IC_CON_SPEED_STD;
+ /* fall through */
case 0x0818:
case 0x0819:
c->bus_num = pdev->device - 0x817 + 3;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 5660daf6c92e..1a8d2da5b000 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver.
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/acpi.h>
#include <linux/clk-provider.h>
@@ -96,6 +83,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
static int dw_i2c_acpi_configure(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct i2c_timings *t = &dev->timings;
u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
const struct acpi_device_id *id;
@@ -115,7 +103,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
- switch (dev->clk_freq) {
+ switch (t->bus_freq_hz) {
case 100000:
dev->sda_hold_time = ss_ht;
break;
@@ -175,6 +163,8 @@ static inline int dw_i2c_acpi_configure(struct platform_device *pdev)
static void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
+ struct i2c_timings *t = &dev->timings;
+
dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
@@ -182,7 +172,7 @@ static void i2c_dw_configure_master(struct dw_i2c_dev *dev)
dev->mode = DW_IC_MASTER;
- switch (dev->clk_freq) {
+ switch (t->bus_freq_hz) {
case 100000:
dev->master_cfg |= DW_IC_CON_SPEED_STD;
break;
@@ -240,7 +230,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct i2c_adapter *adap;
struct dw_i2c_dev *dev;
- u32 acpi_speed, ht = 0;
+ struct i2c_timings *t;
+ u32 acpi_speed;
struct resource *mem;
int i, irq, ret;
static const int supported_speeds[] = {
@@ -272,18 +263,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
reset_control_deassert(dev->rst);
}
- if (pdata) {
- dev->clk_freq = pdata->i2c_scl_freq;
- } else {
- device_property_read_u32(&pdev->dev, "i2c-sda-hold-time-ns",
- &ht);
- device_property_read_u32(&pdev->dev, "i2c-sda-falling-time-ns",
- &dev->sda_falling_time);
- device_property_read_u32(&pdev->dev, "i2c-scl-falling-time-ns",
- &dev->scl_falling_time);
- device_property_read_u32(&pdev->dev, "clock-frequency",
- &dev->clk_freq);
- }
+ t = &dev->timings;
+ if (pdata)
+ t->bus_freq_hz = pdata->i2c_scl_freq;
+ else
+ i2c_parse_fw_timings(&pdev->dev, t, false);
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
/*
@@ -300,12 +284,12 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Find bus speed from the "clock-frequency" device property, ACPI
* or by using fast mode if neither is set.
*/
- if (acpi_speed && dev->clk_freq)
- dev->clk_freq = min(dev->clk_freq, acpi_speed);
- else if (acpi_speed || dev->clk_freq)
- dev->clk_freq = max(dev->clk_freq, acpi_speed);
+ if (acpi_speed && t->bus_freq_hz)
+ t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed);
+ else if (acpi_speed || t->bus_freq_hz)
+ t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed);
else
- dev->clk_freq = 400000;
+ t->bus_freq_hz = 400000;
if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_configure(pdev);
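The min()/max() dance above implements a simple policy: if both the firmware property and ACPI provide a bus speed, take the slower of the two; if only one does, use it; otherwise default to fast mode. The same logic in isolation (example_pick_bus_speed is hypothetical):

static u32 example_pick_bus_speed(u32 fw_speed, u32 acpi_speed)
{
	if (fw_speed && acpi_speed)
		return min(fw_speed, acpi_speed);	/* both set: be conservative */
	if (fw_speed || acpi_speed)
		return max(fw_speed, acpi_speed);	/* one set: use it */
	return 400000;					/* neither: fast mode */
}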
@@ -314,11 +298,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Only standard mode at 100kHz, fast mode at 400kHz,
* fast mode plus at 1MHz and high speed mode at 3.4MHz are supported.
*/
- if (dev->clk_freq != 100000 && dev->clk_freq != 400000
- && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
+ if (t->bus_freq_hz != 100000 && t->bus_freq_hz != 400000 &&
+ t->bus_freq_hz != 1000000 && t->bus_freq_hz != 3400000) {
dev_err(&pdev->dev,
"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
- dev->clk_freq);
+ t->bus_freq_hz);
ret = -EINVAL;
goto exit_reset;
}
@@ -334,12 +318,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_prepare_clk(dev, true)) {
+ u64 clk_khz;
+
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+ clk_khz = dev->get_clk_rate_khz(dev);
- if (!dev->sda_hold_time && ht)
- dev->sda_hold_time = div_u64(
- (u64)dev->get_clk_rate_khz(dev) * ht + 500000,
- 1000000);
+ if (!dev->sda_hold_time && t->sda_hold_ns)
+ dev->sda_hold_time =
+ div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
}
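The div_u64() expression above converts a hold time in nanoseconds into IP clock cycles, rounding to the nearest cycle via the +500000 term. A worked example under assumed figures (100 MHz clock, 300 ns hold; the helper name is illustrative):

static u32 example_sda_hold_cycles(u64 clk_khz, u32 sda_hold_ns)
{
	/* 100000 kHz * 300 ns -> (30000000 + 500000) / 1000000 = 30 cycles */
	return div_u64(clk_khz * sda_hold_ns + 500000, 1000000);
}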
dw_i2c_set_fifo_size(dev, pdev->id);
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 8ce2cd368477..e7f9305b2dd9 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Synopsys DesignWare I2C adapter driver (slave only).
*
* Based on the Synopsys DesignWare I2C adapter driver (master).
*
* Copyright (C) 2016 Synopsys Inc.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/delay.h>
#include <linux/err.h>
@@ -51,53 +38,18 @@ static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
*/
static int i2c_dw_init_slave(struct dw_i2c_dev *dev)
{
- u32 reg, comp_param1;
int ret;
ret = i2c_dw_acquire_lock(dev);
if (ret)
return ret;
- reg = dw_readl(dev, DW_IC_COMP_TYPE);
- if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- /* Configure register endianness access. */
- dev->flags |= ACCESS_SWAP;
- } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
- /* Configure register access mode 16bit. */
- dev->flags |= ACCESS_16BIT;
- } else if (reg != DW_IC_COMP_TYPE_VALUE) {
- dev_err(dev->dev,
- "Unknown Synopsys component type: 0x%08x\n", reg);
- i2c_dw_release_lock(dev);
- return -ENODEV;
- }
-
- comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
-
/* Disable the adapter. */
__i2c_dw_disable(dev);
- /* Configure SDA Hold Time if required. */
- reg = dw_readl(dev, DW_IC_COMP_VERSION);
- if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (!dev->sda_hold_time) {
- /* Keep previous hold time setting if no one set it. */
- dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
- }
- /*
- * Workaround for avoiding TX arbitration lost in case I2C
- * slave pulls SDA down "too quickly" after falling egde of
- * SCL by enabling non-zero SDA RX hold. Specification says it
- * extends incoming SDA low to high transition while SCL is
- * high but it apprears to help also above issue.
- */
- if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
- dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ /* Write SDA hold time if supported */
+ if (dev->sda_hold_time)
dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else {
- dev_warn(dev->dev,
- "Hardware too old to adjust SDA hold time.\n");
- }
i2c_dw_configure_fifo_slave(dev);
i2c_dw_release_lock(dev);
@@ -299,6 +251,14 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
dev->disable = i2c_dw_disable;
dev->disable_int = i2c_dw_disable_int;
+ ret = i2c_dw_set_reg_access(dev);
+ if (ret)
+ return ret;
+
+ ret = i2c_dw_set_sda_hold(dev);
+ if (ret)
+ return ret;
+
ret = dev->init(dev);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index ba9b6ea48a31..35b302d983e0 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* I2C driver for the Renesas EMEV2 SoC
*
* Copyright (C) 2015 Wolfram Sang <wsa@sang-engineering.com>
* Copyright 2013 Codethink Ltd.
* Copyright 2010-2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index de82ad8ff534..c1ce2299a76e 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -176,7 +176,10 @@
#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(100))
-#define HSI2C_EXYNOS7 BIT(0)
+enum i2c_type_exynos {
+ I2C_TYPE_EXYNOS5,
+ I2C_TYPE_EXYNOS7,
+};
struct exynos5_i2c {
struct i2c_adapter adap;
@@ -212,27 +215,30 @@ struct exynos5_i2c {
/**
* struct exynos_hsi2c_variant - platform specific HSI2C driver data
* @fifo_depth: the fifo depth supported by the HSI2C module
+ * @hw: the hardware variant of Exynos I2C controller
*
* Specifies platform specific configuration of HSI2C module.
* Note: A structure for driver specific platform data is used for future
* expansion of its usage.
*/
struct exynos_hsi2c_variant {
- unsigned int fifo_depth;
- unsigned int hw;
+ unsigned int fifo_depth;
+ enum i2c_type_exynos hw;
};
static const struct exynos_hsi2c_variant exynos5250_hsi2c_data = {
.fifo_depth = 64,
+ .hw = I2C_TYPE_EXYNOS5,
};
static const struct exynos_hsi2c_variant exynos5260_hsi2c_data = {
.fifo_depth = 16,
+ .hw = I2C_TYPE_EXYNOS5,
};
static const struct exynos_hsi2c_variant exynos7_hsi2c_data = {
.fifo_depth = 16,
- .hw = HSI2C_EXYNOS7,
+ .hw = I2C_TYPE_EXYNOS7,
};
static const struct of_device_id exynos5_i2c_match[] = {
@@ -300,7 +306,7 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
*/
t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
temp = clkin / op_clk - 8 - t_ftl_cycle;
- if (i2c->variant->hw != HSI2C_EXYNOS7)
+ if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
temp -= t_ftl_cycle;
div = temp / 512;
clk_cycle = temp / (div + 1) - 2;
@@ -424,7 +430,7 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
writel(int_status, i2c->regs + HSI2C_INT_STATUS);
/* handle interrupt related to the transfer status */
- if (i2c->variant->hw == HSI2C_EXYNOS7) {
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS7) {
if (int_status & HSI2C_INT_TRANS_DONE) {
i2c->trans_done = 1;
i2c->state = 0;
@@ -571,7 +577,7 @@ static void exynos5_i2c_bus_check(struct exynos5_i2c *i2c)
{
unsigned long timeout;
- if (i2c->variant->hw != HSI2C_EXYNOS7)
+ if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
return;
/*
@@ -612,7 +618,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
unsigned long flags;
unsigned short trig_lvl;
- if (i2c->variant->hw == HSI2C_EXYNOS7)
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS7)
int_en |= HSI2C_INT_I2C_TRANS;
else
int_en |= HSI2C_INT_I2C;
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
new file mode 100644
index 000000000000..1e2be2219a60
--- /dev/null
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * FSI-attached I2C master algorithm
+ *
+ * Copyright 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fsi.h>
+#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#define FSI_ENGID_I2C 0x7
+
+#define I2C_DEFAULT_CLK_DIV 6
+
+/* i2c registers */
+#define I2C_FSI_FIFO 0x00
+#define I2C_FSI_CMD 0x04
+#define I2C_FSI_MODE 0x08
+#define I2C_FSI_WATER_MARK 0x0C
+#define I2C_FSI_INT_MASK 0x10
+#define I2C_FSI_INT_COND 0x14
+#define I2C_FSI_OR_INT_MASK 0x14
+#define I2C_FSI_INTS 0x18
+#define I2C_FSI_AND_INT_MASK 0x18
+#define I2C_FSI_STAT 0x1C
+#define I2C_FSI_RESET_I2C 0x1C
+#define I2C_FSI_ESTAT 0x20
+#define I2C_FSI_RESET_ERR 0x20
+#define I2C_FSI_RESID_LEN 0x24
+#define I2C_FSI_SET_SCL 0x24
+#define I2C_FSI_PORT_BUSY 0x28
+#define I2C_FSI_RESET_SCL 0x2C
+#define I2C_FSI_SET_SDA 0x30
+#define I2C_FSI_RESET_SDA 0x34
+
+/* cmd register */
+#define I2C_CMD_WITH_START BIT(31)
+#define I2C_CMD_WITH_ADDR BIT(30)
+#define I2C_CMD_RD_CONT BIT(29)
+#define I2C_CMD_WITH_STOP BIT(28)
+#define I2C_CMD_FORCELAUNCH BIT(27)
+#define I2C_CMD_ADDR GENMASK(23, 17)
+#define I2C_CMD_READ BIT(16)
+#define I2C_CMD_LEN GENMASK(15, 0)
+
+/* mode register */
+#define I2C_MODE_CLKDIV GENMASK(31, 16)
+#define I2C_MODE_PORT GENMASK(15, 10)
+#define I2C_MODE_ENHANCED BIT(3)
+#define I2C_MODE_DIAG BIT(2)
+#define I2C_MODE_PACE_ALLOW BIT(1)
+#define I2C_MODE_WRAP BIT(0)
+
+/* watermark register */
+#define I2C_WATERMARK_HI GENMASK(15, 12)
+#define I2C_WATERMARK_LO GENMASK(7, 4)
+
+#define I2C_FIFO_HI_LVL 4
+#define I2C_FIFO_LO_LVL 4
+
+/* interrupt register */
+#define I2C_INT_INV_CMD BIT(15)
+#define I2C_INT_PARITY BIT(14)
+#define I2C_INT_BE_OVERRUN BIT(13)
+#define I2C_INT_BE_ACCESS BIT(12)
+#define I2C_INT_LOST_ARB BIT(11)
+#define I2C_INT_NACK BIT(10)
+#define I2C_INT_DAT_REQ BIT(9)
+#define I2C_INT_CMD_COMP BIT(8)
+#define I2C_INT_STOP_ERR BIT(7)
+#define I2C_INT_BUSY BIT(6)
+#define I2C_INT_IDLE BIT(5)
+
+/* status register */
+#define I2C_STAT_INV_CMD BIT(31)
+#define I2C_STAT_PARITY BIT(30)
+#define I2C_STAT_BE_OVERRUN BIT(29)
+#define I2C_STAT_BE_ACCESS BIT(28)
+#define I2C_STAT_LOST_ARB BIT(27)
+#define I2C_STAT_NACK BIT(26)
+#define I2C_STAT_DAT_REQ BIT(25)
+#define I2C_STAT_CMD_COMP BIT(24)
+#define I2C_STAT_STOP_ERR BIT(23)
+#define I2C_STAT_MAX_PORT GENMASK(19, 16)
+#define I2C_STAT_ANY_INT BIT(15)
+#define I2C_STAT_SCL_IN BIT(11)
+#define I2C_STAT_SDA_IN BIT(10)
+#define I2C_STAT_PORT_BUSY BIT(9)
+#define I2C_STAT_SELF_BUSY BIT(8)
+#define I2C_STAT_FIFO_COUNT GENMASK(7, 0)
+
+#define I2C_STAT_ERR (I2C_STAT_INV_CMD | \
+ I2C_STAT_PARITY | \
+ I2C_STAT_BE_OVERRUN | \
+ I2C_STAT_BE_ACCESS | \
+ I2C_STAT_LOST_ARB | \
+ I2C_STAT_NACK | \
+ I2C_STAT_STOP_ERR)
+#define I2C_STAT_ANY_RESP (I2C_STAT_ERR | \
+ I2C_STAT_DAT_REQ | \
+ I2C_STAT_CMD_COMP)
+
+/* extended status register */
+#define I2C_ESTAT_FIFO_SZ GENMASK(31, 24)
+#define I2C_ESTAT_SCL_IN_SY BIT(15)
+#define I2C_ESTAT_SDA_IN_SY BIT(14)
+#define I2C_ESTAT_S_SCL BIT(13)
+#define I2C_ESTAT_S_SDA BIT(12)
+#define I2C_ESTAT_M_SCL BIT(11)
+#define I2C_ESTAT_M_SDA BIT(10)
+#define I2C_ESTAT_HI_WATER BIT(9)
+#define I2C_ESTAT_LO_WATER BIT(8)
+#define I2C_ESTAT_PORT_BUSY BIT(7)
+#define I2C_ESTAT_SELF_BUSY BIT(6)
+#define I2C_ESTAT_VERSION GENMASK(4, 0)
+
+/* port busy register */
+#define I2C_PORT_BUSY_RESET BIT(31)
+
+/* wait for command complete or data request */
+#define I2C_CMD_SLEEP_MAX_US 500
+#define I2C_CMD_SLEEP_MIN_US 50
+
+/* wait after reset; choose time from legacy driver */
+#define I2C_RESET_SLEEP_MAX_US 2000
+#define I2C_RESET_SLEEP_MIN_US 1000
+
+/* choose timeout length from legacy driver; it's well tested */
+#define I2C_ABORT_TIMEOUT msecs_to_jiffies(100)
+
+struct fsi_i2c_master {
+ struct fsi_device *fsi;
+ u8 fifo_size;
+ struct list_head ports;
+ struct mutex lock;
+};
+
+struct fsi_i2c_port {
+ struct list_head list;
+ struct i2c_adapter adapter;
+ struct fsi_i2c_master *master;
+ u16 port;
+ u16 xfrd;
+};
+
+static int fsi_i2c_read_reg(struct fsi_device *fsi, unsigned int reg,
+ u32 *data)
+{
+ int rc;
+ __be32 data_be;
+
+ rc = fsi_device_read(fsi, reg, &data_be, sizeof(data_be));
+ if (rc)
+ return rc;
+
+ *data = be32_to_cpu(data_be);
+
+ return 0;
+}
+
+static int fsi_i2c_write_reg(struct fsi_device *fsi, unsigned int reg,
+ u32 *data)
+{
+ __be32 data_be = cpu_to_be32p(data);
+
+ return fsi_device_write(fsi, reg, &data_be, sizeof(data_be));
+}
+
+static int fsi_i2c_dev_init(struct fsi_i2c_master *i2c)
+{
+ int rc;
+ u32 mode = I2C_MODE_ENHANCED, extended_status, watermark;
+ u32 interrupt = 0;
+
+ /* since we use polling, disable interrupts */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_INT_MASK, &interrupt);
+ if (rc)
+ return rc;
+
+ mode |= FIELD_PREP(I2C_MODE_CLKDIV, I2C_DEFAULT_CLK_DIV);
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_ESTAT, &extended_status);
+ if (rc)
+ return rc;
+
+ i2c->fifo_size = FIELD_GET(I2C_ESTAT_FIFO_SZ, extended_status);
+ watermark = FIELD_PREP(I2C_WATERMARK_HI,
+ i2c->fifo_size - I2C_FIFO_HI_LVL);
+ watermark |= FIELD_PREP(I2C_WATERMARK_LO, I2C_FIFO_LO_LVL);
+
+ return fsi_i2c_write_reg(i2c->fsi, I2C_FSI_WATER_MARK, &watermark);
+}
+
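The watermark packing above via FIELD_PREP() leaves a few free slots at each end of the FIFO. A worked example, assuming a FIFO depth of 8 reported in I2C_FSI_ESTAT (example_watermark is hypothetical):

static u32 example_watermark(u8 fifo_size)	/* e.g. fifo_size = 8 */
{
	/* 8 - 4 = 4 into bits 15:12 and 4 into bits 7:4 -> 0x4040 */
	return FIELD_PREP(I2C_WATERMARK_HI, fifo_size - I2C_FIFO_HI_LVL) |
	       FIELD_PREP(I2C_WATERMARK_LO, I2C_FIFO_LO_LVL);
}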
+static int fsi_i2c_set_port(struct fsi_i2c_port *port)
+{
+ int rc;
+ struct fsi_device *fsi = port->master->fsi;
+ u32 mode, dummy = 0;
+
+ rc = fsi_i2c_read_reg(fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ if (FIELD_GET(I2C_MODE_PORT, mode) == port->port)
+ return 0;
+
+ mode = (mode & ~I2C_MODE_PORT) | FIELD_PREP(I2C_MODE_PORT, port->port);
+ rc = fsi_i2c_write_reg(fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ /* reset engine when port is changed */
+ return fsi_i2c_write_reg(fsi, I2C_FSI_RESET_ERR, &dummy);
+}
+
+static int fsi_i2c_start(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ bool stop)
+{
+ struct fsi_i2c_master *i2c = port->master;
+ u32 cmd = I2C_CMD_WITH_START | I2C_CMD_WITH_ADDR;
+
+ port->xfrd = 0;
+
+ if (msg->flags & I2C_M_RD)
+ cmd |= I2C_CMD_READ;
+
+ if (stop || msg->flags & I2C_M_STOP)
+ cmd |= I2C_CMD_WITH_STOP;
+
+ cmd |= FIELD_PREP(I2C_CMD_ADDR, msg->addr);
+ cmd |= FIELD_PREP(I2C_CMD_LEN, msg->len);
+
+ return fsi_i2c_write_reg(i2c->fsi, I2C_FSI_CMD, &cmd);
+}
+
+static int fsi_i2c_get_op_bytes(int op_bytes)
+{
+	/* FSI is limited to aligned ops of at most 4 bytes */
+ if (op_bytes > 4)
+ return 4;
+ else if (op_bytes == 3)
+ return 2;
+ return op_bytes;
+}
+
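fsi_i2c_get_op_bytes() exists because FSI accesses must be 1, 2 or 4 bytes wide, so a 3-byte residue is split rather than issued directly. A sketch of how the FIFO loops below consume a residue (example_chunking is illustrative; the device access is elided):

static void example_chunking(int len)	/* e.g. len = 7 -> ops of 4, 2, 1 */
{
	while (len > 0) {
		int op = fsi_i2c_get_op_bytes(len);

		/* ... fsi_device_read()/fsi_device_write() of 'op' bytes ... */
		len -= op;
	}
}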
+static int fsi_i2c_write_fifo(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ u8 fifo_count)
+{
+ int write;
+ int rc;
+ struct fsi_i2c_master *i2c = port->master;
+ int bytes_to_write = i2c->fifo_size - fifo_count;
+ int bytes_remaining = msg->len - port->xfrd;
+
+ bytes_to_write = min(bytes_to_write, bytes_remaining);
+
+ while (bytes_to_write) {
+ write = fsi_i2c_get_op_bytes(bytes_to_write);
+
+ rc = fsi_device_write(i2c->fsi, I2C_FSI_FIFO,
+ &msg->buf[port->xfrd], write);
+ if (rc)
+ return rc;
+
+ port->xfrd += write;
+ bytes_to_write -= write;
+ }
+
+ return 0;
+}
+
+static int fsi_i2c_read_fifo(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ u8 fifo_count)
+{
+ int read;
+ int rc;
+ struct fsi_i2c_master *i2c = port->master;
+ int bytes_to_read;
+ int xfr_remaining = msg->len - port->xfrd;
+ u32 dummy;
+
+ bytes_to_read = min_t(int, fifo_count, xfr_remaining);
+
+ while (bytes_to_read) {
+ read = fsi_i2c_get_op_bytes(bytes_to_read);
+
+ if (xfr_remaining) {
+ rc = fsi_device_read(i2c->fsi, I2C_FSI_FIFO,
+ &msg->buf[port->xfrd], read);
+ if (rc)
+ return rc;
+
+ port->xfrd += read;
+ xfr_remaining -= read;
+ } else {
+ /* no more buffer but data in fifo, need to clear it */
+ rc = fsi_device_read(i2c->fsi, I2C_FSI_FIFO, &dummy,
+ read);
+ if (rc)
+ return rc;
+ }
+
+ bytes_to_read -= read;
+ }
+
+ return 0;
+}
+
+static int fsi_i2c_get_scl(struct i2c_adapter *adap)
+{
+ u32 stat = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat);
+
+ return !!(stat & I2C_STAT_SCL_IN);
+}
+
+static void fsi_i2c_set_scl(struct i2c_adapter *adap, int val)
+{
+ u32 dummy = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ if (val)
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_SET_SCL, &dummy);
+ else
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_SCL, &dummy);
+}
+
+static int fsi_i2c_get_sda(struct i2c_adapter *adap)
+{
+ u32 stat = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat);
+
+ return !!(stat & I2C_STAT_SDA_IN);
+}
+
+static void fsi_i2c_set_sda(struct i2c_adapter *adap, int val)
+{
+ u32 dummy = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ if (val)
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_SET_SDA, &dummy);
+ else
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_SDA, &dummy);
+}
+
+static void fsi_i2c_prepare_recovery(struct i2c_adapter *adap)
+{
+ int rc;
+ u32 mode;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return;
+
+ mode |= I2C_MODE_DIAG;
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+}
+
+static void fsi_i2c_unprepare_recovery(struct i2c_adapter *adap)
+{
+ int rc;
+ u32 mode;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return;
+
+ mode &= ~I2C_MODE_DIAG;
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+}
+
+static int fsi_i2c_reset_bus(struct fsi_i2c_master *i2c,
+ struct fsi_i2c_port *port)
+{
+ int rc;
+ u32 stat, dummy = 0;
+
+ /* force bus reset, ignore errors */
+ i2c_recover_bus(&port->adapter);
+
+ /* reset errors */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_ERR, &dummy);
+ if (rc)
+ return rc;
+
+ /* wait for command complete */
+ usleep_range(I2C_RESET_SLEEP_MIN_US, I2C_RESET_SLEEP_MAX_US);
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat);
+ if (rc)
+ return rc;
+
+ if (stat & I2C_STAT_CMD_COMP)
+ return 0;
+
+ /* failed to get command complete; reset engine again */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_I2C, &dummy);
+ if (rc)
+ return rc;
+
+	/* re-initialize the engine once more */
+ return fsi_i2c_dev_init(i2c);
+}
+
+static int fsi_i2c_reset_engine(struct fsi_i2c_master *i2c, u16 port)
+{
+ int rc;
+ u32 mode, dummy = 0;
+
+ /* reset engine */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_I2C, &dummy);
+ if (rc)
+ return rc;
+
+ /* re-init engine */
+ rc = fsi_i2c_dev_init(i2c);
+ if (rc)
+ return rc;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ /* set port; default after reset is 0 */
+ if (port) {
+ mode &= ~I2C_MODE_PORT;
+ mode |= FIELD_PREP(I2C_MODE_PORT, port);
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+ }
+
+ /* reset busy register; hw workaround */
+ dummy = I2C_PORT_BUSY_RESET;
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_PORT_BUSY, &dummy);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int fsi_i2c_abort(struct fsi_i2c_port *port, u32 status)
+{
+ int rc;
+ unsigned long start;
+ u32 cmd = I2C_CMD_WITH_STOP;
+ u32 stat;
+ struct fsi_i2c_master *i2c = port->master;
+ struct fsi_device *fsi = i2c->fsi;
+
+ rc = fsi_i2c_reset_engine(i2c, port->port);
+ if (rc)
+ return rc;
+
+ rc = fsi_i2c_read_reg(fsi, I2C_FSI_STAT, &stat);
+ if (rc)
+ return rc;
+
+	/* if SDA is low, perform a full bus reset */
+ if (!(stat & I2C_STAT_SDA_IN)) {
+ rc = fsi_i2c_reset_bus(i2c, port);
+ if (rc)
+ return rc;
+ }
+
+ /* skip final stop command for these errors */
+ if (status & (I2C_STAT_PARITY | I2C_STAT_LOST_ARB | I2C_STAT_STOP_ERR))
+ return 0;
+
+ /* write stop command */
+ rc = fsi_i2c_write_reg(fsi, I2C_FSI_CMD, &cmd);
+ if (rc)
+ return rc;
+
+ /* wait until we see command complete in the master */
+ start = jiffies;
+
+ do {
+ rc = fsi_i2c_read_reg(fsi, I2C_FSI_STAT, &status);
+ if (rc)
+ return rc;
+
+ if (status & I2C_STAT_CMD_COMP)
+ return 0;
+
+ usleep_range(I2C_CMD_SLEEP_MIN_US, I2C_CMD_SLEEP_MAX_US);
+ } while (time_after(start + I2C_ABORT_TIMEOUT, jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int fsi_i2c_handle_status(struct fsi_i2c_port *port,
+ struct i2c_msg *msg, u32 status)
+{
+ int rc;
+ u8 fifo_count;
+
+ if (status & I2C_STAT_ERR) {
+ rc = fsi_i2c_abort(port, status);
+ if (rc)
+ return rc;
+
+ if (status & I2C_STAT_INV_CMD)
+ return -EINVAL;
+
+ if (status & (I2C_STAT_PARITY | I2C_STAT_BE_OVERRUN |
+ I2C_STAT_BE_ACCESS))
+ return -EPROTO;
+
+ if (status & I2C_STAT_NACK)
+ return -ENXIO;
+
+ if (status & I2C_STAT_LOST_ARB)
+ return -EAGAIN;
+
+ if (status & I2C_STAT_STOP_ERR)
+ return -EBADMSG;
+
+ return -EIO;
+ }
+
+ if (status & I2C_STAT_DAT_REQ) {
+ fifo_count = FIELD_GET(I2C_STAT_FIFO_COUNT, status);
+
+ if (msg->flags & I2C_M_RD)
+ return fsi_i2c_read_fifo(port, msg, fifo_count);
+
+ return fsi_i2c_write_fifo(port, msg, fifo_count);
+ }
+
+ if (status & I2C_STAT_CMD_COMP) {
+ if (port->xfrd < msg->len)
+ return -ENODATA;
+
+ return msg->len;
+ }
+
+ return 0;
+}
+
+static int fsi_i2c_wait(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ unsigned long timeout)
+{
+ u32 status = 0;
+ int rc;
+ unsigned long start = jiffies;
+
+ do {
+ rc = fsi_i2c_read_reg(port->master->fsi, I2C_FSI_STAT,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & I2C_STAT_ANY_RESP) {
+ rc = fsi_i2c_handle_status(port, msg, status);
+ if (rc < 0)
+ return rc;
+
+ /* cmd complete and all data xfrd */
+ if (rc == msg->len)
+ return 0;
+
+			/* more data to transfer, but we may not need to wait */
+ continue;
+ }
+
+ usleep_range(I2C_CMD_SLEEP_MIN_US, I2C_CMD_SLEEP_MAX_US);
+ } while (time_after(start + timeout, jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int fsi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ int i, rc;
+ unsigned long start_time;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *master = port->master;
+ struct i2c_msg *msg;
+
+ mutex_lock(&master->lock);
+
+ rc = fsi_i2c_set_port(port);
+ if (rc)
+ goto unlock;
+
+ for (i = 0; i < num; i++) {
+ msg = msgs + i;
+ start_time = jiffies;
+
+ rc = fsi_i2c_start(port, msg, i == num - 1);
+ if (rc)
+ goto unlock;
+
+ rc = fsi_i2c_wait(port, msg,
+ adap->timeout - (jiffies - start_time));
+ if (rc)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&master->lock);
+ return rc ? : num;
+}
+
+static u32 fsi_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_PROTOCOL_MANGLING |
+ I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static struct i2c_bus_recovery_info fsi_i2c_bus_recovery_info = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .get_scl = fsi_i2c_get_scl,
+ .set_scl = fsi_i2c_set_scl,
+ .get_sda = fsi_i2c_get_sda,
+ .set_sda = fsi_i2c_set_sda,
+ .prepare_recovery = fsi_i2c_prepare_recovery,
+ .unprepare_recovery = fsi_i2c_unprepare_recovery,
+};
+
+static const struct i2c_algorithm fsi_i2c_algorithm = {
+ .master_xfer = fsi_i2c_xfer,
+ .functionality = fsi_i2c_functionality,
+};
+
+static int fsi_i2c_probe(struct device *dev)
+{
+ struct fsi_i2c_master *i2c;
+ struct fsi_i2c_port *port;
+ struct device_node *np;
+ int rc;
+ u32 port_no;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ mutex_init(&i2c->lock);
+ i2c->fsi = to_fsi_dev(dev);
+ INIT_LIST_HEAD(&i2c->ports);
+
+ rc = fsi_i2c_dev_init(i2c);
+ if (rc)
+ return rc;
+
+ /* Add adapter for each i2c port of the master. */
+ for_each_available_child_of_node(dev->of_node, np) {
+ rc = of_property_read_u32(np, "reg", &port_no);
+ if (rc || port_no > USHRT_MAX)
+ continue;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ break;
+
+ port->master = i2c;
+ port->port = port_no;
+
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.of_node = np;
+ port->adapter.dev.parent = dev;
+ port->adapter.algo = &fsi_i2c_algorithm;
+ port->adapter.bus_recovery_info = &fsi_i2c_bus_recovery_info;
+ port->adapter.algo_data = port;
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "i2c_bus-%u", port_no);
+
+ rc = i2c_add_adapter(&port->adapter);
+ if (rc < 0) {
+ dev_err(dev, "Failed to register adapter: %d\n", rc);
+ kfree(port);
+ continue;
+ }
+
+ list_add(&port->list, &i2c->ports);
+ }
+
+ dev_set_drvdata(dev, i2c);
+
+ return 0;
+}
+
+static int fsi_i2c_remove(struct device *dev)
+{
+ struct fsi_i2c_master *i2c = dev_get_drvdata(dev);
+ struct fsi_i2c_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &i2c->ports, list) {
+ list_del(&port->list);
+ i2c_del_adapter(&port->adapter);
+ kfree(port);
+ }
+
+ return 0;
+}
+
+static const struct fsi_device_id fsi_i2c_ids[] = {
+ { FSI_ENGID_I2C, FSI_VERSION_ANY },
+ { }
+};
+
+static struct fsi_driver fsi_i2c_driver = {
+ .id_table = fsi_i2c_ids,
+ .drv = {
+ .name = "i2c-fsi",
+ .bus = &fsi_bus_type,
+ .probe = fsi_i2c_probe,
+ .remove = fsi_i2c_remove,
+ },
+};
+
+module_fsi_driver(fsi_i2c_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@us.ibm.com>");
+MODULE_DESCRIPTION("FSI attached I2C master");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 005e6e0330c2..c008d209f0b8 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -78,49 +78,43 @@ static struct dentry *i2c_gpio_debug_dir;
#define getscl(bd) ((bd)->getscl((bd)->data))
#define WIRE_ATTRIBUTE(wire) \
-static int fops_##wire##_get(void *data, u64 *val) \
-{ \
- struct i2c_gpio_private_data *priv = data; \
- \
- i2c_lock_adapter(&priv->adap); \
- *val = get##wire(&priv->bit_data); \
- i2c_unlock_adapter(&priv->adap); \
- return 0; \
-} \
-static int fops_##wire##_set(void *data, u64 val) \
-{ \
- struct i2c_gpio_private_data *priv = data; \
- \
- i2c_lock_adapter(&priv->adap); \
- set##wire(&priv->bit_data, val); \
- i2c_unlock_adapter(&priv->adap); \
- return 0; \
-} \
+static int fops_##wire##_get(void *data, u64 *val) \
+{ \
+ struct i2c_gpio_private_data *priv = data; \
+ \
+ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ *val = get##wire(&priv->bit_data); \
+ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ return 0; \
+} \
+static int fops_##wire##_set(void *data, u64 val) \
+{ \
+ struct i2c_gpio_private_data *priv = data; \
+ \
+ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ set##wire(&priv->bit_data, val); \
+ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ return 0; \
+} \
DEFINE_DEBUGFS_ATTRIBUTE(fops_##wire, fops_##wire##_get, fops_##wire##_set, "%llu\n")
WIRE_ATTRIBUTE(scl);
WIRE_ATTRIBUTE(sda);
-static int fops_incomplete_transfer_set(void *data, u64 addr)
+static void i2c_gpio_incomplete_transfer(struct i2c_gpio_private_data *priv,
+ u32 pattern, u8 pattern_size)
{
- struct i2c_gpio_private_data *priv = data;
struct i2c_algo_bit_data *bit_data = &priv->bit_data;
- int i, pattern;
-
- if (addr > 0x7f)
- return -EINVAL;
+ int i;
- /* ADDR (7 bit) + RD (1 bit) + SDA hi (1 bit) */
- pattern = (addr << 2) | 3;
-
- i2c_lock_adapter(&priv->adap);
+ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER);
/* START condition */
setsda(bit_data, 0);
udelay(bit_data->udelay);
- /* Send ADDR+RD, request ACK, don't send STOP */
- for (i = 8; i >= 0; i--) {
+ /* Send pattern, request ACK, don't send STOP */
+ for (i = pattern_size - 1; i >= 0; i--) {
setscl(bit_data, 0);
udelay(bit_data->udelay / 2);
setsda(bit_data, (pattern >> i) & 1);
@@ -129,11 +123,44 @@ static int fops_incomplete_transfer_set(void *data, u64 addr)
udelay(bit_data->udelay);
}
- i2c_unlock_adapter(&priv->adap);
+ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER);
+}
+
+static int fops_incomplete_addr_phase_set(void *data, u64 addr)
+{
+ struct i2c_gpio_private_data *priv = data;
+ u32 pattern;
+
+ if (addr > 0x7f)
+ return -EINVAL;
+
+ /* ADDR (7 bit) + RD (1 bit) + Client ACK, keep SDA hi (1 bit) */
+ pattern = (addr << 2) | 3;
+
+ i2c_gpio_incomplete_transfer(priv, pattern, 9);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_addr_phase, NULL, fops_incomplete_addr_phase_set, "%llu\n");
+
+static int fops_incomplete_write_byte_set(void *data, u64 addr)
+{
+ struct i2c_gpio_private_data *priv = data;
+ u32 pattern;
+
+ if (addr > 0x7f)
+ return -EINVAL;
+
+ /* ADDR (7 bit) + WR (1 bit) + Client ACK (1 bit) */
+ pattern = (addr << 2) | 1;
+ /* 0x00 (8 bit) + Client ACK, keep SDA hi (1 bit) */
+ pattern = (pattern << 9) | 1;
+
+ i2c_gpio_incomplete_transfer(priv, pattern, 18);
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_transfer, NULL, fops_incomplete_transfer_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_write_byte, NULL, fops_incomplete_write_byte_set, "%llu\n");
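Worked example of the 18-bit pattern built above, using the hypothetical address 0x50 (the helper only restates the two shift expressions):

static u32 example_write_byte_pattern(u64 addr)	/* e.g. addr = 0x50 */
{
	u32 pattern = (addr << 2) | 1;	/* 0x141: ADDR + WR + ACK slot high */

	return (pattern << 9) | 1;	/* 0x28201: + 0x00 byte + ACK slot high */
}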
static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
{
@@ -156,8 +183,10 @@ static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
debugfs_create_file_unsafe("scl", 0600, priv->debug_dir, priv, &fops_scl);
debugfs_create_file_unsafe("sda", 0600, priv->debug_dir, priv, &fops_sda);
- debugfs_create_file_unsafe("incomplete_transfer", 0200, priv->debug_dir,
- priv, &fops_incomplete_transfer);
+ debugfs_create_file_unsafe("incomplete_address_phase", 0200, priv->debug_dir,
+ priv, &fops_incomplete_addr_phase);
+ debugfs_create_file_unsafe("incomplete_write_byte", 0200, priv->debug_dir,
+ priv, &fops_incomplete_write_byte);
}
static void i2c_gpio_fault_injector_exit(struct platform_device *pdev)
@@ -279,9 +308,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
* required for an I2C bus.
*/
if (pdata->scl_is_open_drain)
- gflags = GPIOD_OUT_LOW;
+ gflags = GPIOD_OUT_HIGH;
else
- gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+ gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
if (IS_ERR(priv->scl))
return PTR_ERR(priv->scl);
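The switch to GPIOD_OUT_HIGH above matters because a "high" open-drain line is released, letting the pull-up hold the bus idle, whereas requesting it low would stall the bus before the adapter is even registered. A minimal sketch, assuming a device with an "scl" GPIO mapping (example_request_scl is hypothetical):

static int example_request_scl(struct device *dev)
{
	struct gpio_desc *scl;

	scl = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (IS_ERR(scl))
		return PTR_ERR(scl);

	gpiod_set_value_cansleep(scl, 0);	/* actively drive SCL low */
	gpiod_set_value_cansleep(scl, 1);	/* release SCL: bus idle */
	return 0;
}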
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 56dc69e7349f..ff340d7ae2e5 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Solutions Highlander FPGA I2C/SMBus support.
*
@@ -6,10 +7,6 @@
* Copyright (C) 2008 Paul Mundt
* Copyright (C) 2008 Renesas Solutions Corp.
* Copyright (C) 2008 Atom Create Engineering Co., Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file "COPYING" in the main directory
- * of this archive for more details.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index aa726607645e..941c223f6491 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -70,6 +70,7 @@
* Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
* Cedar Fork (PCH) 0x18df 32 hard yes yes yes
+ * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -220,6 +221,7 @@
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0
#define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS 0x31d4
+#define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
@@ -1034,6 +1036,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
{ 0, }
};
@@ -1518,6 +1521,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0207e194f84b..c406700789e1 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
goto err_desc;
}
+ reinit_completion(&dma->cmd_complete);
txdesc->callback = i2c_imx_dma_callback;
txdesc->callback_param = i2c_imx;
if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -420,10 +421,14 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
return -EAGAIN;
}
- if (for_busy && (temp & I2SR_IBB))
+ if (for_busy && (temp & I2SR_IBB)) {
+ i2c_imx->stopped = 0;
break;
- if (!for_busy && !(temp & I2SR_IBB))
+ }
+ if (!for_busy && !(temp & I2SR_IBB)) {
+ i2c_imx->stopped = 1;
break;
+ }
if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
dev_dbg(&i2c_imx->adapter.dev,
"<%s> I2C bus is busy\n", __func__);
@@ -537,7 +542,6 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
result = i2c_imx_bus_busy(i2c_imx, 1);
if (result)
return result;
- i2c_imx->stopped = 0;
temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
temp &= ~I2CR_DMAEN;
@@ -566,10 +570,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
udelay(i2c_imx->disable_delay);
}
- if (!i2c_imx->stopped) {
+ if (!i2c_imx->stopped)
i2c_imx_bus_busy(i2c_imx, 0);
- i2c_imx->stopped = 1;
- }
/* Disable I2C controller */
temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
@@ -622,7 +624,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
* The first byte must be transmitted by the CPU.
*/
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -668,9 +669,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
struct imx_i2c_dma *dma = i2c_imx->dma;
struct device *dev = &i2c_imx->adapter.dev;
- temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
- temp |= I2CR_DMAEN;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
dma->chan_using = dma->chan_rx;
dma->dma_transfer_dir = DMA_DEV_TO_MEM;
@@ -681,7 +679,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
if (result)
return result;
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -728,7 +725,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
i2c_imx_bus_busy(i2c_imx, 0);
- i2c_imx->stopped = 1;
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -784,6 +780,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
int i, result;
unsigned int temp;
int block_data = msgs->flags & I2C_M_RECV_LEN;
+ int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
dev_dbg(&i2c_imx->adapter.dev,
"<%s> write slave address: addr=0x%x\n",
@@ -810,12 +807,14 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
*/
if ((msgs->len - 1) || block_data)
temp &= ~I2CR_TXAK;
+ if (use_dma)
+ temp |= I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
- if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
+ if (use_dma)
return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
/* read data */
@@ -851,7 +850,6 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
i2c_imx_bus_busy(i2c_imx, 0);
- i2c_imx->stopped = 1;
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -1010,7 +1008,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
"gpio");
rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 642c58946d8d..7d79317a1046 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -567,9 +567,6 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
msg->addr, msg->len, msg->flags, stop);
- if (msg->len == 0)
- return -EINVAL;
-
/*
	 * The MX28 I2C IP block can only do PIO READ for transfers of up to
	 * 4 bytes in length. The write transfer is not limited as it can use
@@ -683,6 +680,10 @@ static const struct i2c_algorithm mxs_i2c_algo = {
.functionality = mxs_i2c_func,
};
+static const struct i2c_adapter_quirks mxs_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, uint32_t speed)
{
/* The I2C block clock runs at 24MHz */
@@ -854,6 +855,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &mxs_i2c_algo;
+ adap->quirks = &mxs_i2c_quirks;
adap->dev.parent = dev;
adap->nr = pdev->id;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 88444ef74943..87f9caacba85 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -2,7 +2,7 @@
* i2c-ocores.c: I2C bus driver for OpenCores I2C controller
* (https://opencores.org/project/i2c/overview)
*
- * Peter Korsgaard <jacmet@sunsite.dk>
+ * Peter Korsgaard <peter@korsgaard.com>
*
* Support for the GRLIB port of the controller by
* Andreas Larsson <andreas@gaisler.com>
@@ -576,7 +576,7 @@ static struct platform_driver ocores_i2c_driver = {
module_platform_driver(ocores_i2c_driver);
-MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_AUTHOR("Peter Korsgaard <peter@korsgaard.com>");
MODULE_DESCRIPTION("OpenCores I2C bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ocores-i2c");
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
new file mode 100644
index 000000000000..96b4572e6d9c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Actions Semiconductor Owl SoC's I2C driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+/* I2C registers */
+#define OWL_I2C_REG_CTL 0x0000
+#define OWL_I2C_REG_CLKDIV 0x0004
+#define OWL_I2C_REG_STAT 0x0008
+#define OWL_I2C_REG_ADDR 0x000C
+#define OWL_I2C_REG_TXDAT 0x0010
+#define OWL_I2C_REG_RXDAT 0x0014
+#define OWL_I2C_REG_CMD 0x0018
+#define OWL_I2C_REG_FIFOCTL 0x001C
+#define OWL_I2C_REG_FIFOSTAT 0x0020
+#define OWL_I2C_REG_DATCNT 0x0024
+#define OWL_I2C_REG_RCNT 0x0028
+
+/* I2Cx_CTL Bit Mask */
+#define OWL_I2C_CTL_RB BIT(1)
+#define OWL_I2C_CTL_GBCC(x) (((x) & 0x3) << 2)
+#define OWL_I2C_CTL_GBCC_NONE OWL_I2C_CTL_GBCC(0)
+#define OWL_I2C_CTL_GBCC_START OWL_I2C_CTL_GBCC(1)
+#define OWL_I2C_CTL_GBCC_STOP OWL_I2C_CTL_GBCC(2)
+#define OWL_I2C_CTL_GBCC_RSTART OWL_I2C_CTL_GBCC(3)
+#define OWL_I2C_CTL_IRQE BIT(5)
+#define OWL_I2C_CTL_EN BIT(7)
+#define OWL_I2C_CTL_AE BIT(8)
+#define OWL_I2C_CTL_SHSM BIT(10)
+
+#define OWL_I2C_DIV_FACTOR(x) ((x) & 0xff)
+
+/* I2Cx_STAT Bit Mask */
+#define OWL_I2C_STAT_RACK BIT(0)
+#define OWL_I2C_STAT_BEB BIT(1)
+#define OWL_I2C_STAT_IRQP BIT(2)
+#define OWL_I2C_STAT_LAB BIT(3)
+#define OWL_I2C_STAT_STPD BIT(4)
+#define OWL_I2C_STAT_STAD BIT(5)
+#define OWL_I2C_STAT_BBB BIT(6)
+#define OWL_I2C_STAT_TCB BIT(7)
+#define OWL_I2C_STAT_LBST BIT(8)
+#define OWL_I2C_STAT_SAMB BIT(9)
+#define OWL_I2C_STAT_SRGC BIT(10)
+
+/* I2Cx_CMD Bit Mask */
+#define OWL_I2C_CMD_SBE BIT(0)
+#define OWL_I2C_CMD_RBE BIT(4)
+#define OWL_I2C_CMD_DE BIT(8)
+#define OWL_I2C_CMD_NS BIT(9)
+#define OWL_I2C_CMD_SE BIT(10)
+#define OWL_I2C_CMD_MSS BIT(11)
+#define OWL_I2C_CMD_WRS BIT(12)
+#define OWL_I2C_CMD_SECL BIT(15)
+
+#define OWL_I2C_CMD_AS(x) (((x) & 0x7) << 1)
+#define OWL_I2C_CMD_SAS(x) (((x) & 0x7) << 5)
+
+/* I2Cx_FIFOCTL Bit Mask */
+#define OWL_I2C_FIFOCTL_NIB BIT(0)
+#define OWL_I2C_FIFOCTL_RFR BIT(1)
+#define OWL_I2C_FIFOCTL_TFR BIT(2)
+
+/* I2Cx_FIFOSTAT Bit Mask */
+#define OWL_I2C_FIFOSTAT_RNB BIT(1)
+#define OWL_I2C_FIFOSTAT_RFE BIT(2)
+#define OWL_I2C_FIFOSTAT_TFF BIT(5)
+#define OWL_I2C_FIFOSTAT_TFD GENMASK(23, 16)
+#define OWL_I2C_FIFOSTAT_RFD GENMASK(15, 8)
+
+/* I2C bus timeout */
+#define OWL_I2C_TIMEOUT msecs_to_jiffies(4 * 1000)
+
+#define OWL_I2C_MAX_RETRIES 50
+
+#define OWL_I2C_DEF_SPEED_HZ 100000
+#define OWL_I2C_MAX_SPEED_HZ 400000
+
+struct owl_i2c_dev {
+ struct i2c_adapter adap;
+ struct i2c_msg *msg;
+ struct completion msg_complete;
+ struct clk *clk;
+ spinlock_t lock;
+ void __iomem *base;
+ unsigned long clk_rate;
+ u32 bus_freq;
+ u32 msg_ptr;
+ int err;
+};
+
+static void owl_i2c_update_reg(void __iomem *reg, unsigned int val, bool state)
+{
+ unsigned int regval;
+
+ regval = readl(reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, reg);
+}
+
+static void owl_i2c_reset(struct owl_i2c_dev *i2c_dev)
+{
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_EN, false);
+ mdelay(1);
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_EN, true);
+
+ /* Clear status registers */
+ writel(0, i2c_dev->base + OWL_I2C_REG_STAT);
+}
+
+static int owl_i2c_reset_fifo(struct owl_i2c_dev *i2c_dev)
+{
+ unsigned int val, timeout = 0;
+
+ /* Reset FIFO */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
+ OWL_I2C_FIFOCTL_RFR | OWL_I2C_FIFOCTL_TFR,
+ true);
+
+	/* Wait up to 50 ms for the FIFO reset to complete */
+ do {
+ val = readl(i2c_dev->base + OWL_I2C_REG_FIFOCTL);
+ if (!(val & (OWL_I2C_FIFOCTL_RFR | OWL_I2C_FIFOCTL_TFR)))
+ break;
+ usleep_range(500, 1000);
+ } while (timeout++ < OWL_I2C_MAX_RETRIES);
+
+ if (timeout > OWL_I2C_MAX_RETRIES) {
+ dev_err(&i2c_dev->adap.dev, "FIFO reset timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void owl_i2c_set_freq(struct owl_i2c_dev *i2c_dev)
+{
+ unsigned int val;
+
+ val = DIV_ROUND_UP(i2c_dev->clk_rate, i2c_dev->bus_freq * 16);
+
+ /* Set clock divider factor */
+ writel(OWL_I2C_DIV_FACTOR(val), i2c_dev->base + OWL_I2C_REG_CLKDIV);
+}
+
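owl_i2c_set_freq() above derives the divider from the input clock and a fixed factor of 16 input cycles per SCL period (an inference from the formula). Worked example under an assumed 24 MHz input clock (example_owl_clkdiv is hypothetical):

static u32 example_owl_clkdiv(unsigned long clk_rate, u32 bus_freq)
{
	/* 24 MHz input, 100 kHz bus:
	 * DIV_ROUND_UP(24000000, 100000 * 16) = 15 */
	return DIV_ROUND_UP(clk_rate, bus_freq * 16);
}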
+static irqreturn_t owl_i2c_interrupt(int irq, void *_dev)
+{
+ struct owl_i2c_dev *i2c_dev = _dev;
+ struct i2c_msg *msg = i2c_dev->msg;
+ unsigned long flags;
+ unsigned int stat, fifostat;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+
+ i2c_dev->err = 0;
+
+ /* Handle NACK from slave */
+ fifostat = readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT);
+ if (fifostat & OWL_I2C_FIFOSTAT_RNB) {
+ i2c_dev->err = -ENXIO;
+ goto stop;
+ }
+
+ /* Handle bus error */
+ stat = readl(i2c_dev->base + OWL_I2C_REG_STAT);
+ if (stat & OWL_I2C_STAT_BEB) {
+ i2c_dev->err = -EIO;
+ goto stop;
+ }
+
+ /* Handle FIFO read */
+ if (msg->flags & I2C_M_RD) {
+ while ((readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) &
+ OWL_I2C_FIFOSTAT_RFE) && i2c_dev->msg_ptr < msg->len) {
+ msg->buf[i2c_dev->msg_ptr++] = readl(i2c_dev->base +
+ OWL_I2C_REG_RXDAT);
+ }
+ } else {
+ /* Handle the remaining bytes which were not sent */
+ while (!(readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) &
+ OWL_I2C_FIFOSTAT_TFF) && i2c_dev->msg_ptr < msg->len) {
+ writel(msg->buf[i2c_dev->msg_ptr++],
+ i2c_dev->base + OWL_I2C_REG_TXDAT);
+ }
+ }
+
+stop:
+ /* Clear pending interrupts */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT,
+ OWL_I2C_STAT_IRQP, true);
+
+ complete_all(&i2c_dev->msg_complete);
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static u32 owl_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static int owl_i2c_check_bus_busy(struct i2c_adapter *adap)
+{
+ struct owl_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ unsigned long timeout;
+
+ /* Check for Bus busy */
+ timeout = jiffies + OWL_I2C_TIMEOUT;
+ while (readl(i2c_dev->base + OWL_I2C_REG_STAT) & OWL_I2C_STAT_BBB) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(&adap->dev, "Bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int owl_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct owl_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msg;
+ unsigned long time_left, flags;
+ unsigned int i2c_cmd, val;
+ unsigned int addr;
+ int ret, idx;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+
+ /* Reset I2C controller */
+ owl_i2c_reset(i2c_dev);
+
+ /* Set bus frequency */
+ owl_i2c_set_freq(i2c_dev);
+
+ /*
+	 * The spinlock must be released before the FIFO reset and the bus
+	 * busy check below, since those functions may sleep
+ */
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+ /* Reset FIFO */
+ ret = owl_i2c_reset_fifo(i2c_dev);
+ if (ret)
+ goto unlocked_err_exit;
+
+ /* Check for bus busy */
+ ret = owl_i2c_check_bus_busy(adap);
+ if (ret)
+ goto unlocked_err_exit;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+
+ /* Check for Arbitration lost */
+ val = readl(i2c_dev->base + OWL_I2C_REG_STAT);
+ if (val & OWL_I2C_STAT_LAB) {
+ val &= ~OWL_I2C_STAT_LAB;
+ writel(val, i2c_dev->base + OWL_I2C_REG_STAT);
+ ret = -EAGAIN;
+ goto err_exit;
+ }
+
+ reinit_completion(&i2c_dev->msg_complete);
+
+ /* Enable I2C controller interrupt */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_IRQE, true);
+
+ /*
+ * Select: FIFO enable, Master mode, Stop enable, Data count enable,
+ * Send start bit
+ */
+ i2c_cmd = OWL_I2C_CMD_SECL | OWL_I2C_CMD_MSS | OWL_I2C_CMD_SE |
+ OWL_I2C_CMD_NS | OWL_I2C_CMD_DE | OWL_I2C_CMD_SBE;
+
+ /* Handle repeated start condition */
+ if (num > 1) {
+ /* Set internal address length and enable repeated start */
+ i2c_cmd |= OWL_I2C_CMD_AS(msgs[0].len + 1) |
+ OWL_I2C_CMD_SAS(1) | OWL_I2C_CMD_RBE;
+
+ /* Write slave address */
+ addr = i2c_8bit_addr_from_msg(&msgs[0]);
+ writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT);
+
+ /* Write internal register address */
+ for (idx = 0; idx < msgs[0].len; idx++)
+ writel(msgs[0].buf[idx],
+ i2c_dev->base + OWL_I2C_REG_TXDAT);
+
+ msg = &msgs[1];
+ } else {
+ /* Set address length */
+ i2c_cmd |= OWL_I2C_CMD_AS(1);
+ msg = &msgs[0];
+ }
+
+ i2c_dev->msg = msg;
+ i2c_dev->msg_ptr = 0;
+
+ /* Set data count for the message */
+ writel(msg->len, i2c_dev->base + OWL_I2C_REG_DATCNT);
+
+ addr = i2c_8bit_addr_from_msg(msg);
+ writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT);
+
+ if (!(msg->flags & I2C_M_RD)) {
+ /* Write data to FIFO */
+ for (idx = 0; idx < msg->len; idx++) {
+ /* Check for FIFO full */
+ if (readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) &
+ OWL_I2C_FIFOSTAT_TFF)
+ break;
+
+ writel(msg->buf[idx],
+ i2c_dev->base + OWL_I2C_REG_TXDAT);
+ }
+
+ i2c_dev->msg_ptr = idx;
+ }
+
+ /* Ignore the NACK if needed */
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
+ OWL_I2C_FIFOCTL_NIB, true);
+ else
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
+ OWL_I2C_FIFOCTL_NIB, false);
+
+ /* Start the transfer */
+ writel(i2c_cmd, i2c_dev->base + OWL_I2C_REG_CMD);
+
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+ time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
+ adap->timeout);
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (time_left == 0) {
+ dev_err(&adap->dev, "Transaction timed out\n");
+ /* Send stop condition and release the bus */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_GBCC_STOP | OWL_I2C_CTL_RB,
+ true);
+ ret = -ETIMEDOUT;
+ goto err_exit;
+ }
+
+ ret = i2c_dev->err < 0 ? i2c_dev->err : num;
+
+err_exit:
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+unlocked_err_exit:
+ /* Disable I2C controller */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_EN, false);
+
+ return ret;
+}
+
+static const struct i2c_algorithm owl_i2c_algorithm = {
+ .master_xfer = owl_i2c_master_xfer,
+ .functionality = owl_i2c_func,
+};
+
+static const struct i2c_adapter_quirks owl_i2c_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST,
+ .max_read_len = 240,
+ .max_write_len = 240,
+ .max_comb_1st_msg_len = 6,
+ .max_comb_2nd_msg_len = 240,
+};
+
+static int owl_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct owl_i2c_dev *i2c_dev;
+ struct resource *res;
+ int ret, irq;
+
+ i2c_dev = devm_kzalloc(dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c_dev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(i2c_dev->base))
+ return PTR_ERR(i2c_dev->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get IRQ number\n");
+ return irq;
+ }
+
+ if (of_property_read_u32(dev->of_node, "clock-frequency",
+ &i2c_dev->bus_freq))
+ i2c_dev->bus_freq = OWL_I2C_DEF_SPEED_HZ;
+
+ /* We only support bus frequencies of 100 kHz and 400 kHz for now */
+ if (i2c_dev->bus_freq != OWL_I2C_DEF_SPEED_HZ &&
+ i2c_dev->bus_freq != OWL_I2C_MAX_SPEED_HZ) {
+ dev_err(dev, "invalid clock-frequency %d\n", i2c_dev->bus_freq);
+ return -EINVAL;
+ }
+
+ i2c_dev->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(i2c_dev->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(i2c_dev->clk);
+ }
+
+ ret = clk_prepare_enable(i2c_dev->clk);
+ if (ret)
+ return ret;
+
+ i2c_dev->clk_rate = clk_get_rate(i2c_dev->clk);
+ if (!i2c_dev->clk_rate) {
+ dev_err(dev, "input clock rate should not be zero\n");
+ ret = -EINVAL;
+ goto disable_clk;
+ }
+
+ init_completion(&i2c_dev->msg_complete);
+ spin_lock_init(&i2c_dev->lock);
+ i2c_dev->adap.owner = THIS_MODULE;
+ i2c_dev->adap.algo = &owl_i2c_algorithm;
+ i2c_dev->adap.timeout = OWL_I2C_TIMEOUT;
+ i2c_dev->adap.quirks = &owl_i2c_quirks;
+ i2c_dev->adap.dev.parent = dev;
+ i2c_dev->adap.dev.of_node = dev->of_node;
+ snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name),
+ "%s", "OWL I2C adapter");
+ i2c_set_adapdata(&i2c_dev->adap, i2c_dev);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = devm_request_irq(dev, irq, owl_i2c_interrupt, 0, pdev->name,
+ i2c_dev);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", irq);
+ goto disable_clk;
+ }
+
+ return i2c_add_adapter(&i2c_dev->adap);
+
+disable_clk:
+ clk_disable_unprepare(i2c_dev->clk);
+
+ return ret;
+}
+
+static const struct of_device_id owl_i2c_of_match[] = {
+ { .compatible = "actions,s900-i2c" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_i2c_of_match);
+
+static struct platform_driver owl_i2c_driver = {
+ .probe = owl_i2c_probe,
+ .driver = {
+ .name = "owl-i2c",
+ .of_match_table = of_match_ptr(owl_i2c_of_match),
+ },
+};
+module_platform_driver(owl_i2c_driver);
+
+MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Actions Semiconductor Owl SoC's I2C driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 55fd5c6f3cca..50803e5d995b 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -365,7 +365,6 @@ static int pasemi_smb_probe(struct pci_dev *dev,
smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
smbus->adapter.algo = &smbus_algorithm;
smbus->adapter.algo_data = smbus;
- smbus->adapter.nr = PCI_FUNC(dev->devfn);
/* set up the sysfs linkage to our parent device */
smbus->adapter.dev.parent = &dev->dev;
@@ -373,7 +372,7 @@ static int pasemi_smb_probe(struct pci_dev *dev,
reg_write(smbus, REG_CTL, (CTL_MTR | CTL_MRR |
(CLK_100K_DIV & CTL_CLK_M)));
- error = i2c_add_numbered_adapter(&smbus->adapter);
+ error = i2c_add_adapter(&smbus->adapter);
if (error)
goto out_release_region;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index dae8ac618a52..0829cb696d9d 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -444,16 +444,6 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
{
enum pmcmsptwi_xfer_result retval;
- if ((cmd->type == MSP_TWI_CMD_WRITE && cmd->write_len == 0) ||
- (cmd->type == MSP_TWI_CMD_READ && cmd->read_len == 0) ||
- (cmd->type == MSP_TWI_CMD_WRITE_READ &&
- (cmd->read_len == 0 || cmd->write_len == 0))) {
- dev_err(&pmcmsptwi_adapter.dev,
- "%s: Cannot transfer less than 1 byte\n",
- __func__);
- return -EINVAL;
- }
-
mutex_lock(&data->lock);
dev_dbg(&pmcmsptwi_adapter.dev,
"Setting address to 0x%04x\n", cmd->addr);
@@ -532,11 +522,6 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
cmd.write_data = msg->buf;
}
- if (msg->len == 0) {
- dev_err(&adap->dev, "Zero-byte messages unsupported\n");
- return -EINVAL;
- }
-
cmd.addr = msg->addr;
if (msg->flags & I2C_M_TEN) {
@@ -578,7 +563,7 @@ static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
}
static const struct i2c_adapter_quirks pmcmsptwi_i2c_quirks = {
- .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ | I2C_AQ_NO_ZERO_LEN,
.max_write_len = MSP_MAX_BYTES_PER_RW,
.max_read_len = MSP_MAX_BYTES_PER_RW,
.max_comb_1st_msg_len = MSP_MAX_BYTES_PER_RW,
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
new file mode 100644
index 000000000000..36732eb688a4
--- /dev/null
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spinlock.h>
+
+#define SE_I2C_TX_TRANS_LEN 0x26c
+#define SE_I2C_RX_TRANS_LEN 0x270
+#define SE_I2C_SCL_COUNTERS 0x278
+
+#define SE_I2C_ERR (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
+ M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
+#define SE_I2C_ABORT BIT(1)
+
+/* M_CMD OP codes for I2C */
+#define I2C_WRITE 0x1
+#define I2C_READ 0x2
+#define I2C_WRITE_READ 0x3
+#define I2C_ADDR_ONLY 0x4
+#define I2C_BUS_CLEAR 0x6
+#define I2C_STOP_ON_BUS 0x7
+/* M_CMD params for I2C */
+#define PRE_CMD_DELAY BIT(0)
+#define TIMESTAMP_BEFORE BIT(1)
+#define STOP_STRETCH BIT(2)
+#define TIMESTAMP_AFTER BIT(3)
+#define POST_COMMAND_DELAY BIT(4)
+#define IGNORE_ADD_NACK BIT(6)
+#define READ_FINISHED_WITH_ACK BIT(7)
+#define BYPASS_ADDR_PHASE BIT(8)
+#define SLV_ADDR_MSK GENMASK(15, 9)
+#define SLV_ADDR_SHFT 9
+/* I2C SCL COUNTER fields */
+#define HIGH_COUNTER_MSK GENMASK(29, 20)
+#define HIGH_COUNTER_SHFT 20
+#define LOW_COUNTER_MSK GENMASK(19, 10)
+#define LOW_COUNTER_SHFT 10
+#define CYCLE_COUNTER_MSK GENMASK(9, 0)
+
+enum geni_i2c_err_code {
+ GP_IRQ0,
+ NACK,
+ GP_IRQ2,
+ BUS_PROTO,
+ ARB_LOST,
+ GP_IRQ5,
+ GENI_OVERRUN,
+ GENI_ILLEGAL_CMD,
+ GENI_ABORT_DONE,
+ GENI_TIMEOUT,
+};
+
+#define DM_I2C_CB_ERR ((BIT(NACK) | BIT(BUS_PROTO) | BIT(ARB_LOST)) \
+ << 5)
+
+#define I2C_AUTO_SUSPEND_DELAY 250
+#define KHZ(freq) (1000 * (freq))
+#define PACKING_BYTES_PW 4
+
+#define ABORT_TIMEOUT HZ
+#define XFER_TIMEOUT HZ
+#define RST_TIMEOUT HZ
+
+struct geni_i2c_dev {
+ struct geni_se se;
+ u32 tx_wm;
+ int irq;
+ int err;
+ struct i2c_adapter adap;
+ struct completion done;
+ struct i2c_msg *cur;
+ int cur_wr;
+ int cur_rd;
+ spinlock_t lock;
+ u32 clk_freq_out;
+ const struct geni_i2c_clk_fld *clk_fld;
+ int suspended;
+};
+
+struct geni_i2c_err_log {
+ int err;
+ const char *msg;
+};
+
+static const struct geni_i2c_err_log gi2c_log[] = {
+ [GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
+ [NACK] = {-ENXIO, "NACK: slave unresponsive, check its power/reset line"},
+ [GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
+ [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
+ [ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
+ [GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
+ [GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
+ [GENI_ILLEGAL_CMD] = {-EIO, "Illegal cmd, check GENI cmd-state machine"},
+ [GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"},
+ [GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
+};
+
+struct geni_i2c_clk_fld {
+ u32 clk_freq_out;
+ u8 clk_div;
+ u8 t_high_cnt;
+ u8 t_low_cnt;
+ u8 t_cycle_cnt;
+};
+
+/*
+ * Hardware uses the following formula to calculate the time periods of
+ * the SCL clock cycle. Firmware uses some additional cycles excluded from the
+ * below formula and it is confirmed that the time periods are within
+ * specification limits.
+ *
+ * time of high period of SCL: t_high = (t_high_cnt * clk_div) / source_clock
+ * time of low period of SCL: t_low = (t_low_cnt * clk_div) / source_clock
+ * time of full period of SCL: t_cycle = (t_cycle_cnt * clk_div) / source_clock
+ * clk_freq_out = 1 / t_cycle
+ * source_clock = 19.2 MHz
+ */
+static const struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
+ {KHZ(100), 7, 10, 11, 26},
+ {KHZ(400), 2, 5, 12, 24},
+ {KHZ(1000), 1, 3, 9, 18},
+};
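
As a sanity check of the table against the formula, a small userspace sketch (not driver code; the raw figures sit slightly above nominal because the firmware's extra cycles are excluded, as noted above):

#include <stdio.h>

int main(void)
{
	const double source_clock = 19200000.0;	/* 19.2 MHz */
	const struct { unsigned int freq, div, cyc; } map[] = {
		{ 100000, 7, 26 }, { 400000, 2, 24 }, { 1000000, 1, 18 },
	};

	for (int i = 0; i < 3; i++)	/* f = source_clock / (clk_div * t_cycle_cnt) */
		printf("nominal %7u Hz -> raw %.0f Hz\n", map[i].freq,
		       source_clock / (map[i].div * map[i].cyc));
	return 0;
}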
+
+static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
+{
+ int i;
+ const struct geni_i2c_clk_fld *itr = geni_i2c_clk_map;
+
+ for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
+ if (itr->clk_freq_out == gi2c->clk_freq_out) {
+ gi2c->clk_fld = itr;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c)
+{
+ const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
+ u32 val;
+
+ writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL);
+
+ val = (itr->clk_div << CLK_DIV_SHFT) | SER_CLK_EN;
+ writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG);
+
+ val = itr->t_high_cnt << HIGH_COUNTER_SHFT;
+ val |= itr->t_low_cnt << LOW_COUNTER_SHFT;
+ val |= itr->t_cycle_cnt;
+ writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS);
+}
+
+static void geni_i2c_err_misc(struct geni_i2c_dev *gi2c)
+{
+ u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0);
+ u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
+ u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS);
+ u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS);
+ u32 dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
+ u32 rx_st, tx_st;
+
+ if (dma) {
+ rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
+ tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
+ } else {
+ rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
+ tx_st = readl_relaxed(gi2c->se.base + SE_GENI_TX_FIFO_STATUS);
+ }
+ dev_dbg(gi2c->se.dev, "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
+ dma, tx_st, rx_st, m_stat);
+ dev_dbg(gi2c->se.dev, "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
+ m_cmd, geni_s, geni_ios);
+}
+
+static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
+{
+ if (!gi2c->err)
+ gi2c->err = gi2c_log[err].err;
+ if (gi2c->cur)
+ dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n",
+ gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags);
+
+ if (err != NACK && err != GENI_ABORT_DONE) {
+ dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
+ geni_i2c_err_misc(gi2c);
+ }
+}
+
+static irqreturn_t geni_i2c_irq(int irq, void *dev)
+{
+ struct geni_i2c_dev *gi2c = dev;
+ int j;
+ u32 m_stat;
+ u32 rx_st;
+ u32 dm_tx_st;
+ u32 dm_rx_st;
+ u32 dma;
+ struct i2c_msg *cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gi2c->lock, flags);
+ m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
+ rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
+ dm_tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
+ dm_rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
+ dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
+ cur = gi2c->cur;
+
+ if (!cur ||
+ m_stat & (M_CMD_FAILURE_EN | M_CMD_ABORT_EN) ||
+ dm_rx_st & (DM_I2C_CB_ERR)) {
+ if (m_stat & M_GP_IRQ_1_EN)
+ geni_i2c_err(gi2c, NACK);
+ if (m_stat & M_GP_IRQ_3_EN)
+ geni_i2c_err(gi2c, BUS_PROTO);
+ if (m_stat & M_GP_IRQ_4_EN)
+ geni_i2c_err(gi2c, ARB_LOST);
+ if (m_stat & M_CMD_OVERRUN_EN)
+ geni_i2c_err(gi2c, GENI_OVERRUN);
+ if (m_stat & M_ILLEGAL_CMD_EN)
+ geni_i2c_err(gi2c, GENI_ILLEGAL_CMD);
+ if (m_stat & M_CMD_ABORT_EN)
+ geni_i2c_err(gi2c, GENI_ABORT_DONE);
+ if (m_stat & M_GP_IRQ_0_EN)
+ geni_i2c_err(gi2c, GP_IRQ0);
+
+ /* Disable the TX Watermark interrupt to stop TX */
+ if (!dma)
+ writel_relaxed(0, gi2c->se.base +
+ SE_GENI_TX_WATERMARK_REG);
+ goto irqret;
+ }
+
+ if (dma) {
+ dev_dbg(gi2c->se.dev, "i2c dma tx:0x%x, dma rx:0x%x\n",
+ dm_tx_st, dm_rx_st);
+ goto irqret;
+ }
+
+ if (cur->flags & I2C_M_RD &&
+ m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) {
+ u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
+
+ for (j = 0; j < rxcnt; j++) {
+ u32 val;
+ int p = 0;
+
+ val = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFOn);
+ while (gi2c->cur_rd < cur->len && p < sizeof(val)) {
+ cur->buf[gi2c->cur_rd++] = val & 0xff;
+ val >>= 8;
+ p++;
+ }
+ if (gi2c->cur_rd == cur->len)
+ break;
+ }
+ } else if (!(cur->flags & I2C_M_RD) &&
+ m_stat & M_TX_FIFO_WATERMARK_EN) {
+ for (j = 0; j < gi2c->tx_wm; j++) {
+ u32 temp;
+ u32 val = 0;
+ int p = 0;
+
+ while (gi2c->cur_wr < cur->len && p < sizeof(val)) {
+ temp = cur->buf[gi2c->cur_wr++];
+ val |= temp << (p * 8);
+ p++;
+ }
+ writel_relaxed(val, gi2c->se.base + SE_GENI_TX_FIFOn);
+ /* TX Complete, Disable the TX Watermark interrupt */
+ if (gi2c->cur_wr == cur->len) {
+ writel_relaxed(0, gi2c->se.base +
+ SE_GENI_TX_WATERMARK_REG);
+ break;
+ }
+ }
+ }
+irqret:
+ if (m_stat)
+ writel_relaxed(m_stat, gi2c->se.base + SE_GENI_M_IRQ_CLEAR);
+
+ if (dma) {
+ if (dm_tx_st)
+ writel_relaxed(dm_tx_st, gi2c->se.base +
+ SE_DMA_TX_IRQ_CLR);
+ if (dm_rx_st)
+ writel_relaxed(dm_rx_st, gi2c->se.base +
+ SE_DMA_RX_IRQ_CLR);
+ }
+ /* If this is an error without the done bit set, handle it via the timeout path. */
+ if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN)
+ complete(&gi2c->done);
+ else if (dm_tx_st & TX_DMA_DONE || dm_tx_st & TX_RESET_DONE)
+ complete(&gi2c->done);
+ else if (dm_rx_st & RX_DMA_DONE || dm_rx_st & RX_RESET_DONE)
+ complete(&gi2c->done);
+
+ spin_unlock_irqrestore(&gi2c->lock, flags);
+ return IRQ_HANDLED;
+}
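
The FIFO branches above move four message bytes per 32-bit FIFO word, least significant byte first (the PACKING_BYTES_PW configuration set up in probe). A standalone sketch of that round trip, in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t msg[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t word = 0;

	for (int p = 0; p < 4; p++)	/* TX path: pack LSB first */
		word |= (uint32_t)msg[p] << (p * 8);

	for (int p = 0; p < 4; p++)	/* RX path: unpack in the same order */
		printf("%02x ", (word >> (p * 8)) & 0xff);
	printf("\n");			/* prints: 11 22 33 44 */
	return 0;
}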
+
+static void geni_i2c_abort_xfer(struct geni_i2c_dev *gi2c)
+{
+ u32 val;
+ unsigned long time_left = ABORT_TIMEOUT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gi2c->lock, flags);
+ geni_i2c_err(gi2c, GENI_TIMEOUT);
+ gi2c->cur = NULL;
+ geni_se_abort_m_cmd(&gi2c->se);
+ spin_unlock_irqrestore(&gi2c->lock, flags);
+ do {
+ time_left = wait_for_completion_timeout(&gi2c->done, time_left);
+ val = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
+ } while (!(val & M_CMD_ABORT_EN) && time_left);
+
+ if (!(val & M_CMD_ABORT_EN))
+ dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n");
+}
+
+static void geni_i2c_rx_fsm_rst(struct geni_i2c_dev *gi2c)
+{
+ u32 val;
+ unsigned long time_left = RST_TIMEOUT;
+
+ writel_relaxed(1, gi2c->se.base + SE_DMA_RX_FSM_RST);
+ do {
+ time_left = wait_for_completion_timeout(&gi2c->done, time_left);
+ val = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
+ } while (!(val & RX_RESET_DONE) && time_left);
+
+ if (!(val & RX_RESET_DONE))
+ dev_err(gi2c->se.dev, "Timeout resetting RX_FSM\n");
+}
+
+static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
+{
+ u32 val;
+ unsigned long time_left = RST_TIMEOUT;
+
+ writel_relaxed(1, gi2c->se.base + SE_DMA_TX_FSM_RST);
+ do {
+ time_left = wait_for_completion_timeout(&gi2c->done, time_left);
+ val = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
+ } while (!(val & TX_RESET_DONE) && time_left);
+
+ if (!(val & TX_RESET_DONE))
+ dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
+}
+
+static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+{
+ dma_addr_t rx_dma;
+ enum geni_se_xfer_mode mode;
+ unsigned long time_left;
+
+ gi2c->cur = msg;
+ mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
+ geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
+ if (mode == GENI_SE_DMA) {
+ int ret;
+
+ ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ &rx_dma);
+ if (ret) {
+ mode = GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ }
+ }
+
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+ gi2c->cur_rd = 0;
+ if (mode == GENI_SE_DMA) {
+ if (gi2c->err)
+ geni_i2c_rx_fsm_rst(gi2c);
+ geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+ }
+ return gi2c->err;
+}
+
+static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+{
+ dma_addr_t tx_dma;
+ enum geni_se_xfer_mode mode;
+ unsigned long time_left;
+
+ gi2c->cur = msg;
+ mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
+ geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
+ if (mode == GENI_SE_DMA) {
+ int ret;
+
+ ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ &tx_dma);
+ if (ret) {
+ mode = GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ }
+ }
+
+ if (mode == GENI_SE_FIFO) /* Get FIFO IRQ */
+ writel_relaxed(1, gi2c->se.base + SE_GENI_TX_WATERMARK_REG);
+
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+ gi2c->cur_wr = 0;
+ if (mode == GENI_SE_DMA) {
+ if (gi2c->err)
+ geni_i2c_tx_fsm_rst(gi2c);
+ geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+ }
+ return gi2c->err;
+}
+
+static int geni_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
+ int i, ret;
+
+ gi2c->err = 0;
+ reinit_completion(&gi2c->done);
+ ret = pm_runtime_get_sync(gi2c->se.dev);
+ if (ret < 0) {
+ dev_err(gi2c->se.dev, "error turning SE resources:%d\n", ret);
+ pm_runtime_put_noidle(gi2c->se.dev);
+ /* Set device in suspended since resume failed */
+ pm_runtime_set_suspended(gi2c->se.dev);
+ return ret;
+ }
+
+ qcom_geni_i2c_conf(gi2c);
+ for (i = 0; i < num; i++) {
+ u32 m_param = i < (num - 1) ? STOP_STRETCH : 0;
+
+ m_param |= ((msgs[i].addr << SLV_ADDR_SHFT) & SLV_ADDR_MSK);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = geni_i2c_rx_one_msg(gi2c, &msgs[i], m_param);
+ else
+ ret = geni_i2c_tx_one_msg(gi2c, &msgs[i], m_param);
+
+ if (ret)
+ break;
+ }
+ if (ret == 0)
+ ret = num;
+
+ pm_runtime_mark_last_busy(gi2c->se.dev);
+ pm_runtime_put_autosuspend(gi2c->se.dev);
+ gi2c->cur = NULL;
+ gi2c->err = 0;
+ return ret;
+}
+
+static u32 geni_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm geni_i2c_algo = {
+ .master_xfer = geni_i2c_xfer,
+ .functionality = geni_i2c_func,
+};
+
+static int geni_i2c_probe(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c;
+ struct resource *res;
+ u32 proto, tx_depth;
+ int ret;
+
+ gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
+ if (!gi2c)
+ return -ENOMEM;
+
+ gi2c->se.dev = &pdev->dev;
+ gi2c->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gi2c->se.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gi2c->se.base))
+ return PTR_ERR(gi2c->se.base);
+
+ gi2c->se.clk = devm_clk_get(&pdev->dev, "se");
+ if (IS_ERR(gi2c->se.clk)) {
+ ret = PTR_ERR(gi2c->se.clk);
+ dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev, "clock-frequency",
+ &gi2c->clk_freq_out);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "Bus frequency not specified, default to 100kHz.\n");
+ gi2c->clk_freq_out = KHZ(100);
+ }
+
+ gi2c->irq = platform_get_irq(pdev, 0);
+ if (gi2c->irq < 0) {
+ dev_err(&pdev->dev, "IRQ error for i2c-geni\n");
+ return gi2c->irq;
+ }
+
+ ret = geni_i2c_clk_map_idx(gi2c);
+ if (ret) {
+ dev_err(&pdev->dev, "Invalid clk frequency %d Hz: %d\n",
+ gi2c->clk_freq_out, ret);
+ return ret;
+ }
+
+ gi2c->adap.algo = &geni_i2c_algo;
+ init_completion(&gi2c->done);
+ spin_lock_init(&gi2c->lock);
+ platform_set_drvdata(pdev, gi2c);
+ ret = devm_request_irq(&pdev->dev, gi2c->irq, geni_i2c_irq,
+ IRQF_TRIGGER_HIGH, "i2c_geni", gi2c);
+ if (ret) {
+ dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+ gi2c->irq, ret);
+ return ret;
+ }
+ /* Disable the interrupt so that the system can enter low-power mode */
+ disable_irq(gi2c->irq);
+ i2c_set_adapdata(&gi2c->adap, gi2c);
+ gi2c->adap.dev.parent = &pdev->dev;
+ gi2c->adap.dev.of_node = pdev->dev.of_node;
+ strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+
+ ret = geni_se_resources_on(&gi2c->se);
+ if (ret) {
+ dev_err(&pdev->dev, "Error turning on resources %d\n", ret);
+ return ret;
+ }
+ proto = geni_se_read_proto(&gi2c->se);
+ tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
+ if (proto != GENI_SE_I2C) {
+ dev_err(&pdev->dev, "Invalid proto %d\n", proto);
+ geni_se_resources_off(&gi2c->se);
+ return -ENXIO;
+ }
+ gi2c->tx_wm = tx_depth - 1;
+ geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth);
+ geni_se_config_packing(&gi2c->se, BITS_PER_BYTE, PACKING_BYTES_PW,
+ true, true, true);
+ ret = geni_se_resources_off(&gi2c->se);
+ if (ret) {
+ dev_err(&pdev->dev, "Error turning off resources %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
+
+ ret = i2c_add_adapter(&gi2c->adap);
+ if (ret) {
+ dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ return ret;
+ }
+
+ gi2c->suspended = 1;
+ pm_runtime_set_suspended(gi2c->se.dev);
+ pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gi2c->se.dev);
+ pm_runtime_enable(gi2c->se.dev);
+
+ return 0;
+}
+
+static int geni_i2c_remove(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(gi2c->se.dev);
+ i2c_del_adapter(&gi2c->adap);
+ return 0;
+}
+
+static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ disable_irq(gi2c->irq);
+ ret = geni_se_resources_off(&gi2c->se);
+ if (ret) {
+ enable_irq(gi2c->irq);
+ return ret;
+ }
+
+ gi2c->suspended = 1;
+
+ return 0;
+}
+
+static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ ret = geni_se_resources_on(&gi2c->se);
+ if (ret)
+ return ret;
+
+ enable_irq(gi2c->irq);
+ gi2c->suspended = 0;
+ return 0;
+}
+
+static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+{
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ if (!gi2c->suspended) {
+ geni_i2c_runtime_suspend(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops geni_i2c_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+ SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id geni_i2c_dt_match[] = {
+ { .compatible = "qcom,geni-i2c" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
+
+static struct platform_driver geni_i2c_driver = {
+ .probe = geni_i2c_probe,
+ .remove = geni_i2c_remove,
+ .driver = {
+ .name = "geni_i2c",
+ .pm = &geni_i2c_pm_ops,
+ .of_match_table = geni_i2c_dt_match,
+ },
+};
+
+module_platform_driver(geni_i2c_driver);
+
+MODULE_DESCRIPTION("I2C Controller Driver for GENI based QUP cores");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5e310efd9446..52cf42b32f0a 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Renesas R-Car I2C unit
*
@@ -9,16 +10,8 @@
*
* This file is based on the drivers/i2c/busses/i2c-sh7760.c
* (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -32,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
/* register offsets */
@@ -111,8 +105,10 @@
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
/* persistent flags */
-#define ID_P_PM_BLOCKED (1 << 31)
-#define ID_P_MASK ID_P_PM_BLOCKED
+#define ID_P_REP_AFTER_RD BIT(29)
+#define ID_P_NO_RXDMA BIT(30) /* HW forbids RXDMA sometimes */
+#define ID_P_PM_BLOCKED BIT(31)
+#define ID_P_MASK GENMASK(31, 29)
enum rcar_i2c_type {
I2C_RCAR_GEN1,
@@ -141,6 +137,8 @@ struct rcar_i2c_priv {
struct dma_chan *dma_rx;
struct scatterlist sg;
enum dma_data_direction dma_direction;
+
+ struct reset_control *rstc;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -179,8 +177,6 @@ static void rcar_i2c_set_scl(struct i2c_adapter *adap, int val)
rcar_i2c_write(priv, ICMCR, priv->recovery_icmcr);
};
-/* No get_sda, because the HW only reports its bus free logic, not SDA itself */
-
static void rcar_i2c_set_sda(struct i2c_adapter *adap, int val)
{
struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
@@ -193,10 +189,19 @@ static void rcar_i2c_set_sda(struct i2c_adapter *adap, int val)
rcar_i2c_write(priv, ICMCR, priv->recovery_icmcr);
};
+static int rcar_i2c_get_bus_free(struct i2c_adapter *adap)
+{
+ struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
+
+ return !(rcar_i2c_read(priv, ICMCR) & FSDA);
+};
+
static struct i2c_bus_recovery_info rcar_i2c_bri = {
.get_scl = rcar_i2c_get_scl,
.set_scl = rcar_i2c_set_scl,
.set_sda = rcar_i2c_set_sda,
+ .get_bus_free = rcar_i2c_get_bus_free,
.recover_bus = i2c_generic_scl_recovery,
};
static void rcar_i2c_init(struct rcar_i2c_priv *priv)
@@ -211,7 +216,7 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
{
- int i, ret;
+ int i;
for (i = 0; i < LOOP_TIMEOUT; i++) {
/* make sure that bus is not busy */
@@ -222,13 +227,7 @@ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
/* Waiting did not help, try to recover */
priv->recovery_icmcr = MDBS | OBPC | FSDA | FSCL;
- ret = i2c_recover_bus(&priv->adap);
-
- /* No failure when recovering, so check bus busy bit again */
- if (ret == 0)
- ret = (rcar_i2c_read(priv, ICMCR) & FSDA) ? -EBUSY : 0;
-
- return ret;
+ return i2c_recover_bus(&priv->adap);
}
static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timings *t)
@@ -339,7 +338,10 @@ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICMSR, 0);
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
} else {
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
+ if (priv->flags & ID_P_REP_AFTER_RD)
+ priv->flags &= ~ID_P_REP_AFTER_RD;
+ else
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
rcar_i2c_write(priv, ICMSR, 0);
}
rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
@@ -370,6 +372,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
+ /* Gen3 can only do one RXDMA per transfer and we just completed it */
+ if (priv->devtype == I2C_RCAR_GEN3 &&
+ priv->dma_direction == DMA_FROM_DEVICE)
+ priv->flags |= ID_P_NO_RXDMA;
+
priv->dma_direction = DMA_NONE;
}
@@ -407,8 +414,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
unsigned char *buf;
int len;
- /* Do not use DMA if it's not available or for messages < 8 bytes */
- if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+ /* Do various checks to see if DMA is feasible at all */
+ if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+ (read && priv->flags & ID_P_NO_RXDMA))
return;
if (read) {
@@ -538,15 +546,15 @@ static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
priv->pos++;
}
- /*
- * If next received data is the _LAST_, go to STOP phase. Might be
- * overwritten by REP START when setting up a new msg. Not elegant
- * but the only stable sequence for REP START I have found so far.
- * If you want to change this code, make sure sending one transfer with
- * four messages (WR-RD-WR-RD) works!
- */
- if (priv->pos + 1 >= msg->len)
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
+ /* If next received data is the _LAST_, go to new phase. */
+ if (priv->pos + 1 == msg->len) {
+ if (priv->flags & ID_LAST_MSG) {
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
+ } else {
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
+ priv->flags |= ID_P_REP_AFTER_RD;
+ }
+ }
if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG))
rcar_i2c_next_msg(priv);
@@ -614,9 +622,11 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
struct rcar_i2c_priv *priv = ptr;
u32 msr, val;
- /* Clear START or STOP as soon as we can */
- val = rcar_i2c_read(priv, ICMCR);
- rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
+ /* Clear START or STOP immediately, except for REPSTART after read */
+ if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) {
+ val = rcar_i2c_read(priv, ICMCR);
+ rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
+ }
msr = rcar_i2c_read(priv, ICMSR);
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
}
}
+/* I2C is a special case; we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+ int i, ret;
+
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < LOOP_TIMEOUT; i++) {
+ ret = reset_control_status(priv->rstc);
+ if (ret == 0)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs,
int num)
@@ -750,20 +779,24 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_get_sync(dev);
+ /* Gen3 needs a reset before allowing RXDMA once */
+ if (priv->devtype == I2C_RCAR_GEN3) {
+ priv->flags |= ID_P_NO_RXDMA;
+ if (!IS_ERR(priv->rstc)) {
+ ret = rcar_i2c_do_reset(priv);
+ if (ret == 0)
+ priv->flags &= ~ID_P_NO_RXDMA;
+ }
+ }
+
rcar_i2c_init(priv);
ret = rcar_i2c_bus_barrier(priv);
if (ret < 0)
goto out;
- for (i = 0; i < num; i++) {
- /* This HW can't send STOP after address phase */
- if (msgs[i].len == 0) {
- ret = -EOPNOTSUPP;
- goto out;
- }
+ for (i = 0; i < num; i++)
rcar_i2c_request_dma(priv, msgs + i);
- }
/* init first message */
priv->msg = msgs;
@@ -850,6 +883,10 @@ static const struct i2c_algorithm rcar_i2c_algo = {
.unreg_slave = rcar_unreg_slave,
};
+static const struct i2c_adapter_quirks rcar_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
@@ -903,6 +940,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
adap->bus_recovery_info = &rcar_i2c_bri;
+ adap->quirks = &rcar_i2c_quirks;
i2c_set_adapdata(adap, priv);
strlcpy(adap->name, pdev->name, sizeof(adap->name));
@@ -920,6 +958,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
+ if (priv->devtype == I2C_RCAR_GEN3) {
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (!IS_ERR(priv->rstc)) {
+ ret = reset_control_status(priv->rstc);
+ if (ret < 0)
+ priv->rstc = ERR_PTR(-ENOTSUPP);
+ }
+ }
+
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 5f1fca7880b1..b75ff144b570 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RIIC driver
*
* Copyright (C) 2013 Wolfram Sang <wsa@sang-engineering.com>
* Copyright (C) 2013 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
/*
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 9fe2b6951895..2f2e28d60ef5 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -919,9 +919,9 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
(val == CPUFREQ_PRECHANGE && delta_f > 0)) {
- i2c_lock_adapter(&i2c->adap);
+ i2c_lock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
ret = s3c24xx_i2c_clockrate(i2c, &got);
- i2c_unlock_adapter(&i2c->adap);
+ i2c_unlock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
if (ret < 0)
dev_err(i2c->dev, "cannot find frequency (%d)\n", ret);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 5fda4188a9e5..439e8778f849 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Mobile I2C Controller
*
@@ -7,15 +8,6 @@
*
* Portions of the code based on out-of-tree driver i2c-sh7343.c
* Copyright (c) 2006 Carlos Munoz <carlos@kenati.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -613,11 +605,6 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
bool do_init)
{
- if (usr_msg->len == 0 && (usr_msg->flags & I2C_M_RD)) {
- dev_err(pd->dev, "Unsupported zero length i2c read\n");
- return -EOPNOTSUPP;
- }
-
if (do_init) {
/* Initialize channel registers */
iic_wr(pd, ICCR, ICCR_SCP);
@@ -758,6 +745,10 @@ static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
.master_xfer = sh_mobile_i2c_xfer,
};
+static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN_READ,
+};
+
/*
* The r8a7740 chip has a lasting erratum on the I2C I/O pad reset;
* this is a workaround for it.
@@ -925,6 +916,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->owner = THIS_MODULE;
adap->algo = &sh_mobile_i2c_algorithm;
+ adap->quirks = &sh_mobile_i2c_quirks;
adap->dev.parent = &dev->dev;
adap->retries = 5;
adap->nr = dev->id;
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 4053259bccb8..a94e724f51dc 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -590,9 +590,9 @@ static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
{
struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
- i2c_lock_adapter(&i2c_dev->adap);
+ i2c_lock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
i2c_dev->is_suspended = true;
- i2c_unlock_adapter(&i2c_dev->adap);
+ i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
return pm_runtime_force_suspend(pdev);
}
@@ -601,9 +601,9 @@ static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
{
struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
- i2c_lock_adapter(&i2c_dev->adap);
+ i2c_lock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
i2c_dev->is_suspended = false;
- i2c_unlock_adapter(&i2c_dev->adap);
+ i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
return pm_runtime_force_resume(pdev);
}
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index e866c481bfc3..5503fa171df0 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -127,7 +127,7 @@ enum stu300_error {
/*
* The number of address send attempts tried before giving up.
- * If the first one failes it seems like 5 to 8 attempts are required.
+ * If the first one fails it seems like 5 to 8 attempts are required.
*/
#define NUM_ADDR_RESEND_ATTEMPTS 12
@@ -673,12 +673,6 @@ static int stu300_xfer_msg(struct i2c_adapter *adap,
msg->addr, msg->len, msg->flags, stop);
}
- /* Zero-length messages are not supported by this hardware */
- if (msg->len == 0) {
- ret = -EINVAL;
- goto exit_disable;
- }
-
/*
* For some reason, sending the address sometimes fails when running
* on the 13 MHz clock. No interrupt arrives. This is a work around,
@@ -863,6 +857,10 @@ static const struct i2c_algorithm stu300_algo = {
.functionality = stu300_func,
};
+static const struct i2c_adapter_quirks stu300_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static int stu300_probe(struct platform_device *pdev)
{
struct stu300_dev *dev;
@@ -920,6 +918,8 @@ static int stu300_probe(struct platform_device *pdev)
adap->algo = &stu300_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
+ adap->quirks = &stu300_quirks;
+
i2c_set_adapdata(adap, dev);
/* i2c device drivers may be active on return from add_adapter() */
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 5fccd1f1bca8..60c8561fbe65 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -115,6 +115,18 @@
#define I2C_CONFIG_LOAD_TIMEOUT 1000000
+#define I2C_MST_FIFO_CONTROL 0x0b4
+#define I2C_MST_FIFO_CONTROL_RX_FLUSH BIT(0)
+#define I2C_MST_FIFO_CONTROL_TX_FLUSH BIT(1)
+#define I2C_MST_FIFO_CONTROL_RX_TRIG(x) (((x) - 1) << 4)
+#define I2C_MST_FIFO_CONTROL_TX_TRIG(x) (((x) - 1) << 16)
+
+#define I2C_MST_FIFO_STATUS 0x0b8
+#define I2C_MST_FIFO_STATUS_RX_MASK 0xff
+#define I2C_MST_FIFO_STATUS_RX_SHIFT 0
+#define I2C_MST_FIFO_STATUS_TX_MASK 0xff0000
+#define I2C_MST_FIFO_STATUS_TX_SHIFT 16
+
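
The new trigger macros use a "level minus one" encoding; a quick standalone check (macro names shortened here for illustration):

#include <stdio.h>

#define MST_RX_TRIG(x) (((x) - 1) << 4)		/* mirrors I2C_MST_FIFO_CONTROL_RX_TRIG */
#define MST_TX_TRIG(x) (((x) - 1) << 16)	/* mirrors I2C_MST_FIFO_CONTROL_TX_TRIG */

int main(void)
{
	/* A TX trigger of 8 words encodes as 0x70000, an RX trigger of 1 as 0x0 */
	printf("0x%x 0x%x\n", MST_TX_TRIG(8), MST_RX_TRIG(1));
	return 0;
}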
/*
* msg_end_type: The bus control which needs to be sent at the end of a transfer.
* @MSG_END_STOP: Send stop pulse at end of transfer.
@@ -154,6 +166,7 @@ struct tegra_i2c_hw_feature {
u16 clk_divisor_fast_plus_mode;
bool has_multi_master_mode;
bool has_slcg_override_reg;
+ bool has_mst_fifo;
};
/**
@@ -266,13 +279,24 @@ static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
{
unsigned long timeout = jiffies + HZ;
- u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ unsigned int offset;
+ u32 mask, val;
- val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
- i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+ if (i2c_dev->hw->has_mst_fifo) {
+ mask = I2C_MST_FIFO_CONTROL_TX_FLUSH |
+ I2C_MST_FIFO_CONTROL_RX_FLUSH;
+ offset = I2C_MST_FIFO_CONTROL;
+ } else {
+ mask = I2C_FIFO_CONTROL_TX_FLUSH |
+ I2C_FIFO_CONTROL_RX_FLUSH;
+ offset = I2C_FIFO_CONTROL;
+ }
- while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
- (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ val = i2c_readl(i2c_dev, offset);
+ val |= mask;
+ i2c_writel(i2c_dev, val, offset);
+
+ while (i2c_readl(i2c_dev, offset) & mask) {
if (time_after(jiffies, timeout)) {
dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
return -ETIMEDOUT;
@@ -290,9 +314,15 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
size_t buf_remaining = i2c_dev->msg_buf_remaining;
int words_to_transfer;
- val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
- rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
- I2C_FIFO_STATUS_RX_SHIFT;
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_MST_FIFO_STATUS_RX_MASK) >>
+ I2C_MST_FIFO_STATUS_RX_SHIFT;
+ } else {
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+ I2C_FIFO_STATUS_RX_SHIFT;
+ }
/* Rounds down to not include partial word at the end of buf */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
@@ -321,6 +351,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf;
+
return 0;
}
@@ -332,9 +363,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
size_t buf_remaining = i2c_dev->msg_buf_remaining;
int words_to_transfer;
- val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
- tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
- I2C_FIFO_STATUS_TX_SHIFT;
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_MST_FIFO_STATUS_TX_MASK) >>
+ I2C_MST_FIFO_STATUS_TX_SHIFT;
+ } else {
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+ I2C_FIFO_STATUS_TX_SHIFT;
+ }
/* Rounds down to not include partial word at the end of buf */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
@@ -516,9 +553,15 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
}
- val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
- 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
- i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = I2C_MST_FIFO_CONTROL_TX_TRIG(8) |
+ I2C_MST_FIFO_CONTROL_RX_TRIG(1);
+ i2c_writel(i2c_dev, val, I2C_MST_FIFO_CONTROL);
+ } else {
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+ }
err = tegra_i2c_flush_fifos(i2c_dev);
if (err)
@@ -545,6 +588,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
{
u32 cnfg;
+ /*
+ * NACK interrupt is generated before the I2C controller generates
+ * the STOP condition on the bus. So wait for 2 clock periods
+ * before disabling the controller so that the STOP condition has
+ * been delivered properly.
+ */
+ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
cnfg = i2c_readl(i2c_dev, I2C_CNFG);
if (cnfg & I2C_CNFG_PACKET_MODE_EN)
i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -706,15 +757,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
return 0;
- /*
- * NACK interrupt is generated before the I2C controller generates
- * the STOP condition on the bus. So wait for 2 clock periods
- * before resetting the controller so that the STOP condition has
- * been delivered properly.
- */
- if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
- udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
tegra_i2c_init(i2c_dev);
if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
@@ -803,6 +845,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_config_load_reg = false,
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -815,6 +858,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_config_load_reg = false,
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -827,6 +871,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_config_load_reg = false,
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -839,6 +884,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_config_load_reg = true,
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
@@ -851,10 +897,25 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_config_load_reg = true,
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
+ .has_mst_fifo = false,
+};
+
+static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .has_single_clk_source = true,
+ .clk_divisor_hs_mode = 1,
+ .clk_divisor_std_fast_mode = 0x19,
+ .clk_divisor_fast_plus_mode = 0x10,
+ .has_config_load_reg = true,
+ .has_multi_master_mode = true,
+ .has_slcg_override_reg = true,
+ .has_mst_fifo = true,
};
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
{ .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, },
{ .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 1f41a4f89c08..8a873975cf12 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
if (priv->len_recv) {
/* read length byte */
rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+ /*
+ * We expect at least 2 interrupts for I2C_M_RECV_LEN
+ * transactions. The length is updated during the first
+ * interrupt, and the buffer contents are only copied
+ * during subsequent interrupts. If the interrupts get merged,
+ * we would complete the transaction without copying out the
+ * bytes from the RX FIFO. To avoid this, drain the FIFO
+ * whenever data is available. The length byte has already
+ * been drained, so decrement the total length by one.
+ */
+
+ len--;
if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
rlen = 0; /*abort transfer */
priv->msg_buf_remaining = 0;
priv->msg_len = 0;
- } else {
- *buf++ = rlen;
- if (priv->client_pec)
- ++rlen; /* account for error check byte */
- /* update remaining bytes and message length */
- priv->msg_buf_remaining = rlen;
- priv->msg_len = rlen + 1;
+ xlp9xx_i2c_update_rlen(priv);
+ return;
}
+
+ *buf++ = rlen;
+ if (priv->client_pec)
+ ++rlen; /* account for error check byte */
+ /* update remaining bytes and message length */
+ priv->msg_buf_remaining = rlen;
+ priv->msg_len = rlen + 1;
xlp9xx_i2c_update_rlen(priv);
priv->len_recv = false;
- } else {
- len = min(priv->msg_buf_remaining, len);
- for (i = 0; i < len; i++, buf++)
- *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
- priv->msg_buf_remaining -= len;
}
+ len = min(priv->msg_buf_remaining, len);
+ for (i = 0; i < len; i++, buf++)
+ *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+ priv->msg_buf_remaining -= len;
priv->msg_buf = buf;
if (priv->msg_buf_remaining)
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 484bfa15d58e..34cd4b308540 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -173,9 +173,6 @@ static int xlr_i2c_tx(struct xlr_i2c_private *priv, u16 len,
u8 offset;
u32 xfer;
- if (!len)
- return -EOPNOTSUPP;
-
offset = buf[0];
xlr_i2c_wreg(priv->iobase, XLR_I2C_ADDR, offset);
xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);
@@ -241,9 +238,6 @@ static int xlr_i2c_rx(struct xlr_i2c_private *priv, u16 len, u8 *buf, u16 addr)
unsigned long timeout, stoptime, checktime;
int nbytes, timedout;
- if (!len)
- return -EOPNOTSUPP;
-
xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG,
XLR_I2C_CFG_NOADDR | priv->cfg->cfg_extra);
xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1);
@@ -340,6 +334,10 @@ static const struct i2c_algorithm xlr_i2c_algo = {
.functionality = xlr_func,
};
+static const struct i2c_adapter_quirks xlr_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct xlr_i2c_config xlr_i2c_config_default = {
.status_busy = XLR_I2C_BUS_BUSY,
.cfg_extra = 0,
@@ -427,6 +425,7 @@ static int xlr_i2c_probe(struct platform_device *pdev)
priv->adap.owner = THIS_MODULE;
priv->adap.algo_data = priv;
priv->adap.algo = &xlr_i2c_algo;
+ priv->adap.quirks = &xlr_i2c_quirks;
priv->adap.nr = pdev->id;
priv->adap.class = I2C_CLASS_HWMON;
snprintf(priv->adap.name, sizeof(priv->adap.name), "xlr-i2c");
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 7c3b4740b94b..32affd3fa8bd 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -453,8 +453,12 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
else
dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
data_len, client->addr, cmd, ret);
- } else {
+ /* 2 transfers must have completed successfully */
+ } else if (ret == 2) {
memcpy(data, buffer, data_len);
+ ret = 0;
+ } else {
+ ret = -EIO;
}
kfree(buffer);
@@ -482,11 +486,16 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
msgs[0].buf = buffer;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- dev_err(&client->adapter->dev, "i2c write failed\n");
kfree(buffer);
- return ret;
+
+ if (ret < 0) {
+ dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
+ return ret;
+ }
+
+ /* 1 transfer must have completed successfully */
+ return (ret == 1) ? 0 : -EIO;
}
static acpi_status
@@ -590,8 +599,6 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
if (action == ACPI_READ) {
status = acpi_gsb_i2c_read_bytes(client, command,
gsb->data, info->access_length);
- if (status > 0)
- status = 0;
} else {
status = acpi_gsb_i2c_write_bytes(client, command,
gsb->data, info->access_length);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 31d16ada6e7d..f15737763608 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -62,7 +62,7 @@
/*
* core_lock protects i2c_adapter_idr, and guarantees that device detection,
- * deletion of detected devices, and attach_adapter calls are serialized
+ * and deletion of detected devices are serialized
*/
static DEFINE_MUTEX(core_lock);
static DEFINE_IDR(i2c_adapter_idr);
@@ -158,6 +158,22 @@ static void set_sda_gpio_value(struct i2c_adapter *adap, int val)
gpiod_set_value_cansleep(adap->bus_recovery_info->sda_gpiod, val);
}
+static int i2c_generic_bus_free(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ int ret = -EOPNOTSUPP;
+
+ if (bri->get_bus_free)
+ ret = bri->get_bus_free(adap);
+ else if (bri->get_sda)
+ ret = bri->get_sda(adap);
+
+ if (ret < 0)
+ return ret;
+
+ return ret ? 0 : -EBUSY;
+}
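
A hedged sketch of a driver opting into the new callback (foo_* names hypothetical): when the controller only exposes a bus-busy flag rather than raw SDA, get_bus_free still lets the generic recovery loop verify success.

	static struct i2c_bus_recovery_info foo_i2c_bri = {
		.get_scl	= foo_i2c_get_scl,
		.set_scl	= foo_i2c_set_scl,
		.set_sda	= foo_i2c_set_sda,
		.get_bus_free	= foo_i2c_get_bus_free,	/* >0 = free, 0 = busy, <0 = error */
		.recover_bus	= i2c_generic_scl_recovery,
	};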
+
/*
* We are generating clock pulses. ndelay() determines the duration of the clk pulses.
* We will generate a clock with a rate of 100 kHz, and so the duration of both clock levels
@@ -169,21 +185,28 @@ static void set_sda_gpio_value(struct i2c_adapter *adap, int val)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- int i = 0, val = 1, ret = 0;
+ int i = 0, scl = 1, ret;
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
- bri->set_scl(adap, val);
+ /*
+ * If we can set SDA, we will always create a STOP to ensure additional
+ * pulses will do no harm. This is achieved by letting SDA follow SCL
+ * half a cycle later. Check the 'incomplete_write_byte' fault injector
+ * for details.
+ */
+ bri->set_scl(adap, scl);
+ ndelay(RECOVERY_NDELAY / 2);
if (bri->set_sda)
- bri->set_sda(adap, 1);
- ndelay(RECOVERY_NDELAY);
+ bri->set_sda(adap, scl);
+ ndelay(RECOVERY_NDELAY / 2);
/*
* By this time SCL is high, as we need to give 9 falling-rising edges
*/
while (i++ < RECOVERY_CLK_CNT * 2) {
- if (val) {
+ if (scl) {
/* SCL shouldn't be low here */
if (!bri->get_scl(adap)) {
dev_err(&adap->dev,
@@ -191,32 +214,27 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
ret = -EBUSY;
break;
}
- /* Break if SDA is high */
- if (bri->get_sda && bri->get_sda(adap))
- break;
}
- val = !val;
- bri->set_scl(adap, val);
- ndelay(RECOVERY_NDELAY);
- }
-
- /* check if recovery actually succeeded */
- if (bri->get_sda && !bri->get_sda(adap))
- ret = -EBUSY;
-
- /* If all went well, send STOP for a sane bus state. */
- if (ret == 0 && bri->set_sda) {
- bri->set_scl(adap, 0);
+ scl = !scl;
+ bri->set_scl(adap, scl);
+ /* Creating STOP again, see above */
ndelay(RECOVERY_NDELAY / 2);
- bri->set_sda(adap, 0);
- ndelay(RECOVERY_NDELAY / 2);
- bri->set_scl(adap, 1);
- ndelay(RECOVERY_NDELAY / 2);
- bri->set_sda(adap, 1);
+ if (bri->set_sda)
+ bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
+
+ if (scl) {
+ ret = i2c_generic_bus_free(adap);
+ if (ret == 0)
+ break;
+ }
}
+ /* If we can't check bus status, assume recovery worked */
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
if (bri->unprepare_recovery)
bri->unprepare_recovery(adap);
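
Sketched as a standalone loop (set_scl/set_sda stand in for the bri callbacks, T for RECOVERY_NDELAY), the waveform this produces lets SDA mirror SCL half a period late, so every SCL high phase ends with SDA rising while SCL is high, i.e. a STOP:

	for (i = 0; i < RECOVERY_CLK_CNT; i++) {
		set_scl(0);	ndelay(T / 2);
		set_sda(0);	ndelay(T / 2);	/* SDA falls while SCL is low: harmless */
		set_scl(1);	ndelay(T / 2);
		set_sda(1);	ndelay(T / 2);	/* SDA rises while SCL is high: STOP */
	}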
@@ -265,6 +283,10 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
err_str = "no {get|set}_scl() found";
goto err;
}
+ if (!bri->set_sda && !bri->get_sda) {
+ err_str = "either get_sda() or set_sda() needed";
+ goto err;
+ }
}
return;
@@ -615,7 +637,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
- rt_mutex_lock(&adapter->bus_lock);
+ rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
}
/**
@@ -1102,15 +1124,6 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
/* Detect supported devices on that bus, and instantiate them */
i2c_detect(adap, driver);
- /* Let legacy drivers scan this bus for matching devices */
- if (driver->attach_adapter) {
- dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
- driver->driver.name);
- dev_warn(&adap->dev,
- "Please use another way to instantiate your i2c_client\n");
- /* We ignore the return code; if it fails, too bad */
- driver->attach_adapter(adap);
- }
return 0;
}
@@ -1554,6 +1567,8 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de
ret = device_property_read_u32(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns);
if (ret && use_defaults)
t->sda_fall_ns = t->scl_fall_ns;
+
+ device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
}
EXPORT_SYMBOL_GPL(i2c_parse_fw_timings);
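
A hedged consumer sketch (hypothetical platform driver): with use_defaults set, unspecified properties fall back to values derived from the bus frequency, and the new SDA hold time is picked up whenever the firmware provides it.

	struct i2c_timings t;

	i2c_parse_fw_timings(&pdev->dev, &t, true);
	dev_dbg(&pdev->dev, "SDA hold time: %u ns\n", t.sda_hold_ns);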
@@ -1817,9 +1832,15 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
if (msgs[i].flags & I2C_M_RD) {
if (do_len_check && i2c_quirk_exceeded(len, q->max_read_len))
return i2c_quirk_error(adap, &msgs[i], "msg too long");
+
+ if (q->flags & I2C_AQ_NO_ZERO_LEN_READ && len == 0)
+ return i2c_quirk_error(adap, &msgs[i], "no zero length");
} else {
if (do_len_check && i2c_quirk_exceeded(len, q->max_write_len))
return i2c_quirk_error(adap, &msgs[i], "msg too long");
+
+ if (q->flags & I2C_AQ_NO_ZERO_LEN_WRITE && len == 0)
+ return i2c_quirk_error(adap, &msgs[i], "no zero length");
}
}
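
The effect, sketched from the caller's side (address assumed for illustration): on adapters that set I2C_AQ_NO_ZERO_LEN, a zero-length probe message is now rejected centrally with -EOPNOTSUPP instead of by ad-hoc checks in each driver.

	struct i2c_msg probe_msg = { .addr = 0x50, .flags = 0, .len = 0 };

	/* Fails fast in the core's quirk check on such adapters */
	ret = i2c_transfer(adapter, &probe_msg, 1);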
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 4a78c65e9971..47a9f70a24a9 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -47,9 +47,9 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
client->slave_cb = slave_cb;
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
ret = client->adapter->algo->reg_slave(client);
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
if (ret) {
client->slave_cb = NULL;
@@ -69,9 +69,9 @@ int i2c_slave_unregister(struct i2c_client *client)
return -EOPNOTSUPP;
}
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
ret = client->adapter->algo->unreg_slave(client);
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
if (ret == 0)
client->slave_cb = NULL;
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index f3f683041e7f..9cd66cabb84f 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -463,17 +463,20 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
msg[num-1].len++;
}
- status = i2c_transfer(adapter, msg, num);
+ status = __i2c_transfer(adapter, msg, num);
if (status < 0)
- return status;
- if (status != num)
- return -EIO;
+ goto cleanup;
+ if (status != num) {
+ status = -EIO;
+ goto cleanup;
+ }
+ status = 0;
/* Check PEC if last message is a read */
if (i && (msg[num-1].flags & I2C_M_RD)) {
status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
if (status < 0)
- return status;
+ goto cleanup;
}
if (read_write == I2C_SMBUS_READ)
@@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
break;
}
+cleanup:
if (msg[0].flags & I2C_M_DMA_SAFE)
kfree(msg[0].buf);
if (msg[1].flags & I2C_M_DMA_SAFE)
kfree(msg[1].buf);
- return 0;
+ return status;
}
/**
@@ -520,9 +524,24 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
* This executes an SMBus protocol operation, and returns a negative
* errno code else zero on success.
*/
-s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
- char read_write, u8 command, int protocol,
- union i2c_smbus_data *data)
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int protocol, union i2c_smbus_data *data)
+{
+ s32 res;
+
+ i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
+ res = __i2c_smbus_xfer(adapter, addr, flags, read_write,
+ command, protocol, data);
+ i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
+
+ return res;
+}
+EXPORT_SYMBOL(i2c_smbus_xfer);
+
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int protocol, union i2c_smbus_data *data)
{
unsigned long orig_jiffies;
int try;
@@ -539,8 +558,6 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
if (adapter->algo->smbus_xfer) {
- i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
-
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (res = 0, try = 0; try <= adapter->retries; try++) {
@@ -553,7 +570,6 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
orig_jiffies + adapter->timeout))
break;
}
- i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
goto trace;
@@ -575,7 +591,7 @@ trace:
return res;
}
-EXPORT_SYMBOL(i2c_smbus_xfer);
+EXPORT_SYMBOL(__i2c_smbus_xfer);
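
The split mirrors i2c_transfer()/__i2c_transfer(): the double-underscore
variant assumes the caller already holds the relevant bus lock. A sketch of
both call styles (adap, addr and reg are illustrative):

	union i2c_smbus_data data;
	s32 ret;

	/* normal path: the core takes and releases the segment lock */
	ret = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, reg,
			     I2C_SMBUS_BYTE_DATA, &data);

	/* lock already held, e.g. inside a mux select callback */
	ret = __i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, reg,
			       I2C_SMBUS_BYTE_DATA, &data);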
/**
* i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 300ab4b672e4..f330690b4125 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -87,8 +87,8 @@ static int __i2c_mux_smbus_xfer(struct i2c_adapter *adap,
ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
- ret = parent->algo->smbus_xfer(parent, addr, flags,
- read_write, command, size, data);
+ ret = __i2c_smbus_xfer(parent, addr, flags,
+ read_write, command, size, data);
if (muxc->deselect)
muxc->deselect(muxc, priv->chan_id);
@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;
- rt_mutex_lock(&parent->mux_lock);
+ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
if (!(flags & I2C_LOCK_ROOT_ADAPTER))
return;
i2c_lock_bus(parent, flags);
@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;
- rt_mutex_lock(&parent->mux_lock);
+ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
i2c_lock_bus(parent, flags);
}
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 12ad8d65faf6..f2bf3e57ed67 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -94,31 +94,11 @@ static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
struct i2c_client *client, u8 val)
{
struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
- int ret = -ENODEV;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg;
- u8 msgbuf[] = {pdata->sel_reg_addr, val};
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = msgbuf;
- ret = __i2c_transfer(adap, &msg, 1);
-
- if (ret >= 0 && ret != 1)
- ret = -EREMOTEIO;
- } else if (adap->algo->smbus_xfer) {
- union i2c_smbus_data data;
-
- data.byte = val;
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags, I2C_SMBUS_WRITE,
- pdata->sel_reg_addr,
- I2C_SMBUS_BYTE_DATA, &data);
- }
+ union i2c_smbus_data data = { .byte = val };
- return ret;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, pdata->sel_reg_addr,
+ I2C_SMBUS_BYTE_DATA, &data);
}
static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 6a39adaf433f..9e75d6b9140b 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -99,31 +99,11 @@ MODULE_DEVICE_TABLE(of, pca9541_of_match);
static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
{
struct i2c_adapter *adap = client->adapter;
- int ret;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg;
- char buf[2];
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 2;
- buf[0] = command;
- buf[1] = val;
- msg.buf = buf;
- ret = __i2c_transfer(adap, &msg, 1);
- } else {
- union i2c_smbus_data data;
-
- data.byte = val;
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags,
- I2C_SMBUS_WRITE,
- command,
- I2C_SMBUS_BYTE_DATA, &data);
- }
+ union i2c_smbus_data data = { .byte = val };
- return ret;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
}
/*
@@ -133,41 +113,14 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
static int pca9541_reg_read(struct i2c_client *client, u8 command)
{
struct i2c_adapter *adap = client->adapter;
+ union i2c_smbus_data data;
int ret;
- u8 val;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = &command
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = &val
- }
- };
- ret = __i2c_transfer(adap, msg, 2);
- if (ret == 2)
- ret = val;
- else if (ret >= 0)
- ret = -EIO;
- } else {
- union i2c_smbus_data data;
-
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags,
- I2C_SMBUS_READ,
- command,
- I2C_SMBUS_BYTE_DATA, &data);
- if (!ret)
- ret = data.byte;
- }
- return ret;
+
+ ret = __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_READ, command,
+ I2C_SMBUS_BYTE_DATA, &data);
+
+ return ret ?: data.byte;
}
/*
@@ -345,11 +298,11 @@ static int pca9541_probe(struct i2c_client *client,
/*
* I2C accesses are unprotected here.
- * We have to lock the adapter before releasing the bus.
+ * We have to lock the I2C segment before releasing the bus.
*/
- i2c_lock_adapter(adap);
+ i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
pca9541_release_bus(client);
- i2c_unlock_adapter(adap);
+ i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
/* Create mux adapter */
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index fbc748027087..24bd9275fde5 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -220,30 +220,11 @@ MODULE_DEVICE_TABLE(of, pca954x_of_match);
static int pca954x_reg_write(struct i2c_adapter *adap,
struct i2c_client *client, u8 val)
{
- int ret = -ENODEV;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg;
- char buf[1];
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 1;
- buf[0] = val;
- msg.buf = buf;
- ret = __i2c_transfer(adap, &msg, 1);
-
- if (ret >= 0 && ret != 1)
- ret = -EREMOTEIO;
- } else {
- union i2c_smbus_data data;
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags,
- I2C_SMBUS_WRITE,
- val, I2C_SMBUS_BYTE, &data);
- }
+ union i2c_smbus_data dummy;
- return ret;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, val,
+ I2C_SMBUS_BYTE, &dummy);
}
static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
@@ -368,7 +349,8 @@ static int pca954x_probe(struct i2c_client *client,
{
struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *of_node = client->dev.of_node;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
bool idle_disconnect_dt;
struct gpio_desc *gpio;
int num, force, class;
@@ -379,8 +361,7 @@ static int pca954x_probe(struct i2c_client *client,
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
return -ENODEV;
- muxc = i2c_mux_alloc(adap, &client->dev,
- PCA954X_MAX_NCHANS, sizeof(*data), 0,
+ muxc = i2c_mux_alloc(adap, dev, PCA954X_MAX_NCHANS, sizeof(*data), 0,
pca954x_select_chan, pca954x_deselect_mux);
if (!muxc)
return -ENOMEM;
@@ -390,7 +371,7 @@ static int pca954x_probe(struct i2c_client *client,
data->client = client;
/* Reset the mux if a reset GPIO is specified. */
- gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
if (gpio) {
@@ -400,7 +381,7 @@ static int pca954x_probe(struct i2c_client *client,
udelay(1);
}
- data->chip = of_device_get_match_data(&client->dev);
+ data->chip = of_device_get_match_data(dev);
if (!data->chip)
data->chip = &chips[id->driver_data];
@@ -414,8 +395,7 @@ static int pca954x_probe(struct i2c_client *client,
if (!ret &&
(id.manufacturer_id != data->chip->id.manufacturer_id ||
id.part_id != data->chip->id.part_id)) {
- dev_warn(&client->dev,
- "unexpected device id %03x-%03x-%x\n",
+ dev_warn(dev, "unexpected device id %03x-%03x-%x\n",
id.manufacturer_id, id.part_id,
id.die_revision);
return -ENODEV;
@@ -427,14 +407,14 @@ static int pca954x_probe(struct i2c_client *client,
* initializes the mux to disconnected state.
*/
if (i2c_smbus_write_byte(client, 0) < 0) {
- dev_warn(&client->dev, "probe failed\n");
+ dev_warn(dev, "probe failed\n");
return -ENODEV;
}
data->last_chan = 0; /* force the first selection */
- idle_disconnect_dt = of_node &&
- of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+ idle_disconnect_dt = np &&
+ of_property_read_bool(np, "i2c-mux-idle-disconnect");
ret = pca954x_irq_setup(muxc);
if (ret)
@@ -465,7 +445,7 @@ static int pca954x_probe(struct i2c_client *client,
}
if (data->irq) {
- ret = devm_request_threaded_irq(&client->dev, data->client->irq,
+ ret = devm_request_threaded_irq(dev, data->client->irq,
NULL, pca954x_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
"pca954x", data);
@@ -473,8 +453,7 @@ static int pca954x_probe(struct i2c_client *client,
goto fail_cleanup;
}
- dev_info(&client->dev,
- "registered %d multiplexed busses for I2C %s %s\n",
+ dev_info(dev, "registered %d multiplexed busses for I2C %s %s\n",
num, data->chip->muxtype == pca954x_ismux
? "mux" : "switch", client->name);
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index e52c58c29d9a..4d565b0c5a6e 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -574,13 +574,14 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive)
if (!HPT370_ALLOW_ATA100_5 ||
check_in_drive_list(drive, bad_ata100_5))
return ATA_UDMA4;
+ /* else: fall through */
case HPT372 :
case HPT372A:
case HPT372N:
case HPT374 :
if (ata_id_is_sata(drive->id))
mask &= ~0x0e;
- /* Fall thru */
+ /* fall through */
default:
return mask;
}
@@ -600,7 +601,7 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
case HPT374 :
if (ata_id_is_sata(drive->id))
return 0x00;
- /* Fall thru */
+ /* else: fall through */
default:
return 0x07;
}
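
These edits normalize the annotations to the comment form that GCC's
-Wimplicit-fallthrough recognizes, so intentional fall-throughs stop
triggering the warning. The pattern in miniature (case labels illustrative):

	switch (chip_type) {
	case TYPE_A:
		handle_a();
		/* fall through */
	case TYPE_B:
		handle_b();
		break;
	}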
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 5f178384876f..44a7a255ef74 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -419,10 +419,11 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
int write, void *buffer, unsigned *bufflen,
- struct request_sense *sense, int timeout,
+ struct scsi_sense_hdr *sshdr, int timeout,
req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
+ struct scsi_sense_hdr local_sshdr;
int retries = 10;
bool failed;
@@ -430,6 +431,9 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
"rq_flags: 0x%x",
cmd[0], write, timeout, rq_flags);
+ if (!sshdr)
+ sshdr = &local_sshdr;
+
/* start of retry loop */
do {
struct request *rq;
@@ -456,8 +460,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
if (buffer)
*bufflen = scsi_req(rq)->resid_len;
- if (sense)
- memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));
+ scsi_normalize_sense(scsi_req(rq)->sense,
+ scsi_req(rq)->sense_len, sshdr);
/*
* FIXME: we should probably abort/retry or something in case of
@@ -469,12 +473,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
*/
- struct request_sense *reqbuf = scsi_req(rq)->sense;
-
- if (reqbuf->sense_key == UNIT_ATTENTION)
+ if (sshdr->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);
- else if (reqbuf->sense_key == NOT_READY &&
- reqbuf->asc == 4 && reqbuf->ascq != 4) {
+ else if (sshdr->sense_key == NOT_READY &&
+ sshdr->asc == 4 && sshdr->ascq != 4) {
/*
* The drive is in the process of loading
* a disk. Retry, but wait a little to give
@@ -864,7 +866,7 @@ static void msf_from_bcd(struct atapi_msf *msf)
msf->frame = bcd2bin(msf->frame);
}
-int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
+int cdrom_check_status(ide_drive_t *drive, struct scsi_sense_hdr *sshdr)
{
struct cdrom_info *info = drive->driver_data;
struct cdrom_device_info *cdi;
@@ -886,12 +888,11 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
*/
cmd[7] = cdi->sanyo_slot % 3;
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sshdr, 0, RQF_QUIET);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
- unsigned long *sectors_per_frame,
- struct request_sense *sense)
+ unsigned long *sectors_per_frame)
{
struct {
__be32 lba;
@@ -908,7 +909,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_READ_CDVD_CAPACITY;
- stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
+ stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, NULL, 0,
RQF_QUIET);
if (stat)
return stat;
@@ -944,8 +945,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
}
static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
- int format, char *buf, int buflen,
- struct request_sense *sense)
+ int format, char *buf, int buflen)
{
unsigned char cmd[BLK_MAX_CDB];
@@ -962,11 +962,11 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
if (msf_flag)
cmd[1] = 2;
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, NULL, 0, RQF_QUIET);
}
/* Try to read the entire TOC for the disk into our internal buffer. */
-int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
+int ide_cd_read_toc(ide_drive_t *drive)
{
int stat, ntracks, i;
struct cdrom_info *info = drive->driver_data;
@@ -996,14 +996,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
* Check to see if the existing data is still valid. If it is,
* just return.
*/
- (void) cdrom_check_status(drive, sense);
+ (void) cdrom_check_status(drive, NULL);
if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
return 0;
/* try to get the total cdrom capacity and sector size */
- stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
- sense);
+ stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame);
if (stat)
toc->capacity = 0x1fffff;
@@ -1016,7 +1015,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
/* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
- sizeof(struct atapi_toc_header), sense);
+ sizeof(struct atapi_toc_header));
if (stat)
return stat;
@@ -1036,7 +1035,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
- sizeof(struct atapi_toc_entry), sense);
+ sizeof(struct atapi_toc_entry));
if (stat && toc->hdr.first_track > 1) {
/*
@@ -1056,8 +1055,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
- sizeof(struct atapi_toc_entry),
- sense);
+ sizeof(struct atapi_toc_entry));
if (stat)
return stat;
@@ -1094,7 +1092,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (toc->hdr.first_track != CDROM_LEADOUT) {
/* read the multisession information */
stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
- sizeof(ms_tmp), sense);
+ sizeof(ms_tmp));
if (stat)
return stat;
@@ -1108,7 +1106,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
/* re-read multisession information using MSF format */
stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
- sizeof(ms_tmp), sense);
+ sizeof(ms_tmp));
if (stat)
return stat;
@@ -1412,7 +1410,7 @@ static sector_t ide_cdrom_capacity(ide_drive_t *drive)
{
unsigned long capacity, sectors_per_frame;
- if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
+ if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame))
return 0;
return capacity * sectors_per_frame;
@@ -1710,9 +1708,8 @@ static unsigned int idecd_check_events(struct gendisk *disk,
static int idecd_revalidate_disk(struct gendisk *disk)
{
struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
- struct request_sense sense;
- ide_cd_read_toc(info->drive, &sense);
+ ide_cd_read_toc(info->drive);
return 0;
}
@@ -1736,7 +1733,6 @@ static int ide_cd_probe(ide_drive_t *drive)
{
struct cdrom_info *info;
struct gendisk *g;
- struct request_sense sense;
ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x",
drive->driver_req, drive->media);
@@ -1785,7 +1781,7 @@ static int ide_cd_probe(ide_drive_t *drive)
goto failed;
}
- ide_cd_read_toc(drive, &sense);
+ ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
device_add_disk(&drive->gendev, g);
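
The conversion away from struct request_sense hinges on
scsi_normalize_sense(), which decodes both fixed and descriptor sense formats
into a struct scsi_sense_hdr. A sketch of the consuming pattern (the buffer
names and the handler are illustrative):

	struct scsi_sense_hdr sshdr;

	if (scsi_normalize_sense(sense_buf, sense_len, &sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		handle_media_change();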
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 04f0f310a856..a69dc7f61c4d 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -98,11 +98,11 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
/* ide-cd.c functions used by ide-cd_ioctl.c */
int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct request_sense *, int, req_flags_t);
-int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
+ unsigned *, struct scsi_sense_hdr *, int, req_flags_t);
+int ide_cd_read_toc(ide_drive_t *);
int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
void ide_cdrom_update_speed(ide_drive_t *, u8 *);
-int cdrom_check_status(ide_drive_t *, struct request_sense *);
+int cdrom_check_status(ide_drive_t *, struct scsi_sense_hdr *);
/* ide-cd_ioctl.c */
int ide_cdrom_open_real(struct cdrom_device_info *, int);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index b1322400887b..4a6e1a413ead 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -43,14 +43,14 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
{
ide_drive_t *drive = cdi->handle;
struct media_event_desc med;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int stat;
if (slot_nr != CDSL_CURRENT)
return -EINVAL;
- stat = cdrom_check_status(drive, &sense);
- if (!stat || sense.sense_key == UNIT_ATTENTION)
+ stat = cdrom_check_status(drive, &sshdr);
+ if (!stat || sshdr.sense_key == UNIT_ATTENTION)
return CDS_DISC_OK;
if (!cdrom_get_media_event(cdi, &med)) {
@@ -62,8 +62,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_NO_DISC;
}
- if (sense.sense_key == NOT_READY && sense.asc == 0x04
- && sense.ascq == 0x04)
+ if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04
+ && sshdr.ascq == 0x04)
return CDS_DISC_OK;
/*
@@ -71,8 +71,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
* just return TRAY_OPEN since ATAPI doesn't provide
* any other way to detect this...
*/
- if (sense.sense_key == NOT_READY) {
- if (sense.asc == 0x3a && sense.ascq == 1)
+ if (sshdr.sense_key == NOT_READY) {
+ if (sshdr.asc == 0x3a && sshdr.ascq == 1)
return CDS_NO_DISC;
else
return CDS_TRAY_OPEN;
@@ -105,8 +105,7 @@ unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
/* Eject the disk if EJECTFLAG is 0.
If EJECTFLAG is 1, try to reload the disk. */
static
-int cdrom_eject(ide_drive_t *drive, int ejectflag,
- struct request_sense *sense)
+int cdrom_eject(ide_drive_t *drive, int ejectflag)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_device_info *cdi = &cd->devinfo;
@@ -129,20 +128,16 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
cmd[0] = GPCMD_START_STOP_UNIT;
cmd[4] = loej | (ejectflag != 0);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
}
/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
static
-int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
- struct request_sense *sense)
+int ide_cd_lockdoor(ide_drive_t *drive, int lockflag)
{
- struct request_sense my_sense;
+ struct scsi_sense_hdr sshdr;
int stat;
- if (sense == NULL)
- sense = &my_sense;
-
/* If the drive cannot lock the door, just pretend. */
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
stat = 0;
@@ -155,14 +150,14 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
cmd[4] = lockflag ? 1 : 0;
stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
- sense, 0, 0);
+ &sshdr, 0, 0);
}
/* If we got an illegal field error, the drive
probably cannot lock the door. */
if (stat != 0 &&
- sense->sense_key == ILLEGAL_REQUEST &&
- (sense->asc == 0x24 || sense->asc == 0x20)) {
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ (sshdr.asc == 0x24 || sshdr.asc == 0x20)) {
printk(KERN_ERR "%s: door locking not supported\n",
drive->name);
drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
@@ -170,7 +165,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
}
/* no medium, that's alright. */
- if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a)
+ if (stat != 0 && sshdr.sense_key == NOT_READY && sshdr.asc == 0x3a)
stat = 0;
if (stat == 0) {
@@ -186,23 +181,22 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position)
{
ide_drive_t *drive = cdi->handle;
- struct request_sense sense;
if (position) {
- int stat = ide_cd_lockdoor(drive, 0, &sense);
+ int stat = ide_cd_lockdoor(drive, 0);
if (stat)
return stat;
}
- return cdrom_eject(drive, !position, &sense);
+ return cdrom_eject(drive, !position);
}
int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock)
{
ide_drive_t *drive = cdi->handle;
- return ide_cd_lockdoor(drive, lock, NULL);
+ return ide_cd_lockdoor(drive, lock);
}
/*
@@ -213,7 +207,6 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
- struct request_sense sense;
u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
int stat;
unsigned char cmd[BLK_MAX_CDB];
@@ -236,7 +229,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
cmd[5] = speed & 0xff;
}
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
+ stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
if (!ide_cdrom_get_capabilities(drive, buf)) {
ide_cdrom_update_speed(drive, buf);
@@ -252,11 +245,10 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
struct atapi_toc *toc;
ide_drive_t *drive = cdi->handle;
struct cdrom_info *info = drive->driver_data;
- struct request_sense sense;
int ret;
if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
- ret = ide_cd_read_toc(drive, &sense);
+ ret = ide_cd_read_toc(drive);
if (ret)
return ret;
}
@@ -300,7 +292,6 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
- struct request_sense sense;
struct request *rq;
int ret;
@@ -315,7 +306,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
* lock it again.
*/
if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
- (void)ide_cd_lockdoor(drive, 1, &sense);
+ (void)ide_cd_lockdoor(drive, 1);
return ret;
}
@@ -355,7 +346,6 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
struct atapi_toc_entry *first_toc, *last_toc;
unsigned long lba_start, lba_end;
int stat;
- struct request_sense sense;
unsigned char cmd[BLK_MAX_CDB];
stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
@@ -380,7 +370,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
}
static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
@@ -391,7 +381,7 @@ static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
int stat;
/* Make sure our saved TOC is valid. */
- stat = ide_cd_read_toc(drive, NULL);
+ stat = ide_cd_read_toc(drive);
if (stat)
return stat;
@@ -461,8 +451,8 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
layer. the packet must be complete, as we do not
touch it at all. */
- if (cgc->sense)
- memset(cgc->sense, 0, sizeof(struct request_sense));
+ if (cgc->sshdr)
+ memset(cgc->sshdr, 0, sizeof(*cgc->sshdr));
if (cgc->quiet)
flags |= RQF_QUIET;
@@ -470,7 +460,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
cgc->buffer, &len,
- cgc->sense, cgc->timeout, flags);
+ cgc->sshdr, cgc->timeout, flags);
if (!cgc->stat)
cgc->buflen -= len;
return cgc->stat;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 5bd2aafc3753..a8df300f949c 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -427,6 +427,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
* (maintains previous driver behaviour)
*/
break;
+ /* else: fall through */
case CAPACITY_CURRENT:
/* Normal Zip/LS-120 disks */
if (memcmp(cap_desc, &floppy->cap_desc, 8))
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index a444bad7a2aa..0d93e0cfbeaf 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -460,7 +460,6 @@ void do_ide_request(struct request_queue *q)
struct ide_host *host = hwif->host;
struct request *rq = NULL;
ide_startstop_t startstop;
- unsigned long queue_run_ms = 3; /* old plug delay */
spin_unlock_irq(q->queue_lock);
@@ -480,9 +479,6 @@ repeat:
prev_port = hwif->host->cur_port;
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
- unsigned long left = jiffies - drive->sleep;
-
- queue_run_ms = jiffies_to_msecs(left + 1);
ide_unlock_port(hwif);
goto plug_device;
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 416a2f353071..3b75a7b7a284 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -142,6 +142,7 @@ static void ide_classify_atapi_dev(ide_drive_t *drive)
}
/* Early cdrom models used zero */
type = ide_cdrom;
+ /* fall through */
case ide_cdrom:
drive->dev_flags |= IDE_DFLAG_REMOVABLE;
#ifdef CONFIG_PPC
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index aee7b46d2330..34c1165226a4 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1746,7 +1746,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
unsigned long t;
int speed;
- int buffer_size;
u16 *ctl = (u16 *)&tape->caps[12];
ide_debug_log(IDE_DBG_FUNC, "minor: %d", minor);
@@ -1781,7 +1780,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
*ctl /= 2;
tape->buffer_size = *ctl * tape->blk_size;
}
- buffer_size = tape->buffer_size;
/* select the "best" DSC read/write polling freq */
speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 89b29028d315..c21d5c50ae3a 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -128,7 +128,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
return pre_task_out_intr(drive, cmd);
}
handler = task_pio_intr;
- /* fall-through */
+ /* fall through */
case ATA_PROT_NODATA:
if (handler == NULL)
handler = task_no_data_intr;
@@ -140,6 +140,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
hwif->expiry = dma_ops->dma_timer_expiry;
ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
dma_ops->dma_start(drive);
+ /* fall through */
default:
return ide_started;
}
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index c3062b53056f..024bc7ba49ee 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -494,6 +494,7 @@ static int init_chipset_sis5513(struct pci_dev *dev)
pci_read_config_byte(dev, 0x09, &reg);
if ((reg & 0x0f) != 0x00)
pci_write_config_byte(dev, 0x09, reg&0xf0);
+ /* fall through */
case ATA_16:
/* force per drive recovery and active timings
needed on ATA_33 and below chips */
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 62ae7e5abcfa..829dc96c9dd6 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -40,7 +40,7 @@ config ADXL345_I2C
select REGMAP_I2C
help
Say Y here if you want to build support for the Analog Devices
- ADXL345 3-axis digital accelerometer.
+ ADXL345 or ADXL375 3-axis digital accelerometer.
To compile this driver as a module, choose M here: the module
will be called adxl345_i2c and you will also get adxl345_core
@@ -54,7 +54,7 @@ config ADXL345_SPI
select REGMAP_SPI
help
Say Y here if you want to build support for the Analog Devices
- ADXL345 3-axis digital accelerometer.
+ ADXL345 or ADXL375 3-axis digital accelerometer.
To compile this driver as a module, choose M here: the module
will be called adxl345_spi and you will also get adxl345_core
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index c1ddf3927c47..ccd63de7a55a 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -11,8 +11,13 @@
#ifndef _ADXL345_H_
#define _ADXL345_H_
+enum adxl345_device_type {
+ ADXL345,
+ ADXL375,
+};
+
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
- const char *name);
+ enum adxl345_device_type type, const char *name);
int adxl345_core_remove(struct device *dev);
#endif /* _ADXL345_H_ */
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 7251d0e63d74..780f87f72338 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -6,21 +6,35 @@
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
+ *
+ * Datasheet: http://www.analog.com/media/en/technical-documentation/data-sheets/ADXL345.pdf
*/
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include "adxl345.h"
#define ADXL345_REG_DEVID 0x00
+#define ADXL345_REG_OFSX 0x1e
+#define ADXL345_REG_OFSY 0x1f
+#define ADXL345_REG_OFSZ 0x20
+#define ADXL345_REG_OFS_AXIS(index) (ADXL345_REG_OFSX + (index))
+#define ADXL345_REG_BW_RATE 0x2C
#define ADXL345_REG_POWER_CTL 0x2D
#define ADXL345_REG_DATA_FORMAT 0x31
#define ADXL345_REG_DATAX0 0x32
#define ADXL345_REG_DATAY0 0x34
#define ADXL345_REG_DATAZ0 0x36
+#define ADXL345_REG_DATA_AXIS(index) \
+ (ADXL345_REG_DATAX0 + (index) * sizeof(__le16))
+
+#define ADXL345_BW_RATE GENMASK(3, 0)
+#define ADXL345_BASE_RATE_NANO_HZ 97656250LL
+#define NHZ_PER_HZ 1000000000LL
#define ADXL345_POWER_CTL_MEASURE BIT(3)
#define ADXL345_POWER_CTL_STANDBY 0x00
@@ -42,24 +56,33 @@
*/
static const int adxl345_uscale = 38300;
+/*
+ * The datasheet lists a resolution of ~49 mg per LSB. That's
+ * ~480 mm/s**2 per LSB.
+ */
+static const int adxl375_uscale = 480000;
+
struct adxl345_data {
struct regmap *regmap;
u8 data_range;
+ enum adxl345_device_type type;
};
-#define ADXL345_CHANNEL(reg, axis) { \
+#define ADXL345_CHANNEL(index, axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .address = reg, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
}
static const struct iio_chan_spec adxl345_channels[] = {
- ADXL345_CHANNEL(ADXL345_REG_DATAX0, X),
- ADXL345_CHANNEL(ADXL345_REG_DATAY0, Y),
- ADXL345_CHANNEL(ADXL345_REG_DATAZ0, Z),
+ ADXL345_CHANNEL(0, X),
+ ADXL345_CHANNEL(1, Y),
+ ADXL345_CHANNEL(2, Z),
};
static int adxl345_read_raw(struct iio_dev *indio_dev,
@@ -67,7 +90,9 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct adxl345_data *data = iio_priv(indio_dev);
- __le16 regval;
+ __le16 accel;
+ long long samp_freq_nhz;
+ unsigned int regval;
int ret;
switch (mask) {
@@ -77,29 +102,117 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
* ADXL345_REG_DATA(X0/Y0/Z0) contain the least significant byte
* and ADXL345_REG_DATA(X0/Y0/Z0) + 1 the most significant byte
*/
- ret = regmap_bulk_read(data->regmap, chan->address, &regval,
- sizeof(regval));
+ ret = regmap_bulk_read(data->regmap,
+ ADXL345_REG_DATA_AXIS(chan->address),
+ &accel, sizeof(accel));
if (ret < 0)
return ret;
- *val = sign_extend32(le16_to_cpu(regval), 12);
+ *val = sign_extend32(le16_to_cpu(accel), 12);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = adxl345_uscale;
+ switch (data->type) {
+ case ADXL345:
+ *val2 = adxl345_uscale;
+ break;
+ case ADXL375:
+ *val2 = adxl375_uscale;
+ break;
+ }
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = regmap_read(data->regmap,
+ ADXL345_REG_OFS_AXIS(chan->address), &regval);
+ if (ret < 0)
+ return ret;
+ /*
+ * 8-bit resolution at +/- 2g, that is 4x accel data scale
+ * factor
+ */
+ *val = sign_extend32(regval, 7) * 4;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = regmap_read(data->regmap, ADXL345_REG_BW_RATE, &regval);
+ if (ret < 0)
+ return ret;
+
+ samp_freq_nhz = ADXL345_BASE_RATE_NANO_HZ <<
+ (regval & ADXL345_BW_RATE);
+ *val = div_s64_rem(samp_freq_nhz, NHZ_PER_HZ, val2);
+
+ return IIO_VAL_INT_PLUS_NANO;
}
return -EINVAL;
}
+static int adxl345_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adxl345_data *data = iio_priv(indio_dev);
+ s64 n;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ /*
+ * 8-bit resolution at +/- 2g, that is 4x accel data scale
+ * factor
+ */
+ return regmap_write(data->regmap,
+ ADXL345_REG_OFS_AXIS(chan->address),
+ val / 4);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ n = div_s64(val * NHZ_PER_HZ + val2, ADXL345_BASE_RATE_NANO_HZ);
+
+ return regmap_update_bits(data->regmap, ADXL345_REG_BW_RATE,
+ ADXL345_BW_RATE,
+ clamp_val(ilog2(n), 0,
+ ADXL345_BW_RATE));
+ }
+
+ return -EINVAL;
+}
+
+static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+"0.09765625 0.1953125 0.390625 0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600 3200"
+);
+
+static struct attribute *adxl345_attrs[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group adxl345_attrs_group = {
+ .attrs = adxl345_attrs,
+};
+
static const struct iio_info adxl345_info = {
+ .attrs = &adxl345_attrs_group,
.read_raw = adxl345_read_raw,
+ .write_raw = adxl345_write_raw,
+ .write_raw_get_fmt = adxl345_write_raw_get_fmt,
};
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
- const char *name)
+ enum adxl345_device_type type, const char *name)
{
struct adxl345_data *data;
struct iio_dev *indio_dev;
@@ -125,6 +238,7 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
data = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
data->regmap = regmap;
+ data->type = type;
/* Enable full-resolution mode */
data->data_range = ADXL345_DATA_FORMAT_FULL_RES;
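
A worked sketch of the BW_RATE arithmetic above: the base output data rate is
0.09765625 Hz (97656250 nHz) and each step of the 4-bit rate code doubles it,
so code 0xa gives 97656250 << 10 nHz = 100 Hz and code 0xf gives 3200 Hz. The
read path restated as a helper (name illustrative):

static u64 example_rate_code_to_nhz(unsigned int bw_rate)
{
	/* 0.09765625 Hz base rate, doubled per code step */
	return ADXL345_BASE_RATE_NANO_HZ << (bw_rate & ADXL345_BW_RATE);
}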
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
index 05e1ec49700c..785c89de91e7 100644
--- a/drivers/iio/accel/adxl345_i2c.c
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -34,7 +34,8 @@ static int adxl345_i2c_probe(struct i2c_client *client,
return PTR_ERR(regmap);
}
- return adxl345_core_probe(&client->dev, regmap, id ? id->name : NULL);
+	return adxl345_core_probe(&client->dev, regmap,
+				  id ? id->driver_data : ADXL345,
+				  id ? id->name : NULL);
}
static int adxl345_i2c_remove(struct i2c_client *client)
@@ -43,7 +44,8 @@ static int adxl345_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id adxl345_i2c_id[] = {
- { "adxl345", 0 },
+ { "adxl345", ADXL345 },
+ { "adxl375", ADXL375 },
{ }
};
@@ -51,6 +53,7 @@ MODULE_DEVICE_TABLE(i2c, adxl345_i2c_id);
static const struct of_device_id adxl345_of_match[] = {
{ .compatible = "adi,adxl345" },
+ { .compatible = "adi,adxl375" },
{ },
};
diff --git a/drivers/iio/accel/adxl345_spi.c b/drivers/iio/accel/adxl345_spi.c
index 6d658196f81c..67b7c66a8492 100644
--- a/drivers/iio/accel/adxl345_spi.c
+++ b/drivers/iio/accel/adxl345_spi.c
@@ -42,7 +42,7 @@ static int adxl345_spi_probe(struct spi_device *spi)
return PTR_ERR(regmap);
}
- return adxl345_core_probe(&spi->dev, regmap, id->name);
+ return adxl345_core_probe(&spi->dev, regmap, id->driver_data, id->name);
}
static int adxl345_spi_remove(struct spi_device *spi)
@@ -51,7 +51,8 @@ static int adxl345_spi_remove(struct spi_device *spi)
}
static const struct spi_device_id adxl345_spi_id[] = {
- { "adxl345", 0 },
+ { "adxl345", ADXL345 },
+ { "adxl375", ADXL375 },
{ }
};
@@ -59,6 +60,7 @@ MODULE_DEVICE_TABLE(spi, adxl345_spi_id);
static const struct of_device_id adxl345_of_match[] = {
{ .compatible = "adi,adxl345" },
+ { .compatible = "adi,adxl375" },
{ },
};
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 7e3d82cff3d5..421a0a8a1379 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
if (src < 0)
return IRQ_NONE;
- if (!(src & data->chip_info->enabled_events))
+ if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
return IRQ_NONE;
if (src & MMA8452_INT_DRDY) {
@@ -1547,6 +1547,7 @@ static int mma8452_probe(struct i2c_client *client,
case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
+ /* else: fall through */
default:
return -ENODEV;
}
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 4dceb75e3586..4964561595f5 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev,
mutex_lock(&st->lock);
ret = sca3000_write_3db_freq(st, val);
mutex_unlock(&st->lock);
+ return ret;
default:
return -EINVAL;
}
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 056dddb27236..2ca5d1f6ade0 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -14,8 +14,8 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
-#include <linux/iio/common/st_sensors.h>
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_accel.h"
@@ -107,8 +107,8 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
- {"SMO8840", LNG2DM},
- {"SMO8A90", LNG2DM},
+ {"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
+ {"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{ },
};
MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
@@ -117,33 +117,33 @@ MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
#endif
static const struct i2c_device_id st_accel_id_table[] = {
- { LSM303DLH_ACCEL_DEV_NAME, LSM303DLH },
- { LSM303DLHC_ACCEL_DEV_NAME, LSM303DLHC },
- { LIS3DH_ACCEL_DEV_NAME, LIS3DH },
- { LSM330D_ACCEL_DEV_NAME, LSM330D },
- { LSM330DL_ACCEL_DEV_NAME, LSM330DL },
- { LSM330DLC_ACCEL_DEV_NAME, LSM330DLC },
- { LIS331DLH_ACCEL_DEV_NAME, LIS331DLH },
- { LSM303DL_ACCEL_DEV_NAME, LSM303DL },
- { LSM303DLM_ACCEL_DEV_NAME, LSM303DLM },
- { LSM330_ACCEL_DEV_NAME, LSM330 },
- { LSM303AGR_ACCEL_DEV_NAME, LSM303AGR },
- { LIS2DH12_ACCEL_DEV_NAME, LIS2DH12 },
- { LIS3L02DQ_ACCEL_DEV_NAME, LIS3L02DQ },
- { LNG2DM_ACCEL_DEV_NAME, LNG2DM },
- { H3LIS331DL_ACCEL_DEV_NAME, H3LIS331DL },
- { LIS331DL_ACCEL_DEV_NAME, LIS331DL },
- { LIS3LV02DL_ACCEL_DEV_NAME, LIS3LV02DL },
- { LIS2DW12_ACCEL_DEV_NAME, LIS2DW12 },
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ { LSM303AGR_ACCEL_DEV_NAME },
+ { LIS2DH12_ACCEL_DEV_NAME },
+ { LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
+ { H3LIS331DL_ACCEL_DEV_NAME },
+ { LIS331DL_ACCEL_DEV_NAME },
+ { LIS3LV02DL_ACCEL_DEV_NAME },
+ { LIS2DW12_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
-static int st_accel_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st_accel_i2c_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
struct st_sensor_data *adata;
+ const char *match;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adata));
@@ -152,19 +152,9 @@ static int st_accel_i2c_probe(struct i2c_client *client,
adata = iio_priv(indio_dev);
- if (client->dev.of_node) {
- st_sensors_of_name_probe(&client->dev, st_accel_of_match,
- client->name, sizeof(client->name));
- } else if (ACPI_HANDLE(&client->dev)) {
- ret = st_sensors_match_acpi_device(&client->dev);
- if ((ret < 0) || (ret >= ST_ACCEL_MAX))
- return -ENODEV;
-
- strlcpy(client->name, st_accel_id_table[ret].name,
- sizeof(client->name));
- } else if (!id)
- return -ENODEV;
-
+ match = device_get_match_data(&client->dev);
+ if (match)
+ strlcpy(client->name, match, sizeof(client->name));
st_sensors_i2c_configure(indio_dev, client, adata);
@@ -188,7 +178,7 @@ static struct i2c_driver st_accel_driver = {
.of_match_table = of_match_ptr(st_accel_of_match),
.acpi_match_table = ACPI_PTR(st_accel_acpi_match),
},
- .probe = st_accel_i2c_probe,
+ .probe_new = st_accel_i2c_probe,
.remove = st_accel_i2c_remove,
.id_table = st_accel_id_table,
};
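
device_get_match_data() returns the match data of whichever firmware table
(OF or ACPI) matched the device, which is why the ACPI entries above now
carry the device-name string cast to kernel_ulong_t. A sketch of the OF side
of the same scheme (this hunk does not show the OF table, so the entry below
is an assumption about its shape):

static const struct of_device_id st_accel_of_match[] = {
	{
		.compatible = "st,lng2dm-accel",
		.data = LNG2DM_ACCEL_DEV_NAME,
	},
	/* further entries elided */
	{},
};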
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 9da79070357c..4a754921fb6f 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -157,7 +157,6 @@ config AT91_SAMA5D2_ADC
tristate "Atmel AT91 SAMA5D2 ADC"
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
- depends on HAS_DMA
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -621,6 +620,16 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config SC27XX_ADC
+ tristate "Spreadtrum SC27xx series PMICs ADC"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ Say yes here to build support for the integrated ADC inside the
+ Spreadtrum SC27xx series PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called sc27xx_adc.
+
config SPEAR_ADC
tristate "ST SPEAr ADC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -647,7 +656,6 @@ config SD_ADC_MODULATOR
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
- depends on HAS_DMA
depends on OF
depends on REGULATOR
select IIO_BUFFER
@@ -717,6 +725,7 @@ config SUN4I_GPADC
depends on IIO
depends on MFD_SUN4I_GPADC || MACH_SUN8I
depends on THERMAL || !THERMAL_OF
+ select REGMAP_IRQ
help
Say yes here to build support for Allwinner (A10, A13 and A31) SoCs
GPADC. This ADC provides 4 channels which can be used as an ADC or as
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 28a9423997f3..03db7b578f9c 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index cf1b048b0665..fc9510716ac7 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -209,6 +209,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
int ret;
+ unsigned long timeout;
ret = ad_sigma_delta_set_channel(sigma_delta, channel);
if (ret)
@@ -224,8 +225,8 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
sigma_delta->irq_dis = false;
enable_irq(sigma_delta->spi->irq);
- ret = wait_for_completion_timeout(&sigma_delta->completion, 2*HZ);
- if (ret == 0) {
+ timeout = wait_for_completion_timeout(&sigma_delta->completion, 2 * HZ);
+ if (timeout == 0) {
sigma_delta->irq_dis = true;
disable_irq_nosync(sigma_delta->spi->irq);
ret = -EIO;
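
The type change matters because wait_for_completion_timeout() returns an
unsigned long (0 on timeout, otherwise the jiffies remaining), not a negative
errno; storing it in the signed ret conflated the two meanings. The corrected
pattern in isolation (the completion name is illustrative):

	unsigned long timeout;

	timeout = wait_for_completion_timeout(&done, 2 * HZ);
	if (!timeout)
		ret = -EIO;	/* timed out: report a real errno */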
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 8729d6524b4d..d5ea84cf6460 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -102,14 +102,26 @@
#define AT91_SAMA5D2_LCDR 0x20
/* Interrupt Enable Register */
#define AT91_SAMA5D2_IER 0x24
+/* Interrupt Enable Register - TS X measurement ready */
+#define AT91_SAMA5D2_IER_XRDY BIT(20)
+/* Interrupt Enable Register - TS Y measurement ready */
+#define AT91_SAMA5D2_IER_YRDY BIT(21)
+/* Interrupt Enable Register - TS pressure measurement ready */
+#define AT91_SAMA5D2_IER_PRDY BIT(22)
/* Interrupt Enable Register - general overrun error */
#define AT91_SAMA5D2_IER_GOVRE BIT(25)
+/* Interrupt Enable Register - Pen detect */
+#define AT91_SAMA5D2_IER_PEN BIT(29)
+/* Interrupt Enable Register - No pen detect */
+#define AT91_SAMA5D2_IER_NOPEN BIT(30)
/* Interrupt Disable Register */
#define AT91_SAMA5D2_IDR 0x28
/* Interrupt Mask Register */
#define AT91_SAMA5D2_IMR 0x2c
/* Interrupt Status Register */
#define AT91_SAMA5D2_ISR 0x30
+/* Interrupt Status Register - Pen touching sense status */
+#define AT91_SAMA5D2_ISR_PENS BIT(31)
/* Last Channel Trigger Mode Register */
#define AT91_SAMA5D2_LCTMR 0x34
/* Last Channel Compare Window Register */
@@ -118,6 +130,15 @@
#define AT91_SAMA5D2_OVER 0x3c
/* Extended Mode Register */
#define AT91_SAMA5D2_EMR 0x40
+/* Extended Mode Register - Oversampling rate */
+#define AT91_SAMA5D2_EMR_OSR(V) ((V) << 16)
+#define AT91_SAMA5D2_EMR_OSR_MASK GENMASK(17, 16)
+#define AT91_SAMA5D2_EMR_OSR_1SAMPLES 0
+#define AT91_SAMA5D2_EMR_OSR_4SAMPLES 1
+#define AT91_SAMA5D2_EMR_OSR_16SAMPLES 2
+
+/* Extended Mode Register - Averaging on single trigger event */
+#define AT91_SAMA5D2_EMR_ASTE(V) ((V) << 20)
/* Compare Window Register */
#define AT91_SAMA5D2_CWR 0x44
/* Channel Gain Register */
@@ -131,8 +152,38 @@
#define AT91_SAMA5D2_CDR0 0x50
/* Analog Control Register */
#define AT91_SAMA5D2_ACR 0x94
+/* Analog Control Register - Pen detect sensitivity mask */
+#define AT91_SAMA5D2_ACR_PENDETSENS_MASK GENMASK(1, 0)
+
/* Touchscreen Mode Register */
#define AT91_SAMA5D2_TSMR 0xb0
+/* Touchscreen Mode Register - No touch mode */
+#define AT91_SAMA5D2_TSMR_TSMODE_NONE 0
+/* Touchscreen Mode Register - 4 wire screen, no pressure measurement */
+#define AT91_SAMA5D2_TSMR_TSMODE_4WIRE_NO_PRESS 1
+/* Touchscreen Mode Register - 4 wire screen, pressure measurement */
+#define AT91_SAMA5D2_TSMR_TSMODE_4WIRE_PRESS 2
+/* Touchscreen Mode Register - 5 wire screen */
+#define AT91_SAMA5D2_TSMR_TSMODE_5WIRE 3
+/* Touchscreen Mode Register - Average samples mask */
+#define AT91_SAMA5D2_TSMR_TSAV_MASK GENMASK(5, 4)
+/* Touchscreen Mode Register - Average samples */
+#define AT91_SAMA5D2_TSMR_TSAV(x) ((x) << 4)
+/* Touchscreen Mode Register - Touch/trigger frequency ratio mask */
+#define AT91_SAMA5D2_TSMR_TSFREQ_MASK GENMASK(11, 8)
+/* Touchscreen Mode Register - Touch/trigger frequency ratio */
+#define AT91_SAMA5D2_TSMR_TSFREQ(x) ((x) << 8)
+/* Touchscreen Mode Register - Pen Debounce Time mask */
+#define AT91_SAMA5D2_TSMR_PENDBC_MASK GENMASK(31, 28)
+/* Touchscreen Mode Register - Pen Debounce Time */
+#define AT91_SAMA5D2_TSMR_PENDBC(x) ((x) << 28)
+/* Touchscreen Mode Register - No DMA for touch measurements */
+#define AT91_SAMA5D2_TSMR_NOTSDMA BIT(22)
+/* Touchscreen Mode Register - Disable pen detection */
+#define AT91_SAMA5D2_TSMR_PENDET_DIS (0 << 24)
+/* Touchscreen Mode Register - Enable pen detection */
+#define AT91_SAMA5D2_TSMR_PENDET_ENA BIT(24)
+
/* Touchscreen X Position Register */
#define AT91_SAMA5D2_XPOSR 0xb4
/* Touchscreen Y Position Register */
@@ -151,6 +202,12 @@
#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL 2
/* Trigger Mode external trigger any edge */
#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY 3
+/* Trigger Mode internal periodic */
+#define AT91_SAMA5D2_TRGR_TRGMOD_PERIODIC 5
+/* Trigger Mode - trigger period mask */
+#define AT91_SAMA5D2_TRGR_TRGPER_MASK GENMASK(31, 16)
+/* Trigger Mode - trigger period */
+#define AT91_SAMA5D2_TRGR_TRGPER(x) ((x) << 16)
/* Correction Select Register */
#define AT91_SAMA5D2_COSR 0xd0
@@ -169,6 +226,22 @@
#define AT91_SAMA5D2_SINGLE_CHAN_CNT 12
#define AT91_SAMA5D2_DIFF_CHAN_CNT 6
+#define AT91_SAMA5D2_TIMESTAMP_CHAN_IDX (AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+ AT91_SAMA5D2_DIFF_CHAN_CNT + 1)
+
+#define AT91_SAMA5D2_TOUCH_X_CHAN_IDX (AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+ AT91_SAMA5D2_DIFF_CHAN_CNT * 2)
+#define AT91_SAMA5D2_TOUCH_Y_CHAN_IDX (AT91_SAMA5D2_TOUCH_X_CHAN_IDX + 1)
+#define AT91_SAMA5D2_TOUCH_P_CHAN_IDX (AT91_SAMA5D2_TOUCH_Y_CHAN_IDX + 1)
+#define AT91_SAMA5D2_MAX_CHAN_IDX AT91_SAMA5D2_TOUCH_P_CHAN_IDX
+
+#define AT91_SAMA5D2_TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */
+#define AT91_SAMA5D2_TOUCH_PEN_DETECT_DEBOUNCE_US 200
+
+#define AT91_SAMA5D2_XYZ_MASK GENMASK(11, 0)
+
+#define AT91_SAMA5D2_MAX_POS_BITS 12
+
/*
* Maximum number of bytes to hold conversion from all channels
* without the timestamp.
@@ -184,6 +257,11 @@
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
+/* Possible values for oversampling ratio */
+#define AT91_OSR_1SAMPLES 1
+#define AT91_OSR_4SAMPLES 4
+#define AT91_OSR_16SAMPLES 16
+
#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
{ \
.type = IIO_VOLTAGE, \
@@ -192,12 +270,13 @@
.scan_index = num, \
.scan_type = { \
.sign = 'u', \
- .realbits = 12, \
+ .realbits = 14, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num, \
.indexed = 1, \
}
@@ -212,16 +291,50 @@
.scan_index = num + AT91_SAMA5D2_SINGLE_CHAN_CNT, \
.scan_type = { \
.sign = 's', \
- .realbits = 12, \
+ .realbits = 14, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num"-CH"#num2, \
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \
+ { \
+ .type = IIO_POSITIONRELATIVE, \
+ .modified = 1, \
+ .channel = num, \
+ .channel2 = mod, \
+ .scan_index = num, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = name, \
+ }
+#define AT91_SAMA5D2_CHAN_PRESSURE(num, name) \
+ { \
+ .type = IIO_PRESSURE, \
+ .channel = num, \
+ .scan_index = num, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = name, \
+ }
+
#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
@@ -260,6 +373,22 @@ struct at91_adc_dma {
s64 dma_ts;
};
+/**
+ * struct at91_adc_touch - at91-sama5d2 touchscreen information
+ * @sample_period_val: the value for periodic trigger interval
+ * @touching: is the pen touching the screen or not
+ * @x_pos: temporary placeholder for pressure computation
+ * @channels_bitmask: bitmask with the touchscreen channels enabled
+ * @workq: workqueue for buffer data pushing
+ */
+struct at91_adc_touch {
+ u16 sample_period_val;
+ bool touching;
+ u16 x_pos;
+ unsigned long channels_bitmask;
+ struct work_struct workq;
+};
+
struct at91_adc_state {
void __iomem *base;
int irq;
@@ -267,14 +396,17 @@ struct at91_adc_state {
struct regulator *reg;
struct regulator *vref;
int vref_uv;
+ unsigned int current_sample_rate;
struct iio_trigger *trig;
const struct at91_adc_trigger *selected_trig;
const struct iio_chan_spec *chan;
bool conversion_done;
u32 conversion_value;
+ unsigned int oversampling_ratio;
struct at91_adc_soc_info soc_info;
wait_queue_head_t wq_data_available;
struct at91_adc_dma dma_st;
+ struct at91_adc_touch touch_st;
u16 buffer[AT91_BUFFER_MAX_HWORDS];
/*
* lock to prevent concurrent 'single conversion' requests through
@@ -329,8 +461,10 @@ static const struct iio_chan_spec at91_adc_channels[] = {
AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
- IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_SINGLE_CHAN_CNT
- + AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
+ IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_TIMESTAMP_CHAN_IDX),
+ AT91_SAMA5D2_CHAN_TOUCH(AT91_SAMA5D2_TOUCH_X_CHAN_IDX, "x", IIO_MOD_X),
+ AT91_SAMA5D2_CHAN_TOUCH(AT91_SAMA5D2_TOUCH_Y_CHAN_IDX, "y", IIO_MOD_Y),
+ AT91_SAMA5D2_CHAN_PRESSURE(AT91_SAMA5D2_TOUCH_P_CHAN_IDX, "pressure"),
};
static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
@@ -354,6 +488,231 @@ at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
return indio_dev->channels + index;
}
+static inline int at91_adc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ return at91_adc_chan_xlate(indio_dev, iiospec->args[0]);
+}
+
+static void at91_adc_config_emr(struct at91_adc_state *st)
+{
+ /* configure the extended mode register */
+ unsigned int emr = at91_adc_readl(st, AT91_SAMA5D2_EMR);
+
+ /* select oversampling per single trigger event */
+ emr |= AT91_SAMA5D2_EMR_ASTE(1);
+
+	/* clear any previously configured oversampling ratio */
+ emr &= ~AT91_SAMA5D2_EMR_OSR_MASK;
+
+ /* select oversampling ratio from configuration */
+ switch (st->oversampling_ratio) {
+ case AT91_OSR_1SAMPLES:
+ emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_1SAMPLES) &
+ AT91_SAMA5D2_EMR_OSR_MASK;
+ break;
+ case AT91_OSR_4SAMPLES:
+ emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_4SAMPLES) &
+ AT91_SAMA5D2_EMR_OSR_MASK;
+ break;
+ case AT91_OSR_16SAMPLES:
+ emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_16SAMPLES) &
+ AT91_SAMA5D2_EMR_OSR_MASK;
+ break;
+ }
+
+ at91_adc_writel(st, AT91_SAMA5D2_EMR, emr);
+}
+
+static int at91_adc_adjust_val_osr(struct at91_adc_state *st, int *val)
+{
+ if (st->oversampling_ratio == AT91_OSR_1SAMPLES) {
+ /*
+ * in this case we only have 12 bits of real data, but channel
+ * is registered as 14 bits, so shift left two bits
+ */
+ *val <<= 2;
+ } else if (st->oversampling_ratio == AT91_OSR_4SAMPLES) {
+ /*
+ * in this case we have 13 bits of real data, but channel
+ * is registered as 14 bits, so left shift one bit
+ */
+ *val <<= 1;
+ }
+
+ return IIO_VAL_INT;
+}
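As a quick illustration of the shift logic above, here is a minimal, self-contained C sketch (userspace, names hypothetical): with no oversampling the hardware delivers 12 significant bits, with 4x oversampling 13 bits, and with 16x the full 14 bits, so lower ratios are left-shifted to span the 14-bit range the channel advertises.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's oversampling settings. */
enum osr { OSR_1 = 1, OSR_4 = 4, OSR_16 = 16 };

/* Scale a raw sample so it always spans the 14-bit channel range. */
static int adjust_val_osr(enum osr ratio, int val)
{
        if (ratio == OSR_1)
                return val << 2;        /* 12 real bits -> 14 */
        if (ratio == OSR_4)
                return val << 1;        /* 13 real bits -> 14 */
        return val;                     /* 16x already yields 14 bits */
}

int main(void)
{
        printf("%d\n", adjust_val_osr(OSR_1, 0xFFF));   /* 16380 */
        printf("%d\n", adjust_val_osr(OSR_16, 0x3FFF)); /* 16383 */
        return 0;
}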
+
+static void at91_adc_adjust_val_osr_array(struct at91_adc_state *st, void *buf,
+ int len)
+{
+ int i = 0, val;
+ u16 *buf_u16 = (u16 *) buf;
+
+ /*
+ * Adjust every two-byte sample in the buffer for oversampling.
+ * The byte-based buffer is viewed as an array of u16 samples and
+ * each one is adjusted individually; copying the value out first
+ * avoids shifting the buffer contents more than necessary.
+ * len is in bytes, so divide by two for the number of samples.
+ */
+ while (i < len / 2) {
+ val = buf_u16[i];
+ at91_adc_adjust_val_osr(st, &val);
+ buf_u16[i] = val;
+ i++;
+ }
+}
+
+static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
+{
+ u32 clk_khz = st->current_sample_rate / 1000;
+ int i = 0;
+ u16 pendbc;
+ u32 tsmr, acr;
+
+ if (!state) {
+ /* disable the touch IRQs and set the mode so no touch is enabled */
+ at91_adc_writel(st, AT91_SAMA5D2_IDR,
+ AT91_SAMA5D2_IER_PEN | AT91_SAMA5D2_IER_NOPEN);
+ at91_adc_writel(st, AT91_SAMA5D2_TSMR, 0);
+ return 0;
+ }
+ /*
+ * The debounce time is in microseconds; to multiply it by a clock
+ * rate in kilohertz we need milliseconds, so divide by 1000 after
+ * the multiplication. Round up so that pendbc is at least 1.
+ */
+ pendbc = round_up(AT91_SAMA5D2_TOUCH_PEN_DETECT_DEBOUNCE_US *
+ clk_khz / 1000, 1);
+
+ /* get the required exponent */
+ while (pendbc >> i++)
+ ;
+
+ pendbc = i;
+
+ tsmr = AT91_SAMA5D2_TSMR_TSMODE_4WIRE_PRESS;
+
+ tsmr |= AT91_SAMA5D2_TSMR_TSAV(2) & AT91_SAMA5D2_TSMR_TSAV_MASK;
+ tsmr |= AT91_SAMA5D2_TSMR_PENDBC(pendbc) &
+ AT91_SAMA5D2_TSMR_PENDBC_MASK;
+ tsmr |= AT91_SAMA5D2_TSMR_NOTSDMA;
+ tsmr |= AT91_SAMA5D2_TSMR_PENDET_ENA;
+ tsmr |= AT91_SAMA5D2_TSMR_TSFREQ(2) & AT91_SAMA5D2_TSMR_TSFREQ_MASK;
+
+ at91_adc_writel(st, AT91_SAMA5D2_TSMR, tsmr);
+
+ acr = at91_adc_readl(st, AT91_SAMA5D2_ACR);
+ acr &= ~AT91_SAMA5D2_ACR_PENDETSENS_MASK;
+ acr |= 0x02 & AT91_SAMA5D2_ACR_PENDETSENS_MASK;
+ at91_adc_writel(st, AT91_SAMA5D2_ACR, acr);
+
+ /* Sample Period Time = (TRGPER + 1) / ADCClock */
+ st->touch_st.sample_period_val =
+ round_up((AT91_SAMA5D2_TOUCH_SAMPLE_PERIOD_US *
+ clk_khz / 1000) - 1, 1);
+ /* enable pen detect IRQ */
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_PEN);
+
+ return 0;
+}
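The debounce arithmetic above is easy to misread; this standalone sketch (assumed debounce time and clock rate, hypothetical names) reproduces it: convert the debounce time into ADC clock periods, then derive the exponent the PENDBC field expects, since the hardware debounces for 2^PENDBC periods.

#include <stdio.h>

#define DEBOUNCE_US     200     /* assumed pen-detect debounce time */

static unsigned int pendbc_exponent(unsigned int clk_khz)
{
        unsigned int periods = DEBOUNCE_US * clk_khz / 1000;
        unsigned int i = 0;

        if (!periods)
                periods = 1;    /* round up: debounce at least 1 period */

        /* same loop as the driver: i ends at fls(periods) + 1 */
        while (periods >> i++)
                ;
        return i;
}

int main(void)
{
        /* a 1 MHz ADC clock gives 200 periods -> exponent 9 (2^9 = 512) */
        printf("PENDBC = %u\n", pendbc_exponent(1000));
        return 0;
}

With 2^9 = 512 periods the debounce window always covers the requested time.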
+
+static u16 at91_adc_touch_pos(struct at91_adc_state *st, int reg)
+{
+ u32 val;
+ u32 scale, result, pos;
+
+ /*
+ * to obtain the actual position we must divide by scale
+ * and multiply with max, where
+ * max = 2^AT91_SAMA5D2_MAX_POS_BITS - 1
+ */
+ /* first half of register is the x or y, second half is the scale */
+ val = at91_adc_readl(st, reg);
+ if (!val)
+ dev_dbg(&iio_priv_to_dev(st)->dev, "pos is 0\n");
+
+ pos = val & AT91_SAMA5D2_XYZ_MASK;
+ result = (pos << AT91_SAMA5D2_MAX_POS_BITS) - pos;
+ scale = (val >> 16) & AT91_SAMA5D2_XYZ_MASK;
+ if (scale == 0) {
+ dev_err(&iio_priv_to_dev(st)->dev, "scale is 0\n");
+ return 0;
+ }
+ result /= scale;
+
+ return result;
+}
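The scaling in at91_adc_touch_pos() computes pos * (2^MAX_POS_BITS - 1) / scale; the shift-and-subtract form avoids a multiplication. A small standalone sketch, assuming a 12-bit position width:

#include <stdio.h>
#include <stdint.h>

#define MAX_POS_BITS    12      /* assumed width of the reported position */

static uint16_t scale_pos(uint32_t pos, uint32_t scale)
{
        uint32_t result;

        if (!scale)
                return 0;       /* guard against dividing by zero */

        /* pos * (2^12 - 1), written as a shift and a subtraction */
        result = (pos << MAX_POS_BITS) - pos;
        return result / scale;
}

int main(void)
{
        /* mid-range raw position against a full-range scale */
        printf("%u\n", scale_pos(2048, 4095));  /* prints 2048 */
        return 0;
}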
+
+static u16 at91_adc_touch_x_pos(struct at91_adc_state *st)
+{
+ st->touch_st.x_pos = at91_adc_touch_pos(st, AT91_SAMA5D2_XPOSR);
+ return st->touch_st.x_pos;
+}
+
+static u16 at91_adc_touch_y_pos(struct at91_adc_state *st)
+{
+ return at91_adc_touch_pos(st, AT91_SAMA5D2_YPOSR);
+}
+
+static u16 at91_adc_touch_pressure(struct at91_adc_state *st)
+{
+ u32 val;
+ u32 z1, z2;
+ u32 pres;
+ u32 rxp = 1;
+ u32 factor = 1000;
+
+ /* calculate the pressure */
+ val = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
+ z1 = val & AT91_SAMA5D2_XYZ_MASK;
+ z2 = (val >> 16) & AT91_SAMA5D2_XYZ_MASK;
+
+ if (z1 != 0)
+ pres = rxp * (st->touch_st.x_pos * factor / 1024) *
+ (z2 * factor / z1 - factor) /
+ factor;
+ else
+ pres = 0xFFFF; /* no pen contact */
+
+ /*
+ * The device reports pressure inverted: 0xFFFF means no contact and
+ * 0x0 maximum pressure. Invert the computed value so it grows from
+ * 0 to 0xFFFF with increasing pressure, as consumers expect.
+ */
+ return 0xFFFF - pres;
+}
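The computation above is the usual 4-wire resistive-touch pressure estimate, Rtouch = Rx * (x / 2^bits) * (z2 / z1 - 1), carried out in integer math with a factor of 1000. A worked standalone version with assumed inputs:

#include <stdio.h>
#include <stdint.h>

/* Integer form of Rtouch = rxp * (x/1024) * (z2/z1 - 1).
 * Assumes z2 >= z1, as measured on a pressed panel. */
static uint32_t touch_pressure(uint32_t x_pos, uint32_t z1, uint32_t z2)
{
        const uint32_t rxp = 1, factor = 1000;

        if (!z1)
                return 0xFFFF;  /* no pen contact */

        return rxp * (x_pos * factor / 1024) *
               (z2 * factor / z1 - factor) / factor;
}

int main(void)
{
        /* firm press: z2 close to z1 yields a small contact resistance */
        printf("%u\n", 0xFFFF - touch_pressure(512, 1000, 1100)); /* 65485 */
        return 0;
}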
+
+static int at91_adc_read_position(struct at91_adc_state *st, int chan, u16 *val)
+{
+ *val = 0;
+ if (!st->touch_st.touching)
+ return -ENODATA;
+ if (chan == AT91_SAMA5D2_TOUCH_X_CHAN_IDX)
+ *val = at91_adc_touch_x_pos(st);
+ else if (chan == AT91_SAMA5D2_TOUCH_Y_CHAN_IDX)
+ *val = at91_adc_touch_y_pos(st);
+ else
+ return -ENODATA;
+
+ return IIO_VAL_INT;
+}
+
+static int at91_adc_read_pressure(struct at91_adc_state *st, int chan, u16 *val)
+{
+ *val = 0;
+ if (!st->touch_st.touching)
+ return -ENODATA;
+ if (chan == AT91_SAMA5D2_TOUCH_P_CHAN_IDX)
+ *val = at91_adc_touch_pressure(st);
+ else
+ return -ENODATA;
+
+ return IIO_VAL_INT;
+}
+
static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
@@ -375,6 +734,11 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
if (!chan)
continue;
+ /* these channel types cannot be handled by this trigger */
+ if (chan->type == IIO_POSITIONRELATIVE ||
+ chan->type == IIO_PRESSURE)
+ continue;
+
if (state) {
at91_adc_writel(st, AT91_SAMA5D2_CHER,
BIT(chan->channel));
@@ -520,7 +884,20 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
{
int ret;
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ /* check if we are enabling triggered buffer or the touchscreen */
+ if (bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
+ /* touchscreen enabling */
+ return at91_adc_configure_touch(st, true);
+ }
+ /* if we are not in triggered mode, we cannot enable the buffer. */
+ if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ return -EINVAL;
+
+ /* we continue with the triggered buffer */
ret = at91_adc_dma_start(indio_dev);
if (ret) {
dev_err(&indio_dev->dev, "buffer postenable failed\n");
@@ -536,6 +913,18 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
int ret;
u8 bit;
+ /* check if we are disabling triggered buffer or the touchscreen */
+ if (bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
+ /* touchscreen disable */
+ return at91_adc_configure_touch(st, false);
+ }
+ /* if we are not in triggered mode, the buffer cannot have been enabled */
+ if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ return -EINVAL;
+
+ /* continue with the triggered buffer */
ret = iio_triggered_buffer_predisable(indio_dev);
if (ret < 0)
dev_err(&indio_dev->dev, "buffer predisable failed\n");
@@ -558,6 +947,10 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
if (!chan)
continue;
+ /* these channel types are virtual, no need to do anything */
+ if (chan->type == IIO_POSITIONRELATIVE ||
+ chan->type == IIO_PRESSURE)
+ continue;
if (st->dma_st.dma_chan)
at91_adc_readl(st, chan->address);
}
@@ -613,6 +1006,7 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
{
struct at91_adc_state *st = iio_priv(indio_dev);
int i = 0;
+ int val;
u8 bit;
for_each_set_bit(bit, indio_dev->active_scan_mask,
@@ -622,7 +1016,24 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
if (!chan)
continue;
- st->buffer[i] = at91_adc_readl(st, chan->address);
+ /*
+ * Our external trigger only supports voltage channels. If a
+ * different channel type was somehow requested, store zeroes in
+ * the buffer. This should never happen: the scan mask is checked
+ * when the buffer is enabled, and a mixed mask (voltage plus
+ * anything else) is rejected. Warn if it happens anyway.
+ */
+ if (chan->type == IIO_VOLTAGE) {
+ val = at91_adc_readl(st, chan->address);
+ at91_adc_adjust_val_osr(st, &val);
+ st->buffer[i] = val;
+ } else {
+ st->buffer[i] = 0;
+ WARN(true, "This trigger cannot handle this type of channel");
+ }
i++;
}
iio_push_to_buffers_with_timestamp(indio_dev, st->buffer,
@@ -654,6 +1065,14 @@ static void at91_adc_trigger_handler_dma(struct iio_dev *indio_dev)
interval = div_s64((ns - st->dma_st.dma_ts), sample_count);
while (transferred_len >= sample_size) {
+ /*
+ * for all the values in the current sample,
+ * adjust the values inside the buffer for oversampling
+ */
+ at91_adc_adjust_val_osr_array(st,
+ &st->dma_st.rx_buf[st->dma_st.buf_idx],
+ sample_size);
+
iio_push_to_buffers_with_timestamp(indio_dev,
(st->dma_st.rx_buf + st->dma_st.buf_idx),
(st->dma_st.dma_ts + interval * sample_index));
@@ -688,9 +1107,20 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
static int at91_adc_buffer_init(struct iio_dev *indio)
{
- return devm_iio_triggered_buffer_setup(&indio->dev, indio,
+ struct at91_adc_state *st = iio_priv(indio);
+
+ if (st->selected_trig->hw_trig) {
+ return devm_iio_triggered_buffer_setup(&indio->dev, indio,
&iio_pollfunc_store_time,
&at91_adc_trigger_handler, &at91_buffer_setup_ops);
+ }
+ /*
+ * Prepare the buffer ops in case another buffer gets attached
+ * later (such as a callback buffer for the touchscreen).
+ */
+ indio->setup_ops = &at91_buffer_setup_ops;
+
+ return 0;
}
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -736,19 +1166,83 @@ static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
freq, startup, prescal);
+ st->current_sample_rate = freq;
}
-static unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
+static inline unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
{
- unsigned f_adc, f_per = clk_get_rate(st->per_clk);
- unsigned mr, prescal;
+ return st->current_sample_rate;
+}
- mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
- prescal = (mr >> AT91_SAMA5D2_MR_PRESCAL_OFFSET)
- & AT91_SAMA5D2_MR_PRESCAL_MAX;
- f_adc = f_per / (2 * (prescal + 1));
+static void at91_adc_touch_data_handler(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ u8 bit;
+ u16 val;
+ int i = 0;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+
+ if (chan->type == IIO_POSITIONRELATIVE)
+ at91_adc_read_position(st, chan->channel, &val);
+ else if (chan->type == IIO_PRESSURE)
+ at91_adc_read_pressure(st, chan->channel, &val);
+ else
+ continue;
+ st->buffer[i] = val;
+ i++;
+ }
+ /*
+ * Schedule work to push the data to the buffers.
+ * This is intended for the callback buffer that another driver
+ * registered. We are still in our IRQ handler here; pushing
+ * directly would invoke the other driver's callback from our IRQ
+ * context, which is best avoided. Defer the push until after the
+ * IRQ has completed.
+ */
+ schedule_work(&st->touch_st.workq);
+}
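A minimal kernel-style sketch of the defer-to-workqueue pattern described in the comment above (hypothetical names, not the driver's actual structures): the IRQ path only records the data and schedules the work, and the handler that pushes to consumers runs later in process context.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_state {
        struct work_struct push_work;
        u16 sample;
};

/* Runs in process context, safe to invoke consumers' callbacks. */
static void demo_push_handler(struct work_struct *work)
{
        struct demo_state *st = container_of(work, struct demo_state,
                                             push_work);

        pr_info("pushing sample %u outside IRQ context\n", st->sample);
}

/* Called from the interrupt handler: record the data, defer the push. */
static void demo_irq_path(struct demo_state *st, u16 sample)
{
        st->sample = sample;
        schedule_work(&st->push_work);
}

The work must be set up once beforehand, e.g. INIT_WORK(&st->push_work, demo_push_handler) at probe time, as this driver does.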
+
+static void at91_adc_pen_detect_interrupt(struct at91_adc_state *st)
+{
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_PEN);
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_NOPEN |
+ AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
+ AT91_SAMA5D2_IER_PRDY);
+ at91_adc_writel(st, AT91_SAMA5D2_TRGR,
+ AT91_SAMA5D2_TRGR_TRGMOD_PERIODIC |
+ AT91_SAMA5D2_TRGR_TRGPER(st->touch_st.sample_period_val));
+ st->touch_st.touching = true;
+}
+
+static void at91_adc_no_pen_detect_interrupt(struct at91_adc_state *st)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+
+ at91_adc_writel(st, AT91_SAMA5D2_TRGR,
+ AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER);
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_NOPEN |
+ AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
+ AT91_SAMA5D2_IER_PRDY);
+ st->touch_st.touching = false;
+
+ at91_adc_touch_data_handler(indio_dev);
+
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_PEN);
+}
+
+static void at91_adc_workq_handler(struct work_struct *workq)
+{
+ struct at91_adc_touch *touch_st = container_of(workq,
+ struct at91_adc_touch, workq);
+ struct at91_adc_state *st = container_of(touch_st,
+ struct at91_adc_state, touch_st);
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
- return f_adc;
+ iio_push_to_buffers(indio_dev, st->buffer);
}
static irqreturn_t at91_adc_interrupt(int irq, void *private)
@@ -757,17 +1251,39 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
struct at91_adc_state *st = iio_priv(indio);
u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
+ u32 rdy_mask = AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
+ AT91_SAMA5D2_IER_PRDY;
if (!(status & imr))
return IRQ_NONE;
-
- if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
+ if (status & AT91_SAMA5D2_IER_PEN) {
+ /* pen detected IRQ */
+ at91_adc_pen_detect_interrupt(st);
+ } else if ((status & AT91_SAMA5D2_IER_NOPEN)) {
+ /* nopen detected IRQ */
+ at91_adc_no_pen_detect_interrupt(st);
+ } else if ((status & AT91_SAMA5D2_ISR_PENS) &&
+ ((status & rdy_mask) == rdy_mask)) {
+ /* periodic trigger IRQ - during pen sense */
+ at91_adc_touch_data_handler(indio);
+ } else if (status & AT91_SAMA5D2_ISR_PENS) {
+ /*
+ * touching, but the measurements are not ready yet.
+ * read and ignore.
+ */
+ status = at91_adc_readl(st, AT91_SAMA5D2_XPOSR);
+ status = at91_adc_readl(st, AT91_SAMA5D2_YPOSR);
+ status = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
+ } else if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
+ /* triggered buffer without DMA */
disable_irq_nosync(irq);
iio_trigger_poll(indio->trig);
} else if (iio_buffer_enabled(indio) && st->dma_st.dma_chan) {
+ /* triggered buffer with DMA - should not happen */
disable_irq_nosync(irq);
WARN(true, "Unexpected irq occurred\n");
} else if (!iio_buffer_enabled(indio)) {
+ /* software requested conversion */
st->conversion_value = at91_adc_readl(st, st->chan->address);
st->conversion_done = true;
wake_up_interruptible(&st->wq_data_available);
@@ -775,58 +1291,100 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
return IRQ_HANDLED;
}
-static int at91_adc_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
+static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
{
struct at91_adc_state *st = iio_priv(indio_dev);
u32 cor = 0;
+ u16 tmp_val;
int ret;
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- /* we cannot use software trigger if hw trigger enabled */
+ /*
+ * Keep in mind that we cannot use software trigger or touchscreen
+ * if external trigger is enabled
+ */
+ if (chan->type == IIO_POSITIONRELATIVE) {
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
mutex_lock(&st->lock);
- st->chan = chan;
+ ret = at91_adc_read_position(st, chan->channel,
+ &tmp_val);
+ *val = tmp_val;
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
- if (chan->differential)
- cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET;
-
- at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
- at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
-
- ret = wait_event_interruptible_timeout(st->wq_data_available,
- st->conversion_done,
- msecs_to_jiffies(1000));
- if (ret == 0)
- ret = -ETIMEDOUT;
-
- if (ret > 0) {
- *val = st->conversion_value;
- if (chan->scan_type.sign == 's')
- *val = sign_extend32(*val, 11);
- ret = IIO_VAL_INT;
- st->conversion_done = false;
- }
+ return at91_adc_adjust_val_osr(st, val);
+ }
+ if (chan->type == IIO_PRESSURE) {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
- at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+ ret = at91_adc_read_pressure(st, chan->channel,
+ &tmp_val);
+ *val = tmp_val;
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
- /* Needed to ACK the DRDY interruption */
- at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+ return at91_adc_adjust_val_osr(st, val);
+ }
- mutex_unlock(&st->lock);
+ /* in this case we have a voltage channel */
- iio_device_release_direct_mode(indio_dev);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
return ret;
+ mutex_lock(&st->lock);
+
+ st->chan = chan;
+
+ if (chan->differential)
+ cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET;
+
+ at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+ at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
+ ret = wait_event_interruptible_timeout(st->wq_data_available,
+ st->conversion_done,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ if (ret > 0) {
+ *val = st->conversion_value;
+ ret = at91_adc_adjust_val_osr(st, val);
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(*val, 11);
+ st->conversion_done = false;
+ }
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
+ /* Needed to ACK the DRDY interruption */
+ at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+
+ mutex_unlock(&st->lock);
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
+
+static int at91_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return at91_adc_read_info_raw(indio_dev, chan, val);
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
if (chan->differential)
@@ -838,6 +1396,10 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
*val = at91_adc_get_sample_freq(st);
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = st->oversampling_ratio;
+ return IIO_VAL_INT;
+
default:
return -EINVAL;
}
@@ -849,16 +1411,28 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
{
struct at91_adc_state *st = iio_priv(indio_dev);
- if (mask != IIO_CHAN_INFO_SAMP_FREQ)
- return -EINVAL;
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if ((val != AT91_OSR_1SAMPLES) && (val != AT91_OSR_4SAMPLES) &&
+ (val != AT91_OSR_16SAMPLES))
+ return -EINVAL;
+ /* if no change, optimize out */
+ if (val == st->oversampling_ratio)
+ return 0;
+ st->oversampling_ratio = val;
+ /* update ratio */
+ at91_adc_config_emr(st);
+ return 0;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < st->soc_info.min_sample_rate ||
+ val > st->soc_info.max_sample_rate)
+ return -EINVAL;
- if (val < st->soc_info.min_sample_rate ||
- val > st->soc_info.max_sample_rate)
+ at91_adc_setup_samp_freq(st, val);
+ return 0;
+ default:
return -EINVAL;
-
- at91_adc_setup_samp_freq(st, val);
-
- return 0;
+ }
}
static void at91_adc_dma_init(struct platform_device *pdev)
@@ -974,11 +1548,23 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
return 0;
}
-static const struct iio_info at91_adc_info = {
- .read_raw = &at91_adc_read_raw,
- .write_raw = &at91_adc_write_raw,
- .hwfifo_set_watermark = &at91_adc_set_watermark,
-};
+static int at91_adc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ if (bitmap_subset(scan_mask, &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1))
+ return 0;
+ /*
+ * if the new bitmap is a combination of touchscreen and regular
+ * channels, then we are not fine
+ */
+ if (bitmap_intersects(&st->touch_st.channels_bitmask, scan_mask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1))
+ return -EINVAL;
+ return 0;
+}
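The rule enforced by at91_adc_update_scan_mode() is "all touchscreen or no touchscreen, never mixed". A plain-C sketch of the same check on a single-word bitmap (the channel indices are assumed for illustration):

#include <stdio.h>

#define TOUCH_MASK      (0x7ul << 13)   /* assumed X/Y/pressure indices */

/* 1 if the scan mask is acceptable, 0 if it mixes channel kinds. */
static int scan_mask_ok(unsigned long mask)
{
        if (!(mask & ~TOUCH_MASK))
                return 1;       /* touchscreen-only subset: fine */
        if (mask & TOUCH_MASK)
                return 0;       /* voltage mixed with touch: rejected */
        return 1;               /* voltage-only: fine */
}

int main(void)
{
        printf("%d\n", scan_mask_ok(0x7ul << 13));       /* 1 */
        printf("%d\n", scan_mask_ok(0x3));               /* 1 */
        printf("%d\n", scan_mask_ok(0x3 | (1ul << 13))); /* 0 */
        return 0;
}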
static void at91_adc_hw_init(struct at91_adc_state *st)
{
@@ -992,6 +1578,9 @@ static void at91_adc_hw_init(struct at91_adc_state *st)
AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
+
+ /* configure extended mode register */
+ at91_adc_config_emr(st);
}
static ssize_t at91_adc_get_fifo_state(struct device *dev,
@@ -1022,6 +1611,20 @@ static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
+static IIO_CONST_ATTR(oversampling_ratio_available,
+ __stringify(AT91_OSR_1SAMPLES) " "
+ __stringify(AT91_OSR_4SAMPLES) " "
+ __stringify(AT91_OSR_16SAMPLES));
+
+static struct attribute *at91_adc_attributes[] = {
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group at91_adc_attribute_group = {
+ .attrs = at91_adc_attributes,
+};
+
static const struct attribute *at91_adc_fifo_attributes[] = {
&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
@@ -1030,6 +1633,15 @@ static const struct attribute *at91_adc_fifo_attributes[] = {
NULL,
};
+static const struct iio_info at91_adc_info = {
+ .attrs = &at91_adc_attribute_group,
+ .read_raw = &at91_adc_read_raw,
+ .write_raw = &at91_adc_write_raw,
+ .update_scan_mode = &at91_adc_update_scan_mode,
+ .of_xlate = &at91_adc_of_xlate,
+ .hwfifo_set_watermark = &at91_adc_set_watermark,
+};
+
static int at91_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
@@ -1044,13 +1656,22 @@ static int at91_adc_probe(struct platform_device *pdev)
indio_dev->dev.parent = &pdev->dev;
indio_dev->name = dev_name(&pdev->dev);
- indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
indio_dev->info = &at91_adc_info;
indio_dev->channels = at91_adc_channels;
indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels);
st = iio_priv(indio_dev);
+ bitmap_set(&st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_TOUCH_X_CHAN_IDX, 1);
+ bitmap_set(&st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_TOUCH_Y_CHAN_IDX, 1);
+ bitmap_set(&st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_TOUCH_P_CHAN_IDX, 1);
+
+ st->oversampling_ratio = AT91_OSR_1SAMPLES;
+
ret = of_property_read_u32(pdev->dev.of_node,
"atmel,min-sample-rate-hz",
&st->soc_info.min_sample_rate);
@@ -1100,6 +1721,7 @@ static int at91_adc_probe(struct platform_device *pdev)
init_waitqueue_head(&st->wq_data_available);
mutex_init(&st->lock);
+ INIT_WORK(&st->touch_st.workq, at91_adc_workq_handler);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -1159,13 +1781,13 @@ static int at91_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
- if (st->selected_trig->hw_trig) {
- ret = at91_adc_buffer_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
- goto per_clk_disable_unprepare;
- }
+ ret = at91_adc_buffer_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
+ goto per_clk_disable_unprepare;
+ }
+ if (st->selected_trig->hw_trig) {
ret = at91_adc_trigger_init(indio_dev);
if (ret < 0) {
dev_err(&pdev->dev, "couldn't setup the triggers.\n");
@@ -1272,9 +1894,20 @@ static __maybe_unused int at91_adc_resume(struct device *dev)
at91_adc_hw_init(st);
/* reconfiguring trigger hardware state */
- if (iio_buffer_enabled(indio_dev))
- at91_adc_configure_trigger(st->trig, true);
+ if (!iio_buffer_enabled(indio_dev))
+ return 0;
+
+ /* check if we are enabling triggered buffer or the touchscreen */
+ if (bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
+ /* touchscreen enabling */
+ return at91_adc_configure_touch(st, true);
+ } else {
+ return at91_adc_configure_trigger(st->trig, true);
+ }
+ /* unreachable: both branches above return */
return 0;
vref_disable_resume:
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 9430b54121e0..36b59d8957fb 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -97,6 +97,14 @@ struct hx711_data {
* 2x32-bit channel + 64-bit timestamp
*/
u32 buffer[4];
+ /*
+ * Delay after a rising edge on SCK until the data is ready at DOUT.
+ * This depends on the hx711 itself (the datasheet specifies a
+ * maximum of 100 ns) but also on potential parasitic capacitances
+ * on the wiring.
+ */
+ u32 data_ready_delay_ns;
+ u32 clock_frequency;
};
static int hx711_cycle(struct hx711_data *hx711_data)
@@ -110,6 +118,14 @@ static int hx711_cycle(struct hx711_data *hx711_data)
*/
preempt_disable();
gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
+
+ /*
+ * Wait until DOUT is ready; it turned out that parasitic
+ * capacitances extend the time until DOUT has reached its value.
+ */
+ ndelay(hx711_data->data_ready_delay_ns);
+
val = gpiod_get_value(hx711_data->gpiod_dout);
/*
* here we are not waiting for 0.2 us as suggested by the datasheet,
@@ -120,6 +136,12 @@ static int hx711_cycle(struct hx711_data *hx711_data)
gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
preempt_enable();
+ /*
+ * Make it a square wave to handle cases with capacitance on
+ * PD_SCK.
+ */
+ ndelay(hx711_data->data_ready_delay_ns);
+
return val;
}
@@ -458,6 +480,7 @@ static const struct iio_chan_spec hx711_chan_spec[] = {
static int hx711_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct hx711_data *hx711_data;
struct iio_dev *indio_dev;
int ret;
@@ -530,6 +553,22 @@ static int hx711_probe(struct platform_device *pdev)
hx711_data->gain_set = 128;
hx711_data->gain_chan_a = 128;
+ hx711_data->clock_frequency = 400000;
+ ret = of_property_read_u32(np, "clock-frequency",
+ &hx711_data->clock_frequency);
+
+ /*
+ * datasheet says the high level of PD_SCK has a maximum duration
+ * of 50 microseconds
+ */
+ if (hx711_data->clock_frequency < 20000) {
+ dev_warn(dev, "clock-frequency too low - assuming 400 kHz\n");
+ hx711_data->clock_frequency = 400000;
+ }
+
+ hx711_data->data_ready_delay_ns =
+ 1000000000 / hx711_data->clock_frequency;
+
platform_set_drvdata(pdev, indio_dev);
indio_dev->name = "hx711";
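To make the timing concrete, this standalone sketch reproduces the derivation above: the delay is one full clock period in nanoseconds, and frequencies below 20 kHz fall back to the default so PD_SCK never stays high longer than the 50 microseconds the datasheet allows.

#include <stdio.h>
#include <stdint.h>

/* One full PD_SCK period in nanoseconds, with the driver's fallback. */
static uint32_t data_ready_delay_ns(uint32_t clock_frequency)
{
        if (clock_frequency < 20000)    /* would hold PD_SCK high > 50 us */
                clock_frequency = 400000;
        return 1000000000u / clock_frequency;
}

int main(void)
{
        printf("%u ns\n", data_ready_delay_ns(400000)); /* 2500 ns */
        printf("%u ns\n", data_ready_delay_ns(10000));  /* fallback: 2500 */
        return 0;
}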
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 0635a79864bf..d1239624187d 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
+#include <linux/sched/task.h>
#include <linux/util_macros.h>
#include <linux/platform_data/ina2xx.h>
@@ -826,6 +827,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
+ struct task_struct *task;
dev_dbg(&indio_dev->dev, "Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n",
(unsigned int)(*indio_dev->active_scan_mask),
@@ -835,11 +837,17 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
- chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
- "%s:%d-%uus", indio_dev->name, indio_dev->id,
- sampling_us);
+ task = kthread_create(ina2xx_capture_thread, (void *)indio_dev,
+ "%s:%d-%uus", indio_dev->name, indio_dev->id,
+ sampling_us);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ get_task_struct(task);
+ wake_up_process(task);
+ chip->task = task;
- return PTR_ERR_OR_ZERO(chip->task);
+ return 0;
}
static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
@@ -848,6 +856,7 @@ static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
if (chip->task) {
kthread_stop(chip->task);
+ put_task_struct(chip->task);
chip->task = NULL;
}
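The switch from kthread_run() to kthread_create() plus get_task_struct() closes a lifetime race: without the extra reference, the capture thread could exit and free its task_struct before ina2xx_buffer_disable() calls kthread_stop(). A hedged kernel-style sketch of the pattern (hypothetical names):

#include <linux/kthread.h>
#include <linux/sched/task.h>

/* Start a thread while pinning its task_struct for a later stop. */
static struct task_struct *demo_start(int (*fn)(void *), void *arg)
{
        struct task_struct *task;

        task = kthread_create(fn, arg, "demo-capture");
        if (IS_ERR(task))
                return task;

        get_task_struct(task);  /* keep the task valid until we stop it */
        wake_up_process(task);
        return task;
}

static void demo_stop(struct task_struct *task)
{
        kthread_stop(task);
        put_task_struct(task);  /* drop the pinning reference */
}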
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 7fb4f525714a..a8d35aebee80 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1577,7 +1577,6 @@ static int max1363_probe(struct i2c_client *client,
struct max1363_state *st;
struct iio_dev *indio_dev;
struct regulator *vref;
- const struct of_device_id *match;
indio_dev = devm_iio_device_alloc(&client->dev,
sizeof(struct max1363_state));
@@ -1604,11 +1603,8 @@ static int max1363_probe(struct i2c_client *client,
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
- match = of_match_device(of_match_ptr(max1363_of_match),
- &client->dev);
- if (match)
- st->chip_info = of_device_get_match_data(&client->dev);
- else
+ st->chip_info = of_device_get_match_data(&client->dev);
+ if (!st->chip_info)
st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 2948909f3ee3..da2d16dfa63e 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -922,6 +922,11 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
.name = "meson-meson8b-saradc",
};
+static const struct meson_sar_adc_data meson_sar_adc_meson8m2_data = {
+ .param = &meson_sar_adc_meson8_param,
+ .name = "meson-meson8m2-saradc",
+};
+
static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
.param = &meson_sar_adc_gxbb_param,
.name = "meson-gxbb-saradc",
@@ -952,6 +957,10 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
.data = &meson_sar_adc_meson8b_data,
},
{
+ .compatible = "amlogic,meson8m2-saradc",
+ .data = &meson_sar_adc_meson8m2_data,
+ },
+ {
.compatible = "amlogic,meson-gxbb-saradc",
.data = &meson_sar_adc_gxbb_data,
}, {
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
new file mode 100644
index 000000000000..2b60efea0c39
--- /dev/null
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/hwspinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* PMIC global registers definition */
+#define SC27XX_MODULE_EN 0xc08
+#define SC27XX_MODULE_ADC_EN BIT(5)
+#define SC27XX_ARM_CLK_EN 0xc10
+#define SC27XX_CLK_ADC_EN BIT(5)
+#define SC27XX_CLK_ADC_CLK_EN BIT(6)
+
+/* ADC controller registers definition */
+#define SC27XX_ADC_CTL 0x0
+#define SC27XX_ADC_CH_CFG 0x4
+#define SC27XX_ADC_DATA 0x4c
+#define SC27XX_ADC_INT_EN 0x50
+#define SC27XX_ADC_INT_CLR 0x54
+#define SC27XX_ADC_INT_STS 0x58
+#define SC27XX_ADC_INT_RAW 0x5c
+
+/* Bits and mask definition for SC27XX_ADC_CTL register */
+#define SC27XX_ADC_EN BIT(0)
+#define SC27XX_ADC_CHN_RUN BIT(1)
+#define SC27XX_ADC_12BIT_MODE BIT(2)
+#define SC27XX_ADC_RUN_NUM_MASK GENMASK(7, 4)
+#define SC27XX_ADC_RUN_NUM_SHIFT 4
+
+/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
+#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
+#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
+#define SC27XX_ADC_SCALE_SHIFT 8
+
+/* Bits definitions for SC27XX_ADC_INT_EN registers */
+#define SC27XX_ADC_IRQ_EN BIT(0)
+
+/* Bits definitions for SC27XX_ADC_INT_CLR registers */
+#define SC27XX_ADC_IRQ_CLR BIT(0)
+
+/* Mask definition for SC27XX_ADC_DATA register */
+#define SC27XX_ADC_DATA_MASK GENMASK(11, 0)
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SC27XX_ADC_HWLOCK_TIMEOUT 5000
+
+/* Maximum ADC channel number */
+#define SC27XX_ADC_CHANNEL_MAX 32
+
+/* ADC voltage ratio definition */
+#define SC27XX_VOLT_RATIO(n, d) \
+ (((n) << SC27XX_RATIO_NUMERATOR_OFFSET) | (d))
+#define SC27XX_RATIO_NUMERATOR_OFFSET 16
+#define SC27XX_RATIO_DENOMINATOR_MASK GENMASK(15, 0)
+
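The ratio macro packs a numerator and denominator into one u32: the numerator in the upper 16 bits, the denominator in the lower 16. A quick standalone demonstration of the round trip:

#include <stdio.h>
#include <stdint.h>

#define RATIO_NUMERATOR_OFFSET  16
#define RATIO_DENOMINATOR_MASK  0xFFFFu
#define VOLT_RATIO(n, d)        (((n) << RATIO_NUMERATOR_OFFSET) | (d))

int main(void)
{
        uint32_t ratio = VOLT_RATIO(7, 29);     /* channel 5's ratio */
        uint32_t num = ratio >> RATIO_NUMERATOR_OFFSET;
        uint32_t den = ratio & RATIO_DENOMINATOR_MASK;

        printf("%u/%u\n", num, den);            /* prints 7/29 */
        return 0;
}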
+struct sc27xx_adc_data {
+ struct device *dev;
+ struct regmap *regmap;
+ /*
+ * One hardware spinlock to synchronize the multiple subsystems
+ * that access the single ADC controller.
+ */
+ struct hwspinlock *hwlock;
+ struct completion completion;
+ int channel_scale[SC27XX_ADC_CHANNEL_MAX];
+ u32 base;
+ int value;
+ int irq;
+};
+
+struct sc27xx_adc_linear_graph {
+ int volt0;
+ int adc0;
+ int volt1;
+ int adc1;
+};
+
+/*
+ * According to the datasheet, an ADC value is converted to a voltage
+ * through two points on a linear graph. For voltages below 1.2 V the
+ * small-scale graph should be used; above 1.2 V, the big-scale graph.
+ */
+static const struct sc27xx_adc_linear_graph big_scale_graph = {
+ 4200, 3310,
+ 3600, 2832,
+};
+
+static const struct sc27xx_adc_linear_graph small_scale_graph = {
+ 1000, 3413,
+ 100, 341,
+};
+
+static int sc27xx_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ return scale ? SC27XX_VOLT_RATIO(400, 1025) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 5:
+ return SC27XX_VOLT_RATIO(7, 29);
+ case 6:
+ return SC27XX_VOLT_RATIO(375, 9000);
+ case 7:
+ case 8:
+ return scale ? SC27XX_VOLT_RATIO(100, 125) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 19:
+ return SC27XX_VOLT_RATIO(1, 3);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+}
+
+static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
+ int scale, int *val)
+{
+ int ret;
+ u32 tmp;
+
+ reinit_completion(&data->completion);
+
+ ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(data->dev, "timeout to get the hwspinlock\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_EN, SC27XX_ADC_EN);
+ if (ret)
+ goto unlock_adc;
+
+ /* Configure the channel id and scale */
+ tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK;
+ tmp |= channel & SC27XX_ADC_CHN_ID_MASK;
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CH_CFG,
+ SC27XX_ADC_CHN_ID_MASK | SC27XX_ADC_SCALE_MASK,
+ tmp);
+ if (ret)
+ goto disable_adc;
+
+ /* Select 12bit conversion mode, and only sample 1 time */
+ tmp = SC27XX_ADC_12BIT_MODE;
+ tmp |= (0 << SC27XX_ADC_RUN_NUM_SHIFT) & SC27XX_ADC_RUN_NUM_MASK;
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_RUN_NUM_MASK | SC27XX_ADC_12BIT_MODE,
+ tmp);
+ if (ret)
+ goto disable_adc;
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_CHN_RUN, SC27XX_ADC_CHN_RUN);
+ if (ret)
+ goto disable_adc;
+
+ wait_for_completion(&data->completion);
+
+disable_adc:
+ regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_EN, 0);
+unlock_adc:
+ hwspin_unlock_raw(data->hwlock);
+
+ if (!ret)
+ *val = data->value;
+
+ return ret;
+}
+
+static irqreturn_t sc27xx_adc_isr(int irq, void *dev_id)
+{
+ struct sc27xx_adc_data *data = dev_id;
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
+ SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
+ ret = regmap_read(data->regmap, data->base + SC27XX_ADC_DATA,
+ &data->value);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
+ data->value &= SC27XX_ADC_DATA_MASK;
+ complete(&data->completion);
+
+ return IRQ_HANDLED;
+}
+
+static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data,
+ int channel, int scale,
+ u32 *div_numerator, u32 *div_denominator)
+{
+ u32 ratio = sc27xx_adc_get_ratio(channel, scale);
+
+ *div_numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET;
+ *div_denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK;
+}
+
+static int sc27xx_adc_to_volt(const struct sc27xx_adc_linear_graph *graph,
+ int raw_adc)
+{
+ int tmp;
+
+ tmp = (graph->volt0 - graph->volt1) * (raw_adc - graph->adc1);
+ tmp /= (graph->adc0 - graph->adc1);
+ tmp += graph->volt1;
+
+ return tmp < 0 ? 0 : tmp;
+}
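sc27xx_adc_to_volt() is a plain two-point linear interpolation between (adc0, volt0) and (adc1, volt1). A standalone check with the small-scale graph values from above:

#include <stdio.h>

struct graph { int volt0, adc0, volt1, adc1; };

static int adc_to_volt(const struct graph *g, int raw)
{
        int tmp = (g->volt0 - g->volt1) * (raw - g->adc1);

        tmp /= (g->adc0 - g->adc1);
        tmp += g->volt1;
        return tmp < 0 ? 0 : tmp;       /* clamp negative results */
}

int main(void)
{
        /* small-scale graph: (3413, 1000 mV) and (341, 100 mV) */
        struct graph small = { 1000, 3413, 100, 341 };

        printf("%d mV\n", adc_to_volt(&small, 3413));   /* 1000 */
        printf("%d mV\n", adc_to_volt(&small, 341));    /* 100 */
        return 0;
}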
+
+static int sc27xx_adc_convert_volt(struct sc27xx_adc_data *data, int channel,
+ int scale, int raw_adc)
+{
+ u32 numerator, denominator;
+ u32 volt;
+
+ /*
+ * Convert the ADC value to a voltage via the linear graph. Channels
+ * 5 and 1 have been calibrated, so their graph result can be
+ * returned directly. Other channels must additionally be scaled to
+ * the real voltage using the channel's voltage ratio.
+ */
+ switch (channel) {
+ case 5:
+ return sc27xx_adc_to_volt(&big_scale_graph, raw_adc);
+
+ case 1:
+ return sc27xx_adc_to_volt(&small_scale_graph, raw_adc);
+
+ default:
+ volt = sc27xx_adc_to_volt(&small_scale_graph, raw_adc);
+ break;
+ }
+
+ sc27xx_adc_volt_ratio(data, channel, scale, &numerator, &denominator);
+
+ return (volt * denominator + numerator / 2) / numerator;
+}
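The final scaling multiplies by the denominator and divides by the numerator, adding numerator/2 first so the division rounds to nearest instead of truncating. For instance, with channel 6's 375/9000 ratio a 1000 mV graph result corresponds to 24 V at the pin:

#include <stdio.h>

/* Round-to-nearest scaling of the graph voltage by den/num. */
static unsigned int scale_volt(unsigned int volt,
                               unsigned int num, unsigned int den)
{
        return (volt * den + num / 2) / num;
}

int main(void)
{
        printf("%u mV\n", scale_volt(1000, 375, 9000)); /* 24000 */
        return 0;
}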
+
+static int sc27xx_adc_read_processed(struct sc27xx_adc_data *data,
+ int channel, int scale, int *val)
+{
+ int ret, raw_adc;
+
+ ret = sc27xx_adc_read(data, channel, scale, &raw_adc);
+ if (ret)
+ return ret;
+
+ *val = sc27xx_adc_convert_volt(data, channel, scale, raw_adc);
+ return 0;
+}
+
+static int sc27xx_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct sc27xx_adc_data *data = iio_priv(indio_dev);
+ int scale = data->channel_scale[chan->channel];
+ int ret, tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&indio_dev->mlock);
+ ret = sc27xx_adc_read_processed(data, chan->channel, scale,
+ &tmp);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret)
+ return ret;
+
+ *val = tmp;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = scale;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sc27xx_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct sc27xx_adc_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ data->channel_scale[chan->channel] = val;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info sc27xx_info = {
+ .read_raw = &sc27xx_adc_read_raw,
+ .write_raw = &sc27xx_adc_write_raw,
+};
+
+#define SC27XX_ADC_CHANNEL(index) { \
+ .type = IIO_VOLTAGE, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = "CH" #index, \
+ .indexed = 1, \
+}
+
+static const struct iio_chan_spec sc27xx_channels[] = {
+ SC27XX_ADC_CHANNEL(0),
+ SC27XX_ADC_CHANNEL(1),
+ SC27XX_ADC_CHANNEL(2),
+ SC27XX_ADC_CHANNEL(3),
+ SC27XX_ADC_CHANNEL(4),
+ SC27XX_ADC_CHANNEL(5),
+ SC27XX_ADC_CHANNEL(6),
+ SC27XX_ADC_CHANNEL(7),
+ SC27XX_ADC_CHANNEL(8),
+ SC27XX_ADC_CHANNEL(9),
+ SC27XX_ADC_CHANNEL(10),
+ SC27XX_ADC_CHANNEL(11),
+ SC27XX_ADC_CHANNEL(12),
+ SC27XX_ADC_CHANNEL(13),
+ SC27XX_ADC_CHANNEL(14),
+ SC27XX_ADC_CHANNEL(15),
+ SC27XX_ADC_CHANNEL(16),
+ SC27XX_ADC_CHANNEL(17),
+ SC27XX_ADC_CHANNEL(18),
+ SC27XX_ADC_CHANNEL(19),
+ SC27XX_ADC_CHANNEL(20),
+ SC27XX_ADC_CHANNEL(21),
+ SC27XX_ADC_CHANNEL(22),
+ SC27XX_ADC_CHANNEL(23),
+ SC27XX_ADC_CHANNEL(24),
+ SC27XX_ADC_CHANNEL(25),
+ SC27XX_ADC_CHANNEL(26),
+ SC27XX_ADC_CHANNEL(27),
+ SC27XX_ADC_CHANNEL(28),
+ SC27XX_ADC_CHANNEL(29),
+ SC27XX_ADC_CHANNEL(30),
+ SC27XX_ADC_CHANNEL(31),
+};
+
+static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ SC27XX_MODULE_ADC_EN, SC27XX_MODULE_ADC_EN);
+ if (ret)
+ return ret;
+
+ /* Enable ADC work clock and controller clock */
+ ret = regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
+ if (ret)
+ goto disable_adc;
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_EN,
+ SC27XX_ADC_IRQ_EN, SC27XX_ADC_IRQ_EN);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
+disable_adc:
+ regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ SC27XX_MODULE_ADC_EN, 0);
+
+ return ret;
+}
+
+static void sc27xx_adc_disable(void *_data)
+{
+ struct sc27xx_adc_data *data = _data;
+
+ regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_EN,
+ SC27XX_ADC_IRQ_EN, 0);
+
+ /* Disable ADC work clock and controller clock */
+ regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
+
+ regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ SC27XX_MODULE_ADC_EN, 0);
+}
+
+static void sc27xx_adc_free_hwlock(void *_data)
+{
+ struct hwspinlock *hwlock = _data;
+
+ hwspin_lock_free(hwlock);
+}
+
+static int sc27xx_adc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sc27xx_adc_data *sc27xx_data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*sc27xx_data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ sc27xx_data = iio_priv(indio_dev);
+
+ sc27xx_data->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sc27xx_data->regmap) {
+ dev_err(&pdev->dev, "failed to get ADC regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &sc27xx_data->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get ADC base address\n");
+ return ret;
+ }
+
+ sc27xx_data->irq = platform_get_irq(pdev, 0);
+ if (sc27xx_data->irq < 0) {
+ dev_err(&pdev->dev, "failed to get ADC irq number\n");
+ return sc27xx_data->irq;
+ }
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwspinlock id\n");
+ return ret;
+ }
+
+ sc27xx_data->hwlock = hwspin_lock_request_specific(ret);
+ if (!sc27xx_data->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwspinlock\n");
+ return -ENXIO;
+ }
+
+ ret = devm_add_action(&pdev->dev, sc27xx_adc_free_hwlock,
+ sc27xx_data->hwlock);
+ if (ret) {
+ sc27xx_adc_free_hwlock(sc27xx_data->hwlock);
+ dev_err(&pdev->dev, "failed to add hwspinlock action\n");
+ return ret;
+ }
+
+ init_completion(&sc27xx_data->completion);
+ sc27xx_data->dev = &pdev->dev;
+
+ ret = sc27xx_adc_enable(sc27xx_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ADC module\n");
+ return ret;
+ }
+
+ ret = devm_add_action(&pdev->dev, sc27xx_adc_disable, sc27xx_data);
+ if (ret) {
+ sc27xx_adc_disable(sc27xx_data);
+ dev_err(&pdev->dev, "failed to add ADC disable action\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, sc27xx_data->irq, NULL,
+ sc27xx_adc_isr, IRQF_ONESHOT,
+ pdev->name, sc27xx_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request ADC irq\n");
+ return ret;
+ }
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &sc27xx_info;
+ indio_dev->channels = sc27xx_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sc27xx_channels);
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ dev_err(&pdev->dev, "could not register iio (ADC)");
+
+ return ret;
+}
+
+static const struct of_device_id sc27xx_adc_of_match[] = {
+ { .compatible = "sprd,sc2731-adc", },
+ { }
+};
+
+static struct platform_driver sc27xx_adc_driver = {
+ .probe = sc27xx_adc_probe,
+ .driver = {
+ .name = "sc27xx-adc",
+ .of_match_table = sc27xx_adc_of_match,
+ },
+};
+
+module_platform_driver(sc27xx_adc_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC27XX ADC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 0225c1b333ab..a5bd5944bc66 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments ADS7950 SPI ADC driver
*
@@ -10,15 +11,6 @@
* And also on hwmon/ads79xx.c
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/acpi.h>
@@ -76,6 +68,9 @@ struct ti_ads7950_state {
__be16 rx_buf[TI_ADS7950_MAX_CHAN + TI_ADS7950_TIMESTAMP_SIZE]
____cacheline_aligned;
__be16 tx_buf[TI_ADS7950_MAX_CHAN];
+ __be16 single_tx;
+ __be16 single_rx;
+
};
struct ti_ads7950_chip_info {
@@ -295,18 +290,26 @@ out:
return IRQ_HANDLED;
}
-static int ti_ads7950_scan_direct(struct ti_ads7950_state *st, unsigned int ch)
+static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
{
+ struct ti_ads7950_state *st = iio_priv(indio_dev);
int ret, cmd;
+ mutex_lock(&indio_dev->mlock);
+
cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings;
- st->tx_buf[0] = cpu_to_be16(cmd);
+ st->single_tx = cpu_to_be16(cmd);
ret = spi_sync(st->spi, &st->scan_single_msg);
if (ret)
- return ret;
+ goto out;
+
+ ret = be16_to_cpu(st->single_rx);
- return be16_to_cpu(st->rx_buf[0]);
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
}
static int ti_ads7950_get_range(struct ti_ads7950_state *st)
@@ -338,13 +341,7 @@ static int ti_ads7950_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
-
- ret = ti_ads7950_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ ret = ti_ads7950_scan_direct(indio_dev, chan->address);
if (ret < 0)
return ret;
@@ -410,13 +407,13 @@ static int ti_ads7950_probe(struct spi_device *spi)
* was read at the end of the first transfer.
*/
- st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
+ st->scan_single_xfer[0].tx_buf = &st->single_tx;
st->scan_single_xfer[0].len = 2;
st->scan_single_xfer[0].cs_change = 1;
- st->scan_single_xfer[1].tx_buf = &st->tx_buf[0];
+ st->scan_single_xfer[1].tx_buf = &st->single_tx;
st->scan_single_xfer[1].len = 2;
st->scan_single_xfer[1].cs_change = 1;
- st->scan_single_xfer[2].rx_buf = &st->rx_buf[0];
+ st->scan_single_xfer[2].rx_buf = &st->single_rx;
st->scan_single_xfer[2].len = 2;
spi_message_init_with_transfers(&st->scan_single_msg,
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index d4f21d1be6c8..3f6be5ac049a 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -322,6 +322,7 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
#define XADC_ZYNQ_TCK_RATE_MAX 50000000
#define XADC_ZYNQ_IGAP_DEFAULT 20
+#define XADC_ZYNQ_PCAP_RATE_MAX 200000000
static int xadc_zynq_setup(struct platform_device *pdev,
struct iio_dev *indio_dev, int irq)
@@ -332,6 +333,7 @@ static int xadc_zynq_setup(struct platform_device *pdev,
unsigned int div;
unsigned int igap;
unsigned int tck_rate;
+ int ret;
/* TODO: Figure out how to make igap and tck_rate configurable */
igap = XADC_ZYNQ_IGAP_DEFAULT;
@@ -340,9 +342,16 @@ static int xadc_zynq_setup(struct platform_device *pdev,
xadc->zynq_intmask = ~0;
pcap_rate = clk_get_rate(xadc->clk);
+ if (!pcap_rate)
+ return -EINVAL;
+
+ if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
+ ret = clk_set_rate(xadc->clk,
+ (unsigned long)XADC_ZYNQ_PCAP_RATE_MAX);
+ if (ret)
+ return ret;
+ }
- if (tck_rate > XADC_ZYNQ_TCK_RATE_MAX)
- tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
if (tck_rate > pcap_rate / 2) {
div = 2;
} else {
@@ -368,6 +377,12 @@ static int xadc_zynq_setup(struct platform_device *pdev,
XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
tck_div | XADC_ZYNQ_CFG_IGAP(igap));
+ if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
+ ret = clk_set_rate(xadc->clk, pcap_rate);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
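The change above temporarily lowers the PCAP clock when it exceeds the 200 MHz setup limit and restores the original rate once the registers are written. A hedged kernel-style sketch of that clamp-and-restore pattern (hypothetical names):

#include <linux/clk.h>

/* Clamp a clock for the duration of a setup step, then restore it. */
static int demo_setup_with_clamped_clk(struct clk *clk,
                                       unsigned long max_rate)
{
        unsigned long old_rate = clk_get_rate(clk);
        int ret;

        if (!old_rate)
                return -EINVAL; /* rate unknown: cannot derive dividers */

        if (old_rate > max_rate) {
                ret = clk_set_rate(clk, max_rate);
                if (ret)
                        return ret;
        }

        /* ... register writes that require the clamped rate ... */

        if (old_rate > max_rate)
                return clk_set_rate(clk, old_rate);
        return 0;
}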
@@ -889,6 +904,9 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
unsigned long clk_rate = xadc_get_dclk_rate(xadc);
unsigned int div;
+ if (!clk_rate)
+ return -EINVAL;
+
if (info != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;
@@ -1045,7 +1063,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
unsigned int num_channels;
const char *external_mux;
u32 ext_mux_chan;
- int reg;
+ u32 reg;
int ret;
*conf = 0;
@@ -1157,6 +1175,7 @@ static int xadc_probe(struct platform_device *pdev)
xadc = iio_priv(indio_dev);
xadc->ops = id->data;
+ xadc->irq = irq;
init_completion(&xadc->completion);
mutex_init(&xadc->mutex);
spin_lock_init(&xadc->lock);
@@ -1207,14 +1226,14 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
goto err_free_samplerate_trigger;
- ret = xadc->ops->setup(pdev, indio_dev, irq);
+ ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
+ dev_name(&pdev->dev), indio_dev);
if (ret)
goto err_clk_disable_unprepare;
- ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
- dev_name(&pdev->dev), indio_dev);
+ ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
if (ret)
- goto err_clk_disable_unprepare;
+ goto err_free_irq;
for (i = 0; i < 16; i++)
xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
@@ -1239,8 +1258,10 @@ static int xadc_probe(struct platform_device *pdev)
goto err_free_irq;
/* Disable all alarms */
- xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
- XADC_CONF1_ALARM_MASK);
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
+ XADC_CONF1_ALARM_MASK);
+ if (ret)
+ goto err_free_irq;
/* Set thresholds to min/max */
for (i = 0; i < 16; i++) {
@@ -1268,7 +1289,7 @@ static int xadc_probe(struct platform_device *pdev)
return 0;
err_free_irq:
- free_irq(irq, indio_dev);
+ free_irq(xadc->irq, indio_dev);
err_clk_disable_unprepare:
clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
@@ -1290,7 +1311,6 @@ static int xadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct xadc *xadc = iio_priv(indio_dev);
- int irq = platform_get_irq(pdev, 0);
iio_device_unregister(indio_dev);
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
@@ -1298,7 +1318,7 @@ static int xadc_remove(struct platform_device *pdev)
iio_trigger_free(xadc->convst_trigger);
iio_triggered_buffer_cleanup(indio_dev);
}
- free_irq(irq, indio_dev);
+ free_irq(xadc->irq, indio_dev);
clk_disable_unprepare(xadc->clk);
cancel_delayed_work(&xadc->zynq_unmask_work);
kfree(xadc->data);
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 62edbdae1244..8c0009585c16 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -68,6 +68,7 @@ struct xadc {
spinlock_t lock;
struct completion completion;
+ int irq;
};
struct xadc_ops {
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index 5cb5be7612b4..b8e005be4f87 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -21,6 +21,29 @@ config ATLAS_PH_SENSOR
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
+config BME680
+ tristate "Bosch Sensortec BME680 sensor driver"
+ depends on (I2C || SPI)
+ select REGMAP
+ select BME680_I2C if I2C
+ select BME680_SPI if SPI
+ help
+ Say yes here to build support for Bosch Sensortec BME680 sensor with
+ temperature, pressure, humidity and gas sensing capability.
+
+ This driver can also be built as a module. If so, the module for I2C
+ would be called bme680_i2c and bme680_spi for SPI support.
+
+config BME680_I2C
+ tristate
+ depends on I2C && BME680
+ select REGMAP_I2C
+
+config BME680_SPI
+ tristate
+ depends on SPI && BME680
+ select REGMAP_SPI
+
config CCS811
tristate "AMS CCS811 VOC sensor"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index a629b29d1e0b..2f4c4ba4d781 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -4,6 +4,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o
+obj-$(CONFIG_BME680) += bme680_core.o
+obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
+obj-$(CONFIG_BME680_SPI) += bme680_spi.o
obj-$(CONFIG_CCS811) += ccs811.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
obj-$(CONFIG_VZ89X) += vz89x.o
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
new file mode 100644
index 000000000000..e049323f209a
--- /dev/null
+++ b/drivers/iio/chemical/bme680.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BME680_H_
+#define BME680_H_
+
+#define BME680_REG_CHIP_I2C_ID 0xD0
+#define BME680_REG_CHIP_SPI_ID 0x50
+#define BME680_CHIP_ID_VAL 0x61
+#define BME680_REG_SOFT_RESET_I2C 0xE0
+#define BME680_REG_SOFT_RESET_SPI 0x60
+#define BME680_CMD_SOFTRESET 0xB6
+#define BME680_REG_STATUS 0x73
+#define BME680_SPI_MEM_PAGE_BIT BIT(4)
+#define BME680_SPI_MEM_PAGE_1_VAL 1
+
+#define BME680_REG_TEMP_MSB 0x22
+#define BME680_REG_PRESS_MSB 0x1F
+#define BM6880_REG_HUMIDITY_MSB 0x25
+#define BME680_REG_GAS_MSB 0x2A
+#define BME680_REG_GAS_R_LSB 0x2B
+#define BME680_GAS_STAB_BIT BIT(4)
+
+#define BME680_REG_CTRL_HUMIDITY 0x72
+#define BME680_OSRS_HUMIDITY_MASK GENMASK(2, 0)
+
+#define BME680_REG_CTRL_MEAS 0x74
+#define BME680_OSRS_TEMP_MASK GENMASK(7, 5)
+#define BME680_OSRS_PRESS_MASK GENMASK(4, 2)
+#define BME680_MODE_MASK GENMASK(1, 0)
+
+#define BME680_MODE_FORCED 1
+#define BME680_MODE_SLEEP 0
+
+#define BME680_REG_CONFIG 0x75
+#define BME680_FILTER_MASK GENMASK(4, 2)
+#define BME680_FILTER_COEFF_VAL BIT(1)
+
+/* TEMP/PRESS/HUMID reading skipped */
+#define BME680_MEAS_SKIPPED 0x8000
+
+#define BME680_MAX_OVERFLOW_VAL 0x40000000
+#define BME680_HUM_REG_SHIFT_VAL 4
+#define BME680_BIT_H1_DATA_MSK 0x0F
+
+#define BME680_REG_RES_HEAT_RANGE 0x02
+#define BME680_RHRANGE_MSK 0x30
+#define BME680_REG_RES_HEAT_VAL 0x00
+#define BME680_REG_RANGE_SW_ERR 0x04
+#define BME680_RSERROR_MSK 0xF0
+#define BME680_REG_RES_HEAT_0 0x5A
+#define BME680_REG_GAS_WAIT_0 0x64
+#define BME680_GAS_RANGE_MASK 0x0F
+#define BME680_ADC_GAS_RES_SHIFT 6
+#define BME680_AMB_TEMP 25
+
+#define BME680_REG_CTRL_GAS_1 0x71
+#define BME680_RUN_GAS_MASK BIT(4)
+#define BME680_NB_CONV_MASK GENMASK(3, 0)
+#define BME680_RUN_GAS_EN_BIT BIT(4)
+#define BME680_NB_CONV_0_VAL 0
+
+#define BME680_REG_MEAS_STAT_0 0x1D
+#define BME680_GAS_MEAS_BIT BIT(6)
+
+/* Calibration Parameters */
+#define BME680_T2_LSB_REG 0x8A
+#define BME680_T3_REG 0x8C
+#define BME680_P1_LSB_REG 0x8E
+#define BME680_P2_LSB_REG 0x90
+#define BME680_P3_REG 0x92
+#define BME680_P4_LSB_REG 0x94
+#define BME680_P5_LSB_REG 0x96
+#define BME680_P7_REG 0x98
+#define BME680_P6_REG 0x99
+#define BME680_P8_LSB_REG 0x9C
+#define BME680_P9_LSB_REG 0x9E
+#define BME680_P10_REG 0xA0
+#define BME680_H2_LSB_REG 0xE2
+#define BME680_H2_MSB_REG 0xE1
+#define BME680_H1_MSB_REG 0xE3
+#define BME680_H1_LSB_REG 0xE2
+#define BME680_H3_REG 0xE4
+#define BME680_H4_REG 0xE5
+#define BME680_H5_REG 0xE6
+#define BME680_H6_REG 0xE7
+#define BME680_H7_REG 0xE8
+#define BME680_T1_LSB_REG 0xE9
+#define BME680_GH2_LSB_REG 0xEB
+#define BME680_GH1_REG 0xED
+#define BME680_GH3_REG 0xEE
+
+extern const struct regmap_config bme680_regmap_config;
+
+int bme680_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name);
+
+#endif /* BME680_H_ */
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
new file mode 100644
index 000000000000..7d9bb62baa3f
--- /dev/null
+++ b/drivers/iio/chemical/bme680_core.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bosch BME680 - Temperature, Pressure, Humidity & Gas Sensor
+ *
+ * Copyright (C) 2017 - 2018 Bosch Sensortec GmbH
+ * Copyright (C) 2018 Himanshu Jha <himanshujha199640@gmail.com>
+ *
+ * Datasheet:
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME680-DS001-00.pdf
+ */
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/log2.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include "bme680.h"
+
+struct bme680_calib {
+ u16 par_t1;
+ s16 par_t2;
+ s8 par_t3;
+ u16 par_p1;
+ s16 par_p2;
+ s8 par_p3;
+ s16 par_p4;
+ s16 par_p5;
+ s8 par_p6;
+ s8 par_p7;
+ s16 par_p8;
+ s16 par_p9;
+ u8 par_p10;
+ u16 par_h1;
+ u16 par_h2;
+ s8 par_h3;
+ s8 par_h4;
+ s8 par_h5;
+ s8 par_h6;
+ s8 par_h7;
+ s8 par_gh1;
+ s16 par_gh2;
+ s8 par_gh3;
+ u8 res_heat_range;
+ s8 res_heat_val;
+ s8 range_sw_err;
+};
+
+struct bme680_data {
+ struct regmap *regmap;
+ struct bme680_calib bme680;
+ u8 oversampling_temp;
+ u8 oversampling_press;
+ u8 oversampling_humid;
+ u16 heater_dur;
+ u16 heater_temp;
+ /*
+ * Carryover value from temperature conversion, used in pressure
+ * and humidity compensation calculations.
+ */
+ s32 t_fine;
+};
+
+const struct regmap_config bme680_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+EXPORT_SYMBOL(bme680_regmap_config);
+
+static const struct iio_chan_spec bme680_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_RESISTANCE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static const int bme680_oversampling_avail[] = { 1, 2, 4, 8, 16 };
+
+static int bme680_read_calib(struct bme680_data *data,
+ struct bme680_calib *calib)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int tmp, tmp_msb, tmp_lsb;
+ int ret;
+ __le16 buf;
+
+ /* Temperature related coefficients */
+ ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_T1_LSB_REG\n");
+ return ret;
+ }
+ calib->par_t1 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_T2_LSB_REG\n");
+ return ret;
+ }
+ calib->par_t2 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_T3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_T3_REG\n");
+ return ret;
+ }
+ calib->par_t3 = tmp;
+
+ /* Pressure related coefficients */
+ ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P1_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p1 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P2_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p2 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_P3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P3_REG\n");
+ return ret;
+ }
+ calib->par_p3 = tmp;
+
+ ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P4_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p4 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P5_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p5 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_P6_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P6_REG\n");
+ return ret;
+ }
+ calib->par_p6 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_P7_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P7_REG\n");
+ return ret;
+ }
+ calib->par_p7 = tmp;
+
+ ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P8_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p8 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P9_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p9 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_P10_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P10_REG\n");
+ return ret;
+ }
+ calib->par_p10 = tmp;
+
+ /* Humidity related coefficients */
+ ret = regmap_read(data->regmap, BME680_H1_MSB_REG, &tmp_msb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H1_MSB_REG\n");
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, BME680_H1_LSB_REG, &tmp_lsb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H1_LSB_REG\n");
+ return ret;
+ }
+
+ calib->par_h1 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
+ (tmp_lsb & BME680_BIT_H1_DATA_MSK);
+
+ ret = regmap_read(data->regmap, BME680_H2_MSB_REG, &tmp_msb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H2_MSB_REG\n");
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, BME680_H2_LSB_REG, &tmp_lsb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H2_LSB_REG\n");
+ return ret;
+ }
+
+ calib->par_h2 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
+ (tmp_lsb >> BME680_HUM_REG_SHIFT_VAL);
+
+ ret = regmap_read(data->regmap, BME680_H3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H3_REG\n");
+ return ret;
+ }
+ calib->par_h3 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H4_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H4_REG\n");
+ return ret;
+ }
+ calib->par_h4 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H5_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H5_REG\n");
+ return ret;
+ }
+ calib->par_h5 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H6_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H6_REG\n");
+ return ret;
+ }
+ calib->par_h6 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H7_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H7_REG\n");
+ return ret;
+ }
+ calib->par_h7 = tmp;
+
+ /* Gas heater related coefficients */
+ ret = regmap_read(data->regmap, BME680_GH1_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_GH1_REG\n");
+ return ret;
+ }
+ calib->par_gh1 = tmp;
+
+ ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_GH2_LSB_REG\n");
+ return ret;
+ }
+ calib->par_gh2 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_GH3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_GH3_REG\n");
+ return ret;
+ }
+ calib->par_gh3 = tmp;
+
+ /* Other coefficients */
+ ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_RANGE, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read resistance heat range\n");
+ return ret;
+ }
+ calib->res_heat_range = (tmp & BME680_RHRANGE_MSK) / 16;
+
+ ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_VAL, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read resistance heat value\n");
+ return ret;
+ }
+ calib->res_heat_val = tmp;
+
+ ret = regmap_read(data->regmap, BME680_REG_RANGE_SW_ERR, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read range software error\n");
+ return ret;
+ }
+ calib->range_sw_err = (tmp & BME680_RSERROR_MSK) / 16;
+
+ return 0;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L876
+ *
+ * Returns temperature measurement in DegC, resolution is 0.01 DegC. Therefore,
+ * output value of "3233" represents 32.33 DegC.
+ */
+static s16 bme680_compensate_temp(struct bme680_data *data,
+ s32 adc_temp)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s64 var1, var2, var3;
+ s16 calc_temp;
+
+ var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
+ var2 = (var1 * calib->par_t2) >> 11;
+ var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
+ var3 = (var3 * (calib->par_t3 << 4)) >> 14;
+ data->t_fine = var2 + var3;
+ calc_temp = (data->t_fine * 5 + 128) >> 8;
+
+ return calc_temp;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L896
+ *
+ * Returns pressure measurement in Pa. Output value of "97356" represents
+ * 97356 Pa = 973.56 hPa.
+ */
+static u32 bme680_compensate_press(struct bme680_data *data,
+ u32 adc_press)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s32 var1, var2, var3, press_comp;
+
+ var1 = (data->t_fine >> 1) - 64000;
+ var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
+ var2 = var2 + (var1 * calib->par_p5 << 1);
+ var2 = (var2 >> 2) + (calib->par_p4 << 16);
+ var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
+ (calib->par_p3 << 5)) >> 3) +
+ ((calib->par_p2 * var1) >> 1);
+ var1 = var1 >> 18;
+ var1 = ((32768 + var1) * calib->par_p1) >> 15;
+ press_comp = 1048576 - adc_press;
+ press_comp = ((press_comp - (var2 >> 12)) * 3125);
+
+ if (press_comp >= BME680_MAX_OVERFLOW_VAL)
+ press_comp = ((press_comp / (u32)var1) << 1);
+ else
+ press_comp = ((press_comp << 1) / (u32)var1);
+
+ var1 = (calib->par_p9 * (((press_comp >> 3) *
+ (press_comp >> 3)) >> 13)) >> 12;
+ var2 = ((press_comp >> 2) * calib->par_p8) >> 13;
+ var3 = ((press_comp >> 8) * (press_comp >> 8) *
+ (press_comp >> 8) * calib->par_p10) >> 17;
+
+ press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
+
+ return press_comp;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L937
+ *
+ * Returns humidity measurement in percent, resolution is 0.001 percent. Output
+ * value of "43215" represents 43.215 %rH.
+ */
+static u32 bme680_compensate_humid(struct bme680_data *data,
+ u16 adc_humid)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s32 var1, var2, var3, var4, var5, var6, temp_scaled, calc_hum;
+
+ temp_scaled = (data->t_fine * 5 + 128) >> 8;
+ var1 = (adc_humid - ((s32) ((s32) calib->par_h1 * 16))) -
+ (((temp_scaled * (s32) calib->par_h3) / 100) >> 1);
+ var2 = ((s32) calib->par_h2 *
+ (((temp_scaled * calib->par_h4) / 100) +
+ (((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
+ >> 6) / 100) + (1 << 14))) >> 10;
+ var3 = var1 * var2;
+ var4 = calib->par_h6 << 7;
+ var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
+ var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
+ var6 = (var4 * var5) >> 1;
+ calc_hum = (((var3 + var6) >> 10) * 1000) >> 12;
+
+ if (calc_hum > 100000) /* Cap at 100%rH */
+ calc_hum = 100000;
+ else if (calc_hum < 0)
+ calc_hum = 0;
+
+ return calc_hum;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L973
+ *
+ * Returns gas measurement in Ohm. Output value of "82986" represents 82986 ohms.
+ */
+static u32 bme680_compensate_gas(struct bme680_data *data, u16 gas_res_adc,
+ u8 gas_range)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s64 var1;
+ u64 var2;
+ s64 var3;
+ u32 calc_gas_res;
+
+ /* Look up table for the possible gas range values */
+ const u32 lookupTable[16] = {2147483647u, 2147483647u,
+ 2147483647u, 2147483647u, 2147483647u,
+ 2126008810u, 2147483647u, 2130303777u,
+ 2147483647u, 2147483647u, 2143188679u,
+ 2136746228u, 2147483647u, 2126008810u,
+ 2147483647u, 2147483647u};
+
+ var1 = ((1340 + (5 * (s64) calib->range_sw_err)) *
+ ((s64) lookupTable[gas_range])) >> 16;
+ var2 = ((gas_res_adc << 15) - 16777216) + var1;
+ var3 = ((125000 << (15 - gas_range)) * var1) >> 9;
+ var3 += (var2 >> 1);
+ calc_gas_res = div64_s64(var3, (s64) var2);
+
+ return calc_gas_res;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L1002
+ */
+static u8 bme680_calc_heater_res(struct bme680_data *data, u16 temp)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s32 var1, var2, var3, var4, var5, heatr_res_x100;
+ u8 heatr_res;
+
+ if (temp > 400) /* Cap temperature */
+ temp = 400;
+
+ var1 = (((s32) BME680_AMB_TEMP * calib->par_gh3) / 1000) * 256;
+ var2 = (calib->par_gh1 + 784) * (((((calib->par_gh2 + 154009) *
+ temp * 5) / 100)
+ + 3276800) / 10);
+ var3 = var1 + (var2 / 2);
+ var4 = (var3 / (calib->res_heat_range + 4));
+ var5 = 131 * calib->res_heat_val + 65536;
+ heatr_res_x100 = ((var4 / var5) - 250) * 34;
+ heatr_res = (heatr_res_x100 + 50) / 100;
+
+ return heatr_res;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L1188
+ */
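+/*
+ * The register encodes the duration as a 6-bit base value plus a 2-bit
+ * 4^factor multiplier, which the loop below works out; e.g. 150 ms ->
+ * 150 / 4 = 37 with factor = 1, so durval = 37 + 64 = 101, which the
+ * sensor decodes as 37 * 4 = 148 ms.
+ */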
+static u8 bme680_calc_heater_dur(u16 dur)
+{
+ u8 durval, factor = 0;
+
+ if (dur >= 0xfc0) {
+ durval = 0xff; /* Max duration */
+ } else {
+ while (dur > 0x3F) {
+ dur = dur / 4;
+ factor += 1;
+ }
+ durval = dur + (factor * 64);
+ }
+
+ return durval;
+}
+
+static int bme680_set_mode(struct bme680_data *data, bool mode)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ if (mode) {
+ ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
+ BME680_MODE_MASK, BME680_MODE_FORCED);
+ if (ret < 0)
+ dev_err(dev, "failed to set forced mode\n");
+
+ } else {
+ ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
+ BME680_MODE_MASK, BME680_MODE_SLEEP);
+ if (ret < 0)
+ dev_err(dev, "failed to set sleep mode\n");
+
+ }
+
+ return ret;
+}
+
+static int bme680_chip_config(struct bme680_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ u8 osrs = FIELD_PREP(BME680_OSRS_HUMIDITY_MASK,
+ data->oversampling_humid + 1);
+ /*
+ * It is highly recommended to set the humidity oversampling before
+ * the temperature/pressure oversampling.
+ */
+ ret = regmap_update_bits(data->regmap, BME680_REG_CTRL_HUMIDITY,
+ BME680_OSRS_HUMIDITY_MASK, osrs);
+ if (ret < 0) {
+ dev_err(dev, "failed to write ctrl_hum register\n");
+ return ret;
+ }
+
+ /* IIR filter settings */
+ ret = regmap_update_bits(data->regmap, BME680_REG_CONFIG,
+ BME680_FILTER_MASK,
+ BME680_FILTER_COEFF_VAL);
+ if (ret < 0) {
+ dev_err(dev, "failed to write config register\n");
+ return ret;
+ }
+
+ osrs = FIELD_PREP(BME680_OSRS_TEMP_MASK, data->oversampling_temp + 1) |
+ FIELD_PREP(BME680_OSRS_PRESS_MASK, data->oversampling_press + 1);
+
+ ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
+ BME680_OSRS_TEMP_MASK |
+ BME680_OSRS_PRESS_MASK,
+ osrs);
+ if (ret < 0)
+ dev_err(dev, "failed to write ctrl_meas register\n");
+
+ return ret;
+}
+
+static int bme680_gas_config(struct bme680_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ u8 heatr_res, heatr_dur;
+
+ heatr_res = bme680_calc_heater_res(data, data->heater_temp);
+
+ /* set target heater temperature */
+ ret = regmap_write(data->regmap, BME680_REG_RES_HEAT_0, heatr_res);
+ if (ret < 0) {
+ dev_err(dev, "failed to write res_heat_0 register\n");
+ return ret;
+ }
+
+ heatr_dur = bme680_calc_heater_dur(data->heater_dur);
+
+ /* set target heating duration */
+ ret = regmap_write(data->regmap, BME680_REG_GAS_WAIT_0, heatr_dur);
+ if (ret < 0) {
+ dev_err(dev, "failted to write gas_wait_0 register\n");
+ return ret;
+ }
+
+ /* Select the runGas and NB conversion settings for the sensor */
+ ret = regmap_update_bits(data->regmap, BME680_REG_CTRL_GAS_1,
+ BME680_RUN_GAS_MASK | BME680_NB_CONV_MASK,
+ BME680_RUN_GAS_EN_BIT | BME680_NB_CONV_0_VAL);
+ if (ret < 0)
+ dev_err(dev, "failed to write ctrl_gas_1 register\n");
+
+ return ret;
+}
+
+static int bme680_read_temp(struct bme680_data *data,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_temp;
+ s16 comp_temp;
+
+ /* set forced mode to trigger measurement */
+ ret = bme680_set_mode(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ adc_temp = be32_to_cpu(tmp) >> 12;
+ if (adc_temp == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading temperature skipped\n");
+ return -EINVAL;
+ }
+ comp_temp = bme680_compensate_temp(data, adc_temp);
+ /*
+ * val might be NULL if we're called by the read_press/read_humid
+ * routine, which is called to get the t_fine value used in
+ * compensate_press/compensate_humid to compute the compensated
+ * pressure/humidity readings.
+ */
+ if (val && val2) {
+ *val = comp_temp;
+ *val2 = 100;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return ret;
+}
+
+static int bme680_read_press(struct bme680_data *data,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_press;
+
+ /* Read and compensate temperature to get a reading of t_fine */
+ ret = bme680_read_temp(data, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ adc_press = be32_to_cpu(tmp) >> 12;
+ if (adc_press == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading pressure skipped\n");
+ return -EINVAL;
+ }
+
+ *val = bme680_compensate_press(data, adc_press);
+ *val2 = 100;
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bme680_read_humid(struct bme680_data *data,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be16 tmp = 0;
+ s32 adc_humidity;
+ u32 comp_humidity;
+
+ /* Read and compensate temperature to get a reading of t_fine */
+ ret = bme680_read_temp(data, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_HUMIDITY_MSB,
+ (u8 *) &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read humidity\n");
+ return ret;
+ }
+
+ adc_humidity = be16_to_cpu(tmp);
+ if (adc_humidity == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading humidity skipped\n");
+ return -EINVAL;
+ }
+ comp_humidity = bme680_compensate_humid(data, adc_humidity);
+
+ *val = comp_humidity;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bme680_read_gas(struct bme680_data *data,
+ int *val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be16 tmp = 0;
+ unsigned int check;
+ u16 adc_gas_res;
+ u8 gas_range;
+
+ /* Set heater settings */
+ ret = bme680_gas_config(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to set gas config\n");
+ return ret;
+ }
+
+ /* set forced mode to trigger measurement */
+ ret = bme680_set_mode(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ if (ret < 0) {
+ dev_err(dev, "failed to read meas_stat_0 register\n");
+ return ret;
+ }
+
+ if (check & BME680_GAS_MEAS_BIT) {
+ dev_err(dev, "gas measurement incomplete\n");
+ return -EBUSY;
+ }
+
+ ret = regmap_read(data->regmap, BME680_REG_GAS_R_LSB, &check);
+ if (ret < 0) {
+ dev_err(dev, "failed to read gas_r_lsb register\n");
+ return ret;
+ }
+
+ /*
+ * This happens if either the gas heating duration was insufficient
+ * to reach the target heater temperature or the target heater
+ * temperature was too high for the heater sink to reach.
+ */
+ if ((check & BME680_GAS_STAB_BIT) == 0) {
+ dev_err(dev, "heater failed to reach the target temperature\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
+ (u8 *) &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read gas resistance\n");
+ return ret;
+ }
+
+ gas_range = check & BME680_GAS_RANGE_MASK;
+ adc_gas_res = be16_to_cpu(tmp) >> BME680_ADC_GAS_RES_SHIFT;
+
+ *val = bme680_compensate_gas(data, adc_gas_res, gas_range);
+ return IIO_VAL_INT;
+}
+
+static int bme680_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_TEMP:
+ return bme680_read_temp(data, val, val2);
+ case IIO_PRESSURE:
+ return bme680_read_press(data, val, val2);
+ case IIO_HUMIDITYRELATIVE:
+ return bme680_read_humid(data, val, val2);
+ case IIO_RESISTANCE:
+ return bme680_read_gas(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 1 << data->oversampling_temp;
+ return IIO_VAL_INT;
+ case IIO_PRESSURE:
+ *val = 1 << data->oversampling_press;
+ return IIO_VAL_INT;
+ case IIO_HUMIDITYRELATIVE:
+ *val = 1 << data->oversampling_humid;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bme680_write_oversampling_ratio_temp(struct bme680_data *data,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bme680_oversampling_avail); i++) {
+ if (bme680_oversampling_avail[i] == val) {
+ data->oversampling_temp = ilog2(val);
+
+ return bme680_chip_config(data);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bme680_write_oversampling_ratio_press(struct bme680_data *data,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bme680_oversampling_avail); i++) {
+ if (bme680_oversampling_avail[i] == val) {
+ data->oversampling_press = ilog2(val);
+
+ return bme680_chip_config(data);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bme680_write_oversampling_ratio_humid(struct bme680_data *data,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bme680_oversampling_avail); i++) {
+ if (bme680_oversampling_avail[i] == val) {
+ data->oversampling_humid = ilog2(val);
+
+ return bme680_chip_config(data);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bme680_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_TEMP:
+ return bme680_write_oversampling_ratio_temp(data, val);
+ case IIO_PRESSURE:
+ return bme680_write_oversampling_ratio_press(data, val);
+ case IIO_HUMIDITYRELATIVE:
+ return bme680_write_oversampling_ratio_humid(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const char bme680_oversampling_ratio_show[] = "1 2 4 8 16";
+
+static IIO_CONST_ATTR(oversampling_ratio_available,
+ bme680_oversampling_ratio_show);
+
+static struct attribute *bme680_attributes[] = {
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bme680_attribute_group = {
+ .attrs = bme680_attributes,
+};
+
+static const struct iio_info bme680_info = {
+ .read_raw = &bme680_read_raw,
+ .write_raw = &bme680_write_raw,
+ .attrs = &bme680_attribute_group,
+};
+
+static const char *bme680_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ return dev_name(dev);
+}
+
+int bme680_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct bme680_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!name && ACPI_HANDLE(dev))
+ name = bme680_match_acpi_device(dev);
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->channels = bme680_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bme680_channels);
+ indio_dev->info = &bme680_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* default values for the sensor */
+ data->oversampling_humid = ilog2(2); /* 2X oversampling rate */
+ data->oversampling_press = ilog2(4); /* 4X oversampling rate */
+ data->oversampling_temp = ilog2(8); /* 8X oversampling rate */
+ data->heater_temp = 320; /* degree Celsius */
+ data->heater_dur = 150; /* milliseconds */
+
+ ret = bme680_chip_config(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to set chip_config data\n");
+ return ret;
+ }
+
+ ret = bme680_gas_config(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to set gas config data\n");
+ return ret;
+ }
+
+ ret = bme680_read_calib(data, &data->bme680);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to read calibration coefficients at probe\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(bme680_core_probe);
+
+MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
+MODULE_DESCRIPTION("Bosch BME680 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
new file mode 100644
index 000000000000..06d4be539d2e
--- /dev/null
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BME680 - I2C Driver
+ *
+ * Copyright (C) 2018 Himanshu Jha <himanshujha199640@gmail.com>
+ *
+ * 7-Bit I2C slave address is:
+ * - 0x76 if SDO is pulled to GND
+ * - 0x77 if SDO is pulled to VDDIO
+ *
+ * Note: SDO pin cannot be left floating otherwise I2C address
+ * will be undefined.
+ */
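+/*
+ * A hypothetical devicetree snippet for the 0x76 wiring (the
+ * "bosch,bme680" compatible is an assumption, shown for illustration
+ * only):
+ *
+ *	bme680@76 {
+ *		compatible = "bosch,bme680";
+ *		reg = <0x76>;
+ *	};
+ */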
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bme680.h"
+
+static int bme680_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+ unsigned int val;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to reset chip\n");
+ return ret;
+ }
+
+ ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading I2C chip ID\n");
+ return ret;
+ }
+
+ if (val != BME680_CHIP_ID_VAL) {
+ dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
+ val, BME680_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+
+ if (id)
+ name = id->name;
+
+ return bme680_core_probe(&client->dev, regmap, name);
+}
+
+static const struct i2c_device_id bme680_i2c_id[] = {
+ {"bme680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, bme680_i2c_id);
+
+static const struct acpi_device_id bme680_acpi_match[] = {
+ {"BME0680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
+
+static struct i2c_driver bme680_i2c_driver = {
+ .driver = {
+ .name = "bme680_i2c",
+ .acpi_match_table = ACPI_PTR(bme680_acpi_match),
+ },
+ .probe = bme680_i2c_probe,
+ .id_table = bme680_i2c_id,
+};
+module_i2c_driver(bme680_i2c_driver);
+
+MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
+MODULE_DESCRIPTION("BME680 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
new file mode 100644
index 000000000000..c9fb05e8d0b9
--- /dev/null
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BME680 - SPI Driver
+ *
+ * Copyright (C) 2018 Himanshu Jha <himanshujha199640@gmail.com>
+ */
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "bme680.h"
+
+static int bme680_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct spi_device *spi = context;
+ u8 buf[2];
+
+ memcpy(buf, data, 2);
+ /*
+ * The SPI register address (= full register address without bit 7)
+ * and the write command (bit7 = RW = '0')
+ */
+ buf[0] &= ~0x80;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
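+
+/*
+ * e.g. for a register whose full address has bit 7 set (0x80-0xFF,
+ * i.e. spi_mem_page 0), the mask above folds it into the 7-bit SPI
+ * address space with the RW bit cleared for a write.
+ */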
+
+static int bme680_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct spi_device *spi = context;
+
+ return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static struct regmap_bus bme680_regmap_bus = {
+ .write = bme680_regmap_spi_write,
+ .read = bme680_regmap_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static int bme680_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+ unsigned int val;
+ int ret;
+
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
+
+ regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
+ &spi->dev, &bme680_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to reset chip\n");
+ return ret;
+ }
+
+ /* After power-on reset, Page 0 (0x80-0xFF) of spi_mem_page is active */
+ ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Error reading SPI chip ID\n");
+ return ret;
+ }
+
+ if (val != BME680_CHIP_ID_VAL) {
+ dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
+ val, BME680_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+ /*
+ * Select Page 1 of spi_mem_page to enable access to
+ * registers from address 0x00 to 0x7F.
+ */
+ ret = regmap_write_bits(regmap, BME680_REG_STATUS,
+ BME680_SPI_MEM_PAGE_BIT,
+ BME680_SPI_MEM_PAGE_1_VAL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
+ return ret;
+ }
+
+ return bme680_core_probe(&spi->dev, regmap, id->name);
+}
+
+static const struct spi_device_id bme680_spi_id[] = {
+ {"bme680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(spi, bme680_spi_id);
+
+static const struct acpi_device_id bme680_acpi_match[] = {
+ {"BME0680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
+
+static struct spi_driver bme680_spi_driver = {
+ .driver = {
+ .name = "bme680_spi",
+ .acpi_match_table = ACPI_PTR(bme680_acpi_match),
+ },
+ .probe = bme680_spi_probe,
+ .id_table = bme680_spi_id,
+};
+module_spi_driver(bme680_spi_driver);
+
+MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
+MODULE_DESCRIPTION("Bosch BME680 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 57db19182e95..26fbd1bd9413 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -380,8 +380,7 @@ void st_sensors_of_name_probe(struct device *dev,
return;
/* The name from the OF match takes precedence if present */
- strncpy(name, of_id->data, len);
- name[len - 1] = '\0';
+ strlcpy(name, of_id->data, len);
}
EXPORT_SYMBOL(st_sensors_of_name_probe);
#else
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index b56985078d8c..92be8d0f7735 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -59,6 +59,39 @@ struct quad8_iio {
unsigned int base;
};
+#define QUAD8_REG_CHAN_OP 0x11
+#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
+/* Borrow Toggle flip-flop */
+#define QUAD8_FLAG_BT BIT(0)
+/* Carry Toggle flip-flop */
+#define QUAD8_FLAG_CT BIT(1)
+/* Error flag */
+#define QUAD8_FLAG_E BIT(4)
+/* Up/Down flag */
+#define QUAD8_FLAG_UD BIT(5)
+/* Reset and Load Signal Decoders */
+#define QUAD8_CTR_RLD 0x00
+/* Counter Mode Register */
+#define QUAD8_CTR_CMR 0x20
+/* Input / Output Control Register */
+#define QUAD8_CTR_IOR 0x40
+/* Index Control Register */
+#define QUAD8_CTR_IDR 0x60
+/* Reset Byte Pointer (three byte data pointer) */
+#define QUAD8_RLD_RESET_BP 0x01
+/* Reset Counter */
+#define QUAD8_RLD_RESET_CNTR 0x02
+/* Reset Borrow Toggle, Carry Toggle, Compare Toggle, and Sign flags */
+#define QUAD8_RLD_RESET_FLAGS 0x04
+/* Reset Error flag */
+#define QUAD8_RLD_RESET_E 0x06
+/* Preset Register to Counter */
+#define QUAD8_RLD_PRESET_CNTR 0x08
+/* Transfer Counter to Output Latch */
+#define QUAD8_RLD_CNTR_OUT 0x10
+#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
+#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
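+
+/*
+ * Commands are composed by OR-ing a signal-decoder select with its
+ * flags; e.g. the previous magic value 0x11 written to the control
+ * register is QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT
+ * (0x00 | 0x01 | 0x10).
+ */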
+
static int quad8_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
@@ -72,19 +105,21 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->type == IIO_INDEX) {
- *val = !!(inb(priv->base + 0x16) & BIT(chan->channel));
+ *val = !!(inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
+ & BIT(chan->channel));
return IIO_VAL_INT;
}
flags = inb(base_offset + 1);
- borrow = flags & BIT(0);
- carry = !!(flags & BIT(1));
+ borrow = flags & QUAD8_FLAG_BT;
+ carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
*val = (borrow ^ carry) << 24;
/* Reset Byte Pointer; transfer Counter to Output Latch */
- outb(0x11, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
for (i = 0; i < 3; i++)
*val |= (unsigned int)inb(base_offset) << (8 * i);
@@ -120,17 +155,17 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Counter can only be set via Preset Register */
for (i = 0; i < 3; i++)
outb(val >> (8 * i), base_offset);
/* Transfer Preset Register to Counter */
- outb(0x08, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register back to original value */
val = priv->preset[chan->channel];
@@ -138,9 +173,9 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
outb(val >> (8 * i), base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
- outb(0x02, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
/* Reset Error flag */
- outb(0x06, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
return 0;
case IIO_CHAN_INFO_ENABLE:
@@ -153,7 +188,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
ior_cfg = val | priv->preset_enable[chan->channel] << 1;
/* Load I/O control configuration */
- outb(0x40 | ior_cfg, base_offset + 1);
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
return 0;
case IIO_CHAN_INFO_SCALE:
@@ -217,7 +252,7 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
priv->preset[chan->channel] = preset;
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register */
for (i = 0; i < 3; i++)
@@ -258,7 +293,7 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
(unsigned int)preset_enable << 1;
/* Load I/O control configuration to Input / Output Control Register */
- outb(0x40 | ior_cfg, base_offset);
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
return len;
}
@@ -274,7 +309,7 @@ static int quad8_get_noise_error(struct iio_dev *indio_dev,
struct quad8_iio *const priv = iio_priv(indio_dev);
const int base_offset = priv->base + 2 * chan->channel + 1;
- return !!(inb(base_offset) & BIT(4));
+ return !!(inb(base_offset) & QUAD8_FLAG_E);
}
static const struct iio_enum quad8_noise_error_enum = {
@@ -294,7 +329,7 @@ static int quad8_get_count_direction(struct iio_dev *indio_dev,
struct quad8_iio *const priv = iio_priv(indio_dev);
const int base_offset = priv->base + 2 * chan->channel + 1;
- return !!(inb(base_offset) & BIT(5));
+ return !!(inb(base_offset) & QUAD8_FLAG_UD);
}
static const struct iio_enum quad8_count_direction_enum = {
@@ -324,7 +359,7 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
/* Load mode configuration to Counter Mode Register */
- outb(0x20 | mode_cfg, base_offset);
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
return 0;
}
@@ -364,7 +399,7 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
priv->synchronous_mode[chan->channel] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
- outb(0x60 | idr_cfg, base_offset);
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
return 0;
}
@@ -410,7 +445,7 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
priv->quadrature_mode[chan->channel] = quadrature_mode;
/* Load mode configuration to Counter Mode Register */
- outb(0x20 | mode_cfg, base_offset);
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
return 0;
}
@@ -446,7 +481,7 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
priv->index_polarity[chan->channel] = index_polarity;
/* Load Index Control configuration to Index Control Register */
- outb(0x60 | idr_cfg, base_offset);
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
return 0;
}
@@ -556,28 +591,28 @@ static int quad8_probe(struct device *dev, unsigned int id)
priv->base = base[id];
/* Reset all counters and disable interrupt function */
- outb(0x01, base[id] + 0x11);
+ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
/* Set initial configuration for all counters */
for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
base_offset = base[id] + 2 * i;
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Reset Preset Register */
for (j = 0; j < 3; j++)
outb(0x00, base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
- outb(0x04, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
/* Reset Error flag */
- outb(0x06, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
/* Binary encoding; Normal count; non-quadrature mode */
- outb(0x20, base_offset + 1);
+ outb(QUAD8_CTR_CMR, base_offset + 1);
/* Disable A and B inputs; preset on index; FLG1 as Carry */
- outb(0x40, base_offset + 1);
+ outb(QUAD8_CTR_IOR, base_offset + 1);
/* Disable index function; negative index polarity */
- outb(0x60, base_offset + 1);
+ outb(QUAD8_CTR_IDR, base_offset + 1);
}
/* Enable all counters */
- outb(0x00, base[id] + 0x11);
+ outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 06e90debb9f5..80beb64e9e0c 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -167,6 +167,16 @@ config AD5755
To compile this driver as a module, choose M here: the
module will be called ad5755.
+config AD5758
+ tristate "Analog Devices AD5758 DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5758 single channel
+ Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5758.
+
config AD5761
tristate "Analog Devices AD5761/61R/21/21R DAC driver"
depends on SPI_MASTER
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 57aa230d34ab..a1b37cf99441 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
obj-$(CONFIG_AD5592R) += ad5592r.o
obj-$(CONFIG_AD5593R) += ad5593r.o
obj-$(CONFIG_AD5755) += ad5755.o
+obj-$(CONFIG_AD5758) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index e136f0fd38f0..2ddbfc3fdbae 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -221,6 +221,7 @@ static struct iio_chan_spec name[] = { \
AD5868_CHANNEL(7, 7, bits, _shift), \
}
+DECLARE_AD5693_CHANNELS(ad5311r_channels, 10, 6);
DECLARE_AD5676_CHANNELS(ad5672_channels, 12, 4);
DECLARE_AD5676_CHANNELS(ad5676_channels, 16, 0);
DECLARE_AD5686_CHANNELS(ad5684_channels, 12, 4);
@@ -231,6 +232,12 @@ DECLARE_AD5693_CHANNELS(ad5692r_channels, 14, 2);
DECLARE_AD5693_CHANNELS(ad5691r_channels, 12, 4);
static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
+ [ID_AD5311R] = {
+ .channels = ad5311r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5693_REGMAP,
+ },
[ID_AD5671R] = {
.channels = ad5672_channels,
.int_vref_mv = 2500,
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index d05cda9f1edd..57b3c61bfb91 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -45,6 +45,7 @@
* ad5686_supported_device_ids:
*/
enum ad5686_supported_device_ids {
+ ID_AD5311R,
ID_AD5671R,
ID_AD5672R,
ID_AD5675R,
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index d18735d7d938..7350d9806a11 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -71,6 +71,7 @@ static int ad5686_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id ad5686_i2c_id[] = {
+ {"ad5311r", ID_AD5311R},
{"ad5671r", ID_AD5671R},
{"ad5675r", ID_AD5675R},
{"ad5691r", ID_AD5691R},
diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c
new file mode 100644
index 000000000000..bd36333257af
--- /dev/null
+++ b/drivers/iio/dac/ad5758.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD5758 Digital-to-Analog Converter driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ *
+ * TODO: Currently CRC is not supported in this driver
+ */
+#include <linux/bsearch.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* AD5758 registers definition */
+#define AD5758_NOP 0x00
+#define AD5758_DAC_INPUT 0x01
+#define AD5758_DAC_OUTPUT 0x02
+#define AD5758_CLEAR_CODE 0x03
+#define AD5758_USER_GAIN 0x04
+#define AD5758_USER_OFFSET 0x05
+#define AD5758_DAC_CONFIG 0x06
+#define AD5758_SW_LDAC 0x07
+#define AD5758_KEY 0x08
+#define AD5758_GP_CONFIG1 0x09
+#define AD5758_GP_CONFIG2 0x0A
+#define AD5758_DCDC_CONFIG1 0x0B
+#define AD5758_DCDC_CONFIG2 0x0C
+#define AD5758_WDT_CONFIG 0x0F
+#define AD5758_DIGITAL_DIAG_CONFIG 0x10
+#define AD5758_ADC_CONFIG 0x11
+#define AD5758_FAULT_PIN_CONFIG 0x12
+#define AD5758_TWO_STAGE_READBACK_SELECT 0x13
+#define AD5758_DIGITAL_DIAG_RESULTS 0x14
+#define AD5758_ANALOG_DIAG_RESULTS 0x15
+#define AD5758_STATUS 0x16
+#define AD5758_CHIP_ID 0x17
+#define AD5758_FREQ_MONITOR 0x18
+#define AD5758_DEVICE_ID_0 0x19
+#define AD5758_DEVICE_ID_1 0x1A
+#define AD5758_DEVICE_ID_2 0x1B
+#define AD5758_DEVICE_ID_3 0x1C
+
+/* AD5758_DAC_CONFIG */
+#define AD5758_DAC_CONFIG_RANGE_MSK GENMASK(3, 0)
+#define AD5758_DAC_CONFIG_RANGE_MODE(x) (((x) & 0xF) << 0)
+#define AD5758_DAC_CONFIG_INT_EN_MSK BIT(5)
+#define AD5758_DAC_CONFIG_INT_EN_MODE(x) (((x) & 0x1) << 5)
+#define AD5758_DAC_CONFIG_OUT_EN_MSK BIT(6)
+#define AD5758_DAC_CONFIG_OUT_EN_MODE(x) (((x) & 0x1) << 6)
+#define AD5758_DAC_CONFIG_SR_EN_MSK BIT(8)
+#define AD5758_DAC_CONFIG_SR_EN_MODE(x) (((x) & 0x1) << 8)
+#define AD5758_DAC_CONFIG_SR_CLOCK_MSK GENMASK(12, 9)
+#define AD5758_DAC_CONFIG_SR_CLOCK_MODE(x) (((x) & 0xF) << 9)
+#define AD5758_DAC_CONFIG_SR_STEP_MSK GENMASK(15, 13)
+#define AD5758_DAC_CONFIG_SR_STEP_MODE(x) (((x) & 0x7) << 13)
+
+/* AD5758_KEY */
+#define AD5758_KEY_CODE_RESET_1 0x15FA
+#define AD5758_KEY_CODE_RESET_2 0xAF51
+#define AD5758_KEY_CODE_SINGLE_ADC_CONV 0x1ADC
+#define AD5758_KEY_CODE_RESET_WDT 0x0D06
+#define AD5758_KEY_CODE_CALIB_MEM_REFRESH 0xFCBA
+
+/* AD5758_DCDC_CONFIG1 */
+#define AD5758_DCDC_CONFIG1_DCDC_VPROG_MSK GENMASK(4, 0)
+#define AD5758_DCDC_CONFIG1_DCDC_VPROG_MODE(x) (((x) & 0x1F) << 0)
+#define AD5758_DCDC_CONFIG1_DCDC_MODE_MSK GENMASK(6, 5)
+#define AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(x) (((x) & 0x3) << 5)
+#define AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK BIT(7)
+#define AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(x) (((x) & 0x1) << 7)
+
+/* AD5758_DCDC_CONFIG2 */
+#define AD5758_DCDC_CONFIG2_ILIMIT_MSK GENMASK(3, 1)
+#define AD5758_DCDC_CONFIG2_ILIMIT_MODE(x) (((x) & 0x7) << 1)
+#define AD5758_DCDC_CONFIG2_INTR_SAT_3WI_MSK BIT(11)
+#define AD5758_DCDC_CONFIG2_BUSY_3WI_MSK BIT(12)
+
+/* AD5758_DIGITAL_DIAG_RESULTS */
+#define AD5758_CAL_MEM_UNREFRESHED_MSK BIT(15)
+
+#define AD5758_WR_FLAG_MSK(x) (0x80 | ((x) & 0x1F))
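+/* e.g. AD5758_WR_FLAG_MSK(AD5758_KEY) = 0x80 | 0x08 = 0x88 */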
+
+#define AD5758_FULL_SCALE_MICRO 65535000000ULL
+
+struct ad5758_range {
+ int reg;
+ int min;
+ int max;
+};
+
+/**
+ * struct ad5758_state - driver instance specific data
+ * @spi: spi_device
+ * @lock: mutex lock
+ * @out_range: struct which stores the output range
+ * @dc_dc_mode: variable which stores the mode of operation
+ * @dc_dc_ilim: variable which stores the dc-to-dc converter current limit
+ * @slew_time: variable which stores the target slew time
+ * @pwr_down: variable which contains whether a channel is powered down or not
+ * @d32: spi transfer buffers
+ */
+struct ad5758_state {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct ad5758_range out_range;
+ unsigned int dc_dc_mode;
+ unsigned int dc_dc_ilim;
+ unsigned int slew_time;
+ bool pwr_down;
+ __be32 d32[3];
+};
+
+/*
+ * Output ranges corresponding to bits [3:0] from DAC_CONFIG register
+ * 0000: 0 V to 5 V voltage range
+ * 0001: 0 V to 10 V voltage range
+ * 0010: ±5 V voltage range
+ * 0011: ±10 V voltage range
+ * 1000: 0 mA to 20 mA current range
+ * 1001: 0 mA to 24 mA current range
+ * 1010: 4 mA to 20 mA current range
+ * 1011: ±20 mA current range
+ * 1100: ±24 mA current range
+ * 1101: -1 mA to +22 mA current range
+ */
+enum ad5758_output_range {
+ AD5758_RANGE_0V_5V,
+ AD5758_RANGE_0V_10V,
+ AD5758_RANGE_PLUSMINUS_5V,
+ AD5758_RANGE_PLUSMINUS_10V,
+ AD5758_RANGE_0mA_20mA = 8,
+ AD5758_RANGE_0mA_24mA,
+ AD5758_RANGE_4mA_20mA,
+ AD5758_RANGE_PLUSMINUS_20mA,
+ AD5758_RANGE_PLUSMINUS_24mA,
+ AD5758_RANGE_MINUS_1mA_PLUS_22mA,
+};
+
+enum ad5758_dc_dc_mode {
+ AD5758_DCDC_MODE_POWER_OFF,
+ AD5758_DCDC_MODE_DPC_CURRENT,
+ AD5758_DCDC_MODE_DPC_VOLTAGE,
+ AD5758_DCDC_MODE_PPC_CURRENT,
+};
+
+static const struct ad5758_range ad5758_voltage_range[] = {
+ { AD5758_RANGE_0V_5V, 0, 5000000 },
+ { AD5758_RANGE_0V_10V, 0, 10000000 },
+ { AD5758_RANGE_PLUSMINUS_5V, -5000000, 5000000 },
+ { AD5758_RANGE_PLUSMINUS_10V, -10000000, 10000000 }
+};
+
+static const struct ad5758_range ad5758_current_range[] = {
+ { AD5758_RANGE_0mA_20mA, 0, 20000 },
+ { AD5758_RANGE_0mA_24mA, 0, 24000 },
+ { AD5758_RANGE_4mA_20mA, 4000, 20000 },
+ { AD5758_RANGE_PLUSMINUS_20mA, -20000, 20000 },
+ { AD5758_RANGE_PLUSMINUS_24mA, -24000, 24000 },
+ { AD5758_RANGE_MINUS_1mA_PLUS_22mA, -1000, 22000 },
+};
+
+static const int ad5758_sr_clk[16] = {
+ 240000, 200000, 150000, 128000, 64000, 32000, 16000, 8000, 4000, 2000,
+ 1000, 512, 256, 128, 64, 16
+};
+
+static const int ad5758_sr_step[8] = {
+ 4, 12, 64, 120, 256, 500, 1820, 2048
+};
+
+static const int ad5758_dc_dc_ilim[6] = {
+ 150000, 200000, 250000, 300000, 350000, 400000
+};
+
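+/*
+ * Register reads use the device's two-stage readback: the first frame
+ * latches the target address into TWO_STAGE_READBACK_SELECT and the
+ * second (NOP) frame clocks the 16-bit register contents back out.
+ */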
+static int ad5758_spi_reg_read(struct ad5758_state *st, unsigned int addr)
+{
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->d32[0],
+ .len = 4,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->d32[1],
+ .rx_buf = &st->d32[2],
+ .len = 4,
+ },
+ };
+ int ret;
+
+ st->d32[0] = cpu_to_be32(
+ (AD5758_WR_FLAG_MSK(AD5758_TWO_STAGE_READBACK_SELECT) << 24) |
+ (addr << 8));
+ st->d32[1] = cpu_to_be32(AD5758_WR_FLAG_MSK(AD5758_NOP) << 24);
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ return ret;
+
+ return (be32_to_cpu(st->d32[2]) >> 8) & 0xFFFF;
+}
+
+static int ad5758_spi_reg_write(struct ad5758_state *st,
+ unsigned int addr,
+ unsigned int val)
+{
+ st->d32[0] = cpu_to_be32((AD5758_WR_FLAG_MSK(addr) << 24) |
+ ((val & 0xFFFF) << 8));
+
+ return spi_write(st->spi, &st->d32[0], sizeof(st->d32[0]));
+}
+
+static int ad5758_spi_write_mask(struct ad5758_state *st,
+ unsigned int addr,
+ unsigned long int mask,
+ unsigned int val)
+{
+ int regval;
+
+ regval = ad5758_spi_reg_read(st, addr);
+ if (regval < 0)
+ return regval;
+
+ regval &= ~mask;
+ regval |= val;
+
+ return ad5758_spi_reg_write(st, addr, regval);
+}
+
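+/* Comparator for bsearch() over the sorted ad5758_dc_dc_ilim[] table */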
+static int cmpfunc(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int ad5758_find_closest_match(const int *array,
+ unsigned int size, int val)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (val <= array[i])
+ return i;
+ }
+
+ return size - 1;
+}
+
+static int ad5758_wait_for_task_complete(struct ad5758_state *st,
+ unsigned int reg,
+ unsigned int mask)
+{
+ unsigned int timeout;
+ int ret;
+
+ timeout = 10;
+ do {
+ ret = ad5758_spi_reg_read(st, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & mask))
+ return 0;
+
+ usleep_range(100, 1000);
+ } while (--timeout);
+
+ dev_err(&st->spi->dev,
+ "Error reading bit 0x%x in 0x%x register\n", mask, reg);
+
+ return -EIO;
+}
+
+static int ad5758_calib_mem_refresh(struct ad5758_state *st)
+{
+ int ret;
+
+ ret = ad5758_spi_reg_write(st, AD5758_KEY,
+ AD5758_KEY_CODE_CALIB_MEM_REFRESH);
+ if (ret < 0) {
+ dev_err(&st->spi->dev,
+ "Failed to initiate a calibration memory refresh\n");
+ return ret;
+ }
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_soft_reset(struct ad5758_state *st)
+{
+ int ret;
+
+ ret = ad5758_spi_reg_write(st, AD5758_KEY, AD5758_KEY_CODE_RESET_1);
+ if (ret < 0)
+ return ret;
+
+ ret = ad5758_spi_reg_write(st, AD5758_KEY, AD5758_KEY_CODE_RESET_2);
+
+ /* Perform a software reset and wait at least 100us */
+ usleep_range(100, 1000);
+
+ return ret;
+}
+
+static int ad5758_set_dc_dc_conv_mode(struct ad5758_state *st,
+ enum ad5758_dc_dc_mode mode)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
+ AD5758_DCDC_CONFIG1_DCDC_MODE_MSK,
+ AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(mode));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Poll the BUSY_3WI bit in the DCDC_CONFIG2 register until it is 0.
+ * This allows the 3-wire interface communication to complete.
+ */
+ ret = ad5758_wait_for_task_complete(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_BUSY_3WI_MSK);
+ if (ret < 0)
+ return ret;
+
+ st->dc_dc_mode = mode;
+
+ return ret;
+}
+
+static int ad5758_set_dc_dc_ilim(struct ad5758_state *st, unsigned int ilim)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_ILIMIT_MSK,
+ AD5758_DCDC_CONFIG2_ILIMIT_MODE(ilim));
+ if (ret < 0)
+ return ret;
+ /*
+ * Poll the BUSY_3WI bit in the DCDC_CONFIG2 register until it is 0.
+ * This allows the 3-wire interface communication to complete.
+ */
+ return ad5758_wait_for_task_complete(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_BUSY_3WI_MSK);
+}
+
+static int ad5758_slew_rate_set(struct ad5758_state *st,
+ unsigned int sr_clk_idx,
+ unsigned int sr_step_idx)
+{
+ unsigned int mode;
+ unsigned long int mask;
+ int ret;
+
+ mask = AD5758_DAC_CONFIG_SR_EN_MSK |
+ AD5758_DAC_CONFIG_SR_CLOCK_MSK |
+ AD5758_DAC_CONFIG_SR_STEP_MSK;
+ mode = AD5758_DAC_CONFIG_SR_EN_MODE(1) |
+ AD5758_DAC_CONFIG_SR_STEP_MODE(sr_step_idx) |
+ AD5758_DAC_CONFIG_SR_CLOCK_MODE(sr_clk_idx);
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG, mask, mode);
+ if (ret < 0)
+ return ret;
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_slew_rate_config(struct ad5758_state *st)
+{
+ unsigned int sr_clk_idx, sr_step_idx;
+ int i, res;
+ s64 diff_new, diff_old;
+ u64 sr_step, calc_slew_time;
+
+ sr_clk_idx = 0;
+ sr_step_idx = 0;
+ diff_old = S64_MAX;
+ /*
+ * The slew time can be determined by using the formula:
+ * Slew Time = (Full Scale Out / (Step Size x Update Clk Freq))
+ * where Slew time is expressed in microseconds
+ * Given the desired slew time, the following algorithm determines the
+ * best match for the step size and the update clock frequency.
+ */
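+ /*
+ * e.g. a step of 64 codes at the 240 kHz update clock gives
+ * 65535000000 / (64 * 240000) ~= 4267 us of full-scale slew time.
+ */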
+ for (i = 0; i < ARRAY_SIZE(ad5758_sr_clk); i++) {
+ /*
+ * Go through each valid update clock freq and determine a raw
+ * value for the step size by using the formula:
+ * Step Size = Full Scale Out / (Update Clk Freq * Slew Time)
+ */
+ sr_step = AD5758_FULL_SCALE_MICRO;
+ do_div(sr_step, ad5758_sr_clk[i]);
+ do_div(sr_step, st->slew_time);
+ /*
+ * After a raw value for step size was determined, find the
+ * closest valid match
+ */
+ res = ad5758_find_closest_match(ad5758_sr_step,
+ ARRAY_SIZE(ad5758_sr_step),
+ sr_step);
+ /* Calculate the slew time */
+ calc_slew_time = AD5758_FULL_SCALE_MICRO;
+ do_div(calc_slew_time, ad5758_sr_step[res]);
+ do_div(calc_slew_time, ad5758_sr_clk[i]);
+ /*
+ * Determine by how many microseconds the calculated slew time
+ * differs from the desired slew time and store the diff for the
+ * next iteration
+ */
+ diff_new = abs(st->slew_time - calc_slew_time);
+ if (diff_new < diff_old) {
+ diff_old = diff_new;
+ sr_clk_idx = i;
+ sr_step_idx = res;
+ }
+ }
+
+ return ad5758_slew_rate_set(st, sr_clk_idx, sr_step_idx);
+}
+
+static int ad5758_set_out_range(struct ad5758_state *st, int range)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ AD5758_DAC_CONFIG_RANGE_MSK,
+ AD5758_DAC_CONFIG_RANGE_MODE(range));
+ if (ret < 0)
+ return ret;
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_fault_prot_switch_en(struct ad5758_state *st, bool enable)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK,
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(enable));
+ if (ret < 0)
+ return ret;
+ /*
+ * Poll the BUSY_3WI bit in the DCDC_CONFIG2 register until it is 0.
+ * This allows the 3-wire interface communication to complete.
+ */
+ return ad5758_wait_for_task_complete(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_BUSY_3WI_MSK);
+}
+
+static int ad5758_internal_buffers_en(struct ad5758_state *st, bool enable)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ AD5758_DAC_CONFIG_INT_EN_MSK,
+ AD5758_DAC_CONFIG_INT_EN_MODE(enable));
+ if (ret < 0)
+ return ret;
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (readval) {
+ ret = ad5758_spi_reg_read(st, reg);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ *readval = ret;
+ ret = 0;
+ } else {
+ ret = ad5758_spi_reg_write(st, reg, writeval);
+ }
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad5758_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ int max, min, ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ ret = ad5758_spi_reg_read(st, AD5758_DAC_INPUT);
+ mutex_unlock(&st->lock);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ min = st->out_range.min;
+ max = st->out_range.max;
+ *val = (max - min) / 1000;
+ *val2 = 16;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ min = st->out_range.min;
+ max = st->out_range.max;
+ *val = ((min * (1 << 16)) / (max - min)) / 1000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad5758_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ ret = ad5758_spi_reg_write(st, AD5758_DAC_INPUT, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ad5758_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+static ssize_t ad5758_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ bool pwr_down;
+ unsigned int dcdc_config1_mode, dc_dc_mode, dac_config_mode, val;
+ unsigned long int dcdc_config1_msk, dac_config_msk;
+ int ret;
+
+ ret = kstrtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ if (pwr_down) {
+ dc_dc_mode = AD5758_DCDC_MODE_POWER_OFF;
+ val = 0;
+ } else {
+ dc_dc_mode = st->dc_dc_mode;
+ val = 1;
+ }
+
+ dcdc_config1_mode = AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(dc_dc_mode) |
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(val);
+ dcdc_config1_msk = AD5758_DCDC_CONFIG1_DCDC_MODE_MSK |
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
+ dcdc_config1_msk,
+ dcdc_config1_mode);
+ if (ret < 0)
+ goto err_unlock;
+
+ dac_config_mode = AD5758_DAC_CONFIG_OUT_EN_MODE(val) |
+ AD5758_DAC_CONFIG_INT_EN_MODE(val);
+ dac_config_msk = AD5758_DAC_CONFIG_OUT_EN_MSK |
+ AD5758_DAC_CONFIG_INT_EN_MSK;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ dac_config_msk,
+ dac_config_mode);
+ if (ret < 0)
+ goto err_unlock;
+
+ st->pwr_down = pwr_down;
+
+err_unlock:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static const struct iio_info ad5758_info = {
+ .read_raw = ad5758_read_raw,
+ .write_raw = ad5758_write_raw,
+ .debugfs_reg_access = &ad5758_reg_access,
+};
+
+static const struct iio_chan_spec_ext_info ad5758_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5758_read_powerdown,
+ .write = ad5758_write_powerdown,
+ .shared = IIO_SHARED_BY_TYPE,
+ },
+ { }
+};
+
+#define AD5758_DAC_CHAN(_chan_type) { \
+ .type = (_chan_type), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .indexed = 1, \
+ .output = 1, \
+ .ext_info = ad5758_ext_info, \
+}
+
+static const struct iio_chan_spec ad5758_voltage_ch[] = {
+ AD5758_DAC_CHAN(IIO_VOLTAGE)
+};
+
+static const struct iio_chan_spec ad5758_current_ch[] = {
+ AD5758_DAC_CHAN(IIO_CURRENT)
+};
+
+static bool ad5758_is_valid_mode(enum ad5758_dc_dc_mode mode)
+{
+ switch (mode) {
+ case AD5758_DCDC_MODE_DPC_CURRENT:
+ case AD5758_DCDC_MODE_DPC_VOLTAGE:
+ case AD5758_DCDC_MODE_PPC_CURRENT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int ad5758_crc_disable(struct ad5758_state *st)
+{
+ unsigned int mask;
+
+ mask = (AD5758_WR_FLAG_MSK(AD5758_DIGITAL_DIAG_CONFIG) << 24) | 0x5C3A;
+ st->d32[0] = cpu_to_be32(mask);
+
+ return spi_write(st->spi, &st->d32[0], 4);
+}
+
+static int ad5758_find_out_range(struct ad5758_state *st,
+ const struct ad5758_range *range,
+ unsigned int size,
+ int min, int max)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if ((min == range[i].min) && (max == range[i].max)) {
+ st->out_range.reg = range[i].reg;
+ st->out_range.min = range[i].min;
+ st->out_range.max = range[i].max;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ad5758_parse_dt(struct ad5758_state *st)
+{
+ unsigned int tmp, tmparray[2], size;
+ const struct ad5758_range *range;
+ int *index, ret;
+
+ st->dc_dc_ilim = 0;
+ ret = device_property_read_u32(&st->spi->dev,
+ "adi,dc-dc-ilim-microamp", &tmp);
+ if (ret) {
+ dev_dbg(&st->spi->dev,
+ "Missing \"dc-dc-ilim-microamp\" property\n");
+ } else {
+ index = bsearch(&tmp, ad5758_dc_dc_ilim,
+ ARRAY_SIZE(ad5758_dc_dc_ilim),
+ sizeof(int), cmpfunc);
+ if (!index)
+ dev_dbg(&st->spi->dev, "dc-dc-ilim out of range\n");
+ else
+ st->dc_dc_ilim = index - ad5758_dc_dc_ilim;
+ }
+
+ ret = device_property_read_u32(&st->spi->dev, "adi,dc-dc-mode",
+ &st->dc_dc_mode);
+ if (ret) {
+ dev_err(&st->spi->dev, "Missing \"dc-dc-mode\" property\n");
+ return ret;
+ }
+
+ if (!ad5758_is_valid_mode(st->dc_dc_mode))
+ return -EINVAL;
+
+ if (st->dc_dc_mode == AD5758_DCDC_MODE_DPC_VOLTAGE) {
+ ret = device_property_read_u32_array(&st->spi->dev,
+ "adi,range-microvolt",
+ tmparray, 2);
+ if (ret) {
+ dev_err(&st->spi->dev,
+ "Missing \"range-microvolt\" property\n");
+ return ret;
+ }
+ range = ad5758_voltage_range;
+ size = ARRAY_SIZE(ad5758_voltage_range);
+ } else {
+ ret = device_property_read_u32_array(&st->spi->dev,
+ "adi,range-microamp",
+ tmparray, 2);
+ if (ret) {
+ dev_err(&st->spi->dev,
+ "Missing \"range-microamp\" property\n");
+ return ret;
+ }
+ range = ad5758_current_range;
+ size = ARRAY_SIZE(ad5758_current_range);
+ }
+
+ ret = ad5758_find_out_range(st, range, size, tmparray[0], tmparray[1]);
+ if (ret) {
+ dev_err(&st->spi->dev, "range invalid\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(&st->spi->dev, "adi,slew-time-us", &tmp);
+ if (ret) {
+ dev_dbg(&st->spi->dev, "Missing \"slew-time-us\" property\n");
+ st->slew_time = 0;
+ } else {
+ st->slew_time = tmp;
+ }
+
+ return 0;
+}
+
+static int ad5758_init(struct ad5758_state *st)
+{
+ int regval, ret;
+
+ /* Disable CRC checks in case they were left enabled */
+ ret = ad5758_crc_disable(st);
+ if (ret < 0)
+ return ret;
+
+ /* Perform a software reset */
+ ret = ad5758_soft_reset(st);
+ if (ret < 0)
+ return ret;
+
+ /* Disable CRC checks again; the software reset re-enables them */
+ ret = ad5758_crc_disable(st);
+ if (ret < 0)
+ return ret;
+
+ /* Perform a calibration memory refresh */
+ ret = ad5758_calib_mem_refresh(st);
+ if (ret < 0)
+ return ret;
+
+ regval = ad5758_spi_reg_read(st, AD5758_DIGITAL_DIAG_RESULTS);
+ if (regval < 0)
+ return regval;
+
+ /* Clear all the error flags */
+ ret = ad5758_spi_reg_write(st, AD5758_DIGITAL_DIAG_RESULTS, regval);
+ if (ret < 0)
+ return ret;
+
+ /* Set the dc-to-dc current limit */
+ ret = ad5758_set_dc_dc_ilim(st, st->dc_dc_ilim);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the dc-to-dc controller mode */
+ ret = ad5758_set_dc_dc_conv_mode(st, st->dc_dc_mode);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the output range */
+ ret = ad5758_set_out_range(st, st->out_range.reg);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Slew Rate Control, set the slew rate clock and step */
+ if (st->slew_time) {
+ ret = ad5758_slew_rate_config(st);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable the VIOUT fault protection switch (FPS is closed) */
+ ret = ad5758_fault_prot_switch_en(st, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Power up the DAC and internal (INT) amplifiers */
+ ret = ad5758_internal_buffers_en(st, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Enable VIOUT */
+ return ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ AD5758_DAC_CONFIG_OUT_EN_MSK,
+ AD5758_DAC_CONFIG_OUT_EN_MODE(1));
+}
+
+static int ad5758_probe(struct spi_device *spi)
+{
+ struct ad5758_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ mutex_init(&st->lock);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5758_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = 1;
+
+ ret = ad5758_parse_dt(st);
+ if (ret < 0)
+ return ret;
+
+ if (st->dc_dc_mode == AD5758_DCDC_MODE_DPC_VOLTAGE)
+ indio_dev->channels = ad5758_voltage_ch;
+ else
+ indio_dev->channels = ad5758_current_ch;
+
+ ret = ad5758_init(st);
+ if (ret < 0) {
+ dev_err(&spi->dev, "AD5758 init failed\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&st->spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad5758_id[] = {
+ { "ad5758", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5758_id);
+
+static struct spi_driver ad5758_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = ad5758_probe,
+ .id_table = ad5758_id,
+};
+
+module_spi_driver(ad5758_driver);
+
+MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5758 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index cca278eaa138..15d498fc4758 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -1,7 +1,7 @@
/*
* LTC2632 Digital to analog convertors spi driver
*
- * Copyright 2017 Maxime Roussin-BÃ©langer
+ * Copyright 2017 Maxime Roussin-Bélanger
* expanded by Silvan Murer <silvan.murer@gmail.com>
*
* Licensed under the GPL-2.
@@ -87,12 +87,7 @@ static int ltc2632_read_raw(struct iio_dev *indio_dev,
int *val2,
long m)
{
- struct ltc2632_chip_info *chip_info;
-
const struct ltc2632_state *st = iio_priv(indio_dev);
- const struct spi_device_id *spi_dev_id = spi_get_device_id(st->spi_dev);
-
- chip_info = (struct ltc2632_chip_info *)spi_dev_id->driver_data;
switch (m) {
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index dd21eebed6a8..e39d1e901353 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -97,9 +97,6 @@ static int dac5571_cmd_quad(struct dac5571_data *data, int channel, u16 val)
static int dac5571_pwrdwn_single(struct dac5571_data *data, int channel, u8 pwrdwn)
{
- unsigned int shift;
-
- shift = 12 - data->spec->resolution;
data->buf[1] = 0;
data->buf[0] = pwrdwn << DAC5571_SINGLE_PWRDWN_BITS;
@@ -111,9 +108,6 @@ static int dac5571_pwrdwn_single(struct dac5571_data *data, int channel, u8 pwrd
static int dac5571_pwrdwn_quad(struct dac5571_data *data, int channel, u8 pwrdwn)
{
- unsigned int shift;
-
- shift = 16 - data->spec->resolution;
data->buf[2] = 0;
data->buf[1] = pwrdwn << DAC5571_QUAD_PWRDWN_BITS;
data->buf[0] = (channel << DAC5571_CHANNEL_SELECT) |
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index ddb6a334ae68..f4a508107f0d 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -12,6 +12,7 @@
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -268,6 +269,9 @@ struct ad9523_state {
struct regulator *reg;
struct ad9523_platform_data *pdata;
struct iio_chan_spec ad9523_channels[AD9523_NUM_CHAN];
+ struct gpio_desc *pwrdown_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *sync_gpio;
unsigned long vcxo_freq;
unsigned long vco_freq;
@@ -275,6 +279,15 @@ struct ad9523_state {
unsigned char vco_out_map[AD9523_NUM_CHAN_ALT_CLK_SRC];
/*
+ * Lock for accessing device registers. Some operations require
+ * multiple consecutive R/W operations, during which the device
+ * shouldn't be interrupted. The buffers are also shared across
+ * all operations, so they need to be protected even on standalone
+ * reads and writes.
+ */
+ struct mutex lock;
+
+ /*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
@@ -500,6 +513,7 @@ static ssize_t ad9523_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct ad9523_state *st = iio_priv(indio_dev);
bool state;
int ret;
@@ -508,9 +522,9 @@ static ssize_t ad9523_store(struct device *dev,
return ret;
if (!state)
- return 0;
+ return len;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD9523_SYNC:
ret = ad9523_sync(indio_dev);
@@ -521,7 +535,7 @@ static ssize_t ad9523_store(struct device *dev,
default:
ret = -ENODEV;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -532,15 +546,16 @@ static ssize_t ad9523_show(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct ad9523_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_READBACK_0);
if (ret >= 0) {
ret = sprintf(buf, "%d\n", !!(ret & (1 <<
(u32)this_attr->address)));
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -623,9 +638,9 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
unsigned int code;
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
if (ret < 0)
return ret;
@@ -642,7 +657,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
AD9523_CLK_DIST_DIV_REV(ret);
*val = code / 1000000;
- *val2 = (code % 1000000) * 10;
+ *val2 = code % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
@@ -659,7 +674,7 @@ static int ad9523_write_raw(struct iio_dev *indio_dev,
unsigned int reg;
int ret, tmp, code;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
if (ret < 0)
goto out;
@@ -705,7 +720,7 @@ static int ad9523_write_raw(struct iio_dev *indio_dev,
ad9523_io_update(indio_dev);
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -713,9 +728,10 @@ static int ad9523_reg_access(struct iio_dev *indio_dev,
unsigned int reg, unsigned int writeval,
unsigned int *readval)
{
+ struct ad9523_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (readval == NULL) {
ret = ad9523_write(indio_dev, reg | AD9523_R1B, writeval);
ad9523_io_update(indio_dev);
@@ -728,7 +744,7 @@ static int ad9523_reg_access(struct iio_dev *indio_dev,
}
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -967,6 +983,8 @@ static int ad9523_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+
st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
@@ -974,6 +992,32 @@ static int ad9523_probe(struct spi_device *spi)
return ret;
}
+ st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->pwrdown_gpio)) {
+ ret = PTR_ERR(st->pwrdown_gpio);
+ goto error_disable_reg;
+ }
+
+ st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->reset_gpio)) {
+ ret = PTR_ERR(st->reset_gpio);
+ goto error_disable_reg;
+ }
+
+ if (st->reset_gpio) {
+ udelay(1);
+ gpiod_direction_output(st->reset_gpio, 1);
+ }
+
+ st->sync_gpio = devm_gpiod_get_optional(&spi->dev, "sync",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->sync_gpio)) {
+ ret = PTR_ERR(st->sync_gpio);
+ goto error_disable_reg;
+ }
+
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->pdata = pdata;
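
A side note on the GPIO handling added above: devm_gpiod_get_optional()
returns NULL, not an error, when the line is simply absent from the firmware
description, so only IS_ERR() needs explicit handling and a plain NULL check
guards the reset pulse. A condensed sketch of the same pattern (hypothetical
function; device-managed, so no explicit release):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Sketch only: mirrors the optional reset handling added to ad9523_probe(). */
static int example_reset_pulse(struct device *dev)
{
        struct gpio_desc *reset;

        /* NULL when no "reset" GPIO is described; ERR_PTR() on real errors */
        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        if (reset) {
                udelay(1);                        /* hold reset asserted */
                gpiod_direction_output(reset, 1); /* then release it */
        }

        return 0;
}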
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index ad6f91d06185..c771ae6803a9 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -81,9 +81,11 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
adis->tx[9] = (value >> 24) & 0xff;
adis->tx[6] = ADIS_WRITE_REG(reg + 2);
adis->tx[7] = (value >> 16) & 0xff;
+ /* fall through */
case 2:
adis->tx[4] = ADIS_WRITE_REG(reg + 1);
adis->tx[5] = (value >> 8) & 0xff;
+ /* fall through */
case 1:
adis->tx[2] = ADIS_WRITE_REG(reg);
adis->tx[3] = value & 0xff;
@@ -167,6 +169,7 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
adis->tx[2] = ADIS_READ_REG(reg + 2);
adis->tx[3] = 0;
spi_message_add_tail(&xfers[1], &msg);
+ /* fall through */
case 2:
adis->tx[4] = ADIS_READ_REG(reg);
adis->tx[5] = 0;
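
The /* fall through */ annotations above mark the cascading switch cases as
intentional (keeping -Wimplicit-fallthrough quiet): each larger register size
reuses the byte setup of the smaller ones. A standalone illustration of the
idiom (hypothetical buffer layout, not the adis wire format):

/* Sketch: intentional fall-through assembles a multi-byte payload. */
static void fill_tx(unsigned char *tx, unsigned int value, unsigned int size)
{
        switch (size) {
        case 4:
                tx[3] = (value >> 24) & 0xff;
                tx[2] = (value >> 16) & 0xff;
                /* fall through */
        case 2:
                tx[1] = (value >> 8) & 0xff;
                /* fall through */
        case 1:
                tx[0] = value & 0xff;
                break;
        }
}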
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f9c0624505a2..d80ef468508a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -20,8 +20,6 @@
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
#include <linux/iio/iio.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -84,7 +82,7 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = {
static const struct inv_mpu6050_chip_config chip_config_6050 = {
.fsr = INV_MPU6050_FSR_2000DPS,
.lpf = INV_MPU6050_FILTER_20HZ,
- .fifo_rate = INV_MPU6050_INIT_FIFO_RATE,
+ .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
@@ -106,6 +104,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
.config = &chip_config_6050,
},
{
+ .whoami = INV_MPU6515_WHOAMI_VALUE,
+ .name = "MPU6515",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
+ {
.whoami = INV_MPU6000_WHOAMI_VALUE,
.name = "MPU6000",
.reg = &reg_set_6050,
@@ -280,7 +284,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
goto error_power_off;
- d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE);
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
goto error_power_off;
@@ -297,6 +301,13 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
memcpy(&st->chip_config, hw_info[st->chip_type].config,
sizeof(struct inv_mpu6050_chip_config));
+ /*
+ * Internal chip period is 1ms (1kHz).
+ * Start from the theoretical value; it is refined later by
+ * measuring against interrupt timestamps.
+ */
+ st->chip_period = NSEC_PER_MSEC;
+
return inv_mpu6050_set_power_itg(st, false);
error_power_off:
@@ -630,7 +641,7 @@ static ssize_t
inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- s32 fifo_rate;
+ int fifo_rate;
u8 d;
int result;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -646,8 +657,13 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
return result;
+ /* compute the chip sample rate divider */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(fifo_rate);
+ /* compute back the fifo rate to handle truncation cases */
+ fifo_rate = INV_MPU6050_DIVIDER_TO_FIFO_RATE(d);
+
mutex_lock(&st->lock);
- if (fifo_rate == st->chip_config.fifo_rate) {
+ if (d == st->chip_config.divider) {
result = 0;
goto fifo_rate_fail_unlock;
}
@@ -655,11 +671,10 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_unlock;
- d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
goto fifo_rate_fail_power_off;
- st->chip_config.fifo_rate = fifo_rate;
+ st->chip_config.divider = d;
result = inv_mpu6050_set_lpf(st, fifo_rate);
if (result)
@@ -687,7 +702,7 @@ inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
unsigned fifo_rate;
mutex_lock(&st->lock);
- fifo_rate = st->chip_config.fifo_rate;
+ fifo_rate = INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
mutex_unlock(&st->lock);
return scnprintf(buf, PAGE_SIZE, "%u\n", fifo_rate);
@@ -959,6 +974,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
}
irq_type = irqd_get_trigger_type(desc);
+ if (!irq_type)
+ irq_type = IRQF_TRIGGER_RISING;
if (irq_type == IRQF_TRIGGER_RISING)
st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
else if (irq_type == IRQF_TRIGGER_FALLING)
@@ -1003,7 +1020,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->modes = INDIO_BUFFER_TRIGGERED;
result = devm_iio_triggered_buffer_setup(dev, indio_dev,
- inv_mpu6050_irq_handler,
+ iio_pollfunc_store_time,
inv_mpu6050_read_fifo,
NULL);
if (result) {
@@ -1016,8 +1033,6 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
- INIT_KFIFO(st->timestamps);
- spin_lock_init(&st->time_stamp_lock);
result = devm_iio_device_register(dev, indio_dev);
if (result) {
dev_err(dev, "IIO register fail %d\n", result);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 495409d56207..dd758e3d403d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -174,6 +174,7 @@ static int inv_mpu_remove(struct i2c_client *client)
static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
+ {"mpu6515", INV_MPU6515},
{"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
@@ -193,6 +194,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_MPU6500
},
{
+ .compatible = "invensense,mpu6515",
+ .data = (void *)INV_MPU6515
+ },
+ {
.compatible = "invensense,mpu9150",
.data = (void *)INV_MPU9150
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index c54da777945d..e69a59659dbc 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -12,8 +12,6 @@
*/
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -73,6 +71,7 @@ struct inv_mpu6050_reg_map {
enum inv_devices {
INV_MPU6050,
INV_MPU6500,
+ INV_MPU6515,
INV_MPU6000,
INV_MPU9150,
INV_MPU9250,
@@ -88,7 +87,7 @@ enum inv_devices {
* @accl_fs: accel full scale range.
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
- * @fifo_rate: FIFO update rate.
+ * @divider: chip sample rate divider (sample rate divider - 1)
*/
struct inv_mpu6050_chip_config {
unsigned int fsr:2;
@@ -96,7 +95,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fs:2;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
- u16 fifo_rate;
+ u8 divider;
u8 user_ctrl;
};
@@ -116,40 +115,40 @@ struct inv_mpu6050_hw {
/*
* struct inv_mpu6050_state - Driver state variables.
- * @TIMESTAMP_FIFO_SIZE: fifo size for timestamp.
* @lock: Chip access lock.
* @trig: IIO trigger.
* @chip_config: Cached attribute information.
* @reg: Map of important registers.
* @hw: Other hardware-specific information.
* @chip_type: chip type.
- * @time_stamp_lock: spin lock to time stamp.
* @plat_data: platform data (deprecated in favor of @orientation).
* @orientation: sensor chip orientation relative to main hardware.
- * @timestamps: kfifo queue to store time stamp.
* @map regmap pointer.
* @irq interrupt number.
* @irq_mask the int_pin_cfg mask to configure interrupt type.
+ * @chip_period: chip internal period estimation (~1kHz).
+ * @it_timestamp: timestamp from previous interrupt.
+ * @data_timestamp: timestamp for next data sample.
*/
struct inv_mpu6050_state {
-#define TIMESTAMP_FIFO_SIZE 16
struct mutex lock;
struct iio_trigger *trig;
struct inv_mpu6050_chip_config chip_config;
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_hw *hw;
enum inv_devices chip_type;
- spinlock_t time_stamp_lock;
struct i2c_mux_core *muxc;
struct i2c_client *mux_client;
unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
struct iio_mount_matrix orientation;
- DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
struct regmap *map;
int irq;
u8 irq_mask;
unsigned skip_samples;
+ s64 chip_period;
+ s64 it_timestamp;
+ s64 data_timestamp;
};
/*register and associated bit definition*/
@@ -174,6 +173,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_RAW_GYRO 0x43
#define INV_MPU6050_REG_INT_STATUS 0x3A
+#define INV_MPU6050_BIT_FIFO_OVERFLOW_INT 0x10
#define INV_MPU6050_BIT_RAW_DATA_RDY_INT 0x01
#define INV_MPU6050_REG_USER_CTRL 0x6A
@@ -198,7 +198,6 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
#define INV_MPU6050_FIFO_COUNT_BYTE 2
-#define INV_MPU6050_FIFO_THRESHOLD 500
/* mpu6500 registers */
#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
@@ -231,13 +230,24 @@ struct inv_mpu6050_state {
#define INV_MPU6050_LATCH_INT_EN 0x20
#define INV_MPU6050_BIT_BYPASS_EN 0x2
+/* Allowed timestamp period jitter in percent */
+#define INV_MPU6050_TS_PERIOD_JITTER 4
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
-#define INV_MPU6050_TIME_STAMP_TOR 5
#define INV_MPU6050_MAX_FIFO_RATE 1000
#define INV_MPU6050_MIN_FIFO_RATE 4
-#define INV_MPU6050_ONE_K_HZ 1000
+
+/* chip internal frequency: 1KHz */
+#define INV_MPU6050_INTERNAL_FREQ_HZ 1000
+/* return the frequency divider (chip sample rate divider + 1) */
+#define INV_MPU6050_FREQ_DIVIDER(st) \
+ ((st)->chip_config.divider + 1)
+/* chip sample rate divider to fifo rate */
+#define INV_MPU6050_FIFO_RATE_TO_DIVIDER(fifo_rate) \
+ ((INV_MPU6050_INTERNAL_FREQ_HZ / (fifo_rate)) - 1)
+#define INV_MPU6050_DIVIDER_TO_FIFO_RATE(divider) \
+ (INV_MPU6050_INTERNAL_FREQ_HZ / ((divider) + 1))
#define INV_MPU6050_REG_WHOAMI 117
@@ -247,6 +257,7 @@ struct inv_mpu6050_state {
#define INV_MPU9150_WHOAMI_VALUE 0x68
#define INV_MPU9250_WHOAMI_VALUE 0x71
#define INV_MPU9255_WHOAMI_VALUE 0x73
+#define INV_MPU6515_WHOAMI_VALUE 0x74
#define INV_ICM20608_WHOAMI_VALUE 0xAF
/* scan element definition */
@@ -300,7 +311,6 @@ enum inv_mpu6050_clock_sel_e {
NUM_CLK
};
-irqreturn_t inv_mpu6050_irq_handler(int irq, void *p);
irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type);
int inv_reset_fifo(struct iio_dev *indio_dev);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 1795418438e4..548e042f7b5b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -19,18 +19,83 @@
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/kfifo.h>
#include <linux/poll.h>
+#include <linux/math64.h>
+#include <asm/unaligned.h>
#include "inv_mpu_iio.h"
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+/**
+ * inv_mpu6050_update_period() - Update chip internal period estimation
+ *
+ * @st: driver state
+ * @timestamp: the interrupt timestamp
+ * @nb: number of data sets in the fifo
+ *
+ * This function uses interrupt timestamps to estimate the chip period and
+ * to choose the timestamp for the data samples to come.
+ */
+static void inv_mpu6050_update_period(struct inv_mpu6050_state *st,
+ s64 timestamp, size_t nb)
{
- unsigned long flags;
+ /* Period boundaries for accepting timestamp */
+ const s64 period_min =
+ (NSEC_PER_MSEC * (100 - INV_MPU6050_TS_PERIOD_JITTER)) / 100;
+ const s64 period_max =
+ (NSEC_PER_MSEC * (100 + INV_MPU6050_TS_PERIOD_JITTER)) / 100;
+ const s32 divider = INV_MPU6050_FREQ_DIVIDER(st);
+ s64 delta, interval;
+ bool use_it_timestamp = false;
+
+ if (st->it_timestamp == 0) {
+ /* not initialized, forced to use it_timestamp */
+ use_it_timestamp = true;
+ } else if (nb == 1) {
+ /*
+ * Validate the interrupt timestamp by checking whether the
+ * interrupt has been delayed.
+ * nb > 1 means the interrupt was delayed by more than one
+ * sample period, so its timestamp cannot be trusted.
+ * Compute the chip period between 2 interrupts for validating.
+ */
+ delta = div_s64(timestamp - st->it_timestamp, divider);
+ if (delta > period_min && delta < period_max) {
+ /* update chip period and use it timestamp */
+ st->chip_period = (st->chip_period + delta) / 2;
+ use_it_timestamp = true;
+ }
+ }
+
+ if (use_it_timestamp) {
+ /*
+ * Manage case of multiple samples in the fifo (nb > 1):
+ * compute timestamp corresponding to the first sample using
+ * estimated chip period.
+ */
+ interval = (nb - 1) * st->chip_period * divider;
+ st->data_timestamp = timestamp - interval;
+ }
- /* take the spin lock sem to avoid interrupt kick in */
- spin_lock_irqsave(&st->time_stamp_lock, flags);
- kfifo_reset(&st->timestamps);
- spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+ /* save it timestamp */
+ st->it_timestamp = timestamp;
+}
+
+/**
+ * inv_mpu6050_get_timestamp() - Return the current data timestamp
+ *
+ * @st: driver state
+ * Return: the current data timestamp
+ *
+ * This function returns the current data timestamp and prepares the next one.
+ */
+static s64 inv_mpu6050_get_timestamp(struct inv_mpu6050_state *st)
+{
+ s64 ts;
+
+ /* return current data timestamp and increment */
+ ts = st->data_timestamp;
+ st->data_timestamp += st->chip_period * INV_MPU6050_FREQ_DIVIDER(st);
+
+ return ts;
}
int inv_reset_fifo(struct iio_dev *indio_dev)
@@ -39,6 +104,9 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
u8 d;
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ /* reset it timestamp validation */
+ st->it_timestamp = 0;
+
/* disable interrupt */
result = regmap_write(st->map, st->reg->int_enable, 0);
if (result) {
@@ -62,9 +130,6 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
if (result)
goto reset_fifo_fail;
- /* clear timestamps fifo */
- inv_clear_kfifo(st);
-
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
@@ -99,23 +164,6 @@ reset_fifo_fail:
}
/**
- * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
- */
-irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
- s64 timestamp;
-
- timestamp = iio_get_time_ns(indio_dev);
- kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
- &st->time_stamp_lock);
-
- return IRQ_WAKE_THREAD;
-}
-
-/**
* inv_mpu6050_read_fifo() - Transfer data from hardware FIFO to KFIFO.
*/
irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
@@ -129,6 +177,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
u16 fifo_count;
s64 timestamp;
int int_status;
+ size_t i, nb;
mutex_lock(&st->lock);
@@ -139,6 +188,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
"failed to ack interrupt\n");
goto flush_fifo;
}
+ /* handle fifo overflow by resetting the fifo */
+ if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
+ goto flush_fifo;
if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
dev_warn(regmap_get_device(st->map),
"spurious interrupt with status 0x%x\n", int_status);
@@ -163,38 +215,23 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
INV_MPU6050_FIFO_COUNT_BYTE);
if (result)
goto end_session;
- fifo_count = be16_to_cpup((__be16 *)(&data[0]));
- if (fifo_count < bytes_per_datum)
- goto end_session;
- /* fifo count can't be an odd number. If it is odd, reset the FIFO. */
- if (fifo_count & 1)
- goto flush_fifo;
- if (fifo_count > INV_MPU6050_FIFO_THRESHOLD)
- goto flush_fifo;
- /* Timestamp mismatch. */
- if (kfifo_len(&st->timestamps) >
- fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
- goto flush_fifo;
- do {
+ fifo_count = get_unaligned_be16(&data[0]);
+ /* compute and process all complete data sets */
+ nb = fifo_count / bytes_per_datum;
+ inv_mpu6050_update_period(st, pf->timestamp, nb);
+ for (i = 0; i < nb; ++i) {
result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
data, bytes_per_datum);
if (result)
goto flush_fifo;
-
- result = kfifo_out(&st->timestamps, &timestamp, 1);
- /* when there is no timestamp, put timestamp as 0 */
- if (result == 0)
- timestamp = 0;
-
/* skip first samples if needed */
- if (st->skip_samples)
+ if (st->skip_samples) {
st->skip_samples--;
- else
- iio_push_to_buffers_with_timestamp(indio_dev, data,
- timestamp);
-
- fifo_count -= bytes_per_datum;
- } while (fifo_count >= bytes_per_datum);
+ continue;
+ }
+ timestamp = inv_mpu6050_get_timestamp(st);
+ iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
+ }
end_session:
mutex_unlock(&st->lock);
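
To summarize the timestamping scheme above: an interrupt timestamp is trusted
only when exactly one sample is pending and the implied per-sample period
falls within +/-4% of the nominal 1 ms; when it is, the running period
estimate is averaged toward it. A standalone sketch of that acceptance test
with illustrative numbers (fixed 1 ms nominal period):

#include <stdio.h>

#define NSEC_PER_MSEC 1000000LL
#define JITTER_PCT 4LL

/* Sketch of the update rule in inv_mpu6050_update_period() above. */
static int update_period(long long *chip_period, long long it_delta,
                         int divider)
{
        const long long min = NSEC_PER_MSEC * (100 - JITTER_PCT) / 100;
        const long long max = NSEC_PER_MSEC * (100 + JITTER_PCT) / 100;
        long long delta = it_delta / divider;

        if (delta <= min || delta >= max)
                return 0;       /* delayed interrupt: keep old estimate */

        *chip_period = (*chip_period + delta) / 2;      /* simple low-pass */
        return 1;
}

int main(void)
{
        long long period = NSEC_PER_MSEC;       /* theoretical 1 ms start */

        /* 20.2 ms between interrupts, divider 20 -> 1.01 ms: accepted */
        printf("%d %lld\n", update_period(&period, 20200000, 20), period);
        /* 30 ms, divider 20 -> 1.5 ms: rejected as a delayed interrupt */
        printf("%d %lld\n", update_period(&period, 30000000, 20), period);
        return 0;
}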
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 4994f920a836..7589f2ad1dae 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -298,8 +298,11 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
err = regmap_bulk_read(hw->regmap,
hw->settings->fifo_ops.fifo_diff.addr,
&fifo_status, sizeof(fifo_status));
- if (err < 0)
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read fifo status (err=%d)\n",
+ err);
return err;
+ }
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
@@ -313,8 +316,12 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw, hw->buff, pattern_len);
- if (err < 0)
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to read pattern from fifo (err=%d)\n",
+ err);
return err;
+ }
/*
* Data are written to the FIFO with a specific pattern
@@ -385,8 +392,11 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (unlikely(reset_ts)) {
err = st_lsm6dsx_reset_hw_ts(hw);
- if (err < 0)
+ if (err < 0) {
+ dev_err(hw->dev, "failed to reset hw ts (err=%d)\n",
+ err);
return err;
+ }
}
return read_len;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 19bdf3d2962a..a062cfddc5af 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -85,6 +85,8 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_COUNT] = "count",
[IIO_INDEX] = "index",
[IIO_GRAVITY] = "gravity",
+ [IIO_POSITIONRELATIVE] = "positionrelative",
+ [IIO_PHASE] = "phase",
};
static const char * const iio_modifier_names[] = {
@@ -108,6 +110,7 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
[IIO_MOD_LIGHT_UV] = "uv",
+ [IIO_MOD_LIGHT_DUV] = "duv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
[IIO_MOD_TEMP_OBJECT] = "object",
@@ -207,35 +210,27 @@ static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
*/
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
- struct timespec tp;
+ struct timespec64 tp;
switch (iio_device_get_clock(indio_dev)) {
case CLOCK_REALTIME:
- ktime_get_real_ts(&tp);
- break;
+ return ktime_get_real_ns();
case CLOCK_MONOTONIC:
- ktime_get_ts(&tp);
- break;
+ return ktime_get_ns();
case CLOCK_MONOTONIC_RAW:
- getrawmonotonic(&tp);
- break;
+ return ktime_get_raw_ns();
case CLOCK_REALTIME_COARSE:
- tp = current_kernel_time();
- break;
+ return ktime_to_ns(ktime_get_coarse_real());
case CLOCK_MONOTONIC_COARSE:
- tp = get_monotonic_coarse();
- break;
+ ktime_get_coarse_ts64(&tp);
+ return timespec64_to_ns(&tp);
case CLOCK_BOOTTIME:
- get_monotonic_boottime(&tp);
- break;
+ return ktime_get_boot_ns();
case CLOCK_TAI:
- timekeeping_clocktai(&tp);
- break;
+ return ktime_get_tai_ns();
default:
BUG();
}
-
- return timespec_to_ns(&tp);
}
EXPORT_SYMBOL(iio_get_time_ns);
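
Driver usage is unchanged by this rework: samples are still stamped through
the same helper, which now maps every clock with a direct ktime accessor
straight to it. A minimal trigger-handler fragment showing the typical call
site (hypothetical driver state, assuming a buffer sized for the scan plus an
aligned s64 timestamp):

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/interrupt.h>

struct example_state {                  /* hypothetical driver state */
        u8 buf[16] __aligned(8);        /* scan bytes + s64 timestamp */
};

static irqreturn_t example_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct example_state *st = iio_priv(indio_dev);

        /* stamp with whichever clock userspace selected */
        iio_push_to_buffers_with_timestamp(indio_dev, st->buf,
                                           iio_get_time_ns(indio_dev));

        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
}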
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index c7ef8d1862d6..d66ea754ffff 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -1,3 +1,4 @@
+
#
# Light sensors
#
@@ -319,6 +320,17 @@ config PA12203001
This driver can also be built as a module. If so, the module
will be called pa12203001.
+config SI1133
+ tristate "SI1133 UV Index Sensor and Ambient Light Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build a driver for the Silicon Labs SI1133
+ UV Index Sensor and Ambient Light Sensor chip.
+
+ To compile this driver as a module, choose M here: the module will be
+ called si1133.
+
config SI1145
tristate "SI1132 and SI1141/2/3/5/6/7 combined ALS, UV index and proximity sensor"
depends on I2C
@@ -438,11 +450,12 @@ config US5182D
will be called us5182d.
config VCNL4000
- tristate "VCNL4000/4010/4020 combined ALS and proximity sensor"
+ tristate "VCNL4000/4010/4020/4200 combined ALS and proximity sensor"
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VCNL4000,
- VCNL4010, VCNL4020 combined ambient light and proximity sensor.
+ VCNL4010, VCNL4020, VCNL4200 combined ambient light and proximity
+ sensor.
To compile this driver as a module, choose M here: the
module will be called vcnl4000.
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 80943af5d627..86337b114bc4 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
+obj-$(CONFIG_SI1133) += si1133.o
obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
obj-$(CONFIG_ST_UVIS25) += st_uvis25_core.o
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
new file mode 100644
index 000000000000..015a21f0c2ef
--- /dev/null
+++ b/drivers/iio/light/si1133.c
@@ -0,0 +1,1071 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * si1133.c - Support for Silabs SI1133 combined ambient
+ * light and UV index sensors
+ *
+ * Copyright 2018 Maxime Roussin-Belanger <maxime.roussinbelanger@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/util_macros.h>
+
+#define SI1133_REG_PART_ID 0x00
+#define SI1133_REG_REV_ID 0x01
+#define SI1133_REG_MFR_ID 0x02
+#define SI1133_REG_INFO0 0x03
+#define SI1133_REG_INFO1 0x04
+
+#define SI1133_PART_ID 0x33
+
+#define SI1133_REG_HOSTIN0 0x0A
+#define SI1133_REG_COMMAND 0x0B
+#define SI1133_REG_IRQ_ENABLE 0x0F
+#define SI1133_REG_RESPONSE1 0x10
+#define SI1133_REG_RESPONSE0 0x11
+#define SI1133_REG_IRQ_STATUS 0x12
+#define SI1133_REG_MEAS_RATE 0x1A
+
+#define SI1133_IRQ_CHANNEL_ENABLE 0xF
+
+#define SI1133_CMD_RESET_CTR 0x00
+#define SI1133_CMD_RESET_SW 0x01
+#define SI1133_CMD_FORCE 0x11
+#define SI1133_CMD_START_AUTONOMOUS 0x13
+#define SI1133_CMD_PARAM_SET 0x80
+#define SI1133_CMD_PARAM_QUERY 0x40
+#define SI1133_CMD_PARAM_MASK 0x3F
+
+#define SI1133_CMD_ERR_MASK BIT(4)
+#define SI1133_CMD_SEQ_MASK 0xF
+#define SI1133_MAX_CMD_CTR 0xF
+
+#define SI1133_PARAM_REG_CHAN_LIST 0x01
+#define SI1133_PARAM_REG_ADCCONFIG(x) (((x) * 4) + 2)
+#define SI1133_PARAM_REG_ADCSENS(x) (((x) * 4) + 3)
+#define SI1133_PARAM_REG_ADCPOST(x) (((x) * 4) + 4)
+
+#define SI1133_ADCMUX_MASK 0x1F
+
+#define SI1133_ADCCONFIG_DECIM_RATE(x) ((x) << 5)
+
+#define SI1133_ADCSENS_SCALE_MASK 0x70
+#define SI1133_ADCSENS_SCALE_SHIFT 4
+#define SI1133_ADCSENS_HSIG_MASK BIT(7)
+#define SI1133_ADCSENS_HSIG_SHIFT 7
+#define SI1133_ADCSENS_HW_GAIN_MASK 0xF
+#define SI1133_ADCSENS_NB_MEAS(x) (fls(x) << SI1133_ADCSENS_SCALE_SHIFT)
+
+#define SI1133_ADCPOST_24BIT_EN BIT(6)
+#define SI1133_ADCPOST_POSTSHIFT_BITQTY(x) (((x) & GENMASK(2, 0)) << 3)
+
+#define SI1133_PARAM_ADCMUX_SMALL_IR 0x0
+#define SI1133_PARAM_ADCMUX_MED_IR 0x1
+#define SI1133_PARAM_ADCMUX_LARGE_IR 0x2
+#define SI1133_PARAM_ADCMUX_WHITE 0xB
+#define SI1133_PARAM_ADCMUX_LARGE_WHITE 0xD
+#define SI1133_PARAM_ADCMUX_UV 0x18
+#define SI1133_PARAM_ADCMUX_UV_DEEP 0x19
+
+#define SI1133_ERR_INVALID_CMD 0x0
+#define SI1133_ERR_INVALID_LOCATION_CMD 0x1
+#define SI1133_ERR_SATURATION_ADC_OR_OVERFLOW_ACCUMULATION 0x2
+#define SI1133_ERR_OUTPUT_BUFFER_OVERFLOW 0x3
+
+#define SI1133_COMPLETION_TIMEOUT_MS 500
+
+#define SI1133_CMD_MINSLEEP_US_LOW 5000
+#define SI1133_CMD_MINSLEEP_US_HIGH 7500
+#define SI1133_CMD_TIMEOUT_MS 25
+#define SI1133_CMD_LUX_TIMEOUT_MS 5000
+#define SI1133_CMD_TIMEOUT_US (SI1133_CMD_TIMEOUT_MS * 1000)
+
+#define SI1133_REG_HOSTOUT(x) ((x) + 0x13)
+
+#define SI1133_MEASUREMENT_FREQUENCY 1250
+
+#define SI1133_X_ORDER_MASK 0x0070
+#define SI1133_Y_ORDER_MASK 0x0007
+#define si1133_get_x_order(m) (((m) & SI1133_X_ORDER_MASK) >> 4)
+#define si1133_get_y_order(m) ((m) & SI1133_Y_ORDER_MASK)
+
+#define SI1133_LUX_ADC_MASK 0xE
+#define SI1133_ADC_THRESHOLD 16000
+#define SI1133_INPUT_FRACTION_HIGH 7
+#define SI1133_INPUT_FRACTION_LOW 15
+#define SI1133_LUX_OUTPUT_FRACTION 12
+#define SI1133_LUX_BUFFER_SIZE 9
+
+static const int si1133_scale_available[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128};
+
+static IIO_CONST_ATTR(scale_available, "1 2 4 8 16 32 64 128");
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.0244 0.0488 0.0975 0.195 0.390 0.780 "
+ "1.560 3.120 6.24 12.48 25.0 50.0");
+
+/* A.K.A. HW_GAIN in datasheet */
+enum si1133_int_time {
+ _24_4_us = 0,
+ _48_8_us = 1,
+ _97_5_us = 2,
+ _195_0_us = 3,
+ _390_0_us = 4,
+ _780_0_us = 5,
+ _1_560_0_us = 6,
+ _3_120_0_us = 7,
+ _6_240_0_us = 8,
+ _12_480_0_us = 9,
+ _25_ms = 10,
+ _50_ms = 11,
+};
+
+/* Integration time in milliseconds, nanoseconds */
+static const int si1133_int_time_table[][2] = {
+ [_24_4_us] = {0, 24400},
+ [_48_8_us] = {0, 48800},
+ [_97_5_us] = {0, 97500},
+ [_195_0_us] = {0, 195000},
+ [_390_0_us] = {0, 390000},
+ [_780_0_us] = {0, 780000},
+ [_1_560_0_us] = {1, 560000},
+ [_3_120_0_us] = {3, 120000},
+ [_6_240_0_us] = {6, 240000},
+ [_12_480_0_us] = {12, 480000},
+ [_25_ms] = {25, 0},
+ [_50_ms] = {50, 0},
+};
+
+static const struct regmap_range si1133_reg_ranges[] = {
+ regmap_reg_range(0x00, 0x02),
+ regmap_reg_range(0x0A, 0x0B),
+ regmap_reg_range(0x0F, 0x0F),
+ regmap_reg_range(0x10, 0x12),
+ regmap_reg_range(0x13, 0x2C),
+};
+
+static const struct regmap_range si1133_reg_ro_ranges[] = {
+ regmap_reg_range(0x00, 0x02),
+ regmap_reg_range(0x10, 0x2C),
+};
+
+static const struct regmap_range si1133_precious_ranges[] = {
+ regmap_reg_range(0x12, 0x12),
+};
+
+static const struct regmap_access_table si1133_write_ranges_table = {
+ .yes_ranges = si1133_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si1133_reg_ranges),
+ .no_ranges = si1133_reg_ro_ranges,
+ .n_no_ranges = ARRAY_SIZE(si1133_reg_ro_ranges),
+};
+
+static const struct regmap_access_table si1133_read_ranges_table = {
+ .yes_ranges = si1133_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si1133_reg_ranges),
+};
+
+static const struct regmap_access_table si1133_precious_table = {
+ .yes_ranges = si1133_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si1133_precious_ranges),
+};
+
+static const struct regmap_config si1133_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0x2C,
+
+ .wr_table = &si1133_write_ranges_table,
+ .rd_table = &si1133_read_ranges_table,
+
+ .precious_table = &si1133_precious_table,
+};
+
+struct si1133_data {
+ struct regmap *regmap;
+ struct i2c_client *client;
+
+ /* Lock ensuring only one command is processed at a time */
+ struct mutex mutex;
+
+ int rsp_seq;
+ u8 scan_mask;
+ u8 adc_sens[6];
+ u8 adc_config[6];
+
+ struct completion completion;
+};
+
+struct si1133_coeff {
+ s16 info;
+ u16 mag;
+};
+
+struct si1133_lux_coeff {
+ struct si1133_coeff coeff_high[4];
+ struct si1133_coeff coeff_low[9];
+};
+
+static const struct si1133_lux_coeff lux_coeff = {
+ {
+ { 0, 209},
+ { 1665, 93},
+ { 2064, 65},
+ {-2671, 234}
+ },
+ {
+ { 0, 0},
+ { 1921, 29053},
+ {-1022, 36363},
+ { 2320, 20789},
+ { -367, 57909},
+ {-1774, 38240},
+ { -608, 46775},
+ {-1503, 51831},
+ {-1886, 58928}
+ }
+};
+
+static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag,
+ s8 shift)
+{
+ return ((input << fraction) / mag) << shift;
+}
+
+static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
+ u8 input_fraction, s8 sign,
+ const struct si1133_coeff *coeffs)
+{
+ s8 shift;
+ int x1 = 1;
+ int x2 = 1;
+ int y1 = 1;
+ int y2 = 1;
+
+ shift = ((u16)coeffs->info & 0xFF00) >> 8;
+ shift ^= 0xFF;
+ shift += 1;
+ shift = -shift;
+
+ if (x_order > 0) {
+ x1 = si1133_calculate_polynomial_inner(x, input_fraction,
+ coeffs->mag, shift);
+ if (x_order > 1)
+ x2 = x1;
+ }
+
+ if (y_order > 0) {
+ y1 = si1133_calculate_polynomial_inner(y, input_fraction,
+ coeffs->mag, shift);
+ if (y_order > 1)
+ y2 = y1;
+ }
+
+ return sign * x1 * x2 * y1 * y2;
+}
+
+/*
+ * The algorithm is from:
+ * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716
+ */
+static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff,
+ const struct si1133_coeff *coeffs)
+{
+ u8 x_order, y_order;
+ u8 counter;
+ s8 sign;
+ int output = 0;
+
+ for (counter = 0; counter < num_coeff; counter++) {
+ if (coeffs->info < 0)
+ sign = -1;
+ else
+ sign = 1;
+
+ x_order = si1133_get_x_order(coeffs->info);
+ y_order = si1133_get_y_order(coeffs->info);
+
+ if ((x_order == 0) && (y_order == 0))
+ output +=
+ sign * coeffs->mag << SI1133_LUX_OUTPUT_FRACTION;
+ else
+ output += si1133_calculate_output(x, y, x_order,
+ y_order,
+ input_fraction, sign,
+ coeffs);
+ coeffs++;
+ }
+
+ return abs(output);
+}
+
+static int si1133_cmd_reset_sw(struct si1133_data *data)
+{
+ struct device *dev = &data->client->dev;
+ unsigned int resp;
+ unsigned long timeout;
+ int err;
+
+ err = regmap_write(data->regmap, SI1133_REG_COMMAND,
+ SI1133_CMD_RESET_SW);
+ if (err)
+ return err;
+
+ timeout = jiffies + msecs_to_jiffies(SI1133_CMD_TIMEOUT_MS);
+ while (true) {
+ err = regmap_read(data->regmap, SI1133_REG_RESPONSE0, &resp);
+ if (err == -ENXIO) {
+ usleep_range(SI1133_CMD_MINSLEEP_US_LOW,
+ SI1133_CMD_MINSLEEP_US_HIGH);
+ continue;
+ }
+
+ if ((resp & SI1133_MAX_CMD_CTR) == SI1133_MAX_CMD_CTR)
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "Timeout on reset ctr resp: %d\n", resp);
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (!err)
+ data->rsp_seq = SI1133_MAX_CMD_CTR;
+
+ return err;
+}
+
+static int si1133_parse_response_err(struct device *dev, u32 resp, u8 cmd)
+{
+ resp &= 0xF;
+
+ switch (resp) {
+ case SI1133_ERR_OUTPUT_BUFFER_OVERFLOW:
+ dev_warn(dev, "Output buffer overflow: %#02hhx\n", cmd);
+ return -EOVERFLOW;
+ case SI1133_ERR_SATURATION_ADC_OR_OVERFLOW_ACCUMULATION:
+ dev_warn(dev, "Saturation of the ADC or overflow of accumulation: %#02hhx\n",
+ cmd);
+ return -EOVERFLOW;
+ case SI1133_ERR_INVALID_LOCATION_CMD:
+ dev_warn(dev,
+ "Parameter access to an invalid location: %#02hhx\n",
+ cmd);
+ return -EINVAL;
+ case SI1133_ERR_INVALID_CMD:
+ dev_warn(dev, "Invalid command %#02hhx\n", cmd);
+ return -EINVAL;
+ default:
+ dev_warn(dev, "Unknown error %#02hhx\n", cmd);
+ return -EINVAL;
+ }
+}
+
+static int si1133_cmd_reset_counter(struct si1133_data *data)
+{
+ int err = regmap_write(data->regmap, SI1133_REG_COMMAND,
+ SI1133_CMD_RESET_CTR);
+ if (err)
+ return err;
+
+ data->rsp_seq = 0;
+
+ return 0;
+}
+
+static int si1133_command(struct si1133_data *data, u8 cmd)
+{
+ struct device *dev = &data->client->dev;
+ u32 resp;
+ int err;
+ int expected_seq;
+
+ mutex_lock(&data->mutex);
+
+ expected_seq = (data->rsp_seq + 1) & SI1133_MAX_CMD_CTR;
+
+ if (cmd == SI1133_CMD_FORCE)
+ reinit_completion(&data->completion);
+
+ err = regmap_write(data->regmap, SI1133_REG_COMMAND, cmd);
+ if (err) {
+ dev_warn(dev, "Failed to write command %#02hhx, ret=%d\n", cmd,
+ err);
+ goto out;
+ }
+
+ if (cmd == SI1133_CMD_FORCE) {
+ /* wait for irq */
+ if (!wait_for_completion_timeout(&data->completion,
+ msecs_to_jiffies(SI1133_COMPLETION_TIMEOUT_MS))) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ err = regmap_read(data->regmap, SI1133_REG_RESPONSE0, &resp);
+ if (err)
+ goto out;
+ } else {
+ err = regmap_read_poll_timeout(data->regmap,
+ SI1133_REG_RESPONSE0, resp,
+ (resp & SI1133_CMD_SEQ_MASK) ==
+ expected_seq ||
+ (resp & SI1133_CMD_ERR_MASK),
+ SI1133_CMD_MINSLEEP_US_LOW,
+ SI1133_CMD_TIMEOUT_MS * 1000);
+ if (err) {
+ dev_warn(dev,
+ "Failed to read command %#02hhx, ret=%d\n",
+ cmd, err);
+ goto out;
+ }
+ }
+
+ if (resp & SI1133_CMD_ERR_MASK) {
+ err = si1133_parse_response_err(dev, resp, cmd);
+ si1133_cmd_reset_counter(data);
+ } else {
+ data->rsp_seq = expected_seq;
+ }
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return err;
+}
+
+static int si1133_param_set(struct si1133_data *data, u8 param, u32 value)
+{
+ int err = regmap_write(data->regmap, SI1133_REG_HOSTIN0, value);
+
+ if (err)
+ return err;
+
+ return si1133_command(data, SI1133_CMD_PARAM_SET |
+ (param & SI1133_CMD_PARAM_MASK));
+}
+
+static int si1133_param_query(struct si1133_data *data, u8 param, u32 *result)
+{
+ int err = si1133_command(data, SI1133_CMD_PARAM_QUERY |
+ (param & SI1133_CMD_PARAM_MASK));
+ if (err)
+ return err;
+
+ return regmap_read(data->regmap, SI1133_REG_RESPONSE1, result);
+}
+
+#define SI1133_CHANNEL(_ch, _type) \
+ .type = _type, \
+ .channel = _ch, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+
+static const struct iio_chan_spec si1133_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .channel = 0,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_WHITE, IIO_INTENSITY)
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_LARGE_WHITE, IIO_INTENSITY)
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .extend_name = "large",
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_SMALL_IR, IIO_INTENSITY)
+ .extend_name = "small",
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_MED_IR, IIO_INTENSITY)
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_LARGE_IR, IIO_INTENSITY)
+ .extend_name = "large",
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_UV, IIO_UVINDEX)
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_UV_DEEP, IIO_UVINDEX)
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_DUV,
+ }
+};
+
+static int si1133_get_int_time_index(int milliseconds, int nanoseconds)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(si1133_int_time_table); i++) {
+ if (milliseconds == si1133_int_time_table[i][0] &&
+ nanoseconds == si1133_int_time_table[i][1])
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int si1133_set_integration_time(struct si1133_data *data, u8 adc,
+ int milliseconds, int nanoseconds)
+{
+ int index;
+
+ index = si1133_get_int_time_index(milliseconds, nanoseconds);
+ if (index < 0)
+ return index;
+
+ data->adc_sens[adc] &= 0xF0;
+ data->adc_sens[adc] |= index;
+
+ return si1133_param_set(data, SI1133_PARAM_REG_ADCSENS(0),
+ data->adc_sens[adc]);
+}
+
+static int si1133_set_chlist(struct si1133_data *data, u8 scan_mask)
+{
+ /* channel list already set, no need to reprogram */
+ if (data->scan_mask == scan_mask)
+ return 0;
+
+ data->scan_mask = scan_mask;
+
+ return si1133_param_set(data, SI1133_PARAM_REG_CHAN_LIST, scan_mask);
+}
+
+static int si1133_chan_set_adcconfig(struct si1133_data *data, u8 adc,
+ u8 adc_config)
+{
+ int err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCCONFIG(adc),
+ adc_config);
+ if (err)
+ return err;
+
+ data->adc_config[adc] = adc_config;
+
+ return 0;
+}
+
+static int si1133_update_adcconfig(struct si1133_data *data, uint8_t adc,
+ u8 mask, u8 shift, u8 value)
+{
+ u32 adc_config;
+ int err;
+
+ err = si1133_param_query(data, SI1133_PARAM_REG_ADCCONFIG(adc),
+ &adc_config);
+ if (err)
+ return err;
+
+ adc_config &= ~mask;
+ adc_config |= (value << shift);
+
+ return si1133_chan_set_adcconfig(data, adc, adc_config);
+}
+
+static int si1133_set_adcmux(struct si1133_data *data, u8 adc, u8 mux)
+{
+ if ((mux & data->adc_config[adc]) == mux)
+ return 0; /* mux already set to correct value */
+
+ return si1133_update_adcconfig(data, adc, SI1133_ADCMUX_MASK, 0, mux);
+}
+
+static int si1133_force_measurement(struct si1133_data *data)
+{
+ return si1133_command(data, SI1133_CMD_FORCE);
+}
+
+static int si1133_bulk_read(struct si1133_data *data, u8 start_reg, u8 length,
+ u8 *buffer)
+{
+ int err;
+
+ err = si1133_force_measurement(data);
+ if (err)
+ return err;
+
+ return regmap_bulk_read(data->regmap, start_reg, buffer, length);
+}
+
+static int si1133_measure(struct si1133_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ int err;
+
+ __be16 resp;
+
+ err = si1133_set_adcmux(data, 0, chan->channel);
+ if (err)
+ return err;
+
+ /* Deactivate lux measurements if they were active */
+ err = si1133_set_chlist(data, BIT(0));
+ if (err)
+ return err;
+
+ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp),
+ (u8 *)&resp);
+ if (err)
+ return err;
+
+ *val = be16_to_cpu(resp);
+
+ return err;
+}
+
+static irqreturn_t si1133_threaded_irq_handler(int irq, void *private)
+{
+ struct iio_dev *iio_dev = private;
+ struct si1133_data *data = iio_priv(iio_dev);
+ u32 irq_status;
+ int err;
+
+ err = regmap_read(data->regmap, SI1133_REG_IRQ_STATUS, &irq_status);
+ if (err) {
+ dev_err_ratelimited(&iio_dev->dev, "Error reading IRQ\n");
+ goto out;
+ }
+
+ if (irq_status != data->scan_mask)
+ return IRQ_NONE;
+
+out:
+ complete(&data->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int si1133_scale_to_swgain(int scale_integer, int scale_fractional)
+{
+ scale_integer = find_closest(scale_integer, si1133_scale_available,
+ ARRAY_SIZE(si1133_scale_available));
+ if (scale_integer < 0 ||
+     scale_integer >= ARRAY_SIZE(si1133_scale_available) ||
+     scale_fractional != 0)
+ return -EINVAL;
+
+ return scale_integer;
+}
+
+static int si1133_chan_set_adcsens(struct si1133_data *data, u8 adc,
+ u8 adc_sens)
+{
+ int err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCSENS(adc), adc_sens);
+ if (err)
+ return err;
+
+ data->adc_sens[adc] = adc_sens;
+
+ return 0;
+}
+
+static int si1133_update_adcsens(struct si1133_data *data, u8 mask,
+ u8 shift, u8 value)
+{
+ int err;
+ u32 adc_sens;
+
+ err = si1133_param_query(data, SI1133_PARAM_REG_ADCSENS(0),
+ &adc_sens);
+ if (err)
+ return err;
+
+ adc_sens &= ~mask;
+ adc_sens |= (value << shift);
+
+ return si1133_chan_set_adcsens(data, 0, adc_sens);
+}
+
+static int si1133_get_lux(struct si1133_data *data, int *val)
+{
+ int err;
+ int lux;
+ u32 high_vis;
+ u32 low_vis;
+ u32 ir;
+ u8 buffer[SI1133_LUX_BUFFER_SIZE];
+
+ /* Activate lux channels */
+ err = si1133_set_chlist(data, SI1133_LUX_ADC_MASK);
+ if (err)
+ return err;
+
+ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0),
+ SI1133_LUX_BUFFER_SIZE, buffer);
+ if (err)
+ return err;
+
+ high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2];
+ low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5];
+ ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8];
+
+ if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
+ lux = si1133_calc_polynomial(high_vis, ir,
+ SI1133_INPUT_FRACTION_HIGH,
+ ARRAY_SIZE(lux_coeff.coeff_high),
+ &lux_coeff.coeff_high[0]);
+ else
+ lux = si1133_calc_polynomial(low_vis, ir,
+ SI1133_INPUT_FRACTION_LOW,
+ ARRAY_SIZE(lux_coeff.coeff_low),
+ &lux_coeff.coeff_low[0]);
+
+ *val = lux >> SI1133_LUX_OUTPUT_FRACTION;
+
+ return err;
+}
+
+static int si1133_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct si1133_data *data = iio_priv(iio_dev);
+ u8 adc_sens = data->adc_sens[0];
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ err = si1133_get_lux(data, val);
+ if (err)
+ return err;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ err = si1133_measure(data, chan, val);
+ if (err)
+ return err;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ adc_sens &= SI1133_ADCSENS_HW_GAIN_MASK;
+
+ *val = si1133_int_time_table[adc_sens][0];
+ *val2 = si1133_int_time_table[adc_sens][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ adc_sens &= SI1133_ADCSENS_SCALE_MASK;
+ adc_sens >>= SI1133_ADCSENS_SCALE_SHIFT;
+
+ *val = BIT(adc_sens);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ adc_sens >>= SI1133_ADCSENS_HSIG_SHIFT;
+
+ *val = adc_sens;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int si1133_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct si1133_data *data = iio_priv(iio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ val = si1133_scale_to_swgain(val, val2);
+ if (val < 0)
+ return val;
+
+ return si1133_update_adcsens(data,
+ SI1133_ADCSENS_SCALE_MASK,
+ SI1133_ADCSENS_SCALE_SHIFT,
+ val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ return si1133_set_integration_time(data, 0, val, val2);
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ return si1133_update_adcsens(data,
+ SI1133_ADCSENS_HSIG_MASK,
+ SI1133_ADCSENS_HSIG_SHIFT,
+ val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct attribute *si1133_attributes[] = {
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ &iio_const_attr_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group si1133_attribute_group = {
+ .attrs = si1133_attributes,
+};
+
+static const struct iio_info si1133_info = {
+ .read_raw = si1133_read_raw,
+ .write_raw = si1133_write_raw,
+ .attrs = &si1133_attribute_group,
+};
+
+/*
+ * si1133_init_lux_channels - Configure the three ADC channels (1, 2 and 3)
+ * used for the lux measurement. The channel configuration was taken from:
+ * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00578
+ *
+ * Channel 0 is reserved for the other raw measurements.
+ */
+static int si1133_init_lux_channels(struct si1133_data *data)
+{
+ int err;
+
+ err = si1133_chan_set_adcconfig(data, 1,
+ SI1133_ADCCONFIG_DECIM_RATE(1) |
+ SI1133_PARAM_ADCMUX_LARGE_WHITE);
+ if (err)
+ return err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCPOST(1),
+ SI1133_ADCPOST_24BIT_EN |
+ SI1133_ADCPOST_POSTSHIFT_BITQTY(0));
+ if (err)
+ return err;
+ err = si1133_chan_set_adcsens(data, 1, SI1133_ADCSENS_HSIG_MASK |
+ SI1133_ADCSENS_NB_MEAS(64) | _48_8_us);
+ if (err)
+ return err;
+
+ err = si1133_chan_set_adcconfig(data, 2,
+ SI1133_ADCCONFIG_DECIM_RATE(1) |
+ SI1133_PARAM_ADCMUX_LARGE_WHITE);
+ if (err)
+ return err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCPOST(2),
+ SI1133_ADCPOST_24BIT_EN |
+ SI1133_ADCPOST_POSTSHIFT_BITQTY(2));
+ if (err)
+ return err;
+
+ err = si1133_chan_set_adcsens(data, 2, SI1133_ADCSENS_HSIG_MASK |
+ SI1133_ADCSENS_NB_MEAS(1) | _3_120_0_us);
+ if (err)
+ return err;
+
+ err = si1133_chan_set_adcconfig(data, 3,
+ SI1133_ADCCONFIG_DECIM_RATE(1) |
+ SI1133_PARAM_ADCMUX_MED_IR);
+ if (err)
+ return err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCPOST(3),
+ SI1133_ADCPOST_24BIT_EN |
+ SI1133_ADCPOST_POSTSHIFT_BITQTY(2));
+ if (err)
+ return err;
+
+ return si1133_chan_set_adcsens(data, 3, SI1133_ADCSENS_HSIG_MASK |
+ SI1133_ADCSENS_NB_MEAS(64) | _48_8_us);
+}
+
+static int si1133_initialize(struct si1133_data *data)
+{
+ int err;
+
+ err = si1133_cmd_reset_sw(data);
+ if (err)
+ return err;
+
+ /* Turn off autonomous mode */
+ err = si1133_param_set(data, SI1133_REG_MEAS_RATE, 0);
+ if (err)
+ return err;
+
+ err = si1133_init_lux_channels(data);
+ if (err)
+ return err;
+
+ return regmap_write(data->regmap, SI1133_REG_IRQ_ENABLE,
+ SI1133_IRQ_CHANNEL_ENABLE);
+}
+
+static int si1133_validate_ids(struct iio_dev *iio_dev)
+{
+ struct si1133_data *data = iio_priv(iio_dev);
+
+ unsigned int part_id, rev_id, mfr_id;
+ int err;
+
+ err = regmap_read(data->regmap, SI1133_REG_PART_ID, &part_id);
+ if (err)
+ return err;
+
+ err = regmap_read(data->regmap, SI1133_REG_REV_ID, &rev_id);
+ if (err)
+ return err;
+
+ err = regmap_read(data->regmap, SI1133_REG_MFR_ID, &mfr_id);
+ if (err)
+ return err;
+
+ dev_info(&iio_dev->dev,
+ "Device ID part %#02hhx rev %#02hhx mfr %#02hhx\n",
+ part_id, rev_id, mfr_id);
+ if (part_id != SI1133_PART_ID) {
+ dev_err(&iio_dev->dev,
+ "Part ID mismatch got %#02hhx, expected %#02x\n",
+ part_id, SI1133_PART_ID);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int si1133_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct si1133_data *data;
+ struct iio_dev *iio_dev;
+ int err;
+
+ iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(iio_dev);
+
+ init_completion(&data->completion);
+
+ data->regmap = devm_regmap_init_i2c(client, &si1133_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ err = PTR_ERR(data->regmap);
+ dev_err(&client->dev, "Failed to initialise regmap: %d\n", err);
+ return err;
+ }
+
+ i2c_set_clientdata(client, iio_dev);
+ data->client = client;
+
+ iio_dev->dev.parent = &client->dev;
+ iio_dev->name = id->name;
+ iio_dev->channels = si1133_channels;
+ iio_dev->num_channels = ARRAY_SIZE(si1133_channels);
+ iio_dev->info = &si1133_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ mutex_init(&data->mutex);
+
+ err = si1133_validate_ids(iio_dev);
+ if (err)
+ return err;
+
+ err = si1133_initialize(data);
+ if (err) {
+ dev_err(&client->dev,
+ "Error when initializing chip: %d\n", err);
+ return err;
+ }
+
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "Required interrupt not provided, cannot proceed\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ si1133_threaded_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ client->name, iio_dev);
+ if (err) {
+ dev_warn(&client->dev, "Request irq %d failed: %i\n",
+ client->irq, err);
+ return err;
+ }
+
+ return devm_iio_device_register(&client->dev, iio_dev);
+}
+
+static const struct i2c_device_id si1133_ids[] = {
+ { "si1133", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si1133_ids);
+
+static struct i2c_driver si1133_driver = {
+ .driver = {
+ .name = "si1133",
+ },
+ .probe = si1133_probe,
+ .id_table = si1133_ids,
+};
+
+module_i2c_driver(si1133_driver);
+
+MODULE_AUTHOR("Maxime Roussin-Belanger <maxime.roussinbelanger@gmail.com>");
+MODULE_DESCRIPTION("Silabs SI1133, UV index sensor and ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 34d42a2504c9..df5b2a0da96c 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
"%s: failed to get lux\n", __func__);
return lux_val;
}
+ if (lux_val == 0)
+ return -ERANGE;
ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
lux_val;
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index c599a90506ad..04fd0d4b6f19 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1,5 +1,5 @@
/*
- * vcnl4000.c - Support for Vishay VCNL4000/4010/4020 combined ambient
+ * vcnl4000.c - Support for Vishay VCNL4000/4010/4020/4200 combined ambient
* light and proximity sensor
*
* Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
@@ -8,13 +8,15 @@
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
- * IIO driver for VCNL4000 (7-bit I2C slave address 0x13)
+ * IIO driver for:
+ * VCNL4000/10/20 (7-bit I2C slave address 0x13)
+ * VCNL4200 (7-bit I2C slave address 0x51)
*
* TODO:
* allow to adjust IR current
* proximity threshold and event handling
* periodic ALS/proximity measurement (VCNL4010/20)
- * interrupts (VCNL4010/20)
+ * interrupts (VCNL4010/20, VCNL4200)
*/
#include <linux/module.h>
@@ -26,8 +28,9 @@
#include <linux/iio/sysfs.h>
#define VCNL4000_DRV_NAME "vcnl4000"
-#define VCNL4000_ID 0x01
-#define VCNL4010_ID 0x02 /* for VCNL4020, VCNL4010 */
+#define VCNL4000_PROD_ID 0x01
+#define VCNL4010_PROD_ID 0x02 /* for VCNL4020, VCNL4010 */
+#define VCNL4200_PROD_ID 0x58
#define VCNL4000_COMMAND 0x80 /* Command register */
#define VCNL4000_PROD_REV 0x81 /* Product ID and Revision ID */
@@ -40,23 +43,124 @@
#define VCNL4000_PS_MEAS_FREQ 0x89 /* Proximity test signal frequency */
#define VCNL4000_PS_MOD_ADJ 0x8a /* Proximity modulator timing adjustment */
+#define VCNL4200_AL_CONF 0x00 /* Ambient light configuration */
+#define VCNL4200_PS_CONF1 0x03 /* Proximity configuration */
+#define VCNL4200_PS_DATA 0x08 /* Proximity data */
+#define VCNL4200_AL_DATA 0x09 /* Ambient light data */
+#define VCNL4200_DEV_ID 0x0e /* Device ID, slave address and version */
+
/* Bit masks for COMMAND register */
#define VCNL4000_AL_RDY BIT(6) /* ALS data ready? */
#define VCNL4000_PS_RDY BIT(5) /* proximity data ready? */
#define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */
#define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */
+enum vcnl4000_device_ids {
+ VCNL4000,
+ VCNL4010,
+ VCNL4200,
+};
+
+struct vcnl4200_channel {
+ u8 reg;
+ ktime_t last_measurement;
+ ktime_t sampling_rate;
+ struct mutex lock;
+};
+
struct vcnl4000_data {
struct i2c_client *client;
- struct mutex lock;
+ enum vcnl4000_device_ids id;
+ int rev;
+ int al_scale;
+ const struct vcnl4000_chip_spec *chip_spec;
+ struct mutex vcnl4000_lock;
+ struct vcnl4200_channel vcnl4200_al;
+ struct vcnl4200_channel vcnl4200_ps;
+};
+
+struct vcnl4000_chip_spec {
+ const char *prod;
+ int (*init)(struct vcnl4000_data *data);
+ int (*measure_light)(struct vcnl4000_data *data, int *val);
+ int (*measure_proximity)(struct vcnl4000_data *data, int *val);
};
static const struct i2c_device_id vcnl4000_id[] = {
- { "vcnl4000", 0 },
+ { "vcnl4000", VCNL4000 },
+ { "vcnl4010", VCNL4010 },
+ { "vcnl4020", VCNL4010 },
+ { "vcnl4200", VCNL4200 },
{ }
};
MODULE_DEVICE_TABLE(i2c, vcnl4000_id);
+static int vcnl4000_init(struct vcnl4000_data *data)
+{
+ int ret, prod_id;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4000_PROD_REV);
+ if (ret < 0)
+ return ret;
+
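+	/* Product ID is in the high nibble, revision in the low nibble */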
+ prod_id = ret >> 4;
+ switch (prod_id) {
+ case VCNL4000_PROD_ID:
+ if (data->id != VCNL4000)
+ dev_warn(&data->client->dev,
+ "wrong device id, use vcnl4000");
+ break;
+ case VCNL4010_PROD_ID:
+ if (data->id != VCNL4010)
+ dev_warn(&data->client->dev,
+ "wrong device id, use vcnl4010/4020");
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ data->rev = ret & 0xf;
+ data->al_scale = 250000;
+ mutex_init(&data->vcnl4000_lock);
+
+ return 0;
+};
+
+static int vcnl4200_init(struct vcnl4000_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_DEV_ID);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & 0xff) != VCNL4200_PROD_ID)
+ return -ENODEV;
+
+ data->rev = (ret >> 8) & 0xf;
+
+ /* Set defaults and enable both channels */
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4200_AL_CONF, 0x00);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4200_PS_CONF1, 0x00);
+ if (ret < 0)
+ return ret;
+
+ data->al_scale = 24000;
+ data->vcnl4200_al.reg = VCNL4200_AL_DATA;
+ data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
+	/* Integration time is 50 ms, but experiments show 54 ms in total. */
+ data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
+ data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ data->vcnl4200_al.last_measurement = ktime_set(0, 0);
+ data->vcnl4200_ps.last_measurement = ktime_set(0, 0);
+ mutex_init(&data->vcnl4200_al.lock);
+ mutex_init(&data->vcnl4200_ps.lock);
+
+ return 0;
+};
+
static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
@@ -64,7 +168,7 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
__be16 buf;
int ret;
- mutex_lock(&data->lock);
+ mutex_lock(&data->vcnl4000_lock);
ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
req_mask);
@@ -93,16 +197,88 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
if (ret < 0)
goto fail;
- mutex_unlock(&data->lock);
+ mutex_unlock(&data->vcnl4000_lock);
*val = be16_to_cpu(buf);
return 0;
fail:
- mutex_unlock(&data->lock);
+ mutex_unlock(&data->vcnl4000_lock);
return ret;
}
+static int vcnl4200_measure(struct vcnl4000_data *data,
+ struct vcnl4200_channel *chan, int *val)
+{
+ int ret;
+ s64 delta;
+ ktime_t next_measurement;
+
+ mutex_lock(&chan->lock);
+
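+	/*
+	 * The chip samples autonomously; wait until the next measurement
+	 * is due before reading the data register.
+	 */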
+ next_measurement = ktime_add(chan->last_measurement,
+ chan->sampling_rate);
+ delta = ktime_us_delta(next_measurement, ktime_get());
+ if (delta > 0)
+ usleep_range(delta, delta + 500);
+ chan->last_measurement = ktime_get();
+
+ mutex_unlock(&chan->lock);
+
+ ret = i2c_smbus_read_word_data(data->client, chan->reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int vcnl4000_measure_light(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4000_measure(data,
+ VCNL4000_AL_OD, VCNL4000_AL_RDY,
+ VCNL4000_AL_RESULT_HI, val);
+}
+
+static int vcnl4200_measure_light(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4200_measure(data, &data->vcnl4200_al, val);
+}
+
+static int vcnl4000_measure_proximity(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4000_measure(data,
+ VCNL4000_PS_OD, VCNL4000_PS_RDY,
+ VCNL4000_PS_RESULT_HI, val);
+}
+
+static int vcnl4200_measure_proximity(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4200_measure(data, &data->vcnl4200_ps, val);
+}
+
+static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
+ [VCNL4000] = {
+ .prod = "VCNL4000",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ },
+ [VCNL4010] = {
+ .prod = "VCNL4010/4020",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ },
+ [VCNL4200] = {
+ .prod = "VCNL4200",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ },
+};
+
static const struct iio_chan_spec vcnl4000_channels[] = {
{
.type = IIO_LIGHT,
@@ -125,16 +301,12 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_LIGHT:
- ret = vcnl4000_measure(data,
- VCNL4000_AL_OD, VCNL4000_AL_RDY,
- VCNL4000_AL_RESULT_HI, val);
+ ret = data->chip_spec->measure_light(data, val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
case IIO_PROXIMITY:
- ret = vcnl4000_measure(data,
- VCNL4000_PS_OD, VCNL4000_PS_RDY,
- VCNL4000_PS_RESULT_HI, val);
+ ret = data->chip_spec->measure_proximity(data, val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
@@ -146,7 +318,7 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
*val = 0;
- *val2 = 250000;
+ *val2 = data->al_scale;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
@@ -162,7 +334,7 @@ static int vcnl4000_probe(struct i2c_client *client,
{
struct vcnl4000_data *data;
struct iio_dev *indio_dev;
- int ret, prod_id;
+ int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -171,19 +343,15 @@ static int vcnl4000_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
+ data->id = id->driver_data;
+ data->chip_spec = &vcnl4000_chip_spec_cfg[data->id];
- ret = i2c_smbus_read_byte_data(data->client, VCNL4000_PROD_REV);
+ ret = data->chip_spec->init(data);
if (ret < 0)
return ret;
- prod_id = ret >> 4;
- if (prod_id != VCNL4010_ID && prod_id != VCNL4000_ID)
- return -ENODEV;
-
dev_dbg(&client->dev, "%s Ambient light/proximity sensor, Rev: %02x\n",
- (prod_id == VCNL4010_ID) ? "VCNL4010/4020" : "VCNL4000",
- ret & 0xf);
+ data->chip_spec->prod, data->rev);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &vcnl4000_info;
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 5ec3e41b65f2..fe87d27779d9 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
}
comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
- *val = comp_humidity;
- *val2 = 1024;
+ *val = comp_humidity * 1000 / 1024;
- return IIO_VAL_FRACTIONAL;
+ return IIO_VAL_INT;
}
static int bmp280_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index fbb59059e942..2026a1012012 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -94,9 +94,8 @@ static int st_press_i2c_probe(struct i2c_client *client,
if ((ret < 0) || (ret >= ST_PRESS_MAX))
return -ENODEV;
- strncpy(client->name, st_press_id_table[ret].name,
+ strlcpy(client->name, st_press_id_table[ret].name,
sizeof(client->name));
- client->name[sizeof(client->name) - 1] = '\0';
} else if (!id)
return -ENODEV;
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index f726f9427602..388ef70c11d2 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -20,6 +20,19 @@ endmenu
menu "Proximity and distance sensors"
+config ISL29501
+ tristate "Intersil ISL29501 Time Of Flight sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say Y here if you want to build a driver for the Intersil ISL29501
+ Time of Flight sensor.
+
+ To compile this driver as a module, choose M here: the module will be
+ called isl29501.
+
config LIDAR_LITE_V2
tristate "PulsedLight LIDAR sensor"
select IIO_BUFFER
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 4f4ed45e87ef..cac3d7d3325e 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
+obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
new file mode 100644
index 000000000000..e5e94540f404
--- /dev/null
+++ b/drivers/iio/proximity/isl29501.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * isl29501.c: ISL29501 Time of Flight sensor driver.
+ *
+ * Copyright (C) 2018
+ * Author: Mathieu Othacehe <m.othacehe@gmail.com>
+ *
+ * 7-bit I2C slave address: 0x57
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* Control, setting and status registers */
+#define ISL29501_DEVICE_ID 0x00
+#define ISL29501_ID 0x0A
+
+/* Sampling control registers */
+#define ISL29501_INTEGRATION_PERIOD 0x10
+#define ISL29501_SAMPLE_PERIOD 0x11
+
+/* Closed loop calibration registers */
+#define ISL29501_CROSSTALK_I_MSB 0x24
+#define ISL29501_CROSSTALK_I_LSB 0x25
+#define ISL29501_CROSSTALK_I_EXPONENT 0x26
+#define ISL29501_CROSSTALK_Q_MSB 0x27
+#define ISL29501_CROSSTALK_Q_LSB 0x28
+#define ISL29501_CROSSTALK_Q_EXPONENT 0x29
+#define ISL29501_CROSSTALK_GAIN_MSB 0x2A
+#define ISL29501_CROSSTALK_GAIN_LSB 0x2B
+#define ISL29501_MAGNITUDE_REF_EXP 0x2C
+#define ISL29501_MAGNITUDE_REF_MSB 0x2D
+#define ISL29501_MAGNITUDE_REF_LSB 0x2E
+#define ISL29501_PHASE_OFFSET_MSB 0x2F
+#define ISL29501_PHASE_OFFSET_LSB 0x30
+
+/* Analog control registers */
+#define ISL29501_DRIVER_RANGE 0x90
+#define ISL29501_EMITTER_DAC 0x91
+
+#define ISL29501_COMMAND_REGISTER 0xB0
+
+/* Commands */
+#define ISL29501_EMUL_SAMPLE_START_PIN 0x49
+#define ISL29501_RESET_ALL_REGISTERS 0xD7
+#define ISL29501_RESET_INT_SM 0xD1
+
+/* Ambient light and temperature corrections */
+#define ISL29501_TEMP_REFERENCE 0x31
+#define ISL29501_PHASE_EXPONENT 0x33
+#define ISL29501_TEMP_COEFF_A 0x34
+#define ISL29501_TEMP_COEFF_B 0x39
+#define ISL29501_AMBIANT_COEFF_A 0x36
+#define ISL29501_AMBIANT_COEFF_B 0x3B
+
+/* Data output registers */
+#define ISL29501_DISTANCE_MSB_DATA 0xD1
+#define ISL29501_DISTANCE_LSB_DATA 0xD2
+#define ISL29501_PRECISION_MSB 0xD3
+#define ISL29501_PRECISION_LSB 0xD4
+#define ISL29501_MAGNITUDE_EXPONENT 0xD5
+#define ISL29501_MAGNITUDE_MSB 0xD6
+#define ISL29501_MAGNITUDE_LSB 0xD7
+#define ISL29501_PHASE_MSB 0xD8
+#define ISL29501_PHASE_LSB 0xD9
+#define ISL29501_I_RAW_EXPONENT 0xDA
+#define ISL29501_I_RAW_MSB 0xDB
+#define ISL29501_I_RAW_LSB 0xDC
+#define ISL29501_Q_RAW_EXPONENT 0xDD
+#define ISL29501_Q_RAW_MSB 0xDE
+#define ISL29501_Q_RAW_LSB 0xDF
+#define ISL29501_DIE_TEMPERATURE 0xE2
+#define ISL29501_AMBIENT_LIGHT 0xE3
+#define ISL29501_GAIN_MSB 0xE6
+#define ISL29501_GAIN_LSB 0xE7
+
+#define ISL29501_MAX_EXP_VAL 15
+
+#define ISL29501_INT_TIME_AVAILABLE \
+ "0.00007 0.00014 0.00028 0.00057 0.00114 " \
+ "0.00228 0.00455 0.00910 0.01820 0.03640 " \
+ "0.07281 0.14561"
+
+#define ISL29501_CURRENT_SCALE_AVAILABLE \
+ "0.0039 0.0078 0.0118 0.0157 0.0196 " \
+ "0.0235 0.0275 0.0314 0.0352 0.0392 " \
+ "0.0431 0.0471 0.0510 0.0549 0.0588"
+
+enum isl29501_correction_coeff {
+ COEFF_TEMP_A,
+ COEFF_TEMP_B,
+ COEFF_LIGHT_A,
+ COEFF_LIGHT_B,
+ COEFF_MAX,
+};
+
+struct isl29501_private {
+ struct i2c_client *client;
+ struct mutex lock;
+ /* Exact representation of correction coefficients. */
+ unsigned int shadow_coeffs[COEFF_MAX];
+};
+
+enum isl29501_register_name {
+ REG_DISTANCE,
+ REG_PHASE,
+ REG_TEMPERATURE,
+ REG_AMBIENT_LIGHT,
+ REG_GAIN,
+ REG_GAIN_BIAS,
+ REG_PHASE_EXP,
+ REG_CALIB_PHASE_TEMP_A,
+ REG_CALIB_PHASE_TEMP_B,
+ REG_CALIB_PHASE_LIGHT_A,
+ REG_CALIB_PHASE_LIGHT_B,
+ REG_DISTANCE_BIAS,
+ REG_TEMPERATURE_BIAS,
+ REG_INT_TIME,
+ REG_SAMPLE_TIME,
+ REG_DRIVER_RANGE,
+ REG_EMITTER_DAC,
+};
+
+struct isl29501_register_desc {
+ u8 msb;
+ u8 lsb;
+};
+
+static const struct isl29501_register_desc isl29501_registers[] = {
+ [REG_DISTANCE] = {
+ .msb = ISL29501_DISTANCE_MSB_DATA,
+ .lsb = ISL29501_DISTANCE_LSB_DATA,
+ },
+ [REG_PHASE] = {
+ .msb = ISL29501_PHASE_MSB,
+ .lsb = ISL29501_PHASE_LSB,
+ },
+ [REG_TEMPERATURE] = {
+ .lsb = ISL29501_DIE_TEMPERATURE,
+ },
+ [REG_AMBIENT_LIGHT] = {
+ .lsb = ISL29501_AMBIENT_LIGHT,
+ },
+ [REG_GAIN] = {
+ .msb = ISL29501_GAIN_MSB,
+ .lsb = ISL29501_GAIN_LSB,
+ },
+ [REG_GAIN_BIAS] = {
+ .msb = ISL29501_CROSSTALK_GAIN_MSB,
+ .lsb = ISL29501_CROSSTALK_GAIN_LSB,
+ },
+ [REG_PHASE_EXP] = {
+ .lsb = ISL29501_PHASE_EXPONENT,
+ },
+ [REG_CALIB_PHASE_TEMP_A] = {
+ .lsb = ISL29501_TEMP_COEFF_A,
+ },
+ [REG_CALIB_PHASE_TEMP_B] = {
+ .lsb = ISL29501_TEMP_COEFF_B,
+ },
+ [REG_CALIB_PHASE_LIGHT_A] = {
+ .lsb = ISL29501_AMBIANT_COEFF_A,
+ },
+ [REG_CALIB_PHASE_LIGHT_B] = {
+ .lsb = ISL29501_AMBIANT_COEFF_B,
+ },
+ [REG_DISTANCE_BIAS] = {
+ .msb = ISL29501_PHASE_OFFSET_MSB,
+ .lsb = ISL29501_PHASE_OFFSET_LSB,
+ },
+ [REG_TEMPERATURE_BIAS] = {
+ .lsb = ISL29501_TEMP_REFERENCE,
+ },
+ [REG_INT_TIME] = {
+ .lsb = ISL29501_INTEGRATION_PERIOD,
+ },
+ [REG_SAMPLE_TIME] = {
+ .lsb = ISL29501_SAMPLE_PERIOD,
+ },
+ [REG_DRIVER_RANGE] = {
+ .lsb = ISL29501_DRIVER_RANGE,
+ },
+ [REG_EMITTER_DAC] = {
+ .lsb = ISL29501_EMITTER_DAC,
+ },
+};
+
+static int isl29501_register_read(struct isl29501_private *isl29501,
+ enum isl29501_register_name name,
+ u32 *val)
+{
+ const struct isl29501_register_desc *reg = &isl29501_registers[name];
+ u8 msb = 0, lsb = 0;
+ s32 ret;
+
+ mutex_lock(&isl29501->lock);
+ if (reg->msb) {
+ ret = i2c_smbus_read_byte_data(isl29501->client, reg->msb);
+ if (ret < 0)
+ goto err;
+ msb = ret;
+ }
+
+ if (reg->lsb) {
+ ret = i2c_smbus_read_byte_data(isl29501->client, reg->lsb);
+ if (ret < 0)
+ goto err;
+ lsb = ret;
+ }
+ mutex_unlock(&isl29501->lock);
+
+ *val = (msb << 8) + lsb;
+
+ return 0;
+err:
+ mutex_unlock(&isl29501->lock);
+
+ return ret;
+}
+
+static int isl29501_register_write(struct isl29501_private *isl29501,
+ enum isl29501_register_name name,
+ u32 value)
+{
+ const struct isl29501_register_desc *reg = &isl29501_registers[name];
+ u8 msb, lsb;
+ int ret;
+
+ if (!reg->msb && value > U8_MAX)
+ return -ERANGE;
+
+ if (value > U16_MAX)
+ return -ERANGE;
+
+ if (!reg->msb) {
+ lsb = value & 0xFF;
+ } else {
+ msb = (value >> 8) & 0xFF;
+ lsb = value & 0xFF;
+ }
+
+ mutex_lock(&isl29501->lock);
+ if (reg->msb) {
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ reg->msb, msb);
+ if (ret < 0)
+ goto err;
+ }
+
+ ret = i2c_smbus_write_byte_data(isl29501->client, reg->lsb, lsb);
+
+err:
+ mutex_unlock(&isl29501->lock);
+ return ret;
+}
+
+static ssize_t isl29501_read_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ enum isl29501_register_name reg = private;
+ int ret;
+ u32 value, gain, coeff, exp;
+
+ switch (reg) {
+ case REG_GAIN:
+ case REG_GAIN_BIAS:
+ ret = isl29501_register_read(isl29501, reg, &gain);
+ if (ret < 0)
+ return ret;
+
+ value = gain;
+ break;
+ case REG_CALIB_PHASE_TEMP_A:
+ case REG_CALIB_PHASE_TEMP_B:
+ case REG_CALIB_PHASE_LIGHT_A:
+ case REG_CALIB_PHASE_LIGHT_B:
+ ret = isl29501_register_read(isl29501, REG_PHASE_EXP, &exp);
+ if (ret < 0)
+ return ret;
+
+ ret = isl29501_register_read(isl29501, reg, &coeff);
+ if (ret < 0)
+ return ret;
+
+ value = coeff << exp;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static int isl29501_set_shadow_coeff(struct isl29501_private *isl29501,
+ enum isl29501_register_name reg,
+ unsigned int val)
+{
+ enum isl29501_correction_coeff coeff;
+
+ switch (reg) {
+ case REG_CALIB_PHASE_TEMP_A:
+ coeff = COEFF_TEMP_A;
+ break;
+ case REG_CALIB_PHASE_TEMP_B:
+ coeff = COEFF_TEMP_B;
+ break;
+ case REG_CALIB_PHASE_LIGHT_A:
+ coeff = COEFF_LIGHT_A;
+ break;
+ case REG_CALIB_PHASE_LIGHT_B:
+ coeff = COEFF_LIGHT_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+ isl29501->shadow_coeffs[coeff] = val;
+
+ return 0;
+}
+
+static int isl29501_write_coeff(struct isl29501_private *isl29501,
+ enum isl29501_correction_coeff coeff,
+ int val)
+{
+ enum isl29501_register_name reg;
+
+ switch (coeff) {
+ case COEFF_TEMP_A:
+ reg = REG_CALIB_PHASE_TEMP_A;
+ break;
+ case COEFF_TEMP_B:
+ reg = REG_CALIB_PHASE_TEMP_B;
+ break;
+ case COEFF_LIGHT_A:
+ reg = REG_CALIB_PHASE_LIGHT_A;
+ break;
+ case COEFF_LIGHT_B:
+ reg = REG_CALIB_PHASE_LIGHT_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return isl29501_register_write(isl29501, reg, val);
+}
+
+static unsigned int isl29501_find_corr_exp(unsigned int val,
+ unsigned int max_exp,
+ unsigned int max_mantissa)
+{
+ unsigned int exp = 1;
+
+ /*
+ * Correction coefficients are represented under
+ * mantissa * 2^exponent form, where mantissa and exponent
+ * are stored in two separate registers of the sensor.
+ *
+	 * Compute and return the lowest exponent such that:
+	 * mantissa = value / 2^exponent
+	 *
+	 * where mantissa <= max_mantissa.
+ */
+ if (val <= max_mantissa)
+ return 0;
+
+ while ((val >> exp) > max_mantissa) {
+ exp++;
+
+ if (exp > max_exp)
+ return max_exp;
+ }
+
+ return exp;
+}
+
+static ssize_t isl29501_write_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ enum isl29501_register_name reg = private;
+ unsigned int val;
+ int max_exp = 0;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (reg) {
+ case REG_GAIN_BIAS:
+ if (val > U16_MAX)
+ return -ERANGE;
+
+ ret = isl29501_register_write(isl29501, reg, val);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case REG_CALIB_PHASE_TEMP_A:
+ case REG_CALIB_PHASE_TEMP_B:
+ case REG_CALIB_PHASE_LIGHT_A:
+ case REG_CALIB_PHASE_LIGHT_B:
+
+ if (val > (U8_MAX << ISL29501_MAX_EXP_VAL))
+ return -ERANGE;
+
+ /* Store the correction coefficient under its exact form. */
+ ret = isl29501_set_shadow_coeff(isl29501, reg, val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Find the highest exponent needed to represent
+ * correction coefficients.
+ */
+ for (i = 0; i < COEFF_MAX; i++) {
+ int corr;
+ int corr_exp;
+
+ corr = isl29501->shadow_coeffs[i];
+ corr_exp = isl29501_find_corr_exp(corr,
+ ISL29501_MAX_EXP_VAL,
+ U8_MAX / 2);
+ dev_dbg(&isl29501->client->dev,
+ "found exp of corr(%d) = %d\n", corr, corr_exp);
+
+ max_exp = max(max_exp, corr_exp);
+ }
+
+ /*
+ * Represent every correction coefficient under
+ * mantissa * 2^max_exponent form and force the
+ * writing of those coefficients on the sensor.
+ */
+ for (i = 0; i < COEFF_MAX; i++) {
+ int corr;
+ int mantissa;
+
+ corr = isl29501->shadow_coeffs[i];
+ if (!corr)
+ continue;
+
+ mantissa = corr >> max_exp;
+
+ ret = isl29501_write_coeff(isl29501, i, mantissa);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = isl29501_register_write(isl29501, REG_PHASE_EXP, max_exp);
+ if (ret < 0)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#define _ISL29501_EXT_INFO(_name, _ident) { \
+ .name = _name, \
+ .read = isl29501_read_ext, \
+ .write = isl29501_write_ext, \
+ .private = _ident, \
+ .shared = IIO_SEPARATE, \
+}
+
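+/*
+ * Each entry below becomes a per-channel sysfs attribute; the IIO core
+ * composes the file name from the channel type and the name given here
+ * (e.g. something like in_proximity_agc_gain).
+ */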
+static const struct iio_chan_spec_ext_info isl29501_ext_info[] = {
+ _ISL29501_EXT_INFO("agc_gain", REG_GAIN),
+ _ISL29501_EXT_INFO("agc_gain_bias", REG_GAIN_BIAS),
+ _ISL29501_EXT_INFO("calib_phase_temp_a", REG_CALIB_PHASE_TEMP_A),
+ _ISL29501_EXT_INFO("calib_phase_temp_b", REG_CALIB_PHASE_TEMP_B),
+ _ISL29501_EXT_INFO("calib_phase_light_a", REG_CALIB_PHASE_LIGHT_A),
+ _ISL29501_EXT_INFO("calib_phase_light_b", REG_CALIB_PHASE_LIGHT_B),
+ { },
+};
+
+#define ISL29501_DISTANCE_SCAN_INDEX 0
+#define ISL29501_TIMESTAMP_SCAN_INDEX 1
+
+static const struct iio_chan_spec isl29501_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .scan_index = ISL29501_DISTANCE_SCAN_INDEX,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .ext_info = isl29501_ext_info,
+ },
+ {
+ .type = IIO_PHASE,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_CURRENT,
+ .scan_index = -1,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_TEMP,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .scan_index = -1,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(ISL29501_TIMESTAMP_SCAN_INDEX),
+};
+
+static int isl29501_reset_registers(struct isl29501_private *isl29501)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ ISL29501_COMMAND_REGISTER,
+ ISL29501_RESET_ALL_REGISTERS);
+ if (ret < 0) {
+ dev_err(&isl29501->client->dev,
+ "cannot reset registers %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ ISL29501_COMMAND_REGISTER,
+ ISL29501_RESET_INT_SM);
+ if (ret < 0)
+ dev_err(&isl29501->client->dev,
+ "cannot reset state machine %d\n", ret);
+
+ return ret;
+}
+
+static int isl29501_begin_acquisition(struct isl29501_private *isl29501)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ ISL29501_COMMAND_REGISTER,
+ ISL29501_EMUL_SAMPLE_START_PIN);
+ if (ret < 0)
+ dev_err(&isl29501->client->dev,
+ "cannot begin acquisition %d\n", ret);
+
+ return ret;
+}
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL(ISL29501_INT_TIME_AVAILABLE);
+static IIO_CONST_ATTR(out_current_scale_available,
+ ISL29501_CURRENT_SCALE_AVAILABLE);
+
+static struct attribute *isl29501_attributes[] = {
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ &iio_const_attr_out_current_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group isl29501_attribute_group = {
+ .attrs = isl29501_attributes,
+};
+
+static const int isl29501_current_scale_table[][2] = {
+ {0, 3900}, {0, 7800}, {0, 11800}, {0, 15700},
+ {0, 19600}, {0, 23500}, {0, 27500}, {0, 31400},
+ {0, 35200}, {0, 39200}, {0, 43100}, {0, 47100},
+ {0, 51000}, {0, 54900}, {0, 58800},
+};
+
+static const int isl29501_int_time[][2] = {
+ {0, 70}, /* 0.07 ms */
+ {0, 140}, /* 0.14 ms */
+ {0, 280}, /* 0.28 ms */
+ {0, 570}, /* 0.57 ms */
+ {0, 1140}, /* 1.14 ms */
+ {0, 2280}, /* 2.28 ms */
+ {0, 4550}, /* 4.55 ms */
+	{0, 9100},   /* 9.1 ms */
+ {0, 18200}, /* 18.2 ms */
+ {0, 36400}, /* 36.4 ms */
+ {0, 72810}, /* 72.81 ms */
+	{0, 145610}  /* 145.61 ms */
+};
+
+static int isl29501_get_raw(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int *raw)
+{
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = isl29501_register_read(isl29501, REG_DISTANCE, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_INTENSITY:
+ ret = isl29501_register_read(isl29501,
+ REG_AMBIENT_LIGHT,
+ raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_PHASE:
+ ret = isl29501_register_read(isl29501, REG_PHASE, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ ret = isl29501_register_read(isl29501, REG_EMITTER_DAC, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ ret = isl29501_register_read(isl29501, REG_TEMPERATURE, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_get_scale(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2)
+{
+ int ret;
+ u32 current_scale;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ /* distance = raw_distance * 33.31 / 65536 (m) */
+ *val = 3331;
+ *val2 = 6553600;
+
+ return IIO_VAL_FRACTIONAL;
+ case IIO_PHASE:
+ /* phase = raw_phase * 2pi / 65536 (rad) */
+ *val = 0;
+ *val2 = 95874;
+
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_INTENSITY:
+ /* light = raw_light * 35 / 10000 (mA) */
+ *val = 35;
+ *val2 = 10000;
+
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CURRENT:
+ ret = isl29501_register_read(isl29501,
+ REG_DRIVER_RANGE,
+ &current_scale);
+ if (ret < 0)
+ return ret;
+
+ if (current_scale > ARRAY_SIZE(isl29501_current_scale_table))
+ return -EINVAL;
+
+ if (!current_scale) {
+ *val = 0;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+
+ *val = isl29501_current_scale_table[current_scale - 1][0];
+ *val2 = isl29501_current_scale_table[current_scale - 1][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /* temperature = raw_temperature * 125 / 100000 (milli °C) */
+ *val = 125;
+ *val2 = 100000;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_get_calibbias(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int *bias)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return isl29501_register_read(isl29501,
+ REG_DISTANCE_BIAS,
+ bias);
+ case IIO_TEMP:
+ return isl29501_register_read(isl29501,
+ REG_TEMPERATURE_BIAS,
+ bias);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_get_inttime(struct isl29501_private *isl29501,
+ int *val, int *val2)
+{
+ int ret;
+ u32 inttime;
+
+ ret = isl29501_register_read(isl29501, REG_INT_TIME, &inttime);
+ if (ret < 0)
+ return ret;
+
+ if (inttime >= ARRAY_SIZE(isl29501_int_time))
+ return -EINVAL;
+
+ *val = isl29501_int_time[inttime][0];
+ *val2 = isl29501_int_time[inttime][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int isl29501_get_freq(struct isl29501_private *isl29501,
+ int *val, int *val2)
+{
+ int ret;
+	u32 sample_time;
+ unsigned long long freq;
+ u32 temp;
+
+ ret = isl29501_register_read(isl29501, REG_SAMPLE_TIME, &sample_time);
+ if (ret < 0)
+ return ret;
+
+ /* freq = 1 / (0.000450 * (sample_time + 1) * 10^-6) */
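+	/* Work in microhertz so the division keeps sub-Hz precision */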
+ freq = 1000000ULL * 1000000ULL;
+
+ do_div(freq, 450 * (sample_time + 1));
+
+ temp = do_div(freq, 1000000);
+ *val = freq;
+ *val2 = temp;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int isl29501_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return isl29501_get_raw(isl29501, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ return isl29501_get_scale(isl29501, chan, val, val2);
+ case IIO_CHAN_INFO_INT_TIME:
+ return isl29501_get_inttime(isl29501, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return isl29501_get_freq(isl29501, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return isl29501_get_calibbias(isl29501, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_set_raw(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int raw)
+{
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return isl29501_register_write(isl29501, REG_EMITTER_DAC, raw);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_set_inttime(struct isl29501_private *isl29501,
+ int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(isl29501_int_time); i++) {
+ if (isl29501_int_time[i][0] == val &&
+ isl29501_int_time[i][1] == val2) {
+ return isl29501_register_write(isl29501,
+ REG_INT_TIME,
+ i);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int isl29501_set_scale(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int val, int val2)
+{
+ int i;
+
+ if (chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(isl29501_current_scale_table); i++) {
+ if (isl29501_current_scale_table[i][0] == val &&
+ isl29501_current_scale_table[i][1] == val2) {
+ return isl29501_register_write(isl29501,
+ REG_DRIVER_RANGE,
+ i + 1);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int isl29501_set_calibbias(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int bias)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return isl29501_register_write(isl29501,
+ REG_DISTANCE_BIAS,
+ bias);
+ case IIO_TEMP:
+ return isl29501_register_write(isl29501,
+ REG_TEMPERATURE_BIAS,
+ bias);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_set_freq(struct isl29501_private *isl29501,
+ int val, int val2)
+{
+ int freq;
+ unsigned long long sample_time;
+
+ /* sample_freq = 1 / (0.000450 * (sample_time + 1) * 10^-6) */
+ freq = val * 1000000 + val2 % 1000000;
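+	/* 2222 * 10^6 ~= 10^12 / 450, the inverse of the formula above in microhertz */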
+ sample_time = 2222ULL * 1000000ULL;
+ do_div(sample_time, freq);
+
+ sample_time -= 1;
+
+ if (sample_time > 255)
+ return -ERANGE;
+
+ return isl29501_register_write(isl29501, REG_SAMPLE_TIME, sample_time);
+}
+
+static int isl29501_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return isl29501_set_raw(isl29501, chan, val);
+ case IIO_CHAN_INFO_INT_TIME:
+ return isl29501_set_inttime(isl29501, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return isl29501_set_freq(isl29501, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return isl29501_set_scale(isl29501, chan, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return isl29501_set_calibbias(isl29501, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info isl29501_info = {
+ .read_raw = &isl29501_read_raw,
+ .write_raw = &isl29501_write_raw,
+ .attrs = &isl29501_attribute_group,
+};
+
+static int isl29501_init_chip(struct isl29501_private *isl29501)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(isl29501->client, ISL29501_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(&isl29501->client->dev, "Error reading device id\n");
+ return ret;
+ }
+
+ if (ret != ISL29501_ID) {
+ dev_err(&isl29501->client->dev,
+ "Wrong chip id, got %x expected %x\n",
+			ret, ISL29501_ID);
+ return -ENODEV;
+ }
+
+ ret = isl29501_reset_registers(isl29501);
+ if (ret < 0)
+ return ret;
+
+ return isl29501_begin_acquisition(isl29501);
+}
+
+static irqreturn_t isl29501_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ const unsigned long *active_mask = indio_dev->active_scan_mask;
+ u32 buffer[4] = {}; /* 1x16-bit + ts */
+
+ if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
+ isl29501_register_read(isl29501, REG_DISTANCE, buffer);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int isl29501_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct isl29501_private *isl29501;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*isl29501));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ isl29501 = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ isl29501->client = client;
+
+ mutex_init(&isl29501->lock);
+
+ ret = isl29501_init_chip(isl29501);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = isl29501_channels;
+ indio_dev->num_channels = ARRAY_SIZE(isl29501_channels);
+ indio_dev->name = client->name;
+ indio_dev->info = &isl29501_info;
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ iio_pollfunc_store_time,
+ isl29501_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to setup iio triggered buffer\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id isl29501_id[] = {
+ {"isl29501", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, isl29501_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id isl29501_i2c_matches[] = {
+ { .compatible = "renesas,isl29501" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, isl29501_i2c_matches);
+#endif
+
+static struct i2c_driver isl29501_driver = {
+ .driver = {
+ .name = "isl29501",
+ },
+ .id_table = isl29501_id,
+ .probe = isl29501_probe,
+};
+module_i2c_driver(isl29501_driver);
+
+MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
+MODULE_DESCRIPTION("ISL29501 Time of Flight sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index d619e8634a00..13a4cec64ea8 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -433,11 +433,11 @@ static int mlx90614_wakeup(struct mlx90614_data *data)
dev_dbg(&data->client->dev, "Requesting wake-up");
- i2c_lock_adapter(data->client->adapter);
+ i2c_lock_bus(data->client->adapter, I2C_LOCK_ROOT_ADAPTER);
gpiod_direction_output(data->wakeup_gpio, 0);
msleep(MLX90614_TIMING_WAKEUP);
gpiod_direction_input(data->wakeup_gpio);
- i2c_unlock_adapter(data->client->adapter);
+ i2c_unlock_bus(data->client->adapter, I2C_LOCK_ROOT_ADAPTER);
data->ready_timestamp = jiffies +
msecs_to_jiffies(MLX90614_TIMING_STARTUP);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b03af54367c0..abb6660c099c 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -36,8 +36,8 @@ config INFINIBAND_USER_ACCESS
rdma-core <https://github.com/linux-rdma/rdma-core>.
config INFINIBAND_USER_ACCESS_UCM
- bool "Userspace CM (UCM, DEPRECATED)"
- depends on BROKEN
+ tristate "Userspace CM (UCM, DEPRECATED)"
+ depends on BROKEN || COMPILE_TEST
depends on INFINIBAND_USER_ACCESS
help
The UCM module has known security flaws, which no one is
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 61667705d746..867cee5e27b2 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -35,6 +35,7 @@ ib_ucm-y := ucm.o
ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
- uverbs_ioctl_merge.o uverbs_std_types_cq.o \
+ uverbs_std_types_cq.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
- uverbs_std_types_mr.o uverbs_std_types_counters.o
+ uverbs_std_types_mr.o uverbs_std_types_counters.o \
+ uverbs_uapi.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 4f32c4062fb6..46b855a42884 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -188,7 +188,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
return -ENODATA;
}
-int rdma_addr_size(struct sockaddr *addr)
+int rdma_addr_size(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
@@ -315,19 +315,17 @@ static int dst_fetch_ha(const struct dst_entry *dst,
int ret = 0;
n = dst_neigh_lookup(dst, daddr);
+ if (!n)
+ return -ENODATA;
- rcu_read_lock();
- if (!n || !(n->nud_state & NUD_VALID)) {
- if (n)
- neigh_event_send(n, NULL);
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
rdma_copy_addr(dev_addr, dst->dev, n->ha);
}
- rcu_read_unlock();
- if (n)
- neigh_release(n);
+ neigh_release(n);
return ret;
}
@@ -587,7 +585,7 @@ static void process_one_req(struct work_struct *_work)
spin_unlock_bh(&lock);
}
-int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 81d66f56e38f..0bee1f4b914e 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -66,20 +66,28 @@ enum gid_attr_find_mask {
GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
};
-enum gid_table_entry_props {
- GID_TABLE_ENTRY_INVALID = 1UL << 0,
- GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
+enum gid_table_entry_state {
+ GID_TABLE_ENTRY_INVALID = 1,
+ GID_TABLE_ENTRY_VALID = 2,
+ /*
+	 * Indicates that the entry is pending removal; there may still
+	 * be active users of this GID entry.
+	 * When the last user releases its reference, the GID entry is
+	 * detached from the table.
+ */
+ GID_TABLE_ENTRY_PENDING_DEL = 3,
};
struct ib_gid_table_entry {
- unsigned long props;
- union ib_gid gid;
- struct ib_gid_attr attr;
- void *context;
+ struct kref kref;
+ struct work_struct del_work;
+ struct ib_gid_attr attr;
+ void *context;
+ enum gid_table_entry_state state;
};
struct ib_gid_table {
- int sz;
+ int sz;
/* In RoCE, adding a GID to the table requires:
* (a) Find if this GID is already exists.
* (b) Find a free space.
@@ -91,13 +99,16 @@ struct ib_gid_table {
*
**/
/* Any writer to data_vec must hold this lock and the write side of
- * rwlock. readers must hold only rwlock. All writers must be in a
+ * rwlock. Readers must hold only rwlock. All writers must be in a
* sleepable context.
*/
- struct mutex lock;
- /* rwlock protects data_vec[ix]->props. */
- rwlock_t rwlock;
- struct ib_gid_table_entry *data_vec;
+ struct mutex lock;
+ /* rwlock protects data_vec[ix]->state and entry pointer.
+ */
+ rwlock_t rwlock;
+ struct ib_gid_table_entry **data_vec;
+	/* bit field; each set bit indicates an index reserved for a default GID */
+ u32 default_gid_indices;
};
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
@@ -135,6 +146,19 @@ bool rdma_is_zero_gid(const union ib_gid *gid)
}
EXPORT_SYMBOL(rdma_is_zero_gid);
+/**
+ * is_gid_index_default - Check if a given index belongs to
+ * reserved default GIDs or not.
+ * @table: GID table pointer
+ * @index: Index to check in GID table
+ * Returns true if index is one of the reserved default GID indices,
+ * otherwise returns false.
+ */
+static bool is_gid_index_default(const struct ib_gid_table *table,
+ unsigned int index)
+{
+ return index < 32 && (BIT(index) & table->default_gid_indices);
+}
+
int ib_cache_gid_parse_type_str(const char *buf)
{
unsigned int i;
@@ -164,26 +188,136 @@ static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
return device->cache.ports[port - rdma_start_port(device)].gid;
}
-static void del_roce_gid(struct ib_device *device, u8 port_num,
- struct ib_gid_table *table, int ix)
+static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
+{
+ return !entry;
+}
+
+static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
+{
+ return entry && entry->state == GID_TABLE_ENTRY_VALID;
+}
+
+static void schedule_free_gid(struct kref *kref)
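+	/* Registers with only an lsb half leave msb at 0, so the result is the raw 8-bit value */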
{
+ struct ib_gid_table_entry *entry =
+ container_of(kref, struct ib_gid_table_entry, kref);
+
+ queue_work(ib_wq, &entry->del_work);
+}
+
+static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
+{
+ struct ib_device *device = entry->attr.device;
+ u8 port_num = entry->attr.port_num;
+ struct ib_gid_table *table = rdma_gid_table(device, port_num);
+
pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- device->name, port_num, ix,
- table->data_vec[ix].gid.raw);
+ device->name, port_num, entry->attr.index,
+ entry->attr.gid.raw);
+
+ if (rdma_cap_roce_gid_table(device, port_num) &&
+ entry->state != GID_TABLE_ENTRY_INVALID)
+ device->del_gid(&entry->attr, &entry->context);
+
+ write_lock_irq(&table->rwlock);
- if (rdma_cap_roce_gid_table(device, port_num))
- device->del_gid(&table->data_vec[ix].attr,
- &table->data_vec[ix].context);
- dev_put(table->data_vec[ix].attr.ndev);
+ /*
+	 * The only way to avoid overwriting a NULL slot in the table is
+	 * to check that this is still the same entry: if a new entry was
+	 * added at this index by the time we free here, don't overwrite
+	 * the table entry.
+ */
+ if (entry == table->data_vec[entry->attr.index])
+ table->data_vec[entry->attr.index] = NULL;
+ /* Now this index is ready to be allocated */
+ write_unlock_irq(&table->rwlock);
+
+ if (entry->attr.ndev)
+ dev_put(entry->attr.ndev);
+ kfree(entry);
}
-static int add_roce_gid(struct ib_gid_table *table,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr)
+static void free_gid_entry(struct kref *kref)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(kref, struct ib_gid_table_entry, kref);
+
+ free_gid_entry_locked(entry);
+}
+
+/**
+ * free_gid_work - Release reference to the GID entry
+ * @work: Work structure to refer to GID entry which needs to be
+ * deleted.
+ *
+ * free_gid_work() frees the entry from the HCA's hardware table
+ * if the provider supports it, and releases the reference to the netdevice.
+ */
+static void free_gid_work(struct work_struct *work)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(work, struct ib_gid_table_entry, del_work);
+ struct ib_device *device = entry->attr.device;
+ u8 port_num = entry->attr.port_num;
+ struct ib_gid_table *table = rdma_gid_table(device, port_num);
+
+ mutex_lock(&table->lock);
+ free_gid_entry_locked(entry);
+ mutex_unlock(&table->lock);
+}
+
+static struct ib_gid_table_entry *
+alloc_gid_entry(const struct ib_gid_attr *attr)
{
struct ib_gid_table_entry *entry;
- int ix = attr->index;
- int ret = 0;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+ kref_init(&entry->kref);
+ memcpy(&entry->attr, attr, sizeof(*attr));
+ if (entry->attr.ndev)
+ dev_hold(entry->attr.ndev);
+ INIT_WORK(&entry->del_work, free_gid_work);
+ entry->state = GID_TABLE_ENTRY_INVALID;
+ return entry;
+}
+
+static void store_gid_entry(struct ib_gid_table *table,
+ struct ib_gid_table_entry *entry)
+{
+ entry->state = GID_TABLE_ENTRY_VALID;
+
+ pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+ entry->attr.device->name, entry->attr.port_num,
+ entry->attr.index, entry->attr.gid.raw);
+
+ lockdep_assert_held(&table->lock);
+ write_lock_irq(&table->rwlock);
+ table->data_vec[entry->attr.index] = entry;
+ write_unlock_irq(&table->rwlock);
+}
+
+static void get_gid_entry(struct ib_gid_table_entry *entry)
+{
+ kref_get(&entry->kref);
+}
+
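+/*
+ * put_gid_entry() schedules the free via the work queue, so it is safe
+ * to call while holding the rwlock; put_gid_entry_locked() frees inline
+ * and must be called with table->lock held.
+ */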
+static void put_gid_entry(struct ib_gid_table_entry *entry)
+{
+ kref_put(&entry->kref, schedule_free_gid);
+}
+
+static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
+{
+ kref_put(&entry->kref, free_gid_entry);
+}
+
+static int add_roce_gid(struct ib_gid_table_entry *entry)
+{
+ const struct ib_gid_attr *attr = &entry->attr;
+ int ret;
if (!attr->ndev) {
pr_err("%s NULL netdev device=%s port=%d index=%d\n",
@@ -191,38 +325,22 @@ static int add_roce_gid(struct ib_gid_table *table,
attr->index);
return -EINVAL;
}
-
- entry = &table->data_vec[ix];
- if ((entry->props & GID_TABLE_ENTRY_INVALID) == 0) {
- WARN(1, "GID table corruption device=%s port=%d index=%d\n",
- attr->device->name, attr->port_num,
- attr->index);
- return -EINVAL;
- }
-
if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
- ret = attr->device->add_gid(gid, attr, &entry->context);
+ ret = attr->device->add_gid(attr, &entry->context);
if (ret) {
pr_err("%s GID add failed device=%s port=%d index=%d\n",
__func__, attr->device->name, attr->port_num,
attr->index);
- goto add_err;
+ return ret;
}
}
- dev_hold(attr->ndev);
-
-add_err:
- if (!ret)
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- attr->device->name, attr->port_num, ix, gid->raw);
- return ret;
+ return 0;
}
/**
* add_modify_gid - Add or modify GID table entry
*
* @table: GID table in which GID to be added or modified
- * @gid: GID content
* @attr: Attributes of the GID
*
* Returns 0 on success or appropriate error code. It accepts zero
@@ -230,34 +348,42 @@ add_err:
* GID. However such zero GIDs are not added to the cache.
*/
static int add_modify_gid(struct ib_gid_table *table,
- const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- int ret;
+ struct ib_gid_table_entry *entry;
+ int ret = 0;
+
+ /*
+ * Invalidate any old entry in the table to make it safe to write to
+ * this index.
+ */
+ if (is_gid_entry_valid(table->data_vec[attr->index]))
+ put_gid_entry(table->data_vec[attr->index]);
+
+ /*
+	 * Some HCAs report multiple GID entries with only one valid GID, and
+ * leave other unused entries as the zero GID. Convert zero GIDs to
+ * empty table entries instead of storing them.
+ */
+ if (rdma_is_zero_gid(&attr->gid))
+ return 0;
+
+ entry = alloc_gid_entry(attr);
+ if (!entry)
+ return -ENOMEM;
if (rdma_protocol_roce(attr->device, attr->port_num)) {
- ret = add_roce_gid(table, gid, attr);
+ ret = add_roce_gid(entry);
if (ret)
- return ret;
- } else {
- /*
- * Some HCA's report multiple GID entries with only one
- * valid GID, but remaining as zero GID.
- * So ignore such behavior for IB link layer and don't
- * fail the call, but don't add such entry to GID cache.
- */
- if (rdma_is_zero_gid(gid))
- return 0;
+ goto done;
}
- lockdep_assert_held(&table->lock);
- memcpy(&table->data_vec[attr->index].gid, gid, sizeof(*gid));
- memcpy(&table->data_vec[attr->index].attr, attr, sizeof(*attr));
-
- write_lock_irq(&table->rwlock);
- table->data_vec[attr->index].props &= ~GID_TABLE_ENTRY_INVALID;
- write_unlock_irq(&table->rwlock);
+ store_gid_entry(table, entry);
return 0;
+
+done:
+ put_gid_entry(entry);
+ return ret;
}
/**
@@ -272,16 +398,25 @@ static int add_modify_gid(struct ib_gid_table *table,
static void del_gid(struct ib_device *ib_dev, u8 port,
struct ib_gid_table *table, int ix)
{
+ struct ib_gid_table_entry *entry;
+
lockdep_assert_held(&table->lock);
+
+ pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+ ib_dev->name, port, ix,
+ table->data_vec[ix]->attr.gid.raw);
+
write_lock_irq(&table->rwlock);
- table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
+ entry = table->data_vec[ix];
+ entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+ /*
+	 * For non-RoCE protocols, the GID entry slot is immediately ready for reuse.
+ */
+ if (!rdma_protocol_roce(ib_dev, port))
+ table->data_vec[ix] = NULL;
write_unlock_irq(&table->rwlock);
- if (rdma_protocol_roce(ib_dev, port))
- del_roce_gid(ib_dev, port, table, ix);
- memset(&table->data_vec[ix].gid, 0, sizeof(table->data_vec[ix].gid));
- memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
- table->data_vec[ix].context = NULL;
+ put_gid_entry_locked(entry);
}
/* rwlock should be read locked, or lock should be held */
@@ -294,8 +429,8 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
int empty = pempty ? -1 : 0;
while (i < table->sz && (found < 0 || empty < 0)) {
- struct ib_gid_table_entry *data = &table->data_vec[i];
- struct ib_gid_attr *attr = &data->attr;
+ struct ib_gid_table_entry *data = table->data_vec[i];
+ struct ib_gid_attr *attr;
int curr_index = i;
i++;
@@ -306,9 +441,9 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
* so lookup free slot only if requested.
*/
if (pempty && empty < 0) {
- if (data->props & GID_TABLE_ENTRY_INVALID &&
- (default_gid ==
- !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
+ if (is_gid_entry_free(data) &&
+ default_gid ==
+ is_gid_index_default(table, curr_index)) {
/*
* Found an invalid (free) entry; allocate it.
* If default GID is requested, then our
@@ -323,22 +458,23 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
/*
* Additionally find_gid() is used to find valid entry during
- * lookup operation, where validity needs to be checked. So
- * find the empty entry first to continue to search for a free
- * slot and ignore its INVALID flag.
+ * lookup operation; so ignore the entries which are marked as
+ * pending for removal and the entries which are marked as
+ * invalid.
*/
- if (data->props & GID_TABLE_ENTRY_INVALID)
+ if (!is_gid_entry_valid(data))
continue;
if (found >= 0)
continue;
+ attr = &data->attr;
if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
attr->gid_type != val->gid_type)
continue;
if (mask & GID_ATTR_FIND_MASK_GID &&
- memcmp(gid, &data->gid, sizeof(*gid)))
+ memcmp(gid, &data->attr.gid, sizeof(*gid)))
continue;
if (mask & GID_ATTR_FIND_MASK_NETDEV &&
@@ -346,8 +482,7 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
continue;
if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
- !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
- default_gid)
+ is_gid_index_default(table, curr_index) != default_gid)
continue;
found = curr_index;
@@ -396,7 +531,8 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
attr->device = ib_dev;
attr->index = empty;
attr->port_num = port;
- ret = add_modify_gid(table, gid, attr);
+ attr->gid = *gid;
+ ret = add_modify_gid(table, attr);
if (!ret)
dispatch_gid_change_event(ib_dev, port);
@@ -492,7 +628,8 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
mutex_lock(&table->lock);
for (ix = 0; ix < table->sz; ix++) {
- if (table->data_vec[ix].attr.ndev == ndev) {
+ if (is_gid_entry_valid(table->data_vec[ix]) &&
+ table->data_vec[ix]->attr.ndev == ndev) {
del_gid(ib_dev, port, table, ix);
deleted = true;
}
@@ -506,103 +643,37 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
return 0;
}
-static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
- union ib_gid *gid, struct ib_gid_attr *attr)
-{
- struct ib_gid_table *table;
-
- table = rdma_gid_table(ib_dev, port);
-
- if (index < 0 || index >= table->sz)
- return -EINVAL;
-
- if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
- return -EINVAL;
-
- memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
- if (attr) {
- memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
- if (attr->ndev)
- dev_hold(attr->ndev);
- }
-
- return 0;
-}
-
-static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
- const union ib_gid *gid,
- const struct ib_gid_attr *val,
- unsigned long mask,
- u8 *port, u16 *index)
-{
- struct ib_gid_table *table;
- u8 p;
- int local_index;
- unsigned long flags;
-
- for (p = 0; p < ib_dev->phys_port_cnt; p++) {
- table = ib_dev->cache.ports[p].gid;
- read_lock_irqsave(&table->rwlock, flags);
- local_index = find_gid(table, gid, val, false, mask, NULL);
- if (local_index >= 0) {
- if (index)
- *index = local_index;
- if (port)
- *port = p + rdma_start_port(ib_dev);
- read_unlock_irqrestore(&table->rwlock, flags);
- return 0;
- }
- read_unlock_irqrestore(&table->rwlock, flags);
- }
-
- return -ENOENT;
-}
-
-static int ib_cache_gid_find(struct ib_device *ib_dev,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- struct net_device *ndev, u8 *port,
- u16 *index)
-{
- unsigned long mask = GID_ATTR_FIND_MASK_GID |
- GID_ATTR_FIND_MASK_GID_TYPE;
- struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
-
- if (ndev)
- mask |= GID_ATTR_FIND_MASK_NETDEV;
-
- return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
- mask, port, index);
-}
-
/**
- * ib_find_cached_gid_by_port - Returns the GID table index where a specified
- * GID value occurs. It searches for the specified GID value in the local
- * software cache.
+ * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
+ * a valid GID entry for given search parameters. It searches for the specified
+ * GID value in the local software cache.
* @device: The device to query.
* @gid: The GID value to search for.
* @gid_type: The GID type to search for.
* @port_num: The port number of the device where the GID value should be
* searched.
- * @ndev: In RoCE, the net device of the device. Null means ignore.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
+ * @ndev: In RoCE, the net device of the device. NULL means ignore.
+ *
+ * Returns the SGID attributes with a valid reference if the GID is found,
+ * or an ERR_PTR on error.
+ * The caller must invoke rdma_put_gid_attr() to release the reference.
*/
-int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- u8 port, struct net_device *ndev,
- u16 *index)
+const struct ib_gid_attr *
+rdma_find_gid_by_port(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ u8 port, struct net_device *ndev)
{
int local_index;
struct ib_gid_table *table;
unsigned long mask = GID_ATTR_FIND_MASK_GID |
GID_ATTR_FIND_MASK_GID_TYPE;
struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
+ const struct ib_gid_attr *attr;
unsigned long flags;
if (!rdma_is_port_valid(ib_dev, port))
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
table = rdma_gid_table(ib_dev, port);
@@ -612,89 +683,73 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
read_lock_irqsave(&table->rwlock, flags);
local_index = find_gid(table, gid, &val, false, mask, NULL);
if (local_index >= 0) {
- if (index)
- *index = local_index;
+ get_gid_entry(table->data_vec[local_index]);
+ attr = &table->data_vec[local_index]->attr;
read_unlock_irqrestore(&table->rwlock, flags);
- return 0;
+ return attr;
}
read_unlock_irqrestore(&table->rwlock, flags);
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
}
-EXPORT_SYMBOL(ib_find_cached_gid_by_port);
+EXPORT_SYMBOL(rdma_find_gid_by_port);
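
For reference, a minimal caller sketch of the reference-counted lookup above;
the device, GID and ndev are whatever the caller already resolved, and
example_lookup() itself is hypothetical:

	/* Hypothetical caller: look up a RoCE v2 GID on port 1, then drop it. */
	static int example_lookup(struct ib_device *dev, const union ib_gid *gid,
				  struct net_device *ndev)
	{
		const struct ib_gid_attr *attr;

		attr = rdma_find_gid_by_port(dev, gid, IB_GID_TYPE_ROCE_UDP_ENCAP,
					     1, ndev);
		if (IS_ERR(attr))
			return PTR_ERR(attr);

		pr_debug("gid found at index %u on port %u\n",
			 attr->index, attr->port_num);
		rdma_put_gid_attr(attr);	/* release the lookup's reference */
		return 0;
	}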
/**
- * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
- * GID value occurs
+ * rdma_find_gid_by_filter - Returns the GID table entry attributes of the
+ * first entry that matches a specified GID value and is accepted by the
+ * filter
 * @ib_dev: The device to query.
* @gid: The GID value to search for.
- * @port_num: The port number of the device where the GID value could be
+ * @port: The port number of the device where the GID value could be
* searched.
 * @filter: The filter function is executed on any matching GID in the table.
 * If the filter function returns true, the corresponding GID attribute is
 * returned, otherwise the search continues. It's guaranteed that while the
 * filter is executed, the ndev field is valid and the structure won't
 * change. The filter is executed in an atomic context and must not be NULL.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
*
- * ib_cache_gid_find_by_filter() searches for the specified GID value
+ * rdma_find_gid_by_filter() searches the port's GID table for the specified
 * GID value and returns the entry for which the filter function returns true.
- * This function is only supported on RoCE ports.
*
*/
-static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
- const union ib_gid *gid,
- u8 port,
- bool (*filter)(const union ib_gid *,
- const struct ib_gid_attr *,
- void *),
- void *context,
- u16 *index)
+const struct ib_gid_attr *rdma_find_gid_by_filter(
+ struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
+ bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
+ void *),
+ void *context)
{
+ const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
struct ib_gid_table *table;
- unsigned int i;
unsigned long flags;
- bool found = false;
-
+ unsigned int i;
- if (!rdma_is_port_valid(ib_dev, port) ||
- !rdma_protocol_roce(ib_dev, port))
- return -EPROTONOSUPPORT;
+ if (!rdma_is_port_valid(ib_dev, port))
+ return ERR_PTR(-EINVAL);
table = rdma_gid_table(ib_dev, port);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {
- struct ib_gid_attr attr;
+ struct ib_gid_table_entry *entry = table->data_vec[i];
- if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
+ if (!is_gid_entry_valid(entry))
continue;
- if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
+ if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
continue;
- memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
-
- if (filter(gid, &attr, context)) {
- found = true;
- if (index)
- *index = i;
+ if (filter(gid, &entry->attr, context)) {
+ get_gid_entry(entry);
+ res = &entry->attr;
break;
}
}
read_unlock_irqrestore(&table->rwlock, flags);
-
- if (!found)
- return -ENOENT;
- return 0;
+ return res;
}
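
The filter contract documented above (atomic context, stable ndev, non-NULL
filter) is easiest to see with a sketch; match_ndev_filter() is hypothetical:

	/* Hypothetical filter: accept only entries bound to the given netdev. */
	static bool match_ndev_filter(const union ib_gid *gid,
				      const struct ib_gid_attr *attr, void *context)
	{
		/* Runs under the table rwlock, so attr->ndev cannot change here. */
		return attr->ndev == context;
	}

A caller would pass the netdev as the context argument and release the
returned attribute with rdma_put_gid_attr() once done.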
static struct ib_gid_table *alloc_gid_table(int sz)
{
- struct ib_gid_table *table =
- kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
- int i;
+ struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
@@ -707,12 +762,6 @@ static struct ib_gid_table *alloc_gid_table(int sz)
table->sz = sz;
rwlock_init(&table->rwlock);
-
- /* Mark all entries as invalid so that allocator can allocate
- * one of the invalid (free) entry.
- */
- for (i = 0; i < sz; i++)
- table->data_vec[i].props |= GID_TABLE_ENTRY_INVALID;
return table;
err_free_table:
@@ -720,12 +769,30 @@ err_free_table:
return NULL;
}
-static void release_gid_table(struct ib_gid_table *table)
+static void release_gid_table(struct ib_device *device, u8 port,
+ struct ib_gid_table *table)
{
- if (table) {
- kfree(table->data_vec);
- kfree(table);
+ bool leak = false;
+ int i;
+
+ if (!table)
+ return;
+
+ for (i = 0; i < table->sz; i++) {
+ if (is_gid_entry_free(table->data_vec[i]))
+ continue;
+ if (kref_read(&table->data_vec[i]->kref) > 1) {
+ pr_err("GID entry ref leak for %s (index %d) ref=%d\n",
+ device->name, i,
+ kref_read(&table->data_vec[i]->kref));
+ leak = true;
+ }
}
+ if (leak)
+ return;
+
+ kfree(table->data_vec);
+ kfree(table);
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
@@ -739,7 +806,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (!rdma_is_zero_gid(&table->data_vec[i].gid)) {
+ if (is_gid_entry_valid(table->data_vec[i])) {
del_gid(ib_dev, port, table, i);
deleted = true;
}
@@ -757,12 +824,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
{
union ib_gid gid = { };
struct ib_gid_attr gid_attr;
- struct ib_gid_table *table;
unsigned int gid_type;
unsigned long mask;
- table = rdma_gid_table(ib_dev, port);
-
mask = GID_ATTR_FIND_MASK_GID_TYPE |
GID_ATTR_FIND_MASK_DEFAULT |
GID_ATTR_FIND_MASK_NETDEV;
@@ -792,19 +856,12 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
unsigned int i;
unsigned long roce_gid_type_mask;
unsigned int num_default_gids;
- unsigned int current_gid = 0;
roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
num_default_gids = hweight_long(roce_gid_type_mask);
- for (i = 0; i < num_default_gids && i < table->sz; i++) {
- struct ib_gid_table_entry *entry = &table->data_vec[i];
-
- entry->props |= GID_TABLE_ENTRY_DEFAULT;
- current_gid = find_next_bit(&roce_gid_type_mask,
- BITS_PER_LONG,
- current_gid);
- entry->attr.gid_type = current_gid++;
- }
+ /* Reserve starting indices for default GIDs */
+ for (i = 0; i < num_default_gids && i < table->sz; i++)
+ table->default_gid_indices |= BIT(i);
}
@@ -815,7 +872,7 @@ static void gid_table_release_one(struct ib_device *ib_dev)
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
table = ib_dev->cache.ports[port].gid;
- release_gid_table(table);
+ release_gid_table(ib_dev, port, table);
ib_dev->cache.ports[port].gid = NULL;
}
}
@@ -869,69 +926,94 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
return err;
}
-int ib_get_cached_gid(struct ib_device *device,
- u8 port_num,
- int index,
- union ib_gid *gid,
- struct ib_gid_attr *gid_attr)
+/**
+ * rdma_query_gid - Read the GID content from the GID software cache
+ * @device: Device to query the GID
+ * @port_num: Port number of the device
+ * @index: Index of the GID table entry to read
+ * @gid: Pointer to GID where to store the entry's GID
+ *
+ * rdma_query_gid() only reads the GID entry content for the requested
+ * device, port and index. It works for IB, RoCE and iWARP link layers and
+ * doesn't hold any reference to the GID table entry in the HCA or the
+ * software cache.
+ *
+ * Returns 0 on success or an appropriate error code.
+ *
+ */
+int rdma_query_gid(struct ib_device *device, u8 port_num,
+ int index, union ib_gid *gid)
{
- int res;
- unsigned long flags;
struct ib_gid_table *table;
+ unsigned long flags;
+ int res = -EINVAL;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
- res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
- read_unlock_irqrestore(&table->rwlock, flags);
+ if (index < 0 || index >= table->sz ||
+ !is_gid_entry_valid(table->data_vec[index]))
+ goto done;
+
+ memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
+ res = 0;
+
+done:
+ read_unlock_irqrestore(&table->rwlock, flags);
return res;
}
-EXPORT_SYMBOL(ib_get_cached_gid);
+EXPORT_SYMBOL(rdma_query_gid);
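
Because rdma_query_gid() copies the entry without taking any reference, it is
the right tool for simple scans; a hedged sketch (first_valid_gid() is made up
for illustration):

	/* Hypothetical scan: return the index of the first non-zero GID. */
	static int first_valid_gid(struct ib_device *dev, u8 port, union ib_gid *gid)
	{
		int i;

		for (i = 0; !rdma_query_gid(dev, port, i, gid); i++)
			if (!rdma_is_zero_gid(gid))
				return i;	/* *gid is a copy; nothing to release */
		return -ENOENT;
	}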
/**
- * ib_find_cached_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
+ * rdma_find_gid - Returns SGID attributes if the matching GID is found.
* @device: The device to query.
* @gid: The GID value to search for.
* @gid_type: The GID type to search for.
* @ndev: In RoCE, the net device of the device. NULL means ignore.
- * @port_num: The port number of the device where the GID value was found.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
*
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
+ * rdma_find_gid() searches for the specified GID value in the software cache.
+ *
+ * Returns the GID attributes if a valid GID is found, or an ERR_PTR on
+ * error. The caller must invoke rdma_put_gid_attr() to release the reference.
+ *
*/
-int ib_find_cached_gid(struct ib_device *device,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- struct net_device *ndev,
- u8 *port_num,
- u16 *index)
-{
- return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
-}
-EXPORT_SYMBOL(ib_find_cached_gid);
-
-int ib_find_gid_by_filter(struct ib_device *device,
- const union ib_gid *gid,
- u8 port_num,
- bool (*filter)(const union ib_gid *gid,
- const struct ib_gid_attr *,
- void *),
- void *context, u16 *index)
+const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ struct net_device *ndev)
{
- /* Only RoCE GID table supports filter function */
- if (!rdma_protocol_roce(device, port_num) && filter)
- return -EPROTONOSUPPORT;
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE;
+ struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
+ u8 p;
+
+ if (ndev)
+ mask |= GID_ATTR_FIND_MASK_NETDEV;
+
+ for (p = 0; p < device->phys_port_cnt; p++) {
+ struct ib_gid_table *table;
+ unsigned long flags;
+ int index;
+
+ table = device->cache.ports[p].gid;
+ read_lock_irqsave(&table->rwlock, flags);
+ index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
+ if (index >= 0) {
+ const struct ib_gid_attr *attr;
+
+ get_gid_entry(table->data_vec[index]);
+ attr = &table->data_vec[index]->attr;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return attr;
+ }
+ read_unlock_irqrestore(&table->rwlock, flags);
+ }
- return ib_cache_gid_find_by_filter(device, gid,
- port_num, filter,
- context, index);
+ return ERR_PTR(-ENOENT);
}
+EXPORT_SYMBOL(rdma_find_gid);
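
Unlike rdma_find_gid_by_port(), rdma_find_gid() scans every port of the
device; a minimal hypothetical use, paired with the mandatory put:

	const struct ib_gid_attr *attr;

	attr = rdma_find_gid(dev, &gid, IB_GID_TYPE_IB, NULL);
	if (!IS_ERR(attr)) {
		/* attr->port_num identifies the port that matched */
		rdma_put_gid_attr(attr);
	}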
int ib_get_cached_pkey(struct ib_device *device,
u8 port_num,
@@ -1089,12 +1171,92 @@ int ib_get_cached_port_state(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_port_state);
+/**
+ * rdma_get_gid_attr - Returns GID attributes for a port of a device
+ * at a requested gid_index, if a valid GID entry exists.
+ * @device: The device to query.
+ * @port_num: The port number on the device where the GID value
+ * is to be queried.
+ * @index: Index of the GID table entry whose attributes are to
+ * be queried.
+ *
+ * rdma_get_gid_attr() acquires a reference to the gid attributes from the
+ * cached GID table. The caller must invoke rdma_put_gid_attr() to release
+ * the reference, regardless of the link layer.
+ *
+ * Returns a pointer to a valid gid attribute or an ERR_PTR with the
+ * appropriate error code.
+ */
+const struct ib_gid_attr *
+rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
+{
+ const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
+ struct ib_gid_table *table;
+ unsigned long flags;
+
+ if (!rdma_is_port_valid(device, port_num))
+ return ERR_PTR(-EINVAL);
+
+ table = rdma_gid_table(device, port_num);
+ if (index < 0 || index >= table->sz)
+ return ERR_PTR(-EINVAL);
+
+ read_lock_irqsave(&table->rwlock, flags);
+ if (!is_gid_entry_valid(table->data_vec[index]))
+ goto done;
+
+ get_gid_entry(table->data_vec[index]);
+ attr = &table->data_vec[index]->attr;
+done:
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return attr;
+}
+EXPORT_SYMBOL(rdma_get_gid_attr);
+
+/**
+ * rdma_put_gid_attr - Release reference to the GID attribute
+ * @attr: Pointer to the GID attribute whose reference
+ * needs to be released.
+ *
+ * rdma_put_gid_attr() must be used to release a reference that was
+ * acquired using rdma_get_gid_attr() or any API that returns a pointer
+ * to an ib_gid_attr, regardless of whether the link layer is IB or RoCE.
+ *
+ */
+void rdma_put_gid_attr(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+
+ put_gid_entry(entry);
+}
+EXPORT_SYMBOL(rdma_put_gid_attr);
+
+/**
+ * rdma_hold_gid_attr - Get reference to existing GID attribute
+ *
+ * @attr: Pointer to the GID attribute whose reference
+ * needs to be taken.
+ *
+ * Increase the reference count of a GID attribute to keep it from being
+ * freed. Callers are required to already hold a reference to the attribute.
+ *
+ */
+void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+
+ get_gid_entry(entry);
+}
+EXPORT_SYMBOL(rdma_hold_gid_attr);
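
Together the three helpers form the usual get/hold/put discipline; a hedged
sketch of handing an attribute to a second owner (share_gid_attr() is
hypothetical):

	/* Each owner holds its own reference and releases it independently. */
	static void share_gid_attr(const struct ib_gid_attr *attr,
				   const struct ib_gid_attr **slot)
	{
		rdma_hold_gid_attr(attr);	/* caller already holds one reference */
		*slot = attr;			/* new owner calls rdma_put_gid_attr() later */
	}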
+
static int config_non_roce_gid_cache(struct ib_device *device,
u8 port, int gid_tbl_len)
{
struct ib_gid_attr gid_attr = {};
struct ib_gid_table *table;
- union ib_gid gid;
int ret = 0;
int i;
@@ -1106,14 +1268,14 @@ static int config_non_roce_gid_cache(struct ib_device *device,
for (i = 0; i < gid_tbl_len; ++i) {
if (!device->query_gid)
continue;
- ret = device->query_gid(device, port, i, &gid);
+ ret = device->query_gid(device, port, i, &gid_attr.gid);
if (ret) {
pr_warn("query_gid failed (%d) for %s (index %d)\n",
ret, device->name, i);
goto err;
}
gid_attr.index = i;
- add_modify_gid(table, &gid, &gid_attr);
+ add_modify_gid(table, &gid_attr);
}
err:
mutex_unlock(&table->lock);
@@ -1128,13 +1290,10 @@ static void ib_cache_update(struct ib_device *device,
struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
int i;
int ret;
- struct ib_gid_table *table;
if (!rdma_is_port_valid(device, port))
return;
- table = rdma_gid_table(device, port);
-
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
return;
@@ -1296,4 +1455,9 @@ void ib_cache_cleanup_one(struct ib_device *device)
ib_unregister_event_handler(&device->cache.event_handler);
flush_workqueue(ib_wq);
gid_table_cleanup_one(device);
+
+ /*
+	 * Flush the wq a second time for any pending GID delete work.
+ */
+ flush_workqueue(ib_wq);
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 27a7b0a2e27a..6e39c27dca8e 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -474,7 +474,7 @@ static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
if (ret)
return ret;
- memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
@@ -508,31 +508,50 @@ static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
return ret;
}
-static struct cm_port *get_cm_port_from_path(struct sa_path_rec *path)
+static struct cm_port *
+get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
unsigned long flags;
- u8 p;
- struct net_device *ndev = ib_get_ndev_from_path(path);
-
- read_lock_irqsave(&cm.device_lock, flags);
- list_for_each_entry(cm_dev, &cm.device_list, list) {
- if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
- sa_conv_pathrec_to_gid_type(path),
- ndev, &p, NULL)) {
- port = cm_dev->port[p - 1];
- break;
+
+ if (attr) {
+ read_lock_irqsave(&cm.device_lock, flags);
+ list_for_each_entry(cm_dev, &cm.device_list, list) {
+ if (cm_dev->ib_device == attr->device) {
+ port = cm_dev->port[attr->port_num - 1];
+ break;
+ }
+ }
+ read_unlock_irqrestore(&cm.device_lock, flags);
+ } else {
+		/* The SGID attribute can be NULL in the following
+		 * cases:
+ * (a) Alternative path
+ * (b) IB link layer without GRH
+ * (c) LAP send messages
+ */
+ read_lock_irqsave(&cm.device_lock, flags);
+ list_for_each_entry(cm_dev, &cm.device_list, list) {
+ attr = rdma_find_gid(cm_dev->ib_device,
+ &path->sgid,
+ sa_conv_pathrec_to_gid_type(path),
+ NULL);
+ if (!IS_ERR(attr)) {
+ port = cm_dev->port[attr->port_num - 1];
+ break;
+ }
}
+ read_unlock_irqrestore(&cm.device_lock, flags);
+ if (port)
+ rdma_put_gid_attr(attr);
}
- read_unlock_irqrestore(&cm.device_lock, flags);
-
- if (ndev)
- dev_put(ndev);
return port;
}
-static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
+static int cm_init_av_by_path(struct sa_path_rec *path,
+ const struct ib_gid_attr *sgid_attr,
+ struct cm_av *av,
struct cm_id_private *cm_id_priv)
{
struct rdma_ah_attr new_ah_attr;
@@ -540,7 +559,7 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
struct cm_port *port;
int ret;
- port = get_cm_port_from_path(path);
+ port = get_cm_port_from_path(path, sgid_attr);
if (!port)
return -EINVAL;
cm_dev = port->cm_dev;
@@ -554,22 +573,26 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
/*
* av->ah_attr might be initialized based on wc or during
- * request processing time. So initialize a new ah_attr on stack.
+	 * request processing time, which might hold a reference to sgid_attr.
+	 * So initialize a new ah_attr on the stack.
	 * If initialization fails, old ah_attr is used for sending any
	 * responses. If initialization is successful, then new ah_attr
-	 * is used by overwriting the old one.
+	 * is used by overwriting the old one, so that the right ah_attr
+	 * can be used to return an error response.
*/
ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
- &new_ah_attr);
+ &new_ah_attr, sgid_attr);
if (ret)
return ret;
av->timeout = path->packet_life_time + 1;
ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- if (ret)
+ if (ret) {
+ rdma_destroy_ah_attr(&new_ah_attr);
return ret;
- memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ }
+ rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
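
The init-on-stack/move idiom above keeps the old address handle usable until
a replacement is fully built; a reduced sketch of the pattern (variable names
are illustrative):

	struct rdma_ah_attr new_ah_attr;
	int ret;

	ret = ib_init_ah_attr_from_path(dev, port_num, path, &new_ah_attr,
					sgid_attr);
	if (ret)
		return ret;		/* old av->ah_attr is still valid for replies */

	/* Moves ownership, including the sgid_attr reference; new_ah_attr
	 * must not be used or destroyed afterwards. */
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);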
@@ -1091,6 +1114,9 @@ retest:
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
+
+ rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
+ rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
kfree(cm_id_priv->private_data);
kfree(cm_id_priv);
}
@@ -1230,14 +1256,12 @@ new_id:
}
EXPORT_SYMBOL(ib_cm_insert_listen);
-static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
- enum cm_msg_sequence msg_seq)
+static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
u64 hi_tid, low_tid;
hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
- (msg_seq << 30));
+ low_tid = (u64)cm_id_priv->id.local_id;
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1265,7 +1289,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
pri_path->opa.slid);
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
+ cm_form_tid(cm_id_priv));
req_msg->local_comm_id = cm_id_priv->id.local_id;
req_msg->service_id = param->service_id;
@@ -1413,12 +1437,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+ ret = cm_init_av_by_path(param->primary_path,
+ param->ppath_sgid_attr, &cm_id_priv->av,
cm_id_priv);
if (ret)
goto error1;
if (param->alternate_path) {
- ret = cm_init_av_by_path(param->alternate_path,
+ ret = cm_init_av_by_path(param->alternate_path, NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret)
goto error1;
@@ -1646,7 +1671,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work,
(ib_is_opa_gid(&path->sgid))) {
union ib_gid sgid;
- if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
+ if (rdma_query_gid(dev, port_num, 0, &sgid)) {
dev_warn(&dev->dev,
"Error updating sgid in CM request\n");
return;
@@ -1691,6 +1716,7 @@ static void cm_format_req_event(struct cm_work *work,
param->retry_count = cm_req_get_retry_count(req_msg);
param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
param->srq = cm_req_get_srq(req_msg);
+ param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
work->cm_event.private_data = &req_msg->private_data;
}
@@ -1914,9 +1940,8 @@ static int cm_req_handler(struct cm_work *work)
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_req_msg *req_msg;
- union ib_gid gid;
- struct ib_gid_attr gid_attr;
const struct ib_global_route *grh;
+ const struct ib_gid_attr *gid_attr;
int ret;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1961,24 +1986,13 @@ static int cm_req_handler(struct cm_work *work)
if (cm_req_has_alt_path(req_msg))
memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
- ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num,
- grh->sgid_index,
- &gid, &gid_attr);
- if (ret) {
- ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
- goto rejected;
- }
+ gid_attr = grh->sgid_attr;
- if (gid_attr.ndev) {
+ if (gid_attr && gid_attr->ndev) {
work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
+ sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
} else {
+		/* If there is no GID attribute or the ndev is NULL, it is not RoCE. */
cm_path_set_rec_type(work->port->cm_dev->ib_device,
work->port->port_num,
&work->path[0],
@@ -1992,15 +2006,14 @@ static int cm_req_handler(struct cm_work *work)
sa_path_set_dmac(&work->path[0],
cm_id_priv->av.ah_attr.roce.dmac);
work->path[0].hop_limit = grh->hop_limit;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
cm_id_priv);
if (ret) {
int err;
- err = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num, 0,
- &work->path[0].sgid,
- NULL);
+ err = rdma_query_gid(work->port->cm_dev->ib_device,
+ work->port->port_num, 0,
+ &work->path[0].sgid);
if (err)
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
NULL, 0, NULL, 0);
@@ -2012,8 +2025,8 @@ static int cm_req_handler(struct cm_work *work)
goto rejected;
}
if (cm_req_has_alt_path(req_msg)) {
- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
- cm_id_priv);
+ ret = cm_init_av_by_path(&work->path[1], NULL,
+ &cm_id_priv->alt_av, cm_id_priv);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
@@ -2451,7 +2464,7 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
+ cm_form_tid(cm_id_priv));
dreq_msg->local_comm_id = cm_id_priv->id.local_id;
dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
@@ -3082,7 +3095,7 @@ static void cm_format_lap(struct cm_lap_msg *lap_msg,
alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
alternate_path->opa.slid);
cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
+ cm_form_tid(cm_id_priv));
lap_msg->local_comm_id = cm_id_priv->id.local_id;
lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
@@ -3136,7 +3149,7 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+ ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
cm_id_priv);
if (ret)
goto out;
@@ -3279,7 +3292,7 @@ static int cm_lap_handler(struct cm_work *work)
if (ret)
goto unlock;
- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+ cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av,
cm_id_priv);
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
@@ -3458,7 +3471,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
struct ib_cm_sidr_req_param *param)
{
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
+ cm_form_tid(cm_id_priv));
sidr_req_msg->request_id = cm_id_priv->id.local_id;
sidr_req_msg->pkey = param->path->pkey;
sidr_req_msg->service_id = param->service_id;
@@ -3481,7 +3494,9 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
+ ret = cm_init_av_by_path(param->path, param->sgid_attr,
+ &cm_id_priv->av,
+ cm_id_priv);
if (ret)
goto out;
@@ -3518,6 +3533,7 @@ out:
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
+ const struct cm_id_private *rx_cm_id,
struct ib_cm_id *listen_id)
{
struct cm_sidr_req_msg *sidr_req_msg;
@@ -3531,6 +3547,7 @@ static void cm_format_sidr_req_event(struct cm_work *work,
param->service_id = sidr_req_msg->service_id;
param->bth_pkey = cm_get_bth_pkey(work);
param->port = work->port->port_num;
+ param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
work->cm_event.private_data = &sidr_req_msg->private_data;
}
@@ -3588,7 +3605,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_id_priv->id.service_id = sidr_req_msg->service_id;
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
- cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
+ cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(cur_cm_id_priv);
return 0;
@@ -3665,7 +3682,8 @@ error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
-static void cm_format_sidr_rep_event(struct cm_work *work)
+static void cm_format_sidr_rep_event(struct cm_work *work,
+ const struct cm_id_private *cm_id_priv)
{
struct cm_sidr_rep_msg *sidr_rep_msg;
struct ib_cm_sidr_rep_event_param *param;
@@ -3678,6 +3696,7 @@ static void cm_format_sidr_rep_event(struct cm_work *work)
param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
param->info = &sidr_rep_msg->info;
param->info_len = sidr_rep_msg->info_length;
+ param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
work->cm_event.private_data = &sidr_rep_msg->private_data;
}
@@ -3701,7 +3720,7 @@ static int cm_sidr_rep_handler(struct cm_work *work)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
- cm_format_sidr_rep_event(work);
+ cm_format_sidr_rep_event(work, cm_id_priv);
cm_process_work(cm_id_priv, work);
return 0;
out:
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 8b76f0ef965e..476d4309576d 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,13 +44,6 @@
#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
-enum cm_msg_sequence {
- CM_MSG_SEQUENCE_REQ,
- CM_MSG_SEQUENCE_LAP,
- CM_MSG_SEQUENCE_DREQ,
- CM_MSG_SEQUENCE_SIDR
-};
-
struct cm_req_msg {
struct ib_mad_hdr hdr;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index bff10ab141b0..f72677291b69 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -366,7 +366,6 @@ struct cma_multicast {
void *context;
struct sockaddr_storage addr;
struct kref mcref;
- bool igmp_joined;
u8 join_state;
};
@@ -412,11 +411,11 @@ struct cma_req_info {
struct sockaddr_storage listen_addr_storage;
struct sockaddr_storage src_addr_storage;
struct ib_device *device;
- int port;
union ib_gid local_gid;
__be64 service_id;
+ int port;
+ bool has_gid;
u16 pkey;
- bool has_gid:1;
};
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
@@ -491,12 +490,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
{
cma_ref_dev(cma_dev);
id_priv->cma_dev = cma_dev;
- id_priv->gid_type = 0;
id_priv->id.device = cma_dev->device;
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- id_priv->res.type = RDMA_RESTRACK_CM_ID;
rdma_restrack_add(&id_priv->res);
}
@@ -603,46 +600,53 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
return ret;
}
-static inline int cma_validate_port(struct ib_device *device, u8 port,
- enum ib_gid_type gid_type,
- union ib_gid *gid,
- struct rdma_id_private *id_priv)
+static const struct ib_gid_attr *
+cma_validate_port(struct ib_device *device, u8 port,
+ enum ib_gid_type gid_type,
+ union ib_gid *gid,
+ struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
int bound_if_index = dev_addr->bound_dev_if;
+ const struct ib_gid_attr *sgid_attr;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
- int ret = -ENODEV;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
- return ret;
+ return ERR_PTR(-ENODEV);
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
- return ret;
+ return ERR_PTR(-ENODEV);
if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
ndev = dev_get_by_index(dev_addr->net, bound_if_index);
if (!ndev)
- return ret;
+ return ERR_PTR(-ENODEV);
} else {
gid_type = IB_GID_TYPE_IB;
}
- ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
- ndev, NULL);
-
+ sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
if (ndev)
dev_put(ndev);
+ return sgid_attr;
+}
- return ret;
+static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
+ const struct ib_gid_attr *sgid_attr)
+{
+ WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
+ id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
- struct rdma_id_private *listen_id_priv)
+ const struct rdma_id_private *listen_id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
struct cma_device *cma_dev;
union ib_gid gid, iboe_gid, *gidp;
+ enum ib_gid_type gid_type;
int ret = -ENODEV;
u8 port;
@@ -662,14 +666,13 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
port = listen_id_priv->id.port_num;
gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid;
-
- ret = cma_validate_port(cma_dev->device, port,
- rdma_protocol_ib(cma_dev->device, port) ?
- IB_GID_TYPE_IB :
- listen_id_priv->gid_type, gidp,
- id_priv);
- if (!ret) {
+ gid_type = listen_id_priv->gid_type;
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
goto out;
}
}
@@ -683,14 +686,13 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid;
-
- ret = cma_validate_port(cma_dev->device, port,
- rdma_protocol_ib(cma_dev->device, port) ?
- IB_GID_TYPE_IB :
- cma_dev->default_gid_type[port - 1],
- gidp, id_priv);
- if (!ret) {
+ gid_type = cma_dev->default_gid_type[port - 1];
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
goto out;
}
}
@@ -732,8 +734,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
continue;
- for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
- &gid, NULL);
+ for (i = 0; !rdma_query_gid(cur_dev->device,
+ p, i, &gid);
i++) {
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
@@ -785,12 +787,14 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->res.kern_name = caller;
else
rdma_restrack_set_task(&id_priv->res, current);
+ id_priv->res.type = RDMA_RESTRACK_CM_ID;
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
id_priv->id.event_handler = event_handler;
id_priv->id.ps = ps;
id_priv->id.qp_type = qp_type;
id_priv->tos_set = false;
+ id_priv->gid_type = IB_GID_TYPE_IB;
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
init_completion(&id_priv->comp);
@@ -1036,35 +1040,38 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
}
EXPORT_SYMBOL(rdma_init_qp_attr);
-static inline int cma_zero_addr(struct sockaddr *addr)
+static inline bool cma_zero_addr(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
case AF_INET6:
- return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
case AF_IB:
- return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
+ return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
default:
- return 0;
+ return false;
}
}
-static inline int cma_loopback_addr(struct sockaddr *addr)
+static inline bool cma_loopback_addr(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
- return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+ return ipv4_is_loopback(
+ ((struct sockaddr_in *)addr)->sin_addr.s_addr);
case AF_INET6:
- return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ return ipv6_addr_loopback(
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
case AF_IB:
- return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
+ return ib_addr_loopback(
+ &((struct sockaddr_ib *)addr)->sib_addr);
default:
- return 0;
+ return false;
}
}
-static inline int cma_any_addr(struct sockaddr *addr)
+static inline bool cma_any_addr(const struct sockaddr *addr)
{
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
@@ -1087,7 +1094,7 @@ static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
}
}
-static __be16 cma_port(struct sockaddr *addr)
+static __be16 cma_port(const struct sockaddr *addr)
{
struct sockaddr_ib *sib;
@@ -1105,15 +1112,15 @@ static __be16 cma_port(struct sockaddr *addr)
}
}
-static inline int cma_any_port(struct sockaddr *addr)
+static inline int cma_any_port(const struct sockaddr *addr)
{
return !cma_port(addr);
}
static void cma_save_ib_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct rdma_cm_id *listen_id,
- struct sa_path_rec *path)
+ const struct rdma_cm_id *listen_id,
+ const struct sa_path_rec *path)
{
struct sockaddr_ib *listen_ib, *ib;
@@ -1198,7 +1205,7 @@ static u16 cma_port_from_service_id(__be64 service_id)
static int cma_save_ip_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct ib_cm_event *ib_event,
+ const struct ib_cm_event *ib_event,
__be64 service_id)
{
struct cma_hdr *hdr;
@@ -1228,8 +1235,8 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
static int cma_save_net_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
+ const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
sa_family_t sa_family, __be64 service_id)
{
if (sa_family == AF_IB) {
@@ -1361,7 +1368,23 @@ static bool validate_net_dev(struct net_device *net_dev,
}
}
-static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
+static struct net_device *
+roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
+{
+ const struct ib_gid_attr *sgid_attr = NULL;
+
+ if (ib_event->event == IB_CM_REQ_RECEIVED)
+ sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
+ else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
+ sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
+
+ if (!sgid_attr)
+ return NULL;
+ dev_hold(sgid_attr->ndev);
+ return sgid_attr->ndev;
+}
+
+static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
struct cma_req_info *req)
{
struct sockaddr *listen_addr =
@@ -1376,8 +1399,12 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
if (err)
return ERR_PTR(err);
- net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
- gid, listen_addr);
+ if (rdma_protocol_roce(req->device, req->port))
+ net_dev = roce_get_net_dev_by_cm_event(ib_event);
+ else
+ net_dev = ib_get_net_dev_by_params(req->device, req->port,
+ req->pkey,
+ gid, listen_addr);
if (!net_dev)
return ERR_PTR(-ENODEV);
@@ -1440,14 +1467,20 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
- /* This request is an AF_IB request or a RoCE request */
+ /* This request is an AF_IB request */
return (!id->port_num || id->port_num == port_num) &&
- (addr->src_addr.ss_family == AF_IB ||
- rdma_protocol_roce(id->device, port_num));
+ (addr->src_addr.ss_family == AF_IB);
- return !addr->dev_addr.bound_dev_if ||
- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
- addr->dev_addr.bound_dev_if == net_dev->ifindex);
+ /*
+	 * Net namespaces must match, and if the listener is listening
+	 * on a specific netdevice then the netdevice must match as well.
+ */
+ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+ (!!addr->dev_addr.bound_dev_if ==
+ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+ return true;
+ else
+ return false;
}
static struct rdma_id_private *cma_find_listener(
@@ -1480,9 +1513,10 @@ static struct rdma_id_private *cma_find_listener(
return ERR_PTR(-EINVAL);
}
-static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
- struct ib_cm_event *ib_event,
- struct net_device **net_dev)
+static struct rdma_id_private *
+cma_ib_id_from_event(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device **net_dev)
{
struct cma_req_info req;
struct rdma_bind_list *bind_list;
@@ -1498,10 +1532,6 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
/* Assuming the protocol is AF_IB */
*net_dev = NULL;
- } else if (rdma_protocol_roce(req.device, req.port)) {
- /* TODO find the net dev matching the request parameters
- * through the RoCE GID table */
- *net_dev = NULL;
} else {
return ERR_CAST(*net_dev);
}
@@ -1629,6 +1659,21 @@ static void cma_release_port(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
+static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ struct net_device *ndev = NULL;
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ if (ndev) {
+ cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
+ dev_put(ndev);
+ }
+ kref_put(&mc->mcref, release_mc);
+}
+
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
struct cma_multicast *mc;
@@ -1642,22 +1687,7 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
} else {
- if (mc->igmp_joined) {
- struct rdma_dev_addr *dev_addr =
- &id_priv->id.route.addr.dev_addr;
- struct net_device *ndev = NULL;
-
- if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net,
- dev_addr->bound_dev_if);
- if (ndev) {
- cma_igmp_send(ndev,
- &mc->multicast.ib->rec.mgid,
- false);
- dev_put(ndev);
- }
- }
- kref_put(&mc->mcref, release_mc);
+ cma_leave_roce_mc_group(id_priv, mc);
}
}
}
@@ -1699,6 +1729,10 @@ void rdma_destroy_id(struct rdma_cm_id *id)
cma_deref_id(id_priv->id.context);
kfree(id_priv->id.route.path_rec);
+
+ if (id_priv->id.route.addr.dev_addr.sgid_attr)
+ rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
+
put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
}
@@ -1730,7 +1764,7 @@ reject:
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
- struct ib_cm_rep_event_param *rep_data,
+ const struct ib_cm_rep_event_param *rep_data,
void *private_data)
{
event->param.conn.private_data = private_data;
@@ -1743,10 +1777,11 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
event->param.conn.qp_num = rep_data->remote_qpn;
}
-static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+static int cma_ib_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = 0;
mutex_lock(&id_priv->handler_mutex);
@@ -1756,7 +1791,6 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
id_priv->state != RDMA_CM_DISCONNECT))
goto out;
- memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_REQ_ERROR:
case IB_CM_REP_ERROR:
@@ -1825,9 +1859,10 @@ out:
return ret;
}
-static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
- struct net_device *net_dev)
+static struct rdma_id_private *
+cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
struct rdma_id_private *listen_id_priv;
struct rdma_id_private *id_priv;
@@ -1888,11 +1923,12 @@ err:
return NULL;
}
-static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
- struct net_device *net_dev)
+static struct rdma_id_private *
+cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
- struct rdma_id_private *listen_id_priv;
+ const struct rdma_id_private *listen_id_priv;
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
@@ -1932,7 +1968,7 @@ err:
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
- struct ib_cm_req_event_param *req_data,
+ const struct ib_cm_req_event_param *req_data,
void *private_data, int offset)
{
event->param.conn.private_data = private_data + offset;
@@ -1946,7 +1982,8 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.qp_num = req_data->remote_qpn;
}
-static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
+ const struct ib_cm_event *ib_event)
{
return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
(ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
@@ -1955,19 +1992,20 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
(!id->qp_type));
}
-static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+static int cma_ib_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id = NULL;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
struct net_device *net_dev;
u8 offset;
int ret;
- listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
+ listen_id = cma_ib_id_from_event(cm_id, ib_event, &net_dev);
if (IS_ERR(listen_id))
return PTR_ERR(listen_id);
- if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
+ if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
ret = -EINVAL;
goto net_dev_put;
}
@@ -1978,16 +2016,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto err1;
}
- memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
- conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
+ conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
} else {
- conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
+ conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
ib_event->private_data, offset);
}
@@ -2087,7 +2124,7 @@ EXPORT_SYMBOL(rdma_read_gids);
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = 0;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
@@ -2096,7 +2133,6 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
if (id_priv->state != RDMA_CM_CONNECT)
goto out;
- memset(&event, 0, sizeof event);
switch (iw_event->event) {
case IW_CM_EVENT_CLOSE:
event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -2156,11 +2192,17 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
{
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = -ECONNABORTED;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
+ event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ event.param.conn.private_data = iw_event->private_data;
+ event.param.conn.private_data_len = iw_event->private_data_len;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
+
listen_id = cm_id->context;
mutex_lock(&listen_id->handler_mutex);
@@ -2202,13 +2244,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
- memset(&event, 0, sizeof event);
- event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
- event.param.conn.private_data = iw_event->private_data;
- event.param.conn.private_data_len = iw_event->private_data_len;
- event.param.conn.initiator_depth = iw_event->ird;
- event.param.conn.responder_resources = iw_event->ord;
-
/*
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
@@ -2241,7 +2276,8 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
addr = cma_src_addr(id_priv);
svc_id = rdma_get_service_id(&id_priv->id, addr);
- id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
+ id = ib_cm_insert_listen(id_priv->id.device,
+ cma_ib_req_handler, svc_id);
if (IS_ERR(id))
return PTR_ERR(id);
id_priv->cm_id.ib = id;
@@ -2561,8 +2597,6 @@ cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
route->path_rec->roce.route_resolved = true;
- sa_path_set_ndev(route->path_rec, addr->dev_addr.net);
- sa_path_set_ifindex(route->path_rec, ndev->ifindex);
sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
return ndev;
}
@@ -2791,7 +2825,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
p = 1;
port_found:
- ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
+ ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
if (ret)
goto out;
@@ -2817,9 +2851,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *dev_addr, void *context)
{
struct rdma_id_private *id_priv = context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
- memset(&event, 0, sizeof event);
mutex_lock(&id_priv->handler_mutex);
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
RDMA_CM_ADDR_RESOLVED))
@@ -2910,7 +2943,7 @@ err:
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- struct sockaddr *dst_addr)
+ const struct sockaddr *dst_addr)
{
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
@@ -2931,31 +2964,25 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- struct sockaddr *dst_addr, int timeout_ms)
+ const struct sockaddr *dst_addr, int timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (id_priv->state == RDMA_CM_IDLE) {
ret = cma_bind_addr(id, src_addr, dst_addr);
- if (ret) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ if (ret)
return ret;
- }
}
- if (cma_family(id_priv) != dst_addr->sa_family) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ if (cma_family(id_priv) != dst_addr->sa_family)
return -EINVAL;
- }
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
return -EINVAL;
- }
+ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
atomic_inc(&id_priv->refcount);
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
@@ -3451,18 +3478,18 @@ static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *ib_event)
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- struct rdma_cm_event event;
- struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+ struct rdma_cm_event event = {};
+ const struct ib_cm_sidr_rep_event_param *rep =
+ &ib_event->param.sidr_rep_rcvd;
int ret = 0;
mutex_lock(&id_priv->handler_mutex);
if (id_priv->state != RDMA_CM_CONNECT)
goto out;
- memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_SIDR_REQ_ERROR:
event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -3488,7 +3515,8 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
ib_init_ah_attr_from_path(id_priv->id.device,
id_priv->id.port_num,
id_priv->id.route.path_rec,
- &event.param.ud.ah_attr);
+ &event.param.ud.ah_attr,
+ rep->sgid_attr);
event.param.ud.qp_num = rep->qpn;
event.param.ud.qkey = rep->qkey;
event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -3501,6 +3529,8 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
}
ret = id_priv->id.event_handler(&id_priv->id, &event);
+
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
@@ -3557,6 +3587,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
id_priv->cm_id.ib = id;
req.path = id_priv->id.route.path_rec;
+ req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -3618,6 +3649,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
if (route->num_paths == 2)
req.alternate_path = &route->path_rec[1];
+ req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
+ /* Alternate path SGID attribute currently unsupported */
req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.qp_num = id_priv->qp_num;
req.qp_type = id_priv->id.qp_type;
@@ -3928,7 +3961,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
struct rdma_id_private *id_priv;
struct cma_multicast *mc = multicast->context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = 0;
id_priv = mc->id_priv;
@@ -3952,7 +3985,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
}
mutex_unlock(&id_priv->qp_mutex);
- memset(&event, 0, sizeof event);
event.status = status;
event.param.ud.private_data = mc->context;
if (!status) {
@@ -3981,6 +4013,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
ret = id_priv->id.event_handler(&id_priv->id, &event);
+
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
@@ -4010,7 +4044,7 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else if (addr->sa_family == AF_IB) {
memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
- } else if ((addr->sa_family == AF_INET6)) {
+ } else if (addr->sa_family == AF_INET6) {
ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP)
mc_map[7] = 0x01; /* Use RDMA CM signature */
@@ -4168,8 +4202,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (!send_only) {
err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
true);
- if (!err)
- mc->igmp_joined = true;
}
}
} else {
@@ -4221,26 +4253,29 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
memcpy(&mc->addr, addr, rdma_addr_size(addr));
mc->context = context;
mc->id_priv = id_priv;
- mc->igmp_joined = false;
mc->join_state = join_state;
- spin_lock(&id_priv->lock);
- list_add(&mc->list, &id_priv->mc_list);
- spin_unlock(&id_priv->lock);
if (rdma_protocol_roce(id->device, id->port_num)) {
kref_init(&mc->mcref);
ret = cma_iboe_join_multicast(id_priv, mc);
- } else if (rdma_cap_ib_mcast(id->device, id->port_num))
+ if (ret)
+ goto out_err;
+ } else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
ret = cma_join_ib_multicast(id_priv, mc);
- else
+ if (ret)
+ goto out_err;
+ } else {
ret = -ENOSYS;
-
- if (ret) {
- spin_lock_irq(&id_priv->lock);
- list_del(&mc->list);
- spin_unlock_irq(&id_priv->lock);
- kfree(mc);
+ goto out_err;
}
+
+ spin_lock(&id_priv->lock);
+ list_add(&mc->list, &id_priv->mc_list);
+ spin_unlock(&id_priv->lock);
+
+ return 0;
+out_err:
+ kfree(mc);
return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
@@ -4268,23 +4303,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
} else if (rdma_protocol_roce(id->device, id->port_num)) {
- if (mc->igmp_joined) {
- struct rdma_dev_addr *dev_addr =
- &id->route.addr.dev_addr;
- struct net_device *ndev = NULL;
-
- if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(dev_addr->net,
- dev_addr->bound_dev_if);
- if (ndev) {
- cma_igmp_send(ndev,
- &mc->multicast.ib->rec.mgid,
- false);
- dev_put(ndev);
- }
- mc->igmp_joined = false;
- }
- kref_put(&mc->mcref, release_mc);
+ cma_leave_roce_mc_group(id_priv, mc);
}
return;
}
@@ -4410,7 +4429,7 @@ free_cma_dev:
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
enum rdma_cm_state state;
int ret = 0;
@@ -4426,7 +4445,6 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
goto out;
- memset(&event, 0, sizeof event);
event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index fae417a391fb..77c7005c396c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -91,8 +91,8 @@ void ib_device_unregister_sysfs(struct ib_device *device);
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
-typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
- struct net_device *idev, void *cookie);
+typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
+ struct net_device *idev, void *cookie);
void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_filter filter,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 6fa4c59dc7a7..db3b6271f09d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -105,8 +105,6 @@ static int ib_device_check_mandatory(struct ib_device *device)
IB_MANDATORY_FUNC(query_pkey),
IB_MANDATORY_FUNC(alloc_pd),
IB_MANDATORY_FUNC(dealloc_pd),
- IB_MANDATORY_FUNC(create_ah),
- IB_MANDATORY_FUNC(destroy_ah),
IB_MANDATORY_FUNC(create_qp),
IB_MANDATORY_FUNC(modify_qp),
IB_MANDATORY_FUNC(destroy_qp),
@@ -862,25 +860,6 @@ int ib_query_port(struct ib_device *device,
EXPORT_SYMBOL(ib_query_port);
/**
- * ib_query_gid - Get GID table entry
- * @device:Device to query
- * @port_num:Port number to query
- * @index:GID table index to query
- * @gid:Returned GID
- * @attr: Returned GID attributes related to this GID index (only in RoCE).
- * NULL means ignore.
- *
- * ib_query_gid() fetches the specified GID table entry from the cache.
- */
-int ib_query_gid(struct ib_device *device,
- u8 port_num, int index, union ib_gid *gid,
- struct ib_gid_attr *attr)
-{
- return ib_get_cached_gid(device, port_num, index, gid, attr);
-}
-EXPORT_SYMBOL(ib_query_gid);
-
-/**
* ib_enum_roce_netdev - enumerate all RoCE ports
* @ib_dev : IB device we want to query
* @filter: Should we call the callback?
@@ -1057,7 +1036,7 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
continue;
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
- ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
+ ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
return ret;
if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index f742ae7a768b..ef459f2f2eeb 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -38,6 +38,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
@@ -58,8 +59,13 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+/*
+ * The mlx4 driver uses the top byte to distinguish which virtual function
+ * generated the MAD, so we must avoid using it.
+ */
+#define AGENT_ID_LIMIT (1 << 24)
+static DEFINE_IDR(ib_mad_clients);
static struct list_head ib_mad_port_list;
-static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
@@ -190,6 +196,8 @@ EXPORT_SYMBOL(ib_response_mad);
/*
* ib_register_mad_agent - Register to send/receive MADs
+ *
+ * Context: Process context.
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
u8 port_num,
@@ -210,7 +218,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
struct ib_mad_mgmt_vendor_class *vendor_class;
struct ib_mad_mgmt_method_table *method;
int ret2, qpn;
- unsigned long flags;
u8 mgmt_class, vclass;
/* Validate parameters */
@@ -376,13 +383,24 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
goto error4;
}
- spin_lock_irqsave(&port_priv->reg_lock, flags);
- mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
+ idr_preload(GFP_KERNEL);
+ idr_lock(&ib_mad_clients);
+ ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
+ AGENT_ID_LIMIT, GFP_ATOMIC);
+ idr_unlock(&ib_mad_clients);
+ idr_preload_end();
+
+ if (ret2 < 0) {
+ ret = ERR_PTR(ret2);
+ goto error5;
+ }
+ mad_agent_priv->agent.hi_tid = ret2;
/*
* Make sure MAD registration (if supplied)
	 * is non-overlapping with any existing ones
*/
+ spin_lock_irq(&port_priv->reg_lock);
if (mad_reg_req) {
mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
if (!is_vendor_class(mgmt_class)) {
@@ -393,7 +411,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (method) {
if (method_in_use(&method,
mad_reg_req))
- goto error5;
+ goto error6;
}
}
ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -409,24 +427,25 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (is_vendor_method_in_use(
vendor_class,
mad_reg_req))
- goto error5;
+ goto error6;
}
}
ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
}
if (ret2) {
ret = ERR_PTR(ret2);
- goto error5;
+ goto error6;
}
}
-
- /* Add mad agent into port's agent list */
- list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+ spin_unlock_irq(&port_priv->reg_lock);
return &mad_agent_priv->agent;
+error6:
+ spin_unlock_irq(&port_priv->reg_lock);
+ idr_lock(&ib_mad_clients);
+ idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+ idr_unlock(&ib_mad_clients);
error5:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
kfree(reg_req);
@@ -575,7 +594,6 @@ static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
struct ib_mad_port_private *port_priv;
- unsigned long flags;
/* Note that we could still be handling received MADs */
@@ -587,10 +605,12 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
port_priv = mad_agent_priv->qp_info->port_priv;
cancel_delayed_work(&mad_agent_priv->timed_work);
- spin_lock_irqsave(&port_priv->reg_lock, flags);
+ spin_lock_irq(&port_priv->reg_lock);
remove_mad_reg_req(mad_agent_priv);
- list_del(&mad_agent_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+ spin_unlock_irq(&port_priv->reg_lock);
+ idr_lock(&ib_mad_clients);
+ idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+ idr_unlock(&ib_mad_clients);
flush_workqueue(port_priv->wq);
ib_cancel_rmpp_recvs(mad_agent_priv);
@@ -601,7 +621,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
kfree(mad_agent_priv->reg_req);
- kfree(mad_agent_priv);
+ kfree_rcu(mad_agent_priv, rcu);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
@@ -625,6 +645,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
/*
* ib_unregister_mad_agent - Unregisters a client from using MAD services
+ *
+ * Context: Process context.
*/
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
@@ -1159,7 +1181,6 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_mad_qp_info *qp_info;
struct list_head *list;
- struct ib_send_wr *bad_send_wr;
struct ib_mad_agent *mad_agent;
struct ib_sge *sge;
unsigned long flags;
@@ -1197,7 +1218,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
- &bad_send_wr);
+ NULL);
list = &qp_info->send_queue.list;
} else {
ret = 0;
@@ -1720,22 +1741,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
struct ib_mad_agent_private *mad_agent = NULL;
unsigned long flags;
- spin_lock_irqsave(&port_priv->reg_lock, flags);
if (ib_response_mad(mad_hdr)) {
u32 hi_tid;
- struct ib_mad_agent_private *entry;
/*
* Routing is based on high 32 bits of transaction ID
* of MAD.
*/
hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
- list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
- if (entry->agent.hi_tid == hi_tid) {
- mad_agent = entry;
- break;
- }
- }
+ rcu_read_lock();
+ mad_agent = idr_find(&ib_mad_clients, hi_tid);
+ if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
+ mad_agent = NULL;
+ rcu_read_unlock();
} else {
struct ib_mad_mgmt_class_table *class;
struct ib_mad_mgmt_method_table *method;
@@ -1744,6 +1762,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
const struct ib_vendor_mad *vendor_mad;
int index;
+ spin_lock_irqsave(&port_priv->reg_lock, flags);
/*
* Routing is based on version, class, and method
* For "newer" vendor MADs, also based on OUI
@@ -1783,20 +1802,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
~IB_MGMT_METHOD_RESP];
}
}
+ if (mad_agent)
+ atomic_inc(&mad_agent->refcount);
+out:
+ spin_unlock_irqrestore(&port_priv->reg_lock, flags);
}
- if (mad_agent) {
- if (mad_agent->agent.recv_handler)
- atomic_inc(&mad_agent->refcount);
- else {
- dev_notice(&port_priv->device->dev,
- "No receive handler for client %p on port %d\n",
- &mad_agent->agent, port_priv->port_num);
- mad_agent = NULL;
- }
+ if (mad_agent && !mad_agent->agent.recv_handler) {
+ dev_notice(&port_priv->device->dev,
+ "No receive handler for client %p on port %d\n",
+ &mad_agent->agent, port_priv->port_num);
+ deref_mad_agent(mad_agent);
+ mad_agent = NULL;
}
-out:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
return mad_agent;
}
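The rewritten find_mad_agent() above replaces a locked list walk with an RCU-protected IDR lookup guarded by atomic_inc_not_zero(). A condensed sketch of that lookup idiom, with illustrative demo_* names (the freeing side must use kfree_rcu(), as the mad code now does):

#include <linux/atomic.h>
#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_agent {
	atomic_t refcount;
};

static DEFINE_IDR(demo_clients);

static struct demo_agent *demo_lookup(u32 id)
{
	struct demo_agent *agent;

	rcu_read_lock();
	agent = idr_find(&demo_clients, id);
	/* A zero refcount means teardown already began: treat as not found */
	if (agent && !atomic_inc_not_zero(&agent->refcount))
		agent = NULL;
	rcu_read_unlock();

	/* Caller must drop the reference it now holds (if any) */
	return agent;
}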
@@ -1896,8 +1914,8 @@ static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_
const struct ib_global_route *grh =
rdma_ah_read_grh(&attr);
- if (ib_get_cached_gid(device, port_num,
- grh->sgid_index, &sgid, NULL))
+ if (rdma_query_gid(device, port_num,
+ grh->sgid_index, &sgid))
return 0;
return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
16);
@@ -2457,7 +2475,6 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
struct ib_mad_qp_info *qp_info;
struct ib_mad_queue *send_queue;
- struct ib_send_wr *bad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags;
int ret;
@@ -2507,7 +2524,7 @@ retry:
if (queued_send_wr) {
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
- &bad_send_wr);
+ NULL);
if (ret) {
dev_err(&port_priv->device->dev,
"ib_post_send failed: %d\n", ret);
@@ -2552,11 +2569,9 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
if (wc->status == IB_WC_WR_FLUSH_ERR) {
if (mad_send_wr->retry) {
/* Repost send */
- struct ib_send_wr *bad_send_wr;
-
mad_send_wr->retry = 0;
ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
- &bad_send_wr);
+ NULL);
if (!ret)
return false;
}
@@ -2872,7 +2887,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
int post, ret;
struct ib_mad_private *mad_priv;
struct ib_sge sg_list;
- struct ib_recv_wr recv_wr, *bad_recv_wr;
+ struct ib_recv_wr recv_wr;
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
/* Initialize common scatter list fields */
@@ -2916,7 +2931,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
post = (++recv_queue->count < recv_queue->max_active);
list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
spin_unlock_irqrestore(&recv_queue->lock, flags);
- ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
+ ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
if (ret) {
spin_lock_irqsave(&recv_queue->lock, flags);
list_del(&mad_priv->header.mad_list.list);
@@ -3159,7 +3174,6 @@ static int ib_mad_port_open(struct ib_device *device,
port_priv->device = device;
port_priv->port_num = port_num;
spin_lock_init(&port_priv->reg_lock);
- INIT_LIST_HEAD(&port_priv->agent_list);
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
@@ -3338,6 +3352,9 @@ int ib_mad_init(void)
INIT_LIST_HEAD(&ib_mad_port_list);
+ /* Client ID 0 is used for snoop-only clients */
+ idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
+
if (ib_register_client(&mad_client)) {
pr_err("Couldn't register ib_mad client\n");
return -EINVAL;
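For reference, a minimal sketch of the preload-plus-cyclic IDR allocation that ib_register_mad_agent() now uses for hi_tid, with a plain spinlock standing in for idr_lock() and demo_* names throughout:

#include <linux/idr.h>
#include <linux/spinlock.h>

#define DEMO_ID_LIMIT (1 << 24)	/* ids must stay below 2^24 */

static DEFINE_IDR(demo_clients);
static DEFINE_SPINLOCK(demo_idr_lock);

static int demo_register(void *client)
{
	int id;

	/* Preallocate outside the lock so the locked step can be atomic */
	idr_preload(GFP_KERNEL);
	spin_lock(&demo_idr_lock);
	id = idr_alloc_cyclic(&demo_clients, client, 0, DEMO_ID_LIMIT,
			      GFP_ATOMIC);
	spin_unlock(&demo_idr_lock);
	idr_preload_end();

	return id;	/* new id, or a negative errno */
}

static void demo_unregister(int id)
{
	spin_lock(&demo_idr_lock);
	idr_remove(&demo_clients, id);
	spin_unlock(&demo_idr_lock);
}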
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 28669f6419e1..d84ae1671898 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -89,7 +89,6 @@ struct ib_rmpp_segment {
};
struct ib_mad_agent_private {
- struct list_head agent_list;
struct ib_mad_agent agent;
struct ib_mad_reg_req *reg_req;
struct ib_mad_qp_info *qp_info;
@@ -105,7 +104,10 @@ struct ib_mad_agent_private {
struct list_head rmpp_list;
atomic_t refcount;
- struct completion comp;
+ union {
+ struct completion comp;
+ struct rcu_head rcu;
+ };
};
struct ib_mad_snoop_private {
@@ -203,7 +205,6 @@ struct ib_mad_port_private {
spinlock_t reg_lock;
struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
- struct list_head agent_list;
struct workqueue_struct *wq;
struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];
};
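The completion/rcu_head union added to ib_mad_agent_private works because the two members are never live at the same time: the completion is only waited on while the agent is registered, and the rcu_head is only used for the final deferred free. A small sketch of the same space-saving idiom, with illustrative names:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_agent {
	atomic_t refcount;
	union {
		struct completion comp;	/* used while the agent is alive */
		struct rcu_head rcu;	/* used only for the final free */
	};
};

static void demo_free(struct demo_agent *agent)
{
	/* Defer freeing until concurrent RCU readers have finished */
	kfree_rcu(agent, rcu);
}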
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 6c48f4193dda..d50ff70bb24b 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -716,14 +716,28 @@ int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
+/**
+ * ib_init_ah_from_mcmember - Initialize AH attribute from multicast
+ * member record and gid of the device.
+ * @device: RDMA device
+ * @port_num: Port of the rdma device to consider
+ * @ndev: Optional netdevice, applicable only for RoCE
+ * @gid_type: GID type to consider
+ * @ah_attr: AH attribute to fill in on successful completion
+ *
+ * ib_init_ah_from_mcmember() initializes AH attribute based on multicast
+ * member record and other device properties. On success the caller is
+ * responsible for calling rdma_destroy_ah_attr() on the ah_attr. Returns
+ * 0 on success or an appropriate error code.
+ *
+ */
int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
struct rdma_ah_attr *ah_attr)
{
- int ret;
- u16 gid_index;
+ const struct ib_gid_attr *sgid_attr;
/* GID table is not based on the netdevice for IB link layer,
* so ignore ndev during search.
@@ -733,26 +747,22 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
else if (!rdma_protocol_roce(device, port_num))
return -EINVAL;
- ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
- gid_type, port_num,
- ndev,
- &gid_index);
- if (ret)
- return ret;
+ sgid_attr = rdma_find_gid_by_port(device, &rec->port_gid,
+ gid_type, port_num, ndev);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
- memset(ah_attr, 0, sizeof *ah_attr);
+ memset(ah_attr, 0, sizeof(*ah_attr));
ah_attr->type = rdma_ah_find_type(device, port_num);
rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->mlid));
rdma_ah_set_sl(ah_attr, rec->sl);
rdma_ah_set_port_num(ah_attr, port_num);
rdma_ah_set_static_rate(ah_attr, rec->rate);
-
- rdma_ah_set_grh(ah_attr, &rec->mgid,
- be32_to_cpu(rec->flow_label),
- (u8)gid_index,
- rec->hop_limit,
- rec->traffic_class);
+ rdma_move_grh_sgid_attr(ah_attr, &rec->mgid,
+ be32_to_cpu(rec->flow_label),
+ rec->hop_limit, rec->traffic_class,
+ sgid_attr);
return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
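The conversion above swaps an int-plus-out-parameter lookup for one that returns the object itself, encoding failures with ERR_PTR(). A generic sketch of that calling convention, using hypothetical demo_* names:

#include <linux/err.h>

struct demo_attr {
	int index;
};

static struct demo_attr demo_table[16];

static struct demo_attr *demo_find(int key)
{
	if (key < 0 || key >= 16)
		return ERR_PTR(-ENOENT);	/* error encoded in pointer */
	return &demo_table[key];
}

static int demo_use(int key)
{
	struct demo_attr *attr = demo_find(key);

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	/* ... use attr, then release it per the API's ownership rules ... */
	return 0;
}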
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 340c7bea45ab..0385ab438320 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -237,15 +237,15 @@ static int fill_port_info(struct sk_buff *msg,
if (ret)
return ret;
- BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
- if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
- (u64)attr.port_cap_flags, RDMA_NLDEV_ATTR_PAD))
- return -EMSGSIZE;
- if (rdma_protocol_ib(device, port) &&
- nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
- attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
- return -EMSGSIZE;
if (rdma_protocol_ib(device, port)) {
+ BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
+ (u64)attr.port_cap_flags,
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
+ attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
return -EMSGSIZE;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index a6e904973ba8..6eb64c6f0802 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -32,6 +32,7 @@
#include <linux/file.h>
#include <linux/anon_inodes.h>
+#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
@@ -41,51 +42,6 @@
#include "core_priv.h"
#include "rdma_core.h"
-int uverbs_ns_idx(u16 *id, unsigned int ns_count)
-{
- int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;
-
- if (ret >= ns_count)
- return -EINVAL;
-
- *id &= ~UVERBS_ID_NS_MASK;
- return ret;
-}
-
-const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
- uint16_t object)
-{
- const struct uverbs_root_spec *object_hash = ibdev->specs_root;
- const struct uverbs_object_spec_hash *objects;
- int ret = uverbs_ns_idx(&object, object_hash->num_buckets);
-
- if (ret < 0)
- return NULL;
-
- objects = object_hash->object_buckets[ret];
-
- if (object >= objects->num_objects)
- return NULL;
-
- return objects->objects[object];
-}
-
-const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
- uint16_t method)
-{
- const struct uverbs_method_spec_hash *methods;
- int ret = uverbs_ns_idx(&method, object->num_buckets);
-
- if (ret < 0)
- return NULL;
-
- methods = object->method_buckets[ret];
- if (method >= methods->num_methods)
- return NULL;
-
- return methods->methods[method];
-}
-
void uverbs_uobject_get(struct ib_uobject *uobject)
{
kref_get(&uobject->ref);
@@ -96,7 +52,7 @@ static void uverbs_uobject_free(struct kref *ref)
struct ib_uobject *uobj =
container_of(ref, struct ib_uobject, ref);
- if (uobj->type->type_class->needs_kfree_rcu)
+ if (uobj->uapi_object->type_class->needs_kfree_rcu)
kfree_rcu(uobj, rcu);
else
kfree(uobj);
@@ -107,7 +63,8 @@ void uverbs_uobject_put(struct ib_uobject *uobject)
kref_put(&uobject->ref, uverbs_uobject_free);
}
-static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
+static int uverbs_try_lock_object(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
/*
* When a shared access is required, we use a positive counter. Each
@@ -120,27 +77,211 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
* concurrently, setting the counter to zero is enough for releasing
* this lock.
*/
- if (!exclusive)
- return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
+ return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
-EBUSY : 0;
+ case UVERBS_LOOKUP_WRITE:
+ /* lock is exclusive */
+ return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
+ case UVERBS_LOOKUP_DESTROY:
+ return 0;
+ }
+ return 0;
+}
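Taken together with rdma_lookup_put_uobject() further down, the usecnt protocol above is a tiny reader/writer lock built from a single atomic_t: readers count up unless a writer holds -1, and a writer swings 0 to -1. A stand-alone sketch of just that protocol, assuming nothing but an atomic_t:

#include <linux/atomic.h>
#include <linux/errno.h>

static int demo_read_lock(atomic_t *usecnt)
{
	/* Add a reader unless the counter is -1 (write locked) */
	return atomic_fetch_add_unless(usecnt, 1, -1) == -1 ? -EBUSY : 0;
}

static int demo_write_lock(atomic_t *usecnt)
{
	/* Writers need the counter idle at exactly 0 */
	return atomic_cmpxchg(usecnt, 0, -1) == 0 ? 0 : -EBUSY;
}

static void demo_read_unlock(atomic_t *usecnt)
{
	atomic_dec(usecnt);
}

static void demo_write_unlock(atomic_t *usecnt)
{
	atomic_set(usecnt, 0);
}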
+
+static void assert_uverbs_usecnt(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
+{
+#ifdef CONFIG_LOCKDEP
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
+ WARN_ON(atomic_read(&uobj->usecnt) <= 0);
+ break;
+ case UVERBS_LOOKUP_WRITE:
+ WARN_ON(atomic_read(&uobj->usecnt) != -1);
+ break;
+ case UVERBS_LOOKUP_DESTROY:
+ break;
+ }
+#endif
+}
+
+/*
+ * This must be called with the hw_destroy_rwsem locked for read or write,
+ * also the uobject itself must be locked for write.
+ *
+ * Upon return the HW object is guaranteed to be destroyed.
+ *
+ * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
+ * however the type's alloc_commit function cannot have been called and the
+ * uobject cannot be on the uobjects_list.
+ *
+ * For RDMA_REMOVE_DESTROY the caller should be holding a kref (e.g. via
+ * rdma_lookup_get_uobject) and the object is left in a state where the caller
+ * needs to call rdma_lookup_put_uobject.
+ *
+ * For all other destroy modes this function internally unlocks the uobject
+ * and consumes the kref on the uobj.
+ */
+static int uverbs_destroy_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason reason)
+{
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ unsigned long flags;
+ int ret;
+
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+ assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+
+ if (uobj->object) {
+ ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason);
+ if (ret) {
+ if (ib_is_destroy_retryable(ret, reason, uobj))
+ return ret;
+
+ /* Nothing to be done, dangle the memory and move on */
+ WARN(true,
+ "ib_uverbs: failed to remove uobject id %d, driver err=%d",
+ uobj->id, ret);
+ }
+
+ uobj->object = NULL;
+ }
- /* lock is either WRITE or DESTROY - should be exclusive */
- return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
+ if (reason == RDMA_REMOVE_ABORT) {
+ WARN_ON(!list_empty(&uobj->list));
+ WARN_ON(!uobj->context);
+ uobj->uapi_object->type_class->alloc_abort(uobj);
+ }
+
+ uobj->context = NULL;
+
+ /*
+ * For DESTROY the usecnt is held write locked, the caller is expected
+	 * to unlock and put the object when done with it. Only DESTROY
+ * can remove the IDR handle.
+ */
+ if (reason != RDMA_REMOVE_DESTROY)
+ atomic_set(&uobj->usecnt, 0);
+ else
+ uobj->uapi_object->type_class->remove_handle(uobj);
+
+ if (!list_empty(&uobj->list)) {
+ spin_lock_irqsave(&ufile->uobjects_lock, flags);
+ list_del_init(&uobj->list);
+ spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
+
+ /*
+ * Pairs with the get in rdma_alloc_commit_uobject(), could
+ * destroy uobj.
+ */
+ uverbs_uobject_put(uobj);
+ }
+
+ /*
+ * When aborting the stack kref remains owned by the core code, and is
+ * not transferred into the type. Pairs with the get in alloc_uobj
+ */
+ if (reason == RDMA_REMOVE_ABORT)
+ uverbs_uobject_put(uobj);
+
+ return 0;
}
-static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
- const struct uverbs_obj_type *type)
+/*
+ * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
+ * sequence. It should only be used from command callbacks. On success the
+ * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * version requires the caller to have already obtained an
+ * LOOKUP_DESTROY uobject kref.
+ */
+int uobj_destroy(struct ib_uobject *uobj)
{
- struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ int ret;
+
+ down_read(&ufile->hw_destroy_rwsem);
+
+ ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
+ if (ret)
+ goto out_unlock;
+
+ ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
+ if (ret) {
+ atomic_set(&uobj->usecnt, 0);
+ goto out_unlock;
+ }
+out_unlock:
+ up_read(&ufile->hw_destroy_rwsem);
+ return ret;
+}
+
+/*
+ * uobj_get_destroy destroys the HW object and returns a handle to the uobj
+ * with a NULL object pointer. The caller must pair this with
+ * uverbs_put_destroy.
+ */
+struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
+ u32 id, struct ib_uverbs_file *ufile)
+{
+ struct ib_uobject *uobj;
+ int ret;
+
+ uobj = rdma_lookup_get_uobject(obj, ufile, id, UVERBS_LOOKUP_DESTROY);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ ret = uobj_destroy(uobj);
+ if (ret) {
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
+ return ERR_PTR(ret);
+ }
+
+ return uobj;
+}
+
+/*
+ * Does both uobj_get_destroy() and uobj_put_destroy(). Returns success_res
+ * on success (negative errno on failure). For use by callers that do not need
+ * the uobj.
+ */
+int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
+ struct ib_uverbs_file *ufile, int success_res)
+{
+ struct ib_uobject *uobj;
+
+ uobj = __uobj_get_destroy(obj, id, ufile);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ return success_res;
+}
+
+/* alloc_uobj must be undone by uverbs_destroy_uobject() */
+static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
+ const struct uverbs_api_object *obj)
+{
+ struct ib_uobject *uobj;
+ struct ib_ucontext *ucontext;
+
+ ucontext = ib_uverbs_get_ucontext(ufile);
+ if (IS_ERR(ucontext))
+ return ERR_CAST(ucontext);
+
+ uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
if (!uobj)
return ERR_PTR(-ENOMEM);
/*
* user_handle should be filled by the handler,
* The object is added to the list in the commit stage.
*/
- uobj->context = context;
- uobj->type = type;
+ uobj->ufile = ufile;
+ uobj->context = ucontext;
+ INIT_LIST_HEAD(&uobj->list);
+ uobj->uapi_object = obj;
/*
* Allocated objects start out as write locked to deny any other
* syscalls from accessing them until they are committed. See
@@ -157,45 +298,39 @@ static int idr_add_uobj(struct ib_uobject *uobj)
int ret;
idr_preload(GFP_KERNEL);
- spin_lock(&uobj->context->ufile->idr_lock);
+ spin_lock(&uobj->ufile->idr_lock);
/*
* We start with allocating an idr pointing to NULL. This represents an
* object which isn't initialized yet. We'll replace it later on with
* the real object once we commit.
*/
- ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
+ ret = idr_alloc(&uobj->ufile->idr, NULL, 0,
min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
if (ret >= 0)
uobj->id = ret;
- spin_unlock(&uobj->context->ufile->idr_lock);
+ spin_unlock(&uobj->ufile->idr_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
}
-/*
- * It only removes it from the uobjects list, uverbs_uobject_put() is still
- * required.
- */
-static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
-{
- spin_lock(&uobj->context->ufile->idr_lock);
- idr_remove(&uobj->context->ufile->idr, uobj->id);
- spin_unlock(&uobj->context->ufile->idr_lock);
-}
-
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
-static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext,
- int id, bool exclusive)
+static struct ib_uobject *
+lookup_get_idr_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
struct ib_uobject *uobj;
+ unsigned long idrno = id;
+
+ if (id < 0 || id > ULONG_MAX)
+ return ERR_PTR(-EINVAL);
rcu_read_lock();
/* object won't be released as we're protected in rcu */
- uobj = idr_find(&ucontext->ufile->idr, id);
+ uobj = idr_find(&ufile->idr, idrno);
if (!uobj) {
uobj = ERR_PTR(-ENOENT);
goto free;
@@ -215,19 +350,28 @@ free:
return uobj;
}
-static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext,
- int id, bool exclusive)
+static struct ib_uobject *
+lookup_get_fd_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
+ const struct uverbs_obj_fd_type *fd_type;
struct file *f;
struct ib_uobject *uobject;
- const struct uverbs_obj_fd_type *fd_type =
- container_of(type, struct uverbs_obj_fd_type, type);
+ int fdno = id;
- if (exclusive)
+ if (fdno != id)
+ return ERR_PTR(-EINVAL);
+
+ if (mode != UVERBS_LOOKUP_READ)
return ERR_PTR(-EOPNOTSUPP);
- f = fget(id);
+ if (!obj->type_attrs)
+ return ERR_PTR(-EIO);
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+
+ f = fget(fdno);
if (!f)
return ERR_PTR(-EBADF);
@@ -246,43 +390,55 @@ static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *ty
return uobject;
}
-struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext,
- int id, bool exclusive)
+struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
struct ib_uobject *uobj;
int ret;
- uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
+ if (!obj)
+ return ERR_PTR(-EINVAL);
+
+ uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
if (IS_ERR(uobj))
return uobj;
- if (uobj->type != type) {
+ if (uobj->uapi_object != obj) {
ret = -EINVAL;
goto free;
}
- ret = uverbs_try_lock_object(uobj, exclusive);
- if (ret) {
- WARN(ucontext->cleanup_reason,
- "ib_uverbs: Trying to lookup_get while cleanup context\n");
+ /*
+	 * If we have been disassociated, block every command except for
+	 * DESTROY-based commands.
+ */
+ if (mode != UVERBS_LOOKUP_DESTROY &&
+ !srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu)) {
+ ret = -EIO;
goto free;
}
+ ret = uverbs_try_lock_object(uobj, mode);
+ if (ret)
+ goto free;
+
return uobj;
free:
- uobj->type->type_class->lookup_put(uobj, exclusive);
+ obj->type_class->lookup_put(uobj, mode);
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
-static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext)
+static struct ib_uobject *
+alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile)
{
int ret;
struct ib_uobject *uobj;
- uobj = alloc_uobj(ucontext, type);
+ uobj = alloc_uobj(ufile, obj);
if (IS_ERR(uobj))
return uobj;
@@ -290,7 +446,7 @@ static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *
if (ret)
goto uobj_put;
- ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
+ ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
RDMACG_RESOURCE_HCA_OBJECT);
if (ret)
goto idr_remove;
@@ -298,304 +454,305 @@ static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *
return uobj;
idr_remove:
- uverbs_idr_remove_uobj(uobj);
+ spin_lock(&ufile->idr_lock);
+ idr_remove(&ufile->idr, uobj->id);
+ spin_unlock(&ufile->idr_lock);
uobj_put:
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
-static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext)
+static struct ib_uobject *
+alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(type, struct uverbs_obj_fd_type, type);
int new_fd;
struct ib_uobject *uobj;
- struct ib_uobject_file *uobj_file;
- struct file *filp;
new_fd = get_unused_fd_flags(O_CLOEXEC);
if (new_fd < 0)
return ERR_PTR(new_fd);
- uobj = alloc_uobj(ucontext, type);
+ uobj = alloc_uobj(ufile, obj);
if (IS_ERR(uobj)) {
put_unused_fd(new_fd);
return uobj;
}
- uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
- filp = anon_inode_getfile(fd_type->name,
- fd_type->fops,
- uobj_file,
- fd_type->flags);
- if (IS_ERR(filp)) {
- put_unused_fd(new_fd);
- uverbs_uobject_put(uobj);
- return (void *)filp;
- }
-
- uobj_file->uobj.id = new_fd;
- uobj_file->uobj.object = filp;
- uobj_file->ufile = ucontext->ufile;
- INIT_LIST_HEAD(&uobj->list);
- kref_get(&uobj_file->ufile->ref);
+ uobj->id = new_fd;
+ uobj->ufile = ufile;
return uobj;
}
-struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext)
+struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile)
{
- return type->type_class->alloc_begin(type, ucontext);
-}
+ struct ib_uobject *ret;
-static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
-{
- const struct uverbs_obj_idr_type *idr_type =
- container_of(uobj->type, struct uverbs_obj_idr_type,
- type);
- int ret = idr_type->destroy_object(uobj, why);
+ if (!obj)
+ return ERR_PTR(-EINVAL);
/*
- * We can only fail gracefully if the user requested to destroy the
- * object. In the rest of the cases, just remove whatever you can.
+ * The hw_destroy_rwsem is held across the entire object creation and
+ * released during rdma_alloc_commit_uobject or
+	 * rdma_alloc_abort_uobject.
*/
- if (why == RDMA_REMOVE_DESTROY && ret)
- return ret;
-
- ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
- RDMACG_RESOURCE_HCA_OBJECT);
- uverbs_idr_remove_uobj(uobj);
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ return ERR_PTR(-EIO);
+ ret = obj->type_class->alloc_begin(obj, ufile);
+ if (IS_ERR(ret)) {
+ up_read(&ufile->hw_destroy_rwsem);
+ return ret;
+ }
return ret;
}
-static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
+static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
- struct ib_uobject_file *uobj_file =
- container_of(uobj, struct ib_uobject_file, uobj);
- struct file *filp = uobj->object;
- int id = uobj_file->uobj.id;
+ ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+ RDMACG_RESOURCE_HCA_OBJECT);
- /* Unsuccessful NEW */
- fput(filp);
- put_unused_fd(id);
+ spin_lock(&uobj->ufile->idr_lock);
+ idr_remove(&uobj->ufile->idr, uobj->id);
+ spin_unlock(&uobj->ufile->idr_lock);
}
-static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(uobj->type, struct uverbs_obj_fd_type, type);
- struct ib_uobject_file *uobj_file =
- container_of(uobj, struct ib_uobject_file, uobj);
- int ret = fd_type->context_closed(uobj_file, why);
+ const struct uverbs_obj_idr_type *idr_type =
+ container_of(uobj->uapi_object->type_attrs,
+ struct uverbs_obj_idr_type, type);
+ int ret = idr_type->destroy_object(uobj, why);
- if (why == RDMA_REMOVE_DESTROY && ret)
+ /*
+ * We can only fail gracefully if the user requested to destroy the
+ * object or when a retry may be called upon an error.
+ * In the rest of the cases, just remove whatever you can.
+ */
+ if (ib_is_destroy_retryable(ret, why, uobj))
return ret;
- if (why == RDMA_REMOVE_DURING_CLEANUP) {
- alloc_abort_fd_uobject(uobj);
- return ret;
- }
+ if (why == RDMA_REMOVE_ABORT)
+ return 0;
- uobj_file->uobj.context = NULL;
- return ret;
+ ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+ RDMACG_RESOURCE_HCA_OBJECT);
+
+ return 0;
}
-static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
+static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
-#ifdef CONFIG_LOCKDEP
- if (exclusive)
- WARN_ON(atomic_read(&uobj->usecnt) != -1);
- else
- WARN_ON(atomic_read(&uobj->usecnt) <= 0);
-#endif
+ spin_lock(&uobj->ufile->idr_lock);
+ idr_remove(&uobj->ufile->idr, uobj->id);
+ spin_unlock(&uobj->ufile->idr_lock);
+ /* Matches the kref in alloc_commit_idr_uobject */
+ uverbs_uobject_put(uobj);
}
-static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
- int ret;
- struct ib_ucontext *ucontext = uobj->context;
-
- ret = uobj->type->type_class->remove_commit(uobj, why);
- if (ret && why == RDMA_REMOVE_DESTROY) {
- /* We couldn't remove the object, so just unlock the uobject */
- atomic_set(&uobj->usecnt, 0);
- uobj->type->type_class->lookup_put(uobj, true);
- } else {
- mutex_lock(&ucontext->uobjects_lock);
- list_del(&uobj->list);
- mutex_unlock(&ucontext->uobjects_lock);
- /* put the ref we took when we created the object */
- uverbs_uobject_put(uobj);
- }
-
- return ret;
+ put_unused_fd(uobj->id);
}
-/* This is called only for user requested DESTROY reasons */
-int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
+static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
- int ret;
- struct ib_ucontext *ucontext = uobj->context;
-
- /* put the ref count we took at lookup_get */
- uverbs_uobject_put(uobj);
- /* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
- WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
- return 0;
- }
- assert_uverbs_usecnt(uobj, true);
- ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
+ const struct uverbs_obj_fd_type *fd_type = container_of(
+ uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
+ int ret = fd_type->context_closed(uobj, why);
- up_read(&ucontext->cleanup_rwsem);
- return ret;
-}
+ if (ib_is_destroy_retryable(ret, why, uobj))
+ return ret;
-static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
-{
return 0;
}
-static const struct uverbs_obj_type null_obj_type = {
- .type_class = &((const struct uverbs_obj_type_class){
- .remove_commit = null_obj_type_class_remove_commit,
- /* be cautious */
- .needs_kfree_rcu = true}),
-};
-
-int rdma_explicit_destroy(struct ib_uobject *uobject)
+static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
- int ret;
- struct ib_ucontext *ucontext = uobject->context;
-
- /* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
- WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
- return 0;
- }
- assert_uverbs_usecnt(uobject, true);
- ret = uobject->type->type_class->remove_commit(uobject,
- RDMA_REMOVE_DESTROY);
- if (ret)
- goto out;
-
- uobject->type = &null_obj_type;
-
-out:
- up_read(&ucontext->cleanup_rwsem);
- return ret;
}
-static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
+static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
- spin_lock(&uobj->context->ufile->idr_lock);
+ struct ib_uverbs_file *ufile = uobj->ufile;
+
+ spin_lock(&ufile->idr_lock);
/*
* We already allocated this IDR with a NULL object, so
* this shouldn't fail.
+ *
+	 * NOTE: Once we set the IDR we lose ownership of our kref on uobj.
+	 * It will be put by remove_handle_idr_uobject()
*/
- WARN_ON(idr_replace(&uobj->context->ufile->idr,
- uobj, uobj->id));
- spin_unlock(&uobj->context->ufile->idr_lock);
+ WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id));
+ spin_unlock(&ufile->idr_lock);
+
+ return 0;
}
-static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
+static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
- struct ib_uobject_file *uobj_file =
- container_of(uobj, struct ib_uobject_file, uobj);
+ const struct uverbs_obj_fd_type *fd_type = container_of(
+ uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
+ int fd = uobj->id;
+ struct file *filp;
+
+ /*
+	 * The kref for uobj is moved into filp->private_data and put in
+ * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
+ * must be guaranteed to be called from the provided fops release
+ * callback.
+ */
+ filp = anon_inode_getfile(fd_type->name,
+ fd_type->fops,
+ uobj,
+ fd_type->flags);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ uobj->object = filp;
+
+ /* Matching put will be done in uverbs_close_fd() */
+ kref_get(&uobj->ufile->ref);
- fd_install(uobj_file->uobj.id, uobj->object);
/* This shouldn't be used anymore. Use the file object instead */
- uobj_file->uobj.id = 0;
- /* Get another reference as we export this to the fops */
- uverbs_uobject_get(&uobj_file->uobj);
+ uobj->id = 0;
+
+ /*
+	 * NOTE: Once we install the file we lose ownership of our kref on
+ * uobj. It will be put by uverbs_close_fd()
+ */
+ fd_install(fd, filp);
+
+ return 0;
}
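alloc_commit_fd_uobject() follows the usual two-phase FD recipe: the descriptor number is reserved early because that step is cheap to undo, while fd_install() is deferred to the last, cannot-fail moment, since afterwards userspace owns the file reference. A condensed sketch, with demo_fops assumed to be supplied by the caller:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static int demo_create_fd(void *priv, const struct file_operations *demo_fops)
{
	struct file *filp;
	int fd;

	/* Phase 1: reserve the number; trivially undone on failure */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = anon_inode_getfile("[demo]", demo_fops, priv, O_RDWR);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	/* Phase 2: publish; after this the fd is visible to userspace */
	fd_install(fd, filp);
	return fd;
}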
-int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
+/*
+ * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
+ * caller can no longer assume uobj is valid. If this function fails it
+ * destroys the uobject, including the attached HW object.
+ */
+int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
- /* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
- int ret;
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ int ret;
- WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
- ret = uobj->type->type_class->remove_commit(uobj,
- RDMA_REMOVE_DURING_CLEANUP);
- if (ret)
- pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
- uobj->id);
+ /* alloc_commit consumes the uobj kref */
+ ret = uobj->uapi_object->type_class->alloc_commit(uobj);
+ if (ret) {
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
+ up_read(&ufile->hw_destroy_rwsem);
return ret;
}
+ /* kref is held so long as the uobj is on the uobj list. */
+ uverbs_uobject_get(uobj);
+ spin_lock_irq(&ufile->uobjects_lock);
+ list_add(&uobj->list, &ufile->uobjects);
+ spin_unlock_irq(&ufile->uobjects_lock);
+
/* matches atomic_set(-1) in alloc_uobj */
- assert_uverbs_usecnt(uobj, true);
atomic_set(&uobj->usecnt, 0);
- mutex_lock(&uobj->context->uobjects_lock);
- list_add(&uobj->list, &uobj->context->uobjects);
- mutex_unlock(&uobj->context->uobjects_lock);
-
- uobj->type->type_class->alloc_commit(uobj);
- up_read(&uobj->context->cleanup_rwsem);
+ /* Matches the down_read in rdma_alloc_begin_uobject */
+ up_read(&ufile->hw_destroy_rwsem);
return 0;
}
-static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
-{
- uverbs_idr_remove_uobj(uobj);
- ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
- RDMACG_RESOURCE_HCA_OBJECT);
- uverbs_uobject_put(uobj);
-}
-
+/*
+ * This consumes the kref for uobj. It is up to the caller to unwind the HW
+ * object and anything else connected to uobj before calling this.
+ */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
- uobj->type->type_class->alloc_abort(uobj);
+ struct ib_uverbs_file *ufile = uobj->ufile;
+
+ uobj->object = NULL;
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
+
+ /* Matches the down_read in rdma_alloc_begin_uobject */
+ up_read(&ufile->hw_destroy_rwsem);
}
-static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
+static void lookup_put_idr_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
}
-static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
+static void lookup_put_fd_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
struct file *filp = uobj->object;
- WARN_ON(exclusive);
+ WARN_ON(mode != UVERBS_LOOKUP_READ);
	/* This indirectly calls uverbs_close_fd() and frees the object */
fput(filp);
}
-void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
+void rdma_lookup_put_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
- assert_uverbs_usecnt(uobj, exclusive);
- uobj->type->type_class->lookup_put(uobj, exclusive);
+ assert_uverbs_usecnt(uobj, mode);
+ uobj->uapi_object->type_class->lookup_put(uobj, mode);
/*
* In order to unlock an object, either decrease its usecnt for
* read access or zero it in case of exclusive access. See
* uverbs_try_lock_object for locking schema information.
*/
- if (!exclusive)
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
atomic_dec(&uobj->usecnt);
- else
+ break;
+ case UVERBS_LOOKUP_WRITE:
atomic_set(&uobj->usecnt, 0);
+ break;
+ case UVERBS_LOOKUP_DESTROY:
+ break;
+ }
+ /* Pairs with the kref obtained by type->lookup_get */
uverbs_uobject_put(uobj);
}
+void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+{
+ spin_lock_init(&ufile->idr_lock);
+ idr_init(&ufile->idr);
+}
+
+void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+{
+ struct ib_uobject *entry;
+ int id;
+
+ /*
+ * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
+	 * there are no HW objects left; however, the IDR is still populated
+ * with anything that has not been cleaned up by userspace. Since the
+ * kref on ufile is 0, nothing is allowed to call lookup_get.
+ *
+ * This is an optimized equivalent to remove_handle_idr_uobject
+ */
+ idr_for_each_entry(&ufile->idr, entry, id) {
+ WARN_ON(entry->object);
+ uverbs_uobject_put(entry);
+ }
+
+ idr_destroy(&ufile->idr);
+}
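A reduced sketch of the drain-and-destroy idiom in release_ufile_idr_uobject(): once lookups can no longer race, walk every surviving id, drop the reference the table held, and free the tree nodes. demo_* names are illustrative:

#include <linux/idr.h>
#include <linux/kref.h>

struct demo_obj {
	struct kref ref;
};

static void demo_release(struct kref *ref)
{
	/* kfree(container_of(ref, struct demo_obj, ref)); */
}

static void demo_release_table(struct idr *idr)
{
	struct demo_obj *entry;
	int id;

	/* Drop the reference the table held on each remaining entry */
	idr_for_each_entry(idr, entry, id)
		kref_put(&entry->ref, demo_release);

	idr_destroy(idr);
}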
+
const struct uverbs_obj_type_class uverbs_idr_class = {
.alloc_begin = alloc_begin_idr_uobject,
.lookup_get = lookup_get_idr_uobject,
.alloc_commit = alloc_commit_idr_uobject,
.alloc_abort = alloc_abort_idr_uobject,
.lookup_put = lookup_put_idr_uobject,
- .remove_commit = remove_commit_idr_uobject,
+ .destroy_hw = destroy_hw_idr_uobject,
+ .remove_handle = remove_handle_idr_uobject,
/*
* When we destroy an object, we first just lock it for WRITE and
* actually DESTROY it in the finalize stage. So, the problematic
@@ -611,103 +768,180 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
*/
.needs_kfree_rcu = true,
};
+EXPORT_SYMBOL(uverbs_idr_class);
-static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
+void uverbs_close_fd(struct file *f)
{
- struct ib_ucontext *ucontext;
- struct ib_uverbs_file *ufile = uobj_file->ufile;
- int ret;
+ struct ib_uobject *uobj = f->private_data;
+ struct ib_uverbs_file *ufile = uobj->ufile;
- mutex_lock(&uobj_file->ufile->cleanup_mutex);
+ if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
+ /*
+ * lookup_get_fd_uobject holds the kref on the struct file any
+ * time a FD uobj is locked, which prevents this release
+ * method from being invoked. Meaning we can always get the
+ * write lock here, or we have a kernel bug.
+ */
+ WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE);
+ up_read(&ufile->hw_destroy_rwsem);
+ }
- /* uobject was either already cleaned up or is cleaned up right now anyway */
- if (!uobj_file->uobj.context ||
- !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
- goto unlock;
+ /* Matches the get in alloc_begin_fd_uobject */
+ kref_put(&ufile->ref, ib_uverbs_release_file);
- ucontext = uobj_file->uobj.context;
- ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
- up_read(&ucontext->cleanup_rwsem);
- if (ret)
- pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
-unlock:
- mutex_unlock(&ufile->cleanup_mutex);
+ /* Pairs with filp->private_data in alloc_begin_fd_uobject */
+ uverbs_uobject_put(uobj);
}
-void uverbs_close_fd(struct file *f)
-{
- struct ib_uobject_file *uobj_file = f->private_data;
- struct kref *uverbs_file_ref = &uobj_file->ufile->ref;
+static void ufile_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct ib_device *ib_dev = ibcontext->device;
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+ if (!owning_process)
+ return;
+
+ owning_mm = get_task_mm(owning_process);
+ if (!owning_mm) {
+ pr_info("no mm, disassociate ucontext is pending task termination\n");
+ while (1) {
+ put_task_struct(owning_process);
+ usleep_range(1000, 2000);
+ owning_process = get_pid_task(ibcontext->tgid,
+ PIDTYPE_PID);
+ if (!owning_process ||
+ owning_process->state == TASK_DEAD) {
+ pr_info("disassociate ucontext done, task was terminated\n");
+ /* in case task was dead need to release the
+ * task struct.
+ */
+ if (owning_process)
+ put_task_struct(owning_process);
+ return;
+ }
+ }
+ }
- _uverbs_close_fd(uobj_file);
- uverbs_uobject_put(&uobj_file->uobj);
- kref_put(uverbs_file_ref, ib_uverbs_release_file);
+ down_write(&owning_mm->mmap_sem);
+ ib_dev->disassociate_ucontext(ibcontext);
+ up_write(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
}
-void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
+/*
+ * Drop the ucontext off the ufile and completely disconnect it from the
+ * ib_device
+ */
+static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
{
- enum rdma_remove_reason reason = device_removed ?
- RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
- unsigned int cur_order = 0;
+ struct ib_ucontext *ucontext = ufile->ucontext;
+ int ret;
+
+ if (reason == RDMA_REMOVE_DRIVER_REMOVE)
+ ufile_disassociate_ucontext(ucontext);
+
+ put_pid(ucontext->tgid);
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+ RDMACG_RESOURCE_HCA_HANDLE);
- ucontext->cleanup_reason = reason;
/*
- * Waits for all remove_commit and alloc_commit to finish. Logically, We
- * want to hold this forever as the context is going to be destroyed,
- * but we'll release it since it causes a "held lock freed" BUG message.
+ * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
+ * the error return.
*/
- down_write(&ucontext->cleanup_rwsem);
+ ret = ucontext->device->dealloc_ucontext(ucontext);
+ WARN_ON(ret);
- while (!list_empty(&ucontext->uobjects)) {
- struct ib_uobject *obj, *next_obj;
- unsigned int next_order = UINT_MAX;
+ ufile->ucontext = NULL;
+}
+
+static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
+{
+ struct ib_uobject *obj, *next_obj;
+ int ret = -EINVAL;
+ /*
+ * This shouldn't run while executing other commands on this
+ * context. Thus, the only thing we should take care of is
+ * releasing a FD while traversing this list. The FD could be
+ * closed and released from the _release fop of this FD.
+ * In order to mitigate this, we add a lock.
+	 * We take and release the lock per traversal in order to give
+	 * other threads (which might still use the FDs) a chance to run.
+ */
+ list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
/*
- * This shouldn't run while executing other commands on this
- * context. Thus, the only thing we should take care of is
- * releasing a FD while traversing this list. The FD could be
- * closed and released from the _release fop of this FD.
- * In order to mitigate this, we add a lock.
- * We take and release the lock per order traversal in order
- * to let other threads (which might still use the FDs) chance
- * to run.
+ * if we hit this WARN_ON, that means we are
+ * racing with a lookup_get.
*/
- mutex_lock(&ucontext->uobjects_lock);
- list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
- list) {
- if (obj->type->destroy_order == cur_order) {
- int ret;
-
- /*
- * if we hit this WARN_ON, that means we are
- * racing with a lookup_get.
- */
- WARN_ON(uverbs_try_lock_object(obj, true));
- ret = obj->type->type_class->remove_commit(obj,
- reason);
- list_del(&obj->list);
- if (ret)
- pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
- obj->id, cur_order);
- /* put the ref we took when we created the object */
- uverbs_uobject_put(obj);
- } else {
- next_order = min(next_order,
- obj->type->destroy_order);
- }
- }
- mutex_unlock(&ucontext->uobjects_lock);
- cur_order = next_order;
+ WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
+ if (!uverbs_destroy_uobject(obj, reason))
+ ret = 0;
}
- up_write(&ucontext->cleanup_rwsem);
+ return ret;
}
-void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
+/*
+ * Destroy the ucontext and every uobject associated with it. If called with
+ * reason != RDMA_REMOVE_CLOSE this will not return until the destruction has
+ * been completed and ufile->ucontext is NULL.
+ *
+ * This is internally locked and can be called in parallel from multiple
+ * contexts.
+ */
+void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
{
- ucontext->cleanup_reason = 0;
- mutex_init(&ucontext->uobjects_lock);
- INIT_LIST_HEAD(&ucontext->uobjects);
- init_rwsem(&ucontext->cleanup_rwsem);
+ if (reason == RDMA_REMOVE_CLOSE) {
+ /*
+ * During destruction we might trigger something that
+ * synchronously calls release on any file descriptor. For
+ * this reason all paths that come from file_operations
+ * release must use try_lock. They can progress knowing that
+ * there is an ongoing uverbs_destroy_ufile_hw that will clean
+ * up the driver resources.
+ */
+ if (!mutex_trylock(&ufile->ucontext_lock))
+ return;
+
+ } else {
+ mutex_lock(&ufile->ucontext_lock);
+ }
+
+ down_write(&ufile->hw_destroy_rwsem);
+
+ /*
+	 * If a ucontext was never created then we can't have any uobjects to
+	 * clean up, so there is nothing to do.
+ */
+ if (!ufile->ucontext)
+ goto done;
+
+ ufile->ucontext->closing = true;
+ ufile->ucontext->cleanup_retryable = true;
+ while (!list_empty(&ufile->uobjects))
+ if (__uverbs_cleanup_ufile(ufile, reason)) {
+ /*
+ * No entry was cleaned-up successfully during this
+ * iteration
+ */
+ break;
+ }
+
+ ufile->ucontext->cleanup_retryable = false;
+ if (!list_empty(&ufile->uobjects))
+ __uverbs_cleanup_ufile(ufile, reason);
+
+ ufile_destroy_ucontext(ufile, reason);
+
+done:
+ up_write(&ufile->hw_destroy_rwsem);
+ mutex_unlock(&ufile->ucontext_lock);
}
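The loop in uverbs_destroy_ufile_hw() is a two-pass retryable sweep: keep iterating while at least one object is destroyed with cleanup_retryable set, then make one final pass where failure is no longer tolerated. A schematic version over a plain list, where demo_destroy_one() stands in for uverbs_destroy_uobject() and in real code may fail with -EBUSY while retries are allowed:

#include <linux/list.h>
#include <linux/types.h>

struct demo_obj {
	struct list_head list;
};

/* Stand-in destructor: unlinks and succeeds; a real one may return
 * -EBUSY while @retryable is true. */
static int demo_destroy_one(struct demo_obj *obj, bool retryable)
{
	list_del(&obj->list);
	return 0;
}

static void demo_cleanup(struct list_head *objects)
{
	struct demo_obj *obj, *tmp;
	bool progress;

	/* Pass 1: sweep repeatedly while anything is still going away */
	do {
		progress = false;
		list_for_each_entry_safe(obj, tmp, objects, list)
			if (!demo_destroy_one(obj, true))
				progress = true;
	} while (!list_empty(objects) && progress);

	/* Pass 2: retries are over, everything must be torn down */
	list_for_each_entry_safe(obj, tmp, objects, list)
		demo_destroy_one(obj, false);
}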
const struct uverbs_obj_type_class uverbs_fd_class = {
@@ -716,23 +950,33 @@ const struct uverbs_obj_type_class uverbs_fd_class = {
.alloc_commit = alloc_commit_fd_uobject,
.alloc_abort = alloc_abort_fd_uobject,
.lookup_put = lookup_put_fd_uobject,
- .remove_commit = remove_commit_fd_uobject,
+ .destroy_hw = destroy_hw_fd_uobject,
+ .remove_handle = remove_handle_fd_uobject,
.needs_kfree_rcu = false,
};
+EXPORT_SYMBOL(uverbs_fd_class);
-struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
- struct ib_ucontext *ucontext,
- enum uverbs_obj_access access,
- int id)
+struct ib_uobject *
+uverbs_get_uobject_from_file(u16 object_id,
+ struct ib_uverbs_file *ufile,
+ enum uverbs_obj_access access, s64 id)
{
+ const struct uverbs_api_object *obj =
+ uapi_get_object(ufile->device->uapi, object_id);
+
switch (access) {
case UVERBS_ACCESS_READ:
- return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
+ return rdma_lookup_get_uobject(obj, ufile, id,
+ UVERBS_LOOKUP_READ);
case UVERBS_ACCESS_DESTROY:
+ /* Actual destruction is done inside uverbs_handle_method */
+ return rdma_lookup_get_uobject(obj, ufile, id,
+ UVERBS_LOOKUP_DESTROY);
case UVERBS_ACCESS_WRITE:
- return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
+ return rdma_lookup_get_uobject(obj, ufile, id,
+ UVERBS_LOOKUP_WRITE);
case UVERBS_ACCESS_NEW:
- return rdma_alloc_begin_uobject(type_attrs, ucontext);
+ return rdma_alloc_begin_uobject(obj, ufile);
default:
WARN_ON(true);
return ERR_PTR(-EOPNOTSUPP);
@@ -753,16 +997,14 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
switch (access) {
case UVERBS_ACCESS_READ:
- rdma_lookup_put_uobject(uobj, false);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
break;
case UVERBS_ACCESS_WRITE:
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
break;
case UVERBS_ACCESS_DESTROY:
- if (commit)
- ret = rdma_remove_commit_uobject(uobj);
- else
- rdma_lookup_put_uobject(uobj, true);
+ if (uobj)
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
break;
case UVERBS_ACCESS_NEW:
if (commit)
@@ -777,43 +1019,3 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
return ret;
}
-
-int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
- struct uverbs_attr_spec_hash * const *spec_hash,
- size_t num,
- bool commit)
-{
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < num; i++) {
- struct uverbs_attr_bundle_hash *curr_bundle =
- &attrs_bundle->hash[i];
- const struct uverbs_attr_spec_hash *curr_spec_bucket =
- spec_hash[i];
- unsigned int j;
-
- for (j = 0; j < curr_bundle->num_attrs; j++) {
- struct uverbs_attr *attr;
- const struct uverbs_attr_spec *spec;
-
- if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
- continue;
-
- attr = &curr_bundle->attrs[j];
- spec = &curr_spec_bucket->attrs[j];
-
- if (spec->type == UVERBS_ATTR_TYPE_IDR ||
- spec->type == UVERBS_ATTR_TYPE_FD) {
- int current_ret;
-
- current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
- spec->obj.access,
- commit);
- if (!ret)
- ret = current_ret;
- }
- }
- }
- return ret;
-}
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 1efcf93238dd..f962f2a593ba 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -43,20 +43,12 @@
#include <rdma/ib_verbs.h>
#include <linux/mutex.h>
-int uverbs_ns_idx(u16 *id, unsigned int ns_count);
-const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
- uint16_t object);
-const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
- uint16_t method);
-/*
- * These functions initialize the context and cleanups its uobjects.
- * The context has a list of objects which is protected by a mutex
- * on the context. initialize_ucontext should be called when we create
- * a context.
- * cleanup_ucontext removes all uobjects from the context and puts them.
- */
-void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed);
-void uverbs_initialize_ucontext(struct ib_ucontext *ucontext);
+struct ib_uverbs_device;
+
+void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason);
+
+int uobj_destroy(struct ib_uobject *uobj);
/*
* uverbs_uobject_get is called in order to increase the reference count on
@@ -82,7 +74,7 @@ void uverbs_uobject_put(struct ib_uobject *uobject);
void uverbs_close_fd(struct file *f);
/*
- * Get an ib_uobject that corresponds to the given id from ucontext, assuming
+ * Get an ib_uobject that corresponds to the given id from ufile, assuming
* the object is from the given type. Lock it to the required access when
* applicable.
* This function could create (access == NEW), destroy (access == DESTROY)
@@ -90,13 +82,11 @@ void uverbs_close_fd(struct file *f);
* The action will be finalized only when uverbs_finalize_object or
* uverbs_finalize_objects are called.
*/
-struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
- struct ib_ucontext *ucontext,
- enum uverbs_obj_access access,
- int id);
-int uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access,
- bool commit);
+struct ib_uobject *
+uverbs_get_uobject_from_file(u16 object_id,
+ struct ib_uverbs_file *ufile,
+ enum uverbs_obj_access access, s64 id);
+
/*
* Note that certain finalize stages could return a status:
* (a) alloc_commit could return a failure if the object is committed at the
@@ -112,9 +102,63 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
* function. For example, this could happen when we couldn't destroy an
* object.
*/
-int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
- struct uverbs_attr_spec_hash * const *spec_hash,
- size_t num,
- bool commit);
+int uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access,
+ bool commit);
+
+void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+
+/*
+ * This is the runtime description of the uverbs API, used by the syscall
+ * machinery to validate and dispatch calls.
+ */
+
+/*
+ * Depending on ID the slot pointer in the radix tree points at one of these
+ * structs.
+ */
+struct uverbs_api_object {
+ const struct uverbs_obj_type *type_attrs;
+ const struct uverbs_obj_type_class *type_class;
+};
+
+struct uverbs_api_ioctl_method {
+ int (__rcu *handler)(struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *ctx);
+ DECLARE_BITMAP(attr_mandatory, UVERBS_API_ATTR_BKEY_LEN);
+ u16 bundle_size;
+ u8 use_stack:1;
+ u8 driver_method:1;
+ u8 key_bitmap_len;
+ u8 destroy_bkey;
+};
+
+struct uverbs_api_attr {
+ struct uverbs_attr_spec spec;
+};
+
+struct uverbs_api_object;
+struct uverbs_api {
+ /* radix tree contains struct uverbs_api_* pointers */
+ struct radix_tree_root radix;
+ enum rdma_driver_id driver_id;
+};
+
+static inline const struct uverbs_api_object *
+uapi_get_object(struct uverbs_api *uapi, u16 object_id)
+{
+ return radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id));
+}
+
+char *uapi_key_format(char *S, unsigned int key);
+struct uverbs_api *uverbs_alloc_api(
+ const struct uverbs_object_tree_def *const *driver_specs,
+ enum rdma_driver_id driver_id);
+void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev);
+void uverbs_disassociate_api(struct uverbs_api *uapi);
+void uverbs_destroy_api(struct uverbs_api *uapi);
+void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
+ unsigned int num_attrs);
#endif /* RDMA_CORE_H */
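
For orientation, uapi_get_object() above is the lookup primitive for this radix-tree description: uapi_key_obj() turns a user-visible object id into a radix key, and the slot holds a struct uverbs_api_object. A minimal, hypothetical use (example_resolve() is not part of the patch):

	static int example_resolve(struct uverbs_api *uapi, u16 object_id)
	{
		const struct uverbs_api_object *obj;

		obj = uapi_get_object(uapi, object_id);
		if (!obj)	/* no slot registered for this object id */
			return -ENOENT;

		/* obj->type_attrs and obj->type_class describe how uobjects
		 * of this type are allocated, looked up and destroyed */
		return 0;
	}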
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index a4fbdc5d28fa..ee366199b169 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -143,14 +143,15 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
BONDING_SLAVE_STATE_NA)
-static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool
+is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
struct net_device *real_dev;
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
rcu_read_lock();
real_dev = rdma_vlan_dev_real_dev(cookie);
@@ -166,14 +167,15 @@ static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
return res;
}
-static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool
+is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
struct net_device *master_dev;
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
rcu_read_lock();
master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
@@ -184,22 +186,59 @@ static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
return res;
}
-static int pass_all_filter(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+/**
+ * is_ndev_for_default_gid_filter - Check if a given netdevice
+ * can be considered for default GIDs or not.
+ * @ib_dev: IB device to check
+ * @port: Port to consider for adding default GID
+ * @rdma_ndev: rdma netdevice pointer
+ * @cookie: Netdevice to consider to form a default GID
+ *
+ * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
+ * considered for deriving the default RoCE GID, false otherwise.
+ */
+static bool
+is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ bool res;
+
+ if (!rdma_ndev)
+ return false;
+
+ rcu_read_lock();
+
+	/*
+	 * When an rdma netdevice is used in bonding, the bonding master
+	 * netdevice should be considered for default GIDs. Therefore,
+	 * ignore slave rdma netdevices when bonding is considered.
+	 * Additionally, when the event (cookie) netdevice is the bond
+	 * master device, make sure that it is the upper netdevice of the
+	 * rdma netdevice.
+	 */
+ res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
+ (netif_is_bond_master(cookie_ndev) &&
+ rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));
+
+ rcu_read_unlock();
+ return res;
+}
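
Spelled out, the condition accepts exactly two configurations (interface names below are illustrative):

	rdma_ndev   cookie (event netdev)        result
	---------   --------------------------   ------
	eth0        eth0, not a bonding slave    true   (plain primary netdev)
	eth0        eth0, slave of bond0         false  (slaves are skipped)
	eth0        bond0, upper dev of eth0     true   (bond master forms GID)
	eth0        bond1, unrelated bond        false  (not an upper device)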
+
+static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
- return 1;
+ return true;
}
-static int upper_device_filter(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
if (rdma_ndev == cookie)
- return 1;
+ return true;
rcu_read_lock();
res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
@@ -208,6 +247,34 @@ static int upper_device_filter(struct ib_device *ib_dev, u8 port,
return res;
}
+/**
+ * is_upper_ndev_bond_master_filter - Check if a given netdevice
+ * is the bond master device of the netdevice of the RDMA device port.
+ * @ib_dev: IB device to check
+ * @port: Port to consider for adding default GID
+ * @rdma_ndev: Pointer to rdma netdevice
+ * @cookie: Netdevice to consider to form a default GID
+ *
+ * is_upper_ndev_bond_master_filter() returns true if the cookie netdevice
+ * is a bond master device and rdma_ndev is its lower netdevice. rdma_ndev
+ * might not have been established as a slave device yet.
+ */
+static bool
+is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev,
+ void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ bool match = false;
+
+ rcu_read_lock();
+ if (netif_is_bond_master(cookie_ndev) &&
+ rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
+ match = true;
+ rcu_read_unlock();
+ return match;
+}
+
static void update_gid_ip(enum gid_op_type gid_op,
struct ib_device *ib_dev,
u8 port, struct net_device *ndev,
@@ -223,34 +290,10 @@ static void update_gid_ip(enum gid_op_type gid_op,
update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}
-static void enum_netdev_default_gids(struct ib_device *ib_dev,
- u8 port, struct net_device *event_ndev,
- struct net_device *rdma_ndev)
-{
- unsigned long gid_type_mask;
-
- rcu_read_lock();
- if (!rdma_ndev ||
- ((rdma_ndev != event_ndev &&
- !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
- is_eth_active_slave_of_bonding_rcu(rdma_ndev,
- netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
- BONDING_SLAVE_STATE_INACTIVE)) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
-
- gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
-
- ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
- IB_CACHE_GID_DEFAULT_MODE_SET);
-}
-
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
u8 port,
- struct net_device *event_ndev,
- struct net_device *rdma_ndev)
+ struct net_device *rdma_ndev,
+ struct net_device *event_ndev)
{
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
unsigned long gid_type_mask;
@@ -381,7 +424,6 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
struct net_device *rdma_ndev, void *cookie)
{
- enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
_add_netdev_ips(ib_dev, port, cookie);
}
@@ -391,6 +433,38 @@ static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}
+/**
+ * del_default_gids - Delete default GIDs of the event/cookie netdevice
+ * @ib_dev: RDMA device pointer
+ * @port: Port of the RDMA device whose GID table to consider
+ * @rdma_ndev: Unused rdma netdevice
+ * @cookie: Pointer to event netdevice
+ *
+ * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
+ */
+static void del_default_gids(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ unsigned long gid_type_mask;
+
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+
+ ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_DELETE);
+}
+
+static void add_default_gids(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = cookie;
+ unsigned long gid_type_mask;
+
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+ ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_SET);
+}
+
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
u8 port,
struct net_device *rdma_ndev,
@@ -405,9 +479,20 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
rtnl_lock();
down_read(&net_rwsem);
for_each_net(net)
- for_each_netdev(net, ndev)
- if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
- add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
+ for_each_netdev(net, ndev) {
+			/*
+			 * Filter and add default GIDs of the primary netdevice
+			 * when not in bonding mode, or add default GIDs of the
+			 * bond master device when in bonding mode.
+			 */
+ if (is_ndev_for_default_gid_filter(ib_dev, port,
+ rdma_ndev, ndev))
+ add_default_gids(ib_dev, port, rdma_ndev, ndev);
+
+ if (is_eth_port_of_netdev_filter(ib_dev, port,
+ rdma_ndev, ndev))
+ _add_netdev_ips(ib_dev, port, ndev);
+ }
up_read(&net_rwsem);
rtnl_unlock();
}
@@ -513,18 +598,12 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
rcu_read_unlock();
if (master_ndev) {
- bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
- rdma_ndev);
+ bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
+ master_ndev);
dev_put(master_ndev);
}
}
-static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
-{
- bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
-}
-
/* The following functions operate on all IB devices. netdevice_event and
* addr_event execute ib_enum_all_roce_netdevs through a work.
* ib_enum_all_roce_netdevs iterates through all IB devices.
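
A sketch of how one queued cmd is eventually applied; the executing work handler lives outside this hunk, so the loop below illustrates the filter/cb contract rather than the exact code:

	static void example_apply_cmd(const struct netdev_event_work_cmd *cmd)
	{
		/* filter selects (ib_dev, port) pairs; cb acts on each one */
		ib_enum_all_roce_netdevs(cmd->filter, cmd->filter_ndev,
					 cmd->cb, cmd->ndev);
	}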
@@ -575,40 +654,94 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
}
static const struct netdev_event_work_cmd add_cmd = {
- .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
+ .cb = add_netdev_ips,
+ .filter = is_eth_port_of_netdev_filter
+};
+
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
- .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};
+ .cb = add_netdev_upper_ips,
+ .filter = is_eth_port_of_netdev_filter
+};
-static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
- struct netdev_event_work_cmd *cmds)
+static void
+ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
{
- static const struct netdev_event_work_cmd upper_ips_del_cmd = {
- .cb = del_netdev_upper_ips, .filter = upper_device_filter};
- static const struct netdev_event_work_cmd bonding_default_del_cmd = {
- .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};
-
- if (changeupper_info->linking == false) {
- cmds[0] = upper_ips_del_cmd;
- cmds[0].ndev = changeupper_info->upper_dev;
- cmds[1] = add_cmd;
- } else {
- cmds[0] = bonding_default_del_cmd;
- cmds[0].ndev = changeupper_info->upper_dev;
- cmds[1] = add_cmd_upper_ips;
- cmds[1].ndev = changeupper_info->upper_dev;
- cmds[1].filter_ndev = changeupper_info->upper_dev;
- }
+ static const struct netdev_event_work_cmd
+ upper_ips_del_cmd = {
+ .cb = del_netdev_upper_ips,
+ .filter = upper_device_filter
+ };
+
+ cmds[0] = upper_ips_del_cmd;
+ cmds[0].ndev = changeupper_info->upper_dev;
+ cmds[1] = add_cmd;
}
+static const struct netdev_event_work_cmd bonding_default_add_cmd = {
+ .cb = add_default_gids,
+ .filter = is_upper_ndev_bond_master_filter
+};
+
+static void
+ndev_event_link(struct net_device *event_ndev,
+ struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
+{
+ static const struct netdev_event_work_cmd
+ bonding_default_del_cmd = {
+ .cb = del_default_gids,
+ .filter = is_upper_ndev_bond_master_filter
+ };
+ /*
+ * When a lower netdev is linked to its upper bonding
+	 * netdev, delete the lower slave netdev's default GIDs.
+ */
+ cmds[0] = bonding_default_del_cmd;
+ cmds[0].ndev = event_ndev;
+ cmds[0].filter_ndev = changeupper_info->upper_dev;
+
+ /* Now add bonding upper device default GIDs */
+ cmds[1] = bonding_default_add_cmd;
+ cmds[1].ndev = changeupper_info->upper_dev;
+ cmds[1].filter_ndev = changeupper_info->upper_dev;
+
+ /* Now add bonding upper device IP based GIDs */
+ cmds[2] = add_cmd_upper_ips;
+ cmds[2].ndev = changeupper_info->upper_dev;
+ cmds[2].filter_ndev = changeupper_info->upper_dev;
+}
+
+static void netdevice_event_changeupper(struct net_device *event_ndev,
+ struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
+{
+ if (changeupper_info->linking)
+ ndev_event_link(event_ndev, changeupper_info, cmds);
+ else
+ ndev_event_unlink(changeupper_info, cmds);
+}
+
+static const struct netdev_event_work_cmd add_default_gid_cmd = {
+ .cb = add_default_gids,
+ .filter = is_ndev_for_default_gid_filter,
+};
+
static int netdevice_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
static const struct netdev_event_work_cmd del_cmd = {
.cb = del_netdev_ips, .filter = pass_all_filter};
- static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
- .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
- static const struct netdev_event_work_cmd default_del_cmd = {
- .cb = del_netdev_default_ips, .filter = pass_all_filter};
+ static const struct netdev_event_work_cmd
+ bonding_default_del_cmd_join = {
+ .cb = del_netdev_default_ips_join,
+ .filter = is_eth_port_inactive_slave_filter
+ };
+ static const struct netdev_event_work_cmd
+ netdev_del_cmd = {
+ .cb = del_netdev_ips,
+ .filter = is_eth_port_of_netdev_filter
+ };
static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
.cb = del_netdev_upper_ips, .filter = upper_device_filter};
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
@@ -621,7 +754,8 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
case NETDEV_REGISTER:
case NETDEV_UP:
cmds[0] = bonding_default_del_cmd_join;
- cmds[1] = add_cmd;
+ cmds[1] = add_default_gid_cmd;
+ cmds[2] = add_cmd;
break;
case NETDEV_UNREGISTER:
@@ -632,19 +766,22 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
break;
case NETDEV_CHANGEADDR:
- cmds[0] = default_del_cmd;
- cmds[1] = add_cmd;
+ cmds[0] = netdev_del_cmd;
+ cmds[1] = add_default_gid_cmd;
+ cmds[2] = add_cmd;
break;
case NETDEV_CHANGEUPPER:
- netdevice_event_changeupper(
+ netdevice_event_changeupper(ndev,
container_of(ptr, struct netdev_notifier_changeupper_info, info),
cmds);
break;
case NETDEV_BONDING_FAILOVER:
cmds[0] = bonding_event_ips_del_cmd;
- cmds[1] = bonding_default_del_cmd_join;
+ /* Add default GIDs of the bond device */
+ cmds[1] = bonding_default_add_cmd;
+ /* Add IP based GIDs of the bond device */
cmds[2] = add_cmd_upper_ips;
break;
@@ -660,7 +797,8 @@ static void update_gid_event_work_handler(struct work_struct *_work)
struct update_gid_event_work *work =
container_of(_work, struct update_gid_event_work, work);
- ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
+ ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
+ work->gid_attr.ndev,
callback_for_addr_gid_device_scan, work);
dev_put(work->gid_attr.ndev);
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index c8963e91f92a..683e6d11a564 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -87,7 +87,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
}
ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
- if (ret < nents) {
+ if (ret < 0 || ret < nents) {
ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
return -EINVAL;
}
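
The extra ret < 0 test matters because ib_map_mr_sg() returns either the number of mapped SG entries or a negative errno, and nents is unsigned here: in `ret < nents` alone the negative errno would be promoted to a large unsigned value and the failure silently missed. Illustration:

	u32 nents = 4;
	int ret = -ENOMEM;

	if (ret < nents)		/* false: ret promoted to 0xfffffff4 */
		/* error missed */;
	if (ret < 0 || ret < nents)	/* correctly detects the failure */
		/* error handled */;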
@@ -325,7 +325,7 @@ out_unmap_sg:
EXPORT_SYMBOL(rdma_rw_ctx_init);
/**
- * rdma_rw_ctx_signature init - initialize a RW context with signature offload
+ * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
* @ctx: context to initialize
* @qp: queue pair to operate on
* @port_num: port num to which the connection is bound
@@ -564,10 +564,10 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs);
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
- struct ib_send_wr *first_wr, *bad_wr;
+ struct ib_send_wr *first_wr;
first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
- return ib_post_send(qp, first_wr, &bad_wr);
+ return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);
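
Passing NULL for bad_wr relies on the (same-series) rework that const-ified the post-send paths and made the bad-WR output optional. A caller that still needs to know which WR failed keeps the old pattern:

	const struct ib_send_wr *bad_wr;
	int ret;

	ret = ib_post_send(qp, first_wr, &bad_wr);
	if (ret)
		pr_err("post_send failed starting at wr %p\n", bad_wr);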
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a61ec7e33613..7b794a14d6e8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1227,20 +1227,10 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-static int
-roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec)
+static int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr)
{
- struct net_device *resolved_dev;
- struct net_device *ndev;
- struct net_device *idev;
- struct rdma_dev_addr dev_addr = {
- .bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ?
- sa_path_get_ifindex(rec) : 0),
- .net = sa_path_get_ndev(rec) ?
- sa_path_get_ndev(rec) :
- &init_net
- };
+ struct rdma_dev_addr dev_addr = {};
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
@@ -1250,9 +1240,14 @@ roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
if (rec->roce.route_resolved)
return 0;
+ if (!attr || !attr->ndev)
+ return -EINVAL;
- if (!device->get_netdev)
- return -EOPNOTSUPP;
+ dev_addr.bound_dev_if = attr->ndev->ifindex;
+	/* TODO: Use net from the ib_gid_attr once it is added to it,
+	 * until then, limit ourselves to init_net.
+	 */
+ dev_addr.net = &init_net;
rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
@@ -1268,60 +1263,52 @@ roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
return -EINVAL;
- idev = device->get_netdev(device, port_num);
- if (!idev)
- return -ENODEV;
-
- resolved_dev = dev_get_by_index(dev_addr.net,
- dev_addr.bound_dev_if);
- if (!resolved_dev) {
- ret = -ENODEV;
- goto done;
- }
- ndev = ib_get_ndev_from_path(rec);
- rcu_read_lock();
- if ((ndev && ndev != resolved_dev) ||
- (resolved_dev != idev &&
- !rdma_is_upper_dev_rcu(idev, resolved_dev)))
- ret = -EHOSTUNREACH;
- rcu_read_unlock();
- dev_put(resolved_dev);
- if (ndev)
- dev_put(ndev);
-done:
- dev_put(idev);
- if (!ret)
- rec->roce.route_resolved = true;
- return ret;
+ rec->roce.route_resolved = true;
+ return 0;
}
static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *gid_attr)
{
enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
- struct net_device *ndev;
- u16 gid_index;
- int ret;
- ndev = ib_get_ndev_from_path(rec);
- ret = ib_find_cached_gid_by_port(device, &rec->sgid, type,
- port_num, ndev, &gid_index);
- if (ndev)
- dev_put(ndev);
- if (ret)
- return ret;
+ if (!gid_attr) {
+ gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
+ port_num, NULL);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+ } else
+ rdma_hold_gid_attr(gid_attr);
- rdma_ah_set_grh(ah_attr, &rec->dgid,
- be32_to_cpu(rec->flow_label),
- gid_index, rec->hop_limit,
- rec->traffic_class);
+ rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
+ be32_to_cpu(rec->flow_label),
+ rec->hop_limit, rec->traffic_class,
+ gid_attr);
return 0;
}
+/**
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
+ * @device: Device for which the ah attributes are being initialized.
+ * @port_num: Port on the specified device.
+ * @rec: path record entry to use for ah attributes initialization.
+ * @ah_attr: address handle attributes to initialize from the path record.
+ * @gid_attr: SGID attribute to consider during initialization.
+ *
+ * When ib_init_ah_attr_from_path() returns success, ah_attr
+ * (a) for the IB link layer optionally holds a reference to the SGID
+ * attribute, when a GRH is present;
+ * (b) for the RoCE link layer always holds a reference to the SGID attribute.
+ * The user must invoke rdma_destroy_ah_attr() to release the SGID attribute
+ * reference taken by ib_init_ah_attr_from_path().
+ */
int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *gid_attr)
{
int ret = 0;
@@ -1332,7 +1319,7 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
rdma_ah_set_static_rate(ah_attr, rec->rate);
if (sa_path_is_roce(rec)) {
- ret = roce_resolve_route_from_path(device, port_num, rec);
+ ret = roce_resolve_route_from_path(rec, gid_attr);
if (ret)
return ret;
@@ -1349,7 +1336,8 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
}
if (rec->hop_limit > 0 || sa_path_is_roce(rec))
- ret = init_ah_attr_grh_fields(device, port_num, rec, ah_attr);
+ ret = init_ah_attr_grh_fields(device, port_num,
+ rec, ah_attr, gid_attr);
return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
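
Per the kernel-doc above, a successful call leaves an SGID attribute reference inside ah_attr that the caller must drop. A sketch of correct usage (error handling trimmed):

	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;

	if (!ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr, NULL)) {
		ah = rdma_create_ah(pd, &ah_attr);
		rdma_destroy_ah_attr(&ah_attr);	/* drops the SGID reference */
	}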
@@ -1557,8 +1545,6 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
ARRAY_SIZE(path_rec_table),
mad->data, &rec);
rec.rec_type = SA_PATH_REC_TYPE_IB;
- sa_path_set_ndev(&rec, NULL);
- sa_path_set_ifindex(&rec, 0);
sa_path_set_dmac_zero(&rec);
if (query->conv_pr) {
@@ -2290,6 +2276,7 @@ static void update_sm_ah(struct work_struct *work)
struct ib_sa_sm_ah *new_ah;
struct ib_port_attr port_attr;
struct rdma_ah_attr ah_attr;
+ bool grh_required;
if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
pr_warn("Couldn't query port\n");
@@ -2314,16 +2301,27 @@ static void update_sm_ah(struct work_struct *work)
rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
rdma_ah_set_port_num(&ah_attr, port->port_num);
- if (port_attr.grh_required) {
- if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA) {
- rdma_ah_set_make_grd(&ah_attr, true);
- } else {
- rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
- rdma_ah_set_subnet_prefix(&ah_attr,
- cpu_to_be64(port_attr.subnet_prefix));
- rdma_ah_set_interface_id(&ah_attr,
- cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
- }
+
+ grh_required = rdma_is_grh_required(port->agent->device,
+ port->port_num);
+
+ /*
+ * The OPA sm_lid of 0xFFFF needs special handling so that it can be
+ * differentiated from a permissive LID of 0xFFFF. We set the
+ * grh_required flag here so the SA can program the DGID in the
+	 * address handle appropriately.
+ */
+ if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
+ (grh_required ||
+ port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
+ rdma_ah_set_make_grd(&ah_attr, true);
+
+ if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
+ rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
+ rdma_ah_set_subnet_prefix(&ah_attr,
+ cpu_to_be64(port_attr.subnet_prefix));
+ rdma_ah_set_interface_id(&ah_attr,
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
}
new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 31c7efaf8e7a..7fd14ead7b37 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -42,6 +42,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>
+#include <rdma/ib_cache.h>
struct ib_port;
@@ -346,7 +347,7 @@ static struct attribute *port_default_attrs[] = {
NULL
};
-static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf)
+static size_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
{
if (!gid_attr->ndev)
return -EINVAL;
@@ -354,33 +355,26 @@ static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf)
return sprintf(buf, "%s\n", gid_attr->ndev->name);
}
-static size_t print_gid_type(struct ib_gid_attr *gid_attr, char *buf)
+static size_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf)
{
return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type));
}
-static ssize_t _show_port_gid_attr(struct ib_port *p,
- struct port_attribute *attr,
- char *buf,
- size_t (*print)(struct ib_gid_attr *gid_attr,
- char *buf))
+static ssize_t _show_port_gid_attr(
+ struct ib_port *p, struct port_attribute *attr, char *buf,
+ size_t (*print)(const struct ib_gid_attr *gid_attr, char *buf))
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
- union ib_gid gid;
- struct ib_gid_attr gid_attr = {};
+ const struct ib_gid_attr *gid_attr;
ssize_t ret;
- ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
- &gid_attr);
- if (ret)
- goto err;
+ gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
- ret = print(&gid_attr, buf);
-
-err:
- if (gid_attr.ndev)
- dev_put(gid_attr.ndev);
+ ret = print(gid_attr, buf);
+ rdma_put_gid_attr(gid_attr);
return ret;
}
@@ -389,26 +383,28 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
- union ib_gid *pgid;
- union ib_gid gid;
+ const struct ib_gid_attr *gid_attr;
ssize_t ret;
- ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, NULL);
+ gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
+ if (IS_ERR(gid_attr)) {
+ const union ib_gid zgid = {};
+
+		/* If reading the GID fails, it is likely because the GID
+		 * entry is empty (invalid) or reserved in the table. User
+		 * space expects to read GID table entries as long as the
+		 * given index is within the GID table size. An
+		 * administrative/debugging tool would fail to query the
+		 * rest of the GID entries if it hit an error while querying
+		 * a GID at a given index. To avoid user space reporting
+		 * such an error on a failed GID read, return the zero GID
+		 * as before. This maintains backward compatibility.
+		 */
+ return sprintf(buf, "%pI6\n", zgid.raw);
+ }
- /* If reading GID fails, it is likely due to GID entry being empty
- * (invalid) or reserved GID in the table.
- * User space expects to read GID table entries as long as it given
- * index is within GID table size.
- * Administrative/debugging tool fails to query rest of the GID entries
- * if it hits error while querying a GID of the given index.
- * To avoid user space throwing such error on fail to read gid, return
- * zero GID as before. This maintains backward compatibility.
- */
- if (ret)
- pgid = &zgid;
- else
- pgid = &gid;
- return sprintf(buf, "%pI6\n", pgid->raw);
+ ret = sprintf(buf, "%pI6\n", gid_attr->gid.raw);
+ rdma_put_gid_attr(gid_attr);
+ return ret;
}
static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 9eef96dacbd7..faa9e6116b2f 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -207,7 +207,7 @@ error:
}
static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
- struct ib_cm_req_event_param *kreq)
+ const struct ib_cm_req_event_param *kreq)
{
ureq->remote_ca_guid = kreq->remote_ca_guid;
ureq->remote_qkey = kreq->remote_qkey;
@@ -231,7 +231,7 @@ static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
}
static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
- struct ib_cm_rep_event_param *krep)
+ const struct ib_cm_rep_event_param *krep)
{
urep->remote_ca_guid = krep->remote_ca_guid;
urep->remote_qkey = krep->remote_qkey;
@@ -247,14 +247,14 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
}
static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
- struct ib_cm_sidr_rep_event_param *krep)
+ const struct ib_cm_sidr_rep_event_param *krep)
{
urep->status = krep->status;
urep->qkey = krep->qkey;
urep->qpn = krep->qpn;
};
-static int ib_ucm_event_process(struct ib_cm_event *evt,
+static int ib_ucm_event_process(const struct ib_cm_event *evt,
struct ib_ucm_event *uvt)
{
void *info = NULL;
@@ -351,7 +351,7 @@ err1:
}
static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ib_ucm_event *uevent;
struct ib_ucm_context *ctx;
@@ -1000,14 +1000,11 @@ static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
- struct ib_cm_sidr_req_param param;
+ struct ib_cm_sidr_req_param param = {};
struct ib_ucm_context *ctx;
struct ib_ucm_sidr_req cmd;
int result;
- param.private_data = NULL;
- param.path = NULL;
-
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 54ab6335c48d..a41792dbae1f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -84,7 +84,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct ib_umem *umem;
struct page **page_list;
struct vm_area_struct **vma_list;
- unsigned long locked;
unsigned long lock_limit;
unsigned long cur_base;
unsigned long npages;
@@ -92,7 +91,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
int i;
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
- int need_release = 0;
unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
@@ -121,10 +119,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (access & IB_ACCESS_ON_DEMAND) {
ret = ib_umem_odp_get(context, umem, access);
- if (ret) {
- kfree(umem);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto umem_kfree;
return umem;
}
@@ -135,8 +131,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
- kfree(umem);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto umem_kfree;
}
/*
@@ -149,41 +145,43 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
npages = ib_umem_num_pages(umem);
- down_write(&current->mm->mmap_sem);
-
- locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm += npages;
+ if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ up_write(&current->mm->mmap_sem);
ret = -ENOMEM;
- goto out;
+ goto vma;
}
+ up_write(&current->mm->mmap_sem);
cur_base = addr & PAGE_MASK;
if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
- goto out;
+ goto vma;
}
ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
if (ret)
- goto out;
+ goto vma;
if (!umem->writable)
gup_flags |= FOLL_FORCE;
- need_release = 1;
sg_list_start = umem->sg_head.sgl;
+ down_read(&current->mm->mmap_sem);
while (npages) {
ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
-
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
+ goto umem_release;
+ }
umem->npages += ret;
cur_base += ret * PAGE_SIZE;
@@ -199,6 +197,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
/* preparing for next loop */
sg_list_start = sg;
}
+ up_read(&current->mm->mmap_sem);
umem->nmap = ib_dma_map_sg_attrs(context->device,
umem->sg_head.sgl,
@@ -206,27 +205,28 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
DMA_BIDIRECTIONAL,
dma_attrs);
- if (umem->nmap <= 0) {
+ if (!umem->nmap) {
ret = -ENOMEM;
- goto out;
+ goto umem_release;
}
ret = 0;
+ goto out;
-out:
- if (ret < 0) {
- if (need_release)
- __ib_umem_release(context->device, umem, 0);
- kfree(umem);
- } else
- current->mm->pinned_vm = locked;
-
+umem_release:
+ __ib_umem_release(context->device, umem, 0);
+vma:
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm -= ib_umem_num_pages(umem);
up_write(&current->mm->mmap_sem);
+out:
if (vma_list)
free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
-
- return ret < 0 ? ERR_PTR(ret) : umem;
+umem_kfree:
+ if (ret)
+ kfree(umem);
+ return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
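
The reworked labels above implement a charge-then-uncharge pattern for RLIMIT_MEMLOCK accounting: pinned_vm is bumped under mmap_sem before the limit check, and every failure path funnels through the `vma:` label to subtract it again. The pattern in isolation (a sketch, not the exact patch code):

	down_write(&current->mm->mmap_sem);
	current->mm->pinned_vm += npages;
	if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
		current->mm->pinned_vm -= npages;	/* uncharge on failure */
		up_write(&current->mm->mmap_sem);
		return -ENOMEM;
	}
	up_write(&current->mm->mmap_sem);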
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 182436b92ba9..6ec748eccff7 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -186,6 +186,7 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
ULLONG_MAX,
ib_umem_notifier_release_trampoline,
+ true,
NULL);
up_read(&context->umem_rwsem);
}
@@ -207,22 +208,31 @@ static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
return 0;
}
-static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+ int ret;
if (!context->invalidate_range)
- return;
+ return 0;
+
+ if (blockable)
+ down_read(&context->umem_rwsem);
+ else if (!down_read_trylock(&context->umem_rwsem))
+ return -EAGAIN;
ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
end,
- invalidate_range_start_trampoline, NULL);
+ invalidate_range_start_trampoline,
+ blockable, NULL);
up_read(&context->umem_rwsem);
+
+ return ret;
}
static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
@@ -242,10 +252,15 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
if (!context->invalidate_range)
return;
+	/*
+	 * TODO: we currently bail out if there is any sleepable work to be
+	 * done in ib_umem_notifier_invalidate_range_start, so we shouldn't
+	 * really block here. But this is ugly and fragile.
+	 */
down_read(&context->umem_rwsem);
rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
end,
- invalidate_range_end_trampoline, NULL);
+ invalidate_range_end_trampoline, true, NULL);
up_read(&context->umem_rwsem);
ib_ucontext_notifier_end_account(context);
}
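
The new blockable argument exists because some mmu-notifier invocations (notably from the OOM reaper) run in contexts that must not sleep. The resulting lock pattern, in isolation:

	if (blockable)
		down_read(&context->umem_rwsem);	/* may sleep */
	else if (!down_read_trylock(&context->umem_rwsem))
		return -EAGAIN;	/* non-blocking caller retries or skips */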
@@ -798,6 +813,7 @@ EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
u64 start, u64 last,
umem_call_back cb,
+ bool blockable,
void *cookie)
{
int ret_val = 0;
@@ -809,6 +825,9 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
for (node = rbt_ib_umem_iter_first(root, start, last - 1);
node; node = next) {
+ /* TODO move the blockable decision up to the callback */
+ if (!blockable)
+ return -EAGAIN;
next = rbt_ib_umem_iter_next(node, start, last - 1);
umem = container_of(node, struct ib_umem_odp, interval_tree);
ret_val = cb(umem->umem, start, last, cookie) || ret_val;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index bb98c9e4a7fd..c34a6852d691 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -268,6 +268,7 @@ static void recv_handler(struct ib_mad_agent *agent,
packet->mad.hdr.traffic_class = grh->traffic_class;
memcpy(packet->mad.hdr.gid, &grh->dgid, 16);
packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label);
+ rdma_destroy_ah_attr(&ah_attr);
}
if (queue_packet(file, agent, packet))
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index c0d40fc3a53a..5df8e548cc14 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -111,7 +111,7 @@ struct ib_uverbs_device {
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
struct list_head uverbs_events_file_list;
- struct uverbs_root_spec *specs_root;
+ struct uverbs_api *uapi;
};
struct ib_uverbs_event_queue {
@@ -130,21 +130,37 @@ struct ib_uverbs_async_event_file {
};
struct ib_uverbs_completion_event_file {
- struct ib_uobject_file uobj_file;
+ struct ib_uobject uobj;
struct ib_uverbs_event_queue ev_queue;
};
struct ib_uverbs_file {
struct kref ref;
- struct mutex mutex;
- struct mutex cleanup_mutex; /* protect cleanup */
struct ib_uverbs_device *device;
+ struct mutex ucontext_lock;
+ /*
+ * ucontext must be accessed via ib_uverbs_get_ucontext() or with
+ * ucontext_lock held
+ */
struct ib_ucontext *ucontext;
struct ib_event_handler event_handler;
struct ib_uverbs_async_event_file *async_file;
struct list_head list;
int is_closed;
+ /*
+ * To access the uobjects list hw_destroy_rwsem must be held for write
+ * OR hw_destroy_rwsem held for read AND uobjects_lock held.
+	 * hw_destroy_rwsem should be held across any destruction of the HW
+ * object of an associated uobject.
+ */
+ struct rw_semaphore hw_destroy_rwsem;
+ spinlock_t uobjects_lock;
+ struct list_head uobjects;
+
+ u64 uverbs_cmd_mask;
+ u64 uverbs_ex_cmd_mask;
+
struct idr idr;
/* spinlock protects write access to idr */
spinlock_t idr_lock;
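
A reader of the uobjects list would follow the documented rule like this (illustrative sketch; the list-head member name on ib_uobject is assumed):

	struct ib_uobject *uobj;

	down_read(&ufile->hw_destroy_rwsem);
	spin_lock_irq(&ufile->uobjects_lock);
	list_for_each_entry(uobj, &ufile->uobjects, list)
		/* inspect uobj; HW destruction is excluded here */;
	spin_unlock_irq(&ufile->uobjects_lock);
	up_read(&ufile->hw_destroy_rwsem);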
@@ -196,7 +212,6 @@ struct ib_uwq_object {
struct ib_ucq_object {
struct ib_uobject uobject;
- struct ib_uverbs_file *uverbs_file;
struct list_head comp_list;
struct list_head async_list;
u32 comp_events_reported;
@@ -230,7 +245,7 @@ void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
-int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd,
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
enum rdma_remove_reason why);
int uverbs_dealloc_mw(struct ib_mw *mw);
@@ -238,12 +253,7 @@ void ib_uverbs_detach_umcast(struct ib_qp *qp,
struct ib_uqp_object *uobj);
void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata);
-extern const struct uverbs_attr_def uverbs_uhw_compat_in;
-extern const struct uverbs_attr_def uverbs_uhw_compat_out;
long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int uverbs_destroy_def_handler(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs);
struct ib_uverbs_flow_spec {
union {
@@ -292,7 +302,6 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS);
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
- struct ib_device *ib_dev, \
const char __user *buf, int in_len, \
int out_len)
@@ -334,7 +343,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
#define IB_UVERBS_DECLARE_EX_CMD(name) \
int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \
- struct ib_device *ib_dev, \
struct ib_udata *ucore, \
struct ib_udata *uhw)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3e90b6a1d9d2..a21d5214afc3 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -48,11 +48,10 @@
#include "core_priv.h"
static struct ib_uverbs_completion_event_file *
-ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
+_ib_uverbs_lookup_comp_file(s32 fd, struct ib_uverbs_file *ufile)
{
- struct ib_uobject *uobj = uobj_get_read(UVERBS_OBJECT_COMP_CHANNEL,
- fd, context);
- struct ib_uobject_file *uobj_file;
+ struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
+ fd, ufile);
if (IS_ERR(uobj))
return (void *)uobj;
@@ -60,13 +59,13 @@ ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
uverbs_uobject_get(uobj);
uobj_put_read(uobj);
- uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
- return container_of(uobj_file, struct ib_uverbs_completion_event_file,
- uobj_file);
+ return container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
}
+#define ib_uverbs_lookup_comp_file(_fd, _ufile) \
+ _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
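
typecheck(type, x) from <linux/typecheck.h> evaluates to 1 and emits a compile-time warning when x is not of the named type, so the multiplication passes the fd through unchanged while rejecting mistyped callers (values illustrative):

	s32 fd = cmd.comp_channel;
	ev_file = ib_uverbs_lookup_comp_file(fd, file);	   /* fine */

	u64 wrong = 0;
	ev_file = ib_uverbs_lookup_comp_file(wrong, file); /* warns at build */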
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -76,6 +75,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
struct ib_ucontext *ucontext;
struct file *filp;
struct ib_rdmacg_object cg_obj;
+ struct ib_device *ib_dev;
int ret;
if (out_len < sizeof resp)
@@ -84,7 +84,13 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- mutex_lock(&file->mutex);
+ mutex_lock(&file->ucontext_lock);
+ ib_dev = srcu_dereference(file->device->ib_dev,
+ &file->device->disassociate_srcu);
+ if (!ib_dev) {
+ ret = -EIO;
+ goto err;
+ }
if (file->ucontext) {
ret = -EINVAL;
@@ -110,12 +116,12 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
ucontext->cg_obj = cg_obj;
/* ufile is required when some objects are released */
ucontext->ufile = file;
- uverbs_initialize_ucontext(ucontext);
rcu_read_lock();
ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
rcu_read_unlock();
ucontext->closing = 0;
+ ucontext->cleanup_retryable = false;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
ucontext->umem_tree = RB_ROOT_CACHED;
@@ -146,11 +152,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err_file;
}
- file->ucontext = ucontext;
-
fd_install(resp.async_fd, filp);
- mutex_unlock(&file->mutex);
+ /*
+ * Make sure that ib_uverbs_get_ucontext() sees the pointer update
+ * only after all writes to setup the ucontext have completed
+ */
+ smp_store_release(&file->ucontext, ucontext);
+
+ mutex_unlock(&file->ucontext_lock);
return in_len;
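
The smp_store_release() pairs with an acquire on the reader side; a plausible simplified sketch of ib_uverbs_get_ucontext() (the real function also handles device disassociation):

	struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile)
	{
		/* pairs with smp_store_release() in ib_uverbs_get_context() */
		struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

		if (!ucontext)
			return ERR_PTR(-EINVAL);
		return ucontext;
	}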
@@ -169,15 +179,16 @@ err_alloc:
ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
err:
- mutex_unlock(&file->mutex);
+ mutex_unlock(&file->ucontext_lock);
return ret;
}
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
+static void copy_query_dev_fields(struct ib_ucontext *ucontext,
struct ib_uverbs_query_device_resp *resp,
struct ib_device_attr *attr)
{
+ struct ib_device *ib_dev = ucontext->device;
+
resp->fw_ver = attr->fw_ver;
resp->node_guid = ib_dev->node_guid;
resp->sys_image_guid = attr->sys_image_guid;
@@ -189,7 +200,7 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr;
resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
- resp->max_sge = attr->max_sge;
+ resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge);
resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq;
resp->max_cqe = attr->max_cqe;
@@ -221,12 +232,16 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
struct ib_uverbs_query_device cmd;
struct ib_uverbs_query_device_resp resp;
+ struct ib_ucontext *ucontext;
+
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
if (out_len < sizeof resp)
return -ENOSPC;
@@ -235,7 +250,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return -EFAULT;
memset(&resp, 0, sizeof resp);
- copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
+ copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
@@ -243,8 +258,28 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return in_len;
}
+/*
+ * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
+ * PortInfo CapabilityMask, but was extended with unique bits.
+ */
+static u32 make_port_cap_flags(const struct ib_port_attr *attr)
+{
+ u32 res;
+
+	/* All IBA CapabilityMask bits are passed through here, except bit 26,
+	 * which is overridden with IP_BASED_GIDS. This is due to a historical
+	 * mistake in the implementation of IP_BASED_GIDS. All other bits
+	 * match the IBA definition across all kernel versions.
+	 */
+ res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS;
+
+ if (attr->ip_gids)
+ res |= IB_UVERBS_PCF_IP_BASED_GIDS;
+
+ return res;
+}
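
A worked example of the override, with illustrative values and IB_UVERBS_PCF_IP_BASED_GIDS == (1 << 26):

	/* HW reports bit 26 set, but attr->ip_gids is false: */
	res = 0x04000100 & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS;	/* 0x00000100 */
	/* the HW meaning of bit 26 is deliberately discarded */

	/* HW reports bit 26 clear, but attr->ip_gids is true: */
	res = 0x00000100 | IB_UVERBS_PCF_IP_BASED_GIDS;		/* 0x04000100 */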
+
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -252,6 +287,13 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
struct ib_uverbs_query_port_resp resp;
struct ib_port_attr attr;
int ret;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
+
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -269,12 +311,15 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.max_mtu = attr.max_mtu;
resp.active_mtu = attr.active_mtu;
resp.gid_tbl_len = attr.gid_tbl_len;
- resp.port_cap_flags = attr.port_cap_flags;
+ resp.port_cap_flags = make_port_cap_flags(&attr);
resp.max_msg_sz = attr.max_msg_sz;
resp.bad_pkey_cntr = attr.bad_pkey_cntr;
resp.qkey_viol_cntr = attr.qkey_viol_cntr;
resp.pkey_tbl_len = attr.pkey_tbl_len;
+ if (rdma_is_grh_required(ib_dev, cmd.port_num))
+ resp.flags |= IB_UVERBS_QPF_GRH_REQUIRED;
+
if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
@@ -300,7 +345,6 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -310,6 +354,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_pd *pd;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -322,11 +367,11 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- uobj = uobj_alloc(UVERBS_OBJECT_PD, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_PD, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
+ pd = ib_dev->alloc_pd(ib_dev, uobj->context, &udata);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err;
@@ -348,9 +393,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
goto err_copy;
}
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
ib_dealloc_pd(pd);
@@ -361,25 +404,16 @@ err:
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
struct ib_uverbs_dealloc_pd cmd;
- struct ib_uobject *uobj;
- int ret;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_PD, cmd.pd_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
-
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, file,
+ in_len);
}
struct xrcd_table_entry {
@@ -468,7 +502,6 @@ static void xrcd_table_delete(struct ib_uverbs_device *dev,
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -481,6 +514,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
struct inode *inode = NULL;
int ret = 0;
int new_xrcd = 0;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -517,15 +551,15 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
}
}
- obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD,
- file->ucontext);
+ obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, file,
+ &ib_dev);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_tree_mutex_unlock;
}
if (!xrcd) {
- xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
+ xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context, &udata);
if (IS_ERR(xrcd)) {
ret = PTR_ERR(xrcd);
goto err;
@@ -564,9 +598,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
mutex_unlock(&file->device->xrcd_tree_mutex);
- uobj_alloc_commit(&obj->uobject);
-
- return in_len;
+ return uobj_alloc_commit(&obj->uobject, in_len);
err_copy:
if (inode) {
@@ -591,32 +623,25 @@ err_tree_mutex_unlock:
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_close_xrcd cmd;
- struct ib_uobject *uobj;
- int ret = 0;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_XRCD, cmd.xrcd_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, file,
+ in_len);
}
-int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
struct ib_xrcd *xrcd,
enum rdma_remove_reason why)
{
struct inode *inode;
int ret;
+ struct ib_uverbs_device *dev = uobject->context->ufile->device;
inode = xrcd->inode;
if (inode && !atomic_dec_and_test(&xrcd->usecnt))
@@ -624,16 +649,18 @@ int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
ret = ib_dealloc_xrcd(xrcd);
- if (why == RDMA_REMOVE_DESTROY && ret)
+ if (ib_is_destroy_retryable(ret, why, uobject)) {
atomic_inc(&xrcd->usecnt);
- else if (inode)
+ return ret;
+ }
+
+ if (inode)
xrcd_table_delete(dev, inode);
return ret;
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -644,6 +671,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
struct ib_pd *pd;
struct ib_mr *mr;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -663,11 +691,11 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
if (ret)
return ret;
- uobj = uobj_alloc(UVERBS_OBJECT_MR, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_MR, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err_free;
@@ -711,9 +739,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
uobj_put_obj_read(pd);
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
ib_dereg_mr(mr);
@@ -727,7 +753,6 @@ err_free:
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -759,8 +784,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
(cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
return -EINVAL;
- uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
- file->ucontext);
+ uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
@@ -778,7 +802,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
}
if (cmd.flags & IB_MR_REREG_PD) {
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
+ file);
if (!pd) {
ret = -EINVAL;
goto put_uobjs;
@@ -819,29 +844,19 @@ put_uobjs:
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_dereg_mr cmd;
- struct ib_uobject *uobj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
-
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, file,
+ in_len);
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -852,6 +867,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
struct ib_mw *mw;
struct ib_udata udata;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof(resp))
return -ENOSPC;
@@ -859,11 +875,11 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- uobj = uobj_alloc(UVERBS_OBJECT_MW, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_MW, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err_free;
@@ -897,9 +913,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
}
uobj_put_obj_read(pd);
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
uverbs_dealloc_mw(mw);
@@ -911,28 +925,19 @@ err_free:
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_dealloc_mw cmd;
- struct ib_uobject *uobj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_MW, cmd.mw_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, file,
+ in_len);
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -940,6 +945,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
struct ib_uverbs_create_comp_channel_resp resp;
struct ib_uobject *uobj;
struct ib_uverbs_completion_event_file *ev_file;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -947,14 +953,14 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
resp.fd = uobj->id;
ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
- uobj_file.uobj);
+ uobj);
ib_uverbs_init_event_queue(&ev_file->ev_queue);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
@@ -962,12 +968,10 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
return -EFAULT;
}
- uobj_alloc_commit(uobj);
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
}
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw,
struct ib_uverbs_ex_create_cq *cmd,
@@ -985,21 +989,23 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
int ret;
struct ib_uverbs_ex_create_cq_resp resp;
struct ib_cq_init_attr attr = {};
-
- if (!ib_dev->create_cq)
- return ERR_PTR(-EOPNOTSUPP);
+ struct ib_device *ib_dev;
if (cmd->comp_vector >= file->device->num_comp_vectors)
return ERR_PTR(-EINVAL);
- obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ,
- file->ucontext);
+ obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, file,
+ &ib_dev);
if (IS_ERR(obj))
return obj;
+ if (!ib_dev->create_cq) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
if (cmd->comp_channel >= 0) {
- ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
- file->ucontext);
+ ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, file);
if (IS_ERR(ev_file)) {
ret = PTR_ERR(ev_file);
goto err;
@@ -1007,7 +1013,6 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
}
obj->uobject.user_handle = cmd->user_handle;
- obj->uverbs_file = file;
obj->comp_events_reported = 0;
obj->async_events_reported = 0;
INIT_LIST_HEAD(&obj->comp_list);
@@ -1019,7 +1024,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
attr.flags = cmd->flags;
- cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
+ cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, uhw);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_file;
@@ -1047,7 +1052,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
if (ret)
goto err_cb;
- uobj_alloc_commit(&obj->uobject);
+ ret = uobj_alloc_commit(&obj->uobject, 0);
+ if (ret)
+ return ERR_PTR(ret);
return obj;
err_cb:
@@ -1075,7 +1082,6 @@ static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1106,7 +1112,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
cmd_ex.comp_vector = cmd.comp_vector;
cmd_ex.comp_channel = cmd.comp_channel;
- obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
+ obj = create_cq(file, &ucore, &uhw, &cmd_ex,
offsetof(typeof(cmd_ex), comp_channel) +
sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
NULL);
@@ -1129,7 +1135,6 @@ static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -1155,7 +1160,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
sizeof(resp.response_length)))
return -ENOSPC;
- obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
+ obj = create_cq(file, ucore, uhw, &cmd,
min(ucore->inlen, sizeof(cmd)),
ib_uverbs_ex_create_cq_cb, NULL);
@@ -1163,7 +1168,6 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1181,7 +1185,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
@@ -1231,7 +1235,6 @@ static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1246,7 +1249,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
@@ -1262,7 +1265,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
if (!ret)
break;
- ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
+ ret = copy_wc_to_user(cq->device, data_ptr, &wc);
if (ret)
goto out_put;
@@ -1283,7 +1286,6 @@ out_put:
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1293,7 +1295,7 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
@@ -1306,45 +1308,28 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_destroy_cq cmd;
struct ib_uverbs_destroy_cq_resp resp;
struct ib_uobject *uobj;
- struct ib_cq *cq;
struct ib_ucq_object *obj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_CQ, cmd.cq_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
- cq = uobj->object;
- obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
-
+ obj = container_of(uobj, struct ib_ucq_object, uobject);
memset(&resp, 0, sizeof(resp));
-
- ret = uobj_remove_commit(uobj);
- if (ret) {
- uverbs_uobject_put(uobj);
- return ret;
- }
-
resp.comp_events_reported = obj->comp_events_reported;
resp.async_events_reported = obj->async_events_reported;
- uverbs_uobject_put(uobj);
+ uobj_put_destroy(uobj);
+
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
@@ -1375,12 +1360,13 @@ static int create_qp(struct ib_uverbs_file *file,
int ret;
struct ib_rwq_ind_table *ind_tbl = NULL;
bool has_sq = true;
+ struct ib_device *ib_dev;
if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
return -EPERM;
- obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
- file->ucontext);
+ obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
obj->uxrcd = NULL;
@@ -1390,9 +1376,9 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
sizeof(cmd->rwq_ind_tbl_handle) &&
(cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
- ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL,
- cmd->rwq_ind_tbl_handle,
- file->ucontext);
+ ind_tbl = uobj_get_obj_read(rwq_ind_table,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ cmd->rwq_ind_tbl_handle, file);
if (!ind_tbl) {
ret = -EINVAL;
goto err_put;
@@ -1418,7 +1404,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT) {
xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
- file->ucontext);
+ file);
if (IS_ERR(xrcd_uobj)) {
ret = -EINVAL;
@@ -1437,8 +1423,8 @@ static int create_qp(struct ib_uverbs_file *file,
cmd->max_recv_sge = 0;
} else {
if (cmd->is_srq) {
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle,
- file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
+ cmd->srq_handle, file);
if (!srq || srq->srq_type == IB_SRQT_XRC) {
ret = -EINVAL;
goto err_put;
@@ -1447,8 +1433,9 @@ static int create_qp(struct ib_uverbs_file *file,
if (!ind_tbl) {
if (cmd->recv_cq_handle != cmd->send_cq_handle) {
- rcq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle,
- file->ucontext);
+ rcq = uobj_get_obj_read(
+ cq, UVERBS_OBJECT_CQ,
+ cmd->recv_cq_handle, file);
if (!rcq) {
ret = -EINVAL;
goto err_put;
@@ -1458,11 +1445,12 @@ static int create_qp(struct ib_uverbs_file *file,
}
if (has_sq)
- scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle,
- file->ucontext);
+ scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ cmd->send_cq_handle, file);
if (!ind_tbl)
rcq = rcq ?: scq;
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
+ file);
if (!pd || (!scq && has_sq)) {
ret = -EINVAL;
goto err_put;
@@ -1602,9 +1590,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (ind_tbl)
uobj_put_obj_read(ind_tbl);
- uobj_alloc_commit(&obj->uevent.uobject);
-
- return 0;
+ return uobj_alloc_commit(&obj->uevent.uobject, 0);
err_cb:
ib_destroy_qp(qp);
@@ -1637,7 +1623,6 @@ static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1698,7 +1683,6 @@ static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -1735,7 +1719,6 @@ int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_open_qp cmd;
@@ -1747,6 +1730,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
struct ib_qp *qp;
struct ib_qp_open_attr attr;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -1759,13 +1743,12 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
- file->ucontext);
+ obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
- xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle,
- file->ucontext);
+ xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, file);
if (IS_ERR(xrcd_uobj)) {
ret = -EINVAL;
goto err_put;
@@ -1809,10 +1792,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
qp->uobject = &obj->uevent.uobject;
uobj_put_read(xrcd_uobj);
-
- uobj_alloc_commit(&obj->uevent.uobject);
-
- return in_len;
+ return uobj_alloc_commit(&obj->uevent.uobject, in_len);
err_destroy:
ib_destroy_qp(qp);
@@ -1846,7 +1826,6 @@ static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1867,7 +1846,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
goto out;
}
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp) {
ret = -EINVAL;
goto out;
@@ -1968,11 +1947,11 @@ static int modify_qp(struct ib_uverbs_file *file,
struct ib_qp *qp;
int ret;
- attr = kmalloc(sizeof *attr, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file);
if (!qp) {
ret = -EINVAL;
goto out;
@@ -1984,15 +1963,64 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}
- if ((cmd->base.attr_mask & IB_QP_AV) &&
- !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
- ret = -EINVAL;
- goto release_qp;
+ if ((cmd->base.attr_mask & IB_QP_AV)) {
+ if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
+ if (cmd->base.attr_mask & IB_QP_STATE &&
+ cmd->base.qp_state == IB_QPS_RTR) {
+ /* We are in the INIT->RTR transition (if we are not, this
+ * transition will be rejected in subsequent checks).
+ * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
+ * but the IB_QP_STATE flag is required.
+ *
+ * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
+ * when IB_QP_AV is set, has required inclusion of a valid
+ * port number in the primary AV. (AVs are created and handled
+ * differently for InfiniBand and Ethernet (RoCE) ports.)
+ *
+ * Check the port number included in the primary AV against
+ * the port number in the qp struct, which was set (and saved)
+ * in the RST->INIT transition.
+ */
+ if (cmd->base.dest.port_num != qp->real_qp->port) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+ } else {
+ /* We are in the SQD->SQD transition. (If we are not, this
+ * transition will be rejected later in the verbs-layer checks.)
+ * Check for both IB_QP_PORT and IB_QP_AV, these can be set
+ * together in the SQD->SQD transition.
+ *
+ * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
+ * verbs layer driver does not track primary port changes
+ * resulting from path migration. Thus, in SQD, if the primary
+ * AV is modified, the primary port should also be modified).
+ *
+ * Note that in this transition, the IB_QP_STATE flag
+ * is not allowed.
+ */
+ if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+ == (IB_QP_AV | IB_QP_PORT)) &&
+ cmd->base.port_num != cmd->base.dest.port_num) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+ if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+ == IB_QP_AV) {
+ cmd->base.attr_mask |= IB_QP_PORT;
+ cmd->base.port_num = cmd->base.dest.port_num;
+ }
+ }
}
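/*
 * Worked illustration of the SQD->SQD fixup above (hedged; the values are
 * illustrative):
 *
 *	user passes:	attr_mask = IB_QP_AV,              dest.port_num = 1
 *	kernel applies:	attr_mask = IB_QP_AV | IB_QP_PORT, port_num = 1
 *
 * whereas IB_QP_AV | IB_QP_PORT with port_num != dest.port_num is rejected
 * with -EINVAL before reaching the verbs layer.
 */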
if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
(!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
- !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
+ !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
+ cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
ret = -EINVAL;
goto release_qp;
}
@@ -2049,7 +2077,6 @@ out:
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2076,7 +2103,6 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -2112,7 +2138,6 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2120,33 +2145,19 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_qp_resp resp;
struct ib_uobject *uobj;
struct ib_uqp_object *obj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- memset(&resp, 0, sizeof resp);
-
- uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
-
- ret = uobj_remove_commit(uobj);
- if (ret) {
- uverbs_uobject_put(uobj);
- return ret;
- }
-
+ memset(&resp, 0, sizeof(resp));
resp.events_reported = obj->uevent.events_reported;
- uverbs_uobject_put(uobj);
+
+ uobj_put_destroy(uobj);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
@@ -2165,14 +2176,14 @@ static void *alloc_wr(size_t wr_size, __u32 num_sge)
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_post_send cmd;
struct ib_uverbs_post_send_resp resp;
struct ib_uverbs_send_wr *user_wr;
- struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
+ struct ib_send_wr *wr = NULL, *last, *next;
+ const struct ib_send_wr *bad_wr;
struct ib_qp *qp;
int i, sg_ind;
int is_ud;
@@ -2193,7 +2204,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
if (!user_wr)
return -ENOMEM;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
goto out;
@@ -2229,8 +2240,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
goto out_put;
}
- ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah,
- file->ucontext);
+ ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
+ user_wr->wr.ud.ah, file);
if (!ud->ah) {
kfree(ud);
ret = -EINVAL;
@@ -2445,13 +2456,13 @@ err:
}
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_post_recv cmd;
struct ib_uverbs_post_recv_resp resp;
- struct ib_recv_wr *wr, *next, *bad_wr;
+ struct ib_recv_wr *wr, *next;
+ const struct ib_recv_wr *bad_wr;
struct ib_qp *qp;
ssize_t ret = -EINVAL;
@@ -2464,7 +2475,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
if (IS_ERR(wr))
return PTR_ERR(wr);
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
goto out;
@@ -2494,13 +2505,13 @@ out:
}
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_post_srq_recv cmd;
struct ib_uverbs_post_srq_recv_resp resp;
- struct ib_recv_wr *wr, *next, *bad_wr;
+ struct ib_recv_wr *wr, *next;
+ const struct ib_recv_wr *bad_wr;
struct ib_srq *srq;
ssize_t ret = -EINVAL;
@@ -2513,12 +2524,13 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
if (IS_ERR(wr))
return PTR_ERR(wr);
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (!srq)
goto out;
resp.bad_wr = 0;
- ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
+ ret = srq->device->post_srq_recv ?
+ srq->device->post_srq_recv(srq, wr, &bad_wr) : -EOPNOTSUPP;
uobj_put_obj_read(srq);
@@ -2543,7 +2555,6 @@ out:
}
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2552,9 +2563,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_ah *ah;
- struct rdma_ah_attr attr;
+ struct rdma_ah_attr attr = {};
int ret;
struct ib_udata udata;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -2562,19 +2574,21 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
- return -EINVAL;
-
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- uobj = uobj_alloc(UVERBS_OBJECT_AH, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_AH, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err;
@@ -2616,9 +2630,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
}
uobj_put_obj_read(pd);
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
rdma_destroy_ah(ah);
@@ -2632,27 +2644,18 @@ err:
}
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_destroy_ah cmd;
- struct ib_uobject *uobj;
- int ret;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_AH, cmd.ah_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, file,
+ in_len);
}
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2665,7 +2668,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
return -EINVAL;
@@ -2702,7 +2705,6 @@ out_put:
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2716,7 +2718,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
return -EINVAL;
@@ -2761,29 +2763,27 @@ static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
resources = kzalloc(sizeof(*resources), GFP_KERNEL);
if (!resources)
- goto err_res;
+ return NULL;
+
+ if (!num_specs)
+ goto out;
resources->counters =
kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
-
- if (!resources->counters)
- goto err_cnt;
-
resources->collection =
kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
- if (!resources->collection)
- goto err_collection;
+ if (!resources->counters || !resources->collection)
+ goto err;
+out:
resources->max = num_specs;
-
return resources;
-err_collection:
+err:
kfree(resources->counters);
-err_cnt:
kfree(resources);
-err_res:
+
return NULL;
}
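/*
 * Hedged usage sketch, not part of this patch: with the rework above,
 * num_specs == 0 now yields an empty but valid container instead of an
 * error, and the free side just below gains a NULL guard.
 */
static int flow_resources_example(void)
{
	struct ib_uflow_resources *res;

	res = flow_resources_alloc(0);	/* valid: res->max == 0, no arrays */
	if (!res)
		return -ENOMEM;		/* only genuine allocation failure */

	ib_uverbs_flow_resources_free(res);	/* NULL would also be tolerated */
	return 0;
}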
@@ -2791,6 +2791,9 @@ void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
unsigned int i;
+ if (!uflow_res)
+ return;
+
for (i = 0; i < uflow_res->collection_num; i++)
atomic_dec(&uflow_res->collection[i]->usecnt);
@@ -2826,7 +2829,7 @@ static void flow_resources_add(struct ib_uflow_resources *uflow_res,
uflow_res->num++;
}
-static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
+static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile,
struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec,
struct ib_uflow_resources *uflow_res)
@@ -2855,7 +2858,7 @@ static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
ib_spec->action.act = uobj_get_obj_read(flow_action,
UVERBS_OBJECT_FLOW_ACTION,
kern_spec->action.handle,
- ucontext);
+ ufile);
if (!ib_spec->action.act)
return -EINVAL;
ib_spec->action.size =
@@ -2873,7 +2876,7 @@ static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
uobj_get_obj_read(counters,
UVERBS_OBJECT_COUNTERS,
kern_spec->flow_count.handle,
- ucontext);
+ ufile);
if (!ib_spec->flow_count.counters)
return -EINVAL;
ib_spec->flow_count.size =
@@ -3042,9 +3045,6 @@ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
void *kern_spec_mask;
void *kern_spec_val;
- if (kern_spec->reserved)
- return -EINVAL;
-
kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
kern_spec_val = (void *)kern_spec +
@@ -3057,7 +3057,7 @@ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
kern_filter_sz, ib_spec);
}
-static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
+static int kern_spec_to_ib_spec(struct ib_uverbs_file *ufile,
struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec,
struct ib_uflow_resources *uflow_res)
@@ -3066,14 +3066,13 @@ static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
return -EINVAL;
if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
- return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec,
+ return kern_spec_to_ib_spec_action(ufile, kern_spec, ib_spec,
uflow_res);
else
return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3087,6 +3086,7 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
struct ib_wq_init_attr wq_init_attr = {};
size_t required_cmd_sz;
size_t required_resp_len;
+ struct ib_device *ib_dev;
required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
@@ -3109,18 +3109,18 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
if (cmd.comp_mask)
return -EOPNOTSUPP;
- obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ,
- file->ucontext);
+ obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
err = -EINVAL;
goto err_uobj;
}
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq) {
err = -EINVAL;
goto err_put_pd;
@@ -3174,8 +3174,7 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
uobj_put_obj_read(pd);
uobj_put_obj_read(cq);
- uobj_alloc_commit(&obj->uevent.uobject);
- return 0;
+ return uobj_alloc_commit(&obj->uevent.uobject, 0);
err_copy:
ib_destroy_wq(wq);
@@ -3190,7 +3189,6 @@ err_uobj:
}
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3224,29 +3222,19 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
return -EOPNOTSUPP;
resp.response_length = required_resp_len;
- uobj = uobj_get_write(UVERBS_OBJECT_WQ, cmd.wq_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
-
- ret = uobj_remove_commit(uobj);
resp.events_reported = obj->uevent.events_reported;
- uverbs_uobject_put(uobj);
- if (ret)
- return ret;
+
+ uobj_put_destroy(uobj);
return ib_copy_to_udata(ucore, &resp, resp.response_length);
}
int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3275,7 +3263,7 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
return -EINVAL;
- wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file->ucontext);
+ wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file);
if (!wq)
return -EINVAL;
@@ -3296,7 +3284,6 @@ out:
}
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3314,6 +3301,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
u32 expected_in_size;
size_t required_cmd_sz_header;
size_t required_resp_len;
+ struct ib_device *ib_dev;
required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
@@ -3369,8 +3357,8 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
num_read_wqs++) {
- wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs],
- file->ucontext);
+ wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
+ wqs_handles[num_read_wqs], file);
if (!wq) {
err = -EINVAL;
goto put_wqs;
@@ -3379,7 +3367,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
wqs[num_read_wqs] = wq;
}
- uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file, &ib_dev);
if (IS_ERR(uobj)) {
err = PTR_ERR(uobj);
goto put_wqs;
@@ -3423,8 +3411,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
for (j = 0; j < num_read_wqs; j++)
uobj_put_obj_read(wqs[j]);
- uobj_alloc_commit(uobj);
- return 0;
+ return uobj_alloc_commit(uobj, 0);
err_copy:
ib_destroy_rwq_ind_table(rwq_ind_tbl);
@@ -3440,12 +3427,10 @@ err_free:
}
int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
- struct ib_uobject *uobj;
int ret;
size_t required_cmd_sz;
@@ -3466,16 +3451,11 @@ int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
if (cmd.comp_mask)
return -EOPNOTSUPP;
- uobj = uobj_get_write(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- return uobj_remove_commit(uobj);
+ return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
+ cmd.ind_tbl_handle, file, 0);
}
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3488,10 +3468,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_flow_attr *flow_attr;
struct ib_qp *qp;
struct ib_uflow_resources *uflow_res;
+ struct ib_uverbs_flow_spec_hdr *kern_spec;
int err = 0;
- void *kern_spec;
void *ib_spec;
int i;
+ struct ib_device *ib_dev;
if (ucore->inlen < sizeof(cmd))
return -EINVAL;
@@ -3538,8 +3519,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (!kern_flow_attr)
return -ENOMEM;
- memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
- err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+ *kern_flow_attr = cmd.flow_attr;
+ err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
cmd.flow_attr.size);
if (err)
goto err_free_attr;
@@ -3547,18 +3528,28 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
kern_flow_attr = &cmd.flow_attr;
}
- uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file, &ib_dev);
if (IS_ERR(uobj)) {
err = PTR_ERR(uobj);
goto err_free_attr;
}
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp) {
err = -EINVAL;
goto err_uobj;
}
+ if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ if (!qp->device->create_flow) {
+ err = -EOPNOTSUPP;
+ goto err_put;
+ }
+
flow_attr = kzalloc(struct_size(flow_attr, flows,
cmd.flow_attr.num_of_specs), GFP_KERNEL);
if (!flow_attr) {
@@ -3578,21 +3569,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
flow_attr->flags = kern_flow_attr->flags;
flow_attr->size = sizeof(*flow_attr);
- kern_spec = kern_flow_attr + 1;
+ kern_spec = kern_flow_attr->flow_specs;
ib_spec = flow_attr + 1;
for (i = 0; i < flow_attr->num_of_specs &&
- cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
- cmd.flow_attr.size >=
- ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
- err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
- uflow_res);
+ cmd.flow_attr.size >= sizeof(*kern_spec) &&
+ cmd.flow_attr.size >= kern_spec->size;
+ i++) {
+ err = kern_spec_to_ib_spec(
+ file, (struct ib_uverbs_flow_spec *)kern_spec,
+ ib_spec, uflow_res);
if (err)
goto err_free;
flow_attr->size +=
((union ib_flow_spec *) ib_spec)->size;
- cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
- kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+ cmd.flow_attr.size -= kern_spec->size;
+ kern_spec = ((void *)kern_spec) + kern_spec->size;
ib_spec += ((union ib_flow_spec *) ib_spec)->size;
}
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
@@ -3611,6 +3603,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
}
atomic_inc(&qp->usecnt);
flow_id->qp = qp;
+ flow_id->device = qp->device;
flow_id->uobject = uobj;
uobj->object = flow_id;
uflow = container_of(uobj, typeof(*uflow), uobject);
@@ -3625,13 +3618,13 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
goto err_copy;
uobj_put_obj_read(qp);
- uobj_alloc_commit(uobj);
kfree(flow_attr);
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
- return 0;
+ return uobj_alloc_commit(uobj, 0);
err_copy:
- ib_destroy_flow(flow_id);
+ if (!qp->device->destroy_flow(flow_id))
+ atomic_dec(&qp->usecnt);
err_free:
ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
@@ -3647,12 +3640,10 @@ err_free_attr:
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_destroy_flow cmd;
- struct ib_uobject *uobj;
int ret;
if (ucore->inlen < sizeof(cmd))
@@ -3665,17 +3656,11 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
if (cmd.comp_mask)
return -EINVAL;
- uobj = uobj_get_write(UVERBS_OBJECT_FLOW, cmd.flow_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret;
+ return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, file,
+ 0);
}
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_uverbs_create_xsrq *cmd,
struct ib_udata *udata)
{
@@ -3686,9 +3671,10 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
struct ib_uobject *uninitialized_var(xrcd_uobj);
struct ib_srq_init_attr attr;
int ret;
+ struct ib_device *ib_dev;
- obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ,
- file->ucontext);
+ obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -3697,7 +3683,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
if (cmd->srq_type == IB_SRQT_XRC) {
xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
- file->ucontext);
+ file);
if (IS_ERR(xrcd_uobj)) {
ret = -EINVAL;
goto err;
@@ -3714,15 +3700,15 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
}
if (ib_srq_has_cq(cmd->srq_type)) {
- attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle,
- file->ucontext);
+ attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ cmd->cq_handle, file);
if (!attr.ext.cq) {
ret = -EINVAL;
goto err_put_xrcd;
}
}
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err_put_cq;
@@ -3787,9 +3773,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
uobj_put_obj_read(attr.ext.cq);
uobj_put_obj_read(pd);
- uobj_alloc_commit(&obj->uevent.uobject);
-
- return 0;
+ return uobj_alloc_commit(&obj->uevent.uobject, 0);
err_copy:
ib_destroy_srq(srq);
@@ -3813,7 +3797,6 @@ err:
}
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3843,7 +3826,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
+ ret = __uverbs_create_xsrq(file, &xcmd, &udata);
if (ret)
return ret;
@@ -3851,7 +3834,6 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_create_xsrq cmd;
@@ -3870,7 +3852,7 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
+ ret = __uverbs_create_xsrq(file, &cmd, &udata);
if (ret)
return ret;
@@ -3878,7 +3860,6 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3894,7 +3875,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (!srq)
return -EINVAL;
@@ -3909,7 +3890,6 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -3925,7 +3905,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (!srq)
return -EINVAL;
@@ -3949,7 +3929,6 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3957,32 +3936,20 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_srq_resp resp;
struct ib_uobject *uobj;
struct ib_uevent_object *obj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_SRQ, cmd.srq_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
obj = container_of(uobj, struct ib_uevent_object, uobject);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
-
memset(&resp, 0, sizeof(resp));
-
- ret = uobj_remove_commit(uobj);
- if (ret) {
- uverbs_uobject_put(uobj);
- return ret;
- }
resp.events_reported = obj->events_reported;
- uverbs_uobject_put(uobj);
+
+ uobj_put_destroy(uobj);
+
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
return -EFAULT;
@@ -3990,15 +3957,21 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_ex_query_device_resp resp = { {0} };
struct ib_uverbs_ex_query_device cmd;
struct ib_device_attr attr = {0};
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
int err;
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
if (!ib_dev->query_device)
return -EOPNOTSUPP;
@@ -4024,7 +3997,7 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
if (err)
return err;
- copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
+ copy_query_dev_fields(ucontext, &resp.base, &attr);
if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
goto end;
@@ -4111,7 +4084,6 @@ end:
}
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -4141,7 +4113,7 @@ int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
if (cmd.attr_mask > IB_CQ_MODERATE)
return -EOPNOTSUPP;
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8d32c4ae368c..1a6b229e3db3 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -35,6 +35,103 @@
#include "rdma_core.h"
#include "uverbs.h"
+struct bundle_alloc_head {
+ struct bundle_alloc_head *next;
+ u8 data[];
+};
+
+struct bundle_priv {
+ /* Must be first */
+ struct bundle_alloc_head alloc_head;
+ struct bundle_alloc_head *allocated_mem;
+ size_t internal_avail;
+ size_t internal_used;
+
+ struct radix_tree_root *radix;
+ const struct uverbs_api_ioctl_method *method_elm;
+ void __rcu **radix_slots;
+ unsigned long radix_slots_len;
+ u32 method_key;
+
+ struct ib_uverbs_attr __user *user_attrs;
+ struct ib_uverbs_attr *uattrs;
+
+ DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
+
+ /*
+ * Must be last. bundle ends in a flex array which overlaps
+ * internal_buffer.
+ */
+ struct uverbs_attr_bundle bundle;
+ u64 internal_buffer[32];
+};
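/*
 * Hedged layout picture of bundle_priv: bundle.attrs is a flex array at the
 * tail of struct uverbs_attr_bundle, so it grows into internal_buffer, and
 * whatever remains of internal_buffer feeds _uverbs_alloc() below:
 *
 *	| fixed fields | bundle | attrs[key_bitmap_len] | alloc pool ... |
 *	                        ^ start of internal_buffer[32]
 *
 * ib_uverbs_cmd_verbs() initializes internal_used past the attrs array, so
 * bundle allocations never overwrite it.
 */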
+
+/*
+ * Each method has an absolute minimum amount of memory it needs to allocate,
+ * precompute that amount and determine if the onstack memory can be used or
+ * if allocation is need.
+ */
+void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
+ unsigned int num_attrs)
+{
+ struct bundle_priv *pbundle;
+ size_t bundle_size =
+ offsetof(struct bundle_priv, internal_buffer) +
+ sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
+ sizeof(*pbundle->uattrs) * num_attrs;
+
+ method_elm->use_stack = bundle_size <= sizeof(*pbundle);
+ method_elm->bundle_size =
+ ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));
+
+ /* Do not want order-2 allocations for this. */
+ WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
+}
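/*
 * Worked example (hedged; the struct sizes are assumptions for a 64-bit
 * build, not taken from the headers): a method with key_bitmap_len == 8 and
 * num_attrs == 4, assuming sizeof(struct uverbs_attr) == 32 and
 * sizeof(struct ib_uverbs_attr) == 16, gives
 *
 *	bundle_size = offsetof(struct bundle_priv, internal_buffer)
 *		      + 8 * 32	// bundle.attrs
 *		      + 4 * 16	// uattrs copy
 *
 * use_stack is set when that total still fits in sizeof(struct bundle_priv),
 * i.e. within the fixed head plus the 32 * 8 = 256 byte internal_buffer;
 * otherwise ib_uverbs_cmd_verbs() kmalloc()s bundle_size, padded by 256
 * bytes of headroom and aligned to u64.
 */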
+
+/**
+ * uverbs_alloc() - Quickly allocate memory for use with a bundle
+ * @bundle: The bundle
+ * @size: Number of bytes to allocate
+ * @flags: Allocator flags
+ *
+ * The bundle allocator is intended for allocations that are connected with
+ * processing the system call related to the bundle. The allocated memory is
+ * always freed once the system call completes, and cannot be freed any other
+ * way.
+ *
+ * This tries to use a small pool of pre-allocated memory for performance.
+ */
+__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
+ gfp_t flags)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ size_t new_used;
+ void *res;
+
+ if (check_add_overflow(size, pbundle->internal_used, &new_used))
+ return ERR_PTR(-EOVERFLOW);
+
+ if (new_used > pbundle->internal_avail) {
+ struct bundle_alloc_head *buf;
+
+ buf = kvmalloc(struct_size(buf, data, size), flags);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ buf->next = pbundle->allocated_mem;
+ pbundle->allocated_mem = buf;
+ return buf->data;
+ }
+
+ res = (void *)pbundle->internal_buffer + pbundle->internal_used;
+ pbundle->internal_used =
+ ALIGN(new_used, sizeof(*pbundle->internal_buffer));
+ if (flags & __GFP_ZERO)
+ memset(res, 0, size);
+ return res;
+}
+EXPORT_SYMBOL(_uverbs_alloc);
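/*
 * Hedged usage sketch, not part of this patch: request-scoped scratch memory
 * in a method handler. uverbs_alloc() is assumed to be the GFP_KERNEL
 * wrapper around _uverbs_alloc(); the memory is reclaimed in bundle_destroy()
 * when the system call completes, so the handler never frees it explicitly.
 */
static int example_method_handler(struct ib_uverbs_file *ufile,
				  struct uverbs_attr_bundle *attrs)
{
	u32 *scratch;

	scratch = uverbs_alloc(attrs, 64 * sizeof(*scratch));
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	/* ... fill and use scratch for the duration of this call ... */
	return 0;
}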
+
static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
u16 len)
{
@@ -46,45 +143,24 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
0, uattr->len - len);
}
-static int uverbs_process_attr(struct ib_device *ibdev,
- struct ib_ucontext *ucontext,
- const struct ib_uverbs_attr *uattr,
- u16 attr_id,
- const struct uverbs_attr_spec_hash *attr_spec_bucket,
- struct uverbs_attr_bundle_hash *attr_bundle_h,
- struct ib_uverbs_attr __user *uattr_ptr)
+static int uverbs_process_attr(struct bundle_priv *pbundle,
+ const struct uverbs_api_attr *attr_uapi,
+ struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
- const struct uverbs_attr_spec *spec;
- const struct uverbs_attr_spec *val_spec;
- struct uverbs_attr *e;
- const struct uverbs_object_spec *object;
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
+ const struct uverbs_attr_spec *val_spec = spec;
struct uverbs_obj_attr *o_attr;
- struct uverbs_attr *elements = attr_bundle_h->attrs;
-
- if (attr_id >= attr_spec_bucket->num_attrs) {
- if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
- return -EINVAL;
- else
- return 0;
- }
-
- if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
- return -EINVAL;
-
- spec = &attr_spec_bucket->attrs[attr_id];
- val_spec = spec;
- e = &elements[attr_id];
- e->uattr = uattr_ptr;
switch (spec->type) {
case UVERBS_ATTR_TYPE_ENUM_IN:
- if (uattr->attr_data.enum_data.elem_id >= spec->enum_def.num_elems)
+ if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
return -EOPNOTSUPP;
if (uattr->attr_data.enum_data.reserved)
return -EINVAL;
- val_spec = &spec->enum_def.ids[uattr->attr_data.enum_data.elem_id];
+ val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];
/* Currently we only support PTR_IN based enums */
if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
@@ -98,64 +174,75 @@ static int uverbs_process_attr(struct ib_device *ibdev,
* longer struct will fail here if used with an old kernel and
* non-zero content, making ABI compat/discovery simpler.
*/
- if (uattr->len > val_spec->ptr.len &&
- val_spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO &&
- !uverbs_is_attr_cleared(uattr, val_spec->ptr.len))
+ if (uattr->len > val_spec->u.ptr.len &&
+ val_spec->zero_trailing &&
+ !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
return -EOPNOTSUPP;
/* fall through */
case UVERBS_ATTR_TYPE_PTR_OUT:
- if (uattr->len < val_spec->ptr.min_len ||
- (!(val_spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO) &&
- uattr->len > val_spec->ptr.len))
+ if (uattr->len < val_spec->u.ptr.min_len ||
+ (!val_spec->zero_trailing &&
+ uattr->len > val_spec->u.ptr.len))
return -EINVAL;
if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
uattr->attr_data.reserved)
return -EINVAL;
- e->ptr_attr.data = uattr->data;
+ e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
e->ptr_attr.len = uattr->len;
- e->ptr_attr.flags = uattr->flags;
+
+ if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
+ void *p;
+
+ p = uverbs_alloc(&pbundle->bundle, uattr->len);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ e->ptr_attr.ptr = p;
+
+ if (copy_from_user(p, u64_to_user_ptr(uattr->data),
+ uattr->len))
+ return -EFAULT;
+ } else {
+ e->ptr_attr.data = uattr->data;
+ }
break;
case UVERBS_ATTR_TYPE_IDR:
- if (uattr->data >> 32)
- return -EINVAL;
- /* fall through */
case UVERBS_ATTR_TYPE_FD:
if (uattr->attr_data.reserved)
return -EINVAL;
- if (uattr->len != 0 || !ucontext || uattr->data > INT_MAX)
+ if (uattr->len != 0)
return -EINVAL;
o_attr = &e->obj_attr;
- object = uverbs_get_object(ibdev, spec->obj.obj_type);
- if (!object)
- return -EINVAL;
- o_attr->type = object->type_attrs;
-
- o_attr->id = (int)uattr->data;
- o_attr->uobject = uverbs_get_uobject_from_context(
- o_attr->type,
- ucontext,
- spec->obj.access,
- o_attr->id);
+ o_attr->attr_elm = attr_uapi;
+ /*
+ * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
+ * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
+ * here without caring about truncation, as we know that the
+ * IDR implementation today rejects negative IDs.
+ */
+ o_attr->uobject = uverbs_get_uobject_from_file(
+ spec->u.obj.obj_type,
+ pbundle->bundle.ufile,
+ spec->u.obj.access,
+ uattr->data_s64);
if (IS_ERR(o_attr->uobject))
return PTR_ERR(o_attr->uobject);
+ __set_bit(attr_bkey, pbundle->uobj_finalize);
- if (spec->obj.access == UVERBS_ACCESS_NEW) {
- u64 id = o_attr->uobject->id;
+ if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
+ unsigned int uattr_idx = uattr - pbundle->uattrs;
+ s64 id = o_attr->uobject->id;
/* Copy the allocated id to the user-space */
- if (put_user(id, &e->uattr->data)) {
- uverbs_finalize_object(o_attr->uobject,
- UVERBS_ACCESS_NEW,
- false);
+ if (put_user(id, &pbundle->user_attrs[uattr_idx].data))
return -EFAULT;
- }
}
break;
@@ -163,220 +250,225 @@ static int uverbs_process_attr(struct ib_device *ibdev,
return -EOPNOTSUPP;
}
- set_bit(attr_id, attr_bundle_h->valid_bitmap);
return 0;
}
-static int uverbs_uattrs_process(struct ib_device *ibdev,
- struct ib_ucontext *ucontext,
- const struct ib_uverbs_attr *uattrs,
- size_t num_uattrs,
- const struct uverbs_method_spec *method,
- struct uverbs_attr_bundle *attr_bundle,
- struct ib_uverbs_attr __user *uattr_ptr)
+/*
+ * We searched the radix tree with the method prefix and now want to quickly
+ * search the suffix bits for a particular attribute pointer. It is not
+ * totally clear whether this breaks the radix tree encapsulation, but it
+ * uses the iter data to determine if the method iter points at the same
+ * chunk that will store the attribute; if so, it just derefs it directly.
+ * By construction, in most kernel configs the method and attrs will all fit
+ * in a single radix chunk, so in most cases this needs no search. Otherwise
+ * it falls back to a full search.
+ */
+static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle,
+ u32 attr_key)
{
- size_t i;
- int ret = 0;
- int num_given_buckets = 0;
-
- for (i = 0; i < num_uattrs; i++) {
- const struct ib_uverbs_attr *uattr = &uattrs[i];
- u16 attr_id = uattr->attr_id;
- struct uverbs_attr_spec_hash *attr_spec_bucket;
-
- ret = uverbs_ns_idx(&attr_id, method->num_buckets);
- if (ret < 0) {
- if (uattr->flags & UVERBS_ATTR_F_MANDATORY) {
- uverbs_finalize_objects(attr_bundle,
- method->attr_buckets,
- num_given_buckets,
- false);
- return ret;
- }
- continue;
- }
+ void __rcu **slot;
- /*
- * ret is the found ns, so increase num_given_buckets if
- * necessary.
- */
- if (ret >= num_given_buckets)
- num_given_buckets = ret + 1;
-
- attr_spec_bucket = method->attr_buckets[ret];
- ret = uverbs_process_attr(ibdev, ucontext, uattr, attr_id,
- attr_spec_bucket, &attr_bundle->hash[ret],
- uattr_ptr++);
- if (ret) {
- uverbs_finalize_objects(attr_bundle,
- method->attr_buckets,
- num_given_buckets,
- false);
- return ret;
- }
+ if (likely(attr_key < pbundle->radix_slots_len)) {
+ void *entry;
+
+ slot = pbundle->radix_slots + attr_key;
+ entry = rcu_dereference_raw(*slot);
+ if (likely(!radix_tree_is_internal_node(entry) && entry))
+ return slot;
}
- return num_given_buckets;
+ return radix_tree_lookup_slot(pbundle->radix,
+ pbundle->method_key | attr_key);
}
-static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *method_spec,
- struct uverbs_attr_bundle *attr_bundle)
+static int uverbs_set_attr(struct bundle_priv *pbundle,
+ struct ib_uverbs_attr *uattr)
{
- unsigned int i;
-
- for (i = 0; i < attr_bundle->num_buckets; i++) {
- struct uverbs_attr_spec_hash *attr_spec_bucket =
- method_spec->attr_buckets[i];
+ u32 attr_key = uapi_key_attr(uattr->attr_id);
+ u32 attr_bkey = uapi_bkey_attr(attr_key);
+ const struct uverbs_api_attr *attr;
+ void __rcu **slot;
+ int ret;
- if (!bitmap_subset(attr_spec_bucket->mandatory_attrs_bitmask,
- attr_bundle->hash[i].valid_bitmap,
- attr_spec_bucket->num_attrs))
- return -EINVAL;
+ slot = uapi_get_attr_for_method(pbundle, attr_key);
+ if (!slot) {
+ /*
+ * The kernel does not support the attribute, but user-space says
+ * it is mandatory
+ */
+ if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
+ return -EPROTONOSUPPORT;
+ return 0;
}
+ attr = srcu_dereference(
+ *slot, &pbundle->bundle.ufile->device->disassociate_srcu);
- for (; i < method_spec->num_buckets; i++) {
- struct uverbs_attr_spec_hash *attr_spec_bucket =
- method_spec->attr_buckets[i];
+ /* Reject duplicate attributes from user-space */
+ if (test_bit(attr_bkey, pbundle->bundle.attr_present))
+ return -EINVAL;
- if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
- attr_spec_bucket->num_attrs))
- return -EINVAL;
- }
+ ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey);
+ if (ret)
+ return ret;
+
+ __set_bit(attr_bkey, pbundle->bundle.attr_present);
return 0;
}
-static int uverbs_handle_method(struct ib_uverbs_attr __user *uattr_ptr,
- const struct ib_uverbs_attr *uattrs,
- size_t num_uattrs,
- struct ib_device *ibdev,
- struct ib_uverbs_file *ufile,
- const struct uverbs_method_spec *method_spec,
- struct uverbs_attr_bundle *attr_bundle)
+static int ib_uverbs_run_method(struct bundle_priv *pbundle,
+ unsigned int num_attrs)
{
+ int (*handler)(struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *ctx);
+ size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
+ unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
+ unsigned int i;
int ret;
- int finalize_ret;
- int num_given_buckets;
- num_given_buckets = uverbs_uattrs_process(ibdev, ufile->ucontext, uattrs,
- num_uattrs, method_spec,
- attr_bundle, uattr_ptr);
- if (num_given_buckets <= 0)
+ /* See uverbs_disassociate_api() */
+ handler = srcu_dereference(
+ pbundle->method_elm->handler,
+ &pbundle->bundle.ufile->device->disassociate_srcu);
+ if (!handler)
+ return -EIO;
+
+ pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
+ if (IS_ERR(pbundle->uattrs))
+ return PTR_ERR(pbundle->uattrs);
+ if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
+ return -EFAULT;
+
+ for (i = 0; i != num_attrs; i++) {
+ ret = uverbs_set_attr(pbundle, &pbundle->uattrs[i]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ /* User space did not provide all the mandatory attributes */
+ if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory,
+ pbundle->bundle.attr_present,
+ pbundle->method_elm->key_bitmap_len)))
return -EINVAL;
- attr_bundle->num_buckets = num_given_buckets;
- ret = uverbs_validate_kernel_mandatory(method_spec, attr_bundle);
- if (ret)
- goto cleanup;
+ if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
+ struct uverbs_obj_attr *destroy_attr =
+ &pbundle->bundle.attrs[destroy_bkey].obj_attr;
- ret = method_spec->handler(ibdev, ufile, attr_bundle);
-cleanup:
- finalize_ret = uverbs_finalize_objects(attr_bundle,
- method_spec->attr_buckets,
- attr_bundle->num_buckets,
- !ret);
+ ret = uobj_destroy(destroy_attr->uobject);
+ if (ret)
+ return ret;
+ __clear_bit(destroy_bkey, pbundle->uobj_finalize);
- return ret ? ret : finalize_ret;
-}
+ ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
+ uobj_put_destroy(destroy_attr->uobject);
+ } else {
+ ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
+ }
-#define UVERBS_OPTIMIZE_USING_STACK_SZ 256
-static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct ib_uverbs_ioctl_hdr *hdr,
- void __user *buf)
-{
- const struct uverbs_object_spec *object_spec;
- const struct uverbs_method_spec *method_spec;
- long err = 0;
- unsigned int i;
- struct {
- struct ib_uverbs_attr *uattrs;
- struct uverbs_attr_bundle *uverbs_attr_bundle;
- } *ctx = NULL;
- struct uverbs_attr *curr_attr;
- unsigned long *curr_bitmap;
- size_t ctx_size;
- uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
-
- if (hdr->driver_id != ib_dev->driver_id)
+ /*
+ * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework
+ * cannot invoke the method because the request is not supported. No
+ * other cases should return this code.
+ */
+ if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT))
return -EINVAL;
- object_spec = uverbs_get_object(ib_dev, hdr->object_id);
- if (!object_spec)
- return -EPROTONOSUPPORT;
+ return ret;
+}
- method_spec = uverbs_get_method(object_spec, hdr->method_id);
- if (!method_spec)
- return -EPROTONOSUPPORT;
+static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
+{
+ unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
+ struct bundle_alloc_head *memblock;
+ unsigned int i;
+ int ret = 0;
- if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
- return -EINVAL;
+ i = -1;
+ while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
+ i + 1)) < key_bitmap_len) {
+ struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ int current_ret;
+
+ current_ret = uverbs_finalize_object(
+ attr->obj_attr.uobject,
+ attr->obj_attr.attr_elm->spec.u.obj.access, commit);
+ if (!ret)
+ ret = current_ret;
+ }
- ctx_size = sizeof(*ctx) +
- sizeof(struct uverbs_attr_bundle) +
- sizeof(struct uverbs_attr_bundle_hash) * method_spec->num_buckets +
- sizeof(*ctx->uattrs) * hdr->num_attrs +
- sizeof(*ctx->uverbs_attr_bundle->hash[0].attrs) *
- method_spec->num_child_attrs +
- sizeof(*ctx->uverbs_attr_bundle->hash[0].valid_bitmap) *
- (method_spec->num_child_attrs / BITS_PER_LONG +
- method_spec->num_buckets);
-
- if (ctx_size <= UVERBS_OPTIMIZE_USING_STACK_SZ)
- ctx = (void *)data;
- if (!ctx)
- ctx = kmalloc(ctx_size, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->uverbs_attr_bundle = (void *)ctx + sizeof(*ctx);
- ctx->uattrs = (void *)(ctx->uverbs_attr_bundle + 1) +
- (sizeof(ctx->uverbs_attr_bundle->hash[0]) *
- method_spec->num_buckets);
- curr_attr = (void *)(ctx->uattrs + hdr->num_attrs);
- curr_bitmap = (void *)(curr_attr + method_spec->num_child_attrs);
+ for (memblock = pbundle->allocated_mem; memblock;) {
+ struct bundle_alloc_head *tmp = memblock;
- /*
- * We just fill the pointers and num_attrs here. The data itself will be
- * filled at a later stage (uverbs_process_attr)
- */
- for (i = 0; i < method_spec->num_buckets; i++) {
- unsigned int curr_num_attrs = method_spec->attr_buckets[i]->num_attrs;
-
- ctx->uverbs_attr_bundle->hash[i].attrs = curr_attr;
- curr_attr += curr_num_attrs;
- ctx->uverbs_attr_bundle->hash[i].num_attrs = curr_num_attrs;
- ctx->uverbs_attr_bundle->hash[i].valid_bitmap = curr_bitmap;
- bitmap_zero(curr_bitmap, curr_num_attrs);
- curr_bitmap += BITS_TO_LONGS(curr_num_attrs);
+ memblock = memblock->next;
+ kvfree(tmp);
}
- err = copy_from_user(ctx->uattrs, buf,
- sizeof(*ctx->uattrs) * hdr->num_attrs);
- if (err) {
- err = -EFAULT;
- goto out;
- }
+ return ret;
+}
- err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
- file, method_spec, ctx->uverbs_attr_bundle);
+static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
+ struct ib_uverbs_ioctl_hdr *hdr,
+ struct ib_uverbs_attr __user *user_attrs)
+{
+ const struct uverbs_api_ioctl_method *method_elm;
+ struct uverbs_api *uapi = ufile->device->uapi;
+ struct radix_tree_iter attrs_iter;
+ struct bundle_priv *pbundle;
+ struct bundle_priv onstack;
+ void __rcu **slot;
+ int destroy_ret;
+ int ret;
- /*
- * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
- * not invoke the method because the request is not supported. No
- * other cases should return this code.
- */
- if (unlikely(err == -EPROTONOSUPPORT)) {
- WARN_ON_ONCE(err == -EPROTONOSUPPORT);
- err = -EINVAL;
+ if (unlikely(hdr->driver_id != uapi->driver_id))
+ return -EINVAL;
+
+ slot = radix_tree_iter_lookup(
+ &uapi->radix, &attrs_iter,
+ uapi_key_obj(hdr->object_id) |
+ uapi_key_ioctl_method(hdr->method_id));
+ if (unlikely(!slot))
+ return -EPROTONOSUPPORT;
+ method_elm = srcu_dereference(*slot, &ufile->device->disassociate_srcu);
+
+ if (!method_elm->use_stack) {
+ pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
+ if (!pbundle)
+ return -ENOMEM;
+ pbundle->internal_avail =
+ method_elm->bundle_size -
+ offsetof(struct bundle_priv, internal_buffer);
+ pbundle->alloc_head.next = NULL;
+ pbundle->allocated_mem = &pbundle->alloc_head;
+ } else {
+ pbundle = &onstack;
+ pbundle->internal_avail = sizeof(pbundle->internal_buffer);
+ pbundle->allocated_mem = NULL;
}
-out:
- if (ctx != (void *)data)
- kfree(ctx);
- return err;
-}
-#define IB_UVERBS_MAX_CMD_SZ 4096
+ /* Space for the pbundle->bundle.attrs flex array */
+ pbundle->method_elm = method_elm;
+ pbundle->method_key = attrs_iter.index;
+ pbundle->bundle.ufile = ufile;
+ pbundle->radix = &uapi->radix;
+ pbundle->radix_slots = slot;
+ pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
+ pbundle->user_attrs = user_attrs;
+
+ pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
+ sizeof(*pbundle->bundle.attrs),
+ sizeof(*pbundle->internal_buffer));
+ memset(pbundle->bundle.attr_present, 0,
+ sizeof(pbundle->bundle.attr_present));
+ memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
+
+ ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
+ destroy_ret = bundle_destroy(pbundle, ret == 0);
+ if (unlikely(destroy_ret && !ret))
+ return destroy_ret;
+
+ return ret;
+}
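
The allocation strategy above is worth calling out: methods whose precomputed bundle_size fits on the stack avoid kmalloc() entirely, and internal_avail is derived with offsetof() so the trailing scratch buffer can be over-allocated on the heap. A minimal sketch of that small-on-stack/large-on-heap pattern, with all names (struct ctx, use_scratch()) hypothetical:

#include <linux/slab.h>
#include <linux/stddef.h>

struct ctx {
	size_t avail;
	u64 buf[32];		/* trailing scratch space */
};

static int use_scratch(struct ctx *c);	/* hypothetical worker */

static int run_sized(size_t total_size)
{
	struct ctx onstack;
	struct ctx *c;
	int ret;

	if (total_size <= sizeof(onstack)) {
		c = &onstack;
		c->avail = sizeof(c->buf);
	} else {
		c = kmalloc(total_size, GFP_KERNEL);
		if (!c)
			return -ENOMEM;
		/* everything past the header is usable scratch */
		c->avail = total_size - offsetof(struct ctx, buf);
	}

	ret = use_scratch(c);

	if (c != &onstack)
		kfree(c);
	return ret;
}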
long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -384,39 +476,138 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ib_uverbs_ioctl_hdr __user *user_hdr =
(struct ib_uverbs_ioctl_hdr __user *)arg;
struct ib_uverbs_ioctl_hdr hdr;
- struct ib_device *ib_dev;
int srcu_key;
- long err;
+ int err;
+
+ if (unlikely(cmd != RDMA_VERBS_IOCTL))
+ return -ENOIOCTLCMD;
+
+ err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+ if (err)
+ return -EFAULT;
+
+ if (hdr.length > PAGE_SIZE ||
+ hdr.length != struct_size(&hdr, attrs, hdr.num_attrs))
+ return -EINVAL;
+
+ if (hdr.reserved1 || hdr.reserved2)
+ return -EPROTONOSUPPORT;
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- err = -EIO;
- goto out;
+ err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs);
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ return err;
+}
+
+int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ const struct uverbs_attr *attr;
+ u64 flags;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ /* Missing attribute means 0 flags */
+ if (IS_ERR(attr)) {
+ *to = 0;
+ return 0;
}
- if (cmd == RDMA_VERBS_IOCTL) {
- err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+ /*
+ * New userspace code should use 8 bytes to pass flags, but we
+ * transparently support old userspaces that were using 4 bytes as
+ * well.
+ */
+ if (attr->ptr_attr.len == 8)
+ flags = attr->ptr_attr.data;
+ else if (attr->ptr_attr.len == 4)
+ flags = *(u32 *)&attr->ptr_attr.data;
+ else
+ return -EINVAL;
- if (err || hdr.length > IB_UVERBS_MAX_CMD_SZ ||
- hdr.length != sizeof(hdr) + hdr.num_attrs * sizeof(struct ib_uverbs_attr)) {
- err = -EINVAL;
- goto out;
- }
+ if (flags & ~allowed_bits)
+ return -EINVAL;
- if (hdr.reserved1 || hdr.reserved2) {
- err = -EPROTONOSUPPORT;
- goto out;
- }
+ *to = flags;
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_get_flags64);
- err = ib_uverbs_cmd_verbs(ib_dev, file, &hdr,
- (__user void *)arg + sizeof(hdr));
+int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ u64 flags;
+ int ret;
+
+ ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits);
+ if (ret)
+ return ret;
+
+ if (flags > U32_MAX)
+ return -EINVAL;
+ *to = flags;
+
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_get_flags32);
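
From a handler's point of view, the contract of these getters is: a missing optional attribute reads as zero flags, while a size other than 4 or 8 bytes, or any bit outside allowed_bits, is -EINVAL. A hypothetical call site (the attr id and flag bits below are illustrative, not real uapi values):

#include <linux/bitops.h>

enum { EXAMPLE_ATTR_FLAGS = 1 };	/* hypothetical attribute id */
#define EXAMPLE_FLAG_A BIT(0)
#define EXAMPLE_FLAG_B BIT(1)

static int example_handler(struct ib_uverbs_file *file,
			   struct uverbs_attr_bundle *attrs)
{
	u32 flags;
	int ret;

	ret = uverbs_get_flags32(&flags, attrs, EXAMPLE_ATTR_FLAGS,
				 EXAMPLE_FLAG_A | EXAMPLE_FLAG_B);
	if (ret)	/* unknown bit, or a bad attribute size */
		return ret;

	/* a missing attribute yields flags == 0, not an error */
	return 0;
}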
+
+/*
+ * This is for ease of conversion. The purpose is to convert all drivers to
+ * use uverbs_attr_bundle instead of ib_udata. Assume attr == 0 is input and
+ * attr == 1 is output.
+ */
+void create_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ const struct uverbs_attr *uhw_in =
+ uverbs_attr_get(bundle, UVERBS_ATTR_UHW_IN);
+ const struct uverbs_attr *uhw_out =
+ uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
+
+ if (!IS_ERR(uhw_in)) {
+ udata->inlen = uhw_in->ptr_attr.len;
+ if (uverbs_attr_ptr_is_inline(uhw_in))
+ udata->inbuf =
+ &pbundle->user_attrs[uhw_in->ptr_attr.uattr_idx]
+ .data;
+ else
+ udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
} else {
- err = -ENOIOCTLCMD;
+ udata->inbuf = NULL;
+ udata->inlen = 0;
}
-out:
- srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
- return err;
+ if (!IS_ERR(uhw_out)) {
+ udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
+ udata->outlen = uhw_out->ptr_attr.len;
+ } else {
+ udata->outbuf = NULL;
+ udata->outlen = 0;
+ }
+}
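
Until a driver is fully converted, its handlers can keep their ib_udata plumbing and simply rebuild the udata from the bundle. A hypothetical transitional handler (legacy_driver_create() is a stand-in, not a real hook):

static int legacy_driver_create(struct ib_udata *udata);	/* stand-in */

static int example_create(struct ib_uverbs_file *file,
			  struct uverbs_attr_bundle *attrs)
{
	struct ib_udata udata;

	/* maps UVERBS_ATTR_UHW_IN/OUT onto inbuf/outbuf as above */
	create_udata(attrs, &udata);
	return legacy_driver_create(&udata);
}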
+
+int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
+ const void *from, size_t size)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+ u16 flags;
+ size_t min_size;
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ min_size = min_t(size_t, attr->ptr_attr.len, size);
+ if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
+ return -EFAULT;
+
+ flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
+ UVERBS_ATTR_F_VALID_OUTPUT;
+ if (put_user(flags,
+ &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
+ return -EFAULT;
+
+ return 0;
}
+EXPORT_SYMBOL(uverbs_copy_to);
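
Note the two-step contract: the payload is truncated to the user-supplied buffer length, and UVERBS_ATTR_F_VALID_OUTPUT is written back into the user's attr so userspace can tell which outputs were actually produced. A hypothetical response path (struct example_resp and EXAMPLE_ATTR_RESP are illustrative):

struct example_resp {
	__u32 handle;
	__u32 reserved;
};

enum { EXAMPLE_ATTR_RESP = 2 };	/* hypothetical attribute id */

static int example_reply(struct uverbs_attr_bundle *attrs, u32 handle)
{
	struct example_resp resp = { .handle = handle };

	/* copies min(user buffer len, sizeof(resp)), then marks valid */
	return uverbs_copy_to(attrs, EXAMPLE_ATTR_RESP, &resp,
			      sizeof(resp));
}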
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
deleted file mode 100644
index 6ceb672c4d46..000000000000
--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/uverbs_ioctl.h>
-#include <rdma/rdma_user_ioctl.h>
-#include <linux/bitops.h>
-#include "uverbs.h"
-
-#define UVERBS_NUM_NS (UVERBS_ID_NS_MASK >> UVERBS_ID_NS_SHIFT)
-#define GET_NS_ID(idx) (((idx) & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT)
-#define GET_ID(idx) ((idx) & ~UVERBS_ID_NS_MASK)
-
-#define _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset, \
- buckets_offset) \
- for (tmpj = 0, \
- elem = (*(const void ***)((hashes)[tmpi] + \
- (buckets_offset)))[0]; \
- tmpj < *(size_t *)((hashes)[tmpi] + (num_buckets_offset)); \
- tmpj++) \
- if ((elem = ((*(const void ***)(hashes[tmpi] + \
- (buckets_offset)))[tmpj])))
-
-/*
- * Iterate all elements of a few @hashes. The number of given hashes is
- * indicated by @num_hashes. The offset of the number of buckets in the hash is
- * represented by @num_buckets_offset, while the offset of the buckets array in
- * the hash structure is represented by @buckets_offset. tmpi and tmpj are two
- * short (or int) based indices that are given by the user. tmpi iterates over
- * the different hashes. @elem points to the current element in the hashes[tmpi]
- * bucket we are looping on. Strictly speaking, the @hashes representation isn't
- * exactly a hash, but more a collection of elements. These elements' ids are
- * treated in a hash-like manner, where the upper bits are the bucket number.
- * These elements are later mapped into a perfect hash.
- */
-#define for_each_element(elem, tmpi, tmpj, hashes, num_hashes, \
- num_buckets_offset, buckets_offset) \
- for (tmpi = 0; tmpi < (num_hashes); tmpi++) \
- _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset,\
- buckets_offset)
-
-#define get_elements_iterators_entry_above(iters, num_elements, elements, \
- num_objects_fld, objects_fld, bucket,\
- min_id) \
- get_elements_above_id((const void **)iters, num_elements, \
- (const void **)(elements), \
- offsetof(typeof(**elements), \
- num_objects_fld), \
- offsetof(typeof(**elements), objects_fld),\
- offsetof(typeof(***(*elements)->objects_fld), id),\
- bucket, min_id)
-
-#define get_objects_above_id(iters, num_trees, trees, bucket, min_id) \
- get_elements_iterators_entry_above(iters, num_trees, trees, \
- num_objects, objects, bucket, min_id)
-
-#define get_methods_above_id(method_iters, num_iters, iters, bucket, min_id)\
- get_elements_iterators_entry_above(method_iters, num_iters, iters, \
- num_methods, methods, bucket, min_id)
-
-#define get_attrs_above_id(attrs_iters, num_iters, iters, bucket, min_id)\
- get_elements_iterators_entry_above(attrs_iters, num_iters, iters, \
- num_attrs, attrs, bucket, min_id)
-
-/*
- * get_elements_above_id gets a few hashes represented by @elements and
- * @num_elements. The hashes' fields are described by @num_offset, @data_offset
- * and @id_offset in the same way as required by for_each_element. The function
- * fills the @iters array with the elements from the hashes' buckets whose ids
- * are the smallest ids in all hashes yet still larger than the id given by
- * min_id. Elements are only added to the iters array if their id belongs to
- * the bucket @bucket. The function returns the number of elements in the
- * resulting array. @min_id is also updated to reflect the new minimum id of
- * all elements in iters.
- */
-static size_t get_elements_above_id(const void **iters,
- unsigned int num_elements,
- const void **elements,
- size_t num_offset,
- size_t data_offset,
- size_t id_offset,
- u16 bucket,
- short *min_id)
-{
- size_t num_iters = 0;
- short min = SHRT_MAX;
- const void *elem;
- int i, j, last_stored = -1;
- unsigned int equal_min = 0;
-
- for_each_element(elem, i, j, elements, num_elements, num_offset,
- data_offset) {
- u16 id = *(u16 *)(elem + id_offset);
-
- if (GET_NS_ID(id) != bucket)
- continue;
-
- if (GET_ID(id) < *min_id ||
- (min != SHRT_MAX && GET_ID(id) > min))
- continue;
-
- /*
- * We first iterate all hashes represented by @elements. When
- * we do, we try to find an element @elem in the bucket @bucket
- * whose id is minimal. Since we can't ensure the user sorted
- * the elements in increasing order, we override the minimal-id
- * element we previously found for this hash if a new element
- * with a smaller id is found.
- */
- iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
- last_stored = i;
- if (min == GET_ID(id))
- equal_min++;
- else
- equal_min = 1;
- min = GET_ID(id);
- }
-
- /*
- * We only insert an element into our iters array if its id is smaller
- * than all previous ids. Therefore, the final iters array is sorted so
- * that smaller ids are at the end of the array.
- * We thus need to clean the beginning of the array to make sure
- * all ids of the final elements are equal to min.
- */
- memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
-
- *min_id = min;
- return equal_min;
-}
-
-#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
- objects_fld, bucket) \
- find_max_element_id(num_elements, (const void **)(elements), \
- offsetof(typeof(**elements), num_objects_fld), \
- offsetof(typeof(**elements), objects_fld), \
- offsetof(typeof(***(*elements)->objects_fld), id),\
- bucket)
-
-static short find_max_element_ns_id(unsigned int num_elements,
- const void **elements,
- size_t num_offset,
- size_t data_offset,
- size_t id_offset)
-{
- short max_ns = SHRT_MIN;
- const void *elem;
- int i, j;
-
- for_each_element(elem, i, j, elements, num_elements, num_offset,
- data_offset) {
- u16 id = *(u16 *)(elem + id_offset);
-
- if (GET_NS_ID(id) > max_ns)
- max_ns = GET_NS_ID(id);
- }
-
- return max_ns;
-}
-
-static short find_max_element_id(unsigned int num_elements,
- const void **elements,
- size_t num_offset,
- size_t data_offset,
- size_t id_offset,
- u16 bucket)
-{
- short max_id = SHRT_MIN;
- const void *elem;
- int i, j;
-
- for_each_element(elem, i, j, elements, num_elements, num_offset,
- data_offset) {
- u16 id = *(u16 *)(elem + id_offset);
-
- if (GET_NS_ID(id) == bucket &&
- GET_ID(id) > max_id)
- max_id = GET_ID(id);
- }
- return max_id;
-}
-
-#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
- objects_fld, bucket) \
- find_max_element_id(num_elements, (const void **)(elements), \
- offsetof(typeof(**elements), num_objects_fld), \
- offsetof(typeof(**elements), objects_fld), \
- offsetof(typeof(***(*elements)->objects_fld), id),\
- bucket)
-
-#define find_max_element_ns_entry_id(num_elements, elements, \
- num_objects_fld, objects_fld) \
- find_max_element_ns_id(num_elements, (const void **)(elements), \
- offsetof(typeof(**elements), num_objects_fld),\
- offsetof(typeof(**elements), objects_fld), \
- offsetof(typeof(***(*elements)->objects_fld), id))
-
-/*
- * find_max_xxxx_ns_id gets a few elements. Each element is described by an id
- * whose upper bits represent a namespace. It finds the max namespace. This
- * can be used to determine how many buckets we need to allocate. If no
- * elements exist, SHRT_MIN is returned. A namespace here represents different
- * buckets. The common example is "common bucket" and "driver bucket".
- *
- * find_max_xxxx_id gets a few elements and a bucket. Each element is described
- * by an id whose upper bits represent a namespace. It returns the max id
- * contained in the same namespace defined by @bucket. This can be used to
- * determine how many elements we need to allocate in the bucket.
- * If no elements exist, SHRT_MIN is returned.
- */
-
-#define find_max_object_id(num_trees, trees, bucket) \
- find_max_element_entry_id(num_trees, trees, num_objects,\
- objects, bucket)
-#define find_max_object_ns_id(num_trees, trees) \
- find_max_element_ns_entry_id(num_trees, trees, \
- num_objects, objects)
-
-#define find_max_method_id(num_iters, iters, bucket) \
- find_max_element_entry_id(num_iters, iters, num_methods,\
- methods, bucket)
-#define find_max_method_ns_id(num_iters, iters) \
- find_max_element_ns_entry_id(num_iters, iters, \
- num_methods, methods)
-
-#define find_max_attr_id(num_iters, iters, bucket) \
- find_max_element_entry_id(num_iters, iters, num_attrs, \
- attrs, bucket)
-#define find_max_attr_ns_id(num_iters, iters) \
- find_max_element_ns_entry_id(num_iters, iters, \
- num_attrs, attrs)
-
-static void free_method(struct uverbs_method_spec *method)
-{
- unsigned int i;
-
- if (!method)
- return;
-
- for (i = 0; i < method->num_buckets; i++)
- kfree(method->attr_buckets[i]);
-
- kfree(method);
-}
-
-#define IS_ATTR_OBJECT(attr) ((attr)->type == UVERBS_ATTR_TYPE_IDR || \
- (attr)->type == UVERBS_ATTR_TYPE_FD)
-
-/*
- * This function gets an array of size @num_method_defs containing pointers to
- * method definitions @method_defs. The function allocates an
- * uverbs_method_spec structure and initializes its number of buckets and the
- * elements in buckets to the correct attributes. While doing that, it
- * validates that there are no conflicts between attributes of different
- * method_defs.
- */
-static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_method_def **method_defs,
- size_t num_method_defs)
-{
- int bucket_idx;
- int max_attr_buckets = 0;
- size_t num_attr_buckets = 0;
- int res = 0;
- struct uverbs_method_spec *method = NULL;
- const struct uverbs_attr_def **attr_defs;
- unsigned int num_of_singularities = 0;
-
- max_attr_buckets = find_max_attr_ns_id(num_method_defs, method_defs);
- if (max_attr_buckets >= 0)
- num_attr_buckets = max_attr_buckets + 1;
-
- method = kzalloc(struct_size(method, attr_buckets, num_attr_buckets),
- GFP_KERNEL);
- if (!method)
- return ERR_PTR(-ENOMEM);
-
- method->num_buckets = num_attr_buckets;
- attr_defs = kcalloc(num_method_defs, sizeof(*attr_defs), GFP_KERNEL);
- if (!attr_defs) {
- res = -ENOMEM;
- goto free_method;
- }
- for (bucket_idx = 0; bucket_idx < method->num_buckets; bucket_idx++) {
- short min_id = SHRT_MIN;
- int attr_max_bucket = 0;
- struct uverbs_attr_spec_hash *hash = NULL;
-
- attr_max_bucket = find_max_attr_id(num_method_defs, method_defs,
- bucket_idx);
- if (attr_max_bucket < 0)
- continue;
-
- hash = kzalloc(sizeof(*hash) +
- ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
- sizeof(long)) +
- BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
- GFP_KERNEL);
- if (!hash) {
- res = -ENOMEM;
- goto free;
- }
- hash->num_attrs = attr_max_bucket + 1;
- method->num_child_attrs += hash->num_attrs;
- hash->mandatory_attrs_bitmask = (void *)(hash + 1) +
- ALIGN(sizeof(*hash->attrs) *
- (attr_max_bucket + 1),
- sizeof(long));
-
- method->attr_buckets[bucket_idx] = hash;
-
- do {
- size_t num_attr_defs;
- struct uverbs_attr_spec *attr;
- bool attr_obj_with_special_access;
-
- num_attr_defs =
- get_attrs_above_id(attr_defs,
- num_method_defs,
- method_defs,
- bucket_idx,
- &min_id);
- /* Last attr in bucket */
- if (!num_attr_defs)
- break;
-
- if (num_attr_defs > 1) {
- /*
- * We don't allow two attribute definitions for
- * the same attribute. This is usually a
- * programmer error. If required, it's better to
- * just add a new attribute to capture the new
- * semantics.
- */
- res = -EEXIST;
- goto free;
- }
-
- attr = &hash->attrs[min_id];
- memcpy(attr, &attr_defs[0]->attr, sizeof(*attr));
-
- attr_obj_with_special_access = IS_ATTR_OBJECT(attr) &&
- (attr->obj.access == UVERBS_ACCESS_NEW ||
- attr->obj.access == UVERBS_ACCESS_DESTROY);
- num_of_singularities += !!attr_obj_with_special_access;
- if (WARN(num_of_singularities > 1,
- "ib_uverbs: Method contains more than one object attr (%d) with new/destroy access\n",
- min_id) ||
- WARN(attr_obj_with_special_access &&
- !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
- "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
- min_id) ||
- WARN(IS_ATTR_OBJECT(attr) &&
- attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- "ib_uverbs: Tried to merge attr (%d) but it's an object with min_sz flag\n",
- min_id)) {
- res = -EINVAL;
- goto free;
- }
-
- if (attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY)
- set_bit(min_id, hash->mandatory_attrs_bitmask);
- min_id++;
-
- } while (1);
- }
- kfree(attr_defs);
- return method;
-
-free:
- kfree(attr_defs);
-free_method:
- free_method(method);
- return ERR_PTR(res);
-}
-
-static void free_object(struct uverbs_object_spec *object)
-{
- unsigned int i, j;
-
- if (!object)
- return;
-
- for (i = 0; i < object->num_buckets; i++) {
- struct uverbs_method_spec_hash *method_buckets =
- object->method_buckets[i];
-
- if (!method_buckets)
- continue;
-
- for (j = 0; j < method_buckets->num_methods; j++)
- free_method(method_buckets->methods[j]);
-
- kfree(method_buckets);
- }
-
- kfree(object);
-}
-
-/*
- * This function gets an array of size @num_object_defs containing pointers to
- * object definitions @object_defs. The function allocates an
- * uverbs_object_spec structure and initializes its number of buckets and the
- * elements in buckets to the correct methods. While doing that, it
- * sorts out the correct relationship between conflicts in the same method.
- */
-static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_object_def **object_defs,
- size_t num_object_defs)
-{
- u16 bucket_idx;
- int max_method_buckets = 0;
- u16 num_method_buckets = 0;
- int res = 0;
- struct uverbs_object_spec *object = NULL;
- const struct uverbs_method_def **method_defs;
-
- max_method_buckets = find_max_method_ns_id(num_object_defs, object_defs);
- if (max_method_buckets >= 0)
- num_method_buckets = max_method_buckets + 1;
-
- object = kzalloc(struct_size(object, method_buckets,
- num_method_buckets),
- GFP_KERNEL);
- if (!object)
- return ERR_PTR(-ENOMEM);
-
- object->num_buckets = num_method_buckets;
- method_defs = kcalloc(num_object_defs, sizeof(*method_defs), GFP_KERNEL);
- if (!method_defs) {
- res = -ENOMEM;
- goto free_object;
- }
-
- for (bucket_idx = 0; bucket_idx < object->num_buckets; bucket_idx++) {
- short min_id = SHRT_MIN;
- int methods_max_bucket = 0;
- struct uverbs_method_spec_hash *hash = NULL;
-
- methods_max_bucket = find_max_method_id(num_object_defs, object_defs,
- bucket_idx);
- if (methods_max_bucket < 0)
- continue;
-
- hash = kzalloc(struct_size(hash, methods,
- methods_max_bucket + 1),
- GFP_KERNEL);
- if (!hash) {
- res = -ENOMEM;
- goto free;
- }
-
- hash->num_methods = methods_max_bucket + 1;
- object->method_buckets[bucket_idx] = hash;
-
- do {
- size_t num_method_defs;
- struct uverbs_method_spec *method;
- int i;
-
- num_method_defs =
- get_methods_above_id(method_defs,
- num_object_defs,
- object_defs,
- bucket_idx,
- &min_id);
- /* Last method in bucket */
- if (!num_method_defs)
- break;
-
- method = build_method_with_attrs(method_defs,
- num_method_defs);
- if (IS_ERR(method)) {
- res = PTR_ERR(method);
- goto free;
- }
-
- /*
- * The last tree which is given as an argument to the
- * merge overrides the previous method handler.
- * Therefore, we iterate backwards and search for the
- * first non-NULL handler. This also defines the
- * set of flags used for this handler.
- */
- for (i = num_method_defs - 1;
- i >= 0 && !method_defs[i]->handler; i--)
- ;
- hash->methods[min_id++] = method;
- /* NULL handler isn't allowed */
- if (WARN(i < 0,
- "ib_uverbs: tried to merge function id %d, but all handlers are NULL\n",
- min_id)) {
- res = -EINVAL;
- goto free;
- }
- method->handler = method_defs[i]->handler;
- method->flags = method_defs[i]->flags;
-
- } while (1);
- }
- kfree(method_defs);
- return object;
-
-free:
- kfree(method_defs);
-free_object:
- free_object(object);
- return ERR_PTR(res);
-}
-
-void uverbs_free_spec_tree(struct uverbs_root_spec *root)
-{
- unsigned int i, j;
-
- if (!root)
- return;
-
- for (i = 0; i < root->num_buckets; i++) {
- struct uverbs_object_spec_hash *object_hash =
- root->object_buckets[i];
-
- if (!object_hash)
- continue;
-
- for (j = 0; j < object_hash->num_objects; j++)
- free_object(object_hash->objects[j]);
-
- kfree(object_hash);
- }
-
- kfree(root);
-}
-EXPORT_SYMBOL(uverbs_free_spec_tree);
-
-struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
- const struct uverbs_object_tree_def **trees)
-{
- u16 bucket_idx;
- short max_object_buckets = 0;
- size_t num_objects_buckets = 0;
- struct uverbs_root_spec *root_spec = NULL;
- const struct uverbs_object_def **object_defs;
- int i;
- int res = 0;
-
- max_object_buckets = find_max_object_ns_id(num_trees, trees);
- /*
- * Devices which don't want to support ib_uverbs should just allocate
- * an empty parsing tree. No user-space command will hit a valid
- * entry in the parsing tree, and thus they will all fail.
- */
- if (max_object_buckets >= 0)
- num_objects_buckets = max_object_buckets + 1;
-
- root_spec = kzalloc(struct_size(root_spec, object_buckets,
- num_objects_buckets),
- GFP_KERNEL);
- if (!root_spec)
- return ERR_PTR(-ENOMEM);
- root_spec->num_buckets = num_objects_buckets;
-
- object_defs = kcalloc(num_trees, sizeof(*object_defs),
- GFP_KERNEL);
- if (!object_defs) {
- res = -ENOMEM;
- goto free_root;
- }
-
- for (bucket_idx = 0; bucket_idx < root_spec->num_buckets; bucket_idx++) {
- short min_id = SHRT_MIN;
- short objects_max_bucket;
- struct uverbs_object_spec_hash *hash = NULL;
-
- objects_max_bucket = find_max_object_id(num_trees, trees,
- bucket_idx);
- if (objects_max_bucket < 0)
- continue;
-
- hash = kzalloc(struct_size(hash, objects,
- objects_max_bucket + 1),
- GFP_KERNEL);
- if (!hash) {
- res = -ENOMEM;
- goto free;
- }
- hash->num_objects = objects_max_bucket + 1;
- root_spec->object_buckets[bucket_idx] = hash;
-
- do {
- size_t num_object_defs;
- struct uverbs_object_spec *object;
-
- num_object_defs = get_objects_above_id(object_defs,
- num_trees,
- trees,
- bucket_idx,
- &min_id);
- /* Last object in bucket */
- if (!num_object_defs)
- break;
-
- object = build_object_with_methods(object_defs,
- num_object_defs);
- if (IS_ERR(object)) {
- res = PTR_ERR(object);
- goto free;
- }
-
- /*
- * The last tree which is given as an argument to the
- * merge overrides the previous object's type_attrs.
- * Therefore, we iterate backwards and search for the
- * first non-NULL type_attrs.
- */
- for (i = num_object_defs - 1;
- i >= 0 && !object_defs[i]->type_attrs; i--)
- ;
- /*
- * NULL is a valid type_attrs. It means an object we
- * can't instantiate (like DEVICE).
- */
- object->type_attrs = i < 0 ? NULL :
- object_defs[i]->type_attrs;
-
- hash->objects[min_id++] = object;
- } while (1);
- }
-
- kfree(object_defs);
- return root_spec;
-
-free:
- kfree(object_defs);
-free_root:
- uverbs_free_spec_tree(root_spec);
- return ERR_PTR(res);
-}
-EXPORT_SYMBOL(uverbs_alloc_spec_tree);
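
For readers tracing why this file could be deleted: the whole merge machinery keyed everything off a 16-bit id split into a namespace in the upper bits (GET_NS_ID) and an in-bucket id in the lower bits (GET_ID); the radix-tree uapi that replaces it encodes the same information through the uapi_key_*() helpers seen earlier. A sketch of the split with illustrative values (the real mask and shift come from UVERBS_ID_NS_MASK/UVERBS_ID_NS_SHIFT in the uapi headers):

#include <linux/types.h>

#define EX_NS_MASK  0xF000	/* illustrative, not the uapi value */
#define EX_NS_SHIFT 12

static inline u16 ex_ns(u16 id) { return (id & EX_NS_MASK) >> EX_NS_SHIFT; }
static inline u16 ex_id(u16 id) { return id & ~EX_NS_MASK; }

/* e.g. id 0x1005 lands in namespace 1 as in-bucket id 0x005 */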
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2094d136513d..823beca448e1 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -41,8 +41,6 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
@@ -77,7 +75,6 @@ static struct class *uverbs_class;
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len) = {
[IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
@@ -118,7 +115,6 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
};
static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
@@ -138,6 +134,30 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
+/*
+ * Must be called with ufile->device->disassociate_srcu held, and the lock
+ * must be held until use of the ucontext is finished.
+ */
+struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile)
+{
+ /*
+ * We do not hold the hw_destroy_rwsem lock for this flow; SRCU is
+ * used instead. It does not matter if someone races this with
+ * get_context: we get either NULL or a valid ucontext.
+ */
+ struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);
+
+ if (!srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu))
+ return ERR_PTR(-EIO);
+
+ if (!ucontext)
+ return ERR_PTR(-EINVAL);
+
+ return ucontext;
+}
+EXPORT_SYMBOL(ib_uverbs_get_ucontext);
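
The smp_load_acquire() here implies a paired smp_store_release() wherever ucontext is published, so a reader that observes a non-NULL pointer also observes the fully initialized object. A generic sketch of that pairing (struct obj, make_obj() and the globals are hypothetical):

#include <linux/atomic.h>

struct obj;
static struct obj *published;
static struct obj *make_obj(void);	/* hypothetical constructor */

static void publisher(void)
{
	struct obj *o = make_obj();	/* fully initialize first... */

	smp_store_release(&published, o);	/* ...then publish */
}

static struct obj *consumer(void)
{
	/* pairs with the release: sees NULL or a fully built obj */
	return smp_load_acquire(&published);
}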
+
int uverbs_dealloc_mw(struct ib_mw *mw)
{
struct ib_pd *pd = mw->pd;
@@ -154,6 +174,7 @@ static void ib_uverbs_release_dev(struct kobject *kobj)
struct ib_uverbs_device *dev =
container_of(kobj, struct ib_uverbs_device, kobj);
+ uverbs_destroy_api(dev->uapi);
cleanup_srcu_struct(&dev->disassociate_srcu);
kfree(dev);
}
@@ -184,7 +205,7 @@ void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
}
spin_unlock_irq(&ev_file->ev_queue.lock);
- uverbs_uobject_put(&ev_file->uobj_file.uobj);
+ uverbs_uobject_put(&ev_file->uobj);
}
spin_lock_irq(&file->async_file->ev_queue.lock);
@@ -220,20 +241,6 @@ void ib_uverbs_detach_umcast(struct ib_qp *qp,
}
}
-static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
- struct ib_ucontext *context,
- bool device_removed)
-{
- context->closing = 1;
- uverbs_cleanup_ucontext(context, device_removed);
- put_pid(context->tgid);
-
- ib_rdmacg_uncharge(&context->cg_obj, context->device,
- RDMACG_RESOURCE_HCA_HANDLE);
-
- return context->device->dealloc_ucontext(context);
-}
-
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
complete(&dev->comp);
@@ -246,6 +253,8 @@ void ib_uverbs_release_file(struct kref *ref)
struct ib_device *ib_dev;
int srcu_key;
+ release_ufile_idr_uobject(file);
+
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
ib_dev = srcu_dereference(file->device->ib_dev,
&file->device->disassociate_srcu);
@@ -338,7 +347,7 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
filp->private_data;
return ib_uverbs_event_read(&comp_ev_file->ev_queue,
- comp_ev_file->uobj_file.ufile, filp,
+ comp_ev_file->uobj.ufile, filp,
buf, count, pos,
sizeof(struct ib_uverbs_comp_event_desc));
}
@@ -420,7 +429,9 @@ static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
- struct ib_uverbs_completion_event_file *file = filp->private_data;
+ struct ib_uobject *uobj = filp->private_data;
+ struct ib_uverbs_completion_event_file *file = container_of(
+ uobj, struct ib_uverbs_completion_event_file, uobj);
struct ib_uverbs_event *entry, *tmp;
spin_lock_irq(&file->ev_queue.lock);
@@ -528,7 +539,7 @@ void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
struct ib_ucq_object, uobject);
- ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
+ ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
event->event, &uobj->async_list,
&uobj->async_events_reported);
}
@@ -637,13 +648,13 @@ err_put_refs:
return filp;
}
-static bool verify_command_mask(struct ib_device *ib_dev,
- u32 command, bool extended)
+static bool verify_command_mask(struct ib_uverbs_file *ufile, u32 command,
+ bool extended)
{
if (!extended)
- return ib_dev->uverbs_cmd_mask & BIT_ULL(command);
+ return ufile->uverbs_cmd_mask & BIT_ULL(command);
- return ib_dev->uverbs_ex_cmd_mask & BIT_ULL(command);
+ return ufile->uverbs_ex_cmd_mask & BIT_ULL(command);
}
static bool verify_command_idx(u32 command, bool extended)
@@ -713,7 +724,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_uverbs_ex_cmd_hdr ex_hdr;
- struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
bool extended;
int srcu_key;
@@ -748,24 +758,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
return ret;
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- ret = -EIO;
- goto out;
- }
-
- /*
- * Must be after the ib_dev check, as once the RCU clears ib_dev ==
- * NULL means ucontext == NULL
- */
- if (!file->ucontext &&
- (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
- ret = -EINVAL;
- goto out;
- }
- if (!verify_command_mask(ib_dev, command, extended)) {
+ if (!verify_command_mask(file, command, extended)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -773,7 +767,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
buf += sizeof(hdr);
if (!extended) {
- ret = uverbs_cmd_table[command](file, ib_dev, buf,
+ ret = uverbs_cmd_table[command](file, buf,
hdr.in_words * 4,
hdr.out_words * 4);
} else {
@@ -792,7 +786,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
ex_hdr.provider_in_words * 8,
ex_hdr.provider_out_words * 8);
- ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
+ ret = uverbs_ex_cmd_table[command](file, &ucore, &uhw);
ret = (ret) ? : count;
}
@@ -804,22 +798,18 @@ out:
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ib_uverbs_file *file = filp->private_data;
- struct ib_device *ib_dev;
+ struct ib_ucontext *ucontext;
int ret = 0;
int srcu_key;
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- ret = -EIO;
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext)) {
+ ret = PTR_ERR(ucontext);
goto out;
}
- if (!file->ucontext)
- ret = -ENODEV;
- else
- ret = ib_dev->mmap(file->ucontext, vma);
+ ret = ucontext->device->mmap(ucontext, vma);
out:
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
return ret;
@@ -879,13 +869,12 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
}
file->device = dev;
- spin_lock_init(&file->idr_lock);
- idr_init(&file->idr);
- file->ucontext = NULL;
- file->async_file = NULL;
kref_init(&file->ref);
- mutex_init(&file->mutex);
- mutex_init(&file->cleanup_mutex);
+ mutex_init(&file->ucontext_lock);
+
+ spin_lock_init(&file->uobjects_lock);
+ INIT_LIST_HEAD(&file->uobjects);
+ init_rwsem(&file->hw_destroy_rwsem);
filp->private_data = file;
kobject_get(&dev->kobj);
@@ -893,6 +882,11 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
+ file->uverbs_cmd_mask = ib_dev->uverbs_cmd_mask;
+ file->uverbs_ex_cmd_mask = ib_dev->uverbs_ex_cmd_mask;
+
+ setup_ufile_idr_uobject(file);
+
return nonseekable_open(inode, filp);
err_module:
@@ -911,13 +905,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
- mutex_lock(&file->cleanup_mutex);
- if (file->ucontext) {
- ib_uverbs_cleanup_ucontext(file, file->ucontext, false);
- file->ucontext = NULL;
- }
- mutex_unlock(&file->cleanup_mutex);
- idr_destroy(&file->idr);
+ uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);
mutex_lock(&file->device->lists_mutex);
if (!file->is_closed) {
@@ -1006,6 +994,19 @@ static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
+static int ib_uverbs_create_uapi(struct ib_device *device,
+ struct ib_uverbs_device *uverbs_dev)
+{
+ struct uverbs_api *uapi;
+
+ uapi = uverbs_alloc_api(device->driver_specs, device->driver_id);
+ if (IS_ERR(uapi))
+ return PTR_ERR(uapi);
+
+ uverbs_dev->uapi = uapi;
+ return 0;
+}
+
static void ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
@@ -1048,6 +1049,9 @@ static void ib_uverbs_add_one(struct ib_device *device)
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
+ if (ib_uverbs_create_uapi(device, uverbs_dev))
+ goto err;
+
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
@@ -1067,18 +1071,6 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
goto err_class;
- if (!device->specs_root) {
- const struct uverbs_object_tree_def *default_root[] = {
- uverbs_default_get_objects()};
-
- uverbs_dev->specs_root = uverbs_alloc_spec_tree(1,
- default_root);
- if (IS_ERR(uverbs_dev->specs_root))
- goto err_class;
-
- device->specs_root = uverbs_dev->specs_root;
- }
-
ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
@@ -1098,44 +1090,6 @@ err:
return;
}
-static void ib_uverbs_disassociate_ucontext(struct ib_ucontext *ibcontext)
-{
- struct ib_device *ib_dev = ibcontext->device;
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the
- * task struct.
- */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- down_write(&owning_mm->mmap_sem);
- ib_dev->disassociate_ucontext(ibcontext);
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
-}
-
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_device *ib_dev)
{
@@ -1144,46 +1098,31 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_event event;
/* Pending running commands to terminate */
- synchronize_srcu(&uverbs_dev->disassociate_srcu);
+ uverbs_disassociate_api_pre(uverbs_dev);
event.event = IB_EVENT_DEVICE_FATAL;
event.element.port_num = 0;
event.device = ib_dev;
mutex_lock(&uverbs_dev->lists_mutex);
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
- struct ib_ucontext *ucontext;
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
file->is_closed = 1;
list_del(&file->list);
kref_get(&file->ref);
- mutex_unlock(&uverbs_dev->lists_mutex);
-
-
- mutex_lock(&file->cleanup_mutex);
- ucontext = file->ucontext;
- file->ucontext = NULL;
- mutex_unlock(&file->cleanup_mutex);
- /* At this point ib_uverbs_close cannot be running
- * ib_uverbs_cleanup_ucontext
+ /* We must release the mutex before going ahead and calling
+ * uverbs_cleanup_ufile, as it might end up indirectly calling
+ * uverbs_close, for example due to freeing the resources (e.g.
+ * mmput).
*/
- if (ucontext) {
- /* We must release the mutex before going ahead and
- * calling disassociate_ucontext. disassociate_ucontext
- * might end up indirectly calling uverbs_close,
- * for example due to freeing the resources
- * (e.g mmput).
- */
- ib_uverbs_event_handler(&file->event_handler, &event);
- ib_uverbs_disassociate_ucontext(ucontext);
- mutex_lock(&file->cleanup_mutex);
- ib_uverbs_cleanup_ucontext(file, ucontext, true);
- mutex_unlock(&file->cleanup_mutex);
- }
+ mutex_unlock(&uverbs_dev->lists_mutex);
- mutex_lock(&uverbs_dev->lists_mutex);
+ ib_uverbs_event_handler(&file->event_handler, &event);
+ uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
kref_put(&file->ref, ib_uverbs_release_file);
+
+ mutex_lock(&uverbs_dev->lists_mutex);
}
while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
@@ -1205,6 +1144,8 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
mutex_unlock(&uverbs_dev->lists_mutex);
+
+ uverbs_disassociate_api(uverbs_dev->uapi);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
@@ -1232,7 +1173,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
* cdev was deleted, however active clients can still issue
* commands and close their open files.
*/
- rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
ib_uverbs_free_hw_resources(uverbs_dev, device);
wait_clients = 0;
}
@@ -1241,10 +1181,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
ib_uverbs_comp_dev(uverbs_dev);
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
- if (uverbs_dev->specs_root) {
- uverbs_free_spec_tree(uverbs_dev->specs_root);
- device->specs_root = NULL;
- }
kobject_put(&uverbs_dev->kobj);
}
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index bb372b4713a4..b8d715c68ca4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -211,7 +211,5 @@ void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
/* TODO: No need to set this */
sa_path_set_dmac_zero(dst);
- sa_path_set_ndev(dst, NULL);
- sa_path_set_ifindex(dst, 0);
}
EXPORT_SYMBOL(ib_copy_path_rec_from_user);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index b570acbd94af..203cc96ac6f5 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -48,14 +48,18 @@ static int uverbs_free_ah(struct ib_uobject *uobject,
static int uverbs_free_flow(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
- int ret;
struct ib_flow *flow = (struct ib_flow *)uobject->object;
struct ib_uflow_object *uflow =
container_of(uobject, struct ib_uflow_object, uobject);
+ struct ib_qp *qp = flow->qp;
+ int ret;
- ret = ib_destroy_flow(flow);
- if (!ret)
+ ret = flow->device->destroy_flow(flow);
+ if (!ret) {
+ if (qp)
+ atomic_dec(&qp->usecnt);
ib_uverbs_flow_resources_free(uflow->resources);
+ }
return ret;
}
@@ -74,6 +78,13 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
container_of(uobject, struct ib_uqp_object, uevent.uobject);
int ret;
+ /*
+ * If this is a user-triggered destroy, then do not allow destruction
+ * until the user cleans up all the mcast bindings. Unlike in other
+ * places we forcibly clean up the mcast attachments for !DESTROY
+ * because the mcast attaches are not uobjects and will not be
+ * destroyed by anything else during cleanup processing.
+ */
if (why == RDMA_REMOVE_DESTROY) {
if (!list_empty(&uqp->mcast_list))
return -EBUSY;
@@ -82,7 +93,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
}
ret = ib_destroy_qp(qp);
- if (ret && why == RDMA_REMOVE_DESTROY)
+ if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
if (uqp->uxrcd)
@@ -100,8 +111,10 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
- if (!ret || why != RDMA_REMOVE_DESTROY)
- kfree(ind_tbl);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ kfree(ind_tbl);
return ret;
}
@@ -114,8 +127,10 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_wq(wq);
- if (!ret || why != RDMA_REMOVE_DESTROY)
- ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
return ret;
}
@@ -129,8 +144,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_srq(srq);
-
- if (ret && why == RDMA_REMOVE_DESTROY)
+ if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
if (srq_type == IB_SRQT_XRC) {
@@ -152,12 +166,12 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject,
container_of(uobject, struct ib_uxrcd_object, uobject);
int ret;
+ ret = ib_destroy_usecnt(&uxrcd->refcnt, why, uobject);
+ if (ret)
+ return ret;
+
mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
- if (why == RDMA_REMOVE_DESTROY && atomic_read(&uxrcd->refcnt))
- ret = -EBUSY;
- else
- ret = ib_uverbs_dealloc_xrcd(uobject->context->ufile->device,
- xrcd, why);
+ ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why);
mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);
return ret;
@@ -167,20 +181,22 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_pd *pd = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY && atomic_read(&pd->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&pd->usecnt, why, uobject);
+ if (ret)
+ return ret;
ib_dealloc_pd((struct ib_pd *)uobject->object);
return 0;
}
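
All of these conversions funnel through the same pair of helpers. Judging only from the call sites in this patch, a failed user-requested destroy (RDMA_REMOVE_DESTROY) is reported back to userspace instead of being forced, and a live use count maps to -EBUSY under the same rule. A minimal model consistent with that behavior; the real definitions live in the rdma headers and may consult more state (the ex_ names are stand-ins):

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

static inline bool ex_destroy_retryable(int ret, enum rdma_remove_reason why)
{
	/* a failed user-requested destroy is reported, not forced */
	return ret && why == RDMA_REMOVE_DESTROY;
}

static inline int ex_destroy_usecnt(atomic_t *usecnt,
				    enum rdma_remove_reason why)
{
	if (atomic_read(usecnt) && ex_destroy_retryable(-EBUSY, why))
		return -EBUSY;
	return 0;
}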
-static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_file,
+static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj,
enum rdma_remove_reason why)
{
struct ib_uverbs_completion_event_file *comp_event_file =
- container_of(uobj_file, struct ib_uverbs_completion_event_file,
- uobj_file);
+ container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
spin_lock_irq(&event_queue->lock);
@@ -194,119 +210,77 @@ static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_
return 0;
};
-int uverbs_destroy_def_handler(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
+int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs)
{
return 0;
}
+EXPORT_SYMBOL(uverbs_destroy_def_handler);
-/*
- * This spec is used to pass information to the hardware driver in a
- * legacy way. Every verb that could get driver-specific data should get this
- * spec.
- */
-const struct uverbs_attr_def uverbs_uhw_compat_in =
- UVERBS_ATTR_PTR_IN_SZ(UVERBS_ATTR_UHW_IN, UVERBS_ATTR_SIZE(0, USHRT_MAX),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO));
-const struct uverbs_attr_def uverbs_uhw_compat_out =
- UVERBS_ATTR_PTR_OUT_SZ(UVERBS_ATTR_UHW_OUT, UVERBS_ATTR_SIZE(0, USHRT_MAX),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO));
-
-void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata)
-{
- /*
- * This is for ease of conversion. The purpose is to convert all drivers
- * to use uverbs_attr_bundle instead of ib_udata.
- * Assume attr == 0 is input and attr == 1 is output.
- */
- const struct uverbs_attr *uhw_in =
- uverbs_attr_get(ctx, UVERBS_ATTR_UHW_IN);
- const struct uverbs_attr *uhw_out =
- uverbs_attr_get(ctx, UVERBS_ATTR_UHW_OUT);
-
- if (!IS_ERR(uhw_in)) {
- udata->inlen = uhw_in->ptr_attr.len;
- if (uverbs_attr_ptr_is_inline(uhw_in))
- udata->inbuf = &uhw_in->uattr->data;
- else
- udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
- } else {
- udata->inbuf = NULL;
- udata->inlen = 0;
- }
-
- if (!IS_ERR(uhw_out)) {
- udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
- udata->outlen = uhw_out->ptr_attr.len;
- } else {
- udata->outbuf = NULL;
- udata->outlen = 0;
- }
-}
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COMP_CHANNEL,
- &UVERBS_TYPE_ALLOC_FD(0,
- sizeof(struct ib_uverbs_completion_event_file),
- uverbs_hot_unplug_completion_event_file,
- &uverbs_event_fops,
- "[infinibandevent]", O_RDONLY));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
+ uverbs_hot_unplug_completion_event_file,
+ &uverbs_event_fops,
+ "[infinibandevent]",
+ O_RDONLY));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_QP,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
- uverbs_free_qp));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_QP,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_SRQ,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
- uverbs_free_srq));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_SRQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
+ uverbs_free_srq));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
- 0, uverbs_free_flow));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_FLOW,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
+ uverbs_free_flow));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_WQ,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
- uverbs_free_wq));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_WQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_XRCD,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
- uverbs_free_xrcd));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_XRCD,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object),
+ uverbs_free_xrcd));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD,
- /* 2 is used in order to free the PD after MRs */
- &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DEVICE, NULL);
-
-static DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
- &UVERBS_OBJECT(UVERBS_OBJECT_DEVICE),
- &UVERBS_OBJECT(UVERBS_OBJECT_PD),
- &UVERBS_OBJECT(UVERBS_OBJECT_MR),
- &UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL),
- &UVERBS_OBJECT(UVERBS_OBJECT_CQ),
- &UVERBS_OBJECT(UVERBS_OBJECT_QP),
- &UVERBS_OBJECT(UVERBS_OBJECT_AH),
- &UVERBS_OBJECT(UVERBS_OBJECT_MW),
- &UVERBS_OBJECT(UVERBS_OBJECT_SRQ),
- &UVERBS_OBJECT(UVERBS_OBJECT_FLOW),
- &UVERBS_OBJECT(UVERBS_OBJECT_WQ),
- &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
- &UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
- &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
- &UVERBS_OBJECT(UVERBS_OBJECT_DM),
- &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE);
+
+DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
+ &UVERBS_OBJECT(UVERBS_OBJECT_DEVICE),
+ &UVERBS_OBJECT(UVERBS_OBJECT_PD),
+ &UVERBS_OBJECT(UVERBS_OBJECT_MR),
+ &UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL),
+ &UVERBS_OBJECT(UVERBS_OBJECT_CQ),
+ &UVERBS_OBJECT(UVERBS_OBJECT_QP),
+ &UVERBS_OBJECT(UVERBS_OBJECT_AH),
+ &UVERBS_OBJECT(UVERBS_OBJECT_MW),
+ &UVERBS_OBJECT(UVERBS_OBJECT_SRQ),
+ &UVERBS_OBJECT(UVERBS_OBJECT_FLOW),
+ &UVERBS_OBJECT(UVERBS_OBJECT_WQ),
+ &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
+ &UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
+ &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
+ &UVERBS_OBJECT(UVERBS_OBJECT_DM),
+ &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
{
return &uverbs_default_objects;
}
-EXPORT_SYMBOL_GPL(uverbs_default_get_objects);
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 03b182a684a6..a0ffdcf9a51c 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -38,20 +38,22 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_counters *counters = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY &&
- atomic_read(&counters->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&counters->usecnt, why, uobject);
+ if (ret)
+ return ret;
return counters->device->destroy_counters(counters);
}
-static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
+ struct ib_device *ib_dev = uobj->context->device;
struct ib_counters *counters;
- struct ib_uobject *uobj;
int ret;
/*
@@ -62,7 +64,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(struct ib_device *ib_de
if (!ib_dev->create_counters)
return -EOPNOTSUPP;
- uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
counters = ib_dev->create_counters(ib_dev, attrs);
if (IS_ERR(counters)) {
ret = PTR_ERR(counters);
@@ -80,9 +81,8 @@ err_create_counters:
return ret;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
struct ib_counters_read_attr read_attr = {};
const struct uverbs_attr *uattr;
@@ -90,68 +90,62 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(struct ib_device *ib_dev,
uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
int ret;
- if (!ib_dev->read_counters)
+ if (!counters->device->read_counters)
return -EOPNOTSUPP;
if (!atomic_read(&counters->usecnt))
return -EINVAL;
- ret = uverbs_copy_from(&read_attr.flags, attrs,
- UVERBS_ATTR_READ_COUNTERS_FLAGS);
+ ret = uverbs_get_flags32(&read_attr.flags, attrs,
+ UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ IB_UVERBS_READ_COUNTERS_PREFER_CACHED);
if (ret)
return ret;
uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
- read_attr.counters_buff = kcalloc(read_attr.ncounters,
- sizeof(u64), GFP_KERNEL);
- if (!read_attr.counters_buff)
- return -ENOMEM;
-
- ret = ib_dev->read_counters(counters,
- &read_attr,
- attrs);
- if (ret)
- goto err_read;
+ read_attr.counters_buff = uverbs_zalloc(
+ attrs, array_size(read_attr.ncounters, sizeof(u64)));
+ if (IS_ERR(read_attr.counters_buff))
+ return PTR_ERR(read_attr.counters_buff);
- ret = uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
- read_attr.counters_buff,
- read_attr.ncounters * sizeof(u64));
+ ret = counters->device->read_counters(counters, &read_attr, attrs);
+ if (ret)
+ return ret;
-err_read:
- kfree(read_attr.counters_buff);
- return ret;
+ return uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
+ read_attr.counters_buff,
+ read_attr.ncounters * sizeof(u64));
}
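
Switching to uverbs_zalloc() is what lets the err_read label and the explicit kfree() disappear: the allocation is owned by the bundle and released in bundle_destroy() via the allocated_mem chain seen earlier. A sketch of such a chained, bundle-scoped allocator; the ex_ names are illustrative and the real uverbs_zalloc() may differ (for instance by carving from the bundle's internal buffer first):

#include <linux/err.h>
#include <linux/mm.h>

struct ex_alloc_head {
	struct ex_alloc_head *next;
	u8 data[];
};

static void *ex_bundle_zalloc(struct ex_alloc_head **chain, size_t size)
{
	struct ex_alloc_head *h = kvzalloc(sizeof(*h) + size, GFP_KERNEL);

	if (!h)
		return ERR_PTR(-ENOMEM);
	h->next = *chain;	/* freed later by kvfree()ing the chain */
	*chain = h;
	return h->data;
}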
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_CREATE,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
- UVERBS_OBJECT_COUNTERS,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_COUNTERS_DESTROY,
- uverbs_destroy_def_handler,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
- UVERBS_OBJECT_COUNTERS,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-#define MAX_COUNTERS_BUFF_SIZE USHRT_MAX
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_READ,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
- UVERBS_OBJECT_COUNTERS,
- UVERBS_ACCESS_READ,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
- UVERBS_ATTR_SIZE(0, MAX_COUNTERS_BUFF_SIZE),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
- UVERBS_ATTR_TYPE(__u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_COUNTERS_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_COUNTERS_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_COUNTERS_READ,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
+ UVERBS_ATTR_MIN_SIZE(0),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ enum ib_uverbs_read_counters_flags));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_counters),
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_counters),
&UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE),
&UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY),
&UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ));
-
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index 3d293d01afea..5b5f2052cd52 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -44,21 +44,26 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_cq(cq);
- if (!ret || why != RDMA_REMOVE_DESTROY)
- ib_uverbs_release_ucq(uobject->context->ufile, ev_queue ?
- container_of(ev_queue,
- struct ib_uverbs_completion_event_file,
- ev_queue) : NULL,
- ucq);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ ib_uverbs_release_ucq(
+ uobject->context->ufile,
+ ev_queue ? container_of(ev_queue,
+ struct ib_uverbs_completion_event_file,
+ ev_queue) :
+ NULL,
+ ucq);
return ret;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
- struct ib_ucontext *ucontext = file->ucontext;
- struct ib_ucq_object *obj;
+ struct ib_ucq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
+ typeof(*obj), uobject);
+ struct ib_device *ib_dev = obj->uobject.context->device;
struct ib_udata uhw;
int ret;
u64 user_handle;
@@ -67,7 +72,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_uobject *ev_file_uobj;
- if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
+ if (!ib_dev->create_cq || !ib_dev->destroy_cq)
return -EOPNOTSUPP;
ret = uverbs_copy_from(&attr.comp_vector, attrs,
@@ -81,28 +86,26 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
if (ret)
return ret;
- /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
- if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&attr.flags, attrs,
- UVERBS_ATTR_CREATE_CQ_FLAGS)))
- return -EFAULT;
+ ret = uverbs_get_flags32(&attr.flags, attrs,
+ UVERBS_ATTR_CREATE_CQ_FLAGS,
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION |
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN);
+ if (ret)
+ return ret;
ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
if (!IS_ERR(ev_file_uobj)) {
ev_file = container_of(ev_file_uobj,
struct ib_uverbs_completion_event_file,
- uobj_file.uobj);
+ uobj);
uverbs_uobject_get(ev_file_uobj);
}
- if (attr.comp_vector >= ucontext->ufile->device->num_comp_vectors) {
+ if (attr.comp_vector >= file->device->num_comp_vectors) {
ret = -EINVAL;
goto err_event_file;
}
- obj = container_of(uverbs_attr_get_uobject(attrs,
- UVERBS_ATTR_CREATE_CQ_HANDLE),
- typeof(*obj), uobject);
- obj->uverbs_file = ucontext->ufile;
obj->comp_events_reported = 0;
obj->async_events_reported = 0;
INIT_LIST_HEAD(&obj->comp_list);
@@ -111,7 +114,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
/* Temporary, only until drivers get the new uverbs_attr_bundle */
create_udata(attrs, &uhw);
- cq = ib_dev->create_cq(ib_dev, &attr, ucontext, &uhw);
+ cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, &uhw);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_event_file;
@@ -143,69 +146,64 @@ err_event_file:
return ret;
};
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_CQ_CREATE,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE,
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_CQ_FLAGS,
+ enum ib_uverbs_ex_create_cq_flags),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE,
UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
- UVERBS_OBJECT_COMP_CHANNEL,
- UVERBS_ACCESS_READ),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_FLAGS, UVERBS_ATTR_TYPE(u32)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE, UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &uverbs_uhw_compat_in, &uverbs_uhw_compat_out);
-
-static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+ UA_MANDATORY),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj =
uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
- struct ib_uverbs_destroy_cq_resp resp;
- struct ib_ucq_object *obj;
- int ret;
-
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- obj = container_of(uobj, struct ib_ucq_object, uobject);
-
- if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
- return -EOPNOTSUPP;
-
- ret = rdma_explicit_destroy(uobj);
- if (ret)
- return ret;
-
- resp.comp_events_reported = obj->comp_events_reported;
- resp.async_events_reported = obj->async_events_reported;
+ struct ib_ucq_object *obj =
+ container_of(uobj, struct ib_ucq_object, uobject);
+ struct ib_uverbs_destroy_cq_resp resp = {
+ .comp_events_reported = obj->comp_events_reported,
+ .async_events_reported = obj->async_events_reported
+ };
return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_CQ_RESP, &resp,
sizeof(resp));
}
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_CQ_DESTROY,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_CQ,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
- uverbs_free_cq),
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_CQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_CQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), uverbs_free_cq),
+
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI)
- &UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE),
- &UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY)
+ &UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY)
#endif
- );
-
+);
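The declaration conversions in this series follow one mechanical recipe, visible above: the leading '&' on each attribute goes away, UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY) collapses to UA_MANDATORY (with UA_OPTIONAL now spelled out), raw u32 flag attributes become typed UVERBS_ATTR_FLAGS_IN() entries, and the IDR allocator macros drop their destruction-order argument. A hedged sketch of a method in the new style, using hypothetical MY_* ids:

DECLARE_UVERBS_NAMED_METHOD(
	MY_METHOD_CREATE,
	UVERBS_ATTR_IDR(MY_ATTR_HANDLE,
			MY_OBJECT,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MY_ATTR_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MY_ATTR_FLAGS,
			     enum my_create_flags));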
diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c
index 8b681575b615..edc3ff7733d4 100644
--- a/drivers/infiniband/core/uverbs_std_types_dm.c
+++ b/drivers/infiniband/core/uverbs_std_types_dm.c
@@ -37,20 +37,24 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_dm *dm = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY && atomic_read(&dm->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&dm->usecnt, why, uobject);
+ if (ret)
+ return ret;
return dm->device->dealloc_dm(dm);
}
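ib_destroy_usecnt() here, like ib_is_destroy_retryable() in the CQ path above, replaces the open-coded "busy object versus forced teardown" test. A hedged sketch of the semantics such a helper needs (an illustration, not a quote of the real header):

/* A user-requested destroy of a busy object is refused with -EBUSY
 * and can be retried; teardown for device removal must proceed.
 */
static inline int example_destroy_usecnt(atomic_t *usecnt,
					 enum rdma_remove_reason why)
{
	if (atomic_read(usecnt) && why == RDMA_REMOVE_DESTROY)
		return -EBUSY;
	return 0;
}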
-static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int
+UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
{
- struct ib_ucontext *ucontext = file->ucontext;
struct ib_dm_alloc_attr attr = {};
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE)
+ ->obj_attr.uobject;
+ struct ib_device *ib_dev = uobj->context->device;
struct ib_dm *dm;
int ret;
@@ -67,9 +71,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_device *ib_dev,
if (ret)
return ret;
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE)->obj_attr.uobject;
-
- dm = ib_dev->alloc_dm(ib_dev, ucontext, &attr, attrs);
+ dm = ib_dev->alloc_dm(ib_dev, uobj->context, &attr, attrs);
if (IS_ERR(dm))
return PTR_ERR(dm);
@@ -83,26 +85,27 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_device *ib_dev,
return 0;
}
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_DM_ALLOC,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DM_HANDLE, UVERBS_OBJECT_DM,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_LENGTH,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
- UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_DM_FREE,
- uverbs_destroy_def_handler,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DM_HANDLE,
- UVERBS_OBJECT_DM,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_DM_FREE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DM,
- /* 1 is used in order to free the DM after MRs */
- &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_dm),
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_dm),
&UVERBS_METHOD(UVERBS_METHOD_DM_ALLOC),
&UVERBS_METHOD(UVERBS_METHOD_DM_FREE));
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index a7be51cf2e42..d8cfafe23bd9 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -37,10 +37,11 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_flow_action *action = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY &&
- atomic_read(&action->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&action->usecnt, why, uobject);
+ if (ret)
+ return ret;
return action->device->destroy_flow_action(action);
}
@@ -303,12 +304,13 @@ static int parse_flow_action_esp(struct ib_device *ib_dev,
return 0;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
+ struct ib_device *ib_dev = uobj->context->device;
int ret;
- struct ib_uobject *uobj;
struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
@@ -320,7 +322,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device
return ret;
/* No need to check as this attribute is marked as MANDATORY */
- uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
if (IS_ERR(action))
return PTR_ERR(action);
@@ -334,102 +335,109 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device
return 0;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE);
+ struct ib_flow_action *action = uobj->object;
int ret;
- struct ib_uobject *uobj;
- struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
- if (!ib_dev->modify_flow_action_esp)
+ if (!action->device->modify_flow_action_esp)
return -EOPNOTSUPP;
- ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr, true);
+ ret = parse_flow_action_esp(action->device, file, attrs, &esp_attr,
+ true);
if (ret)
return ret;
- uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
- action = uobj->object;
-
if (action->type != IB_FLOW_ACTION_ESP)
return -EINVAL;
- return ib_dev->modify_flow_action_esp(action,
- &esp_attr.hdr,
- attrs);
+ return action->device->modify_flow_action_esp(action, &esp_attr.hdr,
+ attrs);
}
static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
- { .ptr = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm),
- .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- } },
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_STRUCT(
+ struct ib_uverbs_flow_action_esp_keymat_aes_gcm,
+ aes_key),
},
};
static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
- { .ptr = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- /* No need to specify any data */
- .len = 0,
- } }
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
},
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
- { .ptr = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size),
- .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- } }
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp,
+ size),
},
};
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, hard_limit_pkts),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY |
- UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, UVERBS_ATTR_TYPE(__u32)),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_encap, type)));
-
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_WRITE,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, hard_limit_pkts),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, UVERBS_ATTR_TYPE(__u32)),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_encap, type)));
-
-static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_FLOW_ACTION_DESTROY,
- uverbs_destroy_def_handler,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW_ACTION,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow_action),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
-
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
+ hard_limit_pkts),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_TYPE(__u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ uverbs_flow_action_esp_keymat,
+ UA_MANDATORY),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ uverbs_flow_action_esp_replay,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_WRITE,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
+ hard_limit_pkts),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_TYPE(__u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ uverbs_flow_action_esp_keymat,
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ uverbs_flow_action_esp_replay,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_FLOW_ACTION_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 68f7cadf088f..cf02e774303e 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -39,14 +39,18 @@ static int uverbs_free_mr(struct ib_uobject *uobject,
return ib_dereg_mr((struct ib_mr *)uobject->object);
}
-static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
struct ib_dm_mr_attr attr = {};
- struct ib_uobject *uobj;
- struct ib_dm *dm;
- struct ib_pd *pd;
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
+ struct ib_dm *dm =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE);
+ struct ib_device *ib_dev = pd->device;
+
struct ib_mr *mr;
int ret;
@@ -62,8 +66,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(struct ib_device *ib_dev,
if (ret)
return ret;
- ret = uverbs_copy_from(&attr.access_flags, attrs,
- UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS);
+ ret = uverbs_get_flags32(&attr.access_flags, attrs,
+ UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ IB_ACCESS_SUPPORTED);
if (ret)
return ret;
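uverbs_get_flags32() tightens what uverbs_copy_from() allowed: any flag bit outside the kernel-supported mask is rejected, and, judging from its pairing with UA_OPTIONAL attributes elsewhere in this series, a missing optional attribute reads back as zero flags. A hedged usage sketch with a hypothetical attribute id:

u32 access_flags;
int err;

/* Unknown bits fail here, so new userspace flags cannot slip
 * through an older kernel unvalidated.
 */
err = uverbs_get_flags32(&access_flags, attrs, MY_ATTR_ACCESS_FLAGS,
			 IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ);
if (err)
	return err;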
@@ -74,12 +79,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(struct ib_device *ib_dev,
if (ret)
return ret;
- pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE);
-
- dm = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE);
-
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE)->obj_attr.uobject;
-
if (attr.offset > dm->length || attr.length > dm->length ||
attr.length > dm->length - attr.offset)
return -EINVAL;
@@ -115,33 +114,36 @@ err_dereg:
return ret;
}
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_DM_MR_REG,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE, UVERBS_OBJECT_MR,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE, UVERBS_OBJECT_PD,
- UVERBS_ACCESS_READ,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DM_MR_REG,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ enum ib_access_flags),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE, UVERBS_OBJECT_DM,
- UVERBS_ACCESS_READ,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
- UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
- UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MR,
- /* 1 is used in order to free the MR after all the MWs */
- &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr),
- &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG));
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_MR,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr),
+ &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG));
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
new file mode 100644
index 000000000000..73ea6f0db88f
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ */
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/rdma_user_ioctl.h>
+#include <linux/bitops.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size)
+{
+ void *elm;
+ int rc;
+
+ if (key == UVERBS_API_KEY_ERR)
+ return ERR_PTR(-EOVERFLOW);
+
+ elm = kzalloc(alloc_size, GFP_KERNEL);
+ rc = radix_tree_insert(&uapi->radix, key, elm);
+ if (rc) {
+ kfree(elm);
+ return ERR_PTR(rc);
+ }
+
+ return elm;
+}
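uapi_add_elm() keys objects, methods, and attributes into a single radix tree. The key helpers used below (uapi_key_obj(), uapi_key_ioctl_method(), uapi_key_attr(), defined elsewhere in this series) pack the three levels into disjoint bit ranges of one u32, so composing a key is just OR-ing the levels together. A sketch of the scheme as the merge code uses it:

/* Illustration only; the exact bit layout lives in rdma_core.h. */
u32 obj_key    = uapi_key_obj(obj->id);
u32 method_key = obj_key | uapi_key_ioctl_method(method->id);
u32 attr_key   = method_key | uapi_key_attr(attr->id);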
+
+static int uapi_merge_method(struct uverbs_api *uapi,
+ struct uverbs_api_object *obj_elm, u32 obj_key,
+ const struct uverbs_method_def *method,
+ bool is_driver)
+{
+ u32 method_key = obj_key | uapi_key_ioctl_method(method->id);
+ struct uverbs_api_ioctl_method *method_elm;
+ unsigned int i;
+
+ if (!method->attrs)
+ return 0;
+
+ method_elm = uapi_add_elm(uapi, method_key, sizeof(*method_elm));
+ if (IS_ERR(method_elm)) {
+ if (method_elm != ERR_PTR(-EEXIST))
+ return PTR_ERR(method_elm);
+
+ /*
+ * This occurs when a driver uses ADD_UVERBS_ATTRIBUTES_SIMPLE
+ */
+ if (WARN_ON(method->handler))
+ return -EINVAL;
+ method_elm = radix_tree_lookup(&uapi->radix, method_key);
+ if (WARN_ON(!method_elm))
+ return -EINVAL;
+ } else {
+ WARN_ON(!method->handler);
+ rcu_assign_pointer(method_elm->handler, method->handler);
+ if (method->handler != uverbs_destroy_def_handler)
+ method_elm->driver_method = is_driver;
+ }
+
+ for (i = 0; i != method->num_attrs; i++) {
+ const struct uverbs_attr_def *attr = (*method->attrs)[i];
+ struct uverbs_api_attr *attr_slot;
+
+ if (!attr)
+ continue;
+
+ /*
+ * ENUM_IN contains the 'ids' pointer to the driver's .rodata,
+ * so if it is specified by a driver then it always makes this
+ * into a driver method.
+ */
+ if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN)
+ method_elm->driver_method |= is_driver;
+
+ attr_slot =
+ uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id),
+ sizeof(*attr_slot));
+ /* Attributes are not allowed to be modified by drivers */
+ if (IS_ERR(attr_slot))
+ return PTR_ERR(attr_slot);
+
+ attr_slot->spec = attr->attr;
+ }
+
+ return 0;
+}
+
+static int uapi_merge_tree(struct uverbs_api *uapi,
+ const struct uverbs_object_tree_def *tree,
+ bool is_driver)
+{
+ unsigned int i, j;
+ int rc;
+
+ if (!tree->objects)
+ return 0;
+
+ for (i = 0; i != tree->num_objects; i++) {
+ const struct uverbs_object_def *obj = (*tree->objects)[i];
+ struct uverbs_api_object *obj_elm;
+ u32 obj_key;
+
+ if (!obj)
+ continue;
+
+ obj_key = uapi_key_obj(obj->id);
+ obj_elm = uapi_add_elm(uapi, obj_key, sizeof(*obj_elm));
+ if (IS_ERR(obj_elm)) {
+ if (obj_elm != ERR_PTR(-EEXIST))
+ return PTR_ERR(obj_elm);
+
+ /* This occurs when a driver uses ADD_UVERBS_METHODS */
+ if (WARN_ON(obj->type_attrs))
+ return -EINVAL;
+ obj_elm = radix_tree_lookup(&uapi->radix, obj_key);
+ if (WARN_ON(!obj_elm))
+ return -EINVAL;
+ } else {
+ obj_elm->type_attrs = obj->type_attrs;
+ if (obj->type_attrs) {
+ obj_elm->type_class =
+ obj->type_attrs->type_class;
+ /*
+ * Today drivers are only permitted to use
+ * idr_class types. They cannot use FD types
+ * because we currently have no way to revoke
+ * the fops pointer after device
+ * disassociation.
+ */
+ if (WARN_ON(is_driver &&
+ obj->type_attrs->type_class !=
+ &uverbs_idr_class))
+ return -EINVAL;
+ }
+ }
+
+ if (!obj->methods)
+ continue;
+
+ for (j = 0; j != obj->num_methods; j++) {
+ const struct uverbs_method_def *method =
+ (*obj->methods)[j];
+ if (!method)
+ continue;
+
+ rc = uapi_merge_method(uapi, obj_elm, obj_key, method,
+ is_driver);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int
+uapi_finalize_ioctl_method(struct uverbs_api *uapi,
+ struct uverbs_api_ioctl_method *method_elm,
+ u32 method_key)
+{
+ struct radix_tree_iter iter;
+ unsigned int num_attrs = 0;
+ unsigned int max_bkey = 0;
+ bool single_uobj = false;
+ void __rcu **slot;
+
+ method_elm->destroy_bkey = UVERBS_API_ATTR_BKEY_LEN;
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter,
+ uapi_key_attrs_start(method_key)) {
+ struct uverbs_api_attr *elm =
+ rcu_dereference_protected(*slot, true);
+ u32 attr_key = iter.index & UVERBS_API_ATTR_KEY_MASK;
+ u32 attr_bkey = uapi_bkey_attr(attr_key);
+ u8 type = elm->spec.type;
+
+ if (uapi_key_attr_to_method(iter.index) !=
+ uapi_key_attr_to_method(method_key))
+ break;
+
+ if (elm->spec.mandatory)
+ __set_bit(attr_bkey, method_elm->attr_mandatory);
+
+ if (type == UVERBS_ATTR_TYPE_IDR ||
+ type == UVERBS_ATTR_TYPE_FD) {
+ u8 access = elm->spec.u.obj.access;
+
+ /*
+ * Verbs specs may only have one NEW/DESTROY; we don't
+ * have the infrastructure to abort multiple NEWs or
+ * cope with multiple DESTROY failures.
+ */
+ if (access == UVERBS_ACCESS_NEW ||
+ access == UVERBS_ACCESS_DESTROY) {
+ if (WARN_ON(single_uobj))
+ return -EINVAL;
+
+ single_uobj = true;
+ if (WARN_ON(!elm->spec.mandatory))
+ return -EINVAL;
+ }
+
+ if (access == UVERBS_ACCESS_DESTROY)
+ method_elm->destroy_bkey = attr_bkey;
+ }
+
+ max_bkey = max(max_bkey, attr_bkey);
+ num_attrs++;
+ }
+
+ method_elm->key_bitmap_len = max_bkey + 1;
+ WARN_ON(method_elm->key_bitmap_len > UVERBS_API_ATTR_BKEY_LEN);
+
+ uapi_compute_bundle_size(method_elm, num_attrs);
+ return 0;
+}
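Finalization maps the sparse attribute keys onto a dense "bkey" space so per-method bookkeeping can be plain bitmaps of key_bitmap_len bits. One payoff, sketched under the assumption that dispatch tracks supplied attributes in a bitmap of its own: mandatory-attribute validation becomes a single bitmap_subset() test.

/* Hypothetical dispatch-side check; 'attr_present' is a per-call
 * bitmap with one bit set per supplied attribute bkey.
 */
if (!bitmap_subset(method_elm->attr_mandatory, attr_present,
		   method_elm->key_bitmap_len))
	return -EINVAL;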
+
+static int uapi_finalize(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ int rc;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ struct uverbs_api_ioctl_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (uapi_key_is_ioctl_method(iter.index)) {
+ rc = uapi_finalize_ioctl_method(uapi, method_elm,
+ iter.index);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+void uverbs_destroy_api(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ if (!uapi)
+ return;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ kfree(rcu_dereference_protected(*slot, true));
+ radix_tree_iter_delete(&uapi->radix, &iter, slot);
+ }
+}
+
+struct uverbs_api *uverbs_alloc_api(
+ const struct uverbs_object_tree_def *const *driver_specs,
+ enum rdma_driver_id driver_id)
+{
+ struct uverbs_api *uapi;
+ int rc;
+
+ uapi = kzalloc(sizeof(*uapi), GFP_KERNEL);
+ if (!uapi)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_RADIX_TREE(&uapi->radix, GFP_KERNEL);
+ uapi->driver_id = driver_id;
+
+ rc = uapi_merge_tree(uapi, uverbs_default_get_objects(), false);
+ if (rc)
+ goto err;
+
+ for (; driver_specs && *driver_specs; driver_specs++) {
+ rc = uapi_merge_tree(uapi, *driver_specs, true);
+ if (rc)
+ goto err;
+ }
+
+ rc = uapi_finalize(uapi);
+ if (rc)
+ goto err;
+
+ return uapi;
+err:
+ if (rc != -ENOMEM)
+ pr_err("Setup of uverbs_api failed, kernel parsing tree description is not valid (%d)??\n",
+ rc);
+
+ uverbs_destroy_api(uapi);
+ return ERR_PTR(rc);
+}
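The common tree is always merged first and the driver trees are layered on top of it, so driver specs can extend but never silently redefine core methods. A hedged sketch of the calling convention, with a hypothetical driver tree (the array is NULL-terminated, matching the loop above):

static const struct uverbs_object_tree_def *const my_driver_specs[] = {
	&my_driver_tree,	/* hypothetical driver-defined tree */
	NULL,
};

struct uverbs_api *uapi;

uapi = uverbs_alloc_api(my_driver_specs, RDMA_DRIVER_UNKNOWN);
if (IS_ERR(uapi))
	return PTR_ERR(uapi);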
+
+/*
+ * The pre version is done before destroying the HW objects; it only blocks
+ * off method access. All methods that require the ib_dev or the module data
+ * must test one of these assignments prior to continuing.
+ */
+void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev)
+{
+ struct uverbs_api *uapi = uverbs_dev->uapi;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ if (uapi_key_is_ioctl_method(iter.index)) {
+ struct uverbs_api_ioctl_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (method_elm->driver_method)
+ rcu_assign_pointer(method_elm->handler, NULL);
+ }
+ }
+
+ synchronize_srcu(&uverbs_dev->disassociate_srcu);
+}
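The read side implied by that comment would re-check the RCU-published pointers under disassociate_srcu on every call; once synchronize_srcu() returns here, no handler of a blocked-off method can still be executing. A hedged sketch of such a read side (not the actual dispatch code; run_method() is hypothetical):

int srcu_key = srcu_read_lock(&uverbs_dev->disassociate_srcu);
struct ib_device *ib_dev =
	srcu_dereference(uverbs_dev->ib_dev,
			 &uverbs_dev->disassociate_srcu);

if (!ib_dev)
	ret = -EIO;	/* device already disassociated */
else
	ret = run_method(ib_dev, method_elm, attrs);
srcu_read_unlock(&uverbs_dev->disassociate_srcu, srcu_key);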
+
+/*
+ * Called when a driver disassociates from the ib_uverbs_device. The
+ * assumption is that the driver module will unload after. Replace everything
+ * related to the driver with NULL as a safety measure.
+ */
+void uverbs_disassociate_api(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ if (uapi_key_is_object(iter.index)) {
+ struct uverbs_api_object *object_elm =
+ rcu_dereference_protected(*slot, true);
+
+ /*
+ * Some type_attrs are in the driver module. We don't
+ * bother to keep track of which since there should be
+ * no use of this after disassociate.
+ */
+ object_elm->type_attrs = NULL;
+ } else if (uapi_key_is_attr(iter.index)) {
+ struct uverbs_api_attr *elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (elm->spec.type == UVERBS_ATTR_TYPE_ENUM_IN)
+ elm->spec.u2.enum_def.ids = NULL;
+ }
+ }
+}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 9d6beb948535..6ee03d6089eb 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -326,12 +326,162 @@ EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */
+/**
+ * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
+ * @dest: Pointer to destination ah_attr. Contents of the destination
+ * pointer are assumed to be invalid and the attributes are overwritten.
+ * @src: Pointer to source ah_attr.
+ */
+void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
+ const struct rdma_ah_attr *src)
+{
+ *dest = *src;
+ if (dest->grh.sgid_attr)
+ rdma_hold_gid_attr(dest->grh.sgid_attr);
+}
+EXPORT_SYMBOL(rdma_copy_ah_attr);
+
+/**
+ * rdma_replace_ah_attr - Replace valid ah_attr with a new one.
+ * @old: Pointer to existing ah_attr which needs to be replaced.
+ * old is assumed to be valid or zero'd
+ * @new: Pointer to the new ah_attr.
+ *
+ * rdma_replace_ah_attr() first releases any reference held by the old
+ * ah_attr if the old ah_attr is valid; after that it copies the new
+ * attribute and takes a reference on its SGID attribute.
+ */
+void rdma_replace_ah_attr(struct rdma_ah_attr *old,
+ const struct rdma_ah_attr *new)
+{
+ rdma_destroy_ah_attr(old);
+ *old = *new;
+ if (old->grh.sgid_attr)
+ rdma_hold_gid_attr(old->grh.sgid_attr);
+}
+EXPORT_SYMBOL(rdma_replace_ah_attr);
+
+/**
+ * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
+ * @dest: Pointer to destination ah_attr to copy to.
+ * dest is assumed to be valid or zero'd
+ * @src: Pointer to the new ah_attr.
+ *
+ * rdma_move_ah_attr() first releases any reference in the destination ah_attr
+ * if it is valid. This also transfers ownership of internal references from
+ * src to dest, making src invalid in the process. No new reference to the
+ * src ah_attr is taken.
+ */
+void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
+{
+ rdma_destroy_ah_attr(dest);
+ *dest = *src;
+ src->grh.sgid_attr = NULL;
+}
+EXPORT_SYMBOL(rdma_move_ah_attr);
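The three helpers differ only in how they handle the embedded sgid_attr reference. A summary in code form, assuming dst and src are two rdma_ah_attr variables in the states the kernel-doc above requires:

struct rdma_ah_attr dst = {};	/* zero'd or valid, per helper */
struct rdma_ah_attr src;	/* assumed valid here */

/* copy: dst is overwritten, src untouched; one extra reference is
 * taken on src's sgid_attr.
 */
rdma_copy_ah_attr(&dst, &src);

/* replace: dst's old sgid_attr reference is dropped first, then the
 * copy takes a new one; src is untouched.
 */
rdma_replace_ah_attr(&dst, &src);

/* move: dst's old reference is dropped, src's reference is
 * transferred; src->grh.sgid_attr becomes NULL and src must not be
 * used afterwards.
 */
rdma_move_ah_attr(&dst, &src);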
+
+/*
+ * Validate that the rdma_ah_attr is valid for the device before passing it
+ * off to the driver.
+ */
+static int rdma_check_ah_attr(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ if (!rdma_is_port_valid(device, ah_attr->port_num))
+ return -EINVAL;
+
+ if ((rdma_is_grh_required(device, ah_attr->port_num) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ return -EINVAL;
+
+ if (ah_attr->grh.sgid_attr) {
+ /*
+ * Make sure the passed sgid_attr is consistent with the
+ * parameters
+ */
+ if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
+ ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * If the AH requires a GRH then ensure that the sgid_attr pointer is filled
+ * in. On success the caller is responsible for calling rdma_unfill_sgid_attr().
+ */
+static int rdma_fill_sgid_attr(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr **old_sgid_attr)
+{
+ const struct ib_gid_attr *sgid_attr;
+ struct ib_global_route *grh;
+ int ret;
+
+ *old_sgid_attr = ah_attr->grh.sgid_attr;
+
+ ret = rdma_check_ah_attr(device, ah_attr);
+ if (ret)
+ return ret;
+
+ if (!(ah_attr->ah_flags & IB_AH_GRH))
+ return 0;
+
+ grh = rdma_ah_retrieve_grh(ah_attr);
+ if (grh->sgid_attr)
+ return 0;
+
+ sgid_attr =
+ rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ /* Move ownership of the kref into the ah_attr */
+ grh->sgid_attr = sgid_attr;
+ return 0;
+}
+
+static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *old_sgid_attr)
+{
+ /*
+ * Fill didn't change anything; the caller retains ownership of
+ * whatever it passed
+ */
+ if (ah_attr->grh.sgid_attr == old_sgid_attr)
+ return;
+
+ /*
+ * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the
+ * caller doesn't see any change in the rdma_ah_attr. If we get here
+ * old_sgid_attr is NULL.
+ */
+ rdma_destroy_ah_attr(ah_attr);
+}
+
+static const struct ib_gid_attr *
+rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *old_attr)
+{
+ if (old_attr)
+ rdma_put_gid_attr(old_attr);
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
+ return ah_attr->grh.sgid_attr;
+ }
+ return NULL;
+}
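These three helpers bracket every driver call that consumes an rdma_ah_attr: fill pins an sgid_attr if the caller did not supply one, update transfers the reference into the long-lived kernel object, and unfill restores the attr to exactly what the caller passed in. The calling convention, sketched with a hypothetical driver hook and object:

const struct ib_gid_attr *old_sgid_attr;
int ret;

ret = rdma_fill_sgid_attr(device, ah_attr, &old_sgid_attr);
if (ret)
	return ret;

ret = driver_call(ah_attr);	/* hypothetical driver hook */
if (!ret)
	obj->sgid_attr = rdma_update_sgid_attr(ah_attr, obj->sgid_attr);

rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
return ret;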
+
static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
struct ib_udata *udata)
{
struct ib_ah *ah;
+ if (!pd->device->create_ah)
+ return ERR_PTR(-EOPNOTSUPP);
+
ah = pd->device->create_ah(pd, ah_attr, udata);
if (!IS_ERR(ah)) {
@@ -339,15 +489,38 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
ah->pd = pd;
ah->uobject = NULL;
ah->type = ah_attr->type;
+ ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+
atomic_inc(&pd->usecnt);
}
return ah;
}
+/**
+ * rdma_create_ah - Creates an address handle for the
+ * given address vector.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ *
+ * It returns a valid address handle on success and an ERR_PTR-encoded
+ * error code on failure.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
- return _rdma_create_ah(pd, ah_attr, NULL);
+ const struct ib_gid_attr *old_sgid_attr;
+ struct ib_ah *ah;
+ int ret;
+
+ ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ah = _rdma_create_ah(pd, ah_attr, NULL);
+
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
@@ -368,15 +541,27 @@ struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
struct ib_udata *udata)
{
+ const struct ib_gid_attr *old_sgid_attr;
+ struct ib_ah *ah;
int err;
+ err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
+ if (err)
+ return ERR_PTR(err);
+
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
err = ib_resolve_eth_dmac(pd->device, ah_attr);
- if (err)
- return ERR_PTR(err);
+ if (err) {
+ ah = ERR_PTR(err);
+ goto out;
+ }
}
- return _rdma_create_ah(pd, ah_attr, udata);
+ ah = _rdma_create_ah(pd, ah_attr, udata);
+
+out:
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);
@@ -455,16 +640,16 @@ static bool find_gid_index(const union ib_gid *gid,
return true;
}
-static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
- u16 vlan_id, const union ib_gid *sgid,
- enum ib_gid_type gid_type,
- u16 *gid_index)
+static const struct ib_gid_attr *
+get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
+ u16 vlan_id, const union ib_gid *sgid,
+ enum ib_gid_type gid_type)
{
struct find_gid_index_context context = {.vlan_id = vlan_id,
.gid_type = gid_type};
- return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
- &context, gid_index);
+ return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
+ &context);
}
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
@@ -508,39 +693,24 @@ EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr)
{
- struct ib_gid_attr sgid_attr;
- struct ib_global_route *grh;
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
+ const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
int hop_limit = 0xff;
- union ib_gid sgid;
- int ret;
-
- grh = rdma_ah_retrieve_grh(ah_attr);
-
- ret = ib_query_gid(device,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index,
- &sgid, &sgid_attr);
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- return ret;
- }
+ int ret = 0;
/* If destination is link local and source GID is RoCEv1,
* IP stack is not used.
*/
if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
- sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
+ sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
ah_attr->roce.dmac);
- goto done;
+ return ret;
}
- ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
ah_attr->roce.dmac,
- sgid_attr.ndev, &hop_limit);
-done:
- dev_put(sgid_attr.ndev);
+ sgid_attr->ndev, &hop_limit);
grh->hop_limit = hop_limit;
return ret;
@@ -555,16 +725,18 @@ done:
* as sgid and sgid is used as dgid because sgid contains the
* destination's GID to respond to.
*
+ * On success the caller is responsible for calling rdma_destroy_ah_attr()
+ * on the attr.
*/
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr)
{
u32 flow_class;
- u16 gid_index;
int ret;
enum rdma_network_type net_type = RDMA_NETWORK_IB;
enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ const struct ib_gid_attr *sgid_attr;
int hoplimit = 0xff;
union ib_gid dgid;
union ib_gid sgid;
@@ -595,72 +767,141 @@ int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
- ret = get_sgid_index_from_eth(device, port_num,
- vlan_id, &dgid,
- gid_type, &gid_index);
- if (ret)
- return ret;
+ sgid_attr = get_sgid_attr_from_eth(device, port_num,
+ vlan_id, &dgid,
+ gid_type);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
- flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
- return ib_resolve_unicast_gid_dmac(device, ah_attr);
+ rdma_move_grh_sgid_attr(ah_attr,
+ &sgid,
+ flow_class & 0xFFFFF,
+ hoplimit,
+ (flow_class >> 20) & 0xFF,
+ sgid_attr);
+
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ if (ret)
+ rdma_destroy_ah_attr(ah_attr);
+
+ return ret;
} else {
rdma_ah_set_dlid(ah_attr, wc->slid);
rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- if (wc->wc_flags & IB_WC_GRH) {
- if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
- ret = ib_find_cached_gid_by_port(device, &dgid,
- IB_GID_TYPE_IB,
- port_num, NULL,
- &gid_index);
- if (ret)
- return ret;
- } else {
- gid_index = 0;
- }
+ if ((wc->wc_flags & IB_WC_GRH) == 0)
+ return 0;
+
+ if (dgid.global.interface_id !=
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+ sgid_attr = rdma_find_gid_by_port(
+ device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
+ } else
+ sgid_attr = rdma_get_gid_attr(device, port_num, 0);
- flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_move_grh_sgid_attr(ah_attr,
+ &sgid,
flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
- }
+ hoplimit,
+ (flow_class >> 20) & 0xFF,
+ sgid_attr);
+
return 0;
}
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
+/**
+ * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
+ * of the reference
+ *
+ * @attr: Pointer to AH attribute structure
+ * @dgid: Destination GID
+ * @flow_label: Flow label
+ * @hop_limit: Hop limit
+ * @traffic_class: traffic class
+ * @sgid_attr: Pointer to SGID attribute
+ *
+ * This takes ownership of the sgid_attr reference. The caller must ensure
+ * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
+ * calling this function.
+ */
+void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
+ u32 flow_label, u8 hop_limit, u8 traffic_class,
+ const struct ib_gid_attr *sgid_attr)
+{
+ rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
+ traffic_class);
+ attr->grh.sgid_attr = sgid_attr;
+}
+EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
+
+/**
+ * rdma_destroy_ah_attr - Release reference to SGID attribute of
+ * ah attribute.
+ * @ah_attr: Pointer to ah attribute
+ *
+ * Release reference to the SGID attribute of the ah attribute if it is
+ * non-NULL. It is safe to call this multiple times, and safe to call it on
+ * a zero initialized ah_attr.
+ */
+void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
+{
+ if (ah_attr->grh.sgid_attr) {
+ rdma_put_gid_attr(ah_attr->grh.sgid_attr);
+ ah_attr->grh.sgid_attr = NULL;
+ }
+}
+EXPORT_SYMBOL(rdma_destroy_ah_attr);
+
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
const struct ib_grh *grh, u8 port_num)
{
struct rdma_ah_attr ah_attr;
+ struct ib_ah *ah;
int ret;
ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
if (ret)
return ERR_PTR(ret);
- return rdma_create_ah(pd, &ah_attr);
+ ah = rdma_create_ah(pd, &ah_attr);
+
+ rdma_destroy_ah_attr(&ah_attr);
+ return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
+ const struct ib_gid_attr *old_sgid_attr;
+ int ret;
+
if (ah->type != ah_attr->type)
return -EINVAL;
- return ah->device->modify_ah ?
+ ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
+ if (ret)
+ return ret;
+
+ ret = ah->device->modify_ah ?
ah->device->modify_ah(ah, ah_attr) :
-EOPNOTSUPP;
+
+ ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
+ ah_attr->grh.sgid_attr = NULL;
+
return ah->device->query_ah ?
ah->device->query_ah(ah, ah_attr) :
-EOPNOTSUPP;
@@ -669,13 +910,17 @@ EXPORT_SYMBOL(rdma_query_ah);
int rdma_destroy_ah(struct ib_ah *ah)
{
+ const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
struct ib_pd *pd;
int ret;
pd = ah->pd;
ret = ah->device->destroy_ah(ah);
- if (!ret)
+ if (!ret) {
atomic_dec(&pd->usecnt);
+ if (sgid_attr)
+ rdma_put_gid_attr(sgid_attr);
+ }
return ret;
}
@@ -1290,16 +1535,19 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
+/**
+ * ib_resolve_eth_dmac - Resolve destination mac address
+ * @device: Device to consider
+ * @ah_attr: address handle attribute which describes the
+ * source and destination parameters
+ * ib_resolve_eth_dmac() resolves the destination MAC address and L3 hop
+ * limit. It returns 0 on success or an appropriate error code. It
+ * initializes the necessary ah_attr fields when the call is successful.
+ */
static int ib_resolve_eth_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr)
{
- int ret = 0;
- struct ib_global_route *grh;
-
- if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
- return -EINVAL;
-
- grh = rdma_ah_retrieve_grh(ah_attr);
+ int ret = 0;
if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
@@ -1317,6 +1565,14 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
return ret;
}
+static bool is_qp_type_connected(const struct ib_qp *qp)
+{
+ return (qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_RC ||
+ qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT);
+}
+
/**
* IB core internal function to perform QP attributes modification.
*/
@@ -1324,8 +1580,53 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ const struct ib_gid_attr *old_sgid_attr_av;
+ const struct ib_gid_attr *old_sgid_attr_alt_av;
int ret;
+ if (attr_mask & IB_QP_AV) {
+ ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
+ &old_sgid_attr_av);
+ if (ret)
+ return ret;
+ }
+ if (attr_mask & IB_QP_ALT_PATH) {
+ /*
+ * FIXME: This does not track the migration state, so if the
+ * user loads a new alternate path after the HW has migrated
+ * from primary->alternate we will keep the wrong
+ * references. This is OK for IB because the reference
+ * counting does not serve any functional purpose.
+ */
+ ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
+ &old_sgid_attr_alt_av);
+ if (ret)
+ goto out_av;
+
+ /*
+ * Today the core code can only handle alternate paths and APM
+ * for IB. Ban them in roce mode.
+ */
+ if (!(rdma_protocol_ib(qp->device,
+ attr->alt_ah_attr.port_num) &&
+ rdma_protocol_ib(qp->device, port))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /*
+ * If the user provided the qp_attr then we have to resolve it. Kernel
+ * users have to provide already resolved rdma_ah_attr's.
+ */
+ if (udata && (attr_mask & IB_QP_AV) &&
+ attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
+ ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+ if (ret)
+ goto out;
+ }
+
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
@@ -1341,20 +1642,27 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
}
ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
- if (!ret && (attr_mask & IB_QP_PORT))
- qp->port = attr->port_num;
+ if (ret)
+ goto out;
+ if (attr_mask & IB_QP_PORT)
+ qp->port = attr->port_num;
+ if (attr_mask & IB_QP_AV)
+ qp->av_sgid_attr =
+ rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
+ if (attr_mask & IB_QP_ALT_PATH)
+ qp->alt_path_sgid_attr = rdma_update_sgid_attr(
+ &attr->alt_ah_attr, qp->alt_path_sgid_attr);
+
+out:
+ if (attr_mask & IB_QP_ALT_PATH)
+ rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
+out_av:
+ if (attr_mask & IB_QP_AV)
+ rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
return ret;
}
-static bool is_qp_type_connected(const struct ib_qp *qp)
-{
- return (qp->qp_type == IB_QPT_UC ||
- qp->qp_type == IB_QPT_RC ||
- qp->qp_type == IB_QPT_XRC_INI ||
- qp->qp_type == IB_QPT_XRC_TGT);
-}
-
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
* @ib_qp: The QP to modify.
@@ -1369,17 +1677,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- struct ib_qp *qp = ib_qp->real_qp;
- int ret;
-
- if (attr_mask & IB_QP_AV &&
- attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
- is_qp_type_connected(qp)) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- return ret;
- }
- return _ib_modify_qp(qp, attr, attr_mask, udata);
+ return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
@@ -1451,6 +1749,9 @@ int ib_query_qp(struct ib_qp *qp,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
{
+ qp_attr->ah_attr.grh.sgid_attr = NULL;
+ qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
+
return qp->device->query_qp ?
qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
-EOPNOTSUPP;
@@ -1509,6 +1810,8 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
int ib_destroy_qp(struct ib_qp *qp)
{
+ const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
+ const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
struct ib_pd *pd;
struct ib_cq *scq, *rcq;
struct ib_srq *srq;
@@ -1539,6 +1842,10 @@ int ib_destroy_qp(struct ib_qp *qp)
rdma_restrack_del(&qp->res);
ret = qp->device->destroy_qp(qp);
if (!ret) {
+ if (alt_path_sgid_attr)
+ rdma_put_gid_attr(alt_path_sgid_attr);
+ if (av_sgid_attr)
+ rdma_put_gid_attr(av_sgid_attr);
if (pd)
atomic_dec(&pd->usecnt);
if (scq)
@@ -1977,35 +2284,6 @@ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
-struct ib_flow *ib_create_flow(struct ib_qp *qp,
- struct ib_flow_attr *flow_attr,
- int domain)
-{
- struct ib_flow *flow_id;
- if (!qp->device->create_flow)
- return ERR_PTR(-EOPNOTSUPP);
-
- flow_id = qp->device->create_flow(qp, flow_attr, domain, NULL);
- if (!IS_ERR(flow_id)) {
- atomic_inc(&qp->usecnt);
- flow_id->qp = qp;
- }
- return flow_id;
-}
-EXPORT_SYMBOL(ib_create_flow);
-
-int ib_destroy_flow(struct ib_flow *flow_id)
-{
- int err;
- struct ib_qp *qp = flow_id->qp;
-
- err = qp->device->destroy_flow(flow_id);
- if (!err)
- atomic_dec(&qp->usecnt);
- return err;
-}
-EXPORT_SYMBOL(ib_destroy_flow);
-
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status)
{
@@ -2200,7 +2478,6 @@ static void __ib_drain_sq(struct ib_qp *qp)
struct ib_cq *cq = qp->send_cq;
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
struct ib_drain_cqe sdrain;
- struct ib_send_wr *bad_swr;
struct ib_rdma_wr swr = {
.wr = {
.next = NULL,
@@ -2219,7 +2496,7 @@ static void __ib_drain_sq(struct ib_qp *qp)
sdrain.cqe.done = ib_drain_qp_done;
init_completion(&sdrain.done);
- ret = ib_post_send(qp, &swr.wr, &bad_swr);
+ ret = ib_post_send(qp, &swr.wr, NULL);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
@@ -2240,7 +2517,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
struct ib_cq *cq = qp->recv_cq;
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
struct ib_drain_cqe rdrain;
- struct ib_recv_wr rwr = {}, *bad_rwr;
+ struct ib_recv_wr rwr = {};
int ret;
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
@@ -2253,7 +2530,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
rdrain.cqe.done = ib_drain_qp_done;
init_completion(&rdrain.done);
- ret = ib_post_recv(qp, &rwr, &bad_rwr);
+ ret = ib_post_recv(qp, &rwr, NULL);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a76e206704d4..bbfb86eb2d24 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -166,7 +166,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_MEM_MGT_EXTENSIONS;
- ib_attr->max_sge = dev_attr->max_qp_sges;
+ ib_attr->max_send_sge = dev_attr->max_qp_sges;
+ ib_attr->max_recv_sge = dev_attr->max_qp_sges;
ib_attr->max_sge_rd = dev_attr->max_qp_sges;
ib_attr->max_cq = dev_attr->max_cq;
ib_attr->max_cqe = dev_attr->max_cq_wqes;
@@ -243,8 +244,8 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
port_attr->gid_tbl_len = dev_attr->max_sgid;
port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP |
- IB_PORT_IP_BASED_GIDS;
+ IB_PORT_VENDOR_CLASS_SUP;
+ port_attr->ip_gids = true;
port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
port_attr->bad_pkey_cntr = 0;
@@ -364,8 +365,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
return rc;
}
-int bnxt_re_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr, void **context)
+int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
int rc;
u32 tbl_idx = 0;
@@ -377,7 +377,7 @@ int bnxt_re_add_gid(const union ib_gid *gid,
if ((attr->ndev) && is_vlan_dev(attr->ndev))
vlan_id = vlan_dev_vlan_id(attr->ndev);
- rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
+ rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
rdev->qplib_res.netdev->dev_addr,
vlan_id, true, &tbl_idx);
if (rc == -EALREADY) {
@@ -673,8 +673,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
int rc;
u8 nw_type;
- struct ib_gid_attr sgid_attr;
-
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
return ERR_PTR(-EINVAL);
@@ -705,20 +703,11 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
grh->dgid.raw) &&
!rdma_link_local_addr((struct in6_addr *)
grh->dgid.raw)) {
- union ib_gid sgid;
+ const struct ib_gid_attr *sgid_attr;
- rc = ib_get_cached_gid(&rdev->ibdev, 1,
- grh->sgid_index, &sgid,
- &sgid_attr);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to query gid at index %d",
- grh->sgid_index);
- goto fail;
- }
- dev_put(sgid_attr.ndev);
+ sgid_attr = grh->sgid_attr;
/* Get network header type for this GID */
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV4:
ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
@@ -1408,7 +1397,7 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
}
if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto exit;
}
@@ -1530,8 +1519,8 @@ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
return 0;
}
-int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
@@ -1599,9 +1588,6 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
- int status;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
unsigned int flags;
u8 nw_type;
@@ -1668,6 +1654,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_AV) {
const struct ib_global_route *grh =
rdma_ah_read_grh(&qp_attr->ah_attr);
+ const struct ib_gid_attr *sgid_attr;
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
@@ -1691,29 +1678,23 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
ether_addr_copy(qp->qplib_qp.ah.dmac,
qp_attr->ah_attr.roce.dmac);
- status = ib_get_cached_gid(&rdev->ibdev, 1,
- grh->sgid_index,
- &sgid, &sgid_attr);
- if (!status) {
- memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
- ETH_ALEN);
- dev_put(sgid_attr.ndev);
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
- &sgid);
- switch (nw_type) {
- case RDMA_NETWORK_IPV4:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
- break;
- case RDMA_NETWORK_IPV6:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
- break;
- default:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
- break;
- }
+ sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
+ memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
+ ETH_ALEN);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV4:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
+ break;
+ default:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
+ break;
}
}
@@ -1895,19 +1876,17 @@ out:
/* Routine for sending QP1 packets for RoCE V1 and V2
*/
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
- struct ib_device *ibdev = &qp->rdev->ibdev;
struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
ib_ah);
struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
+ const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
struct bnxt_qplib_sge sge;
- union ib_gid sgid;
u8 nw_type;
u16 ether_type;
- struct ib_gid_attr sgid_attr;
union ib_gid dgid;
bool is_eth = false;
bool is_vlan = false;
@@ -1920,22 +1899,10 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
- rc = ib_get_cached_gid(ibdev, 1,
- qplib_ah->host_sgid_index, &sgid,
- &sgid_attr);
- if (rc) {
- dev_err(rdev_to_dev(qp->rdev),
- "Failed to query gid at index %d",
- qplib_ah->host_sgid_index);
- return rc;
- }
- if (sgid_attr.ndev) {
- if (is_vlan_dev(sgid_attr.ndev))
- vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
- dev_put(sgid_attr.ndev);
- }
+ if (is_vlan_dev(sgid_attr->ndev))
+ vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
/* Get network header type for this GID */
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV4:
nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
@@ -1948,9 +1915,9 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
break;
}
memcpy(&dgid.raw, &qplib_ah->dgid, 16);
- is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
+ is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
if (is_udp) {
- if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
ip_version = 4;
ether_type = ETH_P_IP;
} else {
@@ -1983,9 +1950,10 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
}
if (is_grh || (ip_version == 6)) {
- memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
+ memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
+ sizeof(sgid_attr->gid));
memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
- sizeof(sgid));
+ sizeof(sgid_attr->gid));
qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
}
@@ -1995,7 +1963,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
- memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
+ memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
}
@@ -2080,7 +2048,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
* and the MAD datagram out to the provided SGE.
*/
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
- struct ib_recv_wr *wr,
+ const struct ib_recv_wr *wr,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
@@ -2125,7 +2093,7 @@ static int is_ud_qp(struct bnxt_re_qp *qp)
}
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_re_ah *ah = NULL;
@@ -2163,7 +2131,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
return 0;
}
-static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
switch (wr->opcode) {
@@ -2195,7 +2163,7 @@ static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
switch (wr->opcode) {
@@ -2222,7 +2190,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
@@ -2241,7 +2209,7 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
+static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
@@ -2283,7 +2251,7 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
}
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
/* Copy the inline data to the data field */
@@ -2313,7 +2281,7 @@ static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
}
static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
int payload_sz = 0;
@@ -2345,7 +2313,7 @@ static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
@@ -2393,8 +2361,8 @@ bad:
return rc;
}
-int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
@@ -2441,7 +2409,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
default:
break;
}
- /* Fall thru to build the wqe */
+ /* fall through */
case IB_WR_SEND_WITH_INV:
rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
break;
@@ -2493,7 +2461,7 @@ bad:
static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
- struct ib_recv_wr *wr)
+ const struct ib_recv_wr *wr)
{
struct bnxt_qplib_swqe wqe;
int rc = 0;
@@ -2526,8 +2494,8 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
return rc;
}
-int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
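
The bnxt_re changes above replace the ib_get_cached_gid()/dev_put() pattern with the
sgid_attr pointer that the RDMA core now attaches to AH attributes and keeps referenced
for the life of the object. A minimal sketch of the two patterns side by side (error
handling trimmed; this mirrors the hunks above rather than adding anything new):

    /* Old: look up the GID by index and drop the ndev reference by hand. */
    union ib_gid sgid;
    struct ib_gid_attr sgid_attr;

    rc = ib_get_cached_gid(ibdev, 1, grh->sgid_index, &sgid, &sgid_attr);
    if (rc)
            return rc;
    dev_put(sgid_attr.ndev);        /* easy to forget -> netdev refcount leak */

    /* New: the core resolved the GID once and pinned the attribute. */
    const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
    nw_type = rdma_gid_attr_network_type(sgid_attr);    /* no reference to drop */

The GID value itself now lives in sgid_attr->gid, which is why the add_gid hunk casts
(struct bnxt_qplib_gid *)&attr->gid instead of taking a separate gid argument.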
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 5c6414cad4af..aa33e7b82c84 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -158,8 +158,7 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
-int bnxt_re_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr, void **context);
+int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
@@ -182,8 +181,8 @@ int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq);
-int bnxt_re_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
@@ -192,10 +191,10 @@ int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp);
-int bnxt_re_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
- struct ib_send_wr **bad_send_wr);
-int bnxt_re_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
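
The prototype changes in this header track the core-wide const-ification of the post
verbs: work-request chains are read-only to the driver, and the failing WR is reported
back through a const pointer. A minimal sketch of the shape every converted handler
takes (build_one() is a hypothetical helper, not part of this patch):

    int drv_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                      const struct ib_send_wr **bad_wr)
    {
            int rc = 0;

            for (; wr; wr = wr->next) {     /* walk the chain without writing it */
                    rc = build_one(ibqp, wr);
                    if (rc) {
                            *bad_wr = wr;   /* report, never modify, the bad WR */
                            break;
                    }
            }
            return rc;
    }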
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 50d8f1fc98d5..e426b990c1dd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2354,7 +2354,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
srq = qp->srq;
if (!srq)
return -EINVAL;
- if (wr_id_idx > srq->hwq.max_elements) {
+ if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process RC ");
dev_err(&cq->hwq.pdev->dev,
@@ -2369,7 +2369,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
} else {
rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
+ if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process RC ");
dev_err(&cq->hwq.pdev->dev,
@@ -2437,7 +2437,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
if (!srq)
return -EINVAL;
- if (wr_id_idx > srq->hwq.max_elements) {
+ if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process UD ");
dev_err(&cq->hwq.pdev->dev,
@@ -2452,7 +2452,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
} else {
rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
+ if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process UD ");
dev_err(&cq->hwq.pdev->dev,
@@ -2546,7 +2546,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
"QPLIB: FP: SRQ used but not defined??");
return -EINVAL;
}
- if (wr_id_idx > srq->hwq.max_elements) {
+ if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process Raw/QP1 ");
dev_err(&cq->hwq.pdev->dev,
@@ -2561,7 +2561,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
} else {
rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
+ if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
dev_err(&cq->hwq.pdev->dev,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 2f3f32eaa1d5..4097f3fa25c5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -197,7 +197,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid)
{
- if (index > sgid_tbl->max) {
+ if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded SGID table max (%d)",
index, sgid_tbl->max);
@@ -402,7 +402,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
*pkey = 0xFFFF;
return 0;
}
- if (index > pkey_tbl->max) {
+ if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded PKEY table max (%d)",
index, pkey_tbl->max);
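
The qplib_fp.c and qplib_sp.c hunks fix the same off-by-one: a table sized max holds
valid indices 0..max-1, so index == max must be rejected as well. A small illustration,
assuming max == 16:

    /* Valid indices are 0..15 when the table max is 16. */
    static bool index_ok(unsigned int index, unsigned int max)
    {
            return index < max;     /* same as rejecting index >= max */
    }
    /* index_ok(15, 16) == true; index_ok(16, 16) == false -- the old
     * 'index > max' check let 16 through and read one entry past the table. */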
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index 0a8542c20804..a098c0140580 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
@@ -32,38 +32,16 @@
#include "iwch_provider.h"
#include "iwch.h"
-/*
- * Get one cq entry from cxio and map it to openib.
- *
- * Returns:
- * 0 EMPTY;
- * 1 cqe returned
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct ib_wc *wc)
+static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
+ struct iwch_qp *qhp, struct ib_wc *wc)
{
- struct iwch_qp *qhp = NULL;
- struct t3_cqe cqe, *rd_cqe;
- struct t3_wq *wq;
+ struct t3_wq *wq = qhp ? &qhp->wq : NULL;
+ struct t3_cqe cqe;
u32 credit = 0;
u8 cqe_flushed;
u64 cookie;
int ret = 1;
- rd_cqe = cxio_next_cqe(&chp->cq);
-
- if (!rd_cqe)
- return 0;
-
- qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
- if (!qhp)
- wq = NULL;
- else {
- spin_lock(&qhp->lock);
- wq = &(qhp->wq);
- }
ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
&credit);
if (t3a_device(chp->rhp) && credit) {
@@ -79,7 +57,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
ret = 1;
wc->wr_id = cookie;
- wc->qp = &qhp->ibqp;
+ wc->qp = qhp ? &qhp->ibqp : NULL;
wc->vendor_err = CQE_STATUS(cqe);
wc->wc_flags = 0;
@@ -182,8 +160,38 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
}
}
out:
- if (wq)
+ return ret;
+}
+
+/*
+ * Get one cq entry from cxio and map it to openib.
+ *
+ * Returns:
+ * 0 EMPTY;
+ * 1 cqe returned
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
+ struct ib_wc *wc)
+{
+ struct iwch_qp *qhp;
+ struct t3_cqe *rd_cqe;
+ int ret;
+
+ rd_cqe = cxio_next_cqe(&chp->cq);
+
+ if (!rd_cqe)
+ return 0;
+
+ qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
+ if (qhp) {
+ spin_lock(&qhp->lock);
+ ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
spin_unlock(&qhp->lock);
+ } else {
+ ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
+ }
return ret;
}
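
Splitting iwch_poll_cq_one() into a locked wrapper plus __iwch_poll_cq_one() keeps the
qhp lock acquire and release visibly paired in one place, instead of a conditional
unlock at the bottom of a long function. Reduced to its skeleton:

    qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
    if (qhp) {
            spin_lock(&qhp->lock);
            ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);    /* runs locked */
            spin_unlock(&qhp->lock);
    } else {
            ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);   /* no QP, no lock */
    }

Every exit path out of the helper now unwinds through exactly one unlock site.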
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index be097c6723c0..1b9ff21aa1d5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,42 +61,6 @@
#include <rdma/cxgb3-abi.h>
#include "common.h"
-static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct ib_udata *udata)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static int iwch_ah_destroy(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int iwch_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- return -ENOSYS;
-}
-
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
struct iwch_dev *rhp = to_iwch_dev(context->device);
@@ -1103,7 +1067,8 @@ static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
props->max_mr_size = dev->attr.max_mr_size;
props->max_qp = dev->attr.max_qps;
props->max_qp_wr = dev->attr.max_wrs;
- props->max_sge = dev->attr.max_sge_per_wr;
+ props->max_send_sge = dev->attr.max_sge_per_wr;
+ props->max_recv_sge = dev->attr.max_sge_per_wr;
props->max_sge_rd = 1;
props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
@@ -1398,8 +1363,6 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.mmap = iwch_mmap;
dev->ibdev.alloc_pd = iwch_allocate_pd;
dev->ibdev.dealloc_pd = iwch_deallocate_pd;
- dev->ibdev.create_ah = iwch_ah_create;
- dev->ibdev.destroy_ah = iwch_ah_destroy;
dev->ibdev.create_qp = iwch_create_qp;
dev->ibdev.modify_qp = iwch_ib_modify_qp;
dev->ibdev.destroy_qp = iwch_destroy_qp;
@@ -1414,9 +1377,6 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.dealloc_mw = iwch_dealloc_mw;
dev->ibdev.alloc_mr = iwch_alloc_mr;
dev->ibdev.map_mr_sg = iwch_map_mr_sg;
- dev->ibdev.attach_mcast = iwch_multicast_attach;
- dev->ibdev.detach_mcast = iwch_multicast_detach;
- dev->ibdev.process_mad = iwch_process_mad;
dev->ibdev.req_notify_cq = iwch_arm_cq;
dev->ibdev.post_send = iwch_post_send;
dev->ibdev.post_recv = iwch_post_receive;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 2e38ddefea8a..8adbe9658935 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -326,10 +326,10 @@ enum iwch_qp_query_flags {
};
u16 iwch_rqes_posted(struct iwch_qp *qhp);
-int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
int iwch_post_zb_read(struct iwch_ep *ep);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3871e1fd8395..c649faad63f9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -39,8 +39,8 @@
#define NO_SUPPORT -1
-static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 * flit_cnt)
+static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
int i;
u32 plen;
@@ -84,8 +84,8 @@ static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
return 0;
}
-static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
+static int build_rdma_write(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
int i;
u32 plen;
@@ -125,8 +125,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
return 0;
}
-static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
+static int build_rdma_read(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
if (wr->num_sge > 1)
return -EINVAL;
@@ -146,8 +146,8 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
return 0;
}
-static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr,
- u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
+static int build_memreg(union t3_wr *wqe, const struct ib_reg_wr *wr,
+ u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
struct iwch_mr *mhp = to_iwch_mr(wr->mr);
int i;
@@ -189,8 +189,8 @@ static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr,
return 0;
}
-static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
+static int build_inv_stag(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->local_inv.reserved = 0;
@@ -246,7 +246,7 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
}
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- struct ib_recv_wr *wr)
+ const struct ib_recv_wr *wr)
{
int i, err = 0;
u32 pbl_addr[T3_MAX_SGE];
@@ -286,7 +286,7 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
}
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- struct ib_recv_wr *wr)
+ const struct ib_recv_wr *wr)
{
int i;
u32 pbl_addr;
@@ -348,8 +348,8 @@ static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
return 0;
}
-int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int err = 0;
u8 uninitialized_var(t3_wr_flit_cnt);
@@ -463,8 +463,8 @@ out:
return err;
}
-int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
struct iwch_qp *qhp;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0912fa026327..0f83cbec33f3 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -587,24 +587,29 @@ static int send_flowc(struct c4iw_ep *ep)
{
struct fw_flowc_wr *flowc;
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
- int i;
u16 vlan = ep->l2t->vlan;
int nparams;
+ int flowclen, flowclen16;
if (WARN_ON(!skb))
return -ENOMEM;
if (vlan == CPL_L2T_VLAN_NONE)
- nparams = 8;
- else
nparams = 9;
+ else
+ nparams = 10;
- flowc = __skb_put(skb, FLOWC_LEN);
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+
+ flowc = __skb_put(skb, flowclen);
+ memset(flowc, 0, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
- 16)) | FW_WR_FLOWID_V(ep->hwtid));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
@@ -623,21 +628,13 @@ static int send_flowc(struct c4iw_ep *ep)
flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
- if (nparams == 9) {
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+ flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
+ if (nparams == 10) {
u16 pri;
-
pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
- flowc->mnemval[8].val = cpu_to_be32(pri);
- } else {
- /* Pad WR to 16 byte boundary */
- flowc->mnemval[8].mnemonic = 0;
- flowc->mnemval[8].val = 0;
- }
- for (i = 0; i < 9; i++) {
- flowc->mnemval[i].r4[0] = 0;
- flowc->mnemval[i].r4[1] = 0;
- flowc->mnemval[i].r4[2] = 0;
+ flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[9].val = cpu_to_be32(pri);
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
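
send_flowc() now sizes the WR from the actual parameter count rather than the fixed
FLOWC_LEN, and pads via the memset instead of a hand-rolled dummy mnemval entry.
Assuming the usual firmware layout (8-byte fw_flowc_wr header, 8-byte mnemval entries
-- worth double-checking against the fw headers), the arithmetic works out as:

    /* nparams = 9 (no VLAN):
     *   flowclen   = offsetof(struct fw_flowc_wr, mnemval[9])  = 8 + 72 = 80
     *   flowclen16 = DIV_ROUND_UP(80, 16) = 5  ->  flowclen = 80 (aligned)
     * nparams = 10 (VLAN priority):
     *   flowclen   = offsetof(struct fw_flowc_wr, mnemval[10]) = 8 + 80 = 88
     *   flowclen16 = DIV_ROUND_UP(88, 16) = 6  ->  flowclen = 96 (8 pad bytes,
     *   already zeroed by the memset above)
     */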
@@ -1176,6 +1173,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
+ unsigned short tcp_opt = ntohs(req->tcp_opt);
unsigned int tid = GET_TID(req);
unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
@@ -1196,8 +1194,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
/* dealloc the atid */
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
@@ -1853,10 +1852,33 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void complete_cached_srq_buffers(struct c4iw_ep *ep,
+ __be32 srqidx_status)
+{
+ enum chip_type adapter_type;
+ u32 srqidx;
+
+ adapter_type = ep->com.dev->rdev.lldi.adapter_type;
+ srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(srqidx_status));
+
+ /*
+ * If this TCB had an SRQ buffer cached, then we must complete
+ * it. For user mode, that means saving the srqidx in the
+ * user/kernel status page for this QP. For kernel mode, just
+ * synthesize the CQE now.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
+ if (ep->com.qp->ibqp.uobject)
+ t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
+ else
+ c4iw_flush_srqidx(ep->com.qp, srqidx);
+ }
+}
+
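
On T6 and later the abort CPLs carry the cached SRQ index packed into the same 32-bit
word as the abort status, which is why the handlers below switch to the
cpl_abort_*_rss6 layouts and extract fields with ABORT_RSS_STATUS_G() and
ABORT_RSS_SRQIDX_G(). Those accessors follow the standard cxgb4 shift-and-mask
pattern; the shift/mask values below are placeholders for illustration only (the real
definitions live in the t4 message headers):

    #define ABORT_RSS_STATUS_S    0         /* illustrative only */
    #define ABORT_RSS_STATUS_M    0xffU
    #define ABORT_RSS_STATUS_G(x) \
            (((x) >> ABORT_RSS_STATUS_S) & ABORT_RSS_STATUS_M)

    u32 word   = be32_to_cpu(req->srqidx_status);
    u8  status = ABORT_RSS_STATUS_G(word);  /* abort status bits */
    u32 srqidx = ABORT_RSS_SRQIDX_G(word);  /* cached SRQ index, built the same way */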
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
- struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
int release = 0;
unsigned int tid = GET_TID(rpl);
@@ -1865,6 +1887,9 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
pr_warn("Abort rpl to freed endpoint\n");
return 0;
}
+
+ complete_cached_srq_buffers(ep, rpl->srqidx_status);
+
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
@@ -2603,16 +2628,17 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_pass_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);
int ret;
+ u16 tcp_opt = ntohs(req->tcp_opt);
ep = get_ep_from_tid(dev, tid);
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid,
- ntohs(req->tcp_opt));
+ pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
dst_confirm(ep->dst);
mutex_lock(&ep->com.mutex);
@@ -2719,28 +2745,35 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cpl_abort_req_rss6 *req = cplhdr(skb);
struct c4iw_ep *ep;
struct sk_buff *rpl_skb;
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
unsigned int tid = GET_TID(req);
+ u8 status;
+
u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- if (cxgb_is_neg_adv(req->status)) {
+ status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));
+
+ if (cxgb_is_neg_adv(status)) {
pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
- ep->hwtid, req->status, neg_adv_str(req->status));
+ ep->hwtid, status, neg_adv_str(status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
goto deref_ep;
}
+
+ complete_cached_srq_buffers(ep, req->srqidx_status);
+
pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
@@ -3444,9 +3477,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
insert_handle(dev, &dev->stid_idr, ep, ep->stid);
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
-
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
err = create_server4(dev, ep);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2be2e1ac1b5f..6d3042794094 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,6 +77,10 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
int user = (uctx != &rdev->uctx);
int ret;
struct sk_buff *skb;
+ struct c4iw_ucontext *ucontext = NULL;
+
+ if (user)
+ ucontext = container_of(uctx, struct c4iw_ucontext, uctx);
cq->cqid = c4iw_get_cqid(rdev, uctx);
if (!cq->cqid) {
@@ -100,6 +104,16 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, cq->memsize);
+ if (user && ucontext->is_32b_cqe) {
+ cq->qp_errp = &((struct t4_status_page *)
+ ((u8 *)cq->queue + (cq->size - 1) *
+ (sizeof(*cq->queue) / 2)))->qp_err;
+ } else {
+ cq->qp_errp = &((struct t4_status_page *)
+ ((u8 *)cq->queue + (cq->size - 1) *
+ sizeof(*cq->queue)))->qp_err;
+ }
+
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + sizeof *res;
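
Both qp_errp expressions are "queue base + (size - 1) * stride": the status page always
occupies the final slot, so halving the CQE stride for 32B mode moves it. A worked
example, assuming sizeof(struct t4_cqe) == 64 and a 128-entry CQ:

    /* 64B CQEs: qp_err lives at queue + 127 * 64 = queue + 8128
     * 32B CQEs: qp_err lives at queue + 127 * 32 = queue + 4064
     * memsize in c4iw_create_cq() scales by the same half-stride. */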
@@ -132,7 +146,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_IQPCIECH_V(2) |
FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
FW_RI_RES_WR_IQO_F |
- FW_RI_RES_WR_IQESIZE_V(1));
+ ((user && ucontext->is_32b_cqe) ?
+ FW_RI_RES_WR_IQESIZE_V(1) :
+ FW_RI_RES_WR_IQESIZE_V(2)));
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
@@ -166,7 +182,7 @@ err1:
return ret;
}
-static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
+static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
struct t4_cqe cqe;
@@ -179,6 +195,8 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
CQE_SWCQE_V(1) |
CQE_QPID_V(wq->sq.qid));
cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ if (srqidx)
+ cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@@ -191,7 +209,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
wq, cq, wq->rq.in_use, count);
while (in_use--) {
- insert_recv_cqe(wq, cq);
+ insert_recv_cqe(wq, cq, 0);
flushed++;
}
return flushed;
@@ -442,6 +460,72 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
pr_debug("cq %p count %d\n", cq, *count);
}
+static void post_pending_srq_wrs(struct t4_srq *srq)
+{
+ struct t4_srq_pending_wr *pwr;
+ u16 idx = 0;
+
+ while (srq->pending_in_use) {
+ pwr = &srq->pending_wrs[srq->pending_cidx];
+ srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
+ srq->sw_rq[srq->pidx].valid = 1;
+
+ pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
+ __func__,
+ srq->cidx, srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->size,
+ (unsigned long long)pwr->wr_id);
+
+ c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
+ t4_srq_consume_pending_wr(srq);
+ t4_srq_produce(srq, pwr->len16);
+ idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
+ }
+
+ if (idx) {
+ t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
+ srq->queue[srq->size].status.host_wq_pidx =
+ srq->wq_pidx;
+ }
+}
+
+static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq)
+{
+ int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
+ u64 wr_id;
+
+ srq->sw_rq[rel_idx].valid = 0;
+ wr_id = srq->sw_rq[rel_idx].wr_id;
+
+ if (rel_idx == srq->cidx) {
+ pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
+ __func__, rel_idx, srq->cidx, srq->pidx,
+ srq->wq_pidx, srq->in_use, srq->size,
+ (unsigned long long)srq->sw_rq[rel_idx].wr_id);
+ t4_srq_consume(srq);
+ while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
+ pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
+ __func__, srq->cidx, srq->pidx,
+ srq->wq_pidx, srq->in_use,
+ srq->size, srq->ooo_count,
+ (unsigned long long)
+ srq->sw_rq[srq->cidx].wr_id);
+ t4_srq_consume_ooo(srq);
+ }
+ if (srq->ooo_count == 0 && srq->pending_in_use)
+ post_pending_srq_wrs(srq);
+ } else {
+ pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
+ __func__, rel_idx, srq->cidx,
+ srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->size,
+ srq->ooo_count,
+ (unsigned long long)srq->sw_rq[rel_idx].wr_id);
+ t4_srq_produce_ooo(srq);
+ }
+ return wr_id;
+}
+
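
reap_srq_cqe() tolerates completions that arrive out of order with respect to the SRQ
consumer index: an in-order CQE advances cidx and then sweeps up any already-reaped
neighbours, while an out-of-order CQE is only marked invalid and counted. A short
trace, assuming cidx starts at 0 and CQEs arrive for relative indices 2, 0, 1:

    /* rel 2: != cidx -> sw_rq[2].valid = 0, t4_srq_produce_ooo(), ooo_count = 1
     * rel 0: == cidx -> t4_srq_consume(), cidx = 1; sw_rq[1] still valid, stop
     * rel 1: == cidx -> t4_srq_consume(), cidx = 2; sw_rq[2] already reaped ->
     *        t4_srq_consume_ooo(), cidx = 3, ooo_count = 0; with the SRQ back
     *        in order, any pending WRs are reposted via post_pending_srq_wrs()
     */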
/*
* poll_cq
*
@@ -459,7 +543,8 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
* -EOVERFLOW CQ overflow detected.
*/
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
+ u8 *cqe_flushed, u64 *cookie, u32 *credit,
+ struct t4_srq *srq)
{
int ret = 0;
struct t4_cqe *hw_cqe, read_cqe;
@@ -524,7 +609,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
*/
if (CQE_TYPE(hw_cqe) == 1) {
if (CQE_STATUS(hw_cqe))
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
ret = -EAGAIN;
goto skip_cqe;
}
@@ -535,7 +620,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
*/
if (CQE_WRID_STAG(hw_cqe) == 1) {
if (CQE_STATUS(hw_cqe))
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
ret = -EAGAIN;
goto skip_cqe;
}
@@ -560,7 +645,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
}
/*
@@ -574,15 +659,9 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
* then we complete this with T4_ERR_MSN and mark the wq in
* error.
*/
-
- if (t4_rq_empty(wq)) {
- t4_set_wq_in_error(wq);
- ret = -EAGAIN;
- goto skip_cqe;
- }
if (unlikely(!CQE_STATUS(hw_cqe) &&
CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
}
goto proc_cqe;
@@ -641,11 +720,16 @@ proc_cqe:
c4iw_log_wr_stats(wq, hw_cqe);
t4_sq_consume(wq);
} else {
- pr_debug("completing rq idx %u\n", wq->rq.cidx);
- *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
- if (c4iw_wr_log)
- c4iw_log_wr_stats(wq, hw_cqe);
- t4_rq_consume(wq);
+ if (!srq) {
+ pr_debug("completing rq idx %u\n", wq->rq.cidx);
+ *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+ if (c4iw_wr_log)
+ c4iw_log_wr_stats(wq, hw_cqe);
+ t4_rq_consume(wq);
+ } else {
+ *cookie = reap_srq_cqe(hw_cqe, srq);
+ }
+ wq->rq.msn++;
goto skip_cqe;
}
@@ -668,46 +752,33 @@ skip_cqe:
return ret;
}
-/*
- * Get one cq entry from c4iw and map it to openib.
- *
- * Returns:
- * 0 cqe returned
- * -ENODATA EMPTY;
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
+ struct ib_wc *wc, struct c4iw_srq *srq)
{
- struct c4iw_qp *qhp = NULL;
- struct t4_cqe uninitialized_var(cqe), *rd_cqe;
- struct t4_wq *wq;
+ struct t4_cqe uninitialized_var(cqe);
+ struct t4_wq *wq = qhp ? &qhp->wq : NULL;
u32 credit = 0;
u8 cqe_flushed;
u64 cookie = 0;
int ret;
- ret = t4_next_cqe(&chp->cq, &rd_cqe);
-
- if (ret)
- return ret;
-
- qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
- if (!qhp)
- wq = NULL;
- else {
- spin_lock(&qhp->lock);
- wq = &(qhp->wq);
- }
- ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
+ ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
+ srq ? &srq->wq : NULL);
if (ret)
goto out;
wc->wr_id = cookie;
- wc->qp = &qhp->ibqp;
+ wc->qp = qhp ? &qhp->ibqp : NULL;
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;
+ /*
+ * Simulate an SRQ_LIMIT_REACHED HW notification if required.
+ */
+ if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
+ srq->wq.in_use < srq->srq_limit)
+ c4iw_dispatch_srq_limit_reached_event(srq);
+
pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
CQE_QPID(&cqe),
CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
@@ -720,15 +791,32 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->byte_len = CQE_LEN(&cqe);
else
wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
- CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
+
+ switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_SEND:
+ wc->opcode = IB_WC_RECV;
+ break;
+ case FW_RI_SEND_WITH_INV:
+ case FW_RI_SEND_WITH_SE_INV:
+ wc->opcode = IB_WC_RECV;
wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
+ break;
+ case FW_RI_WRITE_IMMEDIATE:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->ex.imm_data = CQE_IMM_DATA(&cqe);
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ default:
+ pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
+ CQE_OPCODE(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ goto out;
}
} else {
switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_WRITE_IMMEDIATE:
case FW_RI_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
@@ -819,8 +907,43 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq)
+ return ret;
+}
+
+/*
+ * Get one cq entry from c4iw and map it to openib.
+ *
+ * Returns:
+ * 0 cqe returned
+ * -ENODATA EMPTY;
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+{
+ struct c4iw_srq *srq = NULL;
+ struct c4iw_qp *qhp = NULL;
+ struct t4_cqe *rd_cqe;
+ int ret;
+
+ ret = t4_next_cqe(&chp->cq, &rd_cqe);
+
+ if (ret)
+ return ret;
+
+ qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
+ if (qhp) {
+ spin_lock(&qhp->lock);
+ srq = qhp->srq;
+ if (srq)
+ spin_lock(&srq->lock);
+ ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
spin_unlock(&qhp->lock);
+ if (srq)
+ spin_unlock(&srq->lock);
+ } else {
+ ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
+ }
return ret;
}
@@ -876,6 +999,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
int vector = attr->comp_vector;
struct c4iw_dev *rhp;
struct c4iw_cq *chp;
+ struct c4iw_create_cq ucmd;
struct c4iw_create_cq_resp uresp;
struct c4iw_ucontext *ucontext = NULL;
int ret, wr_len;
@@ -891,9 +1015,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (vector >= rhp->rdev.lldi.nciq)
return ERR_PTR(-EINVAL);
+ if (ib_context) {
+ ucontext = to_c4iw_ucontext(ib_context);
+ if (udata->inlen < sizeof(ucmd))
+ ucontext->is_32b_cqe = 1;
+ }
+
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
return ERR_PTR(-ENOMEM);
+
chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
if (!chp->wr_waitp) {
ret = -ENOMEM;
@@ -908,9 +1039,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
goto err_free_wr_wait;
}
- if (ib_context)
- ucontext = to_c4iw_ucontext(ib_context);
-
/* account for the status page. */
entries++;
@@ -934,13 +1062,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (hwentries < 64)
hwentries = 64;
- memsize = hwentries * sizeof *chp->cq.queue;
+ memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
+ (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
/*
 * memsize must be a multiple of the page size if it's a user cq.
*/
if (ucontext)
memsize = roundup(memsize, PAGE_SIZE);
+
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
chp->cq.vector = vector;
@@ -971,6 +1101,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (!mm2)
goto err_free_mm;
+ memset(&uresp, 0, sizeof(uresp));
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
uresp.size = chp->cq.size;
@@ -980,9 +1111,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ucontext->key += PAGE_SIZE;
uresp.gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
+ /* Communicate to userspace that the
+ * kernel driver supports 64B CQEs.
+ */
+ uresp.flags |= C4IW_64B_CQE;
+
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp,
- sizeof(uresp) - sizeof(uresp.reserved));
+ ucontext->is_32b_cqe ?
+ sizeof(uresp) - sizeof(uresp.flags) :
+ sizeof(uresp));
if (ret)
goto err_free_mm2;
@@ -1019,11 +1157,6 @@ err_free_chp:
return ERR_PTR(ret);
}
-int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
- return -ENOSYS;
-}
-
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct c4iw_cq *chp;
@@ -1039,3 +1172,19 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
spin_unlock_irqrestore(&chp->lock, flag);
return ret;
}
+
+void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
+{
+ struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ unsigned long flag;
+
+ /* locking hierarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&rchp->lock, flag);
+ spin_lock(&qhp->lock);
+
+ /* create a SRQ RECV CQE for srqidx */
+ insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);
+
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index a3c3418afd73..c13c0ba30f63 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -275,10 +275,11 @@ static int dump_qp(int id, void *p, void *data)
set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
cc = snprintf(qpd->buf + qpd->pos, space,
- "rc qp sq id %u rq id %u state %u "
+ "rc qp sq id %u %s id %u state %u "
"onchip %u ep tid %u state %u "
"%pI4:%u/%u->%pI4:%u/%u\n",
- qp->wq.sq.qid, qp->wq.rq.qid,
+ qp->wq.sq.qid, qp->srq ? "srq" : "rq",
+ qp->srq ? qp->srq->idx : qp->wq.rq.qid,
(int)qp->attr.state,
qp->wq.sq.flags & T4_SQ_ONCHIP,
ep->hwtid, (int)ep->com.state,
@@ -480,6 +481,9 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n",
dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
+ seq_printf(seq, " SRQS: %10llu %10llu %10llu %10llu\n",
+ dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
+ dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
@@ -530,6 +534,8 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
dev->rdev.stats.pbl.fail = 0;
dev->rdev.stats.rqt.max = 0;
dev->rdev.stats.rqt.fail = 0;
+ dev->rdev.stats.srqt.max = 0;
+ dev->rdev.stats.srqt.fail = 0;
dev->rdev.stats.ocqp.max = 0;
dev->rdev.stats.ocqp.fail = 0;
dev->rdev.stats.db_full = 0;
@@ -802,7 +808,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->qpmask = rdev->lldi.udb_density - 1;
rdev->cqmask = rdev->lldi.ucq_density - 1;
- pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+ pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
rdev->lldi.vr->pbl.start,
@@ -811,7 +817,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.start,
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
- rdev->lldi.vr->cq.size);
+ rdev->lldi.vr->cq.size,
+ rdev->lldi.vr->srq.size);
pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
&rdev->lldi.pdev->resource[2],
rdev->lldi.db_reg, rdev->lldi.gts_reg,
@@ -824,10 +831,12 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->stats.stag.total = rdev->lldi.vr->stag.size;
rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
+ rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
rdev->stats.qid.total = rdev->lldi.vr->qp.size;
- err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
+ T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
if (err) {
pr_err("error %d initializing resources\n", err);
return err;
@@ -857,6 +866,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
+ rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;
if (c4iw_wr_log) {
rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 3e9d8b277ab9..8741d23168f3 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -70,9 +70,10 @@ static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
- pr_debug("%016llx %016llx %016llx %016llx\n",
+ pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
- be64_to_cpu(p[3]));
+ be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
+ be64_to_cpu(p[6]), be64_to_cpu(p[7]));
/*
* Ingress WRITE and READ_RESP errors provide
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 870649ff049c..f0fceadd0d12 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -97,6 +97,7 @@ struct c4iw_resource {
struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
+ struct c4iw_id_table srq_table;
};
struct c4iw_qid_list {
@@ -130,6 +131,8 @@ struct c4iw_stats {
struct c4iw_stat stag;
struct c4iw_stat pbl;
struct c4iw_stat rqt;
+ struct c4iw_stat srqt;
+ struct c4iw_stat srq;
struct c4iw_stat ocqp;
u64 db_full;
u64 db_empty;
@@ -549,6 +552,7 @@ struct c4iw_qp {
struct kref kref;
wait_queue_head_t wait;
int sq_sig_all;
+ struct c4iw_srq *srq;
struct work_struct free_work;
struct c4iw_ucontext *ucontext;
struct c4iw_wr_wait *wr_waitp;
@@ -559,6 +563,26 @@ static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
return container_of(ibqp, struct c4iw_qp, ibqp);
}
+struct c4iw_srq {
+ struct ib_srq ibsrq;
+ struct list_head db_fc_entry;
+ struct c4iw_dev *rhp;
+ struct t4_srq wq;
+ struct sk_buff *destroy_skb;
+ u32 srq_limit;
+ u32 pdid;
+ int idx;
+ u32 flags;
+ spinlock_t lock; /* protects srq */
+ struct c4iw_wr_wait *wr_waitp;
+ bool armed;
+};
+
+static inline struct c4iw_srq *to_c4iw_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct c4iw_srq, ibsrq);
+}
+
struct c4iw_ucontext {
struct ib_ucontext ibucontext;
struct c4iw_dev_ucontext uctx;
@@ -566,6 +590,7 @@ struct c4iw_ucontext {
spinlock_t mmap_lock;
struct list_head mmaps;
struct kref kref;
+ bool is_32b_cqe;
};
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -885,7 +910,10 @@ enum conn_pre_alloc_buffers {
CN_MAX_CON_BUF
};
-#define FLOWC_LEN 80
+enum {
+ FLOWC_LEN = offsetof(struct fw_flowc_wr, mnemval[FW_FLOWC_MNEM_MAX])
+};
+
union cpl_wr_size {
struct cpl_abort_req abrt_req;
struct cpl_abort_rpl abrt_rpl;
@@ -952,6 +980,7 @@ struct c4iw_ep {
unsigned int retry_count;
int snd_win;
int rcv_win;
+ u32 snd_wscale;
struct c4iw_ep_stats stats;
};
@@ -988,7 +1017,8 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
+ u32 nr_pdid, u32 nr_srqt);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
@@ -1007,10 +1037,10 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
@@ -1037,8 +1067,14 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_context,
struct ib_udata *udata);
-int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int c4iw_destroy_srq(struct ib_srq *ib_srq);
+struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *attrs,
@@ -1075,12 +1111,19 @@ extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
enum cxgb4_bar2_qtype qtype,
unsigned int *pbar2_qid, u64 *pbar2_pa);
+int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev);
+void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
+void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq);
+void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16);
+void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);
+int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
typedef int c4iw_restrack_func(struct sk_buff *msg,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 1445918e3239..7b76e6f81aeb 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
- if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
return -ENOMEM;
mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 1feade8bb4b3..4eda6872e617 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,41 +58,6 @@ static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct ib_udata *udata)
-
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static int c4iw_ah_destroy(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- return -ENOSYS;
-}
-
void _c4iw_free_ucontext(struct kref *kref)
{
struct c4iw_ucontext *ucontext;
@@ -342,8 +307,12 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
+ props->max_srq = dev->rdev.lldi.vr->srq.size;
props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
- props->max_sge = T4_MAX_RECV_SGE;
+ props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth;
+ props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE);
+ props->max_recv_sge = T4_MAX_RECV_SGE;
+ props->max_srq_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
@@ -592,7 +561,10 @@ void c4iw_register_device(struct work_struct *work)
(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
dev->ibdev.node_type = RDMA_NODE_RNIC;
BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
@@ -608,15 +580,15 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.mmap = c4iw_mmap;
dev->ibdev.alloc_pd = c4iw_allocate_pd;
dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
- dev->ibdev.create_ah = c4iw_ah_create;
- dev->ibdev.destroy_ah = c4iw_ah_destroy;
dev->ibdev.create_qp = c4iw_create_qp;
dev->ibdev.modify_qp = c4iw_ib_modify_qp;
dev->ibdev.query_qp = c4iw_ib_query_qp;
dev->ibdev.destroy_qp = c4iw_destroy_qp;
+ dev->ibdev.create_srq = c4iw_create_srq;
+ dev->ibdev.modify_srq = c4iw_modify_srq;
+ dev->ibdev.destroy_srq = c4iw_destroy_srq;
dev->ibdev.create_cq = c4iw_create_cq;
dev->ibdev.destroy_cq = c4iw_destroy_cq;
- dev->ibdev.resize_cq = c4iw_resize_cq;
dev->ibdev.poll_cq = c4iw_poll_cq;
dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
@@ -625,12 +597,10 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
dev->ibdev.alloc_mr = c4iw_alloc_mr;
dev->ibdev.map_mr_sg = c4iw_map_mr_sg;
- dev->ibdev.attach_mcast = c4iw_multicast_attach;
- dev->ibdev.detach_mcast = c4iw_multicast_detach;
- dev->ibdev.process_mad = c4iw_process_mad;
dev->ibdev.req_notify_cq = c4iw_arm_cq;
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
+ dev->ibdev.post_srq_recv = c4iw_post_srq_recv;
dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
dev->ibdev.get_hw_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
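
The -ENOSYS stubs removed here and in iwch_provider.c above can go because the core
now checks whether an optional method is provided before dispatching to it, so an
iWARP driver no longer needs dummy AH, multicast, MAD or resize_cq entries. The
core-side shape this relies on, sketched (illustrative, not the exact uverbs code):

    if (!ib_dev->create_ah)
            return -EOPNOTSUPP;     /* verb is simply unsupported on this device */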
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index aef53305f1c3..b3203afa3b1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -147,21 +147,24 @@ static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
}
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx, int has_rq)
{
/*
* uP clears EQ contexts when the connection exits rdma mode,
* so no need to post a RESET WR for these EQs.
*/
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, wq->rq.queue,
- dma_unmap_addr(&wq->rq, mapping));
dealloc_sq(rdev, &wq->sq);
- c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
- kfree(wq->rq.sw_rq);
kfree(wq->sq.sw_sq);
- c4iw_put_qpid(rdev, wq->rq.qid, uctx);
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+
+ if (has_rq) {
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize, wq->rq.queue,
+ dma_unmap_addr(&wq->rq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ kfree(wq->rq.sw_rq);
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ }
return 0;
}
@@ -195,7 +198,8 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct t4_cq *rcq, struct t4_cq *scq,
struct c4iw_dev_ucontext *uctx,
- struct c4iw_wr_wait *wr_waitp)
+ struct c4iw_wr_wait *wr_waitp,
+ int need_rq)
{
int user = (uctx != &rdev->uctx);
struct fw_ri_res_wr *res_wr;
@@ -209,10 +213,12 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (!wq->sq.qid)
return -ENOMEM;
- wq->rq.qid = c4iw_get_qpid(rdev, uctx);
- if (!wq->rq.qid) {
- ret = -ENOMEM;
- goto free_sq_qid;
+ if (need_rq) {
+ wq->rq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->rq.qid) {
+ ret = -ENOMEM;
+ goto free_sq_qid;
+ }
}
if (!user) {
@@ -220,25 +226,31 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
GFP_KERNEL);
if (!wq->sq.sw_sq) {
ret = -ENOMEM;
- goto free_rq_qid;
+ goto free_rq_qid;//FIXME
}
- wq->rq.sw_rq = kcalloc(wq->rq.size, sizeof(*wq->rq.sw_rq),
- GFP_KERNEL);
- if (!wq->rq.sw_rq) {
- ret = -ENOMEM;
- goto free_sw_sq;
+ if (need_rq) {
+ wq->rq.sw_rq = kcalloc(wq->rq.size,
+ sizeof(*wq->rq.sw_rq),
+ GFP_KERNEL);
+ if (!wq->rq.sw_rq) {
+ ret = -ENOMEM;
+ goto free_sw_sq;
+ }
}
}
- /*
- * RQT must be a power of 2 and at least 16 deep.
- */
- wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
- wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
- if (!wq->rq.rqt_hwaddr) {
- ret = -ENOMEM;
- goto free_sw_rq;
+ if (need_rq) {
+ /*
+ * RQT must be a power of 2 and at least 16 deep.
+ */
+ wq->rq.rqt_size =
+ roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
+ wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
+ if (!wq->rq.rqt_hwaddr) {
+ ret = -ENOMEM;
+ goto free_sw_rq;
+ }
}
ret = alloc_sq(rdev, &wq->sq, user);
@@ -247,34 +259,39 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
- wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, &(wq->rq.dma_addr),
- GFP_KERNEL);
- if (!wq->rq.queue) {
- ret = -ENOMEM;
- goto free_sq;
+ if (need_rq) {
+ wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize,
+ &wq->rq.dma_addr,
+ GFP_KERNEL);
+ if (!wq->rq.queue) {
+ ret = -ENOMEM;
+ goto free_sq;
+ }
+ pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ wq->sq.queue,
+ (unsigned long long)virt_to_phys(wq->sq.queue),
+ wq->rq.queue,
+ (unsigned long long)virt_to_phys(wq->rq.queue));
+ memset(wq->rq.queue, 0, wq->rq.memsize);
+ dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
}
- pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
- wq->sq.queue,
- (unsigned long long)virt_to_phys(wq->sq.queue),
- wq->rq.queue,
- (unsigned long long)virt_to_phys(wq->rq.queue));
- memset(wq->rq.queue, 0, wq->rq.memsize);
- dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
wq->db = rdev->lldi.db_reg;
wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
&wq->sq.bar2_qid,
user ? &wq->sq.bar2_pa : NULL);
- wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
- &wq->rq.bar2_qid,
- user ? &wq->rq.bar2_pa : NULL);
+ if (need_rq)
+ wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
+ T4_BAR2_QTYPE_EGRESS,
+ &wq->rq.bar2_qid,
+ user ? &wq->rq.bar2_pa : NULL);
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
+ if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
@@ -285,7 +302,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + 2 * sizeof *res;
-
+ if (need_rq)
+ wr_len += sizeof(*res);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
@@ -296,7 +314,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- FW_RI_RES_WR_NRES_V(2) |
+ FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (uintptr_t)wr_waitp;
@@ -327,30 +345,36 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
- res++;
- res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
- res->u.sqrq.op = FW_RI_RES_OP_WRITE;
- /*
- * eqsize is the number of 64B entries plus the status page size.
- */
- eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
- rdev->hw_queue.t4_eq_status_entries;
- res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
- FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
- FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
- FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
- FW_RI_RES_WR_IQID_V(rcq->cqid));
- res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
- FW_RI_RES_WR_DCAEN_V(0) |
- FW_RI_RES_WR_DCACPU_V(0) |
- FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(3) |
- FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
- FW_RI_RES_WR_CIDXFTHRESH_V(0) |
- FW_RI_RES_WR_EQSIZE_V(eqsize));
- res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
- res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+ if (need_rq) {
+ res++;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
+ res->u.sqrq.fetchszm_to_iqid =
+ /* no host cidx updates */
+ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
+ /* don't keep in chip cache */
+ FW_RI_RES_WR_CPRIO_V(0) |
+ /* set by uP at ri_init time */
+ FW_RI_RES_WR_PCIECHN_V(0) |
+ FW_RI_RES_WR_IQID_V(rcq->cqid));
+ res->u.sqrq.dcaen_to_eqsize =
+ cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+ }
c4iw_init_wr_wait(wr_waitp);
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
@@ -363,26 +387,30 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
return 0;
free_dma:
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, wq->rq.queue,
- dma_unmap_addr(&wq->rq, mapping));
+ if (need_rq)
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize, wq->rq.queue,
+ dma_unmap_addr(&wq->rq, mapping));
free_sq:
dealloc_sq(rdev, &wq->sq);
free_hwaddr:
- c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ if (need_rq)
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
- kfree(wq->rq.sw_rq);
+ if (need_rq)
+ kfree(wq->rq.sw_rq);
free_sw_sq:
kfree(wq->sq.sw_sq);
free_rq_qid:
- c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ if (need_rq)
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
return ret;
}
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
- struct ib_send_wr *wr, int max, u32 *plenp)
+ const struct ib_send_wr *wr, int max, u32 *plenp)
{
u8 *dstp, *srcp;
u32 plen = 0;
@@ -427,7 +455,12 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
{
int i;
u32 plen = 0;
- __be64 *flitp = (__be64 *)isglp->sge;
+ __be64 *flitp;
+
+ if ((__be64 *)isglp == queue_end)
+ isglp = (struct fw_ri_isgl *)queue_start;
+
+ flitp = (__be64 *)isglp->sge;
for (i = 0; i < num_sge; i++) {
if ((plen + sg_list[i].length) < plen)
@@ -452,7 +485,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
}
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ const struct ib_send_wr *wr, u8 *len16)
{
u32 plen;
int size;
@@ -519,7 +552,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
}
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ const struct ib_send_wr *wr, u8 *len16)
{
u32 plen;
int size;
@@ -527,7 +560,15 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
if (wr->num_sge > T4_MAX_SEND_SGE)
return -EINVAL;
- wqe->write.r2 = 0;
+
+ /*
+ * The iWARP protocol supports 64-bit immediate data, but the RDMA
+ * API limits it to 32 bits.
+ */
+ if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
+ else
+ wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
if (wr->num_sge) {
@@ -561,7 +602,58 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
+ struct ib_send_wr *wr)
+{
+ memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
+ memset(immdp->r1, 0, 6);
+ immdp->op = FW_RI_DATA_IMMD;
+ immdp->immdlen = 16;
+}
+
+static void build_rdma_write_cmpl(struct t4_sq *sq,
+ struct fw_ri_rdma_write_cmpl_wr *wcwr,
+ const struct ib_send_wr *wr, u8 *len16)
+{
+ u32 plen;
+ int size;
+
+ /*
+ * This code assumes the struct fields preceding the write isgl
+ * fit in one 64B WR slot. This is because the WQE is built
+ * directly in the dma queue, and wrapping is only handled
+ * by the code building the SGLs, i.e. the "fixed part" of the WR
+ * structs must all fit in 64B. The WQE build code should probably be
+ * redesigned to avoid this restriction, but for now just add
+ * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
+ */
+ BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);
+
+ wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+ wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
+ wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
+ wcwr->r2 = 0;
+ wcwr->r3 = 0;
+
+ /* SEND_INV SGL */
+ if (wr->next->send_flags & IB_SEND_INLINE)
+ build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
+ else
+ build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
+ &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);
+
+ /* WRITE SGL */
+ build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
+ wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);
+
+ size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ wcwr->plen = cpu_to_be32(plen);
+ *len16 = DIV_ROUND_UP(size, 16);
+}
+
+static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
+ u8 *len16)
{
if (wr->num_sge > 1)
return -EINVAL;
@@ -590,8 +682,74 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
return 0;
}
+static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
+{
+ bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
+ bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
+ struct t4_swsqe *swsqe;
+ union t4_wr *wqe;
+ u16 write_wrid;
+ u8 len16;
+ u16 idx;
+
+ /*
+ * The sw_sq entries still look like a WRITE and a SEND and consume
+ * 2 slots. The FW WR, however, will be a single uber-WR.
+ */
+ wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+ qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+ build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
+
+ /* WRITE swsqe */
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ swsqe->opcode = FW_RI_RDMA_WRITE;
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = write_signaled;
+ swsqe->flushed = 0;
+ swsqe->wr_id = wr->wr_id;
+ if (c4iw_wr_log) {
+ swsqe->sge_ts =
+ cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
+ swsqe->host_time = ktime_get();
+ }
+
+ write_wrid = qhp->wq.sq.pidx;
+
+ /* just bump the sw_sq */
+ qhp->wq.sq.in_use++;
+ if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
+ qhp->wq.sq.pidx = 0;
+
+ /* SEND_WITH_INV swsqe */
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = send_signaled;
+ swsqe->flushed = 0;
+ swsqe->wr_id = wr->next->wr_id;
+ if (c4iw_wr_log) {
+ swsqe->sge_ts =
+ cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
+ swsqe->host_time = ktime_get();
+ }
+
+ wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
+ wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
+
+ init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
+ write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
+ t4_sq_produce(&qhp->wq, len16);
+ idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+
+ t4_ring_sq_db(&qhp->wq, idx, wqe);
+}
+
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
- struct ib_recv_wr *wr, u8 *len16)
+ const struct ib_recv_wr *wr, u8 *len16)
{
int ret;
@@ -605,8 +763,22 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
return 0;
}
+static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
+ u8 *len16)
+{
+ int ret;
+
+ ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
+ &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+ if (ret)
+ return ret;
+ *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
+ wr->num_sge * sizeof(struct fw_ri_sge), 16);
+ return 0;
+}
+
static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
- struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
u8 *len16)
{
__be64 *p = (__be64 *)fr->pbl;
@@ -638,8 +810,8 @@ static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
}
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
- bool dsgl_supported)
+ const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ u8 *len16, bool dsgl_supported)
{
struct fw_ri_immd *imdp;
__be64 *p;
@@ -701,7 +873,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
+ u8 *len16)
{
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0;
@@ -721,7 +894,7 @@ static void free_qp_work(struct work_struct *work)
pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
if (ucontext)
c4iw_put_ucontext(ucontext);
@@ -804,6 +977,9 @@ static int ib_to_fw_opcode(int ib_opcode)
case IB_WR_RDMA_WRITE:
opcode = FW_RI_RDMA_WRITE;
break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ opcode = FW_RI_WRITE_IMMEDIATE;
+ break;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_READ_WITH_INV:
opcode = FW_RI_READ_REQ;
@@ -820,7 +996,8 @@ static int ib_to_fw_opcode(int ib_opcode)
return opcode;
}
-static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int complete_sq_drain_wr(struct c4iw_qp *qhp,
+ const struct ib_send_wr *wr)
{
struct t4_cqe cqe = {};
struct c4iw_cq *schp;
@@ -858,8 +1035,9 @@ static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
return 0;
}
-static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int ret = 0;
@@ -874,7 +1052,8 @@ static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
return ret;
}
-static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+static void complete_rq_drain_wr(struct c4iw_qp *qhp,
+ const struct ib_recv_wr *wr)
{
struct t4_cqe cqe = {};
struct c4iw_cq *rchp;
@@ -906,7 +1085,8 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
}
}
-static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
+ const struct ib_recv_wr *wr)
{
while (wr) {
complete_rq_drain_wr(qhp, wr);
@@ -914,14 +1094,15 @@ static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
}
}
-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int err = 0;
u8 len16 = 0;
enum fw_wr_opcodes fw_opcode = 0;
enum fw_ri_wr_flags fw_flags;
struct c4iw_qp *qhp;
+ struct c4iw_dev *rhp;
union t4_wr *wqe = NULL;
u32 num_wrs;
struct t4_swsqe *swsqe;
@@ -929,6 +1110,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u16 idx = 0;
qhp = to_c4iw_qp(ibqp);
+ rhp = qhp->rhp;
spin_lock_irqsave(&qhp->lock, flag);
/*
@@ -946,6 +1128,30 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
return -ENOMEM;
}
+
+ /*
+ * Fastpath for the NVMe-oF target WRITE + SEND_WITH_INV WR chain, which
+ * is the response for small NVMe-oF READ requests. If the chain is
+ * exactly a WRITE->SEND_WITH_INV and the SGL depths and lengths
+ * meet the requirements of the fw_ri_write_cmpl_wr work request,
+ * then build and post the write_cmpl WR. If any of the tests
+ * below are not true, then we continue on with the traditional WRITE
+ * and SEND WRs.
+ */
+ if (qhp->rhp->rdev.lldi.write_cmpl_support &&
+ CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
+ CHELSIO_T5 &&
+ wr && wr->next && !wr->next->next &&
+ wr->opcode == IB_WR_RDMA_WRITE &&
+ wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
+ wr->next->opcode == IB_WR_SEND_WITH_INV &&
+ wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
+ wr->next->num_sge == 1 && num_wrs >= 2) {
+ post_write_cmpl(qhp, wr);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return 0;
+ }
+
while (wr) {
if (num_wrs == 0) {
err = -ENOMEM;
@@ -973,6 +1179,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->opcode = FW_RI_SEND_WITH_INV;
err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
+ err = -EINVAL;
+ break;
+ }
+ fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
+ /* fall through */
case IB_WR_RDMA_WRITE:
fw_opcode = FW_RI_RDMA_WRITE_WR;
swsqe->opcode = FW_RI_RDMA_WRITE;
@@ -983,8 +1196,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
- c4iw_invalidate_mr(qhp->rhp,
- wr->sg_list[0].lkey);
+ c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
fw_flags = FW_RI_RDMA_READ_INVALIDATE;
} else {
fw_flags = 0;
@@ -1000,7 +1212,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
swsqe->opcode = FW_RI_FAST_REGISTER;
- if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
+ if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
!mhp->attr.state && mhp->mpl_len <= 2) {
fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
@@ -1009,7 +1221,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_FR_NSMR_WR;
err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
mhp, &len16,
- qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+ rhp->rdev.lldi.ulptx_memwrite_dsgl);
if (err)
break;
}
@@ -1022,7 +1234,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
err = build_inv_stag(wqe, wr, &len16);
- c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
+ c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
break;
default:
pr_warn("%s post of type=%d TBD!\n", __func__,
@@ -1041,7 +1253,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->wr_id = wr->wr_id;
if (c4iw_wr_log) {
swsqe->sge_ts = cxgb4_read_sge_timestamp(
- qhp->rhp->rdev.lldi.ports[0]);
+ rhp->rdev.lldi.ports[0]);
swsqe->host_time = ktime_get();
}
@@ -1055,7 +1267,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
- if (!qhp->rhp->rdev.status_page->db_off) {
+ if (!rhp->rdev.status_page->db_off) {
t4_ring_sq_db(&qhp->wq, idx, wqe);
spin_unlock_irqrestore(&qhp->lock, flag);
} else {
@@ -1065,8 +1277,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return err;
}
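
For reference, the write_cmpl fastpath tested near the top of c4iw_post_send() only fires for a two-WR chain of the shape below. This is a hypothetical caller-side sketch against the in-kernel verbs API; qp, the SGE contents, remote_addr and rkey are assumed to be set up elsewhere.

	struct ib_sge write_sge;	/* payload; any non-zero length */
	struct ib_sge cqe_sge;		/* length must be 16 (T4_WRITE_CMPL_MAX_CQE) */
	struct ib_rdma_wr write_wr = {};
	struct ib_send_wr send_wr = {};
	const struct ib_send_wr *bad_wr;

	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.sg_list = &write_sge;
	write_wr.wr.num_sge = 1;	/* <= T4_WRITE_CMPL_MAX_SGL */
	write_wr.remote_addr = remote_addr;
	write_wr.rkey = rkey;
	write_wr.wr.next = &send_wr;	/* exactly one trailing WR */

	send_wr.opcode = IB_WR_SEND_WITH_INV;
	send_wr.ex.invalidate_rkey = rkey;
	send_wr.sg_list = &cqe_sge;
	send_wr.num_sge = 1;
	send_wr.next = NULL;

	ib_post_send(qp, &write_wr.wr, &bad_wr);

A chain that misses any of these checks simply falls through to the per-WR loop, so the fastpath is purely an optimization.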
-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
struct c4iw_qp *qhp;
@@ -1145,6 +1357,89 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return err;
}
+static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
+ u64 wr_id, u8 len16)
+{
+ struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
+
+ pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
+ __func__, srq->cidx, srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->ooo_count,
+ (unsigned long long)wr_id, srq->pending_cidx,
+ srq->pending_pidx, srq->pending_in_use);
+ pwr->wr_id = wr_id;
+ pwr->len16 = len16;
+ memcpy(&pwr->wqe, wqe, len16 * 16);
+ t4_srq_produce_pending_wr(srq);
+}
+
+int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ union t4_recv_wr *wqe, lwqe;
+ struct c4iw_srq *srq;
+ unsigned long flag;
+ u8 len16 = 0;
+ u16 idx = 0;
+ int err = 0;
+ u32 num_wrs;
+
+ srq = to_c4iw_srq(ibsrq);
+ spin_lock_irqsave(&srq->lock, flag);
+ num_wrs = t4_srq_avail(&srq->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&srq->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (wr->num_sge > T4_MAX_RECV_SGE) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &lwqe;
+ if (num_wrs)
+ err = build_srq_recv(wqe, wr, &len16);
+ else
+ err = -ENOMEM;
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe->recv.opcode = FW_RI_RECV_WR;
+ wqe->recv.r1 = 0;
+ wqe->recv.wrid = srq->wq.pidx;
+ wqe->recv.r2[0] = 0;
+ wqe->recv.r2[1] = 0;
+ wqe->recv.r2[2] = 0;
+ wqe->recv.len16 = len16;
+
+ if (srq->wq.ooo_count ||
+ srq->wq.pending_in_use ||
+ srq->wq.sw_rq[srq->wq.pidx].valid) {
+ defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
+ } else {
+ srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
+ srq->wq.sw_rq[srq->wq.pidx].valid = 1;
+ c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
+ pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
+ __func__, srq->wq.cidx,
+ srq->wq.pidx, srq->wq.wq_pidx,
+ srq->wq.in_use,
+ (unsigned long long)wr->wr_id);
+ t4_srq_produce(&srq->wq, len16);
+ idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+ }
+ wr = wr->next;
+ num_wrs--;
+ }
+ if (idx)
+ t4_ring_srq_db(&srq->wq, idx, len16, wqe);
+ spin_unlock_irqrestore(&srq->lock, flag);
+ return err;
+}
+
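A minimal caller-side sketch of replenishing this SRQ through the verbs layer; srq, buf, buf_len and mr are assumed to exist and the buffer to be registered. If out-of-order completions or earlier deferred WRs are outstanding, the function above parks the WR in pending_wrs and replays it in order later.

	struct ib_sge sge = {
		.addr   = (uintptr_t)buf,
		.length = buf_len,
		.lkey   = mr->lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = (uintptr_t)buf,
		.sg_list = &sge,
		.num_sge = 1,		/* must be <= T4_MAX_RECV_SGE */
	};
	const struct ib_recv_wr *bad_wr;
	int ret = ib_post_srq_recv(srq, &wr, &bad_wr);
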
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
u8 *ecode)
{
@@ -1321,7 +1616,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
struct c4iw_cq *schp)
{
int count;
- int rq_flushed, sq_flushed;
+ int rq_flushed = 0, sq_flushed;
unsigned long flag;
pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
@@ -1340,11 +1635,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
return;
}
qhp->wq.flushed = 1;
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
c4iw_flush_hw_cq(rchp, qhp);
- c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
- rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ if (!qhp->srq) {
+ c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+ rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ }
if (schp != rchp)
c4iw_flush_hw_cq(schp, qhp);
@@ -1388,7 +1685,7 @@ static void flush_qp(struct c4iw_qp *qhp)
schp = to_c4iw_cq(qhp->ibqp.send_cq);
if (qhp->ibqp.uobject) {
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1517,16 +1814,21 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
- wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ if (qhp->srq) {
+ wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
+ qhp->srq->idx);
+ } else {
+ wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
+ wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
+ rhp->rdev.lldi.vr->rq.start);
+ }
wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
- wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
- wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
- rhp->rdev.lldi.vr->rq.start);
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
@@ -1643,7 +1945,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_RTS:
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
if (!internal) {
@@ -1656,7 +1958,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
goto err;
break;
case C4IW_QP_STATE_TERMINATE:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_TERMINATE);
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
@@ -1673,7 +1975,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
break;
case C4IW_QP_STATE_ERROR:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_ERROR);
if (!internal) {
abort = 1;
@@ -1819,7 +2121,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_cq *schp;
struct c4iw_cq *rchp;
struct c4iw_create_qp_resp uresp;
- unsigned int sqsize, rqsize;
+ unsigned int sqsize, rqsize = 0;
struct c4iw_ucontext *ucontext;
int ret;
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
@@ -1840,11 +2142,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
return ERR_PTR(-EINVAL);
- if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
- return ERR_PTR(-E2BIG);
- rqsize = attrs->cap.max_recv_wr + 1;
- if (rqsize < 8)
- rqsize = 8;
+ if (!attrs->srq) {
+ if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
+ return ERR_PTR(-E2BIG);
+ rqsize = attrs->cap.max_recv_wr + 1;
+ if (rqsize < 8)
+ rqsize = 8;
+ }
if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
return ERR_PTR(-E2BIG);
@@ -1869,19 +2173,23 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
qhp->wq.sq.flush_cidx = -1;
- qhp->wq.rq.size = rqsize;
- qhp->wq.rq.memsize =
- (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
- sizeof(*qhp->wq.rq.queue);
+ if (!attrs->srq) {
+ qhp->wq.rq.size = rqsize;
+ qhp->wq.rq.memsize =
+ (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*qhp->wq.rq.queue);
+ }
if (ucontext) {
qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
- qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
+ if (!attrs->srq)
+ qhp->wq.rq.memsize =
+ roundup(qhp->wq.rq.memsize, PAGE_SIZE);
}
ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
- qhp->wr_waitp);
+ qhp->wr_waitp, !attrs->srq);
if (ret)
goto err_free_wr_wait;
@@ -1894,10 +2202,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ if (!attrs->srq) {
+ qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
+ qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ }
qhp->attr.state = C4IW_QP_STATE_IDLE;
qhp->attr.next_state = C4IW_QP_STATE_IDLE;
qhp->attr.enable_rdma_read = 1;
@@ -1922,21 +2232,27 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = -ENOMEM;
goto err_remove_handle;
}
- rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
- if (!rq_key_mm) {
- ret = -ENOMEM;
- goto err_free_sq_key;
+ if (!attrs->srq) {
+ rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
+ if (!rq_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_sq_key;
+ }
}
sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
if (!sq_db_key_mm) {
ret = -ENOMEM;
goto err_free_rq_key;
}
- rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
- if (!rq_db_key_mm) {
- ret = -ENOMEM;
- goto err_free_sq_db_key;
+ if (!attrs->srq) {
+ rq_db_key_mm =
+ kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
+ if (!rq_db_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_sq_db_key;
+ }
}
+ memset(&uresp, 0, sizeof(uresp));
if (t4_sq_onchip(&qhp->wq.sq)) {
ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
GFP_KERNEL);
@@ -1945,30 +2261,35 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
goto err_free_rq_db_key;
}
uresp.flags = C4IW_QPF_ONCHIP;
- } else
- uresp.flags = 0;
+ }
+ if (rhp->rdev.lldi.write_w_imm_support)
+ uresp.flags |= C4IW_QPF_WRITE_W_IMM;
uresp.qid_mask = rhp->rdev.qpmask;
uresp.sqid = qhp->wq.sq.qid;
uresp.sq_size = qhp->wq.sq.size;
uresp.sq_memsize = qhp->wq.sq.memsize;
- uresp.rqid = qhp->wq.rq.qid;
- uresp.rq_size = qhp->wq.rq.size;
- uresp.rq_memsize = qhp->wq.rq.memsize;
+ if (!attrs->srq) {
+ uresp.rqid = qhp->wq.rq.qid;
+ uresp.rq_size = qhp->wq.rq.size;
+ uresp.rq_memsize = qhp->wq.rq.memsize;
+ }
spin_lock(&ucontext->mmap_lock);
if (ma_sync_key_mm) {
uresp.ma_sync_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- } else {
- uresp.ma_sync_key = 0;
}
uresp.sq_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- uresp.rq_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
+ if (!attrs->srq) {
+ uresp.rq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
uresp.sq_db_gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- uresp.rq_db_gts_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
+ if (!attrs->srq) {
+ uresp.rq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
@@ -1977,18 +2298,23 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
sq_key_mm->addr = qhp->wq.sq.phys_addr;
sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
insert_mmap(ucontext, sq_key_mm);
- rq_key_mm->key = uresp.rq_key;
- rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
- rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
- insert_mmap(ucontext, rq_key_mm);
+ if (!attrs->srq) {
+ rq_key_mm->key = uresp.rq_key;
+ rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+ rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_mmap(ucontext, rq_key_mm);
+ }
sq_db_key_mm->key = uresp.sq_db_gts_key;
sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
sq_db_key_mm->len = PAGE_SIZE;
insert_mmap(ucontext, sq_db_key_mm);
- rq_db_key_mm->key = uresp.rq_db_gts_key;
- rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
- rq_db_key_mm->len = PAGE_SIZE;
- insert_mmap(ucontext, rq_db_key_mm);
+ if (!attrs->srq) {
+ rq_db_key_mm->key = uresp.rq_db_gts_key;
+ rq_db_key_mm->addr =
+ (u64)(unsigned long)qhp->wq.rq.bar2_pa;
+ rq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, rq_db_key_mm);
+ }
if (ma_sync_key_mm) {
ma_sync_key_mm->key = uresp.ma_sync_key;
ma_sync_key_mm->addr =
@@ -2001,7 +2327,19 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
c4iw_get_ucontext(ucontext);
qhp->ucontext = ucontext;
}
+ if (!attrs->srq) {
+ qhp->wq.qp_errp =
+ &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
+ } else {
+ qhp->wq.qp_errp =
+ &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
+ qhp->wq.srqidxp =
+ &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
+ }
+
qhp->ibqp.qp_num = qhp->wq.sq.qid;
+ if (attrs->srq)
+ qhp->srq = to_c4iw_srq(attrs->srq);
INIT_LIST_HEAD(&qhp->db_fc_entry);
pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
@@ -2011,18 +2349,20 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
err_free_ma_sync_key:
kfree(ma_sync_key_mm);
err_free_rq_db_key:
- kfree(rq_db_key_mm);
+ if (!attrs->srq)
+ kfree(rq_db_key_mm);
err_free_sq_db_key:
kfree(sq_db_key_mm);
err_free_rq_key:
- kfree(rq_key_mm);
+ if (!attrs->srq)
+ kfree(rq_key_mm);
err_free_sq_key:
kfree(sq_key_mm);
err_remove_handle:
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err_destroy_qp:
destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait:
c4iw_put_wr_wait(qhp->wr_waitp);
err_free_qhp:
@@ -2088,6 +2428,45 @@ struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
+void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
+{
+ struct ib_event event = {};
+
+ event.device = &srq->rhp->ibdev;
+ event.element.srq = &srq->ibsrq;
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ ib_dispatch_event(&event);
+}
+
+int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
+ int ret = 0;
+
+ /*
+ * XXX: a zero attr mask from user space acts as a SW interrupt,
+ * signalling that srq_limit was reached.
+ */
+ if (udata && !srq_attr_mask) {
+ c4iw_dispatch_srq_limit_reached_event(srq);
+ goto out;
+ }
+
+ /* No support for SRQ resize (IB_SRQ_MAX_WR) yet. */
+ if (srq_attr_mask & IB_SRQ_MAX_WR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
+ srq->armed = true;
+ srq->srq_limit = attr->srq_limit;
+ }
+out:
+ return ret;
+}
+
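A kernel consumer arms the limit through the standard verbs call; a minimal sketch, assuming srq is a previously created SRQ and 16 is an illustrative threshold:

	struct ib_srq_attr attr = {
		.srq_limit = 16,	/* fire IB_EVENT_SRQ_LIMIT_REACHED below this */
	};

	/* Only IB_SRQ_LIMIT is honored here; IB_SRQ_MAX_WR returns -EINVAL. */
	int ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
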
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr)
{
@@ -2104,3 +2483,359 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
+
+static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
+{
+ struct c4iw_rdev *rdev = &srq->rhp->rdev;
+ struct sk_buff *skb = srq->destroy_skb;
+ struct t4_srq *wq = &srq->wq;
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+
+ wr_len = sizeof(*res_wr) + sizeof(*res);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
+ FW_RI_RES_WR_NRES_V(1) |
+ FW_WR_COMPL_F);
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (uintptr_t)wr_waitp;
+ res = res_wr->res;
+ res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
+ res->u.srq.op = FW_RI_RES_OP_RESET;
+ res->u.srq.srqid = cpu_to_be32(srq->idx);
+ res->u.srq.eqid = cpu_to_be32(wq->qid);
+
+ c4iw_init_wr_wait(wr_waitp);
+ c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, wq->queue,
+ pci_unmap_addr(wq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
+ kfree(wq->sw_rq);
+ c4iw_put_qpid(rdev, wq->qid, uctx);
+}
+
+static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
+{
+ struct c4iw_rdev *rdev = &srq->rhp->rdev;
+ int user = (uctx != &rdev->uctx);
+ struct t4_srq *wq = &srq->wq;
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ struct sk_buff *skb;
+ int wr_len;
+ int eqsize;
+ int ret = -ENOMEM;
+
+ wq->qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->qid)
+ goto err;
+
+ if (!user) {
+ wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
+ GFP_KERNEL);
+ if (!wq->sw_rq)
+ goto err_put_qpid;
+ wq->pending_wrs = kcalloc(srq->wq.size,
+ sizeof(*srq->wq.pending_wrs),
+ GFP_KERNEL);
+ if (!wq->pending_wrs)
+ goto err_free_sw_rq;
+ }
+
+ wq->rqt_size = wq->size;
+ wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
+ if (!wq->rqt_hwaddr)
+ goto err_free_pending_wrs;
+ wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
+ T4_RQT_ENTRY_SHIFT;
+
+ wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, &wq->dma_addr,
+ GFP_KERNEL);
+ if (!wq->queue)
+ goto err_free_rqtpool;
+
+ memset(wq->queue, 0, wq->memsize);
+ pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+
+ wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, T4_BAR2_QTYPE_EGRESS,
+ &wq->bar2_qid,
+ user ? &wq->bar2_pa : NULL);
+
+ /*
+ * User mode must have bar2 access.
+ */
+
+ if (user && !wq->bar2_va) {
+ pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
+ pci_name(rdev->lldi.pdev), wq->qid);
+ ret = -EINVAL;
+ goto err_free_queue;
+ }
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof(*res_wr) + sizeof(*res);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ goto err_free_queue;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
+ FW_RI_RES_WR_NRES_V(1) |
+ FW_WR_COMPL_F);
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (uintptr_t)wr_waitp;
+ res = res_wr->res;
+ res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
+ res->u.srq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->size * T4_RQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
+ res->u.srq.eqid = cpu_to_be32(wq->qid);
+ res->u.srq.fetchszm_to_iqid =
+ /* no host cidx updates */
+ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
+ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
+ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
+ FW_RI_RES_WR_FETCHRO_V(0)); /* relaxed_ordering */
+ res->u.srq.dcaen_to_eqsize =
+ cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+ res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
+ res->u.srq.srqid = cpu_to_be32(srq->idx);
+ res->u.srq.pdid = cpu_to_be32(srq->pdid);
+ res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
+ res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
+ rdev->lldi.vr->rq.start);
+
+ c4iw_init_wr_wait(wr_waitp);
+
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
+ if (ret)
+ goto err_free_queue;
+
+ pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
+ " bar2_addr %p rqt addr 0x%x size %d\n",
+ __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
+ (u64)virt_to_phys(wq->queue), wq->bar2_va,
+ wq->rqt_hwaddr, wq->rqt_size);
+
+ return 0;
+err_free_queue:
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, wq->queue,
+ pci_unmap_addr(wq, mapping));
+err_free_rqtpool:
+ c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
+err_free_pending_wrs:
+ if (!user)
+ kfree(wq->pending_wrs);
+err_free_sw_rq:
+ if (!user)
+ kfree(wq->sw_rq);
+err_put_qpid:
+ c4iw_put_qpid(rdev, wq->qid, uctx);
+err:
+ return ret;
+}
+
+void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
+{
+ u64 *src, *dst;
+
+ src = (u64 *)wqe;
+ dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
+ while (len16) {
+ *dst++ = *src++;
+ if (dst >= (u64 *)&srq->queue[srq->size])
+ dst = (u64 *)srq->queue;
+ *dst++ = *src++;
+ if (dst >= (u64 *)&srq->queue[srq->size])
+ dst = (u64 *)srq->queue;
+ len16--;
+ }
+}
+
+struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_srq *srq;
+ struct c4iw_pd *php;
+ struct c4iw_create_srq_resp uresp;
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
+ int rqsize;
+ int ret;
+ int wr_len;
+
+ pr_debug("%s ib_pd %p\n", __func__, pd);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ if (!rhp->rdev.lldi.vr->srq.size)
+ return ERR_PTR(-EINVAL);
+ if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
+ return ERR_PTR(-E2BIG);
+ if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
+ return ERR_PTR(-E2BIG);
+
+ /*
+ * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
+ */
+ rqsize = attrs->attr.max_wr + 1;
+ rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
+
+ ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!srq->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_srq;
+ }
+
+ srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
+ if (srq->idx < 0) {
+ ret = -ENOMEM;
+ goto err_free_wr_wait;
+ }
+
+ wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+ srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!srq->destroy_skb) {
+ ret = -ENOMEM;
+ goto err_free_srq_idx;
+ }
+
+ srq->rhp = rhp;
+ srq->pdid = php->pdid;
+
+ srq->wq.size = rqsize;
+ srq->wq.memsize =
+ (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*srq->wq.queue);
+ if (ucontext)
+ srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
+
+ ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
+ &rhp->rdev.uctx, srq->wr_waitp);
+ if (ret)
+ goto err_free_skb;
+ attrs->attr.max_wr = rqsize - 1;
+
+ if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
+ srq->flags = T4_SRQ_LIMIT_SUPPORT;
+
+ ret = insert_handle(rhp, &rhp->qpidr, srq, srq->wq.qid);
+ if (ret)
+ goto err_free_queue;
+
+ if (udata) {
+ srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
+ if (!srq_key_mm) {
+ ret = -ENOMEM;
+ goto err_remove_handle;
+ }
+ srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
+ if (!srq_db_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_srq_key_mm;
+ }
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.flags = srq->flags;
+ uresp.qid_mask = rhp->rdev.qpmask;
+ uresp.srqid = srq->wq.qid;
+ uresp.srq_size = srq->wq.size;
+ uresp.srq_memsize = srq->wq.memsize;
+ uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.srq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.srq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_free_srq_db_key_mm;
+ srq_key_mm->key = uresp.srq_key;
+ srq_key_mm->addr = virt_to_phys(srq->wq.queue);
+ srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
+ insert_mmap(ucontext, srq_key_mm);
+ srq_db_key_mm->key = uresp.srq_db_gts_key;
+ srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
+ srq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, srq_db_key_mm);
+ }
+
+ pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
+ __func__, srq->wq.qid, srq->idx, srq->wq.size,
+ (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
+
+ spin_lock_init(&srq->lock);
+ return &srq->ibsrq;
+err_free_srq_db_key_mm:
+ kfree(srq_db_key_mm);
+err_free_srq_key_mm:
+ kfree(srq_key_mm);
+err_remove_handle:
+ remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
+err_free_queue:
+ free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ srq->wr_waitp);
+err_free_skb:
+ if (srq->destroy_skb)
+ kfree_skb(srq->destroy_skb);
+err_free_srq_idx:
+ c4iw_free_srq_idx(&rhp->rdev, srq->idx);
+err_free_wr_wait:
+ c4iw_put_wr_wait(srq->wr_waitp);
+err_free_srq:
+ kfree(srq);
+ return ERR_PTR(ret);
+}
+
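For completeness, how a kernel ULP reaches c4iw_create_srq() through the core API; a sketch assuming pd is a valid protection domain. Note that max_wr is rounded up to a power of two (minimum 16) and handed back, minus one, in attrs->attr.max_wr.

	struct ib_srq_init_attr init_attr = {
		.attr = {
			.max_wr  = 128,
			.max_sge = 1,	/* must be <= T4_MAX_RECV_SGE */
		},
	};
	struct ib_srq *srq = ib_create_srq(pd, &init_attr);

	if (IS_ERR(srq))
		return PTR_ERR(srq);
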
+int c4iw_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_srq *srq;
+ struct c4iw_ucontext *ucontext;
+
+ srq = to_c4iw_srq(ibsrq);
+ rhp = srq->rhp;
+
+ pr_debug("%s id %d\n", __func__, srq->wq.qid);
+
+ remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
+ ucontext = ibsrq->uobject ?
+ to_c4iw_ucontext(ibsrq->uobject->context) : NULL;
+ free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ srq->wr_waitp);
+ c4iw_free_srq_idx(&rhp->rdev, srq->idx);
+ c4iw_put_wr_wait(srq->wr_waitp);
+ kfree(srq);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 0ef25ae05e6f..57ed26b3cc21 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -53,7 +53,8 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
+ u32 nr_pdid, u32 nr_srqt)
{
int err = 0;
err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
@@ -67,7 +68,17 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
nr_pdid, 1, 0);
if (err)
goto pdid_err;
+ if (!nr_srqt)
+ err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
+ 1, 1, 0);
+ else
+ err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
+ nr_srqt, 0, 0);
+ if (err)
+ goto srq_err;
return 0;
+ srq_err:
+ c4iw_id_table_free(&rdev->resource.pdid_table);
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
@@ -371,13 +382,21 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
unsigned rqt_start, rqt_chunk, rqt_top;
+ int skip = 0;
rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
if (!rdev->rqt_pool)
return -ENOMEM;
- rqt_start = rdev->lldi.vr->rq.start;
- rqt_chunk = rdev->lldi.vr->rq.size;
+ /*
+ * If SRQs are supported, then never use the first RQE from
+ * the RQT region. This is because HW uses RQT index 0 as NULL.
+ */
+ if (rdev->lldi.vr->srq.size)
+ skip = T4_RQT_ENTRY_SIZE;
+
+ rqt_start = rdev->lldi.vr->rq.start + skip;
+ rqt_chunk = rdev->lldi.vr->rq.size - skip;
rqt_top = rqt_start + rqt_chunk;
while (rqt_start < rqt_top) {
@@ -405,6 +424,32 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
+int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev)
+{
+ int idx;
+
+ idx = c4iw_id_alloc(&rdev->resource.srq_table);
+ mutex_lock(&rdev->stats.lock);
+ if (idx == -1) {
+ rdev->stats.srqt.fail++;
+ mutex_unlock(&rdev->stats.lock);
+ return -ENOMEM;
+ }
+ rdev->stats.srqt.cur++;
+ if (rdev->stats.srqt.cur > rdev->stats.srqt.max)
+ rdev->stats.srqt.max = rdev->stats.srqt.cur;
+ mutex_unlock(&rdev->stats.lock);
+ return idx;
+}
+
+void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx)
+{
+ c4iw_id_free(&rdev->resource.srq_table, idx);
+ mutex_lock(&rdev->stats.lock);
+ rdev->stats.srqt.cur--;
+ mutex_unlock(&rdev->stats.lock);
+}
+
/*
* On-Chip QP Memory.
*/
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 8369c7c8de83..e42021fd6fd6 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -52,12 +52,16 @@ struct t4_status_page {
__be16 pidx;
u8 qp_err; /* flit 1 - sw owns */
u8 db_off;
- u8 pad;
+ u8 pad[2];
u16 host_wq_pidx;
u16 host_cidx;
u16 host_pidx;
+ u16 pad2;
+ u32 srqidx;
};
+#define T4_RQT_ENTRY_SHIFT 6
+#define T4_RQT_ENTRY_SIZE BIT(T4_RQT_ENTRY_SHIFT)
#define T4_EQ_ENTRY_SIZE 64
#define T4_SQ_NUM_SLOTS 5
@@ -87,6 +91,9 @@ static inline int t4_max_fr_depth(int use_dsgl)
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
+#define T4_WRITE_CMPL_MAX_SGL 4
+#define T4_WRITE_CMPL_MAX_CQE 16
+
union t4_wr {
struct fw_ri_res_wr res;
struct fw_ri_wr ri;
@@ -97,6 +104,7 @@ union t4_wr {
struct fw_ri_fr_nsmr_wr fr;
struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
struct fw_ri_inv_lstag_wr inv;
+ struct fw_ri_rdma_write_cmpl_wr write_cmpl;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
@@ -179,9 +187,32 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ struct {
+ __be32 stag;
+ __be32 msn;
+ __be32 reserved;
+ __be32 abs_rqe_idx;
+ } srcqe;
+ struct {
+ __be32 mo;
+ __be32 msn;
+ /*
+ * Use a union for the immediate data so it is consistent with the
+ * stack's 32-bit data and the iWARP spec's 64-bit data.
+ */
+ union {
+ struct {
+ __be32 imm_data32;
+ u32 reserved;
+ } ib_imm_data;
+ __be64 imm_data64;
+ } iw_imm_data;
+ } imm_data_rcqe;
+
u64 drain_cookie;
+ __be64 flits[3];
} u;
- __be64 reserved;
+ __be64 reserved[3];
__be64 bits_type_ts;
};
@@ -237,6 +268,9 @@ struct t4_cqe {
/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
+#define CQE_ABS_RQE_IDX(x) (be32_to_cpu((x)->u.srcqe.abs_rqe_idx))
+#define CQE_IMM_DATA(x) ( \
+ (x)->u.imm_data_rcqe.iw_imm_data.ib_imm_data.imm_data32)
/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
@@ -320,6 +354,7 @@ struct t4_swrqe {
u64 wr_id;
ktime_t host_time;
u64 sge_ts;
+ int valid;
};
struct t4_rq {
@@ -349,8 +384,98 @@ struct t4_wq {
void __iomem *db;
struct c4iw_rdev *rdev;
int flushed;
+ u8 *qp_errp;
+ u32 *srqidxp;
+};
+
+struct t4_srq_pending_wr {
+ u64 wr_id;
+ union t4_recv_wr wqe;
+ u8 len16;
+};
+
+struct t4_srq {
+ union t4_recv_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swrqe *sw_rq;
+ void __iomem *bar2_va;
+ u64 bar2_pa;
+ size_t memsize;
+ u32 bar2_qid;
+ u32 qid;
+ u32 msn;
+ u32 rqt_hwaddr;
+ u32 rqt_abs_idx;
+ u16 rqt_size;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+ u16 wq_pidx;
+ u16 wq_pidx_inc;
+ u16 in_use;
+ struct t4_srq_pending_wr *pending_wrs;
+ u16 pending_cidx;
+ u16 pending_pidx;
+ u16 pending_in_use;
+ u16 ooo_count;
};
+static inline u32 t4_srq_avail(struct t4_srq *srq)
+{
+ return srq->size - 1 - srq->in_use;
+}
+
+static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)
+{
+ srq->in_use++;
+ if (++srq->pidx == srq->size)
+ srq->pidx = 0;
+ srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+ if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)
+ srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;
+ srq->queue[srq->size].status.host_pidx = srq->pidx;
+}
+
+static inline void t4_srq_produce_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use++;
+ srq->in_use++;
+ if (++srq->pending_pidx == srq->size)
+ srq->pending_pidx = 0;
+}
+
+static inline void t4_srq_consume_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use--;
+ srq->in_use--;
+ if (++srq->pending_cidx == srq->size)
+ srq->pending_cidx = 0;
+}
+
+static inline void t4_srq_produce_ooo(struct t4_srq *srq)
+{
+ srq->in_use--;
+ srq->ooo_count++;
+}
+
+static inline void t4_srq_consume_ooo(struct t4_srq *srq)
+{
+ srq->cidx++;
+ if (srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+ srq->ooo_count--;
+}
+
+static inline void t4_srq_consume(struct t4_srq *srq)
+{
+ srq->in_use--;
+ if (++srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+}
+
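These helpers keep one slot permanently unused so that pidx == cidx always means empty rather than full. A tiny self-contained check of the accounting, with illustrative values:

	struct t4_srq srq = {
		.size   = 16,
		.in_use = 5,
	};

	/* 16 - 1 - 5: one slot is never posted into. */
	WARN_ON(t4_srq_avail(&srq) != 10);
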
static inline int t4_rqes_posted(struct t4_wq *wq)
{
return wq->rq.in_use;
@@ -384,7 +509,6 @@ static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_rq_consume(struct t4_wq *wq)
{
wq->rq.in_use--;
- wq->rq.msn++;
if (++wq->rq.cidx == wq->rq.size)
wq->rq.cidx = 0;
}
@@ -464,6 +588,25 @@ static inline void pio_copy(u64 __iomem *dst, u64 *src)
}
}
+static inline void t4_ring_srq_db(struct t4_srq *srq, u16 inc, u8 len16,
+ union t4_recv_wr *wqe)
+{
+ /* Flush host queue memory writes. */
+ wmb();
+ if (inc == 1 && srq->bar2_qid == 0 && wqe) {
+ pr_debug("%s : WC srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ pio_copy(srq->bar2_va + SGE_UDB_WCDOORBELL, (u64 *)wqe);
+ } else {
+ pr_debug("%s: DB srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ writel(PIDX_T5_V(inc) | QID_V(srq->bar2_qid),
+ srq->bar2_va + SGE_UDB_KDOORBELL);
+ }
+ /* Flush user doorbell area writes. */
+ wmb();
+}
+
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
{
@@ -515,12 +658,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
static inline int t4_wq_in_error(struct t4_wq *wq)
{
- return wq->rq.queue[wq->rq.size].status.qp_err;
+ return *wq->qp_errp;
}
-static inline void t4_set_wq_in_error(struct t4_wq *wq)
+static inline void t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx)
{
- wq->rq.queue[wq->rq.size].status.qp_err = 1;
+ if (srqidx)
+ *wq->srqidxp = srqidx;
+ *wq->qp_errp = 1;
}
static inline void t4_disable_wq_db(struct t4_wq *wq)
@@ -565,6 +710,7 @@ struct t4_cq {
u16 cidx_inc;
u8 gen;
u8 error;
+ u8 *qp_errp;
unsigned long flags;
};
@@ -698,18 +844,18 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline int t4_cq_in_error(struct t4_cq *cq)
{
- return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+ return *cq->qp_errp;
}
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
- ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+ *cq->qp_errp = 1;
}
#endif
struct t4_dev_status_page {
u8 db_off;
- u8 pad1;
+ u8 write_cmpl_supported;
u16 pad2;
u32 pad3;
u64 qp_start;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 58c531db4f4a..cbdb300a4794 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -50,7 +50,8 @@ enum fw_ri_wr_opcode {
FW_RI_BYPASS = 0xd,
FW_RI_RECEIVE = 0xe,
- FW_RI_SGE_EC_CR_RETURN = 0xf
+ FW_RI_SGE_EC_CR_RETURN = 0xf,
+ FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT
};
enum fw_ri_wr_flags {
@@ -59,7 +60,8 @@ enum fw_ri_wr_flags {
FW_RI_SOLICITED_EVENT_FLAG = 0x04,
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
- FW_RI_RDMA_READ_INVALIDATE = 0x20
+ FW_RI_RDMA_READ_INVALIDATE = 0x20,
+ FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40
};
enum fw_ri_mpa_attrs {
@@ -263,6 +265,7 @@ enum fw_ri_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
+ FW_RI_RES_TYPE_SRQ,
};
enum fw_ri_res_op {
@@ -296,6 +299,20 @@ struct fw_ri_res {
__be32 r6_lo;
__be64 r7;
} cq;
+ struct fw_ri_res_srq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 srqid;
+ __be32 pdid;
+ __be32 hwsrqsize;
+ __be32 hwsrqaddr;
+ } srq;
} u;
};
@@ -531,7 +548,17 @@ struct fw_ri_rdma_write_wr {
__u16 wrid;
__u8 r1[3];
__u8 len16;
- __be64 r2;
+ /*
+ * Use a union for the immediate data so it is consistent with the
+ * stack's 32-bit data and the iWARP spec's 64-bit data.
+ */
+ union {
+ struct {
+ __be32 imm_data32;
+ u32 reserved;
+ } ib_imm_data;
+ __be64 imm_data64;
+ } iw_imm_data;
__be32 plen;
__be32 stag_sink;
__be64 to_sink;
@@ -568,6 +595,37 @@ struct fw_ri_send_wr {
#define FW_RI_SEND_WR_SENDOP_G(x) \
(((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
+struct fw_ri_rdma_write_cmpl_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 r2;
+ __u8 flags_send;
+ __u16 wrid_send;
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 stag_sink;
+ __be64 to_sink;
+ union fw_ri_cmpl {
+ struct fw_ri_immd_cmpl {
+ __u8 op;
+ __u8 r1[6];
+ __u8 immdlen;
+ __u8 data[16];
+ } immd_src;
+ struct fw_ri_isgl isgl_src;
+ } u_cmpl;
+ __be64 r3;
+#ifndef C99_NOT_SUPPORTED
+ union fw_ri_write {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
struct fw_ri_rdma_read_wr {
__u8 opcode;
__u8 flags;
@@ -707,6 +765,10 @@ enum fw_ri_init_p2ptype {
FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
};
+enum fw_ri_init_rqeqid_srq {
+ FW_RI_INIT_RQEQID_SRQ = 1 << 31,
+};
+
struct fw_ri_wr {
__be32 op_compl;
__be32 flowid_len16;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index fbe7198a715a..bedd5fba33b0 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -198,7 +198,7 @@ int node_affinity_init(void)
while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
node = pcibus_to_node(dev->bus);
if (node < 0)
- node = numa_node_id();
+ goto out;
hfi1_per_node_cntr[node]++;
}
@@ -206,6 +206,18 @@ int node_affinity_init(void)
}
return 0;
+
+out:
+ /*
+ * Invalid PCI NUMA node information found, note it, and populate
+ * our database 1:1.
+ */
+ pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
+ pr_err("HFI: System BIOS may need to be upgraded\n");
+ for (node = 0; node < node_affinity.num_possible_nodes; node++)
+ hfi1_per_node_cntr[node] = 1;
+
+ return 0;
}
static void node_affinity_destroy(struct hfi1_affinity_node *entry)
@@ -622,8 +634,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
int curr_cpu, possible, i, ret;
bool new_entry = false;
- if (node < 0)
- node = numa_node_id();
+ /*
+ * If the BIOS does not have the NUMA node information set, select
+ * NUMA 0 so we get consistent performance.
+ */
+ if (node < 0) {
+ dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+ node = 0;
+ }
dd->node = node;
local_mask = cpumask_of_node(dd->node);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 6deb101cdd43..2c19bf772451 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8143,8 +8143,15 @@ static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
}
}
-/*
+/**
+ * is_rcv_avail_int() - User receive context available IRQ handler
+ * @dd: valid dd
+ * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
+ *
* RX block receive available interrupt. Source is < 160.
+ *
+ * This is the general interrupt handler for user (PSM) receive contexts,
+ * and can only be used for non-threaded IRQs.
*/
static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
{
@@ -8154,12 +8161,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
if (likely(source < dd->num_rcv_contexts)) {
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
- /* Check for non-user contexts, including vnic */
- if (source < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
- rcd->do_interrupt(rcd, 0);
- else
- handle_user_interrupt(rcd);
-
+ handle_user_interrupt(rcd);
hfi1_rcd_put(rcd);
return; /* OK */
}
@@ -8173,8 +8175,14 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
err_detail, source);
}
-/*
+/**
+ * is_rcv_urgent_int() - User receive context urgent IRQ handler
+ * @dd: valid dd
+ * @source: logical IRQ source (offset from IS_RCVURGENT_START)
+ *
* RX block receive urgent interrupt. Source is < 160.
+ *
+ * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
*/
static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
{
@@ -8184,11 +8192,7 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
if (likely(source < dd->num_rcv_contexts)) {
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
- /* only pay attention to user urgent interrupts */
- if (source >= dd->first_dyn_alloc_ctxt &&
- !rcd->is_vnic)
- handle_user_interrupt(rcd);
-
+ handle_user_interrupt(rcd);
hfi1_rcd_put(rcd);
return; /* OK */
}
@@ -8260,9 +8264,14 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
dd_dev_err(dd, "invalid interrupt source %u\n", source);
}
-/*
- * General interrupt handler. This is able to correctly handle
- * all interrupts in case INTx is used.
+/**
+ * general_interrupt() - General interrupt handler
+ * @irq: MSI-X IRQ vector
+ * @data: hfi1 devdata
+ *
+ * This is able to correctly handle all non-threaded interrupts. Receive
+ * context DATA IRQs are threaded and are not supported by this handler.
*/
static irqreturn_t general_interrupt(int irq, void *data)
{
@@ -10130,7 +10139,7 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
(((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
- for (i = 0; i < dd->chip_send_contexts; i++) {
+ for (i = 0; i < chip_send_contexts(dd); i++) {
hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
i, (u32)sreg);
write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
@@ -11857,7 +11866,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
* sequence numbers could land exactly on the same spot.
* E.g. a rcd restart before the receive header wrapped.
*/
- memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+ memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
/* starting timeout */
rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
@@ -11952,9 +11961,8 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
- rcd->rcvctrl = rcvctrl;
hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
- write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
+ write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
/* work around sticky RcvCtxtStatus.BlockedRHQFull */
if (did_enable &&
@@ -12042,7 +12050,7 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
} else if (entry->flags & CNTR_SDMA) {
hfi1_cdbg(CNTR,
"\t Per SDMA Engine\n");
- for (j = 0; j < dd->chip_sdma_engines;
+ for (j = 0; j < chip_sdma_engines(dd);
j++) {
val =
entry->rw_cntr(entry, dd, j,
@@ -12418,6 +12426,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
const char *bit_type_32 = ",32";
const int bit_type_32_sz = strlen(bit_type_32);
+ u32 sdma_engines = chip_sdma_engines(dd);
/* set up the stats timer; the add_timer is done at the end */
timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
@@ -12450,7 +12459,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
}
} else if (dev_cntrs[i].flags & CNTR_SDMA) {
dev_cntrs[i].offset = dd->ndevcntrs;
- for (j = 0; j < dd->chip_sdma_engines; j++) {
+ for (j = 0; j < sdma_engines; j++) {
snprintf(name, C_MAX_NAME, "%s%d",
dev_cntrs[i].name, j);
sz += strlen(name);
@@ -12507,7 +12516,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
*p++ = '\n';
}
} else if (dev_cntrs[i].flags & CNTR_SDMA) {
- for (j = 0; j < dd->chip_sdma_engines; j++) {
+ for (j = 0; j < sdma_engines; j++) {
snprintf(name, C_MAX_NAME, "%s%d",
dev_cntrs[i].name, j);
memcpy(p, name, strlen(name));
@@ -13020,9 +13029,9 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
- for (i = 0; i < dd->chip_sdma_engines; i++)
+ for (i = 0; i < chip_sdma_engines(dd); i++)
write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
@@ -13030,48 +13039,30 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
-/* Move to pcie.c? */
-static void disable_intx(struct pci_dev *pdev)
-{
- pci_intx(pdev, 0);
-}
-
/**
* hfi1_clean_up_interrupts() - Free all IRQ resources
* @dd: valid device data structure
*
- * Free the MSI or INTx IRQs and assoicated PCI resources,
- * if they have been allocated.
+ * Free the MSI-X IRQs and associated PCI resources, if they have been allocated.
*/
void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
{
int i;
+ struct hfi1_msix_entry *me = dd->msix_entries;
/* remove irqs - must happen before disabling/turning off */
- if (dd->num_msix_entries) {
- /* MSI-X */
- struct hfi1_msix_entry *me = dd->msix_entries;
-
- for (i = 0; i < dd->num_msix_entries; i++, me++) {
- if (!me->arg) /* => no irq, no affinity */
- continue;
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, i, me->arg);
- }
-
- /* clean structures */
- kfree(dd->msix_entries);
- dd->msix_entries = NULL;
- dd->num_msix_entries = 0;
- } else {
- /* INTx */
- if (dd->requested_intx_irq) {
- pci_free_irq(dd->pcidev, 0, dd);
- dd->requested_intx_irq = 0;
- }
- disable_intx(dd->pcidev);
+ for (i = 0; i < dd->num_msix_entries; i++, me++) {
+ if (!me->arg) /* => no irq, no affinity */
+ continue;
+ hfi1_put_irq_affinity(dd, me);
+ pci_free_irq(dd->pcidev, i, me->arg);
}
+ /* clean structures */
+ kfree(dd->msix_entries);
+ dd->msix_entries = NULL;
+ dd->num_msix_entries = 0;
+
pci_free_irq_vectors(dd->pcidev);
}
@@ -13121,20 +13112,6 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
msix_intr);
}
-static int request_intx_irq(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
- DRIVER_NAME "_%d", dd->unit);
- if (ret)
- dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
- ret);
- else
- dd->requested_intx_irq = 1;
- return ret;
-}
-
static int request_msix_irqs(struct hfi1_devdata *dd)
{
int first_general, last_general;
@@ -13253,11 +13230,6 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
{
int i;
- if (!dd->num_msix_entries) {
- synchronize_irq(pci_irq_vector(dd->pcidev, 0));
- return;
- }
-
for (i = 0; i < dd->vnic.num_ctxt; i++) {
struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
@@ -13346,7 +13318,6 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
{
u32 total;
int ret, request;
- int single_interrupt = 0; /* we expect to have all the interrupts */
/*
* Interrupt count:
@@ -13363,17 +13334,6 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
if (request < 0) {
ret = request;
goto fail;
- } else if (request == 0) {
- /* using INTx */
- /* dd->num_msix_entries already zero */
- single_interrupt = 1;
- dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
- } else if (request < total) {
- /* using MSI-X, with reduced interrupts */
- dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
- total, request);
- ret = -EINVAL;
- goto fail;
} else {
dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
GFP_KERNEL);
@@ -13394,10 +13354,7 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
/* reset general handler mask, chip MSI-X mappings */
reset_interrupts(dd);
- if (single_interrupt)
- ret = request_intx_irq(dd);
- else
- ret = request_msix_irqs(dd);
+ ret = request_msix_irqs(dd);
if (ret)
goto fail;
@@ -13429,6 +13386,8 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int qos_rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
+ u32 send_contexts = chip_send_contexts(dd);
+ u32 rcv_contexts = chip_rcv_contexts(dd);
/*
* Kernel receive contexts:
@@ -13450,16 +13409,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
* Every kernel receive context needs an ACK send context.
* one send context is allocated for each VL{0-7} and VL15
*/
- if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
+ if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
dd_dev_err(dd,
"Reducing # kernel rcv contexts to: %d, from %lu\n",
- (int)(dd->chip_send_contexts - num_vls - 1),
+ send_contexts - num_vls - 1,
num_kernel_contexts);
- num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
+ num_kernel_contexts = send_contexts - num_vls - 1;
}
/* Accommodate VNIC contexts if possible */
- if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
+ if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
dd_dev_err(dd, "No receive contexts available for VNIC\n");
num_vnic_contexts = 0;
}
@@ -13477,13 +13436,13 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Adjust the counts given a global max.
*/
- if (total_contexts + n_usr_ctxts > dd->chip_rcv_contexts) {
+ if (total_contexts + n_usr_ctxts > rcv_contexts) {
dd_dev_err(dd,
"Reducing # user receive contexts to: %d, from %u\n",
- (int)(dd->chip_rcv_contexts - total_contexts),
+ rcv_contexts - total_contexts,
n_usr_ctxts);
/* recalculate */
- n_usr_ctxts = dd->chip_rcv_contexts - total_contexts;
+ n_usr_ctxts = rcv_contexts - total_contexts;
}
/* each user context requires an entry in the RMT */
@@ -13509,7 +13468,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
dd->freectxts = n_usr_ctxts;
dd_dev_info(dd,
"rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
- (int)dd->chip_rcv_contexts,
+ rcv_contexts,
(int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues,
dd->num_vnic_contexts,
@@ -13527,7 +13486,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
* contexts.
*/
dd->rcv_entries.group_size = RCV_INCREMENT;
- ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
+ ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
dd->rcv_entries.nctxt_extra = ngroups -
(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
@@ -13552,7 +13511,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
dd_dev_info(
dd,
"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
- dd->chip_send_contexts,
+ send_contexts,
dd->num_send_contexts,
dd->sc_sizes[SC_KERNEL].count,
dd->sc_sizes[SC_ACK].count,
@@ -13610,7 +13569,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
write_csr(dd, CCE_INT_MAP + (8 * i), 0);
/* SendCtxtCreditReturnAddr */
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
/* PIO Send buffers */
@@ -13623,7 +13582,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
/* RcvHdrAddr */
/* RcvHdrTailAddr */
/* RcvTidFlowTable */
- for (i = 0; i < dd->chip_rcv_contexts; i++) {
+ for (i = 0; i < chip_rcv_contexts(dd); i++) {
write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
@@ -13631,7 +13590,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
}
/* RcvArray */
- for (i = 0; i < dd->chip_rcv_array_count; i++)
+ for (i = 0; i < chip_rcv_array_count(dd); i++)
hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
/* RcvQPMapTable */
@@ -13789,7 +13748,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
- for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
+ for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
@@ -13817,7 +13776,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
/*
* TXE Per-Context CSRs
*/
- for (i = 0; i < dd->chip_send_contexts; i++) {
+ for (i = 0; i < chip_send_contexts(dd); i++) {
write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
@@ -13835,7 +13794,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
/*
* TXE Per-SDMA CSRs
*/
- for (i = 0; i < dd->chip_sdma_engines; i++) {
+ for (i = 0; i < chip_sdma_engines(dd); i++) {
write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
/* SEND_DMA_STATUS read-only */
write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
@@ -13968,7 +13927,7 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd)
/*
* RXE Kernel and User Per-Context CSRs
*/
- for (i = 0; i < dd->chip_rcv_contexts; i++) {
+ for (i = 0; i < chip_rcv_contexts(dd); i++) {
/* kernel */
write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
/* RCV_CTXT_STATUS read-only */
@@ -14084,13 +14043,13 @@ static int init_chip(struct hfi1_devdata *dd)
/* disable send contexts and SDMA engines */
write_csr(dd, SEND_CTRL, 0);
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
- for (i = 0; i < dd->chip_sdma_engines; i++)
+ for (i = 0; i < chip_sdma_engines(dd); i++)
write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
/* disable port (turn off RXE inbound traffic) and contexts */
write_csr(dd, RCV_CTRL, 0);
- for (i = 0; i < dd->chip_rcv_contexts; i++)
+ for (i = 0; i < chip_rcv_contexts(dd); i++)
write_csr(dd, RCV_CTXT_CTRL, 0);
/* mask all interrupt sources */
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
@@ -14709,9 +14668,9 @@ static void init_txe(struct hfi1_devdata *dd)
write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
/* enable all per-context and per-SDMA engine errors */
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
- for (i = 0; i < dd->chip_sdma_engines; i++)
+ for (i = 0; i < chip_sdma_engines(dd); i++)
write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
/* set the local CU to AU mapping */
@@ -14979,11 +14938,13 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
"Functional simulator"
};
struct pci_dev *parent = pdev->bus->self;
+ u32 sdma_engines;
dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
sizeof(struct hfi1_pportdata));
if (IS_ERR(dd))
goto bail;
+ sdma_engines = chip_sdma_engines(dd);
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++) {
int vl;
@@ -15081,11 +15042,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* give a reasonable active value, will be set on link up */
dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
- dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
- dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
- dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
- dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
- dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
/* fix up link widths for emulation _p */
ppd = dd->pport;
if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
@@ -15096,11 +15052,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
OPA_LINK_WIDTH_1X;
}
/* ensure num_vls isn't larger than number of sdma engines */
- if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
+ if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
- num_vls, dd->chip_sdma_engines);
- num_vls = dd->chip_sdma_engines;
- ppd->vls_supported = dd->chip_sdma_engines;
+ num_vls, sdma_engines);
+ num_vls = sdma_engines;
+ ppd->vls_supported = sdma_engines;
ppd->vls_operational = ppd->vls_supported;
}
@@ -15216,13 +15172,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
*/
aspm_init(dd);
- dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
- /*
- * rcd[0] is guaranteed to be valid by this point. Also, all
- * context are using the same value, as per the module parameter.
- */
- dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
-
ret = init_pervl_scs(dd);
if (ret)
goto bail_cleanup;
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index fdf389e46e19..36b04d6300e5 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -656,6 +656,36 @@ static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
write_csr(dd, offset0 + (0x1000 * ctxt), value);
}
+static inline u32 chip_rcv_contexts(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, RCV_CONTEXTS);
+}
+
+static inline u32 chip_send_contexts(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_CONTEXTS);
+}
+
+static inline u32 chip_sdma_engines(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_DMA_ENGINES);
+}
+
+static inline u32 chip_pio_mem_size(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_PIO_MEM_SIZE);
+}
+
+static inline u32 chip_sdma_mem_size(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_DMA_MEM_SIZE);
+}
+
+static inline u32 chip_rcv_array_count(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, RCV_ARRAY_CNT);
+}
+
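These chip_*() accessors trade a cached devdata field for a CSR read on every call, which is why the rest of the patch hoists the value into a local wherever it sits in a loop (see init_cntrs and hfi1_init_dd above). A sketch of that usage pattern; init_one_engine() is an invented helper:

        u32 engines = chip_sdma_engines(dd);    /* one CSR read, reused below */

        for (i = 0; i < engines; i++)
                init_one_engine(dd, i);         /* hypothetical per-engine setup */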
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
u32 dw_len);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 94dca95db04f..a41f85558312 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -208,25 +208,25 @@ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
(offset * RCV_BUF_BLOCK_SIZE));
}
-static inline void *hfi1_get_header(struct hfi1_devdata *dd,
+static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
__le32 *rhf_addr)
{
u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
- return (void *)(rhf_addr - dd->rhf_offset + offset);
+ return (void *)(rhf_addr - rcd->rhf_offset + offset);
}
-static inline struct ib_header *hfi1_get_msgheader(struct hfi1_devdata *dd,
+static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
__le32 *rhf_addr)
{
- return (struct ib_header *)hfi1_get_header(dd, rhf_addr);
+ return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}
static inline struct hfi1_16b_header
- *hfi1_get_16B_header(struct hfi1_devdata *dd,
+ *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
__le32 *rhf_addr)
{
- return (struct hfi1_16b_header *)hfi1_get_header(dd, rhf_addr);
+ return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}
/*
@@ -591,13 +591,12 @@ static void __prescan_rxq(struct hfi1_packet *packet)
init_ps_mdata(&mdata, packet);
while (1) {
- struct hfi1_devdata *dd = rcd->dd;
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
- dd->rhf_offset;
+ packet->rcd->rhf_offset;
struct rvt_qp *qp;
struct ib_header *hdr;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
int is_ecn = 0;
@@ -612,7 +611,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
if (etype != RHF_RCV_TYPE_IB)
goto next;
- packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
+ packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
hdr = packet->hdr;
lnh = ib_get_lnh(hdr);
@@ -718,7 +717,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
ret = check_max_packet(packet, thread);
packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
- packet->rcd->dd->rhf_offset;
+ packet->rcd->rhf_offset;
packet->rhf = rhf_to_cpu(packet->rhf_addr);
return ret;
@@ -757,7 +756,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
* crashing down. There is no need to eat another
* comparison in this performance critical code.
*/
- packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
+ packet->rcd->rhf_rcv_function_map[packet->etype](packet);
packet->numpkt++;
/* Set up for the next packet */
@@ -768,7 +767,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
ret = check_max_packet(packet, thread);
packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
- packet->rcd->dd->rhf_offset;
+ packet->rcd->rhf_offset;
packet->rhf = rhf_to_cpu(packet->rhf_addr);
return ret;
@@ -949,12 +948,12 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
u8 sc = SC15_PACKET;
if (etype == RHF_RCV_TYPE_IB) {
- struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+ struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
packet->rhf_addr);
sc = hfi1_9B_get_sc5(hdr, packet->rhf);
} else if (etype == RHF_RCV_TYPE_BYPASS) {
struct hfi1_16b_header *hdr = hfi1_get_16B_header(
- packet->rcd->dd,
+ packet->rcd,
packet->rhf_addr);
sc = hfi1_16B_get_sc(hdr);
}
@@ -1034,7 +1033,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
packet.rhqoff += packet.rsize;
packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
packet.rhqoff +
- dd->rhf_offset;
+ rcd->rhf_offset;
packet.rhf = rhf_to_cpu(packet.rhf_addr);
} else if (skip_pkt) {
@@ -1384,7 +1383,7 @@ bail:
static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
packet->hdr = (struct hfi1_ib_message_header *)
- hfi1_get_msgheader(packet->rcd->dd,
+ hfi1_get_msgheader(packet->rcd,
packet->rhf_addr);
packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}
@@ -1485,7 +1484,7 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
u8 l4;
packet->hdr = (struct hfi1_16b_header *)
- hfi1_get_16B_header(packet->rcd->dd,
+ hfi1_get_16B_header(packet->rcd,
packet->rhf_addr);
l4 = hfi1_16B_get_l4(packet->hdr);
if (l4 == OPA_16B_L4_IB_LOCAL) {
@@ -1575,7 +1574,7 @@ void handle_eflags(struct hfi1_packet *packet)
* The following functions are called by the interrupt handler. They are type
* specific handlers for each packet type.
*/
-int process_receive_ib(struct hfi1_packet *packet)
+static int process_receive_ib(struct hfi1_packet *packet)
{
if (hfi1_setup_9B_packet(packet))
return RHF_RCV_CONTINUE;
@@ -1607,7 +1606,7 @@ static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
return false;
}
-int process_receive_bypass(struct hfi1_packet *packet)
+static int process_receive_bypass(struct hfi1_packet *packet)
{
struct hfi1_devdata *dd = packet->rcd->dd;
@@ -1649,7 +1648,7 @@ int process_receive_bypass(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int process_receive_error(struct hfi1_packet *packet)
+static int process_receive_error(struct hfi1_packet *packet)
{
/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
if (unlikely(
@@ -1668,7 +1667,7 @@ int process_receive_error(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int kdeth_process_expected(struct hfi1_packet *packet)
+static int kdeth_process_expected(struct hfi1_packet *packet)
{
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
@@ -1682,7 +1681,7 @@ int kdeth_process_expected(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int kdeth_process_eager(struct hfi1_packet *packet)
+static int kdeth_process_eager(struct hfi1_packet *packet)
{
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
@@ -1695,7 +1694,7 @@ int kdeth_process_eager(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int process_receive_invalid(struct hfi1_packet *packet)
+static int process_receive_invalid(struct hfi1_packet *packet)
{
dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
rhf_rcv_type(packet->rhf));
@@ -1719,9 +1718,8 @@ void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
init_ps_mdata(&mdata, &packet);
while (1) {
- struct hfi1_devdata *dd = rcd->dd;
__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
- dd->rhf_offset;
+ rcd->rhf_offset;
struct ib_header *hdr;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn;
@@ -1738,7 +1736,7 @@ void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
if (etype > RHF_RCV_TYPE_IB)
goto next;
- packet.hdr = hfi1_get_msgheader(dd, rhf_addr);
+ packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
hdr = packet.hdr;
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
@@ -1760,3 +1758,14 @@ next:
update_ps_mdata(&mdata, rcd);
}
}
+
+const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
+ [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
+ [RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
+ [RHF_RCV_TYPE_IB] = process_receive_ib,
+ [RHF_RCV_TYPE_ERROR] = process_receive_error,
+ [RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
+};
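Making the dispatch table a const global indexed by RHF type, reached through a per-context pointer, lets each receive context carry its own handler set instead of going through devdata. A runnable userspace sketch of the same designated-initializer dispatch pattern, with invented handler names:

        #include <stdio.h>

        enum { T_EXPECTED, T_EAGER, T_IB, T_INVALID, T_MAX };

        typedef int (*rcv_fn)(int pkt);

        static int on_expected(int pkt) { printf("expected %d\n", pkt); return 0; }
        static int on_invalid(int pkt)  { printf("invalid %d\n", pkt);  return -1; }

        static const rcv_fn dispatch[T_MAX] = {
                [T_EXPECTED] = on_expected,
                [T_EAGER]    = on_invalid,  /* every slot must be populated */
                [T_IB]       = on_invalid,
                [T_INVALID]  = on_invalid,
        };

        int main(void)
        {
                /* index straight off the decoded packet type, no branching */
                return dispatch[T_EXPECTED](42);
        }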
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 0fc4aa9455c3..1fc75647e47b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -411,7 +411,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
mapio = 1;
break;
case RCV_HDRQ:
- memlen = uctxt->rcvhdrq_size;
+ memlen = rcvhdrq_size(uctxt);
memvirt = uctxt->rcvhdrq;
break;
case RCV_EGRBUF: {
@@ -521,7 +521,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
break;
case SUBCTXT_RCV_HDRQ:
memaddr = (u64)uctxt->subctxt_rcvhdr_base;
- memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
+ memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt;
flags |= VM_IO | VM_DONTEXPAND;
vmf = 1;
break;
@@ -985,7 +985,11 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
* sub contexts.
* This has to be done here so the rest of the sub-contexts find the
* proper base context.
+ * NOTE: __set_bit() can be used here because the context creation is
+ * protected by the mutex (rather than the spin_lock), and will be the
+ * very first instance of this context.
*/
+ __set_bit(0, uctxt->in_use_ctxts);
if (uinfo->subctxt_cnt)
init_subctxts(uctxt, uinfo);
uctxt->userversion = uinfo->userversion;
@@ -1040,7 +1044,7 @@ static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
return -ENOMEM;
/* We can take the size of the RcvHdr Queue from the master */
- uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
+ uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) *
num_subctxts);
if (!uctxt->subctxt_rcvhdr_base) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4ab8b5bfbed1..d9470317983f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -169,12 +169,6 @@ extern const struct pci_error_handlers hfi1_pci_err_handler;
struct hfi1_opcode_stats_perctx;
struct ctxt_eager_bufs {
- ssize_t size; /* total size of eager buffers */
- u32 count; /* size of buffers array */
- u32 numbufs; /* number of buffers allocated */
- u32 alloced; /* number of rcvarray entries used */
- u32 rcvtid_size; /* size of each eager rcv tid */
- u32 threshold; /* head update threshold */
struct eager_buffer {
void *addr;
dma_addr_t dma;
@@ -184,6 +178,12 @@ struct ctxt_eager_bufs {
void *addr;
dma_addr_t dma;
} *rcvtids;
+ u32 size; /* total size of eager buffers */
+ u32 rcvtid_size; /* size of each eager rcv tid */
+ u16 count; /* size of buffers array */
+ u16 numbufs; /* number of buffers allocated */
+ u16 alloced; /* number of rcvarray entries used */
+ u16 threshold; /* head update threshold */
};
struct exp_tid_set {
@@ -191,43 +191,84 @@ struct exp_tid_set {
u32 count;
};
+typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
struct hfi1_ctxtdata {
- /* shadow the ctxt's RcvCtrl register */
- u64 rcvctrl;
/* rcvhdrq base, needs mmap before useful */
void *rcvhdrq;
/* kernel virtual address where hdrqtail is updated */
volatile __le64 *rcvhdrtail_kvaddr;
- /* when waiting for rcv or pioavail */
- wait_queue_head_t wait;
- /* rcvhdrq size (for freeing) */
- size_t rcvhdrq_size;
+ /* so functions that need physical port can get it easily */
+ struct hfi1_pportdata *ppd;
+ /* so file ops can get at unit */
+ struct hfi1_devdata *dd;
+ /* this receive context's assigned PIO ACK send context */
+ struct send_context *sc;
+ /* per context recv functions */
+ const rhf_rcv_function_ptr *rhf_rcv_function_map;
+ /*
+ * The interrupt handler for a particular receive context can vary
+ * throughout its lifetime. This is not a lock-protected data member, so
+ * it must be updated atomically and the prev and new value must always
+ * be valid. Worst case is we process an extra interrupt and up to 64
+ * packets with the wrong interrupt handler.
+ */
+ int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
+ /* verbs rx_stats per rcd */
+ struct hfi1_opcode_stats_perctx *opstats;
+ /* clear interrupt mask */
+ u64 imask;
+ /* ctxt rcvhdrq head offset */
+ u32 head;
/* number of rcvhdrq entries */
u16 rcvhdrq_cnt;
+ u8 ireg; /* clear interrupt register */
+ /* receive packet sequence counter */
+ u8 seq_cnt;
/* size of each of the rcvhdrq entries */
- u16 rcvhdrqentsize;
+ u8 rcvhdrqentsize;
+ /* offset of RHF within receive header entry */
+ u8 rhf_offset;
+ /* dynamic receive available interrupt timeout */
+ u8 rcvavail_timeout;
+ /* Indicates that this is a vnic context */
+ bool is_vnic;
+ /* vnic queue index this context is mapped to */
+ u8 vnic_q_idx;
+ /* Is ASPM interrupt supported for this context */
+ bool aspm_intr_supported;
+ /* ASPM state (enabled/disabled) for this context */
+ bool aspm_enabled;
+ /* Is ASPM processing enabled for this context (in intr context) */
+ bool aspm_intr_enable;
+ struct ctxt_eager_bufs egrbufs;
+ /* QPs waiting for context processing */
+ struct list_head qp_wait_list;
+ /* tid allocation lists */
+ struct exp_tid_set tid_group_list;
+ struct exp_tid_set tid_used_list;
+ struct exp_tid_set tid_full_list;
+
+ /* Timer for re-enabling ASPM if interrupt activity quiets down */
+ struct timer_list aspm_timer;
+ /* per-context configuration flags */
+ unsigned long flags;
+ /* array of tid_groups */
+ struct tid_group *groups;
/* mmap of hdrq, must fit in 44 bits */
dma_addr_t rcvhdrq_dma;
dma_addr_t rcvhdrqtailaddr_dma;
- struct ctxt_eager_bufs egrbufs;
- /* this receive context's assigned PIO ACK send context */
- struct send_context *sc;
-
- /* dynamic receive available interrupt timeout */
- u32 rcvavail_timeout;
+ /* Last interrupt timestamp */
+ ktime_t aspm_ts_last_intr;
+ /* Last timestamp at which we scheduled a timer for this context */
+ ktime_t aspm_ts_timer_sched;
+ /* Lock to serialize between intr, timer intr and user threads */
+ spinlock_t aspm_lock;
/* Reference count the base context usage */
struct kref kref;
-
- /* Device context index */
- u16 ctxt;
- /*
- * non-zero if ctxt can be shared, and defines the maximum number of
- * sub-contexts for this device context.
- */
- u16 subctxt_cnt;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_id;
- u8 uuid[16];
+ /* numa node of this context */
+ int numa_id;
+ /* associated msix interrupt. */
+ s16 msix_intr;
/* job key */
u16 jkey;
/* number of RcvArray groups for this context. */
@@ -238,87 +279,59 @@ struct hfi1_ctxtdata {
u16 expected_count;
/* index of first expected TID entry. */
u16 expected_base;
- /* array of tid_groups */
- struct tid_group *groups;
-
- struct exp_tid_set tid_group_list;
- struct exp_tid_set tid_used_list;
- struct exp_tid_set tid_full_list;
+ /* Device context index */
+ u8 ctxt;
- /* lock protecting all Expected TID data of user contexts */
+ /* PSM Specific fields */
+ /* lock protecting all Expected TID data */
struct mutex exp_mutex;
- /* per-context configuration flags */
- unsigned long flags;
- /* per-context event flags for fileops/intr communication */
- unsigned long event_flags;
- /* total number of polled urgent packets */
- u32 urgent;
- /* saved total number of polled urgent packets for poll edge trigger */
- u32 urgent_poll;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t wait;
+ /* uuid from PSM */
+ u8 uuid[16];
/* same size as task_struct .comm[], command that opened context */
char comm[TASK_COMM_LEN];
- /* so file ops can get at unit */
- struct hfi1_devdata *dd;
- /* so functions that need physical port can get it easily */
- struct hfi1_pportdata *ppd;
- /* associated msix interrupt */
- u32 msix_intr;
+ /* Bitmask of in use context(s) */
+ DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
+ /* per-context event flags for fileops/intr communication */
+ unsigned long event_flags;
/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
void *subctxt_uregbase;
/* An array of pages for the eager receive buffers * N */
void *subctxt_rcvegrbuf;
/* An array of pages for the eager header queue entries * N */
void *subctxt_rcvhdr_base;
- /* Bitmask of in use context(s) */
- DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
- /* The version of the library which opened this ctxt */
- u32 userversion;
+ /* total number of polled urgent packets */
+ u32 urgent;
+ /* saved total number of polled urgent packets for poll edge trigger */
+ u32 urgent_poll;
/* Type of packets or conditions we want to poll for */
u16 poll_type;
- /* receive packet sequence counter */
- u8 seq_cnt;
- /* ctxt rcvhdrq head offset */
- u32 head;
- /* QPs waiting for context processing */
- struct list_head qp_wait_list;
- /* interrupt handling */
- u64 imask; /* clear interrupt mask */
- int ireg; /* clear interrupt register */
- int numa_id; /* numa node of this context */
- /* verbs rx_stats per rcd */
- struct hfi1_opcode_stats_perctx *opstats;
-
- /* Is ASPM interrupt supported for this context */
- bool aspm_intr_supported;
- /* ASPM state (enabled/disabled) for this context */
- bool aspm_enabled;
- /* Timer for re-enabling ASPM if interrupt activity quietens down */
- struct timer_list aspm_timer;
- /* Lock to serialize between intr, timer intr and user threads */
- spinlock_t aspm_lock;
- /* Is ASPM processing enabled for this context (in intr context) */
- bool aspm_intr_enable;
- /* Last interrupt timestamp */
- ktime_t aspm_ts_last_intr;
- /* Last timestamp at which we scheduled a timer for this context */
- ktime_t aspm_ts_timer_sched;
-
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_id;
+ /* The version of the library which opened this ctxt */
+ u32 userversion;
/*
- * The interrupt handler for a particular receive context can vary
- * throughout it's lifetime. This is not a lock protected data member so
- * it must be updated atomically and the prev and new value must always
- * be valid. Worst case is we process an extra interrupt and up to 64
- * packets with the wrong interrupt handler.
+ * non-zero if ctxt can be shared, and defines the maximum number of
+ * sub-contexts for this device context.
*/
- int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
-
- /* Indicates that this is vnic context */
- bool is_vnic;
+ u8 subctxt_cnt;
- /* vnic queue index this context is mapped to */
- u8 vnic_q_idx;
};
+/**
+ * rcvhdrq_size - return total size in bytes for header queue
+ * @rcd: the receive context
+ *
+ * rcvhdrqentsize is in DWs, so we have to convert to bytes
+ */
+static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd)
+{
+ return PAGE_ALIGN(rcd->rcvhdrq_cnt *
+ rcd->rcvhdrqentsize * sizeof(u32));
+}
+
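With rcvhdrqentsize held in dwords, the helper above and the new per-context rhf_offset are both small dword-to-byte conversions. A runnable sketch of the arithmetic, using illustrative values (the real counts come from module parameters):

        #include <stdio.h>
        #include <stdint.h>

        #define TOY_PAGE_SIZE  4096UL
        #define TOY_PAGE_ALIGN(x) (((x) + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1))

        int main(void)
        {
                uint32_t cnt = 2048, entsize_dw = 32;   /* illustrative only */
                unsigned long bytes =
                        TOY_PAGE_ALIGN(cnt * entsize_dw * sizeof(uint32_t));
                uint32_t rhf_offset =
                        entsize_dw - sizeof(uint64_t) / sizeof(uint32_t);

                printf("rcvhdrq: %lu bytes\n", bytes);                  /* 262144 */
                printf("RHF at dword %u of each entry\n", rhf_offset);  /* 30 */
                return 0;
        }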
/*
* Represents a single packet at a high level. Put commonly computed things in
* here so we do not have to keep doing them over and over. The rule of thumb is
@@ -897,12 +910,11 @@ struct hfi1_pportdata {
u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
};
-typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
-
typedef void (*opcode_handler)(struct hfi1_packet *packet);
typedef void (*hfi1_make_req)(struct rvt_qp *qp,
struct hfi1_pkt_state *ps,
struct rvt_swqe *wqe);
+extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
/* return values for the RHF receive functions */
@@ -1046,8 +1058,6 @@ struct hfi1_devdata {
dma_addr_t sdma_pad_phys;
/* for deallocation */
size_t sdma_heads_size;
- /* number from the chip */
- u32 chip_sdma_engines;
/* num used */
u32 num_sdma;
/* array of engines sized by num_sdma */
@@ -1102,8 +1112,6 @@ struct hfi1_devdata {
/* base receive interrupt timeout, in CSR units */
u32 rcv_intr_timeout_csr;
- u32 freezelen; /* max length of freezemsg */
- u64 __iomem *egrtidbase;
spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
spinlock_t uctxt_lock; /* protect rcd changes */
@@ -1130,25 +1138,6 @@ struct hfi1_devdata {
/* Base GUID for device (network order) */
u64 base_guid;
- /* these are the "32 bit" regs */
-
- /* value we put in kr_rcvhdrsize */
- u32 rcvhdrsize;
- /* number of receive contexts the chip supports */
- u32 chip_rcv_contexts;
- /* number of receive array entries */
- u32 chip_rcv_array_count;
- /* number of PIO send contexts the chip supports */
- u32 chip_send_contexts;
- /* number of bytes in the PIO memory buffer */
- u32 chip_pio_mem_size;
- /* number of bytes in the SDMA memory buffer */
- u32 chip_sdma_mem_size;
-
- /* size of each rcvegrbuffer */
- u32 rcvegrbufsize;
- /* log2 of above */
- u16 rcvegrbufsize_shift;
/* both sides of the PCIe link are gen3 capable */
u8 link_gen3_capable;
u8 dc_shutdown;
@@ -1221,9 +1210,6 @@ struct hfi1_devdata {
u32 num_msix_entries;
u32 first_dyn_msix_idx;
- /* INTx information */
- u32 requested_intx_irq; /* did we request one? */
-
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1289,8 +1275,6 @@ struct hfi1_devdata {
u64 sw_cce_err_status_aggregate;
/* Software counter that aggregates all bypass packet rcv errors */
u64 sw_rcv_bypass_packet_errors;
- /* receive interrupt function */
- rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
/* Save the enabled LCB error bits */
u64 lcb_err_en;
@@ -1329,10 +1313,7 @@ struct hfi1_devdata {
/* seqlock for sc2vl */
seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
u64 sc2vl[4];
- /* receive interrupt functions */
- rhf_rcv_function_ptr *rhf_rcv_function_map;
u64 __percpu *rcv_limit;
- u16 rhf_offset; /* offset of RHF within receive header entry */
/* adding a new field here would make it part of this cacheline */
/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
@@ -1471,7 +1452,7 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp,
/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
- return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
+ return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
}
int hfi1_reset_device(int);
@@ -2021,12 +2002,6 @@ static inline void flush_wc(void)
}
void handle_eflags(struct hfi1_packet *packet);
-int process_receive_ib(struct hfi1_packet *packet);
-int process_receive_bypass(struct hfi1_packet *packet);
-int process_receive_error(struct hfi1_packet *packet);
-int kdeth_process_expected(struct hfi1_packet *packet);
-int kdeth_process_eager(struct hfi1_packet *packet);
-int process_receive_invalid(struct hfi1_packet *packet);
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
/* global module parameter variables */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index f110842b91f5..758d273c32cf 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -364,9 +364,9 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
hfi1_exp_tid_group_init(rcd);
rcd->ppd = ppd;
rcd->dd = dd;
- __set_bit(0, rcd->in_use_ctxts);
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
+ rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
mutex_init(&rcd->exp_mutex);
@@ -404,6 +404,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->rcvhdrq_cnt = rcvhdrcnt;
rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
+ rcd->rhf_offset =
+ rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
/*
* Simple Eager buffer allocation: we have already pre-allocated
* the number of RcvArray entry groups. Each ctxtdata structure
@@ -853,24 +855,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
struct hfi1_ctxtdata *rcd;
struct hfi1_pportdata *ppd;
- /* Set up recv low level handlers */
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
- kdeth_process_expected;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
- kdeth_process_eager;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
- process_receive_error;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
- process_receive_bypass;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
- process_receive_invalid;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
- process_receive_invalid;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
- process_receive_invalid;
- dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
-
/* Set up send low level handlers */
dd->process_pio_send = hfi1_verbs_send_pio;
dd->process_dma_send = hfi1_verbs_send_dma;
@@ -936,7 +920,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
}
/* Allocate enough memory for user event notification. */
- len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
+ len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
sizeof(*dd->events));
dd->events = vmalloc_user(len);
if (!dd->events)
@@ -948,9 +932,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
dd->status = vmalloc_user(PAGE_SIZE);
if (!dd->status)
dd_dev_err(dd, "Failed to allocate dev status page\n");
- else
- dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
- sizeof(dd->status->freezemsg));
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
if (dd->status)
@@ -1144,7 +1125,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
return;
if (rcd->rcvhdrq) {
- dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
+ dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
rcd->rcvhdrq, rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
if (rcd->rcvhdrtail_kvaddr) {
@@ -1855,12 +1836,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (!rcd->rcvhdrq) {
gfp_t gfp_flags;
- /*
- * rcvhdrqentsize is in DWs, so we have to convert to bytes
- * (* sizeof(u32)).
- */
- amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
- sizeof(u32));
+ amt = rcvhdrq_size(rcd);
if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
gfp_flags = GFP_KERNEL;
@@ -1885,8 +1861,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
}
-
- rcd->rcvhdrq_size = amt;
}
/*
* These values are per-context:
@@ -1902,7 +1876,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
- reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
+ reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
@@ -1938,9 +1912,9 @@ bail:
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
struct hfi1_devdata *dd = rcd->dd;
- u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
+ u32 max_entries, egrtop, alloced_bytes = 0;
gfp_t gfp_flags;
- u16 order;
+ u16 order, idx = 0;
int ret = 0;
u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 70aceefe14d5..e1c7996c018e 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,9 +67,9 @@ struct mmu_rb_handler {
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static void mmu_notifier_range_start(struct mmu_notifier *,
+static int mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
- unsigned long, unsigned long);
+ unsigned long, unsigned long, bool);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
@@ -284,10 +284,11 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
handler->ops->remove(handler->ops_arg, node);
}
-static void mmu_notifier_range_start(struct mmu_notifier *mn,
+static int mmu_notifier_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
@@ -313,6 +314,8 @@ static void mmu_notifier_range_start(struct mmu_notifier *mn,
if (added)
queue_work(handler->wq, &handler->del_work);
+
+ return 0;
}
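The new signature tracks the 4.19 mmu_notifier API: invalidate_range_start now returns int and takes a blockable flag, and a callback that might sleep is expected to return -EAGAIN when blockable is false. This handler only takes a spinlock and queues work, so it can return 0 unconditionally. A sketch of the general contract; would_sleep() is an invented predicate:

        static int sketch_range_start(struct mmu_notifier *mn, struct mm_struct *mm,
                                      unsigned long start, unsigned long end,
                                      bool blockable)
        {
                if (!blockable && would_sleep())  /* hypothetical predicate */
                        return -EAGAIN;           /* caller retries when it can block */
                /* non-blocking invalidation work goes here */
                return 0;
        }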
/*
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 4d4371bf2c7c..eec83757d55f 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -157,6 +157,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
unsigned long len;
resource_size_t addr;
int ret = 0;
+ u32 rcv_array_count;
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
@@ -186,9 +187,9 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
goto nomem;
}
- dd->chip_rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
- dd_dev_info(dd, "RcvArray count: %u\n", dd->chip_rcv_array_count);
- dd->base2_start = RCV_ARRAY + dd->chip_rcv_array_count * 8;
+ rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
+ dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
+ dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
dd->kregbase2 = ioremap_nocache(
addr + dd->base2_start,
@@ -214,13 +215,13 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
* to write an entire cacheline worth of entries in one shot.
*/
dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
- dd->chip_rcv_array_count * 8);
+ rcv_array_count * 8);
if (!dd->rcvarray_wc) {
dd_dev_err(dd, "WC mapping of receive array failed\n");
goto nomem;
}
dd_dev_info(dd, "WC RcvArray: %p for %x\n",
- dd->rcvarray_wc, dd->chip_rcv_array_count * 8);
+ dd->rcvarray_wc, rcv_array_count * 8);
dd->flags |= HFI1_PRESENT; /* chip.c CSR routines now work */
return 0;
@@ -346,15 +347,13 @@ int pcie_speeds(struct hfi1_devdata *dd)
/*
* Returns:
* - actual number of interrupts allocated or
- * - 0 if fell back to INTx.
* - error
*/
int request_msix(struct hfi1_devdata *dd, u32 msireq)
{
int nvec;
- nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
- PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
+ nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
if (nvec < 0) {
dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
return nvec;
@@ -362,10 +361,6 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
tune_pcie_caps(dd);
- /* check for legacy IRQ */
- if (nvec == 1 && !dd->pcidev->msix_enabled)
- return 0;
-
return nvec;
}
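Passing msireq as both min_vecs and max_vecs makes pci_alloc_irq_vectors() all-or-nothing: on success nvec == msireq, which is what lets set_up_interrupts() above drop both the INTx fallback and the reduced-interrupt error path. The shape of the call, for reference:

        nvec = pci_alloc_irq_vectors(pdev, want, want, PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;    /* no INTx fallback: full MSI-X or nothing */
        /* here nvec == want by construction */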
@@ -905,9 +900,7 @@ static int trigger_sbr(struct hfi1_devdata *dd)
* delay after a reset is required. Per spec requirements,
* the link is either working or not after that point.
*/
- pci_reset_bridge_secondary_bus(dev->bus->self);
-
- return 0;
+ return pci_reset_bus(dev);
}
/*
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 9cac15d10c4f..c2c1cba5b23b 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -226,7 +226,7 @@ static const char *sc_type_name(int index)
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
- int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
+ int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
int total_contexts = 0;
int fixed_blocks;
int pool_blocks;
@@ -343,8 +343,8 @@ int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
sc_type_name(i), count);
return -EINVAL;
}
- if (total_contexts + count > dd->chip_send_contexts)
- count = dd->chip_send_contexts - total_contexts;
+ if (total_contexts + count > chip_send_contexts(dd))
+ count = chip_send_contexts(dd) - total_contexts;
total_contexts += count;
@@ -507,7 +507,7 @@ static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
if (sci->type == type && sci->allocated == 0) {
sci->allocated = 1;
/* use a 1:1 mapping, but make them non-equal */
- context = dd->chip_send_contexts - index - 1;
+ context = chip_send_contexts(dd) - index - 1;
dd->hw_to_sw[context] = index;
*sw_index = index;
*hw_context = context;
@@ -1618,11 +1618,11 @@ static void sc_piobufavail(struct send_context *sc)
/* Wake up the most starved one first */
if (n)
hfi1_qp_wakeup(qps[max_idx],
- RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
+ RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
for (i = 0; i < n; i++)
if (i != max_idx)
hfi1_qp_wakeup(qps[i],
- RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
+ RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
/* translate a send credit update to a bit code of reasons */
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 1697d96151bd..9b1e84a6b1cc 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -273,7 +273,7 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_PATH_MIG_STATE &&
attr->path_mig_state == IB_MIG_MIGRATED &&
qp->s_mig_state == IB_MIG_ARMED) {
- qp->s_flags |= RVT_S_AHG_CLEAR;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
@@ -717,7 +717,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp)
qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= RVT_S_AHG_CLEAR;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
qp_set_16b(qp);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index b2d4cba8d15b..078cff7560b6 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -1,7 +1,7 @@
#ifndef _QP_H
#define _QP_H
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -70,6 +70,26 @@ static inline int hfi1_send_ok(struct rvt_qp *qp)
}
/*
+ * Driver-specific s_flags starting at bit 31 down to HFI1_S_MIN_BIT_MASK
+ *
+ * HFI1_S_AHG_VALID - ahg header valid on chip
+ * HFI1_S_AHG_CLEAR - have send engine clear ahg state
+ * HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
+ * HFI1_S_MIN_BIT_MASK - the lowest bit that can be used by hfi1
+ */
+#define HFI1_S_AHG_VALID 0x80000000
+#define HFI1_S_AHG_CLEAR 0x40000000
+#define HFI1_S_WAIT_PIO_DRAIN 0x20000000
+#define HFI1_S_MIN_BIT_MASK 0x01000000
+
+/*
+ * overload wait defines
+ */
+
+#define HFI1_S_ANY_WAIT_IO (RVT_S_ANY_WAIT_IO | HFI1_S_WAIT_PIO_DRAIN)
+#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
+
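The values above give each driver flag its own high bit, at or above HFI1_S_MIN_BIT_MASK, so they cannot collide with the rvt core's lower s_flags bits. A runnable check of that layout, using the constants copied from the defines above:

        #include <assert.h>
        #include <stdio.h>

        #define HFI1_S_AHG_VALID      0x80000000u
        #define HFI1_S_AHG_CLEAR      0x40000000u
        #define HFI1_S_WAIT_PIO_DRAIN 0x20000000u
        #define HFI1_S_MIN_BIT_MASK   0x01000000u

        int main(void)
        {
                /* each flag is a single bit at or above the reserved floor */
                assert((HFI1_S_AHG_VALID & (HFI1_S_AHG_VALID - 1)) == 0);
                assert(HFI1_S_WAIT_PIO_DRAIN >= HFI1_S_MIN_BIT_MASK);
                assert((HFI1_S_AHG_VALID & HFI1_S_AHG_CLEAR) == 0);

                unsigned int s_flags = HFI1_S_AHG_CLEAR;  /* e.g. after migration */
                s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR); /* clear_ahg() */
                printf("s_flags=0x%x\n", s_flags);
                return 0;
        }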
+/*
* free_ahg - clear ahg from QP
*/
static inline void clear_ahg(struct rvt_qp *qp)
@@ -77,7 +97,7 @@ static inline void clear_ahg(struct rvt_qp *qp)
struct hfi1_qp_priv *priv = qp->priv;
priv->s_ahg->ahgcount = 0;
- qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
+ qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
if (priv->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
qp->s_ahgidx = -1;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1a1a47ac53c6..9bd63abb2dfe 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -241,7 +241,7 @@ bail:
smp_wmb();
qp->s_flags &= ~(RVT_S_RESP_PENDING
| RVT_S_ACK_PENDING
- | RVT_S_AHG_VALID);
+ | HFI1_S_AHG_VALID);
return 0;
}
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
lockdep_assert_held(&qp->s_lock);
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
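The IS_ERR() -> !ptr checks here and in uc.c/ud.c below suggest get_txreq() now reports failure with a plain NULL rather than an ERR_PTR-encoded errno. A runnable toy contrasting the two conventions, with simplified stand-ins for the real ERR_PTR()/IS_ERR() helpers:

        #include <stdio.h>

        #define MAX_ERRNO 4095
        static void *ERR_PTR(long err) { return (void *)err; }
        static int IS_ERR(const void *p)
        {
                return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
        }

        static void *get_thing(int fail)
        {
                if (fail)
                        return ERR_PTR(-12);  /* ERR_PTR convention: carry -ENOMEM */
                return NULL;                  /* NULL convention: just "no object" */
        }

        int main(void)
        {
                void *p = get_thing(1);

                if (IS_ERR(p))
                        printf("ERR_PTR convention: errno %ld\n", -(long)p);
                if (!get_thing(0))
                        printf("NULL convention: failed, no errno attached\n");
                return 0;
        }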
@@ -1024,7 +1024,7 @@ done:
if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
(cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
qp->s_flags |= RVT_S_WAIT_PSN;
- qp->s_flags &= ~RVT_S_AHG_VALID;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
}
/*
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index ef4c566e206f..5f56f3c1b4c4 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -194,7 +194,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
goto unlock;
@@ -533,9 +533,9 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
struct hfi1_qp_priv *priv = qp->priv;
- if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
+ if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
clear_ahg(qp);
- if (!(qp->s_flags & RVT_S_AHG_VALID)) {
+ if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
/* first middle that needs copy */
if (qp->s_ahgidx < 0)
qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
@@ -544,7 +544,7 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
/* save to protect a change in another thread */
priv->s_ahg->ahgidx = qp->s_ahgidx;
- qp->s_flags |= RVT_S_AHG_VALID;
+ qp->s_flags |= HFI1_S_AHG_VALID;
}
} else {
/* subsequent middle after valid */
@@ -650,7 +650,7 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
if (middle)
build_ahg(qp, bth2);
else
- qp->s_flags &= ~RVT_S_AHG_VALID;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
bth0 |= pkey;
bth0 |= extra_bytes << 20;
@@ -727,7 +727,7 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
if (middle)
build_ahg(qp, bth2);
else
- qp->s_flags &= ~RVT_S_AHG_VALID;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
bth0 |= pkey;
bth0 |= extra_bytes << 20;
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 7fb350b87b49..88e326d6cc49 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1351,7 +1351,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
struct hfi1_pportdata *ppd = dd->pport + port;
u32 per_sdma_credits;
uint idle_cnt = sdma_idle_cnt;
- size_t num_engines = dd->chip_sdma_engines;
+ size_t num_engines = chip_sdma_engines(dd);
int ret = -ENOMEM;
if (!HFI1_CAP_IS_KSET(SDMA)) {
@@ -1360,18 +1360,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
}
if (mod_num_sdma &&
/* can't exceed chip support */
- mod_num_sdma <= dd->chip_sdma_engines &&
+ mod_num_sdma <= chip_sdma_engines(dd) &&
/* count must be >= vls */
mod_num_sdma >= num_vls)
num_engines = mod_num_sdma;
dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
- dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
+ dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
- dd->chip_sdma_mem_size);
+ chip_sdma_mem_size(dd));
per_sdma_credits =
- dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
+ chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
/* set up freeze waitqueue */
init_waitqueue_head(&dd->sdma_unfreeze_wq);
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index b7b671017e59..e254dcec6f64 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
int middle = 0;
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1ab332f1866e..70d39fc450a1 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
u32 lid;
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 08991874c0e2..13374c727b14 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1007,7 +1007,7 @@ static int pio_wait(struct rvt_qp *qp,
int was_empty;
dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
- dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
+ dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
iowait_queue(ps->pkts_sent, &priv->s_iowait,
@@ -1376,7 +1376,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
return pio_wait(qp,
ps->s_txreq->psc,
ps,
- RVT_S_WAIT_PIO_DRAIN);
+ HFI1_S_WAIT_PIO_DRAIN);
return sr(qp, ps, 0);
}
@@ -1410,7 +1410,8 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
rdi->dparms.props.max_qp = hfi1_max_qps;
rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
- rdi->dparms.props.max_sge = hfi1_max_sges;
+ rdi->dparms.props.max_send_sge = hfi1_max_sges;
+ rdi->dparms.props.max_recv_sge = hfi1_max_sges;
rdi->dparms.props.max_sge_rd = hfi1_max_sges;
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
@@ -1497,15 +1498,6 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
- /*
- * sm_lid of 0xFFFF needs special handling so that it can
- * be differentiated from a permissive LID of 0xFFFF.
- * We set the grh_required flag here so the SA can program
- * the DGID in the address handle appropriately
- */
- if (props->sm_lid == be16_to_cpu(IB_LID_PERMISSIVE))
- props->grh_required = true;
-
return 0;
}
@@ -1892,7 +1884,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ibdev->process_mad = hfi1_process_mad;
ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
- strncpy(ibdev->node_desc, init_utsname()->nodename,
+ strlcpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
/*
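[editor's note] The strncpy() to strlcpy() change above matters because strncpy() leaves the destination unterminated whenever strlen(src) >= size, which node_desc risks with a long hostname; strlcpy() always terminates. A userspace stand-in (the real strlcpy() is a BSD/kernel API, not ISO C) behaves like this:

#include <string.h>

/* Stand-in for the kernel's strlcpy(): copies at most size-1 bytes,
 * always NUL-terminates, and returns the length it tried to copy so
 * the caller can detect truncation. */
static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}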
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 873e48ea923f..c4ab2d5b4502 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
__must_hold(&qp->s_lock)
{
- struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+ struct verbs_txreq *tx = NULL;
write_seqlock(&dev->txwait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 729244c3086c..1c19bbc764b2 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
if (unlikely(!tx)) {
/* call slow path to get the lock */
tx = __get_txreq(dev, qp);
- if (IS_ERR(tx))
+ if (!tx)
return tx;
}
tx->qp = qp;
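[editor's note] The uc.c, ud.c and verbs_txreq hunks change the txreq slow path from returning ERR_PTR(-EBUSY) to returning NULL, so every caller's IS_ERR() test becomes a plain pointer check. A sketch of the simplification, with illustrative names rather than the hfi1 API:

#include <stddef.h>

struct txreq { int in_use; };

static struct txreq pool[4];
static unsigned int next_free;

/* With a single failure mode (allocation exhausted), NULL carries all
 * the information an errno-encoded pointer did, and callers can write
 * "if (!tx)" instead of "if (IS_ERR(tx))". */
static struct txreq *get_req_sketch(void)
{
        if (next_free >= 4)
                return NULL;
        return &pool[next_free++];
}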
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 5d65582fe4d9..c643d80c5a53 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -120,8 +120,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
uctxt->seq_cnt = 1;
uctxt->is_vnic = true;
- if (dd->num_msix_entries)
- hfi1_set_vnic_msix_info(uctxt);
+ hfi1_set_vnic_msix_info(uctxt);
hfi1_stats.sps_ctxts++;
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
@@ -136,8 +135,7 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
flush_wc();
- if (dd->num_msix_entries)
- hfi1_reset_vnic_msix_info(uctxt);
+ hfi1_reset_vnic_msix_info(uctxt);
/*
* Disable receive context and interrupt available, reset all
@@ -423,7 +421,7 @@ tx_finish:
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
@@ -818,14 +816,14 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- dd->chip_sdma_engines, dd->num_vnic_contexts);
+ chip_sdma_engines(dd), dd->num_vnic_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
rn = netdev_priv(netdev);
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
- vinfo->num_tx_q = dd->chip_sdma_engines;
+ vinfo->num_tx_q = chip_sdma_engines(dd);
vinfo->num_rx_q = dd->num_vnic_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index d74928621559..0d96c5bb38cd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -44,13 +44,11 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
+ const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
- struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
- union ib_gid sgid;
- int ret;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
@@ -59,18 +57,9 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
- /* Get source gid */
- ret = ib_get_cached_gid(ibpd->device, rdma_ah_get_port_num(ah_attr),
- grh->sgid_index, &sgid, &gid_attr);
- if (ret) {
- dev_err(dev, "get sgid failed! ret = %d\n", ret);
- kfree(ah);
- return ERR_PTR(ret);
- }
-
- if (is_vlan_dev(gid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
- dev_put(gid_attr.ndev);
+ gid_attr = ah_attr->grh.sgid_attr;
+ if (is_vlan_dev(gid_attr->ndev))
+ vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
@@ -108,7 +97,7 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate);
rdma_ah_set_grh(ah_attr, NULL,
(le32_to_cpu(ah->av.sl_tclass_flowlabel) &
- HNS_ROCE_FLOW_LABLE_MASK), ah->av.gid_index,
+ HNS_ROCE_FLOW_LABEL_MASK), ah->av.gid_index,
ah->av.hop_limit,
(le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
HNS_ROCE_TCLASS_SHIFT));
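[editor's note] The create_ah hunk above drops a cached-GID lookup, together with its error path and reference counting, because the core now hands the driver a pre-resolved sgid_attr inside rdma_ah_attr. A toy version of the same "pass the resolved attribute, don't re-look it up" refactor, with made-up types:

#include <stdbool.h>
#include <stdint.h>

struct netdev {
        bool is_vlan;
        uint16_t vlan_id;
};

struct gid_attr {
        struct netdev *ndev;
};

/* The lookup, its failure path and the dev_put() all disappear once
 * the caller supplies the attribute; only the VLAN extraction stays. */
static uint16_t vlan_from_gid_attr(const struct gid_attr *attr)
{
        uint16_t vlan_tag = 0xffff;     /* sentinel: no VLAN */

        if (attr->ndev->is_vlan)
                vlan_tag = attr->ndev->vlan_id;
        return vlan_tag;
}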
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 319cb74aebaf..93d4b4ec002d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -382,15 +382,6 @@
#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
-#define ROCEE_VF_SMAC_CFG0_REG 0x12000
-#define ROCEE_VF_SMAC_CFG1_REG 0x12004
-
-#define ROCEE_VF_SGID_CFG0_REG 0x10000
-#define ROCEE_VF_SGID_CFG1_REG 0x10004
-#define ROCEE_VF_SGID_CFG2_REG 0x10008
-#define ROCEE_VF_SGID_CFG3_REG 0x1000c
-#define ROCEE_VF_SGID_CFG4_REG 0x10010
-
#define ROCEE_VF_ABN_INT_CFG_REG 0x13000
#define ROCEE_VF_ABN_INT_ST_REG 0x13004
#define ROCEE_VF_ABN_INT_EN_REG 0x13008
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index ebee2782a573..e2f93c1ce86a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -41,6 +41,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
found:
db->dma = sg_dma_address(page->umem->sg_head.sgl) +
(virt & ~PAGE_MASK);
+ page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
+ db->virt_addr = sg_virt(page->umem->sg_head.sgl);
db->u.user_page = page;
refcount_inc(&page->refcount);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 31221d506d9a..9a24fd0ee3e7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -76,7 +76,7 @@
/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28
#define HNS_ROCE_TCLASS_SHIFT 20
-#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
+#define HNS_ROCE_FLOW_LABEL_MASK 0xfffff
#define HNS_ROCE_MAX_PORTS 6
#define HNS_ROCE_MAX_GID_NUM 16
@@ -110,6 +110,7 @@
enum {
HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
+ HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
};
enum {
@@ -190,7 +191,8 @@ enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
- HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3)
+ HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
+ HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
};
enum hns_roce_mtt_type {
@@ -385,6 +387,7 @@ struct hns_roce_db {
struct hns_roce_user_db_page *user_page;
} u;
dma_addr_t dma;
+ void *virt_addr;
int index;
int order;
};
@@ -524,7 +527,9 @@ struct hns_roce_qp {
struct hns_roce_buf hr_buf;
struct hns_roce_wq rq;
struct hns_roce_db rdb;
+ struct hns_roce_db sdb;
u8 rdb_en;
+ u8 sdb_en;
u32 doorbell_qpn;
__le32 sq_signal_bits;
u32 sq_next_wqe;
@@ -579,22 +584,22 @@ struct hns_roce_ceqe {
};
struct hns_roce_aeqe {
- u32 asyn;
+ __le32 asyn;
union {
struct {
- u32 qp;
+ __le32 qp;
u32 rsv0;
u32 rsv1;
} qp_event;
struct {
- u32 cq;
+ __le32 cq;
u32 rsv0;
u32 rsv1;
} cq_event;
struct {
- u32 ceqe;
+ __le32 ceqe;
u32 rsv0;
u32 rsv1;
} ce_event;
@@ -641,6 +646,8 @@ struct hns_roce_eq {
int shift;
dma_addr_t cur_eqe_ba;
dma_addr_t nxt_eqe_ba;
+ int event_type;
+ int sub_type;
};
struct hns_roce_eq_table {
@@ -720,10 +727,21 @@ struct hns_roce_caps {
u32 eqe_ba_pg_sz;
u32 eqe_buf_pg_sz;
u32 eqe_hop_num;
+ u32 sl_num;
+ u32 tsq_buf_pg_sz;
+ u32 tpq_buf_pg_sz;
u32 chunk_sz; /* chunk size in non-multihop mode */
u64 flags;
};
+struct hns_roce_work {
+ struct hns_roce_dev *hr_dev;
+ struct work_struct work;
+ u32 qpn;
+ int event_type;
+ int sub_type;
+};
+
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
@@ -736,7 +754,7 @@ struct hns_roce_hw {
u16 token, int event);
int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid, const struct ib_gid_attr *attr);
+ const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
@@ -760,10 +778,10 @@ struct hns_roce_hw {
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
int (*destroy_qp)(struct ib_qp *ibqp);
- int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
- int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+ int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+ int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
@@ -816,6 +834,7 @@ struct hns_roce_dev {
u32 tptr_size; /*only for hw v1*/
const struct hns_roce_hw *hw;
void *priv;
+ struct workqueue_struct *irq_workq;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -864,7 +883,7 @@ static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}
-static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
+static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
__raw_writeq(*(u64 *) val, dest);
}
@@ -982,7 +1001,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
int cnt);
-__be32 send_ieth(struct ib_send_wr *wr);
+__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 63b5b3edabcb..f6faefed96e8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -170,7 +170,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
case 3:
mhop->l2_idx = table_idx & (chunk_ba_num - 1);
mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
- mhop->l0_idx = table_idx / chunk_ba_num / chunk_ba_num;
+ mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
break;
case 2:
mhop->l1_idx = table_idx & (chunk_ba_num - 1);
@@ -342,7 +342,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
} else {
break;
}
- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
}
bt_cmd_l = (u32)bt_ba;
@@ -494,6 +494,9 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
step_idx = 1;
} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
step_idx = 0;
+ } else {
+ ret = -EINVAL;
+ goto err_dma_alloc_l1;
}
/* set HEM base address to hardware */
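[editor's note] The msleep() to mdelay() change in hns_roce_set_hem() appears to follow the atomic-context rule: this poll loop seems to run with a spinlock held, and code holding a spinlock may busy-wait but must never sleep. A userspace caricature of a bounded busy-wait poll, all names hypothetical:

#include <stdbool.h>

static volatile bool hw_done;   /* would be set by hardware or an IRQ */

/* Crude stand-in for mdelay(): burns cycles without ever scheduling,
 * which is the property that makes it legal under a spinlock. */
static void spin_delay(volatile unsigned long loops)
{
        while (loops--)
                ;
}

static int poll_hw_done(void)
{
        int tries = 100;

        while (!hw_done && tries--)
                spin_delay(100000);
        return hw_done ? 0 : -1;        /* kernel code returns -EBUSY */
}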
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 8013d69c5ac4..081aa91fc162 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -58,8 +58,9 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
rseg->len = 0;
}
-static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int hns_roce_v1_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
@@ -173,12 +174,14 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_FLOW_LABEL_M,
- UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
+ UD_SEND_WQE_U32_36_FLOW_LABEL_S,
+ ah->av.sl_tclass_flowlabel &
+ HNS_ROCE_FLOW_LABEL_MASK);
roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_PRIORITY_M,
- UD_SEND_WQE_U32_36_PRIORITY_S,
- ah->av.sl_tclass_flowlabel >>
- HNS_ROCE_SL_SHIFT);
+ UD_SEND_WQE_U32_36_PRIORITY_M,
+ UD_SEND_WQE_U32_36_PRIORITY_S,
+ le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_SL_SHIFT);
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_SGID_INDEX_M,
UD_SEND_WQE_U32_36_SGID_INDEX_S,
@@ -191,7 +194,9 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ah->av.hop_limit);
roce_set_field(ud_sq_wqe->u32_40,
UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);
+ UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_TCLASS_SHIFT);
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
@@ -333,7 +338,7 @@ out:
doorbell[0] = le32_to_cpu(sq_db.u32_4);
doorbell[1] = le32_to_cpu(sq_db.u32_8);
- hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
+ hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l);
qp->sq_next_wqe = ind;
}
@@ -342,14 +347,15 @@ out:
return ret;
}
-static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int ret = 0;
int nreq = 0;
int ind = 0;
int i = 0;
- u32 reg_val = 0;
+ u32 reg_val;
unsigned long flags = 0;
struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
struct hns_roce_wqe_data_seg *scat = NULL;
@@ -402,14 +408,18 @@ out:
wmb();
if (ibqp->qp_type == IB_QPT_GSI) {
+ __le32 tmp;
+
/* SW update GSI rq header */
reg_val = roce_read(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
QP1C_CFGN_OFFSET * hr_qp->phy_port);
- roce_set_field(reg_val,
+ tmp = cpu_to_le32(reg_val);
+ roce_set_field(tmp,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
hr_qp->rq.head);
+ reg_val = le32_to_cpu(tmp);
roce_write(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
@@ -430,7 +440,8 @@ out:
doorbell[0] = le32_to_cpu(rq_db.u32_4);
doorbell[1] = le32_to_cpu(rq_db.u32_8);
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ hns_roce_write64_k((__le32 *)doorbell,
+ hr_qp->rq.db_reg_l);
}
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -441,51 +452,63 @@ out:
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
int sdb_mode, int odb_mode)
{
+ __le32 tmp;
u32 val;
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
- roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
+ tmp = cpu_to_le32(val);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
u32 odb_mode)
{
+ __le32 tmp;
u32 val;
/* Configure SDB/ODB extend mode */
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
- roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
+ tmp = cpu_to_le32(val);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
u32 sdb_alful)
{
+ __le32 tmp;
u32 val;
/* Configure SDB */
val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
- roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
- roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
+ roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}
static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
u32 odb_alful)
{
+ __le32 tmp;
u32 val;
/* Configure ODB */
val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
- roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
- roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
+ roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}
@@ -496,6 +519,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
struct hns_roce_v1_priv *priv;
struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
+ __le32 tmp;
u32 val;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
@@ -511,7 +535,8 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
/* Configure extend SDB depth */
val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
- roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
db->ext_db->esdb_dep);
/*
@@ -519,8 +544,9 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
* using 4K page, and shift more 32 because of
* caculating the high 32 bit value evaluated to hardware.
*/
- roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
+ roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
@@ -535,6 +561,7 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
struct hns_roce_v1_priv *priv;
struct hns_roce_db_table *db;
dma_addr_t odb_dma_addr;
+ __le32 tmp;
u32 val;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
@@ -550,12 +577,14 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
/* Configure extend ODB depth */
val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
- roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
db->ext_db->eodb_dep);
- roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
+ roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
db->ext_db->eodb_dep);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
@@ -762,6 +791,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
if (!free_mr->mr_free_qp[i]) {
dev_err(dev, "Create loop qp failed!\n");
+ ret = -ENOMEM;
goto create_lp_qp_failed;
}
hr_qp = free_mr->mr_free_qp[i];
@@ -831,7 +861,7 @@ alloc_pd_failed:
if (hns_roce_ib_destroy_cq(cq))
dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
- return -EINVAL;
+ return ret;
}
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
@@ -969,7 +999,8 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
struct device *dev = &hr_dev->pdev->dev;
- struct ib_send_wr send_wr, *bad_wr;
+ struct ib_send_wr send_wr;
+ const struct ib_send_wr *bad_wr;
int ret;
memset(&send_wr, 0, sizeof(send_wr));
@@ -1161,9 +1192,10 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
int ret;
+ u32 val;
+ __le32 tmp;
int raq_shift = 0;
dma_addr_t addr;
- u32 val;
struct hns_roce_v1_priv *priv;
struct hns_roce_raq_table *raq;
struct device *dev = &hr_dev->pdev->dev;
@@ -1189,46 +1221,54 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
/* Configure raq_shift */
raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
- roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
/*
* 44 = 32 + 12: when the address is handed to hardware it is shifted
* by 12 because 4K pages are used, and by another 32 because only the
* high 32-bit half is written here.
*/
- roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
+ roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
raq->e_raq_buf->map >> 44);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
/* Configure raq threshold */
val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
- roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
HNS_ROCE_V1_EXT_RAQ_WF);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
/* Enable extend raq */
val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
- roce_set_field(val,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
POL_TIME_INTERVAL_VAL);
- roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
- roce_set_field(val,
+ roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
+ roce_set_field(tmp,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
2);
- roce_set_bit(val,
+ roce_set_bit(tmp,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
/* Enable raq drop */
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
+ tmp = cpu_to_le32(val);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
@@ -1255,20 +1295,25 @@ static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
+ __le32 tmp;
u32 val;
if (enable_flag) {
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
/* Open all ports */
- roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
ALL_PORT_VAL_OPEN);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
} else {
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
/* Close all ports */
- roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
}
@@ -1435,7 +1480,7 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
}
fwnode = &dsaf_node->fwnode;
} else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
ret = acpi_node_get_property_reference(dev->fwnode,
"dsaf-handle", 0, &args);
@@ -1443,7 +1488,7 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
dev_err(dev, "could not find dsaf-handle\n");
return ret;
}
- fwnode = acpi_fwnode_handle(args.adev);
+ fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
@@ -1498,13 +1543,11 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
int i = 0;
struct hns_roce_caps *caps = &hr_dev->caps;
- hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
- hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
- ROCEE_VENDOR_PART_ID_REG));
- hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
- ROCEE_SYS_IMAGE_GUID_L_REG)) |
- ((u64)le32_to_cpu(roce_read(hr_dev,
- ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
+ hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
+ hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
+ hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
+ ((u64)roce_read(hr_dev,
+ ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
hr_dev->hw_rev = HNS_ROCE_HW_VER1;
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
@@ -1557,8 +1600,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
- caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
- ROCEE_ACK_DELAY_REG));
+ caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
caps->max_mtu = IB_MTU_2048;
return 0;
@@ -1568,21 +1610,25 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
int ret;
u32 val;
+ __le32 tmp;
struct device *dev = &hr_dev->pdev->dev;
/* DMAE user config */
val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
- roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
- roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
1 << PAGES_SHIFT_16);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
- roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
- roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
1 << PAGES_SHIFT_16);
@@ -1668,6 +1714,7 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
unsigned long end;
u32 val = 0;
+ __le32 tmp;
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
while (hns_roce_v1_cmd_pending(hr_dev)) {
@@ -1679,15 +1726,17 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
cond_resched();
}
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
op);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
+ roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
+ roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
+ roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
+ roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
+ val = le32_to_cpu(tmp);
writeq(in_param, hcr + 0);
writeq(out_param, hcr + 2);
writel(in_modifier, hcr + 4);
@@ -1717,7 +1766,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
return -ETIMEDOUT;
}
- status = le32_to_cpu((__force __be32)
+ status = le32_to_cpu((__force __le32)
__raw_readl(hcr + HCR_STATUS_OFFSET));
if ((status & STATUS_MASK) != 0x1) {
dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
@@ -1728,7 +1777,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
- int gid_index, union ib_gid *gid,
+ int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
u32 *p = NULL;
@@ -1760,6 +1809,7 @@ static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
{
u32 reg_smac_l;
u16 reg_smac_h;
+ __le32 tmp;
u16 *p_h;
u32 *p;
u32 val;
@@ -1784,10 +1834,12 @@ static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
val = roce_read(hr_dev,
ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
+ tmp = cpu_to_le32(val);
p_h = (u16 *)(&addr[4]);
reg_smac_h = *p_h;
- roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
+ roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
val);
@@ -1797,12 +1849,15 @@ static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu)
{
+ __le32 tmp;
u32 val;
val = roce_read(hr_dev,
ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
val);
}
@@ -1848,9 +1903,9 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
- mpt_entry->virt_addr_l = (u32)mr->iova;
- mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
- mpt_entry->length = (u32)mr->size;
+ mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
+ mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
+ mpt_entry->length = cpu_to_le32((u32)mr->size);
roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
MPT_BYTE_28_PD_S, mr->pd);
@@ -1885,64 +1940,59 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->mpt_byte_36,
MPT_BYTE_36_PA0_H_M,
MPT_BYTE_36_PA0_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ (u32)(pages[i] >> PAGES_SHIFT_32));
break;
case 1:
roce_set_field(mpt_entry->mpt_byte_36,
MPT_BYTE_36_PA1_L_M,
- MPT_BYTE_36_PA1_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_40,
MPT_BYTE_40_PA1_H_M,
MPT_BYTE_40_PA1_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ (u32)(pages[i] >> PAGES_SHIFT_24));
break;
case 2:
roce_set_field(mpt_entry->mpt_byte_40,
MPT_BYTE_40_PA2_L_M,
- MPT_BYTE_40_PA2_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_44,
MPT_BYTE_44_PA2_H_M,
MPT_BYTE_44_PA2_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ (u32)(pages[i] >> PAGES_SHIFT_16));
break;
case 3:
roce_set_field(mpt_entry->mpt_byte_44,
MPT_BYTE_44_PA3_L_M,
- MPT_BYTE_44_PA3_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_48,
MPT_BYTE_48_PA3_H_M,
MPT_BYTE_48_PA3_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
+ (u32)(pages[i] >> PAGES_SHIFT_8));
break;
case 4:
mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_56,
MPT_BYTE_56_PA4_H_M,
MPT_BYTE_56_PA4_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ (u32)(pages[i] >> PAGES_SHIFT_32));
break;
case 5:
roce_set_field(mpt_entry->mpt_byte_56,
MPT_BYTE_56_PA5_L_M,
- MPT_BYTE_56_PA5_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_60,
MPT_BYTE_60_PA5_H_M,
MPT_BYTE_60_PA5_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ (u32)(pages[i] >> PAGES_SHIFT_24));
break;
case 6:
roce_set_field(mpt_entry->mpt_byte_60,
MPT_BYTE_60_PA6_L_M,
- MPT_BYTE_60_PA6_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_64,
MPT_BYTE_64_PA6_H_M,
MPT_BYTE_64_PA6_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ (u32)(pages[i] >> PAGES_SHIFT_16));
break;
default:
break;
@@ -1951,7 +2001,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
free_page((unsigned long) pages);
- mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);
+ mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
MPT_BYTE_12_PBL_ADDR_H_S,
@@ -1982,9 +2032,9 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
- u32 doorbell[2];
+ __le32 doorbell[2];
- doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
+ doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
doorbell[1] = 0;
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
@@ -2081,10 +2131,8 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
- cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);
- cq_context->cq_bt_l = (u32)dma_handle;
- cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);
+ cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
roce_set_field(cq_context->cqc_byte_12,
CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
@@ -2096,15 +2144,12 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
ilog2((unsigned int)nent));
roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
- cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);
- cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
- cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);
+ cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
- cpu_to_le32((mtts[0]) >> 32));
+ CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
/* Dedicated hardware, directly set 0 */
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
@@ -2118,9 +2163,8 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
tptr_dma_addr >> 44);
- cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
- cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
+ cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
@@ -2138,7 +2182,6 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
- cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
}
static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
@@ -2151,7 +2194,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
{
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
- u32 doorbell[2];
+ __le32 doorbell[2];
notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
@@ -2159,7 +2202,8 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
* flags = 0; Notification Flag = 1, next
* flags = 1; Notification Flag = 0, solicited
*/
- doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
+ doorbell[0] =
+ cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
@@ -2416,7 +2460,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
unsigned long end = 0, flags = 0;
- uint32_t bt_cmd_val[2] = {0};
+ __le32 bt_cmd_val[2] = {0};
void __iomem *bt_cmd;
u64 bt_ba = 0;
@@ -2468,7 +2512,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
}
- bt_cmd_val[0] = (uint32_t)bt_ba;
+ bt_cmd_val[0] = (__le32)bt_ba;
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
@@ -2569,10 +2613,11 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_sqp_context *context;
struct device *dev = &hr_dev->pdev->dev;
dma_addr_t dma_handle = 0;
+ u32 __iomem *addr;
int rq_pa_start;
+ __le32 tmp;
u32 reg_val;
u64 *mtts;
- u32 __iomem *addr;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
@@ -2598,7 +2643,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
- context->sq_rq_bt_l = (u32)(dma_handle);
+ context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
roce_set_field(context->qp1c_bytes_12,
QP1C_BYTES_12_SQ_RQ_BT_H_M,
QP1C_BYTES_12_SQ_RQ_BT_H_S,
@@ -2610,7 +2655,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
roce_set_bit(context->qp1c_bytes_16,
QP1C_BYTES_16_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
+ le32_to_cpu(hr_qp->sq_signal_bits));
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
1);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -2624,7 +2669,8 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+ context->cur_rq_wqe_ba_l =
+ cpu_to_le32((u32)(mtts[rq_pa_start]));
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
@@ -2643,7 +2689,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_32_TX_CQ_NUM_S,
to_hr_cq(ibqp->send_cq)->cqn);
- context->cur_sq_wqe_ba_l = (u32)mtts[0];
+ context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
@@ -2658,23 +2704,25 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
ROCEE_QP1C_CFG0_0_REG +
hr_qp->phy_port * sizeof(*context));
- writel(context->qp1c_bytes_4, addr);
- writel(context->sq_rq_bt_l, addr + 1);
- writel(context->qp1c_bytes_12, addr + 2);
- writel(context->qp1c_bytes_16, addr + 3);
- writel(context->qp1c_bytes_20, addr + 4);
- writel(context->cur_rq_wqe_ba_l, addr + 5);
- writel(context->qp1c_bytes_28, addr + 6);
- writel(context->qp1c_bytes_32, addr + 7);
- writel(context->cur_sq_wqe_ba_l, addr + 8);
- writel(context->qp1c_bytes_40, addr + 9);
+ writel(le32_to_cpu(context->qp1c_bytes_4), addr);
+ writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
+ writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
+ writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
+ writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
+ writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
+ writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
+ writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
+ writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
+ writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
}
/* Modify QP1C status */
reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
hr_qp->phy_port * sizeof(*context));
- roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
+ tmp = cpu_to_le32(reg_val);
+ roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
+ reg_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
hr_qp->phy_port * sizeof(*context), reg_val);
@@ -2712,7 +2760,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
- uint32_t doorbell[2] = {0};
+ __le32 doorbell[2] = {0};
int rq_pa_start = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
@@ -2887,7 +2935,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dmac = (u8 *)attr->ah_attr.roce.dmac;
- context->sq_rq_bt_l = (u32)(dma_handle);
+ context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
roce_set_field(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
@@ -2899,7 +2947,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
attr->min_rnr_timer);
- context->irrl_ba_l = (u32)(dma_handle_2);
+ context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
roce_set_field(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
@@ -2913,7 +2961,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1);
roce_set_bit(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
+ le32_to_cpu(hr_qp->sq_signal_bits));
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
hr_qp->port;
@@ -2991,7 +3039,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+ context->cur_rq_wqe_ba_l =
+ cpu_to_le32((u32)(mtts[rq_pa_start]));
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
@@ -3071,7 +3120,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
goto out;
}
- context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+ context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
roce_set_field(context->qpc_bytes_120,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
@@ -3219,7 +3268,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
- context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+ context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
@@ -3386,16 +3435,16 @@ static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
addr = ROCEE_QP1C_CFG0_0_REG +
hr_qp->port * sizeof(struct hns_roce_sqp_context);
- context.qp1c_bytes_4 = roce_read(hr_dev, addr);
- context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
- context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
- context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
- context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
- context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
- context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
- context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
- context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
- context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
+ context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
+ context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
+ context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
+ context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
+ context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
+ context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
+ context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
+ context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
+ context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
+ context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
hr_qp->state = roce_get_field(context.qp1c_bytes_4,
QP1C_BYTES_4_QP_STATE_M,
@@ -3557,7 +3606,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
- qp_attr->rnr_retry = context->rnr_retry;
+ qp_attr->rnr_retry = (u8)context->rnr_retry;
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -3595,42 +3644,47 @@ static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
u32 *old_send, u32 *old_retry,
u32 *tsp_st, u32 *success_flags)
{
+ __le32 *old_send_tmp, *old_retry_tmp;
u32 sdb_retry_cnt;
u32 sdb_send_ptr;
u32 cur_cnt, old_cnt;
+ __le32 tmp, tmp1;
u32 send_ptr;
sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
- cur_cnt = roce_get_field(sdb_send_ptr,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ tmp = cpu_to_le32(sdb_send_ptr);
+ tmp1 = cpu_to_le32(sdb_retry_cnt);
+ cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+
+ old_send_tmp = (__le32 *)old_send;
+ old_retry_tmp = (__le32 *)old_retry;
if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
- old_cnt = roce_get_field(*old_send,
+ old_cnt = roce_get_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(*old_retry,
+ roce_get_field(*old_retry_tmp,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
*success_flags = 1;
} else {
- old_cnt = roce_get_field(*old_send,
+ old_cnt = roce_get_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
*success_flags = 1;
} else {
- send_ptr = roce_get_field(*old_send,
+ send_ptr = roce_get_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
+ roce_get_field(tmp1,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- roce_set_field(*old_send,
+ roce_set_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
send_ptr);
@@ -3646,11 +3700,14 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
u32 sdb_send_ptr, old_send;
+ __le32 sdb_issue_ptr_tmp;
+ __le32 sdb_send_ptr_tmp;
u32 success_flags = 0;
unsigned long end;
u32 old_retry;
u32 inv_cnt;
u32 tsp_st;
+ __le32 tmp;
if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
*wait_stage < HNS_ROCE_V1_DB_STAGE1) {
@@ -3679,10 +3736,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
ROCEE_SDB_SEND_PTR_REG);
}
- if (roce_get_field(sdb_issue_ptr,
+ sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr);
+ sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr);
+ if (roce_get_field(sdb_issue_ptr_tmp,
ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
- roce_get_field(sdb_send_ptr,
+ roce_get_field(sdb_send_ptr_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
@@ -3690,7 +3749,8 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
do {
tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
- if (roce_get_bit(tsp_st,
+ tmp = cpu_to_le32(tsp_st);
+ if (roce_get_bit(tmp,
ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
return 0;
@@ -3699,8 +3759,9 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
if (!time_before(jiffies, end)) {
dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
"issue 0x%x send 0x%x.\n",
- hr_qp->qpn, sdb_issue_ptr,
- sdb_send_ptr);
+ hr_qp->qpn,
+ le32_to_cpu(sdb_issue_ptr_tmp),
+ le32_to_cpu(sdb_send_ptr_tmp));
return 0;
}
@@ -4102,9 +4163,9 @@ static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
struct device *dev = &hr_dev->pdev->dev;
u32 cqn;
- cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
@@ -4340,6 +4401,7 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
u32 aeshift_val;
u32 ceshift_val;
u32 cemask_val;
+ __le32 tmp;
int i;
/*
@@ -4348,30 +4410,34 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
* interrupt, mask irq, clear irq, cancel mask operation
*/
aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+ tmp = cpu_to_le32(aeshift_val);
/* AEQE overflow */
- if (roce_get_bit(aeshift_val,
+ if (roce_get_bit(tmp,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
dev_warn(dev, "AEQ overflow!\n");
/* Set mask */
caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ tmp = cpu_to_le32(caepaemask_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_ENABLE);
+ caepaemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
/* Clear int state(INT_WC : write 1 clear) */
caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- roce_set_bit(caepaest_val,
- ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ tmp = cpu_to_le32(caepaest_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ caepaest_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
/* Clear mask */
caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ tmp = cpu_to_le32(caepaemask_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_DISABLE);
+ caepaemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
}
@@ -4379,8 +4445,9 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
i * CEQ_REG_OFFSET);
+ tmp = cpu_to_le32(ceshift_val);
- if (roce_get_bit(ceshift_val,
+ if (roce_get_bit(tmp,
ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
int_work++;
@@ -4389,9 +4456,11 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
cemask_val = roce_read(hr_dev,
ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
+ tmp = cpu_to_le32(cemask_val);
+ roce_set_bit(tmp,
ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_ENABLE);
+ cemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET, cemask_val);
@@ -4399,9 +4468,11 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
cealmovf_val = roce_read(hr_dev,
ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
i * CEQ_REG_OFFSET);
- roce_set_bit(cealmovf_val,
+ tmp = cpu_to_le32(cealmovf_val);
+ roce_set_bit(tmp,
ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
1);
+ cealmovf_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
i * CEQ_REG_OFFSET, cealmovf_val);
@@ -4409,9 +4480,11 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
cemask_val = roce_read(hr_dev,
ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
+ tmp = cpu_to_le32(cemask_val);
+ roce_set_bit(tmp,
ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_DISABLE);
+ cemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET, cemask_val);
}
@@ -4435,13 +4508,16 @@ static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
u32 aemask_val;
int masken = 0;
+ __le32 tmp;
int i;
/* AEQ INT */
aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ tmp = cpu_to_le32(aemask_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
masken);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ aemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
/* CEQ INT */
@@ -4473,20 +4549,24 @@ static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
int enable_flag)
{
void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ __le32 tmp;
u32 val;
val = readl(eqc);
+ tmp = cpu_to_le32(val);
if (enable_flag)
- roce_set_field(val,
+ roce_set_field(tmp,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
HNS_ROCE_EQ_STAT_VALID);
else
- roce_set_field(val,
+ roce_set_field(tmp,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
HNS_ROCE_EQ_STAT_INVALID);
+
+ val = le32_to_cpu(tmp);
writel(val, eqc);
}
@@ -4499,6 +4579,9 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
u32 eqconsindx_val = 0;
u32 eqcuridx_val = 0;
u32 eqshift_val = 0;
+ __le32 tmp2 = 0;
+ __le32 tmp1 = 0;
+ __le32 tmp = 0;
int num_bas;
int ret;
int i;
@@ -4530,14 +4613,13 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
}
eq->cons_index = 0;
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
eq->log_entries);
+ eqshift_val = le32_to_cpu(tmp);
writel(eqshift_val, eqc);
/* Configure eq extended address 12~44bit */
@@ -4549,18 +4631,18 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
* using a 4K page, and shift 32 more to obtain the
* high 32-bit value presented to the hardware.
*/
- roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
eq->buf_list[0].map >> 44);
- roce_set_field(eqcuridx_val,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ eqcuridx_val = le32_to_cpu(tmp1);
writel(eqcuridx_val, eqc + 8);
/* Configure eq consumer index */
- roce_set_field(eqconsindx_val,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ eqconsindx_val = le32_to_cpu(tmp2);
writel(eqconsindx_val, eqc + 0xc);
return 0;
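The shift arithmetic the comment above describes, as a worked example (illustrative value; 44 = 12 bits of 4K page offset + 32 bits carried by the low word):

	dma_addr_t map = eq->buf_list[0].map;	/* e.g. 0x0000123456789000 */
	u32 ba_l = (map >> 12) & 0xffffffff;	/* address in 4K units */
	u32 ba_h = map >> 44;			/* remaining high bits */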
@@ -4835,16 +4917,14 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
continue;
pdev = of_find_device_by_node(net_node);
} else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
- struct fwnode_handle *fwnode;
+ struct fwnode_reference_args args;
ret = acpi_node_get_property_reference(dev->fwnode,
"eth-handle",
i, &args);
if (ret)
continue;
- fwnode = acpi_fwnode_handle(args.adev);
- pdev = hns_roce_find_pdev(fwnode);
+ pdev = hns_roce_find_pdev(args.fwnode);
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index e9a2717ea7cd..66440147d9eb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -260,7 +260,7 @@ struct hns_roce_cqe {
__le32 cqe_byte_4;
union {
__le32 r_key;
- __be32 immediate_data;
+ __le32 immediate_data;
};
__le32 byte_cnt;
__le32 cqe_byte_16;
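The type change above reflects that IB immediate data travels big-endian on the wire while the hardware stores it little-endian in the WQE/CQE; the v2 hunks below add the explicit conversions. A sketch of both directions (helper names are illustrative, not from the patch):

	static __le32 imm_to_hw(__be32 imm)		/* post_send path */
	{
		return cpu_to_le32(be32_to_cpu(imm));
	}

	static __be32 imm_from_hw(__le32 immtdata)	/* poll_cq path */
	{
		return cpu_to_be32(le32_to_cpu(immtdata));
	}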
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a6e11be0ea0f..0218c0f8c2a7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
@@ -53,7 +54,7 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
-static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
+static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind)
{
struct hns_roce_v2_wqe_data_seg *dseg;
@@ -100,10 +101,10 @@ static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
}
}
-static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
@@ -164,23 +165,30 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
return 0;
}
-static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state);
+
+static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
- struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
+ struct ib_qp_attr attr;
unsigned int sge_ind = 0;
unsigned int owner_bit;
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
bool loopback;
+ int attr_mask;
u32 tmp_len;
int ret = 0;
u8 *smac;
@@ -273,7 +281,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
- ud_sq_wqe->immtdata = wr->ex.imm_data;
+ ud_sq_wqe->immtdata =
+ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
break;
default:
ud_sq_wqe->immtdata = 0;
@@ -330,14 +339,13 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
- 0);
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
- 0);
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_TCLASS_SHIFT);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
+ ah->av.sl_tclass_flowlabel &
+ HNS_ROCE_FLOW_LABEL_MASK);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_SL_M,
V2_UD_SEND_WQE_BYTE_40_SL_S,
@@ -371,7 +379,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
- rc_sq_wqe->immtdata = wr->ex.imm_data;
+ rc_sq_wqe->immtdata =
+ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
break;
case IB_WR_SEND_WITH_INV:
rc_sq_wqe->inv_key =
@@ -485,7 +494,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- dseg = wqe;
ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
&sge_ind, bad_wr);
@@ -523,6 +531,19 @@ out:
qp->sq_next_wqe = ind;
qp->next_sge = sge_ind;
+
+ if (qp->state == IB_QPS_ERR) {
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+
+ ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
+ qp->state, IB_QPS_ERR);
+ if (ret) {
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+ *bad_wr = wr;
+ return ret;
+ }
+ }
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -530,16 +551,19 @@ out:
return ret;
}
-static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
+ struct ib_qp_attr attr;
unsigned long flags;
void *wqe = NULL;
+ int attr_mask;
int ret = 0;
int nreq;
int ind;
@@ -608,6 +632,20 @@ out:
wmb();
*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
+
+ if (hr_qp->state == IB_QPS_ERR) {
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
+ attr_mask, hr_qp->state,
+ IB_QPS_ERR);
+ if (ret) {
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+ *bad_wr = wr;
+ return ret;
+ }
+ }
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
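Both post paths above share the same flush step: if the QP is already in the error state, drive a modify-QP to IB_QPS_ERR so the hardware generates flush CQEs for the just-posted WQEs. The common shape, as a sketch (attr lives on the caller's stack, as in the hunks):

	if (qp->state == IB_QPS_ERR) {
		struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

		ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, IB_QP_STATE,
					    qp->state, IB_QPS_ERR);
	}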
@@ -702,8 +740,8 @@ static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
int ret;
/* Setup the queue entries for command queue */
- priv->cmq.csq.desc_num = 1024;
- priv->cmq.crq.desc_num = 1024;
+ priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
+ priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
/* Setup the lock for command queue */
spin_lock_init(&priv->cmq.csq.lock);
@@ -925,7 +963,8 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[2];
- struct hns_roce_pf_res *res;
+ struct hns_roce_pf_res_a *req_a;
+ struct hns_roce_pf_res_b *req_b;
int ret;
int i;
@@ -943,21 +982,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
if (ret)
return ret;
- res = (struct hns_roce_pf_res *)desc[0].data;
+ req_a = (struct hns_roce_pf_res_a *)desc[0].data;
+ req_b = (struct hns_roce_pf_res_b *)desc[1].data;
- hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
+ hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
PF_RES_DATA_1_PF_QPC_BT_NUM_M,
PF_RES_DATA_1_PF_QPC_BT_NUM_S);
- hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
+ hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
- hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
+ hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
PF_RES_DATA_3_PF_CQC_BT_NUM_M,
PF_RES_DATA_3_PF_CQC_BT_NUM_S);
- hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
+ hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
PF_RES_DATA_4_PF_MPT_BT_NUM_M,
PF_RES_DATA_4_PF_MPT_BT_NUM_S);
+ hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
+ PF_RES_DATA_3_PF_SL_NUM_M,
+ PF_RES_DATA_3_PF_SL_NUM_S);
+
return 0;
}
@@ -1203,12 +1247,14 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->eqe_ba_pg_sz = 0;
caps->eqe_buf_pg_sz = 0;
caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
+ caps->tsq_buf_pg_sz = 0;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
HNS_ROCE_CAP_FLAG_RQ_INLINE |
- HNS_ROCE_CAP_FLAG_RECORD_DB;
+ HNS_ROCE_CAP_FLAG_RECORD_DB |
+ HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
@@ -1224,6 +1270,228 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
+static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
+ enum hns_roce_link_table_type type)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cfg_llm_a *req_a =
+ (struct hns_roce_cfg_llm_a *)desc[0].data;
+ struct hns_roce_cfg_llm_b *req_b =
+ (struct hns_roce_cfg_llm_b *)desc[1].data;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_link_table *link_tbl;
+ struct hns_roce_link_table_entry *entry;
+ enum hns_roce_opcode_type opcode;
+ u32 page_num;
+ int i;
+
+ switch (type) {
+ case TSQ_LINK_TABLE:
+ link_tbl = &priv->tsq;
+ opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
+ break;
+ case TPQ_LINK_TABLE:
+ link_tbl = &priv->tpq;
+ opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ page_num = link_tbl->npages;
+ entry = link_tbl->table.buf;
+ memset(req_a, 0, sizeof(*req_a));
+ memset(req_b, 0, sizeof(*req_b));
+
+ for (i = 0; i < 2; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
+
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ if (i == 0) {
+ req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
+ req_a->base_addr_h = (link_tbl->table.map >> 32) &
+ 0xffffffff;
+ roce_set_field(req_a->depth_pgsz_init_en,
+ CFG_LLM_QUE_DEPTH_M,
+ CFG_LLM_QUE_DEPTH_S,
+ link_tbl->npages);
+ roce_set_field(req_a->depth_pgsz_init_en,
+ CFG_LLM_QUE_PGSZ_M,
+ CFG_LLM_QUE_PGSZ_S,
+ link_tbl->pg_sz);
+ req_a->head_ba_l = entry[0].blk_ba0;
+ req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
+ roce_set_field(req_a->head_ptr,
+ CFG_LLM_HEAD_PTR_M,
+ CFG_LLM_HEAD_PTR_S, 0);
+ } else {
+ req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
+ roce_set_field(req_b->tail_ba_h,
+ CFG_LLM_TAIL_BA_H_M,
+ CFG_LLM_TAIL_BA_H_S,
+ entry[page_num - 1].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_BA1_M);
+ roce_set_field(req_b->tail_ptr,
+ CFG_LLM_TAIL_PTR_M,
+ CFG_LLM_TAIL_PTR_S,
+ (entry[page_num - 2].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S);
+ }
+ }
+ roce_set_field(req_a->depth_pgsz_init_en,
+ CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
+ enum hns_roce_link_table_type type)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_link_table *link_tbl;
+ struct hns_roce_link_table_entry *entry;
+ struct device *dev = hr_dev->dev;
+ u32 buf_chk_sz;
+ dma_addr_t t;
+ int func_num = 1;
+ int pg_num_a;
+ int pg_num_b;
+ int pg_num;
+ int size;
+ int i;
+
+ switch (type) {
+ case TSQ_LINK_TABLE:
+ link_tbl = &priv->tsq;
+ buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
+ pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
+ pg_num_b = hr_dev->caps.sl_num * 4 + 2;
+ break;
+ case TPQ_LINK_TABLE:
+ link_tbl = &priv->tpq;
+ buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
+ pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
+ pg_num_b = 2 * 4 * func_num + 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pg_num = max(pg_num_a, pg_num_b);
+ size = pg_num * sizeof(struct hns_roce_link_table_entry);
+
+ link_tbl->table.buf = dma_alloc_coherent(dev, size,
+ &link_tbl->table.map,
+ GFP_KERNEL);
+ if (!link_tbl->table.buf)
+ goto out;
+
+ link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
+ GFP_KERNEL);
+ if (!link_tbl->pg_list)
+ goto err_kcalloc_failed;
+
+ entry = link_tbl->table.buf;
+ for (i = 0; i < pg_num; ++i) {
+ link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
+ &t, GFP_KERNEL);
+ if (!link_tbl->pg_list[i].buf)
+ goto err_alloc_buf_failed;
+
+ link_tbl->pg_list[i].map = t;
+ memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);
+
+ entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
+ roce_set_field(entry[i].blk_ba1_nxt_ptr,
+ HNS_ROCE_LINK_TABLE_BA1_M,
+ HNS_ROCE_LINK_TABLE_BA1_S,
+ t >> 44);
+
+ if (i < (pg_num - 1))
+ roce_set_field(entry[i].blk_ba1_nxt_ptr,
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M,
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S,
+ i + 1);
+ }
+ link_tbl->npages = pg_num;
+ link_tbl->pg_sz = buf_chk_sz;
+
+ return hns_roce_config_link_table(hr_dev, type);
+
+err_alloc_buf_failed:
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, buf_chk_sz,
+ link_tbl->pg_list[i].buf,
+ link_tbl->pg_list[i].map);
+ kfree(link_tbl->pg_list);
+
+err_kcalloc_failed:
+ dma_free_coherent(dev, size, link_tbl->table.buf,
+ link_tbl->table.map);
+
+out:
+ return -ENOMEM;
+}
+
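Each link-table entry built above packs a page's DMA address across two words: blk_ba0 carries bits 12..43 (the address in 4K units) and blk_ba1_nxt_ptr packs the remaining high bits (19:0) with the next-entry index (31:20). One entry, as a sketch (function name illustrative):

	static void fill_link_entry(struct hns_roce_link_table_entry *e,
				    dma_addr_t t, u32 next)
	{
		e->blk_ba0 = (t >> 12) & 0xffffffff;
		roce_set_field(e->blk_ba1_nxt_ptr, HNS_ROCE_LINK_TABLE_BA1_M,
			       HNS_ROCE_LINK_TABLE_BA1_S, t >> 44);
		roce_set_field(e->blk_ba1_nxt_ptr,
			       HNS_ROCE_LINK_TABLE_NXT_PTR_M,
			       HNS_ROCE_LINK_TABLE_NXT_PTR_S, next);
	}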
+static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_link_table *link_tbl)
+{
+ struct device *dev = hr_dev->dev;
+ int size;
+ int i;
+
+ size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
+
+ for (i = 0; i < link_tbl->npages; ++i)
+ if (link_tbl->pg_list[i].buf)
+ dma_free_coherent(dev, link_tbl->pg_sz,
+ link_tbl->pg_list[i].buf,
+ link_tbl->pg_list[i].map);
+ kfree(link_tbl->pg_list);
+
+ dma_free_coherent(dev, size, link_tbl->table.buf,
+ link_tbl->table.map);
+}
+
+static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ int ret;
+
+ /* TSQ includes SQ doorbell and ack doorbell */
+ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
+ if (ret) {
+ dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
+ if (ret) {
+ dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
+ goto err_tpq_init_failed;
+ }
+
+ return 0;
+
+err_tpq_init_failed:
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+
+ return ret;
+}
+
+static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+
+ hns_roce_free_link_table(hr_dev, &priv->tpq);
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+}
+
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
@@ -1307,13 +1575,45 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
+ int gid_index, const union ib_gid *gid,
+ enum hns_roce_sgid_type sgid_type)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_sgid_tb *sgid_tb =
+ (struct hns_roce_cfg_sgid_tb *)desc.data;
+ u32 *p;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
+
+ roce_set_field(sgid_tb->table_idx_rsv,
+ CFG_SGID_TB_TABLE_IDX_M,
+ CFG_SGID_TB_TABLE_IDX_S, gid_index);
+ roce_set_field(sgid_tb->vf_sgid_type_rsv,
+ CFG_SGID_TB_VF_SGID_TYPE_M,
+ CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
+
+ p = (u32 *)&gid->raw[0];
+ sgid_tb->vf_sgid_l = cpu_to_le32(*p);
+
+ p = (u32 *)&gid->raw[4];
+ sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
+
+ p = (u32 *)&gid->raw[8];
+ sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
+
+ p = (u32 *)&gid->raw[0xc];
+ sgid_tb->vf_sgid_h = cpu_to_le32(*p);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
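The four vf_sgid_* words above carry the 16-byte GID as consecutive little-endian dwords, low bytes first. The same split written as a loop (a sketch, assuming the device consumes the raw GID byte stream in this order):

	__le32 words[4];
	int i;

	for (i = 0; i < 4; i++)
		words[i] = cpu_to_le32(*(u32 *)&gid->raw[4 * i]);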
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
- int gid_index, union ib_gid *gid,
+ int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
- u32 *p;
- u32 val;
+ int ret;
if (!gid || !attr)
return -EINVAL;
@@ -1328,49 +1628,37 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
}
- p = (u32 *)&gid->raw[0];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
- 0x20 * gid_index);
-
- p = (u32 *)&gid->raw[4];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
- 0x20 * gid_index);
-
- p = (u32 *)&gid->raw[8];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
- 0x20 * gid_index);
-
- p = (u32 *)&gid->raw[0xc];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
- 0x20 * gid_index);
-
- val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
- roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
- ROCEE_VF_SGID_CFG4_SGID_TYPE_S, sgid_type);
-
- roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);
+ ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
+ if (ret)
+ dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
- return 0;
+ return ret;
}
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
u8 *addr)
{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_smac_tb *smac_tb =
+ (struct hns_roce_cfg_smac_tb *)desc.data;
u16 reg_smac_h;
u32 reg_smac_l;
- u32 val;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
reg_smac_l = *(u32 *)(&addr[0]);
- roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
- 0x08 * phy_port);
- val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);
+ reg_smac_h = *(u16 *)(&addr[4]);
- reg_smac_h = *(u16 *)(&addr[4]);
- roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
- ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
- roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);
+ memset(smac_tb, 0, sizeof(*smac_tb));
+ roce_set_field(smac_tb->tb_idx_rsv,
+ CFG_SMAC_TB_IDX_M,
+ CFG_SMAC_TB_IDX_S, phy_port);
+ roce_set_field(smac_tb->vf_smac_h_rsv,
+ CFG_SMAC_TB_VF_SMAC_H_M,
+ CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
+ smac_tb->vf_smac_l = reg_smac_l;
- return 0;
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
@@ -1758,6 +2046,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq;
+ struct ib_qp_attr attr;
+ int attr_mask;
int is_send;
u16 wqe_ctr;
u32 opcode;
@@ -1844,8 +2134,17 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
break;
}
- /* CQE status error, directly return */
- if (wc->status != IB_WC_SUCCESS)
+ /* flush cqe if wc status is error, excluding flush error */
+ if ((wc->status != IB_WC_SUCCESS) &&
+ (wc->status != IB_WC_WR_FLUSH_ERR)) {
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+ return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
+ &attr, attr_mask,
+ (*cur_qp)->state, IB_QPS_ERR);
+ }
+
+ if (wc->status == IB_WC_WR_FLUSH_ERR)
return 0;
if (is_send) {
@@ -1931,7 +2230,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->immtdata;
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immtdata));
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
@@ -1940,7 +2240,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->immtdata;
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immtdata));
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
@@ -2273,10 +2574,10 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
/* When no VLAN is present, set the VLAN ID to 0xFFF */
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
- V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
- V2_QPC_BYTE_24_VLAN_IDX_S, 0);
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, 0);
/*
* Set some fields in context to zero, because the default values
@@ -2886,21 +3187,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
-
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, grh->flow_label);
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, 0);
-
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, grh->traffic_class);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, 0);
-
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
@@ -2911,9 +3197,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
- memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
- memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
-
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
@@ -2952,12 +3235,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, 0);
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
-
return 0;
}
@@ -3135,13 +3412,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_28_AT_S, 0);
}
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, 0);
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
-
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
@@ -3224,9 +3494,114 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
;
} else {
dev_err(dev, "Illegal state for QP!\n");
+ ret = -EINVAL;
goto out;
}
+ /* When QP state is err, SQ and RQ WQE should be flushed */
+ if (new_state == IB_QPS_ERR) {
+ roce_set_field(context->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
+ hr_qp->sq.head);
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+ hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ }
+
+ if (attr_mask & IB_QP_AV) {
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&attr->ah_attr);
+ const struct ib_gid_attr *gid_attr = NULL;
+ u8 src_mac[ETH_ALEN];
+ int is_roce_protocol;
+ u16 vlan = 0xffff;
+ u8 ib_port;
+ u8 hr_port;
+
+ ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
+ hr_qp->port + 1;
+ hr_port = ib_port - 1;
+ is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
+ rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
+
+ if (is_roce_protocol) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+ memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ }
+
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+ roce_set_field(qpc_mask->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, 0);
+
+ if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
+ dev_err(hr_dev->dev,
+ "sgid_index(%u) too large. max is %d\n",
+ grh->sgid_index,
+ hr_dev->caps.gid_table_len[hr_port]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
+ dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ roce_set_field(context->byte_52_udpspn_dmac,
+ V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
+ (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
+ 0 : 0x12b7);
+
+ roce_set_field(qpc_mask->byte_52_udpspn_dmac,
+ V2_QPC_BYTE_52_UDPSPN_M,
+ V2_QPC_BYTE_52_UDPSPN_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
+
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
+ roce_set_field(qpc_mask->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, 0);
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, grh->flow_label);
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, 0);
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S,
+ rdma_ah_get_sl(&attr->ah_attr));
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, 0);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
@@ -3497,6 +3872,11 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
if (is_user) {
+ if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
+ hns_roce_db_unmap_user(
+ to_hr_ucontext(hr_qp->ibqp.uobject->context),
+ &hr_qp->sdb);
+
if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
hns_roce_db_unmap_user(
to_hr_ucontext(hr_qp->ibqp.uobject->context),
@@ -3579,6 +3959,74 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
+{
+ struct hns_roce_qp *hr_qp;
+ struct ib_qp_attr attr;
+ int attr_mask;
+ int ret;
+
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (!hr_qp) {
+ dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
+ return;
+ }
+
+ if (hr_qp->ibqp.uobject) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
+ return;
+ }
+ }
+
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ hr_qp->state, IB_QPS_ERR);
+ if (ret)
+ dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
+ qpn);
+}
+
+static void hns_roce_irq_work_handle(struct work_struct *work)
+{
+ struct hns_roce_work *irq_work =
+ container_of(work, struct hns_roce_work, work);
+ u32 qpn = irq_work->qpn;
+
+ switch (irq_work->event_type) {
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
+ default:
+ break;
+ }
+
+ kfree(irq_work);
+}
+
+static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq, u32 qpn)
+{
+ struct hns_roce_work *irq_work;
+
+ irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
+ if (!irq_work)
+ return;
+
+ INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
+ irq_work->hr_dev = hr_dev;
+ irq_work->qpn = qpn;
+ irq_work->event_type = eq->event_type;
+ irq_work->sub_type = eq->sub_type;
+ queue_work(hr_dev->irq_workq, &(irq_work->work));
+}
+
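hns_roce_v2_init_irq_work() exists because the AEQ handler runs in hard-IRQ context, where the sleeping mailbox/CMQ commands needed to flush a QP are not allowed; GFP_ATOMIC plus a workqueue defers the modify-QP into process context. Usage sketch (hypothetical call site):

	/* from the AEQ interrupt handler: */
	hns_roce_v2_init_irq_work(hr_dev, eq, qpn);	/* never sleeps */
	/* ...hns_roce_irq_work_handle() later runs from hr_dev->irq_workq */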
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
u32 doorbell[2];
@@ -3681,14 +4129,9 @@ static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe,
- int event_type)
+ int event_type, u32 qpn)
{
struct device *dev = hr_dev->dev;
- u32 qpn;
-
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_COMM_EST:
@@ -3715,14 +4158,9 @@ static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe,
- int event_type)
+ int event_type, u32 cqn)
{
struct device *dev = hr_dev->dev;
- u32 cqn;
-
- cqn = roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
@@ -3787,6 +4225,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe;
int aeqe_found = 0;
int event_type;
+ int sub_type;
+ u32 qpn;
+ u32 cqn;
while ((aeqe = next_aeqe_sw_v2(eq))) {
@@ -3798,6 +4239,15 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
event_type = roce_get_field(aeqe->asyn,
HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
+ sub_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -3811,7 +4261,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
+ hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type,
+ qpn);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
@@ -3820,7 +4271,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
+ hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type,
+ cqn);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
dev_warn(dev, "DB overflow.\n");
@@ -3843,6 +4295,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
break;
};
+ eq->event_type = event_type;
+ eq->sub_type = sub_type;
++eq->cons_index;
aeqe_found = 1;
@@ -3850,6 +4304,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
}
+ hns_roce_v2_init_irq_work(hr_dev, eq, qpn);
}
set_eq_cons_index_v2(eq);
@@ -4052,15 +4507,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
u32 bt_chk_sz;
u32 mhop_num;
int eqe_alloc;
- int ba_num;
int i = 0;
int j = 0;
mhop_num = hr_dev->caps.eqe_hop_num;
buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
- ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
- buf_chk_sz;
/* hop_num = 0 */
if (mhop_num == HNS_ROCE_HOP_NUM_0) {
@@ -4669,6 +5121,13 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
}
}
+ hr_dev->irq_workq =
+ create_singlethread_workqueue("hns_roce_irq_workqueue");
+ if (!hr_dev->irq_workq) {
+ dev_err(dev, "Create irq workqueue failed!\n");
+ goto err_request_irq_fail;
+ }
+
return 0;
err_request_irq_fail:
@@ -4719,12 +5178,17 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
kfree(hr_dev->irq_names[i]);
kfree(eq_table->eq);
+
+ flush_workqueue(hr_dev->irq_workq);
+ destroy_workqueue(hr_dev->irq_workq);
}
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
.hw_profile = hns_roce_v2_profile,
+ .hw_init = hns_roce_v2_init,
+ .hw_exit = hns_roce_v2_exit,
.post_mbox = hns_roce_v2_post_mbox,
.chk_mbox = hns_roce_v2_chk_mbox,
.set_gid = hns_roce_v2_set_gid,
@@ -4749,6 +5213,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
/* required last entry */
{0, }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index d47675f365c7..14aa308befef 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -112,6 +112,9 @@
(step_idx == 1 && hop_num == 1) || \
(step_idx == 2 && hop_num == 2))
+#define CMD_CSQ_DESC_NUM 1024
+#define CMD_CRQ_DESC_NUM 1024
+
enum {
NO_ARMED = 0x0,
REG_NXT_CEQE = 0x2,
@@ -203,6 +206,10 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
+ HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
+ HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
+ HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
};
@@ -447,8 +454,8 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_24_TC_S 8
#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
-#define V2_QPC_BYTE_24_VLAN_IDX_S 16
-#define V2_QPC_BYTE_24_VLAN_IDX_M GENMASK(27, 16)
+#define V2_QPC_BYTE_24_VLAN_ID_S 16
+#define V2_QPC_BYTE_24_VLAN_ID_M GENMASK(27, 16)
#define V2_QPC_BYTE_24_MTU_S 28
#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
@@ -768,7 +775,7 @@ struct hns_roce_v2_cqe {
__le32 byte_4;
union {
__le32 rkey;
- __be32 immtdata;
+ __le32 immtdata;
};
__le32 byte_12;
__le32 byte_16;
@@ -926,7 +933,7 @@ struct hns_roce_v2_cq_db {
struct hns_roce_v2_ud_send_wqe {
__le32 byte_4;
__le32 msg_len;
- __be32 immtdata;
+ __le32 immtdata;
__le32 byte_16;
__le32 byte_20;
__le32 byte_24;
@@ -1012,7 +1019,7 @@ struct hns_roce_v2_rc_send_wqe {
__le32 msg_len;
union {
__le32 inv_key;
- __be32 immtdata;
+ __le32 immtdata;
};
__le32 byte_16;
__le32 byte_20;
@@ -1061,6 +1068,40 @@ struct hns_roce_query_version {
__le32 rsv[5];
};
+struct hns_roce_cfg_llm_a {
+ __le32 base_addr_l;
+ __le32 base_addr_h;
+ __le32 depth_pgsz_init_en;
+ __le32 head_ba_l;
+ __le32 head_ba_h_nxtptr;
+ __le32 head_ptr;
+};
+
+#define CFG_LLM_QUE_DEPTH_S 0
+#define CFG_LLM_QUE_DEPTH_M GENMASK(12, 0)
+
+#define CFG_LLM_QUE_PGSZ_S 16
+#define CFG_LLM_QUE_PGSZ_M GENMASK(19, 16)
+
+#define CFG_LLM_INIT_EN_S 20
+#define CFG_LLM_INIT_EN_M GENMASK(20, 20)
+
+#define CFG_LLM_HEAD_PTR_S 0
+#define CFG_LLM_HEAD_PTR_M GENMASK(11, 0)
+
+struct hns_roce_cfg_llm_b {
+ __le32 tail_ba_l;
+ __le32 tail_ba_h;
+ __le32 tail_ptr;
+ __le32 rsv[3];
+};
+
+#define CFG_LLM_TAIL_BA_H_S 0
+#define CFG_LLM_TAIL_BA_H_M GENMASK(19, 0)
+
+#define CFG_LLM_TAIL_PTR_S 0
+#define CFG_LLM_TAIL_PTR_M GENMASK(11, 0)
+
struct hns_roce_cfg_global_param {
__le32 time_cfg_udp_port;
__le32 rsv[5];
@@ -1072,7 +1113,7 @@ struct hns_roce_cfg_global_param {
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
-struct hns_roce_pf_res {
+struct hns_roce_pf_res_a {
__le32 rsv;
__le32 qpc_bt_idx_num;
__le32 srqc_bt_idx_num;
@@ -1111,6 +1152,32 @@ struct hns_roce_pf_res {
#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
+struct hns_roce_pf_res_b {
+ __le32 rsv0;
+ __le32 smac_idx_num;
+ __le32 sgid_idx_num;
+ __le32 qid_idx_sl_num;
+ __le32 rsv[2];
+};
+
+#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
+#define PF_RES_DATA_1_PF_SMAC_IDX_M GENMASK(7, 0)
+
+#define PF_RES_DATA_1_PF_SMAC_NUM_S 8
+#define PF_RES_DATA_1_PF_SMAC_NUM_M GENMASK(16, 8)
+
+#define PF_RES_DATA_2_PF_SGID_IDX_S 0
+#define PF_RES_DATA_2_PF_SGID_IDX_M GENMASK(7, 0)
+
+#define PF_RES_DATA_2_PF_SGID_NUM_S 8
+#define PF_RES_DATA_2_PF_SGID_NUM_M GENMASK(16, 8)
+
+#define PF_RES_DATA_3_PF_QID_IDX_S 0
+#define PF_RES_DATA_3_PF_QID_IDX_M GENMASK(9, 0)
+
+#define PF_RES_DATA_3_PF_SL_NUM_S 16
+#define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
+
struct hns_roce_vf_res_a {
__le32 vf_id;
__le32 vf_qpc_bt_idx_num;
@@ -1179,13 +1246,6 @@ struct hns_roce_vf_res_b {
#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
-/* Reg field definition */
-#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S 0
-#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M GENMASK(15, 0)
-
-#define ROCEE_VF_SGID_CFG4_SGID_TYPE_S 0
-#define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
-
struct hns_roce_cfg_bt_attr {
__le32 vf_qpc_cfg;
__le32 vf_srqc_cfg;
@@ -1230,6 +1290,32 @@ struct hns_roce_cfg_bt_attr {
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+struct hns_roce_cfg_sgid_tb {
+ __le32 table_idx_rsv;
+ __le32 vf_sgid_l;
+ __le32 vf_sgid_ml;
+ __le32 vf_sgid_mh;
+ __le32 vf_sgid_h;
+ __le32 vf_sgid_type_rsv;
+};
+#define CFG_SGID_TB_TABLE_IDX_S 0
+#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
+
+#define CFG_SGID_TB_VF_SGID_TYPE_S 0
+#define CFG_SGID_TB_VF_SGID_TYPE_M GENMASK(1, 0)
+
+struct hns_roce_cfg_smac_tb {
+ __le32 tb_idx_rsv;
+ __le32 vf_smac_l;
+ __le32 vf_smac_h_rsv;
+ __le32 rsv[3];
+};
+#define CFG_SMAC_TB_IDX_S 0
+#define CFG_SMAC_TB_IDX_M GENMASK(7, 0)
+
+#define CFG_SMAC_TB_VF_SMAC_H_S 0
+#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
+
struct hns_roce_cmq_desc {
__le16 opcode;
__le16 flag;
@@ -1276,8 +1362,32 @@ struct hns_roce_v2_cmq {
u16 last_status;
};
+enum hns_roce_link_table_type {
+ TSQ_LINK_TABLE,
+ TPQ_LINK_TABLE,
+};
+
+struct hns_roce_link_table {
+ struct hns_roce_buf_list table;
+ struct hns_roce_buf_list *pg_list;
+ u32 npages;
+ u32 pg_sz;
+};
+
+struct hns_roce_link_table_entry {
+ u32 blk_ba0;
+ u32 blk_ba1_nxt_ptr;
+};
+#define HNS_ROCE_LINK_TABLE_BA1_S 0
+#define HNS_ROCE_LINK_TABLE_BA1_M GENMASK(19, 0)
+
+#define HNS_ROCE_LINK_TABLE_NXT_PTR_S 20
+#define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
+
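The _S/_M pairs above follow the driver's usual convention: a shift plus a GENMASK() that roce_set_field()/roce_get_field() apply to a __le32 word. A hedged sketch of the setter's shape (inferred from how this patch uses it; not code from the patch):

	#define roce_set_field_sketch(origin, mask, shift, val) do {	\
		(origin) &= ~cpu_to_le32(mask);				\
		(origin) |= cpu_to_le32(((u32)(val) << (shift)) & (mask)); \
	} while (0)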
struct hns_roce_v2_priv {
struct hns_roce_v2_cmq cmq;
+ struct hns_roce_link_table tsq;
+ struct hns_roce_link_table tpq;
};
struct hns_roce_eq_context {
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 21b901cfa2d6..c5cae9a38c04 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -74,8 +74,7 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
-static int hns_roce_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr, void **context)
+static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
u8 port = attr->port_num - 1;
@@ -87,8 +86,7 @@ static int hns_roce_add_gid(const union ib_gid *gid,
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index,
- (union ib_gid *)gid, attr);
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
@@ -208,7 +206,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_RC_RNR_NAK_GEN;
- props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
+ props->max_send_sge = hr_dev->caps.max_sq_sg;
+ props->max_recv_sge = hr_dev->caps.max_rq_sg;
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
@@ -535,6 +534,9 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
+ ib_dev->uverbs_ex_cmd_mask |=
+ (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+
/* HCA||device||port */
ib_dev->modify_device = hns_roce_modify_device;
ib_dev->query_device = hns_roce_query_device;
@@ -887,8 +889,7 @@ error_failed_cmd_init:
error_failed_cmq_init:
if (hr_dev->hw->reset) {
- ret = hr_dev->hw->reset(hr_dev, false);
- if (ret)
+ if (hr_dev->hw->reset(hr_dev, false))
dev_err(dev, "Dereset RoCE engine failed!\n");
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index b9f2c871ff9a..e11c149da04d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -37,7 +37,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
- return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
+ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index baaf906f7c2e..efb7e961ca65 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -115,7 +115,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
+ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
+ base) ? -ENOMEM : 0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -489,6 +492,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_TGT)
+ return 0;
+
+ return 1;
+}
+
static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
if (attr->qp_type == IB_QPT_XRC_INI ||
@@ -613,6 +624,23 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_mtt;
}
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ (udata->inlen >= sizeof(ucmd)) &&
+ (udata->outlen >= sizeof(resp)) &&
+ hns_roce_qp_has_sq(init_attr)) {
+ ret = hns_roce_db_map_user(
+ to_hr_ucontext(ib_pd->uobject->context),
+ ucmd.sdb_addr, &hr_qp->sdb);
+ if (ret) {
+ dev_err(dev, "sq record doorbell map failed!\n");
+ goto err_mtt;
+ }
+
+ /* indicate kernel supports sq record db */
+ resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
+ hr_qp->sdb_en = 1;
+ }
+
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)) &&
hns_roce_qp_has_rq(init_attr)) {
@@ -621,7 +649,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ucmd.db_addr, &hr_qp->rdb);
if (ret) {
dev_err(dev, "rq record doorbell map failed!\n");
- goto err_mtt;
+ goto err_sq_dbmap;
}
}
} else {
@@ -734,7 +762,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
- /* indicate kernel supports record db */
+ /* indicate kernel supports rq record db */
resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)
@@ -770,6 +798,16 @@ err_wrid:
kfree(hr_qp->rq.wrid);
}
+err_sq_dbmap:
+ if (ib_pd->uobject)
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ (udata->inlen >= sizeof(ucmd)) &&
+ (udata->outlen >= sizeof(resp)) &&
+ hns_roce_qp_has_sq(init_attr))
+ hns_roce_db_unmap_user(
+ to_hr_ucontext(ib_pd->uobject->context),
+ &hr_qp->sdb);
+
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
@@ -903,6 +941,17 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ?
attr->qp_state : cur_state;
+ if (ibqp->uobject &&
+ (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(dev, "flush cqe is not supported in userspace!\n");
+ goto out;
+ }
+ }
+
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index 2962979c06e9..d867ef1ac72a 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E
+ depends on IPV6 || !IPV6
depends on PCI
select GENERIC_ALLOCATOR
---help---
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 7b2655128b9f..423818a7d333 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -57,6 +57,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip_fib.h>
+#include <net/secure_seq.h>
#include <net/tcp.h>
#include <asm/checksum.h>
@@ -2164,7 +2165,6 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
struct i40iw_cm_listener *listener)
{
struct i40iw_cm_node *cm_node;
- struct timespec ts;
int oldarpindex;
int arpindex;
struct net_device *netdev = iwdev->netdev;
@@ -2214,10 +2214,26 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
cm_node->tcp_cntxt.rcv_wnd =
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
- ts = current_kernel_time();
- cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
- cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4) :
- (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6);
+ if (cm_node->ipv4) {
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
+ htonl(cm_node->rem_addr[0]),
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4;
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ __be32 loc[4] = {
+ htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
+ htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
+ };
+ __be32 rem[4] = {
+ htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
+ htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
+ };
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6;
+ }
cm_node->iwdev = iwdev;
cm_node->dev = &iwdev->sc_dev;
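The hunk above replaces the old nanoseconds-of-day initial sequence number with the kernel's hash-based generators. The IPv4 call shape, as a sketch (addresses and ports in network byte order, per <net/secure_seq.h>):

	u32 isn = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
				 htonl(cm_node->rem_addr[0]),
				 htons(cm_node->loc_port),
				 htons(cm_node->rem_port));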
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 2836c5420d60..55a1fbf0e670 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -435,45 +435,24 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
}
/**
- * i40iw_manage_apbvt - add or delete tcp port
+ * i40iw_cqp_manage_abvpt_cmd - send cqp command to manage apbvt entry
* @iwdev: iwarp device
* @accel_local_port: port for apbvt
* @add_port: add or delete port
*/
-int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
+static enum i40iw_status_code
+i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,
+ u16 accel_local_port,
+ bool add_port)
{
struct i40iw_apbvt_info *info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
- unsigned long flags;
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
- enum i40iw_status_code status = 0;
- bool in_use;
-
- /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
- * protect against race where add APBVT CQP can race ahead of the delete
- * APBVT for same port.
- */
- spin_lock_irqsave(&cm_core->apbvt_lock, flags);
-
- if (!add_port) {
- in_use = i40iw_port_in_use(cm_core, accel_local_port);
- if (in_use)
- goto exit;
- clear_bit(accel_local_port, cm_core->ports_in_use);
- } else {
- in_use = test_and_set_bit(accel_local_port,
- cm_core->ports_in_use);
- spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
- if (in_use)
- return 0;
- }
+ enum i40iw_status_code status;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
- if (!cqp_request) {
- status = -ENOMEM;
- goto exit;
- }
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
@@ -489,14 +468,54 @@ int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool ad
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage APBVT entry fail");
-exit:
- if (!add_port)
- spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
return status;
}
/**
+ * i40iw_manage_apbvt - add or delete tcp port
+ * @iwdev: iwarp device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
+ u16 accel_local_port,
+ bool add_port)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ enum i40iw_status_code status;
+ unsigned long flags;
+ bool in_use;
+
+ /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
+ * protect against race where add APBVT CQP can race ahead of the delete
+ * APBVT for same port.
+ */
+ if (add_port) {
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ in_use = __test_and_set_bit(accel_local_port,
+ cm_core->ports_in_use);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ if (in_use)
+ return 0;
+ return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
+ true);
+ } else {
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ in_use = i40iw_port_in_use(cm_core, accel_local_port);
+ if (in_use) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return 0;
+ }
+ __clear_bit(accel_local_port, cm_core->ports_in_use);
+ status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
+ false);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return status;
+ }
+}
+
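The comment above is easiest to see as a timeline; holding apbvt_lock across the non-waiting CQP delete keeps a concurrent add for the same port from overtaking it (illustrative interleaving):

	/*
	 *   CPU0 (delete port P)            CPU1 (add port P)
	 *   lock(apbvt_lock)
	 *   __clear_bit(P)                  lock(apbvt_lock) ...waits
	 *   CQP delete P (non-waiting)
	 *   unlock(apbvt_lock)              __test_and_set_bit(P) -> was 0
	 *                                   CQP add P issued after the delete
	 */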
+/**
* i40iw_manage_arp_cache - manage hw arp cache
* @iwdev: iwarp device
* @mac_addr: mac address ptr
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 68679ad4c6da..e2e6c74a7452 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -71,7 +71,8 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
props->max_qp = iwdev->max_qp - iwdev->used_qps;
props->max_qp_wr = I40IW_MAX_QP_WRS;
- props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
props->max_cq = iwdev->max_cq - iwdev->used_cqs;
props->max_cqe = iwdev->max_cqe;
props->max_mr = iwdev->max_mr - iwdev->used_mrs;
@@ -1409,6 +1410,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
struct vm_area_struct *vma;
struct hstate *h;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
if (vma && is_vm_hugetlb_page(vma)) {
h = hstate_vma(vma);
@@ -1417,6 +1419,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
iwmr->page_msk = huge_page_mask(h);
}
}
+ up_read(&current->mm->mmap_sem);
}
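The two added lines above fix a locking bug: find_vma() may only be called, and its result only dereferenced, while mmap_sem is held for read. The canonical pattern, as a sketch:

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (vma) {
		/* inspect vma fields while the lock is still held */
	}
	up_read(&current->mm->mmap_sem);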
/**
@@ -2198,8 +2201,8 @@ static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, in
* @bad_wr: return of bad wr if err
*/
static int i40iw_post_send(struct ib_qp *ibqp,
- struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr *ib_wr,
+ const struct ib_send_wr **bad_wr)
{
struct i40iw_qp *iwqp;
struct i40iw_qp_uk *ukqp;
@@ -2374,9 +2377,8 @@ out:
* @ib_wr: work request for receive
* @bad_wr: bad wr caused an error
*/
-static int i40iw_post_recv(struct ib_qp *ibqp,
- struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr)
+static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
{
struct i40iw_qp *iwqp;
struct i40iw_qp_uk *ukqp;
@@ -2701,21 +2703,6 @@ static int i40iw_query_gid(struct ib_device *ibdev,
}
/**
- * i40iw_modify_port Modify port properties
- * @ibdev: device pointer from stack
- * @port: port number
- * @port_modify_mask: mask for port modifications
- * @props: port properties
- */
-static int i40iw_modify_port(struct ib_device *ibdev,
- u8 port,
- int port_modify_mask,
- struct ib_port_modify *props)
-{
- return -ENOSYS;
-}
-
-/**
* i40iw_query_pkey - Query partition key
* @ibdev: device pointer from stack
* @port: port number
@@ -2732,28 +2719,6 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
}
/**
- * i40iw_create_ah - create address handle
- * @ibpd: ptr of pd
- * @ah_attr: address handle attributes
- */
-static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
- struct rdma_ah_attr *attr,
- struct ib_udata *udata)
-
-{
- return ERR_PTR(-ENOSYS);
-}
-
-/**
- * i40iw_destroy_ah - Destroy address handle
- * @ah: pointer to address handle
- */
-static int i40iw_destroy_ah(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-/**
* i40iw_get_vector_affinity - report IRQ affinity mask
* @ibdev: IB device
* @comp_vector: completion vector index
@@ -2820,7 +2785,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
iwibdev->ibdev.dev.parent = &pcidev->dev;
iwibdev->ibdev.query_port = i40iw_query_port;
- iwibdev->ibdev.modify_port = i40iw_modify_port;
iwibdev->ibdev.query_pkey = i40iw_query_pkey;
iwibdev->ibdev.query_gid = i40iw_query_gid;
iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
@@ -2840,8 +2804,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
iwibdev->ibdev.query_device = i40iw_query_device;
- iwibdev->ibdev.create_ah = i40iw_create_ah;
- iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
iwibdev->ibdev.drain_sq = i40iw_drain_sq;
iwibdev->ibdev.drain_rq = i40iw_drain_rq;
iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
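The hugetlb hunk above works because find_vma() may only be called while the caller holds the mm's semaphore. A minimal sketch of that reader-side rule, assuming the pre-4.20 mmap_sem naming and a hypothetical helper name:

#include <linux/mm.h>
#include <linux/hugetlb.h>

static bool addr_is_hugetlb(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool huge = false;

	down_read(&mm->mmap_sem);	/* pin the VMA tree for reading */
	vma = find_vma(mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		huge = true;
	up_read(&mm->mmap_sem);		/* drop before returning */
	return huge;
}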
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 9345d5b546d1..e9e3a6f390db 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -82,12 +82,11 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
struct mlx4_ib_ah *ah)
{
struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+ const struct ib_gid_attr *gid_attr;
struct mlx4_dev *dev = ibdev->dev;
int is_mcast = 0;
struct in6_addr in6;
u16 vlan_tag = 0xffff;
- union ib_gid sgid;
- struct ib_gid_attr gid_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
int ret;
@@ -96,25 +95,30 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
is_mcast = 1;
memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
- ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
- grh->sgid_index, &sgid, &gid_attr);
- if (ret)
- return ERR_PTR(ret);
eth_zero_addr(ah->av.eth.s_mac);
- if (is_vlan_dev(gid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
- memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN);
- dev_put(gid_attr.ndev);
+
+ /*
+ * If sgid_attr is NULL, we are being called by mlx4_ib_create_ah_slave

+ * and we are directly creating an AV for a slave's gid_index.
+ */
+ gid_attr = ah_attr->grh.sgid_attr;
+ if (gid_attr) {
+ if (is_vlan_dev(gid_attr->ndev))
+ vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+ memcpy(ah->av.eth.s_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ ret = mlx4_ib_gid_index_to_real_index(ibdev, gid_attr);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ah->av.eth.gid_index = ret;
+ } else {
+ /* mlx4_ib_create_ah_slave fills in the s_mac and the vlan */
+ ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+ }
+
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
(rdma_ah_get_port_num(ah_attr) << 24));
- ret = mlx4_ib_gid_index_to_real_index(ibdev,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index);
- if (ret < 0)
- return ERR_PTR(ret);
- ah->av.eth.gid_index = ret;
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
ah->av.eth.hop_limit = grh->hop_limit;
if (rdma_ah_get_static_rate(ah_attr)) {
@@ -173,6 +177,40 @@ struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
return create_ib_ah(pd, ah_attr, ah); /* never fails */
}
+/* AHs created via this call must be freed by mlx4_ib_destroy_ah. */
+struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac,
+ u16 vlan_tag)
+{
+ struct rdma_ah_attr slave_attr = *ah_attr;
+ struct mlx4_ib_ah *mah;
+ struct ib_ah *ah;
+
+ slave_attr.grh.sgid_attr = NULL;
+ slave_attr.grh.sgid_index = slave_sgid_index;
+ ah = mlx4_ib_create_ah(pd, &slave_attr, NULL);
+ if (IS_ERR(ah))
+ return ah;
+
+ ah->device = pd->device;
+ ah->pd = pd;
+ ah->type = ah_attr->type;
+ mah = to_mah(ah);
+
+ /* get rid of force-loopback bit */
+ mah->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
+ memcpy(mah->av.eth.s_mac, s_mac, 6);
+
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
+ mah->av.eth.vlan = cpu_to_be16(vlan_tag);
+
+ return ah;
+}
+
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
struct mlx4_ib_ah *ah = to_mah(ibah);
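For orientation, a hedged sketch (helper name hypothetical) of the pattern the rewritten create_iboe_ah() relies on: the core now pins ah_attr->grh.sgid_attr for the duration of the call, so the VLAN tag and source MAC come straight off the attribute's netdev, with no cached-GID lookup or dev_put():

#include <linux/if_vlan.h>
#include <rdma/ib_verbs.h>

static u16 sgid_attr_vlan(const struct rdma_ah_attr *ah_attr)
{
	const struct ib_gid_attr *gid_attr = ah_attr->grh.sgid_attr;
	u16 vlan_tag = 0xffff;		/* 0xffff means "no VLAN" */

	if (gid_attr && is_vlan_dev(gid_attr->ndev))
		vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
	return vlan_tag;
}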
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 90a3e2642c2e..e5466d786bb1 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -506,7 +506,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
{
struct ib_sge list;
struct ib_ud_wr wr;
- struct ib_send_wr *bad_wr;
+ const struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *tun_ctx;
struct mlx4_ib_demux_pv_qp *tun_qp;
struct mlx4_rcv_tunnel_mad *tun_mad;
@@ -1310,7 +1310,8 @@ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
int index)
{
struct ib_sge sg_list;
- struct ib_recv_wr recv_wr, *bad_recv_wr;
+ struct ib_recv_wr recv_wr;
+ const struct ib_recv_wr *bad_recv_wr;
int size;
size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
@@ -1361,19 +1362,16 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
{
struct ib_sge list;
struct ib_ud_wr wr;
- struct ib_send_wr *bad_wr;
+ const struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *sqp_ctx;
struct mlx4_ib_demux_pv_qp *sqp;
struct mlx4_mad_snd_buf *sqp_mad;
struct ib_ah *ah;
struct ib_qp *send_qp = NULL;
- struct ib_global_route *grh;
unsigned wire_tx_ix = 0;
int ret = 0;
u16 wire_pkey_ix;
int src_qpnum;
- u8 sgid_index;
-
sqp_ctx = dev->sriov.sqps[port-1];
@@ -1394,16 +1392,11 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
send_qp = sqp->qp;
/* create ah */
- grh = rdma_ah_retrieve_grh(attr);
- sgid_index = grh->sgid_index;
- grh->sgid_index = 0;
- ah = rdma_create_ah(sqp_ctx->pd, attr);
+ ah = mlx4_ib_create_ah_slave(sqp_ctx->pd, attr,
+ rdma_ah_retrieve_grh(attr)->sgid_index,
+ s_mac, vlan_id);
if (IS_ERR(ah))
return -ENOMEM;
- grh->sgid_index = sgid_index;
- to_mah(ah)->av.ib.gid_index = sgid_index;
- /* get rid of force-loopback bit */
- to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
spin_lock(&sqp->tx_lock);
if (sqp->tx_ix_head - sqp->tx_ix_tail >=
(MLX4_NUM_TUNNEL_BUFS - 1))
@@ -1445,12 +1438,6 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
wr.wr.num_sge = 1;
wr.wr.opcode = IB_WR_SEND;
wr.wr.send_flags = IB_SEND_SIGNALED;
- if (s_mac)
- memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
- if (vlan_id < 0x1000)
- vlan_id |= (rdma_ah_get_sl(attr) & 7) << 13;
- to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
-
ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
if (!ret)
@@ -1461,7 +1448,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
spin_unlock(&sqp->tx_lock);
sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- rdma_destroy_ah(ah);
+ mlx4_ib_destroy_ah(ah);
return ret;
}
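The signature changes above ripple into every caller: the posted WR chain is now immutable to the provider, and only the bad_wr cursor may point into it. A hedged caller-side sketch (helper name hypothetical):

#include <rdma/ib_verbs.h>

static int post_one_ud_send(struct ib_qp *qp, struct ib_ud_wr *wr)
{
	const struct ib_send_wr *bad_wr;	/* set to the failing WR */
	int err;

	err = ib_post_send(qp, &wr->wr, &bad_wr);
	if (err)
		pr_warn("ib_post_send failed at wr %p: %d\n", bad_wr, err);
	return err;
}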
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4ec519afc45b..ca0f1ee26091 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -246,9 +246,7 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
-static int mlx4_ib_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context)
+static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
@@ -271,8 +269,9 @@ static int mlx4_ib_add_gid(const union ib_gid *gid,
port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
- if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
- (port_gid_table->gids[i].gid_type == attr->gid_type)) {
+ if (!memcmp(&port_gid_table->gids[i].gid,
+ &attr->gid, sizeof(attr->gid)) &&
+ port_gid_table->gids[i].gid_type == attr->gid_type) {
found = i;
break;
}
@@ -289,7 +288,8 @@ static int mlx4_ib_add_gid(const union ib_gid *gid,
ret = -ENOMEM;
} else {
*context = port_gid_table->gids[free].ctx;
- memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
+ memcpy(&port_gid_table->gids[free].gid,
+ &attr->gid, sizeof(attr->gid));
port_gid_table->gids[free].gid_type = attr->gid_type;
port_gid_table->gids[free].ctx->real_index = free;
port_gid_table->gids[free].ctx->refcount = 1;
@@ -380,17 +380,15 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
}
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
- u8 port_num, int index)
+ const struct ib_gid_attr *attr)
{
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
struct gid_cache_context *ctx = NULL;
- union ib_gid gid;
struct mlx4_port_gid_table *port_gid_table;
int real_index = -EINVAL;
int i;
- int ret;
unsigned long flags;
- struct ib_gid_attr attr;
+ u8 port_num = attr->port_num;
if (port_num > MLX4_MAX_PORTS)
return -EINVAL;
@@ -399,21 +397,15 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
port_num = 1;
if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
- return index;
-
- ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
- if (ret)
- return ret;
-
- if (attr.ndev)
- dev_put(attr.ndev);
+ return attr->index;
spin_lock_irqsave(&iboe->lock, flags);
port_gid_table = &iboe->gids[port_num - 1];
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
- if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
- attr.gid_type == port_gid_table->gids[i].gid_type) {
+ if (!memcmp(&port_gid_table->gids[i].gid,
+ &attr->gid, sizeof(attr->gid)) &&
+ attr->gid_type == port_gid_table->gids[i].gid_type) {
ctx = port_gid_table->gids[i].ctx;
break;
}
@@ -525,8 +517,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
- props->max_sge = min(dev->dev->caps.max_sq_sg,
- dev->dev->caps.max_rq_sg);
+ props->max_send_sge = dev->dev->caps.max_sq_sg;
+ props->max_recv_sge = dev->dev->caps.max_rq_sg;
props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
@@ -770,7 +762,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
IB_SPEED_FDR : IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags = IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -2709,6 +2702,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
+ ibdev->ib_dev.drain_sq = mlx4_ib_drain_sq;
+ ibdev->ib_dev.drain_rq = mlx4_ib_drain_rq;
ibdev->ib_dev.post_send = mlx4_ib_post_send;
ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7b1429917aba..e10dccc7958f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -322,7 +322,6 @@ struct mlx4_ib_qp {
u32 doorbell_qpn;
__be32 sq_signal_bits;
unsigned sq_next_wqe;
- int sq_max_wqes_per_wr;
int sq_spare_wqes;
struct mlx4_ib_wq sq;
@@ -760,6 +759,10 @@ void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
+struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac,
+ u16 vlan_tag);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);
@@ -771,21 +774,23 @@ int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
-int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
+void mlx4_ib_drain_sq(struct ib_qp *qp);
+void mlx4_ib_drain_rq(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
@@ -900,7 +905,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
- u8 port_num, int index);
+ const struct ib_gid_attr *attr);
void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
int port);
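Both the i40iw and mlx4 hunks replace the single max_sge capability with a send/receive pair. A hedged sketch (helper name hypothetical) of how a ULP clamps its QP request against the split caps exposed in ib_device->attrs:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void clamp_sges(struct ib_device *dev, struct ib_qp_init_attr *init)
{
	/* send and receive directions are now sized independently */
	init->cap.max_send_sge = min_t(u32, init->cap.max_send_sge,
				       dev->attrs.max_send_sge);
	init->cap.max_recv_sge = min_t(u32, init->cap.max_recv_sge,
				       dev->attrs.max_recv_sge);
}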
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 3b8045fd23ed..6dd3cd2c2f80 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -204,91 +204,26 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
/*
* Stamp a SQ WQE so that it is invalid if prefetched by marking the
- * first four bytes of every 64 byte chunk with
- * 0x7FFFFFF | (invalid_ownership_value << 31).
- *
- * When the max work request size is less than or equal to the WQE
- * basic block size, as an optimization, we can stamp all WQEs with
- * 0xffffffff, and skip the very first chunk of each WQE.
+ * first four bytes of every 64 byte chunk with 0xffffffff, except for
+ * the very first chunk of the WQE.
*/
-static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
__be32 *wqe;
int i;
int s;
- int ind;
void *buf;
- __be32 stamp;
struct mlx4_wqe_ctrl_seg *ctrl;
- if (qp->sq_max_wqes_per_wr > 1) {
- s = roundup(size, 1U << qp->sq.wqe_shift);
- for (i = 0; i < s; i += 64) {
- ind = (i >> qp->sq.wqe_shift) + n;
- stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
- cpu_to_be32(0xffffffff);
- buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
- *wqe = stamp;
- }
- } else {
- ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
- s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
- for (i = 64; i < s; i += 64) {
- wqe = buf + i;
- *wqe = cpu_to_be32(0xffffffff);
- }
+ buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+ ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
+ s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
+ for (i = 64; i < s; i += 64) {
+ wqe = buf + i;
+ *wqe = cpu_to_be32(0xffffffff);
}
}
-static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
-{
- struct mlx4_wqe_ctrl_seg *ctrl;
- struct mlx4_wqe_inline_seg *inl;
- void *wqe;
- int s;
-
- ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
- s = sizeof(struct mlx4_wqe_ctrl_seg);
-
- if (qp->ibqp.qp_type == IB_QPT_UD) {
- struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
- struct mlx4_av *av = (struct mlx4_av *)dgram->av;
- memset(dgram, 0, sizeof *dgram);
- av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
- s += sizeof(struct mlx4_wqe_datagram_seg);
- }
-
- /* Pad the remainder of the WQE with an inline data segment. */
- if (size > s) {
- inl = wqe + s;
- inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
- }
- ctrl->srcrb_flags = 0;
- ctrl->qpn_vlan.fence_size = size / 16;
- /*
- * Make sure descriptor is fully written before setting ownership bit
- * (because HW can start executing as soon as we do).
- */
- wmb();
-
- ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
- (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
-
- stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
-}
-
-/* Post NOP WQE to prevent wrap-around in the middle of WR */
-static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
-{
- unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
- if (unlikely(s < qp->sq_max_wqes_per_wr)) {
- post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
- ind += s;
- }
- return ind;
-}
-
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
struct ib_event event;
@@ -433,8 +368,7 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
}
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
- enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
- bool shrink_wqe)
+ enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
int s;
@@ -461,70 +395,20 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
if (s > dev->dev->caps.max_sq_desc_sz)
return -EINVAL;
+ qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+
/*
- * Hermon supports shrinking WQEs, such that a single work
- * request can include multiple units of 1 << wqe_shift. This
- * way, work requests can differ in size, and do not have to
- * be a power of 2 in size, saving memory and speeding up send
- * WR posting. Unfortunately, if we do this then the
- * wqe_index field in CQEs can't be used to look up the WR ID
- * anymore, so we do this only if selective signaling is off.
- *
- * Further, on 32-bit platforms, we can't use vmap() to make
- * the QP buffer virtually contiguous. Thus we have to use
- * constant-sized WRs to make sure a WR is always fully within
- * a single page-sized chunk.
- *
- * Finally, we use NOP work requests to pad the end of the
- * work queue, to avoid wrap-around in the middle of WR. We
- * set NEC bit to avoid getting completions with error for
- * these NOP WRs, but since NEC is only supported starting
- * with firmware 2.2.232, we use constant-sized WRs for older
- * firmware.
- *
- * And, since MLX QPs only support SEND, we use constant-sized
- * WRs in this case.
- *
- * We look for the smallest value of wqe_shift such that the
- * resulting number of wqes does not exceed device
- * capabilities.
- *
- * We set WQE size to at least 64 bytes, this way stamping
- * invalidates each WQE.
+ * We need to leave 2 KB + 1 WR of headroom in the SQ to
+ * allow HW to prefetch.
*/
- if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
- qp->sq_signal_bits && BITS_PER_LONG == 64 &&
- type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
- !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
- MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
- qp->sq.wqe_shift = ilog2(64);
- else
- qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
-
- for (;;) {
- qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
-
- /*
- * We need to leave 2 KB + 1 WR of headroom in the SQ to
- * allow HW to prefetch.
- */
- qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
- qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
- qp->sq_max_wqes_per_wr +
- qp->sq_spare_wqes);
-
- if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
- break;
-
- if (qp->sq_max_wqes_per_wr <= 1)
- return -EINVAL;
-
- ++qp->sq.wqe_shift;
- }
-
- qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
- (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
- send_wqe_overhead(type, qp->flags)) /
+ qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
+ qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
+ qp->sq_spare_wqes);
+
+ qp->sq.max_gs =
+ (min(dev->dev->caps.max_sq_desc_sz,
+ (1 << qp->sq.wqe_shift)) -
+ send_wqe_overhead(type, qp->flags)) /
sizeof (struct mlx4_wqe_data_seg);
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
@@ -538,7 +422,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
}
cap->max_send_wr = qp->sq.max_post =
- (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
+ qp->sq.wqe_cnt - qp->sq_spare_wqes;
cap->max_send_sge = min(qp->sq.max_gs,
min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg));
@@ -977,7 +861,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
{
int qpn;
int err;
- struct ib_qp_cap backup_cap;
struct mlx4_ib_sqp *sqp = NULL;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
@@ -1178,9 +1061,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
}
- memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
- err = set_kernel_sq_size(dev, &init_attr->cap,
- qp_type, qp, true);
+ err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
if (err)
goto err;
@@ -1192,20 +1073,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*qp->db.db = 0;
}
- if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
&qp->buf)) {
- memcpy(&init_attr->cap, &backup_cap,
- sizeof(backup_cap));
- err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
- qp, false);
- if (err)
- goto err_db;
-
- if (mlx4_buf_alloc(dev->dev, qp->buf_size,
- PAGE_SIZE * 2, &qp->buf)) {
- err = -ENOMEM;
- goto err_db;
- }
+ err = -ENOMEM;
+ goto err_db;
}
err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
@@ -1859,8 +1730,7 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah);
int real_sgid_index =
- mlx4_ib_gid_index_to_real_index(dev, port,
- grh->sgid_index);
+ mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);
if (real_sgid_index < 0)
return real_sgid_index;
@@ -2176,6 +2046,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
{
struct ib_uobject *ibuobject;
struct ib_srq *ibsrq;
+ const struct ib_gid_attr *gid_attr = NULL;
struct ib_rwq_ind_table *rwq_ind_tbl;
enum ib_qp_type qp_type;
struct mlx4_ib_dev *dev;
@@ -2356,29 +2227,17 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
if (attr_mask & IB_QP_AV) {
u8 port_num = mlx4_is_bonded(dev->dev) ? 1 :
attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- union ib_gid gid;
- struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
u16 vlan = 0xffff;
u8 smac[ETH_ALEN];
- int status = 0;
int is_eth =
rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
if (is_eth) {
- int index =
- rdma_ah_read_grh(&attr->ah_attr)->sgid_index;
-
- status = ib_get_cached_gid(&dev->ib_dev, port_num,
- index, &gid, &gid_attr);
- if (!status) {
- vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev);
- memcpy(smac, gid_attr.ndev->dev_addr, ETH_ALEN);
- dev_put(gid_attr.ndev);
- }
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+ memcpy(smac, gid_attr->ndev->dev_addr, ETH_ALEN);
}
- if (status)
- goto out;
if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
port_num, vlan, smac))
@@ -2389,7 +2248,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
if (is_eth &&
(cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
- u8 qpc_roce_mode = gid_type_to_qpc(gid_attr.gid_type);
+ u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type);
if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
err = -EINVAL;
@@ -2594,11 +2453,9 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
for (i = 0; i < qp->sq.wqe_cnt; ++i) {
ctrl = get_send_wqe(qp, i);
ctrl->owner_opcode = cpu_to_be32(1 << 31);
- if (qp->sq_max_wqes_per_wr == 1)
- ctrl->qpn_vlan.fence_size =
- 1 << (qp->sq.wqe_shift - 4);
-
- stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
+ ctrl->qpn_vlan.fence_size =
+ 1 << (qp->sq.wqe_shift - 4);
+ stamp_send_wqe(qp, i);
}
}
@@ -2937,7 +2794,7 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
}
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
- struct ib_ud_wr *wr,
+ const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
@@ -3085,7 +2942,7 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
}
#define MLX4_ROCEV2_QP1_SPORT 0xC000
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
+static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
struct ib_device *ib_dev = sqp->qp.ibqp.device;
@@ -3181,10 +3038,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
guid_cache[ah->av.ib.gid_index];
} else {
- ib_get_cached_gid(ib_dev,
- be32_to_cpu(ah->av.ib.port_pd) >> 24,
- ah->av.ib.gid_index,
- &sqp->ud_header.grh.source_gid, NULL);
+ sqp->ud_header.grh.source_gid =
+ ah->ibah.sgid_attr->gid;
}
}
memcpy(sqp->ud_header.grh.destination_gid.raw,
@@ -3369,7 +3224,7 @@ static __be32 convert_access(int acc)
}
static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
- struct ib_reg_wr *wr)
+ const struct ib_reg_wr *wr)
{
struct mlx4_ib_mr *mr = to_mmr(wr->mr);
@@ -3399,7 +3254,7 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
- struct ib_atomic_wr *wr)
+ const struct ib_atomic_wr *wr)
{
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->swap);
@@ -3415,7 +3270,7 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
- struct ib_atomic_wr *wr)
+ const struct ib_atomic_wr *wr)
{
aseg->swap_add = cpu_to_be64(wr->swap);
aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
@@ -3424,7 +3279,7 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
- struct ib_ud_wr *wr)
+ const struct ib_ud_wr *wr)
{
memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
dseg->dqpn = cpu_to_be32(wr->remote_qpn);
@@ -3435,7 +3290,7 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
struct mlx4_wqe_datagram_seg *dseg,
- struct ib_ud_wr *wr,
+ const struct ib_ud_wr *wr,
enum mlx4_ib_qp_type qpt)
{
union mlx4_ext_av *av = &to_mah(wr->ah)->av;
@@ -3457,7 +3312,8 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
-static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
+static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
+ unsigned *mlx_seg_len)
{
struct mlx4_wqe_inline_seg *inl = wqe;
struct mlx4_ib_tunnel_header hdr;
@@ -3540,9 +3396,9 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr);
}
-static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
- struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
- __be32 *lso_hdr_sz, __be32 *blh)
+static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
+ const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
+ unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
{
unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
@@ -3560,7 +3416,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
return 0;
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
@@ -3582,8 +3438,8 @@ static void add_zero_len_inline(void *wqe)
inl->byte_count = cpu_to_be32(1 << 31);
}
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
{
struct mlx4_ib_qp *qp = to_mqp(ibqp);
void *wqe;
@@ -3593,7 +3449,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int err = 0;
unsigned ind;
- int uninitialized_var(stamp);
int uninitialized_var(size);
unsigned uninitialized_var(seglen);
__be32 dummy;
@@ -3623,7 +3478,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -3865,22 +3721,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
- stamp = ind + qp->sq_spare_wqes;
- ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
-
/*
* We can improve latency by not stamping the last
* send queue WQE until after ringing the doorbell, so
* only stamp here if there are still more WQEs to post.
- *
- * Same optimization applies to padding with NOP wqe
- * in case of WQE shrinking (used to prevent wrap-around
- * in the middle of WR).
*/
- if (wr->next) {
- stamp_send_wqe(qp, stamp, size * 16);
- ind = pad_wraparound(qp, ind);
- }
+ if (wr->next)
+ stamp_send_wqe(qp, ind + qp->sq_spare_wqes);
+ ind++;
}
out:
@@ -3902,9 +3750,8 @@ out:
*/
mmiowb();
- stamp_send_wqe(qp, stamp, size * 16);
+ stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
- ind = pad_wraparound(qp, ind);
qp->sq_next_wqe = ind;
}
@@ -3913,8 +3760,14 @@ out:
return err;
}
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
{
struct mlx4_ib_qp *qp = to_mqp(ibqp);
struct mlx4_wqe_data_seg *scat;
@@ -3929,7 +3782,8 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
max_gs = qp->rq.max_gs;
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4000,6 +3854,12 @@ out:
return err;
}
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
switch (mlx4_state) {
@@ -4047,9 +3907,9 @@ static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
memset(ah_attr, 0, sizeof(*ah_attr));
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
if (port_num == 0 || port_num > dev->caps.num_ports)
return;
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
@@ -4465,3 +4325,132 @@ int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
kfree(ib_rwq_ind_tbl);
return 0;
}
+
+struct mlx4_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx4_ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/* This function returns only once the drain WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx4_ib_drain_cqe *sdrain,
+ struct mlx4_ib_dev *dev)
+{
+ struct mlx4_dev *mdev = dev->dev;
+
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
+
+ if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx4_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ /* Make sure that the CQ handler won't run if it hasn't run yet */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+ /* Wait for any scheduled/running task to be ended */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ /* Run the CQ handler - this makes sure that the drain WR will
+ * be processed if it wasn't processed yet.
+ */
+ mcq->mcq.comp(&mcq->mcq);
+ }
+
+ wait_for_completion(&sdrain->done);
+}
+
+void mlx4_ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx4_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx4_ib_dev *dev = to_mdev(qp->device);
+ struct mlx4_dev *mdev = dev->dev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ sdrain.cqe.done = mlx4_ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &sdrain, dev);
+}
+
+void mlx4_ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx4_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx4_ib_dev *dev = to_mdev(qp->device);
+ struct mlx4_dev *mdev = dev->dev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx4_ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &rdrain, dev);
+}
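With mlx4_ib_drain_sq/rq wired into the device ops (see the main.c hunk above), the core's ib_drain_qp() dispatches to them instead of its generic marker-WR fallback, which cannot complete once the device hits internal error. A hedged ULP teardown sketch (helper name hypothetical):

#include <rdma/ib_verbs.h>

static void ulp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* blocks until both queues are flushed */
	ib_destroy_qp(qp);
}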
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index ebee56cbc0e2..3731b31c3653 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -307,8 +307,8 @@ void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx4_ib_srq *srq = to_msrq(ibsrq);
struct mlx4_wqe_srq_next_seg *next;
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index fb4d77be019b..0440966bc6ec 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,5 +1,5 @@
config MLX5_INFINIBAND
- tristate "Mellanox Connect-IB HCA support"
+ tristate "Mellanox 5th generation network adapters (ConnectX series) support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
---help---
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index d42b922bede8..b8e4b15e2674 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index e6bde32a83f3..ffd03bf1a71e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -37,7 +37,6 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
struct rdma_ah_attr *ah_attr)
{
enum ib_gid_type gid_type;
- int err;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
@@ -53,18 +52,12 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
- err = mlx5_get_roce_gid_type(dev, ah_attr->port_num,
- ah_attr->grh.sgid_index,
- &gid_type);
- if (err)
- return ERR_PTR(err);
+ gid_type = ah_attr->grh.sgid_attr->gid_type;
memcpy(ah->av.rmac, ah_attr->roce.dmac,
sizeof(ah_attr->roce.dmac));
ah->av.udp_sport =
- mlx5_get_roce_udp_sport(dev,
- rdma_ah_get_port_num(ah_attr),
- rdma_ah_read_grh(ah_attr)->sgid_index);
+ mlx5_get_roce_udp_sport(dev, ah_attr->grh.sgid_attr);
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#define MLX5_ECN_ENABLED BIT(1)
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 188512bf46e6..c84fef9a8a08 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -32,6 +32,21 @@
#include "cmd.h"
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
+ int err;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *mkey = MLX5_GET(query_special_contexts_out, out,
+ dump_fill_mkey);
+ return err;
+}
+
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
@@ -170,3 +185,15 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
return err;
}
+
+int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
+ 0, 0);
+}
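Both new helpers follow the standard mlx5 mailbox idiom: MLX5_SET() the inbox, mlx5_cmd_exec(), MLX5_GET() the outbox. A hedged caller sketch for the new mkey query (wrapper name hypothetical, includes as in this driver):

#include "mlx5_ib.h"
#include "cmd.h"

static int fetch_dump_fill_mkey(struct mlx5_ib_dev *dev, u32 *mkey)
{
	int err = mlx5_cmd_dump_fill_mkey(dev->mdev, mkey);

	if (err)
		mlx5_ib_warn(dev, "dump_fill_mkey query failed: %d\n", err);
	return err;
}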
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index e7206c8a8011..88cbb1c41703 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -37,9 +37,11 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);
+int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size);
int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 985fa2637390..7e4e358a4fd8 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -359,9 +359,6 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
int ret;
char lbuf[11];
- if (*pos)
- return 0;
-
ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
if (ret)
return ret;
@@ -370,11 +367,7 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
if (ret < 0)
return ret;
- if (copy_to_user(buf, lbuf, ret))
- return -EFAULT;
-
- *pos += ret;
- return ret;
+ return simple_read_from_buffer(buf, count, pos, lbuf, ret);
}
static const struct file_operations dbg_cc_fops = {
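simple_read_from_buffer() subsumes the deleted *pos check, copy_to_user(), and position update, including correct short-read behavior. A hedged minimal debugfs read op (names and sample value hypothetical):

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t dbg_val_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	char lbuf[16];
	int len = scnprintf(lbuf, sizeof(lbuf), "%u\n", 42 /* sample */);

	/* handles *pos advancement and partial copies for us */
	return simple_read_from_buffer(buf, count, pos, lbuf, len);
}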
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index ad39d64b8108..088205d7f1a1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1184,7 +1184,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int err;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
- return -ENOSYS;
+ return -EOPNOTSUPP;
if (cq_period > MLX5_MAX_CQ_PERIOD)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
new file mode 100644
index 000000000000..ac116d63e466
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
+struct devx_obj {
+ struct mlx5_core_dev *mdev;
+ u32 obj_id;
+ u32 dinlen; /* destroy inbox length */
+ u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
+};
+
+struct devx_umem {
+ struct mlx5_core_dev *mdev;
+ struct ib_umem *umem;
+ u32 page_offset;
+ int page_shift;
+ int ncont;
+ u32 dinlen;
+ u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
+};
+
+struct devx_umem_reg_cmd {
+ void *in;
+ u32 inlen;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+};
+
+static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
+{
+ return to_mucontext(ib_uverbs_get_ucontext(file));
+}
+
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u64 general_obj_types;
+ void *hdr;
+ int err;
+
+ hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
+
+ general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
+ !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
+
+ err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return 0;
+}
+
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+
+ mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
+{
+ struct devx_obj *devx_obj = obj;
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_DESTROY_TIR:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
+ obj_id);
+ return true;
+
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
+ table_id);
+ return true;
+ default:
+ return false;
+ }
+}
+
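+/*
+ * Map each whitelisted opcode to the field carrying its object id and
+ * compare it against the id recorded in this devx object, so commands
+ * issued on a handle can only address the object bound to it.
+ */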
+static bool devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ u32 obj_id;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ break;
+ case MLX5_CMD_OP_QUERY_MKEY:
+ obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
+ break;
+ case MLX5_CMD_OP_QUERY_CQ:
+ obj_id = MLX5_GET(query_cq_in, in, cqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_CQ:
+ obj_id = MLX5_GET(modify_cq_in, in, cqn);
+ break;
+ case MLX5_CMD_OP_QUERY_SQ:
+ obj_id = MLX5_GET(query_sq_in, in, sqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_SQ:
+ obj_id = MLX5_GET(modify_sq_in, in, sqn);
+ break;
+ case MLX5_CMD_OP_QUERY_RQ:
+ obj_id = MLX5_GET(query_rq_in, in, rqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RQ:
+ obj_id = MLX5_GET(modify_rq_in, in, rqn);
+ break;
+ case MLX5_CMD_OP_QUERY_RMP:
+ obj_id = MLX5_GET(query_rmp_in, in, rmpn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RMP:
+ obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
+ break;
+ case MLX5_CMD_OP_QUERY_RQT:
+ obj_id = MLX5_GET(query_rqt_in, in, rqtn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RQT:
+ obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
+ break;
+ case MLX5_CMD_OP_QUERY_TIR:
+ obj_id = MLX5_GET(query_tir_in, in, tirn);
+ break;
+ case MLX5_CMD_OP_MODIFY_TIR:
+ obj_id = MLX5_GET(modify_tir_in, in, tirn);
+ break;
+ case MLX5_CMD_OP_QUERY_TIS:
+ obj_id = MLX5_GET(query_tis_in, in, tisn);
+ break;
+ case MLX5_CMD_OP_MODIFY_TIS:
+ obj_id = MLX5_GET(modify_tis_in, in, tisn);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ obj_id = MLX5_GET(query_flow_table_in, in, table_id);
+ break;
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ obj_id = MLX5_GET(query_flow_group_in, in, group_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ obj_id = MLX5_GET(query_fte_in, in, flow_index);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ break;
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
+ break;
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ break;
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ obj_id = MLX5_GET(query_scheduling_element_in, in,
+ scheduling_element_id);
+ break;
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ obj_id = MLX5_GET(modify_scheduling_element_in, in,
+ scheduling_element_id);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ break;
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ break;
+ case MLX5_CMD_OP_QUERY_QP:
+ obj_id = MLX5_GET(query_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ obj_id = MLX5_GET(qp_2err_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_2RST_QP:
+ obj_id = MLX5_GET(qp_2rst_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_QUERY_DCT:
+ obj_id = MLX5_GET(query_dct_in, in, dctn);
+ break;
+ case MLX5_CMD_OP_QUERY_XRQ:
+ obj_id = MLX5_GET(query_xrq_in, in, xrqn);
+ break;
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
+ break;
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
+ break;
+ case MLX5_CMD_OP_QUERY_SRQ:
+ obj_id = MLX5_GET(query_srq_in, in, srqn);
+ break;
+ case MLX5_CMD_OP_ARM_RQ:
+ obj_id = MLX5_GET(arm_rq_in, in, srq_number);
+ break;
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ obj_id = MLX5_GET(drain_dct_in, in, dctn);
+ break;
+ case MLX5_CMD_OP_ARM_XRQ:
+ obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
+ break;
+ default:
+ return false;
+ }
+
+ if (obj_id == obj->obj_id)
+ return true;
+
+ return false;
+}
+
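+/*
+ * Opcode whitelists: a command that none of the classifiers below
+ * recognize is rejected before anything reaches firmware.
+ */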
+static bool devx_is_obj_create_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_CREATE_XRQ:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+
+ if (op_mod == 0)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_modify_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_ARM_XRQ:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+
+ if (op_mod == 1)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_query_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_general_cmd(void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int user_vector;
+ int dev_eqn;
+ unsigned int irqn;
+ int err;
+
+ if (uverbs_copy_from(&user_vector, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
+ return -EFAULT;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+ if (err < 0)
+ return err;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ &dev_eqn, sizeof(dev_eqn)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Security note:
+ * The hardware protection mechanism works like this: Each device object that
+ * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
+ * the device specification manual) upon its creation. Then upon doorbell,
+ * hardware fetches the object context for which the doorbell was rung, and
+ * validates that the UAR through which the DB was rung matches the UAR ID
+ * of the object.
+ * If there is no match, the doorbell is silently ignored by the hardware. Of
+ * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
+ * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
+ * mailboxes (except tagging them with UID), we expose the user's UAR ID to
+ * it, so it can embed it in these objects in the expected specification
+ * format. So the only thing the user can do is hurt itself by creating a
+ * QP/SQ/CQ with a UAR ID other than its own, in which case other users
+ * may ring a doorbell on its objects.
+ * The consequence of that will be that another user can schedule a QP/SQ
+ * of the buggy user for execution (just insert it to the hardware schedule
+ * queue or arm its CQ for event generation); no further harm is expected.
+ */
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 user_idx;
+ s32 dev_idx;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (uverbs_copy_from(&user_idx, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
+ return -EFAULT;
+
+ dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
+ if (dev_idx < 0)
+ return dev_idx;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ &dev_idx, sizeof(dev_idx)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ void *cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
+ void *cmd_out;
+ int err;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ /* Only a whitelist of general HCA commands is allowed for this method. */
+ if (!devx_is_general_cmd(cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
+ cmd_out_len);
+}
+
+static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ u32 *dinlen,
+ u32 *obj_id)
+{
+ u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
+ u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
+
+ switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
+ break;
+
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ break;
+ case MLX5_CMD_OP_ALLOC_PD:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ break;
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ break;
+ case MLX5_CMD_OP_CREATE_RMP:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ break;
+ case MLX5_CMD_OP_CREATE_SQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ break;
+ case MLX5_CMD_OP_CREATE_RQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ break;
+ case MLX5_CMD_OP_CREATE_RQT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ break;
+ case MLX5_CMD_OP_CREATE_TIR:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ break;
+ case MLX5_CMD_OP_CREATE_TIS:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ break;
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
+ *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
+ MLX5_SET(destroy_flow_table_in, din, other_vport,
+ MLX5_GET(create_flow_table_in, in, other_vport));
+ MLX5_SET(destroy_flow_table_in, din, vport_number,
+ MLX5_GET(create_flow_table_in, in, vport_number));
+ MLX5_SET(destroy_flow_table_in, din, table_type,
+ MLX5_GET(create_flow_table_in, in, table_type));
+ MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
+ *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
+ MLX5_SET(destroy_flow_group_in, din, other_vport,
+ MLX5_GET(create_flow_group_in, in, other_vport));
+ MLX5_SET(destroy_flow_group_in, din, vport_number,
+ MLX5_GET(create_flow_group_in, in, vport_number));
+ MLX5_SET(destroy_flow_group_in, din, table_type,
+ MLX5_GET(create_flow_group_in, in, table_type));
+ MLX5_SET(destroy_flow_group_in, din, table_id,
+ MLX5_GET(create_flow_group_in, in, table_id));
+ MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
+ *obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ MLX5_SET(delete_fte_in, din, other_vport,
+ MLX5_GET(set_fte_in, in, other_vport));
+ MLX5_SET(delete_fte_in, din, vport_number,
+ MLX5_GET(set_fte_in, in, vport_number));
+ MLX5_SET(delete_fte_in, din, table_type,
+ MLX5_GET(set_fte_in, in, table_type));
+ MLX5_SET(delete_fte_in, din, table_id,
+ MLX5_GET(set_fte_in, in, table_id));
+ MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ break;
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+ break;
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ break;
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
+ *obj_id = MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_hierarchy,
+ MLX5_GET(create_scheduling_element_in, in,
+ scheduling_hierarchy));
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_element_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
+ *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
+ *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_CREATE_QP:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ break;
+ case MLX5_CMD_OP_CREATE_SRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ break;
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ break;
+ case MLX5_CMD_OP_CREATE_DCT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ break;
+ case MLX5_CMD_OP_CREATE_XRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ break;
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
+ MLX5_SET(detach_from_mcg_in, din, qpn,
+ MLX5_GET(attach_to_mcg_in, in, qpn));
+ memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
+ MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
+ MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ break;
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ break;
+ default:
+ /* The opcode must match one of those accepted by devx_is_obj_create_cmd() */
+ WARN_ON(true);
+ break;
+ }
+}
+
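As a worked instance of the mapping above (a sketch, not part of the patch): after MLX5_CMD_OP_CREATE_CQ succeeds, the pre-built destroy box is nothing more than a general-object header with the opcode flipped and the returned object id and original uid copied in:

static void build_destroy_cq_box(void *create_out, void *din, u16 uid)
{
	/* create_cq_out carries the CQ number at the common obj_id offset */
	u32 cqn = MLX5_GET(general_obj_out_cmd_hdr, create_out, obj_id);

	MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
		 MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, cqn);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
}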
+static int devx_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct devx_obj *obj = uobject->object;
+ int ret;
+
+ ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ kfree(obj);
+ return ret;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
+ void *cmd_out;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ struct devx_obj *obj;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_create_cmd(cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
+ WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
+ return 0;
+
+obj_free:
+ kfree(obj);
+ return err;
+}
+
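For context, the userspace counterpart of this handler — a hedged sketch using rdma-core's DEVX entry points; the PRM layout macros are written here in kernel style for brevity and are an assumption, since applications normally carry their own copies of the command layouts:

#include <infiniband/mlx5dv.h>

/* Sketch: allocate a PD through the DEVX object-create path. The kernel
 * handler above overwrites the uid field with the context's devx_uid. */
static struct mlx5dv_devx_obj *devx_alloc_pd(struct ibv_context *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	uint32_t out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	return mlx5dv_devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
}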
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct devx_obj *obj = uobj->object;
+ void *cmd_out;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_modify_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(obj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(obj->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ cmd_out, cmd_out_len);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct devx_obj *obj = uobj->object;
+ void *cmd_out;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(obj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(obj->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ cmd_out, cmd_out_len);
+}
+
+static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
+ struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj)
+{
+ u64 addr;
+ size_t size;
+ u32 access;
+ int npages;
+ int err;
+ u32 page_mask;
+
+ if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
+ uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
+ return -EFAULT;
+
+ err = uverbs_get_flags32(&access, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_SUPPORTED);
+ if (err)
+ return err;
+
+ err = ib_check_mr_access(access);
+ if (err)
+ return err;
+
+ obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+
+ mlx5_ib_cont_pages(obj->umem, obj->umem->address,
+ MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
+ &obj->page_shift, &obj->ncont, NULL);
+
+ if (!npages) {
+ ib_umem_release(obj->umem);
+ return -EINVAL;
+ }
+
+ page_mask = (1 << obj->page_shift) - 1;
+ obj->page_offset = obj->umem->address & page_mask;
+
+ return 0;
+}
+
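The page_offset computation above, worked for one case (a sketch with an arbitrary example address): with 4 KiB pages the low bits of the umem start address survive as the in-page offset.

static u32 example_page_offset(void)
{
	u32 page_mask = (1 << 12) - 1;	/* page_shift == 12: 4 KiB pages */

	return (u32)(0x7f2a80301234ULL & page_mask);	/* 0x234 */
}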
+static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd)
+{
+ cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
+ (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
+ cmd->in = uverbs_zalloc(attrs, cmd->inlen);
+ return PTR_ERR_OR_ZERO(cmd->in);
+}
+
+static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd)
+{
+ void *umem;
+ __be64 *mtt;
+
+ umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
+ mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
+ MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
+ MLX5_SET(umem, umem, log_page_size, obj->page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(umem, umem, page_offset, obj->page_offset);
+ mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
+ (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
+ MLX5_IB_MTT_READ);
+}
+
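A numeric note on the log_page_size field set above (sketch): it is encoded relative to MLX5_ADAPTER_PAGE_SHIFT (12), so a umem that mlx5_ib_cont_pages() resolved into 2 MiB contiguous chunks is written as 9.

static int example_log_page_size(void)
{
	int page_shift = 21;	/* umem resolved to 2 MiB contiguous chunks */

	return page_shift - MLX5_ADAPTER_PAGE_SHIFT;	/* 21 - 12 = 9 */
}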
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct devx_umem_reg_cmd cmd;
+ struct devx_umem *obj;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
+ u32 obj_id;
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
+ if (err)
+ goto err_obj_free;
+
+ err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
+ if (err)
+ goto err_umem_release;
+
+ devx_umem_reg_cmd_build(dev, obj, &cmd);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
+ sizeof(cmd.out));
+ if (err)
+ goto err_umem_release;
+
+ obj->mdev = dev->mdev;
+ uobj->object = obj;
+ devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
+ if (err)
+ goto err_umem_destroy;
+
+ return 0;
+
+err_umem_destroy:
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
+err_umem_release:
+ ib_umem_release(obj->umem);
+err_obj_free:
+ kfree(obj);
+ return err;
+}
+
+static int devx_umem_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct devx_umem *obj = uobject->object;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ int err;
+
+ err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (ib_is_destroy_retryable(err, why, uobject))
+ return err;
+
+ ib_umem_release(obj->umem);
+ kfree(obj);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_UMEM_REG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ enum ib_access_flags),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_UMEM_DEREG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_EQN,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_UAR,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OTHER,
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_WRITE,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
+
+DECLARE_UVERBS_OBJECT_TREE(devx_objects,
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));
+
+const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
+{
+ return &devx_objects;
+}
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
new file mode 100644
index 000000000000..1a29f47f836e
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
+ [MLX5_IB_FLOW_TYPE_NORMAL] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ .u.ptr = {
+ .len = sizeof(u16), /* data is priority */
+ .min_len = sizeof(u16),
+ }
+ },
+ [MLX5_IB_FLOW_TYPE_SNIFFER] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+};
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ void *devx_obj;
+ int dest_id, dest_type;
+ void *cmd_in;
+ int inlen;
+ bool dest_devx, dest_qp;
+ struct ib_qp *qp = NULL;
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ dest_devx =
+ uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_qp = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+
+ if ((dest_devx && dest_qp) || (!dest_devx && !dest_qp))
+ return -EINVAL;
+
+ if (dest_devx) {
+ devx_obj = uverbs_attr_get_obj(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ if (IS_ERR(devx_obj))
+ return PTR_ERR(devx_obj);
+
+ /* Verify that the given DEVX object is a flow
+ * steering destination.
+ */
+ if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
+ return -EINVAL;
+ } else {
+ struct mlx5_ib_qp *mqp;
+
+ qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ if (qp->qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ mqp = to_mqp(qp);
+ if (mqp->flags & MLX5_IB_QP_RSS)
+ dest_id = mqp->rss_qp.tirn;
+ else
+ dest_id = mqp->raw_packet_qp.rq.tirn;
+ dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ }
+
+ if (dev->rep)
+ return -ENOTSUPP;
+
+ cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
+ dest_id, dest_type);
+ if (IS_ERR(flow_handler))
+ return PTR_ERR(flow_handler);
+
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev);
+
+ return 0;
+}
+
+static int flow_matcher_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct mlx5_ib_flow_matcher *obj = uobject->object;
+ int ret;
+
+ ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
+ if (ret)
+ return ret;
+
+ kfree(obj);
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct mlx5_ib_flow_matcher *obj;
+ int err;
+
+ obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->mask_len = uverbs_attr_get_len(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ err = uverbs_copy_from(&obj->matcher_mask,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ if (err)
+ goto end;
+
+ obj->flow_type = uverbs_attr_get_enum_id(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+
+ if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
+ err = uverbs_copy_from(&obj->priority,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+ if (err)
+ goto end;
+ }
+
+ err = uverbs_copy_from(&obj->match_criteria_enable,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
+ if (err)
+ goto end;
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ atomic_set(&obj->usecnt, 0);
+ return 0;
+
+end:
+ kfree(obj);
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_CREATE_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DESTROY_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_fs,
+ UVERBS_OBJECT_FLOW,
+ &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY),
+ UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
+ mlx5_ib_flow_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+
+DECLARE_UVERBS_OBJECT_TREE(flow_objects,
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER));
+
+int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
+{
+ int i = 0;
+
+ root[i++] = &flow_objects;
+ root[i++] = &mlx5_ib_fs;
+
+ return i;
+}
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 79e6309460dc..4950df3f71b6 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -477,8 +477,8 @@ static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
return gsi->tx_qps[qp_index];
}
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
struct ib_qp *tx_qp;
@@ -522,8 +522,8 @@ err:
return ret;
}
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e3e330f59c2c..c414f3809e5c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -419,8 +419,8 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
- props->port_cap_flags |= IB_PORT_CM_SUP;
- props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
roce_address_table_size);
@@ -510,12 +510,11 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
vlan_id, port_num);
}
-static int mlx5_ib_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
+static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
return set_roce_addr(to_mdev(attr->device), attr->port_num,
- attr->index, gid, attr);
+ attr->index, &attr->gid, attr);
}
static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
@@ -525,41 +524,15 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
attr->index, NULL, NULL);
}
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
- int index)
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
{
- struct ib_gid_attr attr;
- union ib_gid gid;
-
- if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
- return 0;
-
- dev_put(attr.ndev);
-
- if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
return 0;
return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}
-int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
- int index, enum ib_gid_type *gid_type)
-{
- struct ib_gid_attr attr;
- union ib_gid gid;
- int ret;
-
- ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
- if (ret)
- return ret;
-
- dev_put(attr.ndev);
-
- *gid_type = attr.gid_type;
-
- return 0;
-}
-
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
@@ -915,7 +888,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
- props->max_sge = min(max_rq_sg, max_sq_sg);
+ props->max_send_sge = max_sq_sg;
+ props->max_recv_sge = max_rq_sg;
props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
@@ -1246,7 +1220,6 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
props->qkey_viol_cntr = rep->qkey_violation_counter;
props->subnet_timeout = rep->subnet_timeout;
props->init_type_reply = rep->init_type_reply;
- props->grh_required = rep->grh_required;
err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
if (err)
@@ -1585,31 +1558,26 @@ error:
return err;
}
-static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+static void deallocate_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
{
struct mlx5_bfreg_info *bfregi;
- int err;
int i;
bfregi = &context->bfregi;
- for (i = 0; i < bfregi->num_sys_pages; i++) {
+ for (i = 0; i < bfregi->num_sys_pages; i++)
if (i < bfregi->num_static_sys_pages ||
- bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
- err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
- if (err) {
- mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
- return err;
- }
- }
- }
-
- return 0;
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
+ mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}
static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
{
int err;
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return 0;
+
err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
if (err)
return err;
@@ -1631,6 +1599,9 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
{
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return;
+
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
@@ -1660,6 +1631,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int err;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
max_cqe_version);
+ u32 dump_fill_mkey;
bool lib_uar_4k;
if (!dev->ib_active)
@@ -1676,8 +1648,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
return ERR_PTR(err);
- if (req.flags)
- return ERR_PTR(-EINVAL);
+ if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
+ return ERR_PTR(-EOPNOTSUPP);
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
return ERR_PTR(-EOPNOTSUPP);
@@ -1755,10 +1727,26 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
- err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
+ if (err)
+ goto out_uars;
+
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+ /* Block DEVX on Infiniband as of SELinux */
+ if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
+ err = -EPERM;
+ goto out_td;
+ }
+
+ err = mlx5_ib_devx_create(dev, context);
if (err)
- goto out_uars;
+ goto out_td;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
+ if (err)
+ goto out_mdev;
}
INIT_LIST_HEAD(&context->vma_private_list);
@@ -1819,9 +1807,18 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.num_dyn_bfregs);
}
+ if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ resp.dump_fill_mkey = dump_fill_mkey;
+ resp.comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
+ }
+ resp.response_length += sizeof(resp.dump_fill_mkey);
+ }
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
- goto out_td;
+ goto out_mdev;
bfregi->ver = ver;
bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
@@ -1831,9 +1828,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
return &context->ibucontext;
+out_mdev:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+ mlx5_ib_devx_destroy(dev, context);
out_td:
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
out_uars:
deallocate_uars(dev, context);
@@ -1856,9 +1855,11 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_bfreg_info *bfregi;
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context);
+
bfregi = &context->bfregi;
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
@@ -2040,7 +2041,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct mlx5_bfreg_info *bfregi = &context->bfregi;
int err;
unsigned long idx;
- phys_addr_t pfn, pa;
+ phys_addr_t pfn;
pgprot_t prot;
u32 bfreg_dyn_idx = 0;
u32 uar_index;
@@ -2131,8 +2132,6 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
goto err;
}
- pa = pfn << PAGE_SHIFT;
-
err = mlx5_ib_set_vma_data(vma, context);
if (err)
goto err;
@@ -2699,7 +2698,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
IPPROTO_GRE);
MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
- 0xffff);
+ ntohs(ib_spec->gre.mask.protocol));
MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
ntohs(ib_spec->gre.val.protocol));
@@ -2979,11 +2978,11 @@ static void counters_clear_description(struct ib_counters *counters)
static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
- struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
struct mlx5_ib_flow_handler *handler = container_of(flow_id,
struct mlx5_ib_flow_handler,
ibflow);
struct mlx5_ib_flow_handler *iter, *tmp;
+ struct mlx5_ib_dev *dev = handler->dev;
mutex_lock(&dev->flow_db->lock);
@@ -3001,6 +3000,8 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
counters_clear_description(handler->ibcounters);
mutex_unlock(&dev->flow_db->lock);
+ if (handler->flow_matcher)
+ atomic_dec(&handler->flow_matcher->usecnt);
kfree(handler);
return 0;
@@ -3021,6 +3022,26 @@ enum flow_table_type {
#define MLX5_FS_MAX_TYPES 6
#define MLX5_FS_MAX_ENTRIES BIT(16)
+
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
+ struct mlx5_ib_flow_prio *prio,
+ int priority,
+ int num_entries, int num_groups)
+{
+ struct mlx5_flow_table *ft;
+
+ ft = mlx5_create_auto_grouped_flow_table(ns, priority,
+ num_entries,
+ num_groups,
+ 0, 0);
+ if (IS_ERR(ft))
+ return ERR_CAST(ft);
+
+ prio->flow_table = ft;
+ prio->refcount = 0;
+ return prio;
+}
+
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
struct ib_flow_attr *flow_attr,
enum flow_table_type ft_type)
@@ -3033,7 +3054,6 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int num_entries;
int num_groups;
int priority;
- int err = 0;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
@@ -3083,21 +3103,10 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
return ERR_PTR(-ENOMEM);
ft = prio->flow_table;
- if (!ft) {
- ft = mlx5_create_auto_grouped_flow_table(ns, priority,
- num_entries,
- num_groups,
- 0, 0);
-
- if (!IS_ERR(ft)) {
- prio->refcount = 0;
- prio->flow_table = ft;
- } else {
- err = PTR_ERR(ft);
- }
- }
+ if (!ft)
+ return _get_prio(ns, prio, priority, num_entries, num_groups);
- return err ? ERR_PTR(err) : prio;
+ return prio;
}
static void set_underlay_qp(struct mlx5_ib_dev *dev,
@@ -3356,6 +3365,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
ft_prio->refcount++;
handler->prio = ft_prio;
+ handler->dev = dev;
ft_prio->flow_table = ft;
free:
@@ -3648,6 +3658,189 @@ free_ucmd:
return ERR_PTR(err);
}
+static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
+ int priority, bool mcast)
+{
+ int max_table_size;
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_ib_flow_prio *prio;
+
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ if (max_table_size < MLX5_FS_MAX_ENTRIES)
+ return ERR_PTR(-ENOMEM);
+
+ if (mcast)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(priority, false);
+
+ ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ if (!ns)
+ return ERR_PTR(-ENOTSUPP);
+
+ prio = &dev->flow_db->prios[priority];
+
+ if (prio->flow_table)
+ return prio;
+
+ return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
+ MLX5_FS_MAX_TYPES);
+}
+
+static struct mlx5_ib_flow_handler *
+_create_raw_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct mlx5_flow_destination *dst,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen)
+{
+ struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !spec) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ memcpy(spec->match_value, cmd_in, inlen);
+ memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
+ fs_matcher->mask_len);
+ spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act, dst, 1);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
+ handler->dev = dev;
+ ft_prio->flow_table = ft;
+
+free:
+ if (err)
+ kfree(handler);
+ kvfree(spec);
+ return err ? ERR_PTR(err) : handler;
+}
+
+static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
+ void *match_v)
+{
+ void *match_c;
+ void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
+ void *dmac, *dmac_mask;
+ void *ipv4, *ipv4_mask;
+
+ if (!(fs_matcher->match_criteria_enable &
+ (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
+ return false;
+
+ match_c = fs_matcher->matcher_mask.match_params;
+ match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+
+ dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dmac_47_16);
+ dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dmac_47_16);
+
+ if (is_multicast_ether_addr(dmac) &&
+ is_multicast_ether_addr(dmac_mask))
+ return true;
+
+ ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
+ ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
+ return true;
+
+ return false;
+}
+
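A minimal sketch of inputs the two helpers above classify as multicast: the Ethernet check keys on the group bit in the first octet of the DMAC, the IPv4 check on the 224.0.0.0/4 range.

static void example_multicast_inputs(void)
{
	u8 mc_dmac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	__be32 mc_ip = cpu_to_be32(0xe0000001);	/* 224.0.0.1 */

	WARN_ON(!is_multicast_ether_addr(mc_dmac));	/* group bit set */
	WARN_ON(!ipv4_is_multicast(mc_ip));		/* in 224.0.0.0/4 */
}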
+struct mlx5_ib_flow_handler *
+mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen, int dest_id,
+ int dest_type)
+{
+ struct mlx5_flow_destination *dst;
+ struct mlx5_ib_flow_prio *ft_prio;
+ int priority = fs_matcher->priority;
+ struct mlx5_ib_flow_handler *handler;
+ bool mcast;
+ int err;
+
+ if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
+ return ERR_PTR(-ENOMEM);
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+
+ mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = _get_flow_table(dev, priority, mcast);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+
+ if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
+ dst->type = dest_type;
+ dst->tir_num = dest_id;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
+ dst->ft_num = dest_id;
+ }
+
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
+ inlen);
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ goto destroy_ft;
+ }
+
+ mutex_unlock(&dev->flow_db->lock);
+ atomic_inc(&fs_matcher->usecnt);
+ handler->flow_matcher = fs_matcher;
+
+ kfree(dst);
+
+ return handler;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+unlock:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+
+ return ERR_PTR(err);
+}
+
static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
{
u32 flags = 0;
@@ -3672,12 +3865,11 @@ mlx5_ib_create_flow_action_esp(struct ib_device *device,
u64 flags;
int err = 0;
- if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&action_flags, attrs,
- MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS)))
- return ERR_PTR(-EFAULT);
-
- if (action_flags >= (MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1))
- return ERR_PTR(-EOPNOTSUPP);
+ err = uverbs_get_flags64(
+ &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
+ ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
+ if (err)
+ return ERR_PTR(err);
flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
@@ -4466,7 +4658,8 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
cancel_work_sync(&devr->ports[port].pkey_change_work);
}
-static u32 get_core_cap_flags(struct ib_device *ibdev)
+static u32 get_core_cap_flags(struct ib_device *ibdev,
+ struct mlx5_hca_vport_context *rep)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
@@ -4475,11 +4668,14 @@ static u32 get_core_cap_flags(struct ib_device *ibdev)
bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
u32 ret = 0;
+ if (rep->grh_required)
+ ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
+
if (ll == IB_LINK_LAYER_INFINIBAND)
- return RDMA_CORE_PORT_IBA_IB;
+ return ret | RDMA_CORE_PORT_IBA_IB;
if (raw_support)
- ret = RDMA_CORE_PORT_RAW_PACKET;
+ ret |= RDMA_CORE_PORT_RAW_PACKET;
if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
return ret;
@@ -4502,17 +4698,23 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr attr;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
+ struct mlx5_hca_vport_context rep = {0};
int err;
- immutable->core_cap_flags = get_core_cap_flags(ibdev);
-
err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
+ &rep);
+ if (err)
+ return err;
+ }
+
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = get_core_cap_flags(ibdev);
+ immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
@@ -4610,7 +4812,7 @@ static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
}
}
-static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
@@ -4689,12 +4891,21 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
INIT_Q_COUNTER(req_cqe_flush_error),
};
+#define INIT_EXT_PPCNT_COUNTER(_name) \
+ { .name = #_name, .offset = \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
+
+static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
+ INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
+};
+
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
int i;
for (i = 0; i < dev->num_ports; i++) {
- if (dev->port[i].cnts.set_id)
+ if (dev->port[i].cnts.set_id_valid)
mlx5_core_dealloc_q_counter(dev->mdev,
dev->port[i].cnts.set_id);
kfree(dev->port[i].cnts.names);
@@ -4724,7 +4935,10 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
num_counters += ARRAY_SIZE(cong_cnts);
}
-
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
+ num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
+ }
cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
if (!cnts->names)
return -ENOMEM;
@@ -4781,6 +4995,13 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
offsets[j] = cong_cnts[i].offset;
}
}
+
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
+ names[j] = ext_ppcnt_cnts[i].name;
+ offsets[j] = ext_ppcnt_cnts[i].offset;
+ }
+ }
}
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
@@ -4826,7 +5047,8 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
return rdma_alloc_hw_stats_struct(port->cnts.names,
port->cnts.num_q_counters +
- port->cnts.num_cong_counters,
+ port->cnts.num_cong_counters +
+ port->cnts.num_ext_ppcnt_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -4859,6 +5081,34 @@ free:
return ret;
}
+static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_port *port,
+ struct rdma_hw_stats *stats)
+{
+ int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int ret, i;
+ void *out;
+
+ out = kvzalloc(sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
+ if (ret)
+ goto free;
+
+ for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
+ stats->value[i + offset] =
+ be64_to_cpup((__be64 *)(out +
+ port->cnts.offsets[i + offset]));
+ }
+
+free:
+ kvfree(out);
+ return ret;
+}
+
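The offset arithmetic above relies on the stats array being laid out in query order — a sketch of that layout:

/*
 * stats->value[] layout (sketch):
 *
 *   [0, num_q_counters)                       Q counters
 *   [num_q_counters, +num_cong_counters)      congestion counters
 *   [num_q + num_cong, +num_ext_ppcnt)        extended PPCNT counters,
 *                                             filled by the helper above
 */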
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port_num, int index)
@@ -4872,13 +5122,21 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
if (!stats)
return -EINVAL;
- num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+ num_counters = port->cnts.num_q_counters +
+ port->cnts.num_cong_counters +
+ port->cnts.num_ext_ppcnt_counters;
/* q_counters are per IB device, query the master mdev */
ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
if (ret)
return ret;
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
+ if (ret)
+ return ret;
+ }
+
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
&mdev_port_num);
@@ -4905,11 +5163,6 @@ done:
return num_counters;
}
-static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
-{
- return mlx5_rdma_netdev_free(netdev);
-}
-
static struct net_device*
mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
u8 port_num,
@@ -4919,17 +5172,12 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
void (*setup)(struct net_device *))
{
struct net_device *netdev;
- struct rdma_netdev *rn;
if (type != RDMA_NETDEV_IPOIB)
return ERR_PTR(-EOPNOTSUPP);
netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
name, setup);
- if (likely(!IS_ERR_OR_NULL(netdev))) {
- rn = netdev_priv(netdev);
- rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
- }
return netdev;
}
@@ -5127,8 +5375,8 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) {
- mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
- port_num + 1);
+ mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
+ port_num + 1);
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
return false;
}
@@ -5263,45 +5511,47 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_dm, UVERBS_OBJECT_DM,
- UVERBS_METHOD_DM_ALLOC,
- &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- UVERBS_ATTR_TYPE(u16),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_flow_action, UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- &UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_dm,
+ UVERBS_OBJECT_DM,
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_flow_action,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
+ enum mlx5_ib_uapi_flow_action_flags));
-#define NUM_TREES 2
static int populate_specs_root(struct mlx5_ib_dev *dev)
{
- const struct uverbs_object_tree_def *default_root[NUM_TREES + 1] = {
- uverbs_default_get_objects()};
- size_t num_trees = 1;
+ const struct uverbs_object_tree_def **trees = dev->driver_trees;
+ size_t num_trees = 0;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
- !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
- default_root[num_trees++] = &mlx5_ib_flow_action;
+ if (mlx5_accel_ipsec_device_caps(dev->mdev) &
+ MLX5_ACCEL_IPSEC_CAP_DEVICE)
+ trees[num_trees++] = &mlx5_ib_flow_action;
- if (MLX5_CAP_DEV_MEM(dev->mdev, memic) &&
- !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
- default_root[num_trees++] = &mlx5_ib_dm;
+ if (MLX5_CAP_DEV_MEM(dev->mdev, memic))
+ trees[num_trees++] = &mlx5_ib_dm;
- dev->ib_dev.specs_root =
- uverbs_alloc_spec_tree(num_trees, default_root);
+ if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_UCTX)
+ trees[num_trees++] = mlx5_ib_get_devx_tree();
- return PTR_ERR_OR_ZERO(dev->ib_dev.specs_root);
-}
+ num_trees += mlx5_ib_get_flow_trees(trees + num_trees);
-static void depopulate_specs_root(struct mlx5_ib_dev *dev)
-{
- uverbs_free_spec_tree(dev->ib_dev.specs_root);
+ WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees));
+ trees[num_trees] = NULL;
+ dev->ib_dev.driver_specs = trees;
+
+ return 0;
}
static int mlx5_ib_read_counters(struct ib_counters *counters,
@@ -5552,6 +5802,8 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
dev->ib_dev.query_qp = mlx5_ib_query_qp;
dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
+ dev->ib_dev.drain_sq = mlx5_ib_drain_sq;
+ dev->ib_dev.drain_rq = mlx5_ib_drain_rq;
dev->ib_dev.post_send = mlx5_ib_post_send;
dev->ib_dev.post_recv = mlx5_ib_post_recv;
dev->ib_dev.create_cq = mlx5_ib_create_cq;
@@ -5649,9 +5901,9 @@ int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
return 0;
}
-static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
- u8 port_num)
+static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
+ u8 port_num;
int i;
for (i = 0; i < dev->num_ports; i++) {
@@ -5674,6 +5926,8 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
return mlx5_add_netdev_notifier(dev, port_num);
}
@@ -5690,14 +5944,12 @@ int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
enum rdma_link_layer ll;
int port_type_cap;
int err = 0;
- u8 port_num;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET)
- err = mlx5_ib_stage_common_roce_init(dev, port_num);
+ err = mlx5_ib_stage_common_roce_init(dev);
return err;
}
@@ -5712,19 +5964,17 @@ static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
int err;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_ib_stage_common_roce_init(dev, port_num);
+ err = mlx5_ib_stage_common_roce_init(dev);
if (err)
return err;
- err = mlx5_enable_eth(dev, port_num);
+ err = mlx5_enable_eth(dev);
if (err)
goto cleanup;
}
@@ -5741,9 +5991,7 @@ static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -5842,11 +6090,6 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
return ib_register_device(&dev->ib_dev, NULL);
}
-static void mlx5_ib_stage_depopulate_specs(struct mlx5_ib_dev *dev)
-{
- depopulate_specs_root(dev);
-}
-
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
destroy_umrc_res(dev);
@@ -5915,8 +6158,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
ib_dealloc_device((struct ib_device *)dev);
}
-static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
-
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile)
{
@@ -5983,7 +6224,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SPECS,
mlx5_ib_stage_populate_specs,
- mlx5_ib_stage_depopulate_specs),
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6031,7 +6272,7 @@ static const struct mlx5_ib_profile nic_rep_profile = {
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SPECS,
mlx5_ib_stage_populate_specs,
- mlx5_ib_stage_depopulate_specs),
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6046,7 +6287,7 @@ static const struct mlx5_ib_profile nic_rep_profile = {
mlx5_ib_stage_rep_reg_cleanup),
};
-static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_dev *dev;
@@ -6080,8 +6321,6 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
- } else {
- mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
}
mutex_unlock(&mlx5_ib_multiport_mutex);
@@ -6099,11 +6338,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
- if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
- u8 port_num = mlx5_core_native_port_num(mdev) - 1;
-
- return mlx5_ib_add_slave_port(mdev, port_num);
- }
+ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
+ return mlx5_ib_add_slave_port(mdev);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
if (!dev)
@@ -6113,7 +6349,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
- if (MLX5_VPORT_MANAGER(mdev) &&
+ if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d89c8fe626f6..320d4dfe8c2f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -46,6 +46,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
@@ -78,12 +79,6 @@ enum {
MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};
-enum mlx5_ib_latency_class {
- MLX5_IB_LATENCY_CLASS_LOW,
- MLX5_IB_LATENCY_CLASS_MEDIUM,
- MLX5_IB_LATENCY_CLASS_HIGH,
-};
-
enum mlx5_ib_mad_ifc_flags {
MLX5_MAD_IFC_IGNORE_MKEY = 1,
MLX5_MAD_IFC_IGNORE_BKEY = 2,
@@ -143,6 +138,7 @@ struct mlx5_ib_ucontext {
u64 lib_caps;
DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
+ u16 devx_uid;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -176,6 +172,18 @@ struct mlx5_ib_flow_handler {
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_handle *rule;
struct ib_counters *ibcounters;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_flow_matcher *flow_matcher;
+};
+
+struct mlx5_ib_flow_matcher {
+ struct mlx5_ib_match_params matcher_mask;
+ int mask_len;
+ enum mlx5_ib_flow_type flow_type;
+ u16 priority;
+ struct mlx5_core_dev *mdev;
+ atomic_t usecnt;
+ u8 match_criteria_enable;
};
struct mlx5_ib_flow_db {
@@ -461,7 +469,7 @@ struct mlx5_umr_wr {
u32 mkey;
};
-static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
+static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct mlx5_umr_wr, wr);
}
@@ -665,6 +673,7 @@ struct mlx5_ib_counters {
size_t *offsets;
u32 num_q_counters;
u32 num_cong_counters;
+ u32 num_ext_ppcnt_counters;
u16 set_id;
bool set_id_valid;
};
@@ -851,6 +860,7 @@ to_mcounters(struct ib_counters *ibcntrs)
struct mlx5_ib_dev {
struct ib_device ib_dev;
+ const struct uverbs_object_tree_def *driver_trees[6];
struct mlx5_core_dev *mdev;
struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
@@ -1004,8 +1014,8 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1014,10 +1024,12 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+void mlx5_ib_drain_sq(struct ib_qp *qp);
+void mlx5_ib_drain_rq(struct ib_qp *qp);
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
void *buffer, u32 length,
@@ -1183,10 +1195,8 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
- int index);
-int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
- int index, enum ib_gid_type *gid_type);
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
@@ -1200,10 +1210,10 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
@@ -1217,6 +1227,36 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context);
+const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
+struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen, int dest_id, int dest_type);
+bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
+int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
+#else
+static inline int
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; }
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context) {}
+static inline const struct uverbs_object_tree_def *
+mlx5_ib_get_devx_tree(void) { return NULL; }
+static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
+ int *dest_type)
+{
+ return false;
+}
+static inline int
+mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
+{
+ return 0;
+}
+#endif
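/*
 * Editor's note (not part of the patch): the #if/#else block above uses
 * the common kernel idiom of pairing real prototypes with static inline
 * stubs, so callers never need CONFIG_INFINIBAND_USER_ACCESS ifdefs of
 * their own. A minimal sketch of the same idiom, with hypothetical names:
 *
 *	#if IS_ENABLED(CONFIG_FOO)
 *	int foo_init(struct foo_dev *dev);
 *	#else
 *	static inline int foo_init(struct foo_dev *dev)
 *	{
 *		return -EOPNOTSUPP;
 *	}
 *	#endif
 */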
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -1318,4 +1358,7 @@ static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg);
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 90a9c461cedc..9fb1d9cb9401 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -271,16 +271,16 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
{
struct mlx5_cache_ent *ent = filp->private_data;
struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
+ char lbuf[20] = {0};
u32 var;
int err;
int c;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ count = min(count, sizeof(lbuf) - 1);
+ if (copy_from_user(lbuf, buf, count))
return -EFAULT;
c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
if (sscanf(lbuf, "%u", &var) != 1)
return -EINVAL;
@@ -310,19 +310,11 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations size_fops = {
@@ -337,16 +329,16 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
{
struct mlx5_cache_ent *ent = filp->private_data;
struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
+ char lbuf[20] = {0};
u32 var;
int err;
int c;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ count = min(count, sizeof(lbuf) - 1);
+ if (copy_from_user(lbuf, buf, count))
return -EFAULT;
c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
if (sscanf(lbuf, "%u", &var) != 1)
return -EINVAL;
@@ -372,19 +364,11 @@ static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations limit_fops = {
@@ -914,7 +898,7 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
struct mlx5_umr_wr *umrwr)
{
struct umr_common *umrc = &dev->umrc;
- struct ib_send_wr *bad;
+ const struct ib_send_wr *bad;
int err;
struct mlx5_ib_umr_context umr_context;
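/*
 * Editor's sketch (not part of the patch): the bounded-copy pattern the
 * two debugfs write handlers above now share, shown in isolation. The
 * essentials are a zero-filled buffer, clamping the copy length so a NUL
 * terminator always survives, and copying only what the user supplied
 * rather than sizeof(lbuf). Names here are hypothetical.
 *
 *	static ssize_t example_write(struct file *filp,
 *				     const char __user *buf,
 *				     size_t count, loff_t *pos)
 *	{
 *		char lbuf[20] = {0};
 *		u32 var;
 *
 *		count = min(count, sizeof(lbuf) - 1);
 *		if (copy_from_user(lbuf, buf, count))
 *			return -EFAULT;
 *		if (sscanf(lbuf, "%u", &var) != 1)
 *			return -EINVAL;
 *		return count;
 *	}
 */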
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f1a87a690a4c..d216e0d2921d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -488,7 +488,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
down_read(&ctx->umem_rwsem);
rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
- mr_leaf_free, imr);
+ mr_leaf_free, true, imr);
up_read(&ctx->umem_rwsem);
wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a4f1f638509f..6cba2a02d11b 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -563,32 +563,21 @@ static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
}
static int alloc_bfreg(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi,
- enum mlx5_ib_latency_class lat)
+ struct mlx5_bfreg_info *bfregi)
{
- int bfregn = -EINVAL;
+ int bfregn = -ENOMEM;
mutex_lock(&bfregi->lock);
- switch (lat) {
- case MLX5_IB_LATENCY_CLASS_LOW:
+ if (bfregi->ver >= 2) {
+ bfregn = alloc_high_class_bfreg(dev, bfregi);
+ if (bfregn < 0)
+ bfregn = alloc_med_class_bfreg(dev, bfregi);
+ }
+
+ if (bfregn < 0) {
BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
bfregn = 0;
bfregi->count[bfregn]++;
- break;
-
- case MLX5_IB_LATENCY_CLASS_MEDIUM:
- if (bfregi->ver < 2)
- bfregn = -ENOMEM;
- else
- bfregn = alloc_med_class_bfreg(dev, bfregi);
- break;
-
- case MLX5_IB_LATENCY_CLASS_HIGH:
- if (bfregi->ver < 2)
- bfregn = -ENOMEM;
- else
- bfregn = alloc_high_class_bfreg(dev, bfregi);
- break;
}
mutex_unlock(&bfregi->lock);
@@ -641,13 +630,13 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
-static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi, int bfregn,
- bool dyn_bfreg)
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg)
{
- int bfregs_per_sys_page;
- int index_of_sys_page;
- int offset;
+ unsigned int bfregs_per_sys_page;
+ u32 index_of_sys_page;
+ u32 offset;
bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
@@ -655,6 +644,10 @@ static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
if (dyn_bfreg) {
index_of_sys_page += bfregi->num_static_sys_pages;
+
+ if (index_of_sys_page >= bfregi->num_sys_pages)
+ return -EINVAL;
+
if (bfregn > bfregi->num_dyn_bfregs ||
bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
@@ -819,21 +812,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
bfregn = MLX5_CROSS_CHANNEL_BFREG;
}
else {
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
- if (bfregn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
- mlx5_ib_dbg(dev, "reverting to medium latency\n");
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
- if (bfregn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
- mlx5_ib_dbg(dev, "reverting to high latency\n");
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
- if (bfregn < 0) {
- mlx5_ib_warn(dev, "bfreg allocation failed\n");
- return bfregn;
- }
- }
- }
+ bfregn = alloc_bfreg(dev, &context->bfregi);
+ if (bfregn < 0)
+ return bfregn;
}
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
@@ -1626,7 +1607,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_resources *devr = &dev->devr;
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_create_qp_resp resp;
+ struct mlx5_ib_create_qp_resp resp = {};
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
@@ -2555,18 +2536,16 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
- err = mlx5_get_roce_gid_type(dev, port, grh->sgid_index,
- &gid_type);
- if (err)
- return err;
+
memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
if (qp->ibqp.qp_type == IB_QPT_RC ||
qp->ibqp.qp_type == IB_QPT_UC ||
qp->ibqp.qp_type == IB_QPT_XRC_INI ||
qp->ibqp.qp_type == IB_QPT_XRC_TGT)
- path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
- grh->sgid_index);
+ path->udp_sport =
+ mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
path->dci_cfi_prio_sl = (sl & 0x7) << 4;
+ gid_type = ah->grh.sgid_attr->gid_type;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
} else {
@@ -3529,7 +3508,7 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
}
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
- struct ib_send_wr *wr, void *qend,
+ const struct ib_send_wr *wr, void *qend,
struct mlx5_ib_qp *qp, int *size)
{
void *seg = eseg;
@@ -3582,7 +3561,7 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
@@ -3730,9 +3709,9 @@ static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr, int atomic)
+ const struct ib_send_wr *wr, int atomic)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(umr, 0, sizeof(*umr));
@@ -3803,9 +3782,10 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
seg->status = MLX5_MKEY_STATUS_FREE;
}
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
+ const struct ib_send_wr *wr)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
@@ -3854,7 +3834,7 @@ static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
seg += mr_list_size;
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
@@ -3886,7 +3866,7 @@ static u8 wq_sig(void *wqe)
return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
void *wqe, int *sz)
{
struct mlx5_wqe_inline_seg *seg;
@@ -4032,7 +4012,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
return 0;
}
-static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
+static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
struct mlx5_ib_qp *qp, void **seg, int *size)
{
struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
@@ -4134,7 +4114,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_sig_handover_wr *wr, u32 size,
+ const struct ib_sig_handover_wr *wr, u32 size,
u32 length, u32 pdn)
{
struct ib_mr *sig_mr = wr->sig_mr;
@@ -4165,10 +4145,10 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
}
-static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
- void **seg, int *size)
+static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size)
{
- struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
+ const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn;
u32 xlt_size;
@@ -4243,7 +4223,7 @@ static int set_psv_wr(struct ib_sig_domain *domain,
}
static int set_reg_wr(struct mlx5_ib_qp *qp,
- struct ib_reg_wr *wr,
+ const struct ib_reg_wr *wr,
void **seg, int *size)
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
@@ -4314,10 +4294,10 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
}
}
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl,
- struct ib_send_wr *wr, unsigned *idx,
- int *size, int nreq)
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq, bool send_signaled, bool solicited)
{
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
return -ENOMEM;
@@ -4328,10 +4308,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
*(uint32_t *)(*seg + 8) = 0;
(*ctrl)->imm = send_ieth(wr);
(*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (wr->send_flags & IB_SEND_SIGNALED ?
- MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- MLX5_WQE_CTRL_SOLICITED : 0);
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
*seg += sizeof(**ctrl);
*size = sizeof(**ctrl) / 16;
@@ -4339,6 +4317,16 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
return 0;
}
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq)
+{
+ return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq,
+ wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
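/*
 * Editor's note: now that work requests are const, the SET_PSV path can
 * no longer clear IB_SEND_SIGNALED / set IB_SEND_SOLICITED on the
 * caller's WR in place, so __begin_wqe() takes those bits explicitly,
 * e.g. __begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq, false, true)
 * for an unsignaled, solicited WQE, while begin_wqe() simply forwards
 * the flags carried by the WR.
 */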
static void finish_wqe(struct mlx5_ib_qp *qp,
struct mlx5_wqe_ctrl_seg *ctrl,
u8 size, unsigned idx, u64 wr_id,
@@ -4360,9 +4348,8 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
qp->sq.w_list[idx].next = qp->sq.cur_post;
}
-
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -4393,7 +4380,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4498,10 +4485,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* SET_PSV WQEs are not signaled and solicited
* on error
*/
- wr->send_flags &= ~IB_SEND_SIGNALED;
- wr->send_flags |= IB_SEND_SOLICITED;
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
+ &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4520,8 +4505,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
fence, MLX5_OPCODE_SET_PSV);
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
+ &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4690,13 +4675,19 @@ out:
return err;
}
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
sig->signature = calc_sig(sig, size);
}
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
{
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_data_seg *scat;
@@ -4714,7 +4705,7 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4776,6 +4767,12 @@ out:
return err;
}
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
switch (mlx5_state) {
@@ -5365,7 +5362,9 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev,
rwq->wqe_count = ucmd->rq_wqe_count;
rwq->wqe_shift = ucmd->rq_wqe_shift;
- rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
+ if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
+ return -EINVAL;
+
rwq->log_rq_stride = rwq->wqe_shift;
rwq->log_rq_size = ilog2(rwq->wqe_count);
return 0;
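/*
 * Editor's note (illustration only): check_shl_overflow(), from
 * <linux/overflow.h>, stores a << s into *d and returns true when the
 * shift overflows the destination type, which is why the user-supplied
 * wqe_count/wqe_shift pair above can now be rejected with -EINVAL
 * instead of silently wrapping. For example:
 *
 *	u32 buf_size;
 *
 *	if (check_shl_overflow(1024U, 24, &buf_size))
 *		return -EINVAL;	(1024 << 24 exceeds U32_MAX, so this trips)
 */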
@@ -5697,3 +5696,132 @@ out:
kvfree(in);
return err;
}
+
+struct mlx5_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx5_ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/* This function returns only once the drained WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx5_ib_drain_cqe *sdrain,
+ struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ /* Make sure the CQ handler won't run if it hasn't run already */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+ /* Wait for any scheduled or running task to end */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ /* Run the CQ handler - this makes sure that the drain WR will
+ * be processed if it wasn't processed yet.
+ */
+ mcq->mcq.comp(&mcq->mcq);
+ }
+
+ wait_for_completion(&sdrain->done);
+}
+
+void mlx5_ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ sdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &sdrain, dev);
+}
+
+void mlx5_ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &rdrain, dev);
+}
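/*
 * Editor's sketch: how a consumer would use the two drain hooks added
 * above. mlx5 provides its own drain_sq/drain_rq so that the marker WR
 * is still flushed when the device is in internal error -- the 'drain'
 * flag bypasses the -EIO early return in _mlx5_ib_post_send() and
 * _mlx5_ib_post_recv(). Typical teardown (illustrative only):
 *
 *	ib_drain_qp(qp);	wait until all posted WRs have completed
 *	ib_destroy_qp(qp);	now safe: nothing left in flight
 */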
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0af7b7905550..d359fecf7a5b 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
- if (desc_size == 0 || srq->msrq.max_gs > desc_size)
- return ERR_PTR(-EINVAL);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
desc_size = roundup_pow_of_two(desc_size);
desc_size = max_t(size_t, 32, desc_size);
- if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
- return ERR_PTR(-EINVAL);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+ err = -EINVAL;
+ goto err_srq;
+ }
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- if (buf_size < desc_size)
- return ERR_PTR(-EINVAL);
+ if (buf_size < desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
in.type = init_attr->srq_type;
if (pd->uobject)
@@ -440,8 +446,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index e7f6223e9c60..0823c0bc7e73 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -281,10 +281,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
header->grh.flow_label =
ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
header->grh.hop_limit = ah->av->hop_limit;
- ib_get_cached_gid(&dev->ib_dev,
- be32_to_cpu(ah->av->port_pd) >> 24,
- ah->av->gid_index % dev->limits.gid_table_len,
- &header->grh.source_gid, NULL);
+ header->grh.source_gid = ah->ibah.sgid_attr->gid;
memcpy(header->grh.destination_gid.raw,
ah->av->dgid, 16);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 5508afbf1c67..220a3e4717a3 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -519,10 +519,10 @@ int mthca_max_srq_sge(struct mthca_dev *dev);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
-int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
@@ -530,14 +530,14 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
struct ib_qp_init_attr *qp_init_attr);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int index, int *dbd, __be32 *new_wqe);
int mthca_alloc_qp(struct mthca_dev *dev,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 541f237965c7..0d3473b4596e 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -96,8 +96,9 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->page_size_cap = mdev->limits.page_size_cap;
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
props->max_qp_wr = mdev->limits.max_wqes;
- props->max_sge = mdev->limits.max_sg;
- props->max_sge_rd = props->max_sge;
+ props->max_send_sge = mdev->limits.max_sg;
+ props->max_recv_sge = mdev->limits.max_sg;
+ props->max_sge_rd = mdev->limits.max_sg;
props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
props->max_cqe = mdev->limits.max_cqes;
props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
@@ -448,7 +449,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
int err;
if (init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
srq = kmalloc(sizeof *srq, GFP_KERNEL);
if (!srq)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index af1c49d70b89..3d37f2373d63 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1488,7 +1488,7 @@ void mthca_free_qp(struct mthca_dev *dev,
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
- int ind, struct ib_ud_wr *wr,
+ int ind, const struct ib_ud_wr *wr,
struct mthca_mlx_seg *mlx,
struct mthca_data_seg *data)
{
@@ -1581,7 +1581,7 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
}
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
- struct ib_atomic_wr *wr)
+ const struct ib_atomic_wr *wr)
{
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->swap);
@@ -1594,7 +1594,7 @@ static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
}
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
- struct ib_ud_wr *wr)
+ const struct ib_ud_wr *wr)
{
useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
@@ -1604,15 +1604,15 @@ static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
}
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
- struct ib_ud_wr *wr)
+ const struct ib_ud_wr *wr)
{
memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
useg->dqpn = cpu_to_be32(wr->remote_qpn);
useg->qkey = cpu_to_be32(wr->remote_qkey);
}
-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -1814,8 +1814,8 @@ out:
return err;
}
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -1925,8 +1925,8 @@ out:
return err;
}
-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -2165,8 +2165,8 @@ out:
return err;
}
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index f79732bc73b4..9a3fc6fb0d7e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -472,8 +472,8 @@ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
spin_unlock(&srq->lock);
}
-int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
@@ -572,8 +572,8 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
return err;
}
-int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 00c27291dc26..bedaa02749fb 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -159,7 +159,7 @@ do { \
#define NES_EVENT_TIMEOUT 1200000
#else
-#define nes_debug(level, fmt, args...)
+#define nes_debug(level, fmt, args...) no_printk(fmt, ##args)
#define assert(expr) do {} while (0)
#define NES_EVENT_TIMEOUT 100000
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6cdfbf8c5674..2b67ace5b614 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -58,6 +58,7 @@
#include <net/neighbour.h>
#include <net/route.h>
#include <net/ip_fib.h>
+#include <net/secure_seq.h>
#include <net/tcp.h>
#include <linux/fcntl.h>
@@ -1445,7 +1446,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
struct nes_cm_listener *listener)
{
struct nes_cm_node *cm_node;
- struct timespec ts;
int oldarpindex = 0;
int arpindex = 0;
struct nes_device *nesdev;
@@ -1496,8 +1496,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
NES_CM_DEFAULT_RCV_WND_SCALE;
- ts = current_kernel_time();
- cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr),
+ htonl(cm_node->rem_addr),
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN;
cm_node->tcp_cntxt.rcv_nxt = 0;
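/*
 * Editor's note: secure_tcp_seq() (declared in <net/secure_seq.h>)
 * derives the initial sequence number from a keyed hash of the
 * connection 4-tuple, replacing the old current_kernel_time()
 * nanoseconds, which made ISNs predictable. Illustrative call shape,
 * assuming host-order fields as in the cm_node above:
 *
 *	u32 isn = secure_tcp_seq(htonl(loc_addr), htonl(rem_addr),
 *				 htons(loc_port), htons(rem_port));
 */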
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 18a7de1c3923..bd0675d8f298 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -70,8 +70,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
static void nes_terminate_start_timer(struct nes_qp *nesqp);
-#ifdef CONFIG_INFINIBAND_NES_DEBUG
-static unsigned char *nes_iwarp_state_str[] = {
+static const char *const nes_iwarp_state_str[] = {
"Non-Existent",
"Idle",
"RTS",
@@ -82,7 +81,7 @@ static unsigned char *nes_iwarp_state_str[] = {
"RSVD2",
};
-static unsigned char *nes_tcp_state_str[] = {
+static const char *const nes_tcp_state_str[] = {
"Non-Existent",
"Closed",
"Listen",
@@ -100,7 +99,6 @@ static unsigned char *nes_tcp_state_str[] = {
"RSVD3",
"RSVD4",
};
-#endif
static inline void print_ip(struct nes_cm_node *cm_node)
{
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 32f26556c808..6940c7215961 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -436,7 +436,8 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
props->max_mr_size = 0x80000000;
props->max_qp = nesibdev->max_qp;
props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
- props->max_sge = nesdev->nesadapter->max_sge;
+ props->max_send_sge = nesdev->nesadapter->max_sge;
+ props->max_recv_sge = nesdev->nesadapter->max_sge;
props->max_cq = nesibdev->max_cq;
props->max_cqe = nesdev->nesadapter->max_cqe;
props->max_mr = nesibdev->max_mr;
@@ -754,26 +755,6 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
/**
- * nes_create_ah
- */
-static struct ib_ah *nes_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct ib_udata *udata)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-
-/**
- * nes_destroy_ah
- */
-static int nes_destroy_ah(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-
-/**
* nes_get_encoded_size
*/
static inline u8 nes_get_encoded_size(int *size)
@@ -3004,42 +2985,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return err;
}
-
-/**
- * nes_muticast_attach
- */
-static int nes_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
-
-/**
- * nes_multicast_detach
- */
-static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
-
-/**
- * nes_process_mad
- */
-static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
- u8 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
static inline void
-fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey)
+fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, const struct ib_send_wr *ib_wr,
+ u32 uselkey)
{
int sge_index;
int total_payload_length = 0;
@@ -3065,8 +3013,8 @@ fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselke
/**
* nes_post_send
*/
-static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr)
+static int nes_post_send(struct ib_qp *ibqp, const struct ib_send_wr *ib_wr,
+ const struct ib_send_wr **bad_wr)
{
u64 u64temp;
unsigned long flags = 0;
@@ -3327,8 +3275,8 @@ out:
/**
* nes_post_recv
*/
-static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr)
+static int nes_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
{
u64 u64temp;
unsigned long flags = 0;
@@ -3735,8 +3683,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.mmap = nes_mmap;
nesibdev->ibdev.alloc_pd = nes_alloc_pd;
nesibdev->ibdev.dealloc_pd = nes_dealloc_pd;
- nesibdev->ibdev.create_ah = nes_create_ah;
- nesibdev->ibdev.destroy_ah = nes_destroy_ah;
nesibdev->ibdev.create_qp = nes_create_qp;
nesibdev->ibdev.modify_qp = nes_modify_qp;
nesibdev->ibdev.query_qp = nes_query_qp;
@@ -3753,10 +3699,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.alloc_mr = nes_alloc_mr;
nesibdev->ibdev.map_mr_sg = nes_map_mr_sg;
- nesibdev->ibdev.attach_mcast = nes_multicast_attach;
- nesibdev->ibdev.detach_mcast = nes_multicast_detach;
- nesibdev->ibdev.process_mad = nes_process_mad;
-
nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
nesibdev->ibdev.post_send = nes_post_send;
nesibdev->ibdev.post_recv = nes_post_recv;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 3897b64532e1..58188fe5aed2 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -71,7 +71,7 @@ static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
}
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
- struct rdma_ah_attr *attr, union ib_gid *sgid,
+ struct rdma_ah_attr *attr, const union ib_gid *sgid,
int pdid, bool *isvlan, u16 vlan_tag)
{
int status;
@@ -164,17 +164,14 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
struct ocrdma_ah *ah;
bool isvlan = false;
u16 vlan_tag = 0xffff;
- struct ib_gid_attr sgid_attr;
+ const struct ib_gid_attr *sgid_attr;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- const struct ib_global_route *grh;
- union ib_gid sgid;
if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
return ERR_PTR(-EINVAL);
- grh = rdma_ah_read_grh(attr);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
@@ -186,20 +183,15 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
if (status)
goto av_err;
- status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index, &sgid,
- &sgid_attr);
- if (status) {
- pr_err("%s(): Failed to query sgid, status = %d\n",
- __func__, status);
- goto av_conf_err;
- }
- if (is_vlan_dev(sgid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
- dev_put(sgid_attr.ndev);
+ sgid_attr = attr->grh.sgid_attr;
+ if (is_vlan_dev(sgid_attr->ndev))
+ vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
+
/* Get network header type for this GID */
- ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);
- status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
+ status = set_av_attr(dev, ah, attr, &sgid_attr->gid, pd->id,
+ &isvlan, vlan_tag);
if (status)
goto av_conf_err;
@@ -262,12 +254,6 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-int ocrdma_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
-{
- /* modify_ah is unsupported */
- return -ENOSYS;
-}
-
int ocrdma_process_mad(struct ib_device *ibdev,
int process_mad_flags,
u8 port_num,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 1a65c47945aa..c0c32c9b80ae 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -55,7 +55,6 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
int ocrdma_destroy_ah(struct ib_ah *ah);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int ocrdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int ocrdma_process_mad(struct ib_device *,
int process_mad_flags,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 6c136e5017fe..e578281471af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1365,8 +1365,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
- strncpy(dev->model_number,
- hba_attribs->controller_model_number, 31);
+ strlcpy(dev->model_number,
+ hba_attribs->controller_model_number,
+ sizeof(dev->model_number));
}
dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
free_mqe:
@@ -2494,8 +2495,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
{
int status;
struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
+ const struct ib_gid_attr *sgid_attr;
u32 vlan_id = 0xFFFF;
u8 mac_addr[6], hdr_type;
union {
@@ -2525,25 +2525,23 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
sizeof(cmd->params.dgid));
- status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index,
- &sgid, &sgid_attr);
- if (!status) {
- vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
- memcpy(mac_addr, sgid_attr.ndev->dev_addr, ETH_ALEN);
- dev_put(sgid_attr.ndev);
- }
+ sgid_attr = ah_attr->grh.sgid_attr;
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr->ndev);
+ memcpy(mac_addr, sgid_attr->ndev->dev_addr, ETH_ALEN);
qp->sgid_idx = grh->sgid_index;
- memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
+ memcpy(&cmd->params.sgid[0], &sgid_attr->gid.raw[0],
+ sizeof(cmd->params.sgid));
status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
if (status)
return status;
+
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
- hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ hdr_type = rdma_gid_attr_network_type(sgid_attr);
if (hdr_type == RDMA_NETWORK_IPV4) {
- rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
+ rdma_gid2ip(&sgid_addr._sockaddr, &sgid_attr->gid);
rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
memcpy(&cmd->params.dgid[0],
&dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5962c0ed9847..7832ee3e0c84 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -176,7 +176,6 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.create_ah = ocrdma_create_ah;
dev->ibdev.destroy_ah = ocrdma_destroy_ah;
dev->ibdev.query_ah = ocrdma_query_ah;
- dev->ibdev.modify_ah = ocrdma_modify_ah;
dev->ibdev.poll_cq = ocrdma_poll_cq;
dev->ibdev.post_send = ocrdma_post_send;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 82e20fc32890..c158ca9fde6d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -89,7 +89,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
+ attr->max_send_sge = dev->attr.max_send_sge;
+ attr->max_recv_sge = dev->attr.max_recv_sge;
attr->max_sge_rd = dev->attr.max_rdma_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
@@ -196,11 +197,10 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->sm_lid = 0;
props->sm_sl = 0;
props->state = port_state;
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
- IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1774,13 +1774,13 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
* protect against processing in-flight CQEs for this QP.
*/
spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
- if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+ if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
spin_lock(&qp->rq_cq->cq_lock);
-
- ocrdma_del_qpn_map(dev, qp);
-
- if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+ ocrdma_del_qpn_map(dev, qp);
spin_unlock(&qp->rq_cq->cq_lock);
+ } else {
+ ocrdma_del_qpn_map(dev, qp);
+ }
spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
if (!pd->uctx) {
@@ -1953,7 +1953,7 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct ocrdma_ewqe_ud_hdr *ud_hdr =
(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
@@ -2000,7 +2000,7 @@ static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
struct ocrdma_sge *sge,
- struct ib_send_wr *wr, u32 wqe_size)
+ const struct ib_send_wr *wr, u32 wqe_size)
{
int i;
char *dpp_addr;
@@ -2038,7 +2038,7 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
}
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
int status;
struct ocrdma_sge *sge;
@@ -2057,7 +2057,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
int status;
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
@@ -2075,7 +2075,7 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
}
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
struct ocrdma_sge *sge = ext_rw + 1;
@@ -2105,7 +2105,7 @@ static int get_encoded_page_size(int pg_sz)
static int ocrdma_build_reg(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
- struct ib_reg_wr *wr)
+ const struct ib_reg_wr *wr)
{
u64 fbo;
struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
@@ -2166,8 +2166,8 @@ static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
iowrite32(val, qp->sq_db);
}
-int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int status = 0;
struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
@@ -2278,8 +2278,8 @@ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
iowrite32(val, qp->rq_db);
}
-static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
- u16 tag)
+static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
+ const struct ib_recv_wr *wr, u16 tag)
{
u32 wqe_size = 0;
struct ocrdma_sge *sge;
@@ -2299,8 +2299,8 @@ static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
ocrdma_cpu_to_le32(rqe, wqe_size);
}
-int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int status = 0;
unsigned long flags;
@@ -2369,8 +2369,8 @@ static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}
-int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int status = 0;
unsigned long flags;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 9a9971708646..b69cfdce7970 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -43,10 +43,10 @@
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
-int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
- struct ib_send_wr **bad_wr);
-int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_wr);
+int ocrdma_post_send(struct ib_qp *, const struct ib_send_wr *,
+ const struct ib_send_wr **bad_wr);
+int ocrdma_post_recv(struct ib_qp *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_wr);
int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
@@ -100,8 +100,8 @@ int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
int ocrdma_destroy_srq(struct ib_srq *);
-int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_recv_wr);
+int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_recv_wr);
int ocrdma_dereg_mr(struct ib_mr *);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index ad22b32bbd9c..a0af6d424aed 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -191,6 +191,11 @@ static int qedr_register_device(struct qedr_dev *dev)
QEDR_UVERBS(MODIFY_QP) |
QEDR_UVERBS(QUERY_QP) |
QEDR_UVERBS(DESTROY_QP) |
+ QEDR_UVERBS(CREATE_SRQ) |
+ QEDR_UVERBS(DESTROY_SRQ) |
+ QEDR_UVERBS(QUERY_SRQ) |
+ QEDR_UVERBS(MODIFY_SRQ) |
+ QEDR_UVERBS(POST_SRQ_RECV) |
QEDR_UVERBS(REG_MR) |
QEDR_UVERBS(DEREG_MR) |
QEDR_UVERBS(POLL_CQ) |
@@ -229,6 +234,11 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.query_qp = qedr_query_qp;
dev->ibdev.destroy_qp = qedr_destroy_qp;
+ dev->ibdev.create_srq = qedr_create_srq;
+ dev->ibdev.destroy_srq = qedr_destroy_srq;
+ dev->ibdev.modify_srq = qedr_modify_srq;
+ dev->ibdev.query_srq = qedr_query_srq;
+ dev->ibdev.post_srq_recv = qedr_post_srq_recv;
dev->ibdev.query_pkey = qedr_query_pkey;
dev->ibdev.create_ah = qedr_create_ah;
@@ -325,8 +335,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
spin_lock_init(&dev->sgid_lock);
if (IS_IWARP(dev)) {
- spin_lock_init(&dev->idr_lock);
- idr_init(&dev->qpidr);
+ spin_lock_init(&dev->qpidr.idr_lock);
+ idr_init(&dev->qpidr.idr);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
@@ -653,42 +663,70 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
#define EVENT_TYPE_NOT_DEFINED 0
#define EVENT_TYPE_CQ 1
#define EVENT_TYPE_QP 2
+#define EVENT_TYPE_SRQ 3
struct qedr_dev *dev = (struct qedr_dev *)context;
struct regpair *async_handle = (struct regpair *)fw_handle;
u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
u8 event_type = EVENT_TYPE_NOT_DEFINED;
struct ib_event event;
+ struct ib_srq *ibsrq;
+ struct qedr_srq *srq;
+ unsigned long flags;
struct ib_cq *ibcq;
struct ib_qp *ibqp;
struct qedr_cq *cq;
struct qedr_qp *qp;
+ u16 srq_id;
- switch (e_code) {
- case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
- event.event = IB_EVENT_CQ_ERR;
- event_type = EVENT_TYPE_CQ;
- break;
- case ROCE_ASYNC_EVENT_SQ_DRAINED:
- event.event = IB_EVENT_SQ_DRAINED;
- event_type = EVENT_TYPE_QP;
- break;
- case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
- event.event = IB_EVENT_QP_FATAL;
- event_type = EVENT_TYPE_QP;
- break;
- case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
- event.event = IB_EVENT_QP_REQ_ERR;
- event_type = EVENT_TYPE_QP;
- break;
- case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
- event.event = IB_EVENT_QP_ACCESS_ERR;
- event_type = EVENT_TYPE_QP;
- break;
- default:
+ if (IS_ROCE(dev)) {
+ switch (e_code) {
+ case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ case ROCE_ASYNC_EVENT_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_QP_FATAL;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_SRQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ case ROCE_ASYNC_EVENT_SRQ_EMPTY:
+ event.event = IB_EVENT_SRQ_ERR;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n",
+ e_code, roce_handle64);
+ }
+ } else {
+ switch (e_code) {
+ case QED_IWARP_EVENT_SRQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ case QED_IWARP_EVENT_SRQ_EMPTY:
+ event.event = IB_EVENT_SRQ_ERR;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ default:
DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
roce_handle64);
+ }
}
-
switch (event_type) {
case EVENT_TYPE_CQ:
cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
@@ -722,6 +760,25 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
}
DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
break;
+ case EVENT_TYPE_SRQ:
+ srq_id = (u16)roce_handle64;
+ spin_lock_irqsave(&dev->srqidr.idr_lock, flags);
+ srq = idr_find(&dev->srqidr.idr, srq_id);
+ if (srq) {
+ ibsrq = &srq->ibsrq;
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ ibsrq->event_handler(&event,
+ ibsrq->srq_context);
+ }
+ } else {
+ DP_NOTICE(dev,
+ "SRQ event for an unknown SRQ (NULL ibsrq). Handle=%llx\n",
+ roce_handle64);
+ }
+ spin_unlock_irqrestore(&dev->srqidr.idr_lock, flags);
+ DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
default:
break;
}
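Unlike the CQ and QP cases, where the firmware handle carries a kernel pointer, the SRQ case above resolves only the low 16 bits of the handle as an id through the srqidr map, and must tolerate a miss, since the SRQ may already be gone when the event arrives. A minimal userspace sketch of that id-based lookup (a plain array stands in for the idr; all names are illustrative, not the driver's):

    /* Model of the SRQ async-event lookup: the handle carries a 16-bit
     * id, not a pointer, so resolve it through an id -> object table
     * and accept a NULL result for an already-destroyed SRQ. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SRQS 65536

    struct srq { uint16_t id; };

    static struct srq *srq_table[MAX_SRQS];    /* stands in for the idr */

    static struct srq *srq_lookup(uint64_t fw_handle)
    {
            uint16_t id = (uint16_t)fw_handle; /* low 16 bits hold the id */
            return srq_table[id];              /* NULL if already destroyed */
    }

    int main(void)
    {
            static struct srq s = { .id = 7 };

            srq_table[s.id] = &s;
            printf("lookup -> %p\n", (void *)srq_lookup(0xabcd0007ULL));
            return 0;
    }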
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 86d4511e0d75..a2d708dceb8d 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -58,6 +58,7 @@
#define QEDR_MSG_RQ " RQ"
#define QEDR_MSG_SQ " SQ"
#define QEDR_MSG_QP " QP"
+#define QEDR_MSG_SRQ " SRQ"
#define QEDR_MSG_GSI " GSI"
#define QEDR_MSG_IWARP " IW"
@@ -122,6 +123,11 @@ struct qedr_device_attr {
#define QEDR_ENET_STATE_BIT (0)
+struct qedr_idr {
+ spinlock_t idr_lock; /* Protect idr data-structure */
+ struct idr idr;
+};
+
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -165,8 +171,8 @@ struct qedr_dev {
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
enum qed_rdma_type rdma_type;
- spinlock_t idr_lock; /* Protect qpidr data-structure */
- struct idr qpidr;
+ struct qedr_idr qpidr;
+ struct qedr_idr srqidr;
struct workqueue_struct *iwarp_wq;
u16 iwarp_max_mtu;
@@ -337,6 +343,34 @@ struct qedr_qp_hwq_info {
qed_chain_get_capacity(p_info->pbl) \
} while (0)
+struct qedr_srq_hwq_info {
+ u32 max_sges;
+ u32 max_wr;
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 wqe_prod;
+ u32 sge_prod;
+ u32 wr_prod_cnt;
+ u32 wr_cons_cnt;
+ u32 num_elems;
+
+ u32 *virt_prod_pair_addr;
+ dma_addr_t phy_prod_pair_addr;
+};
+
+struct qedr_srq {
+ struct ib_srq ibsrq;
+ struct qedr_dev *dev;
+
+ struct qedr_userq usrq;
+ struct qedr_srq_hwq_info hw_srq;
+ struct ib_umem *prod_umem;
+ u16 srq_id;
+ u32 srq_limit;
+ /* lock to protect srq recv post */
+ spinlock_t lock;
+};
+
enum qedr_qp_err_bitmap {
QEDR_QP_ERR_SQ_FULL = 1,
QEDR_QP_ERR_RQ_FULL = 2,
@@ -538,4 +572,9 @@ static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct qedr_mr, ibmr);
}
+
+static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct qedr_srq, ibsrq);
+}
#endif
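get_qedr_srq() added above is the standard container_of() accessor: the IB core only ever holds a pointer to the embedded struct ib_srq, and the driver recovers its private wrapper by subtracting the member offset. A self-contained model of the pattern (the struct layouts here are placeholders, not the real ones):

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the enclosing structure from a pointer to one member. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_srq { int dummy; };
    struct qedr_srq { int srq_id; struct ib_srq ibsrq; };

    int main(void)
    {
            struct qedr_srq srq = { .srq_id = 42 };
            struct ib_srq *ibsrq = &srq.ibsrq;      /* what the core sees */
            struct qedr_srq *back =
                    container_of(ibsrq, struct qedr_srq, ibsrq);

            printf("srq_id = %d\n", back->srq_id);  /* prints 42 */
            return 0;
    }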
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index 7e1f7021396a..228dd7d49622 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -161,12 +161,23 @@ struct rdma_rq_sge {
#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29
};
+struct rdma_srq_wqe_header {
+ struct regpair wr_id;
+ u8 num_sges /* number of SGEs in WQE */;
+ u8 reserved2[7];
+};
+
struct rdma_srq_sge {
struct regpair addr;
__le32 length;
__le32 l_key;
};
+union rdma_srq_elm {
+ struct rdma_srq_wqe_header header;
+ struct rdma_srq_sge sge;
+};
+
/* Rdma doorbell data for flags update */
struct rdma_pwm_flags_data {
__le16 icid; /* internal CID */
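In the rdma_srq_elm union above, the WQE header and an SGE are both 16 bytes (regpair + u8 + 7 reserved, and regpair + two __le32), so the SRQ ring is a flat array of equal elements: one header element per work request followed by one element per SGE. That is why verbs.c, later in this patch, sizes the chain as max_wr times (max SGEs + 1). A sketch of the arithmetic, reusing the patch's constants:

    #include <stdint.h>
    #include <stdio.h>

    #define RDMA_MAX_SGE_PER_SRQ   4
    #define RDMA_MAX_SRQ_WQE_SIZE  (RDMA_MAX_SGE_PER_SRQ + 1)

    static uint32_t srq_ring_elems(uint32_t max_wr)
    {
            /* worst case: every WR carries the maximum number of SGEs,
             * so each WR needs one header element plus max_sge elements */
            return max_wr * RDMA_MAX_SRQ_WQE_SIZE;
    }

    int main(void)
    {
            printf("1024 WRs need %u 16-byte elements\n",
                   (unsigned)srq_ring_elems(1024));
            return 0;
    }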
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 26dc374787f7..505fa3648762 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -491,7 +491,7 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int rc = 0;
int i;
- qp = idr_find(&dev->qpidr, conn_param->qpn);
+ qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
@@ -679,7 +679,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
- qp = idr_find(&dev->qpidr, conn_param->qpn);
+ qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
if (!qp) {
DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
return -EINVAL;
@@ -737,9 +737,9 @@ void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
struct qedr_qp *qp = get_qedr_qp(ibqp);
if (atomic_dec_and_test(&qp->refcnt)) {
- spin_lock_irq(&qp->dev->idr_lock);
- idr_remove(&qp->dev->qpidr, qp->qp_id);
- spin_unlock_irq(&qp->dev->idr_lock);
+ spin_lock_irq(&qp->dev->qpidr.idr_lock);
+ idr_remove(&qp->dev->qpidr.idr, qp->qp_id);
+ spin_unlock_irq(&qp->dev->qpidr.idr_lock);
kfree(qp);
}
}
@@ -748,5 +748,5 @@ struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- return idr_find(&dev->qpidr, qpn);
+ return idr_find(&dev->qpidr.idr, qpn);
}
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 0f14e687bb91..85578887421b 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -380,18 +380,17 @@ int qedr_destroy_gsi_qp(struct qedr_dev *dev)
#define QEDR_GSI_QPN (1)
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
struct qedr_qp *qp,
- struct ib_send_wr *swr,
+ const struct ib_send_wr *swr,
struct ib_ud_header *udh,
int *roce_mode)
{
bool has_vlan = false, has_grh_ipv6 = true;
struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
- union ib_gid sgid;
+ const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
int send_size = 0;
u16 vlan_id = 0;
u16 ether_type;
- struct ib_gid_attr sgid_attr;
int rc;
int ip_ver = 0;
@@ -402,28 +401,16 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
for (i = 0; i < swr->num_sge; ++i)
send_size += swr->sg_list[i].length;
- rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
- grh->sgid_index, &sgid, &sgid_attr);
- if (rc) {
- DP_ERR(dev,
- "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index);
- return rc;
- }
-
- vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr->ndev);
if (vlan_id < VLAN_CFI_MASK)
has_vlan = true;
- dev_put(sgid_attr.ndev);
-
- has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
if (!has_udp) {
/* RoCE v1 */
ether_type = ETH_P_IBOE;
*roce_mode = ROCE_V1;
- } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
/* RoCE v2 IPv4 */
ip_ver = 4;
ether_type = ETH_P_IP;
@@ -471,7 +458,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
udh->grh.flow_label = grh->flow_label;
udh->grh.hop_limit = grh->hop_limit;
udh->grh.destination_gid = grh->dgid;
- memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+ memcpy(&udh->grh.source_gid.raw, sgid_attr->gid.raw,
sizeof(udh->grh.source_gid.raw));
} else {
/* IPv4 header */
@@ -482,7 +469,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
udh->ip4.frag_off = htons(IP_DF);
udh->ip4.ttl = grh->hop_limit;
- ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+ ipv4_addr = qedr_get_ipv4_from_gid(sgid_attr->gid.raw);
udh->ip4.saddr = ipv4_addr;
ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
udh->ip4.daddr = ipv4_addr;
@@ -501,7 +488,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
struct qedr_qp *qp,
- struct ib_send_wr *swr,
+ const struct ib_send_wr *swr,
struct qed_roce_ll2_packet **p_packet)
{
u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
@@ -550,8 +537,8 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
return 0;
}
-int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct qed_roce_ll2_packet *pkt = NULL;
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -620,8 +607,8 @@ err:
return rc;
}
-int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp);
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.h b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
index a55916323ea9..d46dcd3f6424 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
@@ -46,10 +46,10 @@ static inline u32 qedr_get_ipv4_from_gid(const u8 *gid)
/* RDMA CM */
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs,
struct qedr_qp *qp);
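The signature changes in the two files above are part of a tree-wide constification of the verbs post paths: the posted WR chain is read-only, and only *bad_wr, which points back into the caller's chain, is written on failure. A compilable model of that contract (types and names are illustrative):

    #include <stdio.h>

    struct send_wr { int id; struct send_wr *next; };

    /* The WR chain is const; only the bad_wr out-pointer is written. */
    static int post_send(const struct send_wr *wr,
                         const struct send_wr **bad_wr)
    {
            for (; wr; wr = wr->next) {
                    if (wr->id < 0) {       /* pretend invalid WRs fail */
                            *bad_wr = wr;   /* report where we stopped */
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            struct send_wr b = { -1, NULL }, a = { 1, &b };
            const struct send_wr *bad = NULL;

            printf("rc=%d bad id=%d\n", post_send(&a, &bad),
                   bad ? bad->id : 0);
            return 0;
    }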
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index f07b8df96f43..8cc3df24e04e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -51,6 +51,10 @@
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
+#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
+#define RDMA_MAX_SGE_PER_SRQ (4)
+#define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
+
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
@@ -84,6 +88,19 @@ int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
return 0;
}
+int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_device_attr *qattr = &dev->attr;
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+ srq_attr->srq_limit = srq->srq_limit;
+ srq_attr->max_wr = qattr->max_srq_wr;
+ srq_attr->max_sge = qattr->max_sge;
+
+ return 0;
+}
+
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata)
{
@@ -112,7 +129,8 @@ int qedr_query_device(struct ib_device *ibdev,
IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = qattr->max_sge;
+ attr->max_send_sge = qattr->max_sge;
+ attr->max_recv_sge = qattr->max_sge;
attr->max_sge_rd = qattr->max_sge;
attr->max_cq = qattr->max_cq;
attr->max_cqe = qattr->max_cqe;
@@ -224,7 +242,7 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
attr->lmc = 0;
attr->sm_lid = 0;
attr->sm_sl = 0;
- attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+ attr->ip_gids = true;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
attr->gid_tbl_len = 1;
attr->pkey_tbl_len = 1;
@@ -1075,27 +1093,19 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
struct qed_rdma_modify_qp_in_params
*qp_params)
{
+ const struct ib_gid_attr *gid_attr;
enum rdma_network_type nw_type;
- struct ib_gid_attr gid_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- union ib_gid gid;
u32 ipv4_addr;
- int rc = 0;
int i;
- rc = ib_get_cached_gid(ibqp->device,
- rdma_ah_get_port_num(&attr->ah_attr),
- grh->sgid_index, &gid, &gid_attr);
- if (rc)
- return rc;
-
- qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+ gid_attr = grh->sgid_attr;
+ qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
- dev_put(gid_attr.ndev);
- nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+ nw_type = rdma_gid_attr_network_type(gid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV6:
- memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
&grh->dgid,
@@ -1105,7 +1115,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
break;
case RDMA_NETWORK_IB:
- memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
&grh->dgid,
@@ -1115,7 +1125,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
case RDMA_NETWORK_IPV4:
memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
- ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+ ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
qp_params->sgid.ipv4_addr = ipv4_addr;
ipv4_addr =
qedr_get_ipv4_from_gid(grh->dgid.raw);
@@ -1189,6 +1199,21 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
return 0;
}
+static int qedr_copy_srq_uresp(struct qedr_dev *dev,
+ struct qedr_srq *srq, struct ib_udata *udata)
+{
+ struct qedr_create_srq_uresp uresp = {};
+ int rc;
+
+ uresp.srq_id = srq->srq_id;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "create srq: problem copying data to user space\n");
+
+ return rc;
+}
+
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
@@ -1255,13 +1280,18 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->state = QED_ROCE_QP_STATE_RESET;
qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
qp->sq_cq = get_qedr_cq(attrs->send_cq);
- qp->rq_cq = get_qedr_cq(attrs->recv_cq);
qp->dev = dev;
- qp->rq.max_sges = attrs->cap.max_recv_sge;
- DP_DEBUG(dev, QEDR_MSG_QP,
- "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
- qp->rq.max_sges, qp->rq_cq->icid);
+ if (attrs->srq) {
+ qp->srq = get_qedr_srq(attrs->srq);
+ } else {
+ qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+ qp->rq.max_sges, qp->rq_cq->icid);
+ }
+
DP_DEBUG(dev, QEDR_MSG_QP,
"QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
pd->pd_id, qp->qp_type, qp->max_inline_data,
@@ -1276,9 +1306,303 @@ static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid + 1;
- qp->rq.db = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
- qp->rq.db_data.data.icid = qp->icid;
+ if (!qp->srq) {
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+ }
+}
+
+static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
+ struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (attrs->attr.max_wr > qattr->max_srq_wr) {
+ DP_ERR(dev,
+ "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
+ attrs->attr.max_wr, qattr->max_srq_wr);
+ return -EINVAL;
+ }
+
+ if (attrs->attr.max_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
+ attrs->attr.max_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qedr_free_srq_user_params(struct qedr_srq *srq)
+{
+ qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+ ib_umem_release(srq->usrq.umem);
+ ib_umem_release(srq->prod_umem);
+}
+
+static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
+{
+ struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ struct qedr_dev *dev = srq->dev;
+
+ dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
+
+ dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+ hw_srq->virt_prod_pair_addr,
+ hw_srq->phy_prod_pair_addr);
+}
+
+static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
+ struct qedr_srq *srq,
+ struct qedr_create_srq_ureq *ureq,
+ int access, int dmasync)
+{
+ struct scatterlist *sg;
+ int rc;
+
+ rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
+ ureq->srq_len, access, dmasync, 1);
+ if (rc)
+ return rc;
+
+ srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
+ sizeof(struct rdma_srq_producers),
+ access, dmasync);
+ if (IS_ERR(srq->prod_umem)) {
+ qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+ ib_umem_release(srq->usrq.umem);
+ DP_ERR(srq->dev,
+ "create srq: failed ib_umem_get for producer, got %ld\n",
+ PTR_ERR(srq->prod_umem));
+ return PTR_ERR(srq->prod_umem);
+ }
+
+ sg = srq->prod_umem->sg_head.sgl;
+ srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
+
+ return 0;
+}
+
+static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
+ struct qedr_dev *dev,
+ struct ib_srq_init_attr *init_attr)
+{
+ struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ dma_addr_t phy_prod_pair_addr;
+ u32 num_elems;
+ void *va;
+ int rc;
+
+ va = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(struct rdma_srq_producers),
+ &phy_prod_pair_addr, GFP_KERNEL);
+ if (!va) {
+ DP_ERR(dev,
+ "create srq: failed to allocate dma memory for producer\n");
+ return -ENOMEM;
+ }
+
+ hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
+ hw_srq->virt_prod_pair_addr = va;
+
+ num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ num_elems,
+ QEDR_SRQ_WQE_ELEM_SIZE,
+ &hw_srq->pbl, NULL);
+ if (rc)
+ goto err0;
+
+ hw_srq->num_elems = num_elems;
+
+ return 0;
+
+err0:
+ dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+ va, phy_prod_pair_addr);
+ return rc;
+}
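qedr_alloc_srq_kernel_params() above follows the kernel's goto-unwind convention: an allocation that fails jumps to a label that releases everything acquired before it. The same shape in a self-contained sketch (malloc stands in for dma_alloc_coherent() and chain_alloc()):

    #include <stdio.h>
    #include <stdlib.h>

    static int alloc_two(void **a, void **b)
    {
            *a = malloc(64);
            if (!*a)
                    return -1;
            *b = malloc(64);
            if (!*b)
                    goto err0;      /* unwind the first allocation */
            return 0;
    err0:
            free(*a);
            *a = NULL;
            return -1;
    }

    int main(void)
    {
            void *a = NULL, *b = NULL;

            printf("rc=%d\n", alloc_two(&a, &b));
            free(a);
            free(b);
            return 0;
    }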
+
+static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
+ void *ptr, u32 id);
+static void qedr_idr_remove(struct qedr_dev *dev,
+ struct qedr_idr *qidr, u32 id);
+
+struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct qed_rdma_destroy_srq_in_params destroy_in_params;
+ struct qed_rdma_create_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qed_rdma_create_srq_out_params out_params;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_create_srq_ureq ureq = {};
+ u64 pbl_base_addr, phy_prod_pair_addr;
+ struct ib_ucontext *ib_ctx = NULL;
+ struct qedr_srq_hwq_info *hw_srq;
+ struct qedr_ucontext *ctx = NULL;
+ u32 page_cnt, page_size;
+ struct qedr_srq *srq;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create SRQ called from %s (pd %p)\n",
+ (udata) ? "User lib" : "kernel", pd);
+
+ rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
+ if (rc)
+ return ERR_PTR(-EINVAL);
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ srq->dev = dev;
+ hw_srq = &srq->hw_srq;
+ spin_lock_init(&srq->lock);
+
+ hw_srq->max_wr = init_attr->attr.max_wr;
+ hw_srq->max_sges = init_attr->attr.max_sge;
+
+ if (udata && ibpd->uobject && ibpd->uobject->context) {
+ ib_ctx = ibpd->uobject->context;
+ ctx = get_qedr_ucontext(ib_ctx);
+
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create srq: problem copying data from user space\n");
+ rc = -EFAULT;
+ goto err0;
+ }
+
+ rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
+ if (rc)
+ goto err0;
+
+ page_cnt = srq->usrq.pbl_info.num_pbes;
+ pbl_base_addr = srq->usrq.pbl_tbl->pa;
+ phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+ page_size = BIT(srq->usrq.umem->page_shift);
+ } else {
+ struct qed_chain *pbl;
+
+ rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
+ if (rc)
+ goto err0;
+
+ pbl = &hw_srq->pbl;
+ page_cnt = qed_chain_get_page_cnt(pbl);
+ pbl_base_addr = qed_chain_get_pbl_phys(pbl);
+ phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+ page_size = QED_CHAIN_PAGE_SIZE;
+ }
+
+ in_params.pd_id = pd->pd_id;
+ in_params.pbl_base_addr = pbl_base_addr;
+ in_params.prod_pair_addr = phy_prod_pair_addr;
+ in_params.num_pages = page_cnt;
+ in_params.page_size = page_size;
+
+ rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
+ if (rc)
+ goto err1;
+
+ srq->srq_id = out_params.srq_id;
+
+ if (udata) {
+ rc = qedr_copy_srq_uresp(dev, srq, udata);
+ if (rc)
+ goto err2;
+ }
+
+ rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
+ if (rc)
+ goto err2;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
+ return &srq->ibsrq;
+
+err2:
+ destroy_in_params.srq_id = srq->srq_id;
+
+ dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
+err1:
+ if (udata)
+ qedr_free_srq_user_params(srq);
+ else
+ qedr_free_srq_kernel_params(srq);
+err0:
+ kfree(srq);
+
+ return ERR_PTR(rc);
+}
+
+int qedr_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct qed_rdma_destroy_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+ qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
+ in_params.srq_id = srq->srq_id;
+ dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
+
+ if (ibsrq->pd->uobject)
+ qedr_free_srq_user_params(srq);
+ else
+ qedr_free_srq_kernel_params(srq);
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "destroy srq: destroyed srq with srq_id=0x%0x\n",
+ srq->srq_id);
+ kfree(srq);
+
+ return 0;
+}
+
+int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct qed_rdma_modify_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+ int rc;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ DP_ERR(dev,
+ "modify srq: invalid attribute mask=0x%x specified for %p\n",
+ attr_mask, srq);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit >= srq->hw_srq.max_wr) {
+ DP_ERR(dev,
+ "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
+ attr->srq_limit, srq->hw_srq.max_wr);
+ return -EINVAL;
+ }
+
+ in_params.srq_id = srq->srq_id;
+ in_params.wqe_limit = attr->srq_limit;
+ rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
+ if (rc)
+ return rc;
+ }
+
+ srq->srq_limit = attr->srq_limit;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
+
+ return 0;
}
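qedr_modify_srq() above supports no resize (IB_SRQ_MAX_WR is rejected) and otherwise only arms the SRQ limit: a watermark that, once the number of free WRs drops below it, makes the hardware raise the IB_EVENT_SRQ_LIMIT_REACHED event handled in the affiliated-event hunk earlier. A minimal model of the watermark semantics (the 0-disarms convention is an assumption of this sketch, not something the patch spells out):

    #include <stdio.h>

    /* Returns nonzero when the armed limit has been crossed. */
    static int srq_limit_hit(unsigned int free_wrs, unsigned int srq_limit)
    {
            return srq_limit && free_wrs < srq_limit;  /* 0 disarms */
    }

    int main(void)
    {
            printf("%d %d\n", srq_limit_hit(10, 16), srq_limit_hit(10, 0));
            return 0;
    }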
static inline void
@@ -1299,9 +1623,17 @@ qedr_init_common_qp_in_params(struct qedr_dev *dev,
params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
params->stats_queue = 0;
- params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
params->srq_id = 0;
params->use_srq = false;
+
+ if (!qp->srq) {
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+
+ } else {
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = qp->srq->srq_id;
+ params->use_srq = true;
+ }
}
static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
@@ -1318,32 +1650,27 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}
-static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
+static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
+ void *ptr, u32 id)
{
int rc;
- if (!rdma_protocol_iwarp(&dev->ibdev, 1))
- return 0;
-
idr_preload(GFP_KERNEL);
- spin_lock_irq(&dev->idr_lock);
+ spin_lock_irq(&qidr->idr_lock);
- rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
+ rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
- spin_unlock_irq(&dev->idr_lock);
+ spin_unlock_irq(&qidr->idr_lock);
idr_preload_end();
return rc < 0 ? rc : 0;
}
-static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
+static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
{
- if (!rdma_protocol_iwarp(&dev->ibdev, 1))
- return;
-
- spin_lock_irq(&dev->idr_lock);
- idr_remove(&dev->qpidr, id);
- spin_unlock_irq(&dev->idr_lock);
+ spin_lock_irq(&qidr->idr_lock);
+ idr_remove(&qidr->idr, id);
+ spin_unlock_irq(&qidr->idr_lock);
}
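The generalized helpers above take a struct qedr_idr so QPs and SRQs each get their own map. Note that idr_alloc() is called with the half-open range [id, id + 1): the entry is stored at exactly id, and the call fails if that slot is already occupied. An array-backed userspace model of the exact-id insert:

    #include <stdio.h>

    #define NSLOTS 64
    static void *slots[NSLOTS];

    /* Store ptr at exactly id, like idr_alloc(idr, ptr, id, id + 1, ...). */
    static int idr_add_exact(void *ptr, unsigned int id)
    {
            if (id >= NSLOTS || slots[id])
                    return -1;      /* out of range or slot taken */
            slots[id] = ptr;
            return 0;
    }

    int main(void)
    {
            int x;

            /* second insert at the same id fails: prints "0 -1" */
            printf("%d %d\n", idr_add_exact(&x, 5), idr_add_exact(&x, 5));
            return 0;
    }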
static inline void
@@ -1356,9 +1683,10 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
&qp->usq.pbl_info, FW_PAGE_SHIFT);
-
- qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
- qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+ if (!qp->srq) {
+ qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+ qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+ }
qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
&qp->urq.pbl_info, FW_PAGE_SHIFT);
@@ -1404,11 +1732,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
if (rc)
return rc;
- /* RQ - read access only (0), dma sync not required (0) */
- rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0, alloc_and_init);
- if (rc)
- return rc;
+ if (!qp->srq) {
+ /* RQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
+ ureq.rq_len, 0, 0, alloc_and_init);
+ if (rc)
+ return rc;
+ }
memset(&in_params, 0, sizeof(in_params));
qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
@@ -1416,8 +1746,10 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
in_params.qp_handle_hi = ureq.qp_handle_hi;
in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
- in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
- in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ if (!qp->srq) {
+ in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
+ in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ }
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
&in_params, &out_params);
@@ -1679,16 +2011,13 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
if (rc)
return ERR_PTR(rc);
- if (attrs->srq)
- return ERR_PTR(-EINVAL);
-
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
udata ? "user library" : "kernel", attrs->event_handler, pd,
get_qedr_cq(attrs->send_cq),
get_qedr_cq(attrs->send_cq)->icid,
get_qedr_cq(attrs->recv_cq),
- get_qedr_cq(attrs->recv_cq)->icid);
+ attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
@@ -1715,9 +2044,11 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
- rc = qedr_idr_add(dev, qp, qp->qp_id);
- if (rc)
- goto err;
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
+ if (rc)
+ goto err;
+ }
return &qp->ibqp;
@@ -2289,8 +2620,9 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
qedr_free_qp_resources(dev, qp);
- if (atomic_dec_and_test(&qp->refcnt)) {
- qedr_idr_remove(dev, qp->qp_id);
+ if (atomic_dec_and_test(&qp->refcnt) &&
+ rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
kfree(qp);
}
return rc;
@@ -2305,7 +2637,7 @@ struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
if (!ah)
return ERR_PTR(-ENOMEM);
- ah->attr = *attr;
+ rdma_copy_ah_attr(&ah->attr, attr);
return &ah->ibah;
}
@@ -2314,6 +2646,7 @@ int qedr_destroy_ah(struct ib_ah *ibah)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
+ rdma_destroy_ah_attr(&ah->attr);
kfree(ah);
return 0;
}
@@ -2705,9 +3038,9 @@ static void swap_wqe_data64(u64 *p)
static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
struct qedr_qp *qp, u8 *wqe_size,
- struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr, u8 *bits,
- u8 bit)
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr,
+ u8 *bits, u8 bit)
{
u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
char *seg_prt, *wqe;
@@ -2790,7 +3123,7 @@ static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
} while (0)
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
u32 data_size = 0;
int i;
@@ -2814,8 +3147,8 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
struct qedr_qp *qp,
struct rdma_sq_rdma_wqe_1st *rwqe,
struct rdma_sq_rdma_wqe_2nd *rwqe2,
- struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
@@ -2837,8 +3170,8 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
struct qedr_qp *qp,
struct rdma_sq_send_wqe_1st *swqe,
struct rdma_sq_send_wqe_2st *swqe2,
- struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
memset(swqe2, 0, sizeof(*swqe2));
if (wr->send_flags & IB_SEND_INLINE) {
@@ -2854,7 +3187,7 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
static int qedr_prepare_reg(struct qedr_qp *qp,
struct rdma_sq_fmr_wqe_1st *fwqe1,
- struct ib_reg_wr *wr)
+ const struct ib_reg_wr *wr)
{
struct qedr_mr *mr = get_qedr_mr(wr->mr);
struct rdma_sq_fmr_wqe_2nd *fwqe2;
@@ -2916,7 +3249,8 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
}
}
-static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp,
+ const struct ib_send_wr *wr)
{
int wq_is_full, err_wr, pbl_is_full;
struct qedr_dev *dev = qp->dev;
@@ -2953,8 +3287,8 @@ static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
return true;
}
-static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -3168,8 +3502,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return rc;
}
-int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -3234,8 +3568,104 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return rc;
}
-int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
+{
+ u32 used;
+
+ /* Compute the number of elements in use from the free-running
+ * producer and consumer counters, and subtract it from the
+ * maximum number of work requests supported to get the number
+ * of elements left.
+ */
+ used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
+
+ return hw_srq->max_wr - used;
+}
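qedr_srq_elem_left() above works because wr_prod_cnt and wr_cons_cnt are free-running u32 counters: prod - cons yields the in-flight count modulo 2^32, so the result stays correct even after the producer wraps. A compilable demonstration:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t elems_left(uint32_t max_wr, uint32_t prod, uint32_t cons)
    {
            uint32_t used = prod - cons;  /* well-defined modulo 2^32 */
            return max_wr - used;
    }

    int main(void)
    {
            /* producer has wrapped past UINT32_MAX, consumer has not:
             * used = 5 - (UINT32_MAX - 2) = 8, so 1016 slots remain */
            printf("%u\n", (unsigned)elems_left(1024, 5, UINT32_MAX - 2));
            return 0;
    }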
+
+int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+ struct qedr_srq_hwq_info *hw_srq;
+ struct qedr_dev *dev = srq->dev;
+ struct qed_chain *pbl;
+ unsigned long flags;
+ int status = 0;
+ u32 num_sge;
+ u32 offset;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ hw_srq = &srq->hw_srq;
+ pbl = &srq->hw_srq.pbl;
+ while (wr) {
+ struct rdma_srq_wqe_header *hdr;
+ int i;
+
+ if (!qedr_srq_elem_left(hw_srq) ||
+ wr->num_sge > srq->hw_srq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
+ hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
+ wr->num_sge, srq->hw_srq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ hdr = qed_chain_produce(pbl);
+ num_sge = wr->num_sge;
+ /* Set number of sge and work request id in header */
+ SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
+
+ srq->hw_srq.wr_prod_cnt++;
+ hw_srq->wqe_prod++;
+ hw_srq->sge_prod++;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
+ wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
+
+ /* Set SGE length, lkey and address */
+ SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
+ wr->sg_list[i].length, wr->sg_list[i].lkey);
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "[%d]: len %d key %x addr %x:%x\n",
+ i, srq_sge->length, srq_sge->l_key,
+ srq_sge->addr.hi, srq_sge->addr.lo);
+ hw_srq->sge_prod++;
+ }
+
+ /* Flush WQE and SGE information before
+ * updating producer.
+ */
+ wmb();
+
+ /* The SRQ producer pair is 8 bytes: the SGE producer index is
+ * in the first 4 bytes and the WQE producer in the next 4.
+ * The WQE producer store is a full u32; a plain u8 dereference
+ * of the byte-offset address would truncate it to one byte.
+ */
+ *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
+ offset = offsetof(struct rdma_srq_producers, wqe_prod);
+ *(u32 *)((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
+ hw_srq->wqe_prod;
+
+ /* Flush producer after updating it. */
+ wmb();
+ wr = wr->next;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
+ qed_chain_get_elem_left(pbl));
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return status;
+}
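The doorbell sequence at the end of qedr_post_srq_recv() targets an 8-byte producer pair shared with the device: the SGE producer in the first 4 bytes and the WQE producer in the next 4, each store fenced by its own wmb(). A layout sketch (struct rdma_srq_producers is not defined in this patch; its field order here is assumed from the comment above):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rdma_srq_producers {
            uint32_t sge_prod;      /* written first, HW consumes it first */
            uint32_t wqe_prod;      /* written second, at offset 4 */
    };

    int main(void)
    {
            printf("pair size %zu, wqe_prod at offset %zu\n",
                   sizeof(struct rdma_srq_producers),
                   offsetof(struct rdma_srq_producers, wqe_prod));
            return 0;
    }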
+
+int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
struct qedr_dev *dev = qp->dev;
@@ -3625,6 +4055,31 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
wc->wr_id = wr_id;
}
+static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ struct qedr_srq *srq = qp->srq;
+ u64 wr_id;
+
+ wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
+ le32_to_cpu(resp->srq_wr_id.lo), u64);
+
+ if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
+ wc->wr_id = wr_id;
+ wc->byte_len = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wr_id;
+ } else {
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+ }
+ srq->hw_srq.wr_cons_cnt++;
+
+ return 1;
+}
static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
struct qedr_cq *cq, struct ib_wc *wc,
struct rdma_cqe_responder *resp)
@@ -3674,6 +4129,19 @@ static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
}
}
+static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ int cnt;
+
+ cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
+ consume_cqe(cq);
+
+ return cnt;
+}
+
static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
struct qedr_cq *cq, int num_entries,
struct ib_wc *wc, struct rdma_cqe_responder *resp,
@@ -3751,6 +4219,11 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
&cqe->resp, &update);
break;
+ case RDMA_CQE_TYPE_RESPONDER_SRQ:
+ cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
+ wc, &cqe->resp);
+ update = 1;
+ break;
case RDMA_CQE_TYPE_INVALID:
default:
DP_ERR(dev, "Error: invalid CQE type = %d\n",
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 2c57e4c592a6..0b7d0124b16c 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -66,6 +66,15 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int qedr_destroy_qp(struct ib_qp *ibqp);
+struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
+int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+int qedr_destroy_srq(struct ib_srq *ibsrq);
+int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
struct ib_udata *udata);
int qedr_destroy_ah(struct ib_ah *ibah);
@@ -82,10 +91,10 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
-int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
- struct ib_send_wr **bad_wr);
-int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_wr);
+int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
+ const struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 14b4057a2b8f..41babbc0db58 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1489,7 +1489,8 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_mr_size = ~0ULL;
rdi->dparms.props.max_qp = ib_qib_max_qps;
rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
- rdi->dparms.props.max_sge = ib_qib_max_sges;
+ rdi->dparms.props.max_send_sge = ib_qib_max_sges;
+ rdi->dparms.props.max_recv_sge = ib_qib_max_sges;
rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index f9a46768a19a..666613eef88f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -78,9 +78,6 @@ struct qib_verbs_txreq;
#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
-/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
-#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
-
#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
/* Values for set/get portinfo VLCap OperationalVLs */
@@ -314,7 +311,7 @@ void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
-int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
+int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/usnic/Kconfig b/drivers/infiniband/hw/usnic/Kconfig
index 29ab11c34f3f..d1dae2af4ca9 100644
--- a/drivers/infiniband/hw/usnic/Kconfig
+++ b/drivers/infiniband/hw/usnic/Kconfig
@@ -1,10 +1,10 @@
config INFINIBAND_USNIC
tristate "Verbs support for Cisco VIC"
depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU
+ depends on INFINIBAND_USER_ACCESS
select ENIC
select NET_VENDOR_CISCO
select PCI_IOV
- select INFINIBAND_USER_ACCESS
---help---
This is a low-level driver for Cisco's Virtual Interface
Cards (VICs), including the VIC 1240 and 1280 cards.
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 995a26b65156..7875883621f4 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -92,8 +92,8 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
ufdev->pdev = pdev;
ufdev->netdev = pci_get_drvdata(pdev);
spin_lock_init(&ufdev->lock);
- strncpy(ufdev->name, netdev_name(ufdev->netdev),
- sizeof(ufdev->name) - 1);
+ BUILD_BUG_ON(sizeof(ufdev->name) != sizeof(ufdev->netdev->name));
+ strcpy(ufdev->name, ufdev->netdev->name);
return ufdev;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index 0b2cc4e79707..f0b71d593da5 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -57,7 +57,7 @@ struct usnic_fwd_dev {
char mac[ETH_ALEN];
unsigned int mtu;
__be32 inaddr;
- char name[IFNAMSIZ+1];
+ char name[IFNAMSIZ];
};
struct usnic_fwd_flow {
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index a688a5669168..9973ac893635 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -666,7 +666,7 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr)
usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
- usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
+ usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
kfree(mr);
return 0;
}
@@ -771,15 +771,15 @@ int usnic_ib_destroy_ah(struct ib_ah *ah)
return -EINVAL;
}
-int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
usnic_dbg("\n");
return -EINVAL;
}
-int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
usnic_dbg("\n");
return -EINVAL;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 1fda94425116..2a2c9beb715f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -80,10 +80,10 @@ struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
struct ib_udata *udata);
int usnic_ib_destroy_ah(struct ib_ah *ah);
-int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
struct ib_wc *wc);
int usnic_ib_req_notify_cq(struct ib_cq *cq,
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 4381c0a9a873..9dd39daa602b 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -41,6 +41,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <rdma/ib_verbs.h>
#include "usnic_log.h"
#include "usnic_uiom.h"
@@ -88,7 +89,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page = sg_page(sg);
pa = sg_phys(sg);
- if (dirty)
+ if (!PageDirty(page) && dirty)
set_page_dirty_lock(page);
put_page(page);
usnic_dbg("pa: %pa\n", &pa);
@@ -114,6 +115,16 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
dma_addr_t pa;
unsigned int gup_flags;
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+ if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
+ return -EINVAL;
+
+ if (!size)
+ return -EINVAL;
+
if (!can_do_mlock())
return -EPERM;
@@ -127,7 +138,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
down_write(&current->mm->mmap_sem);
- locked = npages + current->mm->locked_vm;
+ locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -143,7 +154,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
gup_flags, page_list, NULL);
@@ -186,7 +197,7 @@ out:
if (ret < 0)
usnic_uiom_put_pages(chunk_list, 0);
else
- current->mm->locked_vm = locked;
+ current->mm->pinned_vm = locked;
up_write(&current->mm->mmap_sem);
free_page((unsigned long) page_list);
@@ -420,18 +431,22 @@ out_free_uiomr:
return ERR_PTR(err);
}
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+ struct ib_ucontext *ucontext)
{
+ struct task_struct *task;
struct mm_struct *mm;
unsigned long diff;
__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
- mm = get_task_mm(current);
- if (!mm) {
- kfree(uiomr);
- return;
- }
+ task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
+ if (!task)
+ goto out;
+ mm = get_task_mm(task);
+ put_task_struct(task);
+ if (!mm)
+ goto out;
diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
@@ -443,7 +458,7 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
* up here and not be able to take the mmap_sem. In that case
* we defer the vm_locked accounting to the system workqueue.
*/
- if (closing) {
+ if (ucontext->closing) {
if (!down_write_trylock(&mm->mmap_sem)) {
INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
uiomr->mm = mm;
@@ -455,9 +470,10 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
} else
down_write(&mm->mmap_sem);
- current->mm->locked_vm -= diff;
+ mm->pinned_vm -= diff;
up_write(&mm->mmap_sem);
mmput(mm);
+out:
kfree(uiomr);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 431efe4143f4..8c096acff123 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -39,6 +39,8 @@
#include "usnic_uiom_interval_tree.h"
+struct ib_ucontext;
+
#define USNIC_UIOM_READ (1)
#define USNIC_UIOM_WRITE (2)
@@ -89,7 +91,8 @@ void usnic_uiom_free_dev_list(struct device **devs);
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
unsigned long addr, size_t size,
int access, int dmasync);
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+ struct ib_ucontext *ucontext);
int usnic_uiom_init(char *drv_name);
void usnic_uiom_fini(void);
#endif /* USNIC_UIOM_H_ */
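The usnic_uiom changes above move pinned-page accounting from locked_vm to pinned_vm and switch to get_user_pages_longterm(), but the RLIMIT_MEMLOCK check keeps the same shape: compare the prospective pinned-page total against the limit expressed in pages. A userspace approximation of that check (the kernel also lets CAP_IPC_LOCK bypass it, which this sketch omits):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/resource.h>

    static int can_pin(unsigned long npages, unsigned long already_pinned)
    {
            struct rlimit rl;
            unsigned long limit_pages;

            if (getrlimit(RLIMIT_MEMLOCK, &rl))
                    return 0;
            /* compare in pages, as the kernel does, not in bytes */
            limit_pages = rl.rlim_cur / (unsigned long)sysconf(_SC_PAGESIZE);
            return already_pinned + npages <= limit_pages;
    }

    int main(void)
    {
            printf("can pin 16 pages: %d\n", can_pin(16, 0));
            return 0;
    }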
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 44cb1cfba417..42b8685c997e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -378,11 +378,6 @@ static inline enum ib_port_speed pvrdma_port_speed_to_ib(
return (enum ib_port_speed)speed;
}
-static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask)
-{
- return attr_mask;
-}
-
static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index f95b97646c25..0f004c737620 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -278,19 +278,6 @@ int pvrdma_destroy_cq(struct ib_cq *cq)
return ret;
}
-/**
- * pvrdma_modify_cq - modify the CQ moderation parameters
- * @ibcq: the CQ to modify
- * @cq_count: number of CQEs that will trigger an event
- * @cq_period: max period of time in usec before triggering an event
- *
- * @return: -EOPNOTSUPP as CQ resize is not supported.
- */
-int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
-{
- return -EOPNOTSUPP;
-}
-
static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
{
return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
@@ -428,16 +415,3 @@ int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
/* Ensure we do not return errors from poll_cq */
return npolled;
}
-
-/**
- * pvrdma_resize_cq - resize CQ
- * @ibcq: the completion queue
- * @entries: CQ entries
- * @udata: user data
- *
- * @return: -EOPNOTSUPP as CQ resize is not supported.
- */
-int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
-{
- return -EOPNOTSUPP;
-}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 0be33a81bbe6..a5719899f49a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -62,9 +62,7 @@ static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;
-static int pvrdma_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context);
+static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -216,8 +214,6 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
dev->ib_dev.post_send = pvrdma_post_send;
dev->ib_dev.post_recv = pvrdma_post_recv;
dev->ib_dev.create_cq = pvrdma_create_cq;
- dev->ib_dev.modify_cq = pvrdma_modify_cq;
- dev->ib_dev.resize_cq = pvrdma_resize_cq;
dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
dev->ib_dev.poll_cq = pvrdma_poll_cq;
dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
@@ -261,7 +257,6 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
dev->ib_dev.modify_srq = pvrdma_modify_srq;
dev->ib_dev.query_srq = pvrdma_query_srq;
dev->ib_dev.destroy_srq = pvrdma_destroy_srq;
- dev->ib_dev.post_srq_recv = pvrdma_post_srq_recv;
dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
sizeof(struct pvrdma_srq *),
@@ -650,13 +645,11 @@ static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
return 0;
}
-static int pvrdma_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context)
+static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct pvrdma_dev *dev = to_vdev(attr->device);
- return pvrdma_add_gid_at_index(dev, gid,
+ return pvrdma_add_gid_at_index(dev, &attr->gid,
ib_gid_type_to_pvrdma(attr->gid_type),
attr->index);
}
@@ -699,8 +692,12 @@ static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
}
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
+ struct net_device *ndev,
unsigned long event)
{
+ struct pci_dev *pdev_net;
+ unsigned int slot;
+
switch (event) {
case NETDEV_REBOOT:
case NETDEV_DOWN:
@@ -718,6 +715,24 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
else
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
break;
+ case NETDEV_UNREGISTER:
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ break;
+ case NETDEV_REGISTER:
+ /* vmxnet3 will have the same bus and slot, but function 0 */
+ slot = PCI_SLOT(dev->pdev->devfn);
+ pdev_net = pci_get_slot(dev->pdev->bus,
+ PCI_DEVFN(slot, 0));
+ if ((dev->netdev == NULL) &&
+ (pci_get_drvdata(pdev_net) == ndev)) {
+ /* this is our netdev */
+ dev->netdev = ndev;
+ dev_hold(ndev);
+ }
+ pci_dev_put(pdev_net);
+ break;
+
default:
dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
event, dev->ib_dev.name);
@@ -734,8 +749,11 @@ static void pvrdma_netdevice_event_work(struct work_struct *work)
mutex_lock(&pvrdma_device_list_lock);
list_for_each_entry(dev, &pvrdma_device_list, device_link) {
- if (dev->netdev == netdev_work->event_netdev) {
- pvrdma_netdevice_event_handle(dev, netdev_work->event);
+ if ((netdev_work->event == NETDEV_REGISTER) ||
+ (dev->netdev == netdev_work->event_netdev)) {
+ pvrdma_netdevice_event_handle(dev,
+ netdev_work->event_netdev,
+ netdev_work->event);
break;
}
}
@@ -968,6 +986,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
ret = -ENODEV;
goto err_free_cq_ring;
}
+ dev_hold(dev->netdev);
dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);
@@ -1040,6 +1059,10 @@ err_free_intrs:
pvrdma_free_irq(dev);
pci_free_irq_vectors(pdev);
err_free_cq_ring:
+ if (dev->netdev) {
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ }
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
@@ -1079,6 +1102,11 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
flush_workqueue(event_wq);
+ if (dev->netdev) {
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ }
+
/* Unregister ib device */
ib_unregister_device(&dev->ib_dev);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index eb5b1065ec08..60083c0363a5 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -599,7 +599,8 @@ static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
qp->rq.offset + n * qp->rq.wqe_size);
}
-static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
+static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr,
+ const struct ib_reg_wr *wr)
{
struct pvrdma_user_mr *mr = to_vmr(wr->mr);
@@ -623,8 +624,8 @@ static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
*
* @return: 0 on success, otherwise errno returned.
*/
-int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct pvrdma_qp *qp = to_vqp(ibqp);
struct pvrdma_dev *dev = to_vdev(ibqp->device);
@@ -827,8 +828,8 @@ out:
*
* @return: 0 on success, otherwise errno returned.
*/
-int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct pvrdma_dev *dev = to_vdev(ibqp->device);
unsigned long flags;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index af235967a9c2..dc0ce877c7a3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -52,13 +52,6 @@
#include "pvrdma.h"
-int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- /* No support for kernel clients. */
- return -EOPNOTSUPP;
-}
-
/**
* pvrdma_query_srq - query shared receive queue
* @ibsrq: the shared receive queue to query
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index a51463cd2f37..b65d10b0a875 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -82,7 +82,8 @@ int pvrdma_query_device(struct ib_device *ibdev,
props->max_qp = dev->dsr->caps.max_qp;
props->max_qp_wr = dev->dsr->caps.max_qp_wr;
props->device_cap_flags = dev->dsr->caps.device_cap_flags;
- props->max_sge = dev->dsr->caps.max_sge;
+ props->max_send_sge = dev->dsr->caps.max_sge;
+ props->max_recv_sge = dev->dsr->caps.max_sge;
props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
dev->dsr->caps.max_sge_rd);
props->max_srq = dev->dsr->caps.max_srq;
@@ -154,7 +155,8 @@ int pvrdma_query_port(struct ib_device *ibdev, u8 port,
props->gid_tbl_len = resp->attrs.gid_tbl_len;
props->port_cap_flags =
pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
- props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->max_msg_sz = resp->attrs.max_msg_sz;
props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index b7b25728a7e5..b2e3ab50cb08 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -412,15 +412,10 @@ struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
-int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
- struct ib_udata *udata);
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
-int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
- struct ib_udata *udata);
int pvrdma_destroy_cq(struct ib_cq *cq);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
@@ -435,8 +430,6 @@ int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int pvrdma_destroy_srq(struct ib_srq *srq);
-int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
@@ -446,9 +439,9 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int pvrdma_destroy_qp(struct ib_qp *qp);
-int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
#endif /* __PVRDMA_VERBS_H__ */
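Note: the prototype changes follow the tree-wide const-ification of the posting verbs; the work-request chain is input-only, and bad_wr now points at const as well. The ib_post_*() wrappers also accept a NULL bad_wr (several ipoib hunks below rely on this), substituting an internal dummy. A hedged sketch of a caller under the new signatures:

    #include <rdma/ib_verbs.h>

    /* Sketch: post a single receive WR; pass NULL for bad_wr when the
     * identity of a failing WR is not needed. */
    static int post_one_rx(struct ib_qp *qp, struct ib_sge *sge)
    {
            struct ib_recv_wr wr = {
                    .wr_id   = 1,
                    .sg_list = sge,
                    .num_sge = 1,
            };

            return ib_post_recv(qp, &wr, NULL);
    }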
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index ba3639a0d77c..89ec0f64abfc 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -120,7 +120,8 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
dev->n_ahs_allocated++;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- ah->attr = *ah_attr;
+ rdma_copy_ah_attr(&ah->attr, ah_attr);
+
atomic_set(&ah->refcount, 0);
if (dev->driver_f.notify_new_ah)
@@ -148,6 +149,7 @@ int rvt_destroy_ah(struct ib_ah *ibah)
dev->n_ahs_allocated--;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ rdma_destroy_ah_attr(&ah->attr);
kfree(ah);
return 0;
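Note: struct rdma_ah_attr now embeds a reference-counted sgid_attr pointer, so a plain struct assignment would leak or under-count references. The patch adopts the copy/replace/destroy helpers; their discipline, as a sketch:

    #include <rdma/ib_verbs.h>

    /* Sketch: lifetime rules for a cached rdma_ah_attr. */
    static void ah_attr_lifecycle(struct rdma_ah_attr *stored,
                                  const struct rdma_ah_attr *src)
    {
            rdma_copy_ah_attr(stored, src);    /* takes a sgid_attr ref */
            rdma_replace_ah_attr(stored, src); /* drops old ref, takes new */
            rdma_destroy_ah_attr(stored);      /* releases the held ref */
    }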
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 41183bd665ca..5ce403c6cddb 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -780,14 +780,15 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (!rdi)
return ERR_PTR(-EINVAL);
- if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
+ if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
init_attr->create_flags)
return ERR_PTR(-EINVAL);
/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
- if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
+ if (init_attr->cap.max_recv_sge >
+ rdi->dparms.props.max_recv_sge ||
init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
return ERR_PTR(-EINVAL);
@@ -1336,13 +1337,13 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->qp_access_flags = attr->qp_access_flags;
if (attr_mask & IB_QP_AV) {
- qp->remote_ah_attr = attr->ah_attr;
+ rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
}
if (attr_mask & IB_QP_ALT_PATH) {
- qp->alt_ah_attr = attr->alt_ah_attr;
+ rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
qp->s_alt_pkey_index = attr->alt_pkey_index;
}
@@ -1459,6 +1460,8 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
vfree(qp->s_wq);
rdi->driver_f.qp_priv_free(rdi, qp);
kfree(qp->s_ack_queue);
+ rdma_destroy_ah_attr(&qp->remote_ah_attr);
+ rdma_destroy_ah_attr(&qp->alt_ah_attr);
kfree(qp);
return 0;
}
@@ -1535,8 +1538,8 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*
* Return: 0 on success otherwise errno
*/
-int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_rwq *wq = qp->r_rq.wq;
@@ -1617,7 +1620,7 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
static inline int rvt_qp_valid_operation(
struct rvt_qp *qp,
const struct rvt_operation_params *post_parms,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
int len;
@@ -1714,7 +1717,7 @@ static inline int rvt_qp_is_avail(
* @wr: the work request to send
*/
static int rvt_post_one_wr(struct rvt_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
int *call_send)
{
struct rvt_swqe *wqe;
@@ -1888,8 +1891,8 @@ bail_inval_free:
*
* Return: 0 on success else errno
*/
-int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -1945,8 +1948,8 @@ bail:
*
* Return: 0 on success else errno
*/
-int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
struct rvt_rwq *wq;
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
index 8409f80d5f25..264811fdc530 100644
--- a/drivers/infiniband/sw/rdmavt/qp.h
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -60,10 +60,10 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int rvt_destroy_qp(struct ib_qp *ibqp);
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr);
-int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
#endif /* DEF_RVTQP_H */
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 3707952b4364..78e06fc456c5 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -82,7 +82,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
struct ib_srq *ret;
if (srq_init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
if (srq_init_attr->attr.max_sge == 0 ||
srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 7121e1b1eb89..10999fa69281 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -91,7 +91,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_qp = RXE_MAX_QP;
rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
- rxe->attr.max_sge = RXE_MAX_SGE;
+ rxe->attr.max_send_sge = RXE_MAX_SGE;
+ rxe->attr.max_recv_sge = RXE_MAX_SGE;
rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
rxe->attr.max_cq = RXE_MAX_CQ;
rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 7f1ae364088a..26fe8d7dbc55 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -55,29 +55,41 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
struct rdma_ah_attr *attr)
{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
+
memset(av, 0, sizeof(*av));
- memcpy(&av->grh, rdma_ah_read_grh(attr),
- sizeof(*rdma_ah_read_grh(attr)));
+ memcpy(av->grh.dgid.raw, grh->dgid.raw, sizeof(grh->dgid.raw));
+ av->grh.flow_label = grh->flow_label;
+ av->grh.sgid_index = grh->sgid_index;
+ av->grh.hop_limit = grh->hop_limit;
+ av->grh.traffic_class = grh->traffic_class;
av->port_num = port_num;
}
void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr)
{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
attr->type = RDMA_AH_ATTR_TYPE_ROCE;
- memcpy(rdma_ah_retrieve_grh(attr), &av->grh, sizeof(av->grh));
+
+ memcpy(grh->dgid.raw, av->grh.dgid.raw, sizeof(av->grh.dgid.raw));
+ grh->flow_label = av->grh.flow_label;
+ grh->sgid_index = av->grh.sgid_index;
+ grh->hop_limit = av->grh.hop_limit;
+ grh->traffic_class = av->grh.traffic_class;
+
rdma_ah_set_ah_flags(attr, IB_AH_GRH);
rdma_ah_set_port_num(attr, av->port_num);
}
-void rxe_av_fill_ip_info(struct rxe_av *av,
- struct rdma_ah_attr *attr,
- struct ib_gid_attr *sgid_attr,
- union ib_gid *sgid)
+void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
{
- rdma_gid2ip((struct sockaddr *)&av->sgid_addr, sgid);
+ const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;
+
+ rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
&rdma_ah_read_grh(attr)->dgid);
- av->network_type = ib_gid_to_network_type(sgid_attr->gid_type, sgid);
+ av->network_type = rdma_gid_attr_network_type(sgid_attr);
}
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
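Note: the whole-struct memcpy of the GRH had to go because struct ib_global_route now begins with the reference-counted sgid_attr pointer, which must not be duplicated into the on-the-wire rxe_av or read back out of it. Copying the five plain fields by hand keeps that kernel pointer out of the AV. Abridged 4.19-era layout for reference (an assumption about field order, not part of the patch):

    struct ib_global_route {
            const struct ib_gid_attr *sgid_attr; /* refcounted; never memcpy */
            union ib_gid dgid;
            u32 flow_label;
            u8  sgid_index;
            u8  hop_limit;
            u8  traffic_class;
    };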
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 98d470d1f3fc..83311dd07019 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (wqe->wr.opcode != IB_WR_RDMA_READ &&
wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
+ wqe->status = IB_WC_FATAL_ERR;
return COMPST_ERROR;
}
reset_retry_counters(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index a51ece596c43..87d14f7ef21b 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -43,10 +43,7 @@ void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);
-void rxe_av_fill_ip_info(struct rxe_av *av,
- struct rdma_ah_attr *attr,
- struct ib_gid_attr *sgid_attr,
- union ib_gid *sgid);
+void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 59ec6d918ed4..8094cbaa54a9 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -182,39 +182,19 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
#endif
-/*
- * Derive the net_device from the av.
- * For physical devices, this will just return rxe->ndev.
- * But for VLAN devices, it will return the vlan dev.
- * Caller should dev_put() the returned net_device.
- */
-static struct net_device *rxe_netdev_from_av(struct rxe_dev *rxe,
- int port_num,
- struct rxe_av *av)
-{
- union ib_gid gid;
- struct ib_gid_attr attr;
- struct net_device *ndev = rxe->ndev;
-
- if (ib_get_cached_gid(&rxe->ib_dev, port_num, av->grh.sgid_index,
- &gid, &attr) == 0 &&
- attr.ndev && attr.ndev != ndev)
- ndev = attr.ndev;
- else
- /* Only to ensure that caller may call dev_put() */
- dev_hold(ndev);
-
- return ndev;
-}
-
static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
struct rxe_qp *qp,
struct rxe_av *av)
{
+ const struct ib_gid_attr *attr;
struct dst_entry *dst = NULL;
struct net_device *ndev;
- ndev = rxe_netdev_from_av(rxe, qp->attr.port_num, av);
+ attr = rdma_get_gid_attr(&rxe->ib_dev, qp->attr.port_num,
+ av->grh.sgid_index);
+ if (IS_ERR(attr))
+ return NULL;
+ ndev = attr->ndev;
if (qp_type(qp) == IB_QPT_RC)
dst = sk_dst_get(qp->sk->sk);
@@ -243,9 +223,13 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
rt6_get_cookie((struct rt6_info *)dst);
#endif
}
- }
- dev_put(ndev);
+ if (dst && (qp_type(qp) == IB_QPT_RC)) {
+ dst_hold(dst);
+ sk_dst_set(qp->sk->sk, dst);
+ }
+ }
+ rdma_put_gid_attr(attr);
return dst;
}
@@ -418,11 +402,7 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
av->grh.traffic_class, av->grh.hop_limit, df, xnet);
- if (qp_type(qp) == IB_QPT_RC)
- sk_dst_set(qp->sk->sk, dst);
- else
- dst_release(dst);
-
+ dst_release(dst);
return 0;
}
@@ -450,11 +430,7 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
av->grh.traffic_class,
av->grh.hop_limit);
- if (qp_type(qp) == IB_QPT_RC)
- sk_dst_set(qp->sk->sk, dst);
- else
- dst_release(dst);
-
+ dst_release(dst);
return 0;
}
@@ -536,9 +512,13 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
unsigned int hdr_len;
struct sk_buff *skb;
struct net_device *ndev;
+ const struct ib_gid_attr *attr;
const int port_num = 1;
- ndev = rxe_netdev_from_av(rxe, port_num, av);
+ attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
+ if (IS_ERR(attr))
+ return NULL;
+ ndev = attr->ndev;
if (av->network_type == RDMA_NETWORK_IPV4)
hdr_len = ETH_HLEN + sizeof(struct udphdr) +
@@ -550,10 +530,8 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
GFP_ATOMIC);
- if (unlikely(!skb)) {
- dev_put(ndev);
- return NULL;
- }
+ if (unlikely(!skb))
+ goto out;
skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
@@ -568,7 +546,8 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
pkt->hdr = skb_put_zero(skb, paylen);
pkt->mask |= RXE_GRH_MASK;
- dev_put(ndev);
+out:
+ rdma_put_gid_attr(attr);
return skb;
}
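Note: rxe_netdev_from_av()'s cached-GID lookup is replaced by the refcounted GID attribute API; attr->ndev is only guaranteed valid while the attribute reference is held, so each user drops the reference at the end. A sketch of the pattern (the helper name is hypothetical):

    #include <rdma/ib_verbs.h>
    #include <rdma/ib_cache.h>

    /* Sketch: resolve the netdev behind a GID index, keeping a netdev
     * reference past the GID-attribute put. */
    static struct net_device *ndev_from_gid(struct ib_device *dev, u8 port,
                                            unsigned int index)
    {
            const struct ib_gid_attr *attr;
            struct net_device *ndev = NULL;

            attr = rdma_get_gid_attr(dev, port, index);
            if (IS_ERR(attr))
                    return NULL;

            if (attr->ndev) {
                    ndev = attr->ndev;
                    dev_hold(ndev); /* caller must dev_put() */
            }
            rdma_put_gid_attr(attr);
            return ndev;
    }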
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 1b596fbbe251..4555510d86c4 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -83,7 +83,7 @@ enum rxe_device_param {
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
RXE_MAX_LOG_CQE = 15,
- RXE_MAX_MR = 2 * 1024,
+ RXE_MAX_MR = 256 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
RXE_MAX_EE_RD_ATOM = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b9f7aa1114b2..c58452daffc7 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -49,9 +49,9 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
goto err1;
}
- if (cap->max_send_sge > rxe->attr.max_sge) {
+ if (cap->max_send_sge > rxe->attr.max_send_sge) {
pr_warn("invalid send sge = %d > %d\n",
- cap->max_send_sge, rxe->attr.max_sge);
+ cap->max_send_sge, rxe->attr.max_send_sge);
goto err1;
}
@@ -62,9 +62,9 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
goto err1;
}
- if (cap->max_recv_sge > rxe->attr.max_sge) {
+ if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
pr_warn("invalid recv sge = %d > %d\n",
- cap->max_recv_sge, rxe->attr.max_sge);
+ cap->max_recv_sge, rxe->attr.max_recv_sge);
goto err1;
}
}
@@ -580,9 +580,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
struct ib_udata *udata)
{
int err;
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
@@ -623,30 +620,14 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
qp->attr.qkey = attr->qkey;
if (mask & IB_QP_AV) {
- ib_get_cached_gid(&rxe->ib_dev, 1,
- rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
- &sgid, &sgid_attr);
rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
- rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr,
- &sgid_attr, &sgid);
- if (sgid_attr.ndev)
- dev_put(sgid_attr.ndev);
+ rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr);
}
if (mask & IB_QP_ALT_PATH) {
- u8 sgid_index =
- rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;
-
- ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
- &sgid, &sgid_attr);
-
rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
&attr->alt_ah_attr);
- rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr,
- &sgid_attr, &sgid);
- if (sgid_attr.ndev)
- dev_put(sgid_attr.ndev);
-
+ rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr);
qp->attr.alt_port_num = attr->alt_port_num;
qp->attr.alt_pkey_index = attr->alt_pkey_index;
qp->attr.alt_timeout = attr->alt_timeout;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index dfba44a40f0b..d30dbac24583 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -225,9 +225,14 @@ static int hdr_check(struct rxe_pkt_info *pkt)
goto err1;
}
+ if (unlikely(qpn == 0)) {
+ pr_warn_once("QP 0 not supported");
+ goto err1;
+ }
+
if (qpn != IB_MULTICAST_QPN) {
- index = (qpn == 0) ? port->qp_smi_index :
- ((qpn == 1) ? port->qp_gsi_index : qpn);
+ index = (qpn == 1) ? port->qp_gsi_index : qpn;
+
qp = rxe_pool_get_index(&rxe->qp_pool, index);
if (unlikely(!qp)) {
pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
@@ -256,8 +261,7 @@ static int hdr_check(struct rxe_pkt_info *pkt)
return 0;
err2:
- if (qp)
- rxe_drop_ref(qp);
+ rxe_drop_ref(qp);
err1:
return -EINVAL;
}
@@ -328,6 +332,7 @@ err1:
static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
+ const struct ib_gid_attr *gid_attr;
union ib_gid dgid;
union ib_gid *pdgid;
@@ -339,9 +344,14 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
}
- return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
- IB_GID_TYPE_ROCE_UDP_ENCAP,
- 1, skb->dev, NULL);
+ gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
+ IB_GID_TYPE_ROCE_UDP_ENCAP,
+ 1, skb->dev);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+
+ rdma_put_gid_attr(gid_attr);
+ return 0;
}
/* rxe_rcv is called from the interface driver */
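Note: dgid matching now goes through rdma_find_gid_by_port(), which returns a referenced GID attribute on success; since only existence matters here, the reference is dropped immediately. Sketch:

    #include <rdma/ib_cache.h>

    /* Sketch: existence test for a GID on a port; drop the reference
     * as soon as the answer is known. */
    static int gid_is_present(struct ib_device *dev, const union ib_gid *gid,
                              struct net_device *ndev)
    {
            const struct ib_gid_attr *attr;

            attr = rdma_find_gid_by_port(dev, gid,
                                         IB_GID_TYPE_ROCE_UDP_ENCAP,
                                         1, ndev);
            if (IS_ERR(attr))
                    return PTR_ERR(attr);

            rdma_put_gid_attr(attr);
            return 0;
    }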
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5b57de30dee4..aa5833318372 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -884,6 +884,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
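Note: with VLAN child devices supported on the receive path, completions now report the tag explicitly; consumers must check IB_WC_WITH_VLAN in wc_flags before trusting wc->vlan_id. The producer side, factored out as a sketch:

    #include <linux/if_vlan.h>
    #include <rdma/ib_verbs.h>

    /* Sketch: record the arrival VLAN in a work completion. */
    static void wc_fill_vlan(struct ib_wc *wc, const struct sk_buff *skb)
    {
            if (is_vlan_dev(skb->dev)) {
                    wc->wc_flags |= IB_WC_WITH_VLAN;
                    wc->vlan_id = vlan_dev_vlan_id(skb->dev);
            }
    }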
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9deafc3aa6af..f5b1e0ad6142 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -222,25 +222,11 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd)
return 0;
}
-static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
- struct rxe_av *av)
+static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
+ struct rxe_av *av)
{
- int err;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
-
- err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
- rdma_ah_read_grh(attr)->sgid_index, &sgid,
- &sgid_attr);
- if (err) {
- pr_err("Failed to query sgid. err = %d\n", err);
- return err;
- }
-
rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
- rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid);
- dev_put(sgid_attr.ndev);
- return 0;
+ rxe_av_fill_ip_info(av, attr);
}
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
@@ -255,28 +241,17 @@ static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
err = rxe_av_chk_attr(rxe, attr);
if (err)
- goto err1;
+ return ERR_PTR(err);
ah = rxe_alloc(&rxe->ah_pool);
- if (!ah) {
- err = -ENOMEM;
- goto err1;
- }
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
rxe_add_ref(pd);
ah->pd = pd;
- err = rxe_init_av(rxe, attr, &ah->av);
- if (err)
- goto err2;
-
+ rxe_init_av(rxe, attr, &ah->av);
return &ah->ibah;
-
-err2:
- rxe_drop_ref(pd);
- rxe_drop_ref(ah);
-err1:
- return ERR_PTR(err);
}
static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
@@ -289,10 +264,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
if (err)
return err;
- err = rxe_init_av(rxe, attr, &ah->av);
- if (err)
- return err;
-
+ rxe_init_av(rxe, attr, &ah->av);
return 0;
}
@@ -315,7 +287,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah)
return 0;
}
-static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
int err;
int i;
@@ -466,8 +438,8 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq)
return 0;
}
-static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
unsigned long flags;
@@ -582,7 +554,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp)
return 0;
}
-static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length)
{
int num_sge = ibwr->num_sge;
@@ -610,7 +582,7 @@ err1:
}
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
- struct ib_send_wr *ibwr)
+ const struct ib_send_wr *ibwr)
{
wr->wr_id = ibwr->wr_id;
wr->num_sge = ibwr->num_sge;
@@ -665,7 +637,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
}
}
-static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length,
struct rxe_send_wqe *wqe)
{
@@ -713,7 +685,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
return 0;
}
-static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, u32 length)
{
int err;
@@ -754,8 +726,8 @@ err1:
return err;
}
-static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int err = 0;
unsigned int mask;
@@ -797,8 +769,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
return err;
}
-static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct rxe_qp *qp = to_rqp(ibqp);
@@ -820,8 +792,8 @@ static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return rxe_post_send_kernel(qp, wr, bad_wr);
}
-static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
struct rxe_qp *qp = to_rqp(ibqp);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a50b062ed13e..1abe3c62f106 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -91,11 +91,9 @@ enum {
IPOIB_STOP_REAPER = 7,
IPOIB_FLAG_ADMIN_CM = 9,
IPOIB_FLAG_UMCAST = 10,
- IPOIB_STOP_NEIGH_GC = 11,
IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_FLAG_DEV_ADDR_SET = 13,
IPOIB_FLAG_DEV_ADDR_CTRL = 14,
- IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -252,11 +250,11 @@ struct ipoib_cm_tx {
struct ipoib_neigh *neigh;
struct ipoib_path *path;
struct ipoib_tx_buf *tx_ring;
- unsigned tx_head;
- unsigned tx_tail;
+ unsigned int tx_head;
+ unsigned int tx_tail;
unsigned long flags;
u32 mtu;
- unsigned max_send_sge;
+ unsigned int max_send_sge;
};
struct ipoib_cm_rx_buf {
@@ -325,15 +323,22 @@ struct ipoib_dev_priv {
spinlock_t lock;
struct net_device *dev;
+ void (*next_priv_destructor)(struct net_device *dev);
struct napi_struct send_napi;
struct napi_struct recv_napi;
unsigned long flags;
+ /*
+ * This protects access to the child_intfs list.
+ * To READ from child_intfs the RTNL or vlan_rwsem read side must be
+ * held. To WRITE, RTNL and the vlan_rwsem write side must be held (in
+ * that order). This lock exists because we have a few contexts where
+ * we need the child_intfs, but do not want to grab the RTNL.
+ */
struct rw_semaphore vlan_rwsem;
struct mutex mcast_mutex;
- struct mutex sysfs_mutex;
struct rb_root path_tree;
struct list_head path_list;
@@ -373,8 +378,8 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring;
struct ipoib_tx_buf *tx_ring;
- unsigned tx_head;
- unsigned tx_tail;
+ unsigned int tx_head;
+ unsigned int tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
struct ib_wc send_wc[MAX_SEND_CQE];
@@ -404,7 +409,7 @@ struct ipoib_dev_priv {
#endif
u64 hca_caps;
struct ipoib_ethtool_st ethtool;
- unsigned max_send_sge;
+ unsigned int max_send_sge;
bool sm_fullmember_sendonly_support;
const struct net_device_ops *rn_ops;
};
@@ -414,7 +419,7 @@ struct ipoib_ah {
struct ib_ah *ah;
struct list_head list;
struct kref ref;
- unsigned last_send;
+ unsigned int last_send;
int valid;
};
@@ -483,6 +488,7 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
kref_put(&ah->ref, ipoib_free_ah);
}
int ipoib_open(struct net_device *dev);
+void ipoib_intf_free(struct net_device *dev);
int ipoib_add_pkey_attr(struct net_device *dev);
int ipoib_add_umcast_attr(struct net_device *dev);
@@ -510,9 +516,6 @@ void ipoib_ib_dev_down(struct net_device *dev);
int ipoib_ib_dev_stop_default(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
-int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_dev_cleanup(struct net_device *dev);
-
void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_carrier_on_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
@@ -600,7 +603,6 @@ void ipoib_pkey_open(struct ipoib_dev_priv *priv);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
-void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
#define IPOIB_FLAGS_RC 0x80
#define IPOIB_FLAGS_UC 0x40
@@ -729,7 +731,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
static inline
int ipoib_cm_dev_init(struct net_device *dev)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 6535d9beb24d..ea01b8dd2be6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -78,7 +78,7 @@ static struct ib_send_wr ipoib_cm_rx_drain_wr = {
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event);
+ const struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
u64 mapping[IPOIB_CM_RX_SG])
@@ -94,7 +94,6 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- struct ib_recv_wr *bad_wr;
int i, ret;
priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
@@ -102,7 +101,7 @@ static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
for (i = 0; i < priv->cm.num_frags; ++i)
priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
- ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
+ ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
@@ -120,7 +119,6 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
struct ib_sge *sge, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- struct ib_recv_wr *bad_wr;
int i, ret;
wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
@@ -128,7 +126,7 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
for (i = 0; i < IPOIB_CM_RX_SG; ++i)
sge[i].addr = rx->rx_ring[id].mapping[i];
- ret = ib_post_recv(rx->qp, wr, &bad_wr);
+ ret = ib_post_recv(rx->qp, wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
@@ -212,7 +210,6 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
- struct ib_send_wr *bad_wr;
struct ipoib_cm_rx *p;
/* We only reserved 1 extra slot in CQ for drain WRs, so
@@ -227,7 +224,7 @@ static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
*/
p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
- if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
+ if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
ipoib_warn(priv, "failed to post drain wr\n");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
@@ -275,7 +272,7 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp,
- unsigned psn)
+ unsigned int psn)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
@@ -363,7 +360,7 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
if (!rx->rx_ring)
return -ENOMEM;
- t = kmalloc(sizeof *t, GFP_KERNEL);
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
if (!t) {
ret = -ENOMEM;
goto err_free_1;
@@ -421,8 +418,9 @@ err_free_1:
}
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
- struct ib_qp *qp, struct ib_cm_req_event_param *req,
- unsigned psn)
+ struct ib_qp *qp,
+ const struct ib_cm_req_event_param *req,
+ unsigned int psn)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_data data = {};
@@ -432,7 +430,7 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
rep.private_data = &data;
- rep.private_data_len = sizeof data;
+ rep.private_data_len = sizeof(data);
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
rep.srq = ipoib_cm_has_srq(dev);
@@ -441,16 +439,17 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
return ib_send_cm_rep(cm_id, &rep);
}
-static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct net_device *dev = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *p;
- unsigned psn;
+ unsigned int psn;
int ret;
ipoib_dbg(priv, "REQ arrived\n");
- p = kzalloc(sizeof *p, GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev = dev;
@@ -503,7 +502,7 @@ err_qp:
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
@@ -547,7 +546,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
0, PAGE_SIZE);
--skb_shinfo(skb)->nr_frags;
} else {
- size = min(length, (unsigned) PAGE_SIZE);
+ size = min_t(unsigned int, length, PAGE_SIZE);
skb_frag_size_set(frag, size);
skb->data_len += size;
@@ -641,8 +640,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
}
- frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
- (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+ frags = PAGE_ALIGN(wc->byte_len -
+ min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) /
+ PAGE_SIZE;
newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
mapping, GFP_ATOMIC);
@@ -657,7 +657,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
- memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
+ memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
@@ -698,13 +698,11 @@ static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ipoib_tx_buf *tx_req)
{
- struct ib_send_wr *bad_wr;
-
ipoib_build_sge(priv, tx_req);
priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
- return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
+ return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL);
}
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
@@ -712,7 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_tx_buf *tx_req;
int rc;
- unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
+ unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -982,7 +980,8 @@ void ipoib_cm_dev_stop(struct net_device *dev)
cancel_delayed_work(&priv->cm.stale_task);
}
-static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct ipoib_cm_tx *p = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
@@ -1068,8 +1067,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
- attr.cap.max_send_sge =
- min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
+ attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
tx->max_send_sge = attr.cap.max_send_sge;
@@ -1094,7 +1093,7 @@ static int ipoib_cm_send_req(struct net_device *dev,
req.qp_num = qp->qp_num;
req.qp_type = qp->qp_type;
req.private_data = &data;
- req.private_data_len = sizeof data;
+ req.private_data_len = sizeof(data);
req.flow_control = 0;
req.starting_psn = 0; /* FIXME */
@@ -1152,7 +1151,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
ret = -ENOMEM;
goto err_tx;
}
- memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
+ memset(p->tx_ring, 0, ipoib_sendq_size * sizeof(*p->tx_ring));
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
memalloc_noio_restore(noio_flag);
@@ -1248,7 +1247,7 @@ timeout:
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ipoib_cm_tx *tx = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
@@ -1305,7 +1304,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_tx *tx;
- tx = kzalloc(sizeof *tx, GFP_ATOMIC);
+ tx = kzalloc(sizeof(*tx), GFP_ATOMIC);
if (!tx)
return NULL;
@@ -1370,7 +1369,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
neigh->daddr + QPN_AND_OPTIONS_OFFSET);
goto free_neigh;
}
- memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
+ memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
@@ -1428,7 +1427,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned long flags;
- unsigned mtu = priv->mcast_mtu;
+ unsigned int mtu = priv->mcast_mtu;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
@@ -1518,19 +1517,16 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
{
struct net_device *dev = to_net_dev(d);
int ret;
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
- if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
- return -EPERM;
-
- if (!mutex_trylock(&priv->sysfs_mutex))
- return restart_syscall();
if (!rtnl_trylock()) {
- mutex_unlock(&priv->sysfs_mutex);
return restart_syscall();
}
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
ret = ipoib_set_mode(dev, buf);
/* The assumption is that the function ipoib_set_mode returned
@@ -1539,7 +1535,6 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
*/
if (ret != -EBUSY)
rtnl_unlock();
- mutex_unlock(&priv->sysfs_mutex);
return (!ret || ret == -EBUSY) ? count : ret;
}
@@ -1564,7 +1559,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
- if (PTR_ERR(priv->cm.srq) != -ENOSYS)
+ if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP)
pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
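Note: set_mode() no longer needs sysfs_mutex or the GOING_DOWN flag; holding RTNL and checking reg_state is enough to serialize against unregister_netdev(). The shape of such a guarded sysfs store, as a sketch (the store body is hypothetical):

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>
    #include <linux/sched/signal.h>

    /* Sketch: a sysfs store that must not race netdev unregistration. */
    static ssize_t guarded_store(struct net_device *dev, const char *buf,
                                 size_t count)
    {
            if (!rtnl_trylock())
                    return restart_syscall(); /* avoid rtnl<->sysfs deadlock */

            if (dev->reg_state != NETREG_REGISTERED) {
                    rtnl_unlock();
                    return -EPERM; /* device is being torn down */
            }

            /* ... perform the change under RTNL ... */
            rtnl_unlock();
            return count;
    }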
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 2706bf26cbac..83429925dfc6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -102,7 +102,7 @@ static int ipoib_set_coalesce(struct net_device *dev,
ret = rdma_set_cq_moderation(priv->recv_cq,
coal->rx_max_coalesced_frames,
coal->rx_coalesce_usecs);
- if (ret && ret != -ENOSYS) {
+ if (ret && ret != -EOPNOTSUPP) {
ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
return ret;
}
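Note: same errno policy as the rdmavt and ipoib CM hunks above: providers signal an unimplemented optional verb with -EOPNOTSUPP (never -ENOSYS, which is reserved for missing syscalls), and callers tolerate exactly that value. Sketch:

    #include <rdma/ib_verbs.h>

    /* Sketch: treat an unimplemented optional verb as a no-op. */
    static int try_cq_moderation(struct ib_cq *cq, u16 count, u16 usecs)
    {
            int ret = rdma_set_cq_moderation(cq, count, usecs);

            return ret == -EOPNOTSUPP ? 0 : ret;
    }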
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index ea302b054601..178488028734 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -262,15 +262,15 @@ static const struct file_operations ipoib_path_fops = {
void ipoib_create_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- char name[IFNAMSIZ + sizeof "_path"];
+ char name[IFNAMSIZ + sizeof("_path")];
- snprintf(name, sizeof name, "%s_mcg", dev->name);
+ snprintf(name, sizeof(name), "%s_mcg", dev->name);
priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_mcg_fops);
if (!priv->mcg_dentry)
ipoib_warn(priv, "failed to create mcg debug file\n");
- snprintf(name, sizeof name, "%s_path", dev->name);
+ snprintf(name, sizeof(name), "%s_path", dev->name);
priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_path_fops);
if (!priv->path_dentry)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f47f9ace1f48..9006a13af1de 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -40,6 +40,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <rdma/ib_cache.h>
#include "ipoib.h"
@@ -57,7 +58,7 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ipoib_ah *ah;
struct ib_ah *vah;
- ah = kmalloc(sizeof *ah, GFP_KERNEL);
+ ah = kmalloc(sizeof(*ah), GFP_KERNEL);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -100,7 +101,6 @@ static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- struct ib_recv_wr *bad_wr;
int ret;
priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
@@ -108,7 +108,7 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
- ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
@@ -202,7 +202,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
memcpy(mapping, priv->rx_ring[wr_id].mapping,
- IPOIB_UD_RX_SG * sizeof *mapping);
+ IPOIB_UD_RX_SG * sizeof(*mapping));
/*
* If we can't allocate a new RX buffer, dump
@@ -541,7 +541,6 @@ static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req,
void *head, int hlen)
{
- struct ib_send_wr *bad_wr;
struct sk_buff *skb = tx_req->skb;
ipoib_build_sge(priv, tx_req);
@@ -558,7 +557,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
} else
priv->tx_wr.wr.opcode = IB_WR_SEND;
- return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
+ return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
@@ -568,7 +567,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
int hlen, rc;
void *phead;
- unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
+ unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -1069,7 +1068,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
bool ret = false;
netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
- if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
+ if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
return false;
netif_addr_lock_bh(priv->dev);
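Note: ib_query_gid(..., NULL) becomes rdma_query_gid(), which reads the GID table cache directly and, unlike the attribute-returning lookups above, hands back no reference that would need dropping. Sketch:

    #include <rdma/ib_cache.h>

    /* Sketch: fetch GID 0 of a port from the cache; returns 0 or a
     * negative errno, with no gid_attr reference to release. */
    static int read_gid0(struct ib_device *dev, u8 port, union ib_gid *gid)
    {
            return rdma_query_gid(dev, port, 0, gid);
    }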
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 26cde95bc0f3..e3d28f9ad9c0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -215,11 +215,6 @@ static int ipoib_stop(struct net_device *dev)
return 0;
}
-static void ipoib_uninit(struct net_device *dev)
-{
- ipoib_dev_cleanup(dev);
-}
-
static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -634,7 +629,7 @@ struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
struct ipoib_path_iter *iter;
- iter = kmalloc(sizeof *iter, GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
@@ -770,8 +765,10 @@ static void path_rec_completion(int status,
struct rdma_ah_attr av;
if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
- pathrec, &av))
+ pathrec, &av, NULL)) {
ah = ipoib_create_ah(dev, priv->pd, &av);
+ rdma_destroy_ah_attr(&av);
+ }
}
spin_lock_irqsave(&priv->lock, flags);
@@ -883,7 +880,7 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
if (!priv->broadcast)
return NULL;
- path = kzalloc(sizeof *path, GFP_ATOMIC);
+ path = kzalloc(sizeof(*path), GFP_ATOMIC);
if (!path)
return NULL;
@@ -1199,11 +1196,13 @@ static void ipoib_timeout(struct net_device *dev)
static int ipoib_hard_header(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr,
+ const void *saddr,
+ unsigned int len)
{
struct ipoib_header *header;
- header = skb_push(skb, sizeof *header);
+ header = skb_push(skb, sizeof(*header));
header->proto = htons(type);
header->reserved = 0;
@@ -1306,9 +1305,6 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
int i;
LIST_HEAD(remove_list);
- if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- return;
-
spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl,
@@ -1320,9 +1316,6 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
/* neigh is obsolete if it was idle for two GC periods */
dt = 2 * arp_tbl.gc_interval;
neigh_obsolete = jiffies - dt;
- /* handle possible race condition */
- if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- goto out_unlock;
for (i = 0; i < htbl->size; i++) {
struct ipoib_neigh *neigh;
@@ -1360,9 +1353,8 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
- if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(priv->wq, &priv->neigh_reap_task,
- arp_tbl.gc_interval);
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
}
@@ -1371,7 +1363,7 @@ static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
{
struct ipoib_neigh *neigh;
- neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
+ neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
if (!neigh)
return NULL;
@@ -1524,9 +1516,8 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
if (!htbl)
return -ENOMEM;
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
size = roundup_pow_of_two(arp_tbl.gc_thresh3);
- buckets = kcalloc(size, sizeof(*buckets), GFP_KERNEL);
+ buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
if (!buckets) {
kfree(htbl);
return -ENOMEM;
@@ -1539,7 +1530,6 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
atomic_set(&ntbl->entries, 0);
/* start garbage collection */
- clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
@@ -1554,7 +1544,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct ipoib_neigh __rcu **buckets = htbl->buckets;
struct ipoib_neigh_table *ntbl = htbl->ntbl;
- kfree(buckets);
+ kvfree(buckets);
kfree(htbl);
complete(&ntbl->deleted);
}
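Note: the bucket array is sized from arp_tbl.gc_thresh3 and can grow large enough that kcalloc() fails under memory fragmentation; kvcalloc() falls back to vmalloc, and the free side must match with kvfree(). Sketch of the pairing:

    #include <linux/mm.h>

    /* Sketch: allocation that may be physically or virtually contiguous. */
    static void *alloc_table(size_t entries, size_t entry_size)
    {
            return kvcalloc(entries, entry_size, GFP_KERNEL);
    }

    static void free_table(void *table)
    {
            kvfree(table); /* correct for kmalloc- and vmalloc-backed memory */
    }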
@@ -1649,15 +1639,11 @@ out_unlock:
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- int stopped;
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
init_completion(&priv->ntbl.deleted);
- /* Stop GC if called at init fail need to cancel work */
- stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- if (!stopped)
- cancel_delayed_work(&priv->neigh_reap_task);
+ cancel_delayed_work_sync(&priv->neigh_reap_task);
ipoib_flush_neighs(priv);
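Note: the IPOIB_STOP_NEIGH_GC flag existed only to stop a self-rearming delayed work; cancel_delayed_work_sync() already waits for a running instance and prevents it from requeueing itself, so the flag and its racy double-checks can go. Sketch:

    #include <linux/workqueue.h>

    /* Sketch: tearing down a self-rearming delayed work without a flag. */
    static void stop_self_arming_work(struct delayed_work *dwork)
    {
            /* Waits for a running callback and cancels any requeue. */
            cancel_delayed_work_sync(dwork);
    }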
@@ -1755,13 +1741,11 @@ static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
}
-int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
+static int ipoib_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret = -ENOMEM;
- priv->ca = ca;
- priv->port = port;
priv->qp = NULL;
/*
@@ -1777,7 +1761,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
/* create pd, which used both for control and datapath*/
priv->pd = ib_alloc_pd(priv->ca, 0);
if (IS_ERR(priv->pd)) {
- pr_warn("%s: failed to allocate PD\n", ca->name);
+ pr_warn("%s: failed to allocate PD\n", priv->ca->name);
goto clean_wq;
}
@@ -1787,7 +1771,8 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
goto out_free_pd;
}
- if (ipoib_neigh_hash_init(priv) < 0) {
+ ret = ipoib_neigh_hash_init(priv);
+ if (ret) {
pr_warn("%s failed to init neigh hash\n", dev->name);
goto out_dev_uninit;
}
@@ -1796,12 +1781,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_open(dev)) {
pr_warn("%s failed to open device\n", dev->name);
ret = -ENODEV;
- goto out_dev_uninit;
+ goto out_hash_uninit;
}
}
return 0;
+out_hash_uninit:
+ ipoib_neigh_hash_uninit(dev);
+
out_dev_uninit:
ipoib_ib_dev_cleanup(dev);
@@ -1821,21 +1809,151 @@ out:
return ret;
}
-void ipoib_dev_cleanup(struct net_device *dev)
+/*
+ * This must be called before doing an unregister_netdev on a parent device to
+ * shutdown the IB event handler.
+ */
+static void ipoib_parent_unregister_pre(struct net_device *ndev)
{
- struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv;
- LIST_HEAD(head);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
- ASSERT_RTNL();
+ /*
+ * ipoib_set_mac checks netif_running before pushing work; clearing
+ * running ensures it will not add more work.
+ */
+ rtnl_lock();
+ dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
+ rtnl_unlock();
- /* Delete any child interfaces first */
- list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
- /* Stop GC on child */
- set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
- cancel_delayed_work(&cpriv->neigh_reap_task);
- unregister_netdevice_queue(cpriv->dev, &head);
+ /* ipoib_event() cannot be running once this returns */
+ ib_unregister_event_handler(&priv->event_handler);
+
+ /*
+ * Work on the queue grabs the rtnl lock, so this cannot be done while
+ * also holding it.
+ */
+ flush_workqueue(ipoib_workqueue);
+}
+
+static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
+{
+ priv->hca_caps = priv->ca->attrs.device_cap_flags;
+
+ if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+ priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (priv->hca_caps & IB_DEVICE_UD_TSO)
+ priv->dev->hw_features |= NETIF_F_TSO;
+
+ priv->dev->features |= priv->dev->hw_features;
+ }
+}
+
+static int ipoib_parent_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ib_port_attr attr;
+ int result;
+
+ result = ib_query_port(priv->ca, priv->port, &attr);
+ if (result) {
+ pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
+ priv->port);
+ return result;
+ }
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
}
- unregister_netdevice_many(&head);
+
+ result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
+ if (result) {
+ pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+ sizeof(union ib_gid));
+
+ SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_id = priv->port - 1;
+
+ return 0;
+}
+
+static void ipoib_child_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ dev_hold(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_add_tail(&priv->list, &ppriv->child_intfs);
+ up_write(&ppriv->vlan_rwsem);
+
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
+ set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
+}
+
+static int ipoib_ndo_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ int rc;
+
+ if (priv->parent) {
+ ipoib_child_init(ndev);
+ } else {
+ rc = ipoib_parent_init(ndev);
+ if (rc)
+ return rc;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ ndev->max_mtu = IPOIB_CM_MTU;
+
+ ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ priv->pkey |= 0x8000;
+
+ ndev->broadcast[8] = priv->pkey >> 8;
+ ndev->broadcast[9] = priv->pkey & 0xff;
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ ipoib_set_dev_features(priv);
+
+ rc = ipoib_dev_init(ndev);
+ if (rc) {
+ pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
+ priv->ca->name, priv->dev->name, priv->port, rc);
+ }
+
+ return 0;
+}
+
+static void ipoib_ndo_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ASSERT_RTNL();
+
+ /*
+ * ipoib_remove_one guarantees the children are removed before the
+ * parent, and that is the only place where a parent can be removed.
+ */
+ WARN_ON(!list_empty(&priv->child_intfs));
ipoib_neigh_hash_uninit(dev);
@@ -1847,6 +1965,16 @@ void ipoib_dev_cleanup(struct net_device *dev)
destroy_workqueue(priv->wq);
priv->wq = NULL;
}
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+
+ dev_put(priv->parent);
+ }
}
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
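Note: moving setup into ndo_init/ndo_uninit lets register_netdev()/unregister_netdev() drive the whole lifecycle, and needs_free_netdev (set in ipoib_setup_common above) makes unregister free the netdev as well. The resulting probe shape, as a sketch under those assumptions:

    #include <linux/err.h>
    #include <linux/netdevice.h>

    /* Sketch: probe reduced to register once ->ndo_init() does the work. */
    static struct net_device *probe_port(struct net_device *ndev)
    {
            int rc = register_netdev(ndev); /* runs ->ndo_init() */

            if (rc) {
                    free_netdev(ndev); /* nothing was registered */
                    return ERR_PTR(rc);
            }
            /* Teardown is a single unregister_netdev(); with
             * needs_free_netdev set it frees ndev too. */
            return ndev;
    }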
@@ -1894,7 +2022,8 @@ static const struct header_ops ipoib_header_ops = {
};
static const struct net_device_ops ipoib_netdev_ops_pf = {
- .ndo_uninit = ipoib_uninit,
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
.ndo_change_mtu = ipoib_change_mtu,
@@ -1913,7 +2042,8 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
- .ndo_uninit = ipoib_uninit,
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
.ndo_change_mtu = ipoib_change_mtu,
@@ -1945,6 +2075,13 @@ void ipoib_setup_common(struct net_device *dev)
netif_keep_dst(dev);
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
+
+ /*
+ * unregister_netdev always frees the netdev, we use this mode
+ * consistently to unify all the various unregister paths, including
+ * those connected to rtnl_link_ops which require it.
+ */
+ dev->needs_free_netdev = true;
}
static void ipoib_build_priv(struct net_device *dev)
@@ -1955,7 +2092,6 @@ static void ipoib_build_priv(struct net_device *dev)
spin_lock_init(&priv->lock);
init_rwsem(&priv->vlan_rwsem);
mutex_init(&priv->mcast_mutex);
- mutex_init(&priv->sysfs_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
@@ -1999,9 +2135,7 @@ static struct net_device
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
- rn->free_rdma_netdev = free_netdev;
rn->hca = hca;
-
dev->netdev_ops = &ipoib_netdev_default_pf;
return dev;
@@ -2039,6 +2173,9 @@ struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
if (!priv)
return NULL;
+ priv->ca = hca;
+ priv->port = port;
+
dev = ipoib_get_netdev(hca, port, name);
if (!dev)
goto free_priv;
@@ -2053,6 +2190,15 @@ struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
rn = netdev_priv(dev);
rn->clnt_priv = priv;
+
+ /*
+ * Only the child register_netdev flows can handle priv_destructor
+ * being set, so we force it to NULL here and handle manually until it
+ * is safe to turn on.
+ */
+ priv->next_priv_destructor = dev->priv_destructor;
+ dev->priv_destructor = NULL;
+
ipoib_build_priv(dev);
return priv;
@@ -2061,6 +2207,27 @@ free_priv:
return NULL;
}
+void ipoib_intf_free(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ dev->priv_destructor = priv->next_priv_destructor;
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+
+ /*
+ * There are some error flows around register_netdev failing that may
+ * attempt to call priv_destructor twice; prevent that from happening.
+ */
+ dev->priv_destructor = NULL;
+
+ /* unregister/destroy is very complicated. Make bugs more obvious. */
+ rn->clnt_priv = NULL;
+
+ kfree(priv);
+}
+
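Note: dev->priv_destructor must stay NULL until registration completes; ipoib_add_port() below arms it only after register_netdev() succeeds, because the error path still needs priv alive to run ipoib_parent_unregister_pre(). The sequencing, as a sketch (intf_free stands in for ipoib_intf_free):

    #include <linux/netdevice.h>

    /* Sketch: arm priv_destructor only once registration succeeded. */
    static int register_with_deferred_destructor(struct net_device *ndev,
                    void (*intf_free)(struct net_device *))
    {
            int rc;

            ndev->priv_destructor = NULL;   /* manual cleanup while setting up */
            rc = register_netdev(ndev);
            if (rc) {
                    intf_free(ndev);        /* by hand; priv is still valid */
                    free_netdev(ndev);
                    return rc;
            }
            ndev->priv_destructor = intf_free; /* core owns cleanup from here */
            return 0;
    }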
static ssize_t show_pkey(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2186,12 +2353,6 @@ static ssize_t create_child(struct device *dev,
if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
return -EINVAL;
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- pkey |= 0x8000;
-
ret = ipoib_vlan_add(to_net_dev(dev), pkey);
return ret ? ret : count;
@@ -2223,87 +2384,19 @@ int ipoib_add_pkey_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_pkey);
}
-void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
-{
- priv->hca_caps = hca->attrs.device_cap_flags;
-
- if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
- priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-
- if (priv->hca_caps & IB_DEVICE_UD_TSO)
- priv->dev->hw_features |= NETIF_F_TSO;
-
- priv->dev->features |= priv->dev->hw_features;
- }
-}
-
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u8 port)
{
struct ipoib_dev_priv *priv;
- struct ib_port_attr attr;
- struct rdma_netdev *rn;
- int result = -ENOMEM;
+ struct net_device *ndev;
+ int result;
priv = ipoib_intf_alloc(hca, port, format);
if (!priv) {
pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
- goto alloc_mem_failed;
- }
-
- SET_NETDEV_DEV(priv->dev, hca->dev.parent);
- priv->dev->dev_id = port - 1;
-
- result = ib_query_port(hca, port, &attr);
- if (result) {
- pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
- goto device_init_failed;
- }
-
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
-
- /* MTU will be reset when mcast join happens */
- priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
- priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
- priv->dev->max_mtu = IPOIB_CM_MTU;
-
- priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
-
- result = ib_query_pkey(hca, port, 0, &priv->pkey);
- if (result) {
- pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
- }
-
- ipoib_set_dev_features(priv, hca);
-
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- priv->pkey |= 0x8000;
-
- priv->dev->broadcast[8] = priv->pkey >> 8;
- priv->dev->broadcast[9] = priv->pkey & 0xff;
-
- result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
- if (result) {
- pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
- }
-
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
- sizeof(union ib_gid));
- set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
-
- result = ipoib_dev_init(priv->dev, hca, port);
- if (result) {
- pr_warn("%s: failed to initialize port %d (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
+ return ERR_PTR(-ENOMEM);
}
+ ndev = priv->dev;
INIT_IB_EVENT_HANDLER(&priv->event_handler,
priv->ca, ipoib_event);
@@ -2312,46 +2405,43 @@ static struct net_device *ipoib_add_port(const char *format,
/* call event handler to ensure pkey in sync */
queue_work(ipoib_workqueue, &priv->flush_heavy);
- result = register_netdev(priv->dev);
+ result = register_netdev(ndev);
if (result) {
pr_warn("%s: couldn't register ipoib port %d; error %d\n",
hca->name, port, result);
- goto register_failed;
+
+ ipoib_parent_unregister_pre(ndev);
+ ipoib_intf_free(ndev);
+ free_netdev(ndev);
+
+ return ERR_PTR(result);
}
- result = -ENOMEM;
- if (ipoib_cm_add_mode_attr(priv->dev))
+	/*
+	 * We cannot set priv_destructor before register_netdev because we
+	 * need priv to always be valid during the error flow, so that
+	 * ipoib_parent_unregister_pre() can run. Instead handle it manually
+	 * and only enter priv_destructor mode once we are fully registered.
+	 */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_pkey_attr(priv->dev))
+ if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_umcast_attr(priv->dev))
+ if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
+ if (device_create_file(&ndev->dev, &dev_attr_create_child))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
+ if (device_create_file(&ndev->dev, &dev_attr_delete_child))
goto sysfs_failed;
- return priv->dev;
+ return ndev;
sysfs_failed:
- unregister_netdev(priv->dev);
-
-register_failed:
- ib_unregister_event_handler(&priv->event_handler);
- flush_workqueue(ipoib_workqueue);
- /* Stop GC if started before flush */
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
- ipoib_dev_cleanup(priv->dev);
-
-device_init_failed:
- rn = netdev_priv(priv->dev);
- rn->free_rdma_netdev(priv->dev);
- kfree(priv);
-
-alloc_mem_failed:
- return ERR_PTR(result);
+ ipoib_parent_unregister_pre(ndev);
+ unregister_netdev(ndev);
+ return ERR_PTR(-ENOMEM);
}
static void ipoib_add_one(struct ib_device *device)
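The comment in the hunk above states the ordering constraint: priv_destructor may only be installed once register_netdev() has succeeded, because the failure path still needs priv alive to run ipoib_parent_unregister_pre(). A sketch of that ordering, with illustrative helper names:

	static int example_register(struct net_device *ndev)
	{
		int ret = register_netdev(ndev);

		if (ret) {
			/* priv is still valid here; tear down by hand. */
			example_unregister_pre(ndev);
			example_intf_free(ndev);
			free_netdev(ndev);
			return ret;
		}

		/* From here unregister_netdev() runs the destructor for us. */
		ndev->priv_destructor = example_intf_free;
		return 0;
	}
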
@@ -2362,7 +2452,7 @@ static void ipoib_add_one(struct ib_device *device)
int p;
int count = 0;
- dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
+ dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
return;
@@ -2396,39 +2486,18 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
return;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
- struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
-
- ib_unregister_event_handler(&priv->event_handler);
- flush_workqueue(ipoib_workqueue);
-
- /* mark interface in the middle of destruction */
- set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+ LIST_HEAD(head);
+ ipoib_parent_unregister_pre(priv->dev);
rtnl_lock();
- dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
- rtnl_unlock();
-
- /* Stop GC */
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
-
- /* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
- mutex_lock(&priv->sysfs_mutex);
- unregister_netdev(priv->dev);
- mutex_unlock(&priv->sysfs_mutex);
-
- parent_rn->free_rdma_netdev(priv->dev);
- list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
- struct rdma_netdev *child_rn;
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
+ list)
+ unregister_netdevice_queue(cpriv->dev, &head);
+ unregister_netdevice_queue(priv->dev, &head);
+ unregister_netdevice_many(&head);
- child_rn = netdev_priv(cpriv->dev);
- child_rn->free_rdma_netdev(cpriv->dev);
- kfree(cpriv);
- }
-
- kfree(priv);
+ rtnl_unlock();
}
kfree(dev_list);
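ipoib_remove_one now collects the parent and every child netdev on a local list and takes them down in one pass: unregister_netdevice_queue() only links a device onto the list, and unregister_netdevice_many() then unregisters the whole batch under a single rtnl_lock, amortizing the notifier work. A sketch of the idiom with an illustrative container type:

	static void example_remove_all(struct list_head *privs)
	{
		struct example_priv *p, *tmp;
		LIST_HEAD(head);

		rtnl_lock();
		list_for_each_entry_safe(p, tmp, privs, list)
			unregister_netdevice_queue(p->dev, &head);
		unregister_netdevice_many(&head);	/* one pass for all */
		rtnl_unlock();
	}
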
@@ -2476,8 +2545,7 @@ static int __init ipoib_init_module(void)
* its private workqueue, and we only queue up flush events
* on our global flush workqueue. This avoids the deadlocks.
*/
- ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
- WQ_MEM_RECLAIM);
+ ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 6709328d90f8..b9e9562f5034 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -140,7 +140,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
{
struct ipoib_mcast *mcast;
- mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof(*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!mcast)
return NULL;
@@ -822,6 +822,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
+ neigh->ah->valid = 1;
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
@@ -917,7 +918,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
- memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
+ memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -997,7 +998,7 @@ struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
struct ipoib_mcast_iter *iter;
- iter = kmalloc(sizeof *iter, GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 3e44087935ae..d4d553a51fa9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -122,15 +122,6 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
- if (child_pkey == 0 || child_pkey == 0x8000)
- return -EINVAL;
-
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- child_pkey |= 0x8000;
-
err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
child_pkey, IPOIB_RTNL_CHILD);
@@ -139,19 +130,6 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
return err;
}
-static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
-{
- struct ipoib_dev_priv *priv, *ppriv;
-
- priv = ipoib_priv(dev);
- ppriv = ipoib_priv(priv->parent);
-
- down_write(&ppriv->vlan_rwsem);
- unregister_netdevice_queue(dev, head);
- list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
-}
-
static size_t ipoib_get_size(const struct net_device *dev)
{
return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
@@ -167,7 +145,6 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.setup = ipoib_setup_common,
.newlink = ipoib_new_child_link,
.changelink = ipoib_changelink,
- .dellink = ipoib_unregister_child_dev,
.get_size = ipoib_get_size,
.fill_info = ipoib_fill_info,
};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 984a88096f39..9f36ca786df8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -52,7 +52,7 @@ int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
if (set_qkey) {
ret = -ENOMEM;
- qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr)
goto out;
@@ -147,7 +147,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.cap = {
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
- .max_send_sge = min_t(u32, priv->ca->attrs.max_sge,
+ .max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
MAX_SKB_FRAGS + 1),
.max_recv_sge = IPOIB_UD_RX_SG
},
@@ -168,8 +168,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
} else
- if (ret != -ENOSYS)
- return -ENODEV;
+ if (ret != -EOPNOTSUPP)
+ return ret;
req_vec = (priv->port - 1) * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 55a9b71ed05a..341753fbda54 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -50,68 +50,112 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
+static bool is_child_unique(struct ipoib_dev_priv *ppriv,
+ struct ipoib_dev_priv *priv)
+{
+ struct ipoib_dev_priv *tpriv;
+
+ ASSERT_RTNL();
+
+	/*
+	 * Since the legacy sysfs interface uses the pkey for deletion it
+	 * cannot support more than one interface with the same pkey, as that
+	 * creates ambiguity. The RTNL interface deletes using the netdev, so
+	 * it has no problem supporting duplicate pkeys.
+	 */
+ if (priv->child_type != IPOIB_LEGACY_CHILD)
+ return true;
+
+ /*
+ * First ensure this isn't a duplicate. We check the parent device and
+ * then all of the legacy child interfaces to make sure the Pkey
+ * doesn't match.
+ */
+ if (ppriv->pkey == priv->pkey)
+ return false;
+
+ list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
+ if (tpriv->pkey == priv->pkey &&
+ tpriv->child_type == IPOIB_LEGACY_CHILD)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * NOTE: If this function fails then priv->dev remains valid; however, priv
+ * may already have been freed and must not be touched by the caller in the
+ * error case.
+ *
+ * If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
+ * free the net_device (just as rtnl_newlink does); otherwise the net_device
+ * will be freed when the rtnl lock is released.
+ */
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
u16 pkey, int type)
{
+ struct net_device *ndev = priv->dev;
int result;
- priv->max_ib_mtu = ppriv->max_ib_mtu;
- /* MTU will be reset when mcast join happens */
- priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
- priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
- priv->parent = ppriv->dev;
- set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ ASSERT_RTNL();
+
+ /*
+ * Racing with unregister of the parent must be prevented by the
+ * caller.
+ */
+ WARN_ON(ppriv->dev->reg_state != NETREG_REGISTERED);
- ipoib_set_dev_features(priv, ppriv->ca);
+ if (pkey == 0 || pkey == 0x8000) {
+ result = -EINVAL;
+ goto out_early;
+ }
+ priv->parent = ppriv->dev;
priv->pkey = pkey;
+ priv->child_type = type;
- memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
- memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
- set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
- priv->dev->broadcast[8] = pkey >> 8;
- priv->dev->broadcast[9] = pkey & 0xff;
-
- result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
- if (result < 0) {
- ipoib_warn(ppriv, "failed to initialize subinterface: "
- "device %s, port %d",
- ppriv->ca->name, ppriv->port);
- goto err;
+ if (!is_child_unique(ppriv, priv)) {
+ result = -ENOTUNIQ;
+ goto out_early;
}
- result = register_netdevice(priv->dev);
+ /* We do not need to touch priv if register_netdevice fails */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ result = register_netdevice(ndev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
- goto register_failed;
+
+ /*
+		 * register_netdevice sometimes calls priv_destructor and
+		 * sometimes does not; make sure it has been called.
+ */
+ goto out_early;
}
	/* RTNL children don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
- if (ipoib_cm_add_mode_attr(priv->dev))
+ if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_pkey_attr(priv->dev))
+ if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_umcast_attr(priv->dev))
+ if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_parent))
+ if (device_create_file(&ndev->dev, &dev_attr_parent))
goto sysfs_failed;
}
- priv->child_type = type;
- list_add_tail(&priv->list, &ppriv->child_intfs);
-
return 0;
sysfs_failed:
- result = -ENOMEM;
unregister_netdevice(priv->dev);
+ return -ENOMEM;
-register_failed:
- ipoib_dev_cleanup(priv->dev);
-
-err:
+out_early:
+ if (ndev->priv_destructor)
+ ndev->priv_destructor(ndev);
return result;
}
@@ -119,129 +163,124 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv;
char intf_name[IFNAMSIZ];
- struct ipoib_dev_priv *tpriv;
+ struct net_device *ndev;
int result;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- ppriv = ipoib_priv(pdev);
-
- if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
- return -EPERM;
-
- snprintf(intf_name, sizeof intf_name, "%s.%04x",
- ppriv->dev->name, pkey);
-
- if (!mutex_trylock(&ppriv->sysfs_mutex))
+ if (!rtnl_trylock())
return restart_syscall();
- if (!rtnl_trylock()) {
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
- }
-
- if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+ if (pdev->reg_state != NETREG_REGISTERED) {
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
+ return -EPERM;
}
+ ppriv = ipoib_priv(pdev);
+
+ snprintf(intf_name, sizeof(intf_name), "%s.%04x",
+ ppriv->dev->name, pkey);
+
priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (!priv) {
result = -ENOMEM;
goto out;
}
-
- /*
- * First ensure this isn't a duplicate. We check the parent device and
- * then all of the legacy child interfaces to make sure the Pkey
- * doesn't match.
- */
- if (ppriv->pkey == pkey) {
- result = -ENOTUNIQ;
- goto out;
- }
-
- list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
- if (tpriv->pkey == pkey &&
- tpriv->child_type == IPOIB_LEGACY_CHILD) {
- result = -ENOTUNIQ;
- goto out;
- }
- }
+ ndev = priv->dev;
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+ if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(ndev);
+
out:
- up_write(&ppriv->vlan_rwsem);
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
- if (result && priv) {
- struct rdma_netdev *rn;
+ return result;
+}
+
+struct ipoib_vlan_delete_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+/*
+ * sysfs callbacks of a netdevice cannot obtain the rtnl lock, as
+ * unregister_netdev ultimately deletes the sysfs files while holding the rtnl
+ * lock. This deadlocks the system.
+ *
+ * A callback can use rtnl_trylock to avoid the deadlock, but it cannot call
+ * unregister_netdev as that internally takes and releases the rtnl lock. So
+ * instead we find the netdev to unregister and then do the actual unregister
+ * from the global work queue, where we can obtain the rtnl lock safely.
+ */
+static void ipoib_vlan_delete_task(struct work_struct *work)
+{
+ struct ipoib_vlan_delete_work *pwork =
+ container_of(work, struct ipoib_vlan_delete_work, work);
+ struct net_device *dev = pwork->dev;
+
+ rtnl_lock();
+
+ /* Unregistering tasks can race with another task or parent removal */
+ if (dev->reg_state == NETREG_REGISTERED) {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
- rn = netdev_priv(priv->dev);
- rn->free_rdma_netdev(priv->dev);
- kfree(priv);
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
}
- return result;
+ rtnl_unlock();
+
+ kfree(pwork);
}
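The comment above spells out the deadlock: a sysfs store handler must neither block on the rtnl lock nor call unregister_netdev directly. The customary idiom is rtnl_trylock() plus restart_syscall(), with the real unregistration deferred to a workqueue as done here. A minimal sketch of the handler side:

	static ssize_t example_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		if (!rtnl_trylock())
			return restart_syscall();	/* caller retries */

		/* ... inspect state, queue deferred work ... */

		rtnl_unlock();
		return count;
	}
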
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv, *tpriv;
- struct net_device *dev = NULL;
+ int rc;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- ppriv = ipoib_priv(pdev);
-
- if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
- return -EPERM;
-
- if (!mutex_trylock(&ppriv->sysfs_mutex))
+ if (!rtnl_trylock())
return restart_syscall();
- if (!rtnl_trylock()) {
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
- }
-
- if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+ if (pdev->reg_state != NETREG_REGISTERED) {
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
+ return -EPERM;
}
+ ppriv = ipoib_priv(pdev);
+
+ rc = -ENODEV;
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
- list_del(&priv->list);
- dev = priv->dev;
+ struct ipoib_vlan_delete_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del_init(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+ work->dev = priv->dev;
+ INIT_WORK(&work->work, ipoib_vlan_delete_task);
+ queue_work(ipoib_workqueue, &work->work);
+
+ rc = 0;
break;
}
}
- up_write(&ppriv->vlan_rwsem);
-
- if (dev) {
- ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
- unregister_netdevice(dev);
- }
+out:
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
-
- if (dev) {
- struct rdma_netdev *rn;
-
- rn = netdev_priv(dev);
- rn->free_rdma_netdev(priv->dev);
- kfree(priv);
- return 0;
- }
- return -ENODEV;
+ return rc;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9a6434c31db2..3fecd87c9f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -610,12 +610,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
uint32_t initial_cmdsn)
{
struct iscsi_cls_session *cls_session;
- struct iscsi_session *session;
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
u32 max_fr_sectors;
- u16 max_cmds;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
@@ -633,8 +631,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
if (ep) {
iser_conn = ep->dd_data;
- max_cmds = iser_conn->max_cmds;
shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
+ shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds);
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
@@ -660,7 +658,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
}
mutex_unlock(&iser_conn->state_mutex);
} else {
- max_cmds = ISER_DEF_XMIT_CMDS_MAX;
+ shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX);
if (iscsi_host_add(shost, NULL))
goto free_host;
}
@@ -676,21 +674,13 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
iser_warn("max_sectors was reduced from %u to %u\n",
iser_max_sectors, shost->max_sectors);
- if (cmds_max > max_cmds) {
- iser_info("cmds_max changed from %u to %u\n",
- cmds_max, max_cmds);
- cmds_max = max_cmds;
- }
-
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
- cmds_max, 0,
+ shost->can_queue, 0,
sizeof(struct iscsi_iser_task),
initial_cmdsn, 0);
if (!cls_session)
goto remove_host;
- session = cls_session->dd_data;
- shost->can_queue = session->scsi_cmds_max;
return cls_session;
remove_host:
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index ca844a926e6a..009be8889d71 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -311,7 +311,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
- domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
+ domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request);
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
@@ -405,7 +405,8 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
+ wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr,
+ wr);
wr->wr.opcode = IB_WR_REG_SIG_MR;
wr->wr.wr_cqe = cqe;
wr->wr.sg_list = &data_reg->sge;
@@ -457,7 +458,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return n < 0 ? n : -EINVAL;
}
- wr = reg_wr(iser_tx_next_wr(tx_desc));
+ wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
wr->wr.opcode = IB_WR_REG_MR;
wr->wr.wr_cqe = cqe;
wr->wr.send_flags = 0;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 616d978cbf2b..b686a4aaffe8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1022,7 +1022,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_login_desc *desc = &iser_conn->login_desc;
- struct ib_recv_wr wr, *wr_failed;
+ struct ib_recv_wr wr;
int ib_ret;
desc->sge.addr = desc->rsp_dma;
@@ -1036,7 +1036,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
wr.next = NULL;
ib_conn->post_recv_buf_count++;
- ib_ret = ib_post_recv(ib_conn->qp, &wr, &wr_failed);
+ ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count--;
@@ -1050,7 +1050,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
unsigned int my_rx_head = iser_conn->rx_desc_head;
struct iser_rx_desc *rx_desc;
- struct ib_recv_wr *wr, *wr_failed;
+ struct ib_recv_wr *wr;
int i, ib_ret;
for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
@@ -1067,7 +1067,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
wr->next = NULL; /* mark end of work requests list */
ib_conn->post_recv_buf_count += count;
- ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &wr_failed);
+ ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count -= count;
@@ -1086,7 +1086,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
{
- struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
+ struct ib_send_wr *wr = iser_tx_next_wr(tx_desc);
int ib_ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
@@ -1100,10 +1100,10 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
wr->opcode = IB_WR_SEND;
wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
- ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
+ ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL);
if (ib_ret)
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
- ib_ret, bad_wr->opcode);
+ ib_ret, wr->opcode);
return ib_ret;
}
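The iser hunks track the verbs API change in which ib_post_send()/ib_post_recv() accept NULL for the failed-WR out-parameter; when a single WR is posted, a failure is unambiguous and the pointer conveys nothing. A sketch assuming that post-4.18 signature, with an illustrative descriptor type:

	static int example_post_send(struct ib_qp *qp, struct example_desc *desc)
	{
		struct ib_send_wr wr = {
			.wr_cqe     = &desc->cqe,
			.sg_list    = &desc->sge,
			.num_sge    = 1,
			.opcode     = IB_WR_SEND,
			.send_flags = IB_SEND_SIGNALED,
		};

		/* NULL bad_wr: a failure can only refer to this request. */
		return ib_post_send(qp, &wr, NULL);
	}
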
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index cccbcf0eb035..f39670c5c25c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -136,7 +136,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
- attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
+ attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
@@ -299,7 +299,8 @@ isert_create_device_ib_res(struct isert_device *device)
struct ib_device *ib_dev = device->ib_device;
int ret;
- isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
+ isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
+ ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
ret = isert_alloc_comps(device);
@@ -809,7 +810,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
- struct ib_recv_wr *rx_wr, *rx_wr_failed;
+ struct ib_recv_wr *rx_wr;
int i, ret;
struct iser_rx_desc *rx_desc;
@@ -825,8 +826,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
- &rx_wr_failed);
+ ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
@@ -836,7 +836,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
- struct ib_recv_wr *rx_wr_failed, rx_wr;
+ struct ib_recv_wr rx_wr;
int ret;
if (!rx_desc->in_use) {
@@ -853,7 +853,7 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
@@ -864,7 +864,7 @@ static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct ib_send_wr send_wr, *send_wr_failed;
+ struct ib_send_wr send_wr;
int ret;
ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
@@ -879,7 +879,7 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
+ ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
if (ret)
isert_err("ib_post_send() failed, ret: %d\n", ret);
@@ -967,7 +967,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
- struct ib_recv_wr rx_wr, *rx_wr_fail;
+ struct ib_recv_wr rx_wr;
struct ib_sge sge;
int ret;
@@ -986,7 +986,7 @@ isert_login_post_recv(struct isert_conn *isert_conn)
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
@@ -1829,7 +1829,6 @@ isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
- struct ib_send_wr *wr_failed;
int ret;
ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
@@ -1838,8 +1837,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
return ret;
}
- ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
- &wr_failed);
+ ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
if (ret) {
isert_err("ib_post_send failed with %d\n", ret);
return ret;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 0c8aec62a425..61558788b3fa 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
@@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
skb_pull(skb, sizeof(*mdata));
return rc;
}
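The opa_vnic change follows the core netdev rework in which ndo_select_queue()'s void *accel_priv argument became a struct net_device *sb_dev (the subordinate device used for queue selection); the fallback helper takes the same argument in this kernel. A pass-through sketch under that signature:

	static u16 example_select_queue(struct net_device *netdev,
					struct sk_buff *skb,
					struct net_device *sb_dev,
					select_queue_fallback_t fallback)
	{
		/* No private policy: defer to the core's default pick. */
		return fallback(netdev, skb, sb_dev);
	}
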
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9786b24b956f..444d16520506 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -57,13 +57,10 @@
#define DRV_NAME "ib_srp"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "2.0"
-#define DRV_RELDATE "July 26, 2015"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_INFO(release_date, DRV_RELDATE);
#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
@@ -145,7 +142,8 @@ static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
-static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
@@ -1211,7 +1209,6 @@ static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
u32 rkey)
{
- struct ib_send_wr *bad_wr;
struct ib_send_wr wr = {
.opcode = IB_WR_LOCAL_INV,
.next = NULL,
@@ -1222,7 +1219,7 @@ static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
wr.wr_cqe = &req->reg_cqe;
req->reg_cqe.done = srp_inv_rkey_err_done;
- return ib_post_send(ch->qp, &wr, &bad_wr);
+ return ib_post_send(ch->qp, &wr, NULL);
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
@@ -1503,7 +1500,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_send_wr *bad_wr;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
u32 rkey;
@@ -1567,7 +1563,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
srp_map_desc(state, desc->mr->iova,
desc->mr->length, desc->mr->rkey);
- err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
+ err = ib_post_send(ch->qp, &wr.wr, NULL);
if (unlikely(err)) {
WARN_ON_ONCE(err == -ENOMEM);
return err;
@@ -2018,7 +2014,7 @@ static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
struct srp_target_port *target = ch->target;
struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
+ struct ib_send_wr wr;
list.addr = iu->dma;
list.length = len;
@@ -2033,13 +2029,13 @@ static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
- return ib_post_send(ch->qp, &wr, &bad_wr);
+ return ib_post_send(ch->qp, &wr, NULL);
}
static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
struct srp_target_port *target = ch->target;
- struct ib_recv_wr wr, *bad_wr;
+ struct ib_recv_wr wr;
struct ib_sge list;
list.addr = iu->dma;
@@ -2053,7 +2049,7 @@ static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
wr.sg_list = &list;
wr.num_sge = 1;
- return ib_post_recv(ch->qp, &wr, &bad_wr);
+ return ib_post_recv(ch->qp, &wr, NULL);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
@@ -2558,7 +2554,7 @@ error:
}
static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event,
+ const struct ib_cm_event *event,
struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
@@ -2643,7 +2639,8 @@ static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
}
}
-static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
@@ -3843,7 +3840,7 @@ static ssize_t srp_create_target(struct device *dev,
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
- ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
+ ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
if (ret)
goto out;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3081c629a7f7..f37cbad022a2 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -575,8 +575,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
sport->sm_lid = port_attr.sm_lid;
sport->lid = port_attr.lid;
- ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
- NULL);
+ ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
if (ret)
goto err_query_port;
@@ -720,7 +719,7 @@ static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
&& ioctx_size != sizeof(struct srpt_send_ioctx));
- ring = kmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
+ ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
if (!ring)
goto out;
for (i = 0; i < ring_size; ++i) {
@@ -734,7 +733,7 @@ static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
err:
while (--i >= 0)
srpt_free_ioctx(sdev, ring[i], dma_size, dir);
- kfree(ring);
+ kvfree(ring);
ring = NULL;
out:
return ring;
@@ -759,7 +758,7 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
for (i = 0; i < ring_size; ++i)
srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
- kfree(ioctx_ring);
+ kvfree(ioctx_ring);
}
/**
@@ -817,7 +816,7 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
{
struct ib_sge list;
- struct ib_recv_wr wr, *bad_wr;
+ struct ib_recv_wr wr;
BUG_ON(!sdev);
list.addr = ioctx->ioctx.dma;
@@ -831,9 +830,9 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
wr.num_sge = 1;
if (sdev->use_srq)
- return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ return ib_post_srq_recv(sdev->srq, &wr, NULL);
else
- return ib_post_recv(ch->qp, &wr, &bad_wr);
+ return ib_post_recv(ch->qp, &wr, NULL);
}
/**
@@ -847,7 +846,6 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
*/
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
- struct ib_send_wr *bad_wr;
struct ib_rdma_wr wr = {
.wr = {
.next = NULL,
@@ -860,7 +858,7 @@ static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
ch->qp->qp_num);
- return ib_post_send(ch->qp, &wr.wr, &bad_wr);
+ return ib_post_send(ch->qp, &wr.wr, NULL);
}
static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1754,13 +1752,15 @@ retry:
*/
qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
qp_init->cap.max_rdma_ctxs = sq_size / 2;
- qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
+ qp_init->cap.max_send_sge = min(attrs->max_send_sge,
+ SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
if (sdev->use_srq) {
qp_init->srq = sdev->srq;
} else {
qp_init->cap.max_recv_wr = ch->rq_size;
- qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
+ qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
+ SRPT_MAX_SG_PER_WQE);
}
if (ch->using_rdma_cm) {
@@ -1833,8 +1833,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
int ret;
if (!srpt_set_ch_state(ch, CH_DRAINING)) {
- pr_debug("%s-%d: already closed\n", ch->sess_name,
- ch->qp->qp_num);
+ pr_debug("%s: already closed\n", ch->sess_name);
return false;
}
@@ -1940,8 +1939,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
- ch->sess_name, ch->qp->qp_num,
+ pr_info("Closing channel %s because target %s_%d has been disabled\n",
+ ch->sess_name,
sport->sdev->device->name, sport->port);
srpt_close_ch(ch);
}
@@ -2029,8 +2028,7 @@ static void srpt_release_channel_work(struct work_struct *w)
target_sess_cmd_list_set_waiting(se_sess);
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(se_sess);
- transport_deregister_session(se_sess);
+ target_remove_session(se_sess);
ch->sess = NULL;
if (ch->using_rdma_cm)
@@ -2087,7 +2085,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
struct rdma_conn_param rdma_cm;
struct ib_cm_rep_param ib_cm;
} *rep_param = NULL;
- struct srpt_rdma_ch *ch;
+ struct srpt_rdma_ch *ch = NULL;
char i_port_id[36];
u32 it_iu_len;
int i, ret;
@@ -2221,26 +2219,28 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
+ WARN_ON_ONCE(ch->sess == NULL);
ret = PTR_ERR(ch->sess);
+ ch->sess = NULL;
pr_info("Rejected login for initiator %s: ret = %d.\n",
ch->sess_name, ret);
rej->reason = cpu_to_be32(ret == -ENOMEM ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
- goto reject;
+ goto destroy_ib;
}
mutex_lock(&sport->mutex);
@@ -2279,7 +2279,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
ret);
- goto destroy_ib;
+ goto reject;
}
pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
@@ -2358,8 +2358,11 @@ free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
ch->max_rsp_size, DMA_TO_DEVICE);
+
free_ch:
- if (ib_cm_id)
+ if (rdma_cm_id)
+ rdma_cm_id->context = NULL;
+ else
ib_cm_id->context = NULL;
kfree(ch);
ch = NULL;
@@ -2379,6 +2382,15 @@ reject:
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
+ if (ch && ch->sess) {
+ srpt_close_ch(ch);
+ /*
+ * Tell the caller not to free cm_id since
+ * srpt_release_channel_work() will do that.
+ */
+ ret = 0;
+ }
+
out:
kfree(rep_param);
kfree(rsp);
@@ -2388,7 +2400,7 @@ out:
}
static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
- struct ib_cm_req_event_param *param,
+ const struct ib_cm_req_event_param *param,
void *private_data)
{
char sguid[40];
@@ -2500,7 +2512,8 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
* a non-zero value in any other case will trigger a race with the
* ib_destroy_cm_id() call in srpt_release_channel().
*/
-static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srpt_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct srpt_rdma_ch *ch = cm_id->context;
int ret;
@@ -2610,7 +2623,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx =
container_of(se_cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
- struct ib_send_wr *first_wr = NULL, *bad_wr;
+ struct ib_send_wr *first_wr = NULL;
struct ib_cqe *cqe = &ioctx->rdma_cqe;
enum srpt_command_state new_state;
int ret, i;
@@ -2634,7 +2647,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
cqe = NULL;
}
- ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ ret = ib_post_send(ch->qp, first_wr, NULL);
if (ret) {
pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
__func__, ret, ioctx->n_rdma,
@@ -2672,7 +2685,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
container_of(cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct srpt_device *sdev = ch->sport->sdev;
- struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
+ struct ib_send_wr send_wr, *first_wr = &send_wr;
struct ib_sge sge;
enum srpt_command_state state;
int resp_len, ret, i;
@@ -2745,7 +2758,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ ret = ib_post_send(ch->qp, first_wr, NULL);
if (ret < 0) {
pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
__func__, ioctx->cmd.tag, ret);
@@ -2969,7 +2982,8 @@ static void srpt_add_one(struct ib_device *device)
pr_debug("device = %p\n", device);
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
+ GFP_KERNEL);
if (!sdev)
goto err;
@@ -3023,8 +3037,6 @@ static void srpt_add_one(struct ib_device *device)
srpt_event_handler);
ib_register_event_handler(&sdev->event_handler);
- WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
-
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
INIT_LIST_HEAD(&sport->nexus_list);
@@ -3597,11 +3609,9 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
/**
* srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* @wwn: Corresponds to $driver/$port.
- * @group: Not used.
* @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
const char *name)
{
struct srpt_port *sport = wwn->priv;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 2361483476a0..444dfd7281b5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -396,9 +396,9 @@ struct srpt_port {
* @sdev_mutex: Serializes use_srq changes.
* @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Per-HCA SRQ.
- * @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
+ * @port: Information about the ports owned by this HCA.
*/
struct srpt_device {
struct ib_device *device;
@@ -410,9 +410,9 @@ struct srpt_device {
struct mutex sdev_mutex;
bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
- struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
+ struct srpt_port port[];
};
#endif /* IB_SRPT_H */
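The srpt_device change swaps a fixed port[2] array for a flexible array member sized per HCA at allocation time; struct_size() from <linux/overflow.h> computes sizeof(*sdev) plus n trailing elements, saturating on overflow instead of wrapping. A self-contained sketch with illustrative types:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct example_port {
		u8 id;
	};

	struct example_dev {
		int nports;
		struct example_port port[];	/* must be the last member */
	};

	static struct example_dev *example_alloc(int nports)
	{
		struct example_dev *d;

		d = kzalloc(struct_size(d, port, nports), GFP_KERNEL);
		if (d)
			d->nports = nports;
		return d;
	}
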
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
index cd4e6679d61a..5419c1c1f621 100644
--- a/drivers/input/evbug.c
+++ b/drivers/input/evbug.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c81c79d01d93..370206f987f9 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -481,7 +481,7 @@ static int evdev_release(struct inode *inode, struct file *file)
evdev_detach_client(evdev, client);
for (i = 0; i < EV_CNT; ++i)
- kfree(client->evmasks[i]);
+ bitmap_free(client->evmasks[i]);
kvfree(client);
@@ -925,17 +925,15 @@ static int evdev_handle_get_val(struct evdev_client *client,
{
int ret;
unsigned long *mem;
- size_t len;
- len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long);
- mem = kmalloc(len, GFP_KERNEL);
+ mem = bitmap_alloc(maxbit, GFP_KERNEL);
if (!mem)
return -ENOMEM;
spin_lock_irq(&dev->event_lock);
spin_lock(&client->buffer_lock);
- memcpy(mem, bits, len);
+ bitmap_copy(mem, bits, maxbit);
spin_unlock(&dev->event_lock);
@@ -947,7 +945,7 @@ static int evdev_handle_get_val(struct evdev_client *client,
if (ret < 0)
evdev_queue_syn_dropped(client);
- kfree(mem);
+ bitmap_free(mem);
return ret;
}
@@ -1003,13 +1001,13 @@ static int evdev_set_mask(struct evdev_client *client,
if (!cnt)
return 0;
- mask = kcalloc(sizeof(unsigned long), BITS_TO_LONGS(cnt), GFP_KERNEL);
+ mask = bitmap_zalloc(cnt, GFP_KERNEL);
if (!mask)
return -ENOMEM;
error = bits_from_user(mask, cnt - 1, codes_size, codes, compat);
if (error < 0) {
- kfree(mask);
+ bitmap_free(mask);
return error;
}
@@ -1018,7 +1016,7 @@ static int evdev_set_mask(struct evdev_client *client,
client->evmasks[type] = mask;
spin_unlock_irqrestore(&client->buffer_lock, flags);
- kfree(oldmask);
+ bitmap_free(oldmask);
return 0;
}
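The evdev hunks convert open-coded BITS_TO_LONGS()/kmalloc() mask handling to bitmap_alloc(), bitmap_zalloc() and bitmap_free(), which take a bit count rather than a byte length. A sketch of duplicating a mask with those helpers; the function itself is illustrative:

	static unsigned long *example_dup_mask(const unsigned long *src,
					       unsigned int nbits)
	{
		unsigned long *mask = bitmap_zalloc(nbits, GFP_KERNEL);

		if (mask && src)
			bitmap_copy(mask, src, nbits);	/* nbits, not bytes */
		return mask;	/* release with bitmap_free() */
	}
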
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 2909e9561cf3..afdc20ca0e24 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <asm/io.h>
diff --git a/drivers/input/gameport/lightning.c b/drivers/input/gameport/lightning.c
index 85d6ee09f11f..c6e74c7945cb 100644
--- a/drivers/input/gameport/lightning.c
+++ b/drivers/input/gameport/lightning.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <asm/io.h>
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 7c217848613e..6437645858f9 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <asm/io.h>
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index cf30523c6ef6..6c7326c93721 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots);
* inactive, or if the tool type is changed, a new tracking id is
* assigned to the slot. The tool type is only reported if the
* corresponding absbit field is set.
+ *
+ * Returns true if contact is active.
*/
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active)
{
struct input_mt *mt = dev->mt;
@@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev,
int id;
if (!mt)
- return;
+ return false;
slot = &mt->slots[mt->slot];
slot->frame = mt->frame;
if (!active) {
input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
- return;
+ return false;
}
id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
- if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type)
+ if (id < 0)
id = input_mt_new_trkid(mt);
input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+
+ return true;
}
EXPORT_SYMBOL(input_mt_report_slot_state);
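With input_mt_report_slot_state() now returning whether the contact is active, a driver can gate its coordinate reporting on the return value instead of re-checking the state itself. A usage sketch; the slot and coordinate variables are illustrative:

	input_mt_slot(dev, slot);
	if (input_mt_report_slot_state(dev, MT_TOOL_FINGER, active)) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
	input_mt_sync_frame(dev);
	input_sync(dev);
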
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 6365c1958264..3304aaaffe87 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event);
*/
void input_alloc_absinfo(struct input_dev *dev)
{
- if (!dev->absinfo)
- dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
- GFP_KERNEL);
+ if (dev->absinfo)
+ return;
- WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
+ dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
+ if (!dev->absinfo) {
+ dev_err(dev->dev.parent ?: &dev->dev,
+ "%s: unable to allocate memory\n", __func__);
+ /*
+ * We will handle this allocation failure in
+ * input_register_device() when we refuse to register input
+ * device with ABS bits but without absinfo.
+ */
+ }
}
EXPORT_SYMBOL(input_alloc_absinfo);
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 55efdfc7eb62..98307039a534 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 15a71acb6997..f466c0d34247 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index c65b5fa69f1e..2b82a838c511 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/types.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index c79dbcb4d146..2b445c8d3fcd 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index ae3ee24a2368..14cb956beac4 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index de0dd4756c84..804b1b80a8be 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
@@ -263,6 +259,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char
db9_saturn_write_sub(port, type, 3, powered, 0);
return data[0] = 0xe3;
}
+ /* else: fall through */
default:
return data[0];
}
@@ -282,11 +279,14 @@ static int db9_saturn_report(unsigned char id, unsigned char data[60], struct in
switch (data[j]) {
case 0x16: /* multi controller (analog 4 axis) */
input_report_abs(dev, db9_abs[5], data[j + 6]);
+ /* fall through */
case 0x15: /* mission stick (analog 3 axis) */
input_report_abs(dev, db9_abs[3], data[j + 4]);
input_report_abs(dev, db9_abs[4], data[j + 5]);
+ /* fall through */
case 0x13: /* racing controller (analog 1 axis) */
input_report_abs(dev, db9_abs[2], data[j + 3]);
+ /* fall through */
case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
case 0x02: /* digital pad (digital 2 axis + buttons) */
input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
@@ -380,6 +380,7 @@ static void db9_timer(struct timer_list *t)
input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
+ /* fall through */
case DB9_MULTI_0802:
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 4e10ffdf8a36..d62e73dd9f7f 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -24,10 +24,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 0f519db64748..50a60065ab14 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index eac9c5b8d73e..e10395ba62bc 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index a9ac2f9cfce0..43ff817d80ac 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 0de9a0943a9e..3536d5f5ad18 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
@@ -56,7 +52,7 @@ static int make_magnitude_modifier(struct iforce* iforce,
iforce_send_packet(iforce, FF_CMD_MAGNITUDE, data);
- iforce_dump_packet("magnitude: ", FF_CMD_MAGNITUDE, data);
+ iforce_dump_packet(iforce, "magnitude", FF_CMD_MAGNITUDE, data);
return 0;
}
@@ -178,7 +174,7 @@ static int make_condition_modifier(struct iforce* iforce,
data[9] = (100 * lsat) >> 16;
iforce_send_packet(iforce, FF_CMD_CONDITION, data);
- iforce_dump_packet("condition", FF_CMD_CONDITION, data);
+ iforce_dump_packet(iforce, "condition", FF_CMD_CONDITION, data);
return 0;
}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index daeeb4c7e3b0..58d5cfe46526 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
@@ -33,21 +29,14 @@ MODULE_LICENSE("GPL");
static signed short btn_joystick[] =
{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A, BTN_B, BTN_C, -1 };
-
-static signed short btn_avb_pegasus[] =
-{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, -1 };
+ BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A,
+ BTN_B, BTN_C, BTN_DEAD, -1 };
-static signed short btn_wheel[] =
-{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A, BTN_B, BTN_C, -1 };
-
-static signed short btn_avb_tw[] =
+static signed short btn_joystick_avb[] =
{ BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, -1 };
+ BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_DEAD, -1 };
-static signed short btn_avb_wheel[] =
+static signed short btn_wheel[] =
{ BTN_GEAR_DOWN, BTN_GEAR_UP, BTN_BASE, BTN_BASE2, BTN_BASE3,
BTN_BASE4, BTN_BASE5, BTN_BASE6, -1 };
@@ -73,9 +62,9 @@ static struct iforce_device iforce_device[] = {
{ 0x044f, 0xa01c, "Thrustmaster Motor Sport GT", btn_wheel, abs_wheel, ff_iforce },
{ 0x046d, 0xc281, "Logitech WingMan Force", btn_joystick, abs_joystick, ff_iforce },
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
- { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_avb_pegasus, abs_avb_pegasus, ff_iforce },
- { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce },
- { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //?
+ { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
+ { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
{ 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
@@ -360,7 +349,7 @@ int iforce_init_device(struct iforce *iforce)
for (i = 0; c[i]; i++)
if (!iforce_get_id_packet(iforce, c + i))
- iforce_dump_packet("info", iforce->ecmd, iforce->edata);
+ iforce_dump_packet(iforce, "info", iforce->ecmd, iforce->edata);
/*
* Disable spring, enable force feedback.
@@ -388,7 +377,6 @@ int iforce_init_device(struct iforce *iforce)
for (i = 0; iforce->type->btn[i] >= 0; i++)
set_bit(iforce->type->btn[i], input_dev->keybit);
- set_bit(BTN_DEAD, input_dev->keybit);
for (i = 0; iforce->type->abs[i] >= 0; i++) {
diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c
index 08f98f2eaf88..c10169f4554e 100644
--- a/drivers/input/joystick/iforce/iforce-packets.c
+++ b/drivers/input/joystick/iforce/iforce-packets.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
@@ -33,14 +29,10 @@ static struct {
} iforce_hat_to_axis[16] = {{ 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}};
-void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data)
+void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data)
{
- int i;
-
- printk(KERN_DEBUG __FILE__ ": %s cmd = %04x, data = ", msg, cmd);
- for (i = 0; i < LO(cmd); i++)
- printk("%02x ", data[i]);
- printk("\n");
+ dev_dbg(iforce->dev->dev.parent, "%s %s cmd = %04x, data = %*ph\n",
+ __func__, msg, cmd, LO(cmd), data);
}
/*
@@ -255,7 +247,7 @@ int iforce_get_id_packet(struct iforce *iforce, char *packet)
iforce->cr.bRequest = packet[0];
iforce->ctrl->dev = iforce->usbdev;
- status = usb_submit_urb(iforce->ctrl, GFP_ATOMIC);
+ status = usb_submit_urb(iforce->ctrl, GFP_KERNEL);
if (status) {
dev_err(&iforce->intf->dev,
"usb_submit_urb failed %d\n", status);
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index 154e827b559b..f4ba4a751fe0 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index e8724f1a4a25..78073259c9a1 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 96ae4f5bd0eb..0e9d01f8bcb6 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
@@ -158,7 +154,7 @@ int iforce_init_device(struct iforce *iforce);
int iforce_control_playback(struct iforce*, u16 id, unsigned int);
void iforce_process_packet(struct iforce *iforce, u16 cmd, unsigned char *data);
int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data);
-void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data) ;
+void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data);
int iforce_get_id_packet(struct iforce *iforce, char *packet);
/* iforce-ff.c */
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 17c2c800743c..598788b3da62 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index 7f4dff9a566f..344ab44ff581 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index a9d0e3edca94..95a34ab34fc3 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/pxrc.c b/drivers/input/joystick/pxrc.c
index 07a0dbd3ced2..ea2bf5951d67 100644
--- a/drivers/input/joystick/pxrc.c
+++ b/drivers/input/joystick/pxrc.c
@@ -3,7 +3,6 @@
* Driver for Phoenix RC Flight Controller Adapter
*
* Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com>
- *
*/
#include <linux/kernel.h>
@@ -16,31 +15,22 @@
#include <linux/mutex.h>
#include <linux/input.h>
-#define PXRC_VENDOR_ID (0x1781)
-#define PXRC_PRODUCT_ID (0x0898)
-
-static const struct usb_device_id pxrc_table[] = {
- { USB_DEVICE(PXRC_VENDOR_ID, PXRC_PRODUCT_ID) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, pxrc_table);
+#define PXRC_VENDOR_ID 0x1781
+#define PXRC_PRODUCT_ID 0x0898
struct pxrc {
struct input_dev *input;
- struct usb_device *udev;
struct usb_interface *intf;
struct urb *urb;
struct mutex pm_mutex;
bool is_open;
- __u8 epaddr;
char phys[64];
- unsigned char *data;
- size_t bsize;
};
static void pxrc_usb_irq(struct urb *urb)
{
struct pxrc *pxrc = urb->context;
+ u8 *data = urb->transfer_buffer;
int error;
switch (urb->status) {
@@ -68,15 +58,15 @@ static void pxrc_usb_irq(struct urb *urb)
}
if (urb->actual_length == 8) {
- input_report_abs(pxrc->input, ABS_X, pxrc->data[0]);
- input_report_abs(pxrc->input, ABS_Y, pxrc->data[2]);
- input_report_abs(pxrc->input, ABS_RX, pxrc->data[3]);
- input_report_abs(pxrc->input, ABS_RY, pxrc->data[4]);
- input_report_abs(pxrc->input, ABS_RUDDER, pxrc->data[5]);
- input_report_abs(pxrc->input, ABS_THROTTLE, pxrc->data[6]);
- input_report_abs(pxrc->input, ABS_MISC, pxrc->data[7]);
-
- input_report_key(pxrc->input, BTN_A, pxrc->data[1]);
+ input_report_abs(pxrc->input, ABS_X, data[0]);
+ input_report_abs(pxrc->input, ABS_Y, data[2]);
+ input_report_abs(pxrc->input, ABS_RX, data[3]);
+ input_report_abs(pxrc->input, ABS_RY, data[4]);
+ input_report_abs(pxrc->input, ABS_RUDDER, data[5]);
+ input_report_abs(pxrc->input, ABS_THROTTLE, data[6]);
+ input_report_abs(pxrc->input, ABS_MISC, data[7]);
+
+ input_report_key(pxrc->input, BTN_A, data[1]);
}
exit:
@@ -120,61 +110,73 @@ static void pxrc_close(struct input_dev *input)
mutex_unlock(&pxrc->pm_mutex);
}
-static int pxrc_usb_init(struct pxrc *pxrc)
+static void pxrc_free_urb(void *_pxrc)
{
+ struct pxrc *pxrc = _pxrc;
+
+ usb_free_urb(pxrc->urb);
+}
+
+static int pxrc_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct pxrc *pxrc;
struct usb_endpoint_descriptor *epirq;
- unsigned int pipe;
- int retval;
+ size_t xfer_size;
+ void *xfer_buf;
+ int error;
- /* Set up the endpoint information */
- /* This device only has an interrupt endpoint */
- retval = usb_find_common_endpoints(pxrc->intf->cur_altsetting,
- NULL, NULL, &epirq, NULL);
- if (retval) {
- dev_err(&pxrc->intf->dev,
- "Could not find endpoint\n");
- goto error;
+ /*
+ * Locate the endpoint information. This device only has an
+ * interrupt endpoint.
+ */
+ error = usb_find_common_endpoints(intf->cur_altsetting,
+ NULL, NULL, &epirq, NULL);
+ if (error) {
+ dev_err(&intf->dev, "Could not find endpoint\n");
+ return error;
}
- pxrc->bsize = usb_endpoint_maxp(epirq);
- pxrc->epaddr = epirq->bEndpointAddress;
- pxrc->data = devm_kmalloc(&pxrc->intf->dev, pxrc->bsize, GFP_KERNEL);
- if (!pxrc->data) {
- retval = -ENOMEM;
- goto error;
- }
+ pxrc = devm_kzalloc(&intf->dev, sizeof(*pxrc), GFP_KERNEL);
+ if (!pxrc)
+ return -ENOMEM;
- usb_set_intfdata(pxrc->intf, pxrc);
- usb_make_path(pxrc->udev, pxrc->phys, sizeof(pxrc->phys));
- strlcat(pxrc->phys, "/input0", sizeof(pxrc->phys));
+ mutex_init(&pxrc->pm_mutex);
+ pxrc->intf = intf;
- pxrc->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxrc->urb) {
- retval = -ENOMEM;
- goto error;
- }
+ usb_set_intfdata(pxrc->intf, pxrc);
- pipe = usb_rcvintpipe(pxrc->udev, pxrc->epaddr),
- usb_fill_int_urb(pxrc->urb, pxrc->udev, pipe, pxrc->data, pxrc->bsize,
- pxrc_usb_irq, pxrc, 1);
+ xfer_size = usb_endpoint_maxp(epirq);
+ xfer_buf = devm_kmalloc(&intf->dev, xfer_size, GFP_KERNEL);
+ if (!xfer_buf)
+ return -ENOMEM;
-error:
- return retval;
+ pxrc->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pxrc->urb)
+ return -ENOMEM;
+ error = devm_add_action_or_reset(&intf->dev, pxrc_free_urb, pxrc);
+ if (error)
+ return error;
-}
+ usb_fill_int_urb(pxrc->urb, udev,
+ usb_rcvintpipe(udev, epirq->bEndpointAddress),
+ xfer_buf, xfer_size, pxrc_usb_irq, pxrc, 1);
-static int pxrc_input_init(struct pxrc *pxrc)
-{
- pxrc->input = devm_input_allocate_device(&pxrc->intf->dev);
- if (pxrc->input == NULL) {
- dev_err(&pxrc->intf->dev, "couldn't allocate input device\n");
+ pxrc->input = devm_input_allocate_device(&intf->dev);
+ if (!pxrc->input) {
+ dev_err(&intf->dev, "couldn't allocate input device\n");
return -ENOMEM;
}
pxrc->input->name = "PXRC Flight Controller Adapter";
+
+ usb_make_path(udev, pxrc->phys, sizeof(pxrc->phys));
+ strlcat(pxrc->phys, "/input0", sizeof(pxrc->phys));
pxrc->input->phys = pxrc->phys;
- usb_to_input_id(pxrc->udev, &pxrc->input->id);
+
+ usb_to_input_id(udev, &pxrc->input->id);
pxrc->input->open = pxrc_open;
pxrc->input->close = pxrc_close;
@@ -190,46 +192,16 @@ static int pxrc_input_init(struct pxrc *pxrc)
input_set_drvdata(pxrc->input, pxrc);
- return input_register_device(pxrc->input);
-}
-
-static int pxrc_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct pxrc *pxrc;
- int retval;
-
- pxrc = devm_kzalloc(&intf->dev, sizeof(*pxrc), GFP_KERNEL);
- if (!pxrc)
- return -ENOMEM;
-
- mutex_init(&pxrc->pm_mutex);
- pxrc->udev = usb_get_dev(interface_to_usbdev(intf));
- pxrc->intf = intf;
-
- retval = pxrc_usb_init(pxrc);
- if (retval)
- goto error;
-
- retval = pxrc_input_init(pxrc);
- if (retval)
- goto err_free_urb;
+ error = input_register_device(pxrc->input);
+ if (error)
+ return error;
return 0;
-
-err_free_urb:
- usb_free_urb(pxrc->urb);
-
-error:
- return retval;
}
static void pxrc_disconnect(struct usb_interface *intf)
{
- struct pxrc *pxrc = usb_get_intfdata(intf);
-
- usb_free_urb(pxrc->urb);
- usb_set_intfdata(intf, NULL);
+ /* All driver resources are devm-managed. */
}
static int pxrc_suspend(struct usb_interface *intf, pm_message_t message)
@@ -284,6 +256,12 @@ static int pxrc_reset_resume(struct usb_interface *intf)
return pxrc_resume(intf);
}
+static const struct usb_device_id pxrc_table[] = {
+ { USB_DEVICE(PXRC_VENDOR_ID, PXRC_PRODUCT_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, pxrc_table);
+
static struct usb_driver pxrc_driver = {
.name = "pxrc",
.probe = pxrc_probe,
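The pxrc rework above folds every allocation into device-managed (devm) lifetime. URBs have no devm_* variant, so the patch registers a custom release via devm_add_action_or_reset(), which also fires immediately if registration itself fails; that is what lets pxrc_disconnect() become empty. A sketch of the pattern under illustrative example_* names:

#include <linux/device.h>
#include <linux/usb.h>

static void example_free_urb(void *urb)
{
	usb_free_urb(urb);
}

static int example_alloc_urb(struct device *dev, struct urb **out)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int error;

	if (!urb)
		return -ENOMEM;

	/* On failure the action runs at once and the URB is freed. */
	error = devm_add_action_or_reset(dev, example_free_urb, urb);
	if (error)
		return error;

	*out = urb;	/* now freed automatically when the device unbinds */
	return 0;
}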
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 5e602a6852b7..f46bf4d41972 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index bb3faeff8cac..ffb9c1f495b6 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -24,10 +24,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 05da0ed514e2..20540ee71d7f 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index cb10e7b097ae..ba8579435d6c 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 7e17cde464f0..6f4a01cfe79f 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index e2685753e460..bf2f9925e416 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index ef5391ba4470..b60cab168e2a 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 48e36acbeb49..cd620e009bad 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -125,7 +125,7 @@ static const struct xpad_device {
u8 mapping;
u8 xtype;
} xpad_device[] = {
- { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 6bd97ffee761..4713957b0cbb 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -721,7 +721,7 @@ config KEYBOARD_CROS_EC
help
Say Y here to enable the matrix keyboard used by ChromeOS devices
and implemented on the ChromeOS EC. You must enable one bus option
- (MFD_CROS_EC_I2C or MFD_CROS_EC_SPI) to use this.
+ (CROS_EC_I2C or CROS_EC_SPI) to use this.
To compile this driver as a module, choose M here: the
module will be called cros_ec_keyb.
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 32d94c63dc33..2835fba71c33 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -885,6 +885,7 @@ static int adp5589_probe(struct i2c_client *client,
switch (id->driver_data) {
case ADP5585_02:
kpad->support_row5 = true;
+ /* fall through */
case ADP5585_01:
kpad->is_adp5585 = true;
kpad->var = &const_adp5585;
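The "/* fall through */" comment added above (and in the appletouch, cyapa and elantech hunks later in this diff) marks the missing break as deliberate, so GCC's -Wimplicit-fallthrough stays quiet: the ADP5585-02 case sets its extra flag and then intentionally continues into the setup shared with ADP5585-01. A self-contained sketch with made-up variant names:

enum chip_variant { CHIP_A, CHIP_B };

static int variant_rows(enum chip_variant v)
{
	int rows = 5;

	switch (v) {
	case CHIP_B:
		rows = 6;	/* the newer part has one extra row */
		/* fall through */
	case CHIP_A:
		break;		/* setup shared by both variants */
	}

	return rows;
}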
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index e04a3b4e55d6..420e33c49e58 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index f1235831283d..6f62da2909ec 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -34,10 +34,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 489ddd37bd4e..81be6f781f0b 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -1,25 +1,15 @@
-/*
- * ChromeOS EC keyboard driver
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
+// SPDX-License-Identifier: GPL-2.0
+// ChromeOS EC keyboard driver
+//
+// Copyright (C) 2012 Google, Inc.
+//
+// This driver uses the ChromeOS EC byte-level message-based protocol for
+// communicating the keyboard state (which keys are pressed) from a keyboard EC
+// to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
+// but everything else (including deghosting) is done here. The main
+// motivation for this is to keep the EC firmware as simple as possible, since
+// it cannot be easily upgraded and EC flash/IRAM space is relatively
+// expensive.
#include <linux/module.h>
#include <linux/bitops.h>
@@ -170,9 +160,6 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
int col, row;
int new_state;
int old_state;
- int num_cols;
-
- num_cols = len;
if (ckdev->ghost_filter && cros_ec_keyb_has_ghosting(ckdev, kb_state)) {
/*
@@ -242,19 +229,17 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
u32 val;
unsigned int ev_type;
+ /*
+ * If not wake enabled, discard key state changes during
+ * suspend. Switches will be re-checked in
+ * cros_ec_keyb_resume() to be sure nothing is lost.
+ */
+ if (queued_during_suspend && !device_may_wakeup(ckdev->dev))
+ return NOTIFY_OK;
+
switch (ckdev->ec->event_data.event_type) {
case EC_MKBP_EVENT_KEY_MATRIX:
- if (device_may_wakeup(ckdev->dev)) {
- pm_wakeup_event(ckdev->dev, 0);
- } else {
- /*
- * If keyboard is not wake enabled, discard key state
- * changes during suspend. Switches will be re-checked
- * in cros_ec_keyb_resume() to be sure nothing is lost.
- */
- if (queued_during_suspend)
- return NOTIFY_OK;
- }
+ pm_wakeup_event(ckdev->dev, 0);
if (ckdev->ec->event_size != ckdev->cols) {
dev_err(ckdev->dev,
@@ -268,10 +253,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
break;
case EC_MKBP_EVENT_SYSRQ:
- if (device_may_wakeup(ckdev->dev))
- pm_wakeup_event(ckdev->dev, 0);
- else if (queued_during_suspend)
- return NOTIFY_OK;
+ pm_wakeup_event(ckdev->dev, 0);
val = get_unaligned_le32(&ckdev->ec->event_data.data.sysrq);
dev_dbg(ckdev->dev, "sysrq code from EC: %#x\n", val);
@@ -280,10 +262,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_BUTTON:
case EC_MKBP_EVENT_SWITCH:
- if (device_may_wakeup(ckdev->dev))
- pm_wakeup_event(ckdev->dev, 0);
- else if (queued_during_suspend)
- return NOTIFY_OK;
+ pm_wakeup_event(ckdev->dev, 0);
if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) {
val = get_unaligned_le32(
@@ -683,6 +662,6 @@ static struct platform_driver cros_ec_keyb_driver = {
module_platform_driver(cros_ec_keyb_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC keyboard driver");
MODULE_ALIAS("platform:cros-ec-keyb");
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index f6e643b589b6..e8dae6195b30 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -45,7 +45,7 @@ struct event_dev {
static irqreturn_t events_interrupt(int irq, void *dev_id)
{
struct event_dev *edev = dev_id;
- unsigned type, code, value;
+ unsigned int type, code, value;
type = __raw_readl(edev->addr + REG_READ);
code = __raw_readl(edev->addr + REG_READ);
@@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
}
static void events_import_bits(struct event_dev *edev,
- unsigned long bits[], unsigned type, size_t count)
+ unsigned long bits[], unsigned int type, size_t count)
{
void __iomem *addr = edev->addr;
int i, j;
@@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev)
for (j = 0; j < ARRAY_SIZE(val); j++) {
int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+
val[j] = __raw_readl(edev->addr + REG_DATA + offset);
}
@@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev)
struct input_dev *input_dev;
struct event_dev *edev;
struct resource *res;
- unsigned keymapnamelen;
+ unsigned int keymapnamelen;
void __iomem *addr;
int irq;
int i;
@@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev)
for (i = 0; i < keymapnamelen; i++)
edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
- pr_debug("events_probe() keymap=%s\n", edev->name);
+ pr_debug("%s: keymap=%s\n", __func__, edev->name);
input_dev->name = edev->name;
input_dev->id.bustype = BUS_HOST;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 052e37675086..492a971b95b5 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -196,7 +196,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
ssize_t ret;
int i;
- bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL);
+ bits = bitmap_zalloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
@@ -216,7 +216,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
buf[ret++] = '\n';
buf[ret] = '\0';
- kfree(bits);
+ bitmap_free(bits);
return ret;
}
@@ -240,7 +240,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
ssize_t error;
int i;
- bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL);
+ bits = bitmap_zalloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
@@ -284,7 +284,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
mutex_unlock(&ddata->disable_lock);
out:
- kfree(bits);
+ bitmap_free(bits);
return error;
}
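The gpio_keys hunks above swap open-coded kcalloc(BITS_TO_LONGS(...)) allocations for bitmap_zalloc()/bitmap_free(), which take the bit count directly and do the word-sized allocation internally. A minimal sketch (function name illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_bitmap_use(unsigned int n_events)
{
	unsigned long *bits;

	/* was: kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL) */
	bits = bitmap_zalloc(n_events, GFP_KERNEL);
	if (!bits)
		return -ENOMEM;

	__set_bit(0, bits);	/* used like any other kernel bitmap */

	bitmap_free(bits);	/* was: kfree(bits) */
	return 0;
}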
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index a4e404aaf64b..5c7afdec192c 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
#define HIL_DATA 0x1
#define HIL_CMD 0x3
#define HIL_IRQ 2
- #define hil_readb(p) readb(p)
- #define hil_writeb(v,p) writeb((v),(p))
+ #define hil_readb(p) readb((const volatile void __iomem *)(p))
+ #define hil_writeb(v, p) writeb((v), (volatile void __iomem *)(p))
#else
#error "HIL is not supported on this platform"
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 25d61d8d4fc4..539cb670de41 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -1,11 +1,7 @@
-/*
- * Driver for the IMX keypad port.
- * Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the IMX keypad port.
+// Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index fb9b8e23ab93..de26e2df0ad5 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <j.cormack@doc.ic.ac.uk>, or by paper mail:
- * Justin Cormack, 68 Dartmouth Park Road, London NW5 1SN, UK.
*/
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 53c768b95939..effb63205d3d 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -1,14 +1,7 @@
-/*
- * Driver for the IMX SNVS ON/OFF Power Key
- * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Driver for the IMX SNVS ON/OFF Power Key
+// Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/device.h>
#include <linux/err.h>
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index 8b6de9a692dc..15a5e74dbe91 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <marek.vasut@gmail.com>, or by paper mail:
- * Marek Vasut, Liskovecka 559, Frydek-Mistek, 738 01 Czech Republic
*/
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index c95707ea2656..ad5d7f94f95a 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 8f64b9ded8d0..f7598114b962 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/slab.h>
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c25606e00693..ca59a2be9bc5 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON
To compile this driver as a module, choose M here: the
module will be called rave-sp-pwrbutton.
+config INPUT_SC27XX_VIBRA
+ tristate "Spreadtrum sc27xx vibrator support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ select INPUT_FF_MEMLESS
+ help
+	  This option enables support for the Spreadtrum sc27xx vibrator driver.
+
+ To compile this driver as a module, choose M here. The module will
+	  be called sc27xx-vibra.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 72cde28649e2..9d0f9d1ff68f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
+obj-$(CONFIG_INPUT_SC27XX_VIBRA) += sc27xx-vibra.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 67482b248b2d..a8937ceac66a 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -466,7 +466,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
remote->in_endpoint = endpoint;
remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */
- remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
+ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_KERNEL, &remote->in_dma);
if (!remote->in_buffer) {
error = -ENOMEM;
goto fail1;
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 18ad956454f1..48153e0ca19a 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -20,6 +20,7 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
@@ -28,6 +29,7 @@
#define PON_RT_STS 0x10
#define PON_KPDPWR_N_SET BIT(0)
+#define PON_RESIN_N_SET BIT(1)
#define PON_PS_HOLD_RST_CTL 0x5a
#define PON_PS_HOLD_RST_CTL2 0x5b
@@ -38,10 +40,15 @@
#define PON_PULL_CTL 0x70
#define PON_KPDPWR_PULL_UP BIT(1)
+#define PON_RESIN_PULL_UP BIT(0)
#define PON_DBC_CTL 0x71
#define PON_DBC_DELAY_MASK 0x7
+struct pm8941_data {
+ unsigned int pull_up_bit;
+ unsigned int status_bit;
+};
struct pm8941_pwrkey {
struct device *dev;
@@ -52,6 +59,9 @@ struct pm8941_pwrkey {
unsigned int revision;
struct notifier_block reboot_notifier;
+
+ u32 code;
+ const struct pm8941_data *data;
};
static int pm8941_reboot_notify(struct notifier_block *nb,
@@ -124,7 +134,8 @@ static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data)
if (error)
return IRQ_HANDLED;
- input_report_key(pwrkey->input, KEY_POWER, !!(sts & PON_KPDPWR_N_SET));
+ input_report_key(pwrkey->input, pwrkey->code,
+ sts & pwrkey->data->status_bit);
input_sync(pwrkey->input);
return IRQ_HANDLED;
@@ -157,6 +168,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
{
struct pm8941_pwrkey *pwrkey;
bool pull_up;
+ struct device *parent;
u32 req_delay;
int error;
@@ -175,12 +187,30 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return -ENOMEM;
pwrkey->dev = &pdev->dev;
+ pwrkey->data = of_device_get_match_data(&pdev->dev);
- pwrkey->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ parent = pdev->dev.parent;
+ pwrkey->regmap = dev_get_regmap(parent, NULL);
if (!pwrkey->regmap) {
- dev_err(&pdev->dev, "failed to locate regmap\n");
- return -ENODEV;
+ /*
+ * We failed to get regmap for parent. Let's see if we are
+ * a child of pon node and read regmap and reg from its
+ * parent.
+ */
+ pwrkey->regmap = dev_get_regmap(parent->parent, NULL);
+ if (!pwrkey->regmap) {
+ dev_err(&pdev->dev, "failed to locate regmap\n");
+ return -ENODEV;
+ }
+
+ error = of_property_read_u32(parent->of_node,
+ "reg", &pwrkey->baseaddr);
+ } else {
+ error = of_property_read_u32(pdev->dev.of_node, "reg",
+ &pwrkey->baseaddr);
}
+ if (error)
+ return error;
pwrkey->irq = platform_get_irq(pdev, 0);
if (pwrkey->irq < 0) {
@@ -188,11 +218,6 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return pwrkey->irq;
}
- error = of_property_read_u32(pdev->dev.of_node, "reg",
- &pwrkey->baseaddr);
- if (error)
- return error;
-
error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2,
&pwrkey->revision);
if (error) {
@@ -200,13 +225,21 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return error;
}
+ error = of_property_read_u32(pdev->dev.of_node, "linux,code",
+ &pwrkey->code);
+ if (error) {
+ dev_dbg(&pdev->dev,
+ "no linux,code assuming power (%d)\n", error);
+ pwrkey->code = KEY_POWER;
+ }
+
pwrkey->input = devm_input_allocate_device(&pdev->dev);
if (!pwrkey->input) {
dev_dbg(&pdev->dev, "unable to allocate input device\n");
return -ENOMEM;
}
- input_set_capability(pwrkey->input, EV_KEY, KEY_POWER);
+ input_set_capability(pwrkey->input, EV_KEY, pwrkey->code);
pwrkey->input->name = "pm8941_pwrkey";
pwrkey->input->phys = "pm8941_pwrkey/input0";
@@ -225,8 +258,8 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
error = regmap_update_bits(pwrkey->regmap,
pwrkey->baseaddr + PON_PULL_CTL,
- PON_KPDPWR_PULL_UP,
- pull_up ? PON_KPDPWR_PULL_UP : 0);
+ pwrkey->data->pull_up_bit,
+ pull_up ? pwrkey->data->pull_up_bit : 0);
if (error) {
dev_err(&pdev->dev, "failed to set pull: %d\n", error);
return error;
@@ -271,8 +304,19 @@ static int pm8941_pwrkey_remove(struct platform_device *pdev)
return 0;
}
+static const struct pm8941_data pwrkey_data = {
+ .pull_up_bit = PON_KPDPWR_PULL_UP,
+ .status_bit = PON_KPDPWR_N_SET,
+};
+
+static const struct pm8941_data resin_data = {
+ .pull_up_bit = PON_RESIN_PULL_UP,
+ .status_bit = PON_RESIN_N_SET,
+};
+
static const struct of_device_id pm8941_pwr_key_id_table[] = {
- { .compatible = "qcom,pm8941-pwrkey" },
+ { .compatible = "qcom,pm8941-pwrkey", .data = &pwrkey_data },
+ { .compatible = "qcom,pm8941-resin", .data = &resin_data },
{ }
};
MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
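The pm8941-pwrkey change above is the standard OF match-data pattern: per-compatible constants hang off of_device_id.data and are fetched in probe with of_device_get_match_data(), so one driver now serves both the power key and the RESIN input. A sketch with hypothetical compatibles ("vendor,pwrkey", "vendor,resin") and example_* names:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct variant_data {
	unsigned int status_bit;
};

static const struct variant_data pwrkey_variant = { .status_bit = BIT(0) };
static const struct variant_data resin_variant  = { .status_bit = BIT(1) };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,pwrkey", .data = &pwrkey_variant },
	{ .compatible = "vendor,resin",  .data = &resin_variant },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct variant_data *data =
		of_device_get_match_data(&pdev->dev);

	if (!data)
		return -ENODEV;

	/* ... program data->status_bit into the PON registers ... */
	return 0;
}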
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 5c8c79623c87..e8de3aaf9f63 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -277,7 +277,7 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig
static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
{
pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- GFP_ATOMIC, &pm->data_dma);
+ GFP_KERNEL, &pm->data_dma);
if (!pm->data)
return -1;
diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c
new file mode 100644
index 000000000000..295251abbdac
--- /dev/null
+++ b/drivers/input/misc/sc27xx-vibra.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+#define CUR_DRV_CAL_SEL GENMASK(13, 12)
+#define SLP_LDOVIBR_PD_EN BIT(9)
+#define LDO_VIBR_PD BIT(8)
+
+struct vibra_info {
+ struct input_dev *input_dev;
+ struct work_struct play_work;
+ struct regmap *regmap;
+ u32 base;
+ u32 strength;
+ bool enabled;
+};
+
+static void sc27xx_vibra_set(struct vibra_info *info, bool on)
+{
+ if (on) {
+ regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0);
+ regmap_update_bits(info->regmap, info->base,
+ SLP_LDOVIBR_PD_EN, 0);
+ info->enabled = true;
+ } else {
+ regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD,
+ LDO_VIBR_PD);
+ regmap_update_bits(info->regmap, info->base,
+ SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN);
+ info->enabled = false;
+ }
+}
+
+static int sc27xx_vibra_hw_init(struct vibra_info *info)
+{
+ return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0);
+}
+
+static void sc27xx_vibra_play_work(struct work_struct *work)
+{
+ struct vibra_info *info = container_of(work, struct vibra_info,
+ play_work);
+
+ if (info->strength && !info->enabled)
+ sc27xx_vibra_set(info, true);
+ else if (info->strength == 0 && info->enabled)
+ sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+
+ info->strength = effect->u.rumble.weak_magnitude;
+ schedule_work(&info->play_work);
+
+ return 0;
+}
+
+static void sc27xx_vibra_close(struct input_dev *input)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+
+ cancel_work_sync(&info->play_work);
+ if (info->enabled)
+ sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_probe(struct platform_device *pdev)
+{
+ struct vibra_info *info;
+ int error;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!info->regmap) {
+ dev_err(&pdev->dev, "failed to get vibrator regmap.\n");
+ return -ENODEV;
+ }
+
+ error = device_property_read_u32(&pdev->dev, "reg", &info->base);
+ if (error) {
+ dev_err(&pdev->dev, "failed to get vibrator base address.\n");
+ return error;
+ }
+
+ info->input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!info->input_dev) {
+ dev_err(&pdev->dev, "failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ info->input_dev->name = "sc27xx:vibrator";
+ info->input_dev->id.version = 0;
+ info->input_dev->close = sc27xx_vibra_close;
+
+ input_set_drvdata(info->input_dev, info);
+ input_set_capability(info->input_dev, EV_FF, FF_RUMBLE);
+ INIT_WORK(&info->play_work, sc27xx_vibra_play_work);
+ info->enabled = false;
+
+ error = sc27xx_vibra_hw_init(info);
+ if (error) {
+ dev_err(&pdev->dev, "failed to initialize the vibrator.\n");
+ return error;
+ }
+
+ error = input_ff_create_memless(info->input_dev, NULL,
+ sc27xx_vibra_play);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register vibrator to FF.\n");
+ return error;
+ }
+
+ error = input_register_device(info->input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device.\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sc27xx_vibra_of_match[] = {
+ { .compatible = "sprd,sc2731-vibrator", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match);
+
+static struct platform_driver sc27xx_vibra_driver = {
+ .driver = {
+ .name = "sc27xx-vibrator",
+ .of_match_table = sc27xx_vibra_of_match,
+ },
+ .probe = sc27xx_vibra_probe,
+};
+
+module_platform_driver(sc27xx_vibra_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
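The new sc27xx driver exposes a single FF_RUMBLE capability through the memless force-feedback core (note that sc27xx_vibra_play() reads only weak_magnitude), so any evdev client can drive it. A runnable userspace sketch, assuming the vibrator appears as /dev/input/event0; the device node and magnitude are illustrative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;				/* kernel assigns the id */
	effect.u.rumble.weak_magnitude = 0x8000; /* what this driver reads */
	effect.replay.length = 500;		/* ms */

	if (ioctl(fd, EVIOCSFF, &effect) < 0)	/* upload the effect */
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* 1 = start, 0 = stop */
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		return 1;

	sleep(1);
	close(fd);
	return 0;
}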
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index d91f3b1c5375..594f72e39639 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -63,6 +63,9 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *);
static void xenkbd_handle_motion_event(struct xenkbd_info *info,
struct xenkbd_motion *motion)
{
+ if (unlikely(!info->ptr))
+ return;
+
input_report_rel(info->ptr, REL_X, motion->rel_x);
input_report_rel(info->ptr, REL_Y, motion->rel_y);
if (motion->rel_z)
@@ -73,6 +76,9 @@ static void xenkbd_handle_motion_event(struct xenkbd_info *info,
static void xenkbd_handle_position_event(struct xenkbd_info *info,
struct xenkbd_position *pos)
{
+ if (unlikely(!info->ptr))
+ return;
+
input_report_abs(info->ptr, ABS_X, pos->abs_x);
input_report_abs(info->ptr, ABS_Y, pos->abs_y);
if (pos->rel_z)
@@ -97,6 +103,9 @@ static void xenkbd_handle_key_event(struct xenkbd_info *info,
return;
}
+ if (unlikely(!dev))
+ return;
+
input_event(dev, EV_KEY, key->keycode, value);
input_sync(dev);
}
@@ -192,7 +201,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret, i;
- unsigned int abs, touch;
+ bool with_mtouch, with_kbd, with_ptr;
struct xenkbd_info *info;
struct input_dev *kbd, *ptr, *mtouch;
@@ -211,106 +220,127 @@ static int xenkbd_probe(struct xenbus_device *dev,
if (!info->page)
goto error_nomem;
- /* Set input abs params to match backend screen res */
- abs = xenbus_read_unsigned(dev->otherend,
- XENKBD_FIELD_FEAT_ABS_POINTER, 0);
- ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend,
- XENKBD_FIELD_WIDTH,
- ptr_size[KPARAM_X]);
- ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend,
- XENKBD_FIELD_HEIGHT,
- ptr_size[KPARAM_Y]);
- if (abs) {
- ret = xenbus_write(XBT_NIL, dev->nodename,
- XENKBD_FIELD_REQ_ABS_POINTER, "1");
- if (ret) {
- pr_warn("xenkbd: can't request abs-pointer\n");
- abs = 0;
- }
- }
+ /*
+	 * These properties use reverse logic: if the feature flag is set,
+	 * do not expose the corresponding virtual device.
+ */
+ with_kbd = !xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_DSBL_KEYBRD, 0);
+
+ with_ptr = !xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_DSBL_POINTER, 0);
- touch = xenbus_read_unsigned(dev->nodename,
- XENKBD_FIELD_FEAT_MTOUCH, 0);
- if (touch) {
+ /* Direct logic: if set, then create multi-touch device. */
+ with_mtouch = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_MTOUCH, 0);
+ if (with_mtouch) {
ret = xenbus_write(XBT_NIL, dev->nodename,
XENKBD_FIELD_REQ_MTOUCH, "1");
if (ret) {
pr_warn("xenkbd: can't request multi-touch");
- touch = 0;
+ with_mtouch = 0;
}
}
/* keyboard */
- kbd = input_allocate_device();
- if (!kbd)
- goto error_nomem;
- kbd->name = "Xen Virtual Keyboard";
- kbd->phys = info->phys;
- kbd->id.bustype = BUS_PCI;
- kbd->id.vendor = 0x5853;
- kbd->id.product = 0xffff;
-
- __set_bit(EV_KEY, kbd->evbit);
- for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
- __set_bit(i, kbd->keybit);
- for (i = KEY_OK; i < KEY_MAX; i++)
- __set_bit(i, kbd->keybit);
-
- ret = input_register_device(kbd);
- if (ret) {
- input_free_device(kbd);
- xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
- goto error;
+ if (with_kbd) {
+ kbd = input_allocate_device();
+ if (!kbd)
+ goto error_nomem;
+ kbd->name = "Xen Virtual Keyboard";
+ kbd->phys = info->phys;
+ kbd->id.bustype = BUS_PCI;
+ kbd->id.vendor = 0x5853;
+ kbd->id.product = 0xffff;
+
+ __set_bit(EV_KEY, kbd->evbit);
+ for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
+ __set_bit(i, kbd->keybit);
+ for (i = KEY_OK; i < KEY_MAX; i++)
+ __set_bit(i, kbd->keybit);
+
+ ret = input_register_device(kbd);
+ if (ret) {
+ input_free_device(kbd);
+ xenbus_dev_fatal(dev, ret,
+ "input_register_device(kbd)");
+ goto error;
+ }
+ info->kbd = kbd;
}
- info->kbd = kbd;
/* pointing device */
- ptr = input_allocate_device();
- if (!ptr)
- goto error_nomem;
- ptr->name = "Xen Virtual Pointer";
- ptr->phys = info->phys;
- ptr->id.bustype = BUS_PCI;
- ptr->id.vendor = 0x5853;
- ptr->id.product = 0xfffe;
-
- if (abs) {
- __set_bit(EV_ABS, ptr->evbit);
- input_set_abs_params(ptr, ABS_X, 0, ptr_size[KPARAM_X], 0, 0);
- input_set_abs_params(ptr, ABS_Y, 0, ptr_size[KPARAM_Y], 0, 0);
- } else {
- input_set_capability(ptr, EV_REL, REL_X);
- input_set_capability(ptr, EV_REL, REL_Y);
- }
- input_set_capability(ptr, EV_REL, REL_WHEEL);
+ if (with_ptr) {
+ unsigned int abs;
+
+ /* Set input abs params to match backend screen res */
+ abs = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_ABS_POINTER, 0);
+ ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_WIDTH,
+ ptr_size[KPARAM_X]);
+ ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_HEIGHT,
+ ptr_size[KPARAM_Y]);
+ if (abs) {
+ ret = xenbus_write(XBT_NIL, dev->nodename,
+ XENKBD_FIELD_REQ_ABS_POINTER, "1");
+ if (ret) {
+ pr_warn("xenkbd: can't request abs-pointer\n");
+ abs = 0;
+ }
+ }
- __set_bit(EV_KEY, ptr->evbit);
- for (i = BTN_LEFT; i <= BTN_TASK; i++)
- __set_bit(i, ptr->keybit);
+ ptr = input_allocate_device();
+ if (!ptr)
+ goto error_nomem;
+ ptr->name = "Xen Virtual Pointer";
+ ptr->phys = info->phys;
+ ptr->id.bustype = BUS_PCI;
+ ptr->id.vendor = 0x5853;
+ ptr->id.product = 0xfffe;
+
+ if (abs) {
+ __set_bit(EV_ABS, ptr->evbit);
+ input_set_abs_params(ptr, ABS_X, 0,
+ ptr_size[KPARAM_X], 0, 0);
+ input_set_abs_params(ptr, ABS_Y, 0,
+ ptr_size[KPARAM_Y], 0, 0);
+ } else {
+ input_set_capability(ptr, EV_REL, REL_X);
+ input_set_capability(ptr, EV_REL, REL_Y);
+ }
+ input_set_capability(ptr, EV_REL, REL_WHEEL);
- ret = input_register_device(ptr);
- if (ret) {
- input_free_device(ptr);
- xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
- goto error;
+ __set_bit(EV_KEY, ptr->evbit);
+ for (i = BTN_LEFT; i <= BTN_TASK; i++)
+ __set_bit(i, ptr->keybit);
+
+ ret = input_register_device(ptr);
+ if (ret) {
+ input_free_device(ptr);
+ xenbus_dev_fatal(dev, ret,
+ "input_register_device(ptr)");
+ goto error;
+ }
+ info->ptr = ptr;
}
- info->ptr = ptr;
/* multi-touch device */
- if (touch) {
+ if (with_mtouch) {
int num_cont, width, height;
mtouch = input_allocate_device();
if (!mtouch)
goto error_nomem;
- num_cont = xenbus_read_unsigned(info->xbdev->nodename,
+ num_cont = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_NUM_CONTACTS,
1);
- width = xenbus_read_unsigned(info->xbdev->nodename,
+ width = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_WIDTH,
XENFB_WIDTH);
- height = xenbus_read_unsigned(info->xbdev->nodename,
+ height = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_HEIGHT,
XENFB_HEIGHT);
@@ -346,6 +376,11 @@ static int xenkbd_probe(struct xenbus_device *dev,
info->mtouch = mtouch;
}
+ if (!(with_kbd || with_ptr || with_mtouch)) {
+ ret = -ENXIO;
+ goto error;
+ }
+
ret = xenkbd_connect_backend(dev, info);
if (ret < 0)
goto error;
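The xen-kbdfront hunks above follow the xenbus negotiation convention that the fix restores: features advertised by the backend are read from dev->otherend, while the frontend's requests are written under its own dev->nodename (the old code wrongly read the multi-touch parameters from nodename). A sketch of the handshake for one feature; the helper name is illustrative:

#include <xen/xenbus.h>
#include <xen/interface/io/kbdif.h>

static bool example_want_mtouch(struct xenbus_device *dev)
{
	/* read the backend's advertisement; default 0 means absent */
	if (!xenbus_read_unsigned(dev->otherend,
				  XENKBD_FIELD_FEAT_MTOUCH, 0))
		return false;

	/* request the feature under our own frontend node */
	return xenbus_write(XBT_NIL, dev->nodename,
			    XENKBD_FIELD_REQ_MTOUCH, "1") == 0;
}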
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index f0c9bf87b4e3..1365cd94ed9b 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -894,12 +894,12 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* allocate usb buffers */
yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->irq_dma);
+ GFP_KERNEL, &yld->irq_dma);
if (yld->irq_data == NULL)
return usb_cleanup(yld, -ENOMEM);
yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->ctl_dma);
+ GFP_KERNEL, &yld->ctl_dma);
if (!yld->ctl_data)
return usb_cleanup(yld, -ENOMEM);
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 032d27983b6c..f1e66e257cff 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -472,6 +472,7 @@ static int atp_status_check(struct urb *urb)
dev->info->datalen, dev->urb->actual_length);
dev->overflow_warned = true;
}
+ /* fall through */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
@@ -810,7 +811,7 @@ static int atp_open(struct input_dev *input)
{
struct atp *dev = input_get_drvdata(input);
- if (usb_submit_urb(dev->urb, GFP_ATOMIC))
+ if (usb_submit_urb(dev->urb, GFP_KERNEL))
return -EIO;
dev->open = true;
@@ -976,7 +977,7 @@ static int atp_recover(struct atp *dev)
if (error)
return error;
- if (dev->open && usb_submit_urb(dev->urb, GFP_ATOMIC))
+ if (dev->open && usb_submit_urb(dev->urb, GFP_KERNEL))
return -EIO;
return 0;
@@ -994,7 +995,7 @@ static int atp_resume(struct usb_interface *iface)
{
struct atp *dev = usb_get_intfdata(iface);
- if (dev->open && usb_submit_urb(dev->urb, GFP_ATOMIC))
+ if (dev->open && usb_submit_urb(dev->urb, GFP_KERNEL))
return -EIO;
return 0;
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 5775d40b3d53..14239fbd72cf 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -2554,6 +2554,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN5_APP;
+ /* fall through */
case CYAPA_STATE_GEN5_APP:
/*
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 016397850b1b..c1b524ab4623 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -680,6 +680,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN6_APP;
+ /* fall through */
case CYAPA_STATE_GEN6_APP:
/*
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 599544c1a91c..243e0fa6e3e3 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -27,6 +27,8 @@
#define ETP_DISABLE_POWER 0x0001
#define ETP_PRESSURE_OFFSET 25
+#define ETP_CALIBRATE_MAX_LEN 3
+
/* IAP Firmware handling */
#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 8ff75114e762..f5ae24865355 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev,
int tries = 20;
int retval;
int error;
- u8 val[3];
+ u8 val[ETP_CALIBRATE_MAX_LEN];
retval = mutex_lock_interruptible(&data->sysfs_mutex);
if (retval)
@@ -1345,6 +1345,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
+ { "ELAN0618", 0 },
+ { "ELAN061D", 0 },
+ { "ELAN0622", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index cfcb32559925..88e315d2cfd3 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -56,7 +56,7 @@
static int elan_smbus_initialize(struct i2c_client *client)
{
u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
- u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+ u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
int len, error;
/* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
{
int error;
+ u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
error = i2c_smbus_read_block_data(client,
- ETP_SMBUS_CALIBRATE_QUERY, val);
+ ETP_SMBUS_CALIBRATE_QUERY, buf);
if (error < 0)
return error;
+ memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
return 0;
}
@@ -383,7 +387,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
len = i2c_smbus_read_block_data(client,
ETP_SMBUS_IAP_PASSWORD_READ,
val);
- if (len < sizeof(u16)) {
+ if (len < (int)sizeof(u16)) {
error = len < 0 ? len : -EIO;
dev_err(dev, "failed to read iap password: %d\n",
error);
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
{
int len;
+ BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
len = i2c_smbus_read_block_data(client,
ETP_SMBUS_PACKET_QUERY,
&report[ETP_SMBUS_REPORT_OFFSET]);
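Editorial note: the rationale for the bounce buffers above is that i2c_smbus_read_block_data() may write up to I2C_SMBUS_BLOCK_MAX (32) bytes into its destination regardless of how many bytes the caller expects, so reading into a 3-byte stack array can corrupt the stack on a misbehaving device. The safe pattern, sketched with a hypothetical wrapper:

#include <linux/i2c.h>
#include <linux/string.h>

/* Hypothetical wrapper: always give the SMBus core a full-size buffer,
 * then copy out only the bytes the caller asked for. */
static int smbus_read_small(struct i2c_client *client, u8 cmd,
			    u8 *out, size_t want)
{
	u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };
	int len;

	len = i2c_smbus_read_block_data(client, cmd, buf);
	if (len < 0)
		return len;

	memcpy(out, buf, want < sizeof(buf) ? want : sizeof(buf));
	return 0;
}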
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index fb4d902c4403..44f57cf6675b 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -340,7 +340,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
*/
if (packet[3] & 0x80)
fingers = 4;
- /* pass through... */
+ /* fall through */
case 1:
/*
* byte 1: . . . . x11 x10 x9 x8
@@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
else if (ic_version == 7 && etd->info.samples[1] == 0x2A)
sanity_check = ((packet[3] & 0x1c) == 0x10);
else
- sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+ sanity_check = ((packet[0] & 0x08) == 0x00 &&
(packet[3] & 0x1c) == 0x10);
if (!sanity_check)
@@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
{ }
};
+static const char * const middle_button_pnp_ids[] = {
+ "LEN2131", /* ThinkPad P52 w/ NFC */
+ "LEN2132", /* ThinkPad P52 */
+ NULL
+};
+
/*
* Set the appropriate event bits for the input subsystem
*/
@@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__clear_bit(EV_REL, dev->evbit);
__set_bit(BTN_LEFT, dev->keybit);
- if (dmi_check_system(elantech_dmi_has_middle_button))
+ if (dmi_check_system(elantech_dmi_has_middle_button) ||
+ psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
__set_bit(BTN_MIDDLE, dev->keybit);
__set_bit(BTN_RIGHT, dev->keybit);
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index 9ce71dfa0de1..b9e68606c44a 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -26,10 +26,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index 6f165e053f4d..2fd6c84cd5b7 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -27,10 +27,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index 7b02b652e267..b8965e6bc890 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 5ff5b1952be0..d3ff1fc09af7 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
else
input_report_rel(dev, REL_WHEEL, -wheel);
- input_report_key(dev, BTN_SIDE, BIT(4));
- input_report_key(dev, BTN_EXTRA, BIT(5));
+ input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));
+ input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
break;
}
break;
@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
/* Extra buttons on Genius NewNet 3D */
- input_report_key(dev, BTN_SIDE, BIT(6));
- input_report_key(dev, BTN_EXTRA, BIT(7));
+ input_report_key(dev, BTN_SIDE, packet[0] & BIT(6));
+ input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
break;
case PSMOUSE_THINKPS:
/* Extra button on ThinkingMouse */
- input_report_key(dev, BTN_EXTRA, BIT(3));
+ input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
/*
* Without this bit of weirdness moving up gives wildly
@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
* Cortron PS2 Trackball reports SIDE button in the
* 4th bit of the first byte.
*/
- input_report_key(dev, BTN_SIDE, BIT(3));
+ input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
packet[0] |= BIT(3);
break;
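Editorial note: the psmouse-base fixes above share one root cause. input_report_key() takes the button state as its value argument, and the bare BIT(n) constant is always non-zero, so these buttons were reported as permanently pressed. The bit must be tested against the received packet byte:

/* Wrong: BIT(4) is a non-zero constant, so BTN_SIDE reads as held. */
input_report_key(dev, BTN_SIDE, BIT(4));

/* Fixed: report the actual state of bit 4 in the received byte. */
input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));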
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index 8df526620ebf..3e8fb8136452 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
@@ -143,7 +139,8 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
switch (sermouse->type) {
case SERIO_MS:
- sermouse->type = SERIO_MP;
+ sermouse->type = SERIO_MP;
+ /* fall through */
case SERIO_MP:
if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */
@@ -154,6 +151,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
case SERIO_MZP:
case SERIO_MZPP:
input_report_key(dev, BTN_SIDE, (data >> 5) & 1);
+ /* fall through */
case SERIO_MZ:
input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1);
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 7172b88cd064..fad2eae4a118 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -3,6 +3,7 @@
#
config RMI4_CORE
tristate "Synaptics RMI4 bus support"
+ select IRQ_DOMAIN
help
Say Y here if you want to support the Synaptics RMI4 bus. This is
required for all RMI4 device support.
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index 8bb866c7b985..8eeffa066022 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
if (obj->type == RMI_2D_OBJECT_NONE)
return;
- if (axis_align->swap_axes)
- swap(obj->x, obj->y);
-
if (axis_align->flip_x)
obj->x = sensor->max_x - obj->x;
if (axis_align->flip_y)
obj->y = sensor->max_y - obj->y;
+ if (axis_align->swap_axes)
+ swap(obj->x, obj->y);
+
/*
* Here checking if X offset or y offset are specified is
* redundant. We just add the offsets or clip the values.
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
- if (axis_align->swap_axes)
- swap(x, y);
-
if (axis_align->flip_x)
x = min(RMI_2D_REL_POS_MAX, -x);
if (axis_align->flip_y)
y = min(RMI_2D_REL_POS_MAX, -y);
+ if (axis_align->swap_axes)
+ swap(x, y);
+
if (x || y) {
input_report_rel(sensor->input, REL_X, x);
input_report_rel(sensor->input, REL_Y, y);
@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
struct input_dev *input = sensor->input;
int res_x;
int res_y;
+ int max_x, max_y;
int input_flags = 0;
if (sensor->report_abs) {
- if (sensor->axis_align.swap_axes) {
- swap(sensor->max_x, sensor->max_y);
- swap(sensor->axis_align.clip_x_low,
- sensor->axis_align.clip_y_low);
- swap(sensor->axis_align.clip_x_high,
- sensor->axis_align.clip_y_high);
- }
-
sensor->min_x = sensor->axis_align.clip_x_low;
if (sensor->axis_align.clip_x_high)
sensor->max_x = min(sensor->max_x,
@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
sensor->axis_align.clip_y_high);
set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
- 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
- 0, 0);
+
+ max_x = sensor->max_x;
+ max_y = sensor->max_y;
+ if (sensor->axis_align.swap_axes)
+ swap(max_x, max_y);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
if (sensor->x_mm && sensor->y_mm) {
res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+ if (sensor->axis_align.swap_axes)
+ swap(res_x, res_y);
input_abs_set_res(input, ABS_X, res_x);
input_abs_set_res(input, ABS_Y, res_y);
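Editorial note: the reordering in rmi_2d_sensor.c matters because flip and swap do not commute; flip_x must mirror against the sensor's native X range before the axes are exchanged. A worked example with hypothetical values:

/* max_x = 100, max_y = 200, raw point (10, 20), flip_x + swap_axes set.
 *
 * New order - flip in the native orientation, then swap:
 *   x = 100 - 10 = 90;   -> (90, 20)
 *   swap(x, y);          -> (20, 90)      correct
 *
 * Old order - swap first, then flip what is now the wrong axis:
 *   swap(x, y);          -> (20, 10)
 *   x = 100 - 20 = 80;   -> (80, 10)      wrong
 */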
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index c5fa53adba8d..bd0d5ff01b08 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -9,6 +9,8 @@
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/pm.h>
#include <linux/rmi.h>
@@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn)
{}
#endif
+static struct irq_chip rmi_irq_chip = {
+ .name = "rmi4",
+};
+
+static int rmi_create_function_irq(struct rmi_function *fn,
+ struct rmi_function_handler *handler)
+{
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+ int i, error;
+
+ for (i = 0; i < fn->num_of_irqs; i++) {
+ set_bit(fn->irq_pos + i, fn->irq_mask);
+
+ fn->irq[i] = irq_create_mapping(drvdata->irqdomain,
+ fn->irq_pos + i);
+
+ irq_set_chip_data(fn->irq[i], fn);
+ irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(fn->irq[i], 1);
+
+ error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
+ handler->attention, IRQF_ONESHOT,
+ dev_name(&fn->dev), fn);
+ if (error) {
+ dev_err(&fn->dev, "Error %d registering IRQ\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
static int rmi_function_probe(struct device *dev)
{
struct rmi_function *fn = to_rmi_function(dev);
@@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev)
if (handler->probe) {
error = handler->probe(fn);
- return error;
+ if (error)
+ return error;
+ }
+
+ if (fn->num_of_irqs && handler->attention) {
+ error = rmi_create_function_irq(fn, handler);
+ if (error)
+ return error;
}
return 0;
@@ -230,12 +272,18 @@ err_put_device:
void rmi_unregister_function(struct rmi_function *fn)
{
+ int i;
+
rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
fn->fd.function_number);
device_del(&fn->dev);
of_node_put(fn->dev.of_node);
put_device(&fn->dev);
+
+ for (i = 0; i < fn->num_of_irqs; i++)
+ irq_dispose_mapping(fn->irq[i]);
+
}
/**
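Editorial note: with rmi_create_function_irq() in place, each function handler's attention callback becomes an ordinary threaded-IRQ handler: the hwirqs in the function's range are mapped into the driver's IRQ domain, marked nested, and wired up with devm_request_threaded_irq(). The handler side now looks like this (a sketch mirroring the F01/F03 conversions below; struct rmi_function comes from the driver's rmi_bus.h):

#include <linux/interrupt.h>

static irqreturn_t fxx_attention(int irq, void *ctx)
{
	struct rmi_function *fn = ctx;	/* dev_id passed at request time */

	/* ... read the function's data registers, report input ... */

	return IRQ_HANDLED;
}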
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index b7625a9ac66a..96383eab41ba 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -14,6 +14,12 @@
struct rmi_device;
+/*
+ * The interrupt source count in the function descriptor can represent up to
+ * 6 interrupt sources in the normal manner.
+ */
+#define RMI_FN_MAX_IRQS 6
+
/**
* struct rmi_function - represents the implementation of an RMI4
* function for a particular device (basically, a driver for that RMI4 function)
@@ -26,6 +32,7 @@ struct rmi_device;
* @irq_pos: The position in the irq bitfield this function holds
* @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
* interrupt handling.
+ * @irq: assigned virq numbers (up to num_of_irqs)
*
* @node: entry in device's list of functions
*/
@@ -36,6 +43,7 @@ struct rmi_function {
struct list_head node;
unsigned int num_of_irqs;
+ int irq[RMI_FN_MAX_IRQS];
unsigned int irq_pos;
unsigned long irq_mask[];
};
@@ -76,7 +84,7 @@ struct rmi_function_handler {
void (*remove)(struct rmi_function *fn);
int (*config)(struct rmi_function *fn);
int (*reset)(struct rmi_function *fn);
- int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+ irqreturn_t (*attention)(int irq, void *ctx);
int (*suspend)(struct rmi_function *fn);
int (*resume)(struct rmi_function *fn);
};
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 7d29053dfb0f..fc3ab93b7aea 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -21,6 +21,7 @@
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
@@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
return 0;
}
-static void process_one_interrupt(struct rmi_driver_data *data,
- struct rmi_function *fn)
-{
- struct rmi_function_handler *fh;
-
- if (!fn || !fn->dev.driver)
- return;
-
- fh = to_rmi_function_handler(fn->dev.driver);
- if (fh->attention) {
- bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
- data->irq_count);
- if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
- fh->attention(fn, data->fn_irq_bits);
- }
-}
-
static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
struct device *dev = &rmi_dev->dev;
- struct rmi_function *entry;
+ int i;
int error;
if (!data)
@@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
*/
mutex_unlock(&data->irq_mutex);
- /*
- * It would be nice to be able to use irq_chip to handle these
- * nested IRQs. Unfortunately, most of the current customers for
- * this driver are using older kernels (3.0.x) that don't support
- * the features required for that. Once they've shifted to more
- * recent kernels (say, 3.3 and higher), this should be switched to
- * use irq_chip.
- */
- list_for_each_entry(entry, &data->function_list, node)
- process_one_interrupt(data, entry);
+ for_each_set_bit(i, data->irq_status, data->irq_count)
+ handle_nested_irq(irq_find_mapping(data->irqdomain, i));
if (data->input)
input_sync(data->input);
@@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume);
static int rmi_driver_remove(struct device *dev)
{
struct rmi_device *rmi_dev = to_rmi_device(dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
rmi_disable_irq(rmi_dev, false);
+ irq_domain_remove(data->irqdomain);
+ data->irqdomain = NULL;
+
rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
@@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
{
struct rmi_device *rmi_dev = data->rmi_dev;
struct device *dev = &rmi_dev->dev;
- int irq_count;
+ struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
+ int irq_count = 0;
size_t size;
int retval;
@@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
* being accessed.
*/
rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
- irq_count = 0;
data->bootloader_mode = false;
retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
@@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
if (data->bootloader_mode)
dev_warn(dev, "Device in bootloader mode.\n");
+ /* Allocate and register a linear revmap irq_domain */
+ data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
+ &irq_domain_simple_ops,
+ data);
+ if (!data->irqdomain) {
+ dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
data->irq_count = irq_count;
data->num_of_irq_regs = (data->irq_count + 7) / 8;
@@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data)
{
struct rmi_device *rmi_dev = data->rmi_dev;
struct device *dev = &rmi_dev->dev;
- int irq_count;
+ int irq_count = 0;
int retval;
- irq_count = 0;
rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
if (retval < 0) {
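Editorial note: the dispatch side of the rmi_driver.c conversion is now a generic irq_domain walk. For every bit set in the interrupt status, the driver looks up the mapped virq and invokes its nested handler, replacing the hand-rolled iteration over the function list. Sketched in isolation, assuming a populated linear domain:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Sketch: run the nested (threaded) handler of every pending source. */
static void dispatch_pending(struct irq_domain *domain,
			     unsigned long *status, unsigned int nbits)
{
	unsigned int hwirq;

	for_each_set_bit(hwirq, status, nbits)
		handle_nested_irq(irq_find_mapping(domain, hwirq));
}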
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index 8a07ae147df6..4edaa14fe878 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn)
return 0;
}
-static int rmi_f01_attention(struct rmi_function *fn,
- unsigned long *irq_bits)
+static irqreturn_t rmi_f01_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
int error;
u8 device_status;
@@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn,
if (error) {
dev_err(&fn->dev,
"Failed to read device status: %d.\n", error);
- return error;
+ return IRQ_RETVAL(error);
}
if (RMI_F01_STATUS_BOOTLOADER(device_status))
@@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn,
error = rmi_dev->driver->reset_handler(rmi_dev);
if (error) {
dev_err(&fn->dev, "Device reset failed: %d\n", error);
- return error;
+ return IRQ_RETVAL(error);
}
}
- return 0;
+ return IRQ_HANDLED;
}
struct rmi_function_handler rmi_f01_handler = {
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 88822196d6b7..aaa1edc95522 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn)
return 0;
}
-static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f03_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f03_data *f03 = dev_get_drvdata(&fn->dev);
@@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
/* First grab the data passed by the transport device */
if (drvdata->attn_data.size < ob_len) {
dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
- return 0;
+ return IRQ_HANDLED;
}
memcpy(obs, drvdata->attn_data.data, ob_len);
@@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
"%s: Failed to read F03 output buffers: %d\n",
__func__, error);
serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
- return error;
+ return IRQ_RETVAL(error);
}
}
@@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
serio_interrupt(f03->serio, ob_data, serio_flags);
}
- return 0;
+ return IRQ_HANDLED;
}
static void rmi_f03_remove(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index 12a233251793..df64d6aed4f7 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
}
static void rmi_f11_finger_handler(struct f11_data *f11,
- struct rmi_2d_sensor *sensor,
- unsigned long *irq_bits, int num_irq_regs,
- int size)
+ struct rmi_2d_sensor *sensor, int size)
{
const u8 *f_state = f11->data.f_state;
u8 finger_state;
@@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
int rel_fingers;
int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
- int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
- num_irq_regs * 8);
- int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
- num_irq_regs * 8);
-
- if (abs_bits) {
+ if (sensor->report_abs) {
if (abs_size > size)
abs_fingers = size / RMI_F11_ABS_BYTES;
else
@@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
finger_state, i);
}
- }
- if (rel_bits) {
- if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
- rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
- else
- rel_fingers = sensor->nbr_fingers;
-
- for (i = 0; i < rel_fingers; i++)
- rmi_f11_rel_pos_report(f11, i);
- }
-
- if (abs_bits) {
/*
* the absolute part is made in 2 parts to allow the kernel
* tracking to take place.
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
}
input_mt_sync_frame(sensor->input);
+ } else if (sensor->report_rel) {
+ if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+ rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+ else
+ rel_fingers = sensor->nbr_fingers;
+
+ for (i = 0; i < rel_fingers; i++)
+ rmi_f11_rel_pos_report(f11, i);
}
+
}
static int f11_2d_construct_data(struct f11_data *f11)
@@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn)
return 0;
}
-static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f11_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f11_data *f11 = dev_get_drvdata(&fn->dev);
@@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
data_base_addr, f11->sensor.data_pkt,
f11->sensor.pkt_size);
if (error < 0)
- return error;
+ return IRQ_RETVAL(error);
}
- rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
- drvdata->num_of_irq_regs, valid_bytes);
+ rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes);
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f11_resume(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index a3d1aa88f2a9..5c7f48915779 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
}
-static int rmi_f12_attention(struct rmi_function *fn,
- unsigned long *irq_nr_regs)
+static irqreturn_t rmi_f12_attention(int irq, void *ctx)
{
int retval;
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f12_data *f12 = dev_get_drvdata(&fn->dev);
@@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
if (retval < 0) {
dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
retval);
- return retval;
+ return IRQ_RETVAL(retval);
}
}
@@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
input_mt_sync_frame(sensor->input);
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f12_write_control_regs(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 82e0f0d43d55..5e3ed5ac0c3e 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn,
}
}
-static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f30_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct f30_data *f30 = dev_get_drvdata(&fn->dev);
struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
int error;
@@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
if (drvdata->attn_data.size < f30->register_count) {
dev_warn(&fn->dev,
"F30 interrupted, but data is missing\n");
- return 0;
+ return IRQ_HANDLED;
}
memcpy(f30->data_regs, drvdata->attn_data.data,
f30->register_count);
@@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
dev_err(&fn->dev,
"%s: Failed to read F30 data registers: %d\n",
__func__, error);
- return error;
+ return IRQ_RETVAL(error);
}
}
@@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
rmi_f03_commit_buttons(f30->f03);
}
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f30_config(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index f1f5ac539d5d..87a7d4ba382d 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command,
return 0;
}
-static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f34_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct f34_data *f34 = dev_get_drvdata(&fn->dev);
int ret;
u8 status;
@@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
complete(&f34->v7.cmd_done);
}
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index e8a59d164019..a6f515bcab22 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -610,11 +610,6 @@ error:
mutex_unlock(&f54->data_mutex);
}
-static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
-{
- return 0;
-}
-
static int rmi_f54_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
@@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = {
.func = 0x54,
.probe = rmi_f54_probe,
.config = rmi_f54_config,
- .attention = rmi_f54_attention,
.remove = rmi_f54_remove,
};
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 3df501c3421b..f8663d7891f2 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -20,32 +20,33 @@
* However, when used with the E3 mailboard that produces non-standard
* scancodes, a custom key table must be prepared and loaded from userspace.
*/
-#include <linux/gpio.h>
#include <linux/irq.h>
+#include <linux/platform_data/ams-delta-fiq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/serio.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/mach-types.h>
-#include <mach/board-ams-delta.h>
-
-#include <mach/ams-delta-fiq.h>
+#define DRIVER_NAME "ams-delta-serio"
MODULE_AUTHOR("Matt Callow");
MODULE_DESCRIPTION("AMS Delta (E3) keyboard port driver");
MODULE_LICENSE("GPL");
-static struct serio *ams_delta_serio;
+struct ams_delta_serio {
+ struct serio *serio;
+ struct regulator *vcc;
+ unsigned int *fiq_buffer;
+};
-static int check_data(int data)
+static int check_data(struct serio *serio, int data)
{
int i, parity = 0;
/* check valid stop bit */
if (!(data & 0x400)) {
- dev_warn(&ams_delta_serio->dev,
- "invalid stop bit, data=0x%X\n",
- data);
+ dev_warn(&serio->dev, "invalid stop bit, data=0x%X\n", data);
return SERIO_FRAME;
}
/* calculate the parity */
@@ -55,9 +56,9 @@ static int check_data(int data)
}
/* it should be odd */
if (!(parity & 0x01)) {
- dev_warn(&ams_delta_serio->dev,
- "parity check failed, data=0x%X parity=0x%X\n",
- data, parity);
+ dev_warn(&serio->dev,
+ "parity check failed, data=0x%X parity=0x%X\n", data,
+ parity);
return SERIO_PARITY;
}
return 0;
@@ -65,127 +66,130 @@ static int check_data(int data)
static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
{
- int *circ_buff = &fiq_buffer[FIQ_CIRC_BUFF];
+ struct ams_delta_serio *priv = dev_id;
+ int *circ_buff = &priv->fiq_buffer[FIQ_CIRC_BUFF];
int data, dfl;
u8 scancode;
- fiq_buffer[FIQ_IRQ_PEND] = 0;
+ priv->fiq_buffer[FIQ_IRQ_PEND] = 0;
/*
* Read data from the circular buffer, check it
* and then pass it on the serio
*/
- while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
+ while (priv->fiq_buffer[FIQ_KEYS_CNT] > 0) {
- data = circ_buff[fiq_buffer[FIQ_HEAD_OFFSET]++];
- fiq_buffer[FIQ_KEYS_CNT]--;
- if (fiq_buffer[FIQ_HEAD_OFFSET] == fiq_buffer[FIQ_BUF_LEN])
- fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+ data = circ_buff[priv->fiq_buffer[FIQ_HEAD_OFFSET]++];
+ priv->fiq_buffer[FIQ_KEYS_CNT]--;
+ if (priv->fiq_buffer[FIQ_HEAD_OFFSET] ==
+ priv->fiq_buffer[FIQ_BUF_LEN])
+ priv->fiq_buffer[FIQ_HEAD_OFFSET] = 0;
- dfl = check_data(data);
+ dfl = check_data(priv->serio, data);
scancode = (u8) (data >> 1) & 0xFF;
- serio_interrupt(ams_delta_serio, scancode, dfl);
+ serio_interrupt(priv->serio, scancode, dfl);
}
return IRQ_HANDLED;
}
static int ams_delta_serio_open(struct serio *serio)
{
- /* enable keyboard */
- gpio_set_value(AMS_DELTA_GPIO_PIN_KEYBRD_PWR, 1);
+ struct ams_delta_serio *priv = serio->port_data;
- return 0;
+ /* enable keyboard */
+ return regulator_enable(priv->vcc);
}
static void ams_delta_serio_close(struct serio *serio)
{
+ struct ams_delta_serio *priv = serio->port_data;
+
/* disable keyboard */
- gpio_set_value(AMS_DELTA_GPIO_PIN_KEYBRD_PWR, 0);
+ regulator_disable(priv->vcc);
}
-static const struct gpio ams_delta_gpios[] __initconst_or_module = {
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_DATA,
- .flags = GPIOF_DIR_IN,
- .label = "serio-data",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK,
- .flags = GPIOF_DIR_IN,
- .label = "serio-clock",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_PWR,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "serio-power",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_DATAOUT,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "serio-dataout",
- },
-};
-
-static int __init ams_delta_serio_init(void)
+static int ams_delta_serio_init(struct platform_device *pdev)
{
- int err;
-
- if (!machine_is_ams_delta())
- return -ENODEV;
+ struct ams_delta_serio *priv;
+ struct serio *serio;
+ int irq, err;
- ams_delta_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
- if (!ams_delta_serio)
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- ams_delta_serio->id.type = SERIO_8042;
- ams_delta_serio->open = ams_delta_serio_open;
- ams_delta_serio->close = ams_delta_serio_close;
- strlcpy(ams_delta_serio->name, "AMS DELTA keyboard adapter",
- sizeof(ams_delta_serio->name));
- strlcpy(ams_delta_serio->phys, "GPIO/serio0",
- sizeof(ams_delta_serio->phys));
-
- err = gpio_request_array(ams_delta_gpios,
- ARRAY_SIZE(ams_delta_gpios));
- if (err) {
- pr_err("ams_delta_serio: Couldn't request gpio pins\n");
- goto serio;
+ priv->fiq_buffer = pdev->dev.platform_data;
+ if (!priv->fiq_buffer)
+ return -EINVAL;
+
+ priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(priv->vcc)) {
+ err = PTR_ERR(priv->vcc);
+ dev_err(&pdev->dev, "regulator request failed (%d)\n", err);
+ /*
+ * When running on a non-dt platform and requested regulator
+ * is not available, devm_regulator_get() never returns
+ * -EPROBE_DEFER as it is not able to tell whether the regulator
+ * may still appear later. On the other hand, the board can
+ * still set full constraints flag at late_initcall in order
+ * to instruct devm_regulator_get() to return a dummy one
+ * if sufficient. Hence, if we get -ENODEV here, let's convert
+ * it to -EPROBE_DEFER and wait for the board to decide or
+ * let Deferred Probe infrastructure handle this error.
+ */
+ if (err == -ENODEV)
+ err = -EPROBE_DEFER;
+ return err;
}
- err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
- ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
- "ams-delta-serio", 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ err = devm_request_irq(&pdev->dev, irq, ams_delta_serio_interrupt,
+ IRQ_TYPE_EDGE_RISING, DRIVER_NAME, priv);
if (err < 0) {
- pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
- gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
- goto gpio;
+ dev_err(&pdev->dev, "IRQ request failed (%d)\n", err);
+ return err;
}
- /*
- * Since GPIO register handling for keyboard clock pin is performed
- * at FIQ level, switch back from edge to simple interrupt handler
- * to avoid bad interaction.
- */
- irq_set_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
- handle_simple_irq);
- serio_register_port(ams_delta_serio);
- dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
+ serio = kzalloc(sizeof(*serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
+ priv->serio = serio;
+
+ serio->id.type = SERIO_8042;
+ serio->open = ams_delta_serio_open;
+ serio->close = ams_delta_serio_close;
+ strlcpy(serio->name, "AMS DELTA keyboard adapter", sizeof(serio->name));
+ strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
+ serio->dev.parent = &pdev->dev;
+ serio->port_data = priv;
+
+ serio_register_port(serio);
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(&serio->dev, "%s\n", serio->name);
return 0;
-gpio:
- gpio_free_array(ams_delta_gpios,
- ARRAY_SIZE(ams_delta_gpios));
-serio:
- kfree(ams_delta_serio);
- return err;
}
-module_init(ams_delta_serio_init);
-static void __exit ams_delta_serio_exit(void)
+static int ams_delta_serio_exit(struct platform_device *pdev)
{
- serio_unregister_port(ams_delta_serio);
- free_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
- gpio_free_array(ams_delta_gpios,
- ARRAY_SIZE(ams_delta_gpios));
+ struct ams_delta_serio *priv = platform_get_drvdata(pdev);
+
+ serio_unregister_port(priv->serio);
+
+ return 0;
}
-module_exit(ams_delta_serio_exit);
+
+static struct platform_driver ams_delta_serio_driver = {
+ .probe = ams_delta_serio_init,
+ .remove = ams_delta_serio_exit,
+ .driver = {
+ .name = DRIVER_NAME
+ },
+};
+module_platform_driver(ams_delta_serio_driver);
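Editorial note: one detail of this conversion worth restating. On non-DT boards a missing supply makes devm_regulator_get() fail with -ENODEV rather than -EPROBE_DEFER, so the probe converts the former into the latter to be retried once the board code registers its regulators (or sets full constraints so a dummy regulator is handed out). A condensed sketch of the pattern, with a hypothetical helper:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical helper: request a supply, deferring instead of failing
 * hard when a non-DT board has not registered it yet. */
static struct regulator *get_vcc_or_defer(struct device *dev)
{
	struct regulator *vcc = devm_regulator_get(dev, "vcc");

	if (IS_ERR(vcc) && PTR_ERR(vcc) == -ENODEV)
		return ERR_PTR(-EPROBE_DEFER);
	return vcc;
}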
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 9c54c43c9749..2d1e2993b5a8 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 25151d9214e0..47a0e81a2989 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -424,6 +424,9 @@ static struct hv_driver hv_kbd_drv = {
.id_table = id_table,
.probe = hv_kbd_probe,
.remove = hv_kbd_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init hv_kbd_init(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b353d494ad40..136f6e7bf797 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
},
+ {
+ /* Lenovo LaVie Z */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 824f4c1c1f31..b8bc71569349 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -573,6 +573,9 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
port = &i8042_ports[port_no];
serio = port->exists ? port->serio : NULL;
+ if (irq && serio)
+ pm_wakeup_event(&serio->dev, 0);
+
filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
port_no, irq,
dfl & SERIO_PARITY ? ", bad parity" : "",
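Editorial note: the pm_wakeup_event() call added to i8042_interrupt() tells the PM core that the port saw activity; a zero timeout simply flags the event, which is enough to abort an in-progress system suspend and attribute the wakeup to the serio device:

#include <linux/pm_wakeup.h>

/* In the interrupt path: report activity on the wakeup-capable device;
 * msec = 0 means "flag the event, no extra processing window needed". */
if (irq && serio)
	pm_wakeup_event(&serio->dev, 0);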
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index d0fccc8ec259..fbb6b33845fa 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 8cf964736902..a308d7811427 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 24a90c8db5b3..2e1fb0649260 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 545fa6e89035..c82cd5079d0e 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1712,7 +1712,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH,
- GFP_ATOMIC, &aiptek->data_dma);
+ GFP_KERNEL, &aiptek->data_dma);
if (!aiptek->data) {
dev_warn(&intf->dev, "cannot allocate usb buffer\n");
goto fail1;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 32267c1afebc..2a80675cfd94 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -92,6 +92,19 @@ config TOUCHSCREEN_AD7879_SPI
To compile this driver as a module, choose M here: the
module will be called ad7879-spi.
+config TOUCHSCREEN_ADC
+ tristate "Generic ADC based resistive touchscreen"
+ depends on IIO
+ select IIO_BUFFER_CB
+ help
+ Say Y here if you want to use the generic ADC
+ resistive touchscreen driver.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called resistive-adc-touch.ko.
+
config TOUCHSCREEN_AR1021_I2C
tristate "Microchip AR1020/1021 i2c touchscreen"
depends on I2C && OF
@@ -151,6 +164,18 @@ config TOUCHSCREEN_BU21013
To compile this driver as a module, choose M here: the
module will be called bu21013_ts.
+config TOUCHSCREEN_BU21029
+ tristate "Rohm BU21029 based touch panel controllers"
+ depends on I2C
+ help
+ Say Y here if you have a Rohm BU21029 touchscreen controller
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bu21029_ts.
+
config TOUCHSCREEN_CHIPONE_ICN8318
tristate "chipone icn8318 touchscreen controller"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fd4fd32fb73f..5911a4190cd2 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -13,11 +13,13 @@ obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
+obj-$(CONFIG_TOUCHSCREEN_ADC) += resistive-adc-touch.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_AR1021_I2C) += ar1021_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
+obj-$(CONFIG_TOUCHSCREEN_BU21029) += bu21029_ts.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 54fe190fd4bc..3232af5dcf89 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -75,6 +75,7 @@
#define MXT_SPT_DIGITIZER_T43 43
#define MXT_SPT_MESSAGECOUNT_T44 44
#define MXT_SPT_CTECONFIG_T46 46
+#define MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71 71
#define MXT_TOUCH_MULTITOUCHSCREEN_T100 100
/* MXT_GEN_MESSAGE_T5 object */
@@ -88,12 +89,12 @@
#define MXT_COMMAND_DIAGNOSTIC 5
/* Define for T6 status byte */
-#define MXT_T6_STATUS_RESET (1 << 7)
-#define MXT_T6_STATUS_OFL (1 << 6)
-#define MXT_T6_STATUS_SIGERR (1 << 5)
-#define MXT_T6_STATUS_CAL (1 << 4)
-#define MXT_T6_STATUS_CFGERR (1 << 3)
-#define MXT_T6_STATUS_COMSERR (1 << 2)
+#define MXT_T6_STATUS_RESET BIT(7)
+#define MXT_T6_STATUS_OFL BIT(6)
+#define MXT_T6_STATUS_SIGERR BIT(5)
+#define MXT_T6_STATUS_CAL BIT(4)
+#define MXT_T6_STATUS_CFGERR BIT(3)
+#define MXT_T6_STATUS_COMSERR BIT(2)
/* MXT_GEN_POWER_T7 field */
struct t7_config {
@@ -112,14 +113,14 @@ struct t7_config {
#define MXT_T9_RANGE 18
/* MXT_TOUCH_MULTI_T9 status */
-#define MXT_T9_UNGRIP (1 << 0)
-#define MXT_T9_SUPPRESS (1 << 1)
-#define MXT_T9_AMP (1 << 2)
-#define MXT_T9_VECTOR (1 << 3)
-#define MXT_T9_MOVE (1 << 4)
-#define MXT_T9_RELEASE (1 << 5)
-#define MXT_T9_PRESS (1 << 6)
-#define MXT_T9_DETECT (1 << 7)
+#define MXT_T9_UNGRIP BIT(0)
+#define MXT_T9_SUPPRESS BIT(1)
+#define MXT_T9_AMP BIT(2)
+#define MXT_T9_VECTOR BIT(3)
+#define MXT_T9_MOVE BIT(4)
+#define MXT_T9_RELEASE BIT(5)
+#define MXT_T9_PRESS BIT(6)
+#define MXT_T9_DETECT BIT(7)
struct t9_range {
__le16 x;
@@ -127,9 +128,9 @@ struct t9_range {
} __packed;
/* MXT_TOUCH_MULTI_T9 orient */
-#define MXT_T9_ORIENT_SWITCH (1 << 0)
-#define MXT_T9_ORIENT_INVERTX (1 << 1)
-#define MXT_T9_ORIENT_INVERTY (1 << 2)
+#define MXT_T9_ORIENT_SWITCH BIT(0)
+#define MXT_T9_ORIENT_INVERTX BIT(1)
+#define MXT_T9_ORIENT_INVERTY BIT(2)
/* MXT_SPT_COMMSCONFIG_T18 */
#define MXT_COMMS_CTRL 0
@@ -214,7 +215,7 @@ enum t100_type {
#define MXT_FRAME_CRC_PASS 0x04
#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
#define MXT_BOOT_STATUS_MASK 0x3f
-#define MXT_BOOT_EXTENDED_ID (1 << 5)
+#define MXT_BOOT_EXTENDED_ID BIT(5)
#define MXT_BOOT_ID_MASK 0x1f
/* Touchscreen absolute values */
@@ -276,6 +277,19 @@ enum mxt_suspend_mode {
MXT_SUSPEND_T9_CTRL = 1,
};
+/* Config update context */
+struct mxt_cfg {
+ u8 *raw;
+ size_t raw_size;
+ off_t raw_pos;
+
+ u8 *mem;
+ size_t mem_size;
+ int start_ofs;
+
+ struct mxt_info info;
+};
+
/* Each client has this additional data */
struct mxt_data {
struct i2c_client *client;
@@ -317,6 +331,7 @@ struct mxt_data {
u8 T6_reportid;
u16 T6_address;
u16 T7_address;
+ u16 T71_address;
u8 T9_reportid_min;
u8 T9_reportid_max;
u8 T19_reportid;
@@ -382,6 +397,7 @@ static bool mxt_object_readable(unsigned int type)
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71:
return true;
default:
return false;
@@ -712,13 +728,13 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
u8 status = msg[1];
u32 crc = msg[2] | (msg[3] << 8) | (msg[4] << 16);
- complete(&data->crc_completion);
-
if (crc != data->config_crc) {
data->config_crc = crc;
dev_dbg(dev, "T6 Config Checksum: 0x%06X\n", crc);
}
+ complete(&data->crc_completion);
+
/* Detect reset */
if (status & MXT_T6_STATUS_RESET)
complete(&data->reset_completion);
@@ -827,6 +843,10 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
mxt_input_sync(data);
}
+ /* if active, pressure must be non-zero */
+ if (!amplitude)
+ amplitude = MXT_PRESSURE_DEFAULT;
+
/* Touch active */
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 1);
input_report_abs(input_dev, ABS_MT_POSITION_X, x);
@@ -1279,12 +1299,7 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
return crc;
}
-static int mxt_prepare_cfg_mem(struct mxt_data *data,
- const struct firmware *cfg,
- unsigned int data_pos,
- unsigned int cfg_start_ofs,
- u8 *config_mem,
- size_t config_mem_size)
+static int mxt_prepare_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
{
struct device *dev = &data->client->dev;
struct mxt_object *object;
@@ -1295,9 +1310,9 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
u16 reg;
u8 val;
- while (data_pos < cfg->size) {
+ while (cfg->raw_pos < cfg->raw_size) {
/* Read type, instance, length */
- ret = sscanf(cfg->data + data_pos, "%x %x %x%n",
+ ret = sscanf(cfg->raw + cfg->raw_pos, "%x %x %x%n",
&type, &instance, &size, &offset);
if (ret == 0) {
/* EOF */
@@ -1306,20 +1321,20 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
dev_err(dev, "Bad format: failed to parse object\n");
return -EINVAL;
}
- data_pos += offset;
+ cfg->raw_pos += offset;
object = mxt_get_object(data, type);
if (!object) {
/* Skip object */
for (i = 0; i < size; i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ ret = sscanf(cfg->raw + cfg->raw_pos, "%hhx%n",
&val, &offset);
if (ret != 1) {
dev_err(dev, "Bad format in T%d at %d\n",
type, i);
return -EINVAL;
}
- data_pos += offset;
+ cfg->raw_pos += offset;
}
continue;
}
@@ -1354,7 +1369,7 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
reg = object->start_address + mxt_obj_size(object) * instance;
for (i = 0; i < size; i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ ret = sscanf(cfg->raw + cfg->raw_pos, "%hhx%n",
&val,
&offset);
if (ret != 1) {
@@ -1362,15 +1377,15 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
type, i);
return -EINVAL;
}
- data_pos += offset;
+ cfg->raw_pos += offset;
if (i > mxt_obj_size(object))
continue;
- byte_offset = reg + i - cfg_start_ofs;
+ byte_offset = reg + i - cfg->start_ofs;
- if (byte_offset >= 0 && byte_offset < config_mem_size) {
- *(config_mem + byte_offset) = val;
+ if (byte_offset >= 0 && byte_offset < cfg->mem_size) {
+ *(cfg->mem + byte_offset) = val;
} else {
dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n",
reg, object->type, byte_offset);
@@ -1382,22 +1397,21 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
return 0;
}
-static int mxt_upload_cfg_mem(struct mxt_data *data, unsigned int cfg_start,
- u8 *config_mem, size_t config_mem_size)
+static int mxt_upload_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
{
unsigned int byte_offset = 0;
int error;
/* Write configuration as blocks */
- while (byte_offset < config_mem_size) {
- unsigned int size = config_mem_size - byte_offset;
+ while (byte_offset < cfg->mem_size) {
+ unsigned int size = cfg->mem_size - byte_offset;
if (size > MXT_MAX_BLOCK_WRITE)
size = MXT_MAX_BLOCK_WRITE;
error = __mxt_write_reg(data->client,
- cfg_start + byte_offset,
- size, config_mem + byte_offset);
+ cfg->start_ofs + byte_offset,
+ size, cfg->mem + byte_offset);
if (error) {
dev_err(&data->client->dev,
"Config write error, ret=%d\n", error);
@@ -1431,65 +1445,75 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data);
* <SIZE> - 2-byte object size as hex
* <CONTENTS> - array of <SIZE> 1-byte hex values
*/
-static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
+static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
{
struct device *dev = &data->client->dev;
- struct mxt_info cfg_info;
+ struct mxt_cfg cfg;
int ret;
int offset;
- int data_pos;
int i;
- int cfg_start_ofs;
u32 info_crc, config_crc, calculated_crc;
- u8 *config_mem;
- size_t config_mem_size;
+ u16 crc_start = 0;
+
+ /* Make zero terminated copy of the OBP_RAW file */
+ cfg.raw = kmemdup_nul(fw->data, fw->size, GFP_KERNEL);
+ if (!cfg.raw)
+ return -ENOMEM;
+
+ cfg.raw_size = fw->size;
mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
- if (strncmp(cfg->data, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
+ if (strncmp(cfg.raw, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
dev_err(dev, "Unrecognised config file\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos = strlen(MXT_CFG_MAGIC);
+ cfg.raw_pos = strlen(MXT_CFG_MAGIC);
/* Load information block and check */
for (i = 0; i < sizeof(struct mxt_info); i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
- (unsigned char *)&cfg_info + i,
+ ret = sscanf(cfg.raw + cfg.raw_pos, "%hhx%n",
+ (unsigned char *)&cfg.info + i,
&offset);
if (ret != 1) {
dev_err(dev, "Bad format\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos += offset;
+ cfg.raw_pos += offset;
}
- if (cfg_info.family_id != data->info->family_id) {
+ if (cfg.info.family_id != data->info->family_id) {
dev_err(dev, "Family ID mismatch!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- if (cfg_info.variant_id != data->info->variant_id) {
+ if (cfg.info.variant_id != data->info->variant_id) {
dev_err(dev, "Variant ID mismatch!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
/* Read CRCs */
- ret = sscanf(cfg->data + data_pos, "%x%n", &info_crc, &offset);
+ ret = sscanf(cfg.raw + cfg.raw_pos, "%x%n", &info_crc, &offset);
if (ret != 1) {
dev_err(dev, "Bad format: failed to parse Info CRC\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos += offset;
+ cfg.raw_pos += offset;
- ret = sscanf(cfg->data + data_pos, "%x%n", &config_crc, &offset);
+ ret = sscanf(cfg.raw + cfg.raw_pos, "%x%n", &config_crc, &offset);
if (ret != 1) {
dev_err(dev, "Bad format: failed to parse Config CRC\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos += offset;
+ cfg.raw_pos += offset;
/*
* The Info Block CRC is calculated over mxt_info and the object
@@ -1515,39 +1539,39 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
}
/* Malloc memory to store configuration */
- cfg_start_ofs = MXT_OBJECT_START +
+ cfg.start_ofs = MXT_OBJECT_START +
data->info->object_num * sizeof(struct mxt_object) +
MXT_INFO_CHECKSUM_SIZE;
- config_mem_size = data->mem_size - cfg_start_ofs;
- config_mem = kzalloc(config_mem_size, GFP_KERNEL);
- if (!config_mem) {
- dev_err(dev, "Failed to allocate memory\n");
- return -ENOMEM;
+ cfg.mem_size = data->mem_size - cfg.start_ofs;
+ cfg.mem = kzalloc(cfg.mem_size, GFP_KERNEL);
+ if (!cfg.mem) {
+ ret = -ENOMEM;
+ goto release_raw;
}
- ret = mxt_prepare_cfg_mem(data, cfg, data_pos, cfg_start_ofs,
- config_mem, config_mem_size);
+ ret = mxt_prepare_cfg_mem(data, &cfg);
if (ret)
goto release_mem;
/* Calculate crc of the received configs (not the raw config file) */
- if (data->T7_address < cfg_start_ofs) {
- dev_err(dev, "Bad T7 address, T7addr = %x, config offset %x\n",
- data->T7_address, cfg_start_ofs);
- ret = 0;
- goto release_mem;
- }
+ if (data->T71_address)
+ crc_start = data->T71_address;
+ else if (data->T7_address)
+ crc_start = data->T7_address;
+ else
+ dev_warn(dev, "Could not find CRC start\n");
- calculated_crc = mxt_calculate_crc(config_mem,
- data->T7_address - cfg_start_ofs,
- config_mem_size);
+ if (crc_start > cfg.start_ofs) {
+ calculated_crc = mxt_calculate_crc(cfg.mem,
+ crc_start - cfg.start_ofs,
+ cfg.mem_size);
- if (config_crc > 0 && config_crc != calculated_crc)
- dev_warn(dev, "Config CRC error, calculated=%06X, file=%06X\n",
- calculated_crc, config_crc);
+ if (config_crc > 0 && config_crc != calculated_crc)
+ dev_warn(dev, "Config CRC in file inconsistent, calculated=%06X, file=%06X\n",
+ calculated_crc, config_crc);
+ }
- ret = mxt_upload_cfg_mem(data, cfg_start_ofs,
- config_mem, config_mem_size);
+ ret = mxt_upload_cfg_mem(data, &cfg);
if (ret)
goto release_mem;
@@ -1562,8 +1586,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
/* T7 config may have changed */
mxt_init_t7_power_cfg(data);
+release_raw:
+ kfree(cfg.raw);
release_mem:
- kfree(config_mem);
+ kfree(cfg.mem);
return ret;
}
@@ -1591,6 +1617,7 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T5_msg_size = 0;
data->T6_reportid = 0;
data->T7_address = 0;
+ data->T71_address = 0;
data->T9_reportid_min = 0;
data->T9_reportid_max = 0;
data->T19_reportid = 0;
@@ -1656,12 +1683,16 @@ static int mxt_parse_object_table(struct mxt_data *data,
case MXT_GEN_POWER_T7:
data->T7_address = object->start_address;
break;
+ case MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71:
+ data->T71_address = object->start_address;
+ break;
case MXT_TOUCH_MULTI_T9:
data->multitouch = MXT_TOUCH_MULTI_T9;
+ /* Only handle messages from first T9 instance */
data->T9_reportid_min = min_id;
- data->T9_reportid_max = max_id;
- data->num_touchids = object->num_report_ids
- * mxt_obj_instances(object);
+ data->T9_reportid_max = min_id +
+ object->num_report_ids - 1;
+ data->num_touchids = object->num_report_ids;
break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;
@@ -1981,10 +2012,8 @@ static int mxt_initialize_input_device(struct mxt_data *data)
/* Register input device */
input_dev = input_allocate_device();
- if (!input_dev) {
- dev_err(dev, "Failed to allocate memory\n");
+ if (!input_dev)
return -ENOMEM;
- }
input_dev->name = "Atmel maXTouch Touchscreen";
input_dev->phys = data->phys;
@@ -2055,12 +2084,6 @@ static int mxt_initialize_input_device(struct mxt_data *data)
}
if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
- data->t100_aux_ampl) {
- input_set_abs_params(input_dev, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
- }
-
- if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
data->t100_aux_vect) {
input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
0, 255, 0, 0);
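Editorial note: two things drive the atmel_mxt_ts refactor above. The parse state (raw file, read position, target memory) is bundled into struct mxt_cfg so the helpers stop taking five loose parameters, and the OBP_RAW firmware blob is duplicated with a terminating NUL before the sscanf() loops run, since request_firmware() data is not guaranteed to be NUL-terminated. A minimal sketch of the latter, with a hypothetical helper name:

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: make a NUL-terminated copy of the config file so it can be
 * parsed with sscanf() safely; the caller kfree()s the result. */
static char *copy_cfg_text(const struct firmware *fw)
{
	return kmemdup_nul((const char *)fw->data, fw->size, GFP_KERNEL);
}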
diff --git a/drivers/input/touchscreen/bu21029_ts.c b/drivers/input/touchscreen/bu21029_ts.c
new file mode 100644
index 000000000000..49a8d4bbca3a
--- /dev/null
+++ b/drivers/input/touchscreen/bu21029_ts.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rohm BU21029 touchscreen controller driver
+ *
+ * Copyright (C) 2015-2018 Bosch Sicherheitssysteme GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/timer.h>
+
+/*
+ * HW_ID1 Register (PAGE=0, ADDR=0x0E, Reset value=0x02, Read only)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | HW_IDH |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * HW_ID2 Register (PAGE=0, ADDR=0x0F, Reset value=0x29, Read only)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | HW_IDL |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * HW_IDH: high 8bits of IC's ID
+ * HW_IDL: low 8bits of IC's ID
+ */
+#define BU21029_HWID_REG (0x0E << 3)
+#define SUPPORTED_HWID 0x0229
+
+/*
+ * CFR0 Register (PAGE=0, ADDR=0x00, Reset value=0x20)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | 0 | 0 | CALIB | INTRM | 0 | 0 | 0 | 0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * CALIB: 0 = not to use calibration result (*)
+ * 1 = use calibration result
+ * INTRM: 0 = INT output depend on "pen down" (*)
+ * 1 = INT output always "0"
+ */
+#define BU21029_CFR0_REG (0x00 << 3)
+#define CFR0_VALUE 0x00
+
+/*
+ * CFR1 Register (PAGE=0, ADDR=0x01, Reset value=0xA6)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | MAV | AVE[2:0] | 0 | SMPL[2:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * MAV: 0 = median average filter off
+ * 1 = median average filter on (*)
+ * AVE: AVE+1 = number of average samples for MAV,
+ * if AVE>SMPL, then AVE=SMPL (=3)
+ * SMPL: SMPL+1 = number of conversion samples for MAV (=7)
+ */
+#define BU21029_CFR1_REG (0x01 << 3)
+#define CFR1_VALUE 0xA6
+
+/*
+ * CFR2 Register (PAGE=0, ADDR=0x02, Reset value=0x04)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | INTVL_TIME[3:0] | TIME_ST_ADC[3:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * INTVL_TIME: waiting time between completion of conversion
+ * and start of next conversion, only usable in
+ * autoscan mode (=20.480ms)
+ * TIME_ST_ADC: waiting time between application of voltage
+ * to panel and start of A/D conversion (=100us)
+ */
+#define BU21029_CFR2_REG (0x02 << 3)
+#define CFR2_VALUE 0xC9
+
+/*
+ * CFR3 Register (PAGE=0, ADDR=0x0B, Reset value=0x72)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | RM8 | STRETCH| PU90K | DUAL | PIDAC_OFS[3:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * RM8: 0 = coordinate resolution is 12bit (*)
+ * 1 = coordinate resolution is 8bit
+ * STRETCH: 0 = SCL_STRETCH function off
+ * 1 = SCL_STRETCH function on (*)
+ * PU90K: 0 = internal pull-up resistance for touch detection is ~50kohms (*)
+ * 1 = internal pull-up resistance for touch detection is ~90kohms
+ * DUAL: 0 = dual touch detection off (*)
+ * 1 = dual touch detection on
+ * PIDAC_OFS: dual touch detection circuit adjustment, it is not necessary
+ * to change this from initial value
+ */
+#define BU21029_CFR3_REG (0x0B << 3)
+#define CFR3_VALUE 0x42
+
+/*
+ * LDO Register (PAGE=0, ADDR=0x0C, Reset value=0x00)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | 0 | PVDD[2:0] | 0 | AVDD[2:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * PVDD: output voltage of panel output regulator (=2.000V)
+ * AVDD: output voltage of analog circuit regulator (=2.000V)
+ */
+#define BU21029_LDO_REG (0x0C << 3)
+#define LDO_VALUE 0x77
+
+/*
+ * Serial Interface Command Byte 1 (CID=1)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | 1 | CF | CMSK | PDM | STP |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * CF: conversion function, see table 3 in datasheet p6 (=0000, automatic scan)
+ * CMSK: 0 = executes convert function (*)
+ * 1 = reads the convert result
+ * PDM: 0 = power down after convert function stops (*)
+ * 1 = keep power on after convert function stops
+ * STP: 1 = abort current conversion and power down, set to "0" automatically
+ */
+#define BU21029_AUTOSCAN 0x80
+
+/*
+ * The timeout value needs to be larger than INTVL_TIME + tConv4 (sample and
+ * conversion time), where tConv4 is calculated by formula:
+ * tPON + tDLY1 + (tTIME_ST_ADC + (tADC * tSMPL) * 2 + tDLY2) * 3
+ * see figure 8 in datasheet p15 for details of each field.
+ */
+#define PEN_UP_TIMEOUT_MS 50
+
+#define STOP_DELAY_MIN_US 50
+#define STOP_DELAY_MAX_US 1000
+#define START_DELAY_MS 2
+#define BUF_LEN 8
+#define SCALE_12BIT (1 << 12)
+#define MAX_12BIT ((1 << 12) - 1)
+#define DRIVER_NAME "bu21029"
+
+struct bu21029_ts_data {
+ struct i2c_client *client;
+ struct input_dev *in_dev;
+ struct timer_list timer;
+ struct regulator *vdd;
+ struct gpio_desc *reset_gpios;
+ u32 x_plate_ohms;
+ struct touchscreen_properties prop;
+};
+
+static void bu21029_touch_report(struct bu21029_ts_data *bu21029, const u8 *buf)
+{
+ u16 x, y, z1, z2;
+ u32 rz;
+ s32 max_pressure = input_abs_get_max(bu21029->in_dev, ABS_PRESSURE);
+
+ /*
+ * compose upper 8 and lower 4 bits into a 12bit value:
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * | ByteH | ByteL |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * |b07|b06|b05|b04|b03|b02|b01|b00|b07|b06|b05|b04|b03|b02|b01|b00|
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * |v11|v10|v09|v08|v07|v06|v05|v04|v03|v02|v01|v00| 0 | 0 | 0 | 0 |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ */
+ x = (buf[0] << 4) | (buf[1] >> 4);
+ y = (buf[2] << 4) | (buf[3] >> 4);
+ z1 = (buf[4] << 4) | (buf[5] >> 4);
+ z2 = (buf[6] << 4) | (buf[7] >> 4);
+
+ if (z1 && z2) {
+ /*
+ * calculate Rz (pressure resistance value) by equation:
+ * Rz = Rx * (x/Q) * ((z2/z1) - 1), where
+ * Rx is x-plate resistance,
+ * Q is the touch screen resolution (8bit = 256, 12bit = 4096)
+ * x, z1, z2 are the measured positions.
+ */
+ rz = z2 - z1;
+ rz *= x;
+ rz *= bu21029->x_plate_ohms;
+ rz /= z1;
+ rz = DIV_ROUND_CLOSEST(rz, SCALE_12BIT);
+ if (rz <= max_pressure) {
+ touchscreen_report_pos(bu21029->in_dev, &bu21029->prop,
+ x, y, false);
+ input_report_abs(bu21029->in_dev, ABS_PRESSURE,
+ max_pressure - rz);
+ input_report_key(bu21029->in_dev, BTN_TOUCH, 1);
+ input_sync(bu21029->in_dev);
+ }
+ }
+}
+
+static void bu21029_touch_release(struct timer_list *t)
+{
+ struct bu21029_ts_data *bu21029 = from_timer(bu21029, t, timer);
+
+ input_report_abs(bu21029->in_dev, ABS_PRESSURE, 0);
+ input_report_key(bu21029->in_dev, BTN_TOUCH, 0);
+ input_sync(bu21029->in_dev);
+}
+
+static irqreturn_t bu21029_touch_soft_irq(int irq, void *data)
+{
+ struct bu21029_ts_data *bu21029 = data;
+ u8 buf[BUF_LEN];
+ int error;
+
+ /*
+ * Read touch data and deassert interrupt (will assert again after
+ * INTVL_TIME + tConv4 for continuous touch)
+ */
+ error = i2c_smbus_read_i2c_block_data(bu21029->client, BU21029_AUTOSCAN,
+ sizeof(buf), buf);
+ if (error < 0)
+ goto out;
+
+ bu21029_touch_report(bu21029, buf);
+
+ /* reset timer for pen up detection */
+ mod_timer(&bu21029->timer,
+ jiffies + msecs_to_jiffies(PEN_UP_TIMEOUT_MS));
+
+out:
+ return IRQ_HANDLED;
+}
+
+static void bu21029_put_chip_in_reset(struct bu21029_ts_data *bu21029)
+{
+ if (bu21029->reset_gpios) {
+ gpiod_set_value_cansleep(bu21029->reset_gpios, 1);
+ usleep_range(STOP_DELAY_MIN_US, STOP_DELAY_MAX_US);
+ }
+}
+
+static int bu21029_start_chip(struct input_dev *dev)
+{
+ struct bu21029_ts_data *bu21029 = input_get_drvdata(dev);
+ struct i2c_client *i2c = bu21029->client;
+ struct {
+ u8 reg;
+ u8 value;
+ } init_table[] = {
+ {BU21029_CFR0_REG, CFR0_VALUE},
+ {BU21029_CFR1_REG, CFR1_VALUE},
+ {BU21029_CFR2_REG, CFR2_VALUE},
+ {BU21029_CFR3_REG, CFR3_VALUE},
+ {BU21029_LDO_REG, LDO_VALUE}
+ };
+ int error, i;
+ __be16 hwid;
+
+ error = regulator_enable(bu21029->vdd);
+ if (error) {
+ dev_err(&i2c->dev, "failed to power up chip: %d", error);
+ return error;
+ }
+
+ /* take chip out of reset */
+ if (bu21029->reset_gpios) {
+ gpiod_set_value_cansleep(bu21029->reset_gpios, 0);
+ msleep(START_DELAY_MS);
+ }
+
+ error = i2c_smbus_read_i2c_block_data(i2c, BU21029_HWID_REG,
+ sizeof(hwid), (u8 *)&hwid);
+ if (error < 0) {
+ dev_err(&i2c->dev, "failed to read HW ID\n");
+ goto err_out;
+ }
+
+ if (be16_to_cpu(hwid) != SUPPORTED_HWID) {
+ dev_err(&i2c->dev,
+ "unsupported HW ID 0x%x\n", be16_to_cpu(hwid));
+ error = -ENODEV;
+ goto err_out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(init_table); ++i) {
+ error = i2c_smbus_write_byte_data(i2c,
+ init_table[i].reg,
+ init_table[i].value);
+ if (error < 0) {
+ dev_err(&i2c->dev,
+ "failed to write %#02x to register %#02x: %d\n",
+ init_table[i].value, init_table[i].reg,
+ error);
+ goto err_out;
+ }
+ }
+
+ error = i2c_smbus_write_byte(i2c, BU21029_AUTOSCAN);
+ if (error < 0) {
+ dev_err(&i2c->dev, "failed to start autoscan\n");
+ goto err_out;
+ }
+
+ enable_irq(bu21029->client->irq);
+ return 0;
+
+err_out:
+ bu21029_put_chip_in_reset(bu21029);
+ regulator_disable(bu21029->vdd);
+ return error;
+}
+
+static void bu21029_stop_chip(struct input_dev *dev)
+{
+ struct bu21029_ts_data *bu21029 = input_get_drvdata(dev);
+
+ disable_irq(bu21029->client->irq);
+ del_timer_sync(&bu21029->timer);
+
+ bu21029_put_chip_in_reset(bu21029);
+ regulator_disable(bu21029->vdd);
+}
+
+static int bu21029_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bu21029_ts_data *bu21029;
+ struct input_dev *in_dev;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ dev_err(&client->dev,
+ "i2c functionality support is not sufficient\n");
+ return -EIO;
+ }
+
+ bu21029 = devm_kzalloc(&client->dev, sizeof(*bu21029), GFP_KERNEL);
+ if (!bu21029)
+ return -ENOMEM;
+
+ error = device_property_read_u32(&client->dev, "rohm,x-plate-ohms",
+ &bu21029->x_plate_ohms);
+ if (error) {
+ dev_err(&client->dev,
+ "invalid 'x-plate-ohms' supplied: %d\n", error);
+ return error;
+ }
+
+ bu21029->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(bu21029->vdd)) {
+ error = PTR_ERR(bu21029->vdd);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to acquire 'vdd' supply: %d\n", error);
+ return error;
+ }
+
+ bu21029->reset_gpios = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(bu21029->reset_gpios)) {
+ error = PTR_ERR(bu21029->reset_gpios);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to acquire 'reset' gpio: %d\n", error);
+ return error;
+ }
+
+ in_dev = devm_input_allocate_device(&client->dev);
+ if (!in_dev) {
+ dev_err(&client->dev, "unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ bu21029->client = client;
+ bu21029->in_dev = in_dev;
+ timer_setup(&bu21029->timer, bu21029_touch_release, 0);
+
+ in_dev->name = DRIVER_NAME;
+ in_dev->id.bustype = BUS_I2C;
+ in_dev->open = bu21029_start_chip;
+ in_dev->close = bu21029_stop_chip;
+
+ input_set_capability(in_dev, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(in_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(in_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(in_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
+ touchscreen_parse_properties(in_dev, false, &bu21029->prop);
+
+ input_set_drvdata(in_dev, bu21029);
+
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, bu21029_touch_soft_irq,
+ IRQF_ONESHOT, DRIVER_NAME, bu21029);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to request touch irq: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(in_dev);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to register input device: %d\n", error);
+ return error;
+ }
+
+ i2c_set_clientdata(client, bu21029);
+
+ return 0;
+}
+
+static int __maybe_unused bu21029_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct bu21029_ts_data *bu21029 = i2c_get_clientdata(i2c);
+
+ if (!device_may_wakeup(dev)) {
+ mutex_lock(&bu21029->in_dev->mutex);
+ if (bu21029->in_dev->users)
+ bu21029_stop_chip(bu21029->in_dev);
+ mutex_unlock(&bu21029->in_dev->mutex);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused bu21029_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct bu21029_ts_data *bu21029 = i2c_get_clientdata(i2c);
+
+ if (!device_may_wakeup(dev)) {
+ mutex_lock(&bu21029->in_dev->mutex);
+ if (bu21029->in_dev->users)
+ bu21029_start_chip(bu21029->in_dev);
+ mutex_unlock(&bu21029->in_dev->mutex);
+ }
+
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(bu21029_pm_ops, bu21029_suspend, bu21029_resume);
+
+static const struct i2c_device_id bu21029_ids[] = {
+ { DRIVER_NAME, 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, bu21029_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id bu21029_of_ids[] = {
+ { .compatible = "rohm,bu21029" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bu21029_of_ids);
+#endif
+
+static struct i2c_driver bu21029_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(bu21029_of_ids),
+ .pm = &bu21029_pm_ops,
+ },
+ .id_table = bu21029_ids,
+ .probe = bu21029_probe,
+};
+module_i2c_driver(bu21029_driver);
+
+MODULE_AUTHOR("Zhu Yi <yi.zhu5@cn.bosch.com>");
+MODULE_DESCRIPTION("Rohm BU21029 touchscreen controller driver");
+MODULE_LICENSE("GPL v2");
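As a worked example of the decoding and pressure math in bu21029_touch_report() above, the sketch below runs the same computation on an invented autoscan buffer (all values are hypothetical and kept small so the u32 intermediates cannot overflow):

	/* Each axis arrives as ByteH:ByteL, upper 8 bits in ByteH and
	 * lower 4 bits in ByteL[7:4], giving a 12-bit sample.
	 */
	u8 buf[8] = { 0x12, 0x30, 0x45, 0x60, 0x02, 0x00, 0x04, 0x00 };
	u16 x  = (buf[0] << 4) | (buf[1] >> 4);	/* 0x123 = 291 */
	u16 z1 = (buf[4] << 4) | (buf[5] >> 4);	/* 0x020 = 32  */
	u16 z2 = (buf[6] << 4) | (buf[7] >> 4);	/* 0x040 = 64  */
	u32 x_plate_ohms = 600;			/* hypothetical panel value */

	/* Rz = Rx * (x / 4096) * (z2 / z1 - 1)
	 *    = 600 * (291 / 4096) * (64 / 32 - 1) ~= 43 ohms
	 */
	u32 rz = DIV_ROUND_CLOSEST((z2 - z1) * x * x_plate_ohms / z1, 1 << 12);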
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 2facad75eb6d..7fe41965c5d1 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -1,9 +1,9 @@
/*
* Touch Screen driver for EETI's I2C connected touch screen panels
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (c) 2009,2018 Daniel Mack <daniel@zonque.org>
*
* See EETI's software guide for the protocol specification:
- * http://home.eeti.com.tw/web20/eg/guide.htm
+ * http://home.eeti.com.tw/documentation.html
*
* Based on migor_ts.c
* Copyright (c) 2008 Magnus Damm
@@ -25,28 +25,22 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/timer.h>
#include <linux/gpio/consumer.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
-static bool flip_x;
-module_param(flip_x, bool, 0644);
-MODULE_PARM_DESC(flip_x, "flip x coordinate");
-
-static bool flip_y;
-module_param(flip_y, bool, 0644);
-MODULE_PARM_DESC(flip_y, "flip y coordinate");
-
struct eeti_ts {
struct i2c_client *client;
struct input_dev *input;
struct gpio_desc *attn_gpio;
+ struct touchscreen_properties props;
bool running;
};
@@ -73,17 +67,10 @@ static void eeti_ts_report_event(struct eeti_ts *eeti, u8 *buf)
x >>= res - EETI_TS_BITDEPTH;
y >>= res - EETI_TS_BITDEPTH;
- if (flip_x)
- x = EETI_MAXVAL - x;
-
- if (flip_y)
- y = EETI_MAXVAL - y;
-
if (buf[0] & REPORT_BIT_HAS_PRESSURE)
input_report_abs(eeti->input, ABS_PRESSURE, buf[5]);
- input_report_abs(eeti->input, ABS_X, x);
- input_report_abs(eeti->input, ABS_Y, y);
+ touchscreen_report_pos(eeti->input, &eeti->props, x, y, false);
input_report_key(eeti->input, BTN_TOUCH, buf[0] & REPORT_BIT_PRESSED);
input_sync(eeti->input);
}
@@ -178,6 +165,8 @@ static int eeti_ts_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_Y, 0, EETI_MAXVAL, 0, 0);
input_set_abs_params(input, ABS_PRESSURE, 0, 0xff, 0, 0);
+ touchscreen_parse_properties(input, false, &eeti->props);
+
input->name = client->name;
input->id.bustype = BUS_I2C;
input->open = eeti_ts_open;
@@ -262,10 +251,18 @@ static const struct i2c_device_id eeti_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, eeti_ts_id);
+#ifdef CONFIG_OF
+static const struct of_device_id of_eeti_ts_match[] = {
+ { .compatible = "eeti,exc3000-i2c", },
+ { }
+};
+#endif
+
static struct i2c_driver eeti_ts_driver = {
.driver = {
.name = "eeti_ts",
.pm = &eeti_ts_pm,
+ .of_match_table = of_match_ptr(of_eeti_ts_match),
},
.probe = eeti_ts_probe,
.id_table = eeti_ts_id,
@@ -274,5 +271,5 @@ static struct i2c_driver eeti_ts_driver = {
module_i2c_driver(eeti_ts_driver);
MODULE_DESCRIPTION("EETI Touchscreen driver");
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
MODULE_LICENSE("GPL");
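The eeti_ts conversion above drops the driver-private flip_x/flip_y module parameters in favour of the generic touchscreen helpers, which honour the standard firmware properties (touchscreen-inverted-x/y, touchscreen-swapped-x-y and friends). A condensed sketch of the pattern, independent of any particular driver (input is assumed to be an already-allocated input device):

	#include <linux/input/touchscreen.h>

	struct touchscreen_properties props;

	/* Probe: read the generic touchscreen properties from DT/ACPI
	 * (second argument false = single-touch axes).
	 */
	touchscreen_parse_properties(input, false, &props);

	/* Report path: applies any inversion/swap before reporting
	 * ABS_X/ABS_Y, replacing open-coded "x = MAX - x" flips.
	 */
	touchscreen_report_pos(input, &props, x, y, false);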
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 752ae9cf4514..80e69bb8283e 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EETI eGalax Multiple Touch Controller
*
* Copyright (C) 2011 Freescale Semiconductor, Inc.
*
* based on max11801_ts.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* EETI eGalax serial touch screen controller is a I2C based multiple
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 83433e8efff7..7f2942f3cec6 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -352,6 +352,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
case 1: /* 6-byte protocol */
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
+ /* fall through */
case 2: /* 4-byte protocol */
input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index 47fe1f184bbc..1d6c8f490b40 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -1,16 +1,11 @@
-/*
- * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
- * Based on driver from 2011:
- * Juergen Beisert, Pengutronix <kernel@pengutronix.de>
- *
- * This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
- * connected to the imx25 ADC.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+// Based on driver from 2011:
+// Juergen Beisert, Pengutronix <kernel@pengutronix.de>
+//
+// This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
+// connected to the imx25 ADC.
#include <linux/clk.h>
#include <linux/device.h>
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index 481586909d28..054c2537b392 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/errno.h>
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ee82a975bfd2..c10fc594f94d 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -1,12 +1,8 @@
-/*
- * Freescale i.MX6UL touchscreen controller driver
- *
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale i.MX6UL touchscreen controller driver
+//
+// Copyright (C) 2015 Freescale Semiconductor, Inc.
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index d1c09e6a2cb6..c89853a36f9e 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -466,7 +466,7 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
}
}
- return 0;
+ return false;
}
static bool raydium_i2c_fw_trigger(struct i2c_client *client)
@@ -492,7 +492,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
}
}
- return 0;
+ return false;
}
static int raydium_i2c_check_path(struct i2c_client *client)
diff --git a/drivers/input/touchscreen/resistive-adc-touch.c b/drivers/input/touchscreen/resistive-adc-touch.c
new file mode 100644
index 000000000000..cfc8bb4553f7
--- /dev/null
+++ b/drivers/input/touchscreen/resistive-adc-touch.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADC generic resistive touchscreen (GRTS)
+ * This is a generic input driver that connects to an ADC
+ * given the channels in device tree, and reports events to the input
+ * subsystem.
+ *
+ * Copyright (C) 2017,2018 Microchip Technology,
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "resistive-adc-touch"
+#define GRTS_DEFAULT_PRESSURE_MIN 50000
+#define GRTS_MAX_POS_MASK GENMASK(11, 0)
+
+/**
+ * struct grts_state - generic resistive touch screen information struct
+ * @pressure_min: number representing the minimum for the pressure
+ * @pressure: are we getting pressure info or not
+ * @iio_chans: list of channels acquired
+ * @iio_cb: iio_callback buffer for the data
+ * @input: the input device structure that we register
+ * @prop: touchscreen properties struct
+ */
+struct grts_state {
+ u32 pressure_min;
+ bool pressure;
+ struct iio_channel *iio_chans;
+ struct iio_cb_buffer *iio_cb;
+ struct input_dev *input;
+ struct touchscreen_properties prop;
+};
+
+static int grts_cb(const void *data, void *private)
+{
+ const u16 *touch_info = data;
+ struct grts_state *st = private;
+ unsigned int x, y, press = 0x0;
+
+ /* channel data coming in buffer in the order below */
+ x = touch_info[0];
+ y = touch_info[1];
+ if (st->pressure)
+ press = touch_info[2];
+
+ if ((!x && !y) || (st->pressure && (press < st->pressure_min))) {
+ /* report end of touch */
+ input_report_key(st->input, BTN_TOUCH, 0);
+ input_sync(st->input);
+ return 0;
+ }
+
+ /* report proper touch to the subsystem */
+ touchscreen_report_pos(st->input, &st->prop, x, y, false);
+ if (st->pressure)
+ input_report_abs(st->input, ABS_PRESSURE, press);
+ input_report_key(st->input, BTN_TOUCH, 1);
+ input_sync(st->input);
+
+ return 0;
+}
+
+static int grts_open(struct input_dev *dev)
+{
+ int error;
+ struct grts_state *st = input_get_drvdata(dev);
+
+ error = iio_channel_start_all_cb(st->iio_cb);
+ if (error) {
+ dev_err(dev->dev.parent, "failed to start callback buffer.\n");
+ return error;
+ }
+ return 0;
+}
+
+static void grts_close(struct input_dev *dev)
+{
+ struct grts_state *st = input_get_drvdata(dev);
+
+ iio_channel_stop_all_cb(st->iio_cb);
+}
+
+static void grts_disable(void *data)
+{
+ iio_channel_release_all_cb(data);
+}
+
+static int grts_probe(struct platform_device *pdev)
+{
+ struct grts_state *st;
+ struct input_dev *input;
+ struct device *dev = &pdev->dev;
+ struct iio_channel *chan;
+ int error;
+
+ st = devm_kzalloc(dev, sizeof(struct grts_state), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ /* get the channels from IIO device */
+ st->iio_chans = devm_iio_channel_get_all(dev);
+ if (IS_ERR(st->iio_chans)) {
+ error = PTR_ERR(st->iio_chans);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "can't get iio channels.\n");
+ return error;
+ }
+
+ chan = &st->iio_chans[0];
+ st->pressure = false;
+ while (chan && chan->indio_dev) {
+ if (!strcmp(chan->channel->datasheet_name, "pressure"))
+ st->pressure = true;
+ chan++;
+ }
+
+ if (st->pressure) {
+ error = device_property_read_u32(dev,
+ "touchscreen-min-pressure",
+ &st->pressure_min);
+ if (error) {
+ dev_dbg(dev, "can't get touchscreen-min-pressure property.\n");
+ st->pressure_min = GRTS_DEFAULT_PRESSURE_MIN;
+ }
+ }
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ input->name = DRIVER_NAME;
+ input->id.bustype = BUS_HOST;
+ input->open = grts_open;
+ input->close = grts_close;
+
+ input_set_abs_params(input, ABS_X, 0, GRTS_MAX_POS_MASK - 1, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, GRTS_MAX_POS_MASK - 1, 0, 0);
+ if (st->pressure)
+ input_set_abs_params(input, ABS_PRESSURE, st->pressure_min,
+ 0xffff, 0, 0);
+
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+
+ /* parse optional device tree properties */
+ touchscreen_parse_properties(input, false, &st->prop);
+
+ st->input = input;
+ input_set_drvdata(input, st);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "failed to register input device.");
+ return error;
+ }
+
+ st->iio_cb = iio_channel_get_all_cb(dev, grts_cb, st);
+ if (IS_ERR(st->iio_cb)) {
+ dev_err(dev, "failed to allocate callback buffer.\n");
+ return PTR_ERR(st->iio_cb);
+ }
+
+ error = devm_add_action_or_reset(dev, grts_disable, st->iio_cb);
+ if (error) {
+ dev_err(dev, "failed to add disable action.\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id grts_of_match[] = {
+ {
+ .compatible = "resistive-adc-touch",
+ }, {
+ /* sentinel */
+ },
+};
+
+MODULE_DEVICE_TABLE(of, grts_of_match);
+
+static struct platform_driver grts_driver = {
+ .probe = grts_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(grts_of_match),
+ },
+};
+
+module_platform_driver(grts_driver);
+
+MODULE_AUTHOR("Eugen Hristev <eugen.hristev@microchip.com>");
+MODULE_DESCRIPTION("Generic ADC Resistive Touch Driver");
+MODULE_LICENSE("GPL v2");
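The new driver above is built entirely on the IIO consumer callback-buffer API. The core pattern it relies on, reduced to a sketch (error handling elided; my_cb and priv are placeholder names):

	/* Invoked by the IIO core for each completed scan; "data" holds
	 * the channel samples in registration order (here: x, y[, pressure]).
	 */
	static int my_cb(const void *data, void *private)
	{
		const u16 *samples = data;
		/* consume samples[0], samples[1], ... */
		return 0;
	}

	/* probe: bind every channel listed for the device, attach callback */
	cb = iio_channel_get_all_cb(dev, my_cb, priv);

	/* open()/close(): start and stop the underlying buffered capture */
	iio_channel_start_all_cb(cb);
	iio_channel_stop_all_cb(cb);

	iio_channel_release_all_cb(cb);	/* teardown */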
diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c
index bda0500c9b57..714affdd742f 100644
--- a/drivers/input/touchscreen/rohm_bu21023.c
+++ b/drivers/input/touchscreen/rohm_bu21023.c
@@ -304,7 +304,7 @@ static int rohm_i2c_burst_read(struct i2c_client *client, u8 start, void *buf,
msg[1].len = len;
msg[1].buf = buf;
- i2c_lock_adapter(adap);
+ i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
for (i = 0; i < 2; i++) {
if (__i2c_transfer(adap, &msg[i], 1) < 0) {
@@ -313,7 +313,7 @@ static int rohm_i2c_burst_read(struct i2c_client *client, u8 start, void *buf,
}
}
- i2c_unlock_adapter(adap);
+ i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
return ret;
}
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index ff7043f74a3d..d196ac3d8b8c 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
{ "GSL3692", 0 },
{ "MSSL1680", 0 },
{ "MSSL0001", 0 },
+ { "MSSL0002", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 20f7f3902757..166edeb77776 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1142,7 +1142,7 @@ static int __maybe_unused wdt87xx_resume(struct device *dev)
* The chip may have been reset while system is resuming,
* give it some time to settle.
*/
- mdelay(100);
+ msleep(100);
error = wdt87xx_send_command(client, VND_CMD_START, 0);
if (error)
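The wdt87xx change swaps mdelay() for msleep(): resume runs in sleepable process context, and busy-waiting 100 ms there wastes a CPU for no benefit. The usual kernel rule of thumb for delay primitives, sketched (see the kernel's timers-howto document):

	udelay(10);		/* atomic context, or microsecond delays */
	usleep_range(100, 200);	/* sleepable, ~10us..20ms: hrtimer-backed */
	msleep(100);		/* sleepable, longer delays: jiffy-granular */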
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e055d228bfb9..c60395b7470f 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -60,6 +60,27 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
endmenu
+config IOMMU_DEBUGFS
+ bool "Export IOMMU internals in DebugFS"
+ depends on DEBUG_FS
+ help
+ Allows exposure of IOMMU device internals. This option enables
+ the use of debugfs by IOMMU drivers as required. Devices can,
+ at initialization time, cause the IOMMU code to create a top-level
+ debug/iommu directory, and then populate a subdirectory with
+ entries as required.
+
+config IOMMU_DEFAULT_PASSTHROUGH
+ bool "IOMMU passthrough by default"
+ depends on IOMMU_API
+ help
+ Enable passthrough by default, removing the need to pass
+ iommu.passthrough=on or iommu=pt on the kernel command line. If
+ this is enabled, you can still disable it with iommu.passthrough=off
+ or iommu=nopt, depending on the architecture.
+
+ If unsure, say N here.
+
config IOMMU_IOVA
tristate
@@ -135,6 +156,18 @@ config AMD_IOMMU_V2
hardware. Select this option if you want to use devices that support
the PCI PRI and PASID interface.
+config AMD_IOMMU_DEBUGFS
+ bool "Enable AMD IOMMU internals in DebugFS"
+ depends on AMD_IOMMU && IOMMU_DEBUGFS
+ ---help---
+ !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
+ Exposes AMD IOMMU device internals in DebugFS.
+
+ This option is -NOT- intended for production environments, and should
+ not generally be enabled.
+
# Intel IOMMU support
config DMAR_TABLE
bool
@@ -142,7 +175,6 @@ config DMAR_TABLE
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
- select DMA_DIRECT_OPS
select IOMMU_API
select IOMMU_IOVA
select NEED_DMA_MAP_STATE
@@ -285,8 +317,8 @@ config IPMMU_VMSA
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
help
- Support for the Renesas VMSA-compatible IPMMU Renesas found in the
- R-Mobile APE6 and R-Car H2/M2 SoCs.
+ Support for the Renesas VMSA-compatible IPMMU found in the R-Mobile
+ APE6, R-Car Gen2, and R-Car Gen3 SoCs.
If unsure, say N.
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 1fb695854809..ab5eba6edf82 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
@@ -10,11 +11,12 @@ obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
+obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 596b95c50051..4e04fff23977 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1404,6 +1404,8 @@ static u64 *fetch_pte(struct protection_domain *domain,
int level;
u64 *pte;
+ *page_size = 0;
+
if (address > PM_LEVEL_SIZE(domain->mode))
return NULL;
@@ -1944,12 +1946,6 @@ static int __attach_device(struct iommu_dev_data *dev_data,
{
int ret;
- /*
- * Must be called with IRQs disabled. Warn here to detect early
- * when its not.
- */
- WARN_ON(!irqs_disabled());
-
/* lock domain */
spin_lock(&domain->lock);
@@ -2115,12 +2111,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain;
- /*
- * Must be called with IRQs disabled. Warn here to detect early
- * when its not.
- */
- WARN_ON(!irqs_disabled());
-
domain = dev_data->domain;
spin_lock(&domain->lock);
@@ -2405,9 +2395,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
}
if (amd_iommu_unmap_flush) {
- dma_ops_free_iova(dma_dom, dma_addr, pages);
domain_flush_tlb(&dma_dom->domain);
domain_flush_complete(&dma_dom->domain);
+ dma_ops_free_iova(dma_dom, dma_addr, pages);
} else {
pages = __roundup_pow_of_two(pages);
queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
@@ -2620,7 +2610,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
return NULL;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag);
+ get_order(size), flag & __GFP_NOWARN);
if (!page)
return NULL;
}
@@ -3192,7 +3182,6 @@ const struct iommu_ops amd_iommu_ops = {
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = amd_iommu_iova_to_phys,
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
@@ -3874,7 +3863,8 @@ static void irte_ga_prepare(void *entry,
irte->lo.fields_remap.int_type = delivery_mode;
irte->lo.fields_remap.dm = dest_mode;
irte->hi.fields.vector = vector;
- irte->lo.fields_remap.destination = dest_apicid;
+ irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
+ irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
irte->lo.fields_remap.valid = 1;
}
@@ -3927,7 +3917,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
if (!irte->lo.fields_remap.guest_mode) {
irte->hi.fields.vector = vector;
- irte->lo.fields_remap.destination = dest_apicid;
+ irte->lo.fields_remap.destination =
+ APICID_TO_IRTE_DEST_LO(dest_apicid);
+ irte->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(dest_apicid);
modify_irte_ga(devid, index, irte, NULL);
}
}
@@ -4344,7 +4337,10 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
irte->lo.val = 0;
irte->hi.fields.vector = cfg->vector;
irte->lo.fields_remap.guest_mode = 0;
- irte->lo.fields_remap.destination = cfg->dest_apicid;
+ irte->lo.fields_remap.destination =
+ APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+ irte->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
irte->lo.fields_remap.dm = apic->irq_dest_mode;
@@ -4461,8 +4457,12 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
raw_spin_lock_irqsave(&table->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
- if (cpu >= 0)
- ref->lo.fields_vapic.destination = cpu;
+ if (cpu >= 0) {
+ ref->lo.fields_vapic.destination =
+ APICID_TO_IRTE_DEST_LO(cpu);
+ ref->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cpu);
+ }
ref->lo.fields_vapic.is_run = is_run;
barrier();
}
diff --git a/drivers/iommu/amd_iommu_debugfs.c b/drivers/iommu/amd_iommu_debugfs.c
new file mode 100644
index 000000000000..c6a5c737ef09
--- /dev/null
+++ b/drivers/iommu/amd_iommu_debugfs.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD IOMMU driver
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
+
+static struct dentry *amd_iommu_debugfs;
+static DEFINE_MUTEX(amd_iommu_debugfs_lock);
+
+#define MAX_NAME_LEN 20
+
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
+{
+ char name[MAX_NAME_LEN + 1];
+
+ mutex_lock(&amd_iommu_debugfs_lock);
+ if (!amd_iommu_debugfs)
+ amd_iommu_debugfs = debugfs_create_dir("amd",
+ iommu_debugfs_dir);
+ mutex_unlock(&amd_iommu_debugfs_lock);
+
+ snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
+ iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+}
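amd_iommu_debugfs_setup() above only creates the per-IOMMU directory; nothing is populated yet. A hypothetical follow-up (attribute name and backing variable invented for illustration, not part of this patch) would hang entries off iommu->debugfs:

	static u32 example_value;	/* hypothetical driver state */

	/* appears as <debugfs>/iommu/amd/iommuNN/example, read-only */
	debugfs_create_u32("example", 0444, iommu->debugfs, &example_value);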
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 904c575d1677..84b3e4445d46 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -153,6 +153,7 @@ bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
@@ -280,9 +281,9 @@ static void clear_translation_pre_enabled(struct amd_iommu *iommu)
static void init_translation_status(struct amd_iommu *iommu)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
if (ctrl & (1<<CONTROL_IOMMU_EN))
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
@@ -386,30 +387,30 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl |= (1 << bit);
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl |= (1ULL << bit);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl &= ~(1 << bit);
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~(1ULL << bit);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
- u32 ctrl;
+ u64 ctrl;
- ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
ctrl &= ~CTRL_INV_TO_MASK;
ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
- writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
@@ -827,6 +828,19 @@ static int iommu_init_ga(struct amd_iommu *iommu)
return ret;
}
+static void iommu_enable_xt(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ /*
+ * XT mode (32-bit APIC destination ID) requires
+ * GA mode (128-bit IRTE support) as a prerequisite.
+ */
+ if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
+ amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ iommu_feature_enable(iommu, CONTROL_XT_EN);
+#endif /* CONFIG_IRQ_REMAP */
+}
+
static void iommu_enable_gt(struct amd_iommu *iommu)
{
if (!iommu_feature(iommu, FEATURE_GT))
@@ -1507,6 +1521,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
+ amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
break;
case 0x11:
case 0x40:
@@ -1516,6 +1532,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
+ amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
break;
default:
return -EINVAL;
@@ -1832,6 +1850,8 @@ static void print_iommu_info(void)
pr_info("AMD-Vi: Interrupt remapping enabled\n");
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
pr_info("AMD-Vi: virtual APIC enabled\n");
+ if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ pr_info("AMD-Vi: X2APIC enabled\n");
}
}
@@ -2168,6 +2188,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
iommu_enable(iommu);
iommu_flush_all_caches(iommu);
}
@@ -2212,6 +2233,7 @@ static void early_enable_iommus(void)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
iommu_set_device_table(iommu);
iommu_flush_all_caches(iommu);
}
@@ -2691,8 +2713,7 @@ int __init amd_iommu_enable(void)
return ret;
irq_remapping_enabled = 1;
-
- return 0;
+ return amd_iommu_xt_mode;
}
void amd_iommu_disable(void)
@@ -2721,6 +2742,7 @@ int __init amd_iommu_enable_faulting(void)
*/
static int __init amd_iommu_init(void)
{
+ struct amd_iommu *iommu;
int ret;
ret = iommu_go_to_state(IOMMU_INITIALIZED);
@@ -2730,14 +2752,15 @@ static int __init amd_iommu_init(void)
disable_iommus();
free_iommu_resources();
} else {
- struct amd_iommu *iommu;
-
uninit_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
}
}
+ for_each_iommu(iommu)
+ amd_iommu_debugfs_setup(iommu);
+
return ret;
}
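The readl/writel to readq/writeq conversion in this file is what makes CONTROL_XT_EN usable at all: the MMIO control register is 64 bits wide, and XT_EN sits at bit 0x32 (decimal 50), which a 32-bit read-modify-write can never reach. Spelled out as a sketch:

	/* CONTROL_XT_EN == 0x32 == bit 50 of the 64-bit control register */
	u64 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= 1ULL << 0x32;	/* (1 << 0x32) on a 32-bit int would be UB */
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);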
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 640c286a0ab9..a8cd0296fb16 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -33,6 +33,12 @@ extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+#else
+static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+#endif
+
/* Needed for interrupt remapping */
extern int amd_iommu_prepare(void);
extern int amd_iommu_enable(void);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 986cbe0cc189..e2b342e65a7b 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -161,6 +161,7 @@
#define CONTROL_GAM_EN 0x19ULL
#define CONTROL_GALOG_EN 0x1CULL
#define CONTROL_GAINT_EN 0x1DULL
+#define CONTROL_XT_EN 0x32ULL
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -378,9 +379,11 @@
#define IOMMU_CAP_EFR 27
/* IOMMU Feature Reporting Field (for IVHD type 10h */
+#define IOMMU_FEAT_XTSUP_SHIFT 0
#define IOMMU_FEAT_GASUP_SHIFT 6
/* IOMMU Extended Feature Register (EFR) */
+#define IOMMU_EFR_XTSUP_SHIFT 2
#define IOMMU_EFR_GASUP_SHIFT 7
#define MAX_DOMAIN_ID 65536
@@ -437,7 +440,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-
/*
* This struct is used to pass information about
* incoming PPR faults around.
@@ -594,6 +596,11 @@ struct amd_iommu {
u32 flags;
volatile u64 __aligned(8) cmd_sem;
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+ /* DebugFS Info */
+ struct dentry *debugfs;
+#endif
};
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
@@ -810,6 +817,9 @@ union irte {
} fields;
};
+#define APICID_TO_IRTE_DEST_LO(x) (x & 0xffffff)
+#define APICID_TO_IRTE_DEST_HI(x) ((x >> 24) & 0xff)
+
union irte_ga_lo {
u64 val;
@@ -823,8 +833,8 @@ union irte_ga_lo {
dm : 1,
/* ------ */
guest_mode : 1,
- destination : 8,
- rsvd : 48;
+ destination : 24,
+ ga_tag : 32;
} fields_remap;
/* For guest vAPIC */
@@ -837,8 +847,7 @@ union irte_ga_lo {
is_run : 1,
/* ------ */
guest_mode : 1,
- destination : 8,
- rsvd2 : 16,
+ destination : 24,
ga_tag : 32;
} fields_vapic;
};
@@ -849,7 +858,8 @@ union irte_ga_hi {
u64 vector : 8,
rsvd_1 : 4,
ga_root_ptr : 40,
- rsvd_2 : 12;
+ rsvd_2 : 4,
+ destination : 8;
} fields;
};
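With the IRTE layout widened above, a 32-bit x2APIC destination ID is split across the two halves of the 128-bit entry: bits 23:0 go into lo.fields_remap.destination and bits 31:24 into hi.fields.destination. A worked example with an arbitrary ID:

	u32 apicid = 0x01234567;	/* arbitrary x2APIC destination ID */

	u32 lo = APICID_TO_IRTE_DEST_LO(apicid);	/* 0x234567 */
	u32 hi = APICID_TO_IRTE_DEST_HI(apicid);	/* 0x01     */

	/* irte->lo.fields_remap.destination = lo;  24-bit field
	 * irte->hi.fields.destination       = hi;   8-bit field
	 */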
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 1d0b53a04a08..58da65df03f5 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -508,7 +508,7 @@ static void do_fault(struct work_struct *work)
{
struct fault *fault = container_of(work, struct fault, work);
struct vm_area_struct *vma;
- int ret = VM_FAULT_ERROR;
+ vm_fault_t ret = VM_FAULT_ERROR;
unsigned int flags = 0;
struct mm_struct *mm;
u64 address;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 1d647104bccc..5059d09f3202 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -24,6 +24,7 @@
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@@ -366,7 +367,7 @@
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
-static bool disable_bypass;
+static bool disable_bypass = 1;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -1301,6 +1302,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
/* Sync our overflow flag, as we believe we're up to speed */
q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
+ writel(q->cons, q->cons_reg);
return IRQ_HANDLED;
}
@@ -1997,7 +1999,6 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .map_sg = default_iommu_map_sg,
.flush_iotlb_all = arm_smmu_iotlb_sync,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
@@ -2211,8 +2212,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
reg &= ~clr;
reg |= set;
writel_relaxed(reg | GBPA_UPDATE, gbpa);
- return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
- 1, ARM_SMMU_POLL_TIMEOUT_US);
+ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+
+ if (ret)
+ dev_err(smmu->dev, "GBPA not responding to update\n");
+ return ret;
}
static void arm_smmu_free_msis(void *data)
@@ -2392,8 +2397,15 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Clear CR0 and sync (disables SMMU and queue processing) */
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
- if (reg & CR0_SMMUEN)
+ if (reg & CR0_SMMUEN) {
+ if (is_kdump_kernel()) {
+ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+ arm_smmu_device_disable(smmu);
+ return -EBUSY;
+ }
+
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+ }
ret = arm_smmu_device_disable(smmu);
if (ret)
@@ -2491,10 +2503,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
enables |= CR0_SMMUEN;
} else {
ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
- if (ret) {
- dev_err(smmu->dev, "GBPA not responding to update\n");
+ if (ret)
return ret;
- }
}
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
@@ -2915,8 +2925,6 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
-IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3");
-
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f7a96bcf94a6..fd1b80ef9490 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1562,7 +1562,6 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .map_sg = default_iommu_map_sg,
.flush_iotlb_all = arm_smmu_iotlb_sync,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
@@ -2103,12 +2102,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
- if (smmu->version == ARM_SMMU_V2 &&
- smmu->num_context_banks != smmu->num_context_irqs) {
- dev_err(dev,
- "found only %d context interrupt(s) but %d required\n",
- smmu->num_context_irqs, smmu->num_context_banks);
- return -ENODEV;
+ if (smmu->version == ARM_SMMU_V2) {
+ if (smmu->num_context_banks > smmu->num_context_irqs) {
+ dev_err(dev,
+ "found only %d context irq(s) but %d required\n",
+ smmu->num_context_irqs, smmu->num_context_banks);
+ return -ENODEV;
+ }
+
+ /* Ignore superfluous interrupts */
+ smmu->num_context_irqs = smmu->num_context_banks;
}
for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -2211,13 +2214,6 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
-IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1");
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2");
-IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400");
-IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401");
-IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500");
-IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2");
-
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ddcbbdb5d658..511ff9a1d6d9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -367,6 +367,9 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
+ if (dev->bus_dma_mask)
+ dma_limit &= dev->bus_dma_mask;
+
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
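The dma-iommu hunk above clamps the allocation limit by the bus-level DMA mask in addition to the device mask, so IOVAs land inside whichever constraint is tighter. A numeric sketch with invented masks:

	u64 dma_limit    = DMA_BIT_MASK(48);	/* device addresses 48 bits */
	u64 bus_dma_mask = DMA_BIT_MASK(32);	/* bridge forwards only 32  */

	if (bus_dma_mask)
		dma_limit &= bus_dma_mask;	/* effective limit: 32 bits */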
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 75456b5aa825..d9c748b6f9e4 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1339,8 +1339,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
qi_submit_sync(&desc, iommu);
}
-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask)
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask)
{
struct qi_desc desc;
@@ -1355,7 +1355,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
qdep = 0;
desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
- QI_DIOTLB_TYPE;
+ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
qi_submit_sync(&desc, iommu);
}
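qi_flush_dev_iotlb() now carries a PFSID alongside the SID: for a virtual function it names the owning physical function, letting hardware with device-IOTLB throttling account queue depth at the PF. A sketch of how a caller derives it (mirroring the intel-iommu.c hunk later in this diff):

	/* pci_physfn() returns the PF for a VF and the device itself
	 * otherwise, so this is safe for non-SR-IOV devices too.
	 */
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	u16 pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);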
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 85879cfec52f..1bd0cd7168df 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1332,7 +1332,6 @@ static const struct iommu_ops exynos_iommu_ops = {
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
.add_device = exynos_iommu_add_device,
@@ -1390,5 +1389,3 @@ err_reg_driver:
return ret;
}
core_initcall(exynos_iommu_init);
-
-IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu");
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 14e4b3722428..5f3f10cf9d9d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
@@ -53,6 +52,7 @@
#include <asm/iommu.h>
#include "irq_remapping.h"
+#include "intel-pasid.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -380,60 +380,6 @@ static int hw_pass_through = 1;
for (idx = 0; idx < g_num_of_iommus; idx++) \
if (domain->iommu_refcnt[idx])
-struct dmar_domain {
- int nid; /* node id */
-
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
-
-
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
- * domain ids are 16 bit wide according
- * to VT-d spec, section 9.3 */
-
- bool has_iotlb_device;
- struct list_head devices; /* all devices' list */
- struct iova_domain iovad; /* iova's that belong to this domain */
-
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
-
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
-
- int flags; /* flags to find out type of domain */
-
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- u64 max_addr; /* maximum mapped address */
-
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- u8 pasid_supported:3;
- u8 pasid_enabled:1;
- u8 pri_supported:1;
- u8 pri_enabled:1;
- u8 ats_supported:1;
- u8 ats_enabled:1;
- u8 ats_qdep;
- struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
-};
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -485,14 +431,37 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
+static int intel_iommu_pasid28;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap))
-#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
+/* Broadwell and Skylake have broken ECS support — normal so-called "second
+ * level" translation of DMA requests-without-PASID doesn't actually happen
+ * unless you also set the NESTE bit in an extended context-entry. Which of
+ * course means that SVM doesn't work because it's trying to do nested
+ * translation of the physical addresses it finds in the process page tables,
+ * through the IOVA->phys mapping found in the "second level" page tables.
+ *
+ * The VT-d specification was retroactively changed to change the definition
+ * of the capability bits and pretend that Broadwell/Skylake never happened...
+ * but unfortunately the wrong bit was changed. It's ECS which is broken, but
+ * for some reason it was the PASID capability bit which was redefined (from
+ * bit 28 on BDW/SKL to bit 40 in future).
+ *
+ * So our test for ECS needs to eschew those implementations which set the old
+ * PASID capability bit 28, since those are the ones on which ECS is broken.
+ * Unless we are working around the 'pasid28' limitations, that is, by putting
+ * the device into passthrough mode for normal DMA and thus masking the bug.
+ */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+ (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
+/* PASID support is thus enabled if ECS is enabled and *either* of the old
+ * or new capability bits is set. */
+#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
+ (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -501,6 +470,27 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
+/*
+ * Iterate over elements in device_domain_list and call the specified
+ * callback @fn against each element. This helper should only be used
+ * in a context where device_domain_lock is already held.
+ */
+int for_each_device_domain(int (*fn)(struct device_domain_info *info,
+ void *data), void *data)
+{
+ int ret = 0;
+ struct device_domain_info *info;
+
+ assert_spin_locked(&device_domain_lock);
+ list_for_each_entry(info, &device_domain_list, global) {
+ ret = fn(info, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
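for_each_device_domain() is a lock-asserting visitor over the global device/domain list. A hypothetical caller (callback name and match logic invented for illustration) might look like:

	static int match_dev(struct device_domain_info *info, void *data)
	{
		return info->dev == data;	/* nonzero stops the walk */
	}

	unsigned long flags;
	int found;

	spin_lock_irqsave(&device_domain_lock, flags);
	found = for_each_device_domain(match_dev, dev);
	spin_unlock_irqrestore(&device_domain_lock, flags);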
@@ -555,6 +545,11 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable extended context table support\n");
intel_iommu_ecs = 0;
+ } else if (!strncmp(str, "pasid28", 7)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: enable pre-production PASID support\n");
+ intel_iommu_pasid28 = 1;
+ iommu_identity_mapping |= IDENTMAP_GFX;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -602,7 +597,7 @@ static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
domains[did & 0xff] = domain;
}
-static inline void *alloc_pgtable_page(int node)
+void *alloc_pgtable_page(int node)
{
struct page *page;
void *vaddr = NULL;
@@ -613,7 +608,7 @@ static inline void *alloc_pgtable_page(int node)
return vaddr;
}
-static inline void free_pgtable_page(void *vaddr)
+void free_pgtable_page(void *vaddr)
{
free_page((unsigned long)vaddr);
}
@@ -696,7 +691,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
}
/* This function only returns the single iommu in a domain */
-static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
int iommu_id;
@@ -1474,6 +1469,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
return;
pdev = to_pci_dev(info->dev);
+ /* For an IOMMU that supports device IOTLB throttling (DIT), we assign
+ * a PFSID to the invalidation descriptor of a VF so that the IOMMU
+ * hardware can gauge queue depth at the PF level. If DIT is not set,
+ * the PFSID field is treated as reserved and must be set to 0.
+ */
+ if (!ecap_dit(info->iommu->ecap))
+ info->pfsid = 0;
+ else {
+ struct pci_dev *pf_pdev;
+
+ /* pdev will be returned if the device is not a VF */
+ pf_pdev = pci_physfn(pdev);
+ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ }
#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
@@ -1539,7 +1548,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
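For concreteness, an illustrative composition of the two source-ids, assuming the usual PCI_DEVID(bus, devfn) == (bus << 8) | devfn layout and an invented topology:

/*
 * Example (invented topology): VF 0000:3b:02.1 whose PF is 0000:3b:00.0.
 *   VF devfn = (2 << 3) | 1 = 0x11, so sid   = (0x3b << 8) | 0x11 = 0x3b11
 *   PF devfn = 0x00,            so pfsid = (0x3b << 8) | 0x00 = 0x3b00
 * The invalidation descriptor carries both, letting the hardware gauge
 * queue depth at the PF rather than per VF.
 */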
@@ -1773,7 +1783,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (pasid_enabled(iommu)) {
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
- intel_svm_free_pasid_tables(iommu);
+ intel_svm_exit(iommu);
}
#endif
}
@@ -2468,6 +2478,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->dev = dev;
info->domain = domain;
info->iommu = iommu;
+ info->pasid_table = NULL;
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -2525,6 +2536,15 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
list_add(&info->global, &device_domain_list);
if (dev)
dev->archdata.iommu = info;
+
+ if (dev && dev_is_pci(dev) && info->pasid_supported) {
+ ret = intel_pasid_alloc_table(dev);
+ if (ret) {
+ __dmar_remove_one_dev_info(info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return NULL;
+ }
+ }
spin_unlock_irqrestore(&device_domain_lock, flags);
if (dev && domain_context_mapping(domain, dev)) {
@@ -3277,6 +3297,18 @@ static int __init init_dmars(void)
}
for_each_active_iommu(iommu, drhd) {
+ /*
+ * Find the max pasid size of all IOMMUs in the system.
+ * We need to ensure the system pasid table is no bigger
+ * than the smallest size supported by any IOMMU.
+ */
+ if (pasid_enabled(iommu)) {
+ u32 temp = 2 << ecap_pss(iommu->ecap);
+
+ intel_pasid_max_id = min_t(u32, temp,
+ intel_pasid_max_id);
+ }
+
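A worked example of the clamping, with an assumed pss value:

/*
 * Example: ecap_pss() == 0x13 encodes 0x13 + 1 = 20 PASID bits, so
 * 2 << 0x13 == 1 << 20 == 1048576 supported PASIDs; intel_pasid_max_id
 * ends up as the minimum of these values across all IOMMUs.
 */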
g_iommus[iommu->seq_id] = iommu;
intel_iommu_init_qi(iommu);
@@ -3332,7 +3364,7 @@ static int __init init_dmars(void)
hw_pass_through = 0;
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_enabled(iommu))
- intel_svm_alloc_pasid_tables(iommu);
+ intel_svm_init(iommu);
#endif
}
@@ -3499,7 +3531,7 @@ static unsigned long intel_alloc_iova(struct device *dev,
return iova_pfn;
}
-static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
+struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
struct dmar_domain *domain, *tmp;
struct dmar_rmrr_unit *rmrr;
@@ -3713,30 +3745,62 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
{
- void *vaddr;
+ struct page *page = NULL;
+ int order;
- vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
- if (iommu_no_mapping(dev) || !vaddr)
- return vaddr;
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
- *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
- PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
- if (!*dma_handle)
- goto out_free_pages;
- return vaddr;
+ if (!iommu_no_mapping(dev))
+ flags &= ~(GFP_DMA | GFP_DMA32);
+ else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ flags |= GFP_DMA;
+ else
+ flags |= GFP_DMA32;
+ }
+
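A sketch of the zone selection with assumed masks:

/*
 * Behind the IOMMU the IOVA is remapped, so no zone restriction is
 * needed and GFP_DMA/GFP_DMA32 are cleared. In passthrough, a device
 * with coherent_dma_mask = DMA_BIT_MASK(24) gets GFP_DMA, while one
 * with a 30-bit mask gets GFP_DMA32, so the physical pages themselves
 * honour the mask (illustrative masks, not from the patch).
 */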
+ if (gfpflags_allow_blocking(flags)) {
+ unsigned int count = size >> PAGE_SHIFT;
+
+ page = dma_alloc_from_contiguous(dev, count, order,
+ flags & __GFP_NOWARN);
+ if (page && iommu_no_mapping(dev) &&
+ page_to_phys(page) + size > dev->coherent_dma_mask) {
+ dma_release_from_contiguous(dev, page, count);
+ page = NULL;
+ }
+ }
+
+ if (!page)
+ page = alloc_pages(flags, order);
+ if (!page)
+ return NULL;
+ memset(page_address(page), 0, size);
+
+ *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+ DMA_BIDIRECTIONAL,
+ dev->coherent_dma_mask);
+ if (*dma_handle)
+ return page_address(page);
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, order);
-out_free_pages:
- dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
return NULL;
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- if (!iommu_no_mapping(dev))
- intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
- dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+ int order;
+ struct page *page = virt_to_page(vaddr);
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ intel_unmap(dev, dma_handle, size);
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, order);
}
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -4295,7 +4359,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_enabled(iommu))
- intel_svm_alloc_pasid_tables(iommu);
+ intel_svm_init(iommu);
#endif
if (dmaru->ignored) {
@@ -4847,6 +4911,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
if (info->dev) {
iommu_disable_dev_iotlb(info);
domain_context_clear(iommu, info->dev);
+ intel_pasid_free_table(info->dev);
}
unlink_domain_info(info);
@@ -5172,22 +5237,16 @@ static void intel_iommu_put_resv_regions(struct device *dev,
#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
-static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
+static inline unsigned long intel_iommu_get_pts(struct device *dev)
{
- /*
- * Convert ecap_pss to extend context entry pts encoding, also
- * respect the soft pasid_max value set by the iommu.
- * - number of PASID bits = ecap_pss + 1
- * - number of PASID table entries = 2^(pts + 5)
- * Therefore, pts = ecap_pss - 4
- * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
- */
- if (ecap_pss(iommu->ecap) < 5)
+ int pts, max_pasid;
+
+ max_pasid = intel_pasid_get_dev_max_id(dev);
+ pts = find_first_bit((unsigned long *)&max_pasid, MAX_NR_PASID_BITS);
+ if (pts < 5)
return 0;
- /* pasid_max is encoded as actual number of entries not the bits */
- return find_first_bit((unsigned long *)&iommu->pasid_max,
- MAX_NR_PASID_BITS) - 5;
+ return pts - 5;
}
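The encoding can be checked against the default pasid_max used elsewhere in this series (0x20000):

/*
 * Example: max_pasid = 0x20000 = 1 << 17. find_first_bit() over the
 * low 20 bits returns 17, so pts = 17 - 5 = 12, and the context entry
 * then advertises 2^(pts + 5) = 2^17 PASID-table entries.
 */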
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
@@ -5223,8 +5282,8 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
if (!(ctx_lo & CONTEXT_PASIDE)) {
if (iommu->pasid_state_table)
context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
- context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
- intel_iommu_get_pts(iommu);
+ context[1].lo = (u64)virt_to_phys(info->pasid_table->table) |
+ intel_iommu_get_pts(sdev->dev);
wmb();
/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
@@ -5291,11 +5350,6 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
return NULL;
}
- if (!iommu->pasid_table) {
- dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
- return NULL;
- }
-
return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
@@ -5308,7 +5362,6 @@ const struct iommu_ops intel_iommu_ops = {
.detach_dev = intel_iommu_detach_device,
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = intel_iommu_iova_to_phys,
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
new file mode 100644
index 000000000000..fe95c9bd4d33
--- /dev/null
+++ b/drivers/iommu/intel-pasid.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * intel-pasid.c - PASID idr, table and entry manipulation
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "DMAR: " fmt
+
+#include <linux/dmar.h>
+#include <linux/intel-iommu.h>
+#include <linux/iommu.h>
+#include <linux/memory.h>
+#include <linux/pci.h>
+#include <linux/pci-ats.h>
+#include <linux/spinlock.h>
+
+#include "intel-pasid.h"
+
+/*
+ * Intel IOMMU system-wide PASID namespace:
+ */
+static DEFINE_SPINLOCK(pasid_lock);
+u32 intel_pasid_max_id = PASID_MAX;
+static DEFINE_IDR(pasid_idr);
+
+int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
+{
+ int ret, min, max;
+
+ min = max_t(int, start, PASID_MIN);
+ max = min_t(int, end, intel_pasid_max_id);
+
+ WARN_ON(in_interrupt());
+ idr_preload(gfp);
+ spin_lock(&pasid_lock);
+ ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
+ spin_unlock(&pasid_lock);
+ idr_preload_end();
+
+ return ret;
+}
+
+void intel_pasid_free_id(int pasid)
+{
+ spin_lock(&pasid_lock);
+ idr_remove(&pasid_idr, pasid);
+ spin_unlock(&pasid_lock);
+}
+
+void *intel_pasid_lookup_id(int pasid)
+{
+ void *p;
+
+ spin_lock(&pasid_lock);
+ p = idr_find(&pasid_idr, pasid);
+ spin_unlock(&pasid_lock);
+
+ return p;
+}
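A hypothetical round-trip through the new PASID name space (process context assumed; the function name is invented):

static int pasid_roundtrip(void *owner)
{
	int pasid;

	pasid = intel_pasid_alloc_id(owner, PASID_MIN,
				     intel_pasid_max_id, GFP_KERNEL);
	if (pasid < 0)
		return pasid;

	/* the idr maps the id straight back to its owner */
	WARN_ON(intel_pasid_lookup_id(pasid) != owner);
	intel_pasid_free_id(pasid);

	return 0;
}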
+
+/*
+ * Per device pasid table management:
+ */
+static inline void
+device_attach_pasid_table(struct device_domain_info *info,
+ struct pasid_table *pasid_table)
+{
+ info->pasid_table = pasid_table;
+ list_add(&info->table, &pasid_table->dev);
+}
+
+static inline void
+device_detach_pasid_table(struct device_domain_info *info,
+ struct pasid_table *pasid_table)
+{
+ info->pasid_table = NULL;
+ list_del(&info->table);
+}
+
+struct pasid_table_opaque {
+ struct pasid_table **pasid_table;
+ int segment;
+ int bus;
+ int devfn;
+};
+
+static int search_pasid_table(struct device_domain_info *info, void *opaque)
+{
+ struct pasid_table_opaque *data = opaque;
+
+ if (info->iommu->segment == data->segment &&
+ info->bus == data->bus &&
+ info->devfn == data->devfn &&
+ info->pasid_table) {
+ *data->pasid_table = info->pasid_table;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+ struct pasid_table_opaque *data = opaque;
+
+ data->segment = pci_domain_nr(pdev->bus);
+ data->bus = PCI_BUS_NUM(alias);
+ data->devfn = alias & 0xff;
+
+ return for_each_device_domain(&search_pasid_table, data);
+}
+
+/*
+ * Allocate a pasid table for @dev. It must be called in a
+ * single-threaded context.
+ */
+int intel_pasid_alloc_table(struct device *dev)
+{
+ struct device_domain_info *info;
+ struct pasid_table *pasid_table;
+ struct pasid_table_opaque data;
+ struct page *pages;
+ size_t size, count;
+ int ret, order;
+
+ info = dev->archdata.iommu;
+ if (WARN_ON(!info || !dev_is_pci(dev) ||
+ !info->pasid_supported || info->pasid_table))
+ return -EINVAL;
+
+ /* DMA alias device already has a pasid table, use it: */
+ data.pasid_table = &pasid_table;
+ ret = pci_for_each_dma_alias(to_pci_dev(dev),
+ &get_alias_pasid_table, &data);
+ if (ret)
+ goto attach_out;
+
+ pasid_table = kzalloc(sizeof(*pasid_table), GFP_ATOMIC);
+ if (!pasid_table)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&pasid_table->dev);
+
+ size = sizeof(struct pasid_entry);
+ count = min_t(int, pci_max_pasids(to_pci_dev(dev)), intel_pasid_max_id);
+ order = get_order(size * count);
+ pages = alloc_pages_node(info->iommu->node,
+ GFP_ATOMIC | __GFP_ZERO,
+ order);
+ if (!pages) {
+ kfree(pasid_table);
+ return -ENOMEM;
+ }
+
+ pasid_table->table = page_address(pages);
+ pasid_table->order = order;
+ pasid_table->max_pasid = count;
+
+attach_out:
+ device_attach_pasid_table(info, pasid_table);
+
+ return 0;
+}
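The sizing arithmetic, with assumed device and system limits:

/*
 * Example: a device whose pci_max_pasids() is 0x10000 on a system with
 * intel_pasid_max_id = 0x100000 gets count = 0x10000; at
 * sizeof(struct pasid_entry) == 8 that is 0x80000 bytes (512KiB), i.e.
 * order = get_order(0x80000) = 7 with 4KiB pages.
 */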
+
+void intel_pasid_free_table(struct device *dev)
+{
+ struct device_domain_info *info;
+ struct pasid_table *pasid_table;
+
+ info = dev->archdata.iommu;
+ if (!info || !dev_is_pci(dev) ||
+ !info->pasid_supported || !info->pasid_table)
+ return;
+
+ pasid_table = info->pasid_table;
+ device_detach_pasid_table(info, pasid_table);
+
+ if (!list_empty(&pasid_table->dev))
+ return;
+
+ free_pages((unsigned long)pasid_table->table, pasid_table->order);
+ kfree(pasid_table);
+}
+
+struct pasid_table *intel_pasid_get_table(struct device *dev)
+{
+ struct device_domain_info *info;
+
+ info = dev->archdata.iommu;
+ if (!info)
+ return NULL;
+
+ return info->pasid_table;
+}
+
+int intel_pasid_get_dev_max_id(struct device *dev)
+{
+ struct device_domain_info *info;
+
+ info = dev->archdata.iommu;
+ if (!info || !info->pasid_table)
+ return 0;
+
+ return info->pasid_table->max_pasid;
+}
+
+struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
+{
+ struct pasid_table *pasid_table;
+ struct pasid_entry *entries;
+
+ pasid_table = intel_pasid_get_table(dev);
+ if (WARN_ON(!pasid_table || pasid < 0 ||
+ pasid >= intel_pasid_get_dev_max_id(dev)))
+ return NULL;
+
+ entries = pasid_table->table;
+
+ return &entries[pasid];
+}
+
+/*
+ * Interfaces for PASID table entry manipulation:
+ */
+static inline void pasid_clear_entry(struct pasid_entry *pe)
+{
+ WRITE_ONCE(pe->val, 0);
+}
+
+void intel_pasid_clear_entry(struct device *dev, int pasid)
+{
+ struct pasid_entry *pe;
+
+ pe = intel_pasid_get_entry(dev, pasid);
+ if (WARN_ON(!pe))
+ return;
+
+ pasid_clear_entry(pe);
+}
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
new file mode 100644
index 000000000000..1c05ed6fc5a5
--- /dev/null
+++ b/drivers/iommu/intel-pasid.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * intel-pasid.h - PASID idr, table and entry header
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#ifndef __INTEL_PASID_H
+#define __INTEL_PASID_H
+
+#define PASID_MIN 0x1
+#define PASID_MAX 0x100000
+
+struct pasid_entry {
+ u64 val;
+};
+
+/* The representative of a PASID table */
+struct pasid_table {
+ void *table; /* pasid table pointer */
+ int order; /* page order of pasid table */
+ int max_pasid; /* max pasid */
+ struct list_head dev; /* device list */
+};
+
+extern u32 intel_pasid_max_id;
+int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
+void intel_pasid_free_id(int pasid);
+void *intel_pasid_lookup_id(int pasid);
+int intel_pasid_alloc_table(struct device *dev);
+void intel_pasid_free_table(struct device *dev);
+struct pasid_table *intel_pasid_get_table(struct device *dev);
+int intel_pasid_get_dev_max_id(struct device *dev);
+struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid);
+void intel_pasid_clear_entry(struct device *dev, int pasid);
+
+#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 45f6e581cd56..4a03e5090952 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -24,23 +24,22 @@
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
+#include <linux/mm_types.h>
#include <asm/page.h>
+#include "intel-pasid.h"
+
#define PASID_ENTRY_P BIT_ULL(0)
#define PASID_ENTRY_FLPM_5LP BIT_ULL(9)
#define PASID_ENTRY_SRE BIT_ULL(11)
static irqreturn_t prq_event_thread(int irq, void *d);
-struct pasid_entry {
- u64 val;
-};
-
struct pasid_state_entry {
u64 val;
};
-int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
+int intel_svm_init(struct intel_iommu *iommu)
{
struct page *pages;
int order;
@@ -65,15 +64,6 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
iommu->pasid_max = 0x20000;
order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!pages) {
- pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
- iommu->name);
- return -ENOMEM;
- }
- iommu->pasid_table = page_address(pages);
- pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
-
if (ecap_dis(iommu->ecap)) {
/* Just making it explicit... */
BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
@@ -85,24 +75,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
iommu->name);
}
- idr_init(&iommu->pasid_idr);
-
return 0;
}
-int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
+int intel_svm_exit(struct intel_iommu *iommu)
{
int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
- if (iommu->pasid_table) {
- free_pages((unsigned long)iommu->pasid_table, order);
- iommu->pasid_table = NULL;
- }
if (iommu->pasid_state_table) {
free_pages((unsigned long)iommu->pasid_state_table, order);
iommu->pasid_state_table = NULL;
}
- idr_destroy(&iommu->pasid_idr);
+
return 0;
}
@@ -278,11 +262,9 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* page) so that we end up taking a fault that the hardware really
* *has* to handle gracefully without affecting other processes.
*/
- svm->iommu->pasid_table[svm->pasid].val = 0;
- wmb();
-
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ intel_pasid_clear_entry(sdev->dev, svm->pasid);
intel_flush_pasid_dev(svm, sdev, svm->pasid);
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
}
@@ -298,10 +280,12 @@ static const struct mmu_notifier_ops intel_mmuops = {
};
static DEFINE_MUTEX(pasid_mutex);
+static LIST_HEAD(global_svm_list);
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct pasid_entry *entry;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
struct mm_struct *mm = NULL;
@@ -309,7 +293,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
int pasid_max;
int ret;
- if (WARN_ON(!iommu || !iommu->pasid_table))
+ if (!iommu)
return -EINVAL;
if (dev_is_pci(dev)) {
@@ -329,13 +313,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
mutex_lock(&pasid_mutex);
if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
- int i;
+ struct intel_svm *t;
- idr_for_each_entry(&iommu->pasid_idr, svm, i) {
- if (svm->mm != mm ||
- (svm->flags & SVM_FLAG_PRIVATE_PASID))
+ list_for_each_entry(t, &global_svm_list, list) {
+ if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
continue;
+ svm = t;
if (svm->pasid >= pasid_max) {
dev_warn(dev,
"Limited PASID width. Cannot use existing PASID %d\n",
@@ -387,13 +371,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
svm->iommu = iommu;
- if (pasid_max > iommu->pasid_max)
- pasid_max = iommu->pasid_max;
+ if (pasid_max > intel_pasid_max_id)
+ pasid_max = intel_pasid_max_id;
/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
- ret = idr_alloc(&iommu->pasid_idr, svm,
- !!cap_caching_mode(iommu->cap),
- pasid_max - 1, GFP_KERNEL);
+ ret = intel_pasid_alloc_id(svm,
+ !!cap_caching_mode(iommu->cap),
+ pasid_max - 1, GFP_KERNEL);
if (ret < 0) {
kfree(svm);
kfree(sdev);
@@ -404,11 +388,12 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
svm->mm = mm;
svm->flags = flags;
INIT_LIST_HEAD_RCU(&svm->devs);
+ INIT_LIST_HEAD(&svm->list);
ret = -ENOMEM;
if (mm) {
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
- idr_remove(&svm->iommu->pasid_idr, svm->pasid);
+ intel_pasid_free_id(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -420,7 +405,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (cpu_feature_enabled(X86_FEATURE_LA57))
pasid_entry_val |= PASID_ENTRY_FLPM_5LP;
- iommu->pasid_table[svm->pasid].val = pasid_entry_val;
+ entry = intel_pasid_get_entry(dev, svm->pasid);
+ entry->val = pasid_entry_val;
wmb();
@@ -430,6 +416,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
*/
if (cap_caching_mode(iommu->cap))
intel_flush_pasid_dev(svm, sdev, svm->pasid);
+
+ list_add_tail(&svm->list, &global_svm_list);
}
list_add_rcu(&sdev->list, &svm->devs);
@@ -453,10 +441,10 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
mutex_lock(&pasid_mutex);
iommu = intel_svm_device_to_iommu(dev);
- if (!iommu || !iommu->pasid_table)
+ if (!iommu)
goto out;
- svm = idr_find(&iommu->pasid_idr, pasid);
+ svm = intel_pasid_lookup_id(pasid);
if (!svm)
goto out;
@@ -476,15 +464,15 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
intel_flush_pasid_dev(svm, sdev, svm->pasid);
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
kfree_rcu(sdev, rcu);
+ intel_pasid_clear_entry(dev, svm->pasid);
if (list_empty(&svm->devs)) {
- svm->iommu->pasid_table[svm->pasid].val = 0;
- wmb();
-
- idr_remove(&svm->iommu->pasid_idr, svm->pasid);
+ intel_pasid_free_id(svm->pasid);
if (svm->mm)
mmu_notifier_unregister(&svm->notifier, svm->mm);
+ list_del(&svm->list);
+
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
* If that is not obeyed, subtle errors will happen.
@@ -511,10 +499,10 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
mutex_lock(&pasid_mutex);
iommu = intel_svm_device_to_iommu(dev);
- if (!iommu || !iommu->pasid_table)
+ if (!iommu)
goto out;
- svm = idr_find(&iommu->pasid_idr, pasid);
+ svm = intel_pasid_lookup_id(pasid);
if (!svm)
goto out;
@@ -594,7 +582,8 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct vm_area_struct *vma;
struct page_req_dsc *req;
struct qi_desc resp;
- int ret, result;
+ int result;
+ vm_fault_t ret;
u64 address;
handled = 1;
@@ -612,7 +601,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!svm || svm->pasid != req->pasid) {
rcu_read_lock();
- svm = idr_find(&iommu->pasid_idr, req->pasid);
+ svm = intel_pasid_lookup_id(req->pasid);
/* It *can't* go away, because the driver is not permitted
* to unbind the mm while any page faults are outstanding.
* So we only need RCU to protect the internal idr code. */
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 50e3a9fcf43e..b5948ba6b3b3 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -192,6 +192,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct device *dev = cfg->iommu_dev;
+ phys_addr_t phys;
dma_addr_t dma;
size_t size = ARM_V7S_TABLE_SIZE(lvl);
void *table = NULL;
@@ -200,6 +201,10 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+ phys = virt_to_phys(table);
+ if (phys != (arm_v7s_iopte)phys)
+ /* Doesn't fit in PTE */
+ goto out_free;
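An illustration of what the overflow check catches, with an invented physical address:

/*
 * arm_v7s_iopte is 32 bits wide, so a table allocated at
 * phys = 0x123456000 would truncate to 0x23456000 in the PTE;
 * the check above rejects such allocations instead.
 */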
if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
@@ -209,7 +214,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
* address directly, so if the DMA layer suggests otherwise by
* translating or truncating them, that bodes very badly...
*/
- if (dma != virt_to_phys(table))
+ if (dma != phys)
goto out_unmap;
}
kmemleak_ignore(table);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 010a254305dd..88641b4560bc 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -237,7 +237,8 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *pages;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+ p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
+ gfp | __GFP_ZERO, order);
if (!p)
return NULL;
diff --git a/drivers/iommu/iommu-debugfs.c b/drivers/iommu/iommu-debugfs.c
new file mode 100644
index 000000000000..3b1bf88fd1b0
--- /dev/null
+++ b/drivers/iommu/iommu-debugfs.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOMMU debugfs core infrastructure
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/iommu.h>
+#include <linux/debugfs.h>
+
+struct dentry *iommu_debugfs_dir;
+
+/**
+ * iommu_debugfs_setup - create the top-level iommu directory in debugfs
+ *
+ * Provide base enablement for using debugfs to expose internal data of an
+ * IOMMU driver. When called, this function creates the
+ * /sys/kernel/debug/iommu directory.
+ *
+ * Emit a strong warning at boot time to indicate that this feature is
+ * enabled.
+ *
+ * This function is called from iommu_init; drivers may then call
+ * iommu_debugfs_new_driver_dir() to instantiate a vendor-specific
+ * directory to be used to expose internal data.
+ */
+void iommu_debugfs_setup(void)
+{
+ if (!iommu_debugfs_dir) {
+ iommu_debugfs_dir = debugfs_create_dir("iommu", NULL);
+ pr_warn("\n");
+ pr_warn("*************************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** IOMMU DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
+ pr_warn("** **\n");
+ pr_warn("** This means that this kernel is built to expose internal **\n");
+ pr_warn("** IOMMU data structures, which may compromise security on **\n");
+ pr_warn("** your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging the **\n");
+ pr_warn("** kernel, report this immediately to your vendor! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("*************************************************************\n");
+ }
+}
+
+/**
+ * iommu_debugfs_new_driver_dir - create a vendor directory under debugfs/iommu
+ * @vendor: name of the vendor-specific subdirectory to create
+ *
+ * This function is called by an IOMMU driver to create the top-level debugfs
+ * directory for that driver.
+ *
+ * Return: upon success, a pointer to the dentry for the new directory.
+ * NULL in case of failure.
+ */
+struct dentry *iommu_debugfs_new_driver_dir(const char *vendor)
+{
+ return debugfs_create_dir(vendor, iommu_debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(iommu_debugfs_new_driver_dir);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 63b37563db7e..8c15c5980299 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -36,7 +36,11 @@
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
+#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
+static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
+#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
+#endif
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -294,11 +298,39 @@ static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
return (str - buf);
}
+static ssize_t iommu_group_show_type(struct iommu_group *group,
+ char *buf)
+{
+ char *type = "unknown\n";
+
+ if (group->default_domain) {
+ switch (group->default_domain->type) {
+ case IOMMU_DOMAIN_BLOCKED:
+ type = "blocked\n";
+ break;
+ case IOMMU_DOMAIN_IDENTITY:
+ type = "identity\n";
+ break;
+ case IOMMU_DOMAIN_UNMANAGED:
+ type = "unmanaged\n";
+ break;
+ case IOMMU_DOMAIN_DMA:
+ type = "DMA";
+ break;
+ }
+ }
+ strcpy(buf, type);
+
+ return strlen(type);
+}
+
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
static IOMMU_GROUP_ATTR(reserved_regions, 0444,
iommu_group_show_resv_regions, NULL);
+static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
+
static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);
@@ -380,6 +412,10 @@ struct iommu_group *iommu_group_alloc(void)
if (ret)
return ERR_PTR(ret);
+ ret = iommu_group_create_file(group, &iommu_group_attr_type);
+ if (ret)
+ return ERR_PTR(ret);
+
pr_debug("Allocated group %d\n", group->id);
return group;
@@ -1637,8 +1673,8 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
+size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
{
struct scatterlist *s;
size_t mapped = 0;
@@ -1678,7 +1714,7 @@ out_err:
return 0;
}
-EXPORT_SYMBOL_GPL(default_iommu_map_sg);
+EXPORT_SYMBOL_GPL(iommu_map_sg);
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
@@ -1748,6 +1784,8 @@ static int __init iommu_init(void)
NULL, kernel_kobj);
BUG_ON(!iommu_group_kset);
+ iommu_debugfs_setup();
+
return 0;
}
core_initcall(iommu_init);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 40ae6e87cb88..22b94f8a9a04 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -47,6 +47,7 @@ struct ipmmu_features {
unsigned int number_of_contexts;
bool setup_imbuscr;
bool twobit_imttbcr_sl0;
+ bool reserved_context;
};
struct ipmmu_vmsa_device {
@@ -73,7 +74,7 @@ struct ipmmu_vmsa_domain {
struct io_pgtable_ops *iop;
unsigned int context_id;
- spinlock_t lock; /* Protects mappings */
+ struct mutex mutex; /* Protects mappings */
};
static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
@@ -194,7 +195,9 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IMPMBA(n) (0x0280 + ((n) * 4))
#define IMPMBD(n) (0x02c0 + ((n) * 4))
-#define IMUCTR(n) (0x0300 + ((n) * 16))
+#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
+#define IMUCTR0(n) (0x0300 + ((n) * 16))
+#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16))
#define IMUCTR_FIXADDEN (1 << 31)
#define IMUCTR_FIXADD_MASK (0xff << 16)
#define IMUCTR_FIXADD_SHIFT 16
@@ -204,7 +207,9 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IMUCTR_FLUSH (1 << 1)
#define IMUCTR_MMUEN (1 << 0)
-#define IMUASID(n) (0x0308 + ((n) * 16))
+#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
+#define IMUASID0(n) (0x0308 + ((n) * 16))
+#define IMUASID32(n) (0x0608 + (((n) - 32) * 16))
#define IMUASID_ASID8_MASK (0xff << 8)
#define IMUASID_ASID8_SHIFT 8
#define IMUASID_ASID0_MASK (0xff << 0)
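A banking example for the reworked macros:

/*
 * IMUCTR(3)  = 0x0300 + 3 * 16         = 0x0330
 * IMUCTR(35) = 0x0600 + (35 - 32) * 16 = 0x0630
 * uTLBs 32..47 therefore live in a second register bank starting at
 * 0x0600/0x0608, matching the bump of num_utlbs from 32 to 48 below.
 */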
@@ -595,7 +600,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
if (!domain)
return NULL;
- spin_lock_init(&domain->lock);
+ mutex_init(&domain->mutex);
return &domain->io_domain;
}
@@ -641,7 +646,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- unsigned long flags;
unsigned int i;
int ret = 0;
@@ -650,7 +654,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return -ENXIO;
}
- spin_lock_irqsave(&domain->lock, flags);
+ mutex_lock(&domain->mutex);
if (!domain->mmu) {
/* The domain hasn't been used yet, initialize it. */
@@ -674,7 +678,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
} else
dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
- spin_unlock_irqrestore(&domain->lock, flags);
+ mutex_unlock(&domain->mutex);
if (ret < 0)
return ret;
@@ -756,8 +760,12 @@ static bool ipmmu_slave_whitelist(struct device *dev)
return false;
}
-static const struct soc_device_attribute soc_r8a7795[] = {
+static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a7795", },
+ { .soc_id = "r8a7796", },
+ { .soc_id = "r8a77965", },
+ { .soc_id = "r8a77970", },
+ { .soc_id = "r8a77995", },
{ /* sentinel */ }
};
@@ -765,7 +773,7 @@ static int ipmmu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
/* For R-Car Gen3 use a white list to opt-in slave devices */
- if (soc_device_match(soc_r8a7795) && !ipmmu_slave_whitelist(dev))
+ if (soc_device_match(soc_rcar_gen3) && !ipmmu_slave_whitelist(dev))
return -ENODEV;
iommu_fwspec_add_ids(dev, spec->args, 1);
@@ -889,7 +897,6 @@ static const struct iommu_ops ipmmu_ops = {
.unmap = ipmmu_unmap,
.flush_iotlb_all = ipmmu_iotlb_sync,
.iotlb_sync = ipmmu_iotlb_sync,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.remove_device = ipmmu_remove_device,
@@ -917,14 +924,16 @@ static const struct ipmmu_features ipmmu_features_default = {
.number_of_contexts = 1, /* software only tested with one context */
.setup_imbuscr = true,
.twobit_imttbcr_sl0 = false,
+ .reserved_context = false,
};
-static const struct ipmmu_features ipmmu_features_r8a7795 = {
+static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.use_ns_alias_offset = false,
.has_cache_leaf_nodes = true,
.number_of_contexts = 8,
.setup_imbuscr = false,
.twobit_imttbcr_sl0 = true,
+ .reserved_context = true,
};
static const struct of_device_id ipmmu_of_ids[] = {
@@ -933,7 +942,19 @@ static const struct of_device_id ipmmu_of_ids[] = {
.data = &ipmmu_features_default,
}, {
.compatible = "renesas,ipmmu-r8a7795",
- .data = &ipmmu_features_r8a7795,
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
+ .compatible = "renesas,ipmmu-r8a7796",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
+ .compatible = "renesas,ipmmu-r8a77965",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
+ .compatible = "renesas,ipmmu-r8a77970",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
+ .compatible = "renesas,ipmmu-r8a77995",
+ .data = &ipmmu_features_rcar_gen3,
}, {
/* Terminator */
},
@@ -955,7 +976,7 @@ static int ipmmu_probe(struct platform_device *pdev)
}
mmu->dev = &pdev->dev;
- mmu->num_utlbs = 32;
+ mmu->num_utlbs = 48;
spin_lock_init(&mmu->lock);
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
mmu->features = of_device_get_match_data(&pdev->dev);
@@ -1018,6 +1039,11 @@ static int ipmmu_probe(struct platform_device *pdev)
}
ipmmu_device_reset(mmu);
+
+ if (mmu->features->reserved_context) {
+ dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
+ set_bit(0, mmu->ctx);
+ }
}
/*
@@ -1081,12 +1107,19 @@ static struct platform_driver ipmmu_driver = {
static int __init ipmmu_init(void)
{
+ struct device_node *np;
static bool setup_done;
int ret;
if (setup_done)
return 0;
+ np = of_find_matching_node(NULL, ipmmu_of_ids);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
ret = platform_driver_register(&ipmmu_driver);
if (ret < 0)
return ret;
@@ -1108,9 +1141,6 @@ static void __exit ipmmu_exit(void)
subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);
-IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa");
-IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795");
-
MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 0d3350463a3f..fc5f0b53adaf 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -395,20 +395,15 @@ static int msm_iommu_add_device(struct device *dev)
struct msm_iommu_dev *iommu;
struct iommu_group *group;
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
-
iommu = find_iommu_for_dev(dev);
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
if (iommu)
iommu_device_link(&iommu->iommu, dev);
else
- ret = -ENODEV;
-
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
- if (ret)
- return ret;
+ return -ENODEV;
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@@ -425,13 +420,12 @@ static void msm_iommu_remove_device(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&msm_iommu_lock, flags);
-
iommu = find_iommu_for_dev(dev);
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
if (iommu)
iommu_device_unlink(&iommu->iommu, dev);
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
iommu_group_remove_device(dev);
}
@@ -708,7 +702,6 @@ static struct iommu_ops msm_iommu_ops = {
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.add_device = msm_iommu_add_device,
.remove_device = msm_iommu_remove_device,
@@ -877,7 +870,5 @@ static void __exit msm_iommu_driver_exit(void)
subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);
-IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu");
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index f2832a10fcea..f9f69f7111a9 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -495,7 +495,6 @@ static struct iommu_ops mtk_iommu_ops = {
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.flush_iotlb_all = mtk_iommu_iotlb_sync,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iova_to_phys = mtk_iommu_iova_to_phys,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a7c2a973784f..676c029494e4 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -531,7 +531,6 @@ static struct iommu_ops mtk_iommu_ops = {
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = mtk_iommu_iova_to_phys,
.add_device = mtk_iommu_add_device,
.remove_device = mtk_iommu_remove_device,
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5c36a8b7656a..f7787e757244 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -27,9 +27,6 @@
#define NO_IOMMU 1
-static const struct of_device_id __iommu_of_table_sentinel
- __used __section(__iommu_of_table_end);
-
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
*
@@ -98,19 +95,6 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
-static bool of_iommu_driver_present(struct device_node *np)
-{
- /*
- * If the IOMMU still isn't ready by the time we reach init, assume
- * it never will be. We don't want to defer indefinitely, nor attempt
- * to dereference __iommu_of_table after it's been freed.
- */
- if (system_state >= SYSTEM_RUNNING)
- return false;
-
- return of_match_node(&__iommu_of_table, np);
-}
-
static int of_iommu_xlate(struct device *dev,
struct of_phandle_args *iommu_spec)
{
@@ -120,8 +104,7 @@ static int of_iommu_xlate(struct device *dev,
ops = iommu_ops_from_fwnode(fwnode);
if ((ops && !ops->of_xlate) ||
- !of_device_is_available(iommu_spec->np) ||
- (!ops && !of_iommu_driver_present(iommu_spec->np)))
+ !of_device_is_available(iommu_spec->np))
return NO_IOMMU;
err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
@@ -133,7 +116,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
- return -EPROBE_DEFER;
+ return driver_deferred_probe_check_state(dev);
return ops->of_xlate(dev, iommu_spec);
}
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index af4a8e7fcd27..d2fb347aa4ff 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -550,7 +550,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
pte_ready:
iopte = iopte_offset(iopgd, da);
- *pt_dma = virt_to_phys(iopte);
+ *pt_dma = iopgd_page_paddr(iopgd);
dev_vdbg(obj->dev,
"%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
__func__, da, iopgd, *iopgd, iopte, *iopte);
@@ -738,7 +738,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
}
bytes *= nent;
memset(iopte, 0, nent * sizeof(*iopte));
- pt_dma = virt_to_phys(iopte);
+ pt_dma = iopgd_page_paddr(iopgd);
flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
/*
@@ -1548,7 +1548,6 @@ static const struct iommu_ops omap_iommu_ops = {
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = omap_iommu_iova_to_phys,
.add_device = omap_iommu_add_device,
.remove_device = omap_iommu_remove_device,
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index fe88a4880d3a..ee70e9921cf1 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -590,7 +590,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.detach_dev = qcom_iommu_detach_dev,
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.flush_iotlb_all = qcom_iommu_iotlb_sync,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
@@ -945,7 +944,5 @@ static void __exit qcom_iommu_exit(void)
module_init(qcom_iommu_init);
module_exit(qcom_iommu_exit);
-IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1");
-
MODULE_DESCRIPTION("IOMMU API for QCOM IOMMU v1 implementations");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 054cd2c8e9c8..258115b10fa9 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -521,10 +521,11 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
u32 int_status;
dma_addr_t iova;
irqreturn_t ret = IRQ_NONE;
- int i;
+ int i, err;
- if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
- return 0;
+ err = pm_runtime_get_if_in_use(iommu->dev);
+ if (WARN_ON_ONCE(err <= 0))
+ return ret;
if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
goto out;
@@ -620,11 +621,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_for_each(pos, &rk_domain->iommus) {
struct rk_iommu *iommu;
+ int ret;
iommu = list_entry(pos, struct rk_iommu, node);
/* Only zap TLBs of IOMMUs that are powered on. */
- if (pm_runtime_get_if_in_use(iommu->dev)) {
+ ret = pm_runtime_get_if_in_use(iommu->dev);
+ if (WARN_ON_ONCE(ret < 0))
+ continue;
+ if (ret) {
WARN_ON(clk_bulk_enable(iommu->num_clocks,
iommu->clocks));
rk_iommu_zap_lines(iommu, iova, size);
@@ -891,6 +896,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
+ int ret;
/* Allow 'virtual devices' (eg drm) to detach from domain */
iommu = rk_iommu_from_dev(dev);
@@ -909,7 +915,9 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
list_del_init(&iommu->node);
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
- if (pm_runtime_get_if_in_use(iommu->dev)) {
+ ret = pm_runtime_get_if_in_use(iommu->dev);
+ WARN_ON_ONCE(ret < 0);
+ if (ret > 0) {
rk_iommu_disable(iommu);
pm_runtime_put(iommu->dev);
}
@@ -946,7 +954,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
list_add_tail(&iommu->node, &rk_domain->iommus);
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
- if (!pm_runtime_get_if_in_use(iommu->dev))
+ ret = pm_runtime_get_if_in_use(iommu->dev);
+ if (!ret || WARN_ON_ONCE(ret < 0))
return 0;
ret = rk_iommu_enable(iommu);
@@ -1110,7 +1119,6 @@ static const struct iommu_ops rk_iommu_ops = {
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
- .map_sg = default_iommu_map_sg,
.add_device = rk_iommu_add_device,
.remove_device = rk_iommu_remove_device,
.iova_to_phys = rk_iommu_iova_to_phys,
@@ -1152,17 +1160,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
- i = 0;
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
- if (irq < 0)
- return irq;
-
- err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
- IRQF_SHARED, dev_name(dev), iommu);
- if (err)
- return err;
- }
-
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
@@ -1219,6 +1216,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ i = 0;
+ while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (err) {
+ pm_runtime_disable(dev);
+ goto err_remove_sysfs;
+ }
+ }
+
return 0;
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
@@ -1284,8 +1294,6 @@ static int __init rk_iommu_init(void)
}
subsys_initcall(rk_iommu_init);
-IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");
-
MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index a004f6da35f2..7b1361d57a17 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -377,7 +377,6 @@ static const struct iommu_ops gart_iommu_ops = {
.remove_device = gart_iommu_remove_device,
.device_group = generic_device_group,
.map = gart_iommu_map,
- .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 44d40bc771b5..0d03341317c4 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -876,7 +876,6 @@ static const struct iommu_ops tegra_smmu_ops = {
.device_group = tegra_smmu_device_group,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
- .map_sg = default_iommu_map_sg,
.iova_to_phys = tegra_smmu_iova_to_phys,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index a16b320739b4..8a9c169b6f99 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -304,6 +304,13 @@ static int tpci200_register(struct tpci200_board *tpci200)
ioremap_nocache(pci_resource_start(tpci200->info->pdev,
TPCI200_IP_INTERFACE_BAR),
TPCI200_IFACE_SIZE);
+ if (!tpci200->info->interface_regs) {
+ dev_err(&tpci200->info->pdev->dev,
+ "(bn 0x%X, sn 0x%X) failed to map driver user space!",
+ tpci200->info->pdev->bus->number,
+ tpci200->info->pdev->devfn);
+ goto out_release_mem8_space;
+ }
/* Initialize lock that protects interface_regs */
spin_lock_init(&tpci200->regs_lock);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e9233db16e03..383e7b70221d 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,7 +8,7 @@ config ARM_GIC
bool
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
config ARM_GIC_V3
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
config ARM_VIC
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config ARM_VIC_NR
int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config ATMEL_AIC5_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
config FARADAY_FTINTC010
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
bool
depends on ARCH_CLPS711X
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
default y
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
config ORION_IRQCHIP
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config PIC32_EVIC
bool
@@ -372,3 +372,15 @@ config QCOM_PDC
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
endmenu
+
+config SIFIVE_PLIC
+ bool "SiFive Platform-Level Interrupt Controller"
+ depends on RISCV
+ help
+ This enables support for the PLIC chip found in SiFive (and
potentially other) RISC-V systems. The PLIC controls device
+ interrupts and connects them to each core's local interrupt
+ controller. Aside from timer and software interrupts, all other
+ interrupt sources are subordinate to the PLIC.
+
+ If you don't know what to do here, say Y.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 15f268f646bf..fbd1ec8070ef 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -87,3 +87,4 @@ obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
+obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 4eca5c763766..606efa64adff 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
*/
info->scratchpad[0].ul = mc_bus_dev->icid;
msi_info = msi_get_domain_info(msi_domain->parent);
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
}
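The rounding rule as a standalone sketch (helper name invented):

static int its_msi_nvec(int nvec)
{
	/* at least 32, always a power of two */
	return max_t(int, 32, roundup_pow_of_two(nvec));
	/* 5 -> 32, 33 -> 64, 200 -> 256 */
}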
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 25a98de5cfb2..8d6d009d1d58 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
{
struct pci_dev *pdev, *alias_dev;
struct msi_domain_info *msi_info;
- int alias_count = 0;
+ int alias_count = 0, minnvec = 1;
if (!dev_is_pci(dev))
return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
- return msi_info->ops->msi_prepare(domain->parent,
- dev, max(nvec, alias_count), info);
+ /*
+ * Always allocate a power of 2, and special case device 0 for
+ * broken systems where the DevID is not wired (and all devices
+ * appear as DevID 0). For that reason, we generously allocate a
+ * minimum of 32 MSIs for DevID 0. If you want more because all
+ * your devices are aliasing to DevID 0, consider fixing your HW.
+ */
+ nvec = max(nvec, alias_count);
+ if (!info->scratchpad[0].ul)
+ minnvec = 32;
+ nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
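Illustrative values for the two cases (DevIDs invented):

/*
 * A normal device with DevID 0x1234 asking for nvec = 2 (alias_count 0)
 * gets max_t(int, 1, roundup_pow_of_two(2)) = 2; a device aliasing to
 * DevID 0 asking for the same gets max_t(int, 32, 2) = 32.
 */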
static struct msi_domain_ops its_pci_msi_ops = {
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 8881a053c173..7b8e87b493fe 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(domain->parent,
dev, nvec, info);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d7842d312d3e..316a57530f6d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -23,6 +23,8 @@
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
} vpe_proxy;
static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
@@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
.irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
};
+
/*
* How we allocate LPIs:
*
- * The GIC has id_bits bits for interrupt identifiers. From there, we
- * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
- * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
- * bits to the right.
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
*
- * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
*/
-#define IRQS_PER_CHUNK_SHIFT 5
-#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
-#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
-static unsigned long *lpi_bitmap;
-static u32 lpi_chunks;
-static DEFINE_SPINLOCK(lpi_lock);
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+ struct list_head entry;
+ u32 base_id;
+ u32 span;
+};
-static int its_lpi_to_chunk(int lpi)
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
- return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+ struct lpi_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (range) {
+ INIT_LIST_HEAD(&range->entry);
+ range->base_id = base;
+ range->span = span;
+ }
+
+ return range;
}
-static int its_chunk_to_lpi(int chunk)
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
- return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+ struct lpi_range *ra, *rb;
+
+ ra = container_of(a, struct lpi_range, entry);
+ rb = container_of(b, struct lpi_range, entry);
+
+ return rb->base_id - ra->base_id;
}
-static int __init its_lpi_init(u32 id_bits)
+static void merge_lpi_ranges(void)
{
- lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+ struct lpi_range *range, *tmp;
- lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
- GFP_KERNEL);
- if (!lpi_bitmap) {
- lpi_chunks = 0;
- return -ENOMEM;
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (!list_is_last(&range->entry, &lpi_range_list) &&
+ (tmp->base_id == (range->base_id + range->span))) {
+ tmp->base_id = range->base_id;
+ tmp->span += range->span;
+ list_del(&range->entry);
+ kfree(range);
+ }
}
+}
- pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
- return 0;
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+ struct lpi_range *range, *tmp;
+ int err = -ENOSPC;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (range->span >= nr_lpis) {
+ *base = range->base_id;
+ range->base_id += nr_lpis;
+ range->span -= nr_lpis;
+
+ if (range->span == 0) {
+ list_del(&range->entry);
+ kfree(range);
+ }
+
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&lpi_range_lock);
+
+ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+ return err;
}
-static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+static int free_lpi_range(u32 base, u32 nr_lpis)
{
- unsigned long *bitmap = NULL;
- int chunk_id;
- int nr_chunks;
- int i;
+ struct lpi_range *new;
+ int err = 0;
+
+ mutex_lock(&lpi_range_lock);
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&new->entry, &lpi_range_list);
+ list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+ merge_lpi_ranges();
+out:
+ mutex_unlock(&lpi_range_lock);
+ return err;
+}
+
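A hypothetical round-trip through the range allocator (function name invented, process context assumed):

static void lpi_range_selftest(void)
{
	u32 base;

	if (!alloc_lpi_range(32, &base)) {
		/* [base, base + 31] is now reserved */
		WARN_ON(free_lpi_range(base, 32));
		/* list_sort() + merge_lpi_ranges() coalesce the block
		 * back into its free neighbour(s) */
	}
}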
+static int __init its_lpi_init(u32 id_bits)
+{
+ u32 lpis = (1UL << id_bits) - 8192;
+ u32 numlpis;
+ int err;
+
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+ lpis = numlpis;
+ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+ lpis);
+ }
- nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+ /*
+ * Initializing the allocator is just the same as freeing the
+ * full range of LPIs.
+ */
+ err = free_lpi_range(8192, lpis);
+ pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+ return err;
+}
- spin_lock(&lpi_lock);
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+ unsigned long *bitmap = NULL;
+ int err = 0;
do {
- chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
- 0, nr_chunks, 0);
- if (chunk_id < lpi_chunks)
+ err = alloc_lpi_range(nr_irqs, base);
+ if (!err)
break;
- nr_chunks--;
- } while (nr_chunks > 0);
+ nr_irqs /= 2;
+ } while (nr_irqs > 0);
- if (!nr_chunks)
+ if (err)
goto out;
- bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
- sizeof(long),
- GFP_ATOMIC);
+ bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof(long), GFP_ATOMIC);
if (!bitmap)
goto out;
- for (i = 0; i < nr_chunks; i++)
- set_bit(chunk_id + i, lpi_bitmap);
-
- *base = its_chunk_to_lpi(chunk_id);
- *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+ *nr_ids = nr_irqs;
out:
- spin_unlock(&lpi_lock);
-
if (!bitmap)
*base = *nr_ids = 0;
return bitmap;
}
-static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
{
- int lpi;
-
- spin_lock(&lpi_lock);
-
- for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
- int chunk = its_lpi_to_chunk(lpi);
-
- BUG_ON(chunk > lpi_chunks);
- if (test_bit(chunk, lpi_bitmap)) {
- clear_bit(chunk, lpi_bitmap);
- } else {
- pr_err("Bad LPI chunk %d\n", chunk);
- }
- }
-
- spin_unlock(&lpi_lock);
-
+ WARN_ON(free_lpi_range(base, nr_ids));
kfree(bitmap);
}
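The new allocator replaces the static chunk bitmap with a mutex-protected, sorted list of free ranges: allocation carves IDs off the first range that is large enough, and freeing inserts a range, re-sorts, and merges adjacent neighbours. Below is a minimal user-space sketch of the same bookkeeping, with hypothetical names; the kernel version additionally halves the request on failure (its_lpi_alloc) and serializes everything with lpi_range_lock.

/* Sorted free-range allocator sketch (user-space, illustrative only). */
#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned int base, span;
	struct range *next;		/* list kept sorted by base */
};

static struct range *free_list;

static int range_alloc(unsigned int n, unsigned int *base)
{
	struct range **pp, *r;

	for (pp = &free_list; (r = *pp); pp = &r->next) {
		if (r->span < n)
			continue;
		*base = r->base;	/* carve off the front */
		r->base += n;
		r->span -= n;
		if (!r->span) {		/* range emptied: unlink the node */
			*pp = r->next;
			free(r);
		}
		return 0;
	}
	return -1;			/* nothing large enough (-ENOSPC) */
}

static int range_free(unsigned int base, unsigned int n)
{
	struct range **pp, *r, *add;

	add = malloc(sizeof(*add));
	if (!add)
		return -1;
	add->base = base;
	add->span = n;

	/* insert at the sorted position, standing in for list_sort() */
	for (pp = &free_list; (r = *pp) && r->base < base; pp = &r->next)
		;
	add->next = r;
	*pp = add;

	/* merge adjacent ranges, as merge_lpi_ranges() does */
	for (r = free_list; r && r->next;) {
		if (r->base + r->span == r->next->base) {
			struct range *victim = r->next;

			r->span += victim->span;
			r->next = victim->next;
			free(victim);
		} else {
			r = r->next;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int base;

	range_free(8192, 1024);			/* seed, like its_lpi_init() */
	if (!range_alloc(32, &base))
		printf("got 32 IDs at %u\n", base);	/* prints 8192 */
	range_free(base, 32);			/* returns and re-merges */
	return 0;
}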
@@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;
- lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
+ lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
@@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void)
{
struct its_node *its;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry)
its_cpu_init_collection(its);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!its_alloc_device_table(its, dev_id))
return NULL;
+ if (WARN_ON(!is_power_of_2(nvecs)))
+ nvecs = roundup_pow_of_two(nvecs);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
- * We allocate at least one chunk worth of LPIs bet device,
- * and thus that many ITEs. The device may require less though.
+ * Even if the device wants a single LPI, the ITT must be
+ * sized as a power of two (and you need at least one bit...).
*/
- nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+ nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
if (alloc_lpis) {
- lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+ lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
col_map = kcalloc(nr_lpis, sizeof(*col_map),
GFP_KERNEL);
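As a worked example of the sizing rule above, the stand-alone sketch below computes the ITT allocation for a device requesting 5 MSIs. The 8-byte ITE is an assumption for illustration (its->ite_size is read from hardware); ITS_ITT_ALIGN is 256 bytes in this driver.

/* Illustrative ITT sizing; the 8-byte ITE is assumed, not read from HW. */
#include <stdio.h>

static unsigned int pow2_roundup(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int nvecs = 5, align = 256;	/* ITS_ITT_ALIGN */
	unsigned int nr_ites, sz;

	nvecs = pow2_roundup(nvecs);		/* 5 -> 8 */
	nr_ites = nvecs > 2 ? nvecs : 2;	/* never fewer than two ITEs */
	sz = nr_ites * 8;			/* assumed 8 bytes per ITE */
	sz = (sz > align ? sz : align) + align - 1; /* room to align the base */
	printf("allocate %u bytes for %u ITEs\n", sz, nr_ites); /* 511, 8 */
	return 0;
}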
@@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/* If all interrupts have been freed, start mopping the floor */
if (bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
- its_lpi_free_chunks(its_dev->event_map.lpi_map,
- its_dev->event_map.lpi_base,
- its_dev->event_map.nr_lpis);
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
@@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
}
if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
- its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+ its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
its_free_prop_table(vm->vprop_page);
}
}
@@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
BUG_ON(!vm);
- bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+ bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
if (nr_ids < nr_irqs) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
vprop_page = its_allocate_prop_table(GFP_KERNEL);
if (!vprop_page) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
@@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i - 1);
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
@@ -3070,7 +3139,7 @@ static int its_save_disable(void)
struct its_node *its;
int err = 0;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
@@ -3102,7 +3171,7 @@ err:
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
}
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return err;
}
@@ -3112,7 +3181,7 @@ static void its_restore_enable(void)
struct its_node *its;
int ret;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
int i;
@@ -3164,7 +3233,7 @@ static void its_restore_enable(void)
GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
its_cpu_init_collection(its);
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct syscore_ops its_syscore_ops = {
@@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
if (err)
goto out_free_tables;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_add(&its->entry, &its_nodes);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return 0;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 76ea56d779a1..e214181b77b7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED,
};
-#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
*/
typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
- gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_data.rdists.gicd_typer = typer;
gic_irqs = GICD_TYPER_IRQS(typer);
if (gic_irqs > 1020)
gic_irqs = 1020;
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index fc5953dea509..2ff08986b536 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
return ingenic_intc_of_init(node, 1);
}
IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
static int __init intc_2chip_of_init(struct device_node *node,
struct device_node *parent)
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
index aed31afb0216..85234d456638 100644
--- a/drivers/irqchip/irq-renesas-h8s.c
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -12,7 +12,7 @@
#include <asm/io.h>
static void *intc_baseaddr;
-#define IPRA ((unsigned long)intc_baseaddr)
+#define IPRA (intc_baseaddr)
static const unsigned char ipr_table[] = {
0x03, 0x02, 0x01, 0x00, 0x13, 0x12, 0x11, 0x10, /* 16 - 23 */
@@ -34,7 +34,7 @@ static const unsigned char ipr_table[] = {
static void h8s_disable_irq(struct irq_data *data)
{
int pos;
- unsigned int addr;
+ void __iomem *addr;
unsigned short pri;
int irq = data->irq;
@@ -48,7 +48,7 @@ static void h8s_disable_irq(struct irq_data *data)
static void h8s_enable_irq(struct irq_data *data)
{
int pos;
- unsigned int addr;
+ void __iomem *addr;
unsigned short pri;
int irq = data->irq;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
new file mode 100644
index 000000000000..532e9d68c704
--- /dev/null
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2018 Christoph Hellwig
+ */
+#define pr_fmt(fmt) "plic: " fmt
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/*
+ * This driver implements a version of the RISC-V PLIC with the actual layout
+ * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
+ *
+ * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
+ *
+ * Devices marked as 'sifive,plic-1.0.0' support at most 1024 interrupt
+ * sources, of which device 0 is defined as non-existent by the RISC-V
+ * Privileged Spec.
+ */
+
+#define MAX_DEVICES 1024
+#define MAX_CONTEXTS 15872
+
+/*
+ * Each interrupt source has a priority register associated with it.
+ * We always hardwire it to one in Linux.
+ */
+#define PRIORITY_BASE 0
+#define PRIORITY_PER_ID 4
+
+/*
+ * Each hart context has a vector of interrupt enable bits associated with it.
+ * There's one bit for each interrupt source.
+ */
+#define ENABLE_BASE 0x2000
+#define ENABLE_PER_HART 0x80
+
+/*
+ * Each hart context has a set of control registers associated with it. Right
+ * now there are only two: a source priority threshold over which the hart will
+ * take an interrupt, and a register to claim interrupts.
+ */
+#define CONTEXT_BASE 0x200000
+#define CONTEXT_PER_HART 0x1000
+#define CONTEXT_THRESHOLD 0x00
+#define CONTEXT_CLAIM 0x04
+
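Taken together, the constants above describe the whole register map. The stand-alone sketch below (same offsets, hypothetical context and source numbers) shows where the three registers for one hart context and one interrupt source live:

/* Illustrative: byte offsets into the PLIC region for context 2, source 37. */
#include <stdio.h>

#define PRIORITY_BASE    0
#define PRIORITY_PER_ID  4
#define ENABLE_BASE      0x2000
#define ENABLE_PER_HART  0x80
#define CONTEXT_BASE     0x200000
#define CONTEXT_PER_HART 0x1000
#define CONTEXT_CLAIM    0x04

int main(void)
{
	unsigned int ctx = 2, hwirq = 37;

	printf("priority: 0x%x\n", PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
	printf("enable:   0x%x (bit %u)\n",
	       ENABLE_BASE + ctx * ENABLE_PER_HART + (hwirq / 32) * 4,
	       hwirq % 32);
	printf("claim:    0x%x\n",
	       CONTEXT_BASE + ctx * CONTEXT_PER_HART + CONTEXT_CLAIM);
	return 0;
}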
+static void __iomem *plic_regs;
+
+struct plic_handler {
+ bool present;
+ int ctxid;
+};
+static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+
+static inline void __iomem *plic_hart_offset(int ctxid)
+{
+ return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
+}
+
+static inline u32 __iomem *plic_enable_base(int ctxid)
+{
+ return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
+}
+
+/*
+ * Protect mask operations on the registers given that we can't assume that
+ * atomic memory operations work on them.
+ */
+static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
+
+static inline void plic_toggle(int ctxid, int hwirq, int enable)
+{
+ u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+ u32 hwirq_mask = 1 << (hwirq % 32);
+
+ raw_spin_lock(&plic_toggle_lock);
+ if (enable)
+ writel(readl(reg) | hwirq_mask, reg);
+ else
+ writel(readl(reg) & ~hwirq_mask, reg);
+ raw_spin_unlock(&plic_toggle_lock);
+}
+
+static inline void plic_irq_toggle(struct irq_data *d, int enable)
+{
+ int cpu;
+
+ writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+ for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+ struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+ if (handler->present)
+ plic_toggle(handler->ctxid, d->hwirq, enable);
+ }
+}
+
+static void plic_irq_enable(struct irq_data *d)
+{
+ plic_irq_toggle(d, 1);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+ plic_irq_toggle(d, 0);
+}
+
+static struct irq_chip plic_chip = {
+ .name = "SiFive PLIC",
+ /*
+ * There is no need to mask/unmask PLIC interrupts. They are "masked"
+ * by reading claim and "unmasked" when writing it back.
+ */
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
+};
+
+static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+ irq_set_chip_data(irq, NULL);
+ irq_set_noprobe(irq);
+ return 0;
+}
+
+static const struct irq_domain_ops plic_irqdomain_ops = {
+ .map = plic_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static struct irq_domain *plic_irqdomain;
+
+/*
+ * Handling an interrupt is a two-step process: first you claim the interrupt
+ * by reading the claim register, then you complete the interrupt by writing
+ * that source ID back to the same claim register. This automatically enables
+ * and disables the interrupt, so there's nothing else to do.
+ */
+static void plic_handle_irq(struct pt_regs *regs)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+ irq_hw_number_t hwirq;
+
+ WARN_ON_ONCE(!handler->present);
+
+ csr_clear(sie, SIE_SEIE);
+ while ((hwirq = readl(claim))) {
+ int irq = irq_find_mapping(plic_irqdomain, hwirq);
+
+ if (unlikely(irq <= 0))
+ pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
+ hwirq);
+ else
+ generic_handle_irq(irq);
+ writel(hwirq, claim);
+ }
+ csr_set(sie, SIE_SEIE);
+}
+
+/*
+ * Walk up the DT tree until we find an active RISC-V core (HART) node and
+ * extract the cpuid from it.
+ */
+static int plic_find_hart_id(struct device_node *node)
+{
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv"))
+ return riscv_of_processor_hart(node);
+ }
+
+ return -1;
+}
+
+static int __init plic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int error = 0, nr_handlers, nr_mapped = 0, i;
+ u32 nr_irqs = 0;
+
+ if (plic_regs) {
+ pr_warn("PLIC already present.\n");
+ return -ENXIO;
+ }
+
+ plic_regs = of_iomap(node, 0);
+ if (WARN_ON(!plic_regs))
+ return -EIO;
+
+ error = -EINVAL;
+ of_property_read_u32(node, "riscv,ndev", &nr_irqs);
+ if (WARN_ON(!nr_irqs))
+ goto out_iounmap;
+
+ nr_handlers = of_irq_count(node);
+ if (WARN_ON(!nr_handlers))
+ goto out_iounmap;
+ if (WARN_ON(nr_handlers < num_possible_cpus()))
+ goto out_iounmap;
+
+ error = -ENOMEM;
+ plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+ &plic_irqdomain_ops, NULL);
+ if (WARN_ON(!plic_irqdomain))
+ goto out_iounmap;
+
+ for (i = 0; i < nr_handlers; i++) {
+ struct of_phandle_args parent;
+ struct plic_handler *handler;
+ irq_hw_number_t hwirq;
+ int cpu;
+
+ if (of_irq_parse_one(node, i, &parent)) {
+ pr_err("failed to parse parent for context %d.\n", i);
+ continue;
+ }
+
+ /* skip context holes */
+ if (parent.args[0] == -1)
+ continue;
+
+ cpu = plic_find_hart_id(parent.np);
+ if (cpu < 0) {
+ pr_warn("failed to parse hart ID for context %d.\n", i);
+ continue;
+ }
+
+ handler = per_cpu_ptr(&plic_handlers, cpu);
+ handler->present = true;
+ handler->ctxid = i;
+
+ /* priority must be > threshold to trigger an interrupt */
+ writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ plic_toggle(i, hwirq, 0);
+ nr_mapped++;
+ }
+
+ pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
+ nr_irqs, nr_mapped, nr_handlers);
+ set_handle_irq(plic_handle_irq);
+ return 0;
+
+out_iounmap:
+ iounmap(plic_regs);
+ return error;
+}
+
+IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
+IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 3a7e8905a97e..3df527fcf4e1 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
};
static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+ { .exti = 0, .irq_parent = 6 },
{ .exti = 1, .irq_parent = 7 },
{ .exti = 2, .irq_parent = 8 },
{ .exti = 3, .irq_parent = 9 },
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index a15a9510c904..e539500752d4 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -98,14 +98,12 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
static void xtensa_mx_irq_enable(struct irq_data *d)
{
- variant_irq_enable(d->hwirq);
xtensa_mx_irq_unmask(d);
}
static void xtensa_mx_irq_disable(struct irq_data *d)
{
xtensa_mx_irq_mask(d);
- variant_irq_disable(d->hwirq);
}
static void xtensa_mx_irq_ack(struct irq_data *d)
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index f728755fa292..000cb5462bcf 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -55,14 +55,12 @@ static void xtensa_irq_unmask(struct irq_data *d)
static void xtensa_irq_enable(struct irq_data *d)
{
- variant_irq_enable(d->hwirq);
xtensa_irq_unmask(d);
}
static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d);
- variant_irq_disable(d->hwirq);
}
static void xtensa_irq_ack(struct irq_data *d)
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6e0c2814d032..ef5560b848ab 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -1321,7 +1322,7 @@ static inline void capinc_tty_exit(void) { }
* /proc/capi/capi20:
* minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int capi20_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct list_head *l;
@@ -1344,7 +1345,7 @@ static int capi20_proc_show(struct seq_file *m, void *v)
* /proc/capi/capi20ncci:
* applid ncci
*/
-static int capi20ncci_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20ncci_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct capincci *np;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index ee510f901720..e8949f3dcae1 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -2451,7 +2452,7 @@ lower_callback(struct notifier_block *nb, unsigned long val, void *v)
* /proc/capi/capidrv:
* nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int capidrv_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capidrv_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu %lu %lu %lu\n",
global.ap.nrecvctlpkt,
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 20d0a080a2b0..ecdeb89645d0 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -739,6 +739,7 @@ static void read_int_callback(struct urb *urb)
case HD_OPEN_B2CHANNEL_ACK:
++channel;
+ /* fall through */
case HD_OPEN_B1CHANNEL_ACK:
bcs = cs->bcs + channel;
update_basstate(ucs, BS_B1OPEN << channel, 0);
@@ -752,6 +753,7 @@ static void read_int_callback(struct urb *urb)
case HD_CLOSE_B2CHANNEL_ACK:
++channel;
+ /* fall through */
case HD_CLOSE_B1CHANNEL_ACK:
bcs = cs->bcs + channel;
update_basstate(ucs, 0, BS_B1OPEN << channel);
@@ -765,6 +767,7 @@ static void read_int_callback(struct urb *urb)
case HD_B2_FLOW_CONTROL:
++channel;
+ /* fall through */
case HD_B1_FLOW_CONTROL:
bcs = cs->bcs + channel;
atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
@@ -972,16 +975,14 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
+ usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+ usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+ ubc->isoinbuf + k * BAS_INBUFSIZE,
+ BAS_INBUFSIZE, read_iso_callback, bcs,
+ BAS_FRAMETIME);
- urb->dev = bcs->cs->hw.bas->udev;
- urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel);
urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE;
- urb->transfer_buffer_length = BAS_INBUFSIZE;
urb->number_of_packets = BAS_NUMFRAMES;
- urb->interval = BAS_FRAMETIME;
- urb->complete = read_iso_callback;
- urb->context = bcs;
for (j = 0; j < BAS_NUMFRAMES; j++) {
urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
urb->iso_frame_desc[j].length = BAS_MAXFRAME;
@@ -1005,15 +1006,15 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
- urb->dev = bcs->cs->hw.bas->udev;
- urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel);
+ usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+ usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+ ubc->isooutbuf->data,
+ sizeof(ubc->isooutbuf->data),
+ write_iso_callback, &ubc->isoouturbs[k],
+ BAS_FRAMETIME);
+
urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isooutbuf->data;
- urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
urb->number_of_packets = BAS_NUMFRAMES;
- urb->interval = BAS_FRAMETIME;
- urb->complete = write_iso_callback;
- urb->context = &ubc->isoouturbs[k];
for (j = 0; j < BAS_NUMFRAMES; ++j) {
urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
urb->iso_frame_desc[j].length = BAS_NORMFRAME;
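This hunk (and the similar fill_isoc_urb() conversions in the hisax drivers further down) replaces open-coded URB field assignments with usb_fill_int_urb() from <linux/usb.h>. A simplified sketch of what the helper does follows; the real helper also rounds the interval to a power of two for high- and super-speed devices and sets start_frame to -1.

/* Simplified sketch of usb_fill_int_urb(); see <linux/usb.h> for the
 * real definition. */
#include <linux/usb.h>

static inline void fill_int_urb_sketch(struct urb *urb, struct usb_device *dev,
				       unsigned int pipe, void *buf, int len,
				       usb_complete_t complete_fn, void *context,
				       int interval)
{
	urb->dev = dev;
	urb->pipe = pipe;
	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = len;
	urb->complete = complete_fn;
	urb->context = context;
	urb->interval = interval;	/* real helper adjusts this per speed */
}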
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index ae2b2669af1b..8eb28a83832e 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -361,6 +361,7 @@ modehdlc(struct bchannel *bch, int protocol)
switch (protocol) {
case -1: /* used for init */
bch->state = -1;
+ /* fall through */
case ISDN_P_NONE:
if (bch->state == ISDN_P_NONE)
break;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 34c93874af23..ebb3fa2e1d00 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1296,6 +1296,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
case (-1): /* used for init */
bch->state = -1;
bch->nr = bc;
+ /* fall through */
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0;
@@ -2219,7 +2220,7 @@ hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct hfc_pci *card;
struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
- card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
+ card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
if (!card) {
printk(KERN_ERR "No kmem for HFC card\n");
return err;
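Several hunks in this series relax GFP_ATOMIC to GFP_KERNEL in probe paths: probe() runs in process context and may sleep, so the allocator is allowed to reclaim memory instead of failing under pressure. A minimal sketch with a hypothetical driver:

/* Illustrative only: allocation flags in a (hypothetical) PCI probe. */
#include <linux/pci.h>
#include <linux/slab.h>

struct demo_card {
	void __iomem *base;
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct demo_card *card;

	/* May sleep here, so GFP_KERNEL; GFP_ATOMIC is only for contexts
	 * that cannot sleep, such as IRQ handlers or under spinlocks. */
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;
	pci_set_drvdata(pdev, card);
	return 0;
}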
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 17cc879ad2bb..6d05946b445e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -819,6 +819,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
int fifon = fifo->fifonum;
int i;
int hdlc = 0;
+ unsigned long flags;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
@@ -835,7 +836,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
return;
}
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->dch) {
rx_skb = fifo->dch->rx_skb;
maxlen = fifo->dch->maxlen;
@@ -844,7 +845,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
if (fifo->bch) {
if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) {
fifo->bch->dropcnt += len;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = bchannel_get_rxbuf(fifo->bch, len);
@@ -854,7 +855,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
skb_trim(rx_skb, 0);
pr_warning("%s.B%d: No bufferspace for %d bytes\n",
hw->name, fifo->bch->nr, len);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = fifo->bch->maxlen;
@@ -878,7 +879,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
} else {
printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
hw->name, __func__);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
@@ -888,7 +889,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
"for fifo(%d) HFCUSB_D_RX\n",
hw->name, __func__, fifon);
skb_trim(rx_skb, 0);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
@@ -942,7 +943,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
/* deliver transparent data to layer2 */
recv_Bchannel(fifo->bch, MISDN_ID_ANY, false);
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
static void
@@ -979,18 +980,19 @@ rx_iso_complete(struct urb *urb)
__u8 *buf;
static __u8 eof[8];
__u8 s0_state;
+ unsigned long flags;
fifon = fifo->fifonum;
status = urb->status;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
/*
* ISO transfer only partially completed,
@@ -1096,15 +1098,16 @@ rx_int_complete(struct urb *urb)
struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
struct hfcsusb *hw = fifo->hw;
static __u8 eof[8];
+ unsigned long flags;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
fifon = fifo->fifonum;
if ((!fifo->active) || (urb->status)) {
@@ -1172,12 +1175,13 @@ tx_iso_complete(struct urb *urb)
int *tx_idx;
int frame_complete, fifon, status, fillempty = 0;
__u8 threshbit, *p;
+ unsigned long flags;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
@@ -1195,7 +1199,7 @@ tx_iso_complete(struct urb *urb)
} else {
printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
hw->name, __func__);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
@@ -1375,7 +1379,7 @@ tx_iso_complete(struct urb *urb)
hw->name, __func__,
symbolic(urb_errlist, status), status, fifon);
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
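The hfcsusb changes convert every completion-path lock to the irqsave variants: URB completion handlers may run in hard-IRQ context, and the same hw->lock is also taken with interrupts enabled elsewhere, so interrupts must be disabled and restored locally. A minimal sketch of the pattern (demo names only):

/* Illustrative locking pattern for data shared with URB completion. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* safe in any context */
	/* ... update FIFO/channel state shared with the completion path ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}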
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 1fc290659e94..3e01012be4ab 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -887,6 +887,7 @@ release_card(struct inf_hw *card) {
release_card(card->sc[i]);
card->sc[i] = NULL;
}
+ /* fall through */
default:
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index b791688d0228..386731ec2489 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -972,6 +972,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) {
break;
case PCTRL_CMD_FTM:
p1 = 2;
+ /* fall through */
case PCTRL_CMD_FTH:
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
@@ -1177,6 +1178,7 @@ setup_pump(struct isar_ch *ch) {
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
PMOD_DTMF, 1, param);
}
+ /* fall through */
case ISDN_P_B_MODEM_ASYNC:
ctrl = PMOD_DATAMODEM;
if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
@@ -1268,6 +1270,7 @@ setup_iom2(struct isar_ch *ch) {
case ISDN_P_B_MODEM_ASYNC:
case ISDN_P_B_T30_FAX:
cmsb |= IOM_CTRL_RCV;
+ /* fall through */
case ISDN_P_B_L2DTMF:
if (test_bit(FLG_DTMFSEND, &ch->bch.Flags))
cmsb |= IOM_CTRL_RCV;
@@ -1560,6 +1563,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
ich->is->name, hh->id);
ret = -EINVAL;
}
+ /* fall through */
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
ich->is->name, __func__, hh->prim, hh->id);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 89d9ba8ed535..2b317cb63d06 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1084,7 +1084,7 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
+ card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
if (!card) {
pr_info("No kmem for Netjet\n");
return err;
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index a18b605fb4f2..b161456c942e 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -207,6 +207,7 @@ modehdlc(struct BCState *bcs, int mode, int bc)
bcs->mode = 1;
bcs->channel = bc;
bc = 0;
+ /* fall through */
case (L1_MODE_NULL):
if (bcs->mode == L1_MODE_NULL)
return;
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index ddec47a911a0..9ee06328784c 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -1012,7 +1012,7 @@ dummy_pstack(struct PStack *st, int pr, void *arg) {
static int
init_PStack(struct PStack **stp) {
- *stp = kmalloc(sizeof(struct PStack), GFP_ATOMIC);
+ *stp = kmalloc(sizeof(struct PStack), GFP_KERNEL);
if (!*stp)
return -ENOMEM;
(*stp)->next = NULL;
@@ -1369,6 +1369,7 @@ leased_l1l2(struct PStack *st, int pr, void *arg)
case (PH_ACTIVATE | INDICATION):
case (PH_ACTIVATE | CONFIRM):
event = EV_LEASED;
+ /* fall through */
case (PH_DEACTIVATE | INDICATION):
case (PH_DEACTIVATE | CONFIRM):
if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags))
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 7108bdb8742e..b12e6cae26c2 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1029,7 +1029,7 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
*cs_out = NULL;
- cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
+ cs = kzalloc(sizeof(struct IsdnCardState), GFP_KERNEL);
if (!cs) {
printk(KERN_WARNING
"HiSax: No memory for IsdnCardState(card %d)\n",
@@ -1059,12 +1059,12 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
"HiSax: Card Type %d out of range\n", card->typ);
goto outf_cs;
}
- if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
+ if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_KERNEL))) {
printk(KERN_WARNING
"HiSax: No memory for dlog(card %d)\n", cardnr + 1);
goto outf_cs;
}
- if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
+ if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_KERNEL))) {
printk(KERN_WARNING
"HiSax: No memory for status_buf(card %d)\n",
cardnr + 1);
@@ -1123,7 +1123,7 @@ static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
{
int ret;
- if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
+ if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_KERNEL))) {
printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
ll_unload(cs);
goto outf_cs;
@@ -1843,6 +1843,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg)
case PH_DEACTIVATE | REQUEST:
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
skb_queue_purge(&bcs->squeue);
+ /* fall through */
default:
B_L2L1(b_if, pr, arg);
break;
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
index 35c6df6534ec..a6d8af02354a 100644
--- a/drivers/isdn/hisax/gazel.c
+++ b/drivers/isdn/hisax/gazel.c
@@ -108,6 +108,7 @@ ReadISAC(struct IsdnCardState *cs, u_char offset)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
return (readreg(cs->hw.gazel.isac, off2));
case R753:
@@ -125,6 +126,7 @@ WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
writereg(cs->hw.gazel.isac, off2, value);
break;
@@ -203,6 +205,7 @@ ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
return (readreg(cs->hw.gazel.hscx[hscx], off2));
case R753:
@@ -220,6 +223,7 @@ WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
writereg(cs->hw.gazel.hscx[hscx], off2, value);
break;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 97ecb3073045..1d4cd01d4685 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -432,16 +432,12 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
{
int k;
- urb->dev = dev;
- urb->pipe = pipe;
- urb->complete = complete;
+ usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets,
+ complete, context, interval);
+
urb->number_of_packets = num_packets;
- urb->transfer_buffer_length = packet_size * num_packets;
- urb->context = context;
- urb->transfer_buffer = buf;
urb->transfer_flags = URB_ISO_ASAP;
urb->actual_length = 0;
- urb->interval = interval;
for (k = 0; k < num_packets; k++) {
urb->iso_frame_desc[k].offset = packet_size * k;
urb->iso_frame_desc[k].length = packet_size;
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index d01ff116797b..82c1879f5664 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1089,6 +1089,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
break;
case PCTRL_CMD_FTM:
p1 = 2;
+ /* fall through */
case PCTRL_CMD_FTH:
sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
@@ -1097,6 +1098,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
case PCTRL_CMD_FRM:
if (frm_extra_delay)
mdelay(frm_extra_delay);
+ /* fall through */
case PCTRL_CMD_FRH:
p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod;
bcs->hw.isar.newmod = 0;
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index da0a1c6aa329..98f60d1523f4 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -88,6 +88,7 @@ l3_1tr6_setup_req(struct l3_process *pc, u_char pr, void *arg)
break;
case 'C':
channel = 0x08;
+ /* fall through */
case 'P':
channel |= 0x80;
teln++;
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
index 18a3484b1f7e..368d152a8f1d 100644
--- a/drivers/isdn/hisax/l3dss1.c
+++ b/drivers/isdn/hisax/l3dss1.c
@@ -1282,6 +1282,7 @@ l3dss1_setup_req(struct l3_process *pc, u_char pr,
switch (0x5f & *teln) {
case 'C':
channel = 0x08;
+ /* fall through */
case 'P':
channel |= 0x80;
teln++;
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 1cb9930d5e24..f207fda691c7 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -408,15 +408,10 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev,
{
int k;
- urb->dev = dev;
- urb->pipe = pipe;
- urb->interval = 1;
- urb->transfer_buffer = buf;
+ usb_fill_int_urb(urb, dev, pipe, buf, num_packets * packet_size,
+ complete, context, 1);
+
urb->number_of_packets = num_packets;
- urb->transfer_buffer_length = num_packets * packet_size;
- urb->actual_length = 0;
- urb->complete = complete;
- urb->context = context;
urb->transfer_flags = URB_ISO_ASAP;
for (k = 0; k < num_packets; k++) {
urb->iso_frame_desc[k].offset = packet_size * k;
diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c
index 4a0425378f37..ba177c3a621b 100644
--- a/drivers/isdn/hysdn/hysdn_boot.c
+++ b/drivers/isdn/hysdn/hysdn_boot.c
@@ -99,6 +99,7 @@ pof_handle_data(hysdn_card *card, int datlen)
case TAG_CBOOTDTA:
DecryptBuf(boot, datlen); /* we need to encrypt the buffer */
+ /* fall through */
case TAG_BOOTDTA:
if (card->debug_flags & LOG_POF_RECORD)
hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
@@ -137,6 +138,7 @@ pof_handle_data(hysdn_card *card, int datlen)
case TAG_CABSDATA:
DecryptBuf(boot, datlen); /* we need to encrypt the buffer */
+ /* fall through */
case TAG_ABSDATA:
if (card->debug_flags & LOG_POF_RECORD)
hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 7a501dbe7123..6a5b3f00f9ad 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1640,13 +1640,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
} else
return -EINVAL;
case IIOCDBGVAR:
- if (arg) {
- if (copy_to_user(argp, &dev, sizeof(ulong)))
- return -EFAULT;
- return 0;
- } else
- return -EINVAL;
- break;
+ return -EINVAL;
default:
if ((cmd & IIOCDRVCTL) == IIOCDRVCTL)
cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 960f26348bb5..b730037a0e2d 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -787,7 +787,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
- strncpy(&cmd.parm.cmsg.para[6], id, l);
+ memcpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
cmd.driver = info->isdn_driver;
cmd.arg = info->isdn_channel;
@@ -877,7 +877,7 @@ isdn_tty_resume(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 5; /* 16 bit 0x0005 Resume */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
- strncpy(&cmd.parm.cmsg.para[6], id, l);
+ memcpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
info->dialing = 1;
// strcpy(dev->num[i], n);
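The strncpy() to memcpy() switch above reflects that para[] holds a length-prefixed field that is not NUL-terminated, so string-copy semantics (and gcc's -Wstringop-truncation warning) do not apply. A small sketch with hypothetical names:

/* Illustrative: copying a counted, non NUL-terminated field. */
#include <string.h>

static void put_counted(unsigned char *para, const char *id, size_t l)
{
	para[0] = (unsigned char)l;	/* length prefix, no terminator */
	memcpy(&para[1], id, l);	/* strncpy() would imply a string */
}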
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index 8b74ce412524..2a5f6668756c 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -354,6 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
return line;
}
+ /* else: fall through */
case 128:
m[line] = 128; /* leftmost -> set byte to 1000000 */
mbit = 64; /* current bit in the matrix line */
@@ -386,20 +387,28 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
switch (++line % 10) {
case 1:
m[line++] = 0xfe;
+ /* fall through */
case 2:
m[line++] = 0xfe;
+ /* fall through */
case 3:
m[line++] = 0xfe;
+ /* fall through */
case 4:
m[line++] = 0xfe;
+ /* fall through */
case 5:
m[line++] = 0xbf;
+ /* fall through */
case 6:
m[line++] = 0xfe;
+ /* fall through */
case 7:
m[line++] = 0xfe;
+ /* fall through */
case 8:
m[line++] = 0xfe;
+ /* fall through */
case 9:
m[line++] = 0xfe;
}
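All of the /* fall through */ annotations added in this series mark deliberate case cascades, so that building with -Wimplicit-fallthrough stays quiet without changing behaviour. A stand-alone sketch of the idiom:

/* Illustrative: a deliberate cascade the compiler accepts because each
 * non-terminal case ends in a recognised fall-through comment. */
#include <stdio.h>

static int fill_from(int line, char m[8])
{
	switch (line % 3) {
	case 1:
		m[line++] = 'a';
		/* fall through */
	case 2:
		m[line++] = 'b';
		/* fall through */
	default:
		m[line++] = 'c';
	}
	return line;
}

int main(void)
{
	char m[8] = { 0 };

	printf("%d\n", fill_from(1, m));	/* prints 4: three cells filled */
	return 0;
}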
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 98f90aadd141..18c0a1281914 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
.getname = data_sock_getname,
.sendmsg = mISDN_sock_sendmsg,
.recvmsg = mISDN_sock_recvmsg,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = data_sock_setsockopt,
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 422dced7c90a..d97c6dd52223 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -539,6 +539,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
rq.protocol = ISDN_P_NT_S0;
if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
rq.protocol = ISDN_P_NT_E1;
+ /* fall through */
case ISDN_P_LAPD_TE:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6e3a998f3370..44097a3e0fcc 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -57,12 +57,13 @@ config LEDS_AAT1290
depends on PINCTRL
help
This option enables support for the LEDs on the AAT1290.
+
config LEDS_APU
- tristate "Front panel LED support for PC Engines APU/APU2 boards"
+ tristate "Front panel LED support for PC Engines APU/APU2/APU3 boards"
depends on LEDS_CLASS
depends on X86 && DMI
help
- This driver makes the PC Engines APU/APU2 front panel LEDs
+ This driver makes the PC Engines APU/APU2/APU3 front panel LEDs
accessible from userspace programs through the LED subsystem.
To compile this driver as a module, choose M here: the
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 431123b048a2..17d73db1456e 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -103,15 +103,16 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
EXPORT_SYMBOL_GPL(led_trigger_show);
/* Caller must ensure led_cdev->trigger_lock held */
-void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
+int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
unsigned long flags;
char *event = NULL;
char *envp[2];
const char *name;
+ int ret;
if (!led_cdev->trigger && !trig)
- return;
+ return 0;
name = trig ? trig->name : "none";
event = kasprintf(GFP_KERNEL, "TRIGGER=%s", name);
@@ -126,7 +127,10 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
led_stop_software_blink(led_cdev);
if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev);
+ device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
led_cdev->trigger = NULL;
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
@@ -134,8 +138,20 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
write_unlock_irqrestore(&trig->leddev_list_lock, flags);
led_cdev->trigger = trig;
+
if (trig->activate)
- trig->activate(led_cdev);
+ ret = trig->activate(led_cdev);
+ else
+ ret = 0;
+
+ if (ret)
+ goto err_activate;
+
+ ret = device_add_groups(led_cdev->dev, trig->groups);
+ if (ret) {
+ dev_err(led_cdev->dev, "Failed to add trigger attributes\n");
+ goto err_add_groups;
+ }
}
if (event) {
@@ -146,6 +162,23 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
"%s: Error sending uevent\n", __func__);
kfree(event);
}
+
+ return 0;
+
+err_add_groups:
+
+ if (trig->deactivate)
+ trig->deactivate(led_cdev);
+err_activate:
+
+ led_cdev->trigger = NULL;
+ led_cdev->trigger_data = NULL;
+ write_lock_irqsave(&trig->leddev_list_lock, flags);
+ list_del(&led_cdev->trig_list);
+ write_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ led_set_brightness(led_cdev, LED_OFF);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(led_trigger_set);
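With led_trigger_set() now returning an error, trigger activate() hooks can fail cleanly and publish their sysfs attributes through the trigger's groups pointer instead of open-coding device_create_file(). A hedged sketch of a trigger written against the new interface (all names illustrative; registration is via led_trigger_register()):

/* Illustrative trigger using the int-returning activate() hook. */
#include <linux/leds.h>
#include <linux/slab.h>

struct demo_trig_data {
	int level;
};

static int demo_activate(struct led_classdev *led_cdev)
{
	struct demo_trig_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;	/* led_trigger_set() unwinds for us now */
	led_cdev->trigger_data = data;
	led_cdev->activated = true;
	return 0;
}

static void demo_deactivate(struct led_classdev *led_cdev)
{
	kfree(led_cdev->trigger_data);
	led_cdev->activated = false;
}

static struct led_trigger demo_trigger = {
	.name		= "demo",
	.activate	= demo_activate,
	.deactivate	= demo_deactivate,
	/* .groups would point at the attribute_groups to add while active */
};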
diff --git a/drivers/leds/leds-apu.c b/drivers/leds/leds-apu.c
index 8c93d68964c7..8d42e46e2de3 100644
--- a/drivers/leds/leds-apu.c
+++ b/drivers/leds/leds-apu.c
@@ -102,6 +102,13 @@ static const struct apu_led_profile apu2_led_profile[] = {
{ "apu2:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
};
+/* Same as apu2_led_profile, but with "3" in the LED names. */
+static const struct apu_led_profile apu3_led_profile[] = {
+ { "apu3:green:1", LED_ON, APU2_FCH_GPIO_BASE + 68 * APU2_IOSIZE },
+ { "apu3:green:2", LED_OFF, APU2_FCH_GPIO_BASE + 69 * APU2_IOSIZE },
+ { "apu3:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
+};
+
static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
{
.ident = "apu",
@@ -134,6 +141,30 @@ static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu2")
}
},
+ /* PC Engines APU3 with "Legacy" bios < 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU3")
+ }
+ },
+ /* PC Engines APU3 with "Legacy" bios >= 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "apu3")
+ }
+ },
+ /* PC Engines APU3 with "Mainline" bios */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu3")
+ }
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, apu_led_dmi_table);
@@ -235,6 +266,14 @@ static int __init apu_led_probe(struct platform_device *pdev)
apu_led->platform = APU2_LED_PLATFORM;
apu_led->num_led_instances = ARRAY_SIZE(apu2_led_profile);
apu_led->iosize = APU2_IOSIZE;
+ } else if (dmi_match(DMI_BOARD_NAME, "APU3") ||
+ dmi_match(DMI_BOARD_NAME, "apu3") ||
+ dmi_match(DMI_BOARD_NAME, "PC Engines apu3")) {
+ apu_led->profile = apu3_led_profile;
+ /* Otherwise identical to APU2. */
+ apu_led->platform = APU2_LED_PLATFORM;
+ apu_led->num_led_instances = ARRAY_SIZE(apu3_led_profile);
+ apu_led->iosize = APU2_IOSIZE;
}
spin_lock_init(&apu_led->lock);
@@ -259,7 +298,10 @@ static int __init apu_led_init(void)
if (!(dmi_match(DMI_PRODUCT_NAME, "APU") ||
dmi_match(DMI_PRODUCT_NAME, "APU2") ||
dmi_match(DMI_PRODUCT_NAME, "apu2") ||
- dmi_match(DMI_PRODUCT_NAME, "PC Engines apu2"))) {
+ dmi_match(DMI_PRODUCT_NAME, "PC Engines apu2") ||
+ dmi_match(DMI_PRODUCT_NAME, "APU3") ||
+ dmi_match(DMI_PRODUCT_NAME, "apu3") ||
+ dmi_match(DMI_PRODUCT_NAME, "PC Engines apu3"))) {
pr_err("Unknown PC Engines board: %s\n",
dmi_get_system_info(DMI_PRODUCT_NAME));
return -ENODEV;
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 437173d1712c..4f413a7c5f05 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -1,17 +1,6 @@
-/*
- * TI lm3692x LED Driver
- *
- * Copyright (C) 2017 Texas Instruments
- *
- * Author: Dan Murphy <dmurphy@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * Data sheet is located
- * http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
- */
+// SPDX-License-Identifier: GPL-2.0
+// TI LM3692x LED chip family driver
+// Copyright (C) 2017-18 Texas Instruments Incorporated - http://www.ti.com/
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -26,6 +15,9 @@
#include <linux/slab.h>
#include <uapi/linux/uleds.h>
+#define LM36922_MODEL 0
+#define LM36923_MODEL 1
+
#define LM3692X_REV 0x0
#define LM3692X_RESET 0x1
#define LM3692X_EN 0x10
@@ -44,6 +36,9 @@
#define LM3692X_DEVICE_EN BIT(0)
#define LM3692X_LED1_EN BIT(1)
#define LM3692X_LED2_EN BIT(2)
+#define LM36923_LED3_EN BIT(3)
+#define LM3692X_ENABLE_MASK (LM3692X_DEVICE_EN | LM3692X_LED1_EN | \
+ LM3692X_LED2_EN | LM36923_LED3_EN)
/* Brightness Control Bits */
#define LM3692X_BL_ADJ_POL BIT(0)
@@ -109,6 +104,8 @@
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
* @label - LED label
+ * @led_enable - LED sync to be enabled
+ * @model_id - Current device model ID enumerated
*/
struct lm3692x_led {
struct mutex lock;
@@ -118,6 +115,8 @@ struct lm3692x_led {
struct gpio_desc *enable_gpio;
struct regulator *regulator;
char label[LED_MAX_NAME_SIZE];
+ int led_enable;
+ int model_id;
};
static const struct reg_default lm3692x_reg_defs[] = {
@@ -200,6 +199,7 @@ out:
static int lm3692x_init(struct lm3692x_led *led)
{
+ int enable_state;
int ret;
if (led->regulator) {
@@ -226,9 +226,25 @@ static int lm3692x_init(struct lm3692x_led *led)
/*
* For glitch free operation, the following data should
- * only be written while device enable bit is 0
+ * only be written while LEDx enable bits are 0 and the device enable
+ * bit is set to 1.
* per Section 7.5.14 of the data sheet
*/
+ ret = regmap_write(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN);
+ if (ret)
+ goto out;
+
+ /* Set the brightness to 0 so when enabled the LEDs do not come
+ * on with full brightness.
+ */
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, 0);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, 0);
+ if (ret)
+ goto out;
+
ret = regmap_write(led->regmap, LM3692X_PWM_CTRL,
LM3692X_PWM_FILTER_100 | LM3692X_PWM_SAMP_24MHZ);
if (ret)
@@ -258,6 +274,38 @@ static int lm3692x_init(struct lm3692x_led *led)
if (ret)
goto out;
+ switch (led->led_enable) {
+ case 0:
+ default:
+ if (led->model_id == LM36923_MODEL)
+ enable_state = LM3692X_LED1_EN | LM3692X_LED2_EN |
+ LM36923_LED3_EN;
+ else
+ enable_state = LM3692X_LED1_EN | LM3692X_LED2_EN;
+
+ break;
+ case 1:
+ enable_state = LM3692X_LED1_EN;
+ break;
+ case 2:
+ enable_state = LM3692X_LED2_EN;
+ break;
+
+ case 3:
+ if (led->model_id == LM36923_MODEL) {
+ enable_state = LM36923_LED3_EN;
+ break;
+ }
+
+ ret = -EINVAL;
+ dev_err(&led->client->dev,
+ "LED3 sync not available on this device\n");
+ goto out;
+ }
+
+ ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_ENABLE_MASK,
+ enable_state | LM3692X_DEVICE_EN);
+
return ret;
out:
dev_err(&led->client->dev, "Fail writing initialization values\n");
@@ -274,52 +322,75 @@ out:
return ret;
}
-
-static int lm3692x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3692x_probe_dt(struct lm3692x_led *led)
{
- int ret;
- struct lm3692x_led *led;
- struct device_node *np = client->dev.of_node;
- struct device_node *child_node;
+ struct fwnode_handle *child = NULL;
const char *name;
+ int ret;
- led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- for_each_available_child_of_node(np, child_node) {
- led->led_dev.default_trigger = of_get_property(child_node,
- "linux,default-trigger",
- NULL);
-
- ret = of_property_read_string(child_node, "label", &name);
- if (!ret)
- snprintf(led->label, sizeof(led->label),
- "%s:%s", id->name, name);
- else
- snprintf(led->label, sizeof(led->label),
- "%s::backlight_cluster", id->name);
- };
-
- led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ led->enable_gpio = devm_gpiod_get_optional(&led->client->dev,
"enable", GPIOD_OUT_LOW);
if (IS_ERR(led->enable_gpio)) {
ret = PTR_ERR(led->enable_gpio);
- dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ dev_err(&led->client->dev, "Failed to get enable gpio: %d\n",
+ ret);
return ret;
}
- led->regulator = devm_regulator_get(&client->dev, "vled");
+ led->regulator = devm_regulator_get(&led->client->dev, "vled");
if (IS_ERR(led->regulator))
led->regulator = NULL;
- led->client = client;
+ child = device_get_next_child_node(&led->client->dev, child);
+ if (!child) {
+ dev_err(&led->client->dev, "No LED Child node\n");
+ return -ENODEV;
+ }
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->led_dev.default_trigger);
+
+ ret = fwnode_property_read_string(child, "label", &name);
+ if (ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s::", led->client->name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", led->client->name, name);
+
+ ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
+ if (ret) {
+ dev_err(&led->client->dev, "reg DT property missing\n");
+ return ret;
+ }
+
led->led_dev.name = led->label;
- led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
- mutex_init(&led->lock);
+ ret = devm_led_classdev_register(&led->client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&led->client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ led->led_dev.dev->of_node = to_of_node(child);
+
+ return 0;
+}
+static int lm3692x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3692x_led *led;
+ int ret;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ mutex_init(&led->lock);
+ led->client = client;
+ led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
+ led->model_id = id->driver_data;
i2c_set_clientdata(client, led);
led->regmap = devm_regmap_init_i2c(client, &lm3692x_regmap_config);
@@ -330,15 +401,13 @@ static int lm3692x_probe(struct i2c_client *client,
return ret;
}
- ret = lm3692x_init(led);
+ ret = lm3692x_probe_dt(led);
if (ret)
return ret;
- ret = devm_led_classdev_register(&client->dev, &led->led_dev);
- if (ret) {
- dev_err(&client->dev, "led register err: %d\n", ret);
+ ret = lm3692x_init(led);
+ if (ret)
return ret;
- }
return 0;
}
@@ -348,6 +417,12 @@ static int lm3692x_remove(struct i2c_client *client)
struct lm3692x_led *led = i2c_get_clientdata(client);
int ret;
+ ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to disable the device\n");
+ return ret;
+ }
+
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -364,8 +439,8 @@ static int lm3692x_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3692x_id[] = {
- { "lm36922", 0 },
- { "lm36923", 1 },
+ { "lm36922", LM36922_MODEL },
+ { "lm36923", LM36923_MODEL },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm3692x_id);
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 5ec730a31b65..de3623e0d094 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -1,32 +1,21 @@
-/*
- * LEDs driver for LT3593 controllers
- *
- * See the datasheet at http://cds.linear.com/docs/Datasheet/3593f.pdf
- *
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * Based on leds-gpio.c,
- *
- * Copyright (C) 2007 8D Technologies inc.
- * Raphael Assenat <raph@8d.com>
- * Copyright (C) 2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2009,2018 Daniel Mack <daniel@zonque.org>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <uapi/linux/uleds.h>
struct lt3593_led_data {
+ char name[LED_MAX_NAME_SIZE];
struct led_classdev cdev;
- unsigned gpio;
+ struct gpio_desc *gpiod;
};
static int lt3593_led_set(struct led_classdev *led_cdev,
@@ -46,137 +35,168 @@ static int lt3593_led_set(struct led_classdev *led_cdev,
*/
if (value == 0) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
return 0;
}
pulses = 32 - (value * 32) / 255;
if (pulses == 0) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
mdelay(1);
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
return 0;
}
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
while (pulses--) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
udelay(1);
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
udelay(1);
}
return 0;
}
-static int create_lt3593_led(const struct gpio_led *template,
- struct lt3593_led_data *led_dat, struct device *parent)
+static struct lt3593_led_data *lt3593_led_probe_pdata(struct device *dev)
{
+ struct gpio_led_platform_data *pdata = dev_get_platdata(dev);
+ const struct gpio_led *template = &pdata->leds[0];
+ struct lt3593_led_data *led_data;
int ret, state;
- /* skip leds on GPIOs that aren't available */
- if (!gpio_is_valid(template->gpio)) {
- dev_info(parent, "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
- KBUILD_MODNAME, template->gpio, template->name);
- return 0;
- }
+ if (pdata->num_leds != 1)
+ return ERR_PTR(-EINVAL);
- led_dat->cdev.name = template->name;
- led_dat->cdev.default_trigger = template->default_trigger;
- led_dat->gpio = template->gpio;
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
+ return ERR_PTR(-ENOMEM);
- led_dat->cdev.brightness_set_blocking = lt3593_led_set;
+ led_data->cdev.name = template->name;
+ led_data->cdev.default_trigger = template->default_trigger;
+ led_data->cdev.brightness_set_blocking = lt3593_led_set;
state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
- led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
+ led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led_data->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = devm_gpio_request_one(parent, template->gpio, state ?
+ ret = devm_gpio_request_one(dev, template->gpio, state ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
template->name);
if (ret < 0)
- return ret;
-
- ret = led_classdev_register(parent, &led_dat->cdev);
- if (ret < 0)
- return ret;
+ return ERR_PTR(ret);
- dev_info(parent, "%s: registered LT3593 LED '%s' at GPIO %d\n",
- KBUILD_MODNAME, template->name, template->gpio);
+ led_data->gpiod = gpio_to_desc(template->gpio);
+ if (!led_data->gpiod)
+ return ERR_PTR(-EPROBE_DEFER);
- return 0;
-}
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret < 0)
+ return ERR_PTR(ret);
-static void delete_lt3593_led(struct lt3593_led_data *led)
-{
- if (!gpio_is_valid(led->gpio))
- return;
+ dev_info(dev, "registered LT3593 LED '%s' at GPIO %d\n",
+ template->name, template->gpio);
- led_classdev_unregister(&led->cdev);
+ return led_data;
}
static int lt3593_led_probe(struct platform_device *pdev)
{
- struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct lt3593_led_data *leds_data;
- int i, ret = 0;
+ struct device *dev = &pdev->dev;
+ struct lt3593_led_data *led_data;
+ struct fwnode_handle *child;
+ int ret, state = LEDS_GPIO_DEFSTATE_OFF;
+ enum gpiod_flags flags = GPIOD_OUT_LOW;
+ const char *tmp;
+
+ if (dev_get_platdata(dev)) {
+ led_data = lt3593_led_probe_pdata(dev);
+ if (IS_ERR(led_data))
+ return PTR_ERR(led_data);
+
+ goto out;
+ }
- if (!pdata)
- return -EBUSY;
+ if (!dev->of_node)
+ return -ENODEV;
- leds_data = devm_kcalloc(&pdev->dev,
- pdata->num_leds, sizeof(struct lt3593_led_data),
- GFP_KERNEL);
- if (!leds_data)
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
return -ENOMEM;
- for (i = 0; i < pdata->num_leds; i++) {
- ret = create_lt3593_led(&pdata->leds[i], &leds_data[i],
- &pdev->dev);
- if (ret < 0)
- goto err;
+ if (device_get_child_node_count(dev) != 1) {
+ dev_err(dev, "Device must have exactly one LED sub-node.\n");
+ return -EINVAL;
}
- platform_set_drvdata(pdev, leds_data);
+ led_data->gpiod = devm_gpiod_get(dev, "lltc,ctrl", 0);
+ if (IS_ERR(led_data->gpiod))
+ return PTR_ERR(led_data->gpiod);
- return 0;
+ child = device_get_next_child_node(dev, NULL);
-err:
- for (i = i - 1; i >= 0; i--)
- delete_lt3593_led(&leds_data[i]);
+ ret = fwnode_property_read_string(child, "label", &tmp);
+ if (ret < 0)
+ snprintf(led_data->name, sizeof(led_data->name),
+ "lt3593::");
+ else
+ snprintf(led_data->name, sizeof(led_data->name),
+ "lt3593:%s", tmp);
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led_data->cdev.default_trigger);
+
+ if (!fwnode_property_read_string(child, "default-state", &tmp)) {
+ if (!strcmp(tmp, "keep")) {
+ state = LEDS_GPIO_DEFSTATE_KEEP;
+ flags = GPIOD_ASIS;
+ } else if (!strcmp(tmp, "on")) {
+ state = LEDS_GPIO_DEFSTATE_ON;
+ flags = GPIOD_OUT_HIGH;
+ }
+ }
- return ret;
-}
+ led_data->cdev.name = led_data->name;
+ led_data->cdev.brightness_set_blocking = lt3593_led_set;
+ led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
-static int lt3593_led_remove(struct platform_device *pdev)
-{
- int i;
- struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct lt3593_led_data *leds_data;
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return ret;
+ }
- leds_data = platform_get_drvdata(pdev);
+ led_data->cdev.dev->of_node = dev->of_node;
- for (i = 0; i < pdata->num_leds; i++)
- delete_lt3593_led(&leds_data[i]);
+out:
+ platform_set_drvdata(pdev, led_data);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id of_lt3593_leds_match[] = {
+ { .compatible = "lltc,lt3593", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lt3593_leds_match);
+#endif
+
static struct platform_driver lt3593_led_driver = {
.probe = lt3593_led_probe,
- .remove = lt3593_led_remove,
.driver = {
.name = "leds-lt3593",
+ .of_match_table = of_match_ptr(of_lt3593_leds_match),
},
};
module_platform_driver(lt3593_led_driver);
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
MODULE_DESCRIPTION("LED driver for LT3593 controllers");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds-lt3593");
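Note on the conversion above: leds-lt3593 now stores a GPIO descriptor (gpiod) instead of a raw GPIO number. On the DT path, the con_id string passed to devm_gpiod_get() selects the firmware property that gpiolib searches for, so "lltc,ctrl" resolves to an "lltc,ctrl-gpios" property in the LED node. A minimal sketch of that lookup; the GPIOD_ASIS spelling is illustrative (the driver above passes the numeric 0, which is the same value):

	/* Sketch: descriptor-based GPIO lookup as used in lt3593_led_probe().
	 * gpiolib searches the device's fwnode for "lltc,ctrl-gpios".
	 */
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get(dev, "lltc,ctrl", GPIOD_ASIS);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);	/* propagates -EPROBE_DEFER too */

	/* Logical level: gpiolib applies any GPIO_ACTIVE_LOW flag from DT. */
	gpiod_set_value_cansleep(gpiod, 1);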
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 4edf74f1d6d4..8c019c28f9f5 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -268,7 +268,7 @@ static int max8997_led_probe(struct platform_device *pdev)
mode = pdata->led_pdata->mode[led->id];
brightness = pdata->led_pdata->brightness[led->id];
- max8997_led_set_mode(led, pdata->led_pdata->mode[led->id]);
+ max8997_led_set_mode(led, mode);
if (brightness > led->cdev.max_brightness)
brightness = led->cdev.max_brightness;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 14fe5cd43232..a0a7dc2ef87c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -42,8 +42,8 @@
struct ns2_led_data {
struct led_classdev cdev;
- unsigned cmd;
- unsigned slow;
+ unsigned int cmd;
+ unsigned int slow;
bool can_sleep;
unsigned char sata; /* True when SATA mode active. */
rwlock_t rw_lock; /* Lock GPIOs. */
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index a2559b4fdfff..4018af769969 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -10,7 +10,6 @@ if LEDS_TRIGGERS
config LEDS_TRIGGER_TIMER
tristate "LED Timer Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a programmable timer
via sysfs. Some LED hardware can be programmed to start
@@ -21,7 +20,6 @@ config LEDS_TRIGGER_TIMER
config LEDS_TRIGGER_ONESHOT
tristate "LED One-shot Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to blink in one-shot pulses with parameters
controlled via sysfs. It's useful to notify the user on
@@ -36,7 +34,6 @@ config LEDS_TRIGGER_ONESHOT
config LEDS_TRIGGER_DISK
bool "LED Disk Trigger"
depends on IDE_GD_ATA || ATA
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by disk activity.
If unsure, say Y.
@@ -44,14 +41,12 @@ config LEDS_TRIGGER_DISK
config LEDS_TRIGGER_MTD
bool "LED MTD (NAND/NOR) Trigger"
depends on MTD
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by MTD activity.
If unsure, say N.
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a CPU load average.
The flash frequency is a hyperbolic function of the 1-minute
@@ -60,7 +55,6 @@ config LEDS_TRIGGER_HEARTBEAT
config LEDS_TRIGGER_BACKLIGHT
tristate "LED backlight Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled as a backlight device: they
turn off and on when the display is blanked and unblanked.
@@ -69,7 +63,6 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
@@ -79,7 +72,6 @@ config LEDS_TRIGGER_CPU
config LEDS_TRIGGER_ACTIVITY
tristate "LED activity Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by immediate CPU usage.
The flash frequency and duty cycle varies from faint flashes to
@@ -88,7 +80,6 @@ config LEDS_TRIGGER_ACTIVITY
config LEDS_TRIGGER_GPIO
tristate "LED GPIO Trigger"
- depends on LEDS_TRIGGERS
depends on GPIOLIB || COMPILE_TEST
help
This allows LEDs to be controlled by gpio events. It's good
@@ -101,7 +92,6 @@ config LEDS_TRIGGER_GPIO
config LEDS_TRIGGER_DEFAULT_ON
tristate "LED Default ON Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
@@ -111,7 +101,6 @@ comment "iptables trigger is under Netfilter config (LED target)"
config LEDS_TRIGGER_TRANSIENT
tristate "LED Transient Trigger"
- depends on LEDS_TRIGGERS
help
This allows one-time activation of a transient state on
GPIO/PWM based hardware.
@@ -119,7 +108,6 @@ config LEDS_TRIGGER_TRANSIENT
config LEDS_TRIGGER_CAMERA
tristate "LED Camera Flash/Torch Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled as a camera flash/torch device.
This enables direct flash/torch on/off by the driver, kernel space.
@@ -127,7 +115,6 @@ config LEDS_TRIGGER_CAMERA
config LEDS_TRIGGER_PANIC
bool "LED Panic Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be configured to blink on a kernel panic.
Enabling this option will allow you to mark certain LEDs as panic indicators,
@@ -137,7 +124,7 @@ config LEDS_TRIGGER_PANIC
config LEDS_TRIGGER_NETDEV
tristate "LED Netdev Trigger"
- depends on NET && LEDS_TRIGGERS
+ depends on NET
help
This allows LEDs to be controlled by network device activity.
If unsure, say Y.
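All of the per-trigger "depends on LEDS_TRIGGERS" lines removed above are redundant: as the hunk context shows, every entry already sits inside an "if LEDS_TRIGGERS" block, and Kconfig symbols declared inside an if/endif block inherit that dependency automatically. A minimal Kconfig sketch of the structure (symbol names follow the file above):

	if LEDS_TRIGGERS

	config LEDS_TRIGGER_TIMER
		tristate "LED Timer Trigger"
		help
		  ...
	# no "depends on LEDS_TRIGGERS" needed here: the enclosing
	# if-block adds the condition to every symbol inside it

	endif # LEDS_TRIGGERS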
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 5081894082bd..bcbf41c90c30 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -7,8 +7,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
@@ -37,7 +37,6 @@ static void led_activity_function(struct timer_list *t)
struct activity_data *activity_data = from_timer(activity_data, t,
timer);
struct led_classdev *led_cdev = activity_data->led_cdev;
- struct timespec boot_time;
unsigned int target;
unsigned int usage;
int delay;
@@ -57,8 +56,6 @@ static void led_activity_function(struct timer_list *t)
return;
}
- get_monotonic_boottime(&boot_time);
-
cpus = 0;
curr_used = 0;
@@ -76,7 +73,7 @@ static void led_activity_function(struct timer_list *t)
* down to 16us, ensuring we won't overflow 32-bit computations below
* even up to 3k CPUs, while keeping divides cheap on smaller systems.
*/
- curr_boot = timespec_to_ns(&boot_time) * cpus;
+ curr_boot = ktime_get_boot_ns() * cpus;
diff_boot = (curr_boot - activity_data->last_boot) >> 16;
diff_used = (curr_used - activity_data->last_used) >> 16;
activity_data->last_boot = curr_boot;
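The timekeeping change in this hunk folds get_monotonic_boottime() plus timespec_to_ns() into a single call. A sketch of the equivalence, assuming the timekeeping API of this kernel generation (ktime_get_boot_ns() was later renamed ktime_get_boottime_ns()):

	/* Before: CLOCK_BOOTTIME via a struct timespec round-trip
	 * (not y2038-safe, and needlessly indirect):
	 *
	 *	struct timespec boot_time;
	 *	get_monotonic_boottime(&boot_time);
	 *	curr_boot = timespec_to_ns(&boot_time) * cpus;
	 *
	 * After: boot-time nanoseconds directly, no intermediate struct.
	 */
	u64 curr_boot = ktime_get_boot_ns() * cpus;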
@@ -155,8 +152,7 @@ static void led_activity_function(struct timer_list *t)
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct activity_data *activity_data = led_cdev->trigger_data;
+ struct activity_data *activity_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", activity_data->invert);
}
@@ -165,8 +161,7 @@ static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct activity_data *activity_data = led_cdev->trigger_data;
+ struct activity_data *activity_data = led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -181,21 +176,21 @@ static ssize_t led_invert_store(struct device *dev,
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
-static void activity_activate(struct led_classdev *led_cdev)
+static struct attribute *activity_led_attrs[] = {
+ &dev_attr_invert.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(activity_led);
+
+static int activity_activate(struct led_classdev *led_cdev)
{
struct activity_data *activity_data;
- int rc;
activity_data = kzalloc(sizeof(*activity_data), GFP_KERNEL);
if (!activity_data)
- return;
+ return -ENOMEM;
- led_cdev->trigger_data = activity_data;
- rc = device_create_file(led_cdev->dev, &dev_attr_invert);
- if (rc) {
- kfree(led_cdev->trigger_data);
- return;
- }
+ led_set_trigger_data(led_cdev, activity_data);
activity_data->led_cdev = led_cdev;
timer_setup(&activity_data->timer, led_activity_function, 0);
@@ -203,26 +198,24 @@ static void activity_activate(struct led_classdev *led_cdev)
led_cdev->blink_brightness = led_cdev->max_brightness;
led_activity_function(&activity_data->timer);
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = true;
+
+ return 0;
}
static void activity_deactivate(struct led_classdev *led_cdev)
{
- struct activity_data *activity_data = led_cdev->trigger_data;
-
- if (led_cdev->activated) {
- del_timer_sync(&activity_data->timer);
- device_remove_file(led_cdev->dev, &dev_attr_invert);
- kfree(activity_data);
- clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = false;
- }
+ struct activity_data *activity_data = led_get_trigger_data(led_cdev);
+
+ del_timer_sync(&activity_data->timer);
+ kfree(activity_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
static struct led_trigger activity_led_trigger = {
.name = "activity",
.activate = activity_activate,
.deactivate = activity_deactivate,
+ .groups = activity_led_groups,
};
static int activity_reboot_notifier(struct notifier_block *nb,
@@ -272,4 +265,4 @@ module_exit(activity_exit);
MODULE_AUTHOR("Willy Tarreau <w@1wt.eu>");
MODULE_DESCRIPTION("Activity LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
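The rework above is the template every trigger in this series follows: sysfs attributes move out of activate()/deactivate() into an attribute group referenced by the trigger's ->groups pointer, so the LED trigger core creates and removes the files itself, and activate() now returns an int so allocation failures propagate instead of leaving a half-initialized trigger behind — which is also what makes the led_cdev->activated flag unnecessary. A condensed sketch of the pattern; the "foo" names and the invert attribute are placeholders standing in for each trigger's own:

	struct foo_data { int invert; };	/* per-LED trigger state */

	static struct attribute *foo_trig_attrs[] = {
		&dev_attr_invert.attr,		/* from DEVICE_ATTR(invert, ...) */
		NULL
	};
	ATTRIBUTE_GROUPS(foo_trig);		/* emits foo_trig_groups[] */

	static int foo_trig_activate(struct led_classdev *led_cdev)
	{
		struct foo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

		if (!data)
			return -ENOMEM;		/* core aborts the trigger switch */
		led_set_trigger_data(led_cdev, data);
		return 0;
	}

	static void foo_trig_deactivate(struct led_classdev *led_cdev)
	{
		kfree(led_get_trigger_data(led_cdev));
	}

	static struct led_trigger foo_led_trigger = {
		.name	    = "foo",
		.activate   = foo_trig_activate,
		.deactivate = foo_trig_deactivate,
		.groups	    = foo_trig_groups,	/* sysfs handled by the core */
	};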
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 1ca1f1608f76..c2b57beef718 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -64,8 +64,7 @@ static int fb_notifier_callback(struct notifier_block *p,
static ssize_t bl_trig_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct bl_trig_notifier *n = led->trigger_data;
+ struct bl_trig_notifier *n = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", n->invert);
}
@@ -73,8 +72,8 @@ static ssize_t bl_trig_invert_show(struct device *dev,
static ssize_t bl_trig_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t num)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct bl_trig_notifier *n = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct bl_trig_notifier *n = led_trigger_get_drvdata(dev);
unsigned long invert;
int ret;
@@ -97,22 +96,22 @@ static ssize_t bl_trig_invert_store(struct device *dev,
}
static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store);
-static void bl_trig_activate(struct led_classdev *led)
+static struct attribute *bl_trig_attrs[] = {
+ &dev_attr_inverted.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(bl_trig);
+
+static int bl_trig_activate(struct led_classdev *led)
{
int ret;
struct bl_trig_notifier *n;
n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL);
- led->trigger_data = n;
- if (!n) {
- dev_err(led->dev, "unable to allocate backlight trigger\n");
- return;
- }
-
- ret = device_create_file(led->dev, &dev_attr_inverted);
- if (ret)
- goto err_invert;
+ if (!n)
+ return -ENOMEM;
+ led_set_trigger_data(led, n);
n->led = led;
n->brightness = led->brightness;
@@ -122,46 +121,25 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
- led->activated = true;
- return;
-
-err_invert:
- led->trigger_data = NULL;
- kfree(n);
+ return 0;
}
static void bl_trig_deactivate(struct led_classdev *led)
{
- struct bl_trig_notifier *n =
- (struct bl_trig_notifier *) led->trigger_data;
-
- if (led->activated) {
- device_remove_file(led->dev, &dev_attr_inverted);
- fb_unregister_client(&n->notifier);
- kfree(n);
- led->activated = false;
- }
+ struct bl_trig_notifier *n = led_get_trigger_data(led);
+
+ fb_unregister_client(&n->notifier);
+ kfree(n);
}
static struct led_trigger bl_led_trigger = {
.name = "backlight",
.activate = bl_trig_activate,
- .deactivate = bl_trig_deactivate
+ .deactivate = bl_trig_deactivate,
+ .groups = bl_trig_groups,
};
-
-static int __init bl_trig_init(void)
-{
- return led_trigger_register(&bl_led_trigger);
-}
-
-static void __exit bl_trig_exit(void)
-{
- led_trigger_unregister(&bl_led_trigger);
-}
-
-module_init(bl_trig_init);
-module_exit(bl_trig_exit);
+module_led_trigger(bl_led_trigger);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("Backlight emulation LED trigger");
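The dev_get_drvdata() + led_cdev->trigger_data pairs are replaced throughout by small accessors. Their likely shape, per the include/linux/leds.h of this series — treat this as a sketch, not a verbatim copy:

	static inline void led_set_trigger_data(struct led_classdev *led_cdev,
						void *trigger_data)
	{
		led_cdev->trigger_data = trigger_data;
	}

	static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
	{
		return led_cdev->trigger_data;
	}

	/* For sysfs callbacks: the trigger core sets the classdev as the
	 * attribute device's drvdata, so both helpers below start there. */
	static inline struct led_classdev *led_trigger_get_led(struct device *dev)
	{
		return (struct led_classdev *)dev_get_drvdata(dev);
	}

	static inline void *led_trigger_get_drvdata(struct device *dev)
	{
		return led_get_trigger_data(led_trigger_get_led(dev));
	}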
diff --git a/drivers/leds/trigger/ledtrig-camera.c b/drivers/leds/trigger/ledtrig-camera.c
index 9bd73a8bad5c..091a09a20c58 100644
--- a/drivers/leds/trigger/ledtrig-camera.c
+++ b/drivers/leds/trigger/ledtrig-camera.c
@@ -10,7 +10,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -54,4 +53,4 @@ module_exit(ledtrig_camera_exit);
MODULE_DESCRIPTION("LED Trigger for Camera Flash/Torch Control");
MODULE_AUTHOR("Milo Kim");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index ff455cb46680..7f6d9219711e 100644
--- a/drivers/leds/trigger/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -8,7 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -17,29 +16,18 @@
#include <linux/leds.h>
#include "../leds.h"
-static void defon_trig_activate(struct led_classdev *led_cdev)
+static int defon_trig_activate(struct led_classdev *led_cdev)
{
led_set_brightness_nosleep(led_cdev, led_cdev->max_brightness);
+ return 0;
}
static struct led_trigger defon_led_trigger = {
.name = "default-on",
.activate = defon_trig_activate,
};
-
-static int __init defon_trig_init(void)
-{
- return led_trigger_register(&defon_led_trigger);
-}
-
-static void __exit defon_trig_exit(void)
-{
- led_trigger_unregister(&defon_led_trigger);
-}
-
-module_init(defon_trig_init);
-module_exit(defon_trig_exit);
+module_led_trigger(defon_led_trigger);
MODULE_AUTHOR("Nick Forbes <nick.forbes@incepta.com>");
MODULE_DESCRIPTION("Default-ON LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
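module_led_trigger() absorbs the init/exit boilerplate for triggers whose module does nothing beyond registering the trigger. It is most likely a module_driver()-style wrapper (sketch; see include/linux/leds.h of this series):

	/* Probable definition, following the module_driver() convention:
	 * expands to module_init/module_exit stubs that call
	 * led_trigger_register()/led_trigger_unregister() on the trigger.
	 */
	#define module_led_trigger(__led_trigger) \
		module_driver(__led_trigger, led_trigger_register, \
			      led_trigger_unregister)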
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 8891e88d54dd..ed0db8ed825f 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -6,7 +6,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -29,7 +28,7 @@ struct gpio_trig_data {
static irqreturn_t gpio_trig_irq(int irq, void *_led)
{
struct led_classdev *led = _led;
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_get_trigger_data(led);
int tmp;
tmp = gpio_get_value_cansleep(gpio_data->gpio);
@@ -52,8 +51,7 @@ static irqreturn_t gpio_trig_irq(int irq, void *_led)
static ssize_t gpio_trig_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->desired_brightness);
}
@@ -61,8 +59,7 @@ static ssize_t gpio_trig_brightness_show(struct device *dev,
static ssize_t gpio_trig_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned desired_brightness;
int ret;
@@ -82,8 +79,7 @@ static DEVICE_ATTR(desired_brightness, 0644, gpio_trig_brightness_show,
static ssize_t gpio_trig_inverted_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->inverted);
}
@@ -91,8 +87,8 @@ static ssize_t gpio_trig_inverted_show(struct device *dev,
static ssize_t gpio_trig_inverted_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned long inverted;
int ret;
@@ -116,8 +112,7 @@ static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
static ssize_t gpio_trig_gpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->gpio);
}
@@ -125,8 +120,8 @@ static ssize_t gpio_trig_gpio_show(struct device *dev,
static ssize_t gpio_trig_gpio_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned gpio;
int ret;
@@ -163,76 +158,45 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
}
static DEVICE_ATTR(gpio, 0644, gpio_trig_gpio_show, gpio_trig_gpio_store);
-static void gpio_trig_activate(struct led_classdev *led)
+static struct attribute *gpio_trig_attrs[] = {
+ &dev_attr_desired_brightness.attr,
+ &dev_attr_inverted.attr,
+ &dev_attr_gpio.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpio_trig);
+
+static int gpio_trig_activate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data;
- int ret;
gpio_data = kzalloc(sizeof(*gpio_data), GFP_KERNEL);
if (!gpio_data)
- return;
-
- ret = device_create_file(led->dev, &dev_attr_gpio);
- if (ret)
- goto err_gpio;
-
- ret = device_create_file(led->dev, &dev_attr_inverted);
- if (ret)
- goto err_inverted;
-
- ret = device_create_file(led->dev, &dev_attr_desired_brightness);
- if (ret)
- goto err_brightness;
+ return -ENOMEM;
gpio_data->led = led;
- led->trigger_data = gpio_data;
- led->activated = true;
-
- return;
-
-err_brightness:
- device_remove_file(led->dev, &dev_attr_inverted);
+ led_set_trigger_data(led, gpio_data);
-err_inverted:
- device_remove_file(led->dev, &dev_attr_gpio);
-
-err_gpio:
- kfree(gpio_data);
+ return 0;
}
static void gpio_trig_deactivate(struct led_classdev *led)
{
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_get_trigger_data(led);
- if (led->activated) {
- device_remove_file(led->dev, &dev_attr_gpio);
- device_remove_file(led->dev, &dev_attr_inverted);
- device_remove_file(led->dev, &dev_attr_desired_brightness);
- if (gpio_data->gpio != 0)
- free_irq(gpio_to_irq(gpio_data->gpio), led);
- kfree(gpio_data);
- led->activated = false;
- }
+ if (gpio_data->gpio != 0)
+ free_irq(gpio_to_irq(gpio_data->gpio), led);
+ kfree(gpio_data);
}
static struct led_trigger gpio_led_trigger = {
.name = "gpio",
.activate = gpio_trig_activate,
.deactivate = gpio_trig_deactivate,
+ .groups = gpio_trig_groups,
};
-
-static int __init gpio_trig_init(void)
-{
- return led_trigger_register(&gpio_led_trigger);
-}
-module_init(gpio_trig_init);
-
-static void __exit gpio_trig_exit(void)
-{
- led_trigger_unregister(&gpio_led_trigger);
-}
-module_exit(gpio_trig_exit);
+module_led_trigger(gpio_led_trigger);
MODULE_AUTHOR("Felipe Balbi <me@felipebalbi.com>");
MODULE_DESCRIPTION("GPIO LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index f0896de410b8..7a2b12e19329 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -9,8 +9,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -96,8 +96,8 @@ static void led_heartbeat_function(struct timer_list *t)
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", heartbeat_data->invert);
}
@@ -105,8 +105,8 @@ static ssize_t led_invert_show(struct device *dev,
static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -121,22 +121,22 @@ static ssize_t led_invert_store(struct device *dev,
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
-static void heartbeat_trig_activate(struct led_classdev *led_cdev)
+static struct attribute *heartbeat_trig_attrs[] = {
+ &dev_attr_invert.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(heartbeat_trig);
+
+static int heartbeat_trig_activate(struct led_classdev *led_cdev)
{
struct heartbeat_trig_data *heartbeat_data;
- int rc;
heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL);
if (!heartbeat_data)
- return;
+ return -ENOMEM;
- led_cdev->trigger_data = heartbeat_data;
+ led_set_trigger_data(led_cdev, heartbeat_data);
heartbeat_data->led_cdev = led_cdev;
- rc = device_create_file(led_cdev->dev, &dev_attr_invert);
- if (rc) {
- kfree(led_cdev->trigger_data);
- return;
- }
timer_setup(&heartbeat_data->timer, led_heartbeat_function, 0);
heartbeat_data->phase = 0;
@@ -144,26 +144,25 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
led_cdev->blink_brightness = led_cdev->max_brightness;
led_heartbeat_function(&heartbeat_data->timer);
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = true;
+
+ return 0;
}
static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
{
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
-
- if (led_cdev->activated) {
- del_timer_sync(&heartbeat_data->timer);
- device_remove_file(led_cdev->dev, &dev_attr_invert);
- kfree(heartbeat_data);
- clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = false;
- }
+ struct heartbeat_trig_data *heartbeat_data =
+ led_get_trigger_data(led_cdev);
+
+ del_timer_sync(&heartbeat_data->timer);
+ kfree(heartbeat_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
static struct led_trigger heartbeat_led_trigger = {
.name = "heartbeat",
.activate = heartbeat_trig_activate,
.deactivate = heartbeat_trig_deactivate,
+ .groups = heartbeat_trig_groups,
};
static int heartbeat_reboot_notifier(struct notifier_block *nb,
@@ -213,4 +212,4 @@ module_exit(heartbeat_trig_exit);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Heartbeat LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 6df4781a6308..3dd3ed46d473 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -94,8 +94,7 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
static ssize_t device_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
ssize_t len;
spin_lock_bh(&trigger_data->lock);
@@ -109,8 +108,7 @@ static ssize_t device_name_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
if (size >= IFNAMSIZ)
return -EINVAL;
@@ -150,8 +148,7 @@ static DEVICE_ATTR_RW(device_name);
static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
enum netdev_led_attr attr)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
int bit;
switch (attr) {
@@ -174,8 +171,7 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
size_t size, enum netdev_led_attr attr)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
int bit;
@@ -255,8 +251,7 @@ static DEVICE_ATTR_RW(rx);
static ssize_t interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n",
jiffies_to_msecs(atomic_read(&trigger_data->interval)));
@@ -266,8 +261,7 @@ static ssize_t interval_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
unsigned long value;
int ret;
@@ -288,15 +282,23 @@ static ssize_t interval_store(struct device *dev,
static DEVICE_ATTR_RW(interval);
+static struct attribute *netdev_trig_attrs[] = {
+ &dev_attr_device_name.attr,
+ &dev_attr_link.attr,
+ &dev_attr_rx.attr,
+ &dev_attr_tx.attr,
+ &dev_attr_interval.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(netdev_trig);
+
static int netdev_trig_notify(struct notifier_block *nb,
unsigned long evt, void *dv)
{
struct net_device *dev =
netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
- struct led_netdev_data *trigger_data = container_of(nb,
- struct
- led_netdev_data,
- notifier);
+ struct led_netdev_data *trigger_data =
+ container_of(nb, struct led_netdev_data, notifier);
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
&& evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
@@ -342,10 +344,8 @@ static int netdev_trig_notify(struct notifier_block *nb,
/* here's the real work! */
static void netdev_trig_work(struct work_struct *work)
{
- struct led_netdev_data *trigger_data = container_of(work,
- struct
- led_netdev_data,
- work.work);
+ struct led_netdev_data *trigger_data =
+ container_of(work, struct led_netdev_data, work.work);
struct rtnl_link_stats64 *dev_stats;
unsigned int new_activity;
struct rtnl_link_stats64 temp;
@@ -388,14 +388,14 @@ static void netdev_trig_work(struct work_struct *work)
(atomic_read(&trigger_data->interval)*2));
}
-static void netdev_trig_activate(struct led_classdev *led_cdev)
+static int netdev_trig_activate(struct led_classdev *led_cdev)
{
struct led_netdev_data *trigger_data;
int rc;
trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
if (!trigger_data)
- return;
+ return -ENOMEM;
spin_lock_init(&trigger_data->lock);
@@ -412,69 +412,34 @@ static void netdev_trig_activate(struct led_classdev *led_cdev)
atomic_set(&trigger_data->interval, msecs_to_jiffies(50));
trigger_data->last_activity = 0;
- led_cdev->trigger_data = trigger_data;
+ led_set_trigger_data(led_cdev, trigger_data);
- rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
- if (rc)
- goto err_out;
- rc = device_create_file(led_cdev->dev, &dev_attr_link);
- if (rc)
- goto err_out_device_name;
- rc = device_create_file(led_cdev->dev, &dev_attr_rx);
- if (rc)
- goto err_out_link;
- rc = device_create_file(led_cdev->dev, &dev_attr_tx);
- if (rc)
- goto err_out_rx;
- rc = device_create_file(led_cdev->dev, &dev_attr_interval);
- if (rc)
- goto err_out_tx;
rc = register_netdevice_notifier(&trigger_data->notifier);
if (rc)
- goto err_out_interval;
- return;
-
-err_out_interval:
- device_remove_file(led_cdev->dev, &dev_attr_interval);
-err_out_tx:
- device_remove_file(led_cdev->dev, &dev_attr_tx);
-err_out_rx:
- device_remove_file(led_cdev->dev, &dev_attr_rx);
-err_out_link:
- device_remove_file(led_cdev->dev, &dev_attr_link);
-err_out_device_name:
- device_remove_file(led_cdev->dev, &dev_attr_device_name);
-err_out:
- led_cdev->trigger_data = NULL;
- kfree(trigger_data);
+ kfree(trigger_data);
+
+ return rc;
}
static void netdev_trig_deactivate(struct led_classdev *led_cdev)
{
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
-
- if (trigger_data) {
- unregister_netdevice_notifier(&trigger_data->notifier);
+ struct led_netdev_data *trigger_data = led_get_trigger_data(led_cdev);
- device_remove_file(led_cdev->dev, &dev_attr_device_name);
- device_remove_file(led_cdev->dev, &dev_attr_link);
- device_remove_file(led_cdev->dev, &dev_attr_rx);
- device_remove_file(led_cdev->dev, &dev_attr_tx);
- device_remove_file(led_cdev->dev, &dev_attr_interval);
+ unregister_netdevice_notifier(&trigger_data->notifier);
- cancel_delayed_work_sync(&trigger_data->work);
+ cancel_delayed_work_sync(&trigger_data->work);
- if (trigger_data->net_dev)
- dev_put(trigger_data->net_dev);
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
- kfree(trigger_data);
- }
+ kfree(trigger_data);
}
static struct led_trigger netdev_led_trigger = {
.name = "netdev",
.activate = netdev_trig_activate,
.deactivate = netdev_trig_deactivate,
+ .groups = netdev_trig_groups,
};
static int __init netdev_trig_init(void)
diff --git a/drivers/leds/trigger/ledtrig-oneshot.c b/drivers/leds/trigger/ledtrig-oneshot.c
index b8ea9f0f1e19..95c9be4b6e7e 100644
--- a/drivers/leds/trigger/ledtrig-oneshot.c
+++ b/drivers/leds/trigger/ledtrig-oneshot.c
@@ -8,7 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -29,8 +28,8 @@ struct oneshot_trig_data {
static ssize_t led_shot(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
+ struct oneshot_trig_data *oneshot_data = led_trigger_get_drvdata(dev);
led_blink_set_oneshot(led_cdev,
&led_cdev->blink_delay_on, &led_cdev->blink_delay_off,
@@ -42,8 +41,7 @@ static ssize_t led_shot(struct device *dev,
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct oneshot_trig_data *oneshot_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", oneshot_data->invert);
}
@@ -51,8 +49,8 @@ static ssize_t led_invert_show(struct device *dev,
static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
+ struct oneshot_trig_data *oneshot_data = led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -73,7 +71,7 @@ static ssize_t led_invert_store(struct device *dev,
static ssize_t led_delay_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
}
@@ -81,7 +79,7 @@ static ssize_t led_delay_on_show(struct device *dev,
static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
int ret;
@@ -93,10 +91,11 @@ static ssize_t led_delay_on_store(struct device *dev,
return size;
}
+
static ssize_t led_delay_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
}
@@ -104,7 +103,7 @@ static ssize_t led_delay_off_show(struct device *dev,
static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
int ret;
@@ -122,59 +121,36 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
static DEVICE_ATTR(shot, 0200, NULL, led_shot);
-static void oneshot_trig_activate(struct led_classdev *led_cdev)
+static struct attribute *oneshot_trig_attrs[] = {
+ &dev_attr_delay_on.attr,
+ &dev_attr_delay_off.attr,
+ &dev_attr_invert.attr,
+ &dev_attr_shot.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(oneshot_trig);
+
+static int oneshot_trig_activate(struct led_classdev *led_cdev)
{
struct oneshot_trig_data *oneshot_data;
- int rc;
oneshot_data = kzalloc(sizeof(*oneshot_data), GFP_KERNEL);
if (!oneshot_data)
- return;
-
- led_cdev->trigger_data = oneshot_data;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
- if (rc)
- goto err_out_trig_data;
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
- if (rc)
- goto err_out_delayon;
- rc = device_create_file(led_cdev->dev, &dev_attr_invert);
- if (rc)
- goto err_out_delayoff;
- rc = device_create_file(led_cdev->dev, &dev_attr_shot);
- if (rc)
- goto err_out_invert;
+ return -ENOMEM;
+
+ led_set_trigger_data(led_cdev, oneshot_data);
led_cdev->blink_delay_on = DEFAULT_DELAY;
led_cdev->blink_delay_off = DEFAULT_DELAY;
- led_cdev->activated = true;
-
- return;
-
-err_out_invert:
- device_remove_file(led_cdev->dev, &dev_attr_invert);
-err_out_delayoff:
- device_remove_file(led_cdev->dev, &dev_attr_delay_off);
-err_out_delayon:
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
-err_out_trig_data:
- kfree(led_cdev->trigger_data);
+ return 0;
}
static void oneshot_trig_deactivate(struct led_classdev *led_cdev)
{
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct oneshot_trig_data *oneshot_data = led_get_trigger_data(led_cdev);
- if (led_cdev->activated) {
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
- device_remove_file(led_cdev->dev, &dev_attr_delay_off);
- device_remove_file(led_cdev->dev, &dev_attr_invert);
- device_remove_file(led_cdev->dev, &dev_attr_shot);
- kfree(oneshot_data);
- led_cdev->activated = false;
- }
+ kfree(oneshot_data);
/* Stop blinking */
led_set_brightness(led_cdev, LED_OFF);
@@ -184,20 +160,9 @@ static struct led_trigger oneshot_led_trigger = {
.name = "oneshot",
.activate = oneshot_trig_activate,
.deactivate = oneshot_trig_deactivate,
+ .groups = oneshot_trig_groups,
};
-
-static int __init oneshot_trig_init(void)
-{
- return led_trigger_register(&oneshot_led_trigger);
-}
-
-static void __exit oneshot_trig_exit(void)
-{
- led_trigger_unregister(&oneshot_led_trigger);
-}
-
-module_init(oneshot_trig_init);
-module_exit(oneshot_trig_exit);
+module_led_trigger(oneshot_led_trigger);
MODULE_AUTHOR("Fabio Baltieri <fabio.baltieri@gmail.com>");
MODULE_DESCRIPTION("One-shot LED trigger");
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index 8d09327b5719..7c14983781ee 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -8,7 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -21,7 +20,7 @@
static ssize_t led_delay_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
}
@@ -29,7 +28,7 @@ static ssize_t led_delay_on_show(struct device *dev,
static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
ssize_t ret = -EINVAL;
@@ -46,7 +45,7 @@ static ssize_t led_delay_on_store(struct device *dev,
static ssize_t led_delay_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
}
@@ -54,7 +53,7 @@ static ssize_t led_delay_off_show(struct device *dev,
static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
ssize_t ret = -EINVAL;
@@ -71,37 +70,23 @@ static ssize_t led_delay_off_store(struct device *dev,
static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
-static void timer_trig_activate(struct led_classdev *led_cdev)
-{
- int rc;
-
- led_cdev->trigger_data = NULL;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
- if (rc)
- return;
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
- if (rc)
- goto err_out_delayon;
+static struct attribute *timer_trig_attrs[] = {
+ &dev_attr_delay_on.attr,
+ &dev_attr_delay_off.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(timer_trig);
+static int timer_trig_activate(struct led_classdev *led_cdev)
+{
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
- led_cdev->activated = true;
- return;
-
-err_out_delayon:
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
+ return 0;
}
static void timer_trig_deactivate(struct led_classdev *led_cdev)
{
- if (led_cdev->activated) {
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
- device_remove_file(led_cdev->dev, &dev_attr_delay_off);
- led_cdev->activated = false;
- }
-
/* Stop blinking */
led_set_brightness(led_cdev, LED_OFF);
}
@@ -110,21 +95,10 @@ static struct led_trigger timer_led_trigger = {
.name = "timer",
.activate = timer_trig_activate,
.deactivate = timer_trig_deactivate,
+ .groups = timer_trig_groups,
};
-
-static int __init timer_trig_init(void)
-{
- return led_trigger_register(&timer_led_trigger);
-}
-
-static void __exit timer_trig_exit(void)
-{
- led_trigger_unregister(&timer_led_trigger);
-}
-
-module_init(timer_trig_init);
-module_exit(timer_trig_exit);
+module_led_trigger(timer_led_trigger);
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("Timer LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 9d1769073562..a80bb82aacc2 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -42,8 +42,8 @@ static void transient_timer_function(struct timer_list *t)
static ssize_t transient_activate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
return sprintf(buf, "%d\n", transient_data->activate);
}
@@ -51,8 +51,9 @@ static ssize_t transient_activate_show(struct device *dev,
static ssize_t transient_activate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
ssize_t ret;
@@ -94,8 +95,7 @@ static ssize_t transient_activate_store(struct device *dev,
static ssize_t transient_duration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%lu\n", transient_data->duration);
}
@@ -103,8 +103,8 @@ static ssize_t transient_duration_show(struct device *dev,
static ssize_t transient_duration_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
ssize_t ret;
@@ -119,8 +119,8 @@ static ssize_t transient_duration_store(struct device *dev,
static ssize_t transient_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
int state;
state = (transient_data->state == LED_FULL) ? 1 : 0;
@@ -130,8 +130,8 @@ static ssize_t transient_state_show(struct device *dev,
static ssize_t transient_state_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
ssize_t ret;
@@ -152,82 +152,46 @@ static DEVICE_ATTR(duration, 0644, transient_duration_show,
transient_duration_store);
static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
-static void transient_trig_activate(struct led_classdev *led_cdev)
+static struct attribute *transient_trig_attrs[] = {
+ &dev_attr_activate.attr,
+ &dev_attr_duration.attr,
+ &dev_attr_state.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(transient_trig);
+
+static int transient_trig_activate(struct led_classdev *led_cdev)
{
- int rc;
struct transient_trig_data *tdata;
tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
- if (!tdata) {
- dev_err(led_cdev->dev,
- "unable to allocate transient trigger\n");
- return;
- }
- led_cdev->trigger_data = tdata;
- tdata->led_cdev = led_cdev;
+ if (!tdata)
+ return -ENOMEM;
- rc = device_create_file(led_cdev->dev, &dev_attr_activate);
- if (rc)
- goto err_out;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_duration);
- if (rc)
- goto err_out_duration;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_state);
- if (rc)
- goto err_out_state;
+ led_set_trigger_data(led_cdev, tdata);
+ tdata->led_cdev = led_cdev;
timer_setup(&tdata->timer, transient_timer_function, 0);
- led_cdev->activated = true;
-
- return;
-
-err_out_state:
- device_remove_file(led_cdev->dev, &dev_attr_duration);
-err_out_duration:
- device_remove_file(led_cdev->dev, &dev_attr_activate);
-err_out:
- dev_err(led_cdev->dev, "unable to register transient trigger\n");
- led_cdev->trigger_data = NULL;
- kfree(tdata);
+
+ return 0;
}
static void transient_trig_deactivate(struct led_classdev *led_cdev)
{
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data = led_get_trigger_data(led_cdev);
- if (led_cdev->activated) {
- del_timer_sync(&transient_data->timer);
- led_set_brightness_nosleep(led_cdev,
- transient_data->restore_state);
- device_remove_file(led_cdev->dev, &dev_attr_activate);
- device_remove_file(led_cdev->dev, &dev_attr_duration);
- device_remove_file(led_cdev->dev, &dev_attr_state);
- led_cdev->trigger_data = NULL;
- led_cdev->activated = false;
- kfree(transient_data);
- }
+ del_timer_sync(&transient_data->timer);
+ led_set_brightness_nosleep(led_cdev, transient_data->restore_state);
+ kfree(transient_data);
}
static struct led_trigger transient_trigger = {
.name = "transient",
.activate = transient_trig_activate,
.deactivate = transient_trig_deactivate,
+ .groups = transient_trig_groups,
};
-
-static int __init transient_trig_init(void)
-{
- return led_trigger_register(&transient_trigger);
-}
-
-static void __exit transient_trig_exit(void)
-{
- led_trigger_unregister(&transient_trigger);
-}
-
-module_init(transient_trig_init);
-module_exit(transient_trig_exit);
+module_led_trigger(transient_trigger);
MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
MODULE_DESCRIPTION("Transient LED trigger");
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 9c03f35d9df1..439bf90d084d 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -17,23 +17,25 @@ menuconfig NVM
if NVM
-config NVM_DEBUG
- bool "Open-Channel SSD debugging support"
- default n
- ---help---
- Exposes a debug management interface to create/remove targets at:
+config NVM_PBLK
+ tristate "Physical Block Device Open-Channel SSD target"
+ help
+ Allows an open-channel SSD to be exposed as a block device to the
+ host. The target assumes the device exposes raw flash and must be
+ explicitly managed by the host.
- /sys/module/lnvm/parameters/configure_debug
+ Please note the disk format is considered EXPERIMENTAL for now.
- It is required to create/remove targets without IOCTLs.
+if NVM_PBLK
-config NVM_PBLK
- tristate "Physical Block Device Open-Channel SSD target"
- ---help---
- Allows an open-channel SSD to be exposed as a block device to the
- host. The target assumes the device exposes raw flash and must be
- explicitly managed by the host.
+config NVM_PBLK_DEBUG
+ bool "PBlk Debug Support"
+ default n
+ help
+ Enables debug support for pblk. This includes extra checks, more
+ vocal error messages, and extra tracking fields in the pblk sysfs
+ entries.
- Please note the disk format is considered EXPERIMENTAL for now.
+endif # NVM_PBLK
endif # NVM
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index b1c6d7eb6115..f565a56b898a 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -27,7 +27,8 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
int nr_entries = pblk_get_secs(bio);
int i, ret;
- generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+ generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
+ &pblk->disk->part0);
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
@@ -67,7 +68,7 @@ retry:
atomic64_add(nr_entries, &pblk->user_wa);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_entries, &pblk->inflight_writes);
atomic_long_add(nr_entries, &pblk->req_writes);
#endif
@@ -75,7 +76,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
- generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
+ generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
return ret;
}
@@ -123,7 +124,7 @@ retry:
atomic64_add(valid_entries, &pblk->gc_wa);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(valid_entries, &pblk->inflight_writes);
atomic_long_add(valid_entries, &pblk->recov_gc_writes);
#endif
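The accounting calls above switch from the old read/write flag to a request operation: with the op-based block API of this series, generic_start_io_acct()/generic_end_io_acct() take a REQ_OP_* value and derive the direction from it. A sketch of the pair as used in pblk_write_to_cache(); the signatures are assumed from the surrounding code:

	unsigned long start_time = jiffies;

	/* second argument is a request op, not a rw flag */
	generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
			      &pblk->disk->part0);

	/* ... copy the bio's sectors into the pblk write buffer ... */

	generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);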
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index ed9cc977c8b3..00984b486fea 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -35,7 +35,7 @@ static void pblk_line_mark_bb(struct work_struct *work)
line = &pblk->lines[pblk_ppa_to_line(*ppa)];
pos = pblk_ppa_to_pos(&dev->geo, *ppa);
- pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
+ pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
line->id, pos);
}
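Throughout pblk-core.c, pr_err()/pr_debug() with a hard-coded "pblk: " prefix become pblk_err()/pblk_debug(), which tag each message with the specific pblk instance so logs from several targets can be told apart. Their probable shape (defined in drivers/lightnvm/pblk.h; the exact format string is an assumption):

	#define pblk_err(pblk, fmt, ...)					\
		pr_err("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)

	#define pblk_debug(pblk, fmt, ...)					\
		pr_debug("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)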
@@ -51,12 +51,12 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
struct ppa_addr *ppa;
int pos = pblk_ppa_to_pos(geo, ppa_addr);
- pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
+ pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
atomic_long_inc(&pblk->erase_failed);
atomic_dec(&line->blk_in_line);
if (test_and_set_bit(pos, line->blk_bitmap))
- pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
+ pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
line->id, pos);
/* Not necessary to mark bad blocks on 2.0 spec. */
@@ -194,7 +194,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
u64 paddr;
int line_id;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a device address */
BUG_ON(pblk_addr_in_cache(ppa));
BUG_ON(pblk_ppa_empty(ppa));
@@ -264,6 +264,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
switch (type) {
case PBLK_WRITE:
kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
+ /* fall through */
case PBLK_WRITE_INT:
pool = &pblk->w_rq_pool;
break;
@@ -274,7 +275,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
pool = &pblk->e_rq_pool;
break;
default:
- pr_err("pblk: trying to free unknown rqd type\n");
+ pblk_err(pblk, "trying to free unknown rqd type\n");
return;
}
@@ -310,7 +311,7 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
if (ret != PBLK_EXPOSED_PAGE_SIZE) {
- pr_err("pblk: could not add page to bio\n");
+ pblk_err(pblk, "could not add page to bio\n");
mempool_free(page, &pblk->page_bio_pool);
goto err;
}
@@ -410,7 +411,7 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
line->state = PBLK_LINESTATE_CORRUPT;
line->gc_group = PBLK_LINEGC_NONE;
move_list = &l_mg->corrupt_list;
- pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
+ pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
line->id, vsc,
line->sec_in_line,
lm->high_thrs, lm->mid_thrs);
@@ -430,7 +431,7 @@ void pblk_discard(struct pblk *pblk, struct bio *bio)
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
atomic_long_inc(&pblk->write_failed);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
@@ -452,9 +453,9 @@ void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
atomic_long_inc(&pblk->read_failed);
break;
default:
- pr_err("pblk: unknown read error:%d\n", rqd->error);
+ pblk_err(pblk, "unknown read error:%d\n", rqd->error);
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
@@ -470,7 +471,7 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
atomic_inc(&pblk->inflight_io);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if (pblk_check_io(pblk, rqd))
return NVM_IO_ERR;
#endif
@@ -484,7 +485,7 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
atomic_inc(&pblk->inflight_io);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if (pblk_check_io(pblk, rqd))
return NVM_IO_ERR;
#endif
@@ -517,7 +518,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
for (i = 0; i < nr_secs; i++) {
page = vmalloc_to_page(kaddr);
if (!page) {
- pr_err("pblk: could not map vmalloc bio\n");
+ pblk_err(pblk, "could not map vmalloc bio\n");
bio_put(bio);
bio = ERR_PTR(-ENOMEM);
goto out;
@@ -525,7 +526,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
if (ret != PAGE_SIZE) {
- pr_err("pblk: could not add page to bio\n");
+ pblk_err(pblk, "could not add page to bio\n");
bio_put(bio);
bio = ERR_PTR(-ENOMEM);
goto out;
@@ -711,7 +712,7 @@ next_rq:
while (test_bit(pos, line->blk_bitmap)) {
paddr += min;
if (pblk_boundary_paddr_checks(pblk, paddr)) {
- pr_err("pblk: corrupt emeta line:%d\n",
+ pblk_err(pblk, "corrupt emeta line:%d\n",
line->id);
bio_put(bio);
ret = -EINTR;
@@ -723,7 +724,7 @@ next_rq:
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
- pr_err("pblk: corrupt emeta line:%d\n",
+ pblk_err(pblk, "corrupt emeta line:%d\n",
line->id);
bio_put(bio);
ret = -EINTR;
@@ -738,7 +739,7 @@ next_rq:
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
- pr_err("pblk: emeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_rqd_dma;
}
@@ -843,7 +844,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
*/
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
- pr_err("pblk: smeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_ppa_list;
}
@@ -905,7 +906,7 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- pr_err("pblk: could not sync erase line:%d,blk:%d\n",
+ pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
pblk_ppa_to_line(ppa),
pblk_ppa_to_pos(geo, ppa));
@@ -945,7 +946,7 @@ int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
ret = pblk_blk_erase_sync(pblk, ppa);
if (ret) {
- pr_err("pblk: failed to erase line %d\n", line->id);
+ pblk_err(pblk, "failed to erase line %d\n", line->id);
return ret;
}
} while (1);
@@ -1012,7 +1013,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
list_add_tail(&line->list, &l_mg->bad_list);
spin_unlock(&l_mg->free_lock);
- pr_debug("pblk: line %d is bad\n", line->id);
+ pblk_debug(pblk, "line %d is bad\n", line->id);
return 0;
}
@@ -1122,7 +1123,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
line->cur_sec = off + lm->smeta_sec;
if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
- pr_debug("pblk: line smeta I/O failed. Retry\n");
+ pblk_debug(pblk, "line smeta I/O failed. Retry\n");
return 0;
}
@@ -1154,7 +1155,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
- pr_err("pblk: unexpected line %d is bad\n", line->id);
+ pblk_err(pblk, "unexpected line %d is bad\n", line->id);
return 0;
}
@@ -1299,7 +1300,7 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)
retry:
if (list_empty(&l_mg->free_list)) {
- pr_err("pblk: no free lines\n");
+ pblk_err(pblk, "no free lines\n");
return NULL;
}
@@ -1315,7 +1316,7 @@ retry:
list_add_tail(&line->list, &l_mg->bad_list);
- pr_debug("pblk: line %d is bad\n", line->id);
+ pblk_debug(pblk, "line %d is bad\n", line->id);
goto retry;
}
@@ -1329,7 +1330,7 @@ retry:
list_add(&line->list, &l_mg->corrupt_list);
goto retry;
default:
- pr_err("pblk: failed to prepare line %d\n", line->id);
+ pblk_err(pblk, "failed to prepare line %d\n", line->id);
list_add(&line->list, &l_mg->free_list);
l_mg->nr_free_lines++;
return NULL;
@@ -1477,7 +1478,7 @@ static void pblk_line_close_meta_sync(struct pblk *pblk)
ret = pblk_submit_meta_io(pblk, line);
if (ret) {
- pr_err("pblk: sync meta line %d failed (%d)\n",
+ pblk_err(pblk, "sync meta line %d failed (%d)\n",
line->id, ret);
return;
}
@@ -1507,7 +1508,7 @@ void __pblk_pipeline_flush(struct pblk *pblk)
ret = pblk_recov_pad(pblk);
if (ret) {
- pr_err("pblk: could not close data on teardown(%d)\n", ret);
+ pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
return;
}
@@ -1687,7 +1688,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- pr_err("pblk: could not async erase line:%d,blk:%d\n",
+ pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
pblk_ppa_to_line(ppa),
pblk_ppa_to_pos(geo, ppa));
}
@@ -1726,7 +1727,7 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
struct list_head *move_list;
int i;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
"pblk: corrupt closed line %d\n", line->id);
#endif
@@ -1856,7 +1857,7 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
* Only send one inflight I/O per LUN. Since we map at a page
* granularity, all ppas in the I/O will map to the same LUN
*/
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
int i;
for (i = 1; i < nr_ppas; i++)
@@ -1866,7 +1867,8 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
if (ret == -ETIME || ret == -EINTR)
- pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
+ pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
+ -ret);
}
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
@@ -1901,7 +1903,7 @@ void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
struct pblk_lun *rlun;
int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
int i;
for (i = 1; i < nr_ppas; i++)
@@ -1951,7 +1953,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(!pblk_addr_in_cache(ppa));
BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
@@ -1966,7 +1968,7 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
struct ppa_addr ppa_l2p, ppa_gc;
int ret = 1;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(!pblk_addr_in_cache(ppa_new));
BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
@@ -2003,14 +2005,14 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
{
struct ppa_addr ppa_l2p;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a device address */
BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
/* Invalidate and discard padded entries */
if (lba == ADDR_EMPTY) {
atomic64_inc(&pblk->pad_wa);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->padded_wb);
#endif
if (!pblk_ppa_empty(ppa_mapped))
@@ -2036,7 +2038,7 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
goto out;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 080469d90b40..157c2567c9e8 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -90,7 +90,7 @@ static void pblk_gc_line_ws(struct work_struct *work)
gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
if (!gc_rq->data) {
- pr_err("pblk: could not GC line:%d (%d/%d)\n",
+ pblk_err(pblk, "could not GC line:%d (%d/%d)\n",
line->id, *line->vsc, gc_rq->nr_secs);
goto out;
}
@@ -98,7 +98,7 @@ static void pblk_gc_line_ws(struct work_struct *work)
/* Read from GC victim block */
ret = pblk_submit_read_gc(pblk, gc_rq);
if (ret) {
- pr_err("pblk: failed GC read in line:%d (err:%d)\n",
+ pblk_err(pblk, "failed GC read in line:%d (err:%d)\n",
line->id, ret);
goto out;
}
@@ -146,7 +146,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
ret = pblk_line_read_emeta(pblk, line, emeta_buf);
if (ret) {
- pr_err("pblk: line %d read emeta failed (%d)\n",
+ pblk_err(pblk, "line %d read emeta failed (%d)\n",
line->id, ret);
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
return NULL;
@@ -160,7 +160,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
ret = pblk_recov_check_emeta(pblk, emeta_buf);
if (ret) {
- pr_err("pblk: inconsistent emeta (line %d)\n",
+ pblk_err(pblk, "inconsistent emeta (line %d)\n",
line->id);
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
return NULL;
@@ -201,7 +201,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
} else {
lba_list = get_lba_list_from_emeta(pblk, line);
if (!lba_list) {
- pr_err("pblk: could not interpret emeta (line %d)\n",
+ pblk_err(pblk, "could not interpret emeta (line %d)\n",
line->id);
goto fail_free_invalid_bitmap;
}
@@ -213,7 +213,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
spin_unlock(&line->lock);
if (sec_left < 0) {
- pr_err("pblk: corrupted GC line (%d)\n", line->id);
+ pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
goto fail_free_lba_list;
}
@@ -289,7 +289,7 @@ fail_free_ws:
kref_put(&line->ref, pblk_line_put);
atomic_dec(&gc->read_inflight_gc);
- pr_err("pblk: Failed to GC line %d\n", line->id);
+ pblk_err(pblk, "failed to GC line %d\n", line->id);
}
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
@@ -297,7 +297,7 @@ static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
struct pblk_gc *gc = &pblk->gc;
struct pblk_line_ws *line_ws;
- pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
+ pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);
line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
if (!line_ws)
@@ -351,7 +351,7 @@ static int pblk_gc_read(struct pblk *pblk)
pblk_gc_kick(pblk);
if (pblk_gc_line(pblk, line))
- pr_err("pblk: failed to GC line %d\n", line->id);
+ pblk_err(pblk, "failed to GC line %d\n", line->id);
return 0;
}
@@ -522,8 +522,8 @@ static int pblk_gc_reader_ts(void *data)
io_schedule();
}
-#ifdef CONFIG_NVM_DEBUG
- pr_info("pblk: flushing gc pipeline, %d lines left\n",
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
atomic_read(&gc->pipeline_gc));
#endif
@@ -540,7 +540,7 @@ static int pblk_gc_reader_ts(void *data)
static void pblk_gc_start(struct pblk *pblk)
{
pblk->gc.gc_active = 1;
- pr_debug("pblk: gc start\n");
+ pblk_debug(pblk, "gc start\n");
}
void pblk_gc_should_start(struct pblk *pblk)
@@ -605,14 +605,14 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
if (IS_ERR(gc->gc_ts)) {
- pr_err("pblk: could not allocate GC main kthread\n");
+ pblk_err(pblk, "could not allocate GC main kthread\n");
return PTR_ERR(gc->gc_ts);
}
gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
"pblk-gc-writer-ts");
if (IS_ERR(gc->gc_writer_ts)) {
- pr_err("pblk: could not allocate GC writer kthread\n");
+ pblk_err(pblk, "could not allocate GC writer kthread\n");
ret = PTR_ERR(gc->gc_writer_ts);
goto fail_free_main_kthread;
}
@@ -620,7 +620,7 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
"pblk-gc-reader-ts");
if (IS_ERR(gc->gc_reader_ts)) {
- pr_err("pblk: could not allocate GC reader kthread\n");
+ pblk_err(pblk, "could not allocate GC reader kthread\n");
ret = PTR_ERR(gc->gc_reader_ts);
goto fail_free_writer_kthread;
}
@@ -641,7 +641,7 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
if (!gc->gc_line_reader_wq) {
- pr_err("pblk: could not allocate GC line reader workqueue\n");
+ pblk_err(pblk, "could not allocate GC line reader workqueue\n");
ret = -ENOMEM;
goto fail_free_reader_kthread;
}
@@ -650,7 +650,7 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!gc->gc_reader_wq) {
- pr_err("pblk: could not allocate GC reader workqueue\n");
+ pblk_err(pblk, "could not allocate GC reader workqueue\n");
ret = -ENOMEM;
goto fail_free_reader_line_wq;
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index b57f764d6a16..537e98f2b24a 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -91,7 +91,7 @@ static size_t pblk_trans_map_size(struct pblk *pblk)
return entry_size * pblk->rl.nr_secs;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
size_t map_size;
@@ -117,13 +117,13 @@ static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
} else {
line = pblk_recov_l2p(pblk);
if (IS_ERR(line)) {
- pr_err("pblk: could not recover l2p table\n");
+ pblk_err(pblk, "could not recover l2p table\n");
return -EFAULT;
}
}
-#ifdef CONFIG_NVM_DEBUG
- pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif
/* Free full lines directly as GC has not been started yet */
@@ -166,7 +166,7 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
static void pblk_rwb_free(struct pblk *pblk)
{
if (pblk_rb_tear_down_check(&pblk->rwb))
- pr_err("pblk: write buffer error on tear down\n");
+ pblk_err(pblk, "write buffer error on tear down\n");
pblk_rb_data_free(&pblk->rwb);
vfree(pblk_rb_entries_ref(&pblk->rwb));
@@ -179,11 +179,14 @@ static int pblk_rwb_init(struct pblk *pblk)
struct pblk_rb_entry *entries;
unsigned long nr_entries, buffer_size;
unsigned int power_size, power_seg_sz;
+ int pgs_in_buffer;
- if (write_buffer_size && (write_buffer_size > pblk->pgs_in_buffer))
+ pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns;
+
+ if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
buffer_size = write_buffer_size;
else
- buffer_size = pblk->pgs_in_buffer;
+ buffer_size = pgs_in_buffer;
nr_entries = pblk_rb_calculate_size(buffer_size);
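
With pblk->pgs_in_buffer gone (see the pblk.h hunk later in this patch), the buffer size is now derived on the spot, and ws_opt participates through max(): a device whose optimal write size exceeds its cache-unit requirement gets a correspondingly larger ring. A worked example with invented geometry numbers — the field names mirror nvm_geo, the values do not come from any real device:

#include <stdio.h>

/* Toy model of the sizing rule above; the three inputs mirror the
 * nvm_geo fields used in pblk_rwb_init(), the values are hypothetical. */
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int mw_cunits = 8;	/* cache units needed per LUN (assumed) */
	int ws_opt = 16;	/* optimal write size in sectors (assumed) */
	int all_luns = 64;	/* LUNs in the instance (assumed) */

	int pgs_in_buffer = max_int(mw_cunits, ws_opt) * all_luns;

	/* 16 * 64 = 1024 pages: ws_opt dominates when it exceeds
	 * mw_cunits, a case the old "mw_cunits * all_luns" formula
	 * under-sized. */
	printf("pgs_in_buffer = %d\n", pgs_in_buffer);
	return 0;
}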
@@ -200,7 +203,8 @@ static int pblk_rwb_init(struct pblk *pblk)
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64
-static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
+static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
+ struct nvm_addrf_12 *dst)
{
struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
int power_len;
@@ -208,14 +212,14 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
/* Re-calculate channel and lun format to adapt to configuration */
power_len = get_count_order(geo->num_ch);
if (1 << power_len != geo->num_ch) {
- pr_err("pblk: supports only power-of-two channel config.\n");
+ pblk_err(pblk, "supports only power-of-two channel config.\n");
return -EINVAL;
}
dst->ch_len = power_len;
power_len = get_count_order(geo->num_lun);
if (1 << power_len != geo->num_lun) {
- pr_err("pblk: supports only power-of-two LUN config.\n");
+ pblk_err(pblk, "supports only power-of-two LUN config.\n");
return -EINVAL;
}
dst->lun_len = power_len;
@@ -282,18 +286,19 @@ static int pblk_set_addrf(struct pblk *pblk)
case NVM_OCSSD_SPEC_12:
div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
if (mod) {
- pr_err("pblk: bad configuration of sectors/pages\n");
+ pblk_err(pblk, "bad configuration of sectors/pages\n");
return -EINVAL;
}
- pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf);
+ pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
+ (void *)&pblk->addrf);
break;
case NVM_OCSSD_SPEC_20:
pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
- &pblk->uaddrf);
+ &pblk->uaddrf);
break;
default:
- pr_err("pblk: OCSSD revision not supported (%d)\n",
+ pblk_err(pblk, "OCSSD revision not supported (%d)\n",
geo->version);
return -EINVAL;
}
@@ -366,15 +371,13 @@ static int pblk_core_init(struct pblk *pblk)
atomic64_set(&pblk->nr_flush, 0);
pblk->nr_flush_rst = 0;
- pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns;
-
pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
- pr_err("pblk: vector list too big(%u > %u)\n",
+ pblk_err(pblk, "vector list too big(%u > %u)\n",
pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
return -EINVAL;
}
@@ -607,7 +610,7 @@ static int pblk_luns_init(struct pblk *pblk)
/* TODO: Implement unbalanced LUN support */
if (geo->num_lun < 0) {
- pr_err("pblk: unbalanced LUN config.\n");
+ pblk_err(pblk, "unbalanced LUN config.\n");
return -EINVAL;
}
@@ -716,10 +719,11 @@ static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line,
/*
* In 1.2 spec. chunk state is not persisted by the device. Thus
- * some of the values are reset each time pblk is instantiated.
+ * some of the values are reset each time pblk is instantiated,
+ * so we have to assume that the block is closed.
*/
if (lun_bb_meta[line->id] == NVM_BLK_T_FREE)
- chunk->state = NVM_CHK_ST_FREE;
+ chunk->state = NVM_CHK_ST_CLOSED;
else
chunk->state = NVM_CHK_ST_OFFLINE;
@@ -1026,7 +1030,7 @@ add_emeta_page:
lm->emeta_sec[0], geo->clba);
if (lm->min_blk_line > lm->blk_per_line) {
- pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
+ pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
lm->blk_per_line);
return -EINVAL;
}
@@ -1078,7 +1082,7 @@ static int pblk_lines_init(struct pblk *pblk)
}
if (!nr_free_chks) {
- pr_err("pblk: too many bad blocks prevent for sane instance\n");
+ pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
return -EINTR;
}
@@ -1108,7 +1112,7 @@ static int pblk_writer_init(struct pblk *pblk)
int err = PTR_ERR(pblk->writer_ts);
if (err != -EINTR)
- pr_err("pblk: could not allocate writer kthread (%d)\n",
+ pblk_err(pblk, "could not allocate writer kthread (%d)\n",
err);
return err;
}
@@ -1154,7 +1158,7 @@ static void pblk_tear_down(struct pblk *pblk, bool graceful)
pblk_rb_sync_l2p(&pblk->rwb);
pblk_rl_free(&pblk->rl);
- pr_debug("pblk: consistent tear down (graceful:%d)\n", graceful);
+ pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}
static void pblk_exit(void *private, bool graceful)
@@ -1165,8 +1169,8 @@ static void pblk_exit(void *private, bool graceful)
pblk_gc_exit(pblk, graceful);
pblk_tear_down(pblk, graceful);
-#ifdef CONFIG_NVM_DEBUG
- pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif
pblk_free(pblk);
@@ -1189,34 +1193,35 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
struct pblk *pblk;
int ret;
- /* pblk supports 1.2 and 2.0 versions */
+ pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
+ if (!pblk)
+ return ERR_PTR(-ENOMEM);
+
+ pblk->dev = dev;
+ pblk->disk = tdisk;
+ pblk->state = PBLK_STATE_RUNNING;
+ pblk->gc.gc_enabled = 0;
+
if (!(geo->version == NVM_OCSSD_SPEC_12 ||
geo->version == NVM_OCSSD_SPEC_20)) {
- pr_err("pblk: OCSSD version not supported (%u)\n",
+ pblk_err(pblk, "OCSSD version not supported (%u)\n",
geo->version);
+ kfree(pblk);
return ERR_PTR(-EINVAL);
}
if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) {
- pr_err("pblk: host-side L2P table not supported. (%x)\n",
+ pblk_err(pblk, "host-side L2P table not supported. (%x)\n",
geo->dom);
+ kfree(pblk);
return ERR_PTR(-EINVAL);
}
- pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
- if (!pblk)
- return ERR_PTR(-ENOMEM);
-
- pblk->dev = dev;
- pblk->disk = tdisk;
- pblk->state = PBLK_STATE_RUNNING;
- pblk->gc.gc_enabled = 0;
-
spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_set(&pblk->inflight_writes, 0);
atomic_long_set(&pblk->padded_writes, 0);
atomic_long_set(&pblk->padded_wb, 0);
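
The hunk above moves the kzalloc of struct pblk ahead of the version and L2P checks so that even the earliest failures can be reported through pblk_err(), which needs pblk->disk; in exchange, both early-return paths must now kfree(pblk) before bailing out. A generic sketch of that allocate-first pattern — the names and the version test are illustrative, not the lightnvm API:

#include <stdlib.h>
#include <stdio.h>

struct ctx { const char *name; };

/* Sketch: allocate the context first so error reports can be tagged
 * with it, then free it on every early-exit path. */
static struct ctx *ctx_init(const char *name, int version)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->name = name;

	if (version != 12 && version != 20) {
		fprintf(stderr, "%s: version not supported (%d)\n",
			c->name, version);
		free(c);	/* the early exit now owns the cleanup */
		return NULL;
	}
	return c;
}

int main(void)
{
	struct ctx *c = ctx_init("pblk0", 13);

	if (!c)
		return 1;
	free(c);
	return 0;
}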
@@ -1241,38 +1246,38 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
ret = pblk_core_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize core\n");
+ pblk_err(pblk, "could not initialize core\n");
goto fail;
}
ret = pblk_lines_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize lines\n");
+ pblk_err(pblk, "could not initialize lines\n");
goto fail_free_core;
}
ret = pblk_rwb_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize write buffer\n");
+ pblk_err(pblk, "could not initialize write buffer\n");
goto fail_free_lines;
}
ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
if (ret) {
- pr_err("pblk: could not initialize maps\n");
+ pblk_err(pblk, "could not initialize maps\n");
goto fail_free_rwb;
}
ret = pblk_writer_init(pblk);
if (ret) {
if (ret != -EINTR)
- pr_err("pblk: could not initialize write thread\n");
+ pblk_err(pblk, "could not initialize write thread\n");
goto fail_free_l2p;
}
ret = pblk_gc_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize gc\n");
+ pblk_err(pblk, "could not initialize gc\n");
goto fail_stop_writer;
}
@@ -1287,8 +1292,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
- pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
- tdisk->disk_name,
+ pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
geo->all_luns, pblk->l_mg.nr_lines,
(unsigned long long)pblk->rl.nr_secs,
pblk->rwb.nr_entries);
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 55e9442a99e2..f6eec0212dfc 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -111,7 +111,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
} while (iter > 0);
up_write(&pblk_rb_lock);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_set(&rb->inflight_flush_point, 0);
#endif
@@ -308,7 +308,7 @@ void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
entry = &rb->entries[ring_pos];
flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Caller must guarantee that the entry is free */
BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif
@@ -332,7 +332,7 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
entry = &rb->entries[ring_pos];
flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Caller must guarantee that the entry is free */
BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif
@@ -362,7 +362,7 @@ static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
return 0;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_inc(&rb->inflight_flush_point);
#endif
@@ -547,7 +547,7 @@ try:
page = virt_to_page(entry->data);
if (!page) {
- pr_err("pblk: could not allocate write bio page\n");
+ pblk_err(pblk, "could not allocate write bio page\n");
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
/* Release flags on context. Protect from writes */
@@ -557,7 +557,7 @@ try:
if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
rb->seg_size) {
- pr_err("pblk: could not add page to write bio\n");
+ pblk_err(pblk, "could not add page to write bio\n");
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
/* Release flags on context. Protect from writes */
@@ -576,19 +576,19 @@ try:
if (pad) {
if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
- pr_err("pblk: could not pad page in write bio\n");
+ pblk_err(pblk, "could not pad page in write bio\n");
return NVM_IO_ERR;
}
if (pad < pblk->min_write_pgs)
atomic64_inc(&pblk->pad_dist[pad - 1]);
else
- pr_warn("pblk: padding more than min. sectors\n");
+ pblk_warn(pblk, "padding more than min. sectors\n");
atomic64_add(pad, &pblk->pad_wa);
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(pad, &pblk->padded_writes);
#endif
@@ -613,7 +613,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
int ret = 1;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Caller must ensure that the access will not cause an overflow */
BUG_ON(pos >= rb->nr_entries);
#endif
@@ -820,7 +820,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->subm,
rb->sync,
rb->l2p_update,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_read(&rb->inflight_flush_point),
#else
0,
@@ -838,7 +838,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->subm,
rb->sync,
rb->l2p_update,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_read(&rb->inflight_flush_point),
#else
0,
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 18694694e5f0..5a46d7f9302f 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -28,7 +28,7 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa,
int bio_iter, bool advanced_bio)
{
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(pblk_ppa_empty(ppa));
BUG_ON(!pblk_addr_in_cache(ppa));
@@ -79,7 +79,7 @@ retry:
WARN_ON(test_and_set_bit(i, read_bitmap));
meta_list[i].lba = cpu_to_le64(lba);
advanced_bio = true;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
} else {
@@ -97,7 +97,7 @@ next:
else
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
@@ -117,13 +117,13 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
continue;
if (lba != blba + i) {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
struct ppa_addr *p;
p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
- print_ppa(&pblk->dev->geo, p, "seq", i);
+ print_ppa(pblk, p, "seq", i);
#endif
- pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
+ pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
lba, (u64)blba + i);
WARN_ON(1);
}
@@ -149,14 +149,14 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
meta_lba = le64_to_cpu(meta_lba_list[j].lba);
if (lba != meta_lba) {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
struct ppa_addr *p;
int nr_ppas = rqd->nr_ppas;
p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr;
- print_ppa(&pblk->dev->geo, p, "seq", j);
+ print_ppa(pblk, p, "seq", j);
#endif
- pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
+ pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
lba, meta_lba);
WARN_ON(1);
}
@@ -185,7 +185,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
static void pblk_end_user_read(struct bio *bio)
{
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
bio_endio(bio);
@@ -199,7 +199,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
struct bio *int_bio = rqd->bio;
unsigned long start_time = r_ctx->start_time;
- generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
+ generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -212,7 +212,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
if (put_line)
pblk_read_put_rqd_kref(pblk, rqd);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif
@@ -231,74 +231,36 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
- struct bio *orig_bio, unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static void pblk_end_partial_read(struct nvm_rq *rqd)
{
- struct pblk_sec_meta *meta_list = rqd->meta_list;
- struct bio *new_bio;
+ struct pblk *pblk = rqd->private;
+ struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+ struct pblk_pr_ctx *pr_ctx = r_ctx->private;
+ struct bio *new_bio = rqd->bio;
+ struct bio *bio = pr_ctx->orig_bio;
struct bio_vec src_bv, dst_bv;
- void *ppa_ptr = NULL;
- void *src_p, *dst_p;
- dma_addr_t dma_ppa_list = 0;
- __le64 *lba_list_mem, *lba_list_media;
- int nr_secs = rqd->nr_ppas;
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
+ int bio_init_idx = pr_ctx->bio_init_idx;
+ unsigned long *read_bitmap = pr_ctx->bitmap;
+ int nr_secs = pr_ctx->orig_nr_secs;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
- int i, ret, hole;
-
- /* Re-use allocated memory for intermediate lbas */
- lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
- lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
-
- new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-
- if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
- goto fail_add_pages;
-
- if (nr_holes != new_bio->bi_vcnt) {
- pr_err("pblk: malformed bio\n");
- goto fail;
- }
-
- for (i = 0; i < nr_secs; i++)
- lba_list_mem[i] = meta_list[i].lba;
-
- new_bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-
- rqd->bio = new_bio;
- rqd->nr_ppas = nr_holes;
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-
- if (unlikely(nr_holes == 1)) {
- ppa_ptr = rqd->ppa_list;
- dma_ppa_list = rqd->dma_ppa_list;
- rqd->ppa_addr = rqd->ppa_list[0];
- }
-
- ret = pblk_submit_io_sync(pblk, rqd);
- if (ret) {
- bio_put(rqd->bio);
- pr_err("pblk: sync read IO submission failed\n");
- goto fail;
- }
-
- if (rqd->error) {
- atomic_long_inc(&pblk->read_failed);
-#ifdef CONFIG_NVM_DEBUG
- pblk_print_failed_rqd(pblk, rqd, rqd->error);
-#endif
- }
+ __le64 *lba_list_mem, *lba_list_media;
+ void *src_p, *dst_p;
+ int hole, i;
if (unlikely(nr_holes == 1)) {
struct ppa_addr ppa;
ppa = rqd->ppa_addr;
- rqd->ppa_list = ppa_ptr;
- rqd->dma_ppa_list = dma_ppa_list;
+ rqd->ppa_list = pr_ctx->ppa_ptr;
+ rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
rqd->ppa_list[0] = ppa;
}
+ /* Re-use allocated memory for intermediate lbas */
+ lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+ lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
+
for (i = 0; i < nr_secs; i++) {
lba_list_media[i] = meta_list[i].lba;
meta_list[i].lba = lba_list_mem[i];
@@ -316,7 +278,7 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
meta_list[hole].lba = lba_list_media[i];
src_bv = new_bio->bi_io_vec[i++];
- dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];
+ dst_bv = bio->bi_io_vec[bio_init_idx + hole];
src_p = kmap_atomic(src_bv.bv_page);
dst_p = kmap_atomic(dst_bv.bv_page);
@@ -334,19 +296,107 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
} while (hole < nr_secs);
bio_put(new_bio);
+ kfree(pr_ctx);
/* restore original request */
rqd->bio = NULL;
rqd->nr_ppas = nr_secs;
+ bio_endio(bio);
__pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_DONE;
+}
-fail:
- /* Free allocated pages in new bio */
+static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap,
+ int nr_holes)
+{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+ struct pblk_pr_ctx *pr_ctx;
+ struct bio *new_bio, *bio = r_ctx->private;
+ __le64 *lba_list_mem;
+ int nr_secs = rqd->nr_ppas;
+ int i;
+
+ /* Re-use allocated memory for intermediate lbas */
+ lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+
+ new_bio = bio_alloc(GFP_KERNEL, nr_holes);
+
+ if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
+ goto fail_bio_put;
+
+ if (nr_holes != new_bio->bi_vcnt) {
+ WARN_ONCE(1, "pblk: malformed bio\n");
+ goto fail_free_pages;
+ }
+
+ pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
+ if (!pr_ctx)
+ goto fail_free_pages;
+
+ for (i = 0; i < nr_secs; i++)
+ lba_list_mem[i] = meta_list[i].lba;
+
+ new_bio->bi_iter.bi_sector = 0; /* internal bio */
+ bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
+
+ rqd->bio = new_bio;
+ rqd->nr_ppas = nr_holes;
+ rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
+ pr_ctx->ppa_ptr = NULL;
+ pr_ctx->orig_bio = bio;
+ bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
+ pr_ctx->bio_init_idx = bio_init_idx;
+ pr_ctx->orig_nr_secs = nr_secs;
+ r_ctx->private = pr_ctx;
+
+ if (unlikely(nr_holes == 1)) {
+ pr_ctx->ppa_ptr = rqd->ppa_list;
+ pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
+ rqd->ppa_addr = rqd->ppa_list[0];
+ }
+ return 0;
+
+fail_free_pages:
pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
-fail_add_pages:
- pr_err("pblk: failed to perform partial read\n");
+fail_bio_put:
+ bio_put(new_bio);
+
+ return -ENOMEM;
+}
+
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap, int nr_secs)
+{
+ int nr_holes;
+ int ret;
+
+ nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
+
+ if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
+ nr_holes))
+ return NVM_IO_ERR;
+
+ rqd->end_io = pblk_end_partial_read;
+
+ ret = pblk_submit_io(pblk, rqd);
+ if (ret) {
+ bio_put(rqd->bio);
+ pblk_err(pblk, "partial read IO submission failed\n");
+ goto err;
+ }
+
+ return NVM_IO_OK;
+
+err:
+ pblk_err(pblk, "failed to perform partial read\n");
+
+ /* Free allocated pages in new bio */
+ pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
__pblk_end_io_read(pblk, rqd, false);
return NVM_IO_ERR;
}
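
The refactor above splits the old synchronous pblk_partial_read() into a setup step plus a completion callback: the request is now submitted with pblk_submit_io() and rqd->end_io = pblk_end_partial_read, with everything the callback needs stashed in struct pblk_pr_ctx. A small userspace model of that sync-to-async conversion — only the shape matches, the callback and context names are invented:

#include <stdio.h>
#include <stdlib.h>

/* Completion context: what the old sync code kept on the stack must
 * now survive until the callback runs (cf. struct pblk_pr_ctx). */
struct pr_ctx { int orig_nr_secs; };

struct req {
	void (*end_io)(struct req *);
	struct pr_ctx *private;
};

static void end_partial_read(struct req *rqd)
{
	struct pr_ctx *pr_ctx = rqd->private;

	printf("completed, %d sectors\n", pr_ctx->orig_nr_secs);
	free(pr_ctx);		/* the callback owns the context */
}

static int submit(struct req *rqd)
{
	rqd->end_io(rqd);	/* stand-in for device completion */
	return 0;
}

int main(void)
{
	struct req rqd;
	struct pr_ctx *pr_ctx = malloc(sizeof(*pr_ctx));

	if (!pr_ctx)
		return 1;
	pr_ctx->orig_nr_secs = 8;
	rqd.private = pr_ctx;	/* cf. pblk_setup_partial_read() */
	rqd.end_io = end_partial_read;
	return submit(&rqd);
}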
@@ -359,7 +409,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->inflight_reads);
#endif
@@ -382,7 +432,7 @@ retry:
WARN_ON(test_and_set_bit(0, read_bitmap));
meta_list[0].lba = cpu_to_le64(lba);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
} else {
@@ -401,7 +451,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
unsigned int bio_init_idx;
- unsigned long read_bitmap; /* Max 64 ppas per request */
+ DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
int ret = NVM_IO_ERR;
/* logic error: lba out-of-bounds. Ignore read request */
@@ -411,9 +461,10 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_ERR;
}
- generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+ generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
+ &pblk->disk->part0);
- bitmap_zero(&read_bitmap, nr_secs);
+ bitmap_zero(read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
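
read_bitmap grows here from a single unsigned long (the old "Max 64 ppas per request" comment) to a DECLARE_BITMAP(..., NVM_MAX_VLBA), which is simply an array of unsigned long sized to hold that many bits; callers consequently pass the array itself rather than &read_bitmap. A small illustration of the same idiom outside the kernel, with the BITS_PER_LONG arithmetic written out by hand (the NVM_MAX_VLBA value is assumed):

#include <stdio.h>
#include <limits.h>
#include <string.h>

#define NVM_MAX_VLBA	64	/* assumed value, for illustration only */
#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Same shape as the kernel's DECLARE_BITMAP(): an unsigned long array
 * wide enough for n bits, so it is not capped at 64 entries. */
#define DECLARE_BITMAP(name, n) unsigned long name[BITS_TO_LONGS(n)]

static void set_bit_(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);

	memset(read_bitmap, 0, sizeof(read_bitmap)); /* cf. bitmap_zero() */
	set_bit_(3, read_bitmap);	/* the array decays, no '&' needed */
	printf("word0 = %#lx\n", read_bitmap[0]);
	return 0;
}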
@@ -436,7 +487,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd->dma_meta_list);
if (!rqd->meta_list) {
- pr_err("pblk: not able to allocate ppa list\n");
+ pblk_err(pblk, "not able to allocate ppa list\n");
goto fail_rqd_free;
}
@@ -444,32 +495,32 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
- pblk_read_ppalist_rq(pblk, rqd, bio, blba, &read_bitmap);
+ pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
} else {
- pblk_read_rq(pblk, rqd, bio, blba, &read_bitmap);
+ pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
}
- if (bitmap_full(&read_bitmap, nr_secs)) {
+ if (bitmap_full(read_bitmap, nr_secs)) {
atomic_inc(&pblk->inflight_io);
__pblk_end_io_read(pblk, rqd, false);
return NVM_IO_DONE;
}
/* All sectors are to be read from the device */
- if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
+ if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
struct bio *int_bio = NULL;
/* Clone read bio to deal with read errors internally */
int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
if (!int_bio) {
- pr_err("pblk: could not clone read bio\n");
+ pblk_err(pblk, "could not clone read bio\n");
goto fail_end_io;
}
rqd->bio = int_bio;
if (pblk_submit_io(pblk, rqd)) {
- pr_err("pblk: read IO submission failed\n");
+ pblk_err(pblk, "read IO submission failed\n");
ret = NVM_IO_ERR;
goto fail_end_io;
}
@@ -480,8 +531,15 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- return pblk_partial_read(pblk, rqd, bio, bio_init_idx, &read_bitmap);
+ ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
+ nr_secs);
+ if (ret)
+ goto fail_meta_free;
+
+ return NVM_IO_OK;
+fail_meta_free:
+ nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
@@ -514,7 +572,7 @@ static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
@@ -548,7 +606,7 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
rqd->ppa_addr = ppa_l2p;
valid_secs = 1;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->inflight_reads);
#endif
@@ -595,7 +653,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
- pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
+ pblk_err(pblk, "could not allocate GC bio (%lu)\n",
+ PTR_ERR(bio));
goto err_free_dma;
}
@@ -609,7 +668,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (pblk_submit_io_sync(pblk, &rqd)) {
ret = -EIO;
- pr_err("pblk: GC read request failed\n");
+ pblk_err(pblk, "GC read request failed\n");
goto err_free_bio;
}
@@ -619,12 +678,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (rqd.error) {
atomic_long_inc(&pblk->read_failed_gc);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 3a5069183859..e232e47e1353 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -77,7 +77,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
}
if (nr_valid_lbas != nr_lbas)
- pr_err("pblk: line %d - inconsistent lba list(%llu/%llu)\n",
+ pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
line->id, nr_valid_lbas, nr_lbas);
line->left_msecs = 0;
@@ -184,7 +184,7 @@ next_read_rq:
/* If read fails, more padding is needed */
ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
return ret;
}
@@ -194,7 +194,7 @@ next_read_rq:
* we cannot recover from here. Need FTL log.
*/
if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
- pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
+ pblk_err(pblk, "L2P recovery failed (%d)\n", rqd->error);
return -EINTR;
}
@@ -273,7 +273,7 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
next_pad_rq:
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
if (rq_ppas < pblk->min_write_pgs) {
- pr_err("pblk: corrupted pad line %d\n", line->id);
+ pblk_err(pblk, "corrupted pad line %d\n", line->id);
goto fail_free_pad;
}
@@ -342,7 +342,7 @@ next_pad_rq:
ret = pblk_submit_io(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
goto fail_free_bio;
}
@@ -356,12 +356,12 @@ next_pad_rq:
if (!wait_for_completion_io_timeout(&pad_rq->wait,
msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: pad write timed out\n");
+ pblk_err(pblk, "pad write timed out\n");
ret = -ETIME;
}
if (!pblk_line_is_full(line))
- pr_err("pblk: corrupted padded line: %d\n", line->id);
+ pblk_err(pblk, "corrupted padded line: %d\n", line->id);
vfree(data);
free_rq:
@@ -461,7 +461,7 @@ next_rq:
ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
return ret;
}
@@ -501,11 +501,11 @@ next_rq:
ret = pblk_recov_pad_oob(pblk, line, pad_secs);
if (ret)
- pr_err("pblk: OOB padding failed (err:%d)\n", ret);
+ pblk_err(pblk, "OOB padding failed (err:%d)\n", ret);
ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
if (ret)
- pr_err("pblk: OOB read failed (err:%d)\n", ret);
+ pblk_err(pblk, "OOB read failed (err:%d)\n", ret);
left_ppas = 0;
}
@@ -592,7 +592,7 @@ next_rq:
ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
bio_put(bio);
return ret;
}
@@ -671,14 +671,14 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
ret = pblk_recov_scan_oob(pblk, line, p, &done);
if (ret) {
- pr_err("pblk: could not recover L2P from OOB\n");
+ pblk_err(pblk, "could not recover L2P from OOB\n");
goto out;
}
if (!done) {
ret = pblk_recov_scan_all_oob(pblk, line, p);
if (ret) {
- pr_err("pblk: could not recover L2P from OOB\n");
+ pblk_err(pblk, "could not recover L2P from OOB\n");
goto out;
}
}
@@ -737,14 +737,15 @@ static int pblk_recov_check_line_version(struct pblk *pblk,
struct line_header *header = &emeta->header;
if (header->version_major != EMETA_VERSION_MAJOR) {
- pr_err("pblk: line major version mismatch: %d, expected: %d\n",
- header->version_major, EMETA_VERSION_MAJOR);
+ pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
+ header->version_major, EMETA_VERSION_MAJOR);
return 1;
}
-#ifdef NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if (header->version_minor > EMETA_VERSION_MINOR)
- pr_info("pblk: newer line minor version found: %d\n", line_v);
+ pblk_info(pblk, "newer line minor version found: %d\n",
+ header->version_minor);
#endif
return 0;
@@ -851,7 +852,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
continue;
if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
- pr_err("pblk: found incompatible line version %u\n",
+ pblk_err(pblk, "found incompatible line version %u\n",
smeta_buf->header.version_major);
return ERR_PTR(-EINVAL);
}
@@ -863,7 +864,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
}
if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
- pr_debug("pblk: ignore line %u due to uuid mismatch\n",
+ pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
i);
continue;
}
@@ -887,7 +888,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
pblk_recov_line_add_ordered(&recov_list, line);
found_lines++;
- pr_debug("pblk: recovering data line %d, seq:%llu\n",
+ pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
line->id, smeta_buf->seq_nr);
}
@@ -947,7 +948,7 @@ next:
line->emeta = NULL;
} else {
if (open_lines > 1)
- pr_err("pblk: failed to recover L2P\n");
+ pblk_err(pblk, "failed to recover L2P\n");
open_lines++;
line->meta_line = meta_line;
@@ -976,7 +977,7 @@ next:
out:
if (found_lines != recovered_lines)
- pr_err("pblk: failed to recover all found lines %d/%d\n",
+ pblk_err(pblk, "failed to recover all found lines %d/%d\n",
found_lines, recovered_lines);
return data_line;
@@ -999,7 +1000,7 @@ int pblk_recov_pad(struct pblk *pblk)
ret = pblk_recov_pad_oob(pblk, line, left_msecs);
if (ret) {
- pr_err("pblk: Tear down padding failed (%d)\n", ret);
+ pblk_err(pblk, "tear down padding failed (%d)\n", ret);
return ret;
}
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 88a0a7c407aa..9fc3dfa168b4 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -268,7 +268,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
spin_unlock(&l_mg->free_lock);
if (nr_free_lines != free_line_cnt)
- pr_err("pblk: corrupted free line list:%d/%d\n",
+ pblk_err(pblk, "corrupted free line list:%d/%d\n",
nr_free_lines, free_line_cnt);
sz = snprintf(page, PAGE_SIZE - sz,
@@ -421,7 +421,7 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
return sz;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
{
return snprintf(page, PAGE_SIZE,
@@ -598,7 +598,7 @@ static struct attribute sys_padding_dist = {
.mode = 0644,
};
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
static struct attribute sys_stats_debug_attr = {
.name = "stats",
.mode = 0444,
@@ -619,7 +619,7 @@ static struct attribute *pblk_attrs[] = {
&sys_write_amp_mileage,
&sys_write_amp_trip,
&sys_padding_dist,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
&sys_stats_debug_attr,
#endif
NULL,
@@ -654,7 +654,7 @@ static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr,
return pblk_sysfs_get_write_amp_trip(pblk, buf);
else if (strcmp(attr->name, "padding_dist") == 0)
return pblk_sysfs_get_padding_dist(pblk, buf);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
else if (strcmp(attr->name, "stats") == 0)
return pblk_sysfs_stats_debug(pblk, buf);
#endif
@@ -697,8 +697,7 @@ int pblk_sysfs_init(struct gendisk *tdisk)
kobject_get(&parent_dev->kobj),
"%s", "pblk");
if (ret) {
- pr_err("pblk: could not register %s/pblk\n",
- tdisk->disk_name);
+ pblk_err(pblk, "could not register\n");
return ret;
}
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index f353e52941f5..ee774a86cf1e 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -38,7 +38,7 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
/* Release flags on context. Protect from writes */
smp_store_release(&w_ctx->flags, flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_dec(&rwb->inflight_flush_point);
#endif
}
@@ -51,7 +51,7 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
c_ctx->nr_padded);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif
@@ -78,7 +78,7 @@ static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
unsigned long flags;
unsigned long pos;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
@@ -196,7 +196,7 @@ static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
list_add_tail(&r_ctx->list, &pblk->resubmit_list);
spin_unlock(&pblk->resubmit_lock);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}
@@ -238,7 +238,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
if (!recovery) {
- pr_err("pblk: could not allocate recovery work\n");
+ pblk_err(pblk, "could not allocate recovery work\n");
return;
}
@@ -258,7 +258,7 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
pblk_end_w_fail(pblk, rqd);
return;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
else
WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
@@ -279,7 +279,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
if (rqd->error) {
pblk_log_write_err(pblk, rqd);
- pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
+ pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
line->w_err_gc->has_write_err = 1;
}
@@ -356,11 +356,11 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if ((!secs_to_sync && secs_to_flush)
|| (secs_to_sync < 0)
|| (secs_to_sync > secs_avail && !secs_to_flush)) {
- pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
+ pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
secs_avail, secs_to_sync, secs_to_flush);
}
#endif
@@ -397,7 +397,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
l_mg->emeta_alloc_type, GFP_KERNEL);
if (IS_ERR(bio)) {
- pr_err("pblk: failed to map emeta io");
+ pblk_err(pblk, "failed to map emeta io");
ret = PTR_ERR(bio);
goto fail_free_rqd;
}
@@ -428,7 +428,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
ret = pblk_submit_io(pblk, rqd);
if (ret) {
- pr_err("pblk: emeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
goto fail_rollback;
}
@@ -518,7 +518,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
/* Assign lbas to ppas and populate request structure */
err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
if (err) {
- pr_err("pblk: could not setup write request: %d\n", err);
+ pblk_err(pblk, "could not setup write request: %d\n", err);
return NVM_IO_ERR;
}
@@ -527,7 +527,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
/* Submit data write for current data line */
err = pblk_submit_io(pblk, rqd);
if (err) {
- pr_err("pblk: data I/O submission failed: %d\n", err);
+ pblk_err(pblk, "data I/O submission failed: %d\n", err);
return NVM_IO_ERR;
}
@@ -549,7 +549,8 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
/* Submit metadata write for previous data line */
err = pblk_submit_meta_io(pblk, meta_line);
if (err) {
- pr_err("pblk: metadata I/O submission failed: %d", err);
+ pblk_err(pblk, "metadata I/O submission failed: %d",
+ err);
return NVM_IO_ERR;
}
}
@@ -614,7 +615,7 @@ static int pblk_submit_write(struct pblk *pblk)
secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
secs_to_flush);
if (secs_to_sync > pblk->max_write_pgs) {
- pr_err("pblk: bad buffer sync calculation\n");
+ pblk_err(pblk, "bad buffer sync calculation\n");
return 1;
}
@@ -633,14 +634,14 @@ static int pblk_submit_write(struct pblk *pblk)
if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
secs_avail)) {
- pr_err("pblk: corrupted write bio\n");
+ pblk_err(pblk, "corrupted write bio\n");
goto fail_put_bio;
}
if (pblk_submit_io_set(pblk, rqd))
goto fail_free_bio;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 34cc1d64a9d4..4760af7b6499 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -119,6 +119,16 @@ struct pblk_g_ctx {
u64 lba;
};
+/* partial read context */
+struct pblk_pr_ctx {
+ struct bio *orig_bio;
+ DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
+ unsigned int orig_nr_secs;
+ unsigned int bio_init_idx;
+ void *ppa_ptr;
+ dma_addr_t dma_ppa_list;
+};
+
/* Pad context */
struct pblk_pad_rq {
struct pblk *pblk;
@@ -193,7 +203,7 @@ struct pblk_rb {
spinlock_t w_lock; /* Write lock */
spinlock_t s_lock; /* Sync lock */
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
@@ -608,9 +618,6 @@ struct pblk {
int min_write_pgs; /* Minimum amount of pages required by controller */
int max_write_pgs; /* Maximum amount of pages supported by controller */
- int pgs_in_buffer; /* Number of pages that need to be held in buffer to
- * guarantee successful reads.
- */
sector_t capacity; /* Device capacity when bad blocks are subtracted */
@@ -639,7 +646,7 @@ struct pblk {
u64 nr_flush_rst; /* Flushes reset value for pad dist.*/
atomic64_t nr_flush; /* Number of flush/fua I/O */
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Non-persistent debug counters, 4kb sector I/Os */
atomic_long_t inflight_writes; /* Inflight writes (user and gc) */
atomic_long_t padded_writes; /* Sectors padded due to flush/fua */
@@ -706,6 +713,15 @@ struct pblk_line_ws {
#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
+#define pblk_err(pblk, fmt, ...) \
+ pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+#define pblk_info(pblk, fmt, ...) \
+ pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+#define pblk_warn(pblk, fmt, ...) \
+ pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+#define pblk_debug(pblk, fmt, ...) \
+ pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+
/*
* pblk ring buffer operations
*/
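
The four pblk_* wrappers defined in the hunk above are what every pr_* call earlier in this patch was converted to: they prepend the instance's disk name so that messages from multiple pblk targets can be told apart. A compilable sketch of the same trick, with the structs reduced to the one field the macro needs and pr_err() stood in by fprintf:

#include <stdio.h>

struct gendisk { char disk_name[32]; };
struct pblk { struct gendisk *disk; };

/* Mirrors the pblk_err() definition above; ##__VA_ARGS__ is the GNU
 * extension the kernel relies on for empty argument lists. */
#define pblk_err(pblk, fmt, ...) \
	fprintf(stderr, "pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)

int main(void)
{
	struct gendisk disk = { .disk_name = "pblk0" };
	struct pblk pblk = { .disk = &disk };

	/* prints "pblk pblk0: no free lines" */
	pblk_err(&pblk, "no free lines\n");
	return 0;
}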
@@ -1282,20 +1298,22 @@ static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
return !(nr_secs % pblk->min_write_pgs);
}
-#ifdef CONFIG_NVM_DEBUG
-static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p,
+#ifdef CONFIG_NVM_PBLK_DEBUG
+static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
char *msg, int error)
{
+ struct nvm_geo *geo = &pblk->dev->geo;
+
if (p->c.is_cached) {
- pr_err("ppa: (%s: %x) cache line: %llu\n",
+ pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
msg, error, (u64)p->c.line);
} else if (geo->version == NVM_OCSSD_SPEC_12) {
- pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
+ pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
msg, error,
p->g.ch, p->g.lun, p->g.blk,
p->g.pg, p->g.pl, p->g.sec);
} else {
- pr_err("ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
+ pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
msg, error,
p->m.grp, p->m.pu, p->m.chk, p->m.sec);
}
@@ -1307,16 +1325,16 @@ static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
int bit = -1;
if (rqd->nr_ppas == 1) {
- print_ppa(&pblk->dev->geo, &rqd->ppa_addr, "rqd", error);
+ print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
return;
}
while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
bit + 1)) < rqd->nr_ppas) {
- print_ppa(&pblk->dev->geo, &rqd->ppa_list[bit], "rqd", error);
+ print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
}
- pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
+ pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
@@ -1347,7 +1365,7 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
continue;
}
- print_ppa(geo, ppa, "boundary", i);
+ print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);
return 1;
}
@@ -1377,7 +1395,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
- pr_err("pblk: bad ppa: line:%d,state:%d\n",
+ pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
line->id, line->state);
WARN_ON(1);
spin_unlock(&line->lock);
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 97a420c11eed..47c350cdfb12 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -39,17 +39,6 @@ config ADB_IOP
<http://www.angelfire.com/ca2/dev68k/iopdesc.html> to enable direct
support for it, say 'Y' here.
-config ADB_PMU68K
- bool "Include PMU (Powerbook) ADB driver"
- depends on ADB && MAC
- help
- Say Y here if want your kernel to support the m68k based Powerbooks.
- This includes the PowerBook 140, PowerBook 145, PowerBook 150,
- PowerBook 160, PowerBook 165, PowerBook 165c, PowerBook 170,
- PowerBook 180, PowerBook, 180c, PowerBook 190cs, PowerBook 520,
- PowerBook Duo 210, PowerBook Duo 230, PowerBook Duo 250,
- PowerBook Duo 270c, PowerBook Duo 280 and PowerBook Duo 280c.
-
# we want to change this to something like CONFIG_SYSCTRL_CUDA/PMU
config ADB_CUDA
bool "Support for Cuda/Egret based Macs and PowerMacs"
@@ -65,8 +54,8 @@ config ADB_CUDA
If unsure say Y.
config ADB_PMU
- bool "Support for PMU based PowerMacs"
- depends on PPC_PMAC
+ bool "Support for PMU based PowerMacs and PowerBooks"
+ depends on PPC_PMAC || MAC
help
On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the
PMU is an embedded microprocessor whose primary function is to
@@ -79,7 +68,7 @@ config ADB_PMU
config ADB_PMU_LED
bool "Support for the Power/iBook front LED"
- depends on ADB_PMU
+ depends on PPC_PMAC && ADB_PMU
select NEW_LEDS
select LEDS_CLASS
help
@@ -122,7 +111,7 @@ config PMAC_MEDIABAY
config PMAC_BACKLIGHT
bool "Backlight control for LCD screens"
- depends on ADB_PMU && FB = y && (BROKEN || !PPC64)
+ depends on PPC_PMAC && ADB_PMU && FB = y && (BROKEN || !PPC64)
select FB_BACKLIGHT
help
Say Y here to enable Macintosh specific extensions of the generic
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index ee803638e595..49819b1b6f20 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_PMAC_SMU) += smu.o
obj-$(CONFIG_ADB) += adb.o
obj-$(CONFIG_ADB_MACII) += via-macii.o
obj-$(CONFIG_ADB_IOP) += adb-iop.o
-obj-$(CONFIG_ADB_PMU68K) += via-pmu68k.o
obj-$(CONFIG_ADB_MACIO) += macio-adb.o
obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 4c8097e0e6fe..76e98f0f7a3e 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -65,7 +65,7 @@ static struct adb_driver *adb_driver_list[] = {
#ifdef CONFIG_ADB_IOP
&adb_iop_driver,
#endif
-#if defined(CONFIG_ADB_PMU) || defined(CONFIG_ADB_PMU68K)
+#ifdef CONFIG_ADB_PMU
&via_pmu_driver,
#endif
#ifdef CONFIG_ADB_MACIO
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 68dcbcb4fc5b..8c744578122a 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -432,7 +432,6 @@ static struct i2c_driver g4fan_driver = {
.driver = {
.name = "therm_windtunnel",
},
- .attach_adapter = do_attach,
.probe = do_probe,
.remove = do_remove,
.id_table = therm_windtunnel_id,
@@ -445,7 +444,29 @@ static struct i2c_driver g4fan_driver = {
static int therm_of_probe(struct platform_device *dev)
{
- return i2c_add_driver( &g4fan_driver );
+ struct i2c_adapter *adap;
+ int ret, i = 0;
+
+ adap = i2c_get_adapter(0);
+ if (!adap)
+ return -EPROBE_DEFER;
+
+ ret = i2c_add_driver(&g4fan_driver);
+ if (ret) {
+ i2c_put_adapter(adap);
+ return ret;
+ }
+
+ /* We assume Macs have consecutive I2C bus numbers starting at 0 */
+ while (adap) {
+ do_attach(adap);
+ if (x.running)
+ return 0;
+ i2c_put_adapter(adap);
+ adap = i2c_get_adapter(++i);
+ }
+
+ return -ENODEV;
}
static int
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 25c1ce811053..d72c450aebe5 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Device driver for the via-pmu on Apple Powermacs.
+ * Device driver for the PMU in Apple PowerBooks and PowerMacs.
*
* The VIA (versatile interface adapter) interfaces to the PMU,
* a 6805 microprocessor core whose primary function is to control
@@ -49,20 +49,26 @@
#include <linux/compat.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <asm/prom.h>
+#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/irq.h>
+#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>
#include <asm/pmac_low_i2c.h>
-#include <linux/uaccess.h>
+#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/backlight.h>
+#else
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_via.h>
+#endif
#include "via-pmu-event.h"
@@ -76,7 +82,6 @@
#define BATTERY_POLLING_COUNT 2
static DEFINE_MUTEX(pmu_info_proc_mutex);
-static volatile unsigned char __iomem *via;
/* VIA registers - spaced 0x200 bytes apart */
#define RS 0x200 /* skip between registers */
@@ -98,8 +103,13 @@ static volatile unsigned char __iomem *via;
#define ANH (15*RS) /* A-side data, no handshake */
/* Bits in B data register: both active low */
+#ifdef CONFIG_PPC_PMAC
#define TACK 0x08 /* Transfer acknowledge (input) */
#define TREQ 0x10 /* Transfer request (output) */
+#else
+#define TACK 0x02
+#define TREQ 0x04
+#endif
/* Bits in ACR */
#define SR_CTRL 0x1c /* Shift register control bits */
@@ -114,6 +124,7 @@ static volatile unsigned char __iomem *via;
#define CB1_INT 0x10 /* transition on CB1 input */
static volatile enum pmu_state {
+ uninitialized = 0,
idle,
sending,
intack,
@@ -140,11 +151,15 @@ static int data_index;
static int data_len;
static volatile int adb_int_pending;
static volatile int disable_poll;
-static struct device_node *vias;
static int pmu_kind = PMU_UNKNOWN;
static int pmu_fully_inited;
static int pmu_has_adb;
+#ifdef CONFIG_PPC_PMAC
+static volatile unsigned char __iomem *via1;
+static volatile unsigned char __iomem *via2;
+static struct device_node *vias;
static struct device_node *gpio_node;
+#endif
static unsigned char __iomem *gpio_reg;
static int gpio_irq = 0;
static int gpio_irq_enabled = -1;
@@ -157,7 +172,9 @@ static int drop_interrupts;
static int option_lid_wakeup = 1;
#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
static unsigned long async_req_locks;
-static unsigned int pmu_irq_stats[11];
+
+#define NUM_IRQ_STATS 13
+static unsigned int pmu_irq_stats[NUM_IRQ_STATS];
static struct proc_dir_entry *proc_pmu_root;
static struct proc_dir_entry *proc_pmu_info;
@@ -271,10 +288,11 @@ static char *pbook_type[] = {
int __init find_via_pmu(void)
{
+#ifdef CONFIG_PPC_PMAC
u64 taddr;
const u32 *reg;
- if (via)
+ if (pmu_state != uninitialized)
return 1;
vias = of_find_node_by_name(NULL, "via-pmu");
if (vias == NULL)
@@ -339,50 +357,70 @@ int __init find_via_pmu(void)
} else
pmu_kind = PMU_UNKNOWN;
- via = ioremap(taddr, 0x2000);
- if (via == NULL) {
+ via1 = via2 = ioremap(taddr, 0x2000);
+ if (via1 == NULL) {
printk(KERN_ERR "via-pmu: Can't map address !\n");
goto fail_via_remap;
}
- out_8(&via[IER], IER_CLR | 0x7f); /* disable all intrs */
- out_8(&via[IFR], 0x7f); /* clear IFR */
+ out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */
+ out_8(&via1[IFR], 0x7f); /* clear IFR */
pmu_state = idle;
if (!init_pmu())
goto fail_init;
- printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
- PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
-
sys_ctrler = SYS_CTRLER_PMU;
return 1;
fail_init:
- iounmap(via);
- via = NULL;
+ iounmap(via1);
+ via1 = via2 = NULL;
fail_via_remap:
iounmap(gpio_reg);
gpio_reg = NULL;
fail:
of_node_put(vias);
vias = NULL;
+ pmu_state = uninitialized;
return 0;
+#else
+ if (macintosh_config->adb_type != MAC_ADB_PB2)
+ return 0;
+
+ pmu_kind = PMU_UNKNOWN;
+
+ spin_lock_init(&pmu_lock);
+
+ pmu_has_adb = 1;
+
+ pmu_intr_mask = PMU_INT_PCEJECT |
+ PMU_INT_SNDBRT |
+ PMU_INT_ADB |
+ PMU_INT_TICK;
+
+ pmu_state = idle;
+
+ if (!init_pmu()) {
+ pmu_state = uninitialized;
+ return 0;
+ }
+
+ return 1;
+#endif /* !CONFIG_PPC_PMAC */
}
#ifdef CONFIG_ADB
static int pmu_probe(void)
{
- return vias == NULL? -ENODEV: 0;
+ return pmu_state == uninitialized ? -ENODEV : 0;
}
-static int __init pmu_init(void)
+static int pmu_init(void)
{
- if (vias == NULL)
- return -ENODEV;
- return 0;
+ return pmu_state == uninitialized ? -ENODEV : 0;
}
#endif /* CONFIG_ADB */
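
The pmu_probe()/pmu_init() conversion above swaps the PowerMac-only vias pointer test for a state check that works on both PPC and m68k. A minimal sketch of the sentinel pattern, mirroring the pmu_present() change later in this patch:

    /* The enum's first value is explicitly 0, so the BSS-zeroed static
     * starts out "not probed" on every platform, with no device-tree
     * pointer needed for the test. */
    static volatile enum pmu_state {
        uninitialized = 0,
        idle,
        /* ... */
    } pmu_state;

    int pmu_present(void)
    {
        return pmu_state != uninitialized;
    }
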
@@ -395,13 +433,14 @@ static int __init pmu_init(void)
*/
static int __init via_pmu_start(void)
{
- unsigned int irq;
+ unsigned int __maybe_unused irq;
- if (vias == NULL)
+ if (pmu_state == uninitialized)
return -ENODEV;
batt_req.complete = 1;
+#ifdef CONFIG_PPC_PMAC
irq = irq_of_parse_and_map(vias, 0);
if (!irq) {
printk(KERN_ERR "via-pmu: can't map interrupt\n");
@@ -437,7 +476,20 @@ static int __init via_pmu_start(void)
}
/* Enable interrupts */
- out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
+ out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
+#else
+ if (request_irq(IRQ_MAC_ADB_SR, via_pmu_interrupt, IRQF_NO_SUSPEND,
+ "VIA-PMU-SR", NULL)) {
+ pr_err("%s: couldn't get SR irq\n", __func__);
+ return -ENODEV;
+ }
+ if (request_irq(IRQ_MAC_ADB_CL, via_pmu_interrupt, IRQF_NO_SUSPEND,
+ "VIA-PMU-CL", NULL)) {
+ pr_err("%s: couldn't get CL irq\n", __func__);
+ free_irq(IRQ_MAC_ADB_SR, NULL);
+ return -ENODEV;
+ }
+#endif /* !CONFIG_PPC_PMAC */
pmu_fully_inited = 1;
@@ -463,7 +515,7 @@ arch_initcall(via_pmu_start);
*/
static int __init via_pmu_dev_init(void)
{
- if (vias == NULL)
+ if (pmu_state == uninitialized)
return -ENODEV;
#ifdef CONFIG_PMAC_BACKLIGHT
@@ -534,8 +586,9 @@ init_pmu(void)
int timeout;
struct adb_request req;
- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
+ /* Negate TREQ. Set TACK to input and TREQ to output. */
+ out_8(&via2[B], in_8(&via2[B]) | TREQ);
+ out_8(&via2[DIRB], (in_8(&via2[DIRB]) | TREQ) & ~TACK);
pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
timeout = 100000;
@@ -587,6 +640,10 @@ init_pmu(void)
option_server_mode ? "enabled" : "disabled");
}
}
+
+ printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
+ PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
+
return 1;
}
@@ -625,6 +682,7 @@ static void pmu_set_server_mode(int server_mode)
static void
done_battery_state_ohare(struct adb_request* req)
{
+#ifdef CONFIG_PPC_PMAC
/* format:
* [0] : flags
* 0x01 : AC indicator
@@ -706,6 +764,7 @@ done_battery_state_ohare(struct adb_request* req)
pmu_batteries[pmu_cur_battery].amperage = amperage;
pmu_batteries[pmu_cur_battery].voltage = voltage;
pmu_batteries[pmu_cur_battery].time_remaining = time;
+#endif /* CONFIG_PPC_PMAC */
clear_bit(0, &async_req_locks);
}
@@ -816,9 +875,9 @@ static int pmu_info_proc_show(struct seq_file *m, void *v)
static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
{
int i;
- static const char *irq_names[] = {
- "Total CB1 triggered events",
- "Total GPIO1 triggered events",
+ static const char *irq_names[NUM_IRQ_STATS] = {
+ "Unknown interrupt (type 0)",
+ "Unknown interrupt (type 1)",
"PC-Card eject button",
"Sound/Brightness button",
"ADB message",
@@ -827,10 +886,12 @@ static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
"Tick timer",
"Ghost interrupt (zero len)",
"Empty interrupt (empty mask)",
- "Max irqs in a row"
+ "Max irqs in a row",
+ "Total CB1 triggered events",
+ "Total GPIO1 triggered events",
};
- for (i=0; i<11; i++) {
+ for (i = 0; i < NUM_IRQ_STATS; i++) {
seq_printf(m, " %2u: %10u (%s)\n",
i, pmu_irq_stats[i], irq_names[i]);
}
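
With this renumbering, pmu_irq_stats[] is indexed directly by PMU interrupt bit number for slots 0-7 (matching the ffs()-based dispatch introduced in pmu_handle_data() below), slots 8-10 keep their old meanings, and the CB1/GPIO1 event totals that used to occupy slots 0 and 1 move to the two new slots, 11 and 12.
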
@@ -928,7 +989,7 @@ static int pmu_send_request(struct adb_request *req, int sync)
{
int i, ret;
- if ((vias == NULL) || (!pmu_fully_inited)) {
+ if (pmu_state == uninitialized || !pmu_fully_inited) {
req->complete = 1;
return -ENXIO;
}
@@ -1022,7 +1083,7 @@ static int __pmu_adb_autopoll(int devs)
static int pmu_adb_autopoll(int devs)
{
- if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
+ if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb)
return -ENXIO;
adb_dev_map = devs;
@@ -1035,7 +1096,7 @@ static int pmu_adb_reset_bus(void)
struct adb_request req;
int save_autopoll = adb_dev_map;
- if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
+ if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb)
return -ENXIO;
/* anyone got a better idea?? */
@@ -1071,7 +1132,7 @@ pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
va_list list;
int i;
- if (vias == NULL)
+ if (pmu_state == uninitialized)
return -ENXIO;
if (nbytes < 0 || nbytes > 32) {
@@ -1096,7 +1157,7 @@ pmu_queue_request(struct adb_request *req)
unsigned long flags;
int nsend;
- if (via == NULL) {
+ if (pmu_state == uninitialized) {
req->complete = 1;
return -ENXIO;
}
@@ -1136,7 +1197,7 @@ wait_for_ack(void)
* reported
*/
int timeout = 4000;
- while ((in_8(&via[B]) & TACK) == 0) {
+ while ((in_8(&via2[B]) & TACK) == 0) {
if (--timeout < 0) {
printk(KERN_ERR "PMU not responding (!ack)\n");
return;
@@ -1150,23 +1211,19 @@ wait_for_ack(void)
static inline void
send_byte(int x)
{
- volatile unsigned char __iomem *v = via;
-
- out_8(&v[ACR], in_8(&v[ACR]) | SR_OUT | SR_EXT);
- out_8(&v[SR], x);
- out_8(&v[B], in_8(&v[B]) & ~TREQ); /* assert TREQ */
- (void)in_8(&v[B]);
+ out_8(&via1[ACR], in_8(&via1[ACR]) | SR_OUT | SR_EXT);
+ out_8(&via1[SR], x);
+ out_8(&via2[B], in_8(&via2[B]) & ~TREQ); /* assert TREQ */
+ (void)in_8(&via2[B]);
}
static inline void
recv_byte(void)
{
- volatile unsigned char __iomem *v = via;
-
- out_8(&v[ACR], (in_8(&v[ACR]) & ~SR_OUT) | SR_EXT);
- in_8(&v[SR]); /* resets SR */
- out_8(&v[B], in_8(&v[B]) & ~TREQ);
- (void)in_8(&v[B]);
+ out_8(&via1[ACR], (in_8(&via1[ACR]) & ~SR_OUT) | SR_EXT);
+ in_8(&via1[SR]); /* resets SR */
+ out_8(&via2[B], in_8(&via2[B]) & ~TREQ);
+ (void)in_8(&via2[B]);
}
static inline void
@@ -1209,7 +1266,7 @@ pmu_start(void)
void
pmu_poll(void)
{
- if (!via)
+ if (pmu_state == uninitialized)
return;
if (disable_poll)
return;
@@ -1219,7 +1276,7 @@ pmu_poll(void)
void
pmu_poll_adb(void)
{
- if (!via)
+ if (pmu_state == uninitialized)
return;
if (disable_poll)
return;
@@ -1234,7 +1291,7 @@ pmu_poll_adb(void)
void
pmu_wait_complete(struct adb_request *req)
{
- if (!via)
+ if (pmu_state == uninitialized)
return;
while((pmu_state != idle && pmu_state != locked) || !req->complete)
via_pmu_interrupt(0, NULL);
@@ -1250,7 +1307,7 @@ pmu_suspend(void)
{
unsigned long flags;
- if (!via)
+ if (pmu_state == uninitialized)
return;
spin_lock_irqsave(&pmu_lock, flags);
@@ -1269,7 +1326,7 @@ pmu_suspend(void)
if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
if (gpio_irq >= 0)
disable_irq_nosync(gpio_irq);
- out_8(&via[IER], CB1_INT | IER_CLR);
+ out_8(&via1[IER], CB1_INT | IER_CLR);
spin_unlock_irqrestore(&pmu_lock, flags);
break;
}
@@ -1281,7 +1338,7 @@ pmu_resume(void)
{
unsigned long flags;
- if (!via || (pmu_suspended < 1))
+ if (pmu_state == uninitialized || pmu_suspended < 1)
return;
spin_lock_irqsave(&pmu_lock, flags);
@@ -1293,7 +1350,7 @@ pmu_resume(void)
adb_int_pending = 1;
if (gpio_irq >= 0)
enable_irq(gpio_irq);
- out_8(&via[IER], CB1_INT | IER_SET);
+ out_8(&via1[IER], CB1_INT | IER_SET);
spin_unlock_irqrestore(&pmu_lock, flags);
pmu_poll();
}
@@ -1302,7 +1359,8 @@ pmu_resume(void)
static void
pmu_handle_data(unsigned char *data, int len)
{
- unsigned char ints, pirq;
+ unsigned char ints;
+ int idx;
int i = 0;
asleep = 0;
@@ -1324,25 +1382,24 @@ pmu_handle_data(unsigned char *data, int len)
ints &= ~(PMU_INT_ADB_AUTO | PMU_INT_AUTO_SRQ_POLL);
next:
-
if (ints == 0) {
if (i > pmu_irq_stats[10])
pmu_irq_stats[10] = i;
return;
}
-
- for (pirq = 0; pirq < 8; pirq++)
- if (ints & (1 << pirq))
- break;
- pmu_irq_stats[pirq]++;
i++;
- ints &= ~(1 << pirq);
+
+ idx = ffs(ints) - 1;
+ ints &= ~BIT(idx);
+
+ pmu_irq_stats[idx]++;
/* Note: for some reason, we get an interrupt with len=1,
* data[0]==0 after each normal ADB interrupt, at least
* on the Pismo. Still investigating... --BenH
*/
- if ((1 << pirq) & PMU_INT_ADB) {
+ switch (BIT(idx)) {
+ case PMU_INT_ADB:
if ((data[0] & PMU_INT_ADB_AUTO) == 0) {
struct adb_request *req = req_awaiting_reply;
if (!req) {
@@ -1358,6 +1415,7 @@ next:
}
pmu_done(req);
} else {
+#ifdef CONFIG_XMON
if (len == 4 && data[1] == 0x2c) {
extern int xmon_wants_key, xmon_adb_keycode;
if (xmon_wants_key) {
@@ -1365,6 +1423,7 @@ next:
return;
}
}
+#endif /* CONFIG_XMON */
#ifdef CONFIG_ADB
/*
* XXX On the [23]400 the PMU gives us an up
@@ -1378,25 +1437,28 @@ next:
adb_input(data+1, len-1, 1);
#endif /* CONFIG_ADB */
}
- }
+ break;
+
/* Sound/brightness button pressed */
- else if ((1 << pirq) & PMU_INT_SNDBRT) {
+ case PMU_INT_SNDBRT:
#ifdef CONFIG_PMAC_BACKLIGHT
if (len == 3)
pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4);
#endif
- }
+ break;
+
/* Tick interrupt */
- else if ((1 << pirq) & PMU_INT_TICK) {
- /* Environement or tick interrupt, query batteries */
+ case PMU_INT_TICK:
+ /* Environment or tick interrupt, query batteries */
if (pmu_battery_count) {
if ((--query_batt_timer) == 0) {
query_battery_state();
query_batt_timer = BATTERY_POLLING_COUNT;
}
}
- }
- else if ((1 << pirq) & PMU_INT_ENVIRONMENT) {
+ break;
+
+ case PMU_INT_ENVIRONMENT:
if (pmu_battery_count)
query_battery_state();
pmu_pass_intr(data, len);
@@ -1406,7 +1468,9 @@ next:
via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
via_pmu_event(PMU_EVT_LID, data[1]&1);
}
- } else {
+ break;
+
+ default:
pmu_pass_intr(data, len);
}
goto next;
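
The dispatch above replaces a manual lowest-set-bit scan with ffs(). A self-contained userspace sketch of the same idiom (in the kernel, BIT(n) is just 1UL << n):

    #include <stdio.h>
    #include <strings.h>               /* POSIX ffs() */

    int main(void)
    {
        unsigned int ints = 0x14;      /* two pending sources: bits 2 and 4 */

        while (ints) {
            int idx = ffs(ints) - 1;   /* ffs() is 1-based; 0 means no bits set */
            ints &= ~(1u << idx);      /* clear the bit being handled */
            printf("handling source %d\n", idx); /* prints 2, then 4 */
        }
        return 0;
    }
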
@@ -1418,21 +1482,20 @@ pmu_sr_intr(void)
struct adb_request *req;
int bite = 0;
- if (via[B] & TREQ) {
- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
- out_8(&via[IFR], SR_INT);
+ if (in_8(&via2[B]) & TREQ) {
+ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via2[B]));
return NULL;
}
/* The ack may not yet be low when we get the interrupt */
- while ((in_8(&via[B]) & TACK) != 0)
+ while ((in_8(&via2[B]) & TACK) != 0)
;
/* if reading grab the byte, and reset the interrupt */
if (pmu_state == reading || pmu_state == reading_intr)
- bite = in_8(&via[SR]);
+ bite = in_8(&via1[SR]);
/* reset TREQ and wait for TACK to go high */
- out_8(&via[B], in_8(&via[B]) | TREQ);
+ out_8(&via2[B], in_8(&via2[B]) | TREQ);
wait_for_ack();
switch (pmu_state) {
@@ -1533,26 +1596,46 @@ via_pmu_interrupt(int irq, void *arg)
++disable_poll;
for (;;) {
- intr = in_8(&via[IFR]) & (SR_INT | CB1_INT);
+ /* On 68k Macs, VIA interrupts are dispatched individually.
+ * Unless we are polling, the relevant IRQ flag has already
+ * been cleared.
+ */
+ intr = 0;
+ if (IS_ENABLED(CONFIG_PPC_PMAC) || !irq) {
+ intr = in_8(&via1[IFR]) & (SR_INT | CB1_INT);
+ out_8(&via1[IFR], intr);
+ }
+#ifndef CONFIG_PPC_PMAC
+ switch (irq) {
+ case IRQ_MAC_ADB_CL:
+ intr = CB1_INT;
+ break;
+ case IRQ_MAC_ADB_SR:
+ intr = SR_INT;
+ break;
+ }
+#endif
if (intr == 0)
break;
handled = 1;
if (++nloop > 1000) {
printk(KERN_DEBUG "PMU: stuck in intr loop, "
"intr=%x, ier=%x pmu_state=%d\n",
- intr, in_8(&via[IER]), pmu_state);
+ intr, in_8(&via1[IER]), pmu_state);
break;
}
- out_8(&via[IFR], intr);
if (intr & CB1_INT) {
adb_int_pending = 1;
- pmu_irq_stats[0]++;
+ pmu_irq_stats[11]++;
}
if (intr & SR_INT) {
req = pmu_sr_intr();
if (req)
break;
}
+#ifndef CONFIG_PPC_PMAC
+ break;
+#endif
}
recheck:
@@ -1619,7 +1702,7 @@ pmu_unlock(void)
}
-static irqreturn_t
+static __maybe_unused irqreturn_t
gpio1_interrupt(int irq, void *arg)
{
unsigned long flags;
@@ -1630,7 +1713,7 @@ gpio1_interrupt(int irq, void *arg)
disable_irq_nosync(gpio_irq);
gpio_irq_enabled = 0;
}
- pmu_irq_stats[1]++;
+ pmu_irq_stats[12]++;
adb_int_pending = 1;
spin_unlock_irqrestore(&pmu_lock, flags);
via_pmu_interrupt(0, NULL);
@@ -1644,7 +1727,7 @@ pmu_enable_irled(int on)
{
struct adb_request req;
- if (vias == NULL)
+ if (pmu_state == uninitialized)
return ;
if (pmu_kind == PMU_KEYLARGO_BASED)
return ;
@@ -1659,7 +1742,7 @@ pmu_restart(void)
{
struct adb_request req;
- if (via == NULL)
+ if (pmu_state == uninitialized)
return;
local_irq_disable();
@@ -1684,7 +1767,7 @@ pmu_shutdown(void)
{
struct adb_request req;
- if (via == NULL)
+ if (pmu_state == uninitialized)
return;
local_irq_disable();
@@ -1712,7 +1795,7 @@ pmu_shutdown(void)
int
pmu_present(void)
{
- return via != NULL;
+ return pmu_state != uninitialized;
}
#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
@@ -1725,29 +1808,29 @@ static u32 save_via[8];
static void
save_via_state(void)
{
- save_via[0] = in_8(&via[ANH]);
- save_via[1] = in_8(&via[DIRA]);
- save_via[2] = in_8(&via[B]);
- save_via[3] = in_8(&via[DIRB]);
- save_via[4] = in_8(&via[PCR]);
- save_via[5] = in_8(&via[ACR]);
- save_via[6] = in_8(&via[T1CL]);
- save_via[7] = in_8(&via[T1CH]);
+ save_via[0] = in_8(&via1[ANH]);
+ save_via[1] = in_8(&via1[DIRA]);
+ save_via[2] = in_8(&via1[B]);
+ save_via[3] = in_8(&via1[DIRB]);
+ save_via[4] = in_8(&via1[PCR]);
+ save_via[5] = in_8(&via1[ACR]);
+ save_via[6] = in_8(&via1[T1CL]);
+ save_via[7] = in_8(&via1[T1CH]);
}
static void
restore_via_state(void)
{
- out_8(&via[ANH], save_via[0]);
- out_8(&via[DIRA], save_via[1]);
- out_8(&via[B], save_via[2]);
- out_8(&via[DIRB], save_via[3]);
- out_8(&via[PCR], save_via[4]);
- out_8(&via[ACR], save_via[5]);
- out_8(&via[T1CL], save_via[6]);
- out_8(&via[T1CH], save_via[7]);
- out_8(&via[IER], IER_CLR | 0x7f); /* disable all intrs */
- out_8(&via[IFR], 0x7f); /* clear IFR */
- out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
+ out_8(&via1[ANH], save_via[0]);
+ out_8(&via1[DIRA], save_via[1]);
+ out_8(&via1[B], save_via[2]);
+ out_8(&via1[DIRB], save_via[3]);
+ out_8(&via1[PCR], save_via[4]);
+ out_8(&via1[ACR], save_via[5]);
+ out_8(&via1[T1CL], save_via[6]);
+ out_8(&via1[T1CH], save_via[7]);
+ out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */
+ out_8(&via1[IFR], 0x7f); /* clear IFR */
+ out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
}
#define GRACKLE_PM (1<<7)
@@ -2253,6 +2336,7 @@ static int pmu_ioctl(struct file *filp,
int error = -EINVAL;
switch (cmd) {
+#ifdef CONFIG_PPC_PMAC
case PMU_IOC_SLEEP:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -2262,6 +2346,7 @@ static int pmu_ioctl(struct file *filp,
return put_user(0, argp);
else
return put_user(1, argp);
+#endif
#ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
/* Compatibility ioctl's for backlight */
@@ -2378,7 +2463,7 @@ static struct miscdevice pmu_device = {
static int pmu_device_init(void)
{
- if (!via)
+ if (pmu_state == uninitialized)
return 0;
if (misc_register(&pmu_device) < 0)
printk(KERN_ERR "via-pmu: cannot register misc device.\n");
@@ -2389,33 +2474,33 @@ device_initcall(pmu_device_init);
#ifdef DEBUG_SLEEP
static inline void
-polled_handshake(volatile unsigned char __iomem *via)
+polled_handshake(void)
{
- via[B] &= ~TREQ; eieio();
- while ((via[B] & TACK) != 0)
+ via2[B] &= ~TREQ; eieio();
+ while ((via2[B] & TACK) != 0)
;
- via[B] |= TREQ; eieio();
- while ((via[B] & TACK) == 0)
+ via2[B] |= TREQ; eieio();
+ while ((via2[B] & TACK) == 0)
;
}
static inline void
-polled_send_byte(volatile unsigned char __iomem *via, int x)
+polled_send_byte(int x)
{
- via[ACR] |= SR_OUT | SR_EXT; eieio();
- via[SR] = x; eieio();
- polled_handshake(via);
+ via1[ACR] |= SR_OUT | SR_EXT; eieio();
+ via1[SR] = x; eieio();
+ polled_handshake();
}
static inline int
-polled_recv_byte(volatile unsigned char __iomem *via)
+polled_recv_byte(void)
{
int x;
- via[ACR] = (via[ACR] & ~SR_OUT) | SR_EXT; eieio();
- x = via[SR]; eieio();
- polled_handshake(via);
- x = via[SR]; eieio();
+ via1[ACR] = (via1[ACR] & ~SR_OUT) | SR_EXT; eieio();
+ x = via1[SR]; eieio();
+ polled_handshake();
+ x = via1[SR]; eieio();
return x;
}
@@ -2424,7 +2509,6 @@ pmu_polled_request(struct adb_request *req)
{
unsigned long flags;
int i, l, c;
- volatile unsigned char __iomem *v = via;
req->complete = 1;
c = req->data[0];
@@ -2436,21 +2520,21 @@ pmu_polled_request(struct adb_request *req)
while (pmu_state != idle)
pmu_poll();
- while ((via[B] & TACK) == 0)
+ while ((via2[B] & TACK) == 0)
;
- polled_send_byte(v, c);
+ polled_send_byte(c);
if (l < 0) {
l = req->nbytes - 1;
- polled_send_byte(v, l);
+ polled_send_byte(l);
}
for (i = 1; i <= l; ++i)
- polled_send_byte(v, req->data[i]);
+ polled_send_byte(req->data[i]);
l = pmu_data_len[c][1];
if (l < 0)
- l = polled_recv_byte(v);
+ l = polled_recv_byte();
for (i = 0; i < l; ++i)
- req->reply[i + req->reply_len] = polled_recv_byte(v);
+ req->reply[i + req->reply_len] = polled_recv_byte();
if (req->done)
(*req->done)(req);
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
deleted file mode 100644
index d545ed45e482..000000000000
--- a/drivers/macintosh/via-pmu68k.c
+++ /dev/null
@@ -1,850 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device driver for the PMU on 68K-based Apple PowerBooks
- *
- * The VIA (versatile interface adapter) interfaces to the PMU,
- * a 6805 microprocessor core whose primary function is to control
- * battery charging and system power on the PowerBooks.
- * The PMU also controls the ADB (Apple Desktop Bus) which connects
- * to the keyboard and mouse, as well as the non-volatile RAM
- * and the RTC (real time clock) chip.
- *
- * Adapted for 68K PMU by Joshua M. Thompson
- *
- * Based largely on the PowerMac PMU code by Paul Mackerras and
- * Fabio Riccardi.
- *
- * Also based on the PMU driver from MkLinux by Apple Computer, Inc.
- * and the Open Software Foundation, Inc.
- */
-
-#include <stdarg.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/miscdevice.h>
-#include <linux/blkdev.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#include <linux/cuda.h>
-
-#include <asm/macintosh.h>
-#include <asm/macints.h>
-#include <asm/mac_via.h>
-
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-
-/* Misc minor number allocated for /dev/pmu */
-#define PMU_MINOR 154
-
-/* VIA registers - spaced 0x200 bytes apart */
-#define RS 0x200 /* skip between registers */
-#define B 0 /* B-side data */
-#define A RS /* A-side data */
-#define DIRB (2*RS) /* B-side direction (1=output) */
-#define DIRA (3*RS) /* A-side direction (1=output) */
-#define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
-#define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
-#define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
-#define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
-#define T2CL (8*RS) /* Timer 2 ctr/latch (low 8 bits) */
-#define T2CH (9*RS) /* Timer 2 counter (high 8 bits) */
-#define SR (10*RS) /* Shift register */
-#define ACR (11*RS) /* Auxiliary control register */
-#define PCR (12*RS) /* Peripheral control register */
-#define IFR (13*RS) /* Interrupt flag register */
-#define IER (14*RS) /* Interrupt enable register */
-#define ANH (15*RS) /* A-side data, no handshake */
-
-/* Bits in B data register: both active low */
-#define TACK 0x02 /* Transfer acknowledge (input) */
-#define TREQ 0x04 /* Transfer request (output) */
-
-/* Bits in ACR */
-#define SR_CTRL 0x1c /* Shift register control bits */
-#define SR_EXT 0x0c /* Shift on external clock */
-#define SR_OUT 0x10 /* Shift out if 1 */
-
-/* Bits in IFR and IER */
-#define SR_INT 0x04 /* Shift register full/empty */
-#define CB1_INT 0x10 /* transition on CB1 input */
-
-static enum pmu_state {
- idle,
- sending,
- intack,
- reading,
- reading_intr,
-} pmu_state;
-
-static struct adb_request *current_req;
-static struct adb_request *last_req;
-static struct adb_request *req_awaiting_reply;
-static unsigned char interrupt_data[32];
-static unsigned char *reply_ptr;
-static int data_index;
-static int data_len;
-static int adb_int_pending;
-static int pmu_adb_flags;
-static int adb_dev_map;
-static struct adb_request bright_req_1, bright_req_2, bright_req_3;
-static int pmu_kind = PMU_UNKNOWN;
-static int pmu_fully_inited;
-
-int asleep;
-
-static int pmu_probe(void);
-static int pmu_init(void);
-static void pmu_start(void);
-static irqreturn_t pmu_interrupt(int irq, void *arg);
-static int pmu_send_request(struct adb_request *req, int sync);
-static int pmu_autopoll(int devs);
-void pmu_poll(void);
-static int pmu_reset_bus(void);
-
-static int init_pmu(void);
-static void pmu_start(void);
-static void send_byte(int x);
-static void recv_byte(void);
-static void pmu_done(struct adb_request *req);
-static void pmu_handle_data(unsigned char *data, int len);
-static void set_volume(int level);
-static void pmu_enable_backlight(int on);
-static void pmu_set_brightness(int level);
-
-struct adb_driver via_pmu_driver = {
- .name = "68K PMU",
- .probe = pmu_probe,
- .init = pmu_init,
- .send_request = pmu_send_request,
- .autopoll = pmu_autopoll,
- .poll = pmu_poll,
- .reset_bus = pmu_reset_bus,
-};
-
-/*
- * This table indicates for each PMU opcode:
- * - the number of data bytes to be sent with the command, or -1
- * if a length byte should be sent,
- * - the number of response bytes which the PMU will return, or
- * -1 if it will send a length byte.
- */
-static s8 pmu_data_len[256][2] = {
-/* 0 1 2 3 4 5 6 7 */
-/*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0},
-/*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1},
-/*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0},
-/*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1},
-/*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0},
-/*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},
-/*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1},
-/*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1},
-/*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},
-/*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*d0*/ { 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1},
-/*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0},
-/*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0},
-/*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-};
-
-int __init find_via_pmu(void)
-{
- switch (macintosh_config->adb_type) {
- case MAC_ADB_PB1:
- pmu_kind = PMU_68K_V1;
- break;
- case MAC_ADB_PB2:
- pmu_kind = PMU_68K_V2;
- break;
- default:
- pmu_kind = PMU_UNKNOWN;
- return -ENODEV;
- }
-
- pmu_state = idle;
-
- if (!init_pmu())
- goto fail_init;
-
- pr_info("adb: PMU 68K driver v0.5 for Unified ADB\n");
-
- return 1;
-
-fail_init:
- pmu_kind = PMU_UNKNOWN;
- return 0;
-}
-
-static int pmu_probe(void)
-{
- if (pmu_kind == PMU_UNKNOWN)
- return -ENODEV;
- return 0;
-}
-
-static int pmu_init(void)
-{
- if (pmu_kind == PMU_UNKNOWN)
- return -ENODEV;
- return 0;
-}
-
-static int __init via_pmu_start(void)
-{
- if (pmu_kind == PMU_UNKNOWN)
- return -ENODEV;
-
- if (request_irq(IRQ_MAC_ADB_SR, pmu_interrupt, 0, "PMU_SR",
- pmu_interrupt)) {
- pr_err("%s: can't get SR irq\n", __func__);
- return -ENODEV;
- }
- if (request_irq(IRQ_MAC_ADB_CL, pmu_interrupt, 0, "PMU_CL",
- pmu_interrupt)) {
- pr_err("%s: can't get CL irq\n", __func__);
- free_irq(IRQ_MAC_ADB_SR, pmu_interrupt);
- return -ENODEV;
- }
-
- pmu_fully_inited = 1;
-
- /* Enable backlight */
- pmu_enable_backlight(1);
-
- return 0;
-}
-
-arch_initcall(via_pmu_start);
-
-static int __init init_pmu(void)
-{
- int timeout;
- volatile struct adb_request req;
-
- via2[B] |= TREQ; /* negate TREQ */
- via2[DIRB] = (via2[DIRB] | TREQ) & ~TACK; /* TACK in, TREQ out */
-
- pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB);
- timeout = 100000;
- while (!req.complete) {
- if (--timeout < 0) {
- printk(KERN_ERR "pmu_init: no response from PMU\n");
- return -EAGAIN;
- }
- udelay(10);
- pmu_poll();
- }
-
- /* ack all pending interrupts */
- timeout = 100000;
- interrupt_data[0] = 1;
- while (interrupt_data[0] || pmu_state != idle) {
- if (--timeout < 0) {
- printk(KERN_ERR "pmu_init: timed out acking intrs\n");
- return -EAGAIN;
- }
- if (pmu_state == idle) {
- adb_int_pending = 1;
- pmu_interrupt(0, NULL);
- }
- pmu_poll();
- udelay(10);
- }
-
- pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK,
- PMU_INT_ADB_AUTO|PMU_INT_SNDBRT|PMU_INT_ADB);
- timeout = 100000;
- while (!req.complete) {
- if (--timeout < 0) {
- printk(KERN_ERR "pmu_init: no response from PMU\n");
- return -EAGAIN;
- }
- udelay(10);
- pmu_poll();
- }
-
- bright_req_1.complete = 1;
- bright_req_2.complete = 1;
- bright_req_3.complete = 1;
-
- return 1;
-}
-
-int
-pmu_get_model(void)
-{
- return pmu_kind;
-}
-
-/* Send an ADB command */
-static int
-pmu_send_request(struct adb_request *req, int sync)
-{
- int i, ret;
-
- if (!pmu_fully_inited)
- {
- req->complete = 1;
- return -ENXIO;
- }
-
- ret = -EINVAL;
-
- switch (req->data[0]) {
- case PMU_PACKET:
- for (i = 0; i < req->nbytes - 1; ++i)
- req->data[i] = req->data[i+1];
- --req->nbytes;
- if (pmu_data_len[req->data[0]][1] != 0) {
- req->reply[0] = ADB_RET_OK;
- req->reply_len = 1;
- } else
- req->reply_len = 0;
- ret = pmu_queue_request(req);
- break;
- case CUDA_PACKET:
- switch (req->data[1]) {
- case CUDA_GET_TIME:
- if (req->nbytes != 2)
- break;
- req->data[0] = PMU_READ_RTC;
- req->nbytes = 1;
- req->reply_len = 3;
- req->reply[0] = CUDA_PACKET;
- req->reply[1] = 0;
- req->reply[2] = CUDA_GET_TIME;
- ret = pmu_queue_request(req);
- break;
- case CUDA_SET_TIME:
- if (req->nbytes != 6)
- break;
- req->data[0] = PMU_SET_RTC;
- req->nbytes = 5;
- for (i = 1; i <= 4; ++i)
- req->data[i] = req->data[i+1];
- req->reply_len = 3;
- req->reply[0] = CUDA_PACKET;
- req->reply[1] = 0;
- req->reply[2] = CUDA_SET_TIME;
- ret = pmu_queue_request(req);
- break;
- case CUDA_GET_PRAM:
- if (req->nbytes != 4)
- break;
- req->data[0] = PMU_READ_NVRAM;
- req->data[1] = req->data[2];
- req->data[2] = req->data[3];
- req->nbytes = 3;
- req->reply_len = 3;
- req->reply[0] = CUDA_PACKET;
- req->reply[1] = 0;
- req->reply[2] = CUDA_GET_PRAM;
- ret = pmu_queue_request(req);
- break;
- case CUDA_SET_PRAM:
- if (req->nbytes != 5)
- break;
- req->data[0] = PMU_WRITE_NVRAM;
- req->data[1] = req->data[2];
- req->data[2] = req->data[3];
- req->data[3] = req->data[4];
- req->nbytes = 4;
- req->reply_len = 3;
- req->reply[0] = CUDA_PACKET;
- req->reply[1] = 0;
- req->reply[2] = CUDA_SET_PRAM;
- ret = pmu_queue_request(req);
- break;
- }
- break;
- case ADB_PACKET:
- for (i = req->nbytes - 1; i > 1; --i)
- req->data[i+2] = req->data[i];
- req->data[3] = req->nbytes - 2;
- req->data[2] = pmu_adb_flags;
- /*req->data[1] = req->data[1];*/
- req->data[0] = PMU_ADB_CMD;
- req->nbytes += 2;
- req->reply_expected = 1;
- req->reply_len = 0;
- ret = pmu_queue_request(req);
- break;
- }
- if (ret)
- {
- req->complete = 1;
- return ret;
- }
-
- if (sync) {
- while (!req->complete)
- pmu_poll();
- }
-
- return 0;
-}
-
-/* Enable/disable autopolling */
-static int
-pmu_autopoll(int devs)
-{
- struct adb_request req;
-
- if (!pmu_fully_inited) return -ENXIO;
-
- if (devs) {
- adb_dev_map = devs;
- pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86,
- adb_dev_map >> 8, adb_dev_map);
- pmu_adb_flags = 2;
- } else {
- pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF);
- pmu_adb_flags = 0;
- }
- while (!req.complete)
- pmu_poll();
- return 0;
-}
-
-/* Reset the ADB bus */
-static int
-pmu_reset_bus(void)
-{
- struct adb_request req;
- long timeout;
- int save_autopoll = adb_dev_map;
-
- if (!pmu_fully_inited) return -ENXIO;
-
- /* anyone got a better idea?? */
- pmu_autopoll(0);
-
- req.nbytes = 5;
- req.done = NULL;
- req.data[0] = PMU_ADB_CMD;
- req.data[1] = 0;
- req.data[2] = 3; /* ADB_BUSRESET ??? */
- req.data[3] = 0;
- req.data[4] = 0;
- req.reply_len = 0;
- req.reply_expected = 1;
- if (pmu_queue_request(&req) != 0)
- {
- printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n");
- return -EIO;
- }
- while (!req.complete)
- pmu_poll();
- timeout = 100000;
- while (!req.complete) {
- if (--timeout < 0) {
- printk(KERN_ERR "pmu_adb_reset_bus (reset): no response from PMU\n");
- return -EIO;
- }
- udelay(10);
- pmu_poll();
- }
-
- if (save_autopoll != 0)
- pmu_autopoll(save_autopoll);
-
- return 0;
-}
-
-/* Construct and send a pmu request */
-int
-pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
- int nbytes, ...)
-{
- va_list list;
- int i;
-
- if (nbytes < 0 || nbytes > 32) {
- printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes);
- req->complete = 1;
- return -EINVAL;
- }
- req->nbytes = nbytes;
- req->done = done;
- va_start(list, nbytes);
- for (i = 0; i < nbytes; ++i)
- req->data[i] = va_arg(list, int);
- va_end(list);
- if (pmu_data_len[req->data[0]][1] != 0) {
- req->reply[0] = ADB_RET_OK;
- req->reply_len = 1;
- } else
- req->reply_len = 0;
- req->reply_expected = 0;
- return pmu_queue_request(req);
-}
-
-int
-pmu_queue_request(struct adb_request *req)
-{
- unsigned long flags;
- int nsend;
-
- if (req->nbytes <= 0) {
- req->complete = 1;
- return 0;
- }
- nsend = pmu_data_len[req->data[0]][0];
- if (nsend >= 0 && req->nbytes != nsend + 1) {
- req->complete = 1;
- return -EINVAL;
- }
-
- req->next = NULL;
- req->sent = 0;
- req->complete = 0;
- local_irq_save(flags);
-
- if (current_req != 0) {
- last_req->next = req;
- last_req = req;
- } else {
- current_req = req;
- last_req = req;
- if (pmu_state == idle)
- pmu_start();
- }
-
- local_irq_restore(flags);
- return 0;
-}
-
-static void
-send_byte(int x)
-{
- via1[ACR] |= SR_CTRL;
- via1[SR] = x;
- via2[B] &= ~TREQ; /* assert TREQ */
-}
-
-static void
-recv_byte(void)
-{
- char c;
-
- via1[ACR] = (via1[ACR] | SR_EXT) & ~SR_OUT;
- c = via1[SR]; /* resets SR */
- via2[B] &= ~TREQ;
-}
-
-static void
-pmu_start(void)
-{
- unsigned long flags;
- struct adb_request *req;
-
- /* assert pmu_state == idle */
- /* get the packet to send */
- local_irq_save(flags);
- req = current_req;
- if (req == 0 || pmu_state != idle
- || (req->reply_expected && req_awaiting_reply))
- goto out;
-
- pmu_state = sending;
- data_index = 1;
- data_len = pmu_data_len[req->data[0]][0];
-
- /* set the shift register to shift out and send a byte */
- send_byte(req->data[0]);
-
-out:
- local_irq_restore(flags);
-}
-
-void
-pmu_poll(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- if (via1[IFR] & SR_INT) {
- via1[IFR] = SR_INT;
- pmu_interrupt(IRQ_MAC_ADB_SR, NULL);
- }
- if (via1[IFR] & CB1_INT) {
- via1[IFR] = CB1_INT;
- pmu_interrupt(IRQ_MAC_ADB_CL, NULL);
- }
- local_irq_restore(flags);
-}
-
-static irqreturn_t
-pmu_interrupt(int irq, void *dev_id)
-{
- struct adb_request *req;
- int timeout, bite = 0; /* to prevent compiler warning */
-
-#if 0
- printk("pmu_interrupt: irq %d state %d acr %02X, b %02X data_index %d/%d adb_int_pending %d\n",
- irq, pmu_state, (uint) via1[ACR], (uint) via2[B], data_index, data_len, adb_int_pending);
-#endif
-
- if (irq == IRQ_MAC_ADB_CL) { /* CB1 interrupt */
- adb_int_pending = 1;
- } else if (irq == IRQ_MAC_ADB_SR) { /* SR interrupt */
- if (via2[B] & TACK) {
- printk(KERN_DEBUG "PMU: SR_INT but ack still high! (%x)\n", via2[B]);
- }
-
- /* if reading grab the byte */
- if ((via1[ACR] & SR_OUT) == 0) bite = via1[SR];
-
- /* reset TREQ and wait for TACK to go high */
- via2[B] |= TREQ;
- timeout = 3200;
- while (!(via2[B] & TACK)) {
- if (--timeout < 0) {
- printk(KERN_ERR "PMU not responding (!ack)\n");
- goto finish;
- }
- udelay(10);
- }
-
- switch (pmu_state) {
- case sending:
- req = current_req;
- if (data_len < 0) {
- data_len = req->nbytes - 1;
- send_byte(data_len);
- break;
- }
- if (data_index <= data_len) {
- send_byte(req->data[data_index++]);
- break;
- }
- req->sent = 1;
- data_len = pmu_data_len[req->data[0]][1];
- if (data_len == 0) {
- pmu_state = idle;
- current_req = req->next;
- if (req->reply_expected)
- req_awaiting_reply = req;
- else
- pmu_done(req);
- } else {
- pmu_state = reading;
- data_index = 0;
- reply_ptr = req->reply + req->reply_len;
- recv_byte();
- }
- break;
-
- case intack:
- data_index = 0;
- data_len = -1;
- pmu_state = reading_intr;
- reply_ptr = interrupt_data;
- recv_byte();
- break;
-
- case reading:
- case reading_intr:
- if (data_len == -1) {
- data_len = bite;
- if (bite > 32)
- printk(KERN_ERR "PMU: bad reply len %d\n",
- bite);
- } else {
- reply_ptr[data_index++] = bite;
- }
- if (data_index < data_len) {
- recv_byte();
- break;
- }
-
- if (pmu_state == reading_intr) {
- pmu_handle_data(interrupt_data, data_index);
- } else {
- req = current_req;
- current_req = req->next;
- req->reply_len += data_index;
- pmu_done(req);
- }
- pmu_state = idle;
-
- break;
-
- default:
- printk(KERN_ERR "pmu_interrupt: unknown state %d?\n",
- pmu_state);
- }
- }
-finish:
- if (pmu_state == idle) {
- if (adb_int_pending) {
- pmu_state = intack;
- send_byte(PMU_INT_ACK);
- adb_int_pending = 0;
- } else if (current_req) {
- pmu_start();
- }
- }
-
-#if 0
- printk("pmu_interrupt: exit state %d acr %02X, b %02X data_index %d/%d adb_int_pending %d\n",
- pmu_state, (uint) via1[ACR], (uint) via2[B], data_index, data_len, adb_int_pending);
-#endif
- return IRQ_HANDLED;
-}
-
-static void
-pmu_done(struct adb_request *req)
-{
- req->complete = 1;
- if (req->done)
- (*req->done)(req);
-}
-
-/* Interrupt data could be the result data from an ADB cmd */
-static void
-pmu_handle_data(unsigned char *data, int len)
-{
- static int show_pmu_ints = 1;
-
- asleep = 0;
- if (len < 1) {
- adb_int_pending = 0;
- return;
- }
- if (data[0] & PMU_INT_ADB) {
- if ((data[0] & PMU_INT_ADB_AUTO) == 0) {
- struct adb_request *req = req_awaiting_reply;
- if (req == 0) {
- printk(KERN_ERR "PMU: extra ADB reply\n");
- return;
- }
- req_awaiting_reply = NULL;
- if (len <= 2)
- req->reply_len = 0;
- else {
- memcpy(req->reply, data + 1, len - 1);
- req->reply_len = len - 1;
- }
- pmu_done(req);
- } else {
- adb_input(data+1, len-1, 1);
- }
- } else {
- if (data[0] == 0x08 && len == 3) {
- /* sound/brightness buttons pressed */
- pmu_set_brightness(data[1] >> 3);
- set_volume(data[2]);
- } else if (show_pmu_ints
- && !(data[0] == PMU_INT_TICK && len == 1)) {
- int i;
- printk(KERN_DEBUG "pmu intr");
- for (i = 0; i < len; ++i)
- printk(" %.2x", data[i]);
- printk("\n");
- }
- }
-}
-
-static int backlight_level = -1;
-static int backlight_enabled = 0;
-
-#define LEVEL_TO_BRIGHT(lev) ((lev) < 1? 0x7f: 0x4a - ((lev) << 1))
-
-static void
-pmu_enable_backlight(int on)
-{
- struct adb_request req;
-
- if (on) {
- /* first call: get current backlight value */
- if (backlight_level < 0) {
- switch(pmu_kind) {
- case PMU_68K_V1:
- case PMU_68K_V2:
- pmu_request(&req, NULL, 3, PMU_READ_NVRAM, 0x14, 0xe);
- while (!req.complete)
- pmu_poll();
- printk(KERN_DEBUG "pmu: nvram returned bright: %d\n", (int)req.reply[1]);
- backlight_level = req.reply[1];
- break;
- default:
- backlight_enabled = 0;
- return;
- }
- }
- pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT,
- LEVEL_TO_BRIGHT(backlight_level));
- while (!req.complete)
- pmu_poll();
- }
- pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
- PMU_POW_BACKLIGHT | (on ? PMU_POW_ON : PMU_POW_OFF));
- while (!req.complete)
- pmu_poll();
- backlight_enabled = on;
-}
-
-static void
-pmu_set_brightness(int level)
-{
- int bright;
-
- backlight_level = level;
- bright = LEVEL_TO_BRIGHT(level);
- if (!backlight_enabled)
- return;
- if (bright_req_1.complete)
- pmu_request(&bright_req_1, NULL, 2, PMU_BACKLIGHT_BRIGHT,
- bright);
- if (bright_req_2.complete)
- pmu_request(&bright_req_2, NULL, 2, PMU_POWER_CTRL,
- PMU_POW_BACKLIGHT | (bright < 0x7f ? PMU_POW_ON : PMU_POW_OFF));
-}
-
-void
-pmu_enable_irled(int on)
-{
- struct adb_request req;
-
- pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED |
- (on ? PMU_POW_ON : PMU_POW_OFF));
- while (!req.complete)
- pmu_poll();
-}
-
-static void
-set_volume(int level)
-{
-}
-
-int
-pmu_present(void)
-{
- return (pmu_kind != PMU_UNKNOWN);
-}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index e63d29a95e76..841c005d8ebb 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -15,6 +15,12 @@ config ARM_MHU
The controller has 3 mailbox channels, the last of which can be
used in Secure mode only.
+config IMX_MBOX
+ tristate "i.MX Mailbox"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ Mailbox implementation for the i.MX Messaging Unit (MU).
+
config PLATFORM_MHU
tristate "Platform MHU Mailbox"
depends on OF
@@ -189,4 +195,14 @@ config STM32_IPCC
Mailbox implementation for STMicroelectronics STM32 family chips
with hardware for Inter-Processor Communication Controller (IPCC)
between processors. Say Y here if you want to have this support.
+
+config MTK_CMDQ_MBOX
+ tristate "MediaTek CMDQ Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MTK_INFRACFG
+ help
+ Say yes here to add support for the MediaTek Command Queue (CMDQ)
+ mailbox driver. The CMDQ is used to help read/write registers under
+ critical timing constraints, such as updating the display
+ configuration during vblank.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 4d501bea7863..c818b5d011ae 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
obj-$(CONFIG_ARM_MHU) += arm_mhu.o
+obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
+
obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
@@ -40,3 +42,5 @@ obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
+
+obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
new file mode 100644
index 000000000000..363d35d5e49d
--- /dev/null
+++ b/drivers/mailbox/imx-mailbox.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+/* Transmit Register */
+#define IMX_MU_xTRn(x) (0x00 + 4 * (x))
+/* Receive Register */
+#define IMX_MU_xRRn(x) (0x10 + 4 * (x))
+/* Status Register */
+#define IMX_MU_xSR 0x20
+#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
+#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
+#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
+#define IMX_MU_xSR_BRDIP BIT(9)
+
+/* Control Register */
+#define IMX_MU_xCR 0x24
+/* General Purpose Interrupt Enable */
+#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(x) BIT(24 + (3 - (x)))
+/* Transmit Interrupt Enable */
+#define IMX_MU_xCR_TIEn(x) BIT(20 + (3 - (x)))
+/* General Purpose Interrupt Request */
+#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
+
+#define IMX_MU_CHANS 16
+#define IMX_MU_CHAN_NAME_SIZE 20
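
The "(3 - (x))" term in the macros above encodes the MU register layout: per-channel bits are packed MSB-first, so channel 0 owns the highest bit of each four-bit group. Worked out for the boundary channels:

    IMX_MU_xSR_GIPn(0) == BIT(31)    IMX_MU_xSR_GIPn(3) == BIT(28)
    IMX_MU_xSR_RFn(0)  == BIT(27)    IMX_MU_xSR_RFn(3)  == BIT(24)
    IMX_MU_xSR_TEn(0)  == BIT(23)    IMX_MU_xSR_TEn(3)  == BIT(20)
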
+
+enum imx_mu_chan_type {
+ IMX_MU_TYPE_TX, /* Tx */
+ IMX_MU_TYPE_RX, /* Rx */
+ IMX_MU_TYPE_TXDB, /* Tx doorbell */
+ IMX_MU_TYPE_RXDB, /* Rx doorbell */
+};
+
+struct imx_mu_con_priv {
+ unsigned int idx;
+ char irq_desc[IMX_MU_CHAN_NAME_SIZE];
+ enum imx_mu_chan_type type;
+ struct mbox_chan *chan;
+ struct tasklet_struct txdb_tasklet;
+};
+
+struct imx_mu_priv {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t xcr_lock; /* control register lock */
+
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[IMX_MU_CHANS];
+
+ struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
+ struct clk *clk;
+ int irq;
+
+ bool side_b;
+};
+
+static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct imx_mu_priv, mbox);
+}
+
+static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
+{
+ iowrite32(val, priv->base + offs);
+}
+
+static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
+{
+ return ioread32(priv->base + offs);
+}
+
+static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->xcr_lock, flags);
+ val = imx_mu_read(priv, IMX_MU_xCR);
+ val &= ~clr;
+ val |= set;
+ imx_mu_write(priv, val, IMX_MU_xCR);
+ spin_unlock_irqrestore(&priv->xcr_lock, flags);
+
+ return val;
+}
+
+static void imx_mu_txdb_tasklet(unsigned long data)
+{
+ struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
+
+ mbox_chan_txdone(cp->chan, 0);
+}
+
+static irqreturn_t imx_mu_isr(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ u32 val, ctrl, dat;
+
+ ctrl = imx_mu_read(priv, IMX_MU_xCR);
+ val = imx_mu_read(priv, IMX_MU_xSR);
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ val &= IMX_MU_xSR_TEn(cp->idx) &
+ (ctrl & IMX_MU_xCR_TIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ val &= IMX_MU_xSR_RFn(cp->idx) &
+ (ctrl & IMX_MU_xCR_RIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ val &= IMX_MU_xSR_GIPn(cp->idx) &
+ (ctrl & IMX_MU_xCR_GIEn(cp->idx));
+ break;
+ default:
+ break;
+ }
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val == IMX_MU_xSR_TEn(cp->idx)) {
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ mbox_chan_txdone(chan, 0);
+ } else if (val == IMX_MU_xSR_RFn(cp->idx)) {
+ dat = imx_mu_read(priv, IMX_MU_xRRn(cp->idx));
+ mbox_chan_received_data(chan, (void *)&dat);
+ } else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), IMX_MU_xSR);
+ mbox_chan_received_data(chan, NULL);
+ } else {
+ dev_warn_ratelimited(priv->dev, "Unhandled interrupt\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int imx_mu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ u32 *arg = data;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_write(priv, *arg, IMX_MU_xTRn(cp->idx));
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_TXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
+ tasklet_schedule(&cp->txdb_tasklet);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
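
The doorbell path above works around the lack of a TX-done interrupt for general-purpose interrupt requests: after raising GIRn, the driver schedules the per-channel tasklet, which reports completion through mbox_chan_txdone() from softirq context rather than from the caller.
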
+
+static int imx_mu_startup(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ int ret;
+
+ if (cp->type == IMX_MU_TYPE_TXDB) {
+ /* Tx doorbells don't have ACK support */
+ tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
+ (unsigned long)cp);
+ return 0;
+ }
+
+ ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED, cp->irq_desc,
+ chan);
+ if (ret) {
+ dev_err(priv->dev,
+ "Unable to acquire IRQ %d\n", priv->irq);
+ return ret;
+ }
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void imx_mu_shutdown(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+
+ if (cp->type == IMX_MU_TYPE_TXDB)
+ tasklet_kill(&cp->txdb_tasklet);
+
+ imx_mu_xcr_rmw(priv, 0,
+ IMX_MU_xCR_TIEn(cp->idx) | IMX_MU_xCR_RIEn(cp->idx));
+
+ free_irq(priv->irq, chan);
+}
+
+static const struct mbox_chan_ops imx_mu_ops = {
+ .send_data = imx_mu_send_data,
+ .startup = imx_mu_startup,
+ .shutdown = imx_mu_shutdown,
+};
+
+static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+ chan = type * 4 + idx;
+
+ if (chan >= mbox->num_chans) {
+ dev_err(mbox->dev, "Unsupported channel number: %d (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
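
Clients therefore address a channel with a two-cell specifier, type then index, which the driver flattens as chan = type * 4 + idx; for example, type 1 (Rx) with index 2 selects mbox_chans[6]. The probe loop below fills con_priv[] with the inverse mapping (idx = i % 4, type = i >> 2).
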
+
+static void imx_mu_init_generic(struct imx_mu_priv *priv)
+{
+ if (priv->side_b)
+ return;
+
+ /* Set default MU configuration */
+ imx_mu_write(priv, 0, IMX_MU_xCR);
+}
+
+static int imx_mu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *iomem;
+ struct imx_mu_priv *priv;
+ unsigned int i;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ if (PTR_ERR(priv->clk) != -ENOENT)
+ return PTR_ERR(priv->clk);
+
+ priv->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i % 4;
+ cp->type = i >> 2;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
+
+ spin_lock_init(&priv->xcr_lock);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &imx_mu_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.num_chans = IMX_MU_CHANS;
+ priv->mbox.of_xlate = imx_mu_xlate;
+ priv->mbox.txdone_irq = true;
+
+ platform_set_drvdata(pdev, priv);
+
+ imx_mu_init_generic(priv);
+
+ return mbox_controller_register(&priv->mbox);
+}
+
+static int imx_mu_remove(struct platform_device *pdev)
+{
+ struct imx_mu_priv *priv = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&priv->mbox);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id imx_mu_dt_ids[] = {
+ { .compatible = "fsl,imx6sx-mu" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
+
+static struct platform_driver imx_mu_driver = {
+ .probe = imx_mu_probe,
+ .remove = imx_mu_remove,
+ .driver = {
+ .name = "imx_mu",
+ .of_match_table = imx_mu_dt_ids,
+ },
+};
+module_platform_driver(imx_mu_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Message Unit driver for i.MX");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index a7040163dd43..b8b2b3533f46 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
- if (!mb_base)
- return -ENOMEM;
+ mb_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mb_base))
+ return PTR_ERR(mb_base);
/* Setup mailbox links */
for (i = 0; i < MBOX_CNT; i++) {
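
The conversion above is the standard devm_ioremap_resource() idiom: unlike bare devm_ioremap(), it validates the resource and requests the memory region, and it signals failure with an ERR_PTR() value rather than NULL, hence the IS_ERR()/PTR_ERR() test. In isolation (a sketch):

    base = devm_ioremap_resource(&pdev->dev, regs);
    if (IS_ERR(base))
        return PTR_ERR(base);   /* e.g. -EINVAL or -EBUSY; never NULL */
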
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
new file mode 100644
index 000000000000..aec46d5d3506
--- /dev/null
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/of_device.h>
+
+#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
+#define CMDQ_IRQ_MASK 0xffff
+#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+
+#define CMDQ_CURR_IRQ_STATUS 0x10
+#define CMDQ_THR_SLOT_CYCLES 0x30
+#define CMDQ_THR_BASE 0x100
+#define CMDQ_THR_SIZE 0x80
+#define CMDQ_THR_WARM_RESET 0x00
+#define CMDQ_THR_ENABLE_TASK 0x04
+#define CMDQ_THR_SUSPEND_TASK 0x08
+#define CMDQ_THR_CURR_STATUS 0x0c
+#define CMDQ_THR_IRQ_STATUS 0x10
+#define CMDQ_THR_IRQ_ENABLE 0x14
+#define CMDQ_THR_CURR_ADDR 0x20
+#define CMDQ_THR_END_ADDR 0x24
+#define CMDQ_THR_WAIT_TOKEN 0x30
+#define CMDQ_THR_PRIORITY 0x40
+
+#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
+#define CMDQ_THR_ENABLED 0x1
+#define CMDQ_THR_DISABLED 0x0
+#define CMDQ_THR_SUSPEND 0x1
+#define CMDQ_THR_RESUME 0x0
+#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
+#define CMDQ_THR_DO_WARM_RESET BIT(0)
+#define CMDQ_THR_IRQ_DONE 0x1
+#define CMDQ_THR_IRQ_ERROR 0x12
+#define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
+#define CMDQ_THR_IS_WAITING BIT(31)
+
+#define CMDQ_JUMP_BY_OFFSET 0x10000000
+#define CMDQ_JUMP_BY_PA 0x10000001
+
+struct cmdq_thread {
+ struct mbox_chan *chan;
+ void __iomem *base;
+ struct list_head task_busy_list;
+ u32 priority;
+ bool atomic_exec;
+};
+
+struct cmdq_task {
+ struct cmdq *cmdq;
+ struct list_head list_entry;
+ dma_addr_t pa_base;
+ struct cmdq_thread *thread;
+ struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
+};
+
+struct cmdq {
+ struct mbox_controller mbox;
+ void __iomem *base;
+ u32 irq;
+ u32 thread_nr;
+ struct cmdq_thread *thread;
+ struct clk *clock;
+ bool suspended;
+};
+
+static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 status;
+
+ writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
+
+ /* If the thread is already disabled, treat the suspend as successful. */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return 0;
+
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
+ status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
+ dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_resume(struct cmdq_thread *thread)
+{
+ writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
+}
+
+static void cmdq_init(struct cmdq *cmdq)
+{
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
+ clk_disable(cmdq->clock);
+}
+
+static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 warm_reset;
+
+ writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
+ warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
+ 0, 10)) {
+ dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ cmdq_thread_reset(cmdq, thread);
+ writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+}
+
+/* notify GCE to re-fetch commands by setting GCE thread PC */
+static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
+{
+ writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
+ thread->base + CMDQ_THR_CURR_ADDR);
+}
+
+static void cmdq_task_insert_into_thread(struct cmdq_task *task)
+{
+ struct device *dev = task->cmdq->mbox.dev;
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *prev_task = list_last_entry(
+ &thread->task_busy_list, typeof(*task), list_entry);
+ u64 *prev_task_base = prev_task->pkt->va_base;
+
+ /* let previous task jump to this task */
+ dma_sync_single_for_cpu(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
+ (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
+ dma_sync_single_for_device(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+
+ cmdq_thread_invalidate_fetched_data(thread);
+}
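For reference, the tail rewrite above relies on the instruction encoding used
throughout this driver: opcode in the high 32 bits, operand in the low 32. A
minimal sketch of the jump encoding, using the same constants as above
(illustration only, not part of the patch):

static u64 cmdq_encode_jump_by_pa(dma_addr_t pa)
{
	/* CMDQ_JUMP_BY_PA in the high word, task DMA address in the low word */
	return (u64)CMDQ_JUMP_BY_PA << 32 | pa;
}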
+
+static bool cmdq_command_is_wfe(u64 cmd)
+{
+ u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+ u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
+ u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
+
+ return ((cmd & wfe_mask) == (wfe_op | wfe_option));
+}
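Conversely, a command matches this predicate when its high word carries the
WFE opcode and its low word carries exactly the UPDATE/WAIT/WAIT_VALUE
options; the event number sits in the unmasked low bits of the high word, so
the comparison ignores it. A hedged sketch of building such a command
(CMDQ_CODE_WFE, CMDQ_OP_CODE_SHIFT and the CMDQ_WFE_* options come from the
mtk-cmdq mailbox header; their exact values are assumed here):

static u64 cmdq_encode_wfe(u16 event)
{
	/* opcode plus event id in the high word, option bits in the low word */
	u64 high = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) | event;
	u64 low = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;

	return high << 32 | low;
}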
+
+/* We assume tasks in the same display GCE thread are waiting for the same event. */
+static void cmdq_task_remove_wfe(struct cmdq_task *task)
+{
+ struct device *dev = task->cmdq->mbox.dev;
+ u64 *base = task->pkt->va_base;
+ int i;
+
+ dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
+ DMA_TO_DEVICE);
+ for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
+ if (cmdq_command_is_wfe(base[i]))
+ base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
+ CMDQ_JUMP_PASS;
+ dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
+ DMA_TO_DEVICE);
+}
+
+static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
+{
+ return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
+}
+
+static void cmdq_thread_wait_end(struct cmdq_thread *thread,
+ unsigned long end_pa)
+{
+ struct device *dev = thread->chan->mbox->dev;
+ unsigned long curr_pa;
+
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
+ curr_pa, curr_pa == end_pa, 1, 20))
+ dev_err(dev, "GCE thread cannot run to end.\n");
+}
+
+static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
+{
+ struct cmdq_task_cb *cb = &task->pkt->async_cb;
+ struct cmdq_cb_data data;
+
+ WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
+ data.sta = sta;
+ data.data = cb->data;
+ cb->cb(data);
+
+ list_del(&task->list_entry);
+}
+
+static void cmdq_task_handle_error(struct cmdq_task *task)
+{
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *next_task;
+
+ dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task);
+ WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0);
+ next_task = list_first_entry_or_null(&thread->task_busy_list,
+ struct cmdq_task, list_entry);
+ if (next_task)
+ writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
+ cmdq_thread_resume(thread);
+}
+
+static void cmdq_thread_irq_handler(struct cmdq *cmdq,
+ struct cmdq_thread *thread)
+{
+ struct cmdq_task *task, *tmp, *curr_task = NULL;
+ u32 curr_pa, irq_flag, task_end_pa;
+ bool err;
+
+ irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
+ writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
+
+ /*
+ * While the ISR is calling this function, another CPU core could run
+ * "release task" right before we acquire the spin lock, and thus
+ * reset or disable this GCE thread, so we must check this GCE
+ * thread's enable bit first.
+ */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return;
+
+ if (irq_flag & CMDQ_THR_IRQ_ERROR)
+ err = true;
+ else if (irq_flag & CMDQ_THR_IRQ_DONE)
+ err = false;
+ else
+ return;
+
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
+ if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
+ curr_task = task;
+
+ if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
+ cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
+ kfree(task);
+ } else if (err) {
+ cmdq_task_exec_done(task, CMDQ_CB_ERROR);
+ cmdq_task_handle_error(curr_task);
+ kfree(task);
+ }
+
+ if (curr_task)
+ break;
+ }
+
+ if (list_empty(&thread->task_busy_list)) {
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+ }
+}
+
+static irqreturn_t cmdq_irq_handler(int irq, void *dev)
+{
+ struct cmdq *cmdq = dev;
+ unsigned long irq_status, flags = 0L;
+ int bit;
+
+ irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & CMDQ_IRQ_MASK;
+ if (!(irq_status ^ CMDQ_IRQ_MASK))
+ return IRQ_NONE;
+
+ for_each_clear_bit(bit, &irq_status, fls(CMDQ_IRQ_MASK)) {
+ struct cmdq_thread *thread = &cmdq->thread[bit];
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ cmdq_thread_irq_handler(cmdq, thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
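Note the inverted sense here: pending per-thread interrupts are reported as
cleared bits in CMDQ_CURR_IRQ_STATUS, so an all-ones status means "nothing
pending" (hence the early IRQ_NONE) and the loop walks the clear bits. A
worked example, assuming a hypothetical 16-thread mask:

/* With CMDQ_IRQ_MASK == 0xffff (assumed), a raw status of 0xfffb has
 * only bit 2 clear, so exactly GCE thread 2 raised an interrupt; a raw
 * status of 0xffff has no clear bits and the handler returns IRQ_NONE. */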
+
+static int cmdq_suspend(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+ struct cmdq_thread *thread;
+ int i;
+ bool task_running = false;
+
+ cmdq->suspended = true;
+
+ for (i = 0; i < cmdq->thread_nr; i++) {
+ thread = &cmdq->thread[i];
+ if (!list_empty(&thread->task_busy_list)) {
+ task_running = true;
+ break;
+ }
+ }
+
+ if (task_running)
+ dev_warn(dev, "task(s) still running when suspending\n");
+
+ clk_unprepare(cmdq->clock);
+
+ return 0;
+}
+
+static int cmdq_resume(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+
+ WARN_ON(clk_prepare(cmdq->clock) < 0);
+ cmdq->suspended = false;
+ return 0;
+}
+
+static int cmdq_remove(struct platform_device *pdev)
+{
+ struct cmdq *cmdq = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&cmdq->mbox);
+ clk_unprepare(cmdq->clock);
+
+ if (cmdq->mbox.chans)
+ devm_kfree(&pdev->dev, cmdq->mbox.chans);
+
+ if (cmdq->thread)
+ devm_kfree(&pdev->dev, cmdq->thread);
+
+ devm_kfree(&pdev->dev, cmdq);
+
+ return 0;
+}
+
+static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task;
+ unsigned long curr_pa, end_pa;
+
+ /* Clients must not flush new tasks while the controller is suspended. */
+ WARN_ON(cmdq->suspended);
+
+ task = kzalloc(sizeof(*task), GFP_ATOMIC);
+ if (!task)
+ return -ENOMEM;
+ task->cmdq = cmdq;
+ INIT_LIST_HEAD(&task->list_entry);
+ task->pa_base = pkt->pa_base;
+ task->thread = thread;
+ task->pkt = pkt;
+
+ if (list_empty(&thread->task_busy_list)) {
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
+
+ writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
+ writel(task->pa_base + pkt->cmd_buf_size,
+ thread->base + CMDQ_THR_END_ADDR);
+ writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
+ writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
+ writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+ } else {
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
+
+ /*
+ * Atomic execution strips the WFE from the following tasks, i.e.
+ * only the first task waits for the event, and later tasks must
+ * not pause while running.
+ */
+ if (thread->atomic_exec) {
+ /* GCE is still executing if the current command is not a WFE */
+ if (!cmdq_thread_is_in_wfe(thread)) {
+ cmdq_thread_resume(thread);
+ cmdq_thread_wait_end(thread, end_pa);
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ /* set to this task directly */
+ writel(task->pa_base,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ } else {
+ cmdq_task_insert_into_thread(task);
+ cmdq_task_remove_wfe(task);
+ smp_mb(); /* modify the jump before enabling the thread */
+ }
+ } else {
+ /* check boundary */
+ if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+ curr_pa == end_pa) {
+ /* set to this task directly */
+ writel(task->pa_base,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ } else {
+ cmdq_task_insert_into_thread(task);
+ smp_mb(); /* modify the jump before enabling the thread */
+ }
+ }
+ writel(task->pa_base + pkt->cmd_buf_size,
+ thread->base + CMDQ_THR_END_ADDR);
+ cmdq_thread_resume(thread);
+ }
+ list_move_tail(&task->list_entry, &thread->task_busy_list);
+
+ return 0;
+}
+
+static int cmdq_mbox_startup(struct mbox_chan *chan)
+{
+ return 0;
+}
+
+static void cmdq_mbox_shutdown(struct mbox_chan *chan)
+{
+}
+
+static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
+ .send_data = cmdq_mbox_send_data,
+ .startup = cmdq_mbox_startup,
+ .shutdown = cmdq_mbox_shutdown,
+};
+
+static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+ struct cmdq_thread *thread;
+
+ if (ind >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
+ thread->priority = sp->args[1];
+ thread->atomic_exec = (sp->args[2] != 0);
+ thread->chan = &mbox->chans[ind];
+
+ return &mbox->chans[ind];
+}
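From a consumer's point of view, the three specifier cells decoded here are
the GCE thread index, the thread priority and the atomic-exec flag from the
client's "mboxes" device-tree property. A minimal, hypothetical client sketch
using only the generic mailbox API (linux/mailbox_client.h); the names are
illustrative, not part of this driver:

/* The client struct must outlive the channel, so it is not stack-local. */
static struct mbox_client example_cl;

static int example_flush(struct device *dev, struct cmdq_pkt *pkt)
{
	struct mbox_chan *chan;

	example_cl.dev = dev;
	example_cl.tx_block = false;

	chan = mbox_request_channel(&example_cl, 0); /* runs cmdq_xlate() */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	return mbox_send_message(chan, pkt); /* reaches cmdq_mbox_send_data() */
}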
+
+static int cmdq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cmdq *cmdq;
+ int err, i;
+
+ cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
+ if (!cmdq)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cmdq->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cmdq->base)) {
+ dev_err(dev, "failed to ioremap gce\n");
+ return PTR_ERR(cmdq->base);
+ }
+
+ cmdq->irq = platform_get_irq(pdev, 0);
+ if (cmdq->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ return cmdq->irq;
+ }
+ err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
+ "mtk_cmdq", cmdq);
+ if (err < 0) {
+ dev_err(dev, "failed to register ISR (%d)\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
+ dev, cmdq->base, cmdq->irq);
+
+ cmdq->clock = devm_clk_get(dev, "gce");
+ if (IS_ERR(cmdq->clock)) {
+ dev_err(dev, "failed to get gce clk\n");
+ return PTR_ERR(cmdq->clock);
+ }
+
+ cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
+ cmdq->mbox.dev = dev;
+ cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
+ sizeof(*cmdq->mbox.chans), GFP_KERNEL);
+ if (!cmdq->mbox.chans)
+ return -ENOMEM;
+
+ cmdq->mbox.num_chans = cmdq->thread_nr;
+ cmdq->mbox.ops = &cmdq_mbox_chan_ops;
+ cmdq->mbox.of_xlate = cmdq_xlate;
+
+ /* make use of TXDONE_BY_ACK */
+ cmdq->mbox.txdone_irq = false;
+ cmdq->mbox.txdone_poll = false;
+
+ cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
+ sizeof(*cmdq->thread), GFP_KERNEL);
+ if (!cmdq->thread)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->thread_nr; i++) {
+ cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
+ CMDQ_THR_SIZE * i;
+ INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
+ cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
+ }
+
+ err = mbox_controller_register(&cmdq->mbox);
+ if (err < 0) {
+ dev_err(dev, "failed to register mailbox: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, cmdq);
+ WARN_ON(clk_prepare(cmdq->clock) < 0);
+
+ cmdq_init(cmdq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cmdq_pm_ops = {
+ .suspend = cmdq_suspend,
+ .resume = cmdq_resume,
+};
+
+static const struct of_device_id cmdq_of_ids[] = {
+ {.compatible = "mediatek,mt8173-gce", .data = (void *)16},
+ {}
+};
+
+static struct platform_driver cmdq_drv = {
+ .probe = cmdq_probe,
+ .remove = cmdq_remove,
+ .driver = {
+ .name = "mtk_cmdq",
+ .pm = &cmdq_pm_ops,
+ .of_match_table = cmdq_of_ids,
+ }
+};
+
+static int __init cmdq_drv_init(void)
+{
+ return platform_driver_register(&cmdq_drv);
+}
+
+static void __exit cmdq_drv_exit(void)
+{
+ platform_driver_unregister(&cmdq_drv);
+}
+
+subsys_initcall(cmdq_drv_init);
+module_exit(cmdq_drv_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index e1e2c085e68e..db66e952a871 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OMAP mailbox driver
*
@@ -6,15 +7,6 @@
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
* Suman Anna <s-anna@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/interrupt.h>
@@ -77,6 +69,10 @@ struct omap_mbox_queue {
bool full;
};
+struct omap_mbox_match_data {
+ u32 intr_type;
+};
+
struct omap_mbox_device {
struct device *dev;
struct mutex cfg_lock;
@@ -646,18 +642,21 @@ static const struct dev_pm_ops omap_mbox_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+
static const struct of_device_id omap_mailbox_of_match[] = {
{
.compatible = "ti,omap2-mailbox",
- .data = (void *)MBOX_INTR_CFG_TYPE1,
+ .data = &omap2_data,
},
{
.compatible = "ti,omap3-mailbox",
- .data = (void *)MBOX_INTR_CFG_TYPE1,
+ .data = &omap2_data,
},
{
.compatible = "ti,omap4-mailbox",
- .data = (void *)MBOX_INTR_CFG_TYPE2,
+ .data = &omap4_data,
},
{
/* end */
@@ -700,7 +699,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
- const struct of_device_id *match;
+ const struct omap_mbox_match_data *match_data;
u32 intr_type, info_count;
u32 num_users, num_fifos;
u32 tmp[3];
@@ -712,10 +711,10 @@ static int omap_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- match = of_match_device(omap_mailbox_of_match, &pdev->dev);
- if (!match)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
- intr_type = (u32)match->data;
+ intr_type = match_data->intr_type;
if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
return -ENODEV;
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index 5d04738c3c8a..5bceafbf6699 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -25,6 +25,17 @@
#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
+#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
+#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
+ (SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)
+
+#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))
+
+#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)
+
+#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
+#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
+
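To make the register layout concrete: each secure-proxy thread owns a
0x1000-byte window, data registers start 4 bytes into it, and the control
offset adds a fixed 0x1000 within its own region. A worked example for a
hypothetical thread id of 3:

/* SPROXY_THREAD_OFFSET(3)         = 0x3000
 * SPROXY_THREAD_DATA_OFFSET(3, 1) = 0x3000 + 0x4 + 0x4 = 0x3008
 * SPROXY_THREAD_STATUS_OFFSET(3)  = 0x3000
 * SPROXY_THREAD_CTRL_OFFSET(3)    = 0x1000 + 0x3000 = 0x4000
 */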
/**
* struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
* @queue_id: Queue Number for this path
@@ -42,14 +53,18 @@ struct ti_msgmgr_valid_queue_desc {
* @queue_count: Number of Queues
* @max_message_size: Message size in bytes
* @max_messages: Number of messages
- * @q_slices: Number of queue engines
- * @q_proxies: Number of queue proxies per page
* @data_first_reg: First data register for proxy data region
* @data_last_reg: Last data register for proxy data region
+ * @status_cnt_mask: Mask for getting the status value
+ * @status_err_mask: Mask for getting the error value, if applicable
* @tx_polled: Do I need to use polled mechanism for tx
* @tx_poll_timeout_ms: Timeout in ms if polled
* @valid_queues: List of Valid queues that the processor can access
+ * @data_region_name: Name of the proxy data region
+ * @status_region_name: Name of the proxy status region
+ * @ctrl_region_name: Name of the proxy control region
* @num_valid_queues: Number of valid queues
+ * @is_sproxy: Is this a Secure Proxy instance?
*
* This structure is used in of match data to describe how integration
* for a specific compatible SoC is done.
@@ -58,14 +73,18 @@ struct ti_msgmgr_desc {
u8 queue_count;
u8 max_message_size;
u8 max_messages;
- u8 q_slices;
- u8 q_proxies;
u8 data_first_reg;
u8 data_last_reg;
+ u32 status_cnt_mask;
+ u32 status_err_mask;
bool tx_polled;
int tx_poll_timeout_ms;
const struct ti_msgmgr_valid_queue_desc *valid_queues;
+ const char *data_region_name;
+ const char *status_region_name;
+ const char *ctrl_region_name;
int num_valid_queues;
+ bool is_sproxy;
};
/**
@@ -78,6 +97,7 @@ struct ti_msgmgr_desc {
* @queue_buff_start: First register of Data Buffer
* @queue_buff_end: Last (or confirmation) register of Data buffer
* @queue_state: Queue status register
+ * @queue_ctrl: Queue Control register
* @chan: Mailbox channel
* @rx_buff: Receive buffer pointer allocated at probe, max_message_size
*/
@@ -90,6 +110,7 @@ struct ti_queue_inst {
void __iomem *queue_buff_start;
void __iomem *queue_buff_end;
void __iomem *queue_state;
+ void __iomem *queue_ctrl;
struct mbox_chan *chan;
u32 *rx_buff;
};
@@ -100,6 +121,7 @@ struct ti_queue_inst {
* @desc: Description of the SoC integration
* @queue_proxy_region: Queue proxy region where queue buffers are located
* @queue_state_debug_region: Queue status register regions
+ * @queue_ctrl_region: Queue Control register regions
* @num_valid_queues: Number of valid queues defined for the processor
* Note: other queues are probably reserved for other processors
* in the SoC.
@@ -112,6 +134,7 @@ struct ti_msgmgr_inst {
const struct ti_msgmgr_desc *desc;
void __iomem *queue_proxy_region;
void __iomem *queue_state_debug_region;
+ void __iomem *queue_ctrl_region;
u8 num_valid_queues;
struct ti_queue_inst *qinsts;
struct mbox_controller mbox;
@@ -120,25 +143,54 @@ struct ti_msgmgr_inst {
/**
* ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
+ * @d: Description of message manager
* @qinst: Queue instance for which we check the number of pending messages
*
* Return: number of messages pending in the queue (0 == no pending messages)
*/
-static inline int ti_msgmgr_queue_get_num_messages(struct ti_queue_inst *qinst)
+static inline int
+ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
{
u32 val;
+ u32 status_cnt_mask = d->status_cnt_mask;
/*
* We cannot use relaxed operation here - update may happen
* real-time.
*/
- val = readl(qinst->queue_state) & Q_STATE_ENTRY_COUNT_MASK;
- val >>= __ffs(Q_STATE_ENTRY_COUNT_MASK);
+ val = readl(qinst->queue_state) & status_cnt_mask;
+ val >>= __ffs(status_cnt_mask);
return val;
}
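The mask/shift pair turns the raw status word into a count regardless of
where the hardware places the field. A worked example for the message-manager
case, where status_cnt_mask is Q_STATE_ENTRY_COUNT_MASK (0xFFF000, so
__ffs() yields 12):

/* A raw queue_state of 0x005000 gives
 * (0x005000 & 0xFFF000) >> 12 == 5 pending messages. */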
/**
+ * ti_msgmgr_queue_is_error() - Check to see if there is queue error
+ * @d: Description of message manager
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: true if error, else false
+ */
+static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
+{
+ u32 val;
+
+ /* Msgmgr has no error detection */
+ if (!d->is_sproxy)
+ return false;
+
+ /*
+ * We cannot use relaxed operation here - update may happen
+ * real-time.
+ */
+ val = readl(qinst->queue_state) & d->status_err_mask;
+
+ return val ? true : false;
+}
+
+/**
* ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
* @irq: Interrupt number
* @p: Channel Pointer
@@ -171,8 +223,14 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
return IRQ_NONE;
}
+ desc = inst->desc;
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on Rx channel %s\n", qinst->name);
+ return IRQ_NONE;
+ }
+
/* Do I actually have messages to read? */
- msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
if (!msg_count) {
/* Shared IRQ? */
dev_dbg(dev, "Spurious event - 0 pending data!\n");
@@ -185,7 +243,6 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
* of how many bytes I should be reading. Let the client figure this
* out.. I just read the full message and pass it on..
*/
- desc = inst->desc;
message.len = desc->max_message_size;
message.buf = (u8 *)qinst->rx_buff;
@@ -228,12 +285,20 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
int msg_count;
if (qinst->is_tx)
return false;
- msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
return msg_count ? true : false;
}
@@ -247,12 +312,25 @@ static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
int msg_count;
if (!qinst->is_tx)
return false;
- msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+
+ if (desc->is_sproxy) {
+ /* In secure proxy, msg_count indicates how many we can send */
+ return msg_count ? true : false;
+ }
/* if we have any messages pending.. */
return msg_count ? false : true;
@@ -282,6 +360,11 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
}
desc = inst->desc;
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return -EIO;
+ }
+
if (desc->max_message_size < message->len) {
dev_err(dev, "Queue %s message length %zu > max %d\n",
qinst->name, message->len, desc->max_message_size);
@@ -315,6 +398,53 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
}
/**
+ * ti_msgmgr_queue_rx_irq_req() - RX IRQ request
+ * @dev: device pointer
+ * @d: descriptor for ti_msgmgr
+ * @qinst: Queue instance
+ * @chan: Channel pointer
+ */
+static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
+ const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ int ret = 0;
+ char of_rx_irq_name[7];
+ struct device_node *np;
+
+ snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
+ "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
+
+ /* Look up the IRQ if we do not already have it */
+ if (qinst->irq < 0) {
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return -ENODATA;
+ qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
+ of_node_put(np);
+
+ if (qinst->irq < 0) {
+ dev_err(dev,
+ "QID %d PID %d:No IRQ[%s]: %d\n",
+ qinst->queue_id, qinst->proxy_id,
+ of_rx_irq_name, qinst->irq);
+ return qinst->irq;
+ }
+ }
+
+ /* With the expectation that the IRQ might be shared in the SoC */
+ ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
+ IRQF_SHARED, qinst->name, chan);
+ if (ret) {
+ dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
+ qinst->irq, qinst->name, ret);
+ }
+
+ return ret;
+}
+
+/**
* ti_msgmgr_queue_startup() - Startup queue
* @chan: Channel pointer
*
@@ -322,19 +452,39 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
*/
static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
{
- struct ti_queue_inst *qinst = chan->con_priv;
struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *d = inst->desc;
int ret;
+ int msg_count;
+
+ /*
+ * For sproxy, the direction bit in the thread's control register
+ * tells us the direction: if the bit is set this is an Rx thread,
+ * otherwise it is Tx.
+ */
+ if (d->is_sproxy) {
+ qinst->is_tx = (readl(qinst->queue_ctrl) &
+ SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
+
+ if (!msg_count && qinst->is_tx) {
+ dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
+ qinst->name);
+ return -EINVAL;
+ }
+ }
if (!qinst->is_tx) {
- /*
- * With the expectation that the IRQ might be shared in SoC
- */
- ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
- IRQF_SHARED, qinst->name, chan);
+ /* Allocate usage buffer for rx */
+ qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
+ if (!qinst->rx_buff)
+ return -ENOMEM;
+ /* Request IRQ */
+ ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
if (ret) {
- dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
- qinst->irq, qinst->name, ret);
+ kfree(qinst->rx_buff);
return ret;
}
}
@@ -350,8 +500,10 @@ static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
- if (!qinst->is_tx)
+ if (!qinst->is_tx) {
free_irq(qinst->irq, chan);
+ kfree(qinst->rx_buff);
+ }
}
/**
@@ -368,20 +520,38 @@ static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
struct ti_msgmgr_inst *inst;
int req_qid, req_pid;
struct ti_queue_inst *qinst;
- int i;
+ const struct ti_msgmgr_desc *d;
+ int i, ncells;
inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
if (WARN_ON(!inst))
return ERR_PTR(-EINVAL);
- /* #mbox-cells is 2 */
- if (p->args_count != 2) {
- dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n",
- p->args_count);
+ d = inst->desc;
+
+ if (d->is_sproxy)
+ ncells = 1;
+ else
+ ncells = 2;
+ if (p->args_count != ncells) {
+ dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
+ p->args_count, ncells);
return ERR_PTR(-EINVAL);
}
- req_qid = p->args[0];
- req_pid = p->args[1];
+ if (ncells == 1) {
+ req_qid = 0;
+ req_pid = p->args[0];
+ } else {
+ req_qid = p->args[0];
+ req_pid = p->args[1];
+ }
+
+ if (d->is_sproxy) {
+ if (req_pid >= d->num_valid_queues)
+ goto err;
+ qinst = &inst->qinsts[req_pid];
+ return qinst->chan;
+ }
for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
i++, qinst++) {
@@ -389,6 +559,7 @@ static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
return qinst->chan;
}
+err:
dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n",
req_qid, req_pid, p->np->name);
return ERR_PTR(-ENOENT);
@@ -415,6 +586,8 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev,
struct ti_queue_inst *qinst,
struct mbox_chan *chan)
{
+ char *dir;
+
qinst->proxy_id = qd->proxy_id;
qinst->queue_id = qd->queue_id;
@@ -424,40 +597,43 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev,
return -ERANGE;
}
- qinst->is_tx = qd->is_tx;
- snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
- dev_name(dev), qinst->is_tx ? "tx" : "rx", qinst->queue_id,
- qinst->proxy_id);
-
- if (!qinst->is_tx) {
- char of_rx_irq_name[7];
-
- snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
- "rx_%03d", qinst->queue_id);
-
- qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
- if (qinst->irq < 0) {
- dev_crit(dev,
- "[%d]QID %d PID %d:No IRQ[%s]: %d\n",
- idx, qinst->queue_id, qinst->proxy_id,
- of_rx_irq_name, qinst->irq);
- return qinst->irq;
- }
- /* Allocate usage buffer for rx */
- qinst->rx_buff = devm_kzalloc(dev,
- d->max_message_size, GFP_KERNEL);
- if (!qinst->rx_buff)
- return -ENOMEM;
+ if (d->is_sproxy) {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_last_reg);
+ qinst->queue_state = inst->queue_state_debug_region +
+ SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
+ qinst->queue_ctrl = inst->queue_ctrl_region +
+ SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);
+
+ /* XXX: DO NOT read registers here! Some may be unusable. */
+ dir = "thr";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
+ dev_name(dev), dir, qinst->proxy_id);
+ } else {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_last_reg);
+ qinst->queue_state =
+ inst->queue_state_debug_region +
+ Q_STATE_OFFSET(qinst->queue_id);
+ qinst->is_tx = qd->is_tx;
+ dir = qinst->is_tx ? "tx" : "rx";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
+ dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
}
- qinst->queue_buff_start = inst->queue_proxy_region +
- Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg);
- qinst->queue_buff_end = inst->queue_proxy_region +
- Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg);
- qinst->queue_state = inst->queue_state_debug_region +
- Q_STATE_OFFSET(qinst->queue_id);
qinst->chan = chan;
+ /* Seed the IRQ with an error value - it is looked up lazily */
+ qinst->irq = -EINVAL;
+
chan->con_priv = qinst;
dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
@@ -494,19 +670,37 @@ static const struct ti_msgmgr_desc k2g_desc = {
.queue_count = 64,
.max_message_size = 64,
.max_messages = 128,
- .q_slices = 1,
- .q_proxies = 1,
+ .data_region_name = "queue_proxy_region",
+ .status_region_name = "queue_state_debug_region",
.data_first_reg = 16,
.data_last_reg = 31,
+ .status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
.tx_polled = false,
.valid_queues = k2g_valid_queues,
.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
+ .is_sproxy = false,
+};
+
+static const struct ti_msgmgr_desc am654_desc = {
+ .queue_count = 190,
+ .num_valid_queues = 190,
+ .max_message_size = 60,
+ .data_region_name = "target_data",
+ .status_region_name = "rt",
+ .ctrl_region_name = "scfg",
+ .data_first_reg = 0,
+ .data_last_reg = 14,
+ .status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
+ .tx_polled = false,
+ .is_sproxy = true,
};
static const struct of_device_id ti_msgmgr_of_match[] = {
{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
+ {.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
{ /* Sentinel */ }
};
+
MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
static int ti_msgmgr_probe(struct platform_device *pdev)
@@ -546,17 +740,25 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
inst->desc = desc;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "queue_proxy_region");
+ desc->data_region_name);
inst->queue_proxy_region = devm_ioremap_resource(dev, res);
if (IS_ERR(inst->queue_proxy_region))
return PTR_ERR(inst->queue_proxy_region);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "queue_state_debug_region");
+ desc->status_region_name);
inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
if (IS_ERR(inst->queue_state_debug_region))
return PTR_ERR(inst->queue_state_debug_region);
+ if (desc->is_sproxy) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ desc->ctrl_region_name);
+ inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_ctrl_region))
+ return PTR_ERR(inst->queue_ctrl_region);
+ }
+
dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
inst->queue_proxy_region, inst->queue_state_debug_region);
@@ -578,12 +780,29 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
return -ENOMEM;
inst->chans = chans;
- for (i = 0, queue_desc = desc->valid_queues;
- i < queue_count; i++, qinst++, chans++, queue_desc++) {
- ret = ti_msgmgr_queue_setup(i, dev, np, inst,
- desc, queue_desc, qinst, chans);
- if (ret)
- return ret;
+ if (desc->is_sproxy) {
+ struct ti_msgmgr_valid_queue_desc sproxy_desc;
+
+ /* All proxies may be valid in a Secure Proxy instance */
+ for (i = 0; i < queue_count; i++, qinst++, chans++) {
+ sproxy_desc.queue_id = 0;
+ sproxy_desc.proxy_id = i;
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, &sproxy_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Only some proxies are valid in a Message Manager */
+ for (i = 0, queue_desc = desc->valid_queues;
+ i < queue_count; i++, qinst++, chans++, queue_desc++) {
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, queue_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
}
mbox = &inst->mbox;
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 17bf109c58e9..f6e0a8b3a61e 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,7 +1,8 @@
config BCACHE
tristate "Block device as cache"
- ---help---
+ select CRC64
+ help
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
@@ -10,7 +11,7 @@ config BCACHE
config BCACHE_DEBUG
bool "Bcache debugging"
depends on BCACHE
- ---help---
+ help
Don't select this option unless you're a developer
Enables extra debugging tools, allows expensive runtime checks to be
@@ -20,7 +21,7 @@ config BCACHE_CLOSURES_DEBUG
bool "Debug closures"
depends on BCACHE
select DEBUG_FS
- ---help---
+ help
Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous
operations that get stuck.
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 7fa2631b422c..7a28232d868b 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
struct cache *ca;
struct bucket *b;
- unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
- unsigned i;
+ unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
+ unsigned int i;
int r;
atomic_sub(sectors, &c->rescale);
@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
#define bucket_prio(b) \
({ \
- unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
+ unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
\
(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
})
@@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca)
while (!fifo_full(&ca->free_inc)) {
size_t n;
+
get_random_bytes(&n, sizeof(n));
n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
@@ -301,7 +302,7 @@ do { \
static int bch_allocator_push(struct cache *ca, long bucket)
{
- unsigned i;
+ unsigned int i;
/* Prios/gens are actually the most important reserve */
if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
@@ -385,7 +386,7 @@ out:
/* Allocation */
-long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
DEFINE_WAIT(w);
struct bucket *b;
@@ -421,7 +422,7 @@ out:
if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
- unsigned j;
+ unsigned int j;
for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
@@ -470,14 +471,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
__bch_bucket_free(PTR_CACHE(c, k, i),
PTR_BUCKET(c, k, i));
}
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait)
{
int i;
@@ -510,10 +511,11 @@ err:
return -1;
}
-int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait)
{
int ret;
+
mutex_lock(&c->bucket_lock);
ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
mutex_unlock(&c->bucket_lock);
@@ -524,8 +526,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct open_bucket {
struct list_head list;
- unsigned last_write_point;
- unsigned sectors_free;
+ unsigned int last_write_point;
+ unsigned int sectors_free;
BKEY_PADDED(key);
};
@@ -556,7 +558,7 @@ struct open_bucket {
*/
static struct open_bucket *pick_data_bucket(struct cache_set *c,
const struct bkey *search,
- unsigned write_point,
+ unsigned int write_point,
struct bkey *alloc)
{
struct open_bucket *ret, *ret_task = NULL;
@@ -595,12 +597,16 @@ found:
*
* If s->writeback is true, will not fail.
*/
-bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
- unsigned write_point, unsigned write_prio, bool wait)
+bool bch_alloc_sectors(struct cache_set *c,
+ struct bkey *k,
+ unsigned int sectors,
+ unsigned int write_point,
+ unsigned int write_prio,
+ bool wait)
{
struct open_bucket *b;
BKEY_PADDED(key) alloc;
- unsigned i;
+ unsigned int i;
/*
* We might have to allocate a new bucket, which we can't do with a
@@ -613,7 +619,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
spin_lock(&c->data_bucket_lock);
while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
- unsigned watermark = write_prio
+ unsigned int watermark = write_prio
? RESERVE_MOVINGGC
: RESERVE_NONE;
@@ -702,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c)
for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+
if (!b)
return -ENOMEM;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d6bf294f3907..83504dd8100a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -252,7 +252,7 @@ struct bcache_device {
struct kobject kobj;
struct cache_set *c;
- unsigned id;
+ unsigned int id;
#define BCACHEDEVNAME_SIZE 12
char name[BCACHEDEVNAME_SIZE];
@@ -264,18 +264,19 @@ struct bcache_device {
#define BCACHE_DEV_UNLINK_DONE 2
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
- unsigned nr_stripes;
- unsigned stripe_size;
+ unsigned int nr_stripes;
+ unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
struct bio_set bio_split;
- unsigned data_csum:1;
+ unsigned int data_csum:1;
- int (*cache_miss)(struct btree *, struct search *,
- struct bio *, unsigned);
- int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
+ int (*cache_miss)(struct btree *b, struct search *s,
+ struct bio *bio, unsigned int sectors);
+ int (*ioctl)(struct bcache_device *d, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
};
struct io {
@@ -284,7 +285,7 @@ struct io {
struct list_head lru;
unsigned long jiffies;
- unsigned sequential;
+ unsigned int sequential;
sector_t last;
};
@@ -328,13 +329,6 @@ struct cached_dev {
*/
atomic_t has_dirty;
- /*
- * Set to zero by things that touch the backing volume-- except
- * writeback. Incremented by writeback. Used to determine when to
- * accelerate idle writeback.
- */
- atomic_t backing_idle;
-
struct bch_ratelimit writeback_rate;
struct delayed_work writeback_rate_update;
@@ -365,18 +359,18 @@ struct cached_dev {
struct cache_accounting accounting;
/* The rest of this all shows up in sysfs */
- unsigned sequential_cutoff;
- unsigned readahead;
+ unsigned int sequential_cutoff;
+ unsigned int readahead;
- unsigned io_disable:1;
- unsigned verify:1;
- unsigned bypass_torture_test:1;
+ unsigned int io_disable:1;
+ unsigned int verify:1;
+ unsigned int bypass_torture_test:1;
- unsigned partial_stripes_expensive:1;
- unsigned writeback_metadata:1;
- unsigned writeback_running:1;
+ unsigned int partial_stripes_expensive:1;
+ unsigned int writeback_metadata:1;
+ unsigned int writeback_running:1;
unsigned char writeback_percent;
- unsigned writeback_delay;
+ unsigned int writeback_delay;
uint64_t writeback_rate_target;
int64_t writeback_rate_proportional;
@@ -384,16 +378,16 @@ struct cached_dev {
int64_t writeback_rate_integral_scaled;
int32_t writeback_rate_change;
- unsigned writeback_rate_update_seconds;
- unsigned writeback_rate_i_term_inverse;
- unsigned writeback_rate_p_term_inverse;
- unsigned writeback_rate_minimum;
+ unsigned int writeback_rate_update_seconds;
+ unsigned int writeback_rate_i_term_inverse;
+ unsigned int writeback_rate_p_term_inverse;
+ unsigned int writeback_rate_minimum;
enum stop_on_failure stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
atomic_t io_errors;
- unsigned error_limit;
- unsigned offline_seconds;
+ unsigned int error_limit;
+ unsigned int offline_seconds;
char backing_dev_name[BDEVNAME_SIZE];
};
@@ -423,9 +417,9 @@ struct cache {
/*
* When allocating new buckets, prio_write() gets first dibs - since we
* may not be able to allocate at all without writing priorities and gens.
- * prio_buckets[] contains the last buckets we wrote priorities to (so
- * gc can mark them as metadata), prio_next[] contains the buckets
- * allocated for the next prio write.
+ * prio_last_buckets[] contains the last buckets we wrote priorities to
+ * (so gc can mark them as metadata), prio_buckets[] contains the
+ * buckets allocated for the next prio write.
*/
uint64_t *prio_buckets;
uint64_t *prio_last_buckets;
@@ -454,7 +448,7 @@ struct cache {
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
*/
- unsigned invalidate_needs_gc;
+ unsigned int invalidate_needs_gc;
bool discard; /* Get rid of? */
@@ -474,11 +468,12 @@ struct cache {
struct gc_stat {
size_t nodes;
+ size_t nodes_pre;
size_t key_bytes;
size_t nkeys;
uint64_t data; /* sectors */
- unsigned in_use; /* percent */
+ unsigned int in_use; /* percent */
};
/*
@@ -514,6 +509,8 @@ struct cache_set {
struct cache_accounting accounting;
unsigned long flags;
+ atomic_t idle_counter;
+ atomic_t at_max_writeback_rate;
struct cache_sb sb;
@@ -522,9 +519,11 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
- unsigned devices_max_used;
+ unsigned int devices_max_used;
+ atomic_t attached_dev_nr;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
+ atomic_long_t flash_dev_dirty_sectors;
struct closure caching;
struct closure sb_write;
@@ -550,7 +549,7 @@ struct cache_set {
* Default number of pages for a new btree node - may be less than a
* full bucket
*/
- unsigned btree_pages;
+ unsigned int btree_pages;
/*
* Lists of struct btrees; lru is the list for structs that have memory
@@ -573,7 +572,7 @@ struct cache_set {
struct list_head btree_cache_freed;
/* Number of elements in btree_cache + btree_cache_freeable lists */
- unsigned btree_cache_used;
+ unsigned int btree_cache_used;
/*
* If we need to allocate memory for a new btree node and that
@@ -603,6 +602,10 @@ struct cache_set {
*/
atomic_t rescale;
/*
+ * Used by GC to check whether any front-side I/O is in flight
+ */
+ atomic_t search_inflight;
+ /*
* When we invalidate buckets, we use both the priority and the amount
* of good data to determine which buckets to reuse first - to weight
* those together consistently we keep track of the smallest nonzero
@@ -611,8 +614,8 @@ struct cache_set {
uint16_t min_prio;
/*
- * max(gen - last_gc) for all buckets. When it gets too big we have to gc
- * to keep gens from wrapping around.
+ * max(gen - last_gc) for all buckets. When it gets too big we have to
+ * gc to keep gens from wrapping around.
*/
uint8_t need_gc;
struct gc_stat gc_stats;
@@ -647,7 +650,7 @@ struct cache_set {
struct mutex verify_lock;
#endif
- unsigned nr_uuids;
+ unsigned int nr_uuids;
struct uuid_entry *uuids;
BKEY_PADDED(uuid_bucket);
struct closure uuid_write;
@@ -668,12 +671,12 @@ struct cache_set {
struct journal journal;
#define CONGESTED_MAX 1024
- unsigned congested_last_us;
+ unsigned int congested_last_us;
atomic_t congested;
/* The rest of this all shows up in sysfs */
- unsigned congested_read_threshold_us;
- unsigned congested_write_threshold_us;
+ unsigned int congested_read_threshold_us;
+ unsigned int congested_write_threshold_us;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
@@ -692,16 +695,16 @@ struct cache_set {
ON_ERROR_PANIC,
} on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
- unsigned error_limit;
- unsigned error_decay;
+ unsigned int error_limit;
+ unsigned int error_decay;
unsigned short journal_delay_ms;
bool expensive_debug_checks;
- unsigned verify:1;
- unsigned key_merging_disabled:1;
- unsigned gc_always_rewrite:1;
- unsigned shrinker_disabled:1;
- unsigned copy_gc_enabled:1;
+ unsigned int verify:1;
+ unsigned int key_merging_disabled:1;
+ unsigned int gc_always_rewrite:1;
+ unsigned int shrinker_disabled:1;
+ unsigned int copy_gc_enabled:1;
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
@@ -710,7 +713,7 @@ struct cache_set {
};
struct bbio {
- unsigned submit_time_us;
+ unsigned int submit_time_us;
union {
struct bkey key;
uint64_t _pad[3];
@@ -727,10 +730,10 @@ struct bbio {
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b) \
- ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+ ((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
#define btree_default_blocks(c) \
- ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+ ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
@@ -759,21 +762,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
static inline struct cache *PTR_CACHE(struct cache_set *c,
const struct bkey *k,
- unsigned ptr)
+ unsigned int ptr)
{
return c->cache[PTR_DEV(k, ptr)];
}
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
const struct bkey *k,
- unsigned ptr)
+ unsigned int ptr)
{
return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}
static inline struct bucket *PTR_BUCKET(struct cache_set *c,
const struct bkey *k,
- unsigned ptr)
+ unsigned int ptr)
{
return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
@@ -781,17 +784,18 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
uint8_t r = a - b;
+
return r > 128U ? 0 : r;
}
static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
- unsigned i)
+ unsigned int i)
{
return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}
static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
- unsigned i)
+ unsigned int i)
{
return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}
@@ -877,16 +881,16 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
#define BUCKET_GC_GEN_MAX 96U
#define kobj_attribute_write(n, fn) \
- static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
+ static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
#define kobj_attribute_rw(n, show, store) \
static struct kobj_attribute ksysfs_##n = \
- __ATTR(n, S_IWUSR|S_IRUSR, show, store)
+ __ATTR(n, 0600, show, store)
static inline void wake_up_allocators(struct cache_set *c)
{
struct cache *ca;
- unsigned i;
+ unsigned int i;
for_each_cache(ca, c, i)
wake_up_process(ca->alloc_thread);
@@ -922,40 +926,43 @@ static inline void wait_for_kthread_stop(void)
/* Forward declarations */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
-void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
-void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
- blk_status_t, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
- const char *);
-void bch_bbio_free(struct bio *, struct cache_set *);
-struct bio *bch_bbio_alloc(struct cache_set *);
-
-void __bch_submit_bbio(struct bio *, struct cache_set *);
-void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
-
-uint8_t bch_inc_gen(struct cache *, struct bucket *);
-void bch_rescale_priorities(struct cache_set *, int);
-
-bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
-void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
-
-void __bch_bucket_free(struct cache *, struct bucket *);
-void bch_bucket_free(struct cache_set *, struct bkey *);
-
-long bch_bucket_alloc(struct cache *, unsigned, bool);
-int __bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, bool);
-int bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, bool);
-bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
- unsigned, unsigned, bool);
+void bch_count_io_errors(struct cache *ca, blk_status_t error,
+ int is_read, const char *m);
+void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+ blk_status_t error, const char *m);
+void bch_bbio_endio(struct cache_set *c, struct bio *bio,
+ blk_status_t error, const char *m);
+void bch_bbio_free(struct bio *bio, struct cache_set *c);
+struct bio *bch_bbio_alloc(struct cache_set *c);
+
+void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
+void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+ struct bkey *k, unsigned int ptr);
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
+void bch_rescale_priorities(struct cache_set *c, int sectors);
+
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
+
+void __bch_bucket_free(struct cache *ca, struct bucket *b);
+void bch_bucket_free(struct cache_set *c, struct bkey *k);
+
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, int n, bool wait);
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ struct bkey *k, int n, bool wait);
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
+ unsigned int sectors, unsigned int write_point,
+ unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
-bool bch_cache_set_error(struct cache_set *, const char *, ...);
+bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
-void bch_prio_write(struct cache *);
-void bch_write_bdev_super(struct cached_dev *, struct closure *);
+void bch_prio_write(struct cache *ca);
+void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
extern struct mutex bch_register_lock;
@@ -967,35 +974,36 @@ extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;
-void bch_cached_dev_release(struct kobject *);
-void bch_flash_dev_release(struct kobject *);
-void bch_cache_set_release(struct kobject *);
-void bch_cache_release(struct kobject *);
+void bch_cached_dev_release(struct kobject *kobj);
+void bch_flash_dev_release(struct kobject *kobj);
+void bch_cache_set_release(struct kobject *kobj);
+void bch_cache_release(struct kobject *kobj);
-int bch_uuid_write(struct cache_set *);
-void bcache_write_super(struct cache_set *);
+int bch_uuid_write(struct cache_set *c);
+void bcache_write_super(struct cache_set *c);
int bch_flash_dev_create(struct cache_set *c, uint64_t size);
-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
-void bch_cached_dev_detach(struct cached_dev *);
-void bch_cached_dev_run(struct cached_dev *);
-void bcache_device_stop(struct bcache_device *);
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ uint8_t *set_uuid);
+void bch_cached_dev_detach(struct cached_dev *dc);
+void bch_cached_dev_run(struct cached_dev *dc);
+void bcache_device_stop(struct bcache_device *d);
-void bch_cache_set_unregister(struct cache_set *);
-void bch_cache_set_stop(struct cache_set *);
+void bch_cache_set_unregister(struct cache_set *c);
+void bch_cache_set_stop(struct cache_set *c);
-struct cache_set *bch_cache_set_alloc(struct cache_sb *);
-void bch_btree_cache_free(struct cache_set *);
-int bch_btree_cache_alloc(struct cache_set *);
-void bch_moving_init_cache_set(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
-void bch_open_buckets_free(struct cache_set *);
+struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
+void bch_btree_cache_free(struct cache_set *c);
+int bch_btree_cache_alloc(struct cache_set *c);
+void bch_moving_init_cache_set(struct cache_set *c);
+int bch_open_buckets_alloc(struct cache_set *c);
+void bch_open_buckets_free(struct cache_set *c);
int bch_cache_allocator_start(struct cache *ca);
void bch_debug_exit(void);
-int bch_debug_init(struct kobject *);
+void bch_debug_init(struct kobject *kobj);
void bch_request_exit(void);
int bch_request_init(void);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index f3403b45bc28..8f07fa6e1739 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -18,31 +18,31 @@
#ifdef CONFIG_BCACHE_DEBUG
-void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
struct bkey *k, *next;
for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k);
- printk(KERN_ERR "block %u key %u/%u: ", set,
- (unsigned) ((u64 *) k - i->d), i->keys);
+ pr_err("block %u key %u/%u: ", set,
+ (unsigned int) ((u64 *) k - i->d), i->keys);
if (b->ops->key_dump)
b->ops->key_dump(b, k);
else
- printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+ pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
if (next < bset_bkey_last(i) &&
bkey_cmp(k, b->ops->is_extents ?
&START_KEY(next) : next) > 0)
- printk(KERN_ERR "Key skipped backwards\n");
+ pr_err("Key skipped backwards\n");
}
}
void bch_dump_bucket(struct btree_keys *b)
{
- unsigned i;
+ unsigned int i;
console_lock();
for (i = 0; i <= b->nsets; i++)
@@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
- unsigned ret = 0;
+ unsigned int ret = 0;
struct btree_iter iter;
struct bkey *k;
@@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
/* Keylists */
-int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
size_t oldsize = bch_keylist_nkeys(l);
size_t newsize = oldsize + u64s;
@@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l)
/* Key/pointer manipulation */
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
- unsigned i)
+ unsigned int i)
{
BUG_ON(i > KEY_PTRS(src));
@@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
- unsigned i, len = 0;
+ unsigned int i, len = 0;
if (bkey_cmp(where, &START_KEY(k)) <= 0)
return false;
@@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
- unsigned len = 0;
+ unsigned int len = 0;
if (bkey_cmp(where, k) >= 0)
return false;
@@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
struct bkey_float {
- unsigned exponent:BKEY_EXPONENT_BITS;
- unsigned m:BKEY_MID_BITS;
- unsigned mantissa:BKEY_MANTISSA_BITS;
+ unsigned int exponent:BKEY_EXPONENT_BITS;
+ unsigned int m:BKEY_MID_BITS;
+ unsigned int mantissa:BKEY_MANTISSA_BITS;
} __packed;
/*
@@ -311,7 +311,9 @@ void bch_btree_keys_free(struct btree_keys *b)
}
EXPORT_SYMBOL(bch_btree_keys_free);
-int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+int bch_btree_keys_alloc(struct btree_keys *b,
+ unsigned int page_order,
+ gfp_t gfp)
{
struct bset_tree *t = b->set;
@@ -345,7 +347,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
{
- unsigned i;
+ unsigned int i;
b->ops = ops;
b->expensive_debug_checks = expensive_debug_checks;
@@ -366,7 +368,11 @@ EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
-static unsigned inorder_next(unsigned j, unsigned size)
+/*
+ * Return the array index that follows j in an in-order traversal
+ * of a binary tree stored in a linear array.
+ */
+static unsigned int inorder_next(unsigned int j, unsigned int size)
{
if (j * 2 + 1 < size) {
j = j * 2 + 1;
@@ -379,7 +385,11 @@ static unsigned inorder_next(unsigned j, unsigned size)
return j;
}
-static unsigned inorder_prev(unsigned j, unsigned size)
+/*
+ * Return the array index that precedes j in an in-order traversal
+ * of a binary tree stored in a linear array.
+ */
+static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
if (j * 2 < size) {
j = j * 2;
@@ -392,7 +402,8 @@ static unsigned inorder_prev(unsigned j, unsigned size)
return j;
}
-/* I have no idea why this code works... and I'm the one who wrote it
+/*
+ * I have no idea why this code works... and I'm the one who wrote it
*
* However, I do know what it does:
* Given a binary tree constructed in an array (i.e. how you normally implement
@@ -405,10 +416,12 @@ static unsigned inorder_prev(unsigned j, unsigned size)
* extra is a function of size:
* extra = (size - rounddown_pow_of_two(size - 1)) << 1;
*/
-static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
+static unsigned int __to_inorder(unsigned int j,
+ unsigned int size,
+ unsigned int extra)
{
- unsigned b = fls(j);
- unsigned shift = fls(size - 1) - b;
+ unsigned int b = fls(j);
+ unsigned int shift = fls(size - 1) - b;
j ^= 1U << (b - 1);
j <<= 1;
@@ -421,14 +434,20 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
return j;
}
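As a sanity check on the mapping, take a perfect seven-node tree: t->size is
8, so extra == (8 - rounddown_pow_of_two(7)) << 1 == 8, and the extra-based
correction never fires for any node. Worked example (illustration only):

/* size == 8: nodes 1..7 form a perfect tree whose in-order traversal
 * is 4 2 5 1 6 3 7, and the formula agrees, e.g.
 *   __to_inorder(1, 8, 8) == 4  (the root lands in the middle)
 *   __to_inorder(4, 8, 8) == 1  (the leftmost leaf comes first)
 */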
-static unsigned to_inorder(unsigned j, struct bset_tree *t)
+/*
+ * Return the cacheline index in bset_tree->data, where j is the index
+ * into the linear array that stores the auxiliary binary tree.
+ */
+static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
return __to_inorder(j, t->size, t->extra);
}
-static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
+static unsigned int __inorder_to_tree(unsigned int j,
+ unsigned int size,
+ unsigned int extra)
{
- unsigned shift;
+ unsigned int shift;
if (j > extra)
j += j - extra;
@@ -441,7 +460,11 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
return j;
}
-static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
+/*
+ * Return an index into the linear array which stores the auxiliary
+ * binary tree; j is the cacheline index of t->data.
+ */
+static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
return __inorder_to_tree(j, t->size, t->extra);
}
@@ -452,14 +475,15 @@ void inorder_test(void)
unsigned long done = 0;
ktime_t start = ktime_get();
- for (unsigned size = 2;
+ for (unsigned int size = 2;
size < 65536000;
size++) {
- unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
- unsigned i = 1, j = rounddown_pow_of_two(size - 1);
+ unsigned int extra =
+ (size - rounddown_pow_of_two(size - 1)) << 1;
+ unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
if (!(size % 4096))
- printk(KERN_NOTICE "loop %u, %llu per us\n", size,
+ pr_notice("loop %u, %llu per us\n", size,
done / ktime_us_delta(ktime_get(), start));
while (1) {
@@ -502,30 +526,31 @@ void inorder_test(void)
* of the previous key so we can walk backwards to it from t->tree[j]'s key.
*/
-static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
- unsigned offset)
+static struct bkey *cacheline_to_bkey(struct bset_tree *t,
+ unsigned int cacheline,
+ unsigned int offset)
{
return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}
-static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}
-static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
- unsigned cacheline,
+static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
+ unsigned int cacheline,
struct bkey *k)
{
return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}
-static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}
-static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
@@ -534,7 +559,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
* For the write set - the one we're currently inserting keys into - we don't
* maintain a full search tree, we just keep a simple lookup table in t->prev.
*/
-static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
+static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
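A hedged sketch of the pointer arithmetic these helpers perform; BSET_CACHELINE is assumed to be 128 bytes here, and cacheline_to_key() is a hypothetical user-space stand-in for cacheline_to_bkey():

#include <stdint.h>
#include <stdio.h>

#define BSET_CACHELINE 128      /* assumed value, for illustration only */

/* hypothetical user-space stand-in for cacheline_to_bkey() */
static void *cacheline_to_key(void *data, unsigned int cacheline,
                              unsigned int offset)
{
    /* keys are addressed in 8-byte (u64) units within a cacheline */
    return (char *)data + cacheline * BSET_CACHELINE + offset * 8;
}

int main(void)
{
    uint64_t buf[64] = { 0 };

    /* cacheline 2, third u64 in it: 2 * 128 + 2 * 8 = 272 bytes in */
    printf("%td\n", (char *)cacheline_to_key(buf, 2, 2) - (char *)buf);
    return 0;
}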
@@ -546,14 +571,29 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
return low;
}
-static inline unsigned bfloat_mantissa(const struct bkey *k,
+/*
+ * Calculate the mantissa value for struct bkey_float.
+ * If the most significant bit of f->exponent is not set, then
+ * - f->exponent >> 6 is 0
+ * - p[0] points to bkey->low
+ * - p[-1] borrows bits from KEY_INODE() of bkey->high
+ * If the most significant bit of f->exponent is set, then
+ * - f->exponent >> 6 is 1
+ * - p[0] points to bits from KEY_INODE() of bkey->high
+ * - p[-1] points to other bits from KEY_INODE() of
+ *   bkey->high too.
+ * See make_bfloat() for when the most significant bit of f->exponent
+ * is set or not.
+ */
+static inline unsigned int bfloat_mantissa(const struct bkey *k,
struct bkey_float *f)
{
const uint64_t *p = &k->low - (f->exponent >> 6);
+
return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
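For illustration, a user-space sketch of a 128-bit right shift in the spirit of shrd128() above, showing how a mantissa field spanning two adjacent 64-bit words is extracted; the two-step shift of "high" keeps the expression defined for shift == 0:

#include <stdint.h>
#include <stdio.h>

/* low 64 bits of (high:low) >> shift, for shift in 0..63 */
static uint64_t shrd128_sketch(uint64_t high, uint64_t low, uint8_t shift)
{
    low >>= shift;
    low |= (high << 1) << (63 - shift);
    return low;
}

int main(void)
{
    /* an 8-bit field starting at bit 60 of high:low spans both words */
    uint64_t high = 0xab, low = 0xf000000000000000ULL;

    printf("0x%llx\n",
           (unsigned long long)(shrd128_sketch(high, low, 60) & 0xff));
    /* prints 0xbf: 0xb taken from high, 0xf from low */
    return 0;
}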
-static void make_bfloat(struct bset_tree *t, unsigned j)
+static void make_bfloat(struct bset_tree *t, unsigned int j)
{
struct bkey_float *f = &t->tree[j];
struct bkey *m = tree_to_bkey(t, j);
@@ -570,6 +610,16 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
BUG_ON(m < l || m > r);
BUG_ON(bkey_next(p) != m);
+ /*
+	 * If l and r have different KEY_INODE values (different backing
+	 * devices), f->exponent records how many least significant bits
+	 * differ between the KEY_INODE values and sets the most
+	 * significant bit to 1 (by adding 64).
+	 * If l and r have the same KEY_INODE value, f->exponent records
+	 * how many bits differ in the least significant bits of bkey->low.
+	 * See bfloat_mantissa() for how the most significant bit of
+	 * f->exponent is used to calculate the bfloat mantissa value.
+ */
if (KEY_INODE(l) != KEY_INODE(r))
f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
else
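A small sketch of the exponent derivation just described: XOR exposes the differing bits and the highest one is found with fls64(), open-coded here for user space:

#include <stdint.h>
#include <stdio.h>

/* 1-based index of the highest set bit, 0 if none */
static int fls64_sketch(uint64_t x)
{
    int r = 0;

    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    uint64_t l = 0x1200, r = 0x1a00;

    /* l and r first differ at bit 11, so the result is 12 */
    printf("exponent = %d\n", fls64_sketch(l ^ r));
    return 0;
}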
@@ -591,7 +641,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
if (t != b->set) {
- unsigned j = roundup(t[-1].size,
+ unsigned int j = roundup(t[-1].size,
64 / sizeof(struct bkey_float));
t->tree = t[-1].tree + j;
@@ -633,17 +683,26 @@ void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
}
EXPORT_SYMBOL(bch_bset_init_next);
+/*
+ * Build the auxiliary binary tree 'struct bset_tree *t'; this tree is used
+ * to accelerate bkey searches in a btree node (pointed to by bset_tree->data
+ * in memory). After searching the auxiliary tree by calling
+ * bset_search_tree(), a struct bset_search_iter is returned which indicates
+ * the range [l, r] in bset_tree->data where the bkey being searched for may
+ * lie. A subsequent linear comparison then does the exact search; see
+ * __bch_bset_search() for how the auxiliary tree is used.
+ */
void bch_bset_build_written_tree(struct btree_keys *b)
{
struct bset_tree *t = bset_tree_last(b);
struct bkey *prev = NULL, *k = t->data->start;
- unsigned j, cacheline = 1;
+ unsigned int j, cacheline = 1;
b->last_set_unwritten = 0;
bset_alloc_tree(b, t);
- t->size = min_t(unsigned,
+ t->size = min_t(unsigned int,
bkey_to_cacheline(t, bset_bkey_last(t->data)),
b->set->tree + btree_keys_cachelines(b) - t->tree);
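A toy model of the two-phase search described above, with a plain sorted array and a fixed stride standing in for the bset and its cacheline groups; first_gt() is a hypothetical illustration, not the kernel routine:

#include <stdio.h>

#define STRIDE 4        /* stands in for keys-per-cacheline */

/* coarse binary search over strides, then an exact linear scan */
static int first_gt(const int *a, int n, int key)
{
    int l = 0, r = n / STRIDE;

    while (l + 1 < r) {
        int m = (l + r) / 2;

        if (a[m * STRIDE] > key)
            r = m;
        else
            l = m;
    }

    /* exact search within the narrowed range */
    for (l *= STRIDE; l < n && a[l] <= key; l++)
        ;
    return l;           /* index of first element strictly greater than key */
}

int main(void)
{
    int a[16], i;

    for (i = 0; i < 16; i++)
        a[i] = i * 2;                           /* 0, 2, 4, ..., 30 */
    printf("%d\n", first_gt(a, 16, 9));         /* prints 5: a[5] == 10 */
    return 0;
}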
@@ -683,7 +742,7 @@ EXPORT_SYMBOL(bch_bset_build_written_tree);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
struct bset_tree *t;
- unsigned inorder, j = 1;
+ unsigned int inorder, j = 1;
for (t = b->set; t <= bset_tree_last(b); t++)
if (k < bset_bkey_last(t->data))
@@ -730,14 +789,15 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
struct bset_tree *t,
struct bkey *k)
{
- unsigned shift = bkey_u64s(k);
- unsigned j = bkey_to_cacheline(t, k);
+ unsigned int shift = bkey_u64s(k);
+ unsigned int j = bkey_to_cacheline(t, k);
/* We're getting called from btree_split() or btree_gc, just bail out */
if (!t->size)
return;
- /* k is the key we just inserted; we need to find the entry in the
+ /*
+ * k is the key we just inserted; we need to find the entry in the
* lookup table for the first key that is strictly greater than k:
* it's either k's cacheline or the next one
*/
@@ -745,7 +805,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
table_to_bkey(t, j) <= k)
j++;
- /* Adjust all the lookup table entries, and find a new key for any that
+ /*
+ * Adjust all the lookup table entries, and find a new key for any that
* have gotten too big
*/
for (; j < t->size; j++) {
@@ -770,7 +831,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
k != bset_bkey_last(t->data);
k = bkey_next(k))
if (t->size == bkey_to_cacheline(t, k)) {
- t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
+ t->prev[t->size] =
+ bkey_to_cacheline_offset(t, t->size, k);
t->size++;
}
}
@@ -818,10 +880,10 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
}
EXPORT_SYMBOL(bch_bset_insert);
-unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key)
{
- unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+ unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter iter;
@@ -873,10 +935,10 @@ struct bset_search_iter {
static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
const struct bkey *search)
{
- unsigned li = 0, ri = t->size;
+ unsigned int li = 0, ri = t->size;
while (li + 1 != ri) {
- unsigned m = (li + ri) >> 1;
+ unsigned int m = (li + ri) >> 1;
if (bkey_cmp(table_to_bkey(t, m), search) > 0)
ri = m;
@@ -895,10 +957,22 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
{
struct bkey *l, *r;
struct bkey_float *f;
- unsigned inorder, j, n = 1;
+ unsigned int inorder, j, n = 1;
do {
- unsigned p = n << 4;
+ /*
+ * A bit trick here.
+		 * If p < t->size, (int)(p - t->size) is negative and its
+		 * most significant bit is set, so an arithmetic right
+		 * shift by 31 bits yields all ones. If p >= t->size, the
+		 * most significant bit is not set and the shift yields 0.
+		 * Thus the following two lines are equivalent to
+		 *	if (p >= t->size)
+		 *		p = 0;
+		 * but avoid a branch instruction.
+ */
+ unsigned int p = n << 4;
+
p &= ((int) (p - t->size)) >> 31;
prefetch(&t->tree[p]);
@@ -907,6 +981,9 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
f = &t->tree[j];
/*
+	 * A similar bit trick: a subtraction is used here to avoid a
+	 * branch instruction.
+ *
* n = (f->mantissa > bfloat_mantissa())
* ? j * 2
* : j * 2 + 1;
@@ -915,7 +992,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
* to work - that's done in make_bfloat()
*/
if (likely(f->exponent != 127))
- n = j * 2 + (((unsigned)
+ n = j * 2 + (((unsigned int)
(f->mantissa -
bfloat_mantissa(search, f))) >> 31);
else
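The same masking trick in isolation, as a user-space sketch; like the kernel code it relies on an arithmetic right shift of signed integers:

#include <stdio.h>

/* zeroes p when p >= size, without a conditional jump */
static unsigned int clamp_to_zero(unsigned int p, unsigned int size)
{
    /* sign bit of (p - size), spread across all 32 bits, masks p */
    p &= ((int)(p - size)) >> 31;
    return p;
}

int main(void)
{
    printf("%u %u\n", clamp_to_zero(5, 8), clamp_to_zero(9, 8));
    /* prints "5 0" */
    return 0;
}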
@@ -1046,6 +1123,7 @@ static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
struct bset_tree *start)
{
struct bkey *ret = NULL;
+
iter->size = ARRAY_SIZE(iter->data);
iter->used = 0;
@@ -1121,7 +1199,8 @@ void bch_bset_sort_state_free(struct bset_sort_state *state)
mempool_exit(&state->pool);
}
-int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
+int bch_bset_sort_state_init(struct bset_sort_state *state,
+ unsigned int page_order)
{
spin_lock_init(&state->time.lock);
@@ -1174,7 +1253,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
}
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
- unsigned start, unsigned order, bool fixup,
+ unsigned int start, unsigned int order, bool fixup,
struct bset_sort_state *state)
{
uint64_t start_time;
@@ -1225,7 +1304,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
bch_time_stats_update(&state->time, start_time);
}
-void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
@@ -1235,7 +1314,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
if (start) {
- unsigned i;
+ unsigned int i;
for (i = start; i <= b->nsets; i++)
keys += b->set[i].data->keys;
@@ -1260,8 +1339,8 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
-
struct btree_iter iter;
+
bch_btree_iter_init(b, &iter, NULL);
btree_mergesort(b, new->set->data, &iter, false, true);
@@ -1275,7 +1354,7 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
- unsigned crit = SORT_CRIT;
+ unsigned int crit = SORT_CRIT;
int i;
/* Don't sort if nothing to do */
@@ -1304,7 +1383,7 @@ EXPORT_SYMBOL(bch_btree_sort_lazy);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i <= b->nsets; i++) {
struct bset_tree *t = &b->set[i];
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index b867f2200495..bac76aabca6d 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -163,10 +163,10 @@ struct bset_tree {
*/
/* size of the binary tree and prev array */
- unsigned size;
+ unsigned int size;
/* function of size - precalculated for to_inorder() */
- unsigned extra;
+ unsigned int extra;
/* copy of the last key in the set */
struct bkey end;
@@ -187,18 +187,25 @@ struct bset_tree {
};
struct btree_keys_ops {
- bool (*sort_cmp)(struct btree_iter_set,
- struct btree_iter_set);
- struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *);
- bool (*insert_fixup)(struct btree_keys *, struct bkey *,
- struct btree_iter *, struct bkey *);
- bool (*key_invalid)(struct btree_keys *,
- const struct bkey *);
- bool (*key_bad)(struct btree_keys *, const struct bkey *);
- bool (*key_merge)(struct btree_keys *,
- struct bkey *, struct bkey *);
- void (*key_to_text)(char *, size_t, const struct bkey *);
- void (*key_dump)(struct btree_keys *, const struct bkey *);
+ bool (*sort_cmp)(struct btree_iter_set l,
+ struct btree_iter_set r);
+ struct bkey *(*sort_fixup)(struct btree_iter *iter,
+ struct bkey *tmp);
+ bool (*insert_fixup)(struct btree_keys *b,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key);
+ bool (*key_invalid)(struct btree_keys *bk,
+ const struct bkey *k);
+ bool (*key_bad)(struct btree_keys *bk,
+ const struct bkey *k);
+ bool (*key_merge)(struct btree_keys *bk,
+ struct bkey *l, struct bkey *r);
+ void (*key_to_text)(char *buf,
+ size_t size,
+ const struct bkey *k);
+ void (*key_dump)(struct btree_keys *keys,
+ const struct bkey *k);
/*
* Only used for deciding whether to use START_KEY(k) or just the key
@@ -211,7 +218,7 @@ struct btree_keys {
const struct btree_keys_ops *ops;
uint8_t page_order;
uint8_t nsets;
- unsigned last_set_unwritten:1;
+ unsigned int last_set_unwritten:1;
bool *expensive_debug_checks;
/*
@@ -239,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}
-static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+static inline unsigned int bset_byte_offset(struct btree_keys *b,
+ struct bset *i)
{
return ((size_t) i) - ((size_t) b->set->data);
}
-static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+static inline unsigned int bset_sector_offset(struct btree_keys *b,
+ struct bset *i)
{
return bset_byte_offset(b, i) >> 9;
}
@@ -273,25 +282,27 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
}
static inline struct bset *bset_next_set(struct btree_keys *b,
- unsigned block_bytes)
+ unsigned int block_bytes)
{
struct bset *i = bset_tree_last(b)->data;
return ((void *) i) + roundup(set_bytes(i), block_bytes);
}
-void bch_btree_keys_free(struct btree_keys *);
-int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
-void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
- bool *);
-
-void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
-void bch_bset_build_written_tree(struct btree_keys *);
-void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
-bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
-void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
-unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
- struct bkey *);
+void bch_btree_keys_free(struct btree_keys *b);
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
+ gfp_t gfp);
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+ bool *expensive_debug_checks);
+
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
+void bch_bset_build_written_tree(struct btree_keys *b);
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+ struct bkey *insert);
+unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ struct bkey *replace_key);
enum {
BTREE_INSERT_STATUS_NO_INSERT = 0,
@@ -313,18 +324,21 @@ struct btree_iter {
} data[MAX_BSETS];
};
-typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
+typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
- struct btree_keys *, ptr_filter_fn);
+struct bkey *bch_btree_iter_next(struct btree_iter *iter);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+ struct btree_keys *b,
+ ptr_filter_fn fn);
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
- struct bkey *);
+void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ struct bkey *end);
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search);
-struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
- const struct bkey *);
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ const struct bkey *search);
/*
* Returns the first key that is strictly greater than search
@@ -349,21 +363,23 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
struct bset_sort_state {
mempool_t pool;
- unsigned page_order;
- unsigned crit_factor;
+ unsigned int page_order;
+ unsigned int crit_factor;
struct time_stats time;
};
-void bch_bset_sort_state_free(struct bset_sort_state *);
-int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
-void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
-void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
- struct bset_sort_state *);
-void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
- struct bset_sort_state *);
-void bch_btree_sort_partial(struct btree_keys *, unsigned,
- struct bset_sort_state *);
+void bch_bset_sort_state_free(struct bset_sort_state *state);
+int bch_bset_sort_state_init(struct bset_sort_state *state,
+ unsigned int page_order);
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state);
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bset_sort_state *state);
+void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ struct bset_sort_state *state);
static inline void bch_btree_sort(struct btree_keys *b,
struct bset_sort_state *state)
@@ -377,13 +393,13 @@ struct bset_stats {
size_t floats, failed;
};
-void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
/* Bkey utility code */
#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
-static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
return bkey_idx(i->start, idx);
}
@@ -401,10 +417,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
- unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
+void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+ unsigned int i);
+bool __bch_cut_front(const struct bkey *where, struct bkey *k);
+bool __bch_cut_back(const struct bkey *where, struct bkey *k);
static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
@@ -522,18 +538,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
-struct bkey *bch_keylist_pop(struct keylist *);
-void bch_keylist_pop_front(struct keylist *);
-int __bch_keylist_realloc(struct keylist *, unsigned);
+struct bkey *bch_keylist_pop(struct keylist *l);
+void bch_keylist_pop_front(struct keylist *l);
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
/* Debug stuff */
#ifdef CONFIG_BCACHE_DEBUG
-int __bch_count_data(struct btree_keys *);
-void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
-void bch_dump_bucket(struct btree_keys *);
+int __bch_count_data(struct btree_keys *b);
+void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
+ const char *fmt,
+ ...);
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
+void bch_dump_bucket(struct btree_keys *b);
#else
@@ -541,7 +559,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
#endif
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 547c9eedc2f4..e7d4817681f2 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -90,6 +90,9 @@
#define MAX_NEED_GC 64
#define MAX_SAVE_PRIO 72
+#define MAX_GC_TIMES 100
+#define MIN_GC_NODES 100
+#define GC_SLEEP_MS 100
#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
@@ -180,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)
void bkey_put(struct cache_set *c, struct bkey *k)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i))
@@ -284,6 +287,7 @@ err:
static void btree_node_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
+
closure_put(cl);
}
@@ -432,7 +436,10 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL);
} else {
- /* No problem for multipage bvec since the bio is just allocated */
+ /*
+ * No problem for multipage bvec since the bio is
+ * just allocated
+ */
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
@@ -476,7 +483,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
void bch_btree_node_write(struct btree *b, struct closure *parent)
{
- unsigned nsets = b->keys.nsets;
+ unsigned int nsets = b->keys.nsets;
lockdep_assert_held(&b->lock);
@@ -578,7 +585,7 @@ static void mca_bucket_free(struct btree *b)
list_move(&b->list, &b->c->btree_cache_freeable);
}
-static unsigned btree_order(struct bkey *k)
+static unsigned int btree_order(struct bkey *k)
{
return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}
@@ -586,7 +593,7 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
if (!bch_btree_keys_alloc(&b->keys,
- max_t(unsigned,
+ max_t(unsigned int,
ilog2(b->c->btree_pages),
btree_order(k)),
gfp)) {
@@ -601,6 +608,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
struct bkey *k, gfp_t gfp)
{
struct btree *b = kzalloc(sizeof(struct btree), gfp);
+
if (!b)
return NULL;
@@ -617,7 +625,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
return b;
}
-static int mca_reap(struct btree *b, unsigned min_order, bool flush)
+static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
struct closure cl;
@@ -743,6 +751,7 @@ void bch_btree_cache_free(struct cache_set *c)
{
struct btree *b;
struct closure cl;
+
closure_init_stack(&cl);
if (c->shrink.list.next)
@@ -783,7 +792,7 @@ void bch_btree_cache_free(struct cache_set *c)
int bch_btree_cache_alloc(struct cache_set *c)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < mca_reserve(c); i++)
if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
@@ -1008,6 +1017,13 @@ retry:
BUG_ON(b->level != level);
}
+ if (btree_node_io_error(b)) {
+ rw_unlock(write, b);
+ return ERR_PTR(-EIO);
+ }
+
+ BUG_ON(!b->written);
+
b->parent = parent;
b->accessed = 1;
@@ -1019,13 +1035,6 @@ retry:
for (; i <= b->keys.nsets; i++)
prefetch(b->keys.set[i].data);
- if (btree_node_io_error(b)) {
- rw_unlock(write, b);
- return ERR_PTR(-EIO);
- }
-
- BUG_ON(!b->written);
-
return b;
}
@@ -1121,6 +1130,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
struct btree_op *op)
{
struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
+
if (!IS_ERR_OR_NULL(n)) {
mutex_lock(&n->write_lock);
bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
@@ -1133,7 +1143,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
- unsigned i;
+ unsigned int i;
mutex_lock(&b->c->bucket_lock);
@@ -1154,7 +1164,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
struct cache_set *c = b->c;
struct cache *ca;
- unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
+ unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
mutex_lock(&c->bucket_lock);
@@ -1178,7 +1188,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
struct bkey *k)
{
uint8_t stale = 0;
- unsigned i;
+ unsigned int i;
struct bucket *g;
/*
@@ -1216,7 +1226,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
/* guard against overflow */
- SET_GC_SECTORS_USED(g, min_t(unsigned,
+ SET_GC_SECTORS_USED(g, min_t(unsigned int,
GC_SECTORS_USED(g) + KEY_SIZE(k),
MAX_GC_SECTORS_USED));
@@ -1230,7 +1240,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) &&
@@ -1256,7 +1266,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
- unsigned keys = 0, good_keys = 0;
+ unsigned int keys = 0, good_keys = 0;
struct bkey *k;
struct btree_iter iter;
struct bset_tree *t;
@@ -1299,16 +1309,18 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
struct gc_merge_info {
struct btree *b;
- unsigned keys;
+ unsigned int keys;
};
-static int bch_btree_insert_node(struct btree *, struct btree_op *,
- struct keylist *, atomic_t *, struct bkey *);
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ atomic_t *journal_ref,
+ struct bkey *replace_key);
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
struct gc_stat *gc, struct gc_merge_info *r)
{
- unsigned i, nodes = 0, keys = 0, blocks;
+ unsigned int i, nodes = 0, keys = 0, blocks;
struct btree *new_nodes[GC_MERGE_NODES];
struct keylist keylist;
struct closure cl;
@@ -1508,11 +1520,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return -EINTR;
}
-static unsigned btree_gc_count_keys(struct btree *b)
+static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
struct btree_iter iter;
- unsigned ret = 0;
+ unsigned int ret = 0;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);
@@ -1520,6 +1532,32 @@ static unsigned btree_gc_count_keys(struct btree *b)
return ret;
}
+static size_t btree_gc_min_nodes(struct cache_set *c)
+{
+ size_t min_nodes;
+
+ /*
+	 * Incremental GC pauses for 100ms whenever front-side
+	 * I/O arrives, so if there are many btree nodes and GC
+	 * only processed a constant number (100) of nodes per
+	 * pass, GC would take a long time and the front-side
+	 * I/Os would exhaust the buckets (since no new bucket
+	 * can be allocated during GC) and be blocked again.
+	 * So GC should process a number of nodes that varies
+	 * with the total number of btree nodes. This is done by
+	 * dividing GC into a constant number (100) of passes:
+	 * when there are many btree nodes, GC processes more
+	 * nodes per pass; otherwise it processes fewer nodes
+	 * per pass (but never fewer than MIN_GC_NODES).
+ */
+ min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
+ if (min_nodes < MIN_GC_NODES)
+ min_nodes = MIN_GC_NODES;
+
+ return min_nodes;
+}
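To illustrate the policy, a user-space sketch of the per-pass node budget using the constants defined earlier in this patch:

#include <stdio.h>

#define MAX_GC_TIMES 100
#define MIN_GC_NODES 100

static unsigned long gc_min_nodes(unsigned long total_nodes)
{
    unsigned long min_nodes = total_nodes / MAX_GC_TIMES;

    return min_nodes < MIN_GC_NODES ? MIN_GC_NODES : min_nodes;
}

int main(void)
{
    /* small trees hit the MIN_GC_NODES floor; large ones scale up */
    printf("%lu %lu %lu\n",
           gc_min_nodes(1000),          /* 100 (floor) */
           gc_min_nodes(10000),         /* 100 (boundary) */
           gc_min_nodes(1000000));      /* 10000 per pass */
    return 0;
}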
+
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
{
@@ -1585,6 +1623,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
r->b = NULL;
+ if (atomic_read(&b->c->search_inflight) &&
+ gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
+ gc->nodes_pre = gc->nodes;
+ ret = -EAGAIN;
+ break;
+ }
+
if (need_resched()) {
ret = -EAGAIN;
break;
@@ -1642,7 +1687,7 @@ static void btree_gc_start(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
- unsigned i;
+ unsigned int i;
if (!c->gc_mark_valid)
return;
@@ -1668,7 +1713,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
{
struct bucket *b;
struct cache *ca;
- unsigned i;
+ unsigned int i;
mutex_lock(&c->bucket_lock);
@@ -1686,7 +1731,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
- unsigned j;
+ unsigned int j;
if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
continue;
@@ -1753,7 +1798,10 @@ static void bch_btree_gc(struct cache_set *c)
closure_sync(&writes);
cond_resched();
- if (ret && ret != -EAGAIN)
+ if (ret == -EAGAIN)
+ schedule_timeout_interruptible(msecs_to_jiffies
+ (GC_SLEEP_MS));
+ else if (ret)
pr_warn("gc failed!");
} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
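A sketch of this pacing loop in isolation; fake_pass() and sleep_ms() are hypothetical stand-ins for btree_root(gc_root, ...) and schedule_timeout_interruptible():

#include <errno.h>
#include <stdio.h>

#define GC_SLEEP_MS 100

static int passes;

static void sleep_ms(int ms)
{
    (void)ms;   /* stand-in for schedule_timeout_interruptible() */
}

static int fake_pass(void)
{
    return ++passes < 3 ? -EAGAIN : 0;  /* succeed on the third pass */
}

int main(void)
{
    int ret;

    do {
        ret = fake_pass();
        if (ret == -EAGAIN)
            sleep_ms(GC_SLEEP_MS);      /* let front-side I/O proceed */
        else if (ret)
            fprintf(stderr, "gc failed!\n");
    } while (ret);

    printf("done after %d passes\n", passes);   /* 3 */
    return 0;
}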
@@ -1775,7 +1823,7 @@ static void bch_btree_gc(struct cache_set *c)
static bool gc_should_run(struct cache_set *c)
{
struct cache *ca;
- unsigned i;
+ unsigned int i;
for_each_cache(ca, c, i)
if (ca->invalidate_needs_gc)
@@ -1834,8 +1882,14 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
do {
k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad);
- if (k)
+ if (k) {
btree_node_prefetch(b, k);
+ /*
+				 * initialize c->gc_stats.nodes
+ * for incremental GC
+ */
+ b->c->gc_stats.nodes++;
+ }
if (p)
ret = btree(check_recurse, p, b, op);
@@ -1860,7 +1914,7 @@ void bch_initial_gc_finish(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
- unsigned i;
+ unsigned int i;
bch_btree_gc_finish(c);
@@ -1900,7 +1954,7 @@ void bch_initial_gc_finish(struct cache_set *c)
static bool btree_insert_key(struct btree *b, struct bkey *k,
struct bkey *replace_key)
{
- unsigned status;
+ unsigned int status;
BUG_ON(bkey_cmp(k, &b->key) > 0);
@@ -1999,7 +2053,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
if (split) {
- unsigned keys = 0;
+ unsigned int keys = 0;
trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
@@ -2177,10 +2231,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
rw_lock(true, b, b->level);
if (b->key.ptr[0] != btree_ptr ||
- b->seq != seq + 1) {
+ b->seq != seq + 1) {
op->lock = b->level;
goto out;
- }
+ }
}
SET_KEY_PTRS(check_key, 1);
@@ -2255,7 +2309,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
void bch_btree_set_root(struct btree *b)
{
- unsigned i;
+ unsigned int i;
struct closure cl;
closure_init_stack(&cl);
@@ -2367,7 +2421,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
struct refill {
struct btree_op op;
- unsigned nr_found;
+ unsigned int nr_found;
struct keybuf *buf;
struct bkey *end;
keybuf_pred_fn *pred;
@@ -2443,6 +2497,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
if (!RB_EMPTY_ROOT(&buf->keys)) {
struct keybuf_key *w;
+
w = RB_FIRST(&buf->keys, struct keybuf_key, node);
buf->start = START_KEY(&w->key);
@@ -2474,6 +2529,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
{
bool ret = false;
struct keybuf_key *p, *w, s;
+
s.key = *start;
if (bkey_cmp(end, &buf->start) <= 0 ||
@@ -2500,6 +2556,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
struct keybuf_key *w;
+
spin_lock(&buf->lock);
w = RB_FIRST(&buf->keys, struct keybuf_key, node);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index d211e2c25b6b..a68d6c55783b 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -152,7 +152,7 @@ static inline bool btree_node_ ## flag(struct btree *b) \
{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
\
static inline void set_btree_node_ ## flag(struct btree *b) \
-{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
+{ set_bit(BTREE_NODE_ ## flag, &b->flags); }
enum btree_flags {
BTREE_NODE_io_error,
@@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b)
return bset_tree_last(&b->keys)->data;
}
-static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
+static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{
return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}
@@ -213,7 +213,7 @@ struct btree_op {
/* Btree level at which we start taking write locks */
short lock;
- unsigned insert_collision:1;
+ unsigned int insert_collision:1;
};
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
@@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b)
(w ? up_write : up_read)(&b->lock);
}
-void bch_btree_node_read_done(struct btree *);
-void __bch_btree_node_write(struct btree *, struct closure *);
-void bch_btree_node_write(struct btree *, struct closure *);
-
-void bch_btree_set_root(struct btree *);
-struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
- int, bool, struct btree *);
-struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
- struct bkey *, int, bool, struct btree *);
-
-int bch_btree_insert_check_key(struct btree *, struct btree_op *,
- struct bkey *);
-int bch_btree_insert(struct cache_set *, struct keylist *,
- atomic_t *, struct bkey *);
-
-int bch_gc_thread_start(struct cache_set *);
-void bch_initial_gc_finish(struct cache_set *);
-void bch_moving_gc(struct cache_set *);
-int bch_btree_check(struct cache_set *);
-void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
+void bch_btree_node_read_done(struct btree *b);
+void __bch_btree_node_write(struct btree *b, struct closure *parent);
+void bch_btree_node_write(struct btree *b, struct closure *parent);
+
+void bch_btree_set_root(struct btree *b);
+struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ int level, bool wait,
+ struct btree *parent);
+struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level, bool write,
+ struct btree *parent);
+
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ struct bkey *check_key);
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+ atomic_t *journal_ref, struct bkey *replace_key);
+
+int bch_gc_thread_start(struct cache_set *c);
+void bch_initial_gc_finish(struct cache_set *c);
+void bch_moving_gc(struct cache_set *c);
+int bch_btree_check(struct cache_set *c);
+void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
static inline void wake_up_gc(struct cache_set *c)
{
@@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c)
#define MAP_END_KEY 1
-typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
-int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
- struct bkey *, btree_map_nodes_fn *, int);
+typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn, int flags);
static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_nodes_fn *fn)
@@ -290,21 +292,23 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}
-typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
- struct bkey *);
-int bch_btree_map_keys(struct btree_op *, struct cache_set *,
- struct bkey *, btree_map_keys_fn *, int);
-
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
-void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *,
- struct bkey *, keybuf_pred_fn *);
-bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
- struct bkey *);
-void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
-struct keybuf_key *bch_keybuf_next(struct keybuf *);
-struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
- struct bkey *, keybuf_pred_fn *);
+typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
+ struct bkey *k);
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_keys_fn *fn, int flags);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
+
+void bch_keybuf_init(struct keybuf *buf);
+void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
+ struct bkey *end, keybuf_pred_fn *pred);
+bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+ struct bkey *end);
+void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
+struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
+ struct keybuf *buf,
+ struct bkey *end,
+ keybuf_pred_fn *pred);
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 0e14969182c6..73f5319295bc 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Asynchronous refcounty things
*
@@ -162,12 +163,13 @@ static struct dentry *closure_debug;
static int debug_seq_show(struct seq_file *f, void *data)
{
struct closure *cl;
+
spin_lock_irq(&closure_list_lock);
list_for_each_entry(cl, &closure_list, all) {
int r = atomic_read(&cl->remaining);
- seq_printf(f, "%p: %pF -> %pf p %p r %i ",
+ seq_printf(f, "%p: %pS -> %pS p %p r %i ",
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
@@ -177,7 +179,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING)
- seq_printf(f, " W %pF\n",
+ seq_printf(f, " W %pS\n",
(void *) cl->waiting_on);
seq_printf(f, "\n");
@@ -199,11 +201,16 @@ static const struct file_operations debug_ops = {
.release = single_release
};
-int __init closure_debug_init(void)
+void __init closure_debug_init(void)
{
- closure_debug = debugfs_create_file("closures",
- 0400, bcache_debug, NULL, &debug_ops);
- return IS_ERR_OR_NULL(closure_debug);
+ if (!IS_ERR_OR_NULL(bcache_debug))
+ /*
+		 * It is unnecessary to check the return value of
+		 * debugfs_create_file(); an error here is not fatal
+		 * and can safely be ignored.
+ */
+ closure_debug = debugfs_create_file(
+ "closures", 0400, bcache_debug, NULL, &debug_ops);
}
#endif
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 71427eb5fdae..eca0d496b686 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -159,7 +159,7 @@ struct closure {
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
- unsigned magic;
+ unsigned int magic;
struct list_head all;
unsigned long ip;
unsigned long waiting_on;
@@ -186,13 +186,13 @@ static inline void closure_sync(struct closure *cl)
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-int closure_debug_init(void);
+void closure_debug_init(void);
void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);
#else
-static inline int closure_debug_init(void) { return 0; }
+static inline void closure_debug_init(void) {}
static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}
@@ -289,10 +289,12 @@ static inline void closure_init_stack(struct closure *cl)
}
/**
- * closure_wake_up - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
*/
static inline void closure_wake_up(struct closure_waitlist *list)
{
+ /* Memory barrier for the wait list */
smp_mb();
__closure_wake_up(list);
}
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index d030ce3025a6..06da66b2488a 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -67,34 +67,35 @@ void bch_btree_verify(struct btree *b)
if (inmemory->keys != sorted->keys ||
memcmp(inmemory->start,
sorted->start,
- (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+ (void *) bset_bkey_last(inmemory) -
+ (void *) inmemory->start)) {
struct bset *i;
- unsigned j;
+ unsigned int j;
console_lock();
- printk(KERN_ERR "*** in memory:\n");
+ pr_err("*** in memory:\n");
bch_dump_bset(&b->keys, inmemory, 0);
- printk(KERN_ERR "*** read back in:\n");
+ pr_err("*** read back in:\n");
bch_dump_bset(&v->keys, sorted, 0);
for_each_written_bset(b, ondisk, i) {
- unsigned block = ((void *) i - (void *) ondisk) /
+ unsigned int block = ((void *) i - (void *) ondisk) /
block_bytes(b->c);
- printk(KERN_ERR "*** on disk block %u:\n", block);
+ pr_err("*** on disk block %u:\n", block);
bch_dump_bset(&b->keys, i, block);
}
- printk(KERN_ERR "*** block %zu not written\n",
+ pr_err("*** block %zu not written\n",
((void *) i - (void *) ondisk) / block_bytes(b->c));
for (j = 0; j < inmemory->keys; j++)
if (inmemory->d[j] != sorted->d[j])
break;
- printk(KERN_ERR "b->written %u\n", b->written);
+ pr_err("b->written %u\n", b->written);
console_unlock();
panic("verify failed at %u\n", j);
@@ -110,11 +111,15 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
struct bio_vec bv, cbv;
struct bvec_iter iter, citer = { 0 };
- check = bio_clone_kmalloc(bio, GFP_NOIO);
+ check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
if (!check)
return;
+ check->bi_disk = bio->bi_disk;
check->bi_opf = REQ_OP_READ;
+ check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
+ check->bi_iter.bi_size = bio->bi_iter.bi_size;
+ bch_bio_map(check, NULL);
if (bch_bio_alloc_pages(check, GFP_NOIO))
goto out_put;
@@ -172,9 +177,9 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
while (size) {
struct keybuf_key *w;
- unsigned bytes = min(i->bytes, size);
-
+ unsigned int bytes = min(i->bytes, size);
int err = copy_to_user(buf, i->buf, bytes);
+
if (err)
return err;
@@ -233,8 +238,8 @@ void bch_debug_init_cache_set(struct cache_set *c)
{
if (!IS_ERR_OR_NULL(bcache_debug)) {
char name[50];
- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
+ snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
&cache_set_debug_ops);
}
@@ -248,11 +253,12 @@ void bch_debug_exit(void)
debugfs_remove_recursive(bcache_debug);
}
-int __init bch_debug_init(struct kobject *kobj)
+void __init bch_debug_init(struct kobject *kobj)
{
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
- return 0;
-
+ /*
+	 * It is unnecessary to check the return value of
+	 * debugfs_create_dir(); an error here is not fatal
+	 * and can safely be ignored.
+ */
bcache_debug = debugfs_create_dir("bcache", NULL);
- return IS_ERR_OR_NULL(bcache_debug);
}
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index acc48d3fa274..fb3d4dff4b26 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -8,8 +8,8 @@ struct cache_set;
#ifdef CONFIG_BCACHE_DEBUG
-void bch_btree_verify(struct btree *);
-void bch_data_verify(struct cached_dev *, struct bio *);
+void bch_btree_verify(struct btree *b);
+void bch_data_verify(struct cached_dev *dc, struct bio *bio);
#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
#define key_merging_disabled(c) ((c)->key_merging_disabled)
@@ -27,7 +27,7 @@ static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
#endif
#ifdef CONFIG_DEBUG_FS
-void bch_debug_init_cache_set(struct cache_set *);
+void bch_debug_init_cache_set(struct cache_set *c);
#else
static inline void bch_debug_init_cache_set(struct cache_set *c) {}
#endif
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 1d096742eb41..c809724e6571 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
- unsigned i = 0;
+ unsigned int i = 0;
char *out = buf, *end = buf + size;
#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
@@ -126,22 +126,22 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
struct btree *b = container_of(keys, struct btree, keys);
- unsigned j;
+ unsigned int j;
char buf[80];
bch_extent_to_text(buf, sizeof(buf), k);
- printk(" %s", buf);
+ pr_err(" %s", buf);
for (j = 0; j < KEY_PTRS(k); j++) {
size_t n = PTR_BUCKET_NR(b->c, k, j);
- printk(" bucket %zu", n);
+ pr_err(" bucket %zu", n);
if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
- printk(" prio %i",
+ pr_err(" prio %i",
PTR_BUCKET(b->c, k, j)->prio);
}
- printk(" %s\n", bch_ptr_status(b->c, k));
+ pr_err(" %s\n", bch_ptr_status(b->c, k));
}
/* Btree ptrs */
@@ -166,12 +166,13 @@ bad:
static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
+
return __bch_btree_ptr_invalid(b->c, k);
}
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
- unsigned i;
+ unsigned int i;
char buf[80];
struct bucket *g;
@@ -204,7 +205,7 @@ err:
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
- unsigned i;
+ unsigned int i;
if (!bkey_cmp(k, &ZERO_KEY) ||
!KEY_PTRS(k) ||
@@ -327,13 +328,14 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
struct cache_set *c = container_of(b, struct btree, keys)->c;
uint64_t old_offset;
- unsigned old_size, sectors_found = 0;
+ unsigned int old_size, sectors_found = 0;
BUG_ON(!KEY_OFFSET(insert));
BUG_ON(!KEY_SIZE(insert));
while (1) {
struct bkey *k = bch_btree_iter_next(iter);
+
if (!k)
break;
@@ -363,7 +365,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
* k might have been split since we inserted/found the
* key we're replacing
*/
- unsigned i;
+ unsigned int i;
uint64_t offset = KEY_START(k) -
KEY_START(replace_key);
@@ -498,11 +500,12 @@ bad:
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
+
return __bch_extent_invalid(b->c, k);
}
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
- unsigned ptr)
+ unsigned int ptr)
{
struct bucket *g = PTR_BUCKET(b->c, k, ptr);
char buf[80];
@@ -534,7 +537,7 @@ err:
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
- unsigned i, stale;
+ unsigned int i, stale;
if (!KEY_PTRS(k) ||
bch_extent_invalid(bk, k))
@@ -574,10 +577,12 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
~((uint64_t)1 << 63);
}
-static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+static bool bch_extent_merge(struct btree_keys *bk,
+ struct bkey *l,
+ struct bkey *r)
{
struct btree *b = container_of(bk, struct btree, keys);
- unsigned i;
+ unsigned int i;
if (key_merging_disabled(b->c))
return false;
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
index 0cd3575afa1d..4d667e05bb73 100644
--- a/drivers/md/bcache/extents.h
+++ b/drivers/md/bcache/extents.h
@@ -8,8 +8,8 @@ extern const struct btree_keys_ops bch_extent_keys_ops;
struct bkey;
struct cache_set;
-void bch_extent_to_text(char *, size_t, const struct bkey *);
-bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
-bool __bch_extent_invalid(struct cache_set *, const struct bkey *);
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k);
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);
+bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k);
#endif /* _BCACHE_EXTENTS_H */
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9612873afee2..c25097968319 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -17,6 +17,7 @@
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
+
mempool_free(b, &c->bio_meta);
}
@@ -42,9 +43,10 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
- struct bkey *k, unsigned ptr)
+ struct bkey *k, unsigned int ptr)
{
struct bbio *b = container_of(bio, struct bbio, bio);
+
bch_bkey_copy_single_ptr(&b->key, k, ptr);
__bch_submit_bbio(bio, c);
}
@@ -52,7 +54,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
- unsigned errors;
+ unsigned int errors;
WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@@ -75,16 +77,16 @@ void bch_count_io_errors(struct cache *ca,
*/
if (ca->set->error_decay) {
- unsigned count = atomic_inc_return(&ca->io_count);
+ unsigned int count = atomic_inc_return(&ca->io_count);
while (count > ca->set->error_decay) {
- unsigned errors;
- unsigned old = count;
- unsigned new = count - ca->set->error_decay;
+ unsigned int errors;
+ unsigned int old = count;
+ unsigned int new = count - ca->set->error_decay;
/*
* First we subtract refresh from count; each time we
- * succesfully do so, we rescale the errors once:
+ * successfully do so, we rescale the errors once:
*/
count = atomic_cmpxchg(&ca->io_count, old, new);
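A single-threaded sketch of the decay scheme this loop implements; the kernel performs the subtraction with atomic_cmpxchg() and halves the error count on each successful rescale, while ERROR_DECAY here is purely illustrative:

#include <stdio.h>

#define ERROR_DECAY 4   /* illustrative; the real value is per cache set */

static unsigned int io_count, errors = 64;

/* every ERROR_DECAY I/Os, the accumulated error count is halved,
 * so old errors fade out as healthy I/O continues */
static void count_io(void)
{
    if (++io_count > ERROR_DECAY) {
        io_count -= ERROR_DECAY;    /* the cmpxchg loop does this atomically */
        errors >>= 1;               /* rescale */
    }
}

int main(void)
{
    int i;

    for (i = 0; i < 20; i++)
        count_io();
    printf("errors decayed to %u\n", errors);   /* 64 -> 4 */
    return 0;
}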
@@ -104,7 +106,7 @@ void bch_count_io_errors(struct cache *ca,
}
if (error) {
- unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
+ unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
&ca->io_errors);
errors >>= IO_ERROR_SHIFT;
@@ -126,18 +128,18 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
struct cache *ca = PTR_CACHE(c, &b->key, 0);
int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
- unsigned threshold = op_is_write(bio_op(bio))
+ unsigned int threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
: c->congested_read_threshold_us;
if (threshold) {
- unsigned t = local_clock_us();
-
+ unsigned int t = local_clock_us();
int us = t - b->submit_time_us;
int congested = atomic_read(&c->congested);
if (us > (int) threshold) {
int ms = us / 1024;
+
c->congested_last_us = t;
ms = min(ms, CONGESTED_MAX + congested);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 18f1b5239620..6116bbf870d8 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -28,11 +28,12 @@
static void journal_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
+
closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
- unsigned bucket_index)
+ unsigned int bucket_index)
{
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio;
@@ -40,7 +41,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data;
struct closure cl;
- unsigned len, left, offset = 0;
+ unsigned int len, left, offset = 0;
int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
@@ -50,7 +51,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
- len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
+ len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
@@ -154,12 +155,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
})
struct cache *ca;
- unsigned iter;
+ unsigned int iter;
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
- unsigned i, l, r, m;
+ unsigned int i, l, r, m;
uint64_t seq;
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
@@ -192,7 +193,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
l < ca->sb.njournal_buckets;
- l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
+ l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
+ l + 1))
if (read_bucket(l))
goto bsearch;
@@ -304,7 +306,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
k < bset_bkey_last(&i->j);
k = bkey_next(k))
if (!__bch_extent_invalid(c, k)) {
- unsigned j;
+ unsigned int j;
for (j = 0; j < KEY_PTRS(k); j++)
if (ptr_available(c, k, j))
@@ -492,7 +494,7 @@ static void journal_reclaim(struct cache_set *c)
struct bkey *k = &c->journal.key;
struct cache *ca;
uint64_t last_seq;
- unsigned iter, n = 0;
+ unsigned int iter, n = 0;
atomic_t p __maybe_unused;
atomic_long_inc(&c->reclaim);
@@ -526,7 +528,7 @@ static void journal_reclaim(struct cache_set *c)
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
- unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+ unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
/* No space available on this device */
if (next == ja->discard_idx)
@@ -580,7 +582,7 @@ static void journal_write_endio(struct bio *bio)
closure_put(&w->c->journal.io);
}
-static void journal_write(struct closure *);
+static void journal_write(struct closure *cl);
static void journal_write_done(struct closure *cl)
{
@@ -609,11 +611,12 @@ static void journal_write_unlocked(struct closure *cl)
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
- unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+ unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
c->sb.block_size;
struct bio *bio;
struct bio_list list;
+
bio_list_init(&list);
if (!w->need_write) {
@@ -705,7 +708,7 @@ static void journal_try_write(struct cache_set *c)
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
- unsigned nkeys)
+ unsigned int nkeys)
__acquires(&c->journal.lock)
{
size_t sectors;
@@ -828,6 +831,7 @@ void bch_journal_free(struct cache_set *c)
free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
free_fifo(&c->journal.pin);
+ free_heap(&c->flush_btree);
}
int bch_journal_alloc(struct cache_set *c)
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index b5788199188f..66f0facff84b 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -110,7 +110,7 @@ struct journal {
struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
- unsigned blocks_free;
+ unsigned int blocks_free;
uint64_t seq;
DECLARE_FIFO(atomic_t, pin);
@@ -131,13 +131,13 @@ struct journal_device {
uint64_t seq[SB_JOURNAL_BUCKETS];
/* Journal bucket we're currently writing to */
- unsigned cur_idx;
+ unsigned int cur_idx;
/* Last journal bucket that still contains an open journal entry */
- unsigned last_idx;
+ unsigned int last_idx;
/* Next journal bucket to be discarded */
- unsigned discard_idx;
+ unsigned int discard_idx;
#define DISCARD_READY 0
#define DISCARD_IN_FLIGHT 1
@@ -167,14 +167,16 @@ struct cache_set;
struct btree_op;
struct keylist;
-atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
-void bch_journal_next(struct journal *);
-void bch_journal_mark(struct cache_set *, struct list_head *);
-void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *);
-int bch_journal_replay(struct cache_set *, struct list_head *);
-
-void bch_journal_free(struct cache_set *);
-int bch_journal_alloc(struct cache_set *);
+atomic_t *bch_journal(struct cache_set *c,
+ struct keylist *keys,
+ struct closure *parent);
+void bch_journal_next(struct journal *j);
+void bch_journal_mark(struct cache_set *c, struct list_head *list);
+void bch_journal_meta(struct cache_set *c, struct closure *cl);
+int bch_journal_read(struct cache_set *c, struct list_head *list);
+int bch_journal_replay(struct cache_set *c, struct list_head *list);
+
+void bch_journal_free(struct cache_set *c);
+int bch_journal_alloc(struct cache_set *c);
#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index a24c3a95b2c0..7891fb512736 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -23,7 +23,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
struct cache_set *c = container_of(buf, struct cache_set,
moving_gc_keys);
- unsigned i;
+ unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) &&
@@ -38,6 +38,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
+
kfree(io);
}
@@ -186,9 +187,10 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}
-static unsigned bucket_heap_top(struct cache *ca)
+static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
+
return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
@@ -196,7 +198,7 @@ void bch_moving_gc(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
- unsigned i;
+ unsigned int i;
if (!c->copy_gc_enabled)
return;
@@ -204,9 +206,9 @@ void bch_moving_gc(struct cache_set *c)
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i) {
- unsigned sectors_to_move = 0;
- unsigned reserve_sectors = ca->sb.bucket_size *
- fifo_used(&ca->free[RESERVE_MOVINGGC]);
+ unsigned int sectors_to_move = 0;
+ unsigned int reserve_sectors = ca->sb.bucket_size *
+ fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.used = 0;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index ae67f5fa8047..51be355a3309 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,9 +25,9 @@
struct kmem_cache *bch_search_cache;
-static void bch_data_insert_start(struct closure *);
+static void bch_data_insert_start(struct closure *cl);
-static unsigned cache_mode(struct cached_dev *dc)
+static unsigned int cache_mode(struct cached_dev *dc)
{
return BDEV_CACHE_MODE(&dc->sb);
}
@@ -45,6 +45,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
bio_for_each_segment(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset;
+
csum = bch_crc64_update(csum, d, bv.bv_len);
kunmap(bv.bv_page);
}
@@ -98,7 +99,7 @@ static void bch_data_insert_keys(struct closure *cl)
closure_return(cl);
}
-static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
struct cache_set *c)
{
size_t oldsize = bch_keylist_nkeys(l);
@@ -107,7 +108,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
/*
* The journalling code doesn't handle the case where the keys to insert
* are bigger than an empty write: if we just return -ENOMEM here,
- * bio_insert() and bio_invalidate() will insert the keys created so far
+ * bch_data_insert_keys() will insert the keys created so far
* and finish the rest when the keylist is empty.
*/
if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
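The comment above is the key constraint: bch_keylist_realloc() refuses any allocation that could not fit in an empty journal write, and the caller is expected to insert what it already has and come back for the remainder. A minimal userspace sketch of that flush-and-continue pattern follows (not part of the patch; the 4096-byte budget and names are illustrative, and it assumes no single key exceeds the budget):

#include <stdio.h>
#include <stddef.h>

#define BLOCK_BUDGET 4096  /* stand-in for block_bytes(c) - sizeof(struct jset) */

/* Appending fails once the pending batch would exceed one empty write. */
static int batch_add(size_t *pending, size_t nbytes)
{
        if (*pending + nbytes > BLOCK_BUDGET)
                return -1;              /* caller must flush and retry */
        *pending += nbytes;
        return 0;
}

int main(void)
{
        size_t pending = 0, done = 0;
        size_t keys[] = { 3000, 2000, 2500, 800 };

        for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); ) {
                if (batch_add(&pending, keys[i])) {
                        /* Insert the keys created so far, then continue. */
                        printf("flush %zu bytes\n", pending);
                        done += pending;
                        pending = 0;
                        continue;       /* retry the same key */
                }
                i++;
        }
        printf("final flush %zu bytes (total %zu)\n", pending, done + pending);
        return 0;
}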
@@ -125,7 +126,7 @@ static void bch_data_invalidate(struct closure *cl)
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
- unsigned sectors = min(bio_sectors(bio),
+ unsigned int sectors = min(bio_sectors(bio),
1U << (KEY_SIZE_BITS - 1));
if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
@@ -135,7 +136,9 @@ static void bch_data_invalidate(struct closure *cl)
bio->bi_iter.bi_size -= sectors << 9;
bch_keylist_add(&op->insert_keys,
- &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
+ &KEY(op->inode,
+ bio->bi_iter.bi_sector,
+ sectors));
}
op->insert_data_done = true;
@@ -151,7 +154,7 @@ static void bch_data_insert_error(struct closure *cl)
/*
* Our data write just errored, which means we've got a bunch of keys to
- * insert that point to data that wasn't succesfully written.
+ * insert that point to data that wasn't successfully written.
*
* We don't have to insert those keys but we still have to invalidate
* that region of the cache - so, if we just strip off all the pointers
@@ -211,7 +214,7 @@ static void bch_data_insert_start(struct closure *cl)
bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
do {
- unsigned i;
+ unsigned int i;
struct bkey *k;
struct bio_set *split = &op->c->bio_split;
@@ -328,7 +331,7 @@ void bch_data_insert(struct closure *cl)
/* Congested? */
-unsigned bch_get_congested(struct cache_set *c)
+unsigned int bch_get_congested(struct cache_set *c)
{
int i;
long rand;
@@ -372,8 +375,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
- unsigned mode = cache_mode(dc);
- unsigned sectors, congested = bch_get_congested(c);
+ unsigned int mode = cache_mode(dc);
+ unsigned int sectors, congested = bch_get_congested(c);
struct task_struct *task = current;
struct io *i;
@@ -469,11 +472,11 @@ struct search {
struct bio *cache_miss;
struct bcache_device *d;
- unsigned insert_bio_sectors;
- unsigned recoverable:1;
- unsigned write:1;
- unsigned read_dirty_data:1;
- unsigned cache_missed:1;
+ unsigned int insert_bio_sectors;
+ unsigned int recoverable:1;
+ unsigned int write:1;
+ unsigned int read_dirty_data:1;
+ unsigned int cache_missed:1;
unsigned long start_time;
@@ -514,20 +517,20 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct search *s = container_of(op, struct search, op);
struct bio *n, *bio = &s->bio.bio;
struct bkey *bio_key;
- unsigned ptr;
+ unsigned int ptr;
if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode ||
KEY_START(k) > bio->bi_iter.bi_sector) {
- unsigned bio_sectors = bio_sectors(bio);
- unsigned sectors = KEY_INODE(k) == s->iop.inode
+ unsigned int bio_sectors = bio_sectors(bio);
+ unsigned int sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX,
KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX;
-
int ret = s->d->cache_miss(b, s, bio, sectors);
+
if (ret != MAP_CONTINUE)
return ret;
@@ -623,6 +626,7 @@ static void request_endio(struct bio *bio)
if (bio->bi_status) {
struct search *s = container_of(cl, struct search, cl);
+
s->iop.status = bio->bi_status;
/* Only cache read errors are recoverable */
s->recoverable = false;
@@ -667,8 +671,7 @@ static void backing_request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- generic_end_io_acct(s->d->disk->queue,
- bio_data_dir(s->orig_bio),
+ generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -702,6 +705,8 @@ static void search_free(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
+ atomic_dec(&s->d->c->search_inflight);
+
if (s->iop.bio)
bio_put(s->iop.bio);
@@ -719,6 +724,7 @@ static inline struct search *search_alloc(struct bio *bio,
closure_init(&s->cl, NULL);
do_bio_hook(s, bio, request_endio);
+ atomic_inc(&d->c->search_inflight);
s->orig_bio = bio;
s->cache_miss = NULL;
@@ -811,7 +817,8 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
- s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
+ s->iop.bio->bi_iter.bi_sector =
+ s->cache_miss->bi_iter.bi_sector;
bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
@@ -854,10 +861,10 @@ static void cached_dev_read_done_bh(struct closure *cl)
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
- struct bio *bio, unsigned sectors)
+ struct bio *bio, unsigned int sectors)
{
int ret = MAP_CONTINUE;
- unsigned reada = 0;
+ unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
@@ -1062,8 +1069,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io = ddip->bi_end_io;
bio->bi_private = ddip->bi_private;
- generic_end_io_acct(ddip->d->disk->queue,
- bio_data_dir(bio),
+ generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
&ddip->d->disk->part0, ddip->start_time);
if (bio->bi_status) {
@@ -1102,6 +1108,44 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
generic_make_request(bio);
}
+static void quit_max_writeback_rate(struct cache_set *c,
+ struct cached_dev *this_dc)
+{
+ int i;
+ struct bcache_device *d;
+ struct cached_dev *dc;
+
+ /*
+ * The mutex bch_register_lock may be contended by other parallel
+ * requesters, or by attach/detach operations on other backing devices.
+ * Waiting for the mutex lock may increase I/O request latency for
+ * seconds or more. To avoid such a situation, if mutex_trylock()
+ * fails, only the writeback rate of the current cached device is set
+ * to 1, and __update_writeback_rate() will decide the writeback rate
+ * of the other cached devices (remember c->idle_counter is already 0).
+ */
+ if (mutex_trylock(&bch_register_lock)) {
+ for (i = 0; i < c->devices_max_used; i++) {
+ if (!c->devices[i])
+ continue;
+
+ if (UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+
+ d = c->devices[i];
+ dc = container_of(d, struct cached_dev, disk);
+ /*
+ * set writeback rate to default minimum value,
+ * then let update_writeback_rate() to decide the
+ * upcoming rate.
+ */
+ atomic_long_set(&dc->writeback_rate.rate, 1);
+ }
+ mutex_unlock(&bch_register_lock);
+ } else
+ atomic_long_set(&this_dc->writeback_rate.rate, 1);
+}
+
/* Cached devices - read & write stuff */
static blk_qc_t cached_dev_make_request(struct request_queue *q,
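quit_max_writeback_rate() above prefers degraded behaviour over blocking: when bch_register_lock is contended it resets only the calling device's rate and lets the periodic update fix the rest. A self-contained pthread sketch of the same trylock-or-degrade idea (illustrative names and values, not bcache code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NDEV 4

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_long rate[NDEV];

/* Reset writeback rates without risking a long wait on register_lock. */
static void quit_max_rate(int this_dev)
{
        if (pthread_mutex_trylock(&register_lock) == 0) {
                for (int i = 0; i < NDEV; i++)
                        atomic_store(&rate[i], 1);      /* reset every device */
                pthread_mutex_unlock(&register_lock);
        } else {
                /* Lock is contended: touch only our own device; the
                 * periodic rate update will correct the others later. */
                atomic_store(&rate[this_dev], 1);
        }
}

int main(void)
{
        for (int i = 0; i < NDEV; i++)
                atomic_store(&rate[i], 1 << 20);
        quit_max_rate(2);
        for (int i = 0; i < NDEV; i++)
                printf("dev %d rate %ld\n", i, atomic_load(&rate[i]));
        return 0;
}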
@@ -1119,8 +1163,25 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
return BLK_QC_T_NONE;
}
- atomic_set(&dc->backing_idle, 0);
- generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+ if (likely(d->c)) {
+ if (atomic_read(&d->c->idle_counter))
+ atomic_set(&d->c->idle_counter, 0);
+ /*
+ * If at_max_writeback_rate of cache set is true and new I/O
+ * comes, quit max writeback rate of all cached devices
+ * attached to this cache set, and set at_max_writeback_rate
+ * to false.
+ */
+ if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
+ atomic_set(&d->c->at_max_writeback_rate, 0);
+ quit_max_writeback_rate(d->c, dc);
+ }
+ }
+
+ generic_start_io_acct(q,
+ bio_op(bio),
+ bio_sectors(bio),
+ &d->disk->part0);
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1156,6 +1217,7 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
@@ -1170,7 +1232,7 @@ static int cached_dev_congested(void *data, int bits)
return 1;
if (cached_dev_get(dc)) {
- unsigned i;
+ unsigned int i;
struct cache *ca;
for_each_cache(ca, d->c, i) {
@@ -1197,9 +1259,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
/* Flash backed devices */
static int flash_dev_cache_miss(struct btree *b, struct search *s,
- struct bio *bio, unsigned sectors)
+ struct bio *bio, unsigned int sectors)
{
- unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
+ unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
swap(bio->bi_iter.bi_size, bytes);
zero_fill_bio(bio);
@@ -1229,7 +1291,6 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
struct search *s;
struct closure *cl;
struct bcache_device *d = bio->bi_disk->private_data;
- int rw = bio_data_dir(bio);
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
@@ -1237,7 +1298,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
return BLK_QC_T_NONE;
}
- generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+ generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
s = search_alloc(bio, d);
cl = &s->cl;
@@ -1254,7 +1315,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
flash_dev_nodata,
bcache_wq);
return BLK_QC_T_NONE;
- } else if (rw) {
+ } else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
@@ -1283,7 +1344,7 @@ static int flash_dev_congested(void *data, int bits)
struct bcache_device *d = data;
struct request_queue *q;
struct cache *ca;
- unsigned i;
+ unsigned int i;
int ret = 0;
for_each_cache(ca, d->c, i) {
@@ -1306,8 +1367,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
void bch_request_exit(void)
{
- if (bch_search_cache)
- kmem_cache_destroy(bch_search_cache);
+ kmem_cache_destroy(bch_search_cache);
}
int __init bch_request_init(void)
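The guard around kmem_cache_destroy() is dropped because, like kfree(), kmem_cache_destroy() has long been a no-op when passed NULL, so the check was redundant. A tiny userspace sketch of the same NULL-tolerant destructor convention (illustrative, not kernel code):

#include <stdlib.h>

/* free()-style destructors should tolerate NULL so that teardown
 * paths never need an explicit guard at the call site. */
struct cache { void *mem; };

static void cache_destroy(struct cache *c)
{
        if (!c)                 /* no-op on NULL, like kmem_cache_destroy() */
                return;
        free(c->mem);
        free(c);
}

int main(void)
{
        cache_destroy(NULL);    /* safe: the guard lives inside the destructor */
        return 0;
}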
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index dea0886b81c1..aa055cfeb099 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -8,7 +8,7 @@ struct data_insert_op {
struct bio *bio;
struct workqueue_struct *wq;
- unsigned inode;
+ unsigned int inode;
uint16_t write_point;
uint16_t write_prio;
blk_status_t status;
@@ -17,15 +17,15 @@ struct data_insert_op {
uint16_t flags;
struct {
- unsigned bypass:1;
- unsigned writeback:1;
- unsigned flush_journal:1;
- unsigned csum:1;
+ unsigned int bypass:1;
+ unsigned int writeback:1;
+ unsigned int flush_journal:1;
+ unsigned int csum:1;
- unsigned replace:1;
- unsigned replace_collision:1;
+ unsigned int replace:1;
+ unsigned int replace_collision:1;
- unsigned insert_data_done:1;
+ unsigned int insert_data_done:1;
};
};
@@ -33,7 +33,7 @@ struct data_insert_op {
BKEY_PADDED(replace_key);
};
-unsigned bch_get_congested(struct cache_set *);
+unsigned int bch_get_congested(struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index be119326297b..894410f3f829 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -33,11 +33,11 @@
* stored left shifted by 16, and scaled back in the sysfs show() function.
*/
-static const unsigned DAY_RESCALE = 288;
-static const unsigned HOUR_RESCALE = 12;
-static const unsigned FIVE_MINUTE_RESCALE = 1;
-static const unsigned accounting_delay = (HZ * 300) / 22;
-static const unsigned accounting_weight = 32;
+static const unsigned int DAY_RESCALE = 288;
+static const unsigned int HOUR_RESCALE = 12;
+static const unsigned int FIVE_MINUTE_RESCALE = 1;
+static const unsigned int accounting_delay = (HZ * 300) / 22;
+static const unsigned int accounting_weight = 32;
/* sysfs reading/writing */
@@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t)
struct cache_accounting *acc = from_timer(acc, t, timer);
#define move_stat(name) do { \
- unsigned t = atomic_xchg(&acc->collector.name, 0); \
+ unsigned int t = atomic_xchg(&acc->collector.name, 0); \
t <<= 16; \
acc->five_minute.name += t; \
acc->hour.name += t; \
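As the comment at the top of this file says, the long-period counters are stored left-shifted by 16 — 16.16 fixed point — and shifted back in the sysfs show() path; move_stat() above is the accumulate step. A compact sketch of the scheme (the exponential-decay step is an assumption standing in for the rescale logic; only the <<16 / >>16 round trip is taken from the file):

#include <stdio.h>
#include <stdint.h>

#define WEIGHT 32       /* same spirit as accounting_weight above */

int main(void)
{
        uint64_t hour_hits = 0; /* stored as 16.16 fixed point */

        for (int tick = 0; tick < 10; tick++) {
                uint64_t collected = 7;                 /* raw events this period */
                hour_hits += collected << 16;           /* move_stat() step */
                hour_hits -= hour_hits / WEIGHT;        /* assumed decay step */
        }

        /* the sysfs show() function scales back down */
        printf("hour hits ~ %llu\n",
               (unsigned long long)(hour_hits >> 16));
        return 0;
}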
@@ -200,6 +200,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
mark_cache_stats(&dc->accounting.collector, hit, bypass);
mark_cache_stats(&c->accounting.collector, hit, bypass);
}
@@ -207,6 +208,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
atomic_inc(&dc->accounting.collector.cache_readaheads);
atomic_inc(&c->accounting.collector.cache_readaheads);
}
@@ -214,6 +216,7 @@ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+
atomic_inc(&dc->accounting.collector.cache_miss_collisions);
atomic_inc(&c->accounting.collector.cache_miss_collisions);
}
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index 0b70f9de0c03..abfaabf7e7fc 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -23,7 +23,7 @@ struct cache_stats {
unsigned long cache_miss_collisions;
unsigned long sectors_bypassed;
- unsigned rescale;
+ unsigned int rescale;
};
struct cache_accounting {
@@ -53,10 +53,13 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc);
-void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
- bool, bool);
-void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
-void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
-void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
+void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ bool hit, bool bypass);
+void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
+void bch_mark_cache_miss_collision(struct cache_set *c,
+ struct bcache_device *d);
+void bch_mark_sectors_bypassed(struct cache_set *c,
+ struct cached_dev *dc,
+ int sectors);
#endif /* _BCACHE_STATS_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fa4058e43202..94c756c66bd7 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bcache setup/teardown code, and some metadata io - read a superblock and
* figure out what to do with it.
@@ -61,7 +62,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
const char *err;
struct cache_sb *s;
struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
- unsigned i;
+ unsigned int i;
if (!bh)
return "IO error";
@@ -149,7 +150,8 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
goto err;
err = "Invalid superblock: device too small";
- if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
+ if (get_capacity(bdev->bd_disk) <
+ sb->bucket_size * sb->nbuckets)
goto err;
err = "Bad UUID";
@@ -181,7 +183,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
goto err;
}
- sb->last_mount = get_seconds();
+ sb->last_mount = (u32)ktime_get_real_seconds();
err = NULL;
get_page(bh->b_page);
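The get_seconds() → ktime_get_real_seconds() changes in this file are y2038 preparation: get_seconds() returns an unsigned long (32 bits on 32-bit machines), while ktime_get_real_seconds() returns a 64-bit time64_t, and the explicit (u32) cast documents that the 32-bit on-disk superblock field is unchanged. A sketch of that truncation at the format boundary (field and type names are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct ondisk_sb {
        uint32_t last_mount;    /* the on-disk format stays 32-bit */
};

int main(void)
{
        int64_t now = (int64_t)time(NULL); /* stand-in for ktime_get_real_seconds() */
        struct ondisk_sb sb;

        sb.last_mount = (uint32_t)now;  /* deliberate, documented truncation */
        printf("64-bit now=%lld stored as %u\n",
               (long long)now, sb.last_mount);
        return 0;
}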
@@ -202,7 +204,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
struct cache_sb *out = page_address(bio_first_page_all(bio));
- unsigned i;
+ unsigned int i;
bio->bi_iter.bi_sector = SB_SECTOR;
bio->bi_iter.bi_size = SB_SIZE;
@@ -282,7 +284,7 @@ void bcache_write_super(struct cache_set *c)
{
struct closure *cl = &c->sb_write;
struct cache *ca;
- unsigned i;
+ unsigned int i;
down(&c->sb_write_mutex);
closure_init(cl, &c->cl);
@@ -334,7 +336,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
{
struct closure *cl = &c->uuid_write;
struct uuid_entry *u;
- unsigned i;
+ unsigned int i;
char buf[80];
BUG_ON(!parent);
@@ -415,8 +417,8 @@ static int __uuid_write(struct cache_set *c)
{
BKEY_PADDED(key) k;
struct closure cl;
- closure_init_stack(&cl);
+ closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
@@ -456,6 +458,7 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+
return uuid_find(c, zero_uuid);
}
@@ -463,8 +466,8 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
* Bucket priorities/gens:
*
* For each bucket, we store on disk its
- * 8 bit gen
- * 16 bit priority
+ * 8 bit gen
+ * 16 bit priority
*
* See alloc.c for an explanation of the gen. The priority is used to implement
* lru (and in the future other) cache replacement policies; for most purposes
@@ -587,7 +590,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
struct prio_set *p = ca->disk_buckets;
struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
struct bucket *b;
- unsigned bucket_nr = 0;
+ unsigned int bucket_nr = 0;
for (b = ca->buckets;
b < ca->buckets + ca->sb.nbuckets;
@@ -599,7 +602,8 @@ static void prio_read(struct cache *ca, uint64_t bucket)
prio_io(ca, bucket, REQ_OP_READ, 0);
- if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+ if (p->csum !=
+ bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
if (p->magic != pset_magic(&ca->sb))
@@ -619,6 +623,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
static int open_dev(struct block_device *b, fmode_t mode)
{
struct bcache_device *d = b->bd_disk->private_data;
+
if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
return -ENXIO;
@@ -629,6 +634,7 @@ static int open_dev(struct block_device *b, fmode_t mode)
static void release_dev(struct gendisk *b, fmode_t mode)
{
struct bcache_device *d = b->private_data;
+
closure_put(&d->cl);
}
@@ -662,7 +668,7 @@ static void bcache_device_unlink(struct bcache_device *d)
lockdep_assert_held(&bch_register_lock);
if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
- unsigned i;
+ unsigned int i;
struct cache *ca;
sysfs_remove_link(&d->c->kobj, d->name);
@@ -676,7 +682,7 @@ static void bcache_device_unlink(struct bcache_device *d)
static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
const char *name)
{
- unsigned i;
+ unsigned int i;
struct cache *ca;
for_each_cache(ca, d->c, i)
@@ -696,12 +702,14 @@ static void bcache_device_detach(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
+ atomic_dec(&d->c->attached_dev_nr);
+
if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
struct uuid_entry *u = d->c->uuids + d->id;
SET_UUID_FLASH_ONLY(u, 0);
memcpy(u->uuid, invalid_uuid, 16);
- u->invalidated = cpu_to_le32(get_seconds());
+ u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
bch_uuid_write(d->c);
}
@@ -713,7 +721,7 @@ static void bcache_device_detach(struct bcache_device *d)
}
static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
- unsigned id)
+ unsigned int id)
{
d->id = id;
d->c = c;
@@ -760,7 +768,7 @@ static void bcache_device_free(struct bcache_device *d)
closure_debug_destroy(&d->cl);
}
-static int bcache_device_init(struct bcache_device *d, unsigned block_size,
+static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
sector_t sectors)
{
struct request_queue *q;
@@ -776,7 +784,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->nr_stripes || d->nr_stripes > max_stripes) {
pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
- (unsigned)d->nr_stripes);
+ (unsigned int)d->nr_stripes);
return -ENOMEM;
}
@@ -796,11 +804,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
return idx;
if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
- !(d->disk = alloc_disk(BCACHE_MINORS))) {
- ida_simple_remove(&bcache_device_idx, idx);
- return -ENOMEM;
- }
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ goto err;
+
+ d->disk = alloc_disk(BCACHE_MINORS);
+ if (!d->disk)
+ goto err;
set_capacity(d->disk, sectors);
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
@@ -834,6 +843,11 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
blk_queue_write_cache(q, true, true);
return 0;
+
+err:
+ ida_simple_remove(&bcache_device_idx, idx);
+ return -ENOMEM;
+
}
/* Cached device */
@@ -911,6 +925,7 @@ void bch_cached_dev_run(struct cached_dev *dc)
if (!d->c &&
BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
struct closure cl;
+
closure_init_stack(&cl);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
@@ -920,8 +935,10 @@ void bch_cached_dev_run(struct cached_dev *dc)
add_disk(d->disk);
bd_link_disk_holder(dc->bdev, dc->disk.disk);
- /* won't show up in the uevent file, use udevadm monitor -e instead
- * only class / kset properties are persistent */
+ /*
+ * won't show up in the uevent file; use udevadm monitor -e instead.
+ * Only class / kset properties are persistent.
+ */
kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
kfree(env[1]);
kfree(env[2]);
@@ -968,6 +985,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
{
struct cached_dev *dc = container_of(w, struct cached_dev, detach);
struct closure cl;
+
closure_init_stack(&cl);
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
@@ -1027,7 +1045,7 @@ void bch_cached_dev_detach(struct cached_dev *dc)
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
uint8_t *set_uuid)
{
- uint32_t rtime = cpu_to_le32(get_seconds());
+ uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
struct uuid_entry *u;
struct cached_dev *exist_dc, *t;
@@ -1070,7 +1088,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
(BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
memcpy(u->uuid, invalid_uuid, 16);
- u->invalidated = cpu_to_le32(get_seconds());
+ u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
u = NULL;
}
@@ -1089,12 +1107,14 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
}
}
- /* Deadlocks since we're called via sysfs...
- sysfs_remove_file(&dc->kobj, &sysfs_attach);
+ /*
+ * Deadlocks since we're called via sysfs...
+ * sysfs_remove_file(&dc->kobj, &sysfs_attach);
*/
if (bch_is_zero(u->uuid, 16)) {
struct closure cl;
+
closure_init_stack(&cl);
memcpy(u->uuid, dc->sb.uuid, 16);
@@ -1116,11 +1136,11 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
list_move(&dc->list, &c->cached_devs);
calc_cached_dev_sectors(c);
- smp_wmb();
/*
* dc->c must be set before dc->count != 0 - paired with the mb in
* cached_dev_get()
*/
+ smp_wmb();
refcount_set(&dc->count, 1);
/* Block writeback thread, but spawn it */
@@ -1138,6 +1158,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
+ atomic_inc(&c->attached_dev_nr);
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1203,7 +1224,7 @@ static void cached_dev_flush(struct closure *cl)
continue_at(cl, cached_dev_free, system_wq);
}
-static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
+static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{
int ret;
struct io *io;
@@ -1285,6 +1306,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
pr_info("registered backing device %s", dc->backing_dev_name);
list_add(&dc->list, &uncached_devices);
+ /* attach to a matched cache set if it exists */
list_for_each_entry(c, &bch_cache_sets, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -1310,7 +1332,10 @@ void bch_flash_dev_release(struct kobject *kobj)
static void flash_dev_free(struct closure *cl)
{
struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+
mutex_lock(&bch_register_lock);
+ atomic_long_sub(bcache_dev_sectors_dirty(d),
+ &d->c->flash_dev_dirty_sectors);
bcache_device_free(d);
mutex_unlock(&bch_register_lock);
kobject_put(&d->kobj);
@@ -1390,7 +1415,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
get_random_bytes(u->uuid, 16);
memset(u->label, 0, 32);
- u->first_reg = u->last_reg = cpu_to_le32(get_seconds());
+ u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
SET_UUID_FLASH_ONLY(u, 1);
u->sectors = size >> 9;
@@ -1447,17 +1472,18 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
pr_info("CACHE_SET_IO_DISABLE already set");
- /* XXX: we can be called from atomic context
- acquire_console_sem();
- */
+ /*
+ * XXX: we can be called from atomic context
+ * acquire_console_sem();
+ */
- printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
+ pr_err("bcache: error on %pU: ", c->sb.set_uuid);
va_start(args, fmt);
vprintk(fmt, args);
va_end(args);
- printk(", disabling caching\n");
+ pr_err(", disabling caching\n");
if (c->on_error == ON_ERROR_PANIC)
panic("panic forced after error\n");
@@ -1469,6 +1495,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
void bch_cache_set_release(struct kobject *kobj)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
kfree(c);
module_put(THIS_MODULE);
}
@@ -1477,7 +1504,7 @@ static void cache_set_free(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, cl);
struct cache *ca;
- unsigned i;
+ unsigned int i;
if (!IS_ERR_OR_NULL(c->debug))
debugfs_remove(c->debug);
@@ -1520,7 +1547,7 @@ static void cache_set_flush(struct closure *cl)
struct cache_set *c = container_of(cl, struct cache_set, caching);
struct cache *ca;
struct btree *b;
- unsigned i;
+ unsigned int i;
bch_cache_accounting_destroy(&c->accounting);
@@ -1659,6 +1686,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
int iter_size;
struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
+
if (!c)
return NULL;
@@ -1687,6 +1715,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
c->devices_max_used = 0;
+ atomic_set(&c->attached_dev_nr, 0);
c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
@@ -1718,8 +1747,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
mempool_init_kmalloc_pool(&c->bio_meta, 2,
- sizeof(struct bbio) + sizeof(struct bio_vec) *
- bucket_pages(c)) ||
+ sizeof(struct bbio) + sizeof(struct bio_vec) *
+ bucket_pages(c)) ||
mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
@@ -1749,7 +1778,7 @@ static void run_cache_set(struct cache_set *c)
struct cached_dev *dc, *t;
struct cache *ca;
struct closure cl;
- unsigned i;
+ unsigned int i;
closure_init_stack(&cl);
@@ -1791,7 +1820,9 @@ static void run_cache_set(struct cache_set *c)
goto err;
err = "error reading btree root";
- c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
+ c->root = bch_btree_node_get(c, NULL, k,
+ j->btree_level,
+ true, NULL);
if (IS_ERR_OR_NULL(c->root))
goto err;
@@ -1840,7 +1871,7 @@ static void run_cache_set(struct cache_set *c)
pr_notice("invalidating existing data");
for_each_cache(ca, c, i) {
- unsigned j;
+ unsigned int j;
ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2, SB_JOURNAL_BUCKETS);
@@ -1894,7 +1925,7 @@ static void run_cache_set(struct cache_set *c)
goto err;
closure_sync(&cl);
- c->sb.last_mount = get_seconds();
+ c->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
list_for_each_entry_safe(dc, t, &uncached_devices, list)
@@ -1985,7 +2016,7 @@ err:
void bch_cache_release(struct kobject *kobj)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
- unsigned i;
+ unsigned int i;
if (ca->set) {
BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
@@ -2085,7 +2116,9 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
goto err;
}
- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
+ if (kobject_add(&ca->kobj,
+ &part_to_dev(bdev->bd_part)->kobj,
+ "bcache")) {
err = "error calling kobject_add";
ret = -ENOMEM;
goto out;
@@ -2114,13 +2147,14 @@ err:
/* Global interfaces/init */
-static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
- const char *, size_t);
+static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ const char *buffer, size_t size);
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
-static bool bch_is_open_backing(struct block_device *bdev) {
+static bool bch_is_open_backing(struct block_device *bdev)
+{
struct cache_set *c, *tc;
struct cached_dev *dc, *t;
@@ -2134,10 +2168,11 @@ static bool bch_is_open_backing(struct block_device *bdev) {
return false;
}
-static bool bch_is_open_cache(struct block_device *bdev) {
+static bool bch_is_open_cache(struct block_device *bdev)
+{
struct cache_set *c, *tc;
struct cache *ca;
- unsigned i;
+ unsigned int i;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
for_each_cache(ca, c, i)
@@ -2146,7 +2181,8 @@ static bool bch_is_open_cache(struct block_device *bdev) {
return false;
}
-static bool bch_is_open(struct block_device *bdev) {
+static bool bch_is_open(struct block_device *bdev)
+{
return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}
@@ -2163,8 +2199,12 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!try_module_get(THIS_MODULE))
return -EBUSY;
- if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
- !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
+ path = kstrndup(buffer, size, GFP_KERNEL);
+ if (!path)
+ goto err;
+
+ sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
+ if (!sb)
goto err;
err = "failed to open device";
@@ -2199,6 +2239,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+
if (!dc)
goto err_close;
@@ -2207,6 +2248,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
mutex_unlock(&bch_register_lock);
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
if (!ca)
goto err_close;
@@ -2324,13 +2366,21 @@ static int __init bcache_init(void)
return bcache_major;
}
- if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
- !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
- bch_request_init() ||
- bch_debug_init(bcache_kobj) || closure_debug_init() ||
+ bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
+ if (!bcache_wq)
+ goto err;
+
+ bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
+ if (!bcache_kobj)
+ goto err;
+
+ if (bch_request_init() ||
sysfs_create_files(bcache_kobj, files))
goto err;
+ bch_debug_init(bcache_kobj);
+ closure_debug_init();
+
return 0;
err:
bcache_exit();
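Both bcache_device_init() and bcache_init() are reworked in this file from one chained || of allocations into sequential checks branching to a single error label — the usual kernel unwind idiom, which also makes partial-failure cleanup explicit. A minimal userspace sketch of the shape (resources are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int subsystem_init(void)
{
        char *buf = NULL;
        FILE *log = NULL;

        buf = malloc(4096);
        if (!buf)
                goto err;

        log = fopen("/tmp/subsys.log", "w");
        if (!log)
                goto err;

        if (fputs("hello\n", log) == EOF)
                goto err;

        fclose(log);
        free(buf);
        return 0;

err:    /* one label unwinds everything acquired so far */
        if (log)
                fclose(log);
        free(buf);      /* free(NULL) is a no-op */
        return -1;
}

int main(void)
{
        return subsystem_init() ? 1 : 0;
}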
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 225b15aa0340..150cf4f4cf74 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -130,8 +130,10 @@ rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
-static ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
- size_t selected)
+static ssize_t bch_snprint_string_list(char *buf,
+ size_t size,
+ const char * const list[],
+ size_t selected)
{
char *out = buf;
size_t i;
@@ -148,7 +150,8 @@ SHOW(__bch_cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
- const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
+ char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
+ int wb = dc->writeback_running;
#define var(stat) (dc->stat)
@@ -170,7 +173,8 @@ SHOW(__bch_cached_dev)
var_printf(writeback_running, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
- sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
+ sysfs_hprint(writeback_rate,
+ wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
sysfs_printf(io_error_limit, "%i", dc->error_limit);
sysfs_printf(io_disable, "%i", dc->io_disable);
@@ -188,15 +192,22 @@ SHOW(__bch_cached_dev)
char change[20];
s64 next_io;
- bch_hprint(rate, dc->writeback_rate.rate << 9);
- bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
- bch_hprint(target, dc->writeback_rate_target << 9);
- bch_hprint(proportional,dc->writeback_rate_proportional << 9);
- bch_hprint(integral, dc->writeback_rate_integral_scaled << 9);
- bch_hprint(change, dc->writeback_rate_change << 9);
-
- next_io = div64_s64(dc->writeback_rate.next - local_clock(),
- NSEC_PER_MSEC);
+ /*
+ * Except for dirty and target, other values should
+ * be 0 if writeback is not running.
+ */
+ bch_hprint(rate,
+ wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
+ : 0);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
+ bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional,
+ wb ? dc->writeback_rate_proportional << 9 : 0);
+ bch_hprint(integral,
+ wb ? dc->writeback_rate_integral_scaled << 9 : 0);
+ bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
+ next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
+ NSEC_PER_MSEC) : 0;
return sprintf(buf,
"rate:\t\t%s/sec\n"
@@ -255,8 +266,19 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
- sysfs_strtoul_clamp(writeback_rate,
- dc->writeback_rate.rate, 1, INT_MAX);
+ if (attr == &sysfs_writeback_rate) {
+ ssize_t ret;
+ long int v = atomic_long_read(&dc->writeback_rate.rate);
+
+ ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);
+
+ if (!ret) {
+ atomic_long_set(&dc->writeback_rate.rate, v);
+ ret = size;
+ }
+
+ return ret;
+ }
sysfs_strtoul_clamp(writeback_rate_update_seconds,
dc->writeback_rate_update_seconds,
@@ -287,7 +309,7 @@ STORE(__cached_dev)
if (v < 0)
return v;
- if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
+ if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
SET_BDEV_CACHE_MODE(&dc->sb, v);
bch_write_bdev_super(dc, NULL);
}
@@ -321,8 +343,9 @@ STORE(__cached_dev)
add_uevent_var(env, "DRIVER=bcache");
add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
add_uevent_var(env, "CACHED_LABEL=%s", buf);
- kobject_uevent_env(
- &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
+ kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
+ KOBJ_CHANGE,
+ env->envp);
kfree(env);
}
@@ -338,8 +361,8 @@ STORE(__cached_dev)
if (!v)
return size;
}
-
- pr_err("Can't attach %s: cache set not found", buf);
+ if (v == -ENOENT)
+ pr_err("Can't attach %s: cache set not found", buf);
return v;
}
@@ -439,6 +462,7 @@ STORE(__bch_flash_dev)
if (attr == &sysfs_size) {
uint64_t v;
+
strtoi_h_or_return(buf, v);
u->sectors = v >> 9;
@@ -513,9 +537,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
op.stats.floats, op.stats.failed);
}
-static unsigned bch_root_usage(struct cache_set *c)
+static unsigned int bch_root_usage(struct cache_set *c)
{
- unsigned bytes = 0;
+ unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
struct btree_iter iter;
@@ -550,9 +574,9 @@ static size_t bch_cache_size(struct cache_set *c)
return ret;
}
-static unsigned bch_cache_max_chain(struct cache_set *c)
+static unsigned int bch_cache_max_chain(struct cache_set *c)
{
- unsigned ret = 0;
+ unsigned int ret = 0;
struct hlist_head *h;
mutex_lock(&c->bucket_lock);
@@ -560,7 +584,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
for (h = c->bucket_hash;
h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
h++) {
- unsigned i = 0;
+ unsigned int i = 0;
struct hlist_node *p;
hlist_for_each(p, h)
@@ -573,13 +597,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
return ret;
}
-static unsigned bch_btree_used(struct cache_set *c)
+static unsigned int bch_btree_used(struct cache_set *c)
{
return div64_u64(c->gc_stats.key_bytes * 100,
(c->gc_stats.nodes ?: 1) * btree_bytes(c));
}
-static unsigned bch_average_key_size(struct cache_set *c)
+static unsigned int bch_average_key_size(struct cache_set *c)
{
return c->gc_stats.nkeys
? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
@@ -683,6 +707,7 @@ STORE(__bch_cache_set)
if (attr == &sysfs_flash_vol_create) {
int r;
uint64_t v;
+
strtoi_h_or_return(buf, v);
r = bch_flash_dev_create(c, v);
@@ -716,6 +741,7 @@ STORE(__bch_cache_set)
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
+
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
c->shrink.scan_objects(&c->shrink, &sc);
@@ -769,12 +795,14 @@ STORE_LOCKED(bch_cache_set)
SHOW(bch_cache_set_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
+
return bch_cache_set_show(&c->kobj, attr, buf);
}
STORE(bch_cache_set_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
+
return bch_cache_set_store(&c->kobj, attr, buf, size);
}
@@ -976,7 +1004,7 @@ STORE(__bch_cache)
if (v < 0)
return v;
- if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
+ if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
mutex_lock(&ca->set->bucket_lock);
SET_CACHE_REPLACEMENT(&ca->sb, v);
mutex_unlock(&ca->set->bucket_lock);
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index b54fe9602529..3fe82425859c 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -44,9 +44,9 @@ STORE(fn) \
static struct attribute sysfs_##_name = \
{ .name = #_name, .mode = _mode }
-#define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
-#define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
-#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
+#define write_attribute(n) __sysfs_attribute(n, 0200)
+#define read_attribute(n) __sysfs_attribute(n, 0444)
+#define rw_attribute(n) __sysfs_attribute(n, 0644)
#define sysfs_printf(file, fmt, ...) \
do { \
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index fc479b026d6d..20eddeac1531 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* random utility code, for bcache but in theory not specific to bcache
*
@@ -133,6 +134,7 @@ bool bch_is_zero(const char *p, size_t n)
int bch_parse_uuid(const char *s, char *uuid)
{
size_t i, j, x;
+
memset(uuid, 0, 16);
for (i = 0, j = 0;
@@ -200,7 +202,7 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
- d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+ d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));
/* Bound the time. Don't let us fall further than 2 seconds behind
* (this prevents unnecessary backlog that would make it impossible
@@ -279,134 +281,3 @@ int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
return 0;
}
-
-/*
- * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
- * use permitted, subject to terms of PostgreSQL license; see.)
-
- * If we have a 64-bit integer type, then a 64-bit CRC looks just like the
- * usual sort of implementation. (See Ross Williams' excellent introduction
- * A PAINLESS GUIDE TO CRC ERROR DETECTION ALGORITHMS, available from
- * ftp://ftp.rocksoft.com/papers/crc_v3.txt or several other net sites.)
- * If we have no working 64-bit type, then fake it with two 32-bit registers.
- *
- * The present implementation is a normal (not "reflected", in Williams'
- * terms) 64-bit CRC, using initial all-ones register contents and a final
- * bit inversion. The chosen polynomial is borrowed from the DLT1 spec
- * (ECMA-182, available from http://www.ecma.ch/ecma1/STAND/ECMA-182.HTM):
- *
- * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
- * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
- * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
- * x^7 + x^4 + x + 1
-*/
-
-static const uint64_t crc_table[256] = {
- 0x0000000000000000ULL, 0x42F0E1EBA9EA3693ULL, 0x85E1C3D753D46D26ULL,
- 0xC711223CFA3E5BB5ULL, 0x493366450E42ECDFULL, 0x0BC387AEA7A8DA4CULL,
- 0xCCD2A5925D9681F9ULL, 0x8E224479F47CB76AULL, 0x9266CC8A1C85D9BEULL,
- 0xD0962D61B56FEF2DULL, 0x17870F5D4F51B498ULL, 0x5577EEB6E6BB820BULL,
- 0xDB55AACF12C73561ULL, 0x99A54B24BB2D03F2ULL, 0x5EB4691841135847ULL,
- 0x1C4488F3E8F96ED4ULL, 0x663D78FF90E185EFULL, 0x24CD9914390BB37CULL,
- 0xE3DCBB28C335E8C9ULL, 0xA12C5AC36ADFDE5AULL, 0x2F0E1EBA9EA36930ULL,
- 0x6DFEFF5137495FA3ULL, 0xAAEFDD6DCD770416ULL, 0xE81F3C86649D3285ULL,
- 0xF45BB4758C645C51ULL, 0xB6AB559E258E6AC2ULL, 0x71BA77A2DFB03177ULL,
- 0x334A9649765A07E4ULL, 0xBD68D2308226B08EULL, 0xFF9833DB2BCC861DULL,
- 0x388911E7D1F2DDA8ULL, 0x7A79F00C7818EB3BULL, 0xCC7AF1FF21C30BDEULL,
- 0x8E8A101488293D4DULL, 0x499B3228721766F8ULL, 0x0B6BD3C3DBFD506BULL,
- 0x854997BA2F81E701ULL, 0xC7B97651866BD192ULL, 0x00A8546D7C558A27ULL,
- 0x4258B586D5BFBCB4ULL, 0x5E1C3D753D46D260ULL, 0x1CECDC9E94ACE4F3ULL,
- 0xDBFDFEA26E92BF46ULL, 0x990D1F49C77889D5ULL, 0x172F5B3033043EBFULL,
- 0x55DFBADB9AEE082CULL, 0x92CE98E760D05399ULL, 0xD03E790CC93A650AULL,
- 0xAA478900B1228E31ULL, 0xE8B768EB18C8B8A2ULL, 0x2FA64AD7E2F6E317ULL,
- 0x6D56AB3C4B1CD584ULL, 0xE374EF45BF6062EEULL, 0xA1840EAE168A547DULL,
- 0x66952C92ECB40FC8ULL, 0x2465CD79455E395BULL, 0x3821458AADA7578FULL,
- 0x7AD1A461044D611CULL, 0xBDC0865DFE733AA9ULL, 0xFF3067B657990C3AULL,
- 0x711223CFA3E5BB50ULL, 0x33E2C2240A0F8DC3ULL, 0xF4F3E018F031D676ULL,
- 0xB60301F359DBE0E5ULL, 0xDA050215EA6C212FULL, 0x98F5E3FE438617BCULL,
- 0x5FE4C1C2B9B84C09ULL, 0x1D14202910527A9AULL, 0x93366450E42ECDF0ULL,
- 0xD1C685BB4DC4FB63ULL, 0x16D7A787B7FAA0D6ULL, 0x5427466C1E109645ULL,
- 0x4863CE9FF6E9F891ULL, 0x0A932F745F03CE02ULL, 0xCD820D48A53D95B7ULL,
- 0x8F72ECA30CD7A324ULL, 0x0150A8DAF8AB144EULL, 0x43A04931514122DDULL,
- 0x84B16B0DAB7F7968ULL, 0xC6418AE602954FFBULL, 0xBC387AEA7A8DA4C0ULL,
- 0xFEC89B01D3679253ULL, 0x39D9B93D2959C9E6ULL, 0x7B2958D680B3FF75ULL,
- 0xF50B1CAF74CF481FULL, 0xB7FBFD44DD257E8CULL, 0x70EADF78271B2539ULL,
- 0x321A3E938EF113AAULL, 0x2E5EB66066087D7EULL, 0x6CAE578BCFE24BEDULL,
- 0xABBF75B735DC1058ULL, 0xE94F945C9C3626CBULL, 0x676DD025684A91A1ULL,
- 0x259D31CEC1A0A732ULL, 0xE28C13F23B9EFC87ULL, 0xA07CF2199274CA14ULL,
- 0x167FF3EACBAF2AF1ULL, 0x548F120162451C62ULL, 0x939E303D987B47D7ULL,
- 0xD16ED1D631917144ULL, 0x5F4C95AFC5EDC62EULL, 0x1DBC74446C07F0BDULL,
- 0xDAAD56789639AB08ULL, 0x985DB7933FD39D9BULL, 0x84193F60D72AF34FULL,
- 0xC6E9DE8B7EC0C5DCULL, 0x01F8FCB784FE9E69ULL, 0x43081D5C2D14A8FAULL,
- 0xCD2A5925D9681F90ULL, 0x8FDAB8CE70822903ULL, 0x48CB9AF28ABC72B6ULL,
- 0x0A3B7B1923564425ULL, 0x70428B155B4EAF1EULL, 0x32B26AFEF2A4998DULL,
- 0xF5A348C2089AC238ULL, 0xB753A929A170F4ABULL, 0x3971ED50550C43C1ULL,
- 0x7B810CBBFCE67552ULL, 0xBC902E8706D82EE7ULL, 0xFE60CF6CAF321874ULL,
- 0xE224479F47CB76A0ULL, 0xA0D4A674EE214033ULL, 0x67C58448141F1B86ULL,
- 0x253565A3BDF52D15ULL, 0xAB1721DA49899A7FULL, 0xE9E7C031E063ACECULL,
- 0x2EF6E20D1A5DF759ULL, 0x6C0603E6B3B7C1CAULL, 0xF6FAE5C07D3274CDULL,
- 0xB40A042BD4D8425EULL, 0x731B26172EE619EBULL, 0x31EBC7FC870C2F78ULL,
- 0xBFC9838573709812ULL, 0xFD39626EDA9AAE81ULL, 0x3A28405220A4F534ULL,
- 0x78D8A1B9894EC3A7ULL, 0x649C294A61B7AD73ULL, 0x266CC8A1C85D9BE0ULL,
- 0xE17DEA9D3263C055ULL, 0xA38D0B769B89F6C6ULL, 0x2DAF4F0F6FF541ACULL,
- 0x6F5FAEE4C61F773FULL, 0xA84E8CD83C212C8AULL, 0xEABE6D3395CB1A19ULL,
- 0x90C79D3FEDD3F122ULL, 0xD2377CD44439C7B1ULL, 0x15265EE8BE079C04ULL,
- 0x57D6BF0317EDAA97ULL, 0xD9F4FB7AE3911DFDULL, 0x9B041A914A7B2B6EULL,
- 0x5C1538ADB04570DBULL, 0x1EE5D94619AF4648ULL, 0x02A151B5F156289CULL,
- 0x4051B05E58BC1E0FULL, 0x87409262A28245BAULL, 0xC5B073890B687329ULL,
- 0x4B9237F0FF14C443ULL, 0x0962D61B56FEF2D0ULL, 0xCE73F427ACC0A965ULL,
- 0x8C8315CC052A9FF6ULL, 0x3A80143F5CF17F13ULL, 0x7870F5D4F51B4980ULL,
- 0xBF61D7E80F251235ULL, 0xFD913603A6CF24A6ULL, 0x73B3727A52B393CCULL,
- 0x31439391FB59A55FULL, 0xF652B1AD0167FEEAULL, 0xB4A25046A88DC879ULL,
- 0xA8E6D8B54074A6ADULL, 0xEA16395EE99E903EULL, 0x2D071B6213A0CB8BULL,
- 0x6FF7FA89BA4AFD18ULL, 0xE1D5BEF04E364A72ULL, 0xA3255F1BE7DC7CE1ULL,
- 0x64347D271DE22754ULL, 0x26C49CCCB40811C7ULL, 0x5CBD6CC0CC10FAFCULL,
- 0x1E4D8D2B65FACC6FULL, 0xD95CAF179FC497DAULL, 0x9BAC4EFC362EA149ULL,
- 0x158E0A85C2521623ULL, 0x577EEB6E6BB820B0ULL, 0x906FC95291867B05ULL,
- 0xD29F28B9386C4D96ULL, 0xCEDBA04AD0952342ULL, 0x8C2B41A1797F15D1ULL,
- 0x4B3A639D83414E64ULL, 0x09CA82762AAB78F7ULL, 0x87E8C60FDED7CF9DULL,
- 0xC51827E4773DF90EULL, 0x020905D88D03A2BBULL, 0x40F9E43324E99428ULL,
- 0x2CFFE7D5975E55E2ULL, 0x6E0F063E3EB46371ULL, 0xA91E2402C48A38C4ULL,
- 0xEBEEC5E96D600E57ULL, 0x65CC8190991CB93DULL, 0x273C607B30F68FAEULL,
- 0xE02D4247CAC8D41BULL, 0xA2DDA3AC6322E288ULL, 0xBE992B5F8BDB8C5CULL,
- 0xFC69CAB42231BACFULL, 0x3B78E888D80FE17AULL, 0x7988096371E5D7E9ULL,
- 0xF7AA4D1A85996083ULL, 0xB55AACF12C735610ULL, 0x724B8ECDD64D0DA5ULL,
- 0x30BB6F267FA73B36ULL, 0x4AC29F2A07BFD00DULL, 0x08327EC1AE55E69EULL,
- 0xCF235CFD546BBD2BULL, 0x8DD3BD16FD818BB8ULL, 0x03F1F96F09FD3CD2ULL,
- 0x41011884A0170A41ULL, 0x86103AB85A2951F4ULL, 0xC4E0DB53F3C36767ULL,
- 0xD8A453A01B3A09B3ULL, 0x9A54B24BB2D03F20ULL, 0x5D45907748EE6495ULL,
- 0x1FB5719CE1045206ULL, 0x919735E51578E56CULL, 0xD367D40EBC92D3FFULL,
- 0x1476F63246AC884AULL, 0x568617D9EF46BED9ULL, 0xE085162AB69D5E3CULL,
- 0xA275F7C11F7768AFULL, 0x6564D5FDE549331AULL, 0x279434164CA30589ULL,
- 0xA9B6706FB8DFB2E3ULL, 0xEB46918411358470ULL, 0x2C57B3B8EB0BDFC5ULL,
- 0x6EA7525342E1E956ULL, 0x72E3DAA0AA188782ULL, 0x30133B4B03F2B111ULL,
- 0xF7021977F9CCEAA4ULL, 0xB5F2F89C5026DC37ULL, 0x3BD0BCE5A45A6B5DULL,
- 0x79205D0E0DB05DCEULL, 0xBE317F32F78E067BULL, 0xFCC19ED95E6430E8ULL,
- 0x86B86ED5267CDBD3ULL, 0xC4488F3E8F96ED40ULL, 0x0359AD0275A8B6F5ULL,
- 0x41A94CE9DC428066ULL, 0xCF8B0890283E370CULL, 0x8D7BE97B81D4019FULL,
- 0x4A6ACB477BEA5A2AULL, 0x089A2AACD2006CB9ULL, 0x14DEA25F3AF9026DULL,
- 0x562E43B4931334FEULL, 0x913F6188692D6F4BULL, 0xD3CF8063C0C759D8ULL,
- 0x5DEDC41A34BBEEB2ULL, 0x1F1D25F19D51D821ULL, 0xD80C07CD676F8394ULL,
- 0x9AFCE626CE85B507ULL,
-};
-
-uint64_t bch_crc64_update(uint64_t crc, const void *_data, size_t len)
-{
- const unsigned char *data = _data;
-
- while (len--) {
- int i = ((int) (crc >> 56) ^ *data++) & 0xFF;
- crc = crc_table[i] ^ (crc << 8);
- }
-
- return crc;
-}
-
-uint64_t bch_crc64(const void *data, size_t len)
-{
- uint64_t crc = 0xffffffffffffffffULL;
-
- crc = bch_crc64_update(crc, data, len);
-
- return crc ^ 0xffffffffffffffffULL;
-}
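The table and helpers above are removed because lib/crc64 now provides crc64_be() with the same ECMA-182 polynomial; util.h (next file) wraps it with the all-ones seed and final inversion that bcache's on-disk format expects. For reference, a bit-at-a-time userspace version of the same normal (non-reflected) CRC-64 — far slower than the deleted table-driven code, but handy for checking equivalence (assumes the polynomial above; not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL

static uint64_t crc64_be_bitwise(uint64_t crc, const void *p, size_t len)
{
        const unsigned char *d = p;

        while (len--) {
                crc ^= (uint64_t)*d++ << 56;    /* feed the next byte, MSB first */
                for (int i = 0; i < 8; i++)
                        crc = (crc & (1ULL << 63))
                                ? (crc << 1) ^ CRC64_ECMA182_POLY
                                : crc << 1;
        }
        return crc;
}

/* Same wrapping as bch_crc64(): all-ones seed, final inversion. */
static uint64_t bch_crc64_ref(const void *p, size_t len)
{
        return crc64_be_bitwise(~0ULL, p, len) ^ ~0ULL;
}

int main(void)
{
        const char *s = "bcache";

        printf("crc64(\"%s\") = %016llx\n", s,
               (unsigned long long)bch_crc64_ref(s, strlen(s)));
        return 0;
}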
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index cced87f8eb27..00aab6abcfe4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -11,6 +11,7 @@
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <linux/crc64.h>
#include "closure.h"
@@ -288,10 +289,10 @@ do { \
#define ANYSINT_MAX(t) \
((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
-int bch_strtoint_h(const char *, int *);
-int bch_strtouint_h(const char *, unsigned int *);
-int bch_strtoll_h(const char *, long long *);
-int bch_strtoull_h(const char *, unsigned long long *);
+int bch_strtoint_h(const char *cp, int *res);
+int bch_strtouint_h(const char *cp, unsigned int *res);
+int bch_strtoll_h(const char *cp, long long *res);
+int bch_strtoull_h(const char *cp, unsigned long long *res);
static inline int bch_strtol_h(const char *cp, long *res)
{
@@ -347,7 +348,7 @@ static inline int bch_strtoul_h(const char *cp, long *res)
snprintf(buf, size, \
__builtin_types_compatible_p(typeof(var), int) \
? "%i\n" : \
- __builtin_types_compatible_p(typeof(var), unsigned) \
+ __builtin_types_compatible_p(typeof(var), unsigned int) \
? "%u\n" : \
__builtin_types_compatible_p(typeof(var), long) \
? "%li\n" : \
@@ -379,7 +380,7 @@ struct time_stats {
void bch_time_stats_update(struct time_stats *stats, uint64_t time);
-static inline unsigned local_clock_us(void)
+static inline unsigned int local_clock_us(void)
{
return local_clock() >> 10;
}
@@ -402,7 +403,8 @@ do { \
__print_time_stat(stats, name, \
average_duration, duration_units); \
sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \
- div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\
+ div_u64((stats)->max_duration, \
+ NSEC_PER_ ## duration_units)); \
\
sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
? div_s64(local_clock() - (stats)->last, \
@@ -442,7 +444,7 @@ struct bch_ratelimit {
* Rate at which we want to do work, in units per second
* The units here correspond to the units passed to bch_next_delay()
*/
- uint32_t rate;
+ atomic_long_t rate;
};
static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
@@ -542,10 +544,27 @@ dup: \
#define RB_PREV(ptr, member) \
container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
+static inline uint64_t bch_crc64(const void *p, size_t len)
+{
+ uint64_t crc = 0xffffffffffffffffULL;
+
+ crc = crc64_be(crc, p, len);
+ return crc ^ 0xffffffffffffffffULL;
+}
+
+static inline uint64_t bch_crc64_update(uint64_t crc,
+ const void *p,
+ size_t len)
+{
+ crc = crc64_be(crc, p, len);
+ return crc;
+}
+
/* Does linear interpolation between powers of two */
-static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
+static inline unsigned int fract_exp_two(unsigned int x,
+ unsigned int fract_bits)
{
- unsigned fract = x & ~(~0 << fract_bits);
+ unsigned int fract = x & ~(~0 << fract_bits);
x >>= fract_bits;
x = 1 << x;
@@ -561,8 +580,4 @@ static inline sector_t bdev_sectors(struct block_device *bdev)
{
return bdev->bd_inode->i_size >> 9;
}
-
-uint64_t bch_crc64_update(uint64_t, const void *, size_t);
-uint64_t bch_crc64(const void *, size_t);
-
#endif /* _BCACHE_UTIL_H */
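fract_exp_two() treats its argument as a fixed-point exponent: the low fract_bits bits are the fraction, the remaining bits select a power of two, and the tail of the function (outside the hunk shown above) adds x * fract >> fract_bits to interpolate linearly toward the next power. A standalone copy with a worked call:

#include <stdio.h>

/* Linear interpolation between powers of two, as in bcache's util.h. */
static unsigned int fract_exp_two(unsigned int x, unsigned int fract_bits)
{
        unsigned int fract = x & ~(~0U << fract_bits);

        x >>= fract_bits;
        x = 1U << x;
        x += x * fract >> fract_bits;   /* tail of the kernel function */
        return x;
}

int main(void)
{
        /* fract_bits = 2: x = 9 means exponent 2 plus fraction 1/4,
         * so the result lands 1/4 of the way from 4 to 8. */
        printf("%u\n", fract_exp_two(9, 2));    /* prints 5 */
        return 0;
}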
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index ad45ebe1a74b..08c3a9f9676c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -27,7 +27,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
* flash-only devices
*/
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
- bcache_flash_devs_sectors_dirty(c);
+ atomic_long_read(&c->flash_dev_dirty_sectors);
/*
* Unfortunately there is no control of global dirty data. If the
@@ -104,11 +104,56 @@ static void __update_writeback_rate(struct cached_dev *dc)
dc->writeback_rate_proportional = proportional_scaled;
dc->writeback_rate_integral_scaled = integral_scaled;
- dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
- dc->writeback_rate.rate = new_rate;
+ dc->writeback_rate_change = new_rate -
+ atomic_long_read(&dc->writeback_rate.rate);
+ atomic_long_set(&dc->writeback_rate.rate, new_rate);
dc->writeback_rate_target = target;
}
+static bool set_at_max_writeback_rate(struct cache_set *c,
+ struct cached_dev *dc)
+{
+ /*
+ * Idle_counter is increased every time update_writeback_rate() is
+ * called. If all backing devices attached to the same cache set have
+ * identical dc->writeback_rate_update_seconds values, it takes about 6
+ * rounds of update_writeback_rate() on each backing device before
+ * c->at_max_writeback_rate is set to 1, and then the max writeback
+ * rate is set to each dc->writeback_rate.rate.
+ * To avoid the extra locking cost of counting the exact number of
+ * dirty cached devices, c->attached_dev_nr is used to calculate the
+ * idle threshold. It might be bigger if not all cached devices are in
+ * writeback mode, but it still works well with a limited number of
+ * extra rounds of update_writeback_rate().
+ */
+ if (atomic_inc_return(&c->idle_counter) <
+ atomic_read(&c->attached_dev_nr) * 6)
+ return false;
+
+ if (atomic_read(&c->at_max_writeback_rate) != 1)
+ atomic_set(&c->at_max_writeback_rate, 1);
+
+ atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
+
+ /* keep writeback_rate_target as existing value */
+ dc->writeback_rate_proportional = 0;
+ dc->writeback_rate_integral_scaled = 0;
+ dc->writeback_rate_change = 0;
+
+ /*
+ * Check c->idle_counter and c->at_max_writeback_rate again in case
+ * new I/O arrives before set_at_max_writeback_rate() returns.
+ * Then the writeback rate is set to 1, and its new value should be
+ * decided via __update_writeback_rate().
+ */
+ if ((atomic_read(&c->idle_counter) <
+ atomic_read(&c->attached_dev_nr) * 6) ||
+ !atomic_read(&c->at_max_writeback_rate))
+ return false;
+
+ return true;
+}
+
static void update_writeback_rate(struct work_struct *work)
{
struct cached_dev *dc = container_of(to_delayed_work(work),
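set_at_max_writeback_rate() above synchronises only through atomics, so it can race with incoming I/O; the re-check at its end is what makes that race harmless. A compressed stdatomic model of the heuristic (the factor 6 mirrors the comment above; the device count and structure are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int idle_counter;
static atomic_int attached_dev_nr = 2;
static atomic_int at_max_writeback_rate;

/* Called from the periodic rate update; returns true once the cache
 * set has been idle long enough to pin writeback at full speed. */
static bool set_at_max_rate(void)
{
        if (atomic_fetch_add(&idle_counter, 1) + 1 <
            atomic_load(&attached_dev_nr) * 6)
                return false;

        atomic_store(&at_max_writeback_rate, 1);

        /* Re-check: an I/O may have reset the counters meanwhile. */
        if (atomic_load(&idle_counter) <
            atomic_load(&attached_dev_nr) * 6 ||
            !atomic_load(&at_max_writeback_rate))
                return false;

        return true;
}

/* Called on every front-end I/O, mirroring cached_dev_make_request(). */
static void note_io(void)
{
        atomic_store(&idle_counter, 0);
        atomic_store(&at_max_writeback_rate, 0);
}

int main(void)
{
        for (int i = 0; i < 13; i++)
                printf("round %2d: max=%d\n", i, set_at_max_rate());
        note_io();
        printf("after I/O: max=%d\n", set_at_max_rate());
        return 0;
}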
@@ -136,13 +181,20 @@ static void update_writeback_rate(struct work_struct *work)
return;
}
- down_read(&dc->writeback_lock);
-
- if (atomic_read(&dc->has_dirty) &&
- dc->writeback_percent)
- __update_writeback_rate(dc);
+ if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
+ /*
+ * If the whole cache set is idle, set_at_max_writeback_rate()
+ * will set the writeback rate to its maximum. It is then
+ * unnecessary to update the writeback rate again while the
+ * idle cache set stays at the maximum rate.
+ */
+ if (!set_at_max_writeback_rate(c, dc)) {
+ down_read(&dc->writeback_lock);
+ __update_writeback_rate(dc);
+ up_read(&dc->writeback_lock);
+ }
+ }
- up_read(&dc->writeback_lock);
/*
* CACHE_SET_IO_DISABLE might be set via sysfs interface,
@@ -163,7 +215,8 @@ static void update_writeback_rate(struct work_struct *work)
smp_mb();
}
-static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
+static unsigned int writeback_delay(struct cached_dev *dc,
+ unsigned int sectors)
{
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
@@ -197,6 +250,7 @@ static void dirty_init(struct keybuf_key *w)
static void dirty_io_destructor(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+
kfree(io);
}
@@ -211,7 +265,7 @@ static void write_dirty_finish(struct closure *cl)
/* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) {
int ret;
- unsigned i;
+ unsigned int i;
struct keylist keys;
bch_keylist_init(&keys);
@@ -325,7 +379,7 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc)
{
- unsigned delay = 0;
+ unsigned int delay = 0;
struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
size_t size;
int nk, i;
@@ -390,7 +444,8 @@ static void read_dirty(struct cached_dev *dc)
io = kzalloc(sizeof(struct dirty_io) +
sizeof(struct bio_vec) *
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ DIV_ROUND_UP(KEY_SIZE(&w->key),
+ PAGE_SECTORS),
GFP_KERNEL);
if (!io)
goto err;
@@ -413,7 +468,8 @@ static void read_dirty(struct cached_dev *dc)
down(&dc->in_flight);
- /* We've acquired a semaphore for the maximum
+ /*
+ * We've acquired a semaphore for the maximum number of
+ * simultaneous writebacks; from here
* everything happens asynchronously.
*/
@@ -422,27 +478,6 @@ static void read_dirty(struct cached_dev *dc)
delay = writeback_delay(dc, size);
- /* If the control system would wait for at least half a
- * second, and there's been no reqs hitting the backing disk
- * for awhile: use an alternate mode where we have at most
- * one contiguous set of writebacks in flight at a time. If
- * someone wants to do IO it will be quick, as it will only
- * have to contend with one operation in flight, and we'll
- * be round-tripping data to the backing disk as quickly as
- * it can accept it.
- */
- if (delay >= HZ / 2) {
- /* 3 means at least 1.5 seconds, up to 7.5 if we
- * have slowed way down.
- */
- if (atomic_inc_return(&dc->backing_idle) >= 3) {
- /* Wait for current I/Os to finish */
- closure_sync(&cl);
- /* And immediately launch a new set. */
- delay = 0;
- }
- }
-
while (!kthread_should_stop() &&
!test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
delay) {
@@ -467,20 +502,23 @@ err:
/* Scan for dirty data */
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors)
{
struct bcache_device *d = c->devices[inode];
- unsigned stripe_offset, stripe, sectors_dirty;
+ unsigned int stripe_offset, stripe, sectors_dirty;
if (!d)
return;
+ if (UUID_FLASH_ONLY(&c->uuids[inode]))
+ atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
+
stripe = offset_to_stripe(d, offset);
stripe_offset = offset & (d->stripe_size - 1);
while (nr_sectors) {
- int s = min_t(unsigned, abs(nr_sectors),
+ int s = min_t(unsigned int, abs(nr_sectors),
d->stripe_size - stripe_offset);
if (nr_sectors < 0)
@@ -504,7 +542,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
- struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
+ struct cached_dev *dc = container_of(buf,
+ struct cached_dev,
+ writeback_keys);
BUG_ON(KEY_INODE(k) != dc->disk.id);
@@ -514,7 +554,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
static void refill_full_stripes(struct cached_dev *dc)
{
struct keybuf *buf = &dc->writeback_keys;
- unsigned start_stripe, stripe, next_stripe;
+ unsigned int start_stripe, stripe, next_stripe;
bool wrapped = false;
stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
@@ -645,8 +685,10 @@ static int bch_writeback_thread(void *arg)
* data on cache. BCACHE_DEV_DETACHING flag is set in
* bch_cached_dev_detach().
*/
- if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+ up_write(&dc->writeback_lock);
break;
+ }
}
up_write(&dc->writeback_lock);
@@ -654,7 +696,7 @@ static int bch_writeback_thread(void *arg)
read_dirty(dc);
if (searched_full_index) {
- unsigned delay = dc->writeback_delay * HZ;
+ unsigned int delay = dc->writeback_delay * HZ;
while (delay &&
!kthread_should_stop() &&
@@ -673,10 +715,14 @@ static int bch_writeback_thread(void *arg)
}
/* Init */
+#define INIT_KEYS_EACH_TIME 500000
+#define INIT_KEYS_SLEEP_MS 100
struct sectors_dirty_init {
struct btree_op op;
- unsigned inode;
+ unsigned int inode;
+ size_t count;
+ struct bkey start;
};
static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -691,18 +737,37 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
KEY_START(k), KEY_SIZE(k));
+ op->count++;
+ if (atomic_read(&b->c->search_inflight) &&
+ !(op->count % INIT_KEYS_EACH_TIME)) {
+ bkey_copy_key(&op->start, k);
+ return -EAGAIN;
+ }
+
return MAP_CONTINUE;
}
void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
+ int ret;
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
-
- bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
- sectors_dirty_init_fn, 0);
+ op.count = 0;
+ op.start = KEY(op.inode, 0, 0);
+
+ do {
+ ret = bch_btree_map_keys(&op.op, d->c, &op.start,
+ sectors_dirty_init_fn, 0);
+ if (ret == -EAGAIN)
+ schedule_timeout_interruptible(
+ msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
+ else if (ret < 0) {
+ pr_warn("sectors dirty init failed, ret=%d!", ret);
+ break;
+ }
+ } while (ret == -EAGAIN);
}
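bch_sectors_dirty_init() now walks the btree in bounded chunks: the map callback yields with -EAGAIN every INIT_KEYS_EACH_TIME keys while foreground searches are in flight, records the key it stopped at, and the caller sleeps for INIT_KEYS_SLEEP_MS before resuming from that key. A stripped-down sketch of the same resumable-walk idiom; walk_from() and the position type are hypothetical stand-ins for the btree machinery:

/* Hypothetical sketch of a walk that yields to foreground I/O. */
struct walk_state {
	size_t count;
	uint64_t resume_pos;		/* where to restart after yielding */
};

static int visit_one(struct walk_state *w, uint64_t pos, bool foreground_busy)
{
	w->count++;
	if (foreground_busy && !(w->count % 500000)) {	/* INIT_KEYS_EACH_TIME */
		w->resume_pos = pos;
		return -EAGAIN;		/* give foreground requests some room */
	}
	return 0;			/* i.e. MAP_CONTINUE */
}

/* Caller fragment: retry from the saved position until the walk finishes. */
	do {
		ret = walk_from(&w, w.resume_pos);	/* hypothetical driver */
		if (ret == -EAGAIN)
			schedule_timeout_interruptible(
				msecs_to_jiffies(100));	/* INIT_KEYS_SLEEP_MS */
	} while (ret == -EAGAIN);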
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -715,7 +780,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_running = true;
dc->writeback_percent = 10;
dc->writeback_delay = 30;
- dc->writeback_rate.rate = 1024;
+ atomic_long_set(&dc->writeback_rate.rate, 1024);
dc->writeback_rate_minimum = 8;
dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 610fb01de629..d2b9fdbc8994 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -28,26 +28,7 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
-static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
-{
- uint64_t i, ret = 0;
-
- mutex_lock(&bch_register_lock);
-
- for (i = 0; i < c->devices_max_used; i++) {
- struct bcache_device *d = c->devices[i];
-
- if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
- continue;
- ret += bcache_dev_sectors_dirty(d);
- }
-
- mutex_unlock(&bch_register_lock);
-
- return ret;
-}
-
-static inline unsigned offset_to_stripe(struct bcache_device *d,
+static inline unsigned int offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
do_div(offset, d->stripe_size);
@@ -56,9 +37,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d,
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset,
- unsigned nr_sectors)
+ unsigned int nr_sectors)
{
- unsigned stripe = offset_to_stripe(&dc->disk, offset);
+ unsigned int stripe = offset_to_stripe(&dc->disk, offset);
while (1) {
if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
@@ -73,9 +54,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
}
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
- unsigned cache_mode, bool would_skip)
+ unsigned int cache_mode, bool would_skip)
{
- unsigned in_use = dc->disk.c->gc_stats.in_use;
+ unsigned int in_use = dc->disk.c->gc_stats.in_use;
if (cache_mode != CACHE_MODE_WRITEBACK ||
test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
@@ -115,10 +96,11 @@ static inline void bch_writeback_add(struct cached_dev *dc)
}
}
-void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
+ uint64_t offset, int nr_sectors);
-void bch_sectors_dirty_init(struct bcache_device *);
-void bch_cached_dev_writeback_init(struct cached_dev *);
-int bch_cached_dev_writeback_start(struct cached_dev *);
+void bch_sectors_dirty_init(struct bcache_device *d);
+void bch_cached_dev_writeback_init(struct cached_dev *dc);
+int bch_cached_dev_writeback_start(struct cached_dev *dc);
#endif
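The writeback.h hunk above removes bcache_flash_devs_sectors_dirty(), which took bch_register_lock and scanned every device on each rate update; the series replaces it with a running total in c->flash_dev_dirty_sectors that bcache_dev_sectors_dirty_add() adjusts in place. A minimal sketch of the trade-off, with hypothetical names:

/* Hypothetical sketch: O(1) running counter instead of an O(n) locked scan. */
struct set_stats {
	atomic_long_t flash_dirty;	/* maintained at every dirty add/sub */
};

static void account_dirty(struct set_stats *s, bool flash_only, int nr_sectors)
{
	if (flash_only)
		atomic_long_add(nr_sectors, &s->flash_dirty); /* nr may be < 0 */
}

static long flash_dirty_sectors(struct set_stats *s)
{
	return atomic_long_read(&s->flash_dirty);	/* no lock, no loop */
}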
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 0d7212410e21..69dddeab124c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -363,7 +363,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->version = cpu_to_le32(cmd->version);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
- disk_super->policy_hint_size = 0;
+ disk_super->policy_hint_size = cpu_to_le32(0);
__copy_sm_root(cmd, disk_super);
@@ -701,6 +701,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
+ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
@@ -1322,6 +1323,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
dm_oblock_t oblock;
unsigned flags;
+ bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
memcpy(&mapping, mapping_value_le, sizeof(mapping));
@@ -1332,8 +1334,10 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
memcpy(&hint, hint_value_le, sizeof(hint));
}
+ if (cmd->clean_when_opened)
+ dirty = flags & M_DIRTY;
- r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
+ r = fn(context, oblock, to_cblock(cb), dirty,
le32_to_cpu(hint), hints_valid);
if (r) {
DMERR("policy couldn't load cache block %llu",
@@ -1361,7 +1365,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
dm_oblock_t oblock;
unsigned flags;
- bool dirty;
+ bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
memcpy(&mapping, mapping_value_le, sizeof(mapping));
@@ -1372,8 +1376,9 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
memcpy(&hint, hint_value_le, sizeof(hint));
}
+ if (cmd->clean_when_opened)
+ dirty = dm_bitset_cursor_get_value(dirty_cursor);
- dirty = dm_bitset_cursor_get_value(dirty_cursor);
r = fn(context, oblock, to_cblock(cb), dirty,
le32_to_cpu(hint), hints_valid);
if (r) {
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ce14a3d1f609..a53413371725 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1188,9 +1188,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
queue_continuation(mg->cache->wq, &mg->k);
}
-static int copy(struct dm_cache_migration *mg, bool promote)
+static void copy(struct dm_cache_migration *mg, bool promote)
{
- int r;
struct dm_io_region o_region, c_region;
struct cache *cache = mg->cache;
@@ -1203,11 +1202,9 @@ static int copy(struct dm_cache_migration *mg, bool promote)
c_region.count = cache->sectors_per_block;
if (promote)
- r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
+ dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
else
- r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
-
- return r;
+ dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
}
static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
@@ -1449,12 +1446,7 @@ static void mg_full_copy(struct work_struct *ws)
}
init_continuation(&mg->k, mg_upgrade_lock);
-
- if (copy(mg, is_policy_promote)) {
- DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
- mg->k.input = BLK_STS_IOERR;
- mg_complete(mg, false);
- }
+ copy(mg, is_policy_promote);
}
static void mg_copy(struct work_struct *ws)
@@ -2250,7 +2242,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
{0, 2, "Invalid number of cache feature arguments"},
};
- int r;
+ int r, mode_ctr = 0;
unsigned argc;
const char *arg;
struct cache_features *cf = &ca->features;
@@ -2264,14 +2256,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
while (argc--) {
arg = dm_shift_arg(as);
- if (!strcasecmp(arg, "writeback"))
+ if (!strcasecmp(arg, "writeback")) {
cf->io_mode = CM_IO_WRITEBACK;
+ mode_ctr++;
+ }
- else if (!strcasecmp(arg, "writethrough"))
+ else if (!strcasecmp(arg, "writethrough")) {
cf->io_mode = CM_IO_WRITETHROUGH;
+ mode_ctr++;
+ }
- else if (!strcasecmp(arg, "passthrough"))
+ else if (!strcasecmp(arg, "passthrough")) {
cf->io_mode = CM_IO_PASSTHROUGH;
+ mode_ctr++;
+ }
else if (!strcasecmp(arg, "metadata2"))
cf->metadata_version = 2;
@@ -2282,6 +2280,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
}
}
+ if (mode_ctr > 1) {
+ *error = "Duplicate cache io_mode features requested";
+ return -EINVAL;
+ }
+
return 0;
}
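With mode_ctr in place, a dm-cache table that names more than one io_mode feature now fails at constructor time instead of the last mode silently winning. Hypothetical table lines ($META, $FAST and $SLOW stand in for real devices):

# accepted: one io_mode plus an unrelated feature
0 409600 cache $META $FAST $SLOW 512 2 writeback metadata2 smq 0
# now rejected with "Duplicate cache io_mode features requested"
0 409600 cache $META $FAST $SLOW 512 2 writeback passthrough smq 0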
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index b61b069c33af..f266c81f396f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -99,7 +99,7 @@ struct crypt_iv_operations {
};
struct iv_essiv_private {
- struct crypto_ahash *hash_tfm;
+ struct crypto_shash *hash_tfm;
u8 *salt;
};
@@ -144,7 +144,7 @@ struct crypt_config {
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
- wait_queue_head_t write_thread_wait;
+ spinlock_t write_thread_lock;
struct task_struct *write_thread;
struct rb_root write_tree;
@@ -327,25 +327,22 @@ static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
- struct scatterlist sg;
+ SHASH_DESC_ON_STACK(desc, essiv->hash_tfm);
struct crypto_cipher *essiv_tfm;
int err;
- sg_init_one(&sg, cc->key, cc->key_size);
- ahash_request_set_tfm(req, essiv->hash_tfm);
- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
- ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
+ desc->tfm = essiv->hash_tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_ahash_digest(req);
- ahash_request_zero(req);
+ err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
+ shash_desc_zero(desc);
if (err)
return err;
essiv_tfm = cc->iv_private;
err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
- crypto_ahash_digestsize(essiv->hash_tfm));
+ crypto_shash_digestsize(essiv->hash_tfm));
if (err)
return err;
@@ -356,7 +353,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
+ unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm);
struct crypto_cipher *essiv_tfm;
int r, err = 0;
@@ -408,7 +405,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
struct crypto_cipher *essiv_tfm;
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- crypto_free_ahash(essiv->hash_tfm);
+ crypto_free_shash(essiv->hash_tfm);
essiv->hash_tfm = NULL;
kzfree(essiv->salt);
@@ -426,7 +423,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
struct crypto_cipher *essiv_tfm = NULL;
- struct crypto_ahash *hash_tfm = NULL;
+ struct crypto_shash *hash_tfm = NULL;
u8 *salt = NULL;
int err;
@@ -436,14 +433,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
/* Allocate hash algorithm */
- hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
+ hash_tfm = crypto_alloc_shash(opts, 0, 0);
if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
err = PTR_ERR(hash_tfm);
goto bad;
}
- salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
+ salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL);
if (!salt) {
ti->error = "Error kmallocing salt storage in ESSIV";
err = -ENOMEM;
@@ -454,7 +451,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
- crypto_ahash_digestsize(hash_tfm));
+ crypto_shash_digestsize(hash_tfm));
if (IS_ERR(essiv_tfm)) {
crypt_iv_essiv_dtr(cc);
return PTR_ERR(essiv_tfm);
@@ -465,7 +462,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
bad:
if (hash_tfm && !IS_ERR(hash_tfm))
- crypto_free_ahash(hash_tfm);
+ crypto_free_shash(hash_tfm);
kfree(salt);
return err;
}
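The ESSIV rework above is a mechanical ahash-to-shash conversion: the salt digest is small and synchronous, so a stack descriptor replaces the async request and the scatterlist disappears. A minimal sketch of the synchronous pattern as used in this kernel version; digest_buffer() is a hypothetical wrapper:

#include <crypto/hash.h>

/* Hypothetical helper: hash a linear buffer with a synchronous shash. */
static int digest_buffer(const char *alg, const u8 *data, unsigned int len,
			 u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash(alg, 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);	/* no allocation, no sg list */

		desc->tfm = tfm;
		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);		/* wipe key-derived state */
	}
	crypto_free_shash(tfm);
	return err;
}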
@@ -1620,36 +1617,31 @@ static int dmcrypt_write(void *data)
struct rb_root write_tree;
struct blk_plug plug;
- DECLARE_WAITQUEUE(wait, current);
-
- spin_lock_irq(&cc->write_thread_wait.lock);
+ spin_lock_irq(&cc->write_thread_lock);
continue_locked:
if (!RB_EMPTY_ROOT(&cc->write_tree))
goto pop_from_list;
set_current_state(TASK_INTERRUPTIBLE);
- __add_wait_queue(&cc->write_thread_wait, &wait);
- spin_unlock_irq(&cc->write_thread_wait.lock);
+ spin_unlock_irq(&cc->write_thread_lock);
if (unlikely(kthread_should_stop())) {
set_current_state(TASK_RUNNING);
- remove_wait_queue(&cc->write_thread_wait, &wait);
break;
}
schedule();
set_current_state(TASK_RUNNING);
- spin_lock_irq(&cc->write_thread_wait.lock);
- __remove_wait_queue(&cc->write_thread_wait, &wait);
+ spin_lock_irq(&cc->write_thread_lock);
goto continue_locked;
pop_from_list:
write_tree = cc->write_tree;
cc->write_tree = RB_ROOT;
- spin_unlock_irq(&cc->write_thread_wait.lock);
+ spin_unlock_irq(&cc->write_thread_lock);
BUG_ON(rb_parent(write_tree.rb_node));
@@ -1693,7 +1685,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
return;
}
- spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
+ spin_lock_irqsave(&cc->write_thread_lock, flags);
+ if (RB_EMPTY_ROOT(&cc->write_tree))
+ wake_up_process(cc->write_thread);
rbp = &cc->write_tree.rb_node;
parent = NULL;
sector = io->sector;
@@ -1706,9 +1700,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
}
rb_link_node(&io->rb_node, parent, rbp);
rb_insert_color(&io->rb_node, &cc->write_tree);
-
- wake_up_locked(&cc->write_thread_wait);
- spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
+ spin_unlock_irqrestore(&cc->write_thread_lock, flags);
}
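The dm-crypt thread change swaps the open-coded wait-queue locking for a plain spinlock plus an explicit wake_up_process(), and it wakes the writer only on the empty-to-non-empty transition so a busy tree is not re-woken on every insert. A condensed sketch of the producer side, with hypothetical names:

/* Hypothetical sketch of the submit path: wake only if the tree was empty. */
static void submit_item(spinlock_t *lock, struct rb_root *tree,
			struct task_struct *writer, struct rb_node *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (RB_EMPTY_ROOT(tree))	/* writer may be sleeping on empty */
		wake_up_process(writer);
	/* ... rb_link_node()/rb_insert_color() of node into tree ... */
	spin_unlock_irqrestore(lock, flags);
}

Waking before the insert is safe here because the writer re-checks the tree under the same lock before it goes back to sleep.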
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
@@ -2831,7 +2823,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- init_waitqueue_head(&cc->write_thread_wait);
+ spin_lock_init(&cc->write_thread_lock);
cc->write_tree = RB_ROOT;
cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
@@ -3069,11 +3061,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
*/
limits->max_segment_size = PAGE_SIZE;
- if (cc->sector_size != (1 << SECTOR_SHIFT)) {
- limits->logical_block_size = cc->sector_size;
- limits->physical_block_size = cc->sector_size;
- blk_limits_io_min(limits, cc->sector_size);
- }
+ limits->logical_block_size =
+ max_t(unsigned short, limits->logical_block_size, cc->sector_size);
+ limits->physical_block_size =
+ max_t(unsigned, limits->physical_block_size, cc->sector_size);
+ limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
}
static struct target_type crypt_target = {
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 1783d80c9cad..2fb7bb4304ad 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -17,6 +17,13 @@
#define DM_MSG_PREFIX "delay"
+struct delay_class {
+ struct dm_dev *dev;
+ sector_t start;
+ unsigned delay;
+ unsigned ops;
+};
+
struct delay_c {
struct timer_list delay_timer;
struct mutex timer_lock;
@@ -25,19 +32,16 @@ struct delay_c {
struct list_head delayed_bios;
atomic_t may_delay;
- struct dm_dev *dev_read;
- sector_t start_read;
- unsigned read_delay;
- unsigned reads;
+ struct delay_class read;
+ struct delay_class write;
+ struct delay_class flush;
- struct dm_dev *dev_write;
- sector_t start_write;
- unsigned write_delay;
- unsigned writes;
+ int argc;
};
struct dm_delay_info {
struct delay_c *context;
+ struct delay_class *class;
struct list_head list;
unsigned long expires;
};
@@ -77,7 +81,7 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
struct dm_delay_info *delayed, *next;
unsigned long next_expires = 0;
- int start_timer = 0;
+ unsigned long start_timer = 0;
struct bio_list flush_bios = { };
mutex_lock(&delayed_bios_lock);
@@ -87,10 +91,7 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
sizeof(struct dm_delay_info));
list_del(&delayed->list);
bio_list_add(&flush_bios, bio);
- if ((bio_data_dir(bio) == WRITE))
- delayed->context->writes--;
- else
- delayed->context->reads--;
+ delayed->class->ops--;
continue;
}
@@ -100,7 +101,6 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
} else
next_expires = min(next_expires, delayed->expires);
}
-
mutex_unlock(&delayed_bios_lock);
if (start_timer)
@@ -117,6 +117,50 @@ static void flush_expired_bios(struct work_struct *work)
flush_bios(flush_delayed_bios(dc, 0));
}
+static void delay_dtr(struct dm_target *ti)
+{
+ struct delay_c *dc = ti->private;
+
+ destroy_workqueue(dc->kdelayd_wq);
+
+ if (dc->read.dev)
+ dm_put_device(ti, dc->read.dev);
+ if (dc->write.dev)
+ dm_put_device(ti, dc->write.dev);
+ if (dc->flush.dev)
+ dm_put_device(ti, dc->flush.dev);
+
+ mutex_destroy(&dc->timer_lock);
+
+ kfree(dc);
+}
+
+static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv)
+{
+ int ret;
+ unsigned long long tmpll;
+ char dummy;
+
+ if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
+ ti->error = "Invalid device sector";
+ return -EINVAL;
+ }
+ c->start = tmpll;
+
+ if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) {
+ ti->error = "Invalid delay";
+ return -EINVAL;
+ }
+
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
+ if (ret) {
+ ti->error = "Device lookup failed";
+ return ret;
+ }
+
+ return 0;
+}
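delay_class_ctr() parses one <device> <offset> <delay> triple, and delay_ctr() invokes it one, two or three times, so reads, writes and flushes can each get their own device and delay. Hypothetical table lines for the three accepted argument counts:

# 3 args: reads, writes and flushes all delayed 100 ms on one device
0 10000 delay /dev/sdb1 0 100
# 6 args: reads 50 ms; writes and flushes 200 ms
0 10000 delay /dev/sdb1 0 50 /dev/sdb1 0 200
# 9 args: reads 50 ms, writes 200 ms, flushes pass through undelayed
0 10000 delay /dev/sdb1 0 50 /dev/sdb1 0 200 /dev/sdb1 0 0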
+
/*
* Mapping parameters:
* <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
@@ -128,134 +172,89 @@ static void flush_expired_bios(struct work_struct *work)
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct delay_c *dc;
- unsigned long long tmpll;
- char dummy;
int ret;
- if (argc != 3 && argc != 6) {
- ti->error = "Requires exactly 3 or 6 arguments";
+ if (argc != 3 && argc != 6 && argc != 9) {
+ ti->error = "Requires exactly 3, 6 or 9 arguments";
return -EINVAL;
}
- dc = kmalloc(sizeof(*dc), GFP_KERNEL);
+ dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc) {
ti->error = "Cannot allocate context";
return -ENOMEM;
}
- dc->reads = dc->writes = 0;
+ ti->private = dc;
+ timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
+ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+ INIT_LIST_HEAD(&dc->delayed_bios);
+ mutex_init(&dc->timer_lock);
+ atomic_set(&dc->may_delay, 1);
+ dc->argc = argc;
- ret = -EINVAL;
- if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
- ti->error = "Invalid device sector";
+ ret = delay_class_ctr(ti, &dc->read, argv);
+ if (ret)
goto bad;
- }
- dc->start_read = tmpll;
- if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) {
- ti->error = "Invalid delay";
- goto bad;
+ if (argc == 3) {
+ ret = delay_class_ctr(ti, &dc->write, argv);
+ if (ret)
+ goto bad;
+ ret = delay_class_ctr(ti, &dc->flush, argv);
+ if (ret)
+ goto bad;
+ goto out;
}
- ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
- &dc->dev_read);
- if (ret) {
- ti->error = "Device lookup failed";
+ ret = delay_class_ctr(ti, &dc->write, argv + 3);
+ if (ret)
goto bad;
- }
-
- ret = -EINVAL;
- dc->dev_write = NULL;
- if (argc == 3)
+ if (argc == 6) {
+ ret = delay_class_ctr(ti, &dc->flush, argv + 3);
+ if (ret)
+ goto bad;
goto out;
-
- if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
- ti->error = "Invalid write device sector";
- goto bad_dev_read;
}
- dc->start_write = tmpll;
- if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) {
- ti->error = "Invalid write delay";
- goto bad_dev_read;
- }
-
- ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
- &dc->dev_write);
- if (ret) {
- ti->error = "Write device lookup failed";
- goto bad_dev_read;
- }
+ ret = delay_class_ctr(ti, &dc->flush, argv + 6);
+ if (ret)
+ goto bad;
out:
- ret = -EINVAL;
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) {
+ ret = -EINVAL;
DMERR("Couldn't start kdelayd");
- goto bad_queue;
+ goto bad;
}
- timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
-
- INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
- INIT_LIST_HEAD(&dc->delayed_bios);
- mutex_init(&dc->timer_lock);
- atomic_set(&dc->may_delay, 1);
-
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->per_io_data_size = sizeof(struct dm_delay_info);
- ti->private = dc;
return 0;
-bad_queue:
- if (dc->dev_write)
- dm_put_device(ti, dc->dev_write);
-bad_dev_read:
- dm_put_device(ti, dc->dev_read);
bad:
- kfree(dc);
+ delay_dtr(ti);
return ret;
}
-static void delay_dtr(struct dm_target *ti)
-{
- struct delay_c *dc = ti->private;
-
- destroy_workqueue(dc->kdelayd_wq);
-
- dm_put_device(ti, dc->dev_read);
-
- if (dc->dev_write)
- dm_put_device(ti, dc->dev_write);
-
- mutex_destroy(&dc->timer_lock);
-
- kfree(dc);
-}
-
-static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
+static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
{
struct dm_delay_info *delayed;
unsigned long expires = 0;
- if (!delay || !atomic_read(&dc->may_delay))
+ if (!c->delay || !atomic_read(&dc->may_delay))
return DM_MAPIO_REMAPPED;
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
delayed->context = dc;
- delayed->expires = expires = jiffies + msecs_to_jiffies(delay);
+ delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
mutex_lock(&delayed_bios_lock);
-
- if (bio_data_dir(bio) == WRITE)
- dc->writes++;
- else
- dc->reads++;
-
+ c->ops++;
list_add_tail(&delayed->list, &dc->delayed_bios);
-
mutex_unlock(&delayed_bios_lock);
queue_timeout(dc, expires);
@@ -282,23 +281,28 @@ static void delay_resume(struct dm_target *ti)
static int delay_map(struct dm_target *ti, struct bio *bio)
{
struct delay_c *dc = ti->private;
-
- if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
- bio_set_dev(bio, dc->dev_write->bdev);
- if (bio_sectors(bio))
- bio->bi_iter.bi_sector = dc->start_write +
- dm_target_offset(ti, bio->bi_iter.bi_sector);
-
- return delay_bio(dc, dc->write_delay, bio);
+ struct delay_class *c;
+ struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
+
+ if (bio_data_dir(bio) == WRITE) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH))
+ c = &dc->flush;
+ else
+ c = &dc->write;
+ } else {
+ c = &dc->read;
}
+ delayed->class = c;
+ bio_set_dev(bio, c->dev->bdev);
+ if (bio_sectors(bio))
+ bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
- bio_set_dev(bio, dc->dev_read->bdev);
- bio->bi_iter.bi_sector = dc->start_read +
- dm_target_offset(ti, bio->bi_iter.bi_sector);
-
- return delay_bio(dc, dc->read_delay, bio);
+ return delay_bio(dc, c, bio);
}
+#define DMEMIT_DELAY_CLASS(c) \
+ DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
+
static void delay_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
@@ -307,17 +311,19 @@ static void delay_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- DMEMIT("%u %u", dc->reads, dc->writes);
+ DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);
break;
case STATUSTYPE_TABLE:
- DMEMIT("%s %llu %u", dc->dev_read->name,
- (unsigned long long) dc->start_read,
- dc->read_delay);
- if (dc->dev_write)
- DMEMIT(" %s %llu %u", dc->dev_write->name,
- (unsigned long long) dc->start_write,
- dc->write_delay);
+ DMEMIT_DELAY_CLASS(&dc->read);
+ if (dc->argc >= 6) {
+ DMEMIT(" ");
+ DMEMIT_DELAY_CLASS(&dc->write);
+ }
+ if (dc->argc >= 9) {
+ DMEMIT(" ");
+ DMEMIT_DELAY_CLASS(&dc->flush);
+ }
break;
}
}
@@ -328,12 +334,15 @@ static int delay_iterate_devices(struct dm_target *ti,
struct delay_c *dc = ti->private;
int ret = 0;
- ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
+ ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
+ if (ret)
+ goto out;
+ ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
+ if (ret)
+ goto out;
+ ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);
if (ret)
goto out;
-
- if (dc->dev_write)
- ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);
out:
return ret;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 86438b2f10dd..378878599466 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -31,6 +31,8 @@
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
+#define RECALC_SECTORS 8192
+#define RECALC_WRITE_SUPER 16
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -44,7 +46,8 @@
*/
#define SB_MAGIC "integrt"
-#define SB_VERSION 1
+#define SB_VERSION_1 1
+#define SB_VERSION_2 2
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -57,9 +60,12 @@ struct superblock {
__u64 provided_data_sectors; /* userspace uses this value */
__u32 flags;
__u8 log2_sectors_per_block;
+ __u8 pad[3];
+ __u64 recalc_sector;
};
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
+#define SB_FLAG_RECALCULATING 0x2
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -139,6 +145,7 @@ struct alg_spec {
struct dm_integrity_c {
struct dm_dev *dev;
+ struct dm_dev *meta_dev;
unsigned tag_size;
__s8 log2_tag_size;
sector_t start;
@@ -170,7 +177,8 @@ struct dm_integrity_c {
unsigned short journal_section_sectors;
unsigned journal_sections;
unsigned journal_entries;
- sector_t device_sectors;
+ sector_t data_device_sectors;
+ sector_t meta_device_sectors;
unsigned initial_sectors;
unsigned metadata_run;
__s8 log2_metadata_run;
@@ -178,7 +186,7 @@ struct dm_integrity_c {
__u8 sectors_per_block;
unsigned char mode;
- bool suspending;
+ int suspending;
int failed;
@@ -186,6 +194,7 @@ struct dm_integrity_c {
/* these variables are locked with endio_wait.lock */
struct rb_root in_progress;
+ struct list_head wait_list;
wait_queue_head_t endio_wait;
struct workqueue_struct *wait_wq;
@@ -210,6 +219,11 @@ struct dm_integrity_c {
struct workqueue_struct *writer_wq;
struct work_struct writer_work;
+ struct workqueue_struct *recalc_wq;
+ struct work_struct recalc_work;
+ u8 *recalc_buffer;
+ u8 *recalc_tags;
+
struct bio_list flush_bio_list;
unsigned long autocommit_jiffies;
@@ -233,7 +247,14 @@ struct dm_integrity_c {
struct dm_integrity_range {
sector_t logical_sector;
unsigned n_sectors;
- struct rb_node node;
+ bool waiting;
+ union {
+ struct rb_node node;
+ struct {
+ struct task_struct *task;
+ struct list_head wait_entry;
+ };
+ };
};
struct dm_integrity_io {
@@ -337,10 +358,14 @@ static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
sector_t *area, sector_t *offset)
{
- __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
-
- *area = data_sector >> log2_interleave_sectors;
- *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
+ if (!ic->meta_dev) {
+ __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
+ *area = data_sector >> log2_interleave_sectors;
+ *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
+ } else {
+ *area = 0;
+ *offset = data_sector;
+ }
}
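In the interleaved single-device layout the data sector splits into an area index and an offset within that area; with a separate metadata device there is a single area and the offset is the sector itself. A worked example, assuming the default log2_interleave_sectors of 15 (32768-sector areas):

/* data_sector = 100000, log2_interleave_sectors = 15 (assumed default):
 *   area   = 100000 >> 15         = 3
 *   offset = 100000 & (32768 - 1) = 1696
 * With ic->meta_dev set: area = 0, offset = 100000.
 */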
#define sector_to_block(ic, n) \
@@ -379,6 +404,9 @@ static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector
{
sector_t result;
+ if (ic->meta_dev)
+ return offset;
+
result = area << ic->sb->log2_interleave_sectors;
if (likely(ic->log2_metadata_run >= 0))
result += (area + 1) << ic->log2_metadata_run;
@@ -386,6 +414,8 @@ static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector
result += (area + 1) * ic->metadata_run;
result += (sector_t)ic->initial_sectors + offset;
+ result += ic->start;
+
return result;
}
@@ -395,6 +425,14 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
*sec_ptr -= ic->journal_sections;
}
+static void sb_set_version(struct dm_integrity_c *ic)
+{
+ if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ ic->sb->version = SB_VERSION_2;
+ else
+ ic->sb->version = SB_VERSION_1;
+}
+
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
struct dm_io_request io_req;
@@ -406,7 +444,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
io_req.mem.ptr.addr = ic->sb;
io_req.notify.fn = NULL;
io_req.client = ic->io;
- io_loc.bdev = ic->dev->bdev;
+ io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
io_loc.sector = ic->start;
io_loc.count = SB_SECTORS;
@@ -753,7 +791,7 @@ static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned
io_req.notify.fn = NULL;
}
io_req.client = ic->io;
- io_loc.bdev = ic->dev->bdev;
+ io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
io_loc.sector = ic->start + SB_SECTORS + sector;
io_loc.count = n_sectors;
@@ -857,7 +895,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
io_req.notify.context = data;
io_req.client = ic->io;
io_loc.bdev = ic->dev->bdev;
- io_loc.sector = ic->start + target;
+ io_loc.sector = target;
io_loc.count = n_sectors;
r = dm_io(&io_req, 1, &io_loc, NULL);
@@ -867,13 +905,27 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
}
}
-static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
+static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
+{
+ return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
+ range1->logical_sector + range1->n_sectors > range2->logical_sector;
+}
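The predicate is the standard half-open interval test: [a, a + n) and [b, b + m) overlap iff a < b + m and a + n > b. Two quick checks:

/* [100, 110) vs [108, 116): 100 < 116 and 110 > 108   -> overlap
 * [100, 110) vs [110, 120): 100 < 120 but 110 > 110 fails -> disjoint
 */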
+
+static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
struct rb_node **n = &ic->in_progress.rb_node;
struct rb_node *parent;
BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
+ if (likely(check_waiting)) {
+ struct dm_integrity_range *range;
+ list_for_each_entry(range, &ic->wait_list, wait_entry) {
+ if (unlikely(ranges_overlap(range, new_range)))
+ return false;
+ }
+ }
+
parent = NULL;
while (*n) {
@@ -898,7 +950,22 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
rb_erase(&range->node, &ic->in_progress);
- wake_up_locked(&ic->endio_wait);
+ while (unlikely(!list_empty(&ic->wait_list))) {
+ struct dm_integrity_range *last_range =
+ list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
+ struct task_struct *last_range_task;
+ if (!ranges_overlap(range, last_range))
+ break;
+ last_range_task = last_range->task;
+ list_del(&last_range->wait_entry);
+ if (!add_new_range(ic, last_range, false)) {
+ last_range->task = last_range_task;
+ list_add(&last_range->wait_entry, &ic->wait_list);
+ break;
+ }
+ last_range->waiting = false;
+ wake_up_process(last_range_task);
+ }
}
static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
@@ -910,6 +977,19 @@ static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *r
spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}
+static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
+{
+ new_range->waiting = true;
+ list_add_tail(&new_range->wait_entry, &ic->wait_list);
+ new_range->task = current;
+ do {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&ic->endio_wait.lock);
+ io_schedule();
+ spin_lock_irq(&ic->endio_wait.lock);
+ } while (unlikely(new_range->waiting));
+}
+
static void init_journal_node(struct journal_node *node)
{
RB_CLEAR_NODE(&node->node);
@@ -1599,8 +1679,12 @@ retry:
dio->range.n_sectors = min(dio->range.n_sectors,
ic->free_sectors << ic->sb->log2_sectors_per_block);
- if (unlikely(!dio->range.n_sectors))
- goto sleep;
+ if (unlikely(!dio->range.n_sectors)) {
+ if (from_map)
+ goto offload_to_thread;
+ sleep_on_endio_wait(ic);
+ goto retry;
+ }
range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
ic->free_sectors -= range_sectors;
journal_section = ic->free_section;
@@ -1654,22 +1738,20 @@ retry:
}
}
}
- if (unlikely(!add_new_range(ic, &dio->range))) {
+ if (unlikely(!add_new_range(ic, &dio->range, true))) {
/*
* We must not sleep in the request routine because it could
* stall bios on current->bio_list.
* So, we offload the bio to a workqueue if we have to sleep.
*/
-sleep:
if (from_map) {
+offload_to_thread:
spin_unlock_irq(&ic->endio_wait.lock);
INIT_WORK(&dio->work, integrity_bio_wait);
queue_work(ic->wait_wq, &dio->work);
return;
- } else {
- sleep_on_endio_wait(ic);
- goto retry;
}
+ wait_and_add_new_range(ic, &dio->range);
}
spin_unlock_irq(&ic->endio_wait.lock);
@@ -1701,14 +1783,18 @@ sleep:
bio->bi_end_io = integrity_end_io;
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
- bio->bi_iter.bi_sector += ic->start;
generic_make_request(bio);
if (need_sync_io) {
wait_for_completion_io(&read_comp);
+ if (unlikely(ic->recalc_wq != NULL) &&
+ ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
+ goto skip_check;
if (likely(!bio->bi_status))
integrity_metadata(&dio->work);
else
+skip_check:
dec_in_flight(dio);
} else {
@@ -1892,8 +1978,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
spin_lock_irq(&ic->endio_wait.lock);
- while (unlikely(!add_new_range(ic, &io->range)))
- sleep_on_endio_wait(ic);
+ if (unlikely(!add_new_range(ic, &io->range, true)))
+ wait_and_add_new_range(ic, &io->range);
if (likely(!from_replay)) {
struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
@@ -1981,7 +2067,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (READ_ONCE(ic->suspending))
+ if (READ_ONCE(ic->suspending) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2008,6 +2094,108 @@ static void integrity_writer(struct work_struct *w)
spin_unlock_irq(&ic->endio_wait.lock);
}
+static void recalc_write_super(struct dm_integrity_c *ic)
+{
+ int r;
+
+ dm_integrity_flush_buffers(ic);
+ if (dm_integrity_failed(ic))
+ return;
+
+ sb_set_version(ic);
+ r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+}
+
+static void integrity_recalc(struct work_struct *w)
+{
+ struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
+ struct dm_integrity_range range;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+ sector_t area, offset;
+ sector_t metadata_block;
+ unsigned metadata_offset;
+ __u8 *t;
+ unsigned i;
+ int r;
+ unsigned super_counter = 0;
+
+ spin_lock_irq(&ic->endio_wait.lock);
+
+next_chunk:
+
+ if (unlikely(READ_ONCE(ic->suspending)))
+ goto unlock_ret;
+
+ range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
+ if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+ goto unlock_ret;
+
+ get_area_and_offset(ic, range.logical_sector, &area, &offset);
+ range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
+ if (!ic->meta_dev)
+ range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
+
+ if (unlikely(!add_new_range(ic, &range, true)))
+ wait_and_add_new_range(ic, &range);
+
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
+ recalc_write_super(ic);
+ super_counter = 0;
+ }
+
+ if (unlikely(dm_integrity_failed(ic)))
+ goto err;
+
+ io_req.bi_op = REQ_OP_READ;
+ io_req.bi_op_flags = 0;
+ io_req.mem.type = DM_IO_VMA;
+ io_req.mem.ptr.addr = ic->recalc_buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = get_data_sector(ic, area, offset);
+ io_loc.count = range.n_sectors;
+
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
+
+ t = ic->recalc_tags;
+ for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tag_size;
+ }
+
+ metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
+
+ r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "writing tags", r);
+ goto err;
+ }
+
+ spin_lock_irq(&ic->endio_wait.lock);
+ remove_range_unlocked(ic, &range);
+ ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
+ goto next_chunk;
+
+err:
+ remove_range(ic, &range);
+ return;
+
+unlock_ret:
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ recalc_write_super(ic);
+}
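The two constants bound the recalculation's footprint; the arithmetic, for reference:

/* RECALC_SECTORS     = 8192 sectors -> 8192 << 9 bytes = 4 MiB per chunk
 * RECALC_WRITE_SUPER = 16 chunks    -> recalc_sector persisted every ~64 MiB,
 *                                      so a crash repeats at most that much.
 */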
+
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
unsigned n_sections, unsigned char commit_seq)
{
@@ -2210,17 +2398,22 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
del_timer_sync(&ic->autocommit_timer);
- ic->suspending = true;
+ WRITE_ONCE(ic->suspending, 1);
+
+ if (ic->recalc_wq)
+ drain_workqueue(ic->recalc_wq);
queue_work(ic->commit_wq, &ic->commit_work);
drain_workqueue(ic->commit_wq);
if (ic->mode == 'J') {
+ if (ic->meta_dev)
+ queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
dm_integrity_flush_buffers(ic);
}
- ic->suspending = false;
+ WRITE_ONCE(ic->suspending, 0);
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
@@ -2232,6 +2425,16 @@ static void dm_integrity_resume(struct dm_target *ti)
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
replay_journal(ic);
+
+ if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
+ if (recalc_pos < ic->provided_data_sectors) {
+ queue_work(ic->recalc_wq, &ic->recalc_work);
+ } else if (recalc_pos > ic->provided_data_sectors) {
+ ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
+ recalc_write_super(ic);
+ }
+ }
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
@@ -2243,7 +2446,13 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- DMEMIT("%llu", (unsigned long long)atomic64_read(&ic->number_of_mismatches));
+ DMEMIT("%llu %llu",
+ (unsigned long long)atomic64_read(&ic->number_of_mismatches),
+ (unsigned long long)ic->provided_data_sectors);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
+ else
+ DMEMIT(" -");
break;
case STATUSTYPE_TABLE: {
@@ -2251,19 +2460,25 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
watermark_percentage += ic->journal_entries / 2;
do_div(watermark_percentage, ic->journal_entries);
arg_count = 5;
+ arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
+ arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
ic->tag_size, ic->mode, arg_count);
+ if (ic->meta_dev)
+ DMEMIT(" meta_device:%s", ic->meta_dev->name);
+ if (ic->sectors_per_block != 1)
+ DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ DMEMIT(" recalculate");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
- if (ic->sectors_per_block != 1)
- DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
#define EMIT_ALG(a, n) \
do { \
@@ -2286,7 +2501,10 @@ static int dm_integrity_iterate_devices(struct dm_target *ti,
{
struct dm_integrity_c *ic = ti->private;
- return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
+ if (!ic->meta_dev)
+ return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
+ else
+ return fn(ti, ic->dev, 0, ti->len, data);
}
static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2319,26 +2537,38 @@ static void calculate_journal_section_size(struct dm_integrity_c *ic)
static int calculate_device_limits(struct dm_integrity_c *ic)
{
__u64 initial_sectors;
- sector_t last_sector, last_area, last_offset;
calculate_journal_section_size(ic);
initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
- if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX)
+ if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
return -EINVAL;
ic->initial_sectors = initial_sectors;
- ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
- (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
- if (!(ic->metadata_run & (ic->metadata_run - 1)))
- ic->log2_metadata_run = __ffs(ic->metadata_run);
- else
- ic->log2_metadata_run = -1;
+ if (!ic->meta_dev) {
+ sector_t last_sector, last_area, last_offset;
- get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
- last_sector = get_data_sector(ic, last_area, last_offset);
+ ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
+ (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
+ if (!(ic->metadata_run & (ic->metadata_run - 1)))
+ ic->log2_metadata_run = __ffs(ic->metadata_run);
+ else
+ ic->log2_metadata_run = -1;
- if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors)
- return -EINVAL;
+ get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
+ last_sector = get_data_sector(ic, last_area, last_offset);
+ if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
+ return -EINVAL;
+ } else {
+ __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
+ meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
+ >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
+ meta_size <<= ic->log2_buffer_sectors;
+ if (ic->initial_sectors + meta_size < ic->initial_sectors ||
+ ic->initial_sectors + meta_size > ic->meta_device_sectors)
+ return -EINVAL;
+ ic->metadata_run = 1;
+ ic->log2_metadata_run = 0;
+ }
return 0;
}
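For the meta_dev branch above, a worked example of the metadata sizing, assuming provided_data_sectors = 1,000,000, tag_size = 4 and log2_buffer_sectors = 7 (64 KiB buffers):

/* meta_size = 1000000 * 4 bytes                   = 4000000
 * buffers   = DIV_ROUND_UP(4000000, 1 << (7 + 9)) = DIV_ROUND_UP(4000000, 65536) = 62
 * meta_size = 62 << 7                             = 7936 sectors of metadata
 */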
@@ -2350,7 +2580,6 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
memcpy(ic->sb->magic, SB_MAGIC, 8);
- ic->sb->version = SB_VERSION;
ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
if (ic->journal_mac_alg.alg_string)
@@ -2360,28 +2589,55 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
journal_sections = journal_sectors / ic->journal_section_sectors;
if (!journal_sections)
journal_sections = 1;
- ic->sb->journal_sections = cpu_to_le32(journal_sections);
- if (!interleave_sectors)
- interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
- ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
- ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
- ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
-
- ic->provided_data_sectors = 0;
- for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) {
- __u64 prev_data_sectors = ic->provided_data_sectors;
+ if (!ic->meta_dev) {
+ ic->sb->journal_sections = cpu_to_le32(journal_sections);
+ if (!interleave_sectors)
+ interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
+ ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
+ ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
+ ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
+
+ ic->provided_data_sectors = 0;
+ for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
+ __u64 prev_data_sectors = ic->provided_data_sectors;
+
+ ic->provided_data_sectors |= (sector_t)1 << test_bit;
+ if (calculate_device_limits(ic))
+ ic->provided_data_sectors = prev_data_sectors;
+ }
+ if (!ic->provided_data_sectors)
+ return -EINVAL;
+ } else {
+ ic->sb->log2_interleave_sectors = 0;
+ ic->provided_data_sectors = ic->data_device_sectors;
+ ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
+
+try_smaller_buffer:
+ ic->sb->journal_sections = cpu_to_le32(0);
+ for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
+ __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
+ __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
+ if (test_journal_sections > journal_sections)
+ continue;
+ ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
+ if (calculate_device_limits(ic))
+ ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
- ic->provided_data_sectors |= (sector_t)1 << test_bit;
- if (calculate_device_limits(ic))
- ic->provided_data_sectors = prev_data_sectors;
+ }
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ if (ic->log2_buffer_sectors > 3) {
+ ic->log2_buffer_sectors--;
+ goto try_smaller_buffer;
+ }
+ return -EINVAL;
+ }
}
- if (!ic->provided_data_sectors)
- return -EINVAL;
-
ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
+ sb_set_version(ic);
+
return 0;
}
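The journal-sizing loop in the meta_dev branch is a greedy high-bit-first search for the largest journal_sections value that both stays at or below the requested count and passes calculate_device_limits(). The same idiom in isolation, with hypothetical names; it works because the feasibility test is monotone in v:

/* Hypothetical sketch: largest v <= limit with fits(v), built high bit first. */
static u32 largest_fitting(u32 limit, bool (*fits)(u32 v))
{
	u32 v = 0;
	int bit;

	for (bit = fls(limit) - 1; bit >= 0; bit--) {
		u32 test = v | (1U << bit);

		if (test <= limit && fits(test))
			v = test;	/* keep the bit only while it fits */
	}
	return v;
}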
@@ -2828,6 +3084,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{0, 9, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+ bool recalculate;
bool should_write_sb;
__u64 threshold;
unsigned long long start;
@@ -2848,6 +3105,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->per_io_data_size = sizeof(struct dm_integrity_io);
ic->in_progress = RB_ROOT;
+ INIT_LIST_HEAD(&ic->wait_list);
init_waitqueue_head(&ic->endio_wait);
bio_list_init(&ic->flush_bio_list);
init_waitqueue_head(&ic->copy_to_journal_wait);
@@ -2883,13 +3141,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
- journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
- ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
+ journal_sectors = 0;
interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
buffer_sectors = DEFAULT_BUFFER_SECTORS;
journal_watermark = DEFAULT_JOURNAL_WATERMARK;
sync_msec = DEFAULT_SYNC_MSEC;
+ recalculate = false;
ic->sectors_per_block = 1;
as.argc = argc - DIRECT_ARGUMENTS;
@@ -2908,7 +3165,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
- journal_sectors = val;
+ journal_sectors = val ? val : 1;
else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
interleave_sectors = val;
else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
@@ -2917,7 +3174,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
journal_watermark = val;
else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
sync_msec = val;
- else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
+ else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+ if (ic->meta_dev) {
+ dm_put_device(ti, ic->meta_dev);
+ ic->meta_dev = NULL;
+ }
+ r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
+ if (r) {
+ ti->error = "Device lookup failed";
+ goto bad;
+ }
+ } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
if (val < 1 << SECTOR_SHIFT ||
val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
(val & (val -1))) {
@@ -2941,6 +3208,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
"Invalid journal_mac argument");
if (r)
goto bad;
+ } else if (!strcmp(opt_string, "recalculate")) {
+ recalculate = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -2948,6 +3217,21 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
+ ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ if (!ic->meta_dev)
+ ic->meta_device_sectors = ic->data_device_sectors;
+ else
+ ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+
+ if (!journal_sectors) {
+ journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
+ ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
+ }
+
+ if (!buffer_sectors)
+ buffer_sectors = 1;
+ ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
+
r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
"Invalid internal hash", "Error setting internal hash key");
if (r)
@@ -3062,7 +3346,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (ic->sb->version != SB_VERSION) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -3083,11 +3367,19 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
/* make sure that ti->max_io_len doesn't overflow */
- if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
- ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
- r = -EINVAL;
- ti->error = "Invalid interleave_sectors in the superblock";
- goto bad;
+ if (!ic->meta_dev) {
+ if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
+ ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
+ r = -EINVAL;
+ ti->error = "Invalid interleave_sectors in the superblock";
+ goto bad;
+ }
+ } else {
+ if (ic->sb->log2_interleave_sectors) {
+ r = -EINVAL;
+ ti->error = "Invalid interleave_sectors in the superblock";
+ goto bad;
+ }
}
ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
@@ -3101,20 +3393,28 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Journal mac mismatch";
goto bad;
}
+
+try_smaller_buffer:
r = calculate_device_limits(ic);
if (r) {
+ if (ic->meta_dev) {
+ if (ic->log2_buffer_sectors > 3) {
+ ic->log2_buffer_sectors--;
+ goto try_smaller_buffer;
+ }
+ }
ti->error = "The device is too small";
goto bad;
}
+ if (!ic->meta_dev)
+ ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
+
if (ti->len > ic->provided_data_sectors) {
r = -EINVAL;
ti->error = "Not enough provided sectors for requested mapping size";
goto bad;
}
- if (!buffer_sectors)
- buffer_sectors = 1;
- ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT);
threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
threshold += 50;
@@ -3138,8 +3438,40 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
(unsigned long long)ic->provided_data_sectors);
DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
- ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors),
- 1, 0, NULL, NULL);
+ if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ if (!ic->internal_hash) {
+ r = -EINVAL;
+ ti->error = "Recalculate is only valid with internal hash";
+ goto bad;
+ }
+ ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
+ if (!ic->recalc_wq) {
+ ti->error = "Cannot allocate workqueue";
+ r = -ENOMEM;
+ goto bad;
+ }
+ INIT_WORK(&ic->recalc_work, integrity_recalc);
+ ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+ if (!ic->recalc_buffer) {
+ ti->error = "Cannot allocate buffer for recalculating";
+ r = -ENOMEM;
+ goto bad;
+ }
+ ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL);
+ if (!ic->recalc_tags) {
+ ti->error = "Cannot allocate tags for recalculating";
+ r = -ENOMEM;
+ goto bad;
+ }
+ }
+
+ ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
if (IS_ERR(ic->bufio)) {
r = PTR_ERR(ic->bufio);
ti->error = "Cannot initialize dm-bufio";
@@ -3171,9 +3503,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ic->just_formatted = true;
}
- r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
- if (r)
- goto bad;
+ if (!ic->meta_dev) {
+ r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
+ if (r)
+ goto bad;
+ }
if (!ic->internal_hash)
dm_integrity_set(ti, ic);
@@ -3192,6 +3526,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
struct dm_integrity_c *ic = ti->private;
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+ BUG_ON(!list_empty(&ic->wait_list));
if (ic->metadata_wq)
destroy_workqueue(ic->metadata_wq);
@@ -3201,6 +3536,12 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->commit_wq);
if (ic->writer_wq)
destroy_workqueue(ic->writer_wq);
+ if (ic->recalc_wq)
+ destroy_workqueue(ic->recalc_wq);
+ if (ic->recalc_buffer)
+ vfree(ic->recalc_buffer);
+ if (ic->recalc_tags)
+ kvfree(ic->recalc_tags);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
mempool_exit(&ic->journal_io_mempool);
@@ -3208,6 +3549,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
dm_io_client_destroy(ic->io);
if (ic->dev)
dm_put_device(ti, ic->dev);
+ if (ic->meta_dev)
+ dm_put_device(ti, ic->meta_dev);
dm_integrity_free_page_list(ic, ic->journal);
dm_integrity_free_page_list(ic, ic->journal_io);
dm_integrity_free_page_list(ic, ic->journal_xor);
@@ -3248,7 +3591,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
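
[Note] The recalculate path added above follows the standard ordered-workqueue pattern: allocate a WQ_MEM_RECLAIM queue with max_active == 1, initialize a work item, and kick it off once the superblock flag is seen. A minimal sketch of that pattern, with illustrative names (not the driver's own):

#include <linux/workqueue.h>
#include <linux/vmalloc.h>

struct recalc_ctx {
	struct workqueue_struct *wq;
	struct work_struct work;
	void *buffer;
};

static void recalc_fn(struct work_struct *w)
{
	struct recalc_ctx *ctx = container_of(w, struct recalc_ctx, work);

	/* walk the device in chunks, recomputing tags into ctx->buffer */
}

static int recalc_start(struct recalc_ctx *ctx, size_t bufsize)
{
	/* WQ_MEM_RECLAIM + max_active == 1: ordered, usable under reclaim */
	ctx->wq = alloc_workqueue("example-recalc", WQ_MEM_RECLAIM, 1);
	if (!ctx->wq)
		return -ENOMEM;

	ctx->buffer = vmalloc(bufsize);
	if (!ctx->buffer) {
		destroy_workqueue(ctx->wq);
		return -ENOMEM;
	}

	INIT_WORK(&ctx->work, recalc_fn);
	queue_work(ctx->wq, &ctx->work);
	return 0;
}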
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 3c7547a3c371..2fc4213e02b5 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -487,6 +487,8 @@ static int run_complete_job(struct kcopyd_job *job)
if (atomic_dec_and_test(&kc->nr_jobs))
wake_up(&kc->destroyq);
+ cond_resched();
+
return 0;
}
@@ -741,9 +743,9 @@ static void split_job(struct kcopyd_job *master_job)
}
}
-int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
- unsigned int num_dests, struct dm_io_region *dests,
- unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
+void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
struct kcopyd_job *job;
int i;
@@ -818,16 +820,14 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->progress = 0;
split_job(job);
}
-
- return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);
-int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context)
+void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+ unsigned num_dests, struct dm_io_region *dests,
+ unsigned flags, dm_kcopyd_notify_fn fn, void *context)
{
- return dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
+ dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
EXPORT_SYMBOL(dm_kcopyd_zero);
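
[Note] With the conversion above, dm_kcopyd_copy() and dm_kcopyd_zero() can no longer fail synchronously; errors are delivered only through the dm_kcopyd_notify_fn callback, which is why every caller's error branch disappears in the follow-on hunks. A minimal caller sketch under the new contract (identifiers illustrative):

#include <linux/dm-kcopyd.h>
#include <linux/completion.h>
#include <linux/printk.h>

static void copy_done(int read_err, unsigned long write_err, void *context)
{
	struct completion *done = context;

	if (read_err || write_err)
		pr_err("kcopyd: read_err=%d write_err=%lu\n",
		       read_err, write_err);
	complete(done);
}

static void copy_one(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		     struct dm_io_region *to)
{
	DECLARE_COMPLETION_ONSTACK(done);

	/* no return value to check: failure surfaces in copy_done() */
	dm_kcopyd_copy(kc, from, 1, to, 0, copy_done, &done);
	wait_for_completion(&done);
}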
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index ab13fcec3fca..cae689de75fd 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
}
/* Return md raid10 algorithm for @name */
-static const int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
{
if (!strcasecmp(name, "near"))
return ALGORITHM_RAID10_NEAR;
@@ -3859,7 +3859,7 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
/* Try loading the bitmap unless "raid0", which does not have one */
if (!rs_is_raid0(rs) &&
!test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
- r = bitmap_load(&rs->md);
+ r = md_bitmap_load(&rs->md);
if (r)
DMERR("Failed to load bitmap");
}
@@ -3987,8 +3987,8 @@ static int raid_preresume(struct dm_target *ti)
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
- r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
- to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+ r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
+ to_bytes(rs->requested_bitmap_chunk_sectors), 0);
if (r)
DMERR("Failed to resize bitmap");
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 5903e492bb34..79eab1071ec2 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -326,9 +326,8 @@ static void recovery_complete(int read_err, unsigned long write_err,
dm_rh_recovery_end(reg, !(read_err || write_err));
}
-static int recover(struct mirror_set *ms, struct dm_region *reg)
+static void recover(struct mirror_set *ms, struct dm_region *reg)
{
- int r;
unsigned i;
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
struct mirror *m;
@@ -367,10 +366,8 @@ static int recover(struct mirror_set *ms, struct dm_region *reg)
if (!errors_handled(ms))
set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
- r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
- flags, recovery_complete, reg);
-
- return r;
+ dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
+ flags, recovery_complete, reg);
}
static void reset_ms_flags(struct mirror_set *ms)
@@ -388,7 +385,6 @@ static void do_recovery(struct mirror_set *ms)
{
struct dm_region *reg;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
- int r;
/*
* Start quiescing some regions.
@@ -398,11 +394,8 @@ static void do_recovery(struct mirror_set *ms)
/*
* Copy any already quiesced regions.
*/
- while ((reg = dm_rh_recovery_start(ms->rh))) {
- r = recover(ms, reg);
- if (r)
- dm_rh_recovery_end(reg, 0);
- }
+ while ((reg = dm_rh_recovery_start(ms->rh)))
+ recover(ms, reg);
/*
* Update the in sync flag.
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 97de7a7334d4..ae4b33d10924 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -85,7 +85,7 @@ struct dm_snapshot {
* A list of pending exceptions that completed out of order.
* Protected by kcopyd single-threaded callback.
*/
- struct list_head out_of_order_list;
+ struct rb_root out_of_order_tree;
mempool_t pending_pool;
@@ -200,7 +200,7 @@ struct dm_snap_pending_exception {
/* A sequence number, it is used for in-order completion. */
sector_t exception_sequence;
- struct list_head out_of_order_entry;
+ struct rb_node out_of_order_node;
/*
* For writing a complete chunk, bypassing the copy.
@@ -1173,7 +1173,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
atomic_set(&s->pending_exceptions_count, 0);
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
- INIT_LIST_HEAD(&s->out_of_order_list);
+ s->out_of_order_tree = RB_ROOT;
mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
@@ -1539,28 +1539,41 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
pe->copy_error = read_err || write_err;
if (pe->exception_sequence == s->exception_complete_sequence) {
+ struct rb_node *next;
+
s->exception_complete_sequence++;
complete_exception(pe);
- while (!list_empty(&s->out_of_order_list)) {
- pe = list_entry(s->out_of_order_list.next,
- struct dm_snap_pending_exception, out_of_order_entry);
+ next = rb_first(&s->out_of_order_tree);
+ while (next) {
+ pe = rb_entry(next, struct dm_snap_pending_exception,
+ out_of_order_node);
if (pe->exception_sequence != s->exception_complete_sequence)
break;
+ next = rb_next(next);
s->exception_complete_sequence++;
- list_del(&pe->out_of_order_entry);
+ rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
complete_exception(pe);
+ cond_resched();
}
} else {
- struct list_head *lh;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &s->out_of_order_tree.rb_node;
struct dm_snap_pending_exception *pe2;
- list_for_each_prev(lh, &s->out_of_order_list) {
- pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
- if (pe2->exception_sequence < pe->exception_sequence)
- break;
+ while (*p) {
+ pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
+ parent = *p;
+
+ BUG_ON(pe->exception_sequence == pe2->exception_sequence);
+ if (pe->exception_sequence < pe2->exception_sequence)
+ p = &((*p)->rb_left);
+ else
+ p = &((*p)->rb_right);
}
- list_add(&pe->out_of_order_entry, lh);
+
+ rb_link_node(&pe->out_of_order_node, parent, p);
+ rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
}
@@ -1694,8 +1707,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return DM_MAPIO_KILL;
- /* FIXME: should only take write lock if we need
- * to copy an exception */
mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
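
[Note] The list walk in copy_callback() was O(n) per out-of-order completion; the rb-tree above makes insertion O(log n) and keeps rb_first() as the next candidate to complete. The keyed-insert idiom, reduced to its essentials (illustrative types, not the snapshot code itself):

#include <linux/rbtree.h>
#include <linux/types.h>

struct seq_node {
	struct rb_node node;
	u64 seq;		/* strictly increasing, no duplicates */
};

static void seq_tree_insert(struct rb_root *root, struct seq_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct seq_node *cur = rb_entry(*p, struct seq_node, node);

		parent = *p;
		if (new->seq < cur->seq)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}

static struct seq_node *seq_tree_pop_min(struct rb_root *root)
{
	struct rb_node *first = rb_first(root);

	if (!first)
		return NULL;
	rb_erase(first, root);
	return rb_entry(first, struct seq_node, node);
}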
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 938766794c2e..3d0e2c198f06 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return q && blk_queue_dax(q);
+ return bdev_dax_supported(dev->bdev, PAGE_SIZE);
}
static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_dax(t))
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
if (dm_table_supports_dax_write_cache(t))
dax_write_cache(t->md->dax_dev, true);
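
[Note] The new else branch is what makes table reloads correct: request_queue flags outlive table swaps, so a capability set for the previous table must be cleared when the replacement table lacks it. The same set-or-clear shape works for any queue flag (sketch, illustrative helper):

#include <linux/blkdev.h>

static void sync_queue_flag(struct request_queue *q, unsigned int flag,
			    bool supported)
{
	/* run on every table load, not only the first */
	if (supported)
		blk_queue_flag_set(flag, q);
	else
		blk_queue_flag_clear(flag, q);
}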
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 36ef284ad086..72142021b5c9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
int r;
- size_t metadata_len, data_len;
struct thin_disk_superblock *disk_super;
struct dm_block *sblock;
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
if (r < 0)
return r;
- r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
- if (r < 0)
- return r;
-
- r = dm_sm_root_size(pmd->data_sm, &data_len);
- if (r < 0)
- return r;
-
r = save_sm_roots(pmd);
if (r < 0)
return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7945238df1c0..7bd60a150f8f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1220,18 +1220,13 @@ static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
sector_t begin, sector_t end)
{
- int r;
struct dm_io_region to;
to.bdev = tc->pool_dev->bdev;
to.sector = begin;
to.count = end - begin;
- r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
- if (r < 0) {
- DMERR_LIMIT("dm_kcopyd_zero() failed");
- copy_complete(1, 1, m);
- }
+ dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
}
static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
@@ -1257,7 +1252,6 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct dm_bio_prison_cell *cell, struct bio *bio,
sector_t len)
{
- int r;
struct pool *pool = tc->pool;
struct dm_thin_new_mapping *m = get_next_mapping(pool);
@@ -1296,19 +1290,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
to.sector = data_dest * pool->sectors_per_block;
to.count = len;
- r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
- 0, copy_complete, m);
- if (r < 0) {
- DMERR_LIMIT("dm_kcopyd_copy() failed");
- copy_complete(1, 1, m);
-
- /*
- * We allow the zero to be issued, to simplify the
- * error path. Otherwise we'd need to start
- * worrying about decrementing the prepare_actions
- * counter.
- */
- }
+ dm_kcopyd_copy(pool->copier, &from, 1, &to,
+ 0, copy_complete, m);
/*
* Do we need to zero a tail region?
@@ -1386,6 +1369,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+static void requeue_bios(struct pool *pool);
+
static void check_for_space(struct pool *pool)
{
int r;
@@ -1398,8 +1383,10 @@ static void check_for_space(struct pool *pool)
if (r)
return;
- if (nr_free)
+ if (nr_free) {
set_pool_mode(pool, PM_WRITE);
+ requeue_bios(pool);
+ }
}
/*
@@ -1476,7 +1463,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
r = dm_pool_alloc_data_block(pool->pmd, result);
if (r) {
- metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ if (r == -ENOSPC)
+ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+ else
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
return r;
}
@@ -2513,6 +2503,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_WRITE:
if (old_mode != new_mode)
notify_of_pool_mode_change(pool, "write");
+ if (old_mode == PM_OUT_OF_DATA_SPACE)
+ cancel_delayed_work_sync(&pool->no_space_timeout);
pool->out_of_data_space = false;
pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
dm_pool_metadata_read_write(pool->pmd);
@@ -3883,6 +3875,8 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("- ");
+ DMEMIT("%llu ", (unsigned long long)calc_metadata_threshold(pt));
+
break;
case STATUSTYPE_TABLE:
@@ -3972,7 +3966,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 19, 0},
+ .version = {1, 20, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4346,7 +4340,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 19, 0},
+ .version = {1, 20, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
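
[Note] Two of the dm-thin hunks cooperate: alloc_data_block() now flips the pool to PM_OUT_OF_DATA_SPACE on -ENOSPC, and re-entering PM_WRITE both requeues held bios and synchronously cancels the no-space timer so it cannot fire after space has returned. The state-exit idiom in isolation (illustrative names):

#include <linux/workqueue.h>

enum mini_mode { MODE_WRITE, MODE_NO_SPACE };

struct mini_pool {
	enum mini_mode mode;
	struct delayed_work no_space_timeout;
};

static void enter_write_mode(struct mini_pool *pool)
{
	enum mini_mode old = pool->mode;

	pool->mode = MODE_WRITE;
	/*
	 * Cancel synchronously: a timer armed while out of space must
	 * not run after the pool is writable again.
	 */
	if (old == MODE_NO_SPACE)
		cancel_delayed_work_sync(&pool->no_space_timeout);
}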
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 1839236a0f81..5f1f80d424dd 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -136,6 +136,7 @@ struct dm_writecache {
struct dm_target *ti;
struct dm_dev *dev;
struct dm_dev *ssd_dev;
+ sector_t start_sector;
void *memory_map;
uint64_t memory_map_size;
size_t metadata_sectors;
@@ -259,7 +260,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
if (da != p) {
long i;
wc->memory_map = NULL;
- pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL);
+ pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
r = -ENOMEM;
goto err2;
@@ -292,6 +293,10 @@ static int persistent_memory_claim(struct dm_writecache *wc)
}
dax_read_unlock(id);
+
+ wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
+ wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
+
return 0;
err3:
kvfree(pages);
@@ -310,7 +315,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
static void persistent_memory_release(struct dm_writecache *wc)
{
if (wc->memory_vmapped)
- vunmap(wc->memory_map);
+ vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}
static struct page *persistent_memory_page(void *addr)
@@ -358,7 +363,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
- return wc->metadata_sectors +
+ return wc->start_sector + wc->metadata_sectors +
((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}
@@ -451,7 +456,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
COMPLETION_INITIALIZER_ONSTACK(endio.c),
ATOMIC_INIT(1),
};
- unsigned bitmap_bits = wc->dirty_bitmap_size * BITS_PER_LONG;
+ unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
unsigned i = 0;
while (1) {
@@ -470,6 +475,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
if (unlikely(region.sector + region.count > wc->metadata_sectors))
region.count = wc->metadata_sectors - region.sector;
+ region.sector += wc->start_sector;
atomic_inc(&endio.count);
req.bi_op = REQ_OP_WRITE;
req.bi_op_flags = REQ_SYNC;
@@ -858,7 +864,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
if (wc->entries)
return 0;
- wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks);
+ wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
if (!wc->entries)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
@@ -1480,9 +1486,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
wb->page_offset = PAGE_SIZE;
if (max_pages <= WB_LIST_INLINE ||
- unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *),
- GFP_NOIO | __GFP_NORETRY |
- __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+ unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+ GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
wb->wc_list = wb->wc_list_inline;
max_pages = WB_LIST_INLINE;
}
@@ -1945,14 +1951,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
- if (WC_MODE_PMEM(wc)) {
- r = persistent_memory_claim(wc);
- if (r) {
- ti->error = "Unable to map persistent memory for cache";
- goto bad;
- }
- }
-
/*
* Parse the cache block size
*/
@@ -1981,7 +1979,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (opt_params) {
string = dm_shift_arg(&as), opt_params--;
- if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
+ if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
+ unsigned long long start_sector;
+ string = dm_shift_arg(&as), opt_params--;
+ if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+ goto invalid_optional;
+ wc->start_sector = start_sector;
+ if (wc->start_sector != start_sector ||
+ wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+ goto invalid_optional;
+ } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
goto invalid_optional;
@@ -2038,12 +2045,20 @@ invalid_optional:
goto bad;
}
- if (!WC_MODE_PMEM(wc)) {
+ if (WC_MODE_PMEM(wc)) {
+ r = persistent_memory_claim(wc);
+ if (r) {
+ ti->error = "Unable to map persistent memory for cache";
+ goto bad;
+ }
+ } else {
struct dm_io_region region;
struct dm_io_request req;
size_t n_blocks, n_metadata_blocks;
uint64_t n_bitmap_bits;
+ wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
+
bio_list_init(&wc->flush_list);
wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
if (IS_ERR(wc->flush_thread)) {
@@ -2096,7 +2111,7 @@ invalid_optional:
}
region.bdev = wc->ssd_dev->bdev;
- region.sector = 0;
+ region.sector = wc->start_sector;
region.count = wc->metadata_sectors;
req.bi_op = REQ_OP_READ;
req.bi_op_flags = REQ_SYNC;
@@ -2224,6 +2239,8 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
wc->dev->name, wc->ssd_dev->name, wc->block_size);
extra_args = 0;
+ if (wc->start_sector)
+ extra_args += 2;
if (wc->high_wm_percent_set)
extra_args += 2;
if (wc->low_wm_percent_set)
@@ -2238,6 +2255,8 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
extra_args++;
DMEMIT("%u", extra_args);
+ if (wc->start_sector)
+ DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
if (wc->high_wm_percent_set) {
x = (uint64_t)wc->freelist_high_watermark * 100;
x += wc->n_blocks / 2;
@@ -2264,7 +2283,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
static struct target_type writecache_target = {
.name = "writecache",
- .version = {1, 0, 0},
+ .version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = writecache_ctr,
.dtr = writecache_dtr,
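
[Note] Every hunk above that touches an on-SSD address adds wc->start_sector exactly once (data blocks, metadata I/O, the vunmap() base in PMEM mode), and memory_map_size shrinks by the same amount, so cache-internal indexing is unchanged. The translation, modeled compactly (illustrative, not the driver's own types):

#include <linux/blkdev.h>

struct wc_layout {
	sector_t start_sector;		/* cache begins here on the SSD */
	sector_t metadata_sectors;	/* metadata area after the start */
};

/* SSD sector backing cache block @index of size 1 << block_bits bytes */
static sector_t block_to_ssd_sector(const struct wc_layout *l,
				    sector_t index, unsigned block_bits)
{
	return l->start_sector + l->metadata_sectors +
	       (index << (block_bits - SECTOR_SHIFT));
}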
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 44a119e12f1a..edf4b95eb075 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -161,10 +161,8 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
/* Copy the valid region */
set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
- ret = dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
- dmz_reclaim_kcopy_end, zrc);
- if (ret)
- return ret;
+ dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
+ dmz_reclaim_kcopy_end, zrc);
/* Wait for copy to complete */
wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 3c0e45f4dcf5..a44183ff4be0 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
+ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {
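
[Note] The GFP change matters because chunk works are inserted from the BIO handling path: a GFP_KERNEL radix-tree node allocation could recurse into reclaim and issue I/O to the very device being served. Declaring the tree with GFP_NOIO makes every node allocation inherit that mask (sketch):

#include <linux/radix-tree.h>
#include <linux/gfp.h>

/* node allocations inherit GFP_NOIO: reclaim will not issue I/O */
static RADIX_TREE(chunk_tree, GFP_NOIO);

static int track_chunk(unsigned long chunk, void *work)
{
	return radix_tree_insert(&chunk_tree, chunk, work);
}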
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e65429a29c06..20f7e4ef5342 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -609,7 +609,8 @@ static void start_io_acct(struct dm_io *io)
io->start_time = jiffies;
- generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
+ generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
+ &dm_disk(md)->part0);
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
@@ -628,7 +629,8 @@ static void end_io_acct(struct dm_io *io)
int pending;
int rw = bio_data_dir(bio);
- generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);
+ generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
+ io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -1056,8 +1058,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (len < 1)
goto out;
nr_pages = min(len, nr_pages);
- if (ti->type->direct_access)
- ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+ ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
out:
dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1607,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* the usage of io->orig_bio in dm_remap_zone_report()
* won't be affected by this reassignment.
*/
- struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
- &md->queue->bio_split);
+ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+ GFP_NOIO, &md->queue->bio_split);
ci.io->orig_bio = b;
- bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
bio_chain(b, bio);
ret = generic_make_request(bio);
break;
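
[Note] bio_split() replaces the clone-plus-bio_advance pair with one call that clones the front @sectors and advances the original past them; bio_chain() then keeps the resubmitted remainder from completing before the front part does. The idiom on its own (illustrative):

#include <linux/bio.h>

/* Handle the first @sectors of @bio now and resubmit the rest. */
static struct bio *take_front(struct bio *bio, unsigned sectors,
			      struct bio_set *bs)
{
	struct bio *front = bio_split(bio, sectors, GFP_NOIO, bs);

	/* the remainder (@bio) completes only after @front does */
	bio_chain(front, bio);
	generic_make_request(bio);
	return front;		/* caller keeps processing this part */
}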
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index f983c3fdf204..2fc8c113977f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -46,8 +46,8 @@ static inline char *bmname(struct bitmap *bitmap)
* if we find our page, we increment the page's refcount so that it stays
* allocated while we're using it
*/
-static int bitmap_checkpage(struct bitmap_counts *bitmap,
- unsigned long page, int create, int no_hijack)
+static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
+ unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
@@ -115,7 +115,7 @@ __acquires(bitmap->lock)
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
-static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
+static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
char *ptr;
@@ -280,7 +280,7 @@ restart:
return -EINVAL;
}
-static void bitmap_file_kick(struct bitmap *bitmap);
+static void md_bitmap_file_kick(struct bitmap *bitmap);
/*
* write out a page to a file
*/
@@ -310,7 +310,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_read(&bitmap->pending_writes)==0);
}
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
- bitmap_file_kick(bitmap);
+ md_bitmap_file_kick(bitmap);
}
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
@@ -421,11 +421,11 @@ out:
*/
/*
- * bitmap_wait_writes() should be called before writing any bitmap
+ * md_bitmap_wait_writes() should be called before writing any bitmap
* blocks, to ensure previous writes, particularly from
- * bitmap_daemon_work(), have completed.
+ * md_bitmap_daemon_work(), have completed.
*/
-static void bitmap_wait_writes(struct bitmap *bitmap)
+static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
if (bitmap->storage.file)
wait_event(bitmap->write_wait,
@@ -443,7 +443,7 @@ static void bitmap_wait_writes(struct bitmap *bitmap)
/* update the event counter and sync the superblock to disk */
-void bitmap_update_sb(struct bitmap *bitmap)
+void md_bitmap_update_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
@@ -476,10 +476,10 @@ void bitmap_update_sb(struct bitmap *bitmap)
kunmap_atomic(sb);
write_page(bitmap, bitmap->storage.sb_page, 1);
}
-EXPORT_SYMBOL(bitmap_update_sb);
+EXPORT_SYMBOL(md_bitmap_update_sb);
/* print out the bitmap file superblock */
-void bitmap_print_sb(struct bitmap *bitmap)
+void md_bitmap_print_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
@@ -518,7 +518,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
*
* Returns: 0 on success, -Exxx on error
*/
-static int bitmap_new_disk_sb(struct bitmap *bitmap)
+static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
@@ -577,7 +577,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
}
/* read the superblock from the bitmap file and initialize some bitmap fields */
-static int bitmap_read_sb(struct bitmap *bitmap)
+static int md_bitmap_read_sb(struct bitmap *bitmap)
{
char *reason = NULL;
bitmap_super_t *sb;
@@ -727,7 +727,7 @@ out_no_sb:
bitmap->mddev->bitmap_info.space > sectors_reserved)
bitmap->mddev->bitmap_info.space = sectors_reserved;
if (err) {
- bitmap_print_sb(bitmap);
+ md_bitmap_print_sb(bitmap);
if (bitmap->cluster_slot < 0)
md_cluster_stop(bitmap->mddev);
}
@@ -774,9 +774,9 @@ static inline struct page *filemap_get_page(struct bitmap_storage *store,
return store->filemap[file_page_index(store, chunk)];
}
-static int bitmap_storage_alloc(struct bitmap_storage *store,
- unsigned long chunks, int with_super,
- int slot_number)
+static int md_bitmap_storage_alloc(struct bitmap_storage *store,
+ unsigned long chunks, int with_super,
+ int slot_number)
{
int pnum, offset = 0;
unsigned long num_pages;
@@ -830,7 +830,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
return 0;
}
-static void bitmap_file_unmap(struct bitmap_storage *store)
+static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
struct page **map, *sb_page;
int pages;
@@ -862,12 +862,12 @@ static void bitmap_file_unmap(struct bitmap_storage *store)
* then it is no longer reliable, so we stop using it and we mark the file
* as failed in the superblock
*/
-static void bitmap_file_kick(struct bitmap *bitmap)
+static void md_bitmap_file_kick(struct bitmap *bitmap)
{
char *path, *ptr = NULL;
if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
- bitmap_update_sb(bitmap);
+ md_bitmap_update_sb(bitmap);
if (bitmap->storage.file) {
path = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -923,7 +923,7 @@ static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
* we set the bit immediately, then we record the page number so that
* when an unplug occurs, we can flush the dirty pages out to disk
*/
-static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
+static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
unsigned long bit;
struct page *page;
@@ -952,7 +952,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}
-static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
+static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
unsigned long bit;
struct page *page;
@@ -980,7 +980,7 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
}
}
-static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
+static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
unsigned long bit;
struct page *page;
@@ -1005,7 +1005,7 @@ static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
-void bitmap_unplug(struct bitmap *bitmap)
+void md_bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
@@ -1025,7 +1025,7 @@ void bitmap_unplug(struct bitmap *bitmap)
BITMAP_PAGE_NEEDWRITE);
if (dirty || need_write) {
if (!writing) {
- bitmap_wait_writes(bitmap);
+ md_bitmap_wait_writes(bitmap);
if (bitmap->mddev->queue)
blk_add_trace_msg(bitmap->mddev->queue,
"md bitmap_unplug");
@@ -1036,14 +1036,14 @@ void bitmap_unplug(struct bitmap *bitmap)
}
}
if (writing)
- bitmap_wait_writes(bitmap);
+ md_bitmap_wait_writes(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
- bitmap_file_kick(bitmap);
+ md_bitmap_file_kick(bitmap);
}
-EXPORT_SYMBOL(bitmap_unplug);
+EXPORT_SYMBOL(md_bitmap_unplug);
-static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
+static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
* the in-memory bitmap from the on-disk bitmap -- also, sets up the
* memory mapping of the bitmap file
@@ -1055,7 +1055,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
* We ignore all bits for sectors that end earlier than 'start'.
* This is used when reading an out-of-date bitmap...
*/
-static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
+static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
struct page *page = NULL;
@@ -1078,9 +1078,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
/* if the disk bit is set, set the memory bit */
int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
>= start);
- bitmap_set_memory_bits(bitmap,
- (sector_t)i << bitmap->counts.chunkshift,
- needed);
+ md_bitmap_set_memory_bits(bitmap,
+ (sector_t)i << bitmap->counts.chunkshift,
+ needed);
}
return 0;
}
@@ -1159,9 +1159,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
/* if the disk bit is set, set the memory bit */
int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
>= start);
- bitmap_set_memory_bits(bitmap,
- (sector_t)i << bitmap->counts.chunkshift,
- needed);
+ md_bitmap_set_memory_bits(bitmap,
+ (sector_t)i << bitmap->counts.chunkshift,
+ needed);
bit_cnt++;
}
offset = 0;
@@ -1179,7 +1179,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
return ret;
}
-void bitmap_write_all(struct bitmap *bitmap)
+void md_bitmap_write_all(struct bitmap *bitmap)
{
/* We don't actually write all bitmap blocks here,
* just flag them as needing to be written
@@ -1198,16 +1198,16 @@ void bitmap_write_all(struct bitmap *bitmap)
bitmap->allclean = 0;
}
-static void bitmap_count_page(struct bitmap_counts *bitmap,
- sector_t offset, int inc)
+static void md_bitmap_count_page(struct bitmap_counts *bitmap,
+ sector_t offset, int inc)
{
sector_t chunk = offset >> bitmap->chunkshift;
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
bitmap->bp[page].count += inc;
- bitmap_checkfree(bitmap, page);
+ md_bitmap_checkfree(bitmap, page);
}
-static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
+static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
sector_t chunk = offset >> bitmap->chunkshift;
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
@@ -1217,16 +1217,16 @@ static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
bp->pending = 1;
}
-static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
- sector_t offset, sector_t *blocks,
- int create);
+static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
+ sector_t offset, sector_t *blocks,
+ int create);
/*
* bitmap daemon -- periodically wakes up to clean bits and flush pages
* out to disk
*/
-void bitmap_daemon_work(struct mddev *mddev)
+void md_bitmap_daemon_work(struct mddev *mddev)
{
struct bitmap *bitmap;
unsigned long j;
@@ -1301,10 +1301,8 @@ void bitmap_daemon_work(struct mddev *mddev)
}
counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
}
- bmc = bitmap_get_counter(counts,
- block,
- &blocks, 0);
+ bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
if (!bmc) {
j |= PAGE_COUNTER_MASK;
continue;
@@ -1312,17 +1310,17 @@ void bitmap_daemon_work(struct mddev *mddev)
if (*bmc == 1 && !bitmap->need_sync) {
/* We can clear the bit */
*bmc = 0;
- bitmap_count_page(counts, block, -1);
- bitmap_file_clear_bit(bitmap, block);
+ md_bitmap_count_page(counts, block, -1);
+ md_bitmap_file_clear_bit(bitmap, block);
} else if (*bmc && *bmc <= 2) {
*bmc = 1;
- bitmap_set_pending(counts, block);
+ md_bitmap_set_pending(counts, block);
bitmap->allclean = 0;
}
}
spin_unlock_irq(&counts->lock);
- bitmap_wait_writes(bitmap);
+ md_bitmap_wait_writes(bitmap);
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
* DIRTY pages need to be written by bitmap_unplug so it can wait
* for them.
@@ -1352,9 +1350,9 @@ void bitmap_daemon_work(struct mddev *mddev)
mutex_unlock(&mddev->bitmap_info.mutex);
}
-static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
- sector_t offset, sector_t *blocks,
- int create)
+static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
+ sector_t offset, sector_t *blocks,
+ int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
@@ -1368,7 +1366,7 @@ __acquires(bitmap->lock)
sector_t csize;
int err;
- err = bitmap_checkpage(bitmap, page, create, 0);
+ err = md_bitmap_checkpage(bitmap, page, create, 0);
if (bitmap->bp[page].hijacked ||
bitmap->bp[page].map == NULL)
@@ -1394,7 +1392,7 @@ __acquires(bitmap->lock)
&(bitmap->bp[page].map[pageoff]);
}
-int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
+int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
if (!bitmap)
return 0;
@@ -1415,7 +1413,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
bitmap_counter_t *bmc;
spin_lock_irq(&bitmap->counts.lock);
- bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
+ bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
if (!bmc) {
spin_unlock_irq(&bitmap->counts.lock);
return 0;
@@ -1437,8 +1435,8 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
switch (*bmc) {
case 0:
- bitmap_file_set_bit(bitmap, offset);
- bitmap_count_page(&bitmap->counts, offset, 1);
+ md_bitmap_file_set_bit(bitmap, offset);
+ md_bitmap_count_page(&bitmap->counts, offset, 1);
/* fall through */
case 1:
*bmc = 2;
@@ -1456,10 +1454,10 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
}
return 0;
}
-EXPORT_SYMBOL(bitmap_startwrite);
+EXPORT_SYMBOL(md_bitmap_startwrite);
-void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
- int success, int behind)
+void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
+ unsigned long sectors, int success, int behind)
{
if (!bitmap)
return;
@@ -1477,7 +1475,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
bitmap_counter_t *bmc;
spin_lock_irqsave(&bitmap->counts.lock, flags);
- bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
+ bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
if (!bmc) {
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
return;
@@ -1498,7 +1496,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
(*bmc)--;
if (*bmc <= 2) {
- bitmap_set_pending(&bitmap->counts, offset);
+ md_bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
@@ -1509,7 +1507,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
sectors = 0;
}
}
-EXPORT_SYMBOL(bitmap_endwrite);
+EXPORT_SYMBOL(md_bitmap_endwrite);
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
int degraded)
@@ -1521,7 +1519,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
return 1; /* always resync if no bitmap */
}
spin_lock_irq(&bitmap->counts.lock);
- bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
+ bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
rv = 0;
if (bmc) {
/* locked */
@@ -1539,8 +1537,8 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
return rv;
}
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
- int degraded)
+int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
+ int degraded)
{
/* bitmap_start_sync must always report on multiples of whole
* pages, otherwise resync (which is very PAGE_SIZE based) will
@@ -1561,9 +1559,9 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
}
return rv;
}
-EXPORT_SYMBOL(bitmap_start_sync);
+EXPORT_SYMBOL(md_bitmap_start_sync);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
+void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
bitmap_counter_t *bmc;
unsigned long flags;
@@ -1573,7 +1571,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
return;
}
spin_lock_irqsave(&bitmap->counts.lock, flags);
- bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
+ bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc == NULL)
goto unlock;
/* locked */
@@ -1584,7 +1582,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
*bmc |= NEEDED_MASK;
else {
if (*bmc <= 2) {
- bitmap_set_pending(&bitmap->counts, offset);
+ md_bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
}
@@ -1592,9 +1590,9 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
unlock:
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
-EXPORT_SYMBOL(bitmap_end_sync);
+EXPORT_SYMBOL(md_bitmap_end_sync);
-void bitmap_close_sync(struct bitmap *bitmap)
+void md_bitmap_close_sync(struct bitmap *bitmap)
{
/* Sync has finished, and any bitmap chunks that weren't synced
* properly have been aborted. It remains to us to clear the
@@ -1605,13 +1603,13 @@ void bitmap_close_sync(struct bitmap *bitmap)
if (!bitmap)
return;
while (sector < bitmap->mddev->resync_max_sectors) {
- bitmap_end_sync(bitmap, sector, &blocks, 0);
+ md_bitmap_end_sync(bitmap, sector, &blocks, 0);
sector += blocks;
}
}
-EXPORT_SYMBOL(bitmap_close_sync);
+EXPORT_SYMBOL(md_bitmap_close_sync);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
+void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
{
sector_t s = 0;
sector_t blocks;
@@ -1633,15 +1631,15 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
- bitmap_end_sync(bitmap, s, &blocks, 0);
+ md_bitmap_end_sync(bitmap, s, &blocks, 0);
s += blocks;
}
bitmap->last_end_sync = jiffies;
sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
-EXPORT_SYMBOL(bitmap_cond_end_sync);
+EXPORT_SYMBOL(md_bitmap_cond_end_sync);
-void bitmap_sync_with_cluster(struct mddev *mddev,
+void md_bitmap_sync_with_cluster(struct mddev *mddev,
sector_t old_lo, sector_t old_hi,
sector_t new_lo, sector_t new_hi)
{
@@ -1649,20 +1647,20 @@ void bitmap_sync_with_cluster(struct mddev *mddev,
sector_t sector, blocks = 0;
for (sector = old_lo; sector < new_lo; ) {
- bitmap_end_sync(bitmap, sector, &blocks, 0);
+ md_bitmap_end_sync(bitmap, sector, &blocks, 0);
sector += blocks;
}
WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
for (sector = old_hi; sector < new_hi; ) {
- bitmap_start_sync(bitmap, sector, &blocks, 0);
+ md_bitmap_start_sync(bitmap, sector, &blocks, 0);
sector += blocks;
}
WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
-EXPORT_SYMBOL(bitmap_sync_with_cluster);
+EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
-static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
+static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
/* For each chunk covered by any of these sectors, set the
* counter to 2 and possibly set resync_needed. They should all
@@ -1672,15 +1670,15 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
sector_t secs;
bitmap_counter_t *bmc;
spin_lock_irq(&bitmap->counts.lock);
- bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
+ bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
if (!bmc) {
spin_unlock_irq(&bitmap->counts.lock);
return;
}
if (!*bmc) {
*bmc = 2;
- bitmap_count_page(&bitmap->counts, offset, 1);
- bitmap_set_pending(&bitmap->counts, offset);
+ md_bitmap_count_page(&bitmap->counts, offset, 1);
+ md_bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
if (needed)
@@ -1689,14 +1687,14 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
}
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
-void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
+void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
unsigned long chunk;
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
- bitmap_set_memory_bits(bitmap, sec, 1);
- bitmap_file_set_bit(bitmap, sec);
+ md_bitmap_set_memory_bits(bitmap, sec, 1);
+ md_bitmap_file_set_bit(bitmap, sec);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
@@ -1709,7 +1707,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
/*
* flush out any pending updates
*/
-void bitmap_flush(struct mddev *mddev)
+void md_bitmap_flush(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
long sleep;
@@ -1722,18 +1720,18 @@ void bitmap_flush(struct mddev *mddev)
*/
sleep = mddev->bitmap_info.daemon_sleep * 2;
bitmap->daemon_lastrun -= sleep;
- bitmap_daemon_work(mddev);
+ md_bitmap_daemon_work(mddev);
bitmap->daemon_lastrun -= sleep;
- bitmap_daemon_work(mddev);
+ md_bitmap_daemon_work(mddev);
bitmap->daemon_lastrun -= sleep;
- bitmap_daemon_work(mddev);
- bitmap_update_sb(bitmap);
+ md_bitmap_daemon_work(mddev);
+ md_bitmap_update_sb(bitmap);
}
/*
* free memory that was allocated
*/
-void bitmap_free(struct bitmap *bitmap)
+void md_bitmap_free(struct bitmap *bitmap)
{
unsigned long k, pages;
struct bitmap_page *bp;
@@ -1753,7 +1751,7 @@ void bitmap_free(struct bitmap *bitmap)
atomic_read(&bitmap->pending_writes) == 0);
/* release the bitmap file */
- bitmap_file_unmap(&bitmap->storage);
+ md_bitmap_file_unmap(&bitmap->storage);
bp = bitmap->counts.bp;
pages = bitmap->counts.pages;
@@ -1767,9 +1765,9 @@ void bitmap_free(struct bitmap *bitmap)
kfree(bp);
kfree(bitmap);
}
-EXPORT_SYMBOL(bitmap_free);
+EXPORT_SYMBOL(md_bitmap_free);
-void bitmap_wait_behind_writes(struct mddev *mddev)
+void md_bitmap_wait_behind_writes(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -1783,14 +1781,14 @@ void bitmap_wait_behind_writes(struct mddev *mddev)
}
}
-void bitmap_destroy(struct mddev *mddev)
+void md_bitmap_destroy(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap) /* there was no bitmap */
return;
- bitmap_wait_behind_writes(mddev);
+ md_bitmap_wait_behind_writes(mddev);
mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
@@ -1800,7 +1798,7 @@ void bitmap_destroy(struct mddev *mddev)
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
}
/*
@@ -1808,7 +1806,7 @@ void bitmap_destroy(struct mddev *mddev)
* if this returns an error, bitmap_destroy must be called to do clean up
* once mddev->bitmap is set
*/
-struct bitmap *bitmap_create(struct mddev *mddev, int slot)
+struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
{
struct bitmap *bitmap;
sector_t blocks = mddev->resync_max_sectors;
@@ -1863,9 +1861,9 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
* instructing us to create a new on-disk bitmap instance.
*/
if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
- err = bitmap_new_disk_sb(bitmap);
+ err = md_bitmap_new_disk_sb(bitmap);
else
- err = bitmap_read_sb(bitmap);
+ err = md_bitmap_read_sb(bitmap);
} else {
err = 0;
if (mddev->bitmap_info.chunksize == 0 ||
@@ -1878,7 +1876,7 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
goto error;
bitmap->daemon_lastrun = jiffies;
- err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
+ err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
if (err)
goto error;
@@ -1891,11 +1889,11 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
return bitmap;
error:
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
return ERR_PTR(err);
}
-int bitmap_load(struct mddev *mddev)
+int md_bitmap_load(struct mddev *mddev)
{
int err = 0;
sector_t start = 0;
@@ -1915,10 +1913,10 @@ int bitmap_load(struct mddev *mddev)
*/
while (sector < mddev->resync_max_sectors) {
sector_t blocks;
- bitmap_start_sync(bitmap, sector, &blocks, 0);
+ md_bitmap_start_sync(bitmap, sector, &blocks, 0);
sector += blocks;
}
- bitmap_close_sync(bitmap);
+ md_bitmap_close_sync(bitmap);
if (mddev->degraded == 0
|| bitmap->events_cleared == mddev->events)
@@ -1927,7 +1925,7 @@ int bitmap_load(struct mddev *mddev)
start = mddev->recovery_cp;
mutex_lock(&mddev->bitmap_info.mutex);
- err = bitmap_init_from_disk(bitmap, start);
+ err = md_bitmap_init_from_disk(bitmap, start);
mutex_unlock(&mddev->bitmap_info.mutex);
if (err)
@@ -1940,29 +1938,29 @@ int bitmap_load(struct mddev *mddev)
mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
md_wakeup_thread(mddev->thread);
- bitmap_update_sb(bitmap);
+ md_bitmap_update_sb(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
err = -EIO;
out:
return err;
}
-EXPORT_SYMBOL_GPL(bitmap_load);
+EXPORT_SYMBOL_GPL(md_bitmap_load);
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
{
int rv = 0;
struct bitmap *bitmap;
- bitmap = bitmap_create(mddev, slot);
+ bitmap = md_bitmap_create(mddev, slot);
if (IS_ERR(bitmap)) {
rv = PTR_ERR(bitmap);
return ERR_PTR(rv);
}
- rv = bitmap_init_from_disk(bitmap, 0);
+ rv = md_bitmap_init_from_disk(bitmap, 0);
if (rv) {
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
return ERR_PTR(rv);
}
@@ -1973,7 +1971,7 @@ EXPORT_SYMBOL(get_bitmap_from_slot);
/* Loads the bitmap associated with slot and copies the resync information
* to our bitmap
*/
-int bitmap_copy_from_slot(struct mddev *mddev, int slot,
+int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
sector_t *low, sector_t *high, bool clear_bits)
{
int rv = 0, i, j;
@@ -1990,35 +1988,35 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
counts = &bitmap->counts;
for (j = 0; j < counts->chunks; j++) {
block = (sector_t)j << counts->chunkshift;
- if (bitmap_file_test_bit(bitmap, block)) {
+ if (md_bitmap_file_test_bit(bitmap, block)) {
if (!lo)
lo = block;
hi = block;
- bitmap_file_clear_bit(bitmap, block);
- bitmap_set_memory_bits(mddev->bitmap, block, 1);
- bitmap_file_set_bit(mddev->bitmap, block);
+ md_bitmap_file_clear_bit(bitmap, block);
+ md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
+ md_bitmap_file_set_bit(mddev->bitmap, block);
}
}
if (clear_bits) {
- bitmap_update_sb(bitmap);
+ md_bitmap_update_sb(bitmap);
/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
* BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
for (i = 0; i < bitmap->storage.file_pages; i++)
if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
- bitmap_unplug(bitmap);
+ md_bitmap_unplug(bitmap);
}
- bitmap_unplug(mddev->bitmap);
+ md_bitmap_unplug(mddev->bitmap);
*low = lo;
*high = hi;
return rv;
}
-EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
+EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
-void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
+void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
unsigned long chunk_kb;
struct bitmap_counts *counts;
@@ -2045,7 +2043,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
seq_printf(seq, "\n");
}
-int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, int init)
{
/* If chunk_size is 0, choose an appropriate chunk size.
@@ -2106,12 +2104,12 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
memset(&store, 0, sizeof(store));
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
- ret = bitmap_storage_alloc(&store, chunks,
- !bitmap->mddev->bitmap_info.external,
- mddev_is_clustered(bitmap->mddev)
- ? bitmap->cluster_slot : 0);
+ ret = md_bitmap_storage_alloc(&store, chunks,
+ !bitmap->mddev->bitmap_info.external,
+ mddev_is_clustered(bitmap->mddev)
+ ? bitmap->cluster_slot : 0);
if (ret) {
- bitmap_file_unmap(&store);
+ md_bitmap_file_unmap(&store);
goto err;
}
@@ -2120,7 +2118,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
ret = -ENOMEM;
if (!new_bp) {
- bitmap_file_unmap(&store);
+ md_bitmap_file_unmap(&store);
goto err;
}
@@ -2134,7 +2132,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
sizeof(bitmap_super_t));
- bitmap_file_unmap(&bitmap->storage);
+ md_bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;
old_counts = bitmap->counts;
@@ -2154,7 +2152,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (mddev_is_clustered(bitmap->mddev)) {
unsigned long page;
for (page = 0; page < pages; page++) {
- ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
+ ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
if (ret) {
unsigned long k;
@@ -2184,27 +2182,23 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap_counter_t *bmc_old, *bmc_new;
int set;
- bmc_old = bitmap_get_counter(&old_counts, block,
- &old_blocks, 0);
+ bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
set = bmc_old && NEEDED(*bmc_old);
if (set) {
- bmc_new = bitmap_get_counter(&bitmap->counts, block,
- &new_blocks, 1);
+ bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
if (*bmc_new == 0) {
/* need to set on-disk bits too. */
sector_t end = block + new_blocks;
sector_t start = block >> chunkshift;
start <<= chunkshift;
while (start < end) {
- bitmap_file_set_bit(bitmap, block);
+ md_bitmap_file_set_bit(bitmap, block);
start += 1 << chunkshift;
}
*bmc_new = 2;
- bitmap_count_page(&bitmap->counts,
- block, 1);
- bitmap_set_pending(&bitmap->counts,
- block);
+ md_bitmap_count_page(&bitmap->counts, block, 1);
+ md_bitmap_set_pending(&bitmap->counts, block);
}
*bmc_new |= NEEDED_MASK;
if (new_blocks < old_blocks)
@@ -2225,18 +2219,15 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int i;
while (block < (chunks << chunkshift)) {
bitmap_counter_t *bmc;
- bmc = bitmap_get_counter(&bitmap->counts, block,
- &new_blocks, 1);
+ bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
if (bmc) {
/* new space. It needs to be resynced, so
* we set NEEDED_MASK.
*/
if (*bmc == 0) {
*bmc = NEEDED_MASK | 2;
- bitmap_count_page(&bitmap->counts,
- block, 1);
- bitmap_set_pending(&bitmap->counts,
- block);
+ md_bitmap_count_page(&bitmap->counts, block, 1);
+ md_bitmap_set_pending(&bitmap->counts, block);
}
}
block += new_blocks;
@@ -2247,14 +2238,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
spin_unlock_irq(&bitmap->counts.lock);
if (!init) {
- bitmap_unplug(bitmap);
+ md_bitmap_unplug(bitmap);
bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
}
ret = 0;
err:
return ret;
}
-EXPORT_SYMBOL_GPL(bitmap_resize);
+EXPORT_SYMBOL_GPL(md_bitmap_resize);
static ssize_t
location_show(struct mddev *mddev, char *page)
@@ -2298,7 +2289,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
}
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
}
mddev->bitmap_info.offset = 0;
@@ -2337,18 +2328,18 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->pers) {
struct bitmap *bitmap;
mddev->pers->quiesce(mddev, 1);
- bitmap = bitmap_create(mddev, -1);
+ bitmap = md_bitmap_create(mddev, -1);
if (IS_ERR(bitmap))
rv = PTR_ERR(bitmap);
else {
mddev->bitmap = bitmap;
- rv = bitmap_load(mddev);
+ rv = md_bitmap_load(mddev);
if (rv)
mddev->bitmap_info.offset = 0;
}
mddev->pers->quiesce(mddev, 0);
if (rv) {
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
goto out;
}
}
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 5df35ca90f58..cfd7395de8fd 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -236,43 +236,43 @@ struct bitmap {
/* the bitmap API */
/* these are used only by md/bitmap */
-struct bitmap *bitmap_create(struct mddev *mddev, int slot);
-int bitmap_load(struct mddev *mddev);
-void bitmap_flush(struct mddev *mddev);
-void bitmap_destroy(struct mddev *mddev);
+struct bitmap *md_bitmap_create(struct mddev *mddev, int slot);
+int md_bitmap_load(struct mddev *mddev);
+void md_bitmap_flush(struct mddev *mddev);
+void md_bitmap_destroy(struct mddev *mddev);
-void bitmap_print_sb(struct bitmap *bitmap);
-void bitmap_update_sb(struct bitmap *bitmap);
-void bitmap_status(struct seq_file *seq, struct bitmap *bitmap);
+void md_bitmap_print_sb(struct bitmap *bitmap);
+void md_bitmap_update_sb(struct bitmap *bitmap);
+void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap);
-int bitmap_setallbits(struct bitmap *bitmap);
-void bitmap_write_all(struct bitmap *bitmap);
+int md_bitmap_setallbits(struct bitmap *bitmap);
+void md_bitmap_write_all(struct bitmap *bitmap);
-void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
+void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
/* these are exported */
-int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int behind);
-void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
+int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
+ unsigned long sectors, int behind);
+void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int success, int behind);
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
-void bitmap_close_sync(struct bitmap *bitmap);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
-void bitmap_sync_with_cluster(struct mddev *mddev,
- sector_t old_lo, sector_t old_hi,
- sector_t new_lo, sector_t new_hi);
-
-void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(struct mddev *mddev);
-
-int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
- int chunksize, int init);
+int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
+void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
+void md_bitmap_close_sync(struct bitmap *bitmap);
+void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
+void md_bitmap_sync_with_cluster(struct mddev *mddev,
+ sector_t old_lo, sector_t old_hi,
+ sector_t new_lo, sector_t new_hi);
+
+void md_bitmap_unplug(struct bitmap *bitmap);
+void md_bitmap_daemon_work(struct mddev *mddev);
+
+int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ int chunksize, int init);
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
-int bitmap_copy_from_slot(struct mddev *mddev, int slot,
- sector_t *lo, sector_t *hi, bool clear_bits);
-void bitmap_free(struct bitmap *bitmap);
-void bitmap_wait_behind_writes(struct mddev *mddev);
+int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *lo, sector_t *hi, bool clear_bits);
+void md_bitmap_free(struct bitmap *bitmap);
+void md_bitmap_wait_behind_writes(struct mddev *mddev);
#endif
#endif
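The rename moves the whole md bitmap API into an md_bitmap_ namespace so it no longer collides with the generic bitmap_*() helpers from <linux/bitmap.h>. As a minimal sketch of a caller under the renamed API (not part of the patch; my_write and its bookkeeping are hypothetical), a personality's write path pairs the two exported calls like this:

static void my_write(struct bitmap *bitmap, sector_t sector,
		     unsigned long sectors, int behind)
{
	/* mark the region dirty before the member-device writes go out */
	md_bitmap_startwrite(bitmap, sector, sectors, behind);

	/* ... submit the writes and wait for completion ... */

	/* success == 1 allows the dirty bits to be cleared again later */
	md_bitmap_endwrite(bitmap, sector, sectors, 1, behind);
}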
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 021cbf9ef1bf..94329e03001e 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -304,15 +304,6 @@ static void recover_bitmaps(struct md_thread *thread)
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;
- /* Clear suspend_area associated with the bitmap */
- spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
- spin_unlock_irq(&cinfo->suspend_lock);
-
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
@@ -326,19 +317,35 @@ static void recover_bitmaps(struct md_thread *thread)
str, ret);
goto clear_bit;
}
- ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
+ ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
if (ret) {
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto clear_bit;
}
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
/* wake up thread to continue resync in case resync
* is not finished */
if (mddev->recovery_cp != MaxSector) {
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
+ /*
+ * clear the REMOTE flag since we will launch a
+ * resync thread on the current node.
+ */
+ clear_bit(MD_RESYNCING_REMOTE,
+ &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
}
}
clear_bit:
@@ -457,6 +464,11 @@ static void process_suspend_info(struct mddev *mddev,
struct suspend_info *s;
if (!hi) {
+ /*
+ * clear the REMOTE flag since resync or recovery has finished
+ * on the remote node.
+ */
+ clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
remove_suspend_info(mddev, slot);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -480,9 +492,7 @@ static void process_suspend_info(struct mddev *mddev,
* resync thread is running on another node,
* so we don't need to do the resync again
* for the same section */
- bitmap_sync_with_cluster(mddev, cinfo->sync_low,
- cinfo->sync_hi,
- lo, hi);
+ md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi);
cinfo->sync_low = lo;
cinfo->sync_hi = hi;
@@ -585,6 +595,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
revalidate_disk(mddev->gendisk);
break;
case RESYNCING:
+ set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
process_suspend_info(mddev, le32_to_cpu(msg->slot),
le64_to_cpu(msg->low),
le64_to_cpu(msg->high));
@@ -829,7 +840,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
}
/* Read the disk bitmap sb and check if it needs recovery */
- ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
+ ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
if (ret) {
pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
lockres_free(bm_lockres);
@@ -1127,13 +1138,13 @@ static int cluster_check_sync_size(struct mddev *mddev)
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
pr_err("md-cluster: Cannot initialize %s\n", str);
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
return -1;
}
bm_lockres->flags |= DLM_LKF_NOQUEUE;
rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (!rv)
- bitmap_update_sb(bitmap);
+ md_bitmap_update_sb(bitmap);
lockres_free(bm_lockres);
sb = kmap_atomic(bitmap->storage.sb_page);
@@ -1141,11 +1152,11 @@ static int cluster_check_sync_size(struct mddev *mddev)
sync_size = sb->sync_size;
else if (sync_size != sb->sync_size) {
kunmap_atomic(sb);
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
return -1;
}
kunmap_atomic(sb);
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
}
return (my_sync_size == sync_size) ? 0 : -1;
@@ -1265,8 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
static int resync_finish(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
dlm_unlock_sync(cinfo->resync_lockres);
- return resync_info_update(mddev, 0, 0);
+
+ /*
+ * If the resync thread was interrupted, we can't say resync is finished;
+ * another node will launch a resync thread to continue.
+ */
+ if (test_bit(MD_CLOSING, &mddev->flags))
+ return 0;
+ else
+ return resync_info_update(mddev, 0, 0);
}
static int area_resyncing(struct mddev *mddev, int direction,
@@ -1442,7 +1463,7 @@ static int gather_bitmaps(struct md_rdev *rdev)
for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
if (sn == (cinfo->slot_number - 1))
continue;
- err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
+ err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
if (err) {
pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
goto out;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 29b0cd9ec951..63ceabb4e020 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -204,10 +204,6 @@ static int start_readonly;
*/
static bool create_on_open = true;
-/* bio_clone_mddev
- * like bio_clone_bioset, but with a local bio set
- */
-
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
@@ -335,6 +331,7 @@ EXPORT_SYMBOL(md_handle_request);
static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
+ const int sgrp = op_stat_group(bio_op(bio));
struct mddev *mddev = q->queuedata;
unsigned int sectors;
int cpu;
@@ -363,8 +360,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
md_handle_request(mddev, bio);
cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[sgrp]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[sgrp], sectors);
part_stat_unlock();
return BLK_QC_T_NONE;
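The statistics hunk above switches from indexing by raw data direction to op_stat_group(), which (in this kernel series) maps a bio's operation to its accounting group; that is what lets discards be counted separately from plain reads and writes. A sketch of the resulting pattern, mirroring md_make_request (account_bio is a hypothetical helper):

static void account_bio(struct hd_struct *part, struct bio *bio,
			unsigned int sectors)
{
	const int sgrp = op_stat_group(bio_op(bio));
	int cpu = part_stat_lock();

	part_stat_inc(cpu, part, ios[sgrp]);
	part_stat_add(cpu, part, sectors[sgrp], sectors);
	part_stat_unlock();
}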
@@ -2571,7 +2568,7 @@ repeat:
if (mddev->queue)
blk_add_trace_msg(mddev->queue, "md md_update_sb");
rewrite:
- bitmap_update_sb(mddev->bitmap);
+ md_bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
@@ -4384,10 +4381,10 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
if (buf == end) break;
}
if (*end && !isspace(*end)) break;
- bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
+ md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
}
- bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
+ md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
mddev_unlock(mddev);
return len;
@@ -5547,7 +5544,8 @@ int md_run(struct mddev *mddev)
else
pr_warn("md: personality for level %s is not loaded!\n",
mddev->clevel);
- return -EINVAL;
+ err = -EINVAL;
+ goto abort;
}
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
@@ -5560,7 +5558,8 @@ int md_run(struct mddev *mddev)
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
module_put(pers->owner);
- return -EINVAL;
+ err = -EINVAL;
+ goto abort;
}
if (pers->sync_request) {
@@ -5613,7 +5612,7 @@ int md_run(struct mddev *mddev)
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
struct bitmap *bitmap;
- bitmap = bitmap_create(mddev, -1);
+ bitmap = md_bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap);
pr_warn("%s: failed to create bitmap (%d)\n",
@@ -5628,8 +5627,8 @@ int md_run(struct mddev *mddev)
pers->free(mddev, mddev->private);
mddev->private = NULL;
module_put(pers->owner);
- bitmap_destroy(mddev);
- return err;
+ md_bitmap_destroy(mddev);
+ goto abort;
}
if (mddev->queue) {
bool nonrot = true;
@@ -5713,9 +5712,9 @@ static int do_md_run(struct mddev *mddev)
err = md_run(mddev);
if (err)
goto out;
- err = bitmap_load(mddev);
+ err = md_bitmap_load(mddev);
if (err) {
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
goto out;
}
@@ -5857,7 +5856,7 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
- bitmap_flush(mddev);
+ md_bitmap_flush(mddev);
if (mddev->ro == 0 &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
@@ -5879,7 +5878,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
- bitmap_wait_behind_writes(mddev);
+ md_bitmap_wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
@@ -5892,7 +5891,7 @@ static void mddev_detach(struct mddev *mddev)
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
flush_workqueue(md_misc_wq);
@@ -6711,21 +6710,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (fd >= 0) {
struct bitmap *bitmap;
- bitmap = bitmap_create(mddev, -1);
+ bitmap = md_bitmap_create(mddev, -1);
mddev_suspend(mddev);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
- err = bitmap_load(mddev);
+ err = md_bitmap_load(mddev);
} else
err = PTR_ERR(bitmap);
if (err) {
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
fd = -1;
}
mddev_resume(mddev);
} else if (fd < 0) {
mddev_suspend(mddev);
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev_resume(mddev);
}
}
@@ -7011,15 +7010,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- bitmap = bitmap_create(mddev, -1);
+ bitmap = md_bitmap_create(mddev, -1);
mddev_suspend(mddev);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
- rv = bitmap_load(mddev);
+ rv = md_bitmap_load(mddev);
} else
rv = PTR_ERR(bitmap);
if (rv)
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev_resume(mddev);
} else {
/* remove the bitmap */
@@ -7044,7 +7043,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
md_cluster_ops->leave(mddev);
}
mddev_suspend(mddev);
- bitmap_destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev_resume(mddev);
mddev->bitmap_info.offset = 0;
}
@@ -7678,6 +7677,23 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
resync -= atomic_read(&mddev->recovery_active);
if (resync == 0) {
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags) &&
+ rdev->recovery_offset != MaxSector &&
+ rdev->recovery_offset) {
+ seq_printf(seq, "\trecover=REMOTE");
+ return 1;
+ }
+ if (mddev->reshape_position != MaxSector)
+ seq_printf(seq, "\treshape=REMOTE");
+ else
+ seq_printf(seq, "\tresync=REMOTE");
+ return 1;
+ }
if (mddev->recovery_cp < MaxSector) {
seq_printf(seq, "\tresync=PENDING");
return 1;
@@ -7907,7 +7923,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
} else
seq_printf(seq, "\n ");
- bitmap_status(seq, mddev->bitmap);
+ md_bitmap_status(seq, mddev->bitmap);
seq_printf(seq, "\n");
}
@@ -8044,8 +8060,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
- curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]) -
+ curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
@@ -8779,7 +8794,7 @@ void md_check_recovery(struct mddev *mddev)
return;
if (mddev->bitmap)
- bitmap_daemon_work(mddev);
+ md_bitmap_daemon_work(mddev);
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
@@ -8916,7 +8931,7 @@ void md_check_recovery(struct mddev *mddev)
* which has the bitmap stored on all devices.
* So make sure all bitmap pages get written
*/
- bitmap_write_all(mddev->bitmap);
+ md_bitmap_write_all(mddev->bitmap);
}
INIT_WORK(&mddev->del_work, md_start_sync);
queue_work(md_misc_wq, &mddev->del_work);
@@ -9164,7 +9179,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (ret)
pr_info("md-cluster: resize failed\n");
else
- bitmap_update_sb(mddev->bitmap);
+ md_bitmap_update_sb(mddev->bitmap);
}
/* Check for change of roles in the active devices */
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2d148bdaba74..8afd6bfdbfb9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -496,6 +496,7 @@ enum recovery_flags {
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
+ MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
static inline int __must_check mddev_lock(struct mddev *mddev)
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 829b4ce057d8..0a3b8ae4a29c 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -69,9 +69,9 @@ static struct dm_block_validator index_validator = {
*/
#define BITMAP_CSUM_XOR 240779
-static void bitmap_prepare_for_write(struct dm_block_validator *v,
- struct dm_block *b,
- size_t block_size)
+static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
{
struct disk_bitmap_header *disk_header = dm_block_data(b);
@@ -81,9 +81,9 @@ static void bitmap_prepare_for_write(struct dm_block_validator *v,
BITMAP_CSUM_XOR));
}
-static int bitmap_check(struct dm_block_validator *v,
- struct dm_block *b,
- size_t block_size)
+static int dm_bitmap_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
{
struct disk_bitmap_header *disk_header = dm_block_data(b);
__le32 csum_disk;
@@ -108,8 +108,8 @@ static int bitmap_check(struct dm_block_validator *v,
static struct dm_block_validator dm_sm_bitmap_validator = {
.name = "sm_bitmap",
- .prepare_for_write = bitmap_prepare_for_write,
- .check = bitmap_check
+ .prepare_for_write = dm_bitmap_prepare_for_write,
+ .check = dm_bitmap_check,
};
/*----------------------------------------------------------------*/
@@ -124,7 +124,7 @@ static void *dm_bitmap_data(struct dm_block *b)
#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
-static unsigned bitmap_word_used(void *addr, unsigned b)
+static unsigned dm_bitmap_word_used(void *addr, unsigned b)
{
__le64 *words_le = addr;
__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
@@ -170,7 +170,7 @@ static int sm_find_free(void *addr, unsigned begin, unsigned end,
{
while (begin < end) {
if (!(begin & (ENTRIES_PER_WORD - 1)) &&
- bitmap_word_used(addr, begin)) {
+ dm_bitmap_word_used(addr, begin)) {
begin += ENTRIES_PER_WORD;
continue;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8e05c1092aef..4e990246225e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -385,10 +385,10 @@ static void close_write(struct r1bio *r1_bio)
r1_bio->behind_master_bio = NULL;
}
/* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ !test_bit(R1BIO_Degraded, &r1_bio->state),
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
md_write_end(r1_bio->mddev);
}
@@ -781,7 +781,7 @@ static int raid1_congested(struct mddev *mddev, int bits)
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
- bitmap_unplug(conf->mddev->bitmap);
+ md_bitmap_unplug(conf->mddev->bitmap);
wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */
@@ -1470,10 +1470,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
alloc_behind_master_bio(r1_bio, bio);
}
- bitmap_startwrite(bitmap, r1_bio->sector,
- r1_bio->sectors,
- test_bit(R1BIO_BehindIO,
- &r1_bio->state));
+ md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
first_clone = 0;
}
@@ -1880,8 +1878,7 @@ static void end_sync_write(struct bio *bio)
long sectors_to_go = r1_bio->sectors;
/* make sure these bits don't get cleared. */
do {
- bitmap_end_sync(mddev->bitmap, s,
- &sync_blocks, 1);
+ md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
@@ -2626,12 +2623,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* We can find the current address in mddev->curr_resync
*/
if (mddev->curr_resync < max_sector) /* aborted */
- bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
+ md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
+ &sync_blocks, 1);
else /* completed sync */
conf->fullsync = 0;
- bitmap_close_sync(mddev->bitmap);
+ md_bitmap_close_sync(mddev->bitmap);
close_sync(conf);
if (mddev_is_clustered(mddev)) {
@@ -2651,7 +2648,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks.
* This call to bitmap_start_sync doesn't actually record anything
*/
- if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+ if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
@@ -2669,7 +2666,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* sector_nr + two times RESYNC_SECTORS
*/
- bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
@@ -2828,8 +2825,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (len == 0)
break;
if (sync_blocks == 0) {
- if (!bitmap_start_sync(mddev->bitmap, sector_nr,
- &sync_blocks, still_degraded) &&
+ if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
+ &sync_blocks, still_degraded) &&
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
@@ -3165,7 +3162,7 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize)
return -EINVAL;
if (mddev->bitmap) {
- int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
+ int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
if (ret)
return ret;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 478cf446827f..981898049491 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -440,10 +440,10 @@ static void raid10_end_read_request(struct bio *bio)
static void close_write(struct r10bio *r10_bio)
{
/* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
- r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- 0);
+ md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+ r10_bio->sectors,
+ !test_bit(R10BIO_Degraded, &r10_bio->state),
+ 0);
md_write_end(r10_bio->mddev);
}
@@ -917,7 +917,7 @@ static void flush_pending_writes(struct r10conf *conf)
blk_start_plug(&plug);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
- bitmap_unplug(conf->mddev->bitmap);
+ md_bitmap_unplug(conf->mddev->bitmap);
wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */
@@ -1102,7 +1102,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* we aren't scheduling, so we can do the write-out directly. */
bio = bio_list_get(&plug->pending);
- bitmap_unplug(mddev->bitmap);
+ md_bitmap_unplug(mddev->bitmap);
wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */
@@ -1519,7 +1519,7 @@ retry_write:
}
atomic_set(&r10_bio->remaining, 1);
- bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
+ md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio)
@@ -2991,13 +2991,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
- bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
+ md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
+ &sync_blocks, 1);
else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
- bitmap_end_sync(mddev->bitmap, sect,
- &sync_blocks, 1);
+ md_bitmap_end_sync(mddev->bitmap, sect,
+ &sync_blocks, 1);
}
} else {
/* completed sync */
@@ -3018,7 +3018,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
conf->fullsync = 0;
}
- bitmap_close_sync(mddev->bitmap);
+ md_bitmap_close_sync(mddev->bitmap);
close_sync(conf);
*skipped = 1;
return sectors_skipped;
@@ -3112,8 +3112,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in
* the bitmap
*/
- must_sync = bitmap_start_sync(mddev->bitmap, sect,
- &sync_blocks, 1);
+ must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
+ &sync_blocks, 1);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
@@ -3158,8 +3158,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
- must_sync = bitmap_start_sync(mddev->bitmap, sect,
- &sync_blocks, still_degraded);
+ must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
+ &sync_blocks, still_degraded);
any_working = 0;
for (j=0; j<conf->copies;j++) {
@@ -3335,13 +3335,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* safety reason, which ensures curr_resync_completed is
* updated in bitmap_cond_end_sync.
*/
- bitmap_cond_end_sync(mddev->bitmap, sector_nr,
- mddev_is_clustered(mddev) &&
- (sector_nr + 2 * RESYNC_SECTORS >
- conf->cluster_sync_high));
+ md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- if (!bitmap_start_sync(mddev->bitmap, sector_nr,
- &sync_blocks, mddev->degraded) &&
+ if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
+ &sync_blocks, mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) {
/* We can skip this block */
@@ -3893,6 +3892,13 @@ static int raid10_run(struct mddev *mddev)
disk->rdev->saved_raid_disk < 0)
conf->fullsync = 1;
}
+
+ if (disk->replacement &&
+ !test_bit(In_sync, &disk->replacement->flags) &&
+ disk->replacement->saved_raid_disk < 0) {
+ conf->fullsync = 1;
+ }
+
disk->recovery_disabled = mddev->recovery_disabled - 1;
}
@@ -4015,7 +4021,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > size)
return -EINVAL;
if (mddev->bitmap) {
- int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
+ int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
if (ret)
return ret;
}
@@ -4280,10 +4286,9 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) {
- ret = bitmap_resize(mddev->bitmap,
- raid10_size(mddev, 0,
- conf->geo.raid_disks),
- 0, 0);
+ ret = md_bitmap_resize(mddev->bitmap,
+ raid10_size(mddev, 0, conf->geo.raid_disks),
+ 0, 0);
if (ret)
goto abort;
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 2b775abf377b..e6e925add700 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -324,10 +324,10 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS,
- !test_bit(STRIPE_DEGRADED, &sh->state),
- 0);
+ md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ 0);
}
}
}
@@ -717,7 +717,6 @@ static void r5c_disable_writeback_async(struct work_struct *work)
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
- struct bio *bio;
struct r5l_meta_block *block;
unsigned long flags;
u32 crc;
@@ -730,7 +729,6 @@ static void r5l_submit_current_io(struct r5l_log *log)
block->meta_size = cpu_to_le32(io->meta_offset);
crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
- bio = io->current_bio;
log->current_io = NULL;
spin_lock_irqsave(&log->io_list_lock, flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2031506a0ecd..4ce0d7502fad 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -409,16 +409,14 @@ void raid5_release_stripe(struct stripe_head *sh)
md_wakeup_thread(conf->mddev->thread);
return;
slow_path:
- local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
- if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
+ if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
INIT_LIST_HEAD(&list);
hash = sh->hash_lock_index;
do_release_stripe(conf, sh, &list);
- spin_unlock(&conf->device_lock);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
release_inactive_stripe_list(conf, &list, hash);
}
- local_irq_restore(flags);
}
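atomic_dec_and_lock_irqsave() folds the old local_irq_save() + atomic_dec_and_lock() sequence into one call, so interrupts are only disabled on the slow path where the count actually reaches zero and the lock is taken. A minimal sketch of the pattern (struct my_object and its teardown are placeholders, not from this patch):

struct my_object {
	atomic_t count;
	spinlock_t lock;
	/* ... payload ... */
};

static void put_object(struct my_object *obj)
{
	unsigned long flags;

	/* Fast path: the count stays above zero and IRQs are never
	 * touched; only the final put takes the lock with IRQs off. */
	if (!atomic_dec_and_lock_irqsave(&obj->count, &obj->lock, flags))
		return;

	/* ... tear the object down under the lock ... */
	spin_unlock_irqrestore(&obj->lock, flags);
}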
static inline void remove_hash(struct stripe_head *sh)
@@ -3301,8 +3299,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
*/
set_bit(STRIPE_BITMAP_PENDING, &sh->state);
spin_unlock_irq(&sh->stripe_lock);
- bitmap_startwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0);
+ md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0);
spin_lock_irq(&sh->stripe_lock);
clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
if (!sh->batch_head) {
@@ -3392,8 +3390,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi;
}
if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0, 0);
+ md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0, 0);
bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
@@ -3438,8 +3436,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
}
}
if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0, 0);
+ md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0, 0);
/* If we were in the middle of a write, the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/
@@ -3779,10 +3777,10 @@ returnbi:
bio_endio(wbi);
wbi = wbi2;
}
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS,
- !test_bit(STRIPE_DEGRADED, &sh->state),
- 0);
+ md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ 0);
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head,
@@ -4521,6 +4519,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->failed++;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
+ else if (!rdev) {
+ rdev = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
+ }
}
if (test_bit(R5_InJournal, &dev->flags))
@@ -5539,10 +5543,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
for (d = 0;
d < conf->raid_disks - conf->max_degraded;
d++)
- bitmap_startwrite(mddev->bitmap,
- sh->sector,
- STRIPE_SECTORS,
- 0);
+ md_bitmap_startwrite(mddev->bitmap,
+ sh->sector,
+ STRIPE_SECTORS,
+ 0);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
@@ -6020,11 +6024,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (mddev->curr_resync < max_sector) /* aborted */
- bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
+ md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
+ &sync_blocks, 1);
else /* completed sync */
conf->fullsync = 0;
- bitmap_close_sync(mddev->bitmap);
+ md_bitmap_close_sync(mddev->bitmap);
return 0;
}
@@ -6053,7 +6057,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync &&
- !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+ !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
sync_blocks >= STRIPE_SECTORS) {
/* we can skip this block, and probably more */
sync_blocks /= STRIPE_SECTORS;
@@ -6061,7 +6065,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}
- bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
+ md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
if (sh == NULL) {
@@ -6084,7 +6088,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
rcu_read_unlock();
- bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
+ md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -6285,7 +6289,7 @@ static void raid5d(struct md_thread *thread)
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
- bitmap_unplug(mddev->bitmap);
+ md_bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf, conf->temp_inactive_list);
@@ -7741,7 +7745,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize)
return -EINVAL;
if (mddev->bitmap) {
- int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
+ int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
if (ret)
return ret;
}
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index b7fad0ec5710..030b2602faf0 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -74,7 +74,7 @@ void cec_queue_event_fh(struct cec_fh *fh,
const struct cec_event *new_ev, u64 ts)
{
static const u16 max_events[CEC_NUM_EVENTS] = {
- 1, 1, 800, 800, 8, 8,
+ 1, 1, 800, 800, 8, 8, 8, 8
};
struct cec_event_entry *entry;
unsigned int ev_idx = new_ev->event - 1;
@@ -176,6 +176,22 @@ void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
}
EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
+/* Notify userspace that the 5V pin changed state at the given time. */
+void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
+{
+ struct cec_event ev = {
+ .event = is_high ? CEC_EVENT_PIN_5V_HIGH :
+ CEC_EVENT_PIN_5V_LOW,
+ };
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
+ mutex_unlock(&adap->devnode.lock);
+}
+EXPORT_SYMBOL_GPL(cec_queue_pin_5v_event);
+
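cec_queue_pin_5v_event() gives CEC pin drivers a way to report 5V line transitions to userspace with a precise timestamp. A hypothetical caller (the priv wiring and my_read_5v_gpio() are assumptions, not from this patch) would invoke it from the 5V pin's interrupt handler:

static irqreturn_t my_cec_5v_irq(int irq, void *priv)
{
	struct cec_adapter *adap = priv;

	/* my_read_5v_gpio() stands in for the driver's level read */
	cec_queue_pin_5v_event(adap, my_read_5v_gpio(), ktime_get());
	return IRQ_HANDLED;
}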
/*
* Queue a new message for this filehandle.
*
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 10b67fc40318..b6536bbad530 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -579,6 +579,14 @@ static int cec_open(struct inode *inode, struct file *filp)
cec_queue_event_fh(fh, &ev, 0);
}
}
+ if (adap->pin && adap->pin->ops->read_5v) {
+ err = adap->pin->ops->read_5v(adap);
+ if (err >= 0) {
+ ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
+ CEC_EVENT_PIN_5V_LOW;
+ cec_queue_event_fh(fh, &ev, 0);
+ }
+ }
#endif
list_add(&fh->list, &devnode->fhs);
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 16dffa06c913..dd2078b27a41 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -21,6 +21,7 @@ struct cec_notifier {
struct list_head head;
struct kref kref;
struct device *dev;
+ const char *conn;
struct cec_adapter *cec_adap;
void (*callback)(struct cec_adapter *adap, u16 pa);
@@ -30,13 +31,14 @@ struct cec_notifier {
static LIST_HEAD(cec_notifiers);
static DEFINE_MUTEX(cec_notifiers_lock);
-struct cec_notifier *cec_notifier_get(struct device *dev)
+struct cec_notifier *cec_notifier_get_conn(struct device *dev, const char *conn)
{
struct cec_notifier *n;
mutex_lock(&cec_notifiers_lock);
list_for_each_entry(n, &cec_notifiers, head) {
- if (n->dev == dev) {
+ if (n->dev == dev &&
+ (!conn || !strcmp(n->conn, conn))) {
kref_get(&n->kref);
mutex_unlock(&cec_notifiers_lock);
return n;
@@ -46,6 +48,8 @@ struct cec_notifier *cec_notifier_get(struct device *dev)
if (!n)
goto unlock;
n->dev = dev;
+ if (conn)
+ n->conn = kstrdup(conn, GFP_KERNEL);
n->phys_addr = CEC_PHYS_ADDR_INVALID;
mutex_init(&n->lock);
kref_init(&n->kref);
@@ -54,7 +58,7 @@ unlock:
mutex_unlock(&cec_notifiers_lock);
return n;
}
-EXPORT_SYMBOL_GPL(cec_notifier_get);
+EXPORT_SYMBOL_GPL(cec_notifier_get_conn);
static void cec_notifier_release(struct kref *kref)
{
@@ -62,6 +66,7 @@ static void cec_notifier_release(struct kref *kref)
container_of(kref, struct cec_notifier, kref);
list_del(&n->head);
+ kfree(n->conn);
kfree(n);
}
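With the connector name folded into the lookup key, the old single-argument entry point can survive as a trivial wrapper; a sketch of how that presumably looks after this change (the wrapper itself is not shown in this hunk):

static inline struct cec_notifier *cec_notifier_get(struct device *dev)
{
	/* a NULL conn matches the first notifier registered for dev */
	return cec_notifier_get_conn(dev, NULL);
}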
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 40891f4f842b..c95d4583498e 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -500,7 +500,7 @@ void smsdvb_debugfs_release(struct smsdvb_client_t *client)
client->debugfs = NULL;
}
-int smsdvb_debugfs_register(void)
+void smsdvb_debugfs_register(void)
{
struct dentry *d;
@@ -517,15 +517,15 @@ int smsdvb_debugfs_register(void)
d = debugfs_create_dir("smsdvb", usb_debug_root);
if (IS_ERR_OR_NULL(d)) {
pr_err("Couldn't create sysfs node for smsdvb\n");
- return PTR_ERR(d);
- } else {
- smsdvb_debugfs_usb_root = d;
+ return;
}
- return 0;
+ smsdvb_debugfs_usb_root = d;
}
void smsdvb_debugfs_unregister(void)
{
+ if (!smsdvb_debugfs_usb_root)
+ return;
debugfs_remove_recursive(smsdvb_debugfs_usb_root);
smsdvb_debugfs_usb_root = NULL;
}
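Returning void here follows the usual debugfs convention: failing to create debug entries should never fail driver load, so the error is logged and swallowed. A minimal sketch of the convention (names are placeholders):

static struct dentry *my_root;

static void my_debugfs_register(void)
{
	struct dentry *d = debugfs_create_dir("mydrv", NULL);

	/* on failure just run without debugfs; callers never see an error */
	my_root = IS_ERR_OR_NULL(d) ? NULL : d;
}

static void my_debugfs_unregister(void)
{
	debugfs_remove_recursive(my_root);	/* NULL is a safe no-op */
	my_root = NULL;
}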
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index c0faad1ba428..43cfd1dbda01 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1047,9 +1047,9 @@ static void smsdvb_release(struct dvb_frontend *fe)
static const struct dvb_frontend_ops smsdvb_fe_ops = {
.info = {
.name = "Siano Mobile Digital MDTV Receiver",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 250000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h
index b15754d95ec0..befeb9817e54 100644
--- a/drivers/media/common/siano/smsdvb.h
+++ b/drivers/media/common/siano/smsdvb.h
@@ -107,7 +107,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S {
int smsdvb_debugfs_create(struct smsdvb_client_t *client);
void smsdvb_debugfs_release(struct smsdvb_client_t *client);
-int smsdvb_debugfs_register(void);
+void smsdvb_debugfs_register(void);
void smsdvb_debugfs_unregister(void);
#else
@@ -119,10 +119,7 @@ static inline int smsdvb_debugfs_create(struct smsdvb_client_t *client)
static inline void smsdvb_debugfs_release(struct smsdvb_client_t *client) {}
-static inline int smsdvb_debugfs_register(void)
-{
- return 0;
-};
+static inline void smsdvb_debugfs_register(void) {}
static inline void smsdvb_debugfs_unregister(void) {};
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index f32ec7342ef0..5653e8eebe2b 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -1377,6 +1377,11 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
struct vb2_buffer *vb;
int ret;
+ if (q->error) {
+ dprintk(1, "fatal error occurred on queue\n");
+ return -EIO;
+ }
+
vb = q->bufs[index];
switch (vb->state) {
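With this check in place, a driver that hits an unrecoverable condition can call vb2_queue_error() and every later queue attempt fails fast with -EIO instead of feeding buffers to a dead queue. Sketch (the surrounding driver context is assumed):

static void my_handle_fatal_hw_error(struct vb2_queue *q)
{
	/* marks the queue dead; only a streamoff/reinit recovers it,
	 * and vb2_core_qbuf() now rejects new buffers with -EIO */
	vb2_queue_error(q);
}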
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index f1178f6f434d..aff0ab7bf83d 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -222,7 +222,7 @@ struct vb2_dc_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_dc_attachment *attach;
@@ -358,7 +358,6 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
.map_dma_buf = vb2_dc_dmabuf_ops_map,
.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
.map = vb2_dc_dmabuf_ops_kmap,
- .map_atomic = vb2_dc_dmabuf_ops_kmap,
.vmap = vb2_dc_dmabuf_ops_vmap,
.mmap = vb2_dc_dmabuf_ops_mmap,
.release = vb2_dc_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 753ed3138dcc..015e737095cd 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -371,7 +371,7 @@ struct vb2_dma_sg_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_dma_sg_attachment *attach;
@@ -507,7 +507,6 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
.map = vb2_dma_sg_dmabuf_ops_kmap,
- .map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
.release = vb2_dma_sg_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 359fb9804d16..6dfbd5b05907 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -209,7 +209,7 @@ struct vb2_vmalloc_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_vmalloc_attachment *attach;
@@ -346,7 +346,6 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
.map = vb2_vmalloc_dmabuf_ops_kmap,
- .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
.vmap = vb2_vmalloc_dmabuf_ops_vmap,
.mmap = vb2_vmalloc_dmabuf_ops_mmap,
.release = vb2_vmalloc_dmabuf_ops_release,
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 1310526b0d49..4d371cea0d5d 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1391,7 +1391,7 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
struct dvb_ca_slot *sl;
slot = info->num;
- if ((slot > ca->slot_count) || (slot < 0)) {
+ if ((slot >= ca->slot_count) || (slot < 0)) {
err = -EINVAL;
goto out_unlock;
}
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index ce25aef39008..c4e7ebfe4d29 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -894,21 +894,67 @@ static int dvb_frontend_start(struct dvb_frontend *fe)
}
static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
- u32 *freq_min, u32 *freq_max)
+ u32 *freq_min, u32 *freq_max,
+ u32 *tolerance)
{
- *freq_min = max(fe->ops.info.frequency_min, fe->ops.tuner_ops.info.frequency_min);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 tuner_min = fe->ops.tuner_ops.info.frequency_min_hz;
+ u32 tuner_max = fe->ops.tuner_ops.info.frequency_max_hz;
+ u32 frontend_min = fe->ops.info.frequency_min_hz;
+ u32 frontend_max = fe->ops.info.frequency_max_hz;
+
+ *freq_min = max(frontend_min, tuner_min);
- if (fe->ops.info.frequency_max == 0)
- *freq_max = fe->ops.tuner_ops.info.frequency_max;
- else if (fe->ops.tuner_ops.info.frequency_max == 0)
- *freq_max = fe->ops.info.frequency_max;
+ if (frontend_max == 0)
+ *freq_max = tuner_max;
+ else if (tuner_max == 0)
+ *freq_max = frontend_max;
else
- *freq_max = min(fe->ops.info.frequency_max, fe->ops.tuner_ops.info.frequency_max);
+ *freq_max = min(frontend_max, tuner_max);
if (*freq_min == 0 || *freq_max == 0)
dev_warn(fe->dvb->device,
"DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
fe->dvb->num, fe->id);
+
+ /* If the standard is for satellite, convert frequencies to kHz */
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_ISDBS:
+ *freq_min /= kHz;
+ *freq_max /= kHz;
+ if (tolerance)
+ *tolerance = fe->ops.info.frequency_tolerance_hz / kHz;
+
+ break;
+ default:
+ if (tolerance)
+ *tolerance = fe->ops.info.frequency_tolerance_hz;
+ break;
+ }
+}
+
+static u32 dvb_frontend_get_stepsize(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 fe_step = fe->ops.info.frequency_stepsize_hz;
+ u32 tuner_step = fe->ops.tuner_ops.info.frequency_step_hz;
+ u32 step = max(fe_step, tuner_step);
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_ISDBS:
+ step /= kHz;
+ break;
+ default:
+ break;
+ }
+
+ return step;
}
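The kHz/MHz macros and the *_hz field names make the internal bookkeeping unambiguous: dvb_frontend_ops now stores everything in Hz, and only the legacy DVBv3 surface converts satellite systems back to kHz. A worked sketch of that conversion (v3_frequency is a hypothetical helper, not in the patch):

static u32 v3_frequency(struct dvb_frontend *fe, u32 freq_hz)
{
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBS:
	case SYS_DVBS2:
	case SYS_TURBO:
	case SYS_ISDBS:
		/* e.g. 950 * MHz = 950000000 Hz is reported as 950000 kHz */
		return freq_hz / kHz;
	default:
		return freq_hz;
	}
}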
static int dvb_frontend_check_parameters(struct dvb_frontend *fe)
@@ -918,7 +964,7 @@ static int dvb_frontend_check_parameters(struct dvb_frontend *fe)
u32 freq_max;
/* range check: frequency */
- dvb_frontend_get_frequency_limits(fe, &freq_min, &freq_max);
+ dvb_frontend_get_frequency_limits(fe, &freq_min, &freq_max, NULL);
if ((freq_min && c->frequency < freq_min) ||
(freq_max && c->frequency > freq_max)) {
dev_warn(fe->dvb->device, "DVB: adapter %i frontend %i frequency %u out of range (%u..%u)\n",
@@ -2244,8 +2290,8 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
case SYS_ISDBT:
case SYS_DTMB:
fepriv->min_delay = HZ / 20;
- fepriv->step_size = fe->ops.info.frequency_stepsize * 2;
- fepriv->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ fepriv->step_size = dvb_frontend_get_stepsize(fe) * 2;
+ fepriv->max_drift = (dvb_frontend_get_stepsize(fe) * 2) + 1;
break;
default:
/*
@@ -2374,9 +2420,17 @@ static int dvb_frontend_handle_ioctl(struct file *file,
case FE_GET_INFO: {
struct dvb_frontend_info *info = parg;
-
- memcpy(info, &fe->ops.info, sizeof(struct dvb_frontend_info));
- dvb_frontend_get_frequency_limits(fe, &info->frequency_min, &info->frequency_max);
+ memset(info, 0, sizeof(*info));
+
+ strcpy(info->name, fe->ops.info.name);
+ info->symbol_rate_min = fe->ops.info.symbol_rate_min;
+ info->symbol_rate_max = fe->ops.info.symbol_rate_max;
+ info->symbol_rate_tolerance = fe->ops.info.symbol_rate_tolerance;
+ info->caps = fe->ops.info.caps;
+ info->frequency_stepsize = dvb_frontend_get_stepsize(fe);
+ dvb_frontend_get_frequency_limits(fe, &info->frequency_min,
+ &info->frequency_max,
+ &info->frequency_tolerance);
/*
* Associate the 4 delivery systems supported by DVBv3
@@ -2406,10 +2460,10 @@ static int dvb_frontend_handle_ioctl(struct file *file,
dev_err(fe->dvb->device,
"%s: doesn't know how to handle a DVBv3 call to delivery system %i\n",
__func__, c->delivery_system);
- fe->ops.info.type = FE_OFDM;
+ info->type = FE_OFDM;
}
dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
- __func__, c->delivery_system, fe->ops.info.type);
+ __func__, c->delivery_system, info->type);
/* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 64d6793674b9..3c8778570331 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -440,8 +440,10 @@ static int dvb_register_media_device(struct dvb_device *dvbdev,
if (!dvbdev->entity)
return 0;
- link = media_create_intf_link(dvbdev->entity, &dvbdev->intf_devnode->intf,
- MEDIA_LNK_FL_ENABLED);
+ link = media_create_intf_link(dvbdev->entity,
+ &dvbdev->intf_devnode->intf,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
#endif
@@ -599,7 +601,8 @@ static int dvb_create_io_intf_links(struct dvb_adapter *adap,
if (strncmp(entity->name, name, strlen(name)))
continue;
link = media_create_intf_link(entity, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
@@ -754,14 +757,16 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
media_device_for_each_intf(intf, mdev) {
if (intf->type == MEDIA_INTF_T_DVB_CA && ca) {
link = media_create_intf_link(ca, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
if (intf->type == MEDIA_INTF_T_DVB_FE && tuner) {
link = media_create_intf_link(tuner, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
@@ -773,7 +778,8 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
*/
if (intf->type == MEDIA_INTF_T_DVB_DVR && demux) {
link = media_create_intf_link(demux, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 9ecaa9d0744a..048285134cdf 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -739,6 +739,16 @@ config DVB_TC90522
Toshiba TC90522 2xISDB-S 8PSK + 2xISDB-T OFDM demodulator.
Say Y when you want to support this frontend.
+config DVB_MN88443X
+ tristate "Socionext MN88443x"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A driver for Socionext/Panasonic MN884433 and MN884434
+ ISDB-S + ISDB-T demodulators.
+ Say Y when you want to support this frontend.
+
comment "Digital terrestrial only tuners/PLL"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 67a783fd5ed0..779dfd027b24 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -125,6 +125,7 @@ obj-$(CONFIG_DVB_AF9033) += af9033.o
obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
+obj-$(CONFIG_DVB_MN88443X) += mn88443x.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
obj-$(CONFIG_DVB_HELENE) += helene.o
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 482bce49819a..35a93b251aab 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1136,10 +1136,9 @@ static const struct dvb_frontend_ops af9013_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Afatech AF9013",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 250000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -1312,10 +1311,10 @@ static int af9013_wregs(struct i2c_client *client, u8 cmd, u16 reg,
memcpy(&buf[3], val, len);
if (lock)
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
ret = __i2c_transfer(client->adapter, msg, 1);
if (lock)
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (ret < 0) {
goto err;
} else if (ret != 1) {
@@ -1353,10 +1352,10 @@ static int af9013_rregs(struct i2c_client *client, u8 cmd, u16 reg,
buf[2] = cmd;
if (lock)
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
ret = __i2c_transfer(client->adapter, msg, 2);
if (lock)
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (ret < 0) {
goto err;
} else if (ret != 2) {
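The i2c_lock_adapter() calls become i2c_lock_bus(..., I2C_LOCK_SEGMENT): to keep its two-message sequences atomic the driver only needs to hold its own bus segment across the unlocked __i2c_transfer() calls, not the root adapter of a possible mux tree. The pattern as a sketch (my_locked_transfer is a placeholder):

static int my_locked_transfer(struct i2c_client *client,
			      struct i2c_msg *msgs, int num)
{
	int ret;

	/* lock just this segment, then use the unlocked transfer */
	i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
	ret = __i2c_transfer(client->adapter, msgs, num);
	i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);

	return ret;
}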
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index aaed7cfe5f66..0cd57013ea25 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1020,10 +1020,9 @@ static const struct dvb_frontend_ops af9033_ops = {
.delsys = {SYS_DVBT},
.info = {
.name = "Afatech AF9033 (DVB-T)",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 250000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 9b2f2da1d989..f59a102b0a64 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -419,9 +419,9 @@ static const struct dvb_frontend_ops as102_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Abilis AS102 DVB-T",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_INVERSION_AUTO
| FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4
| FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index 9746c6dd7fb8..52ce0e6e2a15 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -468,9 +468,9 @@ static int ascot2e_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops ascot2e_tuner_ops = {
.info = {
.name = "Sony ASCOT2E",
- .frequency_min = 1000000,
- .frequency_max = 1200000000,
- .frequency_step = 25000,
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1200 * MHz,
+ .frequency_step_hz = 25 * kHz,
},
.init = ascot2e_init,
.release = ascot2e_release,
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index 7b0f3239dbba..cbcc65dc9d54 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -428,9 +428,9 @@ static const struct dvb_frontend_ops atbm8830_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "AltoBeam ATBM8830/8831 DMB-TH",
- .frequency_min = 474000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 10000,
+ .frequency_min_hz = 474 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 10 * kHz,
.caps =
FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index 8f659bd1c79e..076f737aa8c0 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -897,9 +897,9 @@ static const struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Auvitek AU8522 QAM/8VSB Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 05df133dc5be..e92542b92d34 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -840,10 +840,8 @@ static const struct dvb_frontend_ops bcm3510_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Broadcom BCM3510 VSB/QAM frontend",
- .frequency_min = 54000000,
- .frequency_max = 803000000,
- /* stepsize is just a guess */
- .frequency_stepsize = 0,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 803 * MHz,
.caps =
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index 9ffd2c7ac74a..961380162cdd 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -412,9 +412,9 @@ static const struct dvb_frontend_ops cx22700_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22700 DVB-T",
- .frequency_min = 470000000,
- .frequency_max = 860000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
index e8b1e6b7e7e5..ab9b2924bcca 100644
--- a/drivers/media/dvb-frontends/cx22702.c
+++ b/drivers/media/dvb-frontends/cx22702.c
@@ -622,9 +622,9 @@ static const struct dvb_frontend_ops cx22702_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22702 DVB-T",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 2f3a1c237489..9441bdc73097 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -629,10 +629,10 @@ static const struct dvb_frontend_ops cx24110_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24110 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 037db3e9d2dd..91a5033b6bd7 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -533,10 +533,10 @@ static void cx24113_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops cx24113_tuner_ops = {
.info = {
- .name = "Conexant CX24113",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 125,
+ .name = "Conexant CX24113",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 125 * kHz,
},
.release = cx24113_release,
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index 2dbc7349d870..220f26663647 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -1465,10 +1465,10 @@ static const struct dvb_frontend_ops cx24116_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24116/CX24118",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index ba55d75d916c..667bc8be848d 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1622,10 +1622,10 @@ static const struct dvb_frontend_ops cx24117_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24117/CX24132",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index ccbabdae6a69..dd3ec316e7c2 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -1555,10 +1555,10 @@ static const struct dvb_frontend_ops cx24120_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24120/CX24118",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index bf33e7390aaf..e49215020a93 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1111,10 +1111,10 @@ static const struct dvb_frontend_ops cx24123_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24123/CX24109",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index c2e7caf9b010..eb1d7478fa8d 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -431,8 +431,8 @@ int cxd2820r_get_tune_settings_t(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index e641fde75379..f330ec1710b4 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -426,8 +426,8 @@ int cxd2820r_get_tune_settings_t2(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 1500;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
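The derived zigzag-scan parameters inherit the new unit. A worked example, assuming a hypothetical stepsize of 166667 Hz:

    s->step_size = 166667 * 2;     /* 333334 Hz */
    s->max_drift = 166667 * 2 + 1; /* 333335 Hz */

The arithmetic is unchanged; only the unit of the source field is now explicit.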
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 85905d3503ff..c98093ed3dd7 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3942,9 +3942,8 @@ static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Sony CXD2841ER DVB-S/S2 demodulator",
- .frequency_min = 500000,
- .frequency_max = 2500000,
- .frequency_stepsize = 0,
+ .frequency_min_hz = 500 * MHz,
+ .frequency_max_hz = 2500 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500,
@@ -3988,8 +3987,8 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = {
FE_CAN_HIERARCHY_AUTO |
FE_CAN_MUTE_TS |
FE_CAN_2G_MODULATION,
- .frequency_min = 42000000,
- .frequency_max = 1002000000,
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 1002 * MHz,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000
},
diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
index bd9101e246d5..f87e27481ea7 100644
--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
@@ -1833,9 +1833,9 @@ static enum dvbfe_algo cxd2880_get_frontend_algo(struct dvb_frontend *fe)
static struct dvb_frontend_ops cxd2880_dvbt_t2_ops = {
.info = {
.name = "Sony CXD2880",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 1000,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 1 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index 932d235118e2..37ebd5af8fd4 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -726,10 +726,10 @@ static void dib0070_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops dib0070_ops = {
.info = {
- .name = "DiBcom DiB0070",
- .frequency_min = 45000000,
- .frequency_max = 860000000,
- .frequency_step = 1000,
+ .name = "DiBcom DiB0070",
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = dib0070_release,
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index ee7af34979ed..44a074261e69 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -2578,9 +2578,9 @@ static int dib0090_set_params(struct dvb_frontend *fe)
static const struct dvb_tuner_ops dib0090_ops = {
.info = {
.name = "DiBcom DiB0090",
- .frequency_min = 45000000,
- .frequency_max = 860000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = dib0090_release,
@@ -2593,9 +2593,9 @@ static const struct dvb_tuner_ops dib0090_ops = {
static const struct dvb_tuner_ops dib0090_fw_ops = {
.info = {
.name = "DiBcom DiB0090",
- .frequency_min = 45000000,
- .frequency_max = 860000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = dib0090_release,
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 5861f346db49..bbbd53280477 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -786,9 +786,9 @@ static const struct dvb_frontend_ops dib3000mb_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000M-B DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 7e5d474806a5..c9e1db251723 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -944,9 +944,9 @@ static const struct dvb_frontend_ops dib3000mc_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000MC/P",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index 6a1d357d0c7c..b79358d09de6 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -1443,9 +1443,9 @@ static const struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000MA/MB/PA/PB/MC",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 5a8dbc0b25fb..58387860b62d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2824,9 +2824,9 @@ static const struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000PC",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 22eec8f65485..3c3f8cb14845 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -4390,9 +4390,9 @@ static const struct dvb_frontend_ops dib8000_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "DiBcom 8000 ISDB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index b8edb55696bb..0183fb1346ef 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -2553,9 +2553,9 @@ static const struct dvb_frontend_ops dib9000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 9000",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 5706898e84cc..9628d4067fe1 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -2841,8 +2841,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o
/* coef = 188/204 */
max_bit_rate =
(ext_attr->curr_symbol_rate / 8) * nr_bits * 188;
- /* pass through as b/c Annex A/c need following settings */
- /* fall-through */
+ /* fall-through - because Annex A/C need the following settings */
case DRX_STANDARD_ITU_B:
rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0);
if (rc != 0) {
@@ -11811,8 +11810,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data));
mc_data += sizeof(u16);
- pr_debug("%u: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
- (unsigned)(mc_data - mc_data_init), block_hdr.addr,
+ pr_debug("%zd: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
+ (mc_data - mc_data_init), block_hdr.addr,
block_hdr.size, block_hdr.flags, block_hdr.CRC);
/* Check block header on:
@@ -11842,8 +11841,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
mc_block_nr_bytes,
mc_data, 0x0000)) {
rc = -EIO;
- pr_err("error writing firmware at pos %u\n",
- (unsigned)(mc_data - mc_data_init));
+ pr_err("error writing firmware at pos %zd\n",
+ mc_data - mc_data_init);
goto release;
}
break;
@@ -11866,8 +11865,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
(u16)bytes_to_comp,
(u8 *)mc_data_buffer,
0x0000)) {
- pr_err("error reading firmware at pos %u\n",
- (unsigned)(mc_data - mc_data_init));
+ pr_err("error reading firmware at pos %zd\n",
+ mc_data - mc_data_init);
return -EIO;
}
@@ -11875,8 +11874,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
bytes_to_comp);
if (result) {
- pr_err("error verifying firmware at pos %u\n",
- (unsigned)(mc_data - mc_data_init));
+ pr_err("error verifying firmware at pos %zd\n",
+ mc_data - mc_data_init);
return -EIO;
}
@@ -12374,9 +12373,9 @@ static const struct dvb_frontend_ops drx39xxj_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Micronas DRX39xxj family Frontend",
- .frequency_stepsize = 62500,
- .frequency_min = 51000000,
- .frequency_max = 858000000,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
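Dropping the (unsigned) casts in the hunks above is safe because pointer subtraction yields a ptrdiff_t, which on Linux architectures has the same width as the ssize_t that %zd expects (the kernel's printk documentation also offers %td specifically for pointer differences). A one-line sketch reusing the names from this driver:

    pr_debug("at offset %zd of the microcode image\n", mc_data - mc_data_init);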
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 3b7d31a22d82..684d428efb0d 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1970,8 +1970,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->transmission_mode) {
default: /* Not set, detect it automatically */
operationMode |= SC_RA_RAM_OP_AUTO_MODE__M;
- /* try first guess DRX_FFTMODE_8K */
- /* fall through */
+ /* fall through - try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmissionParams |= SC_RA_RAM_OP_PARAM_MODE_8K;
if (state->type_A) {
@@ -2144,8 +2143,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->modulation) {
default:
operationMode |= SC_RA_RAM_OP_AUTO_CONST__M;
- /* try first guess DRX_CONSTELLATION_QAM64 */
- /* fall through */
+ /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmissionParams |= SC_RA_RAM_OP_PARAM_CONST_QAM64;
if (state->type_A) {
@@ -2912,10 +2910,9 @@ static const struct dvb_frontend_ops drxd_ops = {
.delsys = { SYS_DVBT},
.info = {
.name = "Micronas DRXD DVB-T",
- .frequency_min = 47125000,
- .frequency_max = 855250000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 47125 * kHz,
+ .frequency_max_hz = 855250 * kHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
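The comment merges in this file and the ones below are functional, not cosmetic: GCC's -Wimplicit-fallthrough is only silenced when a recognized fall-through marker is the comment immediately preceding the next label, so keeping the explanation in a second comment after the marker would leave the warning active. A sketch of the accepted shape (all identifiers hypothetical):

    switch (mode) {
    case MODE_AUTO:
        pick_default();
        /* fall through - AUTO continues with the FIXED setup */
    case MODE_FIXED:
        apply();
        break;
    }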
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 5a26ad93be10..84ac3f73f8fe 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -213,7 +213,7 @@ static inline u32 log10times100(u32 value)
static int drxk_i2c_lock(struct drxk_state *state)
{
- i2c_lock_adapter(state->i2c);
+ i2c_lock_bus(state->i2c, I2C_LOCK_SEGMENT);
state->drxk_i2c_exclusive_lock = true;
return 0;
@@ -224,7 +224,7 @@ static void drxk_i2c_unlock(struct drxk_state *state)
if (!state->drxk_i2c_exclusive_lock)
return;
- i2c_unlock_adapter(state->i2c);
+ i2c_unlock_bus(state->i2c, I2C_LOCK_SEGMENT);
state->drxk_i2c_exclusive_lock = false;
}
@@ -3270,13 +3270,11 @@ static int dvbt_sc_command(struct drxk_state *state,
case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
status |= write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
- /* All commands using 1 parameters */
- /* fall through */
+ /* fall through - All commands using 1 parameter */
case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
case OFDM_SC_RA_RAM_CMD_USER_IO:
status |= write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
- /* All commands using 0 parameters */
- /* fall through */
+ /* fall through - All commands using 0 parameters */
case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
case OFDM_SC_RA_RAM_CMD_NULL:
/* Write command */
@@ -3784,8 +3782,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case TRANSMISSION_MODE_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
- /* try first guess DRX_FFTMODE_8K */
- /* fall through */
+ /* fall through - try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
break;
@@ -3799,8 +3796,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
default:
case GUARD_INTERVAL_AUTO:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
- /* try first guess DRX_GUARD_1DIV4 */
- /* fall through */
+ /* fall through - try first guess DRX_GUARD_1DIV4 */
case GUARD_INTERVAL_1_4:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
break;
@@ -3841,8 +3837,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case QAM_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
- /* try first guess DRX_CONSTELLATION_QAM64 */
- /* fall through */
+ /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
break;
@@ -3885,8 +3880,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case FEC_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
- /* try first guess DRX_CODERATE_2DIV3 */
- /* fall through */
+ /* fall through - try first guess DRX_CODERATE_2DIV3 */
case FEC_2_3:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
break;
@@ -6744,13 +6738,13 @@ static const struct dvb_frontend_ops drxk_ops = {
/* .delsys will be filled dynamically */
.info = {
.name = "DRXK",
- .frequency_min = 47000000,
- .frequency_max = 865000000,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 865 * MHz,
/* For DVB-C */
- .symbol_rate_min = 870000,
+ .symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
/* For DVB-T */
- .frequency_stepsize = 166667,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO |
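The locking change above replaces the removed i2c_lock_adapter()/i2c_unlock_adapter() pair with the flag-taking API from linux/i2c.h: I2C_LOCK_SEGMENT locks only the bus segment the client sits on (which matters behind I2C muxes), while I2C_LOCK_ROOT_ADAPTER would lock the whole adapter tree; for a demodulator on the root bus the two behave the same. A minimal sketch of the pattern:

    #include <linux/i2c.h>

    static void locked_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
    {
        i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
        __i2c_transfer(adap, msgs, num);   /* unlocked variant of i2c_transfer() */
        i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
    }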
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 2ff90e5eabce..46a55146cb07 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -1100,10 +1100,10 @@ static const struct dvb_frontend_ops ds3000_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Montage Technology DS3000",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index e3894ff403d7..6d4b2eec67b4 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -799,6 +799,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
struct dvb_pll_priv *priv = NULL;
int ret;
const struct dvb_pll_desc *desc;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
b1 = kmalloc(1, GFP_KERNEL);
if (!b1)
@@ -844,8 +845,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
strncpy(fe->ops.tuner_ops.info.name, desc->name,
sizeof(fe->ops.tuner_ops.info.name));
- fe->ops.tuner_ops.info.frequency_min = desc->min;
- fe->ops.tuner_ops.info.frequency_max = desc->max;
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_ISDBS:
+ fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz;
+ fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz;
+ break;
+ default:
+ fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
+ fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
+ }
+
if (!desc->initdata)
fe->ops.tuner_ops.init = NULL;
if (!desc->sleepdata)
@@ -884,6 +896,17 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!dvb_pll_attach(fe, client->addr, client->adapter, desc_id))
return -ENOMEM;
+ /*
+ * Unset tuner_ops.release (== dvb_pll_release)
+ * which has just been set in the above dvb_pll_attach(),
+ * because if tuner_ops.release was left defined,
+ * this module would be 'put' twice on exit:
+ * once by dvb_frontend_detach() and another by dvb_module_release().
+ *
+ * dvb_pll_release is instead executed in the i2c driver's .remove(),
+ * keeping dvb_pll_attach untouched for legacy (dvb_attach) drivers.
+ */
+ fe->ops.tuner_ops.release = NULL;
dev_info(&client->dev, "DVB Simple Tuner attached.\n");
return 0;
}
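The switch above exists because the legacy dvb_pll_desc tables keep satellite limits in kHz and everything else in Hz; only the copy into the new fields normalizes them. The same logic as a standalone helper (a sketch, not part of the patch):

    /* normalize a legacy descriptor limit to Hz */
    static u32 desc_limit_to_hz(u32 limit, enum fe_delivery_system delsys)
    {
        switch (delsys) {
        case SYS_DVBS:
        case SYS_DVBS2:
        case SYS_TURBO:
        case SYS_ISDBS:
            return limit * kHz;  /* satellite tables are in kHz */
        default:
            return limit;        /* terrestrial/cable tables are already in Hz */
        }
    }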
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index 6650d4f61ef2..a4cbcae7967d 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -170,9 +170,9 @@ static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Dummy DVB-T",
- .frequency_min = 0,
- .frequency_max = 863250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 0,
+ .frequency_max_hz = 863250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
@@ -201,11 +201,11 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "Dummy DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .symbol_rate_min = (57840000/2)/64, /* SACLK/64 == (XIN/2)/64 */
- .symbol_rate_max = (57840000/2)/4, /* SACLK/4 */
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
+ .symbol_rate_min = (57840000 / 2) / 64, /* SACLK/64 == (XIN/2)/64 */
+ .symbol_rate_max = (57840000 / 2) / 4, /* SACLK/4 */
.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
FE_CAN_QAM_128 | FE_CAN_QAM_256 |
FE_CAN_FEC_AUTO | FE_CAN_INVERSION_AUTO
@@ -230,10 +230,10 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Dummy DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 250, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 250 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index a772ef8bfe1c..238f09aa72f2 100644
--- a/drivers/media/dvb-frontends/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -355,9 +355,9 @@ static const struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Genpix DVB-S",
- .frequency_min = 800000,
- .frequency_max = 2250000,
- .frequency_stepsize = 100,
+ .frequency_min_hz = 800 * MHz,
+ .frequency_max_hz = 2250 * MHz,
+ .frequency_stepsize_hz = 100 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index a0d0b53c91d7..d7790cb98a0c 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -666,7 +666,7 @@ static int helene_set_params_s(struct dvb_frontend *fe)
return 0;
}
-static int helene_set_params(struct dvb_frontend *fe)
+static int helene_set_params_t(struct dvb_frontend *fe)
{
u8 data[MAX_WRITE_REGSIZE];
u32 frequency;
@@ -835,6 +835,19 @@ static int helene_set_params(struct dvb_frontend *fe)
return 0;
}
+static int helene_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ if (p->delivery_system == SYS_DVBT ||
+ p->delivery_system == SYS_DVBT2 ||
+ p->delivery_system == SYS_ISDBT ||
+ p->delivery_system == SYS_DVBC_ANNEX_A)
+ return helene_set_params_t(fe);
+
+ return helene_set_params_s(fe);
+}
+
static int helene_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct helene_priv *priv = fe->tuner_priv;
@@ -843,26 +856,26 @@ static int helene_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static const struct dvb_tuner_ops helene_tuner_ops = {
+static const struct dvb_tuner_ops helene_tuner_ops_t = {
.info = {
.name = "Sony HELENE Ter tuner",
- .frequency_min = 1000000,
- .frequency_max = 1200000000,
- .frequency_step = 25000,
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1200 * MHz,
+ .frequency_step_hz = 25 * kHz,
},
.init = helene_init,
.release = helene_release,
.sleep = helene_sleep,
- .set_params = helene_set_params,
+ .set_params = helene_set_params_t,
.get_frequency = helene_get_frequency,
};
static const struct dvb_tuner_ops helene_tuner_ops_s = {
.info = {
.name = "Sony HELENE Sat tuner",
- .frequency_min = 500000,
- .frequency_max = 2500000,
- .frequency_step = 1000,
+ .frequency_min_hz = 500 * MHz,
+ .frequency_max_hz = 2500 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.init = helene_init,
.release = helene_release,
@@ -871,6 +884,20 @@ static const struct dvb_tuner_ops helene_tuner_ops_s = {
.get_frequency = helene_get_frequency,
};
+static const struct dvb_tuner_ops helene_tuner_ops = {
+ .info = {
+ .name = "Sony HELENE Sat/Ter tuner",
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 2500 * MHz,
+ .frequency_step_hz = 25 * kHz,
+ },
+ .init = helene_init,
+ .release = helene_release,
+ .sleep = helene_sleep,
+ .set_params = helene_set_params,
+ .get_frequency = helene_get_frequency,
+};
+
/* power-on tuner
* call once after reset
*/
@@ -897,7 +924,10 @@ static int helene_x_pon(struct helene_priv *priv)
helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
/* 0x81 - 0x94 */
- data[0] = 0x18; /* xtal 24 MHz */
+ if (priv->xtal == SONY_HELENE_XTAL_16000)
+ data[0] = 0x10; /* xtal 16 MHz */
+ else
+ data[0] = 0x18; /* xtal 24 MHz */
data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
data[3] = 0x80; /* REFOUT signal output 500mVpp */
@@ -1032,7 +1062,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
- memcpy(&fe->ops.tuner_ops, &helene_tuner_ops,
+ memcpy(&fe->ops.tuner_ops, &helene_tuner_ops_t,
sizeof(struct dvb_tuner_ops));
fe->tuner_priv = priv;
dev_info(&priv->i2c->dev,
@@ -1042,6 +1072,59 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
}
EXPORT_SYMBOL(helene_attach);
+static int helene_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct helene_config *config = client->dev.platform_data;
+ struct dvb_frontend *fe = config->fe;
+ struct device *dev = &client->dev;
+ struct helene_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->i2c_address = client->addr;
+ priv->i2c = client->adapter;
+ priv->set_tuner_data = config->set_tuner_priv;
+ priv->set_tuner = config->set_tuner_callback;
+ priv->xtal = config->xtal;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (helene_x_pon(priv) != 0)
+ return -EINVAL;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ memcpy(&fe->ops.tuner_ops, &helene_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = priv;
+ i2c_set_clientdata(client, priv);
+
+ dev_info(dev, "Sony HELENE attached on addr=%x at I2C adapter %p\n",
+ priv->i2c_address, priv->i2c);
+
+ return 0;
+}
+
+static const struct i2c_device_id helene_id[] = {
+ { "helene", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, helene_id);
+
+static struct i2c_driver helene_driver = {
+ .driver = {
+ .name = "helene",
+ },
+ .probe = helene_probe,
+ .id_table = helene_id,
+};
+module_i2c_driver(helene_driver);
+
MODULE_DESCRIPTION("Sony HELENE Sat/Ter tuner driver");
MODULE_AUTHOR("Abylay Ospan <aospan@netup.ru>");
MODULE_LICENSE("GPL");
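The new probe path expects a struct helene_config as I2C platform data, including the @fe backpointer added to helene.h just below. A bridge driver would therefore bind the tuner roughly as follows (address and adapter are board-specific assumptions):

    static struct helene_config helene_conf = {
        .xtal = SONY_HELENE_XTAL_16000,
    };

    struct i2c_board_info info = {
        I2C_BOARD_INFO("helene", 0x60),      /* name must match helene_id[] */
    };

    helene_conf.fe = fe;                     /* frontend created by the demod driver */
    info.platform_data = &helene_conf;
    client = i2c_new_device(adapter, &info); /* ends up in helene_probe() */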
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
index c9fc81c7e4e7..8562d01bc93e 100644
--- a/drivers/media/dvb-frontends/helene.h
+++ b/drivers/media/dvb-frontends/helene.h
@@ -39,6 +39,7 @@ enum helene_xtal {
* @set_tuner_callback: Callback function that notifies the parent driver
* which tuner is active now
* @xtal: Crystal frequency as described by &enum helene_xtal
+ * @fe: Frontend to which this tuner is connected
*/
struct helene_config {
u8 i2c_address;
@@ -46,6 +47,8 @@ struct helene_config {
void *set_tuner_priv;
int (*set_tuner_callback)(void *, int);
enum helene_xtal xtal;
+
+ struct dvb_frontend *fe;
};
#if IS_REACHABLE(CONFIG_DVB_HELENE)
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 5e7e265a52e6..02bc08081971 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -330,9 +330,9 @@ static int horus3a_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops horus3a_tuner_ops = {
.info = {
.name = "Sony Horus3a",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 1000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.init = horus3a_init,
.release = horus3a_release,
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index 04f7f6854f73..c3a6e81ae87f 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -353,10 +353,10 @@ static void itd1000_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops itd1000_tuner_ops = {
.info = {
- .name = "Integrant ITD1000",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 125, /* kHz for QPSK frontends */
+ .name = "Integrant ITD1000",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 125 * kHz,
},
.release = itd1000_release,
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 965012ad5c59..a30707b61b1f 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -135,8 +135,8 @@ static int ix2505v_set_params(struct dvb_frontend *fe)
u8 gain, cc, ref, psc, local_osc, lpf;
u8 data[4] = {0};
- if ((frequency < fe->ops.info.frequency_min)
- || (frequency > fe->ops.info.frequency_max))
+ if ((frequency < fe->ops.info.frequency_min_hz / kHz)
+ || (frequency > fe->ops.info.frequency_max_hz / kHz))
return -EINVAL;
if (state->config->tuner_gain)
@@ -256,8 +256,8 @@ static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops ix2505v_tuner_ops = {
.info = {
.name = "Sharp IX2505V (B0017)",
- .frequency_min = 950000,
- .frequency_max = 2175000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2175 * MHz
},
.release = ix2505v_release,
.set_params = ix2505v_set_params,
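Satellite tuners still receive the requested frequency from the property cache in kHz, so the range check above scales the Hz limits down instead of scaling the request up: 950 * MHz / kHz == 950000, exactly the value the old field held. In the old units the same check read:

    if (frequency < 950000 || frequency > 2175000)  /* kHz */
        return -EINVAL;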
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 249c18761e6e..9afb5bf6424b 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -575,10 +575,9 @@ static const struct dvb_frontend_ops l64781_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "LSI L64781 DVB-T",
- /* .frequency_min = ???,*/
- /* .frequency_max = ???,*/
- .frequency_stepsize = 166666,
- /* .frequency_tolerance = ???,*/
+ /* .frequency_min_hz = ???,*/
+ /* .frequency_max_hz = ???,*/
+ .frequency_stepsize_hz = 166666,
/* .symbol_rate_tolerance = ???,*/
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 9854096839ae..408151e33fa7 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1349,9 +1349,9 @@ static const struct dvb_frontend_ops lg2160_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2160 ATSC/MH Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
},
.i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
#if 0
@@ -1375,9 +1375,9 @@ static const struct dvb_frontend_ops lg2161_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2161 ATSC/MH Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
},
.i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
#if 0
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 735d73060265..857e9b4d69b4 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1164,9 +1164,9 @@ static const struct dvb_frontend_ops lgdt3304_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3304 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl,
@@ -1187,9 +1187,9 @@ static const struct dvb_frontend_ops lgdt3305_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3305 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl,
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 32de824476db..0e1f5daaf20c 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2157,9 +2157,9 @@ static const struct dvb_frontend_ops lgdt3306a_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3306A VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_AUTO | FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.i2c_gate_ctrl = lgdt3306a_i2c_gate_ctrl,
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index f6731738b073..10d584ce538d 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -944,9 +944,9 @@ static const struct dvb_frontend_ops lgdt3302_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3302 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 5056941, /* QAM 64 */
.symbol_rate_max = 10762000, /* VSB 8 */
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
@@ -966,9 +966,9 @@ static const struct dvb_frontend_ops lgdt3303_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3303 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 5056941, /* QAM 64 */
.symbol_rate_max = 10762000, /* VSB 8 */
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
diff --git a/drivers/media/dvb-frontends/lgs8gl5.c b/drivers/media/dvb-frontends/lgs8gl5.c
index f47e5a1af16d..07e5bcee9c1e 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.c
+++ b/drivers/media/dvb-frontends/lgs8gl5.c
@@ -416,10 +416,9 @@ static const struct dvb_frontend_ops lgs8gl5_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS-8GL5 DMB-TH",
- .frequency_min = 474000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 10000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 474 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 10 * kHz,
.caps = FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_32 |
FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 84af8a12f26a..a6bcf1571d10 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -985,9 +985,9 @@ static const struct dvb_frontend_ops lgs8gxx_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS8913/LGS8GXX DMB-TH",
- .frequency_min = 474000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 10000,
+ .frequency_min_hz = 474 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 10 * kHz,
.caps =
FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 65d157fe76d1..dffd2d4bf1c8 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1300,9 +1300,9 @@ static const struct dvb_frontend_ops m88ds3103_ops = {
.delsys = {SYS_DVBS, SYS_DVBS2},
.info = {
.name = "Montage Technology M88DS3103",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 496ce27fa63a..d5bc85501f9e 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -759,10 +759,10 @@ static const struct dvb_frontend_ops m88rs2000_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "M88RS2000 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 377cd984b069..da505a5d035f 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1808,10 +1808,9 @@ static const struct dvb_frontend_ops mb86a16_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Fujitsu MB86A16 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 3000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 3 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500,
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index c3b1b88e2e7a..66fc77db0e75 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2111,9 +2111,9 @@ static const struct dvb_frontend_ops mb86a20s_ops = {
FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_QAM_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO,
/* Actually, those values depend on the used tuner */
- .frequency_min = 45000000,
- .frequency_max = 864000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_stepsize_hz = 62500,
},
.release = mb86a20s_release,
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
new file mode 100644
index 000000000000..9ec1aeef03d5
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Socionext MN88443x series demodulator driver for ISDB-S/ISDB-T.
+//
+// Copyright (c) 2018 Socionext Inc.
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <media/dvb_math.h>
+
+#include "mn88443x.h"
+
+/* ISDB-S registers */
+#define ATSIDU_S 0x2f
+#define ATSIDL_S 0x30
+#define TSSET_S 0x31
+#define AGCREAD_S 0x5a
+#define CPMON1_S 0x5e
+#define CPMON1_S_FSYNC BIT(5)
+#define CPMON1_S_ERRMON BIT(4)
+#define CPMON1_S_SIGOFF BIT(3)
+#define CPMON1_S_W2LOCK BIT(2)
+#define CPMON1_S_W1LOCK BIT(1)
+#define CPMON1_S_DW1LOCK BIT(0)
+#define TRMON_S 0x60
+#define BERCNFLG_S 0x68
+#define BERCNFLG_S_BERVRDY BIT(5)
+#define BERCNFLG_S_BERVCHK BIT(4)
+#define BERCNFLG_S_BERDRDY BIT(3)
+#define BERCNFLG_S_BERDCHK BIT(2)
+#define CNRDXU_S 0x69
+#define CNRDXL_S 0x6a
+#define CNRDYU_S 0x6b
+#define CNRDYL_S 0x6c
+#define BERVRDU_S 0x71
+#define BERVRDL_S 0x72
+#define DOSET1_S 0x73
+
+/* Primary ISDB-T */
+#define PLLASET1 0x00
+#define PLLASET2 0x01
+#define PLLBSET1 0x02
+#define PLLBSET2 0x03
+#define PLLSET 0x04
+#define OUTCSET 0x08
+#define OUTCSET_CHDRV_8MA 0xff
+#define OUTCSET_CHDRV_4MA 0x00
+#define PLDWSET 0x09
+#define PLDWSET_NORMAL 0x00
+#define PLDWSET_PULLDOWN 0xff
+#define HIZSET1 0x0a
+#define HIZSET2 0x0b
+
+/* Secondary ISDB-T (for MN884434 only) */
+#define RCVSET 0x00
+#define TSSET1_M 0x01
+#define TSSET2_M 0x02
+#define TSSET3_M 0x03
+#define INTACSET 0x08
+#define HIZSET3 0x0b
+
+/* ISDB-T registers */
+#define TSSET1 0x05
+#define TSSET1_TSASEL_MASK GENMASK(4, 3)
+#define TSSET1_TSASEL_ISDBT (0x0 << 3)
+#define TSSET1_TSASEL_ISDBS (0x1 << 3)
+#define TSSET1_TSASEL_NONE (0x2 << 3)
+#define TSSET1_TSBSEL_MASK GENMASK(2, 1)
+#define TSSET1_TSBSEL_ISDBS (0x0 << 1)
+#define TSSET1_TSBSEL_ISDBT (0x1 << 1)
+#define TSSET1_TSBSEL_NONE (0x2 << 1)
+#define TSSET2 0x06
+#define TSSET3 0x07
+#define TSSET3_INTASEL_MASK GENMASK(7, 6)
+#define TSSET3_INTASEL_T (0x0 << 6)
+#define TSSET3_INTASEL_S (0x1 << 6)
+#define TSSET3_INTASEL_NONE (0x2 << 6)
+#define TSSET3_INTBSEL_MASK GENMASK(5, 4)
+#define TSSET3_INTBSEL_S (0x0 << 4)
+#define TSSET3_INTBSEL_T (0x1 << 4)
+#define TSSET3_INTBSEL_NONE (0x2 << 4)
+#define OUTSET2 0x0d
+#define PWDSET 0x0f
+#define PWDSET_OFDMPD_MASK GENMASK(3, 2)
+#define PWDSET_OFDMPD_DOWN BIT(3)
+#define PWDSET_PSKPD_MASK GENMASK(1, 0)
+#define PWDSET_PSKPD_DOWN BIT(1)
+#define CLKSET1_T 0x11
+#define MDSET_T 0x13
+#define MDSET_T_MDAUTO_MASK GENMASK(7, 4)
+#define MDSET_T_MDAUTO_AUTO (0xf << 4)
+#define MDSET_T_MDAUTO_MANUAL (0x0 << 4)
+#define MDSET_T_FFTS_MASK GENMASK(3, 2)
+#define MDSET_T_FFTS_MODE1 (0x0 << 2)
+#define MDSET_T_FFTS_MODE2 (0x1 << 2)
+#define MDSET_T_FFTS_MODE3 (0x2 << 2)
+#define MDSET_T_GI_MASK GENMASK(1, 0)
+#define MDSET_T_GI_1_32 (0x0 << 0)
+#define MDSET_T_GI_1_16 (0x1 << 0)
+#define MDSET_T_GI_1_8 (0x2 << 0)
+#define MDSET_T_GI_1_4 (0x3 << 0)
+#define MDASET_T 0x14
+#define ADCSET1_T 0x20
+#define ADCSET1_T_REFSEL_MASK GENMASK(1, 0)
+#define ADCSET1_T_REFSEL_2V (0x3 << 0)
+#define ADCSET1_T_REFSEL_1_5V (0x2 << 0)
+#define ADCSET1_T_REFSEL_1V (0x1 << 0)
+#define NCOFREQU_T 0x24
+#define NCOFREQM_T 0x25
+#define NCOFREQL_T 0x26
+#define FADU_T 0x27
+#define FADM_T 0x28
+#define FADL_T 0x29
+#define AGCSET2_T 0x2c
+#define AGCSET2_T_IFPOLINV_INC BIT(0)
+#define AGCSET2_T_RFPOLINV_INC BIT(1)
+#define AGCV3_T 0x3e
+#define MDRD_T 0xa2
+#define MDRD_T_SEGID_MASK GENMASK(5, 4)
+#define MDRD_T_SEGID_13 (0x0 << 4)
+#define MDRD_T_SEGID_1 (0x1 << 4)
+#define MDRD_T_SEGID_3 (0x2 << 4)
+#define MDRD_T_FFTS_MASK GENMASK(3, 2)
+#define MDRD_T_FFTS_MODE1 (0x0 << 2)
+#define MDRD_T_FFTS_MODE2 (0x1 << 2)
+#define MDRD_T_FFTS_MODE3 (0x2 << 2)
+#define MDRD_T_GI_MASK GENMASK(1, 0)
+#define MDRD_T_GI_1_32 (0x0 << 0)
+#define MDRD_T_GI_1_16 (0x1 << 0)
+#define MDRD_T_GI_1_8 (0x2 << 0)
+#define MDRD_T_GI_1_4 (0x3 << 0)
+#define SSEQRD_T 0xa3
+#define SSEQRD_T_SSEQSTRD_MASK GENMASK(3, 0)
+#define SSEQRD_T_SSEQSTRD_RESET (0x0 << 0)
+#define SSEQRD_T_SSEQSTRD_TUNING (0x1 << 0)
+#define SSEQRD_T_SSEQSTRD_AGC (0x2 << 0)
+#define SSEQRD_T_SSEQSTRD_SEARCH (0x3 << 0)
+#define SSEQRD_T_SSEQSTRD_CLOCK_SYNC (0x4 << 0)
+#define SSEQRD_T_SSEQSTRD_FREQ_SYNC (0x8 << 0)
+#define SSEQRD_T_SSEQSTRD_FRAME_SYNC (0x9 << 0)
+#define SSEQRD_T_SSEQSTRD_SYNC (0xa << 0)
+#define SSEQRD_T_SSEQSTRD_LOCK (0xb << 0)
+#define AGCRDU_T 0xa8
+#define AGCRDL_T 0xa9
+#define CNRDU_T 0xbe
+#define CNRDL_T 0xbf
+#define BERFLG_T 0xc0
+#define BERFLG_T_BERDRDY BIT(7)
+#define BERFLG_T_BERDCHK BIT(6)
+#define BERFLG_T_BERVRDYA BIT(5)
+#define BERFLG_T_BERVCHKA BIT(4)
+#define BERFLG_T_BERVRDYB BIT(3)
+#define BERFLG_T_BERVCHKB BIT(2)
+#define BERFLG_T_BERVRDYC BIT(1)
+#define BERFLG_T_BERVCHKC BIT(0)
+#define BERRDU_T 0xc1
+#define BERRDM_T 0xc2
+#define BERRDL_T 0xc3
+#define BERLENRDU_T 0xc4
+#define BERLENRDL_T 0xc5
+#define ERRFLG_T 0xc6
+#define ERRFLG_T_BERDOVF BIT(7)
+#define ERRFLG_T_BERVOVFA BIT(6)
+#define ERRFLG_T_BERVOVFB BIT(5)
+#define ERRFLG_T_BERVOVFC BIT(4)
+#define ERRFLG_T_NERRFA BIT(3)
+#define ERRFLG_T_NERRFB BIT(2)
+#define ERRFLG_T_NERRFC BIT(1)
+#define ERRFLG_T_NERRF BIT(0)
+#define DOSET1_T 0xcf
+
+#define CLK_LOW 4000000
+#define CLK_DIRECT 20200000
+#define CLK_MAX 25410000
+
+#define S_T_FREQ 8126984 /* 512 / 63 MHz */
+
+struct mn88443x_spec {
+ bool primary;
+};
+
+struct mn88443x_priv {
+ const struct mn88443x_spec *spec;
+
+ struct dvb_frontend fe;
+ struct clk *mclk;
+ struct gpio_desc *reset_gpio;
+ u32 clk_freq;
+ u32 if_freq;
+
+ /* Common */
+ bool use_clkbuf;
+
+ /* ISDB-S */
+ struct i2c_client *client_s;
+ struct regmap *regmap_s;
+
+ /* ISDB-T */
+ struct i2c_client *client_t;
+ struct regmap *regmap_t;
+};
+
+static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ clk_prepare_enable(chip->mclk);
+
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+ usleep_range(100, 1000);
+ gpiod_set_value_cansleep(chip->reset_gpio, 0);
+
+ if (chip->spec->primary) {
+ regmap_write(r_t, OUTCSET, OUTCSET_CHDRV_8MA);
+ regmap_write(r_t, PLDWSET, PLDWSET_NORMAL);
+ regmap_write(r_t, HIZSET1, 0x80);
+ regmap_write(r_t, HIZSET2, 0xe0);
+ } else {
+ regmap_write(r_t, HIZSET3, 0x8f);
+ }
+}
+
+static void mn88443x_cmn_power_off(struct mn88443x_priv *chip)
+{
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+
+ clk_disable_unprepare(chip->mclk);
+}
+
+static void mn88443x_s_sleep(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_PSKPD_MASK,
+ PWDSET_PSKPD_DOWN);
+}
+
+static void mn88443x_s_wake(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_PSKPD_MASK, 0);
+}
+
+static void mn88443x_s_tune(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c)
+{
+ struct regmap *r_s = chip->regmap_s;
+
+ regmap_write(r_s, ATSIDU_S, c->stream_id >> 8);
+ regmap_write(r_s, ATSIDL_S, c->stream_id);
+ regmap_write(r_s, TSSET_S, 0);
+}
+
+static int mn88443x_s_read_status(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c,
+ enum fe_status *status)
+{
+ struct regmap *r_s = chip->regmap_s;
+ u32 cpmon, tmpu, tmpl, flg;
+ u64 tmp;
+
+ /* Sync detection */
+ regmap_read(r_s, CPMON1_S, &cpmon);
+
+ *status = 0;
+ if (cpmon & CPMON1_S_FSYNC)
+ *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ if (cpmon & CPMON1_S_W2LOCK)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
+
+ /* Signal strength */
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_SIGNAL) {
+ u32 agc;
+
+ regmap_read(r_s, AGCREAD_S, &tmpu);
+ agc = tmpu << 8;
+
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = agc;
+ }
+
+ /* C/N rate */
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_VITERBI) {
+ u32 cnr = 0, x, y, d;
+ u64 d_3 = 0;
+
+ regmap_read(r_s, CNRDXU_S, &tmpu);
+ regmap_read(r_s, CNRDXL_S, &tmpl);
+ x = (tmpu << 8) | tmpl;
+ regmap_read(r_s, CNRDYU_S, &tmpu);
+ regmap_read(r_s, CNRDYL_S, &tmpl);
+ y = (tmpu << 8) | tmpl;
+
+ /* CNR[dB]: 10 * log10(D) - 30.74 / D^3 - 3 */
+ /* D = x^2 / (2^15 * y - x^2) */
+ d = (y << 15) - x * x;
+ if (d > 0) {
+ /* (2^4 * D)^3 = 2^12 * D^3 */
+ /* 3.074 * 2^(12 + 24) = 211243671486 */
+ d_3 = div_u64(16 * x * x, d);
+ d_3 = d_3 * d_3 * d_3;
+ if (d_3)
+ d_3 = div_u64(211243671486ULL, d_3);
+ }
+
+ if (d_3) {
+ /* 0.3 * 2^24 = 5033164 */
+ tmp = (s64)2 * intlog10(x) - intlog10(abs(d)) - d_3
+ - 5033164;
+ cnr = div_u64(tmp * 10000, 1 << 24);
+ }
+
+ if (cnr) {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].uvalue = cnr;
+ }
+ }
+
+ /* BER */
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ regmap_read(r_s, BERCNFLG_S, &flg);
+
+ if ((*status & FE_HAS_VITERBI) && (flg & BERCNFLG_S_BERVRDY)) {
+ u32 bit_err, bit_cnt;
+
+ regmap_read(r_s, BERVRDU_S, &tmpu);
+ regmap_read(r_s, BERVRDL_S, &tmpl);
+ bit_err = (tmpu << 8) | tmpl;
+ bit_cnt = (1 << 13) * 204;
+
+ if (bit_cnt) {
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = bit_err;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = bit_cnt;
+ }
+ }
+
+ return 0;
+}
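The fixed-point constants follow from intlog10() (media/dvb_math.h) returning log10(x) scaled by 2^24: 0.3 * 2^24 rounds to 5033164 and 3.074 * 2^(12+24) to 211243671486. A standalone double-precision cross-check of the same formula, with hypothetical register readings for x and y:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double x = 30000, y = 28000;  /* hypothetical CNRDX/CNRDY values */
        double d = (x * x) / (y * 32768 - x * x);
        double cnr = 10 * log10(d) - 30.74 / (d * d * d) - 3;

        printf("CNR = %.3f dB\n", cnr);  /* ~14.1 dB; driver reports 0.001 dB units */
        return 0;
    }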
+
+static void mn88443x_t_sleep(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_OFDMPD_MASK,
+ PWDSET_OFDMPD_DOWN);
+}
+
+static void mn88443x_t_wake(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_OFDMPD_MASK, 0);
+}
+
+static bool mn88443x_t_is_valid_clk(u32 adckt, u32 if_freq)
+{
+ if (if_freq == DIRECT_IF_57MHZ) {
+ if (adckt >= CLK_DIRECT && adckt <= 21000000)
+ return true;
+ if (adckt >= 25300000 && adckt <= CLK_MAX)
+ return true;
+ } else if (if_freq == DIRECT_IF_44MHZ) {
+ if (adckt >= 25000000 && adckt <= CLK_MAX)
+ return true;
+ } else if (if_freq >= LOW_IF_4MHZ && if_freq < DIRECT_IF_44MHZ) {
+ if (adckt >= CLK_DIRECT && adckt <= CLK_MAX)
+ return true;
+ }
+
+ return false;
+}
+
+static int mn88443x_t_set_freq(struct mn88443x_priv *chip)
+{
+ struct device *dev = &chip->client_s->dev;
+ struct regmap *r_t = chip->regmap_t;
+ s64 adckt, nco, ad_t;
+ u32 m, v;
+
+ /* Clock buffer (not supported) or XTAL */
+ if (chip->clk_freq >= CLK_LOW && chip->clk_freq < CLK_DIRECT) {
+ chip->use_clkbuf = true;
+ regmap_write(r_t, CLKSET1_T, 0x07);
+
+ adckt = 0;
+ } else {
+ chip->use_clkbuf = false;
+ regmap_write(r_t, CLKSET1_T, 0x00);
+
+ adckt = chip->clk_freq;
+ }
+ if (!mn88443x_t_is_valid_clk(adckt, chip->if_freq)) {
+ dev_err(dev, "Invalid clock, CLK:%d, ADCKT:%lld, IF:%d\n",
+ chip->clk_freq, adckt, chip->if_freq);
+ return -EINVAL;
+ }
+
+ /* Direct IF or Low IF */
+ if (chip->if_freq == DIRECT_IF_57MHZ ||
+ chip->if_freq == DIRECT_IF_44MHZ)
+ nco = adckt * 2 - chip->if_freq;
+ else
+ nco = -((s64)chip->if_freq);
+ nco = div_s64(nco << 24, adckt);
+ ad_t = div_s64(adckt << 22, S_T_FREQ);
+
+ regmap_write(r_t, NCOFREQU_T, nco >> 16);
+ regmap_write(r_t, NCOFREQM_T, nco >> 8);
+ regmap_write(r_t, NCOFREQL_T, nco);
+ regmap_write(r_t, FADU_T, ad_t >> 16);
+ regmap_write(r_t, FADM_T, ad_t >> 8);
+ regmap_write(r_t, FADL_T, ad_t);
+
+ /* Level of IF */
+ m = ADCSET1_T_REFSEL_MASK;
+ v = ADCSET1_T_REFSEL_1_5V;
+ regmap_update_bits(r_t, ADCSET1_T, m, v);
+
+ /* Polarity of AGC */
+ v = AGCSET2_T_IFPOLINV_INC | AGCSET2_T_RFPOLINV_INC;
+ regmap_update_bits(r_t, AGCSET2_T, v, v);
+
+ /* Lower output level of AGC */
+ regmap_write(r_t, AGCV3_T, 0x00);
+
+ regmap_write(r_t, MDSET_T, 0xfa);
+
+ return 0;
+}
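The NCO word is the IF expressed as a fraction of the ADC clock in 2^24 fixed point (simply negated in the low-IF branch, aliased against twice the clock in the direct-IF branch), and FAD rescales that clock against the chip's fixed 512/63 MHz sampling reference in 2^22 fixed point. A userspace sketch with hypothetical clock and IF choices:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t adckt   = 24000000;  /* hypothetical ADC clock, Hz */
        int64_t if_freq =  5000000;  /* hypothetical low-IF, Hz */
        int64_t nco  = -((if_freq << 24) / adckt);  /* low-IF branch above */
        int64_t ad_t = (adckt << 22) / 8126984;     /* S_T_FREQ */

        printf("NCOFREQ = 0x%06llx, FAD = 0x%06llx\n",
               (unsigned long long)(nco & 0xffffff),
               (unsigned long long)(ad_t & 0xffffff));
        return 0;
    }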
+
+static void mn88443x_t_tune(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c)
+{
+ struct regmap *r_t = chip->regmap_t;
+ u32 m, v;
+
+ m = MDSET_T_MDAUTO_MASK | MDSET_T_FFTS_MASK | MDSET_T_GI_MASK;
+ v = MDSET_T_MDAUTO_AUTO | MDSET_T_FFTS_MODE3 | MDSET_T_GI_1_8;
+ regmap_update_bits(r_t, MDSET_T, m, v);
+
+ regmap_write(r_t, MDASET_T, 0);
+}
+
+static int mn88443x_t_read_status(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c,
+ enum fe_status *status)
+{
+ struct regmap *r_t = chip->regmap_t;
+ u32 seqrd, st, flg, tmpu, tmpm, tmpl;
+ u64 tmp;
+
+ /* Sync detection */
+ regmap_read(r_t, SSEQRD_T, &seqrd);
+ st = seqrd & SSEQRD_T_SSEQSTRD_MASK;
+
+ *status = 0;
+ if (st >= SSEQRD_T_SSEQSTRD_SYNC)
+ *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ if (st >= SSEQRD_T_SSEQSTRD_FRAME_SYNC)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
+
+ /* Signal strength */
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_SIGNAL) {
+ u32 agc;
+
+ regmap_read(r_t, AGCRDU_T, &tmpu);
+ regmap_read(r_t, AGCRDL_T, &tmpl);
+ agc = (tmpu << 8) | tmpl;
+
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = agc;
+ }
+
+ /* C/N rate */
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_VITERBI) {
+ u32 cnr;
+
+ regmap_read(r_t, CNRDU_T, &tmpu);
+ regmap_read(r_t, CNRDL_T, &tmpl);
+
+ if (tmpu || tmpl) {
+ /* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+ /* intlog10(65536) = 80807124, 0.2 * 2^24 = 3355443 */
+ tmp = (u64)80807124 - intlog10((tmpu << 8) | tmpl)
+ + 3355443;
+ cnr = div_u64(tmp * 10000, 1 << 24);
+ } else {
+ cnr = 0;
+ }
+
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].uvalue = cnr;
+ }
+
+ /* BER */
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ regmap_read(r_t, BERFLG_T, &flg);
+
+ if ((*status & FE_HAS_VITERBI) && (flg & BERFLG_T_BERVRDYA)) {
+ u32 bit_err, bit_cnt;
+
+ regmap_read(r_t, BERRDU_T, &tmpu);
+ regmap_read(r_t, BERRDM_T, &tmpm);
+ regmap_read(r_t, BERRDL_T, &tmpl);
+ bit_err = (tmpu << 16) | (tmpm << 8) | tmpl;
+
+ regmap_read(r_t, BERLENRDU_T, &tmpu);
+ regmap_read(r_t, BERLENRDL_T, &tmpl);
+ bit_cnt = ((tmpu << 8) | tmpl) * 203 * 8;
+
+ if (bit_cnt) {
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = bit_err;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = bit_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static int mn88443x_sleep(struct dvb_frontend *fe)
+{
+ struct mn88443x_priv *chip = fe->demodulator_priv;
+
+ mn88443x_s_sleep(chip);
+ mn88443x_t_sleep(chip);
+
+ return 0;
+}
+
+static int mn88443x_set_frontend(struct dvb_frontend *fe)
+{
+ struct mn88443x_priv *chip = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct regmap *r_s = chip->regmap_s;
+ struct regmap *r_t = chip->regmap_t;
+ u8 tssel = 0, intsel = 0;
+
+ if (c->delivery_system == SYS_ISDBS) {
+ mn88443x_s_wake(chip);
+ mn88443x_t_sleep(chip);
+
+ tssel = TSSET1_TSASEL_ISDBS;
+ intsel = TSSET3_INTASEL_S;
+ } else if (c->delivery_system == SYS_ISDBT) {
+ mn88443x_s_sleep(chip);
+ mn88443x_t_wake(chip);
+
+ mn88443x_t_set_freq(chip);
+
+ tssel = TSSET1_TSASEL_ISDBT;
+ intsel = TSSET3_INTASEL_T;
+ }
+
+ regmap_update_bits(r_t, TSSET1,
+ TSSET1_TSASEL_MASK | TSSET1_TSBSEL_MASK,
+ tssel | TSSET1_TSBSEL_NONE);
+ regmap_write(r_t, TSSET2, 0);
+ regmap_update_bits(r_t, TSSET3,
+ TSSET3_INTASEL_MASK | TSSET3_INTBSEL_MASK,
+ intsel | TSSET3_INTBSEL_NONE);
+
+ regmap_write(r_t, DOSET1_T, 0x95);
+ regmap_write(r_s, DOSET1_S, 0x80);
+
+ if (c->delivery_system == SYS_ISDBS)
+ mn88443x_s_tune(chip, c);
+ else if (c->delivery_system == SYS_ISDBT)
+ mn88443x_t_tune(chip, c);
+
+ if (fe->ops.tuner_ops.set_params) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ return 0;
+}
+
+static int mn88443x_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ s->min_delay_ms = 850;
+
+ if (c->delivery_system == SYS_ISDBS) {
+ s->max_drift = 30000 * 2 + 1;
+ s->step_size = 30000;
+ } else if (c->delivery_system == SYS_ISDBT) {
+ s->max_drift = 142857 * 2 + 1;
+ s->step_size = 142857 * 2;
+ }
+
+ return 0;
+}
+
+static int mn88443x_read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ struct mn88443x_priv *chip = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ if (c->delivery_system == SYS_ISDBS)
+ return mn88443x_s_read_status(chip, c, status);
+
+ if (c->delivery_system == SYS_ISDBT)
+ return mn88443x_t_read_status(chip, c, status);
+
+ return -EINVAL;
+}
+
+static const struct dvb_frontend_ops mn88443x_ops = {
+ .delsys = { SYS_ISDBS, SYS_ISDBT },
+ .info = {
+ .name = "Socionext MN88443x",
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 2071 * MHz,
+ .symbol_rate_min = 28860000,
+ .symbol_rate_max = 28860000,
+ .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .sleep = mn88443x_sleep,
+ .set_frontend = mn88443x_set_frontend,
+ .get_tune_settings = mn88443x_get_tune_settings,
+ .read_status = mn88443x_read_status,
+};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int mn88443x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mn88443x_config *conf = client->dev.platform_data;
+ struct mn88443x_priv *chip;
+ struct device *dev = &client->dev;
+ int ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ if (dev->of_node)
+ chip->spec = of_device_get_match_data(dev);
+ else
+ chip->spec = (struct mn88443x_spec *)id->driver_data;
+ if (!chip->spec)
+ return -EINVAL;
+
+ chip->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(chip->mclk) && !conf) {
+ dev_err(dev, "Failed to request mclk: %ld\n",
+ PTR_ERR(chip->mclk));
+ return PTR_ERR(chip->mclk);
+ }
+
+ ret = of_property_read_u32(dev->of_node, "if-frequency",
+ &chip->if_freq);
+ if (ret && !conf) {
+ dev_err(dev, "Failed to load IF frequency: %d.\n", ret);
+ return ret;
+ }
+
+ chip->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->reset_gpio)) {
+ dev_err(dev, "Failed to request reset_gpio: %ld\n",
+ PTR_ERR(chip->reset_gpio));
+ return PTR_ERR(chip->reset_gpio);
+ }
+
+ if (conf) {
+ chip->mclk = conf->mclk;
+ chip->if_freq = conf->if_freq;
+ chip->reset_gpio = conf->reset_gpio;
+
+ *conf->fe = &chip->fe;
+ }
+
+ chip->client_s = client;
+ chip->regmap_s = devm_regmap_init_i2c(chip->client_s, &regmap_config);
+ if (IS_ERR(chip->regmap_s))
+ return PTR_ERR(chip->regmap_s);
+
+ /*
+ * The chip responds on two I2C addresses, one for the ISDB-S block
+ * and one for the ISDB-T block. The ISDB-T address is the ISDB-S
+ * address + 4, so register a dummy client for it.
+ */
+ chip->client_t = i2c_new_dummy(client->adapter, client->addr + 4);
+ if (!chip->client_t)
+ return -ENODEV;
+
+ chip->regmap_t = devm_regmap_init_i2c(chip->client_t, &regmap_config);
+ if (IS_ERR(chip->regmap_t)) {
+ ret = PTR_ERR(chip->regmap_t);
+ goto err_i2c_t;
+ }
+
+ chip->clk_freq = clk_get_rate(chip->mclk);
+
+ memcpy(&chip->fe.ops, &mn88443x_ops, sizeof(mn88443x_ops));
+ chip->fe.demodulator_priv = chip;
+ i2c_set_clientdata(client, chip);
+
+ mn88443x_cmn_power_on(chip);
+ mn88443x_s_sleep(chip);
+ mn88443x_t_sleep(chip);
+
+ return 0;
+
+err_i2c_t:
+ i2c_unregister_device(chip->client_t);
+
+ return ret;
+}
+
+static int mn88443x_remove(struct i2c_client *client)
+{
+ struct mn88443x_priv *chip = i2c_get_clientdata(client);
+
+ mn88443x_cmn_power_off(chip);
+
+ i2c_unregister_device(chip->client_t);
+
+ return 0;
+}
+
+static const struct mn88443x_spec mn88443x_spec_pri = {
+ .primary = true,
+};
+
+static const struct mn88443x_spec mn88443x_spec_sec = {
+ .primary = false,
+};
+
+static const struct of_device_id mn88443x_of_match[] = {
+ { .compatible = "socionext,mn884433", .data = &mn88443x_spec_pri, },
+ { .compatible = "socionext,mn884434-0", .data = &mn88443x_spec_pri, },
+ { .compatible = "socionext,mn884434-1", .data = &mn88443x_spec_sec, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mn88443x_of_match);
+
+static const struct i2c_device_id mn88443x_i2c_id[] = {
+ { "mn884433", (kernel_ulong_t)&mn88443x_spec_pri },
+ { "mn884434-0", (kernel_ulong_t)&mn88443x_spec_pri },
+ { "mn884434-1", (kernel_ulong_t)&mn88443x_spec_sec },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
+
+static struct i2c_driver mn88443x_driver = {
+ .driver = {
+ .name = "mn88443x",
+ .of_match_table = of_match_ptr(mn88443x_of_match),
+ },
+ .probe = mn88443x_probe,
+ .remove = mn88443x_remove,
+ .id_table = mn88443x_i2c_id,
+};
+
+module_i2c_driver(mn88443x_driver);
+
+MODULE_AUTHOR("Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>");
+MODULE_DESCRIPTION("Socionext MN88443x series demodulator driver.");
+MODULE_LICENSE("GPL v2");
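
The CNR arithmetic in mn88443x_t_read_status() above is 24-bit fixed point: intlog10() returns log10(x) scaled by 2^24, so 80807124 is log10(65536) and 3355443 is 0.2 in that scale, and multiplying by 10000 before the final shift by 24 yields the DVBv5 0.001 dB units. A minimal user-space sketch of the same formula in plain floating point, useful for checking the fixed-point constants (the raw register value is hypothetical):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the driver's CNR formula:
     *   CNR[dB] = 10 * (log10(65536 / value) + 0.2)
     * The driver computes ((log10(65536) - log10(value) + 0.2) * 2^24),
     * then scales by 10000 and shifts right by 24, which leaves the
     * result in 0.001 dB units as FE_SCALE_DECIBEL expects.
     */
    int main(void)
    {
            uint32_t raw = 1024;    /* hypothetical CNRDU/CNRDL reading */
            double db = 10.0 * (log10(65536.0 / raw) + 0.2);

            printf("raw=%u -> CNR = %.3f dB (stat value %lld)\n",
                   raw, db, (long long)(db * 1000.0));
            return 0;
    }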
diff --git a/drivers/media/dvb-frontends/mn88443x.h b/drivers/media/dvb-frontends/mn88443x.h
new file mode 100644
index 000000000000..b19aaf6a1ea3
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88443x.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Socionext MN88443x series demodulator driver for ISDB-S/ISDB-T.
+ *
+ * Copyright (c) 2018 Socionext Inc.
+ */
+
+#ifndef MN88443X_H
+#define MN88443X_H
+
+#include <media/dvb_frontend.h>
+
+/* ISDB-T IF frequency */
+#define DIRECT_IF_57MHZ 57000000
+#define DIRECT_IF_44MHZ 44000000
+#define LOW_IF_4MHZ 4000000
+
+struct mn88443x_config {
+ struct clk *mclk;
+ u32 if_freq;
+ struct gpio_desc *reset_gpio;
+
+ /* Everything below is returned by the driver. */
+ struct dvb_frontend **fe;
+};
+
+#endif /* MN88443X_H */
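
For the non-OF case, a bridge or board file hands the demod its clock, IF frequency and reset line through the platform data above and picks up the dvb_frontend pointer that probe fills in via conf->fe. A sketch of that glue (the I2C address, device names and runtime clock/GPIO lookup are hypothetical; the struct layout is the one declared above):

    #include <linux/i2c.h>
    #include <media/dvb_frontend.h>
    #include "mn88443x.h"

    static struct dvb_frontend *demod_fe;

    static struct mn88443x_config demod_cfg = {
            .if_freq = DIRECT_IF_57MHZ,
            .fe      = &demod_fe,
            /* .mclk and .reset_gpio would be looked up at runtime */
    };

    static const struct i2c_board_info demod_info = {
            I2C_BOARD_INFO("mn884433", 0x68),       /* hypothetical address */
            .platform_data = &demod_cfg,
    };

    /* i2c_new_device(adapter, &demod_info) from the bridge probe triggers
     * mn88443x_probe(); afterwards demod_fe can be passed to
     * dvb_register_frontend().
     */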
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index e2a3fc521620..aad07adda37d 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -559,8 +559,8 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
dprintk("%s: Freq %d\n", __func__, p->frequency);
- if ((p->frequency < fe->ops.info.frequency_min)
- || (p->frequency > fe->ops.info.frequency_max))
+ if ((p->frequency < fe->ops.info.frequency_min_hz / kHz)
+ || (p->frequency > fe->ops.info.frequency_max_hz / kHz))
return -EINVAL;
if (((int)p->inversion < INVERSION_OFF)
@@ -755,10 +755,10 @@ static const struct dvb_frontend_ops mt312_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Zarlink ???? DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
/* FIXME: adjust freq to real used xtal */
- .frequency_stepsize = (MT312_PLL_CLK / 1000) / 128,
+ .frequency_stepsize_hz = MT312_PLL_CLK / 128,
.symbol_rate_min = MT312_SYS_CLK / 128, /* FIXME as above */
.symbol_rate_max = MT312_SYS_CLK / 2,
.caps =
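
Note the conversion pattern in the set_frontend hunk above: satellite (QPSK) frontends still receive dtv properties denominated in kHz, while the info limits are now stored in Hz, so the range check divides the Hz fields back down. A sketch of that pattern, assuming the kHz/MHz multiplier constants that the rest of this series adds to dvb_frontend.h (1000 and 1000000):

    /* Range-check a satellite tuning request given in kHz against the
     * Hz-based limits; the same shape as the mt312/zl10036 checks above.
     */
    static int check_sat_frequency(const struct dvb_frontend *fe, u32 freq_khz)
    {
            const struct dvb_frontend_internal_info *info = &fe->ops.info;

            if (freq_khz < info->frequency_min_hz / kHz ||
                freq_khz > info->frequency_max_hz / kHz)
                    return -EINVAL;

            return 0;
    }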
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index a440b76444d3..da3e466d50e2 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -567,10 +567,9 @@ static const struct dvb_frontend_ops mt352_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink MT352 DVB-T",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 274d8fca0763..295f37d5f10e 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -784,10 +784,8 @@ static struct dvb_frontend_ops mxl_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "MaxLinear MxL5xx DVB-S/S2 tuner-demodulator",
- .frequency_min = 300000,
- .frequency_max = 2350000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 300 * MHz,
+ .frequency_max_hz = 2350 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index a6cc4952eb74..0961e686ff68 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -1212,9 +1212,9 @@ static const struct dvb_frontend_ops nxt200x_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Nextwave NXT200X VSB/QAM frontend",
- .frequency_min = 54000000,
- .frequency_max = 860000000,
- .frequency_stepsize = 166666, /* stepsize is just a guess */
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_stepsize_hz = 166666, /* stepsize is just a guess */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_8VSB | FE_CAN_QAM_64 | FE_CAN_QAM_256
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 109a635d166a..72e447e8ba64 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -596,9 +596,9 @@ static const struct dvb_frontend_ops nxt6000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NxtWave NXT6000 DVB-T",
- .frequency_min = 0,
- .frequency_max = 863250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 0,
+ .frequency_max_hz = 863250 * kHz,
+ .frequency_stepsize_hz = 62500,
/*.frequency_tolerance = *//* FIXME: 12% of SR */
.symbol_rate_min = 0, /* FIXME */
.symbol_rate_max = 9360000, /* FIXME */
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index b4c9aadcb552..fc35f37eb3c0 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -583,9 +583,9 @@ static const struct dvb_frontend_ops or51132_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51132 VSB/QAM Frontend",
- .frequency_min = 44000000,
- .frequency_max = 958000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 44 * MHz,
+ .frequency_max_hz = 958 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index b65ba34fd00a..a39bbd8ff1f0 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -530,10 +530,10 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config,
static const struct dvb_frontend_ops or51211_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
- .name = "Oren OR51211 VSB Frontend",
- .frequency_min = 44000000,
- .frequency_max = 958000000,
- .frequency_stepsize = 166666,
+ .name = "Oren OR51211 VSB Frontend",
+ .frequency_min_hz = 44 * MHz,
+ .frequency_max_hz = 958 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_8VSB
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 7bbfe11d11ed..c0659568471b 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -24,9 +24,9 @@ static int rtl2830_bulk_write(struct i2c_client *client, unsigned int reg,
struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
ret = regmap_bulk_write(dev->regmap, reg, val, val_count);
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
return ret;
}
@@ -36,9 +36,9 @@ static int rtl2830_update_bits(struct i2c_client *client, unsigned int reg,
struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
ret = regmap_update_bits(dev->regmap, reg, mask, val);
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
return ret;
}
@@ -48,9 +48,9 @@ static int rtl2830_bulk_read(struct i2c_client *client, unsigned int reg,
struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
ret = regmap_bulk_read(dev->regmap, reg, val, val_count);
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
return ret;
}
@@ -159,8 +159,8 @@ static int rtl2830_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
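
The locking conversion above is not purely mechanical: i2c_lock_adapter() always took the root adapter lock, whereas i2c_lock_bus(..., I2C_LOCK_SEGMENT) locks only the bus segment the client sits on, which is the correct scope when the device lives behind an I2C mux. The regmap here is assumed to issue unlocked (__i2c_transfer-based) accesses, so the explicit segment lock brackets them without deadlocking. A sketch of the pattern with a hypothetical register read:

    /* Take the segment lock so a paired access through the same mux
     * branch cannot interleave, then let regmap issue its (unlocked)
     * transfers under that lock.
     */
    static int demod_locked_read(struct i2c_client *client,
                                 struct regmap *regmap,
                                 unsigned int reg, unsigned int *val)
    {
            int ret;

            i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
            ret = regmap_read(regmap, reg, val);
            i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);

            return ret;
    }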
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index fa3b8169c1a5..2f1f5cbaf03c 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -408,8 +408,8 @@ static int rtl2832_get_tune_settings(struct dvb_frontend *fe,
dev_dbg(&client->dev, "\n");
s->min_delay_ms = 1000;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
@@ -841,9 +841,9 @@ static const struct dvb_frontend_ops rtl2832_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Realtek RTL2832 (DVB-T)",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index c6e78d870ccd..d448d9d4879c 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -301,7 +301,7 @@ static int rtl2832_sdr_submit_urbs(struct rtl2832_sdr_dev *dev)
for (i = 0; i < dev->urbs_initialized; i++) {
dev_dbg(&pdev->dev, "submit urb=%d\n", i);
- ret = usb_submit_urb(dev->urb_list[i], GFP_ATOMIC);
+ ret = usb_submit_urb(dev->urb_list[i], GFP_KERNEL);
if (ret) {
dev_err(&pdev->dev,
"Could not submit urb no. %d - get them all back\n",
@@ -345,7 +345,7 @@ static int rtl2832_sdr_alloc_stream_bufs(struct rtl2832_sdr_dev *dev)
for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) {
dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev,
- BULK_BUFFER_SIZE, GFP_ATOMIC,
+ BULK_BUFFER_SIZE, GFP_KERNEL,
&dev->dma_addr[dev->buf_num]);
if (!dev->buf_list[dev->buf_num]) {
dev_dbg(&pdev->dev, "alloc buf=%d failed\n",
@@ -390,7 +390,7 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
- dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
+ dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_list[i]) {
for (j = 0; j < i; j++)
usb_free_urb(dev->urb_list[j]);
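
The GFP_ATOMIC to GFP_KERNEL switches above are safe because these allocation and submission paths run from process context (stream setup), where sleeping is allowed; GFP_ATOMIC should be reserved for paths that cannot sleep, such as resubmitting a URB from its completion handler. A one-line sketch of the rule:

    #include <linux/usb.h>

    /* Pick the allocation mode from the calling context:
     * process context (probe, start_streaming) -> GFP_KERNEL (may sleep)
     * atomic context (URB complete(), IRQ)     -> GFP_ATOMIC (never sleeps)
     */
    static int submit_stream_urb(struct urb *urb, bool in_atomic_path)
    {
            return usb_submit_urb(urb, in_atomic_path ? GFP_ATOMIC : GFP_KERNEL);
    }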
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index a23ba8727218..ceeb0c3551ce 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -999,9 +999,9 @@ static const struct dvb_frontend_ops s5h1409_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1409 QAM/8VSB Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index af5962807f2c..98aeed1d2284 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -918,9 +918,9 @@ static const struct dvb_frontend_ops s5h1411_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1411 QAM/8VSB Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index 8b2222530227..a65cdf8e8cd9 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -934,10 +934,10 @@ static const struct dvb_frontend_ops s5h1420_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Samsung S5H1420/PnpNetwork PN1010 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
/* .symbol_rate_tolerance = ???,*/
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 740a60df0455..4dc3febc0e12 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -370,9 +370,9 @@ static const struct dvb_frontend_ops s5h1432_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Samsung s5h1432 DVB-T Frontend",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index 6c9015236655..79276871112a 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -510,15 +510,14 @@ static const struct dvb_frontend_ops s921_ops = {
/* Use dib8000 values per default */
.info = {
.name = "Sharp S921",
- .frequency_min = 470000000,
+ .frequency_min_hz = 470 * MHz,
/*
 * Max should be 770 MHz instead, according to the Sharp docs,
 * but the Leadership doc says it works up to 806 MHz. This is
 * required to get channel 69, used in Brazil.
*/
- .frequency_max = 806000000,
- .frequency_tolerance = 0,
- .caps = FE_CAN_INVERSION_AUTO |
+ .frequency_max_hz = 806 * MHz,
+ .caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 2dd336f95cbf..feacd8da421d 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1120,7 +1120,7 @@ static const struct dvb_frontend_ops si2165_ops = {
.symbol_rate_min = 1000000,
.symbol_rate_max = 7200000,
/* For DVB-T */
- .frequency_stepsize = 166667,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 9b32a1b3205e..8546a236d452 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -870,10 +870,9 @@ static const struct dvb_frontend_ops si21xx_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "SL SI21XX DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 1d57a20093fc..8d31cf3f4f07 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -584,9 +584,9 @@ static const struct dvb_frontend_ops sp8870_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP8870 DVB-T",
- .frequency_min = 470000000,
- .frequency_max = 860000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 57a0d0ae2b48..c02f50995df4 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -594,9 +594,9 @@ static const struct dvb_frontend_ops sp887x_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP887x DVB-T",
- .frequency_min = 50500000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 50500 * kHz,
+ .frequency_max_hz = 858000 * kHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 3c654ae16e78..874e9c9125d6 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1584,10 +1584,8 @@ static const struct dvb_frontend_ops stb0899_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STB0899 Multistandard",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.symbol_rate_min = 5000000,
.symbol_rate_max = 45000000,
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 69c03892f2da..786b9eccde00 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -188,8 +188,8 @@ static int stb6000_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops stb6000_tuner_ops = {
.info = {
.name = "ST STB6000",
- .frequency_min = 950000,
- .frequency_max = 2150000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz
},
.release = stb6000_release,
.sleep = stb6000_sleep,
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 3a851f524b16..30ac584dfab3 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -527,9 +527,8 @@ static int stb6100_set_params(struct dvb_frontend *fe)
static const struct dvb_tuner_ops stb6100_ops = {
.info = {
.name = "STB6100 Silicon Tuner",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = stb6100_init,
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index f947ed947aae..c9a9fa4e2c1b 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -533,10 +533,9 @@ static const struct dvb_frontend_ops stv0288_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0288 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index b823c04e24d3..9a9915f71483 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -694,9 +694,9 @@ static const struct dvb_frontend_ops stv0297_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0297 DVB-C",
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 633b90e6fe86..4f466394a16c 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -717,10 +717,9 @@ static const struct dvb_frontend_ops stv0299_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0299 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 5435c908e298..5b91e740e135 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -1693,10 +1693,9 @@ static const struct dvb_frontend_ops stv0367ter_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "ST STV0367 DVB-T",
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 15625,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 15625,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
@@ -2867,9 +2866,9 @@ static const struct dvb_frontend_ops stv0367cab_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0367 DVB-C",
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
.caps = 0x400 |/* FE_CAN_QAM_4 */
@@ -3273,10 +3272,9 @@ static const struct dvb_frontend_ops stv0367ddb_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBT },
.info = {
.name = "ST STV0367 DDB DVB-C/T",
- .frequency_min = 47000000,
- .frequency_max = 865000000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 865 * MHz,
+ .frequency_stepsize_hz = 166667,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
.caps = /* DVB-C */
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 72f17b97ca04..254618a06140 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1875,10 +1875,9 @@ static const struct dvb_frontend_ops stv0900_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV0900 frontend",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500,
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 9133f65d4623..a0622bb71803 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -4905,10 +4905,8 @@ static const struct dvb_frontend_ops stv090x_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV090x Multistandard",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c
index 41444fa1c0bb..4c86073f1a8d 100644
--- a/drivers/media/dvb-frontends/stv0910.c
+++ b/drivers/media/dvb-frontends/stv0910.c
@@ -682,8 +682,8 @@ static int get_bit_error_rate_s(struct stv *state, u32 *bernumerator,
return -EINVAL;
if ((regs[0] & 0x80) == 0) {
- state->last_berdenominator = 1 << ((state->berscale * 2) +
- 10 + 3);
+ state->last_berdenominator = 1ULL << ((state->berscale * 2) +
+ 10 + 3);
state->last_bernumerator = ((u32)(regs[0] & 0x7F) << 16) |
((u32)regs[1] << 8) | regs[2];
if (state->last_bernumerator < 256 && state->berscale < 6) {
@@ -1724,10 +1724,8 @@ static const struct dvb_frontend_ops stv0910_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "ST STV0910",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.symbol_rate_min = 100000,
.symbol_rate_max = 70000000,
.caps = FE_CAN_INVERSION_AUTO |
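
The 1ULL change in get_bit_error_rate_s() above addresses integer promotion: `1 << n` is evaluated in int, so the shift is undefined for n >= 31 even when the result lands in a u64. With berscale capped below 6 the shift here never exceeds 25 in practice, but the 64-bit literal keeps the denominator arithmetic robust. A small user-space illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int n = 33;

            /* (1 << n) is int arithmetic: undefined behaviour for
             * n >= 31, even if the result is stored in 64 bits. */
            uint64_t bad  = 1 << n;     /* wrong: 32-bit shift */
            uint64_t good = 1ULL << n;  /* correct: 64-bit shift */

            printf("1 << %d    -> %llu (garbage)\n", n,
                   (unsigned long long)bad);
            printf("1ULL << %d -> %llu\n", n,
                   (unsigned long long)good);
            return 0;
    }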
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 6aad0efa3174..7db9a5bceccc 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -371,9 +371,9 @@ static int stv6110_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
static const struct dvb_tuner_ops stv6110_tuner_ops = {
.info = {
.name = "ST STV6110",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 1000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.init = stv6110_init,
.release = stv6110_release,
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index d8950028d021..82c002d3833a 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -347,10 +347,9 @@ static void stv6110x_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops stv6110x_ops = {
.info = {
- .name = "STV6110(A) Silicon Tuner",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0,
+ .name = "STV6110(A) Silicon Tuner",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.release = stv6110x_release
};
diff --git a/drivers/media/dvb-frontends/stv6111.c b/drivers/media/dvb-frontends/stv6111.c
index 9b715b6fe152..0cf460111acb 100644
--- a/drivers/media/dvb-frontends/stv6111.c
+++ b/drivers/media/dvb-frontends/stv6111.c
@@ -646,9 +646,8 @@ static int get_rf_strength(struct dvb_frontend *fe, u16 *st)
static const struct dvb_tuner_ops tuner_ops = {
.info = {
.name = "ST STV6111",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.set_params = set_params,
.release = release,
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 7abf6b0916ed..2ad81a438d6a 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -714,8 +714,8 @@ static const struct dvb_frontend_ops tc90522_ops_sat = {
.delsys = { SYS_ISDBS },
.info = {
.name = "Toshiba TC90522 ISDB-S module",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO,
@@ -734,9 +734,9 @@ static const struct dvb_frontend_ops tc90522_ops_ter = {
.delsys = { SYS_ISDBT },
.info = {
.name = "Toshiba TC90522 ISDB-T module",
- .frequency_min = 470000000,
- .frequency_max = 770000000,
- .frequency_stepsize = 142857,
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 770 * MHz,
+ .frequency_stepsize_hz = 142857,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index 4f588ebde39d..5cd885d4ea04 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -487,11 +487,11 @@ static const struct dvb_frontend_ops tda10021_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10021 DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .symbol_rate_min = (XIN/2)/64, /* SACLK/64 == (XIN/2)/64 */
- .symbol_rate_max = (XIN/2)/4, /* SACLK/4 */
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
+ .symbol_rate_min = (XIN / 2) / 64, /* SACLK/64 == (XIN/2)/64 */
+ .symbol_rate_max = (XIN / 2) / 4, /* SACLK/4 */
#if 0
.frequency_tolerance = ???,
.symbol_rate_tolerance = ???, /* ppm */ /* == 8% (spec p. 5) */
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 6c84916234e3..0a9a54563ebe 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -575,9 +575,9 @@ static const struct dvb_frontend_ops tda10023_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10023 DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 0, /* set in tda10023_attach */
.symbol_rate_max = 0, /* set in tda10023_attach */
.caps = 0x400 | //FE_CAN_QAM_4
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index de82a2558e15..c01d60a88af2 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1156,9 +1156,9 @@ static const struct dvb_frontend_ops tda10048_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NXP TDA10048HN DVB-T",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 7dcfb4a4b2d0..e506f66657bb 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -329,7 +329,7 @@ static int tda1004x_do_upload(struct tda1004x_state *state,
tda1004x_write_byteI(state, dspCodeCounterReg, 0);
fw_msg.addr = state->config->demod_address;
- i2c_lock_adapter(state->i2c);
+ i2c_lock_bus(state->i2c, I2C_LOCK_SEGMENT);
buf[0] = dspCodeInReg;
while (pos != len) {
// work out how much to send this time
@@ -342,14 +342,14 @@ static int tda1004x_do_upload(struct tda1004x_state *state,
fw_msg.len = tx_size + 1;
if (__i2c_transfer(state->i2c, &fw_msg, 1) != 1) {
printk(KERN_ERR "tda1004x: Error during firmware upload\n");
- i2c_unlock_adapter(state->i2c);
+ i2c_unlock_bus(state->i2c, I2C_LOCK_SEGMENT);
return -EIO;
}
pos += tx_size;
dprintk("%s: fw_pos=0x%x\n", __func__, pos);
}
- i2c_unlock_adapter(state->i2c);
+ i2c_unlock_bus(state->i2c, I2C_LOCK_SEGMENT);
/* give the DSP a chance to settle 03/10/05 Hac */
msleep(100);
@@ -1249,9 +1249,9 @@ static const struct dvb_frontend_ops tda10045_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10045H DVB-T",
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps =
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
@@ -1319,9 +1319,9 @@ static const struct dvb_frontend_ops tda10046_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10046H DVB-T",
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps =
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 1ed67c08e699..097c42d3f8c2 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -681,8 +681,8 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
cmd.args[5] = (c->frequency >> 0) & 0xff;
cmd.args[6] = ((c->symbol_rate / 1000) >> 8) & 0xff;
cmd.args[7] = ((c->symbol_rate / 1000) >> 0) & 0xff;
- cmd.args[8] = (tda10071_ops.info.frequency_tolerance >> 8) & 0xff;
- cmd.args[9] = (tda10071_ops.info.frequency_tolerance >> 0) & 0xff;
+ cmd.args[8] = ((tda10071_ops.info.frequency_tolerance_hz / 1000) >> 8) & 0xff;
+ cmd.args[9] = ((tda10071_ops.info.frequency_tolerance_hz / 1000) >> 0) & 0xff;
cmd.args[10] = rolloff;
cmd.args[11] = inversion;
cmd.args[12] = pilot;
@@ -1106,9 +1106,9 @@ static const struct dvb_frontend_ops tda10071_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "NXP TDA10071",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index 1a95c521e97f..8323e4e53d66 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -710,9 +710,9 @@ static const struct dvb_frontend_ops tda10086_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA10086 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index 2e1d36ae943b..5ce58612315d 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -1154,6 +1154,7 @@ static int set_params(struct dvb_frontend *fe)
default:
return -EINVAL;
}
+ break;
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
if (bw <= 6000000)
@@ -1214,9 +1215,9 @@ static int get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
static const struct dvb_tuner_ops tuner_ops = {
.info = {
.name = "NXP TDA18271C2D",
- .frequency_min = 47125000,
- .frequency_max = 865000000,
- .frequency_step = 62500
+ .frequency_min_hz = 47125 * kHz,
+ .frequency_max_hz = 865 * MHz,
+ .frequency_step_hz = 62500
},
.init = init,
.sleep = sleep,
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 3ef7140ed7f3..8766c9ff6680 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -231,9 +231,9 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
info = &fe->ops.tuner_ops.info;
memcpy(info->name, config->name, sizeof(config->name));
- info->frequency_min = config->frequency_min;
- info->frequency_max = config->frequency_max;
- info->frequency_step = config->frequency_offst;
+ info->frequency_min_hz = config->frequency_min;
+ info->frequency_max_hz = config->frequency_max;
+ info->frequency_step_hz = config->frequency_offst;
printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name);
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 29b4f64c030c..53b26060db7e 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -453,10 +453,9 @@ static const struct dvb_frontend_ops tda8083_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA8083 DVB-S",
- .frequency_min = 920000, /* TDA8060 */
- .frequency_max = 2200000, /* TDA8060 */
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- /* .frequency_tolerance = ???,*/
+ .frequency_min_hz = 920 * MHz, /* TDA8060 */
+ .frequency_max_hz = 2200 * MHz, /* TDA8060 */
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 12000000,
.symbol_rate_max = 30000000,
/* .symbol_rate_tolerance = ???,*/
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index f72a54e7eb23..500f50b81b66 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -163,10 +163,9 @@ static void tda8261_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda8261_ops = {
.info = {
- .name = "TDA8261",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0
+ .name = "TDA8261",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.set_params = tda8261_set_params,
@@ -190,7 +189,7 @@ struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
fe->tuner_priv = state;
fe->ops.tuner_ops = tda8261_ops;
- fe->ops.tuner_ops.info.frequency_step = div_tab[config->step_size];
+ fe->ops.tuner_ops.info.frequency_step_hz = div_tab[config->step_size] * kHz;
pr_info("%s: Attaching TDA8261 8PSK/QPSK tuner\n", __func__);
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index da427b4c2aaa..100da5d5fdc5 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -131,8 +131,8 @@ static int tda826x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tda826x_tuner_ops = {
.info = {
.name = "Philips TDA826X",
- .frequency_min = 950000,
- .frequency_max = 2175000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2175 * MHz
},
.release = tda826x_release,
.sleep = tda826x_sleep,
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index c55882a8da06..3e3e40878633 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -498,8 +498,8 @@ static int ts2020_read_signal_strength(struct dvb_frontend *fe,
static const struct dvb_tuner_ops ts2020_tuner_ops = {
.info = {
.name = "TS2020",
- .frequency_min = 950000,
- .frequency_max = 2150000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz
},
.init = ts2020_init,
.release = ts2020_release,
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 1d41abd47f04..b233b7be0b84 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -155,9 +155,9 @@ static int tua6100_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tua6100_tuner_ops = {
.info = {
.name = "Infineon TUA6100",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 1000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.release = tua6100_release,
.sleep = tua6100_sleep,
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index 17600989f121..eb1249d81310 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -412,9 +412,9 @@ static const struct dvb_frontend_ops ves1820_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "VLSI VES1820 DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_16 |
FE_CAN_QAM_32 |
FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index 0c7b3286b04d..ddc5bfd84cd5 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -516,10 +516,10 @@ static const struct dvb_frontend_ops ves1x93_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "VLSI VES1x93 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
/* .symbol_rate_tolerance = ???,*/
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 89dd65ae88ad..f1c92338015d 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -311,8 +311,8 @@ static int zl10036_set_params(struct dvb_frontend *fe)
/* ensure correct values
* maybe redundant as core already checks this */
- if ((frequency < fe->ops.info.frequency_min)
- || (frequency > fe->ops.info.frequency_max))
+ if ((frequency < fe->ops.info.frequency_min_hz / kHz)
+ || (frequency > fe->ops.info.frequency_max_hz / kHz))
return -EINVAL;
/*
@@ -443,8 +443,8 @@ static int zl10036_init(struct dvb_frontend *fe)
static const struct dvb_tuner_ops zl10036_tuner_ops = {
.info = {
.name = "Zarlink ZL10036",
- .frequency_min = 950000,
- .frequency_max = 2175000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2175 * MHz
},
.init = zl10036_init,
.release = zl10036_release,
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index c9901f45deb7..42e63a3fa121 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -635,10 +635,9 @@ static const struct dvb_frontend_ops zl10353_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink ZL10353 DVB-T",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
diff --git a/drivers/media/firewire/firedtv-fe.c b/drivers/media/firewire/firedtv-fe.c
index a2ef4ede8ebe..69087ae6c1d0 100644
--- a/drivers/media/firewire/firedtv-fe.c
+++ b/drivers/media/firewire/firedtv-fe.c
@@ -152,7 +152,7 @@ static int fdtv_set_frontend(struct dvb_frontend *fe)
void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
{
struct dvb_frontend_ops *ops = &fdtv->fe.ops;
- struct dvb_frontend_info *fi = &ops->info;
+ struct dvb_frontend_internal_info *fi = &ops->info;
ops->init = fdtv_dvb_init;
ops->sleep = fdtv_sleep;
@@ -174,9 +174,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
case FIREDTV_DVB_S:
ops->delsys[0] = SYS_DVBS;
- fi->frequency_min = 950000;
- fi->frequency_max = 2150000;
- fi->frequency_stepsize = 125;
+ fi->frequency_min_hz = 950 * MHz;
+ fi->frequency_max_hz = 2150 * MHz;
+ fi->frequency_stepsize_hz = 125 * kHz;
fi->symbol_rate_min = 1000000;
fi->symbol_rate_max = 40000000;
@@ -194,9 +194,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
ops->delsys[0] = SYS_DVBS;
ops->delsys[1] = SYS_DVBS2;
- fi->frequency_min = 950000;
- fi->frequency_max = 2150000;
- fi->frequency_stepsize = 125;
+ fi->frequency_min_hz = 950 * MHz;
+ fi->frequency_max_hz = 2150 * MHz;
+ fi->frequency_stepsize_hz = 125 * kHz;
fi->symbol_rate_min = 1000000;
fi->symbol_rate_max = 40000000;
@@ -214,9 +214,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
case FIREDTV_DVB_C:
ops->delsys[0] = SYS_DVBC_ANNEX_A;
- fi->frequency_min = 47000000;
- fi->frequency_max = 866000000;
- fi->frequency_stepsize = 62500;
+ fi->frequency_min_hz = 47 * MHz;
+ fi->frequency_max_hz = 866 * MHz;
+ fi->frequency_stepsize_hz = 62500;
fi->symbol_rate_min = 870000;
fi->symbol_rate_max = 6900000;
@@ -232,9 +232,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
case FIREDTV_DVB_T:
ops->delsys[0] = SYS_DVBT;
- fi->frequency_min = 49000000;
- fi->frequency_max = 861000000;
- fi->frequency_stepsize = 62500;
+ fi->frequency_min_hz = 49 * MHz;
+ fi->frequency_max_hz = 861 * MHz;
+ fi->frequency_stepsize_hz = 62500;
fi->caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_2_3 |
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 341452fe98df..82af97430e5b 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -326,6 +326,16 @@ config VIDEO_AD5820
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
+config VIDEO_AK7375
+ tristate "AK7375 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ help
+ This is a driver for the AK7375 camera lens voice coil.
+ AK7375 is a 12 bit DAC with 120mA output current sink
+ capability. This is designed for linear control of
+ voice coil motors, controlled via I2C serial interface.
+
config VIDEO_DW9714
tristate "DW9714 lens voice coil support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
@@ -336,6 +346,16 @@ config VIDEO_DW9714
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
+config VIDEO_DW9807_VCM
+ tristate "DW9807 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ help
+ This is a driver for the DW9807 camera lens voice coil.
+ DW9807 is a 10 bit DAC with 100mA output current sink
+ capability. This is designed for linear control of
+ voice coil motors, controlled via I2C serial interface.
+
config VIDEO_SAA7110
tristate "Philips SAA7110 video decoder"
depends on VIDEO_V4L2 && I2C
@@ -378,7 +398,7 @@ config VIDEO_TVP514X
depends on VIDEO_V4L2 && I2C
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the TI TVP5146/47
+ This is a Video4Linux2 sensor driver for the TI TVP5146/47
decoder. It is currently working with the TI OMAP3 camera
controller.
@@ -580,7 +600,7 @@ config VIDEO_IMX258
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Sony
+ This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
To compile this driver as a module, choose M here: the
@@ -591,7 +611,7 @@ config VIDEO_IMX274
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a V4L2 sensor-level driver for the Sony IMX274
+ This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
config VIDEO_OV2640
@@ -599,7 +619,7 @@ config VIDEO_OV2640
depends on VIDEO_V4L2 && I2C
depends on MEDIA_CAMERA_SUPPORT
help
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
To compile this driver as a module, choose M here: the
@@ -611,19 +631,31 @@ config VIDEO_OV2659
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV2659 camera.
To compile this driver as a module, choose M here: the
module will be called ov2659.
+config VIDEO_OV2680
+ tristate "OmniVision OV2680 sensor support"
+ depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
+ depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
+ ---help---
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV2680 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2680.
+
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV2685 camera.
To compile this driver as a module, choose M here: the
@@ -636,7 +668,7 @@ config VIDEO_OV5640
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
+ This is a Video4Linux2 sensor driver for the Omnivision
OV5640 camera sensor with a MIPI CSI-2 interface.
config VIDEO_OV5645
@@ -646,7 +678,7 @@ config VIDEO_OV5645
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5645 camera.
To compile this driver as a module, choose M here: the
@@ -658,7 +690,7 @@ config VIDEO_OV5647
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5647 camera.
To compile this driver as a module, choose M here: the
@@ -669,7 +701,7 @@ config VIDEO_OV6650
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
To compile this driver as a module, choose M here: the
@@ -682,7 +714,7 @@ config VIDEO_OV5670
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5670 camera.
To compile this driver as a module, choose M here: the
@@ -693,7 +725,7 @@ config VIDEO_OV5695
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
To compile this driver as a module, choose M here: the
@@ -705,7 +737,7 @@ config VIDEO_OV7251
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7251 camera.
To compile this driver as a module, choose M here: the
@@ -716,7 +748,7 @@ config VIDEO_OV772X
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV772x camera.
To compile this driver as a module, choose M here: the
@@ -727,7 +759,7 @@ config VIDEO_OV7640
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
To compile this driver as a module, choose M here: the
@@ -739,7 +771,7 @@ config VIDEO_OV7670
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
@@ -748,14 +780,14 @@ config VIDEO_OV7740
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
config VIDEO_OV9650
tristate "OmniVision OV9650/OV9652 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
---help---
- This is a V4L2 sensor-level driver for the Omnivision
+ This is a V4L2 sensor driver for the OmniVision
OV9650 and OV9652 camera sensors.
config VIDEO_OV13858
@@ -764,7 +796,7 @@ config VIDEO_OV13858
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
config VIDEO_VS6624
@@ -772,7 +804,7 @@ config VIDEO_VS6624
depends on VIDEO_V4L2 && I2C
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the ST VS6624
+ This is a Video4Linux2 sensor driver for the ST VS6624
camera.
To compile this driver as a module, choose M here: the
@@ -800,7 +832,7 @@ config VIDEO_MT9P031
depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
---help---
- This is a Video4Linux2 sensor-level driver for the Aptina
+ This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9p031 5 Mpixel camera.
config VIDEO_MT9T001
@@ -808,7 +840,7 @@ config VIDEO_MT9T001
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Aptina
+ This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9t001 3 Mpixel camera.
config VIDEO_MT9T112
@@ -816,7 +848,7 @@ config VIDEO_MT9T112
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Aptina
+ This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel camera.
To compile this driver as a module, choose M here: the
@@ -827,7 +859,7 @@ config VIDEO_MT9V011
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
+ This is a Video4Linux2 sensor driver for the Micron
mt9v011 1.3 Mpixel camera. It currently only works with the
em28xx driver.
@@ -838,9 +870,20 @@ config VIDEO_MT9V032
select REGMAP_I2C
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
+ This is a Video4Linux2 sensor driver for the Micron
MT9V032 752x480 CMOS sensor.
+config VIDEO_MT9V111
+ tristate "Aptina MT9V111 sensor support"
+ depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This is a Video4Linux2 sensor driver for the Aptina/Micron
+ MT9V111 sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mt9v111.
+
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
@@ -857,12 +900,23 @@ config VIDEO_NOON010PC30
source "drivers/media/i2c/m5mols/Kconfig"
+config VIDEO_RJ54N1
+ tristate "Sharp RJ54N1CB0C sensor support"
+ depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This is a V4L2 sensor driver for the Sharp RJ54N1CB0C CMOS image
+ sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rj54n1.
+
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
---help---
- This is a V4L2 sensor-level driver for Samsung S5K6AA(FX) 1.3M
+ This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
camera sensor with an embedded SoC image signal processor.
config VIDEO_S5K6A3
@@ -870,7 +924,7 @@ config VIDEO_S5K6A3
depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
---help---
- This is a V4L2 sensor-level driver for Samsung S5K6A3 raw
+ This is a V4L2 sensor driver for Samsung S5K6A3 raw
camera sensor.
config VIDEO_S5K4ECGX
@@ -878,7 +932,7 @@ config VIDEO_S5K4ECGX
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select CRC32
---help---
- This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
+ This is a V4L2 sensor driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
config VIDEO_S5K5BAF
@@ -886,7 +940,7 @@ config VIDEO_S5K5BAF
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
---help---
- This is a V4L2 sensor-level driver for Samsung S5K5BAF 2M
+ This is a V4L2 sensor driver for Samsung S5K5BAF 2M
camera sensor with an embedded SoC image signal processor.
source "drivers/media/i2c/smiapp/Kconfig"
@@ -897,7 +951,7 @@ config VIDEO_S5C73M3
depends on I2C && SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
---help---
- This is a V4L2 sensor-level driver for Samsung S5C73M3
+ This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
comment "Flash devices"
@@ -1002,6 +1056,7 @@ config VIDEO_I2C
tristate "I2C transport video support"
depends on VIDEO_V4L2 && I2C
select VIDEOBUF2_VMALLOC
+ imply HWMON
---help---
Enable the I2C transport video support which supports the
following:
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index d679d57cd3b3..a94eb03d10d4 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -23,7 +23,9 @@ obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
+obj-$(CONFIG_VIDEO_AK7375) += ak7375.o
obj-$(CONFIG_VIDEO_DW9714) += dw9714.o
+obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
+obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
@@ -84,8 +87,10 @@ obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
obj-$(CONFIG_VIDEO_MT9T112) += mt9t112.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
+obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
+obj-$(CONFIG_VIDEO_RJ54N1) += rj54n1cb0c.o
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_S5K6A3) += s5k6a3.o
obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 91ff06088572..5b008b0002c0 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1134,6 +1134,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_hdl;
}
state->pad.flags = MEDIA_PAD_FL_SINK;
+ sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
goto err_hdl;
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 25d24a3f10a7..de10367d550b 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -461,6 +461,22 @@ static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
+static int adv7180_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct adv7180_state *state = to_state(sd);
+
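+ /*
+ * 525-line/60 Hz standards (NTSC) run at 30000/1001 (~29.97) frames
+ * per second, the 50 Hz standards at 25; the interval below is
+ * expressed as seconds per frame.
+ */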
+ if (state->curr_norm & V4L2_STD_525_60) {
+ fi->interval.numerator = 1001;
+ fi->interval.denominator = 30000;
+ } else {
+ fi->interval.numerator = 1;
+ fi->interval.denominator = 25;
+ }
+
+ return 0;
+}
+
static void adv7180_set_power_pin(struct adv7180_state *state, bool on)
{
if (!state->pwdn_gpio)
@@ -644,6 +660,9 @@ static int adv7180_mbus_fmt(struct v4l2_subdev *sd,
fmt->width = 720;
fmt->height = state->curr_norm & V4L2_STD_525_60 ? 480 : 576;
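+ /* In ALTERNATE field mode each buffer carries a single field, i.e. half the frame lines */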
+ if (state->field == V4L2_FIELD_ALTERNATE)
+ fmt->height /= 2;
+
return 0;
}
@@ -711,11 +730,11 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
switch (format->format.field) {
case V4L2_FIELD_NONE:
- if (!(state->chip_info->flags & ADV7180_FLAG_I2P))
- format->format.field = V4L2_FIELD_INTERLACED;
- break;
+ if (state->chip_info->flags & ADV7180_FLAG_I2P)
+ break;
+ /* fall through */
default:
- format->format.field = V4L2_FIELD_INTERLACED;
+ format->format.field = V4L2_FIELD_ALTERNATE;
break;
}
@@ -817,6 +836,7 @@ static int adv7180_subscribe_event(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.s_std = adv7180_s_std,
.g_std = adv7180_g_std,
+ .g_frame_interval = adv7180_g_frame_interval,
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
@@ -1291,7 +1311,7 @@ static int adv7180_probe(struct i2c_client *client,
return -ENOMEM;
state->client = client;
- state->field = V4L2_FIELD_INTERLACED;
+ state->field = V4L2_FIELD_ALTERNATE;
state->chip_info = (struct adv7180_chip_info *)id->driver_data;
state->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
@@ -1335,7 +1355,7 @@ static int adv7180_probe(struct i2c_client *client,
goto err_unregister_vpp_client;
state->pad.flags = MEDIA_PAD_FL_SOURCE;
- sd->entity.flags |= MEDIA_ENT_F_ATV_DECODER;
+ sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (ret)
goto err_free_ctrl;
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index 820b44ed56a8..469be87a3761 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -284,7 +284,7 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
adv748x_csi2_set_virtual_channel(tx, 0);
adv748x_subdev_init(&tx->sd, state, &adv748x_csi2_ops,
- MEDIA_ENT_F_UNKNOWN,
+ MEDIA_ENT_F_VID_IF_BRIDGE,
is_txa(tx) ? "txa" : "txb");
/* Ensure that matching is based upon the endpoint fwnodes */
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 5731751d3f2a..55c2ea0720d9 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1847,6 +1847,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_hdl;
}
state->pad.flags = MEDIA_PAD_FL_SINK;
+ sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
goto err_hdl;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index cac2081e876e..668be2bca57a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3108,12 +3108,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &bus_cfg);
- if (ret) {
- of_node_put(endpoint);
- return ret;
- }
-
of_node_put(endpoint);
+ if (ret)
+ return ret;
if (!of_property_read_u32(np, "default-input", &v))
state->pdata.default_input = v;
@@ -3502,6 +3499,7 @@ static int adv76xx_probe(struct i2c_client *client,
for (i = 0; i < state->source_pad; ++i)
state->pads[i].flags = MEDIA_PAD_FL_SINK;
state->pads[state->source_pad].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_DV_DECODER;
err = media_entity_pads_init(&sd->entity, state->source_pad + 1,
state->pads);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index fddac32e5051..4f8fbdd00e35 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3102,7 +3102,7 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
sdp_write(sd, 0x12, 0x00); /* Disable 3D comb, Frame TBC & 3DNR */
io_write(sd, 0xFF, 0x04); /* Reset memory controller */
- mdelay(5);
+ usleep_range(5000, 6000);
sdp_write(sd, 0x12, 0x00); /* Disable 3D Comb, Frame TBC & 3DNR */
sdp_io_write(sd, 0x2A, 0x01); /* Memory BIST Initialisation */
@@ -3116,12 +3116,12 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
sdp_io_write(sd, 0x7d, 0x00); /* Memory BIST Initialisation */
sdp_io_write(sd, 0x7e, 0x1a); /* Memory BIST Initialisation */
- mdelay(5);
+ usleep_range(5000, 6000);
sdp_io_write(sd, 0xd9, 0xd5); /* Enable BIST Test */
sdp_write(sd, 0x12, 0x05); /* Enable FRAME TBC & 3D COMB */
- mdelay(20);
+ msleep(20);
for (i = 0; i < 10; i++) {
u8 result = sdp_io_read(sd, 0xdb);
@@ -3132,7 +3132,7 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
else
pass++;
}
- mdelay(20);
+ msleep(20);
}
v4l2_dbg(1, debug, sd,
@@ -3541,6 +3541,7 @@ static int adv7842_probe(struct i2c_client *client,
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
adv7842_delayed_work_enable_hotplug);
+ sd->entity.function = MEDIA_ENT_F_DV_DECODER;
state->pad.flags = MEDIA_PAD_FL_SOURCE;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
new file mode 100644
index 000000000000..7b14b11605ca
--- /dev/null
+++ b/drivers/media/i2c/ak7375.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define AK7375_MAX_FOCUS_POS 4095
+/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position.
+ */
+#define AK7375_FOCUS_STEPS 1
+/*
+ * This acts as the minimum granularity of lens movement.
+ * Keep this value a power of 2, so the control steps can be
+ * uniformly adjusted for gradual lens movement, with desired
+ * number of control steps.
+ */
+#define AK7375_CTRL_STEPS 64
+#define AK7375_CTRL_DELAY_US 1000
+
+#define AK7375_REG_POSITION 0x0
+#define AK7375_REG_CONT 0x2
+#define AK7375_MODE_ACTIVE 0x0
+#define AK7375_MODE_STANDBY 0x40
+
+/* ak7375 device structure */
+struct ak7375_device {
+ struct v4l2_ctrl_handler ctrls_vcm;
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl *focus;
+ /* active or standby mode */
+ bool active;
+};
+
+static inline struct ak7375_device *to_ak7375_vcm(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct ak7375_device, ctrls_vcm);
+}
+
+static inline struct ak7375_device *sd_to_ak7375_vcm(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ak7375_device, sd);
+}
+
+static int ak7375_i2c_write(struct ak7375_device *ak7375,
+ u8 addr, u16 data, u8 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ak7375->sd);
+ u8 buf[3];
+ int ret;
+
+ if (size != 1 && size != 2)
+ return -EINVAL;
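+ /*
+ * The register address goes first, then the payload MSB-first: for
+ * a 2-byte write buf[1] carries bits 15:8 and buf[2] bits 7:0.
+ */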
+ buf[0] = addr;
+ buf[size] = data & 0xff;
+ if (size == 2)
+ buf[1] = (data >> 8) & 0xff;
+ ret = i2c_master_send(client, (const char *)buf, size + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != size + 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int ak7375_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ak7375_device *dev_vcm = to_ak7375_vcm(ctrl);
+
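+ /*
+ * The 12-bit focus position (0..AK7375_MAX_FOCUS_POS) is shifted
+ * by 4 into the upper bits of the 16-bit position registers.
+ */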
+ if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE)
+ return ak7375_i2c_write(dev_vcm, AK7375_REG_POSITION,
+ ctrl->val << 4, 2);
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ak7375_vcm_ctrl_ops = {
+ .s_ctrl = ak7375_set_ctrl,
+};
+
+static int ak7375_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(sd->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sd->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ak7375_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ pm_runtime_put(sd->dev);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops ak7375_int_ops = {
+ .open = ak7375_open,
+ .close = ak7375_close,
+};
+
+static const struct v4l2_subdev_ops ak7375_ops = { };
+
+static void ak7375_subdev_cleanup(struct ak7375_device *ak7375_dev)
+{
+ v4l2_async_unregister_subdev(&ak7375_dev->sd);
+ v4l2_ctrl_handler_free(&ak7375_dev->ctrls_vcm);
+ media_entity_cleanup(&ak7375_dev->sd.entity);
+}
+
+static int ak7375_init_controls(struct ak7375_device *dev_vcm)
+{
+ struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
+ const struct v4l2_ctrl_ops *ops = &ak7375_vcm_ctrl_ops;
+
+ v4l2_ctrl_handler_init(hdl, 1);
+
+ dev_vcm->focus = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
+ 0, AK7375_MAX_FOCUS_POS, AK7375_FOCUS_STEPS, 0);
+
+ if (hdl->error)
+ dev_err(dev_vcm->sd.dev, "%s fail error: 0x%x\n",
+ __func__, hdl->error);
+ dev_vcm->sd.ctrl_handler = hdl;
+
+ return hdl->error;
+}
+
+static int ak7375_probe(struct i2c_client *client)
+{
+ struct ak7375_device *ak7375_dev;
+ int ret;
+
+ ak7375_dev = devm_kzalloc(&client->dev, sizeof(*ak7375_dev),
+ GFP_KERNEL);
+ if (!ak7375_dev)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ak7375_dev->sd, client, &ak7375_ops);
+ ak7375_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ak7375_dev->sd.internal_ops = &ak7375_int_ops;
+ ak7375_dev->sd.entity.function = MEDIA_ENT_F_LENS;
+
+ ret = ak7375_init_controls(ak7375_dev);
+ if (ret)
+ goto err_cleanup;
+
+ ret = media_entity_pads_init(&ak7375_dev->sd.entity, 0, NULL);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = v4l2_async_register_subdev(&ak7375_dev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_cleanup:
+ v4l2_ctrl_handler_free(&ak7375_dev->ctrls_vcm);
+ media_entity_cleanup(&ak7375_dev->sd.entity);
+
+ return ret;
+}
+
+static int ak7375_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+
+ ak7375_subdev_cleanup(ak7375_dev);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ return 0;
+}
+
+/*
+ * This function parks the VCM at the position that consumes the least
+ * current before entering standby. The lens is moved gradually, in
+ * units of AK7375_CTRL_STEPS, to keep the movement smooth.
+ */
+static int __maybe_unused ak7375_vcm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ int ret, val;
+
+ if (!ak7375_dev->active)
+ return 0;
+
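+ /*
+ * Walk down from the current position (rounded down to a multiple
+ * of AK7375_CTRL_STEPS) to 0, so the lens parks gently before
+ * entering standby.
+ */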
+ for (val = ak7375_dev->focus->val & ~(AK7375_CTRL_STEPS - 1);
+ val >= 0; val -= AK7375_CTRL_STEPS) {
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
+ val << 4, 2);
+ if (ret)
+ dev_err_once(dev, "%s I2C failure: %d\n",
+ __func__, ret);
+ usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ }
+
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
+ AK7375_MODE_STANDBY, 1);
+ if (ret)
+ dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+
+ ak7375_dev->active = false;
+
+ return 0;
+}
+
+/*
+ * This function restores the vcm position to the value last set by the
+ * user through the v4l2_ctrl_ops s_ctrl handler. The lens is moved
+ * gradually, in units of AK7375_CTRL_STEPS, to keep the movement
+ * smooth.
+ */
+static int __maybe_unused ak7375_vcm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ int ret, val;
+
+ if (ak7375_dev->active)
+ return 0;
+
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
+ AK7375_MODE_ACTIVE, 1);
+ if (ret) {
+ dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+ return ret;
+ }
+
+ for (val = ak7375_dev->focus->val % AK7375_CTRL_STEPS;
+ val <= ak7375_dev->focus->val;
+ val += AK7375_CTRL_STEPS) {
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
+ val << 4, 2);
+ if (ret)
+ dev_err_ratelimited(dev, "%s I2C failure: %d\n",
+ __func__, ret);
+ usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ }
+
+ ak7375_dev->active = true;
+
+ return 0;
+}
+
+static const struct of_device_id ak7375_of_table[] = {
+ { .compatible = "asahi-kasei,ak7375" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ak7375_of_table);
+
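+/*
+ * The same stepped park/restore handlers serve both runtime PM and
+ * system sleep.
+ */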
+static const struct dev_pm_ops ak7375_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume)
+ SET_RUNTIME_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume, NULL)
+};
+
+static struct i2c_driver ak7375_i2c_driver = {
+ .driver = {
+ .name = "ak7375",
+ .pm = &ak7375_pm_ops,
+ .of_match_table = ak7375_of_table,
+ },
+ .probe_new = ak7375_probe,
+ .remove = ak7375_remove,
+};
+module_i2c_driver(ak7375_i2c_driver);
+
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_DESCRIPTION("AK7375 VCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
index fb13a624d2e3..c323b1af1f83 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.h
+++ b/drivers/media/i2c/cx25840/cx25840-core.h
@@ -45,6 +45,35 @@ enum cx25840_media_pads {
CX25840_NUM_PADS
};
+/**
+ * struct cx25840_state - a device instance private data
+ * @c: i2c_client struct representing this device
+ * @sd: our V4L2 sub-device
+ * @hdl: our V4L2 control handler
+ * @volume: audio volume V4L2 control (non-cx2583x devices only)
+ * @mute: audio mute V4L2 control (non-cx2583x devices only)
+ * @pvr150_workaround: whether we enable the workaround for the Hauppauge PVR150
+ * hardware bug (audio dropping out)
+ * @radio: set if we are currently in the radio mode, otherwise
+ * the current mode is non-radio (that is, video)
+ * @std: currently set video standard
+ * @vid_input: currently set video input
+ * @aud_input: currently set audio input
+ * @audclk_freq: currently set audio sample rate
+ * @audmode: currently set audio mode (when in non-radio mode)
+ * @vbi_line_offset: vbi line number offset
+ * @id: exact device model
+ * @rev: raw device id read from the chip
+ * @is_initialized: whether we have already loaded firmware into the chip
+ * and initialized it
+ * @vbi_regs_offset: offset of vbi regs
+ * @fw_wait: wait queue to wake an initialization function up when
+ * firmware loading (on a separate workqueue) finishes
+ * @fw_work: a work that actually loads the firmware on a separate
+ * workqueue
+ * @ir_state: a pointer to chip IR controller private data
+ * @pads: array of supported chip pads (currently only a stub)
+ */
struct cx25840_state {
struct i2c_client *c;
struct v4l2_subdev sd;
@@ -66,8 +95,8 @@ struct cx25840_state {
u32 rev;
int is_initialized;
unsigned vbi_regs_offset;
- wait_queue_head_t fw_wait; /* wake up when the fw load is finished */
- struct work_struct fw_work; /* work entry for fw load */
+ wait_queue_head_t fw_wait;
+ struct work_struct fw_work;
struct cx25840_ir_state *ir_state;
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_pad pads[CX25840_NUM_PADS];
diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
new file mode 100644
index 000000000000..8ba3920b6e2f
--- /dev/null
+++ b/drivers/media/i2c/dw9807-vcm.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define DW9807_MAX_FOCUS_POS 1023
+/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position.
+ */
+#define DW9807_FOCUS_STEPS 1
+/*
+ * This acts as the minimum granularity of lens movement.
+ * Keep this value a power of 2, so the control steps can be
+ * uniformly adjusted for gradual lens movement, with desired
+ * number of control steps.
+ */
+#define DW9807_CTRL_STEPS 16
+#define DW9807_CTRL_DELAY_US 1000
+
+#define DW9807_CTL_ADDR 0x02
+/*
+ * The DW9807 splits the VCM position across two registers: one for the
+ * MSB, the other for the LSB. Their addresses are consecutive, so
+ * dw9807_set_dac() can write both in a single transfer.
+ */
+#define DW9807_MSB_ADDR 0x03
+#define DW9807_LSB_ADDR 0x04
+#define DW9807_STATUS_ADDR 0x05
+#define DW9807_MODE_ADDR 0x06
+#define DW9807_RESONANCE_ADDR 0x07
+
+#define MAX_RETRY 10
+
+struct dw9807_device {
+ struct v4l2_ctrl_handler ctrls_vcm;
+ struct v4l2_subdev sd;
+ u16 current_val;
+};
+
+static inline struct dw9807_device *sd_to_dw9807_vcm(
+ struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct dw9807_device, sd);
+}
+
+static int dw9807_i2c_check(struct i2c_client *client)
+{
+ const char status_addr = DW9807_STATUS_ADDR;
+ char status_result;
+ int ret;
+
+ ret = i2c_master_send(client, &status_addr, sizeof(status_addr));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write STATUS address fail ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, &status_result, sizeof(status_result));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read STATUS value fail ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return status_result;
+}
+
+static int dw9807_set_dac(struct i2c_client *client, u16 data)
+{
+ const char tx_data[3] = {
+ DW9807_MSB_ADDR, ((data >> 8) & 0x03), (data & 0xff)
+ };
+ int val, ret;
+
+ /*
+ * According to the datasheet, we need to check the bus status before
+ * writing the VCM position. This ensures that the value is really
+ * written into the register.
+ */
+ ret = readx_poll_timeout(dw9807_i2c_check, client, val, val <= 0,
+ DW9807_CTRL_DELAY_US, MAX_RETRY * DW9807_CTRL_DELAY_US);
+
+ if (ret || val < 0) {
+ if (ret) {
+ dev_warn(&client->dev,
+ "Cannot do the write operation because VCM is busy\n");
+ }
+
+ return ret ? -EBUSY : val;
+ }
+
+ /* Write VCM position to registers */
+ ret = i2c_master_send(client, tx_data, sizeof(tx_data));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "I2C write MSB fail ret=%d\n", ret);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw9807_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dw9807_device *dev_vcm = container_of(ctrl->handler,
+ struct dw9807_device, ctrls_vcm);
+
+ if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) {
+ struct i2c_client *client = v4l2_get_subdevdata(&dev_vcm->sd);
+
+ dev_vcm->current_val = ctrl->val;
+ return dw9807_set_dac(client, ctrl->val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops dw9807_vcm_ctrl_ops = {
+ .s_ctrl = dw9807_set_ctrl,
+};
+
+static int dw9807_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rval;
+
+ rval = pm_runtime_get_sync(sd->dev);
+ if (rval < 0) {
+ pm_runtime_put_noidle(sd->dev);
+ return rval;
+ }
+
+ return 0;
+}
+
+static int dw9807_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ pm_runtime_put(sd->dev);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops dw9807_int_ops = {
+ .open = dw9807_open,
+ .close = dw9807_close,
+};
+
+static const struct v4l2_subdev_ops dw9807_ops = { };
+
+static void dw9807_subdev_cleanup(struct dw9807_device *dw9807_dev)
+{
+ v4l2_async_unregister_subdev(&dw9807_dev->sd);
+ v4l2_ctrl_handler_free(&dw9807_dev->ctrls_vcm);
+ media_entity_cleanup(&dw9807_dev->sd.entity);
+}
+
+static int dw9807_init_controls(struct dw9807_device *dev_vcm)
+{
+ struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
+ const struct v4l2_ctrl_ops *ops = &dw9807_vcm_ctrl_ops;
+ struct i2c_client *client = v4l2_get_subdevdata(&dev_vcm->sd);
+
+ v4l2_ctrl_handler_init(hdl, 1);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
+ 0, DW9807_MAX_FOCUS_POS, DW9807_FOCUS_STEPS, 0);
+
+ dev_vcm->sd.ctrl_handler = hdl;
+ if (hdl->error) {
+ dev_err(&client->dev, "%s fail error: 0x%x\n",
+ __func__, hdl->error);
+ return hdl->error;
+ }
+
+ return 0;
+}
+
+static int dw9807_probe(struct i2c_client *client)
+{
+ struct dw9807_device *dw9807_dev;
+ int rval;
+
+ dw9807_dev = devm_kzalloc(&client->dev, sizeof(*dw9807_dev),
+ GFP_KERNEL);
+ if (dw9807_dev == NULL)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&dw9807_dev->sd, client, &dw9807_ops);
+ dw9807_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dw9807_dev->sd.internal_ops = &dw9807_int_ops;
+
+ rval = dw9807_init_controls(dw9807_dev);
+ if (rval)
+ goto err_cleanup;
+
+ rval = media_entity_pads_init(&dw9807_dev->sd.entity, 0, NULL);
+ if (rval < 0)
+ goto err_cleanup;
+
+ dw9807_dev->sd.entity.function = MEDIA_ENT_F_LENS;
+
+ rval = v4l2_async_register_subdev(&dw9807_dev->sd);
+ if (rval < 0)
+ goto err_cleanup;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_cleanup:
+ dw9807_subdev_cleanup(dw9807_dev);
+
+ return rval;
+}
+
+static int dw9807_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ dw9807_subdev_cleanup(dw9807_dev);
+
+ return 0;
+}
+
+/*
+ * This function parks the VCM at the position that consumes the least
+ * current before powering down. The lens is moved gradually, in units
+ * of DW9807_CTRL_STEPS, to keep the movement smooth.
+ */
+static int __maybe_unused dw9807_vcm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
+ const char tx_data[2] = { DW9807_CTL_ADDR, 0x01 };
+ int ret, val;
+
+ for (val = dw9807_dev->current_val & ~(DW9807_CTRL_STEPS - 1);
+ val >= 0; val -= DW9807_CTRL_STEPS) {
+ ret = dw9807_set_dac(client, val);
+ if (ret)
+ dev_err_once(dev, "%s I2C failure: %d", __func__, ret);
+ usleep_range(DW9807_CTRL_DELAY_US, DW9807_CTRL_DELAY_US + 10);
+ }
+
+ /* Power down */
+ ret = i2c_master_send(client, tx_data, sizeof(tx_data));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write CTL fail ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function restores the vcm position to the value last set by the
+ * user through the v4l2_ctrl_ops s_ctrl handler. The lens is moved
+ * gradually, in units of DW9807_CTRL_STEPS, to keep the movement
+ * smooth.
+ */
+static int __maybe_unused dw9807_vcm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
+ const char tx_data[2] = { DW9807_CTL_ADDR, 0x00 };
+ int ret, val;
+
+ /* Power on */
+ ret = i2c_master_send(client, tx_data, sizeof(tx_data));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write CTL fail ret = %d\n", ret);
+ return ret;
+ }
+
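+ /*
+ * val stays congruent to current_val modulo DW9807_CTRL_STEPS, so
+ * the final iteration writes exactly current_val.
+ */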
+ for (val = dw9807_dev->current_val % DW9807_CTRL_STEPS;
+ val < dw9807_dev->current_val + DW9807_CTRL_STEPS - 1;
+ val += DW9807_CTRL_STEPS) {
+ ret = dw9807_set_dac(client, val);
+ if (ret)
+ dev_err_ratelimited(dev, "%s I2C failure: %d",
+ __func__, ret);
+ usleep_range(DW9807_CTRL_DELAY_US, DW9807_CTRL_DELAY_US + 10);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id dw9807_of_table[] = {
+ { .compatible = "dongwoon,dw9807-vcm" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw9807_of_table);
+
+static const struct dev_pm_ops dw9807_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw9807_vcm_suspend, dw9807_vcm_resume)
+ SET_RUNTIME_PM_OPS(dw9807_vcm_suspend, dw9807_vcm_resume, NULL)
+};
+
+static struct i2c_driver dw9807_i2c_driver = {
+ .driver = {
+ .name = "dw9807",
+ .pm = &dw9807_pm_ops,
+ .of_match_table = dw9807_of_table,
+ },
+ .probe_new = dw9807_probe,
+ .remove = dw9807_remove,
+};
+
+module_i2c_driver(dw9807_i2c_driver);
+
+MODULE_AUTHOR("Chiang, Alan <alanx.chiang@intel.com>");
+MODULE_DESCRIPTION("DW9807 VCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index e9eff9039ef5..37ef38947e01 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1446,6 +1446,7 @@ static int et8ek8_probe(struct i2c_client *client,
sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sensor->subdev.internal_ops = &et8ek8_internal_ops;
+ sensor->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&sensor->subdev.entity, 1, &sensor->pad);
if (ret < 0) {
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index f3b124723aa0..31a1e2294843 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1221,6 +1221,14 @@ static int imx258_probe(struct i2c_client *client)
if (val != 19200000)
return -EINVAL;
+ /*
+ * Check that the device is mounted upside down. The driver only
+ * supports a single pixel order right now.
+ */
+ ret = device_property_read_u32(&client->dev, "rotation", &val);
+ if (ret || val != 180)
+ return -EINVAL;
+
imx258 = devm_kzalloc(&client->dev, sizeof(*imx258), GFP_KERNEL);
if (!imx258)
return -ENOMEM;
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 63fb94e7da37..f8c70f1a34fe 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -5,6 +5,7 @@
*
* Leon Luo <leonl@leopardimaging.com>
* Edwin Zou <edwinz@leopardimaging.com>
+ * Luca Ceresoli <luca@lucaceresoli.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -25,6 +26,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
@@ -74,7 +76,7 @@
*/
#define IMX274_MIN_EXPOSURE_TIME (4 * 260 / 72)
-#define IMX274_DEFAULT_MODE IMX274_MODE_3840X2160
+#define IMX274_DEFAULT_MODE IMX274_BINNING_OFF
#define IMX274_MAX_WIDTH (3840)
#define IMX274_MAX_HEIGHT (2160)
#define IMX274_MAX_FRAME_RATE (120)
@@ -111,6 +113,20 @@
#define IMX274_SHR_REG_LSB 0x300C /* SHR */
#define IMX274_SVR_REG_MSB 0x300F /* SVR */
#define IMX274_SVR_REG_LSB 0x300E /* SVR */
+#define IMX274_HTRIM_EN_REG 0x3037
+#define IMX274_HTRIM_START_REG_LSB 0x3038
+#define IMX274_HTRIM_START_REG_MSB 0x3039
+#define IMX274_HTRIM_END_REG_LSB 0x303A
+#define IMX274_HTRIM_END_REG_MSB 0x303B
+#define IMX274_VWIDCUTEN_REG 0x30DD
+#define IMX274_VWIDCUT_REG_LSB 0x30DE
+#define IMX274_VWIDCUT_REG_MSB 0x30DF
+#define IMX274_VWINPOS_REG_LSB 0x30E0
+#define IMX274_VWINPOS_REG_MSB 0x30E1
+#define IMX274_WRITE_VSIZE_REG_LSB 0x3130
+#define IMX274_WRITE_VSIZE_REG_MSB 0x3131
+#define IMX274_Y_OUT_SIZE_REG_LSB 0x3132
+#define IMX274_Y_OUT_SIZE_REG_MSB 0x3133
#define IMX274_VMAX_REG_1 0x30FA /* VMAX, MSB */
#define IMX274_VMAX_REG_2 0x30F9 /* VMAX */
#define IMX274_VMAX_REG_3 0x30F8 /* VMAX, LSB */
@@ -140,17 +156,35 @@ static const struct regmap_config imx274_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-enum imx274_mode {
- IMX274_MODE_3840X2160,
- IMX274_MODE_1920X1080,
- IMX274_MODE_1280X720,
+enum imx274_binning {
+ IMX274_BINNING_OFF,
+ IMX274_BINNING_2_1,
+ IMX274_BINNING_3_1,
};
/*
- * imx274 format related structure
+ * Parameters for each imx274 readout mode.
+ *
+ * These are the values to configure the sensor in one of the
+ * implemented modes.
+ *
+ * @init_regs: registers to initialize the mode
+ * @bin_ratio: downscale factor (e.g. 3 for 3:1 binning)
+ * @min_frame_len: Minimum frame length for each mode (see "Frame Rate
+ * Adjustment (CSI-2)" in the datasheet)
+ * @min_SHR: Minimum SHR register value (see "Shutter Setting (CSI-2)" in the
+ * datasheet)
+ * @max_fps: Maximum frames per second
+ * @nocpiop: Number of clocks per internal offset period (see "Integration Time
+ * in Each Readout Drive Mode (CSI-2)" in the datasheet)
*/
struct imx274_frmfmt {
- struct v4l2_frmsize_discrete size;
+ const struct reg_8 *init_regs;
+ unsigned int bin_ratio;
+ int min_frame_len;
+ int min_SHR;
+ int max_fps;
+ int nocpiop;
};
/*
@@ -197,31 +231,14 @@ static const struct reg_8 imx274_mode1_3840x2160_raw10[] = {
{0x3004, 0x01},
{0x3005, 0x01},
{0x3006, 0x00},
- {0x3007, 0x02},
+ {0x3007, 0xa2},
{0x3018, 0xA2}, /* output XVS, HVS */
{0x306B, 0x05},
{0x30E2, 0x01},
- {0x30F6, 0x07}, /* HMAX, 263 */
- {0x30F7, 0x01}, /* HMAX */
-
- {0x30dd, 0x01}, /* crop to 2160 */
- {0x30de, 0x06},
- {0x30df, 0x00},
- {0x30e0, 0x12},
- {0x30e1, 0x00},
- {0x3037, 0x01}, /* to crop to 3840 */
- {0x3038, 0x0c},
- {0x3039, 0x00},
- {0x303a, 0x0c},
- {0x303b, 0x0f},
{0x30EE, 0x01},
- {0x3130, 0x86},
- {0x3131, 0x08},
- {0x3132, 0x7E},
- {0x3133, 0x08},
{0x3342, 0x0A},
{0x3343, 0x00},
{0x3344, 0x16},
@@ -255,32 +272,14 @@ static const struct reg_8 imx274_mode3_1920x1080_raw10[] = {
{0x3004, 0x02},
{0x3005, 0x21},
{0x3006, 0x00},
- {0x3007, 0x11},
+ {0x3007, 0xb1},
{0x3018, 0xA2}, /* output XVS, HVS */
{0x306B, 0x05},
{0x30E2, 0x02},
- {0x30F6, 0x04}, /* HMAX, 260 */
- {0x30F7, 0x01}, /* HMAX */
-
- {0x30dd, 0x01}, /* to crop to 1920x1080 */
- {0x30de, 0x05},
- {0x30df, 0x00},
- {0x30e0, 0x04},
- {0x30e1, 0x00},
- {0x3037, 0x01},
- {0x3038, 0x0c},
- {0x3039, 0x00},
- {0x303a, 0x0c},
- {0x303b, 0x0f},
-
{0x30EE, 0x01},
- {0x3130, 0x4E},
- {0x3131, 0x04},
- {0x3132, 0x46},
- {0x3133, 0x04},
{0x3342, 0x0A},
{0x3343, 0x00},
{0x3344, 0x1A},
@@ -313,31 +312,14 @@ static const struct reg_8 imx274_mode5_1280x720_raw10[] = {
{0x3004, 0x03},
{0x3005, 0x31},
{0x3006, 0x00},
- {0x3007, 0x09},
+ {0x3007, 0xa9},
{0x3018, 0xA2}, /* output XVS, HVS */
{0x306B, 0x05},
{0x30E2, 0x03},
- {0x30F6, 0x04}, /* HMAX, 260 */
- {0x30F7, 0x01}, /* HMAX */
-
- {0x30DD, 0x01},
- {0x30DE, 0x07},
- {0x30DF, 0x00},
- {0x40E0, 0x04},
- {0x30E1, 0x00},
- {0x3030, 0xD4},
- {0x3031, 0x02},
- {0x3032, 0xD0},
- {0x3033, 0x02},
-
{0x30EE, 0x01},
- {0x3130, 0xE2},
- {0x3131, 0x02},
- {0x3132, 0xDE},
- {0x3133, 0x02},
{0x3342, 0x0A},
{0x3343, 0x00},
{0x3344, 0x1B},
@@ -476,58 +458,35 @@ static const struct reg_8 imx274_tp_regs[] = {
{IMX274_TABLE_END, 0x00}
};
-static const struct reg_8 *mode_table[] = {
- [IMX274_MODE_3840X2160] = imx274_mode1_3840x2160_raw10,
- [IMX274_MODE_1920X1080] = imx274_mode3_1920x1080_raw10,
- [IMX274_MODE_1280X720] = imx274_mode5_1280x720_raw10,
-};
-
-/*
- * imx274 format related structure
- */
+/* nocpiop happens to be the same number for the implemented modes */
static const struct imx274_frmfmt imx274_formats[] = {
- { {3840, 2160} },
- { {1920, 1080} },
- { {1280, 720} },
-};
-
-/*
- * minimal frame length for each mode
- * refer to datasheet section "Frame Rate Adjustment (CSI-2)"
- */
-static const int min_frame_len[] = {
- 4550, /* mode 1, 4K */
- 2310, /* mode 3, 1080p */
- 2310 /* mode 5, 720p */
-};
-
-/*
- * minimal numbers of SHR register
- * refer to datasheet table "Shutter Setting (CSI-2)"
- */
-static const int min_SHR[] = {
- 12, /* mode 1, 4K */
- 8, /* mode 3, 1080p */
- 8 /* mode 5, 720p */
-};
-
-static const int max_frame_rate[] = {
- 60, /* mode 1 , 4K */
- 120, /* mode 3, 1080p */
- 120 /* mode 5, 720p */
-};
-
-/*
- * Number of clocks per internal offset period
- * a constant based on mode
- * refer to section "Integration Time in Each Readout Drive Mode (CSI-2)"
- * in the datasheet
- * for the implemented 3 modes, it happens to be the same number
- */
-static const int nocpiop[] = {
- 112, /* mode 1 , 4K */
- 112, /* mode 3, 1080p */
- 112 /* mode 5, 720p */
+ {
+ /* mode 1, 4K */
+ .bin_ratio = 1,
+ .init_regs = imx274_mode1_3840x2160_raw10,
+ .min_frame_len = 4550,
+ .min_SHR = 12,
+ .max_fps = 60,
+ .nocpiop = 112,
+ },
+ {
+ /* mode 3, 1080p */
+ .bin_ratio = 2,
+ .init_regs = imx274_mode3_1920x1080_raw10,
+ .min_frame_len = 2310,
+ .min_SHR = 8,
+ .max_fps = 120,
+ .nocpiop = 112,
+ },
+ {
+ /* mode 5, 720p */
+ .bin_ratio = 3,
+ .init_regs = imx274_mode5_1280x720_raw10,
+ .min_frame_len = 2310,
+ .min_SHR = 8,
+ .max_fps = 120,
+ .nocpiop = 112,
+ },
};
/*
@@ -549,29 +508,40 @@ struct imx274_ctrls {
/*
* struct stim274 - imx274 device structure
* @sd: V4L2 subdevice structure
- * @pd: Media pad structure
+ * @pad: Media pad structure
* @client: Pointer to I2C client
* @ctrls: imx274 control structure
+ * @crop: rect to be captured
+ * @compose: compose rect, i.e. output resolution
* @format: V4L2 media bus frame format structure
+ * (width and height are in sync with the compose rect)
* @frame_rate: V4L2 frame rate structure
* @regmap: Pointer to regmap structure
* @reset_gpio: Pointer to reset gpio
* @lock: Mutex structure
- * @mode_index: Resolution mode index
+ * @mode: Parameters for the selected readout mode
*/
struct stimx274 {
struct v4l2_subdev sd;
struct media_pad pad;
struct i2c_client *client;
struct imx274_ctrls ctrls;
+ struct v4l2_rect crop;
struct v4l2_mbus_framefmt format;
struct v4l2_fract frame_interval;
struct regmap *regmap;
struct gpio_desc *reset_gpio;
struct mutex lock; /* mutex lock for operations */
- u32 mode_index;
+ const struct imx274_frmfmt *mode;
};
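+
+/*
+ * Round dim to a multiple of step: up if V4L2_SEL_FLAG_GE is set, down
+ * if V4L2_SEL_FLAG_LE is set, to the nearest multiple otherwise.
+ */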
+#define IMX274_ROUND(dim, step, flags) \
+ ((flags) & V4L2_SEL_FLAG_GE \
+ ? roundup((dim), (step)) \
+ : ((flags) & V4L2_SEL_FLAG_LE \
+ ? rounddown((dim), (step)) \
+ : rounddown((dim) + (step) / 2, (step))))
+
/*
* Function declaration
*/
@@ -602,20 +572,18 @@ static inline struct stimx274 *to_imx274(struct v4l2_subdev *sd)
}
/*
- * imx274_regmap_util_write_table_8 - Function for writing register table
- * @regmap: Pointer to device reg map structure
- * @table: Table containing register values
- * @wait_ms_addr: Flag for performing delay
- * @end_addr: Flag for incating end of table
+ * Writing a register table
+ *
+ * @priv: Pointer to device
+ * @table: Table containing register values (with optional delays)
*
* This is used to write register table into sensor's reg map.
*
* Return: 0 on success, errors otherwise
*/
-static int imx274_regmap_util_write_table_8(struct regmap *regmap,
- const struct reg_8 table[],
- u16 wait_ms_addr, u16 end_addr)
+static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
{
+ struct regmap *regmap = priv->regmap;
int err = 0;
const struct reg_8 *next;
u8 val;
@@ -627,8 +595,8 @@ static int imx274_regmap_util_write_table_8(struct regmap *regmap,
for (next = table;; next++) {
if ((next->addr != range_start + range_count) ||
- (next->addr == end_addr) ||
- (next->addr == wait_ms_addr) ||
+ (next->addr == IMX274_TABLE_END) ||
+ (next->addr == IMX274_TABLE_WAIT_MS) ||
(range_count == max_range_vals)) {
if (range_count == 1)
err = regmap_write(regmap,
@@ -647,10 +615,10 @@ static int imx274_regmap_util_write_table_8(struct regmap *regmap,
range_count = 0;
/* Handle special address values */
- if (next->addr == end_addr)
+ if (next->addr == IMX274_TABLE_END)
break;
- if (next->addr == wait_ms_addr) {
+ if (next->addr == IMX274_TABLE_WAIT_MS) {
msleep_range(next->val);
continue;
}
@@ -697,25 +665,42 @@ static inline int imx274_write_reg(struct stimx274 *priv, u16 addr, u8 val)
return err;
}
-static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
+/**
+ * imx274_write_mbreg - Write a multibyte register
+ *
+ * Uses a bulk write where possible.
+ *
+ * @priv: Pointer to device structure
+ * @addr: Address of the LSB register. Other registers must be
+ * consecutive, least-to-most significant.
+ * @val: Value to be written to the register (cpu endianness)
+ * @nbytes: Number of bytes to write (range: [1..3])
+ */
+static int imx274_write_mbreg(struct stimx274 *priv, u16 addr, u32 val,
+ size_t nbytes)
{
- return imx274_regmap_util_write_table_8(priv->regmap,
- table, IMX274_TABLE_WAIT_MS, IMX274_TABLE_END);
+ __le32 val_le = cpu_to_le32(val);
+ int err;
+
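+ /*
+ * The registers are consecutive, LSB first, which matches the
+ * little-endian layout of val_le, so a single bulk write covers
+ * all nbytes.
+ */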
+ err = regmap_bulk_write(priv->regmap, addr, &val_le, nbytes);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c bulk write failed, %x = %x (%zu bytes)\n",
+ __func__, addr, val, nbytes);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x (%zu bytes)\n",
+ __func__, addr, val, nbytes);
+ return err;
}
/*
- * imx274_mode_regs - Function for set mode registers per mode index
+ * Set mode registers to start stream.
* @priv: Pointer to device structure
- * @mode: Mode index value
- *
- * This is used to start steam per mode index.
- * mode = 0, start stream for sensor Mode 1: 4K/raw10
- * mode = 1, start stream for sensor Mode 3: 1080p/raw10
- * mode = 2, start stream for sensor Mode 5: 720p/raw10
*
* Return: 0 on success, errors otherwise
*/
-static int imx274_mode_regs(struct stimx274 *priv, int mode)
+static int imx274_mode_regs(struct stimx274 *priv)
{
int err = 0;
@@ -727,7 +712,7 @@ static int imx274_mode_regs(struct stimx274 *priv, int mode)
if (err)
return err;
- err = imx274_write_table(priv, mode_table[mode]);
+ err = imx274_write_table(priv, priv->mode->init_regs);
return err;
}
@@ -830,6 +815,114 @@ static int imx274_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
+static int imx274_binning_goodness(struct stimx274 *imx274,
+ int w, int ask_w,
+ int h, int ask_h, u32 flags)
+{
+ struct device *dev = &imx274->client->dev;
+ const int goodness = 100000;
+ int val = 0;
+
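+ /*
+ * Violating a GE/LE constraint costs a full 'goodness' unit and
+ * outweighs any per-pixel penalty below, so modes that honor the
+ * constraints always rank higher.
+ */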
+ if (flags & V4L2_SEL_FLAG_GE) {
+ if (w < ask_w)
+ val -= goodness;
+ if (h < ask_h)
+ val -= goodness;
+ }
+
+ if (flags & V4L2_SEL_FLAG_LE) {
+ if (w > ask_w)
+ val -= goodness;
+ if (h > ask_h)
+ val -= goodness;
+ }
+
+ val -= abs(w - ask_w);
+ val -= abs(h - ask_h);
+
+ dev_dbg(dev, "%s: ask %dx%d, size %dx%d, goodness %d\n",
+ __func__, ask_w, ask_h, w, h, val);
+
+ return val;
+}
+
+/**
+ * __imx274_change_compose - change binning and set both compose and format
+ *
+ * We have two entry points to change binning: set_fmt and
+ * set_selection(COMPOSE). Both have to compute the new output size
+ * and set it in both the compose rect and the frame format size. We
+ * also need to do the same things after setting cropping to restore
+ * 1:1 binning.
+ *
+ * This function contains the common code for these three cases, it
+ * has many arguments in order to accommodate the needs of all of
+ * them.
+ *
+ * Must be called with imx274->lock locked.
+ *
+ * @imx274: The device object
+ * @cfg: The pad config we are editing for TRY requests
+ * @which: V4L2_SUBDEV_FORMAT_ACTIVE or V4L2_SUBDEV_FORMAT_TRY from the caller
+ * @width: Input-output parameter: set to the desired width before
+ * the call, contains the chosen value after returning successfully
+ * @height: Input-output parameter for height (see @width)
+ * @flags: Selection flags from struct v4l2_subdev_selection, or 0 if not
+ * available (when called from set_fmt)
+ */
+static int __imx274_change_compose(struct stimx274 *imx274,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which,
+ u32 *width,
+ u32 *height,
+ u32 flags)
+{
+ struct device *dev = &imx274->client->dev;
+ const struct v4l2_rect *cur_crop;
+ struct v4l2_mbus_framefmt *tgt_fmt;
+ unsigned int i;
+ const struct imx274_frmfmt *best_mode = &imx274_formats[0];
+ int best_goodness = INT_MIN;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY) {
+ cur_crop = &cfg->try_crop;
+ tgt_fmt = &cfg->try_fmt;
+ } else {
+ cur_crop = &imx274->crop;
+ tgt_fmt = &imx274->format;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(imx274_formats); i++) {
+ unsigned int ratio = imx274_formats[i].bin_ratio;
+
+ int goodness = imx274_binning_goodness(
+ imx274,
+ cur_crop->width / ratio, *width,
+ cur_crop->height / ratio, *height,
+ flags);
+
+ if (goodness >= best_goodness) {
+ best_goodness = goodness;
+ best_mode = &imx274_formats[i];
+ }
+ }
+
+ *width = cur_crop->width / best_mode->bin_ratio;
+ *height = cur_crop->height / best_mode->bin_ratio;
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ imx274->mode = best_mode;
+
+ dev_dbg(dev, "%s: selected %u:1 binning\n",
+ __func__, best_mode->bin_ratio);
+
+ tgt_fmt->width = *width;
+ tgt_fmt->height = *height;
+ tgt_fmt->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
/**
* imx274_get_fmt - Get the pad format
* @sd: Pointer to V4L2 Sub device structure
@@ -868,45 +961,238 @@ static int imx274_set_fmt(struct v4l2_subdev *sd,
{
struct v4l2_mbus_framefmt *fmt = &format->format;
struct stimx274 *imx274 = to_imx274(sd);
- struct i2c_client *client = imx274->client;
- int index;
-
- dev_dbg(&client->dev,
- "%s: width = %d height = %d code = %d\n",
- __func__, fmt->width, fmt->height, fmt->code);
+ int err = 0;
mutex_lock(&imx274->lock);
- for (index = 0; index < ARRAY_SIZE(imx274_formats); index++) {
- if (imx274_formats[index].size.width == fmt->width &&
- imx274_formats[index].size.height == fmt->height)
- break;
- }
+ err = __imx274_change_compose(imx274, cfg, format->which,
+ &fmt->width, &fmt->height, 0);
- if (index >= ARRAY_SIZE(imx274_formats)) {
- /* default to first format */
- index = 0;
- }
-
- imx274->mode_index = index;
+ if (err)
+ goto out;
- if (fmt->width > IMX274_MAX_WIDTH)
- fmt->width = IMX274_MAX_WIDTH;
- if (fmt->height > IMX274_MAX_HEIGHT)
- fmt->height = IMX274_MAX_HEIGHT;
- fmt->width = fmt->width & (~IMX274_MASK_LSB_2_BITS);
- fmt->height = fmt->height & (~IMX274_MASK_LSB_2_BITS);
+ /*
+ * __imx274_change_compose already set width and height in the
+ * applicable format, but we need to keep all other format
+ * values, so do a full copy here
+ */
fmt->field = V4L2_FIELD_NONE;
-
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
cfg->try_fmt = *fmt;
else
imx274->format = *fmt;
+out:
+ mutex_unlock(&imx274->lock);
+
+ return err;
+}
+
+static int imx274_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ const struct v4l2_rect *src_crop;
+ const struct v4l2_mbus_framefmt *src_fmt;
+ int ret = 0;
+
+ if (sel->pad != 0)
+ return -EINVAL;
+
+ if (sel->target == V4L2_SEL_TGT_CROP_BOUNDS) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = IMX274_MAX_WIDTH;
+ sel->r.height = IMX274_MAX_HEIGHT;
+ return 0;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ src_crop = &cfg->try_crop;
+ src_fmt = &cfg->try_fmt;
+ } else {
+ src_crop = &imx274->crop;
+ src_fmt = &imx274->format;
+ }
+
+ mutex_lock(&imx274->lock);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *src_crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = src_crop->width;
+ sel->r.height = src_crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = src_fmt->width;
+ sel->r.height = src_fmt->height;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&imx274->lock);
+
+ return ret;
+}
+
+static int imx274_set_selection_crop(struct stimx274 *imx274,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_rect *tgt_crop;
+ struct v4l2_rect new_crop;
+ bool size_changed;
+
+ /*
+ * h_step could be 12 or 24 depending on the binning. But we
+ * won't know the binning until we choose the mode later in
+ * __imx274_change_compose(). Thus let's be safe and use the
+ * most conservative value in all cases.
+ */
+ const u32 h_step = 24;
+
+ new_crop.width = min_t(u32,
+ IMX274_ROUND(sel->r.width, h_step, sel->flags),
+ IMX274_MAX_WIDTH);
+
+ /* Constraint: HTRIMMING_END - HTRIMMING_START >= 144 */
+ if (new_crop.width < 144)
+ new_crop.width = 144;
+
+ new_crop.left = min_t(u32,
+ IMX274_ROUND(sel->r.left, h_step, 0),
+ IMX274_MAX_WIDTH - new_crop.width);
+
+ new_crop.height = min_t(u32,
+ IMX274_ROUND(sel->r.height, 2, sel->flags),
+ IMX274_MAX_HEIGHT);
+
+ new_crop.top = min_t(u32, IMX274_ROUND(sel->r.top, 2, 0),
+ IMX274_MAX_HEIGHT - new_crop.height);
+
+ sel->r = new_crop;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ tgt_crop = &cfg->try_crop;
+ else
+ tgt_crop = &imx274->crop;
+
+ mutex_lock(&imx274->lock);
+
+ size_changed = (new_crop.width != tgt_crop->width ||
+ new_crop.height != tgt_crop->height);
+
+ /* __imx274_change_compose needs the new size in *tgt_crop */
+ *tgt_crop = new_crop;
+
+ /* if crop size changed then reset the output image size */
+ if (size_changed)
+ __imx274_change_compose(imx274, cfg, sel->which,
+ &new_crop.width, &new_crop.height,
+ sel->flags);
+
mutex_unlock(&imx274->lock);
+
return 0;
}
+static int imx274_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ if (sel->pad != 0)
+ return -EINVAL;
+
+ if (sel->target == V4L2_SEL_TGT_CROP)
+ return imx274_set_selection_crop(imx274, cfg, sel);
+
+ if (sel->target == V4L2_SEL_TGT_COMPOSE) {
+ int err;
+
+ mutex_lock(&imx274->lock);
+ err = __imx274_change_compose(imx274, cfg, sel->which,
+ &sel->r.width, &sel->r.height,
+ sel->flags);
+ mutex_unlock(&imx274->lock);
+
+ /*
+ * __imx274_change_compose already set width and
+ * height in sel->r; we still need to set the top-left corner
+ */
+ if (!err) {
+ sel->r.top = 0;
+ sel->r.left = 0;
+ }
+
+ return err;
+ }
+
+ return -EINVAL;
+}
+
+static int imx274_apply_trimming(struct stimx274 *imx274)
+{
+ u32 h_start;
+ u32 h_end;
+ u32 hmax;
+ u32 v_cut;
+ s32 v_pos;
+ u32 write_v_size;
+ u32 y_out_size;
+ int err;
+
+ h_start = imx274->crop.left + 12;
+ h_end = h_start + imx274->crop.width;
+
+ /*
+ * Use the minimum allowed value of HMAX: 260 is the minimum in all
+ * implemented modes; except in mode 1, (width / 16 + 23) is always
+ * below it.
+ */
+ hmax = max_t(u32, 260, (imx274->crop.width) / 16 + 23);
+
+ /* invert v_pos if VFLIP */
+ v_pos = imx274->ctrls.vflip->cur.val ?
+ (-imx274->crop.top / 2) : (imx274->crop.top / 2);
+ v_cut = (IMX274_MAX_HEIGHT - imx274->crop.height) / 2;
+ write_v_size = imx274->crop.height + 22;
+ y_out_size = imx274->crop.height + 14;
+
+ err = imx274_write_mbreg(imx274, IMX274_HMAX_REG_LSB, hmax, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_HTRIM_EN_REG, 1, 1);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_HTRIM_START_REG_LSB,
+ h_start, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_HTRIM_END_REG_LSB,
+ h_end, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_VWIDCUTEN_REG, 1, 1);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_VWIDCUT_REG_LSB,
+ v_cut, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_VWINPOS_REG_LSB,
+ v_pos, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_WRITE_VSIZE_REG_LSB,
+ write_v_size, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_Y_OUT_SIZE_REG_LSB,
+ y_out_size, 2);
+
+ return err;
+}
+
/**
* imx274_g_frame_interval - Get the frame interval
* @sd: Pointer to V4L2 Sub device structure
@@ -1035,14 +1321,19 @@ static int imx274_s_stream(struct v4l2_subdev *sd, int on)
struct stimx274 *imx274 = to_imx274(sd);
int ret = 0;
- dev_dbg(&imx274->client->dev, "%s : %s, mode index = %d\n", __func__,
- on ? "Stream Start" : "Stream Stop", imx274->mode_index);
+ dev_dbg(&imx274->client->dev, "%s : %s, mode index = %td\n", __func__,
+ on ? "Stream Start" : "Stream Stop",
+ imx274->mode - &imx274_formats[0]);
mutex_lock(&imx274->lock);
if (on) {
/* load mode registers */
- ret = imx274_mode_regs(imx274, imx274->mode_index);
+ ret = imx274_mode_regs(imx274);
+ if (ret)
+ goto fail;
+
+ ret = imx274_apply_trimming(imx274);
if (ret)
goto fail;
@@ -1075,8 +1366,7 @@ static int imx274_s_stream(struct v4l2_subdev *sd, int on)
}
mutex_unlock(&imx274->lock);
- dev_dbg(&imx274->client->dev,
- "%s : Done: mode = %d\n", __func__, imx274->mode_index);
+ dev_dbg(&imx274->client->dev, "%s : Done\n", __func__);
return 0;
fail:
@@ -1146,14 +1436,14 @@ static int imx274_clamp_coarse_time(struct stimx274 *priv, u32 *val,
if (err)
return err;
- if (*frame_length < min_frame_len[priv->mode_index])
- *frame_length = min_frame_len[priv->mode_index];
+ if (*frame_length < priv->mode->min_frame_len)
+ *frame_length = priv->mode->min_frame_len;
*val = *frame_length - *val; /* convert to raw shr */
if (*val > *frame_length - IMX274_SHR_LIMIT_CONST)
*val = *frame_length - IMX274_SHR_LIMIT_CONST;
- else if (*val < min_SHR[priv->mode_index])
- *val = min_SHR[priv->mode_index];
+ else if (*val < priv->mode->min_SHR)
+ *val = priv->mode->min_SHR;
return 0;
}
@@ -1182,15 +1472,6 @@ static int imx274_set_digital_gain(struct stimx274 *priv, u32 dgain)
reg_val & IMX274_MASK_LSB_4_BITS);
}
-static inline void imx274_calculate_gain_regs(struct reg_8 regs[2], u16 gain)
-{
- regs->addr = IMX274_ANALOG_GAIN_ADDR_MSB;
- regs->val = (gain >> IMX274_SHIFT_8_BITS) & IMX274_MASK_LSB_3_BITS;
-
- (regs + 1)->addr = IMX274_ANALOG_GAIN_ADDR_LSB;
- (regs + 1)->val = (gain) & IMX274_MASK_LSB_8_BITS;
-}
-
/*
* imx274_set_gain - Function called when setting gain
* @priv: Pointer to device structure
@@ -1204,10 +1485,8 @@ static inline void imx274_calculate_gain_regs(struct reg_8 regs[2], u16 gain)
*/
static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl)
{
- struct reg_8 reg_list[2];
int err;
u32 gain, analog_gain, digital_gain, gain_reg;
- int i;
gain = (u32)(ctrl->val);
@@ -1248,14 +1527,10 @@ static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl)
if (gain_reg > IMX274_GAIN_REG_MAX)
gain_reg = IMX274_GAIN_REG_MAX;
- imx274_calculate_gain_regs(reg_list, (u16)gain_reg);
-
- for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
- err = imx274_write_reg(priv, reg_list[i].addr,
- reg_list[i].val);
- if (err)
- goto fail;
- }
+ err = imx274_write_mbreg(priv, IMX274_ANALOG_GAIN_ADDR_LSB, gain_reg,
+ 2);
+ if (err)
+ goto fail;
if (IMX274_GAIN_CONST - gain_reg == 0) {
err = -EINVAL;
@@ -1277,16 +1552,6 @@ fail:
return err;
}
-static inline void imx274_calculate_coarse_time_regs(struct reg_8 regs[2],
- u32 coarse_time)
-{
- regs->addr = IMX274_SHR_REG_MSB;
- regs->val = (coarse_time >> IMX274_SHIFT_8_BITS)
- & IMX274_MASK_LSB_8_BITS;
- (regs + 1)->addr = IMX274_SHR_REG_LSB;
- (regs + 1)->val = (coarse_time) & IMX274_MASK_LSB_8_BITS;
-}
-
/*
* imx274_set_coarse_time - Function called when setting SHR value
* @priv: Pointer to device structure
@@ -1298,10 +1563,8 @@ static inline void imx274_calculate_coarse_time_regs(struct reg_8 regs[2],
*/
static int imx274_set_coarse_time(struct stimx274 *priv, u32 *val)
{
- struct reg_8 reg_list[2];
int err;
u32 coarse_time, frame_length;
- int i;
coarse_time = *val;
@@ -1310,16 +1573,9 @@ static int imx274_set_coarse_time(struct stimx274 *priv, u32 *val)
if (err)
goto fail;
- /* prepare SHR registers */
- imx274_calculate_coarse_time_regs(reg_list, coarse_time);
-
- /* write to SHR registers */
- for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
- err = imx274_write_reg(priv, reg_list[i].addr,
- reg_list[i].val);
- if (err)
- goto fail;
- }
+ err = imx274_write_mbreg(priv, IMX274_SHR_REG_LSB, coarse_time, 2);
+ if (err)
+ goto fail;
*val = frame_length - coarse_time;
return 0;
@@ -1365,7 +1621,7 @@ static int imx274_set_exposure(struct stimx274 *priv, int val)
}
coarse_time = (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2 * val
- - nocpiop[priv->mode_index]) / hmax;
+ - priv->mode->nocpiop) / hmax;
/* step 2: convert exposure_time into SHR value */
@@ -1375,7 +1631,7 @@ static int imx274_set_exposure(struct stimx274 *priv, int val)
goto fail;
priv->ctrls.exposure->val =
- (coarse_time * hmax + nocpiop[priv->mode_index])
+ (coarse_time * hmax + priv->mode->nocpiop)
/ (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2);
dev_dbg(&priv->client->dev,
@@ -1448,19 +1704,6 @@ static int imx274_set_test_pattern(struct stimx274 *priv, int val)
return err;
}
-static inline void imx274_calculate_frame_length_regs(struct reg_8 regs[3],
- u32 frame_length)
-{
- regs->addr = IMX274_VMAX_REG_1;
- regs->val = (frame_length >> IMX274_SHIFT_16_BITS)
- & IMX274_MASK_LSB_4_BITS;
- (regs + 1)->addr = IMX274_VMAX_REG_2;
- (regs + 1)->val = (frame_length >> IMX274_SHIFT_8_BITS)
- & IMX274_MASK_LSB_8_BITS;
- (regs + 2)->addr = IMX274_VMAX_REG_3;
- (regs + 2)->val = (frame_length) & IMX274_MASK_LSB_8_BITS;
-}
-
/*
* imx274_set_frame_length - Function called when setting frame length
* @priv: Pointer to device structure
@@ -1472,23 +1715,17 @@ static inline void imx274_calculate_frame_length_regs(struct reg_8 regs[3],
*/
static int imx274_set_frame_length(struct stimx274 *priv, u32 val)
{
- struct reg_8 reg_list[3];
int err;
u32 frame_length;
- int i;
dev_dbg(&priv->client->dev, "%s : input length = %d\n",
__func__, val);
frame_length = (u32)val;
- imx274_calculate_frame_length_regs(reg_list, frame_length);
- for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
- err = imx274_write_reg(priv, reg_list[i].addr,
- reg_list[i].val);
- if (err)
- goto fail;
- }
+ err = imx274_write_mbreg(priv, IMX274_VMAX_REG_3, frame_length, 3);
+ if (err)
+ goto fail;
return 0;
@@ -1529,10 +1766,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv,
/ frame_interval.numerator);
/* boundary check */
- if (req_frame_rate > max_frame_rate[priv->mode_index]) {
+ if (req_frame_rate > priv->mode->max_fps) {
frame_interval.numerator = 1;
- frame_interval.denominator =
- max_frame_rate[priv->mode_index];
+ frame_interval.denominator = priv->mode->max_fps;
} else if (req_frame_rate < IMX274_MIN_FRAME_RATE) {
frame_interval.numerator = 1;
frame_interval.denominator = IMX274_MIN_FRAME_RATE;
@@ -1589,6 +1825,8 @@ fail:
static const struct v4l2_subdev_pad_ops imx274_pad_ops = {
.get_fmt = imx274_get_fmt,
.set_fmt = imx274_set_fmt,
+ .get_selection = imx274_get_selection,
+ .set_selection = imx274_set_selection,
};
static const struct v4l2_subdev_video_ops imx274_video_ops = {
@@ -1632,6 +1870,18 @@ static int imx274_probe(struct i2c_client *client,
mutex_init(&imx274->lock);
+ /* initialize format */
+ imx274->mode = &imx274_formats[IMX274_DEFAULT_MODE];
+ imx274->crop.width = IMX274_MAX_WIDTH;
+ imx274->crop.height = IMX274_MAX_HEIGHT;
+ imx274->format.width = imx274->crop.width / imx274->mode->bin_ratio;
+ imx274->format.height = imx274->crop.height / imx274->mode->bin_ratio;
+ imx274->format.field = V4L2_FIELD_NONE;
+ imx274->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ imx274->format.colorspace = V4L2_COLORSPACE_SRGB;
+ imx274->frame_interval.numerator = 1;
+ imx274->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+
/* initialize regmap */
imx274->regmap = devm_regmap_init_i2c(client, &imx274_regmap_config);
if (IS_ERR(imx274->regmap)) {
@@ -1720,16 +1970,6 @@ static int imx274_probe(struct i2c_client *client,
goto err_ctrls;
}
- /* initialize format */
- imx274->mode_index = IMX274_MODE_3840X2160;
- imx274->format.width = imx274_formats[0].size.width;
- imx274->format.height = imx274_formats[0].size.height;
- imx274->format.field = V4L2_FIELD_NONE;
- imx274->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
- imx274->format.colorspace = V4L2_COLORSPACE_SRGB;
- imx274->frame_interval.numerator = 1;
- imx274->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
-
/* load default control values */
ret = imx274_load_default(imx274);
if (ret) {
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index b600e03aa94b..49c6644cbba7 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -1,6 +1,6 @@
/*
* drivers/media/i2c/lm3560.c
- * General device driver for TI lm3560, FLASH LED Driver
+ * General device driver for TI lm3559 and lm3560 FLASH LED drivers
*
* Copyright (C) 2013 Texas Instruments
*
@@ -465,6 +465,7 @@ static int lm3560_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3560_id_table[] = {
+ {LM3559_NAME, 0},
{LM3560_NAME, 0},
{}
};
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 6a9e068462fd..b385f2b632ad 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -793,6 +793,7 @@ static int mt9m032_probe(struct i2c_client *client,
v4l2_ctrl_cluster(2, &sensor->hflip);
sensor->subdev.ctrl_handler = &sensor->ctrls;
+ sensor->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&sensor->subdev.entity, 1, &sensor->pad);
if (ret < 0)
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 91d822fc4443..715be3632b01 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -1111,6 +1111,7 @@ static int mt9p031_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&mt9p031->subdev, client, &mt9p031_subdev_ops);
mt9p031->subdev.internal_ops = &mt9p031_subdev_internal_ops;
+ mt9p031->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
mt9p031->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9p031->subdev.entity, 1, &mt9p031->pad);
if (ret < 0)
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 9d981d9f5686..f683d2cb0486 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -943,6 +943,7 @@ static int mt9t001_probe(struct i2c_client *client,
mt9t001->subdev.internal_ops = &mt9t001_subdev_internal_ops;
mt9t001->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mt9t001->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
mt9t001->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9t001->subdev.entity, 1, &mt9t001->pad);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 4de63b2df334..f74730d24d8f 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -1162,6 +1162,7 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->subdev.internal_ops = &mt9v032_subdev_internal_ops;
mt9v032->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mt9v032->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
mt9v032->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9v032->subdev.entity, 1, &mt9v032->pad);
if (ret < 0)
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
new file mode 100644
index 000000000000..b5410aeb5fe2
--- /dev/null
+++ b/drivers/media/i2c/mt9v111.c
@@ -0,0 +1,1298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 sensor driver for Aptina MT9V111 image sensor
+ * Copyright (C) 2018 Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Based on mt9v032 driver
+ * Copyright (C) 2010, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Based on mt9v011 driver
+ * Copyright (c) 2009 Mauro Carvalho Chehab <mchehab@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/module.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * MT9V111 is a 1/4-inch CMOS digital image sensor with an integrated
+ * Image Flow Processing (IFP) engine and a sensor core loosely based on
+ * MT9V011.
+ *
+ * The IFP can produce several output image formats from the sensor core
+ * output. This driver currently supports only YUYV format permutations.
+ *
+ * The driver allows manual frame rate control through s_frame_interval subdev
+ * operation or V4L2_CID_V/HBLANK controls, but it is known that the
+ * auto-exposure algorithm might modify the programmed frame rate. While the
+ * driver initially programs the sensor with auto-exposure and
+ * auto-white-balancing enabled, it is possible to disable them and more
+ * precisely control the frame rate.
+ *
+ * While it seems possible to instruct the auto-exposure control algorithm to
+ * respect a programmed frame rate when adjusting the pixel integration time,
+ * the registers controlling this feature are not documented in the publicly
+ * available sensor manual used to develop this driver (09005aef80e90084,
+ * MT9V111_1.fm - Rev. G 1/05 EN).
+ */
+
+#define MT9V111_CHIP_ID_HIGH 0x82
+#define MT9V111_CHIP_ID_LOW 0x3a
+
+#define MT9V111_R01_ADDR_SPACE 0x01
+#define MT9V111_R01_IFP 0x01
+#define MT9V111_R01_CORE 0x04
+
+#define MT9V111_IFP_R06_OPMODE_CTRL 0x06
+#define MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN BIT(1)
+#define MT9V111_IFP_R06_OPMODE_CTRL_AE_EN BIT(14)
+#define MT9V111_IFP_R07_IFP_RESET 0x07
+#define MT9V111_IFP_R07_IFP_RESET_MASK BIT(0)
+#define MT9V111_IFP_R08_OUTFMT_CTRL 0x08
+#define MT9V111_IFP_R08_OUTFMT_CTRL_FLICKER BIT(11)
+#define MT9V111_IFP_R08_OUTFMT_CTRL_PCLK BIT(5)
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2 0x3a
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_CBCR BIT(0)
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_YC BIT(1)
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_MASK GENMASK(2, 0)
+#define MT9V111_IFP_RA5_HPAN 0xa5
+#define MT9V111_IFP_RA6_HZOOM 0xa6
+#define MT9V111_IFP_RA7_HOUT 0xa7
+#define MT9V111_IFP_RA8_VPAN 0xa8
+#define MT9V111_IFP_RA9_VZOOM 0xa9
+#define MT9V111_IFP_RAA_VOUT 0xaa
+#define MT9V111_IFP_DECIMATION_MASK GENMASK(9, 0)
+#define MT9V111_IFP_DECIMATION_FREEZE BIT(15)
+
+#define MT9V111_CORE_R03_WIN_HEIGHT 0x03
+#define MT9V111_CORE_R03_WIN_V_OFFS 2
+#define MT9V111_CORE_R04_WIN_WIDTH 0x04
+#define MT9V111_CORE_R04_WIN_H_OFFS 114
+#define MT9V111_CORE_R05_HBLANK 0x05
+#define MT9V111_CORE_R05_MIN_HBLANK 0x09
+#define MT9V111_CORE_R05_MAX_HBLANK GENMASK(9, 0)
+#define MT9V111_CORE_R05_DEF_HBLANK 0x26
+#define MT9V111_CORE_R06_VBLANK 0x06
+#define MT9V111_CORE_R06_MIN_VBLANK 0x03
+#define MT9V111_CORE_R06_MAX_VBLANK GENMASK(11, 0)
+#define MT9V111_CORE_R06_DEF_VBLANK 0x04
+#define MT9V111_CORE_R07_OUT_CTRL 0x07
+#define MT9V111_CORE_R07_OUT_CTRL_SAMPLE BIT(4)
+#define MT9V111_CORE_R09_PIXEL_INT 0x09
+#define MT9V111_CORE_R09_PIXEL_INT_MASK GENMASK(11, 0)
+#define MT9V111_CORE_R0D_CORE_RESET 0x0d
+#define MT9V111_CORE_R0D_CORE_RESET_MASK BIT(0)
+#define MT9V111_CORE_RFF_CHIP_VER 0xff
+
+#define MT9V111_PIXEL_ARRAY_WIDTH 640
+#define MT9V111_PIXEL_ARRAY_HEIGHT 480
+
+#define MT9V111_MAX_CLKIN 27000000
+
+/* The default sensor configuration at startup time. */
+static struct v4l2_mbus_framefmt mt9v111_def_fmt = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+};
+
+struct mt9v111_dev {
+ struct device *dev;
+ struct i2c_client *client;
+
+ u8 addr_space;
+
+ struct v4l2_subdev sd;
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
+
+ struct v4l2_ctrl *auto_awb;
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl_handler ctrls;
+
+ /* Output image format and sizes. */
+ struct v4l2_mbus_framefmt fmt;
+ unsigned int fps;
+
+ /* Protects power up/down sequences. */
+ struct mutex pwr_mutex;
+ int pwr_count;
+
+ /* Protects stream on/off sequences. */
+ struct mutex stream_mutex;
+ bool streaming;
+
+ /* Flag to mark HW settings as not yet applied. */
+ bool pending;
+
+ /* Clock provider and system clock frequency. */
+ struct clk *clk;
+ u32 sysclk;
+
+ struct gpio_desc *oe;
+ struct gpio_desc *standby;
+ struct gpio_desc *reset;
+};
+
+#define sd_to_mt9v111(__sd) container_of((__sd), struct mt9v111_dev, sd)
+
+/*
+ * mt9v111_mbus_fmt - List all media bus formats supported by the driver.
+ *
+ * Only the media bus codes are listed here; the image sizes are freely
+ * configurable within the pixel array size limits.
+ *
+ * The desired frame interval, in the supported frame interval range, is
+ * obtained by configuring blanking as the sensor does not have a PLL but
+ * only a fixed clock divider that generates the output pixel clock.
+ */
+static struct mt9v111_mbus_fmt {
+ u32 code;
+} mt9v111_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ },
+};
+
+static u32 mt9v111_frame_intervals[] = {5, 10, 15, 20, 30};
+
+/*
+ * mt9v111_frame_sizes - List sensor's supported resolutions.
+ *
+ * Resolutions are generated through decimation in the IFP block from the
+ * full VGA pixel array.
+ */
+static struct v4l2_rect mt9v111_frame_sizes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ },
+ {
+ .width = 352,
+ .height = 288
+ },
+ {
+ .width = 320,
+ .height = 240,
+ },
+ {
+ .width = 176,
+ .height = 144,
+ },
+ {
+ .width = 160,
+ .height = 120,
+ },
+};
+
+/* --- Device I/O access --- */
+
+static int __mt9v111_read(struct i2c_client *c, u8 reg, u16 *val)
+{
+ struct i2c_msg msg[2];
+ __be16 buf;
+ int ret;
+
+ msg[0].addr = c->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &reg;
+
+ msg[1].addr = c->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = (char *)&buf;
+
+ ret = i2c_transfer(c->adapter, msg, 2);
+ if (ret < 0) {
+ dev_err(&c->dev, "i2c read transfer error: %d\n", ret);
+ return ret;
+ }
+
+ *val = be16_to_cpu(buf);
+
+ dev_dbg(&c->dev, "%s: %x=%x\n", __func__, reg, *val);
+
+ return 0;
+}
+
+static int __mt9v111_write(struct i2c_client *c, u8 reg, u16 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3] = { 0 };
+ int ret;
+
+ buf[0] = reg;
+ buf[1] = val >> 8;
+ buf[2] = val & 0xff;
+
+ msg.addr = c->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = (char *)buf;
+
+ dev_dbg(&c->dev, "%s: %x = %x%x\n", __func__, reg, buf[1], buf[2]);
+
+ ret = i2c_transfer(c->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_err(&c->dev, "i2c write transfer error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
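+
+ /*
+ * Wire format example (illustrative): __mt9v111_write(c, 0x06, 0x0584)
+ * sends the 3-byte transfer { 0x06, 0x05, 0x84 }: the register address
+ * first, then the 16-bit value MSB first.
+ */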
+
+static int __mt9v111_addr_space_select(struct i2c_client *c, u16 addr_space)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(c);
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ u16 val;
+ int ret;
+
+ if (mt9v111->addr_space == addr_space)
+ return 0;
+
+ ret = __mt9v111_write(c, MT9V111_R01_ADDR_SPACE, addr_space);
+ if (ret)
+ return ret;
+
+ /* Verify address space has been updated */
+ ret = __mt9v111_read(c, MT9V111_R01_ADDR_SPACE, &val);
+ if (ret)
+ return ret;
+
+ if (val != addr_space)
+ return -EINVAL;
+
+ mt9v111->addr_space = addr_space;
+
+ return 0;
+}
+
+static int mt9v111_read(struct i2c_client *c, u8 addr_space, u8 reg, u16 *val)
+{
+ int ret;
+
+ /* Select register address space first. */
+ ret = __mt9v111_addr_space_select(c, addr_space);
+ if (ret)
+ return ret;
+
+ ret = __mt9v111_read(c, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mt9v111_write(struct i2c_client *c, u8 addr_space, u8 reg, u16 val)
+{
+ int ret;
+
+ /* Select register address space first. */
+ ret = __mt9v111_addr_space_select(c, addr_space);
+ if (ret)
+ return ret;
+
+ ret = __mt9v111_write(c, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mt9v111_update(struct i2c_client *c, u8 addr_space, u8 reg,
+ u16 mask, u16 val)
+{
+ u16 current_val;
+ int ret;
+
+ /* Select register address space first. */
+ ret = __mt9v111_addr_space_select(c, addr_space);
+ if (ret)
+ return ret;
+
+ /* Read the current register value, then update it. */
+ ret = __mt9v111_read(c, reg, &current_val);
+ if (ret)
+ return ret;
+
+ current_val &= ~mask;
+ current_val |= (val & mask);
+ ret = __mt9v111_write(c, reg, current_val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
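+
+ /*
+ * Usage sketch (illustrative): each accessor takes the target address
+ * space as its second argument, and __mt9v111_addr_space_select() only
+ * touches R0x01 when the cached space differs from the requested one.
+ * E.g. reading the chip version from the core space, then enabling AWB
+ * in the IFP space:
+ *
+ * u16 ver;
+ *
+ * mt9v111_read(c, MT9V111_R01_CORE, MT9V111_CORE_RFF_CHIP_VER, &ver);
+ * mt9v111_update(c, MT9V111_R01_IFP, MT9V111_IFP_R06_OPMODE_CTRL,
+ *                MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN,
+ *                MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN);
+ */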
+
+/* --- Sensor HW operations --- */
+
+static int __mt9v111_power_on(struct v4l2_subdev *sd)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ int ret;
+
+ ret = clk_prepare_enable(mt9v111->clk);
+ if (ret)
+ return ret;
+
+ clk_set_rate(mt9v111->clk, mt9v111->sysclk);
+
+ gpiod_set_value(mt9v111->standby, 0);
+ usleep_range(500, 1000);
+
+ gpiod_set_value(mt9v111->oe, 1);
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int __mt9v111_power_off(struct v4l2_subdev *sd)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+
+ gpiod_set_value(mt9v111->oe, 0);
+ usleep_range(500, 1000);
+
+ gpiod_set_value(mt9v111->standby, 1);
+ usleep_range(500, 1000);
+
+ clk_disable_unprepare(mt9v111->clk);
+
+ return 0;
+}
+
+static int __mt9v111_hw_reset(struct mt9v111_dev *mt9v111)
+{
+ if (!mt9v111->reset)
+ return -EINVAL;
+
+ gpiod_set_value(mt9v111->reset, 1);
+ usleep_range(500, 1000);
+
+ gpiod_set_value(mt9v111->reset, 0);
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int __mt9v111_sw_reset(struct mt9v111_dev *mt9v111)
+{
+ struct i2c_client *c = mt9v111->client;
+ int ret;
+
+ /* Software reset core and IFP blocks. */
+
+ ret = mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R0D_CORE_RESET,
+ MT9V111_CORE_R0D_CORE_RESET_MASK, 1);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ ret = mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R0D_CORE_RESET,
+ MT9V111_CORE_R0D_CORE_RESET_MASK, 0);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ ret = mt9v111_update(c, MT9V111_R01_IFP,
+ MT9V111_IFP_R07_IFP_RESET,
+ MT9V111_IFP_R07_IFP_RESET_MASK, 1);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ ret = mt9v111_update(c, MT9V111_R01_IFP,
+ MT9V111_IFP_R07_IFP_RESET,
+ MT9V111_IFP_R07_IFP_RESET_MASK, 0);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111,
+ struct v4l2_fract *tpf)
+{
+ unsigned int fps = tpf->numerator ?
+ tpf->denominator / tpf->numerator :
+ tpf->denominator;
+ unsigned int best_diff;
+ unsigned int frm_cols;
+ unsigned int row_pclk;
+ unsigned int best_fps;
+ unsigned int pclk;
+ unsigned int diff;
+ unsigned int idx;
+ unsigned int hb;
+ unsigned int vb;
+ unsigned int i;
+ int ret;
+
+ /* Approximate to the closest supported frame interval. */
+ best_diff = ~0L;
+ for (i = 0, idx = 0; i < ARRAY_SIZE(mt9v111_frame_intervals); i++) {
+ diff = abs(fps - mt9v111_frame_intervals[i]);
+ if (diff < best_diff) {
+ idx = i;
+ best_diff = diff;
+ }
+ }
+ fps = mt9v111_frame_intervals[idx];
+
+ /*
+ * The sensor does not provide a PLL circuitry and pixel clock is
+ * generated dividing the master clock source by two.
+ *
+ * Trow = (W + Hblank + 114) * 2 * (1 / SYSCLK)
+ * TFrame = Trow * (H + Vblank + 2)
+ *
+ * FPS = (SYSCLK / 2) / (Trow * (H + Vblank + 2))
+ *
+ * This boils down to tune H and V blanks to best approximate the
+ * above equation.
+ *
+ * Test all available H/V blank values, until we reach the
+ * desired frame rate.
+ */
+ best_fps = vb = hb = 0;
+ pclk = DIV_ROUND_CLOSEST(mt9v111->sysclk, 2);
+ row_pclk = MT9V111_PIXEL_ARRAY_WIDTH + 7 + MT9V111_CORE_R04_WIN_H_OFFS;
+ frm_cols = MT9V111_PIXEL_ARRAY_HEIGHT + 7 + MT9V111_CORE_R03_WIN_V_OFFS;
+
+ best_diff = ~0L;
+ for (vb = MT9V111_CORE_R06_MIN_VBLANK;
+ vb < MT9V111_CORE_R06_MAX_VBLANK; vb++) {
+ for (hb = MT9V111_CORE_R05_MIN_HBLANK;
+ hb < MT9V111_CORE_R05_MAX_HBLANK; hb += 10) {
+ unsigned int t_frame = (row_pclk + hb) *
+ (frm_cols + vb);
+ unsigned int t_fps = DIV_ROUND_CLOSEST(pclk, t_frame);
+
+ diff = abs(fps - t_fps);
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_fps = t_fps;
+
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (diff == 0)
+ break;
+ }
+
+ ret = v4l2_ctrl_s_ctrl_int64(mt9v111->hblank, hb);
+ if (ret)
+ return ret;
+
+ ret = v4l2_ctrl_s_ctrl_int64(mt9v111->vblank, vb);
+ if (ret)
+ return ret;
+
+ tpf->numerator = 1;
+ tpf->denominator = best_fps;
+
+ return 0;
+}
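+
+ /*
+ * Worked example (illustrative, assuming a 27 MHz master clock):
+ *
+ * pclk     = 27000000 / 2  = 13500000
+ * row_pclk = 640 + 7 + 114 = 761
+ * frm_cols = 480 + 7 + 2   = 489
+ *
+ * With the default blankings (hb = 0x26 = 38, vb = 4):
+ *
+ * t_frame = (761 + 38) * (489 + 4) = 393907
+ * fps     = 13500000 / 393907     ~= 34
+ *
+ * so the search above must raise hb/vb well beyond the defaults to reach
+ * the lower supported rates (e.g. 15 or 5 fps).
+ */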
+
+static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
+{
+ struct i2c_client *c = mt9v111->client;
+ int ret;
+ u16 outfmtctrl2;
+
+ /* Force device reset. */
+ ret = __mt9v111_hw_reset(mt9v111);
+ if (ret == -EINVAL)
+ ret = __mt9v111_sw_reset(mt9v111);
+ if (ret)
+ return ret;
+
+ /* Configure internal clock sample rate. */
+ ret = mt9v111->sysclk < DIV_ROUND_CLOSEST(MT9V111_MAX_CLKIN, 2) ?
+ mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R07_OUT_CTRL,
+ MT9V111_CORE_R07_OUT_CTRL_SAMPLE, 1) :
+ mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R07_OUT_CTRL,
+ MT9V111_CORE_R07_OUT_CTRL_SAMPLE, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure output image format components ordering.
+ *
+ * TODO: IFP block can also output several RGB permutations, we only
+ * support YUYV permutations at the moment.
+ */
+ switch (mt9v111->fmt.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ outfmtctrl2 = MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_YC;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ outfmtctrl2 = MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_CBCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ outfmtctrl2 = MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_YC |
+ MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_CBCR;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ outfmtctrl2 = 0;
+ break;
+ }
+
+ ret = mt9v111_update(c, MT9V111_R01_IFP, MT9V111_IFP_R3A_OUTFMT_CTRL2,
+ MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_MASK,
+ outfmtctrl2);
+ if (ret)
+ return ret;
+
+ /*
+ * Do not change the sensor core's default configuration:
+ * output the whole 640x480 pixel array, skip 18 columns and 6 rows.
+ *
+ * Instead, control the output image size through IFP block.
+ *
+ * TODO: No zoom & pan support. Currently the output image size is
+ * controlled through decimation only.
+ */
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA5_HPAN,
+ MT9V111_IFP_DECIMATION_FREEZE);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA8_VPAN,
+ MT9V111_IFP_DECIMATION_FREEZE);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA6_HZOOM,
+ MT9V111_IFP_DECIMATION_FREEZE |
+ MT9V111_PIXEL_ARRAY_WIDTH);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA9_VZOOM,
+ MT9V111_IFP_DECIMATION_FREEZE |
+ MT9V111_PIXEL_ARRAY_HEIGHT);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA7_HOUT,
+ MT9V111_IFP_DECIMATION_FREEZE |
+ mt9v111->fmt.width);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RAA_VOUT,
+ mt9v111->fmt.height);
+ if (ret)
+ return ret;
+
+ /* Apply controls to set auto exp, auto awb and timings */
+ ret = v4l2_ctrl_handler_setup(&mt9v111->ctrls);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the pixel integration time to the whole frame time.
+ * This value controls the shutter delay when running with AE
+ * disabled. If longer than the frame time, it affects the output
+ * frame rate.
+ */
+ return mt9v111_write(c, MT9V111_R01_CORE, MT9V111_CORE_R09_PIXEL_INT,
+ MT9V111_PIXEL_ARRAY_HEIGHT);
+}
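+
+ /*
+ * Decimation example (illustrative): with the whole 640x480 array selected
+ * through HZOOM/VZOOM above, programming HOUT = 320 and VOUT = 240 makes
+ * the IFP decimate by a factor of two in both directions, producing a QVGA
+ * stream without touching the sensor core timings.
+ */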
+
+/* --- V4L2 subdev operations --- */
+
+static int mt9v111_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ int pwr_count;
+ int ret = 0;
+
+ mutex_lock(&mt9v111->pwr_mutex);
+
+ /*
+ * Make sure we're transitioning from 0 to 1, or vice versa,
+ * before actually changing the power state.
+ */
+ pwr_count = mt9v111->pwr_count;
+ pwr_count += on ? 1 : -1;
+ if (pwr_count == !!on) {
+ ret = on ? __mt9v111_power_on(sd) :
+ __mt9v111_power_off(sd);
+ if (!ret)
+ /* All went well, update the power counter. */
+ mt9v111->pwr_count = pwr_count;
+
+ mutex_unlock(&mt9v111->pwr_mutex);
+
+ return ret;
+ }
+
+ /*
+ * Update power counter to keep track of how many nested calls we
+ * received.
+ */
+ WARN_ON(pwr_count < 0 || pwr_count > 1);
+ mt9v111->pwr_count = pwr_count;
+
+ mutex_unlock(&mt9v111->pwr_mutex);
+
+ return ret;
+}
+
+static int mt9v111_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
+ int ret;
+
+ mutex_lock(&mt9v111->stream_mutex);
+
+ if (mt9v111->streaming == enable) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return 0;
+ }
+
+ ret = mt9v111_s_power(subdev, enable);
+ if (ret)
+ goto error_unlock;
+
+ if (enable && mt9v111->pending) {
+ ret = mt9v111_hw_config(mt9v111);
+ if (ret)
+ goto error_unlock;
+
+ /*
+ * No need to update controls here: only H/VBLANK are supported,
+ * and those are programmed to the registers immediately in .s_ctrl.
+ */
+
+ mt9v111->pending = false;
+ }
+
+ mt9v111->streaming = enable ? true : false;
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+
+error_unlock:
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return ret;
+}
+
+static int mt9v111_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *ival)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ struct v4l2_fract *tpf = &ival->interval;
+ unsigned int fps = tpf->numerator ?
+ tpf->denominator / tpf->numerator :
+ tpf->denominator;
+ unsigned int max_fps;
+
+ if (!tpf->numerator)
+ tpf->numerator = 1;
+
+ mutex_lock(&mt9v111->stream_mutex);
+
+ if (mt9v111->streaming) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EBUSY;
+ }
+
+ if (mt9v111->fps == fps) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return 0;
+ }
+
+ /* Make sure the frame rate / image size constraints are respected. */
+ if (mt9v111->fmt.width < QVGA_WIDTH &&
+ mt9v111->fmt.height < QVGA_HEIGHT)
+ max_fps = 90;
+ else if (mt9v111->fmt.width < CIF_WIDTH &&
+ mt9v111->fmt.height < CIF_HEIGHT)
+ max_fps = 60;
+ else
+ max_fps = mt9v111->sysclk <
+ DIV_ROUND_CLOSEST(MT9V111_MAX_CLKIN, 2) ? 15 :
+ 30;
+
+ if (fps > max_fps) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EINVAL;
+ }
+
+ mt9v111_calc_frame_rate(mt9v111, tpf);
+
+ mt9v111->fps = fps;
+ mt9v111->pending = true;
+
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
+
+static int mt9v111_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *ival)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ struct v4l2_fract *tpf = &ival->interval;
+
+ mutex_lock(&mt9v111->stream_mutex);
+
+ tpf->numerator = 1;
+ tpf->denominator = mt9v111->fps;
+
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
+ struct mt9v111_dev *mt9v111,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+#if IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ return v4l2_subdev_get_try_format(&mt9v111->sd, cfg, pad);
+#else
+ return &cfg->try_fmt;
+#endif
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &mt9v111->fmt;
+ default:
+ return NULL;
+ }
+}
+
+static int mt9v111_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index > ARRAY_SIZE(mt9v111_formats) - 1)
+ return -EINVAL;
+
+ code->code = mt9v111_formats[code->index].code;
+
+ return 0;
+}
+
+static int mt9v111_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ unsigned int i;
+
+ if (fie->pad || fie->index >= ARRAY_SIZE(mt9v111_frame_intervals))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mt9v111_frame_sizes); i++)
+ if (fie->width == mt9v111_frame_sizes[i].width &&
+ fie->height == mt9v111_frame_sizes[i].height)
+ break;
+
+ if (i == ARRAY_SIZE(mt9v111_frame_sizes))
+ return -EINVAL;
+
+ fie->interval.numerator = 1;
+ fie->interval.denominator = mt9v111_frame_intervals[fie->index];
+
+ return 0;
+}
+
+static int mt9v111_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->pad || fse->index >= ARRAY_SIZE(mt9v111_frame_sizes))
+ return -EINVAL;
+
+ fse->min_width = mt9v111_frame_sizes[fse->index].width;
+ fse->max_width = mt9v111_frame_sizes[fse->index].width;
+ fse->min_height = mt9v111_frame_sizes[fse->index].height;
+ fse->max_height = mt9v111_frame_sizes[fse->index].height;
+
+ return 0;
+}
+
+static int mt9v111_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
+
+ if (format->pad)
+ return -EINVAL;
+
+ mutex_lock(&mt9v111->stream_mutex);
+ format->format = *__mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ format->which);
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
+
+static int mt9v111_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
+ struct v4l2_mbus_framefmt new_fmt;
+ struct v4l2_mbus_framefmt *__fmt;
+ unsigned int best_fit = ~0L;
+ unsigned int idx = 0;
+ unsigned int i;
+
+ mutex_lock(&mt9v111->stream_mutex);
+ if (mt9v111->streaming) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EBUSY;
+ }
+
+ if (format->pad) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EINVAL;
+ }
+
+ /* Update mbus format code and sizes. */
+ for (i = 0; i < ARRAY_SIZE(mt9v111_formats); i++) {
+ if (format->format.code == mt9v111_formats[i].code) {
+ new_fmt.code = mt9v111_formats[i].code;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(mt9v111_formats))
+ new_fmt.code = mt9v111_formats[0].code;
+
+ for (i = 0; i < ARRAY_SIZE(mt9v111_frame_sizes); i++) {
+ unsigned int fit = abs(mt9v111_frame_sizes[i].width -
+ format->format.width) +
+ abs(mt9v111_frame_sizes[i].height -
+ format->format.height);
+ if (fit < best_fit) {
+ best_fit = fit;
+ idx = i;
+
+ if (fit == 0)
+ break;
+ }
+ }
+ new_fmt.width = mt9v111_frame_sizes[idx].width;
+ new_fmt.height = mt9v111_frame_sizes[idx].height;
+
+ /* Update the device (or pad) format if it has changed. */
+ __fmt = __mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ format->which);
+
+ /* Format hasn't changed, stop here. */
+ if (__fmt->code == new_fmt.code &&
+ __fmt->width == new_fmt.width &&
+ __fmt->height == new_fmt.height)
+ goto done;
+
+ /* Update the format and sizes, then mark changes as pending. */
+ __fmt->code = new_fmt.code;
+ __fmt->width = new_fmt.width;
+ __fmt->height = new_fmt.height;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ mt9v111->pending = true;
+
+ dev_dbg(mt9v111->dev, "%s: mbus_code: %x - (%ux%u)\n",
+ __func__, __fmt->code, __fmt->width, __fmt->height);
+
+done:
+ format->format = *__fmt;
+
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
+
+static int mt9v111_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ cfg->try_fmt = mt9v111_def_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops mt9v111_core_ops = {
+ .s_power = mt9v111_s_power,
+};
+
+static const struct v4l2_subdev_video_ops mt9v111_video_ops = {
+ .s_stream = mt9v111_s_stream,
+ .s_frame_interval = mt9v111_s_frame_interval,
+ .g_frame_interval = mt9v111_g_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops mt9v111_pad_ops = {
+ .init_cfg = mt9v111_init_cfg,
+ .enum_mbus_code = mt9v111_enum_mbus_code,
+ .enum_frame_size = mt9v111_enum_frame_size,
+ .enum_frame_interval = mt9v111_enum_frame_interval,
+ .get_fmt = mt9v111_get_format,
+ .set_fmt = mt9v111_set_format,
+};
+
+static const struct v4l2_subdev_ops mt9v111_ops = {
+ .core = &mt9v111_core_ops,
+ .video = &mt9v111_video_ops,
+ .pad = &mt9v111_pad_ops,
+};
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+static const struct media_entity_operations mt9v111_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+#endif
+
+/* --- V4L2 ctrl --- */
+static int mt9v111_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9v111_dev *mt9v111 = container_of(ctrl->handler,
+ struct mt9v111_dev,
+ ctrls);
+ int ret;
+
+ mutex_lock(&mt9v111->pwr_mutex);
+ /*
+ * If sensor is powered down, just cache new control values,
+ * no actual register access.
+ */
+ if (!mt9v111->pwr_count) {
+ mt9v111->pending = true;
+ mutex_unlock(&mt9v111->pwr_mutex);
+ return 0;
+ }
+ mutex_unlock(&mt9v111->pwr_mutex);
+
+ /*
+ * The flicker avoidance filter is disabled when both auto exposure
+ * and auto white balance are disabled, and enabled when either of
+ * the two is enabled.
+ *
+ * Disabling flickering when AE and AWB are off allows more precise
+ * control of the programmed frame rate.
+ */
+ if (mt9v111->auto_exp->is_new || mt9v111->auto_awb->is_new) {
+ if (mt9v111->auto_exp->val == V4L2_EXPOSURE_MANUAL &&
+ mt9v111->auto_awb->val == V4L2_WHITE_BALANCE_MANUAL)
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R08_OUTFMT_CTRL,
+ MT9V111_IFP_R08_OUTFMT_CTRL_FLICKER,
+ 0);
+ else
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R08_OUTFMT_CTRL,
+ MT9V111_IFP_R08_OUTFMT_CTRL_FLICKER,
+ 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = -EINVAL;
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R06_OPMODE_CTRL,
+ MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN,
+ ctrl->val == V4L2_WHITE_BALANCE_AUTO ?
+ MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN : 0);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R06_OPMODE_CTRL,
+ MT9V111_IFP_R06_OPMODE_CTRL_AE_EN,
+ ctrl->val == V4L2_EXPOSURE_AUTO ?
+ MT9V111_IFP_R06_OPMODE_CTRL_AE_EN : 0);
+ break;
+ case V4L2_CID_HBLANK:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_CORE,
+ MT9V111_CORE_R05_HBLANK,
+ MT9V111_CORE_R05_MAX_HBLANK,
+ mt9v111->hblank->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_CORE,
+ MT9V111_CORE_R06_VBLANK,
+ MT9V111_CORE_R06_MAX_VBLANK,
+ mt9v111->vblank->val);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mt9v111_ctrl_ops = {
+ .s_ctrl = mt9v111_s_ctrl,
+};
+
+static int mt9v111_chip_probe(struct mt9v111_dev *mt9v111)
+{
+ int ret;
+ u16 val;
+
+ ret = __mt9v111_power_on(&mt9v111->sd);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_read(mt9v111->client, MT9V111_R01_CORE,
+ MT9V111_CORE_RFF_CHIP_VER, &val);
+ if (ret)
+ goto power_off;
+
+ if ((val >> 8) != MT9V111_CHIP_ID_HIGH ||
+     (val & 0xff) != MT9V111_CHIP_ID_LOW) {
+ dev_err(mt9v111->dev,
+ "Unable to identify MT9V111 chip: 0x%2x%2x\n",
+ val >> 8, val & 0xff);
+ ret = -EIO;
+ goto power_off;
+ }
+
+ dev_dbg(mt9v111->dev, "Chip identified: 0x%02x%02x\n",
+ val >> 8, val & 0xff);
+
+power_off:
+ __mt9v111_power_off(&mt9v111->sd);
+
+ return ret;
+}
+
+static int mt9v111_probe(struct i2c_client *client)
+{
+ struct mt9v111_dev *mt9v111;
+ struct v4l2_fract tpf;
+ int ret;
+
+ mt9v111 = devm_kzalloc(&client->dev, sizeof(*mt9v111), GFP_KERNEL);
+ if (!mt9v111)
+ return -ENOMEM;
+
+ mt9v111->dev = &client->dev;
+ mt9v111->client = client;
+
+ mt9v111->clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(mt9v111->clk))
+ return PTR_ERR(mt9v111->clk);
+
+ mt9v111->sysclk = clk_get_rate(mt9v111->clk);
+ if (mt9v111->sysclk > MT9V111_MAX_CLKIN)
+ return -EINVAL;
+
+ mt9v111->oe = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(mt9v111->oe)) {
+ dev_err(&client->dev, "Unable to get GPIO \"enable\": %ld\n",
+ PTR_ERR(mt9v111->oe));
+ return PTR_ERR(mt9v111->oe);
+ }
+
+ mt9v111->standby = devm_gpiod_get_optional(&client->dev, "standby",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(mt9v111->standby)) {
+ dev_err(&client->dev, "Unable to get GPIO \"standby\": %ld\n",
+ PTR_ERR(mt9v111->standby));
+ return PTR_ERR(mt9v111->standby);
+ }
+
+ mt9v111->reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(mt9v111->reset)) {
+ dev_err(&client->dev, "Unable to get GPIO \"reset\": %ld\n",
+ PTR_ERR(mt9v111->reset));
+ return PTR_ERR(mt9v111->reset);
+ }
+
+ mutex_init(&mt9v111->pwr_mutex);
+ mutex_init(&mt9v111->stream_mutex);
+
+ v4l2_ctrl_handler_init(&mt9v111->ctrls, 5);
+
+ mt9v111->auto_awb = v4l2_ctrl_new_std(&mt9v111->ctrls,
+ &mt9v111_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1,
+ V4L2_WHITE_BALANCE_AUTO);
+ if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
+ ret = mt9v111->ctrls.error;
+ goto error_free_ctrls;
+ }
+
+ mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
+ &mt9v111_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL,
+ 0, V4L2_EXPOSURE_AUTO);
+ if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
+ ret = mt9v111->ctrls.error;
+ goto error_free_ctrls;
+ }
+
+ /* Initialize timings */
+ mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
+ V4L2_CID_HBLANK,
+ MT9V111_CORE_R05_MIN_HBLANK,
+ MT9V111_CORE_R05_MAX_HBLANK, 1,
+ MT9V111_CORE_R05_DEF_HBLANK);
+ if (IS_ERR_OR_NULL(mt9v111->hblank)) {
+ ret = mt9v111->ctrls.error;
+ goto error_free_ctrls;
+ }
+
+ mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
+ V4L2_CID_VBLANK,
+ MT9V111_CORE_R06_MIN_VBLANK,
+ MT9V111_CORE_R06_MAX_VBLANK, 1,
+ MT9V111_CORE_R06_DEF_VBLANK);
+ if (IS_ERR_OR_NULL(mt9v111->vblank)) {
+ ret = mt9v111->ctrls.error;
+ goto error_free_ctrls;
+ }
+
+ /* PIXEL_RATE is fixed: just expose it to user space. */
+ v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
+ DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
+
+ mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
+
+ /* Start with default configuration: 640x480 UYVY. */
+ mt9v111->fmt = mt9v111_def_fmt;
+
+ /* Re-calculate blankings for 640x480@15fps. */
+ mt9v111->fps = 15;
+ tpf.numerator = 1;
+ tpf.denominator = mt9v111->fps;
+ mt9v111_calc_frame_rate(mt9v111, &tpf);
+
+ mt9v111->pwr_count = 0;
+ mt9v111->addr_space = MT9V111_R01_IFP;
+ mt9v111->pending = true;
+
+ v4l2_i2c_subdev_init(&mt9v111->sd, client, &mt9v111_ops);
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ mt9v111->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mt9v111->sd.entity.ops = &mt9v111_subdev_entity_ops;
+ mt9v111->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
+ if (ret)
+ goto error_free_ctrls;
+#endif
+
+ ret = mt9v111_chip_probe(mt9v111);
+ if (ret)
+ goto error_free_ctrls;
+
+ ret = v4l2_async_register_subdev(&mt9v111->sd);
+ if (ret)
+ goto error_free_ctrls;
+
+ return 0;
+
+error_free_ctrls:
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&mt9v111->sd.entity);
+#endif
+
+ mutex_destroy(&mt9v111->pwr_mutex);
+ mutex_destroy(&mt9v111->stream_mutex);
+
+ return ret;
+}
+
+static int mt9v111_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+
+ v4l2_async_unregister_subdev(sd);
+
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&sd->entity);
+#endif
+
+ mutex_destroy(&mt9v111->pwr_mutex);
+ mutex_destroy(&mt9v111->stream_mutex);
+
+ devm_gpiod_put(mt9v111->dev, mt9v111->oe);
+ devm_gpiod_put(mt9v111->dev, mt9v111->standby);
+ devm_gpiod_put(mt9v111->dev, mt9v111->reset);
+
+ devm_clk_put(mt9v111->dev, mt9v111->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mt9v111_of_match[] = {
+ { .compatible = "aptina,mt9v111", },
+ { /* sentinel */ },
+};
+
+static struct i2c_driver mt9v111_driver = {
+ .driver = {
+ .name = "mt9v111",
+ .of_match_table = mt9v111_of_match,
+ },
+ .probe_new = mt9v111_probe,
+ .remove = mt9v111_remove,
+};
+
+module_i2c_driver(mt9v111_driver);
+
+MODULE_DESCRIPTION("V4L2 sensor driver for Aptina MT9V111");
+MODULE_AUTHOR("Jacopo Mondi <jacopo@jmondi.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
new file mode 100644
index 000000000000..f753a1c333ef
--- /dev/null
+++ b/drivers/media/i2c/ov2680.c
@@ -0,0 +1,1186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Omnivision OV2680 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2018 Linaro Ltd
+ *
+ * Based on OV5640 Sensor Driver
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2014-2017 Mentor Graphics Inc.
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#define OV2680_XVCLK_VALUE 24000000
+
+#define OV2680_CHIP_ID 0x2680
+
+#define OV2680_REG_STREAM_CTRL 0x0100
+#define OV2680_REG_SOFT_RESET 0x0103
+
+#define OV2680_REG_CHIP_ID_HIGH 0x300a
+#define OV2680_REG_CHIP_ID_LOW 0x300b
+
+#define OV2680_REG_R_MANUAL 0x3503
+#define OV2680_REG_GAIN_PK 0x350a
+#define OV2680_REG_EXPOSURE_PK_HIGH 0x3500
+#define OV2680_REG_TIMING_HTS 0x380c
+#define OV2680_REG_TIMING_VTS 0x380e
+#define OV2680_REG_FORMAT1 0x3820
+#define OV2680_REG_FORMAT2 0x3821
+
+#define OV2680_REG_ISP_CTRL00 0x5080
+
+#define OV2680_FRAME_RATE 30
+
+#define OV2680_REG_VALUE_8BIT 1
+#define OV2680_REG_VALUE_16BIT 2
+#define OV2680_REG_VALUE_24BIT 3
+
+#define OV2680_WIDTH_MAX 1600
+#define OV2680_HEIGHT_MAX 1200
+
+enum ov2680_mode_id {
+ OV2680_MODE_QUXGA_800_600,
+ OV2680_MODE_720P_1280_720,
+ OV2680_MODE_UXGA_1600_1200,
+ OV2680_MODE_MAX,
+};
+
+struct reg_value {
+ u16 reg_addr;
+ u8 val;
+};
+
+static const char * const ov2680_supply_name[] = {
+ "DOVDD",
+ "DVDD",
+ "AVDD",
+};
+
+#define OV2680_NUM_SUPPLIES ARRAY_SIZE(ov2680_supply_name)
+
+struct ov2680_mode_info {
+ const char *name;
+ enum ov2680_mode_id id;
+ u32 width;
+ u32 height;
+ const struct reg_value *reg_data;
+ u32 reg_data_size;
+};
+
+struct ov2680_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ };
+
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *test_pattern;
+};
+
+struct ov2680_dev {
+ struct i2c_client *i2c_client;
+ struct v4l2_subdev sd;
+
+ struct media_pad pad;
+ struct clk *xvclk;
+ u32 xvclk_freq;
+ struct regulator_bulk_data supplies[OV2680_NUM_SUPPLIES];
+
+ struct gpio_desc *reset_gpio;
+ struct mutex lock; /* protect members */
+
+ bool mode_pending_changes;
+ bool is_enabled;
+ bool is_streaming;
+
+ struct ov2680_ctrls ctrls;
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_fract frame_interval;
+
+ const struct ov2680_mode_info *current_mode;
+};
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Color Bars",
+ "Random Data",
+ "Square",
+ "Black Image",
+};
+
+static const int ov2680_hv_flip_bayer_order[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+};
+
+static const struct reg_value ov2680_setting_30fps_QUXGA_800_600[] = {
+ {0x3086, 0x01}, {0x370a, 0x23}, {0x3808, 0x03}, {0x3809, 0x20},
+ {0x380a, 0x02}, {0x380b, 0x58}, {0x380c, 0x06}, {0x380d, 0xac},
+ {0x380e, 0x02}, {0x380f, 0x84}, {0x3811, 0x04}, {0x3813, 0x04},
+ {0x3814, 0x31}, {0x3815, 0x31}, {0x3820, 0xc0}, {0x4008, 0x00},
+ {0x4009, 0x03}, {0x4837, 0x1e}, {0x3501, 0x4e}, {0x3502, 0xe0},
+};
+
+static const struct reg_value ov2680_setting_30fps_720P_1280_720[] = {
+ {0x3086, 0x00}, {0x3808, 0x05}, {0x3809, 0x00}, {0x380a, 0x02},
+ {0x380b, 0xd0}, {0x380c, 0x06}, {0x380d, 0xa8}, {0x380e, 0x05},
+ {0x380f, 0x0e}, {0x3811, 0x08}, {0x3813, 0x06}, {0x3814, 0x11},
+ {0x3815, 0x11}, {0x3820, 0xc0}, {0x4008, 0x00},
+};
+
+static const struct reg_value ov2680_setting_30fps_UXGA_1600_1200[] = {
+ {0x3086, 0x00}, {0x3501, 0x4e}, {0x3502, 0xe0}, {0x3808, 0x06},
+ {0x3809, 0x40}, {0x380a, 0x04}, {0x380b, 0xb0}, {0x380c, 0x06},
+ {0x380d, 0xa8}, {0x380e, 0x05}, {0x380f, 0x0e}, {0x3811, 0x00},
+ {0x3813, 0x00}, {0x3814, 0x11}, {0x3815, 0x11}, {0x3820, 0xc0},
+ {0x4008, 0x00}, {0x4837, 0x18}
+};
+
+static const struct ov2680_mode_info ov2680_mode_init_data = {
+ "mode_quxga_800_600", OV2680_MODE_QUXGA_800_600, 800, 600,
+ ov2680_setting_30fps_QUXGA_800_600,
+ ARRAY_SIZE(ov2680_setting_30fps_QUXGA_800_600),
+};
+
+static const struct ov2680_mode_info ov2680_mode_data[OV2680_MODE_MAX] = {
+ {"mode_quxga_800_600", OV2680_MODE_QUXGA_800_600,
+ 800, 600, ov2680_setting_30fps_QUXGA_800_600,
+ ARRAY_SIZE(ov2680_setting_30fps_QUXGA_800_600)},
+ {"mode_720p_1280_720", OV2680_MODE_720P_1280_720,
+ 1280, 720, ov2680_setting_30fps_720P_1280_720,
+ ARRAY_SIZE(ov2680_setting_30fps_720P_1280_720)},
+ {"mode_uxga_1600_1200", OV2680_MODE_UXGA_1600_1200,
+ 1600, 1200, ov2680_setting_30fps_UXGA_1600_1200,
+ ARRAY_SIZE(ov2680_setting_30fps_UXGA_1600_1200)},
+};
+
+static struct ov2680_dev *to_ov2680_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov2680_dev, sd);
+}
+
+static struct device *ov2680_to_dev(struct ov2680_dev *sensor)
+{
+ return &sensor->i2c_client->dev;
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ov2680_dev,
+ ctrls.handler)->sd;
+}
+
+static int __ov2680_write_reg(struct ov2680_dev *sensor, u16 reg,
+ unsigned int len, u32 val)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ u8 buf[6];
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2) {
+ dev_err(&client->dev, "write error: reg=0x%4x: %d\n", reg, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+#define ov2680_write_reg(s, r, v) \
+ __ov2680_write_reg(s, r, OV2680_REG_VALUE_8BIT, v)
+
+#define ov2680_write_reg16(s, r, v) \
+ __ov2680_write_reg(s, r, OV2680_REG_VALUE_16BIT, v)
+
+#define ov2680_write_reg24(s, r, v) \
+ __ov2680_write_reg(s, r, OV2680_REG_VALUE_24BIT, v)
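+
+ /*
+ * Wire format example (illustrative): ov2680_write_reg16(sensor,
+ * OV2680_REG_TIMING_HTS, 0x066c) left-shifts the value into the top bytes
+ * of the 32-bit scratch word, so the len + 2 = 4 bytes sent on the bus are:
+ *
+ * buf[] = { 0x38, 0x0c, 0x06, 0x6c }
+ */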
+
+static int __ov2680_read_reg(struct ov2680_dev *sensor, u16 reg,
+ unsigned int len, u32 *val)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+ u8 data_buf[4] = { 0, };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
+ return -EIO;
+ }
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+#define ov2680_read_reg(s, r, v) \
+ __ov2680_read_reg(s, r, OV2680_REG_VALUE_8BIT, v)
+
+#define ov2680_read_reg16(s, r, v) \
+ __ov2680_read_reg(s, r, OV2680_REG_VALUE_16BIT, v)
+
+#define ov2680_read_reg24(s, r, v) \
+ __ov2680_read_reg(s, r, OV2680_REG_VALUE_24BIT, v)
+
+static int ov2680_mod_reg(struct ov2680_dev *sensor, u16 reg, u8 mask, u8 val)
+{
+ u32 readval;
+ int ret;
+
+ ret = ov2680_read_reg(sensor, reg, &readval);
+ if (ret < 0)
+ return ret;
+
+ readval &= ~mask;
+ val &= mask;
+ val |= readval;
+
+ return ov2680_write_reg(sensor, reg, val);
+}
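+
+ /*
+ * Worked example (illustrative): with OV2680_REG_FORMAT1 currently reading
+ * 0x00, ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2)) clears
+ * bit 2 in the cached value, ORs in the new one and writes back 0x04,
+ * leaving all other bits untouched.
+ */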
+
+static int ov2680_load_regs(struct ov2680_dev *sensor,
+ const struct ov2680_mode_info *mode)
+{
+ const struct reg_value *regs = mode->reg_data;
+ unsigned int i;
+ int ret = 0;
+ u16 reg_addr;
+ u8 val;
+
+ for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
+ reg_addr = regs->reg_addr;
+ val = regs->val;
+
+ ret = ov2680_write_reg(sensor, reg_addr, val);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void ov2680_power_up(struct ov2680_dev *sensor)
+{
+ if (!sensor->reset_gpio)
+ return;
+
+ gpiod_set_value(sensor->reset_gpio, 0);
+ usleep_range(5000, 10000);
+}
+
+static void ov2680_power_down(struct ov2680_dev *sensor)
+{
+ if (!sensor->reset_gpio)
+ return;
+
+ gpiod_set_value(sensor->reset_gpio, 1);
+ usleep_range(5000, 10000);
+}
+
+static int ov2680_bayer_order(struct ov2680_dev *sensor)
+{
+ u32 format1;
+ u32 format2;
+ u32 hv_flip;
+ int ret;
+
+ ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT1, &format1);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT2, &format2);
+ if (ret < 0)
+ return ret;
+
+ hv_flip = ((format2 & BIT(2)) >> 1) | ((format1 & BIT(2)) >> 2);
+
+ sensor->fmt.code = ov2680_hv_flip_bayer_order[hv_flip];
+
+ return 0;
+}
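+
+ /*
+ * Flip-to-pattern mapping (illustrative, assuming the usual Bayer phase
+ * shift under mirroring): the two flip bits form the index into
+ * ov2680_hv_flip_bayer_order[], with vertical flip in bit 0 and
+ * horizontal flip in bit 1:
+ *
+ * hflip vflip  index  mbus code
+ *   0     0      0    MEDIA_BUS_FMT_SBGGR10_1X10
+ *   0     1      1    MEDIA_BUS_FMT_SGRBG10_1X10
+ *   1     0      2    MEDIA_BUS_FMT_SGBRG10_1X10
+ *   1     1      3    MEDIA_BUS_FMT_SRGGB10_1X10
+ */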
+
+static int ov2680_vflip_enable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2));
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_vflip_disable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), 0);
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_hflip_enable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(2));
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_hflip_disable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), 0);
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
+{
+ int ret;
+
+ if (!value)
+ return ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, BIT(7), 0);
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, 0x03, value - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, BIT(7), BIT(7));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ov2680_gain_set(struct ov2680_dev *sensor, bool auto_gain)
+{
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ u32 gain;
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(1),
+ auto_gain ? 0 : BIT(1));
+ if (ret < 0)
+ return ret;
+
+ if (auto_gain || !ctrls->gain->is_new)
+ return 0;
+
+ gain = ctrls->gain->val;
+
+ return ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
+}
+
+static int ov2680_gain_get(struct ov2680_dev *sensor)
+{
+ u32 gain;
+ int ret;
+
+ ret = ov2680_read_reg16(sensor, OV2680_REG_GAIN_PK, &gain);
+ if (ret)
+ return ret;
+
+ return gain;
+}
+
+static int ov2680_exposure_set(struct ov2680_dev *sensor, bool auto_exp)
+{
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ u32 exp;
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(0),
+ auto_exp ? 0 : BIT(0));
+ if (ret < 0)
+ return ret;
+
+ if (auto_exp || !ctrls->exposure->is_new)
+ return 0;
+
+ exp = (u32)ctrls->exposure->val;
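+ /*
+ * The exposure registers appear to hold the integration time in line
+ * units with 4 fractional bits (an assumption based on the matching
+ * >> 4 in ov2680_exposure_get()), hence the shift below.
+ */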
+ exp <<= 4;
+
+ return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, exp);
+}
+
+static int ov2680_exposure_get(struct ov2680_dev *sensor)
+{
+ int ret;
+ u32 exp;
+
+ ret = ov2680_read_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, &exp);
+ if (ret)
+ return ret;
+
+ return exp >> 4;
+}
+
+static int ov2680_stream_enable(struct ov2680_dev *sensor)
+{
+ return ov2680_write_reg(sensor, OV2680_REG_STREAM_CTRL, 1);
+}
+
+static int ov2680_stream_disable(struct ov2680_dev *sensor)
+{
+ return ov2680_write_reg(sensor, OV2680_REG_STREAM_CTRL, 0);
+}
+
+static int ov2680_mode_set(struct ov2680_dev *sensor)
+{
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ int ret;
+
+ ret = ov2680_gain_set(sensor, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_exposure_set(sensor, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_load_regs(sensor, sensor->current_mode);
+ if (ret < 0)
+ return ret;
+
+ if (ctrls->auto_gain->val) {
+ ret = ov2680_gain_set(sensor, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO) {
+ ret = ov2680_exposure_set(sensor, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ sensor->mode_pending_changes = false;
+
+ return 0;
+}
+
+static int ov2680_mode_restore(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_load_regs(sensor, &ov2680_mode_init_data);
+ if (ret < 0)
+ return ret;
+
+ return ov2680_mode_set(sensor);
+}
+
+static int ov2680_power_off(struct ov2680_dev *sensor)
+{
+ if (!sensor->is_enabled)
+ return 0;
+
+ clk_disable_unprepare(sensor->xvclk);
+ ov2680_power_down(sensor);
+ regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies);
+ sensor->is_enabled = false;
+
+ return 0;
+}
+
+static int ov2680_power_on(struct ov2680_dev *sensor)
+{
+ struct device *dev = ov2680_to_dev(sensor);
+ int ret;
+
+ if (sensor->is_enabled)
+ return 0;
+
+ ret = regulator_bulk_enable(OV2680_NUM_SUPPLIES, sensor->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ if (!sensor->reset_gpio) {
+ ret = ov2680_write_reg(sensor, OV2680_REG_SOFT_RESET, 0x01);
+ if (ret != 0) {
+ dev_err(dev, "sensor soft reset failed\n");
+ return ret;
+ }
+ usleep_range(1000, 2000);
+ } else {
+ ov2680_power_down(sensor);
+ ov2680_power_up(sensor);
+ }
+
+ ret = clk_prepare_enable(sensor->xvclk);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_mode_restore(sensor);
+ if (ret < 0)
+ goto disable;
+
+ sensor->is_enabled = true;
+
+ /* Set clock lane into LP-11 state */
+ ov2680_stream_enable(sensor);
+ usleep_range(1000, 2000);
+ ov2680_stream_disable(sensor);
+
+ return 0;
+
+disable:
+ dev_err(dev, "failed to enable sensor: %d\n", ret);
+ ov2680_power_off(sensor);
+
+ return ret;
+}
+
+static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ int ret = 0;
+
+ mutex_lock(&sensor->lock);
+
+ if (on)
+ ret = ov2680_power_on(sensor);
+ else
+ ret = ov2680_power_off(sensor);
+
+ mutex_unlock(&sensor->lock);
+
+ if (on && ret == 0) {
+ ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ov2680_s_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ mutex_lock(&sensor->lock);
+ fi->interval = sensor->frame_interval;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+}
+
+static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ int ret = 0;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->is_streaming == !!enable)
+ goto unlock;
+
+ if (enable && sensor->mode_pending_changes) {
+ ret = ov2680_mode_set(sensor);
+ if (ret < 0)
+ goto unlock;
+ }
+
+ if (enable)
+ ret = ov2680_stream_enable(sensor);
+ else
+ ret = ov2680_stream_disable(sensor);
+
+ sensor->is_streaming = !!enable;
+
+unlock:
+ mutex_unlock(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ if (code->pad != 0 || code->index != 0)
+ return -EINVAL;
+
+ code->code = sensor->fmt.code;
+
+ return 0;
+}
+
+static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct v4l2_mbus_framefmt *fmt = NULL;
+ int ret = 0;
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&sensor->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg, format->pad);
+#else
+ ret = -ENOTTY;
+#endif
+ } else {
+ fmt = &sensor->fmt;
+ }
+
+ if (fmt)
+ format->format = *fmt;
+
+ mutex_unlock(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *try_fmt;
+#endif
+ const struct ov2680_mode_info *mode;
+ int ret = 0;
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->is_streaming) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ mode = v4l2_find_nearest_size(ov2680_mode_data,
+ ARRAY_SIZE(ov2680_mode_data), width,
+ height, fmt->width, fmt->height);
+ if (!mode) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ try_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *try_fmt;
+#else
+ ret = -ENOTTY;
+#endif
+
+ goto unlock;
+ }
+
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = sensor->fmt.code;
+ fmt->colorspace = sensor->fmt.colorspace;
+
+ sensor->current_mode = mode;
+ sensor->fmt = format->format;
+ sensor->mode_pending_changes = true;
+
+unlock:
+ mutex_unlock(&sensor->lock);
+
+ return ret;
+}
+
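+ /*
+ * Initialize the pad config (or the active format when cfg is NULL)
+ * to the 800x600 default by going through ov2680_set_fmt().
+ */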
+static int ov2680_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .width = 800,
+ .height = 600,
+ }
+ };
+
+ return ov2680_set_fmt(sd, cfg, &fmt);
+}
+
+static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= OV2680_MODE_MAX || index < 0)
+ return -EINVAL;
+
+ fse->min_width = ov2680_mode_data[index].width;
+ fse->min_height = ov2680_mode_data[index].height;
+ fse->max_width = ov2680_mode_data[index].width;
+ fse->max_height = ov2680_mode_data[index].height;
+
+ return 0;
+}
+
+static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct v4l2_fract tpf;
+
+ if (fie->index >= OV2680_MODE_MAX || fie->width > OV2680_WIDTH_MAX ||
+ fie->height > OV2680_HEIGHT_MAX ||
+ fie->which > V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ tpf.denominator = OV2680_FRAME_RATE;
+ tpf.numerator = 1;
+
+ fie->interval = tpf;
+
+ return 0;
+}
+
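+ /*
+ * Called only for controls flagged V4L2_CTRL_FLAG_VOLATILE (gain and
+ * exposure here): refresh the cached control value from the hardware
+ * so the values updated by autogain/autoexposure can be read back.
+ */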
+static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ int val;
+
+ if (!sensor->is_enabled)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ val = ov2680_gain_get(sensor);
+ if (val < 0)
+ return val;
+ ctrls->gain->val = val;
+ break;
+ case V4L2_CID_EXPOSURE:
+ val = ov2680_exposure_get(sensor);
+ if (val < 0)
+ return val;
+ ctrls->exposure->val = val;
+ break;
+ }
+
+ return 0;
+}
+
+static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+
+ if (!sensor->is_enabled)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ return ov2680_gain_set(sensor, !!ctrl->val);
+ case V4L2_CID_GAIN:
+ return ov2680_gain_set(sensor, !!ctrls->auto_gain->val);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov2680_exposure_set(sensor,
+ ctrl->val == V4L2_EXPOSURE_AUTO);
+ case V4L2_CID_EXPOSURE:
+ return ov2680_exposure_set(sensor,
+ ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO);
+ case V4L2_CID_VFLIP:
+ if (sensor->is_streaming)
+ return -EBUSY;
+ if (ctrl->val)
+ return ov2680_vflip_enable(sensor);
+ else
+ return ov2680_vflip_disable(sensor);
+ case V4L2_CID_HFLIP:
+ if (sensor->is_streaming)
+ return -EBUSY;
+ if (ctrl->val)
+ return ov2680_hflip_enable(sensor);
+ else
+ return ov2680_hflip_disable(sensor);
+ case V4L2_CID_TEST_PATTERN:
+ return ov2680_test_pattern_set(sensor, ctrl->val);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
+ .g_volatile_ctrl = ov2680_g_volatile_ctrl,
+ .s_ctrl = ov2680_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops ov2680_core_ops = {
+ .s_power = ov2680_s_power,
+};
+
+static const struct v4l2_subdev_video_ops ov2680_video_ops = {
+ .g_frame_interval = ov2680_s_g_frame_interval,
+ .s_frame_interval = ov2680_s_g_frame_interval,
+ .s_stream = ov2680_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
+ .init_cfg = ov2680_init_cfg,
+ .enum_mbus_code = ov2680_enum_mbus_code,
+ .get_fmt = ov2680_get_fmt,
+ .set_fmt = ov2680_set_fmt,
+ .enum_frame_size = ov2680_enum_frame_size,
+ .enum_frame_interval = ov2680_enum_frame_interval,
+};
+
+static const struct v4l2_subdev_ops ov2680_subdev_ops = {
+ .core = &ov2680_core_ops,
+ .video = &ov2680_video_ops,
+ .pad = &ov2680_pad_ops,
+};
+
+static int ov2680_mode_init(struct ov2680_dev *sensor)
+{
+ const struct ov2680_mode_info *init_mode;
+
+ /* set initial mode */
+ sensor->fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ sensor->fmt.width = 800;
+ sensor->fmt.height = 600;
+ sensor->fmt.field = V4L2_FIELD_NONE;
+ sensor->fmt.colorspace = V4L2_COLORSPACE_SRGB;
+
+ sensor->frame_interval.denominator = OV2680_FRAME_RATE;
+ sensor->frame_interval.numerator = 1;
+
+ init_mode = &ov2680_mode_init_data;
+
+ sensor->current_mode = init_mode;
+
+ sensor->mode_pending_changes = true;
+
+ return 0;
+}
+
+static int ov2680_v4l2_init(struct ov2680_dev *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &ov2680_ctrl_ops;
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret = 0;
+
+ v4l2_i2c_subdev_init(&sensor->sd, sensor->i2c_client,
+ &ov2680_subdev_ops);
+
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+#endif
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret < 0)
+ return ret;
+
+ v4l2_ctrl_handler_init(hdl, 7);
+
+ hdl->lock = &sensor->lock;
+
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ ctrls->test_pattern = v4l2_ctrl_new_std_menu_items(hdl,
+ &ov2680_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0, 0, test_pattern_menu);
+
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 0, 32767, 1, 0);
+
+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ 0, 1, 1, 1);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 2047, 1, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto cleanup_entity;
+ }
+
+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
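+ /*
+ * Cluster each auto control with its manual counterpart so the
+ * framework updates them atomically; with set_volatile true the
+ * manual control is read back from the hardware while the auto mode
+ * is active. The third argument is the auto control's "manual" value
+ * (0 for autogain off, V4L2_EXPOSURE_MANUAL == 1 for exposure).
+ */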
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
+
+ sensor->sd.ctrl_handler = hdl;
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret < 0)
+ goto cleanup_entity;
+
+ return 0;
+
+cleanup_entity:
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(hdl);
+
+ return ret;
+}
+
+static int ov2680_get_regulators(struct ov2680_dev *sensor)
+{
+ int i;
+
+ for (i = 0; i < OV2680_NUM_SUPPLIES; i++)
+ sensor->supplies[i].supply = ov2680_supply_name[i];
+
+ return devm_regulator_bulk_get(&sensor->i2c_client->dev,
+ OV2680_NUM_SUPPLIES,
+ sensor->supplies);
+}
+
+static int ov2680_check_id(struct ov2680_dev *sensor)
+{
+ struct device *dev = ov2680_to_dev(sensor);
+ u32 chip_id;
+ int ret;
+
+ ret = ov2680_power_on(sensor);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_read_reg16(sensor, OV2680_REG_CHIP_ID_HIGH, &chip_id);
+ if (ret < 0) {
+ dev_err(dev, "failed to read chip id high\n");
+ return -ENODEV;
+ }
+
+ if (chip_id != OV2680_CHIP_ID) {
+ dev_err(dev, "chip id: 0x%04x does not match expected 0x%04x\n",
+ chip_id, OV2680_CHIP_ID);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int ov2680_parse_dt(struct ov2680_dev *sensor)
+{
+ struct device *dev = ov2680_to_dev(sensor);
+ int ret;
+
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(sensor->reset_gpio);
+ if (ret < 0) {
+ dev_dbg(dev, "error while getting reset gpio: %d\n", ret);
+ return ret;
+ }
+
+ sensor->xvclk = devm_clk_get(dev, "xvclk");
+ if (IS_ERR(sensor->xvclk)) {
+ dev_err(dev, "xvclk clock missing or invalid\n");
+ return PTR_ERR(sensor->xvclk);
+ }
+
+ sensor->xvclk_freq = clk_get_rate(sensor->xvclk);
+ if (sensor->xvclk_freq != OV2680_XVCLK_VALUE) {
+ dev_err(dev, "wrong xvclk frequency %d HZ, expected: %d Hz\n",
+ sensor->xvclk_freq, OV2680_XVCLK_VALUE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ov2680_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ov2680_dev *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->i2c_client = client;
+
+ ret = ov2680_parse_dt(sensor);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_mode_init(sensor);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_get_regulators(sensor);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators\n");
+ return ret;
+ }
+
+ mutex_init(&sensor->lock);
+
+ ret = ov2680_v4l2_init(sensor);
+ if (ret < 0)
+ goto lock_destroy;
+
+ ret = ov2680_check_id(sensor);
+ if (ret < 0)
+ goto error_cleanup;
+
+ dev_info(dev, "ov2680 init correctly\n");
+
+ return 0;
+
+error_cleanup:
+ dev_err(dev, "ov2680 init fail: %d\n", ret);
+
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_async_unregister_subdev(&sensor->sd);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+
+lock_destroy:
+ mutex_destroy(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ v4l2_async_unregister_subdev(&sensor->sd);
+ mutex_destroy(&sensor->lock);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+
+ return 0;
+}
+
+static int __maybe_unused ov2680_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ if (sensor->is_streaming)
+ ov2680_stream_disable(sensor);
+
+ return 0;
+}
+
+static int __maybe_unused ov2680_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ int ret;
+
+ if (sensor->is_streaming) {
+ ret = ov2680_stream_enable(sensor);
+ if (ret < 0)
+ goto stream_disable;
+ }
+
+ return 0;
+
+stream_disable:
+ ov2680_stream_disable(sensor);
+ sensor->is_streaming = false;
+
+ return ret;
+}
+
+static const struct dev_pm_ops ov2680_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov2680_suspend, ov2680_resume)
+};
+
+static const struct of_device_id ov2680_dt_ids[] = {
+ { .compatible = "ovti,ov2680" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov2680_dt_ids);
+
+static struct i2c_driver ov2680_i2c_driver = {
+ .driver = {
+ .name = "ov2680",
+ .pm = &ov2680_pm_ops,
+ .of_match_table = of_match_ptr(ov2680_dt_ids),
+ },
+ .probe_new = ov2680_probe,
+ .remove = ov2680_remove,
+};
+module_i2c_driver(ov2680_i2c_driver);
+
+MODULE_AUTHOR("Rui Miguel Silva <rui.silva@linaro.org>");
+MODULE_DESCRIPTION("OV2680 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index f6e40cc9745c..071f4bc240ca 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -30,7 +30,7 @@
/* min/typical/max system clock (xclk) frequencies */
#define OV5640_XCLK_MIN 6000000
-#define OV5640_XCLK_MAX 24000000
+#define OV5640_XCLK_MAX 54000000
#define OV5640_DEFAULT_SLAVE_ID 0x3c
@@ -64,6 +64,7 @@
#define OV5640_REG_TIMING_DVPVO 0x380a
#define OV5640_REG_TIMING_HTS 0x380c
#define OV5640_REG_TIMING_VTS 0x380e
+#define OV5640_REG_TIMING_TC_REG20 0x3820
#define OV5640_REG_TIMING_TC_REG21 0x3821
#define OV5640_REG_AEC_CTRL00 0x3a00
#define OV5640_REG_AEC_B50_STEP 0x3a08
@@ -199,6 +200,8 @@ struct ov5640_ctrls {
struct v4l2_ctrl *contrast;
struct v4l2_ctrl *hue;
struct v4l2_ctrl *test_pattern;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
};
struct ov5640_dev {
@@ -212,6 +215,7 @@ struct ov5640_dev {
struct regulator_bulk_data supplies[OV5640_NUM_SUPPLIES];
struct gpio_desc *reset_gpio;
struct gpio_desc *pwdn_gpio;
+ bool upside_down;
/* lock to protect all members below */
struct mutex lock;
@@ -341,7 +345,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
static const struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -360,7 +364,7 @@ static const struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
static const struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -379,7 +383,7 @@ static const struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
static const struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -399,7 +403,7 @@ static const struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
static const struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -418,7 +422,7 @@ static const struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
static const struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -437,7 +441,7 @@ static const struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
static const struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -456,7 +460,7 @@ static const struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
static const struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -475,7 +479,7 @@ static const struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
static const struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -494,7 +498,7 @@ static const struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
static const struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
{0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -513,7 +517,7 @@ static const struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
static const struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -532,7 +536,7 @@ static const struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
static const struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
{0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -551,7 +555,7 @@ static const struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
static const struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -571,7 +575,7 @@ static const struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
{0x3008, 0x42, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
@@ -591,7 +595,7 @@ static const struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
static const struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
{0x3035, 0x41, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
@@ -611,7 +615,7 @@ static const struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
{0x3008, 0x42, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
@@ -644,7 +648,7 @@ static const struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
{0x3008, 0x42, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
@@ -673,10 +677,9 @@ static const struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
};
static const struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
@@ -1340,6 +1343,27 @@ static int ov5640_binning_on(struct ov5640_dev *sensor)
return temp ? 1 : 0;
}
+static int ov5640_set_binning(struct ov5640_dev *sensor, bool enable)
+{
+ int ret;
+
+ /*
+ * TIMING TC REG21:
+ * - [0]: Horizontal binning enable
+ */
+ ret = ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG21,
+ BIT(0), enable ? BIT(0) : 0);
+ if (ret)
+ return ret;
+ /*
+ * TIMING TC REG20:
+ * - [0]: Undocumented, but the hardcoded init sequences always
+ * set bit 0 of REG20 and REG21 to the same value.
+ */
+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG20,
+ BIT(0), enable ? BIT(0) : 0);
+}
+
static int ov5640_set_virtual_channel(struct ov5640_dev *sensor)
{
struct i2c_client *client = sensor->i2c_client;
@@ -1389,24 +1413,16 @@ static const struct ov5640_mode_info *
ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
int width, int height, bool nearest)
{
- const struct ov5640_mode_info *mode = NULL;
- int i;
-
- for (i = OV5640_NUM_MODES - 1; i >= 0; i--) {
- mode = &ov5640_mode_data[fr][i];
-
- if (!mode->reg_data)
- continue;
+ const struct ov5640_mode_info *mode;
- if ((nearest && mode->hact <= width &&
- mode->vact <= height) ||
- (!nearest && mode->hact == width &&
- mode->vact == height))
- break;
- }
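+ /*
+ * v4l2_find_nearest_size() returns the array entry whose hact/vact
+ * members are closest to the requested width/height.
+ */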
+ mode = v4l2_find_nearest_size(ov5640_mode_data[fr],
+ ARRAY_SIZE(ov5640_mode_data[fr]),
+ hact, vact,
+ width, height);
- if (nearest && i < 0)
- mode = &ov5640_mode_data[fr][0];
+ if (!mode ||
+ (!nearest && (mode->hact != width || mode->vact != height)))
+ return NULL;
return mode;
}
@@ -1640,6 +1656,9 @@ static int ov5640_set_mode(struct ov5640_dev *sensor,
if (ret < 0)
return ret;
+ ret = ov5640_set_binning(sensor, dn_mode != SCALING);
+ if (ret < 0)
+ return ret;
ret = ov5640_set_ae_target(sensor, sensor->ae_target);
if (ret < 0)
return ret;
@@ -1947,9 +1966,11 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- sensor->current_mode = new_mode;
- sensor->fmt = *mbus_fmt;
- sensor->pending_mode_change = true;
+ if (new_mode != sensor->current_mode) {
+ sensor->current_mode = new_mode;
+ sensor->fmt = *mbus_fmt;
+ sensor->pending_mode_change = true;
+ }
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2193,6 +2214,43 @@ static int ov5640_set_ctrl_light_freq(struct ov5640_dev *sensor, int value)
BIT(2) : 0);
}
+static int ov5640_set_ctrl_hflip(struct ov5640_dev *sensor, int value)
+{
+ /*
+ * If the sensor is mounted upside down, the mirror logic is inverted.
+ *
+ * The sensor is a BSI (Back Side Illuminated) one, so the captured
+ * image is physically mirrored. The mirror logic is therefore
+ * inverted to cancel that mirror effect.
+ */
+
+ /*
+ * TIMING TC REG21:
+ * - [2]: ISP mirror
+ * - [1]: Sensor mirror
+ */
+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG21,
+ BIT(2) | BIT(1),
+ (!(value ^ sensor->upside_down)) ?
+ (BIT(2) | BIT(1)) : 0);
+}
+
+static int ov5640_set_ctrl_vflip(struct ov5640_dev *sensor, int value)
+{
+ /* If the sensor is mounted upside down, the flip logic is inverted */
+
+ /*
+ * TIMING TC REG20:
+ * - [2]: ISP vflip
+ * - [1]: Sensor vflip
+ */
+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG20,
+ BIT(2) | BIT(1),
+ (value ^ sensor->upside_down) ?
+ (BIT(2) | BIT(1)) : 0);
+}
+
static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
@@ -2264,6 +2322,12 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_POWER_LINE_FREQUENCY:
ret = ov5640_set_ctrl_light_freq(sensor, ctrl->val);
break;
+ case V4L2_CID_HFLIP:
+ ret = ov5640_set_ctrl_hflip(sensor, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = ov5640_set_ctrl_vflip(sensor, ctrl->val);
+ break;
default:
ret = -EINVAL;
break;
@@ -2325,6 +2389,10 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(test_pattern_menu) - 1,
0, 0, test_pattern_menu);
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
ctrls->light_freq =
v4l2_ctrl_new_std_menu(hdl, ops,
@@ -2435,9 +2503,17 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->current_fr = frame_rate;
sensor->frame_interval = fi->interval;
- sensor->current_mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
- mode->vact, true);
- sensor->pending_mode_change = true;
+ mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
+ mode->vact, true);
+ if (!mode) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (mode != sensor->current_mode) {
+ sensor->current_mode = mode;
+ sensor->pending_mode_change = true;
+ }
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2558,6 +2634,7 @@ static int ov5640_probe(struct i2c_client *client,
struct fwnode_handle *endpoint;
struct ov5640_dev *sensor;
struct v4l2_mbus_framefmt *fmt;
+ u32 rotation;
int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
@@ -2583,6 +2660,22 @@ static int ov5640_probe(struct i2c_client *client,
sensor->ae_target = 52;
+ /* optional indication of physical rotation of sensor */
+ ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
+ &rotation);
+ if (!ret) {
+ switch (rotation) {
+ case 180:
+ sensor->upside_down = true;
+ /* fall through */
+ case 0:
+ break;
+ default:
+ dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
+ rotation);
+ }
+ }
+
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
NULL);
if (!endpoint) {
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index b3f762578f7f..1722cdab0daf 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -510,8 +510,8 @@ static const struct reg_value ov5645_setting_full[] = {
};
static const s64 link_freq[] = {
- 222880000,
- 334320000
+ 224000000,
+ 336000000
};
static const struct ov5645_mode_info ov5645_mode_info_data[] = {
@@ -520,7 +520,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = {
.height = 960,
.data = ov5645_setting_sxga,
.data_size = ARRAY_SIZE(ov5645_setting_sxga),
- .pixel_clock = 111440000,
+ .pixel_clock = 112000000,
.link_freq = 0 /* an index in link_freq[] */
},
{
@@ -528,7 +528,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = {
.height = 1080,
.data = ov5645_setting_1080p,
.data_size = ARRAY_SIZE(ov5645_setting_1080p),
- .pixel_clock = 167160000,
+ .pixel_clock = 168000000,
.link_freq = 1 /* an index in link_freq[] */
},
{
@@ -536,7 +536,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = {
.height = 1944,
.data = ov5645_setting_full,
.data_size = ARRAY_SIZE(ov5645_setting_full),
- .pixel_clock = 167160000,
+ .pixel_clock = 168000000,
.link_freq = 1 /* an index in link_freq[] */
},
};
@@ -1145,7 +1145,8 @@ static int ov5645_probe(struct i2c_client *client,
return ret;
}
- if (xclk_freq != 23880000) {
+ /* The external clock must be 24 MHz; allow 1% tolerance. */
+ if (xclk_freq < 23760000 || xclk_freq > 24240000) {
dev_err(dev, "external clock frequency %u is not supported\n",
xclk_freq);
return -EINVAL;
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 3474ef832c1e..31bf577b0bd3 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1744,14 +1744,12 @@ static int ov7670_parse_dt(struct device *dev,
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(ep, &bus_cfg);
- if (ret) {
- fwnode_handle_put(ep);
+ fwnode_handle_put(ep);
+ if (ret)
return ret;
- }
if (bus_cfg.bus_type != V4L2_MBUS_PARALLEL) {
dev_err(dev, "Unsupported media bus type\n");
- fwnode_handle_put(ep);
return ret;
}
info->mbus_config = bus_cfg.bus.parallel.flags;
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index e2550708abc8..7158c31d8403 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -419,11 +419,18 @@ struct ov772x_priv {
struct gpio_desc *rstb_gpio;
const struct ov772x_color_format *cfmt;
const struct ov772x_win_size *win;
- unsigned short flag_vflip:1;
- unsigned short flag_hflip:1;
+ struct v4l2_ctrl *vflip_ctrl;
+ struct v4l2_ctrl *hflip_ctrl;
/* band_filter = COM8[5] ? 256 - BDBASE : 0 */
- unsigned short band_filter;
+ struct v4l2_ctrl *band_filter_ctrl;
unsigned int fps;
+ /* lock to protect power_count and streaming */
+ struct mutex lock;
+ int power_count;
+ int streaming;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_pad pad;
+#endif
};
/*
@@ -542,9 +549,19 @@ static struct ov772x_priv *to_ov772x(struct v4l2_subdev *sd)
return container_of(sd, struct ov772x_priv, subdev);
}
-static inline int ov772x_read(struct i2c_client *client, u8 addr)
+static int ov772x_read(struct i2c_client *client, u8 addr)
{
- return i2c_smbus_read_byte_data(client, addr);
+ int ret;
+ u8 val;
+
+ ret = i2c_master_send(client, &addr, 1);
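+ /*
+ * The OV772x uses SCCB, which does not support repeated start, so
+ * read a register as two separate transfers: a one-byte register
+ * address write followed by a one-byte read.
+ */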
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_recv(client, &val, 1);
+ if (ret < 0)
+ return ret;
+
+ return val;
}
static inline int ov772x_write(struct i2c_client *client, u8 addr, u8 value)
@@ -587,39 +604,40 @@ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(sd);
+ int ret = 0;
- if (!enable) {
- ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE);
- return 0;
- }
+ mutex_lock(&priv->lock);
- ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0);
+ if (priv->streaming == enable)
+ goto done;
- dev_dbg(&client->dev, "format %d, win %s\n",
- priv->cfmt->code, priv->win->name);
+ ret = ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE,
+ enable ? 0 : SOFT_SLEEP_MODE);
+ if (ret)
+ goto done;
- return 0;
+ if (enable) {
+ dev_dbg(&client->dev, "format %d, win %s\n",
+ priv->cfmt->code, priv->win->name);
+ }
+ priv->streaming = enable;
+
+done:
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
-static int ov772x_set_frame_rate(struct ov772x_priv *priv,
- struct v4l2_fract *tpf,
- const struct ov772x_color_format *cfmt,
- const struct ov772x_win_size *win)
+static unsigned int ov772x_select_fps(struct ov772x_priv *priv,
+ struct v4l2_fract *tpf)
{
- struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- unsigned long fin = clk_get_rate(priv->clk);
unsigned int fps = tpf->numerator ?
tpf->denominator / tpf->numerator :
tpf->denominator;
unsigned int best_diff;
- unsigned int fsize;
- unsigned int pclk;
unsigned int diff;
unsigned int idx;
unsigned int i;
- u8 clkrc = 0;
- u8 com4 = 0;
- int ret;
/* Approximate to the closest supported frame interval. */
best_diff = ~0L;
@@ -630,7 +648,25 @@ static int ov772x_set_frame_rate(struct ov772x_priv *priv,
best_diff = diff;
}
}
- fps = ov772x_frame_intervals[idx];
+
+ return ov772x_frame_intervals[idx];
+}
+
+static int ov772x_set_frame_rate(struct ov772x_priv *priv,
+ unsigned int fps,
+ const struct ov772x_color_format *cfmt,
+ const struct ov772x_win_size *win)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
+ unsigned long fin = clk_get_rate(priv->clk);
+ unsigned int best_diff;
+ unsigned int fsize;
+ unsigned int pclk;
+ unsigned int diff;
+ unsigned int i;
+ u8 clkrc = 0;
+ u8 com4 = 0;
+ int ret;
/* Use image size (with blankings) to calculate desired pixel clock. */
switch (cfmt->com7 & OFMT_MASK) {
@@ -695,10 +731,6 @@ static int ov772x_set_frame_rate(struct ov772x_priv *priv,
if (ret < 0)
return ret;
- tpf->numerator = 1;
- tpf->denominator = fps;
- priv->fps = tpf->denominator;
-
return 0;
}
@@ -719,8 +751,37 @@ static int ov772x_s_frame_interval(struct v4l2_subdev *sd,
{
struct ov772x_priv *priv = to_ov772x(sd);
struct v4l2_fract *tpf = &ival->interval;
+ unsigned int fps;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
- return ov772x_set_frame_rate(priv, tpf, priv->cfmt, priv->win);
+ if (priv->streaming) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ fps = ov772x_select_fps(priv, tpf);
+
+ /*
+ * If the device is not powered up by the host driver, do not
+ * apply any changes to the hardware at this time. Instead, the
+ * frame rate will be restored right after power-up.
+ */
+ if (priv->power_count > 0) {
+ ret = ov772x_set_frame_rate(priv, fps, priv->cfmt, priv->win);
+ if (ret)
+ goto error;
+ }
+
+ tpf->numerator = 1;
+ tpf->denominator = fps;
+ priv->fps = fps;
+
+error:
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -732,17 +793,25 @@ static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
int ret = 0;
u8 val;
+ /* v4l2_ctrl_lock() locks our own mutex */
+
+ /*
+ * If the device is not powered up by the host driver, do not
+ * apply any controls to the hardware at this time. Instead, the
+ * controls will be restored right after power-up.
+ */
+ if (priv->power_count == 0)
+ return 0;
+
switch (ctrl->id) {
case V4L2_CID_VFLIP:
val = ctrl->val ? VFLIP_IMG : 0x00;
- priv->flag_vflip = ctrl->val;
- if (priv->info->flags & OV772X_FLAG_VFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_VFLIP))
val ^= VFLIP_IMG;
return ov772x_mask_set(client, COM3, VFLIP_IMG, val);
case V4L2_CID_HFLIP:
val = ctrl->val ? HFLIP_IMG : 0x00;
- priv->flag_hflip = ctrl->val;
- if (priv->info->flags & OV772X_FLAG_HFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_HFLIP))
val ^= HFLIP_IMG;
return ov772x_mask_set(client, COM3, HFLIP_IMG, val);
case V4L2_CID_BAND_STOP_FILTER:
@@ -761,8 +830,7 @@ static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
ret = ov772x_mask_set(client, BDBASE,
0xff, val);
}
- if (!ret)
- priv->band_filter = ctrl->val;
+
return ret;
}
@@ -824,10 +892,10 @@ static int ov772x_power_on(struct ov772x_priv *priv)
* available to handle this cleanly, request the GPIO temporarily
* to avoid conflicts.
*/
- priv->rstb_gpio = gpiod_get_optional(&client->dev, "rstb",
+ priv->rstb_gpio = gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(priv->rstb_gpio)) {
- dev_info(&client->dev, "Unable to get GPIO \"rstb\"");
+ dev_info(&client->dev, "Unable to get GPIO \"reset\"");
return PTR_ERR(priv->rstb_gpio);
}
@@ -855,12 +923,45 @@ static int ov772x_power_off(struct ov772x_priv *priv)
return 0;
}
+static int ov772x_set_params(struct ov772x_priv *priv,
+ const struct ov772x_color_format *cfmt,
+ const struct ov772x_win_size *win);
+
static int ov772x_s_power(struct v4l2_subdev *sd, int on)
{
struct ov772x_priv *priv = to_ov772x(sd);
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ /*
+ * If the power count changes from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (priv->power_count == !on) {
+ if (on) {
+ ret = ov772x_power_on(priv);
+ /*
+ * Restore the format, the frame rate, and
+ * the controls
+ */
+ if (!ret)
+ ret = ov772x_set_params(priv, priv->cfmt,
+ priv->win);
+ } else {
+ ret = ov772x_power_off(priv);
+ }
+ }
- return on ? ov772x_power_on(priv) :
- ov772x_power_off(priv);
+ if (!ret) {
+ /* Update the power count. */
+ priv->power_count += on ? 1 : -1;
+ WARN(priv->power_count < 0, "Unbalanced power count\n");
+ WARN(priv->power_count > 1, "Duplicated s_power call\n");
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height)
@@ -901,19 +1002,14 @@ static void ov772x_select_params(const struct v4l2_mbus_framefmt *mf,
*win = ov772x_select_win(mf->width, mf->height);
}
-static int ov772x_set_params(struct ov772x_priv *priv,
- const struct ov772x_color_format *cfmt,
- const struct ov772x_win_size *win)
+static int ov772x_edgectrl(struct ov772x_priv *priv)
{
struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- struct v4l2_fract tpf;
int ret;
- u8 val;
- /* Reset hardware. */
- ov772x_reset(client);
+ if (!priv->info)
+ return 0;
- /* Edge Ctrl. */
if (priv->info->edgectrl.strength & OV772X_MANUAL_EDGE_CTRL) {
/*
* Manual Edge Control Mode.
@@ -924,19 +1020,19 @@ static int ov772x_set_params(struct ov772x_priv *priv,
ret = ov772x_mask_set(client, DSPAUTO, EDGE_ACTRL, 0x00);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
ret = ov772x_mask_set(client,
EDGE_TRSHLD, OV772X_EDGE_THRESHOLD_MASK,
priv->info->edgectrl.threshold);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
ret = ov772x_mask_set(client,
EDGE_STRNGT, OV772X_EDGE_STRENGTH_MASK,
priv->info->edgectrl.strength);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
} else if (priv->info->edgectrl.upper > priv->info->edgectrl.lower) {
/*
@@ -948,15 +1044,34 @@ static int ov772x_set_params(struct ov772x_priv *priv,
EDGE_UPPER, OV772X_EDGE_UPPER_MASK,
priv->info->edgectrl.upper);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
ret = ov772x_mask_set(client,
EDGE_LOWER, OV772X_EDGE_LOWER_MASK,
priv->info->edgectrl.lower);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
}
+ return 0;
+}
+
+static int ov772x_set_params(struct ov772x_priv *priv,
+ const struct ov772x_color_format *cfmt,
+ const struct ov772x_win_size *win)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
+ int ret;
+ u8 val;
+
+ /* Reset hardware. */
+ ov772x_reset(client);
+
+ /* Edge Ctrl. */
+ ret = ov772x_edgectrl(priv);
+ if (ret < 0)
+ return ret;
+
/* Format and window size. */
ret = ov772x_write(client, HSTART, win->rect.left >> 2);
if (ret < 0)
@@ -1007,13 +1122,13 @@ static int ov772x_set_params(struct ov772x_priv *priv,
/* Set COM3. */
val = cfmt->com3;
- if (priv->info->flags & OV772X_FLAG_VFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_VFLIP))
val |= VFLIP_IMG;
- if (priv->info->flags & OV772X_FLAG_HFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_HFLIP))
val |= HFLIP_IMG;
- if (priv->flag_vflip)
+ if (priv->vflip_ctrl->val)
val ^= VFLIP_IMG;
- if (priv->flag_hflip)
+ if (priv->hflip_ctrl->val)
val ^= HFLIP_IMG;
ret = ov772x_mask_set(client,
@@ -1027,18 +1142,18 @@ static int ov772x_set_params(struct ov772x_priv *priv,
goto ov772x_set_fmt_error;
/* COM4, CLKRC: Set pixel clock and framerate. */
- tpf.numerator = 1;
- tpf.denominator = priv->fps;
- ret = ov772x_set_frame_rate(priv, &tpf, cfmt, win);
+ ret = ov772x_set_frame_rate(priv, priv->fps, cfmt, win);
if (ret < 0)
goto ov772x_set_fmt_error;
/* Set COM8. */
- if (priv->band_filter) {
+ if (priv->band_filter_ctrl->val) {
+ unsigned short band_filter = priv->band_filter_ctrl->val;
+
ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
- 0xff, 256 - priv->band_filter);
+ 0xff, 256 - band_filter);
if (ret < 0)
goto ov772x_set_fmt_error;
}
@@ -1102,7 +1217,7 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf = &format->format;
const struct ov772x_color_format *cfmt;
const struct ov772x_win_size *win;
- int ret;
+ int ret = 0;
if (format->pad)
return -EINVAL;
@@ -1123,30 +1238,50 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
return 0;
}
- ret = ov772x_set_params(priv, cfmt, win);
- if (ret < 0)
- return ret;
+ mutex_lock(&priv->lock);
+
+ if (priv->streaming) {
+ ret = -EBUSY;
+ goto error;
+ }
+ /*
+ * If the device is not powered up by the host driver, do not
+ * apply any changes to the hardware at this time. Instead, the
+ * format will be restored right after power-up.
+ */
+ if (priv->power_count > 0) {
+ ret = ov772x_set_params(priv, cfmt, win);
+ if (ret < 0)
+ goto error;
+ }
priv->win = win;
priv->cfmt = cfmt;
- return 0;
+error:
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
static int ov772x_video_probe(struct ov772x_priv *priv)
{
struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- u8 pid, ver;
+ int pid, ver, midh, midl;
const char *devname;
int ret;
- ret = ov772x_s_power(&priv->subdev, 1);
+ ret = ov772x_power_on(priv);
if (ret < 0)
return ret;
/* Check and show product ID and manufacturer ID. */
pid = ov772x_read(client, PID);
+ if (pid < 0) {
+ ret = pid;
+ goto done;
+ }
ver = ov772x_read(client, VER);
+ if (ver < 0) {
+ ret = ver;
+ goto done;
+ }
switch (VERSION(pid, ver)) {
case OV7720:
@@ -1162,17 +1297,21 @@ static int ov772x_video_probe(struct ov772x_priv *priv)
goto done;
}
+ midh = ov772x_read(client, MIDH);
+ if (midh < 0) {
+ ret = midh;
+ goto done;
+ }
+ midl = ov772x_read(client, MIDL);
+ if (midl < 0) {
+ ret = midl;
+ goto done;
+ }
+
dev_info(&client->dev,
"%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
- devname,
- pid,
- ver,
- ov772x_read(client, MIDH),
- ov772x_read(client, MIDL));
+ devname, pid, ver, midh, midl);
+
ret = v4l2_ctrl_handler_setup(&priv->hdl);
done:
- ov772x_s_power(&priv->subdev, 0);
+ ov772x_power_off(priv);
return ret;
}
@@ -1250,48 +1389,54 @@ static int ov772x_probe(struct i2c_client *client,
struct i2c_adapter *adapter = client->adapter;
int ret;
- if (!client->dev.platform_data) {
- dev_err(&client->dev, "Missing ov772x platform data\n");
+ if (!client->dev.of_node && !client->dev.platform_data) {
+ dev_err(&client->dev,
+ "Missing ov772x platform data for non-DT device\n");
return -EINVAL;
}
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_PROTOCOL_MANGLING)) {
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
- "I2C-Adapter doesn't support SMBUS_BYTE_DATA or PROTOCOL_MANGLING\n");
+ "I2C-Adapter doesn't support SMBUS_BYTE_DATA\n");
return -EIO;
}
- client->flags |= I2C_CLIENT_SCCB;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->info = client->dev.platform_data;
+ mutex_init(&priv->lock);
v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_ctrl_handler_init(&priv->hdl, 3);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0);
+ /* Use our mutex for the controls */
+ priv->hdl.lock = &priv->lock;
+ priv->vflip_ctrl = v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ priv->hflip_ctrl = v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ priv->band_filter_ctrl = v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_BAND_STOP_FILTER,
+ 0, 256, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto error_mutex_destroy;
+ }
- priv->clk = clk_get(&client->dev, "xclk");
+ priv->clk = clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&client->dev, "Unable to get xclk clock\n");
ret = PTR_ERR(priv->clk);
goto error_ctrl_free;
}
- priv->pwdn_gpio = gpiod_get_optional(&client->dev, "pwdn",
+ priv->pwdn_gpio = gpiod_get_optional(&client->dev, "powerdown",
GPIOD_OUT_LOW);
if (IS_ERR(priv->pwdn_gpio)) {
- dev_info(&client->dev, "Unable to get GPIO \"pwdn\"");
+ dev_info(&client->dev, "Unable to get GPIO \"powerdown\"");
ret = PTR_ERR(priv->pwdn_gpio);
goto error_clk_put;
}
@@ -1300,16 +1445,26 @@ static int ov772x_probe(struct i2c_client *client,
if (ret < 0)
goto error_gpio_put;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ priv->pad.flags = MEDIA_PAD_FL_SOURCE;
+ priv->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&priv->subdev.entity, 1, &priv->pad);
+ if (ret < 0)
+ goto error_gpio_put;
+#endif
+
priv->cfmt = &ov772x_cfmts[0];
priv->win = &ov772x_win_sizes[0];
priv->fps = 15;
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret)
- goto error_gpio_put;
+ goto error_entity_cleanup;
return 0;
+error_entity_cleanup:
+ media_entity_cleanup(&priv->subdev.entity);
error_gpio_put:
if (priv->pwdn_gpio)
gpiod_put(priv->pwdn_gpio);
@@ -1317,6 +1472,8 @@ error_clk_put:
clk_put(priv->clk);
error_ctrl_free:
v4l2_ctrl_handler_free(&priv->hdl);
+error_mutex_destroy:
+ mutex_destroy(&priv->lock);
return ret;
}
@@ -1325,11 +1482,13 @@ static int ov772x_remove(struct i2c_client *client)
{
struct ov772x_priv *priv = to_ov772x(i2c_get_clientdata(client));
+ media_entity_cleanup(&priv->subdev.entity);
clk_put(priv->clk);
if (priv->pwdn_gpio)
gpiod_put(priv->pwdn_gpio);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
+ mutex_destroy(&priv->lock);
return 0;
}
@@ -1340,9 +1499,17 @@ static const struct i2c_device_id ov772x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ov772x_id);
+static const struct of_device_id ov772x_of_match[] = {
+ { .compatible = "ovti,ov7725", },
+ { .compatible = "ovti,ov7720", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov772x_of_match);
+
static struct i2c_driver ov772x_i2c_driver = {
.driver = {
.name = "ov772x",
+ .of_match_table = ov772x_of_match,
},
.probe = ov772x_probe,
.remove = ov772x_remove,
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
new file mode 100644
index 000000000000..6ad998ad1b16
--- /dev/null
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for RJ54N1CB0C CMOS Image Sensor from Sharp
+ *
+ * Copyright (C) 2018, Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/i2c/rj54n1cb0c.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#define RJ54N1_DEV_CODE 0x0400
+#define RJ54N1_DEV_CODE2 0x0401
+#define RJ54N1_OUT_SEL 0x0403
+#define RJ54N1_XY_OUTPUT_SIZE_S_H 0x0404
+#define RJ54N1_X_OUTPUT_SIZE_S_L 0x0405
+#define RJ54N1_Y_OUTPUT_SIZE_S_L 0x0406
+#define RJ54N1_XY_OUTPUT_SIZE_P_H 0x0407
+#define RJ54N1_X_OUTPUT_SIZE_P_L 0x0408
+#define RJ54N1_Y_OUTPUT_SIZE_P_L 0x0409
+#define RJ54N1_LINE_LENGTH_PCK_S_H 0x040a
+#define RJ54N1_LINE_LENGTH_PCK_S_L 0x040b
+#define RJ54N1_LINE_LENGTH_PCK_P_H 0x040c
+#define RJ54N1_LINE_LENGTH_PCK_P_L 0x040d
+#define RJ54N1_RESIZE_N 0x040e
+#define RJ54N1_RESIZE_N_STEP 0x040f
+#define RJ54N1_RESIZE_STEP 0x0410
+#define RJ54N1_RESIZE_HOLD_H 0x0411
+#define RJ54N1_RESIZE_HOLD_L 0x0412
+#define RJ54N1_H_OBEN_OFS 0x0413
+#define RJ54N1_V_OBEN_OFS 0x0414
+#define RJ54N1_RESIZE_CONTROL 0x0415
+#define RJ54N1_STILL_CONTROL 0x0417
+#define RJ54N1_INC_USE_SEL_H 0x0425
+#define RJ54N1_INC_USE_SEL_L 0x0426
+#define RJ54N1_MIRROR_STILL_MODE 0x0427
+#define RJ54N1_INIT_START 0x0428
+#define RJ54N1_SCALE_1_2_LEV 0x0429
+#define RJ54N1_SCALE_4_LEV 0x042a
+#define RJ54N1_Y_GAIN 0x04d8
+#define RJ54N1_APT_GAIN_UP 0x04fa
+#define RJ54N1_RA_SEL_UL 0x0530
+#define RJ54N1_BYTE_SWAP 0x0531
+#define RJ54N1_OUT_SIGPO 0x053b
+#define RJ54N1_WB_SEL_WEIGHT_I 0x054e
+#define RJ54N1_BIT8_WB 0x0569
+#define RJ54N1_HCAPS_WB 0x056a
+#define RJ54N1_VCAPS_WB 0x056b
+#define RJ54N1_HCAPE_WB 0x056c
+#define RJ54N1_VCAPE_WB 0x056d
+#define RJ54N1_EXPOSURE_CONTROL 0x058c
+#define RJ54N1_FRAME_LENGTH_S_H 0x0595
+#define RJ54N1_FRAME_LENGTH_S_L 0x0596
+#define RJ54N1_FRAME_LENGTH_P_H 0x0597
+#define RJ54N1_FRAME_LENGTH_P_L 0x0598
+#define RJ54N1_PEAK_H 0x05b7
+#define RJ54N1_PEAK_50 0x05b8
+#define RJ54N1_PEAK_60 0x05b9
+#define RJ54N1_PEAK_DIFF 0x05ba
+#define RJ54N1_IOC 0x05ef
+#define RJ54N1_TG_BYPASS 0x0700
+#define RJ54N1_PLL_L 0x0701
+#define RJ54N1_PLL_N 0x0702
+#define RJ54N1_PLL_EN 0x0704
+#define RJ54N1_RATIO_TG 0x0706
+#define RJ54N1_RATIO_T 0x0707
+#define RJ54N1_RATIO_R 0x0708
+#define RJ54N1_RAMP_TGCLK_EN 0x0709
+#define RJ54N1_OCLK_DSP 0x0710
+#define RJ54N1_RATIO_OP 0x0711
+#define RJ54N1_RATIO_O 0x0712
+#define RJ54N1_OCLK_SEL_EN 0x0713
+#define RJ54N1_CLK_RST 0x0717
+#define RJ54N1_RESET_STANDBY 0x0718
+#define RJ54N1_FWFLG 0x07fe
+
+#define E_EXCLK (1 << 7)
+#define SOFT_STDBY (1 << 4)
+#define SEN_RSTX (1 << 2)
+#define TG_RSTX (1 << 1)
+#define DSP_RSTX (1 << 0)
+
+#define RESIZE_HOLD_SEL (1 << 2)
+#define RESIZE_GO (1 << 1)
+
+/*
+ * When cropping, the camera automatically centers the cropped region, there
+ * doesn't seem to be a way to specify an explicit location of the rectangle.
+ */
+#define RJ54N1_COLUMN_SKIP 0
+#define RJ54N1_ROW_SKIP 0
+#define RJ54N1_MAX_WIDTH 1600
+#define RJ54N1_MAX_HEIGHT 1200
+
+#define PLL_L 2
+#define PLL_N 0x31
+
+/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */
+
+/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
+struct rj54n1_datafmt {
+ u32 code;
+ enum v4l2_colorspace colorspace;
+};
+
+/* Find a data format by a pixel code in an array */
+static const struct rj54n1_datafmt *rj54n1_find_datafmt(
+ u32 code, const struct rj54n1_datafmt *fmt,
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (fmt[i].code == code)
+ return fmt + i;
+
+ return NULL;
+}
+
+static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
+ {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
+};
+
+struct rj54n1_clock_div {
+ u8 ratio_tg; /* can be 0 or an odd number */
+ u8 ratio_t;
+ u8 ratio_r;
+ u8 ratio_op;
+ u8 ratio_o;
+};
+
+struct rj54n1 {
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
+ struct clk *clk;
+ struct gpio_desc *pwup_gpio;
+ struct gpio_desc *enable_gpio;
+ struct rj54n1_clock_div clk_div;
+ const struct rj54n1_datafmt *fmt;
+ struct v4l2_rect rect; /* Sensor window */
+ unsigned int tgclk_mhz;
+ bool auto_wb;
+ unsigned short width; /* Output window */
+ unsigned short height;
+ unsigned short resize; /* Sensor * 1024 / resize = Output */
+ unsigned short scale;
+ u8 bank;
+};
+
+struct rj54n1_reg_val {
+ u16 reg;
+ u8 val;
+};
+
+static const struct rj54n1_reg_val bank_4[] = {
+ {0x417, 0},
+ {0x42c, 0},
+ {0x42d, 0xf0},
+ {0x42e, 0},
+ {0x42f, 0x50},
+ {0x430, 0xf5},
+ {0x431, 0x16},
+ {0x432, 0x20},
+ {0x433, 0},
+ {0x434, 0xc8},
+ {0x43c, 8},
+ {0x43e, 0x90},
+ {0x445, 0x83},
+ {0x4ba, 0x58},
+ {0x4bb, 4},
+ {0x4bc, 0x20},
+ {0x4db, 4},
+ {0x4fe, 2},
+};
+
+static const struct rj54n1_reg_val bank_5[] = {
+ {0x514, 0},
+ {0x516, 0},
+ {0x518, 0},
+ {0x51a, 0},
+ {0x51d, 0xff},
+ {0x56f, 0x28},
+ {0x575, 0x40},
+ {0x5bc, 0x48},
+ {0x5c1, 6},
+ {0x5e5, 0x11},
+ {0x5e6, 0x43},
+ {0x5e7, 0x33},
+ {0x5e8, 0x21},
+ {0x5e9, 0x30},
+ {0x5ea, 0x0},
+ {0x5eb, 0xa5},
+ {0x5ec, 0xff},
+ {0x5fe, 2},
+};
+
+static const struct rj54n1_reg_val bank_7[] = {
+ {0x70a, 0},
+ {0x714, 0xff},
+ {0x715, 0xff},
+ {0x716, 0x1f},
+ {0x7FE, 2},
+};
+
+static const struct rj54n1_reg_val bank_8[] = {
+ {0x800, 0x00},
+ {0x801, 0x01},
+ {0x802, 0x61},
+ {0x805, 0x00},
+ {0x806, 0x00},
+ {0x807, 0x00},
+ {0x808, 0x00},
+ {0x809, 0x01},
+ {0x80A, 0x61},
+ {0x80B, 0x00},
+ {0x80C, 0x01},
+ {0x80D, 0x00},
+ {0x80E, 0x00},
+ {0x80F, 0x00},
+ {0x810, 0x00},
+ {0x811, 0x01},
+ {0x812, 0x61},
+ {0x813, 0x00},
+ {0x814, 0x11},
+ {0x815, 0x00},
+ {0x816, 0x41},
+ {0x817, 0x00},
+ {0x818, 0x51},
+ {0x819, 0x01},
+ {0x81A, 0x1F},
+ {0x81B, 0x00},
+ {0x81C, 0x01},
+ {0x81D, 0x00},
+ {0x81E, 0x11},
+ {0x81F, 0x00},
+ {0x820, 0x41},
+ {0x821, 0x00},
+ {0x822, 0x51},
+ {0x823, 0x00},
+ {0x824, 0x00},
+ {0x825, 0x00},
+ {0x826, 0x47},
+ {0x827, 0x01},
+ {0x828, 0x4F},
+ {0x829, 0x00},
+ {0x82A, 0x00},
+ {0x82B, 0x00},
+ {0x82C, 0x30},
+ {0x82D, 0x00},
+ {0x82E, 0x40},
+ {0x82F, 0x00},
+ {0x830, 0xB3},
+ {0x831, 0x00},
+ {0x832, 0xE3},
+ {0x833, 0x00},
+ {0x834, 0x00},
+ {0x835, 0x00},
+ {0x836, 0x00},
+ {0x837, 0x00},
+ {0x838, 0x00},
+ {0x839, 0x01},
+ {0x83A, 0x61},
+ {0x83B, 0x00},
+ {0x83C, 0x01},
+ {0x83D, 0x00},
+ {0x83E, 0x00},
+ {0x83F, 0x00},
+ {0x840, 0x00},
+ {0x841, 0x01},
+ {0x842, 0x61},
+ {0x843, 0x00},
+ {0x844, 0x1D},
+ {0x845, 0x00},
+ {0x846, 0x00},
+ {0x847, 0x00},
+ {0x848, 0x00},
+ {0x849, 0x01},
+ {0x84A, 0x1F},
+ {0x84B, 0x00},
+ {0x84C, 0x05},
+ {0x84D, 0x00},
+ {0x84E, 0x19},
+ {0x84F, 0x01},
+ {0x850, 0x21},
+ {0x851, 0x01},
+ {0x852, 0x5D},
+ {0x853, 0x00},
+ {0x854, 0x00},
+ {0x855, 0x00},
+ {0x856, 0x19},
+ {0x857, 0x01},
+ {0x858, 0x21},
+ {0x859, 0x00},
+ {0x85A, 0x00},
+ {0x85B, 0x00},
+ {0x85C, 0x00},
+ {0x85D, 0x00},
+ {0x85E, 0x00},
+ {0x85F, 0x00},
+ {0x860, 0xB3},
+ {0x861, 0x00},
+ {0x862, 0xE3},
+ {0x863, 0x00},
+ {0x864, 0x00},
+ {0x865, 0x00},
+ {0x866, 0x00},
+ {0x867, 0x00},
+ {0x868, 0x00},
+ {0x869, 0xE2},
+ {0x86A, 0x00},
+ {0x86B, 0x01},
+ {0x86C, 0x06},
+ {0x86D, 0x00},
+ {0x86E, 0x00},
+ {0x86F, 0x00},
+ {0x870, 0x60},
+ {0x871, 0x8C},
+ {0x872, 0x10},
+ {0x873, 0x00},
+ {0x874, 0xE0},
+ {0x875, 0x00},
+ {0x876, 0x27},
+ {0x877, 0x01},
+ {0x878, 0x00},
+ {0x879, 0x00},
+ {0x87A, 0x00},
+ {0x87B, 0x03},
+ {0x87C, 0x00},
+ {0x87D, 0x00},
+ {0x87E, 0x00},
+ {0x87F, 0x00},
+ {0x880, 0x00},
+ {0x881, 0x00},
+ {0x882, 0x00},
+ {0x883, 0x00},
+ {0x884, 0x00},
+ {0x885, 0x00},
+ {0x886, 0xF8},
+ {0x887, 0x00},
+ {0x888, 0x03},
+ {0x889, 0x00},
+ {0x88A, 0x64},
+ {0x88B, 0x00},
+ {0x88C, 0x03},
+ {0x88D, 0x00},
+ {0x88E, 0xB1},
+ {0x88F, 0x00},
+ {0x890, 0x03},
+ {0x891, 0x01},
+ {0x892, 0x1D},
+ {0x893, 0x00},
+ {0x894, 0x03},
+ {0x895, 0x01},
+ {0x896, 0x4B},
+ {0x897, 0x00},
+ {0x898, 0xE5},
+ {0x899, 0x00},
+ {0x89A, 0x01},
+ {0x89B, 0x00},
+ {0x89C, 0x01},
+ {0x89D, 0x04},
+ {0x89E, 0xC8},
+ {0x89F, 0x00},
+ {0x8A0, 0x01},
+ {0x8A1, 0x01},
+ {0x8A2, 0x61},
+ {0x8A3, 0x00},
+ {0x8A4, 0x01},
+ {0x8A5, 0x00},
+ {0x8A6, 0x00},
+ {0x8A7, 0x00},
+ {0x8A8, 0x00},
+ {0x8A9, 0x00},
+ {0x8AA, 0x7F},
+ {0x8AB, 0x03},
+ {0x8AC, 0x00},
+ {0x8AD, 0x00},
+ {0x8AE, 0x00},
+ {0x8AF, 0x00},
+ {0x8B0, 0x00},
+ {0x8B1, 0x00},
+ {0x8B6, 0x00},
+ {0x8B7, 0x01},
+ {0x8B8, 0x00},
+ {0x8B9, 0x00},
+ {0x8BA, 0x02},
+ {0x8BB, 0x00},
+ {0x8BC, 0xFF},
+ {0x8BD, 0x00},
+ {0x8FE, 2},
+};
+
+static const struct rj54n1_reg_val bank_10[] = {
+ {0x10bf, 0x69}
+};
+
+/* Clock divider register values (divider = register value + 1); the chip's power-on defaults are noted inline */
+static const struct rj54n1_clock_div clk_div = {
+ .ratio_tg = 3 /* default: 5 */,
+ .ratio_t = 4 /* default: 1 */,
+ .ratio_r = 4 /* default: 0 */,
+ .ratio_op = 1 /* default: 5 */,
+ .ratio_o = 9 /* default: 0 */,
+};
+
+static struct rj54n1 *to_rj54n1(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct rj54n1, subdev);
+}
+
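+/*
+ * The sensor exposes a banked register map: the high byte of the 16-bit
+ * register addresses used below selects a bank, written to the bank-select
+ * register 0xff, and the low byte is the offset within that bank. The
+ * current bank is cached in rj54n1->bank, so it is only rewritten when an
+ * access crosses a bank boundary.
+ */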
+static int reg_read(struct i2c_client *client, const u16 reg)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret;
+
+ /* set bank */
+ if (rj54n1->bank != reg >> 8) {
+ dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
+ ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
+ if (ret < 0)
+ return ret;
+ rj54n1->bank = reg >> 8;
+ }
+ return i2c_smbus_read_byte_data(client, reg & 0xff);
+}
+
+static int reg_write(struct i2c_client *client, const u16 reg,
+ const u8 data)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret;
+
+ /* set bank */
+ if (rj54n1->bank != reg >> 8) {
+ dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
+ ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
+ if (ret < 0)
+ return ret;
+ rj54n1->bank = reg >> 8;
+ }
+ dev_dbg(&client->dev, "[0x%x] = 0x%x\n", reg & 0xff, data);
+ return i2c_smbus_write_byte_data(client, reg & 0xff, data);
+}
+
+static int reg_set(struct i2c_client *client, const u16 reg,
+ const u8 data, const u8 mask)
+{
+ int ret;
+
+ ret = reg_read(client, reg);
+ if (ret < 0)
+ return ret;
+ return reg_write(client, reg, (ret & ~mask) | (data & mask));
+}
+
+static int reg_write_multiple(struct i2c_client *client,
+ const struct rj54n1_reg_val *rv, const int n)
+{
+ int i, ret;
+
+ for (i = 0; i < n; i++) {
+ ret = reg_write(client, rv->reg, rv->val);
+ if (ret < 0)
+ return ret;
+ rv++;
+ }
+
+ return 0;
+}
+
+static int rj54n1_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index >= ARRAY_SIZE(rj54n1_colour_fmts))
+ return -EINVAL;
+
+ code->code = rj54n1_colour_fmts[code->index].code;
+ return 0;
+}
+
+static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* Switch between preview and still shot modes */
+ return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
+}
+
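+/*
+ * Program a width / height pair: the low 8 bits of each value go into the
+ * reg_x / reg_y registers, while the high bits are packed together into the
+ * shared reg_xy register (width bits 10:8 in bits 6:4, height bits 10:8 in
+ * bits 2:0).
+ */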
+static int rj54n1_set_rect(struct i2c_client *client,
+ u16 reg_x, u16 reg_y, u16 reg_xy,
+ u32 width, u32 height)
+{
+ int ret;
+
+ ret = reg_write(client, reg_xy,
+ ((width >> 4) & 0x70) |
+ ((height >> 8) & 7));
+
+ if (!ret)
+ ret = reg_write(client, reg_x, width & 0xff);
+ if (!ret)
+ ret = reg_write(client, reg_y, height & 0xff);
+
+ return ret;
+}
+
+/*
+ * Some commands, specifically certain initialisation sequences, require
+ * a commit operation.
+ */
+static int rj54n1_commit(struct i2c_client *client)
+{
+ int ret = reg_write(client, RJ54N1_INIT_START, 1);
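+ /* Hold INIT_START asserted for 10 ms before deasserting it again */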
+ msleep(10);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_INIT_START, 0);
+ return ret;
+}
+
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h);
+
+static int rj54n1_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct v4l2_rect *rect = &sel->r;
+ int output_w, output_h, input_w = rect->width, input_h = rect->height;
+ int ret;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /* arbitrary minimum width and height, edges unimportant */
+ v4l_bound_align_image(&input_w, 8, RJ54N1_MAX_WIDTH, 0,
+ &input_h, 8, RJ54N1_MAX_HEIGHT, 0, 0);
+
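+ /* Convert the crop to an output size using the current resize coefficient (1/1024 units), rounding to nearest */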
+ output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
+ output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
+
+ dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n",
+ input_w, input_h, rj54n1->resize, output_w, output_h);
+
+ ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
+ if (ret < 0)
+ return ret;
+
+ rj54n1->width = output_w;
+ rj54n1->height = output_h;
+ rj54n1->resize = ret;
+ rj54n1->rect.width = input_w;
+ rj54n1->rect.height = input_h;
+
+ return 0;
+}
+
+static int rj54n1_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = RJ54N1_COLUMN_SKIP;
+ sel->r.top = RJ54N1_ROW_SKIP;
+ sel->r.width = RJ54N1_MAX_WIDTH;
+ sel->r.height = RJ54N1_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = rj54n1->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rj54n1_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (format->pad)
+ return -EINVAL;
+
+ mf->code = rj54n1->fmt->code;
+ mf->colorspace = rj54n1->fmt->colorspace;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ mf->xfer_func = V4L2_XFER_FUNC_SRGB;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = rj54n1->width;
+ mf->height = rj54n1->height;
+
+ return 0;
+}
+
+/*
+ * The actual geometry configuration routine. It scales the input window into
+ * the output one, updates the window sizes and returns an error, or the
+ * resize coefficient on success. Note: we only use the sensor's "Fixed
+ * Scaling" mode on this camera.
+ */
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
+ output_w = *out_w, output_h = *out_h;
+ u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom;
+ unsigned int peak, peak_50, peak_60;
+ int ret;
+
+ /*
+ * There is a problem with crops where the input window is larger than
+ * 512x384 and the output window is larger than half of the input one.
+ * In that case we have to reduce either the input window to at most
+ * 512x384, or the output window to at most half of the input.
+ */
+ if (output_w > max(512U, input_w / 2)) {
+ if (2 * output_w > RJ54N1_MAX_WIDTH) {
+ input_w = RJ54N1_MAX_WIDTH;
+ output_w = RJ54N1_MAX_WIDTH / 2;
+ } else {
+ input_w = output_w * 2;
+ }
+
+ dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n",
+ input_w, output_w);
+ }
+
+ if (output_h > max(384U, input_h / 2)) {
+ if (2 * output_h > RJ54N1_MAX_HEIGHT) {
+ input_h = RJ54N1_MAX_HEIGHT;
+ output_h = RJ54N1_MAX_HEIGHT / 2;
+ } else {
+ input_h = output_h * 2;
+ }
+
+ dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n",
+ input_h, output_h);
+ }
+
+ /* Idea: use the read mode for snapshots, handle separate geometries */
+ ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L,
+ RJ54N1_Y_OUTPUT_SIZE_S_L,
+ RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h);
+ if (!ret)
+ ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_P_L,
+ RJ54N1_Y_OUTPUT_SIZE_P_L,
+ RJ54N1_XY_OUTPUT_SIZE_P_H, output_w, output_h);
+
+ if (ret < 0)
+ return ret;
+
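+ /*
+ * Compute the resize coefficient in 1/1024 units: 1024 is 1:1, larger
+ * values downscale. If the requested output exceeds the input in both
+ * dimensions, the input window is simply grown to match instead.
+ */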
+ if (output_w > input_w && output_h > input_h) {
+ input_w = output_w;
+ input_h = output_h;
+
+ resize = 1024;
+ } else {
+ unsigned int resize_x, resize_y;
+ resize_x = (input_w * 1024 + output_w / 2) / output_w;
+ resize_y = (input_h * 1024 + output_h / 2) / output_h;
+
+ /* We want max(resize_x, resize_y), check if it still fits */
+ if (resize_x > resize_y &&
+ (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT)
+ resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) /
+ output_h;
+ else if (resize_y > resize_x &&
+ (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH)
+ resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) /
+ output_w;
+ else
+ resize = max(resize_x, resize_y);
+
+ /* Prohibited value ranges */
+ switch (resize) {
+ case 2040 ... 2047:
+ resize = 2039;
+ break;
+ case 4080 ... 4095:
+ resize = 4079;
+ break;
+ case 8160 ... 8191:
+ resize = 8159;
+ break;
+ case 16320 ... 16384:
+ resize = 16319;
+ }
+ }
+
+ /* Set scaling */
+ ret = reg_write(client, RJ54N1_RESIZE_HOLD_L, resize & 0xff);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESIZE_HOLD_H, resize >> 8);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Configure the skipping bitmask: the sensor automatically selects a
+ * skipping value from among the set bits. The datasheet is unclear on
+ * this too. Reportedly, the register enables every skipping value that
+ * a given resize may require, and the camera then picks which ones to
+ * use, but it is not documented how to identify the required values.
+ * It is also unclear why we don't simply set all bits and let the
+ * camera choose - possibly that would increase processing time and
+ * reduce the framerate. Using 0xfffc for INC_USE_SEL didn't seem to
+ * improve the image quality or stability for larger frames (see the
+ * comment above), but the framerate wasn't checked.
+ */
+ skip = min(resize / 1024, 15U);
+
+ inc_sel = 1 << skip;
+
+ if (inc_sel <= 2)
+ inc_sel = 0xc;
+ else if (resize & 1023 && skip < 15)
+ inc_sel |= 1 << (skip + 1);
+
+ ret = reg_write(client, RJ54N1_INC_USE_SEL_L, inc_sel & 0xfc);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8);
+
+ if (!rj54n1->auto_wb) {
+ /* White balance measurement window, used when auto WB is off; auto WB measures over the whole image (see rj54n1_s_ctrl) */
+ wb_left = output_w / 16;
+ wb_right = (3 * output_w / 4 - 3) / 4;
+ wb_top = output_h / 16;
+ wb_bottom = (3 * output_h / 4 - 3) / 4;
+ wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) |
+ ((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1);
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom);
+ }
+
+ /* Antiflicker: program the flicker peak periods for 50 Hz and 60 Hz mains */
+ peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz /
+ 10000;
+ peak_50 = peak / 6;
+ peak_60 = peak / 5;
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_H,
+ ((peak_50 >> 4) & 0xf0) | (peak_60 >> 8));
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_50, peak_50);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_60, peak_60);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150);
+
+ /* Start resizing */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
+ RESIZE_HOLD_SEL | RESIZE_GO | 1);
+
+ if (ret < 0)
+ return ret;
+
+ /* Constant taken from manufacturer's example */
+ msleep(230);
+
+ ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1);
+ if (ret < 0)
+ return ret;
+
+ *in_w = (output_w * resize + 512) / 1024;
+ *in_h = (output_h * resize + 512) / 1024;
+ *out_w = output_w;
+ *out_h = output_h;
+
+ dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
+ *in_w, *in_h, resize, output_w, output_h, skip);
+
+ return resize;
+}
+
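+/*
+ * Bring up the sensor clock tree: enable the external clock and leave
+ * stand-by, program the PLL and the TG / output clock dividers, release the
+ * sensor reset, start the PLL and finally enable the divided output clocks.
+ */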
+static int rj54n1_set_clock(struct i2c_client *client)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret;
+
+ /* Enable external clock */
+ ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY);
+ /* Leave stand-by. Note: use this when implementing suspend / resume */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK);
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PLL_L, PLL_L);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PLL_N, PLL_N);
+
+ /* TGCLK dividers */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_TG,
+ rj54n1->clk_div.ratio_tg);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_T,
+ rj54n1->clk_div.ratio_t);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_R,
+ rj54n1->clk_div.ratio_r);
+
+ /* Enable TGCLK & RAMP */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RAMP_TGCLK_EN, 3);
+
+ /* Disable clock output */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_OCLK_DSP, 0);
+
+ /* Set divisors */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_OP,
+ rj54n1->clk_div.ratio_op);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_O,
+ rj54n1->clk_div.ratio_o);
+
+ /* Enable OCLK */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
+
+ /* Use PLL for Timing Generator, write 2 to reserved bits */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_TG_BYPASS, 2);
+
+ /* Take sensor out of reset */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY,
+ E_EXCLK | SEN_RSTX);
+ /* Enable PLL */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PLL_EN, 1);
+
+ /* Wait for PLL to stabilise */
+ msleep(10);
+
+ /* Enable clock to frequency divider */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_CLK_RST, 1);
+
+ if (!ret)
+ ret = reg_read(client, RJ54N1_CLK_RST);
+ if (ret != 1) {
+ dev_err(&client->dev,
+ "Resetting RJ54N1CB0C clock failed: %d!\n", ret);
+ return -EIO;
+ }
+
+ /* Start the PLL */
+ ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1);
+
+ /* Enable OCLK */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
+
+ return ret;
+}
+
+static int rj54n1_reg_init(struct i2c_client *client)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret = rj54n1_set_clock(client);
+
+ if (!ret)
+ ret = reg_write_multiple(client, bank_7, ARRAY_SIZE(bank_7));
+ if (!ret)
+ ret = reg_write_multiple(client, bank_10, ARRAY_SIZE(bank_10));
+
+ /* Set binning divisors */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_SCALE_1_2_LEV, 3 | (7 << 4));
+ if (!ret)
+ ret = reg_write(client, RJ54N1_SCALE_4_LEV, 0xf);
+
+ /* Switch to fixed resize mode */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
+ RESIZE_HOLD_SEL | 1);
+
+ /* Set gain */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_Y_GAIN, 0x84);
+
+ /*
+ * Mirror the image back: default is upside down and left-to-right...
+ * Set manual preview / still shot switching
+ */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27);
+
+ if (!ret)
+ ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4));
+
+ /* Auto exposure area */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80);
+ /* Check current auto WB config */
+ if (!ret)
+ ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I);
+ if (ret >= 0) {
+ rj54n1->auto_wb = ret & 0x80;
+ ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5));
+ }
+ if (!ret)
+ ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8));
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY,
+ E_EXCLK | DSP_RSTX | SEN_RSTX);
+
+ /* Commit init */
+ if (!ret)
+ ret = rj54n1_commit(client);
+
+ /* Take DSP, TG, sensor out of reset */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY,
+ E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX);
+
+ /* Start register update? Same register as 0x?FE in many bank_* sets */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_FWFLG, 2);
+
+ /* Constant taken from manufacturer's example */
+ msleep(700);
+
+ return ret;
+}
+
+static int rj54n1_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct rj54n1_datafmt *fmt;
+ int output_w, output_h, max_w, max_h,
+ input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
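+ /* Raw Bayer formats must keep even width and height (walign / halign = 1 below) so the CFA pattern is preserved */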
+ int align = mf->code == MEDIA_BUS_FMT_SBGGR10_1X10 ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE;
+ int ret;
+
+ if (format->pad)
+ return -EINVAL;
+
+ dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
+ __func__, mf->code, mf->width, mf->height);
+
+ fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
+ ARRAY_SIZE(rj54n1_colour_fmts));
+ if (!fmt) {
+ fmt = rj54n1->fmt;
+ mf->code = fmt->code;
+ }
+
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = fmt->colorspace;
+
+ v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
+ &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *mf;
+ return 0;
+ }
+
+ /*
+ * Verify if the sensor has just been powered on. TODO: replace this
+ * with proper PM, when a suitable API is available.
+ */
+ ret = reg_read(client, RJ54N1_RESET_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & E_EXCLK)) {
+ ret = rj54n1_reg_init(client);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
+ switch (mf->code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ break;
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ break;
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 5);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ /* Special case: a raw mode with 10 bits of data per clock tick */
+ if (!ret)
+ ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
+ (mf->code == MEDIA_BUS_FMT_SBGGR10_1X10) << 1, 2);
+
+ if (ret < 0)
+ return ret;
+
+ /* Supported scales 1:1 >= scale > 1:16 */
+ max_w = mf->width * (16 * 1024 - 1) / 1024;
+ if (input_w > max_w)
+ input_w = max_w;
+ max_h = mf->height * (16 * 1024 - 1) / 1024;
+ if (input_h > max_h)
+ input_h = max_h;
+
+ output_w = mf->width;
+ output_h = mf->height;
+
+ ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
+ if (ret < 0)
+ return ret;
+
+ fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
+ ARRAY_SIZE(rj54n1_colour_fmts));
+
+ rj54n1->fmt = fmt;
+ rj54n1->resize = ret;
+ rj54n1->rect.width = input_w;
+ rj54n1->rect.height = input_h;
+ rj54n1->width = output_w;
+ rj54n1->height = output_h;
+
+ mf->width = output_w;
+ mf->height = output_h;
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = fmt->colorspace;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int rj54n1_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg < 0x400 || reg->reg > 0x1fff)
+ /* Registers >= 0x0800 are only available from Sharp support */
+ return -EINVAL;
+
+ reg->size = 1;
+ reg->val = reg_read(client, reg->reg);
+
+ if (reg->val > 0xff)
+ return -EIO;
+
+ return 0;
+}
+
+static int rj54n1_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg < 0x400 || reg->reg > 0x1fff)
+ /* Registers >= 0x0800 are only available from Sharp support */
+ return -EINVAL;
+
+ if (reg_write(client, reg->reg, reg->val) < 0)
+ return -EIO;
+
+ return 0;
+}
+#endif
+
+static int rj54n1_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (on) {
+ if (rj54n1->pwup_gpio)
+ gpiod_set_value(rj54n1->pwup_gpio, 1);
+ if (rj54n1->enable_gpio)
+ gpiod_set_value(rj54n1->enable_gpio, 1);
+
+ msleep(1);
+
+ return clk_prepare_enable(rj54n1->clk);
+ }
+
+ clk_disable_unprepare(rj54n1->clk);
+
+ if (rj54n1->enable_gpio)
+ gpiod_set_value(rj54n1->enable_gpio, 0);
+ if (rj54n1->pwup_gpio)
+ gpiod_set_value(rj54n1->pwup_gpio, 0);
+
+ return 0;
+}
+
+static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rj54n1 *rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl);
+ struct v4l2_subdev *sd = &rj54n1->subdev;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int data;
+
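+ /*
+ * Note the inverted sense of the flip bits: the sensor's default
+ * readout is mirrored (see the MIRROR_STILL_MODE setup in
+ * rj54n1_reg_init), so enabling a flip control clears the bit.
+ */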
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1);
+ else
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1);
+ if (data < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2);
+ else
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2);
+ if (data < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_GAIN:
+ if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ /* Auto WB area - whole image */
+ if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7,
+ 0x80) < 0)
+ return -EIO;
+ rj54n1->auto_wb = ctrl->val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = {
+ .s_ctrl = rj54n1_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = rj54n1_g_register,
+ .s_register = rj54n1_s_register,
+#endif
+ .s_power = rj54n1_s_power,
+};
+
+static const struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
+ .s_stream = rj54n1_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = {
+ .enum_mbus_code = rj54n1_enum_mbus_code,
+ .get_selection = rj54n1_get_selection,
+ .set_selection = rj54n1_set_selection,
+ .get_fmt = rj54n1_get_fmt,
+ .set_fmt = rj54n1_set_fmt,
+};
+
+static const struct v4l2_subdev_ops rj54n1_subdev_ops = {
+ .core = &rj54n1_subdev_core_ops,
+ .video = &rj54n1_subdev_video_ops,
+ .pad = &rj54n1_subdev_pad_ops,
+};
+
+/*
+ * Interface active, can use I2C. If this fails, it may simply mean that it
+ * wasn't our capture interface, so we wait for the right one.
+ */
+static int rj54n1_video_probe(struct i2c_client *client,
+ struct rj54n1_pdata *priv)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int data1, data2;
+ int ret;
+
+ ret = rj54n1_s_power(&rj54n1->subdev, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Read out the chip version register */
+ data1 = reg_read(client, RJ54N1_DEV_CODE);
+ data2 = reg_read(client, RJ54N1_DEV_CODE2);
+
+ if (data1 != 0x51 || data2 != 0x10) {
+ ret = -ENODEV;
+ dev_info(&client->dev, "No RJ54N1CB0C found, read 0x%x:0x%x\n",
+ data1, data2);
+ goto done;
+ }
+
+ /* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */
+ ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7);
+ if (ret < 0)
+ goto done;
+
+ dev_info(&client->dev, "Detected a RJ54N1CB0C chip ID 0x%x:0x%x\n",
+ data1, data2);
+
+ ret = v4l2_ctrl_handler_setup(&rj54n1->hdl);
+
+done:
+ rj54n1_s_power(&rj54n1->subdev, 0);
+ return ret;
+}
+
+static int rj54n1_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct rj54n1 *rj54n1;
+ struct i2c_adapter *adapter = client->adapter;
+ struct rj54n1_pdata *rj54n1_priv;
+ int ret;
+
+ if (!client->dev.platform_data) {
+ dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
+ return -EINVAL;
+ }
+
+ rj54n1_priv = client->dev.platform_data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_warn(&adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n");
+ return -EIO;
+ }
+
+ rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL);
+ if (!rj54n1)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops);
+ v4l2_ctrl_handler_init(&rj54n1->hdl, 4);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 66);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
+ if (rj54n1->hdl.error)
+ return rj54n1->hdl.error;
+
+ rj54n1->clk_div = clk_div;
+ rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
+ rj54n1->rect.top = RJ54N1_ROW_SKIP;
+ rj54n1->rect.width = RJ54N1_MAX_WIDTH;
+ rj54n1->rect.height = RJ54N1_MAX_HEIGHT;
+ rj54n1->width = RJ54N1_MAX_WIDTH;
+ rj54n1->height = RJ54N1_MAX_HEIGHT;
+ rj54n1->fmt = &rj54n1_colour_fmts[0];
+ rj54n1->resize = 1024;
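+ /*
+ * TG clock in MHz: the master clock is scaled by the PLL ratio N / L
+ * and divided by the TG and T dividers (divider = register value + 1).
+ */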
+ rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
+ (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
+
+ rj54n1->clk = clk_get(&client->dev, NULL);
+ if (IS_ERR(rj54n1->clk)) {
+ ret = PTR_ERR(rj54n1->clk);
+ goto err_free_ctrl;
+ }
+
+ rj54n1->pwup_gpio = gpiod_get_optional(&client->dev, "powerup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(rj54n1->pwup_gpio)) {
+ dev_info(&client->dev, "Unable to get GPIO \"powerup\": %ld\n",
+ PTR_ERR(rj54n1->pwup_gpio));
+ ret = PTR_ERR(rj54n1->pwup_gpio);
+ goto err_clk_put;
+ }
+
+ rj54n1->enable_gpio = gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(rj54n1->enable_gpio)) {
+ dev_info(&client->dev, "Unable to get GPIO \"enable\": %ld\n",
+ PTR_ERR(rj54n1->enable_gpio));
+ ret = PTR_ERR(rj54n1->enable_gpio);
+ goto err_gpio_put;
+ }
+
+ ret = rj54n1_video_probe(client, rj54n1_priv);
+ if (ret < 0)
+ goto err_gpio_put;
+
+ ret = v4l2_async_register_subdev(&rj54n1->subdev);
+ if (ret)
+ goto err_gpio_put;
+
+ return 0;
+
+err_gpio_put:
+ /* enable_gpio may hold an ERR_PTR if its lookup failed above, never put that */
+ if (!IS_ERR_OR_NULL(rj54n1->enable_gpio))
+ gpiod_put(rj54n1->enable_gpio);
+
+ if (rj54n1->pwup_gpio)
+ gpiod_put(rj54n1->pwup_gpio);
+
+err_clk_put:
+ clk_put(rj54n1->clk);
+
+err_free_ctrl:
+ v4l2_ctrl_handler_free(&rj54n1->hdl);
+
+ return ret;
+}
+
+static int rj54n1_remove(struct i2c_client *client)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (rj54n1->enable_gpio)
+ gpiod_put(rj54n1->enable_gpio);
+ if (rj54n1->pwup_gpio)
+ gpiod_put(rj54n1->pwup_gpio);
+
+ clk_put(rj54n1->clk);
+ v4l2_ctrl_handler_free(&rj54n1->hdl);
+ v4l2_async_unregister_subdev(&rj54n1->subdev);
+
+ return 0;
+}
+
+static const struct i2c_device_id rj54n1_id[] = {
+ { "rj54n1cb0c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rj54n1_id);
+
+static struct i2c_driver rj54n1_i2c_driver = {
+ .driver = {
+ .name = "rj54n1cb0c",
+ },
+ .probe = rj54n1_probe,
+ .remove = rj54n1_remove,
+ .id_table = rj54n1_id,
+};
+
+module_i2c_driver(rj54n1_i2c_driver);
+
+MODULE_DESCRIPTION("Sharp RJ54N1CB0C Camera driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index e1f8208581aa..1236683da8f7 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1892,7 +1892,7 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
val -= SCALING_GOODNESS_EXTREME;
dev_dbg(&client->dev, "w %d ask_w %d h %d ask_h %d goodness %d\n",
- w, ask_h, h, ask_h, val);
+ w, ask_w, h, ask_h, val);
return val;
}
@@ -2764,6 +2764,7 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
struct v4l2_fwnode_endpoint *bus_cfg;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
+ u32 rotation;
int i;
int rval;
@@ -2800,6 +2801,21 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
dev_dbg(dev, "lanes %u\n", hwcfg->lanes);
+ rval = fwnode_property_read_u32(fwnode, "rotation", &rotation);
+ if (!rval) {
+ switch (rotation) {
+ case 180:
+ hwcfg->module_board_orient =
+ SMIAPP_MODULE_BOARD_ORIENT_180;
+ /* Fall through */
+ case 0:
+ break;
+ default:
+ dev_err(dev, "invalid rotation %u\n", rotation);
+ goto out_err;
+ }
+ }
+
/* NVM size is not mandatory */
fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
@@ -3171,4 +3187,4 @@ module_i2c_driver(smiapp_i2c_driver);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ camera module driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 806383500313..14377af7c888 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -834,7 +834,7 @@ static int ov772x_set_params(struct ov772x_priv *priv,
* set COM8
*/
if (priv->band_filter) {
- ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+ ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
0xff, 256 - priv->band_filter);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 393bbbbbaad7..44c41933415a 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1918,7 +1918,8 @@ static int tc358743_probe_of(struct tc358743_state *state)
endpoint = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep));
if (IS_ERR(endpoint)) {
dev_err(dev, "failed to parse endpoint\n");
- return PTR_ERR(endpoint);
+ ret = PTR_ERR(endpoint);
+ goto put_node;
}
if (endpoint->bus_type != V4L2_MBUS_CSI2 ||
@@ -2013,6 +2014,8 @@ disable_clk:
clk_disable_unprepare(refclk);
free_endpoint:
v4l2_fwnode_endpoint_free(endpoint);
+put_node:
+ of_node_put(ep);
return ret;
}
#else
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 039a92c3294a..d114ac5243ec 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2570,7 +2570,7 @@ static int tda1997x_probe(struct i2c_client *client,
id->name, i2c_adapter_id(client->adapter),
client->addr);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
- sd->entity.function = MEDIA_ENT_F_DTV_DECODER;
+ sd->entity.function = MEDIA_ENT_F_DV_DECODER;
sd->entity.ops = &tda1997x_media_ops;
/* set allowed mbus modes based on chip, bus-type, and bus-width */
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 6a9890531d01..675b9ae212ab 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -1084,7 +1084,7 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
#if defined(CONFIG_MEDIA_CONTROLLER)
decoder->pad.flags = MEDIA_PAD_FL_SOURCE;
decoder->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- decoder->sd.entity.flags |= MEDIA_ENT_F_ATV_DECODER;
+ decoder->sd.entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&decoder->sd.entity, 1, &decoder->pad);
if (ret < 0) {
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index b162c2fe62c3..76e6bed5a1da 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -872,7 +872,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f = &format->format;
f->width = decoder->rect.width;
- f->height = decoder->rect.height;
+ f->height = decoder->rect.height / 2;
f->code = MEDIA_BUS_FMT_UYVY8_2X8;
f->field = V4L2_FIELD_ALTERNATE;
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 4599b7e28a8d..4f5c627579c7 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1010,7 +1010,7 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
#if defined(CONFIG_MEDIA_CONTROLLER)
device->pad.flags = MEDIA_PAD_FL_SOURCE;
device->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- device->sd.entity.flags |= MEDIA_ENT_F_ATV_DECODER;
+ device->sd.entity.function = MEDIA_ENT_F_ATV_DECODER;
error = media_entity_pads_init(&device->sd.entity, 1, &device->pad);
if (error < 0)
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 0b347cc19aa5..06d29d8f6be8 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/hwmon.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/list.h>
@@ -77,6 +78,9 @@ struct video_i2c_chip {
/* xfer function */
int (*xfer)(struct video_i2c_data *data, char *buf);
+
+ /* hwmon init function */
+ int (*hwmon_init)(struct video_i2c_data *data);
};
static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
@@ -101,6 +105,74 @@ static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
return (ret == 2) ? 0 : -EIO;
}
+#if IS_ENABLED(CONFIG_HWMON)
+
+static const u32 amg88xx_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info amg88xx_temp = {
+ .type = hwmon_temp,
+ .config = amg88xx_temp_config,
+};
+
+static const struct hwmon_channel_info *amg88xx_info[] = {
+ &amg88xx_temp,
+ NULL
+};
+
+static umode_t amg88xx_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct video_i2c_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int tmp = i2c_smbus_read_word_data(client, 0x0e);
+
+ if (tmp < 0)
+ return tmp;
+
+ /*
+ * Check the sign bit: the value is not two's complement but
+ * sign-magnitude, so negate the magnitude when the sign bit is set.
+ */
+ if (tmp & BIT(11))
+ tmp = -(tmp & 0x7ff);
+
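+ /* 1 LSB corresponds to 0.0625 degrees Celsius; hwmon expects millidegrees, hence * 625 / 10 */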
+ *val = (tmp * 625) / 10;
+
+ return 0;
+}
+
+static const struct hwmon_ops amg88xx_hwmon_ops = {
+ .is_visible = amg88xx_is_visible,
+ .read = amg88xx_read,
+};
+
+static const struct hwmon_chip_info amg88xx_chip_info = {
+ .ops = &amg88xx_hwmon_ops,
+ .info = amg88xx_info,
+};
+
+static int amg88xx_hwmon_init(struct video_i2c_data *data)
+{
+ void *hwmon = devm_hwmon_device_register_with_info(&data->client->dev,
+ "amg88xx", data, &amg88xx_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+#else
+#define amg88xx_hwmon_init NULL
+#endif
+
#define AMG88XX 0
static const struct video_i2c_chip video_i2c_chip[] = {
@@ -111,6 +183,7 @@ static const struct video_i2c_chip video_i2c_chip[] = {
.buffer_size = 128,
.bpp = 16,
.xfer = &amg88xx_xfer,
+ .hwmon_init = amg88xx_hwmon_init,
},
};
@@ -505,6 +578,14 @@ static int video_i2c_probe(struct i2c_client *client,
video_set_drvdata(&data->vdev, data);
i2c_set_clientdata(client, data);
+ if (data->chip->hwmon_init) {
+ ret = data->chip->hwmon_init(data);
+ if (ret < 0) {
+ dev_warn(&client->dev,
+ "failed to register hwmon device\n");
+ }
+ }
+
ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0)
goto error_unregister_device;
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 1658816a9844..bc9825f4a73d 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -770,7 +770,7 @@ static int vs6624_probe(struct i2c_client *client,
return ret;
}
/* wait 100ms before any further i2c writes are performed */
- mdelay(100);
+ msleep(100);
sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
if (sensor == NULL)
@@ -782,7 +782,7 @@ static int vs6624_probe(struct i2c_client *client,
vs6624_writeregs(sd, vs6624_p1);
vs6624_write(sd, VS6624_MICRO_EN, 0x2);
vs6624_write(sd, VS6624_DIO_EN, 0x1);
- mdelay(10);
+ usleep_range(10000, 11000);
vs6624_writeregs(sd, vs6624_p2);
vs6624_writeregs(sd, vs6624_default);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index ae59c3177555..fcdf3d5dc4b6 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -16,9 +16,6 @@
* GNU General Public License for more details.
*/
-/* We need to access legacy defines from linux/media.h */
-#define __NEED_MEDIA_LEGACY_API
-
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/idr.h>
@@ -28,6 +25,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/usb.h>
+#include <linux/version.h>
#include <media/media-device.h>
#include <media/media-devnode.h>
@@ -35,6 +33,16 @@
#ifdef CONFIG_MEDIA_CONTROLLER
+/*
+ * Legacy defines from linux/media.h. This is the only place we need this
+ * so we just define it here. The media.h header doesn't expose it to the
+ * kernel to prevent it from being used by drivers, but here (and only here!)
+ * we need it to handle the legacy behavior.
+ */
+#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
+#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \
+ MEDIA_ENT_SUBTYPE_MASK)
+
/* -----------------------------------------------------------------------------
* Userspace API
*/
@@ -259,6 +267,7 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
memset(&kentity, 0, sizeof(kentity));
kentity.id = entity->graph_obj.id;
kentity.function = entity->function;
+ kentity.flags = entity->flags;
strlcpy(kentity.name, entity->name,
sizeof(kentity.name));
@@ -324,6 +333,7 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
kpad.id = pad->graph_obj.id;
kpad.entity_id = pad->entity->graph_obj.id;
kpad.flags = pad->flags;
+ kpad.index = pad->index;
if (copy_to_user(upad, &kpad, sizeof(kpad)))
ret = -EFAULT;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index de3f44b8dec6..cf05e11da01b 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3511,7 +3511,7 @@ static void bttv_irq_debug_low_latency(struct bttv *btv, u32 rc)
}
pr_notice("%d: Uhm. Looks like we have unusual high IRQ latencies\n",
btv->c.nr);
- pr_notice("%d: Lets try to catch the culpit red-handed ...\n",
+ pr_notice("%d: Let's try to catch the culprit red-handed ...\n",
btv->c.nr);
dump_stack();
}
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 2e33b7236672..b98de2a22f78 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1739,9 +1739,9 @@ static const struct dvb_frontend_ops dst_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DST DVB-T",
- .frequency_min = 137000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 137 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO |
FE_CAN_QAM_16 |
@@ -1768,10 +1768,10 @@ static const struct dvb_frontend_ops dst_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "DST DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
/* . symbol_rate_tolerance = ???,*/
@@ -1797,9 +1797,9 @@ static const struct dvb_frontend_ops dst_dvbc_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "DST DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 51000000,
- .frequency_max = 858000000,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_FEC_AUTO |
@@ -1826,9 +1826,9 @@ static const struct dvb_frontend_ops dst_atsc_ops = {
.delsys = { SYS_ATSC },
.info = {
.name = "DST ATSC",
- .frequency_stepsize = 62500,
- .frequency_min = 510000000,
- .frequency_max = 858000000,
+ .frequency_min_hz = 510 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO | FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 5ef6e2051d45..2f810b7130e6 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -386,10 +386,6 @@ static int advbt771_samsung_tdtc9251dh0_tuner_calc_regs(struct dvb_frontend *fe,
bs = 0x02;
else if (c->frequency < 470000000)
bs = 0x02;
- else if (c->frequency < 600000000)
- bs = 0x08;
- else if (c->frequency < 730000000)
- bs = 0x08;
else
bs = 0x08;
@@ -606,8 +602,8 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
if (card->fe != NULL) {
card->fe->ops.tuner_ops.calc_regs = thomson_dtt7579_tuner_calc_regs;
- card->fe->ops.info.frequency_min = 174000000;
- card->fe->ops.info.frequency_max = 862000000;
+ card->fe->ops.info.frequency_min_hz = 174 * MHz;
+ card->fe->ops.info.frequency_max_hz = 862 * MHz;
}
break;
@@ -659,8 +655,8 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
card->fe = dvb_attach(mt352_attach, &advbt771_samsung_tdtc9251dh0_config, card->i2c_adapter);
if (card->fe != NULL) {
card->fe->ops.tuner_ops.calc_regs = advbt771_samsung_tdtc9251dh0_tuner_calc_regs;
- card->fe->ops.info.frequency_min = 174000000;
- card->fe->ops.info.frequency_max = 862000000;
+ card->fe->ops.info.frequency_min_hz = 174 * MHz;
+ card->fe->ops.info.frequency_max_hz = 862 * MHz;
}
break;
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index c8b1a6206c65..4885e833c052 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -670,7 +670,7 @@ static int cobalt_probe(struct pci_dev *pci_dev,
/* FIXME - module parameter arrays constrain max instances */
i = atomic_inc_return(&cobalt_instance) - 1;
- cobalt = kzalloc(sizeof(struct cobalt), GFP_ATOMIC);
+ cobalt = kzalloc(sizeof(struct cobalt), GFP_KERNEL);
if (cobalt == NULL)
return -ENOMEM;
cobalt->pci_dev = pci_dev;
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 8f314ca320c7..0c389a3fb4e5 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -1134,8 +1134,6 @@ free_mem:
free_workqueues:
destroy_workqueue(cx->in_work_queue);
err:
- if (retval == 0)
- retval = -ENODEV;
CX18_ERR("Error %d on initialization\n", retval);
v4l2_device_unregister(&cx->v4l2_dev);
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index 70aec9bb7e95..62bc8049b320 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -346,7 +346,7 @@ static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
mutex_unlock(&inter->fpga_mutex);
for (;;) {
- mdelay(50);
+ msleep(50);
mutex_lock(&inter->fpga_mutex);
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 9f50748fdf56..ed3210dc50bc 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1497,20 +1497,20 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the demod into reset and protect the eeprom */
mc417_gpio_clear(dev, GPIO_15 | GPIO_14);
- mdelay(100);
+ msleep(100);
/* Bring the demod and blaster out of reset */
mc417_gpio_set(dev, GPIO_15 | GPIO_14);
- mdelay(100);
+ msleep(100);
/* Force the TDA8295A into reset and back */
cx23885_gpio_enable(dev, GPIO_2, 1);
cx23885_gpio_set(dev, GPIO_2);
- mdelay(20);
+ msleep(20);
cx23885_gpio_clear(dev, GPIO_2);
- mdelay(20);
+ msleep(20);
cx23885_gpio_set(dev, GPIO_2);
- mdelay(20);
+ msleep(20);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1200:
/* GPIO-0 tda10048 demodulator reset */
@@ -1518,9 +1518,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000005);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,9 +1539,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000005);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1400:
@@ -1551,9 +1551,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000005);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
@@ -1564,9 +1564,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x000f0000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x0000000f);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
@@ -1578,9 +1578,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x000f0000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x0000000f);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
@@ -1596,9 +1596,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00040000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000004);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00040004);
break;
case CX23885_BOARD_TBS_6920:
@@ -1608,11 +1608,11 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(MC417_CTL, 0x00000036);
cx_write(MC417_OEN, 0x00001000);
cx_set(MC417_RWD, 0x00000002);
- mdelay(200);
+ msleep(200);
cx_clear(MC417_RWD, 0x00000800);
- mdelay(200);
+ msleep(200);
cx_set(MC417_RWD, 0x00000800);
- mdelay(200);
+ msleep(200);
break;
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
/* GPIO-0 INTA from CiMax1
@@ -1630,7 +1630,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00040000); /* GPIO as out */
/* GPIO1 and GPIO2 as INTA and INTB from CiMaxes, reset low */
cx_clear(GP0_IO, 0x00030004);
- mdelay(100);/* reset delay */
+ msleep(100);/* reset delay */
cx_set(GP0_IO, 0x00040004); /* GPIO as out, reset high */
cx_write(MC417_CTL, 0x00000037);/* enable GPIO3-18 pins */
/* GPIO-15 IN as ~ACK, rest as OUT */
@@ -1653,7 +1653,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx23885_gpio_enable(dev, GPIO_9 | GPIO_6 | GPIO_5, 1);
cx23885_gpio_set(dev, GPIO_9 | GPIO_6 | GPIO_5);
cx23885_gpio_clear(dev, GPIO_9);
- mdelay(20);
+ msleep(20);
cx23885_gpio_set(dev, GPIO_9);
break;
case CX23885_BOARD_MYGICA_X8506:
@@ -1664,18 +1664,18 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* GPIO-2 demod reset */
cx23885_gpio_enable(dev, GPIO_0 | GPIO_1 | GPIO_2, 1);
cx23885_gpio_clear(dev, GPIO_1 | GPIO_2);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_0 | GPIO_1 | GPIO_2);
- mdelay(100);
+ msleep(100);
break;
case CX23885_BOARD_MYGICA_X8558PRO:
/* GPIO-0 reset first ATBM8830 */
/* GPIO-1 reset second ATBM8830 */
cx23885_gpio_enable(dev, GPIO_0 | GPIO_1, 1);
cx23885_gpio_clear(dev, GPIO_0 | GPIO_1);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_0 | GPIO_1);
- mdelay(100);
+ msleep(100);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1699,11 +1699,11 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the demod into reset and protect the eeprom */
mc417_gpio_clear(dev, GPIO_14 | GPIO_13);
- mdelay(100);
+ msleep(100);
/* Bring the demod out of reset */
mc417_gpio_set(dev, GPIO_14);
- mdelay(100);
+ msleep(100);
/* CX24228 GPIO */
/* Connected to IF / Mux */
@@ -1728,7 +1728,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00060000); /* GPIO-1,2 as out */
/* GPIO-0 as INT, reset & TMS low */
cx_clear(GP0_IO, 0x00010006);
- mdelay(100);/* reset delay */
+ msleep(100);/* reset delay */
cx_set(GP0_IO, 0x00000004); /* reset high */
cx_write(MC417_CTL, 0x00000037);/* enable GPIO-3..18 pins */
/* GPIO-17 is TDO in, GPIO-15 is ~RDY in, rest is out */
@@ -1747,36 +1747,36 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
cx23885_gpio_clear(dev, GPIO_8 | GPIO_9);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_8 | GPIO_9);
- mdelay(100);
+ msleep(100);
break;
case CX23885_BOARD_AVERMEDIA_HC81R:
cx_clear(MC417_CTL, 1);
/* GPIO-0,1,2 setup direction as output */
cx_set(GP0_IO, 0x00070000);
- mdelay(10);
+ usleep_range(10000, 11000);
/* AF9013 demod reset */
cx_set(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_clear(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_set(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
/* demod tune? */
cx_clear(GP0_IO, 0x00030003);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_set(GP0_IO, 0x00020002);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_set(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_clear(GP0_IO, 0x00020002);
/* XC3028L tuner reset */
cx_set(GP0_IO, 0x00040004);
cx_clear(GP0_IO, 0x00040004);
cx_set(GP0_IO, 0x00040004);
- mdelay(60);
+ msleep(60);
break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_S952:
@@ -1785,7 +1785,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(MC417_CTL, 0x00000037);
cx23885_gpio_enable(dev, GPIO_2 | GPIO_11, 1);
cx23885_gpio_clear(dev, GPIO_2 | GPIO_11);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_2 | GPIO_11);
break;
case CX23885_BOARD_DVBSKY_T980C:
@@ -1807,7 +1807,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00060002); /* GPIO 1/2 as output */
cx_clear(GP0_IO, 0x00010004); /* GPIO 0 as input */
- mdelay(100); /* reset delay */
+ msleep(100); /* reset delay */
cx_set(GP0_IO, 0x00060004); /* GPIO as out, reset high */
cx_clear(GP0_IO, 0x00010002);
cx_write(MC417_CTL, 0x00000037); /* enable GPIO3-18 pins */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 94b996ff12a9..39804d830305 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -667,7 +667,7 @@ static void cx23885_reset(struct cx23885_dev *dev)
/* clear dma in progress */
cx23885_clear_bridge_error(dev);
- mdelay(100);
+ msleep(100);
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
720*4, 0);
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
deleted file mode 100644
index ada26d4acfb4..000000000000
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "cx25821-video.h"
-#include "cx25821-audio-upstream.h"
-
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/syscalls.h>
-#include <linux/file.h>
-#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
-MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
-MODULE_LICENSE("GPL");
-
-static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF |
- FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR;
-
-static int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc)
-{
- unsigned int i, lines;
- u32 cdt;
-
- if (ch->cmds_start == 0) {
- cx_write(ch->ptr1_reg, 0);
- cx_write(ch->ptr2_reg, 0);
- cx_write(ch->cnt2_reg, 0);
- cx_write(ch->cnt1_reg, 0);
- return 0;
- }
-
- bpl = (bpl + 7) & ~7; /* alignment */
- cdt = ch->cdt;
- lines = ch->fifo_size / bpl;
-
- if (lines > 3)
- lines = 3;
-
- BUG_ON(lines < 2);
-
- /* write CDT */
- for (i = 0; i < lines; i++) {
- cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
- cx_write(cdt + 16 * i + 4, 0);
- cx_write(cdt + 16 * i + 8, 0);
- cx_write(cdt + 16 * i + 12, 0);
- }
-
- /* write CMDS */
- cx_write(ch->cmds_start + 0, risc);
-
- cx_write(ch->cmds_start + 4, 0);
- cx_write(ch->cmds_start + 8, cdt);
- cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW);
- cx_write(ch->cmds_start + 16, ch->ctrl_start);
-
- /* IQ size */
- cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW);
-
- for (i = 24; i < 80; i += 4)
- cx_write(ch->cmds_start + i, 0);
-
- /* fill registers */
- cx_write(ch->ptr1_reg, ch->fifo_start);
- cx_write(ch->ptr2_reg, cdt);
- cx_write(ch->cnt2_reg, AUDIO_CDT_SIZE_QW);
- cx_write(ch->cnt1_reg, AUDIO_CLUSTER_SIZE_QW - 1);
-
- return 0;
-}
-
-static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
- __le32 *rp,
- dma_addr_t databuf_phys_addr,
- unsigned int bpl,
- int fifo_enable)
-{
- unsigned int line;
- const struct sram_channel *sram_ch =
- dev->channels[dev->_audio_upstream_channel].sram_channels;
- int offset = 0;
-
- /* scan lines */
- for (line = 0; line < LINES_PER_AUDIO_BUFFER; line++) {
- *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl);
- *(rp++) = cpu_to_le32(databuf_phys_addr + offset);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
-
- /* Check if we need to enable the FIFO
- * after the first 3 lines.
- * For the upstream audio channel,
- * the risc engine will enable the FIFO */
- if (fifo_enable && line == 2) {
- *(rp++) = RISC_WRITECR;
- *(rp++) = sram_ch->dma_ctl;
- *(rp++) = sram_ch->fld_aud_fifo_en;
- *(rp++) = 0x00000020;
- }
-
- offset += AUDIO_LINE_SIZE;
- }
-
- return rp;
-}
-
-static int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int bpl, unsigned int lines)
-{
- __le32 *rp;
- int fifo_enable = 0;
- int frame = 0, i = 0;
- int frame_size = AUDIO_DATA_BUF_SZ;
- int databuf_offset = 0;
- int risc_flag = RISC_CNT_INC;
- dma_addr_t risc_phys_jump_addr;
-
- /* Virtual address of Risc buffer program */
- rp = dev->_risc_virt_addr;
-
- /* sync instruction */
- *(rp++) = cpu_to_le32(RISC_RESYNC | AUDIO_SYNC_LINE);
-
- for (frame = 0; frame < NUM_AUDIO_FRAMES; frame++) {
- databuf_offset = frame_size * frame;
-
- if (frame == 0) {
- fifo_enable = 1;
- risc_flag = RISC_CNT_RESET;
- } else {
- fifo_enable = 0;
- risc_flag = RISC_CNT_INC;
- }
-
- /* Calculate physical jump address */
- if ((frame + 1) == NUM_AUDIO_FRAMES) {
- risc_phys_jump_addr =
- dev->_risc_phys_start_addr +
- RISC_SYNC_INSTRUCTION_SIZE;
- } else {
- risc_phys_jump_addr =
- dev->_risc_phys_start_addr +
- RISC_SYNC_INSTRUCTION_SIZE +
- AUDIO_RISC_DMA_BUF_SIZE * (frame + 1);
- }
-
- rp = cx25821_risc_field_upstream_audio(dev, rp,
- dev->_audiodata_buf_phys_addr + databuf_offset,
- bpl, fifo_enable);
-
- if (USE_RISC_NOOP_AUDIO) {
- for (i = 0; i < NUM_NO_OPS; i++)
- *(rp++) = cpu_to_le32(RISC_NOOP);
- }
-
- /* Loop to (Nth)FrameRISC or to Start of Risc program &
- * generate IRQ */
- *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
-
- /* Recalculate virtual address based on frame index */
- rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 +
- (AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4);
- }
-
- return 0;
-}
-
-static void cx25821_free_memory_audio(struct cx25821_dev *dev)
-{
- if (dev->_risc_virt_addr) {
- pci_free_consistent(dev->pci, dev->_audiorisc_size,
- dev->_risc_virt_addr, dev->_risc_phys_addr);
- dev->_risc_virt_addr = NULL;
- }
-
- if (dev->_audiodata_buf_virt_addr) {
- pci_free_consistent(dev->pci, dev->_audiodata_buf_size,
- dev->_audiodata_buf_virt_addr,
- dev->_audiodata_buf_phys_addr);
- dev->_audiodata_buf_virt_addr = NULL;
- }
-}
-
-void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
-{
- const struct sram_channel *sram_ch =
- dev->channels[AUDIO_UPSTREAM_SRAM_CHANNEL_B].sram_channels;
- u32 tmp = 0;
-
- if (!dev->_audio_is_running) {
- printk(KERN_DEBUG
-		pr_fmt("No audio file is currently running; returning!\n"));
- return;
- }
- /* Disable RISC interrupts */
- cx_write(sram_ch->int_msk, 0);
-
- /* Turn OFF risc and fifo enable in AUD_DMA_CNTRL */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_write(sram_ch->dma_ctl,
- tmp & ~(sram_ch->fld_aud_fifo_en | sram_ch->fld_aud_risc_en));
-
- /* Clear data buffer memory */
- if (dev->_audiodata_buf_virt_addr)
- memset(dev->_audiodata_buf_virt_addr, 0,
- dev->_audiodata_buf_size);
-
- dev->_audio_is_running = 0;
- dev->_is_first_audio_frame = 0;
- dev->_audioframe_count = 0;
- dev->_audiofile_status = END_OF_FILE;
-
- flush_work(&dev->_audio_work_entry);
-
- kfree(dev->_audiofilename);
-}
-
-void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
-{
- if (dev->_audio_is_running)
- cx25821_stop_upstream_audio(dev);
-
- cx25821_free_memory_audio(dev);
-}
-
-static int cx25821_get_audio_data(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- struct file *file;
- int frame_index_temp = dev->_audioframe_index;
- int i = 0;
- int frame_size = AUDIO_DATA_BUF_SZ;
- int frame_offset = frame_size * frame_index_temp;
- char mybuf[AUDIO_LINE_SIZE];
- loff_t file_offset = dev->_audioframe_count * frame_size;
- char *p = NULL;
-
- if (dev->_audiofile_status == END_OF_FILE)
- return 0;
-
- file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(file)) {
- pr_err("%s(): ERROR opening file(%s) with errno = %ld!\n",
- __func__, dev->_audiofilename, -PTR_ERR(file));
- return PTR_ERR(file);
- }
-
- if (dev->_audiodata_buf_virt_addr)
- p = (char *)dev->_audiodata_buf_virt_addr + frame_offset;
-
- for (i = 0; i < dev->_audio_lines_count; i++) {
- int n = kernel_read(file, mybuf, AUDIO_LINE_SIZE, &file_offset);
- if (n < AUDIO_LINE_SIZE) {
- pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
- __func__);
- dev->_audiofile_status = END_OF_FILE;
- fput(file);
- return 0;
- }
- dev->_audiofile_status = IN_PROGRESS;
- if (p) {
- memcpy(p, mybuf, n);
- p += n;
- }
- }
- dev->_audioframe_count++;
- fput(file);
-
- return 0;
-}
-
-static void cx25821_audioups_handler(struct work_struct *work)
-{
- struct cx25821_dev *dev = container_of(work, struct cx25821_dev,
- _audio_work_entry);
-
- if (!dev) {
-		pr_err("ERROR %s(): container_of(work_struct) FAILED!\n",
- __func__);
- return;
- }
-
- cx25821_get_audio_data(dev, dev->channels[dev->_audio_upstream_channel].
- sram_channels);
-}
-
-static int cx25821_openfile_audio(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- char *p = (void *)dev->_audiodata_buf_virt_addr;
- struct file *file;
- loff_t file_offset = 0;
- int i, j;
-
- file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(file)) {
- pr_err("%s(): ERROR opening file(%s) with errno = %ld!\n",
-			__func__, dev->_audiofilename, -PTR_ERR(file));
- return PTR_ERR(file);
- }
-
- for (j = 0; j < NUM_AUDIO_FRAMES; j++) {
- for (i = 0; i < dev->_audio_lines_count; i++) {
- char buf[AUDIO_LINE_SIZE];
- loff_t offset = file_offset;
- int n = kernel_read(file, buf, AUDIO_LINE_SIZE, &file_offset);
-
- if (n < AUDIO_LINE_SIZE) {
- pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
- __func__);
- dev->_audiofile_status = END_OF_FILE;
- fput(file);
- return 0;
- }
-
- if (p)
- memcpy(p + offset, buf, n);
- }
- dev->_audioframe_count++;
- }
- dev->_audiofile_status = IN_PROGRESS;
- fput(file);
- return 0;
-}
-
-static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch,
- int bpl)
-{
- int ret = 0;
- dma_addr_t dma_addr;
- dma_addr_t data_dma_addr;
-
- cx25821_free_memory_audio(dev);
-
- dev->_risc_virt_addr = pci_alloc_consistent(dev->pci,
- dev->audio_upstream_riscbuf_size, &dma_addr);
- dev->_risc_virt_start_addr = dev->_risc_virt_addr;
- dev->_risc_phys_start_addr = dma_addr;
- dev->_risc_phys_addr = dma_addr;
- dev->_audiorisc_size = dev->audio_upstream_riscbuf_size;
-
- if (!dev->_risc_virt_addr) {
- printk(KERN_DEBUG
- pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning\n"));
- return -ENOMEM;
- }
- /* Clear out memory at address */
- memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size);
-
- /* For Audio Data buffer allocation */
- dev->_audiodata_buf_virt_addr = pci_alloc_consistent(dev->pci,
- dev->audio_upstream_databuf_size, &data_dma_addr);
- dev->_audiodata_buf_phys_addr = data_dma_addr;
- dev->_audiodata_buf_size = dev->audio_upstream_databuf_size;
-
- if (!dev->_audiodata_buf_virt_addr) {
- printk(KERN_DEBUG
- pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning\n"));
- return -ENOMEM;
- }
- /* Clear out memory at address */
- memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size);
-
- ret = cx25821_openfile_audio(dev, sram_ch);
- if (ret < 0)
- return ret;
-
- /* Creating RISC programs */
- ret = cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl,
- dev->_audio_lines_count);
- if (ret < 0) {
- printk(KERN_DEBUG
- pr_fmt("ERROR creating audio upstream RISC programs!\n"));
- goto error;
- }
-
- return 0;
-
-error:
- return ret;
-}
-
-static int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
- u32 status)
-{
- int i = 0;
- u32 int_msk_tmp;
- const struct sram_channel *channel = dev->channels[chan_num].sram_channels;
- dma_addr_t risc_phys_jump_addr;
- __le32 *rp;
-
- if (status & FLD_AUD_SRC_RISCI1) {
- /* Get interrupt_index of the program that interrupted */
- u32 prog_cnt = cx_read(channel->gpcnt);
-
- /* Since we've identified our IRQ, clear our bits from the
- * interrupt mask and interrupt status registers */
- cx_write(channel->int_msk, 0);
- cx_write(channel->int_stat, cx_read(channel->int_stat));
-
- spin_lock(&dev->slock);
-
- while (prog_cnt != dev->_last_index_irq) {
- /* Update _last_index_irq */
- if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1))
- dev->_last_index_irq++;
- else
- dev->_last_index_irq = 0;
-
- dev->_audioframe_index = dev->_last_index_irq;
-
- schedule_work(&dev->_audio_work_entry);
- }
-
- if (dev->_is_first_audio_frame) {
- dev->_is_first_audio_frame = 0;
-
- if (dev->_risc_virt_start_addr != NULL) {
- risc_phys_jump_addr =
- dev->_risc_phys_start_addr +
- RISC_SYNC_INSTRUCTION_SIZE +
- AUDIO_RISC_DMA_BUF_SIZE;
-
- rp = cx25821_risc_field_upstream_audio(dev,
- dev->_risc_virt_start_addr + 1,
- dev->_audiodata_buf_phys_addr,
- AUDIO_LINE_SIZE, FIFO_DISABLE);
-
- if (USE_RISC_NOOP_AUDIO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
- *(rp++) =
- cpu_to_le32(RISC_NOOP);
- }
- }
- /* Jump to 2nd Audio Frame */
- *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 |
- RISC_CNT_RESET);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
- }
- }
-
- spin_unlock(&dev->slock);
- } else {
- if (status & FLD_AUD_SRC_OF)
- pr_warn("%s(): Audio Received Overflow Error Interrupt!\n",
- __func__);
-
- if (status & FLD_AUD_SRC_SYNC)
- pr_warn("%s(): Audio Received Sync Error Interrupt!\n",
- __func__);
-
- if (status & FLD_AUD_SRC_OPC_ERR)
- pr_warn("%s(): Audio Received OpCode Error Interrupt!\n",
- __func__);
-
- /* Read and write back the interrupt status register to clear
- * our bits */
- cx_write(channel->int_stat, cx_read(channel->int_stat));
- }
-
- if (dev->_audiofile_status == END_OF_FILE) {
- pr_warn("EOF Channel Audio Framecount = %d\n",
- dev->_audioframe_count);
- return -1;
- }
-	/* Else, set the interrupt mask register and re-enable the irq. */
- int_msk_tmp = cx_read(channel->int_msk);
- cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
-
- return 0;
-}
-
-static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
-{
- struct cx25821_dev *dev = dev_id;
- u32 audio_status;
- int handled = 0;
- const struct sram_channel *sram_ch;
-
- if (!dev)
-		return IRQ_NONE;
-
- sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels;
-
- audio_status = cx_read(sram_ch->int_stat);
-
-	/* Only deal with our interrupt */
-	if (audio_status) {
-		handled = cx25821_audio_upstream_irq(dev,
-				dev->_audio_upstream_channel, audio_status);
-		if (handled < 0)
-			cx25821_stop_upstream_audio(dev);
-		else
-			handled = 1;
-	}
-
- return IRQ_RETVAL(handled);
-}
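
A note on the return path above: IRQ_RETVAL(x) reports IRQ_HANDLED for nonzero x and IRQ_NONE for zero, so `handled` must end up nonzero whenever our status bits were processed (the helper itself returns 0 on success, which is why the original `handled += handled` could never report a handled interrupt). A toy user-space restatement of that convention:

    #include <stdio.h>

    /* User-space stand-ins for the kernel's IRQ return convention. */
    enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };
    #define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)

    int main(void)
    {
            printf("handled=0 -> %d (IRQ_NONE)\n", IRQ_RETVAL(0));
            printf("handled=1 -> %d (IRQ_HANDLED)\n", IRQ_RETVAL(1));
            return 0;
    }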
-
-static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- int count = 0;
- u32 tmp;
-
- do {
-		/* Wait 10 microseconds before checking whether the FIFO
-		 * is turned ON. */
- udelay(10);
-
- tmp = cx_read(sram_ch->dma_ctl);
-
- /* 10 millisecond timeout */
- if (count++ > 1000) {
- pr_err("ERROR: %s() fifo is NOT turned on. Timeout!\n",
- __func__);
- return;
- }
-
- } while (!(tmp & sram_ch->fld_aud_fifo_en));
-
-}
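
The loop above is the classic bounded poll: read a register every 10 us and give up after 10 ms. In current kernels this is usually written with readx_poll_timeout() from <linux/iopoll.h>; a sketch of what that could look like here, assuming cx_read() expands with a local `dev` in scope as it does in this driver:

    #include <linux/iopoll.h>

    /* Sketch only: same 10 us poll interval and 10 ms timeout as above. */
    static int cx25821_wait_fifo_enable(struct cx25821_dev *dev,
                                        const struct sram_channel *ch)
    {
            u32 tmp;

            return readx_poll_timeout(cx_read, ch->dma_ctl, tmp,
                                      tmp & ch->fld_aud_fifo_en,
                                      10, 10 * 1000);
    }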
-
-static int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- u32 tmp = 0;
- int err = 0;
-
- /* Set the physical start address of the RISC program in the initial
- * program counter(IPC) member of the CMDS. */
- cx_write(sram_ch->cmds_start + 0, dev->_risc_phys_addr);
- /* Risc IPC High 64 bits 63-32 */
- cx_write(sram_ch->cmds_start + 4, 0);
-
- /* reset counter */
- cx_write(sram_ch->gpcnt_ctl, 3);
-
-	/* Set the line length (the hardware appears to work without this,
-	 * but program it anyway) */
- cx_write(sram_ch->aud_length, AUDIO_LINE_SIZE & FLD_AUD_DST_LN_LNGTH);
-
- /* Set the input mode to 16-bit */
- tmp = cx_read(sram_ch->aud_cfg);
- tmp |= FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE |
- FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D |
- FLD_AUD_SONY_MODE;
- cx_write(sram_ch->aud_cfg, tmp);
-
- /* Read and write back the interrupt status register to clear it */
- tmp = cx_read(sram_ch->int_stat);
- cx_write(sram_ch->int_stat, tmp);
-
- /* Clear our bits from the interrupt status register. */
- cx_write(sram_ch->int_stat, _intr_msk);
-
- /* Set the interrupt mask register, enable irq. */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
- tmp = cx_read(sram_ch->int_msk);
- cx_write(sram_ch->int_msk, tmp |= _intr_msk);
-
- err = request_irq(dev->pci->irq, cx25821_upstream_irq_audio,
- IRQF_SHARED, dev->name, dev);
- if (err < 0) {
- pr_err("%s: can't get upstream IRQ %d\n", dev->name,
- dev->pci->irq);
- goto fail_irq;
- }
-
- /* Start the DMA engine */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_set(sram_ch->dma_ctl, tmp | sram_ch->fld_aud_risc_en);
-
- dev->_audio_is_running = 1;
- dev->_is_first_audio_frame = 1;
-
-	/* The fifo_en bit is turned on by the first RISC program */
- cx25821_wait_fifo_enable(dev, sram_ch);
-
- return 0;
-
-fail_irq:
- cx25821_dev_unregister(dev);
- return err;
-}
-
-int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
-{
- const struct sram_channel *sram_ch;
- int err = 0;
-
- if (dev->_audio_is_running) {
-		pr_warn("Audio channel is still running; returning!\n");
- return 0;
- }
-
- dev->_audio_upstream_channel = channel_select;
- sram_ch = dev->channels[channel_select].sram_channels;
-
- /* Work queue */
- INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler);
-
- dev->_last_index_irq = 0;
- dev->_audio_is_running = 0;
- dev->_audioframe_count = 0;
- dev->_audiofile_status = RESET_STATUS;
- dev->_audio_lines_count = LINES_PER_AUDIO_BUFFER;
- _line_size = AUDIO_LINE_SIZE;
-
- if ((dev->input_audiofilename) &&
- (strcmp(dev->input_audiofilename, "") != 0))
- dev->_audiofilename = kstrdup(dev->input_audiofilename,
- GFP_KERNEL);
- else
- dev->_audiofilename = kstrdup(_defaultAudioName,
- GFP_KERNEL);
-
- if (!dev->_audiofilename) {
- err = -ENOMEM;
- goto error;
- }
-
- cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
- _line_size, 0);
-
- dev->audio_upstream_riscbuf_size =
- AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
- RISC_SYNC_INSTRUCTION_SIZE;
- dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
-
- /* Allocating buffers and prepare RISC program */
- err = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
- _line_size);
- if (err < 0) {
- pr_err("%s: Failed to set up Audio upstream buffers!\n",
- dev->name);
- goto error;
- }
- /* Start RISC engine */
- cx25821_start_audio_dma_upstream(dev, sram_ch);
-
- return 0;
-
-error:
- cx25821_dev_unregister(dev);
-
- return err;
-}
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.h b/drivers/media/pci/cx25821/cx25821-audio-upstream.h
deleted file mode 100644
index 2bc875d1ec9f..000000000000
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-
-#define NUM_AUDIO_PROGS 8
-#define NUM_AUDIO_FRAMES 8
-#define END_OF_FILE 0
-#define IN_PROGRESS 1
-#define RESET_STATUS -1
-#define FIFO_DISABLE 0
-#define FIFO_ENABLE 1
-#define NUM_NO_OPS 4
-
-#define RISC_READ_INSTRUCTION_SIZE 12
-#define RISC_JUMP_INSTRUCTION_SIZE 12
-#define RISC_WRITECR_INSTRUCTION_SIZE 16
-#define RISC_SYNC_INSTRUCTION_SIZE 4
-#define DWORD_SIZE 4
-#define AUDIO_SYNC_LINE 4
-
-#define LINES_PER_AUDIO_BUFFER 15
-#define AUDIO_LINE_SIZE 128
-#define AUDIO_DATA_BUF_SZ (AUDIO_LINE_SIZE * LINES_PER_AUDIO_BUFFER)
-
-#define USE_RISC_NOOP_AUDIO 1
-
-#ifdef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE \
- (LINES_PER_AUDIO_BUFFER * RISC_READ_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS * DWORD_SIZE + \
- RISC_JUMP_INSTRUCTION_SIZE)
-#endif
-
-#ifndef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE \
- (LINES_PER_AUDIO_BUFFER * RISC_READ_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
-#endif
-
-static int _line_size;
-static char *_defaultAudioName = "/root/audioGOOD.wav";
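
For reference, the per-frame program size these macros produce in the USE_RISC_NOOP_AUDIO case is 15 reads of 12 bytes, one 16-byte WRITECR, four 4-byte no-ops and a 12-byte jump. A quick stand-alone computation, including the total RISC buffer size allocated in cx25821_audio_upstream_init():

    #include <stdio.h>

    int main(void)
    {
            int lines = 15, read_sz = 12, writecr_sz = 16;
            int num_noops = 4, noop_sz = 4, jump_sz = 12;
            int sync_sz = 4, frames = 8;
            int per_frame = lines * read_sz + writecr_sz +
                            num_noops * noop_sz + jump_sz;

            printf("AUDIO_RISC_DMA_BUF_SIZE = %d bytes\n", per_frame); /* 224 */
            printf("riscbuf total = %d bytes\n", per_frame * frames + sync_sz);
            return 0;
    }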
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 040c6c251d3a..2f0171134f7e 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -428,7 +428,7 @@ static void cx25821_registers_init(struct cx25821_dev *dev)
tmp |= FLD_USE_ALT_PLL_REF;
cx_write(CLK_RST, tmp & ~(FLD_VID_I_CLK_NOE | FLD_VID_J_CLK_NOE));
- mdelay(100);
+ msleep(100);
}
int cx25821_sram_channel_setup(struct cx25821_dev *dev,
@@ -803,7 +803,7 @@ static void cx25821_initialize(struct cx25821_dev *dev)
cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
cx_write(PAD_CTRL, 0x12); /* for I2C */
cx25821_registers_init(dev); /* init Pecos registers */
- mdelay(100);
+ msleep(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
diff --git a/drivers/media/pci/cx25821/cx25821-gpio.c b/drivers/media/pci/cx25821/cx25821-gpio.c
index 76b8f619e55a..f5ffaf880e5f 100644
--- a/drivers/media/pci/cx25821/cx25821-gpio.c
+++ b/drivers/media/pci/cx25821/cx25821-gpio.c
@@ -88,7 +88,7 @@ void cx25821_gpio_init(struct cx25821_dev *dev)
default:
/* set GPIO 5 to select the path for Medusa/Athena */
cx25821_set_gpiopin_logicvalue(dev, 5, 1);
- mdelay(20);
+ msleep(20);
break;
}
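
The mdelay()-to-msleep() conversions in this series (here and in the cx88 hunks below) all apply the same rule: mdelay() busy-waits and burns CPU, so it is only justified in atomic context, while these call sites run in process context and may sleep. A sketch of the usual decision, following Documentation/timers/timers-howto.rst:

    /* Atomic context (IRQ handler, spinlock held): must busy-wait. */
    mdelay(1);

    /* Process context, delay of roughly 10 ms or more: sleep. */
    msleep(100);

    /* Process context, ~10 us up to ~10 ms: usleep_range() avoids
     * msleep()'s coarse jiffy-based granularity. */
    usleep_range(10000, 20000);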
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
deleted file mode 100644
index 6c838c8a7924..000000000000
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ /dev/null
@@ -1,673 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "cx25821-video.h"
-#include "cx25821-video-upstream.h"
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
-MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
-MODULE_LICENSE("GPL");
-
-static int _intr_msk = FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC |
- FLD_VID_SRC_OPC_ERR;
-
-int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc)
-{
- unsigned int i, lines;
- u32 cdt;
-
- if (ch->cmds_start == 0) {
- cx_write(ch->ptr1_reg, 0);
- cx_write(ch->ptr2_reg, 0);
- cx_write(ch->cnt2_reg, 0);
- cx_write(ch->cnt1_reg, 0);
- return 0;
- }
-
- bpl = (bpl + 7) & ~7; /* alignment */
- cdt = ch->cdt;
- lines = ch->fifo_size / bpl;
-
- if (lines > 4)
- lines = 4;
-
- BUG_ON(lines < 2);
-
- /* write CDT */
- for (i = 0; i < lines; i++) {
- cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
- cx_write(cdt + 16 * i + 4, 0);
- cx_write(cdt + 16 * i + 8, 0);
- cx_write(cdt + 16 * i + 12, 0);
- }
-
- /* write CMDS */
- cx_write(ch->cmds_start + 0, risc);
-
- cx_write(ch->cmds_start + 4, 0);
- cx_write(ch->cmds_start + 8, cdt);
- cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
- cx_write(ch->cmds_start + 16, ch->ctrl_start);
-
- cx_write(ch->cmds_start + 20, VID_IQ_SIZE_DW);
-
- for (i = 24; i < 80; i += 4)
- cx_write(ch->cmds_start + i, 0);
-
- /* fill registers */
- cx_write(ch->ptr1_reg, ch->fifo_start);
- cx_write(ch->ptr2_reg, cdt);
- cx_write(ch->cnt2_reg, (lines * 16) >> 3);
- cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
-
- return 0;
-}
-
-static __le32 *cx25821_update_riscprogram(struct cx25821_channel *chan,
- __le32 *rp, unsigned int offset,
- unsigned int bpl, u32 sync_line,
- unsigned int lines, int fifo_enable,
- int field_type)
-{
- struct cx25821_video_out_data *out = chan->out;
- unsigned int line, i;
- int dist_betwn_starts = bpl * 2;
-
- *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
-
- if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++)
- *(rp++) = cpu_to_le32(RISC_NOOP);
- }
-
- /* scan lines */
- for (line = 0; line < lines; line++) {
- *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl);
- *(rp++) = cpu_to_le32(out->_data_buf_phys_addr + offset);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
-
- if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1)) || !(out->is_60hz)) {
- offset += dist_betwn_starts;
- }
- }
-
- return rp;
-}
-
-static __le32 *cx25821_risc_field_upstream(struct cx25821_channel *chan, __le32 *rp,
- dma_addr_t databuf_phys_addr,
- unsigned int offset, u32 sync_line,
- unsigned int bpl, unsigned int lines,
- int fifo_enable, int field_type)
-{
- struct cx25821_video_out_data *out = chan->out;
- unsigned int line, i;
- const struct sram_channel *sram_ch = chan->sram_channels;
- int dist_betwn_starts = bpl * 2;
-
- /* sync instruction */
- if (sync_line != NO_SYNC_LINE)
- *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
-
- if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++)
- *(rp++) = cpu_to_le32(RISC_NOOP);
- }
-
- /* scan lines */
- for (line = 0; line < lines; line++) {
- *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl);
- *(rp++) = cpu_to_le32(databuf_phys_addr + offset);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
-
- if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1)) || !(out->is_60hz))
- /* to skip the other field line */
- offset += dist_betwn_starts;
-
- /* check if we need to enable the FIFO after the first 4 lines
- * For the upstream video channel, the risc engine will enable
- * the FIFO. */
- if (fifo_enable && line == 3) {
- *(rp++) = cpu_to_le32(RISC_WRITECR);
- *(rp++) = cpu_to_le32(sram_ch->dma_ctl);
- *(rp++) = cpu_to_le32(FLD_VID_FIFO_EN);
- *(rp++) = cpu_to_le32(0x00000001);
- }
- }
-
- return rp;
-}
-
-static int cx25821_risc_buffer_upstream(struct cx25821_channel *chan,
- struct pci_dev *pci,
- unsigned int top_offset,
- unsigned int bpl, unsigned int lines)
-{
- struct cx25821_video_out_data *out = chan->out;
- __le32 *rp;
- int fifo_enable = 0;
- /* get line count for single field */
- int singlefield_lines = lines >> 1;
- int odd_num_lines = singlefield_lines;
- int frame = 0;
- int frame_size = 0;
- int databuf_offset = 0;
- int risc_program_size = 0;
- int risc_flag = RISC_CNT_RESET;
- unsigned int bottom_offset = bpl;
- dma_addr_t risc_phys_jump_addr;
-
- if (out->is_60hz) {
- odd_num_lines = singlefield_lines + 1;
- risc_program_size = FRAME1_VID_PROG_SIZE;
- frame_size = (bpl == Y411_LINE_SZ) ?
- FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422;
- } else {
- risc_program_size = PAL_VID_PROG_SIZE;
- frame_size = (bpl == Y411_LINE_SZ) ?
- FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
- }
-
- /* Virtual address of Risc buffer program */
- rp = out->_dma_virt_addr;
-
- for (frame = 0; frame < NUM_FRAMES; frame++) {
- databuf_offset = frame_size * frame;
-
- if (UNSET != top_offset) {
- fifo_enable = (frame == 0) ? FIFO_ENABLE : FIFO_DISABLE;
- rp = cx25821_risc_field_upstream(chan, rp,
- out->_data_buf_phys_addr +
- databuf_offset, top_offset, 0, bpl,
- odd_num_lines, fifo_enable, ODD_FIELD);
- }
-
- fifo_enable = FIFO_DISABLE;
-
- /* Even Field */
- rp = cx25821_risc_field_upstream(chan, rp,
- out->_data_buf_phys_addr +
- databuf_offset, bottom_offset,
- 0x200, bpl, singlefield_lines,
- fifo_enable, EVEN_FIELD);
-
- if (frame == 0) {
- risc_flag = RISC_CNT_RESET;
- risc_phys_jump_addr = out->_dma_phys_start_addr +
- risc_program_size;
- } else {
- risc_phys_jump_addr = out->_dma_phys_start_addr;
- risc_flag = RISC_CNT_INC;
- }
-
-		/* Loop to the 2nd frame's RISC program or back to the
-		 * start of the RISC program, and generate an IRQ
-		 */
- *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
- }
-
- return 0;
-}
-
-void cx25821_stop_upstream_video(struct cx25821_channel *chan)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- const struct sram_channel *sram_ch = chan->sram_channels;
- u32 tmp = 0;
-
- if (!out->_is_running) {
-		pr_info("No video file is currently running; returning!\n");
- return;
- }
-
- /* Set the interrupt mask register, disable irq. */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) & ~(1 << sram_ch->irq_bit));
-
- /* Disable RISC interrupts */
- tmp = cx_read(sram_ch->int_msk);
- cx_write(sram_ch->int_msk, tmp & ~_intr_msk);
-
- /* Turn OFF risc and fifo enable */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_write(sram_ch->dma_ctl, tmp & ~(FLD_VID_FIFO_EN | FLD_VID_RISC_EN));
-
- free_irq(dev->pci->irq, chan);
-
- /* Clear data buffer memory */
- if (out->_data_buf_virt_addr)
- memset(out->_data_buf_virt_addr, 0, out->_data_buf_size);
-
- out->_is_running = 0;
- out->_is_first_frame = 0;
- out->_frame_count = 0;
- out->_file_status = END_OF_FILE;
-
- tmp = cx_read(VID_CH_MODE_SEL);
- cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
-}
-
-void cx25821_free_mem_upstream(struct cx25821_channel *chan)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
-
- if (out->_is_running)
- cx25821_stop_upstream_video(chan);
-
- if (out->_dma_virt_addr) {
- pci_free_consistent(dev->pci, out->_risc_size,
- out->_dma_virt_addr, out->_dma_phys_addr);
- out->_dma_virt_addr = NULL;
- }
-
- if (out->_data_buf_virt_addr) {
- pci_free_consistent(dev->pci, out->_data_buf_size,
- out->_data_buf_virt_addr,
- out->_data_buf_phys_addr);
- out->_data_buf_virt_addr = NULL;
- }
-}
-
-int cx25821_write_frame(struct cx25821_channel *chan,
- const char __user *data, size_t count)
-{
- struct cx25821_video_out_data *out = chan->out;
- int line_size = (out->_pixel_format == PIXEL_FRMT_411) ?
- Y411_LINE_SZ : Y422_LINE_SZ;
- int frame_size = 0;
- int frame_offset = 0;
- int curpos = out->curpos;
-
- if (out->is_60hz)
- frame_size = (line_size == Y411_LINE_SZ) ?
- FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422;
- else
- frame_size = (line_size == Y411_LINE_SZ) ?
- FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
-
- if (curpos == 0) {
- out->cur_frame_index = out->_frame_index;
- if (wait_event_interruptible(out->waitq, out->cur_frame_index != out->_frame_index))
- return -EINTR;
- out->cur_frame_index = out->_frame_index;
- }
-
- frame_offset = out->cur_frame_index ? frame_size : 0;
-
- if (frame_size - curpos < count)
- count = frame_size - curpos;
- if (copy_from_user((__force char *)out->_data_buf_virt_addr + frame_offset + curpos,
- data, count))
- return -EFAULT;
- curpos += count;
- if (curpos == frame_size) {
- out->_frame_count++;
- curpos = 0;
- }
- out->curpos = curpos;
-
- return count;
-}
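
cx25821_write_frame() above is a two-frame ping-pong: userspace data lands at offset 0 or frame_size depending on cur_frame_index, and a new frame only begins once the IRQ path has advanced _frame_index past the value sampled at curpos == 0. The offset selection reduced to a stand-alone model (toy sizes, single-threaded for clarity):

    #include <stdio.h>
    #include <string.h>

    #define FRAME_SIZE 8  /* toy size; the real frames are ~675 KiB each */

    int main(void)
    {
            char buf[2 * FRAME_SIZE];
            int frame_index = 0, i;

            for (i = 0; i < 4; i++) {
                    int off = frame_index ? FRAME_SIZE : 0; /* ping-pong */

                    memset(buf + off, 'A' + i, FRAME_SIZE);
                    printf("frame %d written at offset %d\n", i, off);
                    frame_index ^= 1; /* the IRQ handler advances this */
            }
            return 0;
    }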
-
-static int cx25821_upstream_buffer_prepare(struct cx25821_channel *chan,
- const struct sram_channel *sram_ch,
- int bpl)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- int ret = 0;
- dma_addr_t dma_addr;
- dma_addr_t data_dma_addr;
-
- if (out->_dma_virt_addr != NULL)
- pci_free_consistent(dev->pci, out->upstream_riscbuf_size,
- out->_dma_virt_addr, out->_dma_phys_addr);
-
- out->_dma_virt_addr = pci_alloc_consistent(dev->pci,
- out->upstream_riscbuf_size, &dma_addr);
- out->_dma_virt_start_addr = out->_dma_virt_addr;
- out->_dma_phys_start_addr = dma_addr;
- out->_dma_phys_addr = dma_addr;
- out->_risc_size = out->upstream_riscbuf_size;
-
- if (!out->_dma_virt_addr) {
- pr_err("FAILED to allocate memory for Risc buffer! Returning\n");
- return -ENOMEM;
- }
-
- /* Clear memory at address */
- memset(out->_dma_virt_addr, 0, out->_risc_size);
-
- if (out->_data_buf_virt_addr != NULL)
- pci_free_consistent(dev->pci, out->upstream_databuf_size,
- out->_data_buf_virt_addr,
- out->_data_buf_phys_addr);
- /* For Video Data buffer allocation */
- out->_data_buf_virt_addr = pci_alloc_consistent(dev->pci,
- out->upstream_databuf_size, &data_dma_addr);
- out->_data_buf_phys_addr = data_dma_addr;
- out->_data_buf_size = out->upstream_databuf_size;
-
- if (!out->_data_buf_virt_addr) {
- pr_err("FAILED to allocate memory for data buffer! Returning\n");
- return -ENOMEM;
- }
-
- /* Clear memory at address */
- memset(out->_data_buf_virt_addr, 0, out->_data_buf_size);
-
- /* Create RISC programs */
- ret = cx25821_risc_buffer_upstream(chan, dev->pci, 0, bpl,
- out->_lines_count);
- if (ret < 0) {
- pr_info("Failed creating Video Upstream Risc programs!\n");
- goto error;
- }
-
- return 0;
-
-error:
- return ret;
-}
-
-static int cx25821_video_upstream_irq(struct cx25821_channel *chan, u32 status)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- u32 int_msk_tmp;
- const struct sram_channel *channel = chan->sram_channels;
- int singlefield_lines = NTSC_FIELD_HEIGHT;
- int line_size_in_bytes = Y422_LINE_SZ;
- int odd_risc_prog_size = 0;
- dma_addr_t risc_phys_jump_addr;
- __le32 *rp;
-
- if (status & FLD_VID_SRC_RISC1) {
- /* We should only process one program per call */
- u32 prog_cnt = cx_read(channel->gpcnt);
-
- /* Since we've identified our IRQ, clear our bits from the
- * interrupt mask and interrupt status registers */
- int_msk_tmp = cx_read(channel->int_msk);
- cx_write(channel->int_msk, int_msk_tmp & ~_intr_msk);
- cx_write(channel->int_stat, _intr_msk);
-
- wake_up(&out->waitq);
-
- spin_lock(&dev->slock);
-
- out->_frame_index = prog_cnt;
-
- if (out->_is_first_frame) {
- out->_is_first_frame = 0;
-
- if (out->is_60hz) {
- singlefield_lines += 1;
- odd_risc_prog_size = ODD_FLD_NTSC_PROG_SIZE;
- } else {
- singlefield_lines = PAL_FIELD_HEIGHT;
- odd_risc_prog_size = ODD_FLD_PAL_PROG_SIZE;
- }
-
- if (out->_dma_virt_start_addr != NULL) {
- line_size_in_bytes =
- (out->_pixel_format ==
- PIXEL_FRMT_411) ? Y411_LINE_SZ :
- Y422_LINE_SZ;
- risc_phys_jump_addr =
- out->_dma_phys_start_addr +
- odd_risc_prog_size;
-
- rp = cx25821_update_riscprogram(chan,
- out->_dma_virt_start_addr, TOP_OFFSET,
- line_size_in_bytes, 0x0,
- singlefield_lines, FIFO_DISABLE,
- ODD_FIELD);
-
- /* Jump to Even Risc program of 1st Frame */
- *(rp++) = cpu_to_le32(RISC_JUMP);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
- }
- }
-
- spin_unlock(&dev->slock);
- } else {
- if (status & FLD_VID_SRC_UF)
- pr_err("%s(): Video Received Underflow Error Interrupt!\n",
- __func__);
-
- if (status & FLD_VID_SRC_SYNC)
- pr_err("%s(): Video Received Sync Error Interrupt!\n",
- __func__);
-
- if (status & FLD_VID_SRC_OPC_ERR)
- pr_err("%s(): Video Received OpCode Error Interrupt!\n",
- __func__);
- }
-
- if (out->_file_status == END_OF_FILE) {
- pr_err("EOF Channel 1 Framecount = %d\n", out->_frame_count);
- return -1;
- }
-	/* Else, set the interrupt mask register and re-enable the irq. */
- int_msk_tmp = cx_read(channel->int_msk);
- cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
-
- return 0;
-}
-
-static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
-{
- struct cx25821_channel *chan = dev_id;
- struct cx25821_dev *dev = chan->dev;
- u32 vid_status;
- int handled = 0;
- const struct sram_channel *sram_ch;
-
- if (!dev)
-		return IRQ_NONE;
-
- sram_ch = chan->sram_channels;
-
- vid_status = cx_read(sram_ch->int_stat);
-
-	/* Only deal with our interrupt */
-	if (vid_status) {
-		cx25821_video_upstream_irq(chan, vid_status);
-		handled = 1;
-	}
-
-	return IRQ_RETVAL(handled);
-}
-
-static void cx25821_set_pixelengine(struct cx25821_channel *chan,
- const struct sram_channel *ch,
- int pix_format)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- int width = WIDTH_D1;
- int height = out->_lines_count;
- int num_lines, odd_num_lines;
- u32 value;
- int vip_mode = OUTPUT_FRMT_656;
-
- value = ((pix_format & 0x3) << 12) | (vip_mode & 0x7);
- value &= 0xFFFFFFEF;
- value |= out->is_60hz ? 0 : 0x10;
- cx_write(ch->vid_fmt_ctl, value);
-
- /* set number of active pixels in each line.
- * Default is 720 pixels in both NTSC and PAL format */
- cx_write(ch->vid_active_ctl1, width);
-
- num_lines = (height / 2) & 0x3FF;
- odd_num_lines = num_lines;
-
- if (out->is_60hz)
- odd_num_lines += 1;
-
- value = (num_lines << 16) | odd_num_lines;
-
- /* set number of active lines in field 0 (top) and field 1 (bottom) */
- cx_write(ch->vid_active_ctl2, value);
-
- cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
-}
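
The vid_active_ctl2 write above packs the two field heights into one dword as (num_lines << 16) | odd_num_lines, with the odd NTSC field one line taller. A stand-alone check of the value for 480-line NTSC:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int height = 480;                     /* NTSC */
            uint32_t num = (height / 2) & 0x3FF;  /* 240 */
            uint32_t odd = num + 1;               /* 60 Hz: one line taller */
            uint32_t value = (num << 16) | odd;

            printf("vid_active_ctl2 = 0x%08x\n", value); /* 0x00f000f1 */
            return 0;
    }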
-
-static int cx25821_start_video_dma_upstream(struct cx25821_channel *chan,
- const struct sram_channel *sram_ch)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- u32 tmp = 0;
- int err = 0;
-
- /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
- * channel A-C
- */
- tmp = cx_read(VID_CH_MODE_SEL);
- cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
-
- /* Set the physical start address of the RISC program in the initial
- * program counter(IPC) member of the cmds.
- */
- cx_write(sram_ch->cmds_start + 0, out->_dma_phys_addr);
- /* Risc IPC High 64 bits 63-32 */
- cx_write(sram_ch->cmds_start + 4, 0);
-
- /* reset counter */
- cx_write(sram_ch->gpcnt_ctl, 3);
-
- /* Clear our bits from the interrupt status register. */
- cx_write(sram_ch->int_stat, _intr_msk);
-
- /* Set the interrupt mask register, enable irq. */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
- tmp = cx_read(sram_ch->int_msk);
- cx_write(sram_ch->int_msk, tmp |= _intr_msk);
-
- err = request_irq(dev->pci->irq, cx25821_upstream_irq,
- IRQF_SHARED, dev->name, chan);
- if (err < 0) {
- pr_err("%s: can't get upstream IRQ %d\n",
- dev->name, dev->pci->irq);
- goto fail_irq;
- }
-
- /* Start the DMA engine */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_set(sram_ch->dma_ctl, tmp | FLD_VID_RISC_EN);
-
- out->_is_running = 1;
- out->_is_first_frame = 1;
-
- return 0;
-
-fail_irq:
- cx25821_dev_unregister(dev);
- return err;
-}
-
-int cx25821_vidupstream_init(struct cx25821_channel *chan,
- int pixel_format)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- const struct sram_channel *sram_ch;
- u32 tmp;
- int err = 0;
- int data_frame_size = 0;
- int risc_buffer_size = 0;
-
- if (out->_is_running) {
-		pr_info("Video channel is still running; returning!\n");
- return 0;
- }
-
- sram_ch = chan->sram_channels;
-
- out->is_60hz = dev->tvnorm & V4L2_STD_525_60;
-
- /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
- * channel A-C
- */
- tmp = cx_read(VID_CH_MODE_SEL);
- cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
-
- out->_is_running = 0;
- out->_frame_count = 0;
- out->_file_status = RESET_STATUS;
- out->_lines_count = out->is_60hz ? 480 : 576;
- out->_pixel_format = pixel_format;
- out->_line_size = (out->_pixel_format == PIXEL_FRMT_422) ?
- (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
- data_frame_size = out->is_60hz ? NTSC_DATA_BUF_SZ : PAL_DATA_BUF_SZ;
- risc_buffer_size = out->is_60hz ?
- NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
-
- out->curpos = 0;
- init_waitqueue_head(&out->waitq);
-
-	err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
-			out->_line_size, 0);
-	if (err < 0)
-		goto error;
-
- /* setup fifo + format */
- cx25821_set_pixelengine(chan, sram_ch, out->_pixel_format);
-
- out->upstream_riscbuf_size = risc_buffer_size * 2;
- out->upstream_databuf_size = data_frame_size * 2;
-
- /* Allocating buffers and prepare RISC program */
- err = cx25821_upstream_buffer_prepare(chan, sram_ch, out->_line_size);
- if (err < 0) {
- pr_err("%s: Failed to set up Video upstream buffers!\n",
- dev->name);
- goto error;
- }
-
- cx25821_start_video_dma_upstream(chan, sram_ch);
-
- return 0;
-
-error:
- cx25821_dev_unregister(dev);
-
- return err;
-}
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.h b/drivers/media/pci/cx25821/cx25821-video-upstream.h
deleted file mode 100644
index b6cf95f2d11b..000000000000
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-
-#define OUTPUT_FRMT_656 0
-#define OPEN_FILE_1 0
-#define NUM_PROGS 8
-#define NUM_FRAMES 2
-#define ODD_FIELD 0
-#define EVEN_FIELD 1
-#define TOP_OFFSET 0
-#define FIFO_DISABLE 0
-#define FIFO_ENABLE 1
-#define TEST_FRAMES 5
-#define END_OF_FILE 0
-#define IN_PROGRESS 1
-#define RESET_STATUS -1
-#define NUM_NO_OPS 5
-
-/* PAL and NTSC line sizes and number of lines. */
-#define WIDTH_D1 720
-#define NTSC_LINES_PER_FRAME 480
-#define PAL_LINES_PER_FRAME 576
-#define PAL_LINE_SZ 1440
-#define Y422_LINE_SZ 1440
-#define Y411_LINE_SZ 1080
-#define NTSC_FIELD_HEIGHT 240
-#define NTSC_ODD_FLD_LINES 241
-#define PAL_FIELD_HEIGHT 288
-
-#define FRAME_SIZE_NTSC_Y422 (NTSC_LINES_PER_FRAME * Y422_LINE_SZ)
-#define FRAME_SIZE_NTSC_Y411 (NTSC_LINES_PER_FRAME * Y411_LINE_SZ)
-#define FRAME_SIZE_PAL_Y422 (PAL_LINES_PER_FRAME * Y422_LINE_SZ)
-#define FRAME_SIZE_PAL_Y411 (PAL_LINES_PER_FRAME * Y411_LINE_SZ)
-
-#define NTSC_DATA_BUF_SZ (Y422_LINE_SZ * NTSC_LINES_PER_FRAME)
-#define PAL_DATA_BUF_SZ (Y422_LINE_SZ * PAL_LINES_PER_FRAME)
-
-#define RISC_WRITECR_INSTRUCTION_SIZE 16
-#define RISC_SYNC_INSTRUCTION_SIZE 4
-#define JUMP_INSTRUCTION_SIZE 12
-#define MAXSIZE_NO_OPS 36
-#define DWORD_SIZE 4
-
-#define USE_RISC_NOOP_VIDEO 1
-
-#ifdef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
-
-#define PAL_VID_PROG_SIZE \
- ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE + 2 * NUM_NO_OPS * DWORD_SIZE)
-
-#define ODD_FLD_PAL_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define ODD_FLD_NTSC_PROG_SIZE \
- (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define NTSC_US_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define NTSC_RISC_BUF_SIZE \
- (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-
-#define FRAME1_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE + 2 * NUM_NO_OPS * DWORD_SIZE)
-
-#endif
-
-#ifndef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE)
-
-#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
-
-#define PAL_VID_PROG_SIZE \
- ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE)
-
-#define ODD_FLD_PAL_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-
-#define ODD_FLD_NTSC_PROG_SIZE \
- (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-
-#define NTSC_US_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-
-#define NTSC_RISC_BUF_SIZE \
- (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-
-#define FRAME1_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE)
-
-#endif
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index b3eb2dabb30b..25eba4ac4499 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -432,18 +432,6 @@ extern int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
const struct sram_channel *ch,
unsigned int bpl, u32 risc);
-extern int cx25821_vidupstream_init(struct cx25821_channel *chan, int pixel_format);
-extern int cx25821_audio_upstream_init(struct cx25821_dev *dev,
- int channel_select);
-extern int cx25821_write_frame(struct cx25821_channel *chan,
- const char __user *data, size_t count);
-extern void cx25821_free_mem_upstream(struct cx25821_channel *chan);
-extern void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev);
-extern void cx25821_stop_upstream_video(struct cx25821_channel *chan);
-extern void cx25821_stop_upstream_audio(struct cx25821_dev *dev);
-extern int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc);
extern void cx25821_set_pixel_format(struct cx25821_dev *dev, int channel,
u32 format);
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index e5c3387cd1e8..89a65478ae36 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -962,8 +962,11 @@ static int cx88_audio_initdev(struct pci_dev *pci,
goto error;
/* If there's a wm8775 then add a Line-In ALC switch */
- if (core->sd_wm8775)
- snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
+ if (core->sd_wm8775) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
+ if (err < 0)
+ goto error;
+ }
strcpy(card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 4c92d2388c26..07e1483e987d 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3307,9 +3307,9 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
cx_write(MO_GP2_IO, 0xcf7);
- mdelay(50);
+ msleep(50);
cx_write(MO_GP2_IO, 0xef5);
- mdelay(50);
+ msleep(50);
cx_write(MO_GP2_IO, 0xcf7);
usleep_range(10000, 20000);
break;
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index ccfde28d4af2..90b208747e0f 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1226,9 +1226,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 1);
- mdelay(200);
+ msleep(200);
/* Select RF connector callback */
fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
@@ -1248,9 +1248,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 9);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
0x0e,
@@ -1267,9 +1267,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 1);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_5_gold,
0x0e,
@@ -1289,9 +1289,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 1);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&pchdtv_hd5500,
0x59,
@@ -1583,9 +1583,9 @@ static int dvb_register(struct cx8802_dev *dev)
cx_set(MO_GP0_IO, 0x0101);
cx_clear(MO_GP0_IO, 0x01);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 0x01);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(stv0299_attach,
&samsung_stv0299_config,
diff --git a/drivers/media/pci/ddbridge/Makefile b/drivers/media/pci/ddbridge/Makefile
index 9b9e35f171b7..5b6d5bbc38af 100644
--- a/drivers/media/pci/ddbridge/Makefile
+++ b/drivers/media/pci/ddbridge/Makefile
@@ -4,7 +4,8 @@
#
ddbridge-objs := ddbridge-main.o ddbridge-core.o ddbridge-ci.o \
- ddbridge-hw.o ddbridge-i2c.o ddbridge-max.o ddbridge-mci.o
+ ddbridge-hw.o ddbridge-i2c.o ddbridge-max.o ddbridge-mci.o \
+ ddbridge-sx8.o
obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index d5b0d1eaf3ad..c1b982e8e6c9 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1191,6 +1191,13 @@ static const struct lnbh25_config lnbh25_cfg = {
.data2_config = LNBH25_TEN
};
+static int has_lnbh25(struct i2c_adapter *i2c, u8 adr)
+{
+ u8 val;
+
+ return i2c_read_reg(i2c, adr, 0, &val) ? 0 : 1;
+}
+
static int demod_attach_stv0910(struct ddb_input *input, int type, int tsfast)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
@@ -1224,14 +1231,15 @@ static int demod_attach_stv0910(struct ddb_input *input, int type, int tsfast)
/* attach lnbh25 - leftshift by one as the lnbh25 driver expects 8bit
* i2c addresses
*/
- lnbcfg.i2c_address = (((input->nr & 1) ? 0x0d : 0x0c) << 1);
- if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
+ if (has_lnbh25(i2c, 0x0d))
+ lnbcfg.i2c_address = (((input->nr & 1) ? 0x0d : 0x0c) << 1);
+ else
lnbcfg.i2c_address = (((input->nr & 1) ? 0x09 : 0x08) << 1);
- if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
- dev_err(dev, "No LNBH25 found!\n");
- dvb_frontend_detach(dvb->fe);
- return -ENODEV;
- }
+
+ if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
+ dev_err(dev, "No LNBH25 found!\n");
+ dvb_frontend_detach(dvb->fe);
+ return -ENODEV;
}
return 0;
@@ -1584,8 +1592,8 @@ static int dvb_input_attach(struct ddb_input *input)
if (demod_attach_dummy(input) < 0)
goto err_detach;
break;
- case DDB_TUNER_MCI:
- if (ddb_fe_attach_mci(input) < 0)
+ case DDB_TUNER_MCI_SX8:
+ if (ddb_fe_attach_mci(input, port->type) < 0)
goto err_detach;
break;
default:
@@ -1842,6 +1850,7 @@ static void ddb_port_probe(struct ddb_port *port)
{
struct ddb *dev = port->dev;
u32 l = port->lnr;
+ struct ddb_link *link = &dev->link[l];
u8 id, type;
port->name = "NO MODULE";
@@ -1851,7 +1860,7 @@ static void ddb_port_probe(struct ddb_port *port)
/* Handle missing ports and ports without I2C */
if (dummy_tuner && !port->nr &&
- dev->link[0].ids.device == 0x0005) {
+ link->ids.device == 0x0005) {
port->name = "DUMMY";
port->class = DDB_PORT_TUNER;
port->type = DDB_TUNER_DUMMY;
@@ -1865,14 +1874,14 @@ static void ddb_port_probe(struct ddb_port *port)
return;
}
- if (port->nr == 1 && dev->link[l].info->type == DDB_OCTOPUS_CI &&
- dev->link[l].info->i2c_mask == 1) {
+ if (port->nr == 1 && link->info->type == DDB_OCTOPUS_CI &&
+ link->info->i2c_mask == 1) {
port->name = "NO TAB";
port->class = DDB_PORT_NONE;
return;
}
- if (dev->link[l].info->type == DDB_OCTOPUS_MAX) {
+ if (link->info->type == DDB_OCTOPUS_MAX) {
port->name = "DUAL DVB-S2 MAX";
port->type_name = "MXL5XX";
port->class = DDB_PORT_TUNER;
@@ -1883,17 +1892,17 @@ static void ddb_port_probe(struct ddb_port *port)
return;
}
- if (dev->link[l].info->type == DDB_OCTOPUS_MCI) {
- if (port->nr >= dev->link[l].info->mci)
+ if (link->info->type == DDB_OCTOPUS_MCI) {
+ if (port->nr >= link->info->mci_ports)
return;
port->name = "DUAL MCI";
port->type_name = "MCI";
port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_MCI;
+ port->type = DDB_TUNER_MCI + link->info->mci_type;
return;
}
- if (port->nr > 1 && dev->link[l].info->type == DDB_OCTOPUS_CI) {
+ if (port->nr > 1 && link->info->type == DDB_OCTOPUS_CI) {
port->name = "CI internal";
port->type_name = "INTERNAL";
port->class = DDB_PORT_CI;
@@ -1978,7 +1987,7 @@ static void ddb_port_probe(struct ddb_port *port)
port->class = DDB_PORT_TUNER;
if (id == 0x51) {
if (port->nr == 0 &&
- dev->link[l].info->ts_quirks & TS_QUIRK_REVERSED)
+ link->info->ts_quirks & TS_QUIRK_REVERSED)
port->type = DDB_TUNER_DVBS_STV0910_PR;
else
port->type = DDB_TUNER_DVBS_STV0910_P;
diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.c b/drivers/media/pci/ddbridge/ddbridge-hw.c
index 1d3ee6accdd5..f3cbac07b41f 100644
--- a/drivers/media/pci/ddbridge/ddbridge-hw.c
+++ b/drivers/media/pci/ddbridge/ddbridge-hw.c
@@ -318,7 +318,8 @@ static const struct ddb_info ddb_s2x_48 = {
.port_num = 4,
.i2c_mask = 0x00,
.tempmon_irq = 24,
- .mci = 4
+ .mci_ports = 4,
+ .mci_type = 0,
};
/****************************************************************************/
diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.c b/drivers/media/pci/ddbridge/ddbridge-i2c.c
index 667340c86ea7..5a28d7611713 100644
--- a/drivers/media/pci/ddbridge/ddbridge-i2c.c
+++ b/drivers/media/pci/ddbridge/ddbridge-i2c.c
@@ -73,7 +73,10 @@ static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
}
return -EIO;
}
- if (val & 0x70000)
+ val &= 0x70000;
+ if (val == 0x20000)
+ dev_err(dev->dev, "I2C bus error\n");
+ if (val)
return -EIO;
return 0;
}
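
The change above narrows the old blanket `val & 0x70000` check: the status word is masked to its three error bits first, 0x20000 is logged specifically as an I2C bus error, and any remaining error bit still yields -EIO. A stand-alone decode of exactly that logic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int samples[] = { 0x00000, 0x20000, 0x40000 };
            int i;

            for (i = 0; i < 3; i++) {
                    unsigned int val = samples[i] & 0x70000;

                    if (val == 0x20000)
                            printf("0x%05x: I2C bus error -> -EIO\n", samples[i]);
                    else if (val)
                            printf("0x%05x: error bit set -> -EIO\n", samples[i]);
                    else
                            printf("0x%05x: ok\n", samples[i]);
            }
            return 0;
    }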
diff --git a/drivers/media/pci/ddbridge/ddbridge-max.c b/drivers/media/pci/ddbridge/ddbridge-max.c
index 739e4b444cf4..8da1c7b91577 100644
--- a/drivers/media/pci/ddbridge/ddbridge-max.c
+++ b/drivers/media/pci/ddbridge/ddbridge-max.c
@@ -457,21 +457,29 @@ int ddb_fe_attach_mxl5xx(struct ddb_input *input)
/******************************************************************************/
/* MAX MCI related functions */
-int ddb_fe_attach_mci(struct ddb_input *input)
+int ddb_fe_attach_mci(struct ddb_input *input, u32 type)
{
struct ddb *dev = input->port->dev;
struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
struct ddb_port *port = input->port;
struct ddb_link *link = &dev->link[port->lnr];
int demod, tuner;
+ struct mci_cfg cfg;
demod = input->nr;
tuner = demod & 3;
- if (fmode == 3)
- tuner = 0;
- dvb->fe = ddb_mci_attach(input, 0, demod, &dvb->set_input);
+ switch (type) {
+ case DDB_TUNER_MCI_SX8:
+ cfg = ddb_max_sx8_cfg;
+ if (fmode == 3)
+ tuner = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dvb->fe = ddb_mci_attach(input, &cfg, demod, &dvb->set_input);
if (!dvb->fe) {
- dev_err(dev->dev, "No MAXSX8 found!\n");
+ dev_err(dev->dev, "No MCI card found!\n");
return -ENODEV;
}
if (!dvb->set_input) {
diff --git a/drivers/media/pci/ddbridge/ddbridge-max.h b/drivers/media/pci/ddbridge/ddbridge-max.h
index 82efc53baa94..9838c73973b6 100644
--- a/drivers/media/pci/ddbridge/ddbridge-max.h
+++ b/drivers/media/pci/ddbridge/ddbridge-max.h
@@ -25,6 +25,6 @@
int ddb_lnb_init_fmode(struct ddb *dev, struct ddb_link *link, u32 fm);
int ddb_fe_attach_mxl5xx(struct ddb_input *input);
-int ddb_fe_attach_mci(struct ddb_input *input);
+int ddb_fe_attach_mci(struct ddb_input *input, u32 type);
#endif /* _DDBRIDGE_MAX_H */
diff --git a/drivers/media/pci/ddbridge/ddbridge-mci.c b/drivers/media/pci/ddbridge/ddbridge-mci.c
index 4ac634fc96e4..97384ae9ad27 100644
--- a/drivers/media/pci/ddbridge/ddbridge-mci.c
+++ b/drivers/media/pci/ddbridge/ddbridge-mci.c
@@ -2,9 +2,9 @@
/*
* ddbridge-mci.c: Digital Devices microcode interface
*
- * Copyright (C) 2017 Digital Devices GmbH
- * Ralph Metzler <rjkm@metzlerbros.de>
- * Marcus Metzler <mocm@metzlerbros.de>
+ * Copyright (C) 2017-2018 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -22,42 +22,6 @@
static LIST_HEAD(mci_list);
-static const u32 MCLK = (1550000000 / 12);
-static const u32 MAX_DEMOD_LDPC_BITRATE = (1550000000 / 6);
-static const u32 MAX_LDPC_BITRATE = (720000000);
-
-struct mci_base {
- struct list_head mci_list;
- void *key;
- struct ddb_link *link;
- struct completion completion;
-
- struct device *dev;
- struct mutex tuner_lock; /* concurrent tuner access lock */
- u8 adr;
- struct mutex mci_lock; /* concurrent MCI access lock */
- int count;
-
- u8 tuner_use_count[MCI_TUNER_MAX];
- u8 assigned_demod[MCI_DEMOD_MAX];
- u32 used_ldpc_bitrate[MCI_DEMOD_MAX];
- u8 demod_in_use[MCI_DEMOD_MAX];
- u32 iq_mode;
-};
-
-struct mci {
- struct mci_base *base;
- struct dvb_frontend fe;
- int nr;
- int demod;
- int tuner;
- int first_time_lock;
- int started;
- struct mci_result signal_info;
-
- u32 bb_mode;
-};
-
static int mci_reset(struct mci *state)
{
struct ddb_link *link = state->base->link;
@@ -84,7 +48,7 @@ static int mci_reset(struct mci *state)
return 0;
}
-static int mci_config(struct mci *state, u32 config)
+int ddb_mci_config(struct mci *state, u32 config)
{
struct ddb_link *link = state->base->link;
@@ -122,16 +86,16 @@ static int _mci_cmd_unlocked(struct mci *state,
return 0;
}
-static int mci_cmd(struct mci *state,
- struct mci_command *command,
- struct mci_result *result)
+int ddb_mci_cmd(struct mci *state,
+ struct mci_command *command,
+ struct mci_result *result)
{
int stat;
mutex_lock(&state->base->mci_lock);
stat = _mci_cmd_unlocked(state,
(u32 *)command, sizeof(*command) / sizeof(u32),
- (u32 *)result, sizeof(*result) / sizeof(u32));
+ (u32 *)result, sizeof(*result) / sizeof(u32));
mutex_unlock(&state->base->mci_lock);
return stat;
}
@@ -143,344 +107,6 @@ static void mci_handler(void *priv)
complete(&base->completion);
}
-static void release(struct dvb_frontend *fe)
-{
- struct mci *state = fe->demodulator_priv;
-
- state->base->count--;
- if (state->base->count == 0) {
- list_del(&state->base->mci_list);
- kfree(state->base);
- }
- kfree(state);
-}
-
-static int read_status(struct dvb_frontend *fe, enum fe_status *status)
-{
- int stat;
- struct mci *state = fe->demodulator_priv;
- struct mci_command cmd;
- struct mci_result res;
-
- cmd.command = MCI_CMD_GETSTATUS;
- cmd.demod = state->demod;
- stat = mci_cmd(state, &cmd, &res);
- if (stat)
- return stat;
- *status = 0x00;
- if (res.status == SX8_DEMOD_WAIT_MATYPE)
- *status = 0x0f;
- if (res.status == SX8_DEMOD_LOCKED)
- *status = 0x1f;
- return stat;
-}
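
read_status() above collapses the SX8 firmware state into DVB fe_status bitmasks: 0x0f is FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC (everything except lock, for the wait-MATYPE state) and 0x1f adds FE_HAS_LOCK. A stand-alone decode using the standard uapi values:

    #include <stdio.h>

    /* Values as defined in the DVB uapi (linux/dvb/frontend.h). */
    enum { FE_HAS_SIGNAL = 0x01, FE_HAS_CARRIER = 0x02,
           FE_HAS_VITERBI = 0x04, FE_HAS_SYNC = 0x08, FE_HAS_LOCK = 0x10 };

    static void decode(unsigned int s)
    {
            printf("0x%02x:%s%s%s%s%s\n", s,
                   s & FE_HAS_SIGNAL  ? " SIGNAL"  : "",
                   s & FE_HAS_CARRIER ? " CARRIER" : "",
                   s & FE_HAS_VITERBI ? " VITERBI" : "",
                   s & FE_HAS_SYNC    ? " SYNC"    : "",
                   s & FE_HAS_LOCK    ? " LOCK"    : "");
    }

    int main(void)
    {
            decode(0x0f); /* SX8_DEMOD_WAIT_MATYPE: all but lock */
            decode(0x1f); /* SX8_DEMOD_LOCKED: full lock */
            return 0;
    }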
-
-static int mci_set_tuner(struct dvb_frontend *fe, u32 tuner, u32 on)
-{
- struct mci *state = fe->demodulator_priv;
- struct mci_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tuner = state->tuner;
- cmd.command = on ? SX8_CMD_INPUT_ENABLE : SX8_CMD_INPUT_DISABLE;
- return mci_cmd(state, &cmd, NULL);
-}
-
-static int stop(struct dvb_frontend *fe)
-{
- struct mci *state = fe->demodulator_priv;
- struct mci_command cmd;
- u32 input = state->tuner;
-
- memset(&cmd, 0, sizeof(cmd));
- if (state->demod != DEMOD_UNUSED) {
- cmd.command = MCI_CMD_STOP;
- cmd.demod = state->demod;
- mci_cmd(state, &cmd, NULL);
- if (state->base->iq_mode) {
- cmd.command = MCI_CMD_STOP;
- cmd.demod = state->demod;
- cmd.output = 0;
- mci_cmd(state, &cmd, NULL);
- mci_config(state, SX8_TSCONFIG_MODE_NORMAL);
- }
- }
- mutex_lock(&state->base->tuner_lock);
- state->base->tuner_use_count[input]--;
- if (!state->base->tuner_use_count[input])
- mci_set_tuner(fe, input, 0);
- if (state->demod < MCI_DEMOD_MAX)
- state->base->demod_in_use[state->demod] = 0;
- state->base->used_ldpc_bitrate[state->nr] = 0;
- state->demod = DEMOD_UNUSED;
- state->base->assigned_demod[state->nr] = DEMOD_UNUSED;
- state->base->iq_mode = 0;
- mutex_unlock(&state->base->tuner_lock);
- state->started = 0;
- return 0;
-}
-
-static int start(struct dvb_frontend *fe, u32 flags, u32 modmask, u32 ts_config)
-{
- struct mci *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- u32 used_ldpc_bitrate = 0, free_ldpc_bitrate;
- u32 used_demods = 0;
- struct mci_command cmd;
- u32 input = state->tuner;
- u32 bits_per_symbol = 0;
- int i, stat = 0;
-
- if (p->symbol_rate >= (MCLK / 2))
- flags &= ~1;
- if ((flags & 3) == 0)
- return -EINVAL;
-
- if (flags & 2) {
- u32 tmp = modmask;
-
- bits_per_symbol = 1;
- while (tmp & 1) {
- tmp >>= 1;
- bits_per_symbol++;
- }
- }
-
- mutex_lock(&state->base->tuner_lock);
- if (state->base->iq_mode) {
- stat = -EBUSY;
- goto unlock;
- }
- for (i = 0; i < MCI_DEMOD_MAX; i++) {
- used_ldpc_bitrate += state->base->used_ldpc_bitrate[i];
- if (state->base->demod_in_use[i])
- used_demods++;
- }
- if (used_ldpc_bitrate >= MAX_LDPC_BITRATE ||
- ((ts_config & SX8_TSCONFIG_MODE_MASK) >
- SX8_TSCONFIG_MODE_NORMAL && used_demods > 0)) {
- stat = -EBUSY;
- goto unlock;
- }
- free_ldpc_bitrate = MAX_LDPC_BITRATE - used_ldpc_bitrate;
- if (free_ldpc_bitrate > MAX_DEMOD_LDPC_BITRATE)
- free_ldpc_bitrate = MAX_DEMOD_LDPC_BITRATE;
-
- while (p->symbol_rate * bits_per_symbol > free_ldpc_bitrate)
- bits_per_symbol--;
-
- if (bits_per_symbol < 2) {
- stat = -EBUSY;
- goto unlock;
- }
- i = (p->symbol_rate > (MCLK / 2)) ? 3 : 7;
- while (i >= 0 && state->base->demod_in_use[i])
- i--;
- if (i < 0) {
- stat = -EBUSY;
- goto unlock;
- }
- state->base->demod_in_use[i] = 1;
- state->base->used_ldpc_bitrate[state->nr] = p->symbol_rate
- * bits_per_symbol;
- state->demod = i;
- state->base->assigned_demod[state->nr] = i;
-
- if (!state->base->tuner_use_count[input])
- mci_set_tuner(fe, input, 1);
- state->base->tuner_use_count[input]++;
- state->base->iq_mode = (ts_config > 1);
-unlock:
- mutex_unlock(&state->base->tuner_lock);
- if (stat)
- return stat;
- memset(&cmd, 0, sizeof(cmd));
-
- if (state->base->iq_mode) {
- cmd.command = SX8_CMD_SELECT_IQOUT;
- cmd.demod = state->demod;
- cmd.output = 0;
- mci_cmd(state, &cmd, NULL);
- mci_config(state, ts_config);
- }
- if (p->stream_id != NO_STREAM_ID_FILTER && p->stream_id != 0x80000000)
- flags |= 0x80;
- dev_dbg(state->base->dev, "MCI-%d: tuner=%d demod=%d\n",
- state->nr, state->tuner, state->demod);
- cmd.command = MCI_CMD_SEARCH_DVBS;
- cmd.dvbs2_search.flags = flags;
- cmd.dvbs2_search.s2_modulation_mask =
- modmask & ((1 << (bits_per_symbol - 1)) - 1);
- cmd.dvbs2_search.retry = 2;
- cmd.dvbs2_search.frequency = p->frequency * 1000;
- cmd.dvbs2_search.symbol_rate = p->symbol_rate;
- cmd.dvbs2_search.scrambling_sequence_index =
- p->scrambling_sequence_index;
- cmd.dvbs2_search.input_stream_id =
- (p->stream_id != NO_STREAM_ID_FILTER) ? p->stream_id : 0;
- cmd.tuner = state->tuner;
- cmd.demod = state->demod;
- cmd.output = state->nr;
- if (p->stream_id == 0x80000000)
- cmd.output |= 0x80;
- stat = mci_cmd(state, &cmd, NULL);
- if (stat)
- stop(fe);
- return stat;
-}
-
-static int start_iq(struct dvb_frontend *fe, u32 ts_config)
-{
- struct mci *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- u32 used_demods = 0;
- struct mci_command cmd;
- u32 input = state->tuner;
- int i, stat = 0;
-
- mutex_lock(&state->base->tuner_lock);
- if (state->base->iq_mode) {
- stat = -EBUSY;
- goto unlock;
- }
- for (i = 0; i < MCI_DEMOD_MAX; i++)
- if (state->base->demod_in_use[i])
- used_demods++;
- if (used_demods > 0) {
- stat = -EBUSY;
- goto unlock;
- }
- state->demod = 0;
- state->base->assigned_demod[state->nr] = 0;
- if (!state->base->tuner_use_count[input])
- mci_set_tuner(fe, input, 1);
- state->base->tuner_use_count[input]++;
- state->base->iq_mode = (ts_config > 1);
-unlock:
- mutex_unlock(&state->base->tuner_lock);
- if (stat)
- return stat;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.command = SX8_CMD_START_IQ;
- cmd.dvbs2_search.frequency = p->frequency * 1000;
- cmd.dvbs2_search.symbol_rate = p->symbol_rate;
- cmd.tuner = state->tuner;
- cmd.demod = state->demod;
- cmd.output = 7;
- mci_config(state, ts_config);
- stat = mci_cmd(state, &cmd, NULL);
- if (stat)
- stop(fe);
- return stat;
-}
-
-static int set_parameters(struct dvb_frontend *fe)
-{
- int stat = 0;
- struct mci *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- u32 ts_config, iq_mode = 0, isi;
-
- if (state->started)
- stop(fe);
-
- isi = p->stream_id;
- if (isi != NO_STREAM_ID_FILTER)
- iq_mode = (isi & 0x30000000) >> 28;
-
- switch (iq_mode) {
- case 1:
- ts_config = (SX8_TSCONFIG_TSHEADER | SX8_TSCONFIG_MODE_IQ);
- break;
- case 2:
- ts_config = (SX8_TSCONFIG_TSHEADER | SX8_TSCONFIG_MODE_IQ);
- break;
- default:
- ts_config = SX8_TSCONFIG_MODE_NORMAL;
- break;
- }
-
- if (iq_mode != 2) {
- u32 flags = 3;
- u32 mask = 3;
-
- if (p->modulation == APSK_16 ||
- p->modulation == APSK_32) {
- flags = 2;
- mask = 15;
- }
- stat = start(fe, flags, mask, ts_config);
- } else {
- stat = start_iq(fe, ts_config);
- }
-
- if (!stat) {
- state->started = 1;
- state->first_time_lock = 1;
- state->signal_info.status = SX8_DEMOD_WAIT_SIGNAL;
- }
-
- return stat;
-}
-
-static int tune(struct dvb_frontend *fe, bool re_tune,
- unsigned int mode_flags,
- unsigned int *delay, enum fe_status *status)
-{
- int r;
-
- if (re_tune) {
- r = set_parameters(fe);
- if (r)
- return r;
- }
- r = read_status(fe, status);
- if (r)
- return r;
-
- if (*status & FE_HAS_LOCK)
- return 0;
- *delay = HZ / 10;
- return 0;
-}
-
-static enum dvbfe_algo get_algo(struct dvb_frontend *fe)
-{
- return DVBFE_ALGO_HW;
-}
-
-static int set_input(struct dvb_frontend *fe, int input)
-{
- struct mci *state = fe->demodulator_priv;
-
- state->tuner = input;
- dev_dbg(state->base->dev, "MCI-%d: input=%d\n", state->nr, input);
- return 0;
-}
-
-static struct dvb_frontend_ops mci_ops = {
- .delsys = { SYS_DVBS, SYS_DVBS2 },
- .info = {
- .name = "Digital Devices MaxSX8 MCI DVB-S/S2/S2X",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
- .symbol_rate_min = 100000,
- .symbol_rate_max = 100000000,
- .caps = FE_CAN_INVERSION_AUTO |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_2G_MODULATION |
- FE_CAN_MULTISTREAM,
- },
- .get_frontend_algo = get_algo,
- .tune = tune,
- .release = release,
- .read_status = read_status,
-};
-
static struct mci_base *match_base(void *key)
{
struct mci_base *p;
@@ -498,8 +124,7 @@ static int probe(struct mci *state)
}
struct dvb_frontend
-*ddb_mci_attach(struct ddb_input *input,
- int mci_type, int nr,
+*ddb_mci_attach(struct ddb_input *input, struct mci_cfg *cfg, int nr,
int (**fn_set_input)(struct dvb_frontend *fe, int input))
{
struct ddb_port *port = input->port;
@@ -507,9 +132,9 @@ struct dvb_frontend
struct ddb_link *link = &dev->link[port->lnr];
struct mci_base *base;
struct mci *state;
- void *key = mci_type ? (void *)port : (void *)link;
+ void *key = cfg->type ? (void *)port : (void *)link;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(cfg->state_size, GFP_KERNEL);
if (!state)
return NULL;
@@ -518,7 +143,7 @@ struct dvb_frontend
base->count++;
state->base = base;
} else {
- base = kzalloc(sizeof(*base), GFP_KERNEL);
+ base = kzalloc(cfg->base_size, GFP_KERNEL);
if (!base)
goto fail;
base->key = key;
@@ -535,15 +160,17 @@ struct dvb_frontend
goto fail;
}
list_add(&base->mci_list, &mci_list);
+ if (cfg->base_init)
+ cfg->base_init(base);
}
- state->fe.ops = mci_ops;
+ memcpy(&state->fe.ops, cfg->fe_ops, sizeof(struct dvb_frontend_ops));
state->fe.demodulator_priv = state;
state->nr = nr;
- *fn_set_input = set_input;
-
+ *fn_set_input = cfg->set_input;
state->tuner = nr;
state->demod = nr;
-
+ if (cfg->init)
+ cfg->init(state);
return &state->fe;
fail:
kfree(state);
diff --git a/drivers/media/pci/ddbridge/ddbridge-mci.h b/drivers/media/pci/ddbridge/ddbridge-mci.h
index 209cc2b92dff..24241111c634 100644
--- a/drivers/media/pci/ddbridge/ddbridge-mci.h
+++ b/drivers/media/pci/ddbridge/ddbridge-mci.h
@@ -2,9 +2,9 @@
/*
* ddbridge-mci.h: Digital Devices micro code interface
*
- * Copyright (C) 2017 Digital Devices GmbH
- * Marcus Metzler <mocm@metzlerbros.de>
- * Ralph Metzler <rjkm@metzlerbros.de>
+ * Copyright (C) 2017-2018 Digital Devices GmbH
+ * Marcus Metzler <mocm@metzlerbros.de>
+ * Ralph Metzler <rjkm@metzlerbros.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -42,6 +42,22 @@
#define SX8_TSCONFIG_MODE_NORMAL (0x00000001)
#define SX8_TSCONFIG_MODE_IQ (0x00000003)
+/*
+ * IQ mode is only available on the MaxSX8, and only on a single tuner.
+ *
+ * IQ_MODE_SAMPLES
+ * the sampling rate is 1550/24 MHz (approx. 64.583 MHz)
+ * the channel AGC is frozen, to allow stitching the FFT results together
+ *
+ * IQ_MODE_VTM
+ * the sampling rate is the supplied symbol rate
+ * the channel AGC is active
+ *
+ * In both cases downsampling is done with an RRC filter (currently fixed
+ * to alpha = 0.05), which causes some (approx. 5%) aliasing at the edges
+ * from outside the spectrum.
+ */
+
#define SX8_TSCONFIG_TSHEADER (0x00000004)
#define SX8_TSCONFIG_BURST (0x00000008)
@@ -51,55 +67,65 @@
#define SX8_TSCONFIG_BURSTSIZE_8K (0x00000020)
#define SX8_TSCONFIG_BURSTSIZE_16K (0x00000030)
-#define SX8_DEMOD_STOPPED (0)
-#define SX8_DEMOD_IQ_MODE (1)
-#define SX8_DEMOD_WAIT_SIGNAL (2)
-#define SX8_DEMOD_WAIT_MATYPE (3)
-#define SX8_DEMOD_TIMEOUT (14)
-#define SX8_DEMOD_LOCKED (15)
-
-#define MCI_CMD_STOP (0x01)
-#define MCI_CMD_GETSTATUS (0x02)
-#define MCI_CMD_GETSIGNALINFO (0x03)
-#define MCI_CMD_RFPOWER (0x04)
-
-#define MCI_CMD_SEARCH_DVBS (0x10)
+#define SX8_DEMOD_STOPPED (0)
+#define SX8_DEMOD_IQ_MODE (1)
+#define SX8_DEMOD_WAIT_SIGNAL (2)
+#define SX8_DEMOD_WAIT_MATYPE (3)
+#define SX8_DEMOD_TIMEOUT (14)
+#define SX8_DEMOD_LOCKED (15)
-#define MCI_CMD_GET_IQSYMBOL (0x30)
+#define MCI_CMD_STOP (0x01)
+#define MCI_CMD_GETSTATUS (0x02)
+#define MCI_CMD_GETSIGNALINFO (0x03)
+#define MCI_CMD_RFPOWER (0x04)
-#define SX8_CMD_INPUT_ENABLE (0x40)
-#define SX8_CMD_INPUT_DISABLE (0x41)
-#define SX8_CMD_START_IQ (0x42)
-#define SX8_CMD_STOP_IQ (0x43)
-#define SX8_CMD_SELECT_IQOUT (0x44)
-#define SX8_CMD_SELECT_TSOUT (0x45)
+#define MCI_CMD_SEARCH_DVBS (0x10)
-#define SX8_ERROR_UNSUPPORTED (0x80)
+#define MCI_CMD_GET_IQSYMBOL (0x30)
-#define SX8_SUCCESS(status) (status < SX8_ERROR_UNSUPPORTED)
+#define SX8_CMD_INPUT_ENABLE (0x40)
+#define SX8_CMD_INPUT_DISABLE (0x41)
+#define SX8_CMD_START_IQ (0x42)
+#define SX8_CMD_STOP_IQ (0x43)
+#define SX8_CMD_ENABLE_IQOUTPUT (0x44)
+#define SX8_CMD_DISABLE_IQOUTPUT (0x45)
-#define SX8_CMD_DIAG_READ8 (0xE0)
-#define SX8_CMD_DIAG_READ32 (0xE1)
-#define SX8_CMD_DIAG_WRITE8 (0xE2)
-#define SX8_CMD_DIAG_WRITE32 (0xE3)
+#define MCI_STATUS_OK (0x00)
+#define MCI_STATUS_UNSUPPORTED (0x80)
+#define MCI_STATUS_RETRY (0xFD)
+#define MCI_STATUS_NOT_READY (0xFE)
+#define MCI_STATUS_ERROR (0xFF)
-#define SX8_CMD_DIAG_READRF (0xE8)
-#define SX8_CMD_DIAG_WRITERF (0xE9)
+#define MCI_SUCCESS(status) ((status & MCI_STATUS_UNSUPPORTED) == 0)
struct mci_command {
union {
u32 command_word;
struct {
- u8 command;
- u8 tuner;
- u8 demod;
- u8 output;
+ u8 command;
+ u8 tuner;
+ u8 demod;
+ u8 output;
};
};
union {
u32 params[31];
struct {
+ /*
+ * Bit 0: DVB-S Enabled
+ * Bit 1: DVB-S2 Enabled
+ * Bit 7: InputStreamID
+ */
u8 flags;
+ /*
+ * Bit 0: QPSK,
+ * Bit 1: 8PSK/8APSK
+ * Bit 2: 16APSK
+ * Bit 3: 32APSK
+ * Bit 4: 64APSK
+ * Bit 5: 128APSK
+ * Bit 6: 256APSK
+ */
u8 s2_modulation_mask;
u8 rsvd1;
u8 retry;
@@ -108,7 +134,36 @@ struct mci_command {
u8 input_stream_id;
u8 rsvd2[3];
u32 scrambling_sequence_index;
+ u32 frequency_range;
} dvbs2_search;
+
+ struct {
+ u8 tap;
+ u8 rsvd;
+ u16 point;
+ } get_iq_symbol;
+
+ struct {
+ /*
+ * Bit 0: 0=VTM/1=SCAN
+ * Bit 1: Set Gain
+ */
+ u8 flags;
+ u8 roll_off;
+ u8 rsvd1;
+ u8 rsvd2;
+ u32 frequency;
+ u32 symbol_rate; /* Only in VTM mode */
+ u16 gain;
+ } sx8_start_iq;
+
+ struct {
+ /*
+ * Bit 1:0 = STVVGLNA Gain.
+ * 0 = AGC, 1 = 0dB, 2 = Minimum, 3 = Maximum
+ */
+ u8 flags;
+ } sx8_input_enable;
};
};
@@ -116,41 +171,92 @@ struct mci_result {
union {
u32 status_word;
struct {
- u8 status;
- u8 rsvd;
+ u8 status;
+ u8 mode;
u16 time;
};
};
union {
u32 result[27];
struct {
+ /* 1 = DVB-S, 2 = DVB-S2X */
u8 standard;
/* puncture rate for DVB-S */
u8 pls_code;
- /* 7-6: rolloff, 5-2: rsrvd, 1:short, 0:pilots */
+ /* 2-0: rolloff */
u8 roll_off;
u8 rsvd;
+ /* actual frequency in Hz */
u32 frequency;
+ /* actual symbol rate in Hz */
u32 symbol_rate;
+ /* channel power in dBm x 100 */
s16 channel_power;
+ /* band power in dBm x 100 */
s16 band_power;
+ /*
+ * SNR in dB x 100
+ * Note: negative values are valid in DVB-S2
+ */
s16 signal_to_noise;
s16 rsvd2;
+ /*
+ * Counter for packet errors
+ * (set to 0 on start command)
+ */
u32 packet_errors;
+ /* Bit error rate: PreRS in DVB-S, PreBCH in DVB-S2X */
u32 ber_numerator;
u32 ber_denominator;
} dvbs2_signal_info;
+
struct {
- u8 i_symbol;
- u8 q_symbol;
- } dvbs2_signal_iq;
+ s16 i;
+ s16 q;
+ } iq_symbol;
};
u32 version[4];
};
+struct mci_base {
+ struct list_head mci_list;
+ void *key;
+ struct ddb_link *link;
+ struct completion completion;
+ struct device *dev;
+ struct mutex tuner_lock; /* concurrent tuner access lock */
+ struct mutex mci_lock; /* concurrent MCI access lock */
+ int count;
+ int type;
+};
+
+struct mci {
+ struct mci_base *base;
+ struct dvb_frontend fe;
+ int nr;
+ int demod;
+ int tuner;
+};
+
+struct mci_cfg {
+ int type;
+ struct dvb_frontend_ops *fe_ops;
+ u32 base_size;
+ u32 state_size;
+ int (*init)(struct mci *mci);
+ int (*base_init)(struct mci_base *mci_base);
+ int (*set_input)(struct dvb_frontend *fe, int input);
+};
+
+/* defined in ddbridge-sx8.c */
+extern const struct mci_cfg ddb_max_sx8_cfg;
+
+int ddb_mci_cmd(struct mci *state, struct mci_command *command,
+ struct mci_result *result);
+int ddb_mci_config(struct mci *state, u32 config);
+
struct dvb_frontend
-*ddb_mci_attach(struct ddb_input *input,
- int mci_type, int nr,
+*ddb_mci_attach(struct ddb_input *input, struct mci_cfg *cfg, int nr,
int (**fn_set_input)(struct dvb_frontend *fe, int input));
#endif /* _DDBRIDGE_MCI_H_ */
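The base_size/state_size fields above let card drivers wrap struct mci and struct mci_base in larger card-specific state: ddb_mci_attach() allocates cfg->state_size and cfg->base_size bytes, and the wrapper is recovered by a cast because the common structs sit first. A minimal sketch of that embedding pattern, using hypothetical my_* names:

#include "ddbridge-mci.h"

struct my_base {
	struct mci_base mci_base;	/* must be the first member */
	u8 tuner_use_count[4];
};

struct my_state {
	struct mci mci;			/* must be the first member */
	int started;
};

static int my_init(struct mci *mci)
{
	/* valid because cfg->state_size == sizeof(struct my_state) */
	struct my_state *state = (struct my_state *)mci;

	state->started = 0;
	return 0;
}

static struct dvb_frontend_ops my_fe_ops;	/* hypothetical, left empty here */

static const struct mci_cfg my_cfg = {
	.type = 0,
	.fe_ops = &my_fe_ops,
	.base_size = sizeof(struct my_base),
	.state_size = sizeof(struct my_state),
	.init = my_init,
};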
diff --git a/drivers/media/pci/ddbridge/ddbridge-regs.h b/drivers/media/pci/ddbridge/ddbridge-regs.h
index b978b5991940..f9e1cbb99b53 100644
--- a/drivers/media/pci/ddbridge/ddbridge-regs.h
+++ b/drivers/media/pci/ddbridge/ddbridge-regs.h
@@ -34,14 +34,6 @@
#define GPIO_DIRECTION 0x28
/* ------------------------------------------------------------------------- */
-/* MDIO */
-
-#define MDIO_CTRL 0x20
-#define MDIO_ADR 0x24
-#define MDIO_REG 0x28
-#define MDIO_VAL 0x2C
-
-/* ------------------------------------------------------------------------- */
#define BOARD_CONTROL 0x30
diff --git a/drivers/media/pci/ddbridge/ddbridge-sx8.c b/drivers/media/pci/ddbridge/ddbridge-sx8.c
new file mode 100644
index 000000000000..64f05f5ee039
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-sx8.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ddbridge-sx8.c: Digital Devices MAX SX8 driver
+ *
+ * Copyright (C) 2018 Digital Devices GmbH
+ * Marcus Metzler <mocm@metzlerbros.de>
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ddbridge.h"
+#include "ddbridge-io.h"
+#include "ddbridge-mci.h"
+
+static const u32 MCLK = (1550000000 / 12);
+static const u32 MAX_LDPC_BITRATE = (720000000);
+static const u32 MAX_DEMOD_LDPC_BITRATE = (1550000000 / 6);
+
+#define SX8_TUNER_NUM 4
+#define SX8_DEMOD_NUM 8
+#define SX8_DEMOD_NONE 0xff
+
+struct sx8_base {
+ struct mci_base mci_base;
+
+ u8 tuner_use_count[SX8_TUNER_NUM];
+ u32 gain_mode[SX8_TUNER_NUM];
+
+ u32 used_ldpc_bitrate[SX8_DEMOD_NUM];
+ u8 demod_in_use[SX8_DEMOD_NUM];
+ u32 iq_mode;
+ u32 burst_size;
+ u32 direct_mode;
+};
+
+struct sx8 {
+ struct mci mci;
+
+ int first_time_lock;
+ int started;
+ struct mci_result signal_info;
+
+ u32 bb_mode;
+ u32 local_frequency;
+};
+
+static void release(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+
+ mci_base->count--;
+ if (mci_base->count == 0) {
+ list_del(&mci_base->mci_list);
+ kfree(mci_base);
+ }
+ kfree(state);
+}
+
+static int get_info(struct dvb_frontend *fe)
+{
+ int stat;
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.command = MCI_CMD_GETSIGNALINFO;
+ cmd.demod = state->mci.demod;
+ stat = ddb_mci_cmd(&state->mci, &cmd, &state->signal_info);
+ return stat;
+}
+
+static int get_snr(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].svalue =
+ (s64)state->signal_info.dvbs2_signal_info.signal_to_noise
+ * 10;
+ return 0;
+}
+
+static int get_strength(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ s32 str;
+
+ str = 100000 -
+ (state->signal_info.dvbs2_signal_info.channel_power
+ * 10 + 108750);
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ p->strength.stat[0].svalue = str;
+ return 0;
+}
+
+static int read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ int stat;
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_command cmd;
+ struct mci_result res;
+
+ cmd.command = MCI_CMD_GETSTATUS;
+ cmd.demod = state->mci.demod;
+ stat = ddb_mci_cmd(&state->mci, &cmd, &res);
+ if (stat)
+ return stat;
+ *status = 0x00;
+ get_info(fe);
+ get_strength(fe);
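+ /*
+ * Raw fe_status masks (assuming the standard enum fe_status bits):
+ * 0x0f = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC,
+ * 0x1f additionally sets FE_HAS_LOCK.
+ */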
+ if (res.status == SX8_DEMOD_WAIT_MATYPE)
+ *status = 0x0f;
+ if (res.status == SX8_DEMOD_LOCKED) {
+ *status = 0x1f;
+ get_snr(fe);
+ }
+ return stat;
+}
+
+static int mci_set_tuner(struct dvb_frontend *fe, u32 tuner, u32 on)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct mci_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.tuner = state->mci.tuner;
+ cmd.command = on ? SX8_CMD_INPUT_ENABLE : SX8_CMD_INPUT_DISABLE;
+ cmd.sx8_input_enable.flags = sx8_base->gain_mode[state->mci.tuner];
+ return ddb_mci_cmd(&state->mci, &cmd, NULL);
+}
+
+static int stop(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct mci_command cmd;
+ u32 input = state->mci.tuner;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (state->mci.demod != SX8_DEMOD_NONE) {
+ cmd.command = MCI_CMD_STOP;
+ cmd.demod = state->mci.demod;
+ ddb_mci_cmd(&state->mci, &cmd, NULL);
+ if (sx8_base->iq_mode) {
+ cmd.command = SX8_CMD_DISABLE_IQOUTPUT;
+ cmd.demod = state->mci.demod;
+ cmd.output = 0;
+ ddb_mci_cmd(&state->mci, &cmd, NULL);
+ ddb_mci_config(&state->mci, SX8_TSCONFIG_MODE_NORMAL);
+ }
+ }
+ mutex_lock(&mci_base->tuner_lock);
+ sx8_base->tuner_use_count[input]--;
+ if (!sx8_base->tuner_use_count[input])
+ mci_set_tuner(fe, input, 0);
+ if (state->mci.demod < SX8_DEMOD_NUM) {
+ sx8_base->demod_in_use[state->mci.demod] = 0;
+ state->mci.demod = SX8_DEMOD_NONE;
+ }
+ sx8_base->used_ldpc_bitrate[state->mci.nr] = 0;
+ sx8_base->iq_mode = 0;
+ mutex_unlock(&mci_base->tuner_lock);
+ state->started = 0;
+ return 0;
+}
+
+static int start(struct dvb_frontend *fe, u32 flags, u32 modmask, u32 ts_config)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 used_ldpc_bitrate = 0, free_ldpc_bitrate;
+ u32 used_demods = 0;
+ struct mci_command cmd;
+ u32 input = state->mci.tuner;
+ u32 bits_per_symbol = 0;
+ int i = -1, stat = 0;
+
+ if (p->symbol_rate >= (MCLK / 2))
+ flags &= ~1;
+ if ((flags & 3) == 0)
+ return -EINVAL;
+
+ if (flags & 2) {
+ u32 tmp = modmask;
+
+ bits_per_symbol = 1;
+ while (tmp & 1) {
+ tmp >>= 1;
+ bits_per_symbol++;
+ }
+ }
+
+ mutex_lock(&mci_base->tuner_lock);
+ if (sx8_base->iq_mode) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+
+ if (sx8_base->direct_mode) {
+ if (p->symbol_rate >= MCLK / 2) {
+ if (state->mci.nr < 4)
+ i = state->mci.nr;
+ } else {
+ i = state->mci.nr;
+ }
+ } else {
+ for (i = 0; i < SX8_DEMOD_NUM; i++) {
+ used_ldpc_bitrate += sx8_base->used_ldpc_bitrate[i];
+ if (sx8_base->demod_in_use[i])
+ used_demods++;
+ }
+ if (used_ldpc_bitrate >= MAX_LDPC_BITRATE ||
+ ((ts_config & SX8_TSCONFIG_MODE_MASK) >
+ SX8_TSCONFIG_MODE_NORMAL && used_demods > 0)) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ free_ldpc_bitrate = MAX_LDPC_BITRATE - used_ldpc_bitrate;
+ if (free_ldpc_bitrate > MAX_DEMOD_LDPC_BITRATE)
+ free_ldpc_bitrate = MAX_DEMOD_LDPC_BITRATE;
+
+ while (p->symbol_rate * bits_per_symbol > free_ldpc_bitrate)
+ bits_per_symbol--;
+ if (bits_per_symbol < 2) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+
+ modmask &= ((1 << (bits_per_symbol - 1)) - 1);
+ if (((flags & 0x02) != 0) && modmask == 0) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+
+ i = (p->symbol_rate > (MCLK / 2)) ? 3 : 7;
+ while (i >= 0 && sx8_base->demod_in_use[i])
+ i--;
+ }
+
+ if (i < 0) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ sx8_base->demod_in_use[i] = 1;
+ sx8_base->used_ldpc_bitrate[state->mci.nr] = p->symbol_rate
+ * bits_per_symbol;
+ state->mci.demod = i;
+
+ if (!sx8_base->tuner_use_count[input])
+ mci_set_tuner(fe, input, 1);
+ sx8_base->tuner_use_count[input]++;
+ sx8_base->iq_mode = (ts_config > 1);
+unlock:
+ mutex_unlock(&mci_base->tuner_lock);
+ if (stat)
+ return stat;
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (sx8_base->iq_mode) {
+ cmd.command = SX8_CMD_ENABLE_IQOUTPUT;
+ cmd.demod = state->mci.demod;
+ cmd.output = 0;
+ ddb_mci_cmd(&state->mci, &cmd, NULL);
+ ddb_mci_config(&state->mci, ts_config);
+ }
+ if (p->stream_id != NO_STREAM_ID_FILTER && p->stream_id != 0x80000000)
+ flags |= 0x80;
+ dev_dbg(mci_base->dev, "MCI-%d: tuner=%d demod=%d\n",
+ state->mci.nr, state->mci.tuner, state->mci.demod);
+ cmd.command = MCI_CMD_SEARCH_DVBS;
+ cmd.dvbs2_search.flags = flags;
+ cmd.dvbs2_search.s2_modulation_mask = modmask;
+ cmd.dvbs2_search.retry = 2;
+ cmd.dvbs2_search.frequency = p->frequency * 1000;
+ cmd.dvbs2_search.symbol_rate = p->symbol_rate;
+ cmd.dvbs2_search.scrambling_sequence_index =
+ p->scrambling_sequence_index | 0x80000000;
+ cmd.dvbs2_search.input_stream_id =
+ (p->stream_id != NO_STREAM_ID_FILTER) ? p->stream_id : 0;
+ cmd.tuner = state->mci.tuner;
+ cmd.demod = state->mci.demod;
+ cmd.output = state->mci.nr;
+ if (p->stream_id == 0x80000000)
+ cmd.output |= 0x80;
+ stat = ddb_mci_cmd(&state->mci, &cmd, NULL);
+ if (stat)
+ stop(fe);
+ return stat;
+}
+
+static int start_iq(struct dvb_frontend *fe, u32 flags, u32 roll_off,
+ u32 ts_config)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 used_demods = 0;
+ struct mci_command cmd;
+ u32 input = state->mci.tuner;
+ int i, stat = 0;
+
+ mutex_lock(&mci_base->tuner_lock);
+ if (sx8_base->iq_mode) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ for (i = 0; i < SX8_DEMOD_NUM; i++)
+ if (sx8_base->demod_in_use[i])
+ used_demods++;
+ if (used_demods > 0) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ state->mci.demod = 0;
+ if (!sx8_base->tuner_use_count[input])
+ mci_set_tuner(fe, input, 1);
+ sx8_base->tuner_use_count[input]++;
+ sx8_base->iq_mode = (ts_config > 1);
+unlock:
+ mutex_unlock(&mci_base->tuner_lock);
+ if (stat)
+ return stat;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.command = SX8_CMD_START_IQ;
+ cmd.sx8_start_iq.flags = flags;
+ cmd.sx8_start_iq.roll_off = roll_off;
+ cmd.sx8_start_iq.frequency = p->frequency * 1000;
+ cmd.sx8_start_iq.symbol_rate = p->symbol_rate;
+ cmd.tuner = state->mci.tuner;
+ cmd.demod = state->mci.demod;
+ stat = ddb_mci_cmd(&state->mci, &cmd, NULL);
+ if (stat)
+ stop(fe);
+ ddb_mci_config(&state->mci, ts_config);
+ return stat;
+}
+
+static int set_parameters(struct dvb_frontend *fe)
+{
+ int stat = 0;
+ struct sx8 *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 ts_config = SX8_TSCONFIG_MODE_NORMAL, iq_mode = 0, isi;
+
+ if (state->started)
+ stop(fe);
+
+ isi = p->stream_id;
+ if (isi != NO_STREAM_ID_FILTER)
+ iq_mode = (isi & 0x30000000) >> 28;
+
+ if (iq_mode)
+ ts_config = (SX8_TSCONFIG_TSHEADER | SX8_TSCONFIG_MODE_IQ);
+ if (iq_mode < 3) {
+ u32 mask;
+
+ switch (p->modulation) {
+ /* uncomment once these modulations are added to the DVB API
+ * case APSK_256:
+ * mask = 0x7f;
+ * break;
+ * case APSK_128:
+ * mask = 0x3f;
+ * break;
+ * case APSK_64:
+ * mask = 0x1f;
+ * break;
+ */
+ case APSK_32:
+ mask = 0x0f;
+ break;
+ case APSK_16:
+ mask = 0x07;
+ break;
+ default:
+ mask = 0x03;
+ break;
+ }
+ stat = start(fe, 3, mask, ts_config);
+ } else {
+ u32 flags = (iq_mode == 2) ? 1 : 0;
+
+ stat = start_iq(fe, flags, 4, ts_config);
+ }
+ if (!stat) {
+ state->started = 1;
+ state->first_time_lock = 1;
+ state->signal_info.status = SX8_DEMOD_WAIT_SIGNAL;
+ }
+
+ return stat;
+}
+
+static int tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay, enum fe_status *status)
+{
+ int r;
+
+ if (re_tune) {
+ r = set_parameters(fe);
+ if (r)
+ return r;
+ }
+ r = read_status(fe, status);
+ if (r)
+ return r;
+
+ if (*status & FE_HAS_LOCK)
+ return 0;
+ *delay = HZ / 10;
+ return 0;
+}
+
+static enum dvbfe_algo get_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
+static int set_input(struct dvb_frontend *fe, int input)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+
+ if (input >= SX8_TUNER_NUM)
+ return -EINVAL;
+
+ state->mci.tuner = input;
+ dev_dbg(mci_base->dev, "MCI-%d: input=%d\n", state->mci.nr, input);
+ return 0;
+}
+
+static struct dvb_frontend_ops sx8_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Digital Devices MaxSX8 MCI DVB-S/S2/S2X",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .symbol_rate_min = 100000,
+ .symbol_rate_max = 100000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM,
+ },
+ .get_frontend_algo = get_algo,
+ .tune = tune,
+ .release = release,
+ .read_status = read_status,
+};
+
+static int init(struct mci *mci)
+{
+ struct sx8 *state = (struct sx8 *)mci;
+
+ state->mci.demod = SX8_DEMOD_NONE;
+ return 0;
+}
+
+const struct mci_cfg ddb_max_sx8_cfg = {
+ .type = 0,
+ .fe_ops = &sx8_ops,
+ .base_size = sizeof(struct sx8_base),
+ .state_size = sizeof(struct sx8),
+ .init = init,
+ .set_input = set_input,
+};
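For reference, the allocation in start() budgets the shared LDPC decoder: 720 Mbit/s in total (MAX_LDPC_BITRATE) and 1550/6 ≈ 258.3 Mbit/s per demod (MAX_DEMOD_LDPC_BITRATE). A standalone sketch of that arithmetic, assuming a made-up 60 MSym/s tune with modmask 0x0f (QPSK through 32APSK):

#include <stdio.h>

int main(void)
{
	const unsigned long long max_demod_ldpc = 1550000000ULL / 6;
	unsigned long long symbol_rate = 60000000;	/* 60 MSym/s */
	unsigned int tmp = 0x0f;			/* QPSK..32APSK */
	unsigned int bits_per_symbol = 1;

	while (tmp & 1) {	/* 0x0f yields bits_per_symbol = 5 */
		tmp >>= 1;
		bits_per_symbol++;
	}
	/* 60e6 * 5 = 300 Mbit/s exceeds ~258.3 Mbit/s, so drop to 4 */
	while (symbol_rate * bits_per_symbol > max_demod_ldpc)
		bits_per_symbol--;

	printf("bits_per_symbol = %u\n", bits_per_symbol);	/* prints 4 */
	return 0;
}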
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index a66b1125cc74..8a354dfb6c22 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -120,21 +120,23 @@ struct ddb_info {
#define DDB_OCTOPUS_MCI 9
char *name;
u32 i2c_mask;
+ u32 board_control;
+ u32 board_control_2;
+
u8 port_num;
u8 led_num;
u8 fan_num;
u8 temp_num;
u8 temp_bus;
- u32 board_control;
- u32 board_control_2;
- u8 mdio_num;
u8 con_clock; /* use a continuous clock */
u8 ts_quirks;
#define TS_QUIRK_SERIAL 1
#define TS_QUIRK_REVERSED 2
#define TS_QUIRK_ALT_OSC 8
+ u8 mci_ports;
+ u8 mci_type;
+
u32 tempmon_irq;
- u8 mci;
const struct ddb_regmap *regmap;
};
@@ -255,7 +257,6 @@ struct ddb_port {
#define DDB_CI_EXTERNAL_XO2_B 13
#define DDB_TUNER_DVBS_STV0910_PR 14
#define DDB_TUNER_DVBC2T2I_SONY_P 15
-#define DDB_TUNER_MCI 16
#define DDB_TUNER_XO2 32
#define DDB_TUNER_DVBS_STV0910 (DDB_TUNER_XO2 + 0)
@@ -265,6 +266,9 @@ struct ddb_port {
#define DDB_TUNER_ATSC_ST (DDB_TUNER_XO2 + 4)
#define DDB_TUNER_DVBC2T2I_SONY (DDB_TUNER_XO2 + 5)
+#define DDB_TUNER_MCI 48
+#define DDB_TUNER_MCI_SX8 (DDB_TUNER_MCI + 0)
+
struct ddb_input *input[2];
struct ddb_output *output;
struct dvb_ca_en50221 *en;
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index c9db108751a7..1ddb0576fb7b 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -986,6 +986,9 @@ static int dm1105_probe(struct pci_dev *pdev,
int ret = -ENOMEM;
int i;
+ if (dm1105_devcount >= ARRAY_SIZE(card))
+ return -ENODEV;
+
dev = kzalloc(sizeof(struct dm1105_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 6b2ffdc96961..dd727098daf4 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -999,7 +999,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
int vbi_buf_size;
struct ivtv *itv;
- itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC);
+ itv = kzalloc(sizeof(struct ivtv), GFP_KERNEL);
if (itv == NULL)
return -ENOMEM;
itv->pdev = pdev;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 522cd111e399..e9ce54dd5e01 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -293,6 +293,7 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
.platform_data = &pdata,
};
+ memset(&pdata, 0, sizeof(pdata));
pdata.pvr150_workaround = itv->pvr150_workaround;
sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
&cx25840_info, NULL);
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index b19058e36853..5ddaa8ed11a5 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -1178,7 +1178,7 @@ static int ivtvfb_init_card(struct ivtv *itv)
}
itv->osd_info = kzalloc(sizeof(struct osd_info),
- GFP_ATOMIC|__GFP_NOWARN);
+ GFP_KERNEL|__GFP_NOWARN);
if (itv->osd_info == NULL) {
IVTVFB_ERR("Failed to allocate memory for osd_info\n");
return -ENOMEM;
diff --git a/drivers/media/pci/mantis/mantis_vp3030.c b/drivers/media/pci/mantis/mantis_vp3030.c
index 14f6e153000c..9797c9fd8259 100644
--- a/drivers/media/pci/mantis/mantis_vp3030.c
+++ b/drivers/media/pci/mantis/mantis_vp3030.c
@@ -42,8 +42,8 @@ static struct zl10353_config mantis_vp3030_config = {
static struct tda665x_config env57h12d5_config = {
.name = "ENV57H12D5 (ET-50DT)",
.addr = 0x60,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
+ .frequency_min = 47 * MHz,
+ .frequency_max = 862 * MHz,
.frequency_offst = 3616667,
.ref_multiplier = 6, /* 1/6 MHz */
.ref_divider = 100000, /* 1/6 MHz */
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index b13e319d24b7..5f1613aec93c 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -214,11 +214,6 @@ static int netup_i2c_xfer(struct i2c_adapter *adap,
struct netup_i2c *i2c = i2c_get_adapdata(adap);
u16 reg;
- if (num <= 0) {
- dev_dbg(i2c->adap.dev.parent,
- "%s(): num == %d\n", __func__, num);
- return -EINVAL;
- }
spin_lock_irqsave(&i2c->lock, flags);
if (i2c->state != STATE_DONE) {
dev_dbg(i2c->adap.dev.parent,
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index fda969a85684..7f878fc41b7e 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1443,9 +1443,7 @@ static struct pci_driver pt1_driver = {
.probe = pt1_probe,
.remove = pt1_remove,
.id_table = pt1_id_table,
-#ifdef CONFIG_PM_SLEEP
.driver.pm = &pt1_pm_ops,
-#endif
};
module_pci_driver(pt1_driver);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 64ab91c24c18..221de91a8bae 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -629,12 +629,12 @@ static __poll_t fops_poll(struct file *file, poll_table *wait)
port->last_poll_msecs_diff);
if (!video_is_registered(port->v4l_device))
- return -EIO;
+ return EPOLLERR;
if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
if (atomic_inc_return(&port->v4l_reader_count) == 1) {
if (saa7164_vbi_initialize(port) < 0)
- return -EINVAL;
+ return EPOLLERR;
saa7164_vbi_start_streaming(port);
msleep(200);
}
@@ -644,7 +644,7 @@ static __poll_t fops_poll(struct file *file, poll_table *wait)
if ((file->f_flags & O_NONBLOCK) == 0) {
if (wait_event_interruptible(port->wait_read,
saa7164_vbi_next_buf(port))) {
- return -ERESTARTSYS;
+ return EPOLLERR;
}
}
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 069c4a853346..1858efedaf1a 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -116,6 +116,7 @@ static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
* @sequence: sequence number of acquired buffer
* @active: current active buffer
* @lock: used in videobuf2 callback
+ * @v4l_lock: serializes the video4linux ioctls
* @tcount: Number of top frames
* @bcount: Number of bottom frames
* @overflow: Number of FIFO overflows
@@ -145,6 +146,7 @@ struct sta2x11_vip {
unsigned int sequence;
struct vip_buffer *active; /* current active buffer */
spinlock_t lock; /* Used in videobuf2 callback */
+ struct mutex v4l_lock;
/* Interrupt counters */
int tcount, bcount;
@@ -385,6 +387,8 @@ static const struct vb2_ops vip_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
@@ -870,6 +874,7 @@ static int sta2x11_vip_init_buffer(struct sta2x11_vip *vip)
vip->vb_vidq.mem_ops = &vb2_dma_contig_memops;
vip->vb_vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vip->vb_vidq.dev = &vip->pdev->dev;
+ vip->vb_vidq.lock = &vip->v4l_lock;
err = vb2_queue_init(&vip->vb_vidq);
if (err)
return err;
@@ -1034,6 +1039,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
vip->std = V4L2_STD_PAL;
vip->format = formats_50[0];
vip->config = config;
+ mutex_init(&vip->v4l_lock);
ret = sta2x11_vip_init_controls(vip);
if (ret)
@@ -1080,6 +1086,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
vip->video_dev = video_dev_template;
vip->video_dev.v4l2_dev = &vip->v4l2_dev;
vip->video_dev.queue = &vip->vb_vidq;
+ vip->video_dev.lock = &vip->v4l_lock;
video_set_drvdata(&vip->video_dev, vip);
ret = video_register_device(&vip->video_dev, VFL_TYPE_GRABBER, -1);
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 0ea8dd44026c..3a06c000f97b 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -1190,6 +1190,14 @@ int tw686x_video_init(struct tw686x_dev *dev)
return err;
}
+ /* Initialize vc->dev and vc->ch for the error path */
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+ vc->dev = dev;
+ vc->ch = ch;
+ }
+
for (ch = 0; ch < max_channels(dev); ch++) {
struct tw686x_video_channel *vc = &dev->video_channels[ch];
struct video_device *vdev;
@@ -1198,9 +1206,6 @@ int tw686x_video_init(struct tw686x_dev *dev)
spin_lock_init(&vc->qlock);
INIT_LIST_HEAD(&vc->vidq_queued);
- vc->dev = dev;
- vc->ch = ch;
-
/* default settings */
err = tw686x_set_standard(vc, V4L2_STD_NTSC);
if (err)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 2728376b04b5..94c1fe0e9787 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -90,7 +90,7 @@ config VIDEO_PXA27x
This is a v4l2 driver for the PXA27x Quick Capture Interface
config VIDEO_QCOM_CAMSS
- tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
+ tristate "Qualcomm V4L2 Camera Subsystem driver"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
select VIDEOBUF2_DMA_SG
@@ -384,8 +384,8 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_FDP1
tristate "Renesas Fine Display Processor"
depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_SHMOBILE || COMPILE_TEST
- depends on (!ARCH_RENESAS && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
@@ -514,6 +514,9 @@ config VIDEO_VIM2M
---help---
This is a virtual test device for the memory-to-memory driver
framework.
+
+source "drivers/media/platform/vicodec/Kconfig"
+
endif #V4L_TEST_DRIVERS
menuconfig DVB_PLATFORM_DRIVERS
@@ -533,6 +536,17 @@ menuconfig CEC_PLATFORM_DRIVERS
if CEC_PLATFORM_DRIVERS
+config VIDEO_CROS_EC_CEC
+ tristate "ChromeOS EC CEC driver"
+ depends on MFD_CROS_EC
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ If you say yes here you will get support for the
+ ChromeOS Embedded Controller's CEC.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
config VIDEO_MESON_AO_CEC
tristate "Amlogic Meson AO CEC driver"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 04bc1502a30e..41322ab65802 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
obj-$(CONFIG_VIDEO_VIMC) += vimc/
obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
@@ -88,8 +89,10 @@ obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
-obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss-8x16/
+obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/
obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
obj-y += meson/
+
+obj-y += cros-ec-cec/
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index e5be21a31640..e8db4df1e7c4 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1106,23 +1106,20 @@ static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
struct device_node *ep = NULL;
struct device_node *remote;
- while (1) {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- return -EINVAL;
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- of_node_put(ep);
- return -EINVAL;
- }
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- /* Remote node to connect */
- isi->entity.node = remote;
- isi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- isi->entity.asd.match.fwnode = of_fwnode_handle(remote);
- return 0;
- }
+ /* Remote node to connect */
+ isi->entity.node = remote;
+ isi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ isi->entity.asd.match.fwnode = of_fwnode_handle(remote);
+ return 0;
}
static int isi_graph_init(struct atmel_isi *isi)
diff --git a/drivers/media/platform/cadence/Kconfig b/drivers/media/platform/cadence/Kconfig
index 3bf0f2454384..cf6124da3c54 100644
--- a/drivers/media/platform/cadence/Kconfig
+++ b/drivers/media/platform/cadence/Kconfig
@@ -11,6 +11,7 @@ if VIDEO_CADENCE
config VIDEO_CADENCE_CSI2RX
tristate "Cadence MIPI-CSI2 RX Controller"
+ depends on VIDEO_V4L2
depends on MEDIA_CONTROLLER
depends on VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -22,6 +23,7 @@ config VIDEO_CADENCE_CSI2RX
config VIDEO_CADENCE_CSI2TX
tristate "Cadence MIPI-CSI2 TX Controller"
+ depends on VIDEO_V4L2
depends on MEDIA_CONTROLLER
depends on VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index a0f02916006b..43e43c7b3e98 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -13,6 +13,7 @@
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index dfa1d88d955b..40d0de690ff4 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
index 69f8242209c2..d2861749d640 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -23,6 +23,11 @@ struct cec_gpio {
int hpd_irq;
bool hpd_is_high;
ktime_t hpd_ts;
+
+ struct gpio_desc *v5_gpio;
+ int v5_irq;
+ bool v5_is_high;
+ ktime_t v5_ts;
};
static bool cec_gpio_read(struct cec_adapter *adap)
@@ -65,6 +70,26 @@ static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
return IRQ_HANDLED;
}
+static irqreturn_t cec_5v_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+ bool is_high = gpiod_get_value(cec->v5_gpio);
+
+ if (is_high == cec->v5_is_high)
+ return IRQ_HANDLED;
+ cec->v5_ts = ktime_get();
+ cec->v5_is_high = is_high;
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cec_5v_gpio_irq_handler_thread(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_queue_pin_5v_event(cec->adap, cec->v5_is_high, cec->v5_ts);
+ return IRQ_HANDLED;
+}
+
static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
@@ -119,6 +144,9 @@ static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
if (cec->hpd_gpio)
seq_printf(file, "hpd: %s\n",
cec->hpd_is_high ? "high" : "low");
+ if (cec->v5_gpio)
+ seq_printf(file, "5V: %s\n",
+ cec->v5_is_high ? "high" : "low");
}
static int cec_gpio_read_hpd(struct cec_adapter *adap)
@@ -130,6 +158,15 @@ static int cec_gpio_read_hpd(struct cec_adapter *adap)
return gpiod_get_value(cec->hpd_gpio);
}
+static int cec_gpio_read_5v(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->v5_gpio)
+ return -ENOTTY;
+ return gpiod_get_value(cec->v5_gpio);
+}
+
static void cec_gpio_free(struct cec_adapter *adap)
{
cec_gpio_disable_irq(adap);
@@ -144,6 +181,7 @@ static const struct cec_pin_ops cec_gpio_pin_ops = {
.status = cec_gpio_status,
.free = cec_gpio_free,
.read_hpd = cec_gpio_read_hpd,
+ .read_5v = cec_gpio_read_5v,
};
static int cec_gpio_probe(struct platform_device *pdev)
@@ -167,6 +205,10 @@ static int cec_gpio_probe(struct platform_device *pdev)
if (IS_ERR(cec->hpd_gpio))
return PTR_ERR(cec->hpd_gpio);
+ cec->v5_gpio = devm_gpiod_get_optional(dev, "v5", GPIOD_IN);
+ if (IS_ERR(cec->v5_gpio))
+ return PTR_ERR(cec->v5_gpio);
+
cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
@@ -185,6 +227,18 @@ static int cec_gpio_probe(struct platform_device *pdev)
return ret;
}
+ if (cec->v5_gpio) {
+ cec->v5_irq = gpiod_to_irq(cec->v5_gpio);
+ ret = devm_request_threaded_irq(dev, cec->v5_irq,
+ cec_5v_gpio_irq_handler,
+ cec_5v_gpio_irq_handler_thread,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "v5-gpio", cec);
+ if (ret)
+ return ret;
+ }
+
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
cec_delete_adapter(cec->adap);
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 68ed2a564ad1..d26c2d85a009 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -261,11 +261,12 @@ void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
/*
- * Only queue a single JPEG into the bitstream buffer, except
- * to increase payload over 512 bytes or if in hold state.
+ * Only queue two JPEGs into the bitstream buffer to keep
+ * latency low. We need at least one complete buffer and the
+ * header of another buffer (for prescan) in the bitstream.
*/
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
- (coda_get_bitstream_payload(ctx) >= 512) && !ctx->hold)
+ ctx->num_metas > 1)
break;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
@@ -389,32 +390,29 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
struct coda_q_data *q_data, u32 fourcc)
{
struct coda_dev *dev = ctx->dev;
- int width, height;
- int ysize;
+ unsigned int ysize, ycbcr_size;
int ret;
int i;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 ||
ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 ||
- ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4) {
- width = round_up(q_data->width, 16);
- height = round_up(q_data->height, 16);
- } else {
- width = round_up(q_data->width, 8);
- height = q_data->height;
- }
- ysize = width * height;
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4)
+ ysize = round_up(q_data->rect.width, 16) *
+ round_up(q_data->rect.height, 16);
+ else
+ ysize = round_up(q_data->rect.width, 8) * q_data->rect.height;
+
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ ycbcr_size = round_up(ysize, 4096) + ysize / 2;
+ else
+ ycbcr_size = ysize + ysize / 2;
/* Allocate frame buffers */
for (i = 0; i < ctx->num_internal_frames; i++) {
- size_t size;
+ size_t size = ycbcr_size;
char *name;
- if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
- size = round_up(ysize, 4096) + ysize / 2;
- else
- size = ysize + ysize / 2;
/* Add space for mvcol buffers */
if (dev->devtype->product != CODA_DX6 &&
(ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
@@ -499,8 +497,8 @@ static int coda_alloc_context_buffers(struct coda_ctx *ctx,
if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
/* worst case slice size */
- size = (DIV_ROUND_UP(q_data->width, 16) *
- DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
+ size = (DIV_ROUND_UP(q_data->rect.width, 16) *
+ DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512;
ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
"slicebuf");
if (ret < 0)
@@ -538,6 +536,8 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
{
struct vb2_buffer *vb = &buf->vb2_buf;
struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src;
+ struct v4l2_rect *r;
size_t bufsize;
int ret;
int i;
@@ -551,6 +551,23 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
if (dev->devtype->product == CODA_960)
bufsize /= 1024;
coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
+ if (dev->devtype->product == CODA_960 &&
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 &&
+ header_code == CODA_HEADER_H264_SPS) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ r = &q_data_src->rect;
+
+ if (r->width % 16 || r->height % 16) {
+ u32 crop_right = round_up(r->width, 16) - r->width;
+ u32 crop_bottom = round_up(r->height, 16) - r->height;
+
+ coda_write(dev, crop_right,
+ CODA9_CMD_ENC_HEADER_FRAME_CROP_H);
+ coda_write(dev, crop_bottom,
+ CODA9_CMD_ENC_HEADER_FRAME_CROP_V);
+ header_code |= CODA9_HEADER_FRAME_CROP;
+ }
+ }
coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
if (ret < 0) {
@@ -632,7 +649,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
struct coda_q_data *q_data_src;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- mb_width = DIV_ROUND_UP(q_data_src->width, 16);
+ mb_width = DIV_ROUND_UP(q_data_src->rect.width, 16);
w128 = mb_width * 128;
w64 = mb_width * 64;
@@ -721,6 +738,7 @@ static u32 coda_supported_firmwares[] = {
CODA_FIRMWARE_VERNUM(CODA_HX4, 1, 4, 50),
CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
+ CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 9),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 3, 10),
CODA_FIRMWARE_VERNUM(CODA_960, 3, 1, 1),
};
@@ -928,25 +946,25 @@ static int coda_start_encoding(struct coda_ctx *ctx)
value = 0;
switch (dev->devtype->product) {
case CODA_DX6:
- value = (q_data_src->width & CODADX6_PICWIDTH_MASK)
+ value = (q_data_src->rect.width & CODADX6_PICWIDTH_MASK)
<< CODADX6_PICWIDTH_OFFSET;
- value |= (q_data_src->height & CODADX6_PICHEIGHT_MASK)
+ value |= (q_data_src->rect.height & CODADX6_PICHEIGHT_MASK)
<< CODA_PICHEIGHT_OFFSET;
break;
case CODA_HX4:
case CODA_7541:
if (dst_fourcc == V4L2_PIX_FMT_H264) {
- value = (round_up(q_data_src->width, 16) &
+ value = (round_up(q_data_src->rect.width, 16) &
CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
- value |= (round_up(q_data_src->height, 16) &
+ value |= (round_up(q_data_src->rect.height, 16) &
CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
break;
}
/* fallthrough */
case CODA_960:
- value = (q_data_src->width & CODA7_PICWIDTH_MASK)
+ value = (q_data_src->rect.width & CODA7_PICWIDTH_MASK)
<< CODA7_PICWIDTH_OFFSET;
- value |= (q_data_src->height & CODA7_PICHEIGHT_MASK)
+ value |= (q_data_src->rect.height & CODA7_PICHEIGHT_MASK)
<< CODA_PICHEIGHT_OFFSET;
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
@@ -1198,6 +1216,27 @@ static int coda_start_encoding(struct coda_ctx *ctx)
goto out;
/*
+ * If the visible width or height is not aligned to the macroblock
+ * size, the crop_right and crop_bottom SPS fields must be set to
+ * the difference between visible and coded size. Only CODA960
+ * firmware supports writing these frame cropping parameters; all
+ * other cores require fixing up the SPS RBSP (Sequence Parameter
+ * Set Raw Byte Sequence Payload) manually.
+ */
+ if (ctx->dev->devtype->product != CODA_960 &&
+ ((q_data_src->rect.width % 16) ||
+ (q_data_src->rect.height % 16))) {
+ ret = coda_h264_sps_fixup(ctx, q_data_src->rect.width,
+ q_data_src->rect.height,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0],
+ sizeof(ctx->vpu_header[0]));
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
* Get PPS in the first frame and copy it to an
* intermediate buffer.
*/
@@ -1362,7 +1401,8 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
if (dev->devtype->product == CODA_960) {
coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
- coda_write(dev, q_data_src->width, CODA9_CMD_ENC_PIC_SRC_STRIDE);
+ coda_write(dev, q_data_src->bytesperline,
+ CODA9_CMD_ENC_PIC_SRC_STRIDE);
coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
reg = CODA9_CMD_ENC_PIC_SRC_ADDR_Y;
@@ -1582,10 +1622,8 @@ static int coda_decoder_reqbufs(struct coda_ctx *ctx,
static bool coda_reorder_enable(struct coda_ctx *ctx)
{
- const char * const *profile_names;
- const char * const *level_names;
struct coda_dev *dev = ctx->dev;
- int profile, level;
+ int profile;
if (dev->devtype->product != CODA_HX4 &&
dev->devtype->product != CODA_7541 &&
@@ -1599,24 +1637,9 @@ static bool coda_reorder_enable(struct coda_ctx *ctx)
return true;
profile = coda_h264_profile(ctx->params.h264_profile_idc);
- if (profile < 0) {
- v4l2_warn(&dev->v4l2_dev, "Invalid H264 Profile: %d\n",
- ctx->params.h264_profile_idc);
- return false;
- }
-
- level = coda_h264_level(ctx->params.h264_level_idc);
- if (level < 0) {
- v4l2_warn(&dev->v4l2_dev, "Invalid H264 Level: %d\n",
- ctx->params.h264_level_idc);
- return false;
- }
-
- profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
- level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "H264 Profile/Level: %s L%s\n",
- profile_names[profile], level_names[level]);
+ if (profile < 0)
+ v4l2_warn(&dev->v4l2_dev, "Unknown H264 Profile: %u\n",
+ ctx->params.h264_profile_idc);
/* Baseline profile does not support reordering */
return profile > V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -1694,6 +1717,8 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
coda_write(dev, 512, CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE);
}
}
+ if (src_fourcc == V4L2_PIX_FMT_JPEG)
+ coda_write(dev, 0, CODA_CMD_DEC_SEQ_JPG_THUMB_EN);
if (dev->devtype->product != CODA_960)
coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index c7631e117dd3..726b3b93a486 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -569,8 +569,6 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
f->fmt.pix.height * 2;
break;
case V4L2_PIX_FMT_JPEG:
- f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- /* fallthrough */
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_MPEG4:
case V4L2_PIX_FMT_MPEG2:
@@ -935,6 +933,40 @@ static int coda_g_selection(struct file *file, void *fh,
return 0;
}
+static int coda_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_q_data *q_data;
+
+ if (ctx->inst_type == CODA_INST_ENCODER &&
+ s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->target == V4L2_SEL_TGT_CROP) {
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = clamp(s->r.width, 2U, q_data->width);
+ s->r.height = clamp(s->r.height, 2U, q_data->height);
+
+ if (s->flags & V4L2_SEL_FLAG_LE) {
+ s->r.width = round_up(s->r.width, 2);
+ s->r.height = round_up(s->r.height, 2);
+ } else {
+ s->r.width = round_down(s->r.width, 2);
+ s->r.height = round_down(s->r.height, 2);
+ }
+
+ q_data->rect = s->r;
+
+ return 0;
+ }
+
+ return coda_g_selection(file, fh, s);
+}
+
static int coda_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
@@ -1148,6 +1180,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = coda_g_selection,
+ .vidioc_s_selection = coda_s_selection,
.vidioc_try_encoder_cmd = coda_try_encoder_cmd,
.vidioc_encoder_cmd = coda_encoder_cmd,
@@ -1297,28 +1330,10 @@ static void coda_job_abort(void *priv)
"Aborting task\n");
}
-static void coda_lock(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
- struct coda_dev *pcdev = ctx->dev;
-
- mutex_lock(&pcdev->dev_mutex);
-}
-
-static void coda_unlock(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
- struct coda_dev *pcdev = ctx->dev;
-
- mutex_unlock(&pcdev->dev_mutex);
-}
-
static const struct v4l2_m2m_ops coda_m2m_ops = {
.device_run = coda_device_run,
.job_ready = coda_job_ready,
.job_abort = coda_job_abort,
- .lock = coda_lock,
- .unlock = coda_unlock,
};
static void set_default_params(struct coda_ctx *ctx)
@@ -1413,6 +1428,72 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
return 0;
}
+static void coda_update_menu_ctrl(struct v4l2_ctrl *ctrl, int value)
+{
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+
+ /*
+ * Extend the control range if the parsed stream contains a known
+ * but otherwise unsupported profile or level value.
+ */
+ if (value > ctrl->maximum) {
+ __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, value,
+ ctrl->menu_skip_mask & ~(1 << value),
+ ctrl->default_value);
+ } else if (value < ctrl->minimum) {
+ __v4l2_ctrl_modify_range(ctrl, value, ctrl->maximum,
+ ctrl->menu_skip_mask & ~(1 << value),
+ ctrl->default_value);
+ }
+
+ __v4l2_ctrl_s_ctrl(ctrl, value);
+
+ v4l2_ctrl_unlock(ctrl);
+}
+
+static void coda_update_h264_profile_ctrl(struct coda_ctx *ctx)
+{
+ const char * const *profile_names;
+ int profile;
+
+ profile = coda_h264_profile(ctx->params.h264_profile_idc);
+ if (profile < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Profile: %u\n",
+ ctx->params.h264_profile_idc);
+ return;
+ }
+
+ coda_update_menu_ctrl(ctx->h264_profile_ctrl, profile);
+
+ profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Profile: %s\n",
+ profile_names[profile]);
+}
+
+static void coda_update_h264_level_ctrl(struct coda_ctx *ctx)
+{
+ const char * const *level_names;
+ int level;
+
+ level = coda_h264_level(ctx->params.h264_level_idc);
+ if (level < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Level: %u\n",
+ ctx->params.h264_level_idc);
+ return;
+ }
+
+ coda_update_menu_ctrl(ctx->h264_level_ctrl, level);
+
+ level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Level: %s\n",
+ level_names[level]);
+}
+
static void coda_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -1441,8 +1522,11 @@ static void coda_buf_queue(struct vb2_buffer *vb)
* whether to enable reordering during sequence
* initialization.
*/
- if (!ctx->params.h264_profile_idc)
+ if (!ctx->params.h264_profile_idc) {
coda_sps_parse_profile(ctx, vb);
+ coda_update_h264_profile_ctrl(ctx);
+ coda_update_h264_level_ctrl(ctx);
+ }
}
mutex_lock(&ctx->bitstream_mutex);
@@ -1538,12 +1622,12 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
goto out;
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if ((q_data_src->width != q_data_dst->width &&
- round_up(q_data_src->width, 16) != q_data_dst->width) ||
- (q_data_src->height != q_data_dst->height &&
- round_up(q_data_src->height, 16) != q_data_dst->height)) {
+ if ((q_data_src->rect.width != q_data_dst->width &&
+ round_up(q_data_src->rect.width, 16) != q_data_dst->width) ||
+ (q_data_src->rect.height != q_data_dst->height &&
+ round_up(q_data_src->rect.height, 16) != q_data_dst->height)) {
v4l2_err(v4l2_dev, "can't convert %dx%d to %dx%d\n",
- q_data_src->width, q_data_src->height,
+ q_data_src->rect.width, q_data_src->rect.height,
q_data_dst->width, q_data_dst->height);
ret = -EINVAL;
goto err;
@@ -1652,6 +1736,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
ctx->bitstream.vaddr, ctx->bitstream.size);
ctx->runcounter = 0;
ctx->aborting = 0;
+ ctx->hold = false;
}
if (!ctx->streamon_out && !ctx->streamon_cap)
@@ -1880,6 +1965,47 @@ static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100, 1, 0);
}
+static void coda_decode_ctrls(struct coda_ctx *ctx)
+{
+ u64 mask;
+ u8 max;
+
+ ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+ if (ctx->h264_profile_ctrl)
+ ctx->h264_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ if (ctx->dev->devtype->product == CODA_HX4 ||
+ ctx->dev->devtype->product == CODA_7541) {
+ max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
+ } else if (ctx->dev->devtype->product == CODA_960) {
+ max = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1));
+ } else {
+ return;
+ }
+ ctx->h264_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, max, mask,
+ max);
+ if (ctx->h264_level_ctrl)
+ ctx->h264_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+}
+
static int coda_ctrls_setup(struct coda_ctx *ctx)
{
v4l2_ctrl_handler_init(&ctx->ctrls, 2);
@@ -1893,6 +2019,9 @@ static int coda_ctrls_setup(struct coda_ctx *ctx)
coda_jpeg_encode_ctrls(ctx);
else
coda_encode_ctrls(ctx);
+ } else {
+ if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_H264)
+ coda_decode_ctrls(ctx);
}
if (ctx->ctrls.error) {
@@ -2092,9 +2221,9 @@ static int coda_open(struct file *file)
INIT_LIST_HEAD(&ctx->buffer_meta_list);
spin_lock_init(&ctx->buffer_meta_lock);
- coda_lock(ctx);
+ mutex_lock(&dev->dev_mutex);
list_add(&ctx->list, &dev->instances);
- coda_unlock(ctx);
+ mutex_unlock(&dev->dev_mutex);
v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
ctx->idx, ctx);
@@ -2142,9 +2271,9 @@ static int coda_release(struct file *file)
flush_work(&ctx->seq_end_work);
}
- coda_lock(ctx);
+ mutex_lock(&dev->dev_mutex);
list_del(&ctx->list);
- coda_unlock(ctx);
+ mutex_unlock(&dev->dev_mutex);
if (ctx->dev->devtype->product == CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
index 0e27412e01f5..635356a839cf 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -108,6 +108,325 @@ int coda_h264_level(int level_idc)
case 32: return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
case 40: return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
case 41: return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ case 42: return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ case 50: return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ case 51: return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
default: return -EINVAL;
}
}
+
+struct rbsp {
+ char *buf;
+ int size;
+ int pos;
+};
+
+static inline int rbsp_read_bit(struct rbsp *rbsp)
+{
+ int shift = 7 - (rbsp->pos % 8);
+ int ofs = rbsp->pos++ / 8;
+
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ return (rbsp->buf[ofs] >> shift) & 1;
+}
+
+static inline int rbsp_write_bit(struct rbsp *rbsp, int bit)
+{
+ int shift = 7 - (rbsp->pos % 8);
+ int ofs = rbsp->pos++ / 8;
+
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ rbsp->buf[ofs] &= ~(1 << shift);
+ rbsp->buf[ofs] |= bit << shift;
+
+ return 0;
+}
+
+static inline int rbsp_read_bits(struct rbsp *rbsp, int num,
+ unsigned int *val)
+{
+ int i, ret;
+ unsigned int tmp = 0;
+
+ if (num > 32)
+ return -EINVAL;
+
+ for (i = 0; i < num; i++) {
+ ret = rbsp_read_bit(rbsp);
+ if (ret < 0)
+ return ret;
+ tmp |= ret << (num - i - 1);
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static int rbsp_write_bits(struct rbsp *rbsp, int num, int value)
+{
+ int ret;
+
+ while (num--) {
+ ret = rbsp_write_bit(rbsp, (value >> num) & 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
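+/*
+ * Read an unsigned Exp-Golomb (ue(v)) codeword: count the leading zero
+ * bits, then read that many suffix bits. For example, the bit string
+ * 00101 has two leading zeros and suffix 01b, decoding to
+ * (1 << 2) - 1 + 1 = 4.
+ */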
+static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *val)
+{
+ int leading_zero_bits = 0;
+ unsigned int tmp = 0;
+ int ret;
+
+ while ((ret = rbsp_read_bit(rbsp)) == 0)
+ leading_zero_bits++;
+ if (ret < 0)
+ return ret;
+
+ if (leading_zero_bits > 0) {
+ ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
+ if (ret)
+ return ret;
+ }
+
+ if (val)
+ *val = (1 << leading_zero_bits) - 1 + tmp;
+
+ return 0;
+}
+
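+/*
+ * Write an unsigned Exp-Golomb (ue(v)) codeword: value + 1 is written in
+ * fls(value + 1) bits, preceded by fls(value + 1) - 1 zero bits.
+ */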
+static int rbsp_write_uev(struct rbsp *rbsp, unsigned int value)
+{
+ int i;
+ int ret;
+ int tmp = value + 1;
+ int leading_zero_bits = fls(tmp) - 1;
+
+ for (i = 0; i < leading_zero_bits; i++) {
+ ret = rbsp_write_bit(rbsp, 0);
+ if (ret)
+ return ret;
+ }
+
+ return rbsp_write_bits(rbsp, leading_zero_bits + 1, tmp);
+}
+
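+/*
+ * Read a signed Exp-Golomb (se(v)) codeword. The unsigned codeword k maps
+ * to 0, 1, -1, 2, -2, ... for k = 0, 1, 2, 3, 4, ...
+ */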
+static int rbsp_read_sev(struct rbsp *rbsp, int *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = rbsp_read_uev(rbsp, &tmp);
+ if (ret)
+ return ret;
+
+ if (val) {
+ if (tmp & 1)
+ *val = (tmp + 1) / 2;
+ else
+ *val = -(tmp / 2);
+ }
+
+ return 0;
+}
+
+/**
+ * coda_h264_sps_fixup - fixes frame cropping values in h.264 SPS
+ * @ctx: encoder context
+ * @width: visible width
+ * @height: visible height
+ * @buf: buffer containing h.264 SPS RBSP, starting with NAL header
+ * @size: in/out pointer to the RBSP size, updated to the modified size
+ * @max_size: available size in buf
+ *
+ * Rewrites the frame cropping values in an h.264 SPS RBSP correctly for the
+ * given visible width and height.
+ */
+int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
+ int *size, int max_size)
+{
+ int profile_idc;
+ unsigned int pic_order_cnt_type;
+ unsigned int pic_width_in_mbs_minus1, pic_height_in_map_units_minus1;
+ int frame_mbs_only_flag, frame_cropping_flag;
+ int vui_parameters_present_flag;
+ unsigned int crop_right, crop_bottom;
+ struct rbsp sps;
+ int pos;
+ int ret;
+
+ if (*size < 8 || *size >= max_size)
+ return -EINVAL;
+
+ sps.buf = buf + 5; /* Skip NAL header */
+ sps.size = *size - 5;
+
+ profile_idc = sps.buf[0];
+ /*
+ * Skip profile_idc (read above), constraint_set[0-5]_flag,
+ * reserved_zero_2bits and level_idc: 24 bits in total.
+ */
+ sps.pos = 24;
+
+ /* seq_parameter_set_id */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 ||
+ profile_idc == 244 || profile_idc == 44 || profile_idc == 83 ||
+ profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ||
+ profile_idc == 138 || profile_idc == 139 || profile_idc == 134 ||
+ profile_idc == 135) {
+ dev_err(ctx->fh.vdev->dev_parent,
+ "%s: Handling profile_idc %d not implemented\n",
+ __func__, profile_idc);
+ return -EINVAL;
+ }
+
+ /* log2_max_frame_num_minus4 */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ ret = rbsp_read_uev(&sps, &pic_order_cnt_type);
+ if (ret)
+ return ret;
+
+ if (pic_order_cnt_type == 0) {
+ /* log2_max_pic_order_cnt_lsb_minus4 */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+ } else if (pic_order_cnt_type == 1) {
+ unsigned int i, num_ref_frames_in_pic_order_cnt_cycle;
+
+ /* delta_pic_order_always_zero_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ /* offset_for_non_ref_pic */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+ /* offset_for_top_to_bottom_field */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ ret = rbsp_read_uev(&sps,
+ &num_ref_frames_in_pic_order_cnt_cycle);
+ if (ret)
+ return ret;
+ for (i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
+ /* offset_for_ref_frame */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* max_num_ref_frames */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ /* gaps_in_frame_num_value_allowed_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ ret = rbsp_read_uev(&sps, &pic_width_in_mbs_minus1);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &pic_height_in_map_units_minus1);
+ if (ret)
+ return ret;
+ frame_mbs_only_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (!frame_mbs_only_flag) {
+ /* mb_adaptive_frame_field_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ }
+ /* direct_8x8_inference_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+
+ /* Mark position of the frame cropping flag */
+ pos = sps.pos;
+ frame_cropping_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (frame_cropping_flag) {
+ unsigned int crop_left, crop_top;
+
+ ret = rbsp_read_uev(&sps, &crop_left);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_right);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_top);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_bottom);
+ if (ret)
+ return ret;
+ }
+ vui_parameters_present_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (vui_parameters_present_flag) {
+ dev_err(ctx->fh.vdev->dev_parent,
+ "%s: Handling vui_parameters not implemented\n",
+ __func__);
+ return -EINVAL;
+ }
+
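+ /*
+ * The SPS crop offsets are in chroma sample units. For the 4:2:0
+ * streams accepted above this means CropUnitX = 2 and
+ * CropUnitY = 2 * (2 - frame_mbs_only_flag).
+ */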
+ crop_right = round_up(width, 16) - width;
+ crop_bottom = round_up(height, 16) - height;
+ crop_right /= 2;
+ if (frame_mbs_only_flag)
+ crop_bottom /= 2;
+ else
+ crop_bottom /= 4;
+
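+ /*
+ * Rewrite the tail of the SPS from the saved frame_cropping_flag
+ * position with the updated cropping window; the VUI flag and the
+ * rbsp stop bit are written anew and *size is recomputed.
+ */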
+ sps.size = max_size - 5;
+ sps.pos = pos;
+ frame_cropping_flag = 1;
+ ret = rbsp_write_bit(&sps, frame_cropping_flag);
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, 0); /* crop_left */
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, crop_right);
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, 0); /* crop_top */
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, crop_bottom);
+ if (ret)
+ return ret;
+ ret = rbsp_write_bit(&sps, 0); /* vui_parameters_present_flag */
+ if (ret)
+ return ret;
+ ret = rbsp_write_bit(&sps, 1); /* rbsp_stop_one_bit */
+ if (ret)
+ return ret;
+
+ *size = 5 + DIV_ROUND_UP(sps.pos, 8);
+
+ return 0;
+}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index c70cfab2433f..19ac0b9dc6eb 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -214,6 +214,8 @@ struct coda_ctx {
enum v4l2_quantization quantization;
struct coda_params params;
struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *h264_profile_ctrl;
+ struct v4l2_ctrl *h264_level_ctrl;
struct v4l2_fh fh;
int gopcounter;
int runcounter;
@@ -303,6 +305,8 @@ int coda_h264_padding(int size, char *p);
int coda_h264_profile(int profile_idc);
int coda_h264_level(int level_idc);
int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb);
+int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
+ int *size, int max_size);
bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
int coda_jpeg_write_tables(struct coda_ctx *ctx);
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 3b650b8aabe9..5e7b00a97671 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -157,6 +157,7 @@
#define CODA_CMD_DEC_SEQ_START_BYTE 0x190
#define CODA_CMD_DEC_SEQ_PS_BB_START 0x194
#define CODA_CMD_DEC_SEQ_PS_BB_SIZE 0x198
+#define CODA_CMD_DEC_SEQ_JPG_THUMB_EN 0x19c
#define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS 0x19c
#define CODA_MP4_CLASS_MPEG4 0
#define CODA_CMD_DEC_SEQ_X264_MV_EN 0x19c
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 85a66e4e2f9a..96ab4b61669a 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
diff --git a/drivers/media/platform/cros-ec-cec/Makefile b/drivers/media/platform/cros-ec-cec/Makefile
new file mode 100644
index 000000000000..9ce97f93febe
--- /dev/null
+++ b/drivers/media/platform/cros-ec-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_CROS_EC_CEC) += cros-ec-cec.o
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
new file mode 100644
index 000000000000..7bc4d8a9af28
--- /dev/null
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CEC driver for ChromeOS Embedded Controller
+ *
+ * Copyright (c) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/cec.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+
+#define DRV_NAME "cros-ec-cec"
+
+/**
+ * struct cros_ec_cec - Driver data for EC CEC
+ *
+ * @cros_ec: Pointer to EC device
+ * @notifier: Notifier info for responding to EC events
+ * @adap: CEC adapter
+ * @notify: CEC notifier pointer
+ * @rx_msg: storage for a received message
+ */
+struct cros_ec_cec {
+ struct cros_ec_device *cros_ec;
+ struct notifier_block notifier;
+ struct cec_adapter *adap;
+ struct cec_notifier *notify;
+ struct cec_msg rx_msg;
+};
+
+static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
+{
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ uint8_t *cec_message = cros_ec->event_data.data.cec_message;
+ unsigned int len = cros_ec->event_size;
+
+ cros_ec_cec->rx_msg.len = len;
+ memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
+
+ cec_received_msg(cros_ec_cec->adap, &cros_ec_cec->rx_msg);
+}
+
+static void handle_cec_event(struct cros_ec_cec *cros_ec_cec)
+{
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ uint32_t events = cros_ec->event_data.data.cec_events;
+
+ if (events & EC_MKBP_CEC_SEND_OK)
+ cec_transmit_attempt_done(cros_ec_cec->adap,
+ CEC_TX_STATUS_OK);
+
+ /* FW takes care of all retries, tell core to avoid more retries */
+ if (events & EC_MKBP_CEC_SEND_FAILED)
+ cec_transmit_attempt_done(cros_ec_cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES |
+ CEC_TX_STATUS_NACK);
+}
+
+static int cros_ec_cec_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_cec *cros_ec_cec;
+ struct cros_ec_device *cros_ec;
+
+ cros_ec_cec = container_of(nb, struct cros_ec_cec, notifier);
+ cros_ec = cros_ec_cec->cros_ec;
+
+ if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_EVENT) {
+ handle_cec_event(cros_ec_cec);
+ return NOTIFY_OK;
+ }
+
+ if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_MESSAGE) {
+ handle_cec_message(cros_ec_cec);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
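+/*
+ * EC host commands consist of a command header immediately followed by
+ * the parameter payload, hence the __packed header + params structs used
+ * below.
+ */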
+static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_set data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_SET;
+ msg.msg.outsize = sizeof(msg.data);
+ msg.data.cmd = CEC_CMD_LOGICAL_ADDRESS;
+ msg.data.val = logical_addr;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error setting CEC logical address on EC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *cec_msg)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_write data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_WRITE_MSG;
+ msg.msg.outsize = cec_msg->len;
+ memcpy(msg.data.msg, cec_msg->msg, cec_msg->len);
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error writing CEC msg on EC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_set data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_SET;
+ msg.msg.outsize = sizeof(msg.data);
+ msg.data.cmd = CEC_CMD_ENABLE;
+ msg.data.val = enable;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error %sabling CEC on EC: %d\n",
+ (enable ? "en" : "dis"), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct cec_adap_ops cros_ec_cec_ops = {
+ .adap_enable = cros_ec_cec_adap_enable,
+ .adap_log_addr = cros_ec_cec_set_log_addr,
+ .adap_transmit = cros_ec_cec_transmit,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_cec_suspend(struct device *dev)
+{
+ struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(cros_ec_cec->cros_ec->irq);
+
+ return 0;
+}
+
+static int cros_ec_cec_resume(struct device *dev)
+{
+ struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(cros_ec_cec->cros_ec->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops,
+ cros_ec_cec_suspend, cros_ec_cec_resume);
+
+#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI)
+
+/*
+ * The firmware only handles a single CEC interface, tied to a single HDMI
+ * connector that we specify along with the name of the DRM device that
+ * drives the HDMI output.
+ */
+
+struct cec_dmi_match {
+ char *sys_vendor;
+ char *product_name;
+ char *devname;
+ char *conn;
+};
+
+static const struct cec_dmi_match cec_dmi_match_table[] = {
+ /* Google Fizz */
+ { "Google", "Fizz", "0000:00:02.0", "Port B" },
+};
+
+static int cros_ec_cec_get_notifier(struct device *dev,
+ struct cec_notifier **notify)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cec_dmi_match_table); i++) {
+ const struct cec_dmi_match *m = &cec_dmi_match_table[i];
+
+ if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) &&
+ dmi_match(DMI_PRODUCT_NAME, m->product_name)) {
+ struct device *d;
+
+ /* Find the device, bail out if not yet registered */
+ d = bus_find_device_by_name(&pci_bus_type, NULL,
+ m->devname);
+ if (!d)
+ return -EPROBE_DEFER;
+
+ *notify = cec_notifier_get_conn(d, m->conn);
+ return 0;
+ }
+ }
+
+ /* Hardware support must be added in the cec_dmi_match_table */
+ dev_warn(dev, "CEC notifier not configured for this hardware\n");
+
+ return -ENODEV;
+}
+
+#else
+
+static int cros_ec_cec_get_notifier(struct device *dev,
+ struct cec_notifier **notify)
+{
+ return -ENODEV;
+}
+
+#endif
+
+static int cros_ec_cec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct cros_ec_cec *cros_ec_cec;
+ int ret;
+
+ cros_ec_cec = devm_kzalloc(&pdev->dev, sizeof(*cros_ec_cec),
+ GFP_KERNEL);
+ if (!cros_ec_cec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cros_ec_cec);
+ cros_ec_cec->cros_ec = cros_ec;
+
+ ret = cros_ec_cec_get_notifier(&pdev->dev, &cros_ec_cec->notify);
+ if (ret)
+ return ret;
+
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize wakeup\n");
+ return ret;
+ }
+
+ cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
+ DRV_NAME, CEC_CAP_DEFAULTS, 1);
+ if (IS_ERR(cros_ec_cec->adap))
+ return PTR_ERR(cros_ec_cec->adap);
+
+ /* Get CEC events from the EC. */
+ cros_ec_cec->notifier.notifier_call = cros_ec_cec_event;
+ ret = blocking_notifier_chain_register(&cros_ec->event_notifier,
+ &cros_ec_cec->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier\n");
+ cec_delete_adapter(cros_ec_cec->adap);
+ return ret;
+ }
+
+ ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_delete_adapter(cros_ec_cec->adap);
+ return ret;
+ }
+
+ cec_register_cec_notifier(cros_ec_cec->adap, cros_ec_cec->notify);
+
+ return 0;
+}
+
+static int cros_ec_cec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_cec *cros_ec_cec = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(
+ &cros_ec_cec->cros_ec->event_notifier,
+ &cros_ec_cec->notifier);
+
+ if (ret) {
+ dev_err(dev, "failed to unregister notifier\n");
+ return ret;
+ }
+
+ cec_unregister_adapter(cros_ec_cec->adap);
+
+ if (cros_ec_cec->notify)
+ cec_notifier_put(cros_ec_cec->notify);
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_cec_driver = {
+ .probe = cros_ec_cec_probe,
+ .remove = cros_ec_cec_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_ec_cec_pm_ops,
+ },
+};
+
+module_platform_driver(cros_ec_cec_driver);
+
+MODULE_DESCRIPTION("CEC driver for ChromeOS ECs");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 7f610320426d..c551a25d90d9 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -18,6 +18,7 @@
*
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index ba157827192c..ddcad7b3e76c 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 7be636237acf..0f324055cc9f 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
return err;
}
+static void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1255,11 +1263,6 @@ static __init int vpif_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!pdev->dev.platform_data) {
- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
- return -EINVAL;
- }
-
vpif_dev = &pdev->dev;
err = initialize_vpif();
@@ -1271,7 +1274,7 @@ static __init int vpif_probe(struct platform_device *pdev)
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
if (err) {
v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- return err;
+ goto vpif_free;
}
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
@@ -1314,7 +1317,10 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd[i])
vpif_obj.sd[i]->grp_id = 1 << i;
}
- vpif_probe_complete();
+ err = vpif_probe_complete();
+ if (err)
+ goto probe_subdev_out;
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
@@ -1334,6 +1340,8 @@ probe_subdev_out:
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();
return err;
}
@@ -1355,8 +1363,8 @@ static int vpif_remove(struct platform_device *device)
ch = vpif_obj.dev[i];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
- kfree(vpif_obj.dev[i]);
}
+ free_vpif_objs();
return 0;
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index e9ff27949a91..c9d2f6c5311a 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -713,7 +713,7 @@ static __poll_t gsc_m2m_poll(struct file *file,
__poll_t ret;
if (mutex_lock_interruptible(&gsc->lock))
- return -ERESTARTSYS;
+ return EPOLLERR;
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
mutex_unlock(&gsc->lock);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 55ba696b8cf4..a920164f53f1 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -384,12 +384,17 @@ static void __isp_video_try_fmt(struct fimc_isp *isp,
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
- *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+ const struct fimc_fmt *__fmt;
+
+ __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
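+/*
+ * Return the media bus code available on the CSID source pad for a given
+ * sink code. On 8x96 the 10-bit Bayer and greyscale inputs can also be
+ * output as their 2X8_PADHI_LE packed variants; on 8x16 the source code
+ * always matches the sink code.
+ */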
+
+ if (fmt)
+ *fmt = __fmt;
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
- pixm->num_planes = (*fmt)->memplanes;
- pixm->pixelformat = (*fmt)->fourcc;
+ pixm->num_planes = __fmt->memplanes;
+ pixm->pixelformat = __fmt->fourcc;
/*
* TODO: double check with the documentation that these width/height
* constraints are correct.
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 78b48a1fa26c..deb499f76412 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1201,8 +1201,7 @@ static const struct media_device_ops fimc_md_ops = {
static ssize_t fimc_md_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct fimc_md *fmd = platform_get_drvdata(pdev);
+ struct fimc_md *fmd = dev_get_drvdata(dev);
if (fmd->user_subdev_api)
return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
@@ -1214,8 +1213,7 @@ static ssize_t fimc_md_sysfs_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct fimc_md *fmd = platform_get_drvdata(pdev);
+ struct fimc_md *fmd = dev_get_drvdata(dev);
bool subdev_api;
int i;
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index cba46a656338..b4e28a299e26 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -891,8 +891,7 @@ e_clkput:
static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csis_state *state = sd_to_csis_state(sd);
int ret = 0;
@@ -921,8 +920,7 @@ static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
static int s5pcsis_pm_resume(struct device *dev, bool runtime)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csis_state *state = sd_to_csis_state(sd);
int ret = 0;
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index e41510ce69a4..0273302aa741 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1414,7 +1414,7 @@ static int viu_of_probe(struct platform_device *op)
sizeof(struct viu_reg), DRV_NAME)) {
dev_err(&op->dev, "Error while requesting mem region\n");
ret = -EBUSY;
- goto err;
+ goto err_irq;
}
/* remap registers */
@@ -1422,7 +1422,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_regs) {
dev_err(&op->dev, "Can't map register set\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
/* Prepare our private structure */
@@ -1430,7 +1430,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
viu_dev->vr = viu_regs;
@@ -1446,16 +1446,21 @@ static int viu_of_probe(struct platform_device *op)
ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
if (ret < 0) {
dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
- goto err;
+ goto err_irq;
}
ad = i2c_get_adapter(0);
+ if (!ad) {
+ ret = -ENODEV;
+ dev_err(&op->dev, "couldn't get i2c adapter\n");
+ goto err_v4l2;
+ }
v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(&op->dev, "couldn't register control\n");
- goto err_vdev;
+ goto err_i2c;
}
/* This control handler will inherit the control(s) from the
sub-device(s). */
@@ -1471,7 +1476,7 @@ static int viu_of_probe(struct platform_device *op)
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
- goto err_vdev;
+ goto err_hdl;
}
*vdev = viu_template;
@@ -1492,7 +1497,7 @@ static int viu_of_probe(struct platform_device *op)
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
- goto err_vdev;
+ goto err_unlock;
}
/* enable VIU clock */
@@ -1500,12 +1505,12 @@ static int viu_of_probe(struct platform_device *op)
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
- goto err_clk;
+ goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&op->dev, "failed to enable the clock!\n");
- goto err_clk;
+ goto err_vdev;
}
viu_dev->clk = clk;
@@ -1516,7 +1521,7 @@ static int viu_of_probe(struct platform_device *op)
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(&op->dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
- goto err_irq;
+ goto err_clk;
}
mutex_unlock(&viu_dev->lock);
@@ -1524,16 +1529,19 @@ static int viu_of_probe(struct platform_device *op)
dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
return ret;
-err_irq:
- clk_disable_unprepare(viu_dev->clk);
err_clk:
- video_unregister_device(viu_dev->vdev);
+ clk_disable_unprepare(viu_dev->clk);
err_vdev:
- v4l2_ctrl_handler_free(&viu_dev->hdl);
+ video_unregister_device(viu_dev->vdev);
+err_unlock:
mutex_unlock(&viu_dev->lock);
+err_hdl:
+ v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
i2c_put_adapter(ad);
+err_v4l2:
v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
irq_dispose_mapping(viu_irq);
return ret;
}
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 1e4195144f39..5f84d2aa47ab 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -181,20 +181,6 @@ static void deinterlace_job_abort(void *priv)
v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
}
-static void deinterlace_lock(void *priv)
-{
- struct deinterlace_ctx *ctx = priv;
- struct deinterlace_dev *pcdev = ctx->dev;
- mutex_lock(&pcdev->dev_mutex);
-}
-
-static void deinterlace_unlock(void *priv)
-{
- struct deinterlace_ctx *ctx = priv;
- struct deinterlace_dev *pcdev = ctx->dev;
- mutex_unlock(&pcdev->dev_mutex);
-}
-
static void dma_callback(void *data)
{
struct deinterlace_ctx *curr_ctx = data;
@@ -856,6 +842,8 @@ static const struct vb2_ops deinterlace_qops = {
.queue_setup = deinterlace_queue_setup,
.buf_prepare = deinterlace_buf_prepare,
.buf_queue = deinterlace_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
@@ -872,6 +860,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->lock = &ctx->dev->dev_mutex;
q_data[V4L2_M2M_SRC].fmt = &formats[0];
q_data[V4L2_M2M_SRC].width = 640;
q_data[V4L2_M2M_SRC].height = 480;
@@ -890,6 +879,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->dev = ctx->dev->v4l2_dev.dev;
+ dst_vq->lock = &ctx->dev->dev_mutex;
q_data[V4L2_M2M_DST].fmt = &formats[0];
q_data[V4L2_M2M_DST].width = 640;
q_data[V4L2_M2M_DST].height = 480;
@@ -956,9 +946,9 @@ static __poll_t deinterlace_poll(struct file *file,
struct deinterlace_ctx *ctx = file->private_data;
__poll_t ret;
- deinterlace_lock(ctx);
+ mutex_lock(&ctx->dev->dev_mutex);
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- deinterlace_unlock(ctx);
+ mutex_unlock(&ctx->dev->dev_mutex);
return ret;
}
@@ -992,8 +982,6 @@ static const struct v4l2_m2m_ops m2m_ops = {
.device_run = deinterlace_device_run,
.job_ready = deinterlace_job_ready,
.job_abort = deinterlace_job_abort,
- .lock = deinterlace_lock,
- .unlock = deinterlace_unlock,
};
static int deinterlace_probe(struct platform_device *pdev)
@@ -1040,7 +1028,6 @@ static int deinterlace_probe(struct platform_device *pdev)
}
video_set_drvdata(vfd, pcdev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", deinterlace_videodev.name);
v4l2_info(&pcdev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
" Device registered as /dev/video%d\n", vfd->num);
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index 8040a6285c3f..cd4be38ab5ac 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -524,7 +524,7 @@ static int meson_ao_cec_transmit(struct cec_adapter *adap, u8 attempts,
return ret;
if (reg == TX_BUSY) {
- dev_err(&ao_cec->pdev->dev, "%s: busy TX: aborting\n",
+ dev_dbg(&ao_cec->pdev->dev, "%s: busy TX: aborting\n",
__func__);
meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
}
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 328e8f650d9b..4f24da8afecc 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -861,14 +861,9 @@ static int mtk_jpeg_job_ready(void *priv)
return (ctx->state == MTK_JPEG_RUNNING) ? 1 : 0;
}
-static void mtk_jpeg_job_abort(void *priv)
-{
-}
-
static const struct v4l2_m2m_ops mtk_jpeg_m2m_ops = {
.device_run = mtk_jpeg_device_run,
.job_ready = mtk_jpeg_job_ready,
- .job_abort = mtk_jpeg_job_abort,
};
static int mtk_jpeg_queue_init(void *priv, struct vb2_queue *src_vq,
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index 583d47724ee8..ceffc31cc6eb 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -394,20 +394,6 @@ static bool mtk_mdp_ctx_state_is_set(struct mtk_mdp_ctx *ctx, u32 mask)
return ret;
}
-static void mtk_mdp_ctx_lock(struct vb2_queue *vq)
-{
- struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
-
- mutex_lock(&ctx->mdp_dev->lock);
-}
-
-static void mtk_mdp_ctx_unlock(struct vb2_queue *vq)
-{
- struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
-
- mutex_unlock(&ctx->mdp_dev->lock);
-}
-
static void mtk_mdp_set_frame_size(struct mtk_mdp_frame *frame, int width,
int height)
{
@@ -455,10 +441,6 @@ static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
pm_runtime_put(&ctx->mdp_dev->pdev->dev);
}
-static void mtk_mdp_m2m_job_abort(void *priv)
-{
-}
-
/* The color format (num_planes) must be already configured. */
static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx,
struct vb2_buffer *vb,
@@ -625,10 +607,10 @@ static const struct vb2_ops mtk_mdp_m2m_qops = {
.queue_setup = mtk_mdp_m2m_queue_setup,
.buf_prepare = mtk_mdp_m2m_buf_prepare,
.buf_queue = mtk_mdp_m2m_buf_queue,
- .wait_prepare = mtk_mdp_ctx_unlock,
- .wait_finish = mtk_mdp_ctx_lock,
.stop_streaming = mtk_mdp_m2m_stop_streaming,
.start_streaming = mtk_mdp_m2m_start_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
@@ -996,6 +978,7 @@ static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->dev = &ctx->mdp_dev->pdev->dev;
+ src_vq->lock = &ctx->mdp_dev->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1010,6 +993,7 @@ static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+ dst_vq->lock = &ctx->mdp_dev->lock;
return vb2_queue_init(dst_vq);
}
@@ -1227,7 +1211,6 @@ static const struct v4l2_file_operations mtk_mdp_m2m_fops = {
static const struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
.device_run = mtk_mdp_m2m_device_run,
- .job_abort = mtk_mdp_m2m_job_abort,
};
int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp)
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 86f0a7134365..0c8a8b4c4e1c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -1400,6 +1400,11 @@ int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
0, 32, 1, 1);
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl,
+ &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
if (ctx->ctrl_hdl.error) {
mtk_v4l2_err("Adding control failed %d",
@@ -1411,28 +1416,10 @@ int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
return 0;
}
-static void m2mops_vdec_lock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mtk_v4l2_debug(3, "[%d]", ctx->id);
- mutex_lock(&ctx->dev->dev_mutex);
-}
-
-static void m2mops_vdec_unlock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mtk_v4l2_debug(3, "[%d]", ctx->id);
- mutex_unlock(&ctx->dev->dev_mutex);
-}
-
const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
.device_run = m2mops_vdec_device_run,
.job_ready = m2mops_vdec_job_ready,
.job_abort = m2mops_vdec_job_abort,
- .lock = m2mops_vdec_lock,
- .unlock = m2mops_vdec_unlock,
};
static const struct vb2_ops mtk_vdec_vb2_ops = {
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 1b1a28abbf1f..6ad408514a99 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -1181,26 +1181,10 @@ static void m2mops_venc_job_abort(void *priv)
ctx->state = MTK_STATE_ABORT;
}
-static void m2mops_venc_lock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mutex_lock(&ctx->dev->dev_mutex);
-}
-
-static void m2mops_venc_unlock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mutex_unlock(&ctx->dev->dev_mutex);
-}
-
const struct v4l2_m2m_ops mtk_venc_m2m_ops = {
.device_run = m2mops_venc_device_run,
.job_ready = m2mops_venc_job_ready,
.job_abort = m2mops_venc_job_abort,
- .lock = m2mops_venc_lock,
- .unlock = m2mops_venc_unlock,
};
void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index 1ff6a93262b7..f8d35e3ac1dc 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -960,4 +960,4 @@ static struct platform_driver mtk_vpu_driver = {
module_platform_driver(mtk_vpu_driver);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Mediatek Video Prosessor Unit driver");
+MODULE_DESCRIPTION("Mediatek Video Processor Unit driver");
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 5a8eff60e95f..64195c4ddeaf 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -250,20 +250,6 @@ static void emmaprp_job_abort(void *priv)
v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
}
-static void emmaprp_lock(void *priv)
-{
- struct emmaprp_ctx *ctx = priv;
- struct emmaprp_dev *pcdev = ctx->dev;
- mutex_lock(&pcdev->dev_mutex);
-}
-
-static void emmaprp_unlock(void *priv)
-{
- struct emmaprp_ctx *ctx = priv;
- struct emmaprp_dev *pcdev = ctx->dev;
- mutex_unlock(&pcdev->dev_mutex);
-}
-
static inline void emmaprp_dump_regs(struct emmaprp_dev *pcdev)
{
dprintk(pcdev,
@@ -747,6 +733,8 @@ static const struct vb2_ops emmaprp_qops = {
.queue_setup = emmaprp_queue_setup,
.buf_prepare = emmaprp_buf_prepare,
.buf_queue = emmaprp_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
@@ -763,6 +751,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->lock = &ctx->dev->dev_mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -776,6 +765,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->dev = ctx->dev->v4l2_dev.dev;
+ dst_vq->lock = &ctx->dev->dev_mutex;
return vb2_queue_init(dst_vq);
}
@@ -885,8 +875,6 @@ static const struct video_device emmaprp_videodev = {
static const struct v4l2_m2m_ops m2m_ops = {
.device_run = emmaprp_device_run,
.job_abort = emmaprp_job_abort,
- .lock = emmaprp_lock,
- .unlock = emmaprp_unlock,
};
static int emmaprp_probe(struct platform_device *pdev)
@@ -934,7 +922,6 @@ static int emmaprp_probe(struct platform_device *pdev)
vfd->v4l2_dev = &pcdev->v4l2_dev;
video_set_drvdata(vfd, pcdev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", emmaprp_videodev.name);
pcdev->vfd = vfd;
v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
" Device registered as /dev/video%d\n", vfd->num);
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index d827b6c285a6..4b5e55d41ad4 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -8,6 +8,7 @@ config VIDEO_OMAP2_VOUT
depends on MMU
depends on FB_OMAP2 || (COMPILE_TEST && FB_OMAP2=n)
depends on ARCH_OMAP2 || ARCH_OMAP3 || COMPILE_TEST
+ depends on VIDEO_V4L2
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index f22cf351e3ee..842e2235047d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -300,7 +300,7 @@ static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
static int isp_xclk_init(struct isp_device *isp)
{
struct device_node *np = isp->dev->of_node;
- struct clk_init_data init;
+ struct clk_init_data init = {};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
@@ -971,7 +971,7 @@ static void isp_resume_module_pipeline(struct media_entity *me)
* Returns 0 if suspend left in idle state all the submodules properly,
* or returns 1 if a general Reset is required to suspend the submodules.
*/
-static int isp_suspend_modules(struct isp_device *isp)
+static int __maybe_unused isp_suspend_modules(struct isp_device *isp)
{
unsigned long timeout;
@@ -1005,7 +1005,7 @@ static int isp_suspend_modules(struct isp_device *isp)
* isp_resume_modules - Resume ISP submodules.
* @isp: OMAP3 ISP device
*/
-static void isp_resume_modules(struct isp_device *isp)
+static void __maybe_unused isp_resume_modules(struct isp_device *isp)
{
omap3isp_stat_resume(&isp->isp_aewb);
omap3isp_stat_resume(&isp->isp_af);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index d85ffbfb7c1f..b6e9e93bde7a 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2375,8 +2375,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
.src_maxburst = 8,
.direction = DMA_DEV_TO_MEM,
};
- dma_cap_mask_t mask;
- struct pxad_param params;
char clk_name[V4L2_CLK_NAME_SIZE];
int irq;
int err = 0, i;
@@ -2450,34 +2448,20 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->base = base;
/* request dma */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_cap_set(DMA_PRIVATE, mask);
-
- params.prio = 0;
- params.drcmr = 68;
- pcdev->dma_chans[0] =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &params, &pdev->dev, "CI_Y");
+ pcdev->dma_chans[0] = dma_request_slave_channel(&pdev->dev, "CI_Y");
if (!pcdev->dma_chans[0]) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
return -ENODEV;
}
- params.drcmr = 69;
- pcdev->dma_chans[1] =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &params, &pdev->dev, "CI_U");
+ pcdev->dma_chans[1] = dma_request_slave_channel(&pdev->dev, "CI_U");
if (!pcdev->dma_chans[1]) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
err = -ENODEV;
goto exit_free_dma_y;
}
- params.drcmr = 70;
- pcdev->dma_chans[2] =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &params, &pdev->dev, "CI_V");
+ pcdev->dma_chans[2] = dma_request_slave_channel(&pdev->dev, "CI_V");
if (!pcdev->dma_chans[2]) {
dev_err(&pdev->dev, "Can't request DMA for V\n");
err = -ENODEV;
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.h b/drivers/media/platform/qcom/camss-8x16/camss-vfe.h
deleted file mode 100644
index 53d5b66a9dfb..000000000000
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * camss-vfe.h
- *
- * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
- *
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef QC_MSM_CAMSS_VFE_H
-#define QC_MSM_CAMSS_VFE_H
-
-#include <linux/clk.h>
-#include <linux/spinlock_types.h>
-#include <media/media-entity.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-subdev.h>
-
-#include "camss-video.h"
-
-#define MSM_VFE_PAD_SINK 0
-#define MSM_VFE_PAD_SRC 1
-#define MSM_VFE_PADS_NUM 2
-
-#define MSM_VFE_LINE_NUM 4
-#define MSM_VFE_IMAGE_MASTERS_NUM 7
-#define MSM_VFE_COMPOSITE_IRQ_NUM 4
-
-#define MSM_VFE_VFE0_UB_SIZE 1023
-#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
-#define MSM_VFE_VFE1_UB_SIZE 1535
-#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-
-enum vfe_output_state {
- VFE_OUTPUT_OFF,
- VFE_OUTPUT_RESERVED,
- VFE_OUTPUT_SINGLE,
- VFE_OUTPUT_CONTINUOUS,
- VFE_OUTPUT_IDLE,
- VFE_OUTPUT_STOPPING
-};
-
-enum vfe_line_id {
- VFE_LINE_NONE = -1,
- VFE_LINE_RDI0 = 0,
- VFE_LINE_RDI1 = 1,
- VFE_LINE_RDI2 = 2,
- VFE_LINE_PIX = 3
-};
-
-struct vfe_output {
- u8 wm_num;
- u8 wm_idx[3];
-
- int active_buf;
- struct camss_buffer *buf[2];
- struct camss_buffer *last_buffer;
- struct list_head pending_bufs;
-
- unsigned int drop_update_idx;
-
- enum vfe_output_state state;
- unsigned int sequence;
- int wait_sof;
- int wait_reg_update;
- struct completion sof;
- struct completion reg_update;
-};
-
-struct vfe_line {
- enum vfe_line_id id;
- struct v4l2_subdev subdev;
- struct media_pad pads[MSM_VFE_PADS_NUM];
- struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM];
- struct v4l2_rect compose;
- struct v4l2_rect crop;
- struct camss_video video_out;
- struct vfe_output output;
-};
-
-struct vfe_device {
- u8 id;
- void __iomem *base;
- u32 irq;
- char irq_name[30];
- struct camss_clock *clock;
- int nclocks;
- struct completion reset_complete;
- struct completion halt_complete;
- struct mutex power_lock;
- int power_count;
- struct mutex stream_lock;
- int stream_count;
- spinlock_t output_lock;
- enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM];
- struct vfe_line line[MSM_VFE_LINE_NUM];
- u32 reg_update;
- u8 was_streaming;
-};
-
-struct resources;
-
-int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res);
-
-int msm_vfe_register_entities(struct vfe_device *vfe,
- struct v4l2_device *v4l2_dev);
-
-void msm_vfe_unregister_entities(struct vfe_device *vfe);
-
-void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id);
-void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id);
-
-void msm_vfe_stop_streaming(struct vfe_device *vfe);
-
-#endif /* QC_MSM_CAMSS_VFE_H */
diff --git a/drivers/media/platform/qcom/camss-8x16/Makefile b/drivers/media/platform/qcom/camss/Makefile
index 3c4024fbb768..f5e6e255f2a1 100644
--- a/drivers/media/platform/qcom/camss-8x16/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -3,8 +3,12 @@
qcom-camss-objs += \
camss.o \
camss-csid.o \
+ camss-csiphy-2ph-1-0.o \
+ camss-csiphy-3ph-1-0.o \
camss-csiphy.o \
camss-ispif.o \
+ camss-vfe-4-1.o \
+ camss-vfe-4-7.o \
camss-vfe.o \
camss-video.o \
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 226f36ef7419..729b31891466 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-csid.c
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
@@ -21,9 +13,11 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "camss-csid.h"
@@ -34,21 +28,35 @@
#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
-#define CAMSS_CSID_RST_CMD 0x00c
-#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n))
-#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n))
-#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060
-#define CAMSS_CSID_IRQ_MASK 0x064
-#define CAMSS_CSID_IRQ_STATUS 0x068
-#define CAMSS_CSID_TG_CTRL 0x0a0
+#define CAMSS_CSID_RST_CMD(v) ((v) == CAMSS_8x16 ? 0x00c : 0x010)
+#define CAMSS_CSID_CID_LUT_VC_n(v, n) \
+ (((v) == CAMSS_8x16 ? 0x010 : 0x014) + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG(v, n) \
+ (((v) == CAMSS_8x16 ? 0x020 : 0x024) + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG_ISPIF_EN BIT(0)
+#define CAMSS_CSID_CID_n_CFG_RDI_EN BIT(1)
+#define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4
+#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_8 (0 << 8)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16 (1 << 8)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB (0 << 9)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_MSB (1 << 9)
+#define CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP (0 << 10)
+#define CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING (1 << 10)
+#define CAMSS_CSID_IRQ_CLEAR_CMD(v) ((v) == CAMSS_8x16 ? 0x060 : 0x064)
+#define CAMSS_CSID_IRQ_MASK(v) ((v) == CAMSS_8x16 ? 0x064 : 0x068)
+#define CAMSS_CSID_IRQ_STATUS(v) ((v) == CAMSS_8x16 ? 0x068 : 0x06c)
+#define CAMSS_CSID_TG_CTRL(v) ((v) == CAMSS_8x16 ? 0x0a0 : 0x0a8)
#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436
#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437
-#define CAMSS_CSID_TG_VC_CFG 0x0a4
+#define CAMSS_CSID_TG_VC_CFG(v) ((v) == CAMSS_8x16 ? 0x0a4 : 0x0ac)
#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
-#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n))
-#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n))
-#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_0(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0ac : 0x0b4) + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_1(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0b0 : 0x0b8) + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_2(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0b4 : 0x0bc) + 0xc * (n))
#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
#define DATA_TYPE_YUV422_8BIT 0x1e
@@ -56,15 +64,17 @@
#define DATA_TYPE_RAW_8BIT 0x2a
#define DATA_TYPE_RAW_10BIT 0x2b
#define DATA_TYPE_RAW_12BIT 0x2c
+#define DATA_TYPE_RAW_14BIT 0x2d
#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0
#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3
+#define DECODE_FORMAT_UNCOMPRESSED_14_BIT 0x8
#define CSID_RESET_TIMEOUT_MS 500
-struct csid_fmts {
+struct csid_format {
u32 code;
u8 data_type;
u8 decode_format;
@@ -72,7 +82,7 @@ struct csid_fmts {
u8 spp; /* bus samples per pixel */
};
-static const struct csid_fmts csid_input_fmts[] = {
+static const struct csid_format csid_formats_8x16[] = {
{
MEDIA_BUS_FMT_UYVY8_2X8,
DATA_TYPE_YUV422_8BIT,
@@ -184,20 +194,241 @@ static const struct csid_fmts csid_input_fmts[] = {
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
- }
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
};
-static const struct csid_fmts *csid_get_fmt_entry(u32 code)
+static const struct csid_format csid_formats_8x96[] = {
+ {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+};
+
+static u32 csid_find_code(u32 *code, unsigned int n_code,
+ unsigned int index, u32 req_code)
+{
+ int i;
+
+ if (!req_code && (index >= n_code))
+ return 0;
+
+ for (i = 0; i < n_code; i++)
+ if (req_code) {
+ if (req_code == code[i])
+ return req_code;
+ } else {
+ if (i == index)
+ return code[i];
+ }
+
+ return code[0];
+}
+
+static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
+ unsigned int index, u32 src_req_code)
+{
+ if (csid->camss->version == CAMSS_8x16) {
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ } else if (csid->camss->version == CAMSS_8x96) {
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_Y10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ }
+ } else {
+ return 0;
+ }
+}
+
+static const struct csid_format *csid_get_fmt_entry(
+ const struct csid_format *formats,
+ unsigned int nformat,
+ u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
- if (code == csid_input_fmts[i].code)
- return &csid_input_fmts[i];
+ for (i = 0; i < nformat; i++)
+ if (code == formats[i].code)
+ return &formats[i];
WARN(1, "Unknown format\n");
- return &csid_input_fmts[0];
+ return &formats[0];
}
/*
@@ -210,10 +441,11 @@ static const struct csid_fmts *csid_get_fmt_entry(u32 code)
static irqreturn_t csid_isr(int irq, void *dev)
{
struct csid_device *csid = dev;
+ enum camss_version ver = csid->camss->version;
u32 value;
- value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS);
- writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD);
+ value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS(ver));
+ writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD(ver));
if ((value >> 11) & 0x1)
complete(&csid->reset_complete);
@@ -227,7 +459,7 @@ static irqreturn_t csid_isr(int irq, void *dev)
*/
static int csid_set_clock_rates(struct csid_device *csid)
{
- struct device *dev = to_device_index(csid, csid->id);
+ struct device *dev = csid->camss->dev;
u32 pixel_clock;
int i, j;
int ret;
@@ -240,11 +472,16 @@ static int csid_set_clock_rates(struct csid_device *csid)
struct camss_clock *clock = &csid->clock[i];
if (!strcmp(clock->name, "csi0") ||
- !strcmp(clock->name, "csi1")) {
- u8 bpp = csid_get_fmt_entry(
- csid->fmt[MSM_CSIPHY_PAD_SINK].code)->bpp;
+ !strcmp(clock->name, "csi1") ||
+ !strcmp(clock->name, "csi2") ||
+ !strcmp(clock->name, "csi3")) {
+ const struct csid_format *f = csid_get_fmt_entry(
+ csid->formats,
+ csid->nformats,
+ csid->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 num_lanes = csid->phy.lane_cnt;
- u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4);
+ u64 min_rate = pixel_clock * f->bpp /
+ (2 * num_lanes * 4);
long rate;
camss_add_clock_margin(&min_rate);
@@ -294,13 +531,13 @@ static int csid_reset(struct csid_device *csid)
reinit_completion(&csid->reset_complete);
- writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD);
+ writel_relaxed(0x7fff, csid->base +
+ CAMSS_CSID_RST_CMD(csid->camss->version));
time = wait_for_completion_timeout(&csid->reset_complete,
msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
if (!time) {
- dev_err(to_device_index(csid, csid->id),
- "CSID reset timeout\n");
+ dev_err(csid->camss->dev, "CSID reset timeout\n");
return -EIO;
}
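The reset handshake pairs with csid_isr() above: the hardware raises bit 11 of the version-indexed IRQ status register when the 0x7fff strobe completes, and the ISR converts that into the completion this function waits on. Condensed view of the producer/consumer pair (sketch):

    /* csid_reset(): consumer */
    reinit_completion(&csid->reset_complete);
    writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD(ver));
    /* csid_isr(): producer */
    if ((value >> 11) & 0x1)
        complete(&csid->reset_complete);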
@@ -317,25 +554,33 @@ static int csid_reset(struct csid_device *csid)
static int csid_set_power(struct v4l2_subdev *sd, int on)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
- struct device *dev = to_device_index(csid, csid->id);
+ struct device *dev = csid->camss->dev;
int ret;
if (on) {
u32 hw_version;
- ret = regulator_enable(csid->vdda);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
+ ret = regulator_enable(csid->vdda);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
ret = csid_set_clock_rates(csid);
if (ret < 0) {
regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
return ret;
}
ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
if (ret < 0) {
regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
return ret;
}
@@ -346,6 +591,7 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
return ret;
}
@@ -355,6 +601,7 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
ret = regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
}
return ret;
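Every successful pm_runtime_get_sync() in the power-on path is now balanced by a pm_runtime_put_sync() on each early-error exit and in the power-off path. The repeated inline cleanup could equally be written as a goto ladder; a minimal sketch of the equivalent structure (hypothetical labels, same ordering as above):

    ret = pm_runtime_get_sync(dev);
    if (ret < 0)
        return ret;
    ret = regulator_enable(csid->vdda);
    if (ret < 0)
        goto err_pm;
    ret = csid_set_clock_rates(csid);
    if (ret < 0)
        goto err_reg;
    ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
    if (ret < 0)
        goto err_reg;
    return 0;

    err_reg:
        regulator_disable(csid->vdda);
    err_pm:
        pm_runtime_put_sync(dev);
        return ret;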
@@ -373,6 +620,7 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct csid_testgen_config *tg = &csid->testgen;
+ enum camss_version ver = csid->camss->version;
u32 val;
if (enable) {
@@ -383,7 +631,7 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
ret = v4l2_ctrl_handler_setup(&csid->ctrls);
if (ret < 0) {
- dev_err(to_device_index(csid, csid->id),
+ dev_err(csid->camss->dev,
"could not sync v4l2 controls: %d\n", ret);
return ret;
}
@@ -392,40 +640,47 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
!media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
return -ENOLINK;
- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
- data_type;
-
if (tg->enabled) {
/* Config Test Generator */
struct v4l2_mbus_framefmt *f =
&csid->fmt[MSM_CSID_PAD_SRC];
- u8 bpp = csid_get_fmt_entry(f->code)->bpp;
- u8 spp = csid_get_fmt_entry(f->code)->spp;
- u32 num_bytes_per_line = f->width * bpp * spp / 8;
+ const struct csid_format *format = csid_get_fmt_entry(
+ csid->formats, csid->nformats, f->code);
+ u32 num_bytes_per_line =
+ f->width * format->bpp * format->spp / 8;
u32 num_lines = f->height;
/* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */
/* 1:0 VC */
val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) |
((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13);
- writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_VC_CFG(ver));
/* 28:16 bytes per lines, 12:0 num of lines */
val = ((num_bytes_per_line & 0x1fff) << 16) |
(num_lines & 0x1fff);
writel_relaxed(val, csid->base +
- CAMSS_CSID_TG_DT_n_CGG_0(0));
+ CAMSS_CSID_TG_DT_n_CGG_0(ver, 0));
+
+ dt = format->data_type;
/* 5:0 data type */
val = dt;
writel_relaxed(val, csid->base +
- CAMSS_CSID_TG_DT_n_CGG_1(0));
+ CAMSS_CSID_TG_DT_n_CGG_1(ver, 0));
/* 2:0 output test pattern */
val = tg->payload_mode;
writel_relaxed(val, csid->base +
- CAMSS_CSID_TG_DT_n_CGG_2(0));
+ CAMSS_CSID_TG_DT_n_CGG_2(ver, 0));
+
+ df = format->decode_format;
} else {
+ struct v4l2_mbus_framefmt *f =
+ &csid->fmt[MSM_CSID_PAD_SINK];
+ const struct csid_format *format = csid_get_fmt_entry(
+ csid->formats, csid->nformats, f->code);
struct csid_phy_config *phy = &csid->phy;
val = phy->lane_cnt - 1;
@@ -439,30 +694,54 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
writel_relaxed(val,
csid->base + CAMSS_CSID_CORE_CTRL_1);
+
+ dt = format->data_type;
+ df = format->decode_format;
}
/* Config LUT */
dt_shift = (cid % 4) * 8;
- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
- decode_format;
- val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
+ val = readl_relaxed(csid->base +
+ CAMSS_CSID_CID_LUT_VC_n(ver, vc));
val &= ~(0xff << dt_shift);
val |= dt << dt_shift;
- writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_CID_LUT_VC_n(ver, vc));
+
+ val = CAMSS_CSID_CID_n_CFG_ISPIF_EN;
+ val |= CAMSS_CSID_CID_n_CFG_RDI_EN;
+ val |= df << CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT;
+ val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP;
+
+ if (csid->camss->version == CAMSS_8x96) {
+ u32 sink_code = csid->fmt[MSM_CSID_PAD_SINK].code;
+ u32 src_code = csid->fmt[MSM_CSID_PAD_SRC].code;
+
+ if ((sink_code == MEDIA_BUS_FMT_SBGGR10_1X10 &&
+ src_code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) ||
+ (sink_code == MEDIA_BUS_FMT_Y10_1X10 &&
+ src_code == MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)) {
+ val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING;
+ val |= CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16;
+ val |= CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB;
+ }
+ }
- val = (df << 4) | 0x3;
- writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid));
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_CID_n_CFG(ver, cid));
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_ENABLE;
- writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_CTRL(ver));
}
} else {
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_DISABLE;
- writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_CTRL(ver));
}
}
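The named-bit form of the CID_n_CFG value replaces the old magic number: `(df << 4) | 0x3` implies that ISPIF_EN and RDI_EN occupy bits 0 and 1 and that the decode format field starts at bit 4. A sketch of definitions consistent with that expression — the actual values are not visible in this hunk:

    #define CAMSS_CSID_CID_n_CFG_ISPIF_EN            BIT(0)
    #define CAMSS_CSID_CID_n_CFG_RDI_EN              BIT(1)
    #define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4

On 8x96, when the sink/source pair selects a 2X8_PADHI_LE code, the extra PLAIN_PACKING, PLAIN_FORMAT_16 and PLAIN_ALIGNMENT_LSB bits switch the RDI path from raw dump to LSB-aligned 16-bit plain output.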
@@ -510,12 +789,12 @@ static void csid_try_format(struct csid_device *csid,
case MSM_CSID_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
- if (fmt->code == csid_input_fmts[i].code)
+ for (i = 0; i < csid->nformats; i++)
+ if (fmt->code == csid->formats[i].code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(csid_input_fmts))
+ if (i >= csid->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -528,23 +807,23 @@ static void csid_try_format(struct csid_device *csid,
case MSM_CSID_PAD_SRC:
if (csid->testgen_mode->cur.val == 0) {
- /* Test generator is disabled, keep pad formats */
- /* in sync - set and return a format same as sink pad */
- struct v4l2_mbus_framefmt format;
+ /* Test generator is disabled, */
+ /* keep pad formats in sync */
+ u32 code = fmt->code;
- format = *__csid_get_format(csid, cfg,
- MSM_CSID_PAD_SINK, which);
- *fmt = format;
+ *fmt = *__csid_get_format(csid, cfg,
+ MSM_CSID_PAD_SINK, which);
+ fmt->code = csid_src_pad_code(csid, fmt->code, 0, code);
} else {
- /* Test generator is enabled, set format on source*/
+ /* Test generator is enabled, set format on source */
/* pad to allow test generator usage */
- for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
- if (csid_input_fmts[i].code == fmt->code)
+ for (i = 0; i < csid->nformats; i++)
+ if (csid->formats[i].code == fmt->code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(csid_input_fmts))
+ if (i >= csid->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -570,27 +849,29 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_CSID_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(csid_input_fmts))
+ if (code->index >= csid->nformats)
return -EINVAL;
- code->code = csid_input_fmts[code->index].code;
+ code->code = csid->formats[code->index].code;
} else {
if (csid->testgen_mode->cur.val == 0) {
- if (code->index > 0)
- return -EINVAL;
+ struct v4l2_mbus_framefmt *sink_fmt;
- format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SINK,
- code->which);
+ sink_fmt = __csid_get_format(csid, cfg,
+ MSM_CSID_PAD_SINK,
+ code->which);
- code->code = format->code;
+ code->code = csid_src_pad_code(csid, sink_fmt->code,
+ code->index, 0);
+ if (!code->code)
+ return -EINVAL;
} else {
- if (code->index >= ARRAY_SIZE(csid_input_fmts))
+ if (code->index >= csid->nformats)
return -EINVAL;
- code->code = csid_input_fmts[code->index].code;
+ code->code = csid->formats[code->index].code;
}
}
@@ -798,17 +1079,30 @@ static const struct v4l2_ctrl_ops csid_ctrl_ops = {
*
* Return 0 on success or a negative error code otherwise
*/
-int msm_csid_subdev_init(struct csid_device *csid,
+int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
const struct resources *res, u8 id)
{
- struct device *dev = to_device_index(csid, id);
+ struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *r;
int i, j;
int ret;
+ csid->camss = camss;
csid->id = id;
+ if (camss->version == CAMSS_8x16) {
+ csid->formats = csid_formats_8x16;
+ csid->nformats =
+ ARRAY_SIZE(csid_formats_8x16);
+ } else if (camss->version == CAMSS_8x96) {
+ csid->formats = csid_formats_8x96;
+ csid->nformats =
+ ARRAY_SIZE(csid_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -980,6 +1274,8 @@ static int csid_link_setup(struct media_entity *entity,
static const struct v4l2_subdev_core_ops csid_core_ops = {
.s_power = csid_set_power,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops csid_video_ops = {
@@ -1020,12 +1316,13 @@ int msm_csid_register_entity(struct csid_device *csid,
{
struct v4l2_subdev *sd = &csid->subdev;
struct media_pad *pads = csid->pads;
- struct device *dev = to_device_index(csid, csid->id);
+ struct device *dev = csid->camss->dev;
int ret;
v4l2_subdev_init(sd, &csid_v4l2_ops);
sd->internal_ops = &csid_v4l2_internal_ops;
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 8682d3081bc3..1824b3745e10 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -1,19 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-csid.h
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_CSID_H
#define QC_MSM_CAMSS_CSID_H
@@ -50,6 +42,7 @@ struct csid_phy_config {
};
struct csid_device {
+ struct camss *camss;
u8 id;
struct v4l2_subdev subdev;
struct media_pad pads[MSM_CSID_PADS_NUM];
@@ -65,11 +58,13 @@ struct csid_device {
struct v4l2_mbus_framefmt fmt[MSM_CSID_PADS_NUM];
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *testgen_mode;
+ const struct csid_format *formats;
+ unsigned int nformats;
};
struct resources;
-int msm_csid_subdev_init(struct csid_device *csid,
+int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
const struct resources *res, u8 id);
int msm_csid_register_entity(struct csid_device *csid,
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
new file mode 100644
index 000000000000..c832539397d7
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csiphy-2ph-1-0.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module 2phase v1.0
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+
+#include "camss-csiphy.h"
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
+#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
+#define CAMSS_CSI_PHY_GLBL_RESET 0x140
+#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
+#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
+#define CAMSS_CSI_PHY_HW_VERSION 0x188
+#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n))
+#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
+#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
+
+static void csiphy_hw_version_read(struct csiphy_device *csiphy,
+ struct device *dev)
+{
+ u8 hw_version = readl_relaxed(csiphy->base +
+ CAMSS_CSI_PHY_HW_VERSION);
+
+ dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version);
+}
+
+/*
+ * csiphy_reset - Perform software reset on CSIPHY module
+ * @csiphy: CSIPHY device
+ */
+static void csiphy_reset(struct csiphy_device *csiphy)
+{
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+ usleep_range(5000, 8000);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+}
+
+/*
+ * csiphy_settle_cnt_calc - Calculate settle count value
+ *
+ * Helper function to calculate settle count value. This is
+ * based on the CSI2 T_hs_settle parameter which in turn
+ * is calculated based on the CSI2 transmitter pixel clock
+ * frequency.
+ *
+ * Return settle count value
+ */
+static u8 csiphy_settle_cnt_calc(u32 pixel_clock, u8 bpp, u8 num_lanes,
+ u32 timer_clk_rate)
+{
+ u32 mipi_clock; /* Hz */
+ u32 ui; /* ps */
+ u32 timer_period; /* ps */
+ u32 t_hs_prepare_max; /* ps */
+ u32 t_hs_prepare_zero_min; /* ps */
+ u32 t_hs_settle; /* ps */
+ u8 settle_cnt;
+
+ mipi_clock = pixel_clock * bpp / (2 * num_lanes);
+ ui = div_u64(1000000000000LL, mipi_clock);
+ ui /= 2;
+ t_hs_prepare_max = 85000 + 6 * ui;
+ t_hs_prepare_zero_min = 145000 + 10 * ui;
+ t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2;
+
+ timer_period = div_u64(1000000000000LL, timer_clk_rate);
+ settle_cnt = t_hs_settle / timer_period - 1;
+
+ return settle_cnt;
+}
+
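A worked example with assumed numbers (not taken from the driver): pixel_clock = 200 MHz, bpp = 10, num_lanes = 2, timer_clk_rate = 200 MHz.

    mipi_clock   = 200e6 * 10 / (2 * 2)                       = 500 MHz
    ui           = (10^12 / 500e6) / 2                        = 1000 ps
    t_hs_settle  = ((85000 + 6 * ui) + (145000 + 10 * ui)) / 2 = 123000 ps
    timer_period = 10^12 / 200e6                              = 5000 ps
    settle_cnt   = 123000 / 5000 - 1                          = 23
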
+static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 settle_cnt;
+ u8 val, l = 0;
+ int i = 0;
+
+ settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data,
+ csiphy->timer_clk_rate);
+
+ writel_relaxed(0x1, csiphy->base +
+ CAMSS_CSI_PHY_GLBL_T_INIT_CFG0);
+ writel_relaxed(0x1, csiphy->base +
+ CAMSS_CSI_PHY_T_WAKEUP_CFG0);
+
+ val = 0x1;
+ val |= lane_mask << 1;
+ writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+
+ val = cfg->combo_mode << 4;
+ writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = c->clk.pos;
+ else
+ l = c->data[i].pos;
+
+ writel_relaxed(0x10, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG2(l));
+ writel_relaxed(settle_cnt, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG3(l));
+ writel_relaxed(0x3f, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_MASKn(l));
+ writel_relaxed(0x3f, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(l));
+ }
+}
+
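The `i <= c->num_data` loop bound is deliberate: the data lanes are programmed first, and the final extra iteration reuses the same per-lane register block for the clock lane. Equivalent sketch of the lane walk:

    for (i = 0; i <= c->num_data; i++) {
        u8 l = (i == c->num_data) ? c->clk.pos : c->data[i].pos;
        /* per-lane registers live at a 0x40 * l stride */
    }
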
+static void csiphy_lanes_disable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 l = 0;
+ int i = 0;
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = c->clk.pos;
+ else
+ l = c->data[i].pos;
+
+ writel_relaxed(0x0, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG2(l));
+ }
+
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+}
+
+/*
+ * csiphy_isr - CSIPHY module interrupt handler
+ * @irq: Interrupt line
+ * @dev: CSIPHY device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csiphy_isr(int irq, void *dev)
+{
+ struct csiphy_device *csiphy = dev;
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 val = readl_relaxed(csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_STATUSn(i));
+ writel_relaxed(val, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ }
+
+ return IRQ_HANDLED;
+}
+
+const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
+ .hw_version_read = csiphy_hw_version_read,
+ .reset = csiphy_reset,
+ .lanes_enable = csiphy_lanes_enable,
+ .lanes_disable = csiphy_lanes_disable,
+ .isr = csiphy_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
new file mode 100644
index 000000000000..bcd0dfd33618
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csiphy-3ph-1-0.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module 3phase v1.0
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+
+#include "camss-csiphy.h"
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
+#define CSIPHY_3PH_LNn_CFG2(n) (0x004 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT BIT(3)
+#define CSIPHY_3PH_LNn_CFG3(n) (0x008 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG4(n) (0x00c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS 0xa4
+#define CSIPHY_3PH_LNn_CFG5(n) (0x010 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG5_T_HS_DTERM 0x02
+#define CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT 0x50
+#define CSIPHY_3PH_LNn_TEST_IMP(n) (0x01c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP 0xa
+#define CSIPHY_3PH_LNn_MISC1(n) (0x028 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_MISC1_IS_CLKLANE BIT(2)
+#define CSIPHY_3PH_LNn_CFG6(n) (0x02c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT BIT(0)
+#define CSIPHY_3PH_LNn_CFG7(n) (0x030 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG7_SWI_T_INIT 0x2
+#define CSIPHY_3PH_LNn_CFG8(n) (0x034 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP BIT(0)
+#define CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE BIT(1)
+#define CSIPHY_3PH_LNn_CFG9(n) (0x038 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP 0x1
+#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8
+
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0)
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1)
+#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n))
+
+static void csiphy_hw_version_read(struct csiphy_device *csiphy,
+ struct device *dev)
+{
+ u32 hw_version;
+
+ writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID,
+ csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+
+ hw_version = readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(12));
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(13)) << 8;
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(14)) << 16;
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24;
+
+ dev_dbg(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
+}
+
+/*
+ * csiphy_reset - Perform software reset on CSIPHY module
+ * @csiphy: CSIPHY device
+ */
+static void csiphy_reset(struct csiphy_device *csiphy)
+{
+ writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ usleep_range(5000, 8000);
+ writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+}
+
+static irqreturn_t csiphy_isr(int irq, void *dev)
+{
+ struct csiphy_device *csiphy = dev;
+ int i;
+
+ for (i = 0; i < 11; i++) {
+ int c = i + 22;
+ u8 val = readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(i));
+
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c));
+ }
+
+ writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+ writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+
+ for (i = 22; i < 33; i++)
+ writel_relaxed(0x0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csiphy_settle_cnt_calc - Calculate settle count value
+ *
+ * Helper function to calculate settle count value. This is
+ * based on the CSI2 T_hs_settle parameter which in turn
+ * is calculated based on the CSI2 transmitter pixel clock
+ * frequency.
+ *
+ * Return settle count value
+ */
+static u8 csiphy_settle_cnt_calc(u32 pixel_clock, u8 bpp, u8 num_lanes,
+ u32 timer_clk_rate)
+{
+ u32 mipi_clock; /* Hz */
+ u32 ui; /* ps */
+ u32 timer_period; /* ps */
+ u32 t_hs_prepare_max; /* ps */
+ u32 t_hs_settle; /* ps */
+ u8 settle_cnt;
+
+ mipi_clock = pixel_clock * bpp / (2 * num_lanes);
+ ui = div_u64(1000000000000LL, mipi_clock);
+ ui /= 2;
+ t_hs_prepare_max = 85000 + 6 * ui;
+ t_hs_settle = t_hs_prepare_max;
+
+ timer_period = div_u64(1000000000000LL, timer_clk_rate);
+ settle_cnt = t_hs_settle / timer_period - 6;
+
+ return settle_cnt;
+}
+
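Unlike the 2ph variant, the 3ph calculation settles on T_hs-prepare(max) alone (85 ns + 6 UI) and subtracts 6 rather than 1 from the count. Reusing the assumed numbers from the 2ph example above (ui = 1000 ps, timer_period = 5000 ps):

    t_hs_settle = 85000 + 6 * 1000   = 91000 ps
    settle_cnt  = 91000 / 5000 - 6   = 12
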
+static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 settle_cnt;
+ u8 val, l = 0;
+ int i;
+
+ settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data,
+ csiphy->timer_clk_rate);
+
+ val = BIT(c->clk.pos);
+ for (i = 0; i < c->num_data; i++)
+ val |= BIT(c->data[i].pos * 2);
+
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+
+ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = 7;
+ else
+ l = c->data[i].pos * 2;
+
+ val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
+ val |= 0x17;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
+
+ val = CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG2(l));
+
+ val = settle_cnt;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG3(l));
+
+ val = CSIPHY_3PH_LNn_CFG5_T_HS_DTERM |
+ CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG5(l));
+
+ val = CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG6(l));
+
+ val = CSIPHY_3PH_LNn_CFG7_SWI_T_INIT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG7(l));
+
+ val = CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP |
+ CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG8(l));
+
+ val = CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG9(l));
+
+ val = CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_TEST_IMP(l));
+
+ val = CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL;
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_LNn_CSI_LANE_CTRL15(l));
+ }
+
+ val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
+
+ val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG4(l));
+
+ val = CSIPHY_3PH_LNn_MISC1_IS_CLKLANE;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_MISC1(l));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(11));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(12));
+
+ val = 0xfb;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(13));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(14));
+
+ val = 0x7f;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(15));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(16));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(17));
+
+ val = 0xef;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(18));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(19));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(20));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(21));
+}
+
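The three writes after the loop still use l, which the final iteration left at 7 — the fixed clock-lane slot on the 3ph PHY: CFG1 is rewritten without the 0x17 data-lane bits, CFG4 gets T_HS_CLK_MISS, and MISC1 flags the block as the clock lane:

    /* post-loop: l == 7, the dedicated clock-lane register block */
    writel_relaxed(CSIPHY_3PH_LNn_MISC1_IS_CLKLANE,
                   csiphy->base + CSIPHY_3PH_LNn_MISC1(7));

The 0xff-pattern writes to CSI_COMMON_CTRLn(11)..(21) that follow appear to unmask the interrupt sources whose status registers csiphy_isr() above acknowledges through CTRLn(22)..(32); that mapping is inferred from the ISR, not stated in this hunk.
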
+static void csiphy_lanes_disable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg)
+{
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+}
+
+const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
+ .hw_version_read = csiphy_hw_version_read,
+ .reset = csiphy_reset,
+ .lanes_enable = csiphy_lanes_enable,
+ .lanes_disable = csiphy_lanes_disable,
+ .isr = csiphy_isr,
+};
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 7e61caba6a2d..4559f3b1b38c 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-csiphy.c
*
* Qualcomm MSM Camera Subsystem - CSIPHY Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2016-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/delay.h>
@@ -21,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -30,131 +23,75 @@
#define MSM_CSIPHY_NAME "msm_csiphy"
-#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
-#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
-#define CAMSS_CSI_PHY_GLBL_RESET 0x140
-#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
-#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
-#define CAMSS_CSI_PHY_HW_VERSION 0x188
-#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n))
-#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n))
-#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n))
-#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
-#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
-
-static const struct {
+struct csiphy_format {
u32 code;
u8 bpp;
-} csiphy_formats[] = {
- {
- MEDIA_BUS_FMT_UYVY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_VYUY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YUYV8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YVYU8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGBRG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGRBG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SRGGB8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGBRG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGRBG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SRGGB10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SBGGR12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGBRG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGRBG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SRGGB12_1X12,
- 12,
- }
+};
+
+static const struct csiphy_format csiphy_formats_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+};
+
+static const struct csiphy_format csiphy_formats_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
};
/*
* csiphy_get_bpp - map media bus format to bits per pixel
+ * @formats: supported media bus formats array
+ * @nformats: size of @formats array
* @code: media bus format code
*
* Return number of bits per pixel
*/
-static u8 csiphy_get_bpp(u32 code)
+static u8 csiphy_get_bpp(const struct csiphy_format *formats,
+ unsigned int nformats, u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++)
- if (code == csiphy_formats[i].code)
- return csiphy_formats[i].bpp;
+ for (i = 0; i < nformats; i++)
+ if (code == formats[i].code)
+ return formats[i].bpp;
WARN(1, "Unknown format\n");
- return csiphy_formats[0].bpp;
-}
-
-/*
- * csiphy_isr - CSIPHY module interrupt handler
- * @irq: Interrupt line
- * @dev: CSIPHY device
- *
- * Return IRQ_HANDLED on success
- */
-static irqreturn_t csiphy_isr(int irq, void *dev)
-{
- struct csiphy_device *csiphy = dev;
- u8 i;
-
- for (i = 0; i < 8; i++) {
- u8 val = readl_relaxed(csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_STATUSn(i));
- writel_relaxed(val, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
- writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
- writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
- writel_relaxed(0x0, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
- }
-
- return IRQ_HANDLED;
+ return formats[0].bpp;
}
/*
@@ -163,7 +100,7 @@ static irqreturn_t csiphy_isr(int irq, void *dev)
*/
static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
{
- struct device *dev = to_device_index(csiphy, csiphy->id);
+ struct device *dev = csiphy->camss->dev;
u32 pixel_clock;
int i, j;
int ret;
@@ -176,8 +113,10 @@ static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
struct camss_clock *clock = &csiphy->clock[i];
if (!strcmp(clock->name, "csiphy0_timer") ||
- !strcmp(clock->name, "csiphy1_timer")) {
- u8 bpp = csiphy_get_bpp(
+ !strcmp(clock->name, "csiphy1_timer") ||
+ !strcmp(clock->name, "csiphy2_timer")) {
+ u8 bpp = csiphy_get_bpp(csiphy->formats,
+ csiphy->nformats,
csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4);
@@ -221,17 +160,6 @@ static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
}
/*
- * csiphy_reset - Perform software reset on CSIPHY module
- * @csiphy: CSIPHY device
- */
-static void csiphy_reset(struct csiphy_device *csiphy)
-{
- writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
- usleep_range(5000, 8000);
- writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
-}
-
-/*
* csiphy_set_power - Power on/off CSIPHY module
* @sd: CSIPHY V4L2 subdevice
* @on: Requested power state
@@ -241,31 +169,38 @@ static void csiphy_reset(struct csiphy_device *csiphy)
static int csiphy_set_power(struct v4l2_subdev *sd, int on)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
- struct device *dev = to_device_index(csiphy, csiphy->id);
+ struct device *dev = csiphy->camss->dev;
if (on) {
- u8 hw_version;
int ret;
- ret = csiphy_set_clock_rates(csiphy);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
+ ret = csiphy_set_clock_rates(csiphy);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
ret = camss_enable_clocks(csiphy->nclocks, csiphy->clock, dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
return ret;
+ }
enable_irq(csiphy->irq);
- csiphy_reset(csiphy);
+ csiphy->ops->reset(csiphy);
- hw_version = readl_relaxed(csiphy->base +
- CAMSS_CSI_PHY_HW_VERSION);
- dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version);
+ csiphy->ops->hw_version_read(csiphy, dev);
} else {
disable_irq(csiphy->irq);
camss_disable_clocks(csiphy->nclocks, csiphy->clock);
+
+ pm_runtime_put_sync(dev);
}
return 0;
@@ -291,58 +226,6 @@ static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
}
/*
- * csiphy_settle_cnt_calc - Calculate settle count value
- * @csiphy: CSIPHY device
- *
- * Helper function to calculate settle count value. This is
- * based on the CSI2 T_hs_settle parameter which in turn
- * is calculated based on the CSI2 transmitter pixel clock
- * frequency.
- *
- * Return settle count value or 0 if the CSI2 pixel clock
- * frequency is not available
- */
-static u8 csiphy_settle_cnt_calc(struct csiphy_device *csiphy)
-{
- u8 bpp = csiphy_get_bpp(
- csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
- u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
- u32 pixel_clock; /* Hz */
- u32 mipi_clock; /* Hz */
- u32 ui; /* ps */
- u32 timer_period; /* ps */
- u32 t_hs_prepare_max; /* ps */
- u32 t_hs_prepare_zero_min; /* ps */
- u32 t_hs_settle; /* ps */
- u8 settle_cnt;
- int ret;
-
- ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock);
- if (ret) {
- dev_err(to_device_index(csiphy, csiphy->id),
- "Cannot get CSI2 transmitter's pixel clock\n");
- return 0;
- }
- if (!pixel_clock) {
- dev_err(to_device_index(csiphy, csiphy->id),
- "Got pixel clock == 0, cannot continue\n");
- return 0;
- }
-
- mipi_clock = pixel_clock * bpp / (2 * num_lanes);
- ui = div_u64(1000000000000LL, mipi_clock);
- ui /= 2;
- t_hs_prepare_max = 85000 + 6 * ui;
- t_hs_prepare_zero_min = 145000 + 10 * ui;
- t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2;
-
- timer_period = div_u64(1000000000000LL, csiphy->timer_clk_rate);
- settle_cnt = t_hs_settle / timer_period;
-
- return settle_cnt;
-}
-
-/*
* csiphy_stream_on - Enable streaming on CSIPHY module
* @csiphy: CSIPHY device
*
@@ -354,14 +237,24 @@ static u8 csiphy_settle_cnt_calc(struct csiphy_device *csiphy)
static int csiphy_stream_on(struct csiphy_device *csiphy)
{
struct csiphy_config *cfg = &csiphy->cfg;
+ u32 pixel_clock;
u8 lane_mask = csiphy_get_lane_mask(&cfg->csi2->lane_cfg);
- u8 settle_cnt;
+ u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats,
+ csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 val;
- int i = 0;
+ int ret;
- settle_cnt = csiphy_settle_cnt_calc(csiphy);
- if (!settle_cnt)
+ ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock);
+ if (ret) {
+ dev_err(csiphy->camss->dev,
+ "Cannot get CSI2 transmitter's pixel clock\n");
return -EINVAL;
+ }
+ if (!pixel_clock) {
+ dev_err(csiphy->camss->dev,
+ "Got pixel clock == 0, cannot continue\n");
+ return -EINVAL;
+ }
val = readl_relaxed(csiphy->base_clk_mux);
if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) {
@@ -372,34 +265,9 @@ static int csiphy_stream_on(struct csiphy_device *csiphy)
val |= cfg->csid_id;
}
writel_relaxed(val, csiphy->base_clk_mux);
+ wmb();
- writel_relaxed(0x1, csiphy->base +
- CAMSS_CSI_PHY_GLBL_T_INIT_CFG0);
- writel_relaxed(0x1, csiphy->base +
- CAMSS_CSI_PHY_T_WAKEUP_CFG0);
-
- val = 0x1;
- val |= lane_mask << 1;
- writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
-
- val = cfg->combo_mode << 4;
- writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
-
- while (lane_mask) {
- if (lane_mask & 0x1) {
- writel_relaxed(0x10, csiphy->base +
- CAMSS_CSI_PHY_LNn_CFG2(i));
- writel_relaxed(settle_cnt, csiphy->base +
- CAMSS_CSI_PHY_LNn_CFG3(i));
- writel_relaxed(0x3f, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_MASKn(i));
- writel_relaxed(0x3f, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
- }
-
- lane_mask >>= 1;
- i++;
- }
+ csiphy->ops->lanes_enable(csiphy, cfg, pixel_clock, bpp, lane_mask);
return 0;
}
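With the lane programming moved behind the per-generation ops, csiphy_stream_on() keeps only the CSID clock-mux selection, and the new wmb() guarantees the relaxed mux write reaches the hardware before ops->lanes_enable() starts configuring the PHY. The ordering contract in miniature:

    writel_relaxed(val, csiphy->base_clk_mux);  /* select routing to CSID */
    wmb();  /* commit the mux write before lane programming */
    csiphy->ops->lanes_enable(csiphy, cfg, pixel_clock, bpp, lane_mask);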
@@ -412,19 +280,7 @@ static int csiphy_stream_on(struct csiphy_device *csiphy)
*/
static void csiphy_stream_off(struct csiphy_device *csiphy)
{
- u8 lane_mask = csiphy_get_lane_mask(&csiphy->cfg.csi2->lane_cfg);
- int i = 0;
-
- while (lane_mask) {
- if (lane_mask & 0x1)
- writel_relaxed(0x0, csiphy->base +
- CAMSS_CSI_PHY_LNn_CFG2(i));
-
- lane_mask >>= 1;
- i++;
- }
-
- writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+ csiphy->ops->lanes_disable(csiphy, &csiphy->cfg);
}
@@ -489,12 +345,12 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
case MSM_CSIPHY_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++)
- if (fmt->code == csiphy_formats[i].code)
+ for (i = 0; i < csiphy->nformats; i++)
+ if (fmt->code == csiphy->formats[i].code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(csiphy_formats))
+ if (i >= csiphy->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -530,10 +386,10 @@ static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_CSIPHY_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(csiphy_formats))
+ if (code->index >= csiphy->nformats)
return -EINVAL;
- code->code = csiphy_formats[code->index].code;
+ code->code = csiphy->formats[code->index].code;
} else {
if (code->index > 0)
return -EINVAL;
@@ -677,18 +533,32 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
*
* Return 0 on success or a negative error code otherwise
*/
-int msm_csiphy_subdev_init(struct csiphy_device *csiphy,
+int msm_csiphy_subdev_init(struct camss *camss,
+ struct csiphy_device *csiphy,
const struct resources *res, u8 id)
{
- struct device *dev = to_device_index(csiphy, id);
+ struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *r;
int i, j;
int ret;
+ csiphy->camss = camss;
csiphy->id = id;
csiphy->cfg.combo_mode = 0;
+ if (camss->version == CAMSS_8x16) {
+ csiphy->ops = &csiphy_ops_2ph_1_0;
+ csiphy->formats = csiphy_formats_8x16;
+ csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x16);
+ } else if (camss->version == CAMSS_8x96) {
+ csiphy->ops = &csiphy_ops_3ph_1_0;
+ csiphy->formats = csiphy_formats_8x96;
+ csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -717,7 +587,8 @@ int msm_csiphy_subdev_init(struct csiphy_device *csiphy,
csiphy->irq = r->start;
snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d",
dev_name(dev), MSM_CSIPHY_NAME, csiphy->id);
- ret = devm_request_irq(dev, csiphy->irq, csiphy_isr,
+
+ ret = devm_request_irq(dev, csiphy->irq, csiphy->ops->isr,
IRQF_TRIGGER_RISING, csiphy->irq_name, csiphy);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
@@ -846,7 +717,7 @@ int msm_csiphy_register_entity(struct csiphy_device *csiphy,
{
struct v4l2_subdev *sd = &csiphy->subdev;
struct media_pad *pads = csiphy->pads;
- struct device *dev = to_device_index(csiphy, csiphy->id);
+ struct device *dev = csiphy->camss->dev;
int ret;
v4l2_subdev_init(sd, &csiphy_v4l2_ops);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index ba8781122065..376f865ad383 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -1,24 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-csiphy.h
*
* Qualcomm MSM Camera Subsystem - CSIPHY Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2016-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_CSIPHY_H
#define QC_MSM_CAMSS_CSIPHY_H
#include <linux/clk.h>
+#include <linux/interrupt.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
@@ -49,7 +42,22 @@ struct csiphy_config {
struct csiphy_csi2_cfg *csi2;
};
+struct csiphy_device;
+
+struct csiphy_hw_ops {
+ void (*hw_version_read)(struct csiphy_device *csiphy,
+ struct device *dev);
+ void (*reset)(struct csiphy_device *csiphy);
+ void (*lanes_enable)(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask);
+ void (*lanes_disable)(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg);
+ irqreturn_t (*isr)(int irq, void *dev);
+};
+
struct csiphy_device {
+ struct camss *camss;
u8 id;
struct v4l2_subdev subdev;
struct media_pad pads[MSM_CSIPHY_PADS_NUM];
@@ -62,11 +70,15 @@ struct csiphy_device {
u32 timer_clk_rate;
struct csiphy_config cfg;
struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM];
+ const struct csiphy_hw_ops *ops;
+ const struct csiphy_format *formats;
+ unsigned int nformats;
};
struct resources;
-int msm_csiphy_subdev_init(struct csiphy_device *csiphy,
+int msm_csiphy_subdev_init(struct camss *camss,
+ struct csiphy_device *csiphy,
const struct resources *res, u8 id);
int msm_csiphy_register_entity(struct csiphy_device *csiphy,
@@ -74,4 +86,7 @@ int msm_csiphy_register_entity(struct csiphy_device *csiphy,
void msm_csiphy_unregister_entity(struct csiphy_device *csiphy);
+extern const struct csiphy_hw_ops csiphy_ops_2ph_1_0;
+extern const struct csiphy_hw_ops csiphy_ops_3ph_1_0;
+
#endif /* QC_MSM_CAMSS_CSIPHY_H */
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 9d1af9353c1d..7f269021d08c 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-ispif.c
*
* Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
@@ -22,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -31,12 +24,6 @@
#define MSM_ISPIF_NAME "msm_ispif"
-#define ispif_line_array(ptr_line) \
- ((const struct ispif_line (*)[]) &(ptr_line[-(ptr_line->id)]))
-
-#define to_ispif(ptr_line) \
- container_of(ispif_line_array(ptr_line), struct ispif_device, ptr_line)
-
#define ISPIF_RST_CMD_0 0x008
#define ISPIF_RST_CMD_0_STROBED_RST_EN (1 << 0)
#define ISPIF_RST_CMD_0_MISC_LOGIC_RST (1 << 1)
@@ -89,6 +76,13 @@
(0x254 + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \
(0x264 + 0x200 * (m) + 0x4 * (n))
+/* PACK_CFG registers are 8x96 only */
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(m, n) \
+ (0x270 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(m, n) \
+ (0x27c + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(c) \
+ (1 << ((cid % 8) * 4))
#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \
(0x2c0 + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \
@@ -109,7 +103,7 @@ enum ispif_intf_cmd {
CMD_ALL_NO_CHANGE = 0xffffffff,
};
-static const u32 ispif_formats[] = {
+static const u32 ispif_formats_8x16[] = {
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_YUYV8_2X8,
@@ -126,16 +120,107 @@ static const u32 ispif_formats[] = {
MEDIA_BUS_FMT_SGBRG12_1X12,
MEDIA_BUS_FMT_SGRBG12_1X12,
MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_Y10_1X10,
+};
+
+static const u32 ispif_formats_8x96[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
};
/*
- * ispif_isr - ISPIF module interrupt handler
+ * ispif_isr_8x96 - ISPIF module interrupt handler for 8x96
* @irq: Interrupt line
* @dev: ISPIF device
*
* Return IRQ_HANDLED on success
*/
-static irqreturn_t ispif_isr(int irq, void *dev)
+static irqreturn_t ispif_isr_8x96(int irq, void *dev)
+{
+ struct ispif_device *ispif = dev;
+ u32 value0, value1, value2, value3, value4, value5;
+
+ value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0));
+ value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0));
+ value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0));
+ value3 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(1));
+ value4 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(1));
+ value5 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(1));
+
+ writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0));
+ writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0));
+ writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0));
+ writel_relaxed(value3, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(1));
+ writel_relaxed(value4, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(1));
+ writel_relaxed(value5, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(1));
+
+ writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+
+ if ((value0 >> 27) & 0x1)
+ complete(&ispif->reset_complete);
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix0 overflow\n");
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi0 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix1 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi1 overflow\n");
+
+ if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi2 overflow\n");
+
+ if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 pix0 overflow\n");
+
+ if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi0 overflow\n");
+
+ if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 pix1 overflow\n");
+
+ if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi1 overflow\n");
+
+ if (unlikely(value5 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi2 overflow\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ispif_isr_8x16 - ISPIF module interrupt handler for 8x16
+ * @irq: Interrupt line
+ * @dev: ISPIF device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t ispif_isr_8x16(int irq, void *dev)
{
struct ispif_device *ispif = dev;
u32 value0, value1, value2;
@@ -183,6 +268,14 @@ static int ispif_reset(struct ispif_device *ispif)
u32 val;
int ret;
+ ret = camss_pm_domain_on(to_camss(ispif), PM_DOMAIN_VFE0);
+ if (ret < 0)
+ return ret;
+
+ ret = camss_pm_domain_on(to_camss(ispif), PM_DOMAIN_VFE1);
+ if (ret < 0)
+ return ret;
+
ret = camss_enable_clocks(ispif->nclocks_for_reset,
ispif->clock_for_reset,
to_device(ispif));
@@ -215,12 +308,15 @@ static int ispif_reset(struct ispif_device *ispif)
msecs_to_jiffies(ISPIF_RESET_TIMEOUT_MS));
if (!time) {
dev_err(to_device(ispif), "ISPIF reset timeout\n");
- return -EIO;
+ ret = -EIO;
}
camss_disable_clocks(ispif->nclocks_for_reset, ispif->clock_for_reset);
- return 0;
+ camss_pm_domain_off(to_camss(ispif), PM_DOMAIN_VFE0);
+ camss_pm_domain_off(to_camss(ispif), PM_DOMAIN_VFE1);
+
+ return ret;
}
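ispif_reset() now brackets the register strobe with both VFE power domains, and the timeout path was fixed to fall through so the clocks are disabled and both domains released before -EIO is returned. One remaining asymmetry: if powering the second domain fails, the first stays on. A stricter variant would unwind it (hypothetical sketch):

    ret = camss_pm_domain_on(to_camss(ispif), PM_DOMAIN_VFE1);
    if (ret < 0) {
        camss_pm_domain_off(to_camss(ispif), PM_DOMAIN_VFE0);
        return ret;
    }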
/*
@@ -233,7 +329,7 @@ static int ispif_reset(struct ispif_device *ispif)
static int ispif_set_power(struct v4l2_subdev *sd, int on)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
- struct ispif_device *ispif = to_ispif(line);
+ struct ispif_device *ispif = line->ispif;
struct device *dev = to_device(ispif);
int ret = 0;
@@ -246,12 +342,19 @@ static int ispif_set_power(struct v4l2_subdev *sd, int on)
goto exit;
}
- ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto exit;
+ ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ goto exit;
+ }
+
ret = ispif_reset(ispif);
if (ret < 0) {
+ pm_runtime_put_sync(dev);
camss_disable_clocks(ispif->nclocks, ispif->clock);
goto exit;
}
@@ -266,6 +369,7 @@ static int ispif_set_power(struct v4l2_subdev *sd, int on)
goto exit;
} else if (ispif->power_count == 1) {
camss_disable_clocks(ispif->nclocks, ispif->clock);
+ pm_runtime_put_sync(dev);
}
ispif->power_count--;
@@ -578,6 +682,55 @@ static void ispif_config_irq(struct ispif_device *ispif, enum ispif_intf intf,
}
/*
+ * ispif_config_pack - Config packing for PRDI mode
+ * @ispif: ISPIF device
+ * @code: media bus format code
+ * @intf: VFE interface
+ * @cid: desired CID to handle
+ * @vfe: VFE HW module id
+ * @enable: enable or disable
+ */
+static void ispif_config_pack(struct ispif_device *ispif, u32 code,
+ enum ispif_intf intf, u8 cid, u8 vfe, u8 enable)
+{
+ u32 addr, val;
+
+ if (code != MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE &&
+ code != MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)
+ return;
+
+ switch (intf) {
+ case RDI0:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 0);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 0);
+ break;
+ case RDI1:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 1);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 1);
+ break;
+ case RDI2:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 2);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 2);
+ break;
+ default:
+ return;
+ }
+
+ if (enable)
+ val = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(cid);
+ else
+ val = 0;
+
+ writel_relaxed(val, ispif->base + addr);
+}
+
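PACK_CFG_0 covers CIDs 0-7 and PACK_CFG_1 covers CIDs 8-15, one nibble per CID, so the macro shifts a PLAIN-enable bit into the CID's nibble; the function is a no-op for any code other than the two 2X8_PADHI_LE variants. A hypothetical call enabling 16-bit plain unpacking for CID 2 arriving on VFE0 RDI0:

    ispif_config_pack(ispif, MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
                      RDI0, 2, 0, 1);
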
+/*
* ispif_set_intf_cmd - Set command to enable/disable interface
* @ispif: ISPIF device
* @cmd: interface command
@@ -619,7 +772,7 @@ static void ispif_set_intf_cmd(struct ispif_device *ispif, u8 cmd,
static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
- struct ispif_device *ispif = to_ispif(line);
+ struct ispif_device *ispif = line->ispif;
enum ispif_intf intf = line->interface;
u8 csid = line->csid_id;
u8 vfe = line->vfe_id;
@@ -645,6 +798,10 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
ispif_select_csid(ispif, intf, csid, vfe, 1);
ispif_select_cid(ispif, intf, cid, vfe, 1);
ispif_config_irq(ispif, intf, vfe, 1);
+ if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif_config_pack(ispif,
+ line->fmt[MSM_ISPIF_PAD_SINK].code,
+ intf, cid, vfe, 1);
ispif_set_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY,
intf, vfe, vc);
} else {
@@ -658,6 +815,10 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
mutex_lock(&ispif->config_lock);
+ if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif_config_pack(ispif,
+ line->fmt[MSM_ISPIF_PAD_SINK].code,
+ intf, cid, vfe, 0);
ispif_config_irq(ispif, intf, vfe, 0);
ispif_select_cid(ispif, intf, cid, vfe, 0);
ispif_select_csid(ispif, intf, csid, vfe, 0);
@@ -710,12 +871,12 @@ static void ispif_try_format(struct ispif_line *line,
case MSM_ISPIF_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(ispif_formats); i++)
- if (fmt->code == ispif_formats[i])
+ for (i = 0; i < line->nformats; i++)
+ if (fmt->code == line->formats[i])
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(ispif_formats))
+ if (i >= line->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -753,10 +914,10 @@ static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_ISPIF_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(ispif_formats))
+ if (code->index >= line->nformats)
return -EINVAL;
- code->code = ispif_formats[code->index];
+ code->code = line->formats[code->index];
} else {
if (code->index > 0)
return -EINVAL;
@@ -907,6 +1068,36 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
int i;
int ret;
+ /* Number of ISPIF lines - same as number of CSID hardware modules */
+ if (to_camss(ispif)->version == CAMSS_8x16)
+ ispif->line_num = 2;
+ else if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif->line_num = 4;
+ else
+ return -EINVAL;
+
+ ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+ GFP_KERNEL);
+ if (!ispif->line)
+ return -ENOMEM;
+
+ for (i = 0; i < ispif->line_num; i++) {
+ ispif->line[i].ispif = ispif;
+ ispif->line[i].id = i;
+
+ if (to_camss(ispif)->version == CAMSS_8x16) {
+ ispif->line[i].formats = ispif_formats_8x16;
+ ispif->line[i].nformats =
+ ARRAY_SIZE(ispif_formats_8x16);
+ } else if (to_camss(ispif)->version == CAMSS_8x96) {
+ ispif->line[i].formats = ispif_formats_8x96;
+ ispif->line[i].nformats =
+ ARRAY_SIZE(ispif_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+ }
+
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -935,8 +1126,14 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
ispif->irq = r->start;
snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s",
dev_name(dev), MSM_ISPIF_NAME);
- ret = devm_request_irq(dev, ispif->irq, ispif_isr,
+ if (to_camss(ispif)->version == CAMSS_8x16)
+ ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x16,
+ IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
+ else if (to_camss(ispif)->version == CAMSS_8x96)
+ ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x96,
IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
+ else
+ ret = -EINVAL;
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
@@ -987,9 +1184,6 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
clock->nfreqs = 0;
}
- for (i = 0; i < ARRAY_SIZE(ispif->line); i++)
- ispif->line[i].id = i;
-
mutex_init(&ispif->power_lock);
ispif->power_count = 0;
@@ -1108,7 +1302,7 @@ int msm_ispif_register_entities(struct ispif_device *ispif,
int ret;
int i;
- for (i = 0; i < ARRAY_SIZE(ispif->line); i++) {
+ for (i = 0; i < ispif->line_num; i++) {
struct v4l2_subdev *sd = &ispif->line[i].subdev;
struct media_pad *pads = ispif->line[i].pads;
@@ -1169,7 +1363,7 @@ void msm_ispif_unregister_entities(struct ispif_device *ispif)
mutex_destroy(&ispif->power_lock);
mutex_destroy(&ispif->config_lock);
- for (i = 0; i < ARRAY_SIZE(ispif->line); i++) {
+ for (i = 0; i < ispif->line_num; i++) {
struct v4l2_subdev *sd = &ispif->line[i].subdev;
v4l2_device_unregister_subdev(sd);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.h b/drivers/media/platform/qcom/camss/camss-ispif.h
index f668306020c3..1a5ba2425a42 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-ispif.h
+++ b/drivers/media/platform/qcom/camss/camss-ispif.h
@@ -1,19 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-ispif.h
*
* Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module
*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_ISPIF_H
#define QC_MSM_CAMSS_ISPIF_H
@@ -23,14 +15,11 @@
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-/* Number of ISPIF lines - same as number of CSID hardware modules */
-#define MSM_ISPIF_LINE_NUM 2
-
#define MSM_ISPIF_PAD_SINK 0
#define MSM_ISPIF_PAD_SRC 1
#define MSM_ISPIF_PADS_NUM 2
-#define MSM_ISPIF_VFE_NUM 1
+#define MSM_ISPIF_VFE_NUM 2
enum ispif_intf {
PIX0,
@@ -46,6 +35,7 @@ struct ispif_intf_cmd_reg {
};
struct ispif_line {
+ struct ispif_device *ispif;
u8 id;
u8 csid_id;
u8 vfe_id;
@@ -53,6 +43,8 @@ struct ispif_line {
struct v4l2_subdev subdev;
struct media_pad pads[MSM_ISPIF_PADS_NUM];
struct v4l2_mbus_framefmt fmt[MSM_ISPIF_PADS_NUM];
+ const u32 *formats;
+ unsigned int nformats;
};
struct ispif_device {
@@ -69,7 +61,8 @@ struct ispif_device {
struct mutex power_lock;
struct ispif_intf_cmd_reg intf_cmd[MSM_ISPIF_VFE_NUM];
struct mutex config_lock;
- struct ispif_line line[MSM_ISPIF_LINE_NUM];
+ unsigned int line_num;
+ struct ispif_line *line;
};
struct resources_ispif;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
new file mode 100644
index 000000000000..da3a9fed9f2d
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -0,0 +1,1018 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-4-1.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.1
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+
+#include "camss-vfe.h"
+
+#define VFE_0_HW_VERSION 0x000
+
+#define VFE_0_GLOBAL_RESET_CMD 0x00c
+#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
+#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
+#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
+#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
+#define VFE_0_GLOBAL_RESET_CMD_TIMER BIT(5)
+#define VFE_0_GLOBAL_RESET_CMD_PM BIT(6)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(7)
+#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(8)
+
+#define VFE_0_MODULE_CFG 0x018
+#define VFE_0_MODULE_CFG_DEMUX BIT(2)
+#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE BIT(3)
+#define VFE_0_MODULE_CFG_SCALE_ENC BIT(23)
+#define VFE_0_MODULE_CFG_CROP_ENC BIT(27)
+
+#define VFE_0_CORE_CFG 0x01c
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
+
+#define VFE_0_IRQ_CMD 0x024
+#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+
+#define VFE_0_IRQ_MASK_0 0x028
+#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
+#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_MASK_1 0x02c
+#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
+#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
+#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_CLEAR_0 0x030
+#define VFE_0_IRQ_CLEAR_1 0x034
+
+#define VFE_0_IRQ_STATUS_0 0x038
+#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_STATUS_1 0x03c
+#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
+#define VFE_0_VIOLATION_STATUS 0x48
+
+#define VFE_0_BUS_CMD 0x4c
+#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
+
+#define VFE_0_BUS_CFG 0x050
+
+#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(1)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
+ (0x088 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
+ (0x08c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
+
+#define VFE_0_BUS_PING_PONG_STATUS 0x268
+
+#define VFE_0_BUS_BDG_CMD 0x2c0
+#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
+#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
+#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
+
+#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
+#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
+#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
+#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) BIT(16 + (r))
+
+#define VFE_0_CAMIF_CMD 0x2f4
+#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
+#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
+#define VFE_0_CAMIF_CMD_NO_CHANGE 3
+#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
+#define VFE_0_CAMIF_CFG 0x2f8
+#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
+#define VFE_0_CAMIF_FRAME_CFG 0x300
+#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
+#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
+#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
+#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
+#define VFE_0_CAMIF_STATUS 0x31c
+#define VFE_0_CAMIF_STATUS_HALT BIT(31)
+
+#define VFE_0_REG_UPDATE 0x378
+#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
+#define VFE_0_REG_UPDATE_line_n(n) \
+ ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
+
+#define VFE_0_DEMUX_CFG 0x424
+#define VFE_0_DEMUX_CFG_PERIOD 0x3
+#define VFE_0_DEMUX_GAIN_0 0x428
+#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
+#define VFE_0_DEMUX_GAIN_1 0x42c
+#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
+#define VFE_0_DEMUX_EVEN_CFG 0x438
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
+#define VFE_0_DEMUX_ODD_CFG 0x43c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
+
+#define VFE_0_SCALE_ENC_Y_CFG 0x75c
+#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
+#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
+#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
+#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
+#define VFE_0_SCALE_ENC_CBCR_CFG 0x778
+#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
+#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
+#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
+#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
+
+#define VFE_0_CROP_ENC_Y_WIDTH 0x854
+#define VFE_0_CROP_ENC_Y_HEIGHT 0x858
+#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
+#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
+
+#define VFE_0_CLAMP_ENC_MAX_CFG 0x874
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
+#define VFE_0_CLAMP_ENC_MIN_CFG 0x878
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
+
+#define VFE_0_CGC_OVERRIDE_1 0x974
+#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) BIT(x)
+
+#define CAMIF_TIMEOUT_SLEEP_US 1000
+#define CAMIF_TIMEOUT_ALL_US 1000000
+
+#define MSM_VFE_VFE0_UB_SIZE 1023
+#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
+
+static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
+
+ dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+}
+
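+/* The write master UB on VFE0 is split evenly between the three RDI interfaces */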
+static u16 vfe_get_ub_size(u8 vfe_id)
+{
+ if (vfe_id == 0)
+ return MSM_VFE_VFE0_UB_SIZE_RDI;
+
+ return 0;
+}
+
+static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits & ~clr_bits, vfe->base + reg);
+}
+
+static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits | set_bits, vfe->base + reg);
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
+ VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
+ VFE_0_GLOBAL_RESET_CMD_PM |
+ VFE_0_GLOBAL_RESET_CMD_TIMER |
+ VFE_0_GLOBAL_RESET_CMD_REGISTER |
+ VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
+ VFE_0_GLOBAL_RESET_CMD_BUS |
+ VFE_0_GLOBAL_RESET_CMD_CAMIF |
+ VFE_0_GLOBAL_RESET_CMD_CORE;
+
+ writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
+}
+
+static void vfe_halt_request(struct vfe_device *vfe)
+{
+ writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
+ vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_halt_clear(struct vfe_device *vfe)
+{
+ writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+}
+
+static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
+}
+
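+/* CALC_WORD(w, M, N) = DIV_ROUND_UP(w * M, N); used to count 8-byte bus words per line */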
+#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
+
+static int vfe_word_per_line(u32 format, u32 pixel_per_line)
+{
+ int val = 0;
+
+ switch (format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ val = CALC_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CALC_WORD(pixel_per_line, 2, 8);
+ break;
+ }
+
+ return val;
+}
+
+static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
+ u16 *width, u16 *height, u16 *bytesperline)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ if (plane == 1)
+ *height /= 2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ break;
+ }
+}
+
+static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable)
+{
+ u32 reg;
+
+ if (enable) {
+ u16 width = 0, height = 0, bytesperline = 0, wpl;
+
+ vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
+
+ wpl = vfe_word_per_line(pix->pixelformat, width);
+
+ reg = height - 1;
+ reg |= ((wpl + 1) / 2 - 1) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+
+ wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
+
+ reg = 0x3;
+ reg |= (height - 1) << 4;
+ reg |= wpl << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ } else {
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ }
+}
+
+static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+
+ reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
+
+ reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
+ & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
+
+ writel_relaxed(reg,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+}
+
+static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
+ u32 pattern)
+{
+ writel_relaxed(pattern,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
+}
+
+static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
+ u16 offset, u16 depth)
+{
+ u32 reg;
+
+ reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
+ depth;
+ writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
+}
+
+static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
+{
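+ /* Make sure the new ping/pong addresses are visible before the reload command */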
+ wmb();
+ writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
+ wmb();
+}
+
+static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
+}
+
+static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
+}
+
+static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
+
+ return (reg >> wm) & 0x1;
+}
+
+static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
+{
+ if (enable)
+ writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
+ else
+ writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
+}
+
+static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
+ VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
+{
+ writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
+ vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
+}
+
+static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable)
+{
+ struct vfe_line *line = container_of(output, struct vfe_line, output);
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ unsigned int i;
+
+ for (i = 0; i < output->wm_num; i++) {
+ if (i == 0) {
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ } else if (i == 1) {
+ reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ } else {
+ /* On current devices output->wm_num is always <= 2 */
+ break;
+ }
+
+ if (output->wm_idx[i] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
+ reg);
+ }
+}
+
+static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable)
+{
+ /* VFE 4.1 has no realign buffer */
+}
+
+static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
+{
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
+ VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
+
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
+ cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
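+ /* Accumulate the pending per-line bit; it is dropped in vfe_reg_update_clear() */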
+ vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
+ wmb();
+ writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
+ wmb();
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+}
+
+static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
+ VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
+ VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ }
+}
+
+static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable)
+{
+ struct vfe_output *output = &vfe->line[line_id].output;
+ unsigned int i;
+ u32 irq_en0;
+ u32 irq_en1;
+ u32 comp_mask = 0;
+
+ irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
+ irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
+ for (i = 0; i < output->wm_num; i++) {
+ irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
+ output->wm_idx[i]);
+ comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
+ }
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ }
+}
+
+static void vfe_enable_irq_common(struct vfe_device *vfe)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
+ VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
+
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+}
+
+static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val, even_cfg, odd_cfg;
+
+ writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
+
+ val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
+
+ val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
+ break;
+ }
+
+ writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
+ writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
+}
+
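+/* Pick scaler interpolation resolution from the downscale ratio (16x, 8x, 4x, else) */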
+static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
+{
+ if (input / output >= 16)
+ return 0;
+
+ if (input / output >= 8)
+ return 1;
+
+ if (input / output >= 4)
+ return 2;
+
+ return 3;
+}
+
+static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 input, output;
+ u8 interp_reso;
+ u32 phase_mult;
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width;
+ output = line->compose.width;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
+
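+ /* Fixed-point phase step: input / output scaled by 2^(13 + interp_reso) */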
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height;
+ output = line->compose.height;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width;
+ output = line->compose.width / 2;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height;
+ output = line->compose.height;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
+ output = line->compose.height / 2;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
+}
+
+static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 first, last;
+
+ first = line->crop.left;
+ last = line->crop.left + line->crop.width - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
+
+ first = line->crop.left / 2;
+ last = line->crop.left / 2 + line->crop.width / 2 - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
+ first = line->crop.top / 2;
+ last = line->crop.top / 2 + line->crop.height / 2 - 1;
+ }
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
+}
+
+static void vfe_set_clamp_cfg(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
+
+ val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
+}
+
+static void vfe_set_qos(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
+ u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
+ writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static void vfe_set_ds(struct vfe_device *vfe)
+{
+ /* no DS configuration registers on VFE 4.1 */
+}
+
+static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
+
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
+ else
+ vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
+
+ wmb();
+}
+
+static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val;
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
+ break;
+ }
+
+ writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
+ val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
+
+ val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
+
+ val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
+}
+
+static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
+{
+ u32 cmd;
+
+ cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+ wmb();
+
+ if (enable)
+ cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
+ else
+ cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
+
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+}
+
+static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
+{
+ u32 val = VFE_0_MODULE_CFG_DEMUX |
+ VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
+ VFE_0_MODULE_CFG_SCALE_ENC |
+ VFE_0_MODULE_CFG_CROP_ENC;
+
+ if (enable)
+ writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
+ else
+ writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
+}
+
+static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
+{
+ u32 val;
+ int ret;
+
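+ /* Poll CAMIF_STATUS for the HALT bit in 1 ms steps, up to 1 s */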
+ ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
+ val,
+ (val & VFE_0_CAMIF_STATUS_HALT),
+ CAMIF_TIMEOUT_SLEEP_US,
+ CAMIF_TIMEOUT_ALL_US);
+ if (ret < 0)
+ dev_err(dev, "%s: camif stop timeout\n", __func__);
+
+ return ret;
+}
+
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
+{
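+ /* Read the latched status, clear it, then issue the global IRQ clear command */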
+ *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
+ *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
+
+ writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
+ writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
+ wmb();
+ writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
+}
+
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
+
+ pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 value0, value1;
+ int i, j;
+
+ vfe->ops->isr_read(vfe, &value0, &value1);
+
+ trace_printk("VFE: status0 = 0x%08x, status1 = 0x%08x\n",
+ value0, value1);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
+ vfe->isr_ops.reset_ack(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
+ vfe->ops->violation_read(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
+ vfe->isr_ops.halt_ack(vfe);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
+ vfe->isr_ops.reg_update(vfe, i);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
+ vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
+ vfe->isr_ops.sof(vfe, i);
+
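+ /* Composite done reports the PIX write masters; drop their individual ping-pong bits */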
+ for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
+ vfe->isr_ops.comp_done(vfe, i);
+ for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
+ if (vfe->wm_output_map[j] == VFE_LINE_PIX)
+ value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
+ }
+
+ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
+ vfe->isr_ops.wm_done(vfe, i);
+
+ return IRQ_HANDLED;
+}
+
+const struct vfe_hw_ops vfe_ops_4_1 = {
+ .hw_version_read = vfe_hw_version_read,
+ .get_ub_size = vfe_get_ub_size,
+ .global_reset = vfe_global_reset,
+ .halt_request = vfe_halt_request,
+ .halt_clear = vfe_halt_clear,
+ .wm_enable = vfe_wm_enable,
+ .wm_frame_based = vfe_wm_frame_based,
+ .wm_line_based = vfe_wm_line_based,
+ .wm_set_framedrop_period = vfe_wm_set_framedrop_period,
+ .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
+ .wm_set_ub_cfg = vfe_wm_set_ub_cfg,
+ .bus_reload_wm = vfe_bus_reload_wm,
+ .wm_set_ping_addr = vfe_wm_set_ping_addr,
+ .wm_set_pong_addr = vfe_wm_set_pong_addr,
+ .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
+ .bus_enable_wr_if = vfe_bus_enable_wr_if,
+ .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
+ .wm_set_subsample = vfe_wm_set_subsample,
+ .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
+ .set_xbar_cfg = vfe_set_xbar_cfg,
+ .set_realign_cfg = vfe_set_realign_cfg,
+ .set_rdi_cid = vfe_set_rdi_cid,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .enable_irq_wm_line = vfe_enable_irq_wm_line,
+ .enable_irq_pix_line = vfe_enable_irq_pix_line,
+ .enable_irq_common = vfe_enable_irq_common,
+ .set_demux_cfg = vfe_set_demux_cfg,
+ .set_scale_cfg = vfe_set_scale_cfg,
+ .set_crop_cfg = vfe_set_crop_cfg,
+ .set_clamp_cfg = vfe_set_clamp_cfg,
+ .set_qos = vfe_set_qos,
+ .set_ds = vfe_set_ds,
+ .set_cgc_override = vfe_set_cgc_override,
+ .set_camif_cfg = vfe_set_camif_cfg,
+ .set_camif_cmd = vfe_set_camif_cmd,
+ .set_module_cfg = vfe_set_module_cfg,
+ .camif_wait_for_stop = vfe_camif_wait_for_stop,
+ .isr_read = vfe_isr_read,
+ .violation_read = vfe_violation_read,
+ .isr = vfe_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
new file mode 100644
index 000000000000..4c584bffd179
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-4-7.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.7
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+
+#include "camss-vfe.h"
+
+#define VFE_0_HW_VERSION 0x000
+
+#define VFE_0_GLOBAL_RESET_CMD 0x018
+#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
+#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
+#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
+#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
+#define VFE_0_GLOBAL_RESET_CMD_PM BIT(5)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(6)
+#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(7)
+#define VFE_0_GLOBAL_RESET_CMD_DSP BIT(8)
+#define VFE_0_GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
+
+#define VFE_0_MODULE_LENS_EN 0x040
+#define VFE_0_MODULE_LENS_EN_DEMUX BIT(2)
+#define VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE BIT(3)
+
+#define VFE_0_MODULE_ZOOM_EN 0x04c
+#define VFE_0_MODULE_ZOOM_EN_SCALE_ENC BIT(1)
+#define VFE_0_MODULE_ZOOM_EN_CROP_ENC BIT(2)
+#define VFE_0_MODULE_ZOOM_EN_REALIGN_BUF BIT(9)
+
+#define VFE_0_CORE_CFG 0x050
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
+#define VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN BIT(4)
+
+#define VFE_0_IRQ_CMD 0x058
+#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+
+#define VFE_0_IRQ_MASK_0 0x05c
+#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
+#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_MASK_1 0x060
+#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
+#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
+#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_CLEAR_0 0x064
+#define VFE_0_IRQ_CLEAR_1 0x068
+
+#define VFE_0_IRQ_STATUS_0 0x06c
+#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_STATUS_1 0x070
+#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_COMPOSITE_MASK_0 0x074
+#define VFE_0_VIOLATION_STATUS 0x07c
+
+#define VFE_0_BUS_CMD 0x80
+#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
+
+#define VFE_0_BUS_CFG 0x084
+
+#define VFE_0_BUS_XBAR_CFG_x(x) (0x90 + 0x4 * ((x) / 2))
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(2)
+#define VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN BIT(3)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTRA (0x1 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER (0x2 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0x0
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 0xc
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 0xd
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 0xe
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT 1
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
+ (0x0c4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
+ (0x0c8 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
+
+#define VFE_0_BUS_PING_PONG_STATUS 0x338
+
+#define VFE_0_BUS_BDG_CMD 0x400
+#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x404
+#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa9aaa9
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x408
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x40c
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x410
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x414
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x418
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x41c
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x420
+#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa9
+
+#define VFE_0_BUS_BDG_DS_CFG_0 0x424
+#define VFE_0_BUS_BDG_DS_CFG_0_CFG 0xcccc0011
+#define VFE_0_BUS_BDG_DS_CFG_1 0x428
+#define VFE_0_BUS_BDG_DS_CFG_2 0x42c
+#define VFE_0_BUS_BDG_DS_CFG_3 0x430
+#define VFE_0_BUS_BDG_DS_CFG_4 0x434
+#define VFE_0_BUS_BDG_DS_CFG_5 0x438
+#define VFE_0_BUS_BDG_DS_CFG_6 0x43c
+#define VFE_0_BUS_BDG_DS_CFG_7 0x440
+#define VFE_0_BUS_BDG_DS_CFG_8 0x444
+#define VFE_0_BUS_BDG_DS_CFG_9 0x448
+#define VFE_0_BUS_BDG_DS_CFG_10 0x44c
+#define VFE_0_BUS_BDG_DS_CFG_11 0x450
+#define VFE_0_BUS_BDG_DS_CFG_12 0x454
+#define VFE_0_BUS_BDG_DS_CFG_13 0x458
+#define VFE_0_BUS_BDG_DS_CFG_14 0x45c
+#define VFE_0_BUS_BDG_DS_CFG_15 0x460
+#define VFE_0_BUS_BDG_DS_CFG_16 0x464
+#define VFE_0_BUS_BDG_DS_CFG_16_CFG 0x40000103
+
+#define VFE_0_RDI_CFG_x(x) (0x46c + (0x4 * (x)))
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
+#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
+#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
+
+#define VFE_0_CAMIF_CMD 0x478
+#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
+#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
+#define VFE_0_CAMIF_CMD_NO_CHANGE 3
+#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
+#define VFE_0_CAMIF_CFG 0x47c
+#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
+#define VFE_0_CAMIF_FRAME_CFG 0x484
+#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x488
+#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x48c
+#define VFE_0_CAMIF_SUBSAMPLE_CFG 0x490
+#define VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN 0x498
+#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x49c
+#define VFE_0_CAMIF_STATUS 0x4a4
+#define VFE_0_CAMIF_STATUS_HALT BIT(31)
+
+#define VFE_0_REG_UPDATE 0x4ac
+#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
+#define VFE_0_REG_UPDATE_line_n(n) \
+ ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
+
+#define VFE_0_DEMUX_CFG 0x560
+#define VFE_0_DEMUX_CFG_PERIOD 0x3
+#define VFE_0_DEMUX_GAIN_0 0x564
+#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
+#define VFE_0_DEMUX_GAIN_1 0x568
+#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
+#define VFE_0_DEMUX_EVEN_CFG 0x574
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
+#define VFE_0_DEMUX_ODD_CFG 0x578
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
+
+#define VFE_0_SCALE_ENC_Y_CFG 0x91c
+#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x920
+#define VFE_0_SCALE_ENC_Y_H_PHASE 0x924
+#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x934
+#define VFE_0_SCALE_ENC_Y_V_PHASE 0x938
+#define VFE_0_SCALE_ENC_CBCR_CFG 0x948
+#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x94c
+#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x950
+#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x960
+#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x964
+
+#define VFE_0_CROP_ENC_Y_WIDTH 0x974
+#define VFE_0_CROP_ENC_Y_HEIGHT 0x978
+#define VFE_0_CROP_ENC_CBCR_WIDTH 0x97c
+#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x980
+
+#define VFE_0_CLAMP_ENC_MAX_CFG 0x984
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
+#define VFE_0_CLAMP_ENC_MIN_CFG 0x988
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
+
+#define VFE_0_REALIGN_BUF_CFG 0xaac
+#define VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL BIT(2)
+#define VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL BIT(3)
+#define VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE BIT(4)
+
+#define CAMIF_TIMEOUT_SLEEP_US 1000
+#define CAMIF_TIMEOUT_ALL_US 1000000
+
+#define MSM_VFE_VFE0_UB_SIZE 2047
+#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
+#define MSM_VFE_VFE1_UB_SIZE 1535
+#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
+
+static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
+
+ dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+}
+
+static u16 vfe_get_ub_size(u8 vfe_id)
+{
+ if (vfe_id == 0)
+ return MSM_VFE_VFE0_UB_SIZE_RDI;
+ else if (vfe_id == 1)
+ return MSM_VFE_VFE1_UB_SIZE_RDI;
+
+ return 0;
+}
+
+static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits & ~clr_bits, vfe->base + reg);
+}
+
+static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits | set_bits, vfe->base + reg);
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_IDLE_CGC |
+ VFE_0_GLOBAL_RESET_CMD_DSP |
+ VFE_0_GLOBAL_RESET_CMD_TESTGEN |
+ VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
+ VFE_0_GLOBAL_RESET_CMD_PM |
+ VFE_0_GLOBAL_RESET_CMD_REGISTER |
+ VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
+ VFE_0_GLOBAL_RESET_CMD_BUS |
+ VFE_0_GLOBAL_RESET_CMD_CAMIF |
+ VFE_0_GLOBAL_RESET_CMD_CORE;
+
+ writel_relaxed(BIT(31), vfe->base + VFE_0_IRQ_MASK_0);
+ wmb();
+ writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
+}
+
+static void vfe_halt_request(struct vfe_device *vfe)
+{
+ writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
+ vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_halt_clear(struct vfe_device *vfe)
+{
+ writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+}
+
+static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
+}
+
+#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
+
+static int vfe_word_per_line_by_pixel(u32 format, u32 pixel_per_line)
+{
+ int val = 0;
+
+ switch (format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ val = CALC_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CALC_WORD(pixel_per_line, 2, 8);
+ break;
+ }
+
+ return val;
+}
+
+static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
+{
+ return CALC_WORD(bytes_per_line, 1, 8);
+}
+
+static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
+ u16 *width, u16 *height, u16 *bytesperline)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ if (plane == 1)
+ *height /= 2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_UYVY:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[plane].bytesperline;
+ break;
+ }
+}
+
+static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable)
+{
+ u32 reg;
+
+ if (enable) {
+ u16 width = 0, height = 0, bytesperline = 0, wpl;
+
+ vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
+
+ wpl = vfe_word_per_line_by_pixel(pix->pixelformat, width);
+
+ reg = height - 1;
+ reg |= ((wpl + 3) / 4 - 1) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+
+ wpl = vfe_word_per_line_by_bytes(bytesperline);
+
+ reg = 0x3;
+ reg |= (height - 1) << 2;
+ reg |= ((wpl + 1) / 2) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ } else {
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ }
+}
+
+static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+
+ reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
+
+ reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
+ & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
+
+ writel_relaxed(reg,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+}
+
+static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
+ u32 pattern)
+{
+ writel_relaxed(pattern,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
+}
+
+static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
+ u16 offset, u16 depth)
+{
+ u32 reg;
+
+ reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
+ depth;
+ writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
+}
+
+static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
+{
+ wmb();
+ writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
+ wmb();
+}
+
+static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
+}
+
+static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
+}
+
+static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
+
+ return (reg >> wm) & 0x1;
+}
+
+static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
+{
+ if (enable)
+ writel_relaxed(0x101, vfe->base + VFE_0_BUS_CFG);
+ else
+ writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
+}
+
+static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
+ VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
+{
+ writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
+ vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
+}
+
+static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable)
+{
+ struct vfe_line *line = container_of(output, struct vfe_line, output);
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+
+ switch (p) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+
+ if (output->wm_idx[0] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+
+ reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
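+ /* NV12/NV16 store chroma as CbCr; swap the pair stream relative to NV21/NV61 */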
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+
+ if (output->wm_idx[1] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
+ reg);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_UYVY:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN;
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+
+ if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+
+ if (output->wm_idx[0] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 val = VFE_0_MODULE_ZOOM_EN_REALIGN_BUF;
+
+ if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU &&
+ p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY)
+ return;
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val);
+ return;
+ }
+
+ val = VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE;
+
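+ /* The Cr sample sits on the odd pixel for UYVY/YUYV, Cb for VYUY/YVYU */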
+ if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV)
+ val |= VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL;
+ else
+ val |= VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL;
+
+ writel_relaxed(val, vfe->base + VFE_0_REALIGN_BUF_CFG);
+}
+
+static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
+{
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
+ VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
+
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
+ cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
+ wmb();
+ writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
+ wmb();
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+}
+
+static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
+ VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
+ VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ }
+}
+
+static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable)
+{
+ struct vfe_output *output = &vfe->line[line_id].output;
+ unsigned int i;
+ u32 irq_en0;
+ u32 irq_en1;
+ u32 comp_mask = 0;
+
+ irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
+ irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
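+	/*
+	 * Gather per-master overflow IRQs and build this composite's byte
+	 * (8 bits per composite group) in the composite IRQ mask.
+	 */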
+ for (i = 0; i < output->wm_num; i++) {
+ irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
+ output->wm_idx[i]);
+ comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
+ }
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ }
+}
+
+static void vfe_enable_irq_common(struct vfe_device *vfe)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
+ VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
+
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+}
+
+static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val, even_cfg, odd_cfg;
+
+ writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
+
+ val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
+
+ val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
+ break;
+ }
+
+ writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
+ writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
+}
+
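+/*
+ * Map the downscale ratio to the scaler's interpolation resolution
+ * setting: 0 for ratios >= 16 down to 3 for ratios below 4.
+ */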
+static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
+{
+ if (input / output >= 16)
+ return 0;
+
+ if (input / output >= 8)
+ return 1;
+
+ if (input / output >= 4)
+ return 2;
+
+ return 3;
+}
+
+static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 input, output;
+ u8 interp_reso;
+ u32 phase_mult;
+
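+	/*
+	 * Image sizes are programmed as size - 1; the phase multiplier is
+	 * the input/output ratio in fixed point with 14 + interp_reso
+	 * fractional bits on this VFE version.
+	 */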
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
+ output = line->compose.width - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ output = line->compose.height - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
+ output = line->compose.width / 2 - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ output = line->compose.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
+ output = line->compose.height / 2 - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
+}
+
+static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 first, last;
+
+ first = line->crop.left;
+ last = line->crop.left + line->crop.width - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
+
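+	/* Chroma is cropped at half width; NV12/NV21 also halve the height */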
+ first = line->crop.left / 2;
+ last = line->crop.left / 2 + line->crop.width / 2 - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
+ first = line->crop.top / 2;
+ last = line->crop.top / 2 + line->crop.height / 2 - 1;
+ }
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
+}
+
+static void vfe_set_clamp_cfg(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
+
+ val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
+}
+
+static void vfe_set_qos(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
+ u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
+ writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static void vfe_set_ds(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_DS_CFG_0_CFG;
+ u32 val16 = VFE_0_BUS_BDG_DS_CFG_16_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_6);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_7);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_8);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_9);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_10);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_11);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_12);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_13);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_14);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_15);
+ writel_relaxed(val16, vfe->base + VFE_0_BUS_BDG_DS_CFG_16);
+}
+
+static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+	/* nothing to do - CGC override is not programmed on this VFE variant */
+}
+
+static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val;
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
+ break;
+ }
+
+ val |= VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ val |= (line->fmt[MSM_VFE_PAD_SINK].height - 1) << 16;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
+
+ val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
+
+ val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
+}
+
+static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
+{
+ u32 cmd;
+
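+	/* Clear stale CAMIF status first, leaving the enable state unchanged */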
+ cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+ wmb();
+
+ if (enable)
+ cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
+ else
+ cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
+
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+}
+
+static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
+{
+ u32 val_lens = VFE_0_MODULE_LENS_EN_DEMUX |
+ VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE;
+ u32 val_zoom = VFE_0_MODULE_ZOOM_EN_SCALE_ENC |
+ VFE_0_MODULE_ZOOM_EN_CROP_ENC;
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_MODULE_LENS_EN, val_lens);
+ vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_MODULE_LENS_EN, val_lens);
+ vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
+ }
+}
+
+static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
+ val,
+ (val & VFE_0_CAMIF_STATUS_HALT),
+ CAMIF_TIMEOUT_SLEEP_US,
+ CAMIF_TIMEOUT_ALL_US);
+ if (ret < 0)
+ dev_err(dev, "%s: camif stop timeout\n", __func__);
+
+ return ret;
+}
+
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
+{
+ *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
+ *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
+
+ writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
+ writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
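+	/* Make sure the per-register clears are posted before the global clear */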
+ wmb();
+ writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
+}
+
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
+
+ pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 value0, value1;
+ int i, j;
+
+ vfe->ops->isr_read(vfe, &value0, &value1);
+
+ trace_printk("VFE: status0 = 0x%08x, status1 = 0x%08x\n",
+ value0, value1);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
+ vfe->isr_ops.reset_ack(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
+ vfe->ops->violation_read(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
+ vfe->isr_ops.halt_ack(vfe);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
+ vfe->isr_ops.reg_update(vfe, i);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
+ vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
+ vfe->isr_ops.sof(vfe, i);
+
+ for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
+ vfe->isr_ops.comp_done(vfe, i);
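+			/*
+			 * Masters composited on the PIX line are covered by
+			 * comp_done; mask their ping-pong bits so wm_done is
+			 * not signalled again below.
+			 */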
+ for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
+ if (vfe->wm_output_map[j] == VFE_LINE_PIX)
+ value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
+ }
+
+ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
+ vfe->isr_ops.wm_done(vfe, i);
+
+ return IRQ_HANDLED;
+}
+
+const struct vfe_hw_ops vfe_ops_4_7 = {
+ .hw_version_read = vfe_hw_version_read,
+ .get_ub_size = vfe_get_ub_size,
+ .global_reset = vfe_global_reset,
+ .halt_request = vfe_halt_request,
+ .halt_clear = vfe_halt_clear,
+ .wm_enable = vfe_wm_enable,
+ .wm_frame_based = vfe_wm_frame_based,
+ .wm_line_based = vfe_wm_line_based,
+ .wm_set_framedrop_period = vfe_wm_set_framedrop_period,
+ .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
+ .wm_set_ub_cfg = vfe_wm_set_ub_cfg,
+ .bus_reload_wm = vfe_bus_reload_wm,
+ .wm_set_ping_addr = vfe_wm_set_ping_addr,
+ .wm_set_pong_addr = vfe_wm_set_pong_addr,
+ .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
+ .bus_enable_wr_if = vfe_bus_enable_wr_if,
+ .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
+ .wm_set_subsample = vfe_wm_set_subsample,
+ .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
+ .set_xbar_cfg = vfe_set_xbar_cfg,
+ .set_realign_cfg = vfe_set_realign_cfg,
+ .set_rdi_cid = vfe_set_rdi_cid,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .enable_irq_wm_line = vfe_enable_irq_wm_line,
+ .enable_irq_pix_line = vfe_enable_irq_pix_line,
+ .enable_irq_common = vfe_enable_irq_common,
+ .set_demux_cfg = vfe_set_demux_cfg,
+ .set_scale_cfg = vfe_set_scale_cfg,
+ .set_crop_cfg = vfe_set_crop_cfg,
+ .set_clamp_cfg = vfe_set_clamp_cfg,
+ .set_qos = vfe_set_qos,
+ .set_ds = vfe_set_ds,
+ .set_cgc_override = vfe_set_cgc_override,
+ .set_camif_cfg = vfe_set_camif_cfg,
+ .set_camif_cmd = vfe_set_camif_cmd,
+ .set_module_cfg = vfe_set_module_cfg,
+ .camif_wait_for_stop = vfe_camif_wait_for_stop,
+ .isr_read = vfe_isr_read,
+ .violation_read = vfe_violation_read,
+ .isr = vfe_isr,
+};
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index a6329a8a7c4a..ed6a557de65d 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -1,28 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
-#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock_types.h>
#include <linux/spinlock.h>
#include <media/media-entity.h>
@@ -38,194 +30,7 @@
((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)]))
#define to_vfe(ptr_line) \
- container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line)
-
-#define VFE_0_HW_VERSION 0x000
-
-#define VFE_0_GLOBAL_RESET_CMD 0x00c
-#define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0)
-#define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1)
-#define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2)
-#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3)
-#define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4)
-#define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5)
-#define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6)
-#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7)
-#define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8)
-
-#define VFE_0_MODULE_CFG 0x018
-#define VFE_0_MODULE_CFG_DEMUX (1 << 2)
-#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3)
-#define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23)
-#define VFE_0_MODULE_CFG_CROP_ENC (1 << 27)
-
-#define VFE_0_CORE_CFG 0x01c
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
-
-#define VFE_0_IRQ_CMD 0x024
-#define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0)
-
-#define VFE_0_IRQ_MASK_0 0x028
-#define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0)
-#define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1)
-#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
-#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
- ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
-#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
-#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
-#define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31)
-#define VFE_0_IRQ_MASK_1 0x02c
-#define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0)
-#define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7)
-#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8)
-#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9))
-#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29))
-
-#define VFE_0_IRQ_CLEAR_0 0x030
-#define VFE_0_IRQ_CLEAR_1 0x034
-
-#define VFE_0_IRQ_STATUS_0 0x038
-#define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0)
-#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
-#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
- ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
-#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
-#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
-#define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31)
-#define VFE_0_IRQ_STATUS_1 0x03c
-#define VFE_0_IRQ_STATUS_1_VIOLATION (1 << 7)
-#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8)
-#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) (1 << ((n) + 29))
-
-#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
-#define VFE_0_VIOLATION_STATUS 0x48
-
-#define VFE_0_BUS_CMD 0x4c
-#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x))
-
-#define VFE_0_BUS_CFG 0x050
-
-#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
-#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN (1 << 1)
-#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
-
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2)
-
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
- (0x088 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
- (0x08c + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
-
-#define VFE_0_BUS_PING_PONG_STATUS 0x268
-
-#define VFE_0_BUS_BDG_CMD 0x2c0
-#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
-
-#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
-#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
-#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
-#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
-#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
-#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
-#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
-#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
-#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
-#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
-
-#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
-#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
-#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
-#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
-#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
-#define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2)
-#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
-#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r)))
-
-#define VFE_0_CAMIF_CMD 0x2f4
-#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
-#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
-#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS (1 << 2)
-#define VFE_0_CAMIF_CFG 0x2f8
-#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN (1 << 6)
-#define VFE_0_CAMIF_FRAME_CFG 0x300
-#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
-#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
-#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
-#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
-#define VFE_0_CAMIF_STATUS 0x31c
-#define VFE_0_CAMIF_STATUS_HALT (1 << 31)
-
-#define VFE_0_REG_UPDATE 0x378
-#define VFE_0_REG_UPDATE_RDIn(n) (1 << (1 + (n)))
-#define VFE_0_REG_UPDATE_line_n(n) \
- ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
-
-#define VFE_0_DEMUX_CFG 0x424
-#define VFE_0_DEMUX_CFG_PERIOD 0x3
-#define VFE_0_DEMUX_GAIN_0 0x428
-#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
-#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
-#define VFE_0_DEMUX_GAIN_1 0x42c
-#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
-#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
-#define VFE_0_DEMUX_EVEN_CFG 0x438
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
-#define VFE_0_DEMUX_ODD_CFG 0x43c
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
-
-#define VFE_0_SCALE_ENC_Y_CFG 0x75c
-#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
-#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
-#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
-#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
-#define VFE_0_SCALE_ENC_CBCR_CFG 0x778
-#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
-#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
-#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
-#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
-
-#define VFE_0_CROP_ENC_Y_WIDTH 0x854
-#define VFE_0_CROP_ENC_Y_HEIGHT 0x858
-#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
-#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
-
-#define VFE_0_CLAMP_ENC_MAX_CFG 0x874
-#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
-#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
-#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
-#define VFE_0_CLAMP_ENC_MIN_CFG 0x878
-#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
-#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
-#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
-
-#define VFE_0_CGC_OVERRIDE_1 0x974
-#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x))
+ container_of(vfe_line_array(ptr_line), struct vfe_device, line)
/* VFE reset timeout */
#define VFE_RESET_TIMEOUT_MS 50
@@ -238,633 +43,230 @@
#define VFE_NEXT_SOF_MS 500
-#define CAMIF_TIMEOUT_SLEEP_US 1000
-#define CAMIF_TIMEOUT_ALL_US 1000000
-
#define SCALER_RATIO_MAX 16
-static const struct {
+struct vfe_format {
u32 code;
u8 bpp;
-} vfe_formats[] = {
- {
- MEDIA_BUS_FMT_UYVY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_VYUY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YUYV8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YVYU8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGBRG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGRBG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SRGGB8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGBRG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGRBG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SRGGB10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SBGGR12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGBRG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGRBG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SRGGB12_1X12,
- 12,
- }
+};
+
+static const struct vfe_format formats_rdi_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+};
+
+static const struct vfe_format formats_pix_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+};
+
+static const struct vfe_format formats_rdi_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+ { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 },
+};
+
+static const struct vfe_format formats_pix_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
};
/*
* vfe_get_bpp - map media bus format to bits per pixel
+ * @formats: supported media bus formats array
+ * @nformats: size of @formats array
* @code: media bus format code
*
* Return number of bits per pixel
*/
-static u8 vfe_get_bpp(u32 code)
+static u8 vfe_get_bpp(const struct vfe_format *formats,
+ unsigned int nformats, u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
- if (code == vfe_formats[i].code)
- return vfe_formats[i].bpp;
+ for (i = 0; i < nformats; i++)
+ if (code == formats[i].code)
+ return formats[i].bpp;
WARN(1, "Unknown format\n");
- return vfe_formats[0].bpp;
-}
-
-static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
-{
- u32 bits = readl_relaxed(vfe->base + reg);
-
- writel_relaxed(bits & ~clr_bits, vfe->base + reg);
-}
-
-static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
-{
- u32 bits = readl_relaxed(vfe->base + reg);
-
- writel_relaxed(bits | set_bits, vfe->base + reg);
-}
-
-static void vfe_global_reset(struct vfe_device *vfe)
-{
- u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
- VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
- VFE_0_GLOBAL_RESET_CMD_PM |
- VFE_0_GLOBAL_RESET_CMD_TIMER |
- VFE_0_GLOBAL_RESET_CMD_REGISTER |
- VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
- VFE_0_GLOBAL_RESET_CMD_BUS |
- VFE_0_GLOBAL_RESET_CMD_CAMIF |
- VFE_0_GLOBAL_RESET_CMD_CORE;
-
- writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
-}
-
-static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
-{
- if (enable)
- vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
- else
- vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ return formats[0].bpp;
}
-static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
-{
- if (enable)
- vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
- else
- vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
-}
-
-#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
-
-static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line)
+static u32 vfe_find_code(u32 *code, unsigned int n_code,
+ unsigned int index, u32 req_code)
{
- int val = 0;
-
- switch (format) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- val = CALC_WORD(pixel_per_line, 1, 8);
- break;
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_YVYU:
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_VYUY:
- val = CALC_WORD(pixel_per_line, 2, 8);
- break;
- }
-
- return val;
-}
-
-static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
- u16 *width, u16 *height, u16 *bytesperline)
-{
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
- if (plane == 1)
- *height /= 2;
- break;
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
- break;
- }
-}
-
-static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
- struct v4l2_pix_format_mplane *pix,
- u8 plane, u32 enable)
-{
- u32 reg;
-
- if (enable) {
- u16 width = 0, height = 0, bytesperline = 0, wpl;
-
- vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
-
- wpl = vfe_word_per_line(pix->pixelformat, width);
-
- reg = height - 1;
- reg |= ((wpl + 1) / 2 - 1) << 16;
-
- writel_relaxed(reg, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
-
- wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
-
- reg = 0x3;
- reg |= (height - 1) << 4;
- reg |= wpl << 16;
-
- writel_relaxed(reg, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
- } else {
- writel_relaxed(0, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
- writel_relaxed(0, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
- }
-}
-
-static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
-{
- u32 reg;
-
- reg = readl_relaxed(vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
-
- reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
-
- reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
- & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
-
- writel_relaxed(reg,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
-}
-
-static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
- u32 pattern)
-{
- writel_relaxed(pattern,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
-}
-
-static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset,
- u16 depth)
-{
- u32 reg;
-
- reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
- depth;
- writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
-}
-
-static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
-{
- wmb();
- writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
- wmb();
-}
-
-static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
-{
- writel_relaxed(addr,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
-}
-
-static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
-{
- writel_relaxed(addr,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
-}
-
-static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
-{
- u32 reg;
-
- reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
-
- return (reg >> wm) & 0x1;
-}
-
-static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
-{
- if (enable)
- writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
- else
- writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
-}
-
-static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
- enum vfe_line_id id)
-{
- u32 reg;
-
- reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
- reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
-
- reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
- reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
- VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
-
- switch (id) {
- case VFE_LINE_RDI0:
- default:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI1:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI2:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- }
-
- if (wm % 2 == 1)
- reg <<= 16;
-
- vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
-}
-
-static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
-{
- writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
- vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
-}
-
-static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
- enum vfe_line_id id)
-{
- u32 reg;
-
- reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
- vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
-
- reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
- vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
-
- switch (id) {
- case VFE_LINE_RDI0:
- default:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI1:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI2:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- }
-
- if (wm % 2 == 1)
- reg <<= 16;
-
- vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
-}
+ int i;
-static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
- u8 enable)
-{
- struct vfe_line *line = container_of(output, struct vfe_line, output);
- u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
- u32 reg;
- unsigned int i;
+ if (!req_code && (index >= n_code))
+ return 0;
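+
+	/*
+	 * With req_code set, return it only if it is in the list; otherwise
+	 * return the code at the requested index. Fall back to code[0].
+	 */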
- for (i = 0; i < output->wm_num; i++) {
- if (i == 0) {
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- } else if (i == 1) {
- reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
- if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
- reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ for (i = 0; i < n_code; i++)
+ if (req_code) {
+ if (req_code == code[i])
+ return req_code;
} else {
- /* On current devices output->wm_num is always <= 2 */
- break;
+ if (i == index)
+ return code[i];
}
- if (output->wm_idx[i] % 2 == 1)
- reg <<= 16;
-
- if (enable)
- vfe_reg_set(vfe,
- VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
- reg);
- else
- vfe_reg_clr(vfe,
- VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
- reg);
- }
-}
-
-static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
-{
- vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
- VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
-
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
- cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+ return code[0];
}
-static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
+ unsigned int index, u32 src_req_code)
{
- vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
- wmb();
- writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
- wmb();
-}
-
-static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
- enum vfe_line_id line_id, u8 enable)
-{
- u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
- VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
- u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
- VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
-
- if (enable) {
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- } else {
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- }
-}
-
-static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
- enum vfe_line_id line_id, u8 enable)
-{
- struct vfe_output *output = &vfe->line[line_id].output;
- unsigned int i;
- u32 irq_en0;
- u32 irq_en1;
- u32 comp_mask = 0;
-
- irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
- irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
- irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
- irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
- irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
- for (i = 0; i < output->wm_num; i++) {
- irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
- output->wm_idx[i]);
- comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
- }
-
- if (enable) {
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
- } else {
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
- }
-}
-
-static void vfe_enable_irq_common(struct vfe_device *vfe)
-{
- u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
- u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
- VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
-
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
-}
-
-static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 val, even_cfg, odd_cfg;
-
- writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
-
- val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
- writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
-
- val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
- writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+ struct vfe_device *vfe = to_vfe(line);
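+
+	/*
+	 * List the codes each sink format can produce on the source pad:
+	 * 8x96 offers all four packed YUV orderings plus the 1_5X8 variant
+	 * of the sink code, while 8x16 offers only the sink code and its
+	 * 1_5X8 variant.
+	 */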
- switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- default:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
- break;
- }
+ if (vfe->camss->version == CAMSS_8x16)
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
- writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
- writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
-}
+ return sink_code;
+ }
+ else if (vfe->camss->version == CAMSS_8x96)
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
-static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
-{
- if (input / output >= 16)
+ return sink_code;
+ }
+ else
return 0;
-
- if (input / output >= 8)
- return 1;
-
- if (input / output >= 4)
- return 2;
-
- return 3;
-}
-
-static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
- u32 reg;
- u16 input, output;
- u8 interp_reso;
- u32 phase_mult;
-
- writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
-
- input = line->fmt[MSM_VFE_PAD_SINK].width;
- output = line->compose.width;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
-
- input = line->fmt[MSM_VFE_PAD_SINK].height;
- output = line->compose.height;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
-
- writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
-
- input = line->fmt[MSM_VFE_PAD_SINK].width;
- output = line->compose.width / 2;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
-
- input = line->fmt[MSM_VFE_PAD_SINK].height;
- output = line->compose.height;
- if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
- output = line->compose.height / 2;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
-}
-
-static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
- u32 reg;
- u16 first, last;
-
- first = line->crop.left;
- last = line->crop.left + line->crop.width - 1;
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
-
- first = line->crop.top;
- last = line->crop.top + line->crop.height - 1;
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
-
- first = line->crop.left / 2;
- last = line->crop.left / 2 + line->crop.width / 2 - 1;
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
-
- first = line->crop.top;
- last = line->crop.top + line->crop.height - 1;
- if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
- first = line->crop.top / 2;
- last = line->crop.top / 2 + line->crop.height / 2 - 1;
- }
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
-}
-
-static void vfe_set_clamp_cfg(struct vfe_device *vfe)
-{
- u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
- VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
- VFE_0_CLAMP_ENC_MAX_CFG_CH2;
-
- writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
-
- val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
- VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
- VFE_0_CLAMP_ENC_MIN_CFG_CH2;
-
- writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
}
/*
@@ -879,12 +281,12 @@ static int vfe_reset(struct vfe_device *vfe)
reinit_completion(&vfe->reset_complete);
- vfe_global_reset(vfe);
+ vfe->ops->global_reset(vfe);
time = wait_for_completion_timeout(&vfe->reset_complete,
msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
if (!time) {
- dev_err(to_device(vfe), "VFE reset timeout\n");
+ dev_err(vfe->camss->dev, "VFE reset timeout\n");
return -EIO;
}
@@ -903,13 +305,12 @@ static int vfe_halt(struct vfe_device *vfe)
reinit_completion(&vfe->halt_complete);
- writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
- vfe->base + VFE_0_BUS_BDG_CMD);
+ vfe->ops->halt_request(vfe);
time = wait_for_completion_timeout(&vfe->halt_complete,
msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
if (!time) {
- dev_err(to_device(vfe), "VFE halt timeout\n");
+ dev_err(vfe->camss->dev, "VFE halt timeout\n");
return -EIO;
}
@@ -927,10 +328,6 @@ static void vfe_init_outputs(struct vfe_device *vfe)
output->buf[0] = NULL;
output->buf[1] = NULL;
INIT_LIST_HEAD(&output->pending_bufs);
-
- output->wm_num = 1;
- if (vfe->line[i].id == VFE_LINE_PIX)
- output->wm_num = 2;
}
}
@@ -942,115 +339,6 @@ static void vfe_reset_output_maps(struct vfe_device *vfe)
vfe->wm_output_map[i] = VFE_LINE_NONE;
}
-static void vfe_set_qos(struct vfe_device *vfe)
-{
- u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
- u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
-
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
- writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
-}
-
-static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
-{
- u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
-
- if (enable)
- vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
- else
- vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
-
- wmb();
-}
-
-static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
-{
- u32 val = VFE_0_MODULE_CFG_DEMUX |
- VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
- VFE_0_MODULE_CFG_SCALE_ENC |
- VFE_0_MODULE_CFG_CROP_ENC;
-
- if (enable)
- writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
- else
- writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
-}
-
-static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 val;
-
- switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- default:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
- break;
- }
-
- writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
-
- val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
- val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
-
- val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
-
- val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
-
- val = 0xffffffff;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
-
- val = 0xffffffff;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
-
- val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
-
- val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
-}
-
-static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd)
-{
- writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS,
- vfe->base + VFE_0_CAMIF_CMD);
-
- writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
-}
-
-static int vfe_camif_wait_for_stop(struct vfe_device *vfe)
-{
- u32 val;
- int ret;
-
- ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
- val,
- (val & VFE_0_CAMIF_STATUS_HALT),
- CAMIF_TIMEOUT_SLEEP_US,
- CAMIF_TIMEOUT_ALL_US);
- if (ret < 0)
- dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__);
-
- return ret;
-}
-
static void vfe_output_init_addrs(struct vfe_device *vfe,
struct vfe_output *output, u8 sync)
{
@@ -1071,10 +359,10 @@ static void vfe_output_init_addrs(struct vfe_device *vfe,
else
pong_addr = ping_addr;
- vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
- vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
if (sync)
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
@@ -1090,9 +378,9 @@ static void vfe_output_update_ping_addr(struct vfe_device *vfe,
else
addr = 0;
- vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr);
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], addr);
if (sync)
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
@@ -1108,9 +396,9 @@ static void vfe_output_update_pong_addr(struct vfe_device *vfe,
else
addr = 0;
- vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr);
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], addr);
if (sync)
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
@@ -1154,12 +442,13 @@ static void vfe_output_frame_drop(struct vfe_device *vfe,
drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
for (i = 0; i < output->wm_num; i++) {
- vfe_wm_set_framedrop_period(vfe, output->wm_idx[i],
- drop_period);
- vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i],
- drop_pattern);
+ vfe->ops->wm_set_framedrop_period(vfe, output->wm_idx[i],
+ drop_period);
+ vfe->ops->wm_set_framedrop_pattern(vfe, output->wm_idx[i],
+ drop_pattern);
}
- vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id);
+ vfe->ops->reg_update(vfe,
+ container_of(output, struct vfe_line, output)->id);
}
static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
@@ -1214,7 +503,7 @@ static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
break;
case VFE_OUTPUT_SINGLE:
default:
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Next buf in wrong state! %d\n",
output->state);
break;
@@ -1234,7 +523,7 @@ static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
vfe_output_frame_drop(vfe, output, 0);
break;
default:
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Last buff in wrong state! %d\n",
output->state);
break;
@@ -1263,7 +552,7 @@ static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
output->state = VFE_OUTPUT_CONTINUOUS;
} else {
vfe_buf_add_pending(output, new_buf);
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Inactive buffer is busy\n");
}
break;
@@ -1278,7 +567,7 @@ static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
output->state = VFE_OUTPUT_SINGLE;
} else {
vfe_buf_add_pending(output, new_buf);
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Output idle with buffer set!\n");
}
break;
@@ -1294,6 +583,7 @@ static int vfe_get_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
+ struct v4l2_format *f = &line->video_out.active_fmt;
unsigned long flags;
int i;
int wm_idx;
@@ -1302,17 +592,29 @@ static int vfe_get_output(struct vfe_line *line)
output = &line->output;
if (output->state != VFE_OUTPUT_OFF) {
- dev_err(to_device(vfe), "Output is running\n");
+ dev_err(vfe->camss->dev, "Output is running\n");
goto error;
}
output->state = VFE_OUTPUT_RESERVED;
output->active_buf = 0;
+ switch (f->fmt.pix_mp.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ output->wm_num = 2;
+ break;
+ default:
+ output->wm_num = 1;
+ break;
+ }
+
for (i = 0; i < output->wm_num; i++) {
wm_idx = vfe_reserve_wm(vfe, line->id);
if (wm_idx < 0) {
- dev_err(to_device(vfe), "Can not reserve wm\n");
+ dev_err(vfe->camss->dev, "Can not reserve wm\n");
goto error_get_wm;
}
output->wm_idx[i] = wm_idx;
@@ -1356,27 +658,21 @@ static int vfe_enable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->ops;
unsigned long flags;
unsigned int i;
u16 ub_size;
- switch (vfe->id) {
- case 0:
- ub_size = MSM_VFE_VFE0_UB_SIZE_RDI;
- break;
- case 1:
- ub_size = MSM_VFE_VFE1_UB_SIZE_RDI;
- break;
- default:
+ ub_size = ops->get_ub_size(vfe->id);
+ if (!ub_size)
return -EINVAL;
- }
spin_lock_irqsave(&vfe->output_lock, flags);
- vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id);
+ ops->reg_update_clear(vfe, line->id);
if (output->state != VFE_OUTPUT_RESERVED) {
- dev_err(to_device(vfe), "Output is not in reserved state %d\n",
+ dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
output->state);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
@@ -1418,42 +714,43 @@ static int vfe_enable_output(struct vfe_line *line)
vfe_output_init_addrs(vfe, output, 0);
if (line->id != VFE_LINE_PIX) {
- vfe_set_cgc_override(vfe, output->wm_idx[0], 1);
- vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
- vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
- vfe_wm_set_subsample(vfe, output->wm_idx[0]);
- vfe_set_rdi_cid(vfe, line->id, 0);
- vfe_wm_set_ub_cfg(vfe, output->wm_idx[0],
- (ub_size + 1) * output->wm_idx[0], ub_size);
- vfe_wm_frame_based(vfe, output->wm_idx[0], 1);
- vfe_wm_enable(vfe, output->wm_idx[0], 1);
- vfe_bus_reload_wm(vfe, output->wm_idx[0]);
+ ops->set_cgc_override(vfe, output->wm_idx[0], 1);
+ ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
+ ops->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
+ ops->wm_set_subsample(vfe, output->wm_idx[0]);
+ ops->set_rdi_cid(vfe, line->id, 0);
+ ops->wm_set_ub_cfg(vfe, output->wm_idx[0],
+ (ub_size + 1) * output->wm_idx[0], ub_size);
+ ops->wm_frame_based(vfe, output->wm_idx[0], 1);
+ ops->wm_enable(vfe, output->wm_idx[0], 1);
+ ops->bus_reload_wm(vfe, output->wm_idx[0]);
} else {
ub_size /= output->wm_num;
for (i = 0; i < output->wm_num; i++) {
- vfe_set_cgc_override(vfe, output->wm_idx[i], 1);
- vfe_wm_set_subsample(vfe, output->wm_idx[i]);
- vfe_wm_set_ub_cfg(vfe, output->wm_idx[i],
- (ub_size + 1) * output->wm_idx[i],
- ub_size);
- vfe_wm_line_based(vfe, output->wm_idx[i],
+ ops->set_cgc_override(vfe, output->wm_idx[i], 1);
+ ops->wm_set_subsample(vfe, output->wm_idx[i]);
+ ops->wm_set_ub_cfg(vfe, output->wm_idx[i],
+ (ub_size + 1) * output->wm_idx[i],
+ ub_size);
+ ops->wm_line_based(vfe, output->wm_idx[i],
&line->video_out.active_fmt.fmt.pix_mp,
i, 1);
- vfe_wm_enable(vfe, output->wm_idx[i], 1);
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ ops->wm_enable(vfe, output->wm_idx[i], 1);
+ ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
- vfe_enable_irq_pix_line(vfe, 0, line->id, 1);
- vfe_set_module_cfg(vfe, 1);
- vfe_set_camif_cfg(vfe, line);
- vfe_set_xbar_cfg(vfe, output, 1);
- vfe_set_demux_cfg(vfe, line);
- vfe_set_scale_cfg(vfe, line);
- vfe_set_crop_cfg(vfe, line);
- vfe_set_clamp_cfg(vfe);
- vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY);
+ ops->enable_irq_pix_line(vfe, 0, line->id, 1);
+ ops->set_module_cfg(vfe, 1);
+ ops->set_camif_cfg(vfe, line);
+ ops->set_realign_cfg(vfe, line, 1);
+ ops->set_xbar_cfg(vfe, output, 1);
+ ops->set_demux_cfg(vfe, line);
+ ops->set_scale_cfg(vfe, line);
+ ops->set_crop_cfg(vfe, line);
+ ops->set_clamp_cfg(vfe);
+ ops->set_camif_cmd(vfe, 1);
}
- vfe_reg_update(vfe, line->id);
+ ops->reg_update(vfe, line->id);
spin_unlock_irqrestore(&vfe->output_lock, flags);
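
From here on, every register-level call goes through the vfe->ops vtable, so this enable sequence is shared between VFE 4.1 (8x16) and VFE 4.7 (8x96). A minimal sketch of how a per-version table is populated; the register offset and helper bodies are placeholders, and only the vfe_ops_4_1 symbol name is taken from the patch (its real definition lives in a per-version source file):

#define VFE_41_REG_UPDATE	0x378	/* placeholder offset */

static void vfe_4_1_reg_update(struct vfe_device *vfe,
			       enum vfe_line_id line_id)
{
	vfe->reg_update |= BIT(line_id);
	writel_relaxed(vfe->reg_update, vfe->base + VFE_41_REG_UPDATE);
}

static void vfe_4_1_reg_update_clear(struct vfe_device *vfe,
				     enum vfe_line_id line_id)
{
	vfe->reg_update &= ~BIT(line_id);
}

const struct vfe_hw_ops vfe_ops_4_1 = {
	.reg_update		= vfe_4_1_reg_update,
	.reg_update_clear	= vfe_4_1_reg_update_clear,
	/* remaining callbacks omitted from this sketch */
};
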
@@ -1464,6 +761,7 @@ static int vfe_disable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->ops;
unsigned long flags;
unsigned long time;
unsigned int i;
@@ -1476,43 +774,45 @@ static int vfe_disable_output(struct vfe_line *line)
time = wait_for_completion_timeout(&output->sof,
msecs_to_jiffies(VFE_NEXT_SOF_MS));
if (!time)
- dev_err(to_device(vfe), "VFE sof timeout\n");
+ dev_err(vfe->camss->dev, "VFE sof timeout\n");
spin_lock_irqsave(&vfe->output_lock, flags);
for (i = 0; i < output->wm_num; i++)
- vfe_wm_enable(vfe, output->wm_idx[i], 0);
+ ops->wm_enable(vfe, output->wm_idx[i], 0);
- vfe_reg_update(vfe, line->id);
+ ops->reg_update(vfe, line->id);
output->wait_reg_update = 1;
spin_unlock_irqrestore(&vfe->output_lock, flags);
time = wait_for_completion_timeout(&output->reg_update,
msecs_to_jiffies(VFE_NEXT_SOF_MS));
if (!time)
- dev_err(to_device(vfe), "VFE reg update timeout\n");
+ dev_err(vfe->camss->dev, "VFE reg update timeout\n");
spin_lock_irqsave(&vfe->output_lock, flags);
if (line->id != VFE_LINE_PIX) {
- vfe_wm_frame_based(vfe, output->wm_idx[0], 0);
- vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
- vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
- vfe_set_cgc_override(vfe, output->wm_idx[0], 0);
+ ops->wm_frame_based(vfe, output->wm_idx[0], 0);
+ ops->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0],
+ line->id);
+ ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
+ ops->set_cgc_override(vfe, output->wm_idx[0], 0);
spin_unlock_irqrestore(&vfe->output_lock, flags);
} else {
for (i = 0; i < output->wm_num; i++) {
- vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
- vfe_set_cgc_override(vfe, output->wm_idx[i], 0);
+ ops->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
+ ops->set_cgc_override(vfe, output->wm_idx[i], 0);
}
- vfe_enable_irq_pix_line(vfe, 0, line->id, 0);
- vfe_set_module_cfg(vfe, 0);
- vfe_set_xbar_cfg(vfe, output, 0);
+ ops->enable_irq_pix_line(vfe, 0, line->id, 0);
+ ops->set_module_cfg(vfe, 0);
+ ops->set_realign_cfg(vfe, line, 0);
+ ops->set_xbar_cfg(vfe, output, 0);
- vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY);
+ ops->set_camif_cmd(vfe, 0);
spin_unlock_irqrestore(&vfe->output_lock, flags);
- vfe_camif_wait_for_stop(vfe);
+ ops->camif_wait_for_stop(vfe, vfe->camss->dev);
}
return 0;
@@ -1532,11 +832,13 @@ static int vfe_enable(struct vfe_line *line)
mutex_lock(&vfe->stream_lock);
if (!vfe->stream_count) {
- vfe_enable_irq_common(vfe);
+ vfe->ops->enable_irq_common(vfe);
+
+ vfe->ops->bus_enable_wr_if(vfe, 1);
- vfe_bus_enable_wr_if(vfe, 1);
+ vfe->ops->set_qos(vfe);
- vfe_set_qos(vfe);
+ vfe->ops->set_ds(vfe);
}
vfe->stream_count++;
@@ -1563,7 +865,7 @@ error_get_output:
mutex_lock(&vfe->stream_lock);
if (vfe->stream_count == 1)
- vfe_bus_enable_wr_if(vfe, 0);
+ vfe->ops->bus_enable_wr_if(vfe, 0);
vfe->stream_count--;
@@ -1589,7 +891,7 @@ static int vfe_disable(struct vfe_line *line)
mutex_lock(&vfe->stream_lock);
if (vfe->stream_count == 1)
- vfe_bus_enable_wr_if(vfe, 0);
+ vfe->ops->bus_enable_wr_if(vfe, 0);
vfe->stream_count--;
@@ -1628,7 +930,7 @@ static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
- vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+ vfe->ops->reg_update_clear(vfe, line_id);
output = &vfe->line[line_id].output;
@@ -1698,19 +1000,19 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
u64 ts = ktime_get_ns();
unsigned int i;
- active_index = vfe_wm_get_ping_pong_status(vfe, wm);
+ active_index = vfe->ops->wm_get_ping_pong_status(vfe, wm);
spin_lock_irqsave(&vfe->output_lock, flags);
if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Received wm done for unmapped index\n");
goto out_unlock;
}
output = &vfe->line[vfe->wm_output_map[wm]].output;
if (output->active_buf == active_index) {
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Active buffer mismatch!\n");
goto out_unlock;
}
@@ -1718,7 +1020,7 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
ready_buf = output->buf[!active_index];
if (!ready_buf) {
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Missing ready buf %d %d!\n",
!active_index, output->state);
goto out_unlock;
@@ -1740,12 +1042,12 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
if (active_index)
for (i = 0; i < output->wm_num; i++)
- vfe_wm_set_ping_addr(vfe, output->wm_idx[i],
- new_addr[i]);
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i],
+ new_addr[i]);
else
for (i = 0; i < output->wm_num; i++)
- vfe_wm_set_pong_addr(vfe, output->wm_idx[i],
- new_addr[i]);
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i],
+ new_addr[i]);
spin_unlock_irqrestore(&vfe->output_lock, flags);
@@ -1776,67 +1078,15 @@ static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
}
}
-/*
- * vfe_isr - ISPIF module interrupt handler
- * @irq: Interrupt line
- * @dev: VFE device
- *
- * Return IRQ_HANDLED on success
- */
-static irqreturn_t vfe_isr(int irq, void *dev)
+static inline void vfe_isr_reset_ack(struct vfe_device *vfe)
{
- struct vfe_device *vfe = dev;
- u32 value0, value1;
- u32 violation;
- int i, j;
-
- value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
- value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
-
- writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0);
- writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1);
-
- wmb();
- writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
-
- if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
- complete(&vfe->reset_complete);
-
- if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) {
- violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
- dev_err_ratelimited(to_device(vfe),
- "VFE: violation = 0x%08x\n", violation);
- }
-
- if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) {
- complete(&vfe->halt_complete);
- writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
- }
-
- for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
- if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
- vfe_isr_reg_update(vfe, i);
-
- if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
- vfe_isr_sof(vfe, VFE_LINE_PIX);
-
- for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
- if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
- vfe_isr_sof(vfe, i);
-
- for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
- if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
- vfe_isr_comp_done(vfe, i);
- for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
- if (vfe->wm_output_map[j] == VFE_LINE_PIX)
- value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
- }
-
- for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
- if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
- vfe_isr_wm_done(vfe, i);
+ complete(&vfe->reset_complete);
+}
- return IRQ_HANDLED;
+static inline void vfe_isr_halt_ack(struct vfe_device *vfe)
+{
+ complete(&vfe->halt_complete);
+ vfe->ops->halt_clear(vfe);
}
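
The monolithic vfe_isr() removed above is replaced by per-version interrupt handlers; the generic layer keeps only these small acks and exposes them through vfe->isr_ops. Each hardware ISR decodes its own status registers and fans out to those callbacks, roughly as in this sketch (the status bit positions are placeholders):

static irqreturn_t vfe_4_x_isr_sketch(int irq, void *dev)
{
	struct vfe_device *vfe = dev;
	u32 value0, value1;

	vfe->ops->isr_read(vfe, &value0, &value1);	/* read and clear */

	if (value0 & BIT(31))		/* placeholder: reset ack bit */
		vfe->isr_ops.reset_ack(vfe);

	if (value1 & BIT(8))		/* placeholder: halt ack bit */
		vfe->isr_ops.halt_ack(vfe);

	return IRQ_HANDLED;
}
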
/*
@@ -1847,7 +1097,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
*/
static int vfe_set_clock_rates(struct vfe_device *vfe)
{
- struct device *dev = to_device(vfe);
+ struct device *dev = vfe->camss->dev;
u32 pixel_clock[MSM_VFE_LINE_NUM];
int i, j;
int ret;
@@ -1862,7 +1112,8 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (!strcmp(clock->name, "camss_vfe_vfe")) {
+ if (!strcmp(clock->name, "vfe0") ||
+ !strcmp(clock->name, "vfe1")) {
u64 min_rate = 0;
long rate;
@@ -1873,8 +1124,11 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
if (j == VFE_LINE_PIX) {
tmp = pixel_clock[j];
} else {
- bpp = vfe_get_bpp(vfe->line[j].
- fmt[MSM_VFE_PAD_SINK].code);
+ struct vfe_line *l = &vfe->line[j];
+
+ bpp = vfe_get_bpp(l->formats,
+ l->nformats,
+ l->fmt[MSM_VFE_PAD_SINK].code);
tmp = pixel_clock[j] * bpp / 64;
}
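
For RDI lines the clock demand scales the pixel clock by bits per pixel over 64, i.e. the write path is treated as moving 64 bits per VFE clock cycle. As a standalone calculation (a sketch under that assumption), a 10-bit Bayer stream at a 100 MHz pixel clock demands 100000000 * 10 / 64, about 15.6 MHz, before the 105/100 clock margin is applied:

static u64 vfe_rdi_clock_demand(u64 pixel_clock, u32 bpp)
{
	/* assumed: the VFE bus drains 64 bits per clock cycle */
	return pixel_clock * bpp / 64;
}
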
@@ -1940,7 +1194,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (!strcmp(clock->name, "camss_vfe_vfe")) {
+ if (!strcmp(clock->name, "vfe0") ||
+ !strcmp(clock->name, "vfe1")) {
u64 min_rate = 0;
unsigned long rate;
@@ -1951,8 +1206,11 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
if (j == VFE_LINE_PIX) {
tmp = pixel_clock[j];
} else {
- bpp = vfe_get_bpp(vfe->line[j].
- fmt[MSM_VFE_PAD_SINK].code);
+ struct vfe_line *l = &vfe->line[j];
+
+ bpp = vfe_get_bpp(l->formats,
+ l->nformats,
+ l->fmt[MSM_VFE_PAD_SINK].code);
tmp = pixel_clock[j] * bpp / 64;
}
@@ -1984,12 +1242,20 @@ static int vfe_get(struct vfe_device *vfe)
mutex_lock(&vfe->power_lock);
if (vfe->power_count == 0) {
+ ret = camss_pm_domain_on(vfe->camss, vfe->id);
+ if (ret < 0)
+ goto error_pm_domain;
+
+ ret = pm_runtime_get_sync(vfe->camss->dev);
+ if (ret < 0)
+ goto error_pm_runtime_get;
+
ret = vfe_set_clock_rates(vfe);
if (ret < 0)
goto error_clocks;
ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
- to_device(vfe));
+ vfe->camss->dev);
if (ret < 0)
goto error_clocks;
@@ -2015,6 +1281,12 @@ error_reset:
camss_disable_clocks(vfe->nclocks, vfe->clock);
error_clocks:
+ pm_runtime_put_sync(vfe->camss->dev);
+
+error_pm_runtime_get:
+ camss_pm_domain_off(vfe->camss, vfe->id);
+
+error_pm_domain:
mutex_unlock(&vfe->power_lock);
return ret;
@@ -2029,7 +1301,7 @@ static void vfe_put(struct vfe_device *vfe)
mutex_lock(&vfe->power_lock);
if (vfe->power_count == 0) {
- dev_err(to_device(vfe), "vfe power off on power_count == 0\n");
+ dev_err(vfe->camss->dev, "vfe power off on power_count == 0\n");
goto exit;
} else if (vfe->power_count == 1) {
if (vfe->was_streaming) {
@@ -2037,6 +1309,8 @@ static void vfe_put(struct vfe_device *vfe)
vfe_halt(vfe);
}
camss_disable_clocks(vfe->nclocks, vfe->clock);
+ pm_runtime_put_sync(vfe->camss->dev);
+ camss_pm_domain_off(vfe->camss, vfe->id);
}
vfe->power_count--;
@@ -2046,26 +1320,6 @@ exit:
}
/*
- * vfe_video_pad_to_line - Get pointer to VFE line by media pad
- * @pad: Media pad
- *
- * Return pointer to vfe line structure
- */
-static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad)
-{
- struct media_pad *vfe_pad;
- struct v4l2_subdev *subdev;
-
- vfe_pad = media_entity_remote_pad(pad);
- if (vfe_pad == NULL)
- return NULL;
-
- subdev = media_entity_to_v4l2_subdev(vfe_pad->entity);
-
- return container_of(subdev, struct vfe_line, subdev);
-}
-
-/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
@@ -2078,16 +1332,11 @@ static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad)
static int vfe_queue_buffer(struct camss_video *vid,
struct camss_buffer *buf)
{
- struct vfe_device *vfe = &vid->camss->vfe;
- struct vfe_line *line;
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
- line = vfe_video_pad_to_line(&vid->pad);
- if (!line) {
- dev_err(to_device(vfe), "Can not queue buffer\n");
- return -1;
- }
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
@@ -2112,16 +1361,11 @@ static int vfe_queue_buffer(struct camss_video *vid,
static int vfe_flush_buffers(struct camss_video *vid,
enum vb2_buffer_state state)
{
- struct vfe_device *vfe = &vid->camss->vfe;
- struct vfe_line *line;
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
- line = vfe_video_pad_to_line(&vid->pad);
- if (!line) {
- dev_err(to_device(vfe), "Can not flush buffers\n");
- return -1;
- }
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
@@ -2158,15 +1402,11 @@ static int vfe_set_power(struct v4l2_subdev *sd, int on)
int ret;
if (on) {
- u32 hw_version;
-
ret = vfe_get(vfe);
if (ret < 0)
return ret;
- hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_dbg(to_device(vfe),
- "VFE HW Version = 0x%08x\n", hw_version);
+ vfe->ops->hw_version_read(vfe, vfe->camss->dev);
} else {
vfe_put(vfe);
}
@@ -2192,12 +1432,12 @@ static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
ret = vfe_enable(line);
if (ret < 0)
- dev_err(to_device(vfe),
+ dev_err(vfe->camss->dev,
"Failed to enable vfe outputs\n");
} else {
ret = vfe_disable(line);
if (ret < 0)
- dev_err(to_device(vfe),
+ dev_err(vfe->camss->dev,
"Failed to disable vfe outputs\n");
}
@@ -2286,12 +1526,12 @@ static void vfe_try_format(struct vfe_line *line,
case MSM_VFE_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
- if (fmt->code == vfe_formats[i].code)
+ for (i = 0; i < line->nformats; i++)
+ if (fmt->code == line->formats[i].code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(vfe_formats))
+ if (i >= line->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -2304,11 +1544,11 @@ static void vfe_try_format(struct vfe_line *line,
case MSM_VFE_PAD_SRC:
/* Set and return a format same as sink pad */
-
code = fmt->code;
- *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
- which);
+ *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+
+ fmt->code = vfe_src_pad_code(line, fmt->code, 0, code);
if (line->id == VFE_LINE_PIX) {
struct v4l2_rect *rect;
@@ -2317,34 +1557,6 @@ static void vfe_try_format(struct vfe_line *line,
fmt->width = rect->width;
fmt->height = rect->height;
-
- switch (fmt->code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- if (code == MEDIA_BUS_FMT_YUYV8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- if (code == MEDIA_BUS_FMT_YVYU8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_YVYU8_2X8;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- default:
- if (code == MEDIA_BUS_FMT_UYVY8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- if (code == MEDIA_BUS_FMT_VYUY8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_VYUY8_2X8;
- break;
- }
}
break;
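
The large per-format switch deleted above collapses into vfe_src_pad_code(), which each VFE version implements against its own source-pad capabilities. Its contract, reduced to an illustrative helper (not the driver's code): with index 0 it honours a supported requested code, otherwise it returns the indexed entry, and 0 past the end; vfe_enum_mbus_code() below relies on the 0 return to terminate enumeration.

static u32 src_code_sketch(const u32 *supported, unsigned int n,
			   unsigned int index, u32 request)
{
	unsigned int i;

	/* with index == 0, honour a supported requested code ... */
	if (!index && request)
		for (i = 0; i < n; i++)
			if (supported[i] == request)
				return request;

	/* ... else fall back to the indexed entry, 0 past the end */
	return index < n ? supported[index] : 0;
}
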
@@ -2448,21 +1660,22 @@ static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_VFE_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(vfe_formats))
+ if (code->index >= line->nformats)
return -EINVAL;
- code->code = vfe_formats[code->index].code;
+ code->code = line->formats[code->index].code;
} else {
- if (code->index > 0)
- return -EINVAL;
+ struct v4l2_mbus_framefmt *sink_fmt;
- format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
- code->which);
+ sink_fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
+ code->which);
- code->code = format->code;
+ code->code = vfe_src_pad_code(line, sink_fmt->code,
+ code->index, 0);
+ if (!code->code)
+ return -EINVAL;
}
return 0;
@@ -2751,15 +1964,29 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
*
* Return 0 on success or a negative error code otherwise
*/
-int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
+int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
+ const struct resources *res, u8 id)
{
- struct device *dev = to_device(vfe);
+ struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *r;
- struct camss *camss = to_camss(vfe);
int i, j;
int ret;
+ vfe->isr_ops.reset_ack = vfe_isr_reset_ack;
+ vfe->isr_ops.halt_ack = vfe_isr_halt_ack;
+ vfe->isr_ops.reg_update = vfe_isr_reg_update;
+ vfe->isr_ops.sof = vfe_isr_sof;
+ vfe->isr_ops.comp_done = vfe_isr_comp_done;
+ vfe->isr_ops.wm_done = vfe_isr_wm_done;
+
+ if (camss->version == CAMSS_8x16)
+ vfe->ops = &vfe_ops_4_1;
+ else if (camss->version == CAMSS_8x96)
+ vfe->ops = &vfe_ops_4_7;
+ else
+ return -EINVAL;
+
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -2781,7 +2008,7 @@ int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
vfe->irq = r->start;
snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
dev_name(dev), MSM_VFE_NAME, vfe->id);
- ret = devm_request_irq(dev, vfe->irq, vfe_isr,
+ ret = devm_request_irq(dev, vfe->irq, vfe->ops->isr,
IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
@@ -2836,16 +2063,38 @@ int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
spin_lock_init(&vfe->output_lock);
- vfe->id = 0;
+ vfe->camss = camss;
+ vfe->id = id;
vfe->reg_update = 0;
for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
- vfe->line[i].video_out.type =
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- vfe->line[i].video_out.camss = camss;
- vfe->line[i].id = i;
- init_completion(&vfe->line[i].output.sof);
- init_completion(&vfe->line[i].output.reg_update);
+ struct vfe_line *l = &vfe->line[i];
+
+ l->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ l->video_out.camss = camss;
+ l->id = i;
+ init_completion(&l->output.sof);
+ init_completion(&l->output.reg_update);
+
+ if (camss->version == CAMSS_8x16) {
+ if (i == VFE_LINE_PIX) {
+ l->formats = formats_pix_8x16;
+ l->nformats = ARRAY_SIZE(formats_pix_8x16);
+ } else {
+ l->formats = formats_rdi_8x16;
+ l->nformats = ARRAY_SIZE(formats_rdi_8x16);
+ }
+ } else if (camss->version == CAMSS_8x96) {
+ if (i == VFE_LINE_PIX) {
+ l->formats = formats_pix_8x96;
+ l->nformats = ARRAY_SIZE(formats_pix_8x96);
+ } else {
+ l->formats = formats_rdi_8x96;
+ l->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
+ return -EINVAL;
+ }
}
init_completion(&vfe->reset_complete);
@@ -2968,7 +2217,7 @@ void msm_vfe_stop_streaming(struct vfe_device *vfe)
int msm_vfe_register_entities(struct vfe_device *vfe,
struct v4l2_device *v4l2_dev)
{
- struct device *dev = to_device(vfe);
+ struct device *dev = vfe->camss->dev;
struct v4l2_subdev *sd;
struct media_pad *pads;
struct camss_video *video_out;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
new file mode 100644
index 000000000000..0d10071ae881
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-vfe.h
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_VFE_H
+#define QC_MSM_CAMSS_VFE_H
+
+#include <linux/clk.h>
+#include <linux/spinlock_types.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-video.h"
+
+#define MSM_VFE_PAD_SINK 0
+#define MSM_VFE_PAD_SRC 1
+#define MSM_VFE_PADS_NUM 2
+
+#define MSM_VFE_LINE_NUM 4
+#define MSM_VFE_IMAGE_MASTERS_NUM 7
+#define MSM_VFE_COMPOSITE_IRQ_NUM 4
+
+enum vfe_output_state {
+ VFE_OUTPUT_OFF,
+ VFE_OUTPUT_RESERVED,
+ VFE_OUTPUT_SINGLE,
+ VFE_OUTPUT_CONTINUOUS,
+ VFE_OUTPUT_IDLE,
+ VFE_OUTPUT_STOPPING
+};
+
+enum vfe_line_id {
+ VFE_LINE_NONE = -1,
+ VFE_LINE_RDI0 = 0,
+ VFE_LINE_RDI1 = 1,
+ VFE_LINE_RDI2 = 2,
+ VFE_LINE_PIX = 3
+};
+
+struct vfe_output {
+ u8 wm_num;
+ u8 wm_idx[3];
+
+ int active_buf;
+ struct camss_buffer *buf[2];
+ struct camss_buffer *last_buffer;
+ struct list_head pending_bufs;
+
+ unsigned int drop_update_idx;
+
+ enum vfe_output_state state;
+ unsigned int sequence;
+ int wait_sof;
+ int wait_reg_update;
+ struct completion sof;
+ struct completion reg_update;
+};
+
+struct vfe_line {
+ enum vfe_line_id id;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_VFE_PADS_NUM];
+ struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM];
+ struct v4l2_rect compose;
+ struct v4l2_rect crop;
+ struct camss_video video_out;
+ struct vfe_output output;
+ const struct vfe_format *formats;
+ unsigned int nformats;
+};
+
+struct vfe_device;
+
+struct vfe_hw_ops {
+ void (*hw_version_read)(struct vfe_device *vfe, struct device *dev);
+ u16 (*get_ub_size)(u8 vfe_id);
+ void (*global_reset)(struct vfe_device *vfe);
+ void (*halt_request)(struct vfe_device *vfe);
+ void (*halt_clear)(struct vfe_device *vfe);
+ void (*wm_enable)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*wm_frame_based)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*wm_line_based)(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable);
+ void (*wm_set_framedrop_period)(struct vfe_device *vfe, u8 wm, u8 per);
+ void (*wm_set_framedrop_pattern)(struct vfe_device *vfe, u8 wm,
+ u32 pattern);
+ void (*wm_set_ub_cfg)(struct vfe_device *vfe, u8 wm, u16 offset,
+ u16 depth);
+ void (*bus_reload_wm)(struct vfe_device *vfe, u8 wm);
+ void (*wm_set_ping_addr)(struct vfe_device *vfe, u8 wm, u32 addr);
+ void (*wm_set_pong_addr)(struct vfe_device *vfe, u8 wm, u32 addr);
+ int (*wm_get_ping_pong_status)(struct vfe_device *vfe, u8 wm);
+ void (*bus_enable_wr_if)(struct vfe_device *vfe, u8 enable);
+ void (*bus_connect_wm_to_rdi)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id);
+ void (*wm_set_subsample)(struct vfe_device *vfe, u8 wm);
+ void (*bus_disconnect_wm_from_rdi)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id);
+ void (*set_xbar_cfg)(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable);
+ void (*set_rdi_cid)(struct vfe_device *vfe, enum vfe_line_id id,
+ u8 cid);
+ void (*set_realign_cfg)(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable);
+ void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*reg_update_clear)(struct vfe_device *vfe,
+ enum vfe_line_id line_id);
+ void (*enable_irq_wm_line)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable);
+ void (*enable_irq_pix_line)(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable);
+ void (*enable_irq_common)(struct vfe_device *vfe);
+ void (*set_demux_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_scale_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_crop_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_clamp_cfg)(struct vfe_device *vfe);
+ void (*set_qos)(struct vfe_device *vfe);
+ void (*set_ds)(struct vfe_device *vfe);
+ void (*set_cgc_override)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*set_camif_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_camif_cmd)(struct vfe_device *vfe, u8 enable);
+ void (*set_module_cfg)(struct vfe_device *vfe, u8 enable);
+ int (*camif_wait_for_stop)(struct vfe_device *vfe, struct device *dev);
+ void (*isr_read)(struct vfe_device *vfe, u32 *value0, u32 *value1);
+ void (*violation_read)(struct vfe_device *vfe);
+ irqreturn_t (*isr)(int irq, void *dev);
+};
+
+struct vfe_isr_ops {
+ void (*reset_ack)(struct vfe_device *vfe);
+ void (*halt_ack)(struct vfe_device *vfe);
+ void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*sof)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*comp_done)(struct vfe_device *vfe, u8 comp);
+ void (*wm_done)(struct vfe_device *vfe, u8 wm);
+};
+
+struct vfe_device {
+ struct camss *camss;
+ u8 id;
+ void __iomem *base;
+ u32 irq;
+ char irq_name[30];
+ struct camss_clock *clock;
+ int nclocks;
+ struct completion reset_complete;
+ struct completion halt_complete;
+ struct mutex power_lock;
+ int power_count;
+ struct mutex stream_lock;
+ int stream_count;
+ spinlock_t output_lock;
+ enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM];
+ struct vfe_line line[MSM_VFE_LINE_NUM];
+ u32 reg_update;
+ u8 was_streaming;
+ const struct vfe_hw_ops *ops;
+ struct vfe_isr_ops isr_ops;
+};
+
+struct resources;
+
+int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
+ const struct resources *res, u8 id);
+
+int msm_vfe_register_entities(struct vfe_device *vfe,
+ struct v4l2_device *v4l2_dev);
+
+void msm_vfe_unregister_entities(struct vfe_device *vfe);
+
+void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id);
+void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id);
+
+void msm_vfe_stop_streaming(struct vfe_device *vfe);
+
+extern const struct vfe_hw_ops vfe_ops_4_1;
+extern const struct vfe_hw_ops vfe_ops_4_7;
+
+#endif /* QC_MSM_CAMSS_VFE_H */
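
The new header splits hardware knowledge in two: vfe_hw_ops isolates register-level differences per VFE generation, while vfe_isr_ops points back at the generic handlers. Note the get_ub_size() convention that a zero return means an unsupported VFE id, which vfe_enable_output() above turns into -EINVAL; a sketch with placeholder values:

static u16 vfe_4_1_get_ub_size(u8 vfe_id)
{
	if (vfe_id == 0)
		return 1023 / 3;	/* placeholder UB partition for RDI */

	return 0;			/* no VFE1 on 8x16 */
}
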
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index ffaa2849e0c1..c9bb0d023db4 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-video.c
*
* Qualcomm MSM Camera Subsystem - V4L2 device node
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/slab.h>
#include <media/media-entity.h>
@@ -49,7 +41,7 @@ struct camss_format_info {
unsigned int bpp[3];
};
-static const struct camss_format_info formats_rdi[] = {
+static const struct camss_format_info formats_rdi_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
@@ -82,9 +74,60 @@ static const struct camss_format_info formats_rdi[] = {
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
};
-static const struct camss_format_info formats_pix[] = {
+static const struct camss_format_info formats_rdi_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_PIX_FMT_SBGGR10, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, V4L2_PIX_FMT_SBGGR14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, V4L2_PIX_FMT_SGBRG14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, V4L2_PIX_FMT_SGRBG14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+};
+
+static const struct camss_format_info formats_pix_8x16[] = {
{ MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
@@ -119,6 +162,49 @@ static const struct camss_format_info formats_pix[] = {
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
};
+static const struct camss_format_info formats_pix_8x96[] = {
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+};
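
Each entry pairs a media bus code with a V4L2 pixelformat plus, per plane, horizontal and vertical scaling fractions and bits per pixel, from which the video node derives bytesperline and sizeimage. For the single-plane NV12 rows (hsub { 1, 1 }, vsub { 2, 3 }, bpp 8) the { 2, 3 } vertical fraction folds the half-height chroma into one buffer 1.5x the luma height. A worked sketch, assuming the usual width / num * den scaling; 1920x1080 yields 1920 * 1620 = 3110400 bytes, i.e. width * height * 3 / 2:

static u32 nv12_sizeimage_sketch(u32 width, u32 height)
{
	/* hsub { 1, 1 }, bpp 8 -> bytesperline == width */
	u32 bytesperline = width / 1 * 1 * 8 / 8;

	/* vsub { 2, 3 } -> 1.5 * height rows in the single plane */
	return height / 2 * 3 * bytesperline;
}
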
+
/* -----------------------------------------------------------------------------
* Helper functions
*/
@@ -798,11 +884,24 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
mutex_init(&video->lock);
- video->formats = formats_rdi;
- video->nformats = ARRAY_SIZE(formats_rdi);
- if (is_pix) {
- video->formats = formats_pix;
- video->nformats = ARRAY_SIZE(formats_pix);
+ if (video->camss->version == CAMSS_8x16) {
+ if (is_pix) {
+ video->formats = formats_pix_8x16;
+ video->nformats = ARRAY_SIZE(formats_pix_8x16);
+ } else {
+ video->formats = formats_rdi_8x16;
+ video->nformats = ARRAY_SIZE(formats_rdi_8x16);
+ }
+ } else if (video->camss->version == CAMSS_8x96) {
+ if (is_pix) {
+ video->formats = formats_pix_8x96;
+ video->nformats = ARRAY_SIZE(formats_pix_8x96);
+ } else {
+ video->formats = formats_rdi_8x96;
+ video->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
+ goto error_video_register;
}
ret = msm_video_init_format(video);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.h b/drivers/media/platform/qcom/camss/camss-video.h
index 38bd1f2eec54..aa35e8cc6fd5 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-video.h
+++ b/drivers/media/platform/qcom/camss/camss-video.h
@@ -1,19 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-video.h
*
* Qualcomm MSM Camera Subsystem - V4L2 device node
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_VIDEO_H
#define QC_MSM_CAMSS_VIDEO_H
diff --git a/drivers/media/platform/qcom/camss-8x16/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 23fda6207a23..dcc0c30ef1b1 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss.c
*
* Qualcomm MSM Camera Subsystem - Core
*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/media-bus-format.h>
@@ -22,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -36,12 +30,11 @@
#define CAMSS_CLOCK_MARGIN_NUMERATOR 105
#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100
-static const struct resources csiphy_res[] = {
+static const struct resources csiphy_res_8x16[] = {
/* CSIPHY0 */
{
.regulator = { NULL },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "camss_ahb", "csiphy0_timer" },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
@@ -53,8 +46,7 @@ static const struct resources csiphy_res[] = {
/* CSIPHY1 */
{
.regulator = { NULL },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "camss_ahb", "csiphy1_timer" },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
@@ -64,12 +56,11 @@ static const struct resources csiphy_res[] = {
}
};
-static const struct resources csid_res[] = {
+static const struct resources csid_res_8x16[] = {
/* CSID0 */
{
.regulator = { "vdda" },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "csi0_ahb", "camss_ahb",
+ .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
.clock_rate = { { 0 },
{ 0 },
@@ -86,8 +77,7 @@ static const struct resources csid_res[] = {
/* CSID1 */
{
.regulator = { "vdda" },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "csi1_ahb", "camss_ahb",
+ .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
.clock_rate = { { 0 },
{ 0 },
@@ -102,35 +92,195 @@ static const struct resources csid_res[] = {
},
};
-static const struct resources_ispif ispif_res = {
+static const struct resources_ispif ispif_res_8x16 = {
/* ISPIF */
- .clock = { "camss_top_ahb", "camss_ahb", "ispif_ahb",
+ .clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
"csi1", "csi1_pix", "csi1_rdi" },
- .clock_for_reset = { "camss_vfe_vfe", "camss_csi_vfe" },
+ .clock_for_reset = { "vfe0", "csi_vfe0" },
.reg = { "ispif", "csi_clk_mux" },
.interrupt = "ispif"
};
-static const struct resources vfe_res = {
+static const struct resources vfe_res_8x16[] = {
+ /* VFE0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "vfe0", "csi_vfe0",
+ "vfe_ahb", "vfe_axi", "ahb" },
+ .clock_rate = { { 0 },
+ { 50000000, 80000000, 100000000, 160000000,
+ 177780000, 200000000, 266670000, 320000000,
+ 400000000, 465000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" }
+ }
+};
+
+static const struct resources csiphy_res_8x96[] = {
+ /* CSIPHY0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy0", "csiphy0_clk_mux" },
+ .interrupt = { "csiphy0" }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy1", "csiphy1_clk_mux" },
+ .interrupt = { "csiphy1" }
+ },
+
+ /* CSIPHY2 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy2", "csiphy2_clk_mux" },
+ .interrupt = { "csiphy2" }
+ }
+};
+
+static const struct resources csid_res_8x96[] = {
+ /* CSID0 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
+ "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" }
+ },
+
+ /* CSID1 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
+ "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" }
+ },
+
+ /* CSID2 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb",
+ "csi2", "csi2_phy", "csi2_pix", "csi2_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" }
+ },
+
+ /* CSID3 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb",
+ "csi3", "csi3_phy", "csi3_pix", "csi3_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid3" },
+ .interrupt = { "csid3" }
+ }
+};
+
+static const struct resources_ispif ispif_res_8x96 = {
+ /* ISPIF */
+ .clock = { "top_ahb", "ahb", "ispif_ahb",
+ "csi0", "csi0_pix", "csi0_rdi",
+ "csi1", "csi1_pix", "csi1_rdi",
+ "csi2", "csi2_pix", "csi2_rdi",
+ "csi3", "csi3_pix", "csi3_rdi" },
+ .clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
+ .reg = { "ispif", "csi_clk_mux" },
+ .interrupt = "ispif"
+};
+
+static const struct resources vfe_res_8x96[] = {
/* VFE0 */
- .regulator = { NULL },
- .clock = { "camss_top_ahb", "camss_vfe_vfe", "camss_csi_vfe",
- "iface", "bus", "camss_ahb" },
- .clock_rate = { { 0 },
- { 50000000, 80000000, 100000000, 160000000,
- 177780000, 200000000, 266670000, 320000000,
- 400000000, 465000000 },
- { 0 },
- { 0 },
- { 0 },
- { 0 },
- { 0 },
- { 0 },
- { 0 } },
- .reg = { "vfe0" },
- .interrupt = { "vfe0" }
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ahb", "vfe0", "csi_vfe0", "vfe_ahb",
+ "vfe0_ahb", "vfe_axi", "vfe0_stream"},
+ .clock_rate = { { 0 },
+ { 0 },
+ { 75000000, 100000000, 300000000,
+ 320000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" }
+ },
+
+ /* VFE1 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ahb", "vfe1", "csi_vfe1", "vfe_ahb",
+ "vfe1_ahb", "vfe_axi", "vfe1_stream"},
+ .clock_rate = { { 0 },
+ { 0 },
+ { 75000000, 100000000, 300000000,
+ 320000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" }
+ }
};
/*
@@ -245,6 +395,26 @@ int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock)
return 0;
}
+int camss_pm_domain_on(struct camss *camss, int id)
+{
+ if (camss->version == CAMSS_8x96) {
+ camss->genpd_link[id] = device_link_add(camss->dev,
+ camss->genpd[id], DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+
+ if (!camss->genpd_link[id])
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void camss_pm_domain_off(struct camss *camss, int id)
+{
+ if (camss->version == CAMSS_8x96)
+ device_link_del(camss->genpd_link[id]);
+}
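
camss_pm_domain_on() powers a VFE domain by linking the CAMSS device to the genpd virtual device attached at probe time: DL_FLAG_PM_RUNTIME makes the domain follow the consumer's runtime PM state and DL_FLAG_RPM_ACTIVE raises it immediately, while deleting the link in camss_pm_domain_off() lets it drop. Callers bracket hardware access with the pair, as vfe_get()/vfe_put() do above; a sketch:

static int vfe_powered_access_sketch(struct camss *camss,
				     struct vfe_device *vfe)
{
	int ret;

	ret = camss_pm_domain_on(camss, vfe->id);
	if (ret < 0)
		return ret;

	/* ... program the VFE while its power domain is guaranteed on ... */

	camss_pm_domain_off(camss, vfe->id);

	return 0;
}
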
+
/*
* camss_of_parse_endpoint_node - Parse port endpoint node
* @dev: Device
@@ -304,6 +474,7 @@ static int camss_of_parse_ports(struct device *dev,
if (of_device_is_available(node))
notifier->num_subdevs++;
+ of_node_put(node);
size = sizeof(*notifier->subdevs) * notifier->num_subdevs;
notifier->subdevs = devm_kzalloc(dev, size, GFP_KERNEL);
if (!notifier->subdevs) {
@@ -334,16 +505,16 @@ static int camss_of_parse_ports(struct device *dev,
}
remote = of_graph_get_remote_port_parent(node);
- of_node_put(node);
-
if (!remote) {
dev_err(dev, "Cannot get remote parent\n");
+ of_node_put(node);
return -EINVAL;
}
csd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
csd->asd.match.fwnode = of_fwnode_handle(remote);
}
+ of_node_put(node);
return notifier->num_subdevs;
}
@@ -356,11 +527,29 @@ static int camss_of_parse_ports(struct device *dev,
*/
static int camss_init_subdevices(struct camss *camss)
{
+ const struct resources *csiphy_res;
+ const struct resources *csid_res;
+ const struct resources_ispif *ispif_res;
+ const struct resources *vfe_res;
unsigned int i;
int ret;
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) {
- ret = msm_csiphy_subdev_init(&camss->csiphy[i],
+ if (camss->version == CAMSS_8x16) {
+ csiphy_res = csiphy_res_8x16;
+ csid_res = csid_res_8x16;
+ ispif_res = &ispif_res_8x16;
+ vfe_res = vfe_res_8x16;
+ } else if (camss->version == CAMSS_8x96) {
+ csiphy_res = csiphy_res_8x96;
+ csid_res = csid_res_8x96;
+ ispif_res = &ispif_res_8x96;
+ vfe_res = vfe_res_8x96;
+ } else {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
&csiphy_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
@@ -370,8 +559,8 @@ static int camss_init_subdevices(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++) {
- ret = msm_csid_subdev_init(&camss->csid[i],
+ for (i = 0; i < camss->csid_num; i++) {
+ ret = msm_csid_subdev_init(camss, &camss->csid[i],
&csid_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
@@ -381,17 +570,21 @@ static int camss_init_subdevices(struct camss *camss)
}
}
- ret = msm_ispif_subdev_init(&camss->ispif, &ispif_res);
+ ret = msm_ispif_subdev_init(&camss->ispif, ispif_res);
if (ret < 0) {
dev_err(camss->dev, "Failed to init ispif sub-device: %d\n",
ret);
return ret;
}
- ret = msm_vfe_subdev_init(&camss->vfe, &vfe_res);
- if (ret < 0) {
- dev_err(camss->dev, "Fail to init vfe sub-device: %d\n", ret);
- return ret;
+ for (i = 0; i < camss->vfe_num; i++) {
+ ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
+ &vfe_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Fail to init vfe%d sub-device: %d\n", i, ret);
+ return ret;
+ }
}
return 0;
@@ -405,10 +598,10 @@ static int camss_init_subdevices(struct camss *camss)
*/
static int camss_register_entities(struct camss *camss)
{
- int i, j;
+ int i, j, k;
int ret;
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) {
+ for (i = 0; i < camss->csiphy_num; i++) {
ret = msm_csiphy_register_entity(&camss->csiphy[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -419,7 +612,7 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++) {
+ for (i = 0; i < camss->csid_num; i++) {
ret = msm_csid_register_entity(&camss->csid[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -437,15 +630,19 @@ static int camss_register_entities(struct camss *camss)
goto err_reg_ispif;
}
- ret = msm_vfe_register_entities(&camss->vfe, &camss->v4l2_dev);
- if (ret < 0) {
- dev_err(camss->dev, "Failed to register vfe entities: %d\n",
- ret);
- goto err_reg_vfe;
+ for (i = 0; i < camss->vfe_num; i++) {
+ ret = msm_vfe_register_entities(&camss->vfe[i],
+ &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to register vfe%d entities: %d\n",
+ i, ret);
+ goto err_reg_vfe;
+ }
}
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) {
- for (j = 0; j < ARRAY_SIZE(camss->csid); j++) {
+ for (i = 0; i < camss->csiphy_num; i++) {
+ for (j = 0; j < camss->csid_num; j++) {
ret = media_create_pad_link(
&camss->csiphy[i].subdev.entity,
MSM_CSIPHY_PAD_SRC,
@@ -463,8 +660,8 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++) {
- for (j = 0; j < ARRAY_SIZE(camss->ispif.line); j++) {
+ for (i = 0; i < camss->csid_num; i++) {
+ for (j = 0; j < camss->ispif.line_num; j++) {
ret = media_create_pad_link(
&camss->csid[i].subdev.entity,
MSM_CSID_PAD_SRC,
@@ -482,39 +679,42 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->ispif.line); i++) {
- for (j = 0; j < ARRAY_SIZE(camss->vfe.line); j++) {
- ret = media_create_pad_link(
- &camss->ispif.line[i].subdev.entity,
- MSM_ISPIF_PAD_SRC,
- &camss->vfe.line[j].subdev.entity,
- MSM_VFE_PAD_SINK,
- 0);
- if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- camss->ispif.line[i].subdev.entity.name,
- camss->vfe.line[j].subdev.entity.name,
- ret);
- goto err_link;
+ for (i = 0; i < camss->ispif.line_num; i++)
+ for (k = 0; k < camss->vfe_num; k++)
+ for (j = 0; j < ARRAY_SIZE(camss->vfe[k].line); j++) {
+ ret = media_create_pad_link(
+ &camss->ispif.line[i].subdev.entity,
+ MSM_ISPIF_PAD_SRC,
+ &camss->vfe[k].line[j].subdev.entity,
+ MSM_VFE_PAD_SINK,
+ 0);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ camss->ispif.line[i].subdev.entity.name,
+ camss->vfe[k].line[j].subdev.entity.name,
+ ret);
+ goto err_link;
+ }
}
- }
- }
return 0;
err_link:
- msm_vfe_unregister_entities(&camss->vfe);
+ i = camss->vfe_num;
err_reg_vfe:
+ for (i--; i >= 0; i--)
+ msm_vfe_unregister_entities(&camss->vfe[i]);
+
msm_ispif_unregister_entities(&camss->ispif);
err_reg_ispif:
- i = ARRAY_SIZE(camss->csid);
+ i = camss->csid_num;
err_reg_csid:
for (i--; i >= 0; i--)
msm_csid_unregister_entity(&camss->csid[i]);
- i = ARRAY_SIZE(camss->csiphy);
+ i = camss->csiphy_num;
err_reg_csiphy:
for (i--; i >= 0; i--)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
@@ -532,14 +732,16 @@ static void camss_unregister_entities(struct camss *camss)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++)
+ for (i = 0; i < camss->csiphy_num; i++)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++)
+ for (i = 0; i < camss->csid_num; i++)
msm_csid_unregister_entity(&camss->csid[i]);
msm_ispif_unregister_entities(&camss->ispif);
- msm_vfe_unregister_entities(&camss->vfe);
+
+ for (i = 0; i < camss->vfe_num; i++)
+ msm_vfe_unregister_entities(&camss->vfe[i]);
}
static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async,
@@ -631,6 +833,35 @@ static int camss_probe(struct platform_device *pdev)
camss->dev = dev;
platform_set_drvdata(pdev, camss);
+ if (of_device_is_compatible(dev->of_node, "qcom,msm8916-camss")) {
+ camss->version = CAMSS_8x16;
+ camss->csiphy_num = 2;
+ camss->csid_num = 2;
+ camss->vfe_num = 1;
+ } else if (of_device_is_compatible(dev->of_node,
+ "qcom,msm8996-camss")) {
+ camss->version = CAMSS_8x96;
+ camss->csiphy_num = 3;
+ camss->csid_num = 4;
+ camss->vfe_num = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
+ GFP_KERNEL);
+ if (!camss->csiphy)
+ return -ENOMEM;
+
+ camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
+ GFP_KERNEL);
+ if (!camss->csid)
+ return -ENOMEM;
+
+ camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+ if (!camss->vfe)
+ return -ENOMEM;
+
ret = camss_of_parse_ports(dev, &camss->notifier);
if (ret < 0)
return ret;
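
camss_probe() now sizes the csiphy/csid/vfe arrays from the compatible string instead of using fixed-size members. The same per-SoC data could be expressed table-driven through of_device_id.data, mirroring what venus does further below; a hypothetical refactor:

struct camss_cfg {
	enum camss_version version;
	int csiphy_num;
	int csid_num;
	int vfe_num;
};

static const struct camss_cfg msm8916_cfg = { CAMSS_8x16, 2, 2, 1 };
static const struct camss_cfg msm8996_cfg = { CAMSS_8x96, 3, 4, 2 };

static const struct of_device_id camss_dt_match_sketch[] = {
	{ .compatible = "qcom,msm8916-camss", .data = &msm8916_cfg },
	{ .compatible = "qcom,msm8996-camss", .data = &msm8996_cfg },
	{ }
};
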
@@ -687,6 +918,23 @@ static int camss_probe(struct platform_device *pdev)
}
}
+ if (camss->version == CAMSS_8x96) {
+ camss->genpd[PM_DOMAIN_VFE0] = dev_pm_domain_attach_by_id(
+ camss->dev, PM_DOMAIN_VFE0);
+ if (IS_ERR(camss->genpd[PM_DOMAIN_VFE0]))
+ return PTR_ERR(camss->genpd[PM_DOMAIN_VFE0]);
+
+ camss->genpd[PM_DOMAIN_VFE1] = dev_pm_domain_attach_by_id(
+ camss->dev, PM_DOMAIN_VFE1);
+ if (IS_ERR(camss->genpd[PM_DOMAIN_VFE1])) {
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE0],
+ true);
+ return PTR_ERR(camss->genpd[PM_DOMAIN_VFE1]);
+ }
+ }
+
+ pm_runtime_enable(dev);
+
return 0;
err_register_subdevs:
@@ -703,6 +951,13 @@ void camss_delete(struct camss *camss)
media_device_unregister(&camss->media_dev);
media_device_cleanup(&camss->media_dev);
+ pm_runtime_disable(camss->dev);
+
+ if (camss->version == CAMSS_8x96) {
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE0], true);
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE1], true);
+ }
+
kfree(camss);
}
@@ -714,9 +969,12 @@ void camss_delete(struct camss *camss)
*/
static int camss_remove(struct platform_device *pdev)
{
+ unsigned int i;
+
struct camss *camss = platform_get_drvdata(pdev);
- msm_vfe_stop_streaming(&camss->vfe);
+ for (i = 0; i < camss->vfe_num; i++)
+ msm_vfe_stop_streaming(&camss->vfe[i]);
v4l2_async_notifier_unregister(&camss->notifier);
camss_unregister_entities(camss);
@@ -729,17 +987,35 @@ static int camss_remove(struct platform_device *pdev)
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss" },
+ { .compatible = "qcom,msm8996-camss" },
{ }
};
MODULE_DEVICE_TABLE(of, camss_dt_match);
+static int camss_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int camss_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops camss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(camss_runtime_suspend, camss_runtime_resume, NULL)
+};
+
static struct platform_driver qcom_camss_driver = {
.probe = camss_probe,
.remove = camss_remove,
.driver = {
.name = "qcom-camss",
.of_match_table = camss_dt_match,
+ .pm = &camss_pm_ops,
},
};
diff --git a/drivers/media/platform/qcom/camss-8x16/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 4ad223443e4b..418996d8dad8 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -1,23 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss.h
*
* Qualcomm MSM Camera Subsystem - Core
*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_H
#define QC_MSM_CAMSS_H
+#include <linux/device.h>
#include <linux/types.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
@@ -31,9 +24,6 @@
#include "camss-ispif.h"
#include "camss-vfe.h"
-#define CAMSS_CSID_NUM 2
-#define CAMSS_CSIPHY_NUM 2
-
#define to_camss(ptr_module) \
container_of(ptr_module, struct camss, ptr_module)
@@ -50,7 +40,7 @@
#define to_device_index(ptr_module, index) \
(to_camss_index(ptr_module, index)->dev)
-#define CAMSS_RES_MAX 15
+#define CAMSS_RES_MAX 17
struct resources {
char *regulator[CAMSS_RES_MAX];
@@ -67,16 +57,33 @@ struct resources_ispif {
char *interrupt;
};
+enum pm_domain {
+ PM_DOMAIN_VFE0,
+ PM_DOMAIN_VFE1,
+ PM_DOMAIN_COUNT
+};
+
+enum camss_version {
+ CAMSS_8x16,
+ CAMSS_8x96,
+};
+
struct camss {
+ enum camss_version version;
struct v4l2_device v4l2_dev;
struct v4l2_async_notifier notifier;
struct media_device media_dev;
struct device *dev;
- struct csiphy_device csiphy[CAMSS_CSIPHY_NUM];
- struct csid_device csid[CAMSS_CSID_NUM];
+ int csiphy_num;
+ struct csiphy_device *csiphy;
+ int csid_num;
+ struct csid_device *csid;
struct ispif_device ispif;
- struct vfe_device vfe;
+ int vfe_num;
+ struct vfe_device *vfe;
atomic_t ref_count;
+ struct device *genpd[PM_DOMAIN_COUNT];
+ struct device_link *genpd_link[PM_DOMAIN_COUNT];
};
struct camss_camera_interface {
@@ -101,6 +108,8 @@ int camss_enable_clocks(int nclocks, struct camss_clock *clock,
struct device *dev);
void camss_disable_clocks(int nclocks, struct camss_clock *clock);
int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock);
+int camss_pm_domain_on(struct camss *camss, int id);
+void camss_pm_domain_off(struct camss *camss, int id);
void camss_delete(struct camss *camss);
#endif /* QC_MSM_CAMSS_H */
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
index bfd4edf7c83f..b44b11b03e12 100644
--- a/drivers/media/platform/qcom/venus/Makefile
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -2,7 +2,8 @@
# Makefile for Qualcomm Venus driver
venus-core-objs += core.o helpers.o firmware.o \
- hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o
+ hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \
+ hfi_parser.o
venus-dec-objs += vdec.o vdec_ctrls.o
venus-enc-objs += venc.o venc_ctrls.o
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 41eef376eb2d..bb6add9d340e 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -152,6 +152,83 @@ static void venus_clks_disable(struct venus_core *core)
clk_disable_unprepare(core->clks[i]);
}
+static u32 to_v4l2_codec_type(u32 codec)
+{
+ switch (codec) {
+ case HFI_VIDEO_CODEC_H264:
+ return V4L2_PIX_FMT_H264;
+ case HFI_VIDEO_CODEC_H263:
+ return V4L2_PIX_FMT_H263;
+ case HFI_VIDEO_CODEC_MPEG1:
+ return V4L2_PIX_FMT_MPEG1;
+ case HFI_VIDEO_CODEC_MPEG2:
+ return V4L2_PIX_FMT_MPEG2;
+ case HFI_VIDEO_CODEC_MPEG4:
+ return V4L2_PIX_FMT_MPEG4;
+ case HFI_VIDEO_CODEC_VC1:
+ return V4L2_PIX_FMT_VC1_ANNEX_G;
+ case HFI_VIDEO_CODEC_VP8:
+ return V4L2_PIX_FMT_VP8;
+ case HFI_VIDEO_CODEC_VP9:
+ return V4L2_PIX_FMT_VP9;
+ case HFI_VIDEO_CODEC_DIVX:
+ case HFI_VIDEO_CODEC_DIVX_311:
+ return V4L2_PIX_FMT_XVID;
+ default:
+ return 0;
+ }
+}
+
+static int venus_enumerate_codecs(struct venus_core *core, u32 type)
+{
+ const struct hfi_inst_ops dummy_ops = {};
+ struct venus_inst *inst;
+ u32 codec, codecs;
+ unsigned int i;
+ int ret;
+
+ if (core->res->hfi_version != HFI_VERSION_1XX)
+ return 0;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ mutex_init(&inst->lock);
+ inst->core = core;
+ inst->session_type = type;
+ if (type == VIDC_SESSION_TYPE_DEC)
+ codecs = core->dec_codecs;
+ else
+ codecs = core->enc_codecs;
+
+ ret = hfi_session_create(inst, &dummy_ops);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < MAX_CODEC_NUM; i++) {
+ codec = (1 << i) & codecs;
+ if (!codec)
+ continue;
+
+ ret = hfi_session_init(inst, to_v4l2_codec_type(codec));
+ if (ret)
+ goto done;
+
+ ret = hfi_session_deinit(inst);
+ if (ret)
+ goto done;
+ }
+
+done:
+ hfi_session_destroy(inst);
+err:
+ mutex_destroy(&inst->lock);
+ kfree(inst);
+
+ return ret;
+}
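
On HFI 1xx firmware the capabilities are only reported within an open session, so venus_enumerate_codecs() spins up a throwaway instance and opens and closes one session per advertised codec bit. Since dec_codecs/enc_codecs widen to unsigned long in this series (see core.h below), the walk could equally use the bitmap helpers; a sketch of the loop body:

	unsigned long mask = codecs;
	unsigned int bit;

	for_each_set_bit(bit, &mask, MAX_CODEC_NUM) {
		ret = hfi_session_init(inst, to_v4l2_codec_type(BIT(bit)));
		if (ret)
			break;

		ret = hfi_session_deinit(inst);
		if (ret)
			break;
	}
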
+
static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -219,6 +296,14 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
goto err_venus_shutdown;
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
+ if (ret)
+ goto err_venus_shutdown;
+
ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret)
goto err_core_deinit;
@@ -365,9 +450,31 @@ static const struct venus_resources msm8996_res = {
.fwname = "qcom/venus-4.2/venus.mdt",
};
+static const struct freq_tbl sdm845_freq_table[] = {
+ { 1944000, 380000000 }, /* 4k UHD @ 60 */
+ { 972000, 320000000 }, /* 4k UHD @ 30 */
+ { 489600, 200000000 }, /* 1080p @ 60 */
+ { 244800, 100000000 }, /* 1080p @ 30 */
+};
+
+static const struct venus_resources sdm845_res = {
+ .freq_tbl = sdm845_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .clks = {"core", "iface", "bus" },
+ .clks_num = 3,
+ .max_load = 2563200,
+ .hfi_version = HFI_VERSION_4XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/venus-5.2/venus.mdt",
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
+ { .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ }
};
MODULE_DEVICE_TABLE(of, venus_dt_match);
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 0360d295f4c8..2f02365f4818 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -57,6 +57,30 @@ struct venus_format {
u32 type;
};
+#define MAX_PLANES 4
+#define MAX_FMT_ENTRIES 32
+#define MAX_CAP_ENTRIES 32
+#define MAX_ALLOC_MODE_ENTRIES 16
+#define MAX_CODEC_NUM 32
+
+struct raw_formats {
+ u32 buftype;
+ u32 fmt;
+};
+
+struct venus_caps {
+ u32 codec;
+ u32 domain;
+ bool cap_bufs_mode_dynamic;
+ unsigned int num_caps;
+ struct hfi_capability caps[MAX_CAP_ENTRIES];
+ unsigned int num_pl;
+ struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
+ unsigned int num_fmts;
+ struct raw_formats fmts[MAX_FMT_ENTRIES];
+ bool valid; /* used only for Venus v1xx */
+};
+
/**
* struct venus_core - holds core parameters valid for all instances
*
@@ -65,6 +89,8 @@ struct venus_format {
* @clks: an array of struct clk pointers
* @core0_clk: a struct clk pointer for core0
* @core1_clk: a struct clk pointer for core1
+ * @core0_bus_clk: a struct clk pointer for core0 bus clock
+ * @core1_bus_clk: a struct clk pointer for core1 bus clock
* @vdev_dec: a reference to video device structure for decoder instances
* @vdev_enc: a reference to video device structure for encoder instances
* @v4l2_dev: a holder for v4l2 device structure
@@ -94,6 +120,8 @@ struct venus_core {
struct clk *clks[VIDC_CLKS_NUM_MAX];
struct clk *core0_clk;
struct clk *core1_clk;
+ struct clk *core0_bus_clk;
+ struct clk *core1_bus_clk;
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -109,8 +137,8 @@ struct venus_core {
unsigned int error;
bool sys_error;
const struct hfi_core_ops *core_ops;
- u32 enc_codecs;
- u32 dec_codecs;
+ unsigned long enc_codecs;
+ unsigned long dec_codecs;
unsigned int max_sessions_supported;
#define ENC_ROTATION_CAPABILITY 0x1
#define ENC_SCALING_CAPABILITY 0x2
@@ -120,6 +148,8 @@ struct venus_core {
void *priv;
const struct hfi_ops *ops;
struct delayed_work work;
+ struct venus_caps caps[MAX_CODEC_NUM];
+ unsigned int codecs_count;
};
struct vdec_controls {
@@ -160,10 +190,12 @@ struct venc_controls {
u32 mpeg4;
u32 h264;
u32 vpx;
+ u32 hevc;
} profile;
struct {
u32 mpeg4;
u32 h264;
+ u32 hevc;
} level;
};
@@ -185,6 +217,7 @@ struct venus_buffer {
* @list: used for attach an instance to the core
* @lock: instance lock
* @core: a reference to the core struct
+ * @dpbbufs: a list of decoded picture buffers
* @internalbufs: a list of internal buffers
* @registeredbufs: a list of registered capture buffers
* @delayed_process: a list of delayed buffers
@@ -209,9 +242,15 @@ struct venus_buffer {
* @num_output_bufs: holds number of output buffers
* @input_buf_size: holds input buffer size
* @output_buf_size: holds output buffer size
+ * @output2_buf_size: holds secondary decoder output buffer size
+ * @dpb_buftype: decoded picture buffer type
+ * @dpb_fmt: decoded picture buffer raw format
+ * @opb_buftype: output picture buffer type
+ * @opb_fmt: output picture buffer raw format
* @reconfig: a flag raised by decoder when the stream resolution changed
* @reconfig_width: holds the new width
* @reconfig_height: holds the new height
+ * @hfi_codec: current codec for this instance in HFI space
* @sequence_cap: a sequence counter for capture queue
* @sequence_out: a sequence counter for output queue
* @m2m_dev: a reference to m2m device structure
@@ -224,27 +263,12 @@ struct venus_buffer {
* @priv: a private for HFI operations callbacks
* @session_type: the type of the session (decoder or encoder)
* @hprop: a union used as a holder by get property
- * @cap_width: width capability
- * @cap_height: height capability
- * @cap_mbs_per_frame: macroblocks per frame capability
- * @cap_mbs_per_sec: macroblocks per second capability
- * @cap_framerate: framerate capability
- * @cap_scale_x: horizontal scaling capability
- * @cap_scale_y: vertical scaling capability
- * @cap_bitrate: bitrate capability
- * @cap_hier_p: hier capability
- * @cap_ltr_count: LTR count capability
- * @cap_secure_output2_threshold: secure OUTPUT2 threshold capability
- * @cap_bufs_mode_static: buffers allocation mode capability
- * @cap_bufs_mode_dynamic: buffers allocation mode capability
- * @pl_count: count of supported profiles/levels
- * @pl: supported profiles/levels
- * @bufreq: holds buffer requirements
*/
struct venus_inst {
struct list_head list;
struct mutex lock;
struct venus_core *core;
+ struct list_head dpbbufs;
struct list_head internalbufs;
struct list_head registeredbufs;
struct list_head delayed_process;
@@ -273,9 +297,15 @@ struct venus_inst {
unsigned int num_output_bufs;
unsigned int input_buf_size;
unsigned int output_buf_size;
+ unsigned int output2_buf_size;
+ u32 dpb_buftype;
+ u32 dpb_fmt;
+ u32 opb_buftype;
+ u32 opb_fmt;
bool reconfig;
u32 reconfig_width;
u32 reconfig_height;
+ u32 hfi_codec;
u32 sequence_cap;
u32 sequence_out;
struct v4l2_m2m_dev *m2m_dev;
@@ -287,24 +317,12 @@ struct venus_inst {
const struct hfi_inst_ops *ops;
u32 session_type;
union hfi_get_property hprop;
- struct hfi_capability cap_width;
- struct hfi_capability cap_height;
- struct hfi_capability cap_mbs_per_frame;
- struct hfi_capability cap_mbs_per_sec;
- struct hfi_capability cap_framerate;
- struct hfi_capability cap_scale_x;
- struct hfi_capability cap_scale_y;
- struct hfi_capability cap_bitrate;
- struct hfi_capability cap_hier_p;
- struct hfi_capability cap_ltr_count;
- struct hfi_capability cap_secure_output2_threshold;
- bool cap_bufs_mode_static;
- bool cap_bufs_mode_dynamic;
- unsigned int pl_count;
- struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
- struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX];
};
+#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
+#define IS_V3(core) ((core)->res->hfi_version == HFI_VERSION_3XX)
+#define IS_V4(core) ((core)->res->hfi_version == HFI_VERSION_4XX)
+
#define ctrl_to_inst(ctrl) \
container_of((ctrl)->handler, struct venus_inst, ctrl_handler)
@@ -318,4 +336,18 @@ static inline void *to_hfi_priv(struct venus_core *core)
return core->priv;
}
+static inline struct venus_caps *
+venus_caps_by_codec(struct venus_core *core, u32 codec, u32 domain)
+{
+ unsigned int c;
+
+ for (c = 0; c < core->codecs_count; c++) {
+ if (core->caps[c].codec == codec &&
+ core->caps[c].domain == domain)
+ return &core->caps[c];
+ }
+
+ return NULL;
+}
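[Editor's note — a hedged usage sketch: the lookup only succeeds after hfi_parser() has populated core->caps, so callers must treat NULL as "codec not advertised by firmware":

	struct venus_caps *caps;

	caps = venus_caps_by_codec(core, HFI_VIDEO_CODEC_H264,
				   VIDC_SESSION_TYPE_DEC);
	if (!caps)
		return -EINVAL;
]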
+
#endif
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 0ce9559a2924..cd3b96e6f24b 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -13,6 +13,7 @@
*
*/
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
@@ -24,6 +25,7 @@
#include "core.h"
#include "helpers.h"
#include "hfi_helper.h"
+#include "hfi_venus_io.h"
struct intbuf {
struct list_head list;
@@ -69,6 +71,9 @@ bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
case V4L2_PIX_FMT_XVID:
codec = HFI_VIDEO_CODEC_DIVX;
break;
+ case V4L2_PIX_FMT_HEVC:
+ codec = HFI_VIDEO_CODEC_HEVC;
+ break;
default:
return false;
}
@@ -83,6 +88,106 @@ bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
}
EXPORT_SYMBOL_GPL(venus_helper_check_codec);
+static int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
+{
+ struct intbuf *buf;
+ int ret = 0;
+
+ list_for_each_entry(buf, &inst->dpbbufs, list) {
+ struct hfi_frame_data fdata;
+
+ memset(&fdata, 0, sizeof(fdata));
+ fdata.alloc_len = buf->size;
+ fdata.device_addr = buf->da;
+ fdata.buffer_type = buf->type;
+
+ ret = hfi_session_process_buf(inst, &fdata);
+ if (ret)
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+
+int venus_helper_free_dpb_bufs(struct venus_inst *inst)
+{
+ struct intbuf *buf, *n;
+
+ list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
+ list_del_init(&buf->list);
+ dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
+ buf->attrs);
+ kfree(buf);
+ }
+
+ INIT_LIST_HEAD(&inst->dpbbufs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);
+
+int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev;
+ enum hfi_version ver = core->res->hfi_version;
+ struct hfi_buffer_requirements bufreq;
+ u32 buftype = inst->dpb_buftype;
+ unsigned int dpb_size = 0;
+ struct intbuf *buf;
+ unsigned int i;
+ u32 count;
+ int ret;
+
+ /* no need to allocate dpb buffers */
+ if (!inst->dpb_fmt)
+ return 0;
+
+ if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
+ dpb_size = inst->output_buf_size;
+ else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
+ dpb_size = inst->output2_buf_size;
+
+ if (!dpb_size)
+ return 0;
+
+ ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
+ if (ret)
+ return ret;
+
+ count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+
+ for (i = 0; i < count; i++) {
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ buf->type = buftype;
+ buf->size = dpb_size;
+ buf->attrs = DMA_ATTR_WRITE_COMBINE |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
+ buf->attrs);
+ if (!buf->va) {
+ kfree(buf);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_add_tail(&buf->list, &inst->dpbbufs);
+ }
+
+ return 0;
+
+fail:
+ venus_helper_free_dpb_bufs(inst);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);
+
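[Editor's note — illustrative ordering for the decoder start path (the real call sites live in vdec.c, outside the hunks shown here): the DPB list must be allocated before streaming starts, since start streaming queues it to the firmware:

	ret = venus_helper_alloc_dpb_bufs(inst);
	if (ret)
		return ret;

	ret = venus_helper_vb2_start_streaming(inst);
	/* start streaming queues the list via venus_helper_queue_dpb_bufs() */
]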
static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
{
struct venus_core *core = inst->core;
@@ -166,21 +271,38 @@ static int intbufs_unset_buffers(struct venus_inst *inst)
return ret;
}
-static const unsigned int intbuf_types[] = {
- HFI_BUFFER_INTERNAL_SCRATCH,
- HFI_BUFFER_INTERNAL_SCRATCH_1,
- HFI_BUFFER_INTERNAL_SCRATCH_2,
+static const unsigned int intbuf_types_1xx[] = {
+ HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_PERSIST,
+ HFI_BUFFER_INTERNAL_PERSIST_1,
+};
+
+static const unsigned int intbuf_types_4xx[] = {
+ HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
HFI_BUFFER_INTERNAL_PERSIST,
HFI_BUFFER_INTERNAL_PERSIST_1,
};
static int intbufs_alloc(struct venus_inst *inst)
{
- unsigned int i;
+ const unsigned int *intbuf;
+ size_t arr_sz, i;
int ret;
- for (i = 0; i < ARRAY_SIZE(intbuf_types); i++) {
- ret = intbufs_set_buffer(inst, intbuf_types[i]);
+ if (IS_V4(inst->core)) {
+ arr_sz = ARRAY_SIZE(intbuf_types_4xx);
+ intbuf = intbuf_types_4xx;
+ } else {
+ arr_sz = ARRAY_SIZE(intbuf_types_1xx);
+ intbuf = intbuf_types_1xx;
+ }
+
+ for (i = 0; i < arr_sz; i++) {
+ ret = intbufs_set_buffer(inst, intbuf[i]);
if (ret)
goto error;
}
@@ -257,20 +379,23 @@ static int load_scale_clocks(struct venus_core *core)
set_freq:
- if (core->res->hfi_version == HFI_VERSION_3XX) {
- ret = clk_set_rate(clk, freq);
- ret |= clk_set_rate(core->core0_clk, freq);
- ret |= clk_set_rate(core->core1_clk, freq);
- } else {
- ret = clk_set_rate(clk, freq);
- }
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ goto err;
- if (ret) {
- dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
- return ret;
- }
+ ret = clk_set_rate(core->core0_clk, freq);
+ if (ret)
+ goto err;
+
+ ret = clk_set_rate(core->core1_clk, freq);
+ if (ret)
+ goto err;
return 0;
+
+err:
+ dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
+ return ret;
}
static void fill_buffer_desc(const struct venus_buffer *buf,
@@ -325,7 +450,10 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
fdata.flags |= HFI_BUFFERFLAG_EOS;
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fdata.buffer_type = HFI_BUFFER_OUTPUT;
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ fdata.buffer_type = HFI_BUFFER_OUTPUT;
+ else
+ fdata.buffer_type = inst->opb_buftype;
fdata.filled_len = 0;
fdata.offset = 0;
}
@@ -337,18 +465,16 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
return 0;
}
-static inline int is_reg_unreg_needed(struct venus_inst *inst)
+static bool is_dynamic_bufmode(struct venus_inst *inst)
{
- if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
- inst->core->res->hfi_version == HFI_VERSION_3XX)
- return 0;
+ struct venus_core *core = inst->core;
+ struct venus_caps *caps;
- if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
- inst->cap_bufs_mode_dynamic &&
- inst->core->res->hfi_version == HFI_VERSION_1XX)
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
return 0;
- return 1;
+ return caps->cap_bufs_mode_dynamic;
}
static int session_unregister_bufs(struct venus_inst *inst)
@@ -357,7 +483,7 @@ static int session_unregister_bufs(struct venus_inst *inst)
struct hfi_buffer_desc bd;
int ret = 0;
- if (!is_reg_unreg_needed(inst))
+ if (is_dynamic_bufmode(inst))
return 0;
list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
@@ -377,7 +503,7 @@ static int session_register_bufs(struct venus_inst *inst)
struct venus_buffer *buf;
int ret = 0;
- if (!is_reg_unreg_needed(inst))
+ if (is_dynamic_bufmode(inst))
return 0;
list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
@@ -392,6 +518,20 @@ static int session_register_bufs(struct venus_inst *inst)
return ret;
}
+static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
+{
+ switch (v4l2_fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return HFI_COLOR_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV21:
+ return HFI_COLOR_FORMAT_NV21;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
struct hfi_buffer_requirements *req)
{
@@ -423,6 +563,104 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
}
EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
+static u32 get_framesize_raw_nv12(u32 width, u32 height)
+{
+ u32 y_stride, uv_stride, y_plane;
+ u32 y_sclines, uv_sclines, uv_plane;
+ u32 size;
+
+ y_stride = ALIGN(width, 128);
+ uv_stride = ALIGN(width, 128);
+ y_sclines = ALIGN(height, 32);
+ uv_sclines = ALIGN(((height + 1) >> 1), 16);
+
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + SZ_4K;
+ size = y_plane + uv_plane + SZ_8K;
+
+ return ALIGN(size, SZ_4K);
+}
+
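[Editor's note — worked example of the formula above for a 1920x1080 NV12 frame:

	y_stride   = ALIGN(1920, 128)   = 1920
	y_sclines  = ALIGN(1080, 32)    = 1088
	uv_sclines = ALIGN(540, 16)     = 544
	y_plane    = 1920 * 1088        = 2088960
	uv_plane   = 1920 * 544 + SZ_4K = 1048576
	size = y_plane + uv_plane + SZ_8K = 3145728 (3 MiB, already 4K-aligned)
]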
+static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
+{
+ u32 y_meta_stride, y_meta_plane;
+ u32 y_stride, y_plane;
+ u32 uv_meta_stride, uv_meta_plane;
+ u32 uv_stride, uv_plane;
+ u32 extradata = SZ_16K;
+
+ y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
+ y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
+ y_meta_plane = ALIGN(y_meta_plane, SZ_4K);
+
+ y_stride = ALIGN(width, 128);
+ y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);
+
+ uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
+ uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
+ uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);
+
+ uv_stride = ALIGN(width, 128);
+ uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);
+
+ return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
+ max(extradata, y_stride * 48), SZ_4K);
+}
+
+u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
+{
+ switch (hfi_fmt) {
+ case HFI_COLOR_FORMAT_NV12:
+ case HFI_COLOR_FORMAT_NV21:
+ return get_framesize_raw_nv12(width, height);
+ case HFI_COLOR_FORMAT_NV12_UBWC:
+ return get_framesize_raw_nv12_ubwc(width, height);
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);
+
+u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
+{
+ u32 hfi_fmt, sz;
+ bool compressed;
+
+ switch (v4l2_fmt) {
+ case V4L2_PIX_FMT_MPEG:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_H264_NO_SC:
+ case V4L2_PIX_FMT_H264_MVC:
+ case V4L2_PIX_FMT_H263:
+ case V4L2_PIX_FMT_MPEG1:
+ case V4L2_PIX_FMT_MPEG2:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_XVID:
+ case V4L2_PIX_FMT_VC1_ANNEX_G:
+ case V4L2_PIX_FMT_VC1_ANNEX_L:
+ case V4L2_PIX_FMT_VP8:
+ case V4L2_PIX_FMT_VP9:
+ case V4L2_PIX_FMT_HEVC:
+ compressed = true;
+ break;
+ default:
+ compressed = false;
+ break;
+ }
+
+ if (compressed) {
+ sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
+ return ALIGN(sz, SZ_4K);
+ }
+
+ hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
+ if (!hfi_fmt)
+ return 0;
+
+ return venus_helper_get_framesz_raw(hfi_fmt, width, height);
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_framesz);
+
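[Editor's note — for compressed formats the helper applies the fixed worst-case formula above; worked example for a 1920x1080 H.264 bitstream buffer:

	sz = ALIGN(1080, 32) * ALIGN(1920, 32) * 3 / 2 / 2
	   = 1088 * 1920 * 3 / 2 / 2 = 1566720
	ALIGN(1566720, SZ_4K) = 1568768 bytes
]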
int venus_helper_set_input_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height)
{
@@ -438,12 +676,13 @@ int venus_helper_set_input_resolution(struct venus_inst *inst,
EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
int venus_helper_set_output_resolution(struct venus_inst *inst,
- unsigned int width, unsigned int height)
+ unsigned int width, unsigned int height,
+ u32 buftype)
{
u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
struct hfi_framesize fs;
- fs.buffer_type = HFI_BUFFER_OUTPUT;
+ fs.buffer_type = buftype;
fs.width = width;
fs.height = height;
@@ -451,8 +690,37 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
}
EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
+int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
+ struct hfi_video_work_mode wm;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ wm.video_work_mode = mode;
+
+ return hfi_session_set_property(inst, ptype, &wm);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
+
+int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+ struct hfi_videocores_usage_type cu;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ cu.video_core_enable_mask = usage;
+
+ return hfi_session_set_property(inst, ptype, &cu);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
+
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
- unsigned int output_bufs)
+ unsigned int output_bufs,
+ unsigned int output2_bufs)
{
u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
struct hfi_buffer_count_actual buf_count;
@@ -468,41 +736,122 @@ int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
buf_count.type = HFI_BUFFER_OUTPUT;
buf_count.count_actual = output_bufs;
- return hfi_session_set_property(inst, ptype, &buf_count);
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ if (ret)
+ return ret;
+
+ if (output2_bufs) {
+ buf_count.type = HFI_BUFFER_OUTPUT2;
+ buf_count.count_actual = output2_bufs;
+
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
-int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
+int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
+ u32 buftype)
{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
struct hfi_uncompressed_format_select fmt;
- u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
- int ret;
+
+ fmt.buffer_type = buftype;
+ fmt.format = hfi_format;
+
+ return hfi_session_set_property(inst, ptype, &fmt);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);
+
+int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
+{
+ u32 hfi_format, buftype;
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
- fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ buftype = HFI_BUFFER_OUTPUT;
else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
- fmt.buffer_type = HFI_BUFFER_INPUT;
+ buftype = HFI_BUFFER_INPUT;
else
return -EINVAL;
- switch (pixfmt) {
- case V4L2_PIX_FMT_NV12:
- fmt.format = HFI_COLOR_FORMAT_NV12;
- break;
- case V4L2_PIX_FMT_NV21:
- fmt.format = HFI_COLOR_FORMAT_NV21;
- break;
- default:
+ hfi_format = to_hfi_raw_fmt(pixfmt);
+ if (!hfi_format)
return -EINVAL;
- }
- ret = hfi_session_set_property(inst, ptype, &fmt);
+ return venus_helper_set_raw_format(inst, hfi_format, buftype);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
+
+int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
+ bool out2_en)
+{
+ struct hfi_multi_stream multi = {0};
+ u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ int ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT;
+ multi.enable = out_en;
+
+ ret = hfi_session_set_property(inst, ptype, &multi);
+ if (ret)
+ return ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT2;
+ multi.enable = out2_en;
+
+ return hfi_session_set_property(inst, ptype, &multi);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_multistream);
+
+int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
+ struct hfi_buffer_alloc_mode mode;
+ int ret;
+
+ if (!is_dynamic_bufmode(inst))
+ return 0;
+
+ mode.type = HFI_BUFFER_OUTPUT;
+ mode.mode = HFI_BUFFER_MODE_DYNAMIC;
+
+ ret = hfi_session_set_property(inst, ptype, &mode);
if (ret)
return ret;
+ mode.type = HFI_BUFFER_OUTPUT2;
+
+ return hfi_session_set_property(inst, ptype, &mode);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);
+
+int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
+ struct hfi_buffer_size_actual bufsz;
+
+ bufsz.type = buftype;
+ bufsz.size = bufsize;
+
+ return hfi_session_set_property(inst, ptype, &bufsz);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);
+
+unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
+{
+ /* the encoder has only one output */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ return inst->output_buf_size;
+
+ if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
+ return inst->output_buf_size;
+ else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
+ return inst->output2_buf_size;
+
return 0;
}
-EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
+EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);
static void delayed_process_buf_func(struct work_struct *work)
{
@@ -602,9 +951,10 @@ EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int out_buf_size = venus_helper_get_opb_size(inst);
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
- vb2_plane_size(vb, 0) < inst->output_buf_size)
+ vb2_plane_size(vb, 0) < out_buf_size)
return -EINVAL;
if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_plane_size(vb, 0) < inst->input_buf_size)
@@ -674,6 +1024,8 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
if (ret)
hfi_session_abort(inst);
+ venus_helper_free_dpb_bufs(inst);
+
load_scale_clocks(core);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -712,8 +1064,14 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_unload_res;
+ ret = venus_helper_queue_dpb_bufs(inst);
+ if (ret)
+ goto err_session_stop;
+
return 0;
+err_session_stop:
+ hfi_session_stop(inst);
err_unload_res:
hfi_session_unload_res(inst);
err_unreg_bufs:
@@ -766,3 +1124,113 @@ void venus_helper_init_instance(struct venus_inst *inst)
}
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);
+
+static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < caps->num_fmts; i++) {
+ if (caps->fmts[i].buftype == buftype &&
+ caps->fmts[i].fmt == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
+ u32 *out_fmt, u32 *out2_fmt, bool ubwc)
+{
+ struct venus_core *core = inst->core;
+ struct venus_caps *caps;
+ u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
+ bool found, found_ubwc;
+
+ *out_fmt = *out2_fmt = 0;
+
+ if (!fmt)
+ return -EINVAL;
+
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
+ return -EINVAL;
+
+ if (ubwc) {
+ ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
+ found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
+ ubwc_fmt);
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+
+ if (found_ubwc && found) {
+ *out_fmt = ubwc_fmt;
+ *out2_fmt = fmt;
+ return 0;
+ }
+ }
+
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
+ if (found) {
+ *out_fmt = fmt;
+ *out2_fmt = 0;
+ return 0;
+ }
+
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+ if (found) {
+ *out_fmt = 0;
+ *out2_fmt = fmt;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);
+
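[Editor's note — a hedged sketch of how a decoder might combine these helpers to run in split mode (UBWC on the primary output, linear NV12 on OUTPUT2); error handling is trimmed and everything except the exported helpers is illustrative:

	u32 out_fmt, out2_fmt;
	int ret;

	ret = venus_helper_get_out_fmts(inst, V4L2_PIX_FMT_NV12,
					&out_fmt, &out2_fmt, true);
	if (ret)
		return ret;

	if (out_fmt && out2_fmt) {
		/* firmware decodes into UBWC DPBs, userspace gets linear */
		ret = venus_helper_set_multistream(inst, false, true);
		if (!ret)
			ret = venus_helper_set_raw_format(inst, out_fmt,
							  HFI_BUFFER_OUTPUT);
	}
]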
+int venus_helper_power_enable(struct venus_core *core, u32 session_type,
+ bool enable)
+{
+ void __iomem *ctrl, *stat;
+ u32 val;
+ int ret;
+
+ if (!IS_V3(core) && !IS_V4(core))
+ return 0;
+
+ if (IS_V3(core)) {
+ if (session_type == VIDC_SESSION_TYPE_DEC)
+ ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
+ else
+ ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
+ if (enable)
+ writel(0, ctrl);
+ else
+ writel(1, ctrl);
+
+ return 0;
+ }
+
+ if (session_type == VIDC_SESSION_TYPE_DEC) {
+ ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
+ } else {
+ ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
+ }
+
+ if (enable) {
+ writel(0, ctrl);
+
+ ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
+ if (ret)
+ return ret;
+ } else {
+ writel(1, ctrl);
+
+ ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_power_enable);
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 971392be5df5..2475f284f396 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -33,14 +33,33 @@ void venus_helper_m2m_device_run(void *priv);
void venus_helper_m2m_job_abort(void *priv);
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
struct hfi_buffer_requirements *req);
+u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height);
+u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height);
int venus_helper_set_input_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height);
int venus_helper_set_output_resolution(struct venus_inst *inst,
- unsigned int width, unsigned int height);
+ unsigned int width, unsigned int height,
+ u32 buftype);
+int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
+int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
- unsigned int output_bufs);
+ unsigned int output_bufs,
+ unsigned int output2_bufs);
+int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
+ u32 buftype);
int venus_helper_set_color_format(struct venus_inst *inst, u32 fmt);
+int venus_helper_set_dyn_bufmode(struct venus_inst *inst);
+int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype);
+int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
+ bool out2_en);
+unsigned int venus_helper_get_opb_size(struct venus_inst *inst);
void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf);
void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx);
void venus_helper_init_instance(struct venus_inst *inst);
+int venus_helper_get_out_fmts(struct venus_inst *inst, u32 fmt, u32 *out_fmt,
+ u32 *out2_fmt, bool ubwc);
+int venus_helper_alloc_dpb_bufs(struct venus_inst *inst);
+int venus_helper_free_dpb_bufs(struct venus_inst *inst);
+int venus_helper_power_enable(struct venus_core *core, u32 session_type,
+ bool enable);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index bca894a00c07..24207829982f 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -49,6 +49,8 @@ static u32 to_codec_type(u32 pixfmt)
return HFI_VIDEO_CODEC_VP9;
case V4L2_PIX_FMT_XVID:
return HFI_VIDEO_CODEC_DIVX;
+ case V4L2_PIX_FMT_HEVC:
+ return HFI_VIDEO_CODEC_HEVC;
default:
return 0;
}
@@ -203,13 +205,12 @@ int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
{
struct venus_core *core = inst->core;
const struct hfi_ops *ops = core->ops;
- u32 codec;
int ret;
- codec = to_codec_type(pixfmt);
+ inst->hfi_codec = to_codec_type(pixfmt);
reinit_completion(&inst->done);
- ret = ops->session_init(inst, inst->session_type, codec);
+ ret = ops->session_init(inst, inst->session_type, inst->hfi_codec);
if (ret)
return ret;
@@ -312,7 +313,7 @@ int hfi_session_continue(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
- if (core->res->hfi_version != HFI_VERSION_3XX)
+ if (core->res->hfi_version == HFI_VERSION_1XX)
return 0;
return core->ops->session_continue(inst);
@@ -473,7 +474,8 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
if (fd->buffer_type == HFI_BUFFER_INPUT)
return ops->session_etb(inst, fd);
- else if (fd->buffer_type == HFI_BUFFER_OUTPUT)
+ else if (fd->buffer_type == HFI_BUFFER_OUTPUT ||
+ fd->buffer_type == HFI_BUFFER_OUTPUT2)
return ops->session_ftb(inst, fd);
return -EINVAL;
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
index 5466b7d60dd0..6038d8e0ab22 100644
--- a/drivers/media/platform/qcom/venus/hfi.h
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -74,6 +74,16 @@ struct hfi_event_data {
u32 tag;
u32 profile;
u32 level;
+ /* the following properties appear starting with v4 firmware */
+ u32 bit_depth;
+ u32 pic_struct;
+ u32 colour_space;
+ u32 entropy_mode;
+ u32 buf_count;
+ struct {
+ u32 left, top;
+ u32 width, height;
+ } input_crop;
};
/* define core states */
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 1cfeb7743041..e8389d8d8c48 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1166,6 +1166,63 @@ pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt,
return ret;
}
+static int
+pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+ /*
+ * Any session set property whose packetization differs from the 3XX
+ * layout should be added as a new case below. All unchanged session
+ * set properties are handled in the default case.
+ */
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
+ struct hfi_buffer_count_actual *in = pdata;
+ struct hfi_buffer_count_actual_4xx *count = prop_data;
+
+ count->count_actual = in->count_actual;
+ count->type = in->type;
+ count->count_min_host = in->count_actual;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_WORK_MODE: {
+ struct hfi_video_work_mode *in = pdata, *wm = prop_data;
+
+ wm->video_work_mode = in->video_work_mode;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE: {
+ struct hfi_videocores_usage_type *in = pdata, *cu = prop_data;
+
+ cu->video_core_enable_mask = in->video_core_enable_mask;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
+ /* not implemented on Venus 4xx */
+ break;
+ default:
+ return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+ }
+
+ return 0;
+}
+
int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
void *cookie, u32 ptype)
{
@@ -1181,7 +1238,10 @@ int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
if (hfi_ver == HFI_VERSION_1XX)
return pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
- return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+ if (hfi_ver == HFI_VERSION_3XX)
+ return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+
+ return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
}
void pkt_set_version(enum hfi_version version)
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 55d8eb21403a..15804ad7e65d 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -121,6 +121,7 @@
#define HFI_EXTRADATA_METADATA_FILLER 0x7fe00002
#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e
+#define HFI_INDEX_EXTRADATA_OUTPUT_CROP 0x0700000f
#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010
#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7f100003
@@ -376,13 +377,18 @@
#define HFI_BUFFER_OUTPUT2 0x3
#define HFI_BUFFER_INTERNAL_PERSIST 0x4
#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
-#define HFI_BUFFER_INTERNAL_SCRATCH 0x1000001
-#define HFI_BUFFER_EXTRADATA_INPUT 0x1000002
-#define HFI_BUFFER_EXTRADATA_OUTPUT 0x1000003
-#define HFI_BUFFER_EXTRADATA_OUTPUT2 0x1000004
-#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x1000005
-#define HFI_BUFFER_INTERNAL_SCRATCH_2 0x1000006
-
+#define HFI_BUFFER_INTERNAL_SCRATCH(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x6 : 0x1000001)
+#define HFI_BUFFER_INTERNAL_SCRATCH_1(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x7 : 0x1000005)
+#define HFI_BUFFER_INTERNAL_SCRATCH_2(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x8 : 0x1000006)
+#define HFI_BUFFER_EXTRADATA_INPUT(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xc : 0x1000002)
+#define HFI_BUFFER_EXTRADATA_OUTPUT(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xa : 0x1000003)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xb : 0x1000004)
#define HFI_BUFFER_TYPE_MAX 11
#define HFI_BUFFER_MODE_STATIC 0x1000001
@@ -424,12 +430,14 @@
#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED 0x100e
#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010
+#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
/*
* HFI_PROPERTY_CONFIG_COMMON_START
* HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000
*/
#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001
+#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002
/*
* HFI_PROPERTY_PARAM_VDEC_COMMON_START
@@ -438,6 +446,9 @@
#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR 0x1003002
#define HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2 0x1003003
+#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH 0x1003007
+#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT 0x1003009
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE 0x100300a
/*
* HFI_PROPERTY_CONFIG_VDEC_COMMON_START
@@ -518,6 +529,7 @@
enum hfi_version {
HFI_VERSION_1XX,
HFI_VERSION_3XX,
+ HFI_VERSION_4XX
};
struct hfi_buffer_info {
@@ -767,12 +779,56 @@ struct hfi_framesize {
u32 height;
};
+#define VIDC_CORE_ID_DEFAULT 0
+#define VIDC_CORE_ID_1 1
+#define VIDC_CORE_ID_2 2
+#define VIDC_CORE_ID_3 3
+
+struct hfi_videocores_usage_type {
+ u32 video_core_enable_mask;
+};
+
+#define VIDC_WORK_MODE_1 1
+#define VIDC_WORK_MODE_2 2
+
+struct hfi_video_work_mode {
+ u32 video_work_mode;
+};
+
struct hfi_h264_vui_timing_info {
u32 enable;
u32 fixed_framerate;
u32 time_scale;
};
+struct hfi_bit_depth {
+ u32 buffer_type;
+ u32 bit_depth;
+};
+
+struct hfi_picture_type {
+ u32 is_sync_frame;
+ u32 picture_type;
+};
+
+struct hfi_pic_struct {
+ u32 progressive_only;
+};
+
+struct hfi_colour_space {
+ u32 colour_space;
+};
+
+struct hfi_extradata_input_crop {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
#define HFI_COLOR_FORMAT_MONOCHROME 0x01
#define HFI_COLOR_FORMAT_NV12 0x02
#define HFI_COLOR_FORMAT_NV21 0x03
@@ -802,10 +858,23 @@ struct hfi_uncompressed_format_select {
u32 format;
};
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+ u32 format;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_constraints[1];
+};
+
struct hfi_uncompressed_format_supported {
u32 buffer_type;
u32 format_entries;
- u32 format_info[1];
+ struct hfi_uncompressed_plane_info plane_info[1];
};
struct hfi_uncompressed_plane_actual {
@@ -819,19 +888,6 @@ struct hfi_uncompressed_plane_actual_info {
struct hfi_uncompressed_plane_actual plane_format[1];
};
-struct hfi_uncompressed_plane_constraints {
- u32 stride_multiples;
- u32 max_stride;
- u32 min_plane_buffer_height_multiple;
- u32 buffer_alignment;
-};
-
-struct hfi_uncompressed_plane_info {
- u32 format;
- u32 num_planes;
- struct hfi_uncompressed_plane_constraints plane_format[1];
-};
-
struct hfi_uncompressed_plane_actual_constraints_info {
u32 buffer_type;
u32 num_planes;
@@ -961,6 +1017,12 @@ struct hfi_buffer_count_actual {
u32 count_actual;
};
+struct hfi_buffer_count_actual_4xx {
+ u32 type;
+ u32 count_actual;
+ u32 count_min_host;
+};
+
struct hfi_buffer_size_actual {
u32 type;
u32 size;
@@ -971,6 +1033,14 @@ struct hfi_buffer_display_hold_count_actual {
u32 hold_count;
};
+/* HFI 4XX reorders these fields; use the macros below to access them */
+#define HFI_BUFREQ_HOLD_COUNT(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? 0 : (bufreq)->hold_count)
+#define HFI_BUFREQ_COUNT_MIN(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? (bufreq)->hold_count : (bufreq)->count_min)
+#define HFI_BUFREQ_COUNT_MIN_HOST(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? (bufreq)->count_min : 0)
+
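[Editor's note — per the macros above, 4xx firmware delivers count_min in the slot older firmware used for hold_count (and count_min_host in count_min's slot), so reads must go through the accessors; a hedged usage sketch:

	struct hfi_buffer_requirements bufreq;
	enum hfi_version ver = core->res->hfi_version;
	u32 count;

	ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
	if (!ret)
		count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
]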
struct hfi_buffer_requirements {
u32 type;
u32 size;
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 90c93d9603dc..0ecdaa15c296 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -21,14 +21,21 @@
#include "hfi.h"
#include "hfi_helper.h"
#include "hfi_msgs.h"
+#include "hfi_parser.h"
static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_msg_event_notify_pkt *pkt)
{
+ enum hfi_version ver = core->res->hfi_version;
struct hfi_event_data event = {0};
int num_properties_changed;
struct hfi_framesize *frame_sz;
struct hfi_profile_level *profile_level;
+ struct hfi_bit_depth *pixel_depth;
+ struct hfi_pic_struct *pic_struct;
+ struct hfi_colour_space *colour_info;
+ struct hfi_buffer_requirements *bufreq;
+ struct hfi_extradata_input_crop *crop;
u8 *data_ptr;
u32 ptype;
@@ -60,14 +67,52 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
frame_sz = (struct hfi_framesize *)data_ptr;
event.width = frame_sz->width;
event.height = frame_sz->height;
- data_ptr += sizeof(frame_sz);
+ data_ptr += sizeof(*frame_sz);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
data_ptr += sizeof(u32);
profile_level = (struct hfi_profile_level *)data_ptr;
event.profile = profile_level->profile;
event.level = profile_level->level;
- data_ptr += sizeof(profile_level);
+ data_ptr += sizeof(*profile_level);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
+ data_ptr += sizeof(u32);
+ pixel_depth = (struct hfi_bit_depth *)data_ptr;
+ event.bit_depth = pixel_depth->bit_depth;
+ data_ptr += sizeof(*pixel_depth);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
+ data_ptr += sizeof(u32);
+ pic_struct = (struct hfi_pic_struct *)data_ptr;
+ event.pic_struct = pic_struct->progressive_only;
+ data_ptr += sizeof(*pic_struct);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+ data_ptr += sizeof(u32);
+ colour_info = (struct hfi_colour_space *)data_ptr;
+ event.colour_space = colour_info->colour_space;
+ data_ptr += sizeof(*colour_info);
+ break;
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ data_ptr += sizeof(u32);
+ event.entropy_mode = *(u32 *)data_ptr;
+ data_ptr += sizeof(u32);
+ break;
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ data_ptr += sizeof(u32);
+ bufreq = (struct hfi_buffer_requirements *)data_ptr;
+ event.buf_count = HFI_BUFREQ_COUNT_MIN(bufreq, ver);
+ data_ptr += sizeof(*bufreq);
+ break;
+ case HFI_INDEX_EXTRADATA_INPUT_CROP:
+ data_ptr += sizeof(u32);
+ crop = (struct hfi_extradata_input_crop *)data_ptr;
+ event.input_crop.left = crop->left;
+ event.input_crop.top = crop->top;
+ event.input_crop.width = crop->width;
+ event.input_crop.height = crop->height;
+ data_ptr += sizeof(*crop);
break;
default:
break;
@@ -173,81 +218,28 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
void *packet)
{
struct hfi_msg_sys_init_done_pkt *pkt = packet;
- u32 rem_bytes, read_bytes = 0, num_properties;
- u32 error, ptype;
- u8 *data;
+ int rem_bytes;
+ u32 error;
error = pkt->error_type;
if (error != HFI_ERR_NONE)
- goto err_no_prop;
-
- num_properties = pkt->num_properties;
+ goto done;
- if (!num_properties) {
+ if (!pkt->num_properties) {
error = HFI_ERR_SYS_INVALID_PARAMETER;
- goto err_no_prop;
+ goto done;
}
rem_bytes = pkt->hdr.size - sizeof(*pkt) + sizeof(u32);
-
- if (!rem_bytes) {
+ if (rem_bytes <= 0) {
/* missing property data */
error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
- goto err_no_prop;
+ goto done;
}
- data = (u8 *)&pkt->data[0];
-
- if (core->res->hfi_version == HFI_VERSION_3XX)
- goto err_no_prop;
-
- while (num_properties && rem_bytes >= sizeof(u32)) {
- ptype = *((u32 *)data);
- data += sizeof(u32);
+ error = hfi_parser(core, inst, pkt->data, rem_bytes);
- switch (ptype) {
- case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: {
- struct hfi_codec_supported *prop;
-
- prop = (struct hfi_codec_supported *)data;
-
- if (rem_bytes < sizeof(*prop)) {
- error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
- break;
- }
-
- read_bytes += sizeof(*prop) + sizeof(u32);
- core->dec_codecs = prop->dec_codecs;
- core->enc_codecs = prop->enc_codecs;
- break;
- }
- case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED: {
- struct hfi_max_sessions_supported *prop;
-
- if (rem_bytes < sizeof(*prop)) {
- error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
- break;
- }
-
- prop = (struct hfi_max_sessions_supported *)data;
- read_bytes += sizeof(*prop) + sizeof(u32);
- core->max_sessions_supported = prop->max_sessions;
- break;
- }
- default:
- error = HFI_ERR_SYS_INVALID_PARAMETER;
- break;
- }
-
- if (error)
- break;
-
- rem_bytes -= read_bytes;
- data += read_bytes;
- num_properties--;
- }
-
-err_no_prop:
+done:
core->error = error;
complete(&core->done);
}
@@ -325,51 +317,6 @@ static void hfi_sys_pc_prepare_done(struct venus_core *core,
dev_dbg(core->dev, "pc prepare done (error %x)\n", pkt->error_type);
}
-static void
-hfi_copy_cap_prop(struct hfi_capability *in, struct venus_inst *inst)
-{
- if (!in || !inst)
- return;
-
- switch (in->capability_type) {
- case HFI_CAPABILITY_FRAME_WIDTH:
- inst->cap_width = *in;
- break;
- case HFI_CAPABILITY_FRAME_HEIGHT:
- inst->cap_height = *in;
- break;
- case HFI_CAPABILITY_MBS_PER_FRAME:
- inst->cap_mbs_per_frame = *in;
- break;
- case HFI_CAPABILITY_MBS_PER_SECOND:
- inst->cap_mbs_per_sec = *in;
- break;
- case HFI_CAPABILITY_FRAMERATE:
- inst->cap_framerate = *in;
- break;
- case HFI_CAPABILITY_SCALE_X:
- inst->cap_scale_x = *in;
- break;
- case HFI_CAPABILITY_SCALE_Y:
- inst->cap_scale_y = *in;
- break;
- case HFI_CAPABILITY_BITRATE:
- inst->cap_bitrate = *in;
- break;
- case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS:
- inst->cap_hier_p = *in;
- break;
- case HFI_CAPABILITY_ENC_LTR_COUNT:
- inst->cap_ltr_count = *in;
- break;
- case HFI_CAPABILITY_CP_OUTPUT2_THRESH:
- inst->cap_secure_output2_threshold = *in;
- break;
- default:
- break;
- }
-}
-
static unsigned int
session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt,
struct hfi_profile_level *profile_level)
@@ -459,248 +406,27 @@ done:
complete(&inst->done);
}
-static u32 init_done_read_prop(struct venus_core *core, struct venus_inst *inst,
- struct hfi_msg_session_init_done_pkt *pkt)
-{
- struct device *dev = core->dev;
- u32 rem_bytes, num_props;
- u32 ptype, next_offset = 0;
- u32 err;
- u8 *data;
-
- rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
- if (!rem_bytes) {
- dev_err(dev, "%s: missing property info\n", __func__);
- return HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
- }
-
- err = pkt->error_type;
- if (err)
- return err;
-
- data = (u8 *)&pkt->data[0];
- num_props = pkt->num_properties;
-
- while (err == HFI_ERR_NONE && num_props && rem_bytes >= sizeof(u32)) {
- ptype = *((u32 *)data);
- next_offset = sizeof(u32);
-
- switch (ptype) {
- case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED: {
- struct hfi_codec_mask_supported *masks =
- (struct hfi_codec_mask_supported *)
- (data + next_offset);
-
- next_offset += sizeof(*masks);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: {
- struct hfi_capabilities *caps;
- struct hfi_capability *cap;
- u32 num_caps;
-
- if ((rem_bytes - next_offset) < sizeof(*cap)) {
- err = HFI_ERR_SESSION_INVALID_PARAMETER;
- break;
- }
-
- caps = (struct hfi_capabilities *)(data + next_offset);
-
- num_caps = caps->num_capabilities;
- cap = &caps->data[0];
- next_offset += sizeof(u32);
-
- while (num_caps &&
- (rem_bytes - next_offset) >= sizeof(u32)) {
- hfi_copy_cap_prop(cap, inst);
- cap++;
- next_offset += sizeof(*cap);
- num_caps--;
- }
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: {
- struct hfi_uncompressed_format_supported *prop =
- (struct hfi_uncompressed_format_supported *)
- (data + next_offset);
- u32 num_fmt_entries;
- u8 *fmt;
- struct hfi_uncompressed_plane_info *inf;
-
- if ((rem_bytes - next_offset) < sizeof(*prop)) {
- err = HFI_ERR_SESSION_INVALID_PARAMETER;
- break;
- }
-
- num_fmt_entries = prop->format_entries;
- next_offset = sizeof(*prop) - sizeof(u32);
- fmt = (u8 *)&prop->format_info[0];
-
- dev_dbg(dev, "uncomm format support num entries:%u\n",
- num_fmt_entries);
-
- while (num_fmt_entries) {
- struct hfi_uncompressed_plane_constraints *cnts;
- u32 bytes_to_skip;
-
- inf = (struct hfi_uncompressed_plane_info *)fmt;
-
- if ((rem_bytes - next_offset) < sizeof(*inf)) {
- err = HFI_ERR_SESSION_INVALID_PARAMETER;
- break;
- }
-
- dev_dbg(dev, "plane info: fmt:%x, planes:%x\n",
- inf->format, inf->num_planes);
-
- cnts = &inf->plane_format[0];
- dev_dbg(dev, "%u %u %u %u\n",
- cnts->stride_multiples,
- cnts->max_stride,
- cnts->min_plane_buffer_height_multiple,
- cnts->buffer_alignment);
-
- bytes_to_skip = sizeof(*inf) - sizeof(*cnts) +
- inf->num_planes * sizeof(*cnts);
-
- fmt += bytes_to_skip;
- next_offset += bytes_to_skip;
- num_fmt_entries--;
- }
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED: {
- struct hfi_properties_supported *prop =
- (struct hfi_properties_supported *)
- (data + next_offset);
-
- next_offset += sizeof(*prop) - sizeof(u32)
- + prop->num_properties * sizeof(u32);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: {
- struct hfi_profile_level_supported *prop =
- (struct hfi_profile_level_supported *)
- (data + next_offset);
- struct hfi_profile_level *pl;
- unsigned int prop_count = 0;
- unsigned int count = 0;
- u8 *ptr;
-
- ptr = (u8 *)&prop->profile_level[0];
- prop_count = prop->profile_count;
-
- if (prop_count > HFI_MAX_PROFILE_COUNT)
- prop_count = HFI_MAX_PROFILE_COUNT;
-
- while (prop_count) {
- ptr++;
- pl = (struct hfi_profile_level *)ptr;
-
- inst->pl[count].profile = pl->profile;
- inst->pl[count].level = pl->level;
- prop_count--;
- count++;
- ptr += sizeof(*pl) / sizeof(u32);
- }
-
- inst->pl_count = count;
- next_offset += sizeof(*prop) - sizeof(*pl) +
- prop->profile_count * sizeof(*pl);
-
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED: {
- next_offset +=
- sizeof(struct hfi_interlace_format_supported);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED: {
- struct hfi_nal_stream_format *nal =
- (struct hfi_nal_stream_format *)
- (data + next_offset);
- dev_dbg(dev, "NAL format: %x\n", nal->format);
- next_offset += sizeof(*nal);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: {
- next_offset += sizeof(u32);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE: {
- u32 *max_seq_sz = (u32 *)(data + next_offset);
-
- dev_dbg(dev, "max seq header sz: %x\n", *max_seq_sz);
- next_offset += sizeof(u32);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
- next_offset += sizeof(struct hfi_intra_refresh);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED: {
- struct hfi_buffer_alloc_mode_supported *prop =
- (struct hfi_buffer_alloc_mode_supported *)
- (data + next_offset);
- unsigned int i;
-
- for (i = 0; i < prop->num_entries; i++) {
- if (prop->buffer_type == HFI_BUFFER_OUTPUT ||
- prop->buffer_type == HFI_BUFFER_OUTPUT2) {
- switch (prop->data[i]) {
- case HFI_BUFFER_MODE_STATIC:
- inst->cap_bufs_mode_static = true;
- break;
- case HFI_BUFFER_MODE_DYNAMIC:
- inst->cap_bufs_mode_dynamic = true;
- break;
- default:
- break;
- }
- }
- }
- next_offset += sizeof(*prop) -
- sizeof(u32) + prop->num_entries * sizeof(u32);
- num_props--;
- break;
- }
- default:
- dev_dbg(dev, "%s: default case %#x\n", __func__, ptype);
- break;
- }
-
- rem_bytes -= next_offset;
- data += next_offset;
- }
-
- return err;
-}
-
static void hfi_session_init_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_init_done_pkt *pkt = packet;
- unsigned int error;
+ int rem_bytes;
+ u32 error;
error = pkt->error_type;
if (error != HFI_ERR_NONE)
goto done;
- if (core->res->hfi_version != HFI_VERSION_1XX)
+ if (!IS_V1(core))
goto done;
- error = init_done_read_prop(core, inst, pkt);
+ rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
+ if (rem_bytes <= 0) {
+ error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+ goto done;
+ }
+ error = hfi_parser(core, inst, pkt->data, rem_bytes);
done:
inst->error = error;
complete(&inst->done);
@@ -779,7 +505,8 @@ static void hfi_session_ftb_done(struct venus_core *core,
error = HFI_ERR_SESSION_INVALID_PARAMETER;
}
- if (buffer_type != HFI_BUFFER_OUTPUT)
+ if (buffer_type != HFI_BUFFER_OUTPUT &&
+ buffer_type != HFI_BUFFER_OUTPUT2)
goto done;
if (hfi_flags & HFI_BUFFERFLAG_EOS)
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
new file mode 100644
index 000000000000..2293d936e49c
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Linaro Ltd.
+ *
+ * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org>
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "hfi_helper.h"
+#include "hfi_parser.h"
+
+typedef void (*func)(struct venus_caps *cap, const void *data,
+ unsigned int size);
+
+static void init_codecs(struct venus_core *core)
+{
+ struct venus_caps *caps = core->caps, *cap;
+ unsigned long bit;
+
+ for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+ cap->domain = VIDC_SESSION_TYPE_DEC;
+ cap->valid = false;
+ }
+
+ for_each_set_bit(bit, &core->enc_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+ cap->domain = VIDC_SESSION_TYPE_ENC;
+ cap->valid = false;
+ }
+}
+
+static void for_each_codec(struct venus_caps *caps, unsigned int caps_num,
+ u32 codecs, u32 domain, func cb, void *data,
+ unsigned int size)
+{
+ struct venus_caps *cap;
+ unsigned int i;
+
+ for (i = 0; i < caps_num; i++) {
+ cap = &caps[i];
+ if (cap->valid && cap->domain == domain)
+ continue;
+ if (cap->codec & codecs && cap->domain == domain)
+ cb(cap, data, size);
+ }
+}
+
+static void
+fill_buf_mode(struct venus_caps *cap, const void *data, unsigned int num)
+{
+ const u32 *type = data;
+
+ if (*type == HFI_BUFFER_MODE_DYNAMIC)
+ cap->cap_bufs_mode_dynamic = true;
+}
+
+static void
+parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_buffer_alloc_mode_supported *mode = data;
+ u32 num_entries = mode->num_entries;
+ u32 *type;
+
+ if (num_entries > MAX_ALLOC_MODE_ENTRIES)
+ return;
+
+ type = mode->data;
+
+ while (num_entries--) {
+ if (mode->buffer_type == HFI_BUFFER_OUTPUT ||
+ mode->buffer_type == HFI_BUFFER_OUTPUT2)
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps),
+ codecs, domain, fill_buf_mode, type, 1);
+
+ type++;
+ }
+}
+
+static void fill_profile_level(struct venus_caps *cap, const void *data,
+ unsigned int num)
+{
+ const struct hfi_profile_level *pl = data;
+
+ memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ cap->num_pl += num;
+}
+
+static void
+parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_profile_level_supported *pl = data;
+ struct hfi_profile_level *proflevel = pl->profile_level;
+ struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {};
+
+ if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
+ return;
+
+ memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_profile_level, pl_arr, pl->profile_count);
+}
+
+static void
+fill_caps(struct venus_caps *cap, const void *data, unsigned int num)
+{
+ const struct hfi_capability *caps = data;
+
+ memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ cap->num_caps += num;
+}
+
+static void
+parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_capabilities *caps = data;
+ struct hfi_capability *cap = caps->data;
+ u32 num_caps = caps->num_capabilities;
+ struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {};
+
+ if (num_caps > MAX_CAP_ENTRIES)
+ return;
+
+ memcpy(caps_arr, cap, num_caps * sizeof(*cap));
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_caps, caps_arr, num_caps);
+}
+
+static void fill_raw_fmts(struct venus_caps *cap, const void *fmts,
+ unsigned int num_fmts)
+{
+ const struct raw_formats *formats = fmts;
+
+ memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ cap->num_fmts += num_fmts;
+}
+
+static void
+parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_uncompressed_format_supported *fmt = data;
+ struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info;
+ struct hfi_uncompressed_plane_constraints *constr;
+ struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
+ u32 entries = fmt->format_entries;
+ unsigned int i = 0;
+ u32 num_planes;
+
+ while (entries) {
+ num_planes = pinfo->num_planes;
+
+ rawfmts[i].fmt = pinfo->format;
+ rawfmts[i].buftype = fmt->buffer_type;
+ i++;
+
+ if (pinfo->num_planes > MAX_PLANES)
+ break;
+
+ pinfo = (void *)pinfo + sizeof(*constr) * num_planes +
+ 2 * sizeof(u32);
+ entries--;
+ }
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_raw_fmts, rawfmts, i);
+}
+
+static void parse_codecs(struct venus_core *core, void *data)
+{
+ struct hfi_codec_supported *codecs = data;
+
+ core->dec_codecs = codecs->dec_codecs;
+ core->enc_codecs = codecs->enc_codecs;
+
+ if (IS_V1(core)) {
+ core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
+ core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
+ }
+}
+
+static void parse_max_sessions(struct venus_core *core, const void *data)
+{
+ const struct hfi_max_sessions_supported *sessions = data;
+
+ core->max_sessions_supported = sessions->max_sessions;
+}
+
+static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
+{
+ struct hfi_codec_mask_supported *mask = data;
+
+ *codecs = mask->codecs;
+ *domain = mask->video_domains;
+}
+
+static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
+{
+ if (!inst || !IS_V1(inst->core))
+ return;
+
+ *codecs = inst->hfi_codec;
+ *domain = inst->session_type;
+}
+
+static void parser_fini(struct venus_inst *inst, u32 codecs, u32 domain)
+{
+ struct venus_caps *caps, *cap;
+ unsigned int i;
+ u32 dom;
+
+ if (!inst || !IS_V1(inst->core))
+ return;
+
+ caps = inst->core->caps;
+ dom = inst->session_type;
+
+ for (i = 0; i < MAX_CODEC_NUM; i++) {
+ cap = &caps[i];
+ if (cap->codec & codecs && cap->domain == dom)
+ cap->valid = true;
+ }
+}
+
+u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
+ u32 size)
+{
+ unsigned int words_count = size >> 2;
+ u32 *word = buf, *data, codecs = 0, domain = 0;
+
+ if (size % 4)
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ parser_init(inst, &codecs, &domain);
+
+ while (words_count) {
+ data = word + 1;
+
+ switch (*word) {
+ case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
+ parse_codecs(core, data);
+ init_codecs(core);
+ break;
+ case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED:
+ parse_max_sessions(core, data);
+ break;
+ case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
+ parse_codecs_mask(&codecs, &domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+ parse_raw_formats(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+ parse_caps(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+ parse_profile_level(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
+ parse_alloc_mode(core, codecs, domain, data);
+ break;
+ default:
+ break;
+ }
+
+ word++;
+ words_count--;
+ }
+
+ parser_fini(inst, codecs, domain);
+
+ return HFI_ERR_NONE;
+}
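
hfi_parser() treats the payload as a flat stream of 32-bit words and checks every word against the known property IDs, dispatching the words that follow as that property's payload; since the cursor advances one word at a time, payload words are themselves rescanned on later iterations. A minimal sketch of a stream the parser accepts (dec_codecs, enc_codecs and max_sessions are illustrative placeholders):

	u32 buf[] = {
		HFI_PROPERTY_PARAM_CODEC_SUPPORTED,
		dec_codecs, enc_codecs,	/* struct hfi_codec_supported */
		HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED,
		max_sessions,	/* struct hfi_max_sessions_supported */
	};

	hfi_parser(core, NULL, buf, sizeof(buf));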
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.h b/drivers/media/platform/qcom/venus/hfi_parser.h
new file mode 100644
index 000000000000..3e931c747e19
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_parser.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Linaro Ltd. */
+#ifndef __VENUS_HFI_PARSER_H__
+#define __VENUS_HFI_PARSER_H__
+
+#include "core.h"
+
+u32 hfi_parser(struct venus_core *core, struct venus_inst *inst,
+ void *buf, u32 size);
+
+#define WHICH_CAP_MIN 0
+#define WHICH_CAP_MAX 1
+#define WHICH_CAP_STEP 2
+
+static inline u32 get_cap(struct venus_inst *inst, u32 type, u32 which)
+{
+ struct venus_core *core = inst->core;
+ struct hfi_capability *cap = NULL;
+ struct venus_caps *caps;
+ unsigned int i;
+
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
+ return 0;
+
+ for (i = 0; i < caps->num_caps; i++) {
+ if (caps->caps[i].capability_type == type) {
+ cap = &caps->caps[i];
+ break;
+ }
+ }
+
+ if (!cap)
+ return 0;
+
+ switch (which) {
+ case WHICH_CAP_MIN:
+ return cap->min;
+ case WHICH_CAP_MAX:
+ return cap->max;
+ case WHICH_CAP_STEP:
+ return cap->step_size;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline u32 cap_min(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_MIN);
+}
+
+static inline u32 cap_max(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_MAX);
+}
+
+static inline u32 cap_step(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_STEP);
+}
+
+static inline u32 frame_width_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_width_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_width_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_height_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frame_height_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frame_height_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frate_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+static inline u32 frate_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+static inline u32 frate_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+#endif
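
A sketch of how the cap helpers are meant to be used when validating a requested format, mirroring the vdec/venc changes later in this patch; note that get_cap() returns 0 when the firmware never reported the capability, so callers should treat 0 as "unknown" rather than as a hard limit:

	pixmp->width = clamp(pixmp->width, frame_width_min(inst),
			     frame_width_max(inst));
	pixmp->height = clamp(pixmp->height, frame_height_min(inst),
			      frame_height_max(inst));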
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 734ce11b0ed0..124085556b94 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -532,6 +532,24 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
u32 val;
int ret;
+ if (IS_V4(hdev->core)) {
+ val = venus_readl(hdev, WRAPPER_CPU_AXI_HALT);
+ val |= WRAPPER_CPU_AXI_HALT_HALT;
+ venus_writel(hdev, WRAPPER_CPU_AXI_HALT, val);
+
+ ret = readl_poll_timeout(base + WRAPPER_CPU_AXI_HALT_STATUS,
+ val,
+ val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
+ POLL_INTERVAL_US,
+ VBIF_AXI_HALT_ACK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "AXI bus port halt timeout\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
/* Halt AXI and AXI IMEM VBIF Access */
val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0);
val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
@@ -861,6 +879,14 @@ static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
if (ret)
dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
+ /*
+ * The idle indicator is disabled by default on some 4xx firmware
+ * versions; enable it explicitly so that suspend can work by
+ * checking the WFI (wait-for-interrupt) bit.
+ */
+ if (IS_V4(hdev->core))
+ venus_sys_idle_indicator = true;
+
ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
if (ret)
dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
@@ -1073,6 +1099,10 @@ static int venus_core_init(struct venus_core *core)
if (ret)
dev_warn(dev, "failed to send image version pkt to fw\n");
+ ret = venus_sys_set_default_properties(hdev);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1117,10 +1147,6 @@ static int venus_session_init(struct venus_inst *inst, u32 session_type,
struct hfi_session_init_pkt pkt;
int ret;
- ret = venus_sys_set_default_properties(hdev);
- if (ret)
- return ret;
-
ret = pkt_session_init(&pkt, inst, session_type, codec);
if (ret)
goto err;
@@ -1426,13 +1452,40 @@ static int venus_suspend_1xx(struct venus_core *core)
return 0;
}
+static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
+{
+ u32 ctrl_status, cpu_status;
+
+ cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+
+ if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
+ ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
+ return true;
+
+ return false;
+}
+
+static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
+{
+ u32 ctrl_status, cpu_status;
+
+ cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+
+ if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
+ ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ return true;
+
+ return false;
+}
+
static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
- u32 ctrl_status, wfi_status;
+ bool val;
int ret;
- int cnt = 100;
if (!hdev->power_enabled || hdev->suspended)
return 0;
@@ -1446,29 +1499,30 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}
- ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
- if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
- wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
- ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
-
- ret = venus_prepare_power_collapse(hdev, false);
- if (ret) {
- dev_err(dev, "prepare for power collapse fail (%d)\n",
- ret);
- return ret;
- }
+ /*
+ * Power collapse sequence for Venus 3xx and 4xx versions:
+ * 1. Wait for the ARM9 and the video core to become idle, by checking
+ * the WFI bit (bit 0) in the CPU status register and the Idle bit
+ * (bit 30) in the video core's control status register.
+ * 2. Send a command to prepare for power collapse.
+ * 3. Wait for the WFI and PC_READY bits.
+ */
+ ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
+ 1500, 100 * 1500);
+ if (ret)
+ return ret;
- cnt = 100;
- while (cnt--) {
- wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
- ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
- if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY &&
- wfi_status & BIT(0))
- break;
- usleep_range(1000, 1500);
- }
+ ret = venus_prepare_power_collapse(hdev, false);
+ if (ret) {
+ dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
+ return ret;
}
+ ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
+ 1500, 100 * 1500);
+ if (ret)
+ return ret;
+
mutex_lock(&hdev->lock);
ret = venus_power_off(hdev);
@@ -1487,7 +1541,7 @@ static int venus_suspend_3xx(struct venus_core *core)
static int venus_suspend(struct venus_core *core)
{
- if (core->res->hfi_version == HFI_VERSION_3XX)
+ if (IS_V3(core) || IS_V4(core))
return venus_suspend_3xx(core);
return venus_suspend_1xx(core);
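
readx_poll_timeout() generalizes readl_poll_timeout() to an arbitrary accessor: it repeatedly evaluates op(addr) into val until cond becomes true or timeout_us expires, sleeping sleep_us between polls. The pattern used above, spelled out with the values from this patch:

	bool idle;
	int ret;

	/* poll every ~1.5 ms, give up after 100 polls (150 ms total) */
	ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev,
				 idle, idle, 1500, 100 * 1500);
	if (ret)	/* -ETIMEDOUT if the cores never went idle */
		return ret;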
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index 98cc350113ab..def0926a6dee 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -104,10 +104,20 @@
#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE + 0x2000)
#define WRAPPER_CPU_AXI_HALT (WRAPPER_BASE + 0x2008)
+#define WRAPPER_CPU_AXI_HALT_HALT BIT(16)
#define WRAPPER_CPU_AXI_HALT_STATUS (WRAPPER_BASE + 0x200c)
+#define WRAPPER_CPU_AXI_HALT_STATUS_IDLE BIT(24)
#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE + 0x2010)
#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014)
+#define WRAPPER_CPU_STATUS_WFI BIT(0)
#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000)
+/* Venus 4xx */
+#define WRAPPER_VCODEC0_MMCC_POWER_STATUS (WRAPPER_BASE + 0x90)
+#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x94)
+
+#define WRAPPER_VCODEC1_MMCC_POWER_STATUS (WRAPPER_BASE + 0x110)
+#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x114)
+
#endif
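
The new 4xx MMCC power registers come in control/status pairs, one pair per vcodec core. Judging from the vdec/venc runtime-PM changes later in this patch, which pass VIDC_SESSION_TYPE_DEC or VIDC_SESSION_TYPE_ENC to venus_helper_power_enable(), the helper is expected to pick the pair from the session type, roughly (a sketch, not the helper's actual body):

	u32 ctrl = session_type == VIDC_SESSION_TYPE_DEC ?
			WRAPPER_VCODEC0_MMCC_POWER_CONTROL :
			WRAPPER_VCODEC1_MMCC_POWER_CONTROL;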
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 49bbd1861d3a..dfbbbf0f746f 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -24,33 +25,11 @@
#include <media/videobuf2-dma-sg.h>
#include "hfi_venus_io.h"
+#include "hfi_parser.h"
#include "core.h"
#include "helpers.h"
#include "vdec.h"
-static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height)
-{
- u32 y_stride, uv_stride, y_plane;
- u32 y_sclines, uv_sclines, uv_plane;
- u32 size;
-
- y_stride = ALIGN(width, 128);
- uv_stride = ALIGN(width, 128);
- y_sclines = ALIGN(height, 32);
- uv_sclines = ALIGN(((height + 1) >> 1), 16);
-
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + SZ_4K;
- size = y_plane + uv_plane + SZ_8K;
-
- return ALIGN(size, SZ_4K);
-}
-
-static u32 get_framesize_compressed(unsigned int width, unsigned int height)
-{
- return ((width * height * 3 / 2) / 2) + 128;
-}
-
/*
* Three reasons to keep MPLANE formats (despite the number of planes
* currently being one):
@@ -99,6 +78,10 @@ static const struct venus_format vdec_formats[] = {
.pixfmt = V4L2_PIX_FMT_XVID,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
},
};
@@ -159,7 +142,6 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
- unsigned int p;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
@@ -173,14 +155,12 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
- pixmp->width = 1280;
- pixmp->height = 720;
}
- pixmp->width = clamp(pixmp->width, inst->cap_width.min,
- inst->cap_width.max);
- pixmp->height = clamp(pixmp->height, inst->cap_height.min,
- inst->cap_height.max);
+ pixmp->width = clamp(pixmp->width, frame_width_min(inst),
+ frame_width_max(inst));
+ pixmp->height = clamp(pixmp->height, frame_height_min(inst),
+ frame_height_max(inst));
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
pixmp->height = ALIGN(pixmp->height, 32);
@@ -190,18 +170,14 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- for (p = 0; p < pixmp->num_planes; p++) {
- pfmt[p].sizeimage =
- get_framesize_uncompressed(p, pixmp->width,
- pixmp->height);
- pfmt[p].bytesperline = ALIGN(pixmp->width, 128);
- }
- } else {
- pfmt[0].sizeimage = get_framesize_compressed(pixmp->width,
- pixmp->height);
+ pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
+ else
pfmt[0].bytesperline = 0;
- }
return fmt;
}
@@ -442,12 +418,12 @@ static int vdec_enum_framesizes(struct file *file, void *fh,
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise.min_width = inst->cap_width.min;
- fsize->stepwise.max_width = inst->cap_width.max;
- fsize->stepwise.step_width = inst->cap_width.step_size;
- fsize->stepwise.min_height = inst->cap_height.min;
- fsize->stepwise.max_height = inst->cap_height.max;
- fsize->stepwise.step_height = inst->cap_height.step_size;
+ fsize->stepwise.min_width = frame_width_min(inst);
+ fsize->stepwise.max_width = frame_width_max(inst);
+ fsize->stepwise.step_width = frame_width_step(inst);
+ fsize->stepwise.min_height = frame_height_min(inst);
+ fsize->stepwise.max_height = frame_height_max(inst);
+ fsize->stepwise.step_height = frame_height_step(inst);
return 0;
}
@@ -544,11 +520,41 @@ static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
static int vdec_set_properties(struct venus_inst *inst)
{
struct vdec_controls *ctr = &inst->controls.dec;
+ struct hfi_enable en = { .enable = 1 };
+ u32 ptype;
+ int ret;
+
+ if (ctr->post_loop_deb_mode) {
+ ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
+
+static int vdec_output_conf(struct venus_inst *inst)
+{
struct venus_core *core = inst->core;
struct hfi_enable en = { .enable = 1 };
+ u32 width = inst->out_width;
+ u32 height = inst->out_height;
+ u32 out_fmt, out2_fmt;
+ bool ubwc = false;
u32 ptype;
int ret;
+ ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_1);
+ if (ret)
+ return ret;
+
if (core->res->hfi_version == HFI_VERSION_1XX) {
ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
ret = hfi_session_set_property(inst, ptype, &en);
@@ -556,27 +562,84 @@ static int vdec_set_properties(struct venus_inst *inst)
return ret;
}
- if (core->res->hfi_version == HFI_VERSION_3XX ||
- inst->cap_bufs_mode_dynamic) {
- struct hfi_buffer_alloc_mode mode;
+ /* Force searching UBWC formats for resolutions bigger than HD */
+ if (width > 1920 && height > ALIGN(1080, 32))
+ ubwc = true;
+
+ /* For Venus v4, the UBWC format is mandatory */
+ if (IS_V4(core))
+ ubwc = true;
+
+ ret = venus_helper_get_out_fmts(inst, inst->fmt_cap->pixfmt, &out_fmt,
+ &out2_fmt, ubwc);
+ if (ret)
+ return ret;
+
+ inst->output_buf_size =
+ venus_helper_get_framesz_raw(out_fmt, width, height);
+ inst->output2_buf_size =
+ venus_helper_get_framesz_raw(out2_fmt, width, height);
+
+ if (is_ubwc_fmt(out_fmt)) {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT2;
+ inst->opb_fmt = out2_fmt;
+ inst->dpb_buftype = HFI_BUFFER_OUTPUT;
+ inst->dpb_fmt = out_fmt;
+ } else if (is_ubwc_fmt(out2_fmt)) {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT;
+ inst->opb_fmt = out_fmt;
+ inst->dpb_buftype = HFI_BUFFER_OUTPUT2;
+ inst->dpb_fmt = out2_fmt;
+ } else {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT;
+ inst->opb_fmt = out_fmt;
+ inst->dpb_buftype = 0;
+ inst->dpb_fmt = 0;
+ }
+
+ ret = venus_helper_set_raw_format(inst, inst->opb_fmt,
+ inst->opb_buftype);
+ if (ret)
+ return ret;
- ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
- mode.type = HFI_BUFFER_OUTPUT;
- mode.mode = HFI_BUFFER_MODE_DYNAMIC;
+ if (inst->dpb_fmt) {
+ ret = venus_helper_set_multistream(inst, false, true);
+ if (ret)
+ return ret;
- ret = hfi_session_set_property(inst, ptype, &mode);
+ ret = venus_helper_set_raw_format(inst, inst->dpb_fmt,
+ inst->dpb_buftype);
if (ret)
return ret;
- }
- if (ctr->post_loop_deb_mode) {
- ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
- en.enable = 1;
- ret = hfi_session_set_property(inst, ptype, &en);
+ ret = venus_helper_set_output_resolution(inst, width, height,
+ HFI_BUFFER_OUTPUT2);
if (ret)
return ret;
}
+ if (IS_V3(core) || IS_V4(core)) {
+ if (inst->output2_buf_size) {
+ ret = venus_helper_set_bufsize(inst,
+ inst->output2_buf_size,
+ HFI_BUFFER_OUTPUT2);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->output_buf_size) {
+ ret = venus_helper_set_bufsize(inst,
+ inst->output_buf_size,
+ HFI_BUFFER_OUTPUT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = venus_helper_set_dyn_bufmode(inst);
+ if (ret)
+ return ret;
+
return 0;
}
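
vdec_output_conf() splits the decoder output into two queues whenever UBWC is in play: the OPB (output picture buffer) queue carries frames to userspace, while the DPB (decoded picture buffer) queue holds the firmware's reference frames. The branches above reduce to:

	/*
	 *  out_fmt UBWC?  out2_fmt UBWC?  OPB (userspace)  DPB (fw refs)
	 *  yes            -               OUTPUT2          OUTPUT
	 *  no             yes             OUTPUT           OUTPUT2
	 *  no             no              OUTPUT           none
	 */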
@@ -603,19 +666,32 @@ deinit:
return ret;
}
-static int vdec_cap_num_buffers(struct venus_inst *inst, unsigned int *num)
+static int vdec_num_buffers(struct venus_inst *inst, unsigned int *in_num,
+ unsigned int *out_num)
{
+ enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
+ *in_num = *out_num = 0;
+
ret = vdec_init_session(inst);
if (ret)
return ret;
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+ if (ret)
+ goto deinit;
+
+ *in_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ goto deinit;
- *num = bufreq.count_actual;
+ *out_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+deinit:
hfi_session_deinit(inst);
return ret;
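
HFI_BUFREQ_COUNT_MIN() is introduced elsewhere in this patch set; it presumably hides a 4xx change in where the minimum count is reported, along the lines of (the count_min_host field name is an assumption):

	#define HFI_BUFREQ_COUNT_MIN(req, ver)			\
		((ver) == HFI_VERSION_4XX ?			\
		 (req)->count_min_host : (req)->count_min)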
@@ -626,10 +702,12 @@ static int vdec_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
- unsigned int p, num;
+ unsigned int in_num, out_num;
int ret = 0;
if (*num_planes) {
+ unsigned int output_buf_size = venus_helper_get_opb_size(inst);
+
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
*num_planes != inst->fmt_out->num_planes)
return -EINVAL;
@@ -643,41 +721,35 @@ static int vdec_queue_setup(struct vb2_queue *q,
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
- sizes[0] < inst->output_buf_size)
+ sizes[0] < output_buf_size)
return -EINVAL;
return 0;
}
+ ret = vdec_num_buffers(inst, &in_num, &out_num);
+ if (ret)
+ return ret;
+
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
*num_planes = inst->fmt_out->num_planes;
- sizes[0] = get_framesize_compressed(inst->out_width,
+ sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
+ inst->out_width,
inst->out_height);
inst->input_buf_size = sizes[0];
+ *num_buffers = max(*num_buffers, in_num);
inst->num_input_bufs = *num_buffers;
-
- ret = vdec_cap_num_buffers(inst, &num);
- if (ret)
- break;
-
- inst->num_output_bufs = num;
+ inst->num_output_bufs = out_num;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
*num_planes = inst->fmt_cap->num_planes;
-
- ret = vdec_cap_num_buffers(inst, &num);
- if (ret)
- break;
-
- *num_buffers = max(*num_buffers, num);
-
- for (p = 0; p < *num_planes; p++)
- sizes[p] = get_framesize_uncompressed(p, inst->width,
- inst->height);
-
- inst->num_output_bufs = *num_buffers;
+ sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
+ inst->width,
+ inst->height);
inst->output_buf_size = sizes[0];
+ *num_buffers = max(*num_buffers, out_num);
+ inst->num_output_bufs = *num_buffers;
break;
default:
ret = -EINVAL;
@@ -689,6 +761,7 @@ static int vdec_queue_setup(struct vb2_queue *q,
static int vdec_verify_conf(struct venus_inst *inst)
{
+ enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
@@ -700,14 +773,14 @@ static int vdec_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
- inst->num_output_bufs < bufreq.count_min)
+ inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
- if (inst->num_input_bufs < bufreq.count_min)
+ if (inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
return 0;
@@ -716,8 +789,6 @@ static int vdec_verify_conf(struct venus_inst *inst)
static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct venus_inst *inst = vb2_get_drv_priv(q);
- struct venus_core *core = inst->core;
- u32 ptype;
int ret;
mutex_lock(&inst->lock);
@@ -746,24 +817,20 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
if (ret)
goto deinit_sess;
- if (core->res->hfi_version == HFI_VERSION_3XX) {
- struct hfi_buffer_size_actual buf_sz;
-
- ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
- buf_sz.type = HFI_BUFFER_OUTPUT;
- buf_sz.size = inst->output_buf_size;
-
- ret = hfi_session_set_property(inst, ptype, &buf_sz);
- if (ret)
- goto deinit_sess;
- }
+ ret = vdec_output_conf(inst);
+ if (ret)
+ goto deinit_sess;
ret = vdec_verify_conf(inst);
if (ret)
goto deinit_sess;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
- VB2_MAX_FRAME);
+ VB2_MAX_FRAME, VB2_MAX_FRAME);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_alloc_dpb_bufs(inst);
if (ret)
goto deinit_sess;
@@ -818,9 +885,11 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vbuf->field = V4L2_FIELD_NONE;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ unsigned int opb_sz = venus_helper_get_opb_size(inst);
+
vb = &vbuf->vb2_buf;
vb->planes[0].bytesused =
- max_t(unsigned int, inst->output_buf_size, bytesused);
+ max_t(unsigned int, opb_sz, bytesused);
vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
@@ -901,22 +970,7 @@ static void vdec_inst_init(struct venus_inst *inst)
inst->fps = 30;
inst->timeperframe.numerator = 1;
inst->timeperframe.denominator = 30;
-
- inst->cap_width.min = 64;
- inst->cap_width.max = 1920;
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_width.max = 3840;
- inst->cap_width.step_size = 1;
- inst->cap_height.min = 64;
- inst->cap_height.max = ALIGN(1080, 32);
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_height.max = ALIGN(2160, 32);
- inst->cap_height.step_size = 1;
- inst->cap_framerate.min = 1;
- inst->cap_framerate.max = 30;
- inst->cap_framerate.step_size = 1;
- inst->cap_mbs_per_frame.min = 16;
- inst->cap_mbs_per_frame.max = 8160;
+ inst->hfi_codec = HFI_VIDEO_CODEC_H264;
}
static const struct v4l2_m2m_ops vdec_m2m_ops = {
@@ -973,6 +1027,7 @@ static int vdec_open(struct file *file)
if (!inst)
return -ENOMEM;
+ INIT_LIST_HEAD(&inst->dpbbufs);
INIT_LIST_HEAD(&inst->registeredbufs);
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
@@ -1080,12 +1135,18 @@ static int vdec_probe(struct platform_device *pdev)
if (!core)
return -EPROBE_DEFER;
- if (core->res->hfi_version == HFI_VERSION_3XX) {
+ if (IS_V3(core) || IS_V4(core)) {
core->core0_clk = devm_clk_get(dev, "core");
if (IS_ERR(core->core0_clk))
return PTR_ERR(core->core0_clk);
}
+ if (IS_V4(core)) {
+ core->core0_bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(core->core0_bus_clk))
+ return PTR_ERR(core->core0_bus_clk);
+ }
+
platform_set_drvdata(pdev, core);
vdev = video_device_alloc();
@@ -1130,15 +1191,21 @@ static int vdec_remove(struct platform_device *pdev)
static __maybe_unused int vdec_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
+ if (ret)
+ return ret;
+
+ if (IS_V4(core))
+ clk_disable_unprepare(core->core0_bus_clk);
+
clk_disable_unprepare(core->core0_clk);
- writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
- return 0;
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
}
static __maybe_unused int vdec_runtime_resume(struct device *dev)
@@ -1146,13 +1213,29 @@ static __maybe_unused int vdec_runtime_resume(struct device *dev)
struct venus_core *core = dev_get_drvdata(dev);
int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(core->core0_clk);
- writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ if (ret)
+ goto err_power_disable;
+ if (IS_V4(core))
+ ret = clk_prepare_enable(core->core0_bus_clk);
+
+ if (ret)
+ goto err_unprepare_core0;
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
+
+err_unprepare_core0:
+ clk_disable_unprepare(core->core0_clk);
+err_power_disable:
+ venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
index 032839bbc967..f4604b0cd57e 100644
--- a/drivers/media/platform/qcom/venus/vdec_ctrls.c
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -29,7 +29,7 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ctr->profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
@@ -54,7 +54,7 @@ static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ret = hfi_session_get_property(inst, ptype, &hprop);
if (!ret)
ctr->profile = hprop.profile_level.profile;
@@ -130,8 +130,10 @@ int vdec_ctrl_init(struct venus_inst *inst)
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0);
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
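
Unlike v4l2_ctrl_new_std(), which takes min/max/step/default for an integer control, the menu variant takes the highest valid menu index, a skip mask of indices to hide, and a default index:

	struct v4l2_ctrl *
	v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
			       const struct v4l2_ctrl_ops *ops,
			       u32 id, u8 max, u64 mask, u8 def);

so the call above exposes entries 0..V4L2_MPEG_VIDEO_VP8_PROFILE_3 with none skipped, defaulting to V4L2_MPEG_VIDEO_VP8_PROFILE_0.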
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 6b2ce479584e..41249d1443fa 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -24,38 +25,13 @@
#include <media/v4l2-ctrls.h>
#include "hfi_venus_io.h"
+#include "hfi_parser.h"
#include "core.h"
#include "helpers.h"
#include "venc.h"
#define NUM_B_FRAMES_MAX 4
-static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height)
-{
- u32 y_stride, uv_stride, y_plane;
- u32 y_sclines, uv_sclines, uv_plane;
- u32 size;
-
- y_stride = ALIGN(width, 128);
- uv_stride = ALIGN(width, 128);
- y_sclines = ALIGN(height, 32);
- uv_sclines = ALIGN(((height + 1) >> 1), 16);
-
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + SZ_4K;
- size = y_plane + uv_plane + SZ_8K;
- size = ALIGN(size, SZ_4K);
-
- return size;
-}
-
-static u32 get_framesize_compressed(u32 width, u32 height)
-{
- u32 sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
-
- return ALIGN(sz, SZ_4K);
-}
-
/*
* Three reasons to keep MPLANE formats (despite the number of planes
* currently being one):
@@ -84,6 +60,10 @@ static const struct venus_format venc_formats[] = {
.pixfmt = V4L2_PIX_FMT_VP8,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
};
@@ -223,7 +203,7 @@ static int venc_v4l2_to_hfi(int id, int value)
case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
return HFI_H264_ENTROPY_CABAC;
}
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
switch (value) {
case 0:
default:
@@ -245,6 +225,46 @@ static int venc_v4l2_to_hfi(int id, int value)
case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
return HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
}
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ default:
+ return HFI_HEVC_PROFILE_MAIN;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return HFI_HEVC_PROFILE_MAIN_STILL_PIC;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return HFI_HEVC_PROFILE_MAIN10;
+ }
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ default:
+ return HFI_HEVC_LEVEL_1;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return HFI_HEVC_LEVEL_2;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return HFI_HEVC_LEVEL_21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return HFI_HEVC_LEVEL_3;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return HFI_HEVC_LEVEL_31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return HFI_HEVC_LEVEL_4;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return HFI_HEVC_LEVEL_41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return HFI_HEVC_LEVEL_5;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return HFI_HEVC_LEVEL_51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return HFI_HEVC_LEVEL_52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return HFI_HEVC_LEVEL_6;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return HFI_HEVC_LEVEL_61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return HFI_HEVC_LEVEL_62;
+ }
}
return 0;
@@ -283,7 +303,6 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
- unsigned int p;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
@@ -297,14 +316,12 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
- pixmp->width = 1280;
- pixmp->height = 720;
}
- pixmp->width = clamp(pixmp->width, inst->cap_width.min,
- inst->cap_width.max);
- pixmp->height = clamp(pixmp->height, inst->cap_height.min,
- inst->cap_height.max);
+ pixmp->width = clamp(pixmp->width, frame_width_min(inst),
+ frame_width_max(inst));
+ pixmp->height = clamp(pixmp->height, frame_height_min(inst),
+ frame_height_max(inst));
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
pixmp->height = ALIGN(pixmp->height, 32);
@@ -317,19 +334,14 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- for (p = 0; p < pixmp->num_planes; p++) {
- pfmt[p].sizeimage =
- get_framesize_uncompressed(p, pixmp->width,
- pixmp->height);
+ pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height);
- pfmt[p].bytesperline = ALIGN(pixmp->width, 128);
- }
- } else {
- pfmt[0].sizeimage = get_framesize_compressed(pixmp->width,
- pixmp->height);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
+ else
pfmt[0].bytesperline = 0;
- }
return fmt;
}
@@ -553,12 +565,12 @@ static int venc_enum_framesizes(struct file *file, void *fh,
if (fsize->index)
return -EINVAL;
- fsize->stepwise.min_width = inst->cap_width.min;
- fsize->stepwise.max_width = inst->cap_width.max;
- fsize->stepwise.step_width = inst->cap_width.step_size;
- fsize->stepwise.min_height = inst->cap_height.min;
- fsize->stepwise.max_height = inst->cap_height.max;
- fsize->stepwise.step_height = inst->cap_height.step_size;
+ fsize->stepwise.min_width = frame_width_min(inst);
+ fsize->stepwise.max_width = frame_width_max(inst);
+ fsize->stepwise.step_width = frame_width_step(inst);
+ fsize->stepwise.min_height = frame_height_min(inst);
+ fsize->stepwise.max_height = frame_height_max(inst);
+ fsize->stepwise.step_height = frame_height_step(inst);
return 0;
}
@@ -586,18 +598,18 @@ static int venc_enum_frameintervals(struct file *file, void *fh,
if (!fival->width || !fival->height)
return -EINVAL;
- if (fival->width > inst->cap_width.max ||
- fival->width < inst->cap_width.min ||
- fival->height > inst->cap_height.max ||
- fival->height < inst->cap_height.min)
+ if (fival->width > frame_width_max(inst) ||
+ fival->width < frame_width_min(inst) ||
+ fival->height > frame_height_max(inst) ||
+ fival->height < frame_height_min(inst))
return -EINVAL;
fival->stepwise.min.numerator = 1;
- fival->stepwise.min.denominator = inst->cap_framerate.max;
+ fival->stepwise.min.denominator = frate_max(inst);
fival->stepwise.max.numerator = 1;
- fival->stepwise.max.denominator = inst->cap_framerate.min;
+ fival->stepwise.max.denominator = frate_min(inst);
fival->stepwise.step.numerator = 1;
- fival->stepwise.step.denominator = inst->cap_framerate.max;
+ fival->stepwise.step.denominator = frate_max(inst);
return 0;
}
@@ -642,6 +654,14 @@ static int venc_set_properties(struct venus_inst *inst)
u32 ptype, rate_control, bitrate, profile = 0, level = 0;
int ret;
+ ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_2);
+ if (ret)
+ return ret;
+
ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
frate.buffer_type = HFI_BUFFER_OUTPUT;
frate.framerate = inst->fps * (1 << 16);
@@ -756,7 +776,7 @@ static int venc_set_properties(struct venus_inst *inst)
level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
ctr->level.h264);
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
- profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
ctr->profile.vpx);
level = 0;
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_MPEG4) {
@@ -767,6 +787,11 @@ static int venc_set_properties(struct venus_inst *inst)
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H263) {
profile = 0;
level = 0;
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ ctr->profile.hevc);
+ level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ ctr->level.hevc);
}
ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
@@ -794,7 +819,8 @@ static int venc_init_session(struct venus_inst *inst)
goto deinit;
ret = venus_helper_set_output_resolution(inst, inst->width,
- inst->height);
+ inst->height,
+ HFI_BUFFER_OUTPUT);
if (ret)
goto deinit;
@@ -835,7 +861,7 @@ static int venc_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
- unsigned int p, num, min = 4;
+ unsigned int num, min = 4;
int ret = 0;
if (*num_planes) {
@@ -870,16 +896,18 @@ static int venc_queue_setup(struct vb2_queue *q,
*num_buffers = max(*num_buffers, num);
inst->num_input_bufs = *num_buffers;
- for (p = 0; p < *num_planes; ++p)
- sizes[p] = get_framesize_uncompressed(p, inst->width,
- inst->height);
+ sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
+ inst->width,
+ inst->height);
inst->input_buf_size = sizes[0];
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
*num_planes = inst->fmt_cap->num_planes;
*num_buffers = max(*num_buffers, min);
inst->num_output_bufs = *num_buffers;
- sizes[0] = get_framesize_compressed(inst->width, inst->height);
+ sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
+ inst->width,
+ inst->height);
inst->output_buf_size = sizes[0];
break;
default:
@@ -892,6 +920,7 @@ static int venc_queue_setup(struct vb2_queue *q,
static int venc_verify_conf(struct venus_inst *inst)
{
+ enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
@@ -903,7 +932,7 @@ static int venc_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
- inst->num_output_bufs < bufreq.count_min)
+ inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
@@ -911,7 +940,7 @@ static int venc_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_input_bufs < bufreq.count_actual ||
- inst->num_input_bufs < bufreq.count_min)
+ inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
return 0;
@@ -952,7 +981,7 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
goto deinit_sess;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
- inst->num_output_bufs);
+ inst->num_output_bufs, 0);
if (ret)
goto deinit_sess;
@@ -1090,22 +1119,7 @@ static void venc_inst_init(struct venus_inst *inst)
inst->fps = 15;
inst->timeperframe.numerator = 1;
inst->timeperframe.denominator = 15;
-
- inst->cap_width.min = 96;
- inst->cap_width.max = 1920;
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_width.max = 3840;
- inst->cap_width.step_size = 2;
- inst->cap_height.min = 64;
- inst->cap_height.max = ALIGN(1080, 32);
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_height.max = ALIGN(2160, 32);
- inst->cap_height.step_size = 2;
- inst->cap_framerate.min = 1;
- inst->cap_framerate.max = 30;
- inst->cap_framerate.step_size = 1;
- inst->cap_mbs_per_frame.min = 24;
- inst->cap_mbs_per_frame.max = 8160;
+ inst->hfi_codec = HFI_VIDEO_CODEC_H264;
}
static int venc_open(struct file *file)
@@ -1118,6 +1132,7 @@ static int venc_open(struct file *file)
if (!inst)
return -ENOMEM;
+ INIT_LIST_HEAD(&inst->dpbbufs);
INIT_LIST_HEAD(&inst->registeredbufs);
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
@@ -1224,12 +1239,18 @@ static int venc_probe(struct platform_device *pdev)
if (!core)
return -EPROBE_DEFER;
- if (core->res->hfi_version == HFI_VERSION_3XX) {
+ if (IS_V3(core) || IS_V4(core)) {
core->core1_clk = devm_clk_get(dev, "core");
if (IS_ERR(core->core1_clk))
return PTR_ERR(core->core1_clk);
}
+ if (IS_V4(core)) {
+ core->core1_bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(core->core1_bus_clk))
+ return PTR_ERR(core->core1_bus_clk);
+ }
+
platform_set_drvdata(pdev, core);
vdev = video_device_alloc();
@@ -1274,15 +1295,21 @@ static int venc_remove(struct platform_device *pdev)
static __maybe_unused int venc_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
+ if (ret)
+ return ret;
+
+ if (IS_V4(core))
+ clk_disable_unprepare(core->core1_bus_clk);
+
clk_disable_unprepare(core->core1_clk);
- writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
- return 0;
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
}
static __maybe_unused int venc_runtime_resume(struct device *dev)
@@ -1290,13 +1317,29 @@ static __maybe_unused int venc_runtime_resume(struct device *dev)
struct venus_core *core = dev_get_drvdata(dev);
int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(core->core1_clk);
- writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ if (ret)
+ goto err_power_disable;
+
+ if (IS_V4(core))
+ ret = clk_prepare_enable(core->core1_bus_clk);
+
+ if (ret)
+ goto err_unprepare_core1;
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
+err_unprepare_core1:
+ clk_disable_unprepare(core->core1_clk);
+err_power_disable:
+ venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 21e938a28662..459101728d26 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -101,7 +101,7 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
ctr->profile.h264 = ctrl->val;
break;
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ctr->profile.vpx = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
@@ -248,6 +248,11 @@ int venc_ctrl_init(struct venus_inst *inst)
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
+
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX,
BITRATE_STEP, BITRATE_DEFAULT);
@@ -257,9 +262,6 @@ int venc_ctrl_init(struct venus_inst *inst)
BITRATE_STEP, BITRATE_DEFAULT_PEAK);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0);
-
- v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 2988031d285d..43c78620c9d8 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -1,19 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar-fcp.c -- R-Car Frame Compression Processor Driver
*
* Copyright (C) 2016 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index baf4eafff6e3..e3eb8fee2536 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_RCAR_CSI2
tristate "R-Car MIPI CSI-2 Receiver"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF
diff --git a/drivers/media/platform/rcar-vin/Makefile b/drivers/media/platform/rcar-vin/Makefile
index 5ab803d3e7c1..00d809f5d2c1 100644
--- a/drivers/media/platform/rcar-vin/Makefile
+++ b/drivers/media/platform/rcar-vin/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rcar-vin-objs = rcar-core.o rcar-dma.o rcar-v4l2.o
obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index d3072e166a1c..ce09799976ef 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
@@ -46,6 +42,8 @@
*/
#define rvin_group_id_to_master(vin) ((vin) < 4 ? 0 : 4)
+#define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev)
+
/* -----------------------------------------------------------------------------
* Media Controller link notification
*/
@@ -169,9 +167,37 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
/* Add the new link to the existing mask and check if it works. */
csi_id = rvin_group_entity_to_csi_id(group, link->source->entity);
+
+ if (csi_id == -ENODEV) {
+ struct v4l2_subdev *sd;
+ unsigned int i;
+
+ /*
+ * If the source entity is not one of the CSI-2 subdevices, make
+ * sure it is registered as the parallel input of one of the
+ * enabled VINs.
+ *
+ * Parallel inputs require no hardware configuration, so we can
+ * return here.
+ */
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i] && group->vin[i]->parallel &&
+ group->vin[i]->parallel->subdev == sd) {
+ group->vin[i]->is_csi = false;
+ ret = 0;
+ goto out;
+ }
+ }
+
+ vin_err(vin, "Subdevice %s not registered to any VIN\n",
+ link->source->entity->name);
+ ret = -ENODEV;
+ goto out;
+ }
+
channel = rvin_group_csi_pad_to_channel(link->source->index);
mask_new = mask & rvin_group_get_mask(vin, csi_id, channel);
-
vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
if (!mask_new) {
@@ -181,6 +207,11 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
/* New valid CHSEL found, set the new value. */
ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
+ if (ret)
+ goto out;
+
+ vin->is_csi = true;
+
out:
mutex_unlock(&group->lock);
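
The new v4l2_dev_to_vin() macro lets the notifier callbacks recover the driver instance from the notifier's v4l2_dev pointer instead of from a per-VIN notifier, which is what allows several VINs to share one group notifier. container_of() simply walks back from an embedded member to its enclosing structure:

	/* notifier->v4l2_dev points at the v4l2_dev member embedded in
	 * struct rvin_dev, so the macro expands to: */
	struct rvin_dev *vin = container_of(notifier->v4l2_dev,
					    struct rvin_dev, v4l2_dev);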
@@ -359,8 +390,6 @@ out:
* Async notifier
*/
-#define notifier_to_vin(n) container_of(n, struct rvin_dev, notifier)
-
static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
{
unsigned int pad;
@@ -376,12 +405,12 @@ static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
}
/* -----------------------------------------------------------------------------
- * Digital async notifier
+ * Parallel async notifier
*/
/* The vin lock should be held when calling the subdevice attach and detach */
-static int rvin_digital_subdevice_attach(struct rvin_dev *vin,
- struct v4l2_subdev *subdev)
+static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
+ struct v4l2_subdev *subdev)
{
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -392,15 +421,20 @@ static int rvin_digital_subdevice_attach(struct rvin_dev *vin,
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
if (ret < 0)
return ret;
- vin->digital->source_pad = ret;
+ vin->parallel->source_pad = ret;
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
- vin->digital->sink_pad = ret < 0 ? 0 : ret;
+ vin->parallel->sink_pad = ret < 0 ? 0 : ret;
+
+ if (vin->info->use_mc) {
+ vin->parallel->subdev = subdev;
+ return 0;
+ }
/* Find compatible subdevices mbus format */
vin->mbus_code = 0;
code.index = 0;
- code.pad = vin->digital->source_pad;
+ code.pad = vin->parallel->source_pad;
while (!vin->mbus_code &&
!v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
code.index++;
@@ -450,23 +484,27 @@ static int rvin_digital_subdevice_attach(struct rvin_dev *vin,
vin->vdev.ctrl_handler = &vin->ctrl_handler;
- vin->digital->subdev = subdev;
+ vin->parallel->subdev = subdev;
return 0;
}
-static void rvin_digital_subdevice_detach(struct rvin_dev *vin)
+static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
{
rvin_v4l2_unregister(vin);
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->parallel->subdev = NULL;
- vin->vdev.ctrl_handler = NULL;
- vin->digital->subdev = NULL;
+ if (!vin->info->use_mc) {
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->vdev.ctrl_handler = NULL;
+ }
}
-static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
+static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ struct media_entity *source;
+ struct media_entity *sink;
int ret;
ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
@@ -475,31 +513,50 @@ static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
- return rvin_v4l2_register(vin);
+ if (!video_is_registered(&vin->vdev)) {
+ ret = rvin_v4l2_register(vin);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!vin->info->use_mc)
+ return 0;
+
+ /* If we're running with media-controller, link the subdevs. */
+ source = &vin->parallel->subdev->entity;
+ sink = &vin->vdev.entity;
+
+ ret = media_create_pad_link(source, vin->parallel->source_pad,
+ sink, vin->parallel->sink_pad, 0);
+ if (ret)
+ vin_err(vin, "Error adding link from %s to %s: %d\n",
+ source->name, sink->name, ret);
+
+ return ret;
}
-static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
+ vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
mutex_lock(&vin->lock);
- rvin_digital_subdevice_detach(vin);
+ rvin_parallel_subdevice_detach(vin);
mutex_unlock(&vin->lock);
}
-static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
int ret;
mutex_lock(&vin->lock);
- ret = rvin_digital_subdevice_attach(vin, subdev);
+ ret = rvin_parallel_subdevice_attach(vin, subdev);
mutex_unlock(&vin->lock);
if (ret)
return ret;
@@ -507,70 +564,71 @@ static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
v4l2_set_subdev_hostdata(subdev, vin);
vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
- subdev->name, vin->digital->source_pad,
- vin->digital->sink_pad);
+ subdev->name, vin->parallel->source_pad,
+ vin->parallel->sink_pad);
return 0;
}
-static const struct v4l2_async_notifier_operations rvin_digital_notify_ops = {
- .bound = rvin_digital_notify_bound,
- .unbind = rvin_digital_notify_unbind,
- .complete = rvin_digital_notify_complete,
+static const struct v4l2_async_notifier_operations rvin_parallel_notify_ops = {
+ .bound = rvin_parallel_notify_bound,
+ .unbind = rvin_parallel_notify_unbind,
+ .complete = rvin_parallel_notify_complete,
};
-static int rvin_digital_parse_v4l2(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
+static int rvin_parallel_parse_v4l2(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
- struct rvin_graph_entity *rvge =
- container_of(asd, struct rvin_graph_entity, asd);
+ struct rvin_parallel_entity *rvpe =
+ container_of(asd, struct rvin_parallel_entity, asd);
if (vep->base.port || vep->base.id)
return -ENOTCONN;
- vin->mbus_cfg.type = vep->bus_type;
+ vin->parallel = rvpe;
+ vin->parallel->mbus_type = vep->bus_type;
- switch (vin->mbus_cfg.type) {
+ switch (vin->parallel->mbus_type) {
case V4L2_MBUS_PARALLEL:
vin_dbg(vin, "Found PARALLEL media bus\n");
- vin->mbus_cfg.flags = vep->bus.parallel.flags;
+ vin->parallel->mbus_flags = vep->bus.parallel.flags;
break;
case V4L2_MBUS_BT656:
vin_dbg(vin, "Found BT656 media bus\n");
- vin->mbus_cfg.flags = 0;
+ vin->parallel->mbus_flags = 0;
break;
default:
vin_err(vin, "Unknown media bus type\n");
return -EINVAL;
}
- vin->digital = rvge;
-
return 0;
}
-static int rvin_digital_graph_init(struct rvin_dev *vin)
+static int rvin_parallel_init(struct rvin_dev *vin)
{
int ret;
- ret = v4l2_async_notifier_parse_fwnode_endpoints(
- vin->dev, &vin->notifier,
- sizeof(struct rvin_graph_entity), rvin_digital_parse_v4l2);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ vin->dev, &vin->notifier, sizeof(struct rvin_parallel_entity),
+ 0, rvin_parallel_parse_v4l2);
if (ret)
return ret;
- if (!vin->digital)
- return -ENODEV;
+ /* When using the media controller, it's fine to have no input registered. */
+ if (!vin->parallel)
+ return vin->info->use_mc ? 0 : -ENODEV;
- vin_dbg(vin, "Found digital subdevice %pOF\n",
- to_of_node(vin->digital->asd.match.fwnode));
+ vin_dbg(vin, "Found parallel subdevice %pOF\n",
+ to_of_node(vin->parallel->asd.match.fwnode));
- vin->notifier.ops = &rvin_digital_notify_ops;
+ vin->notifier.ops = &rvin_parallel_notify_ops;
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
return ret;
}
@@ -583,7 +641,7 @@ static int rvin_digital_graph_init(struct rvin_dev *vin)
static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
const struct rvin_group_route *route;
unsigned int i;
int ret;
@@ -596,7 +654,8 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
/* Register all video nodes for the group. */
for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i]) {
+ if (vin->group->vin[i] &&
+ !video_is_registered(&vin->group->vin[i]->vdev)) {
ret = rvin_v4l2_register(vin->group->vin[i]);
if (ret)
return ret;
@@ -649,7 +708,7 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
for (i = 0; i < RCAR_VIN_NUM; i++)
@@ -673,7 +732,7 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
mutex_lock(&vin->group->lock);
@@ -707,11 +766,9 @@ static int rvin_mc_parse_of_endpoint(struct device *dev,
return -EINVAL;
if (!of_device_is_available(to_of_node(asd->match.fwnode))) {
-
vin_dbg(vin, "OF device %pOF disabled, ignoring\n",
to_of_node(asd->match.fwnode));
return -ENOTCONN;
-
}
if (vin->group->csi[vep->base.id].fwnode) {
@@ -736,12 +793,6 @@ static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
mutex_lock(&vin->group->lock);
- /* If there already is a notifier something has gone wrong, bail out. */
- if (WARN_ON(vin->group->notifier)) {
- mutex_unlock(&vin->group->lock);
- return -EINVAL;
- }
-
/* If not all VINs are registered, don't register the notifier. */
for (i = 0; i < RCAR_VIN_NUM; i++)
if (vin->group->vin[i])
@@ -753,19 +804,16 @@ static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
}
/*
- * Have all VIN's look for subdevices. Some subdevices will overlap
- * but the parser function can handle it, so each subdevice will
- * only be registered once with the notifier.
+ * Have all VINs look for CSI-2 subdevices. Some subdevices will
+ * overlap but the parser function can handle it, so each subdevice
+ * will only be registered once with the group notifier.
*/
-
- vin->group->notifier = &vin->notifier;
-
for (i = 0; i < RCAR_VIN_NUM; i++) {
if (!vin->group->vin[i])
continue;
ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
- vin->group->vin[i]->dev, vin->group->notifier,
+ vin->group->vin[i]->dev, &vin->group->notifier,
sizeof(struct v4l2_async_subdev), 1,
rvin_mc_parse_of_endpoint);
if (ret) {
@@ -776,11 +824,15 @@ static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
mutex_unlock(&vin->group->lock);
- vin->group->notifier->ops = &rvin_group_notify_ops;
+ if (!vin->group->notifier.num_subdevs)
+ return 0;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ vin->group->notifier.ops = &rvin_group_notify_ops;
+ ret = v4l2_async_notifier_register(&vin->v4l2_dev,
+ &vin->group->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
return ret;
}
@@ -791,10 +843,6 @@ static int rvin_mc_init(struct rvin_dev *vin)
{
int ret;
- /* All our sources are CSI-2 */
- vin->mbus_cfg.type = V4L2_MBUS_CSI2;
- vin->mbus_cfg.flags = 0;
-
vin->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
if (ret)
@@ -974,7 +1022,51 @@ static const struct rvin_info rcar_info_r8a7796 = {
.routes = rcar_info_r8a7796_routes,
};
-static const struct rvin_group_route _rcar_info_r8a77970_routes[] = {
+static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 4, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 6, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 6, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 7, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77965 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77965_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77970_routes[] = {
{ .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
{ .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
{ .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
@@ -990,7 +1082,19 @@ static const struct rvin_info rcar_info_r8a77970 = {
.use_mc = true,
.max_width = 4096,
.max_height = 4096,
- .routes = _rcar_info_r8a77970_routes,
+ .routes = rcar_info_r8a77970_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77995 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77995_routes,
};
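Each route entry above ties a CSI-2 receiver and virtual channel to a VIN instance, with .mask holding one bit per CHSEL register value for which the route exists. A sketch of how such a table can be walked (example_route_possible() is illustrative only; it assumes the sentinel entry's empty mask terminates the table, as in the arrays above):

static bool example_route_possible(const struct rvin_group_route *routes,
				   enum rvin_csi_id csi, unsigned int channel,
				   unsigned int vin, unsigned int chsel)
{
	const struct rvin_group_route *route;

	for (route = routes; route->mask; route++)
		if (route->csi == csi && route->channel == channel &&
		    route->vin == vin && (route->mask & BIT(chsel)))
			return true;

	return false;
}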
static const struct of_device_id rvin_of_id_table[] = {
@@ -1031,9 +1135,17 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a7796,
},
{
+ .compatible = "renesas,vin-r8a77965",
+ .data = &rcar_info_r8a77965,
+ },
+ {
.compatible = "renesas,vin-r8a77970",
.data = &rcar_info_r8a77970,
},
+ {
+ .compatible = "renesas,vin-r8a77995",
+ .data = &rcar_info_r8a77995,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
@@ -1085,20 +1197,35 @@ static int rcar_vin_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, vin);
- if (vin->info->use_mc)
+
+ if (vin->info->use_mc) {
ret = rvin_mc_init(vin);
- else
- ret = rvin_digital_graph_init(vin);
- if (ret < 0)
- goto error;
+ if (ret)
+ goto error_dma_unregister;
+ }
+
+ ret = rvin_parallel_init(vin);
+ if (ret)
+ goto error_group_unregister;
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
return 0;
-error:
+
+error_group_unregister:
+ if (vin->info->use_mc) {
+ mutex_lock(&vin->group->lock);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_notifier_unregister(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
+ }
+ mutex_unlock(&vin->group->lock);
+ rvin_group_put(vin);
+ }
+
+error_dma_unregister:
rvin_dma_unregister(vin);
- v4l2_async_notifier_cleanup(&vin->notifier);
return ret;
}
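A condensed sketch of the unwind ordering above (all example_* names are hypothetical stand-ins, not driver code): resources are released in reverse order of acquisition, and each label undoes only what had succeeded before the jump.

static int example_dma_register(void) { return 0; }	/* hypothetical */
static void example_dma_unregister(void) { }		/* hypothetical */
static int example_group_init(void) { return 0; }	/* hypothetical */
static void example_group_cleanup(void) { }		/* hypothetical */
static int example_parallel_init(void) { return 0; }	/* hypothetical */

static int example_probe(void)
{
	int ret;

	ret = example_dma_register();
	if (ret)
		return ret;

	ret = example_group_init();
	if (ret)
		goto error_dma_unregister;

	ret = example_parallel_init();
	if (ret)
		goto error_group_unregister;

	return 0;

error_group_unregister:
	example_group_cleanup();
error_dma_unregister:
	example_dma_unregister();
	return ret;
}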
@@ -1116,8 +1243,10 @@ static int rcar_vin_remove(struct platform_device *pdev)
if (vin->info->use_mc) {
mutex_lock(&vin->group->lock);
- if (vin->group->notifier == &vin->notifier)
- vin->group->notifier = NULL;
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_notifier_unregister(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
+ }
mutex_unlock(&vin->group->lock);
rvin_group_put(vin);
} else {
@@ -1142,4 +1271,4 @@ module_platform_driver(rcar_vin_driver);
MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index daef72d410a3..dc5ae8025832 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -339,6 +339,7 @@ enum rcar_csi2_pads {
struct rcar_csi2_info {
int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
+ int (*confirm_start)(struct rcar_csi2 *priv);
const struct rcsi2_mbps_reg *hsfreqrange;
unsigned int csi0clkfreqrange;
bool clear_ulps;
@@ -545,6 +546,13 @@ static int rcsi2_start(struct rcar_csi2 *priv)
if (ret)
return ret;
+ /* Confirm start */
+ if (priv->info->confirm_start) {
+ ret = priv->info->confirm_start(priv);
+ if (ret)
+ return ret;
+ }
+
/* Clear Ultra Low Power interrupt. */
if (priv->info->clear_ulps)
rcsi2_write(priv, INTSTATE_REG,
@@ -881,6 +889,11 @@ static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
{
+ return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
+}
+
+static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv)
+{
static const struct phtw_value step1[] = {
{ .data = 0xed, .code = 0x34 },
{ .data = 0xed, .code = 0x44 },
@@ -890,12 +903,6 @@ static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
{ /* sentinel */ },
};
- int ret;
-
- ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
- if (ret)
- return ret;
-
return rcsi2_phtw_write_array(priv, step1);
}
@@ -949,6 +956,7 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
.init_phtw = rcsi2_init_phtw_v3m_e3,
+ .confirm_start = rcsi2_confirm_start_v3m_e3,
};
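The confirm_start hook added above follows the usual optional-callback pattern: per-SoC ops stay NULL unless the silicon needs the extra step, and callers invoke them only when present. A self-contained sketch (example_* names are illustrative, not driver code):

struct example_info {
	int (*confirm_start)(void *priv);	/* optional, may be NULL */
};

static int example_start(const struct example_info *info, void *priv)
{
	if (info->confirm_start) {
		int ret = info->confirm_start(priv);

		if (ret)
			return ret;
	}

	return 0;
}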
static const struct of_device_id rcar_csi2_of_table[] = {
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index ac07f99e3516..92323310f735 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/delay.h>
@@ -123,6 +119,7 @@
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
#define VNDMR2_HPS (1 << 29)
+#define VNDMR2_CES (1 << 28)
#define VNDMR2_FTEV (1 << 17)
#define VNDMR2_VLV(n) ((n & 0xf) << 12)
@@ -659,8 +656,12 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
- vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
- VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
+ if (!vin->is_csi &&
+ vin->parallel->mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV8_BT656;
+ else
+ vnmc |= VNMC_INF_YUV8_BT601;
+
input_is_yuv = true;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
@@ -668,8 +669,12 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
- vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
- VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
+ if (!vin->is_csi &&
+ vin->parallel->mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV10_BT656;
+ else
+ vnmc |= VNMC_INF_YUV10_BT601;
+
input_is_yuv = true;
break;
default:
@@ -682,13 +687,19 @@ static int rvin_setup(struct rvin_dev *vin)
else
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
- /* Hsync Signal Polarity Select */
- if (!(vin->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
- dmr2 |= VNDMR2_HPS;
+ if (!vin->is_csi) {
+ /* Hsync Signal Polarity Select */
+ if (!(vin->parallel->mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_HPS;
+
+ /* Vsync Signal Polarity Select */
+ if (!(vin->parallel->mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_VPS;
- /* Vsync Signal Polarity Select */
- if (!(vin->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
- dmr2 |= VNDMR2_VPS;
+ /* Data Enable Polarity Select */
+ if (vin->parallel->mbus_flags & V4L2_MBUS_DATA_ENABLE_LOW)
+ dmr2 |= VNDMR2_CES;
+ }
/*
* Output format
@@ -733,8 +744,8 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc |= VNMC_BPS;
if (vin->info->model == RCAR_GEN3) {
- /* Select between CSI-2 and Digital input */
- if (vin->mbus_cfg.type == V4L2_MBUS_CSI2)
+ /* Select between CSI-2 and parallel input */
+ if (vin->is_csi)
vnmc &= ~VNMC_DPINE;
else
vnmc |= VNMC_DPINE;
@@ -856,7 +867,7 @@ static int rvin_capture_start(struct rvin_dev *vin)
/* Continuous Frame Capture Mode */
rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
- vin->state = RUNNING;
+ vin->state = STARTING;
return 0;
}
@@ -910,6 +921,20 @@ static irqreturn_t rvin_irq(int irq, void *data)
vnms = rvin_read(vin, VNMS_REG);
slot = (vnms & VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
+ /*
+ * To hand buffers back to userspace in a known order, start
+ * capturing from slot 0.
+ */
+ if (vin->state == STARTING) {
+ if (slot != 0) {
+ vin_dbg(vin, "Starting sync slot: %d\n", slot);
+ goto done;
+ }
+
+ vin_dbg(vin, "Capture start synced!\n");
+ vin->state = RUNNING;
+ }
+
/* Capture frame */
if (vin->queue_buf[slot]) {
vin->queue_buf[slot]->field = vin->format.field;
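A reduced sketch of the capture-start synchronisation above (example_* names are illustrative): frames are dropped until the hardware reports buffer slot 0, so the first frame handed back after start always comes from slot 0 and buffers reach userspace in queue order.

enum example_state { EXAMPLE_STARTING, EXAMPLE_RUNNING };

static bool example_accept_frame(enum example_state *state, unsigned int slot)
{
	if (*state == EXAMPLE_STARTING) {
		if (slot != 0)
			return false;	/* drop until slot 0 comes around */
		*state = EXAMPLE_RUNNING;
	}

	return true;
}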
@@ -1074,7 +1099,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
/* No media controller used, simply pass operation to subdevice. */
if (!vin->info->use_mc) {
- ret = v4l2_subdev_call(vin->digital->subdev, video, s_stream,
+ ret = v4l2_subdev_call(vin->parallel->subdev, video, s_stream,
on);
return ret == -ENOIOCTLCMD ? 0 : ret;
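v4l2_subdev_call() returns -ENOIOCTLCMD when the subdevice does not implement the op; for optional ops such as s_stream that is not a failure, hence the mapping to 0 above. A sketch of the idiom (example_subdev_stream() is not driver code):

static int example_subdev_stream(struct v4l2_subdev *sd, int on)
{
	int ret = v4l2_subdev_call(sd, video, s_stream, on);

	return ret == -ENOIOCTLCMD ? 0 : ret;
}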
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index e78fba84d590..5a54779cfc27 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/pm_runtime.h>
@@ -144,7 +140,7 @@ static int rvin_reset_format(struct rvin_dev *vin)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .pad = vin->digital->source_pad,
+ .pad = vin->parallel->source_pad,
};
int ret;
@@ -175,7 +171,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_subdev_pad_config *pad_cfg;
struct v4l2_subdev_format format = {
.which = which,
- .pad = vin->digital->source_pad,
+ .pad = vin->parallel->source_pad,
};
enum v4l2_field field;
u32 width, height;
@@ -517,7 +513,7 @@ static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
if (timings->pad)
return -EINVAL;
- timings->pad = vin->digital->sink_pad;
+ timings->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
@@ -569,7 +565,7 @@ static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
if (cap->pad)
return -EINVAL;
- cap->pad = vin->digital->sink_pad;
+ cap->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
@@ -587,7 +583,7 @@ static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital->sink_pad;
+ edid->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, get_edid, edid);
@@ -605,7 +601,7 @@ static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital->sink_pad;
+ edid->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, set_edid, edid);
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index c2aef789b491..0b13b34d03e3 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __RCAR_VIN__
@@ -53,11 +49,13 @@ enum rvin_csi_id {
/**
* STOPPED - No operation in progress
+ * STARTING - Capture starting up
* RUNNING - Operation in progress have buffers
* STOPPING - Stopping operation
*/
enum rvin_dma_state {
STOPPED = 0,
+ STARTING,
RUNNING,
STOPPING,
};
@@ -73,16 +71,22 @@ struct rvin_video_format {
};
/**
- * struct rvin_graph_entity - Video endpoint from async framework
+ * struct rvin_parallel_entity - Parallel video input endpoint descriptor
* @asd: sub-device descriptor for async framework
* @subdev: subdevice matched using async framework
+ * @mbus_type: media bus type
+ * @mbus_flags: media bus configuration flags
* @source_pad: source pad of remote subdevice
* @sink_pad: sink pad of remote subdevice
*/
-struct rvin_graph_entity {
+struct rvin_parallel_entity {
struct v4l2_async_subdev asd;
struct v4l2_subdev *subdev;
+ enum v4l2_mbus_type mbus_type;
+ unsigned int mbus_flags;
+
unsigned int source_pad;
unsigned int sink_pad;
};
@@ -146,7 +150,8 @@ struct rvin_info {
* @v4l2_dev: V4L2 device
* @ctrl_handler: V4L2 control handler
* @notifier: V4L2 asynchronous subdevs notifier
- * @digital: entity in the DT for local digital subdevice
+ *
+ * @parallel: parallel input subdevice descriptor
*
* @group: Gen3 CSI group
* @id: Gen3 group id for this VIN
@@ -164,7 +169,8 @@ struct rvin_info {
* @sequence: V4L2 buffers sequence number
* @state: keeps track of operation state
*
- * @mbus_cfg: media bus configuration from DT
+ * @is_csi: flag to mark the VIN as using a CSI-2 subdevice
+ *
* @mbus_code: media bus format code
* @format: active V4L2 pixel format
*
@@ -182,7 +188,8 @@ struct rvin_dev {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
- struct rvin_graph_entity *digital;
+
+ struct rvin_parallel_entity *parallel;
struct rvin_group *group;
unsigned int id;
@@ -199,7 +206,8 @@ struct rvin_dev {
unsigned int sequence;
enum rvin_dma_state state;
- struct v4l2_mbus_config mbus_cfg;
+ bool is_csi;
+
u32 mbus_code;
struct v4l2_pix_format format;
@@ -209,7 +217,7 @@ struct rvin_dev {
v4l2_std_id std;
};
-#define vin_to_source(vin) ((vin)->digital->subdev)
+#define vin_to_source(vin) ((vin)->parallel->subdev)
/* Debug */
#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
@@ -225,8 +233,7 @@ struct rvin_dev {
*
* @lock: protects the count, notifier, vin and csi members
* @count: number of enabled VIN instances found in DT
- * @notifier: pointer to the notifier of a VIN which handles the
- * groups async sub-devices.
+ * @notifier: group notifier for CSI-2 async subdevices
* @vin: VIN instances which are part of the group
* @csi: array of pairs of fwnode and subdev pointers
* to all CSI-2 subdevices.
@@ -238,7 +245,7 @@ struct rvin_group {
struct mutex lock;
unsigned int count;
- struct v4l2_async_notifier *notifier;
+ struct v4l2_async_notifier notifier;
struct rvin_dev *vin[RCAR_VIN_NUM];
struct {
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index dc7e280c91b4..81413ab52475 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Gen3 Digital Radio Interface (DRIF) driver
*
* Copyright (C) 2017 Renesas Electronics Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -1499,5 +1495,5 @@ module_platform_driver(rcar_drif_driver);
MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver");
MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index b13dec3081e5..2a15b7cca338 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas R-Car Fine Display Processor
*
@@ -8,11 +9,6 @@
*
* This code is developed and inspired from the vim2m, rcar_jpu,
* m2m-deinterlace, and vsp1 drivers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
*/
#include <linux/clk.h>
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 8b44a849ab41..e5c882423f57 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author: Mikhail Ulyanov
* Copyright (C) 2014-2015 Cogent Embedded, Inc. <source@cogentembedded.com>
@@ -11,10 +12,6 @@
* 1) Rotation
* 2) Cropping
* 3) V4L2_CID_JPEG_ACTIVE_MARKER
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <asm/unaligned.h>
@@ -198,7 +195,6 @@
* @vfd_decoder: video device node for decoder mem2mem mode
* @m2m_dev: v4l2 mem2mem device data
* @curr: pointer to current context
- * @irq_queue: interrupt handler waitqueue
* @regs: JPEG IP registers mapping
* @irq: JPEG IP irq
* @clk: JPEG IP clock
@@ -213,7 +209,6 @@ struct jpu {
struct video_device vfd_decoder;
struct v4l2_m2m_dev *m2m_dev;
struct jpu_ctx *curr;
- wait_queue_head_t irq_queue;
void __iomem *regs;
unsigned int irq;
@@ -1494,24 +1489,8 @@ static void jpu_device_run(void *priv)
spin_unlock_irqrestore(&ctx->jpu->lock, flags);
}
-static int jpu_job_ready(void *priv)
-{
- return 1;
-}
-
-static void jpu_job_abort(void *priv)
-{
- struct jpu_ctx *ctx = priv;
-
- if (!wait_event_timeout(ctx->jpu->irq_queue, !ctx->jpu->curr,
- msecs_to_jiffies(JPU_JOB_TIMEOUT)))
- jpu_cleanup(ctx, true);
-}
-
static const struct v4l2_m2m_ops jpu_m2m_ops = {
.device_run = jpu_device_run,
- .job_ready = jpu_job_ready,
- .job_abort = jpu_job_abort,
};
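With the mem2mem core treating .job_ready and .job_abort as optional and itself waiting for a running job to finish, a driver that has nothing to cancel in hardware can register .device_run alone, which is what the removal above relies on. A sketch (example_* names are illustrative):

static void example_device_run(void *priv)
{
	/* Program the hardware and kick off one transaction. */
}

static const struct v4l2_m2m_ops example_m2m_ops = {
	.device_run = example_device_run,
};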
/*
@@ -1592,9 +1571,6 @@ static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx);
- /* ...wakeup abort routine if needed */
- wake_up(&jpu->irq_queue);
-
return IRQ_HANDLED;
handled:
@@ -1628,7 +1604,6 @@ static int jpu_probe(struct platform_device *pdev)
if (!jpu)
return -ENOMEM;
- init_waitqueue_head(&jpu->irq_queue);
mutex_init(&jpu->mutex);
spin_lock_init(&jpu->lock);
jpu->dev = &pdev->dev;
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index fe4fe944592d..ad782901cd7a 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -254,6 +254,18 @@ static const struct ceu_fmt ceu_fmt_list[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.bpp = 16,
},
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .bpp = 16,
+ },
};
static const struct ceu_fmt *get_ceu_fmt_from_fourcc(unsigned int fourcc)
@@ -272,6 +284,9 @@ static bool ceu_fmt_mplane(struct v4l2_pix_format_mplane *pix)
{
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
return false;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
@@ -380,7 +395,9 @@ static int ceu_hw_config(struct ceu_device *ceudev)
switch (pix->pixelformat) {
/* Data fetch sync mode */
case V4L2_PIX_FMT_YUYV:
- /* TODO: handle YUYV permutations through DTARY bits. */
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
camcr = CEU_CAMCR_JPEG;
cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
cfzsr = (pix->height << 16) | pix->width;
@@ -568,6 +585,9 @@ static void ceu_calc_plane_sizes(struct ceu_device *ceudev,
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
pix->num_planes = 1;
bpl = pix->width * ceu_fmt->bpp / 8;
szimage = pix->height * bpl;
@@ -762,42 +782,59 @@ static const struct vb2_ops ceu_vb2_ops = {
/* --- CEU image formats handling --- */
/*
- * ceu_try_fmt() - test format on CEU and sensor
+ * __ceu_try_fmt() - test format on CEU and sensor
* @ceudev: The CEU device.
* @v4l2_fmt: format to test.
+ * @sd_mbus_code: the media bus code accepted by the subdevice; output param.
*
* Returns 0 for success, < 0 for errors.
*/
-static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt,
+ u32 *sd_mbus_code)
{
struct ceu_subdev *ceu_sd = ceudev->sd;
struct v4l2_pix_format_mplane *pix = &v4l2_fmt->fmt.pix_mp;
struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
struct v4l2_subdev_pad_config pad_cfg;
const struct ceu_fmt *ceu_fmt;
+ u32 mbus_code_old;
+ u32 mbus_code;
int ret;
/*
* Set format on sensor sub device: bus format used to produce memory
- * format is selected at initialization time.
+ * format is selected depending on the YUV component ordering, or
+ * falls back to the one selected at initialization time.
*/
struct v4l2_subdev_format sd_format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
- .format = {
- .code = ceu_sd->mbus_fmt.mbus_code,
- },
};
+ mbus_code_old = ceu_sd->mbus_fmt.mbus_code;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUYV:
+ mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mbus_code = MEDIA_BUS_FMT_UYVY8_2X8;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ mbus_code = MEDIA_BUS_FMT_YVYU8_2X8;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ mbus_code = MEDIA_BUS_FMT_VYUY8_2X8;
+ break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
break;
default:
pix->pixelformat = V4L2_PIX_FMT_NV16;
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
break;
}
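The switch above picks the 8-bit YUV 4:2:2 media bus code whose component ordering matches the requested fourcc, keeping the initialization-time code as fallback. The mapping in isolation (example_fourcc_to_mbus() is illustrative, not driver code):

static u32 example_fourcc_to_mbus(u32 fourcc, u32 fallback)
{
	switch (fourcc) {
	case V4L2_PIX_FMT_YUYV: return MEDIA_BUS_FMT_YUYV8_2X8;
	case V4L2_PIX_FMT_UYVY: return MEDIA_BUS_FMT_UYVY8_2X8;
	case V4L2_PIX_FMT_YVYU: return MEDIA_BUS_FMT_YVYU8_2X8;
	case V4L2_PIX_FMT_VYUY: return MEDIA_BUS_FMT_VYUY8_2X8;
	default:		return fallback;	/* e.g. NV12/NV16/NV21/NV61 */
	}
}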
@@ -808,9 +845,25 @@ static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
&pix->height, 4, CEU_MAX_HEIGHT, 4, 0);
v4l2_fill_mbus_format_mplane(&sd_format.format, pix);
+
+ /*
+ * Try with the mbus_code matching YUYV components ordering first,
+ * if that one fails, fallback to default selected at initialization
+ * time.
+ */
+ sd_format.format.code = mbus_code;
ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_cfg, &sd_format);
- if (ret)
- return ret;
+ if (ret) {
+ if (ret == -EINVAL) {
+ /* fallback */
+ sd_format.format.code = mbus_code_old;
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt,
+ &pad_cfg, &sd_format);
+ }
+
+ if (ret)
+ return ret;
+ }
/* Apply size returned by sensor as the CEU can't scale. */
v4l2_fill_pix_format_mplane(pix, &sd_format.format);
@@ -818,16 +871,30 @@ static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
/* Calculate per-plane sizes based on image format. */
ceu_calc_plane_sizes(ceudev, ceu_fmt, pix);
+ /* Report to caller the configured mbus format. */
+ *sd_mbus_code = sd_format.format.code;
+
return 0;
}
/*
+ * ceu_try_fmt() - Wrapper around __ceu_try_fmt(); discards the configured mbus code
+ */
+static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+{
+ u32 mbus_code;
+
+ return __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
+}
+
+/*
* ceu_set_fmt() - Apply the supplied format to both sensor and CEU
*/
static int ceu_set_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
{
struct ceu_subdev *ceu_sd = ceudev->sd;
struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ u32 mbus_code;
int ret;
/*
@@ -836,15 +903,13 @@ static int ceu_set_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
*/
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .format = {
- .code = ceu_sd->mbus_fmt.mbus_code,
- },
};
- ret = ceu_try_fmt(ceudev, v4l2_fmt);
+ ret = __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
if (ret)
return ret;
+ format.format.code = mbus_code;
v4l2_fill_mbus_format_mplane(&format.format, &v4l2_fmt->fmt.pix_mp);
ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, NULL, &format);
if (ret)
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index fa1ba98c96dc..356821c2dacf 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -64,43 +64,44 @@ static void rga_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static void rga_buf_return_buffers(struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct rga_ctx *ctx = vb2_get_drv_priv(q);
struct rockchip_rga *rga = ctx->rga;
- int ret, i;
+ int ret;
ret = pm_runtime_get_sync(rga->dev);
-
- if (!ret)
- return 0;
-
- for (i = 0; i < q->num_buffers; ++i) {
- if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
- VB2_BUF_STATE_QUEUED);
- }
+ if (ret < 0) {
+ rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
+ return ret;
}
- return ret;
+ return 0;
}
static void rga_buf_stop_streaming(struct vb2_queue *q)
{
struct rga_ctx *ctx = vb2_get_drv_priv(q);
struct rockchip_rga *rga = ctx->rga;
- struct vb2_v4l2_buffer *vbuf;
-
- for (;;) {
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- else
- vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (!vbuf)
- break;
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- }
+ rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
pm_runtime_put(rga->dev);
}
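The helper centralises vb2's buffer-return contract: a failed .start_streaming must give back every buffer it received in the QUEUED state, while .stop_streaming returns them in the ERROR state. A sketch of the start side (struct example_ctx is a hypothetical stand-in for the driver context):

struct example_ctx {
	struct device *dev;
};

static int example_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct example_ctx *ctx = vb2_get_drv_priv(q);
	int ret;

	ret = pm_runtime_get_sync(ctx->dev);
	if (ret < 0) {
		/* Ownership goes back to userspace as if never started. */
		rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	return 0;
}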
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index d508a8ba6f89..ab5a6f95044a 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -39,18 +39,6 @@
static int debug;
module_param(debug, int, 0644);
-static void job_abort(void *prv)
-{
- struct rga_ctx *ctx = prv;
- struct rockchip_rga *rga = ctx->rga;
-
- if (!rga->curr) /* No job currently running */
- return;
-
- wait_event_timeout(rga->irq_queue,
- !rga->curr, msecs_to_jiffies(RGA_TIMEOUT));
-}
-
static void device_run(void *prv)
{
struct rga_ctx *ctx = prv;
@@ -104,8 +92,6 @@ static irqreturn_t rga_isr(int irq, void *prv)
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
v4l2_m2m_job_finish(rga->m2m_dev, ctx->fh.m2m_ctx);
-
- wake_up(&rga->irq_queue);
}
return IRQ_HANDLED;
@@ -113,7 +99,6 @@ static irqreturn_t rga_isr(int irq, void *prv)
static struct v4l2_m2m_ops rga_m2m_ops = {
.device_run = device_run,
- .job_abort = job_abort,
};
static int
@@ -838,8 +823,6 @@ static int rga_probe(struct platform_device *pdev)
spin_lock_init(&rga->ctrl_lock);
mutex_init(&rga->mutex);
- init_waitqueue_head(&rga->irq_queue);
-
ret = rga_parse_dt(rga);
if (ret)
dev_err(&pdev->dev, "Unable to parse OF data\n");
@@ -882,7 +865,6 @@ static int rga_probe(struct platform_device *pdev)
vfd->v4l2_dev = &rga->v4l2_dev;
video_set_drvdata(vfd, rga);
- snprintf(vfd->name, sizeof(vfd->name), "%s", rga_videodev.name);
rga->vfd = vfd;
platform_set_drvdata(pdev, rga);
@@ -943,7 +925,7 @@ static int rga_remove(struct platform_device *pdev)
{
struct rockchip_rga *rga = platform_get_drvdata(pdev);
- dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, &rga->cmdbuf_virt,
+ dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
free_pages((unsigned long)rga->src_mmu_pages, 3);
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
index 5d43e7ea88af..72d8a159fa7b 100644
--- a/drivers/media/platform/rockchip/rga/rga.h
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -86,8 +86,6 @@ struct rockchip_rga {
/* ctrl parm lock */
spinlock_t ctrl_lock;
- wait_queue_head_t irq_queue;
-
struct rga_ctx *curr;
dma_addr_t cmdbuf_phy;
void *cmdbuf_virt;
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 9ab8e7ee2e1e..c02dce8b4c6c 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -117,6 +117,8 @@ static int sensor_set_power(struct camif_dev *camif, int on)
if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (err == -ENOIOCTLCMD)
+ err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;
@@ -599,7 +601,7 @@ static __poll_t s3c_camif_poll(struct file *file,
mutex_lock(&camif->lock);
if (vp->owner && vp->owner != file->private_data)
- ret = -EBUSY;
+ ret = EPOLLERR;
else
ret = vb2_poll(&vp->vb_queue, file, wait);
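A .poll handler returns a __poll_t event mask, never a negative errno, so signalling the busy case as -EBUSY leaked an errno into the mask; EPOLLERR is the correct way to report failure, as the change above shows. A sketch (example_owner_ok() is a hypothetical ownership check):

static bool example_owner_ok(struct file *file)
{
	return true;	/* hypothetical: compare queue owner with filp */
}

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	if (!example_owner_ok(file))
		return EPOLLERR;	/* event mask, not -EBUSY */

	return 0;	/* a real handler would defer to vb2_poll() here */
}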
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 66aa8cf1d048..e901201b6fcc 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -142,6 +142,8 @@ static const struct vb2_ops g2d_qops = {
.queue_setup = g2d_queue_setup,
.buf_prepare = g2d_buf_prepare,
.buf_queue = g2d_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
@@ -481,19 +483,6 @@ static int vidioc_s_crop(struct file *file, void *prv, const struct v4l2_crop *c
return 0;
}
-static void job_abort(void *prv)
-{
- struct g2d_ctx *ctx = prv;
- struct g2d_dev *dev = ctx->dev;
-
- if (dev->curr == NULL) /* No job currently running */
- return;
-
- wait_event_timeout(dev->irq_queue,
- dev->curr == NULL,
- msecs_to_jiffies(G2D_TIMEOUT));
-}
-
static void device_run(void *prv)
{
struct g2d_ctx *ctx = prv;
@@ -563,7 +552,6 @@ static irqreturn_t g2d_isr(int irq, void *prv)
v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
dev->curr = NULL;
- wake_up(&dev->irq_queue);
return IRQ_HANDLED;
}
@@ -613,7 +601,6 @@ static const struct video_device g2d_videodev = {
static const struct v4l2_m2m_ops g2d_m2m_ops = {
.device_run = device_run,
- .job_abort = job_abort,
};
static const struct of_device_id exynos_g2d_match[];
@@ -633,7 +620,6 @@ static int g2d_probe(struct platform_device *pdev)
spin_lock_init(&dev->ctrl_lock);
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
- init_waitqueue_head(&dev->irq_queue);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -702,7 +688,6 @@ static int g2d_probe(struct platform_device *pdev)
goto rel_vdev;
}
video_set_drvdata(vfd, dev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
dev->vfd = vfd;
v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
vfd->num);
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index dd812b557e87..9ffb458a1b93 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -31,7 +31,6 @@ struct g2d_dev {
struct g2d_ctx *curr;
struct g2d_variant *variant;
int irq;
- wait_queue_head_t irq_queue;
};
struct g2d_frame {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 79b63da27f53..04fd2e0493c0 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2467,26 +2467,19 @@ static int s5p_jpeg_job_ready(void *priv)
return 1;
}
-static void s5p_jpeg_job_abort(void *priv)
-{
-}
-
static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
.device_run = s5p_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
- .job_abort = s5p_jpeg_job_abort,
};
static struct v4l2_m2m_ops exynos3250_jpeg_m2m_ops = {
.device_run = exynos3250_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
- .job_abort = s5p_jpeg_job_abort,
};
static struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
.device_run = exynos4_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
- .job_abort = s5p_jpeg_job_abort,
};
/*
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index a80251ed3143..927a1235408d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf *dst_buf, *src_buf;
- size_t dec_y_addr;
+ struct s5p_mfc_buf *dst_buf, *src_buf;
+ u32 dec_y_addr;
unsigned int frame_type;
/* Make sure we actually have a new frame before continuing. */
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
return;
- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
+ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
/* Copy timestamp / timecode from decoded src to dst and set
appropriate flags. */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
- == dec_y_addr) {
- dst_buf->b->timecode =
- src_buf->b->timecode;
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
+ if (addr == dec_y_addr) {
+ dst_buf->b->timecode = src_buf->b->timecode;
dst_buf->b->vb2_buf.timestamp =
src_buf->b->vb2_buf.timestamp;
dst_buf->b->flags &=
@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *dst_buf;
- size_t dspl_y_addr;
+ u32 dspl_y_addr;
unsigned int frame_type;
- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
+ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
if (IS_MFCV6_PLUS(dev))
frame_type = s5p_mfc_hw_call(dev->mfc_ops,
get_disp_frame_type, ctx);
@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
/* The MFC returns address of the buffer, now we have to
* check which videobuf does it correspond to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
/* Check if this is the buffer we're looking for */
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
- == dspl_y_addr) {
+ if (addr == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
dst_buf->b->sequence = ctx->sequence;
@@ -1449,8 +1450,7 @@ static int s5p_mfc_remove(struct platform_device *pdev)
static int s5p_mfc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ struct s5p_mfc_dev *m_dev = dev_get_drvdata(dev);
int ret;
if (m_dev->num_inst == 0)
@@ -1484,8 +1484,7 @@ static int s5p_mfc_suspend(struct device *dev)
static int s5p_mfc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ struct s5p_mfc_dev *m_dev = dev_get_drvdata(dev);
if (m_dev->num_inst == 0)
return 0;
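A sketch of the shortcut used above: platform_set_drvdata() stores into the embedded struct device, so for a platform device the two lookups below yield the same pointer and PM callbacks can take dev directly (example_get_priv() is illustrative):

static void *example_get_priv(struct device *dev)
{
	/* Equivalent to platform_get_drvdata(to_platform_device(dev)). */
	return dev_get_drvdata(dev);
}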
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 570f391f2cfd..3ad4f5073002 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -692,12 +692,12 @@ static struct mfc_control controls[] = {
.default_value = 10,
},
{
- .id = V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ .default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .menu_skip_mask = 0,
},
{
.id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
@@ -2057,7 +2057,7 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
p->codec.vp8.rc_p_frame_qp = ctrl->val;
break;
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
p->codec.vp8.profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
@@ -2711,4 +2711,3 @@ void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx)
f.fmt.pix_mp.pixelformat = DEF_DST_FMT_ENC;
ctx->dst_fmt = find_format(&f, MFC_FMT_ENC);
}
-
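The dedicated VP8 profile control is a menu control, which the table entry above encodes. s5p-mfc instantiates its controls from that table, so the following is only an illustration of the equivalent generic control-framework call, not the driver's code:

static int example_add_vp8_profile(struct v4l2_ctrl_handler *hdl,
				   const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
			       V4L2_MPEG_VIDEO_VP8_PROFILE_3, 0,
			       V4L2_MPEG_VIDEO_VP8_PROFILE_0);

	return hdl->error;
}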
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 1a0cde017fdf..1d274c64de09 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sh-mobile VEU mem2mem driver
*
* Copyright (C) 2012 Renesas Electronics Corporation
* Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
* Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License as
- * published by the Free Software Foundation
*/
#include <linux/err.h>
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 4dccf29e9d78..6135e13e24d4 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Video Output Unit (VOU) driver
*
* Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 9897213f2618..0a2c0daaffef 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Driver for SuperH Mobile CEU interface
*
@@ -7,11 +8,6 @@
*
* Copyright (C) 2006, Sascha Hauer, Pengutronix
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/init.h>
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index ce00e90d4e3c..6745a6e3f464 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic Platform Camera Driver
*
* Copyright (C) 2008 Magnus Damm
* Based on mt9m001 driver,
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index 232d508c5b66..0b42acd4e3a6 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -339,22 +339,6 @@ static void register_decoders(struct delta_dev *delta)
}
}
-static void delta_lock(void *priv)
-{
- struct delta_ctx *ctx = priv;
- struct delta_dev *delta = ctx->dev;
-
- mutex_lock(&delta->lock);
-}
-
-static void delta_unlock(void *priv)
-{
- struct delta_ctx *ctx = priv;
- struct delta_dev *delta = ctx->dev;
-
- mutex_unlock(&delta->lock);
-}
-
static int delta_open_decoder(struct delta_ctx *ctx, u32 streamformat,
u32 pixelformat, const struct delta_dec **pdec)
{
@@ -1099,8 +1083,6 @@ static const struct v4l2_m2m_ops delta_m2m_ops = {
.device_run = delta_device_run,
.job_ready = delta_job_ready,
.job_abort = delta_job_abort,
- .lock = delta_lock,
- .unlock = delta_unlock,
};
/*
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
index 15080cb00fa7..5a807c7c5e79 100644
--- a/drivers/media/platform/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 2e1933d872ee..721564176d8c 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -22,7 +22,9 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/videodev2.h>
@@ -84,19 +86,14 @@
enum state {
STOPPED = 0,
+ WAIT_FOR_BUFFER,
RUNNING,
- STOPPING,
};
#define MIN_WIDTH 16U
-#define MAX_WIDTH 2048U
+#define MAX_WIDTH 2592U
#define MIN_HEIGHT 16U
-#define MAX_HEIGHT 2048U
-
-#define MIN_JPEG_WIDTH 16U
-#define MAX_JPEG_WIDTH 2592U
-#define MIN_JPEG_HEIGHT 16U
-#define MAX_JPEG_HEIGHT 2592U
+#define MAX_HEIGHT 2592U
#define TIMEOUT_MS 1000
@@ -194,7 +191,7 @@ static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
reg_write(base, reg, reg_read(base, reg) & ~mask);
}
-static int dcmi_start_capture(struct stm32_dcmi *dcmi);
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);
static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
struct dcmi_buf *buf,
@@ -206,6 +203,8 @@ static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
if (!buf)
return;
+ list_del_init(&buf->list);
+
vbuf = &buf->vb;
vbuf->sequence = dcmi->sequence++;
@@ -223,6 +222,8 @@ static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
{
+ struct dcmi_buf *buf;
+
spin_lock_irq(&dcmi->irqlock);
if (dcmi->state != RUNNING) {
@@ -232,34 +233,30 @@ static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
/* Restart a new DMA transfer with next buffer */
if (list_empty(&dcmi->buffers)) {
- dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer\n",
- __func__);
- dcmi->errors_count++;
- dcmi->active = NULL;
-
+ dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
spin_unlock_irq(&dcmi->irqlock);
- return -EINVAL;
+ return 0;
}
-
- dcmi->active = list_entry(dcmi->buffers.next,
- struct dcmi_buf, list);
- list_del_init(&dcmi->active->list);
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
spin_unlock_irq(&dcmi->irqlock);
- return dcmi_start_capture(dcmi);
+ return dcmi_start_capture(dcmi, buf);
}
static void dcmi_dma_callback(void *param)
{
struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
- struct dma_chan *chan = dcmi->dma_chan;
struct dma_tx_state state;
enum dma_status status;
struct dcmi_buf *buf = dcmi->active;
+ spin_lock_irq(&dcmi->irqlock);
+
/* Check DMA status */
- status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
switch (status) {
case DMA_IN_PROGRESS:
@@ -270,6 +267,9 @@ static void dcmi_dma_callback(void *param)
break;
case DMA_ERROR:
dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
+
+ /* Return buffer to V4L2 in error state */
+ dcmi_buffer_done(dcmi, buf, 0, -EIO);
break;
case DMA_COMPLETE:
dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
@@ -277,15 +277,19 @@ static void dcmi_dma_callback(void *param)
/* Return buffer to V4L2 */
dcmi_buffer_done(dcmi, buf, buf->size, 0);
+ spin_unlock_irq(&dcmi->irqlock);
+
/* Restart capture */
if (dcmi_restart_capture(dcmi))
dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
__func__);
- break;
+ return;
default:
dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
break;
}
+
+ spin_unlock_irq(&dcmi->irqlock);
}
static int dcmi_start_dma(struct stm32_dcmi *dcmi,
@@ -313,10 +317,11 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
/* Prepare a DMA transaction */
desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
buf->size,
- DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
if (!desc) {
- dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer size %zu\n",
- __func__, buf->size);
+ dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
+ __func__, &buf->paddr, buf->size);
return -EINVAL;
}
@@ -336,10 +341,9 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
return 0;
}
-static int dcmi_start_capture(struct stm32_dcmi *dcmi)
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
{
int ret;
- struct dcmi_buf *buf = dcmi->active;
if (!buf)
return -EINVAL;
@@ -382,7 +386,6 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
{
struct dma_tx_state state;
enum dma_status status;
- struct dma_chan *chan = dcmi->dma_chan;
struct dcmi_buf *buf = dcmi->active;
if (!buf)
@@ -390,8 +393,7 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
/*
* Because of variable JPEG buffer size sent by sensor,
- * DMA transfer never completes due to transfer size
- * never reached.
+ * the DMA transfer never completes as the transfer size is never reached.
* In order to ensure that all the JPEG data are transferred
* in active buffer memory, DMA is drained.
* Then DMA tx status gives the amount of data transferred
@@ -400,10 +402,10 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
*/
/* Drain DMA */
- dmaengine_synchronize(chan);
+ dmaengine_synchronize(dcmi->dma_chan);
/* Get DMA residue to get JPEG size */
- status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
if (status != DMA_ERROR && state.residue < buf->size) {
/* Return JPEG buffer to V4L2 with received JPEG buffer size */
dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
@@ -430,18 +432,6 @@ static irqreturn_t dcmi_irq_thread(int irq, void *arg)
spin_lock_irq(&dcmi->irqlock);
- /* Stop capture is required */
- if (dcmi->state == STOPPING) {
- reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
-
- dcmi->state = STOPPED;
-
- complete(&dcmi->complete);
-
- spin_unlock_irq(&dcmi->irqlock);
- return IRQ_HANDLED;
- }
-
if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
dcmi->errors_count++;
if (dcmi->misr & IT_OVR)
@@ -495,8 +485,6 @@ static int dcmi_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- dcmi->active = NULL;
-
dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
*nbuffers, size);
@@ -554,21 +542,24 @@ static void dcmi_buf_queue(struct vb2_buffer *vb)
spin_lock_irq(&dcmi->irqlock);
- if (dcmi->state == RUNNING && !dcmi->active) {
+ /* Enqueue to video buffers list */
+ list_add_tail(&buf->list, &dcmi->buffers);
+
+ if (dcmi->state == WAIT_FOR_BUFFER) {
+ dcmi->state = RUNNING;
dcmi->active = buf;
dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
buf->vb.vb2_buf.index);
spin_unlock_irq(&dcmi->irqlock);
- if (dcmi_start_capture(dcmi))
+ if (dcmi_start_capture(dcmi, buf))
dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
__func__);
- } else {
- /* Enqueue to video buffers list */
- list_add_tail(&buf->list, &dcmi->buffers);
- spin_unlock_irq(&dcmi->irqlock);
+ return;
}
+
+ spin_unlock_irq(&dcmi->irqlock);
}
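A reduced sketch of the deferred-start handshake above (example_* names are illustrative): when no buffer is available the driver parks in WAIT_FOR_BUFFER instead of failing, and the next .buf_queue call kicks capture off.

enum example_dcmi_state { EX_STOPPED, EX_WAIT_FOR_BUFFER, EX_RUNNING };

static bool example_buf_queue_should_start(enum example_dcmi_state *state)
{
	if (*state != EX_WAIT_FOR_BUFFER)
		return false;

	*state = EX_RUNNING;
	return true;	/* caller starts capture on the buffer just queued */
}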
static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -578,9 +569,9 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
u32 val = 0;
int ret;
- ret = clk_enable(dcmi->mclk);
+ ret = pm_runtime_get_sync(dcmi->dev);
if (ret) {
- dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock\n",
+ dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync\n",
__func__);
goto err_release_buffers;
}
@@ -590,7 +581,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret && ret != -ENOIOCTLCMD) {
dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error",
__func__);
- goto err_disable_clock;
+ goto err_pm_put;
}
spin_lock_irq(&dcmi->irqlock);
@@ -636,13 +627,10 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Enable dcmi */
reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
- dcmi->state = RUNNING;
-
dcmi->sequence = 0;
dcmi->errors_count = 0;
dcmi->overrun_count = 0;
dcmi->buffers_count = 0;
- dcmi->active = NULL;
/*
* Start transfer if at least one buffer has been queued,
@@ -650,17 +638,20 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
*/
if (list_empty(&dcmi->buffers)) {
dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
spin_unlock_irq(&dcmi->irqlock);
return 0;
}
- dcmi->active = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
- list_del_init(&dcmi->active->list);
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
+
+ dcmi->state = RUNNING;
dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
spin_unlock_irq(&dcmi->irqlock);
- ret = dcmi_start_capture(dcmi);
+ ret = dcmi_start_capture(dcmi, buf);
if (ret) {
dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
__func__);
@@ -675,8 +666,8 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
err_subdev_streamoff:
v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
-err_disable_clock:
- clk_disable(dcmi->mclk);
+err_pm_put:
+ pm_runtime_put(dcmi->dev);
err_release_buffers:
spin_lock_irq(&dcmi->irqlock);
@@ -684,15 +675,11 @@ err_release_buffers:
* Return all buffers to vb2 in QUEUED state.
* This will give ownership back to userspace
*/
- if (dcmi->active) {
- buf = dcmi->active;
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- dcmi->active = NULL;
- }
list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
list_del_init(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
+ dcmi->active = NULL;
spin_unlock_irq(&dcmi->irqlock);
return ret;
@@ -702,8 +689,6 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
{
struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
struct dcmi_buf *buf, *node;
- unsigned long time_ms = msecs_to_jiffies(TIMEOUT_MS);
- long timeout;
int ret;
/* Disable stream on the sub device */
@@ -713,13 +698,6 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
__func__, ret);
spin_lock_irq(&dcmi->irqlock);
- dcmi->state = STOPPING;
- spin_unlock_irq(&dcmi->irqlock);
-
- timeout = wait_for_completion_interruptible_timeout(&dcmi->complete,
- time_ms);
-
- spin_lock_irq(&dcmi->irqlock);
/* Disable interruptions */
reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
@@ -727,29 +705,21 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
/* Disable DCMI */
reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
- if (!timeout) {
- dev_err(dcmi->dev, "%s: Timeout during stop streaming\n",
- __func__);
- dcmi->state = STOPPED;
- }
-
/* Return all queued buffers to vb2 in ERROR state */
- if (dcmi->active) {
- buf = dcmi->active;
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- dcmi->active = NULL;
- }
list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
list_del_init(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
+ dcmi->active = NULL;
+ dcmi->state = STOPPED;
+
spin_unlock_irq(&dcmi->irqlock);
/* Stop all pending DMA operations */
dmaengine_terminate_all(dcmi->dma_chan);
- clk_disable(dcmi->mclk);
+ pm_runtime_put(dcmi->dev);
if (dcmi->errors_count)
dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
@@ -843,14 +813,8 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
}
/* Limit to hardware capabilities */
- if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
- pix->width = clamp(pix->width, MIN_JPEG_WIDTH, MAX_JPEG_WIDTH);
- pix->height =
- clamp(pix->height, MIN_JPEG_HEIGHT, MAX_JPEG_HEIGHT);
- } else {
- pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
- pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
- }
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
/* No crop if JPEG is requested */
do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);
@@ -1605,23 +1569,20 @@ static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
struct device_node *ep = NULL;
struct device_node *remote;
- while (1) {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- return -EINVAL;
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- of_node_put(ep);
- return -EINVAL;
- }
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- /* Remote node to connect */
- dcmi->entity.node = remote;
- dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
- return 0;
- }
+ /* Remote node to connect */
+ dcmi->entity.node = remote;
+ dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
+ return 0;
}
static int dcmi_graph_init(struct stm32_dcmi *dcmi)
@@ -1696,23 +1657,20 @@ static int dcmi_probe(struct platform_device *pdev)
}
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+ of_node_put(np);
if (ret) {
dev_err(&pdev->dev, "Could not parse the endpoint\n");
- of_node_put(np);
return -ENODEV;
}
if (ep.bus_type == V4L2_MBUS_CSI2) {
dev_err(&pdev->dev, "CSI bus not supported\n");
- of_node_put(np);
return -ENODEV;
}
dcmi->bus.flags = ep.bus.parallel.flags;
dcmi->bus.bus_width = ep.bus.parallel.bus_width;
dcmi->bus.data_shift = ep.bus.parallel.data_shift;
- of_node_put(np);
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev, "Could not get irq\n");
@@ -1751,12 +1709,6 @@ static int dcmi_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- ret = clk_prepare(mclk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to prepare mclk %p\n", mclk);
- goto err_dma_release;
- }
-
spin_lock_init(&dcmi->irqlock);
mutex_init(&dcmi->lock);
init_completion(&dcmi->complete);
@@ -1772,7 +1724,7 @@ static int dcmi_probe(struct platform_device *pdev)
/* Initialize the top-level structure */
ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
if (ret)
- goto err_clk_unprepare;
+ goto err_dma_release;
dcmi->vdev = video_device_alloc();
if (!dcmi->vdev) {
@@ -1832,14 +1784,15 @@ static int dcmi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Probe done\n");
platform_set_drvdata(pdev, dcmi);
+
+ pm_runtime_enable(&pdev->dev);
+
return 0;
err_device_release:
video_device_release(dcmi->vdev);
err_device_unregister:
v4l2_device_unregister(&dcmi->v4l2_dev);
-err_clk_unprepare:
- clk_unprepare(dcmi->mclk);
err_dma_release:
dma_release_channel(dcmi->dma_chan);
@@ -1850,20 +1803,72 @@ static int dcmi_remove(struct platform_device *pdev)
{
struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+
v4l2_async_notifier_unregister(&dcmi->notifier);
v4l2_device_unregister(&dcmi->v4l2_dev);
- clk_unprepare(dcmi->mclk);
+
dma_release_channel(dcmi->dma_chan);
return 0;
}
+static __maybe_unused int dcmi_runtime_suspend(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dcmi->mclk);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_runtime_resume(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dcmi->mclk);
+ if (ret)
+ dev_err(dev, "%s: Failed to prepare_enable clock\n", __func__);
+
+ return ret;
+}
+
+static __maybe_unused int dcmi_suspend(struct device *dev)
+{
+ /* disable clock */
+ pm_runtime_force_suspend(dev);
+
+ /* change pinctrl state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_resume(struct device *dev)
+{
+ /* restore pinctrl default state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* enable clock */
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dcmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dcmi_suspend, dcmi_resume)
+ SET_RUNTIME_PM_OPS(dcmi_runtime_suspend,
+ dcmi_runtime_resume, NULL)
+};
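
The new dev_pm_ops tie mclk to the device's runtime PM state, so the streaming paths only need balanced get/put calls; the stop path above already uses pm_runtime_put(). A minimal sketch of the matching start-side call, assuming the real start_streaming hunk (not shown here) follows the same pattern:

static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	int ret;

	/* Runtime-resume the device; dcmi_runtime_resume() enables mclk */
	ret = pm_runtime_get_sync(dcmi->dev);
	if (ret < 0) {
		dev_err(dcmi->dev, "%s: Failed to get runtime PM (%d)\n",
			__func__, ret);
		return ret;
	}

	/* ... interrupt enable and DCMI programming elided ... */

	return 0;
}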
+
static struct platform_driver stm32_dcmi_driver = {
.probe = dcmi_probe,
.remove = dcmi_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(stm32_dcmi_of_match),
+ .pm = &dcmi_pm_ops,
},
};
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index e395aa85c8ad..d70871d0ad2d 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -953,23 +953,6 @@ static void job_abort(void *priv)
ctx->aborting = 1;
}
-/*
- * Lock access to the device
- */
-static void vpe_lock(void *priv)
-{
- struct vpe_ctx *ctx = priv;
- struct vpe_dev *dev = ctx->dev;
- mutex_lock(&dev->dev_mutex);
-}
-
-static void vpe_unlock(void *priv)
-{
- struct vpe_ctx *ctx = priv;
- struct vpe_dev *dev = ctx->dev;
- mutex_unlock(&dev->dev_mutex);
-}
-
static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
@@ -2434,8 +2417,6 @@ static const struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_ready = job_ready,
.job_abort = job_abort,
- .lock = vpe_lock,
- .unlock = vpe_unlock,
};
static int vpe_runtime_get(struct platform_device *pdev)
@@ -2485,7 +2466,6 @@ static void vpe_fw_cb(struct platform_device *pdev)
}
video_set_drvdata(vfd, dev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
vfd->num);
}
diff --git a/drivers/media/platform/vicodec/Kconfig b/drivers/media/platform/vicodec/Kconfig
new file mode 100644
index 000000000000..2503bcb1529f
--- /dev/null
+++ b/drivers/media/platform/vicodec/Kconfig
@@ -0,0 +1,13 @@
+config VIDEO_VICODEC
+ tristate "Virtual Codec Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ help
+ Driver for a Virtual Codec
+
+ Like the vim2m driver, this driver emulates a video device
+ node, in this case one that exposes an emulated hardware codec.
+
+ When in doubt, say N.
diff --git a/drivers/media/platform/vicodec/Makefile b/drivers/media/platform/vicodec/Makefile
new file mode 100644
index 000000000000..197229428953
--- /dev/null
+++ b/drivers/media/platform/vicodec/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+vicodec-objs := vicodec-core.o vicodec-codec.o
+
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec.o
diff --git a/drivers/media/platform/vicodec/vicodec-codec.c b/drivers/media/platform/vicodec/vicodec-codec.c
new file mode 100644
index 000000000000..2d047646f614
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-codec.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2016 Tom aan de Wiel
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * 8x8 Fast Walsh Hadamard Transform in sequency order based on the paper:
+ *
+ * A Recursive Algorithm for Sequency-Ordered Fast Walsh Transforms,
+ * R.D. Brown, 1977
+ */
+
+#include <linux/string.h>
+#include "vicodec-codec.h"
+
+#define ALL_ZEROS 15
+#define DEADZONE_WIDTH 20
+
+static const uint8_t zigzag[64] = {
+ 0,
+ 1, 8,
+ 2, 9, 16,
+ 3, 10, 17, 24,
+ 4, 11, 18, 25, 32,
+ 5, 12, 19, 26, 33, 40,
+ 6, 13, 20, 27, 34, 41, 48,
+ 7, 14, 21, 28, 35, 42, 49, 56,
+ 15, 22, 29, 36, 43, 50, 57,
+ 23, 30, 37, 44, 51, 58,
+ 31, 38, 45, 52, 59,
+ 39, 46, 53, 60,
+ 47, 54, 61,
+ 55, 62,
+ 63,
+};
+
+
+static int rlc(const s16 *in, __be16 *output, int blocktype)
+{
+ s16 block[8 * 8];
+ s16 *wp = block;
+ int i = 0;
+ int x, y;
+ int ret = 0;
+
+ /* read in block from framebuffer */
+ int lastzero_run = 0;
+ int to_encode;
+
+ for (y = 0; y < 8; y++) {
+ for (x = 0; x < 8; x++) {
+ *wp = in[x + y * 8];
+ wp++;
+ }
+ }
+
+ /* keep track of the number of trailing zeros */
+ for (i = 63; i >= 0 && !block[zigzag[i]]; i--)
+ lastzero_run++;
+
+ *output++ = (blocktype == PBLOCK ? htons(PFRAME_BIT) : 0);
+ ret++;
+
+ to_encode = 8 * 8 - (lastzero_run > 14 ? lastzero_run : 0);
+
+ i = 0;
+ while (i < to_encode) {
+ int cnt = 0;
+ int tmp;
+
+ /* count leading zeros */
+ while ((tmp = block[zigzag[i]]) == 0 && cnt < 14) {
+ cnt++;
+ i++;
+ if (i == to_encode) {
+ cnt--;
+ break;
+ }
+ }
+ /* 4 bits for run, 12 for coefficient (quantization by 4) */
+ *output++ = htons((cnt | tmp << 4));
+ i++;
+ ret++;
+ }
+ if (lastzero_run > 14) {
+ *output = htons(ALL_ZEROS | 0);
+ ret++;
+ }
+
+ return ret;
+}
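
Each run/coefficient pair above is packed into one big-endian 16-bit word: the low 4 bits carry the zero-run length (0xf meaning the rest of the block is zero) and the upper 12 bits the quantized coefficient. A standalone sketch of the packing arithmetic (userspace illustration, not driver code):

#include <stdint.h>
#include <arpa/inet.h>	/* htons/ntohs: values are stored big-endian */

/* Pack a zero-run (0..14) and a signed quantized coefficient. */
static uint16_t rlc_pack(unsigned int run, int16_t coeff)
{
	return htons((uint16_t)(run | ((uint16_t)coeff << 4)));
}

/* Unpack; the arithmetic right shift restores the coefficient's sign. */
static void rlc_unpack(uint16_t word, unsigned int *run, int16_t *coeff)
{
	int16_t v = (int16_t)ntohs(word);

	*run = v & 0xf;
	*coeff = v >> 4;
}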
+
+/*
+ * This function will worst-case advance *rlc_in by 65 * 2 bytes:
+ * one s16 value for the header and 8 * 8 coefficients of type s16.
+ */
+static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
+{
+ /* header */
+ const __be16 *input = *rlc_in;
+ s16 ret = ntohs(*input++);
+ int dec_count = 0;
+ s16 block[8 * 8 + 16];
+ s16 *wp = block;
+ int i;
+
+ /*
+ * Now de-compress: each code word expands to up to 15 values
+ * (a zero run plus one coefficient), or fills the remainder of
+ * the 64 entries with zeroes if it is the last word to expand.
+ *
+ * So block has to hold 8 * 8 + 16 entries; the '+ 16' allows
+ * for overflow if the incoming data is malformed.
+ */
+ while (dec_count < 8 * 8) {
+ s16 in = ntohs(*input++);
+ int length = in & 0xf;
+ int coeff = in >> 4;
+
+ /* fill remainder with zeros */
+ if (length == 15) {
+ for (i = 0; i < 64 - dec_count; i++)
+ *wp++ = 0;
+ break;
+ }
+
+ for (i = 0; i < length; i++)
+ *wp++ = 0;
+ *wp++ = coeff;
+ dec_count += length + 1;
+ }
+
+ wp = block;
+
+ for (i = 0; i < 64; i++) {
+ int pos = zigzag[i];
+ int y = pos / 8;
+ int x = pos % 8;
+
+ dwht_out[x + y * 8] = *wp++;
+ }
+ *rlc_in = input;
+ return ret;
+}
+
+static const int quant_table[] = {
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 2, 2, 2, 2, 2, 2, 3, 6,
+ 2, 2, 2, 2, 2, 3, 6, 6,
+ 2, 2, 2, 2, 3, 6, 6, 6,
+ 2, 2, 2, 3, 6, 6, 6, 6,
+ 2, 2, 3, 6, 6, 6, 6, 8,
+};
+
+static const int quant_table_p[] = {
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 6,
+ 3, 3, 3, 3, 3, 3, 6, 6,
+ 3, 3, 3, 3, 3, 6, 6, 9,
+ 3, 3, 3, 3, 6, 6, 9, 9,
+ 3, 3, 3, 6, 6, 9, 9, 10,
+};
+
+static void quantize_intra(s16 *coeff, s16 *de_coeff)
+{
+ const int *quant = quant_table;
+ int i, j;
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++, quant++, coeff++, de_coeff++) {
+ *coeff >>= *quant;
+ if (*coeff >= -DEADZONE_WIDTH &&
+ *coeff <= DEADZONE_WIDTH)
+ *coeff = *de_coeff = 0;
+ else
+ *de_coeff = *coeff << *quant;
+ }
+ }
+}
+
+static void dequantize_intra(s16 *coeff)
+{
+ const int *quant = quant_table;
+ int i, j;
+
+ for (j = 0; j < 8; j++)
+ for (i = 0; i < 8; i++, quant++, coeff++)
+ *coeff <<= *quant;
+}
+
+static void quantize_inter(s16 *coeff, s16 *de_coeff)
+{
+ const int *quant = quant_table_p;
+ int i, j;
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++, quant++, coeff++, de_coeff++) {
+ *coeff >>= *quant;
+ if (*coeff >= -DEADZONE_WIDTH &&
+ *coeff <= DEADZONE_WIDTH)
+ *coeff = *de_coeff = 0;
+ else
+ *de_coeff = *coeff << *quant;
+ }
+ }
+}
+
+static void dequantize_inter(s16 *coeff)
+{
+ const int *quant = quant_table_p;
+ int i, j;
+
+ for (j = 0; j < 8; j++)
+ for (i = 0; i < 8; i++, quant++, coeff++)
+ *coeff <<= *quant;
+}
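
Both quantizers shift each coefficient right by its table entry and zero anything whose shifted magnitude is within DEADZONE_WIDTH; dequantization is the matching left shift. A worked example for one coefficient (values chosen purely for illustration):

/*
 * quant shift = 2 (a typical low-frequency entry of quant_table),
 * DEADZONE_WIDTH = 20:
 *
 *   coeff = 100 -> 100 >> 2 = 25; |25| > 20, kept;
 *                  de_coeff = 25 << 2 = 100 (no loss for this value)
 *   coeff =  60 ->  60 >> 2 = 15; |15| <= 20, coeff and de_coeff zeroed
 *   coeff = 103 -> 103 >> 2 = 25; de_coeff = 100 (low bits are lost)
 */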
+
+static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
+ unsigned int input_step, bool intra)
+{
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+ const u8 *tmp = block;
+ s16 *out = output_block;
+ int add = intra ? 256 : 0;
+ unsigned int i;
+
+ /* stage 1 */
+ stride *= input_step;
+
+ for (i = 0; i < 8; i++, tmp += stride, out += 8) {
+ if (input_step == 1) {
+ workspace1[0] = tmp[0] + tmp[1] - add;
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3] - add;
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5] - add;
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7] - add;
+ workspace1[7] = tmp[6] - tmp[7];
+ } else {
+ workspace1[0] = tmp[0] + tmp[2] - add;
+ workspace1[1] = tmp[0] - tmp[2];
+
+ workspace1[2] = tmp[4] + tmp[6] - add;
+ workspace1[3] = tmp[4] - tmp[6];
+
+ workspace1[4] = tmp[8] + tmp[10] - add;
+ workspace1[5] = tmp[8] - tmp[10];
+
+ workspace1[6] = tmp[12] + tmp[14] - add;
+ workspace1[7] = tmp[12] - tmp[14];
+ }
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1 * 8];
+ workspace1[1] = out[0] - out[1 * 8];
+
+ workspace1[2] = out[2 * 8] + out[3 * 8];
+ workspace1[3] = out[2 * 8] - out[3 * 8];
+
+ workspace1[4] = out[4 * 8] + out[5 * 8];
+ workspace1[5] = out[4 * 8] - out[5 * 8];
+
+ workspace1[6] = out[6 * 8] + out[7 * 8];
+ workspace1[7] = out[6 * 8] - out[7 * 8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+ /* stage 3 */
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+ }
+}
+
+/*
+ * Not the nicest way of doing it, but P-blocks get twice the range of
+ * the I-blocks. Therefore we need a type bigger than 8 bits.
+ * Furthermore the values can be negative... This is just a version
+ * that works with 16-bit signed data.
+ */
+static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
+{
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+ const s16 *tmp = block;
+ s16 *out = output_block;
+ int i;
+
+ for (i = 0; i < 8; i++, tmp += stride, out += 8) {
+ /* stage 1 */
+ workspace1[0] = tmp[0] + tmp[1];
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3];
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5];
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7];
+ workspace1[7] = tmp[6] - tmp[7];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1*8];
+ workspace1[1] = out[0] - out[1*8];
+
+ workspace1[2] = out[2*8] + out[3*8];
+ workspace1[3] = out[2*8] - out[3*8];
+
+ workspace1[4] = out[4*8] + out[5*8];
+ workspace1[5] = out[4*8] - out[5*8];
+
+ workspace1[6] = out[6*8] + out[7*8];
+ workspace1[7] = out[6*8] - out[7*8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0*8] = workspace2[0] + workspace2[4];
+ out[1*8] = workspace2[0] - workspace2[4];
+ out[2*8] = workspace2[1] - workspace2[5];
+ out[3*8] = workspace2[1] + workspace2[5];
+ out[4*8] = workspace2[2] + workspace2[6];
+ out[5*8] = workspace2[2] - workspace2[6];
+ out[6*8] = workspace2[3] - workspace2[7];
+ out[7*8] = workspace2[3] + workspace2[7];
+ }
+}
+
+static void ifwht(const s16 *block, s16 *output_block, int intra)
+{
+ /*
+ * we'll need more than 8 bits for the transformed coefficients,
+ * so use the native word size of the cpu
+ */
+ int workspace1[8], workspace2[8];
+ int inter = intra ? 0 : 1;
+ const s16 *tmp = block;
+ s16 *out = output_block;
+ int i;
+
+ for (i = 0; i < 8; i++, tmp += 8, out += 8) {
+ /* stage 1 */
+ workspace1[0] = tmp[0] + tmp[1];
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3];
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5];
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7];
+ workspace1[7] = tmp[6] - tmp[7];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1 * 8];
+ workspace1[1] = out[0] - out[1 * 8];
+
+ workspace1[2] = out[2 * 8] + out[3 * 8];
+ workspace1[3] = out[2 * 8] - out[3 * 8];
+
+ workspace1[4] = out[4 * 8] + out[5 * 8];
+ workspace1[5] = out[4 * 8] - out[5 * 8];
+
+ workspace1[6] = out[6 * 8] + out[7 * 8];
+ workspace1[7] = out[6 * 8] - out[7 * 8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ if (inter) {
+ int d;
+
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+
+ for (d = 0; d < 8; d++)
+ out[8 * d] >>= 6;
+ } else {
+ int d;
+
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+
+ for (d = 0; d < 8; d++) {
+ out[8 * d] >>= 6;
+ out[8 * d] += 128;
+ }
+ }
+ }
+}
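
Each unnormalized 8-point pass scales the DC term by 8, so the row pass followed by the column pass scales it by 64; that is why ifwht() shifts its final outputs right by 6 (and re-adds the 128 bias for intra blocks). A quick standalone check of the 1-D DC gain, following the same three butterfly stages (illustration only):

#include <stdio.h>

int main(void)
{
	int in[8] = { 5, 5, 5, 5, 5, 5, 5, 5 };
	int s1[8], s2[8], out0;

	/* stage 1 */
	s1[0] = in[0] + in[1]; s1[2] = in[2] + in[3];
	s1[4] = in[4] + in[5]; s1[6] = in[6] + in[7];
	/* stage 2 */
	s2[0] = s1[0] + s1[2]; s2[4] = s1[4] + s1[6];
	/* stage 3: the DC term is the sum of all eight inputs */
	out0 = s2[0] + s2[4];

	printf("%d\n", out0);	/* prints 40 = 8 * 5 */
	return 0;
}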
+
+static void fill_encoder_block(const u8 *input, s16 *dst,
+ unsigned int stride, unsigned int input_step)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++, input += input_step)
+ *dst++ = *input;
+ input += (stride - 8) * input_step;
+ }
+}
+
+static int var_intra(const s16 *input)
+{
+ int32_t mean = 0;
+ int32_t ret = 0;
+ const s16 *tmp = input;
+ int i;
+
+ for (i = 0; i < 8 * 8; i++, tmp++)
+ mean += *tmp;
+ mean /= 64;
+ tmp = input;
+ for (i = 0; i < 8 * 8; i++, tmp++)
+ ret += (*tmp - mean) < 0 ? -(*tmp - mean) : (*tmp - mean);
+ return ret;
+}
+
+static int var_inter(const s16 *old, const s16 *new)
+{
+ int32_t ret = 0;
+ int i;
+
+ for (i = 0; i < 8 * 8; i++, old++, new++)
+ ret += (*old - *new) < 0 ? -(*old - *new) : (*old - *new);
+ return ret;
+}
+
+static int decide_blocktype(const u8 *cur, const u8 *reference,
+ s16 *deltablock, unsigned int stride,
+ unsigned int input_step)
+{
+ s16 tmp[64];
+ s16 old[64];
+ s16 *work = tmp;
+ unsigned int k, l;
+ int vari;
+ int vard;
+
+ fill_encoder_block(cur, tmp, stride, input_step);
+ fill_encoder_block(reference, old, 8, 1);
+ vari = var_intra(tmp);
+
+ for (k = 0; k < 8; k++) {
+ for (l = 0; l < 8; l++) {
+ *deltablock = *work - *reference;
+ deltablock++;
+ work++;
+ reference++;
+ }
+ }
+ deltablock -= 64;
+ vard = var_inter(old, tmp);
+ return vari <= vard ? IBLOCK : PBLOCK;
+}
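
decide_blocktype() picks the cheaper of two L1 costs: vari, the current block's total absolute deviation from its own mean, and vard, the sum of absolute differences against the reference block; ties go to intra. Two illustrative cases:

/*
 * - Flat, unchanged block (all samples equal to the reference):
 *   vari = 0, vard = 0  ->  vari <= vard, IBLOCK.
 * - Textured but static block: vari > 0, vard = 0  ->  PBLOCK,
 *   and the all-zero delta block compresses extremely well.
 */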
+
+static void fill_decoder_block(u8 *dst, const s16 *input, int stride)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++)
+ *dst++ = *input++;
+ dst += stride - 8;
+ }
+}
+
+static void add_deltas(s16 *deltas, const u8 *ref, int stride)
+{
+ int k, l;
+
+ for (k = 0; k < 8; k++) {
+ for (l = 0; l < 8; l++) {
+ *deltas += *ref++;
+ /*
+ * Due to quantizing, it is possible that the
+ * decoded coefficients are slightly out of range
+ */
+ if (*deltas < 0)
+ *deltas = 0;
+ else if (*deltas > 255)
+ *deltas = 255;
+ deltas++;
+ }
+ ref += stride - 8;
+ }
+}
+
+static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
+ struct cframe *cf, u32 height, u32 width,
+ unsigned int input_step,
+ bool is_intra, bool next_is_intra)
+{
+ u8 *input_start = input;
+ __be16 *rlco_start = *rlco;
+ s16 deltablock[64];
+ __be16 pframe_bit = htons(PFRAME_BIT);
+ u32 encoding = 0;
+ unsigned int last_size = 0;
+ unsigned int i, j;
+
+ for (j = 0; j < height / 8; j++) {
+ for (i = 0; i < width / 8; i++) {
+ /* intra code, first frame is always intra coded. */
+ int blocktype = IBLOCK;
+ unsigned int size;
+
+ if (!is_intra)
+ blocktype = decide_blocktype(input, refp,
+ deltablock, width, input_step);
+ if (is_intra || blocktype == IBLOCK) {
+ fwht(input, cf->coeffs, width, input_step, 1);
+ quantize_intra(cf->coeffs, cf->de_coeffs);
+ blocktype = IBLOCK;
+ } else {
+ /* inter code */
+ encoding |= FRAME_PCODED;
+ fwht16(deltablock, cf->coeffs, 8, 0);
+ quantize_inter(cf->coeffs, cf->de_coeffs);
+ }
+ if (!next_is_intra) {
+ ifwht(cf->de_coeffs, cf->de_fwht, blocktype);
+
+ if (blocktype == PBLOCK)
+ add_deltas(cf->de_fwht, refp, 8);
+ fill_decoder_block(refp, cf->de_fwht, 8);
+ }
+
+ input += 8 * input_step;
+ refp += 8 * 8;
+
+ if (encoding & FRAME_UNENCODED)
+ continue;
+
+ size = rlc(cf->coeffs, *rlco, blocktype);
+ if (last_size == size &&
+ !memcmp(*rlco + 1, *rlco - size + 1, 2 * size - 2)) {
+ __be16 *last_rlco = *rlco - size;
+ s16 hdr = ntohs(*last_rlco);
+
+ if (!((*last_rlco ^ **rlco) & pframe_bit) &&
+ (hdr & DUPS_MASK) < DUPS_MASK)
+ *last_rlco = htons(hdr + 2);
+ else
+ *rlco += size;
+ } else {
+ *rlco += size;
+ }
+ if (*rlco >= rlco_max)
+ encoding |= FRAME_UNENCODED;
+ last_size = size;
+ }
+ input += width * 7 * input_step;
+ }
+ if (encoding & FRAME_UNENCODED) {
+ u8 *out = (u8 *)rlco_start;
+
+ input = input_start;
+ /*
+ * The compressed stream should never contain the magic
+ * header, so when we copy the YUV data we replace 0xff
+ * by 0xfe. Since YUV is limited range, such values
+ * shouldn't appear anyway.
+ */
+ for (i = 0; i < height * width; i++, input += input_step)
+ *out++ = (*input == 0xff) ? 0xfe : *input;
+ *rlco = (__be16 *)out;
+ }
+ return encoding;
+}
+
+u32 encode_frame(struct raw_frame *frm, struct raw_frame *ref_frm,
+ struct cframe *cf, bool is_intra, bool next_is_intra)
+{
+ unsigned int size = frm->height * frm->width;
+ __be16 *rlco = cf->rlc_data;
+ __be16 *rlco_max;
+ u32 encoding;
+
+ rlco_max = rlco + size / 2 - 256;
+ encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf,
+ frm->height, frm->width,
+ 1, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= LUMA_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ rlco_max = rlco + size / 8 - 256;
+ encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max, cf,
+ frm->height / 2, frm->width / 2,
+ frm->chroma_step, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= CB_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ rlco_max = rlco + size / 8 - 256;
+ encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max, cf,
+ frm->height / 2, frm->width / 2,
+ frm->chroma_step, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= CR_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ cf->size = (rlco - cf->rlc_data) * sizeof(*rlco);
+ return encoding;
+}
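
The rlco_max budgets above are in 16-bit words relative to the raw plane sizes: size bytes of luma is size / 2 words, and each 4:2:0 chroma plane is a quarter of that, size / 8 words. A short recap of the margin arithmetic:

/*
 * The overflow check in encode_plane() runs only after a macroblock
 * has been written, and a macroblock is at most 65 words (1 header +
 * 64 coefficients). The 256-word margin therefore lets the check trip
 * past rlco_max without ever overrunning the plane-sized buffer.
 */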
+
+static void decode_plane(struct cframe *cf, const __be16 **rlco, u8 *ref,
+ u32 height, u32 width, bool uncompressed)
+{
+ unsigned int copies = 0;
+ s16 copy[8 * 8];
+ s16 stat;
+ unsigned int i, j;
+
+ if (uncompressed) {
+ memcpy(ref, *rlco, width * height);
+ *rlco += width * height / 2;
+ return;
+ }
+
+ /*
+ * When decoding each macroblock the rlco pointer will be increased
+ * by 65 * 2 bytes worst-case.
+ * To avoid overflow the buffer has to be 65/64th of the actual raw
+ * image size, just in case someone feeds it malicious data.
+ */
+ for (j = 0; j < height / 8; j++) {
+ for (i = 0; i < width / 8; i++) {
+ u8 *refp = ref + j * 8 * width + i * 8;
+
+ if (copies) {
+ memcpy(cf->de_fwht, copy, sizeof(copy));
+ if (stat & PFRAME_BIT)
+ add_deltas(cf->de_fwht, refp, width);
+ fill_decoder_block(refp, cf->de_fwht, width);
+ copies--;
+ continue;
+ }
+
+ stat = derlc(rlco, cf->coeffs);
+
+ if (stat & PFRAME_BIT)
+ dequantize_inter(cf->coeffs);
+ else
+ dequantize_intra(cf->coeffs);
+
+ ifwht(cf->coeffs, cf->de_fwht,
+ (stat & PFRAME_BIT) ? 0 : 1);
+
+ copies = (stat & DUPS_MASK) >> 1;
+ if (copies)
+ memcpy(copy, cf->de_fwht, sizeof(copy));
+ if (stat & PFRAME_BIT)
+ add_deltas(cf->de_fwht, refp, width);
+ fill_decoder_block(refp, cf->de_fwht, width);
+ }
+ }
+}
+
+void decode_frame(struct cframe *cf, struct raw_frame *ref, u32 hdr_flags)
+{
+ const __be16 *rlco = cf->rlc_data;
+
+ decode_plane(cf, &rlco, ref->luma, cf->height, cf->width,
+ hdr_flags & VICODEC_FL_LUMA_IS_UNCOMPRESSED);
+ decode_plane(cf, &rlco, ref->cb, cf->height / 2, cf->width / 2,
+ hdr_flags & VICODEC_FL_CB_IS_UNCOMPRESSED);
+ decode_plane(cf, &rlco, ref->cr, cf->height / 2, cf->width / 2,
+ hdr_flags & VICODEC_FL_CR_IS_UNCOMPRESSED);
+}
diff --git a/drivers/media/platform/vicodec/vicodec-codec.h b/drivers/media/platform/vicodec/vicodec-codec.h
new file mode 100644
index 000000000000..cdfad1332a3e
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-codec.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2016 Tom aan de Wiel
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef VICODEC_RLC_H
+#define VICODEC_RLC_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * The compressed format consists of a cframe_hdr struct followed by the
+ * compressed frame data. The header contains the size of that data.
+ * Each Y, Cb and Cr plane is compressed separately. If the compressed
+ * size of each plane becomes larger than the uncompressed size, then
+ * that plane is stored uncompressed and the corresponding bit is set
+ * in the flags field of the header.
+ *
+ * Each compressed plane consists of macroblocks and each macroblock
+ * is run-length-encoded. Each macroblock starts with a 16 bit value.
+ * Bit 15 indicates if this is a P-coded macroblock (1) or not (0).
+ * P-coded macroblocks contain a delta against the previous frame.
+ *
+ * Bits 1-12 contain a number. If non-zero, then this same macroblock
+ * repeats that number of times. This results in a high degree of
+ * compression for generated images like colorbars.
+ *
+ * Following this macroblock header the MB coefficients are run-length
+ * encoded: the top 12 bits contain the coefficient, the bottom 4 bits
+ * tell how many times this coefficient occurs. The value 0xf indicates
+ * that the remainder of the macroblock should be filled with zeroes.
+ *
+ * All 16 and 32 bit values are stored in big-endian (network) order.
+ *
+ * Each cframe_hdr starts with an 8-byte magic header that is
+ * guaranteed not to occur in the compressed frame data. This header
+ * can be used to sync to the next frame.
+ *
+ * This codec uses the Fast Walsh Hadamard Transform. Tom aan de Wiel
+ * developed this as part of a university project, specifically for use
+ * with this driver. His project report can be found here:
+ *
+ * https://hverkuil.home.xs4all.nl/fwht.pdf
+ */
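
Putting that together, one compressed frame on the wire looks like this (a sketch of the layout described above; all multi-byte fields big-endian):

/*
 *	+--------------------+
 *	| struct cframe_hdr  |  starts with the 8-byte magic, then
 *	|                    |  version, width, height, flags, ...,
 *	|                    |  and 'size' = payload length in bytes
 *	+--------------------+
 *	| macroblock 0       |  16-bit MB header (P-bit, repeat count),
 *	| macroblock 1       |  then run-length coded coefficients
 *	| ...                |
 *	+--------------------+
 */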
+
+/*
+ * Note: bit 0 of the header must always be 0. Otherwise it cannot
+ * be guaranteed that the magic 8-byte sequence (see below) can
+ * never occur in the rlc output.
+ */
+#define PFRAME_BIT (1 << 15)
+#define DUPS_MASK 0x1ffe
+
+/*
+ * This is a sequence of 8 bytes with the low 4 bits of each byte set
+ * to 0xf.
+ *
+ * This sequence cannot occur in the encoded data.
+ */
+#define VICODEC_MAGIC1 0x4f4f4f4f
+#define VICODEC_MAGIC2 0xffffffff
+
+#define VICODEC_VERSION 1
+
+#define VICODEC_MAX_WIDTH 3840
+#define VICODEC_MAX_HEIGHT 2160
+#define VICODEC_MIN_WIDTH 640
+#define VICODEC_MIN_HEIGHT 480
+
+#define PBLOCK 0
+#define IBLOCK 1
+
+/* Set if this is an interlaced format */
+#define VICODEC_FL_IS_INTERLACED BIT(0)
+/* Set if this is a bottom-first (NTSC) interlaced format */
+#define VICODEC_FL_IS_BOTTOM_FIRST BIT(1)
+/* Set if each 'frame' contains just one field */
+#define VICODEC_FL_IS_ALTERNATE BIT(2)
+/*
+ * If VICODEC_FL_IS_ALTERNATE was set, then this is set if this
+ * 'frame' is the bottom field, else it is the top field.
+ */
+#define VICODEC_FL_IS_BOTTOM_FIELD BIT(3)
+/* Set if this frame is uncompressed */
+#define VICODEC_FL_LUMA_IS_UNCOMPRESSED BIT(4)
+#define VICODEC_FL_CB_IS_UNCOMPRESSED BIT(5)
+#define VICODEC_FL_CR_IS_UNCOMPRESSED BIT(6)
+
+struct cframe_hdr {
+ u32 magic1;
+ u32 magic2;
+ __be32 version;
+ __be32 width, height;
+ __be32 flags;
+ __be32 colorspace;
+ __be32 xfer_func;
+ __be32 ycbcr_enc;
+ __be32 quantization;
+ __be32 size;
+};
+
+struct cframe {
+ unsigned int width, height;
+ __be16 *rlc_data;
+ s16 coeffs[8 * 8];
+ s16 de_coeffs[8 * 8];
+ s16 de_fwht[8 * 8];
+ u32 size;
+};
+
+struct raw_frame {
+ unsigned int width, height;
+ unsigned int chroma_step;
+ u8 *luma, *cb, *cr;
+};
+
+#define FRAME_PCODED BIT(0)
+#define FRAME_UNENCODED BIT(1)
+#define LUMA_UNENCODED BIT(2)
+#define CB_UNENCODED BIT(3)
+#define CR_UNENCODED BIT(4)
+
+u32 encode_frame(struct raw_frame *frm, struct raw_frame *ref_frm,
+ struct cframe *cf, bool is_intra, bool next_is_intra);
+void decode_frame(struct cframe *cf, struct raw_frame *ref, u32 hdr_flags);
+
+#endif
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
new file mode 100644
index 000000000000..408cd55d3580
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -0,0 +1,1506 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A virtual codec example device.
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This is a virtual codec device driver for testing the codec framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination and encodes or decodes the data.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vicodec-codec.h"
+
+MODULE_DESCRIPTION("Virtual codec device");
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_LICENSE("GPL v2");
+
+static bool multiplanar;
+module_param(multiplanar, bool, 0444);
+MODULE_PARM_DESC(multiplanar,
+ " use multi-planar API instead of single-planar API");
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, " activates debug info");
+
+#define VICODEC_NAME "vicodec"
+#define MAX_WIDTH 4096U
+#define MIN_WIDTH 640U
+#define MAX_HEIGHT 2160U
+#define MIN_HEIGHT 480U
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+static void vicodec_dev_release(struct device *dev)
+{
+}
+
+static struct platform_device vicodec_pdev = {
+ .name = VICODEC_NAME,
+ .dev.release = vicodec_dev_release,
+};
+
+/* Per-queue, driver-specific private data */
+struct vicodec_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int flags;
+ unsigned int sizeimage;
+ unsigned int sequence;
+ u32 fourcc;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct vicodec_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device enc_vfd;
+ struct video_device dec_vfd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
+
+ struct mutex enc_mutex;
+ struct mutex dec_mutex;
+ spinlock_t enc_lock;
+ spinlock_t dec_lock;
+
+ struct v4l2_m2m_dev *enc_dev;
+ struct v4l2_m2m_dev *dec_dev;
+};
+
+struct vicodec_ctx {
+ struct v4l2_fh fh;
+ struct vicodec_dev *dev;
+ bool is_enc;
+ spinlock_t *lock;
+
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *ctrl_gop_size;
+ unsigned int gop_size;
+ unsigned int gop_cnt;
+
+ /* Abort requested by m2m */
+ int aborting;
+ struct vb2_v4l2_buffer *last_src_buf;
+ struct vb2_v4l2_buffer *last_dst_buf;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quantization;
+
+ /* Source and destination queue data */
+ struct vicodec_q_data q_data[2];
+ struct raw_frame ref_frame;
+ u8 *compressed_frame;
+ u32 cur_buf_offset;
+ u32 comp_max_size;
+ u32 comp_size;
+ u32 comp_magic_cnt;
+ u32 comp_frame_size;
+ bool comp_has_frame;
+ bool comp_has_next_frame;
+};
+
+static const u32 pixfmts_yuv[] = {
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+};
+
+static inline struct vicodec_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vicodec_ctx, fh);
+}
+
+static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ WARN_ON(1);
+ break;
+ }
+ return NULL;
+}
+
+static void encode(struct vicodec_ctx *ctx,
+ struct vicodec_q_data *q_data,
+ u8 *p_in, u8 *p_out)
+{
+ unsigned int size = q_data->width * q_data->height;
+ struct cframe_hdr *p_hdr;
+ struct cframe cf;
+ struct raw_frame rf;
+ u32 encoding;
+
+ rf.width = q_data->width;
+ rf.height = q_data->height;
+ rf.luma = p_in;
+
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ rf.cb = rf.luma + size;
+ rf.cr = rf.cb + size / 4;
+ rf.chroma_step = 1;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ rf.cr = rf.luma + size;
+ rf.cb = rf.cr + size / 4;
+ rf.chroma_step = 1;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ rf.cb = rf.luma + size;
+ rf.cr = rf.cb + 1;
+ rf.chroma_step = 2;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ rf.cr = rf.luma + size;
+ rf.cb = rf.cr + 1;
+ rf.chroma_step = 2;
+ break;
+ }
+
+ cf.width = q_data->width;
+ cf.height = q_data->height;
+ cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr));
+
+ encoding = encode_frame(&rf, &ctx->ref_frame, &cf, !ctx->gop_cnt,
+ ctx->gop_cnt == ctx->gop_size - 1);
+ if (encoding != FRAME_PCODED)
+ ctx->gop_cnt = 0;
+ if (++ctx->gop_cnt == ctx->gop_size)
+ ctx->gop_cnt = 0;
+
+ p_hdr = (struct cframe_hdr *)p_out;
+ p_hdr->magic1 = VICODEC_MAGIC1;
+ p_hdr->magic2 = VICODEC_MAGIC2;
+ p_hdr->version = htonl(VICODEC_VERSION);
+ p_hdr->width = htonl(cf.width);
+ p_hdr->height = htonl(cf.height);
+ p_hdr->flags = htonl(q_data->flags);
+ if (encoding & LUMA_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_LUMA_IS_UNCOMPRESSED);
+ if (encoding & CB_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_CB_IS_UNCOMPRESSED);
+ if (encoding & CR_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_CR_IS_UNCOMPRESSED);
+ p_hdr->colorspace = htonl(ctx->colorspace);
+ p_hdr->xfer_func = htonl(ctx->xfer_func);
+ p_hdr->ycbcr_enc = htonl(ctx->ycbcr_enc);
+ p_hdr->quantization = htonl(ctx->quantization);
+ p_hdr->size = htonl(cf.size);
+ ctx->ref_frame.width = cf.width;
+ ctx->ref_frame.height = cf.height;
+}
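
encode() drives the frame type off a simple GOP counter: gop_cnt == 0 yields an intra frame, and gop_cnt == gop_size - 1 warns the encoder that the next frame will be intra, so it can skip updating the reference frame. An illustrative trace:

/*
 * gop_size = 3 (assuming the inter frames do contain P-coded blocks;
 * a frame without them resets gop_cnt early):
 *
 *	frame	gop_cnt	is_intra	next_is_intra	gop_cnt after
 *	  0	   0	  yes		    no		      1
 *	  1	   1	  no		    no		      2
 *	  2	   2	  no		    yes		      0
 *	  3	   0	  yes		    no		      1
 */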
+
+static int decode(struct vicodec_ctx *ctx,
+ struct vicodec_q_data *q_data,
+ u8 *p_in, u8 *p_out)
+{
+ unsigned int size = q_data->width * q_data->height;
+ unsigned int i;
+ struct cframe_hdr *p_hdr;
+ struct cframe cf;
+ u8 *p;
+
+ p_hdr = (struct cframe_hdr *)p_in;
+ cf.width = ntohl(p_hdr->width);
+ cf.height = ntohl(p_hdr->height);
+ q_data->flags = ntohl(p_hdr->flags);
+ ctx->colorspace = ntohl(p_hdr->colorspace);
+ ctx->xfer_func = ntohl(p_hdr->xfer_func);
+ ctx->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
+ ctx->quantization = ntohl(p_hdr->quantization);
+ cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));
+
+ if (p_hdr->magic1 != VICODEC_MAGIC1 ||
+ p_hdr->magic2 != VICODEC_MAGIC2 ||
+ ntohl(p_hdr->version) != VICODEC_VERSION ||
+ cf.width < VICODEC_MIN_WIDTH ||
+ cf.width > VICODEC_MAX_WIDTH ||
+ cf.height < VICODEC_MIN_HEIGHT ||
+ cf.height > VICODEC_MAX_HEIGHT ||
+ (cf.width & 7) || (cf.height & 7))
+ return -EINVAL;
+
+ /* TODO: support resolution changes */
+ if (cf.width != q_data->width || cf.height != q_data->height)
+ return -EINVAL;
+
+ decode_frame(&cf, &ctx->ref_frame, q_data->flags);
+ memcpy(p_out, ctx->ref_frame.luma, size);
+ p_out += size;
+
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ memcpy(p_out, ctx->ref_frame.cb, size / 4);
+ p_out += size / 4;
+ memcpy(p_out, ctx->ref_frame.cr, size / 4);
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ memcpy(p_out, ctx->ref_frame.cr, size / 4);
+ p_out += size / 4;
+ memcpy(p_out, ctx->ref_frame.cb, size / 4);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ for (i = 0, p = p_out; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cb[i];
+ for (i = 0, p = p_out + 1; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cr[i];
+ break;
+ case V4L2_PIX_FMT_NV21:
+ for (i = 0, p = p_out; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cr[i];
+ for (i = 0, p = p_out + 1; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cb[i];
+ break;
+ }
+ return 0;
+}
+
+static int device_process(struct vicodec_ctx *ctx,
+ struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
+{
+ struct vicodec_dev *dev = ctx->dev;
+ struct vicodec_q_data *q_out, *q_cap;
+ u8 *p_in, *p_out;
+ int ret;
+
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_cap = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (ctx->is_enc)
+ p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+ else
+ p_in = ctx->compressed_frame;
+ p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (ctx->is_enc) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)p_out;
+
+ encode(ctx, q_out, p_in, p_out);
+ vb2_set_plane_payload(&out_vb->vb2_buf, 0,
+ sizeof(*p_hdr) + ntohl(p_hdr->size));
+ } else {
+ ret = decode(ctx, q_cap, p_in, p_out);
+ if (ret)
+ return ret;
+ vb2_set_plane_payload(&out_vb->vb2_buf, 0,
+ q_cap->width * q_cap->height * 3 / 2);
+ }
+
+ out_vb->sequence = q_cap->sequence++;
+ out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp;
+
+ if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ out_vb->timecode = in_vb->timecode;
+ out_vb->field = in_vb->field;
+ out_vb->flags &= ~V4L2_BUF_FLAG_LAST;
+ out_vb->flags |= in_vb->flags &
+ (V4L2_BUF_FLAG_TIMECODE |
+ V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/* device_run() - prepares and starts the device */
+static void device_run(void *priv)
+{
+ static const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+ struct vicodec_ctx *ctx = priv;
+ struct vicodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct vicodec_q_data *q_out;
+ u32 state;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ state = VB2_BUF_STATE_DONE;
+ if (device_process(ctx, src_buf, dst_buf))
+ state = VB2_BUF_STATE_ERROR;
+ ctx->last_dst_buf = dst_buf;
+
+ spin_lock(ctx->lock);
+ if (!ctx->comp_has_next_frame && src_buf == ctx->last_src_buf) {
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+ if (ctx->is_enc) {
+ src_buf->sequence = q_out->sequence++;
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, state);
+ } else if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == ctx->cur_buf_offset) {
+ src_buf->sequence = q_out->sequence++;
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, state);
+ ctx->cur_buf_offset = 0;
+ ctx->comp_has_next_frame = false;
+ }
+ v4l2_m2m_buf_done(dst_buf, state);
+ ctx->comp_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = false;
+ spin_unlock(ctx->lock);
+
+ if (ctx->is_enc)
+ v4l2_m2m_job_finish(dev->enc_dev, ctx->fh.m2m_ctx);
+ else
+ v4l2_m2m_job_finish(dev->dec_dev, ctx->fh.m2m_ctx);
+}
+
+static void job_remove_out_buf(struct vicodec_ctx *ctx, u32 state)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct vicodec_q_data *q_out;
+
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ spin_lock(ctx->lock);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ src_buf->sequence = q_out->sequence++;
+ v4l2_m2m_buf_done(src_buf, state);
+ ctx->cur_buf_offset = 0;
+ spin_unlock(ctx->lock);
+}
+
+static int job_ready(void *priv)
+{
+ static const u8 magic[] = {
+ 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff
+ };
+ struct vicodec_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_buf;
+ u8 *p_out;
+ u8 *p;
+ u32 sz;
+ u32 state;
+
+ if (ctx->is_enc || ctx->comp_has_frame)
+ return 1;
+
+restart:
+ ctx->comp_has_next_frame = false;
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!src_buf)
+ return 0;
+ p_out = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ sz = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ p = p_out + ctx->cur_buf_offset;
+
+ state = VB2_BUF_STATE_DONE;
+
+ if (!ctx->comp_size) {
+ state = VB2_BUF_STATE_ERROR;
+ for (; p < p_out + sz; p++) {
+ u32 copy;
+
+ p = memchr(p, magic[ctx->comp_magic_cnt], sz);
+ if (!p) {
+ ctx->comp_magic_cnt = 0;
+ break;
+ }
+ copy = sizeof(magic) - ctx->comp_magic_cnt;
+ if (p_out + sz - p < copy)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_magic_cnt,
+ p, copy);
+ ctx->comp_magic_cnt += copy;
+ if (!memcmp(ctx->compressed_frame, magic, ctx->comp_magic_cnt)) {
+ p += copy;
+ state = VB2_BUF_STATE_DONE;
+ break;
+ }
+ ctx->comp_magic_cnt = 0;
+ }
+ if (ctx->comp_magic_cnt < sizeof(magic)) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ ctx->comp_size = sizeof(magic);
+ }
+ if (ctx->comp_size < sizeof(struct cframe_hdr)) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)ctx->compressed_frame;
+ u32 copy = sizeof(struct cframe_hdr) - ctx->comp_size;
+
+ if (copy > p_out + sz - p)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_size,
+ p, copy);
+ p += copy;
+ ctx->comp_size += copy;
+ if (ctx->comp_size < sizeof(struct cframe_hdr)) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr);
+ if (ctx->comp_frame_size > ctx->comp_max_size)
+ ctx->comp_frame_size = ctx->comp_max_size;
+ }
+ if (ctx->comp_size < ctx->comp_frame_size) {
+ u32 copy = ctx->comp_frame_size - ctx->comp_size;
+
+ if (copy > p_out + sz - p)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_size,
+ p, copy);
+ p += copy;
+ ctx->comp_size += copy;
+ if (ctx->comp_size < ctx->comp_frame_size) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ }
+ ctx->cur_buf_offset = p - p_out;
+ ctx->comp_has_frame = true;
+ ctx->comp_has_next_frame = false;
+ if (sz - ctx->cur_buf_offset >= sizeof(struct cframe_hdr)) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)p;
+ u32 frame_size = ntohl(p_hdr->size);
+ u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr);
+
+ if (!memcmp(p, magic, sizeof(magic)))
+ ctx->comp_has_next_frame = remaining >= frame_size;
+ }
+ return 1;
+}
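
job_ready() reassembles one compressed frame from arbitrarily fragmented OUTPUT buffers, keeping enough state (comp_magic_cnt, comp_size) to resume a match that straddles a buffer boundary. A self-contained sketch of the split-magic search idea; find_magic() and its rewind handling are illustrative, not the driver's exact algorithm:

#include <stddef.h>

static const unsigned char magic[8] = {
	0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff
};

/*
 * Feed one buffer fragment; *matched carries the number of magic bytes
 * already seen in earlier fragments. Returns the offset just past the
 * magic, or -1 if more data is needed.
 */
static int find_magic(const unsigned char *buf, size_t len, size_t *matched)
{
	size_t i = 0;

	while (i < len) {
		if (buf[i] == magic[*matched]) {
			i++;
			if (++(*matched) == sizeof(magic))
				return (int)i;
		} else if (*matched) {
			/*
			 * Failed partial match: retry from one byte past
			 * where it started (clamped to this fragment if
			 * the match began in an earlier one, since we
			 * cannot rewind into already-consumed buffers).
			 */
			i = (i >= *matched) ? i - *matched + 1 : 0;
			*matched = 0;
		} else {
			i++;
		}
	}
	return -1;
}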
+
+static void job_abort(void *priv)
+{
+ struct vicodec_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/*
+ * video ioctls
+ */
+
+static u32 find_fmt(u32 fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixfmts_yuv); i++)
+ if (pixfmts_yuv[i] == fmt)
+ return fmt;
+ return pixfmts_yuv[0];
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, VICODEC_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, VICODEC_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", VICODEC_NAME);
+ cap->device_caps = V4L2_CAP_STREAMING |
+ (multiplanar ?
+ V4L2_CAP_VIDEO_M2M_MPLANE :
+ V4L2_CAP_VIDEO_M2M);
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out)
+{
+ bool is_yuv = (is_enc && is_out) || (!is_enc && !is_out);
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar)
+ return -EINVAL;
+ if (!V4L2_TYPE_IS_MULTIPLANAR(f->type) && multiplanar)
+ return -EINVAL;
+ if (f->index >= (is_yuv ? ARRAY_SIZE(pixfmts_yuv) : 1))
+ return -EINVAL;
+
+ if (is_yuv)
+ f->pixelformat = pixfmts_yuv[f->index];
+ else
+ f->pixelformat = V4L2_PIX_FMT_FWHT;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ return enum_fmt(f, ctx->is_enc, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ return enum_fmt(f, ctx->is_enc, true);
+}
+
+static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct vicodec_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = q_data->fourcc;
+ if (q_data->fourcc == V4L2_PIX_FMT_FWHT)
+ pix->bytesperline = 0;
+ else
+ pix->bytesperline = q_data->width;
+ pix->sizeimage = q_data->sizeimage;
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->width = q_data->width;
+ pix_mp->height = q_data->height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = q_data->fourcc;
+ pix_mp->num_planes = 1;
+ if (q_data->fourcc == V4L2_PIX_FMT_FWHT)
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ else
+ pix_mp->plane_fmt[0].bytesperline = q_data->width;
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH) & ~7;
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+ pix->bytesperline = pix->width;
+ pix->sizeimage = pix->width * pix->height * 3 / 2;
+ pix->field = V4L2_FIELD_NONE;
+ if (pix->pixelformat == V4L2_PIX_FMT_FWHT) {
+ pix->bytesperline = 0;
+ pix->sizeimage += sizeof(struct cframe_hdr);
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH) & ~7;
+ pix_mp->height =
+ clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+ pix_mp->plane_fmt[0].bytesperline = pix_mp->width;
+ pix_mp->plane_fmt[0].sizeimage =
+ pix_mp->width * pix_mp->height * 3 / 2;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->num_planes = 1;
+ if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT) {
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage +=
+ sizeof(struct cframe_hdr);
+ }
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
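
vidioc_try_fmt() rounds the resolution down to multiples of 8 (the codec's block size) and sizes the buffer for 4:2:0 data, plus the compressed header for V4L2_PIX_FMT_FWHT. A worked example:

/*
 * Requested 1921x1081 -> clamped and aligned to 1920x1080;
 * sizeimage = 1920 * 1080 * 3 / 2 = 3110400 bytes on the raw side,
 * + sizeof(struct cframe_hdr) when the format is V4L2_PIX_FMT_FWHT.
 */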
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->pixelformat = ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(f->fmt.pix.pixelformat);
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->pixelformat = ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix_mp->pixelformat);
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(ctx, f);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix->pixelformat);
+ if (!pix->colorspace)
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix_mp->pixelformat);
+ if (!pix_mp->colorspace)
+ pix_mp->colorspace = V4L2_COLORSPACE_REC709;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(ctx, f);
+}
+
+static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct vicodec_q_data *q_data;
+ struct vb2_queue *vq;
+ bool fmt_changed = true;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
+ fmt_changed =
+ q_data->fourcc != pix->pixelformat ||
+ q_data->width != pix->width ||
+ q_data->height != pix->height;
+
+ if (vb2_is_busy(vq) && fmt_changed)
+ return -EBUSY;
+
+ q_data->fourcc = pix->pixelformat;
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->sizeimage = pix->sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
+ fmt_changed =
+ q_data->fourcc != pix_mp->pixelformat ||
+ q_data->width != pix_mp->width ||
+ q_data->height != pix_mp->height;
+
+ if (vb2_is_busy(vq) && fmt_changed)
+ return -EBUSY;
+
+ q_data->fourcc = pix_mp->pixelformat;
+ q_data->width = pix_mp->width;
+ q_data->height = pix_mp->height;
+ q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fourcc: %08x\n",
+ f->type, q_data->width, q_data->height, q_data->fourcc);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(file2ctx(file), f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(file2ctx(file), f);
+ if (!ret) {
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ ctx->colorspace = pix->colorspace;
+ ctx->xfer_func = pix->xfer_func;
+ ctx->ycbcr_enc = pix->ycbcr_enc;
+ ctx->quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quantization = pix_mp->quantization;
+ break;
+ default:
+ break;
+ }
+ }
+ return ret;
+}
+
+static void vicodec_mark_last_buf(struct vicodec_ctx *ctx)
+{
+ static const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ spin_lock(ctx->lock);
+ ctx->last_src_buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
+ if (!ctx->last_src_buf && ctx->last_dst_buf) {
+ ctx->last_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+ spin_unlock(ctx->lock);
+}
+
+static int vicodec_try_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP)
+ return -EINVAL;
+
+ if (ec->flags & V4L2_ENC_CMD_STOP_AT_GOP_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vicodec_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vicodec_try_encoder_cmd(file, fh, ec);
+ if (ret < 0)
+ return ret;
+
+ vicodec_mark_last_buf(ctx);
+ return 0;
+}
+
+static int vicodec_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vicodec_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vicodec_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ vicodec_mark_last_buf(ctx);
+ return 0;
+}
+
+static int vicodec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ switch (fsize->pixel_format) {
+ case V4L2_PIX_FMT_FWHT:
+ break;
+ default:
+ if (find_fmt(fsize->pixel_format) == fsize->pixel_format)
+ break;
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+
+ fsize->stepwise.min_width = MIN_WIDTH;
+ fsize->stepwise.max_width = MAX_WIDTH;
+ fsize->stepwise.step_width = 8;
+ fsize->stepwise.min_height = MIN_HEIGHT;
+ fsize->stepwise.max_height = MAX_HEIGHT;
+ fsize->stepwise.step_height = 8;
+
+ return 0;
+}
+
+static int vicodec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_encoder_cmd = vicodec_try_encoder_cmd,
+ .vidioc_encoder_cmd = vicodec_encoder_cmd,
+ .vidioc_try_decoder_cmd = vicodec_try_decoder_cmd,
+ .vidioc_decoder_cmd = vicodec_decoder_cmd,
+ .vidioc_enum_framesizes = vicodec_enum_framesizes,
+
+ .vidioc_subscribe_event = vicodec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int vicodec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vicodec_q_data *q_data = get_q_data(ctx, vq->type);
+ unsigned int size = q_data->sizeimage;
+
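+	/*
+	 * A non-zero *nplanes means a VIDIOC_CREATE_BUFS request: only
+	 * validate the caller-supplied plane size against the current format.
+	 */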
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+ return 0;
+}
+
+static int vicodec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vicodec_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+			(unsigned long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vicodec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
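+/*
+ * Hand every queued source/destination buffer back to vb2 in the given
+ * state; used both when start_streaming fails and when streaming stops.
+ */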
+static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ return;
+ spin_lock(ctx->lock);
+ v4l2_m2m_buf_done(vbuf, state);
+ spin_unlock(ctx->lock);
+ }
+}
+
+static int vicodec_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vicodec_q_data *q_data = get_q_data(ctx, q->type);
+ unsigned int size = q_data->width * q_data->height;
+
+ q_data->sequence = 0;
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ return 0;
+
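+	/*
+	 * A reference frame holds one YUV 4:2:0 picture: a full luma plane
+	 * plus two quarter-size chroma planes, i.e. width * height * 3 / 2
+	 * bytes.
+	 */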
+ ctx->ref_frame.width = ctx->ref_frame.height = 0;
+ ctx->ref_frame.luma = kvmalloc(size * 3 / 2, GFP_KERNEL);
+ ctx->comp_max_size = size * 3 / 2 + sizeof(struct cframe_hdr);
+ ctx->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
+ if (!ctx->ref_frame.luma || !ctx->compressed_frame) {
+ kvfree(ctx->ref_frame.luma);
+ kvfree(ctx->compressed_frame);
+ vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
+ return -ENOMEM;
+ }
+ ctx->ref_frame.cb = ctx->ref_frame.luma + size;
+ ctx->ref_frame.cr = ctx->ref_frame.cb + size / 4;
+ ctx->last_src_buf = NULL;
+ ctx->last_dst_buf = NULL;
+ v4l2_ctrl_grab(ctx->ctrl_gop_size, true);
+ ctx->gop_size = v4l2_ctrl_g_ctrl(ctx->ctrl_gop_size);
+ ctx->gop_cnt = 0;
+ ctx->cur_buf_offset = 0;
+ ctx->comp_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = false;
+
+ return 0;
+}
+
+static void vicodec_stop_streaming(struct vb2_queue *q)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ return;
+
+ kvfree(ctx->ref_frame.luma);
+ kvfree(ctx->compressed_frame);
+ v4l2_ctrl_grab(ctx->ctrl_gop_size, false);
+}
+
+static const struct vb2_ops vicodec_qops = {
+ .queue_setup = vicodec_queue_setup,
+ .buf_prepare = vicodec_buf_prepare,
+ .buf_queue = vicodec_buf_queue,
+ .start_streaming = vicodec_start_streaming,
+ .stop_streaming = vicodec_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct vicodec_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = (multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &vicodec_qops;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = ctx->is_enc ? &ctx->dev->enc_mutex :
+ &ctx->dev->dec_mutex;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = (multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &vicodec_qops;
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
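+	/* Share one mutex so both queues of this context serialise together. */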
+ dst_vq->lock = src_vq->lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int vicodec_open(struct file *file)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct vicodec_dev *dev = video_drvdata(file);
+ struct vicodec_ctx *ctx = NULL;
+ struct v4l2_ctrl_handler *hdl;
+ unsigned int size;
+ int rc = 0;
+
+ if (mutex_lock_interruptible(vfd->lock))
+ return -ERESTARTSYS;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto open_unlock;
+ }
+
+ if (vfd == &dev->enc_vfd)
+ ctx->is_enc = true;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ ctx->ctrl_gop_size = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, 16, 1, 10);
+ if (hdl->error) {
+ rc = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx);
+ goto open_unlock;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
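+	/*
+	 * Default format: 720p YUV 4:2:0 on both queues; the encoder turns
+	 * raw YUV420 into FWHT and the decoder does the reverse.
+	 */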
+ ctx->q_data[V4L2_M2M_SRC].fourcc =
+ ctx->is_enc ? V4L2_PIX_FMT_YUV420 : V4L2_PIX_FMT_FWHT;
+ ctx->q_data[V4L2_M2M_SRC].width = 1280;
+ ctx->q_data[V4L2_M2M_SRC].height = 720;
+ size = 1280 * 720 * 3 / 2;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->q_data[V4L2_M2M_DST].fourcc =
+ ctx->is_enc ? V4L2_PIX_FMT_FWHT : V4L2_PIX_FMT_YUV420;
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+
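+	/* The compressed side needs room for a worst-case frame plus header. */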
+ size += sizeof(struct cframe_hdr);
+ if (ctx->is_enc) {
+ ctx->q_data[V4L2_M2M_DST].sizeimage = size;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->enc_dev, ctx,
+ &queue_init);
+ ctx->lock = &dev->enc_lock;
+ } else {
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->dec_dev, ctx,
+ &queue_init);
+ ctx->lock = &dev->dec_lock;
+ }
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ rc = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ goto open_unlock;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+open_unlock:
+ mutex_unlock(vfd->lock);
+ return rc;
+}
+
+static int vicodec_release(struct file *file)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(vfd->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(vfd->lock);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vicodec_fops = {
+ .owner = THIS_MODULE,
+ .open = vicodec_open,
+ .release = vicodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device vicodec_videodev = {
+ .name = VICODEC_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &vicodec_fops,
+ .ioctl_ops = &vicodec_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+ .job_ready = job_ready,
+};
+
+static int vicodec_probe(struct platform_device *pdev)
+{
+ struct vicodec_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->enc_lock);
+ spin_lock_init(&dev->dec_lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &pdev->dev;
+ strlcpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model));
+ media_device_init(&dev->mdev);
+ dev->v4l2_dev.mdev = &dev->mdev;
+#endif
+
+ mutex_init(&dev->enc_mutex);
+ mutex_init(&dev->dec_mutex);
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->enc_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->enc_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
+ ret = PTR_ERR(dev->enc_dev);
+ goto unreg_dev;
+ }
+
+ dev->dec_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->dec_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
+ ret = PTR_ERR(dev->dec_dev);
+ goto err_enc_m2m;
+ }
+
+ dev->enc_vfd = vicodec_videodev;
+ vfd = &dev->enc_vfd;
+ vfd->lock = &dev->enc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ strlcpy(vfd->name, "vicodec-enc", sizeof(vfd->name));
+ v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_m2m;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->dec_vfd = vicodec_videodev;
+ vfd = &dev->dec_vfd;
+ vfd->lock = &dev->dec_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ strlcpy(vfd->name, "vicodec-dec", sizeof(vfd->name));
+ v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto unreg_enc;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = v4l2_m2m_register_media_controller(dev->enc_dev,
+ &dev->enc_vfd, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m;
+ }
+
+ ret = v4l2_m2m_register_media_controller(dev->dec_dev,
+ &dev->dec_vfd, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m_enc_mc;
+ }
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto unreg_m2m_dec_mc;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+unreg_m2m_dec_mc:
+ v4l2_m2m_unregister_media_controller(dev->dec_dev);
+unreg_m2m_enc_mc:
+ v4l2_m2m_unregister_media_controller(dev->enc_dev);
+unreg_m2m:
+ video_unregister_device(&dev->dec_vfd);
+#endif
+unreg_enc:
+ video_unregister_device(&dev->enc_vfd);
+err_dec_m2m:
+ v4l2_m2m_release(dev->dec_dev);
+err_enc_m2m:
+ v4l2_m2m_release(dev->enc_dev);
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int vicodec_remove(struct platform_device *pdev)
+{
+ struct vicodec_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " VICODEC_NAME);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->enc_dev);
+ v4l2_m2m_unregister_media_controller(dev->dec_dev);
+ media_device_cleanup(&dev->mdev);
+#endif
+
+ v4l2_m2m_release(dev->enc_dev);
+ v4l2_m2m_release(dev->dec_dev);
+ video_unregister_device(&dev->enc_vfd);
+ video_unregister_device(&dev->dec_vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver vicodec_pdrv = {
+ .probe = vicodec_probe,
+ .remove = vicodec_remove,
+ .driver = {
+ .name = VICODEC_NAME,
+ },
+};
+
+static void __exit vicodec_exit(void)
+{
+ platform_driver_unregister(&vicodec_pdrv);
+ platform_device_unregister(&vicodec_pdev);
+}
+
+static int __init vicodec_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vicodec_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&vicodec_pdrv);
+ if (ret)
+ platform_device_unregister(&vicodec_pdev);
+
+ return ret;
+}
+
+module_init(vicodec_init);
+module_exit(vicodec_exit);
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 1fb887293337..c01e1592ad0a 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -34,6 +34,13 @@ struct video_mux {
int active;
};
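+/*
+ * Default mbus format: the smallest valid frame, used to initialise both
+ * the pad try formats and the active formats at probe time.
+ */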
+static const struct v4l2_mbus_framefmt video_mux_format_mbus_default = {
+ .width = 1,
+ .height = 1,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .field = V4L2_FIELD_NONE,
+};
+
static inline struct video_mux *v4l2_subdev_to_video_mux(struct v4l2_subdev *sd)
{
return container_of(sd, struct video_mux, subdev);
@@ -180,6 +187,88 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
if (!source_mbusformat)
return -EINVAL;
+ /* No size limitations except V4L2 compliance requirements */
+ v4l_bound_align_image(&sdformat->format.width, 1, 65536, 0,
+ &sdformat->format.height, 1, 65536, 0, 0);
+
+ /* All formats except LVDS and vendor specific formats are acceptable */
+ switch (sdformat->format.code) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_GBR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_ARGB8888_1X32:
+ case MEDIA_BUS_FMT_RGB888_1X32_PADHI:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_UYVY8_1_5X8:
+ case MEDIA_BUS_FMT_VYUY8_1_5X8:
+ case MEDIA_BUS_FMT_YUYV8_1_5X8:
+ case MEDIA_BUS_FMT_YVYU8_1_5X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_VYUY10_2X10:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YVYU10_2X10:
+ case MEDIA_BUS_FMT_Y12_1X12:
+ case MEDIA_BUS_FMT_UYVY12_2X12:
+ case MEDIA_BUS_FMT_VYUY12_2X12:
+ case MEDIA_BUS_FMT_YUYV12_2X12:
+ case MEDIA_BUS_FMT_YVYU12_2X12:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VYUY10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YVYU10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ case MEDIA_BUS_FMT_VYUY12_1X24:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ case MEDIA_BUS_FMT_YVYU12_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_AYUV8_1X32:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_AHSV8888_1X32:
+ break;
+ default:
+ sdformat->format.code = MEDIA_BUS_FMT_Y8_1X8;
+ break;
+ }
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+
mutex_lock(&vmux->lock);
/* Source pad mirrors active sink pad, no limitations on sink pads */
@@ -197,7 +286,27 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
return 0;
}
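+/* Initialise every pad's try format in a new pad config to the default. */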
+static int video_mux_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ struct v4l2_mbus_framefmt *mbusformat;
+ unsigned int i;
+
+ mutex_lock(&vmux->lock);
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ mbusformat = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mbusformat = video_mux_format_mbus_default;
+ }
+
+ mutex_unlock(&vmux->lock);
+
+ return 0;
+}
+
static const struct v4l2_subdev_pad_ops video_mux_pad_ops = {
+ .init_cfg = video_mux_init_cfg,
.get_fmt = video_mux_get_format,
.set_fmt = video_mux_set_format,
};
@@ -214,8 +323,8 @@ static int video_mux_probe(struct platform_device *pdev)
struct device_node *ep;
struct video_mux *vmux;
unsigned int num_pads = 0;
+ unsigned int i;
int ret;
- int i;
vmux = devm_kzalloc(dev, sizeof(*vmux), GFP_KERNEL);
if (!vmux)
@@ -260,9 +369,11 @@ static int video_mux_probe(struct platform_device *pdev)
sizeof(*vmux->format_mbus),
GFP_KERNEL);
- for (i = 0; i < num_pads - 1; i++)
- vmux->pads[i].flags = MEDIA_PAD_FL_SINK;
- vmux->pads[num_pads - 1].flags = MEDIA_PAD_FL_SOURCE;
+ for (i = 0; i < num_pads; i++) {
+ vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
+ : MEDIA_PAD_FL_SOURCE;
+ vmux->format_mbus[i] = video_mux_format_mbus_default;
+ }
vmux->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
ret = media_entity_pads_init(&vmux->subdev.entity, num_pads,
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 065483e62db4..462099a141e4 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -140,6 +140,9 @@ static struct vim2m_fmt *find_format(struct v4l2_format *f)
struct vim2m_dev {
struct v4l2_device v4l2_dev;
struct video_device vfd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
atomic_t num_inst;
struct mutex dev_mutex;
@@ -1016,11 +1019,10 @@ static int vim2m_probe(struct platform_device *pdev)
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto unreg_dev;
+ goto unreg_v4l2;
}
video_set_drvdata(vfd, dev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", vim2m_videodev.name);
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
@@ -1031,15 +1033,39 @@ static int vim2m_probe(struct platform_device *pdev)
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
- goto err_m2m;
+ goto unreg_dev;
+ }
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &pdev->dev;
+ strlcpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
+ media_device_init(&dev->mdev);
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev,
+ vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m;
}
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto unreg_m2m_mc;
+ }
+#endif
return 0;
-err_m2m:
+#ifdef CONFIG_MEDIA_CONTROLLER
+unreg_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+unreg_m2m:
v4l2_m2m_release(dev->m2m_dev);
- video_unregister_device(&dev->vfd);
+#endif
unreg_dev:
+ video_unregister_device(&dev->vfd);
+unreg_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
@@ -1050,6 +1076,12 @@ static int vim2m_remove(struct platform_device *pdev)
struct vim2m_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ media_device_cleanup(&dev->mdev);
+#endif
v4l2_m2m_release(dev->m2m_dev);
del_timer_sync(&dev->timer);
video_unregister_device(&dev->vfd);
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 88a1e5670c72..ec68feaac378 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -17,6 +17,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index fe088a953860..9246f265de31 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -328,7 +328,6 @@ static int vimc_probe(struct platform_device *pdev)
if (ret) {
media_device_cleanup(&vimc->mdev);
vimc_rm_subdevs(vimc);
- kfree(vimc);
return ret;
}
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 6e10b63ba9ec..77887f66f323 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -17,6 +17,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index e583ec7a91da..b0952ee86296 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -17,6 +17,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 605e2a2d5dd5..b2b89315e7ba 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -19,6 +19,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/vmalloc.h>
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 6b0bfa091592..5429193fbb91 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -295,7 +295,7 @@ static int vivid_user_vid_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_AUTOGAIN:
- dev->gain->val = dev->jiffies_vid_cap & 0xff;
+ dev->gain->val = (jiffies_to_msecs(jiffies) / 1000) & 0xff;
break;
}
return 0;
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 3fdb280c36ca..f06003bb8e42 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -477,7 +477,7 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
/* Updates stream time, only update at the start of a new frame. */
if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
- (buf->vb.sequence & 1) == 0)
+ (dev->vid_cap_seq_count & 1) == 0)
dev->ms_vid_cap =
jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 33f632331474..56c62122a81a 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -53,6 +53,7 @@ struct vsp1_uif;
#define VSP1_HAS_HGO (1 << 7)
#define VSP1_HAS_HGT (1 << 8)
#define VSP1_HAS_BRS (1 << 9)
+#define VSP1_HAS_EXT_DL (1 << 10)
struct vsp1_device_info {
u32 version;
@@ -68,6 +69,8 @@ struct vsp1_device_info {
bool uapi;
};
+#define vsp1_feature(vsp1, f) ((vsp1)->info->features & (f))
+
struct vsp1_device {
struct device *dev;
const struct vsp1_device_info *info;
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index d9b9cdd8fbe2..26289adaf658 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -22,29 +22,79 @@
#define VSP1_DLH_INT_ENABLE (1 << 1)
#define VSP1_DLH_AUTO_START (1 << 0)
+#define VSP1_DLH_EXT_PRE_CMD_EXEC (1 << 9)
+#define VSP1_DLH_EXT_POST_CMD_EXEC (1 << 8)
+
struct vsp1_dl_header_list {
u32 num_bytes;
u32 addr;
-} __attribute__((__packed__));
+} __packed;
struct vsp1_dl_header {
u32 num_lists;
struct vsp1_dl_header_list lists[8];
u32 next_header;
u32 flags;
-} __attribute__((__packed__));
+} __packed;
+
+/**
+ * struct vsp1_dl_ext_header - Extended display list header
+ * @padding: padding zero bytes for alignment
+ * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
+ * @flags: enables or disables execution of the pre and post command
+ * @pre_ext_dl_plist: start address of pre-extended display list bodies
+ * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
+ * @post_ext_dl_plist: start address of post-extended display list bodies
+ */
+struct vsp1_dl_ext_header {
+ u32 padding;
+
+	/*
+	 * The datasheet shows the flags stored before pre_ext_dl_num_cmd,
+	 * expecting 32-bit accesses. The flags apply to the whole header,
+	 * not just the pre_ext command, and thus warrant being separated
+	 * out. Due to byte ordering, and because they are represented as
+	 * 16-bit values here, the flags must be positioned after
+	 * pre_ext_dl_num_cmd.
+	 */
+ u16 pre_ext_dl_num_cmd;
+ u16 flags;
+ u32 pre_ext_dl_plist;
+
+ u32 post_ext_dl_num_cmd;
+ u32 post_ext_dl_plist;
+} __packed;
+
+struct vsp1_dl_header_extended {
+ struct vsp1_dl_header header;
+ struct vsp1_dl_ext_header ext;
+} __packed;
struct vsp1_dl_entry {
u32 addr;
u32 data;
-} __attribute__((__packed__));
+} __packed;
+
+/**
+ * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
+ * @opcode: Extended display list command operation code
+ * @flags: Pre-extended command flags. These are specific to each command
+ * @address_set: Source address set pointer. Must have 16-byte alignment
+ * @reserved: Zero bits for alignment.
+ */
+struct vsp1_pre_ext_dl_body {
+ u32 opcode;
+ u32 flags;
+ u32 address_set;
+ u32 reserved;
+} __packed;
/**
* struct vsp1_dl_body - Display list body
* @list: entry in the display list list of bodies
* @free: entry in the pool free body list
+ * @refcnt: reference tracking for the body
* @pool: pool to which this body belongs
- * @vsp1: the VSP1 device
* @entries: array of entries
* @dma: DMA address of the entries
* @size: size of the DMA memory in bytes
@@ -58,7 +108,6 @@ struct vsp1_dl_body {
refcount_t refcnt;
struct vsp1_dl_body_pool *pool;
- struct vsp1_device *vsp1;
struct vsp1_dl_entry *entries;
dma_addr_t dma;
@@ -93,13 +142,40 @@ struct vsp1_dl_body_pool {
};
/**
+ * struct vsp1_cmd_pool - Display List commands pool
+ * @dma: DMA address of the entries
+ * @size: size of the full DMA memory pool in bytes
+ * @mem: CPU memory pointer for the pool
+ * @cmds: Array of command structures for the pool
+ * @free: Free pool entries
+ * @lock: Protects the free list
+ * @vsp1: the VSP1 device
+ */
+struct vsp1_dl_cmd_pool {
+ /* DMA allocation */
+ dma_addr_t dma;
+ size_t size;
+ void *mem;
+
+ struct vsp1_dl_ext_cmd *cmds;
+ struct list_head free;
+
+ spinlock_t lock;
+
+ struct vsp1_device *vsp1;
+};
+
+/**
* struct vsp1_dl_list - Display list
* @list: entry in the display list manager lists
* @dlm: the display list manager
- * @header: display list header, NULL for headerless lists
+ * @header: display list header
+ * @extension: extended display list header. NULL for normal lists
* @dma: DMA address for the header
* @body0: first display list body
* @bodies: list of extra display list bodies
+ * @pre_cmd: pre command to be issued through extended dl header
+ * @post_cmd: post command to be issued through extended dl header
* @has_chain: if true, indicates that there's a partition chain
* @chain: entry in the display list partition chain
* @internal: whether the display list is used for internal purpose
@@ -109,26 +185,24 @@ struct vsp1_dl_list {
struct vsp1_dl_manager *dlm;
struct vsp1_dl_header *header;
+ struct vsp1_dl_ext_header *extension;
dma_addr_t dma;
struct vsp1_dl_body *body0;
struct list_head bodies;
+ struct vsp1_dl_ext_cmd *pre_cmd;
+ struct vsp1_dl_ext_cmd *post_cmd;
+
bool has_chain;
struct list_head chain;
bool internal;
};
-enum vsp1_dl_mode {
- VSP1_DL_MODE_HEADER,
- VSP1_DL_MODE_HEADERLESS,
-};
-
/**
* struct vsp1_dl_manager - Display List manager
* @index: index of the related WPF
- * @mode: display list operation mode (header or headerless)
* @singleshot: execute the display list in single-shot mode
* @vsp1: the VSP1 device
* @lock: protects the free, active, queued, and pending lists
@@ -137,10 +211,10 @@ enum vsp1_dl_mode {
* @queued: list queued to the hardware (written to the DL registers)
* @pending: list waiting to be queued to the hardware
* @pool: body pool for the display list bodies
+ * @cmdpool: commands pool for extended display list
*/
struct vsp1_dl_manager {
unsigned int index;
- enum vsp1_dl_mode mode;
bool singleshot;
struct vsp1_device *vsp1;
@@ -151,6 +225,7 @@ struct vsp1_dl_manager {
struct vsp1_dl_list *pending;
struct vsp1_dl_body_pool *pool;
+ struct vsp1_dl_cmd_pool *cmdpool;
};
/* -----------------------------------------------------------------------------
@@ -314,12 +389,164 @@ void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
}
/* -----------------------------------------------------------------------------
+ * Display List Extended Command Management
+ */
+
+enum vsp1_extcmd_type {
+ VSP1_EXTCMD_AUTODISP,
+ VSP1_EXTCMD_AUTOFLD,
+};
+
+struct vsp1_extended_command_info {
+ u16 opcode;
+ size_t body_size;
+};
+
+static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
+ [VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
+ [VSP1_EXTCMD_AUTOFLD] = { 0x03, 160 },
+};
+
+/**
+ * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
+ * @vsp1: The VSP1 device
+ * @type: The command pool type
+ * @num_cmds: The number of commands to allocate
+ *
+ * Allocate a pool of commands each with enough memory to contain the private
+ * data of each command. The allocation sizes are dependent upon the command
+ * type.
+ *
+ * Return a pointer to the pool on success or NULL if memory can't be allocated.
+ */
+static struct vsp1_dl_cmd_pool *
+vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
+ unsigned int num_cmds)
+{
+ struct vsp1_dl_cmd_pool *pool;
+ unsigned int i;
+ size_t cmd_size;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free);
+
+ pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
+ if (!pool->cmds) {
+ kfree(pool);
+ return NULL;
+ }
+
+ cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
+ vsp1_extended_commands[type].body_size;
+ cmd_size = ALIGN(cmd_size, 16);
+
+ pool->size = cmd_size * num_cmds;
+ pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
+ GFP_KERNEL);
+ if (!pool->mem) {
+ kfree(pool->cmds);
+ kfree(pool);
+ return NULL;
+ }
+
+ for (i = 0; i < num_cmds; ++i) {
+ struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
+ size_t cmd_offset = i * cmd_size;
+ /* data_offset must be 16 byte aligned for DMA. */
+ size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
+ cmd_offset;
+
+ cmd->pool = pool;
+ cmd->opcode = vsp1_extended_commands[type].opcode;
+
+ /*
+ * TODO: Auto-disp can utilise more than one extended body
+ * command per cmd.
+ */
+ cmd->num_cmds = 1;
+ cmd->cmds = pool->mem + cmd_offset;
+ cmd->cmd_dma = pool->dma + cmd_offset;
+
+ cmd->data = pool->mem + data_offset;
+ cmd->data_dma = pool->dma + data_offset;
+
+ list_add_tail(&cmd->free, &pool->free);
+ }
+
+ return pool;
+}
+
+static
+struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
+{
+ struct vsp1_dl_ext_cmd *cmd = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!list_empty(&pool->free)) {
+ cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
+ free);
+ list_del(&cmd->free);
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return cmd;
+}
+
+static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
+{
+ unsigned long flags;
+
+ if (!cmd)
+ return;
+
+ /* Reset flags, these mark data usage. */
+ cmd->flags = 0;
+
+ spin_lock_irqsave(&cmd->pool->lock, flags);
+ list_add_tail(&cmd->free, &cmd->pool->free);
+ spin_unlock_irqrestore(&cmd->pool->lock, flags);
+}
+
+static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (pool->mem)
+ dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
+ pool->dma);
+
+ kfree(pool->cmds);
+ kfree(pool);
+}
+
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ if (dl->pre_cmd)
+ return dl->pre_cmd;
+
+ dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);
+
+ return dl->pre_cmd;
+}
+
+/* ----------------------------------------------------------------------------
* Display List Transaction Management
*/
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
struct vsp1_dl_list *dl;
+ size_t header_offset;
dl = kzalloc(sizeof(*dl), GFP_KERNEL);
if (!dl)
@@ -332,16 +559,14 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
dl->body0 = vsp1_dl_body_get(dlm->pool);
if (!dl->body0)
return NULL;
- if (dlm->mode == VSP1_DL_MODE_HEADER) {
- size_t header_offset = dl->body0->max_entries
- * sizeof(*dl->body0->entries);
- dl->header = ((void *)dl->body0->entries) + header_offset;
- dl->dma = dl->body0->dma + header_offset;
+ header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
- memset(dl->header, 0, sizeof(*dl->header));
- dl->header->lists[0].addr = dl->body0->dma;
- }
+ dl->header = ((void *)dl->body0->entries) + header_offset;
+ dl->dma = dl->body0->dma + header_offset;
+
+ memset(dl->header, 0, sizeof(*dl->header));
+ dl->header->lists[0].addr = dl->body0->dma;
return dl;
}
@@ -398,7 +623,7 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
/* This function must be called with the display list manager lock held.*/
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
- struct vsp1_dl_list *dl_child;
+ struct vsp1_dl_list *dl_next;
if (!dl)
return;
@@ -408,14 +633,20 @@ static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
* hardware operation.
*/
if (dl->has_chain) {
- list_for_each_entry(dl_child, &dl->chain, chain)
- __vsp1_dl_list_put(dl_child);
+ list_for_each_entry(dl_next, &dl->chain, chain)
+ __vsp1_dl_list_put(dl_next);
}
dl->has_chain = false;
vsp1_dl_list_bodies_put(dl);
+ vsp1_dl_ext_cmd_put(dl->pre_cmd);
+ vsp1_dl_ext_cmd_put(dl->post_cmd);
+
+ dl->pre_cmd = NULL;
+ dl->post_cmd = NULL;
+
/*
* body0 is reused as an optimisation, as presently every display list
* has at least one body; thus we reinitialise the entries list.
@@ -473,16 +704,9 @@ struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
*
* The reference must be explicitly released by a call to vsp1_dl_body_put()
* when the body isn't needed anymore.
- *
- * Additional bodies are only usable for display lists in header mode.
- * Attempting to add a body to a header-less display list will return an error.
*/
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
- /* Multi-body lists are only available in header mode. */
- if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
- return -EINVAL;
-
refcount_inc(&dlb->refcnt);
list_add_tail(&dlb->list, &dl->bodies);
@@ -503,22 +727,23 @@ int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
* Adding a display list to a chain passes ownership of the display list to
* the head display list item. The chain is released when the head dl item is
* put back with __vsp1_dl_list_put().
- *
- * Chained display lists are only usable in header mode. Attempts to add a
- * display list to a chain in header-less mode will return an error.
*/
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
struct vsp1_dl_list *dl)
{
- /* Chained lists are only available in header mode. */
- if (head->dlm->mode != VSP1_DL_MODE_HEADER)
- return -EINVAL;
-
head->has_chain = true;
list_add_tail(&dl->chain, &head->chain);
return 0;
}
+static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
+{
+ cmd->cmds[0].opcode = cmd->opcode;
+ cmd->cmds[0].flags = cmd->flags;
+ cmd->cmds[0].address_set = cmd->data_dma;
+ cmd->cmds[0].reserved = 0;
+}
+
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
struct vsp1_dl_manager *dlm = dl->dlm;
@@ -571,6 +796,27 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
*/
dl->header->flags = VSP1_DLH_INT_ENABLE;
}
+
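+	/* Fill the extended header, if any, with the optional pre/post commands. */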
+ if (!dl->extension)
+ return;
+
+ dl->extension->flags = 0;
+
+ if (dl->pre_cmd) {
+ dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
+ dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
+ }
+
+ if (dl->post_cmd) {
+ dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
+ dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
+ }
}
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
@@ -581,17 +827,10 @@ static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
return false;
/*
- * Check whether the VSP1 has taken the update. In headerless mode the
- * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
- * register, and in header mode by clearing the UPDHDR bit in the CMD
- * register.
+ * Check whether the VSP1 has taken the update. The hardware indicates
+ * this by clearing the UPDHDR bit in the CMD register.
*/
- if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
- return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
- & VI6_DL_BODY_SIZE_UPD);
- else
- return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
- & VI6_CMD_UPDHDR);
+ return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}
static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
@@ -599,26 +838,14 @@ static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
struct vsp1_dl_manager *dlm = dl->dlm;
struct vsp1_device *vsp1 = dlm->vsp1;
- if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
- /*
- * In headerless mode, program the hardware directly with the
- * display list body address and size and set the UPD bit. The
- * bit will be cleared by the hardware when the display list
- * processing starts.
- */
- vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0->dma);
- vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
- (dl->body0->num_entries * sizeof(*dl->header->lists)));
- } else {
- /*
- * In header mode, program the display list header address. If
- * the hardware is idle (single-shot mode or first frame in
- * continuous mode) it will then be started independently. If
- * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
- * will be updated with the display list address.
- */
- vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
- }
+ /*
+ * Program the display list header address. If the hardware is idle
+ * (single-shot mode or first frame in continuous mode) it will then be
+ * started independently. If the hardware is operating, the
+ * VI6_DL_HDR_REF_ADDR register will be updated with the display list
+ * address.
+ */
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
@@ -673,18 +900,16 @@ static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
{
struct vsp1_dl_manager *dlm = dl->dlm;
- struct vsp1_dl_list *dl_child;
+ struct vsp1_dl_list *dl_next;
unsigned long flags;
- if (dlm->mode == VSP1_DL_MODE_HEADER) {
- /* Fill the header for the head and chained display lists. */
- vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
+ /* Fill the header for the head and chained display lists. */
+ vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
- list_for_each_entry(dl_child, &dl->chain, chain) {
- bool last = list_is_last(&dl_child->chain, &dl->chain);
+ list_for_each_entry(dl_next, &dl->chain, chain) {
+ bool last = list_is_last(&dl_next->chain, &dl->chain);
- vsp1_dl_list_fill_header(dl_child, last);
- }
+ vsp1_dl_list_fill_header(dl_next, last);
}
dl->internal = internal;
@@ -713,7 +938,7 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
* has completed at frame end. If the flag is not returned display list
* completion has been delayed by one frame because the display list commit
* raced with the frame end interrupt. The function always returns with the flag
- * set in header mode as display list processing is then not continuous and
+ * set in single-shot mode as display list processing is then not continuous and
* races never occur.
*
* The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display list
@@ -722,6 +947,8 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
*/
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
+ struct vsp1_device *vsp1 = dlm->vsp1;
+ u32 status = vsp1_read(vsp1, VI6_STATUS);
unsigned int flags = 0;
spin_lock(&dlm->lock);
@@ -747,6 +974,14 @@ unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
goto done;
/*
+ * Progressive streams report only TOP fields. If we have a BOTTOM
+ * field, we are interlaced, and expect the frame to complete on the
+ * next frame end interrupt.
+ */
+ if (status & VI6_STATUS_FLD_STD(dlm->index))
+ goto done;
+
+ /*
* The device starts processing the queued display list right after the
* frame end interrupt. The display list thus becomes active.
*/
@@ -781,16 +1016,17 @@ done:
/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
+ unsigned int i;
u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
| VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
| VI6_DL_CTRL_DLE;
+ u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
+ | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;
- /*
- * The DRM pipeline operates with display lists in Continuous Frame
- * Mode, all other pipelines use manual start.
- */
- if (vsp1->drm)
- ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ for (i = 0; i < vsp1->info->wpf_count; ++i)
+ vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
+ }
vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
@@ -831,8 +1067,6 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
return NULL;
dlm->index = index;
- dlm->mode = index == 0 && !vsp1->info->uapi
- ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
dlm->singleshot = vsp1->info->uapi;
dlm->vsp1 = vsp1;
@@ -841,14 +1075,16 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
/*
* Initialize the display list body and allocate DMA memory for the body
- * and the optional header. Both are allocated together to avoid memory
+ * and the header. Both are allocated together to avoid memory
* fragmentation, with the header located right after the body in
* memory. An extra body is allocated on top of the prealloc to account
* for the cached body used by the vsp1_pipeline object.
*/
- header_size = dlm->mode == VSP1_DL_MODE_HEADER
- ? ALIGN(sizeof(struct vsp1_dl_header), 8)
- : 0;
+ header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
+ sizeof(struct vsp1_dl_header_extended) :
+ sizeof(struct vsp1_dl_header);
+
+ header_size = ALIGN(header_size, 8);
dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
VSP1_DL_NUM_ENTRIES, header_size);
@@ -859,12 +1095,28 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
struct vsp1_dl_list *dl;
dl = vsp1_dl_list_alloc(dlm);
- if (!dl)
+ if (!dl) {
+ vsp1_dlm_destroy(dlm);
return NULL;
+ }
+
+ /* The extended header immediately follows the header. */
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
+ dl->extension = (void *)dl->header
+ + sizeof(*dl->header);
list_add_tail(&dl->list, &dlm->free);
}
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
+ VSP1_EXTCMD_AUTOFLD, prealloc);
+ if (!dlm->cmdpool) {
+ vsp1_dlm_destroy(dlm);
+ return NULL;
+ }
+ }
+
return dlm;
}
@@ -881,4 +1133,5 @@ void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
}
vsp1_dl_body_pool_destroy(dlm->pool);
+ vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
index 7dba0469c92e..125750dc8b5c 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -20,6 +20,33 @@ struct vsp1_dl_manager;
#define VSP1_DL_FRAME_END_COMPLETED BIT(0)
#define VSP1_DL_FRAME_END_INTERNAL BIT(1)
+/**
+ * struct vsp1_dl_ext_cmd - Extended Display command
+ * @pool: pool to which this command belongs
+ * @free: entry in the pool of free commands list
+ * @opcode: command type opcode
+ * @flags: flags used by the command
+ * @cmds: array of command bodies for this extended cmd
+ * @num_cmds: quantity of commands in @cmds array
+ * @cmd_dma: DMA address of the command body
+ * @data: memory allocation for command-specific data
+ * @data_dma: DMA address for command-specific data
+ */
+struct vsp1_dl_ext_cmd {
+ struct vsp1_dl_cmd_pool *pool;
+ struct list_head free;
+
+ u8 opcode;
+ u32 flags;
+
+ struct vsp1_pre_ext_dl_body *cmds;
+ unsigned int num_cmds;
+ dma_addr_t cmd_dma;
+
+ void *data;
+ dma_addr_t data_dma;
+};
+
void vsp1_dlm_setup(struct vsp1_device *vsp1);
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
@@ -33,6 +60,7 @@ struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm);
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
void vsp1_dl_list_put(struct vsp1_dl_list *dl);
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl);
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl);
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal);
struct vsp1_dl_body_pool *
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index edb35a5c57ea..b9c0f695d002 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -670,9 +670,11 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
drm_pipe->width = cfg->width;
drm_pipe->height = cfg->height;
+ pipe->interlaced = cfg->interlaced;
- dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u\n",
- __func__, pipe_index, cfg->width, cfg->height);
+ dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u%s\n",
+ __func__, pipe_index, cfg->width, cfg->height,
+ pipe->interlaced ? "i" : "");
mutex_lock(&vsp1->drm->lock);
@@ -728,9 +730,6 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
*/
void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
{
- struct vsp1_device *vsp1 = dev_get_drvdata(dev);
-
- mutex_lock(&vsp1->drm->lock);
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
@@ -806,7 +805,7 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
*/
fmtinfo = vsp1_get_format_info(vsp1, cfg->pixelformat);
if (!fmtinfo) {
- dev_dbg(vsp1->dev, "Unsupport pixel format %08x for RPF\n",
+ dev_dbg(vsp1->dev, "Unsupported pixel format %08x for RPF\n",
cfg->pixelformat);
return -EINVAL;
}
@@ -846,6 +845,7 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
drm_pipe->crc = cfg->crc;
+ mutex_lock(&vsp1->drm->lock);
vsp1_du_pipeline_setup_inputs(vsp1, pipe);
vsp1_du_pipeline_configure(pipe);
mutex_unlock(&vsp1->drm->lock);
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 5d82f6ee56ea..b6619c9c18bb 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -265,7 +265,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/* Instantiate all the entities. */
- if (vsp1->info->features & VSP1_HAS_BRS) {
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS)) {
vsp1->brs = vsp1_brx_create(vsp1, VSP1_ENTITY_BRS);
if (IS_ERR(vsp1->brs)) {
ret = PTR_ERR(vsp1->brs);
@@ -275,7 +275,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
}
- if (vsp1->info->features & VSP1_HAS_BRU) {
+ if (vsp1_feature(vsp1, VSP1_HAS_BRU)) {
vsp1->bru = vsp1_brx_create(vsp1, VSP1_ENTITY_BRU);
if (IS_ERR(vsp1->bru)) {
ret = PTR_ERR(vsp1->bru);
@@ -285,7 +285,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
}
- if (vsp1->info->features & VSP1_HAS_CLU) {
+ if (vsp1_feature(vsp1, VSP1_HAS_CLU)) {
vsp1->clu = vsp1_clu_create(vsp1);
if (IS_ERR(vsp1->clu)) {
ret = PTR_ERR(vsp1->clu);
@@ -311,7 +311,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
- if (vsp1->info->features & VSP1_HAS_HGO && vsp1->info->uapi) {
+ if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
vsp1->hgo = vsp1_hgo_create(vsp1);
if (IS_ERR(vsp1->hgo)) {
ret = PTR_ERR(vsp1->hgo);
@@ -322,7 +322,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
&vsp1->entities);
}
- if (vsp1->info->features & VSP1_HAS_HGT && vsp1->info->uapi) {
+ if (vsp1_feature(vsp1, VSP1_HAS_HGT) && vsp1->info->uapi) {
vsp1->hgt = vsp1_hgt_create(vsp1);
if (IS_ERR(vsp1->hgt)) {
ret = PTR_ERR(vsp1->hgt);
@@ -353,7 +353,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
}
- if (vsp1->info->features & VSP1_HAS_LUT) {
+ if (vsp1_feature(vsp1, VSP1_HAS_LUT)) {
vsp1->lut = vsp1_lut_create(vsp1);
if (IS_ERR(vsp1->lut)) {
ret = PTR_ERR(vsp1->lut);
@@ -387,7 +387,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
}
- if (vsp1->info->features & VSP1_HAS_SRU) {
+ if (vsp1_feature(vsp1, VSP1_HAS_SRU)) {
vsp1->sru = vsp1_sru_create(vsp1);
if (IS_ERR(vsp1->sru)) {
ret = PTR_ERR(vsp1->sru);
@@ -537,7 +537,7 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);
- if (vsp1->info->features & VSP1_HAS_BRS)
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS))
vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
@@ -754,7 +754,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
.model = "VSP2-D",
.gen = 3,
- .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL,
.lif_count = 1,
.rpf_count = 5,
.uif_count = 1,
@@ -774,7 +774,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
.model = "VSP2-DL",
.gen = 3,
- .features = VSP1_HAS_BRS | VSP1_HAS_BRU,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
.lif_count = 2,
.rpf_count = 5,
.uif_count = 2,
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index 743d8f0db45c..ae646c9ef337 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -104,6 +104,7 @@ struct vsp1_partition {
* @entities: list of entities in the pipeline
* @stream_config: cached stream configuration for video pipelines
* @configured: when false the @stream_config shall be written to the hardware
+ * @interlaced: True when the pipeline is configured in interlaced mode
* @partitions: The number of partitions used to process one frame
* @partition: The current partition for configuration to process
* @part_table: The pre-calculated partitions used by the pipeline
@@ -142,6 +143,7 @@ struct vsp1_pipeline {
struct vsp1_dl_body *stream_config;
bool configured;
+ bool interlaced;
unsigned int partitions;
struct vsp1_partition *partition;
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 0d249ff9f564..3738ff2f7b85 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -28,6 +28,7 @@
#define VI6_SRESET_SRTS(n) (1 << (n))
#define VI6_STATUS 0x0038
+#define VI6_STATUS_FLD_STD(n) (1 << ((n) + 28))
#define VI6_STATUS_SYS_ACT(n) (1 << ((n) + 8))
#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
@@ -72,7 +73,7 @@
#define VI6_DL_SWAP_WDS (1 << 1)
#define VI6_DL_SWAP_BTS (1 << 0)
-#define VI6_DL_EXT_CTRL 0x011c
+#define VI6_DL_EXT_CTRL(n) (0x011c + (n) * 36)
#define VI6_DL_EXT_CTRL_NWE (1 << 16)
#define VI6_DL_EXT_CTRL_POLINT_MASK (0x3f << 8)
#define VI6_DL_EXT_CTRL_POLINT_SHIFT 8
@@ -80,6 +81,8 @@
#define VI6_DL_EXT_CTRL_EXPRI (1 << 4)
#define VI6_DL_EXT_CTRL_EXT (1 << 0)
+#define VI6_DL_EXT_AUTOFLD_INT BIT(0)
+
#define VI6_DL_BODY_SIZE 0x0120
#define VI6_DL_BODY_SIZE_UPD (1 << 24)
#define VI6_DL_BODY_SIZE_BS_MASK (0x1ffff << 0)
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 69e5fe6e6b50..f8005b60b9d2 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -20,6 +20,18 @@
#define RPF_MAX_WIDTH 8190
#define RPF_MAX_HEIGHT 8190
+/* Pre extended display list command data structure. */
+struct vsp1_extcmd_auto_fld_body {
+ u32 top_y0;
+ u32 bottom_y0;
+ u32 top_c0;
+ u32 bottom_c0;
+ u32 top_c1;
+ u32 bottom_c1;
+ u32 reserved0;
+ u32 reserved1;
+} __packed;
+
/* -----------------------------------------------------------------------------
* Device Access
*/
@@ -64,6 +76,14 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+ /*
+	 * pstride holds both STRIDE_Y and STRIDE_C; doubling the whole of
+	 * pstride is conveniently OK here as it doubles both values at once.
+ */
+ if (pipe->interlaced)
+ pstride *= 2;
+
vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_PSTRIDE, pstride);
/* Format */
@@ -100,6 +120,9 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
top = compose->top;
}
+ if (pipe->interlaced)
+ top /= 2;
+
vsp1_rpf_write(rpf, dlb, VI6_RPF_LOC,
(left << VI6_RPF_LOC_HCOORD_SHIFT) |
(top << VI6_RPF_LOC_VCOORD_SHIFT));
@@ -169,6 +192,36 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
}
+static void vsp1_rpf_configure_autofld(struct vsp1_rwpf *rpf,
+ struct vsp1_dl_list *dl)
+{
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ struct vsp1_dl_ext_cmd *cmd;
+ struct vsp1_extcmd_auto_fld_body *auto_fld;
+ u32 offset_y, offset_c;
+
+ cmd = vsp1_dl_get_pre_cmd(dl);
+ if (WARN_ONCE(!cmd, "Failed to obtain an autofld cmd"))
+ return;
+
+ /* Re-index our auto_fld to match the current RPF. */
+ auto_fld = cmd->data;
+ auto_fld = &auto_fld[rpf->entity.index];
+
+ auto_fld->top_y0 = rpf->mem.addr[0];
+ auto_fld->top_c0 = rpf->mem.addr[1];
+ auto_fld->top_c1 = rpf->mem.addr[2];
+
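+	/*
+	 * The bottom field lies one line below the top field in the same
+	 * buffer, so offset each plane by one line stride.
+	 */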
+ offset_y = format->plane_fmt[0].bytesperline;
+ offset_c = format->plane_fmt[1].bytesperline;
+
+ auto_fld->bottom_y0 = rpf->mem.addr[0] + offset_y;
+ auto_fld->bottom_c0 = rpf->mem.addr[1] + offset_c;
+ auto_fld->bottom_c1 = rpf->mem.addr[2] + offset_c;
+
+ cmd->flags |= VI6_DL_EXT_AUTOFLD_INT | BIT(16 + rpf->entity.index);
+}
+
static void rpf_configure_frame(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
@@ -221,6 +274,11 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
crop.left += pipe->partition->rpf.left;
}
+ if (pipe->interlaced) {
+ crop.height = round_down(crop.height / 2, fmtinfo->vsub);
+ crop.top = round_down(crop.top / 2, fmtinfo->vsub);
+ }
+
vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_BSIZE,
(crop.width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
(crop.height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
@@ -249,9 +307,17 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
fmtinfo->swap_uv)
swap(mem.addr[1], mem.addr[2]);
- vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
- vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
- vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
+	/*
+	 * Interlaced pipelines use the extended pre-command to set
+	 * SRCM_ADDR_{Y,C0,C1}.
+	 */
+ if (pipe->interlaced) {
+ vsp1_rpf_configure_autofld(rpf, dl);
+ } else {
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
+ }
}
static void rpf_partition(struct vsp1_entity *entity,
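
The auto-fld addresses written above drive the hardware's field fetch for interlaced pipelines: the bottom-field address starts one line below the top-field address, and the doubled SRCM_PSTRIDE then makes each field step two lines per fetched row. A minimal sketch of the arithmetic, with illustrative values only (nothing here is taken from the driver):

	#include <linux/types.h>

	/* Worked example: NV12 frame, 1920 bytes per luma line. */
	static void demo_autofld_addrs(u32 base_y, u32 base_c, u32 stride)
	{
		u32 top_y0    = base_y;			/* lines 0, 2, 4, ... */
		u32 bottom_y0 = base_y + stride;	/* lines 1, 3, 5, ... */
		u32 top_c0    = base_c;
		u32 bottom_c0 = base_c + stride;

		/* with SRCM_PSTRIDE doubled, each field advances two lines */
		(void)top_y0; (void)bottom_y0; (void)top_c0; (void)bottom_c0;
	}
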
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 23c8f706b3f2..c2a1a7f97e26 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -141,13 +141,13 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
if (wpf->entity.index != 0) {
/* Only WPF0 supports flipping. */
num_flip_ctrls = 0;
- } else if (vsp1->info->features & VSP1_HAS_WPF_HFLIP) {
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP)) {
/*
* When horizontal flip is supported the WPF implements three
* controls (horizontal flip, vertical flip and rotation).
*/
num_flip_ctrls = 3;
- } else if (vsp1->info->features & VSP1_HAS_WPF_VFLIP) {
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_VFLIP)) {
/*
* When only vertical flip is supported the WPF implements a
* single control (vertical flip).
@@ -276,7 +276,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSWAP, fmtinfo->swap);
- if (vsp1->info->features & VSP1_HAS_WPF_HFLIP &&
+ if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) &&
wpf->entity.index == 0)
vsp1_wpf_write(wpf, dlb, VI6_WPF_ROT_CTRL,
VI6_WPF_ROT_CTRL_LN16 |
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 8f9f8dfc3497..11aa94f189cb 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1096,7 +1096,7 @@ static __poll_t wl1273_fm_fops_poll(struct file *file,
struct wl1273_core *core = radio->core;
if (radio->owner && radio->owner != file)
- return -EBUSY;
+ return EPOLLERR;
radio->owner = file;
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
index 05c66701a899..1ebbf0217142 100644
--- a/drivers/media/radio/si4713/radio-usb-si4713.c
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -370,9 +370,6 @@ static int si4713_transfer(struct i2c_adapter *i2c_adapter,
int retval = -EINVAL;
int i;
- if (num <= 0)
- return 0;
-
for (i = 0; i < num; i++) {
if (msgs[i].flags & I2C_M_RD)
retval = si4713_i2c_read(radio, msgs[i].buf, msgs[i].len);
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 40826bba06b6..8b97fd1f0cea 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -174,6 +174,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
rcu_assign_pointer(raw->progs, new_array);
bpf_prog_array_free(old_array);
+ bpf_prog_put(prog);
unlock:
mutex_unlock(&ir_raw_handler_lock);
return ret;
@@ -195,41 +196,33 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
*/
void lirc_bpf_free(struct rc_dev *rcdev)
{
- struct bpf_prog **progs;
+ struct bpf_prog_array_item *item;
if (!rcdev->raw->progs)
return;
- progs = rcu_dereference(rcdev->raw->progs)->progs;
- while (*progs)
- bpf_prog_put(*progs++);
+ item = rcu_dereference(rcdev->raw->progs)->items;
+ while (item->prog) {
+ bpf_prog_put(item->prog);
+ item++;
+ }
bpf_prog_array_free(rcdev->raw->progs);
}
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
- struct bpf_prog *prog;
struct rc_dev *rcdev;
int ret;
if (attr->attach_flags)
return -EINVAL;
- prog = bpf_prog_get_type(attr->attach_bpf_fd,
- BPF_PROG_TYPE_LIRC_MODE2);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
-
rcdev = rc_dev_get_from_fd(attr->target_fd);
- if (IS_ERR(rcdev)) {
- bpf_prog_put(prog);
+ if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
- }
ret = lirc_bpf_attach(rcdev, prog);
- if (ret)
- bpf_prog_put(prog);
put_device(&rcdev->dev);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 2e0066b1a31c..e7948908e78c 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,13 +30,13 @@ static int ir_raw_event_thread(void *data)
while (kfifo_out(&raw->kfifo, &ev, 1)) {
if (is_timing_event(ev)) {
if (ev.duration == 0)
- dev_err(&dev->dev, "nonsensical timing event of duration 0");
+ dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
if (is_timing_event(raw->prev_ev) &&
!is_transition(&ev, &raw->prev_ev))
- dev_err(&dev->dev, "two consecutive events of type %s",
- TO_STR(ev.pulse));
+ dev_warn_once(&dev->dev, "two consecutive events of type %s",
+ TO_STR(ev.pulse));
if (raw->prev_ev.reset && ev.pulse == 0)
- dev_err(&dev->dev, "timing event after reset should be pulse");
+ dev_warn_once(&dev->dev, "timing event after reset should be pulse");
}
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (dev->enabled_protocols &
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 2e222d9ee01f..ca68e1d2b2f9 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -679,6 +679,14 @@ static void ir_timer_repeat(struct timer_list *t)
spin_unlock_irqrestore(&dev->keylock, flags);
}
+static unsigned int repeat_period(int protocol)
+{
+ if (protocol >= ARRAY_SIZE(protocols))
+ return 100;
+
+ return protocols[protocol].repeat_period;
+}
+
/**
* rc_repeat() - signals that a key is still pressed
* @dev: the struct rc_dev descriptor of the device
@@ -691,7 +699,7 @@ void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
- msecs_to_jiffies(protocols[dev->last_protocol].repeat_period);
+ msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
.keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED,
@@ -803,7 +811,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
if (dev->keypressed) {
dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
- msecs_to_jiffies(protocols[protocol].repeat_period);
+ msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
spin_unlock_irqrestore(&dev->keylock, flags);
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index b5b9d87ba75c..fbec1a13dc6a 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -610,9 +610,9 @@ static int e4000_dvb_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops e4000_dvb_tuner_ops = {
.info = {
- .name = "Elonics E4000",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
+ .name = "Elonics E4000",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = e4000_dvb_init,
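
This is the first of many identical conversions in this series: the old Hz-valued (or, for satellite tuners, kHz-valued) fields become explicitly Hz-denominated *_hz fields scaled with MHz/kHz multiplier macros, presumably provided by dvb_frontend.h as plain 1000000/1000 factors. A compile-time sanity check of the equivalence (the macro values are an assumption, not read from the tree):

	#include <linux/build_bug.h>

	/* Assumed definitions of the multiplier macros. */
	#define kHz 1000
	#define MHz 1000000

	static inline void demo_unit_check(void)
	{
		BUILD_BUG_ON(174 * MHz != 174000000);	/* e4000 min, unchanged */
		BUILD_BUG_ON(862 * MHz != 862000000);	/* e4000 max, unchanged */
		BUILD_BUG_ON(50 * kHz != 50000);	/* a common step value */
	}
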
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 145407dee3db..a983899c6b0b 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -472,10 +472,10 @@ static int fc0011_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
static const struct dvb_tuner_ops fc0011_tuner_ops = {
.info = {
- .name = "Fitipower FC0011",
+ .name = "Fitipower FC0011",
- .frequency_min = 45000000,
- .frequency_max = 1000000000,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 1000 * MHz,
},
.release = fc0011_release,
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 625ac6f51c39..e992b98ae5bc 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -415,11 +415,10 @@ exit:
static const struct dvb_tuner_ops fc0012_tuner_ops = {
.info = {
- .name = "Fitipower FC0012",
+ .name = "Fitipower FC0012",
- .frequency_min = 37000000, /* estimate */
- .frequency_max = 862000000, /* estimate */
- .frequency_step = 0,
+ .frequency_min_hz = 37 * MHz, /* estimate */
+ .frequency_max_hz = 862 * MHz, /* estimate */
},
.release = fc0012_release,
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index e606118d1a9b..fc62afb1450d 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -574,11 +574,10 @@ exit:
static const struct dvb_tuner_ops fc0013_tuner_ops = {
.info = {
- .name = "Fitipower FC0013",
+ .name = "Fitipower FC0013",
- .frequency_min = 37000000, /* estimate */
- .frequency_max = 1680000000, /* CHECK */
- .frequency_step = 0,
+ .frequency_min_hz = 37 * MHz, /* estimate */
+ .frequency_max_hz = 1680 * MHz, /* CHECK */
},
.release = fc0013_release,
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 743184ae0d26..db26892aac84 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -355,9 +355,9 @@ static int fc2580_dvb_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops fc2580_dvb_tuner_ops = {
.info = {
- .name = "FCI FC2580",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
+ .name = "FCI FC2580",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = fc2580_dvb_init,
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
index 27e5bc1c3cb5..b5eb39921e95 100644
--- a/drivers/media/tuners/it913x.c
+++ b/drivers/media/tuners/it913x.c
@@ -375,9 +375,9 @@ err:
static const struct dvb_tuner_ops it913x_tuner_ops = {
.info = {
- .name = "ITE IT913X",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
+ .name = "ITE IT913X",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = it913x_init,
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 9f3e0fd4cad9..3df2f23a40be 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -569,9 +569,9 @@ err:
static const struct dvb_tuner_ops m88rs6000t_tuner_ops = {
.info = {
- .name = "Montage M88RS6000 Internal Tuner",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .name = "Montage M88RS6000 Internal Tuner",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = m88rs6000t_init,
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 20ceb72e530b..721d8f722efb 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -377,10 +377,10 @@ static void max2165_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops max2165_tuner_ops = {
.info = {
- .name = "Maxim MAX2165",
- .frequency_min = 470000000,
- .frequency_max = 862000000,
- .frequency_step = 50000,
+ .name = "Maxim MAX2165",
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = max2165_release,
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index 403c6b2aa53b..2023e081d9ad 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -300,10 +300,10 @@ static int mc44s803_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops mc44s803_tuner_ops = {
.info = {
- .name = "Freescale MC44S803",
- .frequency_min = 48000000,
- .frequency_max = 1000000000,
- .frequency_step = 100000,
+ .name = "Freescale MC44S803",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 1000 * MHz,
+ .frequency_step_hz = 100 * kHz,
},
.release = mc44s803_release,
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 3d3c6815b6a7..4ace77cfe285 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -395,10 +395,10 @@ static void mt2060_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mt2060_tuner_ops = {
.info = {
- .name = "Microtune MT2060",
- .frequency_min = 48000000,
- .frequency_max = 860000000,
- .frequency_step = 50000,
+ .name = "Microtune MT2060",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mt2060_release,
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 80dc3e241b4a..f4c8a7293ebb 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -2200,10 +2200,9 @@ static int mt2063_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
static const struct dvb_tuner_ops mt2063_ops = {
.info = {
.name = "MT2063 Silicon Tuner",
- .frequency_min = 45000000,
- .frequency_max = 865000000,
- .frequency_step = 0,
- },
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 865 * MHz,
+ },
.init = mt2063_init,
.sleep = MT2063_Sleep,
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 659bf19dc434..086a7b7cf634 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -235,10 +235,10 @@ static void mt2131_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mt2131_tuner_ops = {
.info = {
- .name = "Microtune MT2131",
- .frequency_min = 48000000,
- .frequency_max = 860000000,
- .frequency_step = 50000,
+ .name = "Microtune MT2131",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mt2131_release,
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index f4545b7f5da2..e6cc78720de4 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -304,10 +304,10 @@ static void mt2266_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mt2266_tuner_ops = {
.info = {
- .name = "Microtune MT2266",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_step = 50000,
+ .name = "Microtune MT2266",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mt2266_release,
.init = mt2266_init,
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
index 57b0e4862aaf..c628435a1b06 100644
--- a/drivers/media/tuners/mxl301rf.c
+++ b/drivers/media/tuners/mxl301rf.c
@@ -271,8 +271,8 @@ static const struct dvb_tuner_ops mxl301rf_ops = {
.info = {
.name = "MaxLinear MxL301RF",
- .frequency_min = 93000000,
- .frequency_max = 803142857,
+ .frequency_min_hz = 93 * MHz,
+ .frequency_max_hz = 803 * MHz + 142857,
},
.init = mxl301rf_init,
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 355ef2959b7d..ec584316c812 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4075,10 +4075,10 @@ static void mxl5005s_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mxl5005s_tuner_ops = {
.info = {
- .name = "MaxLinear MXL5005S",
- .frequency_min = 48000000,
- .frequency_max = 860000000,
- .frequency_step = 50000,
+ .name = "MaxLinear MXL5005S",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mxl5005s_release,
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 4081fd97c3b2..54d9226aaff7 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -59,8 +59,6 @@ MODULE_PARM_DESC(debug, "set debug level");
/* ------------------------------------------------------------------------- */
-#define MHz 1000000
-
enum mxl5007t_mode {
MxL_MODE_ISDBT = 0,
MxL_MODE_DVBT = 1,
diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c
index b4495cc1626b..008ad870c00f 100644
--- a/drivers/media/tuners/qm1d1b0004.c
+++ b/drivers/media/tuners/qm1d1b0004.c
@@ -186,8 +186,8 @@ static const struct dvb_tuner_ops qm1d1b0004_ops = {
.info = {
.name = "Sharp qm1d1b0004",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = qm1d1b0004_init,
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 642a065b9a07..83ca5dc047ea 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -388,8 +388,8 @@ static const struct dvb_tuner_ops qm1d1c0042_ops = {
.info = {
.name = "Sharp QM1D1C0042",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = qm1d1c0042_init,
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index b92be882ab3c..4565c06b1617 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -394,10 +394,10 @@ static int qt1010_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops qt1010_tuner_ops = {
.info = {
- .name = "Quantek QT1010",
- .frequency_min = QT1010_MIN_FREQ,
- .frequency_max = QT1010_MAX_FREQ,
- .frequency_step = QT1010_STEP,
+ .name = "Quantek QT1010",
+ .frequency_min_hz = QT1010_MIN_FREQ,
+ .frequency_max_hz = QT1010_MAX_FREQ,
+ .frequency_step_hz = QT1010_STEP,
},
.release = qt1010_release,
diff --git a/drivers/media/tuners/qt1010_priv.h b/drivers/media/tuners/qt1010_priv.h
index 4cb78ecc8985..f25324c63067 100644
--- a/drivers/media/tuners/qt1010_priv.h
+++ b/drivers/media/tuners/qt1010_priv.h
@@ -71,12 +71,14 @@ reg def meaning
2f 00 ? not used?
*/
-#define QT1010_STEP 125000 /* 125 kHz used by Windows drivers,
- hw could be more precise but we don't
- know how to use */
-#define QT1010_MIN_FREQ 48000000 /* 48 MHz */
-#define QT1010_MAX_FREQ 860000000 /* 860 MHz */
-#define QT1010_OFFSET 1246000000 /* 1246 MHz */
+#define QT1010_STEP (125 * kHz) /*
+				 * used by the Windows drivers;
+				 * hw could be more precise, but
+				 * we don't know how to use it
+				 */
+#define QT1010_MIN_FREQ (48 * MHz)
+#define QT1010_MAX_FREQ (860 * MHz)
+#define QT1010_OFFSET (1246 * MHz)
#define QT1010_WR 0
#define QT1010_RD 1
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 3e14b9e2e763..ba4be08a8551 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -2297,9 +2297,9 @@ static void r820t_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops r820t_tuner_ops = {
.info = {
- .name = "Rafael Micro R820T",
- .frequency_min = 42000000,
- .frequency_max = 1002000000,
+ .name = "Rafael Micro R820T",
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 1002 * MHz,
},
.init = r820t_init,
.release = r820t_release,
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 9e34d31d724d..a08d8fe2bb1b 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -387,9 +387,9 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops si2157_ops = {
.info = {
- .name = "Silicon Labs Si2141/Si2146/2147/2148/2157/2158",
- .frequency_min = 42000000,
- .frequency_max = 870000000,
+ .name = "Silicon Labs Si2141/Si2146/2147/2148/2157/2158",
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 870 * MHz,
},
.init = si2157_init,
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 7b8068354fea..8326106ec2e3 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -175,11 +175,11 @@ static int tda18212_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tda18212_tuner_ops = {
.info = {
- .name = "NXP TDA18212",
+ .name = "NXP TDA18212",
- .frequency_min = 48000000,
- .frequency_max = 864000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.set_params = tda18212_set_params,
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index c56fcf5d48e3..cbbd4d5e15da 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -269,11 +269,11 @@ static void tda18218_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda18218_tuner_ops = {
.info = {
- .name = "NXP TDA18218",
+ .name = "NXP TDA18218",
- .frequency_min = 174000000,
- .frequency_max = 864000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = tda18218_release,
diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c
index 20d12b063380..20d10ef45ab6 100644
--- a/drivers/media/tuners/tda18250.c
+++ b/drivers/media/tuners/tda18250.c
@@ -740,9 +740,9 @@ static int tda18250_sleep(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda18250_ops = {
.info = {
- .name = "NXP TDA18250",
- .frequency_min = 42000000,
- .frequency_max = 870000000,
+ .name = "NXP TDA18250",
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 870 * MHz,
},
.init = tda18250_init,
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index 7e81cd887c13..054b3b747dae 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -225,7 +225,7 @@ static int __tda18271_write_regs(struct dvb_frontend *fe, int idx, int len,
*/
if (lock_i2c) {
tda18271_i2c_gate_ctrl(fe, 1);
- i2c_lock_adapter(priv->i2c_props.adap);
+ i2c_lock_bus(priv->i2c_props.adap, I2C_LOCK_SEGMENT);
}
while (len) {
if (max > len)
@@ -246,7 +246,7 @@ static int __tda18271_write_regs(struct dvb_frontend *fe, int idx, int len,
len -= max;
}
if (lock_i2c) {
- i2c_unlock_adapter(priv->i2c_props.adap);
+ i2c_unlock_bus(priv->i2c_props.adap, I2C_LOCK_SEGMENT);
tda18271_i2c_gate_ctrl(fe, 0);
}
@@ -300,7 +300,7 @@ int tda18271_init_regs(struct dvb_frontend *fe)
* as those could cause bad things
*/
tda18271_i2c_gate_ctrl(fe, 1);
- i2c_lock_adapter(priv->i2c_props.adap);
+ i2c_lock_bus(priv->i2c_props.adap, I2C_LOCK_SEGMENT);
/* initialize registers */
switch (priv->id) {
@@ -516,7 +516,7 @@ int tda18271_init_regs(struct dvb_frontend *fe)
/* synchronize */
__tda18271_write_regs(fe, R_EP1, 1, false);
- i2c_unlock_adapter(priv->i2c_props.adap);
+ i2c_unlock_bus(priv->i2c_props.adap, I2C_LOCK_SEGMENT);
tda18271_i2c_gate_ctrl(fe, 0);
return 0;
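
The i2c_lock_adapter() → i2c_lock_bus(..., I2C_LOCK_SEGMENT) conversion locks only the bus segment this client sits on rather than the root adapter, so a device behind an I2C mux no longer blocks unrelated segments while the gate is held open. A generic sketch of the lock-then-raw-transfer pattern (hypothetical helper, not from this driver):

	#include <linux/i2c.h>

	/*
	 * Hold the segment lock across several messages so no other client
	 * can interleave; __i2c_transfer() skips the core's own locking.
	 */
	static int demo_locked_xfer(struct i2c_adapter *adap,
				    struct i2c_msg *msgs, int num)
	{
		int ret;

		i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
		ret = __i2c_transfer(adap, msgs, num);
		i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
		return ret;
	}
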
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 147155553648..4d69029229e4 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -1240,9 +1240,9 @@ static int tda18271_set_config(struct dvb_frontend *fe, void *priv_cfg)
static const struct dvb_tuner_ops tda18271_tuner_ops = {
.info = {
.name = "NXP TDA18271HD",
- .frequency_min = 45000000,
- .frequency_max = 864000000,
- .frequency_step = 62500
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 62500
},
.init = tda18271_init,
.sleep = tda18271_sleep,
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index 8400808f8f7f..4391dabba510 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -816,9 +816,9 @@ static int tda827x_initial_sleep(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda827xo_tuner_ops = {
.info = {
.name = "Philips TDA827X",
- .frequency_min = 55000000,
- .frequency_max = 860000000,
- .frequency_step = 250000
+ .frequency_min_hz = 55 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 250 * kHz
},
.release = tda827x_release,
.init = tda827x_initial_init,
@@ -832,9 +832,9 @@ static const struct dvb_tuner_ops tda827xo_tuner_ops = {
static const struct dvb_tuner_ops tda827xa_tuner_ops = {
.info = {
.name = "Philips TDA827XA",
- .frequency_min = 44000000,
- .frequency_max = 906000000,
- .frequency_step = 62500
+ .frequency_min_hz = 44 * MHz,
+ .frequency_max_hz = 906 * MHz,
+ .frequency_step_hz = 62500
},
.release = tda827x_release,
.init = tda827x_init,
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 9d70378fe2d3..5c89a130b47d 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -164,9 +164,9 @@ static int tua9001_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tua9001_tuner_ops = {
.info = {
- .name = "Infineon TUA9001",
- .frequency_min = 170000000,
- .frequency_max = 862000000,
+ .name = "Infineon TUA9001",
+ .frequency_min_hz = 170 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = tua9001_init,
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 36b88f820239..29c1473f2e9f 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -670,6 +670,7 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
int rc, j;
struct tuner_params *t_params;
unsigned int freq = params->frequency;
+ bool mono = params->audmode == V4L2_TUNER_MODE_MONO;
tun = priv->tun;
@@ -736,8 +737,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
config |= TDA9887_PORT2_ACTIVE;
if (t_params->intercarrier_mode)
config |= TDA9887_INTERCARRIER;
-/* if (t_params->port1_set_for_fm_mono)
- config &= ~TDA9887_PORT1_ACTIVE;*/
+ if (t_params->port1_set_for_fm_mono && mono)
+ config &= ~TDA9887_PORT1_ACTIVE;
if (t_params->fm_gain_normal)
config |= TDA9887_GAIN_NORMAL;
if (t_params->radio_if == 2)
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 84744e138982..aa6861dcd3fd 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -376,9 +376,8 @@ static int load_all_firmwares(struct dvb_frontend *fe,
tuner_err("Firmware type ");
dump_firm_type(type);
printk(KERN_CONT
- "(%x), id %llx is corrupted (size=%d, expected %d)\n",
- type, (unsigned long long)id,
- (unsigned)(endp - p), size);
+ "(%x), id %llx is corrupted (size=%zd, expected %d)\n",
+ type, (unsigned long long)id, (endp - p), size);
goto corrupt;
}
@@ -616,8 +615,8 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
}
if ((size + p > endp)) {
- tuner_err("missing bytes: need %d, have %d\n",
- size, (int)(endp - p));
+ tuner_err("missing bytes: need %d, have %zd\n",
+ size, (endp - p));
return -EINVAL;
}
@@ -1440,9 +1439,9 @@ unlock:
static const struct dvb_tuner_ops xc2028_dvb_tuner_ops = {
.info = {
.name = "Xceive XC3028",
- .frequency_min = 42000000,
- .frequency_max = 864000000,
- .frequency_step = 50000,
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.set_config = xc2028_set_config,
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index f0fa8da08afa..eb6d65dae748 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -398,8 +398,8 @@ static int xc_set_rf_frequency(struct xc4000_priv *priv, u32 freq_hz)
dprintk(1, "%s(%u)\n", __func__, freq_hz);
- if ((freq_hz > xc4000_tuner_ops.info.frequency_max) ||
- (freq_hz < xc4000_tuner_ops.info.frequency_min))
+ if ((freq_hz > xc4000_tuner_ops.info.frequency_max_hz) ||
+ (freq_hz < xc4000_tuner_ops.info.frequency_min_hz))
return -EINVAL;
freq_code = (u16)(freq_hz / 15625);
@@ -815,9 +815,9 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
p += sizeof(size);
if (!size || size > endp - p) {
- printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%d, expected %d)\n",
+ printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%zd, expected %d)\n",
type, (unsigned long long)id,
- (unsigned)(endp - p), size);
+ endp - p, size);
goto corrupt;
}
@@ -1635,10 +1635,10 @@ static void xc4000_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops xc4000_tuner_ops = {
.info = {
- .name = "Xceive XC4000",
- .frequency_min = 1000000,
- .frequency_max = 1023000000,
- .frequency_step = 50000,
+ .name = "Xceive XC4000",
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1023 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = xc4000_release,
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index f7a8d05d1758..f6b65278e502 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -460,8 +460,8 @@ static int xc_set_rf_frequency(struct xc5000_priv *priv, u32 freq_hz)
dprintk(1, "%s(%u)\n", __func__, freq_hz);
- if ((freq_hz > xc5000_tuner_ops.info.frequency_max) ||
- (freq_hz < xc5000_tuner_ops.info.frequency_min))
+ if ((freq_hz > xc5000_tuner_ops.info.frequency_max_hz) ||
+ (freq_hz < xc5000_tuner_ops.info.frequency_min_hz))
return -EINVAL;
freq_code = (u16)(freq_hz / 15625);
@@ -1350,10 +1350,10 @@ static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg)
static const struct dvb_tuner_ops xc5000_tuner_ops = {
.info = {
- .name = "Xceive XC5000",
- .frequency_min = 1000000,
- .frequency_max = 1023000000,
- .frequency_step = 50000,
+ .name = "Xceive XC5000",
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1023 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = xc5000_release,
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 70e187971590..62b45062b1e6 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -133,7 +133,7 @@ static void au0828_irq_callback(struct urb *urb)
au0828_isocdbg("au0828_irq_callback called: status kill\n");
return;
default: /* unknown error */
- au0828_isocdbg("urb completition error %d.\n", urb->status);
+ au0828_isocdbg("urb completion error %d.\n", urb->status);
break;
}
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 0f13192634c7..9e5b3e7c3ef5 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -15,7 +15,7 @@ config VIDEO_CX231XX
config VIDEO_CX231XX_RC
bool "Conexant cx231xx Remote Controller additional support"
- depends on RC_CORE
+ depends on RC_CORE=y || RC_CORE=VIDEO_CX231XX
depends on VIDEO_CX231XX
default y
---help---
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index c4a84fb930b6..32ee7b3f21c9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -112,7 +112,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dev_dbg(dev->dev, "urb completition error %d.\n",
+ dev_dbg(dev->dev, "urb completion error %d.\n",
urb->status);
break;
}
@@ -126,6 +126,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
stride = runtime->frame_bits >> 3;
for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned long flags;
int length = urb->iso_frame_desc[i].actual_length /
stride;
cp = (unsigned char *)urb->transfer_buffer +
@@ -148,7 +149,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
length * stride);
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
dev->adev.hwptr_done_capture += length;
if (dev->adev.hwptr_done_capture >=
@@ -163,7 +164,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
runtime->period_size;
period_elapsed = 1;
}
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
if (period_elapsed)
snd_pcm_period_elapsed(substream);
@@ -202,7 +203,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dev_dbg(dev->dev, "urb completition error %d.\n",
+ dev_dbg(dev->dev, "urb completion error %d.\n",
urb->status);
break;
}
@@ -216,6 +217,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
stride = runtime->frame_bits >> 3;
if (1) {
+ unsigned long flags;
int length = urb->actual_length /
stride;
cp = (unsigned char *)urb->transfer_buffer;
@@ -234,7 +236,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
length * stride);
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
dev->adev.hwptr_done_capture += length;
if (dev->adev.hwptr_done_capture >=
@@ -249,7 +251,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
runtime->period_size;
period_elapsed = 1;
}
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
if (period_elapsed)
snd_pcm_period_elapsed(substream);
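
The switch to snd_pcm_stream_lock_irqsave() (and to spin_lock_irqsave() in the files below) matters because URB completion handlers can run in hard-IRQ context depending on the host controller driver; the _irqsave variants make the same critical section safe from any calling context. The generic shape of the pattern, with a hypothetical struct for illustration:

	#include <linux/spinlock.h>

	struct demo_dev {
		spinlock_t slock;
		unsigned int hwptr_done;
	};

	/*
	 * Safe from process, softirq, or hard-IRQ context: the saved flags
	 * restore exactly the interrupt state the caller entered with.
	 */
	static void demo_advance_hwptr(struct demo_dev *dev, unsigned int n)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->slock, flags);
		dev->hwptr_done += n;
		spin_unlock_irqrestore(&dev->slock, flags);
	}
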
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 53d846dea3d2..493c2dca6244 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -799,6 +799,7 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -815,9 +816,9 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
}
/* Copy data from URB */
- spin_lock(&dev->video_mode.slock);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
dev->video_mode.isoc_ctl.isoc_copy(dev, urb);
- spin_unlock(&dev->video_mode.slock);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
@@ -844,6 +845,7 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
+ unsigned long flags;
switch (urb->status) {
case 0: /* success */
@@ -862,9 +864,9 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
}
/* Copy data from URB */
- spin_lock(&dev->video_mode.slock);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
- spin_unlock(&dev->video_mode.slock);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
/* Reset urb buffers */
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 6e1bef2a45bb..15a91169e749 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -376,8 +376,6 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
struct cx231xx *dev = bus->dev;
int addr, rc, i, byte;
- if (num <= 0)
- return 0;
mutex_lock(&dev->i2c_lock);
for (i = 0; i < num; i++) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index b621cf1aa96b..10b2eb7338ad 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -305,6 +305,7 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
+ unsigned long flags;
switch (urb->status) {
case 0: /* success */
@@ -316,14 +317,14 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
return;
default: /* error */
dev_err(dev->dev,
- "urb completition error %d.\n", urb->status);
+ "urb completion error %d.\n", urb->status);
break;
}
/* Copy data from URB */
- spin_lock(&dev->vbi_mode.slock);
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
- spin_unlock(&dev->vbi_mode.slock);
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
/* Reset status */
urb->status = 0;
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 082b8d67244b..df4412245a8a 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -96,10 +96,13 @@ config DVB_USB_GL861
tristate "Genesys Logic GL861 USB2.0 support"
depends on DVB_USB_V2
select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TC90522 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0
- receiver with USB ID 0db0:5581.
+	  receiver with USB ID 0db0:5581, and the Friio White ISDB-T
+	  receiver with USB ID 7a69:0001.
config DVB_USB_LME2510
tristate "LME DM04/QQBOX DVB-S USB2.0 support"
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index 9d154fdae45b..3338b21d8b25 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -6,10 +6,14 @@
*
* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
*/
+#include <linux/string.h>
+
#include "gl861.h"
#include "zl10353.h"
#include "qt1010.h"
+#include "tc90522.h"
+#include "dvb-pll.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -26,10 +30,14 @@ static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
if (wo) {
req = GL861_REQ_I2C_WRITE;
type = GL861_WRITE;
+ buf = kmemdup(wbuf, wlen, GFP_KERNEL);
} else { /* rw */
req = GL861_REQ_I2C_READ;
type = GL861_READ;
+ buf = kmalloc(rlen, GFP_KERNEL);
}
+ if (!buf)
+ return -ENOMEM;
switch (wlen) {
case 1:
@@ -42,24 +50,19 @@ static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
default:
dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n",
KBUILD_MODNAME, wlen);
+ kfree(buf);
return -EINVAL;
}
- buf = NULL;
- if (rlen > 0) {
- buf = kmalloc(rlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
+
usleep_range(1000, 2000); /* avoid I2C errors */
ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
value, index, buf, rlen, 2000);
- if (rlen > 0) {
- if (ret > 0)
- memcpy(rbuf, buf, rlen);
- kfree(buf);
- }
+ if (!wo && ret > 0)
+ memcpy(rbuf, buf, rlen);
+
+ kfree(buf);
return ret;
}
@@ -162,11 +165,478 @@ static struct dvb_usb_device_properties gl861_props = {
}
};
+
+/*
+ * For Friio
+ */
+
+struct friio_priv {
+ struct i2c_adapter *demod_sub_i2c;
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+ struct i2c_adapter tuner_adap;
+};
+
+struct friio_config {
+ struct i2c_board_info demod_info;
+ struct tc90522_config demod_cfg;
+
+ struct i2c_board_info tuner_info;
+ struct dvb_pll_config tuner_cfg;
+};
+
+static const struct friio_config friio_config = {
+ .demod_info = { I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x18), },
+ .tuner_info = { I2C_BOARD_INFO("tua6034_friio", 0x60), },
+};
+
+/*
+ * For another type of I2C transaction: the message is sent as a USB
+ * control read/write with a data stage. Used in Friio init/config.
+ */
+static int
+gl861_i2c_write_ex(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(wlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, wbuf, wlen);
+ ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
+ GL861_REQ_I2C_RAW, GL861_WRITE,
+ addr << (8 + 1), 0x0100, buf, wlen, 2000);
+ kfree(buf);
+ return ret;
+}
+
+static int
+gl861_i2c_read_ex(struct dvb_usb_device *d, u8 addr, u8 *rbuf, u16 rlen)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(rlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
+ GL861_REQ_I2C_READ, GL861_READ,
+ addr << (8 + 1), 0x0100, buf, rlen, 2000);
+ if (ret > 0 && rlen > 0)
+		memcpy(rbuf, buf, rlen);
+ kfree(buf);
+ return ret;
+}
+
+/* For I2C transactions to the tuner of Friio (dvb_pll).
+ *
+ * Friio uses an irregular USB encapsulation for tuner I2C transactions:
+ * write transactions are encapsulated with a different USB 'request' value.
+ *
+ * Although all transactions are sent via the demod (tc90522), and the
+ * demod provides an I2C adapter for them, that adapter cannot be used on
+ * Friio: it assumes the demod's own parent adapter, which ignores the
+ * request value and uses the same one for both reads and writes.
+ * So we define a dedicated I2C adapter here.
+ */
+
+static int
+friio_i2c_tuner_read(struct dvb_usb_device *d, struct i2c_msg *msg)
+{
+ struct friio_priv *priv;
+ u8 addr;
+
+ priv = d_to_priv(d);
+ addr = priv->i2c_client_demod->addr;
+ return gl861_i2c_read_ex(d, addr, msg->buf, msg->len);
+}
+
+static int
+friio_i2c_tuner_write(struct dvb_usb_device *d, struct i2c_msg *msg)
+{
+ u8 *buf;
+ int ret;
+ struct friio_priv *priv;
+
+ priv = d_to_priv(d);
+
+ if (msg->len < 1)
+ return -EINVAL;
+
+ buf = kmalloc(msg->len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ buf[0] = msg->addr << 1;
+ memcpy(buf + 1, msg->buf, msg->len);
+
+ ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
+ GL861_REQ_I2C_RAW, GL861_WRITE,
+ priv->i2c_client_demod->addr << (8 + 1),
+ 0xFE, buf, msg->len + 1, 2000);
+ kfree(buf);
+ return ret;
+}
+
+static int friio_tuner_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int i;
+
+ if (num > 2)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ for (i = 0; i < num; i++) {
+ int ret;
+
+ if (msg[i].flags & I2C_M_RD)
+ ret = friio_i2c_tuner_read(d, &msg[i]);
+ else
+ ret = friio_i2c_tuner_write(d, &msg[i]);
+
+ if (ret < 0)
+ break;
+
+ usleep_range(1000, 2000); /* avoid I2C errors */
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+ return i;
+}
+
+static const struct i2c_algorithm friio_tuner_i2c_algo = {
+ .master_xfer = friio_tuner_i2c_xfer,
+ .functionality = gl861_i2c_func,
+};
+
+/* GPIO control in Friio */
+
+#define FRIIO_CTL_LNB (1 << 0)
+#define FRIIO_CTL_STROBE (1 << 1)
+#define FRIIO_CTL_CLK (1 << 2)
+#define FRIIO_CTL_LED (1 << 3)
+
+#define FRIIO_LED_RUNNING 0x6400ff64
+#define FRIIO_LED_STOPPED 0x96ff00ff
+
+/* control PIC16F676 attached to Friio */
+static int friio_ext_ctl(struct dvb_usb_device *d,
+ u32 sat_color, int power_on)
+{
+ int i, ret;
+ struct i2c_msg msg;
+ u8 *buf;
+ u32 mask;
+ u8 power = (power_on) ? FRIIO_CTL_LNB : 0;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ msg.addr = 0x00;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = buf;
+ buf[0] = 0x00;
+
+	/* send the 2-bit header (&B10) */
+ buf[1] = power | FRIIO_CTL_LED | FRIIO_CTL_STROBE;
+ ret = i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+
+ buf[1] = power | FRIIO_CTL_STROBE;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+
+	/* send the 32-bit (saturation, R, G, B) word serially */
+	mask = 1U << 31;
+ for (i = 0; i < 32; i++) {
+ buf[1] = power | FRIIO_CTL_STROBE;
+ if (sat_color & mask)
+ buf[1] |= FRIIO_CTL_LED;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ mask >>= 1;
+ }
+
+ /* set the strobe off */
+ buf[1] = power;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+
+ kfree(buf);
+ return (ret == 70) ? 0 : -EREMOTEIO;
+}
+
+/* init/config of gl861 for Friio */
+/* NOTE:
+ * This function cannot be moved into friio_init()/dvb_usbv2_init(),
+ * because the initialization done here must precede any other activity,
+ * I2C included, while friio_init() is called by dvb-usbv2 only after
+ * {_frontend, _tuner}_attach(), which already use I2C. The function is
+ * therefore hooked up to _power_ctl() instead.
+ *
+ * Since it runs at this early init stage, before the I2C adapter has
+ * been set up, it cannot use i2c_transfer().
+ */
+static int friio_reset(struct dvb_usb_device *d)
+{
+ int i, ret;
+ u8 wbuf[2], rbuf[2];
+
+ static const u8 friio_init_cmds[][2] = {
+ {0x33, 0x08}, {0x37, 0x40}, {0x3a, 0x1f}, {0x3b, 0xff},
+ {0x3c, 0x1f}, {0x3d, 0xff}, {0x38, 0x00}, {0x35, 0x00},
+ {0x39, 0x00}, {0x36, 0x00},
+ };
+
+ ret = usb_set_interface(d->udev, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = 0x11;
+ wbuf[1] = 0x02;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+ usleep_range(2000, 3000);
+
+ wbuf[0] = 0x11;
+ wbuf[1] = 0x00;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Check if the dev is really a Friio White, since it might be
+ * another device, Friio Black, with the same VID/PID.
+ */
+
+ usleep_range(1000, 2000);
+ wbuf[0] = 0x03;
+ wbuf[1] = 0x80;
+ ret = gl861_i2c_write_ex(d, 0x09, wbuf, 2);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(2000, 3000);
+ ret = gl861_i2c_read_ex(d, 0x09, rbuf, 2);
+ if (ret < 0)
+ return ret;
+ if (rbuf[0] != 0xff || rbuf[1] != 0xff)
+ return -ENODEV;
+
+
+ usleep_range(1000, 2000);
+ ret = gl861_i2c_write_ex(d, 0x48, wbuf, 2);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(2000, 3000);
+ ret = gl861_i2c_read_ex(d, 0x48, rbuf, 2);
+ if (ret < 0)
+ return ret;
+ if (rbuf[0] != 0xff || rbuf[1] != 0xff)
+ return -ENODEV;
+
+ wbuf[0] = 0x30;
+ wbuf[1] = 0x04;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = 0x00;
+ wbuf[1] = 0x01;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = 0x06;
+ wbuf[1] = 0x0f;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(friio_init_cmds); i++) {
+ ret = gl861_i2c_msg(d, 0x00, (u8 *)friio_init_cmds[i], 2,
+ NULL, 0);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * DVB callbacks for Friio
+ */
+
+static int friio_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+ return onoff ? friio_reset(d) : 0;
+}
+
+static int friio_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ const struct i2c_board_info *info;
+ struct dvb_usb_device *d;
+ struct tc90522_config cfg;
+ struct i2c_client *cl;
+ struct friio_priv *priv;
+
+ info = &friio_config.demod_info;
+ d = adap_to_d(adap);
+ cl = dvb_module_probe("tc90522", info->type,
+ &d->i2c_adap, info->addr, &cfg);
+ if (!cl)
+ return -ENODEV;
+ adap->fe[0] = cfg.fe;
+
+ /* ignore cfg.tuner_i2c and create new one */
+ priv = adap_to_priv(adap);
+ priv->i2c_client_demod = cl;
+ priv->tuner_adap.algo = &friio_tuner_i2c_algo;
+ priv->tuner_adap.dev.parent = &d->udev->dev;
+ strlcpy(priv->tuner_adap.name, d->name, sizeof(priv->tuner_adap.name));
+ strlcat(priv->tuner_adap.name, "-tuner", sizeof(priv->tuner_adap.name));
+ priv->demod_sub_i2c = &priv->tuner_adap;
+ i2c_set_adapdata(&priv->tuner_adap, d);
+
+ return i2c_add_adapter(&priv->tuner_adap);
+}
+
+static int friio_frontend_detach(struct dvb_usb_adapter *adap)
+{
+ struct friio_priv *priv;
+
+ priv = adap_to_priv(adap);
+ i2c_del_adapter(&priv->tuner_adap);
+ dvb_module_release(priv->i2c_client_demod);
+ return 0;
+}
+
+static int friio_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ const struct i2c_board_info *info;
+ struct dvb_pll_config cfg;
+ struct i2c_client *cl;
+ struct friio_priv *priv;
+
+ priv = adap_to_priv(adap);
+ info = &friio_config.tuner_info;
+ cfg = friio_config.tuner_cfg;
+ cfg.fe = adap->fe[0];
+
+ cl = dvb_module_probe("dvb_pll", info->type,
+ priv->demod_sub_i2c, info->addr, &cfg);
+ if (!cl)
+ return -ENODEV;
+ priv->i2c_client_tuner = cl;
+ return 0;
+}
+
+static int friio_tuner_detach(struct dvb_usb_adapter *adap)
+{
+ struct friio_priv *priv;
+
+ priv = adap_to_priv(adap);
+ dvb_module_release(priv->i2c_client_tuner);
+ return 0;
+}
+
+static int friio_init(struct dvb_usb_device *d)
+{
+ int i;
+ int ret;
+ struct friio_priv *priv;
+
+ static const u8 demod_init[][2] = {
+ {0x01, 0x40}, {0x04, 0x38}, {0x05, 0x40}, {0x07, 0x40},
+ {0x0f, 0x4f}, {0x11, 0x21}, {0x12, 0x0b}, {0x13, 0x2f},
+ {0x14, 0x31}, {0x16, 0x02}, {0x21, 0xc4}, {0x22, 0x20},
+ {0x2c, 0x79}, {0x2d, 0x34}, {0x2f, 0x00}, {0x30, 0x28},
+ {0x31, 0x31}, {0x32, 0xdf}, {0x38, 0x01}, {0x39, 0x78},
+ {0x3b, 0x33}, {0x3c, 0x33}, {0x48, 0x90}, {0x51, 0x68},
+ {0x5e, 0x38}, {0x71, 0x00}, {0x72, 0x08}, {0x77, 0x00},
+ {0xc0, 0x21}, {0xc1, 0x10}, {0xe4, 0x1a}, {0xea, 0x1f},
+ {0x77, 0x00}, {0x71, 0x00}, {0x71, 0x00}, {0x76, 0x0c},
+ };
+
+ /* power on LNA? */
+ ret = friio_ext_ctl(d, FRIIO_LED_STOPPED, true);
+ if (ret < 0)
+ return ret;
+ msleep(20);
+
+ /* init/config demod */
+ priv = d_to_priv(d);
+ for (i = 0; i < ARRAY_SIZE(demod_init); i++) {
+ int ret;
+
+ ret = i2c_master_send(priv->i2c_client_demod, demod_init[i], 2);
+ if (ret < 0)
+ return ret;
+ }
+ msleep(100);
+ return 0;
+}
+
+static void friio_exit(struct dvb_usb_device *d)
+{
+ friio_ext_ctl(d, FRIIO_LED_STOPPED, false);
+}
+
+static int friio_streaming_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ u32 led_color;
+
+ led_color = onoff ? FRIIO_LED_RUNNING : FRIIO_LED_STOPPED;
+ return friio_ext_ctl(fe_to_d(fe), led_color, true);
+}
+
+
+static struct dvb_usb_device_properties friio_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+
+ .size_of_priv = sizeof(struct friio_priv),
+
+ .i2c_algo = &gl861_i2c_algo,
+ .power_ctrl = friio_power_ctrl,
+ .frontend_attach = friio_frontend_attach,
+ .frontend_detach = friio_frontend_detach,
+ .tuner_attach = friio_tuner_attach,
+ .tuner_detach = friio_tuner_detach,
+ .init = friio_init,
+ .exit = friio_exit,
+ .streaming_ctrl = friio_streaming_ctrl,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x01, 8, 16384),
+ }
+ }
+};
+
static const struct usb_device_id gl861_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580_55801,
&gl861_props, "MSI Mega Sky 55801 DVB-T USB2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_ALINK, USB_VID_ALINK_DTU,
&gl861_props, "A-LINK DTU DVB-T USB2.0", NULL) },
+ { DVB_USB_DEVICE(USB_VID_774, USB_PID_FRIIO_WHITE,
+ &friio_props, "774 Friio White ISDB-T USB2.0", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, gl861_id_table);
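
One detail worth spelling out from friio_ext_ctl() above: the `ret == 70` success test is pure transfer accounting. Each of the 2 header bits and 32 colour-word bits is clocked with two i2c_transfer() calls (data, then data|CLK), and the final strobe-off step adds two more, so (2 + 32 + 1) * 2 = 70. As an illustrative define (not present in the driver):

	/* (2 header bits + 32 data bits + 1 strobe-off step) x 2 calls each */
	#define FRIIO_EXT_CTL_XFERS	((2 + 32 + 1) * 2)	/* == 70 */
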
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.h b/drivers/media/usb/dvb-usb-v2/gl861.h
index b651b857e034..02c00e10748a 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.h
+++ b/drivers/media/usb/dvb-usb-v2/gl861.h
@@ -9,5 +9,6 @@
#define GL861_REQ_I2C_WRITE 0x01
#define GL861_REQ_I2C_READ 0x02
+#define GL861_REQ_I2C_RAW 0x03
#endif
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 221cf46b4140..9f74453799a2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -554,9 +554,9 @@ static const struct dvb_frontend_ops mxl111sf_demod_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "MaxLinear MxL111SF DVB-T demodulator",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 240d736bf1bb..92b3b9221a21 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -465,9 +465,9 @@ static const struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
.info = {
.name = "MaxLinear MxL111SF",
#if 0
- .frequency_min = ,
- .frequency_max = ,
- .frequency_step = ,
+ .frequency_min_hz = ,
+ .frequency_max_hz = ,
+ .frequency_step_hz = ,
#endif
},
#if 0
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c76e78f9638a..a970224a94bd 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1732,7 +1732,7 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
goto exit;
ret = rtl28xxu_rd_reg(d, IR_RX_BC, &buf[0]);
- if (ret)
+ if (ret || buf[0] > sizeof(buf))
goto err;
len = buf[0];
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index b0499f95ec45..024c751eb165 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -40,7 +40,7 @@ static void usb_urb_complete(struct urb *urb)
return;
default: /* error */
dev_dbg_ratelimited(&stream->udev->dev,
- "%s: urb completition failed=%d\n",
+ "%s: urb completion failed=%d\n",
__func__, urb->status);
break;
}
@@ -69,7 +69,7 @@ static void usb_urb_complete(struct urb *urb)
break;
default:
dev_err(&stream->udev->dev,
- "%s: unknown endpoint type in completition handler\n",
+ "%s: unknown endpoint type in completion handler\n",
KBUILD_MODNAME);
return;
}
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index b8a1c62a0682..513df955eaa3 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -312,12 +312,6 @@ config DVB_USB_DTV5100
help
Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver.
-config DVB_USB_FRIIO
- tristate "Friio ISDB-T USB2.0 Receiver support"
- depends on DVB_USB
- help
- Say Y here to support the Japanese DTV receiver Friio.
-
config DVB_USB_AZ6027
tristate "Azurewave DVB-S/S2 USB2.0 AZ6027 support"
depends on DVB_USB
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 9ad2618408ef..407d90ca8be0 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -71,9 +71,6 @@ obj-$(CONFIG_DVB_USB_DTV5100) += dvb-usb-dtv5100.o
dvb-usb-cinergyT2-objs := cinergyT2-core.o cinergyT2-fe.o
obj-$(CONFIG_DVB_USB_CINERGY_T2) += dvb-usb-cinergyT2.o
-dvb-usb-friio-objs := friio.o friio-fe.o
-obj-$(CONFIG_DVB_USB_FRIIO) += dvb-usb-friio.o
-
dvb-usb-az6027-objs := az6027.o
obj-$(CONFIG_DVB_USB_AZ6027) += dvb-usb-az6027.o
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 7fbbc954da16..09cc3a21af65 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -1455,9 +1455,9 @@ static const struct dvb_frontend_ops af9005_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "AF9005 USB DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 250000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index 5a2f81311fb7..df71df7ed524 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -295,9 +295,9 @@ static const struct dvb_frontend_ops cinergyt2_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = DRIVER_NAME,
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2
| FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4
| FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index c53a969bc6be..091389fdf89e 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -1745,6 +1745,7 @@ static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
return -ENODEV;
} else {
+ /* FIXME: check if it is fe_adap[1] */
if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
return -ENODEV;
}
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index 7e75aae34fb8..1ca3a51b2ae3 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -230,9 +230,9 @@ static const struct dvb_frontend_ops dtt200u_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "WideView USB DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 250000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 0d4fdd34a710..9ce8b4d79d1f 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2101,14 +2101,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
}
};
-static struct dvb_usb_device_properties *p1100;
static const struct dvb_usb_device_description d1100 = {
"Prof 1100 USB ",
{&dw2102_table[PROF_1100], NULL},
{NULL},
};
-static struct dvb_usb_device_properties *s660;
static const struct dvb_usb_device_description d660 = {
"TeVii S660 USB",
{&dw2102_table[TEVII_S660], NULL},
@@ -2127,14 +2125,12 @@ static const struct dvb_usb_device_description d480_2 = {
{NULL},
};
-static struct dvb_usb_device_properties *p7500;
static const struct dvb_usb_device_description d7500 = {
"Prof 7500 USB DVB-S2",
{&dw2102_table[PROF_7500], NULL},
{NULL},
};
-static struct dvb_usb_device_properties *s421;
static const struct dvb_usb_device_description d421 = {
"TeVii S421 PCI",
{&dw2102_table[TEVII_S421], NULL},
@@ -2334,6 +2330,11 @@ static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int retval = -ENOMEM;
+ struct dvb_usb_device_properties *p1100;
+ struct dvb_usb_device_properties *s660;
+ struct dvb_usb_device_properties *p7500;
+ struct dvb_usb_device_properties *s421;
+
p1100 = kmemdup(&s6x0_properties,
sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!p1100)
@@ -2402,8 +2403,16 @@ static int dw2102_probe(struct usb_interface *intf,
0 == dvb_usb_device_init(intf, &t220_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
- THIS_MODULE, NULL, adapter_nr))
+ THIS_MODULE, NULL, adapter_nr)) {
+
+ /* clean up copied properties */
+ kfree(s421);
+ kfree(p7500);
+ kfree(s660);
+ kfree(p1100);
+
return 0;
+ }
retval = -ENODEV;
kfree(s421);
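
Moving the kmemdup()'d property blocks from file-scope statics into dw2102_probe() makes their ownership explicit: the pointers no longer persist across repeated probes, and every exit path now releases them. This relies on dvb_usb_device_init() keeping its own copy of the properties it is given (an assumption based on its internal memcpy), so the duplicates are safe to free on success as well as on failure. A userspace sketch of the duplicate-tweak-free pattern, with malloc()/free() standing in for kmemdup()/kfree():

	#include <stdlib.h>
	#include <string.h>

	struct props { int demod; };

	/* Models dvb_usb_device_init(): keeps its own copy of the template. */
	static int init_device(const struct props *tmpl)
	{
		struct props own;

		memcpy(&own, tmpl, sizeof(own));
		return 0;
	}

	static int probe(const struct props *base)
	{
		struct props *dup = malloc(sizeof(*dup));	/* kmemdup() */
		int ret;

		if (!dup)
			return -1;
		memcpy(dup, base, sizeof(*dup));
		dup->demod = 1;		/* per-device tweak */

		ret = init_device(dup);
		free(dup);		/* freed on every path */
		return ret;
	}

	int main(void)
	{
		static const struct props base = { 0 };

		return probe(&base);
	}
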
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 932f262452eb..e6bd0ed8d789 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -133,10 +133,10 @@ static int jdvbt90502_pll_set_freq(struct jdvbt90502_state *state, u32 freq)
u32 f;
deb_fe("%s: freq=%d, step=%d\n", __func__, freq,
- state->frontend.ops.info.frequency_stepsize);
+ state->frontend.ops.info.frequency_stepsize_hz);
 	/* freq -> oscillator frequency conversion. */
/* freq: 473,000,000 + n*6,000,000 [+ 142857 (center freq. shift)] */
- f = freq / state->frontend.ops.info.frequency_stepsize;
+ f = freq / state->frontend.ops.info.frequency_stepsize_hz;
/* add 399[1/7 MHZ] = 57MHz for the IF */
f += 399;
/* add center frequency shift if necessary */
@@ -413,10 +413,9 @@ static const struct dvb_frontend_ops jdvbt90502_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "Comtech JDVBT90502 ISDB-T",
- .frequency_min = 473000000, /* UHF 13ch, center */
- .frequency_max = 767142857, /* UHF 62ch, center */
- .frequency_stepsize = JDVBT90502_PLL_CLK / JDVBT90502_PLL_DIVIDER,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 473000000, /* UHF 13ch, center */
+ .frequency_max_hz = 767142857, /* UHF 62ch, center */
+ .frequency_stepsize_hz = JDVBT90502_PLL_CLK / JDVBT90502_PLL_DIVIDER,
/* NOTE: this driver ignores all parameters but frequency. */
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index 51b026fa6bfb..22554d9abd43 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -255,9 +255,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
int i, j;
int ret = 0;
- if (!num)
- return -EINVAL;
-
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
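
This deletion, like the matching ones in em28xx-i2c.c, hdpvr-i2c.c and tm6000-i2c.c below, assumes the I2C core now rejects empty transfers before the adapter callback runs (i2c_transfer() is believed to have gained a WARN_ON(!msgs || num < 1) check around this time; stated from memory, not quoted from the tree). Centralizing the validation looks like this in miniature:

	#include <stdio.h>

	struct i2c_msg { int addr; };

	/* The adapter callback may now assume num >= 1. */
	static int driver_xfer(struct i2c_msg *msgs, int num)
	{
		return msgs[0].addr;
	}

	/* Models the core-level check the drivers rely on. */
	static int core_transfer(struct i2c_msg *msgs, int num)
	{
		if (!msgs || num < 1)
			return -22;	/* -EINVAL */
		return driver_xfer(msgs, num);
	}

	int main(void)
	{
		struct i2c_msg m = { 0x50 };

		printf("%d\n", core_transfer(&m, 1));	/* 80 */
		printf("%d\n", core_transfer(NULL, 0));	/* -22 */
		return 0;
	}
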
diff --git a/drivers/media/usb/dvb-usb/usb-urb.c b/drivers/media/usb/dvb-usb/usb-urb.c
index 5e05963f4220..9771f0954c69 100644
--- a/drivers/media/usb/dvb-usb/usb-urb.c
+++ b/drivers/media/usb/dvb-usb/usb-urb.c
@@ -33,7 +33,7 @@ static void usb_urb_complete(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- deb_ts("urb completition error %d.\n", urb->status);
+ deb_ts("urb completion error %d.\n", urb->status);
break;
}
@@ -57,7 +57,7 @@ static void usb_urb_complete(struct urb *urb)
stream->complete(stream, b, urb->actual_length);
break;
default:
- err("unknown endpoint type in completition handler.");
+ err("unknown endpoint type in completion handler.");
return;
}
usb_submit_urb(urb,GFP_ATOMIC);
diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c
index ae48146e005c..9eb811452f2e 100644
--- a/drivers/media/usb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/usb/dvb-usb/vp702x-fe.c
@@ -349,10 +349,9 @@ static const struct dvb_frontend_ops vp702x_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Twinhan DST-like frontend (VP7021/VP7020) DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c
index f86040173b8d..1173ae29885b 100644
--- a/drivers/media/usb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/usb/dvb-usb/vp7045-fe.c
@@ -162,9 +162,9 @@ static const struct dvb_frontend_ops vp7045_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Twinhan VP7045/46 USB DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 1000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 1 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 6c8438311d3b..71c829f31d3b 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -543,7 +543,7 @@ static const struct em28xx_reg_seq hauppauge_dualhd_dvb[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xdf, 0xff, 100}, /* demod 2 reset */
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
- {EM2874_R5F_TS_ENABLE, 0x44, 0xff, 50},
+ {EM2874_R5F_TS_ENABLE, 0x00, 0xff, 50}, /* disable TS filters */
{EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50},
{EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50},
{-1, -1, -1, -1},
@@ -2688,8 +2688,6 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_PCTV_292E },
{ USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */
.driver_info = EM28178_BOARD_PCTV_292E },
- { USB_DEVICE(0x2040, 0x8268), /* Hauppauge WinTV-soloHD alt. PID */
- .driver_info = EM28178_BOARD_PCTV_292E },
{ USB_DEVICE(0x0413, 0x6f07),
.driver_info = EM2861_BOARD_LEADTEK_VC100 },
{ USB_DEVICE(0xeb1a, 0x8179),
@@ -2854,13 +2852,13 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_write_reg(dev, EM2880_R04_GPO, 0x01);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xdc);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc);
- mdelay(70);
+ msleep(70);
break;
case EM2870_BOARD_TERRATEC_XS_MT2060:
/*
@@ -2868,11 +2866,11 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
* demod work
*/
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
break;
case EM2870_BOARD_PINNACLE_PCTV_DVB:
/*
@@ -2880,11 +2878,11 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
* DVB-T demod work
*/
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
break;
case EM2820_BOARD_GADMEI_UTV310:
case EM2820_BOARD_MSI_VOX_USB_2:
@@ -3376,7 +3374,9 @@ void em28xx_free_device(struct kref *ref)
if (!dev->disconnected)
em28xx_release_resources(dev);
- kfree(dev->alt_max_pkt_size_isoc);
+ if (dev->ts == PRIMARY_TS)
+ kfree(dev->alt_max_pkt_size_isoc);
+
kfree(dev);
}
EXPORT_SYMBOL_GPL(em28xx_free_device);
@@ -3861,6 +3861,17 @@ static int em28xx_usb_probe(struct usb_interface *intf,
dev->has_video = false;
}
+ if (dev->board.has_dual_ts &&
+ (dev->tuner_type != TUNER_ABSENT || INPUT(0)->type)) {
+ /*
+		 * The logic that sets the alternate interface is not ready
+		 * for dual tuners with analog modes.
+ */
+ dev_err(&intf->dev,
+ "We currently don't support analog TV or stream capture on dual tuners.\n");
+ has_video = false;
+ }
+
/* Select USB transfer types to use */
if (has_video) {
if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
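
The mdelay()-to-msleep() conversions above follow the usual rule from Documentation/timers/timers-howto.rst: these GPIO bring-up sequences run in process context, and for waits in the tens of milliseconds mdelay() busy-spins the CPU for the full duration, whereas msleep() schedules away. A kernel-style sketch of the resulting idiom (sketch only, using the register helper from this file):

	static void em28xx_gpio_settle(struct em28xx *dev, u8 val)
	{
		em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, val);
		/*
		 * Process context, ~70 ms settle time: msleep() sleeps;
		 * mdelay() would spin for the whole interval.
		 */
		msleep(70);
	}
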
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index f70845e7d8c6..5657f8710ca6 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -655,12 +655,12 @@ int em28xx_capture_start(struct em28xx *dev, int start)
rc = em28xx_write_reg_bits(dev,
EM2874_R5F_TS_ENABLE,
start ? EM2874_TS1_CAPTURE_ENABLE : 0x00,
- EM2874_TS1_CAPTURE_ENABLE);
+ EM2874_TS1_CAPTURE_ENABLE | EM2874_TS1_FILTER_ENABLE | EM2874_TS1_NULL_DISCARD);
else
rc = em28xx_write_reg_bits(dev,
EM2874_R5F_TS_ENABLE,
start ? EM2874_TS2_CAPTURE_ENABLE : 0x00,
- EM2874_TS2_CAPTURE_ENABLE);
+ EM2874_TS2_CAPTURE_ENABLE | EM2874_TS2_FILTER_ENABLE | EM2874_TS2_NULL_DISCARD);
} else {
/* FIXME: which is the best order? */
/* video registers are sampled by VREF */
@@ -1053,7 +1053,7 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
/* submit urbs and enables IRQ */
for (i = 0; i < usb_bufs->num_bufs; i++) {
- rc = usb_submit_urb(usb_bufs->urb[i], GFP_ATOMIC);
+ rc = usb_submit_urb(usb_bufs->urb[i], GFP_KERNEL);
if (rc) {
dev_err(&dev->intf->dev,
"submit of urb %i failed (error=%i)\n", i, rc);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index b778d8a1983e..a73faf12f7e4 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -218,7 +218,9 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
dvb_alt = dev->dvb_alt_isoc;
}
- usb_set_interface(udev, dev->ifnum, dvb_alt);
+ if (!dev->board.has_dual_ts)
+ usb_set_interface(udev, dev->ifnum, dvb_alt);
+
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 6458682bc6e2..e19d6342e0d0 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -559,10 +559,6 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
dev->cur_i2c_bus = bus;
}
- if (num <= 0) {
- rt_mutex_unlock(&dev->i2c_bus_lock);
- return 0;
- }
for (i = 0; i < num; i++) {
addr = msgs[i].addr << 1;
if (!msgs[i].len) {
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 05b1126f263e..62aeebcdd7f7 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -448,13 +448,14 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
{
u32 *bytesused;
struct go7007_buffer *vb_tmp = NULL;
+ unsigned long flags;
if (vb == NULL) {
- spin_lock(&go->spinlock);
+ spin_lock_irqsave(&go->spinlock, flags);
if (!list_empty(&go->vidq_active))
vb = go->active_buf =
list_first_entry(&go->vidq_active, struct go7007_buffer, list);
- spin_unlock(&go->spinlock);
+ spin_unlock_irqrestore(&go->spinlock, flags);
go->next_seq++;
return vb;
}
@@ -468,7 +469,7 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
vb->vb.vb2_buf.timestamp = ktime_get_ns();
vb_tmp = vb;
- spin_lock(&go->spinlock);
+ spin_lock_irqsave(&go->spinlock, flags);
list_del(&vb->list);
if (list_empty(&go->vidq_active))
vb = NULL;
@@ -476,7 +477,7 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
vb = list_first_entry(&go->vidq_active,
struct go7007_buffer, list);
go->active_buf = vb;
- spin_unlock(&go->spinlock);
+ spin_unlock_irqrestore(&go->spinlock, flags);
vb2_buffer_done(&vb_tmp->vb.vb2_buf, VB2_BUF_STATE_DONE);
return vb;
}
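
These helpers are reachable from the URB completion path, where the interrupt state of the caller is not fixed, so the plain spin_lock()/spin_unlock() pairs become the _irqsave/_irqrestore variants, which disable local interrupts and restore the previous interrupt state on unlock. The same conversion is applied to snd-go7007.c and usbtv-audio.c below. The canonical shape, as a kernel-style sketch:

	static void touch_shared_state(struct go7007 *go)
	{
		unsigned long flags;

		/*
		 * Safe whether or not the caller already disabled IRQs:
		 * the previous interrupt state is saved in flags and
		 * restored on unlock.
		 */
		spin_lock_irqsave(&go->spinlock, flags);
		/* ... manipulate go->vidq_active ... */
		spin_unlock_irqrestore(&go->spinlock, flags);
	}
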
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index f84a2130f033..137fc253b122 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -75,13 +75,14 @@ static void parse_audio_stream_data(struct go7007 *go, u8 *buf, int length)
struct go7007_snd *gosnd = go->snd_context;
struct snd_pcm_runtime *runtime = gosnd->substream->runtime;
int frames = bytes_to_frames(runtime, length);
+ unsigned long flags;
- spin_lock(&gosnd->lock);
+ spin_lock_irqsave(&gosnd->lock, flags);
gosnd->hw_ptr += frames;
if (gosnd->hw_ptr >= runtime->buffer_size)
gosnd->hw_ptr -= runtime->buffer_size;
gosnd->avail += frames;
- spin_unlock(&gosnd->lock);
+ spin_unlock_irqrestore(&gosnd->lock, flags);
if (gosnd->w_idx + length > runtime->dma_bytes) {
int cpy = runtime->dma_bytes - gosnd->w_idx;
@@ -92,13 +93,13 @@ static void parse_audio_stream_data(struct go7007 *go, u8 *buf, int length)
}
memcpy(runtime->dma_area + gosnd->w_idx, buf, length);
gosnd->w_idx += length;
- spin_lock(&gosnd->lock);
+ spin_lock_irqsave(&gosnd->lock, flags);
if (gosnd->avail < runtime->period_size) {
- spin_unlock(&gosnd->lock);
+ spin_unlock_irqrestore(&gosnd->lock, flags);
return;
}
gosnd->avail -= runtime->period_size;
- spin_unlock(&gosnd->lock);
+ spin_unlock_irqrestore(&gosnd->lock, flags);
if (gosnd->capturing)
snd_pcm_period_elapsed(gosnd->substream);
}
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 0cfdf8a1e19d..f993f6280c56 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -163,7 +163,7 @@ static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf,
actual_len = kinect_read(udev, ibuf, 0x200);
} while (actual_len == 0);
gspca_dbg(gspca_dev, D_USBO, "Control reply: %d\n", actual_len);
- if (actual_len < sizeof(*rhdr)) {
+ if (actual_len < (int)sizeof(*rhdr)) {
pr_err("send_cmd: Input control transfer failed (%d)\n",
actual_len);
return actual_len < 0 ? actual_len : -EREMOTEIO;
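
actual_len is a signed int that can carry a negative error code, but sizeof(*rhdr) has type size_t, so in the old comparison the int was converted to unsigned and a negative return compared as a huge positive value, skipping the error path. Casting the sizeof result to int keeps the comparison signed. A small demonstration of the trap:

	#include <stdio.h>

	int main(void)
	{
		int actual_len = -32;	/* e.g. -EPIPE from a transfer */
		struct rhdr { char hdr[12]; } r;

		printf("%d\n", actual_len < sizeof(r));	     /* 0: wrong */
		printf("%d\n", actual_len < (int)sizeof(r)); /* 1: fixed */
		return 0;
	}
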
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index 6d692fb3e8dd..34085a0b15a1 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -597,7 +597,7 @@ static int hackrf_submit_urbs(struct hackrf_dev *dev)
for (i = 0; i < dev->urbs_initialized; i++) {
dev_dbg(dev->dev, "submit urb=%d\n", i);
- ret = usb_submit_urb(dev->urb_list[i], GFP_ATOMIC);
+ ret = usb_submit_urb(dev->urb_list[i], GFP_KERNEL);
if (ret) {
dev_err(dev->dev, "Could not submit URB no. %d - get them all back\n",
i);
@@ -636,7 +636,7 @@ static int hackrf_alloc_stream_bufs(struct hackrf_dev *dev)
for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) {
dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev,
- BULK_BUFFER_SIZE, GFP_ATOMIC,
+ BULK_BUFFER_SIZE, GFP_KERNEL,
&dev->dma_addr[dev->buf_num]);
if (!dev->buf_list[dev->buf_num]) {
dev_dbg(dev->dev, "alloc buf=%d failed\n",
@@ -689,7 +689,7 @@ static int hackrf_alloc_urbs(struct hackrf_dev *dev, bool rcv)
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
dev_dbg(dev->dev, "alloc urb=%d\n", i);
- dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
+ dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_list[i]) {
for (j = 0; j < i; j++)
usb_free_urb(dev->urb_list[j]);
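
All three call sites above run in process context (device setup and stream start), where sleeping is allowed, so GFP_KERNEL is the right allocation class; GFP_ATOMIC remains for paths that genuinely cannot sleep, such as URB completion handlers, since it cannot reclaim memory and draws on emergency reserves. A hedged sketch of the split (function names hypothetical):

	/* Process context: may sleep, so GFP_KERNEL. */
	static int stream_start(struct hackrf_dev *dev)
	{
		dev->urb_list[0] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_list[0])
			return -ENOMEM;
		return usb_submit_urb(dev->urb_list[0], GFP_KERNEL);
	}

	/* Interrupt context: must not sleep, so GFP_ATOMIC. */
	static void stream_urb_complete(struct urb *urb)
	{
		usb_submit_urb(urb, GFP_ATOMIC);
	}
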
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index c71ddefd2e58..5a3cb614a211 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -117,9 +117,6 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
struct hdpvr_device *dev = i2c_get_adapdata(i2c_adapter);
int retval = 0, addr;
- if (num <= 0)
- return 0;
-
mutex_lock(&dev->i2c_mutex);
addr = msgs[0].addr << 1;
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 77b759a0bcd9..504e413edcd2 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -802,6 +802,7 @@ int stk1160_vb2_setup(struct stk1160 *dev)
q->buf_struct_size = sizeof(struct stk1160_buffer);
q->ops = &stk1160_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
+ q->lock = &dev->vb_queue_lock;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
rc = vb2_queue_init(q);
@@ -827,7 +828,6 @@ int stk1160_video_register(struct stk1160 *dev)
* It will be used to protect *only* v4l2 ioctls.
*/
dev->vdev.lock = &dev->v4l_lock;
- dev->vdev.queue->lock = &dev->vb_queue_lock;
/* This will be used to set video_device parent */
dev->vdev.v4l2_dev = &dev->v4l2_dev;
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index c811fc6cf48a..3a4e545c6037 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -266,6 +266,11 @@ static int register_dvb(struct tm6000_core *dev)
ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
+ if (ret < 0) {
+ pr_err("tm6000: couldn't register the adapter!\n");
+ goto err;
+ }
+
dvb->adapter.priv = dev;
if (dvb->frontend) {
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index 659b63febf85..ccd1adf862b1 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -145,8 +145,6 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
struct tm6000_core *dev = i2c_adap->algo_data;
int addr, rc, i, byte;
- if (num <= 0)
- return 0;
for (i = 0; i < num; i++) {
addr = (msgs[i].addr << 1) & 0xff;
i2c_dprintk(2, "%s %s addr=0x%x len=%d:",
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index 6ea05d909024..278bf6c5855b 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -247,9 +247,9 @@ static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "TechnoTrend/Hauppauge DEC2000-t Frontend",
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
@@ -270,9 +270,9 @@ static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "TechnoTrend/Hauppauge DEC3000-s Frontend",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000, /* guessed */
.symbol_rate_max = 45000000, /* guessed */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 2c2ca77fa01f..4ce38246ed64 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -126,6 +126,7 @@ static void usbtv_audio_urb_received(struct urb *urb)
struct snd_pcm_runtime *runtime = substream->runtime;
size_t i, frame_bytes, chunk_length, buffer_pos, period_pos;
int period_elapsed;
+ unsigned long flags;
void *urb_current;
switch (urb->status) {
@@ -179,12 +180,12 @@ static void usbtv_audio_urb_received(struct urb *urb)
}
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
chip->snd_buffer_pos = buffer_pos;
chip->snd_period_pos = period_pos;
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
if (period_elapsed)
snd_pcm_period_elapsed(substream);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index a36b4fb949fa..c2ad102bd693 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -20,6 +20,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <media/v4l2-ctrls.h>
@@ -971,12 +972,30 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
return 0;
}
+static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
+ const u8 *data)
+{
+ s32 value = mapping->get(mapping, UVC_GET_CUR, data);
+
+ if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
+ struct uvc_menu_info *menu = mapping->menu_info;
+ unsigned int i;
+
+ for (i = 0; i < mapping->menu_count; ++i, ++menu) {
+ if (menu->value == value) {
+ value = i;
+ break;
+ }
+ }
+ }
+
+ return value;
+}
+
static int __uvc_ctrl_get(struct uvc_video_chain *chain,
struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
s32 *value)
{
- struct uvc_menu_info *menu;
- unsigned int i;
int ret;
if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
@@ -993,18 +1012,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
ctrl->loaded = 1;
}
- *value = mapping->get(mapping, UVC_GET_CUR,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
-
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- menu = mapping->menu_info;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == *value) {
- *value = i;
- break;
- }
- }
- }
+ *value = __uvc_ctrl_get_value(mapping,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
return 0;
}
@@ -1125,7 +1134,7 @@ done:
}
/*
- * Mapping V4L2 controls to UVC controls can be straighforward if done well.
+ * Mapping V4L2 controls to UVC controls can be straightforward if done well.
* Most of the UVC controls exist in V4L2, and can be mapped directly. Some
* must be grouped (for instance the Red Balance, Blue Balance and Do White
* Balance V4L2 controls use the White Balance Component UVC control) or
@@ -1216,53 +1225,135 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
ev->u.ctrl.default_value = v4l2_ctrl.default_value;
}
-static void uvc_ctrl_send_event(struct uvc_fh *handle,
- struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
- s32 value, u32 changes)
+/*
+ * Send control change events to all subscribers for the @ctrl control. By
+ * default the subscriber that generated the event, as identified by @handle,
+ * is not notified unless it has set the V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK flag.
+ * @handle can be NULL for asynchronous events related to auto-update controls,
+ * in which case all subscribers are notified.
+ */
+static void uvc_ctrl_send_event(struct uvc_video_chain *chain,
+ struct uvc_fh *handle, struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping, s32 value, u32 changes)
{
+ struct v4l2_fh *originator = handle ? &handle->vfh : NULL;
struct v4l2_subscribed_event *sev;
struct v4l2_event ev;
if (list_empty(&mapping->ev_subs))
return;
- uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, value, changes);
+ uvc_ctrl_fill_event(chain, &ev, ctrl, mapping, value, changes);
list_for_each_entry(sev, &mapping->ev_subs, node) {
- if (sev->fh && (sev->fh != &handle->vfh ||
+ if (sev->fh != originator ||
(sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK) ||
- (changes & V4L2_EVENT_CTRL_CH_FLAGS)))
+ (changes & V4L2_EVENT_CTRL_CH_FLAGS))
v4l2_event_queue_fh(sev->fh, &ev);
}
}
-static void uvc_ctrl_send_slave_event(struct uvc_fh *handle,
- struct uvc_control *master, u32 slave_id,
- const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+/*
+ * Send control change events for the slave of the @master control identified
+ * by the V4L2 ID @slave_id. The @handle identifies the event subscriber that
+ * generated the event and may be NULL for auto-update events.
+ */
+static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
+ struct uvc_fh *handle, struct uvc_control *master, u32 slave_id)
{
struct uvc_control_mapping *mapping = NULL;
struct uvc_control *ctrl = NULL;
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
- unsigned int i;
s32 val = 0;
- /*
- * We can skip sending an event for the slave if the slave
- * is being modified in the same transaction.
- */
- for (i = 0; i < xctrls_count; i++) {
- if (xctrls[i].id == slave_id)
- return;
- }
-
__uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0);
if (ctrl == NULL)
return;
- if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ if (__uvc_ctrl_get(chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
- uvc_ctrl_send_event(handle, ctrl, mapping, val, changes);
+ uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
+}
+
+static void uvc_ctrl_status_event_work(struct work_struct *work)
+{
+ struct uvc_device *dev = container_of(work, struct uvc_device,
+ async_ctrl.work);
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+ struct uvc_video_chain *chain = w->chain;
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl = w->ctrl;
+ struct uvc_fh *handle;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&chain->ctrl_mutex);
+
+ handle = ctrl->handle;
+ ctrl->handle = NULL;
+
+ list_for_each_entry(mapping, &ctrl->info.mappings, list) {
+ s32 value = __uvc_ctrl_get_value(mapping, w->data);
+
+ /*
+ * handle may be NULL here if the device sends auto-update
+ * events without a prior related control set from userspace.
+ */
+ for (i = 0; i < ARRAY_SIZE(mapping->slave_ids); ++i) {
+ if (!mapping->slave_ids[i])
+ break;
+
+ uvc_ctrl_send_slave_event(chain, handle, ctrl,
+ mapping->slave_ids[i]);
+ }
+
+ uvc_ctrl_send_event(chain, handle, ctrl, mapping, value,
+ V4L2_EVENT_CTRL_CH_VALUE);
+ }
+
+ mutex_unlock(&chain->ctrl_mutex);
+
+ /* Resubmit the URB. */
+ w->urb->interval = dev->int_ep->desc.bInterval;
+ ret = usb_submit_urb(w->urb, GFP_KERNEL);
+ if (ret < 0)
+ uvc_printk(KERN_ERR, "Failed to resubmit status URB (%d).\n",
+ ret);
+}
+
+bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data)
+{
+ struct uvc_device *dev = chain->dev;
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+
+ if (list_empty(&ctrl->info.mappings)) {
+ ctrl->handle = NULL;
+ return false;
+ }
+
+ w->data = data;
+ w->urb = urb;
+ w->chain = chain;
+ w->ctrl = ctrl;
+
+ schedule_work(&w->work);
+
+ return true;
+}
+
+static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; i < xctrls_count; ++i) {
+ if (xctrls[i].id == id)
+ return true;
+ }
+
+ return false;
}
static void uvc_ctrl_send_events(struct uvc_fh *handle,
@@ -1277,29 +1368,39 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
for (i = 0; i < xctrls_count; ++i) {
ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+ if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ /* Notification will be sent from an Interrupt event. */
+ continue;
+
for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) {
- if (!mapping->slave_ids[j])
+ u32 slave_id = mapping->slave_ids[j];
+
+ if (!slave_id)
break;
- uvc_ctrl_send_slave_event(handle, ctrl,
- mapping->slave_ids[j],
- xctrls, xctrls_count);
+
+ /*
+ * We can skip sending an event for the slave if the
+ * slave is being modified in the same transaction.
+ */
+ if (uvc_ctrl_xctrls_has_control(xctrls, xctrls_count,
+ slave_id))
+ continue;
+
+ uvc_ctrl_send_slave_event(handle->chain, handle, ctrl,
+ slave_id);
}
/*
* If the master is being modified in the same transaction
* flags may change too.
*/
- if (mapping->master_id) {
- for (j = 0; j < xctrls_count; j++) {
- if (xctrls[j].id == mapping->master_id) {
- changes |= V4L2_EVENT_CTRL_CH_FLAGS;
- break;
- }
- }
- }
+ if (mapping->master_id &&
+ uvc_ctrl_xctrls_has_control(xctrls, xctrls_count,
+ mapping->master_id))
+ changes |= V4L2_EVENT_CTRL_CH_FLAGS;
- uvc_ctrl_send_event(handle, ctrl, mapping, xctrls[i].value,
- changes);
+ uvc_ctrl_send_event(handle->chain, handle, ctrl, mapping,
+ xctrls[i].value, changes);
}
}
@@ -1472,9 +1573,10 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
}
-int uvc_ctrl_set(struct uvc_video_chain *chain,
+int uvc_ctrl_set(struct uvc_fh *handle,
struct v4l2_ext_control *xctrl)
{
+ struct uvc_video_chain *chain = handle->chain;
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
s32 value;
@@ -1581,6 +1683,9 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
mapping->set(mapping, value,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ ctrl->handle = handle;
+
ctrl->dirty = 1;
ctrl->modified = 1;
return 0;
@@ -1612,7 +1717,9 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
| (data[0] & UVC_CONTROL_CAP_SET ?
UVC_CTRL_FLAG_SET_CUR : 0)
| (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
- UVC_CTRL_FLAG_AUTO_UPDATE : 0);
+ UVC_CTRL_FLAG_AUTO_UPDATE : 0)
+ | (data[0] & UVC_CONTROL_CAP_ASYNCHRONOUS ?
+ UVC_CTRL_FLAG_ASYNCHRONOUS : 0);
kfree(data);
return ret;
@@ -2173,6 +2280,8 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
struct uvc_entity *entity;
unsigned int i;
+ INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
+
/* Walk the entities list and instantiate controls */
list_for_each_entry(entity, &dev->entities, list) {
struct uvc_control *ctrl;
@@ -2241,6 +2350,8 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev)
struct uvc_entity *entity;
unsigned int i;
+ cancel_work_sync(&dev->async_ctrl.work);
+
/* Free controls and control mappings for all entities. */
list_for_each_entry(entity, &dev->entities, list) {
for (i = 0; i < entity->ncontrols; ++i) {
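
The new asynchronous-control path splits its work across two contexts: uvc_ctrl_status_event(), called from the status URB completion handler (interrupt context), only records the URB, chain, control and data pointers and schedules dev->async_ctrl.work, while uvc_ctrl_status_event_work() later runs in process context, where it can take chain->ctrl_mutex, deliver the events, and resubmit the URB with GFP_KERNEL. A reduced sketch of that defer-to-work shape (kernel APIs; names abbreviated from the code above):

	struct async_work {
		struct work_struct work;
		struct urb *urb;
		const void *data;
	};

	static void async_work_fn(struct work_struct *work)
	{
		struct async_work *w =
			container_of(work, struct async_work, work);

		/* Process context: mutexes and GFP_KERNEL are fine here. */
		/* ... take the mutex, send the control events ... */
		usb_submit_urb(w->urb, GFP_KERNEL);
	}

	static void urb_complete_fragment(struct async_work *w, struct urb *urb)
	{
		/* Interrupt context: record state and defer, never sleep. */
		w->urb = urb;
		w->data = urb->transfer_buffer;
		schedule_work(&w->work);
	}
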
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 8e138201330f..d46dc432456c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -100,6 +100,11 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_GREY,
},
{
+ .name = "IR 8-bit (L8_IR)",
+ .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
.name = "Greyscale 10-bit (Y10 )",
.guid = UVC_GUID_FORMAT_Y10,
.fcc = V4L2_PIX_FMT_Y10,
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 7b710410584a..0722dc684378 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -78,7 +78,24 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
/* --------------------------------------------------------------------------
* Status interrupt endpoint
*/
-static void uvc_event_streaming(struct uvc_device *dev, u8 *data, int len)
+struct uvc_streaming_status {
+ u8 bStatusType;
+ u8 bOriginator;
+ u8 bEvent;
+ u8 bValue[];
+} __packed;
+
+struct uvc_control_status {
+ u8 bStatusType;
+ u8 bOriginator;
+ u8 bEvent;
+ u8 bSelector;
+ u8 bAttribute;
+ u8 bValue[];
+} __packed;
+
+static void uvc_event_streaming(struct uvc_device *dev,
+ struct uvc_streaming_status *status, int len)
{
if (len < 3) {
uvc_trace(UVC_TRACE_STATUS, "Invalid streaming status event "
@@ -86,31 +103,97 @@ static void uvc_event_streaming(struct uvc_device *dev, u8 *data, int len)
return;
}
- if (data[2] == 0) {
+ if (status->bEvent == 0) {
if (len < 4)
return;
uvc_trace(UVC_TRACE_STATUS, "Button (intf %u) %s len %d\n",
- data[1], data[3] ? "pressed" : "released", len);
- uvc_input_report_key(dev, KEY_CAMERA, data[3]);
+ status->bOriginator,
+ status->bValue[0] ? "pressed" : "released", len);
+ uvc_input_report_key(dev, KEY_CAMERA, status->bValue[0]);
} else {
uvc_trace(UVC_TRACE_STATUS,
"Stream %u error event %02x len %d.\n",
- data[1], data[2], len);
+ status->bOriginator, status->bEvent, len);
}
}
-static void uvc_event_control(struct uvc_device *dev, u8 *data, int len)
+#define UVC_CTRL_VALUE_CHANGE 0
+#define UVC_CTRL_INFO_CHANGE 1
+#define UVC_CTRL_FAILURE_CHANGE 2
+#define UVC_CTRL_MIN_CHANGE 3
+#define UVC_CTRL_MAX_CHANGE 4
+
+static struct uvc_control *uvc_event_entity_find_ctrl(struct uvc_entity *entity,
+ u8 selector)
{
- char *attrs[3] = { "value", "info", "failure" };
+ struct uvc_control *ctrl;
+ unsigned int i;
+
+ for (i = 0, ctrl = entity->controls; i < entity->ncontrols; i++, ctrl++)
+ if (ctrl->info.selector == selector)
+ return ctrl;
+
+ return NULL;
+}
- if (len < 6 || data[2] != 0 || data[4] > 2) {
+static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
+ const struct uvc_control_status *status,
+ struct uvc_video_chain **chain)
+{
+ list_for_each_entry((*chain), &dev->chains, list) {
+ struct uvc_entity *entity;
+ struct uvc_control *ctrl;
+
+ list_for_each_entry(entity, &(*chain)->entities, chain) {
+ if (entity->id != status->bOriginator)
+ continue;
+
+ ctrl = uvc_event_entity_find_ctrl(entity,
+ status->bSelector);
+ if (ctrl)
+ return ctrl;
+ }
+ }
+
+ return NULL;
+}
+
+static bool uvc_event_control(struct urb *urb,
+ const struct uvc_control_status *status, int len)
+{
+ static const char *attrs[] = { "value", "info", "failure", "min", "max" };
+ struct uvc_device *dev = urb->context;
+ struct uvc_video_chain *chain;
+ struct uvc_control *ctrl;
+
+ if (len < 6 || status->bEvent != 0 ||
+ status->bAttribute >= ARRAY_SIZE(attrs)) {
uvc_trace(UVC_TRACE_STATUS, "Invalid control status event "
"received.\n");
- return;
+ return false;
}
uvc_trace(UVC_TRACE_STATUS, "Control %u/%u %s change len %d.\n",
- data[1], data[3], attrs[data[4]], len);
+ status->bOriginator, status->bSelector,
+ attrs[status->bAttribute], len);
+
+ /* Find the control. */
+ ctrl = uvc_event_find_ctrl(dev, status, &chain);
+ if (!ctrl)
+ return false;
+
+ switch (status->bAttribute) {
+ case UVC_CTRL_VALUE_CHANGE:
+ return uvc_ctrl_status_event(urb, chain, ctrl, status->bValue);
+
+ case UVC_CTRL_INFO_CHANGE:
+ case UVC_CTRL_FAILURE_CHANGE:
+ case UVC_CTRL_MIN_CHANGE:
+ case UVC_CTRL_MAX_CHANGE:
+ break;
+ }
+
+ return false;
}
static void uvc_status_complete(struct urb *urb)
@@ -138,13 +221,23 @@ static void uvc_status_complete(struct urb *urb)
len = urb->actual_length;
if (len > 0) {
switch (dev->status[0] & 0x0f) {
- case UVC_STATUS_TYPE_CONTROL:
- uvc_event_control(dev, dev->status, len);
+ case UVC_STATUS_TYPE_CONTROL: {
+ struct uvc_control_status *status =
+ (struct uvc_control_status *)dev->status;
+
+ if (uvc_event_control(urb, status, len))
+ /* The URB will be resubmitted in work context. */
+ return;
break;
+ }
- case UVC_STATUS_TYPE_STREAMING:
- uvc_event_streaming(dev, dev->status, len);
+ case UVC_STATUS_TYPE_STREAMING: {
+ struct uvc_streaming_status *status =
+ (struct uvc_streaming_status *)dev->status;
+
+ uvc_event_streaming(dev, status, len);
break;
+ }
default:
uvc_trace(UVC_TRACE_STATUS, "Unknown status event "
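
Replacing the raw data[1]/data[2] indexing with two __packed structures makes the wire layout of the status packet explicit and lets the flexible bValue[] member name the payload instead of a magic offset. A userspace demonstration of viewing a received byte buffer through such a structure:

	#include <stdio.h>
	#include <stdint.h>

	struct control_status {
		uint8_t bStatusType;
		uint8_t bOriginator;
		uint8_t bEvent;
		uint8_t bSelector;
		uint8_t bAttribute;
		uint8_t bValue[];
	} __attribute__((packed));

	int main(void)
	{
		uint8_t buf[] = { 0x01, 0x02, 0x00, 0x04, 0x00, 0x2a };
		const struct control_status *s = (const void *)buf;

		printf("originator %d selector %d value %d\n",
		       s->bOriginator, s->bSelector, s->bValue[0]);
		return 0;
	}
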
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index bd32914259ae..18a7384b50ee 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -994,7 +994,7 @@ static int uvc_ioctl_s_ctrl(struct file *file, void *fh,
if (ret < 0)
return ret;
- ret = uvc_ctrl_set(chain, &xctrl);
+ ret = uvc_ctrl_set(handle, &xctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
return ret;
@@ -1069,7 +1069,7 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
return ret;
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_set(chain, ctrl);
+ ret = uvc_ctrl_set(handle, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
ctrls->error_idx = commit ? ctrls->count : i;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a88b2e51a666..86a99f461fd8 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -73,17 +73,57 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
u8 intfnum, u8 cs, void *data, u16 size)
{
int ret;
+ u8 error;
+ u8 tmp;
ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
UVC_CTRL_CONTROL_TIMEOUT);
- if (ret != size) {
- uvc_printk(KERN_ERR, "Failed to query (%s) UVC control %u on "
- "unit %u: %d (exp. %u).\n", uvc_query_name(query), cs,
- unit, ret, size);
- return -EIO;
+ if (likely(ret == size))
+ return 0;
+
+ uvc_printk(KERN_ERR,
+ "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
+ uvc_query_name(query), cs, unit, ret, size);
+
+ if (ret != -EPIPE)
+ return ret;
+
+ tmp = *(u8 *)data;
+
+ ret = __uvc_query_ctrl(dev, UVC_GET_CUR, 0, intfnum,
+ UVC_VC_REQUEST_ERROR_CODE_CONTROL, data, 1,
+ UVC_CTRL_CONTROL_TIMEOUT);
+
+ error = *(u8 *)data;
+ *(u8 *)data = tmp;
+
+ if (ret != 1)
+ return ret < 0 ? ret : -EPIPE;
+
+ uvc_trace(UVC_TRACE_CONTROL, "Control error %u\n", error);
+
+ switch (error) {
+ case 0:
+ /* Cannot happen - we received a STALL */
+ return -EPIPE;
+ case 1: /* Not ready */
+ return -EBUSY;
+ case 2: /* Wrong state */
+ return -EILSEQ;
+ case 3: /* Power */
+ return -EREMOTE;
+ case 4: /* Out of range */
+ return -ERANGE;
+ case 5: /* Invalid unit */
+ case 6: /* Invalid control */
+ case 7: /* Invalid Request */
+ case 8: /* Invalid value within range */
+ return -EINVAL;
+ default: /* reserved or unknown */
+ break;
}
- return 0;
+ return -EPIPE;
}
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
@@ -1232,6 +1272,8 @@ static void uvc_video_validate_buffer(const struct uvc_streaming *stream,
static void uvc_video_next_buffers(struct uvc_streaming *stream,
struct uvc_buffer **video_buf, struct uvc_buffer **meta_buf)
{
+ uvc_video_validate_buffer(stream, *video_buf);
+
if (*meta_buf) {
struct vb2_v4l2_buffer *vb2_meta = &(*meta_buf)->buf;
const struct vb2_v4l2_buffer *vb2_video = &(*video_buf)->buf;
@@ -1270,10 +1312,8 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
do {
ret = uvc_video_decode_start(stream, buf, mem,
urb->iso_frame_desc[i].actual_length);
- if (ret == -EAGAIN) {
- uvc_video_validate_buffer(stream, buf);
+ if (ret == -EAGAIN)
uvc_video_next_buffers(stream, &buf, &meta_buf);
- }
} while (ret == -EAGAIN);
if (ret < 0)
@@ -1289,10 +1329,8 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
uvc_video_decode_end(stream, buf, mem,
urb->iso_frame_desc[i].actual_length);
- if (buf->state == UVC_BUF_STATE_READY) {
- uvc_video_validate_buffer(stream, buf);
+ if (buf->state == UVC_BUF_STATE_READY)
uvc_video_next_buffers(stream, &buf, &meta_buf);
- }
}
}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index be5cf179228b..e5f5d84f1d1d 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -12,6 +12,7 @@
#include <linux/usb/video.h>
#include <linux/uvcvideo.h>
#include <linux/videodev2.h>
+#include <linux/workqueue.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -157,6 +158,9 @@
#define UVC_GUID_FORMAT_D3DFMT_L8 \
{0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
+ {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
/* ------------------------------------------------------------------------
@@ -256,6 +260,8 @@ struct uvc_control {
initialized:1;
u8 *uvc_data;
+
+ struct uvc_fh *handle; /* File handle that last changed the control. */
};
struct uvc_format_desc {
@@ -600,6 +606,14 @@ struct uvc_device {
u8 *status;
struct input_dev *input;
char input_phys[64];
+
+ struct uvc_ctrl_work {
+ struct work_struct work;
+ struct urb *urb;
+ struct uvc_video_chain *chain;
+ struct uvc_control *ctrl;
+ const void *data;
+ } async_ctrl;
};
enum uvc_handle_state {
@@ -753,6 +767,8 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
int uvc_ctrl_init_device(struct uvc_device *dev);
void uvc_ctrl_cleanup_device(struct uvc_device *dev);
int uvc_ctrl_restore_values(struct uvc_device *dev);
+bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data);
int uvc_ctrl_begin(struct uvc_video_chain *chain);
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
@@ -770,7 +786,7 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
}
int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
-int uvc_ctrl_set(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
+int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index d29e45516eb7..599c1cbff3b9 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -431,6 +431,20 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Use Previous Specific Frame",
NULL,
};
+ static const char * const vp8_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
static const char * const flash_led_mode[] = {
"Off",
@@ -614,6 +628,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return mpeg4_profile;
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
return vpx_golden_frame_sel;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ return vp8_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ return vp9_profile;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
return jpeg_chroma_subsampling;
case V4L2_CID_DV_TX_MODE:
@@ -839,7 +857,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: return "VPX Profile";
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
/* HEVC controls */
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
@@ -1180,6 +1199,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DEINTERLACING_MODE:
case V4L2_CID_TUNE_DEEMPHASIS:
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
case V4L2_CID_DETECT_MD_MODE:
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
@@ -3137,9 +3158,22 @@ static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
/* If OK, then make the new values permanent. */
update_flag = is_cur_manual(master) != is_new_manual(master);
- for (i = 0; i < master->ncontrols; i++)
+
+ for (i = 0; i < master->ncontrols; i++) {
+ /*
+ * If we switch from auto to manual mode, and this cluster
+ * contains volatile controls, then all non-master controls
+ * have to be marked as changed. The 'new' value contains
+ * the volatile value (obtained by update_from_auto_cluster),
+ * which now has to become the current value.
+ */
+ if (i && update_flag && is_new_manual(master) &&
+ master->has_volatiles && master->cluster[i])
+ master->cluster[i]->has_changed = true;
+
new_to_cur(fh, master->cluster[i], ch_flags |
((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ }
return 0;
}
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 4ffd7d60a901..69e775930fc4 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -202,7 +202,7 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (v4l2_dev->mdev) {
+ if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) {
/* Remove interfaces and interface links */
media_devnode_remove(vdev->intf_devnode);
if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN)
@@ -733,19 +733,22 @@ static void determine_valid_ioctls(struct video_device *vdev)
BASE_VIDIOC_PRIVATE);
}
-static int video_register_media_controller(struct video_device *vdev, int type)
+static int video_register_media_controller(struct video_device *vdev)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
u32 intf_type;
int ret;
- if (!vdev->v4l2_dev->mdev)
+ /* Memory-to-memory devices are more complex and use
+	 * their own function to register their MC entities.
+ */
+ if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M)
return 0;
vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
- switch (type) {
+ switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
intf_type = MEDIA_INTF_T_V4L_VIDEO;
vdev->entity.function = MEDIA_ENT_F_IO_V4L;
@@ -808,7 +811,8 @@ static int video_register_media_controller(struct video_device *vdev, int type)
link = media_create_intf_link(&vdev->entity,
&vdev->intf_devnode->intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link) {
media_devnode_remove(vdev->intf_devnode);
media_device_unregister_entity(&vdev->entity);
@@ -993,7 +997,7 @@ int __video_register_device(struct video_device *vdev,
v4l2_device_get(vdev->v4l2_dev);
/* Part 5: Register the entity. */
- ret = video_register_media_controller(vdev, type);
+ ret = video_register_media_controller(vdev);
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 937c6de85606..3940e55c72f1 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -267,7 +267,8 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
link = media_create_intf_link(&sd->entity,
&vdev->intf_devnode->intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link) {
err = -ENOMEM;
goto clean_up;
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 3f77aa318035..169bdbb1f61a 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -154,6 +154,10 @@ static void v4l2_fwnode_endpoint_parse_parallel_bus(
flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+ if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v))
+ flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH :
+ V4L2_MBUS_DATA_ENABLE_LOW;
+
bus->flags = flags;
}
@@ -739,7 +743,7 @@ static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
const char * const *props, unsigned int nprops)
{
struct fwnode_reference_args fwnode_args;
- unsigned int *args = fwnode_args.args;
+ u64 *args = fwnode_args.args;
struct fwnode_handle *child;
int ret;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index dd210067151f..54afc9c7ee6e 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -29,6 +29,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mc.h>
+#include <media/v4l2-mem2mem.h>
#include <trace/events/v4l2.h>
@@ -125,6 +126,42 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
}
EXPORT_SYMBOL(v4l2_video_std_construct);
+/* Fill in the fields of a v4l2_standard structure according to the
+ * 'id' and 'vs->index' parameters. Returns negative on error. */
+int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id)
+{
+ v4l2_std_id curr_id = 0;
+ unsigned int index = vs->index, i, j = 0;
+ const char *descr = "";
+
+ /* Return -ENODATA if the id for the current input
+ or output is 0, meaning that it doesn't support this API. */
+ if (id == 0)
+ return -ENODATA;
+
+ /* Return norm array in a canonical way */
+ for (i = 0; i <= index && id; i++) {
+		/* The last std value in the standards array is 0, so this
+		   while loop always ends there, since (id & 0) == 0. */
+ while ((id & standards[j].std) != standards[j].std)
+ j++;
+ curr_id = standards[j].std;
+ descr = standards[j].descr;
+ j++;
+ if (curr_id == 0)
+ break;
+ if (curr_id != V4L2_STD_PAL &&
+ curr_id != V4L2_STD_SECAM &&
+ curr_id != V4L2_STD_NTSC)
+ id &= ~curr_id;
+ }
+ if (i <= index)
+ return -EINVAL;
+
+ v4l2_video_std_construct(vs, curr_id, descr);
+ return 0;
+}
+
/* ----------------------------------------------------------------- */
/* some arrays for pretty-printing debug messages of enum types */
@@ -1147,6 +1184,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
+ case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break;
case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
@@ -1222,6 +1260,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break;
case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
@@ -1274,6 +1316,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
@@ -1753,36 +1796,8 @@ static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
struct v4l2_standard *p = arg;
- v4l2_std_id id = vfd->tvnorms, curr_id = 0;
- unsigned int index = p->index, i, j = 0;
- const char *descr = "";
-
- /* Return -ENODATA if the tvnorms for the current input
- or output is 0, meaning that it doesn't support this API. */
- if (id == 0)
- return -ENODATA;
- /* Return norm array in a canonical way */
- for (i = 0; i <= index && id; i++) {
- /* last std value in the standards array is 0, so this
- while always ends there since (id & 0) == 0. */
- while ((id & standards[j].std) != standards[j].std)
- j++;
- curr_id = standards[j].std;
- descr = standards[j].descr;
- j++;
- if (curr_id == 0)
- break;
- if (curr_id != V4L2_STD_PAL &&
- curr_id != V4L2_STD_SECAM &&
- curr_id != V4L2_STD_NTSC)
- id &= ~curr_id;
- }
- if (i <= index)
- return -EINVAL;
-
- v4l2_video_std_construct(p, curr_id, descr);
- return 0;
+ return v4l_video_std_enumstd(p, vfd->tvnorms);
}
static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
@@ -2662,11 +2677,62 @@ static bool v4l2_is_known_ioctl(unsigned int cmd)
return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
}
+#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
+static bool v4l2_ioctl_m2m_queue_is_output(unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case VIDIOC_CREATE_BUFS: {
+ struct v4l2_create_buffers *cbufs = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(cbufs->format.type);
+ }
+ case VIDIOC_REQBUFS: {
+ struct v4l2_requestbuffers *rbufs = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(rbufs->type);
+ }
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF:
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_PREPARE_BUF: {
+ struct v4l2_buffer *buf = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(buf->type);
+ }
+ case VIDIOC_EXPBUF: {
+ struct v4l2_exportbuffer *expbuf = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(expbuf->type);
+ }
+ case VIDIOC_STREAMON:
+ case VIDIOC_STREAMOFF: {
+ int *type = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(*type);
+ }
+ default:
+ return false;
+ }
+}
+#endif
+
static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
- unsigned int cmd)
+ struct v4l2_fh *vfh, unsigned int cmd,
+ void *arg)
{
if (_IOC_NR(cmd) >= V4L2_IOCTLS)
return vdev->lock;
+#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
+ if (vfh && vfh->m2m_ctx &&
+ (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) {
+ bool is_output = v4l2_ioctl_m2m_queue_is_output(cmd, arg);
+ struct v4l2_m2m_queue_ctx *ctx = is_output ?
+ &vfh->m2m_ctx->out_q_ctx : &vfh->m2m_ctx->cap_q_ctx;
+
+ if (ctx->q.lock)
+ return ctx->q.lock;
+ }
+#endif
if (vdev->queue && vdev->queue->lock &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
return vdev->queue->lock;
@@ -2733,7 +2799,7 @@ static long __video_do_ioctl(struct file *file,
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
vfh = file->private_data;
- lock = v4l2_ioctl_get_lock(vfd, cmd);
+ lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
if (lock && mutex_lock_interruptible(lock))
return -ERESTARTSYS;
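
A mem2mem file handle owns two vb2 queues (OUTPUT and CAPTURE), so the single vdev->queue->lock cannot serialize its buffer ioctls correctly; the new v4l2_ioctl_get_lock() inspects the buffer type carried in the ioctl argument and returns the lock of the queue actually being touched. The dispatch modelled in isolation (userspace sketch; the kernel returns the ctx->q.lock mutexes):

	#include <stdio.h>
	#include <stdbool.h>

	struct queue { const char *name; /* + a struct mutex in the kernel */ };
	struct m2m_ctx { struct queue out_q, cap_q; };

	static struct queue *ioctl_queue(struct m2m_ctx *ctx, bool is_output)
	{
		return is_output ? &ctx->out_q : &ctx->cap_q;
	}

	int main(void)
	{
		struct m2m_ctx ctx = { { "OUTPUT" }, { "CAPTURE" } };

		/* VIDIOC_QBUF on an output buffer locks the OUTPUT queue. */
		printf("%s\n", ioctl_queue(&ctx, true)->name);
		printf("%s\n", ioctl_queue(&ctx, false)->name);
		return 0;
	}
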
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index c4f963d96a79..ce9bd1b91210 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -17,9 +17,11 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
@@ -50,9 +52,38 @@ module_param(debug, bool, 0644);
* offsets but for different queues */
#define DST_QUEUE_OFF_BASE (1 << 30)
+enum v4l2_m2m_entity_type {
+ MEM2MEM_ENT_TYPE_SOURCE,
+ MEM2MEM_ENT_TYPE_SINK,
+ MEM2MEM_ENT_TYPE_PROC
+};
+
+static const char * const m2m_entity_name[] = {
+ "source",
+ "sink",
+ "proc"
+};
/**
* struct v4l2_m2m_dev - per-device context
+ * @source:	&struct media_entity pointer with the source entity
+ *		Used only when the M2M device is registered via
+ *		v4l2_m2m_register_media_controller().
+ * @source_pad: &struct media_pad with the source pad.
+ *		Used only when the M2M device is registered via
+ *		v4l2_m2m_register_media_controller().
+ * @sink:	&struct media_entity pointer with the sink entity
+ *		Used only when the M2M device is registered via
+ *		v4l2_m2m_register_media_controller().
+ * @sink_pad:	&struct media_pad with the sink pad.
+ *		Used only when the M2M device is registered via
+ *		v4l2_m2m_register_media_controller().
+ * @proc:	&struct media_entity pointer with the M2M device itself.
+ * @proc_pads:	&struct media_pad with the @proc pads.
+ *		Used only when the M2M device is registered via
+ *		v4l2_m2m_register_media_controller().
+ * @intf_devnode: &struct media_intf devnode pointer with the interface
+ *		that controls the M2M device.
* @curr_ctx: currently running instance
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
@@ -60,6 +91,15 @@ module_param(debug, bool, 0644);
*/
struct v4l2_m2m_dev {
struct v4l2_m2m_ctx *curr_ctx;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_entity *source;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+#endif
struct list_head job_queue;
spinlock_t job_spinlock;
@@ -107,6 +147,24 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
+void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+
+ b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
+
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
@@ -209,15 +267,24 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
-void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * __v4l2_m2m_try_queue() - queue a job
+ * @m2m_dev: m2m device
+ * @m2m_ctx: m2m context
+ *
+ * Check if this context is ready to queue a job.
+ *
+ * This function can run in interrupt context.
+ */
+static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- struct v4l2_m2m_dev *m2m_dev;
unsigned long flags_job, flags_out, flags_cap;
- m2m_dev = m2m_ctx->m2m_dev;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
if (!m2m_ctx->out_q_ctx.q.streaming
@@ -275,7 +342,25 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
m2m_ctx->job_flags |= TRANS_QUEUED;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+}
+/**
+ * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
+ * @m2m_ctx: m2m context
+ *
+ * Check if this context is ready to queue a job. If suitable,
+ * run the next queued job on the mem2mem device.
+ *
+ * This function shouldn't run in interrupt context.
+ *
+ * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
+ * and then run another job for another context.
+ */
+void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
+
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
@@ -300,7 +385,8 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
m2m_ctx->job_flags |= TRANS_ABORT;
if (m2m_ctx->job_flags & TRANS_RUNNING) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ if (m2m_dev->m2m_ops->job_abort)
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
wait_event(m2m_ctx->finished,
!(m2m_ctx->job_flags & TRANS_RUNNING));
@@ -339,7 +425,6 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
* allow more than one job on the job_queue per instance, each has
* to be scheduled separately after the previous one finishes. */
v4l2_m2m_try_schedule(m2m_ctx);
- v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
@@ -595,12 +680,179 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
+{
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+ media_devnode_remove(m2m_dev->intf_devnode);
+
+ media_entity_remove_links(m2m_dev->source);
+ media_entity_remove_links(&m2m_dev->sink);
+ media_entity_remove_links(&m2m_dev->proc);
+ media_device_unregister_entity(m2m_dev->source);
+ media_device_unregister_entity(&m2m_dev->sink);
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->source->name);
+ kfree(m2m_dev->sink.name);
+ kfree(m2m_dev->proc.name);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
+
+static int v4l2_m2m_register_entity(struct media_device *mdev,
+ struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
+ struct video_device *vdev, int function)
+{
+ struct media_entity *entity;
+ struct media_pad *pads;
+ char *name;
+ unsigned int len;
+ int num_pads;
+ int ret;
+
+ switch (type) {
+ case MEM2MEM_ENT_TYPE_SOURCE:
+ entity = m2m_dev->source;
+ pads = &m2m_dev->source_pad;
+ pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_SINK:
+ entity = &m2m_dev->sink;
+ pads = &m2m_dev->sink_pad;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_PROC:
+ entity = &m2m_dev->proc;
+ pads = m2m_dev->proc_pads;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (type != MEM2MEM_ENT_TYPE_PROC) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+ len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
+ name = kmalloc(len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret) {
+ kfree(name);
+ return ret;
+ }
+ ret = media_device_register_entity(mdev, entity);
+ if (ret) {
+ kfree(name);
+ return ret;
+ }
+
+ return 0;
+}
+
+int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function)
+{
+ struct media_device *mdev = vdev->v4l2_dev->mdev;
+ struct media_link *link;
+ int ret;
+
+ if (!mdev)
+ return 0;
+
+ /* A memory-to-memory device consists of two DMA engine
+ * entities and one video processing entity. The DMA engine
+ * entities are linked to a V4L interface.
+ */
+
+ /* Create the three entities with their pads */
+ m2m_dev->source = &vdev->entity;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ return ret;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_PROC, vdev, function);
+ if (ret)
+ goto err_rel_entity0;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ m2m_dev->intf_devnode = media_devnode_create(mdev,
+ MEDIA_INTF_T_V4L_VIDEO, 0,
+ VIDEO_MAJOR, vdev->minor);
+ if (!m2m_dev->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(m2m_dev->source,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&m2m_dev->sink,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_intf_link;
+ }
+ return 0;
+
+err_rm_intf_link:
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+err_rm_devnode:
+ media_devnode_remove(m2m_dev->intf_devnode);
+err_rm_links1:
+ media_entity_remove_links(&m2m_dev->sink);
+err_rm_links0:
+ media_entity_remove_links(&m2m_dev->proc);
+ media_entity_remove_links(m2m_dev->source);
+err_rel_entity2:
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->proc.name);
+err_rel_entity1:
+ media_device_unregister_entity(&m2m_dev->sink);
+ kfree(m2m_dev->sink.name);
+err_rel_entity0:
+ media_device_unregister_entity(m2m_dev->source);
+ kfree(m2m_dev->source->name);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
+#endif
+
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
- if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
- WARN_ON(!m2m_ops->job_abort))
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
return ERR_PTR(-EINVAL);
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
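With v4l2_m2m_try_schedule() above now both queueing the context and kicking v4l2_m2m_try_run(), and with job_abort made optional, a driver's completion path reduces to marking the buffers done and calling v4l2_m2m_job_finish(). A minimal sketch of that path under these semantics; the demo_* structures and field names are invented for illustration and are not part of this patch:

	#include <linux/interrupt.h>
	#include <media/v4l2-fh.h>
	#include <media/v4l2-mem2mem.h>
	#include <media/videobuf2-v4l2.h>

	struct demo_ctx {
		struct v4l2_fh fh;		/* fh.m2m_ctx holds the m2m context */
	};

	struct demo_dev {
		struct v4l2_m2m_dev *m2m_dev;
	};

	static irqreturn_t demo_irq(int irq, void *data)
	{
		struct demo_dev *dev = data;
		struct demo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
		struct vb2_v4l2_buffer *src, *dst;

		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);

		/*
		 * v4l2_m2m_job_finish() schedules *and* runs the next queued
		 * job via v4l2_m2m_try_schedule(), so no separate
		 * v4l2_m2m_try_run() call is needed here.
		 */
		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);

		return IRQ_HANDLED;
	}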
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 6a7f7f75dfd7..2b63fa6b6fc9 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -494,6 +494,28 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_DV_TIMINGS:
return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_G_STD:
+ return v4l2_subdev_call(sd, video, g_std, arg);
+
+ case VIDIOC_SUBDEV_S_STD: {
+ v4l2_std_id *std = arg;
+
+ return v4l2_subdev_call(sd, video, s_std, *std);
+ }
+
+ case VIDIOC_SUBDEV_ENUMSTD: {
+ struct v4l2_standard *p = arg;
+ v4l2_std_id id;
+
+ if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
+ return -EINVAL;
+
+ return v4l_video_std_enumstd(p, id);
+ }
+
+ case VIDIOC_SUBDEV_QUERYSTD:
+ return v4l2_subdev_call(sd, video, querystd, arg);
#endif
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
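The VIDIOC_SUBDEV_*_STD ioctls added above dispatch straight to the existing v4l2_subdev_video_ops, so a subdev opts in simply by filling in those ops; VIDIOC_SUBDEV_ENUMSTD additionally needs g_tvnorms to report the supported set. A minimal sketch, assuming a hypothetical demo subdev that keeps its current standard in driver state:

	#include <linux/videodev2.h>
	#include <media/v4l2-subdev.h>

	struct demo_sd {
		struct v4l2_subdev sd;
		v4l2_std_id std;		/* current standard (illustrative) */
	};

	static int demo_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
	{
		struct demo_sd *demo = container_of(sd, struct demo_sd, sd);

		*norm = demo->std;
		return 0;
	}

	static int demo_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
	{
		struct demo_sd *demo = container_of(sd, struct demo_sd, sd);

		if (!(norm & (V4L2_STD_525_60 | V4L2_STD_625_50)))
			return -EINVAL;
		demo->std = norm;
		return 0;
	}

	static int demo_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *std)
	{
		/* VIDIOC_SUBDEV_ENUMSTD iterates over this set. */
		*std = V4L2_STD_525_60 | V4L2_STD_625_50;
		return 0;
	}

	static const struct v4l2_subdev_video_ops demo_video_ops = {
		.g_std = demo_g_std,
		.s_std = demo_s_std,
		.g_tvnorms = demo_g_tvnorms,
	};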
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 8d731d6c3e54..63389f075f1d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -116,12 +116,14 @@ config FSL_CORENET_CF
config FSL_IFC
bool
- depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
config JZ4780_NEMC
bool "Ingenic JZ4780 SoC NEMC driver"
default y
- depends on MACH_JZ4780
+ depends on MACH_JZ4780 || COMPILE_TEST
+ depends on HAS_IOMEM && OF
help
This driver is for the NAND/External Memory Controller (NEMC) in
the Ingenic JZ4780. This controller is used to handle external
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index bb93cc53554e..bd25faf6d13d 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -672,13 +672,6 @@ static int tegra_mc_probe(struct platform_device *pdev)
return err;
}
- err = tegra_mc_reset_setup(mc);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register reset controller: %d\n",
- err);
- return err;
- }
-
mc->irq = platform_get_irq(pdev, 0);
if (mc->irq < 0) {
dev_err(&pdev->dev, "interrupt not specified\n");
@@ -697,13 +690,16 @@ static int tegra_mc_probe(struct platform_device *pdev)
return err;
}
+ err = tegra_mc_reset_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to register reset controller: %d\n",
+ err);
+
if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU)) {
mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
- if (IS_ERR(mc->smmu)) {
+ if (IS_ERR(mc->smmu))
dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
PTR_ERR(mc->smmu));
- return PTR_ERR(mc->smmu);
- }
}
return 0;
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 7254fb596979..ffda903c49bb 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <dt-bindings/memory/tegra186-mc.h>
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 632651f4b6e8..2250d03ea17f 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -249,6 +249,34 @@ static const struct of_device_id ti_emif_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ti_emif_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int ti_emif_resume(struct device *dev)
+{
+ unsigned long tmp =
+ __raw_readl((void *)emif_instance->ti_emif_sram_virt);
+
+ /*
+ * Check whether what we are about to copy is already present in
+ * the first word at the destination; only copy if it is not,
+ * which indicates we have lost context and SRAM no longer
+ * contains the PM code.
+ */
+ if (tmp != ti_emif_sram)
+ ti_emif_push_sram(dev, emif_instance);
+
+ return 0;
+}
+
+static int ti_emif_suspend(struct device *dev)
+{
+ /*
+ * The contents remain present in DDR, hence there is no need
+ * to save them explicitly.
+ */
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
static int ti_emif_probe(struct platform_device *pdev)
{
int ret;
@@ -308,12 +336,17 @@ static int ti_emif_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops ti_emif_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ti_emif_suspend, ti_emif_resume)
+};
+
static struct platform_driver ti_emif_driver = {
.probe = ti_emif_probe,
.remove = ti_emif_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(ti_emif_of_match),
+ .pm = &ti_emif_pm_ops,
},
};
module_platform_driver(ti_emif_driver);
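The new callbacks are wrapped in #ifdef CONFIG_PM_SLEEP because SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when sleep support is disabled, which would otherwise leave them flagged as defined-but-unused. The same skeleton in isolation, for a hypothetical platform driver (all demo_* names invented):

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/pm.h>

	static int demo_probe(struct platform_device *pdev)
	{
		return 0;
	}

	#ifdef CONFIG_PM_SLEEP
	static int demo_suspend(struct device *dev)
	{
		/* State lives in DDR across suspend; nothing to save here. */
		return 0;
	}

	static int demo_resume(struct device *dev)
	{
		/* Restore any on-chip RAM contents lost while suspended. */
		return 0;
	}
	#endif /* CONFIG_PM_SLEEP */

	static const struct dev_pm_ops demo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
	};

	static struct platform_driver demo_driver = {
		.probe = demo_probe,
		.driver = {
			.name = "demo-pm",
			.pm = &demo_pm_ops,
		},
	};
	module_platform_driver(demo_driver);

	MODULE_LICENSE("GPL");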
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a625ac4e2872..e6b4ae558767 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -642,6 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
freereq = 0;
if (event != MPI_EVENT_EVENT_CHANGE)
break;
+ /* else: fall through */
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
@@ -1779,7 +1780,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
struct proc_dir_entry *dent;
#endif
- ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
+ ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL);
if (ioc == NULL) {
printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
return -ENOMEM;
@@ -1886,6 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
ioc->errata_flag_1064 = 1;
+ /* fall through */
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
@@ -1930,6 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
+ /* fall through */
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 4470630dd545..8d22d6134a89 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2514,8 +2514,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
if (mpt_config(ioc, &cfg) == 0) {
ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
if (strlen(pdata->BoardTracerNumber) > 1) {
- strncpy(karg.serial_number, pdata->BoardTracerNumber, 24);
- karg.serial_number[24-1]='\0';
+ strlcpy(karg.serial_number,
+ pdata->BoardTracerNumber, 24);
}
}
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 06b175420be9..b15fdc626fb8 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1292,7 +1292,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
- ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
goto out_mptfc_probe;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 76a66da33996..b8cf2658649e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4327,6 +4327,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
}
}
mpt_findImVolumes(ioc);
+ /* fall through */
case MPTSAS_ADD_DEVICE:
memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index 84e313107233..7b9052ea7413 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -146,14 +146,14 @@ int pm860x_page_reg_write(struct i2c_client *i2c, int reg,
unsigned char zero;
int ret;
- i2c_lock_adapter(i2c->adapter);
+ i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
read_device(i2c, 0xFA, 0, &zero);
read_device(i2c, 0xFB, 0, &zero);
read_device(i2c, 0xFF, 0, &zero);
ret = write_device(i2c, reg, 1, &data);
read_device(i2c, 0xFE, 0, &zero);
read_device(i2c, 0xFC, 0, &zero);
- i2c_unlock_adapter(i2c->adapter);
+ i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
return ret;
}
EXPORT_SYMBOL(pm860x_page_reg_write);
@@ -164,14 +164,14 @@ int pm860x_page_bulk_read(struct i2c_client *i2c, int reg,
unsigned char zero = 0;
int ret;
- i2c_lock_adapter(i2c->adapter);
+ i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
read_device(i2c, 0xfa, 0, &zero);
read_device(i2c, 0xfb, 0, &zero);
read_device(i2c, 0xff, 0, &zero);
ret = read_device(i2c, reg, count, buf);
read_device(i2c, 0xFE, 0, &zero);
read_device(i2c, 0xFC, 0, &zero);
- i2c_unlock_adapter(i2c->adapter);
+ i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
return ret;
}
EXPORT_SYMBOL(pm860x_page_bulk_read);
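The switch from i2c_lock_adapter() to i2c_lock_bus(..., I2C_LOCK_SEGMENT) takes only the local bus segment rather than the whole root adapter, which is enough to keep the multi-register page sequences above atomic. While the segment lock is held, transfers must go through the unlocked __i2c_transfer() variant, since the regular entry points take the same lock. A minimal sketch of the pattern, with an invented register read:

	#include <linux/i2c.h>
	#include <linux/kernel.h>

	/* Read one register with the segment locked around the transfer pair. */
	static int demo_locked_read(struct i2c_client *client, u8 reg, u8 *val)
	{
		struct i2c_msg msgs[] = {
			{
				.addr	= client->addr,
				.len	= 1,
				.buf	= &reg,
			},
			{
				.addr	= client->addr,
				.flags	= I2C_M_RD,
				.len	= 1,
				.buf	= val,
			},
		};
		int ret;

		i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
		/* i2c_transfer() would deadlock here: it takes the same lock. */
		ret = __i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
		i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);

		if (ret < 0)
			return ret;
		return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
	}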
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b860eb5aa194..11841f4b7b2b 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -202,26 +202,6 @@ config MFD_CROS_EC
You also need to enable the driver for the bus you are using. The
protocol for talking to the EC is defined by the bus driver.
-config MFD_CROS_EC_I2C
- tristate "ChromeOS Embedded Controller (I2C)"
- depends on MFD_CROS_EC && I2C
-
- help
- If you say Y here, you get support for talking to the ChromeOS
- EC through an I2C bus. This uses a simple byte-level protocol with
- a checksum. Failing accesses will be retried three times to
- improve reliability.
-
-config MFD_CROS_EC_SPI
- tristate "ChromeOS Embedded Controller (SPI)"
- depends on MFD_CROS_EC && SPI
-
- ---help---
- If you say Y here, you get support for talking to the ChromeOS EC
- through a SPI bus, using a byte-level protocol. Since the EC's
- response time cannot be guaranteed, we support ignoring
- 'pre-amble' bytes before the response actually starts.
-
config MFD_CROS_EC_CHARDEV
tristate "Chrome OS Embedded Controller userspace device interface"
depends on MFD_CROS_EC
@@ -232,6 +212,56 @@ config MFD_CROS_EC_CHARDEV
If you have a supported Chromebook, choose Y or M here.
The module will be called cros_ec_dev.
+config MFD_MADERA
+ tristate "Cirrus Logic Madera codecs"
+ select MFD_CORE
+ select REGMAP
+ select REGMAP_IRQ
+ select MADERA_IRQ
+ select PINCTRL
+ select PINCTRL_MADERA
+ help
+ Support for the Cirrus Logic Madera platform audio codecs
+
+config MFD_MADERA_I2C
+ tristate "Cirrus Logic Madera codecs with I2C"
+ depends on MFD_MADERA
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Support for the Cirrus Logic Madera platform audio SoC
+ core functionality controlled via I2C.
+
+config MFD_MADERA_SPI
+ tristate "Cirrus Logic Madera codecs with SPI"
+ depends on MFD_MADERA
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ help
+ Support for the Cirrus Logic Madera platform audio SoC
+ core functionality controlled via SPI.
+
+config MFD_CS47L35
+ bool "Cirrus Logic CS47L35"
+ select PINCTRL_CS47L35
+ depends on MFD_MADERA
+ help
+ Support for Cirrus Logic CS47L35 Smart Codec
+
+config MFD_CS47L85
+ bool "Cirrus Logic CS47L85"
+ select PINCTRL_CS47L85
+ depends on MFD_MADERA
+ help
+ Support for Cirrus Logic CS47L85 Smart Codec
+
+config MFD_CS47L90
+ bool "Cirrus Logic CS47L90/91"
+ select PINCTRL_CS47L90
+ depends on MFD_MADERA
+ help
+ Support for Cirrus Logic CS47L90 and CS47L91 Smart Codecs
+
config MFD_ASIC3
bool "Compaq ASIC3"
depends on GPIOLIB && ARM
@@ -1787,6 +1817,19 @@ config MFD_STW481X
in various ST Microelectronics and ST-Ericsson embedded
Nomadik series.
+config MFD_ROHM_BD718XX
+ tristate "ROHM BD71837 Power Management IC"
+ depends on I2C=y
+ depends on OF
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select MFD_CORE
+ help
+ Select this option to get support for the ROHM BD71837
+ Power Management ICs. BD71837 is designed to power processors like
+ NXP i.MX8. It contains 8 BUCK outputs and 7 LDOs, voltage monitoring
+ and emergency shutdown, as well as a 32.768 kHz clock output.
+
config MFD_STM32_LPTIMER
tristate "Support for STM32 Low-Power Timer"
depends on (ARCH_STM32 && OF) || COMPILE_TEST
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index e9fd20dba18d..5856a9489cbd 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -14,8 +14,6 @@ obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
cros_ec_core-objs := cros_ec.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec_core.o
-obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
-obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
obj-$(CONFIG_MFD_CROS_EC_CHARDEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
@@ -72,6 +70,20 @@ wm8994-objs := wm8994-core.o wm8994-irq.o wm8994-regmap.o
obj-$(CONFIG_MFD_WM8994) += wm8994.o
obj-$(CONFIG_MFD_WM97xx) += wm97xx-core.o
+madera-objs := madera-core.o
+ifeq ($(CONFIG_MFD_CS47L35),y)
+madera-objs += cs47l35-tables.o
+endif
+ifeq ($(CONFIG_MFD_CS47L85),y)
+madera-objs += cs47l85-tables.o
+endif
+ifeq ($(CONFIG_MFD_CS47L90),y)
+madera-objs += cs47l90-tables.o
+endif
+obj-$(CONFIG_MFD_MADERA) += madera.o
+obj-$(CONFIG_MFD_MADERA_I2C) += madera-i2c.o
+obj-$(CONFIG_MFD_MADERA_SPI) += madera-spi.o
+
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
@@ -227,4 +239,5 @@ obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
+obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 83f1c5a516d9..5f1e37d23943 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -24,6 +24,7 @@
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>
#include <linux/slab.h>
+#include <linux/ktime.h>
#include <linux/platform_device.h>
#include <linux/mfd/arizona/core.h>
@@ -236,22 +237,39 @@ static irqreturn_t arizona_overclocked(int irq, void *data)
#define ARIZONA_REG_POLL_DELAY_US 7500
+static inline bool arizona_poll_reg_delay(ktime_t timeout)
+{
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return false;
+
+ usleep_range(ARIZONA_REG_POLL_DELAY_US / 2, ARIZONA_REG_POLL_DELAY_US);
+
+ return true;
+}
+
static int arizona_poll_reg(struct arizona *arizona,
int timeout_ms, unsigned int reg,
unsigned int mask, unsigned int target)
{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_ms * USEC_PER_MSEC);
unsigned int val = 0;
int ret;
- ret = regmap_read_poll_timeout(arizona->regmap,
- reg, val, ((val & mask) == target),
- ARIZONA_REG_POLL_DELAY_US,
- timeout_ms * 1000);
- if (ret)
- dev_err(arizona->dev, "Polling reg 0x%x timed out: %x\n",
- reg, val);
+ do {
+ ret = regmap_read(arizona->regmap, reg, &val);
- return ret;
+ if ((val & mask) == target)
+ return 0;
+ } while (arizona_poll_reg_delay(timeout));
+
+ if (ret) {
+ dev_err(arizona->dev, "Failed polling reg 0x%x: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ dev_err(arizona->dev, "Polling reg 0x%x timed out: %x\n", reg, val);
+ return -ETIMEDOUT;
}
static int arizona_wait_for_boot(struct arizona *arizona)
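The open-coded loop replaces regmap_read_poll_timeout() so that a failed read no longer aborts polling early and is only reported once the deadline has passed, and so one final read attempt is still made after the deadline expires. The same ktime-deadline idiom in isolation; demo_poll() and its ready() callback are invented stand-ins for the register read:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/ktime.h>

	#define DEMO_POLL_DELAY_US	7500

	static bool demo_poll_delay(ktime_t deadline)
	{
		if (ktime_compare(ktime_get(), deadline) > 0)
			return false;

		usleep_range(DEMO_POLL_DELAY_US / 2, DEMO_POLL_DELAY_US);

		return true;
	}

	static int demo_poll(int timeout_ms, bool (*ready)(void *), void *priv)
	{
		ktime_t deadline = ktime_add_us(ktime_get(),
						timeout_ms * USEC_PER_MSEC);

		do {
			/* One attempt always runs, even with a tiny timeout. */
			if (ready(priv))
				return 0;
		} while (demo_poll_delay(deadline));

		return -ETIMEDOUT;
	}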
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index f87342c211bc..4d069ed21ff6 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -349,6 +349,8 @@ static int as3722_i2c_of_probe(struct i2c_client *i2c,
"ams,enable-internal-int-pullup");
as3722->en_intern_i2c_pullup = of_property_read_bool(np,
"ams,enable-internal-i2c-pullup");
+ as3722->en_ac_ok_pwr_on = of_property_read_bool(np,
+ "ams,enable-ac-ok-power-on");
as3722->irq_flags = irqd_get_trigger_type(irq_data);
dev_dbg(&i2c->dev, "IRQ flags are 0x%08lx\n", as3722->irq_flags);
return 0;
@@ -360,6 +362,7 @@ static int as3722_i2c_probe(struct i2c_client *i2c,
struct as3722 *as3722;
unsigned long irq_flags;
int ret;
+ u8 val = 0;
as3722 = devm_kzalloc(&i2c->dev, sizeof(struct as3722), GFP_KERNEL);
if (!as3722)
@@ -398,6 +401,15 @@ static int as3722_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
return ret;
+ if (as3722->en_ac_ok_pwr_on)
+ val = AS3722_CTRL_SEQU1_AC_OK_PWR_ON;
+ ret = as3722_update_bits(as3722, AS3722_CTRL_SEQU1_REG,
+ AS3722_CTRL_SEQU1_AC_OK_PWR_ON, val);
+ if (ret < 0) {
+ dev_err(as3722->dev, "CTRLsequ1 update failed: %d\n", ret);
+ return ret;
+ }
+
ret = devm_mfd_add_devices(&i2c->dev, -1, as3722_devs,
ARRAY_SIZE(as3722_devs), NULL, 0,
regmap_irq_get_domain(as3722->irq_data));
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index 4b15b0840f16..e82543bcfdc8 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -22,6 +22,7 @@
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index d35a5fe6c950..a7b7c5423ea5 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -65,6 +65,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
{ .compatible = "x-powers,axp202", .data = (void *)AXP202_ID },
{ .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
{ .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
+ { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
{ },
};
MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
@@ -74,6 +75,7 @@ static const struct i2c_device_id axp20x_i2c_id[] = {
{ "axp202", 0 },
{ "axp209", 0 },
{ "axp221", 0 },
+ { "axp806", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 9a2ef3d9b8f8..0be511dd93d0 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -221,6 +221,11 @@ static const struct resource axp803_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP803_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
};
+static const struct resource axp806_pek_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP806_IRQ_POK_RISE, "PEK_DBR"),
+ DEFINE_RES_IRQ_NAMED(AXP806_IRQ_POK_FALL, "PEK_DBF"),
+};
+
static const struct resource axp809_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP809_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
DEFINE_RES_IRQ_NAMED(AXP809_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
@@ -730,6 +735,15 @@ static const struct mfd_cell axp803_cells[] = {
{ .name = "axp20x-regulator" },
};
+static const struct mfd_cell axp806_self_working_cells[] = {
+ {
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp806_pek_resources),
+ .resources = axp806_pek_resources,
+ },
+ { .name = "axp20x-regulator" },
+};
+
static const struct mfd_cell axp806_cells[] = {
{
.id = 2,
@@ -842,8 +856,14 @@ int axp20x_match_device(struct axp20x_dev *axp20x)
axp20x->regmap_irq_chip = &axp803_regmap_irq_chip;
break;
case AXP806_ID:
- axp20x->nr_cells = ARRAY_SIZE(axp806_cells);
- axp20x->cells = axp806_cells;
+ if (of_property_read_bool(axp20x->dev->of_node,
+ "x-powers,self-working-mode")) {
+ axp20x->nr_cells = ARRAY_SIZE(axp806_self_working_cells);
+ axp20x->cells = axp806_self_working_cells;
+ } else {
+ axp20x->nr_cells = ARRAY_SIZE(axp806_cells);
+ axp20x->cells = axp806_cells;
+ }
axp20x->regmap_cfg = &axp806_regmap_config;
axp20x->regmap_irq_chip = &axp806_regmap_irq_chip;
break;
@@ -901,7 +921,9 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
*/
if (axp20x->variant == AXP806_ID) {
if (of_property_read_bool(axp20x->dev->of_node,
- "x-powers,master-mode"))
+ "x-powers,master-mode") ||
+ of_property_read_bool(axp20x->dev->of_node,
+ "x-powers,self-working-mode"))
regmap_write(axp20x->regmap, AXP806_REG_ADDR_EXT,
AXP806_REG_ADDR_EXT_ADDR_MASTER_MODE);
else
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 306e1fd109bd..999dac752bcc 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -20,6 +20,7 @@
#include <linux/fs.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -377,10 +378,18 @@ error:
kfree(msg);
}
+static const struct mfd_cell cros_ec_cec_cells[] = {
+ { .name = "cros-ec-cec" }
+};
+
static const struct mfd_cell cros_ec_rtc_cells[] = {
{ .name = "cros-ec-rtc" }
};
+static const struct mfd_cell cros_usbpd_charger_cells[] = {
+ { .name = "cros-usbpd-charger" }
+};
+
static int ec_device_probe(struct platform_device *pdev)
{
int retval = -ENOMEM;
@@ -419,6 +428,18 @@ static int ec_device_probe(struct platform_device *pdev)
if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
cros_ec_sensors_register(ec);
+ /* Check whether this EC instance has CEC host command support */
+ if (cros_ec_check_features(ec, EC_FEATURE_CEC)) {
+ retval = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO,
+ cros_ec_cec_cells,
+ ARRAY_SIZE(cros_ec_cec_cells),
+ NULL, 0, NULL);
+ if (retval)
+ dev_err(ec->dev,
+ "failed to add cros-ec-cec device: %d\n",
+ retval);
+ }
+
/* Check whether this EC instance has RTC host command support */
if (cros_ec_check_features(ec, EC_FEATURE_RTC)) {
retval = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO,
@@ -431,6 +452,18 @@ static int ec_device_probe(struct platform_device *pdev)
retval);
}
+ /* Check whether this EC instance has the PD charge manager */
+ if (cros_ec_check_features(ec, EC_FEATURE_USB_PD)) {
+ retval = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO,
+ cros_usbpd_charger_cells,
+ ARRAY_SIZE(cros_usbpd_charger_cells),
+ NULL, 0, NULL);
+ if (retval)
+ dev_err(ec->dev,
+ "failed to add cros-usbpd-charger device: %d\n",
+ retval);
+ }
+
/* Take control of the lightbar from the EC. */
lb_manual_suspend_ctrl(ec, 1);
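Both new blocks above follow the same shape as the existing RTC one: test a bit in the EC feature bitmap, then register a single-cell MFD child whose actual driver binds by cell name elsewhere. The pattern in isolation; the cell and function names are invented, while cros_ec_check_features() and mfd_add_devices() are the real interfaces:

	#include <linux/mfd/core.h>
	#include <linux/mfd/cros_ec.h>
	#include <linux/platform_device.h>

	static const struct mfd_cell demo_cells[] = {
		{ .name = "demo-feature-dev" },	/* bound by a platform_driver elsewhere */
	};

	static int demo_add_feature_child(struct cros_ec_dev *ec)
	{
		int ret;

		if (!cros_ec_check_features(ec, EC_FEATURE_CEC))
			return 0;	/* feature absent: silently skip the child */

		ret = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO,
				      demo_cells, ARRAY_SIZE(demo_cells),
				      NULL, 0, NULL);
		if (ret)
			dev_err(ec->dev, "failed to add demo cell: %d\n", ret);

		return ret;
	}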
diff --git a/drivers/mfd/cs47l35-tables.c b/drivers/mfd/cs47l35-tables.c
new file mode 100644
index 000000000000..604c9dd14df5
--- /dev/null
+++ b/drivers/mfd/cs47l35-tables.c
@@ -0,0 +1,1609 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Regmap tables for CS47L35 codec
+ *
+ * Copyright (C) 2015-2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/registers.h>
+
+#include "madera.h"
+
+static const struct reg_sequence cs47l35_reva_16_patch[] = {
+ { 0x460, 0x0c40 },
+ { 0x461, 0xcd1a },
+ { 0x462, 0x0c40 },
+ { 0x463, 0xb53b },
+ { 0x464, 0x0c40 },
+ { 0x465, 0x7503 },
+ { 0x466, 0x0c40 },
+ { 0x467, 0x4a41 },
+ { 0x468, 0x0041 },
+ { 0x469, 0x3491 },
+ { 0x46a, 0x0841 },
+ { 0x46b, 0x1f50 },
+ { 0x46c, 0x0446 },
+ { 0x46d, 0x14ed },
+ { 0x46e, 0x0446 },
+ { 0x46f, 0x1455 },
+ { 0x470, 0x04c6 },
+ { 0x471, 0x1220 },
+ { 0x472, 0x04c6 },
+ { 0x473, 0x040f },
+ { 0x474, 0x04ce },
+ { 0x475, 0x0339 },
+ { 0x476, 0x05df },
+ { 0x477, 0x028f },
+ { 0x478, 0x05df },
+ { 0x479, 0x0209 },
+ { 0x47a, 0x05df },
+ { 0x47b, 0x00cf },
+ { 0x47c, 0x05df },
+ { 0x47d, 0x0001 },
+ { 0x47e, 0x07ff },
+};
+
+int cs47l35_patch(struct madera *madera)
+{
+ int ret;
+
+ ret = regmap_register_patch(madera->regmap, cs47l35_reva_16_patch,
+ ARRAY_SIZE(cs47l35_reva_16_patch));
+ if (ret < 0)
+ dev_err(madera->dev, "Error applying patch: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs47l35_patch);
+
+static const struct reg_default cs47l35_reg_default[] = {
+ { 0x00000020, 0x0000 }, /* R32 (0x20) - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 (0x21) - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 (0x22) - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 (0x23) - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 (0x24) - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 (0x30) - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 (0x31) - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 (0x32) - PWM Drive 3 */
+ { 0x00000061, 0x01ff }, /* R97 (0x61) - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01ff }, /* R98 (0x62) - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01ff }, /* R99 (0x63) - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01ff }, /* R100 (0x64) - Sample Rate Sequence Select 4 */
+ { 0x00000066, 0x01ff }, /* R102 (0x66) - Always On Triggers Sequence Select 1 */
+ { 0x00000067, 0x01ff }, /* R103 (0x67) - Always On Triggers Sequence Select 2 */
+ { 0x00000090, 0x0000 }, /* R144 (0x90) - Haptics Control 1 */
+ { 0x00000091, 0x7fff }, /* R145 (0x91) - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 (0x92) - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 (0x93) - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 (0x94) - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 (0x95) - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 (0x96) - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 (0x97) - Haptics phase 3 duration */
+ { 0x000000A0, 0x0000 }, /* R160 (0xa0) - Comfort Noise Generator */
+ { 0x00000100, 0x0002 }, /* R256 (0x100) - Clock 32k 1 */
+ { 0x00000101, 0x0404 }, /* R257 (0x101) - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 (0x102) - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 (0x103) - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 (0x104) - Sample rate 3 */
+ { 0x00000120, 0x0305 }, /* R288 (0x120) - DSP Clock 1 */
+ { 0x00000122, 0x0000 }, /* R290 (0x122) - DSP Clock 2 */
+ { 0x00000149, 0x0000 }, /* R329 (0x149) - Output system clock */
+ { 0x0000014a, 0x0000 }, /* R330 (0x14a) - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 (0x152) - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 (0x153) - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 (0x154) - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 (0x155) - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 (0x156) - Rate Estimator 5 */
+ { 0x00000171, 0x0002 }, /* R369 (0x171) - FLL1 Control 1 */
+ { 0x00000172, 0x0008 }, /* R370 (0x172) - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 (0x173) - FLL1 Control 3 */
+ { 0x00000174, 0x007d }, /* R372 (0x174) - FLL1 Control 4 */
+ { 0x00000175, 0x0000 }, /* R373 (0x175) - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 (0x176) - FLL1 Control 6 */
+ { 0x00000177, 0x0281 }, /* R375 (0x177) - FLL1 Loop Filter Test 1 */
+ { 0x00000179, 0x0000 }, /* R377 (0x179) - FLL1 Control 7 */
+ { 0x0000017a, 0x0b06 }, /* R378 (0x17a) - FLL1 EFS2 */
+ { 0x0000017f, 0x0000 }, /* R383 (0x17f) - FLL1 Synchroniser 1 */
+ { 0x00000180, 0x0000 }, /* R384 (0x180) - FLL1 Synchroniser 2 */
+ { 0x00000181, 0x0000 }, /* R385 (0x181) - FLL1 Synchroniser 3 */
+ { 0x00000182, 0x0000 }, /* R386 (0x182) - FLL1 Synchroniser 4 */
+ { 0x00000183, 0x0000 }, /* R387 (0x183) - FLL1 Synchroniser 5 */
+ { 0x00000184, 0x0000 }, /* R388 (0x184) - FLL1 Synchroniser 6 */
+ { 0x00000185, 0x0001 }, /* R389 (0x185) - FLL1 Synchroniser 7 */
+ { 0x00000187, 0x0000 }, /* R391 (0x187) - FLL1 Spread Spectrum */
+ { 0x00000188, 0x000c }, /* R392 (0x188) - FLL1 GPIO Clock */
+ { 0x00000200, 0x0006 }, /* R512 (0x200) - Mic Charge Pump 1 */
+ { 0x0000020b, 0x0400 }, /* R523 (0x20b) - HP Charge Pump 8 */
+ { 0x00000213, 0x03e4 }, /* R531 (0x213) - LDO2 Control 1 */
+ { 0x00000218, 0x00e6 }, /* R536 (0x218) - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x00e6 }, /* R537 (0x219) - Mic Bias Ctrl 2 */
+ { 0x0000021c, 0x0022 }, /* R540 (0x21c) - Mic Bias Ctrl 5 */
+ { 0x0000021e, 0x0022 }, /* R542 (0x21e) - Mic Bias Ctrl 6 */
+ { 0x0000027e, 0x0000 }, /* R638 (0x27e) - EDRE HP stereo control */
+ { 0x00000293, 0x0080 }, /* R659 (0x293) - Accessory Detect Mode 1 */
+ { 0x0000029b, 0x0000 }, /* R667 (0x29b) - Headphone Detect 1 */
+ { 0x000002a3, 0x1102 }, /* R675 (0x2a3) - Mic Detect Control 1 */
+ { 0x000002a4, 0x009f }, /* R676 (0x2a4) - Mic Detect Control 2 */
+ { 0x000002a6, 0x3d3d }, /* R678 (0x2a6) - Mic Detect Level 1 */
+ { 0x000002a7, 0x3d3d }, /* R679 (0x2a7) - Mic Detect Level 2 */
+ { 0x000002a8, 0x333d }, /* R680 (0x2a8) - Mic Detect Level 3 */
+ { 0x000002a9, 0x202d }, /* R681 (0x2a9) - Mic Detect Level 4 */
+ { 0x000002c6, 0x0010 }, /* R710 (0x2c6) - Mic Clamp control */
+ { 0x000002c8, 0x0000 }, /* R712 (0x2c8) - GP switch 1 */
+ { 0x000002d3, 0x0000 }, /* R723 (0x2d3) - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 (0x300) - Input Enables */
+ { 0x00000308, 0x0000 }, /* R776 (0x308) - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 (0x309) - Input Volume Ramp */
+ { 0x0000030c, 0x0002 }, /* R780 (0x30c) - HPF Control */
+ { 0x00000310, 0x0080 }, /* R784 (0x310) - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 (0x311) - ADC Digital Volume 1L */
+ { 0x00000312, 0x0500 }, /* R786 (0x312) - DMIC1L Control */
+ { 0x00000314, 0x0080 }, /* R788 (0x314) - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 (0x315) - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 (0x316) - DMIC1R Control */
+ { 0x00000318, 0x0080 }, /* R792 (0x318) - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 (0x319) - ADC Digital Volume 2L */
+ { 0x0000031a, 0x0500 }, /* R794 (0x31a) - DMIC2L Control */
+ { 0x0000031c, 0x0080 }, /* R796 (0x31c) - IN2R Control */
+ { 0x0000031d, 0x0180 }, /* R797 (0x31d) - ADC Digital Volume 2R */
+ { 0x0000031e, 0x0000 }, /* R798 (0x31e) - DMIC2R Control */
+ { 0x00000400, 0x0000 }, /* R1024 (0x400) - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 (0x408) - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 (0x409) - Output Volume Ramp */
+ { 0x00000410, 0x0080 }, /* R1040 (0x410) - Output Path Config 1L */
+ { 0x00000411, 0x0180 }, /* R1041 (0x411) - DAC Digital Volume 1L */
+ { 0x00000413, 0x0001 }, /* R1043 (0x413) - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 (0x414) - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 (0x415) - DAC Digital Volume 1R */
+ { 0x00000417, 0x0002 }, /* R1047 (0x417) - Noise Gate Select 1R */
+ { 0x00000428, 0x0000 }, /* R1064 (0x428) - Output Path Config 4L */
+ { 0x00000429, 0x0180 }, /* R1065 (0x429) - DAC Digital Volume 4L */
+ { 0x0000042b, 0x0040 }, /* R1067 (0x42b) - Noise Gate Select 4L */
+ { 0x00000430, 0x0000 }, /* R1072 (0x430) - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 (0x431) - DAC Digital Volume 5L */
+ { 0x00000433, 0x0100 }, /* R1075 (0x433) - Noise Gate Select 5L */
+ { 0x00000434, 0x0000 }, /* R1076 (0x434) - Output Path Config 5R */
+ { 0x00000435, 0x0180 }, /* R1077 (0x435) - DAC Digital Volume 5R */
+ { 0x00000437, 0x0200 }, /* R1079 (0x437) - Noise Gate Select 5R */
+ { 0x00000440, 0x0003 }, /* R1088 (0x440) - DRE Enable */
+ { 0x00000448, 0x0a83 }, /* R1096 (0x448) - eDRE Enable */
+ { 0x0000044a, 0x0000 }, /* R1098 (0x44a) - eDRE Manual */
+ { 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
+ { 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
+ { 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
+ { 0x000004a0, 0x3080 }, /* R1184 (0x4a0) - HP1 Short Circuit Ctrl */
+ { 0x000004a8, 0x7120 }, /* R1192 (0x4a8) - HP Test Ctrl 5 */
+ { 0x000004a9, 0x7120 }, /* R1193 (0x4a9) - HP Test Ctrl 6 */
+ { 0x00000500, 0x000c }, /* R1280 (0x500) - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0000 }, /* R1281 (0x501) - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 (0x502) - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 (0x503) - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 (0x504) - AIF1 Format */
+ { 0x00000506, 0x0040 }, /* R1286 (0x506) - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 (0x507) - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 (0x508) - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 (0x509) - AIF1 Frame Ctrl 3 */
+ { 0x0000050a, 0x0001 }, /* R1290 (0x50a) - AIF1 Frame Ctrl 4 */
+ { 0x0000050b, 0x0002 }, /* R1291 (0x50b) - AIF1 Frame Ctrl 5 */
+ { 0x0000050c, 0x0003 }, /* R1292 (0x50c) - AIF1 Frame Ctrl 6 */
+ { 0x0000050d, 0x0004 }, /* R1293 (0x50d) - AIF1 Frame Ctrl 7 */
+ { 0x0000050e, 0x0005 }, /* R1294 (0x50e) - AIF1 Frame Ctrl 8 */
+ { 0x00000511, 0x0000 }, /* R1297 (0x511) - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 (0x512) - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 (0x513) - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 (0x514) - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 (0x515) - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 (0x516) - AIF1 Frame Ctrl 16 */
+ { 0x00000519, 0x0000 }, /* R1305 (0x519) - AIF1 Tx Enables */
+ { 0x0000051a, 0x0000 }, /* R1306 (0x51a) - AIF1 Rx Enables */
+ { 0x00000540, 0x000c }, /* R1344 (0x540) - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0000 }, /* R1345 (0x541) - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 (0x542) - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 (0x543) - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 (0x544) - AIF2 Format */
+ { 0x00000546, 0x0040 }, /* R1350 (0x546) - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 (0x547) - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 (0x548) - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 (0x549) - AIF2 Frame Ctrl 3 */
+ { 0x0000054a, 0x0001 }, /* R1354 (0x54a) - AIF2 Frame Ctrl 4 */
+ { 0x00000551, 0x0000 }, /* R1361 (0x551) - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 (0x552) - AIF2 Frame Ctrl 12 */
+ { 0x00000559, 0x0000 }, /* R1369 (0x559) - AIF2 Tx Enables */
+ { 0x0000055a, 0x0000 }, /* R1370 (0x55a) - AIF2 Rx Enables */
+ { 0x00000580, 0x000c }, /* R1408 (0x580) - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0000 }, /* R1409 (0x581) - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 (0x582) - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 (0x583) - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 (0x584) - AIF3 Format */
+ { 0x00000586, 0x0040 }, /* R1414 (0x586) - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 (0x587) - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 (0x588) - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 (0x589) - AIF3 Frame Ctrl 3 */
+ { 0x0000058a, 0x0001 }, /* R1418 (0x58a) - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 (0x591) - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 (0x592) - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 (0x599) - AIF3 Tx Enables */
+ { 0x0000059a, 0x0000 }, /* R1434 (0x59a) - AIF3 Rx Enables */
+ { 0x000005c2, 0x0000 }, /* R1474 (0x5c2) - SPD1 TX Control */
+ { 0x000005e3, 0x0000 }, /* R1507 (0x5e3) - SLIMbus Framer Ref Gear */
+ { 0x000005e5, 0x0000 }, /* R1509 (0x5e5) - SLIMbus Rates 1 */
+ { 0x000005e6, 0x0000 }, /* R1510 (0x5e6) - SLIMbus Rates 2 */
+ { 0x000005e7, 0x0000 }, /* R1511 (0x5e7) - SLIMbus Rates 3 */
+ { 0x000005e9, 0x0000 }, /* R1513 (0x5e9) - SLIMbus Rates 5 */
+ { 0x000005ea, 0x0000 }, /* R1514 (0x5ea) - SLIMbus Rates 6 */
+ { 0x000005eb, 0x0000 }, /* R1515 (0x5eb) - SLIMbus Rates 7 */
+ { 0x000005f5, 0x0000 }, /* R1525 (0x5f5) - SLIMbus RX Channel Enable */
+ { 0x000005f6, 0x0000 }, /* R1526 (0x5f6) - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 (0x640) - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 (0x641) - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 (0x642) - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 (0x643) - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 (0x644) - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 (0x645) - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 (0x646) - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 (0x647) - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 (0x648) - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 (0x649) - PWM2MIX Input 1 Volume */
+ { 0x0000064a, 0x0000 }, /* R1610 (0x64a) - PWM2MIX Input 2 Source */
+ { 0x0000064b, 0x0080 }, /* R1611 (0x64b) - PWM2MIX Input 2 Volume */
+ { 0x0000064c, 0x0000 }, /* R1612 (0x64c) - PWM2MIX Input 3 Source */
+ { 0x0000064d, 0x0080 }, /* R1613 (0x64d) - PWM2MIX Input 3 Volume */
+ { 0x0000064e, 0x0000 }, /* R1614 (0x64e) - PWM2MIX Input 4 Source */
+ { 0x0000064f, 0x0080 }, /* R1615 (0x64f) - PWM2MIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 (0x680) - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 (0x681) - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 (0x682) - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 (0x683) - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 (0x684) - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 (0x685) - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 (0x686) - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 (0x687) - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 (0x688) - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 (0x689) - OUT1RMIX Input 1 Volume */
+ { 0x0000068a, 0x0000 }, /* R1674 (0x68a) - OUT1RMIX Input 2 Source */
+ { 0x0000068b, 0x0080 }, /* R1675 (0x68b) - OUT1RMIX Input 2 Volume */
+ { 0x0000068c, 0x0000 }, /* R1676 (0x68c) - OUT1RMIX Input 3 Source */
+ { 0x0000068d, 0x0080 }, /* R1677 (0x68d) - OUT1RMIX Input 3 Volume */
+ { 0x0000068e, 0x0000 }, /* R1678 (0x68e) - OUT1RMIX Input 4 Source */
+ { 0x0000068f, 0x0080 }, /* R1679 (0x68f) - OUT1RMIX Input 4 Volume */
+ { 0x000006b0, 0x0000 }, /* R1712 (0x6b0) - OUT4LMIX Input 1 Source */
+ { 0x000006b1, 0x0080 }, /* R1713 (0x6b1) - OUT4LMIX Input 1 Volume */
+ { 0x000006b2, 0x0000 }, /* R1714 (0x6b2) - OUT4LMIX Input 2 Source */
+ { 0x000006b3, 0x0080 }, /* R1715 (0x6b3) - OUT4LMIX Input 2 Volume */
+ { 0x000006b4, 0x0000 }, /* R1716 (0x6b4) - OUT4LMIX Input 3 Source */
+ { 0x000006b5, 0x0080 }, /* R1717 (0x6b5) - OUT4LMIX Input 3 Volume */
+ { 0x000006b6, 0x0000 }, /* R1718 (0x6b6) - OUT4LMIX Input 4 Source */
+ { 0x000006b7, 0x0080 }, /* R1719 (0x6b7) - OUT4LMIX Input 4 Volume */
+ { 0x000006c0, 0x0000 }, /* R1728 (0x6c0) - OUT5LMIX Input 1 Source */
+ { 0x000006c1, 0x0080 }, /* R1729 (0x6c1) - OUT5LMIX Input 1 Volume */
+ { 0x000006c2, 0x0000 }, /* R1730 (0x6c2) - OUT5LMIX Input 2 Source */
+ { 0x000006c3, 0x0080 }, /* R1731 (0x6c3) - OUT5LMIX Input 2 Volume */
+ { 0x000006c4, 0x0000 }, /* R1732 (0x6c4) - OUT5LMIX Input 3 Source */
+ { 0x000006c5, 0x0080 }, /* R1733 (0x6c5) - OUT5LMIX Input 3 Volume */
+ { 0x000006c6, 0x0000 }, /* R1734 (0x6c6) - OUT5LMIX Input 4 Source */
+ { 0x000006c7, 0x0080 }, /* R1735 (0x6c7) - OUT5LMIX Input 4 Volume */
+ { 0x000006c8, 0x0000 }, /* R1736 (0x6c8) - OUT5RMIX Input 1 Source */
+ { 0x000006c9, 0x0080 }, /* R1737 (0x6c9) - OUT5RMIX Input 1 Volume */
+ { 0x000006ca, 0x0000 }, /* R1738 (0x6ca) - OUT5RMIX Input 2 Source */
+ { 0x000006cb, 0x0080 }, /* R1739 (0x6cb) - OUT5RMIX Input 2 Volume */
+ { 0x000006cc, 0x0000 }, /* R1740 (0x6cc) - OUT5RMIX Input 3 Source */
+ { 0x000006cd, 0x0080 }, /* R1741 (0x6cd) - OUT5RMIX Input 3 Volume */
+ { 0x000006ce, 0x0000 }, /* R1742 (0x6ce) - OUT5RMIX Input 4 Source */
+ { 0x000006cf, 0x0080 }, /* R1743 (0x6cf) - OUT5RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 (0x700) - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 (0x701) - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 (0x702) - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 (0x703) - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 (0x704) - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 (0x705) - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 (0x706) - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 (0x707) - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 (0x708) - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 (0x709) - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070a, 0x0000 }, /* R1802 (0x70a) - AIF1TX2MIX Input 2 Source */
+ { 0x0000070b, 0x0080 }, /* R1803 (0x70b) - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070c, 0x0000 }, /* R1804 (0x70c) - AIF1TX2MIX Input 3 Source */
+ { 0x0000070d, 0x0080 }, /* R1805 (0x70d) - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070e, 0x0000 }, /* R1806 (0x70e) - AIF1TX2MIX Input 4 Source */
+ { 0x0000070f, 0x0080 }, /* R1807 (0x70f) - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 (0x710) - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 (0x711) - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 (0x712) - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 (0x713) - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 (0x714) - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 (0x715) - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 (0x716) - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 (0x717) - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 (0x718) - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 (0x719) - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071a, 0x0000 }, /* R1818 (0x71a) - AIF1TX4MIX Input 2 Source */
+ { 0x0000071b, 0x0080 }, /* R1819 (0x71b) - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071c, 0x0000 }, /* R1820 (0x71c) - AIF1TX4MIX Input 3 Source */
+ { 0x0000071d, 0x0080 }, /* R1821 (0x71d) - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071e, 0x0000 }, /* R1822 (0x71e) - AIF1TX4MIX Input 4 Source */
+ { 0x0000071f, 0x0080 }, /* R1823 (0x71f) - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 (0x720) - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 (0x721) - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 (0x722) - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 (0x723) - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 (0x724) - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 (0x725) - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 (0x726) - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 (0x727) - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 (0x728) - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 (0x729) - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072a, 0x0000 }, /* R1834 (0x72a) - AIF1TX6MIX Input 2 Source */
+ { 0x0000072b, 0x0080 }, /* R1835 (0x72b) - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072c, 0x0000 }, /* R1836 (0x72c) - AIF1TX6MIX Input 3 Source */
+ { 0x0000072d, 0x0080 }, /* R1837 (0x72d) - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072e, 0x0000 }, /* R1838 (0x72e) - AIF1TX6MIX Input 4 Source */
+ { 0x0000072f, 0x0080 }, /* R1839 (0x72f) - AIF1TX6MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 (0x740) - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 (0x741) - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 (0x742) - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 (0x743) - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 (0x744) - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 (0x745) - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 (0x746) - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 (0x747) - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 (0x748) - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 (0x749) - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074a, 0x0000 }, /* R1866 (0x74a) - AIF2TX2MIX Input 2 Source */
+ { 0x0000074b, 0x0080 }, /* R1867 (0x74b) - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074c, 0x0000 }, /* R1868 (0x74c) - AIF2TX2MIX Input 3 Source */
+ { 0x0000074d, 0x0080 }, /* R1869 (0x74d) - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074e, 0x0000 }, /* R1870 (0x74e) - AIF2TX2MIX Input 4 Source */
+ { 0x0000074f, 0x0080 }, /* R1871 (0x74f) - AIF2TX2MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 (0x780) - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 (0x781) - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 (0x782) - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 (0x783) - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 (0x784) - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 (0x785) - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 (0x786) - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 (0x787) - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 (0x788) - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 (0x789) - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078a, 0x0000 }, /* R1930 (0x78a) - AIF3TX2MIX Input 2 Source */
+ { 0x0000078b, 0x0080 }, /* R1931 (0x78b) - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078c, 0x0000 }, /* R1932 (0x78c) - AIF3TX2MIX Input 3 Source */
+ { 0x0000078d, 0x0080 }, /* R1933 (0x78d) - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078e, 0x0000 }, /* R1934 (0x78e) - AIF3TX2MIX Input 4 Source */
+ { 0x0000078f, 0x0080 }, /* R1935 (0x78f) - AIF3TX2MIX Input 4 Volume */
+ { 0x000007c0, 0x0000 }, /* R1984 (0x7c0) - SLIMTX1MIX Input 1 Source */
+ { 0x000007c1, 0x0080 }, /* R1985 (0x7c1) - SLIMTX1MIX Input 1 Volume */
+ { 0x000007c2, 0x0000 }, /* R1986 (0x7c2) - SLIMTX1MIX Input 2 Source */
+ { 0x000007c3, 0x0080 }, /* R1987 (0x7c3) - SLIMTX1MIX Input 2 Volume */
+ { 0x000007c4, 0x0000 }, /* R1988 (0x7c4) - SLIMTX1MIX Input 3 Source */
+ { 0x000007c5, 0x0080 }, /* R1989 (0x7c5) - SLIMTX1MIX Input 3 Volume */
+ { 0x000007c6, 0x0000 }, /* R1990 (0x7c6) - SLIMTX1MIX Input 4 Source */
+ { 0x000007c7, 0x0080 }, /* R1991 (0x7c7) - SLIMTX1MIX Input 4 Volume */
+ { 0x000007c8, 0x0000 }, /* R1992 (0x7c8) - SLIMTX2MIX Input 1 Source */
+ { 0x000007c9, 0x0080 }, /* R1993 (0x7c9) - SLIMTX2MIX Input 1 Volume */
+ { 0x000007ca, 0x0000 }, /* R1994 (0x7ca) - SLIMTX2MIX Input 2 Source */
+ { 0x000007cb, 0x0080 }, /* R1995 (0x7cb) - SLIMTX2MIX Input 2 Volume */
+ { 0x000007cc, 0x0000 }, /* R1996 (0x7cc) - SLIMTX2MIX Input 3 Source */
+ { 0x000007cd, 0x0080 }, /* R1997 (0x7cd) - SLIMTX2MIX Input 3 Volume */
+ { 0x000007ce, 0x0000 }, /* R1998 (0x7ce) - SLIMTX2MIX Input 4 Source */
+ { 0x000007cf, 0x0080 }, /* R1999 (0x7cf) - SLIMTX2MIX Input 4 Volume */
+ { 0x000007d0, 0x0000 }, /* R2000 (0x7d0) - SLIMTX3MIX Input 1 Source */
+ { 0x000007d1, 0x0080 }, /* R2001 (0x7d1) - SLIMTX3MIX Input 1 Volume */
+ { 0x000007d2, 0x0000 }, /* R2002 (0x7d2) - SLIMTX3MIX Input 2 Source */
+ { 0x000007d3, 0x0080 }, /* R2003 (0x7d3) - SLIMTX3MIX Input 2 Volume */
+ { 0x000007d4, 0x0000 }, /* R2004 (0x7d4) - SLIMTX3MIX Input 3 Source */
+ { 0x000007d5, 0x0080 }, /* R2005 (0x7d5) - SLIMTX3MIX Input 3 Volume */
+ { 0x000007d6, 0x0000 }, /* R2006 (0x7d6) - SLIMTX3MIX Input 4 Source */
+ { 0x000007d7, 0x0080 }, /* R2007 (0x7d7) - SLIMTX3MIX Input 4 Volume */
+ { 0x000007d8, 0x0000 }, /* R2008 (0x7d8) - SLIMTX4MIX Input 1 Source */
+ { 0x000007d9, 0x0080 }, /* R2009 (0x7d9) - SLIMTX4MIX Input 1 Volume */
+ { 0x000007da, 0x0000 }, /* R2010 (0x7da) - SLIMTX4MIX Input 2 Source */
+ { 0x000007db, 0x0080 }, /* R2011 (0x7db) - SLIMTX4MIX Input 2 Volume */
+ { 0x000007dc, 0x0000 }, /* R2012 (0x7dc) - SLIMTX4MIX Input 3 Source */
+ { 0x000007dd, 0x0080 }, /* R2013 (0x7dd) - SLIMTX4MIX Input 3 Volume */
+ { 0x000007de, 0x0000 }, /* R2014 (0x7de) - SLIMTX4MIX Input 4 Source */
+ { 0x000007df, 0x0080 }, /* R2015 (0x7df) - SLIMTX4MIX Input 4 Volume */
+ { 0x000007e0, 0x0000 }, /* R2016 (0x7e0) - SLIMTX5MIX Input 1 Source */
+ { 0x000007e1, 0x0080 }, /* R2017 (0x7e1) - SLIMTX5MIX Input 1 Volume */
+ { 0x000007e2, 0x0000 }, /* R2018 (0x7e2) - SLIMTX5MIX Input 2 Source */
+ { 0x000007e3, 0x0080 }, /* R2019 (0x7e3) - SLIMTX5MIX Input 2 Volume */
+ { 0x000007e4, 0x0000 }, /* R2020 (0x7e4) - SLIMTX5MIX Input 3 Source */
+ { 0x000007e5, 0x0080 }, /* R2021 (0x7e5) - SLIMTX5MIX Input 3 Volume */
+ { 0x000007e6, 0x0000 }, /* R2022 (0x7e6) - SLIMTX5MIX Input 4 Source */
+ { 0x000007e7, 0x0080 }, /* R2023 (0x7e7) - SLIMTX5MIX Input 4 Volume */
+ { 0x000007e8, 0x0000 }, /* R2024 (0x7e8) - SLIMTX6MIX Input 1 Source */
+ { 0x000007e9, 0x0080 }, /* R2025 (0x7e9) - SLIMTX6MIX Input 1 Volume */
+ { 0x000007ea, 0x0000 }, /* R2026 (0x7ea) - SLIMTX6MIX Input 2 Source */
+ { 0x000007eb, 0x0080 }, /* R2027 (0x7eb) - SLIMTX6MIX Input 2 Volume */
+ { 0x000007ec, 0x0000 }, /* R2028 (0x7ec) - SLIMTX6MIX Input 3 Source */
+ { 0x000007ed, 0x0080 }, /* R2029 (0x7ed) - SLIMTX6MIX Input 3 Volume */
+ { 0x000007ee, 0x0000 }, /* R2030 (0x7ee) - SLIMTX6MIX Input 4 Source */
+ { 0x000007ef, 0x0080 }, /* R2031 (0x7ef) - SLIMTX6MIX Input 4 Volume */
+ { 0x00000800, 0x0000 }, /* R2048 (0x800) - SPDIF1TX1MIX Input 1 Source */
+ { 0x00000801, 0x0080 }, /* R2049 (0x801) - SPDIF1TX1MIX Input 1 Volume */
+ { 0x00000808, 0x0000 }, /* R2056 (0x808) - SPDIF1TX2MIX Input 1 Source */
+ { 0x00000809, 0x0080 }, /* R2057 (0x809) - SPDIF1TX2MIX Input 1 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 (0x880) - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 (0x881) - EQ1MIX Input 1 Volume */
+ { 0x00000882, 0x0000 }, /* R2178 (0x882) - EQ1MIX Input 2 Source */
+ { 0x00000883, 0x0080 }, /* R2179 (0x883) - EQ1MIX Input 2 Volume */
+ { 0x00000884, 0x0000 }, /* R2180 (0x884) - EQ1MIX Input 3 Source */
+ { 0x00000885, 0x0080 }, /* R2181 (0x885) - EQ1MIX Input 3 Volume */
+ { 0x00000886, 0x0000 }, /* R2182 (0x886) - EQ1MIX Input 4 Source */
+ { 0x00000887, 0x0080 }, /* R2183 (0x887) - EQ1MIX Input 4 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 (0x888) - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 (0x889) - EQ2MIX Input 1 Volume */
+ { 0x0000088a, 0x0000 }, /* R2186 (0x88a) - EQ2MIX Input 2 Source */
+ { 0x0000088b, 0x0080 }, /* R2187 (0x88b) - EQ2MIX Input 2 Volume */
+ { 0x0000088c, 0x0000 }, /* R2188 (0x88c) - EQ2MIX Input 3 Source */
+ { 0x0000088d, 0x0080 }, /* R2189 (0x88d) - EQ2MIX Input 3 Volume */
+ { 0x0000088e, 0x0000 }, /* R2190 (0x88e) - EQ2MIX Input 4 Source */
+ { 0x0000088f, 0x0080 }, /* R2191 (0x88f) - EQ2MIX Input 4 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 (0x890) - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 (0x891) - EQ3MIX Input 1 Volume */
+ { 0x00000892, 0x0000 }, /* R2194 (0x892) - EQ3MIX Input 2 Source */
+ { 0x00000893, 0x0080 }, /* R2195 (0x893) - EQ3MIX Input 2 Volume */
+ { 0x00000894, 0x0000 }, /* R2196 (0x894) - EQ3MIX Input 3 Source */
+ { 0x00000895, 0x0080 }, /* R2197 (0x895) - EQ3MIX Input 3 Volume */
+ { 0x00000896, 0x0000 }, /* R2198 (0x896) - EQ3MIX Input 4 Source */
+ { 0x00000897, 0x0080 }, /* R2199 (0x897) - EQ3MIX Input 4 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 (0x898) - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 (0x899) - EQ4MIX Input 1 Volume */
+ { 0x0000089a, 0x0000 }, /* R2202 (0x89a) - EQ4MIX Input 2 Source */
+ { 0x0000089b, 0x0080 }, /* R2203 (0x89b) - EQ4MIX Input 2 Volume */
+ { 0x0000089c, 0x0000 }, /* R2204 (0x89c) - EQ4MIX Input 3 Source */
+ { 0x0000089d, 0x0080 }, /* R2205 (0x89d) - EQ4MIX Input 3 Volume */
+ { 0x0000089e, 0x0000 }, /* R2206 (0x89e) - EQ4MIX Input 4 Source */
+ { 0x0000089f, 0x0080 }, /* R2207 (0x89f) - EQ4MIX Input 4 Volume */
+ { 0x000008c0, 0x0000 }, /* R2240 (0x8c0) - DRC1LMIX Input 1 Source */
+ { 0x000008c1, 0x0080 }, /* R2241 (0x8c1) - DRC1LMIX Input 1 Volume */
+ { 0x000008c2, 0x0000 }, /* R2242 (0x8c2) - DRC1LMIX Input 2 Source */
+ { 0x000008c3, 0x0080 }, /* R2243 (0x8c3) - DRC1LMIX Input 2 Volume */
+ { 0x000008c4, 0x0000 }, /* R2244 (0x8c4) - DRC1LMIX Input 3 Source */
+ { 0x000008c5, 0x0080 }, /* R2245 (0x8c5) - DRC1LMIX Input 3 Volume */
+ { 0x000008c6, 0x0000 }, /* R2246 (0x8c6) - DRC1LMIX Input 4 Source */
+ { 0x000008c7, 0x0080 }, /* R2247 (0x8c7) - DRC1LMIX Input 4 Volume */
+ { 0x000008c8, 0x0000 }, /* R2248 (0x8c8) - DRC1RMIX Input 1 Source */
+ { 0x000008c9, 0x0080 }, /* R2249 (0x8c9) - DRC1RMIX Input 1 Volume */
+ { 0x000008ca, 0x0000 }, /* R2250 (0x8ca) - DRC1RMIX Input 2 Source */
+ { 0x000008cb, 0x0080 }, /* R2251 (0x8cb) - DRC1RMIX Input 2 Volume */
+ { 0x000008cc, 0x0000 }, /* R2252 (0x8cc) - DRC1RMIX Input 3 Source */
+ { 0x000008cd, 0x0080 }, /* R2253 (0x8cd) - DRC1RMIX Input 3 Volume */
+ { 0x000008ce, 0x0000 }, /* R2254 (0x8ce) - DRC1RMIX Input 4 Source */
+ { 0x000008cf, 0x0080 }, /* R2255 (0x8cf) - DRC1RMIX Input 4 Volume */
+ { 0x000008d0, 0x0000 }, /* R2256 (0x8d0) - DRC2LMIX Input 1 Source */
+ { 0x000008d1, 0x0080 }, /* R2257 (0x8d1) - DRC2LMIX Input 1 Volume */
+ { 0x000008d2, 0x0000 }, /* R2258 (0x8d2) - DRC2LMIX Input 2 Source */
+ { 0x000008d3, 0x0080 }, /* R2259 (0x8d3) - DRC2LMIX Input 2 Volume */
+ { 0x000008d4, 0x0000 }, /* R2260 (0x8d4) - DRC2LMIX Input 3 Source */
+ { 0x000008d5, 0x0080 }, /* R2261 (0x8d5) - DRC2LMIX Input 3 Volume */
+ { 0x000008d6, 0x0000 }, /* R2262 (0x8d6) - DRC2LMIX Input 4 Source */
+ { 0x000008d7, 0x0080 }, /* R2263 (0x8d7) - DRC2LMIX Input 4 Volume */
+ { 0x000008d8, 0x0000 }, /* R2264 (0x8d8) - DRC2RMIX Input 1 Source */
+ { 0x000008d9, 0x0080 }, /* R2265 (0x8d9) - DRC2RMIX Input 1 Volume */
+ { 0x000008da, 0x0000 }, /* R2266 (0x8da) - DRC2RMIX Input 2 Source */
+ { 0x000008db, 0x0080 }, /* R2267 (0x8db) - DRC2RMIX Input 2 Volume */
+ { 0x000008dc, 0x0000 }, /* R2268 (0x8dc) - DRC2RMIX Input 3 Source */
+ { 0x000008dd, 0x0080 }, /* R2269 (0x8dd) - DRC2RMIX Input 3 Volume */
+ { 0x000008de, 0x0000 }, /* R2270 (0x8de) - DRC2RMIX Input 4 Source */
+ { 0x000008df, 0x0080 }, /* R2271 (0x8df) - DRC2RMIX Input 4 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 (0x900) - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 (0x901) - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 (0x902) - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 (0x903) - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 (0x904) - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 (0x905) - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 (0x906) - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 (0x907) - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 (0x908) - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 (0x909) - HPLP2MIX Input 1 Volume */
+ { 0x0000090a, 0x0000 }, /* R2314 (0x90a) - HPLP2MIX Input 2 Source */
+ { 0x0000090b, 0x0080 }, /* R2315 (0x90b) - HPLP2MIX Input 2 Volume */
+ { 0x0000090c, 0x0000 }, /* R2316 (0x90c) - HPLP2MIX Input 3 Source */
+ { 0x0000090d, 0x0080 }, /* R2317 (0x90d) - HPLP2MIX Input 3 Volume */
+ { 0x0000090e, 0x0000 }, /* R2318 (0x90e) - HPLP2MIX Input 4 Source */
+ { 0x0000090f, 0x0080 }, /* R2319 (0x90f) - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 (0x910) - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 (0x911) - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 (0x912) - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 (0x913) - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 (0x914) - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 (0x915) - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 (0x916) - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 (0x917) - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 (0x918) - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 (0x919) - HPLP4MIX Input 1 Volume */
+ { 0x0000091a, 0x0000 }, /* R2330 (0x91a) - HPLP4MIX Input 2 Source */
+ { 0x0000091b, 0x0080 }, /* R2331 (0x91b) - HPLP4MIX Input 2 Volume */
+ { 0x0000091c, 0x0000 }, /* R2332 (0x91c) - HPLP4MIX Input 3 Source */
+ { 0x0000091d, 0x0080 }, /* R2333 (0x91d) - HPLP4MIX Input 3 Volume */
+ { 0x0000091e, 0x0000 }, /* R2334 (0x91e) - HPLP4MIX Input 4 Source */
+ { 0x0000091f, 0x0080 }, /* R2335 (0x91f) - HPLP4MIX Input 4 Volume */
+ { 0x00000940, 0x0000 }, /* R2368 (0x940) - DSP1LMIX Input 1 Source */
+ { 0x00000941, 0x0080 }, /* R2369 (0x941) - DSP1LMIX Input 1 Volume */
+ { 0x00000942, 0x0000 }, /* R2370 (0x942) - DSP1LMIX Input 2 Source */
+ { 0x00000943, 0x0080 }, /* R2371 (0x943) - DSP1LMIX Input 2 Volume */
+ { 0x00000944, 0x0000 }, /* R2372 (0x944) - DSP1LMIX Input 3 Source */
+ { 0x00000945, 0x0080 }, /* R2373 (0x945) - DSP1LMIX Input 3 Volume */
+ { 0x00000946, 0x0000 }, /* R2374 (0x946) - DSP1LMIX Input 4 Source */
+ { 0x00000947, 0x0080 }, /* R2375 (0x947) - DSP1LMIX Input 4 Volume */
+ { 0x00000948, 0x0000 }, /* R2376 (0x948) - DSP1RMIX Input 1 Source */
+ { 0x00000949, 0x0080 }, /* R2377 (0x949) - DSP1RMIX Input 1 Volume */
+ { 0x0000094a, 0x0000 }, /* R2378 (0x94a) - DSP1RMIX Input 2 Source */
+ { 0x0000094b, 0x0080 }, /* R2379 (0x94b) - DSP1RMIX Input 2 Volume */
+ { 0x0000094c, 0x0000 }, /* R2380 (0x94c) - DSP1RMIX Input 3 Source */
+ { 0x0000094d, 0x0080 }, /* R2381 (0x94d) - DSP1RMIX Input 3 Volume */
+ { 0x0000094e, 0x0000 }, /* R2382 (0x94e) - DSP1RMIX Input 4 Source */
+ { 0x0000094f, 0x0080 }, /* R2383 (0x94f) - DSP1RMIX Input 4 Volume */
+ { 0x00000950, 0x0000 }, /* R2384 (0x950) - DSP1AUX1MIX Input 1 Source */
+ { 0x00000958, 0x0000 }, /* R2392 (0x958) - DSP1AUX2MIX Input 1 Source */
+ { 0x00000960, 0x0000 }, /* R2400 (0x960) - DSP1AUX3MIX Input 1 Source */
+ { 0x00000968, 0x0000 }, /* R2408 (0x968) - DSP1AUX4MIX Input 1 Source */
+ { 0x00000970, 0x0000 }, /* R2416 (0x970) - DSP1AUX5MIX Input 1 Source */
+ { 0x00000978, 0x0000 }, /* R2424 (0x978) - DSP1AUX6MIX Input 1 Source */
+ { 0x00000980, 0x0000 }, /* R2432 (0x980) - DSP2LMIX Input 1 Source */
+ { 0x00000981, 0x0080 }, /* R2433 (0x981) - DSP2LMIX Input 1 Volume */
+ { 0x00000982, 0x0000 }, /* R2434 (0x982) - DSP2LMIX Input 2 Source */
+ { 0x00000983, 0x0080 }, /* R2435 (0x983) - DSP2LMIX Input 2 Volume */
+ { 0x00000984, 0x0000 }, /* R2436 (0x984) - DSP2LMIX Input 3 Source */
+ { 0x00000985, 0x0080 }, /* R2437 (0x985) - DSP2LMIX Input 3 Volume */
+ { 0x00000986, 0x0000 }, /* R2438 (0x986) - DSP2LMIX Input 4 Source */
+ { 0x00000987, 0x0080 }, /* R2439 (0x987) - DSP2LMIX Input 4 Volume */
+ { 0x00000988, 0x0000 }, /* R2440 (0x988) - DSP2RMIX Input 1 Source */
+ { 0x00000989, 0x0080 }, /* R2441 (0x989) - DSP2RMIX Input 1 Volume */
+ { 0x0000098a, 0x0000 }, /* R2442 (0x98a) - DSP2RMIX Input 2 Source */
+ { 0x0000098b, 0x0080 }, /* R2443 (0x98b) - DSP2RMIX Input 2 Volume */
+ { 0x0000098c, 0x0000 }, /* R2444 (0x98c) - DSP2RMIX Input 3 Source */
+ { 0x0000098d, 0x0080 }, /* R2445 (0x98d) - DSP2RMIX Input 3 Volume */
+ { 0x0000098e, 0x0000 }, /* R2446 (0x98e) - DSP2RMIX Input 4 Source */
+ { 0x0000098f, 0x0080 }, /* R2447 (0x98f) - DSP2RMIX Input 4 Volume */
+ { 0x00000990, 0x0000 }, /* R2448 (0x990) - DSP2AUX1MIX Input 1 Source */
+ { 0x00000998, 0x0000 }, /* R2456 (0x998) - DSP2AUX2MIX Input 1 Source */
+ { 0x000009a0, 0x0000 }, /* R2464 (0x9a0) - DSP2AUX3MIX Input 1 Source */
+ { 0x000009a8, 0x0000 }, /* R2472 (0x9a8) - DSP2AUX4MIX Input 1 Source */
+ { 0x000009b0, 0x0000 }, /* R2480 (0x9b0) - DSP2AUX5MIX Input 1 Source */
+ { 0x000009b8, 0x0000 }, /* R2488 (0x9b8) - DSP2AUX6MIX Input 1 Source */
+ { 0x000009c0, 0x0000 }, /* R2496 (0x9c0) - DSP3LMIX Input 1 Source */
+ { 0x000009c1, 0x0080 }, /* R2497 (0x9c1) - DSP3LMIX Input 1 Volume */
+ { 0x000009c2, 0x0000 }, /* R2498 (0x9c2) - DSP3LMIX Input 2 Source */
+ { 0x000009c3, 0x0080 }, /* R2499 (0x9c3) - DSP3LMIX Input 2 Volume */
+ { 0x000009c4, 0x0000 }, /* R2500 (0x9c4) - DSP3LMIX Input 3 Source */
+ { 0x000009c5, 0x0080 }, /* R2501 (0x9c5) - DSP3LMIX Input 3 Volume */
+ { 0x000009c6, 0x0000 }, /* R2502 (0x9c6) - DSP3LMIX Input 4 Source */
+ { 0x000009c7, 0x0080 }, /* R2503 (0x9c7) - DSP3LMIX Input 4 Volume */
+ { 0x000009c8, 0x0000 }, /* R2504 (0x9c8) - DSP3RMIX Input 1 Source */
+ { 0x000009c9, 0x0080 }, /* R2505 (0x9c9) - DSP3RMIX Input 1 Volume */
+ { 0x000009ca, 0x0000 }, /* R2506 (0x9ca) - DSP3RMIX Input 2 Source */
+ { 0x000009cb, 0x0080 }, /* R2507 (0x9cb) - DSP3RMIX Input 2 Volume */
+ { 0x000009cc, 0x0000 }, /* R2508 (0x9cc) - DSP3RMIX Input 3 Source */
+ { 0x000009cd, 0x0080 }, /* R2509 (0x9cd) - DSP3RMIX Input 3 Volume */
+ { 0x000009ce, 0x0000 }, /* R2510 (0x9ce) - DSP3RMIX Input 4 Source */
+ { 0x000009cf, 0x0080 }, /* R2511 (0x9cf) - DSP3RMIX Input 4 Volume */
+ { 0x000009d0, 0x0000 }, /* R2512 (0x9d0) - DSP3AUX1MIX Input 1 Source */
+ { 0x000009d8, 0x0000 }, /* R2520 (0x9d8) - DSP3AUX2MIX Input 1 Source */
+ { 0x000009e0, 0x0000 }, /* R2528 (0x9e0) - DSP3AUX3MIX Input 1 Source */
+ { 0x000009e8, 0x0000 }, /* R2536 (0x9e8) - DSP3AUX4MIX Input 1 Source */
+ { 0x000009f0, 0x0000 }, /* R2544 (0x9f0) - DSP3AUX5MIX Input 1 Source */
+ { 0x000009f8, 0x0000 }, /* R2552 (0x9f8) - DSP3AUX6MIX Input 1 Source */
+ { 0x00000b00, 0x0000 }, /* R2816 (0xb00) - ISRC1DEC1MIX Input 1 Source*/
+ { 0x00000b08, 0x0000 }, /* R2824 (0xb08) - ISRC1DEC2MIX Input 1 Source*/
+ { 0x00000b10, 0x0000 }, /* R2832 (0xb10) - ISRC1DEC3MIX Input 1 Source*/
+ { 0x00000b18, 0x0000 }, /* R2840 (0xb18) - ISRC1DEC4MIX Input 1 Source*/
+ { 0x00000b20, 0x0000 }, /* R2848 (0xb20) - ISRC1INT1MIX Input 1 Source*/
+ { 0x00000b28, 0x0000 }, /* R2856 (0xb28) - ISRC1INT2MIX Input 1 Source*/
+ { 0x00000b30, 0x0000 }, /* R2864 (0xb30) - ISRC1INT3MIX Input 1 Source*/
+ { 0x00000b38, 0x0000 }, /* R2872 (0xb38) - ISRC1INT4MIX Input 1 Source*/
+ { 0x00000b40, 0x0000 }, /* R2880 (0xb40) - ISRC2DEC1MIX Input 1 Source*/
+ { 0x00000b48, 0x0000 }, /* R2888 (0xb48) - ISRC2DEC2MIX Input 1 Source*/
+ { 0x00000b50, 0x0000 }, /* R2896 (0xb50) - ISRC2DEC3MIX Input 1 Source*/
+ { 0x00000b58, 0x0000 }, /* R2904 (0xb58) - ISRC2DEC4MIX Input 1 Source*/
+ { 0x00000b60, 0x0000 }, /* R2912 (0xb60) - ISRC2INT1MIX Input 1 Source*/
+ { 0x00000b68, 0x0000 }, /* R2920 (0xb68) - ISRC2INT2MIX Input 1 Source*/
+ { 0x00000b70, 0x0000 }, /* R2928 (0xb70) - ISRC2INT3MIX Input 1 Source*/
+ { 0x00000b78, 0x0000 }, /* R2936 (0xb78) - ISRC2INT4MIX Input 1 Source*/
+ { 0x00000e00, 0x0000 }, /* R3584 (0xe00) - FX Ctrl1 */
+ { 0x00000e10, 0x6318 }, /* R3600 (0xe10) - EQ1_1 */
+ { 0x00000e11, 0x6300 }, /* R3601 (0xe11) - EQ1_2 */
+ { 0x00000e12, 0x0fc8 }, /* R3602 (0xe12) - EQ1_3 */
+ { 0x00000e13, 0x03fe }, /* R3603 (0xe13) - EQ1_4 */
+ { 0x00000e14, 0x00e0 }, /* R3604 (0xe14) - EQ1_5 */
+ { 0x00000e15, 0x1ec4 }, /* R3605 (0xe15) - EQ1_6 */
+ { 0x00000e16, 0xf136 }, /* R3606 (0xe16) - EQ1_7 */
+ { 0x00000e17, 0x0409 }, /* R3607 (0xe17) - EQ1_8 */
+ { 0x00000e18, 0x04cc }, /* R3608 (0xe18) - EQ1_9 */
+ { 0x00000e19, 0x1c9b }, /* R3609 (0xe19) - EQ1_10 */
+ { 0x00000e1a, 0xf337 }, /* R3610 (0xe1a) - EQ1_11 */
+ { 0x00000e1b, 0x040b }, /* R3611 (0xe1b) - EQ1_12 */
+ { 0x00000e1c, 0x0cbb }, /* R3612 (0xe1c) - EQ1_13 */
+ { 0x00000e1d, 0x16f8 }, /* R3613 (0xe1d) - EQ1_14 */
+ { 0x00000e1e, 0xf7d9 }, /* R3614 (0xe1e) - EQ1_15 */
+ { 0x00000e1f, 0x040a }, /* R3615 (0xe1f) - EQ1_16 */
+ { 0x00000e20, 0x1f14 }, /* R3616 (0xe20) - EQ1_17 */
+ { 0x00000e21, 0x058c }, /* R3617 (0xe21) - EQ1_18 */
+ { 0x00000e22, 0x0563 }, /* R3618 (0xe22) - EQ1_19 */
+ { 0x00000e23, 0x4000 }, /* R3619 (0xe23) - EQ1_20 */
+ { 0x00000e24, 0x0b75 }, /* R3620 (0xe24) - EQ1_21 */
+ { 0x00000e26, 0x6318 }, /* R3622 (0xe26) - EQ2_1 */
+ { 0x00000e27, 0x6300 }, /* R3623 (0xe27) - EQ2_2 */
+ { 0x00000e28, 0x0fc8 }, /* R3624 (0xe28) - EQ2_3 */
+ { 0x00000e29, 0x03fe }, /* R3625 (0xe29) - EQ2_4 */
+ { 0x00000e2a, 0x00e0 }, /* R3626 (0xe2a) - EQ2_5 */
+ { 0x00000e2b, 0x1ec4 }, /* R3627 (0xe2b) - EQ2_6 */
+ { 0x00000e2c, 0xf136 }, /* R3628 (0xe2c) - EQ2_7 */
+ { 0x00000e2d, 0x0409 }, /* R3629 (0xe2d) - EQ2_8 */
+ { 0x00000e2e, 0x04cc }, /* R3630 (0xe2e) - EQ2_9 */
+ { 0x00000e2f, 0x1c9b }, /* R3631 (0xe2f) - EQ2_10 */
+ { 0x00000e30, 0xf337 }, /* R3632 (0xe30) - EQ2_11 */
+ { 0x00000e31, 0x040b }, /* R3633 (0xe31) - EQ2_12 */
+ { 0x00000e32, 0x0cbb }, /* R3634 (0xe32) - EQ2_13 */
+ { 0x00000e33, 0x16f8 }, /* R3635 (0xe33) - EQ2_14 */
+ { 0x00000e34, 0xf7d9 }, /* R3636 (0xe34) - EQ2_15 */
+ { 0x00000e35, 0x040a }, /* R3637 (0xe35) - EQ2_16 */
+ { 0x00000e36, 0x1f14 }, /* R3638 (0xe36) - EQ2_17 */
+ { 0x00000e37, 0x058c }, /* R3639 (0xe37) - EQ2_18 */
+ { 0x00000e38, 0x0563 }, /* R3640 (0xe38) - EQ2_19 */
+ { 0x00000e39, 0x4000 }, /* R3641 (0xe39) - EQ2_20 */
+ { 0x00000e3a, 0x0b75 }, /* R3642 (0xe3a) - EQ2_21 */
+ { 0x00000e3c, 0x6318 }, /* R3644 (0xe3c) - EQ3_1 */
+ { 0x00000e3d, 0x6300 }, /* R3645 (0xe3d) - EQ3_2 */
+ { 0x00000e3e, 0x0fc8 }, /* R3646 (0xe3e) - EQ3_3 */
+ { 0x00000e3f, 0x03fe }, /* R3647 (0xe3f) - EQ3_4 */
+ { 0x00000e40, 0x00e0 }, /* R3648 (0xe40) - EQ3_5 */
+ { 0x00000e41, 0x1ec4 }, /* R3649 (0xe41) - EQ3_6 */
+ { 0x00000e42, 0xf136 }, /* R3650 (0xe42) - EQ3_7 */
+ { 0x00000e43, 0x0409 }, /* R3651 (0xe43) - EQ3_8 */
+ { 0x00000e44, 0x04cc }, /* R3652 (0xe44) - EQ3_9 */
+ { 0x00000e45, 0x1c9b }, /* R3653 (0xe45) - EQ3_10 */
+ { 0x00000e46, 0xf337 }, /* R3654 (0xe46) - EQ3_11 */
+ { 0x00000e47, 0x040b }, /* R3655 (0xe47) - EQ3_12 */
+ { 0x00000e48, 0x0cbb }, /* R3656 (0xe48) - EQ3_13 */
+ { 0x00000e49, 0x16f8 }, /* R3657 (0xe49) - EQ3_14 */
+ { 0x00000e4a, 0xf7d9 }, /* R3658 (0xe4a) - EQ3_15 */
+ { 0x00000e4b, 0x040a }, /* R3659 (0xe4b) - EQ3_16 */
+ { 0x00000e4c, 0x1f14 }, /* R3660 (0xe4c) - EQ3_17 */
+ { 0x00000e4d, 0x058c }, /* R3661 (0xe4d) - EQ3_18 */
+ { 0x00000e4e, 0x0563 }, /* R3662 (0xe4e) - EQ3_19 */
+ { 0x00000e4f, 0x4000 }, /* R3663 (0xe4f) - EQ3_20 */
+ { 0x00000e50, 0x0b75 }, /* R3664 (0xe50) - EQ3_21 */
+ { 0x00000e52, 0x6318 }, /* R3666 (0xe52) - EQ4_1 */
+ { 0x00000e53, 0x6300 }, /* R3667 (0xe53) - EQ4_2 */
+ { 0x00000e54, 0x0fc8 }, /* R3668 (0xe54) - EQ4_3 */
+ { 0x00000e55, 0x03fe }, /* R3669 (0xe55) - EQ4_4 */
+ { 0x00000e56, 0x00e0 }, /* R3670 (0xe56) - EQ4_5 */
+ { 0x00000e57, 0x1ec4 }, /* R3671 (0xe57) - EQ4_6 */
+ { 0x00000e58, 0xf136 }, /* R3672 (0xe58) - EQ4_7 */
+ { 0x00000e59, 0x0409 }, /* R3673 (0xe59) - EQ4_8 */
+ { 0x00000e5a, 0x04cc }, /* R3674 (0xe5a) - EQ4_9 */
+ { 0x00000e5b, 0x1c9b }, /* R3675 (0xe5b) - EQ4_10 */
+ { 0x00000e5c, 0xf337 }, /* R3676 (0xe5c) - EQ4_11 */
+ { 0x00000e5d, 0x040b }, /* R3677 (0xe5d) - EQ4_12 */
+ { 0x00000e5e, 0x0cbb }, /* R3678 (0xe5e) - EQ4_13 */
+ { 0x00000e5f, 0x16f8 }, /* R3679 (0xe5f) - EQ4_14 */
+ { 0x00000e60, 0xf7d9 }, /* R3680 (0xe60) - EQ4_15 */
+ { 0x00000e61, 0x040a }, /* R3681 (0xe61) - EQ4_16 */
+ { 0x00000e62, 0x1f14 }, /* R3682 (0xe62) - EQ4_17 */
+ { 0x00000e63, 0x058c }, /* R3683 (0xe63) - EQ4_18 */
+ { 0x00000e64, 0x0563 }, /* R3684 (0xe64) - EQ4_19 */
+ { 0x00000e65, 0x4000 }, /* R3685 (0xe65) - EQ4_20 */
+ { 0x00000e66, 0x0b75 }, /* R3686 (0xe66) - EQ4_21 */
+ { 0x00000e80, 0x0018 }, /* R3712 (0xe80) - DRC1 ctrl1 */
+ { 0x00000e81, 0x0933 }, /* R3713 (0xe81) - DRC1 ctrl2 */
+ { 0x00000e82, 0x0018 }, /* R3714 (0xe82) - DRC1 ctrl3 */
+ { 0x00000e83, 0x0000 }, /* R3715 (0xe83) - DRC1 ctrl4 */
+ { 0x00000e84, 0x0000 }, /* R3716 (0xe84) - DRC1 ctrl5 */
+ { 0x00000e88, 0x0018 }, /* R3720 (0xe88) - DRC2 ctrl1 */
+ { 0x00000e89, 0x0933 }, /* R3721 (0xe89) - DRC2 ctrl2 */
+ { 0x00000e8a, 0x0018 }, /* R3722 (0xe8a) - DRC2 ctrl3 */
+ { 0x00000e8b, 0x0000 }, /* R3723 (0xe8b) - DRC2 ctrl4 */
+ { 0x00000e8c, 0x0000 }, /* R3724 (0xe8c) - DRC2 ctrl5 */
+ { 0x00000ec0, 0x0000 }, /* R3776 (0xec0) - HPLPF1_1 */
+ { 0x00000ec1, 0x0000 }, /* R3777 (0xec1) - HPLPF1_2 */
+ { 0x00000ec4, 0x0000 }, /* R3780 (0xec4) - HPLPF2_1 */
+ { 0x00000ec5, 0x0000 }, /* R3781 (0xec5) - HPLPF2_2 */
+ { 0x00000ec8, 0x0000 }, /* R3784 (0xec8) - HPLPF3_1 */
+ { 0x00000ec9, 0x0000 }, /* R3785 (0xec9) - HPLPF3_2 */
+ { 0x00000ecc, 0x0000 }, /* R3788 (0xecc) - HPLPF4_1 */
+ { 0x00000ecd, 0x0000 }, /* R3789 (0xecd) - HPLPF4_2 */
+ { 0x00000ef0, 0x0000 }, /* R3824 (0xef0) - ISRC 1 CTRL 1 */
+ { 0x00000ef1, 0x0001 }, /* R3825 (0xef1) - ISRC 1 CTRL 2 */
+ { 0x00000ef2, 0x0000 }, /* R3826 (0xef2) - ISRC 1 CTRL 3 */
+ { 0x00000ef3, 0x0000 }, /* R3827 (0xef3) - ISRC 2 CTRL 1 */
+ { 0x00000ef4, 0x0001 }, /* R3828 (0xef4) - ISRC 2 CTRL 2 */
+ { 0x00000ef5, 0x0000 }, /* R3829 (0xef5) - ISRC 2 CTRL 3 */
+ { 0x00001300, 0x0000 }, /* R4864 (0x1300) - DAC Comp 1 */
+ { 0x00001302, 0x0000 }, /* R4866 (0x1302) - DAC Comp 2 */
+ { 0x00001380, 0x0000 }, /* R4992 (0x1380) - FRF Coefficient 1L 1 */
+ { 0x00001381, 0x0000 }, /* R4993 (0x1381) - FRF Coefficient 1L 2 */
+ { 0x00001382, 0x0000 }, /* R4994 (0x1382) - FRF Coefficient 1L 3 */
+ { 0x00001383, 0x0000 }, /* R4995 (0x1383) - FRF Coefficient 1L 4 */
+ { 0x00001390, 0x0000 }, /* R5008 (0x1390) - FRF Coefficient 1R 1 */
+ { 0x00001391, 0x0000 }, /* R5009 (0x1391) - FRF Coefficient 1R 2 */
+ { 0x00001392, 0x0000 }, /* R5010 (0x1392) - FRF Coefficient 1R 3 */
+ { 0x00001393, 0x0000 }, /* R5011 (0x1393) - FRF Coefficient 1R 4 */
+ { 0x000013a0, 0x0000 }, /* R5024 (0x13a0) - FRF Coefficient 4L 1 */
+ { 0x000013a1, 0x0000 }, /* R5025 (0x13a1) - FRF Coefficient 4L 2 */
+ { 0x000013a2, 0x0000 }, /* R5026 (0x13a2) - FRF Coefficient 4L 3 */
+ { 0x000013a3, 0x0000 }, /* R5027 (0x13a3) - FRF Coefficient 4L 4 */
+ { 0x000013b0, 0x0000 }, /* R5040 (0x13b0) - FRF Coefficient 5L 1 */
+ { 0x000013b1, 0x0000 }, /* R5041 (0x13b1) - FRF Coefficient 5L 2 */
+ { 0x000013b2, 0x0000 }, /* R5042 (0x13b2) - FRF Coefficient 5L 3 */
+ { 0x000013b3, 0x0000 }, /* R5043 (0x13b3) - FRF Coefficient 5L 4 */
+ { 0x000013c0, 0x0000 }, /* R5056 (0x13c0) - FRF Coefficient 5R 1 */
+ { 0x000013c1, 0x0000 }, /* R5057 (0x13c1) - FRF Coefficient 5R 2 */
+ { 0x000013c2, 0x0000 }, /* R5058 (0x13c2) - FRF Coefficient 5R 3 */
+ { 0x000013c3, 0x0000 }, /* R5059 (0x13c3) - FRF Coefficient 5R 4 */
+ { 0x00001700, 0x2001 }, /* R5888 (0x1700) - GPIO1 Control 1 */
+ { 0x00001701, 0xf000 }, /* R5889 (0x1701) - GPIO1 Control 2 */
+ { 0x00001702, 0x2001 }, /* R5890 (0x1702) - GPIO2 Control 1 */
+ { 0x00001703, 0xf000 }, /* R5891 (0x1703) - GPIO2 Control 2 */
+ { 0x00001704, 0x2001 }, /* R5892 (0x1704) - GPIO3 Control 1 */
+ { 0x00001705, 0xf000 }, /* R5893 (0x1705) - GPIO3 Control 2 */
+ { 0x00001706, 0x2001 }, /* R5894 (0x1706) - GPIO4 Control 1 */
+ { 0x00001707, 0xf000 }, /* R5895 (0x1707) - GPIO4 Control 2 */
+ { 0x00001708, 0x2001 }, /* R5896 (0x1708) - GPIO5 Control 1 */
+ { 0x00001709, 0xf000 }, /* R5897 (0x1709) - GPIO5 Control 2 */
+ { 0x0000170a, 0x2001 }, /* R5898 (0x170a) - GPIO6 Control 1 */
+ { 0x0000170b, 0xf000 }, /* R5899 (0x170b) - GPIO6 Control 2 */
+ { 0x0000170c, 0x2001 }, /* R5900 (0x170c) - GPIO7 Control 1 */
+ { 0x0000170d, 0xf000 }, /* R5901 (0x170d) - GPIO7 Control 2 */
+ { 0x0000170e, 0x2001 }, /* R5902 (0x170e) - GPIO8 Control 1 */
+ { 0x0000170f, 0xf000 }, /* R5903 (0x170f) - GPIO8 Control 2 */
+ { 0x00001710, 0x2001 }, /* R5904 (0x1710) - GPIO9 Control 1 */
+ { 0x00001711, 0xf000 }, /* R5905 (0x1711) - GPIO9 Control 2 */
+ { 0x00001712, 0x2001 }, /* R5906 (0x1712) - GPIO10 Control 1 */
+ { 0x00001713, 0xf000 }, /* R5907 (0x1713) - GPIO10 Control 2 */
+ { 0x00001714, 0x2001 }, /* R5908 (0x1714) - GPIO11 Control 1 */
+ { 0x00001715, 0xf000 }, /* R5909 (0x1715) - GPIO11 Control 2 */
+ { 0x00001716, 0x2001 }, /* R5910 (0x1716) - GPIO12 Control 1 */
+ { 0x00001717, 0xf000 }, /* R5911 (0x1717) - GPIO12 Control 2 */
+ { 0x00001718, 0x2001 }, /* R5912 (0x1718) - GPIO13 Control 1 */
+ { 0x00001719, 0xf000 }, /* R5913 (0x1719) - GPIO13 Control 2 */
+ { 0x0000171a, 0x2001 }, /* R5914 (0x171a) - GPIO14 Control 1 */
+ { 0x0000171b, 0xf000 }, /* R5915 (0x171b) - GPIO14 Control 2 */
+ { 0x0000171c, 0x2001 }, /* R5916 (0x171c) - GPIO15 Control 1 */
+ { 0x0000171d, 0xf000 }, /* R5917 (0x171d) - GPIO15 Control 2 */
+ { 0x0000171e, 0x2001 }, /* R5918 (0x171e) - GPIO16 Control 1 */
+ { 0x0000171f, 0xf000 }, /* R5919 (0x171f) - GPIO16 Control 2 */
+ { 0x00001840, 0xffff }, /* R6208 (0x1840) - IRQ1 Mask 1 */
+ { 0x00001841, 0xffff }, /* R6209 (0x1841) - IRQ1 Mask 2 */
+ { 0x00001842, 0xffff }, /* R6210 (0x1842) - IRQ1 Mask 3 */
+ { 0x00001843, 0xffff }, /* R6211 (0x1843) - IRQ1 Mask 4 */
+ { 0x00001844, 0xffff }, /* R6212 (0x1844) - IRQ1 Mask 5 */
+ { 0x00001845, 0xffff }, /* R6213 (0x1845) - IRQ1 Mask 6 */
+ { 0x00001846, 0xffff }, /* R6214 (0x1846) - IRQ1 Mask 7 */
+ { 0x00001847, 0xffff }, /* R6215 (0x1847) - IRQ1 Mask 8 */
+ { 0x00001848, 0xffff }, /* R6216 (0x1848) - IRQ1 Mask 9 */
+ { 0x00001849, 0xffff }, /* R6217 (0x1849) - IRQ1 Mask 10 */
+ { 0x0000184a, 0xffff }, /* R6218 (0x184a) - IRQ1 Mask 11 */
+ { 0x0000184b, 0xffff }, /* R6219 (0x184b) - IRQ1 Mask 12 */
+ { 0x0000184c, 0xffff }, /* R6220 (0x184c) - IRQ1 Mask 13 */
+ { 0x0000184d, 0xffff }, /* R6221 (0x184d) - IRQ1 Mask 14 */
+ { 0x0000184e, 0xffff }, /* R6222 (0x184e) - IRQ1 Mask 15 */
+ { 0x0000184f, 0xffff }, /* R6223 (0x184f) - IRQ1 Mask 16 */
+ { 0x00001850, 0xffff }, /* R6224 (0x1850) - IRQ1 Mask 17 */
+ { 0x00001851, 0xffff }, /* R6225 (0x1851) - IRQ1 Mask 18 */
+ { 0x00001852, 0xffff }, /* R6226 (0x1852) - IRQ1 Mask 19 */
+ { 0x00001853, 0xffff }, /* R6227 (0x1853) - IRQ1 Mask 20 */
+ { 0x00001854, 0xffff }, /* R6228 (0x1854) - IRQ1 Mask 21 */
+ { 0x00001855, 0xffff }, /* R6229 (0x1855) - IRQ1 Mask 22 */
+ { 0x00001856, 0xffff }, /* R6230 (0x1856) - IRQ1 Mask 23 */
+ { 0x00001857, 0xffff }, /* R6231 (0x1857) - IRQ1 Mask 24 */
+ { 0x00001858, 0xffff }, /* R6232 (0x1858) - IRQ1 Mask 25 */
+ { 0x00001859, 0xffff }, /* R6233 (0x1859) - IRQ1 Mask 26 */
+ { 0x0000185a, 0xffff }, /* R6234 (0x185a) - IRQ1 Mask 27 */
+ { 0x0000185b, 0xffff }, /* R6235 (0x185b) - IRQ1 Mask 28 */
+ { 0x0000185c, 0xffff }, /* R6236 (0x185c) - IRQ1 Mask 29 */
+ { 0x0000185d, 0xffff }, /* R6237 (0x185d) - IRQ1 Mask 30 */
+ { 0x0000185e, 0xffff }, /* R6238 (0x185e) - IRQ1 Mask 31 */
+ { 0x0000185f, 0xffff }, /* R6239 (0x185f) - IRQ1 Mask 32 */
+ { 0x00001860, 0xffff }, /* R6240 (0x1860) - IRQ1 Mask 33 */
+ { 0x00001a06, 0x0000 }, /* R6662 (0x1a06) - Interrupt Debounce 7 */
+ { 0x00001a80, 0x4400 }, /* R6784 (0x1a80) - IRQ1 CTRL */
+};
+
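+/* Check whether the given register falls within one of the ADSP memories */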
+static bool cs47l35_is_adsp_memory(unsigned int reg)
+{
+ switch (reg) {
+ case 0x080000 ... 0x085ffe:
+ case 0x0a0000 ... 0x0a7ffe:
+ case 0x0c0000 ... 0x0c1ffe:
+ case 0x0e0000 ... 0x0e1ffe:
+ case 0x100000 ... 0x10effe:
+ case 0x120000 ... 0x12bffe:
+ case 0x136000 ... 0x137ffe:
+ case 0x140000 ... 0x14bffe:
+ case 0x160000 ... 0x161ffe:
+ case 0x180000 ... 0x18effe:
+ case 0x1a0000 ... 0x1b1ffe:
+ case 0x1b6000 ... 0x1b7ffe:
+ case 0x1c0000 ... 0x1cbffe:
+ case 0x1e0000 ... 0x1e1ffe:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs47l35_16bit_readable_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_SOFTWARE_RESET:
+ case MADERA_HARDWARE_REVISION:
+ case MADERA_WRITE_SEQUENCER_CTRL_0:
+ case MADERA_WRITE_SEQUENCER_CTRL_1:
+ case MADERA_WRITE_SEQUENCER_CTRL_2:
+ case MADERA_TONE_GENERATOR_1:
+ case MADERA_TONE_GENERATOR_2:
+ case MADERA_TONE_GENERATOR_3:
+ case MADERA_TONE_GENERATOR_4:
+ case MADERA_TONE_GENERATOR_5:
+ case MADERA_PWM_DRIVE_1:
+ case MADERA_PWM_DRIVE_2:
+ case MADERA_PWM_DRIVE_3:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+ case MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+ case MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+ case MADERA_HAPTICS_CONTROL_1:
+ case MADERA_HAPTICS_CONTROL_2:
+ case MADERA_HAPTICS_PHASE_1_INTENSITY:
+ case MADERA_HAPTICS_PHASE_1_DURATION:
+ case MADERA_HAPTICS_PHASE_2_INTENSITY:
+ case MADERA_HAPTICS_PHASE_2_DURATION:
+ case MADERA_HAPTICS_PHASE_3_INTENSITY:
+ case MADERA_HAPTICS_PHASE_3_DURATION:
+ case MADERA_HAPTICS_STATUS:
+ case MADERA_COMFORT_NOISE_GENERATOR:
+ case MADERA_CLOCK_32K_1:
+ case MADERA_SYSTEM_CLOCK_1:
+ case MADERA_SAMPLE_RATE_1:
+ case MADERA_SAMPLE_RATE_2:
+ case MADERA_SAMPLE_RATE_3:
+ case MADERA_SAMPLE_RATE_1_STATUS:
+ case MADERA_SAMPLE_RATE_2_STATUS:
+ case MADERA_SAMPLE_RATE_3_STATUS:
+ case MADERA_DSP_CLOCK_1:
+ case MADERA_DSP_CLOCK_2:
+ case MADERA_OUTPUT_SYSTEM_CLOCK:
+ case MADERA_OUTPUT_ASYNC_CLOCK:
+ case MADERA_RATE_ESTIMATOR_1:
+ case MADERA_RATE_ESTIMATOR_2:
+ case MADERA_RATE_ESTIMATOR_3:
+ case MADERA_RATE_ESTIMATOR_4:
+ case MADERA_RATE_ESTIMATOR_5:
+ case MADERA_FLL1_CONTROL_1:
+ case MADERA_FLL1_CONTROL_2:
+ case MADERA_FLL1_CONTROL_3:
+ case MADERA_FLL1_CONTROL_4:
+ case MADERA_FLL1_CONTROL_5:
+ case MADERA_FLL1_CONTROL_6:
+ case MADERA_FLL1_CONTROL_7:
+ case MADERA_FLL1_EFS_2:
+ case MADERA_FLL1_LOOP_FILTER_TEST_1:
+ case CS47L35_FLL1_SYNCHRONISER_1:
+ case CS47L35_FLL1_SYNCHRONISER_2:
+ case CS47L35_FLL1_SYNCHRONISER_3:
+ case CS47L35_FLL1_SYNCHRONISER_4:
+ case CS47L35_FLL1_SYNCHRONISER_5:
+ case CS47L35_FLL1_SYNCHRONISER_6:
+ case CS47L35_FLL1_SYNCHRONISER_7:
+ case CS47L35_FLL1_SPREAD_SPECTRUM:
+ case CS47L35_FLL1_GPIO_CLOCK:
+ case MADERA_MIC_CHARGE_PUMP_1:
+ case MADERA_HP_CHARGE_PUMP_8:
+ case MADERA_LDO2_CONTROL_1:
+ case MADERA_MIC_BIAS_CTRL_1:
+ case MADERA_MIC_BIAS_CTRL_2:
+ case MADERA_MIC_BIAS_CTRL_5:
+ case MADERA_MIC_BIAS_CTRL_6:
+ case MADERA_HP_CTRL_1L:
+ case MADERA_HP_CTRL_1R:
+ case MADERA_DCS_HP1L_CONTROL:
+ case MADERA_DCS_HP1R_CONTROL:
+ case MADERA_EDRE_HP_STEREO_CONTROL:
+ case MADERA_ACCESSORY_DETECT_MODE_1:
+ case MADERA_HEADPHONE_DETECT_1:
+ case MADERA_HEADPHONE_DETECT_2:
+ case MADERA_HEADPHONE_DETECT_3:
+ case MADERA_HEADPHONE_DETECT_5:
+ case MADERA_MICD_CLAMP_CONTROL:
+ case MADERA_MIC_DETECT_1_CONTROL_1:
+ case MADERA_MIC_DETECT_1_CONTROL_2:
+ case MADERA_MIC_DETECT_1_CONTROL_3:
+ case MADERA_MIC_DETECT_1_LEVEL_1:
+ case MADERA_MIC_DETECT_1_LEVEL_2:
+ case MADERA_MIC_DETECT_1_LEVEL_3:
+ case MADERA_MIC_DETECT_1_LEVEL_4:
+ case MADERA_MIC_DETECT_1_CONTROL_4:
+ case MADERA_GP_SWITCH_1:
+ case MADERA_JACK_DETECT_ANALOGUE:
+ case MADERA_INPUT_ENABLES:
+ case MADERA_INPUT_ENABLES_STATUS:
+ case MADERA_INPUT_RATE:
+ case MADERA_INPUT_VOLUME_RAMP:
+ case MADERA_HPF_CONTROL:
+ case MADERA_IN1L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_1L:
+ case MADERA_DMIC1L_CONTROL:
+ case MADERA_IN1R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_1R:
+ case MADERA_DMIC1R_CONTROL:
+ case MADERA_IN2L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_2L:
+ case MADERA_DMIC2L_CONTROL:
+ case MADERA_IN2R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_2R:
+ case MADERA_DMIC2R_CONTROL:
+ case MADERA_OUTPUT_ENABLES_1:
+ case MADERA_OUTPUT_STATUS_1:
+ case MADERA_RAW_OUTPUT_STATUS_1:
+ case MADERA_OUTPUT_RATE_1:
+ case MADERA_OUTPUT_VOLUME_RAMP:
+ case MADERA_OUTPUT_PATH_CONFIG_1L:
+ case MADERA_DAC_DIGITAL_VOLUME_1L:
+ case MADERA_NOISE_GATE_SELECT_1L:
+ case MADERA_OUTPUT_PATH_CONFIG_1R:
+ case MADERA_DAC_DIGITAL_VOLUME_1R:
+ case MADERA_NOISE_GATE_SELECT_1R:
+ case MADERA_OUTPUT_PATH_CONFIG_4L:
+ case MADERA_DAC_DIGITAL_VOLUME_4L:
+ case MADERA_NOISE_GATE_SELECT_4L:
+ case MADERA_OUTPUT_PATH_CONFIG_5L:
+ case MADERA_DAC_DIGITAL_VOLUME_5L:
+ case MADERA_NOISE_GATE_SELECT_5L:
+ case MADERA_OUTPUT_PATH_CONFIG_5R:
+ case MADERA_DAC_DIGITAL_VOLUME_5R:
+ case MADERA_NOISE_GATE_SELECT_5R:
+ case MADERA_DRE_ENABLE:
+ case MADERA_EDRE_ENABLE:
+ case MADERA_EDRE_MANUAL:
+ case MADERA_DAC_AEC_CONTROL_1:
+ case MADERA_NOISE_GATE_CONTROL:
+ case MADERA_PDM_SPK1_CTRL_1:
+ case MADERA_PDM_SPK1_CTRL_2:
+ case MADERA_HP1_SHORT_CIRCUIT_CTRL:
+ case MADERA_HP_TEST_CTRL_5:
+ case MADERA_HP_TEST_CTRL_6:
+ case MADERA_AIF1_BCLK_CTRL:
+ case MADERA_AIF1_TX_PIN_CTRL:
+ case MADERA_AIF1_RX_PIN_CTRL:
+ case MADERA_AIF1_RATE_CTRL:
+ case MADERA_AIF1_FORMAT:
+ case MADERA_AIF1_RX_BCLK_RATE:
+ case MADERA_AIF1_FRAME_CTRL_1:
+ case MADERA_AIF1_FRAME_CTRL_2:
+ case MADERA_AIF1_FRAME_CTRL_3:
+ case MADERA_AIF1_FRAME_CTRL_4:
+ case MADERA_AIF1_FRAME_CTRL_5:
+ case MADERA_AIF1_FRAME_CTRL_6:
+ case MADERA_AIF1_FRAME_CTRL_7:
+ case MADERA_AIF1_FRAME_CTRL_8:
+ case MADERA_AIF1_FRAME_CTRL_11:
+ case MADERA_AIF1_FRAME_CTRL_12:
+ case MADERA_AIF1_FRAME_CTRL_13:
+ case MADERA_AIF1_FRAME_CTRL_14:
+ case MADERA_AIF1_FRAME_CTRL_15:
+ case MADERA_AIF1_FRAME_CTRL_16:
+ case MADERA_AIF1_TX_ENABLES:
+ case MADERA_AIF1_RX_ENABLES:
+ case MADERA_AIF2_BCLK_CTRL:
+ case MADERA_AIF2_TX_PIN_CTRL:
+ case MADERA_AIF2_RX_PIN_CTRL:
+ case MADERA_AIF2_RATE_CTRL:
+ case MADERA_AIF2_FORMAT:
+ case MADERA_AIF2_RX_BCLK_RATE:
+ case MADERA_AIF2_FRAME_CTRL_1:
+ case MADERA_AIF2_FRAME_CTRL_2:
+ case MADERA_AIF2_FRAME_CTRL_3:
+ case MADERA_AIF2_FRAME_CTRL_4:
+ case MADERA_AIF2_FRAME_CTRL_11:
+ case MADERA_AIF2_FRAME_CTRL_12:
+ case MADERA_AIF2_TX_ENABLES:
+ case MADERA_AIF2_RX_ENABLES:
+ case MADERA_AIF3_BCLK_CTRL:
+ case MADERA_AIF3_TX_PIN_CTRL:
+ case MADERA_AIF3_RX_PIN_CTRL:
+ case MADERA_AIF3_RATE_CTRL:
+ case MADERA_AIF3_FORMAT:
+ case MADERA_AIF3_RX_BCLK_RATE:
+ case MADERA_AIF3_FRAME_CTRL_1:
+ case MADERA_AIF3_FRAME_CTRL_2:
+ case MADERA_AIF3_FRAME_CTRL_3:
+ case MADERA_AIF3_FRAME_CTRL_4:
+ case MADERA_AIF3_FRAME_CTRL_11:
+ case MADERA_AIF3_FRAME_CTRL_12:
+ case MADERA_AIF3_TX_ENABLES:
+ case MADERA_AIF3_RX_ENABLES:
+ case MADERA_SPD1_TX_CONTROL:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_2:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_3:
+ case MADERA_SLIMBUS_FRAMER_REF_GEAR:
+ case MADERA_SLIMBUS_RATES_1:
+ case MADERA_SLIMBUS_RATES_2:
+ case MADERA_SLIMBUS_RATES_3:
+ case MADERA_SLIMBUS_RATES_5:
+ case MADERA_SLIMBUS_RATES_6:
+ case MADERA_SLIMBUS_RATES_7:
+ case MADERA_SLIMBUS_RX_CHANNEL_ENABLE:
+ case MADERA_SLIMBUS_TX_CHANNEL_ENABLE:
+ case MADERA_SLIMBUS_RX_PORT_STATUS:
+ case MADERA_SLIMBUS_TX_PORT_STATUS:
+ case MADERA_PWM1MIX_INPUT_1_SOURCE:
+ case MADERA_PWM1MIX_INPUT_1_VOLUME:
+ case MADERA_PWM1MIX_INPUT_2_SOURCE:
+ case MADERA_PWM1MIX_INPUT_2_VOLUME:
+ case MADERA_PWM1MIX_INPUT_3_SOURCE:
+ case MADERA_PWM1MIX_INPUT_3_VOLUME:
+ case MADERA_PWM1MIX_INPUT_4_SOURCE:
+ case MADERA_PWM1MIX_INPUT_4_VOLUME:
+ case MADERA_PWM2MIX_INPUT_1_SOURCE:
+ case MADERA_PWM2MIX_INPUT_1_VOLUME:
+ case MADERA_PWM2MIX_INPUT_2_SOURCE:
+ case MADERA_PWM2MIX_INPUT_2_VOLUME:
+ case MADERA_PWM2MIX_INPUT_3_SOURCE:
+ case MADERA_PWM2MIX_INPUT_3_VOLUME:
+ case MADERA_PWM2MIX_INPUT_4_SOURCE:
+ case MADERA_PWM2MIX_INPUT_4_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_4_VOLUME:
+ case MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE:
+ case MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME:
+ case MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE:
+ case MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME:
+ case MADERA_EQ1MIX_INPUT_1_SOURCE:
+ case MADERA_EQ1MIX_INPUT_1_VOLUME:
+ case MADERA_EQ1MIX_INPUT_2_SOURCE:
+ case MADERA_EQ1MIX_INPUT_2_VOLUME:
+ case MADERA_EQ1MIX_INPUT_3_SOURCE:
+ case MADERA_EQ1MIX_INPUT_3_VOLUME:
+ case MADERA_EQ1MIX_INPUT_4_SOURCE:
+ case MADERA_EQ1MIX_INPUT_4_VOLUME:
+ case MADERA_EQ2MIX_INPUT_1_SOURCE:
+ case MADERA_EQ2MIX_INPUT_1_VOLUME:
+ case MADERA_EQ2MIX_INPUT_2_SOURCE:
+ case MADERA_EQ2MIX_INPUT_2_VOLUME:
+ case MADERA_EQ2MIX_INPUT_3_SOURCE:
+ case MADERA_EQ2MIX_INPUT_3_VOLUME:
+ case MADERA_EQ2MIX_INPUT_4_SOURCE:
+ case MADERA_EQ2MIX_INPUT_4_VOLUME:
+ case MADERA_EQ3MIX_INPUT_1_SOURCE:
+ case MADERA_EQ3MIX_INPUT_1_VOLUME:
+ case MADERA_EQ3MIX_INPUT_2_SOURCE:
+ case MADERA_EQ3MIX_INPUT_2_VOLUME:
+ case MADERA_EQ3MIX_INPUT_3_SOURCE:
+ case MADERA_EQ3MIX_INPUT_3_VOLUME:
+ case MADERA_EQ3MIX_INPUT_4_SOURCE:
+ case MADERA_EQ3MIX_INPUT_4_VOLUME:
+ case MADERA_EQ4MIX_INPUT_1_SOURCE:
+ case MADERA_EQ4MIX_INPUT_1_VOLUME:
+ case MADERA_EQ4MIX_INPUT_2_SOURCE:
+ case MADERA_EQ4MIX_INPUT_2_VOLUME:
+ case MADERA_EQ4MIX_INPUT_3_SOURCE:
+ case MADERA_EQ4MIX_INPUT_3_VOLUME:
+ case MADERA_EQ4MIX_INPUT_4_SOURCE:
+ case MADERA_EQ4MIX_INPUT_4_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_1_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_1_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_2_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_2_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_3_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_3_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_4_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_4_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_1_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_1_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_2_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_2_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_3_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_3_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_4_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_4_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_1_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_1_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_2_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_2_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_3_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_3_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_4_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_4_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_1_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_1_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_2_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_2_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_3_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_3_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_4_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_4_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_4_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP1AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP2AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP3AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT4MIX_INPUT_1_SOURCE:
+ case MADERA_FX_CTRL1:
+ case MADERA_FX_CTRL2:
+ case MADERA_EQ1_1 ... MADERA_EQ1_21:
+ case MADERA_EQ2_1 ... MADERA_EQ2_21:
+ case MADERA_EQ3_1 ... MADERA_EQ3_21:
+ case MADERA_EQ4_1 ... MADERA_EQ4_21:
+ case MADERA_DRC1_CTRL1:
+ case MADERA_DRC1_CTRL2:
+ case MADERA_DRC1_CTRL3:
+ case MADERA_DRC1_CTRL4:
+ case MADERA_DRC1_CTRL5:
+ case MADERA_DRC2_CTRL1:
+ case MADERA_DRC2_CTRL2:
+ case MADERA_DRC2_CTRL3:
+ case MADERA_DRC2_CTRL4:
+ case MADERA_DRC2_CTRL5:
+ case MADERA_HPLPF1_1:
+ case MADERA_HPLPF1_2:
+ case MADERA_HPLPF2_1:
+ case MADERA_HPLPF2_2:
+ case MADERA_HPLPF3_1:
+ case MADERA_HPLPF3_2:
+ case MADERA_HPLPF4_1:
+ case MADERA_HPLPF4_2:
+ case MADERA_ISRC_1_CTRL_1:
+ case MADERA_ISRC_1_CTRL_2:
+ case MADERA_ISRC_1_CTRL_3:
+ case MADERA_ISRC_2_CTRL_1:
+ case MADERA_ISRC_2_CTRL_2:
+ case MADERA_ISRC_2_CTRL_3:
+ case MADERA_DAC_COMP_1:
+ case MADERA_DAC_COMP_2:
+ case MADERA_FRF_COEFFICIENT_1L_1:
+ case MADERA_FRF_COEFFICIENT_1L_2:
+ case MADERA_FRF_COEFFICIENT_1L_3:
+ case MADERA_FRF_COEFFICIENT_1L_4:
+ case MADERA_FRF_COEFFICIENT_1R_1:
+ case MADERA_FRF_COEFFICIENT_1R_2:
+ case MADERA_FRF_COEFFICIENT_1R_3:
+ case MADERA_FRF_COEFFICIENT_1R_4:
+ case CS47L35_FRF_COEFFICIENT_4L_1:
+ case CS47L35_FRF_COEFFICIENT_4L_2:
+ case CS47L35_FRF_COEFFICIENT_4L_3:
+ case CS47L35_FRF_COEFFICIENT_4L_4:
+ case CS47L35_FRF_COEFFICIENT_5L_1:
+ case CS47L35_FRF_COEFFICIENT_5L_2:
+ case CS47L35_FRF_COEFFICIENT_5L_3:
+ case CS47L35_FRF_COEFFICIENT_5L_4:
+ case CS47L35_FRF_COEFFICIENT_5R_1:
+ case CS47L35_FRF_COEFFICIENT_5R_2:
+ case CS47L35_FRF_COEFFICIENT_5R_3:
+ case CS47L35_FRF_COEFFICIENT_5R_4:
+ case MADERA_GPIO1_CTRL_1 ... MADERA_GPIO16_CTRL_2:
+ case MADERA_IRQ1_STATUS_1 ... MADERA_IRQ1_STATUS_33:
+ case MADERA_IRQ1_MASK_1 ... MADERA_IRQ1_MASK_33:
+ case MADERA_IRQ1_RAW_STATUS_1 ... MADERA_IRQ1_RAW_STATUS_33:
+ case MADERA_INTERRUPT_DEBOUNCE_7:
+ case MADERA_IRQ1_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
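+/* Volatile registers are always read from the device, bypassing the cache */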
+static bool cs47l35_16bit_volatile_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_SOFTWARE_RESET:
+ case MADERA_HARDWARE_REVISION:
+ case MADERA_WRITE_SEQUENCER_CTRL_0:
+ case MADERA_WRITE_SEQUENCER_CTRL_1:
+ case MADERA_WRITE_SEQUENCER_CTRL_2:
+ case MADERA_HAPTICS_STATUS:
+ case MADERA_SAMPLE_RATE_1_STATUS:
+ case MADERA_SAMPLE_RATE_2_STATUS:
+ case MADERA_SAMPLE_RATE_3_STATUS:
+ case MADERA_HP_CTRL_1L:
+ case MADERA_HP_CTRL_1R:
+ case MADERA_DCS_HP1L_CONTROL:
+ case MADERA_DCS_HP1R_CONTROL:
+ case MADERA_MIC_DETECT_1_CONTROL_3:
+ case MADERA_MIC_DETECT_1_CONTROL_4:
+ case MADERA_HEADPHONE_DETECT_2:
+ case MADERA_HEADPHONE_DETECT_3:
+ case MADERA_HEADPHONE_DETECT_5:
+ case MADERA_INPUT_ENABLES_STATUS:
+ case MADERA_OUTPUT_STATUS_1:
+ case MADERA_RAW_OUTPUT_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_2:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_3:
+ case MADERA_SLIMBUS_RX_PORT_STATUS:
+ case MADERA_SLIMBUS_TX_PORT_STATUS:
+ case MADERA_FX_CTRL2:
+ case MADERA_IRQ1_STATUS_1 ... MADERA_IRQ1_STATUS_33:
+ case MADERA_IRQ1_RAW_STATUS_1 ... MADERA_IRQ1_RAW_STATUS_33:
+ return true;
+ default:
+ return false;
+ }
+}
+
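+/*
+ * The 32-bit map covers the write sequencer RAM, the OTP HPDET calibration
+ * words and the DSP control/memory space
+ */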
+static bool cs47l35_32bit_readable_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_WSEQ_SEQUENCE_1 ... MADERA_WSEQ_SEQUENCE_252:
+ case CS47L35_OTP_HPDET_CAL_1 ... CS47L35_OTP_HPDET_CAL_2:
+ case MADERA_DSP1_CONFIG_1 ... MADERA_DSP1_SCRATCH_2:
+ case MADERA_DSP2_CONFIG_1 ... MADERA_DSP2_SCRATCH_2:
+ case MADERA_DSP3_CONFIG_1 ... MADERA_DSP3_SCRATCH_2:
+ return true;
+ default:
+ return cs47l35_is_adsp_memory(reg);
+ }
+}
+
+static bool cs47l35_32bit_volatile_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_WSEQ_SEQUENCE_1 ... MADERA_WSEQ_SEQUENCE_252:
+ case CS47L35_OTP_HPDET_CAL_1 ... CS47L35_OTP_HPDET_CAL_2:
+ case MADERA_DSP1_CONFIG_1 ... MADERA_DSP1_SCRATCH_2:
+ case MADERA_DSP2_CONFIG_1 ... MADERA_DSP2_SCRATCH_2:
+ case MADERA_DSP3_CONFIG_1 ... MADERA_DSP3_SCRATCH_2:
+ return true;
+ default:
+ return cs47l35_is_adsp_memory(reg);
+ }
+}
+
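+/*
+ * Exported for the bus-specific glue. A minimal sketch of the expected
+ * usage (the real probe code lives in the madera bus drivers):
+ *
+ *   madera->regmap = devm_regmap_init_spi(spi, &cs47l35_16bit_spi_regmap);
+ *   madera->regmap_32bit = devm_regmap_init_spi(spi,
+ *                                               &cs47l35_32bit_spi_regmap);
+ */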
+const struct regmap_config cs47l35_16bit_spi_regmap = {
+ .name = "cs47l35_16bit",
+ .reg_bits = 32,
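+ /* SPI transfers insert 16 padding bits between the address and the data */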
+ .pad_bits = 16,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = 0x1b00,
+ .readable_reg = cs47l35_16bit_readable_register,
+ .volatile_reg = cs47l35_16bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = cs47l35_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs47l35_reg_default),
+};
+EXPORT_SYMBOL_GPL(cs47l35_16bit_spi_regmap);
+
+const struct regmap_config cs47l35_16bit_i2c_regmap = {
+ .name = "cs47l35_16bit",
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = 0x1b00,
+ .readable_reg = cs47l35_16bit_readable_register,
+ .volatile_reg = cs47l35_16bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = cs47l35_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs47l35_reg_default),
+};
+EXPORT_SYMBOL_GPL(cs47l35_16bit_i2c_regmap);
+
+const struct regmap_config cs47l35_32bit_spi_regmap = {
+ .name = "cs47l35_32bit",
+ .reg_bits = 32,
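+ /* each 32-bit register spans two 16-bit addresses, hence the stride of 2 */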
+ .reg_stride = 2,
+ .pad_bits = 16,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_DSP3_SCRATCH_2,
+ .readable_reg = cs47l35_32bit_readable_register,
+ .volatile_reg = cs47l35_32bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(cs47l35_32bit_spi_regmap);
+
+const struct regmap_config cs47l35_32bit_i2c_regmap = {
+ .name = "cs47l35_32bit",
+ .reg_bits = 32,
+ .reg_stride = 2,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_DSP3_SCRATCH_2,
+ .readable_reg = cs47l35_32bit_readable_register,
+ .volatile_reg = cs47l35_32bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(cs47l35_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l85-tables.c b/drivers/mfd/cs47l85-tables.c
new file mode 100644
index 000000000000..43803145d8e5
--- /dev/null
+++ b/drivers/mfd/cs47l85-tables.c
@@ -0,0 +1,3009 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Regmap tables for CS47L85 codec
+ *
+ * Copyright (C) 2015-2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/registers.h>
+
+#include "madera.h"
+
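+/* Register patch sequences for each silicon revision; see cs47l85_patch() */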
+static const struct reg_sequence cs47l85_reva_16_patch[] = {
+ { 0x80, 0x0003 },
+ { 0x213, 0x03E4 },
+ { 0x177, 0x0281 },
+ { 0x197, 0x0281 },
+ { 0x1B7, 0x0281 },
+ { 0x4B1, 0x010A },
+ { 0x4CF, 0x0933 },
+ { 0x36C, 0x011B },
+ { 0x4B8, 0x1120 },
+ { 0x4A0, 0x3280 },
+ { 0x4A1, 0x3200 },
+ { 0x4A2, 0x3200 },
+ { 0x441, 0xC050 },
+ { 0x4A4, 0x000B },
+ { 0x4A5, 0x000B },
+ { 0x4A6, 0x000B },
+ { 0x4E2, 0x1E1D },
+ { 0x4E3, 0x1E1D },
+ { 0x4E4, 0x1E1D },
+ { 0x293, 0x0080 },
+ { 0x17D, 0x0303 },
+ { 0x19D, 0x0303 },
+ { 0x27E, 0x0000 },
+ { 0x80, 0x0000 },
+ { 0x80, 0x0000 },
+ { 0x448, 0x003f },
+};
+
+static const struct reg_sequence cs47l85_revc_16_patch[] = {
+ { 0x27E, 0x0000 },
+ { 0x2C2, 0x0005 },
+ { 0x448, 0x003f },
+};
+
+static const struct reg_sequence cs47l85_reva_32_patch[] = {
+ { 0x3000, 0xC2253632 },
+ { 0x3002, 0xC2300001 },
+ { 0x3004, 0x8225100E },
+ { 0x3006, 0x22251803 },
+ { 0x3008, 0x82310B00 },
+ { 0x300A, 0xE231023B },
+ { 0x300C, 0x02313B01 },
+ { 0x300E, 0x62300000 },
+ { 0x3010, 0xE2314288 },
+ { 0x3012, 0x02310B00 },
+ { 0x3014, 0x02310B00 },
+ { 0x3016, 0x04050100 },
+ { 0x3018, 0x42310C02 },
+ { 0x301A, 0xE2310227 },
+ { 0x301C, 0x02313B01 },
+ { 0x301E, 0xE2314266 },
+ { 0x3020, 0xE2315294 },
+ { 0x3022, 0x02310B00 },
+ { 0x3024, 0x02310B00 },
+ { 0x3026, 0x02251100 },
+ { 0x3028, 0x02251401 },
+ { 0x302A, 0x02250200 },
+ { 0x302C, 0x02251001 },
+ { 0x302E, 0x02250200 },
+ { 0x3030, 0xE2310266 },
+ { 0x3032, 0x82314B15 },
+ { 0x3034, 0x82310B15 },
+ { 0x3036, 0xE2315294 },
+ { 0x3038, 0x02310B00 },
+ { 0x303A, 0x8225160D },
+ { 0x303C, 0x0225F501 },
+ { 0x303E, 0x8225061C },
+ { 0x3040, 0x02251000 },
+ { 0x3042, 0x04051101 },
+ { 0x3044, 0x02251800 },
+ { 0x3046, 0x42251203 },
+ { 0x3048, 0x02251101 },
+ { 0x304A, 0xC2251300 },
+ { 0x304C, 0x2225FB02 },
+ { 0x3050, 0xC2263632 },
+ { 0x3052, 0xC2300001 },
+ { 0x3054, 0x8226100E },
+ { 0x3056, 0x22261803 },
+ { 0x3058, 0x82310B02 },
+ { 0x305A, 0xE231023B },
+ { 0x305C, 0x02313B01 },
+ { 0x305E, 0x62300000 },
+ { 0x3060, 0xE2314288 },
+ { 0x3062, 0x02310B00 },
+ { 0x3064, 0x02310B00 },
+ { 0x3066, 0x04050000 },
+ { 0x3068, 0x42310C03 },
+ { 0x306A, 0xE2310227 },
+ { 0x306C, 0x02313B01 },
+ { 0x306E, 0xE2314266 },
+ { 0x3070, 0xE2315294 },
+ { 0x3072, 0x02310B00 },
+ { 0x3074, 0x02310B00 },
+ { 0x3076, 0x02261100 },
+ { 0x3078, 0x02261401 },
+ { 0x307A, 0x02260200 },
+ { 0x307C, 0x02261001 },
+ { 0x307E, 0x02260200 },
+ { 0x3080, 0xE2310266 },
+ { 0x3082, 0x82314B17 },
+ { 0x3084, 0x82310B17 },
+ { 0x3086, 0xE2315294 },
+ { 0x3088, 0x02310B00 },
+ { 0x308A, 0x8226160D },
+ { 0x308C, 0x0226F501 },
+ { 0x308E, 0x8226061C },
+ { 0x3090, 0x02261000 },
+ { 0x3092, 0x04051101 },
+ { 0x3094, 0x02261800 },
+ { 0x3096, 0x42261203 },
+ { 0x3098, 0x02261101 },
+ { 0x309A, 0xC2261300 },
+ { 0x309C, 0x2226FB02 },
+ { 0x309E, 0x0000F000 },
+ { 0x30A0, 0xC2273632 },
+ { 0x30A2, 0xC2400001 },
+ { 0x30A4, 0x8227100E },
+ { 0x30A6, 0x22271803 },
+ { 0x30A8, 0x82410B00 },
+ { 0x30AA, 0xE241023B },
+ { 0x30AC, 0x02413B01 },
+ { 0x30AE, 0x62400000 },
+ { 0x30B0, 0xE2414288 },
+ { 0x30B2, 0x02410B00 },
+ { 0x30B4, 0x02410B00 },
+ { 0x30B6, 0x04050300 },
+ { 0x30B8, 0x42410C02 },
+ { 0x30BA, 0xE2410227 },
+ { 0x30BC, 0x02413B01 },
+ { 0x30BE, 0xE2414266 },
+ { 0x30C0, 0xE2415294 },
+ { 0x30C2, 0x02410B00 },
+ { 0x30C4, 0x02410B00 },
+ { 0x30C6, 0x02271100 },
+ { 0x30C8, 0x02271401 },
+ { 0x30CA, 0x02270200 },
+ { 0x30CC, 0x02271001 },
+ { 0x30CE, 0x02270200 },
+ { 0x30D0, 0xE2410266 },
+ { 0x30D2, 0x82414B15 },
+ { 0x30D4, 0x82410B15 },
+ { 0x30D6, 0xE2415294 },
+ { 0x30D8, 0x02410B00 },
+ { 0x30DA, 0x8227160D },
+ { 0x30DC, 0x0227F501 },
+ { 0x30DE, 0x8227061C },
+ { 0x30E0, 0x02271000 },
+ { 0x30E2, 0x04051101 },
+ { 0x30E4, 0x02271800 },
+ { 0x30E6, 0x42271203 },
+ { 0x30E8, 0x02271101 },
+ { 0x30EA, 0xC2271300 },
+ { 0x30EC, 0x2227FB02 },
+ { 0x30F0, 0xC2283632 },
+ { 0x30F2, 0xC2400001 },
+ { 0x30F4, 0x8228100E },
+ { 0x30F6, 0x22281803 },
+ { 0x30F8, 0x82410B02 },
+ { 0x30FA, 0xE241023B },
+ { 0x30FC, 0x02413B01 },
+ { 0x30FE, 0x62400000 },
+ { 0x3100, 0xE2414288 },
+ { 0x3102, 0x02410B00 },
+ { 0x3104, 0x02410B00 },
+ { 0x3106, 0x04050200 },
+ { 0x3108, 0x42410C03 },
+ { 0x310A, 0xE2410227 },
+ { 0x310C, 0x02413B01 },
+ { 0x310E, 0xE2414266 },
+ { 0x3110, 0xE2415294 },
+ { 0x3112, 0x02410B00 },
+ { 0x3114, 0x02410B00 },
+ { 0x3116, 0x02281100 },
+ { 0x3118, 0x02281401 },
+ { 0x311A, 0x02280200 },
+ { 0x311C, 0x02281001 },
+ { 0x311E, 0x02280200 },
+ { 0x3120, 0xE2410266 },
+ { 0x3122, 0x82414B17 },
+ { 0x3124, 0x82410B17 },
+ { 0x3126, 0xE2415294 },
+ { 0x3128, 0x02410B00 },
+ { 0x312A, 0x8228160D },
+ { 0x312C, 0x0228F501 },
+ { 0x312E, 0x8228061C },
+ { 0x3130, 0x02281000 },
+ { 0x3132, 0x04051101 },
+ { 0x3134, 0x02281800 },
+ { 0x3136, 0x42281203 },
+ { 0x3138, 0x02281101 },
+ { 0x313A, 0xC2281300 },
+ { 0x313C, 0x2228FB02 },
+ { 0x3140, 0xC2293632 },
+ { 0x3142, 0xC2500001 },
+ { 0x3144, 0x8229100E },
+ { 0x3146, 0x22291803 },
+ { 0x3148, 0x82510B00 },
+ { 0x314A, 0xE251023B },
+ { 0x314C, 0x02513B01 },
+ { 0x314E, 0x62500000 },
+ { 0x3150, 0xE2514288 },
+ { 0x3152, 0x02510B00 },
+ { 0x3154, 0x02510B00 },
+ { 0x3156, 0x04050500 },
+ { 0x3158, 0x42510C02 },
+ { 0x315A, 0xE2510227 },
+ { 0x315C, 0x02513B01 },
+ { 0x315E, 0xE2514266 },
+ { 0x3160, 0xE2515294 },
+ { 0x3162, 0x02510B00 },
+ { 0x3164, 0x02510B00 },
+ { 0x3166, 0x02291100 },
+ { 0x3168, 0x02291401 },
+ { 0x316A, 0x02290200 },
+ { 0x316C, 0x02291001 },
+ { 0x316E, 0x02290200 },
+ { 0x3170, 0xE2510266 },
+ { 0x3172, 0x82514B15 },
+ { 0x3174, 0x82510B15 },
+ { 0x3176, 0xE2515294 },
+ { 0x3178, 0x02510B00 },
+ { 0x317A, 0x8229160D },
+ { 0x317C, 0x0229F501 },
+ { 0x317E, 0x8229061C },
+ { 0x3180, 0x02291000 },
+ { 0x3182, 0x04051101 },
+ { 0x3184, 0x02291800 },
+ { 0x3186, 0x42291203 },
+ { 0x3188, 0x02291101 },
+ { 0x318A, 0xC2291300 },
+ { 0x318C, 0x2229FB02 },
+ { 0x3190, 0xC22A3632 },
+ { 0x3192, 0xC2500001 },
+ { 0x3194, 0x822A100E },
+ { 0x3196, 0x222A1803 },
+ { 0x3198, 0x82510B02 },
+ { 0x319A, 0xE251023B },
+ { 0x319C, 0x02513B01 },
+ { 0x319E, 0x62500000 },
+ { 0x31A0, 0xE2514288 },
+ { 0x31A2, 0x02510B00 },
+ { 0x31A4, 0x02510B00 },
+ { 0x31A6, 0x04050400 },
+ { 0x31A8, 0x42510C03 },
+ { 0x31AA, 0xE2510227 },
+ { 0x31AC, 0x02513B01 },
+ { 0x31AE, 0xE2514266 },
+ { 0x31B0, 0xE2515294 },
+ { 0x31B2, 0x02510B00 },
+ { 0x31B4, 0x02510B00 },
+ { 0x31B6, 0x022A1100 },
+ { 0x31B8, 0x022A1401 },
+ { 0x31BA, 0x022A0200 },
+ { 0x31BC, 0x022A1001 },
+ { 0x31BE, 0x022A0200 },
+ { 0x31C0, 0xE2510266 },
+ { 0x31C2, 0x82514B17 },
+ { 0x31C4, 0x82510B17 },
+ { 0x31C6, 0xE2515294 },
+ { 0x31C8, 0x02510B00 },
+ { 0x31CA, 0x822A160D },
+ { 0x31CC, 0x022AF501 },
+ { 0x31CE, 0x822A061C },
+ { 0x31D0, 0x022A1000 },
+ { 0x31D2, 0x04051101 },
+ { 0x31D4, 0x022A1800 },
+ { 0x31D6, 0x422A1203 },
+ { 0x31D8, 0x022A1101 },
+ { 0x31DA, 0xC22A1300 },
+ { 0x31DC, 0x222AFB02 },
+};
+
+static const struct reg_sequence cs47l85_revc_32_patch[] = {
+ { 0x3380, 0xE4103066 },
+ { 0x3382, 0xE4103070 },
+ { 0x3384, 0xE4103078 },
+ { 0x3386, 0xE4103080 },
+ { 0x3388, 0xE410F080 },
+ { 0x338A, 0xE4143066 },
+ { 0x338C, 0xE4143070 },
+ { 0x338E, 0xE4143078 },
+ { 0x3390, 0xE4143080 },
+ { 0x3392, 0xE414F080 },
+ { 0x3394, 0xE4103078 },
+ { 0x3396, 0xE4103070 },
+ { 0x3398, 0xE4103066 },
+ { 0x339A, 0xE410F056 },
+ { 0x339C, 0xE4143078 },
+ { 0x339E, 0xE4143070 },
+ { 0x33A0, 0xE4143066 },
+ { 0x33A2, 0xE414F056 },
+};
+
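+/*
+ * Apply the revision-specific register patch tables. Silicon revisions
+ * 0 and 1 take the reva tables; all later revisions take the revc
+ * tables. The 16-bit and 32-bit register pages sit behind separate
+ * regmaps, so each patch is registered against its own map.
+ */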
+int cs47l85_patch(struct madera *madera)
+{
+ int ret;
+ const struct reg_sequence *patch16;
+ const struct reg_sequence *patch32;
+ unsigned int num16, num32;
+
+ switch (madera->rev) {
+ case 0:
+ case 1:
+ patch16 = cs47l85_reva_16_patch;
+ num16 = ARRAY_SIZE(cs47l85_reva_16_patch);
+
+ patch32 = cs47l85_reva_32_patch;
+ num32 = ARRAY_SIZE(cs47l85_reva_32_patch);
+ break;
+ default:
+ patch16 = cs47l85_revc_16_patch;
+ num16 = ARRAY_SIZE(cs47l85_revc_16_patch);
+
+ patch32 = cs47l85_revc_32_patch;
+ num32 = ARRAY_SIZE(cs47l85_revc_32_patch);
+ break;
+ }
+
+ ret = regmap_register_patch(madera->regmap, patch16, num16);
+ if (ret < 0) {
+ dev_err(madera->dev,
+ "Error applying 16-bit patch: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_register_patch(madera->regmap_32bit, patch32, num32);
+ if (ret < 0) {
+ dev_err(madera->dev,
+ "Error applying 32-bit patch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs47l85_patch);
+
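+/*
+ * Usage sketch (illustrative only, not part of the patch tables): the
+ * Madera core driver is assumed to call cs47l85_patch() from its probe
+ * path once both regmaps are available, e.g.:
+ *
+ * ret = cs47l85_patch(madera);
+ * if (ret)
+ * dev_err(madera->dev, "Failed to apply patch: %d\n", ret);
+ *
+ * regmap_register_patch() both writes the sequence immediately and keeps
+ * it registered so it is re-applied when the register cache is synced
+ * after a reset or resume.
+ */
+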
+static const struct reg_default cs47l85_reg_default[] = {
+ { 0x00000020, 0x0000 }, /* R32 (0x20) - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 (0x21) - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 (0x22) - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 (0x23) - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 (0x24) - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 (0x30) - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 (0x31) - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 (0x32) - PWM Drive 3 */
+ { 0x00000061, 0x01ff }, /* R97 (0x61) - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01ff }, /* R98 (0x62) - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01ff }, /* R99 (0x63) - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01ff }, /* R100 (0x64) - Sample Rate Sequence Select 4 */
+ { 0x00000066, 0x01ff }, /* R102 (0x66) - Always On Triggers Sequence Select 1 */
+ { 0x00000067, 0x01ff }, /* R103 (0x67) - Always On Triggers Sequence Select 2 */
+ { 0x00000090, 0x0000 }, /* R144 (0x90) - Haptics Control 1 */
+ { 0x00000091, 0x7fff }, /* R145 (0x91) - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 (0x92) - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 (0x93) - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 (0x94) - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 (0x95) - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 (0x96) - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 (0x97) - Haptics phase 3 duration */
+ { 0x000000a0, 0x0000 }, /* R160 (0xa0) - Comfort Noise Generator */
+ { 0x00000100, 0x0002 }, /* R256 (0x100) - Clock 32k 1 */
+ { 0x00000101, 0x0404 }, /* R257 (0x101) - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 (0x102) - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 (0x103) - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 (0x104) - Sample rate 3 */
+ { 0x00000112, 0x0305 }, /* R274 (0x112) - Async clock 1 */
+ { 0x00000113, 0x0011 }, /* R275 (0x113) - Async sample rate 1 */
+ { 0x00000114, 0x0011 }, /* R276 (0x114) - Async sample rate 2 */
+ { 0x00000120, 0x0305 }, /* R288 (0x120) - DSP Clock 1 */
+ { 0x00000122, 0x0000 }, /* R290 (0x122) - DSP Clock 2 */
+ { 0x00000149, 0x0000 }, /* R329 (0x149) - Output system clock */
+ { 0x0000014a, 0x0000 }, /* R330 (0x14a) - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 (0x152) - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 (0x153) - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 (0x154) - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 (0x155) - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 (0x156) - Rate Estimator 5 */
+ { 0x00000171, 0x0002 }, /* R369 (0x171) - FLL1 Control 1 */
+ { 0x00000172, 0x0008 }, /* R370 (0x172) - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 (0x173) - FLL1 Control 3 */
+ { 0x00000174, 0x007d }, /* R372 (0x174) - FLL1 Control 4 */
+ { 0x00000175, 0x0000 }, /* R373 (0x175) - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 (0x176) - FLL1 Control 6 */
+ { 0x00000177, 0x0281 }, /* R375 (0x177) - FLL1 Loop Filter Test 1 */
+ { 0x00000179, 0x0000 }, /* R377 (0x179) - FLL1 Control 7 */
+ { 0x00000181, 0x0000 }, /* R385 (0x181) - FLL1 Synchroniser 1 */
+ { 0x00000182, 0x0000 }, /* R386 (0x182) - FLL1 Synchroniser 2 */
+ { 0x00000183, 0x0000 }, /* R387 (0x183) - FLL1 Synchroniser 3 */
+ { 0x00000184, 0x0000 }, /* R388 (0x184) - FLL1 Synchroniser 4 */
+ { 0x00000185, 0x0000 }, /* R389 (0x185) - FLL1 Synchroniser 5 */
+ { 0x00000186, 0x0000 }, /* R390 (0x186) - FLL1 Synchroniser 6 */
+ { 0x00000187, 0x0001 }, /* R391 (0x187) - FLL1 Synchroniser 7 */
+ { 0x00000189, 0x0000 }, /* R393 (0x189) - FLL1 Spread Spectrum */
+ { 0x0000018a, 0x000c }, /* R394 (0x18a) - FLL1 GPIO Clock */
+ { 0x00000191, 0x0002 }, /* R401 (0x191) - FLL2 Control 1 */
+ { 0x00000192, 0x0008 }, /* R402 (0x192) - FLL2 Control 2 */
+ { 0x00000193, 0x0018 }, /* R403 (0x193) - FLL2 Control 3 */
+ { 0x00000194, 0x007d }, /* R404 (0x194) - FLL2 Control 4 */
+ { 0x00000195, 0x0000 }, /* R405 (0x195) - FLL2 Control 5 */
+ { 0x00000196, 0x0000 }, /* R406 (0x196) - FLL2 Control 6 */
+ { 0x00000197, 0x0281 }, /* R407 (0x197) - FLL2 Loop Filter Test 1 */
+ { 0x00000199, 0x0000 }, /* R409 (0x199) - FLL2 Control 7 */
+ { 0x000001a1, 0x0000 }, /* R417 (0x1a1) - FLL2 Synchroniser 1 */
+ { 0x000001a2, 0x0000 }, /* R418 (0x1a2) - FLL2 Synchroniser 2 */
+ { 0x000001a3, 0x0000 }, /* R419 (0x1a3) - FLL2 Synchroniser 3 */
+ { 0x000001a4, 0x0000 }, /* R420 (0x1a4) - FLL2 Synchroniser 4 */
+ { 0x000001a5, 0x0000 }, /* R421 (0x1a5) - FLL2 Synchroniser 5 */
+ { 0x000001a6, 0x0000 }, /* R422 (0x1a6) - FLL2 Synchroniser 6 */
+ { 0x000001a7, 0x0001 }, /* R423 (0x1a7) - FLL2 Synchroniser 7 */
+ { 0x000001a9, 0x0000 }, /* R425 (0x1a9) - FLL2 Spread Spectrum */
+ { 0x000001aa, 0x000c }, /* R426 (0x1aa) - FLL2 GPIO Clock */
+ { 0x000001b1, 0x0002 }, /* R433 (0x1b1) - FLL3 Control 1 */
+ { 0x000001b2, 0x0008 }, /* R434 (0x1b2) - FLL3 Control 2 */
+ { 0x000001b3, 0x0018 }, /* R435 (0x1b3) - FLL3 Control 3 */
+ { 0x000001b4, 0x007d }, /* R436 (0x1b4) - FLL3 Control 4 */
+ { 0x000001b5, 0x0000 }, /* R437 (0x1b5) - FLL3 Control 5 */
+ { 0x000001b6, 0x0000 }, /* R438 (0x1b6) - FLL3 Control 6 */
+ { 0x000001b7, 0x0281 }, /* R439 (0x1b7) - FLL3 Loop Filter Test 1 */
+ { 0x000001b9, 0x0000 }, /* R441 (0x1b9) - FLL3 Control 7 */
+ { 0x000001c1, 0x0000 }, /* R449 (0x1c1) - FLL3 Synchroniser 1 */
+ { 0x000001c2, 0x0000 }, /* R450 (0x1c2) - FLL3 Synchroniser 2 */
+ { 0x000001c3, 0x0000 }, /* R451 (0x1c3) - FLL3 Synchroniser 3 */
+ { 0x000001c4, 0x0000 }, /* R452 (0x1c4) - FLL3 Synchroniser 4 */
+ { 0x000001c5, 0x0000 }, /* R453 (0x1c5) - FLL3 Synchroniser 5 */
+ { 0x000001c6, 0x0000 }, /* R454 (0x1c6) - FLL3 Synchroniser 6 */
+ { 0x000001c7, 0x0001 }, /* R455 (0x1c7) - FLL3 Synchroniser 7 */
+ { 0x000001c9, 0x0000 }, /* R457 (0x1c9) - FLL3 Spread Spectrum */
+ { 0x000001ca, 0x000c }, /* R458 (0x1ca) - FLL3 GPIO Clock */
+ { 0x00000200, 0x0006 }, /* R512 (0x200) - Mic Charge Pump 1 */
+ { 0x0000020b, 0x0400 }, /* R523 (0x20b) - HP Charge Pump 8 */
+ { 0x00000210, 0x0184 }, /* R528 (0x210) - LDO1 Control 1 */
+ { 0x00000213, 0x03e4 }, /* R531 (0x213) - LDO2 Control 1 */
+ { 0x00000218, 0x00e6 }, /* R536 (0x218) - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x00e6 }, /* R537 (0x219) - Mic Bias Ctrl 2 */
+ { 0x0000021a, 0x00e6 }, /* R538 (0x21a) - Mic Bias Ctrl 3 */
+ { 0x0000021b, 0x00e6 }, /* R539 (0x21b) - Mic Bias Ctrl 4 */
+ { 0x0000027e, 0x0000 }, /* R638 (0x27e) - EDRE HP stereo control */
+ { 0x00000293, 0x0000 }, /* R659 (0x293) - Accessory Detect Mode 1 */
+ { 0x0000029b, 0x0000 }, /* R667 (0x29b) - Headphone Detect 1 */
+ { 0x000002a3, 0x1102 }, /* R675 (0x2a3) - Mic Detect Control 1 */
+ { 0x000002a4, 0x009f }, /* R676 (0x2a4) - Mic Detect Control 2 */
+ { 0x000002a6, 0x3737 }, /* R678 (0x2a6) - Mic Detect Level 1 */
+ { 0x000002a7, 0x2c37 }, /* R679 (0x2a7) - Mic Detect Level 2 */
+ { 0x000002a8, 0x1422 }, /* R680 (0x2a8) - Mic Detect Level 3 */
+ { 0x000002a9, 0x030a }, /* R681 (0x2a9) - Mic Detect Level 4 */
+ { 0x000002c6, 0x0010 }, /* R710 (0x2c6) - Mic Clamp control */
+ { 0x000002c8, 0x0000 }, /* R712 (0x2c8) - GP switch 1 */
+ { 0x000002d3, 0x0000 }, /* R723 (0x2d3) - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 (0x300) - Input Enables */
+ { 0x00000308, 0x0000 }, /* R776 (0x308) - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 (0x309) - Input Volume Ramp */
+ { 0x0000030c, 0x0002 }, /* R780 (0x30c) - HPF Control */
+ { 0x00000310, 0x0080 }, /* R784 (0x310) - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 (0x311) - ADC Digital Volume 1L */
+ { 0x00000312, 0x0500 }, /* R786 (0x312) - DMIC1L Control */
+ { 0x00000314, 0x0080 }, /* R788 (0x314) - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 (0x315) - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 (0x316) - DMIC1R Control */
+ { 0x00000318, 0x0080 }, /* R792 (0x318) - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 (0x319) - ADC Digital Volume 2L */
+ { 0x0000031a, 0x0500 }, /* R794 (0x31a) - DMIC2L Control */
+ { 0x0000031c, 0x0080 }, /* R796 (0x31c) - IN2R Control */
+ { 0x0000031d, 0x0180 }, /* R797 (0x31d) - ADC Digital Volume 2R */
+ { 0x0000031e, 0x0000 }, /* R798 (0x31e) - DMIC2R Control */
+ { 0x00000320, 0x0080 }, /* R800 (0x320) - IN3L Control */
+ { 0x00000321, 0x0180 }, /* R801 (0x321) - ADC Digital Volume 3L */
+ { 0x00000322, 0x0500 }, /* R802 (0x322) - DMIC3L Control */
+ { 0x00000324, 0x0080 }, /* R804 (0x324) - IN3R Control */
+ { 0x00000325, 0x0180 }, /* R805 (0x325) - ADC Digital Volume 3R */
+ { 0x00000326, 0x0000 }, /* R806 (0x326) - DMIC3R Control */
+ { 0x00000328, 0x0000 }, /* R808 (0x328) - IN4 Control */
+ { 0x00000329, 0x0180 }, /* R809 (0x329) - ADC Digital Volume 4L */
+ { 0x0000032a, 0x0500 }, /* R810 (0x32a) - DMIC4L Control */
+ { 0x0000032c, 0x0000 }, /* R812 (0x32c) - IN4R Control */
+ { 0x0000032d, 0x0180 }, /* R813 (0x32d) - ADC Digital Volume 4R */
+ { 0x0000032e, 0x0000 }, /* R814 (0x32e) - DMIC4R Control */
+ { 0x00000330, 0x0000 }, /* R816 (0x330) - IN5L Control */
+ { 0x00000331, 0x0180 }, /* R817 (0x331) - ADC Digital Volume 5L */
+ { 0x00000332, 0x0500 }, /* R818 (0x332) - DMIC5L Control */
+ { 0x00000334, 0x0000 }, /* R820 (0x334) - IN5R Control */
+ { 0x00000335, 0x0180 }, /* R821 (0x335) - ADC Digital Volume 5R */
+ { 0x00000336, 0x0000 }, /* R822 (0x336) - DMIC5R Control */
+ { 0x00000338, 0x0000 }, /* R824 (0x338) - IN6L Control */
+ { 0x00000339, 0x0180 }, /* R825 (0x339) - ADC Digital Volume 6L */
+ { 0x0000033a, 0x0500 }, /* R826 (0x33a) - DMIC6L Control */
+ { 0x0000033c, 0x0000 }, /* R828 (0x33c) - IN6R Control */
+ { 0x0000033d, 0x0180 }, /* R829 (0x33d) - ADC Digital Volume 6R */
+ { 0x0000033e, 0x0000 }, /* R830 (0x33e) - DMIC6R Control */
+ { 0x00000400, 0x0000 }, /* R1024 (0x400) - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 (0x408) - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 (0x409) - Output Volume Ramp */
+ { 0x00000410, 0x0080 }, /* R1040 (0x410) - Output Path Config 1L */
+ { 0x00000411, 0x0180 }, /* R1041 (0x411) - DAC Digital Volume 1L */
+ { 0x00000413, 0x0001 }, /* R1043 (0x413) - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 (0x414) - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 (0x415) - DAC Digital Volume 1R */
+ { 0x00000417, 0x0002 }, /* R1047 (0x417) - Noise Gate Select 1R */
+ { 0x00000418, 0x0080 }, /* R1048 (0x418) - Output Path Config 2L */
+ { 0x00000419, 0x0180 }, /* R1049 (0x419) - DAC Digital Volume 2L */
+ { 0x0000041b, 0x0004 }, /* R1051 (0x41b) - Noise Gate Select 2L */
+ { 0x0000041c, 0x0080 }, /* R1052 (0x41c) - Output Path Config 2R */
+ { 0x0000041d, 0x0180 }, /* R1053 (0x41d) - DAC Digital Volume 2R */
+ { 0x0000041f, 0x0008 }, /* R1055 (0x41f) - Noise Gate Select 2R */
+ { 0x00000420, 0x0080 }, /* R1056 (0x420) - Output Path Config 3L */
+ { 0x00000421, 0x0180 }, /* R1057 (0x421) - DAC Digital Volume 3L */
+ { 0x00000423, 0x0010 }, /* R1059 (0x423) - Noise Gate Select 3L */
+ { 0x00000424, 0x0080 }, /* R1060 (0x424) - Output Path Config 3R */
+ { 0x00000425, 0x0180 }, /* R1061 (0x425) - DAC Digital Volume 3R */
+ { 0x00000427, 0x0020 }, /* R1063 (0x427) - Noise Gate Select 3R */
+ { 0x00000428, 0x0000 }, /* R1064 (0x428) - Output Path Config 4L */
+ { 0x00000429, 0x0180 }, /* R1065 (0x429) - DAC Digital Volume 4L */
+ { 0x0000042b, 0x0040 }, /* R1067 (0x42b) - Noise Gate Select 4L */
+ { 0x0000042c, 0x0000 }, /* R1068 (0x42c) - Output Path Config 4R */
+ { 0x0000042d, 0x0180 }, /* R1069 (0x42d) - DAC Digital Volume 4R */
+ { 0x0000042f, 0x0080 }, /* R1071 (0x42f) - Noise Gate Select 4R */
+ { 0x00000430, 0x0000 }, /* R1072 (0x430) - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 (0x431) - DAC Digital Volume 5L */
+ { 0x00000433, 0x0100 }, /* R1075 (0x433) - Noise Gate Select 5L */
+ { 0x00000434, 0x0000 }, /* R1076 (0x434) - Output Path Config 5R */
+ { 0x00000435, 0x0180 }, /* R1077 (0x435) - DAC Digital Volume 5R */
+ { 0x00000437, 0x0200 }, /* R1079 (0x437) - Noise Gate Select 5R */
+ { 0x00000438, 0x0000 }, /* R1080 (0x438) - Output Path Config 6L */
+ { 0x00000439, 0x0180 }, /* R1081 (0x439) - DAC Digital Volume 6L */
+ { 0x0000043b, 0x0400 }, /* R1083 (0x43b) - Noise Gate Select 6L */
+ { 0x0000043c, 0x0000 }, /* R1084 (0x43c) - Output Path Config 6R */
+ { 0x0000043d, 0x0180 }, /* R1085 (0x43d) - DAC Digital Volume 6R */
+ { 0x0000043f, 0x0800 }, /* R1087 (0x43f) - Noise Gate Select 6R */
+ { 0x00000440, 0x003f }, /* R1088 (0x440) - DRE Enable */
+ { 0x00000448, 0x003f }, /* R1096 (0x448) - EDRE Enable */
+ { 0x0000044a, 0x0000 }, /* R1098 (0x44a) - EDRE Manual */
+ { 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
+ { 0x00000451, 0x0000 }, /* R1105 (0x451) - DAC AEC Control 2 */
+ { 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
+ { 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
+ { 0x00000492, 0x0069 }, /* R1170 (0x492) - PDM SPK2 CTRL 1 */
+ { 0x00000493, 0x0000 }, /* R1171 (0x493) - PDM SPK2 CTRL 2 */
+ { 0x000004a0, 0x3210 }, /* R1184 (0x4a0) - HP1 Short Circuit Ctrl */
+ { 0x000004a1, 0x3200 }, /* R1185 (0x4a1) - HP2 Short Circuit Ctrl */
+ { 0x000004a2, 0x3200 }, /* R1186 (0x4a2) - HP3 Short Circuit Ctrl */
+ { 0x000004a8, 0x7020 }, /* R1192 (0x4a8) - HP Test Ctrl 5 */
+ { 0x000004a9, 0x7020 }, /* R1193 (0x4a9) - HP Test Ctrl 6 */
+ { 0x00000500, 0x000c }, /* R1280 (0x500) - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0000 }, /* R1281 (0x501) - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 (0x502) - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 (0x503) - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 (0x504) - AIF1 Format */
+ { 0x00000506, 0x0040 }, /* R1286 (0x506) - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 (0x507) - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 (0x508) - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 (0x509) - AIF1 Frame Ctrl 3 */
+ { 0x0000050a, 0x0001 }, /* R1290 (0x50a) - AIF1 Frame Ctrl 4 */
+ { 0x0000050b, 0x0002 }, /* R1291 (0x50b) - AIF1 Frame Ctrl 5 */
+ { 0x0000050c, 0x0003 }, /* R1292 (0x50c) - AIF1 Frame Ctrl 6 */
+ { 0x0000050d, 0x0004 }, /* R1293 (0x50d) - AIF1 Frame Ctrl 7 */
+ { 0x0000050e, 0x0005 }, /* R1294 (0x50e) - AIF1 Frame Ctrl 8 */
+ { 0x0000050f, 0x0006 }, /* R1295 (0x50f) - AIF1 Frame Ctrl 9 */
+ { 0x00000510, 0x0007 }, /* R1296 (0x510) - AIF1 Frame Ctrl 10 */
+ { 0x00000511, 0x0000 }, /* R1297 (0x511) - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 (0x512) - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 (0x513) - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 (0x514) - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 (0x515) - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 (0x516) - AIF1 Frame Ctrl 16 */
+ { 0x00000517, 0x0006 }, /* R1303 (0x517) - AIF1 Frame Ctrl 17 */
+ { 0x00000518, 0x0007 }, /* R1304 (0x518) - AIF1 Frame Ctrl 18 */
+ { 0x00000519, 0x0000 }, /* R1305 (0x519) - AIF1 Tx Enables */
+ { 0x0000051a, 0x0000 }, /* R1306 (0x51a) - AIF1 Rx Enables */
+ { 0x00000540, 0x000c }, /* R1344 (0x540) - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0000 }, /* R1345 (0x541) - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 (0x542) - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 (0x543) - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 (0x544) - AIF2 Format */
+ { 0x00000546, 0x0040 }, /* R1350 (0x546) - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 (0x547) - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 (0x548) - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 (0x549) - AIF2 Frame Ctrl 3 */
+ { 0x0000054a, 0x0001 }, /* R1354 (0x54a) - AIF2 Frame Ctrl 4 */
+ { 0x0000054b, 0x0002 }, /* R1355 (0x54b) - AIF2 Frame Ctrl 5 */
+ { 0x0000054c, 0x0003 }, /* R1356 (0x54c) - AIF2 Frame Ctrl 6 */
+ { 0x0000054d, 0x0004 }, /* R1357 (0x54d) - AIF2 Frame Ctrl 7 */
+ { 0x0000054e, 0x0005 }, /* R1358 (0x54e) - AIF2 Frame Ctrl 8 */
+ { 0x0000054f, 0x0006 }, /* R1359 (0x54f) - AIF2 Frame Ctrl 9 */
+ { 0x00000550, 0x0007 }, /* R1360 (0x550) - AIF2 Frame Ctrl 10 */
+ { 0x00000551, 0x0000 }, /* R1361 (0x551) - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 (0x552) - AIF2 Frame Ctrl 12 */
+ { 0x00000553, 0x0002 }, /* R1363 (0x553) - AIF2 Frame Ctrl 13 */
+ { 0x00000554, 0x0003 }, /* R1364 (0x554) - AIF2 Frame Ctrl 14 */
+ { 0x00000555, 0x0004 }, /* R1365 (0x555) - AIF2 Frame Ctrl 15 */
+ { 0x00000556, 0x0005 }, /* R1366 (0x556) - AIF2 Frame Ctrl 16 */
+ { 0x00000557, 0x0006 }, /* R1367 (0x557) - AIF2 Frame Ctrl 17 */
+ { 0x00000558, 0x0007 }, /* R1368 (0x558) - AIF2 Frame Ctrl 18 */
+ { 0x00000559, 0x0000 }, /* R1369 (0x559) - AIF2 Tx Enables */
+ { 0x0000055a, 0x0000 }, /* R1370 (0x55a) - AIF2 Rx Enables */
+ { 0x00000580, 0x000c }, /* R1408 (0x580) - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0000 }, /* R1409 (0x581) - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 (0x582) - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 (0x583) - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 (0x584) - AIF3 Format */
+ { 0x00000586, 0x0040 }, /* R1414 (0x586) - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 (0x587) - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 (0x588) - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 (0x589) - AIF3 Frame Ctrl 3 */
+ { 0x0000058a, 0x0001 }, /* R1418 (0x58a) - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 (0x591) - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 (0x592) - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 (0x599) - AIF3 Tx Enables */
+ { 0x0000059a, 0x0000 }, /* R1434 (0x59a) - AIF3 Rx Enables */
+ { 0x000005a0, 0x000c }, /* R1440 (0x5a0) - AIF4 BCLK Ctrl */
+ { 0x000005a1, 0x0000 }, /* R1441 (0x5a1) - AIF4 Tx Pin Ctrl */
+ { 0x000005a2, 0x0000 }, /* R1442 (0x5a2) - AIF4 Rx Pin Ctrl */
+ { 0x000005a3, 0x0000 }, /* R1443 (0x5a3) - AIF4 Rate Ctrl */
+ { 0x000005a4, 0x0000 }, /* R1444 (0x5a4) - AIF4 Format */
+ { 0x000005a6, 0x0040 }, /* R1446 (0x5a6) - AIF4 Rx BCLK Rate */
+ { 0x000005a7, 0x1818 }, /* R1447 (0x5a7) - AIF4 Frame Ctrl 1 */
+ { 0x000005a8, 0x1818 }, /* R1448 (0x5a8) - AIF4 Frame Ctrl 2 */
+ { 0x000005a9, 0x0000 }, /* R1449 (0x5a9) - AIF4 Frame Ctrl 3 */
+ { 0x000005aa, 0x0001 }, /* R1450 (0x5aa) - AIF4 Frame Ctrl 4 */
+ { 0x000005b1, 0x0000 }, /* R1457 (0x5b1) - AIF4 Frame Ctrl 11 */
+ { 0x000005b2, 0x0001 }, /* R1458 (0x5b2) - AIF4 Frame Ctrl 12 */
+ { 0x000005b9, 0x0000 }, /* R1465 (0x5b9) - AIF4 Tx Enables */
+ { 0x000005ba, 0x0000 }, /* R1466 (0x5ba) - AIF4 Rx Enables */
+ { 0x000005c2, 0x0000 }, /* R1474 (0x5c2) - SPD1 TX Control */
+ { 0x000005e3, 0x0000 }, /* R1507 (0x5e3) - SLIMbus Framer Ref Gear */
+ { 0x000005e5, 0x0000 }, /* R1509 (0x5e5) - SLIMbus Rates 1 */
+ { 0x000005e6, 0x0000 }, /* R1510 (0x5e6) - SLIMbus Rates 2 */
+ { 0x000005e7, 0x0000 }, /* R1511 (0x5e7) - SLIMbus Rates 3 */
+ { 0x000005e8, 0x0000 }, /* R1512 (0x5e8) - SLIMbus Rates 4 */
+ { 0x000005e9, 0x0000 }, /* R1513 (0x5e9) - SLIMbus Rates 5 */
+ { 0x000005ea, 0x0000 }, /* R1514 (0x5ea) - SLIMbus Rates 6 */
+ { 0x000005eb, 0x0000 }, /* R1515 (0x5eb) - SLIMbus Rates 7 */
+ { 0x000005ec, 0x0000 }, /* R1516 (0x5ec) - SLIMbus Rates 8 */
+ { 0x000005f5, 0x0000 }, /* R1525 (0x5f5) - SLIMbus RX Channel Enable */
+ { 0x000005f6, 0x0000 }, /* R1526 (0x5f6) - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 (0x640) - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 (0x641) - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 (0x642) - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 (0x643) - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 (0x644) - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 (0x645) - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 (0x646) - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 (0x647) - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 (0x648) - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 (0x649) - PWM2MIX Input 1 Volume */
+ { 0x0000064a, 0x0000 }, /* R1610 (0x64a) - PWM2MIX Input 2 Source */
+ { 0x0000064b, 0x0080 }, /* R1611 (0x64b) - PWM2MIX Input 2 Volume */
+ { 0x0000064c, 0x0000 }, /* R1612 (0x64c) - PWM2MIX Input 3 Source */
+ { 0x0000064d, 0x0080 }, /* R1613 (0x64d) - PWM2MIX Input 3 Volume */
+ { 0x0000064e, 0x0000 }, /* R1614 (0x64e) - PWM2MIX Input 4 Source */
+ { 0x0000064f, 0x0080 }, /* R1615 (0x64f) - PWM2MIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 (0x680) - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 (0x681) - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 (0x682) - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 (0x683) - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 (0x684) - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 (0x685) - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 (0x686) - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 (0x687) - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 (0x688) - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 (0x689) - OUT1RMIX Input 1 Volume */
+ { 0x0000068a, 0x0000 }, /* R1674 (0x68a) - OUT1RMIX Input 2 Source */
+ { 0x0000068b, 0x0080 }, /* R1675 (0x68b) - OUT1RMIX Input 2 Volume */
+ { 0x0000068c, 0x0000 }, /* R1676 (0x68c) - OUT1RMIX Input 3 Source */
+ { 0x0000068d, 0x0080 }, /* R1677 (0x68d) - OUT1RMIX Input 3 Volume */
+ { 0x0000068e, 0x0000 }, /* R1678 (0x68e) - OUT1RMIX Input 4 Source */
+ { 0x0000068f, 0x0080 }, /* R1679 (0x68f) - OUT1RMIX Input 4 Volume */
+ { 0x00000690, 0x0000 }, /* R1680 (0x690) - OUT2LMIX Input 1 Source */
+ { 0x00000691, 0x0080 }, /* R1681 (0x691) - OUT2LMIX Input 1 Volume */
+ { 0x00000692, 0x0000 }, /* R1682 (0x692) - OUT2LMIX Input 2 Source */
+ { 0x00000693, 0x0080 }, /* R1683 (0x693) - OUT2LMIX Input 2 Volume */
+ { 0x00000694, 0x0000 }, /* R1684 (0x694) - OUT2LMIX Input 3 Source */
+ { 0x00000695, 0x0080 }, /* R1685 (0x695) - OUT2LMIX Input 3 Volume */
+ { 0x00000696, 0x0000 }, /* R1686 (0x696) - OUT2LMIX Input 4 Source */
+ { 0x00000697, 0x0080 }, /* R1687 (0x697) - OUT2LMIX Input 4 Volume */
+ { 0x00000698, 0x0000 }, /* R1688 (0x698) - OUT2RMIX Input 1 Source */
+ { 0x00000699, 0x0080 }, /* R1689 (0x699) - OUT2RMIX Input 1 Volume */
+ { 0x0000069a, 0x0000 }, /* R1690 (0x69a) - OUT2RMIX Input 2 Source */
+ { 0x0000069b, 0x0080 }, /* R1691 (0x69b) - OUT2RMIX Input 2 Volume */
+ { 0x0000069c, 0x0000 }, /* R1692 (0x69c) - OUT2RMIX Input 3 Source */
+ { 0x0000069d, 0x0080 }, /* R1693 (0x69d) - OUT2RMIX Input 3 Volume */
+ { 0x0000069e, 0x0000 }, /* R1694 (0x69e) - OUT2RMIX Input 4 Source */
+ { 0x0000069f, 0x0080 }, /* R1695 (0x69f) - OUT2RMIX Input 4 Volume */
+ { 0x000006a0, 0x0000 }, /* R1696 (0x6a0) - OUT3LMIX Input 1 Source */
+ { 0x000006a1, 0x0080 }, /* R1697 (0x6a1) - OUT3LMIX Input 1 Volume */
+ { 0x000006a2, 0x0000 }, /* R1698 (0x6a2) - OUT3LMIX Input 2 Source */
+ { 0x000006a3, 0x0080 }, /* R1699 (0x6a3) - OUT3LMIX Input 2 Volume */
+ { 0x000006a4, 0x0000 }, /* R1700 (0x6a4) - OUT3LMIX Input 3 Source */
+ { 0x000006a5, 0x0080 }, /* R1701 (0x6a5) - OUT3LMIX Input 3 Volume */
+ { 0x000006a6, 0x0000 }, /* R1702 (0x6a6) - OUT3LMIX Input 4 Source */
+ { 0x000006a7, 0x0080 }, /* R1703 (0x6a7) - OUT3LMIX Input 4 Volume */
+ { 0x000006a8, 0x0000 }, /* R1704 (0x6a8) - OUT3RMIX Input 1 Source */
+ { 0x000006a9, 0x0080 }, /* R1705 (0x6a9) - OUT3RMIX Input 1 Volume */
+ { 0x000006aa, 0x0000 }, /* R1706 (0x6aa) - OUT3RMIX Input 2 Source */
+ { 0x000006ab, 0x0080 }, /* R1707 (0x6ab) - OUT3RMIX Input 2 Volume */
+ { 0x000006ac, 0x0000 }, /* R1708 (0x6ac) - OUT3RMIX Input 3 Source */
+ { 0x000006ad, 0x0080 }, /* R1709 (0x6ad) - OUT3RMIX Input 3 Volume */
+ { 0x000006ae, 0x0000 }, /* R1710 (0x6ae) - OUT3RMIX Input 4 Source */
+ { 0x000006af, 0x0080 }, /* R1711 (0x6af) - OUT3RMIX Input 4 Volume */
+ { 0x000006b0, 0x0000 }, /* R1712 (0x6b0) - OUT4LMIX Input 1 Source */
+ { 0x000006b1, 0x0080 }, /* R1713 (0x6b1) - OUT4LMIX Input 1 Volume */
+ { 0x000006b2, 0x0000 }, /* R1714 (0x6b2) - OUT4LMIX Input 2 Source */
+ { 0x000006b3, 0x0080 }, /* R1715 (0x6b3) - OUT4LMIX Input 2 Volume */
+ { 0x000006b4, 0x0000 }, /* R1716 (0x6b4) - OUT4LMIX Input 3 Source */
+ { 0x000006b5, 0x0080 }, /* R1717 (0x6b5) - OUT4LMIX Input 3 Volume */
+ { 0x000006b6, 0x0000 }, /* R1718 (0x6b6) - OUT4LMIX Input 4 Source */
+ { 0x000006b7, 0x0080 }, /* R1719 (0x6b7) - OUT4LMIX Input 4 Volume */
+ { 0x000006b8, 0x0000 }, /* R1720 (0x6b8) - OUT4RMIX Input 1 Source */
+ { 0x000006b9, 0x0080 }, /* R1721 (0x6b9) - OUT4RMIX Input 1 Volume */
+ { 0x000006ba, 0x0000 }, /* R1722 (0x6ba) - OUT4RMIX Input 2 Source */
+ { 0x000006bb, 0x0080 }, /* R1723 (0x6bb) - OUT4RMIX Input 2 Volume */
+ { 0x000006bc, 0x0000 }, /* R1724 (0x6bc) - OUT4RMIX Input 3 Source */
+ { 0x000006bd, 0x0080 }, /* R1725 (0x6bd) - OUT4RMIX Input 3 Volume */
+ { 0x000006be, 0x0000 }, /* R1726 (0x6be) - OUT4RMIX Input 4 Source */
+ { 0x000006bf, 0x0080 }, /* R1727 (0x6bf) - OUT4RMIX Input 4 Volume */
+ { 0x000006c0, 0x0000 }, /* R1728 (0x6c0) - OUT5LMIX Input 1 Source */
+ { 0x000006c1, 0x0080 }, /* R1729 (0x6c1) - OUT5LMIX Input 1 Volume */
+ { 0x000006c2, 0x0000 }, /* R1730 (0x6c2) - OUT5LMIX Input 2 Source */
+ { 0x000006c3, 0x0080 }, /* R1731 (0x6c3) - OUT5LMIX Input 2 Volume */
+ { 0x000006c4, 0x0000 }, /* R1732 (0x6c4) - OUT5LMIX Input 3 Source */
+ { 0x000006c5, 0x0080 }, /* R1733 (0x6c5) - OUT5LMIX Input 3 Volume */
+ { 0x000006c6, 0x0000 }, /* R1734 (0x6c6) - OUT5LMIX Input 4 Source */
+ { 0x000006c7, 0x0080 }, /* R1735 (0x6c7) - OUT5LMIX Input 4 Volume */
+ { 0x000006c8, 0x0000 }, /* R1736 (0x6c8) - OUT5RMIX Input 1 Source */
+ { 0x000006c9, 0x0080 }, /* R1737 (0x6c9) - OUT5RMIX Input 1 Volume */
+ { 0x000006ca, 0x0000 }, /* R1738 (0x6ca) - OUT5RMIX Input 2 Source */
+ { 0x000006cb, 0x0080 }, /* R1739 (0x6cb) - OUT5RMIX Input 2 Volume */
+ { 0x000006cc, 0x0000 }, /* R1740 (0x6cc) - OUT5RMIX Input 3 Source */
+ { 0x000006cd, 0x0080 }, /* R1741 (0x6cd) - OUT5RMIX Input 3 Volume */
+ { 0x000006ce, 0x0000 }, /* R1742 (0x6ce) - OUT5RMIX Input 4 Source */
+ { 0x000006cf, 0x0080 }, /* R1743 (0x6cf) - OUT5RMIX Input 4 Volume */
+ { 0x000006d0, 0x0000 }, /* R1744 (0x6d0) - OUT6LMIX Input 1 Source */
+ { 0x000006d1, 0x0080 }, /* R1745 (0x6d1) - OUT6LMIX Input 1 Volume */
+ { 0x000006d2, 0x0000 }, /* R1746 (0x6d2) - OUT6LMIX Input 2 Source */
+ { 0x000006d3, 0x0080 }, /* R1747 (0x6d3) - OUT6LMIX Input 2 Volume */
+ { 0x000006d4, 0x0000 }, /* R1748 (0x6d4) - OUT6LMIX Input 3 Source */
+ { 0x000006d5, 0x0080 }, /* R1749 (0x6d5) - OUT6LMIX Input 3 Volume */
+ { 0x000006d6, 0x0000 }, /* R1750 (0x6d6) - OUT6LMIX Input 4 Source */
+ { 0x000006d7, 0x0080 }, /* R1751 (0x6d7) - OUT6LMIX Input 4 Volume */
+ { 0x000006d8, 0x0000 }, /* R1752 (0x6d8) - OUT6RMIX Input 1 Source */
+ { 0x000006d9, 0x0080 }, /* R1753 (0x6d9) - OUT6RMIX Input 1 Volume */
+ { 0x000006da, 0x0000 }, /* R1754 (0x6da) - OUT6RMIX Input 2 Source */
+ { 0x000006db, 0x0080 }, /* R1755 (0x6db) - OUT6RMIX Input 2 Volume */
+ { 0x000006dc, 0x0000 }, /* R1756 (0x6dc) - OUT6RMIX Input 3 Source */
+ { 0x000006dd, 0x0080 }, /* R1757 (0x6dd) - OUT6RMIX Input 3 Volume */
+ { 0x000006de, 0x0000 }, /* R1758 (0x6de) - OUT6RMIX Input 4 Source */
+ { 0x000006df, 0x0080 }, /* R1759 (0x6df) - OUT6RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 (0x700) - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 (0x701) - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 (0x702) - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 (0x703) - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 (0x704) - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 (0x705) - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 (0x706) - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 (0x707) - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 (0x708) - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 (0x709) - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070a, 0x0000 }, /* R1802 (0x70a) - AIF1TX2MIX Input 2 Source */
+ { 0x0000070b, 0x0080 }, /* R1803 (0x70b) - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070c, 0x0000 }, /* R1804 (0x70c) - AIF1TX2MIX Input 3 Source */
+ { 0x0000070d, 0x0080 }, /* R1805 (0x70d) - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070e, 0x0000 }, /* R1806 (0x70e) - AIF1TX2MIX Input 4 Source */
+ { 0x0000070f, 0x0080 }, /* R1807 (0x70f) - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 (0x710) - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 (0x711) - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 (0x712) - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 (0x713) - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 (0x714) - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 (0x715) - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 (0x716) - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 (0x717) - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 (0x718) - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 (0x719) - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071a, 0x0000 }, /* R1818 (0x71a) - AIF1TX4MIX Input 2 Source */
+ { 0x0000071b, 0x0080 }, /* R1819 (0x71b) - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071c, 0x0000 }, /* R1820 (0x71c) - AIF1TX4MIX Input 3 Source */
+ { 0x0000071d, 0x0080 }, /* R1821 (0x71d) - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071e, 0x0000 }, /* R1822 (0x71e) - AIF1TX4MIX Input 4 Source */
+ { 0x0000071f, 0x0080 }, /* R1823 (0x71f) - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 (0x720) - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 (0x721) - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 (0x722) - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 (0x723) - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 (0x724) - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 (0x725) - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 (0x726) - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 (0x727) - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 (0x728) - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 (0x729) - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072a, 0x0000 }, /* R1834 (0x72a) - AIF1TX6MIX Input 2 Source */
+ { 0x0000072b, 0x0080 }, /* R1835 (0x72b) - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072c, 0x0000 }, /* R1836 (0x72c) - AIF1TX6MIX Input 3 Source */
+ { 0x0000072d, 0x0080 }, /* R1837 (0x72d) - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072e, 0x0000 }, /* R1838 (0x72e) - AIF1TX6MIX Input 4 Source */
+ { 0x0000072f, 0x0080 }, /* R1839 (0x72f) - AIF1TX6MIX Input 4 Volume */
+ { 0x00000730, 0x0000 }, /* R1840 (0x730) - AIF1TX7MIX Input 1 Source */
+ { 0x00000731, 0x0080 }, /* R1841 (0x731) - AIF1TX7MIX Input 1 Volume */
+ { 0x00000732, 0x0000 }, /* R1842 (0x732) - AIF1TX7MIX Input 2 Source */
+ { 0x00000733, 0x0080 }, /* R1843 (0x733) - AIF1TX7MIX Input 2 Volume */
+ { 0x00000734, 0x0000 }, /* R1844 (0x734) - AIF1TX7MIX Input 3 Source */
+ { 0x00000735, 0x0080 }, /* R1845 (0x735) - AIF1TX7MIX Input 3 Volume */
+ { 0x00000736, 0x0000 }, /* R1846 (0x736) - AIF1TX7MIX Input 4 Source */
+ { 0x00000737, 0x0080 }, /* R1847 (0x737) - AIF1TX7MIX Input 4 Volume */
+ { 0x00000738, 0x0000 }, /* R1848 (0x738) - AIF1TX8MIX Input 1 Source */
+ { 0x00000739, 0x0080 }, /* R1849 (0x739) - AIF1TX8MIX Input 1 Volume */
+ { 0x0000073a, 0x0000 }, /* R1850 (0x73a) - AIF1TX8MIX Input 2 Source */
+ { 0x0000073b, 0x0080 }, /* R1851 (0x73b) - AIF1TX8MIX Input 2 Volume */
+ { 0x0000073c, 0x0000 }, /* R1852 (0x73c) - AIF1TX8MIX Input 3 Source */
+ { 0x0000073d, 0x0080 }, /* R1853 (0x73d) - AIF1TX8MIX Input 3 Volume */
+ { 0x0000073e, 0x0000 }, /* R1854 (0x73e) - AIF1TX8MIX Input 4 Source */
+ { 0x0000073f, 0x0080 }, /* R1855 (0x73f) - AIF1TX8MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 (0x740) - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 (0x741) - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 (0x742) - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 (0x743) - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 (0x744) - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 (0x745) - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 (0x746) - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 (0x747) - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 (0x748) - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 (0x749) - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074a, 0x0000 }, /* R1866 (0x74a) - AIF2TX2MIX Input 2 Source */
+ { 0x0000074b, 0x0080 }, /* R1867 (0x74b) - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074c, 0x0000 }, /* R1868 (0x74c) - AIF2TX2MIX Input 3 Source */
+ { 0x0000074d, 0x0080 }, /* R1869 (0x74d) - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074e, 0x0000 }, /* R1870 (0x74e) - AIF2TX2MIX Input 4 Source */
+ { 0x0000074f, 0x0080 }, /* R1871 (0x74f) - AIF2TX2MIX Input 4 Volume */
+ { 0x00000750, 0x0000 }, /* R1872 (0x750) - AIF2TX3MIX Input 1 Source */
+ { 0x00000751, 0x0080 }, /* R1873 (0x751) - AIF2TX3MIX Input 1 Volume */
+ { 0x00000752, 0x0000 }, /* R1874 (0x752) - AIF2TX3MIX Input 2 Source */
+ { 0x00000753, 0x0080 }, /* R1875 (0x753) - AIF2TX3MIX Input 2 Volume */
+ { 0x00000754, 0x0000 }, /* R1876 (0x754) - AIF2TX3MIX Input 3 Source */
+ { 0x00000755, 0x0080 }, /* R1877 (0x755) - AIF2TX3MIX Input 3 Volume */
+ { 0x00000756, 0x0000 }, /* R1878 (0x756) - AIF2TX3MIX Input 4 Source */
+ { 0x00000757, 0x0080 }, /* R1879 (0x757) - AIF2TX3MIX Input 4 Volume */
+ { 0x00000758, 0x0000 }, /* R1880 (0x758) - AIF2TX4MIX Input 1 Source */
+ { 0x00000759, 0x0080 }, /* R1881 (0x759) - AIF2TX4MIX Input 1 Volume */
+ { 0x0000075a, 0x0000 }, /* R1882 (0x75a) - AIF2TX4MIX Input 2 Source */
+ { 0x0000075b, 0x0080 }, /* R1883 (0x75b) - AIF2TX4MIX Input 2 Volume */
+ { 0x0000075c, 0x0000 }, /* R1884 (0x75c) - AIF2TX4MIX Input 3 Source */
+ { 0x0000075d, 0x0080 }, /* R1885 (0x75d) - AIF2TX4MIX Input 3 Volume */
+ { 0x0000075e, 0x0000 }, /* R1886 (0x75e) - AIF2TX4MIX Input 4 Source */
+ { 0x0000075f, 0x0080 }, /* R1887 (0x75f) - AIF2TX4MIX Input 4 Volume */
+ { 0x00000760, 0x0000 }, /* R1888 (0x760) - AIF2TX5MIX Input 1 Source */
+ { 0x00000761, 0x0080 }, /* R1889 (0x761) - AIF2TX5MIX Input 1 Volume */
+ { 0x00000762, 0x0000 }, /* R1890 (0x762) - AIF2TX5MIX Input 2 Source */
+ { 0x00000763, 0x0080 }, /* R1891 (0x763) - AIF2TX5MIX Input 2 Volume */
+ { 0x00000764, 0x0000 }, /* R1892 (0x764) - AIF2TX5MIX Input 3 Source */
+ { 0x00000765, 0x0080 }, /* R1893 (0x765) - AIF2TX5MIX Input 3 Volume */
+ { 0x00000766, 0x0000 }, /* R1894 (0x766) - AIF2TX5MIX Input 4 Source */
+ { 0x00000767, 0x0080 }, /* R1895 (0x767) - AIF2TX5MIX Input 4 Volume */
+ { 0x00000768, 0x0000 }, /* R1896 (0x768) - AIF2TX6MIX Input 1 Source */
+ { 0x00000769, 0x0080 }, /* R1897 (0x769) - AIF2TX6MIX Input 1 Volume */
+ { 0x0000076a, 0x0000 }, /* R1898 (0x76a) - AIF2TX6MIX Input 2 Source */
+ { 0x0000076b, 0x0080 }, /* R1899 (0x76b) - AIF2TX6MIX Input 2 Volume */
+ { 0x0000076c, 0x0000 }, /* R1900 (0x76c) - AIF2TX6MIX Input 3 Source */
+ { 0x0000076d, 0x0080 }, /* R1901 (0x76d) - AIF2TX6MIX Input 3 Volume */
+ { 0x0000076e, 0x0000 }, /* R1902 (0x76e) - AIF2TX6MIX Input 4 Source */
+ { 0x0000076f, 0x0080 }, /* R1903 (0x76f) - AIF2TX6MIX Input 4 Volume */
+ { 0x00000770, 0x0000 }, /* R1904 (0x770) - AIF2TX7MIX Input 1 Source */
+ { 0x00000771, 0x0080 }, /* R1905 (0x771) - AIF2TX7MIX Input 1 Volume */
+ { 0x00000772, 0x0000 }, /* R1906 (0x772) - AIF2TX7MIX Input 2 Source */
+ { 0x00000773, 0x0080 }, /* R1907 (0x773) - AIF2TX7MIX Input 2 Volume */
+ { 0x00000774, 0x0000 }, /* R1908 (0x774) - AIF2TX7MIX Input 3 Source */
+ { 0x00000775, 0x0080 }, /* R1909 (0x775) - AIF2TX7MIX Input 3 Volume */
+ { 0x00000776, 0x0000 }, /* R1910 (0x776) - AIF2TX7MIX Input 4 Source */
+ { 0x00000777, 0x0080 }, /* R1911 (0x777) - AIF2TX7MIX Input 4 Volume */
+ { 0x00000778, 0x0000 }, /* R1912 (0x778) - AIF2TX8MIX Input 1 Source */
+ { 0x00000779, 0x0080 }, /* R1913 (0x779) - AIF2TX8MIX Input 1 Volume */
+ { 0x0000077a, 0x0000 }, /* R1914 (0x77a) - AIF2TX8MIX Input 2 Source */
+ { 0x0000077b, 0x0080 }, /* R1915 (0x77b) - AIF2TX8MIX Input 2 Volume */
+ { 0x0000077c, 0x0000 }, /* R1916 (0x77c) - AIF2TX8MIX Input 3 Source */
+ { 0x0000077d, 0x0080 }, /* R1917 (0x77d) - AIF2TX8MIX Input 3 Volume */
+ { 0x0000077e, 0x0000 }, /* R1918 (0x77e) - AIF2TX8MIX Input 4 Source */
+ { 0x0000077f, 0x0080 }, /* R1919 (0x77f) - AIF2TX8MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 (0x780) - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 (0x781) - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 (0x782) - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 (0x783) - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 (0x784) - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 (0x785) - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 (0x786) - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 (0x787) - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 (0x788) - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 (0x789) - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078a, 0x0000 }, /* R1930 (0x78a) - AIF3TX2MIX Input 2 Source */
+ { 0x0000078b, 0x0080 }, /* R1931 (0x78b) - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078c, 0x0000 }, /* R1932 (0x78c) - AIF3TX2MIX Input 3 Source */
+ { 0x0000078d, 0x0080 }, /* R1933 (0x78d) - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078e, 0x0000 }, /* R1934 (0x78e) - AIF3TX2MIX Input 4 Source */
+ { 0x0000078f, 0x0080 }, /* R1935 (0x78f) - AIF3TX2MIX Input 4 Volume */
+ { 0x000007a0, 0x0000 }, /* R1952 (0x7a0) - AIF4TX1MIX Input 1 Source */
+ { 0x000007a1, 0x0080 }, /* R1953 (0x7a1) - AIF4TX1MIX Input 1 Volume */
+ { 0x000007a2, 0x0000 }, /* R1954 (0x7a2) - AIF4TX1MIX Input 2 Source */
+ { 0x000007a3, 0x0080 }, /* R1955 (0x7a3) - AIF4TX1MIX Input 2 Volume */
+ { 0x000007a4, 0x0000 }, /* R1956 (0x7a4) - AIF4TX1MIX Input 3 Source */
+ { 0x000007a5, 0x0080 }, /* R1957 (0x7a5) - AIF4TX1MIX Input 3 Volume */
+ { 0x000007a6, 0x0000 }, /* R1958 (0x7a6) - AIF4TX1MIX Input 4 Source */
+ { 0x000007a7, 0x0080 }, /* R1959 (0x7a7) - AIF4TX1MIX Input 4 Volume */
+ { 0x000007a8, 0x0000 }, /* R1960 (0x7a8) - AIF4TX2MIX Input 1 Source */
+ { 0x000007a9, 0x0080 }, /* R1961 (0x7a9) - AIF4TX2MIX Input 1 Volume */
+ { 0x000007aa, 0x0000 }, /* R1962 (0x7aa) - AIF4TX2MIX Input 2 Source */
+ { 0x000007ab, 0x0080 }, /* R1963 (0x7ab) - AIF4TX2MIX Input 2 Volume */
+ { 0x000007ac, 0x0000 }, /* R1964 (0x7ac) - AIF4TX2MIX Input 3 Source */
+ { 0x000007ad, 0x0080 }, /* R1965 (0x7ad) - AIF4TX2MIX Input 3 Volume */
+ { 0x000007ae, 0x0000 }, /* R1966 (0x7ae) - AIF4TX2MIX Input 4 Source */
+ { 0x000007af, 0x0080 }, /* R1967 (0x7af) - AIF4TX2MIX Input 4 Volume */
+ { 0x000007c0, 0x0000 }, /* R1984 (0x7c0) - SLIMTX1MIX Input 1 Source */
+ { 0x000007c1, 0x0080 }, /* R1985 (0x7c1) - SLIMTX1MIX Input 1 Volume */
+ { 0x000007c2, 0x0000 }, /* R1986 (0x7c2) - SLIMTX1MIX Input 2 Source */
+ { 0x000007c3, 0x0080 }, /* R1987 (0x7c3) - SLIMTX1MIX Input 2 Volume */
+ { 0x000007c4, 0x0000 }, /* R1988 (0x7c4) - SLIMTX1MIX Input 3 Source */
+ { 0x000007c5, 0x0080 }, /* R1989 (0x7c5) - SLIMTX1MIX Input 3 Volume */
+ { 0x000007c6, 0x0000 }, /* R1990 (0x7c6) - SLIMTX1MIX Input 4 Source */
+ { 0x000007c7, 0x0080 }, /* R1991 (0x7c7) - SLIMTX1MIX Input 4 Volume */
+ { 0x000007c8, 0x0000 }, /* R1992 (0x7c8) - SLIMTX2MIX Input 1 Source */
+ { 0x000007c9, 0x0080 }, /* R1993 (0x7c9) - SLIMTX2MIX Input 1 Volume */
+ { 0x000007ca, 0x0000 }, /* R1994 (0x7ca) - SLIMTX2MIX Input 2 Source */
+ { 0x000007cb, 0x0080 }, /* R1995 (0x7cb) - SLIMTX2MIX Input 2 Volume */
+ { 0x000007cc, 0x0000 }, /* R1996 (0x7cc) - SLIMTX2MIX Input 3 Source */
+ { 0x000007cd, 0x0080 }, /* R1997 (0x7cd) - SLIMTX2MIX Input 3 Volume */
+ { 0x000007ce, 0x0000 }, /* R1998 (0x7ce) - SLIMTX2MIX Input 4 Source */
+ { 0x000007cf, 0x0080 }, /* R1999 (0x7cf) - SLIMTX2MIX Input 4 Volume */
+ { 0x000007d0, 0x0000 }, /* R2000 (0x7d0) - SLIMTX3MIX Input 1 Source */
+ { 0x000007d1, 0x0080 }, /* R2001 (0x7d1) - SLIMTX3MIX Input 1 Volume */
+ { 0x000007d2, 0x0000 }, /* R2002 (0x7d2) - SLIMTX3MIX Input 2 Source */
+ { 0x000007d3, 0x0080 }, /* R2003 (0x7d3) - SLIMTX3MIX Input 2 Volume */
+ { 0x000007d4, 0x0000 }, /* R2004 (0x7d4) - SLIMTX3MIX Input 3 Source */
+ { 0x000007d5, 0x0080 }, /* R2005 (0x7d5) - SLIMTX3MIX Input 3 Volume */
+ { 0x000007d6, 0x0000 }, /* R2006 (0x7d6) - SLIMTX3MIX Input 4 Source */
+ { 0x000007d7, 0x0080 }, /* R2007 (0x7d7) - SLIMTX3MIX Input 4 Volume */
+ { 0x000007d8, 0x0000 }, /* R2008 (0x7d8) - SLIMTX4MIX Input 1 Source */
+ { 0x000007d9, 0x0080 }, /* R2009 (0x7d9) - SLIMTX4MIX Input 1 Volume */
+ { 0x000007da, 0x0000 }, /* R2010 (0x7da) - SLIMTX4MIX Input 2 Source */
+ { 0x000007db, 0x0080 }, /* R2011 (0x7db) - SLIMTX4MIX Input 2 Volume */
+ { 0x000007dc, 0x0000 }, /* R2012 (0x7dc) - SLIMTX4MIX Input 3 Source */
+ { 0x000007dd, 0x0080 }, /* R2013 (0x7dd) - SLIMTX4MIX Input 3 Volume */
+ { 0x000007de, 0x0000 }, /* R2014 (0x7de) - SLIMTX4MIX Input 4 Source */
+ { 0x000007df, 0x0080 }, /* R2015 (0x7df) - SLIMTX4MIX Input 4 Volume */
+ { 0x000007e0, 0x0000 }, /* R2016 (0x7e0) - SLIMTX5MIX Input 1 Source */
+ { 0x000007e1, 0x0080 }, /* R2017 (0x7e1) - SLIMTX5MIX Input 1 Volume */
+ { 0x000007e2, 0x0000 }, /* R2018 (0x7e2) - SLIMTX5MIX Input 2 Source */
+ { 0x000007e3, 0x0080 }, /* R2019 (0x7e3) - SLIMTX5MIX Input 2 Volume */
+ { 0x000007e4, 0x0000 }, /* R2020 (0x7e4) - SLIMTX5MIX Input 3 Source */
+ { 0x000007e5, 0x0080 }, /* R2021 (0x7e5) - SLIMTX5MIX Input 3 Volume */
+ { 0x000007e6, 0x0000 }, /* R2022 (0x7e6) - SLIMTX5MIX Input 4 Source */
+ { 0x000007e7, 0x0080 }, /* R2023 (0x7e7) - SLIMTX5MIX Input 4 Volume */
+ { 0x000007e8, 0x0000 }, /* R2024 (0x7e8) - SLIMTX6MIX Input 1 Source */
+ { 0x000007e9, 0x0080 }, /* R2025 (0x7e9) - SLIMTX6MIX Input 1 Volume */
+ { 0x000007ea, 0x0000 }, /* R2026 (0x7ea) - SLIMTX6MIX Input 2 Source */
+ { 0x000007eb, 0x0080 }, /* R2027 (0x7eb) - SLIMTX6MIX Input 2 Volume */
+ { 0x000007ec, 0x0000 }, /* R2028 (0x7ec) - SLIMTX6MIX Input 3 Source */
+ { 0x000007ed, 0x0080 }, /* R2029 (0x7ed) - SLIMTX6MIX Input 3 Volume */
+ { 0x000007ee, 0x0000 }, /* R2030 (0x7ee) - SLIMTX6MIX Input 4 Source */
+ { 0x000007ef, 0x0080 }, /* R2031 (0x7ef) - SLIMTX6MIX Input 4 Volume */
+ { 0x000007f0, 0x0000 }, /* R2032 (0x7f0) - SLIMTX7MIX Input 1 Source */
+ { 0x000007f1, 0x0080 }, /* R2033 (0x7f1) - SLIMTX7MIX Input 1 Volume */
+ { 0x000007f2, 0x0000 }, /* R2034 (0x7f2) - SLIMTX7MIX Input 2 Source */
+ { 0x000007f3, 0x0080 }, /* R2035 (0x7f3) - SLIMTX7MIX Input 2 Volume */
+ { 0x000007f4, 0x0000 }, /* R2036 (0x7f4) - SLIMTX7MIX Input 3 Source */
+ { 0x000007f5, 0x0080 }, /* R2037 (0x7f5) - SLIMTX7MIX Input 3 Volume */
+ { 0x000007f6, 0x0000 }, /* R2038 (0x7f6) - SLIMTX7MIX Input 4 Source */
+ { 0x000007f7, 0x0080 }, /* R2039 (0x7f7) - SLIMTX7MIX Input 4 Volume */
+ { 0x000007f8, 0x0000 }, /* R2040 (0x7f8) - SLIMTX8MIX Input 1 Source */
+ { 0x000007f9, 0x0080 }, /* R2041 (0x7f9) - SLIMTX8MIX Input 1 Volume */
+ { 0x000007fa, 0x0000 }, /* R2042 (0x7fa) - SLIMTX8MIX Input 2 Source */
+ { 0x000007fb, 0x0080 }, /* R2043 (0x7fb) - SLIMTX8MIX Input 2 Volume */
+ { 0x000007fc, 0x0000 }, /* R2044 (0x7fc) - SLIMTX8MIX Input 3 Source */
+ { 0x000007fd, 0x0080 }, /* R2045 (0x7fd) - SLIMTX8MIX Input 3 Volume */
+ { 0x000007fe, 0x0000 }, /* R2046 (0x7fe) - SLIMTX8MIX Input 4 Source */
+ { 0x000007ff, 0x0080 }, /* R2047 (0x7ff) - SLIMTX8MIX Input 4 Volume */
+ { 0x00000800, 0x0000 }, /* R2048 (0x800) - SPDIF1TX1MIX Input 1 Source */
+ { 0x00000801, 0x0080 }, /* R2049 (0x801) - SPDIF1TX1MIX Input 1 Volume */
+ { 0x00000808, 0x0000 }, /* R2056 (0x808) - SPDIF1TX2MIX Input 1 Source */
+ { 0x00000809, 0x0080 }, /* R2057 (0x809) - SPDIF1TX2MIX Input 1 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 (0x880) - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 (0x881) - EQ1MIX Input 1 Volume */
+ { 0x00000882, 0x0000 }, /* R2178 (0x882) - EQ1MIX Input 2 Source */
+ { 0x00000883, 0x0080 }, /* R2179 (0x883) - EQ1MIX Input 2 Volume */
+ { 0x00000884, 0x0000 }, /* R2180 (0x884) - EQ1MIX Input 3 Source */
+ { 0x00000885, 0x0080 }, /* R2181 (0x885) - EQ1MIX Input 3 Volume */
+ { 0x00000886, 0x0000 }, /* R2182 (0x886) - EQ1MIX Input 4 Source */
+ { 0x00000887, 0x0080 }, /* R2183 (0x887) - EQ1MIX Input 4 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 (0x888) - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 (0x889) - EQ2MIX Input 1 Volume */
+ { 0x0000088a, 0x0000 }, /* R2186 (0x88a) - EQ2MIX Input 2 Source */
+ { 0x0000088b, 0x0080 }, /* R2187 (0x88b) - EQ2MIX Input 2 Volume */
+ { 0x0000088c, 0x0000 }, /* R2188 (0x88c) - EQ2MIX Input 3 Source */
+ { 0x0000088d, 0x0080 }, /* R2189 (0x88d) - EQ2MIX Input 3 Volume */
+ { 0x0000088e, 0x0000 }, /* R2190 (0x88e) - EQ2MIX Input 4 Source */
+ { 0x0000088f, 0x0080 }, /* R2191 (0x88f) - EQ2MIX Input 4 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 (0x890) - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 (0x891) - EQ3MIX Input 1 Volume */
+ { 0x00000892, 0x0000 }, /* R2194 (0x892) - EQ3MIX Input 2 Source */
+ { 0x00000893, 0x0080 }, /* R2195 (0x893) - EQ3MIX Input 2 Volume */
+ { 0x00000894, 0x0000 }, /* R2196 (0x894) - EQ3MIX Input 3 Source */
+ { 0x00000895, 0x0080 }, /* R2197 (0x895) - EQ3MIX Input 3 Volume */
+ { 0x00000896, 0x0000 }, /* R2198 (0x896) - EQ3MIX Input 4 Source */
+ { 0x00000897, 0x0080 }, /* R2199 (0x897) - EQ3MIX Input 4 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 (0x898) - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 (0x899) - EQ4MIX Input 1 Volume */
+ { 0x0000089a, 0x0000 }, /* R2202 (0x89a) - EQ4MIX Input 2 Source */
+ { 0x0000089b, 0x0080 }, /* R2203 (0x89b) - EQ4MIX Input 2 Volume */
+ { 0x0000089c, 0x0000 }, /* R2204 (0x89c) - EQ4MIX Input 3 Source */
+ { 0x0000089d, 0x0080 }, /* R2205 (0x89d) - EQ4MIX Input 3 Volume */
+ { 0x0000089e, 0x0000 }, /* R2206 (0x89e) - EQ4MIX Input 4 Source */
+ { 0x0000089f, 0x0080 }, /* R2207 (0x89f) - EQ4MIX Input 4 Volume */
+ { 0x000008c0, 0x0000 }, /* R2240 (0x8c0) - DRC1LMIX Input 1 Source */
+ { 0x000008c1, 0x0080 }, /* R2241 (0x8c1) - DRC1LMIX Input 1 Volume */
+ { 0x000008c2, 0x0000 }, /* R2242 (0x8c2) - DRC1LMIX Input 2 Source */
+ { 0x000008c3, 0x0080 }, /* R2243 (0x8c3) - DRC1LMIX Input 2 Volume */
+ { 0x000008c4, 0x0000 }, /* R2244 (0x8c4) - DRC1LMIX Input 3 Source */
+ { 0x000008c5, 0x0080 }, /* R2245 (0x8c5) - DRC1LMIX Input 3 Volume */
+ { 0x000008c6, 0x0000 }, /* R2246 (0x8c6) - DRC1LMIX Input 4 Source */
+ { 0x000008c7, 0x0080 }, /* R2247 (0x8c7) - DRC1LMIX Input 4 Volume */
+ { 0x000008c8, 0x0000 }, /* R2248 (0x8c8) - DRC1RMIX Input 1 Source */
+ { 0x000008c9, 0x0080 }, /* R2249 (0x8c9) - DRC1RMIX Input 1 Volume */
+ { 0x000008ca, 0x0000 }, /* R2250 (0x8ca) - DRC1RMIX Input 2 Source */
+ { 0x000008cb, 0x0080 }, /* R2251 (0x8cb) - DRC1RMIX Input 2 Volume */
+ { 0x000008cc, 0x0000 }, /* R2252 (0x8cc) - DRC1RMIX Input 3 Source */
+ { 0x000008cd, 0x0080 }, /* R2253 (0x8cd) - DRC1RMIX Input 3 Volume */
+ { 0x000008ce, 0x0000 }, /* R2254 (0x8ce) - DRC1RMIX Input 4 Source */
+ { 0x000008cf, 0x0080 }, /* R2255 (0x8cf) - DRC1RMIX Input 4 Volume */
+ { 0x000008d0, 0x0000 }, /* R2256 (0x8d0) - DRC2LMIX Input 1 Source */
+ { 0x000008d1, 0x0080 }, /* R2257 (0x8d1) - DRC2LMIX Input 1 Volume */
+ { 0x000008d2, 0x0000 }, /* R2258 (0x8d2) - DRC2LMIX Input 2 Source */
+ { 0x000008d3, 0x0080 }, /* R2259 (0x8d3) - DRC2LMIX Input 2 Volume */
+ { 0x000008d4, 0x0000 }, /* R2260 (0x8d4) - DRC2LMIX Input 3 Source */
+ { 0x000008d5, 0x0080 }, /* R2261 (0x8d5) - DRC2LMIX Input 3 Volume */
+ { 0x000008d6, 0x0000 }, /* R2262 (0x8d6) - DRC2LMIX Input 4 Source */
+ { 0x000008d7, 0x0080 }, /* R2263 (0x8d7) - DRC2LMIX Input 4 Volume */
+ { 0x000008d8, 0x0000 }, /* R2264 (0x8d8) - DRC2RMIX Input 1 Source */
+ { 0x000008d9, 0x0080 }, /* R2265 (0x8d9) - DRC2RMIX Input 1 Volume */
+ { 0x000008da, 0x0000 }, /* R2266 (0x8da) - DRC2RMIX Input 2 Source */
+ { 0x000008db, 0x0080 }, /* R2267 (0x8db) - DRC2RMIX Input 2 Volume */
+ { 0x000008dc, 0x0000 }, /* R2268 (0x8dc) - DRC2RMIX Input 3 Source */
+ { 0x000008dd, 0x0080 }, /* R2269 (0x8dd) - DRC2RMIX Input 3 Volume */
+ { 0x000008de, 0x0000 }, /* R2270 (0x8de) - DRC2RMIX Input 4 Source */
+ { 0x000008df, 0x0080 }, /* R2271 (0x8df) - DRC2RMIX Input 4 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 (0x900) - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 (0x901) - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 (0x902) - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 (0x903) - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 (0x904) - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 (0x905) - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 (0x906) - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 (0x907) - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 (0x908) - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 (0x909) - HPLP2MIX Input 1 Volume */
+ { 0x0000090a, 0x0000 }, /* R2314 (0x90a) - HPLP2MIX Input 2 Source */
+ { 0x0000090b, 0x0080 }, /* R2315 (0x90b) - HPLP2MIX Input 2 Volume */
+ { 0x0000090c, 0x0000 }, /* R2316 (0x90c) - HPLP2MIX Input 3 Source */
+ { 0x0000090d, 0x0080 }, /* R2317 (0x90d) - HPLP2MIX Input 3 Volume */
+ { 0x0000090e, 0x0000 }, /* R2318 (0x90e) - HPLP2MIX Input 4 Source */
+ { 0x0000090f, 0x0080 }, /* R2319 (0x90f) - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 (0x910) - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 (0x911) - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 (0x912) - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 (0x913) - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 (0x914) - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 (0x915) - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 (0x916) - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 (0x917) - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 (0x918) - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 (0x919) - HPLP4MIX Input 1 Volume */
+ { 0x0000091a, 0x0000 }, /* R2330 (0x91a) - HPLP4MIX Input 2 Source */
+ { 0x0000091b, 0x0080 }, /* R2331 (0x91b) - HPLP4MIX Input 2 Volume */
+ { 0x0000091c, 0x0000 }, /* R2332 (0x91c) - HPLP4MIX Input 3 Source */
+ { 0x0000091d, 0x0080 }, /* R2333 (0x91d) - HPLP4MIX Input 3 Volume */
+ { 0x0000091e, 0x0000 }, /* R2334 (0x91e) - HPLP4MIX Input 4 Source */
+ { 0x0000091f, 0x0080 }, /* R2335 (0x91f) - HPLP4MIX Input 4 Volume */
+ { 0x00000940, 0x0000 }, /* R2368 (0x940) - DSP1LMIX Input 1 Source */
+ { 0x00000941, 0x0080 }, /* R2369 (0x941) - DSP1LMIX Input 1 Volume */
+ { 0x00000942, 0x0000 }, /* R2370 (0x942) - DSP1LMIX Input 2 Source */
+ { 0x00000943, 0x0080 }, /* R2371 (0x943) - DSP1LMIX Input 2 Volume */
+ { 0x00000944, 0x0000 }, /* R2372 (0x944) - DSP1LMIX Input 3 Source */
+ { 0x00000945, 0x0080 }, /* R2373 (0x945) - DSP1LMIX Input 3 Volume */
+ { 0x00000946, 0x0000 }, /* R2374 (0x946) - DSP1LMIX Input 4 Source */
+ { 0x00000947, 0x0080 }, /* R2375 (0x947) - DSP1LMIX Input 4 Volume */
+ { 0x00000948, 0x0000 }, /* R2376 (0x948) - DSP1RMIX Input 1 Source */
+ { 0x00000949, 0x0080 }, /* R2377 (0x949) - DSP1RMIX Input 1 Volume */
+ { 0x0000094a, 0x0000 }, /* R2378 (0x94a) - DSP1RMIX Input 2 Source */
+ { 0x0000094b, 0x0080 }, /* R2379 (0x94b) - DSP1RMIX Input 2 Volume */
+ { 0x0000094c, 0x0000 }, /* R2380 (0x94c) - DSP1RMIX Input 3 Source */
+ { 0x0000094d, 0x0080 }, /* R2381 (0x94d) - DSP1RMIX Input 3 Volume */
+ { 0x0000094e, 0x0000 }, /* R2382 (0x94e) - DSP1RMIX Input 4 Source */
+ { 0x0000094f, 0x0080 }, /* R2383 (0x94f) - DSP1RMIX Input 4 Volume */
+ { 0x00000950, 0x0000 }, /* R2384 (0x950) - DSP1AUX1MIX Input 1 Source */
+ { 0x00000958, 0x0000 }, /* R2392 (0x958) - DSP1AUX2MIX Input 1 Source */
+ { 0x00000960, 0x0000 }, /* R2400 (0x960) - DSP1AUX3MIX Input 1 Source */
+ { 0x00000968, 0x0000 }, /* R2408 (0x968) - DSP1AUX4MIX Input 1 Source */
+ { 0x00000970, 0x0000 }, /* R2416 (0x970) - DSP1AUX5MIX Input 1 Source */
+ { 0x00000978, 0x0000 }, /* R2424 (0x978) - DSP1AUX6MIX Input 1 Source */
+ { 0x00000980, 0x0000 }, /* R2432 (0x980) - DSP2LMIX Input 1 Source */
+ { 0x00000981, 0x0080 }, /* R2433 (0x981) - DSP2LMIX Input 1 Volume */
+ { 0x00000982, 0x0000 }, /* R2434 (0x982) - DSP2LMIX Input 2 Source */
+ { 0x00000983, 0x0080 }, /* R2435 (0x983) - DSP2LMIX Input 2 Volume */
+ { 0x00000984, 0x0000 }, /* R2436 (0x984) - DSP2LMIX Input 3 Source */
+ { 0x00000985, 0x0080 }, /* R2437 (0x985) - DSP2LMIX Input 3 Volume */
+ { 0x00000986, 0x0000 }, /* R2438 (0x986) - DSP2LMIX Input 4 Source */
+ { 0x00000987, 0x0080 }, /* R2439 (0x987) - DSP2LMIX Input 4 Volume */
+ { 0x00000988, 0x0000 }, /* R2440 (0x988) - DSP2RMIX Input 1 Source */
+ { 0x00000989, 0x0080 }, /* R2441 (0x989) - DSP2RMIX Input 1 Volume */
+ { 0x0000098a, 0x0000 }, /* R2442 (0x98a) - DSP2RMIX Input 2 Source */
+ { 0x0000098b, 0x0080 }, /* R2443 (0x98b) - DSP2RMIX Input 2 Volume */
+ { 0x0000098c, 0x0000 }, /* R2444 (0x98c) - DSP2RMIX Input 3 Source */
+ { 0x0000098d, 0x0080 }, /* R2445 (0x98d) - DSP2RMIX Input 3 Volume */
+ { 0x0000098e, 0x0000 }, /* R2446 (0x98e) - DSP2RMIX Input 4 Source */
+ { 0x0000098f, 0x0080 }, /* R2447 (0x98f) - DSP2RMIX Input 4 Volume */
+ { 0x00000990, 0x0000 }, /* R2448 (0x990) - DSP2AUX1MIX Input 1 Source */
+ { 0x00000998, 0x0000 }, /* R2456 (0x998) - DSP2AUX2MIX Input 1 Source */
+ { 0x000009a0, 0x0000 }, /* R2464 (0x9a0) - DSP2AUX3MIX Input 1 Source */
+ { 0x000009a8, 0x0000 }, /* R2472 (0x9a8) - DSP2AUX4MIX Input 1 Source */
+ { 0x000009b0, 0x0000 }, /* R2480 (0x9b0) - DSP2AUX5MIX Input 1 Source */
+ { 0x000009b8, 0x0000 }, /* R2488 (0x9b8) - DSP2AUX6MIX Input 1 Source */
+ { 0x000009c0, 0x0000 }, /* R2496 (0x9c0) - DSP3LMIX Input 1 Source */
+ { 0x000009c1, 0x0080 }, /* R2497 (0x9c1) - DSP3LMIX Input 1 Volume */
+ { 0x000009c2, 0x0000 }, /* R2498 (0x9c2) - DSP3LMIX Input 2 Source */
+ { 0x000009c3, 0x0080 }, /* R2499 (0x9c3) - DSP3LMIX Input 2 Volume */
+ { 0x000009c4, 0x0000 }, /* R2500 (0x9c4) - DSP3LMIX Input 3 Source */
+ { 0x000009c5, 0x0080 }, /* R2501 (0x9c5) - DSP3LMIX Input 3 Volume */
+ { 0x000009c6, 0x0000 }, /* R2502 (0x9c6) - DSP3LMIX Input 4 Source */
+ { 0x000009c7, 0x0080 }, /* R2503 (0x9c7) - DSP3LMIX Input 4 Volume */
+ { 0x000009c8, 0x0000 }, /* R2504 (0x9c8) - DSP3RMIX Input 1 Source */
+ { 0x000009c9, 0x0080 }, /* R2505 (0x9c9) - DSP3RMIX Input 1 Volume */
+ { 0x000009ca, 0x0000 }, /* R2506 (0x9ca) - DSP3RMIX Input 2 Source */
+ { 0x000009cb, 0x0080 }, /* R2507 (0x9cb) - DSP3RMIX Input 2 Volume */
+ { 0x000009cc, 0x0000 }, /* R2508 (0x9cc) - DSP3RMIX Input 3 Source */
+ { 0x000009cd, 0x0080 }, /* R2509 (0x9cd) - DSP3RMIX Input 3 Volume */
+ { 0x000009ce, 0x0000 }, /* R2510 (0x9ce) - DSP3RMIX Input 4 Source */
+ { 0x000009cf, 0x0080 }, /* R2511 (0x9cf) - DSP3RMIX Input 4 Volume */
+ { 0x000009d0, 0x0000 }, /* R2512 (0x9d0) - DSP3AUX1MIX Input 1 Source */
+ { 0x000009d8, 0x0000 }, /* R2520 (0x9d8) - DSP3AUX2MIX Input 1 Source */
+ { 0x000009e0, 0x0000 }, /* R2528 (0x9e0) - DSP3AUX3MIX Input 1 Source */
+ { 0x000009e8, 0x0000 }, /* R2536 (0x9e8) - DSP3AUX4MIX Input 1 Source */
+ { 0x000009f0, 0x0000 }, /* R2544 (0x9f0) - DSP3AUX5MIX Input 1 Source */
+ { 0x000009f8, 0x0000 }, /* R2552 (0x9f8) - DSP3AUX6MIX Input 1 Source */
+ { 0x00000a00, 0x0000 }, /* R2560 (0xa00) - DSP4LMIX Input 1 Source */
+ { 0x00000a01, 0x0080 }, /* R2561 (0xa01) - DSP4LMIX Input 1 Volume */
+ { 0x00000a02, 0x0000 }, /* R2562 (0xa02) - DSP4LMIX Input 2 Source */
+ { 0x00000a03, 0x0080 }, /* R2563 (0xa03) - DSP4LMIX Input 2 Volume */
+ { 0x00000a04, 0x0000 }, /* R2564 (0xa04) - DSP4LMIX Input 3 Source */
+ { 0x00000a05, 0x0080 }, /* R2565 (0xa05) - DSP4LMIX Input 3 Volume */
+ { 0x00000a06, 0x0000 }, /* R2566 (0xa06) - DSP4LMIX Input 4 Source */
+ { 0x00000a07, 0x0080 }, /* R2567 (0xa07) - DSP4LMIX Input 4 Volume */
+ { 0x00000a08, 0x0000 }, /* R2568 (0xa08) - DSP4RMIX Input 1 Source */
+ { 0x00000a09, 0x0080 }, /* R2569 (0xa09) - DSP4RMIX Input 1 Volume */
+ { 0x00000a0a, 0x0000 }, /* R2570 (0xa0a) - DSP4RMIX Input 2 Source */
+ { 0x00000a0b, 0x0080 }, /* R2571 (0xa0b) - DSP4RMIX Input 2 Volume */
+ { 0x00000a0c, 0x0000 }, /* R2572 (0xa0c) - DSP4RMIX Input 3 Source */
+ { 0x00000a0d, 0x0080 }, /* R2573 (0xa0d) - DSP4RMIX Input 3 Volume */
+ { 0x00000a0e, 0x0000 }, /* R2574 (0xa0e) - DSP4RMIX Input 4 Source */
+ { 0x00000a0f, 0x0080 }, /* R2575 (0xa0f) - DSP4RMIX Input 4 Volume */
+ { 0x00000a10, 0x0000 }, /* R2576 (0xa10) - DSP4AUX1MIX Input 1 Source */
+ { 0x00000a18, 0x0000 }, /* R2584 (0xa18) - DSP4AUX2MIX Input 1 Source */
+ { 0x00000a20, 0x0000 }, /* R2592 (0xa20) - DSP4AUX3MIX Input 1 Source */
+ { 0x00000a28, 0x0000 }, /* R2600 (0xa28) - DSP4AUX4MIX Input 1 Source */
+ { 0x00000a30, 0x0000 }, /* R2608 (0xa30) - DSP4AUX5MIX Input 1 Source */
+ { 0x00000a38, 0x0000 }, /* R2616 (0xa38) - DSP4AUX6MIX Input 1 Source */
+ { 0x00000a40, 0x0000 }, /* R2624 (0xa40) - DSP5LMIX Input 1 Source */
+ { 0x00000a41, 0x0080 }, /* R2625 (0xa41) - DSP5LMIX Input 1 Volume */
+ { 0x00000a42, 0x0000 }, /* R2626 (0xa42) - DSP5LMIX Input 2 Source */
+ { 0x00000a43, 0x0080 }, /* R2627 (0xa43) - DSP5LMIX Input 2 Volume */
+ { 0x00000a44, 0x0000 }, /* R2628 (0xa44) - DSP5LMIX Input 3 Source */
+ { 0x00000a45, 0x0080 }, /* R2629 (0xa45) - DSP5LMIX Input 3 Volume */
+ { 0x00000a46, 0x0000 }, /* R2630 (0xa46) - DSP5LMIX Input 4 Source */
+ { 0x00000a47, 0x0080 }, /* R2631 (0xa47) - DSP5LMIX Input 4 Volume */
+ { 0x00000a48, 0x0000 }, /* R2632 (0xa48) - DSP5RMIX Input 1 Source */
+ { 0x00000a49, 0x0080 }, /* R2633 (0xa49) - DSP5RMIX Input 1 Volume */
+ { 0x00000a4a, 0x0000 }, /* R2634 (0xa4a) - DSP5RMIX Input 2 Source */
+ { 0x00000a4b, 0x0080 }, /* R2635 (0xa4b) - DSP5RMIX Input 2 Volume */
+ { 0x00000a4c, 0x0000 }, /* R2636 (0xa4c) - DSP5RMIX Input 3 Source */
+ { 0x00000a4d, 0x0080 }, /* R2637 (0xa4d) - DSP5RMIX Input 3 Volume */
+ { 0x00000a4e, 0x0000 }, /* R2638 (0xa4e) - DSP5RMIX Input 4 Source */
+ { 0x00000a4f, 0x0080 }, /* R2639 (0xa4f) - DSP5RMIX Input 4 Volume */
+ { 0x00000a50, 0x0000 }, /* R2640 (0xa50) - DSP5AUX1MIX Input 1 Source */
+ { 0x00000a58, 0x0000 }, /* R2648 (0xa58) - DSP5AUX2MIX Input 1 Source */
+ { 0x00000a60, 0x0000 }, /* R2656 (0xa60) - DSP5AUX3MIX Input 1 Source */
+ { 0x00000a68, 0x0000 }, /* R2664 (0xa68) - DSP5AUX4MIX Input 1 Source */
+ { 0x00000a70, 0x0000 }, /* R2672 (0xa70) - DSP5AUX5MIX Input 1 Source */
+ { 0x00000a78, 0x0000 }, /* R2680 (0xa78) - DSP5AUX6MIX Input 1 Source */
+ { 0x00000a80, 0x0000 }, /* R2688 (0xa80) - ASRC1_1LMIX Input 1 Source */
+ { 0x00000a88, 0x0000 }, /* R2696 (0xa88) - ASRC1_1RMIX Input 1 Source */
+ { 0x00000a90, 0x0000 }, /* R2704 (0xa90) - ASRC1_2LMIX Input 1 Source */
+ { 0x00000a98, 0x0000 }, /* R2712 (0xa98) - ASRC1_2RMIX Input 1 Source */
+ { 0x00000aa0, 0x0000 }, /* R2720 (0xaa0) - ASRC2_1LMIX Input 1 Source */
+ { 0x00000aa8, 0x0000 }, /* R2728 (0xaa8) - ASRC2_1RMIX Input 1 Source */
+ { 0x00000ab0, 0x0000 }, /* R2736 (0xab0) - ASRC2_2LMIX Input 1 Source */
+ { 0x00000ab8, 0x0000 }, /* R2744 (0xab8) - ASRC2_2RMIX Input 1 Source */
+ { 0x00000b00, 0x0000 }, /* R2816 (0xb00) - ISRC1DEC1MIX Input 1 Source*/
+ { 0x00000b08, 0x0000 }, /* R2824 (0xb08) - ISRC1DEC2MIX Input 1 Source*/
+ { 0x00000b10, 0x0000 }, /* R2832 (0xb10) - ISRC1DEC3MIX Input 1 Source*/
+ { 0x00000b18, 0x0000 }, /* R2840 (0xb18) - ISRC1DEC4MIX Input 1 Source*/
+ { 0x00000b20, 0x0000 }, /* R2848 (0xb20) - ISRC1INT1MIX Input 1 Source*/
+ { 0x00000b28, 0x0000 }, /* R2856 (0xb28) - ISRC1INT2MIX Input 1 Source*/
+ { 0x00000b30, 0x0000 }, /* R2864 (0xb30) - ISRC1INT3MIX Input 1 Source*/
+ { 0x00000b38, 0x0000 }, /* R2872 (0xb38) - ISRC1INT4MIX Input 1 Source*/
+ { 0x00000b40, 0x0000 }, /* R2880 (0xb40) - ISRC2DEC1MIX Input 1 Source*/
+ { 0x00000b48, 0x0000 }, /* R2888 (0xb48) - ISRC2DEC2MIX Input 1 Source*/
+ { 0x00000b50, 0x0000 }, /* R2896 (0xb50) - ISRC2DEC3MIX Input 1 Source*/
+ { 0x00000b58, 0x0000 }, /* R2904 (0xb58) - ISRC2DEC4MIX Input 1 Source*/
+ { 0x00000b60, 0x0000 }, /* R2912 (0xb60) - ISRC2INT1MIX Input 1 Source*/
+ { 0x00000b68, 0x0000 }, /* R2920 (0xb68) - ISRC2INT2MIX Input 1 Source*/
+ { 0x00000b70, 0x0000 }, /* R2928 (0xb70) - ISRC2INT3MIX Input 1 Source*/
+ { 0x00000b78, 0x0000 }, /* R2936 (0xb78) - ISRC2INT4MIX Input 1 Source*/
+ { 0x00000b80, 0x0000 }, /* R2944 (0xb80) - ISRC3DEC1MIX Input 1 Source*/
+ { 0x00000b88, 0x0000 }, /* R2952 (0xb88) - ISRC3DEC2MIX Input 1 Source*/
+ { 0x00000ba0, 0x0000 }, /* R2976 (0xba0) - ISRC3INT1MIX Input 1 Source*/
+ { 0x00000ba8, 0x0000 }, /* R2984 (0xba8) - ISRC3INT2MIX Input 1 Source*/
+ { 0x00000bc0, 0x0000 }, /* R3008 (0xbc0) - ISRC4DEC1MIX Input 1 Source */
+ { 0x00000bc8, 0x0000 }, /* R3016 (0xbc8) - ISRC4DEC2MIX Input 1 Source */
+ { 0x00000be0, 0x0000 }, /* R3040 (0xbe0) - ISRC4INT1MIX Input 1 Source */
+ { 0x00000be8, 0x0000 }, /* R3048 (0xbe8) - ISRC4INT2MIX Input 1 Source */
+ { 0x00000c00, 0x0000 }, /* R3072 (0xc00) - DSP6LMIX Input 1 Source */
+ { 0x00000c01, 0x0080 }, /* R3073 (0xc01) - DSP6LMIX Input 1 Volume */
+ { 0x00000c02, 0x0000 }, /* R3074 (0xc02) - DSP6LMIX Input 2 Source */
+ { 0x00000c03, 0x0080 }, /* R3075 (0xc03) - DSP6LMIX Input 2 Volume */
+ { 0x00000c04, 0x0000 }, /* R3076 (0xc04) - DSP6LMIX Input 3 Source */
+ { 0x00000c05, 0x0080 }, /* R3077 (0xc05) - DSP6LMIX Input 3 Volume */
+ { 0x00000c06, 0x0000 }, /* R3078 (0xc06) - DSP6LMIX Input 4 Source */
+ { 0x00000c07, 0x0080 }, /* R3079 (0xc07) - DSP6LMIX Input 4 Volume */
+ { 0x00000c08, 0x0000 }, /* R3080 (0xc08) - DSP6RMIX Input 1 Source */
+ { 0x00000c09, 0x0080 }, /* R3081 (0xc09) - DSP6RMIX Input 1 Volume */
+ { 0x00000c0a, 0x0000 }, /* R3082 (0xc0a) - DSP6RMIX Input 2 Source */
+ { 0x00000c0b, 0x0080 }, /* R3083 (0xc0b) - DSP6RMIX Input 2 Volume */
+ { 0x00000c0c, 0x0000 }, /* R3084 (0xc0c) - DSP6RMIX Input 3 Source */
+ { 0x00000c0d, 0x0080 }, /* R3085 (0xc0d) - DSP6RMIX Input 3 Volume */
+ { 0x00000c0e, 0x0000 }, /* R3086 (0xc0e) - DSP6RMIX Input 4 Source */
+ { 0x00000c0f, 0x0080 }, /* R3087 (0xc0f) - DSP6RMIX Input 4 Volume */
+ { 0x00000c10, 0x0000 }, /* R3088 (0xc10) - DSP6AUX1MIX Input 1 Source */
+ { 0x00000c18, 0x0000 }, /* R3096 (0xc18) - DSP6AUX2MIX Input 1 Source */
+ { 0x00000c20, 0x0000 }, /* R3104 (0xc20) - DSP6AUX3MIX Input 1 Source */
+ { 0x00000c28, 0x0000 }, /* R3112 (0xc28) - DSP6AUX4MIX Input 1 Source */
+ { 0x00000c30, 0x0000 }, /* R3120 (0xc30) - DSP6AUX5MIX Input 1 Source */
+ { 0x00000c38, 0x0000 }, /* R3128 (0xc38) - DSP6AUX6MIX Input 1 Source */
+ { 0x00000c40, 0x0000 }, /* R3136 (0xc40) - DSP7LMIX Input 1 Source */
+ { 0x00000c41, 0x0080 }, /* R3137 (0xc41) - DSP7LMIX Input 1 Volume */
+ { 0x00000c42, 0x0000 }, /* R3138 (0xc42) - DSP7LMIX Input 2 Source */
+ { 0x00000c43, 0x0080 }, /* R3139 (0xc43) - DSP7LMIX Input 2 Volume */
+ { 0x00000c44, 0x0000 }, /* R3140 (0xc44) - DSP7LMIX Input 3 Source */
+ { 0x00000c45, 0x0080 }, /* R3141 (0xc45) - DSP7LMIX Input 3 Volume */
+ { 0x00000c46, 0x0000 }, /* R3142 (0xc46) - DSP7LMIX Input 4 Source */
+ { 0x00000c47, 0x0080 }, /* R3143 (0xc47) - DSP7LMIX Input 4 Volume */
+ { 0x00000c48, 0x0000 }, /* R3144 (0xc48) - DSP7RMIX Input 1 Source */
+ { 0x00000c49, 0x0080 }, /* R3145 (0xc49) - DSP7RMIX Input 1 Volume */
+ { 0x00000c4a, 0x0000 }, /* R3146 (0xc4a) - DSP7RMIX Input 2 Source */
+ { 0x00000c4b, 0x0080 }, /* R3147 (0xc4b) - DSP7RMIX Input 2 Volume */
+ { 0x00000c4c, 0x0000 }, /* R3148 (0xc4c) - DSP7RMIX Input 3 Source */
+ { 0x00000c4d, 0x0080 }, /* R3149 (0xc4d) - DSP7RMIX Input 3 Volume */
+ { 0x00000c4e, 0x0000 }, /* R3150 (0xc4e) - DSP7RMIX Input 4 Source */
+ { 0x00000c4f, 0x0080 }, /* R3151 (0xc4f) - DSP7RMIX Input 4 Volume */
+ { 0x00000c50, 0x0000 }, /* R3152 (0xc50) - DSP7AUX1MIX Input 1 Source */
+ { 0x00000c58, 0x0000 }, /* R3160 (0xc58) - DSP7AUX2MIX Input 1 Source */
+ { 0x00000c60, 0x0000 }, /* R3168 (0xc60) - DSP7AUX3MIX Input 1 Source */
+ { 0x00000c68, 0x0000 }, /* R3176 (0xc68) - DSP7AUX4MIX Input 1 Source */
+ { 0x00000c70, 0x0000 }, /* R3184 (0xc70) - DSP7AUX5MIX Input 1 Source */
+ { 0x00000c78, 0x0000 }, /* R3192 (0xc78) - DSP7AUX6MIX Input 1 Source */
+ { 0x00000e00, 0x0000 }, /* R3584 (0xe00) - FX Ctrl1 */
+ { 0x00000e10, 0x6318 }, /* R3600 (0xe10) - EQ1_1 */
+ { 0x00000e11, 0x6300 }, /* R3601 (0xe11) - EQ1_2 */
+ { 0x00000e12, 0x0fc8 }, /* R3602 (0xe12) - EQ1_3 */
+ { 0x00000e13, 0x03fe }, /* R3603 (0xe13) - EQ1_4 */
+ { 0x00000e14, 0x00e0 }, /* R3604 (0xe14) - EQ1_5 */
+ { 0x00000e15, 0x1ec4 }, /* R3605 (0xe15) - EQ1_6 */
+ { 0x00000e16, 0xf136 }, /* R3606 (0xe16) - EQ1_7 */
+ { 0x00000e17, 0x0409 }, /* R3607 (0xe17) - EQ1_8 */
+ { 0x00000e18, 0x04cc }, /* R3608 (0xe18) - EQ1_9 */
+ { 0x00000e19, 0x1c9b }, /* R3609 (0xe19) - EQ1_10 */
+ { 0x00000e1a, 0xf337 }, /* R3610 (0xe1a) - EQ1_11 */
+ { 0x00000e1b, 0x040b }, /* R3611 (0xe1b) - EQ1_12 */
+ { 0x00000e1c, 0x0cbb }, /* R3612 (0xe1c) - EQ1_13 */
+ { 0x00000e1d, 0x16f8 }, /* R3613 (0xe1d) - EQ1_14 */
+ { 0x00000e1e, 0xf7d9 }, /* R3614 (0xe1e) - EQ1_15 */
+ { 0x00000e1f, 0x040a }, /* R3615 (0xe1f) - EQ1_16 */
+ { 0x00000e20, 0x1f14 }, /* R3616 (0xe20) - EQ1_17 */
+ { 0x00000e21, 0x058c }, /* R3617 (0xe21) - EQ1_18 */
+ { 0x00000e22, 0x0563 }, /* R3618 (0xe22) - EQ1_19 */
+ { 0x00000e23, 0x4000 }, /* R3619 (0xe23) - EQ1_20 */
+ { 0x00000e24, 0x0b75 }, /* R3620 (0xe24) - EQ1_21 */
+ { 0x00000e26, 0x6318 }, /* R3622 (0xe26) - EQ2_1 */
+ { 0x00000e27, 0x6300 }, /* R3623 (0xe27) - EQ2_2 */
+ { 0x00000e28, 0x0fc8 }, /* R3624 (0xe28) - EQ2_3 */
+ { 0x00000e29, 0x03fe }, /* R3625 (0xe29) - EQ2_4 */
+ { 0x00000e2a, 0x00e0 }, /* R3626 (0xe2a) - EQ2_5 */
+ { 0x00000e2b, 0x1ec4 }, /* R3627 (0xe2b) - EQ2_6 */
+ { 0x00000e2c, 0xf136 }, /* R3628 (0xe2c) - EQ2_7 */
+ { 0x00000e2d, 0x0409 }, /* R3629 (0xe2d) - EQ2_8 */
+ { 0x00000e2e, 0x04cc }, /* R3630 (0xe2e) - EQ2_9 */
+ { 0x00000e2f, 0x1c9b }, /* R3631 (0xe2f) - EQ2_10 */
+ { 0x00000e30, 0xf337 }, /* R3632 (0xe30) - EQ2_11 */
+ { 0x00000e31, 0x040b }, /* R3633 (0xe31) - EQ2_12 */
+ { 0x00000e32, 0x0cbb }, /* R3634 (0xe32) - EQ2_13 */
+ { 0x00000e33, 0x16f8 }, /* R3635 (0xe33) - EQ2_14 */
+ { 0x00000e34, 0xf7d9 }, /* R3636 (0xe34) - EQ2_15 */
+ { 0x00000e35, 0x040a }, /* R3637 (0xe35) - EQ2_16 */
+ { 0x00000e36, 0x1f14 }, /* R3638 (0xe36) - EQ2_17 */
+ { 0x00000e37, 0x058c }, /* R3639 (0xe37) - EQ2_18 */
+ { 0x00000e38, 0x0563 }, /* R3640 (0xe38) - EQ2_19 */
+ { 0x00000e39, 0x4000 }, /* R3641 (0xe39) - EQ2_20 */
+ { 0x00000e3a, 0x0b75 }, /* R3642 (0xe3a) - EQ2_21 */
+ { 0x00000e3c, 0x6318 }, /* R3644 (0xe3c) - EQ3_1 */
+ { 0x00000e3d, 0x6300 }, /* R3645 (0xe3d) - EQ3_2 */
+ { 0x00000e3e, 0x0fc8 }, /* R3646 (0xe3e) - EQ3_3 */
+ { 0x00000e3f, 0x03fe }, /* R3647 (0xe3f) - EQ3_4 */
+ { 0x00000e40, 0x00e0 }, /* R3648 (0xe40) - EQ3_5 */
+ { 0x00000e41, 0x1ec4 }, /* R3649 (0xe41) - EQ3_6 */
+ { 0x00000e42, 0xf136 }, /* R3650 (0xe42) - EQ3_7 */
+ { 0x00000e43, 0x0409 }, /* R3651 (0xe43) - EQ3_8 */
+ { 0x00000e44, 0x04cc }, /* R3652 (0xe44) - EQ3_9 */
+ { 0x00000e45, 0x1c9b }, /* R3653 (0xe45) - EQ3_10 */
+ { 0x00000e46, 0xf337 }, /* R3654 (0xe46) - EQ3_11 */
+ { 0x00000e47, 0x040b }, /* R3655 (0xe47) - EQ3_12 */
+ { 0x00000e48, 0x0cbb }, /* R3656 (0xe48) - EQ3_13 */
+ { 0x00000e49, 0x16f8 }, /* R3657 (0xe49) - EQ3_14 */
+ { 0x00000e4a, 0xf7d9 }, /* R3658 (0xe4a) - EQ3_15 */
+ { 0x00000e4b, 0x040a }, /* R3659 (0xe4b) - EQ3_16 */
+ { 0x00000e4c, 0x1f14 }, /* R3660 (0xe4c) - EQ3_17 */
+ { 0x00000e4d, 0x058c }, /* R3661 (0xe4d) - EQ3_18 */
+ { 0x00000e4e, 0x0563 }, /* R3662 (0xe4e) - EQ3_19 */
+ { 0x00000e4f, 0x4000 }, /* R3663 (0xe4f) - EQ3_20 */
+ { 0x00000e50, 0x0b75 }, /* R3664 (0xe50) - EQ3_21 */
+ { 0x00000e52, 0x6318 }, /* R3666 (0xe52) - EQ4_1 */
+ { 0x00000e53, 0x6300 }, /* R3667 (0xe53) - EQ4_2 */
+ { 0x00000e54, 0x0fc8 }, /* R3668 (0xe54) - EQ4_3 */
+ { 0x00000e55, 0x03fe }, /* R3669 (0xe55) - EQ4_4 */
+ { 0x00000e56, 0x00e0 }, /* R3670 (0xe56) - EQ4_5 */
+ { 0x00000e57, 0x1ec4 }, /* R3671 (0xe57) - EQ4_6 */
+ { 0x00000e58, 0xf136 }, /* R3672 (0xe58) - EQ4_7 */
+ { 0x00000e59, 0x0409 }, /* R3673 (0xe59) - EQ4_8 */
+ { 0x00000e5a, 0x04cc }, /* R3674 (0xe5a) - EQ4_9 */
+ { 0x00000e5b, 0x1c9b }, /* R3675 (0xe5b) - EQ4_10 */
+ { 0x00000e5c, 0xf337 }, /* R3676 (0xe5c) - EQ4_11 */
+ { 0x00000e5d, 0x040b }, /* R3677 (0xe5d) - EQ4_12 */
+ { 0x00000e5e, 0x0cbb }, /* R3678 (0xe5e) - EQ4_13 */
+ { 0x00000e5f, 0x16f8 }, /* R3679 (0xe5f) - EQ4_14 */
+ { 0x00000e60, 0xf7d9 }, /* R3680 (0xe60) - EQ4_15 */
+ { 0x00000e61, 0x040a }, /* R3681 (0xe61) - EQ4_16 */
+ { 0x00000e62, 0x1f14 }, /* R3682 (0xe62) - EQ4_17 */
+ { 0x00000e63, 0x058c }, /* R3683 (0xe63) - EQ4_18 */
+ { 0x00000e64, 0x0563 }, /* R3684 (0xe64) - EQ4_19 */
+ { 0x00000e65, 0x4000 }, /* R3685 (0xe65) - EQ4_20 */
+ { 0x00000e66, 0x0b75 }, /* R3686 (0xe66) - EQ4_21 */
+ { 0x00000e80, 0x0018 }, /* R3712 (0xe80) - DRC1 ctrl1 */
+ { 0x00000e81, 0x0933 }, /* R3713 (0xe81) - DRC1 ctrl2 */
+ { 0x00000e82, 0x0018 }, /* R3714 (0xe82) - DRC1 ctrl3 */
+ { 0x00000e83, 0x0000 }, /* R3715 (0xe83) - DRC1 ctrl4 */
+ { 0x00000e84, 0x0000 }, /* R3716 (0xe84) - DRC1 ctrl5 */
+ { 0x00000e88, 0x0933 }, /* R3720 (0xe88) - DRC2 ctrl1 */
+ { 0x00000e89, 0x0018 }, /* R3721 (0xe89) - DRC2 ctrl2 */
+ { 0x00000e8a, 0x0000 }, /* R3722 (0xe8a) - DRC2 ctrl3 */
+ { 0x00000e8b, 0x0000 }, /* R3723 (0xe8b) - DRC2 ctrl4 */
+ { 0x00000e8c, 0x0040 }, /* R3724 (0xe8c) - DRC2 ctrl5 */
+ { 0x00000ec0, 0x0000 }, /* R3776 (0xec0) - HPLPF1_1 */
+ { 0x00000ec1, 0x0000 }, /* R3777 (0xec1) - HPLPF1_2 */
+ { 0x00000ec4, 0x0000 }, /* R3780 (0xec4) - HPLPF2_1 */
+ { 0x00000ec5, 0x0000 }, /* R3781 (0xec5) - HPLPF2_2 */
+ { 0x00000ec8, 0x0000 }, /* R3784 (0xec8) - HPLPF3_1 */
+ { 0x00000ec9, 0x0000 }, /* R3785 (0xec9) - HPLPF3_2 */
+ { 0x00000ecc, 0x0000 }, /* R3788 (0xecc) - HPLPF4_1 */
+ { 0x00000ecd, 0x0000 }, /* R3789 (0xecd) - HPLPF4_2 */
+ { 0x00000ed0, 0x0000 }, /* R3792 (0xed0) - ASRC2_ENABLE */
+ { 0x00000ed2, 0x0000 }, /* R3794 (0xed2) - ASRC2_RATE1 */
+ { 0x00000ed3, 0x4000 }, /* R3795 (0xed3) - ASRC2_RATE2 */
+ { 0x00000ee0, 0x0000 }, /* R3808 (0xee0) - ASRC1_ENABLE */
+ { 0x00000ee2, 0x0000 }, /* R3810 (0xee2) - ASRC1_RATE1 */
+ { 0x00000ee3, 0x4000 }, /* R3811 (0xee3) - ASRC1_RATE2 */
+ { 0x00000ef0, 0x0000 }, /* R3824 (0xef0) - ISRC 1 CTRL 1 */
+ { 0x00000ef1, 0x0001 }, /* R3825 (0xef1) - ISRC 1 CTRL 2 */
+ { 0x00000ef2, 0x0000 }, /* R3826 (0xef2) - ISRC 1 CTRL 3 */
+ { 0x00000ef3, 0x0000 }, /* R3827 (0xef3) - ISRC 2 CTRL 1 */
+ { 0x00000ef4, 0x0001 }, /* R3828 (0xef4) - ISRC 2 CTRL 2 */
+ { 0x00000ef5, 0x0000 }, /* R3829 (0xef5) - ISRC 2 CTRL 3 */
+ { 0x00000ef6, 0x0000 }, /* R3830 (0xef6) - ISRC 3 CTRL 1 */
+ { 0x00000ef7, 0x0001 }, /* R3831 (0xef7) - ISRC 3 CTRL 2 */
+ { 0x00000ef8, 0x0000 }, /* R3832 (0xef8) - ISRC 3 CTRL 3 */
+ { 0x00000ef9, 0x0000 }, /* R3833 (0xef9) - ISRC 4 CTRL 1 */
+ { 0x00000efa, 0x0001 }, /* R3834 (0xefa) - ISRC 4 CTRL 2 */
+ { 0x00000efb, 0x0000 }, /* R3835 (0xefb) - ISRC 4 CTRL 3 */
+ { 0x00000f01, 0x0000 }, /* R3841 (0xf01) - ANC_SRC */
+ { 0x00000f02, 0x0000 }, /* R3842 (0xf02) - DSP Status */
+ { 0x00000f08, 0x001c }, /* R3848 (0xf08) - ANC Coefficient */
+ { 0x00000f09, 0x0000 }, /* R3849 (0xf09) - ANC Coefficient */
+ { 0x00000f0a, 0x0000 }, /* R3850 (0xf0a) - ANC Coefficient */
+ { 0x00000f0b, 0x0000 }, /* R3851 (0xf0b) - ANC Coefficient */
+ { 0x00000f0c, 0x0000 }, /* R3852 (0xf0c) - ANC Coefficient */
+ { 0x00000f0d, 0x0000 }, /* R3853 (0xf0d) - ANC Coefficient */
+ { 0x00000f0e, 0x0000 }, /* R3854 (0xf0e) - ANC Coefficient */
+ { 0x00000f0f, 0x0000 }, /* R3855 (0xf0f) - ANC Coefficient */
+ { 0x00000f10, 0x0000 }, /* R3856 (0xf10) - ANC Coefficient */
+ { 0x00000f11, 0x0000 }, /* R3857 (0xf11) - ANC Coefficient */
+ { 0x00000f12, 0x0000 }, /* R3858 (0xf12) - ANC Coefficient */
+ { 0x00000f15, 0x0000 }, /* R3861 (0xf15) - FCL Filter Control */
+ { 0x00000f17, 0x0004 }, /* R3863 (0xf17) - FCL ADC Reformatter Control */
+ { 0x00000f18, 0x0004 }, /* R3864 (0xf18) - ANC Coefficient */
+ { 0x00000f19, 0x0002 }, /* R3865 (0xf19) - ANC Coefficient */
+ { 0x00000f1a, 0x0000 }, /* R3866 (0xf1a) - ANC Coefficient */
+ { 0x00000f1b, 0x0010 }, /* R3867 (0xf1b) - ANC Coefficient */
+ { 0x00000f1c, 0x0000 }, /* R3868 (0xf1c) - ANC Coefficient */
+ { 0x00000f1d, 0x0000 }, /* R3869 (0xf1d) - ANC Coefficient */
+ { 0x00000f1e, 0x0000 }, /* R3870 (0xf1e) - ANC Coefficient */
+ { 0x00000f1f, 0x0000 }, /* R3871 (0xf1f) - ANC Coefficient */
+ { 0x00000f20, 0x0000 }, /* R3872 (0xf20) - ANC Coefficient */
+ { 0x00000f21, 0x0000 }, /* R3873 (0xf21) - ANC Coefficient */
+ { 0x00000f22, 0x0000 }, /* R3874 (0xf22) - ANC Coefficient */
+ { 0x00000f23, 0x0000 }, /* R3875 (0xf23) - ANC Coefficient */
+ { 0x00000f24, 0x0000 }, /* R3876 (0xf24) - ANC Coefficient */
+ { 0x00000f25, 0x0000 }, /* R3877 (0xf25) - ANC Coefficient */
+ { 0x00000f26, 0x0000 }, /* R3878 (0xf26) - ANC Coefficient */
+ { 0x00000f27, 0x0000 }, /* R3879 (0xf27) - ANC Coefficient */
+ { 0x00000f28, 0x0000 }, /* R3880 (0xf28) - ANC Coefficient */
+ { 0x00000f29, 0x0000 }, /* R3881 (0xf29) - ANC Coefficient */
+ { 0x00000f2a, 0x0000 }, /* R3882 (0xf2a) - ANC Coefficient */
+ { 0x00000f2b, 0x0000 }, /* R3883 (0xf2b) - ANC Coefficient */
+ { 0x00000f2c, 0x0000 }, /* R3884 (0xf2c) - ANC Coefficient */
+ { 0x00000f2d, 0x0000 }, /* R3885 (0xf2d) - ANC Coefficient */
+ { 0x00000f2e, 0x0000 }, /* R3886 (0xf2e) - ANC Coefficient */
+ { 0x00000f2f, 0x0000 }, /* R3887 (0xf2f) - ANC Coefficient */
+ { 0x00000f30, 0x0000 }, /* R3888 (0xf30) - ANC Coefficient */
+ { 0x00000f31, 0x0000 }, /* R3889 (0xf31) - ANC Coefficient */
+ { 0x00000f32, 0x0000 }, /* R3890 (0xf32) - ANC Coefficient */
+ { 0x00000f33, 0x0000 }, /* R3891 (0xf33) - ANC Coefficient */
+ { 0x00000f34, 0x0000 }, /* R3892 (0xf34) - ANC Coefficient */
+ { 0x00000f35, 0x0000 }, /* R3893 (0xf35) - ANC Coefficient */
+ { 0x00000f36, 0x0000 }, /* R3894 (0xf36) - ANC Coefficient */
+ { 0x00000f37, 0x0000 }, /* R3895 (0xf37) - ANC Coefficient */
+ { 0x00000f38, 0x0000 }, /* R3896 (0xf38) - ANC Coefficient */
+ { 0x00000f39, 0x0000 }, /* R3897 (0xf39) - ANC Coefficient */
+ { 0x00000f3a, 0x0000 }, /* R3898 (0xf3a) - ANC Coefficient */
+ { 0x00000f3b, 0x0000 }, /* R3899 (0xf3b) - ANC Coefficient */
+ { 0x00000f3c, 0x0000 }, /* R3900 (0xf3c) - ANC Coefficient */
+ { 0x00000f3d, 0x0000 }, /* R3901 (0xf3d) - ANC Coefficient */
+ { 0x00000f3e, 0x0000 }, /* R3902 (0xf3e) - ANC Coefficient */
+ { 0x00000f3f, 0x0000 }, /* R3903 (0xf3f) - ANC Coefficient */
+ { 0x00000f40, 0x0000 }, /* R3904 (0xf40) - ANC Coefficient */
+ { 0x00000f41, 0x0000 }, /* R3905 (0xf41) - ANC Coefficient */
+ { 0x00000f42, 0x0000 }, /* R3906 (0xf42) - ANC Coefficient */
+ { 0x00000f43, 0x0000 }, /* R3907 (0xf43) - ANC Coefficient */
+ { 0x00000f44, 0x0000 }, /* R3908 (0xf44) - ANC Coefficient */
+ { 0x00000f45, 0x0000 }, /* R3909 (0xf45) - ANC Coefficient */
+ { 0x00000f46, 0x0000 }, /* R3910 (0xf46) - ANC Coefficient */
+ { 0x00000f47, 0x0000 }, /* R3911 (0xf47) - ANC Coefficient */
+ { 0x00000f48, 0x0000 }, /* R3912 (0xf48) - ANC Coefficient */
+ { 0x00000f49, 0x0000 }, /* R3913 (0xf49) - ANC Coefficient */
+ { 0x00000f4a, 0x0000 }, /* R3914 (0xf4a) - ANC Coefficient */
+ { 0x00000f4b, 0x0000 }, /* R3915 (0xf4b) - ANC Coefficient */
+ { 0x00000f4c, 0x0000 }, /* R3916 (0xf4c) - ANC Coefficient */
+ { 0x00000f4d, 0x0000 }, /* R3917 (0xf4d) - ANC Coefficient */
+ { 0x00000f4e, 0x0000 }, /* R3918 (0xf4e) - ANC Coefficient */
+ { 0x00000f4f, 0x0000 }, /* R3919 (0xf4f) - ANC Coefficient */
+ { 0x00000f50, 0x0000 }, /* R3920 (0xf50) - ANC Coefficient */
+ { 0x00000f51, 0x0000 }, /* R3921 (0xf51) - ANC Coefficient */
+ { 0x00000f52, 0x0000 }, /* R3922 (0xf52) - ANC Coefficient */
+ { 0x00000f53, 0x0000 }, /* R3923 (0xf53) - ANC Coefficient */
+ { 0x00000f54, 0x0000 }, /* R3924 (0xf54) - ANC Coefficient */
+ { 0x00000f55, 0x0000 }, /* R3925 (0xf55) - ANC Coefficient */
+ { 0x00000f56, 0x0000 }, /* R3926 (0xf56) - ANC Coefficient */
+ { 0x00000f57, 0x0000 }, /* R3927 (0xf57) - ANC Coefficient */
+ { 0x00000f58, 0x0000 }, /* R3928 (0xf58) - ANC Coefficient */
+ { 0x00000f59, 0x0000 }, /* R3929 (0xf59) - ANC Coefficient */
+ { 0x00000f5a, 0x0000 }, /* R3930 (0xf5a) - ANC Coefficient */
+ { 0x00000f5b, 0x0000 }, /* R3931 (0xf5b) - ANC Coefficient */
+ { 0x00000f5c, 0x0000 }, /* R3932 (0xf5c) - ANC Coefficient */
+ { 0x00000f5d, 0x0000 }, /* R3933 (0xf5d) - ANC Coefficient */
+ { 0x00000f5e, 0x0000 }, /* R3934 (0xf5e) - ANC Coefficient */
+ { 0x00000f5f, 0x0000 }, /* R3935 (0xf5f) - ANC Coefficient */
+ { 0x00000f60, 0x0000 }, /* R3936 (0xf60) - ANC Coefficient */
+ { 0x00000f61, 0x0000 }, /* R3937 (0xf61) - ANC Coefficient */
+ { 0x00000f62, 0x0000 }, /* R3938 (0xf62) - ANC Coefficient */
+ { 0x00000f63, 0x0000 }, /* R3939 (0xf63) - ANC Coefficient */
+ { 0x00000f64, 0x0000 }, /* R3940 (0xf64) - ANC Coefficient */
+ { 0x00000f65, 0x0000 }, /* R3941 (0xf65) - ANC Coefficient */
+ { 0x00000f66, 0x0000 }, /* R3942 (0xf66) - ANC Coefficient */
+ { 0x00000f67, 0x0000 }, /* R3943 (0xf67) - ANC Coefficient */
+ { 0x00000f68, 0x0000 }, /* R3944 (0xf68) - ANC Coefficient */
+ { 0x00000f69, 0x0000 }, /* R3945 (0xf69) - ANC Coefficient */
+ { 0x00000f71, 0x0000 }, /* R3953 (0xf71) - FCR Filter Control */
+ { 0x00000f73, 0x0004 }, /* R3955 (0xf73) - FCR ADC Reformatter Control */
+ { 0x00000f74, 0x0004 }, /* R3956 (0xf74) - ANC Coefficient */
+ { 0x00000f75, 0x0002 }, /* R3957 (0xf75) - ANC Coefficient */
+ { 0x00000f76, 0x0000 }, /* R3958 (0xf76) - ANC Coefficient */
+ { 0x00000f77, 0x0010 }, /* R3959 (0xf77) - ANC Coefficient */
+ { 0x00000f78, 0x0000 }, /* R3960 (0xf78) - ANC Coefficient */
+ { 0x00000f79, 0x0000 }, /* R3961 (0xf79) - ANC Coefficient */
+ { 0x00000f7a, 0x0000 }, /* R3962 (0xf7a) - ANC Coefficient */
+ { 0x00000f7b, 0x0000 }, /* R3963 (0xf7b) - ANC Coefficient */
+ { 0x00000f7c, 0x0000 }, /* R3964 (0xf7c) - ANC Coefficient */
+ { 0x00000f7d, 0x0000 }, /* R3965 (0xf7d) - ANC Coefficient */
+ { 0x00000f7e, 0x0000 }, /* R3966 (0xf7e) - ANC Coefficient */
+ { 0x00000f7f, 0x0000 }, /* R3967 (0xf7f) - ANC Coefficient */
+ { 0x00000f80, 0x0000 }, /* R3968 (0xf80) - ANC Coefficient */
+ { 0x00000f81, 0x0000 }, /* R3969 (0xf81) - ANC Coefficient */
+ { 0x00000f82, 0x0000 }, /* R3970 (0xf82) - ANC Coefficient */
+ { 0x00000f83, 0x0000 }, /* R3971 (0xf83) - ANC Coefficient */
+ { 0x00000f84, 0x0000 }, /* R3972 (0xf84) - ANC Coefficient */
+ { 0x00000f85, 0x0000 }, /* R3973 (0xf85) - ANC Coefficient */
+ { 0x00000f86, 0x0000 }, /* R3974 (0xf86) - ANC Coefficient */
+ { 0x00000f87, 0x0000 }, /* R3975 (0xf87) - ANC Coefficient */
+ { 0x00000f88, 0x0000 }, /* R3976 (0xf88) - ANC Coefficient */
+ { 0x00000f89, 0x0000 }, /* R3977 (0xf89) - ANC Coefficient */
+ { 0x00000f8a, 0x0000 }, /* R3978 (0xf8a) - ANC Coefficient */
+ { 0x00000f8b, 0x0000 }, /* R3979 (0xf8b) - ANC Coefficient */
+ { 0x00000f8c, 0x0000 }, /* R3980 (0xf8c) - ANC Coefficient */
+ { 0x00000f8d, 0x0000 }, /* R3981 (0xf8d) - ANC Coefficient */
+ { 0x00000f8e, 0x0000 }, /* R3982 (0xf8e) - ANC Coefficient */
+ { 0x00000f8f, 0x0000 }, /* R3983 (0xf8f) - ANC Coefficient */
+ { 0x00000f90, 0x0000 }, /* R3984 (0xf90) - ANC Coefficient */
+ { 0x00000f91, 0x0000 }, /* R3985 (0xf91) - ANC Coefficient */
+ { 0x00000f92, 0x0000 }, /* R3986 (0xf92) - ANC Coefficient */
+ { 0x00000f93, 0x0000 }, /* R3987 (0xf93) - ANC Coefficient */
+ { 0x00000f94, 0x0000 }, /* R3988 (0xf94) - ANC Coefficient */
+ { 0x00000f95, 0x0000 }, /* R3989 (0xf95) - ANC Coefficient */
+ { 0x00000f96, 0x0000 }, /* R3990 (0xf96) - ANC Coefficient */
+ { 0x00000f97, 0x0000 }, /* R3991 (0xf97) - ANC Coefficient */
+ { 0x00000f98, 0x0000 }, /* R3992 (0xf98) - ANC Coefficient */
+ { 0x00000f99, 0x0000 }, /* R3993 (0xf99) - ANC Coefficient */
+ { 0x00000f9a, 0x0000 }, /* R3994 (0xf9a) - ANC Coefficient */
+ { 0x00000f9b, 0x0000 }, /* R3995 (0xf9b) - ANC Coefficient */
+ { 0x00000f9c, 0x0000 }, /* R3996 (0xf9c) - ANC Coefficient */
+ { 0x00000f9d, 0x0000 }, /* R3997 (0xf9d) - ANC Coefficient */
+ { 0x00000f9e, 0x0000 }, /* R3998 (0xf9e) - ANC Coefficient */
+ { 0x00000f9f, 0x0000 }, /* R3999 (0xf9f) - ANC Coefficient */
+ { 0x00000fa0, 0x0000 }, /* R4000 (0xfa0) - ANC Coefficient */
+ { 0x00000fa1, 0x0000 }, /* R4001 (0xfa1) - ANC Coefficient */
+ { 0x00000fa2, 0x0000 }, /* R4002 (0xfa2) - ANC Coefficient */
+ { 0x00000fa3, 0x0000 }, /* R4003 (0xfa3) - ANC Coefficient */
+ { 0x00000fa4, 0x0000 }, /* R4004 (0xfa4) - ANC Coefficient */
+ { 0x00000fa5, 0x0000 }, /* R4005 (0xfa5) - ANC Coefficient */
+ { 0x00000fa6, 0x0000 }, /* R4006 (0xfa6) - ANC Coefficient */
+ { 0x00000fa7, 0x0000 }, /* R4007 (0xfa7) - ANC Coefficient */
+ { 0x00000fa8, 0x0000 }, /* R4008 (0xfa8) - ANC Coefficient */
+ { 0x00000fa9, 0x0000 }, /* R4009 (0xfa9) - ANC Coefficient */
+ { 0x00000faa, 0x0000 }, /* R4010 (0xfaa) - ANC Coefficient */
+ { 0x00000fab, 0x0000 }, /* R4011 (0xfab) - ANC Coefficient */
+ { 0x00000fac, 0x0000 }, /* R4012 (0xfac) - ANC Coefficient */
+ { 0x00000fad, 0x0000 }, /* R4013 (0xfad) - ANC Coefficient */
+ { 0x00000fae, 0x0000 }, /* R4014 (0xfae) - ANC Coefficient */
+ { 0x00000faf, 0x0000 }, /* R4015 (0xfaf) - ANC Coefficient */
+ { 0x00000fb0, 0x0000 }, /* R4016 (0xfb0) - ANC Coefficient */
+ { 0x00000fb1, 0x0000 }, /* R4017 (0xfb1) - ANC Coefficient */
+ { 0x00000fb2, 0x0000 }, /* R4018 (0xfb2) - ANC Coefficient */
+ { 0x00000fb3, 0x0000 }, /* R4019 (0xfb3) - ANC Coefficient */
+ { 0x00000fb4, 0x0000 }, /* R4020 (0xfb4) - ANC Coefficient */
+ { 0x00000fb5, 0x0000 }, /* R4021 (0xfb5) - ANC Coefficient */
+ { 0x00000fb6, 0x0000 }, /* R4022 (0xfb6) - ANC Coefficient */
+ { 0x00000fb7, 0x0000 }, /* R4023 (0xfb7) - ANC Coefficient */
+ { 0x00000fb8, 0x0000 }, /* R4024 (0xfb8) - ANC Coefficient */
+ { 0x00000fb9, 0x0000 }, /* R4025 (0xfb9) - ANC Coefficient */
+ { 0x00000fba, 0x0000 }, /* R4026 (0xfba) - ANC Coefficient */
+ { 0x00000fbb, 0x0000 }, /* R4027 (0xfbb) - ANC Coefficient */
+ { 0x00000fbc, 0x0000 }, /* R4028 (0xfbc) - ANC Coefficient */
+ { 0x00000fbd, 0x0000 }, /* R4029 (0xfbd) - ANC Coefficient */
+ { 0x00000fbe, 0x0000 }, /* R4030 (0xfbe) - ANC Coefficient */
+ { 0x00000fbf, 0x0000 }, /* R4031 (0xfbf) - ANC Coefficient */
+ { 0x00000fc0, 0x0000 }, /* R4032 (0xfc0) - ANC Coefficient */
+ { 0x00000fc1, 0x0000 }, /* R4033 (0xfc1) - ANC Coefficient */
+ { 0x00000fc2, 0x0000 }, /* R4034 (0xfc2) - ANC Coefficient */
+ { 0x00000fc3, 0x0000 }, /* R4035 (0xfc3) - ANC Coefficient */
+ { 0x00000fc4, 0x0000 }, /* R4036 (0xfc4) - ANC Coefficient */
+ { 0x00000fc5, 0x0000 }, /* R4037 (0xfc5) - ANC Coefficient */
+ { 0x00001300, 0x0000 }, /* R4864 (0x1300) - DAC Comp 1 */
+ { 0x00001302, 0x0000 }, /* R4866 (0x1302) - DAC Comp 2 */
+ { 0x00001380, 0x0000 }, /* R4992 (0x1380) - FRF Coefficient 1L 1 */
+ { 0x00001381, 0x0000 }, /* R4993 (0x1381) - FRF Coefficient 1L 2 */
+ { 0x00001382, 0x0000 }, /* R4994 (0x1382) - FRF Coefficient 1L 3 */
+ { 0x00001383, 0x0000 }, /* R4995 (0x1383) - FRF Coefficient 1L 4 */
+ { 0x00001390, 0x0000 }, /* R5008 (0x1390) - FRF Coefficient 1R 1 */
+ { 0x00001391, 0x0000 }, /* R5009 (0x1391) - FRF Coefficient 1R 2 */
+ { 0x00001392, 0x0000 }, /* R5010 (0x1392) - FRF Coefficient 1R 3 */
+ { 0x00001393, 0x0000 }, /* R5011 (0x1393) - FRF Coefficient 1R 4 */
+ { 0x000013a0, 0x0000 }, /* R5024 (0x13a0) - FRF Coefficient 2L 1 */
+ { 0x000013a1, 0x0000 }, /* R5025 (0x13a1) - FRF Coefficient 2L 2 */
+ { 0x000013a2, 0x0000 }, /* R5026 (0x13a2) - FRF Coefficient 2L 3 */
+ { 0x000013a3, 0x0000 }, /* R5027 (0x13a3) - FRF Coefficient 2L 4 */
+ { 0x000013b0, 0x0000 }, /* R5040 (0x13b0) - FRF Coefficient 2R 1 */
+ { 0x000013b1, 0x0000 }, /* R5041 (0x13b1) - FRF Coefficient 2R 2 */
+ { 0x000013b2, 0x0000 }, /* R5042 (0x13b2) - FRF Coefficient 2R 3 */
+ { 0x000013b3, 0x0000 }, /* R5043 (0x13b3) - FRF Coefficient 2R 4 */
+ { 0x000013c0, 0x0000 }, /* R5056 (0x13c0) - FRF Coefficient 3L 1 */
+ { 0x000013c1, 0x0000 }, /* R5057 (0x13c1) - FRF Coefficient 3L 2 */
+ { 0x000013c2, 0x0000 }, /* R5058 (0x13c2) - FRF Coefficient 3L 3 */
+ { 0x000013c3, 0x0000 }, /* R5059 (0x13c3) - FRF Coefficient 3L 4 */
+ { 0x000013d0, 0x0000 }, /* R5072 (0x13d0) - FRF Coefficient 3R 1 */
+ { 0x000013d1, 0x0000 }, /* R5073 (0x13d1) - FRF Coefficient 3R 2 */
+ { 0x000013d2, 0x0000 }, /* R5074 (0x13d2) - FRF Coefficient 3R 3 */
+ { 0x000013d3, 0x0000 }, /* R5075 (0x13d3) - FRF Coefficient 3R 4 */
+ { 0x000013e0, 0x0000 }, /* R5088 (0x13e0) - FRF Coefficient 4L 1 */
+ { 0x000013e1, 0x0000 }, /* R5089 (0x13e1) - FRF Coefficient 4L 2 */
+ { 0x000013e2, 0x0000 }, /* R5090 (0x13e2) - FRF Coefficient 4L 3 */
+ { 0x000013e3, 0x0000 }, /* R5091 (0x13e3) - FRF Coefficient 4L 4 */
+ { 0x000013f0, 0x0000 }, /* R5104 (0x13f0) - FRF Coefficient 4R 1 */
+ { 0x000013f1, 0x0000 }, /* R5105 (0x13f1) - FRF Coefficient 4R 2 */
+ { 0x000013f2, 0x0000 }, /* R5106 (0x13f2) - FRF Coefficient 4R 3 */
+ { 0x000013f3, 0x0000 }, /* R5107 (0x13f3) - FRF Coefficient 4R 4 */
+ { 0x00001400, 0x0000 }, /* R5120 (0x1400) - FRF Coefficient 5L 1 */
+ { 0x00001401, 0x0000 }, /* R5121 (0x1401) - FRF Coefficient 5L 2 */
+ { 0x00001402, 0x0000 }, /* R5122 (0x1402) - FRF Coefficient 5L 3 */
+ { 0x00001403, 0x0000 }, /* R5123 (0x1403) - FRF Coefficient 5L 4 */
+ { 0x00001410, 0x0000 }, /* R5136 (0x1410) - FRF Coefficient 5R 1 */
+ { 0x00001411, 0x0000 }, /* R5137 (0x1411) - FRF Coefficient 5R 2 */
+ { 0x00001412, 0x0000 }, /* R5138 (0x1412) - FRF Coefficient 5R 3 */
+ { 0x00001413, 0x0000 }, /* R5139 (0x1413) - FRF Coefficient 5R 4 */
+ { 0x00001420, 0x0000 }, /* R5152 (0x1420) - FRF Coefficient 6L 1 */
+ { 0x00001421, 0x0000 }, /* R5153 (0x1421) - FRF Coefficient 6L 2 */
+ { 0x00001422, 0x0000 }, /* R5154 (0x1422) - FRF Coefficient 6L 3 */
+ { 0x00001423, 0x0000 }, /* R5155 (0x1423) - FRF Coefficient 6L 4 */
+ { 0x00001430, 0x0000 }, /* R5168 (0x1430) - FRF Coefficient 6R 1 */
+ { 0x00001431, 0x0000 }, /* R5169 (0x1431) - FRF Coefficient 6R 2 */
+ { 0x00001432, 0x0000 }, /* R5170 (0x1432) - FRF Coefficient 6R 3 */
+ { 0x00001433, 0x0000 }, /* R5171 (0x1433) - FRF Coefficient 6R 4 */
+ { 0x00001700, 0x2001 }, /* R5888 (0x1700) - GPIO1 Control 1 */
+ { 0x00001701, 0xe000 }, /* R5889 (0x1701) - GPIO1 Control 2 */
+ { 0x00001702, 0x2001 }, /* R5890 (0x1702) - GPIO2 Control 1 */
+ { 0x00001703, 0xe000 }, /* R5891 (0x1703) - GPIO2 Control 2 */
+ { 0x00001704, 0x2001 }, /* R5892 (0x1704) - GPIO3 Control 1 */
+ { 0x00001705, 0xe000 }, /* R5893 (0x1705) - GPIO3 Control 2 */
+ { 0x00001706, 0x2001 }, /* R5894 (0x1706) - GPIO4 Control 1 */
+ { 0x00001707, 0xe000 }, /* R5895 (0x1707) - GPIO4 Control 2 */
+ { 0x00001708, 0x2001 }, /* R5896 (0x1708) - GPIO5 Control 1 */
+ { 0x00001709, 0xe000 }, /* R5897 (0x1709) - GPIO5 Control 2 */
+ { 0x0000170a, 0x2001 }, /* R5898 (0x170a) - GPIO6 Control 1 */
+ { 0x0000170b, 0xe000 }, /* R5899 (0x170b) - GPIO6 Control 2 */
+ { 0x0000170c, 0x2001 }, /* R5900 (0x170c) - GPIO7 Control 1 */
+ { 0x0000170d, 0xe000 }, /* R5901 (0x170d) - GPIO7 Control 2 */
+ { 0x0000170e, 0x2001 }, /* R5902 (0x170e) - GPIO8 Control 1 */
+ { 0x0000170f, 0xe000 }, /* R5903 (0x170f) - GPIO8 Control 2 */
+ { 0x00001710, 0x2001 }, /* R5904 (0x1710) - GPIO9 Control 1 */
+ { 0x00001711, 0xe000 }, /* R5905 (0x1711) - GPIO9 Control 2 */
+ { 0x00001712, 0x2001 }, /* R5906 (0x1712) - GPIO10 Control 1 */
+ { 0x00001713, 0xe000 }, /* R5907 (0x1713) - GPIO10 Control 2 */
+ { 0x00001714, 0x2001 }, /* R5908 (0x1714) - GPIO11 Control 1 */
+ { 0x00001715, 0xe000 }, /* R5909 (0x1715) - GPIO11 Control 2 */
+ { 0x00001716, 0x2001 }, /* R5910 (0x1716) - GPIO12 Control 1 */
+ { 0x00001717, 0xe000 }, /* R5911 (0x1717) - GPIO12 Control 2 */
+ { 0x00001718, 0x2001 }, /* R5912 (0x1718) - GPIO13 Control 1 */
+ { 0x00001719, 0xe000 }, /* R5913 (0x1719) - GPIO13 Control 2 */
+ { 0x0000171a, 0x2001 }, /* R5914 (0x171a) - GPIO14 Control 1 */
+ { 0x0000171b, 0xe000 }, /* R5915 (0x171b) - GPIO14 Control 2 */
+ { 0x0000171c, 0x2001 }, /* R5916 (0x171c) - GPIO15 Control 1 */
+ { 0x0000171d, 0xe000 }, /* R5917 (0x171d) - GPIO15 Control 2 */
+ { 0x0000171e, 0x2001 }, /* R5918 (0x171e) - GPIO16 Control 1 */
+ { 0x0000171f, 0xe000 }, /* R5919 (0x171f) - GPIO16 Control 2 */
+ { 0x00001720, 0x2001 }, /* R5920 (0x1720) - GPIO17 Control 1 */
+ { 0x00001721, 0xe000 }, /* R5921 (0x1721) - GPIO17 Control 2 */
+ { 0x00001722, 0x2001 }, /* R5922 (0x1722) - GPIO18 Control 1 */
+ { 0x00001723, 0xe000 }, /* R5923 (0x1723) - GPIO18 Control 2 */
+ { 0x00001724, 0x2001 }, /* R5924 (0x1724) - GPIO19 Control 1 */
+ { 0x00001725, 0xe000 }, /* R5925 (0x1725) - GPIO19 Control 2 */
+ { 0x00001726, 0x2001 }, /* R5926 (0x1726) - GPIO20 Control 1 */
+ { 0x00001727, 0xe000 }, /* R5927 (0x1727) - GPIO20 Control 2 */
+ { 0x00001728, 0x2001 }, /* R5928 (0x1728) - GPIO21 Control 1 */
+ { 0x00001729, 0xe000 }, /* R5929 (0x1729) - GPIO21 Control 2 */
+ { 0x0000172a, 0x2001 }, /* R5930 (0x172a) - GPIO22 Control 1 */
+ { 0x0000172b, 0xe000 }, /* R5931 (0x172b) - GPIO22 Control 2 */
+ { 0x0000172c, 0x2001 }, /* R5932 (0x172c) - GPIO23 Control 1 */
+ { 0x0000172d, 0xe000 }, /* R5933 (0x172d) - GPIO23 Control 2 */
+ { 0x0000172e, 0x2001 }, /* R5934 (0x172e) - GPIO24 Control 1 */
+ { 0x0000172f, 0xe000 }, /* R5935 (0x172f) - GPIO24 Control 2 */
+ { 0x00001730, 0x2001 }, /* R5936 (0x1730) - GPIO25 Control 1 */
+ { 0x00001731, 0xe000 }, /* R5937 (0x1731) - GPIO25 Control 2 */
+ { 0x00001732, 0x2001 }, /* R5938 (0x1732) - GPIO26 Control 1 */
+ { 0x00001733, 0xe000 }, /* R5939 (0x1733) - GPIO26 Control 2 */
+ { 0x00001734, 0x2001 }, /* R5940 (0x1734) - GPIO27 Control 1 */
+ { 0x00001735, 0xe000 }, /* R5941 (0x1735) - GPIO27 Control 2 */
+ { 0x00001736, 0x2001 }, /* R5942 (0x1736) - GPIO28 Control 1 */
+ { 0x00001737, 0xe000 }, /* R5943 (0x1737) - GPIO28 Control 2 */
+ { 0x00001738, 0x2001 }, /* R5944 (0x1738) - GPIO29 Control 1 */
+ { 0x00001739, 0xe000 }, /* R5945 (0x1739) - GPIO29 Control 2 */
+ { 0x0000173a, 0x2001 }, /* R5946 (0x173a) - GPIO30 Control 1 */
+ { 0x0000173b, 0xe000 }, /* R5947 (0x173b) - GPIO30 Control 2 */
+ { 0x0000173c, 0x2001 }, /* R5948 (0x173c) - GPIO31 Control 1 */
+ { 0x0000173d, 0xe000 }, /* R5949 (0x173d) - GPIO31 Control 2 */
+ { 0x0000173e, 0x2001 }, /* R5950 (0x173e) - GPIO32 Control 1 */
+ { 0x0000173f, 0xe000 }, /* R5951 (0x173f) - GPIO32 Control 2 */
+ { 0x00001740, 0x2001 }, /* R5952 (0x1740) - GPIO33 Control 1 */
+ { 0x00001741, 0xe000 }, /* R5953 (0x1741) - GPIO33 Control 2 */
+ { 0x00001742, 0x2001 }, /* R5954 (0x1742) - GPIO34 Control 1 */
+ { 0x00001743, 0xe000 }, /* R5955 (0x1743) - GPIO34 Control 2 */
+ { 0x00001744, 0x2001 }, /* R5956 (0x1744) - GPIO35 Control 1 */
+ { 0x00001745, 0xe000 }, /* R5957 (0x1745) - GPIO35 Control 2 */
+ { 0x00001746, 0x2001 }, /* R5958 (0x1746) - GPIO36 Control 1 */
+ { 0x00001747, 0xe000 }, /* R5959 (0x1747) - GPIO36 Control 2 */
+ { 0x00001748, 0x2001 }, /* R5960 (0x1748) - GPIO37 Control 1 */
+ { 0x00001749, 0xe000 }, /* R5961 (0x1749) - GPIO37 Control 2 */
+ { 0x0000174a, 0x2001 }, /* R5962 (0x174a) - GPIO38 Control 1 */
+ { 0x0000174b, 0xe000 }, /* R5963 (0x174b) - GPIO38 Control 2 */
+ { 0x0000174c, 0x2001 }, /* R5964 (0x174c) - GPIO39 Control 1 */
+ { 0x0000174d, 0xe000 }, /* R5965 (0x174d) - GPIO39 Control 2 */
+ { 0x0000174e, 0x2001 }, /* R5966 (0x174e) - GPIO40 Control 1 */
+ { 0x0000174f, 0xe000 }, /* R5967 (0x174f) - GPIO40 Control 2 */
+ { 0x00001840, 0xffff }, /* R6208 (0x1840) - IRQ1 Mask 1 */
+ { 0x00001841, 0xffff }, /* R6209 (0x1841) - IRQ1 Mask 2 */
+ { 0x00001842, 0xffff }, /* R6210 (0x1842) - IRQ1 Mask 3 */
+ { 0x00001843, 0xffff }, /* R6211 (0x1843) - IRQ1 Mask 4 */
+ { 0x00001844, 0xffff }, /* R6212 (0x1844) - IRQ1 Mask 5 */
+ { 0x00001845, 0xffff }, /* R6213 (0x1845) - IRQ1 Mask 6 */
+ { 0x00001846, 0xffff }, /* R6214 (0x1846) - IRQ1 Mask 7 */
+ { 0x00001847, 0xffff }, /* R6215 (0x1847) - IRQ1 Mask 8 */
+ { 0x00001848, 0xffff }, /* R6216 (0x1848) - IRQ1 Mask 9 */
+ { 0x00001849, 0xffff }, /* R6217 (0x1849) - IRQ1 Mask 10 */
+ { 0x0000184a, 0xffff }, /* R6218 (0x184a) - IRQ1 Mask 11 */
+ { 0x0000184b, 0xffff }, /* R6219 (0x184b) - IRQ1 Mask 12 */
+ { 0x0000184c, 0xffff }, /* R6220 (0x184c) - IRQ1 Mask 13 */
+ { 0x0000184d, 0xffff }, /* R6221 (0x184d) - IRQ1 Mask 14 */
+ { 0x0000184e, 0xffff }, /* R6222 (0x184e) - IRQ1 Mask 15 */
+ { 0x0000184f, 0xffff }, /* R6223 (0x184f) - IRQ1 Mask 16 */
+ { 0x00001850, 0xffff }, /* R6224 (0x1850) - IRQ1 Mask 17 */
+ { 0x00001851, 0xffff }, /* R6225 (0x1851) - IRQ1 Mask 18 */
+ { 0x00001852, 0xffff }, /* R6226 (0x1852) - IRQ1 Mask 19 */
+ { 0x00001853, 0xffff }, /* R6227 (0x1853) - IRQ1 Mask 20 */
+ { 0x00001854, 0xffff }, /* R6228 (0x1854) - IRQ1 Mask 21 */
+ { 0x00001855, 0xffff }, /* R6229 (0x1855) - IRQ1 Mask 22 */
+ { 0x00001856, 0xffff }, /* R6230 (0x1856) - IRQ1 Mask 23 */
+ { 0x00001857, 0xffff }, /* R6231 (0x1857) - IRQ1 Mask 24 */
+ { 0x00001858, 0xffff }, /* R6232 (0x1858) - IRQ1 Mask 25 */
+ { 0x00001859, 0xffff }, /* R6233 (0x1859) - IRQ1 Mask 26 */
+ { 0x0000185a, 0xffff }, /* R6234 (0x185a) - IRQ1 Mask 27 */
+ { 0x0000185b, 0xffff }, /* R6235 (0x185b) - IRQ1 Mask 28 */
+ { 0x0000185c, 0xffff }, /* R6236 (0x185c) - IRQ1 Mask 29 */
+ { 0x0000185d, 0xffff }, /* R6237 (0x185d) - IRQ1 Mask 30 */
+ { 0x0000185e, 0xffff }, /* R6238 (0x185e) - IRQ1 Mask 31 */
+ { 0x0000185f, 0xffff }, /* R6239 (0x185f) - IRQ1 Mask 32 */
+ { 0x00001860, 0xffff }, /* R6240 (0x1860) - IRQ1 Mask 33 */
+ { 0x00001a06, 0x0000 }, /* R6662 (0x1a06) - Interrupt Debounce 7 */
+ { 0x00001a80, 0x4400 }, /* R6784 (0x1a80) - IRQ1 CTRL */
+};
+
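+/*
+ * Test whether reg falls within one of the on-chip ADSP memory regions,
+ * which are interleaved across the upper register map.
+ */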
+static bool cs47l85_is_adsp_memory(unsigned int reg)
+{
+ switch (reg) {
+ case 0x080000 ... 0x085ffe:
+ case 0x0a0000 ... 0x0a7ffe:
+ case 0x0c0000 ... 0x0c1ffe:
+ case 0x0e0000 ... 0x0e1ffe:
+ case 0x100000 ... 0x10effe:
+ case 0x120000 ... 0x12bffe:
+ case 0x136000 ... 0x137ffe:
+ case 0x140000 ... 0x14bffe:
+ case 0x160000 ... 0x161ffe:
+ case 0x180000 ... 0x18effe:
+ case 0x1a0000 ... 0x1b1ffe:
+ case 0x1b6000 ... 0x1b7ffe:
+ case 0x1c0000 ... 0x1cbffe:
+ case 0x1e0000 ... 0x1e1ffe:
+ case 0x200000 ... 0x208ffe:
+ case 0x220000 ... 0x231ffe:
+ case 0x240000 ... 0x24bffe:
+ case 0x260000 ... 0x261ffe:
+ case 0x280000 ... 0x288ffe:
+ case 0x2a0000 ... 0x2a9ffe:
+ case 0x2c0000 ... 0x2c3ffe:
+ case 0x2e0000 ... 0x2e1ffe:
+ case 0x300000 ... 0x305ffe:
+ case 0x320000 ... 0x333ffe:
+ case 0x340000 ... 0x34bffe:
+ case 0x360000 ... 0x361ffe:
+ case 0x380000 ... 0x388ffe:
+ case 0x3a0000 ... 0x3a7ffe:
+ case 0x3c0000 ... 0x3c1ffe:
+ case 0x3e0000 ... 0x3e1ffe:
+ return true;
+ default:
+ return false;
+ }
+}
+
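+/* regmap .readable_reg callback for the 16-bit register map */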
+static bool cs47l85_16bit_readable_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_SOFTWARE_RESET:
+ case MADERA_HARDWARE_REVISION:
+ case MADERA_WRITE_SEQUENCER_CTRL_0:
+ case MADERA_WRITE_SEQUENCER_CTRL_1:
+ case MADERA_WRITE_SEQUENCER_CTRL_2:
+ case MADERA_TONE_GENERATOR_1:
+ case MADERA_TONE_GENERATOR_2:
+ case MADERA_TONE_GENERATOR_3:
+ case MADERA_TONE_GENERATOR_4:
+ case MADERA_TONE_GENERATOR_5:
+ case MADERA_PWM_DRIVE_1:
+ case MADERA_PWM_DRIVE_2:
+ case MADERA_PWM_DRIVE_3:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+ case MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+ case MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+ case MADERA_HAPTICS_CONTROL_1:
+ case MADERA_HAPTICS_CONTROL_2:
+ case MADERA_HAPTICS_PHASE_1_INTENSITY:
+ case MADERA_HAPTICS_PHASE_1_DURATION:
+ case MADERA_HAPTICS_PHASE_2_INTENSITY:
+ case MADERA_HAPTICS_PHASE_2_DURATION:
+ case MADERA_HAPTICS_PHASE_3_INTENSITY:
+ case MADERA_HAPTICS_PHASE_3_DURATION:
+ case MADERA_HAPTICS_STATUS:
+ case MADERA_COMFORT_NOISE_GENERATOR:
+ case MADERA_CLOCK_32K_1:
+ case MADERA_SYSTEM_CLOCK_1:
+ case MADERA_SAMPLE_RATE_1:
+ case MADERA_SAMPLE_RATE_2:
+ case MADERA_SAMPLE_RATE_3:
+ case MADERA_SAMPLE_RATE_1_STATUS:
+ case MADERA_SAMPLE_RATE_2_STATUS:
+ case MADERA_SAMPLE_RATE_3_STATUS:
+ case MADERA_ASYNC_CLOCK_1:
+ case MADERA_ASYNC_SAMPLE_RATE_1:
+ case MADERA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case MADERA_ASYNC_SAMPLE_RATE_2:
+ case MADERA_ASYNC_SAMPLE_RATE_2_STATUS:
+ case MADERA_DSP_CLOCK_1:
+ case MADERA_DSP_CLOCK_2:
+ case MADERA_OUTPUT_SYSTEM_CLOCK:
+ case MADERA_OUTPUT_ASYNC_CLOCK:
+ case MADERA_RATE_ESTIMATOR_1:
+ case MADERA_RATE_ESTIMATOR_2:
+ case MADERA_RATE_ESTIMATOR_3:
+ case MADERA_RATE_ESTIMATOR_4:
+ case MADERA_RATE_ESTIMATOR_5:
+ case MADERA_FLL1_CONTROL_1:
+ case MADERA_FLL1_CONTROL_2:
+ case MADERA_FLL1_CONTROL_3:
+ case MADERA_FLL1_CONTROL_4:
+ case MADERA_FLL1_CONTROL_5:
+ case MADERA_FLL1_CONTROL_6:
+ case MADERA_FLL1_CONTROL_7:
+ case MADERA_FLL1_LOOP_FILTER_TEST_1:
+ case MADERA_FLL1_SYNCHRONISER_1:
+ case MADERA_FLL1_SYNCHRONISER_2:
+ case MADERA_FLL1_SYNCHRONISER_3:
+ case MADERA_FLL1_SYNCHRONISER_4:
+ case MADERA_FLL1_SYNCHRONISER_5:
+ case MADERA_FLL1_SYNCHRONISER_6:
+ case MADERA_FLL1_SYNCHRONISER_7:
+ case MADERA_FLL1_SPREAD_SPECTRUM:
+ case MADERA_FLL1_GPIO_CLOCK:
+ case MADERA_FLL2_CONTROL_1:
+ case MADERA_FLL2_CONTROL_2:
+ case MADERA_FLL2_CONTROL_3:
+ case MADERA_FLL2_CONTROL_4:
+ case MADERA_FLL2_CONTROL_5:
+ case MADERA_FLL2_CONTROL_6:
+ case MADERA_FLL2_CONTROL_7:
+ case MADERA_FLL2_LOOP_FILTER_TEST_1:
+ case MADERA_FLL2_SYNCHRONISER_1:
+ case MADERA_FLL2_SYNCHRONISER_2:
+ case MADERA_FLL2_SYNCHRONISER_3:
+ case MADERA_FLL2_SYNCHRONISER_4:
+ case MADERA_FLL2_SYNCHRONISER_5:
+ case MADERA_FLL2_SYNCHRONISER_6:
+ case MADERA_FLL2_SYNCHRONISER_7:
+ case MADERA_FLL2_SPREAD_SPECTRUM:
+ case MADERA_FLL2_GPIO_CLOCK:
+ case MADERA_FLL3_CONTROL_1:
+ case MADERA_FLL3_CONTROL_2:
+ case MADERA_FLL3_CONTROL_3:
+ case MADERA_FLL3_CONTROL_4:
+ case MADERA_FLL3_CONTROL_5:
+ case MADERA_FLL3_CONTROL_6:
+ case MADERA_FLL3_CONTROL_7:
+ case MADERA_FLL3_LOOP_FILTER_TEST_1:
+ case MADERA_FLL3_SYNCHRONISER_1:
+ case MADERA_FLL3_SYNCHRONISER_2:
+ case MADERA_FLL3_SYNCHRONISER_3:
+ case MADERA_FLL3_SYNCHRONISER_4:
+ case MADERA_FLL3_SYNCHRONISER_5:
+ case MADERA_FLL3_SYNCHRONISER_6:
+ case MADERA_FLL3_SYNCHRONISER_7:
+ case MADERA_FLL3_SPREAD_SPECTRUM:
+ case MADERA_FLL3_GPIO_CLOCK:
+ case MADERA_MIC_CHARGE_PUMP_1:
+ case MADERA_HP_CHARGE_PUMP_8:
+ case MADERA_LDO1_CONTROL_1:
+ case MADERA_LDO2_CONTROL_1:
+ case MADERA_MIC_BIAS_CTRL_1:
+ case MADERA_MIC_BIAS_CTRL_2:
+ case MADERA_MIC_BIAS_CTRL_3:
+ case MADERA_MIC_BIAS_CTRL_4:
+ case MADERA_HP_CTRL_1L:
+ case MADERA_HP_CTRL_1R:
+ case MADERA_HP_CTRL_2L:
+ case MADERA_HP_CTRL_2R:
+ case MADERA_HP_CTRL_3L:
+ case MADERA_HP_CTRL_3R:
+ case MADERA_DCS_HP1L_CONTROL:
+ case MADERA_DCS_HP1R_CONTROL:
+ case MADERA_EDRE_HP_STEREO_CONTROL:
+ case MADERA_ACCESSORY_DETECT_MODE_1:
+ case MADERA_HEADPHONE_DETECT_1:
+ case MADERA_HEADPHONE_DETECT_2:
+ case MADERA_HEADPHONE_DETECT_3:
+ case MADERA_HEADPHONE_DETECT_5:
+ case MADERA_MICD_CLAMP_CONTROL:
+ case MADERA_MIC_DETECT_1_CONTROL_1:
+ case MADERA_MIC_DETECT_1_CONTROL_2:
+ case MADERA_MIC_DETECT_1_CONTROL_3:
+ case MADERA_MIC_DETECT_1_LEVEL_1:
+ case MADERA_MIC_DETECT_1_LEVEL_2:
+ case MADERA_MIC_DETECT_1_LEVEL_3:
+ case MADERA_MIC_DETECT_1_LEVEL_4:
+ case MADERA_MIC_DETECT_1_CONTROL_4:
+ case MADERA_GP_SWITCH_1:
+ case MADERA_JACK_DETECT_ANALOGUE:
+ case MADERA_INPUT_ENABLES:
+ case MADERA_INPUT_ENABLES_STATUS:
+ case MADERA_INPUT_RATE:
+ case MADERA_INPUT_VOLUME_RAMP:
+ case MADERA_HPF_CONTROL:
+ case MADERA_IN1L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_1L:
+ case MADERA_DMIC1L_CONTROL:
+ case MADERA_IN1R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_1R:
+ case MADERA_DMIC1R_CONTROL:
+ case MADERA_IN2L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_2L:
+ case MADERA_DMIC2L_CONTROL:
+ case MADERA_IN2R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_2R:
+ case MADERA_DMIC2R_CONTROL:
+ case MADERA_IN3L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_3L:
+ case MADERA_DMIC3L_CONTROL:
+ case MADERA_IN3R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_3R:
+ case MADERA_DMIC3R_CONTROL:
+ case MADERA_IN4L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_4L:
+ case MADERA_DMIC4L_CONTROL:
+ case MADERA_IN4R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_4R:
+ case MADERA_DMIC4R_CONTROL:
+ case MADERA_IN5L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_5L:
+ case MADERA_DMIC5L_CONTROL:
+ case MADERA_IN5R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_5R:
+ case MADERA_DMIC5R_CONTROL:
+ case MADERA_IN6L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_6L:
+ case MADERA_DMIC6L_CONTROL:
+ case MADERA_IN6R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_6R:
+ case MADERA_DMIC6R_CONTROL:
+ case MADERA_OUTPUT_ENABLES_1:
+ case MADERA_OUTPUT_STATUS_1:
+ case MADERA_RAW_OUTPUT_STATUS_1:
+ case MADERA_OUTPUT_RATE_1:
+ case MADERA_OUTPUT_VOLUME_RAMP:
+ case MADERA_OUTPUT_PATH_CONFIG_1L:
+ case MADERA_DAC_DIGITAL_VOLUME_1L:
+ case MADERA_NOISE_GATE_SELECT_1L:
+ case MADERA_OUTPUT_PATH_CONFIG_1R:
+ case MADERA_DAC_DIGITAL_VOLUME_1R:
+ case MADERA_NOISE_GATE_SELECT_1R:
+ case MADERA_OUTPUT_PATH_CONFIG_2L:
+ case MADERA_DAC_DIGITAL_VOLUME_2L:
+ case MADERA_NOISE_GATE_SELECT_2L:
+ case MADERA_OUTPUT_PATH_CONFIG_2R:
+ case MADERA_DAC_DIGITAL_VOLUME_2R:
+ case MADERA_NOISE_GATE_SELECT_2R:
+ case MADERA_OUTPUT_PATH_CONFIG_3L:
+ case MADERA_DAC_DIGITAL_VOLUME_3L:
+ case MADERA_NOISE_GATE_SELECT_3L:
+ case MADERA_OUTPUT_PATH_CONFIG_3R:
+ case MADERA_DAC_DIGITAL_VOLUME_3R:
+ case MADERA_NOISE_GATE_SELECT_3R:
+ case MADERA_OUTPUT_PATH_CONFIG_4L:
+ case MADERA_DAC_DIGITAL_VOLUME_4L:
+ case MADERA_NOISE_GATE_SELECT_4L:
+ case MADERA_OUTPUT_PATH_CONFIG_4R:
+ case MADERA_DAC_DIGITAL_VOLUME_4R:
+ case MADERA_NOISE_GATE_SELECT_4R:
+ case MADERA_OUTPUT_PATH_CONFIG_5L:
+ case MADERA_DAC_DIGITAL_VOLUME_5L:
+ case MADERA_NOISE_GATE_SELECT_5L:
+ case MADERA_OUTPUT_PATH_CONFIG_5R:
+ case MADERA_DAC_DIGITAL_VOLUME_5R:
+ case MADERA_NOISE_GATE_SELECT_5R:
+ case MADERA_OUTPUT_PATH_CONFIG_6L:
+ case MADERA_DAC_DIGITAL_VOLUME_6L:
+ case MADERA_NOISE_GATE_SELECT_6L:
+ case MADERA_OUTPUT_PATH_CONFIG_6R:
+ case MADERA_DAC_DIGITAL_VOLUME_6R:
+ case MADERA_NOISE_GATE_SELECT_6R:
+ case MADERA_DRE_ENABLE:
+ case MADERA_EDRE_ENABLE:
+ case MADERA_EDRE_MANUAL:
+ case MADERA_DAC_AEC_CONTROL_1:
+ case MADERA_DAC_AEC_CONTROL_2:
+ case MADERA_NOISE_GATE_CONTROL:
+ case MADERA_PDM_SPK1_CTRL_1:
+ case MADERA_PDM_SPK1_CTRL_2:
+ case MADERA_PDM_SPK2_CTRL_1:
+ case MADERA_PDM_SPK2_CTRL_2:
+ case MADERA_HP1_SHORT_CIRCUIT_CTRL:
+ case MADERA_HP2_SHORT_CIRCUIT_CTRL:
+ case MADERA_HP3_SHORT_CIRCUIT_CTRL:
+ case MADERA_HP_TEST_CTRL_5:
+ case MADERA_HP_TEST_CTRL_6:
+ case MADERA_AIF1_BCLK_CTRL:
+ case MADERA_AIF1_TX_PIN_CTRL:
+ case MADERA_AIF1_RX_PIN_CTRL:
+ case MADERA_AIF1_RATE_CTRL:
+ case MADERA_AIF1_FORMAT:
+ case MADERA_AIF1_RX_BCLK_RATE:
+ case MADERA_AIF1_FRAME_CTRL_1:
+ case MADERA_AIF1_FRAME_CTRL_2:
+ case MADERA_AIF1_FRAME_CTRL_3:
+ case MADERA_AIF1_FRAME_CTRL_4:
+ case MADERA_AIF1_FRAME_CTRL_5:
+ case MADERA_AIF1_FRAME_CTRL_6:
+ case MADERA_AIF1_FRAME_CTRL_7:
+ case MADERA_AIF1_FRAME_CTRL_8:
+ case MADERA_AIF1_FRAME_CTRL_9:
+ case MADERA_AIF1_FRAME_CTRL_10:
+ case MADERA_AIF1_FRAME_CTRL_11:
+ case MADERA_AIF1_FRAME_CTRL_12:
+ case MADERA_AIF1_FRAME_CTRL_13:
+ case MADERA_AIF1_FRAME_CTRL_14:
+ case MADERA_AIF1_FRAME_CTRL_15:
+ case MADERA_AIF1_FRAME_CTRL_16:
+ case MADERA_AIF1_FRAME_CTRL_17:
+ case MADERA_AIF1_FRAME_CTRL_18:
+ case MADERA_AIF1_TX_ENABLES:
+ case MADERA_AIF1_RX_ENABLES:
+ case MADERA_AIF2_BCLK_CTRL:
+ case MADERA_AIF2_TX_PIN_CTRL:
+ case MADERA_AIF2_RX_PIN_CTRL:
+ case MADERA_AIF2_RATE_CTRL:
+ case MADERA_AIF2_FORMAT:
+ case MADERA_AIF2_RX_BCLK_RATE:
+ case MADERA_AIF2_FRAME_CTRL_1:
+ case MADERA_AIF2_FRAME_CTRL_2:
+ case MADERA_AIF2_FRAME_CTRL_3:
+ case MADERA_AIF2_FRAME_CTRL_4:
+ case MADERA_AIF2_FRAME_CTRL_5:
+ case MADERA_AIF2_FRAME_CTRL_6:
+ case MADERA_AIF2_FRAME_CTRL_7:
+ case MADERA_AIF2_FRAME_CTRL_8:
+ case MADERA_AIF2_FRAME_CTRL_9:
+ case MADERA_AIF2_FRAME_CTRL_10:
+ case MADERA_AIF2_FRAME_CTRL_11:
+ case MADERA_AIF2_FRAME_CTRL_12:
+ case MADERA_AIF2_FRAME_CTRL_13:
+ case MADERA_AIF2_FRAME_CTRL_14:
+ case MADERA_AIF2_FRAME_CTRL_15:
+ case MADERA_AIF2_FRAME_CTRL_16:
+ case MADERA_AIF2_FRAME_CTRL_17:
+ case MADERA_AIF2_FRAME_CTRL_18:
+ case MADERA_AIF2_TX_ENABLES:
+ case MADERA_AIF2_RX_ENABLES:
+ case MADERA_AIF3_BCLK_CTRL:
+ case MADERA_AIF3_TX_PIN_CTRL:
+ case MADERA_AIF3_RX_PIN_CTRL:
+ case MADERA_AIF3_RATE_CTRL:
+ case MADERA_AIF3_FORMAT:
+ case MADERA_AIF3_RX_BCLK_RATE:
+ case MADERA_AIF3_FRAME_CTRL_1:
+ case MADERA_AIF3_FRAME_CTRL_2:
+ case MADERA_AIF3_FRAME_CTRL_3:
+ case MADERA_AIF3_FRAME_CTRL_4:
+ case MADERA_AIF3_FRAME_CTRL_11:
+ case MADERA_AIF3_FRAME_CTRL_12:
+ case MADERA_AIF3_TX_ENABLES:
+ case MADERA_AIF3_RX_ENABLES:
+ case MADERA_AIF4_BCLK_CTRL:
+ case MADERA_AIF4_TX_PIN_CTRL:
+ case MADERA_AIF4_RX_PIN_CTRL:
+ case MADERA_AIF4_RATE_CTRL:
+ case MADERA_AIF4_FORMAT:
+ case MADERA_AIF4_RX_BCLK_RATE:
+ case MADERA_AIF4_FRAME_CTRL_1:
+ case MADERA_AIF4_FRAME_CTRL_2:
+ case MADERA_AIF4_FRAME_CTRL_3:
+ case MADERA_AIF4_FRAME_CTRL_4:
+ case MADERA_AIF4_FRAME_CTRL_11:
+ case MADERA_AIF4_FRAME_CTRL_12:
+ case MADERA_AIF4_TX_ENABLES:
+ case MADERA_AIF4_RX_ENABLES:
+ case MADERA_SPD1_TX_CONTROL:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_2:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_3:
+ case MADERA_SLIMBUS_FRAMER_REF_GEAR:
+ case MADERA_SLIMBUS_RATES_1:
+ case MADERA_SLIMBUS_RATES_2:
+ case MADERA_SLIMBUS_RATES_3:
+ case MADERA_SLIMBUS_RATES_4:
+ case MADERA_SLIMBUS_RATES_5:
+ case MADERA_SLIMBUS_RATES_6:
+ case MADERA_SLIMBUS_RATES_7:
+ case MADERA_SLIMBUS_RATES_8:
+ case MADERA_SLIMBUS_RX_CHANNEL_ENABLE:
+ case MADERA_SLIMBUS_TX_CHANNEL_ENABLE:
+ case MADERA_SLIMBUS_RX_PORT_STATUS:
+ case MADERA_SLIMBUS_TX_PORT_STATUS:
+ case MADERA_PWM1MIX_INPUT_1_SOURCE:
+ case MADERA_PWM1MIX_INPUT_1_VOLUME:
+ case MADERA_PWM1MIX_INPUT_2_SOURCE:
+ case MADERA_PWM1MIX_INPUT_2_VOLUME:
+ case MADERA_PWM1MIX_INPUT_3_SOURCE:
+ case MADERA_PWM1MIX_INPUT_3_VOLUME:
+ case MADERA_PWM1MIX_INPUT_4_SOURCE:
+ case MADERA_PWM1MIX_INPUT_4_VOLUME:
+ case MADERA_PWM2MIX_INPUT_1_SOURCE:
+ case MADERA_PWM2MIX_INPUT_1_VOLUME:
+ case MADERA_PWM2MIX_INPUT_2_SOURCE:
+ case MADERA_PWM2MIX_INPUT_2_VOLUME:
+ case MADERA_PWM2MIX_INPUT_3_SOURCE:
+ case MADERA_PWM2MIX_INPUT_3_VOLUME:
+ case MADERA_PWM2MIX_INPUT_4_SOURCE:
+ case MADERA_PWM2MIX_INPUT_4_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT4LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT4LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT4RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT4RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT4RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT4RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT4RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT4RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT4RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT4RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT6LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT6LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT6LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT6LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT6LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT6LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT6LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT6LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT6RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT6RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT6RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT6RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT6RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT6RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT6RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT6RMIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_4_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_4_VOLUME:
+ case MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE:
+ case MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME:
+ case MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE:
+ case MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME:
+ case MADERA_EQ1MIX_INPUT_1_SOURCE:
+ case MADERA_EQ1MIX_INPUT_1_VOLUME:
+ case MADERA_EQ1MIX_INPUT_2_SOURCE:
+ case MADERA_EQ1MIX_INPUT_2_VOLUME:
+ case MADERA_EQ1MIX_INPUT_3_SOURCE:
+ case MADERA_EQ1MIX_INPUT_3_VOLUME:
+ case MADERA_EQ1MIX_INPUT_4_SOURCE:
+ case MADERA_EQ1MIX_INPUT_4_VOLUME:
+ case MADERA_EQ2MIX_INPUT_1_SOURCE:
+ case MADERA_EQ2MIX_INPUT_1_VOLUME:
+ case MADERA_EQ2MIX_INPUT_2_SOURCE:
+ case MADERA_EQ2MIX_INPUT_2_VOLUME:
+ case MADERA_EQ2MIX_INPUT_3_SOURCE:
+ case MADERA_EQ2MIX_INPUT_3_VOLUME:
+ case MADERA_EQ2MIX_INPUT_4_SOURCE:
+ case MADERA_EQ2MIX_INPUT_4_VOLUME:
+ case MADERA_EQ3MIX_INPUT_1_SOURCE:
+ case MADERA_EQ3MIX_INPUT_1_VOLUME:
+ case MADERA_EQ3MIX_INPUT_2_SOURCE:
+ case MADERA_EQ3MIX_INPUT_2_VOLUME:
+ case MADERA_EQ3MIX_INPUT_3_SOURCE:
+ case MADERA_EQ3MIX_INPUT_3_VOLUME:
+ case MADERA_EQ3MIX_INPUT_4_SOURCE:
+ case MADERA_EQ3MIX_INPUT_4_VOLUME:
+ case MADERA_EQ4MIX_INPUT_1_SOURCE:
+ case MADERA_EQ4MIX_INPUT_1_VOLUME:
+ case MADERA_EQ4MIX_INPUT_2_SOURCE:
+ case MADERA_EQ4MIX_INPUT_2_VOLUME:
+ case MADERA_EQ4MIX_INPUT_3_SOURCE:
+ case MADERA_EQ4MIX_INPUT_3_VOLUME:
+ case MADERA_EQ4MIX_INPUT_4_SOURCE:
+ case MADERA_EQ4MIX_INPUT_4_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_1_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_1_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_2_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_2_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_3_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_3_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_4_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_4_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_1_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_1_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_2_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_2_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_3_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_3_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_4_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_4_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_1_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_1_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_2_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_2_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_3_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_3_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_4_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_4_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_1_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_1_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_2_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_2_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_3_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_3_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_4_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_4_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_4_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP1AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP2AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP3AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP4LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP4LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP4LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP4AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP5LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP5LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP5LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP5AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_1LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_1RMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_2LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_2RMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_1LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_1RMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_2LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_2RMIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4INT2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP6LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP6LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP6LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP6AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP7LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP7LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP7LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP7AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_FX_CTRL1:
+ case MADERA_FX_CTRL2:
+ case MADERA_EQ1_1 ... MADERA_EQ1_21:
+ case MADERA_EQ2_1 ... MADERA_EQ2_21:
+ case MADERA_EQ3_1 ... MADERA_EQ3_21:
+ case MADERA_EQ4_1 ... MADERA_EQ4_21:
+ case MADERA_DRC1_CTRL1:
+ case MADERA_DRC1_CTRL2:
+ case MADERA_DRC1_CTRL3:
+ case MADERA_DRC1_CTRL4:
+ case MADERA_DRC1_CTRL5:
+ case MADERA_DRC2_CTRL1:
+ case MADERA_DRC2_CTRL2:
+ case MADERA_DRC2_CTRL3:
+ case MADERA_DRC2_CTRL4:
+ case MADERA_DRC2_CTRL5:
+ case MADERA_HPLPF1_1:
+ case MADERA_HPLPF1_2:
+ case MADERA_HPLPF2_1:
+ case MADERA_HPLPF2_2:
+ case MADERA_HPLPF3_1:
+ case MADERA_HPLPF3_2:
+ case MADERA_HPLPF4_1:
+ case MADERA_HPLPF4_2:
+ case MADERA_ASRC1_ENABLE:
+ case MADERA_ASRC1_STATUS:
+ case MADERA_ASRC1_RATE1:
+ case MADERA_ASRC1_RATE2:
+ case MADERA_ASRC2_ENABLE:
+ case MADERA_ASRC2_STATUS:
+ case MADERA_ASRC2_RATE1:
+ case MADERA_ASRC2_RATE2:
+ case MADERA_ISRC_1_CTRL_1:
+ case MADERA_ISRC_1_CTRL_2:
+ case MADERA_ISRC_1_CTRL_3:
+ case MADERA_ISRC_2_CTRL_1:
+ case MADERA_ISRC_2_CTRL_2:
+ case MADERA_ISRC_2_CTRL_3:
+ case MADERA_ISRC_3_CTRL_1:
+ case MADERA_ISRC_3_CTRL_2:
+ case MADERA_ISRC_3_CTRL_3:
+ case MADERA_ISRC_4_CTRL_1:
+ case MADERA_ISRC_4_CTRL_2:
+ case MADERA_ISRC_4_CTRL_3:
+ case MADERA_CLOCK_CONTROL:
+ case MADERA_ANC_SRC:
+ case MADERA_DSP_STATUS:
+ case MADERA_ANC_COEFF_START ... MADERA_ANC_COEFF_END:
+ case MADERA_FCL_FILTER_CONTROL:
+ case MADERA_FCL_ADC_REFORMATTER_CONTROL:
+ case MADERA_FCL_COEFF_START ... MADERA_FCL_COEFF_END:
+ case MADERA_FCR_FILTER_CONTROL:
+ case MADERA_FCR_ADC_REFORMATTER_CONTROL:
+ case MADERA_FCR_COEFF_START ... MADERA_FCR_COEFF_END:
+ case MADERA_DAC_COMP_1:
+ case MADERA_DAC_COMP_2:
+ case MADERA_FRF_COEFFICIENT_1L_1:
+ case MADERA_FRF_COEFFICIENT_1L_2:
+ case MADERA_FRF_COEFFICIENT_1L_3:
+ case MADERA_FRF_COEFFICIENT_1L_4:
+ case MADERA_FRF_COEFFICIENT_1R_1:
+ case MADERA_FRF_COEFFICIENT_1R_2:
+ case MADERA_FRF_COEFFICIENT_1R_3:
+ case MADERA_FRF_COEFFICIENT_1R_4:
+ case MADERA_FRF_COEFFICIENT_2L_1:
+ case MADERA_FRF_COEFFICIENT_2L_2:
+ case MADERA_FRF_COEFFICIENT_2L_3:
+ case MADERA_FRF_COEFFICIENT_2L_4:
+ case MADERA_FRF_COEFFICIENT_2R_1:
+ case MADERA_FRF_COEFFICIENT_2R_2:
+ case MADERA_FRF_COEFFICIENT_2R_3:
+ case MADERA_FRF_COEFFICIENT_2R_4:
+ case MADERA_FRF_COEFFICIENT_3L_1:
+ case MADERA_FRF_COEFFICIENT_3L_2:
+ case MADERA_FRF_COEFFICIENT_3L_3:
+ case MADERA_FRF_COEFFICIENT_3L_4:
+ case MADERA_FRF_COEFFICIENT_3R_1:
+ case MADERA_FRF_COEFFICIENT_3R_2:
+ case MADERA_FRF_COEFFICIENT_3R_3:
+ case MADERA_FRF_COEFFICIENT_3R_4:
+ case MADERA_FRF_COEFFICIENT_4L_1:
+ case MADERA_FRF_COEFFICIENT_4L_2:
+ case MADERA_FRF_COEFFICIENT_4L_3:
+ case MADERA_FRF_COEFFICIENT_4L_4:
+ case MADERA_FRF_COEFFICIENT_4R_1:
+ case MADERA_FRF_COEFFICIENT_4R_2:
+ case MADERA_FRF_COEFFICIENT_4R_3:
+ case MADERA_FRF_COEFFICIENT_4R_4:
+ case MADERA_FRF_COEFFICIENT_5L_1:
+ case MADERA_FRF_COEFFICIENT_5L_2:
+ case MADERA_FRF_COEFFICIENT_5L_3:
+ case MADERA_FRF_COEFFICIENT_5L_4:
+ case MADERA_FRF_COEFFICIENT_5R_1:
+ case MADERA_FRF_COEFFICIENT_5R_2:
+ case MADERA_FRF_COEFFICIENT_5R_3:
+ case MADERA_FRF_COEFFICIENT_5R_4:
+ case MADERA_FRF_COEFFICIENT_6L_1:
+ case MADERA_FRF_COEFFICIENT_6L_2:
+ case MADERA_FRF_COEFFICIENT_6L_3:
+ case MADERA_FRF_COEFFICIENT_6L_4:
+ case MADERA_FRF_COEFFICIENT_6R_1:
+ case MADERA_FRF_COEFFICIENT_6R_2:
+ case MADERA_FRF_COEFFICIENT_6R_3:
+ case MADERA_FRF_COEFFICIENT_6R_4:
+ case MADERA_GPIO1_CTRL_1 ... MADERA_GPIO40_CTRL_2:
+ case MADERA_IRQ1_STATUS_1 ... MADERA_IRQ1_STATUS_33:
+ case MADERA_IRQ1_MASK_1 ... MADERA_IRQ1_MASK_33:
+ case MADERA_IRQ1_RAW_STATUS_1 ... MADERA_IRQ1_RAW_STATUS_33:
+ case MADERA_INTERRUPT_DEBOUNCE_7:
+ case MADERA_IRQ1_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
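[Editorial sketch, not part of the patch] For context: regmap calls a .readable_reg hook like the one above to decide whether an address is a real register before permitting I/O or exposing it in debugfs, and the GCC `case lo ... hi:` range extension keeps the long register list manageable. A minimal sketch of the pattern with made-up FOO_* names (none of these are registers from this patch):

	static bool foo_readable_register(struct device *dev, unsigned int reg)
	{
		switch (reg) {
		case FOO_CTRL:					/* hypothetical */
		case FOO_STATUS:				/* hypothetical */
		case FOO_COEFF_START ... FOO_COEFF_END:		/* range-case extension */
			return true;
		default:
			return false;
		}
	}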
+static bool cs47l85_16bit_volatile_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_SOFTWARE_RESET:
+ case MADERA_HARDWARE_REVISION:
+ case MADERA_WRITE_SEQUENCER_CTRL_0:
+ case MADERA_WRITE_SEQUENCER_CTRL_1:
+ case MADERA_WRITE_SEQUENCER_CTRL_2:
+ case MADERA_HAPTICS_STATUS:
+ case MADERA_SAMPLE_RATE_1_STATUS:
+ case MADERA_SAMPLE_RATE_2_STATUS:
+ case MADERA_SAMPLE_RATE_3_STATUS:
+ case MADERA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case MADERA_ASYNC_SAMPLE_RATE_2_STATUS:
+ case MADERA_HP_CTRL_1L:
+ case MADERA_HP_CTRL_1R:
+ case MADERA_HP_CTRL_2L:
+ case MADERA_HP_CTRL_2R:
+ case MADERA_HP_CTRL_3L:
+ case MADERA_HP_CTRL_3R:
+ case MADERA_DCS_HP1L_CONTROL:
+ case MADERA_DCS_HP1R_CONTROL:
+ case MADERA_MIC_DETECT_1_CONTROL_3:
+ case MADERA_MIC_DETECT_1_CONTROL_4:
+ case MADERA_HEADPHONE_DETECT_2:
+ case MADERA_HEADPHONE_DETECT_3:
+ case MADERA_HEADPHONE_DETECT_5:
+ case MADERA_INPUT_ENABLES_STATUS:
+ case MADERA_OUTPUT_STATUS_1:
+ case MADERA_RAW_OUTPUT_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_2:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_3:
+ case MADERA_SLIMBUS_RX_PORT_STATUS:
+ case MADERA_SLIMBUS_TX_PORT_STATUS:
+ case MADERA_FX_CTRL2:
+ case MADERA_ASRC2_STATUS:
+ case MADERA_ASRC1_STATUS:
+ case MADERA_CLOCK_CONTROL:
+	case MADERA_IRQ1_STATUS_1 ... MADERA_IRQ1_STATUS_33:
+ case MADERA_IRQ1_RAW_STATUS_1 ... MADERA_IRQ1_RAW_STATUS_33:
+ return true;
+ default:
+ return false;
+ }
+}
+
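[Editorial sketch, not part of the patch] The volatile list above covers registers the hardware updates by itself (status, headphone/mic detect, IRQ state), so regcache must neither serve them from cache nor replay them on sync. A hedged illustration of why that matters on resume, assuming the regmap handle in struct madera as used by cs47l90_patch() further below:

	/* Illustrative resume fragment; foo_resume is hypothetical. */
	static int foo_resume(struct madera *madera)
	{
		int ret;

		regcache_cache_only(madera->regmap, false);
		/* Replays cached register values; volatile registers are skipped. */
		ret = regcache_sync(madera->regmap);
		if (ret)
			dev_err(madera->dev, "regcache_sync failed: %d\n", ret);

		return ret;
	}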
+static bool cs47l85_32bit_readable_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_WSEQ_SEQUENCE_1 ... MADERA_WSEQ_SEQUENCE_508:
+ case CS47L85_OTP_HPDET_CAL_1 ... CS47L85_OTP_HPDET_CAL_2:
+ case MADERA_DSP1_CONFIG_1 ... MADERA_DSP1_SCRATCH_2:
+ case MADERA_DSP2_CONFIG_1 ... MADERA_DSP2_SCRATCH_2:
+ case MADERA_DSP3_CONFIG_1 ... MADERA_DSP3_SCRATCH_2:
+ case MADERA_DSP4_CONFIG_1 ... MADERA_DSP4_SCRATCH_2:
+ case MADERA_DSP5_CONFIG_1 ... MADERA_DSP5_SCRATCH_2:
+ case MADERA_DSP6_CONFIG_1 ... MADERA_DSP6_SCRATCH_2:
+ case MADERA_DSP7_CONFIG_1 ... MADERA_DSP7_SCRATCH_2:
+ return true;
+ default:
+ return cs47l85_is_adsp_memory(reg);
+ }
+}
+
+static bool cs47l85_32bit_volatile_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_WSEQ_SEQUENCE_1 ... MADERA_WSEQ_SEQUENCE_508:
+ case CS47L85_OTP_HPDET_CAL_1 ... CS47L85_OTP_HPDET_CAL_2:
+ case MADERA_DSP1_CONFIG_1 ... MADERA_DSP1_SCRATCH_2:
+ case MADERA_DSP2_CONFIG_1 ... MADERA_DSP2_SCRATCH_2:
+ case MADERA_DSP3_CONFIG_1 ... MADERA_DSP3_SCRATCH_2:
+ case MADERA_DSP4_CONFIG_1 ... MADERA_DSP4_SCRATCH_2:
+ case MADERA_DSP5_CONFIG_1 ... MADERA_DSP5_SCRATCH_2:
+ case MADERA_DSP6_CONFIG_1 ... MADERA_DSP6_SCRATCH_2:
+ case MADERA_DSP7_CONFIG_1 ... MADERA_DSP7_SCRATCH_2:
+ return true;
+ default:
+ return cs47l85_is_adsp_memory(reg);
+ }
+}
+
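[Editorial sketch, not part of the patch] The 32-bit readable and volatile sets are identical, so everything in the 32-bit map (write-sequencer RAM, OTP calibration words, DSP config and memory) bypasses the cache entirely. cs47l85_is_adsp_memory() is defined earlier in this file; a sketch of the usual shape of such a helper, with purely illustrative address windows:

	static bool foo_is_adsp_memory(unsigned int reg)
	{
		switch (reg) {
		case 0x080000 ... 0x085ffe:	/* illustrative PM window */
		case 0x0a0000 ... 0x0a7ffe:	/* illustrative XM window */
			return true;
		default:
			return false;
		}
	}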
+const struct regmap_config cs47l85_16bit_spi_regmap = {
+ .name = "cs47l85_16bit",
+ .reg_bits = 32,
+ .pad_bits = 16,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = 0x2fff,
+ .readable_reg = cs47l85_16bit_readable_register,
+ .volatile_reg = cs47l85_16bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = cs47l85_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs47l85_reg_default),
+};
+EXPORT_SYMBOL_GPL(cs47l85_16bit_spi_regmap);
+
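[Editorial sketch, not part of the patch] The SPI variant sets .pad_bits = 16 because the control interface clocks padding bits between the 32-bit register address and the data phase; the I2C variant below is otherwise identical and simply omits the padding. A hedged sketch of attaching this config in a bus driver, assuming a conventional devm probe:

	/* Illustrative SPI probe fragment; foo_spi_probe is hypothetical. */
	static int foo_spi_probe(struct spi_device *spi)
	{
		struct regmap *map;

		map = devm_regmap_init_spi(spi, &cs47l85_16bit_spi_regmap);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* ... hand the regmap to the madera core driver ... */
		return 0;
	}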
+const struct regmap_config cs47l85_16bit_i2c_regmap = {
+ .name = "cs47l85_16bit",
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = 0x2fff,
+ .readable_reg = cs47l85_16bit_readable_register,
+ .volatile_reg = cs47l85_16bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = cs47l85_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs47l85_reg_default),
+};
+EXPORT_SYMBOL_GPL(cs47l85_16bit_i2c_regmap);
+
+const struct regmap_config cs47l85_32bit_spi_regmap = {
+ .name = "cs47l85_32bit",
+ .reg_bits = 32,
+ .reg_stride = 2,
+ .pad_bits = 16,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_DSP7_SCRATCH_2,
+ .readable_reg = cs47l85_32bit_readable_register,
+ .volatile_reg = cs47l85_32bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(cs47l85_32bit_spi_regmap);
+
+const struct regmap_config cs47l85_32bit_i2c_regmap = {
+ .name = "cs47l85_32bit",
+ .reg_bits = 32,
+ .reg_stride = 2,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_DSP7_SCRATCH_2,
+ .readable_reg = cs47l85_32bit_readable_register,
+ .volatile_reg = cs47l85_32bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(cs47l85_32bit_i2c_regmap);
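[Editorial sketch, not part of the patch] Note .reg_stride = 2 in the 32-bit configs: each 32-bit value occupies two 16-bit address slots, so only even addresses are valid. Each chip therefore carries two regmaps, and callers pick one by register range; a sketch of such routing, where the MADERA_WSEQ_SEQUENCE_1 boundary is an assumption chosen for illustration:

	/* Hypothetical accessor; foo_read_reg is not part of this patch. */
	static int foo_read_reg(struct madera *madera, unsigned int reg,
				unsigned int *val)
	{
		if (reg >= MADERA_WSEQ_SEQUENCE_1)	/* assumed 32-bit space */
			return regmap_read(madera->regmap_32bit, reg, val);

		return regmap_read(madera->regmap, reg, val);
	}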
diff --git a/drivers/mfd/cs47l90-tables.c b/drivers/mfd/cs47l90-tables.c
new file mode 100644
index 000000000000..77207d98f0cc
--- /dev/null
+++ b/drivers/mfd/cs47l90-tables.c
@@ -0,0 +1,2674 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Regmap tables for CS47L90 codec
+ *
+ * Copyright (C) 2015-2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/registers.h>
+
+#include "madera.h"
+
+static const struct reg_sequence cs47l90_reva_16_patch[] = {
+ { 0x8A, 0x5555 },
+ { 0x8A, 0xAAAA },
+ { 0x4CF, 0x0700 },
+ { 0x171, 0x0003 },
+ { 0x101, 0x0444 },
+ { 0x159, 0x0002 },
+ { 0x120, 0x0444 },
+ { 0x1D1, 0x0004 },
+ { 0x1E0, 0xC084 },
+ { 0x159, 0x0000 },
+ { 0x120, 0x0404 },
+ { 0x101, 0x0404 },
+ { 0x171, 0x0002 },
+ { 0x17A, 0x2906 },
+ { 0x19A, 0x2906 },
+ { 0x441, 0xC750 },
+ { 0x340, 0x0001 },
+ { 0x112, 0x0405 },
+ { 0x124, 0x0C49 },
+ { 0x1300, 0x050E },
+ { 0x1302, 0x0101 },
+ { 0x1380, 0x0425 },
+ { 0x1381, 0xF6D8 },
+ { 0x1382, 0x0632 },
+ { 0x1383, 0xFEC8 },
+ { 0x1390, 0x042F },
+ { 0x1391, 0xF6CA },
+ { 0x1392, 0x0637 },
+ { 0x1393, 0xFEC8 },
+ { 0x281, 0x0000 },
+ { 0x282, 0x0000 },
+ { 0x4EA, 0x0100 },
+ { 0x8A, 0xCCCC },
+ { 0x8A, 0x3333 },
+};
+
+int cs47l90_patch(struct madera *madera)
+{
+ int ret;
+
+ ret = regmap_register_patch(madera->regmap,
+ cs47l90_reva_16_patch,
+ ARRAY_SIZE(cs47l90_reva_16_patch));
+ if (ret < 0) {
+ dev_err(madera->dev,
+ "Error in applying 16-bit patch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs47l90_patch);
+
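[Editorial sketch, not part of the patch] regmap_register_patch() writes the sequence immediately and also re-applies it after every regcache_sync(), so the rev A patch writes above survive suspend/resume. A hedged sketch of the expected call site in the core probe path:

	/* Illustrative probe fragment; the real dispatch lives in madera-core. */
	switch (madera->type) {
	case CS47L90:
		ret = cs47l90_patch(madera);	/* applies cs47l90_reva_16_patch */
		if (ret)
			return ret;
		break;
	default:
		break;
	}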
+static const struct reg_default cs47l90_reg_default[] = {
+ { 0x00000020, 0x0000 }, /* R32 (0x20) - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 (0x21) - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 (0x22) - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 (0x23) - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 (0x24) - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 (0x30) - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 (0x31) - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 (0x32) - PWM Drive 3 */
+ { 0x00000061, 0x01ff }, /* R97 (0x61) - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01ff }, /* R98 (0x62) - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01ff }, /* R99 (0x63) - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01ff }, /* R100 (0x64) - Sample Rate Sequence Select 4 */
+ { 0x00000066, 0x01ff }, /* R102 (0x66) - Always On Triggers Sequence Select 1 */
+ { 0x00000067, 0x01ff }, /* R103 (0x67) - Always On Triggers Sequence Select 2 */
+ { 0x00000090, 0x0000 }, /* R144 (0x90) - Haptics Control 1 */
+ { 0x00000091, 0x7fff }, /* R145 (0x91) - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 (0x92) - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 (0x93) - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 (0x94) - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 (0x95) - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 (0x96) - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 (0x97) - Haptics phase 3 duration */
+ { 0x000000a0, 0x0000 }, /* R160 (0xa0) - Comfort Noise Generator */
+ { 0x00000100, 0x0002 }, /* R256 (0x100) - Clock 32k 1 */
+ { 0x00000101, 0x0404 }, /* R257 (0x101) - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 (0x102) - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 (0x103) - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 (0x104) - Sample rate 3 */
+ { 0x00000112, 0x0405 }, /* R274 (0x112) - Async clock 1 */
+ { 0x00000113, 0x0011 }, /* R275 (0x113) - Async sample rate 1 */
+ { 0x00000114, 0x0011 }, /* R276 (0x114) - Async sample rate 2 */
+ { 0x00000120, 0x0404 }, /* R288 (0x120) - DSP Clock 1 */
+ { 0x00000122, 0x0000 }, /* R290 (0x122) - DSP Clock 2 */
+ { 0x00000149, 0x0000 }, /* R329 (0x149) - Output system clock */
+ { 0x0000014a, 0x0000 }, /* R330 (0x14a) - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 (0x152) - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 (0x153) - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 (0x154) - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 (0x155) - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 (0x156) - Rate Estimator 5 */
+ { 0x00000171, 0x0002 }, /* R369 (0x171) - FLL1 Control 1 */
+ { 0x00000172, 0x0008 }, /* R370 (0x172) - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 (0x173) - FLL1 Control 3 */
+ { 0x00000174, 0x007d }, /* R372 (0x174) - FLL1 Control 4 */
+ { 0x00000175, 0x0000 }, /* R373 (0x175) - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 (0x176) - FLL1 Control 6 */
+ { 0x00000177, 0x0281 }, /* R375 (0x177) - FLL1 Loop Filter Test 1 */
+ { 0x00000179, 0x0000 }, /* R377 (0x179) - FLL1 Control 7 */
+	{ 0x0000017a, 0x2906 }, /* R378 (0x17a) - FLL1 Efs 2 */
+ { 0x00000181, 0x0000 }, /* R385 (0x181) - FLL1 Synchroniser 1 */
+ { 0x00000182, 0x0000 }, /* R386 (0x182) - FLL1 Synchroniser 2 */
+ { 0x00000183, 0x0000 }, /* R387 (0x183) - FLL1 Synchroniser 3 */
+ { 0x00000184, 0x0000 }, /* R388 (0x184) - FLL1 Synchroniser 4 */
+ { 0x00000185, 0x0000 }, /* R389 (0x185) - FLL1 Synchroniser 5 */
+ { 0x00000186, 0x0000 }, /* R390 (0x186) - FLL1 Synchroniser 6 */
+ { 0x00000187, 0x0001 }, /* R391 (0x187) - FLL1 Synchroniser 7 */
+ { 0x00000189, 0x0000 }, /* R393 (0x189) - FLL1 Spread Spectrum */
+ { 0x0000018a, 0x0004 }, /* R394 (0x18a) - FLL1 GPIO Clock */
+ { 0x00000191, 0x0002 }, /* R401 (0x191) - FLL2 Control 1 */
+ { 0x00000192, 0x0008 }, /* R402 (0x192) - FLL2 Control 2 */
+ { 0x00000193, 0x0018 }, /* R403 (0x193) - FLL2 Control 3 */
+ { 0x00000194, 0x007d }, /* R404 (0x194) - FLL2 Control 4 */
+ { 0x00000195, 0x0000 }, /* R405 (0x195) - FLL2 Control 5 */
+ { 0x00000196, 0x0000 }, /* R406 (0x196) - FLL2 Control 6 */
+ { 0x00000197, 0x0281 }, /* R407 (0x197) - FLL2 Loop Filter Test 1 */
+ { 0x00000199, 0x0000 }, /* R409 (0x199) - FLL2 Control 7 */
+ { 0x0000019a, 0x2906 }, /* R410 (0x19a) - FLL2 Efs 2 */
+ { 0x000001a1, 0x0000 }, /* R417 (0x1a1) - FLL2 Synchroniser 1 */
+ { 0x000001a2, 0x0000 }, /* R418 (0x1a2) - FLL2 Synchroniser 2 */
+ { 0x000001a3, 0x0000 }, /* R419 (0x1a3) - FLL2 Synchroniser 3 */
+ { 0x000001a4, 0x0000 }, /* R420 (0x1a4) - FLL2 Synchroniser 4 */
+ { 0x000001a5, 0x0000 }, /* R421 (0x1a5) - FLL2 Synchroniser 5 */
+ { 0x000001a6, 0x0000 }, /* R422 (0x1a6) - FLL2 Synchroniser 6 */
+ { 0x000001a7, 0x0001 }, /* R423 (0x1a7) - FLL2 Synchroniser 7 */
+ { 0x000001a9, 0x0000 }, /* R425 (0x1a9) - FLL2 Spread Spectrum */
+ { 0x000001aa, 0x0004 }, /* R426 (0x1aa) - FLL2 GPIO Clock */
+ { 0x000001d1, 0x0004 }, /* R465 (0x1d1) - FLLAO_CONTROL_1 */
+ { 0x000001d2, 0x0004 }, /* R466 (0x1d2) - FLLAO_CONTROL_2 */
+ { 0x000001d3, 0x0000 }, /* R467 (0x1d3) - FLLAO_CONTROL_3 */
+ { 0x000001d4, 0x0000 }, /* R468 (0x1d4) - FLLAO_CONTROL_4 */
+ { 0x000001d5, 0x0001 }, /* R469 (0x1d5) - FLLAO_CONTROL_5 */
+ { 0x000001d6, 0x8004 }, /* R470 (0x1d6) - FLLAO_CONTROL_6 */
+ { 0x000001d8, 0x0000 }, /* R472 (0x1d8) - FLLAO_CONTROL_7 */
+ { 0x000001da, 0x0070 }, /* R474 (0x1da) - FLLAO_CONTROL_8 */
+ { 0x000001db, 0x0000 }, /* R475 (0x1db) - FLLAO_CONTROL_9 */
+ { 0x000001dc, 0x06da }, /* R476 (0x1dc) - FLLAO_CONTROL_10 */
+ { 0x000001dd, 0x0011 }, /* R477 (0x1dd) - FLLAO_CONTROL_11 */
+ { 0x00000200, 0x0006 }, /* R512 (0x200) - Mic Charge Pump 1 */
+ { 0x00000213, 0x03e4 }, /* R531 (0x213) - LDO2 Control 1 */
+ { 0x00000218, 0x00e6 }, /* R536 (0x218) - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x00e6 }, /* R537 (0x219) - Mic Bias Ctrl 2 */
+ { 0x0000021c, 0x2222 }, /* R540 (0x21c) - Mic Bias Ctrl 5 */
+ { 0x0000021e, 0x2222 }, /* R542 (0x21e) - Mic Bias Ctrl 6 */
+ { 0x0000027e, 0x0000 }, /* R638 (0x27e) - EDRE HP stereo control */
+ { 0x00000293, 0x0080 }, /* R659 (0x293) - Accessory Detect Mode 1 */
+ { 0x00000299, 0x0000 }, /* R665 (0x299) - Headphone Detect 0 */
+ { 0x0000029b, 0x0000 }, /* R667 (0x29b) - Headphone Detect 1 */
+ { 0x000002a2, 0x0010 }, /* R674 (0x2a2) - Mic Detect 1 Control 0 */
+ { 0x000002a3, 0x1102 }, /* R675 (0x2a3) - Mic Detect 1 Control 1 */
+ { 0x000002a4, 0x009f }, /* R676 (0x2a4) - Mic Detect 1 Control 2 */
+ { 0x000002a6, 0x3d3d }, /* R678 (0x2a6) - Mic Detect 1 Level 1 */
+ { 0x000002a7, 0x3d3d }, /* R679 (0x2a7) - Mic Detect 1 Level 2 */
+ { 0x000002a8, 0x333d }, /* R680 (0x2a8) - Mic Detect 1 Level 3 */
+ { 0x000002a9, 0x202d }, /* R681 (0x2a9) - Mic Detect 1 Level 4 */
+ { 0x000002b2, 0x0010 }, /* R690 (0x2b2) - Mic Detect 2 Control 0 */
+ { 0x000002b3, 0x1102 }, /* R691 (0x2b3) - Mic Detect 2 Control 1 */
+ { 0x000002b4, 0x009f }, /* R692 (0x2b4) - Mic Detect 2 Control 2 */
+ { 0x000002b6, 0x3d3d }, /* R694 (0x2b6) - Mic Detect 2 Level 1 */
+ { 0x000002b7, 0x3d3d }, /* R695 (0x2b7) - Mic Detect 2 Level 2 */
+ { 0x000002b8, 0x333d }, /* R696 (0x2b8) - Mic Detect 2 Level 3 */
+ { 0x000002b9, 0x202d }, /* R697 (0x2b9) - Mic Detect 2 Level 4 */
+ { 0x000002c6, 0x0010 }, /* R710 (0x2c6) - Mic Clamp control */
+ { 0x000002c8, 0x0000 }, /* R712 (0x2c8) - GP switch 1 */
+ { 0x000002d3, 0x0000 }, /* R723 (0x2d3) - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 (0x300) - Input Enables */
+ { 0x00000308, 0x0400 }, /* R776 (0x308) - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 (0x309) - Input Volume Ramp */
+	{ 0x0000030c, 0x0002 }, /* R780 (0x30c) - HPF Control */
+ { 0x00000310, 0x0080 }, /* R784 (0x310) - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 (0x311) - ADC Digital Volume 1L */
+ { 0x00000312, 0x0500 }, /* R786 (0x312) - DMIC1L Control */
+ { 0x00000313, 0x0000 }, /* R787 (0x313) - IN1L Rate Control */
+ { 0x00000314, 0x0080 }, /* R788 (0x314) - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 (0x315) - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 (0x316) - DMIC1R Control */
+ { 0x00000317, 0x0000 }, /* R791 (0x317) - IN1R Rate Control */
+ { 0x00000318, 0x0080 }, /* R792 (0x318) - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 (0x319) - ADC Digital Volume 2L */
+ { 0x0000031a, 0x0500 }, /* R794 (0x31a) - DMIC2L Control */
+ { 0x0000031b, 0x0000 }, /* R795 (0x31b) - IN2L Rate Control */
+ { 0x0000031c, 0x0080 }, /* R796 (0x31c) - IN2R Control */
+ { 0x0000031d, 0x0180 }, /* R797 (0x31d) - ADC Digital Volume 2R */
+ { 0x0000031e, 0x0000 }, /* R798 (0x31e) - DMIC2R Control */
+ { 0x0000031f, 0x0000 }, /* R799 (0x31f) - IN2R Rate Control */
+ { 0x00000320, 0x0000 }, /* R800 (0x320) - IN3L Control */
+ { 0x00000321, 0x0180 }, /* R801 (0x321) - ADC Digital Volume 3L */
+ { 0x00000322, 0x0500 }, /* R802 (0x322) - DMIC3L Control */
+ { 0x00000323, 0x0000 }, /* R803 (0x323) - IN3L Rate Control */
+ { 0x00000324, 0x0000 }, /* R804 (0x324) - IN3R Control */
+ { 0x00000325, 0x0180 }, /* R805 (0x325) - ADC Digital Volume 3R */
+ { 0x00000326, 0x0000 }, /* R806 (0x326) - DMIC3R Control */
+ { 0x00000327, 0x0000 }, /* R807 (0x327) - IN3R Rate Control */
+	{ 0x00000328, 0x0000 }, /* R808 (0x328) - IN4L Control */
+ { 0x00000329, 0x0180 }, /* R809 (0x329) - ADC Digital Volume 4L */
+ { 0x0000032a, 0x0500 }, /* R810 (0x32a) - DMIC4L Control */
+ { 0x0000032b, 0x0000 }, /* R811 (0x32b) - IN4L Rate Control */
+ { 0x0000032c, 0x0000 }, /* R812 (0x32c) - IN4R Control */
+ { 0x0000032d, 0x0180 }, /* R813 (0x32d) - ADC Digital Volume 4R */
+ { 0x0000032e, 0x0000 }, /* R814 (0x32e) - DMIC4R Control */
+ { 0x0000032f, 0x0000 }, /* R815 (0x32f) - IN4R Rate Control */
+ { 0x00000330, 0x0000 }, /* R816 (0x330) - IN5L Control */
+ { 0x00000331, 0x0180 }, /* R817 (0x331) - ADC Digital Volume 5L */
+ { 0x00000332, 0x0500 }, /* R818 (0x332) - DMIC5L Control */
+ { 0x00000333, 0x0000 }, /* R819 (0x333) - IN5L Rate Control */
+ { 0x00000334, 0x0000 }, /* R820 (0x334) - IN5R Control */
+ { 0x00000335, 0x0180 }, /* R821 (0x335) - ADC Digital Volume 5R */
+ { 0x00000336, 0x0000 }, /* R822 (0x336) - DMIC5R Control */
+ { 0x00000337, 0x0000 }, /* R823 (0x337) - IN5R Rate Control */
+ { 0x00000400, 0x0000 }, /* R1024 (0x400) - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 (0x408) - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 (0x409) - Output Volume Ramp */
+ { 0x00000410, 0x0080 }, /* R1040 (0x410) - Output Path Config 1L */
+ { 0x00000411, 0x0180 }, /* R1041 (0x411) - DAC Digital Volume 1L */
+ { 0x00000412, 0x0000 }, /* R1042 (0x412) - Output Path Config 1 */
+ { 0x00000413, 0x0001 }, /* R1043 (0x413) - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 (0x414) - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 (0x415) - DAC Digital Volume 1R */
+ { 0x00000417, 0x0002 }, /* R1047 (0x417) - Noise Gate Select 1R */
+ { 0x00000418, 0x0080 }, /* R1048 (0x418) - Output Path Config 2L */
+ { 0x00000419, 0x0180 }, /* R1049 (0x419) - DAC Digital Volume 2L */
+ { 0x0000041a, 0x0002 }, /* R1050 (0x41a) - Output Path Config 2 */
+ { 0x0000041b, 0x0004 }, /* R1051 (0x41b) - Noise Gate Select 2L */
+ { 0x0000041c, 0x0080 }, /* R1052 (0x41c) - Output Path Config 2R */
+ { 0x0000041d, 0x0180 }, /* R1053 (0x41d) - DAC Digital Volume 2R */
+ { 0x0000041f, 0x0008 }, /* R1055 (0x41f) - Noise Gate Select 2R */
+ { 0x00000420, 0x0080 }, /* R1056 (0x420) - Output Path Config 3L */
+ { 0x00000421, 0x0180 }, /* R1057 (0x421) - DAC Digital Volume 3L */
+ { 0x00000423, 0x0010 }, /* R1059 (0x423) - Noise Gate Select 3L */
+ { 0x00000424, 0x0080 }, /* R1060 (0x424) - Output Path Config 3R */
+ { 0x00000425, 0x0180 }, /* R1061 (0x425) - DAC Digital Volume 3R */
+ { 0x00000427, 0x0020 }, /* R1063 (0x427) - Noise Gate Select 3R */
+ { 0x00000430, 0x0000 }, /* R1072 (0x430) - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 (0x431) - DAC Digital Volume 5L */
+ { 0x00000433, 0x0100 }, /* R1075 (0x433) - Noise Gate Select 5L */
+ { 0x00000434, 0x0000 }, /* R1076 (0x434) - Output Path Config 5R */
+ { 0x00000435, 0x0180 }, /* R1077 (0x435) - DAC Digital Volume 5R */
+ { 0x00000437, 0x0200 }, /* R1079 (0x437) - Noise Gate Select 5R */
+ { 0x00000440, 0x003f }, /* R1088 (0x440) - DRE Enable */
+ { 0x00000448, 0x003f }, /* R1096 (0x448) - eDRE Enable */
+ { 0x00000450, 0x0000 }, /* R1104 (0x450) - DAC AEC Control 1 */
+ { 0x00000458, 0x0000 }, /* R1112 (0x458) - Noise Gate Control */
+ { 0x00000490, 0x0069 }, /* R1168 (0x490) - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 (0x491) - PDM SPK1 CTRL 2 */
+ { 0x000004a0, 0x3080 }, /* R1184 (0x4a0) - HP1 Short Circuit Ctrl */
+ { 0x000004a1, 0x3000 }, /* R1185 (0x4a1) - HP2 Short Circuit Ctrl */
+ { 0x000004a2, 0x3000 }, /* R1186 (0x4a2) - HP3 Short Circuit Ctrl */
+ { 0x00000500, 0x000c }, /* R1280 (0x500) - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0000 }, /* R1281 (0x501) - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 (0x502) - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 (0x503) - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 (0x504) - AIF1 Format */
+ { 0x00000506, 0x0040 }, /* R1286 (0x506) - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 (0x507) - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 (0x508) - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 (0x509) - AIF1 Frame Ctrl 3 */
+ { 0x0000050a, 0x0001 }, /* R1290 (0x50a) - AIF1 Frame Ctrl 4 */
+ { 0x0000050b, 0x0002 }, /* R1291 (0x50b) - AIF1 Frame Ctrl 5 */
+ { 0x0000050c, 0x0003 }, /* R1292 (0x50c) - AIF1 Frame Ctrl 6 */
+ { 0x0000050d, 0x0004 }, /* R1293 (0x50d) - AIF1 Frame Ctrl 7 */
+ { 0x0000050e, 0x0005 }, /* R1294 (0x50e) - AIF1 Frame Ctrl 8 */
+ { 0x0000050f, 0x0006 }, /* R1295 (0x50f) - AIF1 Frame Ctrl 9 */
+ { 0x00000510, 0x0007 }, /* R1296 (0x510) - AIF1 Frame Ctrl 10 */
+ { 0x00000511, 0x0000 }, /* R1297 (0x511) - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 (0x512) - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 (0x513) - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 (0x514) - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 (0x515) - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 (0x516) - AIF1 Frame Ctrl 16 */
+ { 0x00000517, 0x0006 }, /* R1303 (0x517) - AIF1 Frame Ctrl 17 */
+ { 0x00000518, 0x0007 }, /* R1304 (0x518) - AIF1 Frame Ctrl 18 */
+ { 0x00000519, 0x0000 }, /* R1305 (0x519) - AIF1 Tx Enables */
+ { 0x0000051a, 0x0000 }, /* R1306 (0x51a) - AIF1 Rx Enables */
+ { 0x00000540, 0x000c }, /* R1344 (0x540) - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0000 }, /* R1345 (0x541) - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 (0x542) - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 (0x543) - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 (0x544) - AIF2 Format */
+ { 0x00000546, 0x0040 }, /* R1350 (0x546) - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 (0x547) - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 (0x548) - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 (0x549) - AIF2 Frame Ctrl 3 */
+ { 0x0000054a, 0x0001 }, /* R1354 (0x54a) - AIF2 Frame Ctrl 4 */
+ { 0x0000054b, 0x0002 }, /* R1355 (0x54b) - AIF2 Frame Ctrl 5 */
+ { 0x0000054c, 0x0003 }, /* R1356 (0x54c) - AIF2 Frame Ctrl 6 */
+ { 0x0000054d, 0x0004 }, /* R1357 (0x54d) - AIF2 Frame Ctrl 7 */
+ { 0x0000054e, 0x0005 }, /* R1358 (0x54e) - AIF2 Frame Ctrl 8 */
+ { 0x0000054f, 0x0006 }, /* R1359 (0x54f) - AIF2 Frame Ctrl 9 */
+ { 0x00000550, 0x0007 }, /* R1360 (0x550) - AIF2 Frame Ctrl 10 */
+ { 0x00000551, 0x0000 }, /* R1361 (0x551) - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 (0x552) - AIF2 Frame Ctrl 12 */
+ { 0x00000553, 0x0002 }, /* R1363 (0x553) - AIF2 Frame Ctrl 13 */
+ { 0x00000554, 0x0003 }, /* R1364 (0x554) - AIF2 Frame Ctrl 14 */
+ { 0x00000555, 0x0004 }, /* R1365 (0x555) - AIF2 Frame Ctrl 15 */
+ { 0x00000556, 0x0005 }, /* R1366 (0x556) - AIF2 Frame Ctrl 16 */
+ { 0x00000557, 0x0006 }, /* R1367 (0x557) - AIF2 Frame Ctrl 17 */
+ { 0x00000558, 0x0007 }, /* R1368 (0x558) - AIF2 Frame Ctrl 18 */
+ { 0x00000559, 0x0000 }, /* R1369 (0x559) - AIF2 Tx Enables */
+ { 0x0000055a, 0x0000 }, /* R1370 (0x55a) - AIF2 Rx Enables */
+ { 0x00000580, 0x000c }, /* R1408 (0x580) - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0000 }, /* R1409 (0x581) - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 (0x582) - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 (0x583) - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 (0x584) - AIF3 Format */
+ { 0x00000586, 0x0040 }, /* R1414 (0x586) - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 (0x587) - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 (0x588) - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 (0x589) - AIF3 Frame Ctrl 3 */
+ { 0x0000058a, 0x0001 }, /* R1418 (0x58a) - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 (0x591) - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 (0x592) - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 (0x599) - AIF3 Tx Enables */
+ { 0x0000059a, 0x0000 }, /* R1434 (0x59a) - AIF3 Rx Enables */
+ { 0x000005a0, 0x000c }, /* R1440 (0x5a0) - AIF4 BCLK Ctrl */
+ { 0x000005a1, 0x0000 }, /* R1441 (0x5a1) - AIF4 Tx Pin Ctrl */
+ { 0x000005a2, 0x0000 }, /* R1442 (0x5a2) - AIF4 Rx Pin Ctrl */
+ { 0x000005a3, 0x0000 }, /* R1443 (0x5a3) - AIF4 Rate Ctrl */
+ { 0x000005a4, 0x0000 }, /* R1444 (0x5a4) - AIF4 Format */
+ { 0x000005a6, 0x0040 }, /* R1446 (0x5a6) - AIF4 Rx BCLK Rate */
+ { 0x000005a7, 0x1818 }, /* R1447 (0x5a7) - AIF4 Frame Ctrl 1 */
+ { 0x000005a8, 0x1818 }, /* R1448 (0x5a8) - AIF4 Frame Ctrl 2 */
+ { 0x000005a9, 0x0000 }, /* R1449 (0x5a9) - AIF4 Frame Ctrl 3 */
+ { 0x000005aa, 0x0001 }, /* R1450 (0x5aa) - AIF4 Frame Ctrl 4 */
+ { 0x000005b1, 0x0000 }, /* R1457 (0x5b1) - AIF4 Frame Ctrl 11 */
+ { 0x000005b2, 0x0001 }, /* R1458 (0x5b2) - AIF4 Frame Ctrl 12 */
+ { 0x000005b9, 0x0000 }, /* R1465 (0x5b9) - AIF4 Tx Enables */
+ { 0x000005ba, 0x0000 }, /* R1466 (0x5ba) - AIF4 Rx Enables */
+ { 0x000005c2, 0x0000 }, /* R1474 (0x5c2) - SPD1 TX Control */
+ { 0x000005e3, 0x0000 }, /* R1507 (0x5e3) - SLIMbus Framer Ref Gear */
+ { 0x000005e5, 0x0000 }, /* R1509 (0x5e5) - SLIMbus Rates 1 */
+ { 0x000005e6, 0x0000 }, /* R1510 (0x5e6) - SLIMbus Rates 2 */
+ { 0x000005e7, 0x0000 }, /* R1511 (0x5e7) - SLIMbus Rates 3 */
+ { 0x000005e8, 0x0000 }, /* R1512 (0x5e8) - SLIMbus Rates 4 */
+ { 0x000005e9, 0x0000 }, /* R1513 (0x5e9) - SLIMbus Rates 5 */
+ { 0x000005ea, 0x0000 }, /* R1514 (0x5ea) - SLIMbus Rates 6 */
+ { 0x000005eb, 0x0000 }, /* R1515 (0x5eb) - SLIMbus Rates 7 */
+ { 0x000005ec, 0x0000 }, /* R1516 (0x5ec) - SLIMbus Rates 8 */
+ { 0x000005f5, 0x0000 }, /* R1525 (0x5f5) - SLIMbus RX Channel Enable */
+	{ 0x000005f6, 0x0000 }, /* R1526 (0x5f6) - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 (0x640) - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 (0x641) - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 (0x642) - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 (0x643) - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 (0x644) - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 (0x645) - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 (0x646) - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 (0x647) - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 (0x648) - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 (0x649) - PWM2MIX Input 1 Volume */
+ { 0x0000064a, 0x0000 }, /* R1610 (0x64a) - PWM2MIX Input 2 Source */
+ { 0x0000064b, 0x0080 }, /* R1611 (0x64b) - PWM2MIX Input 2 Volume */
+ { 0x0000064c, 0x0000 }, /* R1612 (0x64c) - PWM2MIX Input 3 Source */
+ { 0x0000064d, 0x0080 }, /* R1613 (0x64d) - PWM2MIX Input 3 Volume */
+ { 0x0000064e, 0x0000 }, /* R1614 (0x64e) - PWM2MIX Input 4 Source */
+ { 0x0000064f, 0x0080 }, /* R1615 (0x64f) - PWM2MIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 (0x680) - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 (0x681) - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 (0x682) - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 (0x683) - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 (0x684) - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 (0x685) - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 (0x686) - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 (0x687) - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 (0x688) - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 (0x689) - OUT1RMIX Input 1 Volume */
+ { 0x0000068a, 0x0000 }, /* R1674 (0x68a) - OUT1RMIX Input 2 Source */
+ { 0x0000068b, 0x0080 }, /* R1675 (0x68b) - OUT1RMIX Input 2 Volume */
+	{ 0x0000068c, 0x0000 }, /* R1676 (0x68c) - OUT1RMIX Input 3 Source */
+	{ 0x0000068d, 0x0080 }, /* R1677 (0x68d) - OUT1RMIX Input 3 Volume */
+	{ 0x0000068e, 0x0000 }, /* R1678 (0x68e) - OUT1RMIX Input 4 Source */
+	{ 0x0000068f, 0x0080 }, /* R1679 (0x68f) - OUT1RMIX Input 4 Volume */
+ { 0x00000690, 0x0000 }, /* R1680 (0x690) - OUT2LMIX Input 1 Source */
+ { 0x00000691, 0x0080 }, /* R1681 (0x691) - OUT2LMIX Input 1 Volume */
+ { 0x00000692, 0x0000 }, /* R1682 (0x692) - OUT2LMIX Input 2 Source */
+ { 0x00000693, 0x0080 }, /* R1683 (0x693) - OUT2LMIX Input 2 Volume */
+ { 0x00000694, 0x0000 }, /* R1684 (0x694) - OUT2LMIX Input 3 Source */
+ { 0x00000695, 0x0080 }, /* R1685 (0x695) - OUT2LMIX Input 3 Volume */
+ { 0x00000696, 0x0000 }, /* R1686 (0x696) - OUT2LMIX Input 4 Source */
+ { 0x00000697, 0x0080 }, /* R1687 (0x697) - OUT2LMIX Input 4 Volume */
+ { 0x00000698, 0x0000 }, /* R1688 (0x698) - OUT2RMIX Input 1 Source */
+ { 0x00000699, 0x0080 }, /* R1689 (0x699) - OUT2RMIX Input 1 Volume */
+ { 0x0000069a, 0x0000 }, /* R1690 (0x69a) - OUT2RMIX Input 2 Source */
+ { 0x0000069b, 0x0080 }, /* R1691 (0x69b) - OUT2RMIX Input 2 Volume */
+ { 0x0000069c, 0x0000 }, /* R1692 (0x69c) - OUT2RMIX Input 3 Source */
+ { 0x0000069d, 0x0080 }, /* R1693 (0x69d) - OUT2RMIX Input 3 Volume */
+ { 0x0000069e, 0x0000 }, /* R1694 (0x69e) - OUT2RMIX Input 4 Source */
+ { 0x0000069f, 0x0080 }, /* R1695 (0x69f) - OUT2RMIX Input 4 Volume */
+ { 0x000006a0, 0x0000 }, /* R1696 (0x6a0) - OUT3LMIX Input 1 Source */
+ { 0x000006a1, 0x0080 }, /* R1697 (0x6a1) - OUT3LMIX Input 1 Volume */
+ { 0x000006a2, 0x0000 }, /* R1698 (0x6a2) - OUT3LMIX Input 2 Source */
+ { 0x000006a3, 0x0080 }, /* R1699 (0x6a3) - OUT3LMIX Input 2 Volume */
+ { 0x000006a4, 0x0000 }, /* R1700 (0x6a4) - OUT3LMIX Input 3 Source */
+ { 0x000006a5, 0x0080 }, /* R1701 (0x6a5) - OUT3LMIX Input 3 Volume */
+ { 0x000006a6, 0x0000 }, /* R1702 (0x6a6) - OUT3LMIX Input 4 Source */
+ { 0x000006a7, 0x0080 }, /* R1703 (0x6a7) - OUT3LMIX Input 4 Volume */
+ { 0x000006a8, 0x0000 }, /* R1704 (0x6a8) - OUT3RMIX Input 1 Source */
+ { 0x000006a9, 0x0080 }, /* R1705 (0x6a9) - OUT3RMIX Input 1 Volume */
+ { 0x000006aa, 0x0000 }, /* R1706 (0x6aa) - OUT3RMIX Input 2 Source */
+ { 0x000006ab, 0x0080 }, /* R1707 (0x6ab) - OUT3RMIX Input 2 Volume */
+ { 0x000006ac, 0x0000 }, /* R1708 (0x6ac) - OUT3RMIX Input 3 Source */
+ { 0x000006ad, 0x0080 }, /* R1709 (0x6ad) - OUT3RMIX Input 3 Volume */
+ { 0x000006ae, 0x0000 }, /* R1710 (0x6ae) - OUT3RMIX Input 4 Source */
+ { 0x000006af, 0x0080 }, /* R1711 (0x6af) - OUT3RMIX Input 4 Volume */
+ { 0x000006c0, 0x0000 }, /* R1728 (0x6c0) - OUT5LMIX Input 1 Source */
+ { 0x000006c1, 0x0080 }, /* R1729 (0x6c1) - OUT5LMIX Input 1 Volume */
+ { 0x000006c2, 0x0000 }, /* R1730 (0x6c2) - OUT5LMIX Input 2 Source */
+ { 0x000006c3, 0x0080 }, /* R1731 (0x6c3) - OUT5LMIX Input 2 Volume */
+ { 0x000006c4, 0x0000 }, /* R1732 (0x6c4) - OUT5LMIX Input 3 Source */
+ { 0x000006c5, 0x0080 }, /* R1733 (0x6c5) - OUT5LMIX Input 3 Volume */
+ { 0x000006c6, 0x0000 }, /* R1734 (0x6c6) - OUT5LMIX Input 4 Source */
+ { 0x000006c7, 0x0080 }, /* R1735 (0x6c7) - OUT5LMIX Input 4 Volume */
+ { 0x000006c8, 0x0000 }, /* R1736 (0x6c8) - OUT5RMIX Input 1 Source */
+ { 0x000006c9, 0x0080 }, /* R1737 (0x6c9) - OUT5RMIX Input 1 Volume */
+ { 0x000006ca, 0x0000 }, /* R1738 (0x6ca) - OUT5RMIX Input 2 Source */
+ { 0x000006cb, 0x0080 }, /* R1739 (0x6cb) - OUT5RMIX Input 2 Volume */
+ { 0x000006cc, 0x0000 }, /* R1740 (0x6cc) - OUT5RMIX Input 3 Source */
+ { 0x000006cd, 0x0080 }, /* R1741 (0x6cd) - OUT5RMIX Input 3 Volume */
+ { 0x000006ce, 0x0000 }, /* R1742 (0x6ce) - OUT5RMIX Input 4 Source */
+ { 0x000006cf, 0x0080 }, /* R1743 (0x6cf) - OUT5RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 (0x700) - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 (0x701) - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 (0x702) - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 (0x703) - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 (0x704) - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 (0x705) - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 (0x706) - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 (0x707) - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 (0x708) - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 (0x709) - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070a, 0x0000 }, /* R1802 (0x70a) - AIF1TX2MIX Input 2 Source */
+ { 0x0000070b, 0x0080 }, /* R1803 (0x70b) - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070c, 0x0000 }, /* R1804 (0x70c) - AIF1TX2MIX Input 3 Source */
+ { 0x0000070d, 0x0080 }, /* R1805 (0x70d) - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070e, 0x0000 }, /* R1806 (0x70e) - AIF1TX2MIX Input 4 Source */
+ { 0x0000070f, 0x0080 }, /* R1807 (0x70f) - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 (0x710) - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 (0x711) - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 (0x712) - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 (0x713) - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 (0x714) - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 (0x715) - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 (0x716) - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 (0x717) - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 (0x718) - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 (0x719) - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071a, 0x0000 }, /* R1818 (0x71a) - AIF1TX4MIX Input 2 Source */
+ { 0x0000071b, 0x0080 }, /* R1819 (0x71b) - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071c, 0x0000 }, /* R1820 (0x71c) - AIF1TX4MIX Input 3 Source */
+ { 0x0000071d, 0x0080 }, /* R1821 (0x71d) - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071e, 0x0000 }, /* R1822 (0x71e) - AIF1TX4MIX Input 4 Source */
+ { 0x0000071f, 0x0080 }, /* R1823 (0x71f) - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 (0x720) - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 (0x721) - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 (0x722) - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 (0x723) - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 (0x724) - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 (0x725) - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 (0x726) - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 (0x727) - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 (0x728) - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 (0x729) - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072a, 0x0000 }, /* R1834 (0x72a) - AIF1TX6MIX Input 2 Source */
+ { 0x0000072b, 0x0080 }, /* R1835 (0x72b) - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072c, 0x0000 }, /* R1836 (0x72c) - AIF1TX6MIX Input 3 Source */
+ { 0x0000072d, 0x0080 }, /* R1837 (0x72d) - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072e, 0x0000 }, /* R1838 (0x72e) - AIF1TX6MIX Input 4 Source */
+ { 0x0000072f, 0x0080 }, /* R1839 (0x72f) - AIF1TX6MIX Input 4 Volume */
+ { 0x00000730, 0x0000 }, /* R1840 (0x730) - AIF1TX7MIX Input 1 Source */
+ { 0x00000731, 0x0080 }, /* R1841 (0x731) - AIF1TX7MIX Input 1 Volume */
+ { 0x00000732, 0x0000 }, /* R1842 (0x732) - AIF1TX7MIX Input 2 Source */
+ { 0x00000733, 0x0080 }, /* R1843 (0x733) - AIF1TX7MIX Input 2 Volume */
+ { 0x00000734, 0x0000 }, /* R1844 (0x734) - AIF1TX7MIX Input 3 Source */
+ { 0x00000735, 0x0080 }, /* R1845 (0x735) - AIF1TX7MIX Input 3 Volume */
+ { 0x00000736, 0x0000 }, /* R1846 (0x736) - AIF1TX7MIX Input 4 Source */
+ { 0x00000737, 0x0080 }, /* R1847 (0x737) - AIF1TX7MIX Input 4 Volume */
+ { 0x00000738, 0x0000 }, /* R1848 (0x738) - AIF1TX8MIX Input 1 Source */
+ { 0x00000739, 0x0080 }, /* R1849 (0x739) - AIF1TX8MIX Input 1 Volume */
+ { 0x0000073a, 0x0000 }, /* R1850 (0x73a) - AIF1TX8MIX Input 2 Source */
+ { 0x0000073b, 0x0080 }, /* R1851 (0x73b) - AIF1TX8MIX Input 2 Volume */
+ { 0x0000073c, 0x0000 }, /* R1852 (0x73c) - AIF1TX8MIX Input 3 Source */
+ { 0x0000073d, 0x0080 }, /* R1853 (0x73d) - AIF1TX8MIX Input 3 Volume */
+ { 0x0000073e, 0x0000 }, /* R1854 (0x73e) - AIF1TX8MIX Input 4 Source */
+ { 0x0000073f, 0x0080 }, /* R1855 (0x73f) - AIF1TX8MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 (0x740) - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 (0x741) - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 (0x742) - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 (0x743) - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 (0x744) - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 (0x745) - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 (0x746) - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 (0x747) - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 (0x748) - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 (0x749) - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074a, 0x0000 }, /* R1866 (0x74a) - AIF2TX2MIX Input 2 Source */
+ { 0x0000074b, 0x0080 }, /* R1867 (0x74b) - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074c, 0x0000 }, /* R1868 (0x74c) - AIF2TX2MIX Input 3 Source */
+ { 0x0000074d, 0x0080 }, /* R1869 (0x74d) - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074e, 0x0000 }, /* R1870 (0x74e) - AIF2TX2MIX Input 4 Source */
+ { 0x0000074f, 0x0080 }, /* R1871 (0x74f) - AIF2TX2MIX Input 4 Volume */
+ { 0x00000750, 0x0000 }, /* R1872 (0x750) - AIF2TX3MIX Input 1 Source */
+ { 0x00000751, 0x0080 }, /* R1873 (0x751) - AIF2TX3MIX Input 1 Volume */
+ { 0x00000752, 0x0000 }, /* R1874 (0x752) - AIF2TX3MIX Input 2 Source */
+ { 0x00000753, 0x0080 }, /* R1875 (0x753) - AIF2TX3MIX Input 2 Volume */
+ { 0x00000754, 0x0000 }, /* R1876 (0x754) - AIF2TX3MIX Input 3 Source */
+ { 0x00000755, 0x0080 }, /* R1877 (0x755) - AIF2TX3MIX Input 3 Volume */
+ { 0x00000756, 0x0000 }, /* R1878 (0x756) - AIF2TX3MIX Input 4 Source */
+ { 0x00000757, 0x0080 }, /* R1879 (0x757) - AIF2TX3MIX Input 4 Volume */
+ { 0x00000758, 0x0000 }, /* R1880 (0x758) - AIF2TX4MIX Input 1 Source */
+ { 0x00000759, 0x0080 }, /* R1881 (0x759) - AIF2TX4MIX Input 1 Volume */
+ { 0x0000075a, 0x0000 }, /* R1882 (0x75a) - AIF2TX4MIX Input 2 Source */
+ { 0x0000075b, 0x0080 }, /* R1883 (0x75b) - AIF2TX4MIX Input 2 Volume */
+ { 0x0000075c, 0x0000 }, /* R1884 (0x75c) - AIF2TX4MIX Input 3 Source */
+ { 0x0000075d, 0x0080 }, /* R1885 (0x75d) - AIF2TX4MIX Input 3 Volume */
+ { 0x0000075e, 0x0000 }, /* R1886 (0x75e) - AIF2TX4MIX Input 4 Source */
+ { 0x0000075f, 0x0080 }, /* R1887 (0x75f) - AIF2TX4MIX Input 4 Volume */
+ { 0x00000760, 0x0000 }, /* R1888 (0x760) - AIF2TX5MIX Input 1 Source */
+ { 0x00000761, 0x0080 }, /* R1889 (0x761) - AIF2TX5MIX Input 1 Volume */
+ { 0x00000762, 0x0000 }, /* R1890 (0x762) - AIF2TX5MIX Input 2 Source */
+ { 0x00000763, 0x0080 }, /* R1891 (0x763) - AIF2TX5MIX Input 2 Volume */
+ { 0x00000764, 0x0000 }, /* R1892 (0x764) - AIF2TX5MIX Input 3 Source */
+ { 0x00000765, 0x0080 }, /* R1893 (0x765) - AIF2TX5MIX Input 3 Volume */
+ { 0x00000766, 0x0000 }, /* R1894 (0x766) - AIF2TX5MIX Input 4 Source */
+ { 0x00000767, 0x0080 }, /* R1895 (0x767) - AIF2TX5MIX Input 4 Volume */
+ { 0x00000768, 0x0000 }, /* R1896 (0x768) - AIF2TX6MIX Input 1 Source */
+ { 0x00000769, 0x0080 }, /* R1897 (0x769) - AIF2TX6MIX Input 1 Volume */
+ { 0x0000076a, 0x0000 }, /* R1898 (0x76a) - AIF2TX6MIX Input 2 Source */
+ { 0x0000076b, 0x0080 }, /* R1899 (0x76b) - AIF2TX6MIX Input 2 Volume */
+ { 0x0000076c, 0x0000 }, /* R1900 (0x76c) - AIF2TX6MIX Input 3 Source */
+ { 0x0000076d, 0x0080 }, /* R1901 (0x76d) - AIF2TX6MIX Input 3 Volume */
+ { 0x0000076e, 0x0000 }, /* R1902 (0x76e) - AIF2TX6MIX Input 4 Source */
+ { 0x0000076f, 0x0080 }, /* R1903 (0x76f) - AIF2TX6MIX Input 4 Volume */
+ { 0x00000770, 0x0000 }, /* R1904 (0x770) - AIF2TX7MIX Input 1 Source */
+ { 0x00000771, 0x0080 }, /* R1905 (0x771) - AIF2TX7MIX Input 1 Volume */
+ { 0x00000772, 0x0000 }, /* R1906 (0x772) - AIF2TX7MIX Input 2 Source */
+ { 0x00000773, 0x0080 }, /* R1907 (0x773) - AIF2TX7MIX Input 2 Volume */
+ { 0x00000774, 0x0000 }, /* R1908 (0x774) - AIF2TX7MIX Input 3 Source */
+ { 0x00000775, 0x0080 }, /* R1909 (0x775) - AIF2TX7MIX Input 3 Volume */
+ { 0x00000776, 0x0000 }, /* R1910 (0x776) - AIF2TX7MIX Input 4 Source */
+ { 0x00000777, 0x0080 }, /* R1911 (0x777) - AIF2TX7MIX Input 4 Volume */
+ { 0x00000778, 0x0000 }, /* R1912 (0x778) - AIF2TX8MIX Input 1 Source */
+ { 0x00000779, 0x0080 }, /* R1913 (0x779) - AIF2TX8MIX Input 1 Volume */
+ { 0x0000077a, 0x0000 }, /* R1914 (0x77a) - AIF2TX8MIX Input 2 Source */
+ { 0x0000077b, 0x0080 }, /* R1915 (0x77b) - AIF2TX8MIX Input 2 Volume */
+ { 0x0000077c, 0x0000 }, /* R1916 (0x77c) - AIF2TX8MIX Input 3 Source */
+ { 0x0000077d, 0x0080 }, /* R1917 (0x77d) - AIF2TX8MIX Input 3 Volume */
+ { 0x0000077e, 0x0000 }, /* R1918 (0x77e) - AIF2TX8MIX Input 4 Source */
+ { 0x0000077f, 0x0080 }, /* R1919 (0x77f) - AIF2TX8MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 (0x780) - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 (0x781) - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 (0x782) - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 (0x783) - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 (0x784) - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 (0x785) - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 (0x786) - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 (0x787) - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 (0x788) - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 (0x789) - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078a, 0x0000 }, /* R1930 (0x78a) - AIF3TX2MIX Input 2 Source */
+ { 0x0000078b, 0x0080 }, /* R1931 (0x78b) - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078c, 0x0000 }, /* R1932 (0x78c) - AIF3TX2MIX Input 3 Source */
+ { 0x0000078d, 0x0080 }, /* R1933 (0x78d) - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078e, 0x0000 }, /* R1934 (0x78e) - AIF3TX2MIX Input 4 Source */
+ { 0x0000078f, 0x0080 }, /* R1935 (0x78f) - AIF3TX2MIX Input 4 Volume */
+ { 0x000007a0, 0x0000 }, /* R1952 (0x7a0) - AIF4TX1MIX Input 1 Source */
+ { 0x000007a1, 0x0080 }, /* R1953 (0x7a1) - AIF4TX1MIX Input 1 Volume */
+ { 0x000007a2, 0x0000 }, /* R1954 (0x7a2) - AIF4TX1MIX Input 2 Source */
+ { 0x000007a3, 0x0080 }, /* R1955 (0x7a3) - AIF4TX1MIX Input 2 Volume */
+ { 0x000007a4, 0x0000 }, /* R1956 (0x7a4) - AIF4TX1MIX Input 3 Source */
+ { 0x000007a5, 0x0080 }, /* R1957 (0x7a5) - AIF4TX1MIX Input 3 Volume */
+ { 0x000007a6, 0x0000 }, /* R1958 (0x7a6) - AIF4TX1MIX Input 4 Source */
+ { 0x000007a7, 0x0080 }, /* R1959 (0x7a7) - AIF4TX1MIX Input 4 Volume */
+ { 0x000007a8, 0x0000 }, /* R1960 (0x7a8) - AIF4TX2MIX Input 1 Source */
+ { 0x000007a9, 0x0080 }, /* R1961 (0x7a9) - AIF4TX2MIX Input 1 Volume */
+ { 0x000007aa, 0x0000 }, /* R1962 (0x7aa) - AIF4TX2MIX Input 2 Source */
+ { 0x000007ab, 0x0080 }, /* R1963 (0x7ab) - AIF4TX2MIX Input 2 Volume */
+ { 0x000007ac, 0x0000 }, /* R1964 (0x7ac) - AIF4TX2MIX Input 3 Source */
+ { 0x000007ad, 0x0080 }, /* R1965 (0x7ad) - AIF4TX2MIX Input 3 Volume */
+ { 0x000007ae, 0x0000 }, /* R1966 (0x7ae) - AIF4TX2MIX Input 4 Source */
+ { 0x000007af, 0x0080 }, /* R1967 (0x7af) - AIF4TX2MIX Input 4 Volume */
+ { 0x000007c0, 0x0000 }, /* R1984 (0x7c0) - SLIMTX1MIX Input 1 Source */
+ { 0x000007c1, 0x0080 }, /* R1985 (0x7c1) - SLIMTX1MIX Input 1 Volume */
+ { 0x000007c2, 0x0000 }, /* R1986 (0x7c2) - SLIMTX1MIX Input 2 Source */
+ { 0x000007c3, 0x0080 }, /* R1987 (0x7c3) - SLIMTX1MIX Input 2 Volume */
+ { 0x000007c4, 0x0000 }, /* R1988 (0x7c4) - SLIMTX1MIX Input 3 Source */
+ { 0x000007c5, 0x0080 }, /* R1989 (0x7c5) - SLIMTX1MIX Input 3 Volume */
+ { 0x000007c6, 0x0000 }, /* R1990 (0x7c6) - SLIMTX1MIX Input 4 Source */
+ { 0x000007c7, 0x0080 }, /* R1991 (0x7c7) - SLIMTX1MIX Input 4 Volume */
+ { 0x000007c8, 0x0000 }, /* R1992 (0x7c8) - SLIMTX2MIX Input 1 Source */
+ { 0x000007c9, 0x0080 }, /* R1993 (0x7c9) - SLIMTX2MIX Input 1 Volume */
+ { 0x000007ca, 0x0000 }, /* R1994 (0x7ca) - SLIMTX2MIX Input 2 Source */
+ { 0x000007cb, 0x0080 }, /* R1995 (0x7cb) - SLIMTX2MIX Input 2 Volume */
+ { 0x000007cc, 0x0000 }, /* R1996 (0x7cc) - SLIMTX2MIX Input 3 Source */
+ { 0x000007cd, 0x0080 }, /* R1997 (0x7cd) - SLIMTX2MIX Input 3 Volume */
+ { 0x000007ce, 0x0000 }, /* R1998 (0x7ce) - SLIMTX2MIX Input 4 Source */
+ { 0x000007cf, 0x0080 }, /* R1999 (0x7cf) - SLIMTX2MIX Input 4 Volume */
+ { 0x000007d0, 0x0000 }, /* R2000 (0x7d0) - SLIMTX3MIX Input 1 Source */
+ { 0x000007d1, 0x0080 }, /* R2001 (0x7d1) - SLIMTX3MIX Input 1 Volume */
+ { 0x000007d2, 0x0000 }, /* R2002 (0x7d2) - SLIMTX3MIX Input 2 Source */
+ { 0x000007d3, 0x0080 }, /* R2003 (0x7d3) - SLIMTX3MIX Input 2 Volume */
+ { 0x000007d4, 0x0000 }, /* R2004 (0x7d4) - SLIMTX3MIX Input 3 Source */
+ { 0x000007d5, 0x0080 }, /* R2005 (0x7d5) - SLIMTX3MIX Input 3 Volume */
+ { 0x000007d6, 0x0000 }, /* R2006 (0x7d6) - SLIMTX3MIX Input 4 Source */
+ { 0x000007d7, 0x0080 }, /* R2007 (0x7d7) - SLIMTX3MIX Input 4 Volume */
+ { 0x000007d8, 0x0000 }, /* R2008 (0x7d8) - SLIMTX4MIX Input 1 Source */
+ { 0x000007d9, 0x0080 }, /* R2009 (0x7d9) - SLIMTX4MIX Input 1 Volume */
+ { 0x000007da, 0x0000 }, /* R2010 (0x7da) - SLIMTX4MIX Input 2 Source */
+ { 0x000007db, 0x0080 }, /* R2011 (0x7db) - SLIMTX4MIX Input 2 Volume */
+ { 0x000007dc, 0x0000 }, /* R2012 (0x7dc) - SLIMTX4MIX Input 3 Source */
+ { 0x000007dd, 0x0080 }, /* R2013 (0x7dd) - SLIMTX4MIX Input 3 Volume */
+ { 0x000007de, 0x0000 }, /* R2014 (0x7de) - SLIMTX4MIX Input 4 Source */
+ { 0x000007df, 0x0080 }, /* R2015 (0x7df) - SLIMTX4MIX Input 4 Volume */
+ { 0x000007e0, 0x0000 }, /* R2016 (0x7e0) - SLIMTX5MIX Input 1 Source */
+ { 0x000007e1, 0x0080 }, /* R2017 (0x7e1) - SLIMTX5MIX Input 1 Volume */
+ { 0x000007e2, 0x0000 }, /* R2018 (0x7e2) - SLIMTX5MIX Input 2 Source */
+ { 0x000007e3, 0x0080 }, /* R2019 (0x7e3) - SLIMTX5MIX Input 2 Volume */
+ { 0x000007e4, 0x0000 }, /* R2020 (0x7e4) - SLIMTX5MIX Input 3 Source */
+ { 0x000007e5, 0x0080 }, /* R2021 (0x7e5) - SLIMTX5MIX Input 3 Volume */
+ { 0x000007e6, 0x0000 }, /* R2022 (0x7e6) - SLIMTX5MIX Input 4 Source */
+ { 0x000007e7, 0x0080 }, /* R2023 (0x7e7) - SLIMTX5MIX Input 4 Volume */
+ { 0x000007e8, 0x0000 }, /* R2024 (0x7e8) - SLIMTX6MIX Input 1 Source */
+ { 0x000007e9, 0x0080 }, /* R2025 (0x7e9) - SLIMTX6MIX Input 1 Volume */
+ { 0x000007ea, 0x0000 }, /* R2026 (0x7ea) - SLIMTX6MIX Input 2 Source */
+ { 0x000007eb, 0x0080 }, /* R2027 (0x7eb) - SLIMTX6MIX Input 2 Volume */
+ { 0x000007ec, 0x0000 }, /* R2028 (0x7ec) - SLIMTX6MIX Input 3 Source */
+ { 0x000007ed, 0x0080 }, /* R2029 (0x7ed) - SLIMTX6MIX Input 3 Volume */
+ { 0x000007ee, 0x0000 }, /* R2030 (0x7ee) - SLIMTX6MIX Input 4 Source */
+ { 0x000007ef, 0x0080 }, /* R2031 (0x7ef) - SLIMTX6MIX Input 4 Volume */
+ { 0x000007f0, 0x0000 }, /* R2032 (0x7f0) - SLIMTX7MIX Input 1 Source */
+ { 0x000007f1, 0x0080 }, /* R2033 (0x7f1) - SLIMTX7MIX Input 1 Volume */
+ { 0x000007f2, 0x0000 }, /* R2034 (0x7f2) - SLIMTX7MIX Input 2 Source */
+ { 0x000007f3, 0x0080 }, /* R2035 (0x7f3) - SLIMTX7MIX Input 2 Volume */
+ { 0x000007f4, 0x0000 }, /* R2036 (0x7f4) - SLIMTX7MIX Input 3 Source */
+ { 0x000007f5, 0x0080 }, /* R2037 (0x7f5) - SLIMTX7MIX Input 3 Volume */
+ { 0x000007f6, 0x0000 }, /* R2038 (0x7f6) - SLIMTX7MIX Input 4 Source */
+ { 0x000007f7, 0x0080 }, /* R2039 (0x7f7) - SLIMTX7MIX Input 4 Volume */
+ { 0x000007f8, 0x0000 }, /* R2040 (0x7f8) - SLIMTX8MIX Input 1 Source */
+ { 0x000007f9, 0x0080 }, /* R2041 (0x7f9) - SLIMTX8MIX Input 1 Volume */
+ { 0x000007fa, 0x0000 }, /* R2042 (0x7fa) - SLIMTX8MIX Input 2 Source */
+ { 0x000007fb, 0x0080 }, /* R2043 (0x7fb) - SLIMTX8MIX Input 2 Volume */
+ { 0x000007fc, 0x0000 }, /* R2044 (0x7fc) - SLIMTX8MIX Input 3 Source */
+ { 0x000007fd, 0x0080 }, /* R2045 (0x7fd) - SLIMTX8MIX Input 3 Volume */
+ { 0x000007fe, 0x0000 }, /* R2046 (0x7fe) - SLIMTX8MIX Input 4 Source */
+ { 0x000007ff, 0x0080 }, /* R2047 (0x7ff) - SLIMTX8MIX Input 4 Volume */
+ { 0x00000800, 0x0000 }, /* R2048 (0x800) - SPDIF1TX1MIX Input 1 Source */
+ { 0x00000801, 0x0080 }, /* R2049 (0x801) - SPDIF1TX1MIX Input 1 Volume */
+ { 0x00000808, 0x0000 }, /* R2056 (0x808) - SPDIF1TX2MIX Input 1 Source */
+ { 0x00000809, 0x0080 }, /* R2057 (0x809) - SPDIF1TX2MIX Input 1 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 (0x880) - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 (0x881) - EQ1MIX Input 1 Volume */
+ { 0x00000882, 0x0000 }, /* R2178 (0x882) - EQ1MIX Input 2 Source */
+ { 0x00000883, 0x0080 }, /* R2179 (0x883) - EQ1MIX Input 2 Volume */
+ { 0x00000884, 0x0000 }, /* R2180 (0x884) - EQ1MIX Input 3 Source */
+ { 0x00000885, 0x0080 }, /* R2181 (0x885) - EQ1MIX Input 3 Volume */
+ { 0x00000886, 0x0000 }, /* R2182 (0x886) - EQ1MIX Input 4 Source */
+ { 0x00000887, 0x0080 }, /* R2183 (0x887) - EQ1MIX Input 4 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 (0x888) - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 (0x889) - EQ2MIX Input 1 Volume */
+ { 0x0000088a, 0x0000 }, /* R2186 (0x88a) - EQ2MIX Input 2 Source */
+ { 0x0000088b, 0x0080 }, /* R2187 (0x88b) - EQ2MIX Input 2 Volume */
+ { 0x0000088c, 0x0000 }, /* R2188 (0x88c) - EQ2MIX Input 3 Source */
+ { 0x0000088d, 0x0080 }, /* R2189 (0x88d) - EQ2MIX Input 3 Volume */
+ { 0x0000088e, 0x0000 }, /* R2190 (0x88e) - EQ2MIX Input 4 Source */
+ { 0x0000088f, 0x0080 }, /* R2191 (0x88f) - EQ2MIX Input 4 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 (0x890) - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 (0x891) - EQ3MIX Input 1 Volume */
+ { 0x00000892, 0x0000 }, /* R2194 (0x892) - EQ3MIX Input 2 Source */
+ { 0x00000893, 0x0080 }, /* R2195 (0x893) - EQ3MIX Input 2 Volume */
+ { 0x00000894, 0x0000 }, /* R2196 (0x894) - EQ3MIX Input 3 Source */
+ { 0x00000895, 0x0080 }, /* R2197 (0x895) - EQ3MIX Input 3 Volume */
+ { 0x00000896, 0x0000 }, /* R2198 (0x896) - EQ3MIX Input 4 Source */
+ { 0x00000897, 0x0080 }, /* R2199 (0x897) - EQ3MIX Input 4 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 (0x898) - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 (0x899) - EQ4MIX Input 1 Volume */
+ { 0x0000089a, 0x0000 }, /* R2202 (0x89a) - EQ4MIX Input 2 Source */
+ { 0x0000089b, 0x0080 }, /* R2203 (0x89b) - EQ4MIX Input 2 Volume */
+ { 0x0000089c, 0x0000 }, /* R2204 (0x89c) - EQ4MIX Input 3 Source */
+ { 0x0000089d, 0x0080 }, /* R2205 (0x89d) - EQ4MIX Input 3 Volume */
+ { 0x0000089e, 0x0000 }, /* R2206 (0x89e) - EQ4MIX Input 4 Source */
+ { 0x0000089f, 0x0080 }, /* R2207 (0x89f) - EQ4MIX Input 4 Volume */
+ { 0x000008c0, 0x0000 }, /* R2240 (0x8c0) - DRC1LMIX Input 1 Source */
+ { 0x000008c1, 0x0080 }, /* R2241 (0x8c1) - DRC1LMIX Input 1 Volume */
+ { 0x000008c2, 0x0000 }, /* R2242 (0x8c2) - DRC1LMIX Input 2 Source */
+ { 0x000008c3, 0x0080 }, /* R2243 (0x8c3) - DRC1LMIX Input 2 Volume */
+ { 0x000008c4, 0x0000 }, /* R2244 (0x8c4) - DRC1LMIX Input 3 Source */
+ { 0x000008c5, 0x0080 }, /* R2245 (0x8c5) - DRC1LMIX Input 3 Volume */
+ { 0x000008c6, 0x0000 }, /* R2246 (0x8c6) - DRC1LMIX Input 4 Source */
+ { 0x000008c7, 0x0080 }, /* R2247 (0x8c7) - DRC1LMIX Input 4 Volume */
+ { 0x000008c8, 0x0000 }, /* R2248 (0x8c8) - DRC1RMIX Input 1 Source */
+ { 0x000008c9, 0x0080 }, /* R2249 (0x8c9) - DRC1RMIX Input 1 Volume */
+ { 0x000008ca, 0x0000 }, /* R2250 (0x8ca) - DRC1RMIX Input 2 Source */
+ { 0x000008cb, 0x0080 }, /* R2251 (0x8cb) - DRC1RMIX Input 2 Volume */
+ { 0x000008cc, 0x0000 }, /* R2252 (0x8cc) - DRC1RMIX Input 3 Source */
+ { 0x000008cd, 0x0080 }, /* R2253 (0x8cd) - DRC1RMIX Input 3 Volume */
+ { 0x000008ce, 0x0000 }, /* R2254 (0x8ce) - DRC1RMIX Input 4 Source */
+ { 0x000008cf, 0x0080 }, /* R2255 (0x8cf) - DRC1RMIX Input 4 Volume */
+ { 0x000008d0, 0x0000 }, /* R2256 (0x8d0) - DRC2LMIX Input 1 Source */
+ { 0x000008d1, 0x0080 }, /* R2257 (0x8d1) - DRC2LMIX Input 1 Volume */
+ { 0x000008d2, 0x0000 }, /* R2258 (0x8d2) - DRC2LMIX Input 2 Source */
+ { 0x000008d3, 0x0080 }, /* R2259 (0x8d3) - DRC2LMIX Input 2 Volume */
+ { 0x000008d4, 0x0000 }, /* R2260 (0x8d4) - DRC2LMIX Input 3 Source */
+ { 0x000008d5, 0x0080 }, /* R2261 (0x8d5) - DRC2LMIX Input 3 Volume */
+ { 0x000008d6, 0x0000 }, /* R2262 (0x8d6) - DRC2LMIX Input 4 Source */
+ { 0x000008d7, 0x0080 }, /* R2263 (0x8d7) - DRC2LMIX Input 4 Volume */
+ { 0x000008d8, 0x0000 }, /* R2264 (0x8d8) - DRC2RMIX Input 1 Source */
+ { 0x000008d9, 0x0080 }, /* R2265 (0x8d9) - DRC2RMIX Input 1 Volume */
+ { 0x000008da, 0x0000 }, /* R2266 (0x8da) - DRC2RMIX Input 2 Source */
+ { 0x000008db, 0x0080 }, /* R2267 (0x8db) - DRC2RMIX Input 2 Volume */
+ { 0x000008dc, 0x0000 }, /* R2268 (0x8dc) - DRC2RMIX Input 3 Source */
+ { 0x000008dd, 0x0080 }, /* R2269 (0x8dd) - DRC2RMIX Input 3 Volume */
+ { 0x000008de, 0x0000 }, /* R2270 (0x8de) - DRC2RMIX Input 4 Source */
+ { 0x000008df, 0x0080 }, /* R2271 (0x8df) - DRC2RMIX Input 4 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 (0x900) - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 (0x901) - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 (0x902) - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 (0x903) - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 (0x904) - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 (0x905) - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 (0x906) - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 (0x907) - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 (0x908) - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 (0x909) - HPLP2MIX Input 1 Volume */
+ { 0x0000090a, 0x0000 }, /* R2314 (0x90a) - HPLP2MIX Input 2 Source */
+ { 0x0000090b, 0x0080 }, /* R2315 (0x90b) - HPLP2MIX Input 2 Volume */
+ { 0x0000090c, 0x0000 }, /* R2316 (0x90c) - HPLP2MIX Input 3 Source */
+ { 0x0000090d, 0x0080 }, /* R2317 (0x90d) - HPLP2MIX Input 3 Volume */
+ { 0x0000090e, 0x0000 }, /* R2318 (0x90e) - HPLP2MIX Input 4 Source */
+ { 0x0000090f, 0x0080 }, /* R2319 (0x90f) - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 (0x910) - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 (0x911) - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 (0x912) - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 (0x913) - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 (0x914) - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 (0x915) - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 (0x916) - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 (0x917) - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 (0x918) - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 (0x919) - HPLP4MIX Input 1 Volume */
+ { 0x0000091a, 0x0000 }, /* R2330 (0x91a) - HPLP4MIX Input 2 Source */
+ { 0x0000091b, 0x0080 }, /* R2331 (0x91b) - HPLP4MIX Input 2 Volume */
+ { 0x0000091c, 0x0000 }, /* R2332 (0x91c) - HPLP4MIX Input 3 Source */
+ { 0x0000091d, 0x0080 }, /* R2333 (0x91d) - HPLP4MIX Input 3 Volume */
+ { 0x0000091e, 0x0000 }, /* R2334 (0x91e) - HPLP4MIX Input 4 Source */
+ { 0x0000091f, 0x0080 }, /* R2335 (0x91f) - HPLP4MIX Input 4 Volume */
+ { 0x00000940, 0x0000 }, /* R2368 (0x940) - DSP1LMIX Input 1 Source */
+ { 0x00000941, 0x0080 }, /* R2369 (0x941) - DSP1LMIX Input 1 Volume */
+ { 0x00000942, 0x0000 }, /* R2370 (0x942) - DSP1LMIX Input 2 Source */
+ { 0x00000943, 0x0080 }, /* R2371 (0x943) - DSP1LMIX Input 2 Volume */
+ { 0x00000944, 0x0000 }, /* R2372 (0x944) - DSP1LMIX Input 3 Source */
+ { 0x00000945, 0x0080 }, /* R2373 (0x945) - DSP1LMIX Input 3 Volume */
+ { 0x00000946, 0x0000 }, /* R2374 (0x946) - DSP1LMIX Input 4 Source */
+ { 0x00000947, 0x0080 }, /* R2375 (0x947) - DSP1LMIX Input 4 Volume */
+ { 0x00000948, 0x0000 }, /* R2376 (0x948) - DSP1RMIX Input 1 Source */
+ { 0x00000949, 0x0080 }, /* R2377 (0x949) - DSP1RMIX Input 1 Volume */
+ { 0x0000094a, 0x0000 }, /* R2378 (0x94a) - DSP1RMIX Input 2 Source */
+ { 0x0000094b, 0x0080 }, /* R2379 (0x94b) - DSP1RMIX Input 2 Volume */
+ { 0x0000094c, 0x0000 }, /* R2380 (0x94c) - DSP1RMIX Input 3 Source */
+ { 0x0000094d, 0x0080 }, /* R2381 (0x94d) - DSP1RMIX Input 3 Volume */
+ { 0x0000094e, 0x0000 }, /* R2382 (0x94e) - DSP1RMIX Input 4 Source */
+ { 0x0000094f, 0x0080 }, /* R2383 (0x94f) - DSP1RMIX Input 4 Volume */
+ { 0x00000950, 0x0000 }, /* R2384 (0x950) - DSP1AUX1MIX Input 1 Source */
+ { 0x00000958, 0x0000 }, /* R2392 (0x958) - DSP1AUX2MIX Input 1 Source */
+ { 0x00000960, 0x0000 }, /* R2400 (0x960) - DSP1AUX3MIX Input 1 Source */
+ { 0x00000968, 0x0000 }, /* R2408 (0x968) - DSP1AUX4MIX Input 1 Source */
+ { 0x00000970, 0x0000 }, /* R2416 (0x970) - DSP1AUX5MIX Input 1 Source */
+ { 0x00000978, 0x0000 }, /* R2424 (0x978) - DSP1AUX6MIX Input 1 Source */
+ { 0x00000980, 0x0000 }, /* R2432 (0x980) - DSP2LMIX Input 1 Source */
+ { 0x00000981, 0x0080 }, /* R2433 (0x981) - DSP2LMIX Input 1 Volume */
+ { 0x00000982, 0x0000 }, /* R2434 (0x982) - DSP2LMIX Input 2 Source */
+ { 0x00000983, 0x0080 }, /* R2435 (0x983) - DSP2LMIX Input 2 Volume */
+ { 0x00000984, 0x0000 }, /* R2436 (0x984) - DSP2LMIX Input 3 Source */
+ { 0x00000985, 0x0080 }, /* R2437 (0x985) - DSP2LMIX Input 3 Volume */
+ { 0x00000986, 0x0000 }, /* R2438 (0x986) - DSP2LMIX Input 4 Source */
+ { 0x00000987, 0x0080 }, /* R2439 (0x987) - DSP2LMIX Input 4 Volume */
+ { 0x00000988, 0x0000 }, /* R2440 (0x988) - DSP2RMIX Input 1 Source */
+ { 0x00000989, 0x0080 }, /* R2441 (0x989) - DSP2RMIX Input 1 Volume */
+ { 0x0000098a, 0x0000 }, /* R2442 (0x98a) - DSP2RMIX Input 2 Source */
+ { 0x0000098b, 0x0080 }, /* R2443 (0x98b) - DSP2RMIX Input 2 Volume */
+ { 0x0000098c, 0x0000 }, /* R2444 (0x98c) - DSP2RMIX Input 3 Source */
+ { 0x0000098d, 0x0080 }, /* R2445 (0x98d) - DSP2RMIX Input 3 Volume */
+ { 0x0000098e, 0x0000 }, /* R2446 (0x98e) - DSP2RMIX Input 4 Source */
+ { 0x0000098f, 0x0080 }, /* R2447 (0x98f) - DSP2RMIX Input 4 Volume */
+ { 0x00000990, 0x0000 }, /* R2448 (0x990) - DSP2AUX1MIX Input 1 Source */
+ { 0x00000998, 0x0000 }, /* R2456 (0x998) - DSP2AUX2MIX Input 1 Source */
+ { 0x000009a0, 0x0000 }, /* R2464 (0x9a0) - DSP2AUX3MIX Input 1 Source */
+ { 0x000009a8, 0x0000 }, /* R2472 (0x9a8) - DSP2AUX4MIX Input 1 Source */
+ { 0x000009b0, 0x0000 }, /* R2480 (0x9b0) - DSP2AUX5MIX Input 1 Source */
+ { 0x000009b8, 0x0000 }, /* R2488 (0x9b8) - DSP2AUX6MIX Input 1 Source */
+ { 0x000009c0, 0x0000 }, /* R2496 (0x9c0) - DSP3LMIX Input 1 Source */
+ { 0x000009c1, 0x0080 }, /* R2497 (0x9c1) - DSP3LMIX Input 1 Volume */
+ { 0x000009c2, 0x0000 }, /* R2498 (0x9c2) - DSP3LMIX Input 2 Source */
+ { 0x000009c3, 0x0080 }, /* R2499 (0x9c3) - DSP3LMIX Input 2 Volume */
+ { 0x000009c4, 0x0000 }, /* R2500 (0x9c4) - DSP3LMIX Input 3 Source */
+ { 0x000009c5, 0x0080 }, /* R2501 (0x9c5) - DSP3LMIX Input 3 Volume */
+ { 0x000009c6, 0x0000 }, /* R2502 (0x9c6) - DSP3LMIX Input 4 Source */
+ { 0x000009c7, 0x0080 }, /* R2503 (0x9c7) - DSP3LMIX Input 4 Volume */
+ { 0x000009c8, 0x0000 }, /* R2504 (0x9c8) - DSP3RMIX Input 1 Source */
+ { 0x000009c9, 0x0080 }, /* R2505 (0x9c9) - DSP3RMIX Input 1 Volume */
+ { 0x000009ca, 0x0000 }, /* R2506 (0x9ca) - DSP3RMIX Input 2 Source */
+ { 0x000009cb, 0x0080 }, /* R2507 (0x9cb) - DSP3RMIX Input 2 Volume */
+ { 0x000009cc, 0x0000 }, /* R2508 (0x9cc) - DSP3RMIX Input 3 Source */
+ { 0x000009cd, 0x0080 }, /* R2509 (0x9cd) - DSP3RMIX Input 3 Volume */
+ { 0x000009ce, 0x0000 }, /* R2510 (0x9ce) - DSP3RMIX Input 4 Source */
+ { 0x000009cf, 0x0080 }, /* R2511 (0x9cf) - DSP3RMIX Input 4 Volume */
+ { 0x000009d0, 0x0000 }, /* R2512 (0x9d0) - DSP3AUX1MIX Input 1 Source */
+ { 0x000009d8, 0x0000 }, /* R2520 (0x9d8) - DSP3AUX2MIX Input 1 Source */
+ { 0x000009e0, 0x0000 }, /* R2528 (0x9e0) - DSP3AUX3MIX Input 1 Source */
+ { 0x000009e8, 0x0000 }, /* R2536 (0x9e8) - DSP3AUX4MIX Input 1 Source */
+ { 0x000009f0, 0x0000 }, /* R2544 (0x9f0) - DSP3AUX5MIX Input 1 Source */
+ { 0x000009f8, 0x0000 }, /* R2552 (0x9f8) - DSP3AUX6MIX Input 1 Source */
+ { 0x00000a00, 0x0000 }, /* R2560 (0xa00) - DSP4LMIX Input 1 Source */
+ { 0x00000a01, 0x0080 }, /* R2561 (0xa01) - DSP4LMIX Input 1 Volume */
+ { 0x00000a02, 0x0000 }, /* R2562 (0xa02) - DSP4LMIX Input 2 Source */
+ { 0x00000a03, 0x0080 }, /* R2563 (0xa03) - DSP4LMIX Input 2 Volume */
+ { 0x00000a04, 0x0000 }, /* R2564 (0xa04) - DSP4LMIX Input 3 Source */
+ { 0x00000a05, 0x0080 }, /* R2565 (0xa05) - DSP4LMIX Input 3 Volume */
+ { 0x00000a06, 0x0000 }, /* R2566 (0xa06) - DSP4LMIX Input 4 Source */
+ { 0x00000a07, 0x0080 }, /* R2567 (0xa07) - DSP4LMIX Input 4 Volume */
+ { 0x00000a08, 0x0000 }, /* R2568 (0xa08) - DSP4RMIX Input 1 Source */
+ { 0x00000a09, 0x0080 }, /* R2569 (0xa09) - DSP4RMIX Input 1 Volume */
+ { 0x00000a0a, 0x0000 }, /* R2570 (0xa0a) - DSP4RMIX Input 2 Source */
+ { 0x00000a0b, 0x0080 }, /* R2571 (0xa0b) - DSP4RMIX Input 2 Volume */
+ { 0x00000a0c, 0x0000 }, /* R2572 (0xa0c) - DSP4RMIX Input 3 Source */
+ { 0x00000a0d, 0x0080 }, /* R2573 (0xa0d) - DSP4RMIX Input 3 Volume */
+ { 0x00000a0e, 0x0000 }, /* R2574 (0xa0e) - DSP4RMIX Input 4 Source */
+ { 0x00000a0f, 0x0080 }, /* R2575 (0xa0f) - DSP4RMIX Input 4 Volume */
+ { 0x00000a10, 0x0000 }, /* R2576 (0xa10) - DSP4AUX1MIX Input 1 Source */
+ { 0x00000a18, 0x0000 }, /* R2584 (0xa18) - DSP4AUX2MIX Input 1 Source */
+ { 0x00000a20, 0x0000 }, /* R2592 (0xa20) - DSP4AUX3MIX Input 1 Source */
+ { 0x00000a28, 0x0000 }, /* R2600 (0xa28) - DSP4AUX4MIX Input 1 Source */
+ { 0x00000a30, 0x0000 }, /* R2608 (0xa30) - DSP4AUX5MIX Input 1 Source */
+ { 0x00000a38, 0x0000 }, /* R2616 (0xa38) - DSP4AUX6MIX Input 1 Source */
+ { 0x00000a40, 0x0000 }, /* R2624 (0xa40) - DSP5LMIX Input 1 Source */
+ { 0x00000a41, 0x0080 }, /* R2625 (0xa41) - DSP5LMIX Input 1 Volume */
+ { 0x00000a42, 0x0000 }, /* R2626 (0xa42) - DSP5LMIX Input 2 Source */
+ { 0x00000a43, 0x0080 }, /* R2627 (0xa43) - DSP5LMIX Input 2 Volume */
+ { 0x00000a44, 0x0000 }, /* R2628 (0xa44) - DSP5LMIX Input 3 Source */
+ { 0x00000a45, 0x0080 }, /* R2629 (0xa45) - DSP5LMIX Input 3 Volume */
+ { 0x00000a46, 0x0000 }, /* R2630 (0xa46) - DSP5LMIX Input 4 Source */
+ { 0x00000a47, 0x0080 }, /* R2631 (0xa47) - DSP5LMIX Input 4 Volume */
+ { 0x00000a48, 0x0000 }, /* R2632 (0xa48) - DSP5RMIX Input 1 Source */
+ { 0x00000a49, 0x0080 }, /* R2633 (0xa49) - DSP5RMIX Input 1 Volume */
+ { 0x00000a4a, 0x0000 }, /* R2634 (0xa4a) - DSP5RMIX Input 2 Source */
+ { 0x00000a4b, 0x0080 }, /* R2635 (0xa4b) - DSP5RMIX Input 2 Volume */
+ { 0x00000a4c, 0x0000 }, /* R2636 (0xa4c) - DSP5RMIX Input 3 Source */
+ { 0x00000a4d, 0x0080 }, /* R2637 (0xa4d) - DSP5RMIX Input 3 Volume */
+ { 0x00000a4e, 0x0000 }, /* R2638 (0xa4e) - DSP5RMIX Input 4 Source */
+ { 0x00000a4f, 0x0080 }, /* R2639 (0xa4f) - DSP5RMIX Input 4 Volume */
+ { 0x00000a50, 0x0000 }, /* R2640 (0xa50) - DSP5AUX1MIX Input 1 Source */
+ { 0x00000a58, 0x0000 }, /* R2648 (0xa58) - DSP5AUX2MIX Input 1 Source */
+ { 0x00000a60, 0x0000 }, /* R2656 (0xa60) - DSP5AUX3MIX Input 1 Source */
+ { 0x00000a68, 0x0000 }, /* R2664 (0xa68) - DSP5AUX4MIX Input 1 Source */
+ { 0x00000a70, 0x0000 }, /* R2672 (0xa70) - DSP5AUX5MIX Input 1 Source */
+ { 0x00000a78, 0x0000 }, /* R2680 (0xa78) - DSP5AUX6MIX Input 1 Source */
+ { 0x00000a80, 0x0000 }, /* R2688 (0xa80) - ASRC1_1LMIX Input 1 Source */
+ { 0x00000a88, 0x0000 }, /* R2696 (0xa88) - ASRC1_1RMIX Input 1 Source */
+ { 0x00000a90, 0x0000 }, /* R2704 (0xa90) - ASRC1_2LMIX Input 1 Source */
+ { 0x00000a98, 0x0000 }, /* R2712 (0xa98) - ASRC1_2RMIX Input 1 Source */
+ { 0x00000aa0, 0x0000 }, /* R2720 (0xaa0) - ASRC2_1LMIX Input 1 Source */
+ { 0x00000aa8, 0x0000 }, /* R2728 (0xaa8) - ASRC2_1RMIX Input 1 Source */
+ { 0x00000ab0, 0x0000 }, /* R2736 (0xab0) - ASRC2_2LMIX Input 1 Source */
+ { 0x00000ab8, 0x0000 }, /* R2744 (0xab8) - ASRC2_2RMIX Input 1 Source */
+ { 0x00000b00, 0x0000 }, /* R2816 (0xb00) - ISRC1DEC1MIX Input 1 Source */
+ { 0x00000b08, 0x0000 }, /* R2824 (0xb08) - ISRC1DEC2MIX Input 1 Source */
+ { 0x00000b10, 0x0000 }, /* R2832 (0xb10) - ISRC1DEC3MIX Input 1 Source */
+ { 0x00000b18, 0x0000 }, /* R2840 (0xb18) - ISRC1DEC4MIX Input 1 Source */
+ { 0x00000b20, 0x0000 }, /* R2848 (0xb20) - ISRC1INT1MIX Input 1 Source */
+ { 0x00000b28, 0x0000 }, /* R2856 (0xb28) - ISRC1INT2MIX Input 1 Source */
+ { 0x00000b30, 0x0000 }, /* R2864 (0xb30) - ISRC1INT3MIX Input 1 Source */
+ { 0x00000b38, 0x0000 }, /* R2872 (0xb38) - ISRC1INT4MIX Input 1 Source */
+ { 0x00000b40, 0x0000 }, /* R2880 (0xb40) - ISRC2DEC1MIX Input 1 Source */
+ { 0x00000b48, 0x0000 }, /* R2888 (0xb48) - ISRC2DEC2MIX Input 1 Source */
+ { 0x00000b50, 0x0000 }, /* R2896 (0xb50) - ISRC2DEC3MIX Input 1 Source */
+ { 0x00000b58, 0x0000 }, /* R2904 (0xb58) - ISRC2DEC4MIX Input 1 Source */
+ { 0x00000b60, 0x0000 }, /* R2912 (0xb60) - ISRC2INT1MIX Input 1 Source */
+ { 0x00000b68, 0x0000 }, /* R2920 (0xb68) - ISRC2INT2MIX Input 1 Source */
+ { 0x00000b70, 0x0000 }, /* R2928 (0xb70) - ISRC2INT3MIX Input 1 Source */
+ { 0x00000b78, 0x0000 }, /* R2936 (0xb78) - ISRC2INT4MIX Input 1 Source */
+ { 0x00000b80, 0x0000 }, /* R2944 (0xb80) - ISRC3DEC1MIX Input 1 Source */
+ { 0x00000b88, 0x0000 }, /* R2952 (0xb88) - ISRC3DEC2MIX Input 1 Source */
+ { 0x00000ba0, 0x0000 }, /* R2976 (0xba0) - ISRC3INT1MIX Input 1 Source */
+ { 0x00000ba8, 0x0000 }, /* R2984 (0xba8) - ISRC3INT2MIX Input 1 Source */
+ { 0x00000bc0, 0x0000 }, /* R3008 (0xbc0) - ISRC4DEC1MIX Input 1 Source */
+ { 0x00000bc8, 0x0000 }, /* R3016 (0xbc8) - ISRC4DEC2MIX Input 1 Source */
+ { 0x00000be0, 0x0000 }, /* R3040 (0xbe0) - ISRC4INT1MIX Input 1 Source */
+ { 0x00000be8, 0x0000 }, /* R3048 (0xbe8) - ISRC4INT2MIX Input 1 Source */
+ { 0x00000c00, 0x0000 }, /* R3072 (0xc00) - DSP6LMIX Input 1 Source */
+ { 0x00000c01, 0x0080 }, /* R3073 (0xc01) - DSP6LMIX Input 1 Volume */
+ { 0x00000c02, 0x0000 }, /* R3074 (0xc02) - DSP6LMIX Input 2 Source */
+ { 0x00000c03, 0x0080 }, /* R3075 (0xc03) - DSP6LMIX Input 2 Volume */
+ { 0x00000c04, 0x0000 }, /* R3076 (0xc04) - DSP6LMIX Input 3 Source */
+ { 0x00000c05, 0x0080 }, /* R3077 (0xc05) - DSP6LMIX Input 3 Volume */
+ { 0x00000c06, 0x0000 }, /* R3078 (0xc06) - DSP6LMIX Input 4 Source */
+ { 0x00000c07, 0x0080 }, /* R3079 (0xc07) - DSP6LMIX Input 4 Volume */
+ { 0x00000c08, 0x0000 }, /* R3080 (0xc08) - DSP6RMIX Input 1 Source */
+ { 0x00000c09, 0x0080 }, /* R3081 (0xc09) - DSP6RMIX Input 1 Volume */
+ { 0x00000c0a, 0x0000 }, /* R3082 (0xc0a) - DSP6RMIX Input 2 Source */
+ { 0x00000c0b, 0x0080 }, /* R3083 (0xc0b) - DSP6RMIX Input 2 Volume */
+ { 0x00000c0c, 0x0000 }, /* R3084 (0xc0c) - DSP6RMIX Input 3 Source */
+ { 0x00000c0d, 0x0080 }, /* R3085 (0xc0d) - DSP6RMIX Input 3 Volume */
+ { 0x00000c0e, 0x0000 }, /* R3086 (0xc0e) - DSP6RMIX Input 4 Source */
+ { 0x00000c0f, 0x0080 }, /* R3087 (0xc0f) - DSP6RMIX Input 4 Volume */
+ { 0x00000c10, 0x0000 }, /* R3088 (0xc10) - DSP6AUX1MIX Input 1 Source */
+ { 0x00000c18, 0x0000 }, /* R3096 (0xc18) - DSP6AUX2MIX Input 1 Source */
+ { 0x00000c20, 0x0000 }, /* R3104 (0xc20) - DSP6AUX3MIX Input 1 Source */
+ { 0x00000c28, 0x0000 }, /* R3112 (0xc28) - DSP6AUX4MIX Input 1 Source */
+ { 0x00000c30, 0x0000 }, /* R3120 (0xc30) - DSP6AUX5MIX Input 1 Source */
+ { 0x00000c38, 0x0000 }, /* R3128 (0xc38) - DSP6AUX6MIX Input 1 Source */
+ { 0x00000c40, 0x0000 }, /* R3136 (0xc40) - DSP7LMIX Input 1 Source */
+ { 0x00000c41, 0x0080 }, /* R3137 (0xc41) - DSP7LMIX Input 1 Volume */
+ { 0x00000c42, 0x0000 }, /* R3138 (0xc42) - DSP7LMIX Input 2 Source */
+ { 0x00000c43, 0x0080 }, /* R3139 (0xc43) - DSP7LMIX Input 2 Volume */
+ { 0x00000c44, 0x0000 }, /* R3140 (0xc44) - DSP7LMIX Input 3 Source */
+ { 0x00000c45, 0x0080 }, /* R3141 (0xc45) - DSP7LMIX Input 3 Volume */
+ { 0x00000c46, 0x0000 }, /* R3142 (0xc46) - DSP7LMIX Input 4 Source */
+ { 0x00000c47, 0x0080 }, /* R3143 (0xc47) - DSP7LMIX Input 4 Volume */
+ { 0x00000c48, 0x0000 }, /* R3144 (0xc48) - DSP7RMIX Input 1 Source */
+ { 0x00000c49, 0x0080 }, /* R3145 (0xc49) - DSP7RMIX Input 1 Volume */
+ { 0x00000c4a, 0x0000 }, /* R3146 (0xc4a) - DSP7RMIX Input 2 Source */
+ { 0x00000c4b, 0x0080 }, /* R3147 (0xc4b) - DSP7RMIX Input 2 Volume */
+ { 0x00000c4c, 0x0000 }, /* R3148 (0xc4c) - DSP7RMIX Input 3 Source */
+ { 0x00000c4d, 0x0080 }, /* R3149 (0xc4d) - DSP7RMIX Input 3 Volume */
+ { 0x00000c4e, 0x0000 }, /* R3150 (0xc4e) - DSP7RMIX Input 4 Source */
+ { 0x00000c4f, 0x0080 }, /* R3151 (0xc4f) - DSP7RMIX Input 4 Volume */
+ { 0x00000c50, 0x0000 }, /* R3152 (0xc50) - DSP7AUX1MIX Input 1 Source */
+ { 0x00000c58, 0x0000 }, /* R3160 (0xc58) - DSP7AUX2MIX Input 1 Source */
+ { 0x00000c60, 0x0000 }, /* R3168 (0xc60) - DSP7AUX3MIX Input 1 Source */
+ { 0x00000c68, 0x0000 }, /* R3176 (0xc68) - DSP7AUX4MIX Input 1 Source */
+ { 0x00000c70, 0x0000 }, /* R3184 (0xc70) - DSP7AUX5MIX Input 1 Source */
+ { 0x00000c78, 0x0000 }, /* R3192 (0xc78) - DSP7AUX6MIX Input 1 Source */
+ { 0x00000dc0, 0x0000 }, /* R3520 (0xdc0) - DFC1MIX Input 1 Source */
+ { 0x00000dc8, 0x0000 }, /* R3528 (0xdc8) - DFC2MIX Input 1 Source */
+ { 0x00000dd0, 0x0000 }, /* R3536 (0xdd0) - DFC3MIX Input 1 Source */
+ { 0x00000dd8, 0x0000 }, /* R3544 (0xdd8) - DFC4MIX Input 1 Source */
+ { 0x00000de0, 0x0000 }, /* R3552 (0xde0) - DFC5MIX Input 1 Source */
+ { 0x00000de8, 0x0000 }, /* R3560 (0xde8) - DFC6MIX Input 1 Source */
+ { 0x00000df0, 0x0000 }, /* R3568 (0xdf0) - DFC7MIX Input 1 Source */
+ { 0x00000df8, 0x0000 }, /* R3576 (0xdf8) - DFC8MIX Input 1 Source */
+ { 0x00000e00, 0x0000 }, /* R3584 (0xe00) - FX_Ctrl1 */
+ { 0x00000e10, 0x6318 }, /* R3600 (0xe10) - EQ1_1 */
+ { 0x00000e11, 0x6300 }, /* R3601 (0xe11) - EQ1_2 */
+ { 0x00000e12, 0x0fc8 }, /* R3602 (0xe12) - EQ1_3 */
+ { 0x00000e13, 0x03fe }, /* R3603 (0xe13) - EQ1_4 */
+ { 0x00000e14, 0x00e0 }, /* R3604 (0xe14) - EQ1_5 */
+ { 0x00000e15, 0x1ec4 }, /* R3605 (0xe15) - EQ1_6 */
+ { 0x00000e16, 0xf136 }, /* R3606 (0xe16) - EQ1_7 */
+ { 0x00000e17, 0x0409 }, /* R3607 (0xe17) - EQ1_8 */
+ { 0x00000e18, 0x04cc }, /* R3608 (0xe18) - EQ1_9 */
+ { 0x00000e19, 0x1c9b }, /* R3609 (0xe19) - EQ1_10 */
+ { 0x00000e1a, 0xf337 }, /* R3610 (0xe1a) - EQ1_11 */
+ { 0x00000e1b, 0x040b }, /* R3611 (0xe1b) - EQ1_12 */
+ { 0x00000e1c, 0x0cbb }, /* R3612 (0xe1c) - EQ1_13 */
+ { 0x00000e1d, 0x16f8 }, /* R3613 (0xe1d) - EQ1_14 */
+ { 0x00000e1e, 0xf7d9 }, /* R3614 (0xe1e) - EQ1_15 */
+ { 0x00000e1f, 0x040a }, /* R3615 (0xe1f) - EQ1_16 */
+ { 0x00000e20, 0x1f14 }, /* R3616 (0xe20) - EQ1_17 */
+ { 0x00000e21, 0x058c }, /* R3617 (0xe21) - EQ1_18 */
+ { 0x00000e22, 0x0563 }, /* R3618 (0xe22) - EQ1_19 */
+ { 0x00000e23, 0x4000 }, /* R3619 (0xe23) - EQ1_20 */
+ { 0x00000e24, 0x0b75 }, /* R3620 (0xe24) - EQ1_21 */
+ { 0x00000e26, 0x6318 }, /* R3622 (0xe26) - EQ2_1 */
+ { 0x00000e27, 0x6300 }, /* R3623 (0xe27) - EQ2_2 */
+ { 0x00000e28, 0x0fc8 }, /* R3624 (0xe28) - EQ2_3 */
+ { 0x00000e29, 0x03fe }, /* R3625 (0xe29) - EQ2_4 */
+ { 0x00000e2a, 0x00e0 }, /* R3626 (0xe2a) - EQ2_5 */
+ { 0x00000e2b, 0x1ec4 }, /* R3627 (0xe2b) - EQ2_6 */
+ { 0x00000e2c, 0xf136 }, /* R3628 (0xe2c) - EQ2_7 */
+ { 0x00000e2d, 0x0409 }, /* R3629 (0xe2d) - EQ2_8 */
+ { 0x00000e2e, 0x04cc }, /* R3630 (0xe2e) - EQ2_9 */
+ { 0x00000e2f, 0x1c9b }, /* R3631 (0xe2f) - EQ2_10 */
+ { 0x00000e30, 0xf337 }, /* R3632 (0xe30) - EQ2_11 */
+ { 0x00000e31, 0x040b }, /* R3633 (0xe31) - EQ2_12 */
+ { 0x00000e32, 0x0cbb }, /* R3634 (0xe32) - EQ2_13 */
+ { 0x00000e33, 0x16f8 }, /* R3635 (0xe33) - EQ2_14 */
+ { 0x00000e34, 0xf7d9 }, /* R3636 (0xe34) - EQ2_15 */
+ { 0x00000e35, 0x040a }, /* R3637 (0xe35) - EQ2_16 */
+ { 0x00000e36, 0x1f14 }, /* R3638 (0xe36) - EQ2_17 */
+ { 0x00000e37, 0x058c }, /* R3639 (0xe37) - EQ2_18 */
+ { 0x00000e38, 0x0563 }, /* R3640 (0xe38) - EQ2_19 */
+ { 0x00000e39, 0x4000 }, /* R3641 (0xe39) - EQ2_20 */
+ { 0x00000e3a, 0x0b75 }, /* R3642 (0xe3a) - EQ2_21 */
+ { 0x00000e3c, 0x6318 }, /* R3644 (0xe3c) - EQ3_1 */
+ { 0x00000e3d, 0x6300 }, /* R3645 (0xe3d) - EQ3_2 */
+ { 0x00000e3e, 0x0fc8 }, /* R3646 (0xe3e) - EQ3_3 */
+ { 0x00000e3f, 0x03fe }, /* R3647 (0xe3f) - EQ3_4 */
+ { 0x00000e40, 0x00e0 }, /* R3648 (0xe40) - EQ3_5 */
+ { 0x00000e41, 0x1ec4 }, /* R3649 (0xe41) - EQ3_6 */
+ { 0x00000e42, 0xf136 }, /* R3650 (0xe42) - EQ3_7 */
+ { 0x00000e43, 0x0409 }, /* R3651 (0xe43) - EQ3_8 */
+ { 0x00000e44, 0x04cc }, /* R3652 (0xe44) - EQ3_9 */
+ { 0x00000e45, 0x1c9b }, /* R3653 (0xe45) - EQ3_10 */
+ { 0x00000e46, 0xf337 }, /* R3654 (0xe46) - EQ3_11 */
+ { 0x00000e47, 0x040b }, /* R3655 (0xe47) - EQ3_12 */
+ { 0x00000e48, 0x0cbb }, /* R3656 (0xe48) - EQ3_13 */
+ { 0x00000e49, 0x16f8 }, /* R3657 (0xe49) - EQ3_14 */
+ { 0x00000e4a, 0xf7d9 }, /* R3658 (0xe4a) - EQ3_15 */
+ { 0x00000e4b, 0x040a }, /* R3659 (0xe4b) - EQ3_16 */
+ { 0x00000e4c, 0x1f14 }, /* R3660 (0xe4c) - EQ3_17 */
+ { 0x00000e4d, 0x058c }, /* R3661 (0xe4d) - EQ3_18 */
+ { 0x00000e4e, 0x0563 }, /* R3662 (0xe4e) - EQ3_19 */
+ { 0x00000e4f, 0x4000 }, /* R3663 (0xe4f) - EQ3_20 */
+ { 0x00000e50, 0x0b75 }, /* R3664 (0xe50) - EQ3_21 */
+ { 0x00000e52, 0x6318 }, /* R3666 (0xe52) - EQ4_1 */
+ { 0x00000e53, 0x6300 }, /* R3667 (0xe53) - EQ4_2 */
+ { 0x00000e54, 0x0fc8 }, /* R3668 (0xe54) - EQ4_3 */
+ { 0x00000e55, 0x03fe }, /* R3669 (0xe55) - EQ4_4 */
+ { 0x00000e56, 0x00e0 }, /* R3670 (0xe56) - EQ4_5 */
+ { 0x00000e57, 0x1ec4 }, /* R3671 (0xe57) - EQ4_6 */
+ { 0x00000e58, 0xf136 }, /* R3672 (0xe58) - EQ4_7 */
+ { 0x00000e59, 0x0409 }, /* R3673 (0xe59) - EQ4_8 */
+ { 0x00000e5a, 0x04cc }, /* R3674 (0xe5a) - EQ4_9 */
+ { 0x00000e5b, 0x1c9b }, /* R3675 (0xe5b) - EQ4_10 */
+ { 0x00000e5c, 0xf337 }, /* R3676 (0xe5c) - EQ4_11 */
+ { 0x00000e5d, 0x040b }, /* R3677 (0xe5d) - EQ4_12 */
+ { 0x00000e5e, 0x0cbb }, /* R3678 (0xe5e) - EQ4_13 */
+ { 0x00000e5f, 0x16f8 }, /* R3679 (0xe5f) - EQ4_14 */
+ { 0x00000e60, 0xf7d9 }, /* R3680 (0xe60) - EQ4_15 */
+ { 0x00000e61, 0x040a }, /* R3681 (0xe61) - EQ4_16 */
+ { 0x00000e62, 0x1f14 }, /* R3682 (0xe62) - EQ4_17 */
+ { 0x00000e63, 0x058c }, /* R3683 (0xe63) - EQ4_18 */
+ { 0x00000e64, 0x0563 }, /* R3684 (0xe64) - EQ4_19 */
+ { 0x00000e65, 0x4000 }, /* R3685 (0xe65) - EQ4_20 */
+ { 0x00000e66, 0x0b75 }, /* R3686 (0xe66) - EQ4_21 */
+ { 0x00000e80, 0x0018 }, /* R3712 (0xe80) - DRC1 ctrl1 */
+ { 0x00000e81, 0x0933 }, /* R3713 (0xe81) - DRC1 ctrl2 */
+ { 0x00000e82, 0x0018 }, /* R3714 (0xe82) - DRC1 ctrl3 */
+ { 0x00000e83, 0x0000 }, /* R3715 (0xe83) - DRC1 ctrl4 */
+ { 0x00000e84, 0x0000 }, /* R3716 (0xe84) - DRC1 ctrl5 */
+ { 0x00000e88, 0x0018 }, /* R3720 (0xe88) - DRC2 ctrl1 */
+ { 0x00000e89, 0x0933 }, /* R3721 (0xe89) - DRC2 ctrl2 */
+ { 0x00000e8a, 0x0018 }, /* R3722 (0xe8a) - DRC2 ctrl3 */
+ { 0x00000e8b, 0x0000 }, /* R3723 (0xe8b) - DRC2 ctrl4 */
+ { 0x00000e8c, 0x0000 }, /* R3724 (0xe8c) - DRC2 ctrl5 */
+ { 0x00000ec0, 0x0000 }, /* R3776 (0xec0) - HPLPF1_1 */
+ { 0x00000ec1, 0x0000 }, /* R3777 (0xec1) - HPLPF1_2 */
+ { 0x00000ec4, 0x0000 }, /* R3780 (0xec4) - HPLPF2_1 */
+ { 0x00000ec5, 0x0000 }, /* R3781 (0xec5) - HPLPF2_2 */
+ { 0x00000ec8, 0x0000 }, /* R3784 (0xec8) - HPLPF3_1 */
+ { 0x00000ec9, 0x0000 }, /* R3785 (0xec9) - HPLPF3_2 */
+ { 0x00000ecc, 0x0000 }, /* R3788 (0xecc) - HPLPF4_1 */
+ { 0x00000ecd, 0x0000 }, /* R3789 (0xecd) - HPLPF4_2 */
+ { 0x00000ed0, 0x0000 }, /* R3792 (0xed0) - ASRC2_ENABLE */
+ { 0x00000ed2, 0x0000 }, /* R3794 (0xed2) - ASRC2_RATE1 */
+ { 0x00000ed3, 0x4000 }, /* R3795 (0xed3) - ASRC2_RATE2 */
+ { 0x00000ee0, 0x0000 }, /* R3808 (0xee0) - ASRC1_ENABLE */
+ { 0x00000ee2, 0x0000 }, /* R3810 (0xee2) - ASRC1_RATE1 */
+ { 0x00000ee3, 0x4000 }, /* R3811 (0xee3) - ASRC1_RATE2 */
+ { 0x00000ef0, 0x0000 }, /* R3824 (0xef0) - ISRC 1 CTRL 1 */
+ { 0x00000ef1, 0x0001 }, /* R3825 (0xef1) - ISRC 1 CTRL 2 */
+ { 0x00000ef2, 0x0000 }, /* R3826 (0xef2) - ISRC 1 CTRL 3 */
+ { 0x00000ef3, 0x0000 }, /* R3827 (0xef3) - ISRC 2 CTRL 1 */
+ { 0x00000ef4, 0x0001 }, /* R3828 (0xef4) - ISRC 2 CTRL 2 */
+ { 0x00000ef5, 0x0000 }, /* R3829 (0xef5) - ISRC 2 CTRL 3 */
+ { 0x00000ef6, 0x0000 }, /* R3830 (0xef6) - ISRC 3 CTRL 1 */
+ { 0x00000ef7, 0x0001 }, /* R3831 (0xef7) - ISRC 3 CTRL 2 */
+ { 0x00000ef8, 0x0000 }, /* R3832 (0xef8) - ISRC 3 CTRL 3 */
+ { 0x00000ef9, 0x0000 }, /* R3833 (0xef9) - ISRC 4 CTRL 1 */
+ { 0x00000efa, 0x0001 }, /* R3834 (0xefa) - ISRC 4 CTRL 2 */
+ { 0x00000efb, 0x0000 }, /* R3835 (0xefb) - ISRC 4 CTRL 3 */
+ { 0x00000f01, 0x0000 }, /* R3841 (0xf01) - ANC_SRC */
+ { 0x00000f02, 0x0000 }, /* R3842 (0xf02) - DSP Status */
+ { 0x00000f08, 0x001c }, /* R3848 (0xf08) - ANC Coefficient */
+ { 0x00000f09, 0x0000 }, /* R3849 (0xf09) - ANC Coefficient */
+ { 0x00000f0a, 0x0000 }, /* R3850 (0xf0a) - ANC Coefficient */
+ { 0x00000f0b, 0x0000 }, /* R3851 (0xf0b) - ANC Coefficient */
+ { 0x00000f0c, 0x0000 }, /* R3852 (0xf0c) - ANC Coefficient */
+ { 0x00000f0d, 0x0000 }, /* R3853 (0xf0d) - ANC Coefficient */
+ { 0x00000f0e, 0x0000 }, /* R3854 (0xf0e) - ANC Coefficient */
+ { 0x00000f0f, 0x0000 }, /* R3855 (0xf0f) - ANC Coefficient */
+ { 0x00000f10, 0x0000 }, /* R3856 (0xf10) - ANC Coefficient */
+ { 0x00000f11, 0x0000 }, /* R3857 (0xf11) - ANC Coefficient */
+ { 0x00000f12, 0x0000 }, /* R3858 (0xf12) - ANC Coefficient */
+ { 0x00000f15, 0x0000 }, /* R3861 (0xf15) - FCL Filter Control */
+ { 0x00000f17, 0x0004 }, /* R3863 (0xf17) - FCL ADC Reformatter Control */
+ { 0x00000f18, 0x0004 }, /* R3864 (0xf18) - ANC Coefficient */
+ { 0x00000f19, 0x0002 }, /* R3865 (0xf19) - ANC Coefficient */
+ { 0x00000f1a, 0x0000 }, /* R3866 (0xf1a) - ANC Coefficient */
+ { 0x00000f1b, 0x0010 }, /* R3867 (0xf1b) - ANC Coefficient */
+ { 0x00000f1c, 0x0000 }, /* R3868 (0xf1c) - ANC Coefficient */
+ { 0x00000f1d, 0x0000 }, /* R3869 (0xf1d) - ANC Coefficient */
+ { 0x00000f1e, 0x0000 }, /* R3870 (0xf1e) - ANC Coefficient */
+ { 0x00000f1f, 0x0000 }, /* R3871 (0xf1f) - ANC Coefficient */
+ { 0x00000f20, 0x0000 }, /* R3872 (0xf20) - ANC Coefficient */
+ { 0x00000f21, 0x0000 }, /* R3873 (0xf21) - ANC Coefficient */
+ { 0x00000f22, 0x0000 }, /* R3874 (0xf22) - ANC Coefficient */
+ { 0x00000f23, 0x0000 }, /* R3875 (0xf23) - ANC Coefficient */
+ { 0x00000f24, 0x0000 }, /* R3876 (0xf24) - ANC Coefficient */
+ { 0x00000f25, 0x0000 }, /* R3877 (0xf25) - ANC Coefficient */
+ { 0x00000f26, 0x0000 }, /* R3878 (0xf26) - ANC Coefficient */
+ { 0x00000f27, 0x0000 }, /* R3879 (0xf27) - ANC Coefficient */
+ { 0x00000f28, 0x0000 }, /* R3880 (0xf28) - ANC Coefficient */
+ { 0x00000f29, 0x0000 }, /* R3881 (0xf29) - ANC Coefficient */
+ { 0x00000f2a, 0x0000 }, /* R3882 (0xf2a) - ANC Coefficient */
+ { 0x00000f2b, 0x0000 }, /* R3883 (0xf2b) - ANC Coefficient */
+ { 0x00000f2c, 0x0000 }, /* R3884 (0xf2c) - ANC Coefficient */
+ { 0x00000f2d, 0x0000 }, /* R3885 (0xf2d) - ANC Coefficient */
+ { 0x00000f2e, 0x0000 }, /* R3886 (0xf2e) - ANC Coefficient */
+ { 0x00000f2f, 0x0000 }, /* R3887 (0xf2f) - ANC Coefficient */
+ { 0x00000f30, 0x0000 }, /* R3888 (0xf30) - ANC Coefficient */
+ { 0x00000f31, 0x0000 }, /* R3889 (0xf31) - ANC Coefficient */
+ { 0x00000f32, 0x0000 }, /* R3890 (0xf32) - ANC Coefficient */
+ { 0x00000f33, 0x0000 }, /* R3891 (0xf33) - ANC Coefficient */
+ { 0x00000f34, 0x0000 }, /* R3892 (0xf34) - ANC Coefficient */
+ { 0x00000f35, 0x0000 }, /* R3893 (0xf35) - ANC Coefficient */
+ { 0x00000f36, 0x0000 }, /* R3894 (0xf36) - ANC Coefficient */
+ { 0x00000f37, 0x0000 }, /* R3895 (0xf37) - ANC Coefficient */
+ { 0x00000f38, 0x0000 }, /* R3896 (0xf38) - ANC Coefficient */
+ { 0x00000f39, 0x0000 }, /* R3897 (0xf39) - ANC Coefficient */
+ { 0x00000f3a, 0x0000 }, /* R3898 (0xf3a) - ANC Coefficient */
+ { 0x00000f3b, 0x0000 }, /* R3899 (0xf3b) - ANC Coefficient */
+ { 0x00000f3c, 0x0000 }, /* R3900 (0xf3c) - ANC Coefficient */
+ { 0x00000f3d, 0x0000 }, /* R3901 (0xf3d) - ANC Coefficient */
+ { 0x00000f3e, 0x0000 }, /* R3902 (0xf3e) - ANC Coefficient */
+ { 0x00000f3f, 0x0000 }, /* R3903 (0xf3f) - ANC Coefficient */
+ { 0x00000f40, 0x0000 }, /* R3904 (0xf40) - ANC Coefficient */
+ { 0x00000f41, 0x0000 }, /* R3905 (0xf41) - ANC Coefficient */
+ { 0x00000f42, 0x0000 }, /* R3906 (0xf42) - ANC Coefficient */
+ { 0x00000f43, 0x0000 }, /* R3907 (0xf43) - ANC Coefficient */
+ { 0x00000f44, 0x0000 }, /* R3908 (0xf44) - ANC Coefficient */
+ { 0x00000f45, 0x0000 }, /* R3909 (0xf45) - ANC Coefficient */
+ { 0x00000f46, 0x0000 }, /* R3910 (0xf46) - ANC Coefficient */
+ { 0x00000f47, 0x0000 }, /* R3911 (0xf47) - ANC Coefficient */
+ { 0x00000f48, 0x0000 }, /* R3912 (0xf48) - ANC Coefficient */
+ { 0x00000f49, 0x0000 }, /* R3913 (0xf49) - ANC Coefficient */
+ { 0x00000f4a, 0x0000 }, /* R3914 (0xf4a) - ANC Coefficient */
+ { 0x00000f4b, 0x0000 }, /* R3915 (0xf4b) - ANC Coefficient */
+ { 0x00000f4c, 0x0000 }, /* R3916 (0xf4c) - ANC Coefficient */
+ { 0x00000f4d, 0x0000 }, /* R3917 (0xf4d) - ANC Coefficient */
+ { 0x00000f4e, 0x0000 }, /* R3918 (0xf4e) - ANC Coefficient */
+ { 0x00000f4f, 0x0000 }, /* R3919 (0xf4f) - ANC Coefficient */
+ { 0x00000f50, 0x0000 }, /* R3920 (0xf50) - ANC Coefficient */
+ { 0x00000f51, 0x0000 }, /* R3921 (0xf51) - ANC Coefficient */
+ { 0x00000f52, 0x0000 }, /* R3922 (0xf52) - ANC Coefficient */
+ { 0x00000f53, 0x0000 }, /* R3923 (0xf53) - ANC Coefficient */
+ { 0x00000f54, 0x0000 }, /* R3924 (0xf54) - ANC Coefficient */
+ { 0x00000f55, 0x0000 }, /* R3925 (0xf55) - ANC Coefficient */
+ { 0x00000f56, 0x0000 }, /* R3926 (0xf56) - ANC Coefficient */
+ { 0x00000f57, 0x0000 }, /* R3927 (0xf57) - ANC Coefficient */
+ { 0x00000f58, 0x0000 }, /* R3928 (0xf58) - ANC Coefficient */
+ { 0x00000f59, 0x0000 }, /* R3929 (0xf59) - ANC Coefficient */
+ { 0x00000f5a, 0x0000 }, /* R3930 (0xf5a) - ANC Coefficient */
+ { 0x00000f5b, 0x0000 }, /* R3931 (0xf5b) - ANC Coefficient */
+ { 0x00000f5c, 0x0000 }, /* R3932 (0xf5c) - ANC Coefficient */
+ { 0x00000f5d, 0x0000 }, /* R3933 (0xf5d) - ANC Coefficient */
+ { 0x00000f5e, 0x0000 }, /* R3934 (0xf5e) - ANC Coefficient */
+ { 0x00000f5f, 0x0000 }, /* R3935 (0xf5f) - ANC Coefficient */
+ { 0x00000f60, 0x0000 }, /* R3936 (0xf60) - ANC Coefficient */
+ { 0x00000f61, 0x0000 }, /* R3937 (0xf61) - ANC Coefficient */
+ { 0x00000f62, 0x0000 }, /* R3938 (0xf62) - ANC Coefficient */
+ { 0x00000f63, 0x0000 }, /* R3939 (0xf63) - ANC Coefficient */
+ { 0x00000f64, 0x0000 }, /* R3940 (0xf64) - ANC Coefficient */
+ { 0x00000f65, 0x0000 }, /* R3941 (0xf65) - ANC Coefficient */
+ { 0x00000f66, 0x0000 }, /* R3942 (0xf66) - ANC Coefficient */
+ { 0x00000f67, 0x0000 }, /* R3943 (0xf67) - ANC Coefficient */
+ { 0x00000f68, 0x0000 }, /* R3944 (0xf68) - ANC Coefficient */
+ { 0x00000f69, 0x0000 }, /* R3945 (0xf69) - ANC Coefficient */
+ { 0x00000f71, 0x0000 }, /* R3953 (0xf71) - FCR Filter Control */
+ { 0x00000f73, 0x0004 }, /* R3955 (0xf73) - FCR ADC Reformatter Control */
+ { 0x00000f74, 0x0004 }, /* R3956 (0xf74) - ANC Coefficient */
+ { 0x00000f75, 0x0002 }, /* R3957 (0xf75) - ANC Coefficient */
+ { 0x00000f76, 0x0000 }, /* R3958 (0xf76) - ANC Coefficient */
+ { 0x00000f77, 0x0010 }, /* R3959 (0xf77) - ANC Coefficient */
+ { 0x00000f78, 0x0000 }, /* R3960 (0xf78) - ANC Coefficient */
+ { 0x00000f79, 0x0000 }, /* R3961 (0xf79) - ANC Coefficient */
+ { 0x00000f7a, 0x0000 }, /* R3962 (0xf7a) - ANC Coefficient */
+ { 0x00000f7b, 0x0000 }, /* R3963 (0xf7b) - ANC Coefficient */
+ { 0x00000f7c, 0x0000 }, /* R3964 (0xf7c) - ANC Coefficient */
+ { 0x00000f7d, 0x0000 }, /* R3965 (0xf7d) - ANC Coefficient */
+ { 0x00000f7e, 0x0000 }, /* R3966 (0xf7e) - ANC Coefficient */
+ { 0x00000f7f, 0x0000 }, /* R3967 (0xf7f) - ANC Coefficient */
+ { 0x00000f80, 0x0000 }, /* R3968 (0xf80) - ANC Coefficient */
+ { 0x00000f81, 0x0000 }, /* R3969 (0xf81) - ANC Coefficient */
+ { 0x00000f82, 0x0000 }, /* R3970 (0xf82) - ANC Coefficient */
+ { 0x00000f83, 0x0000 }, /* R3971 (0xf83) - ANC Coefficient */
+ { 0x00000f84, 0x0000 }, /* R3972 (0xf84) - ANC Coefficient */
+ { 0x00000f85, 0x0000 }, /* R3973 (0xf85) - ANC Coefficient */
+ { 0x00000f86, 0x0000 }, /* R3974 (0xf86) - ANC Coefficient */
+ { 0x00000f87, 0x0000 }, /* R3975 (0xf87) - ANC Coefficient */
+ { 0x00000f88, 0x0000 }, /* R3976 (0xf88) - ANC Coefficient */
+ { 0x00000f89, 0x0000 }, /* R3977 (0xf89) - ANC Coefficient */
+ { 0x00000f8a, 0x0000 }, /* R3978 (0xf8a) - ANC Coefficient */
+ { 0x00000f8b, 0x0000 }, /* R3979 (0xf8b) - ANC Coefficient */
+ { 0x00000f8c, 0x0000 }, /* R3980 (0xf8c) - ANC Coefficient */
+ { 0x00000f8d, 0x0000 }, /* R3981 (0xf8d) - ANC Coefficient */
+ { 0x00000f8e, 0x0000 }, /* R3982 (0xf8e) - ANC Coefficient */
+ { 0x00000f8f, 0x0000 }, /* R3983 (0xf8f) - ANC Coefficient */
+ { 0x00000f90, 0x0000 }, /* R3984 (0xf90) - ANC Coefficient */
+ { 0x00000f91, 0x0000 }, /* R3985 (0xf91) - ANC Coefficient */
+ { 0x00000f92, 0x0000 }, /* R3986 (0xf92) - ANC Coefficient */
+ { 0x00000f93, 0x0000 }, /* R3987 (0xf93) - ANC Coefficient */
+ { 0x00000f94, 0x0000 }, /* R3988 (0xf94) - ANC Coefficient */
+ { 0x00000f95, 0x0000 }, /* R3989 (0xf95) - ANC Coefficient */
+ { 0x00000f96, 0x0000 }, /* R3990 (0xf96) - ANC Coefficient */
+ { 0x00000f97, 0x0000 }, /* R3991 (0xf97) - ANC Coefficient */
+ { 0x00000f98, 0x0000 }, /* R3992 (0xf98) - ANC Coefficient */
+ { 0x00000f99, 0x0000 }, /* R3993 (0xf99) - ANC Coefficient */
+ { 0x00000f9a, 0x0000 }, /* R3994 (0xf9a) - ANC Coefficient */
+ { 0x00000f9b, 0x0000 }, /* R3995 (0xf9b) - ANC Coefficient */
+ { 0x00000f9c, 0x0000 }, /* R3996 (0xf9c) - ANC Coefficient */
+ { 0x00000f9d, 0x0000 }, /* R3997 (0xf9d) - ANC Coefficient */
+ { 0x00000f9e, 0x0000 }, /* R3998 (0xf9e) - ANC Coefficient */
+ { 0x00000f9f, 0x0000 }, /* R3999 (0xf9f) - ANC Coefficient */
+ { 0x00000fa0, 0x0000 }, /* R4000 (0xfa0) - ANC Coefficient */
+ { 0x00000fa1, 0x0000 }, /* R4001 (0xfa1) - ANC Coefficient */
+ { 0x00000fa2, 0x0000 }, /* R4002 (0xfa2) - ANC Coefficient */
+ { 0x00000fa3, 0x0000 }, /* R4003 (0xfa3) - ANC Coefficient */
+ { 0x00000fa4, 0x0000 }, /* R4004 (0xfa4) - ANC Coefficient */
+ { 0x00000fa5, 0x0000 }, /* R4005 (0xfa5) - ANC Coefficient */
+ { 0x00000fa6, 0x0000 }, /* R4006 (0xfa6) - ANC Coefficient */
+ { 0x00000fa7, 0x0000 }, /* R4007 (0xfa7) - ANC Coefficient */
+ { 0x00000fa8, 0x0000 }, /* R4008 (0xfa8) - ANC Coefficient */
+ { 0x00000fa9, 0x0000 }, /* R4009 (0xfa9) - ANC Coefficient */
+ { 0x00000faa, 0x0000 }, /* R4010 (0xfaa) - ANC Coefficient */
+ { 0x00000fab, 0x0000 }, /* R4011 (0xfab) - ANC Coefficient */
+ { 0x00000fac, 0x0000 }, /* R4012 (0xfac) - ANC Coefficient */
+ { 0x00000fad, 0x0000 }, /* R4013 (0xfad) - ANC Coefficient */
+ { 0x00000fae, 0x0000 }, /* R4014 (0xfae) - ANC Coefficient */
+ { 0x00000faf, 0x0000 }, /* R4015 (0xfaf) - ANC Coefficient */
+ { 0x00000fb0, 0x0000 }, /* R4016 (0xfb0) - ANC Coefficient */
+ { 0x00000fb1, 0x0000 }, /* R4017 (0xfb1) - ANC Coefficient */
+ { 0x00000fb2, 0x0000 }, /* R4018 (0xfb2) - ANC Coefficient */
+ { 0x00000fb3, 0x0000 }, /* R4019 (0xfb3) - ANC Coefficient */
+ { 0x00000fb4, 0x0000 }, /* R4020 (0xfb4) - ANC Coefficient */
+ { 0x00000fb5, 0x0000 }, /* R4021 (0xfb5) - ANC Coefficient */
+ { 0x00000fb6, 0x0000 }, /* R4022 (0xfb6) - ANC Coefficient */
+ { 0x00000fb7, 0x0000 }, /* R4023 (0xfb7) - ANC Coefficient */
+ { 0x00000fb8, 0x0000 }, /* R4024 (0xfb8) - ANC Coefficient */
+ { 0x00000fb9, 0x0000 }, /* R4025 (0xfb9) - ANC Coefficient */
+ { 0x00000fba, 0x0000 }, /* R4026 (0xfba) - ANC Coefficient */
+ { 0x00000fbb, 0x0000 }, /* R4027 (0xfbb) - ANC Coefficient */
+ { 0x00000fbc, 0x0000 }, /* R4028 (0xfbc) - ANC Coefficient */
+ { 0x00000fbd, 0x0000 }, /* R4029 (0xfbd) - ANC Coefficient */
+ { 0x00000fbe, 0x0000 }, /* R4030 (0xfbe) - ANC Coefficient */
+ { 0x00000fbf, 0x0000 }, /* R4031 (0xfbf) - ANC Coefficient */
+ { 0x00000fc0, 0x0000 }, /* R4032 (0xfc0) - ANC Coefficient */
+ { 0x00000fc1, 0x0000 }, /* R4033 (0xfc1) - ANC Coefficient */
+ { 0x00000fc2, 0x0000 }, /* R4034 (0xfc2) - ANC Coefficient */
+ { 0x00000fc3, 0x0000 }, /* R4035 (0xfc3) - ANC Coefficient */
+ { 0x00000fc4, 0x0000 }, /* R4036 (0xfc4) - ANC Coefficient */
+ { 0x00000fc5, 0x0000 }, /* R4037 (0xfc5) - ANC Coefficient */
+ { 0x00001300, 0x050e }, /* R4864 (0x1300) - DAC Comp 1 */
+ { 0x00001302, 0x0101 }, /* R4866 (0x1302) - DAC Comp 2 */
+ { 0x00001380, 0x0425 }, /* R4992 (0x1380) - FRF Coefficient 1L 1 */
+ { 0x00001381, 0xf6d8 }, /* R4993 (0x1381) - FRF Coefficient 1L 2 */
+ { 0x00001382, 0x0632 }, /* R4994 (0x1382) - FRF Coefficient 1L 3 */
+ { 0x00001383, 0xfec8 }, /* R4995 (0x1383) - FRF Coefficient 1L 4 */
+ { 0x00001390, 0x042f }, /* R5008 (0x1390) - FRF Coefficient 1R 1 */
+ { 0x00001391, 0xf6ca }, /* R5009 (0x1391) - FRF Coefficient 1R 2 */
+ { 0x00001392, 0x0637 }, /* R5010 (0x1392) - FRF Coefficient 1R 3 */
+ { 0x00001393, 0xfec8 }, /* R5011 (0x1393) - FRF Coefficient 1R 4 */
+ { 0x000013a0, 0x0000 }, /* R5024 (0x13a0) - FRF Coefficient 2L 1 */
+ { 0x000013a1, 0x0000 }, /* R5025 (0x13a1) - FRF Coefficient 2L 2 */
+ { 0x000013a2, 0x0000 }, /* R5026 (0x13a2) - FRF Coefficient 2L 3 */
+ { 0x000013a3, 0x0000 }, /* R5027 (0x13a3) - FRF Coefficient 2L 4 */
+ { 0x000013b0, 0x0000 }, /* R5040 (0x13b0) - FRF Coefficient 2R 1 */
+ { 0x000013b1, 0x0000 }, /* R5041 (0x13b1) - FRF Coefficient 2R 2 */
+ { 0x000013b2, 0x0000 }, /* R5042 (0x13b2) - FRF Coefficient 2R 3 */
+ { 0x000013b3, 0x0000 }, /* R5043 (0x13b3) - FRF Coefficient 2R 4 */
+ { 0x000013c0, 0x0000 }, /* R5056 (0x13c0) - FRF Coefficient 3L 1 */
+ { 0x000013c1, 0x0000 }, /* R5057 (0x13c1) - FRF Coefficient 3L 2 */
+ { 0x000013c2, 0x0000 }, /* R5058 (0x13c2) - FRF Coefficient 3L 3 */
+ { 0x000013c3, 0x0000 }, /* R5059 (0x13c3) - FRF Coefficient 3L 4 */
+ { 0x000013d0, 0x0000 }, /* R5072 (0x13d0) - FRF Coefficient 3R 1 */
+ { 0x000013d1, 0x0000 }, /* R5073 (0x13d1) - FRF Coefficient 3R 2 */
+ { 0x000013d2, 0x0000 }, /* R5074 (0x13d2) - FRF Coefficient 3R 3 */
+ { 0x000013d3, 0x0000 }, /* R5075 (0x13d3) - FRF Coefficient 3R 4 */
+ { 0x00001400, 0x0000 }, /* R5120 (0x1400) - FRF Coefficient 5L 1 */
+ { 0x00001401, 0x0000 }, /* R5121 (0x1401) - FRF Coefficient 5L 2 */
+ { 0x00001402, 0x0000 }, /* R5122 (0x1402) - FRF Coefficient 5L 3 */
+ { 0x00001403, 0x0000 }, /* R5123 (0x1403) - FRF Coefficient 5L 4 */
+ { 0x00001410, 0x0000 }, /* R5136 (0x1410) - FRF Coefficient 5R 1 */
+ { 0x00001411, 0x0000 }, /* R5137 (0x1411) - FRF Coefficient 5R 2 */
+ { 0x00001412, 0x0000 }, /* R5138 (0x1412) - FRF Coefficient 5R 3 */
+ { 0x00001413, 0x0000 }, /* R5139 (0x1413) - FRF Coefficient 5R 4 */
+ { 0x00001480, 0x0000 }, /* R5248 (0x1480) - DFC1_CTRL */
+ { 0x00001482, 0x1f00 }, /* R5250 (0x1482) - DFC1_RX */
+ { 0x00001484, 0x1f00 }, /* R5252 (0x1484) - DFC1_TX */
+ { 0x00001486, 0x0000 }, /* R5254 (0x1486) - DFC2_CTRL */
+ { 0x00001488, 0x1f00 }, /* R5256 (0x1488) - DFC2_RX */
+ { 0x0000148a, 0x1f00 }, /* R5258 (0x148a) - DFC2_TX */
+ { 0x0000148c, 0x0000 }, /* R5260 (0x148c) - DFC3_CTRL */
+ { 0x0000148e, 0x1f00 }, /* R5262 (0x148e) - DFC3_RX */
+ { 0x00001490, 0x1f00 }, /* R5264 (0x1490) - DFC3_TX */
+ { 0x00001492, 0x0000 }, /* R5266 (0x1492) - DFC4_CTRL */
+ { 0x00001494, 0x1f00 }, /* R5268 (0x1494) - DFC4_RX */
+ { 0x00001496, 0x1f00 }, /* R5270 (0x1496) - DFC4_TX */
+ { 0x00001498, 0x0000 }, /* R5272 (0x1498) - DFC5_CTRL */
+ { 0x0000149a, 0x1f00 }, /* R5274 (0x149a) - DFC5_RX */
+ { 0x0000149c, 0x1f00 }, /* R5276 (0x149c) - DFC5_TX */
+ { 0x0000149e, 0x0000 }, /* R5278 (0x149e) - DFC6_CTRL */
+ { 0x000014a0, 0x1f00 }, /* R5280 (0x14a0) - DFC6_RX */
+ { 0x000014a2, 0x1f00 }, /* R5282 (0x14a2) - DFC6_TX */
+ { 0x000014a4, 0x0000 }, /* R5284 (0x14a4) - DFC7_CTRL */
+ { 0x000014a6, 0x1f00 }, /* R5286 (0x14a6) - DFC7_RX */
+ { 0x000014a8, 0x1f00 }, /* R5288 (0x14a8) - DFC7_TX */
+ { 0x000014aa, 0x0000 }, /* R5290 (0x14aa) - DFC8_CTRL */
+ { 0x000014ac, 0x1f00 }, /* R5292 (0x14ac) - DFC8_RX */
+ { 0x000014ae, 0x1f00 }, /* R5294 (0x14ae) - DFC8_TX */
+ { 0x00001700, 0x2001 }, /* R5888 (0x1700) - GPIO1 Control 1 */
+ { 0x00001701, 0xf000 }, /* R5889 (0x1701) - GPIO1 Control 2 */
+ { 0x00001702, 0x2001 }, /* R5890 (0x1702) - GPIO2 Control 1 */
+ { 0x00001703, 0xf000 }, /* R5891 (0x1703) - GPIO2 Control 2 */
+ { 0x00001704, 0x2001 }, /* R5892 (0x1704) - GPIO3 Control 1 */
+ { 0x00001705, 0xf000 }, /* R5893 (0x1705) - GPIO3 Control 2 */
+ { 0x00001706, 0x2001 }, /* R5894 (0x1706) - GPIO4 Control 1 */
+ { 0x00001707, 0xf000 }, /* R5895 (0x1707) - GPIO4 Control 2 */
+ { 0x00001708, 0x2001 }, /* R5896 (0x1708) - GPIO5 Control 1 */
+ { 0x00001709, 0xf000 }, /* R5897 (0x1709) - GPIO5 Control 2 */
+ { 0x0000170a, 0x2001 }, /* R5898 (0x170a) - GPIO6 Control 1 */
+ { 0x0000170b, 0xf000 }, /* R5899 (0x170b) - GPIO6 Control 2 */
+ { 0x0000170c, 0x2001 }, /* R5900 (0x170c) - GPIO7 Control 1 */
+ { 0x0000170d, 0xf000 }, /* R5901 (0x170d) - GPIO7 Control 2 */
+ { 0x0000170e, 0x2001 }, /* R5902 (0x170e) - GPIO8 Control 1 */
+ { 0x0000170f, 0xf000 }, /* R5903 (0x170f) - GPIO8 Control 2 */
+ { 0x00001710, 0x2001 }, /* R5904 (0x1710) - GPIO9 Control 1 */
+ { 0x00001711, 0xf000 }, /* R5905 (0x1711) - GPIO9 Control 2 */
+ { 0x00001712, 0x2001 }, /* R5906 (0x1712) - GPIO10 Control 1 */
+ { 0x00001713, 0xf000 }, /* R5907 (0x1713) - GPIO10 Control 2 */
+ { 0x00001714, 0x2001 }, /* R5908 (0x1714) - GPIO11 Control 1 */
+ { 0x00001715, 0xf000 }, /* R5909 (0x1715) - GPIO11 Control 2 */
+ { 0x00001716, 0x2001 }, /* R5910 (0x1716) - GPIO12 Control 1 */
+ { 0x00001717, 0xf000 }, /* R5911 (0x1717) - GPIO12 Control 2 */
+ { 0x00001718, 0x2001 }, /* R5912 (0x1718) - GPIO13 Control 1 */
+ { 0x00001719, 0xf000 }, /* R5913 (0x1719) - GPIO13 Control 2 */
+ { 0x0000171a, 0x2001 }, /* R5914 (0x171a) - GPIO14 Control 1 */
+ { 0x0000171b, 0xf000 }, /* R5915 (0x171b) - GPIO14 Control 2 */
+ { 0x0000171c, 0x2001 }, /* R5916 (0x171c) - GPIO15 Control 1 */
+ { 0x0000171d, 0xf000 }, /* R5917 (0x171d) - GPIO15 Control 2 */
+ { 0x0000171e, 0x2001 }, /* R5918 (0x171e) - GPIO16 Control 1 */
+ { 0x0000171f, 0xf000 }, /* R5919 (0x171f) - GPIO16 Control 2 */
+ { 0x00001720, 0x2001 }, /* R5920 (0x1720) - GPIO17 Control 1 */
+ { 0x00001721, 0xf000 }, /* R5921 (0x1721) - GPIO17 Control 2 */
+ { 0x00001722, 0x2001 }, /* R5922 (0x1722) - GPIO18 Control 1 */
+ { 0x00001723, 0xf000 }, /* R5923 (0x1723) - GPIO18 Control 2 */
+ { 0x00001724, 0x2001 }, /* R5924 (0x1724) - GPIO19 Control 1 */
+ { 0x00001725, 0xf000 }, /* R5925 (0x1725) - GPIO19 Control 2 */
+ { 0x00001726, 0x2001 }, /* R5926 (0x1726) - GPIO20 Control 1 */
+ { 0x00001727, 0xf000 }, /* R5927 (0x1727) - GPIO20 Control 2 */
+ { 0x00001728, 0x2001 }, /* R5928 (0x1728) - GPIO21 Control 1 */
+ { 0x00001729, 0xf000 }, /* R5929 (0x1729) - GPIO21 Control 2 */
+ { 0x0000172a, 0x2001 }, /* R5930 (0x172a) - GPIO22 Control 1 */
+ { 0x0000172b, 0xf000 }, /* R5931 (0x172b) - GPIO22 Control 2 */
+ { 0x0000172c, 0x2001 }, /* R5932 (0x172c) - GPIO23 Control 1 */
+ { 0x0000172d, 0xf000 }, /* R5933 (0x172d) - GPIO23 Control 2 */
+ { 0x0000172e, 0x2001 }, /* R5934 (0x172e) - GPIO24 Control 1 */
+ { 0x0000172f, 0xf000 }, /* R5935 (0x172f) - GPIO24 Control 2 */
+ { 0x00001730, 0x2001 }, /* R5936 (0x1730) - GPIO25 Control 1 */
+ { 0x00001731, 0xf000 }, /* R5937 (0x1731) - GPIO25 Control 2 */
+ { 0x00001732, 0x2001 }, /* R5938 (0x1732) - GPIO26 Control 1 */
+ { 0x00001733, 0xf000 }, /* R5939 (0x1733) - GPIO26 Control 2 */
+ { 0x00001734, 0x2001 }, /* R5940 (0x1734) - GPIO27 Control 1 */
+ { 0x00001735, 0xf000 }, /* R5941 (0x1735) - GPIO27 Control 2 */
+ { 0x00001736, 0x2001 }, /* R5942 (0x1736) - GPIO28 Control 1 */
+ { 0x00001737, 0xf000 }, /* R5943 (0x1737) - GPIO28 Control 2 */
+ { 0x00001738, 0x2001 }, /* R5944 (0x1738) - GPIO29 Control 1 */
+ { 0x00001739, 0xf000 }, /* R5945 (0x1739) - GPIO29 Control 2 */
+ { 0x0000173a, 0x2001 }, /* R5946 (0x173a) - GPIO30 Control 1 */
+ { 0x0000173b, 0xf000 }, /* R5947 (0x173b) - GPIO30 Control 2 */
+ { 0x0000173c, 0x2001 }, /* R5948 (0x173c) - GPIO31 Control 1 */
+ { 0x0000173d, 0xf000 }, /* R5949 (0x173d) - GPIO31 Control 2 */
+ { 0x0000173e, 0x2001 }, /* R5950 (0x173e) - GPIO32 Control 1 */
+ { 0x0000173f, 0xf000 }, /* R5951 (0x173f) - GPIO32 Control 2 */
+ { 0x00001740, 0x2001 }, /* R5952 (0x1740) - GPIO33 Control 1 */
+ { 0x00001741, 0xf000 }, /* R5953 (0x1741) - GPIO33 Control 2 */
+ { 0x00001742, 0x2001 }, /* R5954 (0x1742) - GPIO34 Control 1 */
+ { 0x00001743, 0xf000 }, /* R5955 (0x1743) - GPIO34 Control 2 */
+ { 0x00001744, 0x2001 }, /* R5956 (0x1744) - GPIO35 Control 1 */
+ { 0x00001745, 0xf000 }, /* R5957 (0x1745) - GPIO35 Control 2 */
+ { 0x00001746, 0x2001 }, /* R5958 (0x1746) - GPIO36 Control 1 */
+ { 0x00001747, 0xf000 }, /* R5959 (0x1747) - GPIO36 Control 2 */
+ { 0x00001748, 0x2001 }, /* R5960 (0x1748) - GPIO37 Control 1 */
+ { 0x00001749, 0xf000 }, /* R5961 (0x1749) - GPIO37 Control 2 */
+ { 0x0000174a, 0x2001 }, /* R5962 (0x174a) - GPIO38 Control 1 */
+ { 0x0000174b, 0xf000 }, /* R5963 (0x174b) - GPIO38 Control 2 */
+ { 0x00001840, 0xffff }, /* R6208 (0x1840) - IRQ1 Mask 1 */
+ { 0x00001841, 0xffff }, /* R6209 (0x1841) - IRQ1 Mask 2 */
+ { 0x00001842, 0xffff }, /* R6210 (0x1842) - IRQ1 Mask 3 */
+ { 0x00001843, 0xffff }, /* R6211 (0x1843) - IRQ1 Mask 4 */
+ { 0x00001844, 0xffff }, /* R6212 (0x1844) - IRQ1 Mask 5 */
+ { 0x00001845, 0xffff }, /* R6213 (0x1845) - IRQ1 Mask 6 */
+ { 0x00001846, 0xffff }, /* R6214 (0x1846) - IRQ1 Mask 7 */
+ { 0x00001847, 0xffff }, /* R6215 (0x1847) - IRQ1 Mask 8 */
+ { 0x00001848, 0xffff }, /* R6216 (0x1848) - IRQ1 Mask 9 */
+ { 0x00001849, 0xffff }, /* R6217 (0x1849) - IRQ1 Mask 10 */
+ { 0x0000184a, 0xffff }, /* R6218 (0x184a) - IRQ1 Mask 11 */
+ { 0x0000184b, 0xffff }, /* R6219 (0x184b) - IRQ1 Mask 12 */
+ { 0x0000184c, 0xffff }, /* R6220 (0x184c) - IRQ1 Mask 13 */
+ { 0x0000184d, 0xffff }, /* R6221 (0x184d) - IRQ1 Mask 14 */
+ { 0x0000184e, 0xffff }, /* R6222 (0x184e) - IRQ1 Mask 15 */
+ { 0x0000184f, 0xffff }, /* R6223 (0x184f) - IRQ1 Mask 16 */
+ { 0x00001850, 0xffff }, /* R6224 (0x1850) - IRQ1 Mask 17 */
+ { 0x00001851, 0xffff }, /* R6225 (0x1851) - IRQ1 Mask 18 */
+ { 0x00001852, 0xffff }, /* R6226 (0x1852) - IRQ1 Mask 19 */
+ { 0x00001853, 0xffff }, /* R6227 (0x1853) - IRQ1 Mask 20 */
+ { 0x00001854, 0xffff }, /* R6228 (0x1854) - IRQ1 Mask 21 */
+ { 0x00001855, 0xffff }, /* R6229 (0x1855) - IRQ1 Mask 22 */
+ { 0x00001856, 0xffff }, /* R6230 (0x1856) - IRQ1 Mask 23 */
+ { 0x00001857, 0xffff }, /* R6231 (0x1857) - IRQ1 Mask 24 */
+ { 0x00001858, 0xffff }, /* R6232 (0x1858) - IRQ1 Mask 25 */
+ { 0x00001859, 0xffff }, /* R6233 (0x1859) - IRQ1 Mask 26 */
+ { 0x0000185a, 0xffff }, /* R6234 (0x185a) - IRQ1 Mask 27 */
+ { 0x0000185b, 0xffff }, /* R6235 (0x185b) - IRQ1 Mask 28 */
+ { 0x0000185c, 0xffff }, /* R6236 (0x185c) - IRQ1 Mask 29 */
+ { 0x0000185d, 0xffff }, /* R6237 (0x185d) - IRQ1 Mask 30 */
+ { 0x0000185e, 0xffff }, /* R6238 (0x185e) - IRQ1 Mask 31 */
+ { 0x0000185f, 0xffff }, /* R6239 (0x185f) - IRQ1 Mask 32 */
+ { 0x00001860, 0xffff }, /* R6240 (0x1860) - IRQ1 Mask 33 */
+ { 0x00001a06, 0x0000 }, /* R6662 (0x1a06) - Interrupt Debounce 7 */
+ { 0x00001a80, 0x4400 }, /* R6784 (0x1a80) - IRQ1 CTRL */
+};
+
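+/*
+ * Address windows covering the on-board ADSP memories. Any register
+ * address falling inside one of these ranges is treated as a valid
+ * 32-bit access rather than being enumerated individually.
+ */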
+static bool cs47l90_is_adsp_memory(unsigned int reg)
+{
+ switch (reg) {
+ case 0x080000 ... 0x088ffe:
+ case 0x0a0000 ... 0x0a9ffe:
+ case 0x0c0000 ... 0x0c3ffe:
+ case 0x0e0000 ... 0x0e1ffe:
+ case 0x100000 ... 0x10effe:
+ case 0x120000 ... 0x12bffe:
+ case 0x136000 ... 0x137ffe:
+ case 0x140000 ... 0x14bffe:
+ case 0x160000 ... 0x161ffe:
+ case 0x180000 ... 0x18effe:
+ case 0x1a0000 ... 0x1b1ffe:
+ case 0x1b6000 ... 0x1b7ffe:
+ case 0x1c0000 ... 0x1cbffe:
+ case 0x1e0000 ... 0x1e1ffe:
+ case 0x200000 ... 0x208ffe:
+ case 0x220000 ... 0x229ffe:
+ case 0x240000 ... 0x243ffe:
+ case 0x260000 ... 0x261ffe:
+ case 0x280000 ... 0x288ffe:
+ case 0x2a0000 ... 0x2a9ffe:
+ case 0x2c0000 ... 0x2c3ffe:
+ case 0x2e0000 ... 0x2e1ffe:
+ case 0x300000 ... 0x308ffe:
+ case 0x320000 ... 0x333ffe:
+ case 0x340000 ... 0x353ffe:
+ case 0x360000 ... 0x361ffe:
+ case 0x380000 ... 0x388ffe:
+ case 0x3a0000 ... 0x3b3ffe:
+ case 0x3c0000 ... 0x3d3ffe:
+ case 0x3e0000 ... 0x3e1ffe:
+ return true;
+ default:
+ return false;
+ }
+}
+
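+/* Every 16-bit control register regmap may read; all others are rejected */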
+static bool cs47l90_16bit_readable_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_SOFTWARE_RESET:
+ case MADERA_HARDWARE_REVISION:
+ case MADERA_WRITE_SEQUENCER_CTRL_0:
+ case MADERA_WRITE_SEQUENCER_CTRL_1:
+ case MADERA_WRITE_SEQUENCER_CTRL_2:
+ case MADERA_TONE_GENERATOR_1:
+ case MADERA_TONE_GENERATOR_2:
+ case MADERA_TONE_GENERATOR_3:
+ case MADERA_TONE_GENERATOR_4:
+ case MADERA_TONE_GENERATOR_5:
+ case MADERA_PWM_DRIVE_1:
+ case MADERA_PWM_DRIVE_2:
+ case MADERA_PWM_DRIVE_3:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+ case MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+ case MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+ case MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+ case MADERA_HAPTICS_CONTROL_1:
+ case MADERA_HAPTICS_CONTROL_2:
+ case MADERA_HAPTICS_PHASE_1_INTENSITY:
+ case MADERA_HAPTICS_PHASE_1_DURATION:
+ case MADERA_HAPTICS_PHASE_2_INTENSITY:
+ case MADERA_HAPTICS_PHASE_2_DURATION:
+ case MADERA_HAPTICS_PHASE_3_INTENSITY:
+ case MADERA_HAPTICS_PHASE_3_DURATION:
+ case MADERA_HAPTICS_STATUS:
+ case MADERA_COMFORT_NOISE_GENERATOR:
+ case MADERA_CLOCK_32K_1:
+ case MADERA_SYSTEM_CLOCK_1:
+ case MADERA_SAMPLE_RATE_1:
+ case MADERA_SAMPLE_RATE_2:
+ case MADERA_SAMPLE_RATE_3:
+ case MADERA_SAMPLE_RATE_1_STATUS:
+ case MADERA_SAMPLE_RATE_2_STATUS:
+ case MADERA_SAMPLE_RATE_3_STATUS:
+ case MADERA_ASYNC_CLOCK_1:
+ case MADERA_ASYNC_SAMPLE_RATE_1:
+ case MADERA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case MADERA_ASYNC_SAMPLE_RATE_2:
+ case MADERA_ASYNC_SAMPLE_RATE_2_STATUS:
+ case MADERA_DSP_CLOCK_1:
+ case MADERA_DSP_CLOCK_2:
+ case MADERA_OUTPUT_SYSTEM_CLOCK:
+ case MADERA_OUTPUT_ASYNC_CLOCK:
+ case MADERA_RATE_ESTIMATOR_1:
+ case MADERA_RATE_ESTIMATOR_2:
+ case MADERA_RATE_ESTIMATOR_3:
+ case MADERA_RATE_ESTIMATOR_4:
+ case MADERA_RATE_ESTIMATOR_5:
+ case MADERA_FLL1_CONTROL_1:
+ case MADERA_FLL1_CONTROL_2:
+ case MADERA_FLL1_CONTROL_3:
+ case MADERA_FLL1_CONTROL_4:
+ case MADERA_FLL1_CONTROL_5:
+ case MADERA_FLL1_CONTROL_6:
+ case MADERA_FLL1_CONTROL_7:
+ case MADERA_FLL1_EFS_2:
+ case MADERA_FLL1_LOOP_FILTER_TEST_1:
+ case MADERA_FLL1_SYNCHRONISER_1:
+ case MADERA_FLL1_SYNCHRONISER_2:
+ case MADERA_FLL1_SYNCHRONISER_3:
+ case MADERA_FLL1_SYNCHRONISER_4:
+ case MADERA_FLL1_SYNCHRONISER_5:
+ case MADERA_FLL1_SYNCHRONISER_6:
+ case MADERA_FLL1_SYNCHRONISER_7:
+ case MADERA_FLL1_SPREAD_SPECTRUM:
+ case MADERA_FLL1_GPIO_CLOCK:
+ case MADERA_FLL2_CONTROL_1:
+ case MADERA_FLL2_CONTROL_2:
+ case MADERA_FLL2_CONTROL_3:
+ case MADERA_FLL2_CONTROL_4:
+ case MADERA_FLL2_CONTROL_5:
+ case MADERA_FLL2_CONTROL_6:
+ case MADERA_FLL2_CONTROL_7:
+ case MADERA_FLL2_EFS_2:
+ case MADERA_FLL2_LOOP_FILTER_TEST_1:
+ case MADERA_FLL2_SYNCHRONISER_1:
+ case MADERA_FLL2_SYNCHRONISER_2:
+ case MADERA_FLL2_SYNCHRONISER_3:
+ case MADERA_FLL2_SYNCHRONISER_4:
+ case MADERA_FLL2_SYNCHRONISER_5:
+ case MADERA_FLL2_SYNCHRONISER_6:
+ case MADERA_FLL2_SYNCHRONISER_7:
+ case MADERA_FLL2_SPREAD_SPECTRUM:
+ case MADERA_FLL2_GPIO_CLOCK:
+ case MADERA_FLLAO_CONTROL_1:
+ case MADERA_FLLAO_CONTROL_2:
+ case MADERA_FLLAO_CONTROL_3:
+ case MADERA_FLLAO_CONTROL_4:
+ case MADERA_FLLAO_CONTROL_5:
+ case MADERA_FLLAO_CONTROL_6:
+ case MADERA_FLLAO_CONTROL_7:
+ case MADERA_FLLAO_CONTROL_8:
+ case MADERA_FLLAO_CONTROL_9:
+ case MADERA_FLLAO_CONTROL_10:
+ case MADERA_FLLAO_CONTROL_11:
+ case MADERA_MIC_CHARGE_PUMP_1:
+ case MADERA_LDO2_CONTROL_1:
+ case MADERA_MIC_BIAS_CTRL_1:
+ case MADERA_MIC_BIAS_CTRL_2:
+ case MADERA_MIC_BIAS_CTRL_5:
+ case MADERA_MIC_BIAS_CTRL_6:
+ case MADERA_HP_CTRL_1L:
+ case MADERA_HP_CTRL_1R:
+ case MADERA_HP_CTRL_2L:
+ case MADERA_HP_CTRL_2R:
+ case MADERA_HP_CTRL_3L:
+ case MADERA_HP_CTRL_3R:
+ case MADERA_EDRE_HP_STEREO_CONTROL:
+ case MADERA_ACCESSORY_DETECT_MODE_1:
+ case MADERA_HEADPHONE_DETECT_0:
+ case MADERA_HEADPHONE_DETECT_1:
+ case MADERA_HEADPHONE_DETECT_2:
+ case MADERA_HEADPHONE_DETECT_3:
+ case MADERA_HEADPHONE_DETECT_5:
+ case MADERA_MICD_CLAMP_CONTROL:
+ case MADERA_MIC_DETECT_1_CONTROL_0:
+ case MADERA_MIC_DETECT_1_CONTROL_1:
+ case MADERA_MIC_DETECT_1_CONTROL_2:
+ case MADERA_MIC_DETECT_1_CONTROL_3:
+ case MADERA_MIC_DETECT_1_LEVEL_1:
+ case MADERA_MIC_DETECT_1_LEVEL_2:
+ case MADERA_MIC_DETECT_1_LEVEL_3:
+ case MADERA_MIC_DETECT_1_LEVEL_4:
+ case MADERA_MIC_DETECT_1_CONTROL_4:
+ case MADERA_MIC_DETECT_2_CONTROL_0:
+ case MADERA_MIC_DETECT_2_CONTROL_1:
+ case MADERA_MIC_DETECT_2_CONTROL_2:
+ case MADERA_MIC_DETECT_2_CONTROL_3:
+ case MADERA_MIC_DETECT_2_LEVEL_1:
+ case MADERA_MIC_DETECT_2_LEVEL_2:
+ case MADERA_MIC_DETECT_2_LEVEL_3:
+ case MADERA_MIC_DETECT_2_LEVEL_4:
+ case MADERA_MIC_DETECT_2_CONTROL_4:
+ case MADERA_GP_SWITCH_1:
+ case MADERA_JACK_DETECT_ANALOGUE:
+ case MADERA_INPUT_ENABLES:
+ case MADERA_INPUT_ENABLES_STATUS:
+ case MADERA_INPUT_RATE:
+ case MADERA_INPUT_VOLUME_RAMP:
+ case MADERA_HPF_CONTROL:
+ case MADERA_IN1L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_1L:
+ case MADERA_DMIC1L_CONTROL:
+ case MADERA_IN1L_RATE_CONTROL:
+ case MADERA_IN1R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_1R:
+ case MADERA_DMIC1R_CONTROL:
+ case MADERA_IN1R_RATE_CONTROL:
+ case MADERA_IN2L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_2L:
+ case MADERA_DMIC2L_CONTROL:
+ case MADERA_IN2L_RATE_CONTROL:
+ case MADERA_IN2R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_2R:
+ case MADERA_DMIC2R_CONTROL:
+ case MADERA_IN2R_RATE_CONTROL:
+ case MADERA_IN3L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_3L:
+ case MADERA_DMIC3L_CONTROL:
+ case MADERA_IN3L_RATE_CONTROL:
+ case MADERA_IN3R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_3R:
+ case MADERA_DMIC3R_CONTROL:
+ case MADERA_IN3R_RATE_CONTROL:
+ case MADERA_IN4L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_4L:
+ case MADERA_DMIC4L_CONTROL:
+ case MADERA_IN4L_RATE_CONTROL:
+ case MADERA_IN4R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_4R:
+ case MADERA_DMIC4R_CONTROL:
+ case MADERA_IN4R_RATE_CONTROL:
+ case MADERA_IN5L_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_5L:
+ case MADERA_DMIC5L_CONTROL:
+ case MADERA_IN5L_RATE_CONTROL:
+ case MADERA_IN5R_CONTROL:
+ case MADERA_ADC_DIGITAL_VOLUME_5R:
+ case MADERA_DMIC5R_CONTROL:
+ case MADERA_IN5R_RATE_CONTROL:
+ case MADERA_OUTPUT_ENABLES_1:
+ case MADERA_OUTPUT_STATUS_1:
+ case MADERA_RAW_OUTPUT_STATUS_1:
+ case MADERA_OUTPUT_RATE_1:
+ case MADERA_OUTPUT_VOLUME_RAMP:
+ case MADERA_OUTPUT_PATH_CONFIG_1L:
+ case MADERA_DAC_DIGITAL_VOLUME_1L:
+ case MADERA_OUTPUT_PATH_CONFIG_1:
+ case MADERA_NOISE_GATE_SELECT_1L:
+ case MADERA_OUTPUT_PATH_CONFIG_1R:
+ case MADERA_DAC_DIGITAL_VOLUME_1R:
+ case MADERA_NOISE_GATE_SELECT_1R:
+ case MADERA_OUTPUT_PATH_CONFIG_2L:
+ case MADERA_DAC_DIGITAL_VOLUME_2L:
+ case MADERA_OUTPUT_PATH_CONFIG_2:
+ case MADERA_NOISE_GATE_SELECT_2L:
+ case MADERA_OUTPUT_PATH_CONFIG_2R:
+ case MADERA_DAC_DIGITAL_VOLUME_2R:
+ case MADERA_NOISE_GATE_SELECT_2R:
+ case MADERA_OUTPUT_PATH_CONFIG_3L:
+ case MADERA_DAC_DIGITAL_VOLUME_3L:
+ case MADERA_NOISE_GATE_SELECT_3L:
+ case MADERA_OUTPUT_PATH_CONFIG_3R:
+ case MADERA_DAC_DIGITAL_VOLUME_3R:
+ case MADERA_NOISE_GATE_SELECT_3R:
+ case MADERA_OUTPUT_PATH_CONFIG_5L:
+ case MADERA_DAC_DIGITAL_VOLUME_5L:
+ case MADERA_NOISE_GATE_SELECT_5L:
+ case MADERA_OUTPUT_PATH_CONFIG_5R:
+ case MADERA_DAC_DIGITAL_VOLUME_5R:
+ case MADERA_NOISE_GATE_SELECT_5R:
+ case MADERA_DRE_ENABLE:
+ case MADERA_EDRE_ENABLE:
+ case MADERA_DAC_AEC_CONTROL_1:
+ case MADERA_NOISE_GATE_CONTROL:
+ case MADERA_PDM_SPK1_CTRL_1:
+ case MADERA_PDM_SPK1_CTRL_2:
+ case MADERA_HP1_SHORT_CIRCUIT_CTRL:
+ case MADERA_HP2_SHORT_CIRCUIT_CTRL:
+ case MADERA_HP3_SHORT_CIRCUIT_CTRL:
+ case MADERA_AIF1_BCLK_CTRL:
+ case MADERA_AIF1_TX_PIN_CTRL:
+ case MADERA_AIF1_RX_PIN_CTRL:
+ case MADERA_AIF1_RATE_CTRL:
+ case MADERA_AIF1_FORMAT:
+ case MADERA_AIF1_RX_BCLK_RATE:
+ case MADERA_AIF1_FRAME_CTRL_1:
+ case MADERA_AIF1_FRAME_CTRL_2:
+ case MADERA_AIF1_FRAME_CTRL_3:
+ case MADERA_AIF1_FRAME_CTRL_4:
+ case MADERA_AIF1_FRAME_CTRL_5:
+ case MADERA_AIF1_FRAME_CTRL_6:
+ case MADERA_AIF1_FRAME_CTRL_7:
+ case MADERA_AIF1_FRAME_CTRL_8:
+ case MADERA_AIF1_FRAME_CTRL_9:
+ case MADERA_AIF1_FRAME_CTRL_10:
+ case MADERA_AIF1_FRAME_CTRL_11:
+ case MADERA_AIF1_FRAME_CTRL_12:
+ case MADERA_AIF1_FRAME_CTRL_13:
+ case MADERA_AIF1_FRAME_CTRL_14:
+ case MADERA_AIF1_FRAME_CTRL_15:
+ case MADERA_AIF1_FRAME_CTRL_16:
+ case MADERA_AIF1_FRAME_CTRL_17:
+ case MADERA_AIF1_FRAME_CTRL_18:
+ case MADERA_AIF1_TX_ENABLES:
+ case MADERA_AIF1_RX_ENABLES:
+ case MADERA_AIF2_BCLK_CTRL:
+ case MADERA_AIF2_TX_PIN_CTRL:
+ case MADERA_AIF2_RX_PIN_CTRL:
+ case MADERA_AIF2_RATE_CTRL:
+ case MADERA_AIF2_FORMAT:
+ case MADERA_AIF2_RX_BCLK_RATE:
+ case MADERA_AIF2_FRAME_CTRL_1:
+ case MADERA_AIF2_FRAME_CTRL_2:
+ case MADERA_AIF2_FRAME_CTRL_3:
+ case MADERA_AIF2_FRAME_CTRL_4:
+ case MADERA_AIF2_FRAME_CTRL_5:
+ case MADERA_AIF2_FRAME_CTRL_6:
+ case MADERA_AIF2_FRAME_CTRL_7:
+ case MADERA_AIF2_FRAME_CTRL_8:
+ case MADERA_AIF2_FRAME_CTRL_9:
+ case MADERA_AIF2_FRAME_CTRL_10:
+ case MADERA_AIF2_FRAME_CTRL_11:
+ case MADERA_AIF2_FRAME_CTRL_12:
+ case MADERA_AIF2_FRAME_CTRL_13:
+ case MADERA_AIF2_FRAME_CTRL_14:
+ case MADERA_AIF2_FRAME_CTRL_15:
+ case MADERA_AIF2_FRAME_CTRL_16:
+ case MADERA_AIF2_FRAME_CTRL_17:
+ case MADERA_AIF2_FRAME_CTRL_18:
+ case MADERA_AIF2_TX_ENABLES:
+ case MADERA_AIF2_RX_ENABLES:
+ case MADERA_AIF3_BCLK_CTRL:
+ case MADERA_AIF3_TX_PIN_CTRL:
+ case MADERA_AIF3_RX_PIN_CTRL:
+ case MADERA_AIF3_RATE_CTRL:
+ case MADERA_AIF3_FORMAT:
+ case MADERA_AIF3_RX_BCLK_RATE:
+ case MADERA_AIF3_FRAME_CTRL_1:
+ case MADERA_AIF3_FRAME_CTRL_2:
+ case MADERA_AIF3_FRAME_CTRL_3:
+ case MADERA_AIF3_FRAME_CTRL_4:
+ case MADERA_AIF3_FRAME_CTRL_11:
+ case MADERA_AIF3_FRAME_CTRL_12:
+ case MADERA_AIF3_TX_ENABLES:
+ case MADERA_AIF3_RX_ENABLES:
+ case MADERA_AIF4_BCLK_CTRL:
+ case MADERA_AIF4_TX_PIN_CTRL:
+ case MADERA_AIF4_RX_PIN_CTRL:
+ case MADERA_AIF4_RATE_CTRL:
+ case MADERA_AIF4_FORMAT:
+ case MADERA_AIF4_RX_BCLK_RATE:
+ case MADERA_AIF4_FRAME_CTRL_1:
+ case MADERA_AIF4_FRAME_CTRL_2:
+ case MADERA_AIF4_FRAME_CTRL_3:
+ case MADERA_AIF4_FRAME_CTRL_4:
+ case MADERA_AIF4_FRAME_CTRL_11:
+ case MADERA_AIF4_FRAME_CTRL_12:
+ case MADERA_AIF4_TX_ENABLES:
+ case MADERA_AIF4_RX_ENABLES:
+ case MADERA_SPD1_TX_CONTROL:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_2:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_3:
+ case MADERA_SLIMBUS_FRAMER_REF_GEAR:
+ case MADERA_SLIMBUS_RATES_1:
+ case MADERA_SLIMBUS_RATES_2:
+ case MADERA_SLIMBUS_RATES_3:
+ case MADERA_SLIMBUS_RATES_4:
+ case MADERA_SLIMBUS_RATES_5:
+ case MADERA_SLIMBUS_RATES_6:
+ case MADERA_SLIMBUS_RATES_7:
+ case MADERA_SLIMBUS_RATES_8:
+ case MADERA_SLIMBUS_RX_CHANNEL_ENABLE:
+ case MADERA_SLIMBUS_TX_CHANNEL_ENABLE:
+ case MADERA_SLIMBUS_RX_PORT_STATUS:
+ case MADERA_SLIMBUS_TX_PORT_STATUS:
+ case MADERA_PWM1MIX_INPUT_1_SOURCE:
+ case MADERA_PWM1MIX_INPUT_1_VOLUME:
+ case MADERA_PWM1MIX_INPUT_2_SOURCE:
+ case MADERA_PWM1MIX_INPUT_2_VOLUME:
+ case MADERA_PWM1MIX_INPUT_3_SOURCE:
+ case MADERA_PWM1MIX_INPUT_3_VOLUME:
+ case MADERA_PWM1MIX_INPUT_4_SOURCE:
+ case MADERA_PWM1MIX_INPUT_4_VOLUME:
+ case MADERA_PWM2MIX_INPUT_1_SOURCE:
+ case MADERA_PWM2MIX_INPUT_1_VOLUME:
+ case MADERA_PWM2MIX_INPUT_2_SOURCE:
+ case MADERA_PWM2MIX_INPUT_2_VOLUME:
+ case MADERA_PWM2MIX_INPUT_3_SOURCE:
+ case MADERA_PWM2MIX_INPUT_3_VOLUME:
+ case MADERA_PWM2MIX_INPUT_4_SOURCE:
+ case MADERA_PWM2MIX_INPUT_4_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT1LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT1LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT1RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT1RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT2LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT2LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT2RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT2RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT3LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT3LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT3RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT3RMIX_INPUT_4_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_1_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_1_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_2_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_2_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_3_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_3_VOLUME:
+ case MADERA_OUT5LMIX_INPUT_4_SOURCE:
+ case MADERA_OUT5LMIX_INPUT_4_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_1_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_1_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_2_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_2_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_3_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_3_VOLUME:
+ case MADERA_OUT5RMIX_INPUT_4_SOURCE:
+ case MADERA_OUT5RMIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX3MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX3MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX4MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX4MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX5MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX5MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX6MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX6MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX7MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX7MIX_INPUT_4_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_1_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_1_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_2_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_2_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_3_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_3_VOLUME:
+ case MADERA_AIF1TX8MIX_INPUT_4_SOURCE:
+ case MADERA_AIF1TX8MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX3MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX3MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX4MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX4MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX5MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX5MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX6MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX6MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX7MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX7MIX_INPUT_4_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_1_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_1_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_2_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_2_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_3_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_3_VOLUME:
+ case MADERA_AIF2TX8MIX_INPUT_4_SOURCE:
+ case MADERA_AIF2TX8MIX_INPUT_4_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF3TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF3TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF3TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF3TX2MIX_INPUT_4_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_1_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_1_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_2_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_2_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_3_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_3_VOLUME:
+ case MADERA_AIF4TX1MIX_INPUT_4_SOURCE:
+ case MADERA_AIF4TX1MIX_INPUT_4_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_1_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_1_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_2_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_2_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_3_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_3_VOLUME:
+ case MADERA_AIF4TX2MIX_INPUT_4_SOURCE:
+ case MADERA_AIF4TX2MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX1MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX1MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX2MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX2MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX3MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX3MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX4MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX4MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX5MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX5MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX6MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX6MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX7MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX7MIX_INPUT_4_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_1_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_1_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_2_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_2_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_3_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_3_VOLUME:
+ case MADERA_SLIMTX8MIX_INPUT_4_SOURCE:
+ case MADERA_SLIMTX8MIX_INPUT_4_VOLUME:
+ case MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE:
+ case MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME:
+ case MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE:
+ case MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME:
+ case MADERA_EQ1MIX_INPUT_1_SOURCE:
+ case MADERA_EQ1MIX_INPUT_1_VOLUME:
+ case MADERA_EQ1MIX_INPUT_2_SOURCE:
+ case MADERA_EQ1MIX_INPUT_2_VOLUME:
+ case MADERA_EQ1MIX_INPUT_3_SOURCE:
+ case MADERA_EQ1MIX_INPUT_3_VOLUME:
+ case MADERA_EQ1MIX_INPUT_4_SOURCE:
+ case MADERA_EQ1MIX_INPUT_4_VOLUME:
+ case MADERA_EQ2MIX_INPUT_1_SOURCE:
+ case MADERA_EQ2MIX_INPUT_1_VOLUME:
+ case MADERA_EQ2MIX_INPUT_2_SOURCE:
+ case MADERA_EQ2MIX_INPUT_2_VOLUME:
+ case MADERA_EQ2MIX_INPUT_3_SOURCE:
+ case MADERA_EQ2MIX_INPUT_3_VOLUME:
+ case MADERA_EQ2MIX_INPUT_4_SOURCE:
+ case MADERA_EQ2MIX_INPUT_4_VOLUME:
+ case MADERA_EQ3MIX_INPUT_1_SOURCE:
+ case MADERA_EQ3MIX_INPUT_1_VOLUME:
+ case MADERA_EQ3MIX_INPUT_2_SOURCE:
+ case MADERA_EQ3MIX_INPUT_2_VOLUME:
+ case MADERA_EQ3MIX_INPUT_3_SOURCE:
+ case MADERA_EQ3MIX_INPUT_3_VOLUME:
+ case MADERA_EQ3MIX_INPUT_4_SOURCE:
+ case MADERA_EQ3MIX_INPUT_4_VOLUME:
+ case MADERA_EQ4MIX_INPUT_1_SOURCE:
+ case MADERA_EQ4MIX_INPUT_1_VOLUME:
+ case MADERA_EQ4MIX_INPUT_2_SOURCE:
+ case MADERA_EQ4MIX_INPUT_2_VOLUME:
+ case MADERA_EQ4MIX_INPUT_3_SOURCE:
+ case MADERA_EQ4MIX_INPUT_3_VOLUME:
+ case MADERA_EQ4MIX_INPUT_4_SOURCE:
+ case MADERA_EQ4MIX_INPUT_4_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_1_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_1_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_2_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_2_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_3_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_3_VOLUME:
+ case MADERA_DRC1LMIX_INPUT_4_SOURCE:
+ case MADERA_DRC1LMIX_INPUT_4_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_1_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_1_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_2_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_2_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_3_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_3_VOLUME:
+ case MADERA_DRC1RMIX_INPUT_4_SOURCE:
+ case MADERA_DRC1RMIX_INPUT_4_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_1_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_1_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_2_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_2_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_3_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_3_VOLUME:
+ case MADERA_DRC2LMIX_INPUT_4_SOURCE:
+ case MADERA_DRC2LMIX_INPUT_4_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_1_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_1_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_2_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_2_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_3_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_3_VOLUME:
+ case MADERA_DRC2RMIX_INPUT_4_SOURCE:
+ case MADERA_DRC2RMIX_INPUT_4_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP1MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP1MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP2MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP2MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP3MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP3MIX_INPUT_4_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_1_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_1_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_2_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_2_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_3_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_3_VOLUME:
+ case MADERA_HPLP4MIX_INPUT_4_SOURCE:
+ case MADERA_HPLP4MIX_INPUT_4_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP1LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP1LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP1RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP1RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP1AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP1AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP2LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP2LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP2RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP2RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP2AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP2AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP3LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP3LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP3RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP3RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP3AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP3AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP4LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP4LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP4LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP4LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP4RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP4RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP4AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP4AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP5LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP5LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP5LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP5LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP5RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP5RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP5AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP5AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_1LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_1RMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_2LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC1_2RMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_1LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_1RMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_2LMIX_INPUT_1_SOURCE:
+ case MADERA_ASRC2_2RMIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC1INT4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT3MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC2INT4MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC3INT2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4DEC1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4DEC2MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4INT1MIX_INPUT_1_SOURCE:
+ case MADERA_ISRC4INT2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP6LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP6LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP6LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP6LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP6RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP6RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP6AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP6AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_1_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_1_VOLUME:
+ case MADERA_DSP7LMIX_INPUT_2_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_2_VOLUME:
+ case MADERA_DSP7LMIX_INPUT_3_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_3_VOLUME:
+ case MADERA_DSP7LMIX_INPUT_4_SOURCE:
+ case MADERA_DSP7LMIX_INPUT_4_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_1_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_1_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_2_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_2_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_3_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_3_VOLUME:
+ case MADERA_DSP7RMIX_INPUT_4_SOURCE:
+ case MADERA_DSP7RMIX_INPUT_4_VOLUME:
+ case MADERA_DSP7AUX1MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX2MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX3MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX4MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX5MIX_INPUT_1_SOURCE:
+ case MADERA_DSP7AUX6MIX_INPUT_1_SOURCE:
+ case MADERA_DFC1MIX_INPUT_1_SOURCE:
+ case MADERA_DFC2MIX_INPUT_1_SOURCE:
+ case MADERA_DFC3MIX_INPUT_1_SOURCE:
+ case MADERA_DFC4MIX_INPUT_1_SOURCE:
+ case MADERA_DFC5MIX_INPUT_1_SOURCE:
+ case MADERA_DFC6MIX_INPUT_1_SOURCE:
+ case MADERA_DFC7MIX_INPUT_1_SOURCE:
+ case MADERA_DFC8MIX_INPUT_1_SOURCE:
+ case MADERA_FX_CTRL1:
+ case MADERA_FX_CTRL2:
+ case MADERA_EQ1_1 ... MADERA_EQ1_21:
+ case MADERA_EQ2_1 ... MADERA_EQ2_21:
+ case MADERA_EQ3_1 ... MADERA_EQ3_21:
+ case MADERA_EQ4_1 ... MADERA_EQ4_21:
+ case MADERA_DRC1_CTRL1:
+ case MADERA_DRC1_CTRL2:
+ case MADERA_DRC1_CTRL3:
+ case MADERA_DRC1_CTRL4:
+ case MADERA_DRC1_CTRL5:
+ case MADERA_DRC2_CTRL1:
+ case MADERA_DRC2_CTRL2:
+ case MADERA_DRC2_CTRL3:
+ case MADERA_DRC2_CTRL4:
+ case MADERA_DRC2_CTRL5:
+ case MADERA_HPLPF1_1:
+ case MADERA_HPLPF1_2:
+ case MADERA_HPLPF2_1:
+ case MADERA_HPLPF2_2:
+ case MADERA_HPLPF3_1:
+ case MADERA_HPLPF3_2:
+ case MADERA_HPLPF4_1:
+ case MADERA_HPLPF4_2:
+ case MADERA_ASRC1_ENABLE:
+ case MADERA_ASRC1_STATUS:
+ case MADERA_ASRC1_RATE1:
+ case MADERA_ASRC1_RATE2:
+ case MADERA_ASRC2_ENABLE:
+ case MADERA_ASRC2_STATUS:
+ case MADERA_ASRC2_RATE1:
+ case MADERA_ASRC2_RATE2:
+ case MADERA_ISRC_1_CTRL_1:
+ case MADERA_ISRC_1_CTRL_2:
+ case MADERA_ISRC_1_CTRL_3:
+ case MADERA_ISRC_2_CTRL_1:
+ case MADERA_ISRC_2_CTRL_2:
+ case MADERA_ISRC_2_CTRL_3:
+ case MADERA_ISRC_3_CTRL_1:
+ case MADERA_ISRC_3_CTRL_2:
+ case MADERA_ISRC_3_CTRL_3:
+ case MADERA_ISRC_4_CTRL_1:
+ case MADERA_ISRC_4_CTRL_2:
+ case MADERA_ISRC_4_CTRL_3:
+ case MADERA_CLOCK_CONTROL:
+ case MADERA_ANC_SRC:
+ case MADERA_DSP_STATUS:
+ case MADERA_ANC_COEFF_START ... MADERA_ANC_COEFF_END:
+ case MADERA_FCL_FILTER_CONTROL:
+ case MADERA_FCL_ADC_REFORMATTER_CONTROL:
+ case MADERA_FCL_COEFF_START ... MADERA_FCL_COEFF_END:
+ case MADERA_FCR_FILTER_CONTROL:
+ case MADERA_FCR_ADC_REFORMATTER_CONTROL:
+ case MADERA_FCR_COEFF_START ... MADERA_FCR_COEFF_END:
+ case MADERA_DAC_COMP_1:
+ case MADERA_DAC_COMP_2:
+ case MADERA_FRF_COEFFICIENT_1L_1:
+ case MADERA_FRF_COEFFICIENT_1L_2:
+ case MADERA_FRF_COEFFICIENT_1L_3:
+ case MADERA_FRF_COEFFICIENT_1L_4:
+ case MADERA_FRF_COEFFICIENT_1R_1:
+ case MADERA_FRF_COEFFICIENT_1R_2:
+ case MADERA_FRF_COEFFICIENT_1R_3:
+ case MADERA_FRF_COEFFICIENT_1R_4:
+ case MADERA_FRF_COEFFICIENT_2L_1:
+ case MADERA_FRF_COEFFICIENT_2L_2:
+ case MADERA_FRF_COEFFICIENT_2L_3:
+ case MADERA_FRF_COEFFICIENT_2L_4:
+ case MADERA_FRF_COEFFICIENT_2R_1:
+ case MADERA_FRF_COEFFICIENT_2R_2:
+ case MADERA_FRF_COEFFICIENT_2R_3:
+ case MADERA_FRF_COEFFICIENT_2R_4:
+ case MADERA_FRF_COEFFICIENT_3L_1:
+ case MADERA_FRF_COEFFICIENT_3L_2:
+ case MADERA_FRF_COEFFICIENT_3L_3:
+ case MADERA_FRF_COEFFICIENT_3L_4:
+ case MADERA_FRF_COEFFICIENT_3R_1:
+ case MADERA_FRF_COEFFICIENT_3R_2:
+ case MADERA_FRF_COEFFICIENT_3R_3:
+ case MADERA_FRF_COEFFICIENT_3R_4:
+ case MADERA_FRF_COEFFICIENT_5L_1:
+ case MADERA_FRF_COEFFICIENT_5L_2:
+ case MADERA_FRF_COEFFICIENT_5L_3:
+ case MADERA_FRF_COEFFICIENT_5L_4:
+ case MADERA_FRF_COEFFICIENT_5R_1:
+ case MADERA_FRF_COEFFICIENT_5R_2:
+ case MADERA_FRF_COEFFICIENT_5R_3:
+ case MADERA_FRF_COEFFICIENT_5R_4:
+ case MADERA_DFC1_CTRL:
+ case MADERA_DFC1_RX:
+ case MADERA_DFC1_TX:
+ case MADERA_DFC2_CTRL:
+ case MADERA_DFC2_RX:
+ case MADERA_DFC2_TX:
+ case MADERA_DFC3_CTRL:
+ case MADERA_DFC3_RX:
+ case MADERA_DFC3_TX:
+ case MADERA_DFC4_CTRL:
+ case MADERA_DFC4_RX:
+ case MADERA_DFC4_TX:
+ case MADERA_DFC5_CTRL:
+ case MADERA_DFC5_RX:
+ case MADERA_DFC5_TX:
+ case MADERA_DFC6_CTRL:
+ case MADERA_DFC6_RX:
+ case MADERA_DFC6_TX:
+ case MADERA_DFC7_CTRL:
+ case MADERA_DFC7_RX:
+ case MADERA_DFC7_TX:
+ case MADERA_DFC8_CTRL:
+ case MADERA_DFC8_RX:
+ case MADERA_DFC8_TX:
+ case MADERA_DFC_STATUS:
+ case MADERA_GPIO1_CTRL_1 ... MADERA_GPIO38_CTRL_2:
+ case MADERA_IRQ1_STATUS_1 ... MADERA_IRQ1_STATUS_33:
+ case MADERA_IRQ1_MASK_1 ... MADERA_IRQ1_MASK_33:
+ case MADERA_IRQ1_RAW_STATUS_1 ... MADERA_IRQ1_RAW_STATUS_33:
+ case MADERA_INTERRUPT_DEBOUNCE_7:
+ case MADERA_IRQ1_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
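+/*
+ * Registers whose contents can change outside of driver control
+ * (status and detection results); these must always bypass the
+ * register cache.
+ */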
+static bool cs47l90_16bit_volatile_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_SOFTWARE_RESET:
+ case MADERA_HARDWARE_REVISION:
+ case MADERA_WRITE_SEQUENCER_CTRL_0:
+ case MADERA_WRITE_SEQUENCER_CTRL_1:
+ case MADERA_WRITE_SEQUENCER_CTRL_2:
+ case MADERA_HAPTICS_STATUS:
+ case MADERA_SAMPLE_RATE_1_STATUS:
+ case MADERA_SAMPLE_RATE_2_STATUS:
+ case MADERA_SAMPLE_RATE_3_STATUS:
+ case MADERA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case MADERA_ASYNC_SAMPLE_RATE_2_STATUS:
+ case MADERA_HP_CTRL_1L:
+ case MADERA_HP_CTRL_1R:
+ case MADERA_HP_CTRL_2L:
+ case MADERA_HP_CTRL_2R:
+ case MADERA_HP_CTRL_3L:
+ case MADERA_HP_CTRL_3R:
+ case MADERA_MIC_DETECT_1_CONTROL_3:
+ case MADERA_MIC_DETECT_1_CONTROL_4:
+ case MADERA_MIC_DETECT_2_CONTROL_3:
+ case MADERA_MIC_DETECT_2_CONTROL_4:
+ case MADERA_HEADPHONE_DETECT_2:
+ case MADERA_HEADPHONE_DETECT_3:
+ case MADERA_HEADPHONE_DETECT_5:
+ case MADERA_INPUT_ENABLES_STATUS:
+ case MADERA_OUTPUT_STATUS_1:
+ case MADERA_RAW_OUTPUT_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_1:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_2:
+ case MADERA_SPD1_TX_CHANNEL_STATUS_3:
+ case MADERA_SLIMBUS_RX_PORT_STATUS:
+ case MADERA_SLIMBUS_TX_PORT_STATUS:
+ case MADERA_FX_CTRL2:
+ case MADERA_ASRC2_STATUS:
+ case MADERA_ASRC1_STATUS:
+ case MADERA_CLOCK_CONTROL:
+ case MADERA_DFC_STATUS:
+ case MADERA_IRQ1_STATUS_1 ... MADERA_IRQ1_STATUS_33:
+ case MADERA_IRQ1_RAW_STATUS_1 ... MADERA_IRQ1_RAW_STATUS_33:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs47l90_32bit_readable_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_WSEQ_SEQUENCE_1 ... MADERA_WSEQ_SEQUENCE_508:
+ case MADERA_OTP_HPDET_CAL_1 ... MADERA_OTP_HPDET_CAL_2:
+ case MADERA_DSP1_CONFIG_1 ... MADERA_DSP1_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP2_CONFIG_1 ... MADERA_DSP2_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP3_CONFIG_1 ... MADERA_DSP3_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP4_CONFIG_1 ... MADERA_DSP4_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP5_CONFIG_1 ... MADERA_DSP5_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP6_CONFIG_1 ... MADERA_DSP6_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP7_CONFIG_1 ... MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ return true;
+ default:
+ return cs47l90_is_adsp_memory(reg);
+ }
+}
+
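+/*
+ * The whole 32-bit page (write sequencer RAM, OTP calibration words and
+ * DSP control/memory space) is hardware-owned, so the volatile set
+ * matches the readable set and effectively nothing is cached.
+ */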
+static bool cs47l90_32bit_volatile_register(struct device *dev,
+ unsigned int reg)
+{
+ switch (reg) {
+ case MADERA_WSEQ_SEQUENCE_1 ... MADERA_WSEQ_SEQUENCE_508:
+ case MADERA_OTP_HPDET_CAL_1 ... MADERA_OTP_HPDET_CAL_2:
+ case MADERA_DSP1_CONFIG_1 ... MADERA_DSP1_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP2_CONFIG_1 ... MADERA_DSP2_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP3_CONFIG_1 ... MADERA_DSP3_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP4_CONFIG_1 ... MADERA_DSP4_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP5_CONFIG_1 ... MADERA_DSP5_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP6_CONFIG_1 ... MADERA_DSP6_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ case MADERA_DSP7_CONFIG_1 ... MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR:
+ return true;
+ default:
+ return cs47l90_is_adsp_memory(reg);
+ }
+}
+
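+/*
+ * The 16-bit and 32-bit register pages get separate regmaps. The SPI
+ * variants insert 16 padding bits between the address and data phases
+ * (pad_bits); the I2C variants are otherwise identical.
+ */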
+const struct regmap_config cs47l90_16bit_spi_regmap = {
+ .name = "cs47l90_16bit",
+ .reg_bits = 32,
+ .pad_bits = 16,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_INTERRUPT_RAW_STATUS_1,
+ .readable_reg = cs47l90_16bit_readable_register,
+ .volatile_reg = cs47l90_16bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = cs47l90_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs47l90_reg_default),
+};
+EXPORT_SYMBOL_GPL(cs47l90_16bit_spi_regmap);
+
+const struct regmap_config cs47l90_16bit_i2c_regmap = {
+ .name = "cs47l90_16bit",
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_INTERRUPT_RAW_STATUS_1,
+ .readable_reg = cs47l90_16bit_readable_register,
+ .volatile_reg = cs47l90_16bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = cs47l90_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs47l90_reg_default),
+};
+EXPORT_SYMBOL_GPL(cs47l90_16bit_i2c_regmap);
+
+const struct regmap_config cs47l90_32bit_spi_regmap = {
+ .name = "cs47l90_32bit",
+ .reg_bits = 32,
+ .reg_stride = 2,
+ .pad_bits = 16,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR,
+ .readable_reg = cs47l90_32bit_readable_register,
+ .volatile_reg = cs47l90_32bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(cs47l90_32bit_spi_regmap);
+
+const struct regmap_config cs47l90_32bit_i2c_regmap = {
+ .name = "cs47l90_32bit",
+ .reg_bits = 32,
+ .reg_stride = 2,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR,
+ .readable_reg = cs47l90_32bit_readable_register,
+ .volatile_reg = cs47l90_32bit_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(cs47l90_32bit_i2c_regmap);
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index 6c2870d4e754..6e4ce49b4405 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -76,7 +76,7 @@ static struct resource da9063_hwmon_resources[] = {
};
-static const struct mfd_cell da9063_devs[] = {
+static const struct mfd_cell da9063_common_devs[] = {
{
.name = DA9063_DRVNAME_REGULATORS,
.num_resources = ARRAY_SIZE(da9063_regulators_resources),
@@ -101,14 +101,18 @@ static const struct mfd_cell da9063_devs[] = {
.of_compatible = "dlg,da9063-onkey",
},
{
+ .name = DA9063_DRVNAME_VIBRATION,
+ },
+};
+
+/* Only present on DA9063, not on DA9063L */
+static const struct mfd_cell da9063_devs[] = {
+ {
.name = DA9063_DRVNAME_RTC,
.num_resources = ARRAY_SIZE(da9063_rtc_resources),
.resources = da9063_rtc_resources,
.of_compatible = "dlg,da9063-rtc",
},
- {
- .name = DA9063_DRVNAME_VIBRATION,
- },
};
static int da9063_clear_fault_log(struct da9063 *da9063)
@@ -192,7 +196,7 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
dev_err(da9063->dev, "Cannot read chip model id.\n");
return -EIO;
}
- if (model != PMIC_DA9063) {
+ if (model != PMIC_CHIP_ID_DA9063) {
dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model);
return -ENODEV;
}
@@ -215,7 +219,6 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
return -ENODEV;
}
- da9063->model = model;
da9063->variant_code = variant_code;
ret = da9063_irq_init(da9063);
@@ -226,19 +229,26 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
da9063->irq_base = regmap_irq_chip_get_base(da9063->regmap_irq);
- ret = mfd_add_devices(da9063->dev, -1, da9063_devs,
- ARRAY_SIZE(da9063_devs), NULL, da9063->irq_base,
- NULL);
- if (ret)
- dev_err(da9063->dev, "Cannot add MFD cells\n");
+ ret = devm_mfd_add_devices(da9063->dev, PLATFORM_DEVID_NONE,
+ da9063_common_devs,
+ ARRAY_SIZE(da9063_common_devs),
+ NULL, da9063->irq_base, NULL);
+ if (ret) {
+ dev_err(da9063->dev, "Failed to add child devices\n");
+ return ret;
+ }
- return ret;
-}
+ if (da9063->type == PMIC_TYPE_DA9063) {
+ ret = devm_mfd_add_devices(da9063->dev, PLATFORM_DEVID_NONE,
+ da9063_devs, ARRAY_SIZE(da9063_devs),
+ NULL, da9063->irq_base, NULL);
+ if (ret) {
+ dev_err(da9063->dev, "Failed to add child devices\n");
+ return ret;
+ }
+ }
-void da9063_device_exit(struct da9063 *da9063)
-{
- mfd_remove_devices(da9063->dev);
- da9063_irq_exit(da9063);
+ return ret;
}
MODULE_DESCRIPTION("PMIC driver for Dialog DA9063");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 981805a2c521..50a24b1921d0 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -29,78 +29,33 @@
#include <linux/regulator/of_regulator.h>
static const struct regmap_range da9063_ad_readable_ranges[] = {
- {
- .range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_AD_REG_SECOND_D,
- }, {
- .range_min = DA9063_REG_SEQ,
- .range_max = DA9063_REG_ID_32_31,
- }, {
- .range_min = DA9063_REG_SEQ_A,
- .range_max = DA9063_REG_AUTO3_LOW,
- }, {
- .range_min = DA9063_REG_T_OFFSET,
- .range_max = DA9063_AD_REG_GP_ID_19,
- }, {
- .range_min = DA9063_REG_CHIP_ID,
- .range_max = DA9063_REG_CHIP_VARIANT,
- },
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_AD_REG_SECOND_D),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_AD_REG_GP_ID_19),
+ regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
};
static const struct regmap_range da9063_ad_writeable_ranges[] = {
- {
- .range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_REG_PAGE_CON,
- }, {
- .range_min = DA9063_REG_FAULT_LOG,
- .range_max = DA9063_REG_VSYS_MON,
- }, {
- .range_min = DA9063_REG_COUNT_S,
- .range_max = DA9063_AD_REG_ALARM_Y,
- }, {
- .range_min = DA9063_REG_SEQ,
- .range_max = DA9063_REG_ID_32_31,
- }, {
- .range_min = DA9063_REG_SEQ_A,
- .range_max = DA9063_REG_AUTO3_LOW,
- }, {
- .range_min = DA9063_REG_CONFIG_I,
- .range_max = DA9063_AD_REG_MON_REG_4,
- }, {
- .range_min = DA9063_AD_REG_GP_ID_0,
- .range_max = DA9063_AD_REG_GP_ID_19,
- },
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON),
+ regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON),
+ regmap_reg_range(DA9063_REG_COUNT_S, DA9063_AD_REG_ALARM_Y),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_AD_REG_MON_REG_4),
+ regmap_reg_range(DA9063_AD_REG_GP_ID_0, DA9063_AD_REG_GP_ID_19),
};
static const struct regmap_range da9063_ad_volatile_ranges[] = {
- {
- .range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_REG_EVENT_D,
- }, {
- .range_min = DA9063_REG_CONTROL_A,
- .range_max = DA9063_REG_CONTROL_B,
- }, {
- .range_min = DA9063_REG_CONTROL_E,
- .range_max = DA9063_REG_CONTROL_F,
- }, {
- .range_min = DA9063_REG_BCORE2_CONT,
- .range_max = DA9063_REG_LDO11_CONT,
- }, {
- .range_min = DA9063_REG_DVC_1,
- .range_max = DA9063_REG_ADC_MAN,
- }, {
- .range_min = DA9063_REG_ADC_RES_L,
- .range_max = DA9063_AD_REG_SECOND_D,
- }, {
- .range_min = DA9063_REG_SEQ,
- .range_max = DA9063_REG_SEQ,
- }, {
- .range_min = DA9063_REG_EN_32K,
- .range_max = DA9063_REG_EN_32K,
- }, {
- .range_min = DA9063_AD_REG_MON_REG_5,
- .range_max = DA9063_AD_REG_MON_REG_6,
- },
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D),
+ regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B),
+ regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F),
+ regmap_reg_range(DA9063_REG_BCORE2_CONT, DA9063_REG_LDO11_CONT),
+ regmap_reg_range(DA9063_REG_DVC_1, DA9063_REG_ADC_MAN),
+ regmap_reg_range(DA9063_REG_ADC_RES_L, DA9063_AD_REG_SECOND_D),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_SEQ),
+ regmap_reg_range(DA9063_REG_EN_32K, DA9063_REG_EN_32K),
+ regmap_reg_range(DA9063_AD_REG_MON_REG_5, DA9063_AD_REG_MON_REG_6),
};
static const struct regmap_access_table da9063_ad_readable_table = {
@@ -119,78 +74,33 @@ static const struct regmap_access_table da9063_ad_volatile_table = {
};
static const struct regmap_range da9063_bb_readable_ranges[] = {
- {
- .range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_BB_REG_SECOND_D,
- }, {
- .range_min = DA9063_REG_SEQ,
- .range_max = DA9063_REG_ID_32_31,
- }, {
- .range_min = DA9063_REG_SEQ_A,
- .range_max = DA9063_REG_AUTO3_LOW,
- }, {
- .range_min = DA9063_REG_T_OFFSET,
- .range_max = DA9063_BB_REG_GP_ID_19,
- }, {
- .range_min = DA9063_REG_CHIP_ID,
- .range_max = DA9063_REG_CHIP_VARIANT,
- },
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_BB_REG_SECOND_D),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19),
+ regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
};
static const struct regmap_range da9063_bb_writeable_ranges[] = {
- {
- .range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_REG_PAGE_CON,
- }, {
- .range_min = DA9063_REG_FAULT_LOG,
- .range_max = DA9063_REG_VSYS_MON,
- }, {
- .range_min = DA9063_REG_COUNT_S,
- .range_max = DA9063_BB_REG_ALARM_Y,
- }, {
- .range_min = DA9063_REG_SEQ,
- .range_max = DA9063_REG_ID_32_31,
- }, {
- .range_min = DA9063_REG_SEQ_A,
- .range_max = DA9063_REG_AUTO3_LOW,
- }, {
- .range_min = DA9063_REG_CONFIG_I,
- .range_max = DA9063_BB_REG_MON_REG_4,
- }, {
- .range_min = DA9063_BB_REG_GP_ID_0,
- .range_max = DA9063_BB_REG_GP_ID_19,
- },
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON),
+ regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON),
+ regmap_reg_range(DA9063_REG_COUNT_S, DA9063_BB_REG_ALARM_Y),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4),
+ regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19),
};
static const struct regmap_range da9063_bb_volatile_ranges[] = {
- {
- .range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_REG_EVENT_D,
- }, {
- .range_min = DA9063_REG_CONTROL_A,
- .range_max = DA9063_REG_CONTROL_B,
- }, {
- .range_min = DA9063_REG_CONTROL_E,
- .range_max = DA9063_REG_CONTROL_F,
- }, {
- .range_min = DA9063_REG_BCORE2_CONT,
- .range_max = DA9063_REG_LDO11_CONT,
- }, {
- .range_min = DA9063_REG_DVC_1,
- .range_max = DA9063_REG_ADC_MAN,
- }, {
- .range_min = DA9063_REG_ADC_RES_L,
- .range_max = DA9063_BB_REG_SECOND_D,
- }, {
- .range_min = DA9063_REG_SEQ,
- .range_max = DA9063_REG_SEQ,
- }, {
- .range_min = DA9063_REG_EN_32K,
- .range_max = DA9063_REG_EN_32K,
- }, {
- .range_min = DA9063_BB_REG_MON_REG_5,
- .range_max = DA9063_BB_REG_MON_REG_6,
- },
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D),
+ regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B),
+ regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F),
+ regmap_reg_range(DA9063_REG_BCORE2_CONT, DA9063_REG_LDO11_CONT),
+ regmap_reg_range(DA9063_REG_DVC_1, DA9063_REG_ADC_MAN),
+ regmap_reg_range(DA9063_REG_ADC_RES_L, DA9063_BB_REG_SECOND_D),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_SEQ),
+ regmap_reg_range(DA9063_REG_EN_32K, DA9063_REG_EN_32K),
+ regmap_reg_range(DA9063_BB_REG_MON_REG_5, DA9063_BB_REG_MON_REG_6),
};
static const struct regmap_access_table da9063_bb_readable_table = {
@@ -208,6 +118,50 @@ static const struct regmap_access_table da9063_bb_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges),
};
+static const struct regmap_range da9063l_bb_readable_ranges[] = {
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_MON_A10_RES),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19),
+ regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
+};
+
+static const struct regmap_range da9063l_bb_writeable_ranges[] = {
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON),
+ regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+ regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+ regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4),
+ regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19),
+};
+
+static const struct regmap_range da9063l_bb_volatile_ranges[] = {
+ regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D),
+ regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B),
+ regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F),
+ regmap_reg_range(DA9063_REG_BCORE2_CONT, DA9063_REG_LDO11_CONT),
+ regmap_reg_range(DA9063_REG_DVC_1, DA9063_REG_ADC_MAN),
+ regmap_reg_range(DA9063_REG_ADC_RES_L, DA9063_REG_MON_A10_RES),
+ regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_SEQ),
+ regmap_reg_range(DA9063_REG_EN_32K, DA9063_REG_EN_32K),
+ regmap_reg_range(DA9063_BB_REG_MON_REG_5, DA9063_BB_REG_MON_REG_6),
+};
+
+static const struct regmap_access_table da9063l_bb_readable_table = {
+ .yes_ranges = da9063l_bb_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063l_bb_readable_ranges),
+};
+
+static const struct regmap_access_table da9063l_bb_writeable_table = {
+ .yes_ranges = da9063l_bb_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063l_bb_writeable_ranges),
+};
+
+static const struct regmap_access_table da9063l_bb_volatile_table = {
+ .yes_ranges = da9063l_bb_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063l_bb_volatile_ranges),
+};
+
static const struct regmap_range_cfg da9063_range_cfg[] = {
{
.range_min = DA9063_REG_PAGE_CON,
@@ -232,11 +186,12 @@ static struct regmap_config da9063_regmap_config = {
static const struct of_device_id da9063_dt_ids[] = {
{ .compatible = "dlg,da9063", },
+ { .compatible = "dlg,da9063l", },
{ }
};
MODULE_DEVICE_TABLE(of, da9063_dt_ids);
static int da9063_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct da9063 *da9063;
int ret;
@@ -248,11 +203,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, da9063);
da9063->dev = &i2c->dev;
da9063->chip_irq = i2c->irq;
+ da9063->type = id->driver_data;
if (da9063->variant_code == PMIC_DA9063_AD) {
da9063_regmap_config.rd_table = &da9063_ad_readable_table;
da9063_regmap_config.wr_table = &da9063_ad_writeable_table;
da9063_regmap_config.volatile_table = &da9063_ad_volatile_table;
+ } else if (da9063->type == PMIC_TYPE_DA9063L) {
+ da9063_regmap_config.rd_table = &da9063l_bb_readable_table;
+ da9063_regmap_config.wr_table = &da9063l_bb_writeable_table;
+ da9063_regmap_config.volatile_table = &da9063l_bb_volatile_table;
} else {
da9063_regmap_config.rd_table = &da9063_bb_readable_table;
da9063_regmap_config.wr_table = &da9063_bb_writeable_table;
@@ -270,17 +230,9 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
return da9063_device_init(da9063, i2c->irq);
}
-static int da9063_i2c_remove(struct i2c_client *i2c)
-{
- struct da9063 *da9063 = i2c_get_clientdata(i2c);
-
- da9063_device_exit(da9063);
-
- return 0;
-}
-
static const struct i2c_device_id da9063_i2c_id[] = {
- {"da9063", PMIC_DA9063},
+ { "da9063", PMIC_TYPE_DA9063 },
+ { "da9063l", PMIC_TYPE_DA9063L },
{},
};
MODULE_DEVICE_TABLE(i2c, da9063_i2c_id);
@@ -291,7 +243,6 @@ static struct i2c_driver da9063_i2c_driver = {
.of_match_table = of_match_ptr(da9063_dt_ids),
},
.probe = da9063_i2c_probe,
- .remove = da9063_i2c_remove,
.id_table = da9063_i2c_id,
};
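The range-table rewrite above is purely mechanical: regmap_reg_range() from <linux/regmap.h> is a struct initialiser macro, so the generated tables are identical to the open-coded versions. Its definition is essentially:

#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }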
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index 207bbfe55449..ecc0c8ce6c58 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -28,132 +28,145 @@
static const struct regmap_irq da9063_irqs[] = {
/* DA9063 event A register */
- [DA9063_IRQ_ONKEY] = {
- .reg_offset = DA9063_REG_EVENT_A_OFFSET,
- .mask = DA9063_M_ONKEY,
- },
- [DA9063_IRQ_ALARM] = {
- .reg_offset = DA9063_REG_EVENT_A_OFFSET,
- .mask = DA9063_M_ALARM,
- },
- [DA9063_IRQ_TICK] = {
- .reg_offset = DA9063_REG_EVENT_A_OFFSET,
- .mask = DA9063_M_TICK,
- },
- [DA9063_IRQ_ADC_RDY] = {
- .reg_offset = DA9063_REG_EVENT_A_OFFSET,
- .mask = DA9063_M_ADC_RDY,
- },
- [DA9063_IRQ_SEQ_RDY] = {
- .reg_offset = DA9063_REG_EVENT_A_OFFSET,
- .mask = DA9063_M_SEQ_RDY,
- },
+ REGMAP_IRQ_REG(DA9063_IRQ_ONKEY,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_ONKEY),
+ REGMAP_IRQ_REG(DA9063_IRQ_ALARM,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_ALARM),
+ REGMAP_IRQ_REG(DA9063_IRQ_TICK,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_TICK),
+ REGMAP_IRQ_REG(DA9063_IRQ_ADC_RDY,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_ADC_RDY),
+ REGMAP_IRQ_REG(DA9063_IRQ_SEQ_RDY,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_SEQ_RDY),
/* DA9063 event B register */
- [DA9063_IRQ_WAKE] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_WAKE,
- },
- [DA9063_IRQ_TEMP] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_TEMP,
- },
- [DA9063_IRQ_COMP_1V2] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_COMP_1V2,
- },
- [DA9063_IRQ_LDO_LIM] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_LDO_LIM,
- },
- [DA9063_IRQ_REG_UVOV] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_UVOV,
- },
- [DA9063_IRQ_DVC_RDY] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_DVC_RDY,
- },
- [DA9063_IRQ_VDD_MON] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_VDD_MON,
- },
- [DA9063_IRQ_WARN] = {
- .reg_offset = DA9063_REG_EVENT_B_OFFSET,
- .mask = DA9063_M_VDD_WARN,
- },
+ REGMAP_IRQ_REG(DA9063_IRQ_WAKE,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_WAKE),
+ REGMAP_IRQ_REG(DA9063_IRQ_TEMP,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_TEMP),
+ REGMAP_IRQ_REG(DA9063_IRQ_COMP_1V2,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_COMP_1V2),
+ REGMAP_IRQ_REG(DA9063_IRQ_LDO_LIM,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_LDO_LIM),
+ REGMAP_IRQ_REG(DA9063_IRQ_REG_UVOV,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_UVOV),
+ REGMAP_IRQ_REG(DA9063_IRQ_DVC_RDY,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_DVC_RDY),
+ REGMAP_IRQ_REG(DA9063_IRQ_VDD_MON,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_VDD_MON),
+ REGMAP_IRQ_REG(DA9063_IRQ_WARN,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_VDD_WARN),
/* DA9063 event C register */
- [DA9063_IRQ_GPI0] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI0,
- },
- [DA9063_IRQ_GPI1] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI1,
- },
- [DA9063_IRQ_GPI2] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI2,
- },
- [DA9063_IRQ_GPI3] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI3,
- },
- [DA9063_IRQ_GPI4] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI4,
- },
- [DA9063_IRQ_GPI5] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI5,
- },
- [DA9063_IRQ_GPI6] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI6,
- },
- [DA9063_IRQ_GPI7] = {
- .reg_offset = DA9063_REG_EVENT_C_OFFSET,
- .mask = DA9063_M_GPI7,
- },
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI0,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI0),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI1,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI1),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI2,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI2),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI3,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI3),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI4,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI4),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI5,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI5),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI6,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI6),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI7,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI7),
/* DA9063 event D register */
- [DA9063_IRQ_GPI8] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI8,
- },
- [DA9063_IRQ_GPI9] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI9,
- },
- [DA9063_IRQ_GPI10] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI10,
- },
- [DA9063_IRQ_GPI11] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI11,
- },
- [DA9063_IRQ_GPI12] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI12,
- },
- [DA9063_IRQ_GPI13] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI13,
- },
- [DA9063_IRQ_GPI14] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI14,
- },
- [DA9063_IRQ_GPI15] = {
- .reg_offset = DA9063_REG_EVENT_D_OFFSET,
- .mask = DA9063_M_GPI15,
- },
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI8,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI8),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI9,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI9),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI10,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI10),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI11,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI11),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI12,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI12),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI13,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI13),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI14,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI14),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI15,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI15),
};
static const struct regmap_irq_chip da9063_irq_chip = {
.name = "da9063-irq",
.irqs = da9063_irqs,
- .num_irqs = DA9063_NUM_IRQ,
+ .num_irqs = ARRAY_SIZE(da9063_irqs),
+ .num_regs = 4,
+ .status_base = DA9063_REG_EVENT_A,
+ .mask_base = DA9063_REG_IRQ_MASK_A,
+ .ack_base = DA9063_REG_EVENT_A,
+ .init_ack_masked = true,
+};
+
+static const struct regmap_irq da9063l_irqs[] = {
+ /* DA9063 event A register */
+ REGMAP_IRQ_REG(DA9063_IRQ_ONKEY,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_ONKEY),
+ REGMAP_IRQ_REG(DA9063_IRQ_ADC_RDY,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_ADC_RDY),
+ REGMAP_IRQ_REG(DA9063_IRQ_SEQ_RDY,
+ DA9063_REG_EVENT_A_OFFSET, DA9063_M_SEQ_RDY),
+ /* DA9063 event B register */
+ REGMAP_IRQ_REG(DA9063_IRQ_WAKE,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_WAKE),
+ REGMAP_IRQ_REG(DA9063_IRQ_TEMP,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_TEMP),
+ REGMAP_IRQ_REG(DA9063_IRQ_COMP_1V2,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_COMP_1V2),
+ REGMAP_IRQ_REG(DA9063_IRQ_LDO_LIM,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_LDO_LIM),
+ REGMAP_IRQ_REG(DA9063_IRQ_REG_UVOV,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_UVOV),
+ REGMAP_IRQ_REG(DA9063_IRQ_DVC_RDY,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_DVC_RDY),
+ REGMAP_IRQ_REG(DA9063_IRQ_VDD_MON,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_VDD_MON),
+ REGMAP_IRQ_REG(DA9063_IRQ_WARN,
+ DA9063_REG_EVENT_B_OFFSET, DA9063_M_VDD_WARN),
+ /* DA9063 event C register */
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI0,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI0),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI1,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI1),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI2,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI2),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI3,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI3),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI4,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI4),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI5,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI5),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI6,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI6),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI7,
+ DA9063_REG_EVENT_C_OFFSET, DA9063_M_GPI7),
+ /* DA9063 event D register */
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI8,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI8),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI9,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI9),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI10,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI10),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI11,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI11),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI12,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI12),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI13,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI13),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI14,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI14),
+ REGMAP_IRQ_REG(DA9063_IRQ_GPI15,
+ DA9063_REG_EVENT_D_OFFSET, DA9063_M_GPI15),
+};
+static const struct regmap_irq_chip da9063l_irq_chip = {
+ .name = "da9063l-irq",
+ .irqs = da9063l_irqs,
+ .num_irqs = ARRAY_SIZE(da9063l_irqs),
.num_regs = 4,
.status_base = DA9063_REG_EVENT_A,
.mask_base = DA9063_REG_IRQ_MASK_A,
@@ -163,6 +176,7 @@ static const struct regmap_irq_chip da9063_irq_chip = {
int da9063_irq_init(struct da9063 *da9063)
{
+ const struct regmap_irq_chip *irq_chip;
int ret;
if (!da9063->chip_irq) {
@@ -170,10 +184,15 @@ int da9063_irq_init(struct da9063 *da9063)
return -EINVAL;
}
- ret = regmap_add_irq_chip(da9063->regmap, da9063->chip_irq,
+ if (da9063->type == PMIC_TYPE_DA9063)
+ irq_chip = &da9063_irq_chip;
+ else
+ irq_chip = &da9063l_irq_chip;
+
+ ret = devm_regmap_add_irq_chip(da9063->dev, da9063->regmap,
+ da9063->chip_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
- da9063->irq_base, &da9063_irq_chip,
- &da9063->regmap_irq);
+ da9063->irq_base, irq_chip, &da9063->regmap_irq);
if (ret) {
dev_err(da9063->dev, "Failed to reguest IRQ %d: %d\n",
da9063->chip_irq, ret);
@@ -182,8 +201,3 @@ int da9063_irq_init(struct da9063 *da9063)
return 0;
}
-
-void da9063_irq_exit(struct da9063 *da9063)
-{
- regmap_del_irq_chip(da9063->chip_irq, da9063->regmap_irq);
-}
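The IRQ tables get the same treatment: REGMAP_IRQ_REG() from <linux/regmap.h> expands to the designated initialiser that was previously written out by hand, roughly:

#define REGMAP_IRQ_REG(_irq, _off, _mask)		\
	[_irq] = { .reg_offset = (_off), .mask = (_mask) }

Switching .num_irqs to ARRAY_SIZE(da9063_irqs) also derives the bound from the table itself rather than a separately maintained DA9063_NUM_IRQ constant, which matters now that a second table (da9063l_irqs, deliberately omitting the RTC alarm/tick entries) exists.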
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 704e189ca162..90e789943466 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -194,6 +194,7 @@ static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
struct device *dev = &dln2->interface->dev;
struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
struct dln2_rx_context *rxc;
+ unsigned long flags;
bool valid_slot = false;
if (rx_slot >= DLN2_MAX_RX_SLOTS)
@@ -201,18 +202,13 @@ static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
rxc = &rxs->slots[rx_slot];
- /*
- * No need to disable interrupts as this lock is not taken in interrupt
- * context elsewhere in this driver. This function (or its callers) are
- * also not exported to other modules.
- */
- spin_lock(&rxs->lock);
+ spin_lock_irqsave(&rxs->lock, flags);
if (rxc->in_use && !rxc->urb) {
rxc->urb = urb;
complete(&rxc->done);
valid_slot = true;
}
- spin_unlock(&rxs->lock);
+ spin_unlock_irqrestore(&rxs->lock, flags);
out:
if (!valid_slot)
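The irqsave conversion above closes a real hole: this completion path runs from URB callback context, which can execute in hard interrupt context, so any other holder of rxs->lock could be interrupted by it on the same CPU. The hazard, sketched (hypothetical timeline, not from the changelog):

/*
 *   process context:  spin_lock(&rxs->lock);
 *   -- URB completion hard-IRQ preempts the lock holder --
 *   IRQ context:      spin_lock(&rxs->lock);   <- spins forever
 *
 * spin_lock_irqsave() masks local interrupts before acquiring the lock,
 * so the preemption above cannot happen while the lock is held.
 */
unsigned long flags;

spin_lock_irqsave(&rxs->lock, flags);
/* ... touch the slot ... */
spin_unlock_irqrestore(&rxs->lock, flags);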
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index c37ccbfd52f2..96c07fa1802a 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = {
.reg_bits = 32,
.reg_stride = HI655X_STRIDE,
.val_bits = 8,
- .max_register = HI655X_BUS_ADDR(0xFFF),
+ .max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
};
static struct resource pwrkey_resources[] = {
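The new hi655x bound is best read as arithmetic. Assuming the header's definitions of HI655X_STRIDE as 4 and HI655X_BUS_ADDR(x) as (x) << 2 (assumptions; they are not shown in this diff):

/* old: HI655X_BUS_ADDR(0xFFF)                 = 0xFFF << 2        = 0x3FFC
 * new: HI655X_BUS_ADDR(0x400) - HI655X_STRIDE = (0x400 << 2) - 4  = 0x0FFC
 *
 * i.e. the device has 0x400 registers of stride 4, so the last valid bus
 * address is 0x400 * 4 - 4; the old bound accepted addresses well past it.
 */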
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index d9ae983095c5..0e5282fc1467 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -178,6 +178,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x31c2), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x31c4), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x31c6), (kernel_ulong_t)&bxt_info },
+ /* ICL-LP */
+ { PCI_VDEVICE(INTEL, 0x34a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x34a9), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x34aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x34ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x34c5), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34c6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34c7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x34e8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34e9), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 390b27cb2c2e..fb5a10b8317d 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -143,7 +143,7 @@ static struct platform_device *kempld_pdev;
static int kempld_create_platform_device(const struct dmi_system_id *id)
{
- struct kempld_platform_data *pdata = id->driver_data;
+ const struct kempld_platform_data *pdata = id->driver_data;
int ret;
kempld_pdev = platform_device_alloc("kempld", -1);
@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(kempld_write32);
*/
void kempld_get_mutex(struct kempld_device_data *pld)
{
- struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
+ const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
mutex_lock(&pld->lock);
pdata->get_hardware_mutex(pld);
@@ -272,7 +272,7 @@ EXPORT_SYMBOL_GPL(kempld_get_mutex);
*/
void kempld_release_mutex(struct kempld_device_data *pld)
{
- struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
+ const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
pdata->release_hardware_mutex(pld);
mutex_unlock(&pld->lock);
@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(kempld_release_mutex);
static int kempld_get_info(struct kempld_device_data *pld)
{
int ret;
- struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
+ const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
char major, minor;
ret = pdata->get_info(pld);
@@ -332,7 +332,7 @@ static int kempld_get_info(struct kempld_device_data *pld)
*/
static int kempld_register_cells(struct kempld_device_data *pld)
{
- struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
+ const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
return pdata->register_cells(pld);
}
@@ -444,7 +444,8 @@ static int kempld_detect_device(struct kempld_device_data *pld)
static int kempld_probe(struct platform_device *pdev)
{
- struct kempld_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct kempld_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct kempld_device_data *pld;
struct resource *ioport;
@@ -476,7 +477,7 @@ static int kempld_probe(struct platform_device *pdev)
static int kempld_remove(struct platform_device *pdev)
{
struct kempld_device_data *pld = platform_get_drvdata(pdev);
- struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
+ const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
sysfs_remove_group(&pld->dev->kobj, &pld_attr_group);
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
new file mode 100644
index 000000000000..8cfea969b060
--- /dev/null
+++ b/drivers/mfd/madera-core.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Core MFD support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/registers.h>
+
+#include "madera.h"
+
+#define CS47L35_SILICON_ID 0x6360
+#define CS47L85_SILICON_ID 0x6338
+#define CS47L90_SILICON_ID 0x6364
+
+#define MADERA_32KZ_MCLK2 1
+
+static const char * const madera_core_supplies[] = {
+ "AVDD",
+ "DBVDD1",
+};
+
+static const struct mfd_cell madera_ldo1_devs[] = {
+ { .name = "madera-ldo1" },
+};
+
+static const char * const cs47l35_supplies[] = {
+ "MICVDD",
+ "DBVDD2",
+ "CPVDD1",
+ "CPVDD2",
+ "SPKVDD",
+};
+
+static const struct mfd_cell cs47l35_devs[] = {
+ { .name = "madera-pinctrl", },
+ { .name = "madera-irq", },
+ { .name = "madera-micsupp", },
+ { .name = "madera-gpio", },
+ { .name = "madera-extcon", },
+ {
+ .name = "cs47l35-codec",
+ .parent_supplies = cs47l35_supplies,
+ .num_parent_supplies = ARRAY_SIZE(cs47l35_supplies),
+ },
+};
+
+static const char * const cs47l85_supplies[] = {
+ "MICVDD",
+ "DBVDD2",
+ "DBVDD3",
+ "DBVDD4",
+ "CPVDD1",
+ "CPVDD2",
+ "SPKVDDL",
+ "SPKVDDR",
+};
+
+static const struct mfd_cell cs47l85_devs[] = {
+ { .name = "madera-pinctrl", },
+ { .name = "madera-irq", },
+ { .name = "madera-micsupp" },
+ { .name = "madera-gpio", },
+ { .name = "madera-extcon", },
+ {
+ .name = "cs47l85-codec",
+ .parent_supplies = cs47l85_supplies,
+ .num_parent_supplies = ARRAY_SIZE(cs47l85_supplies),
+ },
+};
+
+static const char * const cs47l90_supplies[] = {
+ "MICVDD",
+ "DBVDD2",
+ "DBVDD3",
+ "DBVDD4",
+ "CPVDD1",
+ "CPVDD2",
+};
+
+static const struct mfd_cell cs47l90_devs[] = {
+ { .name = "madera-pinctrl", },
+ { .name = "madera-irq", },
+ { .name = "madera-micsupp", },
+ { .name = "madera-gpio", },
+ { .name = "madera-extcon", },
+ {
+ .name = "cs47l90-codec",
+ .parent_supplies = cs47l90_supplies,
+ .num_parent_supplies = ARRAY_SIZE(cs47l90_supplies),
+ },
+};
+
+/* Used by madera-i2c and madera-spi drivers */
+const char *madera_name_from_type(enum madera_type type)
+{
+ switch (type) {
+ case CS47L35:
+ return "CS47L35";
+ case CS47L85:
+ return "CS47L85";
+ case CS47L90:
+ return "CS47L90";
+ case CS47L91:
+ return "CS47L91";
+ case WM1840:
+ return "WM1840";
+ default:
+ return "Unknown";
+ }
+}
+EXPORT_SYMBOL_GPL(madera_name_from_type);
+
+#define MADERA_BOOT_POLL_MAX_INTERVAL_US 5000
+#define MADERA_BOOT_POLL_TIMEOUT_US 25000
+
+static int madera_wait_for_boot(struct madera *madera)
+{
+ unsigned int val;
+ int ret;
+
+ /*
+ * We can't use an interrupt as we need to runtime resume to do so,
+ * so we poll the status bit. This won't race with the interrupt
+ * handler because it will be blocked on runtime resume.
+ */
+ ret = regmap_read_poll_timeout(madera->regmap,
+ MADERA_IRQ1_RAW_STATUS_1,
+ val,
+ (val & MADERA_BOOT_DONE_STS1),
+ MADERA_BOOT_POLL_MAX_INTERVAL_US,
+ MADERA_BOOT_POLL_TIMEOUT_US);
+
+ if (ret)
+ dev_err(madera->dev, "Polling BOOT_DONE_STS failed: %d\n", ret);
+
+ /*
+ * BOOT_DONE defaults to unmasked on boot so we must ack it.
+ * Do this unconditionally to avoid interrupt storms.
+ */
+ regmap_write(madera->regmap, MADERA_IRQ1_STATUS_1,
+ MADERA_BOOT_DONE_EINT1);
+
+ pm_runtime_mark_last_busy(madera->dev);
+
+ return ret;
+}
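+
+/*
+ * For reference, regmap_read_poll_timeout() above behaves roughly like
+ * this open-coded loop (simplified sketch; the real macro uses ktime
+ * accounting rather than jiffies):
+ *
+ *	timeout = jiffies + usecs_to_jiffies(MADERA_BOOT_POLL_TIMEOUT_US);
+ *	for (;;) {
+ *		ret = regmap_read(map, MADERA_IRQ1_RAW_STATUS_1, &val);
+ *		if (ret || (val & MADERA_BOOT_DONE_STS1))
+ *			break;
+ *		if (time_after(jiffies, timeout)) {
+ *			ret = -ETIMEDOUT;
+ *			break;
+ *		}
+ *		usleep_range(MADERA_BOOT_POLL_MAX_INTERVAL_US / 2,
+ *			     MADERA_BOOT_POLL_MAX_INTERVAL_US);
+ *	}
+ */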
+
+static int madera_soft_reset(struct madera *madera)
+{
+ int ret;
+
+ ret = regmap_write(madera->regmap, MADERA_SOFTWARE_RESET, 0);
+ if (ret != 0) {
+ dev_err(madera->dev, "Failed to soft reset device: %d\n", ret);
+ return ret;
+ }
+
+ /* Allow time for internal clocks to startup after reset */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static void madera_enable_hard_reset(struct madera *madera)
+{
+ if (!madera->pdata.reset)
+ return;
+
+ /*
+ * There are many existing out-of-tree users of these codecs that we
+ * can't break so preserve the expected behaviour of setting the line
+ * low to assert reset.
+ */
+ gpiod_set_raw_value_cansleep(madera->pdata.reset, 0);
+}
+
+static void madera_disable_hard_reset(struct madera *madera)
+{
+ if (!madera->pdata.reset)
+ return;
+
+ gpiod_set_raw_value_cansleep(madera->pdata.reset, 1);
+ usleep_range(1000, 2000);
+}
+
+static int __maybe_unused madera_runtime_resume(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(dev, "Leaving sleep mode\n");
+
+ ret = regulator_enable(madera->dcvdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable DCVDD: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(madera->regmap, false);
+ regcache_cache_only(madera->regmap_32bit, false);
+
+ ret = madera_wait_for_boot(madera);
+ if (ret)
+ goto err;
+
+ ret = regcache_sync(madera->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to restore 16-bit register cache\n");
+ goto err;
+ }
+
+ ret = regcache_sync(madera->regmap_32bit);
+ if (ret) {
+ dev_err(dev, "Failed to restore 32-bit register cache\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ regcache_cache_only(madera->regmap_32bit, true);
+ regcache_cache_only(madera->regmap, true);
+ regulator_disable(madera->dcvdd);
+
+ return ret;
+}
+
+static int __maybe_unused madera_runtime_suspend(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev);
+
+ dev_dbg(madera->dev, "Entering sleep mode\n");
+
+ regcache_cache_only(madera->regmap, true);
+ regcache_mark_dirty(madera->regmap);
+ regcache_cache_only(madera->regmap_32bit, true);
+ regcache_mark_dirty(madera->regmap_32bit);
+
+ regulator_disable(madera->dcvdd);
+
+ return 0;
+}
+
+const struct dev_pm_ops madera_pm_ops = {
+ SET_RUNTIME_PM_OPS(madera_runtime_suspend,
+ madera_runtime_resume,
+ NULL)
+};
+EXPORT_SYMBOL_GPL(madera_pm_ops);
+
+const struct of_device_id madera_of_match[] = {
+ { .compatible = "cirrus,cs47l35", .data = (void *)CS47L35 },
+ { .compatible = "cirrus,cs47l85", .data = (void *)CS47L85 },
+ { .compatible = "cirrus,cs47l90", .data = (void *)CS47L90 },
+ { .compatible = "cirrus,cs47l91", .data = (void *)CS47L91 },
+ { .compatible = "cirrus,wm1840", .data = (void *)WM1840 },
+ {}
+};
+EXPORT_SYMBOL_GPL(madera_of_match);
+
+static int madera_get_reset_gpio(struct madera *madera)
+{
+ struct gpio_desc *reset;
+ int ret;
+
+ if (madera->pdata.reset)
+ return 0;
+
+ reset = devm_gpiod_get_optional(madera->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(madera->dev, "Failed to request /RESET: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * A hard reset is needed for full reset of the chip. We allow running
+ * without hard reset only because it can be useful for early
+ * prototyping and some debugging, but we need to warn it's not ideal.
+ */
+ if (!reset)
+ dev_warn(madera->dev,
+ "Running without reset GPIO is not recommended\n");
+
+ madera->pdata.reset = reset;
+
+ return 0;
+}
+
+static void madera_set_micbias_info(struct madera *madera)
+{
+ /*
+ * num_childbias is an array because future codecs can have a different
+ * number of child biases per micbias. Unspecified values default to 0.
+ */
+ switch (madera->type) {
+ case CS47L35:
+ madera->num_micbias = 2;
+ madera->num_childbias[0] = 2;
+ madera->num_childbias[1] = 2;
+ return;
+ case CS47L85:
+ case WM1840:
+ madera->num_micbias = 4;
+ /* no child biases */
+ return;
+ case CS47L90:
+ case CS47L91:
+ madera->num_micbias = 2;
+ madera->num_childbias[0] = 4;
+ madera->num_childbias[1] = 4;
+ return;
+ default:
+ return;
+ }
+}
+
+int madera_dev_init(struct madera *madera)
+{
+ struct device *dev = madera->dev;
+ unsigned int hwid;
+ int (*patch_fn)(struct madera *) = NULL;
+ const struct mfd_cell *mfd_devs;
+ int n_devs = 0;
+ int i, ret;
+
+ dev_set_drvdata(madera->dev, madera);
+ BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier);
+ madera_set_micbias_info(madera);
+
+ /*
+ * We need writable hw config info that all children can share.
+ * Simplest to take one shared copy of pdata struct.
+ */
+ if (dev_get_platdata(madera->dev)) {
+ memcpy(&madera->pdata, dev_get_platdata(madera->dev),
+ sizeof(madera->pdata));
+ }
+
+ ret = madera_get_reset_gpio(madera);
+ if (ret)
+ return ret;
+
+ regcache_cache_only(madera->regmap, true);
+ regcache_cache_only(madera->regmap_32bit, true);
+
+ for (i = 0; i < ARRAY_SIZE(madera_core_supplies); i++)
+ madera->core_supplies[i].supply = madera_core_supplies[i];
+
+ madera->num_core_supplies = ARRAY_SIZE(madera_core_supplies);
+
+ /*
+ * On some codecs DCVDD could be supplied by the internal LDO1.
+ * For those we must add the LDO1 driver before requesting DCVDD
+ * No devm_ because we need to control shutdown order of children.
+ */
+ switch (madera->type) {
+ case CS47L35:
+ case CS47L90:
+ case CS47L91:
+ break;
+ case CS47L85:
+ case WM1840:
+ ret = mfd_add_devices(madera->dev, PLATFORM_DEVID_NONE,
+ madera_ldo1_devs,
+ ARRAY_SIZE(madera_ldo1_devs),
+ NULL, 0, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to add LDO1 child: %d\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ /* No point continuing if the type is unknown */
+ dev_err(madera->dev, "Unknown device type %d\n", madera->type);
+ return -ENODEV;
+ }
+
+ ret = devm_regulator_bulk_get(dev, madera->num_core_supplies,
+ madera->core_supplies);
+ if (ret) {
+ dev_err(dev, "Failed to request core supplies: %d\n", ret);
+ goto err_devs;
+ }
+
+ /*
+ * Don't use devres here. If the regulator is one of our children it
+ * will already have been removed before devres cleanup on this mfd
+ * driver tries to call put() on it. We need control of shutdown order.
+ */
+ madera->dcvdd = regulator_get(madera->dev, "DCVDD");
+ if (IS_ERR(madera->dcvdd)) {
+ ret = PTR_ERR(madera->dcvdd);
+ dev_err(dev, "Failed to request DCVDD: %d\n", ret);
+ goto err_devs;
+ }
+
+ ret = regulator_bulk_enable(madera->num_core_supplies,
+ madera->core_supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable core supplies: %d\n", ret);
+ goto err_dcvdd;
+ }
+
+ ret = regulator_enable(madera->dcvdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable DCVDD: %d\n", ret);
+ goto err_enable;
+ }
+
+ madera_disable_hard_reset(madera);
+
+ regcache_cache_only(madera->regmap, false);
+ regcache_cache_only(madera->regmap_32bit, false);
+
+ /*
+ * Now we can power up and verify that this is a chip we know about
+ * before we start doing any writes to its registers.
+ */
+ ret = regmap_read(madera->regmap, MADERA_SOFTWARE_RESET, &hwid);
+ if (ret) {
+ dev_err(dev, "Failed to read ID register: %d\n", ret);
+ goto err_reset;
+ }
+
+ switch (hwid) {
+ case CS47L35_SILICON_ID:
+ if (IS_ENABLED(CONFIG_MFD_CS47L35)) {
+ switch (madera->type) {
+ case CS47L35:
+ patch_fn = cs47l35_patch;
+ mfd_devs = cs47l35_devs;
+ n_devs = ARRAY_SIZE(cs47l35_devs);
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case CS47L85_SILICON_ID:
+ if (IS_ENABLED(CONFIG_MFD_CS47L85)) {
+ switch (madera->type) {
+ case CS47L85:
+ case WM1840:
+ patch_fn = cs47l85_patch;
+ mfd_devs = cs47l85_devs;
+ n_devs = ARRAY_SIZE(cs47l85_devs);
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case CS47L90_SILICON_ID:
+ if (IS_ENABLED(CONFIG_MFD_CS47L90)) {
+ switch (madera->type) {
+ case CS47L90:
+ case CS47L91:
+ patch_fn = cs47l90_patch;
+ mfd_devs = cs47l90_devs;
+ n_devs = ARRAY_SIZE(cs47l90_devs);
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ dev_err(madera->dev, "Unknown device ID: %x\n", hwid);
+ ret = -EINVAL;
+ goto err_reset;
+ }
+
+ if (!n_devs) {
+ dev_err(madera->dev, "Device ID 0x%x not a %s\n", hwid,
+ madera->type_name);
+ ret = -ENODEV;
+ goto err_reset;
+ }
+
+ /*
+ * It looks like a device we support. If we don't have a hard reset
+ * we can now attempt a soft reset.
+ */
+ if (!madera->pdata.reset) {
+ ret = madera_soft_reset(madera);
+ if (ret)
+ goto err_reset;
+ }
+
+ ret = madera_wait_for_boot(madera);
+ if (ret) {
+ dev_err(madera->dev, "Device failed initial boot: %d\n", ret);
+ goto err_reset;
+ }
+
+ ret = regmap_read(madera->regmap, MADERA_HARDWARE_REVISION,
+ &madera->rev);
+ if (ret) {
+ dev_err(dev, "Failed to read revision register: %d\n", ret);
+ goto err_reset;
+ }
+ madera->rev &= MADERA_HW_REVISION_MASK;
+
+ dev_info(dev, "%s silicon revision %d\n", madera->type_name,
+ madera->rev);
+
+ /* Apply hardware patch */
+ if (patch_fn) {
+ ret = patch_fn(madera);
+ if (ret) {
+ dev_err(madera->dev, "Failed to apply patch %d\n", ret);
+ goto err_reset;
+ }
+ }
+
+ /* Init 32k clock sourced from MCLK2 */
+ ret = regmap_update_bits(madera->regmap,
+ MADERA_CLOCK_32K_1,
+ MADERA_CLK_32K_ENA_MASK | MADERA_CLK_32K_SRC_MASK,
+ MADERA_CLK_32K_ENA | MADERA_32KZ_MCLK2);
+ if (ret) {
+ dev_err(madera->dev, "Failed to init 32k clock: %d\n", ret);
+ goto err_reset;
+ }
+
+ pm_runtime_set_active(madera->dev);
+ pm_runtime_enable(madera->dev);
+ pm_runtime_set_autosuspend_delay(madera->dev, 100);
+ pm_runtime_use_autosuspend(madera->dev);
+
+ /* No devm_ because we need to control shutdown order of children */
+ ret = mfd_add_devices(madera->dev, PLATFORM_DEVID_NONE,
+ mfd_devs, n_devs,
+ NULL, 0, NULL);
+ if (ret) {
+ dev_err(madera->dev, "Failed to add subdevices: %d\n", ret);
+ goto err_pm_runtime;
+ }
+
+ return 0;
+
+err_pm_runtime:
+ pm_runtime_disable(madera->dev);
+err_reset:
+ madera_enable_hard_reset(madera);
+ regulator_disable(madera->dcvdd);
+err_enable:
+ regulator_bulk_disable(madera->num_core_supplies,
+ madera->core_supplies);
+err_dcvdd:
+ regulator_put(madera->dcvdd);
+err_devs:
+ mfd_remove_devices(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(madera_dev_init);
+
+int madera_dev_exit(struct madera *madera)
+{
+ /* Prevent any IRQs being serviced while we clean up */
+ disable_irq(madera->irq);
+
+ /*
+ * DCVDD could be supplied by a child node, we must disable it before
+ * removing the children, and prevent PM runtime from turning it back on
+ */
+ pm_runtime_disable(madera->dev);
+
+ regulator_disable(madera->dcvdd);
+ regulator_put(madera->dcvdd);
+
+ mfd_remove_devices(madera->dev);
+ madera_enable_hard_reset(madera);
+
+ regulator_bulk_disable(madera->num_core_supplies,
+ madera->core_supplies);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(madera_dev_exit);
+
+MODULE_DESCRIPTION("Madera core MFD driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c
new file mode 100644
index 000000000000..05ae94be01d8
--- /dev/null
+++ b/drivers/mfd/madera-i2c.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I2C bus interface to Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/madera/core.h>
+
+#include "madera.h"
+
+static int madera_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct madera *madera;
+ const struct regmap_config *regmap_16bit_config = NULL;
+ const struct regmap_config *regmap_32bit_config = NULL;
+ const void *of_data;
+ unsigned long type;
+ const char *name;
+ int ret;
+
+ of_data = of_device_get_match_data(&i2c->dev);
+ if (of_data)
+ type = (unsigned long)of_data;
+ else
+ type = id->driver_data;
+
+ switch (type) {
+ case CS47L35:
+ if (IS_ENABLED(CONFIG_MFD_CS47L35)) {
+ regmap_16bit_config = &cs47l35_16bit_i2c_regmap;
+ regmap_32bit_config = &cs47l35_32bit_i2c_regmap;
+ }
+ break;
+ case CS47L85:
+ case WM1840:
+ if (IS_ENABLED(CONFIG_MFD_CS47L85)) {
+ regmap_16bit_config = &cs47l85_16bit_i2c_regmap;
+ regmap_32bit_config = &cs47l85_32bit_i2c_regmap;
+ }
+ break;
+ case CS47L90:
+ case CS47L91:
+ if (IS_ENABLED(CONFIG_MFD_CS47L90)) {
+ regmap_16bit_config = &cs47l90_16bit_i2c_regmap;
+ regmap_32bit_config = &cs47l90_32bit_i2c_regmap;
+ }
+ break;
+ default:
+ dev_err(&i2c->dev,
+ "Unknown Madera I2C device type %ld\n", type);
+ return -EINVAL;
+ }
+
+ name = madera_name_from_type(type);
+
+ if (!regmap_16bit_config) {
+ /* it's polite to say which codec isn't built into the kernel */
+ dev_err(&i2c->dev,
+ "Kernel does not include support for %s\n", name);
+ return -EINVAL;
+ }
+
+ madera = devm_kzalloc(&i2c->dev, sizeof(*madera), GFP_KERNEL);
+ if (!madera)
+ return -ENOMEM;
+
+ madera->regmap = devm_regmap_init_i2c(i2c, regmap_16bit_config);
+ if (IS_ERR(madera->regmap)) {
+ ret = PTR_ERR(madera->regmap);
+ dev_err(&i2c->dev,
+ "Failed to allocate 16-bit register map: %d\n", ret);
+ return ret;
+ }
+
+ madera->regmap_32bit = devm_regmap_init_i2c(i2c, regmap_32bit_config);
+ if (IS_ERR(madera->regmap_32bit)) {
+ ret = PTR_ERR(madera->regmap_32bit);
+ dev_err(&i2c->dev,
+ "Failed to allocate 32-bit register map: %d\n", ret);
+ return ret;
+ }
+
+ madera->type = type;
+ madera->type_name = name;
+ madera->dev = &i2c->dev;
+ madera->irq = i2c->irq;
+
+ return madera_dev_init(madera);
+}
+
+static int madera_i2c_remove(struct i2c_client *i2c)
+{
+ struct madera *madera = dev_get_drvdata(&i2c->dev);
+
+ madera_dev_exit(madera);
+
+ return 0;
+}
+
+static const struct i2c_device_id madera_i2c_id[] = {
+ { "cs47l35", CS47L35 },
+ { "cs47l85", CS47L85 },
+ { "cs47l90", CS47L90 },
+ { "cs47l91", CS47L91 },
+ { "wm1840", WM1840 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, madera_i2c_id);
+
+static struct i2c_driver madera_i2c_driver = {
+ .driver = {
+ .name = "madera",
+ .pm = &madera_pm_ops,
+ .of_match_table = of_match_ptr(madera_of_match),
+ },
+ .probe = madera_i2c_probe,
+ .remove = madera_i2c_remove,
+ .id_table = madera_i2c_id,
+};
+
+module_i2c_driver(madera_i2c_driver);
+
+MODULE_DESCRIPTION("Madera I2C bus interface");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
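Note why probe creates two regmaps: the 16-bit map covers the ordinary register file, while the 32-bit map covers the wide DSP-memory registers (compare the cs47l90_32bit_* configs earlier in this series, whose max_register is a DSP error-address register). A usage sketch -- MADERA_DSP1_CONFIG_1 is a hypothetical register name; the rest is from this series:

static int madera_regmap_usage_sketch(struct madera *madera)
{
	unsigned int val;
	int ret;

	/* 16-bit map: ordinary codec registers */
	ret = regmap_read(madera->regmap, MADERA_SOFTWARE_RESET, &val);
	if (ret)
		return ret;

	/* 32-bit map: DSP-memory registers (register name hypothetical) */
	return regmap_write(madera->regmap_32bit, MADERA_DSP1_CONFIG_1, 0);
}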
diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c
new file mode 100644
index 000000000000..4c398b278bba
--- /dev/null
+++ b/drivers/mfd/madera-spi.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI bus interface to Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include <linux/mfd/madera/core.h>
+
+#include "madera.h"
+
+static int madera_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct madera *madera;
+ const struct regmap_config *regmap_16bit_config = NULL;
+ const struct regmap_config *regmap_32bit_config = NULL;
+ const void *of_data;
+ unsigned long type;
+ const char *name;
+ int ret;
+
+ of_data = of_device_get_match_data(&spi->dev);
+ if (of_data)
+ type = (unsigned long)of_data;
+ else
+ type = id->driver_data;
+
+ switch (type) {
+ case CS47L35:
+ if (IS_ENABLED(CONFIG_MFD_CS47L35)) {
+ regmap_16bit_config = &cs47l35_16bit_spi_regmap;
+ regmap_32bit_config = &cs47l35_32bit_spi_regmap;
+ }
+ break;
+ case CS47L85:
+ case WM1840:
+ if (IS_ENABLED(CONFIG_MFD_CS47L85)) {
+ regmap_16bit_config = &cs47l85_16bit_spi_regmap;
+ regmap_32bit_config = &cs47l85_32bit_spi_regmap;
+ }
+ break;
+ case CS47L90:
+ case CS47L91:
+ if (IS_ENABLED(CONFIG_MFD_CS47L90)) {
+ regmap_16bit_config = &cs47l90_16bit_spi_regmap;
+ regmap_32bit_config = &cs47l90_32bit_spi_regmap;
+ }
+ break;
+ default:
+ dev_err(&spi->dev,
+ "Unknown Madera SPI device type %ld\n", type);
+ return -EINVAL;
+ }
+
+ name = madera_name_from_type(type);
+
+ if (!regmap_16bit_config) {
+ /* it's polite to say which codec isn't built into the kernel */
+ dev_err(&spi->dev,
+ "Kernel does not include support for %s\n", name);
+ return -EINVAL;
+ }
+
+ madera = devm_kzalloc(&spi->dev, sizeof(*madera), GFP_KERNEL);
+ if (!madera)
+ return -ENOMEM;
+
+ madera->regmap = devm_regmap_init_spi(spi, regmap_16bit_config);
+ if (IS_ERR(madera->regmap)) {
+ ret = PTR_ERR(madera->regmap);
+ dev_err(&spi->dev,
+ "Failed to allocate 16-bit register map: %d\n", ret);
+ return ret;
+ }
+
+ madera->regmap_32bit = devm_regmap_init_spi(spi, regmap_32bit_config);
+ if (IS_ERR(madera->regmap_32bit)) {
+ ret = PTR_ERR(madera->regmap_32bit);
+ dev_err(&spi->dev,
+ "Failed to allocate 32-bit register map: %d\n", ret);
+ return ret;
+ }
+
+ madera->type = type;
+ madera->type_name = name;
+ madera->dev = &spi->dev;
+ madera->irq = spi->irq;
+
+ return madera_dev_init(madera);
+}
+
+static int madera_spi_remove(struct spi_device *spi)
+{
+ struct madera *madera = spi_get_drvdata(spi);
+
+ madera_dev_exit(madera);
+
+ return 0;
+}
+
+static const struct spi_device_id madera_spi_ids[] = {
+ { "cs47l35", CS47L35 },
+ { "cs47l85", CS47L85 },
+ { "cs47l90", CS47L90 },
+ { "cs47l91", CS47L91 },
+ { "wm1840", WM1840 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, madera_spi_ids);
+
+static struct spi_driver madera_spi_driver = {
+ .driver = {
+ .name = "madera",
+ .pm = &madera_pm_ops,
+ .of_match_table = of_match_ptr(madera_of_match),
+ },
+ .probe = madera_spi_probe,
+ .remove = madera_spi_remove,
+ .id_table = madera_spi_ids,
+};
+
+module_spi_driver(madera_spi_driver);
+
+MODULE_DESCRIPTION("Madera SPI bus interface");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
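madera_spi_probe() mirrors madera_i2c_probe() apart from the regmap constructors and the bus-specific device lookup; if a third bus appeared, the shared tail could be factored out along these lines (a sketch, not part of the patch):

static int madera_bus_common_probe(struct device *dev, unsigned long type,
				   int irq, struct regmap *regmap,
				   struct regmap *regmap_32bit)
{
	struct madera *madera;

	madera = devm_kzalloc(dev, sizeof(*madera), GFP_KERNEL);
	if (!madera)
		return -ENOMEM;

	madera->regmap = regmap;
	madera->regmap_32bit = regmap_32bit;
	madera->type = type;
	madera->type_name = madera_name_from_type(type);
	madera->dev = dev;
	madera->irq = irq;

	return madera_dev_init(madera);
}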
diff --git a/drivers/mfd/madera.h b/drivers/mfd/madera.h
new file mode 100644
index 000000000000..891b84efb9a7
--- /dev/null
+++ b/drivers/mfd/madera.h
@@ -0,0 +1,44 @@
+/*
+ * MFD internals for Cirrus Logic Madera codecs
+ *
+ * Copyright 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MADERA_MFD_H
+#define MADERA_MFD_H
+
+#include <linux/of.h>
+#include <linux/pm.h>
+
+struct madera;
+
+extern const struct dev_pm_ops madera_pm_ops;
+extern const struct of_device_id madera_of_match[];
+
+int madera_dev_init(struct madera *madera);
+int madera_dev_exit(struct madera *madera);
+
+const char *madera_name_from_type(enum madera_type type);
+
+extern const struct regmap_config cs47l35_16bit_spi_regmap;
+extern const struct regmap_config cs47l35_32bit_spi_regmap;
+extern const struct regmap_config cs47l35_16bit_i2c_regmap;
+extern const struct regmap_config cs47l35_32bit_i2c_regmap;
+int cs47l35_patch(struct madera *madera);
+
+extern const struct regmap_config cs47l85_16bit_spi_regmap;
+extern const struct regmap_config cs47l85_32bit_spi_regmap;
+extern const struct regmap_config cs47l85_16bit_i2c_regmap;
+extern const struct regmap_config cs47l85_32bit_i2c_regmap;
+int cs47l85_patch(struct madera *madera);
+
+extern const struct regmap_config cs47l90_16bit_spi_regmap;
+extern const struct regmap_config cs47l90_32bit_spi_regmap;
+extern const struct regmap_config cs47l90_16bit_i2c_regmap;
+extern const struct regmap_config cs47l90_32bit_i2c_regmap;
+int cs47l90_patch(struct madera *madera);
+#endif
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 36dcd98977d6..2a8369657e38 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -63,16 +63,6 @@
#define RAVE_SP_TX_BUFFER_SIZE \
(RAVE_SP_STX_ETX_SIZE + 2 * RAVE_SP_RX_BUFFER_SIZE)
-#define RAVE_SP_BOOT_SOURCE_GET 0
-#define RAVE_SP_BOOT_SOURCE_SET 1
-
-#define RAVE_SP_RDU2_BOARD_TYPE_RMB 0
-#define RAVE_SP_RDU2_BOARD_TYPE_DEB 1
-
-#define RAVE_SP_BOOT_SOURCE_SD 0
-#define RAVE_SP_BOOT_SOURCE_EMMC 1
-#define RAVE_SP_BOOT_SOURCE_NOR 2
-
/**
* enum rave_sp_deframer_state - Possible state for de-framer
*
@@ -127,14 +117,44 @@ struct rave_sp_checksum {
void (*subroutine)(const u8 *, size_t, u8 *);
};
+struct rave_sp_version {
+ u8 hardware;
+ __le16 major;
+ u8 minor;
+ u8 letter[2];
+} __packed;
+
+struct rave_sp_status {
+ struct rave_sp_version bootloader_version;
+ struct rave_sp_version firmware_version;
+ u16 rdu_eeprom_flag;
+ u16 dds_eeprom_flag;
+ u8 pic_flag;
+ u8 orientation;
+ u32 etc;
+ s16 temp[2];
+ u8 backlight_current[3];
+ u8 dip_switch;
+ u8 host_interrupt;
+ u16 voltage_28;
+ u8 i2c_device_status;
+ u8 power_status;
+ u8 general_status;
+ u8 deprecated1;
+ u8 power_led_status;
+ u8 deprecated2;
+ u8 periph_power_shutoff;
+} __packed;
+
/**
* struct rave_sp_variant_cmds - Variant specific command routines
*
* @translate: Generic to variant specific command mapping routine
- *
+ * @get_status: Variant specific implementation of CMD_GET_STATUS
*/
struct rave_sp_variant_cmds {
int (*translate)(enum rave_sp_command);
+ int (*get_status)(struct rave_sp *sp, struct rave_sp_status *);
};
/**
@@ -180,35 +200,6 @@ struct rave_sp {
const char *part_number_bootloader;
};
-struct rave_sp_version {
- u8 hardware;
- __le16 major;
- u8 minor;
- u8 letter[2];
-} __packed;
-
-struct rave_sp_status {
- struct rave_sp_version bootloader_version;
- struct rave_sp_version firmware_version;
- u16 rdu_eeprom_flag;
- u16 dds_eeprom_flag;
- u8 pic_flag;
- u8 orientation;
- u32 etc;
- s16 temp[2];
- u8 backlight_current[3];
- u8 dip_switch;
- u8 host_interrupt;
- u16 voltage_28;
- u8 i2c_device_status;
- u8 power_status;
- u8 general_status;
- u8 deprecated1;
- u8 power_led_status;
- u8 deprecated2;
- u8 periph_power_shutoff;
-} __packed;
-
static bool rave_sp_id_is_event(u8 code)
{
return (code & 0xF0) == RAVE_SP_EVNT_BASE;
@@ -641,10 +632,14 @@ static int rave_sp_default_cmd_translate(enum rave_sp_command command)
return 0x14;
case RAVE_SP_CMD_SW_WDT:
return 0x1C;
+ case RAVE_SP_CMD_PET_WDT:
+ return 0x1D;
case RAVE_SP_CMD_RESET:
return 0x1E;
case RAVE_SP_CMD_RESET_REASON:
return 0x1F;
+ case RAVE_SP_CMD_RMB_EEPROM:
+ return 0x20;
default:
return -EINVAL;
}
@@ -666,18 +661,44 @@ static const char *devm_rave_sp_version(struct device *dev,
version->letter[1]);
}
-static int rave_sp_get_status(struct rave_sp *sp)
+static int rave_sp_rdu1_get_status(struct rave_sp *sp,
+ struct rave_sp_status *status)
{
- struct device *dev = &sp->serdev->dev;
u8 cmd[] = {
[0] = RAVE_SP_CMD_STATUS,
[1] = 0
};
+
+ return rave_sp_exec(sp, cmd, sizeof(cmd), status, sizeof(*status));
+}
+
+static int rave_sp_emulated_get_status(struct rave_sp *sp,
+ struct rave_sp_status *status)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_GET_FIRMWARE_VERSION,
+ [1] = 0,
+ };
+ int ret;
+
+ ret = rave_sp_exec(sp, cmd, sizeof(cmd), &status->firmware_version,
+ sizeof(status->firmware_version));
+ if (ret)
+ return ret;
+
+ cmd[0] = RAVE_SP_CMD_GET_BOOTLOADER_VERSION;
+ return rave_sp_exec(sp, cmd, sizeof(cmd), &status->bootloader_version,
+ sizeof(status->bootloader_version));
+}
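+
+/*
+ * Note: the emulated path fills in only the two version fields of
+ * struct rave_sp_status; that is enough for rave_sp_get_status() below,
+ * which only consumes the version information.
+ */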
+
+static int rave_sp_get_status(struct rave_sp *sp)
+{
+ struct device *dev = &sp->serdev->dev;
struct rave_sp_status status;
const char *version;
int ret;
- ret = rave_sp_exec(sp, cmd, sizeof(cmd), &status, sizeof(status));
+ ret = sp->variant->cmd.get_status(sp, &status);
if (ret)
return ret;
@@ -707,9 +728,10 @@ static const struct rave_sp_checksum rave_sp_checksum_ccitt = {
};
static const struct rave_sp_variant rave_sp_legacy = {
- .checksum = &rave_sp_checksum_8b2c,
+ .checksum = &rave_sp_checksum_ccitt,
.cmd = {
.translate = rave_sp_default_cmd_translate,
+ .get_status = rave_sp_emulated_get_status,
},
};
@@ -717,6 +739,7 @@ static const struct rave_sp_variant rave_sp_rdu1 = {
.checksum = &rave_sp_checksum_8b2c,
.cmd = {
.translate = rave_sp_rdu1_cmd_translate,
+ .get_status = rave_sp_rdu1_get_status,
},
};
@@ -724,6 +747,7 @@ static const struct rave_sp_variant rave_sp_rdu2 = {
.checksum = &rave_sp_checksum_ccitt,
.cmd = {
.translate = rave_sp_rdu2_cmd_translate,
+ .get_status = rave_sp_emulated_get_status,
},
};
@@ -776,6 +800,13 @@ static int rave_sp_probe(struct serdev_device *serdev)
return ret;
serdev_device_set_baudrate(serdev, baud);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret) {
+ dev_err(dev, "Failed to set parity\n");
+ return ret;
+ }
ret = rave_sp_get_status(sp);
if (ret) {
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
new file mode 100644
index 000000000000..75c8ec659547
--- /dev/null
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// Copyright (C) 2018 ROHM Semiconductors
+//
+// ROHM BD71837MWV PMIC driver
+//
+// Datasheet available from
+// https://www.rohm.com/datasheet/BD71837MWV/bd71837mwv-e
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/rohm-bd718x7.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+/*
+ * gpio_keys.h requires definiton of bool. It is brought in
+ * by above includes. Keep this as last until gpio_keys.h gets fixed.
+ */
+#include <linux/gpio_keys.h>
+
+static const u8 supported_revisions[] = { 0xA2 /* BD71837 */ };
+
+static struct gpio_keys_button button = {
+ .code = KEY_POWER,
+ .gpio = -1,
+ .type = EV_KEY,
+};
+
+static struct gpio_keys_platform_data bd718xx_powerkey_data = {
+ .buttons = &button,
+ .nbuttons = 1,
+ .name = "bd718xx-pwrkey",
+};
+
+static struct mfd_cell bd71837_mfd_cells[] = {
+ {
+ .name = "gpio-keys",
+ .platform_data = &bd718xx_powerkey_data,
+ .pdata_size = sizeof(bd718xx_powerkey_data),
+ },
+ { .name = "bd71837-clk", },
+ { .name = "bd71837-pmic", },
+};
+
+static const struct regmap_irq bd71837_irqs[] = {
+ REGMAP_IRQ_REG(BD71837_INT_SWRST, 0, BD71837_INT_SWRST_MASK),
+ REGMAP_IRQ_REG(BD71837_INT_PWRBTN_S, 0, BD71837_INT_PWRBTN_S_MASK),
+ REGMAP_IRQ_REG(BD71837_INT_PWRBTN_L, 0, BD71837_INT_PWRBTN_L_MASK),
+ REGMAP_IRQ_REG(BD71837_INT_PWRBTN, 0, BD71837_INT_PWRBTN_MASK),
+ REGMAP_IRQ_REG(BD71837_INT_WDOG, 0, BD71837_INT_WDOG_MASK),
+ REGMAP_IRQ_REG(BD71837_INT_ON_REQ, 0, BD71837_INT_ON_REQ_MASK),
+ REGMAP_IRQ_REG(BD71837_INT_STBY_REQ, 0, BD71837_INT_STBY_REQ_MASK),
+};
+
+static struct regmap_irq_chip bd71837_irq_chip = {
+ .name = "bd71837-irq",
+ .irqs = bd71837_irqs,
+ .num_irqs = ARRAY_SIZE(bd71837_irqs),
+ .num_regs = 1,
+ .irq_reg_stride = 1,
+ .status_base = BD71837_REG_IRQ,
+ .mask_base = BD71837_REG_MIRQ,
+ .ack_base = BD71837_REG_IRQ,
+ .init_ack_masked = true,
+ .mask_invert = false,
+};
+
+static const struct regmap_range pmic_status_range = {
+ .range_min = BD71837_REG_IRQ,
+ .range_max = BD71837_REG_POW_STATE,
+};
+
+static const struct regmap_access_table volatile_regs = {
+ .yes_ranges = &pmic_status_range,
+ .n_yes_ranges = 1,
+};
+
+static const struct regmap_config bd71837_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &volatile_regs,
+ .max_register = BD71837_MAX_REGISTER - 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int bd71837_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct bd71837 *bd71837;
+ int ret, i;
+ unsigned int val;
+
+ bd71837 = devm_kzalloc(&i2c->dev, sizeof(struct bd71837), GFP_KERNEL);
+
+ if (!bd71837)
+ return -ENOMEM;
+
+ bd71837->chip_irq = i2c->irq;
+
+ if (!bd71837->chip_irq) {
+ dev_err(&i2c->dev, "No IRQ configured\n");
+ return -EINVAL;
+ }
+
+ bd71837->dev = &i2c->dev;
+ dev_set_drvdata(&i2c->dev, bd71837);
+
+ bd71837->regmap = devm_regmap_init_i2c(i2c, &bd71837_regmap_config);
+ if (IS_ERR(bd71837->regmap)) {
+ dev_err(&i2c->dev, "regmap initialization failed\n");
+ return PTR_ERR(bd71837->regmap);
+ }
+
+ ret = regmap_read(bd71837->regmap, BD71837_REG_REV, &val);
+ if (ret) {
+ dev_err(&i2c->dev, "Read BD71837_REG_DEVICE failed\n");
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(supported_revisions); i++)
+ if (supported_revisions[i] == val)
+ break;
+
+ if (i == ARRAY_SIZE(supported_revisions)) {
+ dev_err(&i2c->dev, "Unsupported chip revision\n");
+ return -ENODEV;
+ }
+
+ ret = devm_regmap_add_irq_chip(&i2c->dev, bd71837->regmap,
+ bd71837->chip_irq, IRQF_ONESHOT, 0,
+ &bd71837_irq_chip, &bd71837->irq_data);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to add irq_chip\n");
+ return ret;
+ }
+
+ /* Configure short press to 10 milliseconds */
+ ret = regmap_update_bits(bd71837->regmap,
+ BD71837_REG_PWRONCONFIG0,
+ BD718XX_PWRBTN_PRESS_DURATION_MASK,
+ BD718XX_PWRBTN_SHORT_PRESS_10MS);
+ if (ret) {
+ dev_err(&i2c->dev,
+ "Failed to configure button short press timeout\n");
+ return ret;
+ }
+
+ /* Configure long press to 10 seconds */
+ ret = regmap_update_bits(bd71837->regmap,
+ BD71837_REG_PWRONCONFIG1,
+ BD718XX_PWRBTN_PRESS_DURATION_MASK,
+ BD718XX_PWRBTN_LONG_PRESS_10S);
+
+ if (ret) {
+ dev_err(&i2c->dev,
+ "Failed to configure button long press timeout\n");
+ return ret;
+ }
+
+ ret = regmap_irq_get_virq(bd71837->irq_data, BD71837_INT_PWRBTN_S);
+
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to get the IRQ\n");
+ return ret;
+ }
+
+ button.irq = ret;
+
+ ret = devm_mfd_add_devices(bd71837->dev, PLATFORM_DEVID_AUTO,
+ bd71837_mfd_cells,
+ ARRAY_SIZE(bd71837_mfd_cells), NULL, 0,
+ regmap_irq_get_domain(bd71837->irq_data));
+ if (ret)
+ dev_err(&i2c->dev, "Failed to create subdevices\n");
+
+ return ret;
+}
+
+static const struct of_device_id bd71837_of_match[] = {
+ { .compatible = "rohm,bd71837", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd71837_of_match);
+
+static struct i2c_driver bd71837_i2c_driver = {
+ .driver = {
+ .name = "rohm-bd718x7",
+ .of_match_table = bd71837_of_match,
+ },
+ .probe = bd71837_i2c_probe,
+};
+
+static int __init bd71837_i2c_init(void)
+{
+ return i2c_add_driver(&bd71837_i2c_driver);
+}
+
+/* Initialise early so consumer devices can complete system boot */
+subsys_initcall(bd71837_i2c_init);
+
+static void __exit bd71837_i2c_exit(void)
+{
+ i2c_del_driver(&bd71837_i2c_driver);
+}
+module_exit(bd71837_i2c_exit);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("ROHM BD71837 Power Management IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index ca6b80d08ffc..9613b4257302 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -146,6 +146,7 @@ static const struct of_device_id sec_dt_match[] = {
/* Sentinel */
},
};
+MODULE_DEVICE_TABLE(of, sec_dt_match);
#endif
static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 2a87b0d2f21f..a530972c5a7e 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -715,6 +715,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
smdev->pdev.name = name;
smdev->pdev.id = sm->pdev_id;
smdev->pdev.dev.parent = sm->dev;
+ smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
if (res_count) {
smdev->pdev.resource = (struct resource *)(smdev+1);
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 47012c0899cd..7a30546880a4 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -209,14 +209,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
* The TSC_ADC_SS controller design assumes the OCP clock is
* at least 6x faster than the ADC clock.
*/
- clk = clk_get(&pdev->dev, "adc_tsc_fck");
+ clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get TSC fck\n");
err = PTR_ERR(clk);
goto err_disable_clk;
}
clock_rate = clk_get_rate(clk);
- clk_put(clk);
tscadc->clk_div = clock_rate / ADC_CLK;
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
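Editor's note: the clk change above is a lifetime cleanup — with plain clk_get() every exit path must pair it with clk_put(), while devm_clk_get() ties the reference to the device so it is dropped automatically on probe failure or unbind. A sketch of the managed pattern, assuming a hypothetical probe helper and the "adc_tsc_fck" name used above:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_get_ocp_rate(struct platform_device *pdev,
				unsigned long *rate)
{
	struct clk *clk;

	/* Reference is released automatically when the device goes away */
	clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*rate = clk_get_rate(clk);
	return 0;	/* no clk_put() needed on any path */
}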
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 5d5888ee2966..22bd6525e09c 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -302,6 +302,10 @@ static int wm8994_set_pdata_from_of(struct wm8994 *wm8994)
if (of_find_property(np, "wlf,ldoena-always-driven", NULL))
pdata->ldo_ena_always_driven = true;
+ pdata->spkmode_pu = of_property_read_bool(np, "wlf,spkmode-pu");
+
+ pdata->csnaddr_pd = of_property_read_bool(np, "wlf,csnaddr-pd");
+
pdata->ldo[0].enable = of_get_named_gpio(np, "wlf,ldo1ena", 0);
if (pdata->ldo[0].enable < 0)
pdata->ldo[0].enable = 0;
@@ -513,14 +517,15 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
dev_err(wm8994->dev, "Unknown device type %d\n", wm8994->type);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_enable;
}
ret = regmap_reinit_cache(wm8994->regmap, regmap_config);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to reinit register cache: %d\n",
ret);
- return ret;
+ goto err_enable;
}
/* Explicitly put the device into reset in case regulators
@@ -531,7 +536,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET));
if (ret != 0) {
dev_err(wm8994->dev, "Failed to reset device: %d\n", ret);
- return ret;
+ goto err_enable;
}
if (regmap_patch) {
@@ -540,7 +545,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
if (ret != 0) {
dev_err(wm8994->dev, "Failed to register patch: %d\n",
ret);
- goto err;
+ goto err_enable;
}
}
@@ -559,6 +564,8 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
if (pdata->spkmode_pu)
pulls |= WM8994_SPKMODE_PU;
+ if (pdata->csnaddr_pd)
+ pulls |= WM8994_CSNADDR_PD;
/* Disable unneeded pulls */
wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c
index cb78c98bc78d..2feb4347d67f 100644
--- a/drivers/misc/aspeed-lpc-snoop.c
+++ b/drivers/misc/aspeed-lpc-snoop.c
@@ -16,12 +16,15 @@
#include <linux/bitops.h>
#include <linux/interrupt.h>
+#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/poll.h>
#include <linux/regmap.h>
#define DEVICE_NAME "aspeed-lpc-snoop"
@@ -59,20 +62,70 @@ struct aspeed_lpc_snoop_model_data {
unsigned int has_hicrb_ensnp;
};
+struct aspeed_lpc_snoop_channel {
+ struct kfifo fifo;
+ wait_queue_head_t wq;
+ struct miscdevice miscdev;
+};
+
struct aspeed_lpc_snoop {
struct regmap *regmap;
int irq;
- struct kfifo snoop_fifo[NUM_SNOOP_CHANNELS];
+ struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
+};
+
+static struct aspeed_lpc_snoop_channel *snoop_file_to_chan(struct file *file)
+{
+ return container_of(file->private_data,
+ struct aspeed_lpc_snoop_channel,
+ miscdev);
+}
+
+static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+ unsigned int copied;
+ int ret = 0;
+
+ if (kfifo_is_empty(&chan->fifo)) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_event_interruptible(chan->wq,
+ !kfifo_is_empty(&chan->fifo));
+ if (ret == -ERESTARTSYS)
+ return -EINTR;
+ }
+ ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
+
+ return ret ? ret : copied;
+}
+
+static unsigned int snoop_file_poll(struct file *file,
+ struct poll_table_struct *pt)
+{
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+
+ poll_wait(file, &chan->wq, pt);
+ return !kfifo_is_empty(&chan->fifo) ? POLLIN : 0;
+}
+
+static const struct file_operations snoop_fops = {
+ .owner = THIS_MODULE,
+ .read = snoop_file_read,
+ .poll = snoop_file_poll,
+ .llseek = noop_llseek,
};
/* Save a byte to a FIFO and discard the oldest byte if FIFO is full */
-static void put_fifo_with_discard(struct kfifo *fifo, u8 val)
+static void put_fifo_with_discard(struct aspeed_lpc_snoop_channel *chan, u8 val)
{
- if (!kfifo_initialized(fifo))
+ if (!kfifo_initialized(&chan->fifo))
return;
- if (kfifo_is_full(fifo))
- kfifo_skip(fifo);
- kfifo_put(fifo, val);
+ if (kfifo_is_full(&chan->fifo))
+ kfifo_skip(&chan->fifo);
+ kfifo_put(&chan->fifo, val);
+ wake_up_interruptible(&chan->wq);
}
static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
@@ -97,12 +150,12 @@ static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
if (reg & HICR6_STR_SNP0W) {
u8 val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT;
- put_fifo_with_discard(&lpc_snoop->snoop_fifo[0], val);
+ put_fifo_with_discard(&lpc_snoop->chan[0], val);
}
if (reg & HICR6_STR_SNP1W) {
u8 val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT;
- put_fifo_with_discard(&lpc_snoop->snoop_fifo[1], val);
+ put_fifo_with_discard(&lpc_snoop->chan[1], val);
}
return IRQ_HANDLED;
@@ -139,12 +192,22 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
const struct aspeed_lpc_snoop_model_data *model_data =
of_device_get_match_data(dev);
+ init_waitqueue_head(&lpc_snoop->chan[channel].wq);
/* Create FIFO data structure */
- rc = kfifo_alloc(&lpc_snoop->snoop_fifo[channel],
+ rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
SNOOP_FIFO_SIZE, GFP_KERNEL);
if (rc)
return rc;
+ lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
+ lpc_snoop->chan[channel].miscdev.name =
+ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
+ lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
+ lpc_snoop->chan[channel].miscdev.parent = dev;
+ rc = misc_register(&lpc_snoop->chan[channel].miscdev);
+ if (rc)
+ return rc;
+
/* Enable LPC snoop channel at requested port */
switch (channel) {
case 0:
@@ -191,7 +254,8 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
return;
}
- kfifo_free(&lpc_snoop->snoop_fifo[channel]);
+ kfifo_free(&lpc_snoop->chan[channel].fifo);
+ misc_deregister(&lpc_snoop->chan[channel].miscdev);
}
static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
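Editor's note: with the miscdevice glue above, each snoop channel shows up as an ordinary character device — read() blocks until a byte lands in the kfifo (or returns -EAGAIN under O_NONBLOCK) and poll() reports POLLIN while the FIFO is non-empty. A hedged user-space sketch of a consumer, assuming the /dev/aspeed-lpc-snoop0 node produced by the DEVICE_NAME + channel naming above:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/aspeed-lpc-snoop0", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	unsigned char code;

	if (fd < 0)
		return 1;

	/* Wait for the next snooped POST code, then print it */
	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		if (read(fd, &code, 1) == 1)
			printf("POST code: 0x%02x\n", code);
	}
	close(fd);
	return 0;
}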
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index e8f1d4bb806a..da445223f4cc 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -80,7 +80,7 @@ static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
0xFC, 0);
}
-int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
rtsx_pci_write_register(pcr, MSGTXDATA0,
MASK_8_BIT_DEF, (u8) (latency & 0xFF));
@@ -143,7 +143,7 @@ int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
return 0;
}
-void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
+static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
{
if (pcr->ops->set_l1off_cfg_sub_d0)
pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
@@ -162,7 +162,7 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}
-void rtsx_pm_full_on(struct rtsx_pcr *pcr)
+static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
if (pcr->ops->full_on)
pcr->ops->full_on(pcr);
@@ -967,13 +967,13 @@ static void rtsx_pci_card_detect(struct work_struct *work)
pcr->slots[RTSX_MS_CARD].p_dev);
}
-void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
if (pcr->ops->process_ocp)
pcr->ops->process_ocp(pcr);
}
-int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
{
if (pcr->option.ocp_en)
rtsx_pci_process_ocp(pcr);
@@ -1094,7 +1094,7 @@ static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
rtsx_enable_aspm(pcr);
}
-void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
+static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
if (pcr->ops->power_saving)
pcr->ops->power_saving(pcr);
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 93397cb05b15..3ce933707828 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -33,11 +33,3 @@ config CXL
CAPI adapters are found in POWER8 based systems.
If unsure, say N.
-
-config CXL_BIMODAL
- bool "Support for bi-modal CAPI cards"
- depends on HOTPLUG_PCI_POWERNV = y && CXL || HOTPLUG_PCI_POWERNV = m && CXL = m
- default y
- help
- Select this option to enable support for bi-modal CAPI cards, such as
- the Mellanox CX-4.
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 502d41fc9ea5..5eea61b9584f 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -4,7 +4,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o pci.o trace.o
-cxl-y += vphb.o phb.o api.o cxllib.o
+cxl-y += vphb.o api.o cxllib.o
cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
cxl-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_CXL) += cxl.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 753b1a698fc4..750470ef2049 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <misc/cxl.h>
-#include <linux/msi.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/sched/mm.h>
@@ -67,10 +66,8 @@ static struct file *cxl_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
- struct inode *inode = NULL;
+ struct inode *inode;
int rc;
/* strongly inspired by anon_inode_getfile() */
@@ -91,27 +88,15 @@ static struct file *cxl_getfile(const char *name,
goto err_fs;
}
- file = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
- if (!path.dentry)
+ file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
+ if (IS_ERR(file))
goto err_inode;
- path.mnt = mntget(cxl_vfs_mount);
- d_instantiate(path.dentry, inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
- if (IS_ERR(file))
- goto err_dput;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
return file;
-err_dput:
- path_put(&path);
err_inode:
iput(inode);
err_fs:
@@ -182,21 +167,6 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
return 0;
}
-int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
-{
- if (*ctx == NULL || *afu_irq == 0) {
- *afu_irq = 1;
- *ctx = cxl_get_context(pdev);
- } else {
- (*afu_irq)++;
- if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
- *ctx = list_next_entry(*ctx, extra_irq_contexts);
- *afu_irq = 1;
- }
- }
- return cxl_find_afu_irq(*ctx, *afu_irq);
-}
-/* Exported via cxl_base */
int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
@@ -324,7 +294,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
kernel = false;
- ctx->real_mode = false;
/* acquire a reference to the task's mm */
ctx->mm = get_task_mm(current);
@@ -388,24 +357,6 @@ void cxl_set_master(struct cxl_context *ctx)
}
EXPORT_SYMBOL_GPL(cxl_set_master);
-int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
-{
- if (ctx->status == STARTED) {
- /*
- * We could potentially update the PE and issue an update LLCMD
- * to support this, but it doesn't seem to have a good use case
- * since it's trivial to just create a second kernel context
- * with different translation modes, so until someone convinces
- * me otherwise:
- */
- return -EBUSY;
- }
-
- ctx->real_mode = real_mode;
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_set_translation_mode);
-
/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
@@ -587,100 +538,3 @@ ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
-
-int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
-{
- struct cxl_afu *afu = cxl_pci_to_afu(dev);
- if (IS_ERR(afu))
- return -ENODEV;
-
- if (irqs > afu->adapter->user_irqs)
- return -EINVAL;
-
- /* Limit user_irqs to prevent the user increasing this via sysfs */
- afu->adapter->user_irqs = irqs;
- afu->irqs_max = irqs;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);
-
-int cxl_get_max_irqs_per_process(struct pci_dev *dev)
-{
- struct cxl_afu *afu = cxl_pci_to_afu(dev);
- if (IS_ERR(afu))
- return -ENODEV;
-
- return afu->irqs_max;
-}
-EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);
-
-/*
- * This is a special interrupt allocation routine called from the PHB's MSI
- * setup function. When capi interrupts are allocated in this manner they must
- * still be associated with a running context, but since the MSI APIs have no
- * way to specify this we use the default context associated with the device.
- *
- * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
- * interrupt number, so in order to overcome this their driver informs us of
- * the restriction by setting the maximum interrupts per context, and we
- * allocate additional contexts as necessary so that we can keep the AFU
- * interrupt number within the supported range.
- */
-int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- struct cxl_context *ctx, *new_ctx, *default_ctx;
- int remaining;
- int rc;
-
- ctx = default_ctx = cxl_get_context(pdev);
- if (WARN_ON(!default_ctx))
- return -ENODEV;
-
- remaining = nvec;
- while (remaining > 0) {
- rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
- if (rc) {
- pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
- return rc;
- }
- remaining -= ctx->afu->irqs_max;
-
- if (ctx != default_ctx && default_ctx->status == STARTED) {
- WARN_ON(cxl_start_context(ctx,
- be64_to_cpu(default_ctx->elem->common.wed),
- NULL));
- }
-
- if (remaining > 0) {
- new_ctx = cxl_dev_context_init(pdev);
- if (IS_ERR(new_ctx)) {
- pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
- return -ENOSPC;
- }
- list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
- ctx = new_ctx;
- }
- }
-
- return 0;
-}
-/* Exported via cxl_base */
-
-void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct cxl_context *ctx, *pos, *tmp;
-
- ctx = cxl_get_context(pdev);
- if (WARN_ON(!ctx))
- return;
-
- cxl_free_afu_irqs(ctx);
- list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
- cxl_stop_context(pos);
- cxl_free_afu_irqs(pos);
- list_del(&pos->extra_irq_contexts);
- cxl_release_context(pos);
- }
-}
-/* Exported via cxl_base */
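Editor's note: the cxl_getfile() rework above leans on alloc_file_pseudo(), which replaces the open-coded qstr/dentry/path sequence and shrinks the error unwind to a single iput(), since the inode reference is only consumed on success. A condensed sketch of the resulting shape, with hypothetical example_ naming and an inode/mount obtained as in the code above:

#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>

static struct file *example_getfile(struct inode *inode, struct vfsmount *mnt,
				    const char *name,
				    const struct file_operations *fops,
				    void *priv, int flags)
{
	struct file *file;

	file = alloc_file_pseudo(inode, mnt, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		iput(inode);	/* not consumed on failure */
		return file;
	}

	file->private_data = priv;
	return file;
}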
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index cd54ce6f6230..7557835cdfcd 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -106,89 +106,6 @@ int cxl_update_properties(struct device_node *dn,
}
EXPORT_SYMBOL_GPL(cxl_update_properties);
-/*
- * API calls into the driver that may be called from the PHB code and must be
- * built in.
- */
-bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
-{
- bool ret;
- struct cxl_calls *calls;
-
- calls = cxl_calls_get();
- if (!calls)
- return false;
-
- ret = calls->cxl_pci_associate_default_context(dev, afu);
-
- cxl_calls_put(calls);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context);
-
-void cxl_pci_disable_device(struct pci_dev *dev)
-{
- struct cxl_calls *calls;
-
- calls = cxl_calls_get();
- if (!calls)
- return;
-
- calls->cxl_pci_disable_device(dev);
-
- cxl_calls_put(calls);
-}
-EXPORT_SYMBOL_GPL(cxl_pci_disable_device);
-
-int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
-{
- int ret;
- struct cxl_calls *calls;
-
- calls = cxl_calls_get();
- if (!calls)
- return -EBUSY;
-
- ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);
-
- cxl_calls_put(calls);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);
-
-int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- int ret;
- struct cxl_calls *calls;
-
- calls = cxl_calls_get();
- if (!calls)
- return false;
-
- ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type);
-
- cxl_calls_put(calls);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs);
-
-void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct cxl_calls *calls;
-
- calls = cxl_calls_get();
- if (!calls)
- return;
-
- calls->cxl_cx4_teardown_msi_irqs(pdev);
-
- cxl_calls_put(calls);
-}
-EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs);
-
static int __init cxl_base_init(void)
{
struct device_node *np;
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index c6ec872800a2..5fe529b43ebe 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -74,7 +74,6 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
ctx->pending_afu_err = false;
INIT_LIST_HEAD(&ctx->irq_names);
- INIT_LIST_HEAD(&ctx->extra_irq_contexts);
/*
* When we have to destroy all contexts in cxl_context_detach_all() we
@@ -96,7 +95,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
*/
mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
- i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
+ i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
ctx->afu->num_procs, GFP_NOWAIT);
idr_preload_end();
mutex_unlock(&afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 918d4fb742d1..d1d927ccb589 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -93,11 +93,6 @@ static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170};
-/* XSL registers (Mellanox CX4) */
-static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100};
-static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108};
-static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158};
-static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168};
/* PSL registers - CAIA 2 */
static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020};
static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110};
@@ -613,7 +608,6 @@ struct cxl_context {
bool pe_inserted;
bool master;
bool kernel;
- bool real_mode;
bool pending_irq;
bool pending_fault;
bool pending_afu_err;
@@ -624,14 +618,6 @@ struct cxl_context {
struct rcu_head rcu;
- /*
- * Only used when more interrupts are allocated via
- * pci_enable_msix_range than are supported in the default context, to
- * use additional contexts to overcome the limitation. i.e. Mellanox
- * CX4 only:
- */
- struct list_head extra_irq_contexts;
-
struct mm_struct *mm;
u16 tidr;
@@ -704,7 +690,6 @@ struct cxl {
struct bin_attribute cxl_attr;
int adapter_num;
int user_irqs;
- int min_pe;
u64 ps_size;
u16 psl_rev;
u16 base_image;
@@ -865,32 +850,12 @@ static inline bool cxl_is_power9(void)
return false;
}
-static inline bool cxl_is_power9_dd1(void)
-{
- if ((pvr_version_is(PVR_POWER9)) &&
- cpu_has_feature(CPU_FTR_POWER9_DD1))
- return true;
- return false;
-}
-
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count);
-/* Internal functions wrapped in cxl_base to allow PHB to call them */
-bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
-void _cxl_pci_disable_device(struct pci_dev *dev);
-int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
-int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
-void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
struct cxl_calls {
void (*cxl_slbia)(struct mm_struct *mm);
- bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu);
- void (*cxl_pci_disable_device)(struct pci_dev *dev);
- int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
- int (*cxl_cx4_setup_msi_irqs)(struct pci_dev *pdev, int nvec, int type);
- void (*cxl_cx4_teardown_msi_irqs)(struct pci_dev *pdev);
-
struct module *owner;
};
int register_cxl_calls(struct cxl_calls *calls);
@@ -955,7 +920,6 @@ int cxl_debugfs_afu_add(struct cxl_afu *afu);
void cxl_debugfs_afu_remove(struct cxl_afu *afu);
void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir);
void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir);
-void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir);
void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir);
void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir);
@@ -998,11 +962,6 @@ static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter,
{
}
-static inline void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter,
- struct dentry *dir)
-{
-}
-
static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
{
}
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 0bc7c31cf739..5a3f91255258 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -102,10 +102,6 @@ int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg)
rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl);
if (rc)
return rc;
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- /* workaround for DD1 - nbwind = capiind */
- cfg->dsnctl |= ((u64)0x02 << (63-47));
- }
cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION;
cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE;
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 1643850d2302..a1921d81593a 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -58,11 +58,6 @@ void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
}
-void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir)
-{
- debugfs_create_io_x64("fec", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_XSL_FEC));
-}
-
int cxl_debugfs_adapter_add(struct cxl *adapter)
{
struct dentry *dir;
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 70dbb6de102c..dc7b34174f85 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -33,7 +33,7 @@ static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
* This finds a free SSTE for the given SLB, or returns NULL if it's already in
* the segment table.
*/
-static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
+static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
struct copro_slb *slb)
{
struct cxl_sste *primary, *sste, *ret = NULL;
@@ -134,7 +134,7 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx,
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
- unsigned flt = 0;
+ vm_fault_t flt = 0;
int result;
unsigned long access, flags, inv_flags = 0;
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 4644f16606a3..3bc0c15d4d85 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -623,9 +623,6 @@ static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u
{
pr_devel("in %s\n", __func__);
- if (ctx->real_mode)
- return -EPERM;
-
ctx->kernel = kernel;
if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
return attach_afu_directed(ctx, wed, amr);
@@ -916,11 +913,6 @@ static int afu_properties_look_ok(struct cxl_afu *afu)
return -EINVAL;
}
- if (afu->crs_len < 0) {
- dev_err(&afu->dev, "Unexpected configuration record size value\n");
- return -EINVAL;
- }
-
return 0;
}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index c1ba0d42cbc8..f35406be465a 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -104,11 +104,6 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
static struct cxl_calls cxl_calls = {
.cxl_slbia = cxl_slbia_core,
- .cxl_pci_associate_default_context = _cxl_pci_associate_default_context,
- .cxl_pci_disable_device = _cxl_pci_disable_device,
- .cxl_next_msi_hwirq = _cxl_next_msi_hwirq,
- .cxl_cx4_setup_msi_irqs = _cxl_cx4_setup_msi_irqs,
- .cxl_cx4_teardown_msi_irqs = _cxl_cx4_teardown_msi_irqs,
.owner = THIS_MODULE,
};
@@ -287,7 +282,7 @@ int cxl_adapter_context_get(struct cxl *adapter)
int rc;
rc = atomic_inc_unless_negative(&adapter->contexts_num);
- return rc >= 0 ? 0 : -EBUSY;
+ return rc ? 0 : -EBUSY;
}
void cxl_adapter_context_put(struct cxl *adapter)
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 98f867fcef24..c9d5d82dce8e 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -605,6 +605,7 @@ u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
sr |= CXL_PSL_SR_An_MP;
if (mfspr(SPRN_LPCR) & LPCR_TC)
sr |= CXL_PSL_SR_An_TC;
+
if (kernel) {
if (!real_mode)
sr |= CXL_PSL_SR_An_R;
@@ -629,7 +630,7 @@ u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
static u64 calculate_sr(struct cxl_context *ctx)
{
- return cxl_calculate_sr(ctx->master, ctx->kernel, ctx->real_mode,
+ return cxl_calculate_sr(ctx->master, ctx->kernel, false,
cxl_is_power9());
}
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 429d6de1dde7..b66d832d3233 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -55,8 +55,6 @@
pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
pci_write_config_byte(dev, vsec + 0xa, val)
-#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \
- pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK 0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB 0x40
@@ -465,23 +463,21 @@ int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
/* nMMU_ID defaults to: b’000001001’ */
xsl_dsnctl |= ((u64)0x09 << (63-28));
- if (!(cxl_is_power9_dd1())) {
- /*
- * Used to identify CAPI packets which should be sorted into
- * the Non-Blocking queues by the PHB. This field should match
- * the PHB PBL_NBW_CMPM register
- * nbwind=0x03, bits [57:58], must include capi indicator.
- * Not supported on P9 DD1.
- */
- xsl_dsnctl |= (nbwind << (63-55));
+ /*
+ * Used to identify CAPI packets which should be sorted into
+ * the Non-Blocking queues by the PHB. This field should match
+ * the PHB PBL_NBW_CMPM register
+ * nbwind=0x03, bits [57:58], must include capi indicator.
+ * Not supported on P9 DD1.
+ */
+ xsl_dsnctl |= (nbwind << (63-55));
- /*
- * Upper 16b address bits of ASB_Notify messages sent to the
- * system. Need to match the PHB’s ASN Compare/Mask Register.
- * Not supported on P9 DD1.
- */
- xsl_dsnctl |= asnind;
- }
+ /*
+ * Upper 16b address bits of ASB_Notify messages sent to the
+ * system. Need to match the PHB’s ASN Compare/Mask Register.
+ * Not supported on P9 DD1.
+ */
+ xsl_dsnctl |= asnind;
*reg = xsl_dsnctl;
return 0;
@@ -539,15 +535,8 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
/* Snoop machines */
cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
- if (cxl_is_power9_dd1()) {
- /* Disabling deadlock counter CAR */
- cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL);
- /* Enable NORST */
- cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL);
- } else {
- /* Enable NORST and DD2 features */
- cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
- }
+ /* Enable NORST and DD2 features */
+ cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
/*
* Check if PSL has data-cache. We need to flush adapter datacache
@@ -595,27 +584,7 @@ static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci
return 0;
}
-static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_dev *dev)
-{
- u64 xsl_dsnctl;
- u64 chipid;
- u32 phb_index;
- u64 capp_unit_id;
- int rc;
-
- rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
-
- /* Tell XSL where to route data to */
- xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
- xsl_dsnctl |= (capp_unit_id << (63-13));
- cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);
-
- return 0;
-}
-
-/* PSL & XSL */
+/* PSL */
#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
/* For the PSL this is a multiple for 0 < n <= 7: */
@@ -627,21 +596,6 @@ static void write_timebase_ctrl_psl8(struct cxl *adapter)
TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}
-/* XSL */
-#define TBSYNC_ENA (1ULL << 63)
-/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */
-#define XSL_2000_CLOCKS 1
-#define XSL_4000_CLOCKS 2
-#define XSL_8000_CLOCKS 3
-
-static void write_timebase_ctrl_xsl(struct cxl *adapter)
-{
- cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
- TBSYNC_ENA |
- TBSYNC_CAL(3) |
- TBSYNC_CNT(XSL_4000_CLOCKS));
-}
-
static u64 timebase_read_psl9(struct cxl *adapter)
{
return cxl_p1_read(adapter, CXL_PSL9_Timebase);
@@ -652,11 +606,6 @@ static u64 timebase_read_psl8(struct cxl *adapter)
return cxl_p1_read(adapter, CXL_PSL_Timebase);
}
-static u64 timebase_read_xsl(struct cxl *adapter)
-{
- return cxl_p1_read(adapter, CXL_XSL_Timebase);
-}
-
static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
struct device_node *np;
@@ -800,234 +749,36 @@ static int setup_cxl_bars(struct pci_dev *dev)
return 0;
}
-#ifdef CONFIG_CXL_BIMODAL
-
-struct cxl_switch_work {
- struct pci_dev *dev;
- struct work_struct work;
- int vsec;
- int mode;
-};
-
-static void switch_card_to_cxl(struct work_struct *work)
+/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
+static int switch_card_to_cxl(struct pci_dev *dev)
{
- struct cxl_switch_work *switch_work =
- container_of(work, struct cxl_switch_work, work);
- struct pci_dev *dev = switch_work->dev;
- struct pci_bus *bus = dev->bus;
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct pci_dev *bridge;
- struct pnv_php_slot *php_slot;
- unsigned int devfn;
+ int vsec;
u8 val;
int rc;
- dev_info(&bus->dev, "cxl: Preparing for mode switch...\n");
- bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev,
- bus_list);
- if (!bridge) {
- dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n");
- goto err_dev_put;
- }
-
- php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge));
- if (!php_slot) {
- dev_err(&bus->dev, "cxl: Failed to find slot hotplug "
- "information. You may need to upgrade "
- "skiboot. Aborting.\n");
- goto err_dev_put;
- }
-
- rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val);
- if (rc) {
- dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc);
- goto err_dev_put;
- }
- devfn = dev->devfn;
-
- /* Release the reference obtained in cxl_check_and_switch_mode() */
- pci_dev_put(dev);
-
- dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n");
- pci_lock_rescan_remove();
- pci_hp_remove_devices(bridge->subordinate);
- pci_unlock_rescan_remove();
-
- /* Switch the CXL protocol on the card */
- if (switch_work->mode == CXL_BIMODE_CXL) {
- dev_info(&bus->dev, "cxl: Switching card to CXL mode\n");
- val &= ~CXL_VSEC_PROTOCOL_MASK;
- val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
- rc = pnv_cxl_enable_phb_kernel_api(hose, true);
- if (rc) {
- dev_err(&bus->dev, "cxl: Failed to enable kernel API"
- " on real PHB, aborting\n");
- goto err_free_work;
- }
- } else {
- dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n");
- goto err_free_work;
- }
-
- rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val);
- if (rc) {
- dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc);
- goto err_free_work;
- }
+ dev_info(&dev->dev, "switch card to CXL\n");
- /*
- * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states
- * we must wait 100ms after this mode switch before touching PCIe config
- * space.
- */
- msleep(100);
-
- /*
- * Hot reset to cause the card to come back in cxl mode. A
- * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support
- * in skiboot, so we use a hot reset instead.
- *
- * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is
- * guaranteed to sit directly under the root port, and setting the reset
- * state on a device directly under the root port is equivalent to doing
- * it on the root port itself.
- */
- dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n");
- pci_set_pcie_reset_state(bridge, pcie_hot_reset);
- pci_set_pcie_reset_state(bridge, pcie_deassert_reset);
-
- dev_dbg(&bus->dev, "cxl: Offlining slot\n");
- rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE);
- if (rc) {
- dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc);
- goto err_free_work;
- }
-
- dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n");
- rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE);
- if (rc) {
- dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc);
- goto err_free_work;
- }
-
- pci_lock_rescan_remove();
- pci_hp_add_devices(bridge->subordinate);
- pci_unlock_rescan_remove();
-
- dev_info(&bus->dev, "cxl: CAPI mode switch completed\n");
- kfree(switch_work);
- return;
-
-err_dev_put:
- /* Release the reference obtained in cxl_check_and_switch_mode() */
- pci_dev_put(dev);
-err_free_work:
- kfree(switch_work);
-}
-
-int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec)
-{
- struct cxl_switch_work *work;
- u8 val;
- int rc;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
+ if (!(vsec = find_cxl_vsec(dev))) {
+ dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
return -ENODEV;
-
- if (!vsec) {
- vsec = find_cxl_vsec(dev);
- if (!vsec) {
- dev_info(&dev->dev, "CXL VSEC not found\n");
- return -ENODEV;
- }
}
- rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
- if (rc) {
- dev_err(&dev->dev, "Failed to read current mode control: %i", rc);
+ if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
+ dev_err(&dev->dev, "failed to read current mode control: %i", rc);
return rc;
}
-
- if (mode == CXL_BIMODE_PCI) {
- if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
- dev_info(&dev->dev, "Card is already in PCI mode\n");
- return 0;
- }
- /*
- * TODO: Before it's safe to switch the card back to PCI mode
- * we need to disable the CAPP and make sure any cachelines the
- * card holds have been flushed out. Needs skiboot support.
- */
- dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n");
- return -EIO;
- }
-
- if (val & CXL_VSEC_PROTOCOL_ENABLE) {
- dev_info(&dev->dev, "Card is already in CXL mode\n");
- return 0;
+ val &= ~CXL_VSEC_PROTOCOL_MASK;
+ val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
+ if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
+ dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
+ return rc;
}
-
- dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread "
- "to switch to CXL mode\n");
-
- work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL);
- if (!work)
- return -ENOMEM;
-
- pci_dev_get(dev);
- work->dev = dev;
- work->vsec = vsec;
- work->mode = mode;
- INIT_WORK(&work->work, switch_card_to_cxl);
-
- schedule_work(&work->work);
-
/*
- * We return a failure now to abort the driver init. Once the
- * link has been cycled and the card is in cxl mode we will
- * come back (possibly using the generic cxl driver), but
- * return success as the card should then be in cxl mode.
- *
- * TODO: What if the card comes back in PCI mode even after
- * the switch? Don't want to spin endlessly.
+ * The CAIA spec (v0.12, Section 11.6 Bi-modal Device Support) states
+ * we must wait 100ms after this mode switch before touching
+ * PCIe config space.
*/
- return -EBUSY;
-}
-EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);
-
-#endif /* CONFIG_CXL_BIMODAL */
-
-static int setup_cxl_protocol_area(struct pci_dev *dev)
-{
- u8 val;
- int rc;
- int vsec = find_cxl_vsec(dev);
-
- if (!vsec) {
- dev_info(&dev->dev, "CXL VSEC not found\n");
- return -ENODEV;
- }
-
- rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
- if (rc) {
- dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
- return rc;
- }
-
- if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
- dev_err(&dev->dev, "Card not in CAPI mode!\n");
- return -EIO;
- }
-
- if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) {
- val &= ~CXL_VSEC_PROTOCOL_MASK;
- val |= CXL_VSEC_PROTOCOL_256TB;
- rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val);
- if (rc) {
- dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc);
- return rc;
- }
- }
+ msleep(100);
return 0;
}
@@ -1724,7 +1475,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = setup_cxl_bars(dev)))
return rc;
- if ((rc = setup_cxl_protocol_area(dev)))
+ if ((rc = switch_card_to_cxl(dev)))
return rc;
if ((rc = cxl_update_image_control(adapter)))
@@ -1871,37 +1622,14 @@ static const struct cxl_service_layer_ops psl8_ops = {
.needs_reset_before_disable = true,
};
-static const struct cxl_service_layer_ops xsl_ops = {
- .adapter_regs_init = init_implementation_adapter_regs_xsl,
- .invalidate_all = cxl_invalidate_all_psl8,
- .sanitise_afu_regs = sanitise_afu_regs_psl8,
- .handle_interrupt = cxl_irq_psl8,
- .fail_irq = cxl_fail_irq_psl,
- .activate_dedicated_process = cxl_activate_dedicated_process_psl8,
- .attach_afu_directed = cxl_attach_afu_directed_psl8,
- .attach_dedicated_process = cxl_attach_dedicated_process_psl8,
- .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
- .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_xsl,
- .write_timebase_ctrl = write_timebase_ctrl_xsl,
- .timebase_read = timebase_read_xsl,
- .capi_mode = OPAL_PHB_CAPI_MODE_DMA,
-};
-
static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
- if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
- /* Mellanox CX-4 */
- dev_info(&dev->dev, "Device uses an XSL\n");
- adapter->native->sl_ops = &xsl_ops;
- adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
+ if (cxl_is_power8()) {
+ dev_info(&dev->dev, "Device uses a PSL8\n");
+ adapter->native->sl_ops = &psl8_ops;
} else {
- if (cxl_is_power8()) {
- dev_info(&dev->dev, "Device uses a PSL8\n");
- adapter->native->sl_ops = &psl8_ops;
- } else {
- dev_info(&dev->dev, "Device uses a PSL9\n");
- adapter->native->sl_ops = &psl9_ops;
- }
+ dev_info(&dev->dev, "Device uses a PSL9\n");
+ adapter->native->sl_ops = &psl9_ops;
}
}
@@ -2008,43 +1736,6 @@ int cxl_slot_is_switched(struct pci_dev *dev)
return (depth > CXL_MAX_PCIEX_PARENT);
}
-bool cxl_slot_is_supported(struct pci_dev *dev, int flags)
-{
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return false;
-
- if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) {
- /*
- * CAPP DMA mode is technically supported on regular P8, but
- * will EEH if the card attempts to access memory < 4GB, which
- * we cannot realistically avoid. We might be able to work
- * around the issue, but until then return unsupported:
- */
- return false;
- }
-
- if (cxl_slot_is_switched(dev))
- return false;
-
- /*
- * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since
- * the CAPP can be connected to PHB 0, 1 or 2 on a first come first
- * served basis, which is racy to check from here. If we need to
- * support this in future we might need to consider having this
- * function effectively reserve it ahead of time.
- *
- * Currently, the only user of this API is the Mellanox CX4, which is
- * only supported on P8NVL due to the above mentioned limitation of
- * CAPP DMA mode and therefore does not need to worry about this. If the
- * issue with CAPP DMA mode is later worked around on P8 we might need
- * to revisit this.
- */
-
- return true;
-}
-EXPORT_SYMBOL_GPL(cxl_slot_is_supported);
-
-
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct cxl *adapter;
@@ -2086,9 +1777,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
}
- if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1)
- pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]);
-
return 0;
}
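Editor's note: the restored switch_card_to_cxl() above boils down to a read-modify-write of one byte in the card's CXL VSEC, followed by the 100 ms settle time the CAIA spec requires. A sketch of that sequence using the raw config-space accessors behind the CXL_*_VSEC_MODE_CONTROL macros (offset 0xa within the VSEC, per the defines earlier in pci.c; the example_ names are hypothetical):

#include <linux/delay.h>
#include <linux/pci.h>

#define EXAMPLE_VSEC_MODE_CONTROL	0xa	/* as in the macros above */

static int example_set_vsec_mode(struct pci_dev *dev, int vsec,
				 u8 mask, u8 bits)
{
	u8 val;
	int rc;

	rc = pci_read_config_byte(dev, vsec + EXAMPLE_VSEC_MODE_CONTROL, &val);
	if (rc)
		return rc;

	val &= ~mask;
	val |= bits;
	rc = pci_write_config_byte(dev, vsec + EXAMPLE_VSEC_MODE_CONTROL, val);
	if (rc)
		return rc;

	msleep(100);	/* per CAIA: no config access for 100ms after switch */
	return 0;
}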
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
deleted file mode 100644
index 6ec69ada19f4..000000000000
--- a/drivers/misc/cxl/phb.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2014-2016 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/pci.h>
-#include "cxl.h"
-
-bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
-{
- struct cxl_context *ctx;
-
- /*
- * Allocate a context to do cxl things to. This is used for interrupts
- * in the peer model using a real phb, and if we eventually do DMA ops
- * in the virtual phb, we'll need a default context to attach them to.
- */
- ctx = cxl_dev_context_init(dev);
- if (IS_ERR(ctx))
- return false;
- dev->dev.archdata.cxl_ctx = ctx;
-
- return (cxl_ops->afu_check_and_enable(afu) == 0);
-}
-/* exported via cxl_base */
-
-void _cxl_pci_disable_device(struct pci_dev *dev)
-{
- struct cxl_context *ctx = cxl_get_context(dev);
-
- if (ctx) {
- if (ctx->status == STARTED) {
- dev_err(&dev->dev, "Default context started\n");
- return;
- }
- dev->dev.archdata.cxl_ctx = NULL;
- cxl_release_context(ctx);
- }
-}
-/* exported via cxl_base */
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 7fd0bdc1436a..7908633d9204 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -9,7 +9,6 @@
#include <linux/pci.h>
#include <misc/cxl.h>
-#include <asm/pnv-pci.h>
#include "cxl.h"
static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
@@ -45,6 +44,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *phb;
struct cxl_afu *afu;
+ struct cxl_context *ctx;
phb = pci_bus_to_host(dev->bus);
afu = (struct cxl_afu *)phb->private_data;
@@ -57,7 +57,30 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
set_dma_ops(&dev->dev, &dma_nommu_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
- return _cxl_pci_associate_default_context(dev, afu);
+ /*
+ * Allocate a context to do cxl things to. If we eventually do real
+ * DMA ops, we'll need a default context to attach them to
+ */
+ ctx = cxl_dev_context_init(dev);
+ if (IS_ERR(ctx))
+ return false;
+ dev->dev.archdata.cxl_ctx = ctx;
+
+ return (cxl_ops->afu_check_and_enable(afu) == 0);
+}
+
+static void cxl_pci_disable_device(struct pci_dev *dev)
+{
+ struct cxl_context *ctx = cxl_get_context(dev);
+
+ if (ctx) {
+ if (ctx->status == STARTED) {
+ dev_err(&dev->dev, "Default context started\n");
+ return;
+ }
+ dev->dev.archdata.cxl_ctx = NULL;
+ cxl_release_context(ctx);
+ }
}
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
@@ -191,8 +214,8 @@ static struct pci_controller_ops cxl_pci_controller_ops =
{
.probe_mode = cxl_pci_probe_mode,
.enable_device_hook = cxl_pci_enable_device_hook,
- .disable_device = _cxl_pci_disable_device,
- .release_device = _cxl_pci_disable_device,
+ .disable_device = cxl_pci_disable_device,
+ .release_device = cxl_pci_disable_device,
.window_alignment = cxl_pci_window_alignment,
.reset_secondary_bus = cxl_pci_reset_secondary_bus,
.setup_msi_irqs = cxl_setup_msi_irqs,
@@ -284,18 +307,13 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
*/
}
-static bool _cxl_pci_is_vphb_device(struct pci_controller *phb)
-{
- return (phb->ops == &cxl_pcie_pci_ops);
-}
-
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
struct pci_controller *phb;
phb = pci_bus_to_host(dev->bus);
- return _cxl_pci_is_vphb_device(phb);
+ return (phb->ops == &cxl_pcie_pci_ops);
}
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
@@ -304,13 +322,7 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
phb = pci_bus_to_host(dev->bus);
- if (_cxl_pci_is_vphb_device(phb))
- return (struct cxl_afu *)phb->private_data;
-
- if (pnv_pci_on_cxl_phb(dev))
- return pnv_cxl_phb_to_afu(phb);
-
- return ERR_PTR(-ENODEV);
+ return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index f5cc517d1131..7e50e1d6f58c 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -478,6 +478,23 @@ static void at24_properties_to_pdata(struct device *dev,
if (device_property_present(dev, "no-read-rollover"))
chip->flags |= AT24_FLAG_NO_RDROL;
+ err = device_property_read_u32(dev, "address-width", &val);
+ if (!err) {
+ switch (val) {
+ case 8:
+ if (chip->flags & AT24_FLAG_ADDR16)
+ dev_warn(dev, "Override address width to be 8, while default is 16\n");
+ chip->flags &= ~AT24_FLAG_ADDR16;
+ break;
+ case 16:
+ chip->flags |= AT24_FLAG_ADDR16;
+ break;
+ default:
+ dev_warn(dev, "Bad \"address-width\" property: %u\n",
+ val);
+ }
+ }
+
err = device_property_read_u32(dev, "size", &val);
if (!err)
chip->byte_len = val;
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 6a7d4a2ad514..840afb398f9e 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -94,8 +94,10 @@ static int at25_ee_read(void *priv, unsigned int offset,
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
+ /* fall through */
case 2:
*cp++ = offset >> 8;
+ /* fall through */
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
@@ -180,8 +182,10 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
+ /* fall through */
case 2:
*cp++ = offset >> 8;
+ /* fall through */
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
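Editor's note: the /* fall through */ annotations added above are not cosmetic — the switch deliberately cascades so a 3-, 2- or 1-byte EEPROM offset is emitted big-endian, most significant byte first. The same idea in isolation (hypothetical helper; the caller supplies the buffer):

#include <linux/types.h>

static u8 *example_put_offset(u8 *cp, unsigned int offset, int addrlen)
{
	switch (addrlen) {
	default:	/* case 3 */
		*cp++ = offset >> 16;
		/* fall through */
	case 2:
		*cp++ = offset >> 8;
		/* fall through */
	case 1:
		*cp++ = offset;
	}
	return cp;
}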
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 59dc24bb70ec..8a4659518c33 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -938,7 +938,7 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
{
struct idt_89hpesx_dev *pdev = filep->private_data;
char *colon_ch, *csraddr_str, *csrval_str;
- int ret, csraddr_len, csrval_len;
+ int ret, csraddr_len;
u32 csraddr, csrval;
char *buf;
@@ -974,12 +974,10 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
csraddr_str[csraddr_len] = '\0';
/* Register value must follow the colon */
csrval_str = colon_ch + 1;
- csrval_len = strnlen(csrval_str, count - csraddr_len);
} else /* if (str_colon == NULL) */ {
csraddr_str = (char *)buf; /* Just to shut warning up */
csraddr_len = strnlen(csraddr_str, count);
csrval_str = NULL;
- csrval_len = 0;
}
/* Convert CSR address to u32 value */
@@ -1130,7 +1128,7 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
device_for_each_child_node(dev, fwnode) {
ee_id = idt_ee_match_id(fwnode);
- if (IS_ERR_OR_NULL(ee_id)) {
+ if (!ee_id) {
dev_warn(dev, "Skip unsupported EEPROM device");
continue;
} else
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 0e32709d1022..fc0cf9a7402e 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -148,7 +148,8 @@ static int max6875_probe(struct i2c_client *client,
if (client->addr & 1)
return -ENODEV;
- if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
+ data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
/* A fake client is created on the odd address */
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 1c3967f10f55..120738d6e58b 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -497,7 +497,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);
static inline bool dma_mapping_used(struct dma_mapping *m)
{
if (!m)
- return 0;
+ return false;
return m->size != 0;
}
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index f921dd590271..c6b82f09b3ba 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -305,7 +305,6 @@ GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show);
static int genwqe_info_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
- u16 val16, type;
u64 app_id, slu_id, bitstream = -1;
struct pci_dev *pci_dev = cd->pci_dev;
@@ -315,9 +314,6 @@ static int genwqe_info_show(struct seq_file *s, void *unused)
if (genwqe_is_privileged(cd))
bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM);
- val16 = (u16)(slu_id & 0x0fLLU);
- type = (u16)((slu_id >> 20) & 0xffLLU);
-
seq_printf(s, "%s driver version: %s\n"
" Device Name/Type: %s %s CardIdx: %d\n"
" SLU/APP Config : 0x%016llx/0x%016llx\n"
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 0dd6b5ef314a..f453ab82f0d7 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -304,14 +304,12 @@ static int genwqe_open(struct inode *inode, struct file *filp)
{
struct genwqe_dev *cd;
struct genwqe_file *cfile;
- struct pci_dev *pci_dev;
cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
if (cfile == NULL)
return -ENOMEM;
cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
- pci_dev = cd->pci_dev;
cfile->cd = cd;
cfile->filp = filp;
cfile->client = NULL;
@@ -864,7 +862,6 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
struct genwqe_dev *cd = cfile->cd;
struct genwqe_ddcb_cmd *cmd = &req->cmd;
struct dma_mapping *m;
- const char *type = "UNKNOWN";
for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
i++, asiv_offs += 0x08) {
@@ -933,11 +930,9 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
if (m != NULL) {
- type = "PINNING";
page_offs = (u_addr -
(u64)m->u_vaddr)/PAGE_SIZE;
} else {
- type = "MAPPING";
m = &req->dma_mappings[i];
genwqe_mapping_init(m,
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e05c3245930a..fa840666bdd1 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
void __iomem *address = (void __iomem *)file->private_data;
- unsigned char *page;
- int retval;
int len = 0;
unsigned int value;
-
- if (*offset < 0)
- return -EINVAL;
- if (count == 0 || count > 1024)
- return 0;
- if (*offset != 0)
- return 0;
-
- page = (unsigned char *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ char lbuf[20];
value = readl(address);
- len = sprintf(page, "%d\n", value);
-
- if (copy_to_user(buf, page, len)) {
- retval = -EFAULT;
- goto exit;
- }
- *offset += len;
- retval = len;
+ len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
-exit:
- free_page((unsigned long)page);
- return retval;
+ return simple_read_from_buffer(buf, count, offset, lbuf, len);
}
static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
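Editor's note: simple_read_from_buffer() is what makes the shrink above safe — it performs the offset bounds checks, the copy_to_user() and the *ppos update that the old code hand-rolled around a spare page. A minimal sketch of a read() built on it, with 42 standing in for the readl() value (names are hypothetical):

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	char tmp[20];
	int len;

	len = scnprintf(tmp, sizeof(tmp), "%d\n", 42);

	/* Copies at most `count` bytes from tmp + *ppos and advances *ppos */
	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}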
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index fb83d1375638..8f82bb9d11e2 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -273,7 +273,7 @@ static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
dma_addr_t *dma_handle)
{
/* allocate memory */
- void *buffer = kzalloc(size, GFP_KERNEL);
+ void *buffer = kzalloc(size, GFP_ATOMIC);
if (!buffer) {
*dma_handle = 0;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 0208c4b027c5..a6f41f96f2a1 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2013, Intel Corporation.
+ * Copyright (c) 2003-2018, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -96,8 +96,22 @@ struct mkhi_fwcaps {
u8 data[0];
} __packed;
+struct mkhi_fw_ver_block {
+ u16 minor;
+ u8 major;
+ u8 platform;
+ u16 buildno;
+ u16 hotfix;
+} __packed;
+
+struct mkhi_fw_ver {
+ struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS];
+} __packed;
+
#define MKHI_FWCAPS_GROUP_ID 0x3
#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
+#define MKHI_GEN_GROUP_ID 0xFF
+#define MKHI_GEN_GET_FW_VERSION_CMD 0x2
struct mkhi_msg_hdr {
u8 group_id;
u8 command;
@@ -139,21 +153,81 @@ static int mei_osver(struct mei_cl_device *cldev)
return __mei_cl_send(cldev->cl, buf, size, mode);
}
+#define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
+ sizeof(struct mkhi_fw_ver))
+#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \
+ sizeof(struct mkhi_fw_ver_block) * (__num))
+#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
+static int mei_fwver(struct mei_cl_device *cldev)
+{
+ char buf[MKHI_FWVER_BUF_LEN];
+ struct mkhi_msg *req;
+ struct mkhi_fw_ver *fwver;
+ int bytes_recv, ret, i;
+
+ memset(buf, 0, sizeof(buf));
+
+ req = (struct mkhi_msg *)buf;
+ req->hdr.group_id = MKHI_GEN_GROUP_ID;
+ req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
+
+ ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr),
+ MEI_CL_IO_TX_BLOCKING);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
+ return ret;
+ }
+
+ ret = 0;
+ bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0,
+ MKHI_RCV_TIMEOUT);
+ if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
+ /*
+ * Should be at least one version block,
+ * error out if nothing found
+ */
+ dev_err(&cldev->dev, "Could not read FW version\n");
+ return -EIO;
+ }
+
+ fwver = (struct mkhi_fw_ver *)req->data;
+ memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver));
+ for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) {
+ if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1))
+ break;
+ dev_dbg(&cldev->dev, "FW version%d %d:%d.%d.%d.%d\n",
+ i, fwver->ver[i].platform,
+ fwver->ver[i].major, fwver->ver[i].minor,
+ fwver->ver[i].hotfix, fwver->ver[i].buildno);
+
+ cldev->bus->fw_ver[i].platform = fwver->ver[i].platform;
+ cldev->bus->fw_ver[i].major = fwver->ver[i].major;
+ cldev->bus->fw_ver[i].minor = fwver->ver[i].minor;
+ cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix;
+ cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno;
+ }
+
+ return ret;
+}
+
static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
- if (!cldev->bus->hbm_f_os_supported)
- return;
-
ret = mei_cldev_enable(cldev);
if (ret)
return;
- ret = mei_osver(cldev);
+ ret = mei_fwver(cldev);
if (ret < 0)
- dev_err(&cldev->dev, "OS version command failed %d\n", ret);
+ dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+ if (cldev->bus->hbm_f_os_supported) {
+ ret = mei_osver(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "OS version command failed %d\n",
+ ret);
+ }
mei_cldev_disable(cldev);
}
@@ -266,8 +340,8 @@ static int mei_nfc_if_version(struct mei_cl *cl,
return -ENOMEM;
ret = 0;
- bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0);
- if (bytes_recv < if_version_length) {
+ bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0);
+ if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
goto err;
@@ -410,7 +484,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *cldev)
{
struct mei_fixup *f;
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
- int i;
+ size_t i;
for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) {
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index b1133739fb4b..7bba62a72921 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -116,11 +116,12 @@ out:
* @buf: buffer to receive
* @length: buffer length
* @mode: io mode
+ * @timeout: recv timeout in msec, 0 for an infinite wait
*
* Return: read size in bytes or < 0 on error
*/
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
- unsigned int mode)
+ unsigned int mode, unsigned long timeout)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
@@ -158,13 +159,28 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
mutex_unlock(&bus->device_lock);
- if (wait_event_interruptible(cl->rx_wait,
- (!list_empty(&cl->rd_completed)) ||
- (!mei_cl_is_connected(cl)))) {
-
- if (signal_pending(current))
- return -EINTR;
- return -ERESTARTSYS;
+ if (timeout) {
+ rets = wait_event_interruptible_timeout
+ (cl->rx_wait,
+ (!list_empty(&cl->rd_completed)) ||
+ (!mei_cl_is_connected(cl)),
+ msecs_to_jiffies(timeout));
+ if (rets == 0)
+ return -ETIME;
+ if (rets < 0) {
+ if (signal_pending(current))
+ return -EINTR;
+ return -ERESTARTSYS;
+ }
+ } else {
+ if (wait_event_interruptible
+ (cl->rx_wait,
+ (!list_empty(&cl->rd_completed)) ||
+ (!mei_cl_is_connected(cl)))) {
+ if (signal_pending(current))
+ return -EINTR;
+ return -ERESTARTSYS;
+ }
}
mutex_lock(&bus->device_lock);
@@ -231,7 +247,7 @@ ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
{
struct mei_cl *cl = cldev->cl;
- return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK);
+ return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
@@ -248,7 +264,7 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
struct mei_cl *cl = cldev->cl;
- return __mei_cl_recv(cl, buf, length, 0);
+ return __mei_cl_recv(cl, buf, length, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
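
The timeout branch added above relies on the wait_event_interruptible_timeout() return convention: 0 means the timeout elapsed, a negative value means a signal interrupted the wait, and a positive value means the condition became true before the timeout. A hedged sketch of mapping that convention onto error codes, mirroring the logic in __mei_cl_recv() (the helper name and the 'signalled' stand-in for signal_pending(current) are ours):

#include <errno.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512   /* kernel-internal restart code */
#endif

/* Sketch: 'rets' is the wait_event_interruptible_timeout() result. */
static int map_wait_result(long rets, int signalled)
{
        if (rets == 0)
                return -ETIME;           /* timeout elapsed           */
        if (rets < 0)
                return signalled ? -EINTR : -ERESTARTSYS;
        return 0;                        /* condition satisfied       */
}
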
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 8d6197a88b54..4ab6251d418e 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -863,10 +863,12 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
int slots;
int ret;
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_cl_send_disconnect(cl, cb);
@@ -1053,13 +1055,15 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
int slots;
int rets;
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
- slots = mei_hbuf_empty_slots(dev);
-
if (mei_cl_is_other_connecting(cl))
return 0;
- if (slots < msg_slots)
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
rets = mei_cl_send_connect(cl, cb);
@@ -1294,10 +1298,12 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
int ret;
bool request;
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
request = mei_cl_notify_fop2req(cb->fop_type);
@@ -1533,6 +1539,23 @@ nortpm:
}
/**
+ * mei_msg_hdr_init - initialize mei message header
+ *
+ * @mei_hdr: mei message header
+ * @cb: message callback structure
+ */
+static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
+{
+ mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
+ mei_hdr->me_addr = mei_cl_me_id(cb->cl);
+ mei_hdr->length = 0;
+ mei_hdr->reserved = 0;
+ mei_hdr->msg_complete = 0;
+ mei_hdr->dma_ring = 0;
+ mei_hdr->internal = cb->internal;
+}
+
+/**
* mei_cl_irq_write - write a message to device
* from the interrupt thread context
*
@@ -1548,9 +1571,10 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
+ size_t hdr_len = sizeof(mei_hdr);
size_t len;
- u32 msg_slots;
- int slots;
+ size_t hbuf_len;
+ int hbuf_slots;
int rets;
bool first_chunk;
@@ -1572,40 +1596,41 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
- slots = mei_hbuf_empty_slots(dev);
len = buf->size - cb->buf_idx;
- msg_slots = mei_data2slots(len);
+ hbuf_slots = mei_hbuf_empty_slots(dev);
+ if (hbuf_slots < 0) {
+ rets = -EOVERFLOW;
+ goto err;
+ }
- mei_hdr.host_addr = mei_cl_host_addr(cl);
- mei_hdr.me_addr = mei_cl_me_id(cl);
- mei_hdr.reserved = 0;
- mei_hdr.internal = cb->internal;
+ hbuf_len = mei_slots2data(hbuf_slots);
- if (slots >= msg_slots) {
+ mei_msg_hdr_init(&mei_hdr, cb);
+
+ /*
+ * Split the message only if we can write the whole host buffer,
+ * otherwise wait for the next time the host buffer is empty.
+ */
+ if (len + hdr_len <= hbuf_len) {
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
- /* Split the message only if we can write the whole host buffer */
- } else if (slots == dev->hbuf_depth) {
- msg_slots = slots;
- len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr.length = len;
- mei_hdr.msg_complete = 0;
+ } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
+ mei_hdr.length = hbuf_len - hdr_len;
} else {
- /* wait for next time the host buffer is empty */
return 0;
}
cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
cb->buf.size, cb->buf_idx);
- rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
+ rets = mei_write_message(dev, &mei_hdr, hdr_len,
+ buf->data + cb->buf_idx, mei_hdr.length);
if (rets)
goto err;
cl->status = 0;
cl->writing_state = MEI_WRITING;
cb->buf_idx += mei_hdr.length;
- cb->completed = mei_hdr.msg_complete == 1;
if (first_chunk) {
if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
@@ -1634,13 +1659,16 @@ err:
*
* Return: number of bytes sent on success, <0 on failure.
*/
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
+ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
- int size;
- int rets;
+ size_t hdr_len = sizeof(mei_hdr);
+ size_t len;
+ size_t hbuf_len;
+ int hbuf_slots;
+ ssize_t rets;
bool blocking;
if (WARN_ON(!cl || !cl->dev))
@@ -1652,52 +1680,57 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
dev = cl->dev;
buf = &cb->buf;
- size = buf->size;
+ len = buf->size;
blocking = cb->blocking;
- cl_dbg(dev, cl, "size=%d\n", size);
+ cl_dbg(dev, cl, "len=%zd\n", len);
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
- cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ cl_err(dev, cl, "rpm: get failed %zd\n", rets);
goto free;
}
cb->buf_idx = 0;
cl->writing_state = MEI_IDLE;
- mei_hdr.host_addr = mei_cl_host_addr(cl);
- mei_hdr.me_addr = mei_cl_me_id(cl);
- mei_hdr.reserved = 0;
- mei_hdr.msg_complete = 0;
- mei_hdr.internal = cb->internal;
rets = mei_cl_tx_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
+ mei_msg_hdr_init(&mei_hdr, cb);
+
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
- rets = size;
+ rets = len;
goto out;
}
+
if (!mei_hbuf_acquire(dev)) {
cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
- rets = size;
+ rets = len;
goto out;
}
- /* Check for a maximum length */
- if (size > mei_hbuf_max_len(dev)) {
- mei_hdr.length = mei_hbuf_max_len(dev);
- mei_hdr.msg_complete = 0;
- } else {
- mei_hdr.length = size;
+ hbuf_slots = mei_hbuf_empty_slots(dev);
+ if (hbuf_slots < 0) {
+ rets = -EOVERFLOW;
+ goto out;
+ }
+
+ hbuf_len = mei_slots2data(hbuf_slots);
+
+ if (len + hdr_len <= hbuf_len) {
+ mei_hdr.length = len;
mei_hdr.msg_complete = 1;
+ } else {
+ mei_hdr.length = hbuf_len - hdr_len;
}
- rets = mei_write_message(dev, &mei_hdr, buf->data);
+ rets = mei_write_message(dev, &mei_hdr, hdr_len,
+ buf->data, mei_hdr.length);
if (rets)
goto err;
@@ -1707,7 +1740,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
- cb->completed = mei_hdr.msg_complete == 1;
out:
if (mei_hdr.msg_complete)
@@ -1735,7 +1767,7 @@ out:
}
}
- rets = size;
+ rets = buf->size;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
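
With the new accounting, mei_cl_write() and mei_cl_irq_write() make the complete-versus-split decision purely in bytes: the payload plus header must fit in the free host-buffer space, otherwise a partial chunk is only worth sending when the buffer is entirely empty. A standalone sketch of that decision under the stated assumptions (the helper name is ours; MEI_SLOT_SIZE is 4 bytes):

#include <stddef.h>
#include <stdint.h>

#define MEI_SLOT_SIZE sizeof(uint32_t)

/* Sketch: pick the payload length for one hardware write.
 * Returns the bytes to send now, or 0 to wait for more room. */
static size_t pick_chunk(size_t len, size_t hdr_len,
                         int hbuf_slots, uint32_t hbuf_depth)
{
        size_t hbuf_len;

        if (hbuf_slots < 0)                  /* -EOVERFLOW in the driver */
                return 0;
        hbuf_len = (size_t)hbuf_slots * MEI_SLOT_SIZE;
        if (len + hdr_len <= hbuf_len)
                return len;                  /* complete message fits    */
        if ((uint32_t)hbuf_slots == hbuf_depth)
                return hbuf_len - hdr_len;   /* split: fill whole buffer */
        return 0;                            /* wait for an empty buffer */
}
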
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 5371df4d8af3..64e318f589b4 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -202,7 +202,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
+ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index c815da91089c..7b5df8fd6c5a 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -183,6 +183,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
dev->hbm_f_fa_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
dev->hbm_f_os_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tDR: %01d\n",
+ dev->hbm_f_dr_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index fe6595fe94f1..09e233d4c0de 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -96,6 +96,20 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
}
/**
+ * mei_hbm_write_message - wrapper for sending hbm messages.
+ *
+ * @dev: mei device
+ * @hdr: mei header
+ * @data: payload
+ */
+static inline int mei_hbm_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ const void *data)
+{
+ return mei_write_message(dev, hdr, sizeof(*hdr), data, hdr->length);
+}
+
+/**
* mei_hbm_idle - set hbm to idle state
*
* @dev: the device structure
@@ -131,6 +145,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
hdr->me_addr = 0;
hdr->length = length;
hdr->msg_complete = 1;
+ hdr->dma_ring = 0;
hdr->reserved = 0;
hdr->internal = 0;
}
@@ -174,7 +189,7 @@ static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
mei_hbm_hdr(&mei_hdr, len);
mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
- return mei_write_message(dev, &mei_hdr, buf);
+ return mei_hbm_write_message(dev, &mei_hdr, buf);
}
/**
@@ -267,7 +282,7 @@ int mei_hbm_start_req(struct mei_device *dev)
start_req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- ret = mei_write_message(dev, &mei_hdr, &start_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &start_req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@@ -304,7 +319,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
enum_req.flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
- ret = mei_write_message(dev, &mei_hdr, &enum_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &enum_req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@@ -373,7 +388,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
resp.me_addr = addr;
resp.status = status;
- ret = mei_write_message(dev, &mei_hdr, &resp);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &resp);
if (ret)
dev_err(dev->dev, "add client response write failed: ret = %d\n",
ret);
@@ -430,7 +445,7 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
req.start = start;
- ret = mei_write_message(dev, &mei_hdr, &req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
@@ -555,7 +570,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req.me_addr = addr;
- ret = mei_write_message(dev, &mei_hdr, &prop_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &prop_req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@@ -592,7 +607,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
memset(&req, 0, len);
req.hbm_cmd = pg_cmd;
- ret = mei_write_message(dev, &mei_hdr, &req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "power gate command write failed.\n");
return ret;
@@ -618,7 +633,7 @@ static int mei_hbm_stop_req(struct mei_device *dev)
req.hbm_cmd = HOST_STOP_REQ_CMD;
req.reason = DRIVER_STOP_REQUEST;
- return mei_write_message(dev, &mei_hdr, &req);
+ return mei_hbm_write_message(dev, &mei_hdr, &req);
}
/**
@@ -992,6 +1007,12 @@ static void mei_hbm_config_features(struct mei_device *dev)
/* OS ver message Support */
if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
dev->hbm_f_os_supported = 1;
+
+ /* DMA Ring Support */
+ if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
+ (dev->version.major_version == HBM_MAJOR_VERSION_DR &&
+ dev->version.minor_version >= HBM_MINOR_VERSION_DR))
+ dev->hbm_f_dr_supported = 1;
}
/**
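
The DR feature gate above is the usual lexicographic (major, minor) comparison. A standalone sketch using the constants introduced in hw.h:

#include <stdbool.h>

#define HBM_MAJOR_VERSION_DR 2
#define HBM_MINOR_VERSION_DR 1

/* True when (major, minor) >= (HBM_MAJOR_VERSION_DR, HBM_MINOR_VERSION_DR). */
static bool hbm_version_supports_dr(unsigned int major, unsigned int minor)
{
        return major > HBM_MAJOR_VERSION_DR ||
               (major == HBM_MAJOR_VERSION_DR &&
                minor >= HBM_MINOR_VERSION_DR);
}
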
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 334ab02e1de2..0759c3a668de 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -19,6 +19,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -228,7 +229,7 @@ static void mei_me_hw_config(struct mei_device *dev)
/* Doesn't change in runtime */
hcsr = mei_hcsr_read(dev);
- dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+ hw->hbuf_depth = (hcsr & H_CBD) >> 24;
reg = 0;
pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
@@ -490,70 +491,82 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev)
*/
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
+ struct mei_me_hw *hw = to_me_hw(dev);
unsigned char filled_slots, empty_slots;
filled_slots = mei_hbuf_filled_slots(dev);
- empty_slots = dev->hbuf_depth - filled_slots;
+ empty_slots = hw->hbuf_depth - filled_slots;
/* check for overflow */
- if (filled_slots > dev->hbuf_depth)
+ if (filled_slots > hw->hbuf_depth)
return -EOVERFLOW;
return empty_slots;
}
/**
- * mei_me_hbuf_max_len - returns size of hw buffer.
+ * mei_me_hbuf_depth - returns depth of the hw buffer.
*
* @dev: the device structure
*
- * Return: size of hw buffer in bytes
+ * Return: size of hw buffer in slots
*/
-static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
- return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
-}
+ struct mei_me_hw *hw = to_me_hw(dev);
+ return hw->hbuf_depth;
+}
/**
* mei_me_hbuf_write - writes a message to host hw buffer.
*
* @dev: the device structure
- * @header: mei HECI header of message
- * @buf: message payload will be written
+ * @hdr: header of message
+ * @hdr_len: header length in bytes; must be a multiple of the slot size (4 bytes)
+ * @data: payload
+ * @data_len: payload length in bytes
*
- * Return: -EIO if write has failed
+ * Return: 0 on success, < 0 otherwise.
*/
static int mei_me_hbuf_write(struct mei_device *dev,
- struct mei_msg_hdr *header,
- const unsigned char *buf)
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
{
unsigned long rem;
- unsigned long length = header->length;
- u32 *reg_buf = (u32 *)buf;
+ unsigned long i;
+ const u32 *reg_buf;
u32 dw_cnt;
- int i;
int empty_slots;
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+ if (WARN_ON(!hdr || !data || hdr_len & 0x3))
+ return -EINVAL;
+
+ dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
empty_slots = mei_hbuf_empty_slots(dev);
dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
- dw_cnt = mei_data2slots(length);
- if (empty_slots < 0 || dw_cnt > empty_slots)
+ if (empty_slots < 0)
+ return -EOVERFLOW;
+
+ dw_cnt = mei_data2slots(hdr_len + data_len);
+ if (dw_cnt > (u32)empty_slots)
return -EMSGSIZE;
- mei_me_hcbww_write(dev, *((u32 *) header));
+ reg_buf = hdr;
+ for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
+ mei_me_hcbww_write(dev, reg_buf[i]);
- for (i = 0; i < length / 4; i++)
+ reg_buf = data;
+ for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
mei_me_hcbww_write(dev, reg_buf[i]);
- rem = length & 0x3;
+ rem = data_len & 0x3;
if (rem > 0) {
u32 reg = 0;
- memcpy(&reg, &buf[length - rem], rem);
+ memcpy(&reg, (const u8 *)data + data_len - rem, rem);
mei_me_hcbww_write(dev, reg);
}
@@ -601,11 +614,11 @@ static int mei_me_count_full_read_slots(struct mei_device *dev)
* Return: always 0
*/
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
- unsigned long buffer_length)
+ unsigned long buffer_length)
{
u32 *reg_buf = (u32 *)buffer;
- for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
*reg_buf++ = mei_me_mecbrw_read(dev);
if (buffer_length > 0) {
@@ -1314,7 +1327,7 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.hbuf_free_slots = mei_me_hbuf_empty_slots,
.hbuf_is_ready = mei_me_hbuf_is_empty,
- .hbuf_max_len = mei_me_hbuf_max_len,
+ .hbuf_depth = mei_me_hbuf_depth,
.write = mei_me_hbuf_write,
@@ -1377,6 +1390,11 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.fw_status.status[4] = PCI_CFG_HFS_5, \
.fw_status.status[5] = PCI_CFG_HFS_6
+#define MEI_CFG_DMA_128 \
+ .dma_size[DMA_DSCR_HOST] = SZ_128K, \
+ .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
+ .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
+
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
MEI_CFG_ICH_HFS,
@@ -1409,6 +1427,12 @@ static const struct mei_cfg mei_me_pch8_sps_cfg = {
MEI_CFG_FW_SPS,
};
+/* Cannon Lake and newer devices */
+static const struct mei_cfg mei_me_pch12_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_DMA_128,
+};
+
/*
* mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1421,6 +1445,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+ [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 67892533576e..bbcc5fc106cd 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -31,10 +31,12 @@
*
* @fw_status: FW status
* @quirk_probe: device exclusion quirk
+ * @dma_size: device DMA buffer sizes
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
+ size_t dma_size[DMA_DSCR_NUM];
};
@@ -52,12 +54,14 @@ struct mei_cfg {
* @mem_addr: io memory address
* @pg_state: power gating state
* @d0i3_supported: di03 support
+ * @hbuf_depth: depth of hardware host/write buffer in slots
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
enum mei_pg_state pg_state;
bool d0i3_supported;
+ u8 hbuf_depth;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
@@ -78,6 +82,7 @@ struct mei_me_hw {
* @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
+ * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -88,6 +93,7 @@ enum mei_cfg_idx {
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
+ MEI_ME_PCH12_CFG,
MEI_ME_NUM_CFG,
};
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index c2c8993e2a51..8449fe0367ff 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -31,6 +31,7 @@
#include "mei-trace.h"
+#define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE)
/**
* mei_txe_reg_read - Reads 32bit data from the txe device
@@ -681,9 +682,6 @@ static void mei_txe_hw_config(struct mei_device *dev)
struct mei_txe_hw *hw = to_txe_hw(dev);
- /* Doesn't change in runtime */
- dev->hbuf_depth = PAYLOAD_SIZE / 4;
-
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
@@ -691,37 +689,34 @@ static void mei_txe_hw_config(struct mei_device *dev)
hw->aliveness, hw->readiness);
}
-
/**
* mei_txe_write - writes a message to device.
*
* @dev: the device structure
- * @header: header of message
- * @buf: message buffer will be written
+ * @hdr: header of message
+ * @hdr_len: header length in bytes; must be a multiple of the slot size (4 bytes)
+ * @data: payload
+ * @data_len: payload length in bytes
*
- * Return: 0 if success, <0 - otherwise.
+ * Return: 0 on success, < 0 otherwise.
*/
-
static int mei_txe_write(struct mei_device *dev,
- struct mei_msg_hdr *header,
- const unsigned char *buf)
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
unsigned long rem;
- unsigned long length;
- int slots = dev->hbuf_depth;
- u32 *reg_buf = (u32 *)buf;
+ const u32 *reg_buf;
+ u32 slots = TXE_HBUF_DEPTH;
u32 dw_cnt;
- int i;
+ unsigned long i, j;
- if (WARN_ON(!header || !buf))
+ if (WARN_ON(!hdr || !data || hdr_len & 0x3))
return -EINVAL;
- length = header->length;
-
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+ dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
- dw_cnt = mei_data2slots(length);
+ dw_cnt = mei_data2slots(hdr_len + data_len);
if (dw_cnt > slots)
return -EMSGSIZE;
@@ -739,17 +734,20 @@ static int mei_txe_write(struct mei_device *dev,
return -EAGAIN;
}
- mei_txe_input_payload_write(dev, 0, *((u32 *)header));
+ reg_buf = hdr;
+ for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
+ mei_txe_input_payload_write(dev, i, reg_buf[i]);
- for (i = 0; i < length / 4; i++)
- mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);
+ reg_buf = data;
+ for (j = 0; j < data_len / MEI_SLOT_SIZE; j++)
+ mei_txe_input_payload_write(dev, i + j, reg_buf[j]);
- rem = length & 0x3;
+ rem = data_len & 0x3;
if (rem > 0) {
u32 reg = 0;
- memcpy(&reg, &buf[length - rem], rem);
- mei_txe_input_payload_write(dev, i + 1, reg);
+ memcpy(&reg, (const u8 *)data + data_len - rem, rem);
+ mei_txe_input_payload_write(dev, i + j, reg);
}
/* after each write the whole buffer is consumed */
@@ -762,15 +760,15 @@ static int mei_txe_write(struct mei_device *dev,
}
/**
- * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer
+ * mei_txe_hbuf_depth - mimics the me hbuf circular buffer
*
* @dev: the device structure
*
- * Return: the PAYLOAD_SIZE - 4
+ * Return: the TXE_HBUF_DEPTH
*/
-static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
+static u32 mei_txe_hbuf_depth(const struct mei_device *dev)
{
- return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
+ return TXE_HBUF_DEPTH;
}
/**
@@ -778,7 +776,7 @@ static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
*
* @dev: the device structure
*
- * Return: always hbuf_depth
+ * Return: always TXE_HBUF_DEPTH
*/
static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
{
@@ -797,7 +795,7 @@ static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
static int mei_txe_count_full_read_slots(struct mei_device *dev)
{
/* read buffers has static size */
- return PAYLOAD_SIZE / 4;
+ return TXE_HBUF_DEPTH;
}
/**
@@ -839,7 +837,7 @@ static int mei_txe_read(struct mei_device *dev,
dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
len, mei_txe_out_data_read(dev, 0));
- for (i = 0; i < len / 4; i++) {
+ for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
/* skip header: index starts from 1 */
reg = mei_txe_out_data_read(dev, i + 1);
dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
@@ -1140,7 +1138,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
/* Input Ready: Detection if host can write to SeC */
if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
dev->hbuf_is_ready = true;
- hw->slots = dev->hbuf_depth;
+ hw->slots = TXE_HBUF_DEPTH;
}
if (hw->aliveness && dev->hbuf_is_ready) {
@@ -1186,7 +1184,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
.hbuf_free_slots = mei_txe_hbuf_empty_slots,
.hbuf_is_ready = mei_txe_is_input_ready,
- .hbuf_max_len = mei_txe_hbuf_max_len,
+ .hbuf_depth = mei_txe_hbuf_depth,
.write = mei_txe_write,
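
Both the ME and TXE write paths now share the same shape: stream the header in whole 4-byte slots, stream the payload in whole slots, then zero-pad the payload tail into one final slot. A standalone model of that pattern, with write_slot() as a stand-in for the register write (hcbww/input payload):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MEI_SLOT_SIZE sizeof(uint32_t)

/* Stand-in for the hardware register write. */
static void write_slot(uint32_t slot)
{
        printf("slot: 0x%08x\n", slot);
}

/* Sketch: emit hdr then data as 32-bit slots; the payload remainder
 * (data_len % 4 bytes) is zero-padded into one last slot. */
static void stream_message(const void *hdr, size_t hdr_len,
                           const void *data, size_t data_len)
{
        const uint32_t *reg_buf = hdr;
        size_t i, rem;

        for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
                write_slot(reg_buf[i]);

        reg_buf = data;
        for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
                write_slot(reg_buf[i]);

        rem = data_len & 0x3;
        if (rem) {
                uint32_t last = 0;

                memcpy(&last, (const uint8_t *)data + data_len - rem, rem);
                write_slot(last);
        }
}
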
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 5c8286b40b62..65655925791a 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -28,8 +28,6 @@
#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
-#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
-
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
@@ -82,6 +80,12 @@
#define HBM_MINOR_VERSION_OS 0
#define HBM_MAJOR_VERSION_OS 2
+/*
+ * MEI version with dma ring support
+ */
+#define HBM_MINOR_VERSION_DR 1
+#define HBM_MAJOR_VERSION_DR 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -124,6 +128,9 @@
#define MEI_HBM_NOTIFY_RES_CMD 0x90
#define MEI_HBM_NOTIFICATION_CMD 0x11
+#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12
+#define MEI_HBM_DMA_SETUP_RES_CMD 0x92
+
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -189,19 +196,27 @@ enum mei_cl_disconnect_status {
MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS
};
-/*
- * MEI BUS Interface Section
+/**
+ * struct mei_msg_hdr - MEI BUS Interface Section
+ *
+ * @me_addr: device address
+ * @host_addr: host address
+ * @length: message length
+ * @reserved: reserved
+ * @dma_ring: message is on dma ring
+ * @internal: message is internal
+ * @msg_complete: last packet of the message
*/
struct mei_msg_hdr {
u32 me_addr:8;
u32 host_addr:8;
u32 length:9;
- u32 reserved:5;
+ u32 reserved:4;
+ u32 dma_ring:1;
u32 internal:1;
u32 msg_complete:1;
} __packed;
-
struct mei_bus_message {
u8 hbm_cmd;
u8 data[0];
@@ -451,4 +466,50 @@ struct hbm_notification {
u8 reserved[1];
} __packed;
+/**
+ * struct hbm_dma_mem_dscr - dma ring
+ *
+ * @addr_hi: the high 32bits of 64 bit address
+ * @addr_lo: the low 32bits of 64 bit address
+ * @size: size in bytes (must be a power of 2)
+ */
+struct hbm_dma_mem_dscr {
+ u32 addr_hi;
+ u32 addr_lo;
+ u32 size;
+} __packed;
+
+enum {
+ DMA_DSCR_HOST = 0,
+ DMA_DSCR_DEVICE = 1,
+ DMA_DSCR_CTRL = 2,
+ DMA_DSCR_NUM,
+};
+
+/**
+ * struct hbm_dma_setup_request - dma setup request
+ *
+ * @hbm_cmd: bus message command header
+ * @reserved: reserved for alignment
+ * @dma_dscr: dma descriptor for HOST, DEVICE, and CTRL
+ */
+struct hbm_dma_setup_request {
+ u8 hbm_cmd;
+ u8 reserved[3];
+ struct hbm_dma_mem_dscr dma_dscr[DMA_DSCR_NUM];
+} __packed;
+
+/**
+ * struct hbm_dma_setup_response - dma setup response
+ *
+ * @hbm_cmd: bus message command header
+ * @status: 0 on success; otherwise DMA setup failed.
+ * @reserved: reserved for alignment
+ */
+struct hbm_dma_setup_response {
+ u8 hbm_cmd;
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
#endif
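
The new hbm_dma_setup_request carries each ring's base address split into 32-bit halves. A hedged sketch of filling one descriptor from a 64-bit DMA address (the helper name is ours):

#include <stdint.h>

struct hbm_dma_mem_dscr {
        uint32_t addr_hi;
        uint32_t addr_lo;
        uint32_t size;   /* bytes, must be a power of 2 */
};

/* Hypothetical helper: split a 64-bit DMA address into the
 * hi/lo halves the firmware expects. */
static void fill_dscr(struct hbm_dma_mem_dscr *d, uint64_t addr,
                      uint32_t size)
{
        d->addr_hi = (uint32_t)(addr >> 32);
        d->addr_lo = (uint32_t)(addr & 0xffffffffu);
        d->size    = size;
}
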
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index b0b8f18a85e3..5a661cbdf2ae 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -173,10 +173,12 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
int slots;
int ret;
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response));
slots = mei_hbuf_empty_slots(dev);
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_disconnect_rsp(dev, cl);
@@ -206,10 +208,12 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
if (!list_empty(&cl->rd_pending))
return 0;
- msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control));
slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_flow_control_req(dev, cl);
@@ -310,8 +314,11 @@ int mei_irq_read_handler(struct mei_device *dev,
if (&cl->link == &dev->file_list) {
/* A message for not connected fixed address clients
* should be silently discarded
+ * On power down a client may be forcibly cleaned up;
+ * silently discard such messages as well
*/
- if (hdr_is_fixed(mei_hdr)) {
+ if (hdr_is_fixed(mei_hdr) ||
+ dev->dev_state == MEI_DEV_POWER_DOWN) {
mei_irq_discard_msg(dev, mei_hdr);
ret = 0;
goto reset_slots;
@@ -365,7 +372,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
return 0;
slots = mei_hbuf_empty_slots(dev);
- if (slots <= 0)
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if (slots == 0)
return -EMSGSIZE;
/* complete all waiting for write CB */
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 7465f17e1559..4d77a6ae183a 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
+ * Copyright (c) 2003-2018, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -137,7 +137,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
struct mei_device *dev;
struct mei_cl_cb *cb = NULL;
bool nonblock = !!(file->f_flags & O_NONBLOCK);
- int rets;
+ ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -170,7 +170,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
rets = mei_cl_read_start(cl, length, file);
if (rets && rets != -EBUSY) {
- cl_dbg(dev, cl, "mei start read failure status = %d\n", rets);
+ cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
goto out;
}
@@ -204,7 +204,7 @@ copy_buffer:
/* now copy the data to user space */
if (cb->status) {
rets = cb->status;
- cl_dbg(dev, cl, "read operation failed %d\n", rets);
+ cl_dbg(dev, cl, "read operation failed %zd\n", rets);
goto free;
}
@@ -236,7 +236,7 @@ free:
*offset = 0;
out:
- cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
+ cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -256,7 +256,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb;
struct mei_device *dev;
- int rets;
+ ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -312,7 +312,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
}
}
- *offset = 0;
cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
if (!cb) {
rets = -ENOMEM;
@@ -812,11 +811,39 @@ static ssize_t tx_queue_limit_store(struct device *device,
}
static DEVICE_ATTR_RW(tx_queue_limit);
+/**
+ * fw_ver_show - display ME FW version
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t fw_ver_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ struct mei_fw_version *ver;
+ ssize_t cnt = 0;
+ int i;
+
+ ver = dev->fw_ver;
+
+ for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
+ cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
+ ver[i].platform, ver[i].major, ver[i].minor,
+ ver[i].hotfix, ver[i].buildno);
+ return cnt;
+}
+static DEVICE_ATTR_RO(fw_ver);
+
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
&dev_attr_hbm_ver.attr,
&dev_attr_hbm_ver_drv.attr,
&dev_attr_tx_queue_limit.attr,
+ &dev_attr_fw_ver.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index be9c48415da9..377397e1b5a5 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
+ * Copyright (c) 2003-2018, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,7 +26,8 @@
#include "hw.h"
#include "hbm.h"
-#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
+#define MEI_SLOT_SIZE sizeof(u32)
+#define MEI_RD_MSG_BUF_SIZE (128 * MEI_SLOT_SIZE)
/*
* Number of Maximum MEI Clients
@@ -174,7 +175,6 @@ struct mei_cl;
* @status: io status of the cb
* @internal: communication between driver and FW flag
* @blocking: transmission blocking mode
- * @completed: the transfer or reception has completed
*/
struct mei_cl_cb {
struct list_head list;
@@ -186,7 +186,6 @@ struct mei_cl_cb {
int status;
u32 internal:1;
u32 blocking:1;
- u32 completed:1;
};
/**
@@ -269,7 +268,7 @@ struct mei_cl {
*
* @hbuf_free_slots : query for write buffer empty slots
* @hbuf_is_ready : query if write buffer is empty
- * @hbuf_max_len : query for write buffer max len
+ * @hbuf_depth : query for write buffer depth
*
* @write : write a message to FW
*
@@ -299,10 +298,10 @@ struct mei_hw_ops {
int (*hbuf_free_slots)(struct mei_device *dev);
bool (*hbuf_is_ready)(struct mei_device *dev);
- size_t (*hbuf_max_len)(const struct mei_device *dev);
+ u32 (*hbuf_depth)(const struct mei_device *dev);
int (*write)(struct mei_device *dev,
- struct mei_msg_hdr *hdr,
- const unsigned char *buf);
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len);
int (*rdbuf_full_slots)(struct mei_device *dev);
@@ -317,7 +316,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
unsigned int mode);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
- unsigned int mode);
+ unsigned int mode, unsigned long timeout);
bool mei_cl_bus_rx_event(struct mei_cl *cl);
bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
@@ -355,6 +354,25 @@ enum mei_pg_state {
const char *mei_pg_state_str(enum mei_pg_state state);
/**
+ * struct mei_fw_version - MEI FW version struct
+ *
+ * @platform: platform identifier
+ * @major: major version field
+ * @minor: minor version field
+ * @buildno: build number version field
+ * @hotfix: hotfix number version field
+ */
+struct mei_fw_version {
+ u8 platform;
+ u8 major;
+ u16 minor;
+ u16 buildno;
+ u16 hotfix;
+};
+
+#define MEI_MAX_FW_VER_BLOCKS 3
+
+/**
* struct mei_device - MEI private device struct
*
* @dev : device on a bus
@@ -390,7 +408,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @rd_msg_buf : control messages buffer
* @rd_msg_hdr : read message header storage
*
- * @hbuf_depth : depth of hardware host/write buffer is slots
* @hbuf_is_ready : query if the host host/write buffer is ready
*
* @version : HBM protocol version in use
@@ -401,6 +418,9 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @hbm_f_fa_supported : hbm feature fixed address client
* @hbm_f_ie_supported : hbm feature immediate reply to enum request
* @hbm_f_os_supported : hbm feature support OS ver message
+ * @hbm_f_dr_supported : hbm feature dma ring supported
+ *
+ * @fw_ver : FW versions
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -466,7 +486,6 @@ struct mei_device {
u32 rd_msg_hdr;
/* write buffer */
- u8 hbuf_depth;
bool hbuf_is_ready;
struct hbm_version version;
@@ -477,6 +496,9 @@ struct mei_device {
unsigned int hbm_f_fa_supported:1;
unsigned int hbm_f_ie_supported:1;
unsigned int hbm_f_os_supported:1;
+ unsigned int hbm_f_dr_supported:1;
+
+ struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
@@ -508,8 +530,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
}
/**
- * mei_data2slots - get slots - number of (dwords) from a message length
- * + size of the mei header
+ * mei_data2slots - get the number of slots from a message length
*
* @length: size of the messages in bytes
*
@@ -517,7 +538,20 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
*/
static inline u32 mei_data2slots(size_t length)
{
- return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
+ return DIV_ROUND_UP(length, MEI_SLOT_SIZE);
+}
+
+/**
+ * mei_hbm2slots - get the number of slots for an hbm message:
+ * message length plus the size of the mei message header
+ *
+ * @length: size of the messages in bytes
+ *
+ * Return: number of slots
+ */
+static inline u32 mei_hbm2slots(size_t length)
+{
+ return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, MEI_SLOT_SIZE);
}
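
With the header accounted for explicitly, the two helpers differ only in whether the 4-byte mei_msg_hdr is included. For example, a 10-byte payload occupies DIV_ROUND_UP(10, 4) = 3 slots via mei_data2slots(), but DIV_ROUND_UP(4 + 10, 4) = 4 slots via mei_hbm2slots(). A standalone check of that arithmetic:

#include <assert.h>

#define MEI_SLOT_SIZE 4u
#define MEI_HDR_SIZE  4u   /* sizeof(struct mei_msg_hdr) */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* payload-only vs header-inclusive slot counts */
        assert(DIV_ROUND_UP(10u, MEI_SLOT_SIZE) == 3);                /* data2slots */
        assert(DIV_ROUND_UP(MEI_HDR_SIZE + 10u, MEI_SLOT_SIZE) == 4); /* hbm2slots  */
        return 0;
}
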
/**
@@ -529,7 +563,7 @@ static inline u32 mei_data2slots(size_t length)
*/
static inline u32 mei_slots2data(int slots)
{
- return slots * 4;
+ return slots * MEI_SLOT_SIZE;
}
/*
@@ -630,15 +664,16 @@ static inline int mei_hbuf_empty_slots(struct mei_device *dev)
return dev->ops->hbuf_free_slots(dev);
}
-static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
+static inline u32 mei_hbuf_depth(const struct mei_device *dev)
{
- return dev->ops->hbuf_max_len(dev);
+ return dev->ops->hbuf_depth(dev);
}
static inline int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *hdr, const void *buf)
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
{
- return dev->ops->write(dev, hdr, buf);
+ return dev->ops->write(dev, hdr, hdr_len, data, data_len);
}
static inline u32 mei_read_hdr(const struct mei_device *dev)
@@ -681,10 +716,10 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
int mei_register(struct mei_device *dev, struct device *parent);
void mei_deregister(struct mei_device *dev);
-#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d"
#define MEI_HDR_PRM(hdr) \
(hdr)->host_addr, (hdr)->me_addr, \
- (hdr)->length, (hdr)->internal, (hdr)->msg_complete
+ (hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete
ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len);
/**
diff --git a/drivers/misc/mic/cosm/cosm_main.h b/drivers/misc/mic/cosm/cosm_main.h
index f01156fca881..aa78cdf25e40 100644
--- a/drivers/misc/mic/cosm/cosm_main.h
+++ b/drivers/misc/mic/cosm/cosm_main.h
@@ -45,7 +45,10 @@ struct cosm_msg {
u64 id;
union {
u64 shutdown_status;
- struct timespec64 timespec;
+ struct {
+ u64 tv_sec;
+ u64 tv_nsec;
+ } timespec;
};
};
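
Replacing struct timespec64 with two explicit u64 fields pins the wire layout of the host/card message, independent of how either side defines timespec64. A standalone sketch of the round trip, with local_ts as a stand-in for timespec64:

#include <stdint.h>

/* Fixed-layout time representation for the host/card message. */
struct cosm_wire_time {
        uint64_t tv_sec;
        uint64_t tv_nsec;
};

/* Stand-in for the (architecture-dependent) local timespec64. */
struct local_ts {
        int64_t tv_sec;
        long    tv_nsec;
};

static struct cosm_wire_time pack_time(const struct local_ts *ts)
{
        struct cosm_wire_time w = {
                .tv_sec  = (uint64_t)ts->tv_sec,
                .tv_nsec = (uint64_t)ts->tv_nsec,
        };
        return w;
}

static struct local_ts unpack_time(const struct cosm_wire_time *w)
{
        struct local_ts ts = {
                .tv_sec  = (int64_t)w->tv_sec,
                .tv_nsec = (long)w->tv_nsec,
        };
        return ts;
}
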
diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c
index 05a63286741c..e94b7eac4a06 100644
--- a/drivers/misc/mic/cosm/cosm_scif_server.c
+++ b/drivers/misc/mic/cosm/cosm_scif_server.c
@@ -179,9 +179,13 @@ static void cosm_set_crashed(struct cosm_device *cdev)
static void cosm_send_time(struct cosm_device *cdev)
{
struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME };
+ struct timespec64 ts;
int rc;
- getnstimeofday64(&msg.timespec);
+ ktime_get_real_ts64(&ts);
+ msg.timespec.tv_sec = ts.tv_sec;
+ msg.timespec.tv_nsec = ts.tv_nsec;
+
rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
if (rc < 0)
dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c
index beafc0da4027..225078cb51fd 100644
--- a/drivers/misc/mic/cosm_client/cosm_scif_client.c
+++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c
@@ -63,7 +63,11 @@ static struct notifier_block cosm_reboot = {
/* Set system time from timespec value received from the host */
static void cosm_set_time(struct cosm_msg *msg)
{
- int rc = do_settimeofday64(&msg->timespec);
+ struct timespec64 ts = {
+ .tv_sec = msg->timespec.tv_sec,
+ .tv_nsec = msg->timespec.tv_nsec,
+ };
+ int rc = do_settimeofday64(&ts);
if (rc)
dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n",
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 7b2dddcdd46d..8dd0ccedeb94 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -187,6 +187,7 @@ int scif_close(scif_epd_t epd)
case SCIFEP_ZOMBIE:
dev_err(scif_info.mdev.this_device,
"SCIFAPI close: zombie state unexpected\n");
+ /* fall through */
case SCIFEP_DISCONNECTED:
spin_unlock(&ep->lock);
scif_unregister_all_windows(epd);
@@ -370,11 +371,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
goto scif_bind_exit;
}
} else {
- pn = scif_get_new_port();
- if (!pn) {
- ret = -ENOSPC;
+ ret = scif_get_new_port();
+ if (ret < 0)
goto scif_bind_exit;
- }
+ pn = ret;
}
ep->state = SCIFEP_BOUND;
@@ -648,13 +648,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
err = -EISCONN;
break;
case SCIFEP_UNBOUND:
- ep->port.port = scif_get_new_port();
- if (!ep->port.port) {
- err = -ENOSPC;
- } else {
- ep->port.node = scif_info.nodeid;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- }
+ err = scif_get_new_port();
+ if (err < 0)
+ break;
+ ep->port.port = err;
+ ep->port.node = scif_info.nodeid;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
/* Fall through */
case SCIFEP_BOUND:
/*
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 63d6246d6dff..6369aeaa7056 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -200,15 +200,18 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
schedule_work(&scif_info.misc_work);
}
-static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct scif_mmu_notif *mmn;
mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
scif_rma_destroy_tcw(mmn, start, end - start);
+
+ return 0;
}
static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index 95f74623113e..c10a940e3b38 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -86,7 +86,7 @@ out:
return rc;
}
-static int map_afu_irq(struct vm_area_struct *vma, unsigned long address,
+static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
u64 offset, struct ocxl_context *ctx)
{
u64 trigger_addr;
@@ -95,15 +95,15 @@ static int map_afu_irq(struct vm_area_struct *vma, unsigned long address,
if (!trigger_addr)
return VM_FAULT_SIGBUS;
- vm_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
- return VM_FAULT_NOPAGE;
+ return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}
-static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
+static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
u64 offset, struct ocxl_context *ctx)
{
u64 pp_mmio_addr;
int pasid_off;
+ vm_fault_t ret;
if (offset >= ctx->afu->config.pp_mmio_stride)
return VM_FAULT_SIGBUS;
@@ -121,27 +121,27 @@ static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
pasid_off * ctx->afu->config.pp_mmio_stride +
offset;
- vm_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
+ ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
mutex_unlock(&ctx->status_mutex);
- return VM_FAULT_NOPAGE;
+ return ret;
}
-static int ocxl_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxl_context *ctx = vma->vm_file->private_data;
u64 offset;
- int rc;
+ vm_fault_t ret;
offset = vmf->pgoff << PAGE_SHIFT;
pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
ctx->pasid, vmf->address, offset);
if (offset < ctx->afu->irq_base_offset)
- rc = map_pp_mmio(vma, vmf->address, offset, ctx);
+ ret = map_pp_mmio(vma, vmf->address, offset, ctx);
else
- rc = map_afu_irq(vma, vmf->address, offset, ctx);
- return rc;
+ ret = map_afu_irq(vma, vmf->address, offset, ctx);
+ return ret;
}
static const struct vm_operations_struct ocxl_vmops = {
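
vmf_insert_pfn() folds the old vm_insert_pfn() call plus hard-coded VM_FAULT_NOPAGE return into a single call that yields a vm_fault_t directly, which is what motivates the type changes above. A hedged kernel-style sketch of the resulting fault-handler shape (my_resolve_pfn() is hypothetical):

/* Sketch of the converted pattern; my_resolve_pfn() is hypothetical. */
static vm_fault_t my_mmio_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long pfn = my_resolve_pfn(vma, vmf->pgoff);

        if (!pfn)
                return VM_FAULT_SIGBUS;

        /* Returns VM_FAULT_NOPAGE on success or a VM_FAULT_* error. */
        return vmf_insert_pfn(vma, vmf->address, pfn);
}
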
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 88876ae8f330..31695a078485 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -2,6 +2,7 @@
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
+#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
@@ -126,7 +127,7 @@ static void ack_irq(struct spa *spa, enum xsl_response r)
static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
- unsigned int flt = 0;
+ vm_fault_t flt = 0;
unsigned long access, flags, inv_flags = 0;
enum xsl_response r;
struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
@@ -136,7 +137,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work)
int rc;
/*
- * We need to release a reference on the mm whenever exiting this
+ * We must release a reference on mm_users whenever exiting this
* function (taken in the memory fault interrupt handler)
*/
rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
@@ -172,7 +173,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work)
}
r = RESTART;
ack:
- mmdrop(fault->pe_data.mm);
+ mmput(fault->pe_data.mm);
ack_irq(spa, r);
}
@@ -184,6 +185,7 @@ static irqreturn_t xsl_fault_handler(int irq, void *data)
struct pe_data *pe_data;
struct ocxl_process_element *pe;
int lpid, pid, tid;
+ bool schedule = false;
read_irq(spa, &dsisr, &dar, &pe_handle);
trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);
@@ -226,14 +228,19 @@ static irqreturn_t xsl_fault_handler(int irq, void *data)
}
WARN_ON(pe_data->mm->context.id != pid);
- spa->xsl_fault.pe = pe_handle;
- spa->xsl_fault.dar = dar;
- spa->xsl_fault.dsisr = dsisr;
- spa->xsl_fault.pe_data = *pe_data;
- mmgrab(pe_data->mm); /* mm count is released by bottom half */
-
+ if (mmget_not_zero(pe_data->mm)) {
+ spa->xsl_fault.pe = pe_handle;
+ spa->xsl_fault.dar = dar;
+ spa->xsl_fault.dsisr = dsisr;
+ spa->xsl_fault.pe_data = *pe_data;
+ schedule = true;
+ /* mm_users count released by bottom half */
+ }
rcu_read_unlock();
- schedule_work(&spa->xsl_fault.fault_work);
+ if (schedule)
+ schedule_work(&spa->xsl_fault.fault_work);
+ else
+ ack_irq(spa, ADDRESS_ERROR);
return IRQ_HANDLED;
}
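
The fix above switches from mmgrab()/mmdrop() (mm_count, which only keeps the struct alive) to mmget_not_zero()/mmput() (mm_users, which keeps the address space alive), so the deferred fault handler either gets a live address space or is not scheduled at all. A hedged sketch of the pairing across a work item (the helper name is ours):

/* Sketch: take an mm_users reference before deferring work that will
 * fault pages in; the bottom half must drop it with mmput(mm). */
static bool defer_fault_work(struct mm_struct *mm, struct work_struct *work)
{
        if (!mmget_not_zero(mm))
                return false;   /* address space already torn down */
        schedule_work(work);
        return true;
}
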
diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c
index d9753a1db14b..0ab1fd1b2682 100644
--- a/drivers/misc/ocxl/sysfs.c
+++ b/drivers/misc/ocxl/sysfs.c
@@ -64,7 +64,7 @@ static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj,
return count;
}
-static int global_mmio_fault(struct vm_fault *vmf)
+static vm_fault_t global_mmio_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxl_afu *afu = vma->vm_private_data;
@@ -75,8 +75,7 @@ static int global_mmio_fault(struct vm_fault *vmf)
offset = vmf->pgoff;
offset += (afu->global_mmio_start >> PAGE_SHIFT);
- vm_insert_pfn(vma, vmf->address, offset);
- return VM_FAULT_NOPAGE;
+ return vmf_insert_pfn(vma, vmf->address, offset);
}
static const struct vm_operations_struct global_mmio_vmops = {
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 7b370466a227..896e2df9400f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -35,38 +35,45 @@
#include <uapi/linux/pcitest.h>
-#define DRV_MODULE_NAME "pci-endpoint-test"
-
-#define PCI_ENDPOINT_TEST_MAGIC 0x0
-
-#define PCI_ENDPOINT_TEST_COMMAND 0x4
-#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
-#define COMMAND_RAISE_MSI_IRQ BIT(1)
-#define MSI_NUMBER_SHIFT 2
-/* 6 bits for MSI number */
-#define COMMAND_READ BIT(8)
-#define COMMAND_WRITE BIT(9)
-#define COMMAND_COPY BIT(10)
-
-#define PCI_ENDPOINT_TEST_STATUS 0x8
-#define STATUS_READ_SUCCESS BIT(0)
-#define STATUS_READ_FAIL BIT(1)
-#define STATUS_WRITE_SUCCESS BIT(2)
-#define STATUS_WRITE_FAIL BIT(3)
-#define STATUS_COPY_SUCCESS BIT(4)
-#define STATUS_COPY_FAIL BIT(5)
-#define STATUS_IRQ_RAISED BIT(6)
-#define STATUS_SRC_ADDR_INVALID BIT(7)
-#define STATUS_DST_ADDR_INVALID BIT(8)
-
-#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
+#define DRV_MODULE_NAME "pci-endpoint-test"
+
+#define IRQ_TYPE_UNDEFINED -1
+#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_MSI 1
+#define IRQ_TYPE_MSIX 2
+
+#define PCI_ENDPOINT_TEST_MAGIC 0x0
+
+#define PCI_ENDPOINT_TEST_COMMAND 0x4
+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_MSI_IRQ BIT(1)
+#define COMMAND_RAISE_MSIX_IRQ BIT(2)
+#define COMMAND_READ BIT(3)
+#define COMMAND_WRITE BIT(4)
+#define COMMAND_COPY BIT(5)
+
+#define PCI_ENDPOINT_TEST_STATUS 0x8
+#define STATUS_READ_SUCCESS BIT(0)
+#define STATUS_READ_FAIL BIT(1)
+#define STATUS_WRITE_SUCCESS BIT(2)
+#define STATUS_WRITE_FAIL BIT(3)
+#define STATUS_COPY_SUCCESS BIT(4)
+#define STATUS_COPY_FAIL BIT(5)
+#define STATUS_IRQ_RAISED BIT(6)
+#define STATUS_SRC_ADDR_INVALID BIT(7)
+#define STATUS_DST_ADDR_INVALID BIT(8)
+
+#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
-#define PCI_ENDPOINT_TEST_SIZE 0x1c
-#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
+#define PCI_ENDPOINT_TEST_SIZE 0x1c
+#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
+
+#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
+#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
static DEFINE_IDA(pci_endpoint_test_ida);
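
With the reworked register map, raising an interrupt from the endpoint is a three-write sequence: select the IRQ type, select the vector number, then issue the command. A hedged standalone sketch of that doorbell sequence, with mmio_write32() as a stand-in for pci_endpoint_test_writel():

#include <stdint.h>

#define PCI_ENDPOINT_TEST_COMMAND     0x4
#define COMMAND_RAISE_MSIX_IRQ        (1u << 2)
#define PCI_ENDPOINT_TEST_IRQ_TYPE    0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER  0x28
#define IRQ_TYPE_MSIX                 2

/* Stand-in for pci_endpoint_test_writel(). */
static void mmio_write32(volatile void *base, uint32_t off, uint32_t val)
{
        *(volatile uint32_t *)((volatile uint8_t *)base + off) = val;
}

static void raise_msix(volatile void *base, uint32_t vector)
{
        mmio_write32(base, PCI_ENDPOINT_TEST_IRQ_TYPE, IRQ_TYPE_MSIX);
        mmio_write32(base, PCI_ENDPOINT_TEST_IRQ_NUMBER, vector);
        mmio_write32(base, PCI_ENDPOINT_TEST_COMMAND, COMMAND_RAISE_MSIX_IRQ);
}
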
@@ -77,6 +84,10 @@ static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
+static int irq_type = IRQ_TYPE_MSI;
+module_param(irq_type, int, 0444);
+MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
+
enum pci_barno {
BAR_0,
BAR_1,
@@ -103,7 +114,7 @@ struct pci_endpoint_test {
struct pci_endpoint_test_data {
enum pci_barno test_reg_bar;
size_t alignment;
- bool no_msi;
+ int irq_type;
};
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
@@ -147,6 +158,100 @@ static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
+{
+ struct pci_dev *pdev = test->pdev;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
+ int type)
+{
+ int irq = -1;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ bool res = true;
+
+ switch (type) {
+ case IRQ_TYPE_LEGACY:
+ irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (irq < 0)
+ dev_err(dev, "Failed to get Legacy interrupt\n");
+ break;
+ case IRQ_TYPE_MSI:
+ irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
+ if (irq < 0)
+ dev_err(dev, "Failed to get MSI interrupts\n");
+ break;
+ case IRQ_TYPE_MSIX:
+ irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
+ if (irq < 0)
+ dev_err(dev, "Failed to get MSI-X interrupts\n");
+ break;
+ default:
+ dev_err(dev, "Invalid IRQ type selected\n");
+ }
+
+ if (irq < 0) {
+ irq = 0;
+ res = false;
+ }
+ test->num_irqs = irq;
+
+ return res;
+}
+
+static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
+{
+ int i;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+
+ for (i = 0; i < test->num_irqs; i++)
+ devm_free_irq(dev, pci_irq_vector(pdev, i), test);
+
+ test->num_irqs = 0;
+}
+
+static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
+{
+ int i;
+ int err;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+
+ for (i = 0; i < test->num_irqs; i++) {
+ err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ pci_endpoint_test_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, test);
+ if (err)
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ switch (irq_type) {
+ case IRQ_TYPE_LEGACY:
+ dev_err(dev, "Failed to request IRQ %d for Legacy\n",
+ pci_irq_vector(pdev, i));
+ break;
+ case IRQ_TYPE_MSI:
+ dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
+ pci_irq_vector(pdev, i),
+ i + 1);
+ break;
+ case IRQ_TYPE_MSIX:
+ dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
+ pci_irq_vector(pdev, i),
+ i + 1);
+ break;
+ }
+
+ return false;
+}
+
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
@@ -179,6 +284,9 @@ static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
u32 val;
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
+ IRQ_TYPE_LEGACY);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_RAISE_LEGACY_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
@@ -190,14 +298,18 @@ static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
}
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
- u8 msi_num)
+ u16 msi_num, bool msix)
{
u32 val;
struct pci_dev *pdev = test->pdev;
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
+ msix == false ? IRQ_TYPE_MSI :
+ IRQ_TYPE_MSIX);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- msi_num << MSI_NUMBER_SHIFT |
- COMMAND_RAISE_MSI_IRQ);
+ msix == false ? COMMAND_RAISE_MSI_IRQ :
+ COMMAND_RAISE_MSIX_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
@@ -230,6 +342,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ goto err;
+ }
+
orig_src_addr = dma_alloc_coherent(dev, size + alignment,
&orig_src_phys_addr, GFP_KERNEL);
if (!orig_src_addr) {
@@ -281,8 +398,10 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
+ COMMAND_COPY);
wait_for_completion(&test->irq_raised);
@@ -318,6 +437,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ goto err;
+ }
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -348,8 +472,10 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
+ COMMAND_READ);
wait_for_completion(&test->irq_raised);
@@ -379,6 +505,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ goto err;
+ }
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -403,8 +534,10 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
+ COMMAND_WRITE);
wait_for_completion(&test->irq_raised);
@@ -417,6 +550,38 @@ err:
return ret;
}
+static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
+ int req_irq_type)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+
+ if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ return false;
+ }
+
+ if (irq_type == req_irq_type)
+ return true;
+
+ pci_endpoint_test_release_irq(test);
+ pci_endpoint_test_free_irq_vectors(test);
+
+ if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
+ goto err;
+
+ if (!pci_endpoint_test_request_irq(test))
+ goto err;
+
+ irq_type = req_irq_type;
+ return true;
+
+err:
+ pci_endpoint_test_free_irq_vectors(test);
+ irq_type = IRQ_TYPE_UNDEFINED;
+ return false;
+}
+
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -436,7 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
ret = pci_endpoint_test_legacy_irq(test);
break;
case PCITEST_MSI:
- ret = pci_endpoint_test_msi_irq(test, arg);
+ case PCITEST_MSIX:
+ ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
break;
case PCITEST_WRITE:
ret = pci_endpoint_test_write(test, arg);
@@ -447,6 +613,12 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
case PCITEST_COPY:
ret = pci_endpoint_test_copy(test, arg);
break;
+ case PCITEST_SET_IRQTYPE:
+ ret = pci_endpoint_test_set_irq(test, arg);
+ break;
+ case PCITEST_GET_IRQTYPE:
+ ret = irq_type;
+ break;
}
ret:
@@ -462,9 +634,7 @@ static const struct file_operations pci_endpoint_test_fops = {
static int pci_endpoint_test_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int i;
int err;
- int irq = 0;
int id;
char name[20];
enum pci_barno bar;
@@ -486,11 +656,14 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->alignment = 0;
test->pdev = pdev;
+ if (no_msi)
+ irq_type = IRQ_TYPE_LEGACY;
+
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
test->alignment = data->alignment;
- no_msi = data->no_msi;
+ irq_type = data->irq_type;
}
init_completion(&test->irq_raised);
@@ -510,28 +683,11 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!no_msi) {
- irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (irq < 0)
- dev_err(dev, "Failed to get MSI interrupts\n");
- test->num_irqs = irq;
- }
-
- err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
- if (err) {
- dev_err(dev, "Failed to request IRQ %d\n", pdev->irq);
- goto err_disable_msi;
- }
+ if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
+ goto err_disable_irq;
- for (i = 1; i < irq; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
- pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
- if (err)
- dev_err(dev, "failed to request IRQ %d for MSI %d\n",
- pci_irq_vector(pdev, i), i + 1);
- }
+ if (!pci_endpoint_test_request_irq(test))
+ goto err_disable_irq;
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
@@ -590,12 +746,10 @@ err_iounmap:
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
+ pci_endpoint_test_release_irq(test);
- for (i = 0; i < irq; i++)
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
-
-err_disable_msi:
- pci_disable_msi(pdev);
+err_disable_irq:
+ pci_endpoint_test_free_irq_vectors(test);
pci_release_regions(pdev);
err_disable_pdev:
@@ -607,7 +761,6 @@ err_disable_pdev:
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
int id;
- int i;
enum pci_barno bar;
struct pci_endpoint_test *test = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &test->miscdev;
@@ -624,9 +777,10 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
- for (i = 0; i < test->num_irqs; i++)
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
- pci_disable_msi(pdev);
+
+ pci_endpoint_test_release_irq(test);
+ pci_endpoint_test_free_irq_vectors(test);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
}
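
The probe rework above routes Legacy, MSI and MSI-X setup through pci_alloc_irq_vectors(). A minimal sketch of the same allocate-then-request pattern, with placeholder demo_* names that are not part of this driver:

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_irq_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int demo_setup_irqs(struct pci_dev *pdev, unsigned int flags,
				   unsigned int max_vecs, void *ctx)
	{
		int nvecs, i, err;

		/* allocate between 1 and max_vecs vectors of the requested type */
		nvecs = pci_alloc_irq_vectors(pdev, 1, max_vecs, flags);
		if (nvecs < 0)
			return nvecs;

		/* attach one handler per allocated vector */
		for (i = 0; i < nvecs; i++) {
			err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
					       demo_irq_handler, IRQF_SHARED,
					       "demo", ctx);
			if (err)
				return err; /* caller frees the IRQs, then the vectors */
		}
		return nvecs;
	}

Passing PCI_IRQ_LEGACY, PCI_IRQ_MSI or PCI_IRQ_MSIX as flags selects the mode, mirroring the 0/1/2 values of the new irq_type parameter.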
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index a3454eb56fbf..be28f05bfafa 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -219,9 +219,10 @@ void gru_flush_all_tlb(struct gru_state *gru)
/*
* MMUOPS notifier callout functions
*/
-static void gru_invalidate_range_start(struct mmu_notifier *mn,
+static int gru_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ bool blockable)
{
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
ms_notifier);
@@ -231,6 +232,8 @@ static void gru_invalidate_range_start(struct mmu_notifier *mn,
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
start, end, atomic_read(&gms->ms_range_active));
gru_flush_tlb_range(gms, start, end - start);
+
+ return 0;
}
static void gru_invalidate_range_end(struct mmu_notifier *mn,
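
This hunk adapts the GRU callback to the notifier API change that lets invalidate_range_start() refuse to run when it must not block; GRU never sleeps here, so it can unconditionally return 0. A sketch of how a callback that does take a sleeping lock might honor the flag, with hypothetical demo_* names:

	#include <linux/mmu_notifier.h>
	#include <linux/mutex.h>

	struct demo_ctx {
		struct mmu_notifier notifier;
		struct mutex lock;	/* protects the flush machinery */
	};

	static int demo_invalidate_range_start(struct mmu_notifier *mn,
					       struct mm_struct *mm,
					       unsigned long start, unsigned long end,
					       bool blockable)
	{
		struct demo_ctx *ctx = container_of(mn, struct demo_ctx, notifier);

		if (blockable)
			mutex_lock(&ctx->lock);		/* sleeping is allowed */
		else if (!mutex_trylock(&ctx->lock))
			return -EAGAIN;			/* ask the caller to retry */

		/* ... invalidate [start, end) here ... */

		mutex_unlock(&ctx->lock);
		return 0;
	}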
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 128d5615c804..05a890ce2ab8 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -656,7 +656,6 @@ xpc_initiate_connect(int ch_number)
{
short partid;
struct xpc_partition *part;
- struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
@@ -664,8 +663,6 @@ xpc_initiate_connect(int ch_number)
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
- ch = &part->channels[ch_number];
-
/*
* Initiate the establishment of a connection on the
* newly registered channel to the remote partition.
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 7284413dabfd..0c3ef6f1df54 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -415,7 +415,6 @@ xpc_discovery(void)
int region_size;
int max_regions;
int nasid;
- struct xpc_rsvd_page *rp;
unsigned long *discovered_nasids;
enum xp_retval ret;
@@ -432,8 +431,6 @@ xpc_discovery(void)
return;
}
- rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
-
/*
* The term 'region' in this context refers to the minimum number of
* nodes that can comprise an access protection grouping. The access
@@ -449,8 +446,10 @@ xpc_discovery(void)
switch (region_size) {
case 128:
max_regions *= 2;
+ /* fall through */
case 64:
max_regions *= 2;
+ /* fall through */
case 32:
max_regions *= 2;
region_size = 16;
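
The new /* fall through */ comments document that each case is meant to drop into the next, doubling max_regions once per step (and they silence -Wimplicit-fallthrough). The cascade in isolation, as a standalone C program with an illustrative base value:

	#include <stdio.h>

	static int max_regions_for(int region_size)
	{
		int max_regions = 64;	/* illustrative, not the driver's value */

		switch (region_size) {
		case 128:
			max_regions *= 2;
			/* fall through */
		case 64:
			max_regions *= 2;
			/* fall through */
		case 32:
			max_regions *= 2;
			break;
		}
		return max_regions;
	}

	int main(void)
	{
		/* prints 512 256 128 */
		printf("%d %d %d\n", max_regions_for(128),
		       max_regions_for(64), max_regions_for(32));
		return 0;
	}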
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index c5dc6095686a..74b183baf044 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -391,29 +391,37 @@ static int sram_probe(struct platform_device *pdev)
if (IS_ERR(sram->pool))
return PTR_ERR(sram->pool);
- ret = sram_reserve_regions(sram, res);
- if (ret)
- return ret;
-
sram->clk = devm_clk_get(sram->dev, NULL);
if (IS_ERR(sram->clk))
sram->clk = NULL;
else
clk_prepare_enable(sram->clk);
+ ret = sram_reserve_regions(sram, res);
+ if (ret)
+ goto err_disable_clk;
+
platform_set_drvdata(pdev, sram);
init_func = of_device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func();
if (ret)
- return ret;
+ goto err_free_partitions;
}
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
gen_pool_size(sram->pool) / 1024, sram->virt_base);
return 0;
+
+err_free_partitions:
+ sram_free_partitions(sram);
+err_disable_clk:
+ if (sram->clk)
+ clk_disable_unprepare(sram->clk);
+
+ return ret;
}
static int sram_remove(struct platform_device *pdev)
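
The reordering above restores the usual probe rule: every failure after a resource is acquired must release that resource, in exact reverse order of acquisition. The shape of the pattern, reduced to stub helpers so it compiles on its own:

	#include <stdio.h>

	static int  enable_clock(void)    { puts("clk on");        return 0; }
	static void disable_clock(void)   { puts("clk off");       }
	static int  reserve_regions(void) { puts("regions");       return 0; }
	static void free_regions(void)    { puts("regions freed"); }
	static int  run_init_hook(void)   { puts("init");          return -1; }

	static int probe(void)
	{
		int ret;

		ret = enable_clock();
		if (ret)
			return ret;

		ret = reserve_regions();
		if (ret)
			goto err_disable_clock;

		ret = run_init_hook();	/* fails here, forcing the unwind */
		if (ret)
			goto err_free_regions;

		return 0;

	err_free_regions:	/* unwind in reverse order of setup */
		free_regions();
	err_disable_clock:
		disable_clock();
		return ret;
	}

	int main(void) { return probe() ? 1 : 0; }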
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index f34dcc514730..5bb92698bc80 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -5,7 +5,8 @@
menu "Texas Instruments shared transport line discipline"
config TI_ST
tristate "Shared transport core driver"
- depends on NET && GPIOLIB && TTY
+ depends on NET && TTY
+ depends on GPIOLIB || COMPILE_TEST
select FW_LOADER
help
This enables the shared transport core driver for TI
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5ec3f5a43718..1874ac922166 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -138,7 +138,7 @@ static void kim_int_recv(struct kim_data_s *kim_gdata,
const unsigned char *data, long count)
{
const unsigned char *ptr;
- int len = 0, type = 0;
+ int len = 0;
unsigned char *plen;
pr_debug("%s", __func__);
@@ -183,7 +183,6 @@ static void kim_int_recv(struct kim_data_s *kim_gdata,
case 0x04:
kim_gdata->rx_state = ST_W4_HEADER;
kim_gdata->rx_count = 2;
- type = *ptr;
break;
default:
pr_info("unknown packet");
@@ -756,14 +755,14 @@ static int kim_probe(struct platform_device *pdev)
err = gpio_request(kim_gdata->nshutdown, "kim");
if (unlikely(err)) {
pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}
/* Configure nShutdown GPIO as output=0 */
err = gpio_direction_output(kim_gdata->nshutdown, 0);
if (unlikely(err)) {
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}
/* get reference of pdev for request_firmware
*/
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index adf46072cb37..3fce3b6a3624 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
} else
lux = 0;
else
- return -EAGAIN;
+ return 0;
/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 80a6f199077c..6c3591cdf855 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -258,13 +258,9 @@ static int vexpress_syscfg_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&syscfg->funcs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!syscfg->base)
- return -EFAULT;
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
/* Must use dev.parent (MFD), as that's where DT phandle points at... */
bridge = vexpress_config_bridge_register(pdev->dev.parent,
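
devm_ioremap_resource() folds the request_mem_region() and ioremap() pair into one call, logs failures itself, and reports errors as an ERR_PTR-encoded errno rather than NULL, hence the IS_ERR()/PTR_ERR() handling above. A minimal sketch of the pattern in a hypothetical probe:

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* claims the region and maps it; errors come back as ERR_PTR */
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... readl(base + offset) / writel(val, base + offset) ... */
		return 0;
	}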
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index efd733472a35..2543ef1ece17 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* VMware Balloon driver.
*
- * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License and no later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained by: Xavier Deguillard <xdeguillard@vmware.com>
- * Philip Moltmann <moltmann@vmware.com>
- */
-
-/*
* This is VMware physical memory management driver for Linux. The driver
* acts like a "balloon" that can be inflated to reclaim physical pages by
* reserving them in the guest and invalidating them in the monitor,
@@ -55,25 +37,6 @@ MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
- * Various constants controlling rate of inflaint/deflating balloon,
- * measured in pages.
- */
-
-/*
- * Rates of memory allocaton when guest experiences memory pressure
- * (driver performs sleeping allocations).
- */
-#define VMW_BALLOON_RATE_ALLOC_MIN 512U
-#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
-#define VMW_BALLOON_RATE_ALLOC_INC 16U
-
-/*
- * When guest is under memory pressure, use a reduced page allocation
- * rate for next several cycles.
- */
-#define VMW_BALLOON_SLOW_CYCLES 4
-
-/*
* Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
* allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
* __GFP_NOWARN, to suppress page allocation failure warnings.
@@ -284,12 +247,6 @@ struct vmballoon {
/* reset flag */
bool reset_required;
- /* adjustment rates (pages per second) */
- unsigned int rate_alloc;
-
- /* slowdown page allocations for next few cycles */
- unsigned int slow_allocation_cycles;
-
unsigned long capabilities;
struct vmballoon_batch_page *batch_page;
@@ -341,7 +298,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
success = false;
}
- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
+ /*
+ * 2MB pages are only supported with batching. If batching is for some
+ * reason disabled, do not use 2MB pages, since otherwise the legacy
+ * mechanism is used with 2MB pages, causing a failure.
+ */
+ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
+ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
b->supported_page_sizes = 2;
else
b->supported_page_sizes = 1;
@@ -450,7 +413,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pfn32 = (u32)pfn;
if (pfn32 != pfn)
- return -1;
+ return -EINVAL;
STATS_INC(b->stats.lock[false]);
@@ -460,14 +423,14 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail[false]);
- return 1;
+ return -EIO;
}
static int vmballoon_send_batched_lock(struct vmballoon *b,
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
unsigned long status;
- unsigned long pfn = page_to_pfn(b->page);
+ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
STATS_INC(b->stats.lock[is_2m_pages]);
@@ -515,7 +478,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
unsigned long status;
- unsigned long pfn = page_to_pfn(b->page);
+ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
STATS_INC(b->stats.unlock[is_2m_pages]);
@@ -597,11 +560,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
target);
- if (locked > 0) {
+ if (locked) {
STATS_INC(b->stats.refused_alloc[false]);
- if (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+ if (locked == -EIO &&
+ (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
vmballoon_free_page(page, false);
return -EIO;
}
@@ -617,7 +581,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
} else {
vmballoon_free_page(page, false);
}
- return -EIO;
+ return locked;
}
/* track allocated page */
@@ -790,8 +754,6 @@ static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
*/
static void vmballoon_inflate(struct vmballoon *b)
{
- unsigned rate;
- unsigned int allocations = 0;
unsigned int num_pages = 0;
int error = 0;
gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
@@ -818,17 +780,9 @@ static void vmballoon_inflate(struct vmballoon *b)
* Start with no sleep allocation rate which may be higher
* than sleeping allocation rate.
*/
- if (b->slow_allocation_cycles) {
- rate = b->rate_alloc;
- is_2m_pages = false;
- } else {
- rate = UINT_MAX;
- is_2m_pages =
- b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
- }
+ is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
- pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
- __func__, b->target - b->size, rate, b->rate_alloc);
+ pr_debug("%s - goal: %d", __func__, b->target - b->size);
while (!b->reset_required &&
b->size + num_pages * vmballoon_page_size(is_2m_pages)
@@ -861,31 +815,24 @@ static void vmballoon_inflate(struct vmballoon *b)
if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
/*
* CANSLEEP page allocation failed, so guest
- * is under severe memory pressure. Quickly
- * decrease allocation rate.
+ * is under severe memory pressure. Just log the
+ * event; unlike before, the allocation rate is
+ * not reduced, since rate limiting itself
+ * degrades performance.
*/
- b->rate_alloc = max(b->rate_alloc / 2,
- VMW_BALLOON_RATE_ALLOC_MIN);
STATS_INC(b->stats.sleep_alloc_fail);
break;
}
/*
* NOSLEEP page allocation failed, so the guest is
- * under memory pressure. Let us slow down page
- * allocations for next few cycles so that the guest
- * gets out of memory pressure. Also, if we already
- * allocated b->rate_alloc pages, let's pause,
- * otherwise switch to sleeping allocations.
+ * under memory pressure. Slowing down page allocations
+ * seems to be reasonable, but doing so might actually
+ * cause the hypervisor to throttle us down, resulting
+ * in degraded performance. We will count on the
+ * scheduler and standard memory management mechanisms
+ * for now.
*/
- b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
-
- if (allocations >= b->rate_alloc)
- break;
-
flags = VMW_PAGE_ALLOC_CANSLEEP;
- /* Lower rate for sleeping allocations. */
- rate = b->rate_alloc;
continue;
}
@@ -899,28 +846,11 @@ static void vmballoon_inflate(struct vmballoon *b)
}
cond_resched();
-
- if (allocations >= rate) {
- /* We allocated enough pages, let's take a break. */
- break;
- }
}
if (num_pages > 0)
b->ops->lock(b, num_pages, is_2m_pages, &b->target);
- /*
- * We reached our goal without failures so try increasing
- * allocation rate.
- */
- if (error == 0 && allocations >= b->rate_alloc) {
- unsigned int mult = allocations / b->rate_alloc;
-
- b->rate_alloc =
- min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
- VMW_BALLOON_RATE_ALLOC_MAX);
- }
-
vmballoon_release_refused_pages(b, true);
vmballoon_release_refused_pages(b, false);
}
@@ -1029,29 +959,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
- int error = 0;
+ unsigned long error, dummy;
- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
- error = vmci_doorbell_create(&b->vmci_doorbell,
- VMCI_FLAG_DELAYED_CB,
- VMCI_PRIVILEGE_FLAG_RESTRICTED,
- vmballoon_doorbell, b);
-
- if (error == VMCI_SUCCESS) {
- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
- b->vmci_doorbell.context,
- b->vmci_doorbell.resource, error);
- STATS_INC(b->stats.doorbell_set);
- }
- }
+ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+ return 0;
- if (error != 0) {
- vmballoon_vmci_cleanup(b);
+ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+ VMCI_PRIVILEGE_FLAG_RESTRICTED,
+ vmballoon_doorbell, b);
- return -EIO;
- }
+ if (error != VMCI_SUCCESS)
+ goto fail;
+
+ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
+ b->vmci_doorbell.resource, dummy);
+
+ STATS_INC(b->stats.doorbell_set);
+
+ if (error != VMW_BALLOON_SUCCESS)
+ goto fail;
return 0;
+fail:
+ vmballoon_vmci_cleanup(b);
+ return -EIO;
}
/*
@@ -1114,9 +1045,6 @@ static void vmballoon_work(struct work_struct *work)
if (b->reset_required)
vmballoon_reset(b);
- if (b->slow_allocation_cycles > 0)
- b->slow_allocation_cycles--;
-
if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
/* update target, adjust size */
b->target = target;
@@ -1160,11 +1088,6 @@ static int vmballoon_debug_show(struct seq_file *f, void *offset)
"current: %8d pages\n",
b->target, b->size);
- /* format rate info */
- seq_printf(f,
- "rateSleepAlloc: %8d pages/sec\n",
- b->rate_alloc);
-
seq_printf(f,
"\n"
"timer: %8u\n"
@@ -1271,9 +1194,6 @@ static int __init vmballoon_init(void)
INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
}
- /* initialize rates */
- balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
-
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
error = vmballoon_debugfs_init(&balloon);
@@ -1289,7 +1209,14 @@ static int __init vmballoon_init(void)
return 0;
}
-module_init(vmballoon_init);
+
+/*
+ * Using late_initcall() instead of module_init() allows the balloon to use the
+ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
+ * VMCI is probed only after the balloon is initialized. If the balloon is used
+ * as a module, late_initcall() is equivalent to module_init().
+ */
+late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index b4d7774cfe07..bd52f29b4a4e 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -668,7 +668,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
- if (retval < produce_q->kernel_if->num_pages) {
+ if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -680,7 +680,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
- if (retval < consume_q->kernel_if->num_pages) {
+ if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
@@ -2214,7 +2214,6 @@ int vmci_qp_broker_map(struct vmci_handle handle,
{
struct qp_broker_entry *entry;
const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = false;
int result;
if (vmci_handle_is_invalid(handle) || !context ||
@@ -2243,7 +2242,6 @@ int vmci_qp_broker_map(struct vmci_handle handle,
goto out;
}
- is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
result = VMCI_SUCCESS;
if (context_id != VMCI_HOST_CONTEXT_ID) {
@@ -2325,7 +2323,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
{
struct qp_broker_entry *entry;
const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = false;
int result;
if (vmci_handle_is_invalid(handle) || !context ||
@@ -2354,8 +2351,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
goto out;
}
- is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
-
if (context_id != VMCI_HOST_CONTEXT_ID) {
qp_acquire_queue_mutex(entry->produce_q);
result = qp_save_headers(entry);
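
The (int) casts matter because get_user_pages_fast() can return a negative errno while num_pages is unsigned: without the cast, the comparison is performed in unsigned arithmetic, the error value compares as huge, and the failure branch is never taken. The pitfall, reproduced in plain C:

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_pages = 4;
		int retval = -14;	/* e.g. -EFAULT from get_user_pages_fast() */

		/* retval converts to unsigned: -14 becomes a huge positive value */
		if (retval < num_pages)
			puts("never reached");

		/* comparing as signed catches the failure */
		if (retval < (int)num_pages)
			puts("error detected");

		return 0;
	}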
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 281826d1fcca..50a5c340307b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2078,7 +2078,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
/* Do not retry else we can't see errors */
err = mmc_wait_for_cmd(card->host, &cmd, 0);
- if (err || (cmd.resp[0] & 0xFDF92000)) {
+ if (err || R1_STATUS(cmd.resp[0])) {
pr_err("error %d requesting status %#x\n",
err, cmd.resp[0]);
err = -EIO;
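
Replacing the raw 0xFDF92000 with R1_STATUS() keeps the set of fatal R1 error bits defined once, next to the other response-bit macros. A hedged stand-alone illustration (the real macro lives in include/linux/mmc/mmc.h; DEMO_* below is a stand-in built from the literal this hunk removes):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_R1_ERROR_BITS	0xFDF92000u
	#define DEMO_R1_STATUS(x)	((x) & DEMO_R1_ERROR_BITS)

	int main(void)
	{
		uint32_t resp = 1u << 19;	/* an error bit inside the mask */

		if (DEMO_R1_STATUS(resp))
			puts("card reported an error");
		return 0;
	}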
@@ -2716,52 +2716,6 @@ void mmc_stop_host(struct mmc_host *host)
mmc_release_host(host);
}
-int mmc_power_save_host(struct mmc_host *host)
-{
- int ret = 0;
-
- pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__);
-
- mmc_bus_get(host);
-
- if (!host->bus_ops || host->bus_dead) {
- mmc_bus_put(host);
- return -EINVAL;
- }
-
- if (host->bus_ops->power_save)
- ret = host->bus_ops->power_save(host);
-
- mmc_bus_put(host);
-
- mmc_power_off(host);
-
- return ret;
-}
-EXPORT_SYMBOL(mmc_power_save_host);
-
-int mmc_power_restore_host(struct mmc_host *host)
-{
- int ret;
-
- pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__);
-
- mmc_bus_get(host);
-
- if (!host->bus_ops || host->bus_dead) {
- mmc_bus_put(host);
- return -EINVAL;
- }
-
- mmc_power_up(host, host->card->ocr);
- ret = host->bus_ops->power_restore(host);
-
- mmc_bus_put(host);
-
- return ret;
-}
-EXPORT_SYMBOL(mmc_power_restore_host);
-
#ifdef CONFIG_PM_SLEEP
/* Do the card removal on suspend if card is assumed removeable
* Do that in pm notifier while userspace isn't yet frozen, so we will be able
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 9d8f09ac0821..087ba68b2920 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -28,8 +28,6 @@ struct mmc_bus_ops {
int (*resume)(struct mmc_host *);
int (*runtime_suspend)(struct mmc_host *);
int (*runtime_resume)(struct mmc_host *);
- int (*power_save)(struct mmc_host *);
- int (*power_restore)(struct mmc_host *);
int (*alive)(struct mmc_host *);
int (*shutdown)(struct mmc_host *);
int (*hw_reset)(struct mmc_host *);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4466f5de54d4..bc1bd2c25613 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1169,6 +1169,10 @@ static int mmc_select_hs400(struct mmc_card *card)
/* Set host controller to HS timing */
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ /* Prepare host to downgrade to HS timing */
+ if (host->ops->hs400_downgrade)
+ host->ops->hs400_downgrade(host);
+
/* Reduce frequency to HS frequency */
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
@@ -1209,6 +1213,9 @@ static int mmc_select_hs400(struct mmc_card *card)
if (err)
goto out_err;
+ if (host->ops->hs400_complete)
+ host->ops->hs400_complete(host);
+
return 0;
out_err:
@@ -1256,6 +1263,9 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS);
+ if (host->ops->hs400_downgrade)
+ host->ops->hs400_downgrade(host);
+
err = mmc_switch_status(card);
if (err)
goto out_err;
@@ -1338,8 +1348,12 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
err = mmc_select_bus_width(card);
- if (err < 0)
+ if (err != MMC_BUS_WIDTH_8) {
+ pr_err("%s: switch to 8bit bus width failed, err:%d\n",
+ mmc_hostname(host), err);
+ err = err < 0 ? err : -ENOTSUPP;
goto out_err;
+ }
/* Switch card to HS mode */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 42d6aa89a48a..873b2aa0c155 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -417,7 +417,7 @@ static int mmc_switch_status_error(struct mmc_host *host, u32 status)
if (status & R1_SPI_ILLEGAL_COMMAND)
return -EBADMSG;
} else {
- if (status & 0xFDFFA000)
+ if (R1_STATUS(status))
pr_warn("%s: unexpected status %#x after switch\n",
mmc_hostname(host), status);
if (status & R1_SWITCH_ERROR)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index a86490dbca70..d8e17ea6126d 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1076,7 +1076,6 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
.resume = mmc_sdio_resume,
.runtime_suspend = mmc_sdio_runtime_suspend,
.runtime_resume = mmc_sdio_runtime_resume,
- .power_restore = mmc_sdio_power_restore,
.alive = mmc_sdio_alive,
.hw_reset = mmc_sdio_hw_reset,
.sw_reset = mmc_sdio_sw_reset,
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index ef05e0039378..2a833686784b 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -27,8 +27,8 @@ struct mmc_gpio {
bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
- char cd_label[0];
u32 cd_debounce_delay_ms;
+ char cd_label[];
};
static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
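
Moving cd_label behind cd_debounce_delay_ms turns it into a proper C99 flexible array member, which must be the last field of its struct; the old char cd_label[0] placement relied on a GNU zero-length-array extension. Allocation then over-sizes the struct by the string length, as in this self-contained example:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct label_holder {
		int id;
		char label[];	/* flexible array member: must come last */
	};

	int main(void)
	{
		const char *name = "cd";
		struct label_holder *h = malloc(sizeof(*h) + strlen(name) + 1);

		if (!h)
			return 1;
		h->id = 0;
		strcpy(h->label, name);	/* storage sits right after the struct */
		printf("%s\n", h->label);
		free(h);
		return 0;
	}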
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 0581c199c996..694d0828215d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -176,6 +176,17 @@ config MMC_SDHCI_OF_HLWD
If unsure, say N.
+config MMC_SDHCI_OF_DWCMSHC
+ tristate "SDHCI OF support for the Synopsys DWC MSHC"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ depends on COMMON_CLK
+ help
+ This selects Synopsys DesignWare Cores Mobile Storage Controller
+ support.
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
config MMC_SDHCI_CADENCE
tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
depends on MMC_SDHCI_PLTFM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 85dc1322c3de..ce8398e6f2c0 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -11,7 +11,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o
+sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o \
+ sdhci-pci-dwc-mshc.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
@@ -82,6 +83,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o
obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
+obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index a84aa3f1ae85..ab47b018716a 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -175,6 +175,20 @@ static int dw_mci_exynos_runtime_resume(struct device *dev)
return ret;
}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * dw_mci_exynos_suspend_noirq - Exynos-specific suspend code
+ *
+ * This ensures that the device will be in the runtime-active state in
+ * dw_mci_exynos_resume_noirq() after pm_runtime_force_resume() is called.
+ */
+static int dw_mci_exynos_suspend_noirq(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+ return pm_runtime_force_suspend(dev);
+}
/**
* dw_mci_exynos_resume_noirq - Exynos-specific resume code
@@ -186,12 +200,16 @@ static int dw_mci_exynos_runtime_resume(struct device *dev)
*
* We run this code on all exynos variants because it doesn't hurt.
*/
-
static int dw_mci_exynos_resume_noirq(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
struct dw_mci_exynos_priv_data *priv = host->priv;
u32 clksel;
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
@@ -207,11 +225,11 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
mci_writel(host, CLKSEL, clksel);
}
+ pm_runtime_put(dev);
+
return 0;
}
-#else
-#define dw_mci_exynos_resume_noirq NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
@@ -553,14 +571,11 @@ static int dw_mci_exynos_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend_noirq,
+ dw_mci_exynos_resume_noirq)
SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
dw_mci_exynos_runtime_resume,
NULL)
- .resume_noirq = dw_mci_exynos_resume_noirq,
- .thaw_noirq = dw_mci_exynos_resume_noirq,
- .restore_noirq = dw_mci_exynos_resume_noirq,
};
static struct platform_driver dw_mci_exynos_pltfm_driver = {
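
The same pairing in skeleton form, assuming a driver whose runtime-PM callbacks are already registered (demo_* names are hypothetical): the noirq suspend pins a runtime-PM reference so the matching resume can force-resume, redo its state, and drop the reference.

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int demo_suspend_noirq(struct device *dev)
	{
		/* keep a reference so resume_noirq sees the device runtime-active */
		pm_runtime_get_noresume(dev);
		return pm_runtime_force_suspend(dev);
	}

	static int demo_resume_noirq(struct device *dev)
	{
		int ret = pm_runtime_force_resume(dev);

		if (ret)
			return ret;

		/* ... restore any register state lost across suspend ... */

		pm_runtime_put(dev);
		return 0;
	}

	static const struct dev_pm_ops demo_pm_ops = {
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_suspend_noirq, demo_resume_noirq)
	};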
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 623f4d27fa01..80dc2fd6576c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
* It's used when HS400 mode is enabled.
*/
if (data->flags & MMC_DATA_WRITE &&
- !(host->timing != MMC_TIMING_MMC_HS400))
- return;
+ host->timing != MMC_TIMING_MMC_HS400)
+ goto disable;
if (data->flags & MMC_DATA_WRITE)
enable = SDMMC_CARD_WR_THR_EN;
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
enable = SDMMC_CARD_RD_THR_EN;
if (host->timing != MMC_TIMING_MMC_HS200 &&
- host->timing != MMC_TIMING_UHS_SDR104)
+ host->timing != MMC_TIMING_UHS_SDR104 &&
+ host->timing != MMC_TIMING_MMC_HS400)
goto disable;
blksz_depth = blksz / (1 << host->data_shift);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f1849775e47e..1841d250e9e2 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -48,78 +48,6 @@
static unsigned int fmax = 515633;
-/**
- * struct variant_data - MMCI variant-specific quirks
- * @clkreg: default value for MCICLOCK register
- * @clkreg_enable: enable value for MMCICLOCK register
- * @clkreg_8bit_bus_enable: enable value for 8 bit bus
- * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
- * @datalength_bits: number of bits in the MMCIDATALENGTH register
- * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
- * is asserted (likewise for RX)
- * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
- * is asserted (likewise for RX)
- * @data_cmd_enable: enable value for data commands.
- * @st_sdio: enable ST specific SDIO logic
- * @st_clkdiv: true if using a ST-specific clock divider algorithm
- * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
- * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
- * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
- * register
- * @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @pwrreg_powerup: power up value for MMCIPOWER register
- * @f_max: maximum clk frequency supported by the controller.
- * @signal_direction: input/out direction of bus signals can be indicated
- * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
- * @busy_detect: true if the variant supports busy detection on DAT0.
- * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
- * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
- * indicating that the card is busy
- * @busy_detect_mask: bitmask identifying the bit in the MMCIMASK0 to mask for
- * getting busy end detection interrupts
- * @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply
- * @explicit_mclk_control: enable explicit mclk control in driver.
- * @qcom_fifo: enables qcom specific fifo pio read logic.
- * @qcom_dml: enables qcom specific dma glue for dma transfers.
- * @reversed_irq_handling: handle data irq before cmd irq.
- * @mmcimask1: true if variant have a MMCIMASK1 register.
- * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS
- * register.
- * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
- */
-struct variant_data {
- unsigned int clkreg;
- unsigned int clkreg_enable;
- unsigned int clkreg_8bit_bus_enable;
- unsigned int clkreg_neg_edge_enable;
- unsigned int datalength_bits;
- unsigned int fifosize;
- unsigned int fifohalfsize;
- unsigned int data_cmd_enable;
- unsigned int datactrl_mask_ddrmode;
- unsigned int datactrl_mask_sdio;
- bool st_sdio;
- bool st_clkdiv;
- bool blksz_datactrl16;
- bool blksz_datactrl4;
- u32 pwrreg_powerup;
- u32 f_max;
- bool signal_direction;
- bool pwrreg_clkgate;
- bool busy_detect;
- u32 busy_dpsm_flag;
- u32 busy_detect_flag;
- u32 busy_detect_mask;
- bool pwrreg_nopower;
- bool explicit_mclk_control;
- bool qcom_fifo;
- bool qcom_dml;
- bool reversed_irq_handling;
- bool mmcimask1;
- u32 start_err;
- u32 opendrain;
-};
-
static struct variant_data variant_arm = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
@@ -280,6 +208,7 @@ static struct variant_data variant_qcom = {
.mmcimask1 = true,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
+ .init = qcom_variant_init,
};
/* Busy detection for the ST Micro variant */
@@ -489,7 +418,6 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
static void mmci_dma_setup(struct mmci_host *host)
{
const char *rxname, *txname;
- struct variant_data *variant = host->variant;
host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
@@ -537,9 +465,8 @@ static void mmci_dma_setup(struct mmci_host *host)
host->mmc->max_seg_size = max_seg_size;
}
- if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
- if (dml_hw_init(host, host->mmc->parent->of_node))
- variant->qcom_dml = false;
+ if (host->ops && host->ops->dma_setup)
+ host->ops->dma_setup(host);
}
/*
@@ -1706,6 +1633,9 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
+ if (variant->init)
+ variant->init(host);
+
/*
* The ARM and ST versions of the block have slightly different
* clock divider equations which means that the minimum divider
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index f91cdf7f6dae..517591d219e9 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -195,8 +195,86 @@
#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
struct clk;
-struct variant_data;
struct dma_chan;
+struct mmci_host;
+
+/**
+ * struct variant_data - MMCI variant-specific quirks
+ * @clkreg: default value for MCICLOCK register
+ * @clkreg_enable: enable value for MMCICLOCK register
+ * @clkreg_8bit_bus_enable: enable value for 8 bit bus
+ * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
+ * @datalength_bits: number of bits in the MMCIDATALENGTH register
+ * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
+ * is asserted (likewise for RX)
+ * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
+ * is asserted (likewise for RX)
+ * @data_cmd_enable: enable value for data commands.
+ * @st_sdio: enable ST specific SDIO logic
+ * @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
+ * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
+ * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
+ * register
+ * @datactrl_mask_sdio: SDIO enable mask in datactrl register
+ * @pwrreg_powerup: power up value for MMCIPOWER register
+ * @f_max: maximum clk frequency supported by the controller.
+ * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
+ * @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
+ * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
+ * indicating that the card is busy
+ * @busy_detect_mask: bitmask identifying the bit in the MMCIMASK0 to mask for
+ * getting busy end detection interrupts
+ * @pwrreg_nopower: bits in MMCIPOWER don't control ext. power supply
+ * @explicit_mclk_control: enable explicit mclk control in driver.
+ * @qcom_fifo: enables qcom specific fifo pio read logic.
+ * @qcom_dml: enables qcom specific dma glue for dma transfers.
+ * @reversed_irq_handling: handle data irq before cmd irq.
+ * @mmcimask1: true if the variant has a MMCIMASK1 register.
+ * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS
+ * register.
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
+ */
+struct variant_data {
+ unsigned int clkreg;
+ unsigned int clkreg_enable;
+ unsigned int clkreg_8bit_bus_enable;
+ unsigned int clkreg_neg_edge_enable;
+ unsigned int datalength_bits;
+ unsigned int fifosize;
+ unsigned int fifohalfsize;
+ unsigned int data_cmd_enable;
+ unsigned int datactrl_mask_ddrmode;
+ unsigned int datactrl_mask_sdio;
+ bool st_sdio;
+ bool st_clkdiv;
+ bool blksz_datactrl16;
+ bool blksz_datactrl4;
+ u32 pwrreg_powerup;
+ u32 f_max;
+ bool signal_direction;
+ bool pwrreg_clkgate;
+ bool busy_detect;
+ u32 busy_dpsm_flag;
+ u32 busy_detect_flag;
+ u32 busy_detect_mask;
+ bool pwrreg_nopower;
+ bool explicit_mclk_control;
+ bool qcom_fifo;
+ bool qcom_dml;
+ bool reversed_irq_handling;
+ bool mmcimask1;
+ u32 start_err;
+ u32 opendrain;
+ void (*init)(struct mmci_host *host);
+};
+
+/* mmci variant callbacks */
+struct mmci_host_ops {
+ void (*dma_setup)(struct mmci_host *host);
+};
struct mmci_host_next {
struct dma_async_tx_descriptor *dma_desc;
@@ -228,6 +306,7 @@ struct mmci_host {
u32 mask1_reg;
bool vqmmc_enabled;
struct mmci_platform_data *plat;
+ struct mmci_host_ops *ops;
struct variant_data *variant;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c
index 00750c9d3514..be3fab5db83f 100644
--- a/drivers/mmc/host/mmci_qcom_dml.c
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -119,17 +119,20 @@ static int of_get_dml_pipe_index(struct device_node *np, const char *name)
}
/* Initialize the dml hardware connected to SD Card controller */
-int dml_hw_init(struct mmci_host *host, struct device_node *np)
+static void qcom_dma_setup(struct mmci_host *host)
{
u32 config;
void __iomem *base;
int consumer_id, producer_id;
+ struct device_node *np = host->mmc->parent->of_node;
consumer_id = of_get_dml_pipe_index(np, "tx");
producer_id = of_get_dml_pipe_index(np, "rx");
- if (producer_id < 0 || consumer_id < 0)
- return -ENODEV;
+ if (producer_id < 0 || consumer_id < 0) {
+ host->variant->qcom_dml = false;
+ return;
+ }
base = host->base + DML_OFFSET;
@@ -172,6 +175,13 @@ int dml_hw_init(struct mmci_host *host, struct device_node *np)
/* Make sure dml initialization is finished */
mb();
+}
- return 0;
+static struct mmci_host_ops qcom_variant_ops = {
+ .dma_setup = qcom_dma_setup,
+};
+
+void qcom_variant_init(struct mmci_host *host)
+{
+ host->ops = &qcom_variant_ops;
}
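
With the DML glue now behind an ops table, the core calls the hook only when a variant installed one. The callback-table idea in a compilable miniature, with widget names invented for the example:

	#include <stdio.h>

	struct widget;

	struct widget_ops {
		void (*dma_setup)(struct widget *w);	/* optional per-variant hook */
	};

	struct widget {
		const struct widget_ops *ops;
		int dma_ready;
	};

	static void fancy_dma_setup(struct widget *w)
	{
		w->dma_ready = 1;
	}

	static const struct widget_ops fancy_ops = {
		.dma_setup = fancy_dma_setup,
	};

	static void widget_dma_setup(struct widget *w)
	{
		/* core stays variant-agnostic: call the hook only if it is set */
		if (w->ops && w->ops->dma_setup)
			w->ops->dma_setup(w);
	}

	int main(void)
	{
		struct widget plain = { 0 }, fancy = { .ops = &fancy_ops };

		widget_dma_setup(&plain);	/* no hook: nothing happens */
		widget_dma_setup(&fancy);
		printf("%d %d\n", plain.dma_ready, fancy.dma_ready); /* 0 1 */
		return 0;
	}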
diff --git a/drivers/mmc/host/mmci_qcom_dml.h b/drivers/mmc/host/mmci_qcom_dml.h
index 6e405d09d534..fa16f6f4d4ad 100644
--- a/drivers/mmc/host/mmci_qcom_dml.h
+++ b/drivers/mmc/host/mmci_qcom_dml.h
@@ -16,12 +16,11 @@
#define __MMC_QCOM_DML_H__
#ifdef CONFIG_MMC_QCOM_DML
-int dml_hw_init(struct mmci_host *host, struct device_node *np);
+void qcom_variant_init(struct mmci_host *host);
void dml_start_xfer(struct mmci_host *host, struct mmc_data *data);
#else
-static inline int dml_hw_init(struct mmci_host *host, struct device_node *np)
+static inline void qcom_variant_init(struct mmci_host *host)
{
- return -ENOSYS;
}
static inline void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
{
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 75f781c11e89..de4e6e5bf304 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
int i;
for_each_sg(data->sg, sg, data->sg_len, i) {
- void *buf = kmap_atomic(sg_page(sg) + sg->offset;
+ void *buf = kmap_atomic(sg_page(sg) + sg->offset);
buffer_swap32(buf, sg->length);
kunmap_atomic(buf);
+ }
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c763b404510f..f7ffbf1676b1 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/dma/pxa-dma.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
@@ -59,11 +58,11 @@ struct pxamci_host {
void __iomem *base;
struct clk *clk;
unsigned long clkrate;
- int irq;
unsigned int clkrt;
unsigned int cmdat;
unsigned int imask;
unsigned int power_mode;
+ unsigned long detect_delay_ms;
struct pxamci_platform_data *pdata;
struct mmc_request *mrq;
@@ -73,64 +72,48 @@ struct pxamci_host {
struct dma_chan *dma_chan_rx;
struct dma_chan *dma_chan_tx;
dma_cookie_t dma_cookie;
- dma_addr_t sg_dma;
unsigned int dma_len;
-
unsigned int dma_dir;
- unsigned int dma_drcmrrx;
- unsigned int dma_drcmrtx;
-
- struct regulator *vcc;
};
-static inline void pxamci_init_ocr(struct pxamci_host *host)
+static int pxamci_init_ocr(struct pxamci_host *host)
{
-#ifdef CONFIG_REGULATOR
- host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
-
- if (IS_ERR(host->vcc))
- host->vcc = NULL;
- else {
- host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
- if (host->pdata && host->pdata->ocr_mask)
- dev_warn(mmc_dev(host->mmc),
- "ocr_mask/setpower will not be used\n");
- }
-#endif
- if (host->vcc == NULL) {
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret < 0)
+ return ret;
+
+ if (IS_ERR(mmc->supply.vmmc)) {
/* fall-back to platform data */
- host->mmc->ocr_avail = host->pdata ?
+ mmc->ocr_avail = host->pdata ?
host->pdata->ocr_mask :
MMC_VDD_32_33 | MMC_VDD_33_34;
}
+
+ return 0;
}
static inline int pxamci_set_power(struct pxamci_host *host,
unsigned char power_mode,
unsigned int vdd)
{
+ struct mmc_host *mmc = host->mmc;
+ struct regulator *supply = mmc->supply.vmmc;
int on;
- if (host->vcc) {
- int ret;
+ if (!IS_ERR(supply))
+ return mmc_regulator_set_ocr(mmc, supply, vdd);
- if (power_mode == MMC_POWER_UP) {
- ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
- if (ret)
- return ret;
- } else if (power_mode == MMC_POWER_OFF) {
- ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
- if (ret)
- return ret;
- }
- }
- if (!host->vcc && host->pdata &&
+ if (host->pdata &&
gpio_is_valid(host->pdata->gpio_power)) {
on = ((1 << vdd) & host->pdata->ocr_mask);
gpio_set_value(host->pdata->gpio_power,
!!on ^ host->pdata->gpio_power_invert);
}
- if (!host->vcc && host->pdata && host->pdata->setpower)
+
+ if (host->pdata && host->pdata->setpower)
return host->pdata->setpower(mmc_dev(host->mmc), vdd);
return 0;
@@ -585,7 +568,7 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
struct pxamci_host *host = mmc_priv(devid);
- mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
+ mmc_detect_change(devid, msecs_to_jiffies(host->detect_delay_ms));
return IRQ_HANDLED;
}
@@ -597,37 +580,30 @@ static const struct of_device_id pxa_mmc_dt_ids[] = {
MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
-static int pxamci_of_init(struct platform_device *pdev)
+static int pxamci_of_init(struct platform_device *pdev,
+ struct mmc_host *mmc)
{
- struct device_node *np = pdev->dev.of_node;
- struct pxamci_platform_data *pdata;
- u32 tmp;
-
- if (!np)
- return 0;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ struct device_node *np = pdev->dev.of_node;
+ struct pxamci_host *host = mmc_priv(mmc);
+ u32 tmp;
+ int ret;
- pdata->gpio_card_detect =
- of_get_named_gpio(np, "cd-gpios", 0);
- pdata->gpio_card_ro =
- of_get_named_gpio(np, "wp-gpios", 0);
+ if (!np)
+ return 0;
/* pxa-mmc specific */
- pdata->gpio_power =
- of_get_named_gpio(np, "pxa-mmc,gpio-power", 0);
-
if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
- pdata->detect_delay_ms = tmp;
+ host->detect_delay_ms = tmp;
- pdev->dev.platform_data = pdata;
+ ret = mmc_of_parse(mmc);
+ if (ret < 0)
+ return ret;
- return 0;
+ return 0;
}
#else
-static int pxamci_of_init(struct platform_device *pdev)
+static int pxamci_of_init(struct platform_device *pdev,
+ struct mmc_host *mmc)
{
return 0;
}
@@ -637,21 +613,16 @@ static int pxamci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct pxamci_host *host = NULL;
- struct resource *r, *dmarx, *dmatx;
- struct pxad_param param_rx, param_tx;
- int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
- dma_cap_mask_t mask;
-
- ret = pxamci_of_init(pdev);
- if (ret)
- return ret;
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ int ret, irq;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
+ mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
if (!mmc) {
ret = -ENOMEM;
goto out;
@@ -680,12 +651,16 @@ static int pxamci_probe(struct platform_device *pdev)
*/
mmc->max_blk_count = 65535;
+ ret = pxamci_of_init(pdev, mmc);
+ if (ret)
+ return ret;
+
host = mmc_priv(mmc);
host->mmc = mmc;
host->pdata = pdev->dev.platform_data;
host->clkrt = CLKRT_OFF;
- host->clk = devm_clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
host->clk = NULL;
@@ -700,7 +675,9 @@ static int pxamci_probe(struct platform_device *pdev)
mmc->f_min = (host->clkrate + 63) / 64;
mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;
- pxamci_init_ocr(host);
+ ret = pxamci_init_ocr(host);
+ if (ret < 0)
+ return ret;
mmc->caps = 0;
host->cmdat = 0;
@@ -714,10 +691,9 @@ static int pxamci_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
host->res = r;
- host->irq = irq;
host->imask = MMC_I_MASK_ALL;
- host->base = devm_ioremap_resource(&pdev->dev, r);
+ host->base = devm_ioremap_resource(dev, r);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
@@ -732,89 +708,76 @@ static int pxamci_probe(struct platform_device *pdev)
writel(64, host->base + MMC_RESTO);
writel(host->imask, host->base + MMC_I_MASK);
- ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
+ ret = devm_request_irq(dev, irq, pxamci_irq, 0,
DRIVER_NAME, host);
if (ret)
goto out;
platform_set_drvdata(pdev, mmc);
- if (!pdev->dev.of_node) {
- dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!dmarx || !dmatx) {
- ret = -ENXIO;
- goto out;
- }
- param_rx.prio = PXAD_PRIO_LOWEST;
- param_rx.drcmr = dmarx->start;
- param_tx.prio = PXAD_PRIO_LOWEST;
- param_tx.drcmr = dmatx->start;
- }
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma_chan_rx =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param_rx, &pdev->dev, "rx");
+ host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
if (host->dma_chan_rx == NULL) {
- dev_err(&pdev->dev, "unable to request rx dma channel\n");
+ dev_err(dev, "unable to request rx dma channel\n");
ret = -ENODEV;
goto out;
}
- host->dma_chan_tx =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param_tx, &pdev->dev, "tx");
+ host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
if (host->dma_chan_tx == NULL) {
- dev_err(&pdev->dev, "unable to request tx dma channel\n");
+ dev_err(dev, "unable to request tx dma channel\n");
ret = -ENODEV;
goto out;
}
if (host->pdata) {
- gpio_cd = host->pdata->gpio_card_detect;
- gpio_ro = host->pdata->gpio_card_ro;
- gpio_power = host->pdata->gpio_power;
- }
- if (gpio_is_valid(gpio_power)) {
- ret = devm_gpio_request(&pdev->dev, gpio_power,
- "mmc card power");
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
- gpio_power);
- goto out;
+ int gpio_cd = host->pdata->gpio_card_detect;
+ int gpio_ro = host->pdata->gpio_card_ro;
+ int gpio_power = host->pdata->gpio_power;
+
+ host->detect_delay_ms = host->pdata->detect_delay_ms;
+
+ if (gpio_is_valid(gpio_power)) {
+ ret = devm_gpio_request(dev, gpio_power,
+ "mmc card power");
+ if (ret) {
+ dev_err(dev,
+ "Failed requesting gpio_power %d\n",
+ gpio_power);
+ goto out;
+ }
+ gpio_direction_output(gpio_power,
+ host->pdata->gpio_power_invert);
}
- gpio_direction_output(gpio_power,
- host->pdata->gpio_power_invert);
- }
- if (gpio_is_valid(gpio_ro)) {
- ret = mmc_gpio_request_ro(mmc, gpio_ro);
+
+ if (gpio_is_valid(gpio_ro)) {
+ ret = mmc_gpio_request_ro(mmc, gpio_ro);
+ if (ret) {
+ dev_err(dev,
+ "Failed requesting gpio_ro %d\n",
+ gpio_ro);
+ goto out;
+ } else {
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+ }
+
+ if (gpio_is_valid(gpio_cd))
+ ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
- gpio_ro);
+ dev_err(dev, "Failed requesting gpio_cd %d\n",
+ gpio_cd);
goto out;
- } else {
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
}
- }
- if (gpio_is_valid(gpio_cd))
- ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
- goto out;
- }
-
- if (host->pdata && host->pdata->init)
- host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
+ if (host->pdata->init)
+ host->pdata->init(dev, pxamci_detect_irq, mmc);
- if (gpio_is_valid(gpio_power) && host->pdata->setpower)
- dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
- if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
- dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
+ if (gpio_is_valid(gpio_power) && host->pdata->setpower)
+ dev_warn(dev, "gpio_power and setpower() both defined\n");
+ if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
+ dev_warn(dev, "gpio_ro and get_ro() both defined\n");
+ }
mmc_add_host(mmc);
@@ -835,18 +798,12 @@ out:
static int pxamci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
- int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
if (mmc) {
struct pxamci_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
- if (host->pdata) {
- gpio_cd = host->pdata->gpio_card_detect;
- gpio_ro = host->pdata->gpio_card_ro;
- gpio_power = host->pdata->gpio_power;
- }
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
@@ -862,6 +819,7 @@ static int pxamci_remove(struct platform_device *pdev)
mmc_free_host(mmc);
}
+
return 0;
}
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 45c015da2e75..777e32b0e410 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -212,6 +212,7 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
#define SH_MOBILE_SDHI_SCC_CKSEL 0x006
#define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008
#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A
+#define SH_MOBILE_SDHI_SCC_TMPPORT2 0x00E
/* Definitions for values the SH_MOBILE_SDHI_SCC_DTCNTL register */
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0)
@@ -224,6 +225,9 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0)
/* Definitions for values the SH_MOBILE_SDHI_SCC_RVSREQ register */
#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2)
+/* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT2 register */
+#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4)
+#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN BIT(31)
static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, int addr)
@@ -244,33 +248,30 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
priv = host_to_priv(host);
- /* set sampling clock selection range */
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
- 0x8 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
-
/* Initialize SCC */
sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, 0x0);
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
- SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
- sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL));
-
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ /* set sampling clock selection range */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
+ 0x8 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
/* Read TAPNUM */
return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT) &
@@ -286,13 +287,95 @@ static void renesas_sdhi_prepare_tuning(struct tmio_mmc_host *host,
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
}
+static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ /* Set HS400 mode */
+ sd_ctrl_write16(host, CTL_SDIF_MODE, 0x0001 |
+ sd_ctrl_read16(host, CTL_SDIF_MODE));
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
+ (SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
+ SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
+
+ /* Set the sampling clock selection range of HS400 mode */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
+ 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+
+ if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
+ host->tap_set / 2);
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+ SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+}
+
+static void renesas_sdhi_reset_scc(struct tmio_mmc_host *host,
+ struct renesas_sdhi *priv)
+{
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+ ~SH_MOBILE_SDHI_SCC_CKSEL_DTSEL &
+ sd_scc_read32(host, priv,
+ SH_MOBILE_SDHI_SCC_CKSEL));
+}
+
+static void renesas_sdhi_disable_scc(struct tmio_mmc_host *host)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ renesas_sdhi_reset_scc(host, priv);
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ ~SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN &
+ sd_scc_read32(host, priv,
+ SH_MOBILE_SDHI_SCC_DTCNTL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+}
+
+static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
+ struct renesas_sdhi *priv)
+{
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ /* Reset HS400 mode */
+ sd_ctrl_write16(host, CTL_SDIF_MODE, ~0x0001 &
+ sd_ctrl_read16(host, CTL_SDIF_MODE));
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
+ ~(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
+ SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+}
+
+static void renesas_sdhi_prepare_hs400_tuning(struct tmio_mmc_host *host)
+{
+ renesas_sdhi_reset_hs400_mode(host, host_to_priv(host));
+}
+
#define SH_MOBILE_SDHI_MAX_TAP 3
static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
unsigned long tap_cnt; /* counter of tuning success */
- unsigned long tap_set; /* tap position */
unsigned long tap_start;/* start position of tuning success */
unsigned long tap_end; /* end position of tuning success */
unsigned long ntap; /* temporary counter of tuning success */
@@ -302,6 +385,18 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/*
+ * When tuning CMD19 is issued twice for each tap, merge the
+ * results, requiring a tap to be good in both runs before it
+ * is considered for tuning selection.
+ */
+ for (i = 0; i < host->tap_num * 2; i++) {
+ int offset = host->tap_num * (i < host->tap_num ? 1 : -1);
+
+ if (!test_bit(i, host->taps))
+ clear_bit(i + offset, host->taps);
+ }
+
+ /*
* Find the longest consecutive run of successful probes. If that
* is more than SH_MOBILE_SDHI_MAX_TAP probes long then use the
* center index as the tap.
@@ -330,12 +425,12 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
}
if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP)
- tap_set = (tap_start + tap_end) / 2 % host->tap_num;
+ host->tap_set = (tap_start + tap_end) / 2 % host->tap_num;
else
return -EIO;
/* Set SCC */
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap_set);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set);
/* Enable auto re-tuning */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
@@ -368,13 +463,8 @@ static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
priv = host_to_priv(host);
- /* Reset SCC */
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
- ~SH_MOBILE_SDHI_SCC_CKSEL_DTSEL &
- sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+ renesas_sdhi_reset_scc(host, priv);
+ renesas_sdhi_reset_hs400_mode(host, priv);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
@@ -592,7 +682,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* Enable tuning iff we have an SCC and a supported mode */
if (of_data && of_data->scc_offset &&
(host->mmc->caps & MMC_CAP_UHS_SDR104 ||
- host->mmc->caps2 & MMC_CAP2_HS200_1_8V_SDR)) {
+ host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
+ MMC_CAP2_HS400_1_8V))) {
const struct renesas_sdhi_scc *taps = of_data->taps;
bool hit = false;
@@ -616,6 +707,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->select_tuning = renesas_sdhi_select_tuning;
host->check_scc_error = renesas_sdhi_check_scc_error;
host->hw_reset = renesas_sdhi_hw_reset;
+ host->prepare_hs400_tuning =
+ renesas_sdhi_prepare_hs400_tuning;
+ host->hs400_downgrade = renesas_sdhi_disable_scc;
+ host->hs400_complete = renesas_sdhi_hs400_complete;
}
i = 0;
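
The new loop before the consecutive-run search folds the two CMD19 passes together: bit i (first pass) and bit i + tap_num (second pass) describe the same physical tap, so a tap survives only if both passes succeeded. A self-contained sketch of the fold, assuming tap_num * 2 <= 32 so a u32 can stand in for the kernel bitmap:

#include <stdint.h>

/* Keep tap i only if both tuning passes marked it good. */
static uint32_t merge_tap_results(uint32_t taps, int tap_num)
{
	int i;

	for (i = 0; i < tap_num * 2; i++) {
		/* partner bit: i + tap_num in pass one, i - tap_num in pass two */
		int offset = (i < tap_num) ? tap_num : -tap_num;

		if (!(taps & (1u << i)))
			taps &= ~(1u << (i + offset));
	}

	return taps;
}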
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f7f9773d161f..35cc0de6be67 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -82,6 +82,22 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
},
};
+static const struct renesas_sdhi_of_data of_rcar_r8a7795_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2 |
+ TMIO_MMC_HAVE_4TAP_HS400,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23,
+ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
+ .bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
+ /* DMAC can handle 0xffffffff blk count but only 1 segment */
+ .max_blk_count = 0xffffffff,
+ .max_segs = 1,
+};
+
static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
@@ -98,8 +114,8 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
};
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
- { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
- { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_r8a7795_compatible, },
+ { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_r8a7795_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
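
Splitting of_rcar_r8a7795_compatible out of the generic gen3 data lets the SoC-specific entries carry TMIO_MMC_HAVE_4TAP_HS400 (used in the hs400_complete hunk above to halve the tap position) without touching the renesas,rcar-gen3-sdhi fallback. The .data pointer is consumed with the usual match-data idiom; a hedged sketch (example_probe is illustrative, not from the patch):

/* Sketch: retrieving the per-SoC data selected by the table above. */
static int example_probe(struct platform_device *pdev)
{
	const struct renesas_sdhi_of_data *of_data;

	of_data = of_device_get_match_data(&pdev->dev);
	if (of_data && (of_data->tmio_flags & TMIO_MMC_HAVE_4TAP_HS400))
		dev_info(&pdev->dev, "4-tap HS400 variant\n");

	return 0;
}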
@@ -139,8 +155,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
RST_RESERVED_BITS | val);
- if (host->data && host->data->flags & MMC_DATA_READ)
- clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+ clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
renesas_sdhi_internal_dmac_enable_dma(host, true);
}
@@ -164,17 +179,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
goto force_pio;
/* This DMAC cannot handle buffers that are not 8-byte aligned */
- if (!IS_ALIGNED(sg_dma_address(sg), 8)) {
- dma_unmap_sg(&host->pdev->dev, sg, host->sg_len,
- mmc_get_dma_dir(data));
- goto force_pio;
- }
+ if (!IS_ALIGNED(sg_dma_address(sg), 8))
+ goto force_pio_with_unmap;
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
- goto force_pio;
+ goto force_pio_with_unmap;
} else {
dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
}
@@ -189,6 +201,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
return;
+force_pio_with_unmap:
+ dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
+
force_pio:
host->force_pio = true;
renesas_sdhi_internal_dmac_enable_dma(host, false);
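
The force_pio_with_unmap label centralizes the dma_unmap_sg() call, so every bail-out taken after a successful dma_map_sg() unmaps exactly once; previously the RX_IN_USE path jumped straight to force_pio and leaked the mapping. A generic sketch of the staged unwind, where every example_* name is hypothetical:

static void example_start_dma(struct example_host *host)
{
	if (example_map_buffer(host) < 0)
		goto force_pio;			/* nothing mapped yet */

	if (!example_buffer_aligned(host))
		goto force_pio_with_unmap;

	if (example_channel_busy(host))
		goto force_pio_with_unmap;

	example_kick_dma(host);
	return;

force_pio_with_unmap:
	example_unmap_buffer(host);		/* undo the successful map */
force_pio:
	host->force_pio = true;			/* fall back to PIO transfer */
}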
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 4bb46c489d71..890f192dedbd 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -78,6 +78,19 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
},
};
+static const struct renesas_sdhi_of_data of_rcar_r8a7795_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2 |
+ TMIO_MMC_HAVE_4TAP_HS400,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23,
+ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
+ .bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
+};
+
static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
@@ -104,8 +117,8 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
- { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
- { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_r8a7795_compatible, },
+ { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_r8a7795_compatible, },
{ .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d6aef70d34fa..f44e49014a44 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Freescale eSDHC i.MX controller driver for the platform bus.
*
@@ -5,10 +6,6 @@
*
* Copyright (c) 2010 Pengutronix e.K.
* Author: Wolfram Sang <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#include <linux/io.h>
@@ -312,6 +309,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
val |= SDHCI_SUPPORT_HS400;
+
+ /*
+ * Do not advertise faster UHS modes if there are no
+ * pinctrl states for 100MHz/200MHz.
+ */
+ if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
+ IS_ERR_OR_NULL(imx_data->pins_200mhz))
+ val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
+ | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
}
}
@@ -699,14 +705,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
int div = 1;
u32 temp, val;
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+
if (clock == 0) {
host->mmc->actual_clock = 0;
-
- if (esdhc_is_usdhc(imx_data)) {
- val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
- writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
- host->ioaddr + ESDHC_VENDOR_SPEC);
- }
return;
}
@@ -752,7 +758,7 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
if (esdhc_is_usdhc(imx_data)) {
val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
- host->ioaddr + ESDHC_VENDOR_SPEC);
+ host->ioaddr + ESDHC_VENDOR_SPEC);
}
mdelay(1);
@@ -1142,34 +1148,18 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
&boarddata->tuning_start_tap);
if (of_find_property(np, "no-1-8-v", NULL))
- boarddata->support_vsel = false;
- else
- boarddata->support_vsel = true;
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
boarddata->delay_line = 0;
mmc_of_parse_voltage(np, &host->ocr_mask);
- /* sdr50 and sdr104 need work on 1.8v signal voltage */
- if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
- !IS_ERR(imx_data->pins_default)) {
+ if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) {
imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
ESDHC_PINCTRL_STATE_100MHZ);
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
ESDHC_PINCTRL_STATE_200MHZ);
- if (IS_ERR(imx_data->pins_100mhz) ||
- IS_ERR(imx_data->pins_200mhz)) {
- dev_warn(mmc_dev(host->mmc),
- "could not get ultra high speed state, work on normal mode\n");
- /*
- * fall back to not supporting uhs by specifying no
- * 1.8v quirk
- */
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
- } else {
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
}
/* call to generic mmc_of_parse to support additional capabilities */
@@ -1321,7 +1311,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (esdhc_is_usdhc(imx_data)) {
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
- host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
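
The i.MX rework inverts the UHS gating: instead of setting SDHCI_QUIRK2_NO_1_8_V at probe when the fast pinctrl states are missing, the states are looked up unconditionally and esdhc_readl_le() masks SDR50/DDR50/SDR104/HS400 out of the capability read when either lookup failed. A hedged sketch of the lookup half (example_* names are illustrative; the state-name strings are assumed to mirror the ESDHC_PINCTRL_STATE_* constants):

static void example_lookup_uhs_pins(struct example_imx_data *imx_data)
{
	imx_data->pins_100mhz =
		pinctrl_lookup_state(imx_data->pinctrl, "state_100mhz");
	imx_data->pins_200mhz =
		pinctrl_lookup_state(imx_data->pinctrl, "state_200mhz");

	/*
	 * No error handling here: a missing state is legal and simply
	 * means the capability read hides the faster UHS modes.
	 */
}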
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 646bf377ba77..3cc8bfee6c18 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -33,16 +33,11 @@
#define CORE_MCI_GENERICS 0x70
#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)
-#define CORE_HC_MODE 0x78
#define HC_MODE_EN 0x1
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
#define FF_CLK_SW_RST_DIS BIT(13)
-#define CORE_PWRCTL_STATUS 0xdc
-#define CORE_PWRCTL_MASK 0xe0
-#define CORE_PWRCTL_CLEAR 0xe4
-#define CORE_PWRCTL_CTL 0xe8
#define CORE_PWRCTL_BUS_OFF BIT(0)
#define CORE_PWRCTL_BUS_ON BIT(1)
#define CORE_PWRCTL_IO_LOW BIT(2)
@@ -63,17 +58,13 @@
#define CORE_CDR_EXT_EN BIT(19)
#define CORE_DLL_PDN BIT(29)
#define CORE_DLL_RST BIT(30)
-#define CORE_DLL_CONFIG 0x100
#define CORE_CMD_DAT_TRACK_SEL BIT(0)
-#define CORE_DLL_STATUS 0x108
-#define CORE_DLL_CONFIG_2 0x1b4
#define CORE_DDR_CAL_EN BIT(0)
#define CORE_FLL_CYCLE_CNT BIT(18)
#define CORE_DLL_CLOCK_DISABLE BIT(21)
-#define CORE_VENDOR_SPEC 0x10c
-#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
+#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
#define CORE_CLK_PWRSAVE BIT(1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
@@ -111,17 +102,14 @@
#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
#define CORE_CDC_SWITCH_RC_EN BIT(1)
-#define CORE_DDR_200_CFG 0x184
#define CORE_CDC_T4_DLY_SEL BIT(0)
#define CORE_CMDIN_RCLK_EN BIT(1)
#define CORE_START_CDC_TRAFFIC BIT(6)
-#define CORE_VENDOR_SPEC3 0x1b0
+
#define CORE_PWRSAVE_DLL BIT(3)
-#define CORE_DDR_CONFIG 0x1b8
#define DDR_CONFIG_POR_VAL 0x80040853
-#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
#define INVALID_TUNING_PHASE -1
#define SDHCI_MSM_MIN_CLOCK 400000
@@ -137,6 +125,117 @@
/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+#define msm_host_readl(msm_host, host, offset) \
+ msm_host->var_ops->msm_readl_relaxed(host, offset)
+
+#define msm_host_writel(msm_host, val, host, offset) \
+ msm_host->var_ops->msm_writel_relaxed(val, host, offset)
+
+struct sdhci_msm_offset {
+ u32 core_hc_mode;
+ u32 core_mci_data_cnt;
+ u32 core_mci_status;
+ u32 core_mci_fifo_cnt;
+ u32 core_mci_version;
+ u32 core_generics;
+ u32 core_testbus_config;
+ u32 core_testbus_sel2_bit;
+ u32 core_testbus_ena;
+ u32 core_testbus_sel2;
+ u32 core_pwrctl_status;
+ u32 core_pwrctl_mask;
+ u32 core_pwrctl_clear;
+ u32 core_pwrctl_ctl;
+ u32 core_sdcc_debug_reg;
+ u32 core_dll_config;
+ u32 core_dll_status;
+ u32 core_vendor_spec;
+ u32 core_vendor_spec_adma_err_addr0;
+ u32 core_vendor_spec_adma_err_addr1;
+ u32 core_vendor_spec_func2;
+ u32 core_vendor_spec_capabilities0;
+ u32 core_ddr_200_cfg;
+ u32 core_vendor_spec3;
+ u32 core_dll_config_2;
+ u32 core_ddr_config;
+ u32 core_ddr_config_2;
+};
+
+static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
+ .core_mci_data_cnt = 0x35c,
+ .core_mci_status = 0x324,
+ .core_mci_fifo_cnt = 0x308,
+ .core_mci_version = 0x318,
+ .core_generics = 0x320,
+ .core_testbus_config = 0x32c,
+ .core_testbus_sel2_bit = 3,
+ .core_testbus_ena = (1 << 31),
+ .core_testbus_sel2 = (1 << 3),
+ .core_pwrctl_status = 0x240,
+ .core_pwrctl_mask = 0x244,
+ .core_pwrctl_clear = 0x248,
+ .core_pwrctl_ctl = 0x24c,
+ .core_sdcc_debug_reg = 0x358,
+ .core_dll_config = 0x200,
+ .core_dll_status = 0x208,
+ .core_vendor_spec = 0x20c,
+ .core_vendor_spec_adma_err_addr0 = 0x214,
+ .core_vendor_spec_adma_err_addr1 = 0x218,
+ .core_vendor_spec_func2 = 0x210,
+ .core_vendor_spec_capabilities0 = 0x21c,
+ .core_ddr_200_cfg = 0x224,
+ .core_vendor_spec3 = 0x250,
+ .core_dll_config_2 = 0x254,
+ .core_ddr_config = 0x258,
+ .core_ddr_config_2 = 0x25c,
+};
+
+static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
+ .core_hc_mode = 0x78,
+ .core_mci_data_cnt = 0x30,
+ .core_mci_status = 0x34,
+ .core_mci_fifo_cnt = 0x44,
+ .core_mci_version = 0x050,
+ .core_generics = 0x70,
+ .core_testbus_config = 0x0cc,
+ .core_testbus_sel2_bit = 4,
+ .core_testbus_ena = (1 << 3),
+ .core_testbus_sel2 = (1 << 4),
+ .core_pwrctl_status = 0xdc,
+ .core_pwrctl_mask = 0xe0,
+ .core_pwrctl_clear = 0xe4,
+ .core_pwrctl_ctl = 0xe8,
+ .core_sdcc_debug_reg = 0x124,
+ .core_dll_config = 0x100,
+ .core_dll_status = 0x108,
+ .core_vendor_spec = 0x10c,
+ .core_vendor_spec_adma_err_addr0 = 0x114,
+ .core_vendor_spec_adma_err_addr1 = 0x118,
+ .core_vendor_spec_func2 = 0x110,
+ .core_vendor_spec_capabilities0 = 0x11c,
+ .core_ddr_200_cfg = 0x184,
+ .core_vendor_spec3 = 0x1b0,
+ .core_dll_config_2 = 0x1b4,
+ .core_ddr_config = 0x1b8,
+ .core_ddr_config_2 = 0x1bc,
+};
+
+struct sdhci_msm_variant_ops {
+ u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
+ void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
+ u32 offset);
+};
+
+/*
+ * From V5 onwards, the register space has changed. Wrap this info in a
+ * structure and pick the right variant based on the version info in DT.
+ */
+struct sdhci_msm_variant_info {
+ bool mci_removed;
+ const struct sdhci_msm_variant_ops *var_ops;
+ const struct sdhci_msm_offset *offset;
+};
+
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
@@ -156,8 +255,53 @@ struct sdhci_msm_host {
wait_queue_head_t pwr_irq_wait;
bool pwr_irq_flag;
u32 caps_0;
+ bool mci_removed;
+ const struct sdhci_msm_variant_ops *var_ops;
+ const struct sdhci_msm_offset *offset;
};
+static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ return msm_host->offset;
+}
+
+/*
+ * Helpers to read/write the vendor-specific registers that lived in the
+ * core_mem region before the MCI block was removed.
+ */
+static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
+ u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ return readl_relaxed(msm_host->core_mem + offset);
+}
+
+static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
+ u32 offset)
+{
+ return readl_relaxed(host->ioaddr + offset);
+}
+
+static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
+ struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ writel_relaxed(val, msm_host->core_mem + offset);
+}
+
+static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
+ struct sdhci_host *host, u32 offset)
+{
+ writel_relaxed(val, host->ioaddr + offset);
+}
+
static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
unsigned int clock)
{
@@ -205,10 +349,12 @@ static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
u32 wait_cnt = 50;
u8 ck_out_en;
struct mmc_host *mmc = host->mmc;
+ const struct sdhci_msm_offset *msm_offset =
+ sdhci_priv_msm_offset(host);
/* Poll for CK_OUT_EN bit. max. poll time = 50us */
- ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
- CORE_CK_OUT_EN);
+ ck_out_en = !!(readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config) & CORE_CK_OUT_EN);
while (ck_out_en != poll) {
if (--wait_cnt == 0) {
@@ -218,8 +364,8 @@ static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
}
udelay(1);
- ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
- CORE_CK_OUT_EN);
+ ck_out_en = !!(readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config) & CORE_CK_OUT_EN);
}
return 0;
@@ -235,16 +381,18 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
unsigned long flags;
u32 config;
struct mmc_host *mmc = host->mmc;
+ const struct sdhci_msm_offset *msm_offset =
+ sdhci_priv_msm_offset(host);
if (phase > 0xf)
return -EINVAL;
spin_lock_irqsave(&host->lock, flags);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
rc = msm_dll_poll_ck_out_en(host, 0);
@@ -255,24 +403,24 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
* Write the selected DLL clock output phase (0 ... 15)
* to CDR_SELEXT bit field of DLL_CONFIG register.
*/
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
config &= ~CDR_SELEXT_MASK;
config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
config |= CORE_CK_OUT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
rc = msm_dll_poll_ck_out_en(host, 1);
if (rc)
goto err_out;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
config |= CORE_CDR_EN;
config &= ~CORE_CDR_EXT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
goto out;
err_out:
@@ -398,6 +546,8 @@ static int msm_find_most_appropriate_phase(struct sdhci_host *host,
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
u32 mclk_freq = 0, config;
+ const struct sdhci_msm_offset *msm_offset =
+ sdhci_priv_msm_offset(host);
/* Program the MCLK value to MCLK_FREQ bit field */
if (host->clock <= 112000000)
@@ -417,10 +567,10 @@ static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
else if (host->clock <= 200000000)
mclk_freq = 7;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
config &= ~CMUX_SHIFT_PHASE_MASK;
config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}
/* Initialize the DLL (Programmable Delay Line) */
@@ -432,6 +582,8 @@ static int msm_init_cm_dll(struct sdhci_host *host)
int wait_cnt = 50;
unsigned long flags;
u32 config;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
spin_lock_irqsave(&host->lock, flags);
@@ -440,34 +592,43 @@ static int msm_init_cm_dll(struct sdhci_host *host)
* tuning is in progress. Keeping PWRSAVE ON may
* turn off the clock.
*/
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
config &= ~CORE_CLK_PWRSAVE;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
if (msm_host->use_14lpp_dll_reset) {
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config &= ~CORE_CK_OUT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config_2);
config |= CORE_DLL_CLOCK_DISABLE;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config_2);
}
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config |= CORE_DLL_RST;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config |= CORE_DLL_PDN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
msm_cm_dll_set_freq(host);
if (msm_host->use_14lpp_dll_reset &&
!IS_ERR_OR_NULL(msm_host->xo_clk)) {
u32 mclk_freq = 0;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config_2);
config &= CORE_FLL_CYCLE_CNT;
if (config)
mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
@@ -476,40 +637,52 @@ static int msm_init_cm_dll(struct sdhci_host *host)
mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
clk_get_rate(msm_host->xo_clk));
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config_2);
config &= ~(0xFF << 10);
config |= mclk_freq << 10;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config_2);
/* wait for 5us before enabling DLL clock */
udelay(5);
}
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config &= ~CORE_DLL_RST;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config &= ~CORE_DLL_PDN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
if (msm_host->use_14lpp_dll_reset) {
msm_cm_dll_set_freq(host);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config_2);
config &= ~CORE_DLL_CLOCK_DISABLE;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config_2);
}
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config |= CORE_DLL_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config |= CORE_CK_OUT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
- while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
+ while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
CORE_DLL_LOCK)) {
/* max. wait of 50 us for LOCK bit to be set */
if (--wait_cnt == 0) {
@@ -530,19 +703,21 @@ static void msm_hc_select_default(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 config;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
if (!msm_host->use_cdclp533) {
config = readl_relaxed(host->ioaddr +
- CORE_VENDOR_SPEC3);
+ msm_offset->core_vendor_spec3);
config &= ~CORE_PWRSAVE_DLL;
writel_relaxed(config, host->ioaddr +
- CORE_VENDOR_SPEC3);
+ msm_offset->core_vendor_spec3);
}
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
config &= ~CORE_HC_MCLK_SEL_MASK;
config |= CORE_HC_MCLK_SEL_DFLT;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
/*
* Disable HC_SELECT_IN to be able to use the UHS mode select
@@ -551,10 +726,10 @@ static void msm_hc_select_default(struct sdhci_host *host)
* Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
* in VENDOR_SPEC_FUNC
*/
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
config &= ~CORE_HC_SELECT_IN_EN;
config &= ~CORE_HC_SELECT_IN_MASK;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
/*
* Make sure above writes impacting free running MCLK are completed
@@ -570,32 +745,36 @@ static void msm_hc_select_hs400(struct sdhci_host *host)
struct mmc_ios ios = host->mmc->ios;
u32 config, dll_lock;
int rc;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
/* Select the divided clock (free running MCLK/2) */
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
config &= ~CORE_HC_MCLK_SEL_MASK;
config |= CORE_HC_MCLK_SEL_HS400;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
/*
* Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
* register
*/
if ((msm_host->tuning_done || ios.enhanced_strobe) &&
!msm_host->calibration_done) {
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_vendor_spec);
config |= CORE_HC_SELECT_IN_HS400;
config |= CORE_HC_SELECT_IN_EN;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_vendor_spec);
}
if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
/*
* Poll on DLL_LOCK or DDR_DLL_LOCK bits in
- * CORE_DLL_STATUS to be set. This should get set
+ * core_dll_status to be set. This should get set
* within 15 us at 200 MHz.
*/
rc = readl_relaxed_poll_timeout(host->ioaddr +
- CORE_DLL_STATUS,
+ msm_offset->core_dll_status,
dll_lock,
(dll_lock &
(CORE_DLL_LOCK |
@@ -647,6 +826,8 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 config, calib_done;
int ret;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
@@ -663,13 +844,13 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
if (ret)
goto out;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
config |= CORE_CMD_DAT_TRACK_SEL;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
- config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
config &= ~CORE_CDC_T4_DLY_SEL;
- writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
@@ -679,9 +860,9 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
config |= CORE_CDC_SWITCH_RC_EN;
writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
- config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
config &= ~CORE_START_CDC_TRAFFIC;
- writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
/* Perform CDC Register Initialization Sequence */
@@ -733,9 +914,9 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
goto out;
}
- config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
config |= CORE_START_CDC_TRAFFIC;
- writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
__func__, ret);
@@ -747,32 +928,38 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
struct mmc_host *mmc = host->mmc;
u32 dll_status, config;
int ret;
+ const struct sdhci_msm_offset *msm_offset =
+ sdhci_priv_msm_offset(host);
pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
/*
- * Currently the CORE_DDR_CONFIG register defaults to desired
+ * Currently the core_ddr_config register defaults to the desired
* configuration on reset. We reprogram the power on
* reset (POR) value in case it might have been modified by
* bootloaders. In the future, if this changes, then the desired
* values will need to be programmed appropriately.
*/
- writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);
+ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
+ msm_offset->core_ddr_config);
if (mmc->ios.enhanced_strobe) {
- config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_ddr_200_cfg);
config |= CORE_CMDIN_RCLK_EN;
- writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_ddr_200_cfg);
}
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
config |= CORE_DDR_CAL_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);
- ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
- dll_status,
- (dll_status & CORE_DDR_DLL_LOCK),
- 10, 1000);
+ ret = readl_relaxed_poll_timeout(host->ioaddr +
+ msm_offset->core_dll_status,
+ dll_status,
+ (dll_status & CORE_DDR_DLL_LOCK),
+ 10, 1000);
if (ret == -ETIMEDOUT) {
pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
@@ -780,9 +967,9 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
goto out;
}
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
+ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3);
config |= CORE_PWRSAVE_DLL;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);
+ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3);
/*
* Drain writebuffer to ensure above DLL calibration
@@ -802,6 +989,8 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
struct mmc_host *mmc = host->mmc;
int ret;
u32 config;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
@@ -819,9 +1008,11 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
msm_host->saved_tuning_phase);
if (ret)
goto out;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config |= CORE_CMD_DAT_TRACK_SEL;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
}
if (msm_host->use_cdclp533)
@@ -951,6 +1142,8 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u16 ctrl_2;
u32 config;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
@@ -991,13 +1184,17 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
* DLL is not required for clock <= 100MHz
* Thus, make sure the DLL is disabled when not required
*/
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config |= CORE_DLL_RST;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config);
config |= CORE_DLL_PDN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_dll_config);
/*
* The DLL needs to be restored and CDCLP533 recalibrated
@@ -1039,7 +1236,9 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
bool done = false;
- u32 val;
+ u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
mmc_hostname(host->mmc), __func__, req_type,
@@ -1048,8 +1247,12 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
/*
* The power interrupt will not be generated for signal voltage
* switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
+ * Since sdhci-msm-v5, this bit has been removed and SW must consider
+ * it as always set.
*/
- val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
+ if (!msm_host->mci_removed)
+ val = msm_host_readl(msm_host, host,
+ msm_offset->core_generics);
if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
!(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
return;
@@ -1097,12 +1300,14 @@ static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
- mmc_hostname(host->mmc),
- readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
- readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
- readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+ mmc_hostname(host->mmc),
+ msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
+ msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
+ msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}
static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
@@ -1113,11 +1318,14 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
int retry = 10;
u32 pwr_state = 0, io_level = 0;
u32 config;
+ const struct sdhci_msm_offset *msm_offset = msm_host->offset;
- irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
+ irq_status = msm_host_readl(msm_host, host,
+ msm_offset->core_pwrctl_status);
irq_status &= INT_MASK;
- writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ msm_host_writel(msm_host, irq_status, host,
+ msm_offset->core_pwrctl_clear);
/*
* There is a rare HW scenario where the first clear pulse could be
@@ -1126,8 +1334,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
* sure status register is cleared. Otherwise, this will result in
* a spurious power IRQ resulting in system instability.
*/
- while (irq_status & readl_relaxed(msm_host->core_mem +
- CORE_PWRCTL_STATUS)) {
+ while (irq_status & msm_host_readl(msm_host, host,
+ msm_offset->core_pwrctl_status)) {
if (retry == 0) {
pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
mmc_hostname(host->mmc), irq_status);
@@ -1135,8 +1343,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
WARN_ON(1);
break;
}
- writel_relaxed(irq_status,
- msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ msm_host_writel(msm_host, irq_status, host,
+ msm_offset->core_pwrctl_clear);
retry--;
udelay(10);
}
@@ -1167,7 +1375,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
* report back to this register whether it succeeded. The voltage
* switches are handled by the sdhci core, so just report success.
*/
- writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+ msm_host_writel(msm_host, irq_ack, host,
+ msm_offset->core_pwrctl_ctl);
/*
* If we don't have info regarding the voltage levels supported by
@@ -1186,7 +1395,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
* controllers with only 1.8V, we will set the IO PAD bit
* without waiting for a REQ_IO_LOW.
*/
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_vendor_spec);
new_config = config;
if ((io_level & REQ_IO_HIGH) &&
@@ -1197,8 +1407,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
new_config |= CORE_IO_PAD_PWR_SWITCH;
if (config ^ new_config)
- writel_relaxed(new_config,
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(new_config, host->ioaddr +
+ msm_offset->core_vendor_spec);
}
if (pwr_state)
@@ -1359,6 +1569,7 @@ static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
struct regulator *supply = mmc->supply.vqmmc;
u32 caps = 0, config;
struct sdhci_host *host = mmc_priv(mmc);
+ const struct sdhci_msm_offset *msm_offset = msm_host->offset;
if (!IS_ERR(mmc->supply.vqmmc)) {
if (regulator_is_supported_voltage(supply, 1700000, 1950000))
@@ -1378,7 +1589,8 @@ static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
*/
u32 io_level = msm_host->curr_io_level;
- config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_vendor_spec);
config |= CORE_IO_PAD_PWR_SWITCH_EN;
if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
@@ -1386,14 +1598,38 @@ static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
config |= CORE_IO_PAD_PWR_SWITCH;
- writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(config,
+ host->ioaddr + msm_offset->core_vendor_spec);
}
msm_host->caps_0 |= caps;
pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}
+static const struct sdhci_msm_variant_ops mci_var_ops = {
+ .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
+ .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
+};
+
+static const struct sdhci_msm_variant_ops v5_var_ops = {
+ .msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
+ .msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
+};
+
+static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
+ .mci_removed = false,
+ .var_ops = &mci_var_ops,
+ .offset = &sdhci_msm_mci_offset,
+};
+
+static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
+ .mci_removed = true,
+ .var_ops = &v5_var_ops,
+ .offset = &sdhci_msm_v5_offset,
+};
+
static const struct of_device_id sdhci_msm_dt_match[] = {
- { .compatible = "qcom,sdhci-msm-v4" },
+ {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
+ {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
{},
};
@@ -1429,6 +1665,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
u16 host_version, core_minor;
u32 core_version, config;
u8 core_major;
+ const struct sdhci_msm_offset *msm_offset;
+ const struct sdhci_msm_variant_info *var_info;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
@@ -1444,6 +1682,18 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (ret)
goto pltfm_free;
+ /*
+ * Based on the compatible string, load the required msm host info from
+ * the data associated with the version info.
+ */
+ var_info = of_device_get_match_data(&pdev->dev);
+
+ msm_host->mci_removed = var_info->mci_removed;
+ msm_host->var_ops = var_info->var_ops;
+ msm_host->offset = var_info->offset;
+
+ msm_offset = msm_host->offset;
+
sdhci_get_of_property(pdev);
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
@@ -1508,32 +1758,39 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
}
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
+ if (!msm_host->mci_removed) {
+ core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
+ core_memres);
- if (IS_ERR(msm_host->core_mem)) {
- dev_err(&pdev->dev, "Failed to remap registers\n");
- ret = PTR_ERR(msm_host->core_mem);
- goto clk_disable;
+ if (IS_ERR(msm_host->core_mem)) {
+ ret = PTR_ERR(msm_host->core_mem);
+ goto clk_disable;
+ }
}
/* Reset the vendor spec register to power on reset state */
writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
- host->ioaddr + CORE_VENDOR_SPEC);
-
- /* Set HC_MODE_EN bit in HC_MODE register */
- writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
-
- config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
- config |= FF_CLK_SW_RST_DIS;
- writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);
+ host->ioaddr + msm_offset->core_vendor_spec);
+
+ if (!msm_host->mci_removed) {
+ /* Set HC_MODE_EN bit in HC_MODE register */
+ msm_host_writel(msm_host, HC_MODE_EN, host,
+ msm_offset->core_hc_mode);
+ config = msm_host_readl(msm_host, host,
+ msm_offset->core_hc_mode);
+ config |= FF_CLK_SW_RST_DIS;
+ msm_host_writel(msm_host, config, host,
+ msm_offset->core_hc_mode);
+ }
host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT));
- core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
+ core_version = msm_host_readl(msm_host, host,
+ msm_offset->core_mci_version);
core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
CORE_VERSION_MAJOR_SHIFT;
core_minor = core_version & CORE_VERSION_MINOR_MASK;
@@ -1558,7 +1815,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
writel_relaxed(config, host->ioaddr +
- CORE_VENDOR_SPEC_CAPABILITIES0);
+ msm_offset->core_vendor_spec_capabilities0);
}
/*
@@ -1587,7 +1844,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
sdhci_msm_init_pwr_irq_wait(msm_host);
/* Enable pwr irq interrupts */
- writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
+ msm_host_writel(msm_host, INT_MASK, host,
+ msm_offset->core_pwrctl_mask);
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT,
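
The core of this sdhci-msm rework is the variant dispatch: register offsets move into const tables and the MMIO accessors behind a two-function ops struct, both chosen once in probe from the compatible string, so the rest of the driver never asks which register layout it is running on. A sketch of a call through the new indirection (the helper is illustrative; the structs and fields are the ones added above):

static u32 example_pwrctl_status(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* core_mem-relative on pre-v5 parts, ioaddr-relative on v5 */
	return msm_host->var_ops->msm_readl_relaxed(host,
			msm_host->offset->core_pwrctl_status);
}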
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index e3332a522a5d..a40bcc27f187 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -102,6 +102,9 @@ struct sdhci_arasan_data {
/* Controller does not have CD wired and will not function normally without */
#define SDHCI_ARASAN_QUIRK_FORCE_CDTEST BIT(0)
+/*
+ * Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
+ * internal clock even when the clock isn't stable
+ */
+#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
};
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
@@ -207,6 +210,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_set_clock(host, clock);
+ if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE)
+ /*
+ * Some controllers immediately report SDHCI_CLOCK_INT_STABLE
+ * after enabling the clock even though the clock is not
+ * stable. Trying to use a clock without waiting here results
+ * in EILSEQ while detecting some older/slower cards. The
+ * chosen delay is the maximum delay from sdhci_set_clock.
+ */
+ msleep(20);
+
if (ctrl_phy) {
phy_power_on(sdhci_arasan->phy);
sdhci_arasan->is_phy_on = true;
@@ -758,6 +771,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "xlnx,fails-without-test-cd"))
sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_FORCE_CDTEST;
+ if (of_property_read_bool(np, "xlnx,int-clock-stable-broken"))
+ sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE;
+
pltfm_host->clk = clk_xin;
if (of_device_is_compatible(pdev->dev.of_node,
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
new file mode 100644
index 000000000000..1b7cd144fb01
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Synopsys DesignWare Cores Mobile Storage Host Controller
+ *
+ * Copyright (C) 2018 Synaptics Incorporated
+ *
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "sdhci-pltfm.h"
+
+struct dwcmshc_priv {
+ struct clk *bus_clk;
+};
+
+static const struct sdhci_ops sdhci_dwcmshc_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .reset = sdhci_reset,
+};
+
+static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+};
+
+static int dwcmshc_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct dwcmshc_priv *priv;
+ int err;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_dwcmshc_pdata,
+ sizeof(struct dwcmshc_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+
+ pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pltfm_host->clk)) {
+ err = PTR_ERR(pltfm_host->clk);
+ dev_err(&pdev->dev, "failed to get core clk: %d\n", err);
+ goto free_pltfm;
+ }
+ err = clk_prepare_enable(pltfm_host->clk);
+ if (err)
+ goto free_pltfm;
+
+ priv->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(priv->bus_clk))
+ clk_prepare_enable(priv->bus_clk);
+
+ err = mmc_of_parse(host->mmc);
+ if (err)
+ goto err_clk;
+
+ sdhci_get_of_property(pdev);
+
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_clk;
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(priv->bus_clk);
+free_pltfm:
+ sdhci_pltfm_free(pdev);
+ return err;
+}
+
+static int dwcmshc_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_remove_host(host, 0);
+
+ clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(priv->bus_clk);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
+ { .compatible = "snps,dwcmshc-sdhci" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
+
+static struct platform_driver sdhci_dwcmshc_driver = {
+ .driver = {
+ .name = "sdhci-dwcmshc",
+ .of_match_table = sdhci_dwcmshc_dt_ids,
+ },
+ .probe = dwcmshc_probe,
+ .remove = dwcmshc_remove,
+};
+module_platform_driver(sdhci_dwcmshc_driver);
+
+MODULE_DESCRIPTION("SDHCI platform driver for Synopsys DWC MSHC");
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 4ffa6b173a21..9cb7554a463d 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -22,6 +22,7 @@
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
+#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -29,11 +30,56 @@
#define VENDOR_V_22 0x12
#define VENDOR_V_23 0x13
+#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
+
+struct esdhc_clk_fixup {
+ const unsigned int sd_dflt_max_clk;
+ const unsigned int max_clk[MMC_TIMING_NUM];
+};
+
+static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
+ .sd_dflt_max_clk = 25000000,
+ .max_clk[MMC_TIMING_MMC_HS] = 46500000,
+ .max_clk[MMC_TIMING_SD_HS] = 46500000,
+};
+
+static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
+ .sd_dflt_max_clk = 25000000,
+ .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
+ .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
+};
+
+static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
+ .sd_dflt_max_clk = 25000000,
+ .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
+ .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
+};
+
+static const struct esdhc_clk_fixup p1010_esdhc_clk = {
+ .sd_dflt_max_clk = 20000000,
+ .max_clk[MMC_TIMING_LEGACY] = 20000000,
+ .max_clk[MMC_TIMING_MMC_HS] = 42000000,
+ .max_clk[MMC_TIMING_SD_HS] = 40000000,
+};
+
+static const struct of_device_id sdhci_esdhc_of_match[] = {
+ { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
+ { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
+ { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
+ { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
+ { .compatible = "fsl,mpc8379-esdhc" },
+ { .compatible = "fsl,mpc8536-esdhc" },
+ { .compatible = "fsl,esdhc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
+
struct sdhci_esdhc {
u8 vendor_ver;
u8 spec_ver;
bool quirk_incorrect_hostver;
unsigned int peripheral_clock;
+ const struct esdhc_clk_fixup *clk_fixup;
};
/**
@@ -427,6 +473,11 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
u32 value;
+ struct device *dev = mmc_dev(host->mmc);
+
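+	/* These Layerscape SoCs support 40-bit DMA addressing on the eSDHC */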
+ if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
+ of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
value |= ESDHC_DMA_SNOOP;
@@ -492,6 +543,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
int pre_div = 1;
int div = 1;
ktime_t timeout;
+ long fixup = 0;
u32 temp;
host->mmc->actual_clock = 0;
@@ -505,27 +557,14 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
- /*
- * Limit SD clock to 167MHz for ls1046a according to its datasheet
- */
- if (clock > 167000000 &&
- of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc"))
- clock = 167000000;
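+	/*
+	 * Clamp the requested clock against the per-SoC fixup table: SD cards
+	 * in legacy timing use sd_dflt_max_clk, all other cases the limit for
+	 * the current timing mode.
+	 */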
+ if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
+ esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
+ fixup = esdhc->clk_fixup->sd_dflt_max_clk;
+ else if (esdhc->clk_fixup)
+ fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
- /*
- * Limit SD clock to 125MHz for ls1012a according to its datasheet
- */
- if (clock > 125000000 &&
- of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc"))
- clock = 125000000;
-
- /* Workaround to reduce the clock frequency for p1010 esdhc */
- if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
- if (clock > 20000000)
- clock -= 5000000;
- if (clock > 40000000)
- clock -= 5000000;
- }
+ if (fixup && clock > fixup)
+ clock = fixup;
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
@@ -783,6 +822,7 @@ static struct soc_device_attribute soc_incorrect_hostver[] = {
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
+ const struct of_device_id *match;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
struct device_node *np;
@@ -802,6 +842,9 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
else
esdhc->quirk_incorrect_hostver = false;
+ match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
+ if (match)
+ esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;
clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
@@ -901,14 +944,6 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
return ret;
}
-static const struct of_device_id sdhci_esdhc_of_match[] = {
- { .compatible = "fsl,mpc8379-esdhc" },
- { .compatible = "fsl,mpc8536-esdhc" },
- { .compatible = "fsl,esdhc" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
-
static struct platform_driver sdhci_esdhc_driver = {
.driver = {
.name = "sdhci-esdhc",
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 77dd3521daae..7bfd366d970d 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1500,6 +1500,8 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
+ SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
@@ -1511,6 +1513,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
+ SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
diff --git a/drivers/mmc/host/sdhci-pci-dwc-mshc.c b/drivers/mmc/host/sdhci-pci-dwc-mshc.c
new file mode 100644
index 000000000000..f78d65448d17
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-dwc-mshc.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDHCI driver for Synopsys DWC_MSHC controller
+ *
+ * Copyright (C) 2018 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors:
+ * Prabu Thangamuthu <prabu.t@synopsys.com>
+ * Manjunath M B <manjumb@synopsys.com>
+ */
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+#define SDHCI_VENDOR_PTR_R 0xE8
+
+/* Synopsys vendor specific registers */
+#define SDHC_GPIO_OUT 0x34
+#define SDHC_AT_CTRL_R 0x40
+#define SDHC_SW_TUNE_EN 0x00000010
+
+/* MMCM DRP */
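+/*
+ * Magic divider/feedback values for the MMCM clock generator (an FPGA
+ * clocking block on the prototyping card), selecting 100 or 200 MHz.
+ */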
+#define SDHC_MMCM_DIV_REG 0x1020
+#define DIV_REG_100_MHZ 0x1145
+#define DIV_REG_200_MHZ 0x1083
+#define SDHC_MMCM_CLKFBOUT 0x1024
+#define CLKFBOUT_100_MHZ 0x0000
+#define CLKFBOUT_200_MHZ 0x0080
+#define SDHC_CCLK_MMCM_RST 0x00000001
+
+static void sdhci_snps_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ u32 reg, vendor_ptr;
+
+ vendor_ptr = sdhci_readw(host, SDHCI_VENDOR_PTR_R);
+
+ /* Disable software managed rx tuning */
+ reg = sdhci_readl(host, (SDHC_AT_CTRL_R + vendor_ptr));
+ reg &= ~SDHC_SW_TUNE_EN;
+ sdhci_writel(host, reg, (SDHC_AT_CTRL_R + vendor_ptr));
+
+ if (clock <= 52000000) {
+ sdhci_set_clock(host, clock);
+ } else {
+ /* Assert reset to MMCM */
+ reg = sdhci_readl(host, (SDHC_GPIO_OUT + vendor_ptr));
+ reg |= SDHC_CCLK_MMCM_RST;
+ sdhci_writel(host, reg, (SDHC_GPIO_OUT + vendor_ptr));
+
+ /* Configure MMCM */
+ if (clock == 100000000) {
+ sdhci_writel(host, DIV_REG_100_MHZ, SDHC_MMCM_DIV_REG);
+ sdhci_writel(host, CLKFBOUT_100_MHZ,
+ SDHC_MMCM_CLKFBOUT);
+ } else {
+ sdhci_writel(host, DIV_REG_200_MHZ, SDHC_MMCM_DIV_REG);
+ sdhci_writel(host, CLKFBOUT_200_MHZ,
+ SDHC_MMCM_CLKFBOUT);
+ }
+
+ /* De-assert reset to MMCM */
+ reg = sdhci_readl(host, (SDHC_GPIO_OUT + vendor_ptr));
+ reg &= ~SDHC_CCLK_MMCM_RST;
+ sdhci_writel(host, reg, (SDHC_GPIO_OUT + vendor_ptr));
+
+ /* Enable clock */
+ clk = SDHCI_PROG_CLOCK_MODE | SDHCI_CLOCK_INT_EN |
+ SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+static const struct sdhci_ops sdhci_snps_ops = {
+ .set_clock = sdhci_snps_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_snps = {
+ .ops = &sdhci_snps_ops,
+};
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 555970a29c94..77e9bc4aaee9 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -3,6 +3,7 @@
*
* Authors: Peter Guo <peter.guo@bayhubtech.com>
* Adam Lee <adam.lee@canonical.com>
+ * Ernest Zhang <ernest.zhang@bayhubtech.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,6 +17,9 @@
*/
#include <linux/pci.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/delay.h>
#include "sdhci.h"
#include "sdhci-pci.h"
@@ -39,6 +43,7 @@
#define O2_SD_MISC_CTRL4 0xFC
#define O2_SD_TUNING_CTRL 0x300
#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_MISC_SETTING 0x308
#define O2_SD_CLK_SETTING 0x328
#define O2_SD_CAP_REG2 0x330
#define O2_SD_CAP_REG0 0x334
@@ -53,6 +58,82 @@
#define O2_SD_VENDOR_SETTING 0x110
#define O2_SD_VENDOR_SETTING2 0x1C8
+#define O2_SD_HW_TUNING_DISABLE BIT(4)
+
+static void sdhci_o2_set_tuning_mode(struct sdhci_host *host)
+{
+ u16 reg;
+
+ /* enable hardware tuning */
+ reg = sdhci_readw(host, O2_SD_VENDOR_SETTING);
+ reg &= ~O2_SD_HW_TUNING_DISABLE;
+ sdhci_writew(host, reg, O2_SD_VENDOR_SETTING);
+}
+
+static void __sdhci_o2_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int i;
+
+ sdhci_send_tuning(host, MMC_SEND_TUNING_BLOCK_HS200);
+
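+	/* Poll for up to ~150 ms while the controller performs tuning */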
+ for (i = 0; i < 150; i++) {
+ u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
+ if (ctrl & SDHCI_CTRL_TUNED_CLK) {
+ host->tuning_done = true;
+ return;
+ }
+ pr_warn("%s: HW tuning failed !\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+
+ mdelay(1);
+ }
+
+ pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_reset_tuning(host);
+}
+
+static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int current_bus_width = 0;
+
+ /*
+ * This handler only implements the eMMC tuning that is specific to
+	 * this controller. Fall back to the standard method for other timings.
+ */
+ if (host->timing != MMC_TIMING_MMC_HS200)
+ return sdhci_execute_tuning(mmc, opcode);
+
+ if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
+ return -EINVAL;
+
+ /*
+	 * The O2 SDHCI host does not support 8-bit eMMC tuning
+ */
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
+ current_bus_width = mmc->ios.bus_width;
+ sdhci_set_bus_width(host, MMC_BUS_WIDTH_4);
+ }
+
+ sdhci_o2_set_tuning_mode(host);
+
+ sdhci_start_tuning(host);
+
+ __sdhci_o2_execute_tuning(host, opcode);
+
+ sdhci_end_tuning(host);
+
+ if (current_bus_width == MMC_BUS_WIDTH_8)
+ sdhci_set_bus_width(host, current_bus_width);
+
+ host->flags &= ~SDHCI_HS400_TUNING;
+ return 0;
+}
static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
{
@@ -179,11 +260,35 @@ static void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
}
+static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
+ struct sdhci_host *host)
+{
+ int ret;
+
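+	/* pci_find_capability() returns 0 when the device lacks MSI support */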
+ ret = pci_find_capability(chip->pdev, PCI_CAP_ID_MSI);
+ if (!ret) {
+ pr_info("%s: unsupport msi, use INTx irq\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
+ ret = pci_alloc_irq_vectors(chip->pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ pr_err("%s: enable PCI MSI failed, err=%d\n",
+ mmc_hostname(host->mmc), ret);
+ return;
+ }
+
+ host->irq = pci_irq_vector(chip->pdev, 0);
+}
+
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
u32 reg;
+ int ret;
chip = slot->chip;
host = slot->host;
@@ -197,6 +302,25 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
if (reg & 0x1)
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+ sdhci_pci_o2_enable_msi(chip, host);
+
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD0) {
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_MISC_SETTING, &reg);
+ if (ret)
+ return -EIO;
+ if (reg & (1 << 4)) {
+ pr_info("%s: emmc 1.8v flag is set, force 1.8v signaling voltage\n",
+ mmc_hostname(host->mmc));
+ host->flags &= ~SDHCI_SIGNALING_330;
+ host->flags |= SDHCI_SIGNALING_180;
+ host->mmc->caps2 |= MMC_CAP2_NO_SD;
+ host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+ }
+ }
+
+ host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
+
if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
break;
/* set dll watch dog timer */
@@ -293,9 +417,8 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Check Whether subId is 0x11 or 0x12 */
if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) {
- scratch_32 = 0x2c280000;
+ scratch_32 = 0x25100000;
- /* Set Base Clock to 208MZ */
o2_pci_set_baseclk(chip, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_FUNC_REG4,
@@ -388,7 +511,7 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_PLL_SETTING, scratch_32);
} else {
scratch_32 &= 0x0000FFFF;
- scratch_32 |= 0x2c280000;
+ scratch_32 |= 0x25100000;
pci_write_config_dword(chip->pdev,
O2_SD_PLL_SETTING, scratch_32);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index db9cb54ef700..2ef0bdca9197 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -48,6 +48,8 @@
#define PCI_DEVICE_ID_INTEL_CNP_EMMC 0x9dc4
#define PCI_DEVICE_ID_INTEL_CNP_SD 0x9df5
#define PCI_DEVICE_ID_INTEL_CNPH_SD 0xa375
+#define PCI_DEVICE_ID_INTEL_ICP_EMMC 0x34c4
+#define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
@@ -59,6 +61,8 @@
#define PCI_VENDOR_ID_ARASAN 0x16e6
#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670
+#define PCI_DEVICE_ID_SYNOPSYS_DWC_MSHC 0xc202
+
/*
* PCI device class and mask
*/
@@ -182,5 +186,6 @@ int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
#endif
extern const struct sdhci_pci_fixes sdhci_arasan;
+extern const struct sdhci_pci_fixes sdhci_snps;
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 970d38f68939..908b23e6a03c 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -210,9 +210,24 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (!clock)
return sdhci_set_clock(host, clock);
+ /*
+ * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
+	 * divider to be configured to divide the host clock by two. The SDHCI
+ * clock divider is calculated as part of sdhci_set_clock() by
+ * sdhci_calc_clk(). The divider is calculated from host->max_clk and
+ * the requested clock rate.
+ *
+ * By setting the host->max_clk to clock * 2 the divider calculation
+ * will always result in the correct value for DDR50/52 modes,
+ * regardless of clock rate rounding, which may happen if the value
+ * from clk_get_rate() is used.
+ */
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
- host->max_clk = clk_get_rate(pltfm_host->clk);
+ if (tegra_host->ddr_signaling)
+ host->max_clk = host_clk;
+ else
+ host->max_clk = clk_get_rate(pltfm_host->clk);
sdhci_set_clock(host, clock);
@@ -228,7 +243,8 @@ static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
- if (timing == MMC_TIMING_UHS_DDR50)
+ if (timing == MMC_TIMING_UHS_DDR50 ||
+ timing == MMC_TIMING_MMC_DDR52)
tegra_host->ddr_signaling = true;
sdhci_set_uhs_signaling(host, timing);
@@ -238,11 +254,7 @@ static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- /*
- * DDR modes require the host to run at double the card frequency, so
- * the maximum rate we can support is half of the module input clock.
- */
- return clk_round_rate(pltfm_host->clk, UINT_MAX) / 2;
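+	/*
+	 * The clock doubling needed by DDR modes is handled in
+	 * tegra_sdhci_set_clock(), so the full module clock rate
+	 * can be reported here.
+	 */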
+ return clk_round_rate(pltfm_host->clk, UINT_MAX);
}
static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
@@ -334,7 +346,16 @@ static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_BROKEN_HS200 |
+ /*
+ * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
+ * though no command operation was in progress."
+ *
+ * The exact reason is unknown, as the same hardware seems
+ * to support Auto CMD23 on a downstream 3.1 kernel.
+ */
+ SDHCI_QUIRK2_ACMD23_BROKEN,
.ops = &tegra_sdhci_ops,
};
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index a35804b203a7..c335052d0c02 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -526,6 +526,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host,
ret = true;
break;
}
+ /* else: fall through */
default:
reg &= ~XENON_TIMING_ADJUST_SLOW_MODE;
ret = false;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1c828e0e9905..1b3fbd9bd5c5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1029,7 +1029,9 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
if (data == NULL) {
if (host->quirks2 &
SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
- sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
+ /* must not clear SDHCI_TRANSFER_MODE when tuning */
+ if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
} else {
/* clear Auto CMD settings for no data CMDs */
mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
@@ -2103,7 +2105,7 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
-static void sdhci_start_tuning(struct sdhci_host *host)
+void sdhci_start_tuning(struct sdhci_host *host)
{
u16 ctrl;
@@ -2126,14 +2128,16 @@ static void sdhci_start_tuning(struct sdhci_host *host)
sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}
+EXPORT_SYMBOL_GPL(sdhci_start_tuning);
-static void sdhci_end_tuning(struct sdhci_host *host)
+void sdhci_end_tuning(struct sdhci_host *host)
{
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+EXPORT_SYMBOL_GPL(sdhci_end_tuning);
-static void sdhci_reset_tuning(struct sdhci_host *host)
+void sdhci_reset_tuning(struct sdhci_host *host)
{
u16 ctrl;
@@ -2142,6 +2146,7 @@ static void sdhci_reset_tuning(struct sdhci_host *host)
ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
+EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
@@ -2162,7 +2167,7 @@ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
* interrupt setup is different to other commands and there is no timeout
* interrupt so special handling is needed.
*/
-static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
+void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
struct mmc_host *mmc = host->mmc;
struct mmc_command cmd = {};
@@ -2212,6 +2217,7 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
msecs_to_jiffies(50));
}
+EXPORT_SYMBOL_GPL(sdhci_send_tuning);
static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
@@ -3734,14 +3740,21 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc_gpio_get_cd(host->mmc) < 0)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = regulator_enable(mmc->supply.vqmmc);
+
+ /* If vqmmc provides no 1.8V signalling, then there's no UHS */
if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
1950000))
host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_SDR50 |
SDHCI_SUPPORT_DDR50);
+
+ /* In eMMC case vqmmc might be a fixed 1.8V regulator */
+ if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
+ 3600000))
+ host->flags &= ~SDHCI_SIGNALING_330;
+
if (ret) {
pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
mmc_hostname(mmc), ret);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 23966f887da6..f0bd36ce3817 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -748,4 +748,9 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
void sdhci_dumpregs(struct sdhci_host *host);
+void sdhci_start_tuning(struct sdhci_host *host);
+void sdhci_end_tuning(struct sdhci_host *host);
+void sdhci_reset_tuning(struct sdhci_host *host);
+void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
+
#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index e7472590f2ed..568349e1fbc2 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1388,7 +1388,7 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
if (host->cfg->clk_delays || host->use_new_timings)
- mmc->caps |= MMC_CAP_1_8V_DDR;
+ mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
ret = mmc_of_parse(mmc);
if (ret)
@@ -1407,7 +1407,10 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
if (ret)
goto error_free_dma;
- dev_info(&pdev->dev, "base:0x%p irq:%u\n", host->reg_base, host->irq);
+ dev_info(&pdev->dev, "initialized, max. request size: %u KB%s\n",
+ mmc->max_req_size >> 10,
+ host->use_new_timings ? ", uses new timings mode" : "");
+
return 0;
error_free_dma:
@@ -1446,6 +1449,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev)
sunxi_mmc_init_host(host);
sunxi_mmc_set_bus_width(host, mmc->ios.bus_width);
sunxi_mmc_set_clk(host, &mmc->ios);
+ enable_irq(host->irq);
return 0;
}
@@ -1455,6 +1459,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct sunxi_mmc_host *host = mmc_priv(mmc);
+ /*
+	 * When the clocks are off, spurious interrupts may be
+	 * received and stall the system. Disabling the irq
+	 * prevents this.
+ */
+ disable_irq(host->irq);
sunxi_mmc_reset_host(host);
sunxi_mmc_disable(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index e7d651352dc9..5d141f79e175 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -46,6 +46,7 @@
#define CTL_DMA_ENABLE 0xd8
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
+#define CTL_SDIF_MODE 0xe6
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0
@@ -191,6 +192,11 @@ struct tmio_mmc_host {
/* Tuning values: 1 for success, 0 for failure */
DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
unsigned int tap_num;
+ unsigned long tap_set;
+
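+	/* Optional HS400 callbacks, filled in by SoC-specific glue drivers */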
+ void (*prepare_hs400_tuning)(struct tmio_mmc_host *host);
+ void (*hs400_downgrade)(struct tmio_mmc_host *host);
+ void (*hs400_complete)(struct tmio_mmc_host *host);
const struct tmio_mmc_dma_ops *dma_ops;
};
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 308029930304..261b4d62d2b1 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -199,6 +199,14 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
tmio_mmc_clk_stop(host);
return;
}
+ /*
+	 * Both HS400 and HS200/SDR104 set 200MHz, but some devices need to
+ * set 400MHz to distinguish the CPG settings in HS400.
+ */
+ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
+ host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 &&
+ new_clock == 200000000)
+ new_clock = 400000000;
if (host->clk_update)
clock = host->clk_update(host, new_clock) / 512;
@@ -209,8 +217,13 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
clock <<= 1;
/* 1/1 clock is option */
- if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
- clk |= 0xff;
+ if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
+ ((clk >> 22) & 0x1)) {
+ if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400))
+ clk |= 0xff;
+ else
+ clk &= ~0xff;
+ }
if (host->set_clk_div)
host->set_clk_div(host->pdev, (clk >> 22) & 1);
@@ -309,7 +322,6 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host,
{
struct mmc_data *data = host->data;
int c = cmd->opcode;
- u32 irq_mask = TMIO_MASK_CMD;
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE: c |= RESP_NONE; break;
@@ -349,7 +361,7 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host,
c |= TRANSFER_READ;
}
- tmio_mmc_enable_mmc_irqs(host, irq_mask);
+ tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);
/* Fire off the command */
sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
@@ -805,8 +817,6 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
host->prepare_tuning(host, i % host->tap_num);
ret = mmc_send_tuning(mmc, opcode, NULL);
- if (ret && ret != -EILSEQ)
- goto out;
if (ret == 0)
set_bit(i, host->taps);
@@ -1087,6 +1097,33 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
+static int tmio_mmc_prepare_hs400_tuning(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (host->prepare_hs400_tuning)
+ host->prepare_hs400_tuning(host);
+
+ return 0;
+}
+
+static void tmio_mmc_hs400_downgrade(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (host->hs400_downgrade)
+ host->hs400_downgrade(host);
+}
+
+static void tmio_mmc_hs400_complete(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (host->hs400_complete)
+ host->hs400_complete(host);
+}
+
static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
@@ -1096,6 +1133,9 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.multi_io_quirk = tmio_multi_io_quirk,
.hw_reset = tmio_mmc_hw_reset,
.execute_tuning = tmio_mmc_execute_tuning,
+ .prepare_hs400_tuning = tmio_mmc_prepare_hs400_tuning,
+ .hs400_downgrade = tmio_mmc_hs400_downgrade,
+ .hs400_complete = tmio_mmc_hs400_complete,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 46ab7feec6b6..c77f537323ec 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -24,7 +24,7 @@ config MTD_TESTS
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
- ---help---
+ help
RedBoot is a ROM monitor and bootloader which deals with multiple
'images' in flash devices by putting a table in one of the erase
blocks on the device, similar to a partition table, which gives
@@ -45,7 +45,7 @@ if MTD_REDBOOT_PARTS
config MTD_REDBOOT_DIRECTORY_BLOCK
int "Location of RedBoot partition table"
default "-1"
- ---help---
+ help
This option is the Linux counterpart to the
CYGNUM_REDBOOT_FIS_DIRECTORY_BLOCK RedBoot compile time
option.
@@ -75,7 +75,7 @@ endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
tristate "Command line partition table parsing"
depends on MTD
- ---help---
+ help
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
different kinds of flash memory are available.
@@ -112,7 +112,7 @@ config MTD_CMDLINE_PARTS
config MTD_AFS_PARTS
tristate "ARM Firmware Suite partition parsing"
depends on (ARM || ARM64)
- ---help---
+ help
The ARM Firmware Suite allows the user to divide flash devices into
multiple 'images'. Each such image has a header containing its name
and offset/size etc.
@@ -136,7 +136,7 @@ config MTD_OF_PARTS
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
- ---help---
+ help
TI AR7 partitioning support
config MTD_BCM63XX_PARTS
@@ -170,7 +170,7 @@ config MTD_BLOCK
tristate "Caching block device access to MTD devices"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
Although most flash chips have an erase size too large to be useful
as block devices, it is possible to use MTD devices which are based
on RAM chips in this manner. This block device is a user of MTD
@@ -205,7 +205,7 @@ config FTL
tristate "FTL (Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the original Flash Translation Layer which
is part of the PCMCIA specification. It uses a kind of pseudo-
file system on a flash device to emulate a block device with
@@ -222,7 +222,7 @@ config NFTL
tristate "NFTL (NAND Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the NAND Flash Translation Layer which is
used on M-Systems' DiskOnChip devices. It uses a kind of pseudo-
file system on a flash device to emulate a block device with
@@ -246,7 +246,7 @@ config INFTL
tristate "INFTL (Inverse NAND Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the Inverse NAND Flash Translation
Layer which is used on M-Systems' newer DiskOnChip devices. It
uses a kind of pseudo-file system on a flash device to emulate
@@ -261,10 +261,10 @@ config INFTL
not use it.
config RFD_FTL
- tristate "Resident Flash Disk (Flash Translation Layer) support"
+ tristate "Resident Flash Disk (Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the flash translation layer known
as the Resident Flash Disk (RFD), as used by the Embedded BIOS
of General Software. There is a blurb at:
@@ -308,7 +308,7 @@ config MTD_SWAP
select MTD_BLKDEVS
help
Provides volatile block device driver on top of mtd partition
- suitable for swapping. The mapping of written blocks is not saved.
+ suitable for swapping. The mapping of written blocks is not saved.
The driver provides wear leveling by storing erase counter into the
OOB.
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index bbfa1f129266..39ec32a29051 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -44,7 +44,7 @@ choice
prompt "Flash cmd/query data swapping"
depends on MTD_CFI_ADV_OPTIONS
default MTD_CFI_NOSWAP
- ---help---
+ help
This option defines the way in which the CPU attempts to arrange
data bits when writing the 'magic' commands to the chips. Saying
'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index a0c655628d6d..72428b6bfc47 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1216,7 +1216,6 @@ static inline int do_read_secsi_onechip(struct map_info *map,
size_t grouplen)
{
DECLARE_WAITQUEUE(wait, current);
- unsigned long timeo = jiffies + HZ;
retry:
mutex_lock(&chip->mutex);
@@ -1229,7 +1228,6 @@ static inline int do_read_secsi_onechip(struct map_info *map,
schedule();
remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
goto retry;
}
@@ -2526,7 +2524,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct ppb_lock {
struct flchip *chip;
- loff_t offset;
+ unsigned long adr;
int locked;
};
@@ -2544,8 +2542,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
unsigned long timeo;
int ret;
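+	/* Fold in the chip offset once; 'adr' is an absolute address below */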
+ adr += chip->start;
mutex_lock(&chip->mutex);
- ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
@@ -2563,8 +2562,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
chip->state = FL_LOCKING;
- map_write(map, CMD(0xA0), chip->start + adr);
- map_write(map, CMD(0x00), chip->start + adr);
+ map_write(map, CMD(0xA0), adr);
+ map_write(map, CMD(0x00), adr);
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
/*
* Unlocking of one specific sector is not supported, so we
@@ -2602,7 +2601,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
- put_chip(map, chip, adr + chip->start);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
@@ -2659,9 +2658,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
* sectors shall be unlocked, so lets keep their locking
* status at "unlocked" (locked=0) for the final re-locking.
*/
- if ((adr < ofs) || (adr >= (ofs + len))) {
+ if ((offset < ofs) || (offset >= (ofs + len))) {
sect[sectors].chip = &cfi->chips[chipnum];
- sect[sectors].offset = offset;
+ sect[sectors].adr = adr;
sect[sectors].locked = do_ppb_xxlock(
map, &cfi->chips[chipnum], adr, 0,
DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2675,6 +2674,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
i++;
if (adr >> cfi->chipshift) {
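+			/* Stop the walk once past the end of the requested range */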
+ if (offset >= (ofs + len))
+ break;
adr = 0;
chipnum++;
@@ -2705,7 +2706,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
*/
for (i = 0; i < sectors; i++) {
if (sect[i].locked)
- do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
DO_XXLOCK_ONEBLOCK_LOCK);
}
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index b57ceea21513..837b04ab96a9 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -202,16 +202,19 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
struct cfi_private *cfi = map->fldrv_priv;
__u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
#ifdef CONFIG_MODULES
- char probename[sizeof(VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X))];
cfi_cmdset_fn_t *probe_function;
+ char *probename;
- sprintf(probename, VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X), type);
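+	/* Build the cfi_cmdset_XXXX symbol name at run time */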
+ probename = kasprintf(GFP_KERNEL, "cfi_cmdset_%4.4X", type);
+ if (!probename)
+ return NULL;
probe_function = __symbol_get(probename);
if (!probe_function) {
request_module("cfi_cmdset_%4.4X", type);
probe_function = __symbol_get(probename);
}
+ kfree(probename);
if (probe_function) {
struct mtd_info *mtd;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 57b02c4b3f63..e514d57a0419 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -5,7 +5,7 @@ menu "Self-contained MTD device drivers"
config MTD_PMC551
tristate "Ramix PMC551 PCI Mezzanine RAM card support"
depends on PCI
- ---help---
+ help
This provides a MTD device driver for the Ramix PMC551 RAM PCI card
from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
These devices come in memory configurations from 32M - 1G. If you
@@ -209,7 +209,7 @@ config MTD_DOCG3
select BCH
select BCH_CONST_PARAMS
select BITREVERSE
- ---help---
+ help
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index e84563d2067f..cbfafc453274 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -28,11 +28,9 @@
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
-#define MAX_CMD_SIZE 6
struct m25p {
struct spi_mem *spimem;
struct spi_nor spi_nor;
- u8 command[MAX_CMD_SIZE];
};
static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
@@ -70,7 +68,7 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
- SPI_MEM_OP_DUMMY(0, 1),
+ SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 1));
size_t remaining = len;
int ret;
@@ -78,7 +76,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
/* get transfer protocols. */
op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op.dummy.buswidth = op.addr.buswidth;
op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
@@ -202,6 +199,9 @@ static int m25p_probe(struct spi_mem *spimem)
if (data && data->name)
nor->mtd.name = data->name;
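+	/* Otherwise fall back to the name provided by the SPI MEM layer */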
+ if (!nor->mtd.name)
+ nor->mtd.name = spi_mem_get_name(spimem);
+
/* For some (historical?) reason many platforms provide two different
* names in flash_platform_data: "name" and "type". Quite often name is
* set to "m25p80" and then "type" provides a real chip name.
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 3a6f450d1093..53febe8a68c3 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = {
{ "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
- { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
- { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+ { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+ { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
};
static struct flash_info *jedec_lookup(struct spi_device *spi,
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index c1312b141ae0..33593122e49b 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -223,6 +223,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
mtd->_read = powernv_flash_read;
mtd->_write = powernv_flash_write;
mtd->dev.parent = dev;
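+	/* Expose the DT node so partition parsers (e.g. ofpart) can find it */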
+ mtd_set_of_node(mtd, dev->of_node);
return 0;
}
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 1897f33fe3e7..10d24efb4629 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -394,9 +394,8 @@ static int sst25l_probe(struct spi_device *spi)
flash->mtd.numeraseregions);
- ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
- data ? data->parts : NULL,
- data ? data->nr_parts : 0);
+ ret = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
if (ret)
return -ENODEV;
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
index 3a19cbee24d7..a5a332fbd593 100644
--- a/drivers/mtd/lpddr/Kconfig
+++ b/drivers/mtd/lpddr/Kconfig
@@ -13,10 +13,10 @@ config MTD_QINFO_PROBE
depends on MTD_LPDDR
tristate "Detect flash chips by QINFO probe"
help
- Device Information for LPDDR chips is offered through the Overlay
- Window QINFO interface, permits software to be used for entire
- families of devices. This serves similar purpose of CFI on legacy
- Flash products
+	  Device Information for LPDDR chips is offered through the Overlay
+	  Window QINFO interface, which permits the same software to be used
+	  across entire families of devices. This serves a similar purpose to
+	  CFI on legacy flash products.
config MTD_LPDDR2_NVM
# ARM dependency is only for writel_relaxed()
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 5d73db2a496d..c950c880ad59 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -476,7 +476,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
return -EINVAL;
}
/* Parse partitions and register the MTD device */
- return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ return mtd_device_register(mtd, NULL, 0);
}
/*
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index bdc1283f30fb..afb36bff13a7 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -207,13 +207,13 @@ config MTD_ICHXROM
BE VERY CAREFUL.
config MTD_ESB2ROM
- tristate "BIOS flash chip on Intel ESB Controller Hub 2"
- depends on X86 && MTD_JEDECPROBE && PCI
- help
- Support for treating the BIOS flash chip on ESB2 motherboards
- as an MTD device - with this you can reprogram your BIOS.
+ tristate "BIOS flash chip on Intel ESB Controller Hub 2"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on ESB2 motherboards
+ as an MTD device - with this you can reprogram your BIOS.
- BE VERY CAREFUL.
+ BE VERY CAREFUL.
config MTD_CK804XROM
tristate "BIOS flash chip on Nvidia CK804"
@@ -401,12 +401,12 @@ config MTD_PISMO
When built as a module, it will be called pismo.ko
config MTD_LATCH_ADDR
- tristate "Latch-assisted Flash Chip Support"
- depends on MTD_COMPLEX_MAPPINGS
- help
- Map driver which allows flashes to be partially physically addressed
- and have the upper address lines set by a board specific code.
+ tristate "Latch-assisted Flash Chip Support"
+ depends on MTD_COMPLEX_MAPPINGS
+ help
+ Map driver which allows flashes to be partially physically addressed
+	  and have the upper address lines set by board-specific code.
- If compiled as a module, it will be called latch-addr-flash.
+ If compiled as a module, it will be called latch-addr-flash.
endmenu
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 385305e66fd1..9d9723693217 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -239,6 +239,9 @@ static int gpio_flash_probe(struct platform_device *pdev)
state->map.bankwidth = pdata->width;
state->map.size = state->win_size * (1 << state->gpio_count);
state->map.virt = ioremap_nocache(memory->start, state->map.size);
+ if (!state->map.virt)
+ return -ENOMEM;
+
state->map.phys = NO_XIP;
state->map.map_priv_1 = (unsigned long)state;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index a0b8fa7849a9..815e2db87955 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -88,9 +88,8 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
- mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
- partitions,
- ARRAY_SIZE(partitions));
+ mtd_device_register(impa7_mtd[i], partitions,
+ ARRAY_SIZE(partitions));
} else {
iounmap((void __iomem *)impa7_map[i].virt);
}
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index dd5d6855f543..69503aef981e 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -71,7 +71,7 @@ static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
- return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
+ return mtd_device_register(p->info, NULL, 0);
}
static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 6dc97aa667dc..51db24b7f88d 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -197,9 +197,8 @@ static int latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->dev.parent = &dev->dev;
- mtd_device_parse_register(info->mtd, NULL, NULL,
- latch_addr_data->parts,
- latch_addr_data->nr_parts);
+ mtd_device_register(info->mtd, latch_addr_data->parts,
+ latch_addr_data->nr_parts);
return 0;
iounmap:
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 3a06ecfc55ff..80a187167c92 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -97,8 +97,7 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
goto err_out;
}
info->mtd->dev.parent = &dev->dev;
- err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
- pdata->nr_parts);
+ err = mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
if (err)
goto err_out;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index bb580bc16445..c07f21b20463 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -59,9 +59,9 @@ static int __init init_soleng_maps(void)
return -ENXIO;
}
}
- printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
- soleng_flash_map.phys & 0x1fffffff,
- soleng_eprom_map.phys & 0x1fffffff);
+ printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n",
+ &soleng_flash_map.phys,
+ &soleng_eprom_map.phys);
flash_mtd->owner = THIS_MODULE;
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index cd67c85cc87d..02389528f622 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -160,8 +160,12 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
pr_debug("MTD_read\n");
- if (*ppos + count > mtd->size)
- count = mtd->size - *ppos;
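+	/* Avoid 'count' underflow when *ppos is already past the end */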
+ if (*ppos + count > mtd->size) {
+ if (*ppos < mtd->size)
+ count = mtd->size - *ppos;
+ else
+ count = 0;
+ }
if (!count)
return 0;
@@ -246,7 +250,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
pr_debug("MTD_write\n");
- if (*ppos == mtd->size)
+ if (*ppos >= mtd->size)
return -ENOSPC;
if (*ppos + count > mtd->size)
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 42395df06be9..97ac219c082e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1155,21 +1155,29 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
ops->retlen = ops->oobretlen = 0;
- if (!mtd->_read_oob)
- return -EOPNOTSUPP;
ret_code = mtd_check_oob_ops(mtd, from, ops);
if (ret_code)
return ret_code;
ledtrig_mtd_activity();
+
+ /* Check the validity of a potential fallback on mtd->_read */
+ if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->_read_oob)
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ else
+ ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
* representing max bitflips. In other cases, mtd->_read_oob() may
* return -EUCLEAN. In all cases, perform similar logic to mtd_read().
*/
- ret_code = mtd->_read_oob(mtd, from, ops);
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1184,8 +1192,7 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
int ret;
ops->retlen = ops->oobretlen = 0;
- if (!mtd->_write_oob)
- return -EOPNOTSUPP;
+
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
@@ -1194,7 +1201,16 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
return ret;
ledtrig_mtd_activity();
- return mtd->_write_oob(mtd, to, ops);
+
+ /* Check the validity of a potential fallback on mtd->_write */
+ if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->_write_oob)
+ return mtd->_write_oob(mtd, to, ops);
+ else
+ return mtd->_write(mtd, to, ops->len, &ops->retlen,
+ ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index f8d3a015cdad..52e2cb35fc79 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -322,22 +322,6 @@ static inline void free_partition(struct mtd_part *p)
kfree(p);
}
-/**
- * mtd_parse_part - parse MTD partition looking for subpartitions
- *
- * @slave: part that is supposed to be a container and should be parsed
- * @types: NULL-terminated array with names of partition parsers to try
- *
- * Some partitions are kind of containers with extra subpartitions (volumes).
- * There can be various formats of such containers. This function tries to use
- * specified parsers to analyze given partition and registers found
- * subpartitions on success.
- */
-static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
-{
- return parse_mtd_partitions(&slave->mtd, types, NULL);
-}
-
static struct mtd_part *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part, int partno,
uint64_t cur_offset)
@@ -735,8 +719,8 @@ int add_mtd_partitions(struct mtd_info *master,
add_mtd_device(&slave->mtd);
mtd_add_partition_attrs(slave);
- if (parts[i].types)
- mtd_parse_part(slave, parts[i].types);
+ /* Look for subpartitions */
+ parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
cur_offset = slave->offset + slave->mtd.size;
}
@@ -812,6 +796,12 @@ static const char * const default_mtd_part_types[] = {
NULL
};
+/* Check DT only when looking for subpartitions. */
+static const char * const default_subpartition_types[] = {
+ "ofpart",
+ NULL
+};
+
static int mtd_part_do_parse(struct mtd_part_parser *parser,
struct mtd_info *master,
struct mtd_partitions *pparts,
@@ -882,7 +872,9 @@ static int mtd_part_of_parse(struct mtd_info *master,
const char *fixed = "fixed-partitions";
int ret, err = 0;
- np = of_get_child_by_name(mtd_get_of_node(master), "partitions");
+ np = mtd_get_of_node(master);
+ if (!mtd_is_partition(master))
+ np = of_get_child_by_name(np, "partitions");
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
@@ -945,7 +937,8 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
int ret, err = 0;
if (!types)
- types = default_mtd_part_types;
+ types = mtd_is_partition(master) ? default_subpartition_types :
+ default_mtd_part_types;
for ( ; *types; types++) {
/*
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 88c7d3b4ff8b..9033215e62ea 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -4,3 +4,4 @@ config MTD_NAND_CORE
source "drivers/mtd/nand/onenand/Kconfig"
source "drivers/mtd/nand/raw/Kconfig"
+source "drivers/mtd/nand/spi/Kconfig"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 3f0cb87f1a57..7ecd80c0a66e 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-y += onenand/
obj-y += raw/
+obj-y += spi/
diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c
index d5ccaf943b91..acad17ec6581 100644
--- a/drivers/mtd/nand/onenand/generic.c
+++ b/drivers/mtd/nand/onenand/generic.c
@@ -66,9 +66,8 @@ static int generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
- err = mtd_device_parse_register(&info->mtd, NULL, NULL,
- pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ err = mtd_device_register(&info->mtd, pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, info);
diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/samsung.c
index 4cce4c0311ca..e64d0fdf7eb5 100644
--- a/drivers/mtd/nand/onenand/samsung.c
+++ b/drivers/mtd/nand/onenand/samsung.c
@@ -933,9 +933,8 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
- err = mtd_device_parse_register(mtd, NULL, NULL,
- pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ err = mtd_device_register(mtd, pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
if (err) {
dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
onenand_release(mtd);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 6871ff0fd300..5fc9a1bde4ac 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -44,12 +44,12 @@ config MTD_NAND_DENALI
tristate
config MTD_NAND_DENALI_PCI
- tristate "Support Denali NAND controller on Intel Moorestown"
+ tristate "Support Denali NAND controller on Intel Moorestown"
select MTD_NAND_DENALI
depends on PCI
- help
- Enable the driver for NAND flash on Intel Moorestown, using the
- Denali NAND controller core.
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
config MTD_NAND_DENALI_DT
tristate "Support Denali NAND controller as a DT device"
@@ -77,9 +77,10 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
- depends on (ARCH_OMAP2PLUS || ARCH_KEYSTONE)
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on HAS_IOMEM
help
- Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
config MTD_NAND_OMAP_BCH
@@ -137,7 +138,7 @@ config MTD_NAND_NDFC
depends on 4xx
select MTD_NAND_ECC_SMC
help
- NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
+ NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
config MTD_NAND_S3C2410_CLKSTOP
bool "Samsung S3C NAND IDLE clock stop"
@@ -152,6 +153,7 @@ config MTD_NAND_S3C2410_CLKSTOP
config MTD_NAND_TANGO
tristate "NAND Flash support for Tango chips"
depends on ARCH_TANGO || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables the NAND Flash controller on Tango chips.
@@ -168,40 +170,40 @@ config MTD_NAND_DISKONCHIP
these devices.
config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_NAND_DISKONCHIP
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+	  This option allows you to specify a nonstandard address at which to
+ probe for a DiskOnChip, or to change the detection options. You
+ are unlikely to need any of this unless you are using LinuxBIOS.
+ Say 'N'.
config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- depends on MTD_NAND_DISKONCHIP
- default "0"
- ---help---
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
config MTD_NAND_DISKONCHIP_PROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option changes to make it probe between 0xFFFC8000 and
+ 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
+ useful to you. Say 'N'.
config MTD_NAND_DISKONCHIP_BBTWRITE
bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
@@ -247,7 +249,8 @@ config MTD_NAND_DOCG4
config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
+ depends on HAS_IOMEM
config MTD_NAND_CAFE
tristate "NAND support for OLPC CAFÉ chip"
@@ -274,7 +277,9 @@ config MTD_NAND_CS553X
config MTD_NAND_ATMEL
tristate "Support for NAND Flash / SmartMedia on AT91"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_ALLOCATOR
select MFD_ATMEL_SMC
help
Enables support for NAND Flash / Smart Media Card interface
@@ -294,7 +299,8 @@ config MTD_NAND_MARVELL
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
chips) NAND controller. This is the default for the PHYTEC 3250
@@ -305,7 +311,8 @@ config MTD_NAND_SLC_LPC32XX
config MTD_NAND_MLC_LPC32XX
tristate "NXP LPC32xx MLC Controller"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
controller. This is the default for the WORK92105 controller
@@ -339,17 +346,18 @@ config MTD_NAND_NANDSIM
MTD nand layer.
config MTD_NAND_GPMI_NAND
- tristate "GPMI NAND Flash Controller driver"
- depends on MTD_NAND && MXS_DMA
- help
- Enables NAND Flash support for IMX23, IMX28 or IMX6.
- The GPMI controller is very powerful, with the help of BCH
- module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time.
+ tristate "GPMI NAND Flash Controller driver"
+ depends on MXS_DMA
+ help
+ Enables NAND Flash support for IMX23, IMX28 or IMX6.
+ The GPMI controller is very powerful, with the help of BCH
+ module, it can do the hardware ECC. The GPMI supports several
+	  NAND flashes at the same time.
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
- depends on ARM || ARM64 || MIPS
+ depends on ARM || ARM64 || MIPS || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables the Broadcom NAND controller driver. The controller was
originally designed for Set-Top Box but is used on various BCM7xxx,
@@ -358,6 +366,7 @@ config MTD_NAND_BRCMNAND
config MTD_NAND_BCM47XXNFLASH
tristate "Support for NAND flash on BCM4706 BCMA bus"
depends on BCMA_NFLASH
+ depends on BCMA
help
	  The BCMA bus can have various flash memories attached; they are
	  registered by bcma as platform devices. This enables the driver for
@@ -399,7 +408,8 @@ config MTD_NAND_FSL_ELBC
config MTD_NAND_FSL_IFC
tristate "NAND support for Freescale IFC controller"
- depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
select FSL_IFC
select MEMORY
help
@@ -437,7 +447,8 @@ config MTD_NAND_VF610_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the driver for the NAND flash controller on the
MXC processors.
@@ -451,15 +462,17 @@ config MTD_NAND_SH_FLCTL
for NAND Flash using FLCTL.
config MTD_NAND_DAVINCI
- tristate "Support NAND on DaVinci/Keystone SoC"
- depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
- help
+ tristate "Support NAND on DaVinci/Keystone SoC"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
Enable the driver for NAND flash chips on Texas Instruments
DaVinci/Keystone processors.
config MTD_NAND_TXX9NDFMC
tristate "NAND Flash support for TXx9 SoC"
- depends on SOC_TX4938 || SOC_TX4939
+ depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the NAND flash controller on the TXx9 SoCs.
@@ -471,28 +484,31 @@ config MTD_NAND_SOCRATES
config MTD_NAND_NUC900
tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
- depends on ARCH_W90X900
+ depends on ARCH_W90X900 || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the driver for the NAND Flash on evaluation board based
on w90p910 / NUC9xx.
config MTD_NAND_JZ4740
tristate "Support for JZ4740 SoC NAND controller"
- depends on MACH_JZ4740
+ depends on MACH_JZ4740 || COMPILE_TEST
+ depends on HAS_IOMEM
help
- Enables support for NAND Flash on JZ4740 SoC based boards.
+ Enables support for NAND Flash on JZ4740 SoC based boards.
config MTD_NAND_JZ4780
tristate "Support for NAND on JZ4780 SoC"
- depends on MACH_JZ4780 && JZ4780_NEMC
+ depends on JZ4780_NEMC
help
Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
based boards, using the BCH controller for hardware error correction.
config MTD_NAND_FSMC
tristate "Support for NAND on ST Micros FSMC"
- depends on OF
- depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
+ depends on OF && HAS_IOMEM
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300 || \
+ COMPILE_TEST
help
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
@@ -506,19 +522,22 @@ config MTD_NAND_XWAY
config MTD_NAND_SUNXI
tristate "Support for NAND on Allwinner SoCs"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND Flash chips on Allwinner SoCs.
config MTD_NAND_HISI504
tristate "Support for NAND controller on Hisilicon SoC Hip04"
depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND controller on Hisilicon SoC Hip04.
config MTD_NAND_QCOM
tristate "Support for NAND on QCOM SoCs"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND flash chips on SoCs containing the EBI2 NAND
controller. This controller is found on IPQ806x SoC.
@@ -526,8 +545,20 @@ config MTD_NAND_QCOM
config MTD_NAND_MTK
tristate "Support for NAND controller on MTK SoCs"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND controller on MTK SoCs.
This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+config MTD_NAND_TEGRA
+ tristate "Support for NAND controller on NVIDIA Tegra"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  Enables support for the NAND flash controller on NVIDIA Tegra SoCs.
+	  The driver has been developed and tested on a Tegra 2 SoC. DMA
+	  support and raw page read/write, as well as HW ECC page read/write,
+	  are supported. Extra OOB bytes when using HW ECC are currently
+	  not supported.
+
endif # MTD_NAND
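
As an aside on the DiskOnChip help text near the top of this Kconfig hunk: the two probe windows it describes are easy to enumerate. The standalone sketch below (not part of the patch) prints every legacy probe address, i.e. each multiple of 0x2000 from 0xC8000 to 0xEE000, together with its "probe high" counterpart in 0xFFFC8000..0xFFFEE000.

#include <stdio.h>

int main(void)
{
	unsigned long addr;

	/* low window from the help text; the high window is the same
	 * list shifted up by 0xFFF00000 */
	for (addr = 0xC8000; addr <= 0xEE000; addr += 0x2000)
		printf("low: 0x%05lX   high: 0x%08lX\n",
		       addr, 0xFFF00000UL + addr);
	return 0;
}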
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 165b7ef9e9a1..d5a5f9832b88 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 12f6753d47ae..a068b214ebaa 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -52,7 +52,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/genalloc.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
@@ -129,6 +128,11 @@
#define DEFAULT_TIMEOUT_MS 1000
#define MIN_DMA_LEN 128
+static bool atmel_nand_avoid_dma __read_mostly;
+
+MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
+module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
+
enum atmel_nand_rb_type {
ATMEL_NAND_NO_RB,
ATMEL_NAND_NATIVE_RB,
@@ -197,7 +201,7 @@ struct atmel_nand_controller_ops {
int (*remove)(struct atmel_nand_controller *nc);
void (*nand_init)(struct atmel_nand_controller *nc,
struct atmel_nand *nand);
- int (*ecc_init)(struct atmel_nand *nand);
+ int (*ecc_init)(struct nand_chip *chip);
int (*setup_data_interface)(struct atmel_nand *nand, int csline,
const struct nand_data_interface *conf);
};
@@ -211,7 +215,7 @@ struct atmel_nand_controller_caps {
};
struct atmel_nand_controller {
- struct nand_hw_control base;
+ struct nand_controller base;
const struct atmel_nand_controller_caps *caps;
struct device *dev;
struct regmap *smc;
@@ -222,7 +226,7 @@ struct atmel_nand_controller {
};
static inline struct atmel_nand_controller *
-to_nand_controller(struct nand_hw_control *ctl)
+to_nand_controller(struct nand_controller *ctl)
{
return container_of(ctl, struct atmel_nand_controller, base);
}
@@ -234,7 +238,7 @@ struct atmel_smc_nand_controller {
};
static inline struct atmel_smc_nand_controller *
-to_smc_nand_controller(struct nand_hw_control *ctl)
+to_smc_nand_controller(struct nand_controller *ctl)
{
return container_of(to_nand_controller(ctl),
struct atmel_smc_nand_controller, base);
@@ -258,7 +262,7 @@ struct atmel_hsmc_nand_controller {
};
static inline struct atmel_hsmc_nand_controller *
-to_hsmc_nand_controller(struct nand_hw_control *ctl)
+to_hsmc_nand_controller(struct nand_controller *ctl)
{
return container_of(to_nand_controller(ctl),
struct atmel_hsmc_nand_controller, base);
@@ -1128,9 +1132,8 @@ static int atmel_nand_pmecc_init(struct nand_chip *chip)
return 0;
}
-static int atmel_nand_ecc_init(struct atmel_nand *nand)
+static int atmel_nand_ecc_init(struct nand_chip *chip)
{
- struct nand_chip *chip = &nand->base;
struct atmel_nand_controller *nc;
int ret;
@@ -1165,12 +1168,11 @@ static int atmel_nand_ecc_init(struct atmel_nand *nand)
return 0;
}
-static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
+static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
{
- struct nand_chip *chip = &nand->base;
int ret;
- ret = atmel_nand_ecc_init(nand);
+ ret = atmel_nand_ecc_init(chip);
if (ret)
return ret;
@@ -1553,23 +1555,7 @@ static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
chip->select_chip = atmel_hsmc_nand_select_chip;
}
-static int atmel_nand_detect(struct atmel_nand *nand)
-{
- struct nand_chip *chip = &nand->base;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct atmel_nand_controller *nc;
- int ret;
-
- nc = to_nand_controller(chip->controller);
-
- ret = nand_scan_ident(mtd, nand->numcs, NULL);
- if (ret)
- dev_err(nc->dev, "nand_scan_ident() failed: %d\n", ret);
-
- return ret;
-}
-
-static int atmel_nand_unregister(struct atmel_nand *nand)
+static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
{
struct nand_chip *chip = &nand->base;
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -1585,60 +1571,6 @@ static int atmel_nand_unregister(struct atmel_nand *nand)
return 0;
}
-static int atmel_nand_register(struct atmel_nand *nand)
-{
- struct nand_chip *chip = &nand->base;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct atmel_nand_controller *nc;
- int ret;
-
- nc = to_nand_controller(chip->controller);
-
- if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
- /*
- * We keep the MTD name unchanged to avoid breaking platforms
- * where the MTD cmdline parser is used and the bootloader
- * has not been updated to use the new naming scheme.
- */
- mtd->name = "atmel_nand";
- } else if (!mtd->name) {
- /*
- * If the new bindings are used and the bootloader has not been
- * updated to pass a new mtdparts parameter on the cmdline, you
- * should define the following property in your nand node:
- *
- * label = "atmel_nand";
- *
- * This way, mtd->name will be set by the core when
- * nand_set_flash_node() is called.
- */
- mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
- "%s:nand.%d", dev_name(nc->dev),
- nand->cs[0].id);
- if (!mtd->name) {
- dev_err(nc->dev, "Failed to allocate mtd->name\n");
- return -ENOMEM;
- }
- }
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(nc->dev, "nand_scan_tail() failed: %d\n", ret);
- return ret;
- }
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret) {
- dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
- nand_cleanup(chip);
- return ret;
- }
-
- list_add_tail(&nand->node, &nc->chips);
-
- return 0;
-}
-
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
struct device_node *np,
int reg_cells)
@@ -1750,6 +1682,8 @@ static int
atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
struct atmel_nand *nand)
{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/* No card inserted, skip this NAND. */
@@ -1760,15 +1694,22 @@ atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
nc->caps->ops->nand_init(nc, nand);
- ret = atmel_nand_detect(nand);
- if (ret)
+ ret = nand_scan(mtd, nand->numcs);
+ if (ret) {
+ dev_err(nc->dev, "NAND scan failed: %d\n", ret);
return ret;
+ }
- ret = nc->caps->ops->ecc_init(nand);
- if (ret)
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
return ret;
+ }
+
+ list_add_tail(&nand->node, &nc->chips);
- return atmel_nand_register(nand);
+ return 0;
}
static int
@@ -1778,7 +1719,7 @@ atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
int ret;
list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
- ret = atmel_nand_unregister(nand);
+ ret = atmel_nand_controller_remove_nand(nand);
if (ret)
return ret;
}
@@ -1953,6 +1894,51 @@ static const struct of_device_id atmel_matrix_of_ids[] = {
{ /* sentinel */ },
};
+static int atmel_nand_attach_chip(struct nand_chip *chip)
+{
+ struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nc->caps->ops->ecc_init(chip);
+ if (ret)
+ return ret;
+
+ if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "atmel_nand";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your nand node:
+ *
+ * label = "atmel_nand";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nc->dev),
+ nand->cs[0].id);
+ if (!mtd->name) {
+ dev_err(nc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops atmel_nand_controller_ops = {
+ .attach_chip = atmel_nand_attach_chip,
+};
+
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
struct platform_device *pdev,
const struct atmel_nand_controller_caps *caps)
@@ -1961,7 +1947,8 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
struct device_node *np = dev->of_node;
int ret;
- nand_hw_control_init(&nc->base);
+ nand_controller_init(&nc->base);
+ nc->base.ops = &atmel_nand_controller_ops;
INIT_LIST_HEAD(&nc->chips);
nc->dev = dev;
nc->caps = caps;
@@ -1977,7 +1964,7 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
return ret;
}
- if (nc->caps->has_dma) {
+ if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -2045,7 +2032,7 @@ atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
return ret;
}
- nc->ebi_csa_offs = (unsigned int)match->data;
+ nc->ebi_csa_offs = (uintptr_t)match->data;
/*
* The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
@@ -2214,9 +2201,9 @@ atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
return -ENOMEM;
}
- nc->sram.virt = gen_pool_dma_alloc(nc->sram.pool,
- ATMEL_NFC_SRAM_SIZE,
- &nc->sram.dma);
+ nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
+ ATMEL_NFC_SRAM_SIZE,
+ &nc->sram.dma);
if (!nc->sram.virt) {
dev_err(nc->base.dev,
"Could not allocate memory from the NFC SRAM pool\n");
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index df0ef1f1e2f5..35f5c84cd331 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -8,7 +8,6 @@
*/
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 1306aaa7a8bf..4b90d5b380c2 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -114,7 +114,7 @@ enum {
struct brcmnand_controller {
struct device *dev;
- struct nand_hw_control controller;
+ struct nand_controller controller;
void __iomem *nand_base;
void __iomem *nand_fc; /* flash cache */
void __iomem *flash_dma_base;
@@ -2208,6 +2208,40 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
return 0;
}
+static int brcmnand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ /*
+ * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
+ * to/from, and have nand_base pass us a bounce buffer instead, as
+ * needed.
+ */
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (brcmnand_setup_dev(host))
+ return -ENXIO;
+
+ chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
+
+ /* only use our internal HW threshold */
+ mtd->bitflip_threshold = 1;
+
+ ret = brcmstb_choose_ecc_layout(host);
+
+ return ret;
+}
+
+static const struct nand_controller_ops brcmnand_controller_ops = {
+ .attach_chip = brcmnand_attach_chip,
+};
+
static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
{
struct brcmnand_controller *ctrl = host->ctrl;
@@ -2267,33 +2301,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_writereg(ctrl, cfg_offs,
nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- return ret;
-
- chip->options |= NAND_NO_SUBPAGE_WRITE;
- /*
- * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
- * to/from, and have nand_base pass us a bounce buffer instead, as
- * needed.
- */
- chip->options |= NAND_USE_BOUNCE_BUFFER;
-
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- chip->bbt_options |= NAND_BBT_NO_OOB;
-
- if (brcmnand_setup_dev(host))
- return -ENXIO;
-
- chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
- /* only use our internal HW threshold */
- mtd->bitflip_threshold = 1;
-
- ret = brcmstb_choose_ecc_layout(host);
- if (ret)
- return ret;
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, 1);
if (ret)
return ret;
@@ -2433,7 +2441,8 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
- nand_hw_control_init(&ctrl->controller);
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &brcmnand_controller_ops;
INIT_LIST_HEAD(&ctrl->host_list);
/* NAND register range */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
index 5c271077ac87..489af7bc005a 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "brcmnand.h"
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index d721f489b38b..1dbe43adcfe7 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -67,6 +67,7 @@ struct cafe_priv {
int nr_data;
int data_pos;
int page_addr;
+ bool usedma;
dma_addr_t dmaaddr;
unsigned char *dmabuf;
};
@@ -121,7 +122,7 @@ static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
- if (usedma)
+ if (cafe->usedma)
memcpy(cafe->dmabuf + cafe->datalen, buf, len);
else
memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
@@ -137,7 +138,7 @@ static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
- if (usedma)
+ if (cafe->usedma)
memcpy(buf, cafe->dmabuf + cafe->datalen, len);
else
memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
@@ -253,7 +254,7 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
/* NB: The datasheet lies -- we really should be subtracting 1 here */
cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
cafe_writel(cafe, 0x90000000, NAND_IRQ);
- if (usedma && (ctl1 & (3<<25))) {
+ if (cafe->usedma && (ctl1 & (3<<25))) {
uint32_t dmactl = 0xc0000000 + cafe->datalen;
/* If WR or RD bits set, set up DMA */
if (ctl1 & (1<<26)) {
@@ -345,11 +346,6 @@ static irqreturn_t cafe_nand_interrupt(int irq, void *id)
return IRQ_HANDLED;
}
-static void cafe_nand_bug(struct mtd_info *mtd)
-{
- BUG();
-}
-
static int cafe_nand_write_oob(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
@@ -598,6 +594,76 @@ static int cafe_mul(int x)
return gf4096_mul(x, 0xe01);
}
+static int cafe_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int err = 0;
+
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf)
+ return -ENOMEM;
+
+ /* Set up DMA address */
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
+ cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
+
+ /* Restore the DMA flag */
+ cafe->usedma = usedma;
+
+ cafe->ctl2 = BIT(27); /* Reed-Solomon ECC */
+ if (mtd->writesize == 2048)
+ cafe->ctl2 |= BIT(29); /* 2KiB page size */
+
+ /* Set up ECC according to the type of chip we found */
+ mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
+ if (mtd->writesize == 2048) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
+ } else if (mtd->writesize == 512) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
+ } else {
+ dev_warn(&cafe->pdev->dev,
+ "Unexpected NAND flash writesize %d. Aborting\n",
+ mtd->writesize);
+ err = -ENOTSUPP;
+ goto out_free_dma;
+ }
+
+ cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+ cafe->nand.ecc.size = mtd->writesize;
+ cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
+ cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+ cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+ cafe->nand.ecc.read_page = cafe_nand_read_page;
+ cafe->nand.ecc.read_oob = cafe_nand_read_oob;
+
+ return 0;
+
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+
+ return err;
+}
+
+static void cafe_nand_detach_chip(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+}
+
+static const struct nand_controller_ops cafe_nand_controller_ops = {
+ .attach_chip = cafe_nand_attach_chip,
+ .detach_chip = cafe_nand_detach_chip,
+};
+
static int cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -605,7 +671,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
- int old_dma;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -713,65 +778,15 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL),
cafe_readl(cafe, GLOBAL_IRQ_MASK));
- /* Do not use the DMA for the nand_scan_ident() */
- old_dma = usedma;
- usedma = 0;
+ /* Do not use the DMA during the NAND identification */
+ cafe->usedma = 0;
/* Scan to find existence of the device */
- err = nand_scan_ident(mtd, 2, NULL);
+ cafe->nand.dummy_controller.ops = &cafe_nand_controller_ops;
+ err = nand_scan(mtd, 2);
if (err)
goto out_irq;
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
- &cafe->dmaaddr, GFP_KERNEL);
- if (!cafe->dmabuf) {
- err = -ENOMEM;
- goto out_irq;
- }
-
- /* Set up DMA address */
- cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
- cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
-
- cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
- cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
-
- /* Restore the DMA flag */
- usedma = old_dma;
-
- cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
- if (mtd->writesize == 2048)
- cafe->ctl2 |= 1<<29; /* 2KiB page size */
-
- /* Set up ECC according to the type of chip we found */
- mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
- if (mtd->writesize == 2048) {
- cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
- cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
- } else if (mtd->writesize == 512) {
- cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
- cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
- } else {
- pr_warn("Unexpected NAND flash writesize %d. Aborting\n",
- mtd->writesize);
- goto out_free_dma;
- }
- cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
- cafe->nand.ecc.size = mtd->writesize;
- cafe->nand.ecc.bytes = 14;
- cafe->nand.ecc.strength = 4;
- cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
- cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
- cafe->nand.ecc.correct = (void *)cafe_nand_bug;
- cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
- cafe->nand.ecc.write_oob = cafe_nand_write_oob;
- cafe->nand.ecc.read_page = cafe_nand_read_page;
- cafe->nand.ecc.read_oob = cafe_nand_read_oob;
-
- err = nand_scan_tail(mtd);
- if (err)
- goto out_free_dma;
-
pci_set_drvdata(pdev, mtd);
mtd->name = "cafe_nand";
@@ -783,8 +798,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
out_cleanup_nand:
nand_cleanup(&cafe->nand);
- out_free_dma:
- dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
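
The CAFÉ conversion above also tidies the lifetime of the DMA state: the usedma module parameter is snapshotted into per-device state at attach time, and the coherent buffer is owned strictly between ->attach_chip() and ->detach_chip(). A condensed sketch of that ownership pattern (hypothetical foo_* names; the 2112-byte size is taken from the hunk above):

#include <linux/dma-mapping.h>
#include <linux/mtd/rawnand.h>

struct foo_priv {
	struct device *dev;
	bool usedma;
	dma_addr_t dmaaddr;
	unsigned char *dmabuf;
};

static bool usedma = true;	/* module parameter */

static int foo_attach_chip(struct nand_chip *chip)
{
	struct foo_priv *priv = nand_get_controller_data(chip);

	priv->dmabuf = dma_alloc_coherent(priv->dev, 2112,
					  &priv->dmaaddr, GFP_KERNEL);
	if (!priv->dmabuf)
		return -ENOMEM;

	priv->usedma = usedma;	/* snapshot the module parameter */
	return 0;
}

static void foo_detach_chip(struct nand_chip *chip)
{
	struct foo_priv *priv = nand_get_controller_data(chip);

	dma_free_coherent(priv->dev, 2112, priv->dmabuf, priv->dmaaddr);
}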
diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c
index 02d6751e9efe..b66e254b6802 100644
--- a/drivers/mtd/nand/raw/cmx270_nand.c
+++ b/drivers/mtd/nand/raw/cmx270_nand.c
@@ -200,8 +200,8 @@ static int __init cmx270_init(void)
}
/* Register the partitions */
- ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
- partition_info, NUM_PARTITIONS);
+ ret = mtd_device_register(cmx270_nand_mtd, partition_info,
+ NUM_PARTITIONS);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 82269fde9e66..beafad62e7d5 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -310,8 +310,7 @@ static int __init cs553x_init(void)
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
if (cs553x_mtd[i]) {
/* If any devices registered, return success. Else the last error. */
- mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
- NULL, 0);
+ mtd_device_register(cs553x_mtd[i], NULL, 0);
err = 0;
}
}
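
The cmx270 and cs553x hunks are pure simplification: calling mtd_device_parse_register() with NULL probe types is equivalent to mtd_device_register(). If memory serves, the latter is just a convenience macro in include/linux/mtd/mtd.h along these lines:

#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)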
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index cd12e5abafde..40145e206a6b 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -53,15 +53,14 @@
struct davinci_nand_info {
struct nand_chip chip;
- struct device *dev;
+ struct platform_device *pdev;
bool is_readmode;
void __iomem *base;
void __iomem *vaddr;
- uint32_t ioaddr;
- uint32_t current_cs;
+ void __iomem *current_cs;
uint32_t mask_chipsel;
uint32_t mask_ale;
@@ -102,17 +101,17 @@ static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
struct davinci_nand_info *info = to_davinci_nand(mtd);
- uint32_t addr = info->current_cs;
+ void __iomem *addr = info->current_cs;
struct nand_chip *nand = mtd_to_nand(mtd);
/* Did the control lines change? */
if (ctrl & NAND_CTRL_CHANGE) {
if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
- addr |= info->mask_cle;
+ addr += info->mask_cle;
else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
- addr |= info->mask_ale;
+ addr += info->mask_ale;
- nand->IO_ADDR_W = (void __iomem __force *)addr;
+ nand->IO_ADDR_W = addr;
}
if (cmd != NAND_CMD_NONE)
@@ -122,14 +121,14 @@ static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
struct davinci_nand_info *info = to_davinci_nand(mtd);
- uint32_t addr = info->ioaddr;
+
+ info->current_cs = info->vaddr;
/* maybe kick in a second chipselect */
if (chip > 0)
- addr |= info->mask_chipsel;
- info->current_cs = addr;
+ info->current_cs += info->mask_chipsel;
- info->chip.IO_ADDR_W = (void __iomem __force *)addr;
+ info->chip.IO_ADDR_W = info->current_cs;
info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}
@@ -319,7 +318,7 @@ static int nand_davinci_correct_4bit(struct mtd_info *mtd,
/* Unpack ten bytes into eight 10 bit values. We know we're
* little-endian, and use type punning for less shifting/masking.
*/
- if (WARN_ON(0x01 & (unsigned) ecc_code))
+ if (WARN_ON(0x01 & (uintptr_t)ecc_code))
return -EINVAL;
ecc16 = (unsigned short *)ecc_code;
@@ -441,9 +440,9 @@ static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
- else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
else
ioread8_rep(chip->IO_ADDR_R, buf, len);
@@ -454,9 +453,9 @@ static void nand_davinci_write_buf(struct mtd_info *mtd,
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
- else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
else
iowrite8_rep(chip->IO_ADDR_R, buf, len);
@@ -606,6 +605,104 @@ static struct davinci_nand_pdata
}
#endif
+static int davinci_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
+ int ret = 0;
+
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ switch (info->chip.ecc.mode) {
+ case NAND_ECC_NONE:
+ pdata->ecc_bits = 0;
+ break;
+ case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
+ /*
+ * This driver expects Hamming based ECC when ecc_mode is set
+ * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+ * avoid adding an extra ->ecc_algo field to
+ * davinci_nand_pdata.
+ */
+ info->chip.ecc.algo = NAND_ECC_HAMMING;
+ break;
+ case NAND_ECC_HW:
+ if (pdata->ecc_bits == 4) {
+ /*
+ * No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ return ret;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ info->chip.ecc.algo = NAND_ECC_BCH;
+ } else {
+ /* 1bit ecc hamming */
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ info->chip.ecc.algo = NAND_ECC_HAMMING;
+ }
+ info->chip.ecc.size = 512;
+ info->chip.ecc.strength = pdata->ecc_bits;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = mtd->writesize / 512;
+
+ if (!chunks || mtd->oobsize < 16) {
+ dev_dbg(&info->pdev->dev, "too small\n");
+ return -EINVAL;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
+ } else if (chunks == 4 || chunks == 8) {
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ } else {
+ return -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops davinci_nand_controller_ops = {
+ .attach_chip = davinci_nand_attach_chip,
+};
+
static int nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata;
@@ -659,7 +756,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
- info->dev = &pdev->dev;
+ info->pdev = pdev;
info->base = base;
info->vaddr = vaddr;
@@ -680,9 +777,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.bbt_md = pdata->bbt_md;
info->timing = pdata->timing;
- info->ioaddr = (uint32_t __force) vaddr;
-
- info->current_cs = info->ioaddr;
+ info->current_cs = info->vaddr;
info->core_chipsel = pdata->core_chipsel;
info->mask_chipsel = pdata->mask_chipsel;
@@ -711,100 +806,15 @@ static int nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
+ info->chip.dummy_controller.ops = &davinci_nand_controller_ops;
+ ret = nand_scan(mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
return ret;
}
- switch (info->chip.ecc.mode) {
- case NAND_ECC_NONE:
- pdata->ecc_bits = 0;
- break;
- case NAND_ECC_SOFT:
- pdata->ecc_bits = 0;
- /*
- * This driver expects Hamming based ECC when ecc_mode is set
- * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
- * avoid adding an extra ->ecc_algo field to
- * davinci_nand_pdata.
- */
- info->chip.ecc.algo = NAND_ECC_HAMMING;
- break;
- case NAND_ECC_HW:
- if (pdata->ecc_bits == 4) {
- /* No sanity checks: CPUs must support this,
- * and the chips may not use NAND_BUSWIDTH_16.
- */
-
- /* No sharing 4-bit hardware between chipselects yet */
- spin_lock_irq(&davinci_nand_lock);
- if (ecc4_busy)
- ret = -EBUSY;
- else
- ecc4_busy = true;
- spin_unlock_irq(&davinci_nand_lock);
-
- if (ret == -EBUSY)
- return ret;
-
- info->chip.ecc.calculate = nand_davinci_calculate_4bit;
- info->chip.ecc.correct = nand_davinci_correct_4bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
- info->chip.ecc.bytes = 10;
- info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
- info->chip.ecc.algo = NAND_ECC_BCH;
- } else {
- /* 1bit ecc hamming */
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
- info->chip.ecc.bytes = 3;
- info->chip.ecc.algo = NAND_ECC_HAMMING;
- }
- info->chip.ecc.size = 512;
- info->chip.ecc.strength = pdata->ecc_bits;
- break;
- default:
- return -EINVAL;
- }
-
- /* Update ECC layout if needed ... for 1-bit HW ECC, the default
- * is OK, but it allocates 6 bytes when only 3 are needed (for
- * each 512 bytes). For the 4-bit HW ECC, that default is not
- * usable: 10 bytes are needed, not 6.
- */
- if (pdata->ecc_bits == 4) {
- int chunks = mtd->writesize / 512;
-
- if (!chunks || mtd->oobsize < 16) {
- dev_dbg(&pdev->dev, "too small\n");
- ret = -EINVAL;
- goto err;
- }
-
- /* For small page chips, preserve the manufacturer's
- * badblock marking data ... and make sure a flash BBT
- * table marker fits in the free bytes.
- */
- if (chunks == 1) {
- mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
- } else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
- info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
- } else {
- ret = -EIO;
- goto err;
- }
- }
-
- ret = nand_scan_tail(mtd);
- if (ret < 0)
- goto err;
-
if (pdata->parts)
- ret = mtd_device_parse_register(mtd, NULL, NULL,
- pdata->parts, pdata->nr_parts);
+ ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
ret = mtd_device_register(mtd, NULL, 0);
if (ret < 0)
@@ -819,11 +829,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
err_cleanup_nand:
nand_cleanup(&info->chip);
-err:
- spin_lock_irq(&davinci_nand_lock);
- if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
- ecc4_busy = false;
- spin_unlock_irq(&davinci_nand_lock);
return ret;
}
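
Besides the controller-ops conversion, the DaVinci hunks replace (unsigned) pointer casts with (uintptr_t). On 64-bit builds, casting a pointer to a 32-bit unsigned truncates it (and warns); uintptr_t is the integer type guaranteed to round-trip a pointer, and its low bits still give the alignment. A standalone illustration of the buffer-alignment test used in the read/write paths:

#include <stdint.h>
#include <stdio.h>

/* Nonzero when the buffer can be moved as 32-bit words. */
static int word_aligned(const void *buf, int len)
{
	return ((uintptr_t)buf & 0x03) == 0 && (len & 0x03) == 0;
}

int main(void)
{
	uint32_t buf[2];

	printf("%d %d\n", word_aligned(buf, 8),
	       word_aligned((char *)buf + 1, 8));	/* 1 0 on common ABIs */
	return 0;
}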
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 2a302a1d1430..ca18612c4201 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -51,14 +51,6 @@ MODULE_LICENSE("GPL");
#define DENALI_INVALID_BANK -1
#define DENALI_NR_BANKS 4
-/*
- * The bus interface clock, clk_x, is phase aligned with the core clock. The
- * clk_x is an integral multiple N of the core clk. The value N is configured
- * at IP delivery time, and its available value is 4, 5, or 6. We need to align
- * to the largest value to make it work with any possible configuration.
- */
-#define DENALI_CLK_X_MULT 6
-
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
@@ -954,7 +946,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
const struct nand_sdr_timings *timings;
- unsigned long t_clk;
+ unsigned long t_x, mult_x;
int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
int addr_2_data_mask;
@@ -965,15 +957,24 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
return PTR_ERR(timings);
/* clk_x period in picoseconds */
- t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
- if (!t_clk)
+ t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
+ if (!t_x)
+ return -EINVAL;
+
+ /*
+ * The bus interface clock, clk_x, is phase aligned with the core clock.
+ * The clk_x is an integral multiple N of the core clk. The value N is
+ * configured at IP delivery time, and its available value is 4, 5, 6.
+ */
+ mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
+ if (mult_x < 4 || mult_x > 6)
return -EINVAL;
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
/* tREA -> ACC_CLKS */
- acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
+ acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
tmp = ioread32(denali->reg + ACC_CLKS);
@@ -982,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
- re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
+ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
tmp = ioread32(denali->reg + RE_2_WE);
@@ -991,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
- re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
+ re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
tmp = ioread32(denali->reg + RE_2_RE);
@@ -1005,8 +1006,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
* With WE_2_RE properly set, the Denali controller automatically takes
* care of the delay; the driver need not set NAND_WAIT_TCCS.
*/
- we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
- t_clk);
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
@@ -1021,7 +1021,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
if (denali->revision < 0x0501)
addr_2_data_mask >>= 1;
- addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
+ addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
@@ -1031,7 +1031,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
/* tREH, tWH -> RDWR_EN_HI_CNT */
rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
- t_clk);
+ t_x);
rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
@@ -1040,11 +1040,10 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
- rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
- t_clk);
+ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
- t_clk);
- rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
+ t_x);
+ rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
@@ -1054,8 +1053,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
- cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
- (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
+ cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
+ (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
0);
cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
@@ -1120,33 +1119,6 @@ int denali_calc_ecc_bytes(int step_size, int strength)
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
-static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
- struct denali_nand_info *denali)
-{
- int oobavail = mtd->oobsize - denali->oob_skip_bytes;
- int ret;
-
- /*
- * If .size and .strength are already set (usually by DT),
- * check if they are supported by this controller.
- */
- if (chip->ecc.size && chip->ecc.strength)
- return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
-
- /*
- * We want .size and .strength closest to the chip's requirement
- * unless NAND_ECC_MAXIMIZE is requested.
- */
- if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
- ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
- if (!ret)
- return 0;
- }
-
- /* Max ECC strength is the last thing we can do */
- return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
-}
-
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
@@ -1233,62 +1205,12 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
return 0;
}
-int denali_init(struct denali_nand_info *denali)
+static int denali_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
- u32 features = ioread32(denali->reg + FEATURES);
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
int ret;
- mtd->dev.parent = denali->dev;
- denali_hw_init(denali);
-
- init_completion(&denali->complete);
- spin_lock_init(&denali->irq_lock);
-
- denali_clear_irq_all(denali);
-
- ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
- IRQF_SHARED, DENALI_NAND_NAME, denali);
- if (ret) {
- dev_err(denali->dev, "Unable to request IRQ\n");
- return ret;
- }
-
- denali_enable_irq(denali);
- denali_reset_banks(denali);
-
- denali->active_bank = DENALI_INVALID_BANK;
-
- nand_set_flash_node(chip, denali->dev->of_node);
- /* Fallback to the default name if DT did not give "label" property */
- if (!mtd->name)
- mtd->name = "denali-nand";
-
- chip->select_chip = denali_select_chip;
- chip->read_byte = denali_read_byte;
- chip->write_byte = denali_write_byte;
- chip->read_word = denali_read_word;
- chip->cmd_ctrl = denali_cmd_ctrl;
- chip->dev_ready = denali_dev_ready;
- chip->waitfunc = denali_waitfunc;
-
- if (features & FEATURES__INDEX_ADDR) {
- denali->host_read = denali_indexed_read;
- denali->host_write = denali_indexed_write;
- } else {
- denali->host_read = denali_direct_read;
- denali->host_write = denali_direct_write;
- }
-
- /* clk rate info is needed for setup_data_interface */
- if (denali->clk_x_rate)
- chip->setup_data_interface = denali_setup_data_interface;
-
- ret = nand_scan_ident(mtd, denali->max_banks, NULL);
- if (ret)
- goto disable_irq;
-
if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
denali->dma_avail = 1;
@@ -1317,10 +1239,11 @@ int denali_init(struct denali_nand_info *denali)
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
chip->options |= NAND_NO_SUBPAGE_WRITE;
- ret = denali_ecc_setup(mtd, chip, denali);
+ ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
+ mtd->oobsize - denali->oob_skip_bytes);
if (ret) {
dev_err(denali->dev, "Failed to setup ECC settings.\n");
- goto disable_irq;
+ return ret;
}
dev_dbg(denali->dev,
@@ -1364,7 +1287,7 @@ int denali_init(struct denali_nand_info *denali)
ret = denali_multidev_fixup(denali);
if (ret)
- goto disable_irq;
+ return ret;
/*
* This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
@@ -1372,26 +1295,92 @@ int denali_init(struct denali_nand_info *denali)
* guarantee DMA-safe alignment.
*/
denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
- if (!denali->buf) {
- ret = -ENOMEM;
- goto disable_irq;
+ if (!denali->buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void denali_detach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ kfree(denali->buf);
+}
+
+static const struct nand_controller_ops denali_controller_ops = {
+ .attach_chip = denali_attach_chip,
+ .detach_chip = denali_detach_chip,
+};
+
+int denali_init(struct denali_nand_info *denali)
+{
+ struct nand_chip *chip = &denali->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
+ int ret;
+
+ mtd->dev.parent = denali->dev;
+ denali_hw_init(denali);
+
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
+
+ denali_clear_irq_all(denali);
+
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
}
- ret = nand_scan_tail(mtd);
+ denali_enable_irq(denali);
+ denali_reset_banks(denali);
+
+ denali->active_bank = DENALI_INVALID_BANK;
+
+ nand_set_flash_node(chip, denali->dev->of_node);
+ /* Fallback to the default name if DT did not give "label" property */
+ if (!mtd->name)
+ mtd->name = "denali-nand";
+
+ chip->select_chip = denali_select_chip;
+ chip->read_byte = denali_read_byte;
+ chip->write_byte = denali_write_byte;
+ chip->read_word = denali_read_word;
+ chip->cmd_ctrl = denali_cmd_ctrl;
+ chip->dev_ready = denali_dev_ready;
+ chip->waitfunc = denali_waitfunc;
+
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
+ /* clk rate info is needed for setup_data_interface */
+ if (denali->clk_rate && denali->clk_x_rate)
+ chip->setup_data_interface = denali_setup_data_interface;
+
+ chip->dummy_controller.ops = &denali_controller_ops;
+ ret = nand_scan(mtd, denali->max_banks);
if (ret)
- goto free_buf;
+ goto disable_irq;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
goto cleanup_nand;
}
+
return 0;
cleanup_nand:
nand_cleanup(chip);
-free_buf:
- kfree(denali->buf);
disable_irq:
denali_disable_irq(denali);
@@ -1404,7 +1393,6 @@ void denali_remove(struct denali_nand_info *denali)
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
nand_release(mtd);
- kfree(denali->buf);
denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
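
A quick numeric check of the Denali timing math above: with the 200 MHz clk_x and 50 MHz core clock that denali_pci hardcodes, mult_x = 4 (within the allowed 4..6 range) and the bus clock period t_x is 5000 ps, so a tREA_max of 20 ns (an example value, not from the patch) becomes ACC_CLKS = 4. Standalone sketch with the kernel's DIV_ROUND_UP open-coded:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long clk_rate = 50000000ULL;	/* core clock, Hz */
	unsigned long long clk_x_rate = 200000000ULL;	/* bus clock, Hz */
	unsigned long long t_x = 1000000000000ULL / clk_x_rate;	/* ps */
	unsigned long long tREA_max = 20000;	/* ps, example value */

	printf("mult_x=%llu t_x=%llu ps ACC_CLKS=%llu\n",
	       clk_x_rate / clk_rate, t_x, DIV_ROUND_UP(tREA_max, t_x));
	return 0;
}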
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index 9ad33d237378..1f8feaf924eb 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -300,6 +300,7 @@
struct denali_nand_info {
struct nand_chip nand;
+ unsigned long clk_rate; /* core clock rate */
unsigned long clk_x_rate; /* bus interface clock rate */
int active_bank; /* currently selected bank */
struct device *dev;
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index cfd33e6ca77f..0faaad032e5f 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -27,7 +27,9 @@
struct denali_dt {
struct denali_nand_info denali;
- struct clk *clk;
+ struct clk *clk; /* core clock */
+ struct clk *clk_x; /* bus interface clock */
+ struct clk *clk_ecc; /* ECC circuit clock */
};
struct denali_dt_data {
@@ -79,59 +81,99 @@ MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
static int denali_dt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct resource *res;
struct denali_dt *dt;
const struct denali_dt_data *data;
struct denali_nand_info *denali;
int ret;
- dt = devm_kzalloc(&pdev->dev, sizeof(*dt), GFP_KERNEL);
+ dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
denali = &dt->denali;
- data = of_device_get_match_data(&pdev->dev);
+ data = of_device_get_match_data(dev);
if (data) {
denali->revision = data->revision;
denali->caps = data->caps;
denali->ecc_caps = data->ecc_caps;
}
- denali->dev = &pdev->dev;
+ denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
if (denali->irq < 0) {
- dev_err(&pdev->dev, "no irq defined\n");
+ dev_err(dev, "no irq defined\n");
return denali->irq;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
- denali->reg = devm_ioremap_resource(&pdev->dev, res);
+ denali->reg = devm_ioremap_resource(dev, res);
if (IS_ERR(denali->reg))
return PTR_ERR(denali->reg);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- denali->host = devm_ioremap_resource(&pdev->dev, res);
+ denali->host = devm_ioremap_resource(dev, res);
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
- dt->clk = devm_clk_get(&pdev->dev, NULL);
+ /*
+	 * A single anonymous clock is supported for backward compatibility.
+	 * New platforms should provide all of the named clocks.
+ */
+ dt->clk = devm_clk_get(dev, "nand");
+ if (IS_ERR(dt->clk))
+ dt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(dt->clk)) {
- dev_err(&pdev->dev, "no clk available\n");
+ dev_err(dev, "no clk available\n");
return PTR_ERR(dt->clk);
}
+
+ dt->clk_x = devm_clk_get(dev, "nand_x");
+ if (IS_ERR(dt->clk_x))
+ dt->clk_x = NULL;
+
+ dt->clk_ecc = devm_clk_get(dev, "ecc");
+ if (IS_ERR(dt->clk_ecc))
+ dt->clk_ecc = NULL;
+
ret = clk_prepare_enable(dt->clk);
if (ret)
return ret;
- denali->clk_x_rate = clk_get_rate(dt->clk);
+ ret = clk_prepare_enable(dt->clk_x);
+ if (ret)
+ goto out_disable_clk;
+
+ ret = clk_prepare_enable(dt->clk_ecc);
+ if (ret)
+ goto out_disable_clk_x;
+
+ if (dt->clk_x) {
+ denali->clk_rate = clk_get_rate(dt->clk);
+ denali->clk_x_rate = clk_get_rate(dt->clk_x);
+ } else {
+ /*
+		 * Hardcode the clock rates for backward compatibility.
+ * This works for both SOCFPGA and UniPhier.
+ */
+ dev_notice(dev,
+ "necessary clock is missing. default clock rates are used.\n");
+ denali->clk_rate = 50000000;
+ denali->clk_x_rate = 200000000;
+ }
ret = denali_init(denali);
if (ret)
- goto out_disable_clk;
+ goto out_disable_clk_ecc;
platform_set_drvdata(pdev, dt);
return 0;
+out_disable_clk_ecc:
+ clk_disable_unprepare(dt->clk_ecc);
+out_disable_clk_x:
+ clk_disable_unprepare(dt->clk_x);
out_disable_clk:
clk_disable_unprepare(dt->clk);
@@ -143,6 +185,8 @@ static int denali_dt_remove(struct platform_device *pdev)
struct denali_dt *dt = platform_get_drvdata(pdev);
denali_remove(&dt->denali);
+ clk_disable_unprepare(dt->clk_ecc);
+ clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
return 0;
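
The clock handling added above leans on two common-clk conventions: a named devm_clk_get() that fails can fall back to another lookup, and a NULL struct clk * is a valid dummy that clk_prepare_enable()/clk_disable_unprepare() silently accept. That is what lets clk_x and clk_ecc stay optional. A condensed sketch of the idiom (hypothetical helper; note it deliberately mirrors the hunk above in swallowing errors, including -EPROBE_DEFER, for the optional clocks):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Return the named clock, or NULL when it is absent/optional. */
static struct clk *foo_get_optional_clk(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	return IS_ERR(clk) ? NULL : clk;
}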
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 49cb3e1f8bd0..7c8efc4c7bdf 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -73,6 +73,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->irq = dev->irq;
denali->ecc_caps = &denali_pci_ecc_caps;
denali->nand.ecc.options |= NAND_ECC_MAXIMIZE;
+ denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
ret = pci_request_regions(dev, DENALI_NAND_NAME);
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 8d10061abb4b..3c46188dd6d2 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1291,7 +1291,7 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
this->bbt_md = NULL;
}
- ret = this->scan_bbt(mtd);
+ ret = nand_create_bbt(this);
if (ret)
return ret;
@@ -1338,7 +1338,7 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
this->bbt_md->pattern = "TBB_SYSM";
}
- ret = this->scan_bbt(mtd);
+ ret = nand_create_bbt(this);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
index 1314aa99b9ab..a3f04315c05c 100644
--- a/drivers/mtd/nand/raw/docg4.c
+++ b/drivers/mtd/nand/raw/docg4.c
@@ -1227,10 +1227,9 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
* required within a nand driver because they are performed by the nand
* infrastructure code as part of nand_scan(). In this case they need
* to be initialized here because we skip call to nand_scan_ident() (the
- * first half of nand_scan()). The call to nand_scan_ident() is skipped
- * because for this device the chip id is not read in the manner of a
- * standard nand device. Unfortunately, nand_scan_ident() does other
- * things as well, such as call nand_set_defaults().
+ * first half of nand_scan()). The call to nand_scan_ident() could be
+ * skipped because for this device the chip id is not read in the manner
+ * of a standard nand device.
*/
struct nand_chip *nand = mtd_to_nand(mtd);
@@ -1257,8 +1256,8 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->ecc.strength = DOCG4_T;
nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
- nand->controller = &nand->hwcontrol;
- nand_hw_control_init(nand->controller);
+ nand->controller = &nand->dummy_controller;
+ nand_controller_init(nand->controller);
/* methods */
nand->cmdfunc = docg4_command;
@@ -1315,6 +1314,40 @@ static int __init read_id_reg(struct mtd_info *mtd)
static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+static int docg4_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct docg4_priv *doc = (struct docg4_priv *)(chip + 1);
+ int ret;
+
+ init_mtd_structs(mtd);
+
+ /* Initialize kernel BCH algorithm */
+ doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+ if (!doc->bch)
+ return -EINVAL;
+
+ reset(mtd);
+
+ ret = read_id_reg(mtd);
+ if (ret)
+ free_bch(doc->bch);
+
+ return ret;
+}
+
+static void docg4_detach_chip(struct nand_chip *chip)
+{
+ struct docg4_priv *doc = (struct docg4_priv *)(chip + 1);
+
+ free_bch(doc->bch);
+}
+
+static const struct nand_controller_ops docg4_controller_ops = {
+ .attach_chip = docg4_attach_chip,
+ .detach_chip = docg4_detach_chip,
+};
+
static int __init probe_docg4(struct platform_device *pdev)
{
struct mtd_info *mtd;
@@ -1341,7 +1374,7 @@ static int __init probe_docg4(struct platform_device *pdev)
nand = kzalloc(len, GFP_KERNEL);
if (nand == NULL) {
retval = -ENOMEM;
- goto fail_unmap;
+ goto unmap;
}
mtd = nand_to_mtd(nand);
@@ -1350,46 +1383,35 @@ static int __init probe_docg4(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
doc->virtadr = virtadr;
doc->dev = dev;
-
- init_mtd_structs(mtd);
-
- /* initialize kernel bch algorithm */
- doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
- if (doc->bch == NULL) {
- retval = -EINVAL;
- goto fail;
- }
-
platform_set_drvdata(pdev, doc);
- reset(mtd);
- retval = read_id_reg(mtd);
- if (retval == -ENODEV) {
- dev_warn(dev, "No diskonchip G4 device found.\n");
- goto fail;
- }
-
- retval = nand_scan_tail(mtd);
+ /*
+ * Running nand_scan() with maxchips == 0 will skip nand_scan_ident(),
+ * which is a specific operation with this driver and done in the
+ * ->attach_chip callback.
+ */
+ nand->dummy_controller.ops = &docg4_controller_ops;
+ retval = nand_scan(mtd, 0);
if (retval)
- goto fail;
+ goto free_nand;
retval = read_factory_bbt(mtd);
if (retval)
- goto fail;
+ goto cleanup_nand;
retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
if (retval)
- goto fail;
+ goto cleanup_nand;
doc->mtd = mtd;
+
return 0;
-fail:
- nand_release(mtd); /* deletes partitions and mtd devices */
- free_bch(doc->bch);
+cleanup_nand:
+ nand_cleanup(nand);
+free_nand:
kfree(nand);
-
-fail_unmap:
+unmap:
iounmap(virtadr);
return retval;
@@ -1399,7 +1421,6 @@ static int __exit cleanup_docg4(struct platform_device *pdev)
{
struct docg4_priv *doc = platform_get_drvdata(pdev);
nand_release(doc->mtd);
- free_bch(doc->bch);
kfree(mtd_to_nand(doc->mtd));
iounmap(doc->virtadr);
return 0;
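
docg4 is the odd one out in this series: its chip ID cannot be read the way nand_scan_ident() expects, so identification happens inside ->attach_chip() and, per the comment in the hunk above, nand_scan() is called with maxchips == 0 to skip the identification phase. A minimal sketch of that calling convention (hypothetical foo_* names):

#include <linux/mtd/rawnand.h>

static int foo_attach_chip(struct nand_chip *chip)
{
	/* Device-specific identification and ECC setup go here. */
	return 0;
}

static const struct nand_controller_ops foo_controller_ops = {
	.attach_chip = foo_attach_chip,
};

static int foo_scan(struct mtd_info *mtd, struct nand_chip *chip)
{
	chip->dummy_controller.ops = &foo_controller_ops;

	/* maxchips == 0: skip ident, run ->attach_chip() + scan_tail */
	return nand_scan(mtd, 0);
}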
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 51f0b340bc0d..55f449b711fd 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -61,7 +61,7 @@ struct fsl_elbc_mtd {
/* Freescale eLBC FCM controller information */
struct fsl_elbc_fcm_ctrl {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct fsl_elbc_mtd *chips[MAX_BANKS];
u8 __iomem *addr; /* Address of assigned FCM buffer */
@@ -637,9 +637,9 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
-static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
+static int fsl_elbc_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
@@ -700,12 +700,16 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
dev_err(priv->dev,
"fsl_elbc_init: page size %d is not supported\n",
mtd->writesize);
- return -1;
+ return -ENOTSUPP;
}
return 0;
}
+static const struct nand_controller_ops fsl_elbc_controller_ops = {
+ .attach_chip = fsl_elbc_attach_chip,
+};
+
static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
@@ -879,7 +883,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
}
elbc_fcm_ctrl->counter++;
- nand_hw_control_init(&elbc_fcm_ctrl->controller);
+ nand_controller_init(&elbc_fcm_ctrl->controller);
fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
} else {
elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
@@ -910,15 +914,8 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
if (ret)
goto err;
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- goto err;
-
- ret = fsl_elbc_chip_init_tail(mtd);
- if (ret)
- goto err;
-
- ret = nand_scan_tail(mtd);
+ priv->chip.controller->ops = &fsl_elbc_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 382b67e97174..24f59d0066af 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -51,7 +51,7 @@ struct fsl_ifc_mtd {
/* overview of the fsl ifc controller */
struct fsl_ifc_nand_ctrl {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
void __iomem *addr; /* Address of assigned IFC buffer */
@@ -225,7 +225,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int bufnum = nctrl->page & priv->bufnum_mask;
int sector_start = bufnum * chip->ecc.steps;
int sector_end = sector_start + chip->ecc.steps - 1;
- __be32 *eccstat_regs;
+ __be32 __iomem *eccstat_regs;
eccstat_regs = ifc->ifc_nand.nand_eccstat;
eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
@@ -714,9 +714,9 @@ static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+static int fsl_ifc_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
@@ -757,6 +757,10 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+static const struct nand_controller_ops fsl_ifc_controller_ops = {
+ .attach_chip = fsl_ifc_attach_chip,
+};
+
static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
@@ -1004,7 +1008,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
ifc_nand_ctrl->addr = NULL;
fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
- nand_hw_control_init(&ifc_nand_ctrl->controller);
+ nand_controller_init(&ifc_nand_ctrl->controller);
} else {
ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
}
@@ -1046,15 +1050,8 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
if (ret)
goto err;
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- goto err;
-
- ret = fsl_ifc_chip_init_tail(mtd);
- if (ret)
- goto err;
-
- ret = nand_scan_tail(mtd);
+ priv->chip.controller->ops = &fsl_ifc_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
goto err;
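fsl_elbc and fsl_ifc differ from docg4 in one detail worth noting: they embed a real nand_controller that is shared across banks, so the ops pointer is installed on chip->controller rather than on the per-chip dummy_controller. A hedged sketch of that wiring, again with mydrv_* names as placeholders:

#include <linux/mtd/rawnand.h>

struct mydrv_ctrl {
	struct nand_controller controller;	/* shared by all banks */
	/* ... controller-wide resources ... */
};

static int mydrv_attach_chip(struct nand_chip *chip)
{
	/* per-chip, identification-dependent setup goes here */
	return 0;
}

static const struct nand_controller_ops mydrv_ops = {
	.attach_chip = mydrv_attach_chip,
};

static int mydrv_chip_init(struct mydrv_ctrl *ctrl, struct nand_chip *chip)
{
	/* ctrl->controller was initialized once in probe() with
	 * nand_controller_init(&ctrl->controller) */
	chip->controller = &ctrl->controller;
	chip->controller->ops = &mydrv_ops;

	return nand_scan(nand_to_mtd(chip), 1);
}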
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index f4a5a317d4ae..f418236fa020 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -62,7 +62,7 @@
reg)
/* fsmc controller registers for NAND flash */
-#define PC 0x00
+#define FSMC_PC 0x00
/* pc register definitions */
#define FSMC_RESET (1 << 0)
#define FSMC_WAITON (1 << 1)
@@ -273,12 +273,13 @@ static void fsmc_nand_setup(struct fsmc_nand_data *host,
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (host->nand.options & NAND_BUSWIDTH_16)
- writel_relaxed(value | FSMC_DEVWID_16, host->regs_va + PC);
+ writel_relaxed(value | FSMC_DEVWID_16,
+ host->regs_va + FSMC_PC);
else
- writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + PC);
+ writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + FSMC_PC);
- writel_relaxed(readl(host->regs_va + PC) | tclr | tar,
- host->regs_va + PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) | tclr | tar,
+ host->regs_va + FSMC_PC);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
}
@@ -371,12 +372,12 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
- writel_relaxed(readl(host->regs_va + PC) & ~FSMC_ECCPLEN_256,
- host->regs_va + PC);
- writel_relaxed(readl(host->regs_va + PC) & ~FSMC_ECCEN,
- host->regs_va + PC);
- writel_relaxed(readl(host->regs_va + PC) | FSMC_ECCEN,
- host->regs_va + PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) | FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
}
/*
@@ -546,7 +547,7 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
IS_ALIGNED(len, sizeof(uint32_t))) {
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
@@ -569,7 +570,7 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
IS_ALIGNED(len, sizeof(uint32_t))) {
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
@@ -618,11 +619,11 @@ static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
if (chipnr > 0)
return;
- pc = readl(host->regs_va + PC);
+ pc = readl(host->regs_va + FSMC_PC);
if (chipnr < 0)
- writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + PC);
+ writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
else
- writel_relaxed(pc | FSMC_ENABLE, host->regs_va + PC);
+ writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
/* nCE line must be asserted before starting any operation */
mb();
@@ -740,7 +741,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+ nand_read_data_op(chip, p, eccsize, false);
for (j = 0; j < eccbytes;) {
struct mtd_oob_region oobregion;
@@ -918,6 +919,82 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
return 0;
}
+static int fsmc_nand_attach_chip(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ case 224:
+ case 256:
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
+
+ return 0;
+ }
+
+ switch (nand->ecc.mode) {
+ case NAND_ECC_HW:
+ dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ break;
+
+ case NAND_ECC_SOFT:
+ if (nand->ecc.algo == NAND_ECC_BCH) {
+ dev_info(host->dev,
+ "Using 4-bit SW BCH ECC scheme\n");
+ break;
+ }
+
+ case NAND_ECC_ON_DIE:
+ break;
+
+ default:
+ dev_err(host->dev, "Unsupported ECC mode!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Don't set layout for BCH4 SW ECC. This will be
+ * generated later in nand_bch_init().
+ */
+ if (nand->ecc.mode == NAND_ECC_HW) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ &fsmc_ecc1_ooblayout_ops);
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsmc_nand_controller_ops = {
+ .attach_chip = fsmc_nand_attach_chip,
+};
+
/*
* fsmc_nand_probe - Probe function
* @pdev: platform device structure
@@ -1047,76 +1124,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/*
* Scan to find existence of the device
*/
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret) {
- dev_err(&pdev->dev, "No NAND Device found!\n");
- goto release_dma_write_chan;
- }
-
- if (AMBA_REV_BITS(host->pid) >= 8) {
- switch (mtd->oobsize) {
- case 16:
- case 64:
- case 128:
- case 224:
- case 256:
- break;
- default:
- dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- ret = -EINVAL;
- goto release_dma_write_chan;
- }
-
- mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
- } else {
- switch (nand->ecc.mode) {
- case NAND_ECC_HW:
- dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
- nand->ecc.calculate = fsmc_read_hwecc_ecc1;
- nand->ecc.correct = nand_correct_data;
- nand->ecc.bytes = 3;
- nand->ecc.strength = 1;
- break;
-
- case NAND_ECC_SOFT:
- if (nand->ecc.algo == NAND_ECC_BCH) {
- dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
- break;
- }
-
- case NAND_ECC_ON_DIE:
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported ECC mode!\n");
- goto release_dma_write_chan;
- }
-
- /*
- * Don't set layout for BCH4 SW ECC. This will be
- * generated later in nand_bch_init() later.
- */
- if (nand->ecc.mode == NAND_ECC_HW) {
- switch (mtd->oobsize) {
- case 16:
- case 64:
- case 128:
- mtd_set_ooblayout(mtd,
- &fsmc_ecc1_ooblayout_ops);
- break;
- default:
- dev_warn(&pdev->dev,
- "No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- ret = -EINVAL;
- goto release_dma_write_chan;
- }
- }
- }
-
- /* Second stage of scan to fill MTD data-structures */
- ret = nand_scan_tail(mtd);
+ nand->dummy_controller.ops = &fsmc_nand_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
goto release_dma_write_chan;
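Besides the register rename, the fsmc hunks above fix the buffer alignment checks to cast through uintptr_t rather than uint32_t: on a 64-bit build the old cast truncates the pointer and triggers a pointer-to-int-cast warning, since uint32_t is not wide enough to hold an address there. A small userspace illustration of the difference; the IS_ALIGNED macro here is a simplified stand-in for the kernel's:

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	uint32_t words[2];
	char *p = (char *)words;

	/* uintptr_t is defined to round-trip a pointer value;
	 * uint32_t is not guaranteed to on LP64 targets. */
	printf("p   aligned: %d\n",
	       (int)IS_ALIGNED((uintptr_t)p, sizeof(uint32_t)));	/* 1 */
	printf("p+1 aligned: %d\n",
	       (int)IS_ALIGNED((uintptr_t)(p + 1), sizeof(uint32_t)));	/* 0 */

	return 0;
}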
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index 83697b8df871..88ea2203e263 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/delay.h>
#include <linux/clk.h>
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index f6aa358a3452..1c1ebbc82824 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/clk.h>
#include <linux/slab.h>
@@ -757,9 +744,9 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
* [2] Allocate a read/write data buffer.
* The gpmi_alloc_dma_buffer can be called twice.
* We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
- * is called before the nand_scan_ident; and we allocate a buffer
- * of the real NAND page size when the gpmi_alloc_dma_buffer is
- * called after the nand_scan_ident.
+ * is called before the NAND identification; and we allocate a
+ * buffer of the real NAND page size when the gpmi_alloc_dma_buffer
+ * is called after identification.
*/
this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
GFP_DMA | GFP_KERNEL);
@@ -957,7 +944,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
struct mtd_info *mtd = nand_to_mtd(chip);
- void *payload_virt;
dma_addr_t payload_phys;
unsigned int i;
unsigned char *status;
@@ -967,7 +953,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
dev_dbg(this->dev, "page number is : %d\n", page);
- payload_virt = this->payload_virt;
payload_phys = this->payload_phys;
if (virt_addr_valid(buf)) {
@@ -976,7 +961,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
DMA_FROM_DEVICE);
if (!dma_mapping_error(this->dev, dest_phys)) {
- payload_virt = buf;
payload_phys = dest_phys;
direct = true;
}
@@ -1881,6 +1865,34 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
return 0;
}
+static int gpmi_nand_attach_chip(struct nand_chip *chip)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (of_property_read_bool(this->dev->of_node,
+ "fsl,no-blockmark-swap"))
+ this->swap_block_mark = false;
+ }
+ dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+ this->swap_block_mark ? "en" : "dis");
+
+ ret = gpmi_init_last(this);
+ if (ret)
+ return ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpmi_nand_controller_ops = {
+ .attach_chip = gpmi_nand_attach_chip,
+};
+
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
@@ -1921,33 +1933,15 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
- if (ret)
- goto err_out;
-
- if (chip->bbt_options & NAND_BBT_USE_FLASH) {
- chip->bbt_options |= NAND_BBT_NO_OOB;
-
- if (of_property_read_bool(this->dev->of_node,
- "fsl,no-blockmark-swap"))
- this->swap_block_mark = false;
- }
- dev_dbg(this->dev, "Blockmark swapping %sabled\n",
- this->swap_block_mark ? "en" : "dis");
-
- ret = gpmi_init_last(this);
- if (ret)
- goto err_out;
-
- chip->options |= NAND_SKIP_BBTSCAN;
- ret = nand_scan_tail(mtd);
+ chip->dummy_controller.ops = &gpmi_nand_controller_ops;
+ ret = nand_scan(mtd, GPMI_IS_MX6(this) ? 2 : 1);
if (ret)
goto err_out;
ret = nand_boot_init(this);
if (ret)
goto err_nand_cleanup;
- ret = chip->scan_bbt(mtd);
+ ret = nand_create_bbt(chip);
if (ret)
goto err_nand_cleanup;
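The reworded comment in gpmi_alloc_dma_buffer() describes a two-phase idiom that several drivers in this series share: before identification mtd->writesize is still zero, so a PAGE_SIZE buffer is allocated, and the function is called a second time afterwards to size the buffer for the real page. A hedged sketch of the idiom, not gpmi's actual body:

#include <linux/mtd/mtd.h>
#include <linux/slab.h>

static int sketch_alloc_data_buffer(struct mtd_info *mtd, void **bufp)
{
	/* Drop the first-phase buffer if this is the second call. */
	kfree(*bufp);

	/* writesize == 0 before identification => fall back to PAGE_SIZE */
	*bufp = kzalloc(mtd->writesize ?: PAGE_SIZE, GFP_DMA | GFP_KERNEL);

	return *bufp ? 0 : -ENOMEM;
}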
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index 6aa10d6962d6..69cd0cbde4f2 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
#define __DRIVERS_MTD_NAND_GPMI_NAND_H
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index a1e009c8e556..950dc7789296 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -709,9 +709,50 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host)
return 0;
}
+static int hisi_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int flag;
+
+ host->buffer = dmam_alloc_coherent(host->dev,
+ mtd->writesize + mtd->oobsize,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ host->dma_oob = host->dma_buffer + mtd->writesize;
+ memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+ flag = hinfc_read(host, HINFC504_CON);
+ flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
+ switch (mtd->writesize) {
+ case 2048:
+ flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT);
+ break;
+ /*
+ * TODO: add support for more page sizes; the default page
+ * size has been set in hisi_nfc_host_init
+ */
+ default:
+ dev_err(host->dev, "NON-2KB page size nand flash\n");
+ return -EINVAL;
+ }
+ hinfc_write(host, flag, HINFC504_CON);
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ hisi_nfc_ecc_probe(host);
+
+ return 0;
+}
+
+static const struct nand_controller_ops hisi_nfc_controller_ops = {
+ .attach_chip = hisi_nfc_attach_chip,
+};
+
static int hisi_nfc_probe(struct platform_device *pdev)
{
- int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP;
+ int ret = 0, irq, max_chips = HINFC504_MAX_CHIP;
struct device *dev = &pdev->dev;
struct hinfc_host *host;
struct nand_chip *chip;
@@ -769,42 +810,11 @@ static int hisi_nfc_probe(struct platform_device *pdev)
return ret;
}
- ret = nand_scan_ident(mtd, max_chips, NULL);
+ chip->dummy_controller.ops = &hisi_nfc_controller_ops;
+ ret = nand_scan(mtd, max_chips);
if (ret)
return ret;
- host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
- &host->dma_buffer, GFP_KERNEL);
- if (!host->buffer)
- return -ENOMEM;
-
- host->dma_oob = host->dma_buffer + mtd->writesize;
- memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
-
- flag = hinfc_read(host, HINFC504_CON);
- flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
- switch (mtd->writesize) {
- case 2048:
- flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break;
- /*
- * TODO: add more pagesize support,
- * default pagesize has been set in hisi_nfc_host_init
- */
- default:
- dev_err(dev, "NON-2KB page size nand flash\n");
- return -EINVAL;
- }
- hinfc_write(host, flag, HINFC504_CON);
-
- if (chip->ecc.mode == NAND_ECC_HW)
- hisi_nfc_ecc_probe(host);
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(dev, "nand_scan_tail failed: %d\n", ret);
- return ret;
- }
-
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "Err MTD partition=%d\n", ret);
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index 613b00a9604b..a7515452bc59 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,9 +24,9 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
-#include <asm/mach-jz4740/jz4740_nand.h>
+#include <linux/platform_data/jz4740/jz4740_nand.h>
#define JZ_REG_NAND_CTRL 0x50
#define JZ_REG_NAND_ECC_CTRL 0x100
@@ -330,7 +331,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
if (chipnr == 0) {
/* Detect first chip. */
- ret = nand_scan_ident(mtd, 1, NULL);
+ ret = nand_scan(mtd, 1);
if (ret)
goto notfound_id;
@@ -355,7 +356,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
mtd->size += chip->chipsize;
}
- dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
+ dev_info(&pdev->dev, "Found chip %zu on bank %i\n", chipnr, bank);
return 0;
notfound_id:
@@ -367,6 +368,24 @@ notfound_id:
return ret;
}
+static int jz_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct jz_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (pdata && pdata->ident_callback)
+ pdata->ident_callback(pdev, mtd, &pdata->partitions,
+ &pdata->num_partitions);
+
+ return 0;
+}
+
+static const struct nand_controller_ops jz_nand_controller_ops = {
+ .attach_chip = jz_nand_attach_chip,
+};
+
static int jz_nand_probe(struct platform_device *pdev)
{
int ret;
@@ -410,6 +429,7 @@ static int jz_nand_probe(struct platform_device *pdev)
chip->chip_delay = 50;
chip->cmd_ctrl = jz_nand_cmd_ctrl;
chip->select_chip = jz_nand_select_chip;
+ chip->dummy_controller.ops = &jz_nand_controller_ops;
if (nand->busy_gpio)
chip->dev_ready = jz_nand_dev_ready;
@@ -455,33 +475,20 @@ static int jz_nand_probe(struct platform_device *pdev)
goto err_iounmap_mmio;
}
- if (pdata && pdata->ident_callback) {
- pdata->ident_callback(pdev, mtd, &pdata->partitions,
- &pdata->num_partitions);
- }
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(&pdev->dev, "Failed to scan NAND\n");
- goto err_unclaim_banks;
- }
-
- ret = mtd_device_parse_register(mtd, NULL, NULL,
- pdata ? pdata->partitions : NULL,
- pdata ? pdata->num_partitions : 0);
+ ret = mtd_device_register(mtd, pdata ? pdata->partitions : NULL,
+ pdata ? pdata->num_partitions : 0);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
- goto err_nand_release;
+ goto err_cleanup_nand;
}
dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n");
return 0;
-err_nand_release:
- nand_release(mtd);
-err_unclaim_banks:
+err_cleanup_nand:
+ nand_cleanup(chip);
while (chipnr--) {
unsigned char bank = nand->banks[chipnr];
jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
index e69f6ae4c539..db4fa60bd52a 100644
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ b/drivers/mtd/nand/raw/jz4780_nand.c
@@ -44,7 +44,7 @@ struct jz4780_nand_cs {
struct jz4780_nand_controller {
struct device *dev;
struct jz4780_bch *bch;
- struct nand_hw_control controller;
+ struct nand_controller controller;
unsigned int num_banks;
struct list_head chips;
int selected;
@@ -65,7 +65,8 @@ static inline struct jz4780_nand_chip *to_jz4780_nand_chip(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct jz4780_nand_chip, chip);
}
-static inline struct jz4780_nand_controller *to_jz4780_nand_controller(struct nand_hw_control *ctrl)
+static inline struct jz4780_nand_controller
+*to_jz4780_nand_controller(struct nand_controller *ctrl)
{
return container_of(ctrl, struct jz4780_nand_controller, controller);
}
@@ -157,9 +158,8 @@ static int jz4780_nand_ecc_correct(struct mtd_info *mtd, u8 *dat,
return jz4780_bch_correct(nfc->bch, &params, dat, read_ecc);
}
-static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *dev)
+static int jz4780_nand_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
int eccbytes;
@@ -170,7 +170,8 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
switch (chip->ecc.mode) {
case NAND_ECC_HW:
if (!nfc->bch) {
- dev_err(dev, "HW BCH selected, but BCH controller not found\n");
+ dev_err(nfc->dev,
+ "HW BCH selected, but BCH controller not found\n");
return -ENODEV;
}
@@ -179,15 +180,16 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
chip->ecc.correct = jz4780_nand_ecc_correct;
/* fall through */
case NAND_ECC_SOFT:
- dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n",
- (nfc->bch) ? "hardware BCH" : "software ECC",
- chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
+ dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
+ (nfc->bch) ? "hardware BCH" : "software ECC",
+ chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
break;
case NAND_ECC_NONE:
- dev_info(dev, "not using ECC\n");
+ dev_info(nfc->dev, "not using ECC\n");
break;
default:
- dev_err(dev, "ECC mode %d not supported\n", chip->ecc.mode);
+ dev_err(nfc->dev, "ECC mode %d not supported\n",
+ chip->ecc.mode);
return -EINVAL;
}
@@ -199,7 +201,7 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
if (eccbytes > mtd->oobsize - 2) {
- dev_err(dev,
+ dev_err(nfc->dev,
"invalid ECC config: required %d ECC bytes, but only %d are available",
eccbytes, mtd->oobsize - 2);
return -EINVAL;
@@ -210,6 +212,10 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
return 0;
}
+static const struct nand_controller_ops jz4780_nand_controller_ops = {
+ .attach_chip = jz4780_nand_attach_chip,
+};
+
static int jz4780_nand_init_chip(struct platform_device *pdev,
struct jz4780_nand_controller *nfc,
struct device_node *np,
@@ -279,15 +285,8 @@ static int jz4780_nand_init_chip(struct platform_device *pdev,
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- return ret;
-
- ret = jz4780_nand_init_ecc(nand, dev);
- if (ret)
- return ret;
-
- ret = nand_scan_tail(mtd);
+ chip->controller->ops = &jz4780_nand_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
return ret;
@@ -368,7 +367,7 @@ static int jz4780_nand_probe(struct platform_device *pdev)
nfc->dev = dev;
nfc->num_banks = num_banks;
- nand_hw_control_init(&nfc->controller);
+ nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
ret = jz4780_nand_init_chips(nfc, pdev);
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 052d123a8304..e82abada130a 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -184,6 +184,7 @@ static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
};
struct lpc32xx_nand_host {
+ struct platform_device *pdev;
struct nand_chip nand_chip;
struct lpc32xx_mlc_platform_data *pdata;
struct clk *clk;
@@ -653,6 +654,32 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
return ncfg;
}
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = &host->pdev->dev;
+
+ host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dma_buf)
+ return -ENOMEM;
+
+ host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dummy_buf)
+ return -ENOMEM;
+
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+ host->mlcsubpages = mtd->writesize / 512;
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
/*
* Probe for NAND controller
*/
@@ -669,6 +696,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
+ host->pdev = pdev;
+
rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
if (IS_ERR(host->io_base))
@@ -748,31 +777,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
}
- /*
- * Scan to find existance of the device and
- * Get the type of NAND device SMALL block or LARGE block
- */
- res = nand_scan_ident(mtd, 1, NULL);
- if (res)
- goto release_dma_chan;
-
- host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
- if (!host->dma_buf) {
- res = -ENOMEM;
- goto release_dma_chan;
- }
-
- host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
- if (!host->dummy_buf) {
- res = -ENOMEM;
- goto release_dma_chan;
- }
-
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
- host->mlcsubpages = mtd->writesize / 512;
-
/* initially clear interrupt status */
readb(MLC_IRQ_SR(host->io_base));
@@ -794,10 +798,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/*
- * Fills out all the uninitialized function pointers with the defaults
- * And scans for a bad block table if appropriate.
+ * Scan to find existence of the device and get the type of NAND device:
+ * SMALL block or LARGE block.
*/
- res = nand_scan_tail(mtd);
+ nand_chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(mtd, 1);
if (res)
goto free_irq;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 42820aa1abab..a4e8b7e75135 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -779,6 +779,46 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
return ncfg;
}
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* OOB and ECC CPU and DMA work areas */
+ host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+ /*
+ * Small page FLASH has a unique OOB layout, but large and huge
+ * page FLASH use the standard layout. Small page FLASH uses a
+ * custom BBT marker layout.
+ */
+ if (mtd->writesize <= 512)
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+
+ /* These sizes remain the same regardless of page size */
+ chip->ecc.size = 256;
+ chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+ chip->ecc.prepad = 0;
+ chip->ecc.postpad = 0;
+
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
+ mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
/*
* Probe for NAND controller
*/
@@ -884,41 +924,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/* Find NAND device */
- res = nand_scan_ident(mtd, 1, NULL);
- if (res)
- goto release_dma;
-
- /* OOB and ECC CPU and DMA work areas */
- host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
-
- /*
- * Small page FLASH has a unique OOB layout, but large and huge
- * page FLASH use the standard layout. Small page FLASH uses a
- * custom BBT marker layout.
- */
- if (mtd->writesize <= 512)
- mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
-
- /* These sizes remain the same regardless of page size */
- chip->ecc.size = 256;
- chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
- chip->ecc.prepad = chip->ecc.postpad = 0;
-
- /*
- * Use a custom BBT marker setup for small page FLASH that
- * won't interfere with the ECC layout. Large and huge page
- * FLASH use the standard layout.
- */
- if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
- mtd->writesize <= 512) {
- chip->bbt_td = &bbt_smallpage_main_descr;
- chip->bbt_md = &bbt_smallpage_mirror_descr;
- }
-
- /*
- * Fills out all the uninitialized function pointers with the defaults
- */
- res = nand_scan_tail(mtd);
+ chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(mtd, 1);
if (res)
goto release_dma;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index ebb1d141b900..7af4d6213ee5 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -318,7 +318,7 @@ struct marvell_nfc_caps {
* @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
*/
struct marvell_nfc {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct device *dev;
void __iomem *regs;
struct clk *core_clk;
@@ -335,7 +335,7 @@ struct marvell_nfc {
u8 *dma_buf;
};
-static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct marvell_nfc, controller);
}
@@ -650,11 +650,6 @@ static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
return;
}
- /*
- * Do not change the timing registers when using the DT property
- * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
- * marvell_nand structure are supposedly empty.
- */
writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
@@ -2157,6 +2152,7 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
break;
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ case NAND_ECC_ON_DIE:
if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
mtd->writesize != SZ_2K) {
dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
@@ -2294,6 +2290,111 @@ static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
return 0;
}
+static int marvell_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev);
+ int ret;
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+ /*
+ * Now add the number of cycles needed to pass the row
+ * address.
+ *
+ * Addressing a chip using CS 2 or 3 would also need the third row
+ * cycle, but due to inconsistencies in the documentation and a lack
+ * of hardware to test this situation, this case is not supported.
+ */
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(nfc->dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ /*
+ * Subpage write not available with hardware ECC, prohibit also
+ * subpage read as in userspace subpage access would still be
+ * allowed and subpage write, if used, would lead to numerous
+ * uncorrectable ECC errors.
+ */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, i.e.:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops marvell_nand_controller_ops = {
+ .attach_chip = marvell_nand_attach_chip,
+};
+
static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
struct device_node *np)
{
@@ -2436,105 +2537,10 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
chip->options |= NAND_BUSWIDTH_AUTO;
- ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
- if (ret) {
- dev_err(dev, "could not identify the nand chip\n");
- return ret;
- }
-
- if (pdata && pdata->flash_bbt)
- chip->bbt_options |= NAND_BBT_USE_FLASH;
- if (chip->bbt_options & NAND_BBT_USE_FLASH) {
- /*
- * We'll use a bad block table stored in-flash and don't
- * allow writing the bad block marker to the flash.
- */
- chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
- chip->bbt_td = &bbt_main_descr;
- chip->bbt_md = &bbt_mirror_descr;
- }
-
- /* Save the chip-specific fields of NDCR */
- marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
- if (chip->options & NAND_BUSWIDTH_16)
- marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
-
- /*
- * On small page NANDs, only one cycle is needed to pass the
- * column address.
- */
- if (mtd->writesize <= 512) {
- marvell_nand->addr_cyc = 1;
- } else {
- marvell_nand->addr_cyc = 2;
- marvell_nand->ndcr |= NDCR_RA_START;
- }
-
- /*
- * Now add the number of cycles needed to pass the row
- * address.
- *
- * Addressing a chip using CS 2 or 3 should also need the third row
- * cycle but due to inconsistance in the documentation and lack of
- * hardware to test this situation, this case is not supported.
- */
- if (chip->options & NAND_ROW_ADDR_3)
- marvell_nand->addr_cyc += 3;
- else
- marvell_nand->addr_cyc += 2;
-
- if (pdata) {
- chip->ecc.size = pdata->ecc_step_size;
- chip->ecc.strength = pdata->ecc_strength;
- }
-
- ret = marvell_nand_ecc_init(mtd, &chip->ecc);
- if (ret) {
- dev_err(dev, "ECC init failed: %d\n", ret);
- return ret;
- }
-
- if (chip->ecc.mode == NAND_ECC_HW) {
- /*
- * Subpage write not available with hardware ECC, prohibit also
- * subpage read as in userspace subpage access would still be
- * allowed and subpage write, if used, would lead to numerous
- * uncorrectable ECC errors.
- */
- chip->options |= NAND_NO_SUBPAGE_WRITE;
- }
-
- if (pdata || nfc->caps->legacy_of_bindings) {
- /*
- * We keep the MTD name unchanged to avoid breaking platforms
- * where the MTD cmdline parser is used and the bootloader
- * has not been updated to use the new naming scheme.
- */
- mtd->name = "pxa3xx_nand-0";
- } else if (!mtd->name) {
- /*
- * If the new bindings are used and the bootloader has not been
- * updated to pass a new mtdparts parameter on the cmdline, you
- * should define the following property in your NAND node, ie:
- *
- * label = "main-storage";
- *
- * This way, mtd->name will be set by the core when
- * nand_set_flash_node() is called.
- */
- mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
- "%s:nand.%d", dev_name(nfc->dev),
- marvell_nand->sels[0].cs);
- if (!mtd->name) {
- dev_err(nfc->dev, "Failed to allocate mtd->name\n");
- return -ENOMEM;
- }
- }
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, marvell_nand->nsels);
if (ret) {
- dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ dev_err(dev, "could not scan the nand chip\n");
return ret;
}
@@ -2612,8 +2618,6 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
dev);
struct dma_slave_config config = {};
struct resource *r;
- dma_cap_mask_t mask;
- struct pxad_param param;
int ret;
if (!IS_ENABLED(CONFIG_PXA_DMA)) {
@@ -2626,20 +2630,7 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
if (ret)
return ret;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(nfc->dev, "No resource defined for data DMA\n");
- return -ENXIO;
- }
-
- param.drcmr = r->start;
- param.prio = PXAD_PRIO_LOWEST;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- nfc->dma_chan =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, nfc->dev,
- "data");
+ nfc->dma_chan = dma_request_slave_channel(nfc->dev, "data");
if (!nfc->dma_chan) {
dev_err(nfc->dev,
"Unable to request data DMA channel\n");
@@ -2677,6 +2668,21 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
return 0;
}
+static void marvell_nfc_reset(struct marvell_nfc *nfc)
+{
+ /*
+ * ECC operations and interrupts are only enabled when specifically
+ * needed. ECC shall not be activated in the early stages (it makes
+ * probe fail). The arbiter flag, even if marked as "reserved", must
+ * be set (empirical). The SPARE_EN bit must always be set or the ECC
+ * bytes will not be at the same offset in the read page, which breaks
+ * the ECC protection.
+ */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+}
+
static int marvell_nfc_init(struct marvell_nfc *nfc)
{
struct device_node *np = nfc->dev->of_node;
@@ -2715,17 +2721,7 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
if (!nfc->caps->is_nfcv2)
marvell_nfc_init_dma(nfc);
- /*
- * ECC operations and interruptions are only enabled when specifically
- * needed. ECC shall not be activated in the early stages (fails probe).
- * Arbiter flag, even if marked as "reserved", must be set (empirical).
- * SPARE_EN bit must always be set or ECC bytes will not be at the same
- * offset in the read page and this will fail the protection.
- */
- writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
- NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
- writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
- writel_relaxed(0, nfc->regs + NDECCCTRL);
+ marvell_nfc_reset(nfc);
return 0;
}
@@ -2744,7 +2740,8 @@ static int marvell_nfc_probe(struct platform_device *pdev)
return -ENOMEM;
nfc->dev = dev;
- nand_hw_control_init(&nfc->controller);
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &marvell_nand_controller_ops;
INIT_LIST_HEAD(&nfc->chips);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2772,17 +2769,19 @@ static int marvell_nfc_probe(struct platform_device *pdev)
return ret;
nfc->reg_clk = devm_clk_get(&pdev->dev, "reg");
- if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
- if (!IS_ERR(nfc->reg_clk)) {
- ret = clk_prepare_enable(nfc->reg_clk);
- if (ret)
- goto unprepare_core_clk;
- } else {
+ if (IS_ERR(nfc->reg_clk)) {
+ if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
ret = PTR_ERR(nfc->reg_clk);
goto unprepare_core_clk;
}
+
+ nfc->reg_clk = NULL;
}
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret)
+ goto unprepare_core_clk;
+
marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
ret = devm_request_irq(dev, irq, marvell_nfc_isr,
@@ -2840,6 +2839,49 @@ static int marvell_nfc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused marvell_nfc_suspend(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ struct marvell_nand_chip *chip;
+
+ list_for_each_entry(chip, &nfc->chips, node)
+ marvell_nfc_wait_ndrun(&chip->chip);
+
+ clk_disable_unprepare(nfc->reg_clk);
+ clk_disable_unprepare(nfc->core_clk);
+
+ return 0;
+}
+
+static int __maybe_unused marvell_nfc_resume(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret < 0) {
+ clk_disable_unprepare(nfc->core_clk);
+ return ret;
+ }
+
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be restored in marvell_nfc_select_chip().
+ */
+ nfc->selected_chip = NULL;
+
+ /* Reset registers that have lost their contents */
+ marvell_nfc_reset(nfc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops marvell_nfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
+};
+
static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
@@ -2924,6 +2966,7 @@ static struct platform_driver marvell_nfc_driver = {
.driver = {
.name = "marvell-nfc",
.of_match_table = marvell_nfc_of_ids,
+ .pm = &marvell_nfc_pm_ops,
},
.id_table = marvell_nfc_platform_ids,
.probe = marvell_nfc_probe,
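Two of the marvell changes deserve a closing note. First, the address-cycle arithmetic moved into marvell_nand_attach_chip() is easy to sanity-check in isolation: one column cycle for pages of 512 bytes or less, two otherwise, plus two row cycles, or three when NAND_ROW_ADDR_3 is set. A hedged restatement as a standalone helper, not part of the driver:

/* e.g. a 2048-byte page with 3 row cycles needs 2 + 3 = 5 cycles */
static unsigned int sketch_addr_cycles(unsigned int writesize, bool row3)
{
	unsigned int cyc = (writesize <= 512) ? 1 : 2;	/* column cycles */

	cyc += row3 ? 3 : 2;				/* row cycles */

	return cyc;
}

Second, the new PM hooks lean on the chip-selection path: marvell_nfc_resume() clears nfc->selected_chip so that the next marvell_nfc_select_chip() call rewrites the NDTR0/NDTR1 timing registers lost across suspend, while marvell_nfc_reset() restores the controller-wide registers directly.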
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 75c845adb050..57b5ed1699e3 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -145,7 +145,7 @@ struct mtk_nfc_clk {
};
struct mtk_nfc {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct mtk_ecc_config ecc_cfg;
struct mtk_nfc_clk clk;
struct mtk_ecc *ecc;
@@ -1250,13 +1250,54 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
return 0;
}
+static int mtk_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int len;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16bits buswidth not supported");
+ return -EINVAL;
+ }
+
+ /* store bbt magic in page, because OOB is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ ret = mtk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return ret;
+
+ ret = mtk_nfc_set_spare_per_sector(&mtk_nand->spare_per_sector, mtd);
+ if (ret)
+ return ret;
+
+ mtk_nfc_set_fdm(&mtk_nand->fdm, mtd);
+ mtk_nfc_set_bad_mark_ctl(&mtk_nand->bad_mark, mtd);
+
+ len = mtd->writesize + mtd->oobsize;
+ nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!nfc->buffer)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct nand_controller_ops mtk_nfc_controller_ops = {
+ .attach_chip = mtk_nfc_attach_chip,
+};
+
static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
struct device_node *np)
{
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
struct mtd_info *mtd;
- int nsels, len;
+ int nsels;
u32 tmp;
int ret;
int i;
@@ -1324,40 +1365,11 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
mtk_nfc_hw_init(nfc);
- ret = nand_scan_ident(mtd, nsels, NULL);
- if (ret)
- return ret;
-
- /* store bbt magic in page, cause OOB is not protected */
- if (nand->bbt_options & NAND_BBT_USE_FLASH)
- nand->bbt_options |= NAND_BBT_NO_OOB;
-
- ret = mtk_nfc_ecc_init(dev, mtd);
- if (ret)
- return -EINVAL;
-
- if (nand->options & NAND_BUSWIDTH_16) {
- dev_err(dev, "16bits buswidth not supported");
- return -EINVAL;
- }
-
- ret = mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
- if (ret)
- return ret;
-
- mtk_nfc_set_fdm(&chip->fdm, mtd);
- mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
-
- len = mtd->writesize + mtd->oobsize;
- nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
- if (!nfc->buffer)
- return -ENOMEM;
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, nsels);
if (ret)
return ret;
- ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "mtd parse partition error\n");
nand_release(mtd);
@@ -1443,6 +1455,7 @@ static int mtk_nfc_probe(struct platform_device *pdev)
spin_lock_init(&nfc->controller.lock);
init_waitqueue_head(&nfc->controller.wq);
INIT_LIST_HEAD(&nfc->chips);
+ nfc->controller.ops = &mtk_nfc_controller_ops;
/* probe defer if not ready */
nfc->ecc = of_mtk_ecc_get(np);
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 45786e707b7b..4c9214dea424 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Sascha Hauer, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/delay.h>
@@ -34,8 +21,6 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/of_device.h>
-
-#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-mxc_nand.h>
#define DRIVER_NAME "mxc_nand"
@@ -48,7 +33,7 @@
#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
@@ -1274,6 +1259,9 @@ static void preset_v2(struct mtd_info *mtd)
writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
+ /* spare area size in 16-bit half-words */
+ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
/* Unlock the internal RAM Buffer */
writew(0x2, NFC_V1_V2_CONFIG);
@@ -1683,7 +1671,7 @@ static const struct of_device_id mxcnd_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
-static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+static int mxcnd_probe_dt(struct mxc_nand_host *host)
{
struct device_node *np = host->dev->of_node;
const struct of_device_id *of_id =
@@ -1697,12 +1685,80 @@ static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
return 0;
}
#else
-static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+static int mxcnd_probe_dt(struct mxc_nand_host *host)
{
return 1;
}
#endif
+static int mxcnd_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = mtd->dev.parent;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_HW:
+ chip->ecc.read_page = mxc_nand_read_page;
+ chip->ecc.read_page_raw = mxc_nand_read_page_raw;
+ chip->ecc.read_oob = mxc_nand_read_oob;
+ chip->ecc.write_page = mxc_nand_write_page_ecc;
+ chip->ecc.write_page_raw = mxc_nand_write_page_raw;
+ chip->ecc.write_oob = mxc_nand_write_oob;
+ break;
+
+ case NAND_ECC_SOFT:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Allocate the right size buffer now */
+ devm_kfree(dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
+
+ /* Call preset again, with the correct writesize this time */
+ host->devtype_data->preset(mtd);
+
+ if (!chip->ecc.bytes) {
+ if (host->eccsize == 8)
+ chip->ecc.bytes = 18;
+ else if (host->eccsize == 4)
+ chip->ecc.bytes = 9;
+ }
+
+ /*
+ * Experimentation shows that i.MX NFC can only handle up to 218 oob
+ * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
+ * into copying invalid data to/from the spare IO buffer, as this
+ * might cause ECC data corruption when doing sub-page write to a
+ * partially written page.
+ */
+ host->used_oobsize = min(mtd->oobsize, 218U);
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ if (is_imx21_nfc(host) || is_imx27_nfc(host))
+ chip->ecc.strength = 1;
+ else
+ chip->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops mxcnd_controller_ops = {
+ .attach_chip = mxcnd_attach_chip,
+};
+
static int mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
@@ -1842,71 +1898,9 @@ static int mxcnd_probe(struct platform_device *pdev)
host->devtype_data->irq_control(host, 1);
}
- /* first scan to find the device and get the page size */
- err = nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL);
- if (err)
- goto escan;
-
- switch (this->ecc.mode) {
- case NAND_ECC_HW:
- this->ecc.read_page = mxc_nand_read_page;
- this->ecc.read_page_raw = mxc_nand_read_page_raw;
- this->ecc.read_oob = mxc_nand_read_oob;
- this->ecc.write_page = mxc_nand_write_page_ecc;
- this->ecc.write_page_raw = mxc_nand_write_page_raw;
- this->ecc.write_oob = mxc_nand_write_oob;
- break;
-
- case NAND_ECC_SOFT:
- break;
-
- default:
- err = -EINVAL;
- goto escan;
- }
-
- if (this->bbt_options & NAND_BBT_USE_FLASH) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
-
- /* allocate the right size buffer now */
- devm_kfree(&pdev->dev, (void *)host->data_buf);
- host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!host->data_buf) {
- err = -ENOMEM;
- goto escan;
- }
-
- /* Call preset again, with correct writesize this time */
- host->devtype_data->preset(mtd);
-
- if (!this->ecc.bytes) {
- if (host->eccsize == 8)
- this->ecc.bytes = 18;
- else if (host->eccsize == 4)
- this->ecc.bytes = 9;
- }
-
- /*
- * Experimentation shows that i.MX NFC can only handle up to 218 oob
- * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
- * into copying invalid data to/from the spare IO buffer, as this
- * might cause ECC data corruption when doing sub-page write to a
- * partially written page.
- */
- host->used_oobsize = min(mtd->oobsize, 218U);
-
- if (this->ecc.mode == NAND_ECC_HW) {
- if (is_imx21_nfc(host) || is_imx27_nfc(host))
- this->ecc.strength = 1;
- else
- this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
- }
-
- /* second phase scan */
- err = nand_scan_tail(mtd);
+ /* Scan the NAND device */
+ this->dummy_controller.ops = &mxcnd_controller_ops;
+ err = nand_scan(mtd, is_imx25_nfc(host) ? 4 : 1);
if (err)
goto escan;
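mxcnd_attach_chip() keeps two identification-dependent fixups that are easy to restate: when the core leaves chip->ecc.bytes at zero, the value follows the controller's ECC size (8-bit ECC stores 18 bytes per step, 4-bit stores 9), and the usable OOB is clamped to the 218 bytes the NFC can actually handle. A hedged standalone restatement of both rules:

static void sketch_mxc_fixups(unsigned int eccsize, unsigned int oobsize,
			      unsigned int *ecc_bytes, unsigned int *used_oob)
{
	/* values from the hunk above: 8-bit ECC -> 18 bytes, 4-bit -> 9 */
	if (!*ecc_bytes) {
		if (eccsize == 8)
			*ecc_bytes = 18;
		else if (eccsize == 4)
			*ecc_bytes = 9;
	}

	/* the i.MX NFC cannot handle more than 218 OOB bytes */
	*used_oob = oobsize < 218 ? oobsize : 218;
}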
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 10c4f9919850..d527e448ce19 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
for (; page < page_end; page++) {
res = chip->ecc.read_oob(mtd, chip, page);
- if (res)
+ if (res < 0)
return res;
bad = chip->oob_poi[chip->badblockpos];
@@ -2668,8 +2668,8 @@ static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
return subop && instr_idx < subop->ninstrs;
}
-static int nand_subop_get_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
if (instr_idx)
return 0;
@@ -2688,12 +2688,12 @@ static int nand_subop_get_start_off(const struct nand_subop *subop,
*
* Given an address instruction, returns the offset of the first cycle to issue.
*/
-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
return nand_subop_get_start_off(subop, instr_idx);
}
@@ -2710,14 +2710,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
*
* Given an address instruction, returns the number of address cycle to issue.
*/
-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
int start_off, end_off;
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
start_off = nand_subop_get_addr_start_off(subop, instr_idx);
@@ -2742,12 +2742,12 @@ EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
*
* Given a data instruction, returns the offset to start from.
*/
-int nand_subop_get_data_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- !nand_instr_is_data(&subop->instrs[instr_idx]))
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
return nand_subop_get_start_off(subop, instr_idx);
}
@@ -2764,14 +2764,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
*
* Returns the length of the chunk of data to send/receive.
*/
-int nand_subop_get_data_len(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
int start_off = 0, end_off;
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- !nand_instr_is_data(&subop->instrs[instr_idx]))
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
start_off = nand_subop_get_data_start_off(subop, instr_idx);
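With the parser helpers above now returning unsigned int and flagging API misuse with a WARN_ON() instead of -EINVAL, callers no longer carry an error path. A minimal sketch of how an ->exec_op() pattern handler typically consumes them, assuming a hypothetical foo_write_addr_cycle() controller accessor (the rest is the real API):

static int foo_exec_addr_instr(struct nand_chip *chip,
			       const struct nand_subop *subop,
			       unsigned int instr_idx)
{
	const struct nand_op_instr *instr = &subop->instrs[instr_idx];
	unsigned int start = nand_subop_get_addr_start_off(subop, instr_idx);
	unsigned int ncycles = nand_subop_get_num_addr_cyc(subop, instr_idx);
	unsigned int i;

	/* Only issue the address cycles assigned to this sub-operation */
	for (i = 0; i < ncycles; i++)
		foo_write_addr_cycle(chip, instr->ctx.addr.addrs[start + i]);

	return 0;
}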
@@ -2967,6 +2967,23 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
/**
+ * nand_read_page_raw_notsupp - dummy read raw page function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_read_page_raw_notsupp);
+
+/**
* nand_read_page_raw - [INTERN] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -3960,6 +3977,22 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
return ret;
}
+/**
+ * nand_write_page_raw_notsupp - dummy raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_write_page_raw_notsupp);
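Both *_notsupp stubs are meant for chips whose on-die ECC engine cannot be disabled, where a raw (uncorrected) page access is simply impossible; the Micron hunk further down wires them into ecc.read_page_raw/ecc.write_page_raw when MICRON_ON_DIE_MANDATORY is detected.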
/**
* nand_write_page_raw - [INTERN] raw page write function
@@ -4965,12 +4998,10 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
if (!chip->read_buf || chip->read_buf == nand_read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
- if (!chip->scan_bbt)
- chip->scan_bbt = nand_default_bbt;
if (!chip->controller) {
- chip->controller = &chip->hwcontrol;
- nand_hw_control_init(chip->controller);
+ chip->controller = &chip->dummy_controller;
+ nand_controller_init(chip->controller);
}
if (!chip->buf_align)
@@ -5120,6 +5151,8 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_onfi_params *p;
+ struct onfi_params *onfi;
+ int onfi_version = 0;
char id[4];
int i, ret, val;
@@ -5168,30 +5201,35 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
}
}
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->fixup_onfi_param_page)
+ chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
+
/* Check version */
val = le16_to_cpu(p->revision);
- if (val & (1 << 5))
- chip->parameters.onfi.version = 23;
- else if (val & (1 << 4))
- chip->parameters.onfi.version = 22;
- else if (val & (1 << 3))
- chip->parameters.onfi.version = 21;
- else if (val & (1 << 2))
- chip->parameters.onfi.version = 20;
- else if (val & (1 << 1))
- chip->parameters.onfi.version = 10;
-
- if (!chip->parameters.onfi.version) {
+ if (val & ONFI_VERSION_2_3)
+ onfi_version = 23;
+ else if (val & ONFI_VERSION_2_2)
+ onfi_version = 22;
+ else if (val & ONFI_VERSION_2_1)
+ onfi_version = 21;
+ else if (val & ONFI_VERSION_2_0)
+ onfi_version = 20;
+ else if (val & ONFI_VERSION_1_0)
+ onfi_version = 10;
+
+ if (!onfi_version) {
pr_info("unsupported ONFI version: %d\n", val);
goto free_onfi_param_page;
- } else {
- ret = 1;
}
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
- strncpy(chip->parameters.model, p->model,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_onfi_param_page;
+ }
mtd->writesize = le32_to_cpu(p->byte_per_page);
@@ -5219,7 +5257,7 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
if (p->ecc_bits != 0xff) {
chip->ecc_strength_ds = p->ecc_bits;
chip->ecc_step_ds = 512;
- } else if (chip->parameters.onfi.version >= 21 &&
+ } else if (onfi_version >= 21 &&
(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
/*
@@ -5246,19 +5284,33 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
bitmap_set(chip->parameters.set_feature_list,
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
- chip->parameters.onfi.tPROG = le16_to_cpu(p->t_prog);
- chip->parameters.onfi.tBERS = le16_to_cpu(p->t_bers);
- chip->parameters.onfi.tR = le16_to_cpu(p->t_r);
- chip->parameters.onfi.tCCS = le16_to_cpu(p->t_ccs);
- chip->parameters.onfi.async_timing_mode =
- le16_to_cpu(p->async_timing_mode);
- chip->parameters.onfi.vendor_revision =
- le16_to_cpu(p->vendor_revision);
- memcpy(chip->parameters.onfi.vendor, p->vendor,
- sizeof(p->vendor));
+ onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
+ if (!onfi) {
+ ret = -ENOMEM;
+ goto free_model;
+ }
+
+ onfi->version = onfi_version;
+ onfi->tPROG = le16_to_cpu(p->t_prog);
+ onfi->tBERS = le16_to_cpu(p->t_bers);
+ onfi->tR = le16_to_cpu(p->t_r);
+ onfi->tCCS = le16_to_cpu(p->t_ccs);
+ onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
+ memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
+ chip->parameters.onfi = onfi;
+
+ /* Identification done, free the full ONFI parameter page and exit */
+ kfree(p);
+
+ return 1;
+
+free_model:
+ kfree(chip->parameters.model);
free_onfi_param_page:
kfree(p);
+
return ret;
}
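For reference, a worked example of the revision decoding above: the new ONFI_VERSION_* masks mirror the bit positions of the open-coded tests they replace (BIT(1) for ONFI 1.0 up to BIT(5) for ONFI 2.3), so a parameter page with a revision field of 0x003E has bits 1 to 5 set, the ONFI_VERSION_2_3 test matches first, and onfi_version becomes 23.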
@@ -5321,8 +5373,11 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
- strncpy(chip->parameters.model, p->model,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_jedec_param_page;
+ }
mtd->writesize = le32_to_cpu(p->byte_per_page);
@@ -5511,8 +5566,9 @@ static bool find_full_id_nand(struct nand_chip *chip,
chip->onfi_timing_mode_default =
type->onfi_timing_mode_default;
- strncpy(chip->parameters.model, type->name,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return false;
return true;
}
@@ -5651,7 +5707,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
}
}
- chip->parameters.onfi.version = 0;
if (!type->name || !type->pagesize) {
/* Check if the chip is ONFI compliant */
ret = nand_flash_detect_onfi(chip);
@@ -5671,8 +5726,9 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
if (!type->name)
return -ENODEV;
- strncpy(chip->parameters.model, type->name,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return -ENOMEM;
chip->chipsize = (uint64_t)type->chipsize << 20;
@@ -5702,7 +5758,9 @@ ident_done:
mtd->name);
pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
- return -EINVAL;
+ ret = -EINVAL;
+
+ goto free_detect_allocation;
}
nand_decode_bbm_options(chip);
@@ -5739,6 +5797,11 @@ ident_done:
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
return 0;
+
+free_detect_allocation:
+ kfree(chip->parameters.model);
+
+ return ret;
}
static const char * const nand_ecc_modes[] = {
@@ -5777,6 +5840,7 @@ static int of_get_nand_ecc_mode(struct device_node *np)
static const char * const nand_ecc_algos[] = {
[NAND_ECC_HAMMING] = "hamming",
[NAND_ECC_BCH] = "bch",
+ [NAND_ECC_RS] = "rs",
};
static int of_get_nand_ecc_algo(struct device_node *np)
@@ -5858,6 +5922,9 @@ static int nand_dt_init(struct nand_chip *chip)
if (of_get_nand_bus_width(dn) == 16)
chip->options |= NAND_BUSWIDTH_16;
+ if (of_property_read_bool(dn, "nand-is-boot-medium"))
+ chip->options |= NAND_IS_BOOT_MEDIUM;
+
if (of_get_nand_on_flash_bbt(dn))
chip->bbt_options |= NAND_BBT_USE_FLASH;
@@ -5885,7 +5952,7 @@ static int nand_dt_init(struct nand_chip *chip)
}
/**
- * nand_scan_ident - [NAND Interface] Scan for the NAND device
+ * nand_scan_ident - Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: number of chips to scan for
* @table: alternative NAND ID table
@@ -5893,9 +5960,13 @@ static int nand_dt_init(struct nand_chip *chip)
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
+ * This helper used to be called directly from controller drivers that needed
+ * to tweak some ECC-related parameters before nand_scan_tail(). This separation
+ * prevented dynamic allocations during this phase, which was inconvenient and
+ * has been banned for the benefit of the ->attach_chip()/->detach_chip() hooks.
*/
-int nand_scan_ident(struct mtd_info *mtd, int maxchips,
- struct nand_flash_dev *table)
+static int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
{
int i, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd_to_nand(mtd);
@@ -5969,7 +6040,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return 0;
}
-EXPORT_SYMBOL(nand_scan_ident);
+
+static void nand_scan_ident_cleanup(struct nand_chip *chip)
+{
+ kfree(chip->parameters.model);
+ kfree(chip->parameters.onfi);
+}
static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
{
@@ -6077,24 +6153,17 @@ static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
* by the controller and the calculated ECC bytes fit within the chip's OOB.
* On success, the calculated ECC bytes is set.
*/
-int nand_check_ecc_caps(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail)
+static int
+nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
int preset_step = chip->ecc.size;
int preset_strength = chip->ecc.strength;
- int nsteps, ecc_bytes;
+ int ecc_bytes, nsteps = mtd->writesize / preset_step;
int i, j;
- if (WARN_ON(oobavail < 0))
- return -EINVAL;
-
- if (!preset_step || !preset_strength)
- return -ENODATA;
-
- nsteps = mtd->writesize / preset_step;
-
for (i = 0; i < caps->nstepinfos; i++) {
stepinfo = &caps->stepinfos[i];
@@ -6127,7 +6196,6 @@ int nand_check_ecc_caps(struct nand_chip *chip,
return -ENOTSUPP;
}
-EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
/**
* nand_match_ecc_req - meet the chip's requirement with least ECC bytes
@@ -6139,8 +6207,9 @@ EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
* number of ECC bytes (i.e. with the largest number of OOB-free bytes).
* On success, the chosen ECC settings are set.
*/
-int nand_match_ecc_req(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail)
+static int
+nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
@@ -6151,9 +6220,6 @@ int nand_match_ecc_req(struct nand_chip *chip,
int best_ecc_bytes_total = INT_MAX;
int i, j;
- if (WARN_ON(oobavail < 0))
- return -EINVAL;
-
/* No information provided by the NAND chip */
if (!req_step || !req_strength)
return -ENOTSUPP;
@@ -6212,7 +6278,6 @@ int nand_match_ecc_req(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL_GPL(nand_match_ecc_req);
/**
* nand_maximize_ecc - choose the max ECC strength available
@@ -6223,8 +6288,9 @@ EXPORT_SYMBOL_GPL(nand_match_ecc_req);
* Choose the max ECC strength that is supported on the controller, and can fit
* within the chip's OOB. On success, the chosen ECC settings are set.
*/
-int nand_maximize_ecc(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail)
+static int
+nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
@@ -6234,9 +6300,6 @@ int nand_maximize_ecc(struct nand_chip *chip,
int best_strength, best_ecc_bytes;
int i, j;
- if (WARN_ON(oobavail < 0))
- return -EINVAL;
-
for (i = 0; i < caps->nstepinfos; i++) {
stepinfo = &caps->stepinfos[i];
step_size = stepinfo->stepsize;
@@ -6285,7 +6348,44 @@ int nand_maximize_ecc(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL_GPL(nand_maximize_ecc);
+
+/**
+ * nand_ecc_choose_conf - Set the ECC strength and ECC step size
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the ECC configuration according to following logic
+ *
+ * 1. If both ECC step size and ECC strength are already set (usually by DT)
+ * then check if it is supported by this controller.
+ * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
+ * 3. Otherwise, try to match the ECC step size and ECC strength closest
+ * to the chip's requirement. If available OOB size can't fit the chip
+ * requirement then fallback to the maximum ECC step size and ECC strength.
+ *
+ * On success, the chosen ECC settings are set.
+ */
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
+ return -EINVAL;
+
+ if (chip->ecc.size && chip->ecc.strength)
+ return nand_check_ecc_caps(chip, caps, oobavail);
+
+ if (chip->ecc.options & NAND_ECC_MAXIMIZE)
+ return nand_maximize_ecc(chip, caps, oobavail);
+
+ if (!nand_match_ecc_req(chip, caps, oobavail))
+ return 0;
+
+ return nand_maximize_ecc(chip, caps, oobavail);
+}
+EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
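A hedged sketch of how a controller driver would call the newly exported helper from its ->attach_chip() hook; foo_ecc_caps, foo_calc_ecc_bytes() and the two OOB bytes reserved for the bad-block marker are assumptions, while NAND_ECC_CAPS_SINGLE() and nand_ecc_choose_conf() are the real API:

/* Hypothetical engine: 512-byte steps, 4 or 8 bits of correction */
static int foo_calc_ecc_bytes(int step_size, int strength)
{
	/* 13 parity bits per strength unit, rounded up to full bytes */
	return DIV_ROUND_UP(13 * strength, 8);
}
NAND_ECC_CAPS_SINGLE(foo_ecc_caps, foo_calc_ecc_bytes, 512, 4, 8);

static int foo_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Honours a DT-provided step/strength pair, NAND_ECC_MAXIMIZE,
	 * or falls back to matching the chip's requirement, keeping the
	 * result within the OOB area left after the bad-block marker.
	 */
	return nand_ecc_choose_conf(chip, &foo_ecc_caps, mtd->oobsize - 2);
}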
/*
* Check if the chip configuration meets the datasheet requirements.
@@ -6322,14 +6422,14 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
}
/**
- * nand_scan_tail - [NAND Interface] Scan for the NAND device
+ * nand_scan_tail - Scan for the NAND device
* @mtd: MTD device structure
*
* This is the second phase of the normal nand_scan() function. It fills out
* all the uninitialized function pointers with the defaults and scans for a
* bad block table if appropriate.
*/
-int nand_scan_tail(struct mtd_info *mtd)
+static int nand_scan_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -6636,7 +6736,7 @@ int nand_scan_tail(struct mtd_info *mtd)
return 0;
/* Build bad block table */
- ret = chip->scan_bbt(mtd);
+ ret = nand_create_bbt(chip);
if (ret)
goto err_nand_manuf_cleanup;
@@ -6653,24 +6753,27 @@ err_free_buf:
return ret;
}
-EXPORT_SYMBOL(nand_scan_tail);
-/*
- * is_module_text_address() isn't exported, and it's mostly a pointless
- * test if this is a module _anyway_ -- they'd have to try _really_ hard
- * to call us from in-kernel code if the core NAND support is modular.
- */
-#ifdef MODULE
-#define caller_is_module() (1)
-#else
-#define caller_is_module() \
- is_module_text_address((unsigned long)__builtin_return_address(0))
-#endif
+static int nand_attach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->attach_chip)
+ return chip->controller->ops->attach_chip(chip);
+
+ return 0;
+}
+
+static void nand_detach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->detach_chip)
+ chip->controller->ops->detach_chip(chip);
+}
/**
* nand_scan_with_ids - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
- * @maxchips: number of chips to scan for
+ * @maxchips: number of chips to scan for. nand_scan_ident() will not be run if
+ * this parameter is zero (useful for specific drivers that must
+ * handle this part of the process themselves, e.g. docg4).
* @ids: optional flash IDs table
*
* This fills out all the uninitialized function pointers with the defaults.
@@ -6680,11 +6783,30 @@ EXPORT_SYMBOL(nand_scan_tail);
int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *ids)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
- ret = nand_scan_ident(mtd, maxchips, ids);
- if (!ret)
- ret = nand_scan_tail(mtd);
+ if (maxchips) {
+ ret = nand_scan_ident(mtd, maxchips, ids);
+ if (ret)
+ return ret;
+ }
+
+ ret = nand_attach(chip);
+ if (ret)
+ goto cleanup_ident;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ goto detach_chip;
+
+ return 0;
+
+detach_chip:
+ nand_detach(chip);
+cleanup_ident:
+ nand_scan_ident_cleanup(chip);
+
return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);
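The driver conversions in this series (mxc_nand above, nandsim and omap2 below) all follow the same pattern. A condensed sketch for a hypothetical foo driver that previously open-coded the ident/tail split:

static int foo_attach_chip(struct nand_chip *chip)
{
	/*
	 * Everything that used to run between nand_scan_ident() and
	 * nand_scan_tail(), e.g. ECC setup or buffer allocations that
	 * depend on mtd->writesize, moves here.
	 */
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;

	return 0;
}

static const struct nand_controller_ops foo_controller_ops = {
	.attach_chip = foo_attach_chip,
};

static int foo_probe(struct platform_device *pdev)
{
	struct nand_chip *chip = foo_get_chip(pdev); /* hypothetical */
	int ret;

	/* Drivers without a shared controller use the per-chip dummy */
	chip->dummy_controller.ops = &foo_controller_ops;

	ret = nand_scan(nand_to_mtd(chip), 1);
	if (ret)
		return ret;

	return mtd_device_register(nand_to_mtd(chip), NULL, 0);
}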
@@ -6712,7 +6834,14 @@ void nand_cleanup(struct nand_chip *chip)
/* Free manufacturer priv data. */
nand_manufacturer_cleanup(chip);
+
+ /* Free controller specific allocations after chip identification */
+ nand_detach(chip);
+
+ /* Free identification phase allocations */
+ nand_scan_ident_cleanup(chip);
}
+
EXPORT_SYMBOL_GPL(nand_cleanup);
/**
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index d9f4ceff2568..39db352f8757 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -1349,15 +1349,14 @@ static int nand_create_badblock_pattern(struct nand_chip *this)
}
/**
- * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
+ * nand_create_bbt - [NAND Interface] Select a default bad block table for the device
+ * @this: NAND chip object
*
* This function selects the default bad block table support for the device and
* calls the nand_scan_bbt function.
*/
-int nand_default_bbt(struct mtd_info *mtd)
+int nand_create_bbt(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
int ret;
/* Is a flash based bad block table requested? */
@@ -1383,8 +1382,9 @@ int nand_default_bbt(struct mtd_info *mtd)
return ret;
}
- return nand_scan_bbt(mtd, this->badblock_pattern);
+ return nand_scan_bbt(nand_to_mtd(this), this->badblock_pattern);
}
+EXPORT_SYMBOL(nand_create_bbt);
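Drivers that previously called chip->scan_bbt(mtd) are expected to call nand_create_bbt(chip) instead, as the nandsim hunk below does; nand_scan_tail() now performs the same call internally.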
/**
* nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index d542908a0ebb..4ffbb26e76d6 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -100,6 +100,16 @@ static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
struct mtd_info *mtd = nand_to_mtd(chip);
u16 column = ((u16)addr << 8) | addr;
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_ADDR(1, &addr, 0),
+ NAND_OP_8BIT_DATA_OUT(1, &val, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
chip->write_byte(mtd, val);
@@ -473,6 +483,19 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
WARN(1, "Invalid OOB size");
break;
}
+
+ /*
+ * The datasheet of H27UCG8T2BTR mentions that the "Redundant
+ * Area Size" is encoded "per 8KB" (page size). This chip uses
+ * a page size of 16KiB. The datasheet mentions an OOB size of
+ * 1,280 bytes, but the OOB size encoded in the ID bytes (using
+ * the existing logic above) is 640 bytes.
+ * Update the OOB size for this chip by taking the value
+ * determined above and scaling it to the actual page size (so
+ * the actual OOB size for this chip is: 640 * 16k / 8k).
+ */
+ if (chip->id.data[1] == 0xde)
+ mtd->oobsize *= mtd->writesize / SZ_8K;
}
}
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 7ed1f87e742a..49c546c97c6f 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -17,23 +17,47 @@
#include <linux/mtd/rawnand.h>
+/*
+ * Macronix AC series does not support using SET/GET_FEATURES to change
+ * the timings, unlike what is declared in the parameter page. Unflag
+ * this feature to avoid unnecessary downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+ unsigned int i;
+ static const char * const broken_get_timings[] = {
+ "MX30LF1G18AC",
+ "MX30LF1G28AC",
+ "MX30LF2G18AC",
+ "MX30LF2G28AC",
+ "MX30LF4G18AC",
+ "MX30LF4G28AC",
+ "MX60LF8G18AC",
+ };
+
+ if (!chip->parameters.supports_set_get_features)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
+ if (!strcmp(broken_get_timings[i], chip->parameters.model))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(broken_get_timings))
+ return;
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_clear(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
- /*
- * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
- * the timings unlike what is declared in the parameter page. Unflag
- * this feature to avoid unnecessary downturns.
- */
- if (chip->parameters.supports_set_get_features &&
- !strcmp("MX30LF2G18AC", chip->parameters.model)) {
- bitmap_clear(chip->parameters.get_feature_list,
- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
- bitmap_clear(chip->parameters.set_feature_list,
- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
- }
+ macronix_nand_fix_broken_get_timings(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 0af45b134c0c..f5dc0a7a2456 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -16,12 +16,33 @@
*/
#include <linux/mtd/rawnand.h>
+#include <linux/slab.h>
/*
- * Special Micron status bit that indicates when the block has been
- * corrected by on-die ECC and should be rewritten
+ * Special Micron status bit 3 indicates that the block has been
+ * corrected by on-die ECC and should be rewritten.
*/
-#define NAND_STATUS_WRITE_RECOMMENDED BIT(3)
+#define NAND_ECC_STATUS_WRITE_RECOMMENDED BIT(3)
+
+/*
+ * On chips with 8-bit ECC, an additional bit can be used to distinguish
+ * cases where errors were corrected without needing a rewrite:
+ *
+ * Bit 4 Bit 3 Bit 0 Description
+ * ----- ----- ----- -----------
+ * 0 0 0 No Errors
+ * 0 0 1 Multiple uncorrected errors
+ * 0 1 0 4 - 6 errors corrected, recommend rewrite
+ * 0 1 1 Reserved
+ * 1 0 0 1 - 3 errors corrected
+ * 1 0 1 Reserved
+ * 1 1 0 7 - 8 errors corrected, recommend rewrite
+ */
+#define NAND_ECC_STATUS_MASK (BIT(4) | BIT(3) | BIT(0))
+#define NAND_ECC_STATUS_UNCORRECTABLE BIT(0)
+#define NAND_ECC_STATUS_4_6_CORRECTED BIT(3)
+#define NAND_ECC_STATUS_1_3_CORRECTED BIT(4)
+#define NAND_ECC_STATUS_7_8_CORRECTED (BIT(4) | BIT(3))
struct nand_onfi_vendor_micron {
u8 two_plane_read;
@@ -43,6 +64,16 @@ struct nand_onfi_vendor_micron {
u8 param_revision;
} __packed;
+struct micron_on_die_ecc {
+ bool forced;
+ bool enabled;
+ void *rawbuf;
+};
+
+struct micron_nand {
+ struct micron_on_die_ecc ecc;
+};
+
static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
struct nand_chip *chip = mtd_to_nand(mtd);
@@ -57,23 +88,27 @@ static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
static int micron_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
- struct nand_onfi_vendor_micron *micron = (void *)p->onfi.vendor;
- if (chip->parameters.onfi.version && p->onfi.vendor_revision) {
+ if (p->onfi) {
+ struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
+
chip->read_retries = micron->read_retry_options;
chip->setup_read_retry = micron_nand_setup_read_retry;
}
if (p->supports_set_get_features) {
set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
}
return 0;
}
-static int micron_nand_on_die_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+static int micron_nand_on_die_4_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
{
if (section >= 4)
return -ERANGE;
@@ -84,8 +119,9 @@ static int micron_nand_on_die_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}
-static int micron_nand_on_die_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+static int micron_nand_on_die_4_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
{
if (section >= 4)
return -ERANGE;
@@ -96,19 +132,161 @@ static int micron_nand_on_die_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-static const struct mtd_ooblayout_ops micron_nand_on_die_ooblayout_ops = {
- .ecc = micron_nand_on_die_ooblayout_ecc,
- .free = micron_nand_on_die_ooblayout_free,
+static const struct mtd_ooblayout_ops micron_nand_on_die_4_ooblayout_ops = {
+ .ecc = micron_nand_on_die_4_ooblayout_ecc,
+ .free = micron_nand_on_die_4_ooblayout_free,
+};
+
+static int micron_nand_on_die_8_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = mtd->oobsize - chip->ecc.total;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int micron_nand_on_die_8_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - chip->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_8_ooblayout_ops = {
+ .ecc = micron_nand_on_die_8_ooblayout_ecc,
+ .free = micron_nand_on_die_8_ooblayout_free,
};
static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable)
{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ int ret;
+
+ if (micron->ecc.forced)
+ return 0;
+
+ if (micron->ecc.enabled == enable)
+ return 0;
if (enable)
feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN;
- return nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
+ ret = nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
+ if (!ret)
+ micron->ecc.enabled = enable;
+
+ return ret;
+}
+
+static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
+ void *buf, int page,
+ int oob_required)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int step, max_bitflips = 0;
+ int ret;
+
+ if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
+ if (status & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+ }
+
+ /*
+ * The internal ECC doesn't tell us the number of bitflips that have
+ * been corrected, but tells us if it recommends rewriting the block.
+ * If that's the case, we need to read the page in raw mode and compare
+ * its content to the corrected version to extract the actual number of
+ * bitflips.
+ * But before we do that, we must make sure we have all OOB bytes read
+ * in non-raw mode, even if the user did not request those bytes.
+ */
+ if (!oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ ret = nand_read_page_op(chip, page, 0, micron->ecc.rawbuf,
+ mtd->writesize + mtd->oobsize);
+ if (ret)
+ return ret;
+
+ for (step = 0; step < chip->ecc.steps; step++) {
+ unsigned int offs, i, nbitflips = 0;
+ u8 *rawbuf, *corrbuf;
+
+ offs = step * chip->ecc.size;
+ rawbuf = micron->ecc.rawbuf + offs;
+ corrbuf = buf + offs;
+
+ for (i = 0; i < chip->ecc.size; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ offs = (step * 16) + 4;
+ rawbuf = micron->ecc.rawbuf + mtd->writesize + offs;
+ corrbuf = chip->oob_poi + offs;
+
+ for (i = 0; i < chip->ecc.bytes + 4; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ if (WARN_ON(nbitflips > chip->ecc.strength))
+ return -EINVAL;
+
+ max_bitflips = max(nbitflips, max_bitflips);
+ mtd->ecc_stats.corrected += nbitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int micron_nand_on_die_ecc_status_8(struct nand_chip *chip, u8 status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * With 8/512 we have more information but still don't know precisely
+ * how many bit-flips were seen.
+ */
+ switch (status & NAND_ECC_STATUS_MASK) {
+ case NAND_ECC_STATUS_UNCORRECTABLE:
+ mtd->ecc_stats.failed++;
+ return 0;
+ case NAND_ECC_STATUS_1_3_CORRECTED:
+ mtd->ecc_stats.corrected += 3;
+ return 3;
+ case NAND_ECC_STATUS_4_6_CORRECTED:
+ mtd->ecc_stats.corrected += 6;
+ /* rewrite recommended */
+ return 6;
+ case NAND_ECC_STATUS_7_8_CORRECTED:
+ mtd->ecc_stats.corrected += 8;
+ /* rewrite recommended */
+ return 8;
+ default:
+ return 0;
+ }
}
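A worked example of the decode above: a status byte of 0x18 masks to BIT(4) | BIT(3), i.e. NAND_ECC_STATUS_7_8_CORRECTED, so the helper credits 8 corrected bitflips. With chip->ecc.strength = 8 the returned max_bitflips equals the strength, which is enough for the MTD core to return -EUCLEAN so that upper layers can scrub the page, matching the "rewrite recommended" annotation.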
static int
@@ -135,24 +313,18 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
goto out;
- if (status & NAND_STATUS_FAIL)
- mtd->ecc_stats.failed++;
-
- /*
- * The internal ECC doesn't tell us the number of bitflips
- * that have been corrected, but tells us if it recommends to
- * rewrite the block. If it's the case, then we pretend we had
- * a number of bitflips equal to the ECC strength, which will
- * hint the NAND core to rewrite the block.
- */
- else if (status & NAND_STATUS_WRITE_RECOMMENDED)
- max_bitflips = chip->ecc.strength;
-
ret = nand_read_data_op(chip, buf, mtd->writesize, false);
if (!ret && oob_required)
ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
false);
+ if (chip->ecc.strength == 4)
+ max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
+ buf, page,
+ oob_required);
+ else
+ max_bitflips = micron_nand_on_die_ecc_status_8(chip, status);
+
out:
micron_nand_on_die_ecc_setup(chip, false);
@@ -193,6 +365,9 @@ enum {
MICRON_ON_DIE_MANDATORY,
};
+#define MICRON_ID_INTERNAL_ECC_MASK GENMASK(1, 0)
+#define MICRON_ID_ECC_ENABLED BIT(7)
+
/*
* Try to detect if the NAND supports on-die ECC. To do this, we enable
* the feature, and read back if it has been enabled as expected. We
@@ -205,42 +380,52 @@ enum {
*/
static int micron_supports_on_die_ecc(struct nand_chip *chip)
{
- u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ u8 id[5];
int ret;
- if (!chip->parameters.onfi.version)
+ if (!chip->parameters.onfi)
return MICRON_ON_DIE_UNSUPPORTED;
if (chip->bits_per_cell != 1)
return MICRON_ON_DIE_UNSUPPORTED;
+ /*
+ * We only support on-die ECC of 4/512 or 8/512
+ */
+ if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /* 0x2 means on-die ECC is available. */
+ if (chip->id.len != 5 ||
+ (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
- ret = nand_get_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
- if (ret < 0)
- return ret;
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
- if ((feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN) == 0)
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
return MICRON_ON_DIE_UNSUPPORTED;
ret = micron_nand_on_die_ecc_setup(chip, false);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
- ret = nand_get_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
- if (ret < 0)
- return ret;
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
- if (feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN)
+ if (id[4] & MICRON_ID_ECC_ENABLED)
return MICRON_ON_DIE_MANDATORY;
/*
- * Some Micron NANDs have an on-die ECC of 4/512, some other
- * 8/512. We only support the former.
+ * We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->ecc_strength_ds != 4)
+ if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
return MICRON_ON_DIE_UNSUPPORTED;
return MICRON_ON_DIE_SUPPORTED;
@@ -249,44 +434,116 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
static int micron_nand_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct micron_nand *micron;
int ondie;
int ret;
+ micron = kzalloc(sizeof(*micron), GFP_KERNEL);
+ if (!micron)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, micron);
+
ret = micron_nand_onfi_init(chip);
if (ret)
- return ret;
+ goto err_free_manuf_data;
if (mtd->writesize == 2048)
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
ondie = micron_supports_on_die_ecc(chip);
- if (ondie == MICRON_ON_DIE_MANDATORY) {
+ if (ondie == MICRON_ON_DIE_MANDATORY &&
+ chip->ecc.mode != NAND_ECC_ON_DIE) {
pr_err("On-die ECC forcefully enabled, not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_manuf_data;
}
if (chip->ecc.mode == NAND_ECC_ON_DIE) {
if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
pr_err("On-die ECC selected but not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ micron->ecc.forced = true;
+ micron->ecc.enabled = true;
+ }
+
+ /*
+ * In case of 4-bit on-die ECC, we need a buffer to store a
+ * page dumped in raw mode so that we can compare its content
+ * to the same page after ECC correction and extract the real
+ * number of bitflips from this comparison.
+ * That's not needed for 8-bit ECC, because the status exposes
+ * a better approximation of the number of bitflips in a page.
+ */
+ if (chip->ecc_strength_ds == 4) {
+ micron->ecc.rawbuf = kmalloc(mtd->writesize +
+ mtd->oobsize,
+ GFP_KERNEL);
+ if (!micron->ecc.rawbuf) {
+ ret = -ENOMEM;
+ goto err_free_manuf_data;
+ }
}
- chip->ecc.bytes = 8;
+ if (chip->ecc_strength_ds == 4)
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_4_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_8_ooblayout_ops);
+
+ chip->ecc.bytes = chip->ecc_strength_ds * 2;
chip->ecc.size = 512;
- chip->ecc.strength = 4;
+ chip->ecc.strength = chip->ecc_strength_ds;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+ } else {
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ }
}
return 0;
+
+err_free_manuf_data:
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+
+ return ret;
+}
+
+static void micron_nand_cleanup(struct nand_chip *chip)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+}
+
+static void micron_fixup_onfi_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ /*
+ * MT29F1G08ABAFAWP-ITE:F and possibly others report 00 00 for the
+ * revision number field of the ONFI parameter page. Assume ONFI
+ * version 1.0 if the revision number is 00 00.
+ */
+ if (le16_to_cpu(p->revision) == 0)
+ p->revision = cpu_to_le16(ONFI_VERSION_1_0);
}
const struct nand_manufacturer_ops micron_nand_manuf_ops = {
.init = micron_nand_init,
+ .cleanup = micron_nand_cleanup,
+ .fixup_onfi_param_page = micron_fixup_onfi_param_page,
};
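Note that ->fixup_onfi_param_page() is invoked from nand_flash_detect_onfi() just before the revision check (see the nand_base.c hunk above), which is what lets micron_fixup_onfi_param_page() repair the zeroed revision field before the core would otherwise reject the chip as "unsupported ONFI version: 0".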
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index 7c4e4a371bbc..ebc7b5f76f77 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -13,6 +13,8 @@
#include <linux/export.h>
#include <linux/mtd/rawnand.h>
+#define ONFI_DYN_TIMING_MAX U16_MAX
+
static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 0 */
{
@@ -292,6 +294,7 @@ int onfi_fill_data_interface(struct nand_chip *chip,
int timing_mode)
{
struct nand_data_interface *iface = &chip->data_interface;
+ struct onfi_params *onfi = chip->parameters.onfi;
if (type != NAND_SDR_IFACE)
return -EINVAL;
@@ -303,20 +306,35 @@ int onfi_fill_data_interface(struct nand_chip *chip,
/*
* Initialize timings that cannot be deduced from timing mode:
- * tR, tPROG, tCCS, ...
+ * tPROG, tBERS, tR and tCCS.
* This information is part of the ONFI parameter page.
*/
- if (chip->parameters.onfi.version) {
- struct nand_parameters *params = &chip->parameters;
+ if (onfi) {
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * onfi->tPROG;
+ timings->tBERS_max = 1000000ULL * onfi->tBERS;
+ timings->tR_max = 1000000ULL * onfi->tR;
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * onfi->tCCS;
+ } else {
struct nand_sdr_timings *timings = &iface->timings.sdr;
+ /*
+ * For non-ONFI chips we use the highest possible value for
+ * tPROG and tBERS. tR and tCCS will take the default values
+ * given in the ONFI specification for timing mode 0,
+ * respectively 200us and 500ns.
+ */
/* microseconds -> picoseconds */
- timings->tPROG_max = 1000000ULL * params->onfi.tPROG;
- timings->tBERS_max = 1000000ULL * params->onfi.tBERS;
- timings->tR_max = 1000000ULL * params->onfi.tR;
+ timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
+ timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
+ timings->tR_max = 1000000ULL * 200;
/* nanoseconds -> picoseconds */
- timings->tCCS_min = 1000UL * params->onfi.tCCS;
+ timings->tCCS_min = 1000UL * 500;
}
return 0;
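A quick sanity check of the conversions above: an ONFI chip reporting tPROG = 600 (the field is expressed in microseconds) yields tPROG_max = 600 * 1000000 = 600,000,000 ps, i.e. 600 us. The mode-0 defaults mentioned in the comment work out the same way: 200 us for tR gives 1000000ULL * 200 = 200,000,000 ps, and 500 ns for tCCS gives 1000UL * 500 = 500,000 ps. The ONFI_DYN_TIMING_MAX fallback of U16_MAX microseconds for tPROG/tBERS amounts to roughly 65.5 ms, a deliberately pessimistic upper bound for chips that do not publish these values.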
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index f8edacde49ab..71ac034aee9c 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2192,6 +2192,48 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
return;
}
+static int ns_attach_chip(struct nand_chip *chip)
+{
+ unsigned int eccsteps, eccbytes;
+
+ if (!bch)
+ return 0;
+
+ if (!mtd_nand_has_bch()) {
+ NS_ERR("BCH ECC support is disabled\n");
+ return -EINVAL;
+ }
+
+ /* Use 512-byte ecc blocks */
+ eccsteps = nsmtd->writesize / 512;
+ eccbytes = ((bch * 13) + 7) / 8;
+
+ /* Do not bother supporting small page devices */
+ if (nsmtd->oobsize < 64 || !eccsteps) {
+ NS_ERR("BCH not available on small page devices\n");
+ return -EINVAL;
+ }
+
+ if (((eccbytes * eccsteps) + 2) > nsmtd->oobsize) {
+ NS_ERR("Invalid BCH value %u\n", bch);
+ return -EINVAL;
+ }
+
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.size = 512;
+ chip->ecc.strength = bch;
+ chip->ecc.bytes = eccbytes;
+
+ NS_INFO("Using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+
+ return 0;
+}
+
+static const struct nand_controller_ops ns_controller_ops = {
+ .attach_chip = ns_attach_chip,
+};
+
/*
* Module initialization function
*/
@@ -2276,44 +2318,10 @@ static int __init ns_init_module(void)
if ((retval = parse_gravepages()) != 0)
goto error;
- retval = nand_scan_ident(nsmtd, 1, NULL);
- if (retval) {
- NS_ERR("cannot scan NAND Simulator device\n");
- goto error;
- }
-
- if (bch) {
- unsigned int eccsteps, eccbytes;
- if (!mtd_nand_has_bch()) {
- NS_ERR("BCH ECC support is disabled\n");
- retval = -EINVAL;
- goto error;
- }
- /* use 512-byte ecc blocks */
- eccsteps = nsmtd->writesize/512;
- eccbytes = (bch*13+7)/8;
- /* do not bother supporting small page devices */
- if ((nsmtd->oobsize < 64) || !eccsteps) {
- NS_ERR("bch not available on small page devices\n");
- retval = -EINVAL;
- goto error;
- }
- if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
- NS_ERR("invalid bch value %u\n", bch);
- retval = -EINVAL;
- goto error;
- }
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_BCH;
- chip->ecc.size = 512;
- chip->ecc.strength = bch;
- chip->ecc.bytes = eccbytes;
- NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
- }
-
- retval = nand_scan_tail(nsmtd);
+ chip->dummy_controller.ops = &ns_controller_ops;
+ retval = nand_scan(nsmtd, 1);
if (retval) {
- NS_ERR("can't register NAND Simulator\n");
+ NS_ERR("Could not scan NAND Simulator device\n");
goto error;
}
@@ -2337,7 +2345,7 @@ static int __init ns_init_module(void)
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
- if ((retval = chip->scan_bbt(nsmtd)) != 0)
+ if ((retval = nand_create_bbt(chip)) != 0)
goto err_exit;
if ((retval = parse_badblocks(nand, nsmtd)) != 0)
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index d8a806894937..540fa1a0ea24 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -39,7 +39,7 @@ struct ndfc_controller {
void __iomem *ndfcbase;
struct nand_chip chip;
int chip_select;
- struct nand_hw_control ndfc_control;
+ struct nand_controller ndfc_control;
};
static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -218,7 +218,7 @@ static int ndfc_probe(struct platform_device *ofdev)
ndfc = &ndfc_ctrl[cs];
ndfc->chip_select = cs;
- nand_hw_control_init(&ndfc->ndfc_control);
+ nand_controller_init(&ndfc->ndfc_control);
ndfc->ofdev = ofdev;
dev_set_drvdata(&ofdev->dev, ndfc);
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index e50c64adc3c8..4546ac0bed4a 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -144,12 +144,6 @@ static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
-/* Shared among all NAND instances to synchronize access to the ECC Engine */
-static struct nand_hw_control omap_gpmc_controller = {
- .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
-};
-
struct omap_nand_info {
struct nand_chip nand;
struct platform_device *pdev;
@@ -1915,106 +1909,26 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
.free = omap_sw_ooblayout_free,
};
-static int omap_nand_probe(struct platform_device *pdev)
+static int omap_nand_attach_chip(struct nand_chip *chip)
{
- struct omap_nand_info *info;
- struct mtd_info *mtd;
- struct nand_chip *nand_chip;
- int err;
- dma_cap_mask_t mask;
- struct resource *res;
- struct device *dev = &pdev->dev;
- int min_oobbytes = BADBLOCK_MARKER_LENGTH;
- int oobbytes_per_step;
-
- info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->pdev = pdev;
-
- err = omap_get_dt_info(dev, info);
- if (err)
- return err;
-
- info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
- if (!info->ops) {
- dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
- return -ENODEV;
- }
-
- nand_chip = &info->nand;
- mtd = nand_to_mtd(nand_chip);
- mtd->dev.parent = &pdev->dev;
- nand_chip->ecc.priv = NULL;
- nand_set_flash_node(nand_chip, dev->of_node);
-
- if (!mtd->name) {
- mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "omap2-nand.%d", info->gpmc_cs);
- if (!mtd->name) {
- dev_err(&pdev->dev, "Failed to set MTD name\n");
- return -ENOMEM;
- }
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nand_chip->IO_ADDR_R))
- return PTR_ERR(nand_chip->IO_ADDR_R);
-
- info->phys_base = res->start;
-
- nand_chip->controller = &omap_gpmc_controller;
-
- nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
- nand_chip->cmd_ctrl = omap_hwcontrol;
-
- info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
- GPIOD_IN);
- if (IS_ERR(info->ready_gpiod)) {
- dev_err(dev, "failed to get ready gpio\n");
- return PTR_ERR(info->ready_gpiod);
- }
-
- /*
- * If RDY/BSY line is connected to OMAP then use the omap ready
- * function and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line. Otherwise use a standard
- * chip delay which is slightly more than tR (AC Timing) of the NAND
- * device and read status register until you get a failure or success
- */
- if (info->ready_gpiod) {
- nand_chip->dev_ready = omap_dev_ready;
- nand_chip->chip_delay = 0;
- } else {
- nand_chip->waitfunc = omap_wait;
- nand_chip->chip_delay = 50;
- }
-
- if (info->flash_bbt)
- nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
-
- /* scan NAND device connected to chip controller */
- nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
- err = nand_scan_ident(mtd, 1, NULL);
- if (err) {
- dev_err(&info->pdev->dev,
- "scan failed, may be bus-width mismatch\n");
- goto return_error;
- }
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct device *dev = &info->pdev->dev;
+ int min_oobbytes = BADBLOCK_MARKER_LENGTH;
+ int oobbytes_per_step;
+ dma_cap_mask_t mask;
+ int err;
- if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
- nand_chip->bbt_options |= NAND_BBT_NO_OOB;
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
else
- nand_chip->options |= NAND_SKIP_BBTSCAN;
+ chip->options |= NAND_SKIP_BBTSCAN;
- /* re-populate low-level callbacks based on xfer modes */
+ /* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- nand_chip->read_buf = omap_read_buf_pref;
- nand_chip->write_buf = omap_write_buf_pref;
+ chip->read_buf = omap_read_buf_pref;
+ chip->write_buf = omap_write_buf_pref;
break;
case NAND_OMAP_POLLED:
@@ -2024,12 +1938,11 @@ static int omap_nand_probe(struct platform_device *pdev)
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- info->dma = dma_request_chan(pdev->dev.parent, "rxtx");
+ info->dma = dma_request_chan(dev->parent, "rxtx");
if (IS_ERR(info->dma)) {
- dev_err(&pdev->dev, "DMA engine request failed\n");
- err = PTR_ERR(info->dma);
- goto return_error;
+ dev_err(dev, "DMA engine request failed\n");
+ return PTR_ERR(info->dma);
} else {
struct dma_slave_config cfg;
@@ -2042,222 +1955,306 @@ static int omap_nand_probe(struct platform_device *pdev)
cfg.dst_maxburst = 16;
err = dmaengine_slave_config(info->dma, &cfg);
if (err) {
- dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+ dev_err(dev,
+ "DMA engine slave config failed: %d\n",
err);
- goto return_error;
+ return err;
}
- nand_chip->read_buf = omap_read_buf_dma_pref;
- nand_chip->write_buf = omap_write_buf_dma_pref;
+ chip->read_buf = omap_read_buf_dma_pref;
+ chip->write_buf = omap_write_buf_dma_pref;
}
break;
case NAND_OMAP_PREFETCH_IRQ:
- info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+ info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
if (info->gpmc_irq_fifo <= 0) {
- dev_err(&pdev->dev, "error getting fifo irq\n");
- err = -ENODEV;
- goto return_error;
+ dev_err(dev, "Error getting fifo IRQ\n");
+ return -ENODEV;
}
- err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
- omap_nand_irq, IRQF_SHARED,
- "gpmc-nand-fifo", info);
+ err = devm_request_irq(dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
if (err) {
- dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- info->gpmc_irq_fifo, err);
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_fifo, err);
info->gpmc_irq_fifo = 0;
- goto return_error;
+ return err;
}
- info->gpmc_irq_count = platform_get_irq(pdev, 1);
+ info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
if (info->gpmc_irq_count <= 0) {
- dev_err(&pdev->dev, "error getting count irq\n");
- err = -ENODEV;
- goto return_error;
+ dev_err(dev, "Error getting IRQ count\n");
+ return -ENODEV;
}
- err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
- omap_nand_irq, IRQF_SHARED,
- "gpmc-nand-count", info);
+ err = devm_request_irq(dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
if (err) {
- dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- info->gpmc_irq_count, err);
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_count, err);
info->gpmc_irq_count = 0;
- goto return_error;
+ return err;
}
- nand_chip->read_buf = omap_read_buf_irq_pref;
- nand_chip->write_buf = omap_write_buf_irq_pref;
+ chip->read_buf = omap_read_buf_irq_pref;
+ chip->write_buf = omap_write_buf_irq_pref;
break;
default:
- dev_err(&pdev->dev,
- "xfer_type(%d) not supported!\n", info->xfer_type);
- err = -EINVAL;
- goto return_error;
+ dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
+ return -EINVAL;
}
- if (!omap2_nand_ecc_check(info)) {
- err = -EINVAL;
- goto return_error;
- }
+ if (!omap2_nand_ecc_check(info))
+ return -EINVAL;
/*
* Bail out earlier to let NAND_ECC_SOFT code create its own
* ooblayout instead of using ours.
*/
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
- nand_chip->ecc.mode = NAND_ECC_SOFT;
- nand_chip->ecc.algo = NAND_ECC_HAMMING;
- goto scan_tail;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ return 0;
}
- /* populate MTD interface based on ECC scheme */
+ /* Populate MTD interface based on ECC scheme */
switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_HW:
- pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.bytes = 3;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.strength = 1;
- nand_chip->ecc.calculate = omap_calculate_ecc;
- nand_chip->ecc.hwctl = omap_enable_hwecc;
- nand_chip->ecc.correct = omap_correct_data;
+ dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.bytes = 3;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 1;
+ chip->ecc.calculate = omap_calculate_ecc;
+ chip->ecc.hwctl = omap_enable_hwecc;
+ chip->ecc.correct = omap_correct_data;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
- if (!(nand_chip->options & NAND_BUSWIDTH_16))
- min_oobbytes = 1;
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ min_oobbytes = 1;
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.bytes = 7;
- nand_chip->ecc.strength = 4;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 7;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
- oobbytes_per_step = nand_chip->ecc.bytes + 1;
- /* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd);
- if (!nand_chip->ecc.priv) {
- dev_err(&info->pdev->dev, "unable to use BCH library\n");
- err = -EINVAL;
- goto return_error;
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ chip->ecc.priv = nand_bch_init(mtd);
+ if (!chip->ecc.priv) {
+ dev_err(dev, "Unable to use BCH library\n");
+ return -EINVAL;
}
break;
case OMAP_ECC_BCH4_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
- nand_chip->ecc.bytes = 7 + 1;
- nand_chip->ecc.strength = 4;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.read_page = omap_read_page_bch;
- nand_chip->ecc.write_page = omap_write_page_bch;
- nand_chip->ecc.write_subpage = omap_write_subpage_bch;
+ chip->ecc.bytes = 7 + 1;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH4_ECC,
- mtd->writesize / nand_chip->ecc.size,
- nand_chip->ecc.size, nand_chip->ecc.bytes);
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
if (err < 0)
- goto return_error;
+ return err;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.bytes = 13;
- nand_chip->ecc.strength = 8;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 13;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
- oobbytes_per_step = nand_chip->ecc.bytes + 1;
- /* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd);
- if (!nand_chip->ecc.priv) {
- dev_err(&info->pdev->dev, "unable to use BCH library\n");
- err = -EINVAL;
- goto return_error;
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ chip->ecc.priv = nand_bch_init(mtd);
+ if (!chip->ecc.priv) {
+ dev_err(dev, "unable to use BCH library\n");
+ return -EINVAL;
}
break;
case OMAP_ECC_BCH8_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
- nand_chip->ecc.bytes = 13 + 1;
- nand_chip->ecc.strength = 8;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.read_page = omap_read_page_bch;
- nand_chip->ecc.write_page = omap_write_page_bch;
- nand_chip->ecc.write_subpage = omap_write_subpage_bch;
+ chip->ecc.bytes = 13 + 1;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH8_ECC,
- mtd->writesize / nand_chip->ecc.size,
- nand_chip->ecc.size, nand_chip->ecc.bytes);
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
if (err < 0)
- goto return_error;
+ return err;
break;
case OMAP_ECC_BCH16_CODE_HW:
- pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.bytes = 26;
- nand_chip->ecc.strength = 16;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.read_page = omap_read_page_bch;
- nand_chip->ecc.write_page = omap_write_page_bch;
- nand_chip->ecc.write_subpage = omap_write_subpage_bch;
+ pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 26;
+ chip->ecc.strength = 16;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH16_ECC,
- mtd->writesize / nand_chip->ecc.size,
- nand_chip->ecc.size, nand_chip->ecc.bytes);
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
if (err < 0)
- goto return_error;
+ return err;
break;
default:
- dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
- err = -EINVAL;
- goto return_error;
+ dev_err(dev, "Invalid or unsupported ECC scheme\n");
+ return -EINVAL;
}
- /* check if NAND device's OOB is enough to store ECC signatures */
+ /* Check if NAND device's OOB is enough to store ECC signatures */
min_oobbytes += (oobbytes_per_step *
- (mtd->writesize / nand_chip->ecc.size));
+ (mtd->writesize / chip->ecc.size));
if (mtd->oobsize < min_oobbytes) {
- dev_err(&info->pdev->dev,
- "not enough OOB bytes required = %d, available=%d\n",
+ dev_err(dev,
+ "Not enough OOB bytes: required = %d, available=%d\n",
min_oobbytes, mtd->oobsize);
- err = -EINVAL;
- goto return_error;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops omap_nand_controller_ops = {
+ .attach_chip = omap_nand_attach_chip,
+};
+
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_controller omap_gpmc_controller = {
+ .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
+ .ops = &omap_nand_controller_ops,
+};
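
With the ops wired up this way, nand_scan() drives the whole sequence itself: it identifies the chip, calls the attach_chip() hook so the driver can finish ECC setup against the detected geometry, and then performs the final scan. A minimal sketch of the pattern, with hypothetical driver names (my_attach_chip, my_controller_ops) standing in for the real ones:

#include <linux/errno.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>

/* Runs between nand_scan_ident() and nand_scan_tail(), when the page and
 * OOB geometry in mtd are already known. */
static int my_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Geometry-dependent decisions belong here, not in probe() */
	if (mtd->writesize > SZ_4K)
		return -EINVAL;

	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;

	return 0;
}

static const struct nand_controller_ops my_controller_ops = {
	.attach_chip = my_attach_chip,
};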
+
+static int omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int err;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdev = pdev;
+
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
+
+ info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
+ if (!info->ops) {
+ dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
+ return -ENODEV;
}
-scan_tail:
- /* second phase scan */
- err = nand_scan_tail(mtd);
+ nand_chip = &info->nand;
+ mtd = nand_to_mtd(nand_chip);
+ mtd->dev.parent = &pdev->dev;
+ nand_chip->ecc.priv = NULL;
+ nand_set_flash_node(nand_chip, dev->of_node);
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "omap2-nand.%d", info->gpmc_cs);
+ if (!mtd->name) {
+ dev_err(&pdev->dev, "Failed to set MTD name\n");
+ return -ENOMEM;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nand_chip->IO_ADDR_R))
+ return PTR_ERR(nand_chip->IO_ADDR_R);
+
+ info->phys_base = res->start;
+
+ nand_chip->controller = &omap_gpmc_controller;
+
+ nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+ nand_chip->cmd_ctrl = omap_hwcontrol;
+
+ info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
+ GPIOD_IN);
+ if (IS_ERR(info->ready_gpiod)) {
+ dev_err(dev, "failed to get ready gpio\n");
+ return PTR_ERR(info->ready_gpiod);
+ }
+
+ /*
+	 * If the RDY/BSY line is connected to the OMAP, use the omap ready
+	 * function and the generic nand_wait function, which reads the status
+	 * register after monitoring the RDY/BSY line. Otherwise use a standard
+	 * chip delay slightly longer than tR (AC timing) of the NAND device,
+	 * and read the status register until failure or success.
+ */
+ if (info->ready_gpiod) {
+ nand_chip->dev_ready = omap_dev_ready;
+ nand_chip->chip_delay = 0;
+ } else {
+ nand_chip->waitfunc = omap_wait;
+ nand_chip->chip_delay = 50;
+ }
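
For reference, a dev_ready() callback backed by such a GPIO descriptor is just a level read. A minimal sketch under the legacy (mtd-based) callback signature, with a hypothetical private struct:

#include <linux/gpio/consumer.h>
#include <linux/mtd/rawnand.h>

struct my_nand_info {			/* hypothetical driver state */
	struct nand_chip chip;
	struct gpio_desc *ready_gpiod;
};

static int my_dev_ready(struct mtd_info *mtd)
{
	struct my_nand_info *info =
		container_of(mtd_to_nand(mtd), struct my_nand_info, chip);

	/* R/B# is driven high once the chip is ready for a new command */
	return gpiod_get_value(info->ready_gpiod);
}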
+
+ if (info->flash_bbt)
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	/* Scan the NAND device connected to the controller */
+ nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
+
+ err = nand_scan(mtd, 1);
if (err)
goto return_error;
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 7825fd3ce66b..52d435285a3f 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -18,7 +18,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <linux/platform_data/mtd-orion_nand.h>
struct orion_nand_info {
@@ -52,7 +52,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
void __iomem *io_base = chip->IO_ADDR_R;
-#if __LINUX_ARM_ARCH__ >= 5
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
uint64_t *buf64;
#endif
int i = 0;
@@ -61,7 +61,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
*buf++ = readb(io_base);
len--;
}
-#if __LINUX_ARM_ARCH__ >= 5
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
buf64 = (uint64_t *)buf;
while (i < len/8) {
/*
@@ -153,9 +153,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
- if (board->dev_ready)
- nc->dev_ready = board->dev_ready;
-
platform_set_drvdata(pdev, info);
/* Not all platforms can gate the clock, so it is not
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
index d649d5944826..01b00bb69c1e 100644
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -32,7 +32,7 @@
#define OXNAS_NAND_MAX_CHIPS 1
struct oxnas_nand_ctrl {
- struct nand_hw_control base;
+ struct nand_controller base;
void __iomem *io_base;
struct clk *clk;
struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
@@ -96,7 +96,7 @@ static int oxnas_nand_probe(struct platform_device *pdev)
if (!oxnas)
return -ENOMEM;
- nand_hw_control_init(&oxnas->base);
+ nand_controller_init(&oxnas->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
oxnas->io_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 925a1323604d..222626df4b96 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -67,12 +67,10 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.select_chip = pdata->ctrl.select_chip;
data->chip.write_buf = pdata->ctrl.write_buf;
data->chip.read_buf = pdata->ctrl.read_buf;
- data->chip.read_byte = pdata->ctrl.read_byte;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
- data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
data->chip.ecc.mode = NAND_ECC_SOFT;
data->chip.ecc.algo = NAND_ECC_HAMMING;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 6a5519f0ff25..d1d470bb32e4 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -213,6 +213,8 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
/*
* Flags used in DMA descriptor preparation helper functions
* (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
@@ -245,6 +247,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @tx_sgl_start - start index in data sgl for tx.
* @rx_sgl_pos - current index in data sgl for rx.
* @rx_sgl_start - start index in data sgl for rx.
+ * @wait_second_completion - wait for second DMA desc completion before making
+ * the NAND transfer completion.
+ * @txn_done - completion for NAND transfer.
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
@@ -258,6 +265,10 @@ struct bam_transaction {
u32 tx_sgl_start;
u32 rx_sgl_pos;
u32 rx_sgl_start;
+ bool wait_second_completion;
+ struct completion txn_done;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
@@ -354,7 +365,7 @@ struct nandc_regs {
* from all connected NAND devices pagesize
*/
struct qcom_nand_controller {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct list_head host_list;
struct device *dev;
@@ -504,6 +515,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn->data_sgl = bam_txn_buf;
+ init_completion(&bam_txn->txn_done);
+
return bam_txn;
}
@@ -523,11 +536,33 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn->tx_sgl_start = 0;
bam_txn->rx_sgl_pos = 0;
bam_txn->rx_sgl_start = 0;
+ bam_txn->last_data_desc = NULL;
+ bam_txn->wait_second_completion = false;
sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
QPIC_PER_CW_CMD_SGL);
sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
QPIC_PER_CW_DATA_SGL);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+
+/* Callback for DMA descriptor completion */
+static void qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ /*
+ * In case of data transfer with NAND, 2 callbacks will be generated.
+ * One for command channel and another one for data channel.
+ * If current transaction has data descriptors
+ * (i.e. wait_second_completion is true), then set this to false
+ * and wait for second DMA descriptor completion.
+ */
+ if (bam_txn->wait_second_completion)
+ bam_txn->wait_second_completion = false;
+ else
+ complete(&bam_txn->txn_done);
}
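
The rendezvous this implements can be shown in isolation: up to two DMA callbacks (command channel, data channel) funnel into one struct completion, and the submitter blocks with a timeout instead of polling dma_sync_wait(). A condensed sketch with hypothetical names, not the driver's exact code:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_txn {
	struct completion done;
	bool wait_second;	/* true when both cmd and data descs exist */
};

static void my_desc_done(void *data)
{
	struct my_txn *txn = data;

	if (txn->wait_second)
		txn->wait_second = false;	/* first of two callbacks */
	else
		complete(&txn->done);		/* last callback finishes it */
}

static int my_wait_txn(struct my_txn *txn)
{
	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(&txn->done, msecs_to_jiffies(2000)))
		return -ETIMEDOUT;

	return 0;
}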
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
@@ -756,6 +791,12 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
desc->dma_desc = dma_desc;
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
list_add_tail(&desc->node, &nandc->desc_list);
return 0;
@@ -1055,7 +1096,8 @@ static void config_nand_page_read(struct qcom_nand_controller *nandc)
* Helper to prepare DMA descriptors for configuring registers
* before reading each codeword in NAND page.
*/
-static void config_nand_cw_read(struct qcom_nand_controller *nandc)
+static void
+config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
if (nandc->props->is_bam)
write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
@@ -1064,19 +1106,25 @@ static void config_nand_cw_read(struct qcom_nand_controller *nandc)
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ if (use_ecc) {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+ } else {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
}
/*
* Helper to prepare dma descriptors to configure registers needed for reading a
* single codeword in page
*/
-static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
+static void
+config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
+ bool use_ecc)
{
config_nand_page_read(nandc);
- config_nand_cw_read(nandc);
+ config_nand_cw_read(nandc, use_ecc);
}
/*
@@ -1157,7 +1205,7 @@ static int nandc_param(struct qcom_nand_host *host)
nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
- config_nand_single_cw_page_read(nandc);
+ config_nand_single_cw_page_read(nandc, false);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
nandc->buf_count, 0);
@@ -1273,10 +1321,20 @@ static int submit_descs(struct qcom_nand_controller *nandc)
cookie = dmaengine_submit(desc->dma_desc);
if (nandc->props->is_bam) {
+ bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+ if (bam_txn->last_data_desc) {
+ bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_data_desc->callback_param = bam_txn;
+ bam_txn->wait_second_completion = true;
+ }
+
dma_async_issue_pending(nandc->tx_chan);
dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
- if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
return -ETIMEDOUT;
} else {
if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
@@ -1512,20 +1570,180 @@ struct read_stats {
__le32 erased_cw;
};
+/* reads back FLASH_STATUS register set by the controller */
+static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int i;
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* performs raw read for one codeword */
+static int
+qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf, int page, int cw)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ host->use_ecc = false;
+
+ clear_bam_transaction(nandc);
+ set_address(host, host->cw_size * cw, page);
+ update_rw_regs(host, 1, true);
+ config_nand_page_read(nandc);
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (cw == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) * 4);
+ oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ if (nandc->props->is_bam) {
+ nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
+ }
+
+ config_nand_cw_read(nandc, false);
+
+ read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = submit_descs(nandc);
+ free_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return check_flash_errors(host, 1);
+}
+
+/*
+ * Bitflips can happen in erased codewords as well, so this function counts
+ * the number of 0 bits in each CW for which the ECC engine returned an
+ * uncorrectable error. The page is assumed to be erased if this count is
+ * less than or equal to ecc->strength for each CW.
+ *
+ * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
+ *    top-level API can be called with only the data buf or only the OOB buf,
+ *    so use chip->data_buf if the data buf is null and chip->oob_poi if the
+ *    oob buf is null for copying the raw bytes.
+ * 2. Perform a raw read for every CW that has uncorrectable errors.
+ * 3. For each CW, check the number of 0 bits in cw_data and the usable OOB
+ *    bytes. Bitflips in the BBM and spare bytes don't affect the ECC, so
+ *    don't count bitflips in those areas.
+ */
+static int
+check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, unsigned long uncorrectable_cws,
+ int page, unsigned int max_bitflips)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *cw_data_buf, *cw_oob_buf;
+ int cw, data_size, oob_size, ret = 0;
+
+ if (!data_buf) {
+ data_buf = chip->data_buf;
+ chip->pagebuf = -1;
+ }
+
+ if (!oob_buf) {
+ oob_buf = chip->oob_poi;
+ chip->pagebuf = -1;
+ }
+
+ for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
+ if (cw == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) * 4);
+ oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw;
+ }
+
+ /* determine starting buffer address for current CW */
+ cw_data_buf = data_buf + (cw * host->cw_data);
+ cw_oob_buf = oob_buf + (cw * ecc->bytes);
+
+ ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
+ cw_oob_buf, page, cw);
+ if (ret)
+ return ret;
+
+ /*
+ * make sure it isn't an erased page reported
+ * as not-erased by HW because of a few bitflips
+ */
+ ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
+ cw_oob_buf + host->bbm_size,
+ oob_size, NULL,
+ 0, ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+ }
+
+ return max_bitflips;
+}
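
nand_check_erased_ecc_chunk() does the heavy lifting above: it counts the 0 bits across the supplied data and ECC/OOB regions, and either rewrites the buffers to clean 0xff and returns the bitflip count, or returns -EBADMSG when the count exceeds the threshold. A minimal usage sketch, with the 516-byte data size and 7-byte OOB size assumed purely for illustration:

#include <linux/mtd/rawnand.h>

static int my_check_erased_cw(u8 *data, u8 *oob, int strength)
{
	int ret = nand_check_erased_ecc_chunk(data, 516, oob, 7,
					      NULL, 0, strength);
	if (ret < 0)
		return ret;	/* -EBADMSG: genuinely uncorrectable */

	/* ret bitflips were absorbed; buffers now read as all 0xff */
	return ret;
}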
+
/*
* reads back status registers set by the controller to notify page read
* errors. this is equivalent to what 'ecc->correct()' would do.
*/
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
- u8 *oob_buf)
+ u8 *oob_buf, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- unsigned int max_bitflips = 0;
+ unsigned int max_bitflips = 0, uncorrectable_cws = 0;
struct read_stats *buf;
+ bool flash_op_err = false, erased;
int i;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
nandc_read_buffer_sync(nandc, true);
@@ -1546,48 +1764,49 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
buffer = le32_to_cpu(buf->buffer);
erased_cw = le32_to_cpu(buf->erased_cw);
- if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
- bool erased;
-
- /* ignore erased codeword errors */
+ /*
+	 * Check for ECC failure in each codeword. An ECC failure can
+	 * happen in either of the following conditions:
+	 * 1. The number of bitflips is greater than the ECC engine's
+	 *    capability.
+	 * 2. This codeword contains all 0xff, for which the erased
+	 *    codeword detection check will be done.
+ */
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ /*
+ * For BCH ECC, ignore erased codeword errors, if
+ * ERASED_CW bits are set.
+ */
if (host->bch_enabled) {
erased = (erased_cw & ERASED_CW) == ERASED_CW ?
true : false;
- } else {
+ /*
+ * For RS ECC, HW reports the erased CW by placing
+ * special characters at certain offsets in the buffer.
+ * These special characters will be valid only if
+			 * the complete page is read, i.e. data_buf is not NULL.
+ */
+ } else if (data_buf) {
erased = erased_chunk_check_and_fixup(data_buf,
data_len);
+ } else {
+ erased = false;
}
- if (erased) {
- data_buf += data_len;
- if (oob_buf)
- oob_buf += oob_len + ecc->bytes;
- continue;
- }
-
- if (buffer & BS_UNCORRECTABLE_BIT) {
- int ret, ecclen, extraooblen;
- void *eccbuf;
-
- eccbuf = oob_buf ? oob_buf + oob_len : NULL;
- ecclen = oob_buf ? host->ecc_bytes_hw : 0;
- extraooblen = oob_buf ? oob_len : 0;
-
- /*
- * make sure it isn't an erased page reported
- * as not-erased by HW because of a few bitflips
- */
- ret = nand_check_erased_ecc_chunk(data_buf,
- data_len, eccbuf, ecclen, oob_buf,
- extraooblen, ecc->strength);
- if (ret < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += ret;
- max_bitflips =
- max_t(unsigned int, max_bitflips, ret);
- }
- }
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+ /*
+ * Check if MPU or any other operational error (timeout,
+ * device failure, etc.) happened for this codeword and
+		 * set flash_op_err. If flash_op_err is set, then
+		 * -EIO will be returned for the page read.
+ */
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ /*
+ * No ECC or operational errors happened. Check the number of
+ * bits corrected and update the ecc_stats.corrected.
+ */
} else {
unsigned int stat;
@@ -1596,12 +1815,21 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
max_bitflips = max(max_bitflips, stat);
}
- data_buf += data_len;
+ if (data_buf)
+ data_buf += data_len;
if (oob_buf)
oob_buf += oob_len + ecc->bytes;
}
- return max_bitflips;
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ return max_bitflips;
+
+ return check_for_erased_page(host, data_buf_start, oob_buf_start,
+ uncorrectable_cws, page,
+ max_bitflips);
}
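
Read as a decision table, the per-codeword classification above collapses to three cases (a compact restatement of the logic, not additional driver code):

/*
 * FS_OP_ERR with BS_UNCORRECTABLE_BIT set: possibly an erased CW; verify,
 *     and if not erased, record the CW in uncorrectable_cws.
 * FS_OP_ERR or FS_MPU_ERR otherwise: operational failure; the whole page
 *     read returns -EIO.
 * No error bits: accumulate the corrected bitflip count into max_bitflips.
 */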
/*
@@ -1609,11 +1837,12 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
* ecc->read_oob()
*/
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
- u8 *oob_buf)
+ u8 *oob_buf, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
int i, ret;
config_nand_page_read(nandc);
@@ -1644,7 +1873,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
}
}
- config_nand_cw_read(nandc);
+ config_nand_cw_read(nandc, true);
if (data_buf)
read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
@@ -1674,12 +1903,14 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
}
ret = submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev, "failure to read page/oob\n");
-
free_descs(nandc);
- return ret;
+ if (ret) {
+ dev_err(nandc->dev, "failure to read page/oob\n");
+ return ret;
+ }
+
+ return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
/*
@@ -1704,7 +1935,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
set_address(host, host->cw_size * (ecc->steps - 1), page);
update_rw_regs(host, 1, true);
- config_nand_single_cw_page_read(nandc);
+ config_nand_single_cw_page_read(nandc, host->use_ecc);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
@@ -1724,20 +1955,14 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
u8 *data_buf, *oob_buf = NULL;
- int ret;
nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
clear_bam_transaction(nandc);
- ret = read_page_ecc(host, data_buf, oob_buf);
- if (ret) {
- dev_err(nandc->dev, "failure to read page\n");
- return ret;
- }
- return parse_read_errors(host, data_buf, oob_buf);
+ return read_page_ecc(host, data_buf, oob_buf, page);
}
/* implements ecc->read_page_raw() */
@@ -1746,77 +1971,20 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- u8 *data_buf, *oob_buf;
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int i, ret;
- int read_loc;
-
- nand_read_page_op(chip, page, 0, NULL, 0);
- data_buf = buf;
- oob_buf = chip->oob_poi;
-
- host->use_ecc = false;
+ int cw, ret;
+ u8 *data_buf = buf, *oob_buf = chip->oob_poi;
- clear_bam_transaction(nandc);
- update_rw_regs(host, ecc->steps, true);
- config_nand_page_read(nandc);
-
- for (i = 0; i < ecc->steps; i++) {
- int data_size1, data_size2, oob_size1, oob_size2;
- int reg_off = FLASH_BUF_ACC;
-
- data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
- oob_size1 = host->bbm_size;
-
- if (i == (ecc->steps - 1)) {
- data_size2 = ecc->size - data_size1 -
- ((ecc->steps - 1) << 2);
- oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
- host->spare_bytes;
- } else {
- data_size2 = host->cw_data - data_size1;
- oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
- }
-
- if (nandc->props->is_bam) {
- read_loc = 0;
- nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
- read_loc += data_size1;
-
- nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
- read_loc += oob_size1;
-
- nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
- read_loc += data_size2;
-
- nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
- }
-
- config_nand_cw_read(nandc);
-
- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
- reg_off += data_size1;
- data_buf += data_size1;
-
- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
- reg_off += oob_size1;
- oob_buf += oob_size1;
-
- read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
- reg_off += data_size2;
- data_buf += data_size2;
+ for (cw = 0; cw < ecc->steps; cw++) {
+ ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
+ page, cw);
+ if (ret)
+ return ret;
- read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
- oob_buf += oob_size2;
+ data_buf += host->cw_data;
+ oob_buf += ecc->bytes;
}
- ret = submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev, "failure to read raw page\n");
-
- free_descs(nandc);
-
return 0;
}
@@ -1827,7 +1995,6 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1836,11 +2003,7 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true);
- ret = read_page_ecc(host, NULL, chip->oob_poi);
- if (ret)
- dev_err(nandc->dev, "failure to read oob\n");
-
- return ret;
+ return read_page_ecc(host, NULL, chip->oob_poi, page);
}
/* implements ecc->write_page() */
@@ -1988,11 +2151,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
/*
* implements ecc->write_oob()
*
- * the NAND controller cannot write only data or only oob within a codeword,
- * since ecc is calculated for the combined codeword. we first copy the
- * entire contents for the last codeword(data + oob), replace the old oob
- * with the new one in chip->oob_poi, and then write the entire codeword.
- * this read-copy-write operation results in a slight performance loss.
+ * the NAND controller cannot write only data or only OOB within a codeword
+ * since ECC is calculated for the combined codeword. So update the OOB from
+ * chip->oob_poi, and pad the data area with 0xFF before writing.
*/
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
@@ -2005,19 +2166,13 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
host->use_ecc = true;
-
- clear_bam_transaction(nandc);
- ret = copy_last_cw(host, page);
- if (ret)
- return ret;
-
- clear_read_regs(nandc);
clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = mtd->oobavail;
+ memset(nandc->data_buffer, 0xff, host->cw_data);
/* override new oob content to last codeword */
mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
0, mtd->oobavail);
@@ -2049,7 +2204,6 @@ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret, bbpos, bad = 0;
- u32 flash_status;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -2066,9 +2220,7 @@ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
if (ret)
goto err;
- flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
-
- if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ if (check_flash_errors(host, 1)) {
dev_warn(nandc->dev, "error when trying to read BBM\n");
goto err;
}
@@ -2315,27 +2467,40 @@ static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
.free = qcom_nand_ooblayout_free,
};
-static int qcom_nand_host_setup(struct qcom_nand_host *host)
+static int
+qcom_nandc_calc_ecc_bytes(int step_size, int strength)
+{
+ return strength == 4 ? 12 : 16;
+}
+NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
+ NANDC_STEP_SIZE, 4, 8);
+
+static int qcom_nand_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int cwperpage, bad_block_byte;
+ int cwperpage, bad_block_byte, ret;
bool wide_bus;
int ecc_mode = 1;
+	/* the controller only supports 512-byte data steps */
+ ecc->size = NANDC_STEP_SIZE;
+ wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+
/*
- * the controller requires each step consists of 512 bytes of data.
- * bail out if DT has populated a wrong step size.
+	 * Each CW has 4 available OOB bytes which will be protected with ECC,
+	 * so the remaining OOB bytes can be used for the ECC data itself.
*/
- if (ecc->size != NANDC_STEP_SIZE) {
- dev_err(nandc->dev, "invalid ecc size\n");
- return -EINVAL;
+ ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
+ mtd->oobsize - (cwperpage * 4));
+ if (ret) {
+ dev_err(nandc->dev, "No valid ECC settings possible\n");
+ return ret;
}
- wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
-
if (ecc->strength >= 8) {
/* 8 bit ECC defaults to BCH ECC on all platforms */
host->bch_enabled = true;
@@ -2403,7 +2568,6 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
- cwperpage = mtd->writesize / ecc->size;
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
@@ -2419,12 +2583,6 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
* for 8 bit ECC
*/
host->cw_size = host->cw_data + ecc->bytes;
-
- if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
- dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
- return -EINVAL;
- }
-
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
@@ -2482,6 +2640,10 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
return 0;
}
+static const struct nand_controller_ops qcom_nandc_ops = {
+ .attach_chip = qcom_nand_attach_chip,
+};
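
The caps-based selection is easy to verify by hand: nand_ecc_choose_conf() is offered the OOB bytes left after the per-codeword reserved area and picks a strength whose total ECC bytes still fit. A worked example under an assumed 2 KiB page with 128 bytes of OOB (not taken from the patch):

/*
 * cwperpage = 2048 / 512 = 4 codewords
 * oobavail  = 128 - (4 * 4) = 112 bytes offered to the ECC engine
 *
 * Per qcom_nandc_calc_ecc_bytes():
 *   strength 4 -> 12 bytes/step -> 4 * 12 = 48 <= 112 (fits)
 *   strength 8 -> 16 bytes/step -> 4 * 16 = 64 <= 112 (fits)
 *
 * If neither the DT nor the chip's datasheet pins a requirement, the
 * strongest fitting strength (8 bits here) is selected.
 */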
+
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
int ret;
@@ -2570,7 +2732,8 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
INIT_LIST_HEAD(&nandc->desc_list);
INIT_LIST_HEAD(&nandc->host_list);
- nand_hw_control_init(&nandc->controller);
+ nand_controller_init(&nandc->controller);
+ nandc->controller.ops = &qcom_nandc_ops;
return 0;
}
@@ -2623,9 +2786,9 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
return 0;
}
-static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
- struct qcom_nand_host *host,
- struct device_node *dn)
+static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2672,30 +2835,13 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
/* set up initial status value */
host->status = NAND_STATUS_READY | NAND_STATUS_WP;
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- return ret;
-
- ret = qcom_nand_host_setup(host);
-
- return ret;
-}
-
-static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
- struct qcom_nand_host *host,
- struct device_node *dn)
-{
- struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, 1);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
- nand_cleanup(mtd_to_nand(mtd));
+ nand_cleanup(chip);
return ret;
}
@@ -2704,28 +2850,9 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
struct device *dev = nandc->dev;
struct device_node *dn = dev->of_node, *child;
- struct qcom_nand_host *host, *tmp;
+ struct qcom_nand_host *host;
int ret;
- for_each_available_child_of_node(dn, child) {
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- of_node_put(child);
- return -ENOMEM;
- }
-
- ret = qcom_nand_host_init(nandc, host, child);
- if (ret) {
- devm_kfree(dev, host);
- continue;
- }
-
- list_add_tail(&host->node, &nandc->host_list);
- }
-
- if (list_empty(&nandc->host_list))
- return -ENODEV;
-
if (nandc->props->is_bam) {
free_bam_transaction(nandc);
nandc->bam_txn = alloc_bam_transaction(nandc);
@@ -2736,12 +2863,20 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
}
}
- list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
- ret = qcom_nand_mtd_register(nandc, host, child);
+ for_each_available_child_of_node(dn, child) {
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ ret = qcom_nand_host_init_and_register(nandc, host, child);
if (ret) {
- list_del(&host->node);
devm_kfree(dev, host);
+ continue;
}
+			"Not enough OOB bytes: required = %d, available = %d\n",
+ list_add_tail(&host->node, &nandc->host_list);
}
if (list_empty(&nandc->host_list))
@@ -2799,14 +2934,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
nandc->props = dev_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nandc->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(nandc->base))
- return PTR_ERR(nandc->base);
-
- nandc->base_phys = res->start;
- nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
-
nandc->core_clk = devm_clk_get(dev, "core");
if (IS_ERR(nandc->core_clk))
return PTR_ERR(nandc->core_clk);
@@ -2819,9 +2946,21 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nandc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nandc->base))
+ return PTR_ERR(nandc->base);
+
+ nandc->base_phys = res->start;
+ nandc->base_dma = dma_map_resource(dev, res->start,
+ resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (!nandc->base_dma)
+ return -ENXIO;
+
ret = qcom_nandc_alloc(nandc);
if (ret)
- goto err_core_clk;
+ goto err_nandc_alloc;
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
@@ -2847,6 +2986,9 @@ err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
return ret;
}
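
Unlike devm_ioremap_resource(), dma_map_resource() maps MMIO rather than RAM and is not automatically unwound, so it must be balanced by dma_unmap_resource() on every exit path (which is what the new err_nandc_alloc label does). The canonical error check is dma_mapping_error(); a minimal sketch of the pairing, with hypothetical helper names:

#include <linux/dma-mapping.h>
#include <linux/ioport.h>

static int my_map_regs(struct device *dev, struct resource *res,
		       dma_addr_t *out)
{
	dma_addr_t addr = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(dev, addr))
		return -ENXIO;

	*out = addr;
	return 0;
}

static void my_unmap_regs(struct device *dev, struct resource *res,
			  dma_addr_t addr)
{
	dma_unmap_resource(dev, addr, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);
}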
@@ -2854,16 +2996,21 @@ err_core_clk:
static int qcom_nandc_remove(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct qcom_nand_host *host;
list_for_each_entry(host, &nandc->host_list, node)
nand_release(nand_to_mtd(&host->chip));
+
qcom_nandc_unalloc(nandc);
clk_disable_unprepare(nandc->aon_clk);
clk_disable_unprepare(nandc->core_clk);
+ dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+
return 0;
}
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 19661c5d3220..c21e8892394a 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -162,7 +162,7 @@ enum s3c_nand_clk_state {
*/
struct s3c2410_nand_info {
/* mtd info */
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct s3c2410_nand_mtd *mtds;
struct s3c2410_platform_nand *platform;
@@ -802,8 +802,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
mtdinfo->name = set->name;
- return mtd_device_parse_register(mtdinfo, NULL, NULL,
- set->partitions, set->nr_partitions);
+ return mtd_device_register(mtdinfo, set->partitions,
+ set->nr_partitions);
}
return -ENODEV;
@@ -915,20 +915,19 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
}
/**
- * s3c2410_nand_update_chip - post probe update
- * @info: The controller instance.
- * @nmtd: The driver version of the MTD instance.
+ * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan
+ * @chip: The NAND chip
*
- * This routine is called after the chip probe has successfully completed
- * and the relevant per-chip information updated. This call ensure that
+ * This hook is called by the core after the identification of the NAND chip,
+ * once the relevant per-chip information is up to date. This call ensures that
* we update the internal state accordingly.
*
* The internal state is currently limited to the ECC state information.
*/
-static int s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *nmtd)
+static int s3c2410_nand_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &nmtd->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
switch (chip->ecc.mode) {
@@ -998,6 +997,10 @@ static int s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
return 0;
}
+static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
+ .attach_chip = s3c2410_nand_attach_chip,
+};
+
static const struct of_device_id s3c24xx_nand_dt_ids[] = {
{
.compatible = "samsung,s3c2410-nand",
@@ -1094,7 +1097,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- nand_hw_control_init(&info->controller);
+ nand_controller_init(&info->controller);
+ info->controller.ops = &s3c24xx_nand_controller_ops;
/* get the clock source and enable it */
@@ -1134,8 +1138,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
- sets = (plat != NULL) ? plat->sets : NULL;
- nr_sets = (plat != NULL) ? plat->nr_sets : 1;
+ if (!plat->sets || plat->nr_sets < 1) {
+ err = -EINVAL;
+ goto exit_error;
+ }
+
+ sets = plat->sets;
+ nr_sets = plat->nr_sets;
info->mtd_count = nr_sets;
@@ -1152,7 +1161,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
nmtd = info->mtds;
- for (setno = 0; setno < nr_sets; setno++, nmtd++) {
+ for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) {
struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
pr_debug("initialising set %d (%p, info %p)\n",
@@ -1161,22 +1170,11 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
s3c2410_nand_init_chip(info, nmtd, sets);
- err = nand_scan_ident(mtd, (sets) ? sets->nr_chips : 1, NULL);
- if (err)
- goto exit_error;
-
- err = s3c2410_nand_update_chip(info, nmtd);
- if (err < 0)
- goto exit_error;
-
- err = nand_scan_tail(mtd);
+ err = nand_scan(mtd, sets ? sets->nr_chips : 1);
if (err)
goto exit_error;
s3c2410_nand_add_partition(info, nmtd, sets);
-
- if (sets != NULL)
- sets++;
}
/* initialise the hardware */
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index c7abceffcc40..bb8866e05ff7 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1002,10 +1002,17 @@ static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
flctl->index += len;
}
-static int flctl_chip_init_tail(struct mtd_info *mtd)
+static int flctl_chip_attach_chip(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- struct nand_chip *chip = &flctl->chip;
+
+ /*
+ * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
+ * Add the SEL_16BIT flag in flctl->flcmncr_base.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ flctl->flcmncr_base |= SEL_16BIT;
if (mtd->writesize == 512) {
flctl->page_size = 0;
@@ -1063,6 +1070,10 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+static const struct nand_controller_ops flctl_nand_controller_ops = {
+ .attach_chip = flctl_chip_attach_chip,
+};
+
static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
struct sh_flctl *flctl = dev_id;
@@ -1191,25 +1202,8 @@ static int flctl_probe(struct platform_device *pdev)
flctl_setup_dma(flctl);
- ret = nand_scan_ident(flctl_mtd, 1, NULL);
- if (ret)
- goto err_chip;
-
- if (nand->options & NAND_BUSWIDTH_16) {
- /*
- * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
- * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
- * flctl->flcmncr_base to pdata->flcmncr_val.
- */
- pdata->flcmncr_val |= SEL_16BIT;
- flctl->flcmncr_base = pdata->flcmncr_val;
- }
-
- ret = flctl_chip_init_tail(flctl_mtd);
- if (ret)
- goto err_chip;
-
- ret = nand_scan_tail(flctl_mtd);
+ nand->dummy_controller.ops = &flctl_nand_controller_ops;
+ ret = nand_scan(flctl_mtd, 1);
if (ret)
goto err_chip;
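
Drivers with no controller structure of their own can still use the hook: every nand_chip embeds a dummy_controller, so assigning its ops before nand_scan() is enough. A minimal sketch of the idiom, reusing the hypothetical my_attach_chip from the first sketch above:

static const struct nand_controller_ops my_dummy_ops = {
	.attach_chip = my_attach_chip,
};

static int my_scan(struct nand_chip *chip, struct mtd_info *mtd)
{
	chip->dummy_controller.ops = &my_dummy_ops;
	return nand_scan(mtd, 1);
}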
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index e93df02c825e..fc171b17a39b 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -21,10 +21,7 @@
#include <linux/mtd/sharpsl.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
+#include <linux/io.h>
struct sharpsl_nand {
struct nand_chip chip;
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index 7f5044a79f01..73aafe8c3ef3 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -160,19 +160,9 @@ static struct nand_flash_dev nand_xd_flash_ids[] = {
{NULL}
};
-int sm_register_device(struct mtd_info *mtd, int smartmedia)
+static int sm_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
-
- chip->options |= NAND_SKIP_BBTSCAN;
-
- /* Scan for card properties */
- ret = nand_scan_ident(mtd, 1, smartmedia ?
- nand_smartmedia_flash_ids : nand_xd_flash_ids);
-
- if (ret)
- return ret;
+ struct mtd_info *mtd = nand_to_mtd(chip);
/* Bad block marker position */
chip->badblockpos = 0x05;
@@ -187,12 +177,33 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
else
return -ENODEV;
- ret = nand_scan_tail(mtd);
+ return 0;
+}
+
+static const struct nand_controller_ops sm_controller_ops = {
+ .attach_chip = sm_attach_chip,
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_flash_dev *flash_ids;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+ /* Scan for card properties */
+ chip->dummy_controller.ops = &sm_controller_ops;
+ flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
+ ret = nand_scan_with_ids(mtd, 1, flash_ids);
if (ret)
return ret;
- return mtd_device_register(mtd, NULL, 0);
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(sm_register_device);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index d831a141a196..1f0b7ee38df5 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -29,14 +29,12 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/reset.h>
@@ -127,7 +125,7 @@
#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
#define NFC_NORMAL_OP (0 << 30)
#define NFC_ECC_OP (1 << 30)
-#define NFC_PAGE_OP (2 << 30)
+#define NFC_PAGE_OP (2U << 30)
/* define bit use in NFC_RCMD_SET */
#define NFC_READ_CMD_MSK GENMASK(7, 0)
@@ -234,7 +232,7 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
* controller events
*/
struct sunxi_nfc {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct device *dev;
void __iomem *regs;
struct clk *ahb_clk;
@@ -247,7 +245,7 @@ struct sunxi_nfc {
struct dma_chan *dmac;
};
-static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct sunxi_nfc, controller);
}
@@ -544,7 +542,7 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
{
- uint8_t ret;
+ uint8_t ret = 0;
sunxi_nfc_read_buf(mtd, &ret, 1);
@@ -1816,12 +1814,21 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
}
}
-static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
- struct device_node *np)
+static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct device_node *np = nand_get_flash_node(nand);
int ret;
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ nand->options |= NAND_SUBPAGE_READ;
+
if (!ecc->size) {
ecc->size = nand->ecc_step_ds;
ecc->strength = nand->ecc_strength_ds;
@@ -1846,6 +1853,10 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
return 0;
}
+static const struct nand_controller_ops sunxi_nand_controller_ops = {
+ .attach_chip = sunxi_nand_attach_chip,
+};
+
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
struct device_node *np)
{
@@ -1911,6 +1922,8 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
nand->chip_delay = 200;
nand->controller = &nfc->controller;
+ nand->controller->ops = &sunxi_nand_controller_ops;
+
/*
* Set the ECC mode to the default value in case nothing is specified
* in the DT.
@@ -1927,30 +1940,10 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
- ret = nand_scan_ident(mtd, nsels, NULL);
+ ret = nand_scan(mtd, nsels);
if (ret)
return ret;
- if (nand->bbt_options & NAND_BBT_USE_FLASH)
- nand->bbt_options |= NAND_BBT_NO_OOB;
-
- if (nand->options & NAND_NEED_SCRAMBLING)
- nand->options |= NAND_NO_SUBPAGE_WRITE;
-
- nand->options |= NAND_SUBPAGE_READ;
-
- ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
- if (ret) {
- dev_err(dev, "ECC init failed: %d\n", ret);
- return ret;
- }
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(dev, "nand_scan_tail failed: %d\n", ret);
- return ret;
- }
-
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
@@ -2012,7 +2005,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
return -ENOMEM;
nfc->dev = dev;
- nand_hw_control_init(&nfc->controller);
+ nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index f2052fae21c7..72698691727d 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -83,7 +83,7 @@
#define MAX_CS 4
struct tango_nfc {
- struct nand_hw_control hw;
+ struct nand_controller hw;
void __iomem *reg_base;
void __iomem *mem_base;
void __iomem *pbus_base;
@@ -517,6 +517,28 @@ static int tango_set_timings(struct mtd_info *mtd, int csline,
return 0;
}
+static int tango_attach_chip(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ ecc->mode = NAND_ECC_HW;
+ ecc->algo = NAND_ECC_BCH;
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
+
+ ecc->read_page_raw = tango_read_page_raw;
+ ecc->write_page_raw = tango_write_page_raw;
+ ecc->read_page = tango_read_page;
+ ecc->write_page = tango_write_page;
+ ecc->read_oob = tango_read_oob;
+ ecc->write_oob = tango_write_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops tango_controller_ops = {
+ .attach_chip = tango_attach_chip,
+};
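
The bytes computation here is plain BCH arithmetic: each correctable bit costs FIELD_ORDER parity bits (the Galois-field order, 15 in this driver), so the per-step ECC footprint is just that product rounded up to bytes. Two worked values, assuming FIELD_ORDER == 15:

/*
 * ecc->bytes = DIV_ROUND_UP(strength * FIELD_ORDER, BITS_PER_BYTE)
 *   strength 12: 12 * 15 = 180 bits -> DIV_ROUND_UP(180, 8) = 23 bytes
 *   strength 24: 24 * 15 = 360 bits -> DIV_ROUND_UP(360, 8) = 45 bytes
 */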
+
static int chip_init(struct device *dev, struct device_node *np)
{
u32 cs;
@@ -566,22 +588,7 @@ static int chip_init(struct device *dev, struct device_node *np)
mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
mtd->dev.parent = dev;
- err = nand_scan_ident(mtd, 1, NULL);
- if (err)
- return err;
-
- ecc->mode = NAND_ECC_HW;
- ecc->algo = NAND_ECC_BCH;
- ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
-
- ecc->read_page_raw = tango_read_page_raw;
- ecc->write_page_raw = tango_write_page_raw;
- ecc->read_page = tango_read_page;
- ecc->write_page = tango_write_page;
- ecc->read_oob = tango_read_oob;
- ecc->write_oob = tango_write_oob;
-
- err = nand_scan_tail(mtd);
+ err = nand_scan(mtd, 1);
if (err)
return err;
@@ -654,7 +661,8 @@ static int tango_nand_probe(struct platform_device *pdev)
return PTR_ERR(nfc->chan);
platform_set_drvdata(pdev, nfc);
- nand_hw_control_init(&nfc->hw);
+ nand_controller_init(&nfc->hw);
+ nfc->hw.ops = &tango_controller_ops;
nfc->freq_kHz = clk_get_rate(clk) / 1000;
for_each_child_of_node(pdev->dev.of_node, np) {
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
new file mode 100644
index 000000000000..79da1efc88d1
--- /dev/null
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -0,0 +1,1246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
+ * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
+ * Copyright (C) 2012 Avionic Design GmbH
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define COMMAND 0x00
+#define COMMAND_GO BIT(31)
+#define COMMAND_CLE BIT(30)
+#define COMMAND_ALE BIT(29)
+#define COMMAND_PIO BIT(28)
+#define COMMAND_TX BIT(27)
+#define COMMAND_RX BIT(26)
+#define COMMAND_SEC_CMD BIT(25)
+#define COMMAND_AFT_DAT BIT(24)
+#define COMMAND_TRANS_SIZE(size) ((((size) - 1) & 0xf) << 20)
+#define COMMAND_A_VALID BIT(19)
+#define COMMAND_B_VALID BIT(18)
+#define COMMAND_RD_STATUS_CHK BIT(17)
+#define COMMAND_RBSY_CHK BIT(16)
+#define COMMAND_CE(x) BIT(8 + ((x) & 0x7))
+#define COMMAND_CLE_SIZE(size) ((((size) - 1) & 0x3) << 4)
+#define COMMAND_ALE_SIZE(size) ((((size) - 1) & 0xf) << 0)
+
+#define STATUS 0x04
+
+#define ISR 0x08
+#define ISR_CORRFAIL_ERR BIT(24)
+#define ISR_UND BIT(7)
+#define ISR_OVR BIT(6)
+#define ISR_CMD_DONE BIT(5)
+#define ISR_ECC_ERR BIT(4)
+
+#define IER 0x0c
+#define IER_ERR_TRIG_VAL(x) (((x) & 0xf) << 16)
+#define IER_UND BIT(7)
+#define IER_OVR BIT(6)
+#define IER_CMD_DONE BIT(5)
+#define IER_ECC_ERR BIT(4)
+#define IER_GIE BIT(0)
+
+#define CONFIG 0x10
+#define CONFIG_HW_ECC BIT(31)
+#define CONFIG_ECC_SEL BIT(30)
+#define CONFIG_ERR_COR BIT(29)
+#define CONFIG_PIPE_EN BIT(28)
+#define CONFIG_TVAL_4 (0 << 24)
+#define CONFIG_TVAL_6 (1 << 24)
+#define CONFIG_TVAL_8 (2 << 24)
+#define CONFIG_SKIP_SPARE BIT(23)
+#define CONFIG_BUS_WIDTH_16 BIT(21)
+#define CONFIG_COM_BSY BIT(20)
+#define CONFIG_PS_256 (0 << 16)
+#define CONFIG_PS_512 (1 << 16)
+#define CONFIG_PS_1024 (2 << 16)
+#define CONFIG_PS_2048 (3 << 16)
+#define CONFIG_PS_4096 (4 << 16)
+#define CONFIG_SKIP_SPARE_SIZE_4 (0 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_8 (1 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_12 (2 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_16 (3 << 14)
+#define CONFIG_TAG_BYTE_SIZE(x) ((x) & 0xff)
+
+#define TIMING_1 0x14
+#define TIMING_TRP_RESP(x) (((x) & 0xf) << 28)
+#define TIMING_TWB(x) (((x) & 0xf) << 24)
+#define TIMING_TCR_TAR_TRR(x) (((x) & 0xf) << 20)
+#define TIMING_TWHR(x) (((x) & 0xf) << 16)
+#define TIMING_TCS(x) (((x) & 0x3) << 14)
+#define TIMING_TWH(x) (((x) & 0x3) << 12)
+#define TIMING_TWP(x) (((x) & 0xf) << 8)
+#define TIMING_TRH(x) (((x) & 0x3) << 4)
+#define TIMING_TRP(x) (((x) & 0xf) << 0)
+
+#define RESP 0x18
+
+#define TIMING_2 0x1c
+#define TIMING_TADL(x) ((x) & 0xf)
+
+#define CMD_REG1 0x20
+#define CMD_REG2 0x24
+#define ADDR_REG1 0x28
+#define ADDR_REG2 0x2c
+
+#define DMA_MST_CTRL 0x30
+#define DMA_MST_CTRL_GO BIT(31)
+#define DMA_MST_CTRL_IN (0 << 30)
+#define DMA_MST_CTRL_OUT BIT(30)
+#define DMA_MST_CTRL_PERF_EN BIT(29)
+#define DMA_MST_CTRL_IE_DONE BIT(28)
+#define DMA_MST_CTRL_REUSE BIT(27)
+#define DMA_MST_CTRL_BURST_1 (2 << 24)
+#define DMA_MST_CTRL_BURST_4 (3 << 24)
+#define DMA_MST_CTRL_BURST_8 (4 << 24)
+#define DMA_MST_CTRL_BURST_16 (5 << 24)
+#define DMA_MST_CTRL_IS_DONE BIT(20)
+#define DMA_MST_CTRL_EN_A BIT(2)
+#define DMA_MST_CTRL_EN_B BIT(1)
+
+#define DMA_CFG_A 0x34
+#define DMA_CFG_B 0x38
+
+#define FIFO_CTRL 0x3c
+#define FIFO_CTRL_CLR_ALL BIT(3)
+
+#define DATA_PTR 0x40
+#define TAG_PTR 0x44
+#define ECC_PTR 0x48
+
+#define DEC_STATUS 0x4c
+#define DEC_STATUS_A_ECC_FAIL BIT(1)
+#define DEC_STATUS_ERR_COUNT_MASK 0x00ff0000
+#define DEC_STATUS_ERR_COUNT_SHIFT 16
+
+#define HWSTATUS_CMD 0x50
+#define HWSTATUS_MASK 0x54
+#define HWSTATUS_RDSTATUS_MASK(x) (((x) & 0xff) << 24)
+#define HWSTATUS_RDSTATUS_VALUE(x) (((x) & 0xff) << 16)
+#define HWSTATUS_RBSY_MASK(x) (((x) & 0xff) << 8)
+#define HWSTATUS_RBSY_VALUE(x) (((x) & 0xff) << 0)
+
+#define BCH_CONFIG 0xcc
+#define BCH_ENABLE BIT(0)
+#define BCH_TVAL_4 (0 << 4)
+#define BCH_TVAL_8 (1 << 4)
+#define BCH_TVAL_14 (2 << 4)
+#define BCH_TVAL_16 (3 << 4)
+
+#define DEC_STAT_RESULT 0xd0
+#define DEC_STAT_BUF 0xd4
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_MASK 0xff000000
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT 24
+#define DEC_STAT_BUF_CORR_SEC_FLAG_MASK 0x00ff0000
+#define DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT 16
+#define DEC_STAT_BUF_MAX_CORR_CNT_MASK 0x00001f00
+#define DEC_STAT_BUF_MAX_CORR_CNT_SHIFT 8
+
+#define OFFSET(val, off) ((val) < (off) ? 0 : (val) - (off))
+
+#define SKIP_SPARE_BYTES 4
+#define BITS_PER_STEP_RS 18
+#define BITS_PER_STEP_BCH 13
+
+#define INT_MASK (IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
+#define HWSTATUS_CMD_DEFAULT NAND_STATUS_READY
+#define HWSTATUS_MASK_DEFAULT (HWSTATUS_RDSTATUS_MASK(1) | \
+ HWSTATUS_RDSTATUS_VALUE(0) | \
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
+ HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
+
+struct tegra_nand_controller {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion command_complete;
+ struct completion dma_complete;
+ bool last_read_error;
+ int cur_cs;
+ struct nand_chip *chip;
+};
+
+struct tegra_nand_chip {
+ struct nand_chip chip;
+ struct gpio_desc *wp_gpio;
+ struct mtd_oob_region ecc;
+ u32 config;
+ u32 config_ecc;
+ u32 bch_config;
+ int cs[1];
+};
+
+static inline struct tegra_nand_controller *
+ to_tegra_ctrl(struct nand_controller *hw_ctrl)
+{
+ return container_of(hw_ctrl, struct tegra_nand_controller, controller);
+}
+
+static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct tegra_nand_chip, chip);
+}
+
+static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ return -ERANGE;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
+ .ecc = tegra_nand_ooblayout_rs_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
+
+static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
+ .ecc = tegra_nand_ooblayout_bch_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
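
Both layouts share one shape: parity bits per step times strength, rounded up to whole bytes, then the per-page ECC area rounded up to a 4-byte boundary and placed after the skipped spare bytes. A worked example for an assumed 2 KiB page (4 steps of 512 bytes):

/*
 * RS,  strength 4: DIV_ROUND_UP(18 * 4, 8) = 9 bytes/step
 *                  round_up(9 * 4, 4)      = 36 OOB bytes at offset 4
 * BCH, strength 8: DIV_ROUND_UP(13 * 8, 8) = 13 bytes/step
 *                  round_up(13 * 4, 4)     = 52 OOB bytes at offset 4
 */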
+
+static irqreturn_t tegra_nand_irq(int irq, void *data)
+{
+ struct tegra_nand_controller *ctrl = data;
+ u32 isr, dma;
+
+ isr = readl_relaxed(ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ dev_dbg(ctrl->dev, "isr %08x\n", isr);
+
+ if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
+ return IRQ_NONE;
+
+ /*
+	 * The bit name is somewhat misleading: This is also set when
+ * HW ECC was successful. The data sheet states:
+ * Correctable OR Un-correctable errors occurred in the DMA transfer...
+ */
+ if (isr & ISR_CORRFAIL_ERR)
+ ctrl->last_read_error = true;
+
+ if (isr & ISR_CMD_DONE)
+ complete(&ctrl->command_complete);
+
+ if (isr & ISR_UND)
+ dev_err(ctrl->dev, "FIFO underrun\n");
+
+ if (isr & ISR_OVR)
+ dev_err(ctrl->dev, "FIFO overrun\n");
+
+ /* handle DMA interrupts */
+ if (dma & DMA_MST_CTRL_IS_DONE) {
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+ complete(&ctrl->dma_complete);
+ }
+
+ /* clear interrupts */
+ writel_relaxed(isr, ctrl->regs + ISR);
+
+ return IRQ_HANDLED;
+}
+
+static const char * const tegra_nand_reg_names[] = {
+ "COMMAND",
+ "STATUS",
+ "ISR",
+ "IER",
+ "CONFIG",
+ "TIMING",
+ NULL,
+ "TIMING2",
+ "CMD_REG1",
+ "CMD_REG2",
+ "ADDR_REG1",
+ "ADDR_REG2",
+ "DMA_MST_CTRL",
+ "DMA_CFG_A",
+ "DMA_CFG_B",
+ "FIFO_CTRL",
+};
+
+static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
+{
+ u32 reg;
+ int i;
+
+ dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
+ for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
+ const char *reg_name = tegra_nand_reg_names[i];
+
+ if (!reg_name)
+ continue;
+
+ reg = readl_relaxed(ctrl->regs + (i * 4));
+ dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
+ }
+}
+
+static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
+{
+ u32 isr, dma;
+
+ disable_irq(ctrl->irq);
+
+ /* Abort current command/DMA operation */
+ writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(0, ctrl->regs + COMMAND);
+
+ /* clear interrupts */
+ isr = readl_relaxed(ctrl->regs + ISR);
+ writel_relaxed(isr, ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+
+ reinit_completion(&ctrl->command_complete);
+ reinit_completion(&ctrl->dma_complete);
+
+ enable_irq(ctrl->irq);
+}
+
+static int tegra_nand_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ const struct nand_op_instr *instr_data_in = NULL;
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ unsigned int op_id, size = 0, offset = 0;
+ bool first_cmd = true;
+ u32 reg, cmd = 0;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs, i;
+ const u8 *addrs;
+ u32 addr1 = 0, addr2 = 0;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd) {
+ cmd |= COMMAND_CLE;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG1);
+ } else {
+ cmd |= COMMAND_SEC_CMD;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG2);
+ }
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
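+			/* Pack up to eight address cycles, LSB first, into the two address registers */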
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr1 |= *addrs++ << (BITS_PER_BYTE * i);
+ naddrs -= i;
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr2 |= *addrs++ << (BITS_PER_BYTE * i);
+
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+ writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_RX | COMMAND_A_VALID;
+
+ instr_data_in = instr;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_TX | COMMAND_A_VALID;
+ memcpy(&reg, instr->ctx.data.buf.out + offset, size);
+
+ writel_relaxed(reg, ctrl->regs + RESP);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= COMMAND_RBSY_CHK;
+ break;
+ }
+ }
+
+ cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ return -ETIMEDOUT;
+ }
+
+ if (instr_data_in) {
+ reg = readl_relaxed(ctrl->regs + RESP);
+ memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
+ }
+
+ return 0;
+}
+
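+/*
+ * Accepted patterns: a generic CMD/ADDR/CMD/WAITRDY sequence, a small PIO
+ * data-out (up to 4 bytes), and the generic sequence followed by a small
+ * PIO data-in (up to 4 bytes).
+ */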
+static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
+ );
+
+static int tegra_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
+ check_only);
+}
+
+static void tegra_nand_select_chip(struct mtd_info *mtd, int die_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+
+ WARN_ON(die_nr >= (int)ARRAY_SIZE(nand->cs));
+
+	if (die_nr != 0) {
+ ctrl->cur_cs = -1;
+ return;
+ }
+
+ ctrl->cur_cs = nand->cs[die_nr];
+}
+
+static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
+ struct nand_chip *chip, bool enable)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+
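+	/* BCH has a dedicated config register; RS ECC is selected via CONFIG */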
+ if (chip->ecc.algo == NAND_ECC_BCH && enable)
+ writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
+ else
+ writel_relaxed(0, ctrl->regs + BCH_CONFIG);
+
+ if (enable)
+ writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
+ else
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+}
+
+static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
+ void *buf, void *oob_buf, int oob_len, int page,
+ bool read)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dma_addr_t dma_addr = 0, dma_addr_oob = 0;
+ u32 addr1, cmd, dma_ctrl;
+ int ret;
+
+ if (read) {
+ writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
+ } else {
+ writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
+ }
+ cmd = COMMAND_CLE | COMMAND_SEC_CMD;
+
+	/* The lower 16 bits hold the column address, 0 by default */
+ addr1 = page << 16;
+
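+	/* OOB-only transfers start at the first spare byte (column == writesize) */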
+ if (!buf)
+ addr1 |= mtd->writesize;
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
+ } else {
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
+ }
+
+ if (buf) {
+ dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
+ writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
+ }
+
+ if (oob_buf) {
+ dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
+ dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ ret = -EINVAL;
+ goto err_unmap_dma_page;
+ }
+
+ writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
+ writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
+ }
+
+ dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
+ DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
+ DMA_MST_CTRL_BURST_16;
+
+ if (buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_A;
+ if (oob_buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_B;
+
+ if (read)
+ dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
+ else
+ dma_ctrl |= DMA_MST_CTRL_OUT;
+
+ writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);
+
+ cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
+ COMMAND_CE(ctrl->cur_cs);
+
+ if (buf)
+ cmd |= COMMAND_A_VALID;
+ if (oob_buf)
+ cmd |= COMMAND_B_VALID;
+
+ if (read)
+ cmd |= COMMAND_RX;
+ else
+ cmd |= COMMAND_TX | COMMAND_AFT_DAT;
+
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+
+ ret = wait_for_completion_timeout(&ctrl->dma_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "DMA timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+ ret = 0;
+
+err_unmap_dma:
+ if (oob_buf)
+ dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
+err_unmap_dma_page:
+ if (buf)
+ dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);
+
+ return ret;
+}
+
+static int tegra_nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ u32 dec_stat, max_corr_cnt;
+ unsigned long fail_sec_flag;
+ int ret;
+
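+	/* Enable HW ECC only for the duration of the page transfer */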
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+ if (ret)
+ return ret;
+
+ /* No correctable or un-correctable errors, page must have 0 bitflips */
+ if (!ctrl->last_read_error)
+ return 0;
+
+ /*
+ * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
+ * which contains information for all ECC selections.
+ *
+ * Note that since we do not use Command Queues DEC_RESULT does not
+ * state the number of pages we can read from the DEC_STAT_BUF. But
+ * since CORRFAIL_ERR did occur during page read we do have a valid
+ * result in DEC_STAT_BUF.
+ */
+ ctrl->last_read_error = false;
+ dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);
+
+ fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;
+
+ max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
+ DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;
+
+ if (fail_sec_flag) {
+ int bit, max_bitflips = 0;
+
+ /*
+		 * Since we do not support subpage writes, a complete page
+		 * is either written or not. We can take a shortcut here by
+		 * checking whether any of the sectors was read successfully:
+		 * if at least one sector was read without errors, the page
+		 * must have been written previously. It cannot be an erased
+		 * page.
+ *
+ * E.g. controller might return fail_sec_flag with 0x4, which
+ * would mean only the third sector failed to correct. The
+ * page must have been written and the third sector is really
+ * not correctable anymore.
+ */
+ if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
+ mtd->ecc_stats.failed += hweight8(fail_sec_flag);
+ return max_corr_cnt;
+ }
+
+ /*
+ * All sectors failed to correct, but the ECC isn't smart
+ * enough to figure out if a page is really just erased.
+ * Read OOB data and check whether data/OOB is completely
+ * erased or if error correction just failed for all sub-
+ * pages.
+ */
+ ret = tegra_nand_read_oob(mtd, chip, page);
+ if (ret < 0)
+ return ret;
+
+ for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
+ u8 *data = buf + (chip->ecc.size * bit);
+ u8 *oob = chip->oob_poi + nand->ecc.offset +
+ (chip->ecc.bytes * bit);
+
+ ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
+ oob, chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(ret, max_bitflips);
+ }
+ }
+
+ return max_t(unsigned int, max_corr_cnt, max_bitflips);
+ } else {
+ int corr_sec_flag;
+
+ corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;
+
+ /*
+ * The value returned in the register is the maximum of
+ * bitflips encountered in any of the ECC regions. As there is
+		 * no way to get the number of bitflips in a specific region,
+ * we are not able to deliver correct stats but instead
+ * overestimate the number of corrected bitflips by assuming
+ * that all regions where errors have been corrected
+ * encountered the maximum number of bitflips.
+ */
+ mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);
+
+ return max_corr_cnt;
+ }
+}
+
+static int tegra_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ int ret;
+
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ 0, page, false);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+
+ return ret;
+}
+
+static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
+ const struct nand_sdr_timings *timings)
+{
+ /*
+ * The period (and all other timings in this function) is in ps,
+	 * so we need to take care here to avoid integer overflows.
+ */
+ unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
+ unsigned int period = DIV_ROUND_UP(1000000, rate);
+ u32 val, reg = 0;
+
+ val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
+ timings->tRC_min), period);
+ reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));
+
+ val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
+ max(timings->tALS_min, timings->tALH_min)),
+ period);
+ reg |= TIMING_TCS(OFFSET(val, 2));
+
+ val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
+ period);
+ reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));
+
+ reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
+ reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
+ reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
+ reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
+ reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_1);
+
+ val = DIV_ROUND_UP(timings->tADL_min, period);
+ reg = TIMING_TADL(OFFSET(val, 3));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_2);
+}
+
+static int tegra_nand_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ const struct nand_sdr_timings *timings;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ tegra_nand_setup_timing(ctrl, timings);
+
+ return 0;
+}
+
+static const int rs_strength_bootable[] = { 4 };
+static const int rs_strength[] = { 4, 6, 8 };
+static const int bch_strength_bootable[] = { 8, 16 };
+static const int bch_strength[] = { 4, 8, 14, 16 };
+
+static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
+ int strength_len, int bits_per_step,
+ int oobsize)
+{
+ bool maximize = chip->ecc.options & NAND_ECC_MAXIMIZE;
+ int i;
+
+ /*
+	 * Loop through the available strengths, starting from the strongest
+	 * one when trying to maximize the ECC strength.
+ */
+ for (i = 0; i < strength_len; i++) {
+ int strength_sel, bytes_per_step, bytes_per_page;
+
+ if (maximize) {
+ strength_sel = strength[strength_len - i - 1];
+ } else {
+ strength_sel = strength[i];
+
+ if (strength_sel < chip->ecc_strength_ds)
+ continue;
+ }
+
+ bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
+ BITS_PER_BYTE);
+ bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ /* Check whether strength fits OOB */
+ if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
+ return strength_sel;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
+{
+ const int *strength;
+ int strength_len, bits_per_step;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_RS:
+ bits_per_step = BITS_PER_STEP_RS;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = rs_strength_bootable;
+ strength_len = ARRAY_SIZE(rs_strength_bootable);
+ } else {
+ strength = rs_strength;
+ strength_len = ARRAY_SIZE(rs_strength);
+ }
+ break;
+ case NAND_ECC_BCH:
+ bits_per_step = BITS_PER_STEP_BCH;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = bch_strength_bootable;
+ strength_len = ARRAY_SIZE(bch_strength_bootable);
+ } else {
+ strength = bch_strength;
+ strength_len = ARRAY_SIZE(bch_strength);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_nand_get_strength(chip, strength, strength_len,
+ bits_per_step, oobsize);
+}
+
+static int tegra_nand_attach_chip(struct nand_chip *chip)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bits_per_step;
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
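+	/* The controller operates on 512 byte ECC steps only */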
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ if (chip->ecc_step_ds != 512) {
+ dev_err(ctrl->dev, "Unsupported step size %d\n",
+ chip->ecc_step_ds);
+ return -EINVAL;
+ }
+
+ chip->ecc.read_page = tegra_nand_read_page_hwecc;
+ chip->ecc.write_page = tegra_nand_write_page_hwecc;
+ chip->ecc.read_page_raw = tegra_nand_read_page_raw;
+ chip->ecc.write_page_raw = tegra_nand_write_page_raw;
+ chip->ecc.read_oob = tegra_nand_read_oob;
+ chip->ecc.write_oob = tegra_nand_write_oob;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ nand->config |= CONFIG_BUS_WIDTH_16;
+
+ if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+ if (mtd->writesize < 2048)
+ chip->ecc.algo = NAND_ECC_RS;
+ else
+ chip->ecc.algo = NAND_ECC_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_BCH && mtd->writesize < 2048) {
+ dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
+ return -EINVAL;
+ }
+
+ if (!chip->ecc.strength) {
+ ret = tegra_nand_select_strength(chip, mtd->oobsize);
+ if (ret < 0) {
+ dev_err(ctrl->dev,
+ "No valid strength found, minimum %d\n",
+ chip->ecc_strength_ds);
+ return ret;
+ }
+
+ chip->ecc.strength = ret;
+ }
+
+ nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
+ CONFIG_SKIP_SPARE_SIZE_4;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_RS:
+ bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
+ nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
+ CONFIG_ERR_COR;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->config_ecc |= CONFIG_TVAL_4;
+ break;
+ case 6:
+ nand->config_ecc |= CONFIG_TVAL_6;
+ break;
+ case 8:
+ nand->config_ecc |= CONFIG_TVAL_8;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ case NAND_ECC_BCH:
+ bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
+ nand->bch_config = BCH_ENABLE;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->bch_config |= BCH_TVAL_4;
+ break;
+ case 8:
+ nand->bch_config |= BCH_TVAL_8;
+ break;
+ case 14:
+ nand->bch_config |= BCH_TVAL_14;
+ break;
+ case 16:
+ nand->bch_config |= BCH_TVAL_16;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC algorithm not supported\n");
+ return -EINVAL;
+ }
+
+ dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
+ chip->ecc.algo == NAND_ECC_BCH ? "BCH" : "RS",
+ chip->ecc.strength);
+
+ chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
+
+ switch (mtd->writesize) {
+ case 256:
+ nand->config |= CONFIG_PS_256;
+ break;
+ case 512:
+ nand->config |= CONFIG_PS_512;
+ break;
+ case 1024:
+ nand->config |= CONFIG_PS_1024;
+ break;
+ case 2048:
+ nand->config |= CONFIG_PS_2048;
+ break;
+ case 4096:
+ nand->config |= CONFIG_PS_4096;
+ break;
+ default:
+ dev_err(ctrl->dev, "Unsupported writesize %d\n",
+ mtd->writesize);
+ return -ENODEV;
+ }
+
+ /* Store complete configuration for HW ECC in config_ecc */
+ nand->config_ecc |= nand->config;
+
+ /* Non-HW ECC read/writes complete OOB */
+ nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+
+ return 0;
+}
+
+static const struct nand_controller_ops tegra_nand_controller_ops = {
+ .attach_chip = &tegra_nand_attach_chip,
+};
+
+static int tegra_nand_chips_init(struct device *dev,
+ struct tegra_nand_controller *ctrl)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *np_nand;
+ int nsels, nchips = of_get_child_count(np);
+ struct tegra_nand_chip *nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u32 cs;
+
+ if (nchips != 1) {
+ dev_err(dev, "Currently only one NAND chip supported\n");
+ return -EINVAL;
+ }
+
+ np_nand = of_get_next_child(np, NULL);
+
+ nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
+ if (nsels != 1) {
+ dev_err(dev, "Missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Retrieve CS id, currently only single die NAND supported */
+ ret = of_property_read_u32(np_nand, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n", ret);
+ return ret;
+ }
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->cs[0] = cs;
+
+ nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ chip->controller = &ctrl->controller;
+
+ mtd = nand_to_mtd(chip);
+
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_flash_node(chip, np_nand);
+
+ if (!mtd->name)
+ mtd->name = "tegra_nand";
+
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
+ chip->exec_op = tegra_nand_exec_op;
+ chip->select_chip = tegra_nand_select_chip;
+ chip->setup_data_interface = tegra_nand_setup_data_interface;
+
+ ret = nand_scan(mtd, 1);
+ if (ret)
+ return ret;
+
+ mtd_ooblayout_ecc(mtd, 0, &nand->ecc);
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ ctrl->chip = chip;
+
+ return 0;
+}
+
+static int tegra_nand_probe(struct platform_device *pdev)
+{
+ struct reset_control *rst;
+ struct tegra_nand_controller *ctrl;
+ struct resource *res;
+ int err = 0;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = &pdev->dev;
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &tegra_nand_controller_ops;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
+
+ rst = devm_reset_control_get(&pdev->dev, "nand");
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ ctrl->clk = devm_clk_get(&pdev->dev, "nand");
+ if (IS_ERR(ctrl->clk))
+ return PTR_ERR(ctrl->clk);
+
+ err = clk_prepare_enable(ctrl->clk);
+ if (err)
+ return err;
+
+ err = reset_control_reset(rst);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
+ goto err_disable_clk;
+ }
+
+ writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
+ writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
+ writel_relaxed(INT_MASK, ctrl->regs + IER);
+
+ init_completion(&ctrl->command_complete);
+ init_completion(&ctrl->dma_complete);
+
+ ctrl->irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ dev_name(&pdev->dev), ctrl);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
+ goto err_disable_clk;
+ }
+
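+	/* Clear any stale DMA completion flag (write one to clear) */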
+ writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
+
+ err = tegra_nand_chips_init(ctrl->dev, ctrl);
+ if (err)
+ goto err_disable_clk;
+
+ platform_set_drvdata(pdev, ctrl);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ctrl->clk);
+ return err;
+}
+
+static int tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = ctrl->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ nand_cleanup(chip);
+
+ clk_disable_unprepare(ctrl->clk);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_nand_of_match[] = {
+ { .compatible = "nvidia,tegra20-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra_nand_of_match);
+
+static struct platform_driver tegra_nand_driver = {
+ .driver = {
+ .name = "tegra-nand",
+ .of_match_table = tegra_nand_of_match,
+ },
+ .probe = tegra_nand_probe,
+ .remove = tegra_nand_remove,
+};
+module_platform_driver(tegra_nand_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
+MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
+MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
+MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index b567d212fe7d..4d61a14fcb65 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -20,7 +20,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
-#include <asm/txx9/ndfmc.h>
+#include <linux/platform_data/txx9/ndfmc.h>
/* TXX9 NDFMC Registers */
#define TXX9_NDFDTR 0x00
@@ -73,7 +73,7 @@ struct txx9ndfmc_drvdata {
void __iomem *base;
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
- struct nand_hw_control hw_control;
+ struct nand_controller controller;
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -254,23 +254,25 @@ static void txx9ndfmc_initialize(struct platform_device *dev)
#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
-static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
+static int txx9ndfmc_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
-
- ret = nand_scan_ident(mtd, 1, NULL);
- if (!ret) {
- if (mtd->writesize >= 512) {
- /* Hardware ECC 6 byte ECC per 512 Byte data */
- chip->ecc.size = 512;
- chip->ecc.bytes = 6;
- }
- ret = nand_scan_tail(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (mtd->writesize >= 512) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ } else {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
}
- return ret;
+
+ return 0;
}
+static const struct nand_controller_ops txx9ndfmc_controller_ops = {
+ .attach_chip = txx9ndfmc_attach_chip,
+};
+
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
@@ -303,7 +305,8 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
(gbusclk + 500000) / 1000000, hold, spw);
- nand_hw_control_init(&drvdata->hw_control);
+ nand_controller_init(&drvdata->controller);
+ drvdata->controller.ops = &txx9ndfmc_controller_ops;
platform_set_drvdata(dev, drvdata);
txx9ndfmc_initialize(dev);
@@ -332,12 +335,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
chip->ecc.correct = txx9ndfmc_correct_data;
chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
chip->ecc.mode = NAND_ECC_HW;
- /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
chip->ecc.strength = 1;
chip->chip_delay = 100;
- chip->controller = &drvdata->hw_control;
+ chip->controller = &drvdata->controller;
nand_set_controller_data(chip, txx9_priv);
txx9_priv->dev = dev;
@@ -359,14 +359,14 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
- if (txx9ndfmc_nand_scan(mtd)) {
+ if (nand_scan(mtd, 1)) {
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
mtd->name = txx9_priv->mtdname;
- mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ mtd_device_register(mtd, NULL, 0);
drvdata->mtds[i] = mtd;
}
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index d5a22fc96878..6f6dcbf9095b 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -747,6 +747,69 @@ static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
}
}
+static int vf610_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ vf610_nfc_init_controller(nfc);
+
+ /* Bad block options. */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* Single buffer only, max 256 OOB minus ECC status */
+ if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
+ dev_err(nfc->dev, "Unsupported flash page size\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
+ dev_err(nfc->dev, "Unsupported flash with hwecc\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.size != mtd->writesize) {
+ dev_err(nfc->dev, "Step size needs to be page size\n");
+ return -ENXIO;
+ }
+
+ /* Only 64 byte ECC layouts known */
+ if (mtd->oobsize > 64)
+ mtd->oobsize = 64;
+
+ /* Use default large page ECC layout defined in NAND core */
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ if (chip->ecc.strength == 32) {
+ nfc->ecc_mode = ECC_60_BYTE;
+ chip->ecc.bytes = 60;
+ } else if (chip->ecc.strength == 24) {
+ nfc->ecc_mode = ECC_45_BYTE;
+ chip->ecc.bytes = 45;
+ } else {
+ dev_err(nfc->dev, "Unsupported ECC strength\n");
+ return -ENXIO;
+ }
+
+ chip->ecc.read_page = vf610_nfc_read_page;
+ chip->ecc.write_page = vf610_nfc_write_page;
+ chip->ecc.read_page_raw = vf610_nfc_read_page_raw;
+ chip->ecc.write_page_raw = vf610_nfc_write_page_raw;
+ chip->ecc.read_oob = vf610_nfc_read_oob;
+ chip->ecc.write_oob = vf610_nfc_write_oob;
+
+ chip->ecc.size = PAGE_2K;
+
+ return 0;
+}
+
+static const struct nand_controller_ops vf610_nfc_controller_ops = {
+ .attach_chip = vf610_nfc_attach_chip,
+};
+
static int vf610_nfc_probe(struct platform_device *pdev)
{
struct vf610_nfc *nfc;
@@ -827,67 +890,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
vf610_nfc_preinit_controller(nfc);
- /* first scan to find the device and get the page size */
- err = nand_scan_ident(mtd, 1, NULL);
- if (err)
- goto err_disable_clk;
-
- vf610_nfc_init_controller(nfc);
-
- /* Bad block options. */
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- chip->bbt_options |= NAND_BBT_NO_OOB;
-
- /* Single buffer only, max 256 OOB minus ECC status */
- if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
- dev_err(nfc->dev, "Unsupported flash page size\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- if (chip->ecc.mode == NAND_ECC_HW) {
- if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
- dev_err(nfc->dev, "Unsupported flash with hwecc\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- if (chip->ecc.size != mtd->writesize) {
- dev_err(nfc->dev, "Step size needs to be page size\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- /* Only 64 byte ECC layouts known */
- if (mtd->oobsize > 64)
- mtd->oobsize = 64;
-
- /* Use default large page ECC layout defined in NAND core */
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
- if (chip->ecc.strength == 32) {
- nfc->ecc_mode = ECC_60_BYTE;
- chip->ecc.bytes = 60;
- } else if (chip->ecc.strength == 24) {
- nfc->ecc_mode = ECC_45_BYTE;
- chip->ecc.bytes = 45;
- } else {
- dev_err(nfc->dev, "Unsupported ECC strength\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- chip->ecc.read_page = vf610_nfc_read_page;
- chip->ecc.write_page = vf610_nfc_write_page;
- chip->ecc.read_page_raw = vf610_nfc_read_page_raw;
- chip->ecc.write_page_raw = vf610_nfc_write_page_raw;
- chip->ecc.read_oob = vf610_nfc_read_oob;
- chip->ecc.write_oob = vf610_nfc_write_oob;
-
- chip->ecc.size = PAGE_2K;
- }
-
- /* second phase scan */
- err = nand_scan_tail(mtd);
+ /* Scan the NAND chip */
+ chip->dummy_controller.ops = &vf610_nfc_controller_ops;
+ err = nand_scan(mtd, 1);
if (err)
goto err_disable_clk;
diff --git a/drivers/mtd/nand/spi/Kconfig b/drivers/mtd/nand/spi/Kconfig
new file mode 100644
index 000000000000..7c37d2929b68
--- /dev/null
+++ b/drivers/mtd/nand/spi/Kconfig
@@ -0,0 +1,7 @@
+menuconfig MTD_SPI_NAND
+ tristate "SPI NAND device Support"
+ select MTD_NAND_CORE
+ depends on SPI_MASTER
+ select SPI_MEM
+ help
+ This is the framework for the SPI NAND device drivers.
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
new file mode 100644
index 000000000000..b74e074b363a
--- /dev/null
+++ b/drivers/mtd/nand/spi/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+spinand-objs := core.o macronix.o micron.o winbond.o
+obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
new file mode 100644
index 000000000000..30f83649c481
--- /dev/null
+++ b/drivers/mtd/nand/spi/core.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#define pr_fmt(fmt) "spi-nand: " fmt
+
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/spinand.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
+ const struct nand_page_io_req *req,
+ u16 *column)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int shift;
+
+ if (nand->memorg.planes_per_lun < 2)
+ return;
+
+	/* The plane number is passed in the MSBs just above the column address */
+ shift = fls(nand->memorg.pagesize);
+ *column |= req->pos.plane << shift;
+}
+
+static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+{
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ *val = *spinand->scratchbuf;
+ return 0;
+}
+
+static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = val;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_read_status(struct spinand_device *spinand, u8 *status)
+{
+ return spinand_read_reg_op(spinand, REG_STATUS, status);
+}
+
+static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ *cfg = spinand->cfg_cache[spinand->cur_target];
+ return 0;
+}
+
+static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cfg_cache[spinand->cur_target] == cfg)
+ return 0;
+
+ ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
+ if (ret)
+ return ret;
+
+ spinand->cfg_cache[spinand->cur_target] = cfg;
+ return 0;
+}
+
+/**
+ * spinand_upd_cfg() - Update the configuration register
+ * @spinand: the spinand device
+ * @mask: the mask encoding the bits to update in the config reg
+ * @val: the new value to apply
+ *
+ * Update the configuration register.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
+{
+ int ret;
+ u8 cfg;
+
+ ret = spinand_get_cfg(spinand, &cfg);
+ if (ret)
+ return ret;
+
+ cfg &= ~mask;
+ cfg |= val;
+
+ return spinand_set_cfg(spinand, cfg);
+}
+
+/**
+ * spinand_select_target() - Select a specific NAND target/die
+ * @spinand: the spinand device
+ * @target: the target/die to select
+ *
+ * Select a new target/die. If the chip only has one die, this function is a NOOP.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_select_target(struct spinand_device *spinand, unsigned int target)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cur_target == target)
+ return 0;
+
+ if (nand->memorg.ntargets == 1) {
+ spinand->cur_target = target;
+ return 0;
+ }
+
+ ret = spinand->select_target(spinand, target);
+ if (ret)
+ return ret;
+
+ spinand->cur_target = target;
+ return 0;
+}
+
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct device *dev = &spinand->spimem->spi->dev;
+ unsigned int target;
+ int ret;
+
+ spinand->cfg_cache = devm_kcalloc(dev,
+ nand->memorg.ntargets,
+ sizeof(*spinand->cfg_cache),
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ for (target = 0; target < nand->memorg.ntargets; target++) {
+ ret = spinand_select_target(spinand, target);
+ if (ret)
+ return ret;
+
+ /*
+ * We use spinand_read_reg_op() instead of spinand_get_cfg()
+ * here to bypass the config cache.
+ */
+ ret = spinand_read_reg_op(spinand, REG_CFG,
+ &spinand->cfg_cache[target]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spinand_init_quad_enable(struct spinand_device *spinand)
+{
+ bool enable = false;
+
+ if (!(spinand->flags & SPINAND_HAS_QE_BIT))
+ return 0;
+
+ if (spinand->op_templates.read_cache->data.buswidth == 4 ||
+ spinand->op_templates.write_cache->data.buswidth == 4 ||
+ spinand->op_templates.update_cache->data.buswidth == 4)
+ enable = true;
+
+ return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
+ enable ? CFG_QUAD_ENABLE : 0);
+}
+
+static int spinand_ecc_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
+ enable ? CFG_ECC_ENABLE : 0);
+}
+
+static int spinand_write_enable_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_load_page_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_read_from_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct spi_mem_op op = *spinand->op_templates.read_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_page_io_req adjreq = *req;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+ int ret;
+
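+	/*
+	 * The full page and/or OOB area is read into the bounce buffers;
+	 * the requested chunk is copied out below.
+	 */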
+ if (req->datalen) {
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.dataoffs = 0;
+ adjreq.databuf.in = spinand->databuf;
+ buf = spinand->databuf;
+ nbytes = adjreq.datalen;
+ }
+
+ if (req->ooblen) {
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ adjreq.oobbuf.in = spinand->oobbuf;
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+ column = nanddev_page_size(nand);
+ }
+ }
+
+ spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+ op.addr.val = column;
+
+ /*
+	 * Some controllers are limited in terms of max RX data size. In this
+ * case, just repeat the READ_CACHE operation after updating the
+ * column.
+ */
+ while (nbytes) {
+ op.data.buf.in = buf;
+ op.data.nbytes = nbytes;
+ ret = spi_mem_adjust_op_size(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ buf += op.data.nbytes;
+ nbytes -= op.data.nbytes;
+ op.addr.val += op.data.nbytes;
+ }
+
+ if (req->datalen)
+ memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
+ req->datalen);
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
+
+ return 0;
+}
+
+static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct spi_mem_op op = *spinand->op_templates.write_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_page_io_req adjreq = *req;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+ int ret;
+
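+	/* Pre-fill the cache buffer with 0xff so untouched bytes keep their erased value */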
+ memset(spinand->databuf, 0xff,
+ nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand));
+
+ if (req->datalen) {
+ memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
+ req->datalen);
+ adjreq.dataoffs = 0;
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.databuf.out = spinand->databuf;
+ nbytes = adjreq.datalen;
+ buf = spinand->databuf;
+ }
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
+ req->ooblen);
+
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+ column = nanddev_page_size(nand);
+ }
+ }
+
+ spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+
+ op = *spinand->op_templates.write_cache;
+ op.addr.val = column;
+
+ /*
+	 * Some controllers are limited in terms of max TX data size. In this
+ * case, split the operation into one LOAD CACHE and one or more
+ * LOAD RANDOM CACHE.
+ */
+ while (nbytes) {
+ op.data.buf.out = buf;
+ op.data.nbytes = nbytes;
+
+ ret = spi_mem_adjust_op_size(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ buf += op.data.nbytes;
+ nbytes -= op.data.nbytes;
+ op.addr.val += op.data.nbytes;
+
+ /*
+ * We need to use the RANDOM LOAD CACHE operation if there's
+ * more than one iteration, because the LOAD operation resets
+ * the cache to 0xff.
+ */
+ if (nbytes) {
+ column = op.addr.val;
+ op = *spinand->op_templates.update_cache;
+ op.addr.val = column;
+ }
+ }
+
+ return 0;
+}
+
+static int spinand_program_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_erase_op(struct spinand_device *spinand,
+ const struct nand_pos *pos)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, pos);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_wait(struct spinand_device *spinand, u8 *s)
+{
+ unsigned long timeo = jiffies + msecs_to_jiffies(400);
+ u8 status;
+ int ret;
+
+ do {
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ return ret;
+
+ if (!(status & STATUS_BUSY))
+ goto out;
+ } while (time_before(jiffies, timeo));
+
+ /*
+	 * Extra read, just in case the STATUS_BUSY bit has changed
+	 * since our last check
+ */
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ return ret;
+
+out:
+ if (s)
+ *s = status;
+
+ return status & STATUS_BUSY ? -ETIMEDOUT : 0;
+}
+
+static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+{
+ struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
+ SPINAND_MAX_ID_LEN);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (!ret)
+ memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
+
+ return ret;
+}
+
+static int spinand_reset_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_RESET_OP;
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ return spinand_wait(spinand, NULL);
+}
+
+static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
+{
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
+}
+
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nand->eccreq.strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req,
+ bool ecc_enabled)
+{
+ u8 status;
+ int ret;
+
+ ret = spinand_load_page_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = spinand_read_from_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ if (!ecc_enabled)
+ return 0;
+
+ return spinand_check_ecc_status(spinand, status);
+}
+
+static int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ u8 status;
+ int ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_to_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_program_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int max_bitflips = 0;
+ struct nand_io_iter iter;
+ bool enable_ecc = false;
+ bool ecc_failed = false;
+ int ret = 0;
+
+ if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
+ enable_ecc = true;
+
+ mutex_lock(&spinand->lock);
+
+ nanddev_io_for_each_page(nand, from, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_ecc_enable(spinand, enable_ecc);
+ if (ret)
+ break;
+
+ ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+ if (ret < 0 && ret != -EBADMSG)
+ break;
+
+ if (ret == -EBADMSG) {
+ ecc_failed = true;
+ mtd->ecc_stats.failed++;
+ ret = 0;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+ mutex_unlock(&spinand->lock);
+
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret ? ret : max_bitflips;
+}
+
+static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ bool enable_ecc = false;
+ int ret = 0;
+
+ if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
+ enable_ecc = true;
+
+ mutex_lock(&spinand->lock);
+
+ nanddev_io_for_each_page(nand, to, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_ecc_enable(spinand, enable_ecc);
+ if (ret)
+ break;
+
+ ret = spinand_write_page(spinand, &iter.req);
+ if (ret)
+ break;
+
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooblen = 2,
+ .ooboffs = 0,
+ .oobbuf.in = spinand->oobbuf,
+ .mode = MTD_OPS_RAW,
+ };
+
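+	/* Read the two bad block marker bytes in raw mode, with ECC disabled */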
+ memset(spinand->oobbuf, 0, 2);
+ spinand_select_target(spinand, pos->target);
+ spinand_read_page(spinand, &req, false);
+ if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
+ return true;
+
+ return false;
+}
+
+static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_isbad(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooboffs = 0,
+ .ooblen = 2,
+ .oobbuf.out = spinand->oobbuf,
+ };
+ int ret;
+
+ /* Erase block before marking it bad. */
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ spinand_erase_op(spinand, pos);
+
+ memset(spinand->oobbuf, 0, 2);
+ return spinand_write_page(spinand, &req);
+}
+
+static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_markbad(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 status;
+ int ret;
+
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_erase_op(spinand, pos);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (!ret && (status & STATUS_ERASE_FAILED))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int spinand_mtd_erase(struct mtd_info *mtd,
+ struct erase_info *einfo)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ mutex_lock(&spinand->lock);
+ ret = nanddev_mtd_erase(mtd, einfo);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_isreserved(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static const struct nand_ops spinand_ops = {
+ .erase = spinand_erase,
+ .markbad = spinand_markbad,
+ .isbad = spinand_isbad,
+};
+
+static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &macronix_spinand_manufacturer,
+ &micron_spinand_manufacturer,
+ &winbond_spinand_manufacturer,
+};
+
+static int spinand_manufacturer_detect(struct spinand_device *spinand)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
+ ret = spinand_manufacturers[i]->ops->detect(spinand);
+ if (ret > 0) {
+ spinand->manufacturer = spinand_manufacturers[i];
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static int spinand_manufacturer_init(struct spinand_device *spinand)
+{
+ if (spinand->manufacturer->ops->init)
+ return spinand->manufacturer->ops->init(spinand);
+
+ return 0;
+}
+
+static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
+{
+ /* Release manufacturer private data */
+ if (spinand->manufacturer->ops->cleanup)
+ return spinand->manufacturer->ops->cleanup(spinand);
+}
+
+static const struct spi_mem_op *
+spinand_select_op_variant(struct spinand_device *spinand,
+ const struct spinand_op_variants *variants)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
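+	/* Use the first variant the controller can execute for a full page + OOB transfer */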
+ for (i = 0; i < variants->nops; i++) {
+ struct spi_mem_op op = variants->ops[i];
+ unsigned int nbytes;
+ int ret;
+
+ nbytes = nanddev_per_page_oobsize(nand) +
+ nanddev_page_size(nand);
+
+ while (nbytes) {
+ op.data.nbytes = nbytes;
+ ret = spi_mem_adjust_op_size(spinand->spimem, &op);
+ if (ret)
+ break;
+
+ if (!spi_mem_supports_op(spinand->spimem, &op))
+ break;
+
+ nbytes -= op.data.nbytes;
+ }
+
+ if (!nbytes)
+ return &variants->ops[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * spinand_match_and_init() - Try to find a match between a device ID and an
+ * entry in a spinand_info table
+ * @spinand: SPI NAND object
+ * @table: SPI NAND device description table
+ * @table_size: size of the device description table
+ *
+ * Should be used by SPI NAND manufacturer drivers when they want to find a
+ * match between a device ID retrieved through the READ_ID command and an
+ * entry in the SPI NAND description table. If a match is found, the spinand
+ * object will be initialized with information provided by the matching
+ * spinand_info entry.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_match_and_init(struct spinand_device *spinand,
+ const struct spinand_info *table,
+ unsigned int table_size, u8 devid)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ for (i = 0; i < table_size; i++) {
+ const struct spinand_info *info = &table[i];
+ const struct spi_mem_op *op;
+
+ if (devid != info->devid)
+ continue;
+
+ nand->memorg = table[i].memorg;
+ nand->eccreq = table[i].eccreq;
+ spinand->eccinfo = table[i].eccinfo;
+ spinand->flags = table[i].flags;
+ spinand->select_target = table[i].select_target;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.read_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.read_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.write_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.write_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.update_cache);
+ spinand->op_templates.update_cache = op;
+
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int spinand_detect(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_read_id_op(spinand, spinand->id.data);
+ if (ret)
+ return ret;
+
+ spinand->id.len = SPINAND_MAX_ID_LEN;
+
+ ret = spinand_manufacturer_detect(spinand);
+ if (ret) {
+ dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
+ spinand->id.data);
+ return ret;
+ }
+
+ if (nand->memorg.ntargets > 1 && !spinand->select_target) {
+ dev_err(dev,
+ "SPI NANDs with more than one die must implement ->select_target()\n");
+ return -EINVAL;
+ }
+
+ dev_info(&spinand->spimem->spi->dev,
+ "%s SPI NAND was found.\n", spinand->manufacturer->name);
+ dev_info(&spinand->spimem->spi->dev,
+ "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
+ nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
+ nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
+
+ return 0;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .free = spinand_noecc_ooblayout_free,
+};
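For the 64-byte OOB geometry this fallback layout assumes, the free region spans bytes 2..63, so the mtd_ooblayout_count_freebytes() call in spinand_init() below reports 62 bytes and mtd->oobavail is set accordingly.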
+
+static int spinand_init(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ int ret, i;
+
+ /*
+ * We need a scratch buffer because the spi_mem interface requires that
+ * the buffer passed in spi_mem_op->data.buf be DMA-able.
+ */
+ spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
+ if (!spinand->scratchbuf)
+ return -ENOMEM;
+
+ ret = spinand_detect(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ /*
+ * Use kzalloc() instead of devm_kzalloc() here, because some drivers
+ * may use this buffer for DMA access.
+ * Memory allocated with devm_* helpers is not guaranteed to have
+ * DMA-safe alignment.
+ */
+ spinand->databuf = kzalloc(nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ GFP_KERNEL);
+ if (!spinand->databuf) {
+ ret = -ENOMEM;
+ goto err_free_bufs;
+ }
+
+ spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
+
+ ret = spinand_init_cfg_cache(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ goto err_free_bufs;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ goto err_free_bufs;
+ }
+
+ ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
+ if (ret)
+ goto err_manuf_cleanup;
+
+ /*
+ * Right now, we don't support ECC, so make the whole OOB
+ * area available to the user.
+ */
+ mtd->_read_oob = spinand_mtd_read;
+ mtd->_write_oob = spinand_mtd_write;
+ mtd->_block_isbad = spinand_mtd_block_isbad;
+ mtd->_block_markbad = spinand_mtd_block_markbad;
+ mtd->_block_isreserved = spinand_mtd_block_isreserved;
+ mtd->_erase = spinand_mtd_erase;
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ goto err_cleanup_nanddev;
+
+ mtd->oobavail = ret;
+
+ return 0;
+
+err_cleanup_nanddev:
+ nanddev_cleanup(nand);
+
+err_manuf_cleanup:
+ spinand_manufacturer_cleanup(spinand);
+
+err_free_bufs:
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+ return ret;
+}
+
+static void spinand_cleanup(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ nanddev_cleanup(nand);
+ spinand_manufacturer_cleanup(spinand);
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+}
+
+static int spinand_probe(struct spi_mem *mem)
+{
+ struct spinand_device *spinand;
+ struct mtd_info *mtd;
+ int ret;
+
+ spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
+ GFP_KERNEL);
+ if (!spinand)
+ return -ENOMEM;
+
+ spinand->spimem = mem;
+ spi_mem_set_drvdata(mem, spinand);
+ spinand_set_of_node(spinand, mem->spi->dev.of_node);
+ mutex_init(&spinand->lock);
+ mtd = spinand_to_mtd(spinand);
+ mtd->dev.parent = &mem->spi->dev;
+
+ ret = spinand_init(spinand);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_spinand_cleanup;
+
+ return 0;
+
+err_spinand_cleanup:
+ spinand_cleanup(spinand);
+
+ return ret;
+}
+
+static int spinand_remove(struct spi_mem *mem)
+{
+ struct spinand_device *spinand;
+ struct mtd_info *mtd;
+ int ret;
+
+ spinand = spi_mem_get_drvdata(mem);
+ mtd = spinand_to_mtd(spinand);
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ spinand_cleanup(spinand);
+
+ return 0;
+}
+
+static const struct spi_device_id spinand_ids[] = {
+ { .name = "spi-nand" },
+ { /* sentinel */ },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id spinand_of_ids[] = {
+ { .compatible = "spi-nand" },
+ { /* sentinel */ },
+};
+#endif
+
+static struct spi_mem_driver spinand_drv = {
+ .spidrv = {
+ .id_table = spinand_ids,
+ .driver = {
+ .name = "spi-nand",
+ .of_match_table = of_match_ptr(spinand_of_ids),
+ },
+ },
+ .probe = spinand_probe,
+ .remove = spinand_remove,
+};
+module_spi_mem_driver(spinand_drv);
+
+MODULE_DESCRIPTION("SPI NAND framework");
+MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
new file mode 100644
index 000000000000..98f6b9c4b684
--- /dev/null
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Macronix
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MACRONIX 0xC2
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
+ .ecc = mx35lfxge4ab_ooblayout_ecc,
+ .free = mx35lfxge4ab_ooblayout_free,
+};
+
+static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(1, 1),
+ SPI_MEM_OP_DATA_IN(1, eccsr, 1));
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 eccsr;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
+ return nand->eccreq.strength;
+
+ if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
+ return nand->eccreq.strength;
+
+ return eccsr;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info macronix_spinand_table[] = {
+ SPINAND_INFO("MX35LF1GE4AB", 0x12,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF2GE4AB", 0x22,
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+};
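As a sanity check on the first entry, assuming NAND_MEMORG()'s parameter order of (bits_per_cell, pagesize, oobsize, pages_per_eraseblock, eraseblocks_per_lun, planes_per_lun, luns_per_target, ntargets): the MX35LF1GE4AB is described as 1 bit per cell, 2048-byte pages with 64-byte OOB, 64 pages per block and 1024 blocks, i.e. 128 KiB blocks and 64 x 2048 x 1024 = 128 MiB (1 Gbit) total, matching the dev_info() line printed by spinand_detect(). The MX35LF2GE4AB doubles the block count (and uses two planes) for 256 MiB.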
+
+static int macronix_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
+ * raw_id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_MACRONIX)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, macronix_spinand_table,
+ ARRAY_SIZE(macronix_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
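Note the tri-state return contract implied here: 0 means "not a Macronix part, let the core try the next manufacturer table", 1 means the device was matched and initialized, and a negative value aborts detection with an error. The Micron and Winbond detect hooks below follow the same convention.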
+
+static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
+ .detect = macronix_spinand_detect,
+};
+
+const struct spinand_manufacturer macronix_spinand_manufacturer = {
+ .id = SPINAND_MFR_MACRONIX,
+ .name = "Macronix",
+ .ops = &macronix_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
new file mode 100644
index 000000000000..9c4381d6847b
--- /dev/null
+++ b/drivers/mtd/nand/spi/micron.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MICRON 0x2c
+
+#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
+#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
+#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 64;
+ region->length = 64;
+
+ return 0;
+}
+
+static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
+ .ecc = mt29f2g01abagd_ooblayout_ecc,
+ .free = mt29f2g01abagd_ooblayout_free,
+};
+
+static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & MICRON_STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case MICRON_STATUS_ECC_1TO3_BITFLIPS:
+ return 3;
+
+ case MICRON_STATUS_ECC_4TO6_BITFLIPS:
+ return 6;
+
+ case MICRON_STATUS_ECC_7TO8_BITFLIPS:
+ return 8;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
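Returning the top of each reported range (3, 6 or 8 bitflips) errs on the conservative side: the MTD core compares the maximum bitflip count returned by a read against mtd->bitflip_threshold and reports -EUCLEAN once it is reached, so overestimating nudges the wear-leveling layer to migrate data sooner rather than later.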
+
+static const struct spinand_info micron_spinand_table[] = {
+ SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
+ mt29f2g01abagd_ecc_get_status)),
+};
+
+static int micron_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Micron SPI NAND read ID needs a dummy byte,
+ * so the first byte in raw_id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_MICRON)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, micron_spinand_table,
+ ARRAY_SIZE(micron_spinand_table), id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
+ .detect = micron_spinand_detect,
+};
+
+const struct spinand_manufacturer micron_spinand_manufacturer = {
+ .id = SPINAND_MFR_MICRON,
+ .name = "Micron",
+ .ops = &micron_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
new file mode 100644
index 000000000000..67baa1b32c00
--- /dev/null
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 exceet electronics GmbH
+ *
+ * Authors:
+ * Frieder Schrempf <frieder.schrempf@exceet.de>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_WINBOND 0xEF
+
+#define WINBOND_CFG_BUF_READ BIT(3)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 2;
+ region->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
+ .ecc = w25m02gv_ooblayout_ecc,
+ .free = w25m02gv_ooblayout_free,
+};
+
+static int w25m02gv_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1,
+ spinand->scratchbuf,
+ 1));
+
+ *spinand->scratchbuf = target;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
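The 0xc2 opcode is the W25M02GV's die-select command: the single payload byte picks which of the stacked dies subsequent operations address. The byte is staged in spinand->scratchbuf rather than on the stack because, as the allocation comment in core.c above notes, buffers handed to spi_mem must be DMA-able.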
+
+static const struct spinand_info winbond_spinand_table[] = {
+ SPINAND_INFO("W25M02GV", 0xAB,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+};
+
+/**
+ * winbond_spinand_detect() - initialize the device-related parts of the
+ * spinand_device struct if it is a Winbond device.
+ * @spinand: SPI NAND device structure
+ *
+ * Return: 1 if a Winbond device was identified and initialized, 0 if the
+ * device is not a Winbond part, a negative error code otherwise.
+ */
+static int winbond_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Winbond SPI NAND read ID needs a dummy byte,
+ * so the first byte in raw_id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_WINBOND)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, winbond_spinand_table,
+ ARRAY_SIZE(winbond_spinand_table), id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static int winbond_spinand_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ /*
+ * Make sure all dies are in buffer read mode and not continuous read
+ * mode.
+ */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ spinand_select_target(spinand, i);
+ spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ,
+ WINBOND_CFG_BUF_READ);
+ }
+
+ return 0;
+}
+
+static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
+ .detect = winbond_spinand_detect,
+ .init = winbond_spinand_init,
+};
+
+const struct spinand_manufacturer winbond_spinand_manufacturer = {
+ .id = SPINAND_MFR_WINBOND,
+ .name = "Winbond",
+ .ops = &winbond_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 27184e3874db..91b7fb326f9a 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -577,7 +577,7 @@ static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
int NFTL_mount(struct NFTLrecord *s)
{
int i;
- unsigned int first_logical_block, logical_block, rep_block, nb_erases, erase_mark;
+ unsigned int first_logical_block, logical_block, rep_block, erase_mark;
unsigned int block, first_block, is_first_block;
int chain_length, do_format_chain;
struct nftl_uci0 h0;
@@ -621,7 +621,6 @@ int NFTL_mount(struct NFTLrecord *s)
logical_block = le16_to_cpu ((h0.VirtUnitNum | h0.SpareVirtUnitNum));
rep_block = le16_to_cpu ((h0.ReplUnitNum | h0.SpareReplUnitNum));
- nb_erases = le32_to_cpu (h1.WearInfo);
erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
is_first_block = !(logical_block >> 15);
diff --git a/drivers/mtd/parsers/parser_trx.c b/drivers/mtd/parsers/parser_trx.c
index 17ac33599783..4a89a68622fe 100644
--- a/drivers/mtd/parsers/parser_trx.c
+++ b/drivers/mtd/parsers/parser_trx.c
@@ -116,9 +116,16 @@ static int parser_trx_parse(struct mtd_info *mtd,
return i;
};
+static const struct of_device_id mtd_parser_trx_of_match_table[] = {
+ { .compatible = "brcm,trx" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_trx_of_match_table);
+
static struct mtd_part_parser mtd_parser_trx = {
.parse_fn = parser_trx_parse,
.name = "trx",
+ .of_match_table = mtd_parser_trx_of_match_table,
};
module_mtd_part_parser(mtd_parser_trx);
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c
index 6c5708bacad8..820048726b4f 100644
--- a/drivers/mtd/spi-nor/atmel-quadspi.c
+++ b/drivers/mtd/spi-nor/atmel-quadspi.c
@@ -34,7 +34,7 @@
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
/* QSPI register offsets */
#define QSPI_CR 0x0000 /* Control Register */
@@ -737,6 +737,26 @@ static int atmel_qspi_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused atmel_qspi_suspend(struct device *dev)
+{
+ struct atmel_qspi *aq = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aq->clk);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_resume(struct device *dev)
+{
+ struct atmel_qspi *aq = dev_get_drvdata(dev);
+
+ clk_prepare_enable(aq->clk);
+
+ return atmel_qspi_init(aq);
+}
+
+static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
+ atmel_qspi_resume);
static const struct of_device_id atmel_qspi_dt_ids[] = {
{ .compatible = "atmel,sama5d2-qspi" },
@@ -749,6 +769,7 @@ static struct platform_driver atmel_qspi_driver = {
.driver = {
.name = "atmel_qspi",
.of_match_table = atmel_qspi_dt_ids,
+ .pm = &atmel_qspi_pm_ops,
},
.probe = atmel_qspi_probe,
.remove = atmel_qspi_remove,
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index c3f7aaa5d18f..8e714fbfa521 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -525,15 +525,14 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
reg_base + CQSPI_REG_INDIRECTRD);
while (remaining > 0) {
- ret = wait_for_completion_timeout(&cqspi->transfer_complete,
- msecs_to_jiffies
- (CQSPI_READ_TIMEOUT_MS));
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
- if (!ret && bytes_to_read == 0) {
+ if (ret && bytes_to_read == 0) {
dev_err(nor->dev, "Indirect read timeout, no bytes\n");
- ret = -ETIMEDOUT;
goto failrd;
}
@@ -649,10 +648,8 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
iowrite32_rep(cqspi->ahb_base, txbuf,
DIV_ROUND_UP(write_bytes, 4));
- ret = wait_for_completion_timeout(&cqspi->transfer_complete,
- msecs_to_jiffies
- (CQSPI_TIMEOUT_MS));
- if (!ret) {
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
dev_err(nor->dev, "Indirect write timeout\n");
ret = -ETIMEDOUT;
goto failwr;
@@ -926,10 +923,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
if (ret)
return ret;
- if (f_pdata->use_direct_mode)
+ if (f_pdata->use_direct_mode) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
- else
+ ret = cqspi_wait_idle(cqspi);
+ } else {
ret = cqspi_indirect_write_execute(nor, to, buf, len);
+ }
if (ret)
return ret;
@@ -986,9 +985,8 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
}
dma_async_issue_pending(cqspi->rx_chan);
- ret = wait_for_completion_timeout(&cqspi->rx_dma_complete,
- msecs_to_jiffies(len));
- if (ret <= 0) {
+ if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
+ msecs_to_jiffies(len))) {
dmaengine_terminate_sync(cqspi->rx_chan);
dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
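All three hunks above converge on the same canonical pattern: wait_for_completion_timeout() returns the remaining jiffies (0 on timeout), never a negative errno, so its result must not be funneled through a signed ret that later carries error codes. A minimal sketch of the pattern (the completion argument and the 1000 ms timeout are placeholders):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait(struct completion *done)
{
	unsigned long time_left;

	/* 0 means the completion never fired within the timeout. */
	time_left = wait_for_completion_timeout(done,
						msecs_to_jiffies(1000));
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}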
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index d2cbfc27826e..af0a22019516 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -908,7 +908,7 @@ struct intel_spi *intel_spi_probe(struct device *dev,
if (!ispi->writeable || !writeable)
ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
- ret = mtd_device_parse_register(&ispi->nor.mtd, NULL, NULL, &part, 1);
+ ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index 15374216d4d9..0c9094ec5966 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -436,6 +436,7 @@ static int nxp_spifi_probe(struct platform_device *pdev)
}
ret = nxp_spifi_setup_flash(spifi, flash_np);
+ of_node_put(flash_np);
if (ret) {
dev_err(&pdev->dev, "unable to setup flash chip\n");
goto dis_clks;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d9c368c44194..f028277fb1ce 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -2757,8 +2757,18 @@ static int spi_nor_init(struct spi_nor *nor)
if ((nor->addr_width == 4) &&
(JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
- !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
set_4byte(nor, nor->info, 1);
+ }
return 0;
}
@@ -2781,7 +2791,8 @@ void spi_nor_restore(struct spi_nor *nor)
/* restore the addressing mode */
if ((nor->addr_width == 4) &&
(JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
- !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ !(nor->info->flags & SPI_NOR_4B_OPCODES) &&
+ (nor->flags & SNOR_F_BROKEN_RESET))
set_4byte(nor, nor->info, 0);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
@@ -2911,6 +2922,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
}
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
/* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & SPI_NOR_NO_FR)
params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index 72553506a00b..13e9fc961d3b 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -355,7 +355,7 @@ static int stm32_qspi_read_reg(struct spi_nor *nor,
struct device *dev = flash->qspi->dev;
struct stm32_qspi_cmd cmd;
- dev_dbg(dev, "read_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len);
+ dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = opcode;
@@ -376,7 +376,7 @@ static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode,
struct device *dev = flash->qspi->dev;
struct stm32_qspi_cmd cmd;
- dev_dbg(dev, "write_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len);
+ dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = opcode;
@@ -398,7 +398,7 @@ static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
struct stm32_qspi_cmd cmd;
int err;
- dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#zx\n",
+ dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n",
nor->read_opcode, buf, (u32)from, len);
memset(&cmd, 0, sizeof(cmd));
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 45c329694a5e..22547d7a84ea 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -367,6 +367,10 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
return count;
}
+ /*
+ * We deliberately ignore the skip_check flag here: we have just
+ * written this data and want to verify it was written correctly.
+ */
err = ubi_check_volume(ubi, vol->vol_id);
if (err < 0)
return err;
@@ -622,6 +626,13 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
req->vol_type != UBI_STATIC_VOLUME)
goto bad;
+ if (req->flags & ~UBI_VOL_VALID_FLGS)
+ goto bad;
+
+ if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG &&
+ req->vol_type != UBI_STATIC_VOLUME)
+ goto bad;
+
if (req->alignment > ubi->leb_size)
goto bad;
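From userspace, the new flag is requested at volume-creation time through the UBI_IOCMKVOL ioctl. A hedged sketch, assuming the uapi side of this series adds the flags field and the UBI_VOL_SKIP_CRC_CHECK_FLG definition to include/uapi/mtd/ubi-user.h (error handling elided; the volume name is made up):

#include <string.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Create a static volume whose CRC check is skipped at open time. */
static int mkvol_skip_crc(int ubi_fd, long long bytes)
{
	struct ubi_mkvol_req req;

	memset(&req, 0, sizeof(req));
	req.vol_id = UBI_VOL_NUM_AUTO;
	req.alignment = 1;
	req.bytes = bytes;
	req.vol_type = UBI_STATIC_VOLUME;
	req.flags = UBI_VOL_SKIP_CRC_CHECK_FLG;
	req.name_len = strlen("boot");
	strcpy(req.name, "boot");

	return ioctl(ubi_fd, UBI_IOCMKVOL, &req);
}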
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d4b2e8744498..e9e9ecbcedcc 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -202,7 +202,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
desc->mode = mode;
mutex_lock(&ubi->ckvol_mutex);
- if (!vol->checked) {
+ if (!vol->checked && !vol->skip_check) {
/* This is the first open - check the volume */
err = ubi_check_volume(ubi, vol_id);
if (err < 0) {
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 195ff8ca8211..b5fe8f82281b 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -45,6 +45,11 @@ enum {
* Volume flags used in the volume table record.
*
* @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
+ * @UBI_VTBL_SKIP_CRC_CHECK_FLG: skip the CRC check done on a static volume at
+ * open time. Should only be set on volumes that
+ * are used by upper layers doing this kind of
+ * check. The main use case for this flag is
+ * reducing boot time
*
* %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
* table. UBI automatically re-sizes the volume which has this flag and makes
@@ -76,6 +81,7 @@ enum {
*/
enum {
UBI_VTBL_AUTORESIZE_FLG = 0x01,
+ UBI_VTBL_SKIP_CRC_CHECK_FLG = 0x02,
};
/*
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index f5ba97c46160..d47b9e436e67 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -327,6 +327,9 @@ struct ubi_eba_leb_desc {
* atomic LEB change
*
* @eba_tbl: EBA table of this volume (LEB->PEB mapping)
+ * @skip_check: %1 if CRC check of this static volume should be skipped.
+ * Directly reflects the presence of the
+ * %UBI_VTBL_SKIP_CRC_CHECK_FLG flag in the vtbl entry
* @checked: %1 if this static volume was checked
* @corrupted: %1 if the volume is corrupted (static volumes only)
* @upd_marker: %1 if the update marker is set for this volume
@@ -374,6 +377,7 @@ struct ubi_volume {
void *upd_buf;
struct ubi_eba_table *eba_tbl;
+ unsigned int skip_check:1;
unsigned int checked:1;
unsigned int corrupted:1;
unsigned int upd_marker:1;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 0be516780e92..729588b94e41 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -174,6 +174,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
+ if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG)
+ vol->skip_check = 1;
+
spin_lock(&ubi->volumes_lock);
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
@@ -299,6 +302,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vtbl_rec.vol_type = UBI_VID_DYNAMIC;
else
vtbl_rec.vol_type = UBI_VID_STATIC;
+
+ if (vol->skip_check)
+ vtbl_rec.flags |= UBI_VTBL_SKIP_CRC_CHECK_FLG;
+
memcpy(vtbl_rec.name, vol->name, vol->name_len);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
@@ -733,6 +740,11 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
ubi_err(ubi, "bad used_bytes");
goto fail;
}
+
+ if (vol->skip_check) {
+ ubi_err(ubi, "bad skip_check");
+ goto fail;
+ }
} else {
if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
ubi_err(ubi, "bad used_ebs");
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 94d7a865b135..1bc82154bb18 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -560,6 +560,9 @@ static int init_volumes(struct ubi_device *ubi,
vol->name[vol->name_len] = '\0';
vol->vol_id = i;
+ if (vtbl[i].flags & UBI_VTBL_SKIP_CRC_CHECK_FLG)
+ vol->skip_check = 1;
+
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
@@ -579,6 +582,16 @@ static int init_volumes(struct ubi_device *ubi,
reserved_pebs += vol->reserved_pebs;
/*
+ * We use ubi->peb_count and not vol->reserved_pebs because
+ * we want to keep the code simple. Otherwise we'd have to
+ * resize/check the bitmap upon volume resize too.
+ * Allocating a few bytes more does not hurt.
+ */
+ err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
+ if (err)
+ return err;
+
+ /*
* In case of dynamic volume UBI knows nothing about how many
* data is stored there. So assume the whole volume is used.
*/
@@ -620,16 +633,6 @@ static int init_volumes(struct ubi_device *ubi,
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
vol->used_bytes += av->last_data_size;
vol->last_eb_bytes = av->last_data_size;
-
- /*
- * We use ubi->peb_count and not vol->reserved_pebs because
- * we want to keep the code simple. Otherwise we'd have to
- * resize/check the bitmap upon volume resize too.
- * Allocating a few bytes more does not hurt.
- */
- err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
- if (err)
- return err;
}
/* And add the layout volume */
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 6241678e99af..7659d6c5f718 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -21,6 +21,16 @@ config MUX_ADG792A
To compile the driver as a module, choose M here: the module will
be called mux-adg792a.
+config MUX_ADGS1408
+ tristate "Analog Devices ADGS1408/ADGS1409 Multiplexers"
+ depends on SPI
+ help
+ This driver supports the Analog Devices ADGS1408 8:1 and
+ ADGS1409 dual 4:1 multiplexer switches.
+
+ To compile the driver as a module, choose M here: the module will
+ be called mux-adgs1408.
+
config MUX_GPIO
tristate "GPIO-controlled Multiplexer"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile
index c3d883955fd5..6e9fa47daf56 100644
--- a/drivers/mux/Makefile
+++ b/drivers/mux/Makefile
@@ -5,10 +5,12 @@
mux-core-objs := core.o
mux-adg792a-objs := adg792a.o
+mux-adgs1408-objs := adgs1408.o
mux-gpio-objs := gpio.o
mux-mmio-objs := mmio.o
obj-$(CONFIG_MULTIPLEXER) += mux-core.o
obj-$(CONFIG_MUX_ADG792A) += mux-adg792a.o
+obj-$(CONFIG_MUX_ADGS1408) += mux-adgs1408.o
obj-$(CONFIG_MUX_GPIO) += mux-gpio.o
obj-$(CONFIG_MUX_MMIO) += mux-mmio.o
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
new file mode 100644
index 000000000000..0f7cf54e3234
--- /dev/null
+++ b/drivers/mux/adgs1408.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ADGS1408/ADGS1409 SPI MUX driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/of_platform.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#define ADGS1408_SW_DATA (0x01)
+#define ADGS1408_REG_READ(reg) ((reg) | 0x80)
+#define ADGS1408_DISABLE (0x00)
+#define ADGS1408_MUX(state) (((state) << 1) | 1)
+
+enum adgs1408_chip_id {
+ ADGS1408 = 1,
+ ADGS1409,
+};
+
+static int adgs1408_spi_reg_write(struct spi_device *spi,
+ u8 reg_addr, u8 reg_data)
+{
+ u8 tx_buf[2];
+
+ tx_buf[0] = reg_addr;
+ tx_buf[1] = reg_data;
+
+ return spi_write_then_read(spi, tx_buf, sizeof(tx_buf), NULL, 0);
+}
+
+static int adgs1408_set(struct mux_control *mux, int state)
+{
+ struct spi_device *spi = to_spi_device(mux->chip->dev.parent);
+ u8 reg;
+
+ if (state == MUX_IDLE_DISCONNECT)
+ reg = ADGS1408_DISABLE;
+ else
+ reg = ADGS1408_MUX(state);
+
+ return adgs1408_spi_reg_write(spi, ADGS1408_SW_DATA, reg);
+}
+
+static const struct mux_control_ops adgs1408_ops = {
+ .set = adgs1408_set,
+};
+
+static int adgs1408_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ enum adgs1408_chip_id chip_id;
+ struct mux_chip *mux_chip;
+ struct mux_control *mux;
+ s32 idle_state;
+ int ret;
+
+ chip_id = (enum adgs1408_chip_id)of_device_get_match_data(dev);
+ if (!chip_id)
+ chip_id = spi_get_device_id(spi)->driver_data;
+
+ mux_chip = devm_mux_chip_alloc(dev, 1, 0);
+ if (IS_ERR(mux_chip))
+ return PTR_ERR(mux_chip);
+
+ mux_chip->ops = &adgs1408_ops;
+
+ ret = adgs1408_spi_reg_write(spi, ADGS1408_SW_DATA, ADGS1408_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
+ if (ret < 0)
+ idle_state = MUX_IDLE_AS_IS;
+
+ mux = mux_chip->mux;
+
+ if (chip_id == ADGS1408)
+ mux->states = 8;
+ else
+ mux->states = 4;
+
+ switch (idle_state) {
+ case MUX_IDLE_DISCONNECT:
+ case MUX_IDLE_AS_IS:
+ case 0 ... 7:
+ /* adgs1409 supports only 4 states */
+ if (idle_state < mux->states) {
+ mux->idle_state = idle_state;
+ break;
+ }
+ /* fall through */
+ default:
+ dev_err(dev, "invalid idle-state %d\n", idle_state);
+ return -EINVAL;
+ }
+
+ return devm_mux_chip_register(dev, mux_chip);
+}
+
+static const struct spi_device_id adgs1408_spi_id[] = {
+ { "adgs1408", ADGS1408 },
+ { "adgs1409", ADGS1409 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adgs1408_spi_id);
+
+static const struct of_device_id adgs1408_of_match[] = {
+ { .compatible = "adi,adgs1408", .data = (void *)ADGS1408, },
+ { .compatible = "adi,adgs1409", .data = (void *)ADGS1409, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adgs1408_of_match);
+
+static struct spi_driver adgs1408_driver = {
+ .driver = {
+ .name = "adgs1408",
+ .of_match_table = of_match_ptr(adgs1408_of_match),
+ },
+ .probe = adgs1408_probe,
+ .id_table = adgs1408_spi_id,
+};
+module_spi_driver(adgs1408_driver);
+
+MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
+MODULE_LICENSE("GPL v2");
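For orientation, consumers of this driver go through the generic mux API from linux/mux/consumer.h; the sketch below is hypothetical (the calling driver, the state value 3 and the NULL mux name are made up), but the calls themselves are the standard consumer API. Note how state 3 becomes register value (3 << 1) | 1 = 0x07 via ADGS1408_MUX():

#include <linux/err.h>
#include <linux/mux/consumer.h>

static int example_route_through_mux(struct device *dev)
{
	struct mux_control *mux;
	int ret;

	mux = mux_control_get(dev, NULL);	/* first (only) mux control */
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	ret = mux_control_select(mux, 3);	/* S4 input, reg = 0x07 */
	if (ret)
		goto put;

	/* ... use the selected signal path ... */

	mux_control_deselect(mux);		/* return to idle-state */
put:
	mux_control_put(mux);
	return ret;
}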
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 63e3844c5bec..a764a83f99da 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_upper_unlink;
}
+ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+
/* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
@@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
- bond->nest_level = dev_get_nest_level(bond_dev);
netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,
@@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
}
}
+static int bond_get_nest_level(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+
+ return bond->nest_level;
+}
+
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;
- spin_lock(&bond->stats_lock);
+ spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
memcpy(stats, &bond->bond_stats, sizeof(*stats));
rcu_read_lock();
@@ -4094,7 +4102,8 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to
@@ -4227,6 +4236,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_neigh_setup = bond_neigh_setup,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+ .ndo_get_lock_subclass = bond_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
@@ -4725,6 +4735,7 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
+ bond->nest_level = SINGLE_DEPTH_NESTING;
netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 98663c50ded0..4d5d01cb8141 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
- netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
- newval->string);
- /* disable arp monitoring */
- bond->params.arp_interval = 0;
- /* set miimon to default value */
- bond->params.miimon = BOND_DEFAULT_MIIMON;
- netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
- bond->params.miimon);
+ if (!bond_mode_uses_arp(newval->value)) {
+ if (bond->params.arp_interval) {
+ netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+ newval->string);
+ /* disable arp monitoring */
+ bond->params.arp_interval = 0;
+ }
+
+ if (!bond->params.miimon) {
+ /* set miimon to default value */
+ bond->params.miimon = BOND_DEFAULT_MIIMON;
+ netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+ bond->params.miimon);
+ }
}
if (newval->value == BOND_MODE_ALB)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6096440e96ea..35847250da5a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -160,14 +160,19 @@ static ssize_t bonding_sysfs_store_option(struct device *d,
{
struct bonding *bond = to_bond(d);
const struct bond_option *opt;
+ char *buffer_clone;
int ret;
opt = bond_opt_get_by_name(attr->attr.name);
if (WARN_ON(!opt))
return -ENOENT;
- ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+ buffer_clone = kstrndup(buffer, count, GFP_KERNEL);
+ if (!buffer_clone)
+ return -ENOMEM;
+ ret = bond_opt_tryset_rtnl(bond, opt->id, buffer_clone);
if (!ret)
ret = count;
+ kfree(buffer_clone);
return ret;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2cb75988b328..7cdd0cead693 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -73,6 +73,12 @@ config CAN_CALC_BITTIMING
config CAN_LEDS
bool "Enable LED triggers for Netlink based drivers"
depends on LEDS_CLASS
+ # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do
+ # everything that this driver is doing. This is marked as broken
+ # because it uses stuff that is intended to be changed or removed.
+ # Please consider switching to the netdev trigger and confirm it
+ # fulfills your needs instead of fixing this driver.
+ depends on BROKEN
select LEDS_TRIGGERS
---help---
This option adds two LED triggers for packet receive and transmit
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d4dd4da23997..da636a22c542 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
static int i82527_compat;
module_param(i82527_compat, int, 0444);
-MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode "
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
"without using additional functions");
/*
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c71f1cb205f..49163570a63a 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -649,8 +649,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cf = skb_put(skb, sizeof(struct can_frame));
- memset(*cf, 0, sizeof(struct can_frame));
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
return skb;
}
@@ -678,8 +677,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cfd = skb_put(skb, sizeof(struct canfd_frame));
- memset(*cfd, 0, sizeof(struct canfd_frame));
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
return skb;
}
@@ -703,7 +701,8 @@ EXPORT_SYMBOL_GPL(alloc_can_err_skb);
/*
* Allocate and setup space for the CAN network device
*/
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
struct can_priv *priv;
@@ -715,7 +714,8 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
else
size = sizeof_priv;
- dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup);
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
if (!dev)
return NULL;
@@ -734,7 +734,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
return dev;
}
-EXPORT_SYMBOL_GPL(alloc_candev);
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
/*
* Free space of the CAN network device
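Existing callers of alloc_candev() keep working because the companion header change (include/linux/can/dev.h, not visible in this hunk) presumably wraps the new multi-queue allocator; a sketch of that wrapper:

/* Single-queue compatibility wrapper around the _mqs allocator. */
#define alloc_candev(sizeof_priv, echo_skb_max) \
	alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)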
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d53a45bf2a72..8e972ef08637 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1,24 +1,13 @@
-/*
- * flexcan.c - FLEXCAN CAN controller driver
- *
- * Copyright (c) 2005-2006 Varma Electronics Oy
- * Copyright (c) 2009 Sascha Hauer, Pengutronix
- * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- * Copyright (c) 2014 David Jander, Protonic Holland
- *
- * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
- *
- * LICENCE:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// flexcan.c - FLEXCAN CAN controller driver
+//
+// Copyright (c) 2005-2006 Varma Electronics Oy
+// Copyright (c) 2009 Sascha Hauer, Pengutronix
+// Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2014 David Jander, Protonic Holland
+//
+// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
#include <linux/netdevice.h>
#include <linux/can.h>
@@ -523,7 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
return err;
}
-static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index adfdb66a486e..02042cb09bd2 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev)
return 0;
}
-static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ican3_dev *mod = netdev_priv(ndev);
struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b397a33f3d32..9b449400376b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
int err;
err = pm_runtime_get_sync(priv->device);
- if (err)
+ if (err < 0) {
pm_runtime_put_noidle(priv->device);
+ return err;
+ }
- return err;
+ return 0;
}
static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
- cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+ CCCR_NISO);
/* Only 3.2.x has NISO Bit implemented */
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(cclk);
priv->mram_base = mram_addr;
- m_can_of_parse_mram(priv, mram_config_vals);
-
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto clk_disable;
}
+ m_can_of_parse_mram(priv, mram_config_vals);
+
devm_can_led_init(dev);
of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
return ret;
}
-/* TODO: runtime PM with power down or sleep mode */
-
static __maybe_unused int m_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- m_can_init_ram(priv);
-
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
if (ret)
return ret;
+ m_can_init_ram(priv);
m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
return 0;
}
cdm = of_iomap(np_cdm, 0);
+ if (!cdm) {
+ of_node_put(np_cdm);
+ dev_err(&ofdev->dev, "can't map clock node!\n");
+ return 0;
+ }
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index ed8561d4a90f..5696d7e80751 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -486,7 +486,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
if (msg_size <= 0)
break;
- msg_ptr += msg_size;
+ msg_ptr += ALIGN(msg_size, 4);
}
if (msg_size < 0)
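The fix accounts for records being padded to 32-bit boundaries in the rx area: a 61-byte message, for example, now advances msg_ptr by ALIGN(61, 4) = 64 bytes, so the next header is read from its actual, aligned position instead of 3 bytes early.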
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b9e28578bc7b..c458d5fdc8d3 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
+#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
+ ((u32)(y) << 16) | \
+ ((u32)(z) << 8))
+
/* System Control Registers Bits */
#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -170,9 +174,6 @@ struct pciefd_page {
u32 size;
};
-#define CANFD_IRQ_SET 0x00000001
-#define CANFD_TX_PATH_SET 0x00000002
-
/* CAN-FD channel object */
struct pciefd_board;
struct pciefd_can {
@@ -414,7 +415,7 @@ static int pciefd_pre_cmd(struct peak_canfd_priv *ucan)
break;
/* going into operational mode: setup IRQ handler */
- err = request_irq(priv->board->pci_dev->irq,
+ err = request_irq(priv->ucan.ndev->irq,
pciefd_irq_handler,
IRQF_SHARED,
PCIEFD_DRV_NAME,
@@ -487,15 +488,18 @@ static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
/* controller now in reset mode: */
+ /* disable IRQ for this CAN */
+ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+ PCIEFD_REG_CAN_RX_CTL_CLR);
+
/* stop and reset DMA addresses in Tx/Rx engines */
pciefd_can_clear_tx_dma(priv);
pciefd_can_clear_rx_dma(priv);
- /* disable IRQ for this CAN */
- pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
- PCIEFD_REG_CAN_RX_CTL_CLR);
+ /* wait for above commands to complete (read cycle) */
+ (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
- free_irq(priv->board->pci_dev->irq, priv);
+ free_irq(priv->ucan.ndev->irq, priv);
ucan->can.state = CAN_STATE_STOPPED;
@@ -634,7 +638,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
GFP_KERNEL);
if (!priv->tx_dma_vaddr) {
dev_err(&pciefd->pci_dev->dev,
- "Tx dmaim_alloc_coherent(%u) failure\n",
+ "Tx dmam_alloc_coherent(%u) failure\n",
PCIEFD_TX_DMA_SIZE);
goto err_free_candev;
}
@@ -687,7 +691,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
pciefd->can[pciefd->can_count] = priv;
dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
- ndev->name, priv->reg_base, pciefd->pci_dev->irq);
+ ndev->name, priv->reg_base, ndev->irq);
return 0;
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
"%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
hw_ver_major, hw_ver_minor, hw_ver_sub);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /* The DMA logic in firmware < v3.3.0 doesn't correctly handle a mix
+ * of 32-bit and 64-bit logical addresses: as a workaround, restrict
+ * DMA to 32-bit addresses when such firmware is detected.
+ */
+ if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+ PCIEFD_FW_VERSION(3, 3, 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ dev_warn(&pdev->dev,
+ "warning: can't set DMA mask %llxh (err %d)\n",
+ DMA_BIT_MASK(32), err);
+ }
+#endif
+
/* stop system clock */
pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
PCIEFD_REG_SYS_CTL_CLR);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5adc95c922ee..a97b81d1d0da 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -608,7 +608,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
writeb(0x00, cfg_base + PITA_GPIOICR);
/* Toggle reset */
writeb(0x05, cfg_base + PITA_MISC + 3);
- mdelay(5);
+ usleep_range(5000, 6000);
/* Leave parport mux mode */
writeb(0x04, cfg_base + PITA_MISC + 3);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 485b19c9ae47..b8c39ede7cd5 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -530,7 +530,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
pcan_write_reg(card, PCC_CCR, ccr);
/* wait 2ms before unresetting channels */
- mdelay(2);
+ usleep_range(2000, 3000);
ccr &= ~PCC_CCR_RST_ALL;
pcan_write_reg(card, PCC_CCR, ccr);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 1ac2090a1721..093fc9a529f0 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode)
* xx xx xx xx ff ll 00 11 22 33 44 55 66 77
* [ can_id ] [flags] [len] [can data (up to 8 bytes)]
*/
-static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sun4ican_priv *priv = netdev_priv(dev);
struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index c36f4bdcbf4f..750d04d9e2ae 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -1,6 +1,12 @@
menu "CAN USB interfaces"
depends on USB
+config CAN_8DEV_USB
+ tristate "8 devices USB2CAN interface"
+ ---help---
+ This driver supports the USB2CAN interface
+ from 8 devices (http://www.8devices.com).
+
config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
---help---
@@ -26,7 +32,7 @@ config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
---help---
This driver adds support for Kvaser CAN/USB devices like Kvaser
- Leaf Light and Kvaser USBcan II.
+ Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
The driver provides support for the following devices:
- Kvaser Leaf Light
@@ -55,12 +61,30 @@ config CAN_KVASER_USB
- Kvaser Memorator HS/HS
- Kvaser Memorator HS/LS
- Scania VCI2 (if you have the Kvaser logo on top)
+ - Kvaser BlackBird v2
+ - Kvaser Leaf Pro HS v2
+ - Kvaser Hybrid 2xCAN/LIN
+ - Kvaser Hybrid Pro 2xCAN/LIN
+ - Kvaser Memorator 2xHS v2
+ - Kvaser Memorator Pro 2xHS v2
+ - Kvaser Memorator Pro 5xHS
+ - Kvaser USBcan Light 4xHS
+ - Kvaser USBcan Pro 2xHS v2
+ - Kvaser USBcan Pro 5xHS
+ - ATI Memorator Pro 2xHS v2
+ - ATI USBcan Pro 2xHS v2
If unsure, say N.
To compile this driver as a module, choose M here: the
module will be called kvaser_usb.
+config CAN_MCBA_USB
+ tristate "Microchip CAN BUS Analyzer interface"
+ ---help---
+ This driver supports the CAN BUS Analyzer interface
+ from Microchip (http://www.microchip.com/development-tools/).
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
---help---
@@ -77,16 +101,26 @@ config CAN_PEAK_USB
(see also http://www.peak-system.com).
-config CAN_8DEV_USB
- tristate "8 devices USB2CAN interface"
- ---help---
- This driver supports the USB2CAN interface
- from 8 devices (http://www.8devices.com).
-
-config CAN_MCBA_USB
- tristate "Microchip CAN BUS Analyzer interface"
- ---help---
- This driver supports the CAN BUS Analyzer interface
- from Microchip (http://www.microchip.com/development-tools/).
+config CAN_UCAN
+ tristate "Theobroma Systems UCAN interface"
+ ---help---
+ This driver supports the Theobroma Systems
+ UCAN USB-CAN interface.
+
+ The UCAN driver supports the microcontroller-based USB/CAN
+ adapters from Theobroma Systems. There are two form-factors
+ that run essentially the same firmware:
+
+ * Seal: standalone USB stick
+ (https://www.theobroma-systems.com/seal)
+ * Mule: integrated on the PCB of various System-on-Modules
+ from Theobroma Systems like the A31-µQ7 and the RK3399-Q7
+ (https://www.theobroma-systems.com/rk3399-q7)
+
endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 49ac7b99ba32..aa0f17c0b2ed 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,10 +3,11 @@
# Makefile for the Linux Controller Area Network USB drivers.
#
+obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
-obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
-obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
-obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
+obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 12ff0020ecd6..b7dfd4109d24 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
usb_free_urb(dev->intr_urb);
kfree(dev->intr_in_buffer);
+ kfree(dev->tx_msg_buffer);
}
}
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
deleted file mode 100644
index daed57d3d209..000000000000
--- a/drivers/net/can/usb/kvaser_usb.c
+++ /dev/null
@@ -1,2085 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * Parts of this driver are based on the following:
- * - Kvaser linux leaf driver (version 4.78)
- * - CAN driver for esd CAN-USB/2
- * - Kvaser linux usbcanII driver (version 5.3)
- *
- * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
- * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
- * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
- * Copyright (C) 2015 Valeo S.A.
- */
-
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/usb.h>
-
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-#define MAX_RX_URBS 4
-#define START_TIMEOUT 1000 /* msecs */
-#define STOP_TIMEOUT 1000 /* msecs */
-#define USB_SEND_TIMEOUT 1000 /* msecs */
-#define USB_RECV_TIMEOUT 1000 /* msecs */
-#define RX_BUFFER_SIZE 3072
-#define CAN_USB_CLOCK 8000000
-#define MAX_NET_DEVICES 3
-#define MAX_USBCAN_NET_DEVICES 2
-
-/* Kvaser Leaf USB devices */
-#define KVASER_VENDOR_ID 0x0bfd
-#define USB_LEAF_DEVEL_PRODUCT_ID 10
-#define USB_LEAF_LITE_PRODUCT_ID 11
-#define USB_LEAF_PRO_PRODUCT_ID 12
-#define USB_LEAF_SPRO_PRODUCT_ID 14
-#define USB_LEAF_PRO_LS_PRODUCT_ID 15
-#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
-#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
-#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
-#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
-#define USB_MEMO2_DEVEL_PRODUCT_ID 22
-#define USB_MEMO2_HSHS_PRODUCT_ID 23
-#define USB_UPRO_HSHS_PRODUCT_ID 24
-#define USB_LEAF_LITE_GI_PRODUCT_ID 25
-#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
-#define USB_MEMO2_HSLS_PRODUCT_ID 27
-#define USB_LEAF_LITE_CH_PRODUCT_ID 28
-#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
-#define USB_OEM_MERCURY_PRODUCT_ID 34
-#define USB_OEM_LEAF_PRODUCT_ID 35
-#define USB_CAN_R_PRODUCT_ID 39
-#define USB_LEAF_LITE_V2_PRODUCT_ID 288
-#define USB_MINI_PCIE_HS_PRODUCT_ID 289
-#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
-#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
-#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
-
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
-}
-
-/* Kvaser USBCan-II devices */
-#define USB_USBCAN_REVB_PRODUCT_ID 2
-#define USB_VCI2_PRODUCT_ID 3
-#define USB_USBCAN2_PRODUCT_ID 4
-#define USB_MEMORATOR_PRODUCT_ID 5
-
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
-
-/* USB devices features */
-#define KVASER_HAS_SILENT_MODE BIT(0)
-#define KVASER_HAS_TXRX_ERRORS BIT(1)
-
-/* Message header size */
-#define MSG_HEADER_LEN 2
-
-/* Can message flags */
-#define MSG_FLAG_ERROR_FRAME BIT(0)
-#define MSG_FLAG_OVERRUN BIT(1)
-#define MSG_FLAG_NERR BIT(2)
-#define MSG_FLAG_WAKEUP BIT(3)
-#define MSG_FLAG_REMOTE_FRAME BIT(4)
-#define MSG_FLAG_RESERVED BIT(5)
-#define MSG_FLAG_TX_ACK BIT(6)
-#define MSG_FLAG_TX_REQUEST BIT(7)
-
-/* Can states (M16C CxSTRH register) */
-#define M16C_STATE_BUS_RESET BIT(0)
-#define M16C_STATE_BUS_ERROR BIT(4)
-#define M16C_STATE_BUS_PASSIVE BIT(5)
-#define M16C_STATE_BUS_OFF BIT(6)
-
-/* Can msg ids */
-#define CMD_RX_STD_MESSAGE 12
-#define CMD_TX_STD_MESSAGE 13
-#define CMD_RX_EXT_MESSAGE 14
-#define CMD_TX_EXT_MESSAGE 15
-#define CMD_SET_BUS_PARAMS 16
-#define CMD_GET_BUS_PARAMS 17
-#define CMD_GET_BUS_PARAMS_REPLY 18
-#define CMD_GET_CHIP_STATE 19
-#define CMD_CHIP_STATE_EVENT 20
-#define CMD_SET_CTRL_MODE 21
-#define CMD_GET_CTRL_MODE 22
-#define CMD_GET_CTRL_MODE_REPLY 23
-#define CMD_RESET_CHIP 24
-#define CMD_RESET_CARD 25
-#define CMD_START_CHIP 26
-#define CMD_START_CHIP_REPLY 27
-#define CMD_STOP_CHIP 28
-#define CMD_STOP_CHIP_REPLY 29
-
-#define CMD_LEAF_GET_CARD_INFO2 32
-#define CMD_USBCAN_RESET_CLOCK 32
-#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
-
-#define CMD_GET_CARD_INFO 34
-#define CMD_GET_CARD_INFO_REPLY 35
-#define CMD_GET_SOFTWARE_INFO 38
-#define CMD_GET_SOFTWARE_INFO_REPLY 39
-#define CMD_ERROR_EVENT 45
-#define CMD_FLUSH_QUEUE 48
-#define CMD_RESET_ERROR_COUNTER 49
-#define CMD_TX_ACKNOWLEDGE 50
-#define CMD_CAN_ERROR_EVENT 51
-#define CMD_FLUSH_QUEUE_REPLY 68
-
-#define CMD_LEAF_USB_THROTTLE 77
-#define CMD_LEAF_LOG_MESSAGE 106
-
-/* error factors */
-#define M16C_EF_ACKE BIT(0)
-#define M16C_EF_CRCE BIT(1)
-#define M16C_EF_FORME BIT(2)
-#define M16C_EF_STFE BIT(3)
-#define M16C_EF_BITE0 BIT(4)
-#define M16C_EF_BITE1 BIT(5)
-#define M16C_EF_RCVE BIT(6)
-#define M16C_EF_TRE BIT(7)
-
-/* Only Leaf-based devices can report M16C error factors,
- * thus define our own error status flags for USBCANII
- */
-#define USBCAN_ERROR_STATE_NONE 0
-#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
-#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
-#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
-/* ctrl modes */
-#define KVASER_CTRL_MODE_NORMAL 1
-#define KVASER_CTRL_MODE_SILENT 2
-#define KVASER_CTRL_MODE_SELFRECEPTION 3
-#define KVASER_CTRL_MODE_OFF 4
-
-/* Extended CAN identifier flag */
-#define KVASER_EXTENDED_FRAME BIT(31)
-
-/* Kvaser USB CAN dongles are divided into two major families:
- * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo'
- * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
- */
-enum kvaser_usb_family {
- KVASER_LEAF,
- KVASER_USBCAN,
-};
-
-struct kvaser_msg_simple {
- u8 tid;
- u8 channel;
-} __packed;
-
-struct kvaser_msg_cardinfo {
- u8 tid;
- u8 nchannels;
- union {
- struct {
- __le32 serial_number;
- __le32 padding;
- } __packed leaf0;
- struct {
- __le32 serial_number_low;
- __le32 serial_number_high;
- } __packed usbcan0;
- } __packed;
- __le32 clock_resolution;
- __le32 mfgdate;
- u8 ean[8];
- u8 hw_revision;
- union {
- struct {
- u8 usb_hs_mode;
- } __packed leaf1;
- struct {
- u8 padding;
- } __packed usbcan1;
- } __packed;
- __le16 padding;
-} __packed;
-
-struct kvaser_msg_cardinfo2 {
- u8 tid;
- u8 reserved;
- u8 pcb_id[24];
- __le32 oem_unlock_code;
-} __packed;
-
-struct leaf_msg_softinfo {
- u8 tid;
- u8 padding0;
- __le32 sw_options;
- __le32 fw_version;
- __le16 max_outstanding_tx;
- __le16 padding1[9];
-} __packed;
-
-struct usbcan_msg_softinfo {
- u8 tid;
- u8 fw_name[5];
- __le16 max_outstanding_tx;
- u8 padding[6];
- __le32 fw_version;
- __le16 checksum;
- __le16 sw_options;
-} __packed;
-
-struct kvaser_msg_busparams {
- u8 tid;
- u8 channel;
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 no_samp;
-} __packed;
-
-struct kvaser_msg_tx_can {
- u8 channel;
- u8 tid;
- u8 msg[14];
- union {
- struct {
- u8 padding;
- u8 flags;
- } __packed leaf;
- struct {
- u8 flags;
- u8 padding;
- } __packed usbcan;
- } __packed;
-} __packed;
-
-struct kvaser_msg_rx_can_header {
- u8 channel;
- u8 flag;
-} __packed;
-
-struct leaf_msg_rx_can {
- u8 channel;
- u8 flag;
-
- __le16 time[3];
- u8 msg[14];
-} __packed;
-
-struct usbcan_msg_rx_can {
- u8 channel;
- u8 flag;
-
- u8 msg[14];
- __le16 time;
-} __packed;
-
-struct leaf_msg_chip_state_event {
- u8 tid;
- u8 channel;
-
- __le16 time[3];
- u8 tx_errors_count;
- u8 rx_errors_count;
-
- u8 status;
- u8 padding[3];
-} __packed;
-
-struct usbcan_msg_chip_state_event {
- u8 tid;
- u8 channel;
-
- u8 tx_errors_count;
- u8 rx_errors_count;
- __le16 time;
-
- u8 status;
- u8 padding[3];
-} __packed;
-
-struct kvaser_msg_tx_acknowledge_header {
- u8 channel;
- u8 tid;
-} __packed;
-
-struct leaf_msg_tx_acknowledge {
- u8 channel;
- u8 tid;
-
- __le16 time[3];
- u8 flags;
- u8 time_offset;
-} __packed;
-
-struct usbcan_msg_tx_acknowledge {
- u8 channel;
- u8 tid;
-
- __le16 time;
- __le16 padding;
-} __packed;
-
-struct leaf_msg_error_event {
- u8 tid;
- u8 flags;
- __le16 time[3];
- u8 channel;
- u8 padding;
- u8 tx_errors_count;
- u8 rx_errors_count;
- u8 status;
- u8 error_factor;
-} __packed;
-
-struct usbcan_msg_error_event {
- u8 tid;
- u8 padding;
- u8 tx_errors_count_ch0;
- u8 rx_errors_count_ch0;
- u8 tx_errors_count_ch1;
- u8 rx_errors_count_ch1;
- u8 status_ch0;
- u8 status_ch1;
- __le16 time;
-} __packed;
-
-struct kvaser_msg_ctrl_mode {
- u8 tid;
- u8 channel;
- u8 ctrl_mode;
- u8 padding[3];
-} __packed;
-
-struct kvaser_msg_flush_queue {
- u8 tid;
- u8 channel;
- u8 flags;
- u8 padding[3];
-} __packed;
-
-struct leaf_msg_log_message {
- u8 channel;
- u8 flags;
- __le16 time[3];
- u8 dlc;
- u8 time_offset;
- __le32 id;
- u8 data[8];
-} __packed;
-
-struct kvaser_msg {
- u8 len;
- u8 id;
- union {
- struct kvaser_msg_simple simple;
- struct kvaser_msg_cardinfo cardinfo;
- struct kvaser_msg_cardinfo2 cardinfo2;
- struct kvaser_msg_busparams busparams;
-
- struct kvaser_msg_rx_can_header rx_can_header;
- struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header;
-
- union {
- struct leaf_msg_softinfo softinfo;
- struct leaf_msg_rx_can rx_can;
- struct leaf_msg_chip_state_event chip_state_event;
- struct leaf_msg_tx_acknowledge tx_acknowledge;
- struct leaf_msg_error_event error_event;
- struct leaf_msg_log_message log_message;
- } __packed leaf;
-
- union {
- struct usbcan_msg_softinfo softinfo;
- struct usbcan_msg_rx_can rx_can;
- struct usbcan_msg_chip_state_event chip_state_event;
- struct usbcan_msg_tx_acknowledge tx_acknowledge;
- struct usbcan_msg_error_event error_event;
- } __packed usbcan;
-
- struct kvaser_msg_tx_can tx_can;
- struct kvaser_msg_ctrl_mode ctrl_mode;
- struct kvaser_msg_flush_queue flush_queue;
- } u;
-} __packed;
-
-/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
- * handling. Some discrepancies between the two families exist:
- *
- * - USBCAN firmware does not report M16C "error factors"
- * - USBCAN controllers have difficulties reporting if the raised error
- * event is for ch0 or ch1. They leave such arbitration to the OS
- * driver by letting it compare error counters with previous values
- * and decide the error event's channel. Thus for USBCAN, the channel
- * field is only advisory.
- */
-struct kvaser_usb_error_summary {
- u8 channel, status, txerr, rxerr;
- union {
- struct {
- u8 error_factor;
- } leaf;
- struct {
- u8 other_ch_status;
- u8 error_state;
- } usbcan;
- };
-};
-
-/* Context for an outstanding, not yet ACKed, transmission */
-struct kvaser_usb_tx_urb_context {
- struct kvaser_usb_net_priv *priv;
- u32 echo_index;
- int dlc;
-};
-
-struct kvaser_usb {
- struct usb_device *udev;
- struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
-
- struct usb_endpoint_descriptor *bulk_in, *bulk_out;
- struct usb_anchor rx_submitted;
-
- /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
- * not yet ACKed, transmissions on this device. This value is
- * also used as a sentinel for marking free tx contexts.
- */
- u32 fw_version;
- unsigned int nchannels;
- unsigned int max_tx_urbs;
- enum kvaser_usb_family family;
-
- bool rxinitdone;
- void *rxbuf[MAX_RX_URBS];
- dma_addr_t rxbuf_dma[MAX_RX_URBS];
-};
-
-struct kvaser_usb_net_priv {
- struct can_priv can;
- struct can_berr_counter bec;
-
- struct kvaser_usb *dev;
- struct net_device *netdev;
- int channel;
-
- struct completion start_comp, stop_comp;
- struct usb_anchor tx_submitted;
-
- spinlock_t tx_contexts_lock;
- int active_tx_contexts;
- struct kvaser_usb_tx_urb_context tx_contexts[];
-};
-
-static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf family IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
-
- /* USBCANII family IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
-
- { }
-};
-MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
-
-static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
- struct kvaser_msg *msg)
-{
- int actual_len;
-
- return usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- msg, msg->len, &actual_len,
- USB_SEND_TIMEOUT);
-}
-
-static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
- struct kvaser_msg *msg)
-{
- struct kvaser_msg *tmp;
- void *buf;
- int actual_len;
- int err;
- int pos;
- unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
-
- buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- do {
- err = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE, &actual_len,
- USB_RECV_TIMEOUT);
- if (err < 0)
- goto end;
-
- pos = 0;
- while (pos <= actual_len - MSG_HEADER_LEN) {
- tmp = buf + pos;
-
- /* Handle messages crossing the USB endpoint max packet
- * size boundary. Check kvaser_usb_read_bulk_callback()
- * for further details.
- */
- if (tmp->len == 0) {
- pos = round_up(pos, le16_to_cpu(dev->bulk_in->
- wMaxPacketSize));
- continue;
- }
-
- if (pos + tmp->len > actual_len) {
- dev_err_ratelimited(dev->udev->dev.parent,
- "Format error\n");
- break;
- }
-
- if (tmp->id == id) {
- memcpy(msg, tmp, tmp->len);
- goto end;
- }
-
- pos += tmp->len;
- }
- } while (time_before(jiffies, to));
-
- err = -EINVAL;
-
-end:
- kfree(buf);
-
- return err;
-}
-
-static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
- u8 msg_id, int channel)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = msg_id;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
- msg->u.simple.channel = channel;
- msg->u.simple.tid = 0xff;
-
- rc = kvaser_usb_send_msg(dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
-{
- struct kvaser_msg msg;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
- if (err)
- return err;
-
- err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg);
- if (err)
- return err;
-
- switch (dev->family) {
- case KVASER_LEAF:
- dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
- break;
- case KVASER_USBCAN:
- dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
- break;
- }
-
- return 0;
-}
-
-static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
-{
- struct kvaser_msg msg;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
- if (err)
- return err;
-
- err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg);
- if (err)
- return err;
-
- dev->nchannels = msg.u.cardinfo.nchannels;
- if ((dev->nchannels > MAX_NET_DEVICES) ||
- (dev->family == KVASER_USBCAN &&
- dev->nchannels > MAX_USBCAN_NET_DEVICES))
- return -EINVAL;
-
- return 0;
-}
-
-static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct net_device_stats *stats;
- struct kvaser_usb_tx_urb_context *context;
- struct kvaser_usb_net_priv *priv;
- struct sk_buff *skb;
- struct can_frame *cf;
- unsigned long flags;
- u8 channel, tid;
-
- channel = msg->u.tx_acknowledge_header.channel;
- tid = msg->u.tx_acknowledge_header.tid;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- if (!netif_device_present(priv->netdev))
- return;
-
- stats = &priv->netdev->stats;
-
- context = &priv->tx_contexts[tid % dev->max_tx_urbs];
-
- /* Sometimes the state change doesn't come after a bus-off event */
- if (priv->can.restart_ms &&
- (priv->can.state >= CAN_STATE_BUS_OFF)) {
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
- cf->can_id |= CAN_ERR_RESTARTED;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
- } else {
- netdev_err(priv->netdev,
- "No memory left for err_skb\n");
- }
-
- priv->can.can_stats.restarts++;
- netif_carrier_on(priv->netdev);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
-
- stats->tx_packets++;
- stats->tx_bytes += context->dlc;
-
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-
- can_get_echo_skb(priv->netdev, context->echo_index);
- context->echo_index = dev->max_tx_urbs;
- --priv->active_tx_contexts;
- netif_wake_queue(priv->netdev);
-
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-}
-
-static void kvaser_usb_simple_msg_callback(struct urb *urb)
-{
- struct net_device *netdev = urb->context;
-
- kfree(urb->transfer_buffer);
-
- if (urb->status)
- netdev_warn(netdev, "urb status received: %d\n",
- urb->status);
-}
-
-static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
- u8 msg_id)
-{
- struct kvaser_usb *dev = priv->dev;
- struct net_device *netdev = priv->netdev;
- struct kvaser_msg *msg;
- struct urb *urb;
- void *buf;
- int err;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- msg = (struct kvaser_msg *)buf;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
- msg->id = msg_id;
- msg->u.simple.channel = priv->channel;
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- buf, msg->len,
- kvaser_usb_simple_msg_callback, netdev);
- usb_anchor_urb(urb, &priv->tx_submitted);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- netdev_err(netdev, "Error transmitting URB\n");
- usb_unanchor_urb(urb);
- kfree(buf);
- usb_free_urb(urb);
- return err;
- }
-
- usb_free_urb(urb);
-
- return 0;
-}
-
-static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
- const struct kvaser_usb_error_summary *es,
- struct can_frame *cf)
-{
- struct kvaser_usb *dev = priv->dev;
- struct net_device_stats *stats = &priv->netdev->stats;
- enum can_state cur_state, new_state, tx_state, rx_state;
-
- netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
-
- new_state = cur_state = priv->can.state;
-
- if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET))
- new_state = CAN_STATE_BUS_OFF;
- else if (es->status & M16C_STATE_BUS_PASSIVE)
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (es->status & M16C_STATE_BUS_ERROR) {
- /* Guard against spurious error events after a busoff */
- if (cur_state < CAN_STATE_BUS_OFF) {
- if ((es->txerr >= 128) || (es->rxerr >= 128))
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if ((es->txerr >= 96) || (es->rxerr >= 96))
- new_state = CAN_STATE_ERROR_WARNING;
- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
- }
-
- if (!es->status)
- new_state = CAN_STATE_ERROR_ACTIVE;
-
- if (new_state != cur_state) {
- tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
- rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
-
- can_change_state(priv->netdev, cf, tx_state, rx_state);
- }
-
- if (priv->can.restart_ms &&
- (cur_state >= CAN_STATE_BUS_OFF) &&
- (new_state < CAN_STATE_BUS_OFF)) {
- priv->can.can_stats.restarts++;
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- if (es->leaf.error_factor) {
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- }
- break;
- case KVASER_USBCAN:
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
- stats->tx_errors++;
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
- stats->rx_errors++;
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
- priv->can.can_stats.bus_error++;
- }
- break;
- }
-
- priv->bec.txerr = es->txerr;
- priv->bec.rxerr = es->rxerr;
-}
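
The counter thresholds above mirror the CAN error-confinement rules: a transmit or receive error counter of 128 or more means error-passive, 96 or more means error-warning, anything lower is error-active. The mapping in isolation, as a sketch (counters_to_state() is an illustrative helper, not a driver function):

	/* CAN_STATE_* come from <linux/can/netlink.h> */
	static enum can_state counters_to_state(u8 txerr, u8 rxerr)
	{
		if (txerr >= 128 || rxerr >= 128)
			return CAN_STATE_ERROR_PASSIVE;	/* passive limit */
		if (txerr >= 96 || rxerr >= 96)
			return CAN_STATE_ERROR_WARNING;	/* warning limit */
		return CAN_STATE_ERROR_ACTIVE;
	}

The driver consults the counters only when the status byte reports a plain bus error, and it guards against spurious events arriving after a bus-off.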
-
-static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_usb_error_summary *es)
-{
- struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC };
- struct sk_buff *skb;
- struct net_device_stats *stats;
- struct kvaser_usb_net_priv *priv;
- enum can_state old_state, new_state;
-
- if (es->channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", es->channel);
- return;
- }
-
- priv = dev->nets[es->channel];
- stats = &priv->netdev->stats;
-
- /* Update all of the can interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error can frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * can interface's state and counters, and the setting up of can error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
- old_state = priv->can.state;
- kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf);
- new_state = priv->can.state;
-
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
- if (new_state != old_state) {
- if (es->status &
- (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
- if (!priv->can.restart_ms)
- kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
- netif_carrier_off(priv->netdev);
- }
-
- if (priv->can.restart_ms &&
- (old_state >= CAN_STATE_BUS_OFF) &&
- (new_state < CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_RESTARTED;
- netif_carrier_on(priv->netdev);
- }
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- if (es->leaf.error_factor) {
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-
- if (es->leaf.error_factor & M16C_EF_ACKE)
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
- if (es->leaf.error_factor & M16C_EF_CRCE)
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- if (es->leaf.error_factor & M16C_EF_FORME)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- if (es->leaf.error_factor & M16C_EF_STFE)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- if (es->leaf.error_factor & M16C_EF_BITE0)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
- if (es->leaf.error_factor & M16C_EF_BITE1)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
- if (es->leaf.error_factor & M16C_EF_TRE)
- cf->data[2] |= CAN_ERR_PROT_TX;
- }
- break;
- case KVASER_USBCAN:
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
- cf->can_id |= CAN_ERR_BUSERROR;
- }
- break;
- }
-
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
-}
-
-/* For USBCAN, report error to userspace iff the channel's error counter
- * has changed, or we're the only channel seeing a bus error state.
- */
-static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
- struct kvaser_usb_error_summary *es)
-{
- struct kvaser_usb_net_priv *priv;
- int channel;
- bool report_error;
-
- channel = es->channel;
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
- report_error = false;
-
- if (es->txerr != priv->bec.txerr) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
- report_error = true;
- }
- if (es->rxerr != priv->bec.rxerr) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
- report_error = true;
- }
- if ((es->status & M16C_STATE_BUS_ERROR) &&
- !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
- report_error = true;
- }
-
- if (report_error)
- kvaser_usb_rx_error(dev, es);
-}
-
-static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_error_summary es = { };
-
- switch (msg->id) {
- /* Sometimes errors are sent as unsolicited chip state events */
- case CMD_CHIP_STATE_EVENT:
- es.channel = msg->u.usbcan.chip_state_event.channel;
- es.status = msg->u.usbcan.chip_state_event.status;
- es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count;
- es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
- break;
-
- case CMD_CAN_ERROR_EVENT:
- es.channel = 0;
- es.status = msg->u.usbcan.error_event.status_ch0;
- es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0;
- es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0;
- es.usbcan.other_ch_status =
- msg->u.usbcan.error_event.status_ch1;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
-
- /* The USBCAN firmware supports up to 2 channels.
- * Now that ch0 was checked, check if ch1 has any errors.
- */
- if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
- es.channel = 1;
- es.status = msg->u.usbcan.error_event.status_ch1;
- es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1;
- es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1;
- es.usbcan.other_ch_status =
- msg->u.usbcan.error_event.status_ch0;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
- }
- break;
-
- default:
- dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
- msg->id);
- }
-}
-
-static void kvaser_leaf_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_error_summary es = { };
-
- switch (msg->id) {
- case CMD_CAN_ERROR_EVENT:
- es.channel = msg->u.leaf.error_event.channel;
- es.status = msg->u.leaf.error_event.status;
- es.txerr = msg->u.leaf.error_event.tx_errors_count;
- es.rxerr = msg->u.leaf.error_event.rx_errors_count;
- es.leaf.error_factor = msg->u.leaf.error_event.error_factor;
- break;
- case CMD_LEAF_LOG_MESSAGE:
- es.channel = msg->u.leaf.log_message.channel;
- es.status = msg->u.leaf.log_message.data[0];
- es.txerr = msg->u.leaf.log_message.data[2];
- es.rxerr = msg->u.leaf.log_message.data[3];
- es.leaf.error_factor = msg->u.leaf.log_message.data[1];
- break;
- case CMD_CHIP_STATE_EVENT:
- es.channel = msg->u.leaf.chip_state_event.channel;
- es.status = msg->u.leaf.chip_state_event.status;
- es.txerr = msg->u.leaf.chip_state_event.tx_errors_count;
- es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count;
- es.leaf.error_factor = 0;
- break;
- default:
- dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
- msg->id);
- return;
- }
-
- kvaser_usb_rx_error(dev, &es);
-}
-
-static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
- const struct kvaser_msg *msg)
-{
- struct can_frame *cf;
- struct sk_buff *skb;
- struct net_device_stats *stats = &priv->netdev->stats;
-
- if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
- MSG_FLAG_NERR)) {
- netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
- msg->u.rx_can_header.flag);
-
- stats->rx_errors++;
- return;
- }
-
- if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) {
- stats->rx_over_errors++;
- stats->rx_errors++;
-
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
- }
-}
-
-static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct net_device_stats *stats;
- u8 channel = msg->u.rx_can_header.channel;
- const u8 *rx_msg = NULL; /* GCC */
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
- stats = &priv->netdev->stats;
-
- if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) {
- kvaser_leaf_rx_error(dev, msg);
- return;
- } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
- MSG_FLAG_NERR |
- MSG_FLAG_OVERRUN)) {
- kvaser_usb_rx_can_err(priv, msg);
- return;
- } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
- netdev_warn(priv->netdev,
- "Unhandled frame (flags: 0x%02x)",
- msg->u.rx_can_header.flag);
- return;
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- rx_msg = msg->u.leaf.rx_can.msg;
- break;
- case KVASER_USBCAN:
- rx_msg = msg->u.usbcan.rx_can.msg;
- break;
- }
-
- skb = alloc_can_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
-
- if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) {
- cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id);
- if (cf->can_id & KVASER_EXTENDED_FRAME)
- cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
- else
- cf->can_id &= CAN_SFF_MASK;
-
- cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc);
-
- if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &msg->u.leaf.log_message.data,
- cf->can_dlc);
- } else {
- cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f);
-
- if (msg->id == CMD_RX_EXT_MESSAGE) {
- cf->can_id <<= 18;
- cf->can_id |= ((rx_msg[2] & 0x0f) << 14) |
- ((rx_msg[3] & 0xff) << 6) |
- (rx_msg[4] & 0x3f);
- cf->can_id |= CAN_EFF_FLAG;
- }
-
- cf->can_dlc = get_can_dlc(rx_msg[5]);
-
- if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &rx_msg[6],
- cf->can_dlc);
- }
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
-}
-
-static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- u8 channel = msg->u.simple.channel;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- if (completion_done(&priv->start_comp) &&
- netif_queue_stopped(priv->netdev)) {
- netif_wake_queue(priv->netdev);
- } else {
- netif_start_queue(priv->netdev);
- complete(&priv->start_comp);
- }
-}
-
-static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- u8 channel = msg->u.simple.channel;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- complete(&priv->stop_comp);
-}
-
-static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- switch (msg->id) {
- case CMD_START_CHIP_REPLY:
- kvaser_usb_start_chip_reply(dev, msg);
- break;
-
- case CMD_STOP_CHIP_REPLY:
- kvaser_usb_stop_chip_reply(dev, msg);
- break;
-
- case CMD_RX_STD_MESSAGE:
- case CMD_RX_EXT_MESSAGE:
- kvaser_usb_rx_can_msg(dev, msg);
- break;
-
- case CMD_LEAF_LOG_MESSAGE:
- if (dev->family != KVASER_LEAF)
- goto warn;
- kvaser_usb_rx_can_msg(dev, msg);
- break;
-
- case CMD_CHIP_STATE_EVENT:
- case CMD_CAN_ERROR_EVENT:
- if (dev->family == KVASER_LEAF)
- kvaser_leaf_rx_error(dev, msg);
- else
- kvaser_usbcan_rx_error(dev, msg);
- break;
-
- case CMD_TX_ACKNOWLEDGE:
- kvaser_usb_tx_acknowledge(dev, msg);
- break;
-
- /* Ignored messages */
- case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->family != KVASER_USBCAN)
- goto warn;
- break;
-
- case CMD_FLUSH_QUEUE_REPLY:
- if (dev->family != KVASER_LEAF)
- goto warn;
- break;
-
- default:
-warn: dev_warn(dev->udev->dev.parent,
- "Unhandled message (%d)\n", msg->id);
- break;
- }
-}
-
-static void kvaser_usb_read_bulk_callback(struct urb *urb)
-{
- struct kvaser_usb *dev = urb->context;
- struct kvaser_msg *msg;
- int pos = 0;
- int err, i;
-
- switch (urb->status) {
- case 0:
- break;
- case -ENOENT:
- case -EPIPE:
- case -EPROTO:
- case -ESHUTDOWN:
- return;
- default:
- dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
- urb->status);
- goto resubmit_urb;
- }
-
- while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
- msg = urb->transfer_buffer + pos;
-
- /* The Kvaser firmware can only read and write messages that
- * do not cross the USB endpoint's wMaxPacketSize boundary.
- * If a follow-up command crosses such boundary, firmware puts
- * a placeholder zero-length command in its place then aligns
- * the real command to the next max packet size.
- *
- * Handle such cases or we're going to miss a significant
- * number of events in case of a heavy rx load on the bus.
- */
- if (msg->len == 0) {
- pos = round_up(pos, le16_to_cpu(dev->bulk_in->
- wMaxPacketSize));
- continue;
- }
-
- if (pos + msg->len > urb->actual_length) {
- dev_err_ratelimited(dev->udev->dev.parent,
- "Format error\n");
- break;
- }
-
- kvaser_usb_handle_message(dev, msg);
- pos += msg->len;
- }
-
-resubmit_urb:
- usb_fill_bulk_urb(urb, dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- urb->transfer_buffer, RX_BUFFER_SIZE,
- kvaser_usb_read_bulk_callback, dev);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err == -ENODEV) {
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- netif_device_detach(dev->nets[i]->netdev);
- }
- } else if (err) {
- dev_err(dev->udev->dev.parent,
- "Failed resubmitting read bulk urb: %d\n", err);
- }
-
- return;
-}
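
The placeholder handling above is easiest to see with concrete numbers: with a wMaxPacketSize of 64 and the parser at offset 61, a zero-length command means the next real command was aligned up to the next packet boundary, so parsing resumes at round_up(61, 64) == 64. A minimal sketch (skip_padding() is a hypothetical helper; the driver open-codes this):

	#include <linux/kernel.h>	/* round_up() */

	static int skip_padding(int pos, u16 max_packet_size)
	{
		/* e.g. skip_padding(61, 64) == 64; an already aligned
		 * position such as 128 is returned unchanged. */
		return round_up(pos, max_packet_size);
	}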
-
-static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
-{
- int i, err = 0;
-
- if (dev->rxinitdone)
- return 0;
-
- for (i = 0; i < MAX_RX_URBS; i++) {
- struct urb *urb = NULL;
- u8 *buf = NULL;
- dma_addr_t buf_dma;
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- err = -ENOMEM;
- break;
- }
-
- buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
- GFP_KERNEL, &buf_dma);
- if (!buf) {
- dev_warn(dev->udev->dev.parent,
- "No memory left for USB buffer\n");
- usb_free_urb(urb);
- err = -ENOMEM;
- break;
- }
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE,
- kvaser_usb_read_bulk_callback,
- dev);
- urb->transfer_dma = buf_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- usb_anchor_urb(urb, &dev->rx_submitted);
-
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
- buf_dma);
- usb_free_urb(urb);
- break;
- }
-
- dev->rxbuf[i] = buf;
- dev->rxbuf_dma[i] = buf_dma;
-
- usb_free_urb(urb);
- }
-
- if (i == 0) {
- dev_warn(dev->udev->dev.parent,
- "Cannot setup read URBs, error %d\n", err);
- return err;
- } else if (i < MAX_RX_URBS) {
- dev_warn(dev->udev->dev.parent,
- "RX performances may be slow\n");
- }
-
- dev->rxinitdone = true;
-
- return 0;
-}
-
-static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_SET_CTRL_MODE;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
- msg->u.ctrl_mode.tid = 0xff;
- msg->u.ctrl_mode.channel = priv->channel;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
- else
- msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
-
- rc = kvaser_usb_send_msg(priv->dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
-{
- int err;
-
- init_completion(&priv->start_comp);
-
- err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
- priv->channel);
- if (err)
- return err;
-
- if (!wait_for_completion_timeout(&priv->start_comp,
- msecs_to_jiffies(START_TIMEOUT)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int kvaser_usb_open(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- int err;
-
- err = open_candev(netdev);
- if (err)
- return err;
-
- err = kvaser_usb_setup_rx_urbs(dev);
- if (err)
- goto error;
-
- err = kvaser_usb_set_opt_mode(priv);
- if (err)
- goto error;
-
- err = kvaser_usb_start_chip(priv);
- if (err) {
- netdev_warn(netdev, "Cannot start device, error %d\n", err);
- goto error;
- }
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- return 0;
-
-error:
- close_candev(netdev);
- return err;
-}
-
-static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
-{
- int i, max_tx_urbs;
-
- max_tx_urbs = priv->dev->max_tx_urbs;
-
- priv->active_tx_contexts = 0;
- for (i = 0; i < max_tx_urbs; i++)
- priv->tx_contexts[i].echo_index = max_tx_urbs;
-}
-
-/* This method might sleep. Do not call it in the atomic context
- * of URB completions.
- */
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
-{
- usb_kill_anchored_urbs(&priv->tx_submitted);
- kvaser_usb_reset_tx_urb_contexts(priv);
-}
-
-static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
-{
- int i;
-
- usb_kill_anchored_urbs(&dev->rx_submitted);
-
- for (i = 0; i < MAX_RX_URBS; i++)
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
- dev->rxbuf[i],
- dev->rxbuf_dma[i]);
-
- for (i = 0; i < dev->nchannels; i++) {
- struct kvaser_usb_net_priv *priv = dev->nets[i];
-
- if (priv)
- kvaser_usb_unlink_tx_urbs(priv);
- }
-}
-
-static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
-{
- int err;
-
- init_completion(&priv->stop_comp);
-
- err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
- priv->channel);
- if (err)
- return err;
-
- if (!wait_for_completion_timeout(&priv->stop_comp,
- msecs_to_jiffies(STOP_TIMEOUT)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_FLUSH_QUEUE;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
- msg->u.flush_queue.channel = priv->channel;
- msg->u.flush_queue.flags = 0x00;
-
- rc = kvaser_usb_send_msg(priv->dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_close(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- int err;
-
- netif_stop_queue(netdev);
-
- err = kvaser_usb_flush_queue(priv);
- if (err)
- netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
-
- err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
- if (err)
- netdev_warn(netdev, "Cannot reset card, error %d\n", err);
-
- err = kvaser_usb_stop_chip(priv);
- if (err)
- netdev_warn(netdev, "Cannot stop device, error %d\n", err);
-
- /* reset tx contexts */
- kvaser_usb_unlink_tx_urbs(priv);
-
- priv->can.state = CAN_STATE_STOPPED;
- close_candev(priv->netdev);
-
- return 0;
-}
-
-static void kvaser_usb_write_bulk_callback(struct urb *urb)
-{
- struct kvaser_usb_tx_urb_context *context = urb->context;
- struct kvaser_usb_net_priv *priv;
- struct net_device *netdev;
-
- if (WARN_ON(!context))
- return;
-
- priv = context->priv;
- netdev = priv->netdev;
-
- kfree(urb->transfer_buffer);
-
- if (!netif_device_present(netdev))
- return;
-
- if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
-}
-
-static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- struct kvaser_usb_tx_urb_context *context = NULL;
- struct urb *urb;
- void *buf;
- struct kvaser_msg *msg;
- int i, err, ret = NETDEV_TX_OK;
- u8 *msg_tx_can_flags = NULL; /* GCC */
- unsigned long flags;
-
- if (can_dropped_invalid_skb(netdev, skb))
- return NETDEV_TX_OK;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
- if (!buf) {
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- goto freeurb;
- }
-
- msg = buf;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
- msg->u.tx_can.channel = priv->channel;
-
- switch (dev->family) {
- case KVASER_LEAF:
- msg_tx_can_flags = &msg->u.tx_can.leaf.flags;
- break;
- case KVASER_USBCAN:
- msg_tx_can_flags = &msg->u.tx_can.usbcan.flags;
- break;
- }
-
- *msg_tx_can_flags = 0;
-
- if (cf->can_id & CAN_EFF_FLAG) {
- msg->id = CMD_TX_EXT_MESSAGE;
- msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
- msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
- msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
- msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
- msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
- } else {
- msg->id = CMD_TX_STD_MESSAGE;
- msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
- msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
- }
-
- msg->u.tx_can.msg[5] = cf->can_dlc;
- memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
-
- if (cf->can_id & CAN_RTR_FLAG)
- *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
-
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- for (i = 0; i < dev->max_tx_urbs; i++) {
- if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
- context = &priv->tx_contexts[i];
-
- context->echo_index = i;
- can_put_echo_skb(skb, netdev, context->echo_index);
- ++priv->active_tx_contexts;
- if (priv->active_tx_contexts >= dev->max_tx_urbs)
- netif_stop_queue(netdev);
-
- break;
- }
- }
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-
- /* This should never happen; it implies a flow control bug */
- if (!context) {
- netdev_warn(netdev, "cannot find free context\n");
-
- kfree(buf);
- ret = NETDEV_TX_BUSY;
- goto freeurb;
- }
-
- context->priv = priv;
- context->dlc = cf->can_dlc;
-
- msg->u.tx_can.tid = context->echo_index;
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- buf, msg->len,
- kvaser_usb_write_bulk_callback, context);
- usb_anchor_urb(urb, &priv->tx_submitted);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err)) {
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-
- can_free_echo_skb(netdev, context->echo_index);
- context->echo_index = dev->max_tx_urbs;
- --priv->active_tx_contexts;
- netif_wake_queue(netdev);
-
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-
- usb_unanchor_urb(urb);
- kfree(buf);
-
- stats->tx_dropped++;
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "Failed tx_urb %d\n", err);
-
- goto freeurb;
- }
-
- ret = NETDEV_TX_OK;
-
-freeurb:
- usb_free_urb(urb);
- return ret;
-}
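
The identifier packing above spreads the CAN ID over the command's message bytes in 5- and 6-bit groups. For a standard 11-bit frame, bits 10..6 go to msg[0] and bits 5..0 to msg[1]; for example, can_id 0x123 packs to msg[0] == 0x04 and msg[1] == 0x23. A sketch of just the standard-ID case (pack_std_id() is illustrative, not a driver function):

	static void pack_std_id(u32 can_id, u8 *msg)
	{
		msg[0] = (can_id >> 6) & 0x1f;	/* ID bits 10..6 */
		msg[1] = can_id & 0x3f;		/* ID bits 5..0  */
	}

The receive path in kvaser_usb_rx_can_msg() applies the exact inverse, reassembling the identifier from the same byte layout.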
-
-static const struct net_device_ops kvaser_usb_netdev_ops = {
- .ndo_open = kvaser_usb_open,
- .ndo_stop = kvaser_usb_close,
- .ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct can_bittiming_const kvaser_usb_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
-};
-
-static int kvaser_usb_set_bittiming(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
- struct kvaser_usb *dev = priv->dev;
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_SET_BUS_PARAMS;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
- msg->u.busparams.channel = priv->channel;
- msg->u.busparams.tid = 0xff;
- msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
- msg->u.busparams.sjw = bt->sjw;
- msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
- msg->u.busparams.tseg2 = bt->phase_seg2;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- msg->u.busparams.no_samp = 3;
- else
- msg->u.busparams.no_samp = 1;
-
- rc = kvaser_usb_send_msg(dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_set_mode(struct net_device *netdev,
- enum can_mode mode)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- int err;
-
- switch (mode) {
- case CAN_MODE_START:
- err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
- if (err)
- return err;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
-
- *bec = priv->bec;
-
- return 0;
-}
-
-static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
-{
- int i;
-
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- unregister_candev(dev->nets[i]->netdev);
- }
-
- kvaser_usb_unlink_all_urbs(dev);
-
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- free_candev(dev->nets[i]->netdev);
- }
-}
-
-static int kvaser_usb_init_one(struct usb_interface *intf,
- const struct usb_device_id *id, int channel)
-{
- struct kvaser_usb *dev = usb_get_intfdata(intf);
- struct net_device *netdev;
- struct kvaser_usb_net_priv *priv;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
- if (err)
- return err;
-
- netdev = alloc_candev(sizeof(*priv) +
- dev->max_tx_urbs * sizeof(*priv->tx_contexts),
- dev->max_tx_urbs);
- if (!netdev) {
- dev_err(&intf->dev, "Cannot alloc candev\n");
- return -ENOMEM;
- }
-
- priv = netdev_priv(netdev);
-
- init_usb_anchor(&priv->tx_submitted);
- init_completion(&priv->start_comp);
- init_completion(&priv->stop_comp);
-
- priv->dev = dev;
- priv->netdev = netdev;
- priv->channel = channel;
-
- spin_lock_init(&priv->tx_contexts_lock);
- kvaser_usb_reset_tx_urb_contexts(priv);
-
- priv->can.state = CAN_STATE_STOPPED;
- priv->can.clock.freq = CAN_USB_CLOCK;
- priv->can.bittiming_const = &kvaser_usb_bittiming_const;
- priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
- priv->can.do_set_mode = kvaser_usb_set_mode;
- if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
- priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
- if (id->driver_info & KVASER_HAS_SILENT_MODE)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
-
- netdev->flags |= IFF_ECHO;
-
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
- SET_NETDEV_DEV(netdev, &intf->dev);
- netdev->dev_id = channel;
-
- dev->nets[channel] = priv;
-
- err = register_candev(netdev);
- if (err) {
- dev_err(&intf->dev, "Failed to register can device\n");
- free_candev(netdev);
- dev->nets[channel] = NULL;
- return err;
- }
-
- netdev_dbg(netdev, "device registered\n");
-
- return 0;
-}
-
-static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
- struct usb_endpoint_descriptor **in,
- struct usb_endpoint_descriptor **out)
-{
- const struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- int i;
-
- iface_desc = &intf->altsetting[0];
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (!*in && usb_endpoint_is_bulk_in(endpoint))
- *in = endpoint;
-
- if (!*out && usb_endpoint_is_bulk_out(endpoint))
- *out = endpoint;
-
- /* use first bulk endpoint for in and out */
- if (*in && *out)
- return 0;
- }
-
- return -ENODEV;
-}
-
-static int kvaser_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct kvaser_usb *dev;
- int err = -ENOMEM;
- int i, retry = 3;
-
- dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- if (kvaser_is_leaf(id)) {
- dev->family = KVASER_LEAF;
- } else if (kvaser_is_usbcan(id)) {
- dev->family = KVASER_USBCAN;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) does not belong to any known Kvaser USB family",
- id->idProduct);
- return -ENODEV;
- }
-
- err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
- if (err) {
- dev_err(&intf->dev, "Cannot get usb endpoint(s)");
- return err;
- }
-
- dev->udev = interface_to_usbdev(intf);
-
- init_usb_anchor(&dev->rx_submitted);
-
- usb_set_intfdata(intf, dev);
-
- /* On some x86 laptops, plugging a Kvaser device again after
- * an unplug makes the firmware always ignore the very first
- * command. For such a case, provide some room for retries
- * instead of completely exiting the driver.
- */
- do {
- err = kvaser_usb_get_software_info(dev);
- } while (--retry && err == -ETIMEDOUT);
-
- if (err) {
- dev_err(&intf->dev,
- "Cannot get software infos, error %d\n", err);
- return err;
- }
-
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
-
- dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
-
- err = kvaser_usb_get_card_info(dev);
- if (err) {
- dev_err(&intf->dev,
- "Cannot get card infos, error %d\n", err);
- return err;
- }
-
- for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(intf, id, i);
- if (err) {
- kvaser_usb_remove_interfaces(dev);
- return err;
- }
- }
-
- return 0;
-}
-
-static void kvaser_usb_disconnect(struct usb_interface *intf)
-{
- struct kvaser_usb *dev = usb_get_intfdata(intf);
-
- usb_set_intfdata(intf, NULL);
-
- if (!dev)
- return;
-
- kvaser_usb_remove_interfaces(dev);
-}
-
-static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
- .probe = kvaser_usb_probe,
- .disconnect = kvaser_usb_disconnect,
- .id_table = kvaser_usb_table,
-};
-
-module_usb_driver(kvaser_usb_driver);
-
-MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
-MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
new file mode 100644
index 000000000000..9f41ddab6a5a
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
+kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
new file mode 100644
index 000000000000..390b6bde883c
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ * - Kvaser linux mhydra driver (version 5.24)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#ifndef KVASER_USB_H
+#define KVASER_USB_H
+
+/* Kvaser USB CAN dongles are divided into three major platforms:
+ * - Hydra: Running firmware labeled as 'mhydra'
+ * - Leaf: Based on Renesas M32C or Freescale i.MX28, running firmware labeled
+ * as 'filo'
+ * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
+ */
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#define KVASER_USB_MAX_RX_URBS 4
+#define KVASER_USB_MAX_TX_URBS 128
+#define KVASER_USB_TIMEOUT 1000 /* msecs */
+#define KVASER_USB_RX_BUFFER_SIZE 3072
+#define KVASER_USB_MAX_NET_DEVICES 5
+
+/* USB devices features */
+#define KVASER_USB_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+
+/* Device capabilities */
+#define KVASER_USB_CAP_BERR_CAP 0x01
+#define KVASER_USB_CAP_EXT_CAP 0x02
+#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
+
+struct kvaser_usb_dev_cfg;
+
+enum kvaser_usb_leaf_family {
+ KVASER_LEAF,
+ KVASER_USBCAN,
+};
+
+#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
+struct kvaser_usb_dev_card_data_hydra {
+ u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
+ u8 sysdbg_he;
+ spinlock_t transid_lock; /* lock for transid */
+ u16 transid;
+ /* lock for usb_rx_leftover and usb_rx_leftover_len */
+ spinlock_t usb_rx_leftover_lock;
+ u8 usb_rx_leftover[KVASER_USB_HYDRA_MAX_CMD_LEN];
+ u8 usb_rx_leftover_len;
+};
+struct kvaser_usb_dev_card_data {
+ u32 ctrlmode_supported;
+ u32 capabilities;
+ union {
+ struct {
+ enum kvaser_usb_leaf_family family;
+ } leaf;
+ struct kvaser_usb_dev_card_data_hydra hydra;
+ };
+};
+
+/* Context for an outstanding, not yet ACKed, transmission */
+struct kvaser_usb_tx_urb_context {
+ struct kvaser_usb_net_priv *priv;
+ u32 echo_index;
+ int dlc;
+};
+
+struct kvaser_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
+ const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_dev_cfg *cfg;
+
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_anchor rx_submitted;
+
+ /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
+ * not yet ACKed, transmissions on this device. This value is
+ * also used as a sentinel for marking free tx contexts.
+ */
+ u32 fw_version;
+ unsigned int nchannels;
+ unsigned int max_tx_urbs;
+ struct kvaser_usb_dev_card_data card_data;
+
+ bool rxinitdone;
+ void *rxbuf[KVASER_USB_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[KVASER_USB_MAX_RX_URBS];
+};
+
+struct kvaser_usb_net_priv {
+ struct can_priv can;
+ struct can_berr_counter bec;
+
+ struct kvaser_usb *dev;
+ struct net_device *netdev;
+ int channel;
+
+ struct completion start_comp, stop_comp, flush_comp;
+ struct usb_anchor tx_submitted;
+
+ spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
+ int active_tx_contexts;
+ struct kvaser_usb_tx_urb_context tx_contexts[];
+};
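
The sentinel convention documented in struct kvaser_usb above makes free-slot lookup a linear scan: a context whose echo_index equals max_tx_urbs cannot belong to an in-flight transmission, so it is free. A hedged sketch (find_free_context() is hypothetical; the driver performs this search inline in its xmit path, under tx_contexts_lock):

	static struct kvaser_usb_tx_urb_context *
	find_free_context(struct kvaser_usb_net_priv *priv)
	{
		struct kvaser_usb *dev = priv->dev;
		unsigned int i;

		for (i = 0; i < dev->max_tx_urbs; i++)
			if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs)
				return &priv->tx_contexts[i];	/* free slot */

		return NULL;	/* all contexts have transmissions in flight */
	}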
+
+/**
+ * struct kvaser_usb_dev_ops - Device specific functions
+ * @dev_set_mode: used for can.do_set_mode
+ * @dev_set_bittiming: used for can.do_set_bittiming
+ * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_berr_counter: used for can.do_get_berr_counter
+ *
+ * @dev_setup_endpoints: setup USB in and out endpoints
+ * @dev_init_card: initialize card
+ * @dev_get_software_info: get software info
+ * @dev_get_software_details: get software details
+ * @dev_get_card_info: get card info
+ * @dev_get_capabilities: discover device capabilities
+ *
+ * @dev_set_opt_mode: set ctrlmode
+ * @dev_start_chip: start the CAN controller
+ * @dev_stop_chip: stop the CAN controller
+ * @dev_reset_chip: reset the CAN controller
+ * @dev_flush_queue: flush outstanding CAN messages
+ * @dev_read_bulk_callback: handle incoming commands
+ * @dev_frame_to_cmd: translate struct can_frame into device command
+ */
+struct kvaser_usb_dev_ops {
+ int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
+ int (*dev_set_bittiming)(struct net_device *netdev);
+ int (*dev_set_data_bittiming)(struct net_device *netdev);
+ int (*dev_get_berr_counter)(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+ int (*dev_setup_endpoints)(struct kvaser_usb *dev);
+ int (*dev_init_card)(struct kvaser_usb *dev);
+ int (*dev_get_software_info)(struct kvaser_usb *dev);
+ int (*dev_get_software_details)(struct kvaser_usb *dev);
+ int (*dev_get_card_info)(struct kvaser_usb *dev);
+ int (*dev_get_capabilities)(struct kvaser_usb *dev);
+ int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
+ int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
+ int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
+ int (*dev_reset_chip)(struct kvaser_usb *dev, int channel);
+ int (*dev_flush_queue)(struct kvaser_usb_net_priv *priv);
+ void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
+ int len);
+ void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid);
+};
+
+struct kvaser_usb_dev_cfg {
+ const struct can_clock clock;
+ const unsigned int timestamp_freq;
+ const struct can_bittiming_const * const bittiming_const;
+ const struct can_bittiming_const * const data_bittiming_const;
+};
+
+extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
+extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+
+int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
+ int *actual_len);
+
+int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len);
+
+int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ int len);
+
+int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
new file mode 100644
index 000000000000..b939a4c10b84
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ * - Kvaser linux mhydra driver (version 5.24)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/if.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Kvaser USB vendor id. */
+#define KVASER_VENDOR_ID 0x0bfd
+
+/* Kvaser Leaf USB devices product ids */
+#define USB_LEAF_DEVEL_PRODUCT_ID 10
+#define USB_LEAF_LITE_PRODUCT_ID 11
+#define USB_LEAF_PRO_PRODUCT_ID 12
+#define USB_LEAF_SPRO_PRODUCT_ID 14
+#define USB_LEAF_PRO_LS_PRODUCT_ID 15
+#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
+#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
+#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
+#define USB_MEMO2_DEVEL_PRODUCT_ID 22
+#define USB_MEMO2_HSHS_PRODUCT_ID 23
+#define USB_UPRO_HSHS_PRODUCT_ID 24
+#define USB_LEAF_LITE_GI_PRODUCT_ID 25
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
+#define USB_MEMO2_HSLS_PRODUCT_ID 27
+#define USB_LEAF_LITE_CH_PRODUCT_ID 28
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
+#define USB_OEM_MERCURY_PRODUCT_ID 34
+#define USB_OEM_LEAF_PRODUCT_ID 35
+#define USB_CAN_R_PRODUCT_ID 39
+#define USB_LEAF_LITE_V2_PRODUCT_ID 288
+#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
+
+/* Kvaser USBCan-II devices product ids */
+#define USB_USBCAN_REVB_PRODUCT_ID 2
+#define USB_VCI2_PRODUCT_ID 3
+#define USB_USBCAN2_PRODUCT_ID 4
+#define USB_MEMORATOR_PRODUCT_ID 5
+
+/* Kvaser Minihydra USB devices product ids */
+#define USB_BLACKBIRD_V2_PRODUCT_ID 258
+#define USB_MEMO_PRO_5HS_PRODUCT_ID 260
+#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261
+#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262
+#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263
+#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
+#define USB_MEMO_2HS_PRODUCT_ID 265
+#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
+#define USB_HYBRID_CANLIN_PRODUCT_ID 267
+#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
+#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
+#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
+
+static inline bool kvaser_is_leaf(const struct usb_device_id *id)
+{
+ return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
+ id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
+ (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID);
+}
+
+static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
+ id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
+}
+
+static inline bool kvaser_is_hydra(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
+ id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID;
+}
+
+static const struct usb_device_id kvaser_usb_table[] = {
+ /* Leaf USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
+
+ /* USBCANII USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+
+ /* Minihydra USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+
+int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
+{
+ int actual_len; /* Not used */
+
+ return usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+}
+
+int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
+ int *actual_len)
+{
+ return usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ cmd, len, actual_len, KVASER_USB_TIMEOUT);
+}
+
+static void kvaser_usb_send_cmd_callback(struct urb *urb)
+{
+ struct net_device *netdev = urb->context;
+
+ kfree(urb->transfer_buffer);
+
+ if (urb->status)
+ netdev_warn(netdev, "urb status received: %d\n", urb->status);
+}
+
+int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ int len)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device *netdev = priv->netdev;
+ struct urb *urb;
+ int err;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ cmd, len, kvaser_usb_send_cmd_callback, netdev);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ netdev_err(netdev, "Error transmitting URB\n");
+ usb_unanchor_urb(urb);
+ }
+ usb_free_urb(urb);
+
+ return err;
+}
+
+int kvaser_usb_can_rx_over_error(struct net_device *netdev)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return -ENOMEM;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void kvaser_usb_read_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb *dev = urb->context;
+ int err;
+ unsigned int i;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ return;
+ default:
+ dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status);
+ goto resubmit_urb;
+ }
+
+ dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback, dev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ netif_device_detach(dev->nets[i]->netdev);
+ }
+ } else if (err) {
+ dev_err(&dev->intf->dev,
+ "Failed resubmitting read bulk urb: %d\n", err);
+ }
+}
+
+static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
+{
+ int i, err = 0;
+
+ if (dev->rxinitdone)
+ return 0;
+
+ for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf = NULL;
+ dma_addr_t buf_dma;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
+ GFP_KERNEL, &buf_dma);
+ if (!buf) {
+ dev_warn(&dev->intf->dev,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe
+ (dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, KVASER_USB_RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback, dev);
+ urb->transfer_dma = buf_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ KVASER_USB_RX_BUFFER_SIZE, buf,
+ buf_dma);
+ usb_free_urb(urb);
+ break;
+ }
+
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
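+ /* The anchor still holds a reference to the URB, so it is safe
+ * to drop ours here.
+ */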
+ usb_free_urb(urb);
+ }
+
+ if (i == 0) {
+ dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n",
+ err);
+ return err;
+ } else if (i < KVASER_USB_MAX_RX_URBS) {
+ dev_warn(&dev->intf->dev, "RX performances may be slow\n");
+ }
+
+ dev->rxinitdone = true;
+
+ return 0;
+}
+
+static int kvaser_usb_open(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(dev);
+ if (err)
+ goto error;
+
+ err = dev->ops->dev_set_opt_mode(priv);
+ if (err)
+ goto error;
+
+ err = dev->ops->dev_start_chip(priv);
+ if (err) {
+ netdev_warn(netdev, "Cannot start device, error %d\n", err);
+ goto error;
+ }
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+error:
+ close_candev(netdev);
+ return err;
+}
+
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+ int i, max_tx_urbs;
+
+ max_tx_urbs = priv->dev->max_tx_urbs;
+
+ priv->active_tx_contexts = 0;
+ for (i = 0; i < max_tx_urbs; i++)
+ priv->tx_contexts[i].echo_index = max_tx_urbs;
+}
+
+/* This function might sleep. Do not call it in the atomic context
+ * of URB completions.
+ */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+}
+
+static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
+{
+ int i;
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++)
+ usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
+ for (i = 0; i < dev->nchannels; i++) {
+ struct kvaser_usb_net_priv *priv = dev->nets[i];
+
+ if (priv)
+ kvaser_usb_unlink_tx_urbs(priv);
+ }
+}
+
+static int kvaser_usb_close(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ netif_stop_queue(netdev);
+
+ err = dev->ops->dev_flush_queue(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+
+ if (dev->ops->dev_reset_chip) {
+ err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (err)
+ netdev_warn(netdev, "Cannot reset card, error %d\n",
+ err);
+ }
+
+ err = dev->ops->dev_stop_chip(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+
+ /* reset tx contexts */
+ kvaser_usb_unlink_tx_urbs(priv);
+
+ priv->can.state = CAN_STATE_STOPPED;
+ close_candev(priv->netdev);
+
+ return 0;
+}
+
+static void kvaser_usb_write_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb_tx_urb_context *context = urb->context;
+ struct kvaser_usb_net_priv *priv;
+ struct net_device *netdev;
+
+ if (WARN_ON(!context))
+ return;
+
+ priv = context->priv;
+ netdev = priv->netdev;
+
+ kfree(urb->transfer_buffer);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+}
+
+static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct kvaser_usb_tx_urb_context *context = NULL;
+ struct urb *urb;
+ void *buf;
+ int cmd_len = 0;
+ int err, ret = NETDEV_TX_OK;
+ unsigned int i;
+ unsigned long flags;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ for (i = 0; i < dev->max_tx_urbs; i++) {
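+ /* An echo_index equal to max_tx_urbs marks the context as free */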
+ if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
+ context = &priv->tx_contexts[i];
+
+ context->echo_index = i;
+ can_put_echo_skb(skb, netdev, context->echo_index);
+ ++priv->active_tx_contexts;
+ if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
+ netif_stop_queue(netdev);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
+ /* This should never happen; it implies a flow control bug */
+ if (!context) {
+ netdev_warn(netdev, "cannot find free context\n");
+
+ ret = NETDEV_TX_BUSY;
+ goto freeurb;
+ }
+
+ buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
+ context->echo_index);
+ if (!buf) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_free_echo_skb(netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+ goto freeurb;
+ }
+
+ context->priv = priv;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ buf, cmd_len, kvaser_usb_write_bulk_callback,
+ context);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_free_echo_skb(netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
+ usb_unanchor_urb(urb);
+ kfree(buf);
+
+ stats->tx_dropped++;
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "Failed tx_urb %d\n", err);
+
+ goto freeurb;
+ }
+
+ ret = NETDEV_TX_OK;
+
+freeurb:
+ usb_free_urb(urb);
+ return ret;
+}
+
+static const struct net_device_ops kvaser_usb_netdev_ops = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ unregister_candev(dev->nets[i]->netdev);
+ }
+
+ kvaser_usb_unlink_all_urbs(dev);
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ free_candev(dev->nets[i]->netdev);
+ }
+}
+
+static int kvaser_usb_init_one(struct kvaser_usb *dev,
+ const struct usb_device_id *id, int channel)
+{
+ struct net_device *netdev;
+ struct kvaser_usb_net_priv *priv;
+ int err;
+
+ if (dev->ops->dev_reset_chip) {
+ err = dev->ops->dev_reset_chip(dev, channel);
+ if (err)
+ return err;
+ }
+
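+ /* priv ends in a flexible tx_contexts[] array; reserve room for
+ * max_tx_urbs contexts in the same allocation.
+ */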
+ netdev = alloc_candev(sizeof(*priv) +
+ dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ dev->max_tx_urbs);
+ if (!netdev) {
+ dev_err(&dev->intf->dev, "Cannot alloc candev\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(netdev);
+
+ init_usb_anchor(&priv->tx_submitted);
+ init_completion(&priv->start_comp);
+ init_completion(&priv->stop_comp);
+ priv->can.ctrlmode_supported = 0;
+
+ priv->dev = dev;
+ priv->netdev = netdev;
+ priv->channel = channel;
+
+ spin_lock_init(&priv->tx_contexts_lock);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = dev->cfg->clock.freq;
+ priv->can.bittiming_const = dev->cfg->bittiming_const;
+ priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
+ priv->can.do_set_mode = dev->ops->dev_set_mode;
+ if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
+ priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
+ if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+ priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
+
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.do_set_data_bittiming =
+ dev->ops->dev_set_data_bittiming;
+ }
+
+ netdev->flags |= IFF_ECHO;
+
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+
+ SET_NETDEV_DEV(netdev, &dev->intf->dev);
+ netdev->dev_id = channel;
+
+ dev->nets[channel] = priv;
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register CAN device\n");
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
+ }
+
+ netdev_dbg(netdev, "device registered\n");
+
+ return 0;
+}
+
+static int kvaser_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct kvaser_usb *dev;
+ int err;
+ int i;
+
+ dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ if (kvaser_is_leaf(id)) {
+ dev->card_data.leaf.family = KVASER_LEAF;
+ dev->ops = &kvaser_usb_leaf_dev_ops;
+ } else if (kvaser_is_usbcan(id)) {
+ dev->card_data.leaf.family = KVASER_USBCAN;
+ dev->ops = &kvaser_usb_leaf_dev_ops;
+ } else if (kvaser_is_hydra(id)) {
+ dev->ops = &kvaser_usb_hydra_dev_ops;
+ } else {
+ dev_err(&intf->dev,
+ "Product ID (%d) is not a supported Kvaser USB device\n",
+ id->idProduct);
+ return -ENODEV;
+ }
+
+ dev->intf = intf;
+
+ err = dev->ops->dev_setup_endpoints(dev);
+ if (err) {
+ dev_err(&intf->dev, "Cannot get usb endpoint(s)");
+ return err;
+ }
+
+ dev->udev = interface_to_usbdev(intf);
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ usb_set_intfdata(intf, dev);
+
+ dev->card_data.ctrlmode_supported = 0;
+ dev->card_data.capabilities = 0;
+ err = dev->ops->dev_init_card(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Failed to initialize card, error %d\n", err);
+ return err;
+ }
+
+ err = dev->ops->dev_get_software_info(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get software info, error %d\n", err);
+ return err;
+ }
+
+ if (dev->ops->dev_get_software_details) {
+ err = dev->ops->dev_get_software_details(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get software details, error %d\n", err);
+ return err;
+ }
+ }
+
+ if (WARN_ON(!dev->cfg))
+ return -ENODEV;
+
+ dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+ ((dev->fw_version >> 24) & 0xff),
+ ((dev->fw_version >> 16) & 0xff),
+ (dev->fw_version & 0xffff));
+
+ dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
+
+ err = dev->ops->dev_get_card_info(dev);
+ if (err) {
+ dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
+ return err;
+ }
+
+ if (dev->ops->dev_get_capabilities) {
+ err = dev->ops->dev_get_capabilities(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get capabilities, error %d\n", err);
+ kvaser_usb_remove_interfaces(dev);
+ return err;
+ }
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ err = kvaser_usb_init_one(dev, id, i);
+ if (err) {
+ kvaser_usb_remove_interfaces(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void kvaser_usb_disconnect(struct usb_interface *intf)
+{
+ struct kvaser_usb *dev = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (!dev)
+ return;
+
+ kvaser_usb_remove_interfaces(dev);
+}
+
+static struct usb_driver kvaser_usb_driver = {
+ .name = "kvaser_usb",
+ .probe = kvaser_usb_probe,
+ .disconnect = kvaser_usb_disconnect,
+ .id_table = kvaser_usb_table,
+};
+
+module_usb_driver(kvaser_usb_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
new file mode 100644
index 000000000000..c084bae5ec0a
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -0,0 +1,2028 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux mhydra driver (version 5.24)
+ * - CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * Known issues:
+ * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
+ * reported after a call to do_get_berr_counter(), since firmware does not
+ * distinguish between ERROR_WARNING and ERROR_ACTIVE.
+ * - Hardware timestamps are not set for CAN Tx frames.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Forward declarations */
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan;
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
+
+#define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82
+#define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02
+
+#define KVASER_USB_HYDRA_MAX_TRANSID 0xff
+#define KVASER_USB_HYDRA_MIN_TRANSID 0x01
+
+/* Minihydra command IDs */
+#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_CHIP_STATE_REQ 19
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_DRIVERMODE_REQ 21
+#define CMD_START_CHIP_REQ 26
+#define CMD_START_CHIP_RESP 27
+#define CMD_STOP_CHIP_REQ 28
+#define CMD_STOP_CHIP_RESP 29
+#define CMD_TX_CAN_MESSAGE 33
+#define CMD_GET_CARD_INFO_REQ 34
+#define CMD_GET_CARD_INFO_RESP 35
+#define CMD_GET_SOFTWARE_INFO_REQ 38
+#define CMD_GET_SOFTWARE_INFO_RESP 39
+#define CMD_ERROR_EVENT 45
+#define CMD_FLUSH_QUEUE 48
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_FLUSH_QUEUE_RESP 66
+#define CMD_SET_BUSPARAMS_FD_REQ 69
+#define CMD_SET_BUSPARAMS_FD_RESP 70
+#define CMD_SET_BUSPARAMS_RESP 85
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_RX_MESSAGE 106
+#define CMD_MAP_CHANNEL_REQ 200
+#define CMD_MAP_CHANNEL_RESP 201
+#define CMD_GET_SOFTWARE_DETAILS_REQ 202
+#define CMD_GET_SOFTWARE_DETAILS_RESP 203
+#define CMD_EXTENDED 255
+
+/* Minihydra extended command IDs */
+#define CMD_TX_CAN_MESSAGE_FD 224
+#define CMD_TX_ACKNOWLEDGE_FD 225
+#define CMD_RX_MESSAGE_FD 226
+
+/* Hydra commands are handled by different threads in the firmware.
+ * The threads are denoted hydra entities (HEs). Each HE has a unique
+ * 6-bit address, which is used in hydra commands to address the source
+ * and destination HE. There are two predefined HE addresses; the
+ * remaining addresses differ between devices and firmware versions, so
+ * we need to enumerate them (see kvaser_usb_hydra_map_channel()).
+ */
+
+/* Well-known HE addresses */
+#define KVASER_USB_HYDRA_HE_ADDRESS_ROUTER 0x00
+#define KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL 0x3e
+
+#define KVASER_USB_HYDRA_TRANSID_CANHE 0x40
+#define KVASER_USB_HYDRA_TRANSID_SYSDBG 0x61
+
+struct kvaser_cmd_map_ch_req {
+ char name[16];
+ u8 channel;
+ u8 reserved[11];
+} __packed;
+
+struct kvaser_cmd_map_ch_res {
+ u8 he_addr;
+ u8 channel;
+ u8 reserved[26];
+} __packed;
+
+struct kvaser_cmd_card_info {
+ __le32 serial_number;
+ __le32 clock_res;
+ __le32 mfg_date;
+ __le32 ean[2];
+ u8 hw_version;
+ u8 usb_mode;
+ u8 hw_type;
+ u8 reserved0;
+ u8 nchannels;
+ u8 reserved1[3];
+} __packed;
+
+struct kvaser_cmd_sw_info {
+ u8 reserved0[8];
+ __le16 max_outstanding_tx;
+ u8 reserved1[18];
+} __packed;
+
+struct kvaser_cmd_sw_detail_req {
+ u8 use_ext_cmd;
+ u8 reserved[27];
+} __packed;
+
+/* Software detail flags */
+#define KVASER_USB_HYDRA_SW_FLAG_FW_BETA BIT(2)
+#define KVASER_USB_HYDRA_SW_FLAG_FW_BAD BIT(4)
+#define KVASER_USB_HYDRA_SW_FLAG_FREQ_80M BIT(5)
+#define KVASER_USB_HYDRA_SW_FLAG_EXT_CMD BIT(9)
+#define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10)
+#define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11)
+#define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12)
+struct kvaser_cmd_sw_detail_res {
+ __le32 sw_flags;
+ __le32 sw_version;
+ __le32 sw_name;
+ __le32 ean[2];
+ __le32 max_bitrate;
+ u8 reserved[4];
+} __packed;
+
+/* Sub commands for cap_req and cap_res */
+#define KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE 0x02
+#define KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT 0x05
+#define KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT 0x06
+struct kvaser_cmd_cap_req {
+ __le16 cap_cmd;
+ u8 reserved[26];
+} __packed;
+
+/* Status codes for cap_res */
+#define KVASER_USB_HYDRA_CAP_STAT_OK 0x00
+#define KVASER_USB_HYDRA_CAP_STAT_NOT_IMPL 0x01
+#define KVASER_USB_HYDRA_CAP_STAT_UNAVAIL 0x02
+struct kvaser_cmd_cap_res {
+ __le16 cap_cmd;
+ __le16 status;
+ __le32 mask;
+ __le32 value;
+ u8 reserved[16];
+} __packed;
+
+/* CMD_ERROR_EVENT error codes */
+#define KVASER_USB_HYDRA_ERROR_EVENT_CAN 0x01
+#define KVASER_USB_HYDRA_ERROR_EVENT_PARAM 0x09
+struct kvaser_cmd_error_event {
+ __le16 timestamp[3];
+ u8 reserved;
+ u8 error_code;
+ __le16 info1;
+ __le16 info2;
+} __packed;
+
+/* Chip state status flags. Used for chip_state_event and err_frame_data. */
+#define KVASER_USB_HYDRA_BUS_ERR_ACT 0x00
+#define KVASER_USB_HYDRA_BUS_ERR_PASS BIT(5)
+#define KVASER_USB_HYDRA_BUS_BUS_OFF BIT(6)
+struct kvaser_cmd_chip_state_event {
+ __le16 timestamp[3];
+ u8 tx_err_counter;
+ u8 rx_err_counter;
+ u8 bus_status;
+ u8 reserved[19];
+} __packed;
+
+/* Busparam modes */
+#define KVASER_USB_HYDRA_BUS_MODE_CAN 0x00
+#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
+#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
+struct kvaser_cmd_set_busparams {
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 nsamples;
+ u8 reserved0[4];
+ __le32 bitrate_d;
+ u8 tseg1_d;
+ u8 tseg2_d;
+ u8 sjw_d;
+ u8 nsamples_d;
+ u8 canfd_mode;
+ u8 reserved1[7];
+} __packed;
+
+/* Ctrl modes */
+#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
+#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
+struct kvaser_cmd_set_ctrlmode {
+ u8 mode;
+ u8 reserved[27];
+} __packed;
+
+struct kvaser_err_frame_data {
+ u8 bus_status;
+ u8 reserved0;
+ u8 tx_err_counter;
+ u8 rx_err_counter;
+ u8 reserved1[4];
+} __packed;
+
+struct kvaser_cmd_rx_can {
+ u8 cmd_len;
+ u8 cmd_no;
+ u8 channel;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 dlc;
+ u8 padding;
+ __le32 id;
+ union {
+ u8 data[8];
+ struct kvaser_err_frame_data err_frame_data;
+ };
+} __packed;
+
+/* Extended CAN ID flag. Used in rx_can and tx_can */
+#define KVASER_USB_HYDRA_EXTENDED_FRAME_ID BIT(31)
+struct kvaser_cmd_tx_can {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 transid;
+ u8 channel;
+ u8 reserved[11];
+} __packed;
+
+struct kvaser_cmd_header {
+ u8 cmd_no;
+ /* The destination HE address is stored in bits 0..5 of he_addr.
+ * The upper two bits of the source HE address are stored in bits 6..7
+ * of he_addr, and the lower four bits in bits 12..15 of transid.
+ */
+ u8 he_addr;
+ __le16 transid;
+} __packed;
+
+struct kvaser_cmd {
+ struct kvaser_cmd_header header;
+ union {
+ struct kvaser_cmd_map_ch_req map_ch_req;
+ struct kvaser_cmd_map_ch_res map_ch_res;
+
+ struct kvaser_cmd_card_info card_info;
+ struct kvaser_cmd_sw_info sw_info;
+ struct kvaser_cmd_sw_detail_req sw_detail_req;
+ struct kvaser_cmd_sw_detail_res sw_detail_res;
+
+ struct kvaser_cmd_cap_req cap_req;
+ struct kvaser_cmd_cap_res cap_res;
+
+ struct kvaser_cmd_error_event error_event;
+
+ struct kvaser_cmd_set_busparams set_busparams_req;
+
+ struct kvaser_cmd_chip_state_event chip_state_event;
+
+ struct kvaser_cmd_set_ctrlmode set_ctrlmode;
+
+ struct kvaser_cmd_rx_can rx_can;
+ struct kvaser_cmd_tx_can tx_can;
+ } __packed;
+} __packed;
+
+/* CAN frame flags. Used in rx_can, ext_rx_can, tx_can and ext_tx_can */
+#define KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME BIT(0)
+#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
+#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
+#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
+/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
+#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
+#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
+#define KVASER_USB_HYDRA_CF_FLAG_FDF BIT(16)
+#define KVASER_USB_HYDRA_CF_FLAG_BRS BIT(17)
+#define KVASER_USB_HYDRA_CF_FLAG_ESI BIT(18)
+
+/* KCAN packet header macros. Used in ext_rx_can and ext_tx_can */
+#define KVASER_USB_KCAN_DATA_DLC_BITS 4
+#define KVASER_USB_KCAN_DATA_DLC_SHIFT 8
+#define KVASER_USB_KCAN_DATA_DLC_MASK \
+ GENMASK(KVASER_USB_KCAN_DATA_DLC_BITS - 1 + \
+ KVASER_USB_KCAN_DATA_DLC_SHIFT, \
+ KVASER_USB_KCAN_DATA_DLC_SHIFT)
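+/* i.e. GENMASK(11, 8): the 4-bit DLC occupies bits 11..8 of kcan_header */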
+
+#define KVASER_USB_KCAN_DATA_BRS BIT(14)
+#define KVASER_USB_KCAN_DATA_FDF BIT(15)
+#define KVASER_USB_KCAN_DATA_OSM BIT(16)
+#define KVASER_USB_KCAN_DATA_AREQ BIT(31)
+#define KVASER_USB_KCAN_DATA_SRR BIT(31)
+#define KVASER_USB_KCAN_DATA_RTR BIT(29)
+#define KVASER_USB_KCAN_DATA_IDE BIT(30)
+struct kvaser_cmd_ext_rx_can {
+ __le32 flags;
+ __le32 id;
+ __le32 kcan_id;
+ __le32 kcan_header;
+ __le64 timestamp;
+ union {
+ u8 kcan_payload[64];
+ struct kvaser_err_frame_data err_frame_data;
+ };
+} __packed;
+
+struct kvaser_cmd_ext_tx_can {
+ __le32 flags;
+ __le32 id;
+ __le32 kcan_id;
+ __le32 kcan_header;
+ u8 databytes;
+ u8 dlc;
+ u8 reserved[6];
+ u8 kcan_payload[64];
+} __packed;
+
+struct kvaser_cmd_ext_tx_ack {
+ __le32 flags;
+ u8 reserved0[4];
+ __le64 timestamp;
+ u8 reserved1[8];
+} __packed;
+
+/* struct for extended commands (CMD_EXTENDED) */
+struct kvaser_cmd_ext {
+ struct kvaser_cmd_header header;
+ __le16 len;
+ u8 cmd_no_ext;
+ u8 reserved;
+
+ union {
+ struct kvaser_cmd_ext_rx_can rx_can;
+ struct kvaser_cmd_ext_tx_can tx_can;
+ struct kvaser_cmd_ext_tx_ack tx_ack;
+ } __packed;
+} __packed;
+
+static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
+ .name = "kvaser_usb_kcan",
+ .tseg1_min = 1,
+ .tseg1_max = 255,
+ .tseg2_min = 1,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 4096,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+ .name = "kvaser_usb_flex",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+#define KVASER_USB_HYDRA_TRANSID_BITS 12
+#define KVASER_USB_HYDRA_TRANSID_MASK \
+ GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0)
+#define KVASER_USB_HYDRA_HE_ADDR_SRC_MASK GENMASK(7, 6)
+#define KVASER_USB_HYDRA_HE_ADDR_DEST_MASK GENMASK(5, 0)
+#define KVASER_USB_HYDRA_HE_ADDR_SRC_BITS 2
+static inline u16 kvaser_usb_hydra_get_cmd_transid(const struct kvaser_cmd *cmd)
+{
+ return le16_to_cpu(cmd->header.transid) & KVASER_USB_HYDRA_TRANSID_MASK;
+}
+
+static inline void kvaser_usb_hydra_set_cmd_transid(struct kvaser_cmd *cmd,
+ u16 transid)
+{
+ cmd->header.transid =
+ cpu_to_le16(transid & KVASER_USB_HYDRA_TRANSID_MASK);
+}
+
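+/* Worked example of the packing described in struct kvaser_cmd_header:
+ * for source HE 0x2f (0b101111), the upper two bits (0b10) sit in bits
+ * 7..6 of he_addr and the lower four bits (0b1111) in bits 12..15 of
+ * transid, so kvaser_usb_hydra_get_cmd_src_he() reassembles
+ * (0x80 >> 2) | 0xf = 0x2f.
+ */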
+static inline u8 kvaser_usb_hydra_get_cmd_src_he(const struct kvaser_cmd *cmd)
+{
+ return (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) >>
+ KVASER_USB_HYDRA_HE_ADDR_SRC_BITS |
+ le16_to_cpu(cmd->header.transid) >>
+ KVASER_USB_HYDRA_TRANSID_BITS;
+}
+
+static inline void kvaser_usb_hydra_set_cmd_dest_he(struct kvaser_cmd *cmd,
+ u8 dest_he)
+{
+ cmd->header.he_addr =
+ (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) |
+ (dest_he & KVASER_USB_HYDRA_HE_ADDR_DEST_MASK);
+}
+
+static u8 kvaser_usb_hydra_channel_from_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ int i;
+ u8 channel = 0xff;
+ u8 src_he = kvaser_usb_hydra_get_cmd_src_he(cmd);
+
+ for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
+ if (dev->card_data.hydra.channel_to_he[i] == src_he) {
+ channel = i;
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static u16 kvaser_usb_hydra_get_next_transid(struct kvaser_usb *dev)
+{
+ unsigned long flags;
+ u16 transid;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ spin_lock_irqsave(&card_data->transid_lock, flags);
+ transid = card_data->transid;
+ if (transid >= KVASER_USB_HYDRA_MAX_TRANSID)
+ transid = KVASER_USB_HYDRA_MIN_TRANSID;
+ else
+ transid++;
+ card_data->transid = transid;
+ spin_unlock_irqrestore(&card_data->transid_lock, flags);
+
+ return transid;
+}
+
+static size_t kvaser_usb_hydra_cmd_size(struct kvaser_cmd *cmd)
+{
+ size_t ret;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED)
+ ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len);
+ else
+ ret = sizeof(struct kvaser_cmd);
+
+ return ret;
+}
+
+static struct kvaser_usb_net_priv *
+kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv = NULL;
+ u8 channel = kvaser_usb_hydra_channel_from_cmd(dev, cmd);
+
+ if (channel >= dev->nchannels)
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ else
+ priv = dev->nets[channel];
+
+ return priv;
+}
+
+static ktime_t
+kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
+{
+ u64 ticks;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
+
+ ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
+ } else {
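+ /* Non-extended commands carry a 48-bit timestamp split across
+ * three little-endian 16-bit words.
+ */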
+ ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
+ ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
+ ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ }
+
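+ /* Assuming timestamp_freq is the tick rate in MHz,
+ * ticks * 1000 / freq yields nanoseconds.
+ */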
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ u8 cmd_no, int channel)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
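+ /* A negative channel means the command is not addressed to a
+ * specific channel HE; use the predefined illegal HE address as
+ * destination.
+ */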
+ if (channel < 0) {
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+ } else {
+ if (channel >= KVASER_USB_MAX_NET_DEVICES) {
+ dev_err(&dev->intf->dev, "channel (%d) out of range.\n",
+ channel);
+ err = -EINVAL;
+ goto end;
+ }
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[channel]);
+ }
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int
+kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ u8 cmd_no)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd_async(priv, cmd,
+ kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ kfree(cmd);
+
+ return err;
+}
+
+/* This function is used for synchronously waiting on hydra control commands.
+ * Note: Compared to kvaser_usb_hydra_read_bulk_callback(), we never need
+ * to handle partial hydra commands here, since hydra control commands are
+ * always non-extended commands.
+ */
+static int kvaser_usb_hydra_wait_cmd(const struct kvaser_usb *dev, u8 cmd_no,
+ struct kvaser_cmd *cmd)
+{
+ void *buf;
+ int err;
+ unsigned long timeout = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ dev_err(&dev->intf->dev, "Wait for CMD_EXTENDED not allowed\n");
+ return -EINVAL;
+ }
+
+ buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
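+ /* A single bulk transfer may contain several concatenated
+ * commands; walk the buffer until the requested reply is found
+ * or the transfer is exhausted.
+ */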
+ do {
+ int actual_len = 0;
+ int pos = 0;
+
+ err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
+ &actual_len);
+ if (err < 0)
+ goto end;
+
+ while (pos < actual_len) {
+ struct kvaser_cmd *tmp_cmd;
+ size_t cmd_len;
+
+ tmp_cmd = buf + pos;
+ cmd_len = kvaser_usb_hydra_cmd_size(tmp_cmd);
+ if (pos + cmd_len > actual_len) {
+ dev_err_ratelimited(&dev->intf->dev,
+ "Format error\n");
+ break;
+ }
+
+ if (tmp_cmd->header.cmd_no == cmd_no) {
+ memcpy(cmd, tmp_cmd, cmd_len);
+ goto end;
+ }
+ pos += cmd_len;
+ }
+ } while (time_before(jiffies, timeout));
+
+ err = -EINVAL;
+
+end:
+ kfree(buf);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_map_channel_resp(struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u8 he, channel;
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ if (transid > 0x007f || transid < 0x0040) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_RESP, invalid transid: 0x%x\n",
+ transid);
+ return -EINVAL;
+ }
+
+ switch (transid) {
+ case KVASER_USB_HYDRA_TRANSID_CANHE:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 1:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 2:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 3:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 4:
+ channel = transid & 0x000f;
+ he = cmd->map_ch_res.he_addr;
+ card_data->channel_to_he[channel] = he;
+ break;
+ case KVASER_USB_HYDRA_TRANSID_SYSDBG:
+ card_data->sysdbg_he = cmd->map_ch_res.he_addr;
+ break;
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unknown CMD_MAP_CHANNEL_RESP transid=0x%x\n",
+ transid);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
+ u8 channel, const char *name)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ strcpy(cmd->map_ch_req.name, name);
+ cmd->header.cmd_no = CMD_MAP_CHANNEL_REQ;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ROUTER);
+ cmd->map_ch_req.channel = channel;
+
+ kvaser_usb_hydra_set_cmd_transid(cmd, transid);
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_MAP_CHANNEL_RESP, cmd);
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_map_channel_resp(dev, cmd);
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
+ u16 cap_cmd_req, u16 *status)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+ int err;
+ int i;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
+ cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+ if (err)
+ goto end;
+
+ *status = le16_to_cpu(cmd->cap_res.status);
+
+ if (*status != KVASER_USB_HYDRA_CAP_STAT_OK)
+ goto end;
+
+ cap_cmd_res = le16_to_cpu(cmd->cap_res.cap_cmd);
+ switch (cap_cmd_res) {
+ case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+ case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+ case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+ value = le32_to_cpu(cmd->cap_res.value);
+ mask = le32_to_cpu(cmd->cap_res.mask);
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+ cap_cmd_res);
+ break;
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (BIT(i) & (value & mask)) {
+ switch (cap_cmd_res) {
+ case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_LISTENONLY;
+ break;
+ case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+ card_data->capabilities |=
+ KVASER_USB_CAP_BERR_CAP;
+ break;
+ case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_ONE_SHOT;
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static void kvaser_usb_hydra_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ if (completion_done(&priv->start_comp) &&
+ netif_queue_stopped(priv->netdev)) {
+ netif_wake_queue(priv->netdev);
+ } else {
+ netif_start_queue(priv->netdev);
+ complete(&priv->start_comp);
+ }
+}
+
+static void kvaser_usb_hydra_stop_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ complete(&priv->flush_comp);
+}
+
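+/* Map firmware bus status and error counters to a CAN state. The 96 and
+ * 128 thresholds are the error-warning and error-passive limits from the
+ * CAN specification.
+ */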
+static void
+kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+ const struct can_berr_counter *bec,
+ enum can_state *new_state)
+{
+ if (bus_status & KVASER_USB_HYDRA_BUS_BUS_OFF) {
+ *new_state = CAN_STATE_BUS_OFF;
+ } else if (bus_status & KVASER_USB_HYDRA_BUS_ERR_PASS) {
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (bus_status == KVASER_USB_HYDRA_BUS_ERR_ACT) {
+ if (bec->txerr >= 128 || bec->rxerr >= 128) {
+ netdev_warn(priv->netdev,
+ "ERR_ACTIVE but err tx=%u or rx=%u >=128\n",
+ bec->txerr, bec->rxerr);
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (bec->txerr >= 96 || bec->rxerr >= 96) {
+ *new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ *new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+}
+
+static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+ const struct can_berr_counter *bec)
+{
+ struct net_device *netdev = priv->netdev;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ enum can_state new_state, old_state;
+
+ old_state = priv->can.state;
+
+ kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, bec,
+ &new_state);
+
+ if (new_state == old_state)
+ return;
+
+ /* Ignore state change if previous state was STOPPED and the new state
+ * is BUS_OFF. Firmware always reports this as BUS_OFF, since it
+ * does not distinguish between BUS_OFF and STOPPED.
+ */
+ if (old_state == CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF)
+ return;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (skb) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_hydra_send_simple_cmd_async
+ (priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (!skb) {
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+
+ stats = &netdev->stats;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct can_berr_counter bec;
+ u8 bus_status;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ bus_status = cmd->chip_state_event.bus_status;
+ bec.txerr = cmd->chip_state_event.tx_err_counter;
+ bec.rxerr = cmd->chip_state_event.rx_err_counter;
+
+ kvaser_usb_hydra_update_state(priv, bus_status, &bec);
+ priv->bec.txerr = bec.txerr;
+ priv->bec.rxerr = bec.rxerr;
+}
+
+static void kvaser_usb_hydra_error_event_parameter(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* info1 will contain the offending cmd_no */
+ switch (le16_to_cpu(cmd->error_event.info1)) {
+ case CMD_START_CHIP_REQ:
+ dev_warn(&dev->intf->dev,
+ "CMD_START_CHIP_REQ error in parameter\n");
+ break;
+
+ case CMD_STOP_CHIP_REQ:
+ dev_warn(&dev->intf->dev,
+ "CMD_STOP_CHIP_REQ error in parameter\n");
+ break;
+
+ case CMD_FLUSH_QUEUE:
+ dev_warn(&dev->intf->dev,
+ "CMD_FLUSH_QUEUE error in parameter\n");
+ break;
+
+ case CMD_SET_BUSPARAMS_REQ:
+ dev_warn(&dev->intf->dev,
+ "Set bittiming failed. Error in parameter\n");
+ break;
+
+ case CMD_SET_BUSPARAMS_FD_REQ:
+ dev_warn(&dev->intf->dev,
+ "Set data bittiming failed. Error in parameter\n");
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled parameter error event cmd_no (%u)\n",
+ le16_to_cpu(cmd->error_event.info1));
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_error_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->error_event.error_code) {
+ case KVASER_USB_HYDRA_ERROR_EVENT_PARAM:
+ kvaser_usb_hydra_error_event_parameter(dev, cmd);
+ break;
+
+ case KVASER_USB_HYDRA_ERROR_EVENT_CAN:
+ /* Wrong channel mapping?! This should never happen!
+ * info1 will contain the offending cmd_no
+ */
+ dev_err(&dev->intf->dev,
+ "Received CAN error event for cmd_no (%u)\n",
+ le16_to_cpu(cmd->error_event.info1));
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled error event (%d)\n",
+ cmd->error_event.error_code);
+ break;
+ }
+}
+
+static void
+kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_err_frame_data *err_frame_data,
+ ktime_t hwtstamp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_berr_counter bec;
+ enum can_state new_state, old_state;
+ u8 bus_status;
+
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ bus_status = err_frame_data->bus_status;
+ bec.txerr = err_frame_data->tx_err_counter;
+ bec.rxerr = err_frame_data->rx_err_counter;
+
+ old_state = priv->can.state;
+ kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
+ &new_state);
+
+ skb = alloc_can_err_skb(netdev, &cf);
+
+ if (new_state != old_state) {
+ if (skb) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec.txerr >= bec.rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec.txerr <= bec.rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ can_change_state(netdev, cf, tx_state, rx_state);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_hydra_send_simple_cmd_async
+ (priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id |= CAN_ERR_BUSERROR;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ priv->bec.txerr = bec.txerr;
+ priv->bec.rxerr = bec.rxerr;
+}
+
+static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_cmd_ext *cmd)
+{
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 flags;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_BUSERROR;
+ flags = le32_to_cpu(cmd->tx_ack.flags);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OSM_NACK)
+ cf->can_id |= CAN_ERR_ACK;
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ABL) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ priv->can.can_stats.arbitration_lost++;
+ }
+
+ stats->tx_errors++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_tx_urb_context *context;
+ struct kvaser_usb_net_priv *priv;
+ unsigned long irq_flags;
+ bool one_shot_fail = false;
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
+ u32 flags = le32_to_cpu(cmd_ext->tx_ack.flags);
+
+ if (flags & (KVASER_USB_HYDRA_CF_FLAG_OSM_NACK |
+ KVASER_USB_HYDRA_CF_FLAG_ABL)) {
+ kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
+ one_shot_fail = true;
+ }
+ }
+
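+ /* On transmit, the command transid was set to the tx context index
+ * (see kvaser_usb_start_xmit()), so it maps the ACK back to its
+ * context.
+ */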
+ context = &priv->tx_contexts[transid % dev->max_tx_urbs];
+ if (!one_shot_fail) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_dlc2len(context->dlc);
+ }
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
+
+ can_get_echo_skb(priv->netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(priv->netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
+}
+
+static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv = NULL;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats;
+ u8 flags;
+ ktime_t hwtstamp;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ stats = &priv->netdev->stats;
+
+ flags = cmd->rx_can.flags;
+ hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
+ kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
+ hwtstamp);
+ return;
+ }
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id = le32_to_cpu(cmd->rx_can.id);
+
+ if (cf->can_id & KVASER_USB_HYDRA_EXTENDED_FRAME_ID) {
+ cf->can_id &= CAN_EFF_MASK;
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id &= CAN_SFF_MASK;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+
+ cf->can_dlc = get_can_dlc(cmd->rx_can.dlc);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, cmd->rx_can.data, cf->can_dlc);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
+ const struct kvaser_cmd_ext *cmd)
+{
+ struct kvaser_cmd *std_cmd = (struct kvaser_cmd *)cmd;
+ struct kvaser_usb_net_priv *priv;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats;
+ u32 flags;
+ u8 dlc;
+ u32 kcan_header;
+ ktime_t hwtstamp;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, std_cmd);
+ if (!priv)
+ return;
+
+ stats = &priv->netdev->stats;
+
+ kcan_header = le32_to_cpu(cmd->rx_can.kcan_header);
+ dlc = (kcan_header & KVASER_USB_KCAN_DATA_DLC_MASK) >>
+ KVASER_USB_KCAN_DATA_DLC_SHIFT;
+
+ flags = le32_to_cpu(cmd->rx_can.flags);
+ hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
+ kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
+ hwtstamp);
+ return;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF)
+ skb = alloc_canfd_skb(priv->netdev, &cf);
+ else
+ skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf);
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id = le32_to_cpu(cmd->rx_can.id);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID) {
+ cf->can_id &= CAN_EFF_MASK;
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id &= CAN_SFF_MASK;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) {
+ cf->len = can_dlc2len(get_canfd_dlc(dlc));
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS)
+ cf->flags |= CANFD_BRS;
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI)
+ cf->flags |= CANFD_ESI;
+ } else {
+ cf->len = get_can_dlc(dlc);
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->header.cmd_no) {
+ case CMD_START_CHIP_RESP:
+ kvaser_usb_hydra_start_chip_reply(dev, cmd);
+ break;
+
+ case CMD_STOP_CHIP_RESP:
+ kvaser_usb_hydra_stop_chip_reply(dev, cmd);
+ break;
+
+ case CMD_FLUSH_QUEUE_RESP:
+ kvaser_usb_hydra_flush_queue_reply(dev, cmd);
+ break;
+
+ case CMD_CHIP_STATE_EVENT:
+ kvaser_usb_hydra_state_event(dev, cmd);
+ break;
+
+ case CMD_ERROR_EVENT:
+ kvaser_usb_hydra_error_event(dev, cmd);
+ break;
+
+ case CMD_TX_ACKNOWLEDGE:
+ kvaser_usb_hydra_tx_acknowledge(dev, cmd);
+ break;
+
+ case CMD_RX_MESSAGE:
+ kvaser_usb_hydra_rx_msg_std(dev, cmd);
+ break;
+
+ /* Ignored commands */
+ case CMD_SET_BUSPARAMS_RESP:
+ case CMD_SET_BUSPARAMS_FD_RESP:
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev, "Unhandled command (%d)\n",
+ cmd->header.cmd_no);
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_handle_cmd_ext(const struct kvaser_usb *dev,
+ const struct kvaser_cmd_ext *cmd)
+{
+ switch (cmd->cmd_no_ext) {
+ case CMD_TX_ACKNOWLEDGE_FD:
+ kvaser_usb_hydra_tx_acknowledge(dev, (struct kvaser_cmd *)cmd);
+ break;
+
+ case CMD_RX_MESSAGE_FD:
+ kvaser_usb_hydra_rx_msg_ext(dev, cmd);
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev, "Unhandled extended command (%d)\n",
+ cmd->header.cmd_no);
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ if (cmd->header.cmd_no == CMD_EXTENDED)
+ kvaser_usb_hydra_handle_cmd_ext
+ (dev, (struct kvaser_cmd_ext *)cmd);
+ else
+ kvaser_usb_hydra_handle_cmd_std(dev, cmd);
+}
+
+static void *
+kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd_ext *cmd;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 dlc = can_len2dlc(cf->len);
+ u8 nbr_of_bytes = cf->len;
+ u32 flags;
+ u32 id;
+ u32 kcan_id;
+ u32 kcan_header;
+
+ *frame_len = nbr_of_bytes;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ ((struct kvaser_cmd *)cmd,
+ dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid((struct kvaser_cmd *)cmd, transid);
+
+ cmd->header.cmd_no = CMD_EXTENDED;
+ cmd->cmd_no_ext = CMD_TX_CAN_MESSAGE_FD;
+
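+	/* Only the used part of kcan_payload is sent; pad the resulting
+	 * command length up to a multiple of 8 bytes.
+	 */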
+ *cmd_len = ALIGN(sizeof(struct kvaser_cmd_ext) -
+ sizeof(cmd->tx_can.kcan_payload) + nbr_of_bytes,
+ 8);
+
+ cmd->len = cpu_to_le16(*cmd_len);
+
+ cmd->tx_can.databytes = nbr_of_bytes;
+ cmd->tx_can.dlc = dlc;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = cf->can_id & CAN_EFF_MASK;
+ flags = KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID;
+ kcan_id = (cf->can_id & CAN_EFF_MASK) |
+ KVASER_USB_KCAN_DATA_IDE | KVASER_USB_KCAN_DATA_SRR;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ flags = 0;
+ kcan_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_ERR_FLAG)
+ flags |= KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
+
+ kcan_header = ((dlc << KVASER_USB_KCAN_DATA_DLC_SHIFT) &
+ KVASER_USB_KCAN_DATA_DLC_MASK) |
+ KVASER_USB_KCAN_DATA_AREQ |
+ (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
+ KVASER_USB_KCAN_DATA_OSM : 0);
+
+ if (can_is_canfd_skb(skb)) {
+ kcan_header |= KVASER_USB_KCAN_DATA_FDF |
+ (cf->flags & CANFD_BRS ?
+ KVASER_USB_KCAN_DATA_BRS : 0);
+ } else {
+ if (cf->can_id & CAN_RTR_FLAG) {
+ kcan_id |= KVASER_USB_KCAN_DATA_RTR;
+ cmd->tx_can.databytes = 0;
+ flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
+ }
+ }
+
+ cmd->tx_can.kcan_id = cpu_to_le32(kcan_id);
+ cmd->tx_can.id = cpu_to_le32(id);
+ cmd->tx_can.flags = cpu_to_le32(flags);
+ cmd->tx_can.kcan_header = cpu_to_le32(kcan_header);
+
+ memcpy(cmd->tx_can.kcan_payload, cf->data, nbr_of_bytes);
+
+ return cmd;
+}
+
+static void *
+kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 flags;
+ u32 id;
+
+ *frame_len = cf->can_dlc;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid(cmd, transid);
+
+ cmd->header.cmd_no = CMD_TX_CAN_MESSAGE;
+
+ *cmd_len = ALIGN(sizeof(struct kvaser_cmd), 8);
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = (cf->can_id & CAN_EFF_MASK);
+ id |= KVASER_USB_HYDRA_EXTENDED_FRAME_ID;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ cmd->tx_can.dlc = cf->can_dlc;
+
+ flags = (cf->can_id & CAN_EFF_FLAG ?
+ KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
+
+ flags |= (cf->can_id & CAN_ERR_FLAG ?
+ KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME : 0);
+
+ cmd->tx_can.id = cpu_to_le32(id);
+ cmd->tx_can.flags = flags;
+
+ memcpy(cmd->tx_can.data, cf->data, *frame_len);
+
+ return cmd;
+}
+
+static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+{
+ int err = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* CAN controller automatically recovers from BUS_OFF */
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ int tseg1 = bt->prop_seg + bt->phase_seg1;
+ int tseg2 = bt->phase_seg2;
+ int sjw = bt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
+ cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
+ cmd->set_busparams_req.sjw = (u8)sjw;
+ cmd->set_busparams_req.tseg1 = (u8)tseg1;
+ cmd->set_busparams_req.tseg2 = (u8)tseg2;
+ cmd->set_busparams_req.nsamples = 1;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+ int tseg2 = dbt->phase_seg2;
+ int sjw = dbt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
+ cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
+ cmd->set_busparams_req.sjw_d = (u8)sjw;
+ cmd->set_busparams_req.tseg1_d = (u8)tseg1;
+ cmd->set_busparams_req.tseg2_d = (u8)tseg2;
+ cmd->set_busparams_req.nsamples_d = 1;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ cmd->set_busparams_req.canfd_mode =
+ KVASER_USB_HYDRA_BUS_MODE_NONISO;
+ else
+ cmd->set_busparams_req.canfd_mode =
+ KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO;
+ }
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev,
+ CMD_GET_CHIP_STATE_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
+{
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *ep;
+ int i;
+
+ iface_desc = &dev->intf->altsetting[0];
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ ep = &iface_desc->endpoint[i].desc;
+
+ if (!dev->bulk_in && usb_endpoint_is_bulk_in(ep) &&
+ ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_IN_ADDR)
+ dev->bulk_in = ep;
+
+ if (!dev->bulk_out && usb_endpoint_is_bulk_out(ep) &&
+ ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_OUT_ADDR)
+ dev->bulk_out = ep;
+
+ if (dev->bulk_in && dev->bulk_out)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
+{
+ int err;
+ unsigned int i;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ card_data->transid = KVASER_USB_HYDRA_MIN_TRANSID;
+ spin_lock_init(&card_data->transid_lock);
+
+ memset(card_data->usb_rx_leftover, 0, KVASER_USB_HYDRA_MAX_CMD_LEN);
+ card_data->usb_rx_leftover_len = 0;
+ spin_lock_init(&card_data->usb_rx_leftover_lock);
+
+ memset(card_data->channel_to_he, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL,
+ sizeof(card_data->channel_to_he));
+ card_data->sysdbg_he = 0;
+
+ for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
+ err = kvaser_usb_hydra_map_channel
+ (dev,
+ (KVASER_USB_HYDRA_TRANSID_CANHE | i),
+ i, "CAN");
+ if (err) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_REQ failed for CAN%u\n", i);
+ return err;
+ }
+ }
+
+ err = kvaser_usb_hydra_map_channel(dev, KVASER_USB_HYDRA_TRANSID_SYSDBG,
+ 0, "SYSDBG");
+ if (err) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_REQ failed for SYSDBG\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO_REQ,
+ -1);
+ if (err)
+ return err;
+
+ memset(&cmd, 0, sizeof(struct kvaser_cmd));
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_RESP, &cmd);
+ if (err)
+ return err;
+
+ dev->max_tx_urbs = min_t(unsigned int, KVASER_USB_MAX_TX_URBS,
+ le16_to_cpu(cmd.sw_info.max_outstanding_tx));
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+ u32 flags;
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
+ cmd->sw_detail_req.use_ext_cmd = 1;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_DETAILS_RESP,
+ cmd);
+ if (err)
+ goto end;
+
+ dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
+ dev_err(&dev->intf->dev,
+ "Bad firmware, device refuse to run!\n");
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BETA)
+ dev_info(&dev->intf->dev, "Beta firmware in use\n");
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CAP)
+ card_data->capabilities |= KVASER_USB_CAP_EXT_CAP;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CMD)
+ card_data->capabilities |= KVASER_USB_HYDRA_CAP_EXT_CMD;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_CANFD)
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_FD;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_NONISO)
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M)
+ dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan;
+ else
+ dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc;
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_CARD_INFO_REQ, -1);
+ if (err)
+ return err;
+
+ memset(&cmd, 0, sizeof(struct kvaser_cmd));
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
+ if (err)
+ return err;
+
+ dev->nchannels = cmd.card_info.nchannels;
+ if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
+{
+ int err;
+ u16 status;
+
+ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+ dev_info(&dev->intf->dev,
+ "No extended capability support. Upgrade your device.\n");
+ return 0;
+ }
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev,
+ KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE failed %u\n",
+ status);
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev,
+ KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT failed %u\n",
+ status);
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev, KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT failed %u\n",
+ status);
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int err;
+
+ if ((priv->can.ctrlmode &
+ (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) ==
+ CAN_CTRLMODE_FD_NON_ISO) {
+ netdev_warn(priv->netdev,
+ "CTRLMODE_FD shall be on if CTRLMODE_FD_NON_ISO is on\n");
+ return -EINVAL;
+ }
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_LISTEN;
+ else
+ cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->start_comp);
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->start_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->stop_comp);
+
+	/* Make sure we do not report an invalid BUS_OFF from
+	 * CMD_CHIP_STATE_EVENT; see the comment in
+	 * kvaser_usb_hydra_update_state().
+ */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_STOP_CHIP_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->stop_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->flush_comp);
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->flush_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* A single extended hydra command can be transmitted in multiple USB
+ * transfers. We have to buffer partial hydra commands and handle them on
+ * the next callback.
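+ *
+ * For example (hypothetical sizes): a 96-byte extended command split
+ * across two 64-byte bulk transfers is buffered in usb_rx_leftover on the
+ * first callback and dispatched only once the remaining 32 bytes arrive.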
+ */
+static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
+ void *buf, int len)
+{
+ unsigned long irq_flags;
+ struct kvaser_cmd *cmd;
+ int pos = 0;
+ size_t cmd_len;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+ int usb_rx_leftover_len;
+ spinlock_t *usb_rx_leftover_lock = &card_data->usb_rx_leftover_lock;
+
+ spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
+ usb_rx_leftover_len = card_data->usb_rx_leftover_len;
+ if (usb_rx_leftover_len) {
+ int remaining_bytes;
+
+ cmd = (struct kvaser_cmd *)card_data->usb_rx_leftover;
+
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+
+ remaining_bytes = min_t(unsigned int, len,
+ cmd_len - usb_rx_leftover_len);
+ /* Make sure we do not overflow usb_rx_leftover */
+ if (remaining_bytes + usb_rx_leftover_len >
+ KVASER_USB_HYDRA_MAX_CMD_LEN) {
+ dev_err(&dev->intf->dev, "Format error\n");
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+ return;
+ }
+
+ memcpy(card_data->usb_rx_leftover + usb_rx_leftover_len, buf,
+ remaining_bytes);
+ pos += remaining_bytes;
+
+ if (remaining_bytes + usb_rx_leftover_len == cmd_len) {
+ kvaser_usb_hydra_handle_cmd(dev, cmd);
+ usb_rx_leftover_len = 0;
+ } else {
+ /* Command still not complete */
+ usb_rx_leftover_len += remaining_bytes;
+ }
+ card_data->usb_rx_leftover_len = usb_rx_leftover_len;
+ }
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+
+ while (pos < len) {
+ cmd = buf + pos;
+
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+
+ if (pos + cmd_len > len) {
+			/* We got the first part of a command */
+ int leftover_bytes;
+
+ leftover_bytes = len - pos;
+ /* Make sure we do not overflow usb_rx_leftover */
+ if (leftover_bytes > KVASER_USB_HYDRA_MAX_CMD_LEN) {
+ dev_err(&dev->intf->dev, "Format error\n");
+ return;
+ }
+ spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
+ memcpy(card_data->usb_rx_leftover, buf + pos,
+ leftover_bytes);
+ card_data->usb_rx_leftover_len = leftover_bytes;
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+ break;
+ }
+
+ kvaser_usb_hydra_handle_cmd(dev, cmd);
+ pos += cmd_len;
+ }
+}
+
+static void *
+kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ void *buf;
+
+ if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
+ buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
+ cmd_len, transid);
+ else
+ buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
+ cmd_len, transid);
+
+ return buf;
+}
+
+const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
+ .dev_set_mode = kvaser_usb_hydra_set_mode,
+ .dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
+ .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
+ .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
+ .dev_init_card = kvaser_usb_hydra_init_card,
+ .dev_get_software_info = kvaser_usb_hydra_get_software_info,
+ .dev_get_software_details = kvaser_usb_hydra_get_software_details,
+ .dev_get_card_info = kvaser_usb_hydra_get_card_info,
+ .dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
+ .dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
+ .dev_start_chip = kvaser_usb_hydra_start_chip,
+ .dev_stop_chip = kvaser_usb_hydra_stop_chip,
+ .dev_reset_chip = NULL,
+ .dev_flush_queue = kvaser_usb_hydra_flush_queue,
+ .dev_read_bulk_callback = kvaser_usb_hydra_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_hydra_frame_to_cmd,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
+ .clock = {
+ .freq = 80000000,
+ },
+ .timestamp_freq = 80,
+ .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
+ .data_bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
+ .clock = {
+ .freq = 24000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+};
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
new file mode 100644
index 000000000000..07d2f3aa2c02
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -0,0 +1,1358 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Forward declaration */
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
+
+#define CAN_USB_CLOCK 8000000
+#define MAX_USBCAN_NET_DEVICES 2
+
+/* Command header size */
+#define CMD_HEADER_LEN 2
+
+/* Kvaser CAN message flags */
+#define MSG_FLAG_ERROR_FRAME BIT(0)
+#define MSG_FLAG_OVERRUN BIT(1)
+#define MSG_FLAG_NERR BIT(2)
+#define MSG_FLAG_WAKEUP BIT(3)
+#define MSG_FLAG_REMOTE_FRAME BIT(4)
+#define MSG_FLAG_RESERVED BIT(5)
+#define MSG_FLAG_TX_ACK BIT(6)
+#define MSG_FLAG_TX_REQUEST BIT(7)
+
+/* CAN states (M16C CxSTRH register) */
+#define M16C_STATE_BUS_RESET BIT(0)
+#define M16C_STATE_BUS_ERROR BIT(4)
+#define M16C_STATE_BUS_PASSIVE BIT(5)
+#define M16C_STATE_BUS_OFF BIT(6)
+
+/* Leaf/usbcan command ids */
+#define CMD_RX_STD_MESSAGE 12
+#define CMD_TX_STD_MESSAGE 13
+#define CMD_RX_EXT_MESSAGE 14
+#define CMD_TX_EXT_MESSAGE 15
+#define CMD_SET_BUS_PARAMS 16
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_CTRL_MODE 21
+#define CMD_RESET_CHIP 24
+#define CMD_START_CHIP 26
+#define CMD_START_CHIP_REPLY 27
+#define CMD_STOP_CHIP 28
+#define CMD_STOP_CHIP_REPLY 29
+
+#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
+
+#define CMD_GET_CARD_INFO 34
+#define CMD_GET_CARD_INFO_REPLY 35
+#define CMD_GET_SOFTWARE_INFO 38
+#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_FLUSH_QUEUE 48
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68
+
+#define CMD_LEAF_LOG_MESSAGE 106
+
+/* error factors */
+#define M16C_EF_ACKE BIT(0)
+#define M16C_EF_CRCE BIT(1)
+#define M16C_EF_FORME BIT(2)
+#define M16C_EF_STFE BIT(3)
+#define M16C_EF_BITE0 BIT(4)
+#define M16C_EF_BITE1 BIT(5)
+#define M16C_EF_RCVE BIT(6)
+#define M16C_EF_TRE BIT(7)
+
+/* Only Leaf-based devices can report M16C error factors,
+ * thus define our own error status flags for USBCANII
+ */
+#define USBCAN_ERROR_STATE_NONE 0
+#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
+#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
+#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
+
+/* bittiming parameters */
+#define KVASER_USB_TSEG1_MIN 1
+#define KVASER_USB_TSEG1_MAX 16
+#define KVASER_USB_TSEG2_MIN 1
+#define KVASER_USB_TSEG2_MAX 8
+#define KVASER_USB_SJW_MAX 4
+#define KVASER_USB_BRP_MIN 1
+#define KVASER_USB_BRP_MAX 64
+#define KVASER_USB_BRP_INC 1
+
+/* ctrl modes */
+#define KVASER_CTRL_MODE_NORMAL 1
+#define KVASER_CTRL_MODE_SILENT 2
+#define KVASER_CTRL_MODE_SELFRECEPTION 3
+#define KVASER_CTRL_MODE_OFF 4
+
+/* Extended CAN identifier flag */
+#define KVASER_EXTENDED_FRAME BIT(31)
+
+struct kvaser_cmd_simple {
+ u8 tid;
+ u8 channel;
+} __packed;
+
+struct kvaser_cmd_cardinfo {
+ u8 tid;
+ u8 nchannels;
+ __le32 serial_number;
+ __le32 padding0;
+ __le32 clock_resolution;
+ __le32 mfgdate;
+ u8 ean[8];
+ u8 hw_revision;
+ union {
+ struct {
+ u8 usb_hs_mode;
+ } __packed leaf1;
+ struct {
+ u8 padding;
+ } __packed usbcan1;
+ } __packed;
+ __le16 padding1;
+} __packed;
+
+struct leaf_cmd_softinfo {
+ u8 tid;
+ u8 padding0;
+ __le32 sw_options;
+ __le32 fw_version;
+ __le16 max_outstanding_tx;
+ __le16 padding1[9];
+} __packed;
+
+struct usbcan_cmd_softinfo {
+ u8 tid;
+ u8 fw_name[5];
+ __le16 max_outstanding_tx;
+ u8 padding[6];
+ __le32 fw_version;
+ __le16 checksum;
+ __le16 sw_options;
+} __packed;
+
+struct kvaser_cmd_busparams {
+ u8 tid;
+ u8 channel;
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 no_samp;
+} __packed;
+
+struct kvaser_cmd_tx_can {
+ u8 channel;
+ u8 tid;
+ u8 data[14];
+ union {
+ struct {
+ u8 padding;
+ u8 flags;
+ } __packed leaf;
+ struct {
+ u8 flags;
+ u8 padding;
+ } __packed usbcan;
+ } __packed;
+} __packed;
+
+struct kvaser_cmd_rx_can_header {
+ u8 channel;
+ u8 flag;
+} __packed;
+
+struct leaf_cmd_rx_can {
+ u8 channel;
+ u8 flag;
+
+ __le16 time[3];
+ u8 data[14];
+} __packed;
+
+struct usbcan_cmd_rx_can {
+ u8 channel;
+ u8 flag;
+
+ u8 data[14];
+ __le16 time;
+} __packed;
+
+struct leaf_cmd_chip_state_event {
+ u8 tid;
+ u8 channel;
+
+ __le16 time[3];
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct usbcan_cmd_chip_state_event {
+ u8 tid;
+ u8 channel;
+
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ __le16 time;
+
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_cmd_tx_acknowledge_header {
+ u8 channel;
+ u8 tid;
+} __packed;
+
+struct leaf_cmd_error_event {
+ u8 tid;
+ u8 flags;
+ __le16 time[3];
+ u8 channel;
+ u8 padding;
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ u8 status;
+ u8 error_factor;
+} __packed;
+
+struct usbcan_cmd_error_event {
+ u8 tid;
+ u8 padding;
+ u8 tx_errors_count_ch0;
+ u8 rx_errors_count_ch0;
+ u8 tx_errors_count_ch1;
+ u8 rx_errors_count_ch1;
+ u8 status_ch0;
+ u8 status_ch1;
+ __le16 time;
+} __packed;
+
+struct kvaser_cmd_ctrl_mode {
+ u8 tid;
+ u8 channel;
+ u8 ctrl_mode;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_cmd_flush_queue {
+ u8 tid;
+ u8 channel;
+ u8 flags;
+ u8 padding[3];
+} __packed;
+
+struct leaf_cmd_log_message {
+ u8 channel;
+ u8 flags;
+ __le16 time[3];
+ u8 dlc;
+ u8 time_offset;
+ __le32 id;
+ u8 data[8];
+} __packed;
+
+struct kvaser_cmd {
+ u8 len;
+ u8 id;
+ union {
+ struct kvaser_cmd_simple simple;
+ struct kvaser_cmd_cardinfo cardinfo;
+ struct kvaser_cmd_busparams busparams;
+
+ struct kvaser_cmd_rx_can_header rx_can_header;
+ struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
+
+ union {
+ struct leaf_cmd_softinfo softinfo;
+ struct leaf_cmd_rx_can rx_can;
+ struct leaf_cmd_chip_state_event chip_state_event;
+ struct leaf_cmd_error_event error_event;
+ struct leaf_cmd_log_message log_message;
+ } __packed leaf;
+
+ union {
+ struct usbcan_cmd_softinfo softinfo;
+ struct usbcan_cmd_rx_can rx_can;
+ struct usbcan_cmd_chip_state_event chip_state_event;
+ struct usbcan_cmd_error_event error_event;
+ } __packed usbcan;
+
+ struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_ctrl_mode ctrl_mode;
+ struct kvaser_cmd_flush_queue flush_queue;
+ } u;
+} __packed;
+
+/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
+ * handling. Some discrepancies between the two families exist:
+ *
+ * - USBCAN firmware does not report M16C "error factors"
+ * - USBCAN controllers have difficulty reporting whether a raised error
+ *   event is for ch0 or ch1. They leave such arbitration to the OS
+ *   driver by letting it compare error counters with previous values
+ *   and decide the error event's channel. Thus for USBCAN, the channel
+ *   field is only advisory.
+ */
+struct kvaser_usb_err_summary {
+ u8 channel, status, txerr, rxerr;
+ union {
+ struct {
+ u8 error_factor;
+ } leaf;
+ struct {
+ u8 other_ch_status;
+ u8 error_state;
+ } usbcan;
+ };
+};
+
+static void *
+kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ u8 *cmd_tx_can_flags = NULL; /* GCC */
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ *frame_len = cf->can_dlc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (cmd) {
+ cmd->u.tx_can.tid = transid & 0xff;
+ cmd->len = *cmd_len = CMD_HEADER_LEN +
+ sizeof(struct kvaser_cmd_tx_can);
+ cmd->u.tx_can.channel = priv->channel;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
+ break;
+ case KVASER_USBCAN:
+ cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags;
+ break;
+ }
+
+ *cmd_tx_can_flags = 0;
+
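+		/* The Leaf wire format packs the CAN id MSB first: extended
+		 * (29-bit) ids span data[0..4] as 5+6+4+8+6 bit groups, while
+		 * standard (11-bit) ids span data[0..1] as 5+6 bits.
+		 */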
+ if (cf->can_id & CAN_EFF_FLAG) {
+ cmd->id = CMD_TX_EXT_MESSAGE;
+ cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f;
+ cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f;
+ cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f;
+ cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff;
+ cmd->u.tx_can.data[4] = cf->can_id & 0x3f;
+ } else {
+ cmd->id = CMD_TX_STD_MESSAGE;
+ cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f;
+ cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
+ }
+
+ cmd->u.tx_can.data[5] = cf->can_dlc;
+ memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
+ }
+ return cmd;
+}
+
+static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
+ struct kvaser_cmd *cmd)
+{
+ struct kvaser_cmd *tmp;
+ void *buf;
+ int actual_len;
+ int err;
+ int pos;
+ unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
+
+ buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ do {
+ err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
+ &actual_len);
+ if (err < 0)
+ goto end;
+
+ pos = 0;
+ while (pos <= actual_len - CMD_HEADER_LEN) {
+ tmp = buf + pos;
+
+ /* Handle commands crossing the USB endpoint max packet
+ * size boundary. Check kvaser_usb_read_bulk_callback()
+ * for further details.
+ */
+ if (tmp->len == 0) {
+ pos = round_up(pos,
+ le16_to_cpu
+ (dev->bulk_in->wMaxPacketSize));
+ continue;
+ }
+
+ if (pos + tmp->len > actual_len) {
+ dev_err_ratelimited(&dev->intf->dev,
+ "Format error\n");
+ break;
+ }
+
+ if (tmp->id == id) {
+ memcpy(cmd, tmp, tmp->len);
+ goto end;
+ }
+
+ pos += tmp->len;
+ }
+ } while (time_before(jiffies, to));
+
+ err = -EINVAL;
+
+end:
+ kfree(buf);
+
+ return err;
+}
+
+static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
+ u8 cmd_id, int channel)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = cmd_id;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
+ cmd->u.simple.channel = channel;
+ cmd->u.simple.tid = 0xff;
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd);
+ if (err)
+ return err;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+ break;
+ case KVASER_USBCAN:
+ dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev)
+{
+ int err;
+ int retry = 3;
+
+ /* On some x86 laptops, plugging a Kvaser device again after
+ * an unplug makes the firmware always ignore the very first
+ * command. For such a case, provide some room for retries
+ * instead of completely exiting the driver.
+ */
+ do {
+ err = kvaser_usb_leaf_get_software_info_inner(dev);
+ } while (--retry && err == -ETIMEDOUT);
+
+ return err;
+}
+
+static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd);
+ if (err)
+ return err;
+
+ dev->nchannels = cmd.u.cardinfo.nchannels;
+ if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
+ (dev->card_data.leaf.family == KVASER_USBCAN &&
+ dev->nchannels > MAX_USBCAN_NET_DEVICES))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct net_device_stats *stats;
+ struct kvaser_usb_tx_urb_context *context;
+ struct kvaser_usb_net_priv *priv;
+ unsigned long flags;
+ u8 channel, tid;
+
+ channel = cmd->u.tx_acknowledge_header.channel;
+ tid = cmd->u.tx_acknowledge_header.tid;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ stats = &priv->netdev->stats;
+
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs];
+
+ /* Sometimes the state change doesn't come after a bus-off event */
+ if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ } else {
+ netdev_err(priv->netdev,
+ "No memory left for err_skb\n");
+ }
+
+ priv->can.can_stats.restarts++;
+ netif_carrier_on(priv->netdev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += context->dlc;
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_get_echo_skb(priv->netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(priv->netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+}
+
+static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ u8 cmd_id)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
+ cmd->id = cmd_id;
+ cmd->u.simple.channel = priv->channel;
+
+ err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len);
+ if (err)
+ kfree(cmd);
+
+ return err;
+}
+
+static void
+kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_err_summary *es,
+ struct can_frame *cf)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &priv->netdev->stats;
+ enum can_state cur_state, new_state, tx_state, rx_state;
+
+ netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
+
+ new_state = priv->can.state;
+ cur_state = priv->can.state;
+
+ if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (es->status & M16C_STATE_BUS_PASSIVE) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (es->status & M16C_STATE_BUS_ERROR) {
+ /* Guard against spurious error events after a busoff */
+ if (cur_state < CAN_STATE_BUS_OFF) {
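+			/* Counter thresholds per the CAN spec: >= 128 means
+			 * error-passive, >= 96 means error-warning.
+			 */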
+ if (es->txerr >= 128 || es->rxerr >= 128)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (es->txerr >= 96 || es->rxerr >= 96)
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ if (!es->status)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (new_state != cur_state) {
+ tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+ rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
+
+ can_change_state(priv->netdev, cf, tx_state, rx_state);
+ }
+
+ if (priv->can.restart_ms &&
+ cur_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ }
+ break;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
+ stats->tx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
+ stats->rx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
+ priv->can.can_stats.bus_error++;
+ break;
+ }
+
+ priv->bec.txerr = es->txerr;
+ priv->bec.rxerr = es->rxerr;
+}
+
+static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_usb_err_summary *es)
+{
+ struct can_frame *cf;
+ struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
+ .can_dlc = CAN_ERR_DLC };
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ struct kvaser_usb_net_priv *priv;
+ enum can_state old_state, new_state;
+
+ if (es->channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", es->channel);
+ return;
+ }
+
+ priv = dev->nets[es->channel];
+ stats = &priv->netdev->stats;
+
+ /* Update all of the CAN interface's state and error counters before
+ * trying any memory allocation that can actually fail with -ENOMEM.
+ *
+ * We send a temporary stack-allocated error CAN frame to
+ * can_change_state() for the very same reason.
+ *
+ * TODO: Split can_change_state() responsibility between updating the
+ * CAN interface's state and counters, and the setting up of CAN error
+ * frame ID and data to userspace. Remove stack allocation afterwards.
+ */
+ old_state = priv->can.state;
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ new_state = priv->can.state;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+ memcpy(cf, &tmp_cf, sizeof(*cf));
+
+ if (new_state != old_state) {
+ if (es->status &
+ (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_leaf_simple_cmd_async(priv,
+ CMD_STOP_CHIP);
+ netif_carrier_off(priv->netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_carrier_on(priv->netdev);
+ }
+ }
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+ if (es->leaf.error_factor & M16C_EF_ACKE)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (es->leaf.error_factor & M16C_EF_CRCE)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (es->leaf.error_factor & M16C_EF_FORME)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (es->leaf.error_factor & M16C_EF_STFE)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (es->leaf.error_factor & M16C_EF_BITE0)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (es->leaf.error_factor & M16C_EF_BITE1)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (es->leaf.error_factor & M16C_EF_TRE)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
+ cf->can_id |= CAN_ERR_BUSERROR;
+ break;
+ }
+
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+/* For USBCAN, report an error to userspace if the channel's error counters
+ * have changed, or if we're the only channel seeing a bus error state.
+ */
+static void
+kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
+ struct kvaser_usb_err_summary *es)
+{
+ struct kvaser_usb_net_priv *priv;
+ unsigned int channel;
+ bool report_error;
+
+ channel = es->channel;
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ report_error = false;
+
+ if (es->txerr != priv->bec.txerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
+ report_error = true;
+ }
+ if (es->rxerr != priv->bec.rxerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
+ report_error = true;
+ }
+ if ((es->status & M16C_STATE_BUS_ERROR) &&
+ !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
+ report_error = true;
+ }
+
+ if (report_error)
+ kvaser_usb_leaf_rx_error(dev, es);
+}
+
+static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_err_summary es = { };
+
+ switch (cmd->id) {
+ /* Sometimes errors are sent as unsolicited chip state events */
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = cmd->u.usbcan.chip_state_event.channel;
+ es.status = cmd->u.usbcan.chip_state_event.status;
+ es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count;
+ es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ break;
+
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = 0;
+ es.status = cmd->u.usbcan.error_event.status_ch0;
+ es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
+ es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+ es.usbcan.other_ch_status =
+ cmd->u.usbcan.error_event.status_ch1;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+
+ /* The USBCAN firmware supports up to 2 channels.
+ * Now that ch0 was checked, check if ch1 has any errors.
+ */
+ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+ es.channel = 1;
+ es.status = cmd->u.usbcan.error_event.status_ch1;
+ es.txerr =
+ cmd->u.usbcan.error_event.tx_errors_count_ch1;
+ es.rxerr =
+ cmd->u.usbcan.error_event.rx_errors_count_ch1;
+ es.usbcan.other_ch_status =
+ cmd->u.usbcan.error_event.status_ch0;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ }
+ break;
+
+ default:
+ dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
+ }
+}
+
+static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_err_summary es = { };
+
+ switch (cmd->id) {
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = cmd->u.leaf.error_event.channel;
+ es.status = cmd->u.leaf.error_event.status;
+ es.txerr = cmd->u.leaf.error_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
+ es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+ break;
+ case CMD_LEAF_LOG_MESSAGE:
+ es.channel = cmd->u.leaf.log_message.channel;
+ es.status = cmd->u.leaf.log_message.data[0];
+ es.txerr = cmd->u.leaf.log_message.data[2];
+ es.rxerr = cmd->u.leaf.log_message.data[3];
+ es.leaf.error_factor = cmd->u.leaf.log_message.data[1];
+ break;
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = cmd->u.leaf.chip_state_event.channel;
+ es.status = cmd->u.leaf.chip_state_event.status;
+ es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count;
+ es.leaf.error_factor = 0;
+ break;
+ default:
+ dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
+ return;
+ }
+
+ kvaser_usb_leaf_rx_error(dev, &es);
+}
+
+static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv,
+ const struct kvaser_cmd *cmd)
+{
+ if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR)) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
+ cmd->u.rx_can_header.flag);
+
+ stats->rx_errors++;
+ return;
+ }
+
+ if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+}
+
+static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ u8 channel = cmd->u.rx_can_header.channel;
+ const u8 *rx_data = NULL; /* GCC */
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ stats = &priv->netdev->stats;
+
+ if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
+ (dev->card_data.leaf.family == KVASER_LEAF &&
+ cmd->id == CMD_LEAF_LOG_MESSAGE)) {
+ kvaser_usb_leaf_leaf_rx_error(dev, cmd);
+ return;
+ } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR |
+ MSG_FLAG_OVERRUN)) {
+ kvaser_usb_leaf_rx_can_err(priv, cmd);
+ return;
+ } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
+ netdev_warn(priv->netdev,
+ "Unhandled frame (flags: 0x%02x)\n",
+ cmd->u.rx_can_header.flag);
+ return;
+ }
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ rx_data = cmd->u.leaf.rx_can.data;
+ break;
+ case KVASER_USBCAN:
+ rx_data = cmd->u.usbcan.rx_can.data;
+ break;
+ }
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ CMD_LEAF_LOG_MESSAGE) {
+ cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
+ if (cf->can_id & KVASER_EXTENDED_FRAME)
+ cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ cf->can_id &= CAN_SFF_MASK;
+
+ cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc);
+
+ if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &cmd->u.leaf.log_message.data,
+ cf->can_dlc);
+ } else {
+ cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f);
+
+ if (cmd->id == CMD_RX_EXT_MESSAGE) {
+ cf->can_id <<= 18;
+ cf->can_id |= ((rx_data[2] & 0x0f) << 14) |
+ ((rx_data[3] & 0xff) << 6) |
+ (rx_data[4] & 0x3f);
+ cf->can_id |= CAN_EFF_FLAG;
+ }
+
+ cf->can_dlc = get_can_dlc(rx_data[5]);
+
+ if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &rx_data[6], cf->can_dlc);
+ }
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ if (completion_done(&priv->start_comp) &&
+ netif_queue_stopped(priv->netdev)) {
+ netif_wake_queue(priv->netdev);
+ } else {
+ netif_start_queue(priv->netdev);
+ complete(&priv->start_comp);
+ }
+}
+
+static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->id) {
+ case CMD_START_CHIP_REPLY:
+ kvaser_usb_leaf_start_chip_reply(dev, cmd);
+ break;
+
+ case CMD_STOP_CHIP_REPLY:
+ kvaser_usb_leaf_stop_chip_reply(dev, cmd);
+ break;
+
+ case CMD_RX_STD_MESSAGE:
+ case CMD_RX_EXT_MESSAGE:
+ kvaser_usb_leaf_rx_can_msg(dev, cmd);
+ break;
+
+ case CMD_LEAF_LOG_MESSAGE:
+ if (dev->card_data.leaf.family != KVASER_LEAF)
+ goto warn;
+ kvaser_usb_leaf_rx_can_msg(dev, cmd);
+ break;
+
+ case CMD_CHIP_STATE_EVENT:
+ case CMD_CAN_ERROR_EVENT:
+ if (dev->card_data.leaf.family == KVASER_LEAF)
+ kvaser_usb_leaf_leaf_rx_error(dev, cmd);
+ else
+ kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
+ break;
+
+ case CMD_TX_ACKNOWLEDGE:
+ kvaser_usb_leaf_tx_acknowledge(dev, cmd);
+ break;
+
+ /* Ignored commands */
+ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+ if (dev->card_data.leaf.family != KVASER_USBCAN)
+ goto warn;
+ break;
+
+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->card_data.leaf.family != KVASER_LEAF)
+ goto warn;
+ break;
+
+ default:
+warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
+ break;
+ }
+}
+
+static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
+ void *buf, int len)
+{
+ struct kvaser_cmd *cmd;
+ int pos = 0;
+
+ while (pos <= len - CMD_HEADER_LEN) {
+ cmd = buf + pos;
+
+		/* The Kvaser firmware can only read and write commands that
+		 * do not cross the USB endpoint's wMaxPacketSize boundary.
+		 * If a follow-up command crosses such a boundary, the
+		 * firmware puts a placeholder zero-length command in its
+		 * place and then aligns the real command to the next max
+		 * packet size.
+ *
+ * Handle such cases or we're going to miss a significant
+ * number of events in case of a heavy rx load on the bus.
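+		 *
+		 * Example (hypothetical numbers): with wMaxPacketSize = 64,
+		 * a 24-byte command starting at pos = 52 would cross the
+		 * boundary; the firmware emits a zero-length placeholder at
+		 * pos = 52 and the real command starts at pos = 64.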
+ */
+ if (cmd->len == 0) {
+ pos = round_up(pos, le16_to_cpu
+ (dev->bulk_in->wMaxPacketSize));
+ continue;
+ }
+
+ if (pos + cmd->len > len) {
+ dev_err_ratelimited(&dev->intf->dev, "Format error\n");
+ break;
+ }
+
+ kvaser_usb_leaf_handle_command(dev, cmd);
+ pos += cmd->len;
+ }
+}
+
+static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_SET_CTRL_MODE;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode);
+ cmd->u.ctrl_mode.tid = 0xff;
+ cmd->u.ctrl_mode.channel = priv->channel;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
+ else
+ cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
+
+ rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->start_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->start_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->stop_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->stop_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel)
+{
+ return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel);
+}
+
+static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_FLUSH_QUEUE;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue);
+ cmd->u.flush_queue.channel = priv->channel;
+ cmd->u.flush_queue.flags = 0x00;
+
+ rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+ dev->cfg = &kvaser_usb_leaf_dev_cfg;
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ return 0;
+}
+
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+ .name = "kvaser_usb",
+ .tseg1_min = KVASER_USB_TSEG1_MIN,
+ .tseg1_max = KVASER_USB_TSEG1_MAX,
+ .tseg2_min = KVASER_USB_TSEG2_MIN,
+ .tseg2_max = KVASER_USB_TSEG2_MAX,
+ .sjw_max = KVASER_USB_SJW_MAX,
+ .brp_min = KVASER_USB_BRP_MIN,
+ .brp_max = KVASER_USB_BRP_MAX,
+ .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_SET_BUS_PARAMS;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
+ cmd->u.busparams.channel = priv->channel;
+ cmd->u.busparams.tid = 0xff;
+ cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+ cmd->u.busparams.sjw = bt->sjw;
+ cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+ cmd->u.busparams.tseg2 = bt->phase_seg2;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ cmd->u.busparams.no_samp = 3;
+ else
+ cmd->u.busparams.no_samp = 1;
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
+{
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int i;
+
+ iface_desc = &dev->intf->altsetting[0];
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint))
+ dev->bulk_in = endpoint;
+
+ if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint))
+ dev->bulk_out = endpoint;
+
+		/* use the first bulk-in and bulk-out endpoints */
+ if (dev->bulk_in && dev->bulk_out)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ .dev_set_mode = kvaser_usb_leaf_set_mode,
+ .dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+ .dev_set_data_bittiming = NULL,
+ .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
+ .dev_init_card = kvaser_usb_leaf_init_card,
+ .dev_get_software_info = kvaser_usb_leaf_get_software_info,
+ .dev_get_software_details = NULL,
+ .dev_get_card_info = kvaser_usb_leaf_get_card_info,
+ .dev_get_capabilities = NULL,
+ .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
+ .dev_start_chip = kvaser_usb_leaf_start_chip,
+ .dev_stop_chip = kvaser_usb_leaf_stop_chip,
+ .dev_reset_chip = kvaser_usb_leaf_reset_chip,
+ .dev_flush_queue = kvaser_usb_leaf_flush_queue,
+ .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
+ .clock = {
+ .freq = CAN_USB_CLOCK,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index f530a80f5051..13238a72a338 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -423,6 +423,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
+ /* else: fall through */
case CAN_STATE_ERROR_WARNING:
if (n & PCAN_USB_ERROR_BUS_HEAVY) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 50e911428638..611f9d31be5d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -353,6 +353,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
default:
netdev_warn(netdev, "tx urb submitting failed err=%d\n",
err);
+ /* fall through */
case -ENOENT:
/* cable unplugged */
stats->tx_dropped++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 0105fbfea273..d516def846ab 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -141,8 +141,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
switch (id) {
case PCAN_USBPRO_TXMSG8:
i += 4;
+ /* fall through */
case PCAN_USBPRO_TXMSG4:
i += 4;
+ /* fall through */
case PCAN_USBPRO_TXMSG0:
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
new file mode 100644
index 000000000000..0678a38b1af4
--- /dev/null
+++ b/drivers/net/can/usb/ucan.c
@@ -0,0 +1,1613 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for Theobroma Systems UCAN devices, Protocol Version 3
+ *
+ * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH
+ *
+ *
+ * General Description:
+ *
+ * The USB Device uses three Endpoints:
+ *
+ * CONTROL Endpoint: Used to set up the device (start, stop,
+ * info, configure).
+ *
+ * IN Endpoint: The device sends CAN Frame Messages and Device
+ * Information using the IN endpoint.
+ *
+ * OUT Endpoint: The driver sends configuration requests, and CAN
+ * Frames on the out endpoint.
+ *
+ * Error Handling:
+ *
+ * If error reporting is turned on, the device encodes errors into CAN
+ * error frames (see uapi/linux/can/error.h) and sends them using the
+ * IN Endpoint. The driver updates its statistics and forwards them.
+ */
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#define UCAN_DRIVER_NAME "ucan"
+#define UCAN_MAX_RX_URBS 8
+/* the CAN controller needs a while to enable/disable the bus */
+#define UCAN_USB_CTL_PIPE_TIMEOUT 1000
+/* this driver currently supports protocol version 3 only */
+#define UCAN_PROTOCOL_VERSION_MIN 3
+#define UCAN_PROTOCOL_VERSION_MAX 3
+
+/* UCAN Message Definitions
+ * ------------------------
+ *
+ * ucan_message_out_t and ucan_message_in_t define the messages
+ * transmitted on the OUT and IN endpoint.
+ *
+ * Multibyte fields are transmitted with little endianness
+ *
+ * INTR Endpoint: a single uint32_t storing the current space in the fifo
+ *
+ * OUT Endpoint: single message of type ucan_message_out_t is
+ * transmitted on the out endpoint
+ *
+ * IN Endpoint: multiple messages of type ucan_message_in_t are
+ * concatenated in the following way:
+ *
+ * m[n].len <=> the length of message n (including the header) in bytes
+ * m[n] is aligned to a 4 byte boundary, hence
+ * offset(m[0]) := 0;
+ * offset(m[n+1]) := offset(m[n]) + (m[n].len + 3) & ~3
+ *
+ * this implies that
+ * offset(m[n]) % 4 <=> 0
+ */
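+
+/* Worked example (illustrative only): a classic data frame with 8
+ * payload bytes has m.len = 4 (header) + 4 (id) + 8 (data) = 16, so
+ * the next message starts 16 bytes later; a 5-byte payload gives
+ * m.len = 13 and the next message also starts 16 bytes later because
+ * of the 4 byte alignment.
+ */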
+
+/* Device Global Commands */
+enum {
+ UCAN_DEVICE_GET_FW_STRING = 0,
+};
+
+/* UCAN Commands */
+enum {
+ /* start the can transceiver - val defines the operation mode */
+ UCAN_COMMAND_START = 0,
+ /* cancel pending transmissions and stop the can transceiver */
+ UCAN_COMMAND_STOP = 1,
+ /* send can transceiver into low-power sleep mode */
+ UCAN_COMMAND_SLEEP = 2,
+ /* wake up can transceiver from low-power sleep mode */
+ UCAN_COMMAND_WAKEUP = 3,
+ /* reset the can transceiver */
+ UCAN_COMMAND_RESET = 4,
+ /* get piece of info from the can transceiver - subcmd defines what
+ * piece
+ */
+ UCAN_COMMAND_GET = 5,
+ /* clear or disable hardware filter - subcmd defines which of the two */
+ UCAN_COMMAND_FILTER = 6,
+ /* Setup bittiming */
+ UCAN_COMMAND_SET_BITTIMING = 7,
+ /* recover from bus-off state */
+ UCAN_COMMAND_RESTART = 8,
+};
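+
+/* Example (illustrative, derived from ucan_ctrl_command_out() below):
+ * starting the controller maps to a vendor control transfer with
+ * bmRequestType 0x41 (OUT | VENDOR | INTERFACE), bRequest
+ * UCAN_COMMAND_START, wValue 0 (subcmd), wIndex set to the interface
+ * number, and a struct ucan_ctl_cmd_start payload.
+ */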
+
+/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap).
+ * Undefined bits must be set to 0.
+ */
+enum {
+ UCAN_MODE_LOOPBACK = BIT(0),
+ UCAN_MODE_SILENT = BIT(1),
+ UCAN_MODE_3_SAMPLES = BIT(2),
+ UCAN_MODE_ONE_SHOT = BIT(3),
+ UCAN_MODE_BERR_REPORT = BIT(4),
+};
+
+/* UCAN_COMMAND_GET subcommands */
+enum {
+ UCAN_COMMAND_GET_INFO = 0,
+ UCAN_COMMAND_GET_PROTOCOL_VERSION = 1,
+};
+
+/* UCAN_COMMAND_FILTER subcommands */
+enum {
+ UCAN_FILTER_CLEAR = 0,
+ UCAN_FILTER_DISABLE = 1,
+ UCAN_FILTER_ENABLE = 2,
+};
+
+/* OUT endpoint message types */
+enum {
+ UCAN_OUT_TX = 2, /* transmit a CAN frame */
+};
+
+/* IN endpoint message types */
+enum {
+ UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */
+ UCAN_IN_RX = 2, /* CAN frame received */
+};
+
+struct ucan_ctl_cmd_start {
+ __le16 mode; /* OR-ing any of UCAN_MODE_* */
+} __packed;
+
+struct ucan_ctl_cmd_set_bittiming {
+ __le32 tq; /* Time quanta (TQ) in nanoseconds */
+ __le16 brp; /* TQ Prescaler */
+ __le16 sample_point; /* Sample point in tenths of a percent */
+ u8 prop_seg; /* Propagation segment in TQs */
+ u8 phase_seg1; /* Phase buffer segment 1 in TQs */
+ u8 phase_seg2; /* Phase buffer segment 2 in TQs */
+ u8 sjw; /* Synchronisation jump width in TQs */
+} __packed;
+
+struct ucan_ctl_cmd_device_info {
+ __le32 freq; /* Clock Frequency for tq generation */
+ u8 tx_fifo; /* Size of the transmission fifo */
+ u8 sjw_max; /* can_bittiming fields... */
+ u8 tseg1_min;
+ u8 tseg1_max;
+ u8 tseg2_min;
+ u8 tseg2_max;
+ __le16 brp_inc;
+ __le32 brp_min;
+ __le32 brp_max; /* ...can_bittiming fields */
+ __le16 ctrlmodes; /* supported control modes */
+ __le16 hwfilter; /* Number of HW filter banks */
+ __le16 rxmboxes; /* Number of receive Mailboxes */
+} __packed;
+
+struct ucan_ctl_cmd_get_protocol_version {
+ __le32 version;
+} __packed;
+
+union ucan_ctl_payload {
+ /* Start the CAN controller
+ * bmRequest == UCAN_COMMAND_START
+ */
+ struct ucan_ctl_cmd_start cmd_start;
+ /* Setup Bittiming
+ * bmRequest == UCAN_COMMAND_SET_BITTIMING
+ */
+ struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming;
+ /* Get Device Information
+ * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO
+ */
+ struct ucan_ctl_cmd_device_info cmd_get_device_info;
+ /* Get Protocol Version
+ * bmRequest == UCAN_COMMAND_GET;
+ * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION
+ */
+ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
+
+ u8 raw[128];
+} __packed;
+
+enum {
+ UCAN_TX_COMPLETE_SUCCESS = BIT(0),
+};
+
+/* Transmission Complete within ucan_message_in */
+struct ucan_tx_complete_entry_t {
+ u8 echo_index;
+ u8 flags;
+} __packed __aligned(0x2);
+
+/* CAN Data message format within ucan_message_in/out */
+struct ucan_can_msg {
+ /* note DLC is computed by
+ * msg.len - sizeof (msg.len)
+ * - sizeof (msg.type)
+ * - sizeof (msg.subtype)
+ * - sizeof (msg.can_msg.id)
+ */
+ __le32 id;
+
+ union {
+ u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */
+ u8 dlc; /* RTR dlc */
+ };
+} __packed;
+
+/* OUT Endpoint, outbound messages */
+struct ucan_message_out {
+ __le16 len; /* Length of the content including the header */
+ u8 type; /* UCAN_OUT_TX and friends */
+ u8 subtype; /* command sub type */
+
+ union {
+ /* Transmit CAN frame
+ * (type == UCAN_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
+ * subtype stores the echo id
+ */
+ struct ucan_can_msg can_msg;
+ } msg;
+} __packed __aligned(0x4);
+
+/* IN Endpoint, inbound messages */
+struct ucan_message_in {
+ __le16 len; /* Length of the content including the header */
+ u8 type; /* UCAN_IN_RX and friends */
+ u8 subtype; /* command sub type */
+
+ union {
+ /* CAN Frame received
+ * (type == UCAN_IN_RX)
+ * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
+ */
+ struct ucan_can_msg can_msg;
+
+ /* CAN transmission complete
+ * (type == UCAN_IN_TX_COMPLETE)
+ */
+ struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
+ } __aligned(0x4) msg;
+} __packed;
+
+/* Macros to calculate message lengths */
+#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg)
+
+#define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg)
+#define UCAN_IN_LEN(member) (UCAN_OUT_HDR_SIZE + sizeof(member))
+
+struct ucan_priv;
+
+/* Context Information for transmission URBs */
+struct ucan_urb_context {
+ struct ucan_priv *up;
+ u8 dlc;
+ bool allocated;
+};
+
+/* Information reported by the USB device */
+struct ucan_device_info {
+ struct can_bittiming_const bittiming_const;
+ u8 tx_fifo;
+};
+
+/* Driver private data */
+struct ucan_priv {
+ /* must be the first member */
+ struct can_priv can;
+
+ /* linux USB device structures */
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct net_device *netdev;
+
+ /* lock for can->echo_skb (used around
+ * can_put/get/free_echo_skb)
+ */
+ spinlock_t echo_skb_lock;
+
+ /* USB device information */
+ u8 intf_index;
+ u8 in_ep_addr;
+ u8 out_ep_addr;
+ u16 in_ep_size;
+
+ /* transmission and reception buffers */
+ struct usb_anchor rx_urbs;
+ struct usb_anchor tx_urbs;
+
+ union ucan_ctl_payload *ctl_msg_buffer;
+ struct ucan_device_info device_info;
+
+ /* transmission control information and locks */
+ spinlock_t context_lock;
+ unsigned int available_tx_urbs;
+ struct ucan_urb_context *context_array;
+};
+
+static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len)
+{
+ if (le32_to_cpu(msg->id) & CAN_RTR_FLAG)
+ return get_can_dlc(msg->dlc);
+ else
+ return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id)));
+}
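+
+/* Example (illustrative): for a received data frame carrying 8 payload
+ * bytes, len = UCAN_IN_HDR_SIZE + sizeof(msg->id) + 8 = 16 and the DLC
+ * evaluates to 8; RTR frames carry no payload, so their DLC is taken
+ * from msg->dlc instead.
+ */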
+
+static void ucan_release_context_array(struct ucan_priv *up)
+{
+ if (!up->context_array)
+ return;
+
+ /* no locking needed; the driver is currently opening or closing */
+ up->available_tx_urbs = 0;
+
+ kfree(up->context_array);
+ up->context_array = NULL;
+}
+
+static int ucan_alloc_context_array(struct ucan_priv *up)
+{
+ int i;
+
+ /* release contexts if any */
+ ucan_release_context_array(up);
+
+ up->context_array = kcalloc(up->device_info.tx_fifo,
+ sizeof(*up->context_array),
+ GFP_KERNEL);
+ if (!up->context_array) {
+ netdev_err(up->netdev,
+ "Not enough memory to allocate tx contexts\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < up->device_info.tx_fifo; i++) {
+ up->context_array[i].allocated = false;
+ up->context_array[i].up = up;
+ }
+
+ /* no locking needed; the driver is currently opening */
+ up->available_tx_urbs = up->device_info.tx_fifo;
+
+ return 0;
+}
+
+static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up)
+{
+ int i;
+ unsigned long flags;
+ struct ucan_urb_context *ret = NULL;
+
+ if (WARN_ON_ONCE(!up->context_array))
+ return NULL;
+
+ /* execute context operation atomically */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ for (i = 0; i < up->device_info.tx_fifo; i++) {
+ if (!up->context_array[i].allocated) {
+ /* update context */
+ ret = &up->context_array[i];
+ up->context_array[i].allocated = true;
+
+ /* stop queue if necessary */
+ up->available_tx_urbs--;
+ if (!up->available_tx_urbs)
+ netif_stop_queue(up->netdev);
+
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+ return ret;
+}
+
+static bool ucan_release_context(struct ucan_priv *up,
+ struct ucan_urb_context *ctx)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ if (WARN_ON_ONCE(!up->context_array))
+ return false;
+
+ /* execute context operation atomically */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ /* only release contexts that are actually allocated; otherwise
+ * the device may have sent garbage
+ */
+ if (ctx->allocated) {
+ ctx->allocated = false;
+
+ /* check if the queue needs to be woken */
+ if (!up->available_tx_urbs)
+ netif_wake_queue(up->netdev);
+ up->available_tx_urbs++;
+
+ ret = true;
+ }
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+ return ret;
+}
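+
+/* Context lifecycle (sketch of how the functions above interact):
+ * ucan_start_xmit() takes a context via ucan_alloc_context(); it is
+ * returned through ucan_release_context() either by
+ * ucan_tx_complete_msg() on a TX-complete message or by
+ * ucan_write_bulk_callback() on a USB-level transmission failure.
+ */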
+
+static int ucan_ctrl_command_out(struct ucan_priv *up,
+ u8 cmd, u16 subcmd, u16 datalen)
+{
+ return usb_control_msg(up->udev,
+ usb_sndctrlpipe(up->udev, 0),
+ cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ subcmd,
+ up->intf_index,
+ up->ctl_msg_buffer,
+ datalen,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+}
+
+static int ucan_device_request_in(struct ucan_priv *up,
+ u8 cmd, u16 subcmd, u16 datalen)
+{
+ return usb_control_msg(up->udev,
+ usb_rcvctrlpipe(up->udev, 0),
+ cmd,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ subcmd,
+ 0,
+ up->ctl_msg_buffer,
+ datalen,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+}
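+
+/* Usage sketch (illustrative): ucan_ctrl_command_out() addresses the
+ * interface (USB_RECIP_INTERFACE, wIndex = intf_index), e.g. as done
+ * in ucan_open():
+ *
+ *	up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode);
+ *	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2);
+ *
+ * while ucan_device_request_in() addresses the device as a whole and
+ * is used for UCAN_DEVICE_GET_FW_STRING in ucan_probe().
+ */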
+
+/* Parse the device information structure reported by the device and
+ * set up private variables accordingly
+ */
+static void ucan_parse_device_info(struct ucan_priv *up,
+ struct ucan_ctl_cmd_device_info *device_info)
+{
+ struct can_bittiming_const *bittiming =
+ &up->device_info.bittiming_const;
+ u16 ctrlmodes;
+
+ /* store the data */
+ up->can.clock.freq = le32_to_cpu(device_info->freq);
+ up->device_info.tx_fifo = device_info->tx_fifo;
+ strcpy(bittiming->name, "ucan");
+ bittiming->tseg1_min = device_info->tseg1_min;
+ bittiming->tseg1_max = device_info->tseg1_max;
+ bittiming->tseg2_min = device_info->tseg2_min;
+ bittiming->tseg2_max = device_info->tseg2_max;
+ bittiming->sjw_max = device_info->sjw_max;
+ bittiming->brp_min = le32_to_cpu(device_info->brp_min);
+ bittiming->brp_max = le32_to_cpu(device_info->brp_max);
+ bittiming->brp_inc = le16_to_cpu(device_info->brp_inc);
+
+ ctrlmodes = le16_to_cpu(device_info->ctrlmodes);
+
+ up->can.ctrlmode_supported = 0;
+
+ if (ctrlmodes & UCAN_MODE_LOOPBACK)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+ if (ctrlmodes & UCAN_MODE_SILENT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+ if (ctrlmodes & UCAN_MODE_3_SAMPLES)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ if (ctrlmodes & UCAN_MODE_ONE_SHOT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+ if (ctrlmodes & UCAN_MODE_BERR_REPORT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
+}
+
+/* Handle a CAN error frame that we have received from the device.
+ * Returns true if the can state has changed.
+ */
+static bool ucan_handle_error_frame(struct ucan_priv *up,
+ struct ucan_message_in *m,
+ canid_t canid)
+{
+ enum can_state new_state = up->can.state;
+ struct net_device_stats *net_stats = &up->netdev->stats;
+ struct can_device_stats *can_stats = &up->can.can_stats;
+
+ if (canid & CAN_ERR_LOSTARB)
+ can_stats->arbitration_lost++;
+
+ if (canid & CAN_ERR_BUSERROR)
+ can_stats->bus_error++;
+
+ if (canid & CAN_ERR_ACK)
+ net_stats->tx_errors++;
+
+ if (canid & CAN_ERR_BUSOFF)
+ new_state = CAN_STATE_BUS_OFF;
+
+ /* controller problems, details in data[1] */
+ if (canid & CAN_ERR_CRTL) {
+ u8 d1 = m->msg.can_msg.data[1];
+
+ if (d1 & CAN_ERR_CRTL_RX_OVERFLOW)
+ net_stats->rx_over_errors++;
+
+ /* controller state bits: if multiple are set the worst wins */
+ if (d1 & CAN_ERR_CRTL_ACTIVE)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING))
+ new_state = CAN_STATE_ERROR_WARNING;
+
+ if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ }
+
+ /* protocol error, details in data[2] */
+ if (canid & CAN_ERR_PROT) {
+ u8 d2 = m->msg.can_msg.data[2];
+
+ if (d2 & CAN_ERR_PROT_TX)
+ net_stats->tx_errors++;
+ else
+ net_stats->rx_errors++;
+ }
+
+ /* no state change - we are done */
+ if (up->can.state == new_state)
+ return false;
+
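+ /* the numeric comparisons below rely on enum can_state being ordered
+ * from best to worst (ERROR_ACTIVE < ERROR_WARNING < ERROR_PASSIVE <
+ * BUS_OFF), as defined in uapi/linux/can/netlink.h
+ */
+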
+ /* we switched into a better state */
+ if (up->can.state > new_state) {
+ up->can.state = new_state;
+ return true;
+ }
+
+ /* we switched into a worse state */
+ up->can.state = new_state;
+ switch (new_state) {
+ case CAN_STATE_BUS_OFF:
+ can_stats->bus_off++;
+ can_bus_off(up->netdev);
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ can_stats->error_passive++;
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ can_stats->error_warning++;
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
+/* Callback on reception of a can frame via the IN endpoint
+ *
+ * This function allocates an skb and transfers it to the Linux
+ * network stack
+ */
+static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
+{
+ int len;
+ canid_t canid;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &up->netdev->stats;
+
+ /* get the contents of the length field */
+ len = le16_to_cpu(m->len);
+
+ /* check sanity */
+ if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) {
+ netdev_warn(up->netdev, "invalid input message len: %d\n", len);
+ return;
+ }
+
+ /* handle error frames */
+ canid = le32_to_cpu(m->msg.can_msg.id);
+ if (canid & CAN_ERR_FLAG) {
+ bool busstate_changed = ucan_handle_error_frame(up, m, canid);
+
+ /* if berr-reporting is off only state changes get through */
+ if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ !busstate_changed)
+ return;
+ } else {
+ canid_t canid_mask;
+ /* compute the mask for canid */
+ canid_mask = CAN_RTR_FLAG;
+ if (canid & CAN_EFF_FLAG)
+ canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ canid_mask |= CAN_SFF_MASK;
+
+ if (canid & ~canid_mask)
+ netdev_warn(up->netdev,
+ "unexpected bits set (canid %x, mask %x)",
+ canid, canid_mask);
+
+ canid &= canid_mask;
+ }
+
+ /* allocate skb */
+ skb = alloc_can_skb(up->netdev, &cf);
+ if (!skb)
+ return;
+
+ /* fill the can frame */
+ cf->can_id = canid;
+
+ /* compute DLC taking RTR_FLAG into account */
+ cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len);
+
+ /* copy the payload of non RTR frames */
+ if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG))
+ memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc);
+
+ /* update rx statistics */
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ /* pass it to Linux */
+ netif_rx(skb);
+}
+
+/* callback indicating completed transmission */
+static void ucan_tx_complete_msg(struct ucan_priv *up,
+ struct ucan_message_in *m)
+{
+ unsigned long flags;
+ u16 count, i;
+ u8 echo_index, dlc;
+ u16 len = le16_to_cpu(m->len);
+
+ struct ucan_urb_context *context;
+
+ if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) {
+ netdev_err(up->netdev, "invalid tx complete length\n");
+ return;
+ }
+
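+ /* each TX-complete entry is 2 bytes (echo_index + flags, see struct
+ * ucan_tx_complete_entry_t), hence the parity check above and the
+ * division by 2 below
+ */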
+ count = (len - UCAN_IN_HDR_SIZE) / 2;
+ for (i = 0; i < count; i++) {
+ /* we did not submit such echo ids */
+ echo_index = m->msg.can_tx_complete_msg[i].echo_index;
+ if (echo_index >= up->device_info.tx_fifo) {
+ up->netdev->stats.tx_errors++;
+ netdev_err(up->netdev,
+ "invalid echo_index %d received\n",
+ echo_index);
+ continue;
+ }
+
+ /* gather information from the context */
+ context = &up->context_array[echo_index];
+ dlc = READ_ONCE(context->dlc);
+
+ /* Release context and restart queue if necessary.
+ * Also check if the context was allocated
+ */
+ if (!ucan_release_context(up, context))
+ continue;
+
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ if (m->msg.can_tx_complete_msg[i].flags &
+ UCAN_TX_COMPLETE_SUCCESS) {
+ /* update statistics */
+ up->netdev->stats.tx_packets++;
+ up->netdev->stats.tx_bytes += dlc;
+ can_get_echo_skb(up->netdev, echo_index);
+ } else {
+ up->netdev->stats.tx_dropped++;
+ can_free_echo_skb(up->netdev, echo_index);
+ }
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+ }
+}
+
+/* callback on reception of a USB message */
+static void ucan_read_bulk_callback(struct urb *urb)
+{
+ int ret;
+ int pos;
+ struct ucan_priv *up = urb->context;
+ struct net_device *netdev = up->netdev;
+ struct ucan_message_in *m;
+
+ /* the device is not up and the driver should not receive any
+ * data on the bulk in pipe
+ */
+ if (WARN_ON(!up->context_array)) {
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+ return;
+ }
+
+ /* check URB status */
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ case -ETIME:
+ /* urb is not resubmitted -> free dma data */
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+ netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n",
+ urb->status);
+ return;
+ default:
+ goto resubmit;
+ }
+
+ /* sanity check */
+ if (!netif_device_present(netdev))
+ return;
+
+ /* iterate over input */
+ pos = 0;
+ while (pos < urb->actual_length) {
+ int len;
+
+ /* check sanity (length of header) */
+ if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) {
+ netdev_warn(up->netdev,
+ "invalid message (short; no hdr; l:%d)\n",
+ urb->actual_length);
+ goto resubmit;
+ }
+
+ /* setup the message address */
+ m = (struct ucan_message_in *)
+ ((u8 *)urb->transfer_buffer + pos);
+ len = le16_to_cpu(m->len);
+
+ /* check sanity (length of content) */
+ if (urb->actual_length - pos < len) {
+ netdev_warn(up->netdev,
+ "invalid message (short; no data; l:%d)\n",
+ urb->actual_length);
+ print_hex_dump(KERN_WARNING,
+ "raw data: ",
+ DUMP_PREFIX_ADDRESS,
+ 16,
+ 1,
+ urb->transfer_buffer,
+ urb->actual_length,
+ true);
+
+ goto resubmit;
+ }
+
+ switch (m->type) {
+ case UCAN_IN_RX:
+ ucan_rx_can_msg(up, m);
+ break;
+ case UCAN_IN_TX_COMPLETE:
+ ucan_tx_complete_msg(up, m);
+ break;
+ default:
+ netdev_warn(up->netdev,
+ "invalid message (type; t:%d)\n",
+ m->type);
+ break;
+ }
+
+ /* proceed to next message */
+ pos += len;
+ /* align to 4 byte boundary */
+ pos = round_up(pos, 4);
+ }
+
+resubmit:
+ /* resubmit urb when done */
+ usb_fill_bulk_urb(urb, up->udev,
+ usb_rcvbulkpipe(up->udev,
+ up->in_ep_addr),
+ urb->transfer_buffer,
+ up->in_ep_size,
+ ucan_read_bulk_callback,
+ up);
+
+ usb_anchor_urb(urb, &up->rx_urbs);
+ /* URB completion handlers run in atomic context, so a sleeping
+ * allocation must not be used here
+ */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret < 0) {
+ netdev_err(up->netdev,
+ "failed resubmitting read bulk urb: %d\n",
+ ret);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ if (ret == -ENODEV)
+ netif_device_detach(netdev);
+ }
+}
+
+/* callback after transmission of a USB message */
+static void ucan_write_bulk_callback(struct urb *urb)
+{
+ unsigned long flags;
+ struct ucan_priv *up;
+ struct ucan_urb_context *context = urb->context;
+
+ /* get the urb context */
+ if (WARN_ON_ONCE(!context))
+ return;
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev,
+ sizeof(struct ucan_message_out),
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ up = context->up;
+ if (WARN_ON_ONCE(!up))
+ return;
+
+ /* sanity check */
+ if (!netif_device_present(up->netdev))
+ return;
+
+ /* transmission failed (USB - the device will not send a TX complete) */
+ if (urb->status) {
+ netdev_warn(up->netdev,
+ "failed to transmit USB message to device: %d\n",
+ urb->status);
+
+ /* update counters and clean up */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_free_echo_skb(up->netdev, context - up->context_array);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ up->netdev->stats.tx_dropped++;
+
+ /* release context and restart the queue if necessary */
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "urb failed, failed to release context\n");
+ }
+}
+
+static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs)
+{
+ int i;
+
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ if (urbs[i]) {
+ usb_unanchor_urb(urbs[i]);
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urbs[i]->transfer_buffer,
+ urbs[i]->transfer_dma);
+ usb_free_urb(urbs[i]);
+ }
+ }
+
+ memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
+}
+
+static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up,
+ struct urb **urbs)
+{
+ int i;
+
+ memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
+
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ void *buf;
+
+ urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urbs[i])
+ goto err;
+
+ buf = usb_alloc_coherent(up->udev,
+ up->in_ep_size,
+ GFP_KERNEL, &urbs[i]->transfer_dma);
+ if (!buf) {
+ /* cleanup this urb */
+ usb_free_urb(urbs[i]);
+ urbs[i] = NULL;
+ goto err;
+ }
+
+ usb_fill_bulk_urb(urbs[i], up->udev,
+ usb_rcvbulkpipe(up->udev,
+ up->in_ep_addr),
+ buf,
+ up->in_ep_size,
+ ucan_read_bulk_callback,
+ up);
+
+ urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urbs[i], &up->rx_urbs);
+ }
+ return 0;
+
+err:
+ /* cleanup other unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+ return -ENOMEM;
+}
+
+/* Submits rx urbs with the semantic: Either submit all, or clean up
+ * everything. In case of errors submitted urbs are killed and all urbs
+ * in the array are freed. If there are no errors every entry in the
+ * urb array is set to NULL.
+ */
+static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs)
+{
+ int i, ret;
+
+ /* Iterate over all urbs to submit. On success remove the urb
+ * from the list.
+ */
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ ret = usb_submit_urb(urbs[i], GFP_KERNEL);
+ if (ret) {
+ netdev_err(up->netdev,
+ "could not submit urb; code: %d\n",
+ ret);
+ goto err;
+ }
+
+ /* Drop our reference to the URB; it was anchored above and
+ * the USB core now takes care of freeing it
+ */
+ usb_free_urb(urbs[i]);
+ urbs[i] = NULL;
+ }
+ return 0;
+
+err:
+ /* Cleanup unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+
+ /* Kill urbs that are already submitted */
+ usb_kill_anchored_urbs(&up->rx_urbs);
+
+ return ret;
+}
+
+/* Open the network device */
+static int ucan_open(struct net_device *netdev)
+{
+ int ret, ret_cleanup;
+ u16 ctrlmode;
+ struct urb *urbs[UCAN_MAX_RX_URBS];
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ ret = ucan_alloc_context_array(up);
+ if (ret)
+ return ret;
+
+ /* Allocate and prepare IN URBs - allocated and anchored
+ * urbs are stored in urbs[] for cleanup
+ */
+ ret = ucan_prepare_and_anchor_rx_urbs(up, urbs);
+ if (ret)
+ goto err_contexts;
+
+ /* Check the control mode */
+ ctrlmode = 0;
+ if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ ctrlmode |= UCAN_MODE_LOOPBACK;
+ if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrlmode |= UCAN_MODE_SILENT;
+ if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ ctrlmode |= UCAN_MODE_3_SAMPLES;
+ if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ ctrlmode |= UCAN_MODE_ONE_SHOT;
+
+ /* Enable this in any case - filtering is done within the
+ * receive path
+ */
+ ctrlmode |= UCAN_MODE_BERR_REPORT;
+ up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode);
+
+ /* Driver is ready to receive data - start the USB device */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2);
+ if (ret < 0) {
+ netdev_err(up->netdev,
+ "could not start device, code: %d\n",
+ ret);
+ goto err_reset;
+ }
+
+ /* Call CAN layer open */
+ ret = open_candev(netdev);
+ if (ret)
+ goto err_stop;
+
+ /* Driver is ready to receive data. Submit RX URBS */
+ ret = ucan_submit_rx_urbs(up, urbs);
+ if (ret)
+ goto err_stop;
+
+ up->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Start the network queue */
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_stop:
+ /* The device has already been started, stop it */
+ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
+ if (ret_cleanup < 0)
+ netdev_err(up->netdev,
+ "could not stop device, code: %d\n",
+ ret_cleanup);
+
+err_reset:
+ /* The device might have received data, reset it for
+ * consistent state
+ */
+ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret_cleanup < 0)
+ netdev_err(up->netdev,
+ "could not reset device, code: %d\n",
+ ret_cleanup);
+
+ /* clean up unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+
+err_contexts:
+ ucan_release_context_array(up);
+ return ret;
+}
+
+static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
+ struct ucan_urb_context *context,
+ struct can_frame *cf,
+ u8 echo_index)
+{
+ int mlen;
+ struct urb *urb;
+ struct ucan_message_out *m;
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(up->netdev, "no memory left for URBs\n");
+ return NULL;
+ }
+
+ m = usb_alloc_coherent(up->udev,
+ sizeof(struct ucan_message_out),
+ GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!m) {
+ netdev_err(up->netdev, "no memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ /* build the USB message */
+ m->type = UCAN_OUT_TX;
+ m->msg.can_msg.id = cpu_to_le32(cf->can_id);
+
+ if (cf->can_id & CAN_RTR_FLAG) {
+ mlen = UCAN_OUT_HDR_SIZE +
+ offsetof(struct ucan_can_msg, dlc) +
+ sizeof(m->msg.can_msg.dlc);
+ m->msg.can_msg.dlc = cf->can_dlc;
+ } else {
+ mlen = UCAN_OUT_HDR_SIZE +
+ sizeof(m->msg.can_msg.id) + cf->can_dlc;
+ memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc);
+ }
+ m->len = cpu_to_le16(mlen);
+
+ context->dlc = cf->can_dlc;
+
+ m->subtype = echo_index;
+
+ /* build the urb */
+ usb_fill_bulk_urb(urb, up->udev,
+ usb_sndbulkpipe(up->udev,
+ up->out_ep_addr),
+ m, mlen, ucan_write_bulk_callback, context);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ return urb;
+}
+
+static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb)
+{
+ usb_free_coherent(up->udev, sizeof(struct ucan_message_out),
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+}
+
+/* callback when Linux needs to send a can frame */
+static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ unsigned long flags;
+ int ret;
+ u8 echo_index;
+ struct urb *urb;
+ struct ucan_urb_context *context;
+ struct ucan_priv *up = netdev_priv(netdev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* check skb */
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* allocate a context and slow down tx path, if fifo state is low */
+ context = ucan_alloc_context(up);
+ if (WARN_ON_ONCE(!context))
+ return NETDEV_TX_BUSY;
+
+ echo_index = context - up->context_array;
+
+ /* prepare urb for transmission */
+ urb = ucan_prepare_tx_urb(up, context, cf, echo_index);
+ if (!urb)
+ goto drop;
+
+ /* put the skb on can loopback stack */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_put_echo_skb(skb, up->netdev, echo_index);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ /* transmit it */
+ usb_anchor_urb(urb, &up->tx_urbs);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* cleanup urb */
+ if (ret) {
+ /* on error, clean up */
+ usb_unanchor_urb(urb);
+ ucan_clean_up_tx_urb(up, urb);
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "xmit err: failed to release context\n");
+
+ /* remove the skb from the echo stack - this also
+ * frees the skb
+ */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_free_echo_skb(up->netdev, echo_index);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ if (ret == -ENODEV) {
+ netif_device_detach(up->netdev);
+ } else {
+ netdev_warn(up->netdev,
+ "xmit err: failed to submit urb %d\n",
+ ret);
+ up->netdev->stats.tx_dropped++;
+ }
+ return NETDEV_TX_OK;
+ }
+
+ netif_trans_update(netdev);
+
+ /* release ref, as we do not need the urb anymore */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+drop:
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "xmit drop: failed to release context\n");
+ dev_kfree_skb(skb);
+ up->netdev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
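+
+/* Echo-skb pattern (sketch of the flow implemented above): the skb is
+ * parked with can_put_echo_skb() before the URB is submitted; on a
+ * successful TX-complete message it is looped back via
+ * can_get_echo_skb(), and on failure it is dropped via
+ * can_free_echo_skb(). up->echo_skb_lock serialises these calls.
+ */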
+
+/* Device goes down
+ *
+ * Clean up used resources
+ */
+static int ucan_close(struct net_device *netdev)
+{
+ int ret;
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ up->can.state = CAN_STATE_STOPPED;
+
+ /* stop sending data */
+ usb_kill_anchored_urbs(&up->tx_urbs);
+
+ /* stop receiving data */
+ usb_kill_anchored_urbs(&up->rx_urbs);
+
+ /* stop and reset can device */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
+ if (ret < 0)
+ netdev_err(up->netdev,
+ "could not stop device, code: %d\n",
+ ret);
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret < 0)
+ netdev_err(up->netdev,
+ "could not reset device, code: %d\n",
+ ret);
+
+ netif_stop_queue(netdev);
+
+ ucan_release_context_array(up);
+
+ close_candev(up->netdev);
+ return 0;
+}
+
+/* CAN driver callbacks */
+static const struct net_device_ops ucan_netdev_ops = {
+ .ndo_open = ucan_open,
+ .ndo_stop = ucan_close,
+ .ndo_start_xmit = ucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+/* Request to set bittiming
+ *
+ * This function generates a USB set-bittiming message and transmits
+ * it to the device
+ */
+static int ucan_set_bittiming(struct net_device *netdev)
+{
+ int ret;
+ struct ucan_priv *up = netdev_priv(netdev);
+ struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming;
+
+ cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming;
+ cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq);
+ cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp);
+ cmd_set_bittiming->sample_point =
+ cpu_to_le16(up->can.bittiming.sample_point);
+ cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg;
+ cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1;
+ cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2;
+ cmd_set_bittiming->sjw = up->can.bittiming.sjw;
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0,
+ sizeof(*cmd_set_bittiming));
+ return (ret < 0) ? ret : 0;
+}
+
+/* Restart the device to get it out of BUS-OFF state.
+ * Called when the user runs "ip link set can1 type can restart".
+ */
+static int ucan_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ int ret;
+ unsigned long flags;
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ netdev_dbg(up->netdev, "restarting device\n");
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0);
+ up->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* check if queue can be restarted,
+ * up->available_tx_urbs must be protected by the
+ * lock
+ */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ if (up->available_tx_urbs > 0)
+ netif_wake_queue(up->netdev);
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Probe the device, reset it and gather general device information */
+static int ucan_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret;
+ int i;
+ u32 protocol_version;
+ struct usb_device *udev;
+ struct net_device *netdev;
+ struct usb_host_interface *iface_desc;
+ struct ucan_priv *up;
+ struct usb_endpoint_descriptor *ep;
+ u16 in_ep_size;
+ u16 out_ep_size;
+ u8 in_ep_addr;
+ u8 out_ep_addr;
+ union ucan_ctl_payload *ctl_msg_buffer;
+ char firmware_str[sizeof(union ucan_ctl_payload) + 1];
+
+ udev = interface_to_usbdev(intf);
+
+ /* Stage 1 - Interface Parsing
+ * ---------------------------
+ *
+ * Identify the device USB interface descriptor and its
+ * endpoints. Probing is aborted on errors.
+ */
+
+ /* check if the interface is sane */
+ iface_desc = intf->cur_altsetting;
+ if (!iface_desc)
+ return -ENODEV;
+
+ dev_info(&udev->dev,
+ "%s: probing device on interface #%d\n",
+ UCAN_DRIVER_NAME,
+ iface_desc->desc.bInterfaceNumber);
+
+ /* interface sanity check */
+ if (iface_desc->desc.bNumEndpoints != 2) {
+ dev_err(&udev->dev,
+ "%s: invalid EP count (%d)",
+ UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints);
+ goto err_firmware_needs_update;
+ }
+
+ /* check interface endpoints */
+ in_ep_addr = 0;
+ out_ep_addr = 0;
+ in_ep_size = 0;
+ out_ep_size = 0;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+ ep = &iface_desc->endpoint[i].desc;
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ /* In Endpoint */
+ in_ep_addr = ep->bEndpointAddress;
+ in_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+ in_ep_size = le16_to_cpu(ep->wMaxPacketSize);
+ } else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
+ 0) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ /* Out Endpoint */
+ out_ep_addr = ep->bEndpointAddress;
+ out_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+ out_ep_size = le16_to_cpu(ep->wMaxPacketSize);
+ }
+ }
+
+ /* check if interface is sane */
+ if (!in_ep_addr || !out_ep_addr) {
+ dev_err(&udev->dev, "%s: invalid endpoint configuration\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (in_ep_size < sizeof(struct ucan_message_in)) {
+ dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (out_ep_size < sizeof(struct ucan_message_out)) {
+ dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+
+ /* Stage 2 - Device Identification
+ * -------------------------------
+ *
+ * The device interface seems to be a ucan device. Do further
+ * compatibility checks. On error probing is aborted, on
+ * success this stage leaves the ctl_msg_buffer with the
+ * reported contents of a GET_INFO command (supported
+ * bittimings, tx_fifo depth). This information is used in
+ * Stage 3 for the final driver initialisation.
+ */
+
+ /* Prepare memory for control transfers */
+ ctl_msg_buffer = devm_kzalloc(&udev->dev,
+ sizeof(union ucan_ctl_payload),
+ GFP_KERNEL);
+ if (!ctl_msg_buffer) {
+ dev_err(&udev->dev,
+ "%s: failed to allocate control pipe memory\n",
+ UCAN_DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ /* get protocol version
+ *
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
+ * because `up` is initialised in Stage 3
+ */
+ ret = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ UCAN_COMMAND_GET,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ UCAN_COMMAND_GET_PROTOCOL_VERSION,
+ iface_desc->desc.bInterfaceNumber,
+ ctl_msg_buffer,
+ sizeof(union ucan_ctl_payload),
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+
+ /* older firmware versions do not support this command - those
+ * are not supported by this driver
+ */
+ if (ret != 4) {
+ dev_err(&udev->dev,
+ "%s: could not read protocol version, ret=%d\n",
+ UCAN_DRIVER_NAME, ret);
+ if (ret >= 0)
+ ret = -EINVAL;
+ goto err_firmware_needs_update;
+ }
+
+ /* this driver currently supports protocol version 3 only */
+ protocol_version =
+ le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version);
+ if (protocol_version < UCAN_PROTOCOL_VERSION_MIN ||
+ protocol_version > UCAN_PROTOCOL_VERSION_MAX) {
+ dev_err(&udev->dev,
+ "%s: device protocol version %d is not supported\n",
+ UCAN_DRIVER_NAME, protocol_version);
+ goto err_firmware_needs_update;
+ }
+
+ /* request the device information and store it in ctl_msg_buffer
+ *
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
+ * because `up` is initialised in Stage 3
+ */
+ ret = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ UCAN_COMMAND_GET,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ UCAN_COMMAND_GET_INFO,
+ iface_desc->desc.bInterfaceNumber,
+ ctl_msg_buffer,
+ sizeof(ctl_msg_buffer->cmd_get_device_info),
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(&udev->dev, "%s: failed to retrieve device info\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) {
+ dev_err(&udev->dev, "%s: device reported invalid device info\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) {
+ dev_err(&udev->dev,
+ "%s: device reported invalid tx-fifo size\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+
+ /* Stage 3 - Driver Initialisation
+ * -------------------------------
+ *
+ * Register device to Linux, prepare private structures and
+ * reset the device.
+ */
+
+ /* allocate driver resources */
+ netdev = alloc_candev(sizeof(struct ucan_priv),
+ ctl_msg_buffer->cmd_get_device_info.tx_fifo);
+ if (!netdev) {
+ dev_err(&udev->dev,
+ "%s: cannot allocate candev\n", UCAN_DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ up = netdev_priv(netdev);
+
+ /* initialize data */
+ up->udev = udev;
+ up->intf = intf;
+ up->netdev = netdev;
+ up->intf_index = iface_desc->desc.bInterfaceNumber;
+ up->in_ep_addr = in_ep_addr;
+ up->out_ep_addr = out_ep_addr;
+ up->in_ep_size = in_ep_size;
+ up->ctl_msg_buffer = ctl_msg_buffer;
+ up->context_array = NULL;
+ up->available_tx_urbs = 0;
+
+ up->can.state = CAN_STATE_STOPPED;
+ up->can.bittiming_const = &up->device_info.bittiming_const;
+ up->can.do_set_bittiming = ucan_set_bittiming;
+ up->can.do_set_mode = &ucan_set_mode;
+ spin_lock_init(&up->context_lock);
+ spin_lock_init(&up->echo_skb_lock);
+ netdev->netdev_ops = &ucan_netdev_ops;
+
+ usb_set_intfdata(intf, up);
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ /* parse device information
+ * the data retrieved in Stage 2 is still available in
+ * up->ctl_msg_buffer
+ */
+ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
+
+ /* just print some device information - if available */
+ ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
+ sizeof(union ucan_ctl_payload));
+ if (ret > 0) {
+ /* copy string while ensuring zero termination */
+ strncpy(firmware_str, up->ctl_msg_buffer->raw,
+ sizeof(union ucan_ctl_payload));
+ firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
+ } else {
+ strcpy(firmware_str, "unknown");
+ }
+
+ /* device is compatible, reset it */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret < 0)
+ goto err_free_candev;
+
+ init_usb_anchor(&up->rx_urbs);
+ init_usb_anchor(&up->tx_urbs);
+
+ up->can.state = CAN_STATE_STOPPED;
+
+ /* register the device */
+ ret = register_candev(netdev);
+ if (ret)
+ goto err_free_candev;
+
+ /* initialisation complete, log device info */
+ netdev_info(up->netdev, "registered device\n");
+ netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+
+ /* success */
+ return 0;
+
+err_free_candev:
+ free_candev(netdev);
+ return ret;
+
+err_firmware_needs_update:
+ dev_err(&udev->dev,
+ "%s: probe failed; try to update the device firmware\n",
+ UCAN_DRIVER_NAME);
+ return -ENODEV;
+}
+
+/* disconnect the device */
+static void ucan_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev;
+ struct ucan_priv *up = usb_get_intfdata(intf);
+
+ udev = interface_to_usbdev(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (up) {
+ unregister_netdev(up->netdev);
+ free_candev(up->netdev);
+ }
+}
+
+static struct usb_device_id ucan_table[] = {
+ /* Mule (soldered onto compute modules) */
+ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)},
+ /* Seal (standalone USB stick) */
+ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ucan_table);
+/* driver callbacks */
+static struct usb_driver ucan_driver = {
+ .name = UCAN_DRIVER_NAME,
+ .probe = ucan_probe,
+ .disconnect = ucan_disconnect,
+ .id_table = ucan_table,
+};
+
+module_usb_driver(ucan_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Martin Elshuber <martin.elshuber@theobroma-systems.com>");
+MODULE_AUTHOR("Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com>");
+MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices");
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 89aec07c225f..045f0845e665 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
@@ -48,16 +51,34 @@ enum xcan_reg {
XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
- XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */
- XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
- XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
- XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
- XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
- XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
- XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
- XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
+
+ /* not on CAN FD cores */
+ XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
+ XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
+ XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
+
+ /* only on CAN FD cores */
+ XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+ XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
+ XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
+ XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
+ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
};
+#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
+#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
+#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
+#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
+
+#define XCAN_CANFD_FRAME_SIZE 0x48
+#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
+ XCAN_CANFD_FRAME_SIZE * (n))
+#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
+ XCAN_CANFD_FRAME_SIZE * (n))
+
+/* the single TX mailbox used by this driver on CAN FD HW */
+#define XCAN_TX_MAILBOX_IDX 0
+
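+/* Example (illustrative): TX mailbox n starts at XCAN_TXMSG_FRAME_OFFSET(n),
+ * e.g. mailbox 0 at 0x0100 and mailbox 1 at 0x0100 + 0x48 = 0x0148; the
+ * ID/DLC/DW1/DW2 words of a frame are then reached via the
+ * XCAN_FRAME_*_OFFSET() macros above.
+ */
+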
/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
@@ -67,6 +88,9 @@ enum xcan_reg {
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
+#define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
+#define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
+#define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
@@ -80,6 +104,7 @@ enum xcan_reg {
#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
+#define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
@@ -97,15 +122,15 @@ enum xcan_reg {
#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
-
-#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
- XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
- XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
- XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
+#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
+#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
+#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
+#define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
@@ -115,9 +140,31 @@ enum xcan_reg {
#define XCAN_FRAME_MAX_DATA_LEN 8
#define XCAN_TIMEOUT (1 * HZ)
+/* TX-FIFO-empty interrupt available */
+#define XCAN_FLAG_TXFEMP 0x0001
+/* RX Match Not Finished interrupt available */
+#define XCAN_FLAG_RXMNF 0x0002
+/* Extended acceptance filters with control at 0xE0 */
+#define XCAN_FLAG_EXT_FILTERS 0x0004
+/* TX mailboxes instead of TX FIFO */
+#define XCAN_FLAG_TX_MAILBOXES 0x0008
+/* RX FIFO with each buffer in separate registers at 0x1100
+ * instead of the regular FIFO at 0x50
+ */
+#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
+
+struct xcan_devtype_data {
+ unsigned int flags;
+ const struct can_bittiming_const *bittiming_const;
+ const char *bus_clk_name;
+ unsigned int btr_ts2_shift;
+ unsigned int btr_sjw_shift;
+};
+
/**
* struct xcan_priv - This definition define CAN driver instance
* @can: CAN private data structure.
+ * @tx_lock: Lock for synchronizing TX interrupt handling
* @tx_head: Tx CAN packets ready to send on the queue
* @tx_tail: Tx CAN packets successfully sended on the queue
* @tx_max: Maximum number packets the driver can send
@@ -129,9 +176,11 @@ enum xcan_reg {
* @irq_flags: For request_irq()
* @bus_clk: Pointer to struct clk
* @can_clk: Pointer to struct clk
+ * @devtype: Device type specific constants
*/
struct xcan_priv {
struct can_priv can;
+ spinlock_t tx_lock;
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -144,6 +193,7 @@ struct xcan_priv {
unsigned long irq_flags;
struct clk *bus_clk;
struct clk *can_clk;
+ struct xcan_devtype_data devtype;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -159,6 +209,18 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const xcan_bittiming_const_canfd = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -214,6 +276,23 @@ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
}
/**
+ * xcan_rx_int_mask - Get the mask for the receive interrupt
+ * @priv: Driver private data structure
+ *
+ * Return: The receive interrupt mask used by the driver on this HW
+ */
+static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
+{
+ /* RXNEMP is better suited for our use case as it cannot be cleared
+ * while the FIFO is non-empty, but CAN FD HW does not have it
+ */
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
+ return XCAN_IXR_RXOK_MASK;
+ else
+ return XCAN_IXR_RXNEMP_MASK;
+}
+
+/**
* set_reset_mode - Resets the CAN device mode
* @ndev: Pointer to net_device structure
*
@@ -238,6 +317,10 @@ static int set_reset_mode(struct net_device *ndev)
usleep_range(500, 10000);
}
+ /* reset clears FIFOs */
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
return 0;
}
@@ -273,10 +356,10 @@ static int xcan_set_bittiming(struct net_device *ndev)
btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
/* Setting Time Segment 2 in BTR Register */
- btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
+ btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
/* Setting Synchronous jump width in BTR Register */
- btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
+ btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
@@ -304,6 +387,7 @@ static int xcan_chip_start(struct net_device *ndev)
u32 reg_msr, reg_sr_mask;
int err;
unsigned long timeout;
+ u32 ier;
/* Check if it is in reset mode */
err = set_reset_mode(ndev);
@@ -315,7 +399,15 @@ static int xcan_chip_start(struct net_device *ndev)
return err;
/* Enable interrupts */
- priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
+ ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
+ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
+ XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+
+ if (priv->devtype.flags & XCAN_FLAG_RXMNF)
+ ier |= XCAN_IXR_RXMNF_MASK;
+
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
/* Check whether it is loopback mode or normal mode */
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
@@ -326,6 +418,12 @@ static int xcan_chip_start(struct net_device *ndev)
reg_sr_mask = XCAN_SR_NORMAL_MASK;
}
+ /* enable the first extended filter, if any, as cores with extended
+ * filtering default to non-receipt if all filters are disabled
+ */
+ if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
+ priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
+
priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
@@ -376,33 +474,15 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
/**
- * xcan_start_xmit - Starts the transmission
- * @skb: sk_buff pointer that contains data to be Txed
- * @ndev: Pointer to net_device structure
- *
- * This function is invoked from upper layers to initiate transmission. This
- * function uses the next available free txbuff and populates their fields to
- * start the transmission.
- *
- * Return: 0 on success and failure value on error
+ * xcan_write_frame - Write a frame to HW
+ * @priv: Driver private data structure
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @frame_offset: Register offset to write the frame to
*/
-static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
+ int frame_offset)
{
- struct xcan_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- /* Check if the TX buffer is full */
- if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
- XCAN_SR_TXFLL_MASK)) {
- netif_stop_queue(ndev);
- netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
- return NETDEV_TX_BUSY;
- }
+ struct can_frame *cf = (struct can_frame *)skb->data;
/* Watch carefully on the bit sequence */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -438,26 +518,119 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (cf->can_dlc > 4)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
- priv->tx_head++;
-
- /* Write the Frame to Xilinx CAN TX FIFO */
- priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
- /* If the CAN frame is RTR frame this write triggers tranmission */
- priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is RTR frame this write triggers transmission
+ * (not on CAN FD)
+ */
+ priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
if (!(cf->can_id & CAN_RTR_FLAG)) {
- priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
+ priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
+ data[0]);
/* If the CAN frame is Standard/Extended frame this
- * write triggers tranmission
+ * write triggers transmission (not on CAN FD)
*/
- priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
- stats->tx_bytes += cf->can_dlc;
+ priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
+ data[1]);
}
+}
+
+/**
+ * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -ENOSPC if FIFO is full.
+ */
+static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+
+ /* Check if the TX buffer is full */
+ if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
+ XCAN_SR_TXFLL_MASK))
+ return -ENOSPC;
+
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ priv->tx_head++;
+
+ xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
+
+ /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+ if (priv->tx_max > 1)
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
/* Check if the TX buffer is full */
if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return 0;
+}
+
+/**
+ * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -ENOSPC if there is no space
+ */
+static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+
+ if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
+ BIT(XCAN_TX_MAILBOX_IDX)))
+ return -ENOSPC;
+
+ can_put_echo_skb(skb, ndev, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ priv->tx_head++;
+
+ xcan_write_frame(priv, skb,
+ XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
+
+ /* Mark buffer as ready for transmit */
+ priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
+
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return 0;
+}
+
+/**
+ * xcan_start_xmit - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from upper layers to initiate transmission.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
+ */
+static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
+ ret = xcan_start_xmit_mailbox(skb, ndev);
+ else
+ ret = xcan_start_xmit_fifo(skb, ndev);
+
+ if (ret < 0) {
+ netdev_err(ndev, "BUG!, TX full when queue awake!\n");
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
return NETDEV_TX_OK;
}
@@ -465,13 +638,14 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* xcan_rx - Is called from CAN isr to complete the received
* frame processing
* @ndev: Pointer to net_device structure
+ * @frame_base: Register offset to the frame to be read
*
* This function is invoked from the CAN isr(poll) to process the Rx frames. It
* does minimal processing and invokes "netif_receive_skb" to complete further
* processing.
* Return: 1 on success and 0 on failure.
*/
-static int xcan_rx(struct net_device *ndev)
+static int xcan_rx(struct net_device *ndev, int frame_base)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
@@ -486,9 +660,9 @@ static int xcan_rx(struct net_device *ndev)
}
/* Read a frame from Xilinx zynq CANPS */
- id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
- dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
- XCAN_DLCR_DLC_SHIFT;
+ id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
+ dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
+ XCAN_DLCR_DLC_SHIFT;
/* Change Xilinx CAN data length format to socketCAN data format */
cf->can_dlc = get_can_dlc(dlc);
@@ -511,8 +685,8 @@ static int xcan_rx(struct net_device *ndev)
}
/* DW1/DW2 must always be read to remove message from RXFIFO */
- data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
- data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+ data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
+ data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
if (!(cf->can_id & CAN_RTR_FLAG)) {
/* Change Xilinx CAN data format to socketCAN data format */
@@ -530,6 +704,103 @@ static int xcan_rx(struct net_device *ndev)
}
/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev: Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+ return CAN_STATE_ERROR_PASSIVE;
+ else if (status & XCAN_SR_ERRWRN_MASK)
+ return CAN_STATE_ERROR_WARNING;
+ else
+ return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev: Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf: Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+ enum can_state new_state,
+ struct can_frame *cf)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+ u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+ u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+ enum can_state tx_state = txerr >= rxerr ? new_state : 0;
+ enum can_state rx_state = txerr <= rxerr ? new_state : 0;
+
+ /* non-ERROR states are handled elsewhere */
+ if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
+ return;
+
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (cf) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+}
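
The tx_state/rx_state split above follows the usual SocketCAN convention: the side with the higher error counter owns the new state, and on a tie both sides move. A small userspace sketch of that rule (enum values illustrative; 0 stands for "unchanged", as in can_change_state()):

    #include <stdio.h>

    enum state { UNCHANGED = 0, ERR_ACTIVE, ERR_WARNING, ERR_PASSIVE };

    static void pick_states(unsigned int txerr, unsigned int rxerr,
                            enum state new_state,
                            enum state *tx_state, enum state *rx_state)
    {
            *tx_state = txerr >= rxerr ? new_state : UNCHANGED;
            *rx_state = txerr <= rxerr ? new_state : UNCHANGED;
    }

    int main(void)
    {
            enum state tx, rx;

            pick_states(130, 40, ERR_PASSIVE, &tx, &rx);
            printf("tx=%d rx=%d\n", tx, rx);        /* only the TX side is passive */

            pick_states(96, 96, ERR_WARNING, &tx, &rx);
            printf("tx=%d rx=%d\n", tx, rx);        /* tie: both sides move */
            return 0;
    }
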
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev: Pointer to net_device structure
+ *
+ * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check
+ * whether the completed RX/TX has allowed it to recover to a less
+ * severe state, and update the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ enum can_state old_state = priv->can.state;
+ enum can_state new_state;
+
+ /* changing error state due to successful frame RX/TX can only
+ * occur from these states
+ */
+ if (old_state != CAN_STATE_ERROR_WARNING &&
+ old_state != CAN_STATE_ERROR_PASSIVE)
+ return;
+
+ new_state = xcan_current_error_state(ndev);
+
+ if (new_state != old_state) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+ if (skb) {
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+ }
+}
+
+/**
* xcan_err_interrupt - error frame Isr
* @ndev: net_device pointer
* @isr: interrupt status register value
@@ -544,16 +815,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- u32 err_status, status, txerr = 0, rxerr = 0;
+ u32 err_status;
skb = alloc_can_err_skb(ndev, &cf);
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
- txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
- rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
- XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +830,11 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
- } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (rxerr > 127) ?
- CAN_ERR_CRTL_RX_PASSIVE :
- CAN_ERR_CRTL_TX_PASSIVE;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
+ } else {
+ enum can_state new_state = xcan_current_error_state(ndev);
+
+ if (new_state != priv->can.state)
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}
/* Check for Arbitration lost interrupt */
@@ -600,13 +850,23 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
}
}
+ /* Check for RX Match Not Finished interrupt */
+ if (isr & XCAN_IXR_RXMNF_MASK) {
+ stats->rx_dropped++;
+ stats->rx_errors++;
+ netdev_err(ndev, "RX match not finished, frame discarded\n");
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
+ }
+ }
+
/* Check for error interrupt */
if (isr & XCAN_IXR_ERROR_MASK) {
if (skb)
@@ -691,6 +951,44 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
}
/**
+ * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
+ * @priv: Driver private data structure
+ *
+ * Return: Register offset of the next frame in the RX FIFO, or -ENOENT
+ * if the FIFO is empty.
+ */
+static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
+{
+ int offset;
+
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
+ u32 fsr;
+
+ /* clear RXOK before the is-empty check so that any newly
+ * received frame will reassert it without a race
+ */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
+
+ fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
+
+ /* check if RX FIFO is empty */
+ if (!(fsr & XCAN_FSR_FL_MASK))
+ return -ENOENT;
+
+ offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+
+ } else {
+ /* check if RX FIFO is empty */
+ if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
+ XCAN_IXR_RXNEMP_MASK))
+ return -ENOENT;
+
+ /* frames are read from a static offset */
+ offset = XCAN_RXFIFO_OFFSET;
+ }
+
+ return offset;
+}
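
The multi-FIFO variant reads a fill level and a read index out of FSR: a zero fill level means empty, otherwise the read index selects which slot to read, and the caller later bumps the index via the IRI bit. A hedged userspace model of the offset computation (register layout and base/stride invented for illustration):

    #include <stdio.h>

    /* Invented FSR layout: low byte = read index, next byte = fill level.
     * The real XCAN_FSR_* masks differ.
     */
    #define FSR_RI(fsr)             ((fsr) & 0xff)
    #define FSR_FL(fsr)             (((fsr) >> 8) & 0xff)
    #define FRAME_OFFSET(ri)        (0x1000 + (ri) * 0x10)  /* hypothetical */

    static int next_frame_offset(unsigned int fsr)
    {
            if (!FSR_FL(fsr))
                    return -1;      /* FIFO empty, like -ENOENT above */
            return FRAME_OFFSET(FSR_RI(fsr));
    }

    int main(void)
    {
            unsigned int fsr = (3 << 8) | 5;        /* 3 frames queued, index 5 */

            printf("offset=0x%x\n", next_frame_offset(fsr));        /* 0x1050 */
            printf("empty=%d\n", next_frame_offset(0));             /* -1 */
            return 0;
    }
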
+
+/**
* xcan_rx_poll - Poll routine for rx packets (NAPI)
* @napi: napi structure pointer
* @quota: Max number of rx packets to be processed.
@@ -704,31 +1002,35 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
struct net_device *ndev = napi->dev;
struct xcan_priv *priv = netdev_priv(ndev);
- u32 isr, ier;
+ u32 ier;
int work_done = 0;
-
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
- while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
- if (isr & XCAN_IXR_RXOK_MASK) {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXOK_MASK);
- work_done += xcan_rx(ndev);
- } else {
+ int frame_offset;
+
+ while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
+ (work_done < quota)) {
+ work_done += xcan_rx(ndev, frame_offset);
+
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
+ /* increment read index */
+ priv->write_reg(priv, XCAN_FSR_OFFSET,
+ XCAN_FSR_IRI_MASK);
+ else
+ /* clear rx-not-empty (will actually clear only if
+ * empty)
+ */
priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXNEMP_MASK);
- break;
- }
- priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ XCAN_IXR_RXNEMP_MASK);
}
- if (work_done)
+ if (work_done) {
can_led_event(ndev, CAN_LED_EVENT_RX);
+ xcan_update_error_state_after_rxtx(ndev);
+ }
if (work_done < quota) {
napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ ier |= xcan_rx_int_mask(priv);
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
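
The rewritten poll loop keeps the standard NAPI shape: consume frames until the FIFO drains or the quota is hit, and re-enable RX interrupts only when work_done < quota (the drained case). A schematic userspace model of that control flow (the helpers are stand-ins for the register accessors):

    #include <stdio.h>

    static int fifo_frames = 7;     /* pretend HW FIFO */
    static int rx_irq_enabled;

    static int fifo_pop(void)
    {
            if (!fifo_frames)
                    return -1;
            fifo_frames--;
            return 0;
    }

    static int poll(int quota)
    {
            int work_done = 0;

            while (work_done < quota && fifo_pop() == 0)
                    work_done++;

            if (work_done < quota)
                    rx_irq_enabled = 1;     /* napi_complete_done() + IER write */
            return work_done;
    }

    int main(void)
    {
            int done = poll(4);

            printf("first pass: %d, irq=%d\n", done, rx_irq_enabled);  /* 4, 0 */
            done = poll(4);
            printf("second pass: %d, irq=%d\n", done, rx_irq_enabled); /* 3, 1 */
            return 0;
    }
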
@@ -743,18 +1045,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ unsigned int frames_in_fifo;
+ int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+ unsigned long flags;
+ int retries = 0;
+
+ /* Synchronize with xmit as we need to know the exact number
+ * of frames in the FIFO to stay in sync due to the TXFEMP
+ * handling.
+ * This also prevents a race between netif_wake_queue() and
+ * netif_stop_queue().
+ */
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ frames_in_fifo = priv->tx_head - priv->tx_tail;
- while ((priv->tx_head - priv->tx_tail > 0) &&
- (isr & XCAN_IXR_TXOK_MASK)) {
+ if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+ /* clear TXOK anyway to avoid getting back here */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
- can_get_echo_skb(ndev, priv->tx_tail %
- priv->tx_max);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return;
+ }
+
+ /* Check if 2 frames were sent (TXOK only means that at least 1
+ * frame was sent).
+ */
+ if (frames_in_fifo > 1) {
+ WARN_ON(frames_in_fifo > priv->tx_max);
+
+ /* Synchronize TXOK and isr so that after the loop:
+ * (1) isr variable is up-to-date at least up to TXOK clear
+ * time. This avoids us clearing a TXOK of a second frame
+ * but not noticing that the FIFO is now empty and thus
+ * marking only a single frame as sent.
+ * (2) No TXOK is left. Having one could mean leaving a
+ * stray TXOK as we might process the associated frame
+ * via TXFEMP handling as we read TXFEMP *after* TXOK
+ * clear to satisfy (1).
+ */
+ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+
+ if (isr & XCAN_IXR_TXFEMP_MASK) {
+ /* nothing in FIFO anymore */
+ frames_sent = frames_in_fifo;
+ }
+ } else {
+ /* single frame in fifo, just clear TXOK */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ }
+
+ while (frames_sent--) {
+ stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
+ priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
- can_led_event(ndev, CAN_LED_EVENT_TX);
+
netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ xcan_update_error_state_after_rxtx(ndev);
}
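
The TXOK/TXFEMP handling exists because TXOK only proves that at least one frame left the FIFO. With at most two frames ever queued (tx_max is capped at 2 in probe below), sampling TXFEMP after acking every pending TXOK disambiguates the one-sent and two-sent cases. A simplified model of that decision, under the same at-most-two-in-flight assumption:

    #include <assert.h>
    #include <stdbool.h>

    /* frames_in_fifo: driver bookkeeping (1 or 2 when tx_max == 2).
     * txfemp: TXFEMP as sampled after all pending TXOK bits were cleared.
     */
    static unsigned int frames_sent(unsigned int frames_in_fifo, bool txfemp)
    {
            if (frames_in_fifo > 1 && txfemp)
                    return frames_in_fifo;  /* FIFO drained: all went out */
            return 1;                       /* only the oldest is known done */
    }

    int main(void)
    {
            assert(frames_sent(1, true) == 1);
            assert(frames_sent(2, false) == 1);     /* second frame still queued */
            assert(frames_sent(2, true) == 2);      /* both completed */
            return 0;
    }
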
/**
@@ -773,6 +1128,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
+ u32 isr_errors;
+ u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +1148,18 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
xcan_tx_interrupt(ndev, isr);
/* Check for the type of error interrupt and Processing it */
- if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
- XCAN_IXR_ARBLST_MASK));
+ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK);
+ if (isr_errors) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
}
/* Check for the type of receive interrupt and Processing it */
- if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+ if (isr & rx_int_mask) {
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ ier &= ~rx_int_mask;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
napi_schedule(&priv->napi);
}
@@ -819,13 +1176,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
static void xcan_chip_stop(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 ier;
/* Disable interrupts and leave the can in configuration mode */
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~XCAN_INTR_ALL;
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ set_reset_mode(ndev);
priv->can.state = CAN_STATE_STOPPED;
}
@@ -958,10 +1311,15 @@ static const struct net_device_ops xcan_netdev_ops = {
*/
static int __maybe_unused xcan_suspend(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_suspend(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
- return 0;
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ xcan_chip_stop(ndev);
+ }
+
+ return pm_runtime_force_suspend(dev);
}
/**
@@ -973,11 +1331,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
*/
static int __maybe_unused xcan_resume(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_resume(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret;
- return 0;
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ ret = xcan_chip_start(ndev);
+ if (ret) {
+ dev_err(dev, "xcan_chip_start failed on resume\n");
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+ return 0;
}
/**
@@ -992,14 +1366,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
-
- priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
- priv->can.state = CAN_STATE_SLEEPING;
-
clk_disable_unprepare(priv->bus_clk);
clk_disable_unprepare(priv->can_clk);
@@ -1018,7 +1384,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- u32 isr, status;
ret = clk_prepare_enable(priv->bus_clk);
if (ret) {
@@ -1032,27 +1397,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
return ret;
}
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
- if (netif_running(ndev)) {
- if (isr & XCAN_IXR_BSOFF_MASK) {
- priv->can.state = CAN_STATE_BUS_OFF;
- priv->write_reg(priv, XCAN_SRR_OFFSET,
- XCAN_SRR_RESET_MASK);
- } else if ((status & XCAN_SR_ESTAT_MASK) ==
- XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- } else {
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
-
return 0;
}
@@ -1061,6 +1405,40 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
+static const struct xcan_devtype_data xcan_zynq_data = {
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
+ .bus_clk_name = "pclk",
+};
+
+static const struct xcan_devtype_data xcan_axi_data = {
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
+ .bus_clk_name = "s_axi_aclk",
+};
+
+static const struct xcan_devtype_data xcan_canfd_data = {
+ .flags = XCAN_FLAG_EXT_FILTERS |
+ XCAN_FLAG_RXMNF |
+ XCAN_FLAG_TX_MAILBOXES |
+ XCAN_FLAG_RX_FIFO_MULTI,
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
+ .bus_clk_name = "s_axi_aclk",
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+ { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+ { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
+ { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
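
Attaching per-compatible data to the match table is the standard kernel pattern for hardware variants: of_match_device() returns the matching entry and ->data points at the variant description. A userspace analogue of that lookup, including the driver's AXI fallback (types trimmed for illustration):

    #include <stdio.h>
    #include <string.h>

    struct devtype_data {
            const char *bus_clk_name;
    };

    struct of_id {
            const char *compatible;
            const struct devtype_data *data;
    };

    static const struct devtype_data zynq = { .bus_clk_name = "pclk" };
    static const struct devtype_data axi  = { .bus_clk_name = "s_axi_aclk" };

    static const struct of_id match_table[] = {
            { "xlnx,zynq-can-1.0",   &zynq },
            { "xlnx,axi-can-1.00.a", &axi },
            { NULL, NULL },         /* sentinel, like the empty entry above */
    };

    static const struct devtype_data *match(const char *compatible)
    {
            const struct of_id *id;

            for (id = match_table; id->compatible; id++)
                    if (!strcmp(id->compatible, compatible))
                            return id->data;
            return &axi;            /* the probe function's default */
    }

    int main(void)
    {
            printf("%s\n", match("xlnx,zynq-can-1.0")->bus_clk_name); /* pclk */
            return 0;
    }
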
+
/**
* xcan_probe - Platform registration call
* @pdev: Handle to the platform device structure
@@ -1075,8 +1453,13 @@ static int xcan_probe(struct platform_device *pdev)
struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
+ const struct of_device_id *of_id;
+ const struct xcan_devtype_data *devtype = &xcan_axi_data;
void __iomem *addr;
- int ret, rx_max, tx_max;
+ int ret;
+ int rx_max, tx_max;
+ int hw_tx_max, hw_rx_max;
+ const char *hw_tx_max_property;
/* Get the virtual base address for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,13 +1469,54 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
- if (ret < 0)
+ of_id = of_match_device(xcan_of_match, &pdev->dev);
+ if (of_id && of_id->data)
+ devtype = of_id->data;
+
+ hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
+ "tx-mailbox-count" : "tx-fifo-depth";
+
+ ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
+ &hw_tx_max);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing %s property\n",
+ hw_tx_max_property);
goto err;
+ }
- ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
- if (ret < 0)
+ ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
+ &hw_rx_max);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "missing rx-fifo-depth property (mailbox mode is not supported)\n");
goto err;
+ }
+
+ /* With TX FIFO:
+ *
+ * There is no way to directly figure out how many frames have been
+ * sent when the TXOK interrupt is processed. If TXFEMP
+ * is supported, we can have 2 frames in the FIFO and use TXFEMP
+ * to determine if 1 or 2 frames have been sent.
+ * Theoretically we should be able to use TXFWMEMP to determine up
+ * to 3 frames, but it seems that after putting a second frame in the
+ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+ * sent), which is not a sensible state - possibly TXFWMEMP is not
+ * completely synchronized with the rest of the bits?
+ *
+ * With TX mailboxes:
+ *
+ * HW sends frames in CAN ID priority order. To preserve FIFO ordering
+ * we submit frames one at a time.
+ */
+ if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (devtype->flags & XCAN_FLAG_TXFEMP))
+ tx_max = min(hw_tx_max, 2);
+ else
+ tx_max = 1;
+
+ rx_max = hw_rx_max;
/* Create a CAN device instance */
ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
@@ -1101,13 +1525,15 @@ static int xcan_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->dev = &pdev->dev;
- priv->can.bittiming_const = &xcan_bittiming_const;
+ priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
priv->reg_base = addr;
priv->tx_max = tx_max;
+ priv->devtype = *devtype;
+ spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
ndev->irq = platform_get_irq(pdev, 0);
@@ -1124,22 +1550,12 @@ static int xcan_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->can_clk);
goto err_free;
}
- /* Check for type of CAN device */
- if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,zynq-can-1.0")) {
- priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
- ret = PTR_ERR(priv->bus_clk);
- goto err_free;
- }
- } else {
- priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
- ret = PTR_ERR(priv->bus_clk);
- goto err_free;
- }
+
+ priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
+ if (IS_ERR(priv->bus_clk)) {
+ dev_err(&pdev->dev, "bus clock not found\n");
+ ret = PTR_ERR(priv->bus_clk);
+ goto err_free;
}
priv->write_reg = xcan_write_reg_le;
@@ -1172,9 +1588,9 @@ static int xcan_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
- priv->reg_base, ndev->irq, priv->can.clock.freq,
- priv->tx_max);
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+ hw_tx_max, priv->tx_max);
return 0;
@@ -1208,14 +1624,6 @@ static int xcan_remove(struct platform_device *pdev)
return 0;
}
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
- { .compatible = "xlnx,zynq-can-1.0", },
- { .compatible = "xlnx,axi-can-1.00.a", },
- { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
.remove = xcan_remove,
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2b81b97e994f..d3ce1e4cb4d3 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -5,7 +5,7 @@ source "drivers/net/dsa/b53/Kconfig"
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
- depends on HAS_IOMEM && NET_DSA && OF_MDIO
+ depends on HAS_IOMEM && NET_DSA
select NET_DSA_TAG_BRCM
select FIXED_PHY
select BCM7XXX_PHY
@@ -52,6 +52,17 @@ config NET_DSA_QCA8K
This enables support for the Qualcomm Atheros QCA8K Ethernet
switch chips.
+config NET_DSA_REALTEK_SMI
+ tristate "Realtek SMI Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ ---help---
+ This enables support for the Realtek SMI-based switch
+ chips, currently only RTL8366RB.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
@@ -76,4 +87,15 @@ config NET_DSA_SMSC_LAN9303_MDIO
Enable access functions if the SMSC/Microchip LAN9303 is configured
for MDIO managed mode.
+config NET_DSA_VITESSE_VSC73XX
+ tristate "Vitesse VSC7385/7388/7395/7398 support"
+ depends on OF && SPI
+ depends on NET_DSA
+ select FIXED_PHY
+ select VITESSE_PHY
+ select GPIOLIB
+ ---help---
+ This enables support for the Vitesse VSC7385, VSC7388,
+ VSC7395 and VSC7398 SparX integrated Ethernet switches.
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 15c2a831edf1..46c1cba91ffe 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -8,9 +8,12 @@ endif
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o
+realtek-objs := realtek-smi.o rtl8366.o rtl8366rb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 8247481eaa06..91de2ba99ad1 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -374,6 +374,7 @@ static const struct of_device_id b53_srab_of_match[] = {
{ .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
{ .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,omega-srab", .data = (void *)BCM583XX_DEVICE_ID },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, b53_srab_of_match);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 02e8982519ce..e0066adcd2f3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -166,6 +166,11 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+ /* Enable learning */
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg &= ~BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
+
/* Enable Broadcom tags for that port if requested */
if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
@@ -220,10 +225,15 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- u32 off, reg;
+ u32 reg;
- if (priv->wol_ports_mask & (1 << port))
+ /* Disable learning while in WoL mode */
+ if (priv->wol_ports_mask & (1 << port)) {
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
return;
+ }
if (port == priv->moca_port)
bcm_sf2_port_intr_disable(priv, port);
@@ -231,11 +241,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, false);
- if (dsa_is_cpu_port(ds, port))
- off = CORE_IMP_CTL;
- else
- off = CORE_G_PCTL_PORT(port);
-
b53_disable_port(ds, port, phy);
/* Power down the port memory */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index b89acaee12d4..47c5f272a084 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -732,6 +732,8 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ s8 cpu_port = ds->ports[port].cpu_dp->index;
+ __u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
int ret = -EINVAL;
@@ -748,21 +750,28 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
fs->location > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
+ /* This rule is a Wake-on-LAN filter and we must specifically
+ * target the CPU port in order for it to work.
+ */
+ if (ring_cookie == RX_CLS_FLOW_WAKE)
+ ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;
+
/* We do not support discarding packets, check that the
* destination port is enabled and that we are within the
* number of ports supported by the switch
*/
- port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;
+ port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;
- if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
- !dsa_is_user_port(ds, port_num) ||
+ if (ring_cookie == RX_CLS_FLOW_DISC ||
+ !(dsa_is_user_port(ds, port_num) ||
+ dsa_is_cpu_port(ds, port_num)) ||
port_num >= priv->hw_params.num_ports)
return -EINVAL;
/*
* We have a small oddity where Port 6 just does not have a
* valid bit here (so we subtract by one).
*/
- queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
+ queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
if (port_num >= 7)
port_num -= 1;
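
The ring_cookie rewrite works because ethtool encodes the destination as port * SF2_NUM_EGRESS_QUEUES + queue, so steering a Wake-on-LAN rule at the CPU port is simply selecting queue 0 of that port. A quick arithmetic check of the encoding (the queue count and port number here are illustrative):

    #include <assert.h>

    #define NUM_EGRESS_QUEUES 8     /* stands in for SF2_NUM_EGRESS_QUEUES */

    int main(void)
    {
            unsigned long long cpu_port = 6;        /* example CPU port index */
            unsigned long long ring_cookie = cpu_port * NUM_EGRESS_QUEUES;

            assert(ring_cookie / NUM_EGRESS_QUEUES == cpu_port);    /* port */
            assert(ring_cookie % NUM_EGRESS_QUEUES == 0);           /* queue 0 */
            return 0;
    }
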
@@ -1187,6 +1196,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
+ struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1213,12 +1223,23 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
mutex_unlock(&priv->cfp.lock);
+ if (ret)
+ return ret;
+
+ /* Pass up the commands to the attached master network device */
+ if (p->ethtool_ops->get_rxnfc) {
+ ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+
return ret;
}
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
+ struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1239,6 +1260,23 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
mutex_unlock(&priv->cfp.lock);
+ if (ret)
+ return ret;
+
+ /* Pass up the commands to the attached master network device.
+ * This can fail, so roll back the operation if we need to.
+ */
+ if (p->ethtool_ops->set_rxnfc) {
+ ret = p->ethtool_ops->set_rxnfc(p, nfc);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_lock(&priv->cfp.lock);
+ bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+ mutex_unlock(&priv->cfp.lock);
+ } else {
+ ret = 0;
+ }
+ }
+
return ret;
}
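
Both rxnfc paths end with the same commit/mirror pattern: apply the rule locally, try to mirror it on the master device, and treat -EOPNOTSUPP from the master as success while any other failure triggers a rollback of the local rule. A schematic userspace sketch of that error handling (helper names invented):

    #include <errno.h>
    #include <stdio.h>

    static int local_apply(int rule)   { (void)rule; return 0; }
    static void local_delete(int rule) { (void)rule; }

    /* Pretend the master rejects rule 42 and lacks rxnfc otherwise. */
    static int master_apply(int rule)
    {
            return rule == 42 ? -EINVAL : -EOPNOTSUPP;
    }

    static int set_rule(int rule)
    {
            int ret = local_apply(rule);

            if (ret)
                    return ret;

            ret = master_apply(rule);
            if (ret && ret != -EOPNOTSUPP) {
                    local_delete(rule);     /* roll back local state */
                    return ret;
            }
            return 0;                       /* -EOPNOTSUPP is swallowed */
    }

    int main(void)
    {
            printf("rule 7: %d\n", set_rule(7));    /* 0 */
            printf("rule 42: %d\n", set_rule(42));  /* negative errno */
            return 0;
    }
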
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 3ccd5a865dcb..0a1e530d52b7 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -168,6 +168,8 @@ enum bcm_sf2_reg_offs {
#define CORE_SWITCH_CTRL 0x00088
#define MII_DUMB_FWDG_EN (1 << 6)
+#define CORE_DIS_LEARN 0x000f0
+
#define CORE_SFT_LRN_CTRL 0x000f8
#define SW_LEARN_CNTL(x) (1 << (x))
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7210c49b7922..54e0ca6ed730 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1102,6 +1102,15 @@ static const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
},
+ {
+ .chip_id = 0x00989700,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ },
};
static int ksz_switch_init(struct ksz_device *dev)
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index c51946983bed..8c1778b42701 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -195,6 +195,7 @@ static int ksz_spi_remove(struct spi_device *spi)
static const struct of_device_id ksz_dt_ids[] = {
{ .compatible = "microchip,ksz9477" },
+ { .compatible = "microchip,ksz9897" },
{},
};
MODULE_DEVICE_TABLE(of, ksz_dt_ids);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 437cd6eb4faa..8da3d39e3218 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+/* To be called with reg_lock held */
static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
{
int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g1_irq_free_common(chip);
-
+ /*
+ * free_irq must be called without reg_lock taken because the irq
+ * handler takes this lock, too.
+ */
free_irq(chip->irq, chip);
+
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g1_irq_free_common(chip);
-
kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
kthread_destroy_worker(chip->kworker);
+
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mutex_unlock(&chip->reg_lock);
}
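
Both teardown fixes enforce the same lock-ordering rule: anything that waits for the interrupt handler to finish (free_irq(), cancelling the poll work) must run before reg_lock is taken, because the handler acquires reg_lock itself; the old order could deadlock. Schematically, assuming a handler that takes the same mutex:

    #include <pthread.h>

    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

    static void irq_handler(void)
    {
            pthread_mutex_lock(&reg_lock);  /* handler needs register access */
            /* ... ack and dispatch ... */
            pthread_mutex_unlock(&reg_lock);
    }

    /* Stand-in for free_irq()/kthread_cancel_delayed_work_sync(): blocks
     * until any running irq_handler() has returned.
     */
    static void wait_for_handler(void) { }

    static void teardown(void)
    {
            wait_for_handler();             /* must run with reg_lock NOT held */

            pthread_mutex_lock(&reg_lock);
            /* free per-irq mappings, as in *_g1_irq_free_common() */
            pthread_mutex_unlock(&reg_lock);
    }

    int main(void)
    {
            irq_handler();
            teardown();
            return 0;
    }
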
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -515,7 +524,7 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
}
static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
- int link, int speed, int duplex,
+ int link, int speed, int duplex, int pause,
phy_interface_t mode)
{
int err;
@@ -534,6 +543,12 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
+ if (chip->info->ops->port_set_pause) {
+ err = chip->info->ops->port_set_pause(chip, port, pause);
+ if (err)
+ goto restore_link;
+ }
+
if (chip->info->ops->port_set_duplex) {
err = chip->info->ops->port_set_duplex(chip, port, duplex);
if (err && err != -EOPNOTSUPP)
@@ -575,17 +590,100 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
- phydev->duplex, phydev->interface);
+ phydev->duplex, phydev->pause,
+ phydev->interface);
mutex_unlock(&chip->reg_lock);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
}
+static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (!phy_interface_mode_is_8023z(state->interface)) {
+ /* 10M and 100M are only supported in non-802.3z mode */
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ }
+}
+
+static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ /* FIXME: if the port is in 1000Base-X mode, then it only supports
+ * 1000M FD speeds. In this case, CMODE will indicate 5.
+ */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
+static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ /* No ethtool bits for 200Mbps */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
+static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (port >= 9)
+ phylink_set(mask, 2500baseX_Full);
+
+ /* No ethtool bits for 200Mbps */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
+static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (port >= 9) {
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ }
+
+ mv88e6390_phylink_validate(chip, port, mask, state);
+}
+
static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set_port_modes(mask);
+
+ if (chip->info->ops->phylink_validate)
+ chip->info->ops->phylink_validate(chip, port, mask, state);
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
@@ -595,7 +693,10 @@ static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
int err;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_link_state(chip, port, state);
+ if (chip->info->ops->port_link_state)
+ err = chip->info->ops->port_link_state(chip, port, state);
+ else
+ err = -EOPNOTSUPP;
mutex_unlock(&chip->reg_lock);
return err;
@@ -606,7 +707,7 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int speed, duplex, link, err;
+ int speed, duplex, link, pause, err;
if (mode == MLO_AN_PHY)
return;
@@ -620,9 +721,10 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
duplex = DUPLEX_UNFORCED;
link = LINK_UNFORCED;
}
+ pause = !!phylink_test(state->advertising, Pause);
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex,
+ err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause,
state->interface);
mutex_unlock(&chip->reg_lock);
@@ -2071,6 +2173,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
int err;
u16 reg;
+ chip->ports[port].chip = chip;
+ chip->ports[port].port = port;
+
/* MAC Forcing register: don't force link, speed, duplex or flow control
* state to any particular values on physical ports, but force the CPU
* port and all DSA ports to their maximum bandwidth and full duplex.
@@ -2078,10 +2183,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
SPEED_MAX, DUPLEX_FULL,
+ PAUSE_OFF,
PHY_INTERFACE_MODE_NA);
else
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
+ PAUSE_ON,
PHY_INTERFACE_MODE_NA);
if (err)
return err;
@@ -2230,7 +2337,12 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
int err;
mutex_lock(&chip->reg_lock);
+
err = mv88e6xxx_serdes_power(chip, port, true);
+
+ if (!err && chip->info->ops->serdes_irq_setup)
+ err = chip->info->ops->serdes_irq_setup(chip, port);
+
mutex_unlock(&chip->reg_lock);
return err;
@@ -2242,8 +2354,13 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
mutex_lock(&chip->reg_lock);
+
+ if (chip->info->ops->serdes_irq_free)
+ chip->info->ops->serdes_irq_free(chip, port);
+
if (mv88e6xxx_serdes_power(chip, port, false))
dev_err(chip->dev, "failed to power off SERDES\n");
+
mutex_unlock(&chip->reg_lock);
}
@@ -2277,6 +2394,7 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ u8 cmode;
int err;
int i;
@@ -2285,6 +2403,17 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
mutex_lock(&chip->reg_lock);
+ /* Cache the cmode of each port. */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ if (chip->info->ops->port_get_cmode) {
+ err = chip->info->ops->port_get_cmode(chip, i, &cmode);
+ if (err)
+ goto unlock;
+
+ chip->ports[i].cmode = cmode;
+ }
+ }
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -2592,6 +2721,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2608,7 +2739,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6341_serdes_power,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -2624,6 +2755,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_link_state = mv88e6185_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2635,6 +2768,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6097_ops = {
@@ -2657,6 +2791,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2671,6 +2807,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6123_ops = {
@@ -2688,6 +2825,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2701,6 +2840,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6131_ops = {
@@ -2721,6 +2861,9 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
+ .port_set_pause = mv88e6185_port_set_pause,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2736,6 +2879,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
@@ -2761,6 +2905,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2774,7 +2920,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -2797,6 +2945,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2810,6 +2960,9 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6165_avb_ops,
+ .ptp_ops = &mv88e6165_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
@@ -2825,6 +2978,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2838,6 +2993,9 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6165_avb_ops,
+ .ptp_ops = &mv88e6165_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -2861,6 +3019,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2874,6 +3034,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -2899,6 +3060,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2915,6 +3078,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -2938,6 +3102,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2951,7 +3117,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6341_serdes_power,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -2977,6 +3143,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2993,6 +3161,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -3009,6 +3178,9 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_set_pause = mv88e6185_port_set_pause,
+ .port_link_state = mv88e6185_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3024,6 +3196,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6190_ops = {
@@ -3045,6 +3218,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3060,7 +3235,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -3082,6 +3260,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3096,8 +3276,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .serdes_power = mv88e6390x_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6390x_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -3119,6 +3302,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3134,6 +3319,11 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -3159,6 +3349,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3176,6 +3368,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -3198,6 +3392,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3213,8 +3409,12 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -3239,6 +3439,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3253,6 +3455,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -3277,6 +3481,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3289,6 +3495,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -3314,6 +3522,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3327,8 +3537,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -3352,6 +3565,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3365,6 +3580,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -3388,6 +3604,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3402,6 +3620,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -3427,6 +3647,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3444,9 +3666,11 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
.serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
.serdes_get_strings = mv88e6352_serdes_get_strings,
.serdes_get_stats = mv88e6352_serdes_get_stats,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -3471,6 +3695,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3486,8 +3712,12 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -3512,6 +3742,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3526,9 +3758,13 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .serdes_power = mv88e6390x_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390x_phylink_validate,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -3680,6 +3916,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
+ .ptp_support = true,
.ops = &mv88e6161_ops,
},
@@ -3702,6 +3939,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6165_ops,
},
@@ -4506,12 +4744,10 @@ out_g2_irq:
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
out_g1_irq:
- mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
- mutex_unlock(&chip->reg_lock);
out:
if (pdata)
dev_put(pdata->netdev);
@@ -4539,12 +4775,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
- mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
- mutex_unlock(&chip->reg_lock);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8ac3fbb15352..f9ecb7872d32 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -155,6 +155,7 @@ struct mv88e6xxx_bus_ops;
struct mv88e6xxx_irq_ops;
struct mv88e6xxx_gpio_ops;
struct mv88e6xxx_avb_ops;
+struct mv88e6xxx_ptp_ops;
struct mv88e6xxx_irq {
u16 masked;
@@ -190,12 +191,16 @@ struct mv88e6xxx_port_hwtstamp {
};
struct mv88e6xxx_port {
+ struct mv88e6xxx_chip *chip;
+ int port;
u64 serdes_stats[2];
u64 atu_member_violation;
u64 atu_miss_violation;
u64 atu_full_violation;
u64 vtu_member_violation;
u64 vtu_miss_violation;
+ u8 cmode;
+ int serdes_irq;
};
struct mv88e6xxx_chip {
@@ -273,6 +278,7 @@ struct mv88e6xxx_chip {
struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
u16 trig_config;
u16 evcap_config;
+ u16 enable_count;
/* Per-port timestamping resources. */
struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
@@ -349,6 +355,13 @@ struct mv88e6xxx_ops {
*/
int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
+#define PAUSE_ON 1
+#define PAUSE_OFF 0
+
+ /* Enable/disable sending Pause */
+ int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
+ int pause);
+
#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
@@ -381,12 +394,16 @@ struct mv88e6xxx_ops {
*/
int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
+ int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
/* Some devices have a per port register indicating what is
* the upstream port this port should forward to.
*/
int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
+ /* Return the port link state, as required by phylink */
+ int (*port_link_state)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state);
/* Snapshot the statistics for a port. The statistics can then
* be read back at leisure but still with a consistent view.
@@ -418,6 +435,10 @@ struct mv88e6xxx_ops {
/* Power on/off a SERDES interface */
int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on);
+ /* SERDES interrupt handling */
+ int (*serdes_irq_setup)(struct mv88e6xxx_chip *chip, int port);
+ void (*serdes_irq_free)(struct mv88e6xxx_chip *chip, int port);
+
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
@@ -439,6 +460,14 @@ struct mv88e6xxx_ops {
/* Remote Management Unit operations */
int (*rmu_disable)(struct mv88e6xxx_chip *chip);
+
+ /* Precision Time Protocol operations */
+ const struct mv88e6xxx_ptp_ops *ptp_ops;
+
+ /* Phylink */
+ void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state);
};
struct mv88e6xxx_irq_ops {
@@ -486,6 +515,24 @@ struct mv88e6xxx_avb_ops {
int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data);
};
+struct mv88e6xxx_ptp_ops {
+ u64 (*clock_read)(const struct cyclecounter *cc);
+ int (*ptp_enable)(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+ int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+ void (*event_work)(struct work_struct *ugly);
+ int (*port_enable)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
+ int (*global_enable)(struct mv88e6xxx_chip *chip);
+ int (*global_disable)(struct mv88e6xxx_chip *chip);
+ int n_ext_ts;
+ int arr0_sts_reg;
+ int arr1_sts_reg;
+ int dep_sts_reg;
+ u32 rx_filters;
+};
+
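The core dispatches all PTP work through this per-family ops table. A minimal, hypothetical sketch of the NULL-checked dispatch pattern follows (the function name is illustrative and not part of this patch; families without PTP simply leave the hooks NULL):

static int example_ptp_port_enable(struct mv88e6xxx_chip *chip, int port)
{
	const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;

	/* Chips without PTP support leave ptp_ops or the hook NULL */
	if (!ptp_ops || !ptp_ops->port_enable)
		return -EOPNOTSUPP;

	return ptp_ops->port_enable(chip, port);
}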
#define STATS_TYPE_PORT BIT(0)
#define STATS_TYPE_BANK0 BIT(1)
#define STATS_TYPE_BANK1 BIT(2)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 37e8ce2c72a0..194660d8c783 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -160,6 +160,7 @@
#define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000
#define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00
#define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe
+#define MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
#define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
#define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00
#define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e
@@ -335,6 +336,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
@@ -484,6 +486,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
+static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {};
static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {};
static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {};
diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c
index 2e398ccb88ca..672b503a67e1 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_avb.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c
@@ -130,6 +130,31 @@ const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {
.tai_write = mv88e6352_g2_avb_tai_write,
};
+static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6352_g2_avb_port_ptp_read(chip,
+ MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6352_g2_avb_port_ptp_write(chip,
+ MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data);
+}
+
+const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {
+ .port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
+ .port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
+ .ptp_read = mv88e6352_g2_avb_ptp_read,
+ .ptp_write = mv88e6352_g2_avb_ptp_write,
+ .tai_read = mv88e6165_g2_avb_tai_read,
+ .tai_write = mv88e6165_g2_avb_tai_write,
+};
+
static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
int port, int addr, u16 *data,
int len)
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index a036c490b7ce..a17c16a2ab78 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -51,17 +51,30 @@ static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr,
return chip->info->ops->avb_ops->ptp_write(chip, addr, data);
}
+static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data)
+{
+ if (!chip->info->ops->avb_ops->ptp_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1);
+}
+
/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
* timestamp. When working properly, hardware will produce a timestamp
* within 1ms. Software may encounter delays due to MDIO contention, so
* the timeout is set accordingly.
*/
-#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(20)
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ptp_ops *ptp_ops;
+ struct mv88e6xxx_chip *chip;
+
+ chip = ds->priv;
+ ptp_ops = chip->info->ops->ptp_ops;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
@@ -74,17 +87,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ info->rx_filters = ptp_ops->rx_filters;
return 0;
}
@@ -92,10 +95,9 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
struct hwtstamp_config *config)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
bool tstamp_enable = false;
- u16 port_config0;
- int err;
/* Prevent the TX/RX paths from trying to interact with the
* timestamp hardware while we reconfigure it.
@@ -120,6 +122,14 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
/* The switch supports timestamping both L2 and L4; one cannot be
* disabled independently of the other.
*/
+
+ if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) {
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ dev_dbg(chip->dev, "Unsupported rx_filter %d\n",
+ config->rx_filter);
+ return -ERANGE;
+ }
+
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tstamp_enable = false;
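The new mask test above makes the accepted filters per-family: a requested rx_filter is honoured only if its bit is set in ptp_ops->rx_filters. As a worked example based on the tables later in this patch, the 6165 mask omits the L4-only bits, so HWTSTAMP_FILTER_PTP_V2_L4_EVENT is rejected with -ERANGE on that family while HWTSTAMP_FILTER_PTP_V2_L2_EVENT is accepted.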
@@ -141,24 +151,22 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
return -ERANGE;
}
+ mutex_lock(&chip->reg_lock);
if (tstamp_enable) {
- /* Disable transportSpecific value matching, so that packets
- * with either 1588 (0) and 802.1AS (1) will be timestamped.
- */
- port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH;
+ chip->enable_count += 1;
+ if (chip->enable_count == 1 && ptp_ops->global_enable)
+ ptp_ops->global_enable(chip);
+ if (ptp_ops->port_enable)
+ ptp_ops->port_enable(chip, port);
} else {
- /* Disable PTP. This disables both RX and TX timestamping. */
- port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP;
+ if (ptp_ops->port_disable)
+ ptp_ops->port_disable(chip, port);
+ chip->enable_count -= 1;
+ if (chip->enable_count == 0 && ptp_ops->global_disable)
+ ptp_ops->global_disable(chip);
}
-
- mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
- port_config0);
mutex_unlock(&chip->reg_lock);
- if (err < 0)
- return err;
-
/* Once hardware has been configured, enable timestamp checks
* in the RX/TX paths.
*/
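The enable_count handling above is a first-on/last-off refcount: the global enable runs only when the first port turns timestamping on, and the global disable only when the last port turns it off. A self-contained, hypothetical sketch of that pattern (illustrative names, not from this patch):

struct example_dev {
	int enable_count;
	int (*global_enable)(struct example_dev *dev);
	int (*global_disable)(struct example_dev *dev);
};

static int example_enable(struct example_dev *dev)
{
	/* The first user brings up the shared resource */
	if (dev->enable_count++ == 0 && dev->global_enable)
		return dev->global_enable(dev);
	return 0;
}

static int example_disable(struct example_dev *dev)
{
	/* The last user tears the shared resource down */
	if (--dev->enable_count == 0 && dev->global_disable)
		return dev->global_disable(dev);
	return 0;
}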
@@ -338,17 +346,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_port_hwtstamp *ps)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct sk_buff *skb;
skb = skb_dequeue(&ps->rx_queue);
if (skb)
- mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS,
+ mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg,
&ps->rx_queue);
skb = skb_dequeue(&ps->rx_queue2);
if (skb)
- mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS,
+ mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg,
&ps->rx_queue2);
}
@@ -389,6 +398,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_port_hwtstamp *ps)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct skb_shared_hwtstamps shhwtstamps;
u16 departure_block[4], status;
struct sk_buff *tmp_skb;
@@ -401,7 +411,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
- MV88E6XXX_PORT_PTP_DEP_STS,
+ ptp_ops->dep_sts_reg,
departure_block,
ARRAY_SIZE(departure_block));
mutex_unlock(&chip->reg_lock);
@@ -425,8 +435,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
/* We have the timestamp; go ahead and clear valid now */
mutex_lock(&chip->reg_lock);
- mv88e6xxx_port_ptp_write(chip, ps->port_id,
- MV88E6XXX_PORT_PTP_DEP_STS, 0);
+ mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0);
mutex_unlock(&chip->reg_lock);
status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
@@ -522,8 +531,48 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
return true;
}
+int mv88e6165_global_disable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
+ if (err)
+ return err;
+ val |= MV88E6165_PTP_CFG_DISABLE_PTP;
+
+ return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
+}
+
+int mv88e6165_global_enable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
+ if (err)
+ return err;
+
+ val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK);
+
+ return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
+}
+
+int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
+}
+
+int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH);
+}
+
static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
ps->port_id = port;
@@ -531,12 +580,15 @@ static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
skb_queue_head_init(&ps->rx_queue);
skb_queue_head_init(&ps->rx_queue2);
- return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
- MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
+ if (ptp_ops->port_disable)
+ return ptp_ops->port_disable(chip, port);
+
+ return 0;
}
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int err;
int i;
@@ -547,6 +599,18 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
return err;
}
+ /* Disable PTP globally */
+ if (ptp_ops->global_disable) {
+ err = ptp_ops->global_disable(chip);
+ if (err)
+ return err;
+ }
+
+ /* Set the ethertype of L2 PTP messages */
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ if (err)
+ return err;
+
/* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to
* timestamp. This affects all ports that have timestamping enabled,
* but the timestamp config is per-port; thus we configure all events
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index bc71c9212a08..b9a72661bcc4 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -19,7 +19,7 @@
#include "chip.h"
-/* Global PTP registers */
+/* Global 6352 PTP registers */
/* Offset 0x00: PTP EtherType */
#define MV88E6XXX_PTP_ETHERTYPE 0x00
@@ -34,6 +34,12 @@
/* Offset 0x02: Timestamp Arrival Capture Pointers */
#define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02
+/* Offset 0x05: PTP Global Configuration */
+#define MV88E6165_PTP_CFG 0x05
+#define MV88E6165_PTP_CFG_TSPEC_MASK 0xf000
+#define MV88E6165_PTP_CFG_DISABLE_TS_OVERWRITE BIT(1)
+#define MV88E6165_PTP_CFG_DISABLE_PTP BIT(0)
+
/* Offset 0x07: PTP Global Configuration */
#define MV88E6341_PTP_CFG 0x07
#define MV88E6341_PTP_CFG_UPDATE 0x8000
@@ -46,7 +52,7 @@
/* Offset 0x08: PTP Interrupt Status */
#define MV88E6XXX_PTP_IRQ_STATUS 0x08
-/* Per-Port PTP Registers */
+/* Per-Port 6352 PTP Registers */
/* Offset 0x00: PTP Configuration 0 */
#define MV88E6XXX_PORT_PTP_CFG0 0x00
#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12
@@ -123,6 +129,10 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
+int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port);
+int mv88e6165_global_enable(struct mv88e6xxx_chip *chip);
+int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 429d0ebcd5b1..92945841c8e8 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -19,6 +19,7 @@
#include "chip.h"
#include "port.h"
+#include "serdes.h"
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val)
@@ -36,6 +37,29 @@ int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
return mv88e6xxx_write(chip, addr, reg, val);
}
+/* Offset 0x00: MAC (or PCS or Physical) Status Register
+ *
+ * For most devices, this is read only. However the 6185 has the MyPause
+ * bit read/write.
+ */
+int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
+ int pause)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ if (pause)
+ reg |= MV88E6XXX_PORT_STS_MY_PAUSE;
+ else
+ reg &= ~MV88E6XXX_PORT_STS_MY_PAUSE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
+}
+
/* Offset 0x01: MAC (or PCS or Physical) Control Register
*
* Link, Duplex and Flow Control have one force bit, one value bit.
@@ -318,8 +342,9 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
- u16 reg;
+ int lane;
u16 cmode;
+ u16 reg;
int err;
if (mode == PHY_INTERFACE_MODE_NA)
@@ -349,6 +374,20 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return lane;
+
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390_serdes_power(chip, port, false);
+ if (err)
+ return err;
+
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
@@ -360,12 +399,38 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
+
+ err = mv88e6390_serdes_power(chip, port, true);
+ if (err)
+ return err;
+
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_enable(chip, port, lane);
+ if (err)
+ return err;
+ }
}
+ chip->ports[port].cmode = cmode;
+
return 0;
}
-int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ *cmode = reg & MV88E6185_PORT_STS_CMODE_MASK;
+
+ return 0;
+}
+
+int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
{
int err;
u16 reg;
@@ -379,7 +444,7 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
return 0;
}
-int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
+int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state)
{
int err;
@@ -400,7 +465,7 @@ int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
state->speed = SPEED_1000;
break;
case MV88E6XXX_PORT_STS_SPEED_10000:
- if ((reg &MV88E6XXX_PORT_STS_CMODE_MASK) ==
+ if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) ==
MV88E6XXX_PORT_STS_CMODE_2500BASEX)
state->speed = SPEED_2500;
else
@@ -417,6 +482,42 @@ int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state)
+{
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
+ u8 cmode = chip->ports[port].cmode;
+
+ /* When a port is in "Cross-chip serdes" mode, it uses
+ * 1000Base-X full duplex mode, but there is no automatic
+ * link detection. Use the sync OK status for link (as it
+ * would do for 1000Base-X mode.)
+ */
+ if (cmode == MV88E6185_PORT_STS_CMODE_SERDES) {
+ u16 mac;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port,
+ MV88E6XXX_PORT_MAC_CTL, &mac);
+ if (err)
+ return err;
+
+ state->link = !!(mac & MV88E6185_PORT_MAC_CTL_SYNC_OK);
+ state->an_enabled = 1;
+ state->an_complete =
+ !!(mac & MV88E6185_PORT_MAC_CTL_AN_DONE);
+ state->duplex =
+ state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
+ state->speed =
+ state->link ? SPEED_1000 : SPEED_UNKNOWN;
+
+ return 0;
+ }
+ }
+
+ return mv88e6352_port_link_state(chip, port, state);
+}
+
/* Offset 0x02: Jamming Control
*
* Do not limit the period of time that this port can be paused for by
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 5e1db1b221ca..f32f56af8e35 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,14 +42,28 @@
#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b
#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c
#define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d
+#define MV88E6185_PORT_STS_CDUPLEX 0x0008
+#define MV88E6185_PORT_STS_CMODE_MASK 0x0007
+#define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000
+#define MV88E6185_PORT_STS_CMODE_MII_100_FD_PS 0x0001
+#define MV88E6185_PORT_STS_CMODE_MII_100 0x0002
+#define MV88E6185_PORT_STS_CMODE_MII_10 0x0003
+#define MV88E6185_PORT_STS_CMODE_SERDES 0x0004
+#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
+#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
+#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK 0x8000
#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK 0x4000
+#define MV88E6185_PORT_MAC_CTL_SYNC_OK 0x4000
#define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000
#define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000
#define MV88E6352_PORT_MAC_CTL_200BASE 0x1000
+#define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400
+#define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200
+#define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100
#define MV88E6XXX_PORT_MAC_CTL_FC 0x0080
#define MV88E6XXX_PORT_MAC_CTL_FORCE_FC 0x0040
#define MV88E6XXX_PORT_MAC_CTL_LINK_UP 0x0020
@@ -242,6 +256,8 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
u16 val);
+int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
+ int pause);
int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
@@ -295,8 +311,11 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
-int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
-int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
+int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state);
+int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index bd85e2c390e1..4b336d8d4c67 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -16,6 +16,7 @@
#include "chip.h"
#include "global2.h"
+#include "hwtstamp.h"
#include "ptp.h"
/* Raw timestamps are in units of 8-ns clock periods. */
@@ -50,7 +51,7 @@ static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data)
}
/* TODO: places where this is called should be using pinctrl */
-static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
+static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
int func, int input)
{
int err;
@@ -65,7 +66,7 @@ static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
}
-static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
@@ -79,13 +80,27 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
return ((u32)phc_time[1] << 16) | phc_time[0];
}
-/* mv88e6xxx_config_eventcap - configure TAI event capture
+static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+ u16 phc_time[2];
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ ARRAY_SIZE(phc_time));
+ if (err)
+ return 0;
+ else
+ return ((u32)phc_time[1] << 16) | phc_time[0];
+}
+
+/* mv88e6352_config_eventcap - configure TAI event capture
* @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
-static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event,
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
int rising)
{
u16 global_config;
@@ -118,7 +133,7 @@ static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event,
return err;
}
-static void mv88e6xxx_tai_event_work(struct work_struct *ugly)
+static void mv88e6352_tai_event_work(struct work_struct *ugly)
{
struct delayed_work *dw = to_delayed_work(ugly);
struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw);
@@ -232,7 +247,7 @@ static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp,
return 0;
}
-static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip,
+static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
struct ptp_clock_request *rq, int on)
{
int rising = (rq->extts.flags & PTP_RISING_EDGE);
@@ -250,18 +265,18 @@ static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip,
if (on) {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ;
- err = mv88e6xxx_set_gpio_func(chip, pin, func, true);
+ err = mv88e6352_set_gpio_func(chip, pin, func, true);
if (err)
goto out;
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
- err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
- err = mv88e6xxx_set_gpio_func(chip, pin, func, true);
+ err = mv88e6352_set_gpio_func(chip, pin, func, true);
cancel_delayed_work_sync(&chip->tai_event_work);
}
@@ -272,20 +287,20 @@ out:
return err;
}
-static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp,
+static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- return mv88e6xxx_ptp_enable_extts(chip, rq, on);
+ return mv88e6352_ptp_enable_extts(chip, rq, on);
default:
return -EOPNOTSUPP;
}
}
-static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
switch (func) {
@@ -299,6 +314,55 @@ static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
return 0;
}
+const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+};
+
+const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
+ .clock_read = mv88e6165_ptp_clock_read,
+ .global_enable = mv88e6165_global_enable,
+ .global_disable = mv88e6165_global_disable,
+ .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+};
+
+static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+
+ if (chip->info->ops->ptp_ops->clock_read)
+ return chip->info->ops->ptp_ops->clock_read(cc);
+
+ return 0;
+}
+
/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
* seconds; this task forces periodic reads so that we don't miss any.
*/
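For reference, the interval follows directly from the counter width and the 8 ns tick a 125 MHz clock implies: 2^32 * 8 ns = 34,359,738,368 ns, roughly 34.36 seconds, so any polling period comfortably below the wrap time is safe.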
@@ -317,6 +381,7 @@ static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int i;
/* Set up the cycle counter */
@@ -330,14 +395,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
ktime_to_ns(ktime_get_real()));
INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check);
- INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work);
+ if (ptp_ops->event_work)
+ INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work);
chip->ptp_clock_info.owner = THIS_MODULE;
snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
dev_name(chip->dev));
chip->ptp_clock_info.max_adj = 1000000;
- chip->ptp_clock_info.n_ext_ts = 1;
+ chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
chip->ptp_clock_info.n_per_out = 0;
chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip);
chip->ptp_clock_info.pps = 0;
@@ -355,8 +421,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime;
- chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable;
- chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify;
+ chip->ptp_clock_info.enable = ptp_ops->ptp_enable;
+ chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
@@ -373,7 +439,8 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
cancel_delayed_work_sync(&chip->overflow_work);
- cancel_delayed_work_sync(&chip->tai_event_work);
+ if (chip->info->ops->ptp_ops->event_work)
+ cancel_delayed_work_sync(&chip->tai_event_work);
ptp_clock_unregister(chip->ptp_clock);
chip->ptp_clock = NULL;
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 10f271ab650d..28a030840517 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -78,6 +78,71 @@
/* Offset 0x12: Lock Status */
#define MV88E6XXX_TAI_LOCK_STATUS 0x12
+/* 6165 Global Control Registers */
+/* Offset 0x00: Ether Type */
+#define MV88E6XXX_PTP_GC_ETYPE 0x00
+
+/* Offset 0x01: Message ID */
+#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
+
+/* Offset 0x02: Time Stamp Arrive Time */
+#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
+
+/* Offset 0x03: Port Arrival Interrupt Enable */
+#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
+
+/* Offset 0x04: Port Departure Interrupt Enable */
+#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
+
+/* Offset 0x05: Configuration */
+#define MV88E6XXX_PTP_GC_CONFIG 0x05
+#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
+#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
+
+/* Offset 0x8: Interrupt Status */
+#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
+
+/* Offset 0x9/0xa: Global Time */
+#define MV88E6XXX_PTP_GC_TIME_LO 0x09
+#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+
+/* 6165 Per Port Registers */
+/* Offset 0: Arrival Time 0 Status */
+#define MV88E6165_PORT_PTP_ARR0_STS 0x00
+
+/* Offset 0x01/0x02: PTP Arrival 0 Time */
+#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
+#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
+
+/* Offset 0x03: PTP Arrival 0 Sequence ID */
+#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
+
+/* Offset 0x04: PTP Arrival 1 Status */
+#define MV88E6165_PORT_PTP_ARR1_STS 0x04
+
+/* Offset 0x05/0x06: PTP Arrival 1 Time */
+#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
+#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
+
+/* Offset 0x07: PTP Arrival 1 Sequence ID */
+#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
+
+/* Offset 0x08: PTP Departure Status */
+#define MV88E6165_PORT_PTP_DEP_STS 0x08
+
+/* Offset 0x09/0x0a: PTP Departure Time */
+#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
+#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
+
+/* Offset 0x0b: PTP Departure Sequence ID */
+#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
+
+/* Offset 0x0d: Port Status */
+#define MV88E6165_PORT_STATUS 0x0d
+
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
@@ -87,6 +152,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \
ptp_clock_info)
+extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
+
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
@@ -103,6 +171,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
}
+static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
#endif /* _MV88E6XXX_PTP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 880b2cf0a530..e82983975754 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -11,6 +11,8 @@
* (at your option) any later version.
*/
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/mii.h>
#include "chip.h"
@@ -35,6 +37,22 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
reg, val);
}
+static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
+ int lane, int device, int reg, u16 *val)
+{
+ int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
+
+ return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
+}
+
+static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
+ int lane, int device, int reg, u16 val)
+{
+ int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
+
+ return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
+}
+
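These helpers compose a Clause 45 address: the MMD (device) number sits above the 16-bit register, and MII_ADDR_C45 flags the access as C45. As a worked example, for MDIO_MMD_PHYXS (device 4) and the SGMII status register at 0x2001 used below, reg_c45 works out to MII_ADDR_C45 | (4 << 16) | 0x2001.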
static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
{
u16 val, new_val;
@@ -57,14 +75,7 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
{
- u8 cmode;
- int err;
-
- err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
- if (err) {
- dev_err(chip->dev, "failed to read cmode\n");
- return false;
- }
+ u8 cmode = chip->ports[port].cmode;
if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) ||
(cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) ||
@@ -174,16 +185,121 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
+/* Return the SERDES lane address a port is using. Only Ports 9 and 10
+ * have SERDES lanes. Returns -ENODEV if a port does not have a lane.
+ */
+static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ switch (port) {
+ case 9:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return MV88E6390_PORT9_LANE0;
+ return -ENODEV;
+ case 10:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return MV88E6390_PORT10_LANE0;
+ return -ENODEV;
+ default:
+ return -ENODEV;
+ }
+}
+
+/* Return the SERDES lane address a port is using. Ports 9 and 10 can
+ * use multiple lanes. If so, return the first lane the port uses.
+ * Returns -ENODEV if a port does not have a lane.
+ */
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode_port9, cmode_port10, cmode_port;
+
+ cmode_port9 = chip->ports[9].cmode;
+ cmode_port10 = chip->ports[10].cmode;
+ cmode_port = chip->ports[port].cmode;
+
+ switch (port) {
+ case 2:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT9_LANE1;
+ return -ENODEV;
+ case 3:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT9_LANE2;
+ return -ENODEV;
+ case 4:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT9_LANE3;
+ return -ENODEV;
+ case 5:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT10_LANE1;
+ return -ENODEV;
+ case 6:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT10_LANE2;
+ return -ENODEV;
+ case 7:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT10_LANE3;
+ return -ENODEV;
+ case 9:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ return MV88E6390_PORT9_LANE0;
+ return -ENODEV;
+ case 10:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ return MV88E6390_PORT10_LANE0;
+ return -ENODEV;
+ default:
+ return -ENODEV;
+ }
+}
+
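For quick reference, the mapping the switch statement above implements, assuming the cmode preconditions on ports 9 and 10 hold:

	port 2 -> MV88E6390_PORT9_LANE1    port 5 -> MV88E6390_PORT10_LANE1
	port 3 -> MV88E6390_PORT9_LANE2    port 6 -> MV88E6390_PORT10_LANE2
	port 4 -> MV88E6390_PORT9_LANE3    port 7 -> MV88E6390_PORT10_LANE3
	port 9 -> MV88E6390_PORT9_LANE0    port 10 -> MV88E6390_PORT10_LANE0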
/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
+static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
+ bool on)
{
u16 val, new_val;
- int reg_c45;
int err;
- reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
- MV88E6390_PCS_CONTROL_1;
- err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PCS_CONTROL_1, &val);
+
if (err)
return err;
@@ -195,22 +311,21 @@ static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN;
if (val != new_val)
- err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PCS_CONTROL_1, new_val);
return err;
}
-/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr,
- bool on)
+/* Set the power on/off for SGMII and 1000Base-X */
+static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
+ bool on)
{
u16 val, new_val;
- int reg_c45;
int err;
- reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
- MV88E6390_SGMII_CONTROL;
- err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_CONTROL, &val);
if (err)
return err;
@@ -222,127 +337,261 @@ static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr,
new_val = val | MV88E6390_SGMII_CONTROL_PDOWN;
if (val != new_val)
- err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_CONTROL, new_val);
return err;
}
-static int mv88e6390_serdes_lower(struct mv88e6xxx_chip *chip, u8 cmode,
- int port_donor, int lane, bool rxaui, bool on)
+static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port,
+ int lane, bool on)
{
- int err;
- u8 cmode_donor;
-
- err = mv88e6xxx_port_get_cmode(chip, port_donor, &cmode_donor);
- if (err)
- return err;
+ u8 cmode = chip->ports[port].cmode;
- switch (cmode_donor) {
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- if (!rxaui)
- break;
- /* Fall through */
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
- cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)
- return mv88e6390_serdes_sgmii(chip, lane, on);
+ return mv88e6390_serdes_power_sgmii(chip, lane, on);
+ case MV88E6XXX_PORT_STS_CMODE_XAUI:
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ return mv88e6390_serdes_power_10g(chip, lane, on);
+ }
+
+ return 0;
+}
+
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int lane;
+
+ lane = mv88e6390_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
+
+ if (lane < 0)
+ return lane;
+
+ switch (port) {
+ case 9 ... 10:
+ return mv88e6390_serdes_power_lane(chip, port, lane, on);
}
+
+ return 0;
+}
+
+int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int lane;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
+
+ if (lane < 0)
+ return lane;
+
+ switch (port) {
+ case 2 ... 4:
+ case 5 ... 7:
+ case 9 ... 10:
+ return mv88e6390_serdes_power_lane(chip, port, lane, on);
+ }
+
return 0;
}
-static int mv88e6390_serdes_port9(struct mv88e6xxx_chip *chip, u8 cmode,
- bool on)
+static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
+ int port, int lane)
{
+ struct dsa_switch *ds = chip->ds;
+ u16 status;
+ bool up;
+
+ mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_STATUS, &status);
+
+ /* Status must be read twice in order to give the current link
+ * status. Otherwise the change in link status since the last
+ * read of the register is returned.
+ */
+ mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_STATUS, &status);
+ up = status & MV88E6390_SGMII_STATUS_LINK;
+
+ dsa_port_phylink_mac_change(ds, port, up);
+}
+
+static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
+ int lane)
+{
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_ENABLE,
+ MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP);
+}
+
+static int mv88e6390_serdes_irq_disable_sgmii(struct mv88e6xxx_chip *chip,
+ int lane)
+{
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_ENABLE, 0);
+}
+
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int err = 0;
+
switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_SGMII:
- return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT9_LANE0, on);
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_10g(chip, MV88E6390_PORT9_LANE0, on);
+ err = mv88e6390_serdes_irq_enable_sgmii(chip, lane);
}
- return 0;
+ return err;
}
-static int mv88e6390_serdes_port10(struct mv88e6xxx_chip *chip, u8 cmode,
- bool on)
+int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
+ int lane)
{
+ u8 cmode = chip->ports[port].cmode;
+ int err = 0;
+
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
- return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT10_LANE0, on);
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_10g(chip, MV88E6390_PORT10_LANE0, on);
+ err = mv88e6390_serdes_irq_disable_sgmii(chip, lane);
}
- return 0;
+ return err;
}
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
+ int lane, u16 *status)
{
- u8 cmode;
int err;
- err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
- if (err)
- return err;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_STATUS, status);
- switch (port) {
- case 2:
- return mv88e6390_serdes_lower(chip, cmode, 9,
- MV88E6390_PORT9_LANE1,
- false, on);
- case 3:
- return mv88e6390_serdes_lower(chip, cmode, 9,
- MV88E6390_PORT9_LANE2,
- true, on);
- case 4:
- return mv88e6390_serdes_lower(chip, cmode, 9,
- MV88E6390_PORT9_LANE3,
- true, on);
- case 5:
- return mv88e6390_serdes_lower(chip, cmode, 10,
- MV88E6390_PORT10_LANE1,
- false, on);
- case 6:
- return mv88e6390_serdes_lower(chip, cmode, 10,
- MV88E6390_PORT10_LANE2,
- true, on);
- case 7:
- return mv88e6390_serdes_lower(chip, cmode, 10,
- MV88E6390_PORT10_LANE3,
- true, on);
- case 9:
- return mv88e6390_serdes_port9(chip, cmode, on);
- case 10:
- return mv88e6390_serdes_port10(chip, cmode, on);
+ return err;
+}
+
+static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_port *port = dev_id;
+ struct mv88e6xxx_chip *chip = port->chip;
+ irqreturn_t ret = IRQ_NONE;
+ u8 cmode = port->cmode;
+ u16 status;
+ int lane;
+ int err;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port->port);
+
+ mutex_lock(&chip->reg_lock);
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
+ if (err)
+ goto out;
+ if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP)) {
+ ret = IRQ_HANDLED;
+ mv88e6390_serdes_irq_link_sgmii(chip, port->port, lane);
+ }
}
+out:
+ mutex_unlock(&chip->reg_lock);
- return 0;
+ return ret;
}
-int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
{
+ int lane;
int err;
- u8 cmode;
- if (port != 5)
+ /* Only support ports 9 and 10 at the moment */
+ if (port < 9)
return 0;
- err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
- if (err)
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+
+ if (lane == -ENODEV)
+ return 0;
+
+ if (lane < 0)
+ return lane;
+
+ chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain,
+ port);
+ if (chip->ports[port].serdes_irq < 0) {
+ dev_err(chip->dev, "Unable to map SERDES irq: %d\n",
+ chip->ports[port].serdes_irq);
+ return chip->ports[port].serdes_irq;
+ }
+
+ /* Requesting the IRQ will trigger irq callbacks. So we cannot
+ * hold the reg_lock.
+ */
+ mutex_unlock(&chip->reg_lock);
+ err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
+ mv88e6390_serdes_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-serdes",
+ &chip->ports[port]);
+ mutex_lock(&chip->reg_lock);
+
+ if (err) {
+ dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
+ err);
return err;
+ }
+
+ return mv88e6390_serdes_irq_enable(chip, port, lane);
+}
+
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+{
+ int lane = mv88e6390x_serdes_get_lane(chip, port);
+
+ if (port < 9)
+ return;
+
+ if (lane < 0)
+ return;
+
+ mv88e6390_serdes_irq_disable(chip, port, lane);
+
+ /* Freeing the IRQ will trigger irq callbacks. So we cannot
+ * hold the reg_lock.
+ */
+ mutex_unlock(&chip->reg_lock);
+ free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
+ mutex_lock(&chip->reg_lock);
+
+ chip->ports[port].serdes_irq = 0;
+}
+
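Both paths above drop reg_lock around request_threaded_irq() and free_irq(), because either call can end up invoking or waiting on the threaded handler, which itself takes reg_lock. A hypothetical, generic sketch of that unlock-around-blocking-call pattern (illustrative names, not part of this patch):

#include <linux/interrupt.h>
#include <linux/mutex.h>

static int example_request_irq_unlocked(struct mutex *lock, unsigned int irq,
					irq_handler_t thread_fn, void *dev_id)
{
	int err;

	mutex_unlock(lock);	/* the handler may run here and take the lock */
	err = request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT,
				   "example", dev_id);
	mutex_lock(lock);	/* restore the caller's locking state */

	return err;
}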
+int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ if (port != 5)
+ return 0;
if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- return mv88e6390_serdes_sgmii(chip, MV88E6341_ADDR_SERDES, on);
+ return mv88e6390_serdes_power_sgmii(chip, MV88E6341_ADDR_SERDES,
+ on);
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index b6e5fbd46b5e..b1496de9c6fe 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -29,7 +29,6 @@
#define MV88E6390_PORT10_LANE1 0x15
#define MV88E6390_PORT10_LANE2 0x16
#define MV88E6390_PORT10_LANE3 0x17
-#define MV88E6390_SERDES_DEVICE (4 << 16)
/* 10GBASE-R and 10GBASE-X4/X2 */
#define MV88E6390_PCS_CONTROL_1 0x1000
@@ -43,13 +42,36 @@
#define MV88E6390_SGMII_CONTROL_RESET BIT(15)
#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
+#define MV88E6390_SGMII_STATUS 0x2001
+#define MV88E6390_SGMII_STATUS_AN_DONE BIT(5)
+#define MV88E6390_SGMII_STATUS_REMOTE_FAULT BIT(4)
+#define MV88E6390_SGMII_STATUS_LINK BIT(2)
+#define MV88E6390_SGMII_INT_ENABLE 0xa001
+#define MV88E6390_SGMII_INT_SPEED_CHANGE BIT(14)
+#define MV88E6390_SGMII_INT_DUPLEX_CHANGE BIT(13)
+#define MV88E6390_SGMII_INT_PAGE_RX BIT(12)
+#define MV88E6390_SGMII_INT_AN_COMPLETE BIT(11)
+#define MV88E6390_SGMII_INT_LINK_DOWN BIT(10)
+#define MV88E6390_SGMII_INT_LINK_UP BIT(9)
+#define MV88E6390_SGMII_INT_SYMBOL_ERROR BIT(8)
+#define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7)
+#define MV88E6390_SGMII_INT_STATUS 0xa002
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+
#endif
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
new file mode 100644
index 000000000000..b4b839a1d095
--- /dev/null
+++ b/drivers/net/dsa/realtek-smi.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that requires bit-banging the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek-smi.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
+
+static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
+{
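+	/* Delay between SMI line transitions; clk_delay is in nanoseconds
+	 * and comes from the matched chip variant (see realtek_smi_probe()).
+	 */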
+ ndelay(smi->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_smi *smi)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(smi->mdc, 0);
+ gpiod_direction_output(smi->mdio, 1);
+ realtek_smi_clk_delay(smi);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+
+ /* CLK 2: */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_smi *smi)
+{
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 0);
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 1);
+
+	/* Add an extra clock cycle */
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(smi->mdio);
+ gpiod_direction_input(smi->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(smi);
+
+ /* Prepare data */
+ gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(smi);
+
+ /* Clocking */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ }
+}
+
+static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
+{
+ gpiod_direction_input(smi->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(smi);
+
+ /* Clocking */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ u = !!gpiod_get_value(smi->mdio);
+ gpiod_set_value(smi->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(smi->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(smi, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(smi->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
+{
+ realtek_smi_write_bits(smi, data, 8);
+ return realtek_smi_wait_for_ack(smi);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
+{
+ realtek_smi_write_bits(smi, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(smi, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(smi, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(smi, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(smi, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&smi->lock, flags);
+
+ realtek_smi_start(smi);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(smi, smi->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(smi, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(smi, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(smi, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(smi, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(smi);
+ spin_unlock_irqrestore(&smi->lock, flags);
+
+ return ret;
+}
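A minimal usage sketch; 0x0509 is the RTL8366RB chip ID register, per the defines in rtl8366rb.c later in this patch:

	u32 chip_id;
	int ret;

	ret = realtek_smi_read_reg(smi, 0x0509, &chip_id);
	if (!ret)
		dev_info(smi->dev, "chip ID %04x\n", chip_id);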
+
+static int realtek_smi_write_reg(struct realtek_smi *smi,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&smi->lock, flags);
+
+ realtek_smi_start(smi);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(smi, smi->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(smi, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(smi, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(smi, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(smi, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(smi, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(smi);
+ spin_unlock_irqrestore(&smi->lock, flags);
+
+ return ret;
+}
+
+/* There is one single case when we need to use this accessor and that
+ * is when issuing a soft reset. Since the device resets as soon as we
+ * write that bit, no ACK will come back for natural reasons.
+ */
+int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
+ u32 data)
+{
+ return realtek_smi_write_reg(smi, addr, data, false);
+}
+EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
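A sketch of the soft-reset case this comment describes; the register and bit names come from rtl8366rb.c later in this patch, while the settle delay is an assumption rather than a datasheet figure:

	realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
				    RTL8366RB_CHIP_CTRL_RESET_SW);
	msleep(100);	/* assumed settle time; the chip ACKs nothing here */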
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_smi *smi = ctx;
+
+ return realtek_smi_write_reg(smi, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_smi *smi = ctx;
+
+ return realtek_smi_read_reg(smi, reg, val);
+}
+
+static const struct regmap_config realtek_smi_mdio_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+};
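Since .reg_read/.reg_write are supplied instead of a bus, ordinary regmap calls on smi->map are routed through the bit-bang accessors above. A usage sketch with registers defined later in this patch:

	u32 val;

	regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &val);
	regmap_update_bits(smi->map, RTL8366RB_SGCR,
			   RTL8366RB_SGCR_EN_VLAN, RTL8366RB_SGCR_EN_VLAN);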
+
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_smi *smi = bus->priv;
+
+ return smi->ops->phy_read(smi, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_smi *smi = bus->priv;
+
+ return smi->ops->phy_write(smi, addr, regnum, val);
+}
+
+int realtek_smi_setup_mdio(struct realtek_smi *smi)
+{
+ struct device_node *mdio_np;
+ int ret;
+
+ mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
+ "realtek,smi-mdio");
+ if (!mdio_np) {
+ dev_err(smi->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
+ if (!smi->slave_mii_bus)
+ return -ENOMEM;
+ smi->slave_mii_bus->priv = smi;
+ smi->slave_mii_bus->name = "SMI slave MII";
+ smi->slave_mii_bus->read = realtek_smi_mdio_read;
+ smi->slave_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ smi->ds->index);
+ smi->slave_mii_bus->dev.of_node = mdio_np;
+ smi->slave_mii_bus->parent = smi->dev;
+ smi->ds->slave_mii_bus = smi->slave_mii_bus;
+
+	ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+	if (ret) {
+		dev_err(smi->dev, "unable to register MDIO bus %s\n",
+			smi->slave_mii_bus->id);
+		of_node_put(mdio_np);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+ const struct realtek_smi_variant *var;
+ struct device *dev = &pdev->dev;
+ struct realtek_smi *smi;
+ struct device_node *np;
+ int ret;
+
+	var = of_device_get_match_data(dev);
+	/* Some match table entries (e.g. rtl8366s) carry no variant
+	 * data yet; bail out instead of dereferencing NULL below.
+	 */
+	if (!var)
+		return -EINVAL;
+	np = dev->of_node;
+
+ smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
+ if (!smi)
+ return -ENOMEM;
+ smi->map = devm_regmap_init(dev, NULL, smi,
+ &realtek_smi_mdio_regmap_config);
+ if (IS_ERR(smi->map)) {
+ ret = PTR_ERR(smi->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Link forward and backward */
+ smi->dev = dev;
+ smi->clk_delay = var->clk_delay;
+ smi->cmd_read = var->cmd_read;
+ smi->cmd_write = var->cmd_write;
+ smi->ops = var->ops;
+
+ dev_set_drvdata(dev, smi);
+ spin_lock_init(&smi->lock);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+
+ /* Assert then deassert RESET */
+ smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(smi->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(smi->reset);
+ }
+ msleep(REALTEK_SMI_HW_STOP_DELAY);
+ gpiod_set_value(smi->reset, 0);
+ msleep(REALTEK_SMI_HW_START_DELAY);
+ dev_info(dev, "deasserted RESET\n");
+
+ /* Fetch MDIO pins */
+ smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(smi->mdc))
+ return PTR_ERR(smi->mdc);
+ smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(smi->mdio))
+ return PTR_ERR(smi->mdio);
+
+ smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ ret = smi->ops->detect(smi);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ smi->ds = dsa_switch_alloc(dev, smi->num_ports);
+ if (!smi->ds)
+ return -ENOMEM;
+ smi->ds->priv = smi;
+
+ smi->ds->ops = var->ds_ops;
+ ret = dsa_register_switch(smi->ds);
+ if (ret) {
+		dev_err(dev, "unable to register switch: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+
+ dsa_unregister_switch(smi->ds);
+ gpiod_set_value(smi->reset, 1);
+
+ return 0;
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+ {
+ .compatible = "realtek,rtl8366rb",
+ .data = &rtl8366rb_variant,
+ },
+ {
+ /* FIXME: add support for RTL8366S and more */
+ .compatible = "realtek,rtl8366s",
+ .data = NULL,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+ .driver = {
+ .name = "realtek-smi",
+ .of_match_table = of_match_ptr(realtek_smi_of_match),
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+};
+module_platform_driver(realtek_smi_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h
new file mode 100644
index 000000000000..9a63b51e1d82
--- /dev/null
+++ b/drivers/net/dsa/realtek-smi.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Realtek SMI interface driver defines
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <net/dsa.h>
+
+struct realtek_smi_ops;
+struct dentry;
+struct inode;
+struct file;
+
+struct rtl8366_mib_counter {
+ unsigned int base;
+ unsigned int offset;
+ unsigned int length;
+ const char *name;
+};
+
+struct rtl8366_vlan_mc {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+ u8 priority;
+};
+
+struct rtl8366_vlan_4k {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+};
+
+struct realtek_smi {
+ struct device *dev;
+ struct gpio_desc *reset;
+ struct gpio_desc *mdc;
+ struct gpio_desc *mdio;
+ struct regmap *map;
+ struct mii_bus *slave_mii_bus;
+
+ unsigned int clk_delay;
+ u8 cmd_read;
+ u8 cmd_write;
+ spinlock_t lock; /* Locks around command writes */
+ struct dsa_switch *ds;
+ struct irq_domain *irqdomain;
+ bool leds_disabled;
+
+ unsigned int cpu_port;
+ unsigned int num_ports;
+ unsigned int num_vlan_mc;
+ unsigned int num_mib_counters;
+ struct rtl8366_mib_counter *mib_counters;
+
+ const struct realtek_smi_ops *ops;
+
+ int vlan_enabled;
+ int vlan4k_enabled;
+
+ char buf[4096];
+};
+
+/**
+ * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+ * @detect: detects the chiptype
+ */
+struct realtek_smi_ops {
+ int (*detect)(struct realtek_smi *smi);
+ int (*reset_chip)(struct realtek_smi *smi);
+ int (*setup)(struct realtek_smi *smi);
+ void (*cleanup)(struct realtek_smi *smi);
+ int (*get_mib_counter)(struct realtek_smi *smi,
+ int port,
+ struct rtl8366_mib_counter *mib,
+ u64 *mibvalue);
+ int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc);
+ int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc);
+ int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k);
+ int (*set_vlan_4k)(struct realtek_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k);
+ int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
+ int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_smi *smi, bool enable);
+ int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
+ int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
+ int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
+ int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+ u16 val);
+};
+
+struct realtek_smi_variant {
+ const struct dsa_switch_ops *ds_ops;
+ const struct realtek_smi_ops *ops;
+ unsigned int clk_delay;
+ u8 cmd_read;
+ u8 cmd_write;
+};
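For orientation, a variant definition would look roughly like the sketch below; the ops-struct names and numeric values are illustrative assumptions, not values from this patch:

	const struct realtek_smi_variant rtl8366rb_variant_sketch = {
		.ds_ops = &rtl8366rb_switch_ops,	/* assumed name */
		.ops = &rtl8366rb_smi_ops,		/* assumed name */
		.clk_delay = 10,	/* example: 10 ns between transitions */
		.cmd_read = 0xa9,	/* example read command byte */
		.cmd_write = 0xa8,	/* example write command byte */
	};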
+
+/* SMI core calls */
+int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
+ u32 data);
+int realtek_smi_setup_mdio(struct realtek_smi *smi);
+
+/* RTL8366 library helpers */
+int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid);
+int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
+int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ unsigned int vid);
+int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
+int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
+int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_init_vlan(struct realtek_smi *smi);
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering);
+int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data);
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
+
+extern const struct realtek_smi_variant rtl8366rb_variant;
+
+#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
new file mode 100644
index 000000000000..6dedd43442cc
--- /dev/null
+++ b/drivers/net/dsa/rtl8366.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI library helpers for the RTL8366x variants
+ * RTL8366RB and RTL8366S
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+
+#include "realtek-smi.h"
+
+int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+{
+ int ret;
+ int i;
+
+ *used = 0;
+ for (i = 0; i < smi->num_ports; i++) {
+ int index = 0;
+
+ ret = smi->ops->get_mc_index(smi, i, &index);
+ if (ret)
+ return ret;
+
+ if (mc_index == index) {
+ *used = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+
+int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Update the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlan4k.member = member;
+ vlan4k.untag = untag;
+ vlan4k.fid = fid;
+ ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ if (ret)
+ return ret;
+
+ /* Try to find an existing MC entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
+
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vid == vlanmc.vid) {
+ /* update the MC entry */
+ vlanmc.member = member;
+ vlanmc.untag = untag;
+ vlanmc.fid = fid;
+
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+
+int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int ret;
+ int index;
+
+ ret = smi->ops->get_mc_index(smi, port, &index);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
+ if (ret)
+ return ret;
+
+ *val = vlanmc.vid;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
+
+int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ unsigned int vid)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Try to find an existing MC entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vid == vlanmc.vid) {
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->set_mc_index(smi, port, i);
+ return ret;
+ }
+ }
+
+ /* We have no MC entry for this VID, try to find an empty one */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vlanmc.vid == 0 && vlanmc.member == 0) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc.vid = vid;
+ vlanmc.member = vlan4k.member;
+ vlanmc.untag = vlan4k.untag;
+ vlanmc.fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->set_mc_index(smi, port, i);
+ return ret;
+ }
+ }
+
+ /* MC table is full, try to find an unused entry and replace it */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ int used;
+
+ ret = rtl8366_mc_is_used(smi, i, &used);
+ if (ret)
+ return ret;
+
+ if (!used) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc.vid = vid;
+ vlanmc.member = vlan4k.member;
+ vlanmc.untag = vlan4k.untag;
+ vlanmc.fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->set_mc_index(smi, port, i);
+ return ret;
+ }
+ }
+
+ dev_err(smi->dev,
+ "all VLAN member configurations are in use\n");
+
+ return -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+
+int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+{
+ int ret;
+
+ /* To enable 4k VLAN, ordinary VLAN must be enabled first,
+ * but if we disable 4k VLAN it is fine to leave ordinary
+ * VLAN enabled.
+ */
+ if (enable) {
+ /* Make sure VLAN is ON */
+ ret = smi->ops->enable_vlan(smi, true);
+ if (ret)
+ return ret;
+
+ smi->vlan_enabled = true;
+ }
+
+ ret = smi->ops->enable_vlan4k(smi, enable);
+ if (ret)
+ return ret;
+
+ smi->vlan4k_enabled = enable;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+
+int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+{
+ int ret;
+
+ ret = smi->ops->enable_vlan(smi, enable);
+ if (ret)
+ return ret;
+
+ smi->vlan_enabled = enable;
+
+ /* If we turn VLAN off, make sure that we turn off
+ * 4k VLAN as well, if that happened to be on.
+ */
+ if (!enable) {
+ smi->vlan4k_enabled = false;
+ ret = smi->ops->enable_vlan4k(smi, false);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+
+int rtl8366_reset_vlan(struct realtek_smi *smi)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int ret;
+ int i;
+
+ rtl8366_enable_vlan(smi, false);
+ rtl8366_enable_vlan4k(smi, false);
+
+ /* Clear the 16 VLAN member configurations */
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.member = 0;
+ vlanmc.untag = 0;
+ vlanmc.fid = 0;
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+
+int rtl8366_init_vlan(struct realtek_smi *smi)
+{
+ int port;
+ int ret;
+
+ ret = rtl8366_reset_vlan(smi);
+ if (ret)
+ return ret;
+
+	/* Loop over the available ports and associate each port
+	 * with VLAN (port + 1)
+	 */
+ for (port = 0; port < smi->num_ports; port++) {
+ u32 mask;
+
+ if (port == smi->cpu_port)
+ /* For the CPU port, make all ports members of this
+ * VLAN.
+ */
+ mask = GENMASK(smi->num_ports - 1, 0);
+ else
+ /* For all other ports, enable itself plus the
+ * CPU port.
+ */
+ mask = BIT(port) | BIT(smi->cpu_port);
+
+ /* For each port, set the port as member of VLAN (port+1)
+ * and untagged, except for the CPU port: the CPU port (5) is
+ * member of VLAN 6 and so are ALL the other ports as well.
+ * Use filter 0 (no filter).
+ */
+ dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
+ (port + 1), port, mask);
+ ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
+ if (ret)
+ return ret;
+
+ dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
+ (port + 1), port, (port + 1));
+ ret = rtl8366_set_pvid(smi, port, (port + 1));
+ if (ret)
+ return ret;
+ }
+
+ return rtl8366_enable_vlan(smi, true);
+}
+EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
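Concretely, with num_ports = 6 and cpu_port = 5 the loop above produces:

	/* port 0: VLAN 1, mask = BIT(0) | BIT(5) = 0x21
	 * port 1: VLAN 2, mask = BIT(1) | BIT(5) = 0x22
	 * port 2: VLAN 3, mask = BIT(2) | BIT(5) = 0x24
	 * port 3: VLAN 4, mask = BIT(3) | BIT(5) = 0x28
	 * port 4: VLAN 5, mask = BIT(4) | BIT(5) = 0x30
	 * port 5: VLAN 6, mask = GENMASK(5, 0)   = 0x3f (CPU port)
	 */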
+
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, port))
+ return -EINVAL;
+
+ dev_info(smi->dev, "%s filtering on port %d\n",
+ vlan_filtering ? "enable" : "disable",
+ port);
+
+ /* TODO:
+	 * The hardware supports filter IDs (FID) 0..7; I have no clue how to
+	 * support this in the driver when the callback only says on/off.
+ */
+ ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
+ if (ret)
+ return ret;
+
+ /* Just set the filter to FID 1 for now then */
+ ret = rtl8366_set_vlan(smi, port,
+ vlan4k.member,
+ vlan4k.untag,
+ 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
+
+int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, port))
+ return -EINVAL;
+
+ dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
+ vlan->vid_begin, vlan->vid_end);
+
+ /* Enable VLAN in the hardware
+ * FIXME: what's with this 4k business?
+ * Just rtl8366_enable_vlan() seems inconclusive.
+ */
+ ret = rtl8366_enable_vlan4k(smi, true);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
+
+void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
+ struct realtek_smi *smi = ds->priv;
+ u32 member = 0;
+ u32 untag = 0;
+ u16 vid;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, port))
+ return;
+
+	dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+		 port,
+		 untagged ? "untagged" : "tagged",
+		 pvid ? "PVID" : "no PVID");
+
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ dev_err(smi->dev, "port is DSA or CPU port\n");
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ int pvid_val = 0;
+
+ dev_info(smi->dev, "add VLAN %04x\n", vid);
+ member |= BIT(port);
+
+ if (untagged)
+ untag |= BIT(port);
+
+ /* To ensure that we have a valid MC entry for this VLAN,
+ * initialize the port VLAN ID here.
+ */
+ ret = rtl8366_get_pvid(smi, port, &pvid_val);
+ if (ret < 0) {
+ dev_err(smi->dev, "could not lookup PVID for port %d\n",
+ port);
+ return;
+ }
+ if (pvid_val == 0) {
+ ret = rtl8366_set_pvid(smi, port, vid);
+ if (ret < 0)
+ return;
+ }
+ }
+
+ ret = rtl8366_set_vlan(smi, port, member, untag, 0);
+ if (ret)
+		dev_err(smi->dev,
+			"failed to set up VLAN %04x\n",
+			vid);
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct realtek_smi *smi = ds->priv;
+ u16 vid;
+ int ret;
+
+ dev_info(smi->dev, "del VLAN on port %d\n", port);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ int i;
+
+ dev_info(smi->dev, "del VLAN %04x\n", vid);
+
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
+
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vid == vlanmc.vid) {
+ /* clear VLAN member configurations */
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.member = 0;
+ vlanmc.untag = 0;
+ vlanmc.fid = 0;
+
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to remove VLAN %04x\n",
+ vid);
+ return ret;
+ }
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366_mib_counter *mib;
+ int i;
+
+ if (port >= smi->num_ports)
+ return;
+
+ for (i = 0; i < smi->num_mib_counters; i++) {
+ mib = &smi->mib_counters[i];
+ strncpy(data + i * ETH_GSTRING_LEN,
+ mib->name, ETH_GSTRING_LEN);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ /* We only support SS_STATS */
+ if (sset != ETH_SS_STATS)
+ return 0;
+ if (port >= smi->num_ports)
+ return -EINVAL;
+
+ return smi->num_mib_counters;
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ int i;
+ int ret;
+
+ if (port >= smi->num_ports)
+ return;
+
+ for (i = 0; i < smi->num_mib_counters; i++) {
+ struct rtl8366_mib_counter *mib;
+ u64 mibvalue = 0;
+
+ mib = &smi->mib_counters[i];
+ ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+ if (ret) {
+ dev_err(smi->dev, "error reading MIB counter %s\n",
+ mib->name);
+ }
+ data[i] = mibvalue;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
new file mode 100644
index 000000000000..a4d5049df692
--- /dev/null
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -0,0 +1,1454 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
+ *
+ * This is a sparsely documented chip; the only viable documentation seems
+ * to be a patched-up code drop from the vendor that appears in various
+ * GPL source trees.
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include "realtek-smi.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* Switch Global Configuration register */
+#define RTL8366RB_SGCR 0x0000
+#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
+#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4)
+#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
+#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
+#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
+#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_EN_VLAN BIT(13)
+#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
+
+/* Port Enable Control register */
+#define RTL8366RB_PECR 0x0001
+
+/* Switch Security Control registers */
+#define RTL8366RB_SSCR0 0x0002
+#define RTL8366RB_SSCR1 0x0003
+#define RTL8366RB_SSCR2 0x0004
+#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
+
+/* Port Mode Control registers */
+#define RTL8366RB_PMC0 0x0005
+#define RTL8366RB_PMC0_SPI BIT(0)
+#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1)
+#define RTL8366RB_PMC0_PROBE BIT(2)
+#define RTL8366RB_PMC0_DIS_BISR BIT(3)
+#define RTL8366RB_PMC0_ADCTEST BIT(4)
+#define RTL8366RB_PMC0_SRAM_DIAG BIT(5)
+#define RTL8366RB_PMC0_EN_SCAN BIT(6)
+#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7
+#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7)
+#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10
+#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10)
+#define RTL8366RB_PMC0_SDSMODE_SHIFT 13
+#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13)
+#define RTL8366RB_PMC1 0x0006
+
+/* Port Mirror Control Register */
+#define RTL8366RB_PMCR 0x0007
+#define RTL8366RB_PMCR_SOURCE_PORT(a) (a)
+#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f
+#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4)
+#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0
+#define RTL8366RB_PMCR_MIRROR_RX BIT(8)
+#define RTL8366RB_PMCR_MIRROR_TX BIT(9)
+#define RTL8366RB_PMCR_MIRROR_SPC BIT(10)
+#define RTL8366RB_PMCR_MIRROR_ISO BIT(11)
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PAACR0 0x0010
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PAACR1 0x0011
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PAACR2 0x0012
+#define RTL8366RB_PAACR_SPEED_10M 0
+#define RTL8366RB_PAACR_SPEED_100M 1
+#define RTL8366RB_PAACR_SPEED_1000M 2
+#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2)
+#define RTL8366RB_PAACR_LINK_UP BIT(4)
+#define RTL8366RB_PAACR_TX_PAUSE BIT(5)
+#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
+#define RTL8366RB_PAACR_AN BIT(7)
+
+#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
+ RTL8366RB_PAACR_FULL_DUPLEX | \
+ RTL8366RB_PAACR_LINK_UP | \
+ RTL8366RB_PAACR_TX_PAUSE | \
+ RTL8366RB_PAACR_RX_PAUSE)
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PSTAT0 0x0014
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PSTAT1 0x0015
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PSTAT2 0x0016
+
+#define RTL8366RB_POWER_SAVING_REG 0x0021
+
+/* CPU port control reg */
+#define RTL8368RB_CPU_CTRL_REG 0x0061
+#define RTL8368RB_CPU_PORTS_MSK 0x00FF
+/* Enables inserting custom tag length/type 0x8899 */
+#define RTL8368RB_CPU_INSTAG BIT(15)
+
+#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
+#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
+#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */
+
+#define RTL8366RB_RESET_CTRL_REG 0x0100
+#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0)
+#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1)
+
+#define RTL8366RB_CHIP_ID_REG 0x0509
+#define RTL8366RB_CHIP_ID_8366 0x5937
+#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
+#define RTL8366RB_CHIP_VERSION_MASK 0xf
+
+/* PHY registers control */
+#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
+#define RTL8366RB_PHY_CTRL_READ BIT(0)
+#define RTL8366RB_PHY_CTRL_WRITE 0
+#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001
+#define RTL8366RB_PHY_INT_BUSY BIT(0)
+#define RTL8366RB_PHY_EXT_BUSY BIT(4)
+#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
+#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010
+#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011
+#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012
+
+#define RTL8366RB_PHY_REG_MASK 0x1f
+#define RTL8366RB_PHY_PAGE_OFFSET 5
+#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
+#define RTL8366RB_PHY_NO_OFFSET 9
+#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
+
+#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
+
+/* LED control registers */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_OFF 0x0
+#define RTL8366RB_LED_DUP_COL 0x1
+#define RTL8366RB_LED_LINK_ACT 0x2
+#define RTL8366RB_LED_SPD1000 0x3
+#define RTL8366RB_LED_SPD100 0x4
+#define RTL8366RB_LED_SPD10 0x5
+#define RTL8366RB_LED_SPD1000_ACT 0x6
+#define RTL8366RB_LED_SPD100_ACT 0x7
+#define RTL8366RB_LED_SPD10_ACT 0x8
+#define RTL8366RB_LED_SPD100_10_ACT 0x9
+#define RTL8366RB_LED_FIBER 0xa
+#define RTL8366RB_LED_AN_FAULT 0xb
+#define RTL8366RB_LED_LINK_RX 0xc
+#define RTL8366RB_LED_LINK_TX 0xd
+#define RTL8366RB_LED_MASTER 0xe
+#define RTL8366RB_LED_FORCE 0xf
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_1_OFFSET 6
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_3_OFFSET 6
+
+#define RTL8366RB_MIB_COUNT 33
+#define RTL8366RB_GLOBAL_MIB_COUNT 1
+#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
+#define RTL8366RB_MIB_COUNTER_BASE 0x1000
+#define RTL8366RB_MIB_CTRL_REG 0x13F0
+#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
+#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
+#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
+#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
+#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
+
+#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
+#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
+ (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
+#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
+#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
+
+#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
+#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
+
+#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
+#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
+#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
+
+#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
+
+#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
+#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
+#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
+#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
+#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
+#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
+#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
+
+#define RTL8366RB_NUM_VLANS 16
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_NUM_VIDS 4096
+#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_FIDMAX 7
+
+#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
+#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */
+#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */
+#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */
+#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */
+
+#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */
+
+#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5 | \
+ RTL8366RB_PORT_CPU)
+
+#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5)
+
+#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4)
+
+#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
+
+/* First configuration word per member config, VID and prio */
+#define RTL8366RB_VLAN_VID_MASK 0xfff
+#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
+#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
+/* Second configuration word per member config, member and untagged */
+#define RTL8366RB_VLAN_UNTAG_SHIFT 8
+#define RTL8366RB_VLAN_UNTAG_MASK 0xff
+#define RTL8366RB_VLAN_MEMBER_MASK 0xff
+/* Third config word per member config, STAG currently unused */
+#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff
+#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8
+#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7
+#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5
+#define RTL8366RB_VLAN_FID_MASK 0x7
+
+/* Port ingress bandwidth control */
+#define RTL8366RB_IB_BASE 0x0200
+#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum))
+#define RTL8366RB_IB_BDTH_MASK 0x3fff
+#define RTL8366RB_IB_PREIFG BIT(14)
+
+/* Port egress bandwidth control */
+#define RTL8366RB_EB_BASE 0x02d1
+#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum))
+#define RTL8366RB_EB_BDTH_MASK 0x3fff
+#define RTL8366RB_EB_PREIFG_REG 0x02f8
+#define RTL8366RB_EB_PREIFG BIT(9)
+
+#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */
+#define RTL8366RB_BDTH_UNIT 64
+#define RTL8366RB_BDTH_REG_DEFAULT 16383
+
+/* QOS */
+#define RTL8366RB_QOS BIT(15)
+/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
+#define RTL8366RB_QOS_DEFAULT_PREIFG 1
+
+/* Interrupt handling */
+#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440
+#define RTL8366RB_INTERRUPT_POLARITY BIT(0)
+#define RTL8366RB_P4_RGMII_LED BIT(2)
+#define RTL8366RB_INTERRUPT_MASK_REG 0x0441
+#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0)
+#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8)
+#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9)
+#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12)
+#define RTL8366RB_INTERRUPT_P4_UTP BIT(13)
+#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \
+ RTL8366RB_INTERRUPT_ACLEXCEED | \
+ RTL8366RB_INTERRUPT_STORMEXCEED | \
+ RTL8366RB_INTERRUPT_P4_FIBER | \
+ RTL8366RB_INTERRUPT_P4_UTP)
+#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
+#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
+
+/* bits 0..5 enable force when cleared */
+#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
+
+#define RTL8366RB_OAM_PARSER_REG 0x0F14
+#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15
+
+#define RTL8366RB_GREEN_FEATURE_REG 0x0F51
+#define RTL8366RB_GREEN_FEATURE_MSK 0x0007
+#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
+#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+
+static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+ { 0, 0, 4, "IfInOctets" },
+ { 0, 4, 4, "EtherStatsOctets" },
+ { 0, 8, 2, "EtherStatsUnderSizePkts" },
+ { 0, 10, 2, "EtherFragments" },
+ { 0, 12, 2, "EtherStatsPkts64Octets" },
+ { 0, 14, 2, "EtherStatsPkts65to127Octets" },
+ { 0, 16, 2, "EtherStatsPkts128to255Octets" },
+ { 0, 18, 2, "EtherStatsPkts256to511Octets" },
+ { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
+ { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
+ { 0, 24, 2, "EtherOversizeStats" },
+ { 0, 26, 2, "EtherStatsJabbers" },
+ { 0, 28, 2, "IfInUcastPkts" },
+ { 0, 30, 2, "EtherStatsMulticastPkts" },
+ { 0, 32, 2, "EtherStatsBroadcastPkts" },
+ { 0, 34, 2, "EtherStatsDropEvents" },
+ { 0, 36, 2, "Dot3StatsFCSErrors" },
+ { 0, 38, 2, "Dot3StatsSymbolErrors" },
+ { 0, 40, 2, "Dot3InPauseFrames" },
+ { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
+ { 0, 44, 4, "IfOutOctets" },
+ { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
+ { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
+ { 0, 52, 2, "Dot3sDeferredTransmissions" },
+ { 0, 54, 2, "Dot3StatsLateCollisions" },
+ { 0, 56, 2, "EtherStatsCollisions" },
+ { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
+ { 0, 60, 2, "Dot3OutPauseFrames" },
+ { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
+ { 0, 64, 2, "Dot1dTpPortInDiscards" },
+ { 0, 66, 2, "IfOutUcastPkts" },
+ { 0, 68, 2, "IfOutMulticastPkts" },
+ { 0, 70, 2, "IfOutBroadcastPkts" },
+};
+
+static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+ int port,
+ struct rtl8366_mib_counter *mib,
+ u64 *mibvalue)
+{
+ u32 addr, val;
+ int ret;
+ int i;
+
+ addr = RTL8366RB_MIB_COUNTER_BASE +
+ RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
+ mib->offset;
+
+	/* Write the counter address first; the ASIC will then latch the
+	 * 64-bit counter so that it can be retrieved.
+	 */
+ ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+ if (ret)
+ return ret;
+
+ /* Read MIB control register */
+ ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+ if (ret)
+ return -EIO;
+
+ if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
+ return -EBUSY;
+
+ if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
+ return -EIO;
+
+	/* Read each individual MIB 16 bits at a time */
+ *mibvalue = 0;
+ for (i = mib->length; i > 0; i--) {
+ ret = regmap_read(smi->map, addr + (i - 1), &val);
+ if (ret)
+ return ret;
+ *mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
+ }
+ return 0;
+}
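A worked example of the addressing above, reading IfInOctets (offset 0, length 4) on port 2:

	/* addr  = 0x1000 + 0x0050 * 2 + 0 = 0x10a0
	 * value = reg[0x10a3] << 48 | reg[0x10a2] << 32 |
	 *         reg[0x10a1] << 16 | reg[0x10a0]
	 */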
+
+static u32 rtl8366rb_get_irqmask(struct irq_data *d)
+{
+ int line = irqd_to_hwirq(d);
+ u32 val;
+
+ /* For line interrupts we combine link down in bits
+ * 6..11 with link up in bits 0..5 into one interrupt.
+ */
+ if (line < 12)
+ val = BIT(line) | BIT(line + 6);
+ else
+ val = BIT(line);
+ return val;
+}
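For example, hwirq line 3 (link change on port 3) yields:

	/* BIT(3) | BIT(9) = 0x0208, covering both the link-up (bit 3)
	 * and link-down (bit 9) status bits for that port.
	 */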
+
+static void rtl8366rb_mask_irq(struct irq_data *d)
+{
+ struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ rtl8366rb_get_irqmask(d), 0);
+ if (ret)
+ dev_err(smi->dev, "could not mask IRQ\n");
+}
+
+static void rtl8366rb_unmask_irq(struct irq_data *d)
+{
+ struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ rtl8366rb_get_irqmask(d),
+ rtl8366rb_get_irqmask(d));
+ if (ret)
+ dev_err(smi->dev, "could not unmask IRQ\n");
+}
+
+static irqreturn_t rtl8366rb_irq(int irq, void *data)
+{
+ struct realtek_smi *smi = data;
+ u32 stat;
+ int ret;
+
+ /* This clears the IRQ status register */
+ ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ &stat);
+ if (ret) {
+ dev_err(smi->dev, "can't read interrupt status\n");
+ return IRQ_NONE;
+ }
+ stat &= RTL8366RB_INTERRUPT_VALID;
+ if (!stat)
+ return IRQ_NONE;
+ while (stat) {
+ int line = __ffs(stat);
+ int child_irq;
+
+ stat &= ~BIT(line);
+ /* For line interrupts we combine link down in bits
+ * 6..11 with link up in bits 0..5 into one interrupt.
+ */
+ if (line < 12 && line > 5)
+ line -= 5;
+ child_irq = irq_find_mapping(smi->irqdomain, line);
+ handle_nested_irq(child_irq);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip rtl8366rb_irq_chip = {
+ .name = "RTL8366RB",
+ .irq_mask = rtl8366rb_mask_irq,
+ .irq_unmask = rtl8366rb_unmask_irq,
+};
+
+static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
+ .map = rtl8366rb_irq_map,
+ .unmap = rtl8366rb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+{
+ struct device_node *intc;
+ unsigned long irq_trig;
+ int irq;
+ int ret;
+ u32 val;
+ int i;
+
+ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(smi->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+	/* RTL8366RB IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ dev_err(smi->dev, "failed to get parent IRQ\n");
+ return irq ? irq : -EINVAL;
+ }
+
+ /* This clears the IRQ status register */
+ ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ &val);
+ if (ret) {
+ dev_err(smi->dev, "can't read interrupt status\n");
+ return ret;
+ }
+
+ /* Fetch IRQ edge information from the descriptor */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ dev_info(smi->dev, "active high/rising IRQ\n");
+ val = 0;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ dev_info(smi->dev, "active low/falling IRQ\n");
+ val = RTL8366RB_INTERRUPT_POLARITY;
+ break;
+ }
+ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_INTERRUPT_POLARITY,
+ val);
+ if (ret) {
+ dev_err(smi->dev, "could not configure IRQ polarity\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+ rtl8366rb_irq, IRQF_ONESHOT,
+ "RTL8366RB", smi);
+ if (ret) {
+ dev_err(smi->dev, "unable to request irq: %d\n", ret);
+ return ret;
+ }
+ smi->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops,
+ smi);
+ if (!smi->irqdomain) {
+ dev_err(smi->dev, "failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < smi->num_ports; i++)
+ irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+
+ return 0;
+}
+
+static int rtl8366rb_set_addr(struct realtek_smi *smi)
+{
+ u8 addr[ETH_ALEN];
+ u16 val;
+ int ret;
+
+ eth_random_addr(addr);
+
+ dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ val = addr[0] << 8 | addr[1];
+ ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+ if (ret)
+ return ret;
+ val = addr[2] << 8 | addr[3];
+ ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+ if (ret)
+ return ret;
+ val = addr[4] << 8 | addr[5];
+ ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Found in a vendor driver */
+
+/* For the "version 0" early silicon, appears in most source releases */
+static const u16 rtl8366rb_init_jam_ver_0[] = {
+ 0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF,
+ 0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF,
+ 0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688,
+ 0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830,
+ 0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8,
+ 0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000,
+ 0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A,
+ 0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A,
+ 0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A,
+ 0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A,
+ 0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A,
+ 0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A,
+ 0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961,
+ 0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146,
+ 0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146,
+ 0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320,
+ 0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146,
+ 0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146,
+ 0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146,
+ 0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294,
+ 0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294,
+ 0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 0xA23F, 0x026C, 0x0294,
+ 0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294,
+ 0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448,
+ 0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B,
+ 0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD,
+ 0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C,
+ 0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13,
+ 0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF,
+};
+
+/* This v1 init sequence is from Belkin F5D8235 U-Boot release */
+static const u16 rtl8366rb_init_jam_ver_1[] = {
+ 0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C,
+ 0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
+ 0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585,
+ 0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135,
+ 0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F,
+ 0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20,
+ 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000,
+ 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140,
+ 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A,
+ 0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0,
+ 0xBE79, 0x3C3C, 0xBE00, 0x1340,
+};
+
+/* This v2 init sequence is from Belkin F5D8235 U-Boot release */
+static const u16 rtl8366rb_init_jam_ver_2[] = {
+ 0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
+ 0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14,
+ 0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85,
+ 0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881,
+ 0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885,
+ 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
+ 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320,
+ 0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
+ 0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007,
+ 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140,
+ 0xBE00, 0x1340, 0x0F51, 0x0010,
+};
+
+/* Appears in a DDWRT code dump */
+static const u16 rtl8366rb_init_jam_ver_3[] = {
+ 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
+ 0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
+ 0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B,
+ 0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B,
+ 0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185,
+ 0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585,
+ 0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185,
+ 0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51,
+ 0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800,
+ 0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60,
+ 0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000,
+ 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
+ 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320,
+ 0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
+ 0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140,
+ 0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160,
+ 0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000,
+};
+
+/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
+static const u16 rtl8366rb_init_jam_f5d8235[] = {
+ 0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF,
+ 0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 0x0256, 0x0A3F,
+ 0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F,
+ 0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F,
+ 0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3,
+ 0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304,
+ 0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500,
+ 0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00,
+ 0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF,
+};
+
+/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
+static const u16 rtl8366rb_init_jam_dgn3500[] = {
+ 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017,
+ 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000,
+ 0x0401, 0x0000, 0x0431, 0x0960,
+};
+
+/* This jam table activates "green ethernet", a low-power mode which is
+ * claimed to detect the cable length and not use more power than
+ * necessary. The ports should enter power-saving mode 10 seconds after
+ * a cable is disconnected. The table seems to always be the same.
+ */
+static const u16 rtl8366rb_green_jam[][2] = {
+ {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
+ {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
+ {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
+};
+
+static int rtl8366rb_setup(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+ const u16 *jam_table;
+ u32 chip_ver = 0;
+ u32 chip_id = 0;
+ int jam_size;
+ u32 val;
+ int ret;
+ int i;
+
+ ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ if (ret) {
+ dev_err(smi->dev, "unable to read chip id\n");
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8366RB_CHIP_ID_8366:
+ break;
+ default:
+ dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ &chip_ver);
+ if (ret) {
+ dev_err(smi->dev, "unable to read chip version\n");
+ return ret;
+ }
+
+ dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+ chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
+
+ /* Do the init dance using the right jam table */
+ switch (chip_ver) {
+ case 0:
+ jam_table = rtl8366rb_init_jam_ver_0;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
+ break;
+ case 1:
+ jam_table = rtl8366rb_init_jam_ver_1;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
+ break;
+ case 2:
+ jam_table = rtl8366rb_init_jam_ver_2;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
+ break;
+ default:
+ jam_table = rtl8366rb_init_jam_ver_3;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
+ break;
+ }
+
+ /* Special jam tables for special routers
+ * TODO: are these necessary? Maintainers, please test
+ * without them, using just the off-the-shelf tables.
+ */
+ if (of_machine_is_compatible("belkin,f5d8235-v1")) {
+ jam_table = rtl8366rb_init_jam_f5d8235;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
+ }
+ if (of_machine_is_compatible("netgear,dgn3500") ||
+ of_machine_is_compatible("netgear,dgn3500b")) {
+ jam_table = rtl8366rb_init_jam_dgn3500;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
+ }
+
+ i = 0;
+ while (i < jam_size) {
+ if ((jam_table[i] & 0xBE00) == 0xBE00) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_PHY_ACCESS_BUSY_REG,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+ }
+ }
+ dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ jam_table[i + 1],
+ jam_table[i]);
+ ret = regmap_write(smi->map,
+ jam_table[i],
+ jam_table[i + 1]);
+ if (ret)
+ return ret;
+ i += 2;
+ }
+
+ /* Set up the "green ethernet" feature */
+ i = 0;
+ while (i < ARRAY_SIZE(rtl8366rb_green_jam)) {
+ ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+ ret = regmap_write(smi->map,
+ rtl8366rb_green_jam[i][0],
+ rtl8366rb_green_jam[i][1]);
+ if (ret)
+ return ret;
+ i++;
+ }
+ }
+ ret = regmap_write(smi->map,
+ RTL8366RB_GREEN_FEATURE_REG,
+ (chip_ver == 1) ? 0x0007 : 0x0003);
+ if (ret)
+ return ret;
+
+ /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
+ ret = regmap_write(smi->map, 0x0c, 0x240);
+ if (ret)
+ return ret;
+ ret = regmap_write(smi->map, 0x0d, 0x240);
+ if (ret)
+ return ret;
+
+ /* Set some random MAC address */
+ ret = rtl8366rb_set_addr(smi);
+ if (ret)
+ return ret;
+
+ /* Enable CPU port and enable inserting CPU tag
+ *
+ * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour
+ * of the switch totally and it will start talking Realtek RRCP
+ * internally. It is probably possible to experiment with this,
+ * but then the kernel needs to understand and handle RRCP first.
+ */
+ ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+ 0xFFFF,
+ RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port));
+ if (ret)
+ return ret;
+
+ /* Make sure we default-enable the fixed CPU port */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
+ BIT(smi->cpu_port),
+ 0);
+ if (ret)
+ return ret;
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_MAX_LENGTH_MASK,
+ RTL8366RB_SGCR_MAX_LENGTH_1536);
+ if (ret)
+ return ret;
+
+ /* Enable learning for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
+ if (ret)
+ return ret;
+
+ /* Enable auto ageing for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
+ if (ret)
+ return ret;
+
+ /* Port 4 setup: this enables Port 4, usually the WAN port.
+ * The common PHY IO mode is apparently mode 0, which is not what
+ * the port is initialized to. There is no explanation of the
+ * IO modes in the Realtek source code; if your WAN port is
+ * connected to something exotic such as fiber, then this might
+ * be worth experimenting with.
+ */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
+ RTL8366RB_PMC0_P4_IOMODE_MASK,
+ 0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
+ if (ret)
+ return ret;
+
+ /* Discard VLAN tagged packets if the port is not a member of
+ * the VLAN with which the packet is associated.
+ */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ RTL8366RB_PORT_ALL);
+ if (ret)
+ return ret;
+
+ /* Don't drop packets whose DA has not been learned */
+ ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+ RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
+ if (ret)
+ return ret;
+
+ /* Set blinking, TODO: make this configurable */
+ ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+ RTL8366RB_LED_BLINKRATE_MASK,
+ RTL8366RB_LED_BLINKRATE_56MS);
+ if (ret)
+ return ret;
+
+ /* Set up LED activity:
+ * Each port has 4 LEDs, we configure all ports to the same
+ * behaviour (no individual config) but we can set up each
+ * LED separately.
+ */
+ if (smi->leds_disabled) {
+ /* Turn everything off */
+ regmap_update_bits(smi->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x0FFF, 0);
+ regmap_update_bits(smi->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x0FFF, 0);
+ regmap_update_bits(smi->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+ val = RTL8366RB_LED_OFF;
+ } else {
+ /* TODO: make this configurable per LED */
+ val = RTL8366RB_LED_FORCE;
+ }
+ for (i = 0; i < 4; i++) {
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_CTRL_REG,
+ 0xf << (i * 4),
+ val << (i * 4));
+ if (ret)
+ return ret;
+ }
+
+ ret = rtl8366_init_vlan(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8366rb_setup_cascaded_irq(smi);
+ if (ret)
+ dev_info(smi->dev, "no interrupt support\n");
+
+ ret = realtek_smi_setup_mdio(smi);
+ if (ret) {
+ dev_info(smi->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ /* For now, the RTL switches are handled without any custom tags.
+ *
+ * It is possible to turn on "custom tags" by removing the
+ * RTL8368RB_CPU_INSTAG flag when enabling the port, but what that
+ * does is unfamiliar to DSA: ethernet frames of type 8899, the
+ * Realtek Remote Control Protocol (RRCP), start to appear on the
+ * CPU port of the device. So this is not the ordinary few extra
+ * bytes in the frame. Instead the switch starts to talk RRCP
+ * internally, which means a pretty complex RRCP implementation
+ * decoding and responding to the protocol is needed to exploit
+ * this.
+ *
+ * The OpenRRCP project (dormant since 2009) has reverse-engineered
+ * parts of the protocol.
+ */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (port != smi->cpu_port)
+ return;
+
+ dev_info(smi->dev, "adjust link on CPU port (%d)\n", port);
+
+ /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
+ ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ BIT(port), BIT(port));
+ if (ret)
+ return;
+
+ ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+ 0xFF00U,
+ RTL8366RB_PAACR_CPU_PORT << 8);
+ if (ret)
+ return;
+
+ /* Enable the CPU port */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ 0);
+ if (ret)
+ return;
+}
+
+static void rb8366rb_set_port_led(struct realtek_smi *smi,
+ int port, bool enable)
+{
+ u16 val = enable ? 0x3f : 0;
+ int ret;
+
+ if (smi->leds_disabled)
+ return;
+
+ switch (port) {
+ case 0:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x3F, val);
+ break;
+ case 1:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x3F << RTL8366RB_LED_1_OFFSET,
+ val << RTL8366RB_LED_1_OFFSET);
+ break;
+ case 2:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x3F, val);
+ break;
+ case 3:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x3F << RTL8366RB_LED_3_OFFSET,
+ val << RTL8366RB_LED_3_OFFSET);
+ break;
+ case 4:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ enable ? RTL8366RB_P4_RGMII_LED : 0);
+ break;
+ default:
+ dev_err(smi->dev, "no LED for port %d\n", port);
+ return;
+ }
+ if (ret)
+ dev_err(smi->dev, "error updating LED on port %d\n", port);
+}
+
+static int
+rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ dev_dbg(smi->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ 0);
+ if (ret)
+ return ret;
+
+ rb8366rb_set_port_led(smi, port, true);
+ return 0;
+}
+
+static void
+rtl8366rb_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ dev_dbg(smi->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ BIT(port));
+ if (ret)
+ return;
+
+ rb8366rb_set_port_led(smi, port, false);
+}
+
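+/* The 4K VLAN table is accessed indirectly: write the VID into the
+ * table write base register, kick a read command through the table
+ * access control word, then collect the three data words holding the
+ * member, untag and FID fields from the read base registers.
+ */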
+static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ memset(vlan4k, 0, sizeof(struct rtl8366_vlan_4k));
+
+ if (vid >= RTL8366RB_NUM_VIDS)
+ return -EINVAL;
+
+ /* write VID */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ vid & RTL8366RB_VLAN_VID_MASK);
+ if (ret)
+ return ret;
+
+ /* write table access control word */
+ ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_READ_CTRL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_VLAN_TABLE_READ_BASE + i,
+ &data[i]);
+ if (ret)
+ return ret;
+ }
+
+ vlan4k->vid = vid;
+ vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
+ vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlan4k->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
+ data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* write table access control word */
+ ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_WRITE_CTRL);
+
+ return ret;
+}
+
+static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ memset(vlanmc, 0, sizeof(struct rtl8366_vlan_mc));
+
+ if (index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ &data[i]);
+ if (ret)
+ return ret;
+ }
+
+ vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
+ vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
+ RTL8366RB_VLAN_PRIORITY_MASK;
+ vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ if (index >= RTL8366RB_NUM_VLANS ||
+ vlanmc->vid >= RTL8366RB_NUM_VIDS ||
+ vlanmc->priority > RTL8366RB_PRIORITYMAX ||
+ vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlanmc->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
+ ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
+ RTL8366RB_VLAN_PRIORITY_SHIFT);
+ data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
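+/* The port VLAN control registers pack the member config index of
+ * several ports into a single register, which is why a per-port
+ * register and shift are computed below (inferred from the macro
+ * layout, not from documentation).
+ */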
+static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+{
+ u32 data;
+ int ret;
+
+ if (port >= smi->num_ports)
+ return -EINVAL;
+
+ ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ &data);
+ if (ret)
+ return ret;
+
+ *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
+ RTL8366RB_PORT_VLAN_CTRL_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+{
+ if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+}
+
+static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+{
+ unsigned int max = RTL8366RB_NUM_VLANS;
+
+ if (smi->vlan4k_enabled)
+ max = RTL8366RB_NUM_VIDS - 1;
+
+ if (vlan == 0 || vlan >= max)
+ return false;
+
+ return true;
+}
+
+static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+{
+ dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ return regmap_update_bits(smi->map,
+ RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
+ enable ? RTL8366RB_SGCR_EN_VLAN : 0);
+}
+
+static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+{
+ dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_EN_VLAN_4KTB,
+ enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
+}
+
+static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+{
+ u32 val;
+ u32 reg;
+ int ret;
+
+ if (phy > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_READ);
+ if (ret)
+ return ret;
+
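+ /* The PHY registers are reached through a synthesized register
+ * address: bit 15 set, a one-hot PHY select bit shifted up by
+ * RTL8366RB_PHY_NO_OFFSET, and the PHY register number in the low
+ * bits.
+ */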
+ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+ ret = regmap_write(smi->map, reg, 0);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to write PHY%d reg %04x @ %04x, ret %d\n",
+ phy, regnum, reg, ret);
+ return ret;
+ }
+
+ ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ if (ret)
+ return ret;
+
+ dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ phy, regnum, reg, val);
+
+ return val;
+}
+
+static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+ u16 val)
+{
+ u32 reg;
+ int ret;
+
+ if (phy > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+
+ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+ dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ phy, regnum, reg, val);
+
+ ret = regmap_write(smi->map, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
+ do {
+ usleep_range(20000, 25000);
+ ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
+ break;
+ } while (--timeout);
+
+ if (!timeout) {
+ dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_detect(struct realtek_smi *smi)
+{
+ struct device *dev = smi->dev;
+ int ret;
+ u32 val;
+
+ /* Detect device */
+ ret = regmap_read(smi->map, 0x5c, &val);
+ if (ret) {
+ dev_err(dev, "can't get chip ID (%d)\n", ret);
+ return ret;
+ }
+
+ switch (val) {
+ case 0x6027:
+ dev_info(dev, "found an RTL8366S switch\n");
+ dev_err(dev, "this switch is not yet supported, submit patches!\n");
+ return -ENODEV;
+ case 0x5937:
+ dev_info(dev, "found an RTL8366RB switch\n");
+ smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ smi->num_ports = RTL8366RB_NUM_PORTS;
+ smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ smi->mib_counters = rtl8366rb_mib_counters;
+ smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ break;
+ default:
+ dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
+ val);
+ break;
+ }
+
+ ret = rtl8366rb_reset_chip(smi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8366rb_switch_ops = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .adjust_link = rtl8366rb_adjust_link,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_vlan_filtering = rtl8366_vlan_filtering,
+ .port_vlan_prepare = rtl8366_vlan_prepare,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+};
+
+static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+ .detect = rtl8366rb_detect,
+ .get_vlan_mc = rtl8366rb_get_vlan_mc,
+ .set_vlan_mc = rtl8366rb_set_vlan_mc,
+ .get_vlan_4k = rtl8366rb_get_vlan_4k,
+ .set_vlan_4k = rtl8366rb_set_vlan_4k,
+ .get_mc_index = rtl8366rb_get_mc_index,
+ .set_mc_index = rtl8366rb_set_mc_index,
+ .get_mib_counter = rtl8366rb_get_mib_counter,
+ .is_vlan_valid = rtl8366rb_is_vlan_valid,
+ .enable_vlan = rtl8366rb_enable_vlan,
+ .enable_vlan4k = rtl8366rb_enable_vlan4k,
+ .phy_read = rtl8366rb_phy_read,
+ .phy_write = rtl8366rb_phy_write,
+};
+
+const struct realtek_smi_variant rtl8366rb_variant = {
+ .ds_ops = &rtl8366rb_switch_ops,
+ .ops = &rtl8366rb_smi_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xa9,
+ .cmd_write = 0xa8,
+};
+EXPORT_SYMBOL_GPL(rtl8366rb_variant);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c
new file mode 100644
index 000000000000..9f1b5f2e8a64
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx.c
@@ -0,0 +1,1365 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * These switches have a built-in 8051 CPU and can download and execute a
+ * firmware in this CPU. They can also be configured to use an external CPU
+ * handling the switch in a memory-mapped manner by connecting to that external
+ * CPU's memory bus.
+ *
+ * This driver (currently) only takes control of the switch chip over SPI and
+ * configures it to route packets around when connected to a CPU port. The
+ * chip has embedded PHYs and VLAN support so we model it using DSA.
+ *
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/etherdevice.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/random.h>
+#include <net/dsa.h>
+
+#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */
+#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
+#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
+#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
+#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+
+#define CPU_PORT 6 /* CPU port */
+
+/* MAC Block registers */
+#define VSC73XX_MAC_CFG 0x00
+#define VSC73XX_MACHDXGAP 0x02
+#define VSC73XX_FCCONF 0x04
+#define VSC73XX_FCMACHI 0x08
+#define VSC73XX_FCMACLO 0x0c
+#define VSC73XX_MAXLEN 0x10
+#define VSC73XX_ADVPORTM 0x19
+#define VSC73XX_TXUPDCFG 0x24
+#define VSC73XX_TXQ_SELECT_CFG 0x28
+#define VSC73XX_RXOCT 0x50
+#define VSC73XX_TXOCT 0x51
+#define VSC73XX_C_RX0 0x52
+#define VSC73XX_C_RX1 0x53
+#define VSC73XX_C_RX2 0x54
+#define VSC73XX_C_TX0 0x55
+#define VSC73XX_C_TX1 0x56
+#define VSC73XX_C_TX2 0x57
+#define VSC73XX_C_CFG 0x58
+#define VSC73XX_CAT_DROP 0x6e
+#define VSC73XX_CAT_PR_MISC_L2 0x6f
+#define VSC73XX_CAT_PR_USR_PRIO 0x75
+#define VSC73XX_Q_MISC_CONF 0xdf
+
+/* MAC_CFG register bits */
+#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31)
+#define VSC73XX_MAC_CFG_PORT_RST BIT(29)
+#define VSC73XX_MAC_CFG_TX_EN BIT(28)
+#define VSC73XX_MAC_CFG_SEED_LOAD BIT(27)
+#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19)
+#define VSC73XX_MAC_CFG_SEED_OFFSET 19
+#define VSC73XX_MAC_CFG_FDX BIT(18)
+#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17)
+#define VSC73XX_MAC_CFG_RX_EN BIT(16)
+#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15)
+#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14)
+#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */
+#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6)
+#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6
+#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5)
+#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4)
+#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0)
+#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0
+#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1
+#define VSC73XX_MAC_CFG_CLK_SEL_100M 2
+#define VSC73XX_MAC_CFG_CLK_SEL_10M 3
+#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4
+
+#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_GIGA_MODE | \
+ VSC73XX_MAC_CFG_TX_IPG_1000M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_GIGA_MODE | \
+ VSC73XX_MAC_CFG_TX_IPG_1000M | \
+ VSC73XX_MAC_CFG_CLK_SEL_1000M)
+#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \
+ VSC73XX_MAC_CFG_MAC_RX_RST | \
+ VSC73XX_MAC_CFG_MAC_TX_RST)
+
+/* Flow control register bits */
+#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17)
+#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16)
+#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0)
+
+/* ADVPORTM advanced port setup register bits */
+#define VSC73XX_ADVPORTM_IFG_PPM BIT(7)
+#define VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6)
+#define VSC73XX_ADVPORTM_EXT_PORT BIT(5)
+#define VSC73XX_ADVPORTM_INV_GTX BIT(4)
+#define VSC73XX_ADVPORTM_ENA_GTX BIT(3)
+#define VSC73XX_ADVPORTM_DDR_MODE BIT(2)
+#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
+#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+
+/* CAT_DROP categorizer frame dropping register bits */
+#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
+#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
+#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3)
+#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2)
+#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1)
+#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0)
+
+#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
+#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
+
+/* Frame analyzer block 2 registers */
+#define VSC73XX_STORMLIMIT 0x02
+#define VSC73XX_ADVLEARN 0x03
+#define VSC73XX_IFLODMSK 0x04
+#define VSC73XX_VLANMASK 0x05
+#define VSC73XX_MACHDATA 0x06
+#define VSC73XX_MACLDATA 0x07
+#define VSC73XX_ANMOVED 0x08
+#define VSC73XX_ANAGEFIL 0x09
+#define VSC73XX_ANEVENTS 0x0a
+#define VSC73XX_ANCNTMASK 0x0b
+#define VSC73XX_ANCNTVAL 0x0c
+#define VSC73XX_LEARNMASK 0x0d
+#define VSC73XX_UFLODMASK 0x0e
+#define VSC73XX_MFLODMASK 0x0f
+#define VSC73XX_RECVMASK 0x10
+#define VSC73XX_AGGRCTRL 0x20
+#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */
+#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */
+#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */
+#define VSC73XX_CAPENAB 0xa0
+#define VSC73XX_MACACCESS 0xb0
+#define VSC73XX_IPMCACCESS 0xb1
+#define VSC73XX_MACTINDX 0xc0
+#define VSC73XX_VLANACCESS 0xd0
+#define VSC73XX_VLANTIDX 0xe0
+#define VSC73XX_AGENCTRL 0xf0
+#define VSC73XX_CAPRST 0xff
+
+#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
+#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
+#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
+#define VSC73XX_MACACCESS_AGED_FLAG BIT(11)
+#define VSC73XX_MACACCESS_VALID BIT(10)
+#define VSC73XX_MACACCESS_LOCKED BIT(9)
+#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3)
+#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_MACACCESS_CMD_IDLE 0
+#define VSC73XX_MACACCESS_CMD_LEARN 1
+#define VSC73XX_MACACCESS_CMD_FORGET 2
+#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3
+#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4
+#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5
+#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6
+#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7
+
+#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30)
+#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
+#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
+
+/* MII block 3 registers */
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+
+/* Arbiter block 5 registers */
+#define VSC73XX_ARBEMPTY 0x0c
+#define VSC73XX_ARBDISC 0x0e
+#define VSC73XX_SBACKWDROP 0x12
+#define VSC73XX_DBACKWDROP 0x13
+#define VSC73XX_ARBBURSTPROB 0x15
+
+/* System block 7 registers */
+#define VSC73XX_ICPU_SIPAD 0x01
+#define VSC73XX_GMIIDELAY 0x05
+#define VSC73XX_ICPU_CTRL 0x10
+#define VSC73XX_ICPU_ADDR 0x11
+#define VSC73XX_ICPU_SRAM 0x12
+#define VSC73XX_HWSEM 0x13
+#define VSC73XX_GLORESET 0x14
+#define VSC73XX_ICPU_MBOX_VAL 0x15
+#define VSC73XX_ICPU_MBOX_SET 0x16
+#define VSC73XX_ICPU_MBOX_CLR 0x17
+#define VSC73XX_CHIPID 0x18
+#define VSC73XX_GPIO 0x34
+
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3
+
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4)
+
+#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31)
+#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8)
+#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7)
+#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6)
+#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3)
+#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2)
+#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1)
+#define VSC73XX_ICPU_CTRL_SRST BIT(0)
+
+#define VSC73XX_CHIPID_ID_SHIFT 12
+#define VSC73XX_CHIPID_ID_MASK 0xffff
+#define VSC73XX_CHIPID_REV_SHIFT 28
+#define VSC73XX_CHIPID_REV_MASK 0xf
+#define VSC73XX_CHIPID_ID_7385 0x7385
+#define VSC73XX_CHIPID_ID_7388 0x7388
+#define VSC73XX_CHIPID_ID_7395 0x7395
+#define VSC73XX_CHIPID_ID_7398 0x7398
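+
+/* Example decode: a revision 1 VSC7395 reads back as 0x17395xxx, with
+ * the revision in bits 31:28 and the part number in bits 27:12.
+ */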
+
+#define VSC73XX_GLORESET_STROBE BIT(4)
+#define VSC73XX_GLORESET_ICPU_LOCK BIT(3)
+#define VSC73XX_GLORESET_MEM_LOCK BIT(2)
+#define VSC73XX_GLORESET_PHY_RESET BIT(1)
+#define VSC73XX_GLORESET_MASTER_RESET BIT(0)
+
+#define VSC73XX_CMD_MODE_READ 0
+#define VSC73XX_CMD_MODE_WRITE 1
+#define VSC73XX_CMD_MODE_SHIFT 4
+#define VSC73XX_CMD_BLOCK_SHIFT 5
+#define VSC73XX_CMD_BLOCK_MASK 0x7
+#define VSC73XX_CMD_SUBBLOCK_MASK 0xf
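+
+/* The first SPI command byte is laid out as [7:5] block, [4] read/write
+ * mode and [3:0] subblock; e.g. a read (mode 0) of MAC block 0x1,
+ * subblock 0 encodes as (0x1 << 5) | (0 << 4) | 0x0 = 0x20, and the
+ * corresponding write command as 0x30.
+ */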
+
+#define VSC7385_CLOCK_DELAY ((3 << 4) | 3)
+#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3)
+
+#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \
+ VSC73XX_ICPU_CTRL_BOOT_EN | \
+ VSC73XX_ICPU_CTRL_EXT_ACC_EN)
+
+#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \
+ VSC73XX_ICPU_CTRL_BOOT_EN | \
+ VSC73XX_ICPU_CTRL_CLK_EN | \
+ VSC73XX_ICPU_CTRL_SRST)
+
+/**
+ * struct vsc73xx - VSC73xx state container
+ */
+struct vsc73xx {
+ struct device *dev;
+ struct gpio_desc *reset;
+ struct spi_device *spi;
+ struct dsa_switch *ds;
+ struct gpio_chip gc;
+ u16 chipid;
+ u8 addr[ETH_ALEN];
+ struct mutex lock; /* Protects SPI traffic */
+};
+
+#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385)
+#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388)
+#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395)
+#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
+#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+
+struct vsc73xx_counter {
+ u8 counter;
+ const char *name;
+};
+
+/* Counters are named according to the MIB standards where applicable.
+ * Some counters are custom, non-standard. The standard counters are
+ * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
+ * 30A Counters.
+ */
+static const struct vsc73xx_counter vsc73xx_rx_counters[] = {
+ { 0, "RxEtherStatsPkts" },
+ { 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */
+ { 2, "RxTotalErrorPackets" }, /* non-standard counter */
+ { 3, "RxEtherStatsBroadcastPkts" },
+ { 4, "RxEtherStatsMulticastPkts" },
+ { 5, "RxEtherStatsPkts64Octets" },
+ { 6, "RxEtherStatsPkts65to127Octets" },
+ { 7, "RxEtherStatsPkts128to255Octets" },
+ { 8, "RxEtherStatsPkts256to511Octets" },
+ { 9, "RxEtherStatsPkts512to1023Octets" },
+ { 10, "RxEtherStatsPkts1024to1518Octets" },
+ { 11, "RxJumboFrames" }, /* non-standard counter */
+ { 12, "RxaPauseMACControlFramesTransmitted" },
+ { 13, "RxFIFODrops" }, /* non-standard counter */
+ { 14, "RxBackwardDrops" }, /* non-standard counter */
+ { 15, "RxClassifierDrops" }, /* non-standard counter */
+ { 16, "RxEtherStatsCRCAlignErrors" },
+ { 17, "RxEtherStatsUndersizePkts" },
+ { 18, "RxEtherStatsOversizePkts" },
+ { 19, "RxEtherStatsFragments" },
+ { 20, "RxEtherStatsJabbers" },
+ { 21, "RxaMACControlFramesReceived" },
+ /* 22-24 are undefined */
+ { 25, "RxaFramesReceivedOK" },
+ { 26, "RxQoSClass0" }, /* non-standard counter */
+ { 27, "RxQoSClass1" }, /* non-standard counter */
+ { 28, "RxQoSClass2" }, /* non-standard counter */
+ { 29, "RxQoSClass3" }, /* non-standard counter */
+};
+
+static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
+ { 0, "TxEtherStatsPkts" },
+ { 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */
+ { 2, "TxTotalErrorPackets" }, /* non-standard counter */
+ { 3, "TxEtherStatsBroadcastPkts" },
+ { 4, "TxEtherStatsMulticastPkts" },
+ { 5, "TxEtherStatsPkts64Octets" },
+ { 6, "TxEtherStatsPkts65to127Octets" },
+ { 7, "TxEtherStatsPkts128to255Octets" },
+ { 8, "TxEtherStatsPkts256to511Octets" },
+ { 9, "TxEtherStatsPkts512to1023Octets" },
+ { 10, "TxEtherStatsPkts1024to1518Octets" },
+ { 11, "TxJumboFrames" }, /* non-standard counter */
+ { 12, "TxaPauseMACControlFramesTransmitted" },
+ { 13, "TxFIFODrops" }, /* non-standard counter */
+ { 14, "TxDrops" }, /* non-standard counter */
+ { 15, "TxEtherStatsCollisions" },
+ { 16, "TxEtherStatsCRCAlignErrors" },
+ { 17, "TxEtherStatsUndersizePkts" },
+ { 18, "TxEtherStatsOversizePkts" },
+ { 19, "TxEtherStatsFragments" },
+ { 20, "TxEtherStatsJabbers" },
+ /* 21-24 are undefined */
+ { 25, "TxaFramesReceivedOK" },
+ { 26, "TxQoSClass0" }, /* non-standard counter */
+ { 27, "TxQoSClass1" }, /* non-standard counter */
+ { 28, "TxQoSClass2" }, /* non-standard counter */
+ { 29, "TxQoSClass3" }, /* non-standard counter */
+};
+
+static int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+{
+ switch (block) {
+ case VSC73XX_BLOCK_MAC:
+ switch (subblock) {
+ case 0 ... 4:
+ case 6:
+ return 1;
+ }
+ break;
+
+ case VSC73XX_BLOCK_ANALYZER:
+ case VSC73XX_BLOCK_SYSTEM:
+ switch (subblock) {
+ case 0:
+ return 1;
+ }
+ break;
+
+ case VSC73XX_BLOCK_MII:
+ case VSC73XX_BLOCK_CAPTURE:
+ case VSC73XX_BLOCK_ARBITER:
+ switch (subblock) {
+ case 0 ... 1:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
+{
+ u8 ret;
+
+ ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT;
+ ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT;
+ ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK;
+
+ return ret;
+}
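+
+/* For example, vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE,
+ * VSC73XX_BLOCK_SYSTEM, 0) yields (0x7 << 5) | (1 << 4) | 0x0 = 0xf0.
+ */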
+
+static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 *val)
+{
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[4];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock);
+ cmd[1] = reg;
+ cmd[2] = 0;
+ cmd[3] = 0;
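+ /* The two trailing zero bytes pad the command while the switch
+ * fetches the register contents, before the four data bytes are
+ * clocked out in the second transfer (our reading of the access
+ * scheme; the datasheet should be consulted for the exact timing).
+ */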
+
+ mutex_lock(&vsc->lock);
+ ret = spi_sync(vsc->spi, &m);
+ mutex_unlock(&vsc->lock);
+
+ if (ret)
+ return ret;
+
+ *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+
+ return 0;
+}
+
+static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 val)
+{
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[2];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock);
+ cmd[1] = reg;
+
+ buf[0] = (val >> 24) & 0xff;
+ buf[1] = (val >> 16) & 0xff;
+ buf[2] = (val >> 8) & 0xff;
+ buf[3] = val & 0xff;
+
+ mutex_lock(&vsc->lock);
+ ret = spi_sync(vsc->spi, &m);
+ mutex_unlock(&vsc->lock);
+
+ return ret;
+}
+
+static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
+ u8 reg, u32 mask, u32 val)
+{
+ u32 tmp, orig;
+ int ret;
+
+ /* Same read-modify-write algorithm as e.g. regmap */
+ ret = vsc73xx_read(vsc, block, subblock, reg, &orig);
+ if (ret)
+ return ret;
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ return vsc73xx_write(vsc, block, subblock, reg, tmp);
+}
+
+static int vsc73xx_detect(struct vsc73xx *vsc)
+{
+ bool icpu_si_boot_en;
+ bool icpu_pi_en;
+ u32 val;
+ u32 rev;
+ int ret;
+ u32 id;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_MBOX_VAL, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret);
+ return ret;
+ }
+
+ if (val == 0xffffffff) {
+ dev_info(vsc->dev, "chip seems dead, assert reset\n");
+ gpiod_set_value_cansleep(vsc->reset, 1);
+ /* Reset pulse should be 20ns minimum, according to datasheet
+ * table 245, so 10us should be fine
+ */
+ usleep_range(10, 100);
+ gpiod_set_value_cansleep(vsc->reset, 0);
+ /* Wait 20ms according to datasheet table 245 */
+ msleep(20);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_MBOX_VAL, &val);
+ if (ret)
+ return ret;
+ if (val == 0xffffffff) {
+ dev_err(vsc->dev, "reset did not help, giving up\n");
+ return -ENODEV;
+ }
+ }
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_CHIPID, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read chip id (%d)\n", ret);
+ return ret;
+ }
+
+ id = (val >> VSC73XX_CHIPID_ID_SHIFT) &
+ VSC73XX_CHIPID_ID_MASK;
+ switch (id) {
+ case VSC73XX_CHIPID_ID_7385:
+ case VSC73XX_CHIPID_ID_7388:
+ case VSC73XX_CHIPID_ID_7395:
+ case VSC73XX_CHIPID_ID_7398:
+ break;
+ default:
+ dev_err(vsc->dev, "unsupported chip, id=%04x\n", id);
+ return -ENODEV;
+ }
+
+ vsc->chipid = id;
+ rev = (val >> VSC73XX_CHIPID_REV_SHIFT) &
+ VSC73XX_CHIPID_REV_MASK;
+ dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_CTRL, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read iCPU control\n");
+ return ret;
+ }
+
+ /* The iCPU can always be used but can boot in different ways.
+ * If it is initially disabled and has no external memory,
+ * we are in control and can do whatever we like, else we
+ * are probably in trouble (we need some way to communicate
+ * with the running firmware) so we bail out for now.
+ */
+ icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN);
+ icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN);
+ if (icpu_si_boot_en && icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled boots from SI, has external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ if (icpu_si_boot_en && !icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled boots from SI, no external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ if (!icpu_si_boot_en && icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled, boots from PI external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ /* !icpu_si_boot_en && !cpu_pi_en */
+ dev_info(vsc->dev, "iCPU disabled, no external memory\n");
+
+ return 0;
+}
+
+static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 cmd;
+ u32 val;
+ int ret;
+
+ /* Setting bit 26 means "read" */
+ cmd = BIT(26) | (phy << 21) | (regnum << 16);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+ msleep(2);
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+ if (ret)
+ return ret;
+ if (val & BIT(16)) {
+ dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
+ regnum, phy);
+ return -EIO;
+ }
+ val &= 0xFFFFU;
+
+ dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
+ regnum, phy, val);
+
+ return val;
+}
+
+static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 cmd;
+ int ret;
+
+ /* It was found through tedious experiments that this router
+ * chip really hates to have its PHYs reset. They never recover
+ * if that happens: autonegotiation stops working after a reset.
+ * Just filter out this command.
+ * (Resetting the whole chip is OK.)
+ */
+ if (regnum == 0 && (val & BIT(15))) {
+ dev_info(vsc->dev, "reset PHY - disallowed\n");
+ return 0;
+ }
+
+ cmd = (phy << 21) | (regnum << 16);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+
+ dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n",
+ val, regnum, phy);
+ return 0;
+}
+
+static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ /* The switch internally uses an 8-byte header with length,
+ * source port, tag, LPA and priority. This is supposedly
+ * only accessible when operating the switch using the internal
+ * CPU or with an external CPU mapping the device in, but not
+ * when operating the switch over SPI and putting frames in/out
+ * on port 6 (the CPU port). So far we must assume that we
+ * cannot access the tag. (See "Internal frame header" section
+ * 3.9.1 in the manual.)
+ */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int vsc73xx_setup(struct dsa_switch *ds)
+{
+ struct vsc73xx *vsc = ds->priv;
+ int i;
+
+ dev_info(vsc->dev, "set up the switch\n");
+
+ /* Issue RESET */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+ VSC73XX_GLORESET_MASTER_RESET);
+ usleep_range(125, 200);
+
+ /* Initialize memory, initialize RAM bank 0..15 except 6 and 7
+ * This sequence appears in the
+ * VSC7385 SparX-G5 datasheet section 6.6.1
+ * VSC7395 SparX-G5e datasheet section 6.6.1
+ * "initialization sequence".
+ * No explanation is given for the 0x1010400 magic number.
+ */
+ for (i = 0; i <= 15; i++) {
+ if (i != 6 && i != 7) {
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT,
+ 2,
+ 0, 0x1010400 + i);
+ mdelay(1);
+ }
+ }
+ mdelay(30);
+
+ /* Clear MAC table */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS,
+ VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
+
+ /* Clear VLAN table */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
+
+ msleep(40);
+
+ /* Use 20KiB buffers on all ports on the VSC7395.
+ * The VSC7385 has 16KiB buffers, and that is the
+ * default if we don't set this up explicitly.
+ * Port "31" means "all ports".
+ */
+ if (IS_739X(vsc))
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f,
+ VSC73XX_Q_MISC_CONF,
+ VSC73XX_Q_MISC_CONF_EXTENT_MEM);
+
+ /* Put all ports into reset until enabled */
+ for (i = 0; i < 7; i++) {
+ if (i == 5)
+ continue;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, i,
+ VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+ }
+
+ /* MII delay, set both GTX and RX delay to 2 ns */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
+ VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
+ /* Enable reception of frames on all ports */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
+ 0x5f);
+ /* IP multicast flood mask (table 144) */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
+ 0xff);
+
+ mdelay(50);
+
+ /* Release reset from the internal PHYs */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+ VSC73XX_GLORESET_PHY_RESET);
+
+ udelay(4);
+
+ return 0;
+}
+
+static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
+{
+ u32 val;
+
+ /* MAC configure, first reset the port and then write defaults */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET);
+
+ /* Bring up the port in 1Gbit mode by default; this will be
+ * augmented after auto-negotiation on the PHY-facing
+ * ports.
+ */
+ if (port == CPU_PORT)
+ val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+ else
+ val = VSC73XX_MAC_CFG_1000M_F_PHY;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAC_CFG,
+ val |
+ VSC73XX_MAC_CFG_TX_EN |
+ VSC73XX_MAC_CFG_RX_EN);
+
+ /* Max length: we can do up to 9.6 KiB, so allow that.
+ * According to the application note "VSC7398 Jumbo Frames", setting
+ * the MTU to 9.6 KB does not affect the performance on standard
+ * frames, so just enable it. It is clear from the application note
+ * that "9.6 kilobytes" == 9600 bytes.
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAXLEN, 9600);
+
+ /* Flow control for the CPU port:
+ * Use a zero delay pause frame when pause condition is left
+ * Obey pause control frames
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCCONF,
+ VSC73XX_FCCONF_ZERO_PAUSE_EN |
+ VSC73XX_FCCONF_FLOW_CTRL_OBEY);
+
+ /* Issue pause control frames on PHY facing ports.
+ * Allow early initiation of MAC transmission if the amount
+ * of egress data is below 512 bytes on CPU port.
+ * FIXME: enable 20KiB buffers?
+ */
+ if (port == CPU_PORT)
+ val = VSC73XX_Q_MISC_CONF_EARLY_TX_512;
+ else
+ val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE;
+ val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_Q_MISC_CONF,
+ val);
+
+ /* Flow control MAC: a MAC address used in flow control frames */
+ val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCMACHI,
+ val);
+ val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCMACLO,
+ val);
+
+ /* Tell the categorizer to forward pause frames, not control
+ * frames. Do not drop anything.
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_CAT_DROP,
+ VSC73XX_CAT_DROP_FWD_PAUSE_ENA);
+
+ /* Clear all counters */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port, VSC73XX_C_RX0, 0);
+}
+
+static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
+ int port, struct phy_device *phydev,
+ u32 initval)
+{
+ u32 val = initval;
+ u8 seed;
+
+ /* Reset this port FIXME: break out subroutine */
+ val |= VSC73XX_MAC_CFG_RESET;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+ /* Load a random seed into the MAC config seed field */
+ get_random_bytes(&seed, 1);
+ val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
+ val |= VSC73XX_MAC_CFG_SEED_LOAD;
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+ /* Flow control for the PHY facing ports:
+ * Use a zero delay pause frame when pause condition is left
+ * Obey pause control frames
+ * When generating pause frames, use 0xff as pause value
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
+ VSC73XX_FCCONF_ZERO_PAUSE_EN |
+ VSC73XX_FCCONF_FLOW_CTRL_OBEY |
+ 0xff);
+
+ /* Disallow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), 0);
+
+ /* Enable TX, RX, deassert reset, stop loading seed */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
+}
+
+static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 val;
+
+ /* Special handling of the CPU-facing port */
+ if (port == CPU_PORT) {
+ /* Other ports are already initialized but not this one */
+ vsc73xx_init_port(vsc, CPU_PORT);
+ /* Select the external port for this interface (EXT_PORT)
+ * Enable the GMII GTX external clock
+ * Use double data rate (DDR mode)
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ CPU_PORT,
+ VSC73XX_ADVPORTM,
+ VSC73XX_ADVPORTM_EXT_PORT |
+ VSC73XX_ADVPORTM_ENA_GTX |
+ VSC73XX_ADVPORTM_DDR_MODE);
+ }
+
+ /* This is the MAC configuration that always needs to happen
+ * after a PHY or the CPU port comes up or goes down.
+ */
+ if (!phydev->link) {
+ int maxloop = 10;
+
+ dev_dbg(vsc->dev, "port %d: went down\n",
+ port);
+
+ /* Disable RX on this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RX_EN, 0);
+
+ /* Discard packets */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ while (!(val & BIT(port))) {
+ msleep(1);
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ if (--maxloop == 0) {
+ dev_err(vsc->dev,
+ "timeout waiting for block arbiter\n");
+ /* Continue anyway */
+ break;
+ }
+ }
+
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET);
+
+ /* Accept packets again */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), 0);
+
+ /* Allow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+
+ /* Receive mask (disable forwarding) */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), 0);
+
+ return;
+ }
+
+ /* Figure out what speed was negotiated */
+ if (phydev->speed == SPEED_1000) {
+ dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
+ port);
+
+ /* Set up default for internal port or external RGMII */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+ else
+ val = VSC73XX_MAC_CFG_1000M_F_PHY;
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else if (phydev->speed == SPEED_100) {
+ if (phydev->duplex == DUPLEX_FULL) {
+ val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 100 Mbit full duplex mode\n",
+ port);
+ } else {
+ val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 100 Mbit half duplex mode\n",
+ port);
+ }
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else if (phydev->speed == SPEED_10) {
+ if (phydev->duplex == DUPLEX_FULL) {
+ val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 10 Mbit full duplex mode\n",
+ port);
+ } else {
+ val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 10 Mbit half duplex mode\n",
+ port);
+ }
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else {
+ dev_err(vsc->dev,
+ "could not adjust link: unknown speed\n");
+ }
+
+ /* Enable port (forwarding) in the receive mask */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), BIT(port));
+}
+
+static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ dev_info(vsc->dev, "enable port %d\n", port);
+ vsc73xx_init_port(vsc, port);
+
+ return 0;
+}
+
+static void vsc73xx_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ /* Just put the port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+}
+
+static const struct vsc73xx_counter *
+vsc73xx_find_counter(struct vsc73xx *vsc,
+ u8 counter,
+ bool tx)
+{
+ const struct vsc73xx_counter *cnts;
+ int num_cnts;
+ int i;
+
+ if (tx) {
+ cnts = vsc73xx_tx_counters;
+ num_cnts = ARRAY_SIZE(vsc73xx_tx_counters);
+ } else {
+ cnts = vsc73xx_rx_counters;
+ num_cnts = ARRAY_SIZE(vsc73xx_rx_counters);
+ }
+
+ for (i = 0; i < num_cnts; i++) {
+ const struct vsc73xx_counter *cnt;
+
+ cnt = &cnts[i];
+ if (cnt->counter == counter)
+ return cnt;
+ }
+
+ return NULL;
+}
+
+static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ const struct vsc73xx_counter *cnt;
+ struct vsc73xx *vsc = ds->priv;
+ u8 indices[6];
+ int i, j;
+ u32 val;
+ int ret;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_C_CFG, &val);
+ if (ret)
+ return;
+
+ indices[0] = (val & 0x1f); /* RX counter 0 */
+ indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */
+ indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */
+ indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */
+ indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */
+ indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
+
+ /* The first counter is the RX octets */
+ j = 0;
+ strncpy(data + j * ETH_GSTRING_LEN,
+ "RxEtherStatsOctets", ETH_GSTRING_LEN);
+ j++;
+
+ /* Each port supports recording 3 RX counters and 3 TX counters,
+ * figure out what counters we use in this set-up and return the
+ * names of them. The hardware default counters will be number of
+ * packets on RX/TX, combined broadcast+multicast packets RX/TX and
+ * total error packets RX/TX.
+ */
+ for (i = 0; i < 3; i++) {
+ cnt = vsc73xx_find_counter(vsc, indices[i], false);
+ if (cnt)
+ strncpy(data + j * ETH_GSTRING_LEN,
+ cnt->name, ETH_GSTRING_LEN);
+ j++;
+ }
+
+ /* The TX stats begin with the number of TX octets */
+ strncpy(data + j * ETH_GSTRING_LEN,
+ "TxEtherStatsOctets", ETH_GSTRING_LEN);
+ j++;
+
+ for (i = 3; i < 6; i++) {
+ cnt = vsc73xx_find_counter(vsc, indices[i], true);
+ if (cnt)
+ strncpy(data + j * ETH_GSTRING_LEN,
+ cnt->name, ETH_GSTRING_LEN);
+ j++;
+ }
+}
+
+static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ /* We only support SS_STATS */
+ if (sset != ETH_SS_STATS)
+ return 0;
+ /* RX and TX packets, then 3 RX counters, 3 TX counters */
+ return 8;
+}
+
+static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u8 regs[] = {
+ VSC73XX_RXOCT,
+ VSC73XX_C_RX0,
+ VSC73XX_C_RX1,
+ VSC73XX_C_RX2,
+ VSC73XX_TXOCT,
+ VSC73XX_C_TX0,
+ VSC73XX_C_TX1,
+ VSC73XX_C_TX2,
+ };
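+ /* The order of these registers must match the string order laid
+ * out in vsc73xx_get_strings(): RX octets, three RX counters,
+ * TX octets, three TX counters.
+ */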
+ u32 val;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+ regs[i], &val);
+ if (ret) {
+ dev_err(vsc->dev, "error reading counter %d\n", i);
+ return;
+ }
+ data[i] = val;
+ }
+}
+
+static const struct dsa_switch_ops vsc73xx_ds_ops = {
+ .get_tag_protocol = vsc73xx_get_tag_protocol,
+ .setup = vsc73xx_setup,
+ .phy_read = vsc73xx_phy_read,
+ .phy_write = vsc73xx_phy_write,
+ .adjust_link = vsc73xx_adjust_link,
+ .get_strings = vsc73xx_get_strings,
+ .get_ethtool_stats = vsc73xx_get_ethtool_stats,
+ .get_sset_count = vsc73xx_get_sset_count,
+ .port_enable = vsc73xx_port_enable,
+ .port_disable = vsc73xx_port_disable,
+};
+
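+/* In the VSC73XX_GPIO register, bits 0..3 carry the four pin values and
+ * bits 4..7 are the matching direction bits (set means output), which
+ * is why the helpers below mask BIT(offset) and BIT(offset + 4).
+ */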
+static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 tmp = val ? BIT(offset) : 0;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
+}
+
+static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 tmp = val ? BIT(offset) : 0;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset + 4) | BIT(offset),
+ BIT(offset + 4) | tmp);
+}
+
+static int vsc73xx_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset + 4),
+ 0);
+}
+
+static int vsc73xx_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, &val);
+ if (ret)
+ return ret;
+
+ return !(val & BIT(offset + 4));
+}
+
+static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+{
+ int ret;
+
+ vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+ vsc->chipid);
+ vsc->gc.ngpio = 4;
+ vsc->gc.owner = THIS_MODULE;
+ vsc->gc.parent = vsc->dev;
+ vsc->gc.of_node = vsc->dev->of_node;
+ vsc->gc.base = -1;
+ vsc->gc.get = vsc73xx_gpio_get;
+ vsc->gc.set = vsc73xx_gpio_set;
+ vsc->gc.direction_input = vsc73xx_gpio_direction_input;
+ vsc->gc.direction_output = vsc73xx_gpio_direction_output;
+ vsc->gc.get_direction = vsc73xx_gpio_get_direction;
+ vsc->gc.can_sleep = true;
+ ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc);
+ if (ret) {
+ dev_err(vsc->dev, "unable to register GPIO chip\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vsc73xx_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct vsc73xx *vsc;
+ int ret;
+
+ vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL);
+ if (!vsc)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, vsc);
+ vsc->spi = spi_dev_get(spi);
+ vsc->dev = dev;
+ mutex_init(&vsc->lock);
+
+ /* Release reset, if any */
+ vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(vsc->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(vsc->reset);
+ }
+ if (vsc->reset)
+ /* Wait 20ms according to datasheet table 245 */
+ msleep(20);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ ret = vsc73xx_detect(vsc);
+ if (ret) {
+ dev_err(dev, "no chip found (%d)\n", ret);
+ return -ENODEV;
+ }
+
+ eth_random_addr(vsc->addr);
+ dev_info(vsc->dev,
+ "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vsc->addr[0], vsc->addr[1], vsc->addr[2],
+ vsc->addr[3], vsc->addr[4], vsc->addr[5]);
+
+ /* The VSC7395 switch chips have 5+1 ports which means 5
+ * ordinary ports and a sixth CPU port facing the processor
+ * with an RGMII interface. These ports are numbered 0..4
+ * and 6, so they leave a "hole" in the port map for port 5,
+ * which is invalid.
+ *
+ * The VSC7398 has 8 ports, port 7 is again the CPU port.
+ *
+ * We allocate 8 ports and avoid access to the nonexistent
+ * ports.
+ */
+ vsc->ds = dsa_switch_alloc(dev, 8);
+ if (!vsc->ds)
+ return -ENOMEM;
+ vsc->ds->priv = vsc;
+
+ vsc->ds->ops = &vsc73xx_ds_ops;
+ ret = dsa_register_switch(vsc->ds);
+ if (ret) {
+ dev_err(dev, "unable to register switch (%d)\n", ret);
+ return ret;
+ }
+
+ ret = vsc73xx_gpio_probe(vsc);
+ if (ret) {
+ dsa_unregister_switch(vsc->ds);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsc73xx_remove(struct spi_device *spi)
+{
+ struct vsc73xx *vsc = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(vsc->ds);
+ gpiod_set_value(vsc->reset, 1);
+
+ return 0;
+}
+
+static const struct of_device_id vsc73xx_of_match[] = {
+ {
+ .compatible = "vitesse,vsc7385",
+ },
+ {
+ .compatible = "vitesse,vsc7388",
+ },
+ {
+ .compatible = "vitesse,vsc7395",
+ },
+ {
+ .compatible = "vitesse,vsc7398",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
+static struct spi_driver vsc73xx_driver = {
+ .probe = vsc73xx_probe,
+ .remove = vsc73xx_remove,
+ .driver = {
+ .name = "vsc73xx",
+ .of_match_table = vsc73xx_of_match,
+ },
+};
+module_spi_driver(vsc73xx_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf020..5c3ef9fc8207 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !PPC32
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y here.
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d422a124cd7c..0b6bbf63f7ca 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -610,6 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
+ /* Fall through */
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b6d735bf8011..342ae08ec3c2 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
-
#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy_toio(membase, &outdata, 4);
+ memcpy_toio((void __iomem *)membase, &outdata, 4);
/* Now compare them */
if (memcmp_withio(&outdata, membase, 4) == 0)
return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
+ memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
/* Fix endianness */
hdr->count = swab16(hdr->count);
}
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, dev->mem_start + xfer_base,
+ memcpy_fromio(skb->data,
+ (void __iomem *)dev->mem_start + xfer_base,
semi_count);
count -= semi_count;
- memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
- count);
+ memcpy_fromio(skb->data + semi_count,
+ (void __iomem *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data,
+ (void __iomem *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
{
long shmem = (start_page - WD_START_PG)<<8;
- memcpy_toio(dev->mem_start + shmem, buf, count);
+ memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
}
/* dayna block input/output */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index af766fd61151..6fde68aa13a4 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -81,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -128,6 +127,7 @@ config FEALNX
cards. <http://www.myson.com.tw/>
source "drivers/net/ethernet/natsemi/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8fbfe9ce2fa5..b45d5f626b59 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
-obj-$(CONFIG_NET_CADENCE) += cadence/
+obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
@@ -36,7 +36,6 @@ obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
-obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
@@ -60,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
+obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_NETX) += netx-eth.o
@@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
obj-$(CONFIG_ETHOC) += ethoc.o
-obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
+obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
@@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
-obj-$(CONFIG_SFC) += sfc/
-obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
+obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 3872ab96b80a..097467f44b0d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -802,7 +802,7 @@ static int starfire_init_one(struct pci_dev *pdev,
int mii_status;
for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
- mdelay(100);
+ msleep(100);
boguscnt = 1000;
while (--boguscnt > 0)
if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8f71b79b4949..4f11f98347ed 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -551,6 +551,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
ap->name);
break;
}
+ /* Fall through */
case PCI_VENDOR_ID_SGI:
printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
@@ -1933,7 +1934,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
- struct rx_desc *rxdesc, *retdesc;
+ struct rx_desc *retdesc;
u32 skbidx;
int bd_flags, desc_type, mapsize;
u16 csum;
@@ -1959,19 +1960,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
mapsize = ACE_STD_BUFSIZE;
- rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
mapsize = ACE_JUMBO_BUFSIZE;
- rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE;
- rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
default:
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1b9d3130af4d..17f12c18d225 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+ io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
sizeof(struct ena_eth_io_tx_desc) :
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f2af87d70594..c673ac2df65b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
u16 qid;
/* we suspect that this is good for in-kernel network services that
@@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb);
+ qid = fallback(dev, skb, NULL);
return qid;
}
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index d5c15e8bb3de..9e5cf5583c87 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
@@ -173,7 +173,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
+ depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
depends on X86 || ARM64 || COMPILE_TEST
select BITREVERSE
select CRC32
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index be198cc0b10c..f5ad12c10934 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2036,22 +2036,22 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
}
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_dma_addr)
return -ENOMEM;
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_dma_addr)
return -ENOMEM;
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_skbuff)
return -ENOMEM;
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_skbuff)
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index cc1e4f820e64..533094233659 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
struct page *pages = NULL;
dma_addr_t pages_dma;
gfp_t gfp;
- int order, ret;
+ int order;
again:
order = alloc_order;
@@ -316,10 +316,9 @@ again:
/* Map the pages */
pages_dma = dma_map_page(pdata->dev, pages, 0,
PAGE_SIZE << order, DMA_FROM_DEVICE);
- ret = dma_mapping_error(pdata->dev, pages_dma);
- if (ret) {
+ if (dma_mapping_error(pdata->dev, pages_dma)) {
put_page(pages);
- return ret;
+ return -ENOMEM;
}
pa->pages = pages;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index e107e180e2c8..1e929a1e4ca7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -119,6 +119,7 @@
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -887,7 +888,6 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
- u32 poly = 0xedb88320; /* CRCPOLY_LE */
u32 crc = ~0;
u32 temp = 0;
unsigned char *data = (unsigned char *)&vid_le;
@@ -904,7 +904,7 @@ static u32 xgbe_vid_crc32_le(__le16 vid_le)
data_byte >>= 1;
if (temp)
- crc ^= poly;
+ crc ^= CRC32_POLY_LE;
}
return crc;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4b5d625de8f0..8a3a60bb2688 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
if (pdata->tx_pause != pdata->phy.tx_pause) {
new_state = 1;
- pdata->hw_if.config_tx_flow_control(pdata);
pdata->tx_pause = pdata->phy.tx_pause;
+ pdata->hw_if.config_tx_flow_control(pdata);
}
if (pdata->rx_pause != pdata->phy.rx_pause) {
new_state = 1;
- pdata->hw_if.config_rx_flow_control(pdata);
pdata->rx_pause = pdata->phy.rx_pause;
+ pdata->hw_if.config_rx_flow_control(pdata);
}
/* Speed support */
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
index 1205861b6318..eedd3f3dd22e 100644
--- a/drivers/net/ethernet/apm/xgene-v2/Kconfig
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -1,6 +1,5 @@
config NET_XGENE_V2
tristate "APM X-Gene SoC Ethernet-v2 Driver"
- depends on HAS_DMA
depends on ARCH_XGENE || COMPILE_TEST
help
This is the Ethernet driver for the on-chip ethernet interface
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index afccb033177b..e4e33c900b57 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,6 +1,5 @@
config NET_XGENE
tristate "APM X-Gene SoC Ethernet Driver"
- depends on HAS_DMA
depends on ARCH_XGENE || COMPILE_TEST
select PHYLIB
select MDIO_XGENE
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 3188f553da35..078a04dc1182 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -836,19 +836,19 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
struct fwnode_handle *fw_node;
int status;
fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
&args);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) {
dev_dbg(dev, "No matching phy in ACPI table\n");
return NULL;
}
- return args.adev;
+ return to_acpi_device_node(args.fwnode);
}
#endif
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 5a655d289dd5..024998d6d8c6 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -37,11 +38,6 @@
#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
-/*
- * CRC polynomial - used in working out multicast filter bits.
- */
-#define ENET_CRCPOLY 0x04c11db7
-
/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST
@@ -838,7 +834,7 @@ crc416(unsigned int curval, unsigned short nxtval)
next = next >> 1;
/* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
+ if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
}
return cur;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index fc7383106946..91eb8910b1c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -63,8 +63,6 @@
#define AQ_CFG_NAPI_WEIGHT 64U
-#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
-
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
#define AQ_NIC_FC_OFF 0U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index f2d8063a2cef..08c9fa6ca71f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -11,6 +11,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
+#include "aq_vec.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -284,6 +285,117 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
return aq_nic_update_interrupt_moderation_settings(aq_nic);
}
+static int aq_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
+ return -EOPNOTSUPP;
+
+ if (netif_running(ndev))
+ return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+
+ return 0;
+}
+
+static void aq_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ pause->autoneg = 0;
+
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ pause->rx_pause = 1;
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ pause->tx_pause = 1;
+}
+
+static int aq_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ if (!aq_nic->aq_fw_ops->set_flow_control)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP;
+
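+ /* Fold the requested RX/TX pause settings into the software
+ * configuration, then ask the firmware to apply them.
+ */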
+ if (pause->rx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+
+ if (pause->tx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+
+ err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+
+ return err;
+}
+
+static void aq_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+
+ ring->rx_pending = aq_nic_cfg->rxds;
+ ring->tx_pending = aq_nic_cfg->txds;
+
+ ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+}
+
+static int aq_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ int err = 0;
+ bool ndev_running = false;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+ const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
+ err = -EOPNOTSUPP;
+ goto err_exit;
+ }
+
+ if (netif_running(ndev)) {
+ ndev_running = true;
+ dev_close(ndev);
+ }
+
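+ /* Resizing means rebuilding all vectors: clamp the requested
+ * descriptor counts to the hardware limits and round them to
+ * the required multiple before reallocating.
+ */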
+ aq_nic_free_vectors(aq_nic);
+
+ aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
+ aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+
+ aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
+ aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+
+ for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+ aq_nic->aq_vecs++) {
+ aq_nic->aq_vec[aq_nic->aq_vecs] =
+ aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+ if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+ if (ndev_running)
+ err = dev_open(ndev);
+
+err_exit:
+ return err;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
@@ -291,6 +403,11 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .nway_reset = aq_ethtool_nway_reset,
+ .get_ringparam = aq_get_ringparam,
+ .set_ringparam = aq_set_ringparam,
+ .get_pauseparam = aq_ethtool_get_pauseparam,
+ .set_pauseparam = aq_ethtool_set_pauseparam,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a2d416b24ffc..5c00671f248d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -24,8 +24,10 @@ struct aq_hw_caps_s {
u64 link_speed_msk;
unsigned int hw_priv_flags;
u32 media_type;
- u32 rxds;
- u32 txds;
+ u32 rxds_max;
+ u32 txds_max;
+ u32 rxds_min;
+ u32 txds_min;
u32 txhwb_alignment;
u32 irq_mask;
u32 vecs;
@@ -98,6 +100,11 @@ struct aq_stats_s {
#define AQ_HW_MEDIA_TYPE_TP 1U
#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+#define AQ_HW_TXD_MULTIPLE 8U
+#define AQ_HW_RXD_MULTIPLE 8U
+
+#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -177,7 +184,7 @@ struct aq_hw_ops {
unsigned int packet_filter);
int (*hw_multicast_list_set)(struct aq_hw_s *self,
- u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+ u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
u32 count);
@@ -197,25 +204,30 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
- int (*hw_deinit)(struct aq_hw_s *self);
-
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
struct aq_fw_ops {
int (*init)(struct aq_hw_s *self);
+ int (*deinit)(struct aq_hw_s *self);
+
int (*reset)(struct aq_hw_s *self);
+ int (*renegotiate)(struct aq_hw_s *self);
+
int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
- int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+ int (*set_state)(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
int (*update_link_status)(struct aq_hw_s *self);
int (*update_stats)(struct aq_hw_s *self);
+
+ int (*set_flow_control)(struct aq_hw_s *self);
};
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index ba5fe8c4125d..e3ae29e523f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -135,17 +135,10 @@ err_exit:
static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- int err = 0;
- err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
- if (err < 0)
- return;
+ aq_nic_set_packet_filter(aq_nic, ndev->flags);
- if (netdev_mc_count(ndev)) {
- err = aq_nic_set_multicast_list(aq_nic, ndev);
- if (err < 0)
- return;
- }
+ aq_nic_set_multicast_list(aq_nic, ndev);
}
static const struct net_device_ops aq_ndev_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1a1a6380c128..26dc6782b475 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
- cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
- cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
+ cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
/*rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
@@ -563,34 +563,41 @@ err_exit:
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
+ unsigned int packet_filter = self->packet_filter;
struct netdev_hw_addr *ha = NULL;
unsigned int i = 0U;
- self->mc_list.count = 0U;
-
- netdev_for_each_mc_addr(ha, ndev) {
- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
- ++self->mc_list.count;
+ self->mc_list.count = 0;
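+ /* Unicast addresses are filtered through the same hardware
+ * table as multicast ones; if the UC list alone cannot fit,
+ * fall back to promiscuous mode.
+ */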
+ if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_PROMISC;
+ } else {
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
- if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
- break;
+ if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+ break;
+ }
}
- if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
- /* Number of filters is too big: atlantic does not support this.
- * Force all multi filter to support this.
- * With this we disable all UC filters and setup "all pass"
- * multicast mask
- */
- self->packet_filter |= IFF_ALLMULTI;
- self->aq_nic_cfg.mc_list_count = 0;
- return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
- self->packet_filter);
+ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_ALLMULTI;
} else {
- return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+
+ if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+ break;
+ }
+ }
+
+ if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_MULTICAST;
+ self->mc_list.count = i;
+ self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
}
+ return aq_nic_set_packet_filter(self, packet_filter);
}
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
@@ -761,10 +768,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
- if (self->aq_nic_cfg.flow_control)
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
else
@@ -879,7 +890,7 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_vec_deinit(aq_vec);
if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
+ (void)self->aq_fw_ops->deinit(self->aq_hw);
} else {
(void)self->aq_hw_ops->hw_set_power(self->aq_hw,
self->power_state);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index faa533a0ec47..fecfc401f95d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -75,7 +75,7 @@ struct aq_nic_s {
struct aq_hw_link_status_s link_status;
struct {
u32 count;
- u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 67e2f9fb9402..97addfa6f895 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -19,29 +19,31 @@
#include "hw_atl_a0_internal.h"
#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_A0_RSS_MAX, \
- .tcs = HW_ATL_A0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_A0_RXD_SIZE, \
- .rxds = 248U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_A0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_A0_TX_RINGS, \
- .rx_rings = HW_ATL_A0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_RXCSUM | \
- NETIF_F_SG | \
- NETIF_F_TSO, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds_max = HW_ATL_A0_MAX_RXD, \
+ .rxds_min = HW_ATL_A0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds_max = HW_ATL_A0_MAX_TXD, \
+ .txds_min = HW_ATL_A0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO, \
.hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_A0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
@@ -765,7 +767,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
u8 ar_mac
- [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
u32 count)
{
@@ -875,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_a0_hw_reset,
.hw_start = hw_atl_a0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1d8855558d74..3c94cff57876 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,4 +88,12 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+#define HW_ATL_A0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_A0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_A0_MAX_RXD 8184U
+#define HW_ATL_A0_MAX_TXD 8184U
+
#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 819f6bcf9b4e..1d44a386e7d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -20,30 +20,32 @@
#include "hw_atl_llh_internal.h"
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_B0_RSS_MAX, \
- .tcs = HW_ATL_B0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_B0_RXD_SIZE, \
- .rxds = 4U * 1024U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_B0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_B0_TX_RINGS, \
- .rx_rings = HW_ATL_B0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXCSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_SG | \
- NETIF_F_TSO | \
- NETIF_F_LRO, \
- .hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_B0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds_max = HW_ATL_B0_MAX_RXD, \
+ .rxds_min = HW_ATL_B0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds_max = HW_ATL_B0_MAX_TXD, \
+ .txds_min = HW_ATL_B0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_LRO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
@@ -762,7 +764,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
hw_atl_rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
hw_atl_rpfl2_accept_all_mc_packets_set(self,
IS_FILTER_ENABLED(IFF_ALLMULTI));
@@ -784,7 +786,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u8 ar_mac
- [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
u32 count)
{
@@ -812,7 +814,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled),
- HW_ATL_B0_MAC_MIN + i);
+ HW_ATL_B0_MAC_MIN + i);
}
err = aq_hw_err_from_flags(self);
@@ -933,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_b0_hw_reset,
.hw_start = hw_atl_b0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 405d1455c222..28568f5fa74b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,6 +142,14 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
+#define HW_ATL_B0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_B0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_B0_MAX_RXD 8184U
+#define HW_ATL_B0_MAX_TXD 8184U
+
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index e652d86b87d4..c965e65d07db 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -30,10 +30,11 @@
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
-#define HW_ATL_MPI_STATE_MSK 0x00FFU
-#define HW_ATL_MPI_STATE_SHIFT 0U
-#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U
-#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
@@ -525,19 +526,20 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
- val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+ val = val & ~HW_ATL_MPI_SPEED_MSK;
+ val |= speed << HW_ATL_MPI_SPEED_SHIFT;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
return 0;
}
-void hw_atl_utils_mpi_set(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state,
- u32 speed)
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
{
int err = 0;
u32 transaction_id = 0;
struct hw_aq_atl_utils_mbox_header mbox;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
}
+ /* On interface DEINIT or POWER we disable dirty wake (raise
+ * the bit); otherwise we enable it (clear the bit).
+ */
+ if (state == MPI_DEINIT || state == MPI_POWER)
+ val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
+ else
+ val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;
- aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
- (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
-
-err_exit:;
-}
-
-static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state)
-{
- u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ /* Set new state bits */
+ val = val & ~HW_ATL_MPI_STATE_MSK;
+ val |= state & HW_ATL_MPI_STATE_MSK;
- val = state | (val & HW_ATL_MPI_SPEED_MSK);
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
- return 0;
+err_exit:
+ return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
@@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
*p = chip_features;
}
-int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
- hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
return 0;
}
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
unsigned int power_state)
{
- hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_POWER);
return 0;
}
@@ -823,10 +827,12 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
const struct aq_fw_ops aq_fw_1x_ops = {
.init = hw_atl_utils_mpi_create,
+ .deinit = hw_atl_fw1x_deinit,
.reset = NULL,
.get_mac_permanent = hw_atl_utils_get_mac_permanent,
.set_link_speed = hw_atl_utils_mpi_set_speed,
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
+ .set_flow_control = NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index cd8f18f39c61..b875590efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi {
CAPS_HI_TRANSACTION_ID,
};
+enum hw_atl_fw2x_ctrl {
+ CTRL_RESERVED1 = 0x00,
+ CTRL_RESERVED2,
+ CTRL_RESERVED3,
+ CTRL_PAUSE,
+ CTRL_ASYMMETRIC_PAUSE,
+ CTRL_RESERVED4,
+ CTRL_RESERVED5,
+ CTRL_RESERVED6,
+ CTRL_1GBASET_FD_EEE,
+ CTRL_2P5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE,
+ CTRL_10GBASET_FD_EEE,
+ CTRL_THERMAL_SHUTDOWN,
+ CTRL_PHY_LOGS,
+ CTRL_EEE_AUTO_DISABLE,
+ CTRL_PFC,
+ CTRL_WAKE_ON_LINK,
+ CTRL_CABLE_DIAG,
+ CTRL_TEMPERATURE,
+ CTRL_DOWNSHIFT,
+ CTRL_PTP_AVB,
+ CTRL_RESERVED7,
+ CTRL_LINK_DROP,
+ CTRL_SLEEP_PROXY,
+ CTRL_WOL,
+ CTRL_MAC_STOP,
+ CTRL_EXT_LOOPBACK,
+ CTRL_INT_LOOPBACK,
+ CTRL_RESERVED8,
+ CTRL_WOL_TIMER,
+ CTRL_STATISTICS,
+ CTRL_FORCE_RECONNECT,
+};
+
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 39cd3a27fe77..e37943760a58 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -28,6 +28,10 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
static int aq_fw2x_init(struct aq_hw_s *self)
{
int err = 0;
@@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self)
return err;
}
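+/* On deinit, drop the link speed to zero and move the firmware state
+ * machine to MPI_DEINIT so the link is brought down.
+ */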
+static int aq_fw2x_deinit(struct aq_hw_s *self)
+{
+ int err = aq_fw2x_set_link_speed(self, 0);
+
+ if (!err)
+ err = aq_fw2x_set_state(self, MPI_DEINIT);
+
+ return err;
+}
+
static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
{
enum hw_atl_fw2x_rate rate = 0;
@@ -73,10 +87,38 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
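+/* Mirror the driver's flow-control configuration into the PAUSE and
+ * ASYMMETRIC_PAUSE bits of the FW 2.x MPI control word.
+ */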
+static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+{
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ *mpi_state |= BIT(CAPS_HI_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+}
+
static int aq_fw2x_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
- /* No explicit state in 2x fw */
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ switch (state) {
+ case MPI_INIT:
+ mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ break;
+ case MPI_DEINIT:
+ mpi_state |= BIT(CAPS_HI_LINK_DROP);
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
return 0;
}
@@ -173,12 +215,37 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
return hw_atl_utils_update_stats(self);
}
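+/* Ask the firmware to drop and re-establish the link by setting the
+ * FORCE_RECONNECT bit in the MPI control word.
+ */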
+static int aq_fw2x_renegotiate(struct aq_hw_s *self)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ mpi_opts |= BIT(CTRL_FORCE_RECONNECT);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+ return 0;
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
.reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
.get_mac_permanent = aq_fw2x_get_mac_permanent,
.set_link_speed = aq_fw2x_set_link_speed,
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
+ .set_flow_control = aq_fw2x_set_flow_control,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index a445de6837a6..94efc6477bdc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -12,8 +12,8 @@
#define NIC_MAJOR_DRIVER_VERSION 2
#define NIC_MINOR_DRIVER_VERSION 0
-#define NIC_BUILD_DRIVER_VERSION 2
-#define NIC_REVISION_DRIVER_VERSION 1
+#define NIC_BUILD_DRIVER_VERSION 3
+#define NIC_REVISION_DRIVER_VERSION 0
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index e743ddf46343..5d0ab8e74b68 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -24,7 +24,8 @@ config ARC_EMAC_CORE
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
+ depends on OF_IRQ && OF_NET
+ depends on ARC || COMPILE_TEST
---help---
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -33,7 +34,8 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
+ depends on OF_IRQ && OF_NET && REGULATOR
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
---help---
Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 567ee54504bc..6d3221134927 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx)
struct alx_hw *hw = &alx->hw;
unsigned long flags;
int old_speed;
- u8 old_duplex;
int err;
/* clear PHY internal interrupt status, otherwise the main
@@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx)
alx_clear_phy_intr(hw);
old_speed = hw->link_speed;
- old_duplex = hw->duplex;
err = alx_read_phy_link(hw);
if (err < 0)
goto reset;
@@ -1897,13 +1895,19 @@ static int alx_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct alx_priv *alx = pci_get_drvdata(pdev);
struct alx_hw *hw = &alx->hw;
+ int err;
alx_reset_phy(hw);
if (!netif_running(alx->dev))
return 0;
netif_device_attach(alx->dev);
- return __alx_open(alx, true);
+
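+ /* __alx_open() manipulates netdev state that must be protected
+ * by RTNL; resume runs outside the usual ndo paths, so take the
+ * lock explicitly.
+ */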
+ rtnl_lock();
+ err = __alx_open(alx, true);
+ rtnl_unlock();
+
+ return err;
}
static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 94270f654b3b..7087b88550db 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
skb = build_skb(page_address(page) + adapter->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
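+ /* build_skb() leaves no headroom; reserve NET_SKB_PAD so the
+ * stack has room to prepend headers.
+ */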
+ skb_reserve(skb, NET_SKB_PAD);
adapter->rx_page_offset += adapter->rx_frag_size;
if (adapter->rx_page_offset >= PAGE_SIZE)
adapter->rx_page = NULL;
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
index 8ba7f8ff3434..392f564d8fd4 100644
--- a/drivers/net/ethernet/aurora/Kconfig
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -1,5 +1,6 @@
config NET_VENDOR_AURORA
bool "Aurora VLSI devices"
+ default y
help
If you have a network (Ethernet) device belonging to this class,
say Y.
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index e94159507847..c8d1f8fa4713 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
again:
do {
- struct nb8800_rx_buf *rxb;
unsigned int len;
next = (last + 1) % RX_DESC_COUNT;
- rxb = &priv->rx_bufs[next];
rxd = &priv->rx_descs[next];
if (!rxd->report)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index af75156919ed..c1d3ee9baf7e 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -61,7 +61,7 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
select MII
select PHYLIB
select FIXED_PHY
@@ -157,7 +157,6 @@ config BGMAC
config BGMAC_BCMA
tristate "Broadcom iProc GBit BCMA support"
depends on BCMA && BCMA_HOST_SOC
- depends on HAS_DMA
depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
select BGMAC
select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
config BGMAC_PLATFORM
tristate "Broadcom iProc GBit platform support"
- depends on HAS_DMA
depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on OF
select BGMAC
@@ -183,7 +181,7 @@ config BGMAC_PLATFORM
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
- depends on OF
+ depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
select MII
select PHYLIB
@@ -232,4 +230,12 @@ config BNXT_DCB
If unsure, say N.
+config BNXT_HWMON
+ bool "Broadcom NetXtreme-C/E HWMON support"
+ default y
+ depends on BNXT && HWMON && !(BNXT=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices via the hwmon sysfs interface.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d5fca2e5a9bc..147045757b10 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -521,7 +521,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
if (!(priv->wolopts & WAKE_MAGICSECURE))
@@ -539,7 +539,7 @@ static int bcm_sysport_set_wol(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
- u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
@@ -1041,17 +1041,45 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
return work_done;
}
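+/* Enable or disable Magic Packet Detection in the UniMAC together
+ * with the matching ACPI wake-up logic in the RBUF (SYSTEMPORT Lite
+ * uses a different RBUF enable bit).
+ */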
+static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
+{
+ u32 reg, bit;
+
+ reg = umac_readl(priv, UMAC_MPD_CTRL);
+ if (enable)
+ reg |= MPD_EN;
+ else
+ reg &= ~MPD_EN;
+ umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+ if (priv->is_lite)
+ bit = RBUF_ACPI_EN_LITE;
+ else
+ bit = RBUF_ACPI_EN;
+
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ if (enable)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
/* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD);
+ intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
+
+ /* Disable RXCHK, active filters and Broadcom tag matching */
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
+ RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
/* Clear the MagicPacket detection logic */
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1077,6 +1105,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *txr;
unsigned int ring, ring_bit;
+ u32 reg;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1102,9 +1131,14 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD) {
- netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
- bcm_sysport_resume_from_wol(priv);
+ if (priv->irq0_stat & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
}
if (!priv->is_lite)
@@ -1946,8 +1980,8 @@ static int bcm_sysport_open(struct net_device *dev)
if (!priv->is_lite)
priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
else
- priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
- GIB_FCS_STRIP);
+ priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+ GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
0, priv->phy_interface);
@@ -2090,6 +2124,132 @@ static int bcm_sysport_stop(struct net_device *dev)
return 0;
}
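+/* Look up the filter whose programmed classification ID matches
+ * @location; returns its index, or -EINVAL if no filter matches.
+ */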
+static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
+ u64 location)
+{
+ unsigned int index;
+ u32 reg;
+
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+ reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
+ reg &= RXCHK_BRCM_TAG_CID_MASK;
+ if (reg == location)
+ return index;
+ }
+
+ return -EINVAL;
+}
+
+static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ int index;
+
+ /* This is not a rule that we know about */
+ index = bcm_sysport_rule_find(priv, nfc->fs.location);
+ if (index < 0)
+ return -EOPNOTSUPP;
+
+ nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
+
+ return 0;
+}
+
+static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ unsigned int index;
+ u32 reg;
+
+ /* We cannot match locations greater than what the classification ID
+ * permits (256 entries)
+ */
+ if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
+ return -E2BIG;
+
+ /* We cannot support flows that are not destined for a wake-up */
+ if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
+ return -EOPNOTSUPP;
+
+ /* All filters are already in use, we cannot match more rules */
+ if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
+ RXCHK_BRCM_TAG_MAX)
+ return -ENOSPC;
+
+ index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
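+ /* find_first_zero_bit() returns the bitmap size when every bit
+ * is set, so treat that as "no free filter slot" as well.
+ */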
+ if (index >= RXCHK_BRCM_TAG_MAX)
+ return -ENOSPC;
+
+ /* Location is the classification ID, and index is the position
+ * within one of our 8 possible filters to be programmed
+ */
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+ reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
+ reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
+ rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
+ rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+
+ set_bit(index, priv->filters);
+
+ return 0;
+}
+
+static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
+ u64 location)
+{
+ int index;
+
+ /* This is not a rule that we know about */
+ index = bcm_sysport_rule_find(priv, location);
+ if (index < 0)
+ return -EOPNOTSUPP;
+
+ /* No need to disable this filter if it was enabled; this will
+ * be taken care of at suspend time by bcm_sysport_suspend_to_wol()
+ */
+ clear_bit(index, priv->filters);
+
+ return 0;
+}
+
+static int bcm_sysport_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXCLSRULE:
+ ret = bcm_sysport_rule_get(priv, nfc);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int bcm_sysport_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcm_sysport_rule_set(priv, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcm_sysport_rule_del(priv, nfc->fs.location);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
@@ -2104,10 +2264,12 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.set_coalesce = bcm_sysport_set_coalesce,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_rxnfc = bcm_sysport_get_rxnfc,
+ .set_rxnfc = bcm_sysport_set_rxnfc,
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2116,7 +2278,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2286,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
return tx_ring->index;
}
@@ -2423,21 +2585,43 @@ static int bcm_sysport_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
struct net_device *ndev = priv->netdev;
unsigned int timeout = 1000;
+ unsigned int index, i = 0;
u32 reg;
/* Password has already been programmed */
reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg |= MPD_EN;
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+ reg |= MPD_EN;
reg &= ~PSW_EN;
if (priv->wolopts & WAKE_MAGICSECURE)
reg |= PSW_EN;
umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & WAKE_FILTER) {
+ /* Turn on ACPI matching to steal packets from RBUF */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ if (priv->is_lite)
+ reg |= RBUF_ACPI_EN_LITE;
+ else
+ reg |= RBUF_ACPI_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Enable RXCHK, active filters and Broadcom tag matching */
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
+ RXCHK_BRCM_TAG_MATCH_SHIFT);
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
+ i++;
+ }
+ reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+ }
+
/* Make sure RBUF entered WoL mode as result */
do {
reg = rbuf_readl(priv, RBUF_STATUS);
@@ -2449,9 +2633,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* Do not leave the UniMAC RBUF matching only MPD packets */
if (!timeout) {
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
return -ETIMEDOUT;
}
@@ -2460,14 +2642,14 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
umac_enable_set(priv, CMD_RX_EN, 1);
/* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+ intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
}
-static int bcm_sysport_suspend(struct device *d)
+static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2529,7 +2711,7 @@ static int bcm_sysport_suspend(struct device *d)
return ret;
}
-static int bcm_sysport_resume(struct device *d)
+static int __maybe_unused bcm_sysport_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2622,7 +2804,6 @@ out_free_tx_rings:
bcm_sysport_fini_tx_ring(priv, i);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
bcm_sysport_suspend, bcm_sysport_resume);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index d6e5d0cbf3a3..046c6c1d97fd 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -11,6 +11,7 @@
#ifndef __BCM_SYSPORT_H
#define __BCM_SYSPORT_H
+#include <linux/bitmap.h>
#include <linux/if_vlan.h>
#include <linux/net_dim.h>
@@ -155,14 +156,18 @@ struct bcm_rsb {
#define RXCHK_PARSE_AUTH (1 << 22)
#define RXCHK_BRCM_TAG0 0x04
-#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG(i) ((i) * 0x4 + RXCHK_BRCM_TAG0)
#define RXCHK_BRCM_TAG0_MASK 0x24
-#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * 0x4 + RXCHK_BRCM_TAG0_MASK)
#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
#define RXCHK_ETHERTYPE 0x48
#define RXCHK_BAD_CSUM_CNTR 0x4C
#define RXCHK_OTHER_DISC_CNTR 0x50
+#define RXCHK_BRCM_TAG_MAX 8
+#define RXCHK_BRCM_TAG_CID_SHIFT 16
+#define RXCHK_BRCM_TAG_CID_MASK 0xff
+
/* TXCHCK offsets and defines */
#define SYS_PORT_TXCHK_OFFSET 0x380
#define TXCHK_PKT_RDY_THRESH 0x00
@@ -185,6 +190,7 @@ struct bcm_rsb {
#define RBUF_RSB_SWAP0 (1 << 22)
#define RBUF_RSB_SWAP1 (1 << 23)
#define RBUF_ACPI_EN (1 << 23)
+#define RBUF_ACPI_EN_LITE (1 << 24)
#define RBUF_PKT_RDY_THRESH 0x04
@@ -278,7 +284,8 @@ struct bcm_rsb {
#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
-#define GIB_FCS_STRIP (1 << 6)
+#define GIB_FCS_STRIP_SHIFT 6
+#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT)
#define GIB_LCL_LOOP_EN (1 << 7)
#define GIB_LCL_LOOP_TXEN (1 << 8)
#define GIB_RMT_LOOP_EN (1 << 9)
@@ -776,6 +783,7 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
+ DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
struct bcm_sysport_stats64 stats64;
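
The new filters member relies on the generic kernel bitmap API (hence the linux/bitmap.h include added above); a small usage illustration with generic helpers, not taken from this patch:

	DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);	/* 8 filter slots */
	unsigned int index;

	bitmap_zero(filters, RXCHK_BRCM_TAG_MAX);
	set_bit(2, filters);				/* slot 2 now in use */
	for_each_set_bit(index, filters, RXCHK_BRCM_TAG_MAX)
		pr_info("filter %u active\n", index);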
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e6ea8e61f96d..4c94d9218bba 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
struct device *dma_dev = bgmac->dma_dev;
int empty_slot;
- bool freed = false;
unsigned bytes_compl = 0, pkts_compl = 0;
/* The last slot that hardware didn't consume yet */
@@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
slot->dma_addr = 0;
ring->start++;
- freed = true;
}
if (!pkts_compl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d847e1b9c37b..be1506169076 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1533,6 +1533,7 @@ struct bnx2x {
struct link_vars link_vars;
u32 link_cnt;
struct bnx2x_link_report_data last_reported_link;
+ bool force_link_down;
struct mdio_if_info mdio;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8cd73ff5debc..5a727d4729da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
{
struct bnx2x_link_report_data cur_data;
+ if (bp->force_link_down) {
+ bp->link_vars.link_up = 0;
+ return;
+ }
+
/* reread mf_cfg */
if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
@@ -1905,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1927,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -2817,6 +2824,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->pending_max = 0;
}
+ bp->force_link_down = false;
if (bp->port.pmf) {
rc = bnx2x_initial_phy_init(bp, load_mode);
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a8ce5c55bbb0..0e508e5defce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index da18aa239acb..a4a90b6cdb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
}
return 0;
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, false);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_config_rss_eth(bp, false);
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 22243c480a05..98d4c5a3ff21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
+ /* else: fall through */
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = SPEED_100;
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5b1ed240bf18..71362b7f6040 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
bp->num_queues,
1 + bp->num_cnic_queues);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bp->sp_rtnl_state = 0;
smp_mb();
+ /* Immediately indicate link as down */
+ bp->link_vars.link_up = 0;
+ bp->force_link_down = true;
+ netif_carrier_off(bp->dev);
+ BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
/* When the return value indicates an allocation failure,
* the nic is rebooted again. If open still fails, an error
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8baf9d3eb4b1..3f4d2c8da21a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
@@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index dc77bfded865..62da46537734 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1827,6 +1827,7 @@ get_vf:
DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
+ /* fall through */
case EVENT_RING_OPCODE_VF_FLR:
/* Do nothing for now */
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 176fc9f4d7de..8bb1e38b1681 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -51,6 +51,8 @@
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -1115,7 +1117,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3)
+ if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1727,8 +1729,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
- /* fall thru */
}
+ /* fall through */
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
@@ -3012,13 +3014,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
- if (bp->hwrm_dbg_resp_addr) {
- dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
- bp->hwrm_dbg_resp_addr,
- bp->hwrm_dbg_resp_dma_addr);
-
- bp->hwrm_dbg_resp_addr = NULL;
- }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3030,12 +3025,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
GFP_KERNEL);
if (!bp->hwrm_cmd_resp_addr)
return -ENOMEM;
- bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
- HWRM_DBG_REG_BUF_SIZE,
- &bp->hwrm_dbg_resp_dma_addr,
- GFP_KERNEL);
- if (!bp->hwrm_dbg_resp_addr)
- netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
return 0;
}
@@ -3458,7 +3447,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
memcpy(short_cmd_req, req, msg_len);
@@ -3651,7 +3640,9 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -3689,7 +3680,15 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ else if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
@@ -3994,6 +3993,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
max_rings = bp->rx_nr_rings - 1;
@@ -4591,7 +4591,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
}
hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
u16 cp, stats;
hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
@@ -4637,7 +4637,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->fid = cpu_to_le16(0xffff);
enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
req->num_tx_rings = cpu_to_le16(tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
@@ -4710,7 +4710,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
struct hwrm_func_vf_cfg_input req = {0};
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ if (!BNXT_NEW_RM(bp)) {
bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
}
@@ -4770,7 +4770,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
return true;
@@ -4806,7 +4806,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
tx = hw_resc->resv_tx_rings;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
rx = hw_resc->resv_rx_rings;
cp = hw_resc->resv_cp_rings;
grp = hw_resc->resv_hw_ring_grps;
@@ -4850,7 +4850,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
u32 flags;
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return 0;
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
@@ -4879,7 +4879,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
@@ -5101,9 +5101,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
- bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
@@ -5175,7 +5175,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
pf->vf_resv_strategy =
le16_to_cpu(resp->vf_reservation_strategy);
- if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
}
hwrm_func_resc_qcaps_exit:
@@ -5261,7 +5261,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code >= 0x10803) {
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
- bp->flags |= BNXT_FLAG_NEW_RM;
+ bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
}
return 0;
}
@@ -5281,7 +5281,8 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int rc = 0;
struct hwrm_queue_qportcfg_input req = {0};
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
- u8 i, *qptr;
+ u8 i, j, *qptr;
+ bool no_rdma;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
@@ -5299,19 +5300,24 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bp->max_tc; i++) {
+ bp->q_info[j].queue_id = *qptr++;
+ bp->q_info[j].queue_profile = *qptr++;
+ bp->tc_to_qidx[j] = j;
+ if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
+ (no_rdma && BNXT_PF(bp)))
+ j++;
+ }
+ bp->max_tc = max_t(u8, j, 1);
+
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
bp->max_tc = 1;
if (bp->max_lltc > bp->max_tc)
bp->max_lltc = bp->max_tc;
- qptr = &resp->queue_id0;
- for (i = 0; i < bp->max_tc; i++) {
- bp->q_info[i].queue_id = *qptr++;
- bp->q_info[i].queue_profile = *qptr++;
- bp->tc_to_qidx[i] = i;
- }
-
qportcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5364,7 +5370,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
- bp->flags |= BNXT_FLAG_SHORT_CMD;
+ bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5712,7 +5718,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
}
vnic->uc_filter_count = 1;
- vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+ vnic->rx_mask = 0;
+ if (bp->dev->flags & IFF_BROADCAST)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5925,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
bp->hw_resc.max_irqs = max_irqs;
}
@@ -5931,7 +5939,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
- if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+ if (!BNXT_NEW_RM(bp) || avail_msix >= num)
return avail_msix;
if (max_irq < total_req) {
@@ -5944,7 +5952,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
static int bnxt_get_num_msix(struct bnxt *bp)
{
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return bnxt_get_max_func_irqs(bp);
return bnxt_cp_rings_in_use(bp);
@@ -6067,8 +6075,7 @@ int bnxt_reserve_rings(struct bnxt *bp)
netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
return rc;
}
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
- (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
@@ -6348,6 +6355,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
+ if (bp->test_info)
+ bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -6644,6 +6655,39 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+ struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_drv_if_change_input req = {0};
+ bool resc_reinit = false;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
+ if (up)
+ req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && (resp->flags &
+ cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
+ resc_reinit = true;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (up && resc_reinit && BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ }
+ return rc;
+}
+
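+/* bnxt_hwrm_if_change() is paired around the device open path; the pairing,
+ * condensed from the bnxt_open()/bnxt_close() hunks later in this patch:
+ *
+ *	bnxt_hwrm_if_change(bp, true);		// firmware: going up
+ *	rc = __bnxt_open_nic(bp, true, true);
+ *	if (rc)
+ *		bnxt_hwrm_if_change(bp, false);	// roll back on failure
+ *	...
+ *	bnxt_hwrm_if_change(bp, false);		// and again on close
+ */
+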
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -6753,6 +6797,62 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
} while (handle && handle != 0xffff);
}
+#ifdef CONFIG_BNXT_HWMON
+static ssize_t bnxt_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct hwrm_temp_monitor_query_input req = {0};
+ struct hwrm_temp_monitor_query_output *resp;
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u32 temp = 0;
+
+ resp = bp->hwrm_cmd_resp_addr;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ temp = resp->temp * 1000; /* display millidegree */
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return sprintf(buf, "%u\n", temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+
+static struct attribute *bnxt_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bnxt);
+
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+ if (bp->hwmon_dev) {
+ hwmon_device_unregister(bp->hwmon_dev);
+ bp->hwmon_dev = NULL;
+ }
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ DRV_MODULE_NAME, bp,
+ bnxt_groups);
+ if (IS_ERR(bp->hwmon_dev)) {
+ bp->hwmon_dev = NULL;
+ dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+ }
+}
+#else
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+}
+#endif
+
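+/* The temp1_input attribute above follows the standard hwmon sysfs ABI,
+ * which reports temperatures in millidegrees Celsius (hence the * 1000).
+ * For example, a firmware reading of 58 degrees C is exposed as:
+ *
+ *   $ cat /sys/class/hwmon/hwmonX/temp1_input
+ *   58000
+ */
+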
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -6888,7 +6988,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_request_irq(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
}
@@ -6905,8 +7005,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
mutex_unlock(&bp->link_lock);
- if (rc)
+ if (rc) {
netdev_warn(bp->dev, "failed to update phy settings\n");
+ if (BNXT_SINGLE_PF(bp)) {
+ bp->link_info.phy_retry = true;
+ bp->link_info.phy_retry_expires =
+ jiffies + 5 * HZ;
+ }
+ }
}
if (irq_re_init)
@@ -6928,6 +7034,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
open_err:
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
+
+open_err_irq:
bnxt_del_napi(bp);
open_err_free_mem:
@@ -6990,8 +7098,16 @@ void bnxt_half_close_nic(struct bnxt *bp)
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ bnxt_hwrm_if_change(bp, true);
+ rc = __bnxt_open_nic(bp, true, true);
+ if (rc)
+ bnxt_hwrm_if_change(bp, false);
+
+ bnxt_hwmon_open(bp);
- return __bnxt_open_nic(bp, true, true);
+ return rc;
}
static bool bnxt_drv_busy(struct bnxt *bp)
@@ -7053,8 +7169,10 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
+ bnxt_hwrm_if_change(bp, false);
return 0;
}
@@ -7214,13 +7332,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
- CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+ CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
+ if (dev->flags & IFF_BROADCAST)
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
@@ -7301,7 +7422,7 @@ skip_uc:
static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
- if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
+ if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
/* No minimum rings were provisioned by the PF. Don't
@@ -7351,7 +7472,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
return false;
}
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return true;
if (vnics == bp->hw_resc.resv_vnics)
@@ -7585,6 +7706,16 @@ static void bnxt_timer(struct timer_list *t)
set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
+
+ if (bp->link_info.phy_retry) {
+ if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
+ bp->link_info.phy_retry = 0;
+ netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
+ } else {
+ set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+ }
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -7672,6 +7803,19 @@ static void bnxt_sp_task(struct work_struct *work)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
}
+ if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
+ int rc;
+
+ mutex_lock(&bp->link_lock);
+ rc = bnxt_update_phy_setting(bp);
+ mutex_unlock(&bp->link_lock);
+ if (rc) {
+ netdev_warn(bp->dev, "update phy settings retry failed\n");
+ } else {
+ bp->link_info.phy_retry = false;
+ netdev_info(bp->dev, "update phy settings retry succeeded\n");
+ }
+ }
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
mutex_lock(&bp->link_lock);
bnxt_get_port_module_status(bp);
@@ -7724,7 +7868,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
cp += bnxt_get_ulp_msix_num(bp);
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
vnics);
@@ -7984,7 +8128,7 @@ static int bnxt_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
- bp, bp);
+ bp, bp, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
return 0;
@@ -8502,11 +8646,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
int rx, tx, cp;
_bnxt_get_max_rings(bp, &rx, &tx, &cp);
+ *max_rx = rx;
+ *max_tx = tx;
if (!rx || !tx || !cp)
return -ENOMEM;
- *max_rx = rx;
- *max_tx = tx;
return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}
@@ -8520,8 +8664,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
/* Not enough rings, try disabling agg rings. */
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
- if (rc)
+ if (rc) {
+ /* set BNXT_FLAG_AGG_RINGS back for consistency */
+ bp->flags |= BNXT_FLAG_AGG_RINGS;
return rc;
+ }
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
@@ -8730,7 +8877,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9b14eb610b9f..fefa011320e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.1"
+#define DRV_MODULE_VERSION "1.9.2"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 9
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -326,6 +326,10 @@ struct rx_tpa_start_cmp_ext {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+#define TPA_START_IS_IPV6(rx_tpa_start) \
+ (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -862,6 +866,7 @@ struct bnxt_pf_info {
u8 vf_resv_strategy;
#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
+#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC 2
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -959,6 +964,9 @@ struct bnxt_link_info {
u16 advertising; /* user adv setting */
bool force_link_chng;
+ bool phy_retry;
+ unsigned long phy_retry_expires;
+
/* a copy of phy_qcfg output used to report link
* info to VF
*/
@@ -990,6 +998,8 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
+ u8 flags;
+#define BNXT_TEST_FL_EXT_LPBK 0x1
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1134,7 +1144,6 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_DCB_ENABLED 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1163,15 +1172,11 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
- #define BNXT_FLAG_SHORT_CMD 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
- #define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
- #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1276,10 +1281,19 @@ struct bnxt {
struct ieee_ets *ieee_ets;
u8 dcbx_cap;
u8 default_pri;
+ u8 max_dscp_value;
#endif /* CONFIG_BNXT_DCB */
u32 msg_enable;
+ u32 fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD 0x00000001
+ #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
+ #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
+ #define BNXT_FW_CAP_NEW_RM 0x00000008
+ #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+
+#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
@@ -1287,9 +1301,6 @@ struct bnxt {
dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
- void *hwrm_dbg_resp_addr;
- dma_addr_t hwrm_dbg_resp_dma_addr;
-#define HWRM_DBG_REG_BUF_SIZE 128
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
@@ -1345,6 +1356,7 @@ struct bnxt {
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+#define BNXT_UPDATE_PHY_SP_EVENT 16
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1400,6 +1412,7 @@ struct bnxt {
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
struct dentry *debugfs_dim;
+ struct device *hwmon_dev;
};
#define BNXT_RX_STATS_OFFSET(counter) \
@@ -1470,7 +1483,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
new file mode 100644
index 000000000000..09c22f8fe399
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -0,0 +1,66 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2018 Broadcom Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_COREDUMP_H
+#define BNXT_COREDUMP_H
+
+struct bnxt_coredump_segment_hdr {
+ __u8 signature[4];
+ __le32 component_id;
+ __le32 segment_id;
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __le16 function_id;
+ __le32 offset;
+ __le32 length;
+ __le32 status;
+ __le32 duration;
+ __le32 data_offset;
+ __le32 instance;
+ __le32 rsvd[5];
+};
+
+struct bnxt_coredump_record {
+ __u8 signature[4];
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __u8 asic_state;
+ __u8 rsvd0[5];
+ char system_name[32];
+ __le16 year;
+ __le16 month;
+ __le16 day;
+ __le16 hour;
+ __le16 minute;
+ __le16 second;
+ __le16 utc_bias;
+ __le16 rsvd1;
+ char commandline[256];
+ __le32 total_segments;
+ __le32 os_ver_major;
+ __le32 os_ver_minor;
+ __le32 rsvd2;
+ char os_name[32];
+ __le16 end_year;
+ __le16 end_month;
+ __le16 end_day;
+ __le16 end_hour;
+ __le16 end_minute;
+ __le16 end_second;
+ __le16 end_utc_bias;
+ __le32 asic_id1;
+ __le32 asic_id2;
+ __le32 coredump_status;
+ __u8 ioctl_low_version;
+ __u8 ioctl_high_version;
+ __le16 rsvd3[313];
+};
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index d5bc72cecde3..ddc98c359488 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -385,6 +385,61 @@ set_app_exit:
return rc;
}
+static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_dscp_qcaps_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
+ if (bp->max_dscp_value < 0x3f)
+ bp->max_dscp_value = 0;
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
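+/* Worked example of the capability check above:
+ *
+ *   num_dscp_bits = 6  ->  max_dscp_value = (1 << 6) - 1 = 0x3f (full range)
+ *   num_dscp_bits = 4  ->  (1 << 4) - 1 = 0x0f < 0x3f  ->  max_dscp_value = 0
+ *
+ * i.e. anything short of the full 6-bit DSCP range is treated as
+ * "DSCP remapping unsupported".
+ */
+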
+static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
+ bool add)
+{
+ struct hwrm_queue_dscp2pri_cfg_input req = {0};
+ struct bnxt_dscp2pri_entry *dscp2pri;
+ dma_addr_t mapping;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
+ dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
+ &mapping, GFP_KERNEL);
+ if (!dscp2pri)
+ return -ENOMEM;
+
+ req.src_data_addr = cpu_to_le64(mapping);
+ dscp2pri->dscp = app->protocol;
+ if (add)
+ dscp2pri->mask = 0x3f;
+ else
+ dscp2pri->mask = 0;
+ dscp2pri->pri = app->priority;
+ req.entry_cnt = cpu_to_le16(1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
+ mapping);
+ return rc;
+}
+
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
@@ -551,15 +606,30 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
return rc;
}
+static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
+{
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
+ if (!bp->max_dscp_value)
+ return -ENOTSUPP;
+ if (app->protocol > bp->max_dscp_value)
+ return -EINVAL;
+ }
+ return 0;
+}
+
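+/* For reference, the dcbnl core hands DSCP-selector apps to the callbacks
+ * below as a struct dcb_app; an example request (hypothetical values)
+ * mapping DSCP 26 (AF31) to priority 3:
+ *
+ *	struct dcb_app app = {
+ *		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
+ *		.protocol = 26,		// DSCP code point
+ *		.priority = 3,		// 802.1p priority
+ *	};
+ */
+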
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = -EINVAL;
+ int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_setapp(dev, app);
if (rc)
return rc;
@@ -570,6 +640,9 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);
+
return rc;
}
@@ -582,6 +655,10 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_delapp(dev, app);
if (rc)
return rc;
@@ -591,6 +668,9 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);
+
return rc;
}
@@ -610,7 +690,7 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
if (mode & DCB_CAP_DCBX_HOST) {
- if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
return 1;
/* only support IEEE */
@@ -642,10 +722,11 @@ void bnxt_dcb_init(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10501)
return;
+ bnxt_hwrm_queue_dscp_qcaps(bp);
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
- if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
- else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
+ else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
bp->dev->dcbnl_ops = &dcbnl_ops;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 69efde785f23..6eed231de565 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -33,10 +33,20 @@ struct bnxt_cos2bw_cfg {
u8 unused;
};
+struct bnxt_dscp2pri_entry {
+ u8 dscp;
+ u8 mask;
+ u8 pri;
+};
+
#define BNXT_LLQ(q_profile) \
((q_profile) == \
QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
+#define BNXT_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
void bnxt_dcb_init(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 402fa32f7a88..f3b9fbcc705b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};
+static const struct bnxt_dl_nvm_param nvm_params[] = {
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
+ BNXT_NVM_SHARED_CFG, 1},
+};
+
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ int msg_len, union devlink_param_value *val)
+{
+ struct hwrm_nvm_get_variable_input *req = msg;
+ void *data_addr = NULL, *buf = NULL;
+ struct bnxt_dl_nvm_param nvm_param;
+ int bytesize, idx = 0, rc, i;
+ dma_addr_t data_dma_addr;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp))
+ return -EPERM;
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ if (nvm_params[i].id == param_id) {
+ nvm_param = nvm_params[i];
+ break;
+ }
+ }
+
+ if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ idx = bp->pf.port_id;
+ else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+ bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
+ if (nvm_param.num_bits == 1)
+ buf = &val->vbool;
+
+ data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
+ &data_dma_addr, GFP_KERNEL);
+ if (!data_addr)
+ return -ENOMEM;
+
+ req->dest_data_addr = cpu_to_le64(data_dma_addr);
+ req->data_len = cpu_to_le16(nvm_param.num_bits);
+ req->option_num = cpu_to_le16(nvm_param.offset);
+ req->index_0 = cpu_to_le16(idx);
+ if (idx)
+ req->dimensions = cpu_to_le16(1);
+
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+ memcpy(data_addr, buf, bytesize);
+
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+ memcpy(buf, data_addr, bytesize);
+
+ dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+ if (rc)
+ return -EIO;
+ return 0;
+}
+
+static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_get_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_set_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static const struct devlink_param bnxt_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
+};
+
int bnxt_dl_register(struct bnxt *bp)
{
struct devlink *dl;
int rc;
- if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
- return 0;
-
- if (bp->hwrm_spec_code < 0x10803) {
- netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+ if (bp->hwrm_spec_code < 0x10600) {
+ netdev_warn(bp->dev, "Firmware does not support NVM params");
return -ENOTSUPP;
}
@@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp)
}
bnxt_link_bp_to_dl(bp, dl);
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
+ /* Add switchdev eswitch mode setting, if SRIOV supported */
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
+ bp->hwrm_spec_code > 0x10803)
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
- bnxt_link_bp_to_dl(bp, NULL);
- devlink_free(dl);
netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
- return rc;
+ goto err_dl_free;
+ }
+
+ rc = devlink_params_register(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ if (rc) {
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ rc);
+ goto err_dl_unreg;
}
return 0;
+
+err_dl_unreg:
+ devlink_unregister(dl);
+err_dl_free:
+ bnxt_link_bp_to_dl(bp, NULL);
+ devlink_free(dl);
+ return rc;
}
void bnxt_dl_unregister(struct bnxt *bp)
@@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ devlink_params_unregister(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index e92a35d8b642..2f68dc048390 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
}
}
+#define NVM_OFF_ENABLE_SRIOV 401
+
+enum bnxt_nvm_dir_type {
+ BNXT_NVM_SHARED_CFG = 40,
+ BNXT_NVM_PORT_CFG,
+ BNXT_NVM_FUNC_CFG,
+};
+
+struct bnxt_dl_nvm_param {
+ u16 id;
+ u16 offset;
+ u16 dir_type;
+ u16 num_bits;
+};
+
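+/* Reading the single nvm_params[] entry registered in bnxt_devlink.c:
+ * the generic devlink "enable_sriov" parameter lives at NVM option 401
+ * (NVM_OFF_ENABLE_SRIOV) in the shared configuration block and is one
+ * bit wide, so bnxt_hwrm_nvm_req() rounds it up to a one-byte DMA buffer:
+ *
+ *   num_bits = 1  ->  bytesize = roundup(1, BITS_PER_BYTE) / BITS_PER_BYTE = 1
+ */
+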
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7270c8b0cef3..e52d7af3ab3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -16,12 +16,15 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
+#include "bnxt_coredump.h"
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
@@ -112,6 +115,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
BNXT_MAX_STATS_COAL_TICKS);
stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
bp->stats_coal_ticks = stats_ticks;
+ if (bp->stats_coal_ticks)
+ bp->current_interval =
+ bp->stats_coal_ticks * HZ / 1000000;
+ else
+ bp->current_interval = BNXT_TIMER_INTERVAL;
update_stats = true;
}
@@ -162,7 +170,7 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
- BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
+ BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
@@ -205,9 +213,9 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
- BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
+ BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
- BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
+ BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
@@ -463,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
@@ -2392,7 +2400,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
struct hwrm_port_phy_cfg_input req = {0};
@@ -2400,7 +2408,10 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
if (enable) {
bnxt_disable_an_for_lpbk(bp, &req);
- req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ if (ext)
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
+ else
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
} else {
req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
}
@@ -2533,15 +2544,17 @@ static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
return rc;
}
-#define BNXT_DRV_TESTS 3
+#define BNXT_DRV_TESTS 4
#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
-#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
u64 *buf)
{
struct bnxt *bp = netdev_priv(dev);
+ bool do_ext_lpbk = false;
bool offline = false;
u8 test_results = 0;
u8 test_mask = 0;
@@ -2555,6 +2568,10 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
+ if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
+ (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+ do_ext_lpbk = true;
+
if (etest->flags & ETH_TEST_FL_OFFLINE) {
if (bp->pf.active_vfs) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -2595,13 +2612,22 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
- bnxt_hwrm_phy_loopback(bp, true);
+ bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
if (bnxt_run_loopback(bp)) {
buf[BNXT_PHYLPBK_TEST_IDX] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- bnxt_hwrm_phy_loopback(bp, false);
+ if (do_ext_lpbk) {
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ bnxt_hwrm_phy_loopback(bp, true, true);
+ msleep(1000);
+ if (bnxt_run_loopback(bp)) {
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+ bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
bnxt_open_nic(bp, false, true);
}
@@ -2662,6 +2688,331 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return rc;
}
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ u16 seq = 0, len, segs_off;
+ void *resp = cmn_resp;
+ dma_addr_t dma_handle;
+ int rc, off = 0;
+ void *dma_buf;
+
+ dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
+ GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (info->dest_buf)
+ memcpy(info->dest_buf + off, dma_buf, len);
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
+ return rc;
+}
+
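+/* bnxt_hwrm_dbg_dma_data() above implements a simple paginated DMA protocol
+ * shared by the coredump list and retrieve commands; the contract, as
+ * inferred from the loop (a reading of the code, not wording from the
+ * HWRM spec):
+ *
+ *   for (seq = 0; ; seq++):
+ *     - send the request with seq_no = seq and a host DMA buffer
+ *     - firmware fills the buffer and returns data_len plus a flags word
+ *     - copy data_len bytes out of the DMA buffer
+ *     - stop once HWRM_DBG_CMN_FLAGS_MORE is clear in the response
+ */
+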
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct hwrm_dbg_coredump_list_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf)
+ info.dest_buf = buf + offset;
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
+static void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ memcpy(seg_hdr->signature, "sEgM", 4);
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ } else {
+ /* For hwrm_ver_get response Component id = 2
+ * and Segment id = 0
+ */
+ seg_hdr->component_id = cpu_to_le32(2);
+ seg_hdr->segment_id = 0;
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ memcpy(record->signature, "cOrE", 4);
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+ strlcpy(record->system_name, utsname()->nodename,
+ sizeof(record->system_name));
+ record->year = cpu_to_le16(tm.tm_year);
+ record->month = cpu_to_le16(tm.tm_mon);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ strcpy(record->commandline, "ethtool -w");
+ record->total_segments = cpu_to_le32(total_segs);
+
+ sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strlcpy(record->os_name, utsname()->sysname, 32);
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
+static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ struct coredump_segment_record *seg_record = NULL;
+ u32 offset = 0, seg_hdr_len, seg_record_len;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+ /* First segment should be hwrm_ver_get response */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ if (rc) {
+ netdev_err(bp->dev,
+ "Failed to initiate coredump for seg = %d\n",
+ seg_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf,
+ offset + seg_hdr_len);
+ if (rc)
+ netdev_err(bp->dev,
+ "Failed to retrieve coredump for seg = %d\n",
+ seg_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf)
+ bnxt_fill_coredump_record(bp, buf + offset, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ kfree(coredump.data);
+ *dump_len += sizeof(struct bnxt_coredump_record);
+
+ return rc;
+}
+
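+/* ethtool .get_dump_flag hook: report the firmware version and the
+ * required coredump length, computed with a NULL buffer.
+ */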
+static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
+ bp->ver_resp.hwrm_fw_min_8b << 16 |
+ bp->ver_resp.hwrm_fw_bld_8b << 8 |
+ bp->ver_resp.hwrm_fw_rsvd_8b;
+
+ return bnxt_get_coredump(bp, NULL, &dump->len);
+}
+
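+/* ethtool .get_dump_data hook: write the full coredump into the
+ * caller-provided buffer sized from a prior get_dump_flag call.
+ */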
+static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
+ void *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ memset(buf, 0, dump->len);
+
+ return bnxt_get_coredump(bp, buf, &dump->len);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
@@ -2702,6 +3053,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
strcpy(str, "Mac loopback test (offline)");
} else if (i == BNXT_PHYLPBK_TEST_IDX) {
strcpy(str, "Phy loopback test (offline)");
+ } else if (i == BNXT_EXTLPBK_TEST_IDX) {
+ strcpy(str, "Ext loopback test (offline)");
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
@@ -2763,4 +3116,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
.reset = bnxt_reset,
+ .get_dump_flag = bnxt_get_dump_flag,
+ .get_dump_data = bnxt_get_dump_data,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 836ef682f24c..b5b65b3f8534 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,6 +22,43 @@ struct bnxt_led_cfg {
u8 rsvd;
};
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
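+/* Raw segment list returned by HWRM_DBG_COREDUMP_LIST */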
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
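+/* Bookkeeping for chunked DMA transfers of debug data from firmware */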
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+};
+
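+/* Request/response fields shared by the HWRM_DBG_COREDUMP_* commands,
+ * used to drive the generic DMA loop in bnxt_hwrm_dbg_dma_data().
+ */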
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 0fe0ea8dce6c..971ace5d0d4a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -96,6 +96,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
#define HWRM_FUNC_VF_CFG 0xfUL
#define HWRM_RESERVED1 0x10UL
@@ -159,6 +160,7 @@ struct cmd_nums {
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
@@ -191,6 +193,8 @@ struct cmd_nums {
#define HWRM_PORT_QSTATS_EXT 0xb4UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
#define HWRM_FW_SET_TIME 0xc8UL
#define HWRM_FW_GET_TIME 0xc9UL
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
@@ -269,6 +273,11 @@ struct cmd_nums {
#define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
#define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
#define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -284,6 +293,8 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_LIST 0xff17UL
#define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
#define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -318,6 +329,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
#define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
#define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -344,9 +356,9 @@ struct hwrm_err_output {
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.9.1.15"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 25
+#define HWRM_VERSION_STR "1.9.2.25"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -526,6 +538,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -564,6 +577,8 @@ struct hwrm_async_event_cmpl_link_status_change {
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
};
/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
@@ -817,23 +832,26 @@ struct hwrm_func_qcaps_output {
__le16 fid;
__le16 port_id;
__le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -947,58 +965,26 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
#define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
__le16 alloc_stat_ctx;
- u8 unused_2[7];
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg_input (size:384b/48B) */
-struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[2];
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_1;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_2;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- u8 unused_3[4];
-};
-
-/* hwrm_func_vlan_cfg_output (size:128b/16B) */
-struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
+ __le16 alloc_msix;
+ u8 unused_2[5];
u8 valid;
};
@@ -1010,7 +996,7 @@ struct hwrm_func_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le16 fid;
- u8 unused_0[2];
+ __le16 num_msix;
__le32 flags;
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
@@ -1050,6 +1036,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1109,13 +1097,19 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
#define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
};
@@ -1212,30 +1206,6 @@ struct hwrm_func_vf_resc_free_output {
u8 valid;
};
-/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
-struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[2];
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
-struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0[3];
- u8 valid;
-};
-
/* hwrm_func_drv_rgtr_input (size:896b/112B) */
struct hwrm_func_drv_rgtr_input {
__le16 req_type;
@@ -1286,7 +1256,9 @@ struct hwrm_func_drv_rgtr_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[7];
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
u8 valid;
};
@@ -1372,7 +1344,7 @@ struct hwrm_func_drv_qver_input {
u8 unused_0[2];
};
-/* hwrm_func_drv_qver_output (size:192b/24B) */
+/* hwrm_func_drv_qver_output (size:256b/32B) */
struct hwrm_func_drv_qver_output {
__le16 error_code;
__le16 req_type;
@@ -1394,12 +1366,13 @@ struct hwrm_func_drv_qver_output {
u8 ver_maj_8b;
u8 ver_min_8b;
u8 ver_upd_8b;
- u8 unused_0[2];
- u8 valid;
+ u8 unused_0[3];
__le16 ver_maj;
__le16 ver_min;
__le16 ver_upd;
__le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
};
/* hwrm_func_resource_qcaps_input (size:192b/24B) */
@@ -1493,6 +1466,410 @@ struct hwrm_func_vf_resource_cfg_output {
u8 valid;
};
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+};
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
@@ -1592,10 +1969,11 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
#define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1751,10 +2129,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
@@ -2014,6 +2393,131 @@ struct hwrm_port_mac_ptp_qcfg_output {
u8 valid;
};
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
__le16 req_type;
@@ -2039,6 +2543,83 @@ struct hwrm_port_qstats_output {
u8 valid;
};
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:2368b/296B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+};
+
/* hwrm_port_qstats_ext_input (size:320b/40B) */
struct hwrm_port_qstats_ext_input {
__le16 req_type;
@@ -2062,7 +2643,8 @@ struct hwrm_port_qstats_ext_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ __le16 total_active_cos_queues;
+ u8 unused_0;
u8 valid;
};
@@ -2153,9 +2735,10 @@ struct hwrm_port_phy_qcaps_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -2612,6 +3195,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id0;
u8 queue_id0_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2620,6 +3204,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id1;
u8 queue_id1_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2628,6 +3213,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id2;
u8 queue_id2_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2636,6 +3222,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id3;
u8 queue_id3_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2644,6 +3231,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id4;
u8 queue_id4_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2652,6 +3240,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id5;
u8 queue_id5_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2660,6 +3249,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id6;
u8 queue_id6_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2668,6 +3258,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id7;
u8 queue_id7_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -3689,18 +4280,21 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
__le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
__le16 cos_rule;
__le16 lb_rule;
__le16 mru;
- u8 unused_0[4];
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -3740,6 +4334,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
u8 unused_1[7];
u8 valid;
};
@@ -3857,7 +4452,14 @@ struct hwrm_vnic_rss_cfg_input {
#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- u8 unused_0[4];
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
@@ -3950,7 +4552,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
u8 valid;
};
-/* hwrm_ring_alloc_input (size:640b/80B) */
+/* hwrm_ring_alloc_input (size:704b/88B) */
struct hwrm_ring_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3961,12 +4563,17 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
#define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le64 page_tbl_addr;
__le32 fbo;
@@ -3977,8 +4584,9 @@ struct hwrm_ring_alloc_input {
__le16 logical_id;
__le16 cmpl_ring_id;
__le16 queue_id;
- u8 unused_2[2];
- __le32 reserved1;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
__le16 ring_arb_cfg;
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
@@ -4016,6 +4624,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
#define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
u8 unused_4[3];
+ __le64 cq_handle;
};
/* hwrm_ring_alloc_output (size:128b/16B) */
@@ -4042,7 +4651,9 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_TX 0x1UL
#define RING_FREE_REQ_RING_TYPE_RX 0x2UL
#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -4058,6 +4669,52 @@ struct hwrm_ring_free_output {
u8 valid;
};
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 req_type;
@@ -4100,6 +4757,7 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 flags;
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
__le16 num_cmpl_dma_aggr;
__le16 num_cmpl_dma_aggr_during_int;
__le16 cmpl_aggr_dma_tmr;
@@ -4107,7 +4765,14 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 int_lat_tmr_min;
__le16 int_lat_tmr_max;
__le16 num_cmpl_aggr_int;
- u8 unused_0[6];
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
};
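/* Editorial sketch (not part of the patch): with the new 'enables' word,
 * firmware only consumes the coalescing fields whose enable bit is set, so a
 * caller can update one parameter without re-stating the rest. bnxt_hwrm_send()
 * is a hypothetical transport helper standing in for the driver's HWRM
 * plumbing; ring_id comes from the full structure definition.
 */
static int cfg_int_lat_tmr_min(u16 ring_id, u16 tmr_min)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output resp;

	req.ring_id = cpu_to_le16(ring_id);
	req.int_lat_tmr_min = cpu_to_le16(tmr_min);
	req.enables = cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN);
	return bnxt_hwrm_send(&req, sizeof(req), &resp, sizeof(resp)); /* hypothetical */
}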
/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
@@ -4120,34 +4785,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
u8 valid;
};
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
__le16 req_type;
@@ -5032,7 +5669,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0[7];
};
@@ -5059,7 +5697,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5087,7 +5726,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -5259,140 +5899,6 @@ struct hwrm_pcie_qstats_output {
u8 valid;
};
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* rx_port_stats_ext (size:320b/40B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
-};
-
/* pcie_ctx_hw_stats (size:768b/96B) */
struct pcie_ctx_hw_stats {
__le64 pcie_pl_signal_integrity;
@@ -5884,6 +6390,114 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 unused_0[7];
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 unused_0[2];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
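/* Editorial sketch (not part of the patch): how a host driver might drain one
 * coredump segment with the structures above, re-issuing the retrieve request
 * until firmware clears the MORE flag. bnxt_hwrm_send() is a hypothetical
 * transport helper; a real driver would use its own HWRM request/response
 * plumbing and DMA-map the destination buffer.
 */
static int dbg_coredump_retrieve_segment(u16 component, u16 segment,
					 dma_addr_t buf_dma, u32 buf_len)
{
	struct hwrm_dbg_coredump_retrieve_input req = {0};
	struct hwrm_dbg_coredump_retrieve_output resp;
	u32 seq = 0;
	int rc;

	do {
		req.component_id = cpu_to_le16(component);
		req.segment_id = cpu_to_le16(segment);
		req.host_dest_addr = cpu_to_le64(buf_dma);
		req.host_buf_len = cpu_to_le32(buf_len);
		req.seq_no = cpu_to_le32(seq++);
		rc = bnxt_hwrm_send(&req, sizeof(req), &resp, sizeof(resp)); /* hypothetical */
		if (rc)
			return rc;
		/* consume le16_to_cpu(resp.data_len) bytes from the buffer here */
	} while (resp.flags & DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE);

	return 0;
}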
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
@@ -6269,12 +6883,14 @@ struct hwrm_nvm_set_variable_input {
__le16 index_2;
__le16 index_3;
u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
u8 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a64910892c25..6d583bcd2a81 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -447,7 +447,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
struct bnxt_pf_info *pf = &bp->pf;
- int i, rc = 0;
+ int i, rc = 0, min = 1;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
@@ -464,14 +464,19 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
- if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
- req.min_cmpl_rings = cpu_to_le16(1);
- req.min_tx_rings = cpu_to_le16(1);
- req.min_rx_rings = cpu_to_le16(1);
- req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
- req.min_vnics = cpu_to_le16(1);
- req.min_stat_ctx = cpu_to_le16(1);
- req.min_hw_ring_grps = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ min = 0;
+ req.min_rsscos_ctx = cpu_to_le16(min);
+ }
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
+ pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ req.min_cmpl_rings = cpu_to_le16(min);
+ req.min_tx_rings = cpu_to_le16(min);
+ req.min_rx_rings = cpu_to_le16(min);
+ req.min_l2_ctxs = cpu_to_le16(min);
+ req.min_vnics = cpu_to_le16(min);
+ req.min_stat_ctx = cpu_to_le16(min);
+ req.min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@@ -618,7 +623,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
@@ -956,9 +961,13 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
mac_ok = true;
- } else if (bp->hwrm_spec_code < 0x10202) {
- mac_ok = true;
} else {
+ /* There are two cases:
+ * 1. If firmware spec < 0x10202, the VF MAC address is not
+ * forwarded to the PF, so it does not have to match.
+ * 2. Allow the VF to modify its own MAC when the PF has not
+ * assigned a valid MAC address and firmware spec >= 0x10202.
+ */
mac_ok = true;
}
if (mac_ok)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 795f45024c20..139d96c5a023 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -27,6 +27,15 @@
#define BNXT_FID_INVALID 0xffff
#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
+#define is_vlan_pcp_wildcarded(vlan_tci_mask) \
+ ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vlan_pcp_exactmatch(vlan_tci_mask) \
+ ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
+#define is_vlan_pcp_zero(vlan_tci) \
+ ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vid_exactmatch(vlan_tci_mask) \
+ ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+
/* Return the dst fid of the func for flow forwarding
* For PFs: src_fid is the fid of the PF
* For VF-reps: src_fid is the fid of the VF
@@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)
return true;
}
+static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
+ __be16 vlan_tci)
+{
+ /* The VLAN priority must be either exactly zero or fully wildcarded,
+ * and the VLAN ID must be an exact match.
+ */
+ if (is_vid_exactmatch(vlan_tci_mask) &&
+ ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
+ is_vlan_pcp_zero(vlan_tci)) ||
+ is_vlan_pcp_wildcarded(vlan_tci_mask)))
+ return true;
+
+ return false;
+}
+
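/* Editorial sketch (not part of the patch): a userspace truth table for the
 * rule above. The mask/shift constants mirror <linux/if_vlan.h>.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK 0xe000 /* priority code point, top 3 bits */
#define VLAN_VID_MASK  0x0fff /* VLAN id, low 12 bits */

static int tci_allowed(uint16_t mask_be, uint16_t tci_be)
{
	uint16_t mask = ntohs(mask_be), tci = ntohs(tci_be);
	int vid_exact = (mask & VLAN_VID_MASK) == VLAN_VID_MASK;
	int pcp_exact = (mask & VLAN_PRIO_MASK) == VLAN_PRIO_MASK;
	int pcp_wild  = (mask & VLAN_PRIO_MASK) == 0;
	int pcp_zero  = (tci & VLAN_PRIO_MASK) == 0;

	return vid_exact && ((pcp_exact && pcp_zero) || pcp_wild);
}

int main(void)
{
	printf("%d\n", tci_allowed(htons(0x0fff), htons(0x0064))); /* 1: pcp wildcarded */
	printf("%d\n", tci_allowed(htons(0xefff), htons(0x0064))); /* 1: pcp exact and zero */
	printf("%d\n", tci_allowed(htons(0xefff), htons(0x2064))); /* 0: pcp exact but nonzero */
	printf("%d\n", tci_allowed(htons(0x2fff), htons(0x0064))); /* 0: partial pcp mask */
	return 0;
}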
static bool bits_set(void *key, int len)
{
const u8 *p = key;
@@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
/* Currently VLAN fields cannot be partial wildcard */
if (bits_set(&flow->l2_key.inner_vlan_tci,
sizeof(flow->l2_key.inner_vlan_tci)) &&
- !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
- sizeof(flow->l2_mask.inner_vlan_tci))) {
- netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+ !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
+ flow->l2_key.inner_vlan_tci)) {
+ netdev_info(bp->dev, "Unsupported VLAN TCI\n");
return false;
}
if (bits_set(&flow->l2_key.inner_vlan_tpid,
@@ -1544,22 +1568,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
- int rc = 0;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
- break;
-
+ return bnxt_tc_add_flow(bp, src_fid, cls_flower);
case TC_CLSFLOWER_DESTROY:
- rc = bnxt_tc_del_flow(bp, cls_flower);
- break;
-
+ return bnxt_tc_del_flow(bp, cls_flower);
case TC_CLSFLOWER_STATS:
- rc = bnxt_tc_get_flow_stats(bp, cls_flower);
- break;
+ return bnxt_tc_get_flow_stats(bp, cls_flower);
+ default:
+ return -EOPNOTSUPP;
}
- return rc;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 347e4f946eb2..c37b2842f972 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -141,7 +141,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (avail_msix > num_msix)
avail_msix = num_msix;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
idx = bp->cp_nr_rings;
} else {
max_idx = min_t(int, bp->total_irqs, max_cp_rings);
@@ -162,14 +162,13 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
return -EAGAIN;
}
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
- bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return avail_msix;
@@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
- bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (netif_running(dev)) {
bnxt_close_nic(bp, true, false);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 05d405905906..e31f5d803c13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
bnxt_vf_rep_setup_tc_block_cb,
- vf_rep, vf_rep);
+ vf_rep, vf_rep, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
bnxt_vf_rep_setup_tc_block_cb, vf_rep);
@@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (bp->hwrm_spec_code < 0x10803) {
+ netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
+ rc = -ENOTSUPP;
+ goto done;
+ }
+
if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev,
- "Enable VFs before setting switchdev mode");
+ netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
rc = -EPERM;
goto done;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 1f0e872d0667..0584d07c8c33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
rc = bnxt_xdp_set(bp, xdp->prog);
break;
case XDP_QUERY_PROG:
- xdp->prog_attached = !!bp->xdp_prog;
xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
rc = 0;
break;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 30273a7717e2..d83233ae4a15 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+ id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
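/* Editorial note (not part of the patch): the id table backs the bitmap
 * helpers (find_next_zero_bit() and friends), which read and write in
 * 'unsigned long' strides. The old DIV_ROUND_UP(size, 32) * 4 sizing only
 * rounds up to 32-bit words, so the final partial long could be overrun on
 * 64-bit kernels; BITS_TO_LONGS(size) * sizeof(long) is the canonical
 * bitmap allocation size.
 */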
@@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
- struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
@@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
- req = (struct fcoe_kwqe_destroy *) kwqe;
cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
@@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- kfree(cp->csk_tbl);
+ kvfree(cp->csk_tbl);
cp->csk_tbl = NULL;
cnic_free_id_tbl(&cp->csk_port_tbl);
}
@@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
- cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
- GFP_KERNEL);
+ cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
+ GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
@@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
- int func, ret;
+ int ret;
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
cp->func = bp->pf_num;
- func = CNIC_FUNC(cp);
pfid = bp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3be87efdc93d..e6f28c7942ab 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6,11 +6,15 @@
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (C) 2005-2016 Broadcom Corporation.
* Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
*
* Firmware is:
* Derived from proprietary unpublished source code,
* Copyright (C) 2000-2016 Broadcom Corporation.
* Copyright (C) 2016-2017 Broadcom Ltd.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted for the distribution of this firmware
* data in hexadecimal or equivalent format, provided this copyright
@@ -50,6 +54,7 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/crc32poly.h>
#include <net/checksum.h>
#include <net/ip.h>
@@ -721,6 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -781,6 +787,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -9290,6 +9297,15 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_restore_clk(tp);
+ /* Increase the core clock speed to fix the tx timeout issue on the
+ * 5762 at 100Mbps link speed.
+ */
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ TG3_CPMU_MAC_ORIDE_ENABLE);
+ }
+
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
@@ -9707,7 +9723,7 @@ static inline u32 calc_crc(unsigned char *buf, int len)
reg >>= 1;
if (tmp)
- reg ^= 0xedb88320;
+ reg ^= CRC32_POLY_LE;
}
}
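/* Editorial note (not part of the patch): CRC32_POLY_LE from
 * <linux/crc32poly.h> is defined as 0xedb88320, so this hunk only replaces
 * the magic constant with its named form; the computed multicast hash is
 * unchanged.
 */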
@@ -10706,28 +10722,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
switch (limit) {
case 16:
tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
+ /* fall through */
case 15:
tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
+ /* fall through */
case 14:
tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
+ /* fall through */
case 13:
tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
+ /* fall through */
case 12:
tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
+ /* fall through */
case 11:
tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
+ /* fall through */
case 10:
tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
+ /* fall through */
case 9:
tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
+ /* fall through */
case 8:
tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
+ /* fall through */
case 7:
tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
+ /* fall through */
case 6:
tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
+ /* fall through */
case 5:
tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
+ /* fall through */
case 4:
/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
case 3:
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1d61aa3efda1..a772a33b685c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -7,6 +7,8 @@
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (C) 2007-2016 Broadcom Corporation.
* Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
*/
#ifndef _T3_H
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 427d65a1a126..b9984015ca8c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,7 +2,7 @@
# Atmel device configuration
#
-config NET_CADENCE
+config NET_VENDOR_CADENCE
bool "Cadence devices"
depends on HAS_IOMEM
default y
@@ -16,7 +16,7 @@ config NET_CADENCE
the remaining Atmel network card questions. If you say Y, you will be
asked for your specific card in the following questions.
-if NET_CADENCE
+if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
@@ -48,4 +48,4 @@ config MACB_PCI
To compile this driver as a module, choose M here: the module
will be called macb_pci.
-endif # NET_CADENCE
+endif # NET_VENDOR_CADENCE
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 86659823b259..3d45f4c92cf6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -166,6 +166,7 @@
#define GEM_DCFG6 0x0294 /* Design Config 6 */
#define GEM_DCFG7 0x0298 /* Design Config 7 */
#define GEM_DCFG8 0x029C /* Design Config 8 */
+#define GEM_DCFG10 0x02A4 /* Design Config 10 */
#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
@@ -490,6 +491,12 @@
#define GEM_SCR2CMP_OFFSET 0
#define GEM_SCR2CMP_SIZE 8
+/* Bitfields in DCFG10 */
+#define GEM_TXBD_RDBUFF_OFFSET 12
+#define GEM_TXBD_RDBUFF_SIZE 4
+#define GEM_RXBD_RDBUFF_OFFSET 8
+#define GEM_RXBD_RDBUFF_SIZE 4
+
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
#define GEM_SUBNSINCR_SIZE 16
@@ -635,6 +642,7 @@
#define MACB_CAPS_USRIO_DISABLED 0x00000010
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
+#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1203,6 +1211,9 @@ struct macb {
unsigned int max_tuples;
struct tasklet_struct hresp_err_tasklet;
+
+ int rx_bd_rd_prefetch;
+ int tx_bd_rd_prefetch;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3e93df5d4e3b..dc09f9a8a49b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -1565,6 +1566,9 @@ static unsigned int macb_tx_map(struct macb *bp,
if (i == queue->tx_head) {
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
+ if ((bp->dev->features & NETIF_F_HW_CSUM) &&
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
* (second or later descriptor)
@@ -1651,7 +1655,68 @@ static inline int macb_clear_csum(struct sk_buff *skb)
return 0;
}
-static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+{
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ int padlen = ETH_ZLEN - (*skb)->len;
+ int headroom = skb_headroom(*skb);
+ int tailroom = skb_tailroom(*skb);
+ struct sk_buff *nskb;
+ u32 fcs;
+
+ if (!(ndev->features & NETIF_F_HW_CSUM) ||
+ !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
+ skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ return 0;
+
+ if (padlen <= 0) {
+ /* FCS could be appended to tailroom. */
+ if (tailroom >= ETH_FCS_LEN)
+ goto add_fcs;
+ /* FCS could be appended by moving data to headroom. */
+ else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
+ padlen = 0;
+ /* No room for FCS, need to reallocate skb. */
+ else
+ padlen = ETH_FCS_LEN - tailroom;
+ } else {
+ /* Add room for FCS. */
+ padlen += ETH_FCS_LEN;
+ }
+
+ if (!cloned && headroom + tailroom >= padlen) {
+ (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
+ skb_set_tail_pointer(*skb, (*skb)->len);
+ } else {
+ nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ dev_kfree_skb_any(*skb);
+ *skb = nskb;
+ }
+
+ if (padlen) {
+ if (padlen >= ETH_FCS_LEN)
+ skb_put_zero(*skb, padlen - ETH_FCS_LEN);
+ else
+ skb_trim(*skb, ETH_FCS_LEN - padlen);
+ }
+
+add_fcs:
+ /* append the computed FCS to the packet */
+ fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
+ fcs = ~fcs;
+
+ skb_put_u8(*skb, fcs & 0xff);
+ skb_put_u8(*skb, (fcs >> 8) & 0xff);
+ skb_put_u8(*skb, (fcs >> 16) & 0xff);
+ skb_put_u8(*skb, (fcs >> 24) & 0xff);
+
+ return 0;
+}
+
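/* Editorial sketch (not part of the patch): the FCS appended above is the
 * standard Ethernet CRC32 stored least-significant byte first. In userspace
 * the same value comes out of zlib's crc32(), which folds in the ~0 seed and
 * final inversion that the kernel code spells out with crc32_le().
 */
#include <stdint.h>
#include <zlib.h>

static size_t append_fcs(uint8_t *frame, size_t len)
{
	uint32_t fcs = crc32(0L, frame, len);

	frame[len + 0] = fcs & 0xff;          /* LSB first, as on the wire */
	frame[len + 1] = (fcs >> 8) & 0xff;
	frame[len + 2] = (fcs >> 16) & 0xff;
	frame[len + 3] = (fcs >> 24) & 0xff;
	return len + 4;
}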
+static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
@@ -1660,6 +1725,17 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
bool is_lso, is_udp = 0;
+ netdev_tx_t ret = NETDEV_TX_OK;
+
+ if (macb_clear_csum(skb)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (macb_pad_and_fcs(&skb, dev)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
is_lso = (skb_shinfo(skb)->gso_size != 0);
@@ -1716,11 +1792,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (macb_clear_csum(skb)) {
- dev_kfree_skb_any(skb);
- goto unlock;
- }
-
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb);
@@ -1739,7 +1810,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unlock:
spin_unlock_irqrestore(&bp->lock, flags);
- return NETDEV_TX_OK;
+ return ret;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
@@ -1811,23 +1882,25 @@ static void macb_free_consistent(struct macb *bp)
{
struct macb_queue *queue;
unsigned int q;
+ int size;
- queue = &bp->queues[0];
bp->macbgem_ops.mog_free_rx_buffers(bp);
- if (queue->rx_ring) {
- dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
if (queue->tx_ring) {
- dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
+ size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+ dma_free_coherent(&bp->pdev->dev, size,
queue->tx_ring, queue->tx_ring_dma);
queue->tx_ring = NULL;
}
+ if (queue->rx_ring) {
+ size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+ dma_free_coherent(&bp->pdev->dev, size,
+ queue->rx_ring, queue->rx_ring_dma);
+ queue->rx_ring = NULL;
+ }
}
}
@@ -1874,7 +1947,7 @@ static int macb_alloc_consistent(struct macb *bp)
int size;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp);
+ size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&queue->tx_ring_dma,
GFP_KERNEL);
@@ -1890,7 +1963,7 @@ static int macb_alloc_consistent(struct macb *bp)
if (!queue->tx_skb)
goto out_err;
- size = RX_RING_BYTES(bp);
+ size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&queue->rx_ring_dma, GFP_KERNEL);
if (!queue->rx_ring)
@@ -3547,7 +3620,8 @@ static int at91ether_close(struct net_device *dev)
}
/* Transmit packet */
-static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
@@ -3726,6 +3800,8 @@ static int at91ether_init(struct platform_device *pdev)
int err;
u32 reg;
+ bp->queues[0].bp = bp;
+
dev->netdev_ops = &at91ether_netdev_ops;
dev->ethtool_ops = &macb_ethtool_ops;
@@ -3795,7 +3871,7 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -3856,7 +3932,7 @@ static int macb_probe(struct platform_device *pdev)
void __iomem *mem;
const char *mac;
struct macb *bp;
- int err;
+ int err, val;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3945,6 +4021,18 @@ static int macb_probe(struct platform_device *pdev)
else
dev->max_mtu = ETH_DATA_LEN;
+ if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
+ val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
+ if (val)
+ bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
+ macb_dma_desc_get_size(bp);
+
+ val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
+ if (val)
+ bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
+ macb_dma_desc_get_size(bp);
+ }
+
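/* Editorial note (not part of the patch): DCFG10 encodes the descriptor-read
 * prefetch depth as a power of two, so (2 << (val - 1)) is 2^val descriptors.
 * With, e.g., val = 2 and 16-byte DMA descriptors this pads each ring by
 * 4 * 16 = 64 bytes, keeping the GEM's prefetch from running off the end of
 * the descriptor memory.
 */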
mac = of_get_mac_address(np);
if (mac) {
ether_addr_copy(bp->dev->dev_addr, mac);
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 2220c771092b..cd5296b84229 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
if (delta > TSU_NSEC_MAX_VAL) {
gem_tsu_get_time(&bp->ptp_clock_info, &now);
- if (sign)
- now = timespec64_sub(now, then);
- else
- now = timespec64_add(now, then);
+ now = timespec64_add(now, then);
gem_tsu_set_time(&bp->ptp_clock_info,
(const struct timespec64 *)&now);
@@ -469,6 +466,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_ONESTEP_SYNC:
if (gem_ptp_set_one_step_sync(bp, 1) != 0)
return -ERANGE;
+ /* fall through */
case HWTSTAMP_TX_ON:
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index 07d2201530d2..9fdd496b90ff 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,6 @@
config NET_CALXEDA_XGMAC
tristate "Calxeda 1G/10G XGMAC Ethernet driver"
- depends on HAS_IOMEM && HAS_DMA
+ depends on HAS_IOMEM
depends on ARCH_HIGHBANK || COMPILE_TEST
select CRC32
help
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2c63afff1382..13741ee49b9b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -14,6 +14,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 043e3c11c42b..5f03199a3acf 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -4,7 +4,6 @@
config NET_VENDOR_CAVIUM
bool "Cavium ethernet drivers"
- depends on PCI
default y
---help---
Select this option if you want enable Cavium network support.
@@ -15,7 +14,7 @@ if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
- depends on 64BIT
+ depends on 64BIT && PCI
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
@@ -28,15 +27,15 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
imply CAVIUM_PTP
- depends on 64BIT
+ depends on 64BIT && PCI
---help---
This driver supports Thunder's NIC virtual function
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
- depends on 64BIT
+ depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
select THUNDER_NIC_RGX
---help---
This driver supports programming and controlling of MAC
@@ -44,16 +43,16 @@ config THUNDER_NIC_BGX
config THUNDER_NIC_RGX
tristate "Thunder MAC interface driver (RGX)"
- depends on 64BIT
+ depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
---help---
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
config CAVIUM_PTP
tristate "Cavium PTP coprocessor as PTP clock"
- depends on 64BIT
+ depends on 64BIT && PCI
imply PTP_1588_CLOCK
default y
---help---
@@ -65,8 +64,9 @@ config CAVIUM_PTP
config LIQUIDIO
tristate "Cavium LiquidIO support"
- depends on 64BIT
+ depends on 64BIT && PCI
depends on MAY_USE_DEVLINK
+ depends on PCI
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 929d485a3a2f..9f4f3c1d5043 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
@@ -1414,50 +1417,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct,
return 0;
}
-void cn23xx_dump_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
- CN23XX_SLI_CTL_STATUS,
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
-
- for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-
- dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
- oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
- CVM_CAST64(lio_pci_readq(
- oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
- oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index 9338a0008378..962bb62933db 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
reg_val =
octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
@@ -379,7 +382,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
mbox_cmd.fn = NULL;
- mbox_cmd.fn_arg = 0;
+ mbox_cmd.fn_arg = NULL;
octeon_mbox_write(oct, &mbox_cmd);
}
@@ -679,33 +682,3 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
return 0;
}
-
-void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));
-
- for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 06f7449c569d..8e05afd5e39c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -389,18 +389,14 @@ static int lio_set_link_ksettings(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct oct_link_info *linfo;
struct octeon_device *oct;
- u32 is25G = 0;
oct = lio->oct_dev;
linfo = &lio->linfo;
- if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
- oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
- is25G = 1;
- } else {
+ if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
return -EOPNOTSUPP;
- }
if (oct->no_speed_setting) {
dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
@@ -857,7 +853,14 @@ static int lio_set_phys_id(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
int value, ret;
+ u32 cur_ver;
+
+ linfo = &lio->linfo;
+ cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
+ oct->fw_info.ver.min,
+ oct->fw_info.ver.rev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -896,16 +899,22 @@ static int lio_set_phys_id(struct net_device *netdev,
return ret;
} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
octnet_id_active(netdev, LED_IDENTIFICATION_ON);
-
- /* returns 0 since updates are asynchronous */
- return 0;
+ if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ return 2;
+ else
+ return 0;
} else {
return -EINVAL;
}
break;
case ETHTOOL_ID_ON:
- if (oct->chip_id == OCTEON_CN66XX)
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+ else if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_HIGH);
else
@@ -914,7 +923,11 @@ static int lio_set_phys_id(struct net_device *netdev,
break;
case ETHTOOL_ID_OFF:
- if (oct->chip_id == OCTEON_CN66XX)
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+ else if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_LOW);
else
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8a815bb57177..6fb13fa73b27 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console)
*/
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
+/* time to wait for possible in-flight requests in milliseconds */
+#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
+
struct lio_trusted_vf_ctx {
struct completion complete;
int status;
@@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
force_io_queues_off(oct);
/* To allow for in-flight requests */
- schedule_timeout_uninterruptible(100);
+ schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
@@ -684,7 +687,7 @@ static void lio_sync_octeon_time(struct work_struct *work)
lt = (struct lio_time *)sc->virtdptr;
/* Get time of the day */
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
lt->sec = ts.tv_sec;
lt->nsec = ts.tv_nsec;
octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
@@ -2206,6 +2209,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSHWTSTAMP:
if (lio->oct_dev->ptp_enable)
return hwtstamp_ioctl(netdev, ifr);
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -2628,7 +2632,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
@@ -2906,7 +2910,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3065,7 +3069,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.param2 = linkstate;
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3299,7 +3303,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
{
struct lio *lio = NULL;
struct net_device *netdev;
- u8 mac[6], i, j, *fw_ver;
+ u8 mac[6], i, j, *fw_ver, *micro_ver;
+ unsigned long micro;
+ u32 cur_ver;
struct octeon_soft_command *sc;
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
@@ -3429,6 +3435,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
fw_ver);
}
+ /* extract micro version field; point past '<maj>.<min>.' */
+ micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
+ if (kstrtoul(micro_ver, 10, &micro) != 0)
+ micro = 0;
+ octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
+ octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
+ octeon_dev->fw_info.ver.rev = micro;
+
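/* Editorial note (not part of the patch): assuming a legacy version string
 * such as "1.7.2" with LIQUIDIO_BASE_VERSION "1.7", strlen() + 1 skips past
 * "1.7." and kstrtoul() parses the remaining "2" into the new
 * fw_info.ver.rev field, falling back to 0 if the field is absent.
 */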
octeon_swap_8B_data((u64 *)(&resp->cfg_info),
(sizeof(struct liquidio_if_cfg_info)) >> 3);
@@ -3569,9 +3583,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
u8 vfmac[ETH_ALEN];
- random_ether_addr(&vfmac[0]);
- if (__liquidio_set_vf_mac(netdev, j,
- &vfmac[0], false)) {
+ eth_random_addr(vfmac);
+ if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
dev_err(&octeon_dev->pci_dev->dev,
"Error setting VF%d MAC address\n",
j);
@@ -3672,7 +3685,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OCTEON_CN2350_25GB_SUBSYS_ID ||
octeon_dev->subsystem_id ==
OCTEON_CN2360_25GB_SUBSYS_ID) {
- liquidio_get_speed(lio);
+ cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
+ octeon_dev->fw_info.ver.min,
+ octeon_dev->fw_info.ver.rev);
+
+ /* speed control unsupported in f/w older than 1.7.2 */
+ if (cur_ver < OCT_FW_VER(1, 7, 2)) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "speed setting not supported by f/w.");
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ } else {
+ liquidio_get_speed(lio);
+ }
if (octeon_dev->speed_setting == 0) {
octeon_dev->speed_setting = 25;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7fa0212873ac..b77835724dc8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 690424b6781a..7407fcd338e9 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -907,6 +907,7 @@ static inline int opcode_slow_path(union octeon_rh *rh)
#define VITESSE_PHY_GPIO_LOW 0x3
#define LED_IDENTIFICATION_ON 0x1
#define LED_IDENTIFICATION_OFF 0x0
+#define LIO23XX_COPPERHEAD_LED_GPIO 0x2
struct oct_mdio_cmd {
u64 op;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 7f97ae48efed..0cc2338d8d2a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
*
* Octeon always uses UTC time. so timezone information is not sent.
*/
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
ret = snprintf(boottime, MAX_BOOTTIME_SIZE,
" time_sec=%lld time_nsec=%ld",
(s64)ts.tv_sec, ts.tv_nsec);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 94a4ed88d618..d99ca6ba23a4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -288,8 +288,17 @@ struct oct_fw_info {
*/
u32 app_mode;
char liquidio_firmware_version[32];
+ /* Fields extracted from legacy string 'liquidio_firmware_version' */
+ struct {
+ u8 maj;
+ u8 min;
+ u8 rev;
+ } ver;
};
+#define OCT_FW_VER(maj, min, rev) \
+ (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev)))
+
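/* Editorial note (not part of the patch): OCT_FW_VER() packs the version so
 * it compares numerically, e.g.
 *
 *	OCT_FW_VER(1, 7, 2) == 0x010702
 *	OCT_FW_VER(1, 7, 3) >  OCT_FW_VER(1, 7, 2)	(rev bump)
 *	OCT_FW_VER(1, 8, 0) >  OCT_FW_VER(1, 7, 9)	(minor outranks rev)
 *
 * which is exactly the comparison lio_set_phys_id() and setup_nic_devices()
 * use to gate the 1.7.2+ LED-control and speed-setting paths.
 */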
/* wrappers around work structs */
struct cavium_wk {
struct delayed_work work;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5fed7b63223e..2327062e8af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -82,6 +82,16 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
+ /** This flag indicates whether the queue can be used for soft commands.
+ * If this flag is set, post_lock must be acquired before posting
+ * a command to the queue.
+ * If this flag is clear, post_lock is invalid for the queue.
+ * All control commands (soft commands) go through Queue 0 only
+ * (the control and data queue), so only queue 0 needs post_lock;
+ * the other queues are pure data queues and do not need it.
+ */
+ bool allow_soft_cmds;
+
u32 pkt_in_done;
/** A spinlock to protect access to the input ring.*/
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 1f2e75da28f8..8f746e1348d4 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
- dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
- iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
+ iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
iq->txpciq.u64 = txpciq.u64;
iq->fill_threshold = (u32)conf->db_min;
@@ -126,7 +126,12 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
- spin_lock_init(&iq->post_lock);
+ if (iq_no == 0) {
+ iq->allow_soft_cmds = true;
+ spin_lock_init(&iq->post_lock);
+ } else {
+ iq->allow_soft_cmds = false;
+ }
spin_lock_init(&iq->iq_flush_running_lock);
@@ -566,7 +571,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
/* Get the lock and prevent other tasks and tx interrupt handler from
* running.
*/
- spin_lock_bh(&iq->post_lock);
+ if (iq->allow_soft_cmds)
+ spin_lock_bh(&iq->post_lock);
st = __post_command2(iq, cmd);
@@ -583,7 +589,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
- spin_unlock_bh(&iq->post_lock);
+ if (iq->allow_soft_cmds)
+ spin_unlock_bh(&iq->post_lock);
/* This is only done here to expedite packets being flushed
* for cases where there are no IQ completion interrupts.
@@ -702,11 +709,20 @@ octeon_prepare_soft_command(struct octeon_device *oct,
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
+ struct octeon_instr_queue *iq;
struct octeon_instr_ih2 *ih2;
struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
u32 len;
+ iq = oct->instr_queue[sc->iq_no];
+ if (!iq->allow_soft_cmds) {
+ dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+ sc->iq_no);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+ return IQ_SEND_FAILED;
+ }
+
if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
if (ih3->dlengsz) {
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 3f6afb54a5eb..bb43ddb7539e 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+ int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
netdev->mtu = new_mtu;
- cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
+ /* HW lifts the limit if the frame is VLAN tagged
+ * (+4 bytes for each tag, up to two tags)
+ */
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
+ /* Set the hardware to truncate packets larger than the MTU. The jabber
+ * register must be set to a multiple of 8 bytes, so round up. JABBER is
+ * an unconditional limit, so we need to account for two possible VLAN
+ * tags.
+ */
cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
- (size_without_fcs + 7) & 0xfff8);
+ (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
return 0;
}
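/* Editorial note (not part of the patch): for the default MTU of 1500 the
 * new code programs
 *
 *	max_packet = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) = 1518
 *	JABBER     = (1518 + 7 + 2 * VLAN_HLEN(4)) & 0xfff8 = 1528
 *
 * so FRM_MAX carries the untagged limit (the hardware itself allows up to
 * 8 extra bytes for VLAN tags) while JABBER, which is unconditional and must
 * be a multiple of 8, is rounded up with room for two tags.
 */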
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5603f5ab1fee..92ba958e204e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -527,6 +527,7 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case IPV4_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 135766c4296b..768f584f8392 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return nicvf_xdp_setup(nic, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nic->xdp_prog;
xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
return 0;
default:
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5d08d2aeb172..e337da6ba2a4 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
GFP_KERNEL);
+ if (!lmac->dmacs)
+ return -ENOMEM;
/* Enable lmac */
bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 8623be13bf86..0ccdde366ae1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -109,10 +109,6 @@ static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
-static const char pci_speed[][4] = {
- "33", "66", "100", "133"
-};
-
/*
* Setup MAC to receive the types of packets we want.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 7b795edd9d3a..a19172dbe6be 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -51,6 +51,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "common.h"
#include "cxgb3_ioctl.h"
@@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (t.qset_idx >= nqsets)
return -EINVAL;
+ t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
q = &adapter->params.sge.qset[q1 + t.qset_idx];
t.rspq_size = q->rspq_size;
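The added array_index_nospec() line is the standard Spectre-v1 hardening pattern: even after the architectural bounds check, the index is clamped so a speculated out-of-range value cannot steer the following array load. A hedged, portable sketch of the pattern (the real helper in <linux/nospec.h> uses a branchless data dependency; the stand-in below only mirrors its contract, not the barrier-based implementation):

#include <stddef.h>
#include <stdio.h>

/* Illustration only: clamp the index so a mispredicted bounds check
 * cannot be used to read out of range during speculation.
 */
static size_t array_index_nospec_sketch(size_t index, size_t size)
{
        return index < size ? index : 0;
}

static int lookup(const int *table, size_t nelems, size_t user_idx)
{
        if (user_idx >= nelems)
                return -1;              /* architectural bounds check */
        /* clamp again so speculation cannot run past the check */
        user_idx = array_index_nospec_sketch(user_idx, nelems);
        return table[user_idx];
}

int main(void)
{
        int qsets[4] = { 10, 11, 12, 13 };

        printf("%d %d\n", lookup(qsets, 4, 2), lookup(qsets, 4, 9)); /* 12 -1 */
        return 0;
}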
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 248e40c6966c..0e9182d3f02c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -136,6 +136,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
+ /* fall through */
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 3c5057868ab3..36d25883d123 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -120,6 +120,8 @@ struct cudbg_mem_desc {
u32 idx;
};
+#define CUDBG_MEMINFO_REV 1
+
struct cudbg_meminfo {
struct cudbg_mem_desc avail[4];
struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
@@ -137,6 +139,9 @@ struct cudbg_meminfo {
u32 port_alloc[4];
u32 loopback_used[NCHAN];
u32 loopback_alloc[NCHAN];
+ u32 p_structs_free_cnt;
+ u32 free_rx_cnt;
+ u32 free_tx_cnt;
};
struct cudbg_cim_pif_la {
@@ -281,12 +286,18 @@ struct cudbg_tid_data {
#define CUDBG_NUM_ULPTX 11
#define CUDBG_NUM_ULPTX_READ 512
+#define CUDBG_NUM_ULPTX_ASIC 6
+#define CUDBG_NUM_ULPTX_ASIC_READ 128
+
+#define CUDBG_ULPTX_LA_REV 1
struct cudbg_ulptx_la {
u32 rdptr[CUDBG_NUM_ULPTX];
u32 wrptr[CUDBG_NUM_ULPTX];
u32 rddata[CUDBG_NUM_ULPTX];
u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+ u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ];
+ u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC];
};
#define CUDBG_CHAC_PBT_ADDR 0x2800
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 0afcfe99bff3..d97e0d7e541a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -349,6 +349,11 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->up_extmem2_hi = hi;
lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+ for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
+ meminfo_buff->free_rx_cnt +=
+ FREERXPAGECOUNT_G(t4_read_reg(padap,
+ TP_FLM_FREE_RX_CNT_A));
+
meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
meminfo_buff->rx_pages_data[1] =
t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
@@ -356,6 +361,11 @@ int cudbg_fill_meminfo(struct adapter *padap,
lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+ for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
+ meminfo_buff->free_tx_cnt +=
+ FREETXPAGECOUNT_G(t4_read_reg(padap,
+ TP_FLM_FREE_TX_CNT_A));
+
meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
meminfo_buff->tx_pages_data[1] =
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
@@ -364,6 +374,8 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+ meminfo_buff->p_structs_free_cnt =
+ FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));
for (i = 0; i < 4; i++) {
if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
@@ -1465,14 +1477,23 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_meminfo *meminfo_buff;
+ struct cudbg_ver_hdr *ver_hdr;
int rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_meminfo),
&temp_buff);
if (rc)
return rc;
- meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+ ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_MEMINFO_REV;
+ ver_hdr->size = sizeof(struct cudbg_meminfo);
+
+ meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
+ sizeof(*ver_hdr));
rc = cudbg_fill_meminfo(padap, meminfo_buff);
if (rc) {
cudbg_err->sys_err = rc;
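Both collectors now emit a struct cudbg_ver_hdr ahead of the entity payload, so the user-space cudbg parser can key its decoding off signature/revision/size instead of guessing from the blob length. Schematically the buffer becomes [header][payload]; a minimal sketch of that layout, with the header fields assumed from the hunk above and a hypothetical signature value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed shape of the versioned header, per the hunk above. */
struct ver_hdr {
        uint32_t signature;
        uint32_t revision;
        uint32_t size;      /* size of the entity payload that follows */
};

int main(void)
{
        uint32_t payload[4] = { 1, 2, 3, 4 };   /* stand-in entity */
        unsigned char buf[sizeof(struct ver_hdr) + sizeof(payload)];
        struct ver_hdr hdr = { 0x43554442 /* hypothetical signature */,
                               1, sizeof(payload) };

        /* Lay out [header][payload] in one flat buffer, as the collectors
         * now do inside the cudbg_get_buff() allocation.
         */
        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), payload, sizeof(payload));

        /* The payload pointer is simply base + sizeof(header), mirroring
         * (struct cudbg_meminfo *)(temp_buff.data + sizeof(*ver_hdr)).
         */
        printf("payload starts at offset %zu\n", sizeof(hdr));
        return 0;
}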
@@ -2586,15 +2607,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ulptx_la *ulptx_la_buff;
+ struct cudbg_ver_hdr *ver_hdr;
u32 i, j;
int rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
- ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
+ ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_ULPTX_LA_REV;
+ ver_hdr->size = sizeof(struct cudbg_ulptx_la);
+
+ ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
+ sizeof(*ver_hdr));
for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
ULP_TX_LA_RDPTR_0_A +
@@ -2610,6 +2640,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
+
+ for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
+ t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
+ ulptx_la_buff->rdptr_asic[i] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
+ ulptx_la_buff->rddata_asic[i][0] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
+ ulptx_la_buff->rddata_asic[i][1] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
+ ulptx_la_buff->rddata_asic[i][2] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
+ ulptx_la_buff->rddata_asic[i][3] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
+ ulptx_la_buff->rddata_asic[i][4] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
+ ulptx_la_buff->rddata_asic[i][5] =
+ t4_read_reg(padap, PM_RX_BASE_ADDR);
+ }
+
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0dbe2d9e22d6..76d16747f513 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,6 +46,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -319,6 +320,21 @@ struct vpd_params {
u8 na[MACADDR_LEN + 1];
};
+/* Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned char speed;
@@ -346,6 +362,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
+ struct pf_resources pfres;
struct pci_params pci;
struct devlog_params devlog;
enum pcie_memwin drv_memwin;
@@ -521,6 +538,15 @@ enum {
MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
+enum {
+ PRIV_FLAG_PORT_TX_VM_BIT,
+};
+
+#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT)
+
+#define PRIV_FLAGS_ADAP 0
+#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM
+
struct adapter;
struct sge_rspq;
@@ -557,6 +583,7 @@ struct port_info {
struct hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
+ u32 eth_flags;
};
struct dentry;
@@ -867,6 +894,7 @@ struct adapter {
unsigned int flags;
unsigned int adap_idx;
enum chip_type chip;
+ u32 eth_flags;
int msg_enable;
__be16 vxlan_port;
@@ -956,6 +984,7 @@ struct adapter {
struct chcr_stats_debug chcr_stats;
/* TC flower offload */
+ bool tc_flower_initialized;
struct rhashtable flower_tbl;
struct rhashtable_params flower_ht_params;
struct timer_list flower_stats_timer;
@@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
@@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
@@ -1823,4 +1853,5 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
+int cxgb4_dcb_enabled(const struct net_device *dev);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 8d751efcb90e..5f01c0a7fd98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -224,7 +224,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
case CUDBG_MEMINFO:
- len = sizeof(struct cudbg_meminfo);
+ len = sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_meminfo);
break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
@@ -273,7 +274,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
break;
case CUDBG_ULPTX_LA:
- len = sizeof(struct cudbg_ulptx_la);
+ len = sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
n = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4e7f72b17e82..b34f0f077a31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -22,7 +22,7 @@
/* DCBx version control
*/
-static const char * const dcb_ver_array[] = {
+const char * const dcb_ver_array[] = {
"Unknown",
"DCBx-CIN",
"DCBx-CEE 1.01",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c301aaf79d64..0f72f9c4ec74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2414,6 +2414,234 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
.release = seq_release_private
};
+#ifdef CONFIG_CHELSIO_T4_DCB
+extern char *dcb_ver_array[];
+
+/* Data Center Bridging information for each port.
+ */
+static int dcb_info_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Data Center Bridging Information\n");
+ } else {
+ int port = (uintptr_t)v - 2;
+ struct net_device *dev = adap->port[port];
+ struct port_info *pi = netdev2pinfo(dev);
+ struct port_dcb_info *dcb = &pi->dcb;
+
+ seq_puts(seq, "\n");
+ seq_printf(seq, "Port: %d (DCB negotiated: %s)\n",
+ port,
+ cxgb4_dcb_enabled(dev) ? "yes" : "no");
+
+ if (cxgb4_dcb_enabled(dev))
+ seq_printf(seq, "[ DCBx Version %s ]\n",
+ dcb_ver_array[dcb->dcb_version]);
+
+ if (dcb->msgs) {
+ int i;
+
+ seq_puts(seq, "\n Index\t\t\t :\t");
+ for (i = 0; i < 8; i++)
+ seq_printf(seq, " %3d", i);
+ seq_puts(seq, "\n\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PGID) {
+ int prio, pgid;
+
+ seq_puts(seq, " Priority Group IDs\t :\t");
+ for (prio = 0; prio < 8; prio++) {
+ pgid = (dcb->pgid >> 4 * (7 - prio)) & 0xf;
+ seq_printf(seq, " %3d", pgid);
+ }
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PGRATE) {
+ int pg;
+
+ seq_puts(seq, " Priority Group BW(%)\t :\t");
+ for (pg = 0; pg < 8; pg++)
+ seq_printf(seq, " %3d", dcb->pgrate[pg]);
+ seq_puts(seq, "\n");
+
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+ seq_puts(seq, " TSA Algorithm\t\t :\t");
+ for (pg = 0; pg < 8; pg++)
+ seq_printf(seq, " %3d", dcb->tsa[pg]);
+ seq_puts(seq, "\n");
+ }
+
+ seq_printf(seq, " Max PG Traffic Classes [%3d ]\n",
+ dcb->pg_num_tcs_supported);
+
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PRIORATE) {
+ int prio;
+
+ seq_puts(seq, " Priority Rate\t:\t");
+ for (prio = 0; prio < 8; prio++)
+ seq_printf(seq, " %3d", dcb->priorate[prio]);
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PFC) {
+ int prio;
+
+ seq_puts(seq, " Priority Flow Control :\t");
+ for (prio = 0; prio < 8; prio++) {
+ int pfcen = (dcb->pfcen >> 1 * (7 - prio))
+ & 0x1;
+ seq_printf(seq, " %3d", pfcen);
+ }
+ seq_puts(seq, "\n");
+
+ seq_printf(seq, " Max PFC Traffic Classes [%3d ]\n",
+ dcb->pfc_num_tcs_supported);
+
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_APP_ID) {
+ int app, napps;
+
+ seq_puts(seq, " Application Information:\n");
+ seq_puts(seq, " App Priority Selection Protocol\n");
+ seq_puts(seq, " Index Map Field ID\n");
+ for (app = 0, napps = 0;
+ app < CXGB4_MAX_DCBX_APP_SUPPORTED; app++) {
+ struct app_priority *ap;
+ static const char * const sel_names[] = {
+ "Ethertype",
+ "Socket TCP",
+ "Socket UDP",
+ "Socket All",
+ };
+ const char *sel_name;
+
+ ap = &dcb->app_priority[app];
+ /* skip empty slots */
+ if (ap->protocolid == 0)
+ continue;
+ napps++;
+
+ if (ap->sel_field < ARRAY_SIZE(sel_names))
+ sel_name = sel_names[ap->sel_field];
+ else
+ sel_name = "UNKNOWN";
+
+ seq_printf(seq, " %3d %#04x %-10s (%d) %#06x (%d)\n",
+ app,
+ ap->user_prio_map,
+ sel_name, ap->sel_field,
+ ap->protocolid, ap->protocolid);
+ }
+ if (napps == 0)
+ seq_puts(seq, " --- None ---\n");
+ }
+ }
+ return 0;
+}
+
+static inline void *dcb_info_get_idx(struct adapter *adap, loff_t pos)
+{
+ return (pos <= adap->params.nports
+ ? (void *)((uintptr_t)pos + 1)
+ : NULL);
+}
+
+static void *dcb_info_start(struct seq_file *seq, loff_t *pos)
+{
+ struct adapter *adap = seq->private;
+
+ return (*pos
+ ? dcb_info_get_idx(adap, *pos)
+ : SEQ_START_TOKEN);
+}
+
+static void dcb_info_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *dcb_info_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct adapter *adap = seq->private;
+
+ (*pos)++;
+ return dcb_info_get_idx(adap, *pos);
+}
+
+static const struct seq_operations dcb_info_seq_ops = {
+ .start = dcb_info_start,
+ .next = dcb_info_next,
+ .stop = dcb_info_stop,
+ .show = dcb_info_show
+};
+
+static int dcb_info_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &dcb_info_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations dcb_info_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = dcb_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif /* CONFIG_CHELSIO_T4_DCB */
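dcb_info_get_idx() uses the common seq_file idiom of smuggling a 1-based position through the iterator's void pointer: position 0 maps to SEQ_START_TOKEN for the banner, and show() recovers the port index as (uintptr_t)v - 2 because start() handed back pos + 1. A compact demonstration of the encoding, detached from the driver:

#include <stdint.h>
#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)   /* as in <linux/seq_file.h> */

/* Encode: pos 1..nports -> opaque non-NULL cookie, past the end -> NULL. */
static void *get_idx(long nports, long pos)
{
        return pos <= nports ? (void *)(uintptr_t)(pos + 1) : NULL;
}

int main(void)
{
        long nports = 2;

        for (long pos = 0; ; pos++) {
                void *v = pos ? get_idx(nports, pos) : SEQ_START_TOKEN;

                if (!v)
                        break;
                if (v == SEQ_START_TOKEN)
                        printf("header row\n");
                else
                        printf("port %ld\n", (long)((uintptr_t)v - 2));
        }
        return 0;   /* prints: header row, port 0, port 1 */
}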
+
+static int resources_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ #define S(desc, fmt, var) \
+ seq_printf(seq, "%-60s " fmt "\n", \
+ desc " (" #var "):", pfres->var)
+
+ S("Virtual Interfaces", "%d", nvi);
+ S("Egress Queues", "%d", neq);
+ S("Ethernet Control", "%d", nethctrl);
+ S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+ S("Ingress Queues", "%d", niq);
+ S("Traffic Class", "%d", tc);
+ S("Port Access Rights Mask", "%#x", pmask);
+ S("MAC Address Filters", "%d", nexactf);
+ S("Firmware Command Read Capabilities", "%#x", r_caps);
+ S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+ #undef S
+
+ return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = resources_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
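The local S() macro relies on the preprocessor's # stringize operator so each row carries both a human-readable description and the literal struct member name, e.g. "Virtual Interfaces (nvi):". A small user-space sketch of the same trick (note the macro, like the original, reaches for a pfres variable in scope at the expansion site):

#include <stdio.h>

struct pf_resources { unsigned int nvi; unsigned int neq; };

/* Mirrors the driver's local S(): #var stringizes the member name so
 * the label shows both description and field.
 */
#define S(desc, fmt, var) \
        printf("%-60s " fmt "\n", desc " (" #var "):", pfres->var)

int main(void)
{
        struct pf_resources res = { .nvi = 4, .neq = 16 };
        struct pf_resources *pfres = &res;

        S("Virtual Interfaces", "%u", nvi);
        S("Egress Queues", "%u", neq);
        return 0;
}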
+
/**
* ethqset2pinfo - return port_info of an Ethernet Queue Set
* @adap: the adapter
@@ -2436,16 +2664,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
return NULL;
}
+static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld)
+{
+ const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld];
+
+ if (!utxq_info)
+ return 0;
+
+ return DIV_ROUND_UP(utxq_info->ntxq, 4);
+}
+
+static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld,
+ bool ciq)
+{
+ const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld];
+
+ if (!urxq_info)
+ return 0;
+
+ return ciq ? DIV_ROUND_UP(urxq_info->nciq, 4) :
+ DIV_ROUND_UP(urxq_info->nrxq, 4);
+}
+
+static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, false);
+}
+
+static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, true);
+}
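All of these helpers exist because sge_qinfo prints queues four to a row: DIV_ROUND_UP(count, 4) converts a queue count into a row count, and each row later clamps its width with n = min(4, count - 4 * r). A tiny sketch of that pagination:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int nq = 10;                          /* e.g. 10 queues */
        int rows = DIV_ROUND_UP(nq, 4);       /* 3 rows */

        for (int r = 0; r < rows; r++) {
                int n = nq - 4 * r;           /* queues left for this row */

                if (n > 4)
                        n = 4;
                printf("row %d: %d columns\n", r, n); /* 4, 4, 2 */
        }
        return 0;
}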
+
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
+ int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
+ const struct sge_uld_txq_info *utxq_info;
+ const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
- int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
- int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- int i, r = (uintptr_t)v - 1;
- int ofld_idx = r - eth_entries;
- int ctrl_idx = ofld_idx - ofld_entries;
- int fq_idx = ctrl_idx - ctrl_entries;
+ int i, n, r = (uintptr_t)v - 1;
+ int eth_entries, ctrl_entries;
+ struct sge *s = &adap->sge;
+
+ eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+ ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ }
if (r)
seq_putc(seq, '\n');
@@ -2467,9 +2743,10 @@ do { \
if (r < eth_entries) {
int base_qset = r * 4;
- const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
- const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
- int n = min(4, adap->sge.ethqsets - 4 * r);
+ const struct sge_eth_rxq *rx = &s->ethrxq[base_qset];
+ const struct sge_eth_txq *tx = &s->ethtxq[base_qset];
+
+ n = min(4, s->ethqsets - 4 * r);
S("QType:", "Ethernet");
S("Interface:",
@@ -2494,8 +2771,7 @@ do { \
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
@@ -2520,9 +2796,196 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (ctrl_idx < ctrl_entries) {
- const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
- int n = min(4, adap->params.nports - 4 * ctrl_idx);
+ goto unlock;
+ }
+
+ r -= eth_entries;
+ if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_OFLD];
+ tx = &utxq_info->uldtxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "OFLD-TXQ");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_OFLD];
+ if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "RDMA-CPL");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+ int ciq_idx = 0;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ ciq_idx = urxq_info->nrxq + (r * 4);
+ rx = &urxq_info->uldrxq[ciq_idx];
+ n = min(4, urxq_info->nciq - 4 * r);
+
+ S("QType:", "RDMA-CIQ");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+
+ goto unlock;
+ }
+
+ r -= uld_ciq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSI");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSI];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSIT");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSIT];
+ if (r < uld_rxq_entries[CXGB4_ULD_TLS]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "TLS");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_TLS];
+ if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) {
+ const struct sge_ofld_rxq *rx;
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO];
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO];
+ tx = &utxq_info->uldtxq[r * 4];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "Crypto");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ if (r < ctrl_entries) {
+ const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
+
+ n = min(4, adap->params.nports - 4 * r);
S("QType:", "Control");
T("TxQ ID:", q.cntxt_id);
@@ -2532,8 +2995,13 @@ do { \
T("TxQ PIDX:", q.pidx);
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- } else if (fq_idx == 0) {
- const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+ goto unlock;
+ }
+
+ r -= ctrl_entries;
+ if (r < 1) {
+ const struct sge_rspq *evtq = &s->fw_evtq;
seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
@@ -2544,8 +3012,13 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
qtimer_val(adap, evtq));
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
- adap->sge.counter_val[evtq->pktcnt_idx]);
+ s->counter_val[evtq->pktcnt_idx]);
+
+ goto unlock;
}
+
+unlock:
+ mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -2559,8 +3032,21 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
+ int tot_uld_entries = 0;
+ int i;
+
+ mutex_lock(&uld_mutex);
+ for (i = 0; i < CXGB4_TX_MAX; i++)
+ tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
+
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i);
+ tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ mutex_unlock(&uld_mutex);
+
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+ tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -2851,15 +3337,17 @@ static int meminfo_show(struct seq_file *seq, void *v)
mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
meminfo.up_extmem2_hi);
- seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
- meminfo.rx_pages_data[2]);
+ seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
+ meminfo.rx_pages_data[0], meminfo.free_rx_cnt,
+ meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]);
- seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
- meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+ seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
+ meminfo.tx_pages_data[0], meminfo.free_tx_cnt,
+ meminfo.tx_pages_data[1], meminfo.tx_pages_data[2],
+ meminfo.tx_pages_data[3]);
- seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+ seq_printf(seq, "%u p-structs (%u free)\n\n",
+ meminfo.p_structs, meminfo.p_structs_free_cnt);
for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
@@ -2924,6 +3412,169 @@ static const struct file_operations chcr_stats_debugfs_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+#define PRINT_ADAP_STATS(string, value) \
+ seq_printf(seq, "%-25s %-20llu\n", (string), \
+ (unsigned long long)(value))
+
+#define PRINT_CH_STATS(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats.value[i]); \
+ seq_printf(seq, "\n"); \
+} while (0)
+
+#define PRINT_CH_STATS2(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats[i].value); \
+ seq_printf(seq, "\n"); \
+} while (0)
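The two channel macros differ only in where the per-channel index lands: PRINT_CH_STATS walks an array member inside a single stats struct (stats.value[i], as in tp_cpl_stats), while PRINT_CH_STATS2 walks an array of per-channel structs (stats[i].value, as in tp_fcoe_stats). A minimal illustration of the two layouts:

#include <stdio.h>

#define NCHAN 2

/* Layout A: one struct holding per-channel arrays (tp_cpl_stats style). */
struct cpl_stats { unsigned long long req[NCHAN], rsp[NCHAN]; };

/* Layout B: one struct per channel (tp_fcoe_stats style). */
struct fcoe_stats { unsigned long long frames_ddp; };

int main(void)
{
        struct cpl_stats a = { { 1, 2 }, { 3, 4 } };
        struct fcoe_stats b[NCHAN] = { { 5 }, { 6 } };

        for (int i = 0; i < NCHAN; i++)
                printf("ch%d: req=%llu frames_ddp=%llu\n",
                       i, a.req[i], b[i].frames_ddp); /* stats.value[i] vs stats[i].value */
        return 0;
}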
+
+static void show_tcp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_tcp_stats v4, v6;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs);
+}
+
+static void show_ddp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_usm_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_get_usm_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames);
+ PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets);
+ PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops);
+}
+
+static void show_rdma_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_rdma_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_rdma_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod);
+ PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt);
+}
+
+static void show_tp_err_adapter_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh);
+ PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer);
+}
+
+static void show_cpl_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_cpl_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_cpl_requests:", req);
+ PRINT_CH_STATS("tp_cpl_responses:", rsp);
+}
+
+static void show_tp_err_channel_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs);
+ PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs);
+ PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs);
+ PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs);
+ PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops);
+ PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops);
+ PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops);
+ PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops);
+}
+
+static void show_fcoe_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_fcoe_stats stats[NCHAN];
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ for (i = 0; i < adap->params.arch.nchan; i++)
+ t4_get_fcoe_stats(adap, i, &stats[i], false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp);
+ PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp);
+ PRINT_CH_STATS2("fcoe_frames_drop", frames_drop);
+}
+
+#undef PRINT_CH_STATS2
+#undef PRINT_CH_STATS
+#undef PRINT_ADAP_STATS
+
+static int tp_stats_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+
+ seq_puts(seq, "\n--------Adapter Stats--------\n");
+ show_tcp_stats(seq);
+ show_ddp_stats(seq);
+ show_rdma_stats(seq);
+ show_tp_err_adapter_stats(seq);
+
+ seq_puts(seq, "\n-------- Channel Stats --------\n");
+ if (adap->params.arch.nchan == NCHAN)
+ seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n",
+ " ", "channel 0", "channel 1",
+ "channel 2", "channel 3");
+ else
+ seq_printf(seq, "%-25s %-20s %-20s\n",
+ " ", "channel 0", "channel 1");
+ show_cpl_stats(seq);
+ show_tp_err_channel_stats(seq);
+ show_fcoe_stats(seq);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2973,6 +3624,10 @@ int t4_setup_debugfs(struct adapter *adap)
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
+ { "resources", &resources_debugfs_fops, 0400, 0 },
+#ifdef CONFIG_CHELSIO_T4_DCB
+ { "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
+#endif
{ "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
{ "ibq_tp0", &cim_ibq_fops, 0400, 0 },
{ "ibq_tp1", &cim_ibq_fops, 0400, 1 },
@@ -2999,6 +3654,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
{ "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
+ { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index f7eef93ffc87..d07230c892a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
"db_drop ",
"db_full ",
"db_empty ",
- "tcp_ipv4_out_rsts ",
- "tcp_ipv4_in_segs ",
- "tcp_ipv4_out_segs ",
- "tcp_ipv4_retrans_segs ",
- "tcp_ipv6_out_rsts ",
- "tcp_ipv6_in_segs ",
- "tcp_ipv6_out_segs ",
- "tcp_ipv6_retrans_segs ",
- "usm_ddp_frames ",
- "usm_ddp_octets ",
- "usm_ddp_drops ",
- "rdma_no_rqe_mod_defer ",
- "rdma_no_rqe_pkt_defer ",
- "tp_err_ofld_no_neigh ",
- "tp_err_ofld_cong_defer ",
"write_coal_success ",
"write_coal_fail ",
};
-static char channel_stats_strings[][ETH_GSTRING_LEN] = {
- "--------Channel--------- ",
- "tp_cpl_requests ",
- "tp_cpl_responses ",
- "tp_mac_in_errs ",
- "tp_hdr_in_errs ",
- "tp_tcp_in_errs ",
- "tp_tcp6_in_errs ",
- "tp_tnl_cong_drops ",
- "tp_tnl_tx_drops ",
- "tp_ofld_vlan_drops ",
- "tp_ofld_chan_drops ",
- "fcoe_octets_ddp ",
- "fcoe_frames_ddp ",
- "fcoe_frames_drop ",
-};
-
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok ",
@@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"bg3_frames_trunc ",
};
+static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
+};
+
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(stats_strings) +
ARRAY_SIZE(adapter_stats_strings) +
- ARRAY_SIZE(channel_stats_strings) +
ARRAY_SIZE(loopback_stats_strings);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(cxgb4_priv_flags_strings);
default:
return -EOPNOTSUPP;
}
@@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
FW_HDR_FW_VER_MINOR_G(exprom_vers),
FW_HDR_FW_VER_MICRO_G(exprom_vers),
FW_HDR_FW_VER_BUILD_G(exprom_vers));
+ info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
memcpy(data, adapter_stats_strings,
sizeof(adapter_stats_strings));
data += sizeof(adapter_stats_strings);
- memcpy(data, channel_stats_strings,
- sizeof(channel_stats_strings));
- data += sizeof(channel_stats_strings);
memcpy(data, loopback_stats_strings,
sizeof(loopback_stats_strings));
+ } else if (stringset == ETH_SS_PRIV_FLAGS) {
+ memcpy(data, cxgb4_priv_flags_strings,
+ sizeof(cxgb4_priv_flags_strings));
}
}
@@ -270,41 +244,10 @@ struct adapter_stats {
u64 db_drop;
u64 db_full;
u64 db_empty;
- u64 tcp_v4_out_rsts;
- u64 tcp_v4_in_segs;
- u64 tcp_v4_out_segs;
- u64 tcp_v4_retrans_segs;
- u64 tcp_v6_out_rsts;
- u64 tcp_v6_in_segs;
- u64 tcp_v6_out_segs;
- u64 tcp_v6_retrans_segs;
- u64 frames;
- u64 octets;
- u64 drops;
- u64 rqe_dfr_mod;
- u64 rqe_dfr_pkt;
- u64 ofld_no_neigh;
- u64 ofld_cong_defer;
u64 wc_success;
u64 wc_fail;
};
-struct channel_stats {
- u64 cpl_req;
- u64 cpl_rsp;
- u64 mac_in_errs;
- u64 hdr_in_errs;
- u64 tcp_in_errs;
- u64 tcp6_in_errs;
- u64 tnl_cong_drops;
- u64 tnl_tx_drops;
- u64 ofld_vlan_drops;
- u64 ofld_chan_drops;
- u64 octets_ddp;
- u64 frames_ddp;
- u64 frames_drop;
-};
-
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap,
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
- struct tp_tcp_stats v4, v6;
- struct tp_rdma_stats rdma_stats;
- struct tp_err_stats err_stats;
- struct tp_usm_stats usm_stats;
u64 val1, val2;
memset(s, 0, sizeof(*s));
- spin_lock(&adap->stats_lock);
- t4_tp_get_tcp_stats(adap, &v4, &v6, false);
- t4_tp_get_rdma_stats(adap, &rdma_stats, false);
- t4_get_usm_stats(adap, &usm_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- spin_unlock(&adap->stats_lock);
-
s->db_drop = adap->db_stats.db_drop;
s->db_full = adap->db_stats.db_full;
s->db_empty = adap->db_stats.db_empty;
- s->tcp_v4_out_rsts = v4.tcp_out_rsts;
- s->tcp_v4_in_segs = v4.tcp_in_segs;
- s->tcp_v4_out_segs = v4.tcp_out_segs;
- s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
- s->tcp_v6_out_rsts = v6.tcp_out_rsts;
- s->tcp_v6_in_segs = v6.tcp_in_segs;
- s->tcp_v6_out_segs = v6.tcp_out_segs;
- s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
-
- if (is_offload(adap)) {
- s->frames = usm_stats.frames;
- s->octets = usm_stats.octets;
- s->drops = usm_stats.drops;
- s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
- s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
- }
-
- s->ofld_no_neigh = err_stats.ofld_no_neigh;
- s->ofld_cong_defer = err_stats.ofld_cong_defer;
-
if (!is_t4(adap->params.chip)) {
int v;
@@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
}
}
-static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
- u8 i)
-{
- struct tp_cpl_stats cpl_stats;
- struct tp_err_stats err_stats;
- struct tp_fcoe_stats fcoe_stats;
-
- memset(s, 0, sizeof(*s));
-
- spin_lock(&adap->stats_lock);
- t4_tp_get_cpl_stats(adap, &cpl_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
- spin_unlock(&adap->stats_lock);
-
- s->cpl_req = cpl_stats.req[i];
- s->cpl_rsp = cpl_stats.rsp[i];
- s->mac_in_errs = err_stats.mac_in_errs[i];
- s->hdr_in_errs = err_stats.hdr_in_errs[i];
- s->tcp_in_errs = err_stats.tcp_in_errs[i];
- s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
- s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
- s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
- s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
- s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
- s->octets_ddp = fcoe_stats.octets_ddp;
- s->frames_ddp = fcoe_stats.frames_ddp;
- s->frames_drop = fcoe_stats.frames_drop;
-}
-
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
@@ -429,11 +311,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
data += sizeof(struct adapter_stats) / sizeof(u64);
*data++ = (u64)pi->port_id;
- collect_channel_stats(adapter, (struct channel_stats *)data,
- pi->port_id);
- data += sizeof(struct channel_stats) / sizeof(u64);
-
- *data++ = (u64)pi->port_id;
memset(&s, 0, sizeof(s));
t4_get_lb_stats(adapter, pi->port_id, &s);
@@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
- if (netif_carrier_ok(dev)) {
- base->speed = pi->link_cfg.speed;
- base->duplex = DUPLEX_FULL;
- } else {
- base->speed = SPEED_UNKNOWN;
- base->duplex = DUPLEX_UNKNOWN;
- }
+ base->speed = (netif_carrier_ok(dev)
+ ? pi->link_cfg.speed
+ : SPEED_UNKNOWN);
+ base->duplex = DUPLEX_FULL;
if (pi->link_cfg.fc & PAUSE_RX) {
if (pi->link_cfg.fc & PAUSE_TX) {
@@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
offset, len, &data[eprom->len - len]);
}
+static u32 cxgb4_get_priv_flags(struct net_device *netdev)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ return (adapter->eth_flags | pi->eth_flags);
+}
+
+/**
+ * set_flags - set/unset specified flags if passed in new_flags
+ * @cur_flags: pointer to current flags
+ * @new_flags: new incoming flags
+ * @flags: set of flags to set/unset
+ */
+static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
+{
+ *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
+}
+
+static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
+ set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
+
+ return 0;
+}
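set_flags() is a plain read-modify-write mask update: bits named in 'flags' are copied in from new_flags and every other bit is preserved. Since PRIV_FLAGS_ADAP is 0 and PRIV_FLAGS_PORT contains only the TX-VM bit, the adapter currently manages no private flags of its own and each port manages exactly one. The helper in isolation:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

static void set_flags(uint32_t *cur_flags, uint32_t new_flags, uint32_t flags)
{
        /* clear the managed bits, then copy them in from new_flags */
        *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
}

int main(void)
{
        uint32_t port_flags = 0;
        uint32_t managed = BIT(0);        /* e.g. the port_tx_vm_wr bit */

        set_flags(&port_flags, BIT(0) | BIT(3), managed);
        printf("%#x\n", (unsigned)port_flags); /* 0x1: BIT(3) not managed */
        return 0;
}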
+
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_dump_data = get_dump_data,
.get_module_info = cxgb4_get_module_info,
.get_module_eeprom = cxgb4_get_module_eeprom,
+ .get_priv_flags = cxgb4_get_priv_flags,
+ .set_priv_flags = cxgb4_set_priv_flags,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 00fc5f1afb1d..7dddb9e748b8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
req->local_port = cpu_to_be16(f->fs.val.lport);
req->peer_port = cpu_to_be16(f->fs.val.fport);
- req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
- f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
- req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
- f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ memcpy(&req->local_ip, f->fs.val.lip, 4);
+ memcpy(&req->peer_ip, f->fs.val.fip, 4);
req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
DELACK_V(f->fs.hitcnts) |
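Switching from the shift/OR assembly to memcpy() avoids an integer-promotion trap: f->fs.val.fip[3] << 24 promotes the byte to signed int, so any top byte >= 0x80 shifts into the sign bit (undefined behavior), and the assembled native integer only matches the __be32 wire format on little-endian hosts anyway. Copying the four bytes verbatim is correct on any endianness. A sketch of the hazard, assuming the address bytes are stored in network order as in the filter structs:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* 192.168.0.200 in network byte order */
        uint8_t lip[4] = { 192, 168, 0, 200 };
        uint32_t by_shift, by_memcpy;

        /* lip[3] promotes to int; without the cast, 200 << 24 overflows
         * a signed 32-bit int, which is undefined behavior even when it
         * "usually works".
         */
        by_shift = lip[0] | lip[1] << 8 | lip[2] << 16 |
                   (uint32_t)lip[3] << 24;

        /* The fix: copy the bytes as-is; no promotion, no byte swapping. */
        memcpy(&by_memcpy, lip, 4);

        /* equal on little-endian hosts; memcpy is right on any endianness */
        printf("%#x %#x\n", by_shift, by_memcpy);
        return 0;
}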
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dd04a2f89ce6..961e3087d1d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -263,11 +263,11 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
enable ? "set" : "unset", pi->port_id, i, -err);
else
- txq->dcb_prio = value;
+ txq->dcb_prio = enable ? value : 0;
}
}
-static int cxgb4_dcb_enabled(const struct net_device *dev)
+int cxgb4_dcb_enabled(const struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
@@ -554,10 +554,9 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
dev = q->adap->port[q->adap->chan_map[port]];
dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
- ? !!(pcmd->u.info.dcbxdis_pkd &
- FW_PORT_CMD_DCBXDIS_F)
- : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
- FW_PORT_CMD_DCBXDIS32_F));
+ ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
+ : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
+ & FW_PORT_CMD_DCBXDIS32_F));
state_input = (dcbxdis
? CXGB4_DCB_INPUT_FW_DISABLED
: CXGB4_DCB_INPUT_FW_ENABLED);
@@ -924,12 +923,14 @@ static int setup_sge_queues(struct adapter *adap)
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
freeout:
+ dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
t4_free_sge_resources(adap);
return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
int txq;
@@ -971,7 +972,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
@@ -3016,7 +3017,7 @@ static int cxgb_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
- pi, dev);
+ pi, dev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
return 0;
@@ -3072,6 +3073,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
adapter->geneve_port = 0;
t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+ break;
default:
return;
}
@@ -3157,6 +3159,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+ break;
default:
return;
}
@@ -3217,7 +3220,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
- .ndo_start_xmit = t4_eth_xmit,
+ .ndo_start_xmit = t4_start_xmit,
.ndo_select_queue = cxgb_select_queue,
.ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
@@ -3536,6 +3539,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
u32 v;
int ret;
+ /* Now that we've successfully configured and initialized the adapter,
+ * we can ask the Firmware what resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ return ret;
+ }
+
/* get device capabilities */
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
@@ -4170,32 +4183,6 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
- /*
- * Grab VPD parameters. This should be done after we establish a
- * connection to the firmware since some of the VPD parameters
- * (notably the Core Clock frequency) are retrieved via requests to
- * the firmware. On the other hand, we need these fairly early on
- * so we do this right after getting ahold of the firmware.
- */
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
-
- /*
- * Find out what ports are available to us. Note that we need to do
- * this before calling adap_init0_no_config() since it needs nports
- * and portvec ...
- */
- v =
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
- if (ret < 0)
- goto bye;
-
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
-
/* If the firmware is initialized already, emit a simple note to that
* effect. Otherwise, it's time to try initializing the adapter.
*/
@@ -4246,6 +4233,45 @@ static int adap_init0(struct adapter *adap)
}
}
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ goto bye;
+ }
+
+ /* Grab VPD parameters. This should be done after we establish a
+ * connection to the firmware since some of the VPD parameters
+ * (notably the Core Clock frequency) are retrieved via requests to
+ * the firmware. On the other hand, we need these fairly early on
+ * so we do this right after getting ahold of the firmware.
+ *
+ * We need to do this after initializing the adapter because someone
+ * could have FLASHed a new VPD which won't be read by the firmware
+ * until we do the RESET ...
+ */
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+
+ /* Find out what ports are available to us. Note that we need to do
+ * this before calling adap_init0_no_config() since it needs nports
+ * and portvec ...
+ */
+ v =
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0)
+ goto bye;
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
/* Give the SGE code a chance to pull in anything that it needs ...
* Note that this must be called after we retrieve our VPD parameters
* in order to know how to convert core ticks to seconds, etc.
@@ -4797,10 +4823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
-static void cfg_queues(struct adapter *adap)
+static int cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int i = 0, n10g = 0, qidx = 0;
+ int i, n10g = 0, qidx = 0;
+ int niqflint, neq, avail_eth_qsets;
+ int max_eth_qsets = 32;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
@@ -4812,16 +4840,46 @@ static void cfg_queues(struct adapter *adap)
adap->params.crypto = 0;
}
- n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ /* Calculate the number of Ethernet Queue Sets available based on
+ * resources provisioned for us. We always have an Asynchronous
+ * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
+ * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
+ * Ingress Queue. Meanwhile, we need two Egress Queues for each
+ * Queue Set: one for the Free List and one for the Ethernet TX Queue.
+ *
+ * Note that we should also take into account all of the various
+ * Offload Queues. But, in any situation where we're operating in
+ * a Resource Constrained Provisioning environment, doing any Offload
+ * at all is problematic ...
+ */
+ niqflint = adap->params.pfres.niqflint - 1;
+ if (!(adap->flags & USING_MSIX))
+ niqflint--;
+ neq = adap->params.pfres.neq / 2;
+ avail_eth_qsets = min(niqflint, neq);
+
+ if (avail_eth_qsets > max_eth_qsets)
+ avail_eth_qsets = max_eth_qsets;
+
+ if (avail_eth_qsets < adap->params.nports) {
+ dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
+ avail_eth_qsets, adap->params.nports);
+ return -ENOMEM;
+ }
+
+ /* Count the number of 10Gb/s or better ports */
+ for_each_port(adap, i)
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
* own TX Queue in order to prevent Head-Of-Line Blocking.
*/
- if (adap->params.nports * 8 > MAX_ETH_QSETS) {
- dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
- MAX_ETH_QSETS, adap->params.nports * 8);
- BUG_ON(1);
+ if (adap->params.nports * 8 > avail_eth_qsets) {
+ dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
+ avail_eth_qsets, adap->params.nports * 8);
+ return -ENOMEM;
}
for_each_port(adap, i) {
@@ -4837,7 +4895,7 @@ static void cfg_queues(struct adapter *adap)
* per 10G port.
*/
if (n10g)
- q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
@@ -4888,6 +4946,8 @@ static void cfg_queues(struct adapter *adap)
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
+
+ return 0;
}
/*
@@ -5084,17 +5144,9 @@ static void print_port_info(const struct net_device *dev)
{
char buf[80];
char *bufp = buf;
- const char *spd = "";
const struct port_info *pi = netdev_priv(dev);
const struct adapter *adap = pi->adapter;
- if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
- spd = " 2.5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
- spd = " 5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
- spd = " 8 GT/s";
-
if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
bufp += sprintf(bufp, "100M/");
if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
@@ -5598,6 +5650,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_CHELSIO_T4_DCB
netdev->dcbnl_ops = &cxgb4_dcb_ops;
cxgb4_dcb_state_init(netdev);
+ cxgb4_dcb_version_init(netdev);
#endif
cxgb4_set_ethtool_ops(netdev);
}
@@ -5628,10 +5681,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ if (!(adapter->flags & FW_OK))
+ goto fw_attach_fail;
+
/* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
- cfg_queues(adapter);
+ err = cfg_queues(adapter);
+ if (err)
+ goto out_free_dev;
adapter->smt = t4_init_smt();
if (!adapter->smt) {
@@ -5703,7 +5761,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
u32 hash_base, hash_reg;
- if (chip <= CHELSIO_T5) {
+ if (chip_ver <= CHELSIO_T5) {
hash_reg = LE_DB_TID_HASHBASE_A;
hash_base = t4_read_reg(adapter, hash_reg);
adapter->tids.hash_base = hash_base / 4;
@@ -5738,6 +5796,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_dev;
}
+fw_attach_fail:
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 3ddd2c4acf68..623f73dd7738 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap)
{
int ret;
+ if (adap->tc_flower_initialized)
+ return -EEXIST;
+
adap->flower_ht_params = cxgb4_tc_flower_ht_params;
ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
if (ret)
@@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap)
INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
+ adap->tc_flower_initialized = true;
return 0;
}
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
+ if (!adap->tc_flower_initialized)
+ return;
+
if (adap->flower_stats_timer.function)
del_timer_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
+ adap->tc_flower_initialized = false;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 77c2c538b1fd..301c4df8a566 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -231,6 +231,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
+ /* fall through */
case L2T_STATE_VALID: /* fast-path, send the packet on */
return t4_ofld_send(adap, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 9148abb7994c..7fc656680299 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap)
struct port_info *pi = netdev2pinfo(adap->port[j]);
s = pi->sched_tbl;
+ if (!s)
+ continue;
+
for (i = 0; i < s->sched_size; i++) {
struct sched_class *e;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 395e2a0e8d7f..6807bc3a44fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
}
/**
- * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid, ctrl0, op;
u64 cntrl, *end, *sgl;
@@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+/* Constants ... */
+enum {
+ /* Egress Queue sizes, producer and consumer indices are all in units
+ * of Egress Context Units bytes. Note that as far as the hardware is
+ * concerned, the free list is an Egress Queue (the host produces free
+ * buffers which the hardware consumes) and free list entries are
+ * 64-bit PCI DMA addresses.
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+ T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+};
+
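For orientation, a worked instance of the unit arithmetic above, assuming the usual 64-byte SGE egress-context unit (SGE_EQ_IDXSIZE == 64 is an assumption here, not something this hunk states):

    EQ_UNIT         = 64 bytes
    FL_PER_EQ_UNIT  = 64 / sizeof(__be64) = 8 free-list addresses per unit
    TXD_PER_EQ_UNIT = 64 / sizeof(__be64) = 8 flits per unit
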
+/**
+ * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit completely as
+ * immediate data.
+ */
+static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
+{
+ /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+ * which does not accommodate immediate data. We could dike out all
+ * of the support code for immediate data but that would tie our hands
+	 * too much if we ever want to enhance the firmware. It would also
+ * create more differences between the PF and VF Drivers.
+ */
+ return false;
+}
+
+/**
+ * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a TX Work Request for the
+ * given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+ if (t4vf_is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+ sizeof(__be64));
+
+	/* Otherwise, we're going to have to construct a Scatter/Gather List
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ if (skb_shinfo(skb)->gso_size)
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
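A worked example of the flit arithmetic, under the assumption that fw_eth_tx_pkt_vm_wr is 32 bytes and cpl_tx_pkt_core is 16 bytes (typical sizes, but not guaranteed by this patch):

    /* non-GSO skb, linear head + 2 page fragments:
     *   sgl_len(3)                 =  5 flits
     *   (32 + 16) / sizeof(__be64) =  6 flits
     *   total                      = 11 flits -> flits_to_desc(11) == 2 descriptors
     */
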
+/**
+ * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
+ */
+static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct skb_shared_info *ssi;
+ struct fw_eth_tx_pkt_vm_wr *wr;
+ int qidx, credits, max_pkt_len;
+ struct cpl_tx_pkt_core *cpl;
+ const struct port_info *pi;
+ unsigned int flits, ndesc;
+ struct sge_eth_txq *txq;
+ struct adapter *adapter;
+ u64 cntrl, *end;
+ u32 wr_mid;
+ const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci);
+
+ /* The chip minimum packet length is 10 octets but the firmware
+	 * command that we are using requires that we copy the Ethernet header
+	 * (including the VLAN tag) into the Work Request header, so we reject
+	 * anything smaller than that ...
+ */
+ if (unlikely(skb->len < fw_hdr_copy_len))
+ goto out_free;
+
+	/* Discard the packet if the length is greater than the MTU */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
+ /* Figure out which TX Queue we're going to use. */
+ pi = netdev_priv(dev);
+ adapter = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ WARN_ON(qidx >= pi->nqsets);
+ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+ /* Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+ cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
+
+ /* Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+ * we inject our Work Request.
+ */
+ flits = t4vf_calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ /* Not enough room for this packet's Work Request. Stop the
+ * TX Queue and return a "busy" condition. The queue will get
+ * started later on when the firmware informs us that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ dev_err(adapter->pdev_dev,
+ "%s: TX ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!t4vf_is_eth_imm(skb) &&
+ unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ /* We need to map the skb into PCI DMA space (because it can't
+ * be in-lined directly into the Work Request) and the mapping
+ * operation failed. Record the error and drop the packet.
+ */
+ txq->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ /* Start filling in our Work Request. Note that we do _not_ handle
+ * the WR Header wrapping around the TX Descriptor Ring. If our
+ * maximum header size ever exceeds one TX Descriptor, we'll need to
+ * do something else here.
+ */
+ WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+ wr->r3[0] = cpu_to_be32(0);
+ wr->r3[1] = cpu_to_be32(0);
+ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ end = (u64 *)wr + flits;
+
+ /* If this is a Large Send Offload packet we'll put in an LSO CPL
+ * message with an encapsulated TX Packet CPL message. Otherwise we
+ * just use a TX Packet CPL message.
+ */
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(sizeof(*lso) +
+ sizeof(*cpl)));
+ /* Fill in the LSO CPL message. */
+ lso->lso_ctrl =
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = cpu_to_be16(0);
+ lso->mss = cpu_to_be16(ssi->gso_size);
+ lso->seqno_offset = cpu_to_be32(0);
+ if (is_t4(adapter->params.chip))
+ lso->len = cpu_to_be32(skb->len);
+ else
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(lso + 1);
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ txq->tso++;
+ txq->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = (t4vf_is_eth_imm(skb)
+ ? skb->len + sizeof(*cpl)
+ : sizeof(*cpl));
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
+ txq->tx_cso++;
+ } else {
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ }
+ }
+
+ /* If there's a VLAN tag present, add that to the list of things to
+ * do in this Work Request.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ txq->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ /* Fill in the TX Packet CPL message header. */
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
+ cpl->pack = cpu_to_be16(0);
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ /* Fill in the body of the TX Packet CPL message with either in-lined
+ * data or a Scatter/Gather List.
+ */
+ if (t4vf_is_eth_imm(skb)) {
+ /* In-line the packet's data and free the skb since we don't
+ * need it any longer.
+ */
+ cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
+ dev_consume_skb_any(skb);
+ } else {
+ /* Write the skb's Scatter/Gather list into the TX Packet CPL
+ * message and retain a pointer to the skb so we can free it
+ * later when its DMA completes. (We store the skb pointer
+ * in the Software Descriptor corresponding to the last TX
+ * Descriptor used by the Work Request.)
+ *
+ * The retained skb will be freed when the corresponding TX
+ * Descriptors are reclaimed after their DMAs complete.
+ * However, this could take quite a while since, in general,
+ * the hardware is set up to be lazy about sending DMA
+ * completion notifications to us and we mostly perform TX
+ * reclaims in the transmit routine.
+ *
+		 * This is good for performance but means that we rely on new
+ * TX packets arriving to run the destructors of completed
+ * packets, which open up space in their sockets' send queues.
+ * Sometimes we do not get such new packets causing TX to
+ * stall. A single UDP transmitter is a good example of this
+ * situation. We have a clean up timer that periodically
+ * reclaims completed packets but it doesn't run often enough
+ * (nor do we want it to) to prevent lengthy stalls. A
+ * solution to this problem is to run the destructor early,
+		 * after the packet is queued but before it's DMA'd. A con is
+ * that we lie to socket memory accounting, but the amount of
+ * extra memory is reasonable (limited by the number of TX
+ * descriptors), the packets do actually get freed quickly by
+ * new packets almost always, and for protocols like TCP that
+ * wait for acks to really free up the data the extra memory
+ * is even less. On the positive side we run the destructors
+ * on the sending CPU rather than on a potentially different
+ * completing CPU, usually a good thing.
+ *
+ * Run the destructor before telling the DMA engine about the
+ * packet to make sure it doesn't complete and get freed
+ * prematurely.
+ */
+ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+ struct sge_txq *tq = &txq->q;
+ int last_desc;
+
+ /* If the Work Request header was an exact multiple of our TX
+ * Descriptor length, then it's possible that the starting SGL
+ * pointer lines up exactly with the end of our TX Descriptor
+ * ring. If that's the case, wrap around to the beginning
+ * here ...
+ */
+ if (unlikely((void *)sgl == (void *)tq->stat)) {
+ sgl = (void *)tq->desc;
+ end = (void *)((void *)tq->desc +
+ ((void *)end - (void *)tq->stat));
+ }
+
+ cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+ skb_orphan(skb);
+
+ last_desc = tq->pidx + ndesc - 1;
+ if (last_desc >= tq->size)
+ last_desc -= tq->size;
+ tq->sdesc[last_desc].skb = skb;
+ tq->sdesc[last_desc].sgl = sgl;
+ }
+
+ /* Advance our internal TX Queue state, tell the hardware about
+ * the new TX descriptors and return success.
+ */
+ txq_advance(&txq->q, ndesc);
+
+ cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
+ return NETDEV_TX_OK;
+
+out_free:
+ /* An error of some sort happened. Free the TX skb and tell the
+ * OS that we've "dealt" with the packet ...
+ */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
+ return cxgb4_vf_eth_xmit(skb, dev);
+
+ return cxgb4_eth_xmit(skb, dev);
+}
+
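Since t4_eth_xmit is now static, the small dispatcher above becomes the driver's transmit entry point; presumably it replaces t4_eth_xmit in the driver's net_device_ops table (a sketch only, as the ops table is outside this hunk):

    .ndo_start_xmit = t4_start_xmit,
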
/**
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
@@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
+ FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
+ : FW_IQ_IQTYPE_OFLD));
if (fl) {
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 974a868a4824..5fe5d16dee72 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return 0;
}
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a physical
+ * function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ int v;
+ u32 word;
+
+	/* Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adapter->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+	/* Extract PF resource limits and return success. */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+ pfres->niq = FW_PFVF_CMD_NIQ_G(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = FW_PFVF_CMD_NEQ_G(word);
+ pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ pfres->tc = FW_PFVF_CMD_TC_G(word);
+ pfres->nvi = FW_PFVF_CMD_NVI_G(word);
+ pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+ pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+ pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
+
+ return 0;
+}
+
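A hypothetical caller sketch showing how the decoded limits might be consumed (the function example_log_pfres and its call site are illustrative only, not part of this patch):

    static void example_log_pfres(struct adapter *adapter)
    {
            struct pf_resources *pfres = &adapter->params.pfres;

            if (t4_get_pfres(adapter))
                    return;

            dev_info(adapter->pdev_dev,
                     "PF resources: niqflint=%u niq=%u neq=%u nvi=%u\n",
                     pfres->niqflint, pfres->niq, pfres->neq, pfres->nvi);
    }
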
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -7453,10 +7504,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
switch (nmac) {
case 5:
memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* Fall through */
case 4:
memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* Fall through */
case 3:
memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* Fall through */
case 2:
memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}
@@ -8702,7 +8756,7 @@ static int t4_get_flash_params(struct adapter *adap)
};
unsigned int part, manufacturer;
- unsigned int density, size;
+ unsigned int density, size = 0;
u32 flashid = 0;
int ret;
@@ -8772,11 +8826,6 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x22: /* 256MB */
size = 1 << 28;
break;
-
- default:
- dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
@@ -8792,10 +8841,6 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x17: /* 64MB */
size = 1 << 26;
break;
- default:
- dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
@@ -8811,10 +8856,6 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x18: /* 16MB */
size = 1 << 24;
break;
- default:
- dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
@@ -8830,17 +8871,21 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x18: /* 16MB */
size = 1 << 24;
break;
- default:
- dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
- default:
- dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
- flashid);
- return -EINVAL;
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
}
/* Store decoded Flash size and fall through into vetting code. */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 09e38f0733bd..b8f75a22fb6c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -755,7 +755,7 @@ struct cpl_abort_req_rss {
struct cpl_abort_req_rss6 {
WR_HDR;
union opcode_tid ot;
- __u32 srqidx_status;
+ __be32 srqidx_status;
};
#define ABORT_RSS_STATUS_S 0
@@ -785,7 +785,7 @@ struct cpl_abort_rpl_rss {
struct cpl_abort_rpl_rss6 {
union opcode_tid ot;
- __u32 srqidx_status;
+ __be32 srqidx_status;
};
struct cpl_abort_rpl {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index c7f8d0441278..60df66f4d21c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -188,6 +188,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 6b55aa2eb2a5..eb222d40ddbf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1502,6 +1502,25 @@
#define TP_MIB_DATA_A 0x7e54
#define TP_INT_CAUSE_A 0x7e74
+#define TP_FLM_FREE_PS_CNT_A 0x7e80
+#define TP_FLM_FREE_RX_CNT_A 0x7e84
+
+#define FREEPSTRUCTCOUNT_S 0
+#define FREEPSTRUCTCOUNT_M 0x1fffffU
+#define FREEPSTRUCTCOUNT_G(x) (((x) >> FREEPSTRUCTCOUNT_S) & FREEPSTRUCTCOUNT_M)
+
+#define FREERXPAGECOUNT_S 0
+#define FREERXPAGECOUNT_M 0x1fffffU
+#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S)
+#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M)
+
+#define TP_FLM_FREE_TX_CNT_A 0x7e88
+
+#define FREETXPAGECOUNT_S 0
+#define FREETXPAGECOUNT_M 0x1fffffU
+#define FREETXPAGECOUNT_V(x) ((x) << FREETXPAGECOUNT_S)
+#define FREETXPAGECOUNT_G(x) (((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M)
+
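A minimal usage sketch for the new free-page counter fields (t4_read_reg is the driver's existing 32-bit register accessor; the snippet itself is illustrative, not from this patch):

    u32 val = t4_read_reg(adap, TP_FLM_FREE_TX_CNT_A);
    unsigned int free_tx_pages = FREETXPAGECOUNT_G(val);   /* low 21 bits */
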
#define FLMTXFLSTEMPTY_S 30
#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
@@ -1683,6 +1702,16 @@
#define ULP_TX_LA_RDPTR_0_A 0x8ec0
#define ULP_TX_LA_RDDATA_0_A 0x8ec4
#define ULP_TX_LA_WRPTR_0_A 0x8ec8
+#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70
+
+#define ULP_TX_ASIC_DEBUG_0_A 0x8f74
+#define ULP_TX_ASIC_DEBUG_1_A 0x8f78
+#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c
+#define ULP_TX_ASIC_DEBUG_3_A 0x8f80
+#define ULP_TX_ASIC_DEBUG_4_A 0x8f84
+
+/* registers for module PM_RX */
+#define PM_RX_BASE_ADDR 0x8fc0
#define PMRX_E_PCMD_PAR_ERROR_S 0
#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index f1967cf6d43c..5dc6c4154af8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1472,6 +1472,12 @@ enum fw_iq_type {
FW_IQ_TYPE_NO_FL_INT_CAP
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_OTHER,
+ FW_IQ_IQTYPE_NIC,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -1586,6 +1592,12 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTISCSIC_S 26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
+#define FW_IQ_CMD_IQTYPE_S 24
+#define FW_IQ_CMD_IQTYPE_M 0x3
+#define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S)
+#define FW_IQ_CMD_IQTYPE_G(x) \
+ (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M)
+
#define FW_IQ_CMD_FL0CNGCHMAP_S 20
#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 4eb15ceddca3..a844296135b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x13
-#define T4FW_VERSION_MICRO 0x01
+#define T4FW_VERSION_MINOR 0x14
+#define T4FW_VERSION_MICRO 0x08
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x13
-#define T5FW_VERSION_MICRO 0x01
+#define T5FW_VERSION_MINOR 0x14
+#define T5FW_VERSION_MICRO 0x08
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x13
-#define T6FW_VERSION_MICRO 0x01
+#define T6FW_VERSION_MINOR 0x14
+#define T6FW_VERSION_MICRO 0x08
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 0ed161642371..74849be5f004 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -412,12 +412,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
ppmax * (sizeof(struct cxgbi_ppod_data)) +
ppod_bmap_size * sizeof(unsigned long);
- ppm = vmalloc(alloc_sz);
+ ppm = vzalloc(alloc_sz);
if (!ppm)
goto release_ppm_pool;
- memset(ppm, 0, alloc_sz);
-
ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab912937aff..ec0b545197e2 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
config CS89x0
tristate "CS89x0 support"
depends on ISA || EISA || ARM
+ depends on !PPC32
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 90c645b8538e..60641e202534 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev)
return 0;
}
+static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool running = netif_running(netdev);
+ int err = 0;
+
+ ASSERT_RTNL();
+ if (running) {
+ err = enic_stop(netdev);
+ if (err)
+ return err;
+ }
+
+ netdev->mtu = new_mtu;
+
+ if (running) {
+ err = enic_open(netdev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
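Both callers satisfy the ASSERT_RTNL() in the helper: the ndo_change_mtu path is invoked by the core with RTNL already held, while the deferred MTU work takes it explicitly, as sketched here (rtnl_lock()/rtnl_unlock() are core kernel APIs):

    rtnl_lock();
    err = _enic_change_mtu(netdev, new_mtu);   /* ASSERT_RTNL() holds */
    rtnl_unlock();
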
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct enic *enic = netdev_priv(netdev);
- int running = netif_running(netdev);
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (running)
- enic_stop(netdev);
-
- netdev->mtu = new_mtu;
-
if (netdev->mtu > enic->port_mtu)
netdev_warn(netdev,
- "interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ "interface MTU (%d) set higher than port MTU (%d)\n",
+ netdev->mtu, enic->port_mtu);
- if (running)
- enic_open(netdev);
-
- return 0;
+ return _enic_change_mtu(netdev, new_mtu);
}
static void enic_change_mtu_work(struct work_struct *work)
@@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work)
struct enic *enic = container_of(work, struct enic, change_mtu_work);
struct net_device *netdev = enic->netdev;
int new_mtu = vnic_dev_mtu(enic->vdev);
- int err;
- unsigned int i;
-
- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
rtnl_lock();
-
- /* Stop RQ */
- del_timer_sync(&enic->notify_timer);
-
- for (i = 0; i < enic->rq_count; i++)
- napi_disable(&enic->napi[i]);
-
- vnic_intr_mask(&enic->intr[0]);
- enic_synchronize_irqs(enic);
- err = vnic_rq_disable(&enic->rq[0]);
- if (err) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to disable RQ.\n");
- return;
- }
- vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
- vnic_cq_clean(&enic->cq[0]);
- vnic_intr_clean(&enic->intr[0]);
-
- /* Fill RQ with new_mtu-sized buffers */
- netdev->mtu = new_mtu;
- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
- /* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to alloc receive buffers.\n");
- return;
- }
-
- /* Start RQ */
- vnic_rq_enable(&enic->rq[0]);
- napi_enable(&enic->napi[0]);
- vnic_intr_unmask(&enic->intr[0]);
- enic_notify_timer_start(enic);
-
+ (void)_enic_change_mtu(netdev, new_mtu);
rtnl_unlock();
netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
enic->port_mtu = enic->config.mtu;
- (void)enic_change_mtu(netdev, enic->port_mtu);
err = enic_set_mac_addr(netdev, enic->mac_addr);
if (err) {
@@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 68 - 9000 */
netdev->min_mtu = ENIC_MIN_MTU;
netdev->max_mtu = ENIC_MAX_MTU;
+ netdev->mtu = enic->port_mtu;
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index e9db811df59c..901e44b0b795 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1071,7 +1071,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
unsigned int num_bars)
{
if (!vdev) {
- vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
if (!vdev)
return NULL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index f8aa326d1d58..a3e7b003ada1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -35,7 +35,7 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!rq->bufs[i])
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 090cc65658a3..eb75891974df 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -35,7 +35,7 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!wq->bufs[i])
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6d7404f66f84..1c9ad3630c77 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -46,6 +46,11 @@
#define DRV_NAME "gmac-gemini"
#define DRV_VERSION "1.0"
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
#define HSIZE_8 0x00
#define HSIZE_16 0x01
#define HSIZE_32 0x02
@@ -146,6 +151,7 @@ struct gemini_ethernet {
void __iomem *base;
struct gemini_ethernet_port *port0;
struct gemini_ethernet_port *port1;
+ bool initialized;
spinlock_t irq_lock; /* Locks IRQ-related registers */
unsigned int freeq_order;
@@ -300,23 +306,26 @@ static void gmac_speed_set(struct net_device *netdev)
status.bits.speed = GMAC_SPEED_1000;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
- netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
+ phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
+ phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
+ phydev_name(phydev));
break;
default:
- netdev_warn(netdev, "Not supported PHY speed (%d)\n",
- phydev->speed);
+ netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n",
+ phydev->speed, phydev_name(phydev));
}
if (phydev->duplex == DUPLEX_FULL) {
@@ -363,12 +372,6 @@ static int gmac_setup_phy(struct net_device *netdev)
return -ENODEV;
netdev->phydev = phy;
- netdev_info(netdev, "connected to PHY \"%s\"\n",
- phydev_name(phy));
- phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
- (unsigned long)phy->phy_id,
- phy_modes(phy->interface));
-
phy->supported &= PHY_GBIT_FEATURES;
phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
phy->advertising = phy->supported;
@@ -376,19 +379,19 @@ static int gmac_setup_phy(struct net_device *netdev)
/* set PHY interface type */
switch (phy->interface) {
case PHY_INTERFACE_MODE_MII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_MII;
- netdev_info(netdev, "connect to MII\n");
break;
case PHY_INTERFACE_MODE_GMII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_GMII;
- netdev_info(netdev, "connect to GMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
- dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ netdev_dbg(netdev,
+ "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII\n");
break;
default:
netdev_err(netdev, "Unsupported MII interface\n");
@@ -398,29 +401,63 @@ static int gmac_setup_phy(struct net_device *netdev)
}
writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ if (netif_msg_link(port))
+ phy_attached_info(phy);
+
return 0;
}
-static int gmac_pick_rx_max_len(int max_l3_len)
-{
- /* index = CONFIG_MAXLEN_XXX values */
- static const int max_len[8] = {
- 1536, 1518, 1522, 1542,
- 9212, 10236, 1518, 1518
- };
- int i, n = 5;
+/* The maximum frame length is not logically enumerated in the
+ * hardware, so we do a table lookup to find the applicable max
+ * frame length.
+ */
+struct gmac_max_framelen {
+ unsigned int max_l3_len;
+ u8 val;
+};
+
+static const struct gmac_max_framelen gmac_maxlens[] = {
+ {
+ .max_l3_len = 1518,
+ .val = CONFIG0_MAXLEN_1518,
+ },
+ {
+ .max_l3_len = 1522,
+ .val = CONFIG0_MAXLEN_1522,
+ },
+ {
+ .max_l3_len = 1536,
+ .val = CONFIG0_MAXLEN_1536,
+ },
+ {
+ .max_l3_len = 1542,
+ .val = CONFIG0_MAXLEN_1542,
+ },
+ {
+ .max_l3_len = 9212,
+ .val = CONFIG0_MAXLEN_9k,
+ },
+ {
+ .max_l3_len = 10236,
+ .val = CONFIG0_MAXLEN_10k,
+ },
+};
- max_l3_len += ETH_HLEN + VLAN_HLEN;
+static int gmac_pick_rx_max_len(unsigned int max_l3_len)
+{
+ const struct gmac_max_framelen *maxlen;
+ int maxtot;
+ int i;
- if (max_l3_len > max_len[n])
- return -1;
+ maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN;
- for (i = 0; i < 5; i++) {
- if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
- n = i;
+ for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) {
+ maxlen = &gmac_maxlens[i];
+ if (maxtot <= maxlen->max_l3_len)
+ return maxlen->val;
}
- return n;
+ return -1;
}
static int gmac_init(struct net_device *netdev)
@@ -1276,8 +1313,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable)
unsigned long flags;
u32 val, mask;
- netdev_info(netdev, "%s device %d %s\n", __func__,
- netdev->dev_id, enable ? "enable" : "disable");
+ netdev_dbg(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
spin_lock_irqsave(&geth->irq_lock, flags);
mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
@@ -1753,7 +1790,10 @@ static int gmac_open(struct net_device *netdev)
phy_start(netdev->phydev);
err = geth_resize_freeq(port);
- if (err) {
+ /* It's fine if it's just busy, the other port has set up
+ * the freeq in that case.
+ */
+ if (err && (err != -EBUSY)) {
netdev_err(netdev, "could not resize freeq\n");
goto err_stop_phy;
}
@@ -1782,7 +1822,7 @@ static int gmac_open(struct net_device *netdev)
HRTIMER_MODE_REL);
port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
- netdev_info(netdev, "opened\n");
+ netdev_dbg(netdev, "opened\n");
return 0;
@@ -2264,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port)
static void gemini_ethernet_init(struct gemini_ethernet *geth)
{
+ /* Only do this once both ports are online */
+ if (geth->initialized)
+ return;
+ if (geth->port0 && geth->port1)
+ geth->initialized = true;
+ else
+ return;
+
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
@@ -2354,6 +2402,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->id = id;
port->geth = geth;
port->dev = dev;
+ port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* DMA memory */
dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2410,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
geth->port0 = port;
else
geth->port1 = port;
+
+ /* This will just be done once both ports are up and reset */
+ gemini_ethernet_init(geth);
+
platform_set_drvdata(pdev, port);
/* Set up and register the netdev */
@@ -2423,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+	/* We can handle jumbo frames up to 10236 bytes, so let's accept
+	 * payloads of 10236 bytes minus the VLAN and Ethernet header
+	 * (VLAN_ETH_HLEN, 18 bytes, giving a max_mtu of 10218).
+	 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll,
@@ -2435,7 +2493,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- random_ether_addr(netdev->dev_addr);
+ eth_random_addr(netdev->dev_addr);
}
gmac_write_mac_address(netdev);
@@ -2527,7 +2585,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
spin_lock_init(&geth->irq_lock);
spin_lock_init(&geth->freeq_lock);
- gemini_ethernet_init(geth);
/* The children will use this */
platform_set_drvdata(pdev, geth);
@@ -2540,8 +2597,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev)
{
struct gemini_ethernet *geth = platform_get_drvdata(pdev);
- gemini_ethernet_init(geth);
geth_cleanup_freeq(geth);
+ geth->initialized = false;
return 0;
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index a31b4df3e7ff..66535d1653f6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3204,6 +3204,8 @@ srom_map_media(struct net_device *dev)
case SROM_10BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_10BASET:
if (lp->params.fdx && !lp->fdx) return -1;
if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
@@ -3224,6 +3226,8 @@ srom_map_media(struct net_device *dev)
case SROM_100BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_100BASET:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
@@ -3236,6 +3240,8 @@ srom_map_media(struct net_device *dev)
case SROM_100BASEFF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_100BASEF:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 00d02a0967d0..3e3e08698876 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -923,6 +923,7 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
data->phy_id = 1;
else
return -ENODEV;
+ /* Fall through */
case SIOCGMIIREG: /* Read MII PHY register. */
if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index b4853ec9de8d..8cf794edd3c3 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -1,7 +1,7 @@
config BE2NET
tristate "ServerEngines' 10Gbps NIC - BladeEngine"
depends on PCI
- ---help---
+ help
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
@@ -10,6 +10,42 @@ config BE2NET_HWMON
depends on BE2NET && HWMON
depends on !(BE2NET=y && HWMON=m)
default y
- ---help---
+ help
Say Y here if you want to expose thermal sensor data on
be2net network adapter.
+
+config BE2NET_BE2
+ bool "Support for BE2 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE2
+ chipsets. (e.g. OneConnect OCe10xxx)
+
+config BE2NET_BE3
+ bool "Support for BE3 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE3
+ chipsets. (e.g. OneConnect OCe11xxx)
+
+config BE2NET_LANCER
+ bool "Support for Lancer chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Lancer
+ chipsets. (e.g LightPulse LPe12xxx)
+
+config BE2NET_SKYHAWK
+ bool "Support for Skyhawk chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Skyhawk
+ chipsets. (e.g. OneConnect OCe14xxx)
+
+comment "WARNING: be2net is useless without any enabled chip"
+ depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
+ BE2NET_SKYHAWK=n && BE2NET
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 382891f81e09..58bcee8f0a58 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.4.0.0"
+#define DRV_VER "12.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -185,34 +185,13 @@ static inline void queue_tail_inc(struct be_queue_info *q)
struct be_eq_obj {
struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u32 min_eqd; /* in usecs */
- u32 max_eqd; /* in usecs */
- u32 eqd; /* configured val when aic is off */
- u32 cur_eqd; /* in usecs */
+ struct be_adapter *adapter;
+ struct napi_struct napi;
u8 idx; /* array index */
u8 msix_idx;
u16 spurious_intr;
- struct napi_struct napi;
- struct be_adapter *adapter;
cpumask_var_t affinity_mask;
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BE_EQ_IDLE 0
-#define BE_EQ_NAPI 1 /* napi owns this EQ */
-#define BE_EQ_POLL 2 /* poll owns this EQ */
-#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
-#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
-#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
-#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
-#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
- unsigned int state;
- spinlock_t lock; /* lock to serialize napi and busy-poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
@@ -238,7 +217,6 @@ struct be_tx_stats {
u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
- ulong tx_jiffies;
u32 tx_stops;
u32 tx_drv_drops; /* pkts dropped by driver */
/* the error counters are described in be_ethtool.c */
@@ -261,9 +239,9 @@ struct be_tx_compl_info {
struct be_tx_obj {
u32 db_offset;
+ struct be_tx_compl_info txcp;
struct be_queue_info q;
struct be_queue_info cq;
- struct be_tx_compl_info txcp;
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
@@ -458,10 +436,10 @@ struct be_port_resources {
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
struct rss_info {
- u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
u8 rss_queue[RSS_INDIR_TABLE_LEN];
u8 rss_hkey[RSS_HASH_KEY_LEN];
+ u64 rss_flags;
};
#define BE_INVALID_DIE_TEMP 0xFF
@@ -544,11 +522,13 @@ enum {
};
struct be_error_recovery {
- /* Lancer error recovery variables */
- u8 recovery_retries;
+ union {
+ u8 recovery_retries; /* used for Lancer */
+ u8 recovery_state; /* used for BEx and Skyhawk */
+ };
/* BEx/Skyhawk error recovery variables */
- u8 recovery_state;
+ bool recovery_supported;
u16 ue_to_reset_time; /* Time after UE, to soft reset
* the chip - PF0 only
*/
@@ -556,7 +536,6 @@ struct be_error_recovery {
* of SLIPORT_SEMAPHORE reg
*/
u16 last_err_code;
- bool recovery_supported;
unsigned long probe_time;
unsigned long last_recovery_time;
@@ -773,17 +752,33 @@ static inline u16 be_max_any_irqs(struct be_adapter *adapter)
/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
+#ifdef CONFIG_BE2NET_LANCER
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
+#else
+#define lancer_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_LANCER */
+#ifdef CONFIG_BE2NET_SKYHAWK
#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
adapter->pdev->device == OC_DEVICE_ID6)
+#else
+#define skyhawk_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_SKYHAWK */
+#ifdef CONFIG_BE2NET_BE3
#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
adapter->pdev->device == OC_DEVICE_ID2)
+#else
+#define BE3_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE3 */
+#ifdef CONFIG_BE2NET_BE2
#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
adapter->pdev->device == OC_DEVICE_ID1)
+#else
+#define BE2_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE2 */
#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 7f7e206f95f8..3f6749fc889f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -575,6 +575,7 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
break;
}
}
+ /* fall through */
case PHY_TYPE_SFP_PLUS_10GB:
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8f755009ff38..74d122616e76 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -47,14 +47,22 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static struct workqueue_struct *be_err_recovery_workq;
static const struct pci_device_id be_dev_ids[] = {
+#ifdef CONFIG_BE2NET_BE2
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+#endif /* CONFIG_BE2NET_BE2 */
+#ifdef CONFIG_BE2NET_BE3
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+#endif /* CONFIG_BE2NET_BE3 */
+#ifdef CONFIG_BE2NET_LANCER
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+#endif /* CONFIG_BE2NET_LANCER */
+#ifdef CONFIG_BE2NET_SKYHAWK
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
+#endif /* CONFIG_BE2NET_SKYHAWK */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1412,6 +1420,83 @@ drop:
return NETDEV_TX_OK;
}
+static void be_tx_timeout(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_tx_obj *txo;
+ struct sk_buff *skb;
+ struct tcphdr *tcphdr;
+ struct udphdr *udphdr;
+ u32 *entry;
+ int status;
+ int i, j;
+
+ for_all_tx_queues(adapter, txo, i) {
+ dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
+ i, txo->q.head, txo->q.tail,
+ atomic_read(&txo->q.used), txo->q.id);
+
+ entry = txo->q.dma_mem.va;
+ for (j = 0; j < TX_Q_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ entry = txo->cq.dma_mem.va;
+ dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
+ i, txo->cq.head, txo->cq.tail,
+ atomic_read(&txo->cq.used));
+ for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ for (j = 0; j < TX_Q_LEN; j++) {
+ if (txo->sent_skb_list[j]) {
+ skb = txo->sent_skb_list[j];
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
+ tcphdr = tcp_hdr(skb);
+ dev_info(dev, "TCP source port %d\n",
+ ntohs(tcphdr->source));
+ dev_info(dev, "TCP dest port %d\n",
+ ntohs(tcphdr->dest));
+ dev_info(dev, "TCP sequence num %d\n",
+ ntohs(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %d\n",
+ ntohs(tcphdr->ack_seq));
+ } else if (ip_hdr(skb)->protocol ==
+ IPPROTO_UDP) {
+ udphdr = udp_hdr(skb);
+ dev_info(dev, "UDP source port %d\n",
+ ntohs(udphdr->source));
+ dev_info(dev, "UDP dest port %d\n",
+ ntohs(udphdr->dest));
+ }
+ dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
+ j, skb, skb->len, skb->protocol);
+ }
+ }
+ }
+
+ if (lancer_chip(adapter)) {
+ dev_info(dev, "Initiating reset due to tx timeout\n");
+ dev_info(dev, "Resetting adapter\n");
+ status = lancer_physdev_ctrl(adapter,
+ PHYSDEV_CONTROL_FW_RESET_MASK);
+ if (status)
+ dev_err(dev, "Reset failed .. Reboot server\n");
+ }
+}
+
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
@@ -3274,7 +3359,7 @@ void be_detect_error(struct be_adapter *adapter)
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(dev, "Firmware update in progress\n");
+ dev_info(dev, "Reset is in progress\n");
} else {
dev_err(dev, "Error detected in the card\n");
dev_err(dev, "ERR: sliport status 0x%x\n",
@@ -3403,9 +3488,11 @@ static int be_msix_register(struct be_adapter *adapter)
int status, i, vec;
for_all_evt_queues(adapter, eqo, i) {
- sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+		char irq_name[IFNAMSIZ + 4];
+
+ snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
vec = be_msix_vec_get(adapter, eqo);
- status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
+ status = request_irq(vec, be_msix, 0, irq_name, eqo);
if (status)
goto err_msix;
@@ -5216,6 +5303,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
+ .ndo_tx_timeout = be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index aecc76504b69..a1197d3adbe0 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 5f4e1ffa7b95..65a22cd9aef2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT 16
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
/* Values for the L3R field of the FM Parse Results
*/
/* L3 Type field: First IP Present IPv4 */
@@ -1168,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
params.specific_params.non_rx_params.err_fqid = errq->fqid;
@@ -1210,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
rx_p = &params.specific_params.rx_params;
@@ -1607,18 +1610,32 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
+ struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
const struct qm_sg_entry *sgt;
struct sk_buff **skbh, *skb;
int nr_frags, i;
+ u64 ns;
skbh = (struct sk_buff **)phys_to_virt(addr);
skb = *skbh;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
+ }
+
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_unmap_single(dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
dma_dir);
/* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1920,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
void *sgt_buf;
/* get a page frag to store the SGTable */
- sz = SKB_DATA_ALIGN(priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
sgt_buf = netdev_alloc_frag(sz);
if (unlikely(!sgt_buf)) {
netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1988,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
skbh = (struct sk_buff **)buffer_start;
*skbh = skb;
- addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
- dma_dir);
+ addr = dma_map_single(dev, buffer_start,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
if (unlikely(dma_mapping_error(dev, addr))) {
dev_err(dev, "DMA mapping failed");
err = -EINVAL;
@@ -2086,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
@@ -2227,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
+ struct skb_shared_hwtstamps *shhwtstamps;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
const struct qm_fd *fd = &dq->fd;
@@ -2240,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct sk_buff *skb;
int *count_ptr;
void *vaddr;
+ u64 ns;
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2304,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (!skb)
return qman_cb_dqrr_consume;
+ if (priv->rx_tstamp) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ else
+ dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
+ }
+
skb->protocol = eth_type_trans(skb, net_dev);
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
@@ -2523,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev)
return err;
}
+static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ /* The hardware can't switch Rx and Tx timestamping
+ * off separately; just stop requesting Tx timestamps.
+ */
+ priv->tx_tstamp = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->tx_tstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ /* Likewise, the hardware keeps timestamping; only
+ * the driver-side Rx flag is cleared.
+ */
+ priv->rx_tstamp = false;
+ } else {
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- if (!net_dev->phydev)
- return -EINVAL;
- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ int ret = -EINVAL;
+
+ if (cmd == SIOCGMIIREG) {
+ if (net_dev->phydev)
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ }
+
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa_ts_ioctl(net_dev, rq, cmd);
+
+ return ret;
}
static const struct net_device_ops dpaa_ops = {
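
For context on how the new dpaa_ts_ioctl() path gets exercised: user space enables timestamping through the standard SIOCSHWTSTAMP request. A minimal sketch, assuming an already-opened socket fd and an illustrative interface name:

/* User-space sketch: turn on Tx timestamps and all-frame Rx
 * timestamps, matching what dpaa_ts_ioctl() accepts above.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_tstamp(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* on return the driver may have rewritten cfg.rx_filter,
	 * here to HWTSTAMP_FILTER_ALL since DPAA stamps every frame
	 */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
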
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index bd9422082f83..af320f83c742 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -182,6 +182,9 @@ struct dpaa_priv {
struct dpaa_buffer_layout buf_layout[2];
u16 rx_headroom;
+
+ bool tx_tstamp; /* Tx timestamping enabled */
+ bool rx_tstamp; /* Rx timestamping enabled */
};
/* from dpaa_ethtool.c */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 2f933b6b2f4e..3184c8f7cdd0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -32,6 +32,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/string.h>
+#include <linux/of_platform.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/ptp_qoriq.h>
#include "dpaa_eth.h"
#include "mac.h"
@@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int dpaa_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *info)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct device_node *mac_node = dev->of_node;
+ struct device_node *fman_node = NULL, *ptp_node = NULL;
+ struct platform_device *ptp_dev = NULL;
+ struct qoriq_ptp *ptp = NULL;
+
+ info->phc_index = -1;
+
+ fman_node = of_get_parent(mac_node);
+ if (fman_node)
+ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+
+ if (ptp_node)
+ ptp_dev = of_find_device_by_node(ptp_node);
+
+ if (ptp_dev)
+ ptp = platform_get_drvdata(ptp_dev);
+
+ if (ptp)
+ info->phc_index = ptp->phc_index;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.set_link_ksettings = dpaa_set_link_ksettings,
.get_rxnfc = dpaa_get_rxnfc,
.set_rxnfc = dpaa_set_rxnfc,
+ .get_ts_info = dpaa_get_ts_info,
};
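
The phc_index that dpaa_get_ts_info() resolves through the ptimer-handle phandle is what user space reads back via the ETHTOOL_GET_TS_INFO request, the same data "ethtool -T" prints. A rough query-side sketch, with the socket fd and interface name as assumptions:

/* Fetch the PHC index reported by .get_ts_info; -1 means no PHC,
 * otherwise the clock device is /dev/ptp<index>.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int get_phc_index(int fd, const char *ifname)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	return info.phc_index;
}
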
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c729665107f5..2708297e7795 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -48,6 +48,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
@@ -2949,13 +2950,12 @@ fec_enet_close(struct net_device *ndev)
*/
#define FEC_HASH_BITS 6 /* #bits in hash */
-#define CRC32_POLY 0xEDB88320
static void set_multicast_list(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct netdev_hw_addr *ha;
- unsigned int i, bit, data, crc, tmp;
+ unsigned int crc, tmp;
unsigned char hash;
unsigned int hash_high = 0, hash_low = 0;
@@ -2983,15 +2983,7 @@ static void set_multicast_list(struct net_device *ndev)
/* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
- crc = 0xffffffff;
-
- for (i = 0; i < ndev->addr_len; i++) {
- data = ha->addr[i];
- for (bit = 0; bit < 8; bit++, data >>= 1) {
- crc = (crc >> 1) ^
- (((crc ^ data) & 1) ? CRC32_POLY : 0);
- }
- }
+ crc = ether_crc_le(ndev->addr_len, ha->addr);
/* only upper 6 bits (FEC_HASH_BITS) are used
* which point to specific bit in the hash registers
@@ -3136,6 +3128,7 @@ static int fec_enet_init(struct net_device *ndev)
unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
sizeof(struct bufdesc);
unsigned dsize_log2 = __fls(dsize);
+ int ret;
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
@@ -3146,6 +3139,13 @@ static int fec_enet_init(struct net_device *ndev)
fep->tx_align = 0x3;
#endif
+ /* Set the DMA mask for both streaming and coherent mappings */
+ ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0) {
+ dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
+ return ret;
+ }
+
fec_enet_alloc_queue(ndev);
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
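
The loop removed above was an open-coded reflected CRC-32 (polynomial 0xEDB88320, seed 0xffffffff, no final inversion), which is exactly the value ether_crc_le() returns. A standalone equivalent for reference:

/* Bitwise form of the CRC the old code computed by hand; the FEC
 * multicast hash then takes the top FEC_HASH_BITS of this value.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_le_bitwise(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xffffffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return crc;		/* == ether_crc_le(len, data) */
}
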
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 36c2d7d6ee1b..7e892b1cbd3d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
unsigned long flags;
u32 val, tempval;
- int inc;
struct timespec64 ts;
u64 ns;
val = 0;
@@ -114,7 +113,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
fep->pps_channel = DEFAULT_PPS_CHANNEL;
fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
- inc = fep->ptp_inc;
spin_lock_irqsave(&fep->tmreg_lock, flags);
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 9530405030a7..c415ac67cb7b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
of_node_put(muram_node);
of_node_put(fm_node);
- err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
+ "fman", fman);
if (err < 0) {
dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
__func__, irq, err);
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index bfa02e0014ae..935c317fa696 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -41,6 +41,7 @@
/* Frame queue Context Override */
#define FM_FD_CMD_FCO 0x80000000
#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
+#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */
#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
/* TX-Port: Unsupported Format */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 57b1e2b47c0a..1ca543ac8f2c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -123,11 +123,13 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
+#define TCTRL_TTSE 0x00000040
#define TCTRL_GTS 0x00000020
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
#define RCTRL_GHTX 0x00000400
+#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
@@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 rctrl, tctrl;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ rctrl = ioread32be(&regs->rctrl);
+ tctrl = ioread32be(&regs->tctrl);
+
+ if (enable) {
+ rctrl |= RCTRL_RTSE;
+ tctrl |= TCTRL_TTSE;
+ } else {
+ rctrl &= ~RCTRL_RTSE;
+ tctrl &= ~TCTRL_TTSE;
+ }
+
+ iowrite32be(rctrl, &regs->rctrl);
+ iowrite32be(tctrl, &regs->tctrl);
+
+ return 0;
+}
+
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 1a689adf5a22..5149d96ec2c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 446a97b792e3..bc6eb30aa20f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
+int memac_set_tstamp(struct fman_mac *memac, bool enable)
+{
+ return 0; /* Always enabled. */
+}
+
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b5a50338ed9a..b2c671ec0ce7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac,
int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_set_allmulti(struct fman_mac *memac, bool enable);
+int memac_set_tstamp(struct fman_mac *memac, bool enable);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ce6e24c74978..ee82ee1384eb 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
#define HWP_HXS_PHE_REPORT 0x00000800
#define HWP_HXS_PCAC_PSTAT 0x00000100
#define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
struct fman_port_hwp_regs {
struct {
u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
iowrite32be(0xffffffff, &regs->pmda[i].lcv);
}
+ /* Short packet padding removal from checksum calculation */
+ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
start_port_hwp(port);
}
@@ -1731,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
}
EXPORT_SYMBOL(fman_port_get_hash_result_offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
+{
+ if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
+ return -EINVAL;
+
+ *tstamp = be64_to_cpu(*(__be64 *)(data +
+ port->buffer_offsets.time_stamp_offset));
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_get_tstamp);
+
static int fman_port_probe(struct platform_device *of_dev)
{
struct fman_port *port;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index e86ca6a34e4e..9dbb69f40121 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port);
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
+
struct fman_port *fman_port_bind(struct device *dev);
#endif /* __FMAN_PORT_H */
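
For reference, the extraction fman_port_get_tstamp() performs reduces to a big-endian 64-bit load at the port's timestamp offset; a sketch with the offset passed in directly (the driver takes it from buffer_offsets):

/* The FMan writes the 64-bit nanosecond stamp big-endian into the
 * frame's prepended data; get_unaligned_be64() covers both the byte
 * swap and a potentially unaligned load before ns_to_ktime().
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static u64 ex_read_fman_tstamp(const void *frame_start,
			       unsigned int ts_offset)
{
	return get_unaligned_be64(frame_start + ts_offset);
}
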
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 284735d4ebe9..40705938eecc 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -44,6 +44,7 @@
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP 0x00100000
#define CMD_CFG_NO_LEN_CHK 0x00020000
#define CMD_CFG_PAUSE_IGNORE 0x00000100
#define CMF_CFG_CRC_FWD 0x00000040
@@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+
+ if (enable)
+ tmp |= CMD_CFG_EN_TIMESTAMP;
+ else
+ tmp &= ~CMD_CFG_EN_TIMESTAMP;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index cbbd3b422a98..3bfd1062b386 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -52,5 +52,6 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7b5b95f52c09..a847b9c3b31a 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
mac_dev->set_exception = dtsec_set_exception;
mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
mac_dev->set_exception = tgec_set_exception;
mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev)
mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
mac_dev->set_exception = memac_set_exception;
mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index b520cec120ee..824a81a9f350 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -68,6 +68,7 @@ struct mac_device {
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
+ int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
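
The new set_tstamp member rides the same per-MAC vtable that setup_dtsec(), setup_tgec() and setup_memac() populate above, which is what keeps callers such as dpaa_ts_ioctl() MAC-agnostic. Reduced to its essentials, with hypothetical names:

/* Hypothetical reduction of the mac_device dispatch: each MAC
 * flavor installs its own handler and callers go through the
 * pointer, never a concrete dtsec/tgec/memac symbol.
 */
#include <linux/types.h>

struct ex_mac_ops {
	int (*set_tstamp)(void *fman_mac, bool enable);
};

static int ex_dtsec_set_tstamp(void *fman_mac, bool enable)
{
	/* the RCTRL_RTSE/TCTRL_TTSE read-modify-write lives here */
	return 0;
}

static void ex_setup_dtsec(struct ex_mac_ops *ops)
{
	ops->set_tstamp = ex_dtsec_set_tstamp;
}

/* caller side, as in dpaa_ts_ioctl():
 *	mac_dev->set_tstamp(mac_dev->fman_mac, true);
 */
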
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
index 7832db71dcb9..1dbee5d898b3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fec.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -2,9 +2,6 @@
#ifndef FS_ENET_FEC_H
#define FS_ENET_FEC_H
-/* CRC polynomium used by the FEC for the multicast group filtering */
-#define FEC_CRC_POLY 0x04C11DB7
-
#define FEC_MAX_MULTICAST_ADDRS 64
/* Interrupt events/masks.
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 1fc27c97e3b2..99fe2c210d0f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
+#include <linux/crc32.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -176,21 +177,10 @@ static void set_multicast_start(struct net_device *dev)
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
- int temp, hash_index, i, j;
+ int temp, hash_index;
u32 crc, csrVal;
- u8 byte, msb;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- byte = mac[i];
- for (j = 0; j < 8; j++) {
- msb = crc >> 31;
- crc <<= 1;
- if (msb ^ (byte & 0x1))
- crc ^= FEC_CRC_POLY;
- byte >>= 1;
- }
- }
+
+ crc = ether_crc(6, mac);
temp = (crc & 0x3f) >> 1;
hash_index = ((temp & 0x01) << 4) |
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8cb98cae0a6f..395a5266ea30 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -740,7 +740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
u64 class)
{
- unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
unsigned int *local_rqfpr;
unsigned int *local_rqfcr;
@@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
}
priv->cur_filer_idx = l - 1;
- last_rule_idx = l;
/* hash rules */
ethflow_to_filer_rules(priv, ethflow);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 42fca3208c0b..22a817da861e 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
ugeth_vdbg("%s: IN", __func__);
+ netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&ugeth->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
/* Start from the next BD that should be filled */
struct ucc_geth_private *ugeth = netdev_priv(dev);
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
@@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
if (!skb)
break;
-
+ howmany++;
+ bytes_sent += skb->len;
dev->stats.tx_packets++;
dev_consume_skb_any(skb);
@@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->confBd[txQ] = bd;
+ netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
@@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
+ netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
@@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev)
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
return 0;
}
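
The netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls added above wire ucc_geth into byte queue limits (BQL). The pattern, in minimal form:

/* BQL sketch: report bytes when a packet is handed to the hardware
 * and credit them back from the completion path; netdev_reset_queue()
 * on open/close clears stale accounting.
 */
#include <linux/netdevice.h>

static netdev_tx_t ex_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);	/* before HW owns the skb */
	/* ... place skb on the TX BD ring ... */
	return NETDEV_TX_OK;
}

static void ex_tx_clean(struct net_device *dev, unsigned int pkts,
			unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
}
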
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 8bcf470ff5f3..25152715396b 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
- depends on (OF || ACPI) && HAS_DMA
+ depends on OF || ACPI
depends on ARM || ARM64 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -85,10 +85,12 @@ config HNS3
drivers (like ODP) to register with HNAE devices and their associated
operations.
+if HNS3
+
config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+ default m
depends on PCI_MSI
- depends on HNS3
---help---
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -97,16 +99,15 @@ config HNS3_HCLGE
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
- depends on HNS3 && HNS3_HCLGE && DCB
+ depends on HNS3_HCLGE && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
If unsure, say N.
config HNS3_HCLGEVF
- tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
- depends on PCI_MSI
- depends on HNS3
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
depends on HNS3_HCLGE
---help---
This selects the HNS3 VF drivers network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
+ default m
depends on 64BIT && PCI
- depends on HNS3
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
devices and their associated operations.
+endif # HNS3
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 340e28211135..14374a856d30 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 25a6c8722eca..c5727003af8c 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,12 +1006,11 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
for (i = 0; i < QUEUE_NUMS; i++) {
size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
- virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
- GFP_KERNEL);
+ virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
if (virt_addr == NULL)
goto error_free_pool;
- memset(virt_addr, 0, size);
priv->pool[i].size = size;
priv->pool[i].desc = virt_addr;
priv->pool[i].phys_addr = phys_addr;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index bd68379d2bea..e6aad30e7e69 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -70,8 +70,8 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
return container_of(q, struct ring_pair_cb, q);
}
-struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
- u32 port_id)
+static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
{
int vfnum_per_port;
int qnum_per_vf;
@@ -329,7 +329,7 @@ static int hns_ae_start(struct hnae_handle *handle)
return 0;
}
-void hns_ae_stop(struct hnae_handle *handle)
+static void hns_ae_stop(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
@@ -357,7 +357,7 @@ static void hns_ae_reset(struct hnae_handle *handle)
}
}
-void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
{
u32 flag;
@@ -577,8 +577,8 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
*rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
-void hns_ae_update_stats(struct hnae_handle *handle,
- struct net_device_stats *net_stats)
+static void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
{
int port;
int idx;
@@ -660,7 +660,7 @@ void hns_ae_update_stats(struct hnae_handle *handle,
net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
-void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
int idx;
struct hns_mac_cb *mac_cb;
@@ -692,8 +692,8 @@ void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
-void hns_ae_get_strings(struct hnae_handle *handle,
- u32 stringset, u8 *data)
+static void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
{
int port;
int idx;
@@ -725,7 +725,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
-int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
u32 sset_count = 0;
struct hns_mac_cb *mac_cb;
@@ -771,7 +771,7 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
return ret;
}
-void hns_ae_update_led_status(struct hnae_handle *handle)
+static void hns_ae_update_led_status(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb;
@@ -783,8 +783,8 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
hns_set_led_opt(mac_cb);
}
-int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
- enum hnae_led_state status)
+static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
{
struct hns_mac_cb *mac_cb;
@@ -795,7 +795,7 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
return hns_cpld_led_set_id(mac_cb, status);
}
-void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
u32 *p = data;
int i;
@@ -820,7 +820,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
}
-int hns_ae_get_regs_len(struct hnae_handle *handle)
+static int hns_ae_get_regs_len(struct hnae_handle *handle)
{
u32 total_num;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 74bd260ca02a..5488c6e89f21 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -339,7 +339,7 @@ static void hns_gmac_init(void *mac_drv)
GMAC_TX_WATER_LINE_SHIFT, 8);
}
-void hns_gmac_update_stats(void *mac_drv)
+static void hns_gmac_update_stats(void *mac_drv)
{
struct mac_hw_stats *hw_stats = NULL;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 9dcc5765f11f..1c2326bd76e2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -458,11 +458,6 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
- MAC_MAX_MTU : MAC_MAX_MTU_V2;
-
- if (mac_cb->mac_type == HNAE_PORT_DEBUG)
- max_frm = MAC_MAX_MTU_DBG;
if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
return -EINVAL;
@@ -708,7 +703,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
{
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
struct platform_device *pdev;
struct mii_bus *mii_bus;
int rc;
@@ -722,13 +717,15 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
mac_cb->fw_port, "mdio-node", 0, &args);
if (rc)
return rc;
+ if (!is_acpi_device_node(args.fwnode))
+ return -EINVAL;
addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
if (addr < 0)
return addr;
/* dev address in adev */
- pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev));
+ pdev = hns_dsaf_find_platform_device(args.fwnode);
if (!pdev) {
dev_err(mac_cb->dev, "mac%d mdio pdev is NULL\n",
mac_cb->mac_id);
@@ -931,8 +928,9 @@ static int hns_mac_get_mode(phy_interface_t phy_if)
}
}
-u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
- struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+static u8 __iomem *
+hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
{
u8 __iomem *base = dsaf_dev->io_base;
int mac_id = mac_cb->mac_id;
@@ -950,7 +948,8 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
* @mac_cb: mac control block
* return 0 - success , negative --fail
*/
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
+static int
+hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
{
int ret;
u32 mac_mode_idx;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 0ce07f6eb1e6..ca50c2553a9c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -28,7 +28,7 @@
#include "hns_dsaf_rcb.h"
#include "hns_dsaf_misc.h"
-const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
@@ -42,7 +42,7 @@ static const struct acpi_device_id hns_dsaf_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
-int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
{
int ret, i;
u32 desc_num;
@@ -959,7 +959,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
spin_unlock_bh(&dsaf_dev->tcam_lock);
}
-void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
+static void
+hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
{
addr[0] = mac_key->high.bits.mac_0;
addr[1] = mac_key->high.bits.mac_1;
@@ -1682,7 +1683,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_mcast_cfg mac_data;
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- struct dsaf_drv_tbl_tcam_key tmp_mac_key;
struct dsaf_tbl_tcam_data tcam_data;
u8 mc_addr[ETH_ALEN];
int mskid;
@@ -1739,10 +1739,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
/* if exist, add in */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
&mac_data);
-
- tmp_mac_key.high.val =
- le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
}
/* config hardware entry */
@@ -1852,7 +1848,7 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_data tcam_data;
int mskid;
const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
- struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
+ struct dsaf_drv_tbl_tcam_key mask_key;
struct dsaf_tbl_tcam_data *pmask_key = NULL;
u8 mc_addr[ETH_ALEN];
@@ -1915,9 +1911,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* read entry */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
- tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
/*del the port*/
if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
mskid = mac_entry->port_num;
@@ -2084,8 +2077,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
* @dsaf_id: dsa fabric id
* @xge_ge_work_mode
*/
-void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
- enum dsaf_port_rate_mode rate_mode)
+static void
+hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
{
u32 port_work_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index acf29633ec79..16294cd3c954 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -340,7 +340,8 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* bit18-19 for com/dfx
* @enable: false - request reset , true - drop reset
*/
-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+static void
+hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
u32 reg_addr;
@@ -362,7 +363,7 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* bit18-19 for com/dfx
* @enable: false - request reset , true - drop reset
*/
-void
+static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
@@ -370,7 +371,7 @@ hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
msk, dereset);
}
-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
{
if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
@@ -384,7 +385,7 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
}
}
-void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
HNS_ROCE_RESET_FUNC, 0, dereset);
@@ -568,7 +569,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
return phy_if;
}
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
u32 val = 0;
int ret;
@@ -586,7 +587,7 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
return 0;
}
-int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
union acpi_object *obj;
union acpi_object obj_args, argv4;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 93e71e27401b..d160d8c9e45b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -73,7 +73,7 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
* comm_index: common index
* retuen 0 - success , negative --fail
*/
-int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
{
struct ppe_common_cb *ppe_common;
int ppe_num;
@@ -104,7 +104,8 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
return 0;
}
-void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+static void
+hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
{
dsaf_dev->ppe_common[comm_index] = NULL;
}
@@ -203,9 +204,9 @@ static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
- mdelay(100);
+ msleep(100);
if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
switch (dsaf_mode) {
@@ -337,7 +338,7 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
}
}
-void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
{
u32 i;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index e2e28532e4dc..9d76e2e54f9d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -705,7 +705,7 @@ void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
}
}
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
+static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
switch (dsaf_dev->dsaf_mode) {
case DSAF_MODE_ENABLE_FIX:
@@ -741,7 +741,7 @@ int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 51e7e9f5af49..ba4316910dea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -215,10 +215,10 @@ static void hns_xgmac_init(void *mac_drv)
u32 port = drv->mac_id;
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
- mdelay(100);
+ msleep(100);
hns_xgmac_lf_rf_control_init(drv);
hns_xgmac_exc_irq_en(drv, 0);
@@ -311,7 +311,7 @@ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
}
-void hns_xgmac_update_stats(void *mac_drv)
+static void hns_xgmac_update_stats(void *mac_drv)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ef9ef703d13a..9f2b552aee33 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1300,7 +1300,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-void hns_nic_update_stats(struct net_device *netdev)
+static void hns_nic_update_stats(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1582,7 +1582,7 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
-void hns_nic_poll_controller(struct net_device *ndev)
+static void hns_nic_poll_controller(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -1935,7 +1935,7 @@ static int hns_nic_uc_unsync(struct net_device *netdev,
*
* return void
*/
-void hns_set_multicast_list(struct net_device *ndev)
+static void hns_set_multicast_list(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
@@ -1957,7 +1957,7 @@ void hns_set_multicast_list(struct net_device *ndev)
}
}
-void hns_nic_set_rx_mode(struct net_device *ndev)
+static void hns_nic_set_rx_mode(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb);
+ return fallback(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
@@ -2377,7 +2378,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
}
priv->fwnode = &ae_node->fwnode;
} else if (is_acpi_node(dev->fwnode)) {
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
if (acpi_dev_found(hns_enet_acpi_match[0].id))
priv->enet_ver = AE_VERSION_1;
@@ -2393,7 +2394,11 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
- priv->fwnode = acpi_fwnode_handle(args.adev);
+ if (!is_acpi_device_node(args.fwnode)) {
+ ret = -EINVAL;
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read cfg data from OF or acpi\n");
return -ENXIO;
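
The hns_nic_select_queue() change above tracks a core API update: the fallback now takes a struct net_device *sb_dev argument in place of the old accel_priv cookie. A minimal hook against that 4.19-era signature, with an illustrative steering rule:

#include <linux/netdevice.h>

/* Sketch of an ndo_select_queue hook on the new signature; unknown
 * traffic defers to the stack's fallback, passing sb_dev through
 * (the hns code above passes NULL there).
 */
static u16 ex_select_queue(struct net_device *ndev, struct sk_buff *skb,
			   struct net_device *sb_dev,
			   select_queue_fallback_t fallback)
{
	if (skb->priority)	/* illustrative steering rule */
		return 0;

	return fallback(ndev, skb, sb_dev);
}
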
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 2e14a3ae1d8b..08f3c4743f74 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -307,6 +307,7 @@ static int __lb_setup(struct net_device *ndev,
break;
case MAC_LOOP_PHY_NONE:
ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
+ /* fall through */
case MAC_LOOP_NONE:
if (!ret && h->dev->ops->set_loopback) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
@@ -658,8 +659,8 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* @dev: net device
* @param: ethtool parameter
*/
-void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+static void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
@@ -808,7 +809,8 @@ static int hns_set_coalesce(struct net_device *net_dev,
* @dev: net device
* @ch: channel info.
*/
-void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+static void
+hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
@@ -825,8 +827,8 @@ void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
* @stats: statistics info.
* @data: statistics data.
*/
-void hns_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+static void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
u64 *p = data;
struct hns_nic_priv *priv = netdev_priv(netdev);
@@ -883,7 +885,7 @@ void hns_get_ethtool_stats(struct net_device *netdev,
* @stats: string set ID.
* @data: objects data.
*/
-void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -973,7 +975,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
*
* Return string set count.
*/
-int hns_get_sset_count(struct net_device *netdev, int stringset)
+static int hns_get_sset_count(struct net_device *netdev, int stringset)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1007,7 +1009,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
*
* Return 0 on success, negative on failure.
*/
-int hns_phy_led_set(struct net_device *netdev, int value)
+static int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
struct phy_device *phy_dev = netdev->phydev;
@@ -1029,7 +1031,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
*
* Return 0 on success, negative on failure.
*/
-int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+static int
+hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1103,8 +1106,8 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
* @cmd: ethtool cmd
* @date: register data
*/
-void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
- void *data)
+static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9d79dad2c6aa..fff5be8078ac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -1,14 +1,7 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/spinlock.h>
#include "hnae3.h"
@@ -41,13 +34,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_UNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE:
- hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
default:
break;
@@ -61,16 +54,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_UNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE:
- inited = hnae_get_bit(ae_dev->flag,
- HNAE3_ROCE_CLIENT_INITED_B);
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
break;
default:
break;
@@ -86,7 +79,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
- hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
@@ -95,7 +88,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
ret = ae_dev->ops->init_client_instance(client, ae_dev);
if (ret) {
dev_err(&ae_dev->pdev->dev,
- "fail to instantiate client\n");
+ "fail to instantiate client, ret = %d\n", ret);
return ret;
}
@@ -135,7 +128,8 @@ int hnae3_register_client(struct hnae3_client *client)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed for port\n");
+ "match and instantiation failed for port, ret = %d\n",
+ ret);
}
exit:
@@ -185,11 +179,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ae_dev->ops = ae_algo->ops;
ret = ae_algo->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
continue;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
/* check the client list for the match with this ae_dev type and
* initialize the figure out client instance
@@ -198,7 +193,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
}
@@ -218,7 +214,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -232,7 +228,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_algo->node);
@@ -271,11 +267,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
/* ae_dev init should set flag */
ret = ae_dev->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
goto out_err;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
break;
}
@@ -286,7 +283,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
out_err:
@@ -306,7 +304,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -317,7 +315,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_dev->node);
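
hnae3.c keeps clients, devices and algorithms on global lists under one mutex, and registering either side walks the opposite list to instantiate matches. Stripped of the HNS3 specifics, the registry pattern looks roughly like:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/printk.h>

struct ex_client { struct list_head node; int type; };
struct ex_dev { struct list_head node; int type; };

static LIST_HEAD(ex_clients);
static LIST_HEAD(ex_devs);
static DEFINE_MUTEX(ex_lock);

/* Registering a client pairs it with every already-known device of
 * the same type, mirroring hnae3_register_client(); device
 * registration walks ex_clients the same way in the other direction.
 */
static void ex_register_client(struct ex_client *c)
{
	struct ex_dev *d;

	mutex_lock(&ex_lock);
	list_add(&c->node, &ex_clients);
	list_for_each_entry(d, &ex_devs, node)
		if (d->type == c->type)
			pr_info("instantiate client/dev pair\n");
	mutex_unlock(&ex_lock);
}
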
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8acb1d116a02..67befff0bfc5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
#ifndef __HNAE3_H
#define __HNAE3_H
@@ -62,10 +56,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -167,7 +161,6 @@ struct hnae3_client_ops {
#define HNAE3_CLIENT_NAME_LENGTH 16
struct hnae3_client {
char name[HNAE3_CLIENT_NAME_LENGTH];
- u16 version;
unsigned long state;
enum hnae3_client_type type;
const struct hnae3_client_ops *ops;
@@ -436,7 +429,6 @@ struct hnae3_dcb_ops {
struct hnae3_ae_algo {
const struct hnae3_ae_ops *ops;
struct list_head node;
- char name[HNAE3_CLASS_NAME_SIZE];
const struct pci_device_id *pdev_id_table;
};
@@ -509,17 +501,17 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
};
-#define hnae_set_field(origin, mask, shift, val) \
+#define hnae3_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
(origin) |= ((val) << (shift)) & (mask); \
} while (0)
-#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
-#define hnae_set_bit(origin, shift, val) \
- hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
-#define hnae_get_bit(origin, shift) \
- hnae_get_field((origin), (0x1 << (shift)), (shift))
+#define hnae3_set_bit(origin, shift, val) \
+ hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+ hnae3_get_field((origin), (0x1 << (shift)), (shift))
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
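
The renamed helpers are plain mask-and-shift accessors. A worked round-trip, assuming the hnae3.h macros and GENMASK() from <linux/bits.h> are in scope (field names invented for the example):

#define EX_FIELD_M	GENMASK(11, 8)
#define EX_FIELD_S	8

static inline u32 ex_field_roundtrip(void)
{
	u32 desc = 0;

	/* pack 0x5 into bits 11:8 -> desc becomes 0x500 */
	hnae3_set_field(desc, EX_FIELD_M, EX_FIELD_S, 0x5);

	/* (0x500 & 0xf00) >> 8 == 0x5 */
	return hnae3_get_field(desc, EX_FIELD_M, EX_FIELD_S);
}
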
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index eb82700da7d0..ea5f8a84070d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hnae3.h"
#include "hns3_enet.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 25a73bb2e642..3554dca7a680 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
@@ -56,15 +50,16 @@ static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
-static irqreturn_t hns3_irq_handle(int irq, void *dev)
+static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
- struct hns3_enet_tqp_vector *tqp_vector = dev;
+ struct hns3_enet_tqp_vector *tqp_vector = vector;
napi_schedule(&tqp_vector->napi);
@@ -239,7 +234,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
- int ret;
+ int i, ret;
+
+ if (kinfo->num_tc <= 1) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+ if (ret) {
+ netdev_err(netdev,
+ "netdev_set_num_tc fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!kinfo->tc_info[i].enable)
+ continue;
+
+ netdev_set_tc_queue(netdev,
+ kinfo->tc_info[i].tc,
+ kinfo->tc_info[i].tqp_count,
+ kinfo->tc_info[i].tqp_offset);
+ }
+ }
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
@@ -312,7 +328,9 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo;
+ int i, ret;
netif_carrier_off(netdev);
@@ -327,6 +345,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
return ret;
}
+ kinfo = &h->kinfo;
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
+
priv->ae_handle->last_reset_time = jiffies;
return 0;
}
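
hns3_nic_set_real_num_queue() and hns3_nic_net_open() now publish the hardware TC layout to the stack; the three calls compose as in this condensed sketch (the two-TC, eight-queue split is illustrative only):

#include <linux/netdevice.h>

/* Publish a TC layout: declare the TC count, place each TC's queue
 * range, then fill the priority-to-TC map that the
 * netdev_set_prio_tc_map() loop above installs per user priority.
 */
static int ex_publish_tcs(struct net_device *ndev)
{
	int err = netdev_set_num_tc(ndev, 2);

	if (err)
		return err;

	netdev_set_tc_queue(ndev, 0, 4, 0);	/* TC0: queues 0..3 */
	netdev_set_tc_queue(ndev, 1, 4, 4);	/* TC1: queues 4..7 */

	netdev_set_prio_tc_map(ndev, 0, 0);	/* prio 0 -> TC0 */
	netdev_set_prio_tc_map(ndev, 7, 1);	/* prio 7 -> TC1 */

	return 0;
}
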
@@ -493,8 +517,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* find the txbd field values */
*paylen = skb->len - hdr_len;
- hnae_set_bit(*type_cs_vlan_tso,
- HNS3_TXD_TSO_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso,
+ HNS3_TXD_TSO_B, 1);
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
@@ -586,21 +610,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute L2 header size for normal packet, defined in 2 Bytes */
l2_len = l3.hdr - skb->data;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
/* tunnel packet */
if (skb->encapsulation) {
/* compute OL2 header size, defined in 2 Bytes */
ol2_len = l2_len;
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, ol2_len >> 1);
/* compute OL3 header size, defined in 4 Bytes */
ol3_len = l4.hdr - l3.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, ol3_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, ol3_len >> 2);
/* MAC in UDP, MAC in GRE (0x6558) */
if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -609,16 +633,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute OL4 header size, defined in 4 Bytes. */
ol4_len = l2_hdr - l4.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ ol4_len >> 2);
/* switch IP header ptr from outer to inner header */
l3.hdr = skb_inner_network_header(skb);
/* compute inner l2 header size, defined in 2 Bytes. */
l2_len = l3.hdr - l2_hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
} else {
/* skb packet types not supported by hardware,
* txbd len field is not filled.
@@ -634,22 +659,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, l3_len >> 2);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, l3_len >> 2);
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, l4.tcp->doff);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S, l4.tcp->doff);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
case IPPROTO_UDP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
default:
/* skb packet types not supported by hardware,
@@ -703,32 +730,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* define outer network header type. */
if (skb->protocol == htons(ETH_P_IP)) {
if (skb_is_gso(skb))
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
else
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
}
/* define tunnel type (OL4). */
switch (l4_proto) {
case IPPROTO_UDP:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
break;
case IPPROTO_GRE:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -749,43 +778,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
}
if (l3.v4->version == 4) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
if (skb_is_gso(skb))
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
-
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
} else if (l3.v6->version == 6) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
}
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
break;
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -807,11 +836,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
- HNS3_TXD_BDTYPE_S, 0);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+ HNS3_TXD_BDTYPE_S, 0);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
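
The mechanical hnae_ to hnae3_ rename above covers the driver's mask/shift helpers. Their exact definitions live in hnae3.h and are not part of this diff, but helpers of this kind conventionally have the following shape (the ex_ names and field positions are illustrative only, not the real TXD layout):

#include <linux/bits.h>

#define ex_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)

#define ex_set_bit(origin, shift, val) \
	ex_set_field(origin, BIT(shift), shift, val)

/* usage, mirroring hns3_set_txbd_baseinfo() above */
static inline u16 ex_pack_baseinfo(int frag_end)
{
	u16 v = 0;

	ex_set_field(v, GENMASK(1, 0), 0, 0);	/* BDTYPE field */
	ex_set_bit(v, 2, !!frag_end);		/* FE (frag end) bit */
	ex_set_bit(v, 3, 1);			/* VLD (valid) bit */
	return v;
}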
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +873,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
- hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -880,7 +909,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
- __be16 protocol;
u8 ol4_proto;
u8 il4_proto;
int ret;
@@ -909,7 +937,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
- protocol = skb->protocol;
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
@@ -1135,7 +1162,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
wmb(); /* Commit all data before submit */
- hnae_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, buf_num);
return NETDEV_TX_OK;
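
The renamed hnae3_queue_xmit() macro (see the header hunk further down) is a writel_relaxed() to the ring tail register, which is why the explicit wmb() above it is load-bearing: the descriptor stores must be globally visible before the doorbell rings. A minimal sketch of the same ordering, not the driver's code:

#include <linux/io.h>

static void ex_ring_doorbell(void __iomem *tail_reg, int buf_num)
{
	/* order the normal-memory descriptor writes before the MMIO
	 * write; writel_relaxed() itself carries no such barrier
	 * (plain writel() would)
	 */
	wmb();
	writel_relaxed(buf_num, tail_reg);
}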
@@ -1304,7 +1331,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
bool if_running;
- unsigned int i;
int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
@@ -1328,24 +1354,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
goto out;
- if (tc <= 1) {
- netdev_reset_tc(netdev);
- } else {
- ret = netdev_set_num_tc(netdev, tc);
- if (ret)
- goto out;
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!kinfo->tc_info[i].enable)
- continue;
-
- netdev_set_tc_queue(netdev,
- kinfo->tc_info[i].tc,
- kinfo->tc_info[i].tqp_count,
- kinfo->tc_info[i].tqp_offset);
- }
- }
-
ret = hns3_nic_set_real_num_queue(netdev);
out:
@@ -1665,6 +1673,9 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct pci_dev *pdev = h->pdev;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1698,12 +1709,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (pdev->revision != 0x20)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae_page_order(ring);
+ unsigned int order = hnae3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -1714,7 +1728,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae_page_size(ring);
+ cb->length = hnae3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -1780,33 +1794,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
hns3_free_buffers(ring);
- dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
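
The allocation rework above is worth a second look: the old code kzalloc()ed the ring and wrapped it in dma_map_single(), a streaming mapping that assumes clear ownership handoffs and explicit syncs, which a descriptor ring both CPU and hardware poke continuously does not have. A coherent allocation gives both sides a consistent view for the ring's whole lifetime. A minimal sketch against the dma_zalloc_coherent() API of this kernel generation (the helper was later absorbed into dma_alloc_coherent()); the ex_ ring type is a stand-in:

#include <linux/dma-mapping.h>

struct ex_ring {
	void		*desc;
	dma_addr_t	desc_dma;
	int		desc_num;
	size_t		desc_size;
};

static int ex_ring_alloc(struct device *dev, struct ex_ring *r)
{
	size_t size = r->desc_num * r->desc_size;

	/* zeroed, coherent: no map/unmap or sync calls needed later */
	r->desc = dma_zalloc_coherent(dev, size, &r->desc_dma, GFP_KERNEL);
	return r->desc ? 0 : -ENOMEM;
}

static void ex_ring_free(struct device *dev, struct ex_ring *r)
{
	if (r->desc) {
		dma_free_coherent(dev, r->desc_num * r->desc_size,
				  r->desc, r->desc_dma);
		r->desc = NULL;
	}
}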
@@ -1887,7 +1895,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+	/* desc_cb will be cleaned after hnae3_free_buffer_detach */
hns3_free_buffer_detach(ring, ring->next_to_clean);
ring_ptr_move_fw(ring, next_to_clean);
@@ -1917,7 +1925,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
if (is_ring_empty(ring) || head == ring->next_to_clean)
return true; /* no data to poll */
- if (!is_valid_clean_head(ring, head)) {
+ if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
@@ -2016,15 +2024,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
bool twobufs;
twobufs = ((PAGE_SIZE < 8192) &&
- hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+ hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
- truesize = hnae_buf_size(ring);
+ truesize = hnae3_buf_size(ring);
if (!twobufs)
- last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2076,13 +2084,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
/* check if hardware has done checksum */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
netdev_err(netdev, "L3/L4 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
@@ -2091,23 +2099,25 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
- l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
- HNS3_RXD_L4ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
- ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+ ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
+ /* fall through */
case HNS3_OL4_TYPE_NO_TUN:
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
- if (l3_type == HNS3_L3_TYPE_IPV4 ||
- (l3_type == HNS3_L3_TYPE_IPV6 &&
- (l4_type == HNS3_L4_TYPE_UDP ||
- l4_type == HNS3_L4_TYPE_TCP ||
- l4_type == HNS3_L4_TYPE_SCTP)))
+ if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) &&
+ (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP))
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
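
Two things happen in the checksum hunk above: the new /* fall through */ comment documents that the tunnel cases deliberately share the CHECKSUM_UNNECESSARY tail with the no-tunnel case (and silences -Wimplicit-fallthrough where enabled), and the l3/l4 condition is rebalanced so IPv4 gets the same L4-type check IPv6 does. The fall-through shape, reduced to a sketch with made-up case values:

#include <linux/skbuff.h>

static void ex_rx_csum(struct sk_buff *skb, int ol4_type)
{
	switch (ol4_type) {
	case 1:	/* e.g. MAC in UDP */
	case 2:	/* e.g. NVGRE */
		skb->csum_level = 1;	/* inner headers were verified */
		/* fall through */
	case 0:	/* no tunnel: shared tail */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}
}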
@@ -2135,8 +2145,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
- switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
- HNS3_RXD_STRP_TAGP_S)) {
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
break;
@@ -2174,7 +2184,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Check valid BD */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
return -EFAULT;
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2239,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2267,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
vlan_tag);
}
- if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
if (unlikely((!desc->rx.pkt_len) ||
- hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+ hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
netdev_err(netdev, "truncated pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.err_pkt_len++;
@@ -2279,7 +2289,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return -EFAULT;
}
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
netdev_err(netdev, "L2 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l2_err++;
@@ -2532,10 +2542,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
tx_ring = tqp_vector->tx_group.ring;
if (tx_ring) {
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2549,12 +2559,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2564,10 +2574,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
if (!tx_ring && rx_ring) {
cur_chain->next = NULL;
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2579,10 +2589,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
cur_chain = chain;
@@ -2745,10 +2755,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (ret)
return ret;
- ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
- if (ret)
- return ret;
-
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2809,7 +2815,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->io_base = q->io_base;
}
- hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+ hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
ring->tqp = q;
ring->desc = NULL;
@@ -2969,13 +2975,33 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
- hns3_buf_size2type(ring->buf_size));
hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
ring->desc_num / 8 - 1);
}
}
+static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ int i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
+ int j;
+
+ if (!tc_info->enable)
+ continue;
+
+ for (j = 0; j < tc_info->tqp_count; j++) {
+ struct hnae3_queue *q;
+
+ q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+ hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
+ tc_info->tc);
+ }
+ }
+}
+
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -3081,7 +3107,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->reset_level = HNAE3_NONE_RESET;
priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
@@ -3102,6 +3127,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ if (handle->flags & HNAE3_SUPPORT_VF)
+ handle->reset_level = HNAE3_VF_RESET;
+ else
+ handle->reset_level = HNAE3_FUNC_RESET;
+
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3208,7 +3238,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
struct net_device *ndev = kinfo->netdev;
bool if_running;
int ret;
- u8 i;
if (tc > HNAE3_MAX_TC)
return -EINVAL;
@@ -3218,10 +3247,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if_running = netif_running(ndev);
- ret = netdev_set_num_tc(ndev, tc);
- if (ret)
- return ret;
-
if (if_running) {
(void)hns3_nic_net_stop(ndev);
msleep(100);
@@ -3232,27 +3257,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (ret)
goto err_out;
- if (tc <= 1) {
- netdev_reset_tc(ndev);
- goto out;
- }
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
-
- if (tc_info->enable)
- netdev_set_tc_queue(ndev,
- tc_info->tc,
- tc_info->tqp_count,
- tc_info->tqp_offset);
- }
-
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(ndev, i,
- kinfo->prio_tc[i]);
- }
-
-out:
ret = hns3_nic_set_real_num_queue(ndev);
err_out:
@@ -3409,6 +3413,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
rx_ring->next_to_use = 0;
}
+ hns3_init_tx_ring_tc(priv);
+
return 0;
}
@@ -3418,7 +3424,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
struct net_device *ndev = kinfo->netdev;
if (!netif_running(ndev))
- return -EIO;
+ return 0;
return hns3_nic_net_stop(ndev);
}
@@ -3458,10 +3464,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- ret = hns3_get_ring_config(priv);
- if (ret)
- return ret;
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
return ret;
@@ -3493,10 +3495,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_put_ring_config(priv);
-
- priv->ring_data = NULL;
-
hns3_uninit_mac_addr(netdev);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 3b083d5ae9ce..a02a96aee2a2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H
@@ -43,7 +37,7 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
-#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C
+#define HNS3_RING_TX_RING_TC_REG 0x00050
#define HNS3_RING_TX_RING_TAIL_REG 0x00058
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
@@ -499,7 +493,6 @@ struct hns3_enet_tqp_vector {
u16 num_tqps; /* total number of tqps in TQP vector */
- cpumask_t affinity_mask;
char name[HNAE3_INT_NAME_LEN];
/* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +584,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
@@ -601,9 +594,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae_buf_size(_ring) ((_ring)->buf_size)
-#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
-#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+#define hnae3_buf_size(_ring) ((_ring)->buf_size)
+#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 40c0425b4023..f70ee6910ee2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
#include <linux/string.h>
@@ -59,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TPYE_NUM 1
+#define HNS3_SELF_TEST_TYPE_NUM 2
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -84,6 +78,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
return -EOPNOTSUPP;
switch (loop) {
+ case HNAE3_MAC_INTER_LOOP_SERDES:
case HNAE3_MAC_INTER_LOOP_MAC:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
@@ -201,7 +196,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
rx_group = &ring->tqp_vector->rx_group;
pre_rx_pkt = rx_group->total_packets;
+ preempt_disable();
hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+ preempt_enable();
rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
rx_group->total_packets = pre_rx_pkt;
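
hns3_clean_rx_ring() normally runs from the NAPI poll loop in softirq context; the self test drives it from ethtool's process context instead. The added preempt_disable()/preempt_enable() pair keeps the loopback poll from being preempted and migrated mid-run. The shape of that pattern, as a sketch rather than the driver's code:

#include <linux/preempt.h>

/* borrow a poll routine that normally runs in softirq context and
 * run it from process context without being scheduled away mid-poll
 */
static u32 ex_poll_atomically(u32 (*poll)(void *arg, int budget),
			      void *arg, int budget)
{
	u32 done;

	preempt_disable();
	done = poll(arg, budget);
	preempt_enable();

	return done;
}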
@@ -291,7 +288,7 @@ static void hns3_self_test(struct net_device *ndev,
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int st_param[HNS3_SELF_TEST_TPYE_NUM][2];
+ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
bool if_running = netif_running(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
bool dis_vlan_filter;
@@ -307,6 +304,10 @@ static void hns3_self_test(struct net_device *ndev,
st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
+ st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
+ st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
+
if (if_running)
dev_close(ndev);
@@ -320,7 +321,7 @@ static void hns3_self_test(struct net_device *ndev,
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
- for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) {
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index c36d64710fa6..ac13cb2b168e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -18,8 +12,7 @@
#include "hclge_main.h"
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
- DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -46,31 +39,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
+ int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
@@ -80,7 +66,7 @@ static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->flag = ring_type;
+ ring->ring_type = ring_type;
ring->dev = hdev;
ret = hclge_alloc_cmd_desc(ring);
@@ -111,8 +97,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
@@ -121,26 +105,26 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
struct hclge_dev *hdev = ring->dev;
struct hclge_hw *hw = &hdev->hw;
- if (ring->flag == HCLGE_TYPE_CSQ) {
+ if (ring->ring_type == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
}
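
The address split above is a pure readability fix: (u32)dma and (u32)((dma >> 31) >> 1) are correct (the double shift avoids undefined behaviour when dma_addr_t is only 32 bits wide), but lower_32_bits()/upper_32_bits() from linux/kernel.h say the same thing by name. A sketch:

#include <linux/io.h>
#include <linux/kernel.h>

static void ex_write_base_addr(void __iomem *lo_reg, void __iomem *hi_reg,
			       dma_addr_t dma)
{
	/* upper_32_bits() expands to ((n) >> 16) >> 16 internally,
	 * so it is just as safe on a 32-bit dma_addr_t
	 */
	writel(lower_32_bits(dma), lo_reg);
	writel(upper_32_bits(dma), hi_reg);
}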
@@ -152,33 +136,27 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclge_desc *desc;
- int clean = 0;
u32 head;
+ int clean;
- desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
rmb(); /* Make sure head is ready before touching any data */
if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- return 0;
- }
-
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ dev_warn(&hdev->pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
}
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
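
The rewritten clean path replaces the descriptor-by-descriptor memset walk with a single wraparound distance calculation, and turns a bogus hardware head into a hard -EIO that also latches HCLGE_STATE_CMD_DISABLE. The arithmetic is the classic ring-buffer distance; a self-contained sketch with worked numbers:

/* entries consumed by hardware since the last clean, with wraparound */
static int ex_ring_clean_count(int head, int next_to_clean, int desc_num)
{
	return (head - next_to_clean + desc_num) % desc_num;
}

/*
 * ex_ring_clean_count(100, 40, 1024) == 60   (no wrap)
 * ex_ring_clean_count(4, 1020, 1024) == 8    (head wrapped past the end)
 */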
@@ -216,7 +194,7 @@ static bool hclge_is_special_opcode(u16 opcode)
**/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -227,7 +205,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclge_ring_space(&hw->cmq.csq)) {
+ if (num > hclge_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
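
Replacing the (struct hclge_dev *)hw->back cast with container_of() works because hclge_hw is embedded inside hclge_dev: the compiler computes the container address from the member offset and type-checks it, and no void * back-pointer is left to initialise or go stale. The idiom in isolation (ex_ types are illustrative):

#include <linux/kernel.h>

struct ex_hw {
	u32 io_base;
};

struct ex_dev {
	unsigned long state;
	struct ex_hw hw;	/* embedded, as hclge_hw is in hclge_dev */
};

static struct ex_dev *ex_hw_to_dev(struct ex_hw *hw)
{
	/* subtract offsetof(struct ex_dev, hw) from the member pointer */
	return container_of(hw, struct ex_dev, hw);
}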
@@ -256,33 +235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
*/
if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
do {
- if (hclge_cmd_csq_done(hw))
+ if (hclge_cmd_csq_done(hw)) {
+ complete = true;
break;
+ }
udelay(1);
timeout++;
} while (timeout < hw->cmq.tx_timeout);
}
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
+ if (!complete) {
+ retval = -EAGAIN;
+ } else {
handle = 0;
while (handle < num) {
/* Get the result of hardware write back */
desc_to_use = &hw->cmq.csq.desc[ntc];
desc[handle] = *desc_to_use;
- pr_debug("Get cmd desc:\n");
if (likely(!hclge_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[handle].retval);
else
desc_ret = le16_to_cpu(desc[0].retval);
- if ((enum hclge_cmd_return_status)desc_ret ==
- HCLGE_CMD_EXEC_SUCCESS)
+ if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
else
retval = -EIO;
- hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+ hw->cmq.last_status = desc_ret;
ntc++;
handle++;
if (ntc == hw->cmq.csq.desc_num)
@@ -290,15 +270,13 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
}
}
- if (!complete)
- retval = -EAGAIN;
-
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
- if (handle != num) {
+ if (handle < 0)
+ retval = handle;
+ else if (handle != num)
dev_warn(&hdev->pdev->dev,
"cleaned %d, need to clean %d\n", handle, num);
- }
spin_unlock_bh(&hw->cmq.csq.lock);
@@ -369,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
spin_lock_init(&hdev->hw.cmq.crq.lock);
hclge_cmd_init_regs(&hdev->hw);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d9aaa76c76eb..821d4c2f84bd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_CMD_H
#define __HCLGE_CMD_H
@@ -27,17 +21,10 @@ struct hclge_desc {
__le32 data[6];
};
-struct hclge_desc_cb {
- dma_addr_t dma;
- void *va;
- u32 length;
-};
-
struct hclge_cmq_ring {
dma_addr_t desc_dma_addr;
struct hclge_desc *desc;
- struct hclge_desc_cb *desc_cb;
- struct hclge_dev *dev;
+ struct hclge_dev *dev;
u32 head;
u32 tail;
@@ -45,7 +32,7 @@ struct hclge_cmq_ring {
u16 desc_num;
int next_to_use;
int next_to_clean;
- u8 flag;
+ u8 ring_type; /* cmq ring type */
spinlock_t lock; /* Command queue lock */
};
@@ -71,26 +58,19 @@ struct hclge_misc_vector {
struct hclge_cmq {
struct hclge_cmq_ring csq;
struct hclge_cmq_ring crq;
- u16 tx_timeout; /* Tx timeout */
+ u16 tx_timeout;
enum hclge_cmd_status last_status;
};
-#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0
-#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1
-#define HCLGE_CMD_FLAG_NEXT_SHIFT 2
-#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3
-#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4
-#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5
-
-#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT)
-#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT)
-#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT)
-#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT)
-#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT)
-#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT)
+#define HCLGE_CMD_FLAG_IN BIT(0)
+#define HCLGE_CMD_FLAG_OUT BIT(1)
+#define HCLGE_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_CMD_FLAG_WR BIT(3)
+#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
+#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
enum hclge_opcode_type {
- /* Generic command */
+ /* Generic commands */
HCLGE_OPC_QUERY_FW_VER = 0x0001,
HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
HCLGE_OPC_GBL_RST_STATUS = 0x0021,
@@ -106,18 +86,17 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
- /* Device management command */
- /* MAC commond */
+ /* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- /* MACSEC command */
+ HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
- /* PFC/Pause CMD*/
+ /* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
HCLGE_OPC_CFG_MAC_PARA = 0x0703,
@@ -148,7 +127,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
- /* Packet buffer allocate command */
+ /* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
@@ -156,11 +135,10 @@ enum hclge_opcode_type {
HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
- /* PTP command */
/* TQP management command */
HCLGE_OPC_SET_TQP_MAP = 0x0A01,
- /* TQP command */
+ /* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
@@ -172,10 +150,10 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
- /* TSO cmd */
+ /* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
- /* RSS cmd */
+ /* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGE_OPC_RSS_TC_MODE = 0x0D08,
@@ -184,15 +162,15 @@ enum hclge_opcode_type {
/* Promiscuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
- /* Vlan offload command */
+ /* Vlan offload commands */
HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
- /* Interrupts cmd */
+ /* Interrupts commands */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
- /* MAC command */
+ /* MAC commands */
HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
@@ -201,13 +179,13 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
- /* Multicast linear table cmd */
+ /* Multicast linear table commands */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
- /* VLAN command */
+ /* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
@@ -215,7 +193,7 @@ enum hclge_opcode_type {
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
- /* QCN command */
+ /* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03,
@@ -225,7 +203,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
- /* Mailbox cmd */
+ /* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
/* Led command */
@@ -381,8 +359,10 @@ struct hclge_pf_res_cmd {
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
__le16 msixcap_localid_ba_rocee;
+#define HCLGE_MSIX_OFT_ROCEE_S 0
+#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0)
#define HCLGE_PF_VEC_NUM_S 0
-#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S)
+#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
__le16 pf_intr_vector_number;
__le16 pf_own_fun_number;
__le32 rsv[3];
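
GENMASK(h, l) builds a contiguous mask covering bits h down to l, so GENMASK(7, 0) documents an 8-bit field where (0xff << HCLGE_PF_VEC_NUM_S) left the width implicit. The driver extracts such fields with its own hnae3_get_field() helper; the generic kernel spelling of the same idea, sketched with an illustrative mask:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EX_VEC_NUM_M	GENMASK(7, 0)	/* bits 7..0, i.e. 0x00ff */

static inline u16 ex_vec_num(u16 reg)
{
	/* FIELD_GET() masks and right-shifts by the mask's low bit */
	return FIELD_GET(EX_VEC_NUM_M, reg);
}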
@@ -471,8 +451,8 @@ struct hclge_rss_tc_mode_cmd {
u8 rsv[8];
};
-#define HCLGE_LINK_STS_B 0
-#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B)
+#define HCLGE_LINK_STATUS_UP_B 0
+#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
struct hclge_link_status_cmd {
u8 status;
u8 rsv[23];
@@ -571,7 +551,8 @@ struct hclge_config_auto_neg_cmd {
struct hclge_config_max_frm_size_cmd {
__le16 max_frm_size;
- u8 rsv[22];
+ u8 min_frm_size;
+ u8 rsv[21];
};
enum hclge_mac_vlan_tbl_opcode {
@@ -581,13 +562,13 @@ enum hclge_mac_vlan_tbl_opcode {
HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */
};
-#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0
-#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1
-#define HCLGE_MAC_EPORT_SW_EN_B 0xc
-#define HCLGE_MAC_EPORT_TYPE_B 0xb
-#define HCLGE_MAC_EPORT_VFID_S 0x3
+#define HCLGE_MAC_VLAN_BIT0_EN_B 0
+#define HCLGE_MAC_VLAN_BIT1_EN_B 1
+#define HCLGE_MAC_EPORT_SW_EN_B 12
+#define HCLGE_MAC_EPORT_TYPE_B 11
+#define HCLGE_MAC_EPORT_VFID_S 3
#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3)
-#define HCLGE_MAC_EPORT_PFID_S 0x0
+#define HCLGE_MAC_EPORT_PFID_S 0
#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0)
struct hclge_mac_vlan_tbl_entry_cmd {
u8 flags;
@@ -603,7 +584,7 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
-#define HCLGE_VLAN_MASK_EN_B 0x0
+#define HCLGE_VLAN_MASK_EN_B 0
struct hclge_mac_vlan_mask_entry_cmd {
u8 rsv0[2];
u8 vlan_mask;
@@ -634,23 +615,23 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
+#define HCLGE_CFG_MTA_MAC_SEL_S 0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B 0x7
+#define HCLGE_CFG_MTA_MAC_EN_B 7
struct hclge_mta_filter_mode_cmd {
u8 dmac_sel_en; /* Use lowest 2 bits as sel_mode, bit 7 as enable */
u8 rsv[23];
};
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0
+#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
struct hclge_cfg_func_mta_filter_cmd {
u8 accept; /* Only the lowest bit is used */
u8 function_id;
u8 rsv[22];
};
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0
-#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0
+#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
+#define HCLGE_CFG_MTA_ITEM_IDX_S 0
#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
struct hclge_cfg_func_mta_item_cmd {
__le16 item_idx; /* Only the lowest 12 bits are used */
@@ -795,6 +776,17 @@ struct hclge_reset_cmd {
u8 fun_reset_vfid;
u8 rsv[22];
};
+
+#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
+#define HCLGE_CMD_SERDES_DONE_B BIT(0)
+#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
+struct hclge_serdes_lb_cmd {
+ u8 mask;
+ u8 enable;
+ u8 result;
+ u8 rsv[21];
+};
+
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 955f0e3d5c95..f08ebb7caaaf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
#include "hclge_tm.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
index 7d808ee96694..278f21e02736 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_DCB_H__
#define __HCLGE_DCB_H__
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d318d35e598f..8577dfc799ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/acpi.h>
#include <linux/device.h>
@@ -793,9 +787,10 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
count += 1;
handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
- } else {
- count = -EOPNOTSUPP;
}
+
+ count++;
+ handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
ARRAY_SIZE(g_all_32bit_stats_string) +
@@ -938,18 +933,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* PF should have NIC vectors and RoCE vectors,
* NIC vectors are queued before RoCE vectors.
*/
- hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->num_msi = hdev->num_roce_msi +
+ hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
}
return 0;
@@ -1038,38 +1037,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
- cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_VMDQ_M,
- HCLGE_CFG_VMDQ_S);
- cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
- cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TQP_DESC_N_M,
- HCLGE_CFG_TQP_DESC_N_S);
-
- cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_PHY_ADDR_M,
- HCLGE_CFG_PHY_ADDR_S);
- cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_MEDIA_TP_M,
- HCLGE_CFG_MEDIA_TP_S);
- cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_RX_BUF_LEN_M,
- HCLGE_CFG_RX_BUF_LEN_S);
+ cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_VMDQ_M,
+ HCLGE_CFG_VMDQ_S);
+ cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+ cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TQP_DESC_N_M,
+ HCLGE_CFG_TQP_DESC_N_S);
+
+ cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_PHY_ADDR_M,
+ HCLGE_CFG_PHY_ADDR_S);
+ cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_MEDIA_TP_M,
+ HCLGE_CFG_MEDIA_TP_S);
+ cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_RX_BUF_LEN_M,
+ HCLGE_CFG_RX_BUF_LEN_S);
/* get mac_address */
mac_addr_tmp = __le32_to_cpu(req->param[2]);
- mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_MAC_ADDR_H_M,
- HCLGE_CFG_MAC_ADDR_H_S);
+ mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_MAC_ADDR_H_M,
+ HCLGE_CFG_MAC_ADDR_H_S);
mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
- cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_DEFAULT_SPEED_M,
- HCLGE_CFG_DEFAULT_SPEED_S);
- cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_RSS_SIZE_M,
- HCLGE_CFG_RSS_SIZE_S);
+ cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_DEFAULT_SPEED_M,
+ HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1077,9 +1076,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]);
- cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_SPEED_ABILITY_M,
- HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_M,
+ HCLGE_CFG_SPEED_ABILITY_S);
}
/* hclge_get_cfg: query the static parameter from flash
@@ -1098,22 +1097,22 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
req = (struct hclge_cfg_param_cmd *)desc[i].data;
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
true);
- hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
- HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+ hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+ HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
/* Len is in units of 4 bytes when sent to hardware */
- hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
- HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+ hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+ HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
req->offset = cpu_to_le32(offset);
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "get config failed %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
return ret;
}
hclge_parse_cfg(hcfg, desc);
+
return 0;
}
@@ -1130,13 +1129,10 @@ static int hclge_get_cap(struct hclge_dev *hdev)
/* get pf resource */
ret = hclge_query_pf_resource(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "query pf resource error %d.\n", ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
- return 0;
+ return ret;
}
static int hclge_configure(struct hclge_dev *hdev)
@@ -1189,7 +1185,7 @@ static int hclge_configure(struct hclge_dev *hdev)
/* Non-contiguous tc is currently not supported */
for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae_set_bit(hdev->hw_tc_map, i, 1);
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
@@ -1208,13 +1204,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_min);
req->tso_mss_min = cpu_to_le16(tso_mss);
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_max);
req->tso_mss_max = cpu_to_le16(tso_mss);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1265,44 +1261,43 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
req->tqp_vid = cpu_to_le16(tqp_vid);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
- ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
- return 0;
+ return ret;
}
-static int hclge_assign_tqp(struct hclge_vport *vport,
- struct hnae3_queue **tqp, u16 num_tqps)
+static int hclge_assign_tqp(struct hclge_vport *vport)
{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
- alloced < num_tqps; i++) {
+ alloced < kinfo->num_tqps; i++) {
if (!hdev->htqp[i].alloced) {
hdev->htqp[i].q.handle = &vport->nic;
hdev->htqp[i].q.tqp_index = alloced;
- tqp[alloced] = &hdev->htqp[i].q;
+ hdev->htqp[i].q.desc_num = kinfo->num_desc;
+ kinfo->tqp[alloced] = &hdev->htqp[i].q;
hdev->htqp[i].alloced = true;
alloced++;
}
}
- vport->alloc_tqps = num_tqps;
+ vport->alloc_tqps = kinfo->num_tqps;
return 0;
}
-static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
+static int hclge_knic_setup(struct hclge_vport *vport,
+ u16 num_tqps, u16 num_desc)
{
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
- kinfo->num_desc = hdev->num_desc;
+ kinfo->num_desc = num_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
kinfo->rss_size
@@ -1329,13 +1324,11 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
if (!kinfo->tqp)
return -ENOMEM;
- ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
- if (ret) {
+ ret = hclge_assign_tqp(vport);
+ if (ret)
dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
- return -EINVAL;
- }
- return 0;
+ return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
@@ -1397,7 +1390,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->numa_node_mask = hdev->numa_node_mask;
if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
- ret = hclge_knic_setup(vport, num_tqps);
+ ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
if (ret) {
dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
ret);
@@ -1487,13 +1480,11 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
@@ -1501,13 +1492,10 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
{
int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "tx buffer alloc failed %d\n", ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
- return 0;
+ return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
@@ -1825,17 +1813,13 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
(1 << HCLGE_TC0_PRI_BUF_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"rx private buffer alloc cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
-#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
@@ -1863,25 +1847,21 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
req->tc_wl[j].high =
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->tc_wl[j].low =
cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"rx private waterline config cmd failed %d\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
@@ -1911,24 +1891,20 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
req->com_thrd[j].high =
cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_thrd[j].low =
cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"common threshold config cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
@@ -1943,23 +1919,17 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
req = (struct hclge_rx_com_wl *)desc.data;
req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
- req->com_wl.high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
- req->com_wl.low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"common waterline config cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
@@ -2074,7 +2044,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
- HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -2118,48 +2088,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
- hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 6);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 6);
break;
case HCLGE_MAC_SPEED_100M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 7);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 7);
break;
case HCLGE_MAC_SPEED_1G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 0);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 0);
break;
case HCLGE_MAC_SPEED_10G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 1);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 1);
break;
case HCLGE_MAC_SPEED_25G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 2);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 2);
break;
case HCLGE_MAC_SPEED_40G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 3);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 3);
break;
case HCLGE_MAC_SPEED_50G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 4);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 4);
break;
case HCLGE_MAC_SPEED_100G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 5);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 5);
break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
}
- hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
- 1);
+ hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+ 1);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2201,18 +2171,16 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return ret;
}
- *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
- speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
- HCLGE_QUERY_SPEED_S);
+ *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+ speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+ HCLGE_QUERY_SPEED_S);
ret = hclge_parse_speed(speed_tmp, speed);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"could not parse speed(=%d), %d\n", speed_tmp, ret);
- return -EIO;
- }
- return 0;
+ return ret;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
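
For reference, the hnae3_set_field()/hnae3_get_field() family that the old hnae_* calls are renamed to here are plain mask-and-shift helpers. A simplified sketch of their semantics (an approximation, not the verbatim hnae3.h definitions):

	#define hnae3_set_field(origin, mask, shift, val)		\
		do {							\
			(origin) &= ~(mask);				\
			(origin) |= (((val) << (shift)) & (mask));	\
		} while (0)
	#define hnae3_get_field(origin, mask, shift)	(((origin) & (mask)) >> (shift))
	#define hnae3_set_bit(origin, shift, val)	\
		hnae3_set_field(origin, BIT(shift), shift, val)
	#define hnae3_get_bit(origin, shift)	\
		hnae3_get_field(origin, BIT(shift), shift)

For example, hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, 6) clears the speed field and writes the value 6 into it.
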
@@ -2225,17 +2193,15 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
req = (struct hclge_config_auto_neg_cmd *)desc.data;
- hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
@@ -2269,8 +2235,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
- hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
+ hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
ether_addr_copy(req->mac_mask, mac_mask);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2341,13 +2307,11 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mtu = ETH_DATA_LEN;
ret = hclge_set_mtu(handle, mtu);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"set mtu failed ret=%d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -2386,7 +2350,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
}
req = (struct hclge_link_status_cmd *)desc.data;
- link_status = req->status & HCLGE_LINK_STATUS;
+ link_status = req->status & HCLGE_LINK_STATUS_UP_M;
return !!link_status;
}
@@ -2505,7 +2469,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
u32 cmdq_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
/* Assumption: If by any chance reset and mailbox events are reported
@@ -2517,12 +2481,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
@@ -2614,6 +2580,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -2705,7 +2677,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
}
val = hclge_read_dev(&hdev->hw, reg);
- while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+ while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
val = hclge_read_dev(&hdev->hw, reg);
cnt++;
@@ -2727,8 +2699,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+ hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
req->fun_reset_vfid = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2747,13 +2718,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_CORE_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Core Reset requested\n");
break;
@@ -2810,8 +2781,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
break;
default:
- dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
- hdev->reset_type);
break;
}
@@ -2824,16 +2793,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
- /* perform reset of the stack & ae device for a client */
+ struct hnae3_handle *handle;
+ /* perform reset of the stack & ae device for a client */
+ handle = &hdev->vport[0].nic;
+ rtnl_lock();
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- rtnl_unlock();
hclge_clear_reset_cause(hdev);
} else {
@@ -2843,6 +2813,8 @@ static void hclge_reset(struct hclge_dev *hdev)
}
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ handle->last_reset_time = jiffies;
+ rtnl_unlock();
}
static void hclge_reset_event(struct hnae3_handle *handle)
@@ -2855,8 +2827,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
* know this if last reset request did not occur very recently (watchdog
 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*HZ)
* In case of new request we reset the "reset level" to PF reset.
+ * If this is a repeat of the most recent reset request, throttle it:
+ * a new reset is not allowed within 3*HZ of the previous request.
*/
- if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+ if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ return;
+ else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
handle->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
@@ -2868,8 +2845,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
if (handle->reset_level < HNAE3_GLOBAL_RESET)
handle->reset_level++;
-
- handle->last_reset_time = jiffies;
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
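
The throttle added above is the usual jiffies comparison idiom; a minimal standalone sketch of the same pattern (hypothetical helper name):

	#include <linux/jiffies.h>

	/* Drop repeat requests arriving within 3*HZ of the previous one;
	 * time_before() handles jiffies wrap-around correctly.
	 */
	static bool reset_request_allowed(unsigned long last_reset_time)
	{
		return !time_before(jiffies, last_reset_time + 3 * HZ);
	}
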
@@ -3110,23 +3085,21 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
u16 mode = 0;
- hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+ HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+ HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
req->rss_tc_mode[i] = cpu_to_le16(mode);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss tc mode fail, status = %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
@@ -3149,13 +3122,10 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss input fail, status = %d\n", ret);
- return ret;
- }
-
- return 0;
+ return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
@@ -3491,16 +3461,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
i = 0;
for (node = ring_chain; node; node = node->next) {
tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S));
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -3603,12 +3573,11 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Set promisc mode fail, status is %d.\n", ret);
- return ret;
- }
- return 0;
+
+ return ret;
}
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -3648,20 +3617,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3689,7 +3658,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -3704,6 +3673,55 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
+{
+#define HCLGE_SERDES_RETRY_MS 10
+#define HCLGE_SERDES_RETRY_NUM 100
+ struct hclge_serdes_lb_cmd *req;
+ struct hclge_desc desc;
+ int ret, i = 0;
+
+ req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+
+ if (en) {
+ req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ } else {
+ req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback set fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ do {
+ msleep(HCLGE_SERDES_RETRY_MS);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback get, ret = %d\n", ret);
+ return ret;
+ }
+ } while (++i < HCLGE_SERDES_RETRY_NUM &&
+ !(req->result & HCLGE_CMD_SERDES_DONE_B));
+
+ if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+ return -EBUSY;
+ } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
@@ -3715,6 +3733,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_MAC_INTER_LOOP_MAC:
ret = hclge_set_mac_loopback(hdev, en);
break;
+ case HNAE3_MAC_INTER_LOOP_SERDES:
+ ret = hclge_set_serdes_loopback(hdev, en);
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -3763,7 +3784,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i, ret;
+ int i;
for (i = 0; i < vport->alloc_tqps; i++)
hclge_tqp_enable(hdev, i, 0, true);
@@ -3777,9 +3798,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- ret = hclge_mac_start_phy(hdev);
- if (ret)
- return ret;
+ hclge_mac_start_phy(hdev);
return 0;
}
@@ -3911,7 +3930,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
#define HCLGE_FUNC_NUMBER_PER_DESC 6
int i, j;
- for (i = 0; i < HCLGE_DESC_NUMBER; i++)
+ for (i = 1; i < HCLGE_DESC_NUMBER; i++)
for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
if (desc[i].data[j])
return false;
@@ -3953,20 +3972,18 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
req = (struct hclge_mta_filter_mode_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
- hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
+ hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
+ enable);
+ hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
+ HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Config mat filter mode failed for cmd_send, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
@@ -3980,19 +3997,17 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
+ enable);
req->function_id = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Config func_id enable failed for cmd_send, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_mta_table_item(struct hclge_vport *vport,
@@ -4007,10 +4022,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
- hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
+ hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+ HCLGE_CFG_MTA_ITEM_IDX_S, idx);
req->item_idx = cpu_to_le16(item_idx);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4257,17 +4272,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
- HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
- HCLGE_MAC_EPORT_PFID_S, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
req.egress_port = cpu_to_le16(egress_port);
@@ -4318,8 +4326,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
@@ -4331,7 +4339,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_mc_addr_common(vport, addr);
+ return hclge_add_mc_addr_common(vport, addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -4351,10 +4359,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4418,10 +4426,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4604,13 +4612,11 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req->vlan_fe = filter_en;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
#define HCLGE_FILTER_TYPE_VF 0
@@ -4802,19 +4808,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
- vcfg->accept_tag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
- vcfg->accept_untag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
- vcfg->accept_tag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
- vcfg->accept_untag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
- vcfg->insert_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
- vcfg->insert_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4840,14 +4846,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
- vcfg->strip_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
- vcfg->strip_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
- vcfg->vlan1_vlan_prionly ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
- vcfg->vlan2_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4999,16 +5005,15 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->min_frm_size = HCLGE_MAC_MIN_FRAME;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- return ret;
- }
-
- hdev->mps = max_frm_size;
+ else
+ hdev->mps = max_frm_size;
- return 0;
+ return ret;
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -5043,7 +5048,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
- hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -5073,7 +5078,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
@@ -5380,12 +5385,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
- mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
- HCLGE_PHY_MDIX_CTRL_S);
+ mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+ HCLGE_PHY_MDIX_CTRL_S);
retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
- mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
- is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+ mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+ is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
@@ -5412,6 +5417,16 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
+static int hclge_init_instance_hw(struct hclge_dev *hdev)
+{
+ return hclge_mac_connect_phy(hdev);
+}
+
+static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
+{
+ hclge_mac_disconnect_phy(hdev);
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -5431,6 +5446,13 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (ret)
return ret;
+ ret = hclge_init_instance_hw(hdev);
+ if (ret) {
+ client->ops->uninit_instance(&vport->nic,
+ 0);
+ return ret;
+ }
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -5493,6 +5515,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (client->ops->uninit_instance) {
+ hclge_uninit_instance_hw(hdev);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -5531,7 +5554,6 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->back = hdev;
hw->io_base = pcim_iomap(pdev, 2, 0);
if (!hw->io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
@@ -5562,6 +5584,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pci_disable_device(pdev);
}
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -5577,8 +5623,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
- hdev->reset_request = 0;
- hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -5702,12 +5746,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
- set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ hclge_state_init(hdev);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -5812,16 +5851,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
+ hclge_state_uninit(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
@@ -5905,9 +5935,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
u32 *rss_indir;
int ret, i;
+ /* Free the old tqps, then reallocate with the new tqp number during nic setup */
hclge_release_tqp(vport);
- ret = hclge_knic_setup(vport, new_tqps_num);
+ ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
if (ret) {
dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
return ret;
@@ -6149,8 +6180,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data;
- hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
- HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+ hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -6280,7 +6311,6 @@ static const struct hnae3_ae_ops hclge_ops = {
static struct hnae3_ae_algo ae_algo = {
.ops = &hclge_ops,
- .name = HCLGE_NAME,
.pdev_id_table = ae_algo_pci_tbl,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7488534528cd..1528fb3fa6be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
@@ -22,8 +16,6 @@
#define HCLGE_INVALID_VPORT 0xffff
-#define HCLGE_ROCE_VECTOR_OFFSET 96
-
#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
@@ -40,7 +32,7 @@
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGE_RSS_HASH_ALGO_MASK 0xf
+#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
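
GENMASK(h, l) builds a contiguous mask covering bits l through h inclusive, so the new definition keeps the old value while making the field width explicit:

	#include <linux/bitops.h>

	/* GENMASK(3, 0) == 0b1111 == 0xf, the same value as the old literal. */
	static const unsigned int rss_hash_algo_mask = GENMASK(3, 0);
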
@@ -77,11 +69,11 @@
/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17
-#define HCLGE_PHY_MDIX_CTRL_S (5)
+#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
-#define HCLGE_PHY_MDIX_STATUS_B (6)
-#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+#define HCLGE_PHY_MDIX_STATUS_B 6
+#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
@@ -89,9 +81,10 @@
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
+#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
-#define HCLGE_GLOBAL_RESET_BIT 0x0
-#define HCLGE_CORE_RESET_BIT 0x1
+#define HCLGE_GLOBAL_RESET_BIT 0
+#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -128,6 +121,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING,
+ HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_MAX
};
@@ -138,12 +132,6 @@ enum hclge_evt_cause {
};
#define HCLGE_MPF_ENBALE 1
-struct hclge_caps {
- u16 num_tqp;
- u16 num_buffer_cell;
- u32 flag;
- u16 vmdq;
-};
enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
@@ -189,8 +177,6 @@ struct hclge_hw {
struct hclge_mac mac;
int num_vec;
struct hclge_cmq cmq;
- struct hclge_caps caps;
- void *back;
};
/* TQP stats */
@@ -202,7 +188,10 @@ struct hlcge_tqp_stats {
};
struct hclge_tqp {
- struct device *dev; /* Device for DMA mapping */
+ /* copy of the device pointer from pci_dev,
+ * used when performing DMA mapping
+ */
+ struct device *dev;
struct hnae3_queue q;
struct hlcge_tqp_stats tqp_stats;
u16 index; /* Global index in a NIC controller */
@@ -492,13 +481,11 @@ struct hclge_dev {
u16 num_tqps; /* Num task queue pairs of this PF */
u16 num_req_vfs; /* Num VFs requested for this PF */
- /* Base task tqp physical id of this PF */
- u16 base_tqp_pid;
+ u16 base_tqp_pid; /* Base task tqp physical id of this PF */
u16 alloc_rss_size; /* Allocated RSS task queue */
u16 rss_size_max; /* HW defined max RSS task queue */
- /* Num of guaranteed filters for this PF */
- u16 fdir_pf_filter_count;
+ u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
u32 numa_node_mask;
u16 rx_buf_len;
@@ -520,6 +507,7 @@ struct hclge_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 roce_base_msix_offset;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
@@ -560,7 +548,7 @@ struct hclge_dev {
u32 mps; /* Max packet size */
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
- bool enable_mta; /* Mutilcast filter enable */
+ bool enable_mta; /* Multicast filter enable */
struct hclge_vlan_type_cfg vlan_type_cfg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7541cb9b71ce..f34851c91eb3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
}
}
-/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
* msg[0]: opcode
* msg[1]: <not relevant to this function>
* msg[2]: ring_num
* msg[3]: first ring type (TX|RX)
* msg[4]: first tqp id
- * msg[5] ~ msg[14]: other ring type and tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
*/
static int hclge_get_ring_chain_from_mbx(
struct hclge_mbx_vf_to_pf_cmd *req,
@@ -128,12 +130,12 @@ static int hclge_get_ring_chain_from_mbx(
HCLGE_MBX_RING_NODE_VARIABLE_NUM))
return -ENOMEM;
- hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
- hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[5]);
+ hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[5]);
cur_chain = ring_chain;
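
The msg[] layout documented above can be unpacked with straight array indexing; a hypothetical sketch for the first ring entry:

	#include <linux/types.h>

	/* Hypothetical helper: decode the first ring entry of a VF->PF
	 * ring-to-vector mailbox message, following the layout above.
	 */
	static void decode_first_ring(const u8 *msg, u8 *ring_type,
				      u8 *tqp_id, u8 *gl_idx)
	{
		*ring_type = msg[3];	/* first ring type (TX|RX) */
		*tqp_id = msg[4];	/* first tqp id */
		*gl_idx = msg[5];	/* first int_gl idx */
	}
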
@@ -142,19 +144,19 @@ static int hclge_get_ring_chain_from_mbx(
if (!new_chain)
goto err;
- hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
- hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
cur_chain->next = new_chain;
cur_chain = new_chain;
@@ -460,7 +462,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 9f7932e423b5..398971a062f4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
#include <linux/kernel.h>
@@ -67,16 +61,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
mdio_cmd->data_wr = cpu_to_le16(data);
@@ -105,16 +99,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
/* Read out phy data */
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -125,7 +119,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return ret;
}
- if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+ if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
dev_err(&hdev->pdev->dev, "mdio read data error\n");
return -EIO;
}
@@ -199,7 +193,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
netdev_err(netdev, "failed to configure flow control.\n");
}
-int hclge_mac_start_phy(struct hclge_dev *hdev)
+int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -208,6 +202,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return 0;
+ phydev->supported &= ~SUPPORTED_FIBRE;
+
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
@@ -219,11 +215,29 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
phydev->advertising = phydev->supported;
- phy_start(phydev);
-
return 0;
}
+void hclge_mac_disconnect_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_disconnect(phydev);
+}
+
+void hclge_mac_start_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_start(phydev);
+}
+
void hclge_mac_stop_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
@@ -233,5 +247,4 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
return;
phy_stop(phydev);
- phy_disconnect(phydev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index c5e91cfb8f2c..5fbf7dddb5d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -1,17 +1,13 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
int hclge_mac_mdio_config(struct hclge_dev *hdev);
-int hclge_mac_start_phy(struct hclge_dev *hdev);
+int hclge_mac_connect_phy(struct hclge_dev *hdev);
+void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
+void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 262c125f8137..5db70a1451c5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
@@ -1184,10 +1178,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
- grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
- HCLGE_BP_GRP_ID_S);
- sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
- HCLGE_BP_SUB_GRP_ID_S);
+ grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
@@ -1223,6 +1217,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
tx_en = true;
rx_en = true;
break;
+ case HCLGE_FC_PFC:
+ tx_en = false;
+ rx_en = false;
+ break;
default:
tx_en = true;
rx_en = true;
@@ -1240,8 +1238,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index c2b6e8a6700f..dd4c194747c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_TM_H
#define __HCLGE_TM_H
@@ -123,10 +117,11 @@ struct hclge_port_shapping_cmd {
};
#define hclge_tm_set_field(dest, string, val) \
- hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH), val)
+ hnae3_set_field((dest), \
+ (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+ hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 1bbfe131b596..fb471fe2c494 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- hclgevf_ring_to_dma_dir(ring));
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
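
The conversion above replaces kzalloc() plus dma_map_single() with a single coherent allocation: one call yields both the CPU and DMA addresses, the buffer stays mapped for the ring's lifetime, and the dma_mapping_error() unwind path disappears. A minimal sketch of the pattern (hypothetical helper; kernels of this vintage still provide dma_zalloc_coherent()):

	#include <linux/dma-mapping.h>

	/* Allocate a long-lived, zeroed DMA ring in one step. */
	static int alloc_ring_mem(struct device *dev, size_t size,
				  void **cpu_addr, dma_addr_t *dma_addr)
	{
		*cpu_addr = dma_zalloc_coherent(dev, size, dma_addr, GFP_KERNEL);
		return *cpu_addr ? 0 : -ENOMEM;
	}

The matching teardown is a single dma_free_coherent(dev, size, *cpu_addr, *dma_addr).
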
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 621c6cbacf76..19b32860309c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -82,6 +82,7 @@ struct hclgevf_cmq {
enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -134,6 +135,19 @@ struct hclgevf_query_version_cmd {
__le32 firmware_rsv[5];
};
+#define HCLGEVF_MSIX_OFT_ROCEE_S 0
+#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
+#define HCLGEVF_VEC_NUM_S 0
+#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S)
+struct hclgevf_query_res_cmd {
+ __le16 tqp_num;
+ __le16 reserved;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_ba_rocee;
+ __le16 vf_intr_vector_number;
+ __le16 rsv[7];
+};
+
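
A hypothetical sketch of how the new masks would be applied to a query-resource response, mirroring the field accessors used on the PF side:

	/* Hypothetical helper: extract the RoCE MSI-X offset from the
	 * HCLGEVF_OPC_QUERY_VF_RSRC response using the masks above.
	 */
	static u16 hclgevf_get_roce_msix_offset(struct hclgevf_desc *desc)
	{
		struct hclgevf_query_res_cmd *req =
			(struct hclgevf_query_res_cmd *)desc->data;

		return hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				       HCLGEVF_MSIX_OFT_ROCEE_M,
				       HCLGEVF_MSIX_OFT_ROCEE_S);
	}
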
#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a17872aab168..9c0091f2addf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle)
}
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
- int vector,
+ int vector_id,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_ring_chain_node *node;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
- int i = 0, vector_id;
+ int i = 0;
int status;
u8 type;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- vector_id = hclgevf_get_vector_index(hdev, vector);
- if (vector_id < 0) {
- dev_err(&handle->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
- return vector_id;
- }
for (node = ring_chain; node; node = node->next) {
int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
}
req->msg[idx_offset] =
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
req->msg[idx_offset + 1] = node->tqp_index;
- req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S);
+ req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
i++;
if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
@@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
- return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}
static int hclgevf_unmap_ring_from_vector(
@@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector(
return vector_id;
}
- ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vector=%d, ret =%d\n",
@@ -648,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector(
static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
- hclgevf_free_vector(hdev, vector);
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "hclgevf_put_vector get vector index fail. ret =%d\n",
+ vector_id);
+ return vector_id;
+ }
+
+ hclgevf_free_vector(hdev, vector_id);
return 0;
}
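The hunks above move the vector-handle-to-index translation out of hclgevf_bind_ring_to_vector() and into each public entry point (map_ring_to_vector, unmap_ring_from_vector, put_vector), so the bind helper can be called with an internal vector_id directly. A minimal, self-contained sketch of the lookup pattern — the table contents and the -22 (-EINVAL) return are hypothetical, but hclgevf_get_vector_index() walks hdev->vector_status the same way:

#include <stdio.h>

#define NUM_SLOTS 4
#define SLOT_FREE 0xffff	/* made-up "unused slot" sentinel */

static unsigned short vector_status[NUM_SLOTS] = { 5, SLOT_FREE, 12, SLOT_FREE };

static int get_vector_index(int vector)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++)
		if (vector_status[i] == vector)
			return i;	/* internal id used by firmware commands */
	return -22;			/* -EINVAL: callers must check < 0 */
}

int main(void)
{
	printf("vector 12 -> slot %d\n", get_vector_index(12));	/* slot 2 */
	printf("vector 7  -> slot %d\n", get_vector_index(7));	/* -22 */
	return 0;
}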
@@ -990,8 +1009,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
/* wait to check the hardware reset completion status */
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
+ while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
+ (cnt < HCLGEVF_RESET_WAIT_CNT)) {
msleep(HCLGEVF_RESET_WAIT_MS);
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
cnt++;
@@ -1351,14 +1370,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
struct hnae3_handle *roce = &hdev->roce;
struct hnae3_handle *nic = &hdev->nic;
- roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+ roce->rinfo.num_vectors = hdev->num_roce_msix;
if (hdev->num_msi_left < roce->rinfo.num_vectors ||
hdev->num_msi_left == 0)
return -EINVAL;
- roce->rinfo.base_vector =
- hdev->vector_status[hdev->num_msi_used];
+ roce->rinfo.base_vector = hdev->roce_base_vector;
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
@@ -1501,10 +1519,15 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
if (hclgevf_dev_ongoing_reset(hdev))
return 0;
- hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+ vectors = pci_alloc_irq_vectors(pdev,
+ hdev->roce_base_msix_offset + 1,
+ hdev->num_msi,
+ PCI_IRQ_MSIX);
+ else
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
- PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
"failed(%d) to allocate MSI/MSI-X vectors\n",
@@ -1519,6 +1542,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -1582,9 +1606,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
-static int hclgevf_init_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
int ret;
switch (client->type) {
@@ -1635,9 +1660,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
return 0;
}
-static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static void hclgevf_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
/* un-init roce, if it exists */
if (hdev->roce_client)
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
@@ -1648,22 +1675,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
client->ops->uninit_instance(&hdev->nic, 0);
}
-static int hclgevf_register_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- return hclgevf_init_instance(hdev, client);
-}
-
-static void hclgevf_unregister_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- hclgevf_uninit_instance(hdev, client);
-}
-
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -1727,6 +1738,45 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_disable_device(pdev);
}
+static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_query_res_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query vf resource failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclgevf_query_res_cmd *)desc.data;
+
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGEVF_MSIX_OFT_ROCEE_M,
+ HCLGEVF_MSIX_OFT_ROCEE_S);
+ hdev->num_roce_msix =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ /* A VF has both NIC and RoCE vectors; NIC vectors are queued
+ * before RoCE vectors, and the offset is fixed to 64.
+ */
+ hdev->num_msi = hdev->num_roce_msix +
+ hdev->roce_base_msix_offset;
+ } else {
+ hdev->num_msi =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+ }
+
+ return 0;
+}
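To make the vector layout in the comment above concrete: if firmware reports a hypothetical roce_base_msix_offset of 64 and 16 RoCE vectors, the VF sizes its MSI-X request as num_msi = 64 + 16 = 80, with NIC interrupts in entries 0..63 and RoCE interrupts in 64..79. A small sketch of that bookkeeping (field names mirror the driver's, all values made up):

#include <stdio.h>

int main(void)
{
	unsigned int base_irq = 100;			/* stands in for pdev->irq */
	unsigned int roce_base_msix_offset = 64;	/* reported by firmware */
	unsigned int num_roce_msix = 16;		/* hypothetical */
	unsigned int num_msi = roce_base_msix_offset + num_roce_msix;

	/* NIC vectors occupy entries 0..63, RoCE vectors 64..79. */
	printf("num_msi=%u roce_base_vector=%u\n",
	       num_msi, base_irq + roce_base_msix_offset);
	return 0;
}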
+
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -1744,18 +1794,26 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ /* Get vf resource */
+ ret = hclgevf_query_vf_resource(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query vf status error, ret = %d.\n", ret);
+ goto err_query_vf;
+ }
+
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_irq_init;
+ goto err_query_vf;
}
hclgevf_state_init(hdev);
- ret = hclgevf_cmd_init(hdev);
- if (ret)
- goto err_cmd_init;
-
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
@@ -1811,11 +1869,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
err_config:
hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
- hclgevf_cmd_uninit(hdev);
-err_cmd_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_irq_init:
+err_query_vf:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
hclgevf_pci_uninit(hdev);
return ret;
}
@@ -1924,8 +1982,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
- .init_client_instance = hclgevf_register_client,
- .uninit_client_instance = hclgevf_unregister_client,
+ .init_client_instance = hclgevf_init_client_instance,
+ .uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
@@ -1962,7 +2020,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
static struct hnae3_ae_algo ae_algovf = {
.ops = &hclgevf_ops,
- .name = HCLGEVF_NAME,
.pdev_id_table = ae_algovf_pci_tbl,
};
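Most of the churn in this file is the mechanical hnae_ to hnae3_ rename of the bit-field accessors. Assuming the usual paired mask/shift encoding (each *_M mask with an *_S shift, as in HCLGEVF_RSS_TC_SIZE_M/_S), the helpers reduce to the sketch below — illustrative macros, not the driver's exact definitions:

#include <stdio.h>

/* Hypothetical mask/shift pair in the style of the driver's *_M/*_S names. */
#define DEMO_FIELD_S	4
#define DEMO_FIELD_M	(0xf << DEMO_FIELD_S)

#define demo_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)

#define demo_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	unsigned int reg = 0;

	demo_set_field(reg, DEMO_FIELD_M, DEMO_FIELD_S, 0x9);
	printf("reg=%#x field=%#x\n", reg,	/* reg=0x90 field=0x9 */
	       demo_get_field(reg, DEMO_FIELD_M, DEMO_FIELD_S));
	return 0;
}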
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 0656e8e5c5f0..b23ba171473c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -12,7 +12,6 @@
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
-#define HCLGEVF_ROCEE_VECTOR_NUM 0
#define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff
@@ -150,6 +149,9 @@ struct hclgevf_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 num_roce_msix; /* Num of roce vectors for this VF */
+ u16 roce_base_msix_offset;
+ int roce_base_vector;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index b598c06af8e0..e9d5a4f96304 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* tail the async message in arq */
msg_q = hdev->arq.msg_q[hdev->arq.tail];
- memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE);
+ memcpy(&msg_q[0], req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
hdev->arq.count++;
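The mailbox change above fixes a classic element-count-versus-byte-count bug: HCLGE_MBX_MAX_ARQ_MSG_SIZE counts u16 message words, while memcpy() takes a byte count, so the old call copied only half of each queued async message. A reduced reproduction (array size hypothetical):

#include <stdio.h>
#include <string.h>

#define ARQ_MSG_SIZE 8	/* message length in u16 words */

int main(void)
{
	unsigned short src[ARQ_MSG_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned short dst[ARQ_MSG_SIZE] = { 0 };

	/* Buggy: copies ARQ_MSG_SIZE *bytes*, i.e. only 4 of the 8 words. */
	memcpy(dst, src, ARQ_MSG_SIZE);
	printf("dst[7]=%u (truncated)\n", dst[7]);	/* prints 0 */

	/* Fixed: scale the word count to bytes, as the patch does. */
	memcpy(dst, src, ARQ_MSG_SIZE * sizeof(unsigned short));
	printf("dst[7]=%u (complete)\n", dst[7]);	/* prints 8 */
	return 0;
}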
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 79b567447084..6b19607a4caa 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_cmd_fw_ctxt fw_ctxt;
- struct hinic_pfhwdev *pfhwdev;
u16 out_size;
int err;
@@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
&fw_ctxt, sizeof(fw_ctxt),
&fw_ctxt, &out_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index b9db6d649743..cb239627770f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -635,17 +635,18 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
}
/**
- * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
+ * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci;
+ * only one wqebb of the wqe is read
* @sq: send queue
* @skb: return skb that was saved
- * @wqe_size: the size of the wqe
+ * @wqe_size: pointer to the wqe size
* @cons_idx: consumer index of the wqe
*
* Return wqe in ci position
**/
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx)
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx)
{
struct hinic_hw_wqe *hw_wqe;
struct hinic_sq_wqe *sq_wqe;
@@ -658,6 +659,8 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
if (IS_ERR(hw_wqe))
return NULL;
+ *skb = sq->saved_skb[*cons_idx];
+
sq_wqe = &hw_wqe->sq_wqe;
ctrl = &sq_wqe->ctrl;
ctrl_info = be32_to_cpu(ctrl->ctrl_info);
@@ -665,11 +668,28 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
*wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
*wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
+ *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
- *skb = sq->saved_skb[*cons_idx];
+ return &hw_wqe->sq_wqe;
+}
+
+/**
+ * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
+ * @sq: send queue
+ * @skb: return skb that was saved
+ * @wqe_size: the size of the wqe
+ * @cons_idx: consumer index of the wqe
+ *
+ * Return wqe in ci position
+ **/
+struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int wqe_size, u16 *cons_idx)
+{
+ struct hinic_hw_wqe *hw_wqe;
- /* using the real wqe size to read wqe again */
- hw_wqe = hinic_read_wqe(sq->wq, *wqe_size, cons_idx);
+ hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
+ *skb = sq->saved_skb[*cons_idx];
return &hw_wqe->sq_wqe;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index df729a1587e9..6c84f83ec283 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -165,7 +165,11 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx);
+ unsigned int wqe_size, u16 *cons_idx);
+
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx);
void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 5b122728dcb4..09e9da10b786 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev)
hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
nic_dev, link_status_event_handler);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index e2e5cdc7119c..4c0f7eda1166 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
{
struct hinic_rq *rq = rxq->rq;
+ irq_set_affinity_hint(rq->irq, NULL);
free_irq(rq->irq, rxq);
rx_del_napi(rxq);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9128858479c4..c5fca0356c9c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
txq->txq_stats.tx_busy++;
u64_stats_update_end(&txq->txq_stats.syncp);
err = NETDEV_TX_BUSY;
+ wqe_size = 0;
goto flush_skbs;
}
@@ -282,7 +283,11 @@ static void free_all_tx_skbs(struct hinic_txq *txq)
int nr_sges;
u16 ci;
- while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {
+ while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
+ if (!sq_wqe)
+ break;
+
nr_sges = skb_shinfo(skb)->nr_frags + 1;
hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
@@ -318,11 +323,21 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
do {
hw_ci = HW_CONS_IDX(sq) & wq->mask;
- sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci);
+ /* Read one WQEBB to get the real WQE size and consumer index. */
+ sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
if ((!sq_wqe) ||
(((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
break;
+ /* If this WQE has multiple WQEBBs, read it again to get the
+ * full-size WQE.
+ */
+ if (wqe_size > wq->wqebb_size) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
+ if (unlikely(!sq_wqe))
+ break;
+ }
+
tx_bytes += skb->len;
pkts++;
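The reworked poll loop reads a single WQEBB first — always safe, since every WQE occupies at least one — and re-reads with the real size only once it knows the WQE spans multiple WQEBBs; reading the full WQE up front could cross into descriptors the hardware has not completed. A self-contained analogue of that two-step read over a toy ring (block size and header layout invented for illustration):

#include <stdio.h>

#define WQEBB_SIZE 64
#define NUM_WQEBB  8

static unsigned char ring[NUM_WQEBB * WQEBB_SIZE];

/* Toy header: the first byte of an entry stores its total size. */
static unsigned int read_entry_size(unsigned int ci)
{
	return ring[ci * WQEBB_SIZE];	/* peeks at one WQEBB only */
}

int main(void)
{
	unsigned int ci = 0, size, nblocks;

	ring[0] = 3 * WQEBB_SIZE;	/* a 3-WQEBB entry at slot 0 */

	size = read_entry_size(ci);	/* step 1: one-WQEBB read */
	nblocks = (size + WQEBB_SIZE - 1) / WQEBB_SIZE;
	if (nblocks > 1)		/* step 2: full read is now safe */
		printf("entry spans %u WQEBBs (%u bytes)\n", nblocks, size);
	return 0;
}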
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d0e196bff081..dafdd4ade705 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
return;
failure:
- dev_info(dev, "replenish pools failure\n");
+ if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
+ dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
pool->free_map[pool->next_free] = index;
pool->rx_buff[index].skb = NULL;
@@ -717,23 +718,6 @@ static int init_tx_pools(struct net_device *netdev)
return 0;
}
-static void release_error_buffers(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
- list_del(&error_buff->list);
- dma_unmap_single(dev, error_buff->dma, error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- kfree(error_buff);
- }
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-}
-
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
int i;
@@ -895,7 +879,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_tx_pools(adapter);
release_rx_pools(adapter);
- release_error_buffers(adapter);
release_napi(adapter);
release_login_rsp_buffer(adapter);
}
@@ -1617,7 +1600,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
&tx_crq);
}
if (lpar_rc != H_SUCCESS) {
- dev_err(dev, "tx failed with code %ld\n", lpar_rc);
+ if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+ dev_err_ratelimited(dev, "tx: send failed\n");
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
@@ -1825,8 +1809,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = ibmvnic_login(netdev);
if (rc) {
- adapter->state = VNIC_PROBED;
- return 0;
+ adapter->state = reset_state;
+ return rc;
}
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
@@ -3204,6 +3188,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
return crq;
}
+static void print_subcrq_error(struct device *dev, int rc, const char *func)
+{
+ switch (rc) {
+ case H_PARAMETER:
+ dev_warn_ratelimited(dev,
+ "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
+ func, rc);
+ break;
+ case H_CLOSED:
+ dev_warn_ratelimited(dev,
+ "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
+ func, rc);
+ break;
+ default:
+ dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
+ break;
+ }
+}
+
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
union sub_crq *sub_crq)
{
@@ -3230,11 +3233,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
cpu_to_be64(u64_crq[2]),
cpu_to_be64(u64_crq[3]));
- if (rc) {
- if (rc == H_CLOSED)
- dev_warn(dev, "CRQ Queue closed\n");
- dev_err(dev, "Send error (rc=%d)\n", rc);
- }
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
return rc;
}
@@ -3252,11 +3252,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
cpu_to_be64(remote_handle),
ioba, num_entries);
- if (rc) {
- if (rc == H_CLOSED)
- dev_warn(dev, "CRQ Queue closed\n");
- dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
- }
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
return rc;
}
@@ -3828,132 +3825,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
-static void handle_error_info_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp;
- unsigned long flags;
- bool found = false;
- int i;
-
- if (!crq->request_error_rsp.rc.code) {
- dev_info(dev, "Request Error Rsp returned with rc=%x\n",
- crq->request_error_rsp.rc.code);
- return;
- }
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
- if (error_buff->error_id == crq->request_error_rsp.error_id) {
- found = true;
- list_del(&error_buff->list);
- break;
- }
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- if (!found) {
- dev_err(dev, "Couldn't find error id %x\n",
- be32_to_cpu(crq->request_error_rsp.error_id));
- return;
- }
-
- dev_err(dev, "Detailed info for error id %x:",
- be32_to_cpu(crq->request_error_rsp.error_id));
-
- for (i = 0; i < error_buff->len; i++) {
- pr_cont("%02x", (int)error_buff->buff[i]);
- if (i % 8 == 7)
- pr_cont(" ");
- }
- pr_cont("\n");
-
- dma_unmap_single(dev, error_buff->dma, error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- kfree(error_buff);
-}
-
-static void request_error_information(struct ibmvnic_adapter *adapter,
- union ibmvnic_crq *err_crq)
-{
- struct device *dev = &adapter->vdev->dev;
- struct net_device *netdev = adapter->netdev;
- struct ibmvnic_error_buff *error_buff;
- unsigned long timeout = msecs_to_jiffies(30000);
- union ibmvnic_crq crq;
- unsigned long flags;
- int rc, detail_len;
-
- error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
- if (!error_buff)
- return;
-
- detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
- error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
- if (!error_buff->buff) {
- kfree(error_buff);
- return;
- }
-
- error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, error_buff->dma)) {
- netdev_err(netdev, "Couldn't map error buffer\n");
- kfree(error_buff->buff);
- kfree(error_buff);
- return;
- }
-
- error_buff->len = detail_len;
- error_buff->error_id = err_crq->error_indication.error_id;
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_add_tail(&error_buff->list, &adapter->errors);
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- memset(&crq, 0, sizeof(crq));
- crq.request_error_info.first = IBMVNIC_CRQ_CMD;
- crq.request_error_info.cmd = REQUEST_ERROR_INFO;
- crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
- crq.request_error_info.len = cpu_to_be32(detail_len);
- crq.request_error_info.error_id = err_crq->error_indication.error_id;
-
- rc = ibmvnic_send_crq(adapter, &crq);
- if (rc) {
- netdev_err(netdev, "failed to request error information\n");
- goto err_info_fail;
- }
-
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- netdev_err(netdev, "timeout waiting for error information\n");
- goto err_info_fail;
+static const char *ibmvnic_fw_err_cause(u16 cause)
+{
+ switch (cause) {
+ case ADAPTER_PROBLEM:
+ return "adapter problem";
+ case BUS_PROBLEM:
+ return "bus problem";
+ case FW_PROBLEM:
+ return "firmware problem";
+ case DD_PROBLEM:
+ return "device driver problem";
+ case EEH_RECOVERY:
+ return "EEH recovery";
+ case FW_UPDATED:
+ return "firmware updated";
+ case LOW_MEMORY:
+ return "low memory";
+ default:
+ return "unknown";
}
-
- return;
-
-err_info_fail:
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_del(&error_buff->list);
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- kfree(error_buff->buff);
- kfree(error_buff);
}
static void handle_error_indication(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
+ u16 cause;
- dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
- crq->error_indication.flags
- & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- be32_to_cpu(crq->error_indication.error_id),
- be16_to_cpu(crq->error_indication.error_cause));
+ cause = be16_to_cpu(crq->error_indication.error_cause);
- if (be32_to_cpu(crq->error_indication.error_id))
- request_error_information(adapter, crq);
+ dev_warn_ratelimited(dev,
+ "Firmware reports %serror, cause: %s. Starting recovery...\n",
+ crq->error_indication.flags
+ & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
+ ibmvnic_fw_err_cause(cause));
if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
@@ -4453,10 +4359,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
netdev_dbg(netdev, "Got Error Indication\n");
handle_error_indication(crq, adapter);
break;
- case REQUEST_ERROR_RSP:
- netdev_dbg(netdev, "Got Error Detail Response\n");
- handle_error_info_rsp(crq, adapter);
- break;
case REQUEST_STATISTICS_RSP:
netdev_dbg(netdev, "Got Statistics Response\n");
complete(&adapter->stats_done);
@@ -4815,9 +4717,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->stats_lock);
- INIT_LIST_HEAD(&adapter->errors);
- spin_lock_init(&adapter->error_list_lock);
-
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
mutex_init(&adapter->reset_lock);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f9fb780102ac..f06eec145ca6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -512,24 +512,6 @@ struct ibmvnic_error_indication {
u8 reserved2[2];
} __packed __aligned(8);
-struct ibmvnic_request_error_info {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 ioba;
- __be32 len;
- __be32 error_id;
-} __packed __aligned(8);
-
-struct ibmvnic_request_error_rsp {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 error_id;
- __be32 len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
struct ibmvnic_link_state_indication {
u8 first;
u8 cmd;
@@ -709,8 +691,6 @@ union ibmvnic_crq {
struct ibmvnic_request_debug_stats request_debug_stats;
struct ibmvnic_request_debug_stats request_debug_stats_rsp;
struct ibmvnic_error_indication error_indication;
- struct ibmvnic_request_error_info request_error_info;
- struct ibmvnic_request_error_rsp request_error_rsp;
struct ibmvnic_link_state_indication link_state_indication;
struct ibmvnic_change_mac_addr change_mac_addr;
struct ibmvnic_change_mac_addr change_mac_addr_rsp;
@@ -809,8 +789,6 @@ enum ibmvnic_commands {
SET_PHYS_PARMS = 0x07,
SET_PHYS_PARMS_RSP = 0x87,
ERROR_INDICATION = 0x08,
- REQUEST_ERROR_INFO = 0x09,
- REQUEST_ERROR_RSP = 0x89,
LOGICAL_LINK_STATE = 0x0C,
LOGICAL_LINK_STATE_RSP = 0x8C,
REQUEST_STATISTICS = 0x0D,
@@ -945,14 +923,6 @@ struct ibmvnic_rx_pool {
struct ibmvnic_long_term_buff long_term_buff;
};
-struct ibmvnic_error_buff {
- char *buff;
- dma_addr_t dma;
- int len;
- struct list_head list;
- __be32 error_id;
-};
-
struct ibmvnic_vpd {
unsigned char *buff;
dma_addr_t dma_addr;
@@ -1047,9 +1017,6 @@ struct ibmvnic_adapter {
struct completion init_done;
int init_done_rc;
- struct list_head errors;
- spinlock_t error_list_lock;
-
struct completion fw_done;
int fw_done_rc;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ddbea79d18e5..501ee718177f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -868,6 +868,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7d888e05f96f..80e3eec6134e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2247,6 +2247,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index eb2d1530d331..85f75b5978fc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3541,6 +3541,41 @@ i40e_aq_update_nvm_exit:
}
/**
+ * i40e_aq_rearrange_nvm
+ * @hw: pointer to the hw struct
+ * @rearrange_nvm: defines direction of rearrangement
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Rearrange NVM structure, available only for transition FW
+ **/
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_nvm_update *cmd;
+ i40e_status status;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
+ I40E_AQ_NVM_REARRANGE_TO_STRUCT);
+
+ if (!rearrange_nvm) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_rearrange_nvm_exit;
+ }
+
+ cmd->command_flags |= rearrange_nvm;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_rearrange_nvm_exit:
+ return status;
+}
+
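A hypothetical call site, to show the new function's contract: exactly one direction flag should be passed, and a value containing neither I40E_AQ_NVM_REARRANGE_TO_FLAT nor I40E_AQ_NVM_REARRANGE_TO_STRUCT is rejected as I40E_ERR_PARAM before any admin-queue command is sent (hw and pf are assumed to be initialized; this caller is not part of the patch):

/* Sketch only: hw/pf assumed valid; not a call site added by this patch. */
i40e_status status;

status = i40e_aq_rearrange_nvm(hw, I40E_AQ_NVM_REARRANGE_TO_FLAT, NULL);
if (status)
	dev_warn(&pf->pdev->dev, "rearrange NVM failed, status %d\n", status);

/* No valid direction flag: masked to zero, returns I40E_ERR_PARAM. */
status = i40e_aq_rearrange_nvm(hw, 0, NULL);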
+/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 6947a2a571cb..abcd096ede14 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -7,6 +7,11 @@
#include "i40e_diag.h"
struct i40e_stats {
+ /* The stat_string is expected to be a format string formatted using
+ * vsnprintf by i40e_add_stat_strings. Every member of a stats array
+ * should use the same format specifiers as they will be formatted
+ * using the same variadic arguments.
+ */
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
@@ -26,6 +31,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_PFC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pfc_stats, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -56,6 +63,13 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
+static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
+ I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
+ I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
+ I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
+ I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+};
+
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@@ -141,6 +155,22 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
};
+struct i40e_pfc_stats {
+ u64 priority_xon_rx;
+ u64 priority_xoff_rx;
+ u64 priority_xon_tx;
+ u64 priority_xoff_tx;
+ u64 priority_xon_2_xoff;
+};
+
+static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
+ I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx),
+ I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
+};
+
/* We use num_tx_queues here as a proxy for the maximum number of queues
* available because we always allocate queues symmetrically.
*/
@@ -155,23 +185,17 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
-#define I40E_PFC_STATS_LEN ( \
- (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
- / sizeof(u64))
-#define I40E_VEB_TC_STATS_LEN ( \
- (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
- / sizeof(u64))
-#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
-#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
+
+#define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
+ I40E_MAX_USER_PRIORITY)
+
+#define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \
+ (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
+ I40E_MAX_TRAFFIC_CLASS))
+
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
+ I40E_VEB_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
enum i40e_ethtool_test_id {
@@ -1565,7 +1589,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring;
u16 unused;
/* clone ring and setup updated count */
@@ -1589,9 +1612,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
*/
- ring = &rx_rings[i];
- unused = I40E_DESC_UNUSED(ring);
- err = i40e_alloc_rx_buffers(ring, unused);
+ unused = I40E_DESC_UNUSED(&rx_rings[i]);
+ err = i40e_alloc_rx_buffers(&rx_rings[i], unused);
rx_unwind:
if (err) {
do {
@@ -1681,7 +1703,7 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_pf *pf = vsi->back;
if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
- return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL;
+ return I40E_PF_STATS_LEN(netdev);
else
return I40E_VSI_STATS_LEN(netdev);
}
@@ -1706,6 +1728,114 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
/**
+ * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pointer: basis for where to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. Used to implement i40e_add_ethtool_stats.
+ * If the pointer is null, data will be zero'd.
+ */
+static inline void
+i40e_add_one_ethtool_stat(u64 *data, void *pointer,
+ const struct i40e_stats *stat)
+{
+ char *p;
+
+ if (!pointer) {
+ /* ensure that the ethtool data buffer is zero'd for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pointer + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location to copy stats from
+ * @stats: array of stats to copy
+ * @size: the size of the stats definition
+ *
+ * Copy the stats defined by the stats array using the pointer as a base into
+ * the data buffer supplied by ethtool. Updates the data pointer to point to
+ * the next empty location for successive calls to __i40e_add_ethtool_stats.
+ * If pointer is null, set the data values to zero and update the pointer to
+ * skip these stats.
+ **/
+static inline void
+__i40e_add_ethtool_stats(u64 **data, void *pointer,
+ const struct i40e_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
+}
+
+/**
+ * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location where stats are stored
+ * @stats: static const array of stat definitions
+ *
+ * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
+ * ensuring that we pass the size associated with the given stats array.
+ * Assumes that stats is an array.
+ **/
+#define i40e_add_ethtool_stats(data, pointer, stats) \
+ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
+/**
+ * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
+ * @pf: the PF device structure
+ * @i: the priority value to copy
+ *
+ * The PFC stats are found as arrays in pf->stats, which is not easy to pass
+ * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure
+ * of the PFC stats for the given priority.
+ **/
+static inline struct i40e_pfc_stats
+i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i)
+{
+#define I40E_GET_PFC_STAT(stat, priority) \
+ .stat = pf->stats.stat[priority]
+
+ struct i40e_pfc_stats pfc = {
+ I40E_GET_PFC_STAT(priority_xon_rx, i),
+ I40E_GET_PFC_STAT(priority_xoff_rx, i),
+ I40E_GET_PFC_STAT(priority_xon_tx, i),
+ I40E_GET_PFC_STAT(priority_xoff_tx, i),
+ I40E_GET_PFC_STAT(priority_xon_2_xoff, i),
+ };
+ return pfc;
+}
+
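For clarity, I40E_GET_PFC_STAT is only a designated initializer that indexes the per-priority arrays in pf->stats; expanded for one field at a hypothetical priority 3 it reads:

/* I40E_GET_PFC_STAT(priority_xon_rx, 3) expands to: */
struct i40e_pfc_stats pfc = {
	.priority_xon_rx = pf->stats.priority_xon_rx[3],
	/* ...the remaining four fields follow the same pattern */
};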
+/**
* i40e_get_ethtool_stats - copy stat values into supplied buffer
* @netdev: the netdev to collect stats for
* @stats: ethtool stats command structure
@@ -1726,23 +1856,19 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = pf->veb[pf->lan_veb];
unsigned int i;
- char *p;
- struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
unsigned int start;
+ bool veb_stats;
+ u64 *p = data;
i40e_update_stats(vsi);
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+ i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi),
+ i40e_gstrings_net_stats);
+
+ i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
+
rcu_read_lock();
for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) {
tx_ring = READ_ONCE(vsi->tx_rings[i]);
@@ -1777,45 +1903,72 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_unlock();
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
+ goto check_data_pointer;
- if ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+
+ /* If veb stats aren't enabled, pass NULL instead of the veb so that
+ * we initialize stats to zero and update the data pointer
+ * intelligently.
+ */
+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+ i40e_gstrings_veb_stats);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+ i40e_gstrings_veb_tc_stats);
+
+ i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- p = (char *)veb;
- p += i40e_gstrings_veb_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- *(data++) = veb->tc_stats.tc_tx_packets[i];
- *(data++) = veb->tc_stats.tc_tx_bytes[i];
- *(data++) = veb->tc_stats.tc_rx_packets[i];
- *(data++) = veb->tc_stats.tc_rx_bytes[i];
- }
- } else {
- data += I40E_VEB_STATS_TOTAL;
- }
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- p = (char *)pf + i40e_gstrings_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- *(data++) = pf->stats.priority_xon_tx[i];
- *(data++) = pf->stats.priority_xoff_tx[i];
+ struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i);
+
+ i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats);
}
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- *(data++) = pf->stats.priority_xon_rx[i];
- *(data++) = pf->stats.priority_xoff_rx[i];
+
+check_data_pointer:
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev),
+ "ethtool stats count mismatch!");
+}
+
+/**
+ * __i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ **/
+static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
+ const unsigned int size, ...)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
+ *p += ETH_GSTRING_LEN;
+ va_end(args);
}
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
- *(data++) = pf->stats.priority_xon_2_xoff[i];
}
/**
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p. Assumes that stats can have ARRAY_SIZE called
+ * for it.
+ **/
+#define i40e_add_stat_strings(p, stats, ...) \
+ __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
+
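The variadic helper is what lets a single stats table carry format strings such as "veb.tc_%u_tx_packets": the same entry is rendered once per traffic class or priority index. A self-contained sketch of that expansion (GSTRING_LEN standing in for the kernel's ETH_GSTRING_LEN of 32):

#include <stdarg.h>
#include <stdio.h>

#define GSTRING_LEN 32	/* ETH_GSTRING_LEN in the kernel */

static void add_stat_string(char **p, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*p, GSTRING_LEN, fmt, args);	/* format into this slot */
	*p += GSTRING_LEN;			/* advance to the next slot */
	va_end(args);
}

int main(void)
{
	char buf[4 * GSTRING_LEN] = { 0 };
	char *p = buf;
	unsigned int i;

	for (i = 0; i < 4; i++)
		add_stat_string(&p, "veb.tc_%u_tx_packets", i);

	for (i = 0; i < 4; i++)
		printf("%s\n", buf + i * GSTRING_LEN);
	return 0;
}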
+/**
* i40e_get_stat_strings - copy stat strings into supplied buffer
* @netdev: the netdev to collect strings for
* @data: supplied buffer to copy strings into
@@ -1833,16 +1986,10 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
unsigned int i;
u8 *p = data;
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_net_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_misc_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
+ i40e_add_stat_strings(&data, i40e_gstrings_net_stats);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
+
for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
data += ETH_GSTRING_LEN;
@@ -1856,52 +2003,15 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_veb_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_bytes", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_bytes", i);
- data += ETH_GSTRING_LEN;
- }
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_stats);
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
@@ -4535,7 +4645,6 @@ flags_complete:
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
- int i;
i40e_aq_stop_lldp(&pf->hw, true, NULL);
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c944bd10b03d..f2c622e78802 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1800,6 +1800,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps);
break;
}
+ /* fall through */
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
@@ -6597,6 +6598,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
err = i40e_aq_set_phy_config(hw, &config, NULL);
if (err) {
@@ -7522,7 +7525,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -7554,7 +7557,7 @@ static int i40e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
- np, np);
+ np, np, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
return 0;
@@ -11841,7 +11844,6 @@ static int i40e_xdp(struct net_device *dev,
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
return 0;
default:
@@ -11978,7 +11980,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
IFNAMSIZ - 4,
pf->vsi[pf->lan_vsi]->netdev->name);
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_mac_filter(vsi, mac_addr);
@@ -14354,12 +14356,6 @@ static void i40e_shutdown(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
-
- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3170655cdeb9..e08d754824b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -193,6 +193,9 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index ed6dbcfd4e96..b5042d1a63c0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
return true;
}
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS 0
+#define I40E_XDP_CONSUMED BIT(0)
+#define I40E_XDP_TX BIT(1)
+#define I40E_XDP_REDIR BIT(2)
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring);
@@ -2248,13 +2249,14 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+ result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
break;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ /* fall through -- handle aborts by dropping packet */
case XDP_DROP:
result = I40E_XDP_CONSUMED;
break;
@@ -2311,7 +2313,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- bool failure = false, xdp_xmit = false;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -2372,8 +2375,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
}
if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -I40E_XDP_TX) {
- xdp_xmit = true;
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
} else {
rx_buffer->pagecnt_bias++;
@@ -2427,12 +2432,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
total_rx_packets++;
}
- if (xdp_xmit) {
+ if (xdp_xmit & I40E_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & I40E_XDP_TX) {
struct i40e_ring *xdp_ring =
rx_ring->vsi->xdp_rings[rx_ring->queue_index];
i40e_xdp_ring_update_tail(xdp_ring);
- xdp_do_flush_map();
}
rx_ring->skb = skb;
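Turning the XDP result codes into bit flags lets the Rx cleanup loop accumulate verdicts and then perform each follow-up action at most once per poll: flush the redirect maps only if some frame was redirected, and bump the local XDP ring tail only if some frame was queued for transmit. The accumulation pattern in isolation (flag names shortened; values mirror the defines above):

#include <stdio.h>

#define XDP_PASS_R	0
#define XDP_CONSUMED_R	(1u << 0)
#define XDP_TX_R	(1u << 1)
#define XDP_REDIR_R	(1u << 2)

int main(void)
{
	unsigned int verdicts[] = { XDP_TX_R, XDP_CONSUMED_R, XDP_REDIR_R };
	unsigned int xdp_xmit = 0;
	unsigned int i;

	/* Per-packet loop: only TX/REDIR verdicts are accumulated. */
	for (i = 0; i < 3; i++)
		if (verdicts[i] & (XDP_TX_R | XDP_REDIR_R))
			xdp_xmit |= verdicts[i];

	/* End of poll: each action runs at most once. */
	if (xdp_xmit & XDP_REDIR_R)
		printf("flush redirect maps once\n");
	if (xdp_xmit & XDP_TX_R)
		printf("bump XDP Tx ring tail once\n");
	return 0;
}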
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index c355120dfdfd..21a0dbf6ccf6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -797,6 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aa81e87cd471..5fd8529465d4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -2175,6 +2175,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 9cef54971312..eea280ba411e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1021,75 +1021,6 @@ do_retry:
}
/**
- * i40evf_aq_set_phy_register
- * @hw: pointer to the hw struct
- * @phy_select: select which phy should be accessed
- * @dev_addr: PHY device address
- * @reg_addr: PHY register address
- * @reg_val: new register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Reset the external PHY.
- **/
-i40e_status i40evf_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_phy_register);
-
- cmd->phy_interface = phy_select;
- cmd->dev_address = dev_addr;
- cmd->reg_address = cpu_to_le32(reg_addr);
- cmd->reg_value = cpu_to_le32(reg_val);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40evf_aq_get_phy_register
- * @hw: pointer to the hw struct
- * @phy_select: select which phy should be accessed
- * @dev_addr: PHY device address
- * @reg_addr: PHY register address
- * @reg_val: read register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Reset the external PHY.
- **/
-i40e_status i40evf_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_register);
-
- cmd->phy_interface = phy_select;
- cmd->dev_address = dev_addr;
- cmd->reg_address = cpu_to_le32(reg_addr);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
- *reg_val = le32_to_cpu(cmd->reg_value);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index a7b87f935411..5906c1c1d19d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b13b42e5a1d9..bafdcf70a353 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -225,19 +225,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
- /* Make sure the PHY is in a good state. Several people have reported
- * firmware leaving the PHY's page select register set to something
- * other than the default of zero, which causes the PHY ID read to
- * access something other than the intended register.
- */
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
- goto out;
- }
-
/* Set phy->phy_addr and phy->id. */
- igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
@@ -1720,6 +1708,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
/* disable PCS autoneg and support parallel detect only */
pcs_autoneg = false;
+ /* fall through */
default:
if (hw->mac.type == e1000_82575 ||
hw->mac.type == e1000_82576) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 252440a418dc..8a28f3388f69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1048,6 +1048,22 @@
#define E1000_TQAVCTRL_XMIT_MODE BIT(0)
#define E1000_TQAVCTRL_DATAFETCHARB BIT(4)
#define E1000_TQAVCTRL_DATATRANARB BIT(8)
+#define E1000_TQAVCTRL_DATATRANTIM BIT(9)
+#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10)
+/* Fetch Time Delta - bits 31:16
+ *
+ * This field holds the value to be reduced from the launch time for
+ * fetch time decision. The FetchTimeDelta value is defined in 32 ns
+ * granularity.
+ *
+ * This field is 16 bits wide, and so the maximum value is:
+ *
+ * 65535 * 32 = 2097120 ~= 2.1 msec
+ *
+ * XXX: We are configuring the max value here since we couldn't come up
+ * with a reason for not doing so.
+ */
+#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16)
/* TX Qav Credit Control fields */
#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 2be0e762ec69..ad2125e5a7f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -659,6 +659,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_AUTO_X_1000T;
break;
}
+ /* fall through */
case 0:
default:
phy_data |= M88E1000_PSCR_AUTO_X_MODE;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9643b5b3d444..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -262,6 +262,7 @@ struct igb_ring {
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
bool cbs_enable; /* indicates if CBS is enabled */
s32 idleslope; /* idleSlope in kbps */
s32 sendslope; /* sendSlope in kbps */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f707709969ac..d03c2f0d7592 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -22,7 +22,6 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
@@ -1654,33 +1653,65 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
wr32(E1000_I210_TQAVCC(queue), val);
}
+static bool is_any_cbs_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->cbs_enable)
+ return true;
+ }
+
+ return false;
+}
+
+static bool is_any_txtime_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->launchtime_enable)
+ return true;
+ }
+
+ return false;
+}
+
/**
- * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
+ * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
* @adapter: pointer to adapter struct
* @queue: queue number
- * @enable: true = enable CBS, false = disable CBS
- * @idleslope: idleSlope in kbps
- * @sendslope: sendSlope in kbps
- * @hicredit: hiCredit in bytes
- * @locredit: loCredit in bytes
*
- * Configure CBS for a given hardware queue. When disabling, idleslope,
- * sendslope, hicredit, locredit arguments are ignored. Returns 0 if
- * success. Negative otherwise.
+ * Configure CBS and Launchtime for a given hardware queue.
+ * Parameters are retrieved from the matching Tx ring, so
+ * igb_save_cbs_params() and igb_save_txtime_params() must have been
+ * called to set them before this function is invoked.
**/
-static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
- bool enable, int idleslope, int sendslope,
- int hicredit, int locredit)
+static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
+ struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- u32 tqavcc;
+ u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
- if (enable || queue == 0) {
+ /* If any Qav feature is enabled, configure the queues as SR with
+ * HIGH PRIO. If none is enabled, configure them as SP with LOW
+ * PRIO.
+ */
+ if (ring->cbs_enable || ring->launchtime_enable) {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
+ set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ } else {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
+ set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
+ }
+
+ /* If CBS is enabled, set DataTranARB and configure its parameters. */
+ if (ring->cbs_enable || queue == 0) {
/* i210 does not allow the queue 0 to be in the Strict
* Priority mode while the Qav mode is enabled, so,
* instead of disabling strict priority mode, we give
@@ -1690,14 +1721,19 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* Queue0 QueueMode must be set to 1b when
* TransmitMode is set to Qav."
*/
- if (queue == 0 && !enable) {
+ if (queue == 0 && !ring->cbs_enable) {
/* max "linkspeed" idleslope in kbps */
- idleslope = 1000000;
- hicredit = ETH_FRAME_LEN;
+ ring->idleslope = 1000000;
+ ring->hicredit = ETH_FRAME_LEN;
}
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
- set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ /* Always set data transfer arbitration to the credit-based
+ * shaper algorithm on TQAVCTRL if CBS is enabled for any of
+ * the queues.
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
/* According to i210 datasheet section 7.2.7.7, we should set
* the 'idleSlope' field from TQAVCC register following the
@@ -1756,17 +1792,16 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* calculated value, so the resulting bandwidth might
* be slightly higher for some configurations.
*/
- value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
+ value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
tqavcc = rd32(E1000_I210_TQAVCC(queue));
tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
tqavcc |= value;
wr32(E1000_I210_TQAVCC(queue), tqavcc);
- wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
+ wr32(E1000_I210_TQAVHC(queue),
+ 0x80000000 + ring->hicredit * 0x7735);
} else {
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
- set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
/* Set idleSlope to zero. */
tqavcc = rd32(E1000_I210_TQAVCC(queue));
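The DIV_ROUND_UP_ULL() line above converts a kbps idleSlope into the 16-bit
credit field of TQAVCC. A worked example of that conversion as a standalone
sketch; the 61034/1000000 ratio is copied from the hunk, while the 20 Mbps
sample bandwidth is an assumption:

#include <stdio.h>

/* round-up division, mirroring the kernel's DIV_ROUND_UP_ULL() */
#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long long idleslope = 20000; /* assumed example: 20 Mbps in kbps */
        unsigned long long value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);

        /* 20000 * 61034 / 1000000 = 1220.68, rounded up to 1221 */
        printf("TQAVCC idleSlope field for %llu kbps: %llu\n", idleslope, value);
        return 0;
}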
@@ -1775,6 +1810,43 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
/* Set hiCredit to zero. */
wr32(E1000_I210_TQAVHC(queue), 0);
+
+ /* If CBS is no longer enabled on any queue, then return to
+ * the default state of Data Transmission Arbitration on
+ * TQAVCTRL.
+ */
+ if (!is_any_cbs_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
+ }
+
+ /* If LaunchTime is enabled, set DataTranTIM. */
+ if (ring->launchtime_enable) {
+ /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
+ * for any of the SR queues, and configure the fetchtime delta.
+ * XXX NOTE:
+ * - LaunchTime will be enabled for all SR queues.
+ * - A fixed offset can be added relative to the launch time of
+ * all packets via the LAUNCH_OS0 register. We keep it at 0
+ * for now (the default value).
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
+ E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ } else {
+ /* If Launchtime is no longer enabled on any SR queue, clear
+ * DataTranTIM on TQAVCTRL and clear the fetchtime delta,
+ * effectively disabling Launchtime.
+ */
+ if (!is_any_txtime_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
+ tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
}
/* XXX: In i210 controller the sendSlope and loCredit parameters from
@@ -1782,9 +1854,27 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* configuration' in respect to these parameters.
*/
- netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
- (enable) ? "enabled" : "disabled", queue,
- idleslope, sendslope, hicredit, locredit);
+ netdev_dbg(netdev,
+ "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+ (ring->cbs_enable) ? "enabled" : "disabled",
+ (ring->launchtime_enable) ? "enabled" : "disabled", queue,
+ ring->idleslope, ring->sendslope, ring->hicredit,
+ ring->locredit);
+}
+
+static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
+ bool enable)
+{
+ struct igb_ring *ring;
+
+ if (queue < 0 || queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+ ring->launchtime_enable = enable;
+
+ return 0;
}
static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
@@ -1807,21 +1897,15 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
return 0;
}
-static bool is_any_cbs_enabled(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = adapter->tx_ring[i];
-
- if (ring->cbs_enable)
- return true;
- }
-
- return false;
-}
-
+/**
+ * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
+ * @adapter: pointer to adapter struct
+ *
+ * Configure the TQAVCTRL register, switching the controller's Tx mode
+ * depending on whether FQTSS mode is enabled or disabled. Additionally,
+ * issue a call to igb_config_tx_modes() per queue so any previously
+ * saved Tx parameters are applied.
+ **/
static void igb_setup_tx_mode(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1836,11 +1920,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
int i, max_queue;
/* Configure TQAVCTRL register: set transmit mode to 'Qav',
- * set data fetch arbitration to 'round robin' and set data
- * transfer arbitration to 'credit shaper algorithm.
+ * set data fetch arbitration to 'round robin', and set SP_WAIT_SR
+ * so that SP queues wait for SR ones.
*/
val = rd32(E1000_I210_TQAVCTRL);
- val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
+ val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
val &= ~E1000_TQAVCTRL_DATAFETCHARB;
wr32(E1000_I210_TQAVCTRL, val);
@@ -1881,11 +1965,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
adapter->num_tx_queues : I210_SR_QUEUES_NUM;
for (i = 0; i < max_queue; i++) {
- struct igb_ring *ring = adapter->tx_ring[i];
-
- igb_configure_cbs(adapter, i, ring->cbs_enable,
- ring->idleslope, ring->sendslope,
- ring->hicredit, ring->locredit);
+ igb_config_tx_modes(adapter, i);
}
} else {
wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
@@ -2459,6 +2539,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
+{
+ if (!is_fqtss_enabled(adapter)) {
+ enable_fqtss(adapter, true);
+ return;
+ }
+
+ igb_config_tx_modes(adapter, queue);
+
+ if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
+ enable_fqtss(adapter, false);
+}
+
static int igb_offload_cbs(struct igb_adapter *adapter,
struct tc_cbs_qopt_offload *qopt)
{
@@ -2479,17 +2572,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
if (err)
return err;
- if (is_fqtss_enabled(adapter)) {
- igb_configure_cbs(adapter, qopt->queue, qopt->enable,
- qopt->idleslope, qopt->sendslope,
- qopt->hicredit, qopt->locredit);
-
- if (!is_any_cbs_enabled(adapter))
- enable_fqtss(adapter, false);
-
- } else {
- enable_fqtss(adapter, true);
- }
+ igb_offload_apply(adapter, qopt->queue);
return 0;
}
@@ -2698,7 +2781,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2728,7 +2811,7 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
adapter);
@@ -2738,6 +2821,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
}
}
+static int igb_offload_txtime(struct igb_adapter *adapter,
+ struct tc_etf_qopt_offload *qopt)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* Launchtime offloading is only supported by the i210 controller. */
+ if (hw->mac.type != e1000_i210)
+ return -EOPNOTSUPP;
+
+ /* Launchtime offloading is only supported on queues 0 and 1. */
+ if (qopt->queue < 0 || qopt->queue > 1)
+ return -EINVAL;
+
+ err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
+ if (err)
+ return err;
+
+ igb_offload_apply(adapter, qopt->queue);
+
+ return 0;
+}
+
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -2748,6 +2854,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
return igb_setup_tc_block(adapter, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return igb_offload_txtime(adapter, type_data);
default:
return -EOPNOTSUPP;
@@ -5067,6 +5175,7 @@ bool igb_has_link(struct igb_adapter *adapter)
case e1000_media_type_copper:
if (!hw->mac.get_link_status)
return true;
+ /* fall through */
case e1000_media_type_internal_serdes:
hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
@@ -5568,11 +5677,14 @@ set_itr_now:
}
}
-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
- u32 type_tucmd, u32 mss_l4len_idx)
+static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
+ struct timespec64 ts;
context_desc = IGB_TX_CTXTDESC(tx_ring, i);
@@ -5587,9 +5699,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
mss_l4len_idx |= tx_ring->reg_idx << 4;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ /* We assume there is always a valid tx time available. Invalid times
+ * should have been handled by the upper layers.
+ */
+ if (tx_ring->launchtime_enable) {
+ ts = ns_to_timespec64(first->skb->tstamp);
+ context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
+ } else {
+ context_desc->seqnum_seed = 0;
+ }
}
static int igb_tso(struct igb_ring *tx_ring,
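The seqnum_seed assignment above encodes the nanosecond part of the launch
time in 32 ns units. A standalone sketch of that conversion; the sample
timestamp is an assumption and struct ts64 merely mimics the tv_sec/tv_nsec
split of timespec64:

#include <stdio.h>

struct ts64 { long long tv_sec; long tv_nsec; }; /* stand-in for timespec64 */

int main(void)
{
        /* assumed example launch time: 1.000000640 s */
        long long tstamp = 1000000640LL;
        struct ts64 ts = { tstamp / 1000000000LL, tstamp % 1000000000LL };

        /* the driver writes ts.tv_nsec / 32 into the context descriptor */
        unsigned int seed = (unsigned int)(ts.tv_nsec / 32); /* 640 / 32 = 20 */

        printf("seqnum_seed = %u (32 ns units within the second)\n", seed);
        return 0;
}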
@@ -5672,7 +5793,8 @@ static int igb_tso(struct igb_ring *tx_ring,
vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
return 1;
}
@@ -5714,6 +5836,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
+ /* fall through */
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -5727,7 +5850,7 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}
#define IGB_SET_FLAG(_input, _flag, _result) \
@@ -5909,7 +6032,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
* We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
- wmb();
+ dma_wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -6015,8 +6138,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
if (skb_vlan_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -6032,6 +6153,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
+ skb_tx_timestamp(skb);
+
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
@@ -8409,7 +8532,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
- wmb();
+ dma_wmb();
writel(i, rx_ring->tail);
}
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f818f060e5a7..e0c989ffb2b3 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2102,6 +2102,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
+ /* fall through */
default:
skb_checksum_help(skb);
goto csum_failed;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 144d5fe6b944..4fc906c6166b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f5c350716bb..0bd1294ba517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
if (enable_addr != 0)
rar_high |= IXGBE_RAH_AV;
+ /* Record the lower 32 bits of the MAC address, then make
+ * sure that write is flushed to hardware before writing
+ * the upper 16 bits and setting the valid bit.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ /* Clear the address valid bit and upper 16 bits of the address
+ * before clearing the lower bits. This way we aren't updating
+ * a live filter.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
/* clear VMDq pool/queue selection for this RAR */
hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
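The two hunks above enforce the same ordering rule from opposite directions:
never expose a half-written receive-address filter. A compact standalone
sketch of the idea with stubbed register accessors; the stubs and the sample
address are assumptions, only the ordering mirrors the patch:

#include <stdio.h>
#include <stdint.h>

static uint32_t ral, rah;      /* stand-ins for the RAL/RAH registers */
#define RAH_AV 0x80000000u     /* address-valid bit, like IXGBE_RAH_AV */

static void write_flush(void)
{
        /* a real driver posts the write, e.g. by reading a register back */
}

static void set_filter(uint64_t mac)
{
        ral = (uint32_t)mac;                    /* low 32 bits first... */
        write_flush();                          /* ...flushed before going live */
        rah = (uint32_t)(mac >> 32) | RAH_AV;   /* upper 16 bits + valid bit */
}

static void clear_filter(void)
{
        rah = 0;        /* drop the valid bit and upper bits first... */
        write_flush();  /* ...so we never update a live filter */
        ral = 0;
}

int main(void)
{
        set_filter(0x0000aabbccddeeffULL);      /* assumed example address */
        printf("RAH=%08x RAL=%08x\n", rah, ral);
        clear_filter();
        printf("RAH=%08x RAL=%08x\n", rah, ral);
        return 0;
}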
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bd1ba88ec1d5..e5a8461fe6a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1139
+#define IXGBE_REGS_LEN 1145
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* X540 specific DCB registers */
regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
+
+ /* Security config registers */
+ regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+ regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1690,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
- struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_ctl;
-
- /* shut down the DMA engines now so they can be reinitialized later */
+ /* Shut down the DMA engines now so they can be reinitialized later.
+ * Since the test rings and the normally used rings should overlap on
+ * queue 0, we can just use the standard disable Rx/Tx calls and they
+ * will take care of disabling the test rings for us.
+ */
/* first Rx */
- hw->mac.ops.disable_rx(hw);
- ixgbe_disable_rx_queue(adapter, rx_ring);
+ ixgbe_disable_rx(adapter);
/* now Tx */
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
- reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
-
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- reg_ctl &= ~IXGBE_DMATXCTL_TE;
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
ixgbe_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index c116f459945d..da4322e4daed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
}
itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
- if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+ if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
__func__, itd->sa_idx, xs->xso.offload_handle);
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e87dbbc9024..447098005490 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS 0
+#define IXGBE_XDP_CONSUMED BIT(0)
+#define IXGBE_XDP_TX BIT(1)
+#define IXGBE_XDP_REDIR BIT(2)
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
if (!err)
- result = IXGBE_XDP_TX;
+ result = IXGBE_XDP_REDIR;
else
result = IXGBE_XDP_CONSUMED;
break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
- bool xdp_xmit = false;
+ unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
- xdp_xmit = true;
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
} else {
rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_packets++;
}
- if (xdp_xmit) {
+ if (xdp_xmit & IXGBE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
/* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
wmb();
writel(ring->next_to_use, ring->tail);
-
- xdp_do_flush_map();
}
u64_stats_update_begin(&rx_ring->syncp);
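Turning the XDP verdict into a bitmask above lets a single pass record both
outcomes and flush each path exactly once. A standalone sketch of the
accumulation pattern; the flag values are copied from the hunk and the
per-packet verdict sequence is an assumption:

#include <stdio.h>

#define IXGBE_XDP_PASS          0
#define IXGBE_XDP_CONSUMED      (1u << 0)       /* BIT(0) */
#define IXGBE_XDP_TX            (1u << 1)       /* BIT(1) */
#define IXGBE_XDP_REDIR         (1u << 2)       /* BIT(2) */

int main(void)
{
        /* assumed verdicts for three packets in one NAPI poll */
        unsigned int res[] = { IXGBE_XDP_TX, IXGBE_XDP_REDIR, IXGBE_XDP_CONSUMED };
        unsigned int xdp_xmit = 0;
        unsigned int i;

        for (i = 0; i < sizeof(res) / sizeof(res[0]); i++)
                if (res[i] & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
                        xdp_xmit |= res[i];

        if (xdp_xmit & IXGBE_XDP_REDIR)
                printf("flush redirect maps once\n");   /* xdp_do_flush_map() */
        if (xdp_xmit & IXGBE_XDP_TX)
                printf("kick the XDP Tx ring once\n");  /* tail register write */
        return 0;
}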
@@ -4018,38 +4022,6 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBE_MAX_RX_DESC_POLL;
- u32 rxdctl;
- u8 reg_idx = ring->reg_idx;
-
- if (ixgbe_removed(hw->hw_addr))
- return;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
- /* write value back with RXDCTL.ENABLE bit cleared */
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
-
- if (hw->mac.type == ixgbe_mac_82598EB &&
- !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
- return;
-
- /* the hardware may take up to 100us to really disable the rx queue */
- do {
- udelay(10);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
- if (!wait_loop) {
- e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
- "the polling period\n", reg_idx);
- }
-}
-
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
@@ -4059,9 +4031,13 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
- /* disable queue to avoid issues while updating state */
+ /* disable queue to avoid use of these values while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- ixgbe_disable_rx_queue(adapter, ring);
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -5271,6 +5247,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
struct net_device *vdev = accel->netdev;
int i, baseq, err;
@@ -5282,6 +5260,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+ i, rss_i, baseq + (rss_i * i));
+
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = vdev;
@@ -5306,6 +5289,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
@@ -5618,6 +5605,212 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
ixgbe_up_complete(adapter);
}
+static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
+{
+ u16 devctl2;
+
+ pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
+
+ switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ /* For now we cap the upper limit on the delay at 2 seconds,
+ * as the worst-case timeout value would otherwise have us
+ * waiting for up to 34 seconds.
+ */
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ return 2000000ul; /* 2.0 s */
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ return 520000ul; /* 520 ms */
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ return 130000ul; /* 130 ms */
+ case IXGBE_PCIDEVCTRL2_16_32ms:
+ return 32000ul; /* 32 ms */
+ case IXGBE_PCIDEVCTRL2_1_2ms:
+ return 2000ul; /* 2 ms */
+ case IXGBE_PCIDEVCTRL2_50_100us:
+ return 100ul; /* 100 us */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def:
+ return 32000ul; /* 32 ms */
+ default:
+ break;
+ }
+
+ /* We shouldn't need to hit this path, but just in case, default
+ * to 32 ms as though completion timeout were not supported.
+ */
+ return 32000ul;
+}
+
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 rxdctl;
+
+ /* disable receives */
+ hw->mac.ops.disable_rx(hw);
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ }
+
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting; however, as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * is that the total wait adds up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval (see the
+ * sketch after this function).
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ rxdctl = 0;
+
+ /* OR together the reading of all the active RXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ }
+
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv,
+ "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+}
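The 1x/3x/5x... progression described in the comment sums to exactly 100x the
base interval over the ten polls of IXGBE_MAX_RX_DESC_POLL. A quick standalone
check of that claim; the 32 ms completion timeout is an assumed example:

#include <stdio.h>

int main(void)
{
        unsigned long timeout_us = 32000;       /* assumed 32 ms completion timeout */
        unsigned long delay_interval = timeout_us / 100;
        unsigned long wait_delay = delay_interval;
        unsigned long total = 0;
        int i;

        for (i = 0; i < 10; i++) {              /* ten polls: 1x, 3x, ..., 19x */
                total += wait_delay;            /* the usleep_range() duration */
                wait_delay += delay_interval * 2;
        }
        printf("total worst-case wait: %lu us\n", total); /* == timeout_us */
        return 0;
}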
+
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 txdctl;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Tx queues */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* disable all enabled XDP Tx queues */
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* If the link is not up, there shouldn't be much in the way of
+ * pending transactions. Those that are left will be flushed out
+ * when the reset logic goes through the flush sequence to clean out
+ * the pending Tx transactions.
+ */
+ if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ goto dma_engine_disable;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting; however, as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * is that the total wait adds up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval.
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ txdctl = 0;
+
+ /* OR together the reading of all the active TXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+ goto dma_engine_disable;
+ }
+
+ e_err(drv,
+ "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+
+dma_engine_disable:
+ /* Disable the Tx DMA engine on 82599 and later MAC */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
+ /* fall through */
+ default:
+ break;
+ }
+}
+
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5799,24 +5992,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
return; /* do nothing if already down */
- /* disable receives */
- hw->mac.ops.disable_rx(hw);
+ /* Shut off incoming Tx traffic */
+ netif_tx_stop_all_queues(netdev);
- /* disable all enabled rx queues */
- for (i = 0; i < adapter->num_rx_queues; i++)
- /* this call also flushes the previous write */
- ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
- usleep_range(10000, 20000);
+ /* Disable Rx */
+ ixgbe_disable_rx(adapter);
/* synchronize_sched() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
synchronize_sched();
- netif_tx_stop_all_queues(netdev);
-
- /* call carrier off first to avoid false dev_watchdog timeouts */
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
ixgbe_irq_disable(adapter);
@@ -5844,30 +6032,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/* disable transmits in the hardware now that interrupts are off */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- u8 reg_idx = adapter->tx_ring[i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
-
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
-
- /* Disable the Tx DMA engine on 82599 and later MAC */
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
- (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
- ~IXGBE_DMATXCTL_TE));
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
@@ -6454,6 +6619,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (adapter->xdp_prog) {
+ e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
+ return -EPERM;
+ }
+
/*
* For 82599EB we cannot allow legacy VFs to enable their receive
* paths when MTU greater than 1500 is configured. So display a
@@ -8193,25 +8363,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
input, common, ring->queue_index);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
- struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
struct ixgbe_adapter *adapter;
- int txq;
-#ifdef IXGBE_FCOE
struct ixgbe_ring_feature *f;
-#endif
+ int txq;
- if (fwd_adapter) {
- adapter = netdev_priv(dev);
- txq = reciprocal_scale(skb_get_hash(skb),
- adapter->num_rx_queues_per_pool);
+ if (sb_dev) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ struct net_device *vdev = sb_dev;
- return txq + fwd_adapter->tx_base_queue;
- }
+ txq = vdev->tc_to_txq[tc].offset;
+ txq += reciprocal_scale(skb_get_hash(skb),
+ vdev->tc_to_txq[tc].count);
-#ifdef IXGBE_FCOE
+ return txq;
+ }
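The sb_dev branch above picks a queue inside the macvlan's reserved block:
tc_to_txq[tc].offset selects the block for the traffic class and a flow hash
spreads packets across its count queues. A standalone sketch of that
arithmetic; reciprocal_scale() is approximated with a modulo here, and all
the numbers are assumed examples:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* assumed tc_to_txq entry for the macvlan: 4 queues starting at 8 */
        uint16_t offset = 8, count = 4;
        uint32_t hash = 0xdeadbeef;     /* assumed skb flow hash */

        /* reciprocal_scale(hash, count) maps hash into [0, count);
         * a modulo is a close stand-in for this sketch */
        uint16_t txq = offset + (uint16_t)(hash % count);

        printf("selected txq = %u\n", txq);     /* 8 + 3 = 11 here */
        return 0;
}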
/*
* only execute the code below if protocol is FCoE
@@ -8222,11 +8392,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
/* fall through */
default:
- return fallback(dev, skb);
+ return fallback(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -8238,11 +8408,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
txq -= f->indices;
return txq + f->offset;
-#else
- return fallback(dev, skb);
-#endif
}
+#endif
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf)
{
@@ -8762,6 +8930,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
/* if we cannot find a free pool then disable the offload */
netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
macvlan_release_l2fw_offload(vdev);
+
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
kfree(accel);
return 0;
@@ -9325,7 +9498,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
adapter);
@@ -9389,6 +9562,11 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
features &= ~NETIF_F_LRO;
+ if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
+ e_dev_err("LRO is not supported with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+
return features;
}
@@ -9765,6 +9943,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!macvlan_supports_dest_filter(vdev))
return ERR_PTR(-EMEDIUMTYPE);
+ /* We need to lock down the macvlan to be a single queue device so that
+ * we can reuse the tc_to_txq field in the macvlan netdev to represent
+ * the queue mapping to our netdev.
+ */
+ if (netif_is_multiqueue(vdev))
+ return ERR_PTR(-ERANGE);
+
pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
if (pool == adapter->num_rx_pools) {
u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9821,6 +10006,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
return ERR_PTR(-ENOMEM);
set_bit(pool, adapter->fwd_bitmask);
+ netdev_set_sb_channel(vdev, pool);
accel->pool = pool;
accel->netdev = vdev;
@@ -9862,6 +10048,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
ring->netdev = NULL;
}
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(pdev, accel->netdev);
+ netdev_set_sb_channel(accel->netdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
}
@@ -9962,7 +10152,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
@@ -10022,7 +10211,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
- .ndo_select_queue = ixgbe_select_queue,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -10045,6 +10233,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
+ .ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59416eddd840..d86446d202d5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbevf_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 06ff185eb188..a5ab6f3403ae 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1911,10 +1911,10 @@ jme_wait_link(struct jme_adapter *jme)
{
u32 phylink, to = JME_WAIT_LINK_TIME;
- mdelay(1000);
+ msleep(1000);
phylink = jme_linkstat_from_phy(jme);
while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
- mdelay(10);
+ usleep_range(10000, 11000);
phylink = jme_linkstat_from_phy(jme);
}
}
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index afc810069440..7a637b51c7d2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- /* we are currently only using the first queue */
- return 0;
-}
-
static int
ltq_etop_init(struct net_device *dev)
{
@@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
- .ndo_select_queue = ltq_etop_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_init = ltq_etop_init,
.ndo_tx_timeout = ltq_etop_tx_timeout,
};
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index cc2f7701e71e..f33fd22b351c 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -18,8 +18,8 @@ if NET_VENDOR_MARVELL
config MV643XX_ETH
tristate "Marvell Discovery (643XX) and Orion ethernet support"
- depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
- depends on HAS_DMA
+ depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+ depends on INET
select PHYLIB
select MVMDIO
---help---
@@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE
config MVNETA
tristate "Marvell Armada 370/38x/XP/37xx network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
- depends on HAS_DMA
select MVMDIO
select PHYLINK
---help---
@@ -84,7 +83,6 @@ config MVNETA_BM
config MVPP2
tristate "Marvell Armada 375/7K/8K network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
- depends on HAS_DMA
select MVMDIO
select PHYLINK
---help---
@@ -93,7 +91,7 @@ config MVPP2
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
- depends on HAS_IOMEM && HAS_DMA
+ depends on HAS_IOMEM
depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
select PHYLIB
---help---
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 17a904cc6a5e..bc80a678abc3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -295,10 +295,10 @@
#define MVNETA_RSS_LU_TABLE_SIZE 1
/* Max number of Rx descriptors */
-#define MVNETA_MAX_RXD 128
+#define MVNETA_MAX_RXD 512
/* Max number of Tx descriptors */
-#define MVNETA_MAX_TXD 532
+#define MVNETA_MAX_TXD 1024
/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100
@@ -328,6 +328,8 @@
enum {
ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_STAT_SKB_ALLOC_ERR,
+ ETHTOOL_STAT_REFILL_ERR,
ETHTOOL_MAX_STATS,
};
@@ -375,6 +377,8 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3054, T_REG_32, "fc_sent", },
{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
+ { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
+ { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
};
struct mvneta_pcpu_stats {
@@ -479,7 +483,10 @@ struct mvneta_port {
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
-#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
+#define MVNETA_RXD_LAST_DESC BIT(26)
+#define MVNETA_RXD_FIRST_DESC BIT(27)
+#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
+ MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
#if defined(__LITTLE_ENDIAN)
@@ -589,9 +596,6 @@ struct mvneta_rx_queue {
/* num of rx descriptors in the rx descriptor ring */
int size;
- /* counter of times when mvneta_refill() failed */
- int missed;
-
u32 pkts_coal;
u32 time_coal;
@@ -609,6 +613,18 @@ struct mvneta_rx_queue {
/* Index of the next RX DMA descriptor to process */
int next_desc_to_proc;
+
+ /* Index of first RX DMA descriptor to refill */
+ int first_to_refill;
+ u32 refill_num;
+
+ /* pointer to an incomplete skb being reassembled */
+ struct sk_buff *skb;
+ int left_size;
+
+ /* error counters */
+ u32 skb_alloc_err;
+ u32 refill_err;
};
static enum cpuhp_state online_hpstate;
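The new skb/left_size fields above carry an in-progress packet across
descriptors, which the rx path below splits into FIRST/middle/LAST handling.
A loose standalone sketch of that bookkeeping; the real code copies headers
into a small skb and attaches page frags, and the sizes here are assumptions,
while the bit values mirror the defines earlier in the patch:

#include <stdio.h>

#define RXD_FIRST (1u << 27)    /* mirrors MVNETA_RXD_FIRST_DESC */
#define RXD_LAST  (1u << 26)    /* mirrors MVNETA_RXD_LAST_DESC */

int main(void)
{
        /* assumed: a 5000-byte packet spread over two page-sized buffers */
        struct { unsigned int status, size; } desc[] = {
                { RXD_FIRST, 4096 }, { RXD_LAST, 904 },
        };
        int have_skb = 0, left_size = 0;
        unsigned int i;

        for (i = 0; i < sizeof(desc) / sizeof(desc[0]); i++) {
                if (desc[i].status & RXD_FIRST) {
                        have_skb = 1;                   /* rxq->skb = alloc() */
                        left_size = 5000 - desc[i].size;
                } else if (have_skb) {
                        left_size -= desc[i].size;      /* frag appended */
                }
                if ((desc[i].status & RXD_LAST) && have_skb) {
                        printf("deliver skb, left_size=%d\n", left_size);
                        have_skb = 0;                   /* rxq->skb = NULL */
                }
        }
        return 0;
}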
@@ -621,6 +637,7 @@ static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;
+static int rx_header_size __read_mostly = 128;
/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;
@@ -1684,13 +1701,6 @@ static void mvneta_rx_error(struct mvneta_port *pp,
{
u32 status = rx_desc->status;
- if (!mvneta_rxq_desc_is_first_last(status)) {
- netdev_err(pp->dev,
- "bad rx status %08x (buffer oversize), size=%d\n",
- status, rx_desc->data_size);
- return;
- }
-
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -1715,7 +1725,8 @@ static void mvneta_rx_error(struct mvneta_port *pp,
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
struct sk_buff *skb)
{
- if ((status & MVNETA_RXD_L3_IP4) &&
+ if ((pp->dev->features & NETIF_F_RXCSUM) &&
+ (status & MVNETA_RXD_L3_IP4) &&
(status & MVNETA_RXD_L4_CSUM_OK)) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1790,47 +1801,30 @@ static void mvneta_txq_done(struct mvneta_port *pp,
}
}
-void *mvneta_frag_alloc(unsigned int frag_size)
-{
- if (likely(frag_size <= PAGE_SIZE))
- return netdev_alloc_frag(frag_size);
- else
- return kmalloc(frag_size, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-
-void mvneta_frag_free(unsigned int frag_size, void *data)
-{
- if (likely(frag_size <= PAGE_SIZE))
- skb_free_frag(data);
- else
- kfree(data);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_free);
-
/* Refill processing for SW buffer management */
+/* Allocate a page per descriptor */
static int mvneta_rx_refill(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
- struct mvneta_rx_queue *rxq)
-
+ struct mvneta_rx_queue *rxq,
+ gfp_t gfp_mask)
{
dma_addr_t phys_addr;
- void *data;
+ struct page *page;
- data = mvneta_frag_alloc(pp->frag_size);
- if (!data)
+ page = __dev_alloc_page(gfp_mask);
+ if (!page)
return -ENOMEM;
- phys_addr = dma_map_single(pp->dev->dev.parent, data,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
+ /* map page for use */
+ phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(page);
return -ENOMEM;
}
phys_addr += pp->rx_offset_correction;
- mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
+ mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
return 0;
}
@@ -1893,115 +1887,192 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
void *data = rxq->buf_virt_addr[i];
+ if (!data || !(rx_desc->buf_phys_addr))
+ continue;
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(data);
}
}
+static inline
+int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
+{
+ struct mvneta_rx_desc *rx_desc;
+ int curr_desc = rxq->first_to_refill;
+ int i;
+
+ for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
+ rx_desc = rxq->descs + curr_desc;
+ if (!(rx_desc->buf_phys_addr)) {
+ if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ pr_err("Can't refill queue %d. Done %d of %d\n",
+ rxq->id, i, rxq->refill_num);
+ rxq->refill_err++;
+ break;
+ }
+ }
+ curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
+ }
+ rxq->refill_num -= i;
+ rxq->first_to_refill = curr_desc;
+
+ return i;
+}
+
/* Main rx processing when using software buffer management */
-static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_swbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
- int rx_done;
+ int rx_todo, rx_proc;
+ int refill = 0;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
/* Get number of received packets */
- rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
-
- if (rx_todo > rx_done)
- rx_todo = rx_done;
-
- rx_done = 0;
+ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ rx_proc = 0;
/* Fairness NAPI loop */
- while (rx_done < rx_todo) {
+ while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- struct sk_buff *skb;
unsigned char *data;
+ struct page *page;
dma_addr_t phys_addr;
- u32 rx_status, frag_size;
- int rx_bytes, err, index;
+ u32 rx_status, index;
+ int rx_bytes, skb_size, copy_size;
+ int frag_num, frag_size, frag_offset;
- rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
index = rx_desc - rxq->descs;
- data = rxq->buf_virt_addr[index];
+ page = (struct page *)rxq->buf_virt_addr[index];
+ data = page_address(page);
+ /* Prefetch header */
+ prefetch(data);
+
phys_addr = rx_desc->buf_phys_addr;
+ rx_status = rx_desc->status;
+ rx_proc++;
+ rxq->refill_num++;
+
+ if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ /* Check errors only for FIRST descriptor */
+ if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
+ mvneta_rx_error(pp, rx_desc);
+ dev->stats.rx_errors++;
+ /* leave the descriptor untouched */
+ continue;
+ }
+ rx_bytes = rx_desc->data_size -
+ (ETH_FCS_LEN + MVNETA_MH_SIZE);
+
+ /* Allocate small skb for each new packet */
+ skb_size = max(rx_copybreak, rx_header_size);
+ rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
+ if (unlikely(!rxq->skb)) {
+ netdev_err(dev,
+ "Can't allocate skb on queue %d\n",
+ rxq->id);
+ dev->stats.rx_dropped++;
+ rxq->skb_alloc_err++;
+ continue;
+ }
+ copy_size = min(skb_size, rx_bytes);
+
+ /* Copy data from buffer to SKB, skip Marvell header */
+ memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
+ copy_size);
+ skb_put(rxq->skb, copy_size);
+ rxq->left_size = rx_bytes - copy_size;
+
+ mvneta_rx_csum(pp, rx_status, rxq->skb);
+ if (rxq->left_size == 0) {
+ int size = copy_size + MVNETA_MH_SIZE;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = 0;
+ frag_offset = copy_size + MVNETA_MH_SIZE;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rxq->left_size -= frag_size;
+ }
+ } else {
+ /* Middle or Last descriptor */
+ if (unlikely(!rxq->skb)) {
+ pr_debug("no skb for rx_status 0x%x\n",
+ rx_status);
+ continue;
+ }
+ if (!rxq->left_size) {
+ /* last descriptor has only the FCS and can be discarded */
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ ETH_FCS_LEN,
+ DMA_FROM_DEVICE);
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = skb_shinfo(rxq->skb)->nr_frags;
+ frag_offset = 0;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ rxq->left_size -= frag_size;
+ }
+ } /* Middle or Last descriptor */
- if (!mvneta_rxq_desc_is_first_last(rx_status) ||
- (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
- mvneta_rx_error(pp, rx_desc);
-err_drop_frame:
- dev->stats.rx_errors++;
- /* leave the descriptor untouched */
+ if (!(rx_status & MVNETA_RXD_LAST_DESC))
+ /* no last descriptor this time */
continue;
- }
-
- if (rx_bytes <= rx_copybreak) {
- /* better copy a small frame and not unmap the DMA region */
- skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
- if (unlikely(!skb))
- goto err_drop_frame;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr,
- MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes,
- DMA_FROM_DEVICE);
- skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
- skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
-
- rcvd_pkts++;
- rcvd_bytes += rx_bytes;
-
- /* leave the descriptor and buffer untouched */
+ if (rxq->left_size) {
+ pr_err("got last desc, but left_size (%d) != 0\n",
+ rxq->left_size);
+ dev_kfree_skb_any(rxq->skb);
+ rxq->left_size = 0;
+ rxq->skb = NULL;
continue;
}
-
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc, rxq);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- goto err_drop_frame;
- }
-
- frag_size = pp->frag_size;
-
- skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
-
- /* After refill old buffer has to be unmapped regardless
- * the skb is successfully built or not.
- */
- dma_unmap_single(dev->dev.parent, phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
-
- if (!skb)
- goto err_drop_frame;
-
rcvd_pkts++;
- rcvd_bytes += rx_bytes;
+ rcvd_bytes += rxq->skb->len;
/* Linux processing */
- skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
- skb_put(skb, rx_bytes);
-
- skb->protocol = eth_type_trans(skb, dev);
+ rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
+ if (dev->features & NETIF_F_GRO)
+ napi_gro_receive(napi, rxq->skb);
+ else
+ netif_receive_skb(rxq->skb);
- napi_gro_receive(&port->napi, skb);
+ /* clear the incomplete skb pointer in the queue */
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
if (rcvd_pkts) {
@@ -2013,17 +2084,20 @@ err_drop_frame:
u64_stats_update_end(&stats->syncp);
}
+ /* return some buffers to the hardware queue; one at a time is too slow */
+ refill = mvneta_rx_refill_queue(pp, rxq);
+
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
- return rx_done;
+ return rcvd_pkts;
}
/* Main rx processing when using hardware buffer management */
-static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_hwbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -2085,7 +2159,7 @@ err_drop_frame:
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2102,7 +2176,7 @@ err_drop_frame:
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
+ rxq->refill_err++;
goto err_drop_frame_ret_pool;
}
@@ -2129,7 +2203,7 @@ err_drop_frame:
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
}
if (rcvd_pkts) {
@@ -2722,9 +2796,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
- rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_hwbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
else
- rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_swbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
}
if (rx_done < budget) {
@@ -2761,9 +2837,11 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
- if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
- netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
- __func__, rxq->id, i, num);
+ if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
+ GFP_KERNEL) != 0) {
+ netdev_err(pp->dev,
+ "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
break;
}
}
@@ -2821,21 +2899,23 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
- /* Set Offset */
- mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
-
/* Set coalescing pkts and time */
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
if (!pp->bm_priv) {
- /* Fill RXQ with buffers from RX pool */
- mvneta_rxq_buf_size_set(pp, rxq,
- MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, 0);
+ mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
} else {
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq,
+ NET_SKB_PAD - pp->rx_offset_correction);
+
mvneta_rxq_bm_enable(pp, rxq);
+ /* Fill RXQ with buffers from RX pool */
mvneta_rxq_long_pool_set(pp, rxq);
mvneta_rxq_short_pool_set(pp, rxq);
mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
@@ -2864,6 +2944,9 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
{
mvneta_rxq_drop_pkts(pp, rxq);
+ if (rxq->skb)
+ dev_kfree_skb_any(rxq->skb);
+
if (rxq->descs)
dma_free_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2874,6 +2957,10 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
rxq->descs_phys = 0;
+ rxq->first_to_refill = 0;
+ rxq->refill_num = 0;
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
static int mvneta_txq_sw_init(struct mvneta_port *pp,
@@ -3177,8 +3264,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_bm_update_mtu(pp, mtu);
pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
@@ -3194,7 +3279,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
- mvneta_port_up(pp);
netdev_update_features(dev);
@@ -3666,8 +3750,7 @@ static int mvneta_open(struct net_device *dev)
int ret;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ pp->frag_size = PAGE_SIZE;
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -3962,6 +4045,12 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
break;
+ case ETHTOOL_STAT_SKB_ALLOC_ERR:
+ val = pp->rxqs[0].skb_alloc_err;
+ break;
+ case ETHTOOL_STAT_REFILL_ERR:
+ val = pp->rxqs[0].refill_err;
+ break;
}
break;
}
@@ -4018,13 +4107,18 @@ static int mvneta_config_rss(struct mvneta_port *pp)
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- /* We have to synchronise on the napi of each CPU */
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *pcpu_port =
- per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_synchronize(&pcpu_port->napi);
- napi_disable(&pcpu_port->napi);
+ napi_synchronize(&pcpu_port->napi);
+ napi_disable(&pcpu_port->napi);
+ }
+ } else {
+ napi_synchronize(&pp->napi);
+ napi_disable(&pp->napi);
}
pp->rxq_def = pp->indir[0];
@@ -4041,12 +4135,16 @@ static int mvneta_config_rss(struct mvneta_port *pp)
mvneta_percpu_elect(pp);
spin_unlock(&pp->lock);
- /* We have to synchronise on the napi of each CPU */
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *pcpu_port =
- per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_enable(&pcpu_port->napi);
+ napi_enable(&pcpu_port->napi);
+ }
+ } else {
+ napi_enable(&pp->napi);
}
netif_tx_start_all_queues(pp->dev);
@@ -4362,14 +4460,6 @@ static int mvneta_probe(struct platform_device *pdev)
pp->dn = dn;
pp->rxq_def = rxq_def;
-
- /* Set RX packet offset correction for platforms, whose
- * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
- * platforms and 0B for 32-bit ones.
- */
- pp->rx_offset_correction =
- max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
-
pp->indir[0] = rxq_def;
/* Get special SoC configurations */
@@ -4457,16 +4547,28 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
+ pp->rx_offset_correction = 0; /* not relevant for SW BM */
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
- if (bm_node && bm_node->data) {
- pp->bm_priv = bm_node->data;
- err = mvneta_bm_port_init(pdev, pp);
- if (err < 0) {
- dev_info(&pdev->dev, "use SW buffer management\n");
- pp->bm_priv = NULL;
+ if (bm_node) {
+ pp->bm_priv = mvneta_bm_get(bm_node);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev,
+ "use SW buffer management\n");
+ mvneta_bm_put(pp->bm_priv);
+ pp->bm_priv = NULL;
+ }
}
+ /* Set RX packet offset correction for platforms whose
+ * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
+ * platforms and 0B for 32-bit ones.
+ */
+ pp->rx_offset_correction = max(0,
+ NET_SKB_PAD -
+ MVNETA_RX_PKT_OFFSET_CORRECTION);
}
of_node_put(bm_node);
@@ -4526,6 +4628,7 @@ err_netdev:
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
err_free_stats:
free_percpu(pp->stats);
@@ -4563,6 +4666,7 @@ static int mvneta_remove(struct platform_device *pdev)
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
return 0;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 466939f8f0cf..de468e1bdba9 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -392,6 +393,20 @@ static void mvneta_bm_put_sram(struct mvneta_bm *priv)
MVNETA_BM_BPPI_SIZE);
}
+struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+
+ return pdev ? platform_get_drvdata(pdev) : NULL;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_get);
+
+void mvneta_bm_put(struct mvneta_bm *priv)
+{
+ platform_device_put(priv->pdev);
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_put);
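mvneta_bm_get() resolves the device node to its platform driver data and, through of_find_device_by_node(), takes a reference on the provider's platform device that mvneta_bm_put() later drops with platform_device_put(). A hypothetical consumer-side helper, mirroring the probe hunk above (the helper name is illustrative):

/* Hypothetical helper: resolve a "buffer-manager" phandle to the BM
 * driver data. of_find_device_by_node() inside mvneta_bm_get() takes
 * the device reference that mvneta_bm_put() drops again.
 */
static struct mvneta_bm *example_bm_lookup(struct device_node *port_node)
{
	struct device_node *bm_node;
	struct mvneta_bm *bm = NULL;

	bm_node = of_parse_phandle(port_node, "buffer-manager", 0);
	if (bm_node)
		bm = mvneta_bm_get(bm_node);
	of_node_put(bm_node);	/* node ref, separate from the device ref */

	return bm;		/* balance with mvneta_bm_put(bm) */
}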
+
static int mvneta_bm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index a32de432800c..c8425d35c049 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -130,10 +130,10 @@ struct mvneta_bm_pool {
};
/* Declarations and definitions */
-void *mvneta_frag_alloc(unsigned int frag_size);
-void mvneta_frag_free(unsigned int frag_size, void *data);
-
#if IS_ENABLED(CONFIG_MVNETA_BM)
+struct mvneta_bm *mvneta_bm_get(struct device_node *node);
+void mvneta_bm_put(struct mvneta_bm *priv);
+
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool, u8 port_map);
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
@@ -178,5 +178,7 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool)
{ return 0; }
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
#endif /* CONFIG_MVNETA_BM */
#endif
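The #else branch uses the usual IS_ENABLED() stub idiom: when the option is compiled out, callers still compile and link against empty static inlines rather than the exported symbols. Schematically (CONFIG_FOO and foo_do_thing() are placeholders):

struct foo;					/* placeholder type */

#if IS_ENABLED(CONFIG_FOO)			/* placeholder option */
int foo_do_thing(struct foo *f);		/* real, exported version */
#else
static inline int foo_do_thing(struct foo *f)	/* compiled-out stub */
{ return 0; }
#endif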
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
index 4d11dd9e3246..51f65a202c6e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/Makefile
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -4,4 +4,4 @@
#
obj-$(CONFIG_MVPP2) := mvpp2.o
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o
+mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index def00dc3eb4e..67b9e81b7c02 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1,17 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_H_
#define _MVPP2_H_
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -66,15 +64,18 @@
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240
+#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244
+#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0)
/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
-#define MVPP22_RSS_TABLE_ENTRY 0x1508
-#define MVPP22_RSS_TABLE 0x1510
+#define MVPP22_RXQ2RSS_TABLE 0x1504
#define MVPP22_RSS_TABLE_POINTER(p) (p)
+#define MVPP22_RSS_TABLE_ENTRY 0x1508
#define MVPP22_RSS_WIDTH 0x150c
/* Classifier Registers */
@@ -86,11 +87,28 @@
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16)
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0)
+#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL0_OFFS 1
+#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23)
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
+#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15)
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6)
+#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
@@ -98,6 +116,32 @@
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+/* Classifier C2 engine Registers */
+#define MVPP22_CLS_C2_TCAM_IDX 0x1b00
+#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10
+#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14
+#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
+#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
+#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_HIT_CTR 0x1b50
+#define MVPP22_CLS_C2_ACT 0x1b60
+#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
+#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
+#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
+#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ATTR0 0x1b64
+#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
+#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
+#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24
+#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21)
+#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7
+#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21
+#define MVPP22_CLS_C2_ATTR1 0x1b68
+#define MVPP22_CLS_C2_ATTR2 0x1b6c
+#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
+#define MVPP22_CLS_C2_ATTR3 0x1b70
+
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
@@ -275,6 +319,11 @@
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
+/* Hit counter registers */
+#define MVPP2_CTRS_IDX 0x7040
+#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
+#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
+
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
@@ -499,7 +548,7 @@
#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
/* Default number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ 4
+#define MVPP2_DEFAULT_RXQ 1
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX 1024
@@ -553,6 +602,11 @@
((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
+#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+
+/* RSS constants */
+#define MVPP22_RSS_TABLE_ENTRIES 32
/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16
@@ -703,6 +757,9 @@ struct mvpp2 {
/* Workqueue to gather hardware statistics */
char queue_name[30];
struct workqueue_struct *stats_queue;
+
+ /* Debugfs root entry */
+ struct dentry *dbgfs_dir;
};
struct mvpp2_pcpu_stats {
@@ -795,6 +852,9 @@ struct mvpp2_port {
bool has_tx_irqs;
u32 tx_time_coal;
+
+ /* RSS indirection table */
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -831,52 +891,52 @@ struct mvpp2_port {
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
- u32 command; /* Options used by HW for packet transmitting.*/
+ __le32 command; /* Options used by HW for packet transmitting.*/
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
- u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_dma_addr; /* physical addr of transmitted buffer */
- u32 buf_cookie; /* cookie for access to TX buffer in tx path */
- u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
- u32 reserved2; /* reserved (for future use) */
+ __le16 data_size; /* data size of transmitted packet in bytes */
+ __le32 buf_dma_addr; /* physical addr of transmitted buffer */
+ __le32 buf_cookie; /* cookie for access to TX buffer in tx path */
+ __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
+ __le32 reserved2; /* reserved (for future use) */
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
- u32 status; /* info about received packet */
- u16 reserved1; /* parser_info (for future use, PnC) */
- u16 data_size; /* size of received packet in bytes */
- u32 buf_dma_addr; /* physical address of the buffer */
- u32 buf_cookie; /* cookie for access to RX buffer in rx path */
- u16 reserved2; /* gem_port_id (for future use, PON) */
- u16 reserved3; /* csum_l4 (for future use, PnC) */
+ __le32 status; /* info about received packet */
+ __le16 reserved1; /* parser_info (for future use, PnC) */
+ __le16 data_size; /* size of received packet in bytes */
+ __le32 buf_dma_addr; /* physical address of the buffer */
+ __le32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ __le16 reserved2; /* gem_port_id (for future use, PON) */
+ __le16 reserved3; /* csum_l4 (for future use, PnC) */
u8 reserved4; /* bm_qset (for future use, BM) */
u8 reserved5;
- u16 reserved6; /* classify_info (for future use, PnC) */
- u32 reserved7; /* flow_id (for future use, PnC) */
- u32 reserved8;
+ __le16 reserved6; /* classify_info (for future use, PnC) */
+ __le32 reserved7; /* flow_id (for future use, PnC) */
+ __le32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
- u32 command;
+ __le32 command;
u8 packet_offset;
u8 phys_txq;
- u16 data_size;
- u64 reserved1;
- u64 buf_dma_addr_ptp;
- u64 buf_cookie_misc;
+ __le16 data_size;
+ __le64 reserved1;
+ __le64 buf_dma_addr_ptp;
+ __le64 buf_cookie_misc;
};
/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
- u32 status;
- u16 reserved1;
- u16 data_size;
- u32 reserved2;
- u32 reserved3;
- u64 buf_dma_addr_key_hash;
- u64 buf_cookie_misc;
+ __le32 status;
+ __le16 reserved1;
+ __le16 data_size;
+ __le32 reserved2;
+ __le32 reserved3;
+ __le64 buf_dma_addr_key_hash;
+ __le64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
@@ -1043,4 +1103,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
u32 data);
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 8581d5b17dd5..efdb7a656835 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1,17 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RSS and Classifier helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include "mvpp2.h"
#include "mvpp2_cls.h"
+#include "mvpp2_prs.h"
+
+#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
+{ \
+ .flow_type = _type, \
+ .flow_id = _id, \
+ .supported_hash_opts = _opts, \
+ .prs_ri = { \
+ .ri = _ri, \
+ .ri_mask = _ri_mask \
+ } \
+}
+
+static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+ /* TCP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* IPv4 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv4 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* Non IP flow, no vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+ 0,
+ MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK),
+ /* Non IP flow, with vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+ MVPP22_CLS_HEK_OPT_VLAN,
+ 0, 0),
+};
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe)
+{
+ fe->index = index;
+ mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
+ fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
+ fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
+ fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
+}
/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
@@ -23,6 +349,25 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+ mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+ le->way = way;
+ le->lkpid = lkpid;
+ le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
+}
+
/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
struct mvpp2_cls_lookup_entry *le)
@@ -34,6 +379,439 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
+/* Operations on flow entry */
+static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+}
+
+static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
+ int num_of_fields)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
+}
+
+static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
+ int field_index)
+{
+ return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK;
+}
+
+static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
+ int field_index, int field_id)
+{
+ fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK);
+ fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
+}
+
+static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
+ int engine)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
+}
+
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
+ MVPP2_CLS_FLOW_TBL0_ENG_MASK;
+}
+
+static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
+ bool from_packet)
+{
+ if (from_packet)
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+ else
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+}
+
+static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
+}
+
+static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
+ bool is_last)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
+ fe->data[0] |= !!is_last;
+}
+
+static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
+}
+
+static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+/* Initialize the parser entry for the given flow */
+static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
+ flow->prs_ri.ri_mask);
+}
+
+/* Initialize the Lookup Id table entry for the given flow */
+static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_lookup_entry le;
+
+ le.way = 0;
+ le.lkpid = flow->flow_id;
+
+ /* The default RxQ for this port is set in the C2 lookup */
+ le.data = 0;
+
+ /* We point to the first lookup in the sequence for this flow, that is
+ * the C2 lookup.
+ */
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+
+ /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
+ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ mvpp2_cls_lookup_write(priv, &le);
+}
+
+/* Initialize the flow table entries for the given flow */
+static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_flow_entry fe;
+ int i;
+
+ /* C2 lookup */
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_last_set(&fe, 0);
+ mvpp2_cls_flow_pri_set(&fe, 0);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+
+ /* Add all ports */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++)
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+
+ /* C3Hx lookups */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_pri_set(&fe, i + 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* Update the last entry */
+ mvpp2_cls_flow_last_set(&fe, 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
+
+ mvpp2_cls_flow_write(priv, &fe);
+}
+
+/* Add a field to the Header Extracted Key generation parameters */
+static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
+ u32 field_id)
+{
+ int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ if (nb_fields == MVPP2_FLOW_N_FIELDS)
+ return -EINVAL;
+
+ mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
+
+ mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
+
+ return 0;
+}
+
+static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
+ unsigned long hash_opts)
+{
+ u32 field_id;
+ int i;
+
+ /* Clear old fields */
+ mvpp2_cls_flow_hek_num_set(fe, 0);
+ fe->data[2] = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ field_id = MVPP22_CLS_FIELD_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ field_id = MVPP22_CLS_FIELD_IP4SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ field_id = MVPP22_CLS_FIELD_IP4DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ field_id = MVPP22_CLS_FIELD_IP6SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ field_id = MVPP22_CLS_FIELD_IP6DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ field_id = MVPP22_CLS_FIELD_L4SIP;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ field_id = MVPP22_CLS_FIELD_L4DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (mvpp2_flow_add_hek_field(fe, field_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
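The two helpers above let a caller describe the key declaratively as a bitmask. As a usage sketch, programming the IPv4 address pair plus both L4 ports (exactly the MVPP22_CLS_HEK_IP4_5T combination defined in mvpp2_cls.h) would look like this; error handling is elided and the wrapper name is hypothetical:

/* Usage sketch: after this call, fe->data[1] holds the field count (4)
 * and fe->data[2] the packed field IDs.
 */
static void example_set_ip4_5t(struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_flow_set_hek_fields(fe, MVPP22_CLS_HEK_OPT_IP4SA |
				      MVPP22_CLS_HEK_OPT_IP4DA |
				      MVPP22_CLS_HEK_OPT_L4SIP |
				      MVPP22_CLS_HEK_OPT_L4DIP);
}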
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+ if (flow >= MVPP2_N_FLOWS)
+ return NULL;
+
+ return &cls_flows[flow];
+}
+
+/* Set the hash generation options for the given traffic flow.
+ * One traffic flow (in the ethtool sense) has multiple classification flows,
+ * to handle specific cases such as fragmentation or the presence of a
+ * VLAN / DSA tag.
+ *
+ * Each of these individual flows has different constraints; for example, we
+ * can't hash fragmented packets on L4 data (else we would risk packet
+ * re-ordering), so each classification flow masks the options with its
+ * supported ones.
+ */
+static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
+ u16 requested_opts)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, engine, flow_index;
+ u16 hash_opts;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return -EINVAL;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = flow->supported_hash_opts & requested_opts;
+
+ /* Use the C3HB engine to access L4 info; this adds the L4 info
+ * to the hash parameters.
+ */
+ if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
+ engine = MVPP22_CLS_ENGINE_C3HB;
+ else
+ engine = MVPP22_CLS_ENGINE_C3HA;
+
+ if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
+ return -EINVAL;
+
+ mvpp2_cls_flow_eng_set(&fe, engine);
+
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ return 0;
+}
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
+{
+ u16 hash_opts = 0;
+ int n_fields, i, field;
+
+ n_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ for (i = 0; i < n_fields; i++) {
+ field = mvpp2_cls_flow_hek_get(fe, i);
+
+ switch (field) {
+ case MVPP22_CLS_FIELD_MAC_DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ break;
+ case MVPP22_CLS_FIELD_VLAN:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ break;
+ case MVPP22_CLS_FIELD_L3_PROTO:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ break;
+ case MVPP22_CLS_FIELD_IP4SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
+ break;
+ case MVPP22_CLS_FIELD_IP4DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
+ break;
+ case MVPP22_CLS_FIELD_IP6SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
+ break;
+ case MVPP22_CLS_FIELD_IP6DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
+ break;
+ case MVPP22_CLS_FIELD_L4SIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ break;
+ case MVPP22_CLS_FIELD_L4DIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ break;
+ default:
+ break;
+ }
+ }
+ return hash_opts;
+}
+
+/* Returns the hash opts for this flow. There are several classifier flows
+ * for one traffic flow; this returns an aggregation of all their
+ * configurations.
+ */
+static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, flow_index;
+ u16 hash_opts = 0;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts |= mvpp2_flow_get_hek_fields(&fe);
+ }
+
+ return hash_opts;
+}
+
+static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
+{
+ struct mvpp2_cls_flow *flow;
+ int i;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ break;
+
+ mvpp2_cls_flow_prs_init(priv, flow);
+ mvpp2_cls_flow_lkp_init(priv, flow);
+ mvpp2_cls_flow_init(priv, flow);
+ }
+}
+
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ /* Write TCAM */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+}
+
+static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+
+ memset(&c2, 0, sizeof(c2));
+
+ c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Update RSS status after matching this entry */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ /* Configure the default rx queue: update Queue Low and Queue High, but
+ * don't lock them, since the rx queue selection might be overridden by RSS
+ */
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
+
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
@@ -61,6 +839,8 @@ void mvpp2_cls_init(struct mvpp2 *priv)
le.way = 1;
mvpp2_cls_lookup_write(priv, &le);
}
+
+ mvpp2_cls_port_init_flows(priv);
}
void mvpp2_cls_port_config(struct mvpp2_port *port)
@@ -89,6 +869,47 @@ void mvpp2_cls_port_config(struct mvpp2_port *port)
/* Update lookup ID table entry */
mvpp2_cls_lookup_write(port->priv, &le);
+
+ mvpp2_port_c2_cls_init(port);
+}
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
+
+ return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
+}
+
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+void mvpp22_rss_enable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_enable(port);
+}
+
+void mvpp22_rss_disable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_disable(port);
}
/* Set CPU queue number for oversize packets */
@@ -107,7 +928,116 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
-void mvpp22_init_rss(struct mvpp2_port *port)
+static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
+{
+ int nrxqs, cpu, cpus = num_possible_cpus();
+
+ /* Number of RXQs per CPU */
+ nrxqs = port->nrxqs / cpus;
+
+ /* CPU that will handle this rx queue */
+ cpu = rxq / nrxqs;
+
+ if (!cpu_online(cpu))
+ return port->first_rxq;
+
+ /* Indirection to better distribute the packets across the CPUs when
+ * configuring the RSS queues.
+ */
+ return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
+}
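The arithmetic is easier to see with concrete numbers. A small standalone check, with illustrative values of 8 total RXQs over 4 possible CPUs (so 2 RXQs per CPU), shows consecutive hash buckets landing on consecutive CPUs:

#include <stdio.h>

int main(void)
{
	unsigned int first_rxq = 0, total_rxqs = 8, cpus = 4;
	unsigned int nrxqs = total_rxqs / cpus;	/* RXQs per CPU: 2 */
	unsigned int rxq;

	for (rxq = 0; rxq < total_rxqs; rxq++) {
		unsigned int out = first_rxq +
			((rxq * nrxqs + rxq / cpus) % total_rxqs);
		/* prints rxq 0,2,4,6,1,3,5,7 -> cpu 0,1,2,3,0,1,2,3 */
		printf("indir[%u] -> rxq %u (cpu %u)\n",
		       rxq, out, out / nrxqs);
	}
	return 0;
}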
+
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+{
+ struct mvpp2 *priv = port->priv;
+ int i;
+
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+ u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+ MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+ mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+ mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
+ mvpp22_rxfh_indir(port, port->indir[i]));
+ }
+}
+
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ u16 hash_opts = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ if (info->data & RXH_L4_B_0_1)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ if (info->data & RXH_L4_B_2_3)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ /* Fallthrough */
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ if (info->data & RXH_L2DA)
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ if (info->data & RXH_VLAN)
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ if (info->data & RXH_L3_PROTO)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ if (info->data & RXH_IP_SRC)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
+ MVPP22_CLS_HEK_OPT_IP6SA);
+ if (info->data & RXH_IP_DST)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
+ MVPP22_CLS_HEK_OPT_IP6DA);
+ break;
+ default: return -EOPNOTSUPP;
+ }
+
+ return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+}
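For context, the ethtool_rxnfc argument arrives pre-filled by the ethtool core; a command such as "ethtool -N <dev> rx-flow-hash tcp4 sdfn" amounts to roughly the following. This is an illustrative sketch and the helper name is hypothetical:

/* Roughly what "ethtool -N <dev> rx-flow-hash tcp4 sdfn" hands to the
 * driver: s/d = IP src/dst, f/n = L4 bytes 0-1 / 2-3 (the ports).
 */
static void example_fill_rxnfc(struct ethtool_rxnfc *info)
{
	info->cmd	= ETHTOOL_SRXFH;
	info->flow_type	= TCP_V4_FLOW;
	info->data	= RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3;
}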
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ unsigned long hash_opts;
+ int i;
+
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+ info->data = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ info->data |= RXH_L2DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ info->data |= RXH_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_L3_PROTO:
+ info->data |= RXH_L3_PROTO;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ info->data |= RXH_IP_SRC;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ info->data |= RXH_IP_DST;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ info->data |= RXH_L4_B_0_1;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ info->data |= RXH_L4_B_2_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void mvpp22_rss_port_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
int i;
@@ -115,27 +1045,30 @@ void mvpp22_init_rss(struct mvpp2_port *port)
/* Set the table width: replace the whole classifier Rx queue number
* with the ones configured in RSS table entries.
*/
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
- /* Loop through the classifier Rx Queues and map them to a RSS table.
- * Map them all to the first table (0) by default.
+ /* The default RxQ is used as a key to select the RSS table to use.
+ * We use one RSS table per port.
*/
- for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
- mvpp2_write(priv, MVPP22_RSS_TABLE,
- MVPP22_RSS_TABLE_POINTER(0));
- }
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->id));
/* Configure the first table to evenly distribute the packets across
- * real Rx Queues. The table entries map a hash to an port Rx Queue.
+ * real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
- for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
- MVPP22_RSS_INDEX_TABLE_ENTRY(i);
- mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
+ port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
- }
+ mvpp22_rss_fill_table(port, port->id);
+ /* Configure default flows */
+ mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8e1d7f9ffa0b..089f05f29891 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -1,27 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RSS and Classifier definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_CLS_H_
#define _MVPP2_CLS_H_
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64
#define MVPP2_CLS_RX_QUEUES 256
-/* RSS constants */
-#define MVPP22_RSS_TABLE_ENTRIES 32
+/* Classifier flow constants */
+
+#define MVPP2_FLOW_N_FIELDS 4
+
+enum mvpp2_cls_engine {
+ MVPP22_CLS_ENGINE_C2 = 1,
+ MVPP22_CLS_ENGINE_C3A,
+ MVPP22_CLS_ENGINE_C3B,
+ MVPP22_CLS_ENGINE_C4,
+ MVPP22_CLS_ENGINE_C3HA = 6,
+ MVPP22_CLS_ENGINE_C3HB = 7,
+};
+
+#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(1)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8)
+#define MVPP22_CLS_HEK_N_FIELDS 9
+
+#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
+ MVPP22_CLS_HEK_OPT_L4DIP)
+
+#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \
+ MVPP22_CLS_HEK_OPT_IP4DA)
+
+#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \
+ MVPP22_CLS_HEK_OPT_IP6DA)
+
+/* The fifth tuple in "5T" is the L4_Info field */
+#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+enum mvpp2_cls_field_id {
+ MVPP22_CLS_FIELD_MAC_DA = 0x03,
+ MVPP22_CLS_FIELD_VLAN = 0x06,
+ MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
+ MVPP22_CLS_FIELD_IP4SA = 0x10,
+ MVPP22_CLS_FIELD_IP4DA = 0x11,
+ MVPP22_CLS_FIELD_IP6SA = 0x17,
+ MVPP22_CLS_FIELD_IP6DA = 0x1a,
+ MVPP22_CLS_FIELD_L4SIP = 0x1d,
+ MVPP22_CLS_FIELD_L4DIP = 0x1e,
+};
+
+enum mvpp2_cls_flow_seq {
+ MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
+ MVPP2_CLS_FLOW_SEQ_FIRST1,
+ MVPP2_CLS_FLOW_SEQ_FIRST2,
+ MVPP2_CLS_FLOW_SEQ_LAST,
+ MVPP2_CLS_FLOW_SEQ_MIDDLE
+};
+
+/* Classifier C2 engine constants */
+#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
+
+enum mvpp22_cls_c2_action {
+ MVPP22_C2_NO_UPD = 0,
+ MVPP22_C2_NO_UPD_LOCK,
+ MVPP22_C2_UPD,
+ MVPP22_C2_UPD_LOCK,
+};
+
+enum mvpp22_cls_c2_fwd_action {
+ MVPP22_C2_FWD_NO_UPD = 0,
+ MVPP22_C2_FWD_NO_UPD_LOCK,
+ MVPP22_C2_FWD_SW,
+ MVPP22_C2_FWD_SW_LOCK,
+ MVPP22_C2_FWD_HW,
+ MVPP22_C2_FWD_HW_LOCK,
+ MVPP22_C2_FWD_HW_LOW_LAT,
+ MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
+};
+
+#define MVPP2_CLS_C2_TCAM_WORDS 5
+#define MVPP2_CLS_C2_ATTR_WORDS 5
+
+struct mvpp2_cls_c2_entry {
+ u32 index;
+ u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ u32 act;
+ u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+};
+
+/* Classifier C2 engine entries */
+#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
+#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
+/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
+ *
+ * The first performs a lookup using the C2 TCAM engine, to tag the
+ * packet for software forwarding (needed for RSS), enable or disable RSS, and
+ * assign the default rx queue.
+ *
+ * The second configures the hash generation, by specifying which fields of the
+ * packet header are used to generate the hash, and specifies the relevant hash
+ * engine to use.
+ */
+#define MVPP22_RSS_FLOW_C2_OFFS 0
+#define MVPP22_RSS_FLOW_HASH_OFFS 1
+#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+
+#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_C2_OFFS)
+#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_HASH_OFFS)
+#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
+
+/* Packet flow ID */
+enum mvpp2_prs_flow {
+ MVPP2_FL_START = 8,
+ MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START,
+ MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */
+ MVPP2_FL_IP4_TAG,
+ MVPP2_FL_IP6_UNTAG,
+ MVPP2_FL_IP6_TAG,
+ MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_FL_NON_IP_TAG,
+ MVPP2_FL_LAST,
+};
+
+struct mvpp2_cls_flow {
+ /* The L2-L4 traffic flow type */
+ int flow_type;
+
+ /* The first id in the flow table for this flow */
+ u16 flow_id;
+
+ /* The supported HEK fields for this flow */
+ u16 supported_hash_opts;
+
+ /* The Header Parser result_info that matches this flow */
+ struct mvpp2_prs_result_info prs_ri;
+};
+
+#define MVPP2_N_FLOWS 52
+
+#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
+#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW)
+#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \
+ (port) + 1)
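Assuming MVPP2_MAX_PORTS is 4 (its value elsewhere in mvpp2.h), each flow_id owns a block of five consecutive flow-table entries: the C2 entry first, then one hash entry per port. A standalone check of the layout for flow_id 8 (MVPP2_FL_START):

#include <stdio.h>

#define MAX_PORTS		4	/* assumed MVPP2_MAX_PORTS */
#define ENTRIES_PER_FLOW	(MAX_PORTS + 1)
#define FLOW_C2_ENTRY(id)	((id) * ENTRIES_PER_FLOW)
#define PORT_FLOW_HASH_ENTRY(port, id) \
	((id) * ENTRIES_PER_FLOW + (port) + 1)

int main(void)
{
	int port, id = 8;	/* MVPP2_FL_START */

	printf("C2 entry:   %d\n", FLOW_C2_ENTRY(id));		/* 40 */
	for (port = 0; port < MAX_PORTS; port++)		/* 41..44 */
		printf("hash entry: %d\n", PORT_FLOW_HASH_ENTRY(port, id));
	return 0;
}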
struct mvpp2_cls_flow_entry {
u32 index;
u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -33,7 +193,15 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-void mvpp22_init_rss(struct mvpp2_port *port);
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+
+void mvpp22_rss_port_init(struct mvpp2_port *port);
+
+void mvpp22_rss_enable(struct mvpp2_port *port);
+void mvpp22_rss_disable(struct mvpp2_port *port);
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
void mvpp2_cls_init(struct mvpp2 *priv);
@@ -41,4 +209,25 @@ void mvpp2_cls_port_config(struct mvpp2_port *port);
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe);
+
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le);
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
new file mode 100644
index 000000000000..f9744a61e5dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2018 Marvell
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+struct mvpp2_dbgfs_prs_entry {
+ int tid;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+ int flow;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+ struct mvpp2_port *port;
+ struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
+static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits);
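DEFINE_SHOW_ATTRIBUTE() is the seq_file convenience macro that generates the open helper and file_operations otherwise written by hand, as the _release-needing variants below still are. Its expansion is roughly the following (shown schematically, with "name" in place of the ##-pasted argument):

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(name): */
static int name_open(struct inode *inode, struct file *file)
{
	return single_open(file, name_show, inode->i_private);
}

static const struct file_operations name_fops = {
	.owner		= THIS_MODULE,
	.open		= name_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};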
+
+static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+
+ u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
+
+static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+ const char *flow_name;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ switch (f->flow_type) {
+ case IPV4_FLOW:
+ flow_name = "ipv4";
+ break;
+ case IPV6_FLOW:
+ flow_name = "ipv6";
+ break;
+ case TCP_V4_FLOW:
+ flow_name = "tcp4";
+ break;
+ case TCP_V6_FLOW:
+ flow_name = "tcp6";
+ break;
+ case UDP_V4_FLOW:
+ flow_name = "udp4";
+ break;
+ case UDP_V6_FLOW:
+ flow_name = "udp6";
+ break;
+ default:
+ flow_name = "other";
+ }
+
+ seq_printf(s, "%s\n", flow_name);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
+ .open = mvpp2_dbgfs_flow_type_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_flow_type_release,
+};
+
+static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", f->flow_id);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id);
+
+static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index;
+ u16 hash_opts;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = mvpp2_flow_get_hek_fields(&fe);
+
+ seq_printf(s, "0x%04x\n", hash_opts);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
+ inode->i_private);
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
+ .open = mvpp2_dbgfs_port_flow_hash_opt_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_port_flow_hash_opt_release,
+};
+
+static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index, engine;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ engine = mvpp2_cls_flow_eng_get(&fe);
+
+ seq_printf(s, "%d\n", engine);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
+
+static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ u32 hits;
+
+ hits = mvpp2_cls_c2_hit_count(port->priv,
+ MVPP22_CLS_C2_RSS_ENTRY(port->id));
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
+
+static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+
+ ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ seq_printf(s, "%d\n", (qh << 3 | ql));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
+
+static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ int enabled;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
+
+ seq_printf(s, "%d\n", enabled);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable);
+
+static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ unsigned char byte[2], enable[2];
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ u16 rvid;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ if (!priv->prs_shadow[tid].valid)
+ continue;
+
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+
+ seq_printf(s, "%u\n", rvid);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+
+static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ mvpp2_prs_init_from_hw(port->priv, &pe, i);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
+ seq_printf(s, "%03d\n", i);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser);
+
+static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC ||
+ priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ /* We only want entries active on this port */
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+
+ seq_printf(s, "%pM\n", da);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter);
+
+static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+
+ seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu);
+
+static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned int pmap;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ pmap &= MVPP2_PRS_PORT_MASK;
+
+ seq_printf(s, "%02x\n", pmap);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap);
+
+static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char ai, ai_mask;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
+ ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK;
+
+ seq_printf(s, "%02x %02x\n", ai, ai_mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai);
+
+static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char data[8], mask[8];
+ int i;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ for (i = 0; i < 8; i++)
+ mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]);
+
+ seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask);
+
+ return 0;
+}
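
An aside on the %*phN specifier used here: it is the kernel's hex-dump printk extension, emitting the given number of bytes back to back with no separator (plain %*ph uses spaces). A small sketch:

/* Sketch of the printk hex-dump variants:
 *
 *   u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *   seq_printf(s, "%*ph\n",  4, buf);    ->  "de ad be ef"
 *   seq_printf(s, "%*phN\n", 4, buf);    ->  "deadbeef"
 */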
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata);
+
+static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ seq_printf(s, "%*phN\n", 14, pe.sram);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram);
+
+static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ int val;
+
+ val = mvpp2_prs_hits(entry->priv, entry->tid);
+ if (val < 0)
+ return val;
+
+ seq_printf(s, "%d\n", val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits);
+
+static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+ int tid = entry->tid;
+
+ seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_prs_entry *entry = seq->private;
+
+ kfree(entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
+ .open = mvpp2_dbgfs_prs_valid_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_prs_valid_release,
+};
+
+static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
+ struct mvpp2_port *port,
+ struct mvpp2_dbgfs_flow_entry *entry)
+{
+ struct mvpp2_dbgfs_port_flow_entry *port_entry;
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ /* This will be freed by 'hash_opts' release op */
+ port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
+ if (!port_entry)
+ return -ENOMEM;
+
+ port_entry->port = port;
+ port_entry->dbg_fe = entry;
+
+ debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_hash_opt_fops);
+
+ debugfs_create_file("engine", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_engine_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_dbgfs_flow_entry *entry;
+ struct dentry *flow_entry_dir;
+ char flow_entry_name[10];
+ int i, ret;
+
+ sprintf(flow_entry_name, "%02d", flow);
+
+ flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+ if (IS_ERR(flow_entry_dir))
+ return PTR_ERR(flow_entry_dir);
+
+ /* This will be freed by 'type' release op */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->flow = flow;
+ entry->priv = priv;
+
+ debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_dec_hits_fops);
+
+ debugfs_create_file("type", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_type_fops);
+
+ debugfs_create_file("id", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_id_fops);
+
+ /* Create entry for each port */
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir,
+ priv->port_list[i], entry);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *flow_dir;
+ int i, ret;
+
+ flow_dir = debugfs_create_dir("flows", parent);
+ if (IS_ERR(flow_dir))
+ return PTR_ERR(flow_dir);
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int tid)
+{
+ struct mvpp2_dbgfs_prs_entry *entry;
+ struct dentry *prs_entry_dir;
+ char prs_entry_name[10];
+
+ if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ sprintf(prs_entry_name, "%03d", tid);
+
+ prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+ if (IS_ERR(prs_entry_dir))
+ return PTR_ERR(prs_entry_dir);
+
+ /* This will be freed by the 'valid' entry's release op */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->tid = tid;
+ entry->priv = priv;
+
+ /* Create each attr */
+ debugfs_create_file("sram", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_sram_fops);
+
+ debugfs_create_file("valid", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_valid_fops);
+
+ debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_lu_fops);
+
+ debugfs_create_file("ai", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_ai_fops);
+
+ debugfs_create_file("header_data", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hdata_fops);
+
+ debugfs_create_file("hits", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *prs_dir;
+ int i, ret;
+
+ prs_dir = debugfs_create_dir("parser", parent);
+ if (IS_ERR(prs_dir))
+ return PTR_ERR(prs_dir);
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_init(struct dentry *parent,
+ struct mvpp2_port *port)
+{
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ debugfs_create_file("parser_entries", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_parser_fops);
+
+ debugfs_create_file("mac_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_filter_fops);
+
+ debugfs_create_file("vid_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_vid_fops);
+
+ debugfs_create_file("c2_hits", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
+{
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
+{
+ struct dentry *mvpp2_dir, *mvpp2_root;
+ int ret, i;
+
+ mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
+ if (!mvpp2_root) {
+ mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+ if (IS_ERR(mvpp2_root))
+ return;
+ }
+
+ mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+ if (IS_ERR(mvpp2_dir))
+ return;
+
+ priv->dbgfs_dir = mvpp2_dir;
+
+ ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ return;
+err:
+ mvpp2_dbgfs_cleanup(priv);
+}
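
For orientation, the hierarchy this file builds looks roughly as follows — a sketch assuming MVPP2_DRIVER_NAME expands to "mvpp2" and debugfs is mounted at /sys/kernel/debug:

/sys/kernel/debug/mvpp2/<controller>/
    parser/<tid>/{valid,lookup_id,ai,header_data,sram,hits}
    flows/<nn>/{flow_hits,dec_hits,type,id}
    flows/<nn>/<netdev>/{hash_opts,engine}
    <netdev>/{parser_entries,mac_filter,vid_filter,c2_hits,default_rxq,rss_enable}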
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0319ed9ef8b8..32d785b616e1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/acpi.h>
@@ -66,7 +63,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
-static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
+static int queue_mode = MVPP2_QDIST_MULTI_MODE;
module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
@@ -151,9 +148,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
else
- return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
+ MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -166,12 +164,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
offset = dma_addr & MVPP2_TX_DESC_ALIGN;
if (port->priv->hw_version == MVPP21) {
- tx_desc->pp21.buf_dma_addr = addr;
+ tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
tx_desc->pp21.packet_offset = offset;
} else {
- u64 val = (u64)addr;
+ __le64 val = cpu_to_le64(addr);
- tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
+ tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
tx_desc->pp22.buf_dma_addr_ptp |= val;
tx_desc->pp22.packet_offset = offset;
}
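
One subtlety in the hunk above is worth spelling out: the descriptor field stays little-endian in memory, so the mask is converted with cpu_to_le64() instead of converting the field back and forth; masking with an unconverted constant would clear the wrong bits on a big-endian host. A minimal sketch of the idiom (the helper name is illustrative, not driver API):

/* Sketch: update a little-endian 64-bit descriptor field in place,
 * clearing only the masked bits and leaving the rest untouched.
 */
static inline void desc_addr_set(__le64 *field, u64 addr, u64 mask)
{
	*field &= ~cpu_to_le64(mask);		/* clear the address bits */
	*field |= cpu_to_le64(addr & mask);	/* install the new address */
}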
@@ -181,9 +179,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.data_size;
+ return le16_to_cpu(tx_desc->pp21.data_size);
else
- return tx_desc->pp22.data_size;
+ return le16_to_cpu(tx_desc->pp22.data_size);
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
@@ -191,9 +189,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
size_t size)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.data_size = size;
+ tx_desc->pp21.data_size = cpu_to_le16(size);
else
- tx_desc->pp22.data_size = size;
+ tx_desc->pp22.data_size = cpu_to_le16(size);
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
@@ -211,9 +209,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
unsigned int command)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.command = command;
+ tx_desc->pp21.command = cpu_to_le32(command);
else
- tx_desc->pp22.command = command;
+ tx_desc->pp22.command = cpu_to_le32(command);
}
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
@@ -229,36 +227,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
else
- return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
+ MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_cookie;
+ return le32_to_cpu(rx_desc->pp21.buf_cookie);
else
- return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
+ MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.data_size;
+ return le16_to_cpu(rx_desc->pp21.data_size);
else
- return rx_desc->pp22.data_size;
+ return le16_to_cpu(rx_desc->pp22.data_size);
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.status;
+ return le32_to_cpu(rx_desc->pp21.status);
else
- return rx_desc->pp22.status;
+ return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -1735,7 +1735,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
command |= MVPP2_TXD_IP_CSUM_DISABLE;
- if (l3_proto == swab16(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
} else {
@@ -3273,6 +3273,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
+static bool mvpp22_rss_is_supported(void)
+{
+ return queue_mode == MVPP2_QDIST_MULTI_MODE;
+}
+
static int mvpp2_open(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -3365,9 +3370,6 @@ static int mvpp2_open(struct net_device *dev)
mvpp2_start_dev(port);
- if (priv->hw_version == MVPP22)
- mvpp22_init_rss(port);
-
/* Start hardware statistics gathering */
queue_delayed_work(priv->stats_queue, &port->stats_work,
MVPP2_MIB_COUNTERS_STATS_DELAY);
@@ -3626,6 +3628,13 @@ static int mvpp2_set_features(struct net_device *dev,
}
}
+ if (changed & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH)
+ mvpp22_rss_enable(port);
+ else
+ mvpp22_rss_disable(port);
+ }
+
return 0;
}
@@ -3813,6 +3822,94 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = mvpp2_ethtool_rxfh_get(port, info);
+ break;
+ case ETHTOOL_GRXRINGS:
+ info->data = port->nrxqs;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = mvpp2_ethtool_rxfh_set(port, info);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+ return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+}
+
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, port->indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ memcpy(port->indir, indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ mvpp22_rss_fill_table(port, port->id);
+ }
+
+ return 0;
+}
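
Conceptually, the indirection table written by mvpp22_rss_fill_table() steers each packet's CRC32 hash onto a receive queue. A sketch of the selection the hardware performs (the function name is illustrative, not driver API):

/* Sketch: RSS queue selection from the indirection table. */
static u32 rss_pick_rxq(const u32 *indir, u32 pkt_hash)
{
	return indir[pkt_hash % MVPP22_RSS_TABLE_ENTRIES];
}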
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -3844,6 +3941,12 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rxnfc = mvpp2_ethtool_get_rxnfc,
+ .set_rxnfc = mvpp2_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
+ .get_rxfh = mvpp2_ethtool_get_rxfh,
+ .set_rxfh = mvpp2_ethtool_set_rxfh,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -3985,8 +4088,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
- if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
- (port->ntxqs > MVPP2_MAX_TXQ))
+ if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
+ port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
return -EINVAL;
/* Disable port */
@@ -4075,6 +4178,9 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
+ if (mvpp22_rss_is_supported())
+ mvpp22_rss_port_init(port);
+
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4681,6 +4787,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (mvpp22_rss_is_supported())
+ dev->hw_features |= NETIF_F_RXHASH;
+
if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@ -5011,6 +5120,12 @@ static int mvpp2_probe(struct platform_device *pdev)
(unsigned long)of_device_get_match_data(&pdev->dev);
}
+ /* Multi-queue mode isn't supported on PPv2.1, fall back to
+ * single-queue mode
+ */
+ if (priv->hw_version == MVPP21)
+ queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
@@ -5174,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ mvpp2_dbgfs_init(priv, pdev->name);
+
platform_set_drvdata(pdev, priv);
return 0;
@@ -5207,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev)
struct fwnode_handle *port_fwnode;
int i = 0;
+ mvpp2_dbgfs_cleanup(priv);
+
flush_workqueue(priv->stats_queue);
destroy_workqueue(priv->stats_queue);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 6bb69f086794..392fd895f278 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Header Parser helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -30,24 +27,24 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
return -EINVAL;
/* Clear entry invalidation bit */
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
return 0;
}
/* Initialize tcam entry from hw */
-static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
- struct mvpp2_prs_entry *pe, int tid)
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
{
int i;
@@ -60,18 +57,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
- if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+ if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
return MVPP2_PRS_TCAM_ENTRY_INVALID;
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+ pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+ pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
return 0;
}
@@ -103,42 +100,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
- pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
unsigned int port, bool add)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
if (add)
- pe->tcam.byte[enable_off] &= ~(1 << port);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
else
- pe->tcam.byte[enable_off] |= 1 << port;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
unsigned int ports)
{
- unsigned char port_mask = MVPP2_PRS_PORT_MASK;
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
- pe->tcam.byte[enable_off] &= ~port_mask;
- pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}
/* Obtain port map from tcam sw entry */
-static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+ return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}
/* Set byte of data and its enable bits in tcam sw entry */
@@ -146,55 +136,58 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
unsigned int offs, unsigned char byte,
unsigned char enable)
{
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
/* Get byte of data and its enable bits from tcam sw entry */
-static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
- unsigned int offs, unsigned char *byte,
- unsigned char *enable)
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable)
{
- *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
- *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
+ *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}
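
A worked example of the word layout these helpers assume: each 32-bit TCAM word packs two data bytes in bits 15:0 and their enable bytes in bits 31:16, so data byte offs lives in word offs / 2 at bit position (offs % 2) * 8:

/* Example for offs == 5: word 2, pos 8.
 *
 *   tcam[2]:  [ en[5] | en[4] | data[5] | data[4] ]
 *              31..24  23..16   15..8      7..0
 *
 *   byte   = (tcam[2] >> 8)  & 0xff
 *   enable = (tcam[2] >> 24) & 0xff
 */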
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
u16 data)
{
- int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
- tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
- if (tcam_data != data)
- return false;
- return true;
+ tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
+ return tcam_data == data;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int enable)
{
- int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+ int i;
for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
if (!(enable & BIT(i)))
continue;
if (bits & BIT(i))
- pe->tcam.byte[ai_idx] |= 1 << i;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
else
- pe->tcam.byte[ai_idx] &= ~(1 << i);
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
}
- pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
- return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+ return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}
/* Set ethertype in tcam sw entry */
@@ -215,16 +208,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Update ri bits in sram sw entry */
@@ -234,15 +227,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
unsigned int i;
for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
- int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
-
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
}
@@ -251,7 +245,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
- return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+ return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
@@ -259,16 +253,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int mask)
{
unsigned int i;
- int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
}
@@ -278,12 +274,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
u8 bits;
- int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
- int ai_en_off = ai_off + 1;
- int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+ /* The ai field occupies sram bits 90-97, so it spans two u32 words */
+ int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
- bits = (pe->sram.byte[ai_off] >> ai_shift) |
- (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+ bits = (pe->sram[ai_off] >> ai_shift) |
+ (pe->sram[ai_off + 1] << (32 - ai_shift));
return bits;
}
@@ -316,8 +312,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
}
/* Set value */
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
- (unsigned char)shift;
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
@@ -346,13 +341,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set value */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
MVPP2_PRS_SRAM_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] |=
- (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ offset & MVPP2_PRS_SRAM_UDF_MASK);
/* Set offset type */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
@@ -362,16 +352,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set offset operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
- (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
- (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
/* Set base offset as current */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
@@ -662,7 +644,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
continue;
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
if (!match)
continue;
@@ -790,8 +772,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
- mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
if (!match)
continue;
@@ -932,8 +914,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
pe.index = tid;
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
@@ -1433,17 +1415,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
pe.index = tid;
- /* Clear tcam data before updating */
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
-
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_IPV4_HEAD,
MVPP2_PRS_IPV4_HEAD_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -1644,8 +1622,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
MVPP2_PRS_IPV4_IHL_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -2428,6 +2406,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
return 0;
}
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ u8 *ri_byte, *ri_byte_mask;
+ int tid, i;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_tcam_first_free(priv,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ ri_byte = (u8 *)&ri;
+ ri_byte_mask = (u8 *)&ri_mask;
+
+ mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ for (i = 0; i < 4; i++) {
+ mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
+ ri_byte_mask[i]);
+ }
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
/* Set prs flow for the port */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
@@ -2465,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
return 0;
}
+
+int mvpp2_prs_hits(struct mvpp2 *priv, int index)
+{
+ u32 val;
+
+ if (index >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
+
+ val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
+
+ val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+
+ return val;
+}
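
A short usage sketch for the new helper — illustrative only, not part of the patch:

/* Sketch: print the hit count of every valid parser entry. */
static void mvpp2_prs_dump_hits(struct mvpp2 *priv)
{
	int tid, hits;

	for (tid = 0; tid < MVPP2_PRS_TCAM_SRAM_SIZE; tid++) {
		if (!priv->prs_shadow[tid].valid)
			continue;

		hits = mvpp2_prs_hits(priv, tid);
		if (hits > 0)
			pr_info("prs entry %03d: %d hits\n", tid, hits);
	}
}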
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index 22fbbc4c8b28..e22f6c85d380 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -1,22 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header Parser definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
+#ifndef _MVPP2_PRS_H_
+#define _MVPP2_PRS_H_
+
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
#include "mvpp2.h"
-#ifndef _MVPP2_PRS_H_
-#define _MVPP2_PRS_H_
-
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
@@ -50,17 +48,25 @@
* The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
*/
#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_AI_MASK 0xff
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
-#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
- (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
-#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
- (((offs) * 2) - ((offs) % 2) + 2)
-#define MVPP2_PRS_TCAM_AI_BYTE 16
-#define MVPP2_PRS_TCAM_PORT_BYTE 17
-#define MVPP2_PRS_TCAM_LU_BYTE 20
-#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
-#define MVPP2_PRS_TCAM_INV_WORD 5
+
+/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
+#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2)
+#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2)
+
+#define MVPP2_PRS_TCAM_EN(data) ((data) << 16)
+#define MVPP2_PRS_TCAM_AI_WORD 4
+#define MVPP2_PRS_TCAM_AI(ai) (ai)
+#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai))
+#define MVPP2_PRS_TCAM_PORT_WORD 4
+#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8)
+#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p))
+#define MVPP2_PRS_TCAM_LU_WORD 5
+#define MVPP2_PRS_TCAM_LU(lu) (lu)
+#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu))
+#define MVPP2_PRS_TCAM_INV_WORD 5
#define MVPP2_PRS_VID_TCAM_BYTE 2
@@ -146,6 +152,7 @@
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
@@ -214,6 +221,10 @@
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \
+ MVPP2_PRS_RI_IP_FRAG_MASK | \
+ MVPP2_PRS_RI_L4_PROTO_MASK)
+
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
@@ -255,20 +266,15 @@ enum mvpp2_prs_lookup {
MVPP2_PRS_LU_LAST,
};
-union mvpp2_prs_tcam_entry {
- u32 word[MVPP2_PRS_TCAM_WORDS];
- u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
-};
-
-union mvpp2_prs_sram_entry {
- u32 word[MVPP2_PRS_SRAM_WORDS];
- u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
-};
-
struct mvpp2_prs_entry {
u32 index;
- union mvpp2_prs_tcam_entry tcam;
- union mvpp2_prs_sram_entry sram;
+ u32 tcam[MVPP2_PRS_TCAM_WORDS];
+ u32 sram[MVPP2_PRS_SRAM_WORDS];
+};
+
+struct mvpp2_prs_result_info {
+ u32 ri;
+ u32 ri_mask;
};
struct mvpp2_prs_shadow {
@@ -288,10 +294,21 @@ struct mvpp2_prs_shadow {
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv);
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid);
+
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe);
+
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable);
+
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add);
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type);
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask);
+
int mvpp2_prs_def_flow(struct mvpp2_port *port);
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port);
@@ -311,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port);
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da);
+int mvpp2_prs_hits(struct mvpp2 *priv, int index);
+
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d8ebf0a05e0c..6e6abdc399de 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -605,10 +605,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &eth->phy_scratch_ring,
- GFP_ATOMIC | __GFP_ZERO);
+ eth->scratch_ring = dma_zalloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -623,7 +623,6 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
return -ENOMEM;
- memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
@@ -1221,14 +1220,11 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sz,
- &ring->phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
- memset(ring->dma, 0, MTK_DMA_SIZE * sz);
for (i = 0; i < MTK_DMA_SIZE; i++) {
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
@@ -1321,10 +1317,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_alloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->dma = dma_zalloc_coherent(eth->dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
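
For context on the three hunks above: dma_zalloc_coherent() is the zeroing wrapper around dma_alloc_coherent(), so the explicit memset() and __GFP_ZERO flag become redundant. It is roughly:

/* Sketch of the wrapper these hunks switch to: */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}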
@@ -2463,42 +2458,6 @@ free_netdev:
return err;
}
-static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
-{
- u32 val[2], id[4];
-
- regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
- regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);
-
- id[3] = ((val[0] >> 16) & 0xff) - '0';
- id[2] = ((val[0] >> 24) & 0xff) - '0';
- id[1] = (val[1] & 0xff) - '0';
- id[0] = ((val[1] >> 8) & 0xff) - '0';
-
- *chip_id = (id[3] * 1000) + (id[2] * 100) +
- (id[1] * 10) + id[0];
-
- if (!(*chip_id)) {
- dev_err(eth->dev, "failed to get chip id\n");
- return -ENODEV;
- }
-
- dev_info(eth->dev, "chip id = %d\n", *chip_id);
-
- return 0;
-}
-
-static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
-{
- switch (eth->chip_id) {
- case MT7622_ETH:
- case MT7623_ETH:
- return true;
- }
-
- return false;
-}
-
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2577,11 +2536,7 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
return err;
- err = mtk_get_chip_id(eth, &eth->chip_id);
- if (err)
- return err;
-
- eth->hwlro = mtk_is_hwlro_supported(eth);
+ eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
for_each_child_of_node(pdev->dev.of_node, mac_np) {
if (!of_device_is_compatible(mac_np,
@@ -2670,19 +2625,19 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7622_data = {
- .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
+ .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 672b8c353c47..46819297fc3e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -566,6 +566,7 @@ struct mtk_rx_ring {
#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
MTK_GMAC2_SGMII)
+#define MTK_HWLRO BIT(12)
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
/* struct mtk_eth_data - This is the structure holding all differences
@@ -635,7 +636,6 @@ struct mtk_eth {
struct regmap *ethsys;
struct regmap *sgmiisys;
struct regmap *pctl;
- u32 chip_id;
bool hwlro;
refcount_t dma_refcnt;
struct mtk_tx_ring tx_ring;
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 16b10d01fcf4..3f400770fcd8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
- srq.o resource_tracker.o
+ srq.o resource_tracker.o crdump.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index e2b6b0cac1ac..c81d15bf259c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
dev = persist->dev;
mlx4_err(dev, "device is going to be reset\n");
- if (mlx4_is_slave(dev))
+ if (mlx4_is_slave(dev)) {
err = mlx4_reset_slave(dev);
- else
+ } else {
+ mlx4_crdump_collect(dev);
err = mlx4_reset_master(dev);
+ }
if (!err) {
mlx4_err(dev, "device was reset successfully\n");
@@ -212,7 +214,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
- err = mlx4_restart_one(persist->pdev);
+ err = mlx4_restart_one(persist->pdev, false, NULL);
mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
new file mode 100644
index 000000000000..88316c743820
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4.h"
+
+#define BAD_ACCESS 0xBADACCE5
+#define HEALTH_BUFFER_SIZE 0x40
+#define CR_ENABLE_BIT swab32(BIT(6))
+#define CR_ENABLE_BIT_OFFSET 0xF3F04
+#define MAX_NUM_OF_DUMPS_TO_STORE (8)
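
A note on CR_ENABLE_BIT above: the CR space registers are big-endian while readl()/writel() are little-endian accessors, so the constant is byte-swapped once at build time rather than swapping every value read. Since swab32() distributes over a bitwise AND, both tests below are non-zero under exactly the same conditions:

/* Sketch: testing a big-endian register bit with LE accessors.
 *
 *   swab32(readl(reg)) & BIT(6)    - swap the value each time
 *   readl(reg) & swab32(BIT(6))    - swap the mask once (this file)
 */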
+
+static const char *region_cr_space_str = "cr-space";
+static const char *region_fw_health_str = "fw-health";
+
+/* Set to true if the CR enable bit was set before the crdump */
+static bool crdump_enable_bit_set;
+
+static void crdump_enable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+ /* Get current enable bit value */
+ crdump_enable_bit_set =
+ readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT;
+
+ /* Enable FW CR filter (set bit6 to 0) */
+ if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+
+ /* Enable block volatile crspace accesses */
+ writel(swab32(1), cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+}
+
+static void crdump_disable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+ /* Disable block volatile crspace accesses */
+ writel(0, cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+
+ /* Restore FW CR filter value (set bit6 to original value) */
+ if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+}
+
+static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 *crspace_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_crspace) {
+ mlx4_err(dev, "crdump: cr-space region is NULL\n");
+ return;
+ }
+
+ /* Try to collect CR space */
+ cr_res_size = pci_resource_len(pdev, 0);
+ crspace_data = kvmalloc(cr_res_size, GFP_KERNEL);
+ if (crspace_data) {
+ for (offset = 0; offset < cr_res_size; offset += 4)
+ *(u32 *)(crspace_data + offset) =
+ readl(cr_space + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_crspace,
+ cr_res_size, crspace_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(crspace_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_cr_space_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_cr_space_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n");
+ }
+}
+
+static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ u8 *health_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_fw_health) {
+ mlx4_err(dev, "crdump: fw-health region is NULL\n");
+ return;
+ }
+
+ /* Try to collect health buffer */
+ health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL);
+ if (health_data) {
+ u8 __iomem *health_buf_start =
+ cr_space + dev->caps.health_buffer_addrs;
+
+ for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4)
+ *(u32 *)(health_data + offset) =
+ readl(health_buf_start + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_fw_health,
+ HEALTH_BUFFER_SIZE,
+ health_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(health_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_fw_health_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_fw_health_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate health buffer\n");
+ }
+}
+
+int mlx4_crdump_collect(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 __iomem *cr_space;
+ u32 id;
+
+ if (!dev->caps.health_buffer_addrs) {
+ mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n");
+ return 0;
+ }
+
+ if (!crdump->snapshot_enable) {
+ mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n");
+ return 0;
+ }
+
+ cr_res_size = pci_resource_len(pdev, 0);
+
+ cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size);
+ if (!cr_space) {
+ mlx4_err(dev, "crdump: Failed to map pci cr region\n");
+ return -ENODEV;
+ }
+
+ crdump_enable_crspace_access(dev, cr_space);
+
+ /* Get the available snapshot ID for the dumps */
+ id = devlink_region_shapshot_id_get(devlink);
+
+ /* Try to capture dumps */
+ mlx4_crdump_collect_crspace(dev, cr_space, id);
+ mlx4_crdump_collect_fw_health(dev, cr_space, id);
+
+ crdump_disable_crspace_access(dev, cr_space);
+
+ iounmap(cr_space);
+ return 0;
+}
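
mlx4_crdump_collect() is the entry point the rest of the driver calls when a dump should be taken. A hypothetical call site, for illustration only (kernel context; the real trigger path lives elsewhere in the driver and is not part of this hunk):

static void example_on_fatal_error(struct mlx4_dev *dev)
{
	int err = mlx4_crdump_collect(dev);

	if (err)
		mlx4_err(dev, "crdump: dump collection failed, err %d\n", err);
}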
+
+int mlx4_crdump_init(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ crdump->snapshot_enable = false;
+
+ /* Create cr-space region */
+ crdump->region_crspace =
+ devlink_region_create(devlink,
+ region_cr_space_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
+ if (IS_ERR(crdump->region_crspace))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_cr_space_str,
+ PTR_ERR(crdump->region_crspace));
+
+ /* Create fw-health region */
+ crdump->region_fw_health =
+ devlink_region_create(devlink,
+ region_fw_health_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
+ if (IS_ERR(crdump->region_fw_health))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_fw_health_str,
+ PTR_ERR(crdump->region_fw_health));
+
+ return 0;
+}
+
+void mlx4_crdump_end(struct mlx4_dev *dev)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+
+ devlink_region_destroy(crdump->region_fw_health);
+ devlink_region_destroy(crdump->region_crspace);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 65eb06e017e4..6785661d1a72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx4_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx4_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9f54ccbddea7..a1aeeb8094c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
{
const struct mlx4_en_frag_info *frag_info = priv->frag_info;
unsigned int truesize = 0;
+ bool release = true;
int nr, frag_size;
struct page *page;
dma_addr_t dma;
- bool release;
/* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = page_count(page) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
- } else {
+ } else if (!priv->rx_headroom) {
+ /* rx_headroom for a non-XDP setup is always 0.
+ * When XDP is set, the above condition will
+ * guarantee the page is always released.
+ */
u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
frags->page_offset += sz_align;
@@ -791,8 +795,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0227786308af..1857ee0f0871 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -688,15 +688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
- return fallback(dev, skb) % rings_p_up;
+ return fallback(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 46dcbfbe4c5e..babcfd9c0571 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
-
+#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->rl_caps.min_unit = size >> 14;
}
+ MLX4_GET(dev_cap->health_buffer_addrs, outbox,
+ QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);
+
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cd6399c76bfd..650ae08c71de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -128,6 +128,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 872014702fc1..d2d59444f562 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -159,9 +159,10 @@ static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
-int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
+ "(0-7) (default: 0)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
@@ -177,6 +178,131 @@ struct mlx4_port_config {
static atomic_t pf_loading = ATOMIC_INIT(0);
+static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ ctx->val.vbool = !!mlx4_internal_err_reset;
+ return 0;
+}
+
+static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ mlx4_internal_err_reset = ctx->val.vbool;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ ctx->val.vbool = dev->persist->crdump.snapshot_enable;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ dev->persist->crdump.snapshot_enable = ctx->val.vbool;
+ return 0;
+}
+
+static int
+mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ u32 value = val.vu32;
+
+ if (value < 1 || value > 128)
+ return -ERANGE;
+
+ if (!is_power_of_2(value)) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs must be a power of 2");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum mlx4_devlink_param_id {
+ MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+};
+
+static const struct devlink_param mlx4_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_ierr_reset_get,
+ mlx4_devlink_ierr_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(MAX_MACS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx4_devlink_max_macs_validate),
+ DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_crdump_snapshot_get,
+ mlx4_devlink_crdump_snapshot_set, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ int err;
+
+ err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
+ if (err)
+ mlx4_warn(dev,
+ "devlink set parameter %u value failed (err = %d)",
+ param_id, err);
+}
+
+static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = !!mlx4_internal_err_reset;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
+
+ value.vu32 = 1UL << log_num_mac;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+
+ value.vbool = enable_64b_cqe_eqe;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
+
+ value.vbool = enable_4k_uar;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
+
+ value.vbool = false;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
+}
+
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
@@ -428,6 +554,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
dev->caps.wol_port[1] = dev_cap->wol_port[1];
dev->caps.wol_port[2] = dev_cap->wol_port[2];
+ dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
@@ -3711,10 +3838,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- err = mlx4_catas_init(&priv->dev);
+ err = mlx4_crdump_init(&priv->dev);
if (err)
goto err_release_regions;
+ err = mlx4_catas_init(&priv->dev);
+ if (err)
+ goto err_crdump;
+
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
if (err)
goto err_catas;
@@ -3724,6 +3855,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
err_catas:
mlx4_catas_end(&priv->dev);
+err_crdump:
+ mlx4_crdump_end(&priv->dev);
+
err_release_regions:
pci_release_regions(pdev);
@@ -3757,8 +3891,68 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
return __set_port_type(info, mlx4_port_type);
}
+static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ union devlink_param_value saved_value;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ &saved_value);
+ if (!err && mlx4_internal_err_reset != saved_value.vbool) {
+ mlx4_internal_err_reset = saved_value.vbool;
+ /* Notify on value changed on runtime configuration mode */
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+ }
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
+ if (!err)
+ log_num_mac = order_base_2(saved_value.vu32);
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ &saved_value);
+ if (!err)
+ enable_64b_cqe_eqe = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ &saved_value);
+ if (!err)
+ enable_4k_uar = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ &saved_value);
+ if (!err && crdump->snapshot_enable != saved_value.vbool) {
+ crdump->snapshot_enable = saved_value.vbool;
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+ }
+}
+
+static int mlx4_devlink_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_dev_persistent *persist = dev->persist;
+ int err;
+
+ if (persist->num_vfs)
+ mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
+ err = mlx4_restart_one(persist->pdev, true, devlink);
+ if (err)
+ mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err);
+
+ return err;
+}
+
static const struct devlink_ops mlx4_devlink_ops = {
.port_type_set = mlx4_devlink_port_type_set,
+ .reload = mlx4_devlink_reload,
};
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -3792,14 +3986,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ret = devlink_register(devlink, &pdev->dev);
if (ret)
goto err_persist_free;
-
- ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ ret = devlink_params_register(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
+ mlx4_devlink_set_params_init_values(devlink);
+ ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ if (ret)
+ goto err_params_unregister;
pci_save_state(pdev);
return 0;
+err_params_unregister:
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
devlink_unregister(devlink);
err_persist_free:
@@ -3929,6 +4130,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
else
mlx4_info(dev, "%s: interface is down\n", __func__);
mlx4_catas_end(dev);
+ mlx4_crdump_end(dev);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
@@ -3936,6 +4138,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
@@ -3960,7 +4164,7 @@ static int restore_current_port_types(struct mlx4_dev *dev,
return err;
}
-int mlx4_restart_one(struct pci_dev *pdev)
+int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
@@ -3973,6 +4177,8 @@ int mlx4_restart_one(struct pci_dev *pdev)
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mlx4_unload_one(pdev);
+ if (reload)
+ mlx4_devlink_param_load_driverinit_values(devlink);
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
if (err) {
mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
@@ -4205,7 +4411,7 @@ static int __init mlx4_verify_params(void)
if (use_prio != 0)
pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
- if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
+ if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
log_mtts_per_seg);
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4c5306dbcf11..ffed2d4c9403 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1412,6 +1412,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
+ /* fall through */
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
@@ -1441,6 +1442,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
+ /* fall through */
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cb9e923e8399..ebcd2778eeb3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -84,7 +84,6 @@ enum {
MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
@@ -1042,7 +1041,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
int mlx4_catas_init(struct mlx4_dev *dev);
void mlx4_catas_end(struct mlx4_dev *dev);
-int mlx4_restart_one(struct pci_dev *pdev);
+int mlx4_crdump_init(struct mlx4_dev *dev);
+void mlx4_crdump_end(struct mlx4_dev *dev);
+int mlx4_restart_one(struct pci_dev *pdev, bool reload,
+ struct devlink *devlink);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
@@ -1227,6 +1229,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
int mlx4_comm_internal_err(u32 slave_read);
+int mlx4_crdump_collect(struct mlx4_dev *dev);
+
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ace6545f82e6..c3228b89df46 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index bae8b22edbb7..ba361c5fbda3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -105,7 +105,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
request->num_mtt =
roundup_pow_of_two(max_t(unsigned, request->num_mtt,
min(1UL << (31 - log_mtts_per_seg),
- si.totalram >> (log_mtts_per_seg - 1))));
+ (si.totalram << 1) >> log_mtts_per_seg)));
+
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 7b1b5ac986d0..31bd56727022 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ int local_qpn = vhcr->in_modifier & 0xffffff;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 2545296a0c08..37a551436e4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,10 +3,11 @@
#
config MLX5_CORE
- tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
+ tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on MAY_USE_DEVLINK
depends on PCI
imply PTP_1588_CLOCK
+ imply VXLAN
default n
---help---
Core driver for low level functionality of the ConnectX-4 and
@@ -27,7 +28,7 @@ config MLX5_FPGA
sandbox-specific client drivers.
config MLX5_CORE_EN
- bool "Mellanox Technologies ConnectX-4 Ethernet support"
+ bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
select PAGE_POOL
@@ -35,6 +36,24 @@ config MLX5_CORE_EN
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+config MLX5_EN_ARFS
+ bool "Mellanox MLX5 ethernet accelerated receive flow steering (ARFS) support"
+ depends on MLX5_CORE_EN && RFS_ACCEL
+ default y
+ ---help---
+ Mellanox MLX5 ethernet hardware-accelerated receive flow steering (ARFS)
+ support. Enables the ethernet netdevice ARFS support and ntuple filtering.
+
+config MLX5_EN_RXNFC
+ bool "Mellanox MLX5 ethernet rx nfc flow steering support"
+ depends on MLX5_CORE_EN
+ default y
+ ---help---
+ Mellanox MLX5 ethernet rx nfc flow steering support.
+ Enables ethtool receive network flow classification, which allows user-defined
+ flow rules to direct traffic into an arbitrary rx queue via the ethtool
+ set/get_rxnfc API.
+
config MLX5_MPFS
bool "Mellanox Technologies MLX5 MPFS support"
depends on MLX5_CORE_EN
@@ -69,7 +88,7 @@ config MLX5_CORE_EN_DCB
If unsure, set to Y
config MLX5_CORE_IPOIB
- bool "Mellanox Technologies ConnectX-4 IPoIB offloads support"
+ bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
depends on MLX5_CORE_EN
default n
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9efbf193ad5a..d324a3884462 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,33 +1,61 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+#
+# Makefile for Mellanox 5th generation network adapters
+# (ConnectX series) core & netdev driver
+#
+
subdir-ccflags-y += -I$(src)
+obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+
+#
+# mlx5 core basic
+#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
- diag/fs_tracepoint.o
-
-mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
-
-mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
- fpga/ipsec.o fpga/tls.o
+ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+ diag/fs_tracepoint.o diag/fw_tracer.o
+#
+# Netdev basic
+#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
- en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
-
-mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
-
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o
+ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
+ en_selftest.o en/port.o
+
+#
+# Netdev extra
+#
+mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
+mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o
+
+#
+# Core extra
+#
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o
+mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
+mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
+mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
+
+#
+# Ipoib netdev
+#
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
+#
+# Accelerations & FPGA
+#
+mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
+ fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o
+ en_accel/ipsec_stats.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
new file mode 100644
index 000000000000..c13260467750
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -0,0 +1,37 @@
+#ifndef __MLX5E_ACCEL_H__
+#define __MLX5E_ACCEL_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "en.h"
+
+static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
+{
+ __be16 *ethtype;
+
+ if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
+ return false;
+ ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
+ if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ return false;
+ return true;
+}
+
+static inline void remove_metadata_hdr(struct sk_buff *skb)
+{
+ struct ethhdr *old_eth;
+ struct ethhdr *new_eth;
+
+ /* Remove the metadata from the buffer */
+ old_eth = (struct ethhdr *)skb->data;
+ new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
+ memmove(new_eth, old_eth, 2 * ETH_ALEN);
+ /* Ethertype is already in its new place */
+ skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+}
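
A standalone sketch of the memmove() trick in remove_metadata_hdr(): the 12 MAC-address bytes are slid forward over the metadata so the ethertype that follows the metadata becomes the frame's ethertype. The 8-byte metadata length and the letter-coded frame below are illustrative stand-ins (MLX5E_METADATA_ETHER_LEN is defined elsewhere in the driver):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* dst MAC (D), src MAC (S), 8-byte metadata (m), ethertype (T) */
	char frame[] = "DDDDDDSSSSSSmmmmmmmmTTpp";
	size_t meta_len = 8;

	/* Slide both MAC addresses forward over the metadata. */
	memmove(frame + meta_len, frame, 12);

	/* The stripped packet now starts at frame + meta_len. */
	printf("%s\n", frame + meta_len); /* DDDDDDSSSSSSTTpp */
	return 0;
}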
+
+#endif /* CONFIG_MLX5_ACCEL */
+
+#endif /* __MLX5E_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
index 77ac19f38cbe..da7bd26368f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -37,17 +37,26 @@
#include "mlx5_core.h"
#include "fpga/tls.h"
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
- return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid);
+ return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, p_swid,
+ direction_sx);
}
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx)
{
- mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
+ mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
+}
+
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index 6f9c9f446ecc..def4093ebfae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits {
u8 reserved_at_2[0x1e];
};
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx);
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -72,10 +76,14 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
#else
static inline int
-mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
-static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
+mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx) { return -ENOTSUPP; }
+static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx) { }
+static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
+ u32 seq, u64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 323ffe8bf7e4..456f30007ad6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
int i;
buf->size = size;
- buf->npages = 1 << get_order(size);
+ buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
buf->page_shift = PAGE_SHIFT;
buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 487388aed98f..3ce14d42ddc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -211,7 +211,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
ent->ret = 0;
return;
}
- usleep_range(5000, 10000);
+ cond_resched();
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -278,6 +278,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_PSV:
case MLX5_CMD_OP_DESTROY_SRQ:
case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ case MLX5_CMD_OP_DESTROY_XRQ:
case MLX5_CMD_OP_DESTROY_DCT:
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
@@ -310,6 +311,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_DESTROY_QP:
+ case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -346,6 +348,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_XRC_SRQ:
case MLX5_CMD_OP_QUERY_XRC_SRQ:
case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_XRQ:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_CREATE_DCT:
case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_QUERY_DCT:
@@ -427,6 +432,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_FPGA_MODIFY_QP:
case MLX5_CMD_OP_FPGA_QUERY_QP:
case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -452,6 +460,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
MLX5_COMMAND_STR_CASE(QUERY_ISSI);
MLX5_COMMAND_STR_CASE(SET_ISSI);
+ MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
MLX5_COMMAND_STR_CASE(CREATE_MKEY);
MLX5_COMMAND_STR_CASE(QUERY_MKEY);
MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
@@ -599,6 +608,15 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
+ MLX5_COMMAND_STR_CASE(CREATE_XRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_XRQ);
+ MLX5_COMMAND_STR_CASE(ARM_XRQ);
+ MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
+ MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
+ MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
+ MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
+ MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
default: return "unknown command opcode";
}
}
@@ -677,7 +695,7 @@ struct mlx5_ifc_mbox_out_bits {
struct mlx5_ifc_mbox_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -697,6 +715,7 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
u8 status;
u16 opcode;
u16 op_mod;
+ u16 uid;
mlx5_cmd_mbox_status(out, &status, &syndrome);
if (!status)
@@ -704,8 +723,15 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
+ uid = MLX5_GET(mbox_in, in, uid);
- mlx5_core_err(dev,
+ if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
+ mlx5_core_err_rl(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
+ mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome);
+ else
+ mlx5_core_dbg(dev,
"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
mlx5_command_str(opcode),
opcode, op_mod,
@@ -807,6 +833,7 @@ static void cmd_work_handler(struct work_struct *work)
unsigned long flags;
bool poll_cmd = ent->polling;
int alloc_ret;
+ int cmd_mode;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -853,6 +880,7 @@ static void cmd_work_handler(struct work_struct *work)
set_signature(ent, !cmd->checksum_disabled);
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ cmd_mode = cmd->mode;
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +905,7 @@ static void cmd_work_handler(struct work_struct *work)
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
mmiowb();
/* if not in polling don't use ent after this point */
- if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+ if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
@@ -1020,7 +1048,10 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf,
if (!dbg->in_msg || !dbg->out_msg)
return -ENOMEM;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ if (count < sizeof(lbuf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
return -EFAULT;
lbuf[sizeof(lbuf) - 1] = 0;
@@ -1224,21 +1255,12 @@ static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
{
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- int copy;
-
- if (*pos)
- return 0;
if (!dbg->out_msg)
return -ENOMEM;
- copy = min_t(int, count, dbg->outlen);
- if (copy_to_user(buf, dbg->out_msg, copy))
- return -EFAULT;
-
- *pos += copy;
-
- return copy;
+ return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
+ dbg->outlen);
}
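
The conversions in this file (and in debugfs.c below) all follow the same pattern: simple_read_from_buffer() clamps the copy to the buffer length, copies to userspace, and advances *pos, so repeated and seeking reads behave correctly without hand-rolled copy_to_user() bookkeeping. A minimal sketch of a read handler in this style (kernel context; struct example_state and its fields are hypothetical):

static ssize_t example_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct example_state *st = filp->private_data;

	/* Returns the number of bytes copied, 0 at EOF. */
	return simple_read_from_buffer(buf, count, pos, st->msg, st->len);
}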
static const struct file_operations dfops = {
@@ -1256,19 +1278,11 @@ static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
char outlen[8];
int err;
- if (*pos)
- return 0;
-
err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
if (err < 0)
return err;
- if (copy_to_user(buf, &outlen, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, outlen, err);
}
static ssize_t outlen_write(struct file *filp, const char __user *buf,
@@ -1276,7 +1290,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
{
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- char outlen_str[8];
+ char outlen_str[8] = {0};
int outlen;
void *ptr;
int err;
@@ -1291,8 +1305,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
if (copy_from_user(outlen_str, buf, count))
return -EFAULT;
- outlen_str[7] = 0;
-
err = sscanf(outlen_str, "%d", &outlen);
if (err < 0)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 413080a312a7..90fabd612b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -150,22 +150,13 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
int ret;
char tbuf[22];
- if (*pos)
- return 0;
-
stats = filp->private_data;
spin_lock_irq(&stats->lock);
if (stats->n)
field = div64_u64(stats->sum, stats->n);
spin_unlock_irq(&stats->lock);
ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
- if (ret > 0) {
- if (copy_to_user(buf, tbuf, ret))
- return -EFAULT;
- }
-
- *pos += ret;
- return ret;
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static ssize_t average_write(struct file *filp, const char __user *buf,
@@ -442,9 +433,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
u64 field;
int ret;
- if (*pos)
- return 0;
-
desc = filp->private_data;
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
@@ -470,13 +458,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
else
ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
- if (ret > 0) {
- if (copy_to_user(buf, tbuf, ret))
- return -EFAULT;
- }
-
- *pos += ret;
- return ret;
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static const struct file_operations fops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index b3820a34e773..0f11fff32a9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -240,6 +240,9 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
trace_seq_printf(p, "ft=%p\n", dst->ft);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ trace_seq_printf(p, "ft_num=%u\n", dst->ft_num);
+ break;
case MLX5_FLOW_DESTINATION_TYPE_TIR:
trace_seq_printf(p, "tir=%u\n", dst->tir_num);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 09f178a3fcab..0240aee9189e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -138,6 +138,8 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2, "VLAN_PUSH_2"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2, "VLAN_POP_2"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
new file mode 100644
index 000000000000..d4ec93bde4de
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -0,0 +1,947 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#define CREATE_TRACE_POINTS
+#include "fw_tracer.h"
+#include "fw_tracer_tracepoint.h"
+
+static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
+{
+ u32 *string_db_base_address_out = tracer->str_db.base_address_out;
+ u32 *string_db_size_out = tracer->str_db.size_out;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ void *mtrc_cap_sp;
+ int err, i;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Error reading tracer caps %d\n",
+ err);
+ return err;
+ }
+
+ if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
+ mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
+ return -ENOTSUPP;
+ }
+
+ tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
+ tracer->str_db.first_string_trace =
+ MLX5_GET(mtrc_cap, out, first_string_trace);
+ tracer->str_db.num_string_trace =
+ MLX5_GET(mtrc_cap, out, num_string_trace);
+ tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
+ tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+
+ for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
+ string_db_base_address_out[i] = MLX5_GET(mtrc_string_db_param,
+ mtrc_cap_sp,
+ string_db_base_address);
+ string_db_size_out[i] = MLX5_GET(mtrc_string_db_param,
+ mtrc_cap_sp, string_db_size);
+ }
+
+ return err;
+}
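
mlx5_core_access_reg() round-trips the MTRC_CAP register, and MLX5_GET()/MLX5_SET() then pick named bitfields out of the mailbox layout. A standalone sketch of what such an accessor conceptually boils down to (the offsets, widths, and register image below are made up; the real macros are table-driven from the PRM field definitions):

#include <stdint.h>
#include <stdio.h>

/* Extract 'width' bits starting at 'bit' from a 32-bit register word. */
static uint32_t get_field(uint32_t reg, unsigned int bit, unsigned int width)
{
	return (reg >> bit) & ((1u << width) - 1);
}

int main(void)
{
	uint32_t reg = 0x00c00102; /* made-up register image */

	/* e.g. a 2-bit version field at bit 0, an owner flag at bit 8 */
	printf("trc_ver=%u owner=%u\n",
	       get_field(reg, 0, 2), get_field(reg, 8, 1));
	return 0;
}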
+
+static int mlx5_set_mtrc_caps_trace_owner(struct mlx5_fw_tracer *tracer,
+ u32 *out, u32 out_size,
+ u8 trace_owner)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+
+ MLX5_SET(mtrc_cap, in, trace_owner, trace_owner);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, out_size,
+ MLX5_REG_MTRC_CAP, 0, 1);
+}
+
+static int mlx5_fw_tracer_ownership_acquire(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ int err;
+
+ err = mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out),
+ MLX5_FW_TRACER_ACQUIRE_OWNERSHIP);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Acquire tracer ownership failed %d\n",
+ err);
+ return err;
+ }
+
+ tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+
+ if (!tracer->owner)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
+{
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+
+ mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out),
+ MLX5_FW_TRACER_RELEASE_OWNERSHIP);
+ tracer->owner = false;
+}
+
+static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct device *ddev = &dev->pdev->dev;
+ dma_addr_t dma;
+ void *buff;
+ gfp_t gfp;
+ int err;
+
+ tracer->buff.size = TRACE_BUFFER_SIZE_BYTE;
+
+ gfp = GFP_KERNEL | __GFP_ZERO;
+ buff = (void *)__get_free_pages(gfp,
+ get_order(tracer->buff.size));
+ if (!buff) {
+ err = -ENOMEM;
+ mlx5_core_warn(dev, "FWTracer: Failed to allocate pages, %d\n", err);
+ return err;
+ }
+ tracer->buff.log_buf = buff;
+
+ dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
+ dma_mapping_error(ddev, dma));
+ err = -ENOMEM;
+ goto free_pages;
+ }
+ tracer->buff.dma = dma;
+
+ return 0;
+
+free_pages:
+ free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
+
+ return err;
+}
+
+static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct device *ddev = &dev->pdev->dev;
+
+ if (!tracer->buff.log_buf)
+ return;
+
+ dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
+ free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
+}
+
+static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ int err, inlen, i;
+ __be64 *mtt;
+ void *mkc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < TRACER_BUFFER_PAGE_NUM; i++)
+ mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, pd, tracer->buff.pdn);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
+ MLX5_SET64(mkc, mkc, start_addr, tracer->buff.dma);
+ MLX5_SET64(mkc, mkc, len, tracer->buff.size);
+ err = mlx5_core_create_mkey(dev, &tracer->buff.mkey, in, inlen);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to create mkey, %d\n", err);
+
+ kvfree(in);
+
+ return err;
+}
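
The translations_octword_size arithmetic above packs two 8-byte MTT entries per 16-byte octword, hence the DIV_ROUND_UP(pages, 2). A standalone sketch of the count (the page count is a made-up example, not the real TRACER_BUFFER_PAGE_NUM):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int pages = 7; /* illustrative stand-in for TRACER_BUFFER_PAGE_NUM */

	/* Each 16-byte octword holds two 8-byte MTT entries. */
	printf("%d pages -> %d octwords\n", pages, DIV_ROUND_UP(pages, 2));
	return 0;
}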
+
+static void mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ u32 num_string_db = tracer->str_db.num_string_db;
+ int i;
+
+ for (i = 0; i < num_string_db; i++) {
+ kfree(tracer->str_db.buffer[i]);
+ tracer->str_db.buffer[i] = NULL;
+ }
+}
+
+static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ u32 *string_db_size_out = tracer->str_db.size_out;
+ u32 num_string_db = tracer->str_db.num_string_db;
+ int i;
+
+ for (i = 0; i < num_string_db; i++) {
+ tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL);
+ if (!tracer->str_db.buffer[i])
+ goto free_strings_db;
+ }
+
+ return 0;
+
+free_strings_db:
+ mlx5_fw_tracer_free_strings_db(tracer);
+ return -ENOMEM;
+}
+
+static void mlx5_tracer_read_strings_db(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer,
+ read_fw_strings_work);
+ u32 num_of_reads, num_string_db = tracer->str_db.num_string_db;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ u32 leftovers, offset;
+ int err = 0, i, j;
+ u32 *out, outlen;
+ void *out_value;
+
+ outlen = MLX5_ST_SZ_BYTES(mtrc_stdb) + STRINGS_DB_READ_SIZE_BYTES;
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_string_db; i++) {
+ offset = 0;
+ MLX5_SET(mtrc_stdb, in, string_db_index, i);
+ num_of_reads = tracer->str_db.size_out[i] /
+ STRINGS_DB_READ_SIZE_BYTES;
+ leftovers = (tracer->str_db.size_out[i] %
+ STRINGS_DB_READ_SIZE_BYTES) /
+ STRINGS_DB_LEFTOVER_SIZE_BYTES;
+
+ MLX5_SET(mtrc_stdb, in, read_size, STRINGS_DB_READ_SIZE_BYTES);
+ for (j = 0; j < num_of_reads; j++) {
+ MLX5_SET(mtrc_stdb, in, start_offset, offset);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ outlen, MLX5_REG_MTRC_STDB,
+ 0, 1);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
+ err);
+ goto out_free;
+ }
+
+ out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
+ memcpy(tracer->str_db.buffer[i] + offset, out_value,
+ STRINGS_DB_READ_SIZE_BYTES);
+ offset += STRINGS_DB_READ_SIZE_BYTES;
+ }
+
+ /* Strings database is aligned to 64, need to read the leftovers */
+ MLX5_SET(mtrc_stdb, in, read_size,
+ STRINGS_DB_LEFTOVER_SIZE_BYTES);
+ for (j = 0; j < leftovers; j++) {
+ MLX5_SET(mtrc_stdb, in, start_offset, offset);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ outlen, MLX5_REG_MTRC_STDB,
+ 0, 1);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
+ err);
+ goto out_free;
+ }
+
+ out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
+ memcpy(tracer->str_db.buffer[i] + offset, out_value,
+ STRINGS_DB_LEFTOVER_SIZE_BYTES);
+ offset += STRINGS_DB_LEFTOVER_SIZE_BYTES;
+ }
+ }
+
+ tracer->str_db.loaded = true;
+
+out_free:
+ kfree(out);
+out:
+ return;
+}
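
The chunking above issues full-size reads first and then finishes the 64-byte-aligned tail in smaller leftover reads. A standalone sketch of that arithmetic with made-up sizes (the real constants, STRINGS_DB_READ_SIZE_BYTES and STRINGS_DB_LEFTOVER_SIZE_BYTES, live in fw_tracer.h, outside this hunk):

#include <stdio.h>

int main(void)
{
	unsigned int db_size = 1088;  /* hypothetical string DB size */
	unsigned int read_sz = 320;   /* hypothetical full read size */
	unsigned int leftover_sz = 64;
	unsigned int reads = db_size / read_sz;
	unsigned int leftovers = (db_size % read_sz) / leftover_sz;

	/* 3 full reads cover 960 bytes; 2 leftover reads cover the rest. */
	printf("%u full reads + %u leftover reads\n", reads, leftovers);
	return 0;
}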
+
+static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_ctrl, in, arm_event, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CTRL, 0, 1);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", err);
+}
+
+static const char *VAL_PARM = "%llx";
+static const char *REPLACE_64_VAL_PARM = "%x%x";
+static const char *PARAM_CHAR = "%";
+
+static int mlx5_tracer_message_hash(u32 message_id)
+{
+ return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1);
+}
+
+static struct tracer_string_format *mlx5_tracer_message_insert(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct hlist_head *head =
+ &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];
+ struct tracer_string_format *cur_string;
+
+ cur_string = kzalloc(sizeof(*cur_string), GFP_KERNEL);
+ if (!cur_string)
+ return NULL;
+
+ hlist_add_head(&cur_string->hlist, head);
+
+ return cur_string;
+}
+
+static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+ u32 str_ptr, offset;
+ int i;
+
+ str_ptr = tracer_event->string_event.string_param;
+
+ for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ if (str_ptr > tracer->str_db.base_address_out[i] &&
+ str_ptr < tracer->str_db.base_address_out[i] +
+ tracer->str_db.size_out[i]) {
+ offset = str_ptr - tracer->str_db.base_address_out[i];
+ /* add it to the hash */
+ cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
+ if (!cur_string)
+ return NULL;
+ cur_string->string = (char *)(tracer->str_db.buffer[i] +
+ offset);
+ return cur_string;
+ }
+ }
+
+ return NULL;
+}
+
+static void mlx5_tracer_clean_message(struct tracer_string_format *str_frmt)
+{
+ hlist_del(&str_frmt->hlist);
+ kfree(str_frmt);
+}
+
+static int mlx5_tracer_get_num_of_params(char *str)
+{
+ char *substr, *pstr = str;
+ int num_of_params = 0;
+
+ /* replace %llx with %x%x */
+ substr = strstr(pstr, VAL_PARM);
+ while (substr) {
+ memcpy(substr, REPLACE_64_VAL_PARM, 4);
+ pstr = substr;
+ substr = strstr(pstr, VAL_PARM);
+ }
+
+ /* count all the % characters */
+ substr = strstr(str, PARAM_CHAR);
+ while (substr) {
+ num_of_params += 1;
+ str = substr + 1;
+ substr = strstr(str, PARAM_CHAR);
+ }
+
+ return num_of_params;
+}
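
The in-place rewrite above works because "%llx" and "%x%x" are both four bytes, so a 64-bit placeholder can be overwritten where it sits and then simply counts as two 32-bit parameters. A standalone sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fmt[] = "mkey 0x%llx failed, qpn %x";
	char *p = fmt;

	/* Replace every "%llx" with "%x%x" in place (same length). */
	while ((p = strstr(p, "%llx")))
		memcpy(p, "%x%x", 4);

	printf("%s\n", fmt); /* mkey 0x%x%x failed, qpn %x => 3 params */
	return 0;
}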
+
+static struct tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head,
+ u8 event_id, u32 tmsn)
+{
+ struct tracer_string_format *message;
+
+ hlist_for_each_entry(message, head, hlist)
+ if (message->event_id == event_id && message->tmsn == tmsn)
+ return message;
+
+ return NULL;
+}
+
+static struct tracer_string_format *mlx5_tracer_message_get(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct hlist_head *head =
+ &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];
+
+ return mlx5_tracer_message_find(head, tracer_event->event_id, tracer_event->string_event.tmsn);
+}
+
+static void poll_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event, u64 *trace)
+{
+ u32 timestamp_low, timestamp_mid, timestamp_high, urts;
+
+ tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
+ tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);
+
+ switch (tracer_event->event_id) {
+ case TRACER_EVENT_TYPE_TIMESTAMP:
+ tracer_event->type = TRACER_EVENT_TYPE_TIMESTAMP;
+ urts = MLX5_GET(tracer_timestamp_event, trace, urts);
+ if (tracer->trc_ver == 0)
+ tracer_event->timestamp_event.unreliable = !!(urts >> 2);
+ else
+ tracer_event->timestamp_event.unreliable = !!(urts & 1);
+
+ timestamp_low = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp7_0);
+ timestamp_mid = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp39_8);
+ timestamp_high = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp52_40);
+
+ tracer_event->timestamp_event.timestamp =
+ ((u64)timestamp_high << 40) |
+ ((u64)timestamp_mid << 8) |
+ (u64)timestamp_low;
+ break;
+ default:
+ if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
+ tracer_event->event_id <= tracer->str_db.first_string_trace +
+ tracer->str_db.num_string_trace) {
+ tracer_event->type = TRACER_EVENT_TYPE_STRING;
+ tracer_event->string_event.timestamp =
+ MLX5_GET(tracer_string_event, trace, timestamp);
+ tracer_event->string_event.string_param =
+ MLX5_GET(tracer_string_event, trace, string_param);
+ tracer_event->string_event.tmsn =
+ MLX5_GET(tracer_string_event, trace, tmsn);
+ tracer_event->string_event.tdsn =
+ MLX5_GET(tracer_string_event, trace, tdsn);
+ } else {
+ tracer_event->type = TRACER_EVENT_TYPE_UNRECOGNIZED;
+ }
+ break;
+ }
+}
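
A standalone sketch of the timestamp assembly in poll_trace(): three hardware fields carrying bits 7:0, 39:8, and 52:40 are recombined into one 53-bit value (the field values below are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lo = 0xab;        /* timestamp7_0,   8 bits */
	uint64_t mid = 0x12345678; /* timestamp39_8,  32 bits */
	uint64_t hi = 0x1fff;      /* timestamp52_40, 13 bits */
	uint64_t ts = (hi << 40) | (mid << 8) | lo;

	printf("timestamp = 0x%llx\n", (unsigned long long)ts);
	return 0;
}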
+
+static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event)
+{
+ struct tracer_event tracer_event;
+ u8 event_id;
+
+ event_id = MLX5_GET(tracer_event, ts_event, event_id);
+
+ if (event_id == TRACER_EVENT_TYPE_TIMESTAMP)
+ poll_trace(tracer, &tracer_event, ts_event);
+ else
+ tracer_event.timestamp_event.timestamp = 0;
+
+ return tracer_event.timestamp_event.timestamp;
+}
+
+static void mlx5_fw_tracer_clean_print_hash(struct mlx5_fw_tracer *tracer)
+{
+ struct tracer_string_format *str_frmt;
+ struct hlist_node *n;
+ int i;
+
+ for (i = 0; i < MESSAGE_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(str_frmt, n, &tracer->hash[i], hlist)
+ mlx5_tracer_clean_message(str_frmt);
+ }
+}
+
+static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer)
+{
+ struct tracer_string_format *str_frmt, *tmp_str;
+
+ list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list,
+ list)
+ list_del(&str_frmt->list);
+}
+
+static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
+ struct mlx5_core_dev *dev,
+ u64 trace_timestamp)
+{
+ char tmp[512];
+
+ snprintf(tmp, sizeof(tmp), str_frmt->string,
+ str_frmt->params[0],
+ str_frmt->params[1],
+ str_frmt->params[2],
+ str_frmt->params[3],
+ str_frmt->params[4],
+ str_frmt->params[5],
+ str_frmt->params[6]);
+
+ trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost,
+ str_frmt->event_id, tmp);
+
+ /* remove it from hash */
+ mlx5_tracer_clean_message(str_frmt);
+}
+
+static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+
+ if (tracer_event->string_event.tdsn == 0) {
+ cur_string = mlx5_tracer_get_string(tracer, tracer_event);
+ if (!cur_string)
+ return -1;
+
+ cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
+ cur_string->last_param_num = 0;
+ cur_string->event_id = tracer_event->event_id;
+ cur_string->tmsn = tracer_event->string_event.tmsn;
+ cur_string->timestamp = tracer_event->string_event.timestamp;
+ cur_string->lost = tracer_event->lost_event;
+ if (cur_string->num_of_params == 0) /* trace with no params */
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ } else {
+ cur_string = mlx5_tracer_message_get(tracer, tracer_event);
+ if (!cur_string) {
+ pr_debug("%s Got string event for unknown string tdsm: %d\n",
+ __func__, tracer_event->string_event.tmsn);
+ return -1;
+ }
+ cur_string->last_param_num += 1;
+ if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
+ pr_debug("%s Number of params exceeds the max (%d)\n",
+ __func__, TRACER_MAX_PARAMS);
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ return 0;
+ }
+ /* keep the new parameter */
+ cur_string->params[cur_string->last_param_num - 1] =
+ tracer_event->string_event.string_param;
+ if (cur_string->last_param_num == cur_string->num_of_params)
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ }
+
+ return 0;
+}
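Worked example of the accumulation protocol above: a format string with two parameters, say "mtu=%d port=%d" (hypothetical), arrives as three string events sharing one tmsn. The tdsn == 0 event resolves the format string and records the 32-bit timestamp; the tdsn 1 and tdsn 2 events each contribute one 32-bit parameter, and once last_param_num reaches num_of_params the message is queued on ready_strings_list. A parameterless trace is queued immediately by the first branch.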
+
+static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_timestamp_event timestamp_event =
+ tracer_event->timestamp_event;
+ struct tracer_string_format *str_frmt, *tmp_str;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u64 trace_timestamp;
+
+ list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, list) {
+ list_del(&str_frmt->list);
+ if (str_frmt->timestamp < (timestamp_event.timestamp & MASK_6_0))
+ trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
+ (str_frmt->timestamp & MASK_6_0);
+ else
+ trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) -
+ (MASK_6_0 + 1)) |
+ (str_frmt->timestamp & MASK_6_0);
+
+ mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
+ }
+}
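A string event carries only the low 7 bits of its timestamp, and the loop above recovers the full 53-bit value from the enclosing block timestamp: since the block is stamped after its messages, a message whose low bits are not below the block's implies the 7-bit counter wrapped in between, so one step of bit 7 is borrowed from the upper field. A standalone sketch of that merge, using the mask values from fw_tracer.h:

    #include <stdint.h>

    #define MASK_52_7 0x1FFFFFFFFFFF80ULL
    #define MASK_6_0  0x7FULL

    /* Merge a message's 7-bit timestamp into the enclosing block timestamp. */
    static uint64_t tracer_merge_ts(uint64_t block_ts, uint32_t msg_ts7)
    {
            uint64_t upper = block_ts & MASK_52_7;

            /* Low counter wrapped between the message and the block stamp:
             * borrow one step of bit 7 from the upper field.
             */
            if ((msg_ts7 & MASK_6_0) >= (block_ts & MASK_6_0))
                    upper -= MASK_6_0 + 1;

            return upper | (msg_ts7 & MASK_6_0);
    }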
+
+static int mlx5_tracer_handle_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ if (tracer_event->type == TRACER_EVENT_TYPE_STRING) {
+ mlx5_tracer_handle_string_trace(tracer, tracer_event);
+ } else if (tracer_event->type == TRACER_EVENT_TYPE_TIMESTAMP) {
+ if (!tracer_event->timestamp_event.unreliable)
+ mlx5_tracer_handle_timestamp_trace(tracer, tracer_event);
+ } else {
+ pr_debug("%s Got unrecognised type %d for parsing, exiting..\n",
+ __func__, tracer_event->type);
+ }
+ return 0;
+}
+
+static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, handle_traces_work);
+ u64 block_timestamp, last_block_timestamp, tmp_trace_block[TRACES_PER_BLOCK];
+ u32 block_count, start_offset, prev_start_offset, prev_consumer_index;
+ u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event);
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct tracer_event tracer_event;
+ int i;
+
+ mlx5_core_dbg(dev, "FWTracer: Handle Trace event, owner=(%d)\n", tracer->owner);
+ if (!tracer->owner)
+ return;
+
+ block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
+ start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
+
+ /* Copy the block to a local buffer so HW cannot overwrite it while it is being processed */
+ memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
+ TRACER_BLOCK_SIZE_BYTE);
+
+ block_timestamp =
+ get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
+
+ while (block_timestamp > tracer->last_timestamp) {
+ /* Check for a block overwrite if this is not the first block */
+ if (tracer->last_timestamp) {
+ u64 *ts_event;
+ /* To detect a block overwrite by the HW in case of buffer
+ * wraparound, the timestamp of the previous block
+ * should be compared to the last timestamp handled
+ * by the driver.
+ */
+ prev_consumer_index =
+ (tracer->buff.consumer_index - 1) & (block_count - 1);
+ prev_start_offset = prev_consumer_index * TRACER_BLOCK_SIZE_BYTE;
+
+ ts_event = tracer->buff.log_buf + prev_start_offset +
+ (TRACES_PER_BLOCK - 1) * trace_event_size;
+ last_block_timestamp = get_block_timestamp(tracer, ts_event);
+ /* If the previous block's timestamp differs from the last
+ * stored timestamp, the current block has likely been
+ * overwritten and therefore should not be parsed.
+ */
+ if (tracer->last_timestamp != last_block_timestamp) {
+ mlx5_core_warn(dev, "FWTracer: Events were lost\n");
+ tracer->last_timestamp = block_timestamp;
+ tracer->buff.consumer_index =
+ (tracer->buff.consumer_index + 1) & (block_count - 1);
+ break;
+ }
+ }
+
+ /* Parse events */
+ for (i = 0; i < TRACES_PER_BLOCK; i++) {
+ poll_trace(tracer, &tracer_event, &tmp_trace_block[i]);
+ mlx5_tracer_handle_trace(tracer, &tracer_event);
+ }
+
+ tracer->buff.consumer_index =
+ (tracer->buff.consumer_index + 1) & (block_count - 1);
+
+ tracer->last_timestamp = block_timestamp;
+ start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
+ memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
+ TRACER_BLOCK_SIZE_BYTE);
+ block_timestamp = get_block_timestamp(tracer,
+ &tmp_trace_block[TRACES_PER_BLOCK - 1]);
+ }
+
+ mlx5_fw_tracer_arm(dev);
+}
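The consumer-index arithmetic above relies on block_count being a power of two (the buffer is TRACER_BUFFER_PAGE_NUM pages of TRACER_BUFFER_CHUNK bytes, both powers of two, divided into 256-byte blocks), so wraparound is a mask rather than a modulo. In isolation:

    #include <stdint.h>

    /* Advance a ring index over a power-of-two number of blocks;
     * equivalent to (index + 1) % block_count, without a division.
     */
    static uint32_t ring_advance(uint32_t index, uint32_t block_count)
    {
            return (index + 1) & (block_count - 1);
    }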
+
+static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
+ MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
+ ilog2(TRACER_BUFFER_PAGE_NUM));
+ MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CONF, 0, 1);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+
+ return err;
+}
+
+static int mlx5_fw_tracer_set_mtrc_ctrl(struct mlx5_fw_tracer *tracer, u8 status, u8 arm)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_ctrl, in, modify_field_select, TRACE_STATUS);
+ MLX5_SET(mtrc_ctrl, in, trace_status, status);
+ MLX5_SET(mtrc_ctrl, in, arm_event, arm);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CTRL, 0, 1);
+
+ if (!err && status)
+ tracer->last_timestamp = 0;
+
+ return err;
+}
+
+static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ int err;
+
+ err = mlx5_fw_tracer_ownership_acquire(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err);
+ /* Don't fail since ownership can be acquired on a later FW event */
+ return 0;
+ }
+
+ err = mlx5_fw_tracer_set_mtrc_conf(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to set tracer configuration %d\n", err);
+ goto release_ownership;
+ }
+
+ /* enable tracer & trace events */
+ err = mlx5_fw_tracer_set_mtrc_ctrl(tracer, 1, 1);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to enable tracer %d\n", err);
+ goto release_ownership;
+ }
+
+ mlx5_core_dbg(dev, "FWTracer: Ownership granted and active\n");
+ return 0;
+
+release_ownership:
+ mlx5_fw_tracer_ownership_release(tracer);
+ return err;
+}
+
+static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, ownership_change_work);
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
+ if (tracer->owner) {
+ tracer->owner = false;
+ tracer->buff.consumer_index = 0;
+ return;
+ }
+
+ mlx5_fw_tracer_start(tracer);
+}
+
+/* Create software resources (buffers, etc.) */
+struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_tracer *tracer = NULL;
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG(dev, tracer_registers)) {
+ mlx5_core_dbg(dev, "FWTracer: Tracer capability not present\n");
+ return NULL;
+ }
+
+ tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+ if (!tracer)
+ return ERR_PTR(-ENOMEM);
+
+ tracer->work_queue = create_singlethread_workqueue("mlx5_fw_tracer");
+ if (!tracer->work_queue) {
+ err = -ENOMEM;
+ goto free_tracer;
+ }
+
+ tracer->dev = dev;
+
+ INIT_LIST_HEAD(&tracer->ready_strings_list);
+ INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
+ INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
+ INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);
+
+ err = mlx5_query_mtrc_caps(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
+ goto destroy_workqueue;
+ }
+
+ err = mlx5_fw_tracer_create_log_buf(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Create log buffer failed %d\n", err);
+ goto destroy_workqueue;
+ }
+
+ err = mlx5_fw_tracer_allocate_strings_db(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Allocate strings database failed %d\n", err);
+ goto free_log_buf;
+ }
+
+ mlx5_core_dbg(dev, "FWTracer: Tracer created\n");
+
+ return tracer;
+
+free_log_buf:
+ mlx5_fw_tracer_destroy_log_buf(tracer);
+destroy_workqueue:
+ tracer->dev = NULL;
+ destroy_workqueue(tracer->work_queue);
+free_tracer:
+ kfree(tracer);
+ return ERR_PTR(err);
+}
+
+/* Create HW resources and start the tracer.
+ * Must be called before the Async EQ is created.
+ */
+int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev;
+ int err;
+
+ if (IS_ERR_OR_NULL(tracer))
+ return 0;
+
+ dev = tracer->dev;
+
+ if (!tracer->str_db.loaded)
+ queue_work(tracer->work_queue, &tracer->read_fw_strings_work);
+
+ err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
+ return err;
+ }
+
+ err = mlx5_fw_tracer_create_mkey(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to create mkey %d\n", err);
+ goto err_dealloc_pd;
+ }
+
+ mlx5_fw_tracer_start(tracer);
+
+ return 0;
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+ return err;
+}
+
+/* Stop the tracer and clean up HW resources.
+ * Must be called after the Async EQ is destroyed.
+ */
+void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
+{
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
+ tracer->owner);
+
+ cancel_work_sync(&tracer->ownership_change_work);
+ cancel_work_sync(&tracer->handle_traces_work);
+
+ if (tracer->owner)
+ mlx5_fw_tracer_ownership_release(tracer);
+
+ mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
+ mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
+}
+
+/* Free software resources (buffers, etc.) */
+void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
+{
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: Destroy\n");
+
+ cancel_work_sync(&tracer->read_fw_strings_work);
+ mlx5_fw_tracer_clean_ready_list(tracer);
+ mlx5_fw_tracer_clean_print_hash(tracer);
+ mlx5_fw_tracer_free_strings_db(tracer);
+ mlx5_fw_tracer_destroy_log_buf(tracer);
+ flush_workqueue(tracer->work_queue);
+ destroy_workqueue(tracer->work_queue);
+ kfree(tracer);
+}
+
+void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+ struct mlx5_fw_tracer *tracer = dev->tracer;
+
+ if (!tracer)
+ return;
+
+ switch (eqe->sub_type) {
+ case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
+ queue_work(tracer->work_queue, &tracer->ownership_change_work);
+ break;
+ case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
+ if (likely(tracer->str_db.loaded))
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ break;
+ default:
+ mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
+ eqe->sub_type);
+ }
+}
+
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
new file mode 100644
index 000000000000..0347f2dd5cee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_TRACER_H__
+#define __LIB_TRACER_H__
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+#define STRINGS_DB_SECTIONS_NUM 8
+#define STRINGS_DB_READ_SIZE_BYTES 256
+#define STRINGS_DB_LEFTOVER_SIZE_BYTES 64
+#define TRACER_BUFFER_PAGE_NUM 64
+#define TRACER_BUFFER_CHUNK 4096
+#define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK)
+
+#define TRACER_BLOCK_SIZE_BYTE 256
+#define TRACES_PER_BLOCK 32
+
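These constants are mutually consistent with the event layout declared below: each trace event is 64 bits wide (so MLX5_ST_SZ_BYTES(tracer_event) is 8), and a 256-byte block therefore holds exactly 256 / 8 = 32 events, the last of which the driver reads as the block timestamp.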
+#define TRACER_MAX_PARAMS 7
+#define MESSAGE_HASH_BITS 6
+#define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS)
+
+#define MASK_52_7 (0x1FFFFFFFFFFF80)
+#define MASK_6_0 (0x7F)
+
+struct mlx5_fw_tracer {
+ struct mlx5_core_dev *dev;
+ bool owner;
+ u8 trc_ver;
+ struct workqueue_struct *work_queue;
+ struct work_struct ownership_change_work;
+ struct work_struct read_fw_strings_work;
+
+ /* Strings DB */
+ struct {
+ u8 first_string_trace;
+ u8 num_string_trace;
+ u32 num_string_db;
+ u32 base_address_out[STRINGS_DB_SECTIONS_NUM];
+ u32 size_out[STRINGS_DB_SECTIONS_NUM];
+ void *buffer[STRINGS_DB_SECTIONS_NUM];
+ bool loaded;
+ } str_db;
+
+ /* Log Buffer */
+ struct {
+ u32 pdn;
+ void *log_buf;
+ dma_addr_t dma;
+ u32 size;
+ struct mlx5_core_mkey mkey;
+ u32 consumer_index;
+ } buff;
+
+ u64 last_timestamp;
+ struct work_struct handle_traces_work;
+ struct hlist_head hash[MESSAGE_HASH_SIZE];
+ struct list_head ready_strings_list;
+};
+
+struct tracer_string_format {
+ char *string;
+ int params[TRACER_MAX_PARAMS];
+ int num_of_params;
+ int last_param_num;
+ u8 event_id;
+ u32 tmsn;
+ struct hlist_node hlist;
+ struct list_head list;
+ u32 timestamp;
+ bool lost;
+};
+
+enum mlx5_fw_tracer_ownership_state {
+ MLX5_FW_TRACER_RELEASE_OWNERSHIP,
+ MLX5_FW_TRACER_ACQUIRE_OWNERSHIP,
+};
+
+enum tracer_ctrl_fields_select {
+ TRACE_STATUS = 1 << 0,
+};
+
+enum tracer_event_type {
+ TRACER_EVENT_TYPE_STRING,
+ TRACER_EVENT_TYPE_TIMESTAMP = 0xFF,
+ TRACER_EVENT_TYPE_UNRECOGNIZED,
+};
+
+enum tracing_mode {
+ TRACE_TO_MEMORY = 1 << 0,
+};
+
+struct tracer_timestamp_event {
+ u64 timestamp;
+ u8 unreliable;
+};
+
+struct tracer_string_event {
+ u32 timestamp;
+ u32 tmsn;
+ u32 tdsn;
+ u32 string_param;
+};
+
+struct tracer_event {
+ bool lost_event;
+ u32 type;
+ u8 event_id;
+ union {
+ struct tracer_string_event string_event;
+ struct tracer_timestamp_event timestamp_event;
+ };
+};
+
+struct mlx5_ifc_tracer_event_bits {
+ u8 lost[0x1];
+ u8 timestamp[0x7];
+ u8 event_id[0x8];
+ u8 event_data[0x30];
+};
+
+struct mlx5_ifc_tracer_string_event_bits {
+ u8 lost[0x1];
+ u8 timestamp[0x7];
+ u8 event_id[0x8];
+ u8 tmsn[0xd];
+ u8 tdsn[0x3];
+ u8 string_param[0x20];
+};
+
+struct mlx5_ifc_tracer_timestamp_event_bits {
+ u8 timestamp7_0[0x8];
+ u8 event_id[0x8];
+ u8 urts[0x3];
+ u8 timestamp52_40[0xd];
+ u8 timestamp39_8[0x20];
+};
+
+struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
+int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
new file mode 100644
index 000000000000..83f90e9aff45
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(__LIB_TRACER_TRACEPOINT_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __LIB_TRACER_TRACEPOINT_H__
+
+#include <linux/tracepoint.h>
+#include "fw_tracer.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+/* Tracepoint for FWTracer messages: */
+TRACE_EVENT(mlx5_fw,
+ TP_PROTO(const struct mlx5_fw_tracer *tracer, u64 trace_timestamp,
+ bool lost, u8 event_id, const char *msg),
+
+ TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(&tracer->dev->pdev->dev))
+ __field(u64, trace_timestamp)
+ __field(bool, lost)
+ __field(u8, event_id)
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
+ __entry->trace_timestamp = trace_timestamp;
+ __entry->lost = lost;
+ __entry->event_id = event_id;
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("%s [0x%llx] %d [0x%x] %s",
+ __get_str(dev_name),
+ __entry->trace_timestamp,
+ __entry->lost, __entry->event_id,
+ __get_str(msg))
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ./diag
+#define TRACE_INCLUDE_FILE fw_tracer_tracepoint
+#include <trace/define_trace.h>
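Assuming the defining translation unit (fw_tracer.c) includes this header with CREATE_TRACE_POINTS defined, the tracepoint appears under the standard tracefs hierarchy; with tracefs mounted at /sys/kernel/debug/tracing, it can be enabled via events/mlx5/mlx5_fw/enable and the formatted messages read from trace_pipe.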
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index eb9eb7aa953a..db2cfcd21d43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -52,6 +52,7 @@
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
+#include "en/fs.h"
struct page_pool;
@@ -137,7 +138,6 @@ struct page_pool;
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
-#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
@@ -148,10 +148,6 @@ struct page_pool;
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
- ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-
#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -349,6 +345,7 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_REDIRECT,
};
struct mlx5e_sq_wqe_info {
@@ -369,16 +366,14 @@ struct mlx5e_txqsq {
struct mlx5e_cq cq;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
- } db;
-
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
struct mlx5e_sq_stats *stats;
+ struct {
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } db;
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
@@ -400,30 +395,43 @@ struct mlx5e_txqsq {
} recover;
} ____cacheline_aligned_in_smp;
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
+struct mlx5e_xdp_info {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ struct mlx5e_dma_info di;
+};
+
struct mlx5e_xdpsq {
/* data path */
- /* dirtied @rx completion */
+ /* dirtied @completion */
u16 cc;
- u16 pc;
+ bool redirect_flush;
- struct mlx5e_cq cq;
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ bool doorbell;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_dma_info *di;
- bool doorbell;
- bool redirect_flush;
- } db;
+ struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
+ struct mlx5e_xdpsq_stats *stats;
+ struct {
+ struct mlx5e_xdp_info *xdpi;
+ } db;
void __iomem *uar_map;
u32 sqn;
struct device *pdev;
__be32 mkey_be;
u8 min_inline_mode;
unsigned long state;
+ unsigned int hw_mtu;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -460,11 +468,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
-struct mlx5e_dma_info {
- struct page *page;
- dma_addr_t addr;
-};
-
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info *di;
u32 offset;
@@ -567,7 +570,6 @@ struct mlx5e_rq {
/* XDP */
struct bpf_prog *xdp_prog;
- unsigned int hw_mtu;
struct mlx5e_xdpsq xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
@@ -596,6 +598,9 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
+ /* XDP_REDIRECT */
+ struct mlx5e_xdpsq xdpsq;
+
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
struct mlx5e_ch_stats *stats;
@@ -618,159 +623,16 @@ struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
+ struct mlx5e_xdpsq_stats rq_xdpsq;
+ struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;
-enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP,
- MLX5E_TT_IPV6_TCP,
- MLX5E_TT_IPV4_UDP,
- MLX5E_TT_IPV6_UDP,
- MLX5E_TT_IPV4_IPSEC_AH,
- MLX5E_TT_IPV6_IPSEC_AH,
- MLX5E_TT_IPV4_IPSEC_ESP,
- MLX5E_TT_IPV6_IPSEC_ESP,
- MLX5E_TT_IPV4,
- MLX5E_TT_IPV6,
- MLX5E_TT_ANY,
- MLX5E_NUM_TT,
- MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
-};
-
-enum mlx5e_tunnel_types {
- MLX5E_TT_IPV4_GRE,
- MLX5E_TT_IPV6_GRE,
- MLX5E_NUM_TUNNEL_TT,
-};
-
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
-struct mlx5e_vxlan_db {
- spinlock_t lock; /* protect vxlan table */
- struct radix_tree_root tree;
-};
-
-struct mlx5e_l2_rule {
- u8 addr[ETH_ALEN + 2];
- struct mlx5_flow_handle *rule;
-};
-
-struct mlx5e_flow_table {
- int num_groups;
- struct mlx5_flow_table *t;
- struct mlx5_flow_group **g;
-};
-
-#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
-
-struct mlx5e_tc_table {
- struct mlx5_flow_table *t;
-
- struct rhashtable ht;
-
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-};
-
-struct mlx5e_vlan_table {
- struct mlx5e_flow_table ft;
- DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
- DECLARE_BITMAP(active_svlans, VLAN_N_VID);
- struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_cvlan_rule;
- struct mlx5_flow_handle *any_svlan_rule;
- bool cvlan_filter_disabled;
-};
-
-struct mlx5e_l2_table {
- struct mlx5e_flow_table ft;
- struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
- struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
- struct mlx5e_l2_rule broadcast;
- struct mlx5e_l2_rule allmulti;
- struct mlx5e_l2_rule promisc;
- bool broadcast_enabled;
- bool allmulti_enabled;
- bool promisc_enabled;
-};
-
-/* L3/L4 traffic type classifier */
-struct mlx5e_ttc_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
- struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
-};
-
-#define ARFS_HASH_SHIFT BITS_PER_BYTE
-#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
-struct arfs_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *default_rule;
- struct hlist_head rules_hash[ARFS_HASH_SIZE];
-};
-
-enum arfs_type {
- ARFS_IPV4_TCP,
- ARFS_IPV6_TCP,
- ARFS_IPV4_UDP,
- ARFS_IPV6_UDP,
- ARFS_NUM_TYPES,
-};
-
-struct mlx5e_arfs_tables {
- struct arfs_table arfs_tables[ARFS_NUM_TYPES];
- /* Protect aRFS rules list */
- spinlock_t arfs_lock;
- struct list_head rules;
- int last_filter_id;
- struct workqueue_struct *wq;
-};
-
-/* NIC prio FTS */
-enum {
- MLX5E_VLAN_FT_LEVEL = 0,
- MLX5E_L2_FT_LEVEL,
- MLX5E_TTC_FT_LEVEL,
- MLX5E_INNER_TTC_FT_LEVEL,
- MLX5E_ARFS_FT_LEVEL
-};
-
-enum {
- MLX5E_TC_FT_LEVEL = 0,
- MLX5E_TC_TTC_FT_LEVEL,
-};
-
-struct mlx5e_ethtool_table {
- struct mlx5_flow_table *ft;
- int num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
- struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
- struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
- struct list_head rules;
- int tot_num_rules;
-};
-
-struct mlx5e_flow_steering {
- struct mlx5_flow_namespace *ns;
- struct mlx5e_ethtool_steering ethtool;
- struct mlx5e_tc_table tc;
- struct mlx5e_vlan_table vlan;
- struct mlx5e_l2_table l2;
- struct mlx5e_ttc_table ttc;
- struct mlx5e_ttc_table inner_ttc;
- struct mlx5e_arfs_tables arfs;
-};
-
struct mlx5e_rqt {
u32 rqtn;
bool enabled;
@@ -810,7 +672,6 @@ struct mlx5e_priv {
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
- struct mlx5e_vxlan_db vxlan;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -858,15 +719,14 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
- void (*netdev_registered_init)(struct mlx5e_priv *priv);
- void (*netdev_registered_remove)(struct mlx5e_priv *priv);
int max_tc;
};
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi);
@@ -876,14 +736,13 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -892,7 +751,6 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
@@ -908,23 +766,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- int location);
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs);
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
- struct ethtool_rx_flow_spec *fs);
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
- int location);
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
@@ -935,8 +780,6 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
@@ -1053,32 +896,6 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
-#ifndef CONFIG_RFS_ACCEL
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
-{
- return -EOPNOTSUPP;
-}
-#else
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
-int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id);
-#endif
-
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
@@ -1099,27 +916,6 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-struct ttc_params {
- struct mlx5_flow_table_attr ft_attr;
- u32 any_tt_tirn;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_ttc_table *inner_ttc;
-};
-
-void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
-void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
-
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
-
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
new file mode 100644
index 000000000000..bbf69e859b78
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5E_FLOW_STEER_H__
+#define __MLX5E_FLOW_STEER_H__
+
+enum {
+ MLX5E_TC_FT_LEVEL = 0,
+ MLX5E_TC_TTC_FT_LEVEL,
+};
+
+struct mlx5e_tc_table {
+ struct mlx5_flow_table *t;
+
+ struct rhashtable ht;
+
+ DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+};
+
+struct mlx5e_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
+};
+
+struct mlx5e_l2_rule {
+ u8 addr[ETH_ALEN + 2];
+ struct mlx5_flow_handle *rule;
+};
+
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *untagged_rule;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
+ bool cvlan_filter_disabled;
+};
+
+struct mlx5e_l2_table {
+ struct mlx5e_flow_table ft;
+ struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct mlx5e_l2_rule broadcast;
+ struct mlx5e_l2_rule allmulti;
+ struct mlx5e_l2_rule promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
+ MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
+};
+
+enum mlx5e_tunnel_types {
+ MLX5E_TT_IPV4_GRE,
+ MLX5E_TT_IPV6_GRE,
+ MLX5E_NUM_TUNNEL_TT,
+};
+
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
+ struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
+};
+
+/* NIC prio FTS */
+enum {
+ MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_L2_FT_LEVEL,
+ MLX5E_TTC_FT_LEVEL,
+ MLX5E_INNER_TTC_FT_LEVEL,
+#ifdef CONFIG_MLX5_EN_ARFS
+ MLX5E_ARFS_FT_LEVEL
+#endif
+};
+
+#ifdef CONFIG_MLX5_EN_RXNFC
+
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
+
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+int mlx5e_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+#else
+static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
+static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
+#endif /* CONFIG_MLX5_EN_RXNFC */
+
+#ifdef CONFIG_MLX5_EN_ARFS
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
+
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
+};
+
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+#else
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+#endif
+
+struct mlx5e_flow_steering {
+ struct mlx5_flow_namespace *ns;
+#ifdef CONFIG_MLX5_EN_RXNFC
+ struct mlx5e_ethtool_steering ethtool;
+#endif
+ struct mlx5e_tc_table tc;
+ struct mlx5e_vlan_table vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5e_ttc_table ttc;
+ struct mlx5e_ttc_table inner_ttc;
+#ifdef CONFIG_MLX5_EN_ARFS
+ struct mlx5e_arfs_tables arfs;
+#endif
+};
+
+struct ttc_params {
+ struct mlx5_flow_table_attr ft_attr;
+ u32 any_tt_tirn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
+
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+
+#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
new file mode 100644
index 000000000000..ad6d471d00dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf_trace.h>
+#include "en/xdp.h"
+
+static inline bool
+mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
+ struct xdp_buff *xdp)
+{
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpi.xdpf))
+ return false;
+ xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
+ dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ xdpi.di = *di;
+
+ return mlx5e_xmit_xdp_frame(sq, &xdpi);
+}
+
+/* returns true if packet was consumed by xdp */
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
+{
+ struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct xdp_buff xdp;
+ u32 act;
+ int err;
+
+ if (!prog)
+ return false;
+
+ xdp.data = va + *rx_headroom;
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + *len;
+ xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ *rx_headroom = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+ return false;
+ case XDP_TX:
+ if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+ return true;
+ case XDP_REDIRECT:
+ /* When XDP is enabled, the page refcount is 1 here */
+ err = xdp_do_redirect(rq->netdev, &xdp, prog);
+ if (unlikely(err))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+ rq->xdpsq.redirect_flush = true;
+ mlx5e_page_dma_unmap(rq, di);
+ rq->stats->xdp_redirect++;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+xdp_abort:
+ trace_xdp_exception(rq->netdev, prog, act);
+ /* fall through */
+ case XDP_DROP:
+ rq->stats->xdp_drop++;
+ return true;
+ }
+}
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg = wqe->data;
+
+ struct xdp_frame *xdpf = xdpi->xdpf;
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ unsigned int dma_len = xdpf->len;
+
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+
+ prefetchw(wqe);
+
+ if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
+ stats->err++;
+ return false;
+ }
+
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+ if (sq->doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->doorbell = false;
+ }
+ stats->full++;
+ return false;
+ }
+
+ cseg->fm_ce_se = 0;
+
+ /* copy the inline part if required */
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ dma_len -= MLX5E_XDP_MIN_INLINE;
+ dma_addr += MLX5E_XDP_MIN_INLINE;
+ dseg++;
+ }
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+
+ /* Hand the page reference over to the SQ and mark it
+ * so it is not put back in the page cache.
+ */
+ sq->db.xdpi[pi] = *xdpi;
+ sq->pc++;
+
+ sq->doorbell = true;
+
+ stats->xmit++;
+ return true;
+}
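For scale: MLX5E_XDP_MIN_INLINE is ETH_HLEN + VLAN_HLEN, i.e. 18 bytes, so with inlining enabled a 60-byte frame is emitted as an 18-byte inline header in the ethernet segment plus a 42-byte DMA data segment, while frames shorter than 18 bytes or longer than the SQ's hw_mtu are counted in stats->err and dropped.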
+
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_xdpsq *sq;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return false;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ i = 0;
+ do {
+ u16 wqe_counter;
+ bool last_wqe;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ last_wqe = (sqcc == wqe_counter);
+ sqcc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, true);
+ }
+ } while (!last_wqe);
+ } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
+ sq->stats->cqes += i;
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+
+ while (sq->cc != sq->pc) {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ sq->cc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, false);
+ }
+ }
+}
+
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_xdpsq *sq;
+ int drops = 0;
+ int sq_num;
+ int i;
+
+ if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ sq_num = smp_processor_id();
+
+ if (unlikely(sq_num >= priv->channels.num))
+ return -ENXIO;
+
+ sq = &priv->channels.c[sq_num]->xdpsq;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ xdpi.xdpf = xdpf;
+
+ if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ mlx5e_xmit_xdp_doorbell(sq);
+
+ return n - drops;
+}
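The contract implemented above follows the ndo_xdp_xmit convention: the caller is expected to run in softirq context, so smp_processor_id() maps each CPU to its channel's dedicated XDP_REDIRECT SQ, and the return value is the number of frames actually queued (n - drops); frames that cannot be mapped or posted are handed back through xdp_return_frame_rx_napi().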
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
new file mode 100644
index 000000000000..6dfab045925f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_XDP_H__
+#define __MLX5_EN_XDP_H__
+
+#include "en.h"
+
+#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
+ MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_TX_DS_COUNT \
+ ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index f20074dbef32..1dd225380a66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -34,19 +34,26 @@
#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__
-#ifdef CONFIG_MLX5_ACCEL
-
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
-static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+static inline void
+mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+{
+ int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ udp_hdr(skb)->len = htons(payload_len);
+}
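Worked example: for a UDP GSO skb with gso_size 1400, each emitted segment carries 1400 bytes of payload, so the UDP header's length field is rewritten here to 1408 (the payload plus the 8-byte struct udphdr) before the hardware segments the packet.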
+
+static inline struct sk_buff *
+mlx5e_accel_handle_tx(struct sk_buff *skb,
+ struct mlx5e_txqsq *sq,
+ struct net_device *dev,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
@@ -64,9 +71,10 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
}
#endif
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
+
return skb;
}
-#endif /* CONFIG_MLX5_ACCEL */
-
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index c245d8e78509..128a82b1dbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -37,6 +37,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
+#include "accel/accel.h"
#include "en.h"
enum {
@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
}
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *cqe_bcnt)
{
struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
struct xfrm_state *xs;
- __be16 *ethtype;
- /* Detect inline metadata */
- if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
- return skb;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ if (!is_metadata_hdr_valid(skb))
return skb;
/* Use the metadata */
@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
return NULL;
}
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
return skb;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2bfbbef1b054..ca47c0540904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -41,7 +41,7 @@
#include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index d167845271c3..eddd7702680b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
u32 caps = mlx5_accel_tls_device_caps(mdev);
int ret = -ENOMEM;
void *flow;
-
- if (direction != TLS_OFFLOAD_CTX_DIR_TX)
- return -EINVAL;
+ u32 swid;
flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
if (!flow)
@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
if (ret)
goto free_flow;
+ ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, &swid,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+ if (ret < 0)
+ goto free_flow;
+
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context *tx_ctx =
+ struct mlx5e_tls_offload_context_tx *tx_ctx =
mlx5e_get_tls_tx_context(tls_ctx);
- u32 swid;
-
- ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid);
- if (ret < 0)
- goto free_flow;
tx_ctx->swid = htonl(swid);
tx_ctx->expected_seq = start_offload_tcp_sn;
+ } else {
+ struct mlx5e_tls_offload_context_rx *rx_ctx =
+ mlx5e_get_tls_rx_context(tls_ctx);
+
+ rx_ctx->handle = htonl(swid);
}
return 0;
@@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev,
enum tls_offload_ctx_dir direction)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ unsigned int handle;
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+ handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
+ mlx5e_get_tls_tx_context(tls_ctx)->swid :
+ mlx5e_get_tls_rx_context(tls_ctx)->handle);
- mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
- } else {
- netdev_err(netdev, "unsupported direction %d\n", direction);
- }
+ mlx5_accel_tls_del_flow(priv->mdev, handle,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+}
+
+static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
+ u32 seq, u64 rcd_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_tls_offload_context_rx *rx_ctx;
+
+ rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
+
+ netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
+ be64_to_cpu(rcd_sn));
+ mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
}
static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del,
+ .tls_dev_resync_rx = mlx5e_tls_resync_rx,
};
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
+ u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev;
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ if (caps & MLX5_ACCEL_TLS_TX) {
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ }
+
+ if (caps & MLX5_ACCEL_TLS_RX) {
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ }
+
+ if (!(caps & MLX5_ACCEL_TLS_LRO)) {
+ netdev->features &= ~NETIF_F_LRO;
+ netdev->hw_features &= ~NETIF_F_LRO;
+ }
+
netdev->tlsdev_ops = &mlx5e_tls_ops;
}
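The tls.c hunks above fold the previously TX-only path into a single mlx5_accel_tls_add_flow() call shared by both directions: the direction is reduced to a boolean, and the returned swid is stored either in the TX context or, byte-swapped the same way, as the RX handle. A standalone sketch of that dispatch shape; every _stub name is an illustrative stand-in, not the mlx5 API:

#include <stdbool.h>
#include <stdio.h>

enum tls_dir { TLS_DIR_RX, TLS_DIR_TX };

static int accel_add_flow_stub(bool tx, unsigned int *swid)
{
    *swid = tx ? 0x100 : 0x200; /* pretend the device returned an ID */
    return 0;
}

static int tls_add_stub(enum tls_dir dir)
{
    unsigned int swid;
    int ret;

    /* One call for both directions; the direction is just a flag. */
    ret = accel_add_flow_stub(dir == TLS_DIR_TX, &swid);
    if (ret < 0)
        return ret;

    if (dir == TLS_DIR_TX)
        printf("TX context stores swid %#x\n", swid);
    else
        printf("RX context stores handle %#x\n", swid);
    return 0;
}

int main(void)
{
    tls_add_stub(TLS_DIR_TX);
    tls_add_stub(TLS_DIR_RX);
    return 0;
}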
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index b6162178f621..3f5d72163b56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required;
+ atomic64_t rx_tls_drop_resync_request;
+ atomic64_t rx_tls_resync_request;
+ atomic64_t rx_tls_resync_reply;
+ atomic64_t rx_tls_auth_fail;
};
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
};
-struct mlx5e_tls_offload_context {
- struct tls_offload_context base;
+struct mlx5e_tls_offload_context_tx {
+ struct tls_offload_context_tx base;
u32 expected_seq;
__be32 swid;
};
-static inline struct mlx5e_tls_offload_context *
+static inline struct mlx5e_tls_offload_context_tx *
mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
- TLS_OFFLOAD_CONTEXT_SIZE);
- return container_of(tls_offload_ctx(tls_ctx),
- struct mlx5e_tls_offload_context,
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ return container_of(tls_offload_ctx_tx(tls_ctx),
+ struct mlx5e_tls_offload_context_tx,
+ base);
+}
+
+struct mlx5e_tls_offload_context_rx {
+ struct tls_offload_context_rx base;
+ __be32 handle;
+};
+
+static inline struct mlx5e_tls_offload_context_rx *
+mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ return container_of(tls_offload_ctx_rx(tls_ctx),
+ struct mlx5e_tls_offload_context_rx,
base);
}
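The getters added to tls.h follow one pattern: a BUILD_BUG_ON bounds the driver-private context against the space the TLS core reserves (TLS_OFFLOAD_CONTEXT_SIZE_TX/_RX), then container_of() recovers the outer struct from the embedded base. A userspace sketch of the same pattern, assuming an illustrative 64-byte reservation and a local container_of macro:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct offload_ctx_rx { int core_state[4]; }; /* what the core hands out */

struct drv_ctx_rx {
    struct offload_ctx_rx base; /* embedded core context */
    unsigned int handle;
};

#define CTX_SIZE_RX 64 /* space the core reserves; illustrative value */
static_assert(sizeof(struct drv_ctx_rx) <= CTX_SIZE_RX,
              "driver context must fit the reserved area");

static struct drv_ctx_rx *get_rx_ctx(struct offload_ctx_rx *base)
{
    return container_of(base, struct drv_ctx_rx, base);
}

int main(void)
{
    struct drv_ctx_rx ctx = { .handle = 42 };

    printf("handle via base pointer: %u\n", get_rx_ctx(&ctx.base)->handle);
    return 0;
}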
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 15aef71d1957..be137d4a9169 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -33,6 +33,14 @@
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
+#include "accel/accel.h"
+
+#include <net/inet6_hashtables.h>
+#include <linux/ipv6.h>
+
+#define SYNDROM_DECRYPTED 0x30
+#define SYNDROM_RESYNC_REQUEST 0x31
+#define SYNDROM_AUTH_FAILED 0x32
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33
@@ -44,10 +52,26 @@ struct sync_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-struct mlx5e_tls_metadata {
+struct recv_metadata_content {
+ u8 syndrome;
+ u8 reserved;
+ __be32 sync_seq;
+} __packed;
+
+struct send_metadata_content {
/* One byte of syndrome followed by 3 bytes of swid */
__be32 syndrome_swid;
__be16 first_seq;
+} __packed;
+
+struct mlx5e_tls_metadata {
+ union {
+ /* from fpga to host */
+ struct recv_metadata_content recv;
+ /* from host to fpga */
+ struct send_metadata_content send;
+ unsigned char raw[6];
+ } __packed content;
/* packet type ID field */
__be16 ethertype;
} __packed;
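The union above gives the 8-byte metadata header two views over the same 6 bytes, one per traffic direction, ahead of the 2-byte ethertype. A compile-time check of that layout in plain C, with GCC packed attributes standing in for the kernel's __packed:

#include <assert.h>
#include <stdint.h>

struct recv_metadata_content {
    uint8_t  syndrome;
    uint8_t  reserved;
    uint32_t sync_seq; /* big-endian on the wire */
} __attribute__((packed));

struct send_metadata_content {
    uint32_t syndrome_swid; /* 1 byte syndrome + 3 bytes swid */
    uint16_t first_seq;
} __attribute__((packed));

struct tls_metadata {
    union {
        struct recv_metadata_content recv; /* from fpga to host */
        struct send_metadata_content send; /* from host to fpga */
        unsigned char raw[6];
    } __attribute__((packed)) content;
    uint16_t ethertype; /* packet type ID field */
} __attribute__((packed));

static_assert(sizeof(struct recv_metadata_content) == 6, "recv view is 6 bytes");
static_assert(sizeof(struct send_metadata_content) == 6, "send view is 6 bytes");
static_assert(sizeof(struct tls_metadata) == 8, "full header is 8 bytes");

int main(void) { return 0; }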
@@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
+ pet->content.send.syndrome_swid =
+ htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
return 0;
}
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
+static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
u32 tcp_seq, struct sync_info *info)
{
int remaining, i = 0, ret = -EINVAL;
@@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
memcpy(pet, &syndrome, sizeof(syndrome));
- pet->first_seq = htons(tcp_seq);
+ pet->content.send.first_seq = htons(tcp_seq);
/* MLX5 devices don't care about the checksum partial start, offset
* and pseudo header
@@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
}
static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
+mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi,
@@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
u16 *pi)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context *context;
+ struct mlx5e_tls_offload_context_tx *context;
struct tls_context *tls_ctx;
u32 expected_seq;
int datalen;
@@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
out:
return skb;
}
+
+static int tls_update_resync_sn(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_tls_metadata *mdata)
+{
+ struct sock *sk = NULL;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ __be32 seq;
+
+ if (mdata->ethertype != htons(ETH_P_IP))
+ return -EINVAL;
+
+ iph = (struct iphdr *)(mdata + 1);
+
+ if (iph->version == 4) {
+ th = ((void *)iph) + iph->ihl * 4;
+
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
+
+ th = ((void *)ipv6h) + sizeof(*ipv6h);
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+#endif
+ }
+ if (!sk || sk->sk_state == TCP_TIME_WAIT) {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
+ goto out;
+ }
+
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+
+ memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
+ tls_offload_rx_resync_request(sk, seq);
+out:
+ return 0;
+}
+
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt)
+{
+ struct mlx5e_tls_metadata *mdata;
+ struct mlx5e_priv *priv;
+
+ if (!is_metadata_hdr_valid(skb))
+ return;
+
+ /* Use the metadata */
+ mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
+ switch (mdata->content.recv.syndrome) {
+ case SYNDROM_DECRYPTED:
+ skb->decrypted = 1;
+ break;
+ case SYNDROM_RESYNC_REQUEST:
+ tls_update_resync_sn(netdev, skb, mdata);
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
+ break;
+ case SYNDROM_AUTH_FAILED:
+ /* Authentication failure will be observed and verified by kTLS */
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
+ break;
+ default:
+ /* Bypass the metadata header to others */
+ return;
+ }
+
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
+}
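mlx5e_tls_handle_rx_skb() above is a straight dispatch on the one-byte syndrome; only the decrypted, resync-request, and auth-failed cases consume the metadata header, and anything else passes through untouched. A minimal model of that dispatch, reusing the SYNDROM_* values defined above (everything else is illustrative):

#include <stdio.h>

#define SYNDROM_DECRYPTED      0x30
#define SYNDROM_RESYNC_REQUEST 0x31
#define SYNDROM_AUTH_FAILED    0x32

static void handle_syndrome(unsigned char syndrome)
{
    switch (syndrome) {
    case SYNDROM_DECRYPTED:
        puts("payload already decrypted by HW; mark skb->decrypted");
        break;
    case SYNDROM_RESYNC_REQUEST:
        puts("HW lost record sync; ask the TLS layer to resync");
        break;
    case SYNDROM_AUTH_FAILED:
        puts("record failed authentication; let kTLS verify and fail it");
        break;
    default:
        puts("not our metadata; leave the packet untouched");
    }
}

int main(void)
{
    handle_syndrome(SYNDROM_RESYNC_REQUEST);
    return 0;
}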
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 405dfd302225..311667ec71b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe **wqe,
u16 *pi);
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt);
+
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 75e4308ba786..45cdde694d20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -30,8 +30,6 @@
* SOFTWARE.
*/
-#ifdef CONFIG_RFS_ACCEL
-
#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
@@ -381,14 +379,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
- if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
- break;
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
arfs_rule->filter_id)) {
hlist_del_init(&arfs_rule->hlist);
hlist_add_head(&arfs_rule->hlist, &del_list);
+ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+ break;
}
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +709,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb->protocol != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
if (!arfs_t)
return -EPROTONOSUPPORT;
@@ -735,4 +736,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
-#endif
+
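Two notes on the en_arfs.c changes: the file drops its CONFIG_RFS_ACCEL wrapper, presumably because the new CONFIG_MLX5_EN_ARFS symbol now gates compilation at the Kbuild level instead (an assumption; the Makefile change is not shown here), and the expiry loop now charges its quota per rule actually queued for deletion rather than per rule merely scanned. A standalone model of the quota change:

#include <stdio.h>

#define EXPIRY_QUOTA 3

static int rule_expired(int id)
{
    return id % 2 == 0; /* stand-in for rps_may_expire_flow() */
}

int main(void)
{
    int quota = 0;

    for (int id = 0; id < 20; id++) {
        if (!rule_expired(id))
            continue; /* scanning a live rule costs nothing */
        printf("queue rule %d for deletion\n", id);
        if (quota++ > EXPIRY_QUOTA) /* budget charged per deletion */
            break;
    }
    return 0;
}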
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0a52f31fef37..722998d68564 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
}
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
- struct ieee_ets *ets)
+ struct ieee_ets *ets,
+ bool zero_sum_allowed)
{
bool have_ets_tc = false;
int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
if (have_ets_tc && bw_sum != 100) {
- netdev_err(netdev,
- "Failed to validate ETS: BW sum is illegal\n");
+ if (bw_sum || !zero_sum_allowed)
+ netdev_err(netdev,
+ "Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
}
return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- err = mlx5e_dbcnl_validate_ets(netdev, ets);
+ err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
if (err)
return err;
@@ -441,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
bool is_new;
int err;
- if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
- return -EINVAL;
-
- if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
- return -EINVAL;
-
- if (!MLX5_DSCP_SUPPORTED(priv->mdev))
- return -EINVAL;
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+ !MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EOPNOTSUPP;
- if (app->protocol >= MLX5E_MAX_DSCP)
+ if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+ (app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL;
/* Save the old entry info */
@@ -498,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
- if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
- return -EINVAL;
-
- if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
- return -EINVAL;
-
- if (!MLX5_DSCP_SUPPORTED(priv->mdev))
- return -EINVAL;
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+ !MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EOPNOTSUPP;
- if (app->protocol >= MLX5E_MAX_DSCP)
+ if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+ (app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL;
/* Skip if no dscp app entry */
@@ -642,12 +636,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.prio_tc[i]);
}
- err = mlx5e_dbcnl_validate_ets(netdev, &ets);
- if (err) {
- netdev_err(netdev,
- "%s, Failed to validate ETS: %d\n", __func__, err);
+ err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+ if (err)
goto out;
- }
err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
if (err) {
@@ -1147,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
int err;
- err = mlx5_set_trust_state(priv->mdev, trust_state);
+ err = mlx5_set_trust_state(priv->mdev, trust_state);
if (err)
return err;
priv->dcbx_dp.trust_state = trust_state;
@@ -1173,6 +1164,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+
if (!MLX5_DSCP_SUPPORTED(mdev))
return 0;
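The zero_sum_allowed parameter threaded through mlx5e_dbcnl_validate_ets() above lets the CEE setall path hand in an all-zero ETS bandwidth table without triggering the error log; the table is still rejected, just silently. A standalone model of that validation (names illustrative):

#include <stdbool.h>
#include <stdio.h>

static int validate_ets_bw(int bw_sum, bool have_ets_tc, bool zero_sum_allowed)
{
    if (have_ets_tc && bw_sum != 100) {
        if (bw_sum || !zero_sum_allowed)
            fprintf(stderr, "ETS: BW sum %d is illegal\n", bw_sum);
        return -1;
    }
    return 0;
}

int main(void)
{
    validate_ets_bw(100, true, false); /* valid */
    validate_ets_bw(0, true, true);    /* rejected, but silently */
    validate_ets_bw(40, true, true);   /* rejected with a log */
    return 0;
}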
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index fffe514ba855..98dd3e0ada72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en/port.h"
+#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -969,33 +970,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
return 0;
}
-static int mlx5e_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- int err = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = priv->channels.params.num_channels;
- break;
- case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
- break;
- case ETHTOOL_GRXCLSRULE:
- err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
- break;
- case ETHTOOL_GRXCLSRLALL:
- err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -1133,10 +1107,10 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
if (ret)
return ret;
- info->phc_index = mdev->clock.ptp ?
- ptp_clock_index(mdev->clock.ptp) : -1;
+ info->phc_index = mlx5_clock_get_ptp_index(mdev);
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
+ info->phc_index == -1)
return 0;
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1606,26 +1580,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int err = 0;
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXCLSRLINS:
- err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash)
{
@@ -1678,8 +1632,10 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+#ifdef CONFIG_MLX5_EN_RXNFC
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+#endif
.flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
@@ -1696,5 +1652,4 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
-
};
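The rxnfc handlers move out of en_ethtool.c, and the ops table now references them only under CONFIG_MLX5_EN_RXNFC, leaving the function pointers NULL otherwise. A sketch of that conditionally populated ops pattern; CONFIG_DEMO_RXNFC is a stand-in macro, not a real Kconfig symbol:

#include <stdio.h>

struct demo_ops {
    const char *name;
    int (*get_rxnfc)(int cmd);
    int (*set_rxnfc)(int cmd);
};

#ifdef CONFIG_DEMO_RXNFC
static int demo_get_rxnfc(int cmd) { return cmd; }
static int demo_set_rxnfc(int cmd) { return cmd; }
#endif

static const struct demo_ops ops = {
    .name = "demo",
#ifdef CONFIG_DEMO_RXNFC
    .get_rxnfc = demo_get_rxnfc,
    .set_rxnfc = demo_set_rxnfc,
#endif
};

int main(void)
{
    /* Callers must tolerate the hooks being compiled out. */
    printf("rxnfc supported: %s\n", ops.get_rxnfc ? "yes" : "no");
    return 0;
}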
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index eafc59280ada..75bb981e00b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -66,11 +66,14 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
+ case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
@@ -115,29 +118,203 @@ static void mask_spec(u8 *mask, u8 *val, size_t size)
*((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
}
-static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
- __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+#define MLX5E_FTE_SET(header_p, fld, v) \
+ MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
+
+#define MLX5E_FTE_ADDR_OF(header_p, fld) \
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
+
+static void
+set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
+ __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
if (ip4src_m) {
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v));
- memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4src_m));
}
if (ip4dst_m) {
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v));
- memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4dst_m));
}
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- ethertype, ETH_P_IP);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- ethertype, 0xffff);
+
+ MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+ MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
+}
+
+static void
+set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
+ __be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
+{
+ u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+
+ if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ ip6src_v, ip6_sz);
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ ip6src_m, ip6_sz);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ ip6dst_v, ip6_sz);
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ ip6dst_m, ip6_sz);
+ }
+
+ MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+ MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
+}
+
+static void
+set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+ __be16 pdst_m, __be16 pdst_v)
+{
+ if (psrc_m) {
+ MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
+ }
+ if (pdst_m) {
+ MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
+ }
+
+ MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+ MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
+}
+
+static void
+set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+ __be16 pdst_m, __be16 pdst_v)
+{
+ if (psrc_m) {
+ MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
+ }
+
+ if (pdst_m) {
+ MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
+ }
+
+ MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+ MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
+}
+
+static void
+parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+ l4_mask->ip4dst, l4_val->ip4dst);
+
+ set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
+ struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+ l4_mask->ip4dst, l4_val->ip4dst);
+
+ set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
+ l3_mask->ip4dst, l3_val->ip4dst);
+
+ if (l3_mask->proto) {
+ MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
+ MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
+ }
+}
+
+static void
+parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l3_mask->ip6src,
+ l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
+
+ if (l3_mask->l4_proto) {
+ MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
+ MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
+ }
+}
+
+static void
+parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l4_mask->ip6src,
+ l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+ set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
+ struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l4_mask->ip6src,
+ l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+ set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+ struct ethhdr *eth_val = &fs->h_u.ether_spec;
+
+ mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
+ MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
+ MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
+}
+
+static void
+set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
+{
+ MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
+ MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
+ MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
+ MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
+}
+
+static void
+set_dmac(void *headers_c, void *headers_v,
+ unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
+{
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}
static int set_flow_attrs(u32 *match_c, u32 *match_v,
@@ -148,112 +325,42 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
- struct ethtool_tcpip4_spec *l4_mask;
- struct ethtool_tcpip4_spec *l4_val;
- struct ethtool_usrip4_spec *l3_mask;
- struct ethtool_usrip4_spec *l3_val;
- struct ethhdr *eth_val;
- struct ethhdr *eth_mask;
switch (flow_type) {
case TCP_V4_FLOW:
- l4_mask = &fs->m_u.tcp_ip4_spec;
- l4_val = &fs->h_u.tcp_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
- l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
- if (l4_mask->psrc) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
- ntohs(l4_val->psrc));
- }
- if (l4_mask->pdst) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
- ntohs(l4_val->pdst));
- }
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
- IPPROTO_TCP);
+ parse_tcp4(outer_headers_c, outer_headers_v, fs);
break;
case UDP_V4_FLOW:
- l4_mask = &fs->m_u.tcp_ip4_spec;
- l4_val = &fs->h_u.tcp_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
- l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
- if (l4_mask->psrc) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
- ntohs(l4_val->psrc));
- }
- if (l4_mask->pdst) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
- ntohs(l4_val->pdst));
- }
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
- IPPROTO_UDP);
+ parse_udp4(outer_headers_c, outer_headers_v, fs);
break;
case IP_USER_FLOW:
- l3_mask = &fs->m_u.usr_ip4_spec;
- l3_val = &fs->h_u.usr_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
- l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+ parse_ip4(outer_headers_c, outer_headers_v, fs);
+ break;
+ case TCP_V6_FLOW:
+ parse_tcp6(outer_headers_c, outer_headers_v, fs);
+ break;
+ case UDP_V6_FLOW:
+ parse_udp6(outer_headers_c, outer_headers_v, fs);
+ break;
+ case IPV6_USER_FLOW:
+ parse_ip6(outer_headers_c, outer_headers_v, fs);
break;
case ETHER_FLOW:
- eth_mask = &fs->m_u.ether_spec;
- eth_val = &fs->h_u.ether_spec;
-
- mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, smac_47_16),
- eth_mask->h_source);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, smac_47_16),
- eth_val->h_source);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, dmac_47_16),
- eth_mask->h_dest);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, dmac_47_16),
- eth_val->h_dest);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
- ntohs(eth_mask->h_proto));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
- ntohs(eth_val->h_proto));
+ parse_ether(outer_headers_c, outer_headers_v, fs);
break;
default:
return -EINVAL;
}
if ((fs->flow_type & FLOW_EXT) &&
- (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- first_vid, 0xfff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- first_vid, ntohs(fs->h_ext.vlan_tci));
- }
+ (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
+ set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);
+
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) {
mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, dmac_47_16),
- fs->m_ext.h_dest);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, dmac_47_16),
- fs->h_ext.h_dest);
+ set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
+ fs->h_ext.h_dest);
}
return 0;
@@ -379,16 +486,143 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
#define all_zeros_or_all_ones(field) \
((field) == 0 || (field) == (__force typeof(field))-1)
+static int validate_ethter(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+ int ntuples = 0;
+
+ if (!is_zero_ether_addr(eth_mask->h_dest))
+ ntuples++;
+ if (!is_zero_ether_addr(eth_mask->h_source))
+ ntuples++;
+ if (eth_mask->h_proto)
+ ntuples++;
+ return ntuples;
+}
+
+static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+ int ntuples = 0;
+
+ if (l4_mask->tos)
+ return -EINVAL;
+
+ if (l4_mask->ip4src) {
+ if (!all_ones(l4_mask->ip4src))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->ip4dst) {
+ if (!all_ones(l4_mask->ip4dst))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ ntuples++;
+ }
+ /* Flow is TCP/UDP */
+ return ++ntuples;
+}
+
+static int validate_ip4(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+ int ntuples = 0;
+
+ if (l3_mask->l4_4_bytes || l3_mask->tos ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ if (l3_mask->ip4src) {
+ if (!all_ones(l3_mask->ip4src))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l3_mask->ip4dst) {
+ if (!all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l3_mask->proto)
+ ntuples++;
+ /* Flow is IPv4 */
+ return ++ntuples;
+}
+
+static int validate_ip6(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+ int ntuples = 0;
+
+ if (l3_mask->l4_4_bytes || l3_mask->tclass)
+ return -EINVAL;
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
+ ntuples++;
+
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
+ ntuples++;
+ if (l3_mask->l4_proto)
+ ntuples++;
+ /* Flow is IPv6 */
+ return ++ntuples;
+}
+
+static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+ int ntuples = 0;
+
+ if (l4_mask->tclass)
+ return -EINVAL;
+
+ if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
+ ntuples++;
+
+ if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
+ ntuples++;
+
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ ntuples++;
+ }
+ /* Flow is TCP/UDP */
+ return ++ntuples;
+}
+
+static int validate_vlan(struct ethtool_rx_flow_spec *fs)
+{
+ if (fs->m_ext.vlan_etype ||
+ fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+ return -EINVAL;
+
+ if (fs->m_ext.vlan_tci &&
+ (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
+ return -EINVAL;
+
+ return 1;
+}
+
static int validate_flow(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{
- struct ethtool_tcpip4_spec *l4_mask;
- struct ethtool_usrip4_spec *l3_mask;
- struct ethhdr *eth_mask;
int num_tuples = 0;
+ int ret = 0;
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
- return -EINVAL;
+ return -ENOSPC;
if (fs->ring_cookie >= priv->channels.params.num_channels &&
fs->ring_cookie != RX_CLS_FLOW_DISC)
@@ -396,73 +630,42 @@ static int validate_flow(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
- eth_mask = &fs->m_u.ether_spec;
- if (!is_zero_ether_addr(eth_mask->h_dest))
- num_tuples++;
- if (!is_zero_ether_addr(eth_mask->h_source))
- num_tuples++;
- if (eth_mask->h_proto)
- num_tuples++;
+ num_tuples += validate_ethter(fs);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- if (fs->m_u.tcp_ip4_spec.tos)
- return -EINVAL;
- l4_mask = &fs->m_u.tcp_ip4_spec;
- if (l4_mask->ip4src) {
- if (!all_ones(l4_mask->ip4src))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->ip4dst) {
- if (!all_ones(l4_mask->ip4dst))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->psrc) {
- if (!all_ones(l4_mask->psrc))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->pdst) {
- if (!all_ones(l4_mask->pdst))
- return -EINVAL;
- num_tuples++;
- }
- /* Flow is TCP/UDP */
- num_tuples++;
+ ret = validate_tcpudp4(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
break;
case IP_USER_FLOW:
- l3_mask = &fs->m_u.usr_ip4_spec;
- if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
- fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
- return -EINVAL;
- if (l3_mask->ip4src) {
- if (!all_ones(l3_mask->ip4src))
- return -EINVAL;
- num_tuples++;
- }
- if (l3_mask->ip4dst) {
- if (!all_ones(l3_mask->ip4dst))
- return -EINVAL;
- num_tuples++;
- }
- /* Flow is IPv4 */
- num_tuples++;
+ ret = validate_ip4(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = validate_tcpudp6(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
+ break;
+ case IPV6_USER_FLOW:
+ ret = validate_ip6(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
break;
default:
- return -EINVAL;
+ return -ENOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
- if (fs->m_ext.vlan_etype ||
- (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
- return -EINVAL;
-
- if (fs->m_ext.vlan_tci) {
- if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
- }
- num_tuples++;
+ ret = validate_vlan(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
}
if (fs->flow_type & FLOW_MAC_EXT &&
@@ -472,8 +675,9 @@ static int validate_flow(struct mlx5e_priv *priv,
return num_tuples;
}
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
- struct ethtool_rx_flow_spec *fs)
+static int
+mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
@@ -483,8 +687,9 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
num_tuples = validate_flow(priv, fs);
if (num_tuples <= 0) {
- netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
- return -EINVAL;
+ netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+ __func__, num_tuples);
+ return num_tuples;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
@@ -519,8 +724,8 @@ del_ethtool_rule:
return err;
}
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
- int location)
+static int
+mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
struct mlx5e_ethtool_rule *eth_rule;
int err = 0;
@@ -539,8 +744,9 @@ out:
return err;
}
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- int location)
+static int
+mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, int location)
{
struct mlx5e_ethtool_rule *eth_rule;
@@ -557,8 +763,9 @@ int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
return -ENOENT;
}
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- u32 *rule_locs)
+static int
+mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
int location = 0;
int idx = 0;
@@ -587,3 +794,51 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
+
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int mlx5e_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = priv->channels.params.num_channels;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
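The validate_*() helpers introduced in this file share one convention: return the number of match tuples on success or a negative errno on failure, so validate_flow() can both accumulate the tuple count and propagate the error. A minimal sketch of that convention (stub names, illustrative check):

#include <errno.h>
#include <stdio.h>

static int validate_vlan_stub(unsigned int vid)
{
    if (vid >= 4096)
        return -EINVAL;
    return 1; /* one more tuple to match on */
}

int main(void)
{
    int num_tuples = 0;
    int ret = validate_vlan_stub(100);

    if (ret < 0)
        return -ret; /* propagate the errno */
    num_tuples += ret;
    printf("flow uses %d tuple(s)\n", num_tuples);
    return 0;
}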
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 56c1b6f5593e..5a7939e70190 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,8 +45,10 @@
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
+#include "lib/clock.h"
#include "en/port.h"
+#include "en/xdp.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -96,14 +98,19 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
- if (!params->xdp_prog) {
- u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ u32 frag_sz;
- return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
- }
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
+
+ if (params->xdp_prog && frag_sz < PAGE_SIZE)
+ frag_sz = PAGE_SIZE;
- return PAGE_SIZE;
+ return frag_sz;
}
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
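mlx5e_rx_get_linear_frag_sz() above now computes one formula for both cases: the headroom depends on whether an XDP program is attached, and XDP additionally rounds the fragment up to a full page. A standalone model with stand-in constants (the real headroom and skb overhead values differ):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE    4096
#define RX_HEADROOM   256 /* stand-in for MLX5_RX_HEADROOM */
#define XDP_HEADROOM  256 /* stand-in for XDP_PACKET_HEADROOM */
#define NET_IP_ALIGN    2

static unsigned int skb_frag_sz(unsigned int sz)
{
    return sz + 320; /* rough stand-in for skb_shared_info overhead */
}

static unsigned int linear_frag_sz(unsigned int hw_mtu, bool xdp)
{
    unsigned int headroom = (xdp ? XDP_HEADROOM : RX_HEADROOM) + NET_IP_ALIGN;
    unsigned int frag_sz = skb_frag_sz(headroom + hw_mtu);

    if (xdp && frag_sz < PAGE_SIZE)
        frag_sz = PAGE_SIZE; /* XDP wants a whole page per packet */
    return frag_sz;
}

int main(void)
{
    printf("mtu 1500, xdp off: %u\n", linear_frag_sz(1500, false));
    printf("mtu 1500, xdp on:  %u\n", linear_frag_sz(1500, true));
    return 0;
}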
@@ -222,7 +229,7 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
u8 port_state;
port_state = mlx5_query_vport_state(mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0);
if (port_state == VPORT_STATE_UP) {
@@ -270,12 +277,9 @@ void mlx5e_update_stats_work(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
+
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->profile->update_stats(priv);
- queue_delayed_work(priv->wq, dwork,
- msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
- }
+ priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
}
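mlx5e_update_stats_work() above no longer re-arms itself; as a later hunk in this file shows, mlx5e_get_stats() now queues a one-shot refresh, so readers return cached counters that are brought up to date in the background for the next read. A minimal single-threaded model of that read-triggers-refresh scheme:

#include <stdio.h>

static int refresh_pending;
static unsigned long cached_rx_packets;

static void update_stats_work(void)
{
    cached_rx_packets += 1000; /* stand-in for querying HW counters */
    refresh_pending = 0;
}

static unsigned long get_stats(void)
{
    refresh_pending = 1; /* queue_delayed_work(priv->wq, ..., 0) in the driver */
    return cached_rx_packets; /* may be stale; fresh by the next read */
}

int main(void)
{
    printf("first read:  %lu\n", get_stats());
    if (refresh_pending)
        update_stats_work(); /* the workqueue runs this asynchronously */
    printf("second read: %lu\n", get_stats());
    return 0;
}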
@@ -352,8 +356,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*rq->mpwqe.info)),
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
return -ENOMEM;
@@ -487,7 +492,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -670,7 +674,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err_free:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -702,7 +706,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -879,7 +883,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (rq->mpwqe.umr_in_progress)
- mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+ rq->dealloc_wqe(rq, wq->head);
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
@@ -965,16 +969,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kfree(sq->db.di);
+ kvfree(sq->db.xdpi);
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di),
- GFP_KERNEL, numa);
- if (!sq->db.di) {
+ sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
+ GFP_KERNEL, numa);
+ if (!sq->db.xdpi) {
mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
}
@@ -985,7 +989,8 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
@@ -997,6 +1002,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->stats = is_redirect ?
+ &c->priv->channel_stats[c->ix].xdpsq :
+ &c->priv->channel_stats[c->ix].rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1024,15 +1033,16 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
- kfree(sq->db.ico_wqe);
+ kvfree(sq->db.ico_wqe);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe),
- GFP_KERNEL, numa);
+ sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.ico_wqe)),
+ GFP_KERNEL, numa);
if (!sq->db.ico_wqe)
return -ENOMEM;
@@ -1077,8 +1087,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
- kfree(sq->db.wqe_info);
- kfree(sq->db.dma_fifo);
+ kvfree(sq->db.wqe_info);
+ kvfree(sq->db.dma_fifo);
}
static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
@@ -1086,10 +1096,12 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo),
- GFP_KERNEL, numa);
- sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info),
- GFP_KERNEL, numa);
+ sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.dma_fifo)),
+ GFP_KERNEL, numa);
+ sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.wqe_info)),
+ GFP_KERNEL, numa);
if (!sq->db.dma_fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
@@ -1523,7 +1535,8 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
struct mlx5e_create_sq_param csp = {};
@@ -1531,7 +1544,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
int err;
int i;
- err = mlx5e_alloc_xdpsq(c, params, param, sq);
+ err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
if (err)
return err;
@@ -1540,6 +1553,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ if (is_redirect)
+ set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
@@ -1893,7 +1908,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;
- c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1922,10 +1937,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ if (err)
+ goto err_close_xdp_tx_cqs;
+
/* XDP SQ CQ params are same as normal TXQ sq CQ params */
err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
&cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
@@ -1942,7 +1961,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq;
- err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
+ err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
if (err)
goto err_close_sqs;
@@ -1950,9 +1969,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_xdp_sq;
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
+ if (err)
+ goto err_close_rq;
+
*cp = c;
return 0;
+
+err_close_rq:
+ mlx5e_close_rq(&c->rq);
+
err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -1971,6 +1998,9 @@ err_disable_napi:
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
+err_close_xdp_tx_cqs:
+ mlx5e_close_cq(&c->xdpsq.cq);
+
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
@@ -1979,7 +2009,7 @@ err_close_icosq_cq:
err_napi_del:
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
return err;
}
@@ -2005,6 +2035,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
+ mlx5e_close_xdpsq(&c->xdpsq);
mlx5e_close_rq(&c->rq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -2014,11 +2045,12 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (c->xdp)
mlx5e_close_cq(&c->rq.xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_cq(&c->xdpsq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
}
#define DEFAULT_FRAG_SIZE (2048)
@@ -2276,7 +2308,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+ cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
if (!chs->c || !cparam)
goto err_free;
@@ -2287,7 +2319,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- kfree(cparam);
+ kvfree(cparam);
return 0;
err_close_channels:
@@ -2296,7 +2328,7 @@ err_close_channels:
err_free:
kfree(chs->c);
- kfree(cparam);
+ kvfree(cparam);
chs->num = 0;
return err;
}
@@ -2846,7 +2878,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_activate_channels(&priv->channels);
netif_tx_start_all_queues(priv->netdev);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2889,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_redirect_rqts_to_drop(priv);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_remove_sqs_fwd_rules(priv);
/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -2943,7 +2975,7 @@ int mlx5e_open(struct net_device *netdev)
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
- if (mlx5e_vxlan_allowed(priv->mdev))
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
udp_tunnel_get_rx_info(netdev);
return err;
@@ -3371,7 +3403,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
priv);
@@ -3405,6 +3437,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
@@ -3590,7 +3625,7 @@ unlock:
return err;
}
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3645,7 +3680,7 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
@@ -3703,6 +3738,14 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
+ if (params->xdp_prog &&
+ !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
+ new_mtu, MLX5E_XDP_MAX_MTU);
+ err = -EINVAL;
+ goto out;
+ }
+
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
@@ -3712,7 +3755,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (!reset) {
params->sw_mtu = new_mtu;
- set_mtu_cb(priv);
+ if (set_mtu_cb)
+ set_mtu_cb(priv);
netdev->mtu = params->sw_mtu;
goto out;
}
@@ -3739,7 +3783,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
+ (mlx5_clock_get_ptp_index(priv->mdev) == -1))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -3873,9 +3918,9 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
- case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
+ case MLX5_VPORT_ADMIN_STATE_DOWN:
return IFLA_VF_LINK_STATE_DISABLE;
- case MLX5_ESW_VPORT_ADMIN_STATE_UP:
+ case MLX5_VPORT_ADMIN_STATE_UP:
return IFLA_VF_LINK_STATE_ENABLE;
}
return IFLA_VF_LINK_STATE_AUTO;
@@ -3885,11 +3930,11 @@ static int mlx5_ifla_link2vport(u8 ifla_link)
{
switch (ifla_link) {
case IFLA_VF_LINK_STATE_DISABLE:
- return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
+ return MLX5_VPORT_ADMIN_STATE_DOWN;
case IFLA_VF_LINK_STATE_ENABLE:
- return MLX5_ESW_VPORT_ADMIN_STATE_UP;
+ return MLX5_VPORT_ADMIN_STATE_UP;
}
- return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+ return MLX5_VPORT_ADMIN_STATE_AUTO;
}
static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
@@ -3927,6 +3972,57 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
}
#endif
+struct mlx5e_vxlan_work {
+ struct work_struct work;
+ struct mlx5e_priv *priv;
+ u16 port;
+};
+
+static void mlx5e_vxlan_add_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_add_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_del_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_del_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
+{
+ struct mlx5e_vxlan_work *vxlan_work;
+
+ vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+ if (!vxlan_work)
+ return;
+
+ if (add)
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
+ else
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
+
+ vxlan_work->priv = priv;
+ vxlan_work->port = port;
+ queue_work(priv->wq, &vxlan_work->work);
+}
+
static void mlx5e_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
@@ -3935,10 +4031,10 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -3949,10 +4045,10 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
@@ -3983,7 +4079,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
port = be16_to_cpu(udph->dest);
/* Verify if UDP port is being offloaded by HW */
- if (mlx5e_vxlan_lookup_port(priv, port))
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
}
@@ -4090,26 +4186,47 @@ static void mlx5e_tx_timeout(struct net_device *dev)
queue_work(priv->wq, &priv->tx_timeout_work);
}
+static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_channels new_channels = {};
+
+ if (priv->channels.params.lro_en) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ return -EINVAL;
+ }
+
+ if (MLX5_IPSEC_DEV(priv->mdev)) {
+ netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ return -EINVAL;
+ }
+
+ new_channels.params = priv->channels.params;
+ new_channels.params.xdp_prog = prog;
+
+ if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
+ new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct bpf_prog *old_prog;
- int err = 0;
bool reset, was_opened;
+ int err = 0;
int i;
mutex_lock(&priv->state_lock);
- if ((netdev->features & NETIF_F_LRO) && prog) {
- netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
- err = -EINVAL;
- goto unlock;
- }
-
- if ((netdev->features & NETIF_F_HW_ESP) && prog) {
- netdev_warn(netdev, "can't set XDP with IPSec offload\n");
- err = -EINVAL;
- goto unlock;
+ if (prog) {
+ err = mlx5e_xdp_allowed(priv, prog);
+ if (err)
+ goto unlock;
}
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
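mlx5e_xdp_allowed() above hoists every XDP incompatibility check (LRO, IPSec offload, a non-linear RX layout at the current MTU) into one helper that runs before any channel teardown. A standalone sketch of that fail-fast shape; the MTU limit here is illustrative, not the driver's MLX5E_XDP_MAX_MTU:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct params { bool lro_en; bool ipsec_dev; unsigned int mtu; };

#define XDP_MAX_MTU 3498 /* illustrative limit */

static int xdp_allowed(const struct params *p)
{
    if (p->lro_en) {
        fprintf(stderr, "can't set XDP while LRO is on, disable LRO first\n");
        return -EINVAL;
    }
    if (p->ipsec_dev) {
        fprintf(stderr, "can't set XDP with IPSec offload\n");
        return -EINVAL;
    }
    if (p->mtu > XDP_MAX_MTU) {
        fprintf(stderr, "XDP is not allowed with MTU(%u) > %d\n",
                p->mtu, XDP_MAX_MTU);
        return -EINVAL;
    }
    return 0; /* safe to swap the program in */
}

int main(void)
{
    struct params p = { .lro_en = false, .ipsec_dev = false, .mtu = 1500 };

    return xdp_allowed(&p) ? 1 : 0;
}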
@@ -4192,7 +4309,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx5e_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx5e_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -4234,11 +4350,12 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check,
-#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
-#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
+ .ndo_xdp_xmit = mlx5e_xdp_xmit,
+#ifdef CONFIG_MLX5_EN_ARFS
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
@@ -4534,8 +4651,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
- netdev->hw_features |= NETIF_F_GSO_PARTIAL;
+ if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4543,7 +4659,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
- if (mlx5e_vxlan_allowed(mdev)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -4560,6 +4676,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
}
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
if (fcs_supported)
@@ -4584,7 +4705,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
netdev->hw_features |= NETIF_F_HW_TC;
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
@@ -4597,7 +4718,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev);
#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
- if (MLX5_VPORT_MANAGER(mdev))
+ if (MLX5_ESWITCH_MANAGER(mdev))
netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
@@ -4649,14 +4770,12 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
- mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_vxlan_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4753,7 +4872,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_enable_async_events(priv);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_register_vport_reps(priv);
if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4907,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_unregister_vport_reps(priv);
mlx5e_disable_async_events(priv);
@@ -4830,7 +4949,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
netdev->rx_cpu_rmap = mdev->rmap;
#endif
@@ -4972,7 +5091,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
return NULL;
#ifdef CONFIG_MLX5_ESWITCH
- if (MLX5_VPORT_MANAGER(mdev)) {
+ if (MLX5_ESWITCH_MANAGER(mdev)) {
rpriv = mlx5e_alloc_nic_rep_priv(mdev);
if (!rpriv) {
mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
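/*
 * Throughout en_main.c the MLX5_VPORT_MANAGER() gate is replaced by
 * MLX5_ESWITCH_MANAGER(): representor setup now keys off the
 * eswitch-manager capability, while plain vport_group_manager checks
 * remain where only vport state is touched (see the eswitch.c hunks
 * further below, which make the same split).
 */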
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 57987f6546e8..c9cc9747d21d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -698,8 +698,8 @@ static int mlx5e_rep_open(struct net_device *dev)
goto unlock;
if (!mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
@@ -716,8 +716,8 @@ static int mlx5e_rep_close(struct net_device *dev)
mutex_lock(&priv->state_lock);
mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
@@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
return 0;
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
- if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ if (!MLX5_ESWITCH_MANAGER(priv->mdev))
return false;
rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_eswitch_rep *rep;
+
+ if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+ return false;
+ rep = rpriv->rep;
if (rep && rep->vport != FDB_UPLINK_VPORT)
return true;
@@ -893,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
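/*
 * ndo_get_stats64 is not allowed to block, so the hunk above returns
 * the cached vf_vport counters immediately and queues a zero-delay
 * work item to refresh them from firmware for the next reader.
 */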
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d3a1dd20e41d..15d8ae28c040 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
@@ -44,7 +43,9 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
+#include "en/xdp.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -238,8 +239,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
return 0;
}
-static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}
@@ -276,10 +276,11 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
}
static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *frag)
+ struct mlx5e_wqe_frag_info *frag,
+ bool recycle)
{
if (frag->last_in_page)
- mlx5e_page_release(rq, frag->di, true);
+ mlx5e_page_release(rq, frag->di, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -307,25 +308,26 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
free_frags:
while (--i >= 0)
- mlx5e_put_rx_frag(rq, --frag);
+ mlx5e_put_rx_frag(rq, --frag, true);
return err;
}
static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *wi)
+ struct mlx5e_wqe_frag_info *wi,
+ bool recycle)
{
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
- mlx5e_put_rx_frag(rq, wi);
+ mlx5e_put_rx_frag(rq, wi, recycle);
}
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, false);
}
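/*
 * The new "recycle" parameter threads one decision through the RX free
 * path: datapath frees (recycle = true) may return pages to the RQ
 * page cache, while teardown frees such as mlx5e_dealloc_rx_wqe()
 * above (recycle = false) release the pages for good.
 */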
static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
@@ -395,7 +397,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
}
}
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
+static void
+mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
const bool no_xdp_xmit =
bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
@@ -404,7 +407,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release(rq, &dma_info[i], true);
+ mlx5e_page_release(rq, &dma_info[i], recycle);
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
@@ -487,7 +490,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->pc += MLX5E_UMR_WQEBBS;
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
return 0;
@@ -504,8 +507,8 @@ err_unmap:
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-
- mlx5e_free_rx_mpwqe(rq, wi);
+ /* Don't recycle; this function is called on rq/netdev close */
+ mlx5e_free_rx_mpwqe(rq, wi, false);
}
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@@ -601,6 +604,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (!rq->mpwqe.umr_in_progress)
mlx5e_alloc_rx_mpwqe(rq, wq->head);
+ else
+ rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
return false;
}
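/*
 * On the congst_umr hunk above: "mlx5_wq_ll_missing(wq) > 2" is a
 * boolean, so the counter advances by 0 or 1 per poll. It counts polls
 * that found a UMR still in flight while more than two WQEs were
 * missing, a cheap proxy for UMR-induced RX congestion.
 */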
@@ -795,6 +800,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct net_device *netdev = rq->netdev;
skb->mac_len = ETH_HLEN;
+
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
+#endif
+
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -839,135 +849,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
-
- wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
-}
-
-static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- const struct xdp_buff *xdp)
-{
- struct mlx5e_xdpsq *sq = &rq->xdpsq;
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
-
- ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
- dma_addr_t dma_addr = di->addr + data_offset;
- unsigned int dma_len = xdp->data_end - xdp->data;
-
- struct mlx5e_rq_stats *stats = rq->stats;
-
- prefetchw(wqe);
-
- if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
- stats->xdp_drop++;
- return false;
- }
-
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
- if (sq->db.doorbell) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- sq->db.doorbell = false;
- }
- stats->xdp_tx_full++;
- return false;
- }
-
- dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
-
- cseg->fm_ce_se = 0;
-
- dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
-
- /* copy the inline part if required */
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
- eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
- dma_len -= MLX5E_XDP_MIN_INLINE;
- dma_addr += MLX5E_XDP_MIN_INLINE;
- dseg++;
- }
-
- /* write the dma part */
- dseg->addr = cpu_to_be64(dma_addr);
- dseg->byte_count = cpu_to_be32(dma_len);
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-
- /* move page to reference to sq responsibility,
- * and mark so it's not put back in page-cache.
- */
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
- sq->db.di[pi] = *di;
- sq->pc++;
-
- sq->db.doorbell = true;
-
- stats->xdp_tx++;
- return true;
-}
-
-/* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len)
-{
- struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
- struct xdp_buff xdp;
- u32 act;
- int err;
-
- if (!prog)
- return false;
-
- xdp.data = va + *rx_headroom;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.data_hard_start = va;
- xdp.rxq = &rq->xdp_rxq;
-
- act = bpf_prog_run_xdp(prog, &xdp);
- switch (act) {
- case XDP_PASS:
- *rx_headroom = xdp.data - xdp.data_hard_start;
- *len = xdp.data_end - xdp.data;
- return false;
- case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
- trace_xdp_exception(rq->netdev, prog, act);
- return true;
- case XDP_REDIRECT:
- /* When XDP enabled then page-refcnt==1 here */
- err = xdp_do_redirect(rq->netdev, &xdp, prog);
- if (!err) {
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
- rq->xdpsq.db.redirect_flush = true;
- mlx5e_page_dma_unmap(rq, di);
- }
- return true;
- default:
- bpf_warn_invalid_xdp_action(act);
- case XDP_ABORTED:
- trace_xdp_exception(rq->netdev, prog, act);
- case XDP_DROP:
- rq->stats->xdp_drop++;
- return true;
- }
-}
-
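/*
 * The XDP doorbell/xmit/handle helpers removed above are relocated
 * rather than lost: together with the "en/xdp.h" include added at the
 * top of this file (and the rq_xdpsq/xdpsq stats split in en_stats.c),
 * the hunks are consistent with the XDP datapath moving into a
 * dedicated en/xdp.c shared by the RQ XDP-TX queue and the new
 * redirect queue.
 */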
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
@@ -1105,7 +986,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1147,7 +1028,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1218,6 +1099,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
+ prefetchw(va); /* xdp_frame data area */
prefetch(data);
rcu_read_lock();
@@ -1261,7 +1143,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
if (unlikely(mpwrq_is_filler_cqe(cqe))) {
- rq->stats->mpwqe_filler++;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
goto mpwrq_cqe_out;
}
@@ -1281,7 +1166,7 @@ mpwrq_cqe_out:
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
- mlx5e_free_rx_mpwqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
@@ -1317,14 +1202,14 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
- if (xdpsq->db.doorbell) {
+ if (xdpsq->doorbell) {
mlx5e_xmit_xdp_doorbell(xdpsq);
- xdpsq->db.doorbell = false;
+ xdpsq->doorbell = false;
}
- if (xdpsq->db.redirect_flush) {
+ if (xdpsq->redirect_flush) {
xdp_do_flush_map();
- xdpsq->db.redirect_flush = false;
+ xdpsq->redirect_flush = false;
}
mlx5_cqwq_update_db_record(&cq->wq);
@@ -1335,78 +1220,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
return work_done;
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
-{
- struct mlx5e_xdpsq *sq;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
- u16 sqcc;
- int i;
-
- sq = container_of(cq, struct mlx5e_xdpsq, cq);
-
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return false;
-
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
- if (!cqe)
- return false;
-
- rq = container_of(sq, struct mlx5e_rq, xdpsq);
-
- /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
- * otherwise a cq overrun may occur
- */
- sqcc = sq->cc;
-
- i = 0;
- do {
- u16 wqe_counter;
- bool last_wqe;
-
- mlx5_cqwq_pop(&cq->wq);
-
- wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
- do {
- struct mlx5e_dma_info *di;
- u16 ci;
-
- last_wqe = (sqcc == wqe_counter);
-
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- di = &sq->db.di[ci];
-
- sqcc++;
- /* Recycle RX page */
- mlx5e_page_release(rq, di, true);
- } while (!last_wqe);
- } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
-
- mlx5_cqwq_update_db_record(&cq->wq);
-
- /* ensure cq space is freed before enabling more cqes */
- wmb();
-
- sq->cc = sqcc;
- return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
-{
- struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
- struct mlx5e_dma_info *di;
- u16 ci;
-
- while (sq->cc != sq->pc) {
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
- di = &sq->db.di[ci];
- sq->cc++;
-
- mlx5e_page_release(rq, di, false);
- }
-}
-
#ifdef CONFIG_MLX5_CORE_IPOIB
#define MLX5_IB_GRH_DGID_OFFSET 24
@@ -1508,7 +1321,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
mlx5_wq_cyc_pop(wq);
}
@@ -1531,19 +1344,19 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (unlikely(!skb)) {
/* a DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
+ skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
if (unlikely(!skb)) {
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 4d316cc9b008..35ded91203f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -74,7 +74,7 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
if (!netif_carrier_ok(priv->netdev))
return 1;
- port_state = mlx5_query_vport_state(priv->mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+ port_state = mlx5_query_vport_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0);
return port_state == VPORT_STATE_UP ? 0 : 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1646859974ce..6839481f7697 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
@@ -58,8 +59,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
@@ -67,10 +71,17 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -80,6 +91,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
};
@@ -118,6 +134,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
+ struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
+ struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
int j;
@@ -131,11 +149,15 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_redirect += rq_stats->xdp_redirect;
+ s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+ s->rx_xdp_tx_full += xdpsq_stats->full;
+ s->rx_xdp_tx_err += xdpsq_stats->err;
+ s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
+ s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -145,7 +167,17 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_empty += rq_stats->cache_empty;
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
- s->ch_eq_rearm += ch_stats->eq_rearm;
+ s->rx_congst_umr += rq_stats->congst_umr;
+ s->ch_events += ch_stats->events;
+ s->ch_poll += ch_stats->poll;
+ s->ch_arm += ch_stats->arm;
+ s->ch_aff_change += ch_stats->aff_change;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+ /* xdp redirect */
+ s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+ s->tx_xdp_full += xdpsq_red_stats->full;
+ s->tx_xdp_err += xdpsq_red_stats->err;
+ s->tx_xdp_cqes += xdpsq_red_stats->cqes;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -157,8 +189,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_nop += sq_stats->nop;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
+ s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_cqe_err += sq_stats->cqe_err;
s->tx_recover += sq_stats->recover;
@@ -170,6 +204,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
+ s->tx_cqes += sq_stats->cqes;
}
}
@@ -813,7 +848,7 @@ static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
-static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
+static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
@@ -971,7 +1006,7 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
- return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
+ return mlx5e_grp_per_prio_traffic_get_num_stats() +
mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
@@ -1106,13 +1141,13 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
@@ -1122,6 +1157,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1140,16 +1176,37 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
+static const struct counter_desc rq_xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
+static const struct counter_desc xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
+#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
@@ -1158,7 +1215,9 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
- (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
+ (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
+ (NUM_RQ_XDPSQ_STATS * max_nch) +
+ (NUM_XDPSQ_STATS * max_nch);
}
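/*
 * Worked count for the sum above, assuming max_nch = 2 and
 * max_opened_tc = 1, with NUM_CH_STATS = 5 and NUM_RQ_XDPSQ_STATS =
 * NUM_XDPSQ_STATS = 4 as declared in this patch: the two new XDP
 * groups alone contribute 2 * (4 + 4) = 16 extra ethtool counters.
 */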
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -1172,9 +1231,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ch_stats_desc[j].format, i);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_xdpsq_stats_desc[j].format, i);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1183,6 +1247,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sq_stats_desc[j].format,
priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ xdpsq_stats_desc[j].format, i);
+
return idx;
}
@@ -1198,11 +1267,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
ch_stats_desc, j);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
rq_stats_desc, j);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+ rq_xdpsq_stats_desc, j);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1211,6 +1285,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
sq_stats_desc, j);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+ xdpsq_stats_desc, j);
+
return idx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 643153bb3607..a4c035aedd46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,8 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
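/*
 * Each MLX5E_DECLARE_*_STAT() initializer expands to a printf-style
 * name format plus an offsetof() into its stats struct. A minimal
 * sketch of how such a descriptor is consumed; MLX5E_READ_CTR64_CPU is
 * assumed to reduce to this pointer arithmetic:
 */
static inline u64 read_stat_ctr64(const void *stats_base, size_t offset)
{
	/* the named counter is a u64 at a fixed offset in the struct */
	return *(const u64 *)((const char *)stats_base + offset);
}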
struct counter_desc {
@@ -61,6 +63,7 @@ struct mlx5e_sw_stats {
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
u64 tx_added_vlan_packets;
+ u64 tx_nop;
u64 rx_lro_packets;
u64 rx_lro_bytes;
u64 rx_removed_vlan_packets;
@@ -69,8 +72,11 @@ struct mlx5e_sw_stats {
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
- u64 rx_xdp_tx;
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_tx_xmit;
u64 rx_xdp_tx_full;
+ u64 rx_xdp_tx_err;
+ u64 rx_xdp_tx_cqe;
u64 tx_csum_none;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
@@ -78,10 +84,17 @@ struct mlx5e_sw_stats {
u64 tx_queue_dropped;
u64 tx_xmit_more;
u64 tx_recover;
+ u64 tx_cqes;
u64 tx_queue_wake;
+ u64 tx_udp_seg_rem;
u64 tx_cqe_err;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_full;
+ u64 tx_xdp_err;
+ u64 tx_xdp_cqes;
u64 rx_wqe_err;
- u64 rx_mpwqe_filler;
+ u64 rx_mpwqe_filler_cqes;
+ u64 rx_mpwqe_filler_strides;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@@ -91,6 +104,11 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 rx_congst_umr;
+ u64 ch_events;
+ u64 ch_poll;
+ u64 ch_arm;
+ u64 ch_aff_change;
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
@@ -168,10 +186,10 @@ struct mlx5e_rq_stats {
u64 lro_bytes;
u64 removed_vlan_packets;
u64 xdp_drop;
- u64 xdp_tx;
- u64 xdp_tx_full;
+ u64 xdp_redirect;
u64 wqe_err;
- u64 mpwqe_filler;
+ u64 mpwqe_filler_cqes;
+ u64 mpwqe_filler_strides;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
@@ -181,6 +199,7 @@ struct mlx5e_rq_stats {
u64 cache_empty;
u64 cache_busy;
u64 cache_waive;
+ u64 congst_umr;
};
struct mlx5e_sq_stats {
@@ -196,6 +215,7 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
@@ -206,11 +226,24 @@ struct mlx5e_sq_stats {
u64 dropped;
u64 recover;
/* dirtied @completion */
- u64 wake ____cacheline_aligned_in_smp;
+ u64 cqes ____cacheline_aligned_in_smp;
+ u64 wake;
u64 cqe_err;
};
+struct mlx5e_xdpsq_stats {
+ u64 xmit;
+ u64 full;
+ u64 err;
+ /* dirtied @completion */
+ u64 cqes ____cacheline_aligned_in_smp;
+};
+
struct mlx5e_ch_stats {
+ u64 events;
+ u64 poll;
+ u64 arm;
+ u64 aff_change;
u64 eq_rearm;
};
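/*
 * Layout note on mlx5e_xdpsq_stats and mlx5e_sq_stats above: fields
 * dirtied on the xmit path and fields dirtied at completion are split
 * across cache lines via ____cacheline_aligned_in_smp, so the xmit
 * context and the CQ-polling context do not false-share a line.
 */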
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0edf4751a8ba..9131a1376e7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
#include "fs_core.h"
#include "en/port.h"
@@ -1032,10 +1032,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
* dst ip pair
*/
n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
- if (!n) {
- WARN(1, "The neighbour already freed\n");
+ if (!n)
return;
- }
neigh_event_send(n, NULL);
neigh_release(n);
@@ -1126,16 +1124,12 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
goto vxlan_match_offload_err;
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
@@ -1213,6 +1207,26 @@ vxlan_match_offload_err:
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ }
+
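/*
 * The ENC_IP hunk above splits the 8-bit tunnel tos the way the
 * hardware matches it: the low two bits are ECN, the upper six are
 * DSCP. For example, tos 0xb8 (the EF PHB) gives ip_dscp = 0xb8 >> 2 =
 * 0x2e (46) and ip_ecn = 0xb8 & 0x3 = 0.
 */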
/* Enforce DMAC when offloading incoming tunneled flows.
* Flow counters require a match on the DMAC.
*/
@@ -1237,6 +1251,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
u16 addr_type = 0;
u8 ip_proto = 0;
@@ -1247,6 +1265,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
@@ -1256,7 +1275,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
- BIT(FLOW_DISSECTOR_KEY_IP))) {
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -1327,9 +1347,18 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
- if (mask->vlan_id || mask->vlan_priority) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
+ if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ svlan_tag, 1);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ cvlan_tag, 1);
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
@@ -1341,6 +1370,41 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CVLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CVLAN,
+ f->mask);
+ if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
+ if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_svlan_tag, 1);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ outer_second_svlan_tag, 1);
+ } else {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_cvlan_tag, 1);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ outer_second_cvlan_tag, 1);
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
+ mask->vlan_id);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
+ key->vlan_id);
+ MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
+ mask->vlan_priority);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
+ key->vlan_priority);
+
+ *match_level = MLX5_MATCH_L2;
+ }
+ }
+
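/*
 * The FLOW_DISSECTOR_KEY_CVLAN block above extends matching to the
 * second tag of a QinQ frame: the outermost tag keeps programming the
 * lyr_2_4 headers, while the inner tag lands in the misc_parameters
 * outer_second_{s,c}vlan_tag/vid/prio fields, selected by whether its
 * TPID is 802.1ad or 802.1Q.
 */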
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
@@ -1957,6 +2021,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
else
actions = flow->nic_attr->action;
+ if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+ !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ return false;
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec, exts);
@@ -1966,15 +2034,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
- u16 func_id, peer_id;
+ u64 fsystem_guid, psystem_guid;
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
- func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
- peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+ mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
+ mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
- return (func_id == peer_id);
+ return (fsystem_guid == psystem_guid);
}
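/*
 * same_hw_devs() now compares the NIC vport system image GUID, which
 * identifies the physical adapter however its functions are
 * enumerated; the old bus/slot-derived id broke as soon as two
 * functions of the same device appeared on different PCI buses.
 */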
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
@@ -2078,7 +2146,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi4 *fl4,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -2102,7 +2170,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
else
*out_dev = rt->dst.dev;
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
if (!n)
@@ -2131,7 +2200,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi6 *fl6,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct neighbour *n = NULL;
struct dst_entry *dst;
@@ -2146,7 +2215,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
if (ret < 0)
return ret;
- *out_ttl = ip6_dst_hoplimit(dst);
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
@@ -2170,7 +2240,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
__be32 daddr,
__be32 saddr,
__be16 udp_dst_port,
@@ -2190,6 +2260,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
ip->daddr = daddr;
ip->saddr = saddr;
+ ip->tos = tos;
ip->ttl = ttl;
ip->protocol = IPPROTO_UDP;
ip->version = 0x4;
@@ -2203,7 +2274,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
struct in6_addr *daddr,
struct in6_addr *saddr,
__be16 udp_dst_port,
@@ -2220,7 +2291,7 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev,
ether_addr_copy(eth->h_source, out_dev->dev_addr);
eth->h_proto = htons(ETH_P_IPV6);
- ip6_flow_hdr(ip6h, 0, 0);
+ ip6_flow_hdr(ip6h, tos, 0);
/* the HW fills up ipv6 payload len */
ip6h->nexthdr = IPPROTO_UDP;
ip6h->hop_limit = ttl;
@@ -2242,9 +2313,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int ttl, err;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2265,6 +2336,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = -EOPNOTSUPP;
goto free_encap;
}
+
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl4.flowi4_tos = tun_key->tos;
fl4.daddr = tun_key->u.ipv4.dst;
fl4.saddr = tun_key->u.ipv4.src;
@@ -2299,7 +2374,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv4(out_dev, encap_header,
- ipv4_encap_size, e->h_dest, ttl,
+ ipv4_encap_size, e->h_dest, tos, ttl,
fl4.daddr,
fl4.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2347,9 +2422,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int err, ttl = 0;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2371,6 +2446,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
@@ -2405,7 +2483,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv6(out_dev, encap_header,
- ipv6_encap_size, e->h_dest, ttl,
+ ipv6_encap_size, e->h_dest, tos, ttl,
&fl6.daddr,
&fl6.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2451,11 +2529,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
- REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
@@ -2475,7 +2549,7 @@ vxlan_encap_offload_err:
return -EOPNOTSUPP;
}
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
@@ -2531,6 +2605,56 @@ out_err:
return err;
}
+static int parse_tc_vlan_action(struct mlx5e_priv *priv,
+ const struct tc_action *a,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action)
+{
+ u8 vlan_idx = attr->total_vlan;
+
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
+ return -EOPNOTSUPP;
+
+ if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
+ } else {
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ }
+ } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
+ attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
+ attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
+ if (!attr->vlan_proto[vlan_idx])
+ attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
+
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ } else {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
+ (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+ tcf_vlan_push_prio(a)))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ }
+ } else { /* action is TCA_VLAN_ACT_MODIFY */
+ return -EOPNOTSUPP;
+ }
+
+ attr->total_vlan = vlan_idx + 1;
+
+ return 0;
+}
+
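/*
 * Usage sketch for parse_tc_vlan_action() above, assuming
 * MLX5_FS_VLAN_DEPTH is 2: two stacked tc "vlan push" actions on one
 * flow fill vlan_vid/prio/proto[0] then [1] and accumulate VLAN_PUSH
 * followed by VLAN_PUSH_2 in *action; a third push returns
 * -EOPNOTSUPP because vlan_idx has reached the depth limit.
 */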
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -2542,6 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
+ int err;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
@@ -2558,8 +2683,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_pedit(a)) {
- int err;
-
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
parse_attr);
if (err)
@@ -2626,23 +2749,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_vlan(a)) {
- if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
- action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- attr->vlan_vid = tcf_vlan_push_vid(a);
- if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
- attr->vlan_prio = tcf_vlan_push_prio(a);
- attr->vlan_proto = tcf_vlan_push_proto(a);
- if (!attr->vlan_proto)
- attr->vlan_proto = htons(ETH_P_8021Q);
- } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
- tcf_vlan_push_prio(a)) {
- return -EOPNOTSUPP;
- }
- } else { /* action is TCA_VLAN_ACT_MODIFY */
- return -EOPNOTSUPP;
- }
+ err = parse_tc_vlan_action(priv, a, attr, &action);
+
+ if (err)
+ return err;
+
attr->mirror_count = attr->out_count;
continue;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f29deb44bf3b..ae73ea992845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -66,22 +66,21 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
}
}
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
+{
+ return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+}
+
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
-
- sq->db.dma_fifo[i].addr = addr;
- sq->db.dma_fifo[i].size = size;
- sq->db.dma_fifo[i].type = map_type;
- sq->dma_fifo_pc++;
-}
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
-{
- return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+ dma->addr = addr;
+ dma->size = size;
+ dma->type = map_type;
}
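/*
 * The fifo indexing above relies on dma_fifo_mask being
 * (fifo_size - 1) for a power-of-two fifo_size, so a free-running
 * producer counter wraps into a ring slot with a single AND:
 */
static inline u32 dma_fifo_slot(u32 ctr, u32 mask)
{
	return ctr & mask;	/* equivalent to ctr % fifo_size */
}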
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
@@ -111,10 +110,11 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb);
+ int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
@@ -228,7 +228,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+ else
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
stats->tso_packets++;
stats->tso_bytes += skb->len - ihs;
}
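/*
 * On the GSO ihs hunk above: for SKB_GSO_UDP_L4 every emitted datagram
 * repeats only the fixed 8-byte UDP header, so the inline-header span
 * ends at transport offset + sizeof(struct udphdr); TCP segmentation
 * must replicate the full, possibly option-bearing TCP header, hence
 * tcp_hdrlen() on the other branch.
 */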
@@ -443,12 +446,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
-#ifdef CONFIG_MLX5_ACCEL
/* might send skbs and update wqe and pi */
skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
if (unlikely(!skb))
return NETDEV_TX_OK;
-#endif
+
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
@@ -466,6 +468,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
+ struct mlx5e_sq_stats *stats;
struct mlx5e_txqsq *sq;
struct mlx5_cqe64 *cqe;
u32 dma_fifo_cc;
@@ -483,6 +486,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (!cqe)
return false;
+ stats = sq->stats;
+
npkts = 0;
nbytes = 0;
@@ -511,7 +516,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
queue_work(cq->channel->priv->wq,
&sq->recover.recover_work);
}
- sq->stats->cqe_err++;
+ stats->cqe_err++;
}
do {
@@ -556,6 +561,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ stats->cqes += i;
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
@@ -571,7 +578,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
MLX5E_SQ_STOP_ROOM) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
- sq->stats->wake++;
+ stats->wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 1b17f682693b..85d517360157 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,6 +32,7 @@
#include <linux/irq.h>
#include "en.h"
+#include "en/xdp.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -74,13 +75,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
+ struct mlx5e_ch_stats *ch_stats = c->stats;
bool busy = false;
int work_done = 0;
int i;
+ ch_stats->poll++;
+
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
@@ -94,6 +100,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
+ ch_stats->aff_change++;
if (budget && work_done == budget)
work_done--;
}
@@ -101,6 +108,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
+ ch_stats->arm++;
+
for (i = 0; i < c->num_tc; i++) {
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
@@ -110,6 +119,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
+ mlx5e_cq_arm(&c->xdpsq.cq);
return work_done;
}
@@ -118,8 +128,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
- cq->event_ctr++;
napi_schedule(cq->napi);
+ cq->event_ctr++;
+ cq->channel->stats->events++;
}
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 406c23862f5f..48864f4988a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -40,6 +40,8 @@
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"
+#include "lib/clock.h"
+#include "diag/fw_tracer.h"
enum {
MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
@@ -168,6 +170,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
case MLX5_EVENT_TYPE_GENERAL_EVENT:
return "MLX5_EVENT_TYPE_GENERAL_EVENT";
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ return "MLX5_EVENT_TYPE_DEVICE_TRACER";
default:
return "Unrecognized event";
}
@@ -576,6 +580,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
case MLX5_EVENT_TYPE_GENERAL_EVENT:
general_event_handler(dev, eqe);
break;
+
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ mlx5_fw_tracer_event(dev, eqe);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -853,6 +862,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, temp_warn_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
+ if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f63dfbcd29fe..2b252cde5cc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -246,7 +246,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
-static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
@@ -1469,7 +1469,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
return;
mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num,
vport->info.link_state);
mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
@@ -1582,9 +1582,9 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
esw_vport_disable_qos(esw, vport_num);
if (vport_num && esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
esw_vport_destroy_drop_counters(vport);
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
}
/* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
int err;
int i, enabled_events;
- if (!ESW_ALLOWED(esw))
- return 0;
-
- if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+ if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
@@ -1620,7 +1618,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw->mode = mode;
if (mode == SRIOV_LEGACY) {
- err = esw_create_legacy_fdb_table(esw, nvfs + 1);
+ err = esw_create_legacy_fdb_table(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -1698,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
int vport_num;
int err;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
esw_info(dev,
@@ -1738,7 +1736,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_vport *vport = &esw->vports[vport_num];
vport->vport = vport_num;
- vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
@@ -1767,7 +1765,7 @@ abort:
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
- if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
+ if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
return;
esw_info(esw->dev, "cleanup\n");
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u64 node_guid;
int err = 0;
- if (!ESW_ALLOWED(esw))
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
return -EINVAL;
@@ -1862,7 +1860,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
evport = &esw->vports[vport];
err = mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport, link_state);
if (err) {
mlx5_core_warn(esw->dev,
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
{
struct mlx5_vport *evport;
- if (!ESW_ALLOWED(esw))
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
@@ -2218,6 +2216,6 @@ free_out:
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
- return esw->mode;
+ return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
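A note on the recurring substitution in this file: the patch splits two capabilities the old code conflated. MLX5_ESWITCH_MANAGER() gates reconfiguring the e-switch itself (legacy FDB, SR-IOV offload modes), while vport_group_manager gates administrative access to other vports, which is why the per-vport set/get helpers above now test the latter directly. A hedged sketch of the resulting division of labor (illustrative only, not driver code):

	/* Illustrative: which capability guards which class of operation */
	if (!MLX5_ESWITCH_MANAGER(dev))		/* may we own the e-switch? */
		return 0;			/* skip FDB/SR-IOV setup quietly */

	if (!MLX5_CAP_GEN(dev, vport_group_manager))	/* may we touch other vports? */
		return -EPERM;			/* refuse per-vport get/set */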
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b174da2884c5..c17bfcab517c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -38,6 +38,7 @@
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -256,9 +257,10 @@ struct mlx5_esw_flow_attr {
int out_count;
int action;
- __be16 vlan_proto;
- u16 vlan_vid;
- u8 vlan_prio;
+ __be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
+ u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
+ u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
+ u8 total_vlan;
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
@@ -282,10 +284,17 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags);
-static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev)
+static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
+ u8 vlan_depth)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+ bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+
+ if (vlan_depth == 1)
+ return ret;
+
+ return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
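The vlan_depth parameter added above lets callers distinguish single-tag offload from double-tag (QinQ) offload: depth 1 needs only pop_vlan/push_vlan, while depth 2 additionally requires the pop_vlan_2/push_vlan_2 FDB capabilities. A hypothetical caller, using the total_vlan field introduced in mlx5_esw_flow_attr above (sketch only):

	/* Illustrative: fall back to emulation when the FDB cannot
	 * push/pop as many VLAN headers as this flow requires.
	 */
	static bool esw_flow_vlans_offloadable(struct mlx5_core_dev *dev,
					       struct mlx5_esw_flow_attr *attr)
	{
		return mlx5_eswitch_vlan_actions_supported(dev, attr->total_vlan);
	}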
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cecd201f0b73..f72b5c9dcfe9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -66,13 +66,18 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
- if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
- flow_act.vlan.vid = attr->vlan_vid;
- flow_act.vlan.prio = attr->vlan_prio;
+ flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
+ flow_act.vlan[0].vid = attr->vlan_vid[0];
+ flow_act.vlan[0].prio = attr->vlan_prio[0];
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
+ flow_act.vlan[1].vid = attr->vlan_vid[1];
+ flow_act.vlan[1].prio = attr->vlan_prio[1];
+ }
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@@ -266,7 +271,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
/* protects against (1) setting rules with different vlans to push and
* (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
*/
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
goto out_notsupp;
return 0;
@@ -284,7 +289,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -324,11 +329,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan_vid;
+ vport->vlan = attr->vlan_vid[0];
skip_set_push:
vport->vlan_refcount++;
}
@@ -347,7 +352,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
if (!attr->vlan_handled)
@@ -1079,8 +1084,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -EOPNOTSUPP;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return -EPERM;
if (dev->priv.eswitch->mode == SRIOV_NONE)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index c9736238604a..5cf5f2a9d51f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
void *ptr)
{
+ unsigned long flags;
int ret;
/* TLS metadata format is 1 byte for syndrome followed
@@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
idr_preload(GFP_KERNEL);
- spin_lock_irq(idr_spinlock);
+ spin_lock_irqsave(idr_spinlock, flags);
ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irq(idr_spinlock);
+ spin_unlock_irqrestore(idr_spinlock, flags);
idr_preload_end();
return ret;
@@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr,
spin_unlock_irqrestore(idr_spinlock, flags);
}
+static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf, u8 status)
+{
+ kfree(buf);
+}
+
struct mlx5_teardown_stream_context {
struct mlx5_fpga_tls_command_context cmd;
u32 swid;
@@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
- else
+ else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
- &fdev->tls->idr_spinlock,
+ &fdev->tls->tx_idr_spinlock,
+ ctx->swid);
+ else
+ mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
+ &fdev->tls->rx_idr_spinlock,
ctx->swid);
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
MLX5_GET(tls_flow, flow, direction_sx));
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
+ void *flow;
+ void *cmd;
+ int ret;
+
+ buf = kzalloc(size, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (buf + 1);
+
+ rcu_read_lock();
+ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+ rcu_read_unlock();
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+
+ MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
+ MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
+ MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
+
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+ buf->complete = mlx_tls_kfree_complete;
+
+ ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+
+ return ret;
+}
+
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
@@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
mlx5_fpga_tls_teardown_completion);
}
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags)
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
rcu_read_lock();
- flow = idr_find(&tls->tx_idr, swid);
+ if (direction_sx)
+ flow = idr_find(&tls->tx_idr, swid);
+ else
+ flow = idr_find(&tls->rx_idr, swid);
+
rcu_read_unlock();
if (!flow) {
@@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
* the command context because we might not have received
* the tx completion yet.
*/
- mlx5_fpga_tls_del_tx_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC);
+ mlx5_fpga_tls_del_flow(fdev->mdev,
+ MLX5_GET(tls_cmd, tls_cmd, swid),
+ GFP_ATOMIC,
+ MLX5_GET(tls_cmd, tls_cmd,
+ direction_sx));
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
if (err)
goto error;
- if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
- MLX5_ACCEL_TLS_AES_GCM128))) {
+ if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
err = -ENOTSUPP;
goto error;
}
@@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&tls->pending_cmds);
idr_init(&tls->tx_idr);
- spin_lock_init(&tls->idr_spinlock);
+ idr_init(&tls->rx_idr);
+ spin_lock_init(&tls->tx_idr_spinlock);
+ spin_lock_init(&tls->rx_idr_spinlock);
fdev->tls = tls;
return 0;
@@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
return 0;
}
-static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info, u32 swid,
- u32 tcp_sn)
+static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 swid, u32 tcp_sn)
{
u32 caps = mlx5_fpga_tls_device_caps(mdev);
struct mlx5_setup_stream_context *ctx;
@@ -533,30 +586,42 @@ out:
return ret;
}
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
int ret = -ENOMEM;
u32 swid;
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
+ if (direction_sx)
+ ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, flow);
+ else
+ ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, flow);
+
if (ret < 0)
return ret;
swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, 1);
+ MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
- ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
+ ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
+ start_offload_tcp_sn);
if (ret && ret != -EINTR)
goto free_swid;
*p_swid = swid;
return 0;
free_swid:
- mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);
+ if (direction_sx)
+ mlx5_fpga_tls_release_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, swid);
+ else
+ mlx5_fpga_tls_release_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, swid);
return ret;
}
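The spin_lock_irq() to spin_lock_irqsave() change above makes the swid allocator safe to call from contexts that may already have interrupts disabled. Reduced to its essentials, the pattern is (generic sketch, not the driver code):

	/* Generic preload + irqsave IDR allocation: idr_preload()
	 * pre-allocates with GFP_KERNEL while sleeping is still legal;
	 * the allocation itself runs under the lock with GFP_ATOMIC,
	 * and irqsave/irqrestore preserve the caller's IRQ state.
	 */
	static int alloc_id_irqsafe(struct idr *idr, spinlock_t *lock, void *ptr)
	{
		unsigned long flags;
		int id;

		idr_preload(GFP_KERNEL);
		spin_lock_irqsave(lock, flags);
		id = idr_alloc(idr, ptr, 0, 0, GFP_ATOMIC);	/* any id >= 0 */
		spin_unlock_irqrestore(lock, flags);
		idr_preload_end();

		return id;
	}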
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
index 800a214e4e49..3b2e37bf76fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -46,15 +46,18 @@ struct mlx5_fpga_tls {
struct mlx5_fpga_conn *conn;
struct idr tx_idr;
- spinlock_t idr_spinlock; /* protects the IDR */
+ struct idr rx_idr;
+ spinlock_t tx_idr_spinlock; /* protects the IDR */
+ spinlock_t rx_idr_spinlock; /* protects the IDR */
};
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags);
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx);
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
@@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
return mdev->fpga->tls->caps;
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
+
#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 5a00deff5457..8e01f818021b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -349,9 +349,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
@@ -362,18 +368,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
- unsigned int id;
+ unsigned int id, type = dst->dest_attr.type;
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- MLX5_SET(dest_format_struct, in_dests, destination_type,
- dst->dest_attr.type);
- if (dst->dest_attr.type ==
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ id = dst->dest_attr.ft_num;
+ type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
- } else if (dst->dest_attr.type ==
- MLX5_FLOW_DESTINATION_TYPE_VPORT) {
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
@@ -381,9 +389,13 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
- } else {
+ break;
+ default:
id = dst->dest_attr.tir_num;
}
+
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
list_size++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 49a75d31185e..f418541af7cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "fs_core.h"
@@ -309,89 +310,17 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
return NULL;
}
-static bool check_last_reserved(const u32 *match_criteria)
-{
- char *match_criteria_reserved =
- MLX5_ADDR_OF(fte_match_param, match_criteria, MLX5_FTE_MATCH_PARAM_RESERVED);
-
- return !match_criteria_reserved[0] &&
- !memcmp(match_criteria_reserved, match_criteria_reserved + 1,
- MLX5_FLD_SZ_BYTES(fte_match_param,
- MLX5_FTE_MATCH_PARAM_RESERVED) - 1);
-}
-
-static bool check_valid_mask(u8 match_criteria_enable, const u32 *match_criteria)
-{
- if (match_criteria_enable & ~(
- (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) |
- (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) |
- (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) |
- (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)))
- return false;
-
- if (!(match_criteria_enable &
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS)) {
- char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
- match_criteria, outer_headers);
-
- if (fg_type_mask[0] ||
- memcmp(fg_type_mask, fg_type_mask + 1,
- MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1))
- return false;
- }
-
- if (!(match_criteria_enable &
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS)) {
- char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
- match_criteria, misc_parameters);
-
- if (fg_type_mask[0] ||
- memcmp(fg_type_mask, fg_type_mask + 1,
- MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
- return false;
- }
-
- if (!(match_criteria_enable &
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)) {
- char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
- match_criteria, inner_headers);
-
- if (fg_type_mask[0] ||
- memcmp(fg_type_mask, fg_type_mask + 1,
- MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1))
- return false;
- }
-
- if (!(match_criteria_enable &
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)) {
- char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
- match_criteria, misc_parameters_2);
-
- if (fg_type_mask[0] ||
- memcmp(fg_type_mask, fg_type_mask + 1,
- MLX5_ST_SZ_BYTES(fte_match_set_misc2) - 1))
- return false;
- }
-
- return check_last_reserved(match_criteria);
-}
-
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
int i;
- if (!check_valid_mask(spec->match_criteria_enable, spec->match_criteria)) {
- pr_warn("mlx5_core: Match criteria given mismatches match_criteria_enable\n");
- return false;
- }
-
for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
if (spec->match_value[i] & ~spec->match_criteria[i]) {
pr_warn("mlx5_core: match_value differs from match_criteria\n");
return false;
}
- return check_last_reserved(spec->match_value);
+ return true;
}
static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
@@ -1158,9 +1087,6 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg;
int err;
- if (!check_valid_mask(match_criteria_enable, match_criteria))
- return ERR_PTR(-EINVAL);
-
if (ft->autogroup.active)
return ERR_PTR(-EPERM);
@@ -1431,7 +1357,9 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
- d1->tir_num == d2->tir_num))
+ d1->tir_num == d2->tir_num) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
+ d1->ft_num == d2->ft_num))
return true;
}
@@ -1464,7 +1392,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
MLX5_FLOW_CONTEXT_ACTION_DECAP |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
- MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;
return false;
@@ -1823,7 +1753,7 @@ search_again_locked:
g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
- rule = (void *)g;
+ rule = ERR_CAST(g);
up_write_ref_node(&ft->node);
return rule;
}
@@ -1873,7 +1803,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num)
+ int num_dest)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_destination gen_dest = {};
@@ -1886,7 +1816,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
- if (dest)
+ if (num_dest)
return ERR_PTR(-EINVAL);
mutex_lock(&root->chain_lock);
next_ft = find_next_chained_ft(prio);
@@ -1894,7 +1824,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
gen_dest.ft = next_ft;
dest = &gen_dest;
- dest_num = 1;
+ num_dest = 1;
flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else {
mutex_unlock(&root->chain_lock);
@@ -1902,7 +1832,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
}
}
- handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(handle) &&
@@ -2652,7 +2582,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ if (MLX5_ESWITCH_MANAGER(dev)) {
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
err = init_fdb_root_ns(steering);
if (err)
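One small idiom in the hunks above deserves a call-out: rule = ERR_CAST(g) replaces the open-coded (void *)g when propagating the error from alloc_auto_flow_group(), keeping the encoded errno while making the pointer-type conversion explicit. In general (illustrative fragment):

	/* Illustrative: forward an ERR_PTR through a differently typed
	 * pointer without losing the encoded errno.
	 */
	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g))
		return ERR_CAST(g);	/* caller's return type, same errno */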
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index afd9f4fa22f4..41ad24f0de2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,6 +32,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
}
if (MLX5_CAP_GEN(dev, vport_group_manager) &&
- MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
if (err)
return err;
}
- if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ if (MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index af3bb2f7a504..e3797a44e074 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ u16 max_mtu;
/* priv init */
priv->mdev = mdev;
@@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
priv->ppriv = ppriv;
mutex_init(&priv->state_lock);
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ netdev->mtu = max_mtu;
+
mlx5e_build_nic_params(mdev, &priv->channels.params,
profile->max_nch(mdev), netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
@@ -580,6 +584,22 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
+static void mlx5_rdma_netdev_free(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ const struct mlx5e_profile *profile = priv->profile;
+
+ mlx5e_detach_netdev(priv);
+ profile->cleanup(priv);
+ destroy_workqueue(priv->wq);
+
+ if (!ipriv->sub_interface) {
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+ mlx5e_destroy_mdev_resources(priv->mdev);
+ }
+}
+
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
const char *name,
@@ -653,6 +673,9 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
rn->detach_mcast = mlx5i_detach_mcast;
rn->set_id = mlx5i_set_pkey_index;
+ netdev->priv_destructor = mlx5_rdma_netdev_free;
+ netdev->needs_free_netdev = 1;
+
return netdev;
destroy_ht:
@@ -665,21 +688,3 @@ err_free_netdev:
return NULL;
}
EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
-
-void mlx5_rdma_netdev_free(struct net_device *netdev)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- struct mlx5i_priv *ipriv = priv->ppriv;
- const struct mlx5e_profile *profile = priv->profile;
-
- mlx5e_detach_netdev(priv);
- profile->cleanup(priv);
- destroy_workqueue(priv->wq);
-
- if (!ipriv->sub_interface) {
- mlx5i_pkey_qpn_ht_cleanup(netdev);
- mlx5e_destroy_mdev_resources(priv->mdev);
- }
- free_netdev(netdev);
-}
-EXPORT_SYMBOL(mlx5_rdma_netdev_free);
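Moving the teardown into netdev->priv_destructor with needs_free_netdev set, as above, hands the final free_netdev() to the networking core: unregister_netdev() invokes the destructor and then frees the device itself, removing the need to export (and correctly order) a separate free function. The general shape of the idiom (generic sketch):

	/* Generic priv_destructor pattern (not the driver code) */
	static void my_netdev_free(struct net_device *netdev)
	{
		/* release driver-private state; do NOT call free_netdev() */
	}

	static void my_setup(struct net_device *netdev)
	{
		netdev->priv_destructor = my_netdev_free;
		netdev->needs_free_netdev = true;	/* core frees the netdev */
	}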
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 1e062e6b2587..3f767cde4c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
+ u64 overflow_cycles;
u64 ns;
u64 frac = 0;
u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
+ * The period is calculated as the minimum between max HW cycles count
+ * (The clock source mask) and max amount of cycles that can be
+ * multiplied by clock multiplier where the result doesn't exceed
+ * 64bits.
*/
- ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+ overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+ ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
frac, &frac);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ do_div(ns, NSEC_PER_SEC / HZ);
clock->overflow_period = ns;
mdev->clock_info_page = alloc_page(GFP_KERNEL);
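The overflow-watchdog fix above does two things: it caps the cycle span passed to cyclecounter_cyc2ns() so that cycles * mult can never exceed 64 bits, and it folds the check-at-least-twice-per-wrap guarantee into the halved cycle span rather than a halved divisor. With invented numbers (illustrative arithmetic only):

	/* Say mult = 2^21 and a 49-bit free-running counter, so
	 * mask = 2^49 - 1.  Then:
	 *   div64_u64(~0ULL >> 1, mult) = 2^63 / 2^21 = 2^42
	 *   mask >> 1                  ~= 2^48
	 *   overflow_cycles = min(2^42, 2^48) = 2^42
	 * so cyclecounter_cyc2ns(&cycles, 2^42, ...) stays within u64,
	 * and the watchdog still fires before half a wrap elapses.
	 */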
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index a8eecedd46c2..02e2e4575e4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,8 +33,15 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+
+static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
+{
+ return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
@@ -48,4 +55,21 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(nsec);
}
+#else
+static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {}
+
+static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
+{
+ return -1;
+}
+
+static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 7cb67122e8b5..98359559c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
struct mlx5_mpfs *mpfs;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return;
WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
u32 index;
int err;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
int err = 0;
u32 index;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
new file mode 100644
index 000000000000..9a8fd762167b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "vxlan.h"
+
+struct mlx5_vxlan {
+ struct mlx5_core_dev *mdev;
+ spinlock_t lock; /* protect vxlan table */
+ /* max_num_ports is usually 4, 16 buckets is more than enough */
+ DECLARE_HASHTABLE(htable, 4);
+ int num_ports;
+ struct mutex sync_lock; /* sync add/del port HW operations */
+};
+
+struct mlx5_vxlan_port {
+ struct hlist_node hlist;
+ atomic_t refcount;
+ u16 udp_port;
+};
+
+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
+}
+
+static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
+ MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static struct mlx5_vxlan_port*
+mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
+ if (vxlanp->udp_port == port)
+ return vxlanp;
+ }
+
+ return NULL;
+}
+
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return NULL;
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ return vxlanp;
+}
+
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ int ret = -ENOSPC;
+
+ vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
+ if (vxlanp) {
+ atomic_inc(&vxlanp->refcount);
+ return 0;
+ }
+
+ mutex_lock(&vxlan->sync_lock);
+ if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
+ mlx5_core_info(vxlan->mdev,
+ "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
+ port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
+ if (ret)
+ goto unlock;
+
+ vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
+ if (!vxlanp) {
+ ret = -ENOMEM;
+ goto err_delete_port;
+ }
+
+ vxlanp->udp_port = port;
+ atomic_set(&vxlanp->refcount, 1);
+
+ spin_lock_bh(&vxlan->lock);
+ hash_add(vxlan->htable, &vxlanp->hlist, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ vxlan->num_ports++;
+ mutex_unlock(&vxlan->sync_lock);
+ return 0;
+
+err_delete_port:
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+
+unlock:
+ mutex_unlock(&vxlan->sync_lock);
+ return ret;
+}
+
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ bool remove = false;
+ int ret = 0;
+
+ mutex_lock(&vxlan->sync_lock);
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ if (!vxlanp) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (atomic_dec_and_test(&vxlanp->refcount)) {
+ hash_del(&vxlanp->hlist);
+ remove = true;
+ }
+
+out_unlock:
+ spin_unlock_bh(&vxlan->lock);
+
+ if (remove) {
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+ kfree(vxlanp);
+ vxlan->num_ports--;
+ }
+
+ mutex_unlock(&vxlan->sync_lock);
+
+ return ret;
+}
+
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_vxlan *vxlan;
+
+ if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
+ return ERR_PTR(-ENOTSUPP);
+
+ vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
+ if (!vxlan)
+ return ERR_PTR(-ENOMEM);
+
+ vxlan->mdev = mdev;
+ mutex_init(&vxlan->sync_lock);
+ spin_lock_init(&vxlan->lock);
+ hash_init(vxlan->htable);
+
+ /* Hardware adds 4789 by default */
+ mlx5_vxlan_add_port(vxlan, 4789);
+
+ return vxlan;
+}
+
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ struct hlist_node *tmp;
+ int bkt;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+ /* Lockless since we are the only hash table consumers */
+ hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
+ hash_del(&vxlanp->hlist);
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
+ kfree(vxlanp);
+ }
+
+ kfree(vxlan);
+}
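The new mlx5_vxlan object replaces the old per-netdev radix tree with a refcounted hash table owned by the core device (dev->vxlan, as wired up in main.c below). A minimal usage sketch; the caller and its error handling are hypothetical:

	/* Hypothetical caller: offload a tunnel UDP port, pairing every
	 * add with a del on teardown.
	 */
	static int example_offload_port(struct mlx5_core_dev *mdev, u16 port)
	{
		if (!mlx5_vxlan_allowed(mdev->vxlan))
			return -EOPNOTSUPP;

		return mlx5_vxlan_add_port(mdev->vxlan, port);
		/* ... later: mlx5_vxlan_del_port(mdev->vxlan, port); */
	}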
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index 5ef6ae7d568a..8fb0eb08fa6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -33,31 +33,32 @@
#define __MLX5_VXLAN_H__
#include <linux/mlx5/driver.h>
-#include "en.h"
-struct mlx5e_vxlan {
- atomic_t refcount;
- u16 udp_port;
-};
+struct mlx5_vxlan;
+struct mlx5_vxlan_port;
-struct mlx5e_vxlan_work {
- struct work_struct work;
- struct mlx5e_priv *priv;
- sa_family_t sa_family;
- u16 port;
-};
-
-static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
+static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
{
- return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
- mlx5_core_is_pf(mdev));
+ /* the "not allowed" reason is encoded in the vxlan pointer
+ * as an ERR_PTR by mlx5_vxlan_create
+ */
+ return !IS_ERR_OR_NULL(vxlan);
}
-void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+#if IS_ENABLED(CONFIG_VXLAN)
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+#else
+static inline struct mlx5_vxlan*
+mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
+static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+static inline struct mlx5_vxlan_port*
+mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; }
+#endif
#endif /* __MLX5_VXLAN_H__ */
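mlx5_vxlan_allowed() works because mlx5_vxlan_create() never returns a bare NULL for "unsupported": the reason is encoded into the pointer itself via ERR_PTR(), so a single pointer test covers both "not created" and "not supported". A generic sketch of the idiom (the hw_supported() check is hypothetical):

	/* Generic: encode failure into the returned pointer */
	struct ctx *ctx_create(void)
	{
		struct ctx *c;

		if (!hw_supported())		/* hypothetical capability check */
			return ERR_PTR(-EOPNOTSUPP);

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		return c ? c : ERR_PTR(-ENOMEM);
	}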
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 615005e63819..cf3e4a659052 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,9 +62,11 @@
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
+#include "lib/vxlan.h"
+#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
@@ -321,7 +323,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *table = &priv->eq_table;
- int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int err;
@@ -960,6 +964,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_clock(dev);
+ dev->vxlan = mlx5_vxlan_create(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -990,6 +996,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_sriov_cleanup;
}
+ dev->tracer = mlx5_fw_tracer_create(dev);
+
return 0;
err_sriov_cleanup:
@@ -1001,6 +1009,7 @@ err_mpfs_cleanup:
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1015,11 +1024,13 @@ out:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
mlx5_sriov_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
@@ -1167,10 +1178,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_put_uars;
}
+ err = mlx5_fw_tracer_init(dev->tracer);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init FW tracer\n");
+ goto err_fw_tracer;
+ }
+
err = alloc_comp_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
- goto err_stop_eqs;
+ goto err_comp_eqs;
}
err = mlx5_irq_set_affinity_hints(dev);
@@ -1252,7 +1269,10 @@ err_fpga_start:
err_affinity_hints:
free_comp_eqs(dev);
-err_stop_eqs:
+err_comp_eqs:
+ mlx5_fw_tracer_cleanup(dev->tracer);
+
+err_fw_tracer:
mlx5_stop_eqs(dev);
err_put_uars:
@@ -1320,6 +1340,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_fpga_device_stop(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
+ mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_stop_eqs(dev);
mlx5_put_uars_page(dev, priv->uar);
mlx5_free_irq_vectors(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 023882d9a22e..b4134fa0bba3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -66,6 +66,12 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define mlx5_core_err_rl(__dev, format, ...) \
+ dev_err_ratelimited(&(__dev)->pdev->dev, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
#define mlx5_core_warn(__dev, format, ...) \
dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
@@ -93,7 +99,6 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
-void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index f4f02f775c93..0670165afd5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -146,23 +146,6 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
- u32 *mkey)
-{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
- int err;
-
- MLX5_SET(query_special_contexts_in, in, opcode,
- MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *mkey = MLX5_GET(query_special_contexts_out, out,
- dump_fill_mkey);
- return err;
-}
-EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
-
static inline u32 mlx5_get_psv(u32 *out, int psv_index)
{
switch (psv_index) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index fa9d0760dd36..31a9cbd85689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 out[MLX5_ST_SZ_DW(qetc_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2a8b529ce6dd..a0674962f02c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
return -EBUSY;
}
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ goto enable_vfs_hca;
+
err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
if (err) {
mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
return err;
}
+enable_vfs_hca:
for (vf = 0; vf < num_vfs; vf++) {
err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
}
out:
- mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+ if (MLX5_ESWITCH_MANAGER(dev))
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch);
if (mlx5_wait_for_vf_pages(dev))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 719cecb182c6..b02af317c125 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -62,17 +62,6 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
return MLX5_GET(query_vport_state_out, out, state);
}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
-
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
- u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
- _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
-
- return MLX5_GET(query_vport_state_out, out, admin_state);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state)
@@ -90,7 +79,6 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
@@ -549,8 +537,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
return -EINVAL;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
- if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
- return -EOPNOTSUPP;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
deleted file mode 100644
index 2f74953e4561..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#include "vxlan.h"
-
-void mlx5e_vxlan_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-
- spin_lock_init(&vxlan_db->lock);
- INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
-}
-
-static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
- MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
- MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
-
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- spin_unlock_bh(&vxlan_db->lock);
-
- return vxlan;
-}
-
-static void mlx5e_vxlan_add_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- int err;
-
- mutex_lock(&priv->state_lock);
- vxlan = mlx5e_vxlan_lookup_port(priv, port);
- if (vxlan) {
- atomic_inc(&vxlan->refcount);
- goto free_work;
- }
-
- if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
- goto free_work;
-
- vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
- if (!vxlan)
- goto err_delete_port;
-
- vxlan->udp_port = port;
- atomic_set(&vxlan->refcount, 1);
-
- spin_lock_bh(&vxlan_db->lock);
- err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
- spin_unlock_bh(&vxlan_db->lock);
- if (err)
- goto err_free;
-
- goto free_work;
-
-err_free:
- kfree(vxlan);
-err_delete_port:
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-free_work:
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- bool remove = false;
-
- mutex_lock(&priv->state_lock);
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- if (!vxlan)
- goto out_unlock;
-
- if (atomic_dec_and_test(&vxlan->refcount)) {
- radix_tree_delete(&vxlan_db->tree, port);
- remove = true;
- }
-
-out_unlock:
- spin_unlock_bh(&vxlan_db->lock);
-
- if (remove) {
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add)
-{
- struct mlx5e_vxlan_work *vxlan_work;
-
- vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
- if (!vxlan_work)
- return;
-
- if (add)
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
- else
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
-
- vxlan_work->priv = priv;
- vxlan_work->port = port;
- vxlan_work->sa_family = sa_family;
- queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
- unsigned int port = 0;
-
- /* Lockless since we are the only radix-tree consumers, wq is disabled */
- while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
- port = vxlan->udp_port;
- radix_tree_delete(&vxlan_db->tree, port);
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index b97bb72b4db4..86478a6b99c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -113,35 +113,45 @@ err_db_free:
return err;
}
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
- struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+ struct mlx5_wq_qp *qp)
{
+ struct mlx5_frag_buf_ctrl *sq_fbc;
struct mlx5_frag_buf *rqb, *sqb;
- rqb = &qp->rq.fbc.frag_buf;
+ rqb = &qp->rq.fbc.frag_buf;
*rqb = *buf;
rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- rqb->npages = 1 << get_order(rqb->size);
+ rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
- sqb = &qp->sq.fbc.frag_buf;
- *sqb = *buf;
- sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- sqb->npages = 1 << get_order(sqb->size);
+ sq_fbc = &qp->sq.fbc;
+ sqb = &sq_fbc->frag_buf;
+ *sqb = *buf;
+ sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
+ sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
sqb->frags += rqb->npages; /* first part is for the rq */
+ if (sq_fbc->strides_offset)
+ sqb->frags--;
}
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u32 sq_strides_offset;
int err;
mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
MLX5_GET(qpc, qpc, log_rq_size),
&wq->rq.fbc);
- mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
- MLX5_GET(qpc, qpc, log_sq_size),
- &wq->sq.fbc);
+
+ sq_strides_offset =
+ ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+ mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+ MLX5_GET(qpc, qpc, log_sq_size),
+ sq_strides_offset,
+ &wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+ mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
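The SQ fragment-buffer fix above accounts for an RQ that ends in the middle of a page: the SQ's first strides then live in that same page at an offset, so the shared page must not be counted twice in sqb->frags. With invented numbers (illustrative only):

	/* Say PAGE_SIZE = 4096, MLX5_SEND_WQE_BB = 64, and the RQ
	 * fragment size (frag_sz_m1 + 1) is 1024.  Then:
	 *   sq_strides_offset = (1024 % 4096) / 64 = 16
	 * i.e. the SQ starts 16 basic blocks into the RQ's last page,
	 * and sqb->frags is decremented once for that shared page.
	 */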
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 0b47126815b6..2bd4c3184eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
return !wq->cur_sz;
}
+static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq)
+{
+ return wq->fbc.sz_m1 - wq->cur_sz;
+}
+
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index f4d9c9975ac3..8a291eb36c64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL
config MLXSW_PCI
tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
- depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+ depends on PCI && HAS_IOMEM && MLXSW_CORE
default m
---help---
This is PCI bus implementation for Mellanox Technologies Switch ASICs.
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
depends on IPV6 || IPV6=n
depends on NET_IPGRE || NET_IPGRE=n
depends on IPV6_GRE || IPV6_GRE=n
+ select GENERIC_ALLOCATOR
select PARMAN
select MLXFW
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0cadcabfe86f..68fa44a41485 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,11 +15,18 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_kvdl.o spectrum_acl_tcam.o \
- spectrum_acl.o spectrum_flower.o \
- spectrum_cnt.o spectrum_fid.o \
- spectrum_ipip.o spectrum_acl_flex_actions.o \
- spectrum_mr.o spectrum_mr_tcam.o \
+ spectrum1_kvdl.o spectrum2_kvdl.o \
+ spectrum_kvdl.o \
+ spectrum_acl_tcam.o spectrum_acl_ctcam.o \
+ spectrum_acl_atcam.o spectrum_acl_erp.o \
+ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
+ spectrum_acl.o \
+ spectrum_flower.o spectrum_cnt.o \
+ spectrum_fid.o spectrum_ipip.o \
+ spectrum_acl_flex_actions.o \
+ spectrum_acl_flex_keys.o \
+ spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
+ spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 2bc48054b685..0772e4339b33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/cmd.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CMD_H
#define _MLXSW_CMD_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f9c724752a32..81533d7f395c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -759,15 +726,6 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
-static void mlxsw_core_driver_put(const char *kind)
-{
- struct mlxsw_driver *mlxsw_driver;
-
- spin_lock(&mlxsw_core_driver_list_lock);
- mlxsw_driver = __driver_find(kind);
- spin_unlock(&mlxsw_core_driver_list_lock);
-}
-
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count,
@@ -1115,7 +1073,6 @@ err_bus_init:
if (!reload)
devlink_free(devlink);
err_devlink_alloc:
- mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
@@ -1123,7 +1080,6 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
bool reload)
{
- const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (mlxsw_core->reload_fail)
@@ -1144,7 +1100,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
reload_fail:
devlink_free(devlink);
- mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 552cfa29c2f7..655ddd204ab2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_H
#define _MLXSW_CORE_H
@@ -362,6 +329,7 @@ struct mlxsw_fw_rev {
u16 major;
u16 minor;
u16 subminor;
+ u16 can_reset_minor;
};
struct mlxsw_bus_info {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 3c0d882ba183..c51b2adfc1e1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
- * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -327,12 +296,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
list_add(&resource->list, &block->resource_list);
}
+static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
+{
+ list_del(&resource->list);
+}
+
static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
{
struct mlxsw_afa_resource *resource, *tmp;
list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
- list_del(&resource->list);
resource->destructor(block, resource);
}
}
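
Moving the list_del() out of the loop and into mlxsw_afa_resource_del() lets
each destructor unlink its own entry, so the same destructor can also run on
error-unwind paths (see the hunks below) without leaving a dangling list node.
A compact, self-contained sketch of the pattern:

#include <linux/list.h>
#include <linux/slab.h>

struct example_res {
	struct list_head list;
};

/* The destructor unlinks the entry itself, making it safe to call both
 * from the bulk teardown below and from an individual rollback path.
 */
static void example_res_destroy(struct example_res *res)
{
	list_del(&res->list);
	kfree(res);
}

static void example_res_destroy_all(struct list_head *head)
{
	struct example_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, head, list)
		example_res_destroy(res);
}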
@@ -351,9 +324,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block->first_set = mlxsw_afa_set_create(true);
if (!block->first_set)
goto err_first_set_create;
- block->cur_set = block->first_set;
+
+ /* In case the user instructs us to have a dummy first set, we
+ * leave it empty here and create another, real, set right away.
+ */
+ if (mlxsw_afa->ops->dummy_first_set) {
+ block->cur_set = mlxsw_afa_set_create(false);
+ if (!block->cur_set)
+ goto err_second_set_create;
+ block->cur_set->prev = block->first_set;
+ block->first_set->next = block->cur_set;
+ } else {
+ block->cur_set = block->first_set;
+ }
+
return block;
+err_second_set_create:
+ mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
return NULL;
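
After the branch above runs with dummy_first_set enabled, the block holds a
two-set chain: an empty first set followed by the real current set that
subsequent actions are appended to. A trimmed sketch of just the linkage
(struct reduced to the two pointers involved):

/* Trimmed illustration of the chain built above when
 * ops->dummy_first_set is set; field assignments mirror the patch.
 */
struct example_set {
	struct example_set *next;
	struct example_set *prev;
};

static void example_link(struct example_set *first, struct example_set *cur)
{
	cur->prev = first;	/* block->cur_set->prev = block->first_set */
	first->next = cur;	/* block->first_set->next = block->cur_set */
}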
@@ -415,11 +403,31 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
+{
+ return block->cur_set->ht_key.enc_actions;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_cur_set);
+
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
{
- return block->first_set->kvdl_index;
+ /* The first set is never in KVD linear, so the first set with a
+ * valid KVD linear index is always the second one.
+ */
+ if (WARN_ON(!block->first_set->next))
+ return 0;
+ return block->first_set->next->kvdl_index;
}
-EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
+EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);
+
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
+{
+ u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);
+
+ return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
+ kvdl_index, activity);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_activity_get);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
@@ -530,6 +538,7 @@ static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
+ mlxsw_afa_resource_del(&fwd_entry_ref->resource);
mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
kfree(fwd_entry_ref);
}
@@ -579,6 +588,7 @@ static void
mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_counter *counter)
{
+ mlxsw_afa_resource_del(&counter->resource);
block->afa->ops->counter_index_put(block->afa->ops_priv,
counter->counter_index);
kfree(counter);
@@ -626,8 +636,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
char *oneact;
char *actions;
- if (WARN_ON(block->finished))
- return NULL;
+ if (block->finished)
+ return ERR_PTR(-EINVAL);
if (block->cur_act_index + action_size >
block->afa->max_acts_per_set) {
struct mlxsw_afa_set *set;
@@ -637,7 +647,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
*/
set = mlxsw_afa_set_create(false);
if (!set)
- return NULL;
+ return ERR_PTR(-ENOBUFS);
set->prev = block->cur_set;
block->cur_act_index = 0;
block->cur_set->next = set;
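
The hunk above changes mlxsw_afa_block_append_action() from returning NULL on
any failure to encoding a specific errno in the returned pointer, so callers
can distinguish misuse (the block is already finished) from set exhaustion. A
self-contained sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom it adopts (the
allocator here is hypothetical, not part of the driver):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical allocator demonstrating the pattern: errors travel
 * inside the pointer, so each failure mode carries its own errno
 * instead of collapsing into a bare NULL.
 */
static char *example_alloc_or_err(bool finished)
{
	char *buf;

	if (finished)
		return ERR_PTR(-EINVAL);	/* caller misuse */
	buf = kzalloc(16, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOBUFS);	/* resource exhaustion */
	return buf;
}

static int example_caller(void)
{
	char *p = example_alloc_or_err(false);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* propagate the encoded errno */
	kfree(p);
	return 0;
}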
@@ -718,14 +728,17 @@ mlxsw_afa_vlan_pack(char *payload,
}
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
- u16 vid, u8 pcp, u8 et)
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VLAN_CODE,
MLXSW_AFA_VLAN_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
+ return PTR_ERR(act);
+ }
mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
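
From here on, each append helper gains a struct netlink_ext_ack parameter, so
a failed ACL rule install can tell userspace which action could not be
appended instead of returning a bare errno. A minimal sketch of the reporting
idiom (the configure function is invented for illustration):

#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/types.h>

/* Hypothetical helper showing the extack pattern the patch adopts: on
 * failure, attach a human-readable message that netlink hands back to
 * the requesting userspace program alongside the errno.
 */
static int example_configure(bool ok, struct netlink_ext_ack *extack)
{
	if (!ok) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot apply configuration");
		return -EINVAL;
	}
	return 0;
}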
@@ -806,8 +819,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
return 0;
@@ -820,8 +833,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
trap_id);
@@ -836,8 +849,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
trap_id);
@@ -856,6 +869,7 @@ static void
mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_mirror *mirror)
{
+ mlxsw_afa_resource_del(&mirror->resource);
block->afa->ops->mirror_del(block->afa->ops_priv,
mirror->local_in_port,
mirror->span_id,
@@ -908,8 +922,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
@@ -918,19 +932,23 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
int
mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
- const struct net_device *out_dev, bool ingress)
+ const struct net_device *out_dev, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_mirror *mirror;
int err;
mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
ingress);
- if (IS_ERR(mirror))
+ if (IS_ERR(mirror)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action");
return PTR_ERR(mirror);
-
+ }
err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action");
goto err_append_allocated_mirror;
+ }
return 0;
@@ -980,24 +998,30 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
}
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port)
+ u8 local_port, bool in_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
u32 kvdl_index;
char *act;
int err;
- if (in_port)
+ if (in_port) {
+ NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported");
return -EOPNOTSUPP;
+ }
fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
- if (IS_ERR(fwd_entry_ref))
+ if (IS_ERR(fwd_entry_ref)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action");
return PTR_ERR(fwd_entry_ref);
+ }
kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
MLXSW_AFA_FORWARD_SIZE);
- if (!act) {
- err = -ENOBUFS;
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
+ err = PTR_ERR(act);
goto err_append_action;
}
mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
@@ -1052,8 +1076,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
{
char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
counter_index);
return 0;
@@ -1061,21 +1085,25 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 *p_counter_index)
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_counter *counter;
u32 counter_index;
int err;
counter = mlxsw_afa_counter_create(block);
- if (IS_ERR(counter))
+ if (IS_ERR(counter)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create count action");
return PTR_ERR(counter);
+ }
counter_index = counter->counter_index;
err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append count action");
goto err_append_allocated_counter;
-
+ }
if (p_counter_index)
*p_counter_index = counter_index;
return 0;
@@ -1118,13 +1146,16 @@ static inline void mlxsw_afa_virfwd_pack(char *payload,
mlxsw_afa_virfwd_fid_set(payload, fid);
}
-int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VIRFWD_CODE,
MLXSW_AFA_VIRFWD_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
+ return PTR_ERR(act);
+ }
mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
return 0;
}
@@ -1193,8 +1224,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_MCROUTER_CODE,
MLXSW_AFA_MCROUTER_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
expected_irif, min_mtu, rmid_valid, kvdl_index);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 3a155d104384..0e3a59dda12e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H
#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
@@ -45,6 +14,8 @@ struct mlxsw_afa_ops {
int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first);
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
+ int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
+ bool *activity);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
@@ -54,6 +25,7 @@ struct mlxsw_afa_ops {
bool ingress, int *p_span_id);
void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
bool ingress);
+ bool dummy_first_set;
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -64,7 +36,9 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
@@ -75,16 +49,21 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
u8 local_in_port,
const struct net_device *out_dev,
- bool ingress);
+ bool ingress,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port);
+ u8 local_port, bool in_port,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
- u16 vid, u8 pcp, u8 et);
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 *p_counter_index);
-int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index b32a00972e83..785bf01fe2be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -43,6 +12,7 @@
struct mlxsw_afk {
struct list_head key_info_list;
unsigned int max_blocks;
+ const struct mlxsw_afk_ops *ops;
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
};
@@ -69,8 +39,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
}
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count)
+ const struct mlxsw_afk_ops *ops)
{
struct mlxsw_afk *mlxsw_afk;
@@ -79,8 +48,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
return NULL;
INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
mlxsw_afk->max_blocks = max_blocks;
- mlxsw_afk->blocks = blocks;
- mlxsw_afk->blocks_count = blocks_count;
+ mlxsw_afk->ops = ops;
+ mlxsw_afk->blocks = ops->blocks;
+ mlxsw_afk->blocks_count = ops->blocks_count;
WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
return mlxsw_afk;
}
@@ -415,61 +385,76 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
}
EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
-static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
- __mlxsw_item_set32(output_indexed, output_item, 0, value);
+ __mlxsw_item_set32(output, output_item, 0, value);
}
-static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
- char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
+ char *output_data = __mlxsw_item_data(output, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
-#define MLXSW_AFK_KEY_BLOCK_SIZE 16
-
-static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
- int block_index, char *storage, char *output)
+static void
+mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+ char *output, char *storage)
{
- char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
- mlxsw_afk_encode_u32(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_u32(storage_item, output_item,
+ storage, output);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
- mlxsw_afk_encode_buf(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_buf(storage_item, output_item,
+ storage, output);
}
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16
+
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask)
+ char *key, char *mask, int block_start, int block_end)
{
+ char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
+ char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
- int block_index;
+ int block_index, i;
+
+ for (i = block_start; i <= block_end; i++) {
+ memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+ memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+
+ mlxsw_afk_element_usage_for_each(element, &values->elusage) {
+ elinst = mlxsw_afk_key_info_elinst_get(key_info,
+ element,
+ &block_index);
+ if (!elinst || block_index != i)
+ continue;
+
+ mlxsw_sp_afk_encode_one(elinst, block_key,
+ values->storage.key);
+ mlxsw_sp_afk_encode_one(elinst, block_mask,
+ values->storage.mask);
+ }
- mlxsw_afk_element_usage_for_each(element, &values->elusage) {
- elinst = mlxsw_afk_key_info_elinst_get(key_info, element,
- &block_index);
- if (!elinst)
- continue;
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.key, key);
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.mask, mask);
+ if (key)
+ mlxsw_afk->ops->encode_block(block_key, i, key);
+ if (mask)
+ mlxsw_afk->ops->encode_block(block_mask, i, mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
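
The rewritten mlxsw_afk_encode() now assembles each 16-byte key block in a
scratch buffer and delegates placement to the new encode_block() callback,
which is what allows different ASIC generations to lay blocks out differently.
A hedged sketch of one possible callback follows; the flat back-to-back layout
is an assumption for illustration (it matches what the removed code hardcoded,
not necessarily what either Spectrum ASIC does). Such a callback would be
wired in through the new mlxsw_afk_ops table together with the ASIC's block
list:

#include <linux/string.h>

#define EXAMPLE_KEY_BLOCK_SIZE	16	/* MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE */

/* Illustrative encode_block(): place block i at a fixed 16-byte slot,
 * i.e. the flat layout the pre-patch code used for all ASICs.
 */
static void example_encode_block(char *block, int block_index, char *output)
{
	memcpy(output + block_index * EXAMPLE_KEY_BLOCK_SIZE,
	       block, EXAMPLE_KEY_BLOCK_SIZE);
}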
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 122506daa586..c29c045d826d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H
#define _MLXSW_CORE_ACL_FLEX_KEYS_H
@@ -42,16 +11,20 @@
enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -99,9 +72,11 @@ struct mlxsw_afk_element_info {
* define an internal storage geometry.
*/
static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
@@ -112,12 +87,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
};
#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
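
With the addresses split into 32-bit chunks above, a 128-bit IPv6 address is
now described by four buffer elements rather than two 64-bit halves, so a key
block can carry exactly the chunks it needs. A sketch of feeding one source
address in (this assumes the four SRC_IP_* enum values are consecutive, as
they are declared earlier in this header; the helper name is invented):

/* Illustration: add a 128-bit IPv6 source address as four 4-byte
 * elements, SRC_IP_96_127 down through SRC_IP_0_31.
 */
static void example_add_src_ip6(struct mlxsw_afk_element_values *values,
				const char *addr, const char *mask)
{
	int i;

	for (i = 0; i < 4; i++)
		mlxsw_afk_values_add_buf(values,
					 MLXSW_AFK_ELEMENT_SRC_IP_96_127 + i,
					 addr + i * 4, mask + i * 4, 4);
}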
@@ -208,9 +185,14 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
struct mlxsw_afk;
+struct mlxsw_afk_ops {
+ const struct mlxsw_afk_block *blocks;
+ unsigned int blocks_count;
+ void (*encode_block)(char *block, int block_index, char *output);
+};
+
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count);
+ const struct mlxsw_afk_ops *ops);
void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
struct mlxsw_afk_key_info;
@@ -243,8 +225,9 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
enum mlxsw_afk_element element,
const char *key_value, const char *mask_value,
unsigned int len);
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask);
+ char *key, char *mask, int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 84185f8dfbae..f6cf2896d337 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d866c98c1a97..6d29dc428608 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -1,34 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved
* Copyright (c) 2016 Ivan Vecera <cera@cera.cz>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index 97b6bb5d9185..a33b896f4bb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/emad.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_EMAD_H
#define _MLXSW_EMAD_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 25f9915ebd82..798bd5aca384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/i2c.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/err.h>
#include <linux/i2c.h>
@@ -46,8 +15,6 @@
#include "core.h"
#include "i2c.h"
-static const char mlxsw_i2c_driver_name[] = "mlxsw_i2c";
-
#define MLXSW_I2C_CIR2_BASE 0x72000
#define MLXSW_I2C_CIR_STATUS_OFF 0x18
#define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.h b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
index daa24b213ea4..17e059d47fae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/i2c.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_I2C_H
#define _MLXSW_I2C_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/ib.h b/drivers/net/ethernet/mellanox/mlxsw/ib.h
index ce313aaa6336..2d0cb0f5eb85 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/ib.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/ib.h
@@ -1,36 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/ib.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_IB_H
#define _MLXSW_IB_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 31c886edc791..e92cadc98128 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/item.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 3dd16267b76c..5a6c4457fb55 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/minimal.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/i2c.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index fc4557245ff4..4d271fb3de3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -53,8 +22,6 @@
#include "port.h"
#include "resources.h"
-static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
-
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
@@ -1750,6 +1717,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
struct mlxsw_pci *mlxsw_pci;
+ bool called_again = false;
int err;
mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
@@ -1806,10 +1774,18 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.dev = &pdev->dev;
mlxsw_pci->id = id;
+again:
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
NULL);
- if (err) {
+ /* -EAGAIN is returned in case the FW was updated. The FW needs
+ * a reset, so let's try to call mlxsw_core_bus_device_register()
+ * again.
+ */
+ if (err == -EAGAIN && !called_again) {
+ called_again = true;
+ goto again;
+ } else if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index d65582325cd5..946339e13eb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PCI_H
#define _MLXSW_PCI_H
@@ -39,6 +8,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 963155f6a17a..83f452b7ccbb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
- * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PCI_HW_H
#define _MLXSW_PCI_HW_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index c580abba8d34..a33eeef0b00c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -1,38 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/port.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_PORT_H
#define _MLXSW_PORT_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1877d9f8a11a..6e8b619b769b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,44 +1,10 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_REG_H
#define _MLXSW_REG_H
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
@@ -1943,6 +1909,28 @@ static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
mlxsw_reg_cwtpm_ntcp_r_set(payload, profile);
}
+/* PGCR - Policy-Engine General Configuration Register
+ * ---------------------------------------------------
+ * This register configures general Policy-Engine settings.
+ */
+#define MLXSW_REG_PGCR_ID 0x3001
+#define MLXSW_REG_PGCR_LEN 0x20
+
+MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN);
+
+/* reg_pgcr_default_action_pointer_base
+ * Default action pointer base. Each region has a default action pointer
+ * which is equal to default_action_pointer_base + region_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base)
+{
+ MLXSW_REG_ZERO(pgcr, payload);
+ mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base);
+}
+
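For reference, a driver-side write of PGCR would follow the usual mlxsw payload-on-stack pattern. A minimal sketch, assuming a valid mlxsw_core handle; the helper name and error handling here are illustrative, not part of this patch:

static int example_pgcr_write(struct mlxsw_core *mlxsw_core, u32 pointer_base)
{
	char pgcr_pl[MLXSW_REG_PGCR_LEN];

	/* Zero the payload, set the default action pointer base, write. */
	mlxsw_reg_pgcr_pack(pgcr_pl, pointer_base);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pgcr), pgcr_pl);
}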
/* PPBT - Policy-Engine Port Binding Table
* ---------------------------------------
* This register is used for configuration of the Port Binding Table.
@@ -2132,14 +2120,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
/* reg_ptar_action_set_type
* Type of action set to be used on this region.
- * For Spectrum, this is always type 2 - "flexible"
+ * For Spectrum and Spectrum-2, this is always type 2 - "flexible"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+enum mlxsw_reg_ptar_key_type {
+ MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spectrum */
+ MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */
+};
+
/* reg_ptar_key_type
* TCAM key type for the region.
- * For Spectrum, this is always type 0x50 - "FLEX_KEY"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
@@ -2182,13 +2174,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+ enum mlxsw_reg_ptar_key_type key_type,
u16 region_size, u16 region_id,
const char *tcam_region_info)
{
MLXSW_REG_ZERO(ptar, payload);
mlxsw_reg_ptar_op_set(payload, op);
mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
- mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+ mlxsw_reg_ptar_key_type_set(payload, key_type);
mlxsw_reg_ptar_region_size_set(payload, region_size);
mlxsw_reg_ptar_region_id_set(payload, region_id);
mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
@@ -2327,6 +2320,23 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
*/
MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+/* reg_pefa_a
+ * Activity
+ * For a new entry: set if ca=0, clear if ca=1
+ * Set if a packet lookup has hit on the specific entry
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1);
+
+/* reg_pefa_ca
+ * Clear activity
+ * When write: activity is according to this field
+ * When read: after reading the activity is cleared according to ca
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1);
+
#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8
/* reg_pefa_flex_action_set
@@ -2336,12 +2346,20 @@ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
*/
MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN);
-static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca,
const char *flex_action_set)
{
MLXSW_REG_ZERO(pefa, payload);
mlxsw_reg_pefa_index_set(payload, index);
- mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set);
+ mlxsw_reg_pefa_ca_set(payload, ca);
+ if (flex_action_set)
+ mlxsw_reg_pefa_flex_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a)
+{
+ *p_a = mlxsw_reg_pefa_a_get(payload);
}
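The new activity/clear-activity fields pair with mlxsw_reg_query() rather than mlxsw_reg_write(). A minimal read-and-clear sketch, assuming a valid mlxsw_core handle (the helper name is illustrative):

static int example_pefa_activity_get(struct mlxsw_core *mlxsw_core,
				     u32 index, bool *activity)
{
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	int err;

	/* ca = true: the activity bit is cleared as part of the read;
	 * a NULL action set leaves the flex action set untouched.
	 */
	mlxsw_reg_pefa_pack(pefa_pl, index, true, NULL);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pefa), pefa_pl);
	if (err)
		return err;
	mlxsw_reg_pefa_unpack(pefa_pl, activity);
	return 0;
}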
/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
@@ -2397,6 +2415,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
*/
MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+/* reg_ptce2_priority
+ * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1.
+ * Note: priority does not have to be unique per rule.
+ * Within a region, higher priority should have lower offset (no limitation
+ * between regions in a multi-region).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
+
/* reg_ptce2_tcam_region_info
* Opaque object that represents the TCAM region.
* Access: Index
@@ -2404,14 +2431,14 @@ MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
-#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96
/* reg_ptce2_flex_key_blocks
* ACL Key.
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_mask
* mask- in the same size as key. A bit that is set directs the TCAM
@@ -2420,7 +2447,7 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_flex_action_set
* ACL action set.
@@ -2432,15 +2459,567 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
enum mlxsw_reg_ptce2_op op,
const char *tcam_region_info,
- u16 offset)
+ u16 offset, u32 priority)
{
MLXSW_REG_ZERO(ptce2, payload);
mlxsw_reg_ptce2_v_set(payload, valid);
mlxsw_reg_ptce2_op_set(payload, op);
mlxsw_reg_ptce2_offset_set(payload, offset);
+ mlxsw_reg_ptce2_priority_set(payload, priority);
mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
}
+/* PERPT - Policy-Engine ERP Table Register
+ * ----------------------------------------
+ * This register adds and removes eRPs from the eRP table.
+ */
+#define MLXSW_REG_PERPT_ID 0x3021
+#define MLXSW_REG_PERPT_LEN 0x80
+
+MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN);
+
+/* reg_perpt_erpt_bank
+ * eRP table bank.
+ * Range 0 .. cap_max_erp_table_banks - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4);
+
+/* reg_perpt_erpt_index
+ * Index to eRP table within the eRP bank.
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8);
+
+enum mlxsw_reg_perpt_key_size {
+ MLXSW_REG_PERPT_KEY_SIZE_2KB,
+ MLXSW_REG_PERPT_KEY_SIZE_4KB,
+ MLXSW_REG_PERPT_KEY_SIZE_8KB,
+ MLXSW_REG_PERPT_KEY_SIZE_12KB,
+};
+
+/* reg_perpt_key_size
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4);
+
+/* reg_perpt_bf_bypass
+ * 0 - The eRP is used only if bloom filter state is set for the given
+ * rule.
+ * 1 - The eRP is used regardless of bloom filter state.
+ * The bypass is an OR condition of region_id or eRP. See PERCR.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1);
+
+/* reg_perpt_erp_id
+ * eRP ID for use by the rules.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4);
+
+/* reg_perpt_erpt_base_bank
+ * Base eRP table bank, points to head of erp_vector
+ * Range is 0 .. cap_max_erp_table_banks - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4);
+
+/* reg_perpt_erpt_base_index
+ * Base index to eRP table within the eRP bank
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8);
+
+/* reg_perpt_erp_index_in_vector
+ * eRP index in the vector.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4);
+
+/* reg_perpt_erp_vector
+ * eRP vector.
+ * Access: OP
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1);
+
+/* reg_perpt_mask
+ * Mask
+ * 0 - A-TCAM will ignore the bit in key
+ * 1 - A-TCAM will compare the bit in key
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_perpt_erp_vector_set(payload, bit, true);
+}
+
+static inline void
+mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
+ enum mlxsw_reg_perpt_key_size key_size, u8 erp_id,
+ u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index,
+ char *mask)
+{
+ MLXSW_REG_ZERO(perpt, payload);
+ mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
+ mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
+ mlxsw_reg_perpt_key_size_set(payload, key_size);
+ mlxsw_reg_perpt_bf_bypass_set(payload, true);
+ mlxsw_reg_perpt_erp_id_set(payload, erp_id);
+ mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
+ mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
+ mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index);
+ mlxsw_reg_perpt_mask_memcpy_to(payload, mask);
+}
+
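A sketch of how the two helpers above compose; the bank/index/mask variables and the vector size of 4 are illustrative assumptions only:

char perpt_pl[MLXSW_REG_PERPT_LEN];
unsigned long erp_vector[1] = { 0 };	/* one bit per eRP index */

__set_bit(0, erp_vector);		/* allow eRP index 0 */
__set_bit(3, erp_vector);		/* allow eRP index 3 */
mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index,
		     MLXSW_REG_PERPT_KEY_SIZE_2KB, erp_id, erpt_base_bank,
		     erpt_base_index, erp_index, mask);
mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_vector, 4);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(perpt), perpt_pl);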
+/* PERAR - Policy-Engine Region Association Register
+ * -------------------------------------------------
+ * This register associates a hw region with a region_id. Changing the
+ * association on the fly is supported by the device.
+ */
+#define MLXSW_REG_PERAR_ID 0x3026
+#define MLXSW_REG_PERAR_LEN 0x08
+
+MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN);
+
+/* reg_perar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16);
+
+static inline unsigned int
+mlxsw_reg_perar_hw_regions_needed(unsigned int block_num)
+{
+ return DIV_ROUND_UP(block_num, 4);
+}
+
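Worked examples for the helper above, consistent with the hw_region note below (one hw region per four key blocks):

/* DIV_ROUND_UP(4, 4)  == 1 -> one hw region
 * DIV_ROUND_UP(8, 4)  == 2 -> two consecutive hw regions
 * DIV_ROUND_UP(12, 4) == 3 -> three consecutive hw regions
 */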
+/* reg_perar_hw_region
+ * HW Region
+ * Range 0 .. cap_max_regions-1
+ * Default: hw_region = region_id
+ * For an 8 key block region, 2 consecutive regions are used
+ * For a 12 key block region, 3 consecutive regions are used
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16);
+
+static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id,
+ u16 hw_region)
+{
+ MLXSW_REG_ZERO(perar, payload);
+ mlxsw_reg_perar_region_id_set(payload, region_id);
+ mlxsw_reg_perar_hw_region_set(payload, hw_region);
+}
+
+/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3
+ * -----------------------------------------------------
+ * This register is a new version of PTCE-V2 in order to support the
+ * A-TCAM. This register is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PTCE3_ID 0x3027
+#define MLXSW_REG_PTCE3_LEN 0xF0
+
+MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN);
+
+/* reg_ptce3_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1);
+
+enum mlxsw_reg_ptce3_op {
+ /* Write operation. Used to write a new entry to the table.
+ * All R/W fields are relevant for new entry. Activity bit is set
+ * for new entries. Write with v = 0 will delete the entry. Must
+ * not be used if an entry exists.
+ */
+ MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0,
+ /* Update operation */
+ MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1,
+ /* Read operation */
+ MLXSW_REG_PTCE3_OP_QUERY_READ = 0,
+};
+
+/* reg_ptce3_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3);
+
+/* reg_ptce3_priority
+ * Priority of the rule. Higher values win.
+ * For Spectrum-2 range is 1..cap_kvd_size - 1
+ * Note: Priority does not have to be unique per rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24);
+
+/* reg_ptce3_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptce3_flex2_key_blocks
+ * ACL key. The key must be masked according to eRP (if exists) or
+ * according to master mask.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20,
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce3_erp_id
+ * eRP ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4);
+
+/* reg_ptce3_delta_start
+ * Start point of delta_value and delta_mask, in bits. Must not exceed
+ * num_key_blocks * 36 - 8. Reserved when delta_mask = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10);
+
+/* reg_ptce3_delta_mask
+ * Delta mask.
+ * 0 - Ignore relevant bit in delta_value
+ * 1 - Compare relevant bit in delta_value
+ * Delta mask must not be set for reserved fields in the key blocks.
+ * Note: No delta when no eRPs. Thus, for regions with
+ * PERERP.erpt_pointer_valid = 0 the delta mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8);
+
+/* reg_ptce3_delta_value
+ * Delta value.
+ * Bits which are masked by delta_mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8);
+
+/* reg_ptce3_prune_vector
+ * Pruning vector relative to the PERPT.erp_id.
+ * Used for reducing lookups.
+ * 0 - NEED: Do a lookup using the eRP.
+ * 1 - PRUNE: Do not perform a lookup using the eRP.
+ * May be modified by PEAPBL and PEAPBM.
+ * Note: In Spectrum-2, a region of 8 key blocks must be set to either
+ * all 1's or all 0's.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1);
+
+/* reg_ptce3_prune_ctcam
+ * Pruning on C-TCAM. Used for reducing lookups.
+ * 0 - NEED: Do a lookup in the C-TCAM.
+ * 1 - PRUNE: Do not perform a lookup in the C-TCAM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1);
+
+/* reg_ptce3_large_exists
+ * Large entry key ID exists.
+ * Within the region:
+ * 0 - SINGLE: The large_entry_key_id is not currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will be added.
+ * For rule delete: The MSB of the key will be removed.
+ * 1 - NON_SINGLE: The large_entry_key_id is currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will not be added.
+ * For rule delete: The MSB of the key will not be removed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1);
+
+/* reg_ptce3_large_entry_key_id
+ * Large entry key ID.
+ * A key for 12 key blocks rules. Reserved when region has less than 12 key
+ * blocks. Must be different for different keys which have the same common
+ * 6 key blocks (MSB, blocks 6..11) key within a region.
+ * Range is 0..cap_max_pe_large_key_id - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24);
+
+/* reg_ptce3_action_pointer
+ * Pointer to action.
+ * Range is 0..cap_max_kvd_action_sets - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24);
+
+static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
+ enum mlxsw_reg_ptce3_op op,
+ u32 priority,
+ const char *tcam_region_info,
+ const char *key, u8 erp_id,
+ bool large_exists, u32 lkey_id,
+ u32 action_pointer)
+{
+ MLXSW_REG_ZERO(ptce3, payload);
+ mlxsw_reg_ptce3_v_set(payload, valid);
+ mlxsw_reg_ptce3_op_set(payload, op);
+ mlxsw_reg_ptce3_priority_set(payload, priority);
+ mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
+ mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
+ mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
+ mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
+ mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
+}
+
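A minimal insert sketch; priority, key, erp_id and the rest would come from the ACL core, and all local names here are illustrative:

char ptce3_pl[MLXSW_REG_PTCE3_LEN];

mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
		     priority, tcam_region_info, key, erp_id,
		     large_exists, lkey_id, action_pointer);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ptce3), ptce3_pl);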
+/* PERCR - Policy-Engine Region Configuration Register
+ * ---------------------------------------------------
+ * This register configures the region parameters. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERCR_ID 0x302A
+#define MLXSW_REG_PERCR_LEN 0x80
+
+MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN);
+
+/* reg_percr_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16);
+
+/* reg_percr_atcam_ignore_prune
+ * Ignore prune_vector by other A-TCAM rules. Used e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1);
+
+/* reg_percr_ctcam_ignore_prune
+ * Ignore prune_ctcam by other A-TCAM rules. Used e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1);
+
+/* reg_percr_bf_bypass
+ * Bloom filter bypass.
+ * 0 - Bloom filter is used (default)
+ * 1 - Bloom filter is bypassed. The bypass is an OR condition of
+ * region_id or eRP. See PERPT.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1);
+
+/* reg_percr_master_mask
+ * Master mask. Logical OR mask of all masks of all rules of a region
+ * (both A-TCAM and C-TCAM). When there are no eRPs
+ * (erpt_pointer_valid = 0), then this provides the mask.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96);
+
+static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
+{
+ MLXSW_REG_ZERO(percr, payload);
+ mlxsw_reg_percr_region_id_set(payload, region_id);
+ mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_bf_bypass_set(payload, true);
+}
+
+/* PERERP - Policy-Engine Region eRP Register
+ * ------------------------------------------
+ * This register configures the region eRP. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERERP_ID 0x302B
+#define MLXSW_REG_PERERP_LEN 0x1C
+
+MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN);
+
+/* reg_pererp_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16);
+
+/* reg_pererp_ctcam_le
+ * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1);
+
+/* reg_pererp_erpt_pointer_valid
+ * erpt_pointer is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1);
+
+/* reg_pererp_erpt_bank_pointer
+ * Pointer to eRP table bank. May be modified at any time.
+ * Range 0..cap_max_erp_table_banks-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4);
+
+/* reg_pererp_erpt_pointer
+ * Pointer to eRP table within the eRP bank. Can be changed for an
+ * existing region.
+ * Range 0..cap_max_erp_table_size-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8);
+
+/* reg_pererp_erpt_vector
+ * Vector of allowed eRP indexes starting from erpt_pointer within the
+ * erpt_bank_pointer. Next entries will be in next bank.
+ * Note that eRP index is used and not eRP ID.
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1);
+
+/* reg_pererp_master_rp_id
+ * Master RP ID. When there are no eRPs, then this provides the eRP ID
+ * for the lookup. Can be changed for an existing region.
+ * Reserved when erpt_pointer_valid = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4);
+
+static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_pererp_erpt_vector_set(payload, bit, true);
+}
+
+static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
+ bool ctcam_le, bool erpt_pointer_valid,
+ u8 erpt_bank_pointer, u8 erpt_pointer,
+ u8 master_rp_id)
+{
+ MLXSW_REG_ZERO(pererp, payload);
+ mlxsw_reg_pererp_region_id_set(payload, region_id);
+ mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le);
+ mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid);
+ mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer);
+ mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer);
+ mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
+}
+
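Sketch of pointing a region at an eRP table; the bank and pointer values are illustrative, and master_rp_id is passed as 0 since it is reserved once erpt_pointer_valid is set:

char pererp_pl[MLXSW_REG_PERERP_LEN];
unsigned long erp_vector[1] = { 0 };

__set_bit(0, erp_vector);		/* allow eRP index 0 */
mlxsw_reg_pererp_pack(pererp_pl, region_id, false /* ctcam_le */,
		      true /* erpt_pointer_valid */,
		      erpt_bank_pointer, erpt_pointer, 0);
mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_vector, 4);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pererp), pererp_pl);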
+/* IEDR - Infrastructure Entry Delete Register
+ * -------------------------------------------
+ * This register is used for deleting entries from the entry tables.
+ * It is legitimate to attempt to delete a nonexistent entry (the device
+ * treats this as a successful operation).
+ */
+#define MLXSW_REG_IEDR_ID 0x3804
+#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_IEDR_REC_MAX_COUNT 64
+#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \
+ MLXSW_REG_IEDR_REC_LEN * \
+ MLXSW_REG_IEDR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN);
+
+/* reg_iedr_num_rec
+ * Number of records.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8);
+
+/* reg_iedr_rec_type
+ * Resource type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_size
+ * Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_index_start
+ * Resource index start.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24,
+ MLXSW_REG_IEDR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_iedr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(iedr, payload);
+}
+
+static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index,
+ u8 rec_type, u16 rec_size,
+ u32 rec_index_start)
+{
+ u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type);
+ mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size);
+ mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start);
+}
+
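Up to 64 records can be batched per transaction. A sketch with hypothetical record types and indexes (the rec_type encoding is device-specific and not defined by this patch):

char iedr_pl[MLXSW_REG_IEDR_LEN];

mlxsw_reg_iedr_pack(iedr_pl);
/* record 0: delete a single entry of (hypothetical) type 0x23 */
mlxsw_reg_iedr_rec_pack(iedr_pl, 0, 0x23, 1, entry_index);
/* record 1: delete four consecutive entries starting at base_index */
mlxsw_reg_iedr_rec_pack(iedr_pl, 1, 0x28, 4, base_index);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(iedr), iedr_pl);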
+/* QPTS - QoS Priority Trust State Register
+ * ----------------------------------------
+ * This register controls the port policy to calculate the switch priority and
+ * packet color based on incoming packet fields.
+ */
+#define MLXSW_REG_QPTS_ID 0x4002
+#define MLXSW_REG_QPTS_LEN 0x8
+
+MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
+
+/* reg_qpts_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qpts_trust_state {
+ MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP = 2, /* For MPLS, trust EXP. */
+};
+
+/* reg_qpts_trust_state
+ * Trust state for a given port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
+
+static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ MLXSW_REG_ZERO(qpts, payload);
+
+ mlxsw_reg_qpts_local_port_set(payload, local_port);
+ mlxsw_reg_qpts_trust_state_set(payload, ts);
+}
+
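Sketch of switching a port to DSCP trust, assuming spectrum-style context (an mlxsw_sp_port with its local_port and the mlxsw_sp->core handle):

char qpts_pl[MLXSW_REG_QPTS_LEN];

mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port,
		    MLXSW_REG_QPTS_TRUST_STATE_DSCP);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl);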
/* QPCR - QoS Policer Configuration Register
* -----------------------------------------
* The QPCR register is used to create policers - that limit
@@ -2753,6 +3332,219 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
+/* QRWE - QoS ReWrite Enable
+ * -------------------------
+ * This register configures the rewrite enable per receive port.
+ */
+#define MLXSW_REG_QRWE_ID 0x400F
+#define MLXSW_REG_QRWE_LEN 0x08
+
+MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
+
+/* reg_qrwe_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported. No support for router port.
+ */
+MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8);
+
+/* reg_qrwe_dscp
+ * Whether to enable DSCP rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
+
+/* reg_qrwe_pcp
+ * Whether to enable PCP and DEI rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
+
+static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port,
+ bool rewrite_pcp, bool rewrite_dscp)
+{
+ MLXSW_REG_ZERO(qrwe, payload);
+ mlxsw_reg_qrwe_local_port_set(payload, local_port);
+ mlxsw_reg_qrwe_pcp_set(payload, rewrite_pcp);
+ mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp);
+}
+
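Sketch of enabling DSCP rewrite only, under the same assumed spectrum-style context as above:

char qrwe_pl[MLXSW_REG_QRWE_LEN];

/* rewrite_pcp = false, rewrite_dscp = true */
mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port, false, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);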
+/* QPDSM - QoS Priority to DSCP Mapping
+ * ------------------------------------
+ * QoS Priority to DSCP Mapping Register
+ */
+#define MLXSW_REG_QPDSM_ID 0x4011
+#define MLXSW_REG_QPDSM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN 0x4 /* record length */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT 16
+#define MLXSW_REG_QPDSM_LEN (MLXSW_REG_QPDSM_BASE_LEN + \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
+
+/* reg_qpdsm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8);
+
+/* reg_qpdsm_prio_entry_color0_e
+ * Enable update of the entry for color 0 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 31, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color0_dscp
+ * DSCP field in the outer label of the packet for color 0 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 24, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_e
+ * Enable update of the entry for color 1 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 23, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_dscp
+ * DSCP field in the outer label of the packet for color 1 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 16, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_e
+ * Enable update of the entry for color 2 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_dscp
+ * DSCP field in the outer label of the packet for color 2 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdsm, payload);
+ mlxsw_reg_qpdsm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
+{
+ mlxsw_reg_qpdsm_prio_entry_color0_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color0_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color1_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color1_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color2_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
+}
+
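Because all records share one payload, a full priority-to-DSCP map fits in a single write. A sketch, with dscp_map as an assumed 8-entry table (IEEE_8021QAZ_MAX_TCS comes from linux/dcbnl.h):

char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
int prio;

mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
	mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, prio, dscp_map[prio]);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);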
+/* QPDPM - QoS Port DSCP to Priority Mapping Register
+ * --------------------------------------------------
+ * This register controls the mapping from DSCP field to
+ * Switch Priority for IP packets.
+ */
+#define MLXSW_REG_QPDPM_ID 0x4013
+#define MLXSW_REG_QPDPM_BASE_LEN 0x4 /* base length, without records */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN 0x2 /* record length */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_QPDPM_LEN (MLXSW_REG_QPDPM_BASE_LEN + \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
+
+/* reg_qpdpm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8);
+
+/* reg_qpdpm_dscp_e
+ * Enable update of the specific entry. When cleared, the switch_prio and color
+ * fields are ignored and the previous switch_prio and color values are
+ * preserved.
+ * Access: WO
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_e, MLXSW_REG_QPDPM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdpm_dscp_prio
+ * The new Switch Priority value for the relevant DSCP value.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
+ MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdpm, payload);
+ mlxsw_reg_qpdpm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio)
+{
+ mlxsw_reg_qpdpm_dscp_entry_e_set(payload, dscp, 1);
+ mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio);
+}
+
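Sketch of remapping a single DSCP code point; entries that are not packed keep their previous mapping because their e bit stays 0:

char qpdpm_pl[MLXSW_REG_QPDPM_LEN];

mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, 46, 6);	/* e.g. DSCP 46 (EF) -> prio 6 */
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);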
+/* QTCTM - QoS Switch Traffic Class Table is Multicast-Aware Register
+ * ------------------------------------------------------------------
+ * This register configures whether the Switch Priority to Traffic Class
+ * mapping is based on Multicast packet indication. If so, multicast packets
+ * get a Traffic Class that is (cap_max_tclass_data/2) above the value
+ * configured by QTCT.
+ * By default, Switch Priority to Traffic Class mapping is not based on
+ * Multicast packet indication.
+ */
+#define MLXSW_REG_QTCTM_ID 0x401A
+#define MLXSW_REG_QTCTM_LEN 0x08
+
+MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
+
+/* reg_qtctm_local_port
+ * Local port number.
+ * No support for CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
+
+/* reg_qtctm_mc
+ * Multicast Mode
+ * Whether Switch Priority to Traffic Class mapping is based on Multicast packet
+ * indication (default is 0, not based on Multicast packet indication).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
+
+static inline void
+mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
+{
+ MLXSW_REG_ZERO(qtctm, payload);
+ mlxsw_reg_qtctm_local_port_set(payload, local_port);
+ mlxsw_reg_qtctm_mc_set(payload, mc);
+}
+
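Sketch of enabling multicast-aware Traffic Class mapping on a port, under the same assumed context:

char qtctm_pl[MLXSW_REG_QTCTM_LEN];

mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);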
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -3350,6 +4142,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
@@ -3508,6 +4301,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2819 Counter Group */
+
+/* reg_ppcnt_ether_stats_pkts64octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts65to127octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts128to255octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts256to511octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts512to1023octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1024to1518octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1519to2047octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts2048to4095octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts4096to8191octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts8192to10239octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4338,6 +5193,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
*/
MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+/* reg_ritr_if_vrrp_id_ipv6
+ * VRRP ID for IPv6
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8);
+
+/* reg_ritr_if_vrrp_id_ipv4
+ * VRRP ID for IPv4
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
+
/* VLAN Interface */
/* reg_ritr_vlan_if_vid
@@ -7871,6 +8740,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
+ MLXSW_REG(pgcr),
MLXSW_REG(ppbt),
MLXSW_REG(pacl),
MLXSW_REG(pagt),
@@ -7879,9 +8749,20 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(prcr),
MLXSW_REG(pefa),
MLXSW_REG(ptce2),
+ MLXSW_REG(perpt),
+ MLXSW_REG(perar),
+ MLXSW_REG(ptce3),
+ MLXSW_REG(percr),
+ MLXSW_REG(pererp),
+ MLXSW_REG(iedr),
+ MLXSW_REG(qpts),
MLXSW_REG(qpcr),
MLXSW_REG(qtct),
MLXSW_REG(qeec),
+ MLXSW_REG(qrwe),
+ MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdpm),
+ MLXSW_REG(qtctm),
MLXSW_REG(pmlp),
MLXSW_REG(pmtu),
MLXSW_REG(ptys),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index fd9299ccec72..79a31de7c825 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/resources.h
- * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_RESOURCES_H
#define _MLXSW_RESOURCES_H
@@ -42,6 +11,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
+ MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
MLXSW_RES_ID_CQE_V0,
MLXSW_RES_ID_CQE_V1,
@@ -63,6 +34,13 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
+ MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
@@ -83,6 +61,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
+ [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
[MLXSW_RES_ID_CQE_V0] = 0x2210,
[MLXSW_RES_ID_CQE_V1] = 0x2211,
@@ -104,6 +84,13 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
+ [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 968b88af2ef5..6070d1591d1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -74,17 +41,27 @@
#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
-#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1620
-#define MLXSW_FWREV_SUBMINOR 192
-#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+
+#define MLXSW_SP1_FWREV_MAJOR 13
+#define MLXSW_SP1_FWREV_MINOR 1702
+#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
-#define MLXSW_SP_FW_FILENAME \
- "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
- "." __stringify(MLXSW_FWREV_MINOR) \
- "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
+static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
+ .major = MLXSW_SP1_FWREV_MAJOR,
+ .minor = MLXSW_SP1_FWREV_MINOR,
+ .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+ .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
+};
+
+#define MLXSW_SP1_FW_FILENAME \
+ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP1_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
-static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
@@ -338,35 +315,50 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+ const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
+ const char *fw_filename = mlxsw_sp->fw_filename;
const struct firmware *firmware;
int err;
+ /* Don't check if driver does not require it */
+ if (!req_rev || !fw_filename)
+ return 0;
+
/* Validate driver & FW are compatible */
- if (rev->major != MLXSW_FWREV_MAJOR) {
+ if (rev->major != req_rev->major) {
WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
- rev->major, MLXSW_FWREV_MAJOR);
+ rev->major, req_rev->major);
return -EINVAL;
}
- if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
+ if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
- err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+ err = request_firmware_direct(&firmware, fw_filename,
mlxsw_sp->bus_info->dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
return err;
}
err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
release_firmware(firmware);
- return err;
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+
+ /* On FW flash success, tell the caller FW reset is needed
+ * if current FW supports it.
+ */
+ if (rev->minor >= req_rev->can_reset_minor)
+ return err ? err : -EAGAIN;
+ else
+ return 0;
}
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
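The check above accepts any running firmware from the same branch as the
required revision, where a branch is simply the minor number divided by
100; on a successful flash, the function now also returns -EAGAIN when
the running firmware is recent enough to be reset in place. A minimal
stand-alone sketch of the branch comparison (version values
illustrative, not taken from the patch):

#include <stdio.h>

#define FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

int main(void)
{
	unsigned int running = 1620;	/* e.g. FW 13.1620.192 on the device */
	unsigned int required = 1702;	/* e.g. driver ships 13.1702.6 */

	if (FWREV_MINOR_TO_BRANCH(running) == FWREV_MINOR_TO_BRANCH(required))
		printf("same branch, keep the running firmware\n");
	else
		printf("branch %u != %u, flash the bundled firmware\n",
		       FWREV_MINOR_TO_BRANCH(running),
		       FWREV_MINOR_TO_BRANCH(required));
	return 0;
}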
@@ -1441,6 +1433,11 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
return 0;
case TC_CLSFLOWER_STATS:
return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
+ case TC_CLSFLOWER_TMPLT_CREATE:
+ return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
+ case TC_CLSFLOWER_TMPLT_DESTROY:
+ mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -1503,7 +1500,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tcf_block *block, bool ingress)
+ struct tcf_block *block, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
@@ -1518,7 +1516,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
block_cb = __tcf_block_cb_register(block,
mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block);
+ mlxsw_sp, acl_block, extack);
if (IS_ERR(block_cb)) {
err = PTR_ERR(block_cb);
goto err_cb_register;
@@ -1541,7 +1539,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
err_block_bind:
if (!tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
err_cb_register:
mlxsw_sp_acl_block_destroy(acl_block);
}
@@ -1571,7 +1569,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (!err && !tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
mlxsw_sp_acl_block_destroy(acl_block);
}
}
@@ -1596,11 +1594,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
switch (f->command) {
case TC_BLOCK_BIND:
err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ mlxsw_sp_port, f->extack);
if (err)
return err;
err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
- f->block, ingress);
+ f->block, ingress,
+ f->extack);
if (err) {
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return err;
@@ -1712,7 +1711,8 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ sizeof(drvinfo->driver));
strlcpy(drvinfo->version, mlxsw_sp_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -1873,6 +1873,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
+ {
+ .str = "ether_pkts64octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
+ },
+ {
+ .str = "ether_pkts65to127octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
+ },
+ {
+ .str = "ether_pkts128to255octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
+ },
+ {
+ .str = "ether_pkts256to511octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
+ },
+ {
+ .str = "ether_pkts512to1023octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
+ },
+ {
+ .str = "ether_pkts1024to1518octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
+ },
+ {
+ .str = "ether_pkts1519to2047octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
+ },
+ {
+ .str = "ether_pkts2048to4095octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
+ },
+ {
+ .str = "ether_pkts4096to8191octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
+ },
+ {
+ .str = "ether_pkts8192to10239octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1925,9 +1971,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
- (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
- MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
- IEEE_8021QAZ_MAX_TCS)
+ MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
+ IEEE_8021QAZ_MAX_TCS) + \
+ (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
+ TC_MAX_QUEUE))
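The reworked length reflects two changes: the new RFC 2819 table is
counted once, and the per-TC table is now walked over TC_MAX_QUEUE (16)
instead of IEEE_8021QAZ_MAX_TCS (8). A worked count with illustrative
table sizes (only the RFC 2819 length of 10 is fixed by this hunk; the
other lengths are ARRAY_SIZE() of tables not shown in full here):

/* Assumed sizes, for illustration only */
#define HW_STATS_LEN		17
#define RFC_2819_STATS_LEN	10	/* the table added above */
#define PRIO_STATS_LEN		5
#define TC_STATS_LEN		2
#define MAX_TCS			8	/* IEEE_8021QAZ_MAX_TCS */
#define MAX_QUEUE		16	/* TC_MAX_QUEUE */

/* 17 + 10 + (5 * 8) + (2 * 16) = 99 counters per port */
#define ETHTOOL_STATS_LEN \
	(HW_STATS_LEN + RFC_2819_STATS_LEN + \
	 (PRIO_STATS_LEN * MAX_TCS) + (TC_STATS_LEN * MAX_QUEUE))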
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
@@ -1964,11 +2012,16 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < TC_MAX_QUEUE; i++)
mlxsw_sp_port_get_tc_strings(&p, i);
break;
@@ -2003,10 +2056,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
switch (grp) {
case MLXSW_REG_PPCNT_IEEE_8023_CNT:
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2819_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2056,6 +2113,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2819 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -2064,7 +2126,7 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
}
/* Per-TC Counters */
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ for (i = 0; i < TC_MAX_QUEUE; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
data, data_index);
data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
@@ -2711,9 +2773,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
false, 0);
if (err)
return err;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ false, 0);
+ if (err)
+ return err;
}
- /* Make sure the max shaper is disabled in all hierarcies that
+ /* Make sure the max shaper is disabled in all hierarchies that
* support it.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
@@ -2748,6 +2817,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
+static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtctm_pl[MLXSW_REG_QTCTM_LEN];
+
+ mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
@@ -2876,6 +2955,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_ets_init;
}
+ err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_tc_mc_mode;
+ }
+
/* ETS and buffers must be initialized before DCB. */
err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
if (err) {
@@ -2932,6 +3018,8 @@ err_port_qdiscs_init:
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
@@ -2966,6 +3054,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
kfree(mlxsw_sp_port->sample);
@@ -3371,6 +3460,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -3623,10 +3714,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->bus_info = mlxsw_bus_info;
err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+ if (err)
return err;
- }
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3757,6 +3846,36 @@ err_fids_init:
return err;
}
+static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
+ mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
+static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
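Both per-ASIC init callbacks only fill in generation-specific ops
pointers and then funnel into the common mlxsw_sp_init(). A minimal
sketch of this dispatch pattern (all names hypothetical):

struct asic_ops {
	int (*setup)(void);
};

static int sp1_setup(void) { /* Spectrum-1 specific work */ return 0; }
static int sp2_setup(void) { /* Spectrum-2 specific work */ return 0; }

static const struct asic_ops sp1_ops = { .setup = sp1_setup };
static const struct asic_ops sp2_ops = { .setup = sp2_setup };

struct priv {
	const struct asic_ops *ops;	/* chosen once, at init time */
};

static int common_init(struct priv *priv)
{
	/* shared code path; behavior differs only through ops */
	return priv->ops->setup();
}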
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3777,7 +3896,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_kvdl_fini(mlxsw_sp);
}
-static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
+static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
@@ -3803,6 +3922,28 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
},
};
+static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
+ .used_max_mid = 1,
+ .max_mid = MLXSW_SP_MID_MAX,
+ .used_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .max_fid_offset_flood_tables = 3,
+ .fid_offset_flood_table_size = VLAN_N_VID - 1,
+ .max_fid_flood_tables = 3,
+ .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -3839,7 +3980,7 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
DEVLINK_RESOURCE_UNIT_ENTRY);
}
-static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct devlink_resource_size_params hash_single_size_params;
@@ -3850,7 +3991,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
const struct mlxsw_config_profile *profile;
int err;
- profile = &mlxsw_sp_config_profile;
+ profile = &mlxsw_sp1_config_profile;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
return -EIO;
@@ -3876,7 +4017,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
- err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
+ err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
if (err)
return err;
@@ -3905,6 +4046,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
return 0;
}
+static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_sp1_resources_kvd_register(mlxsw_core);
+}
+
+static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return 0;
+}
+
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
@@ -3960,10 +4111,10 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
-static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = mlxsw_sp_driver_name,
+static struct mlxsw_driver mlxsw_sp1_driver = {
+ .kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp_init,
+ .init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
@@ -3979,10 +4130,35 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
- .resources_register = mlxsw_sp_resources_register,
+ .resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp_config_profile,
+ .profile = &mlxsw_sp1_config_profile,
+ .res_query_enabled = true,
+};
+
+static struct mlxsw_driver mlxsw_sp2_driver = {
+ .kind = mlxsw_sp2_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
};
@@ -4397,7 +4573,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
- !netif_is_ovs_master(upper_dev)) {
+ !netif_is_ovs_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
@@ -4423,6 +4600,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
@@ -4461,6 +4643,9 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
}
break;
}
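The macvlan checks follow the usual two-phase notifier contract:
NETDEV_PRECHANGEUPPER may veto with an extack message before the
topology changes, while NETDEV_CHANGEUPPER only reacts after the fact.
A hypothetical skeleton of that contract (supported() is a placeholder
predicate, not a kernel function):

static bool supported(const struct net_device *upper_dev);

static int demo_upper_event(unsigned long event,
			    struct netdev_notifier_changeupper_info *info,
			    struct netlink_ext_ack *extack)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		if (!supported(info->upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;	/* veto before linking */
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* the link already changed; join or leave accordingly */
		break;
	}
	return 0;
}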
@@ -4545,8 +4730,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
+ if (!netif_is_bridge_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
if (!info->linking)
@@ -4558,6 +4744,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4571,6 +4762,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
upper_dev);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4620,6 +4814,64 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+
+ if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+
+ return -EOPNOTSUPP;
+}
+
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info = ptr;
@@ -4661,6 +4913,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (netif_is_macvlan(dev))
+ err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
return notifier_from_errno(err);
}
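Whichever per-type handler runs, its errno is folded into the notifier
chain's return value. For reference, a minimal illustration of that
conversion (demo_notifier_return() is invented for this sketch):

static int demo_notifier_return(int err)
{
	/* 0 becomes NOTIFY_OK; a negative errno becomes
	 * NOTIFY_STOP_MASK | (NOTIFY_OK - err), which the core can map
	 * back to -errno with notifier_to_errno(). */
	return notifier_from_errno(err);
}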
@@ -4681,14 +4937,24 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
.notifier_call = mlxsw_sp_inet6addr_event,
};
-static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
+static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, },
};
-static struct pci_driver mlxsw_sp_pci_driver = {
- .name = mlxsw_sp_driver_name,
- .id_table = mlxsw_sp_pci_id_table,
+static struct pci_driver mlxsw_sp1_pci_driver = {
+ .name = mlxsw_sp1_driver_name,
+ .id_table = mlxsw_sp1_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp2_pci_driver = {
+ .name = mlxsw_sp2_driver_name,
+ .id_table = mlxsw_sp2_pci_id_table,
};
static int __init mlxsw_sp_module_init(void)
@@ -4700,19 +4966,31 @@ static int __init mlxsw_sp_module_init(void)
register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
- err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+ err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
+ if (err)
+ goto err_sp1_core_driver_register;
+
+ err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
+ if (err)
+ goto err_sp2_core_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
- goto err_core_driver_register;
+ goto err_sp1_pci_driver_register;
- err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
+ err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
if (err)
- goto err_pci_driver_register;
+ goto err_sp2_pci_driver_register;
return 0;
-err_pci_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
-err_core_driver_register:
+err_sp2_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+err_sp2_core_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
+err_sp1_core_driver_register:
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
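The init path now registers four entities and unwinds them in reverse
order on failure; each label intentionally skips the step that failed,
which is why the label for a failed sp2 PCI registration must
unregister the sp1 PCI driver, not sp2. A compact stand-alone sketch of
this ladder (names hypothetical):

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static void unregister_a(void) { }

static int demo_init(void)
{
	int err;

	err = register_a();
	if (err)
		goto err_a;
	err = register_b();
	if (err)
		goto err_b;
	return 0;

err_b:
	unregister_a();	/* b was never registered, so only a is undone */
err_a:
	return err;
}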
@@ -4722,8 +5000,10 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
- mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4736,5 +5016,6 @@ module_exit(mlxsw_sp_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
-MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
-MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4a519d8edec8..3ae930196741 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H
@@ -54,6 +21,7 @@
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
+#include "reg.h"
#define MLXSW_SP_FID_8021D_MAX 1024
@@ -145,6 +113,9 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
+struct mlxsw_sp_kvdl_ops;
+struct mlxsw_sp_mr_tcam_ops;
+struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -168,6 +139,13 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ const struct mlxsw_fw_rev *req_rev;
+ const char *fw_filename;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ const struct mlxsw_afa_ops *afa_ops;
+ const struct mlxsw_afk_ops *afk_ops;
+ const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
+ const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
};
static inline struct mlxsw_sp_upper *
@@ -233,6 +211,7 @@ struct mlxsw_sp_port {
struct ieee_ets *ets;
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
+ enum mlxsw_reg_qpts_trust_state trust_state;
} dcb;
struct {
u8 module;
@@ -407,6 +386,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
@@ -435,15 +416,62 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
+enum mlxsw_sp_kvdl_entry_type {
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+};
+
+static inline unsigned int
+mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
+{
+ switch (type) {
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+ default:
+ return 1;
+ }
+}
+
+struct mlxsw_sp_kvdl_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+ void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+ int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+ int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
+};
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index);
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size);
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+
+/* spectrum1_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+
+/* spectrum2_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -452,44 +480,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
-enum mlxsw_sp_acl_profile {
- MLXSW_SP_ACL_PROFILE_FLOWER,
-};
-
-struct mlxsw_sp_acl_profile_ops {
- size_t ruleset_priv_size;
- int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv);
- void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
- int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- u16 (*ruleset_group_id)(void *ruleset_priv);
- size_t rule_priv_size;
- int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
- struct mlxsw_sp_acl_rule_info *rulei);
- void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
- int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
- bool *activity);
-};
-
-struct mlxsw_sp_acl_ops {
- size_t priv_size;
- int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
- void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
- const struct mlxsw_sp_acl_profile_ops *
- (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_acl_profile profile);
-};
-
struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
@@ -507,6 +505,7 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
+bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
@@ -514,7 +513,8 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- enum mlxsw_sp_acl_profile profile);
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
@@ -541,25 +541,30 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block,
- struct net_device *out_dev);
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct net_device *out_dev);
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u32 action, u16 vid, u16 proto, u8 prio);
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei);
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u16 fid);
+ u16 fid, struct netlink_ext_ack *extack);
struct mlxsw_sp_acl_rule;
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- unsigned long cookie);
+ unsigned long cookie,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
@@ -582,7 +587,52 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
/* spectrum_acl_tcam.c */
-extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_acl_tcam;
+struct mlxsw_sp_acl_tcam_region;
+
+struct mlxsw_sp_acl_tcam_ops {
+ enum mlxsw_reg_ptar_key_type key_type;
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ size_t region_priv_size;
+ int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *region);
+ void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
+ int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region);
+ size_t chunk_priv_size;
+ void (*chunk_init)(void *region_priv, void *chunk_priv,
+ unsigned int priority);
+ void (*chunk_fini)(void *chunk_priv);
+ size_t entry_priv_size;
+ int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv);
+ int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity);
+};
+
+/* spectrum1_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
+
+/* spectrum2_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops;
+
+/* spectrum_acl_flex_actions.c */
+extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
+extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
+
+/* spectrum_acl_flex_keys.c */
+extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -594,6 +644,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f);
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -631,4 +687,40 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_mr.c */
+enum mlxsw_sp_mr_route_prio {
+ MLXSW_SP_MR_ROUTE_PRIO_SG,
+ MLXSW_SP_MR_ROUTE_PRIO_STARG,
+ MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
+struct mlxsw_sp_mr_route_key;
+
+struct mlxsw_sp_mr_tcam_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(void *priv);
+ size_t route_priv_size;
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block);
+};
+
+/* spectrum1_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
+
+/* spectrum2_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
+
#endif
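With the KVDL interface reworked, callers pass the entry type on both
allocation and free. A hypothetical caller-side sketch using the
signatures declared above (the wrapper function is invented for
illustration):

static int demo_kvdl_use(struct mlxsw_sp *mlxsw_sp)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
				  1, &kvdl_index);
	if (err)
		return err;
	/* ... program the action set at kvdl_index ... */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
			   1, kvdl_index);
	return 0;
}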
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
new file mode 100644
index 000000000000..2a9eac90002e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp1_acl_tcam_region {
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ } catchall;
+};
+
+struct mlxsw_sp1_acl_tcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp1_acl_tcam_entry {
+ struct mlxsw_sp_acl_ctcam_entry centry;
+};
+
+static int
+mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp1_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+}
+
+static int
+mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
+ &region->catchall.cchunk,
+ MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ if (IS_ERR(rulei)) {
+ err = PTR_ERR(rulei);
+ goto err_rulei_create;
+ }
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (WARN_ON(err))
+ goto err_rulei_act_continue;
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry,
+ rulei, false);
+ if (err)
+ goto err_entry_add;
+ region->catchall.rulei = rulei;
+ return 0;
+
+err_entry_add:
+err_rulei_commit:
+err_rulei_act_continue:
+ mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry);
+ mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ int err;
+
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
+ _region,
+ &mlxsw_sp1_acl_ctcam_region_ops);
+ if (err)
+ return err;
+ err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
+ if (err)
+ goto err_catchall_add;
+ region->region = _region;
+ return 0;
+
+err_catchall_add:
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region);
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+ priority);
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+}
+
+static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry,
+ rulei, false);
+}
+
+static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *_region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ _region->tcam_region_info, offset, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
+static int
+mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+ unsigned int offset;
+
+ offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry);
+ return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp,
+ region->region,
+ offset, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX,
+ .priv_size = 0,
+ .init = mlxsw_sp1_acl_tcam_init,
+ .fini = mlxsw_sp1_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region),
+ .region_init = mlxsw_sp1_acl_tcam_region_init,
+ .region_fini = mlxsw_sp1_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp1_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp1_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry),
+ .entry_add = mlxsw_sp1_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp1_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get,
+};
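The Spectrum-1 backend keeps a lowest-priority catch-all entry in each
region whose only action is "continue", so a lookup that matches no
real rule still takes a defined path. A toy user-space model of that
idea (all names invented):

#include <stdbool.h>
#include <stdio.h>

struct rule {
	unsigned int prio;		/* lower value wins */
	bool (*match)(int pkt);
	const char *action;
};

static bool match_none(int pkt) { (void)pkt; return false; }
static bool match_all(int pkt) { (void)pkt; return true; }

int main(void)
{
	/* rules kept sorted by priority; catch-all is always last */
	struct rule rules[] = {
		{ 10, match_none, "drop" },
		{ ~0U, match_all, "continue" },
	};
	int pkt = 42;

	for (unsigned int i = 0; i < 2; i++) {
		if (rules[i].match(pkt)) {
			printf("action: %s\n", rules[i].action);
			break;
		}
	}
	return 0;
}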
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
new file mode 100644
index 000000000000..09ee0a807747
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP1_KVDL_SINGLE_BASE 0
+#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP1_KVDL_SINGLE_END \
+ (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
+
+#define MLXSW_SP1_KVDL_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
+#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP1_KVDL_CHUNKS_END \
+ (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
+ (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
+#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
+
+struct mlxsw_sp1_kvdl_part_info {
+ unsigned int part_index;
+ unsigned int start_index;
+ unsigned int end_index;
+ unsigned int alloc_size;
+ enum mlxsw_sp_resource_id resource_id;
+};
+
+enum mlxsw_sp1_kvdl_part_id {
+ MLXSW_SP1_KVDL_PART_ID_SINGLE,
+ MLXSW_SP1_KVDL_PART_ID_CHUNKS,
+ MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
+};
+
+#define MLXSW_SP1_KVDL_PART_INFO(id) \
+[MLXSW_SP1_KVDL_PART_ID_##id] = { \
+ .start_index = MLXSW_SP1_KVDL_##id##_BASE, \
+ .end_index = MLXSW_SP1_KVDL_##id##_END, \
+ .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \
+ .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
+}
+
+static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
+ MLXSW_SP1_KVDL_PART_INFO(SINGLE),
+ MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
+ MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
+};
+
+#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
+
+struct mlxsw_sp1_kvdl_part {
+ struct mlxsw_sp1_kvdl_part_info info;
+ unsigned long usage[0]; /* Entries */
+};
+
+struct mlxsw_sp1_kvdl {
+ struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
+};
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
+ unsigned int alloc_size)
+{
+ struct mlxsw_sp1_kvdl_part *part, *min_part = NULL;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (alloc_size <= part->info.alloc_size &&
+ (!min_part ||
+ part->info.alloc_size <= min_part->info.alloc_size))
+ min_part = part;
+ }
+
+ return min_part ?: ERR_PTR(-ENOBUFS);
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
+{
+ struct mlxsw_sp1_kvdl_part *part;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (kvdl_index >= part->info.start_index &&
+ kvdl_index <= part->info.end_index)
+ return part;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ unsigned int entry_index)
+{
+ return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ u32 kvdl_index)
+{
+ return (kvdl_index - info->start_index) / info->alloc_size;
+}
+
+static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
+ u32 *p_kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index, nr_entries;
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ entry_index = find_first_zero_bit(part->usage, nr_entries);
+ if (entry_index == nr_entries)
+ return -ENOBUFS;
+ __set_bit(entry_index, part->usage);
+
+ *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
+ u32 kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index;
+
+ entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index);
+ __clear_bit(entry_index, part->usage);
+}
+
+static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ /* Find partition with smallest allocation size satisfying the
+ * requested size.
+ */
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
+}
+
+static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
+ if (IS_ERR(part))
+ return;
+ mlxsw_sp1_kvdl_part_free(part, entry_index);
+}
+
+static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ *p_alloc_size = part->info.alloc_size;
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
+ struct mlxsw_sp1_kvdl_part *part_prev,
+ unsigned int size)
+{
+ if (!part_prev) {
+ part->info.end_index = size - 1;
+ } else {
+ part->info.start_index = part_prev->info.end_index + 1;
+ part->info.end_index = part->info.start_index + size - 1;
+ }
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp1_kvdl_part_info *info,
+ struct mlxsw_sp1_kvdl_part *part_prev)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl_part *part;
+ bool need_update = true;
+ unsigned int nr_entries;
+ size_t usage_size;
+ u64 resource_size;
+ int err;
+
+ err = devlink_resource_size_get(devlink, info->resource_id,
+ &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
+
+ nr_entries = div_u64(resource_size, info->alloc_size);
+ usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&part->info, info, sizeof(part->info));
+
+ if (need_update)
+ mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
+ return part;
+}
+
+static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_kvdl *kvdl)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info;
+ struct mlxsw_sp1_kvdl_part *part_prev = NULL;
+ int err, i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp1_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
+ part_prev);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ part_prev = kvdl->parts[i];
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (info->end_index -
+ info->start_index + 1) /
+ info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += info->alloc_size;
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
+
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ int err;
+
+ err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
+ if (err)
+ return err;
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
+ mlxsw_sp1_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_kvdl),
+ .init = mlxsw_sp1_kvdl_init,
+ .fini = mlxsw_sp1_kvdl_fini,
+ .alloc = mlxsw_sp1_kvdl_alloc,
+ .free = mlxsw_sp1_kvdl_free,
+ .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
+};
+
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ static struct devlink_resource_size_params size_params;
+ u32 kvdl_max_size;
+ int err;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ return err;
+}
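For orientation, the ceiling passed to all three size_params above is simply what remains of the whole KVD after the minimum single and double hash areas are reserved. A toy computation with made-up firmware values (the real ones come from MLXSW_CORE_RES_GET):

#include <stdio.h>

int main(void)
{
        /* Hypothetical firmware-reported sizes, in KVD entries. */
        unsigned int kvd_size = 0x40000;
        unsigned int kvd_single_min = 0x8000;
        unsigned int kvd_double_min = 0x8000;

        printf("kvdl_max_size = %#x\n",
               kvd_size - kvd_single_min - kvd_double_min);
        return 0;
}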
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
new file mode 100644
index 000000000000..c8c67536917b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp1_mr_tcam_region {
+ struct mlxsw_sp *mlxsw_sp;
+ enum mlxsw_reg_rtar_key_type rtar_key_type;
+ struct parman *parman;
+ struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp1_mr_tcam {
+ struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+};
+
+struct mlxsw_sp1_mr_tcam_route {
+ struct parman_item parman_item;
+ struct parman_prio *parman_prio;
+};
+
+static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ ntohl(key->group.addr4),
+ ntohl(key->group_mask.addr4),
+ ntohl(key->source.addr4),
+ ntohl(key->source_mask.addr4),
+ mlxsw_afa_block_first_set(afa_block));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ key->group.addr6,
+ key->group_mask.addr6,
+ key->source.addr6,
+ key->source_mask.addr6,
+ mlxsw_afa_block_first_set(afa_block));
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, 0, 0, 0, 0, NULL);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, zero_addr, zero_addr,
+ zero_addr, zero_addr, NULL);
+ break;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static struct mlxsw_sp1_mr_tcam_region *
+mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ return &mr_tcam->tcam_regions[proto];
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+ int err;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ err = parman_item_add(tcam_region->parman,
+ &tcam_region->parman_prios[prio],
+ &route->parman_item);
+ if (err)
+ return err;
+
+ route->parman_prio = &tcam_region->parman_prios[prio];
+ return 0;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ parman_item_remove(tcam_region->parman,
+ route->parman_prio, &route->parman_item);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ int err;
+
+ err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route,
+ key, prio);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+ if (err)
+ goto err_route_replace;
+ return 0;
+
+err_route_replace:
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key);
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+}
+
+#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+ mr_tcam_region->rtar_key_type,
+ MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+ mr_tcam_region->rtar_key_type, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+ mr_tcam_region->rtar_key_type, new_count);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+ mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+ from_index, count,
+ mr_tcam_region->rtar_key_type, to_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp1_mr_tcam_region_parman_resize,
+ .move = mlxsw_sp1_mr_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static int
+mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region,
+ enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+ struct parman_prio *parman_prios;
+ struct parman *parman;
+ int err;
+ int i;
+
+ mr_tcam_region->rtar_key_type = rtar_key_type;
+ mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region);
+ if (err)
+ return err;
+
+ parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops,
+ mr_tcam_region);
+ if (!parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ mr_tcam_region->parman = parman;
+
+ parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+ sizeof(*parman_prios), GFP_KERNEL);
+ if (!parman_prios) {
+ err = -ENOMEM;
+ goto err_parman_prios_alloc;
+ }
+ mr_tcam_region->parman_prios = parman_prios;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_init(mr_tcam_region->parman,
+ &mr_tcam_region->parman_prios[i], i);
+ return 0;
+
+err_parman_prios_alloc:
+ parman_destroy(parman);
+err_parman_create:
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+ kfree(mr_tcam_region->parman_prios);
+ parman_destroy(mr_tcam_region->parman);
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+ u32 rtar_key;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ return -EIO;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV4],
+ rtar_key);
+ if (err)
+ return err;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV6],
+ rtar_key);
+ if (err)
+ goto err_ipv6_region_init;
+
+ return 0;
+
+err_ipv6_region_init:
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ return err;
+}
+
+static void mlxsw_sp1_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_mr_tcam),
+ .init = mlxsw_sp1_mr_tcam_init,
+ .fini = mlxsw_sp1_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route),
+ .route_create = mlxsw_sp1_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp1_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp1_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
new file mode 100644
index 000000000000..8ca77f3e8f27
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_actions.h"
+
+struct mlxsw_sp2_acl_tcam {
+ struct mlxsw_sp_acl_atcam atcam;
+ u32 kvdl_index;
+ unsigned int kvdl_count;
+};
+
+struct mlxsw_sp2_acl_tcam_region {
+ struct mlxsw_sp_acl_atcam_region aregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp2_acl_tcam_chunk {
+ struct mlxsw_sp_acl_atcam_chunk achunk;
+};
+
+struct mlxsw_sp2_acl_tcam_entry {
+ struct mlxsw_sp_acl_atcam_entry aentry;
+ struct mlxsw_afa_block *act_block;
+};
+
+static int
+mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ struct mlxsw_sp_acl_erp *erp;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+
+ return 0;
+}
+
+static void
+mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp2_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *_tcam)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+ struct mlxsw_afa_block *afa_block;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ char pgcr_pl[MLXSW_REG_PGCR_LEN];
+ char *enc_actions;
+ int i;
+ int err;
+
+ tcam->kvdl_count = _tcam->max_regions;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, &tcam->kvdl_index);
+ if (err)
+ return err;
+
+ /* Create a flex action block and set the default action
+ * (continue), but don't commit it. We only need the encoding of
+ * the current action set, which is then written via the PEFA
+ * register to every index of every region.
+ */
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (!afa_block) {
+ err = -ENOMEM;
+ goto err_afa_block;
+ }
+ err = mlxsw_afa_block_continue(afa_block);
+ if (WARN_ON(err))
+ goto err_afa_block_continue;
+ enc_actions = mlxsw_afa_block_cur_set(afa_block);
+
+ for (i = 0; i < tcam->kvdl_count; i++) {
+ mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
+ true, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ }
+ mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
+ if (err)
+ goto err_pgcr_write;
+
+ err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam);
+ if (err)
+ goto err_atcam_init;
+
+ mlxsw_afa_block_destroy(afa_block);
+ return 0;
+
+err_atcam_init:
+err_pgcr_write:
+err_pefa_write:
+err_afa_block_continue:
+ mlxsw_afa_block_destroy(afa_block);
+err_afa_block:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+
+ mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;
+
+ region->region = _region;
+
+ return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
+ &region->aregion, _region,
+ &mlxsw_sp2_acl_ctcam_region_ops);
+}
+
+static void
+mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp_acl_atcam_region_fini(&region->aregion);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_init(&region->aregion, &chunk->achunk,
+ priority);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
+}
+
+static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, &region->aregion,
+ &chunk->achunk, &entry->aentry,
+ rulei);
+}
+
+static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, &region->aregion, &chunk->achunk,
+ &entry->aentry);
+}
+
+static int
+mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_afa_block_activity_get(entry->act_block, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
+ .priv_size = sizeof(struct mlxsw_sp2_acl_tcam),
+ .init = mlxsw_sp2_acl_tcam_init,
+ .fini = mlxsw_sp2_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region),
+ .region_init = mlxsw_sp2_acl_tcam_region_init,
+ .region_fini = mlxsw_sp2_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp2_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
+ .entry_add = mlxsw_sp2_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp2_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
new file mode 100644
index 000000000000..68c8b148bef2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "resources.h"
+
+struct mlxsw_sp2_kvdl_part_info {
+ u8 res_type;
+ /* For each defined partition we need to know how many usage
+ * bits are needed and how many indexes a single bit represents.
+ * This can be obtained by querying the appropriate firmware
+ * resources, so keep the resource IDs for this purpose in the
+ * partition definition.
+ */
+ enum mlxsw_res_id usage_bit_count_res_id;
+ enum mlxsw_res_id index_range_res_id;
+};
+
+#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \
+ _usage_bit_count_res_id, _index_range_res_id) \
+[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \
+ .res_type = _res_type, \
+ .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \
+ .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \
+}
+
+static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
+ MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE),
+ MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS,
+ MAX_KVD_ACTION_SETS),
+ MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+};
+
+#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
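Since the initializer macro hides the array layout, here is the ADJ entry expanded by hand; this is a mechanical expansion of MLXSW_SP2_KVDL_PART_INFO() as defined above, not extra code:

[MLXSW_SP_KVDL_ENTRY_TYPE_ADJ] = {
        .res_type = 0x21,
        .usage_bit_count_res_id = MLXSW_RES_ID_KVD_SIZE,
        .index_range_res_id = MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
},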
+
+struct mlxsw_sp2_kvdl_part {
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ unsigned int usage_bit_count;
+ unsigned int indexes_per_usage_bit;
+ unsigned int last_allocated_bit;
+ unsigned long usage[0]; /* Usage bits */
+};
+
+struct mlxsw_sp2_kvdl {
+ struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN];
+};
+
+static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int bit_count,
+ unsigned int *p_bit)
+{
+ unsigned int start_bit;
+ unsigned int bit;
+ unsigned int i;
+ bool wrap = false;
+
+ start_bit = part->last_allocated_bit + 1;
+ if (start_bit == part->usage_bit_count)
+ start_bit = 0;
+ bit = start_bit;
+again:
+ bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit);
+ if (!wrap && bit + bit_count >= part->usage_bit_count) {
+ wrap = true;
+ bit = 0;
+ goto again;
+ }
+ if (wrap && bit + bit_count >= start_bit)
+ return -ENOBUFS;
+ for (i = 0; i < bit_count; i++) {
+ if (test_bit(bit + i, part->usage)) {
+ bit += bit_count;
+ goto again;
+ }
+ }
+ *p_bit = bit;
+ return 0;
+}
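The search above is a rotating first fit: it resumes just past the previous allocation, wraps around at most once, and reports -ENOBUFS when no run of bit_count free bits exists. A self-contained userspace sketch of the same idea, with a byte-per-bit array instead of a real bitmap and a slightly simplified wrap-termination test:

#include <stdio.h>

/* Returns the first index of a run of `run` free slots, searching from
 * `start` and wrapping once, or -1 if no such run exists.
 */
static int find_zero_run(const unsigned char *used, unsigned int nbits,
                         unsigned int start, unsigned int run)
{
        unsigned int bit = start, i;
        int wrapped = 0;

        for (;;) {
                if (bit + run > nbits) {
                        if (wrapped)
                                return -1;
                        wrapped = 1;
                        bit = 0;
                        continue;
                }
                if (wrapped && bit >= start)
                        return -1;
                for (i = 0; i < run; i++)
                        if (used[bit + i])
                                break;
                if (i == run)
                        return (int)bit;
                bit += i + 1;   /* skip past the occupied slot */
        }
}

int main(void)
{
        unsigned char used[16] = { [6] = 1 };

        /* Starts at 5, hits the used slot at 6, finds 7..9 free. */
        printf("run at %d\n", find_zero_run(used, 16, 5, 3));
        return 0;
}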
+
+static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size,
+ u32 *p_kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit);
+ if (err)
+ return err;
+ for (i = 0; i < bit_count; i++)
+ __set_bit(bit + i, part->usage);
+ *p_kvdl_index = bit * part->indexes_per_usage_bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type,
+ u16 size, u32 kvdl_index)
+{
+ char *iedr_pl;
+ int err;
+
+ iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL);
+ if (!iedr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_iedr_pack(iedr_pl);
+ mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl);
+ kfree(iedr_pl);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size, u32 kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ /* We need to ask the FW to delete the previously used KVD linear index */
+ err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type,
+ size, kvdl_index);
+ if (err)
+ return;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ bit = kvdl_index / part->indexes_per_usage_bit;
+ for (i = 0; i < bit_count; i++)
+ __clear_bit(bit + i, part->usage);
+}
+
+static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index);
+}
+
+static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ int entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index);
+}
+
+static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
+{
+ *p_alloc_count = entry_count;
+ return 0;
+}
+
+static struct mlxsw_sp2_kvdl_part *
+mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp2_kvdl_part_info *info)
+{
+ unsigned int indexes_per_usage_bit;
+ struct mlxsw_sp2_kvdl_part *part;
+ unsigned int index_range;
+ unsigned int usage_bit_count;
+ size_t usage_size;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core,
+ info->usage_bit_count_res_id) ||
+ !mlxsw_core_res_valid(mlxsw_sp->core,
+ info->index_range_res_id))
+ return ERR_PTR(-EIO);
+ usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
+ info->usage_bit_count_res_id);
+ index_range = mlxsw_core_res_get(mlxsw_sp->core,
+ info->index_range_res_id);
+
+ /* For some partitions, one usage bit represents a group of indexes.
+ * That's why we compute the number of indexes per usage bit here,
+ * according to queried resources.
+ */
+ indexes_per_usage_bit = index_range / usage_bit_count;
+
+ usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+ part->info = info;
+ part->usage_bit_count = usage_bit_count;
+ part->indexes_per_usage_bit = indexes_per_usage_bit;
+ part->last_allocated_bit = usage_bit_count - 1;
+ return part;
+}
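To make the bit accounting concrete, a worked example with invented resource values (the real values are queried from firmware via mlxsw_core_res_get()):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int index_range = 0x8000;      /* hypothetical */
        unsigned int usage_bit_count = 0x2000;  /* hypothetical */
        unsigned int per_bit = index_range / usage_bit_count;

        /* Each usage bit covers 4 indexes; allocating 10 indexes marks
         * DIV_ROUND_UP(10, 4) = 3 bits, and a run found at bit 5 maps
         * back to kvdl_index = 5 * 4 = 20.
         */
        printf("per_bit=%u bits=%u index=%u\n",
               per_bit, DIV_ROUND_UP(10, per_bit), 5 * per_bit);
        return 0;
}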
+
+static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl *kvdl)
+{
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp2_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl);
+}
+
+static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ mlxsw_sp2_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_kvdl),
+ .init = mlxsw_sp2_kvdl_init,
+ .fini = mlxsw_sp2_kvdl_fini,
+ .alloc = mlxsw_sp2_kvdl_alloc,
+ .free = mlxsw_sp2_kvdl_free,
+ .alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
new file mode 100644
index 000000000000..4dd62478162e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "core_acl_flex_actions.h"
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
+static int
+mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+}
+
+static int
+mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ return 0;
+}
+
+static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ return 0;
+}
+
+static void mlxsw_sp2_mr_tcam_fini(void *priv)
+{
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
+ .init = mlxsw_sp2_mr_tcam_init,
+ .fini = mlxsw_sp2_mr_tcam_fini,
+ .route_create = mlxsw_sp2_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp2_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 79b1fa27a9a4..c4f9238591e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -48,13 +17,12 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
-#include "spectrum_acl_flex_keys.h"
+#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_sp_fid *dummy_fid;
- const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
struct {
@@ -62,8 +30,7 @@ struct mlxsw_sp_acl {
unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
} rule_activity_update;
- unsigned long priv[0];
- /* priv has to be always the last item */
+ struct mlxsw_sp_acl_tcam tcam;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
@@ -160,6 +127,17 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
return block->disable_count;
}
+bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ if (!binding->ingress)
+ return true;
+ }
+ return false;
+}
+
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
@@ -319,7 +297,8 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- const struct mlxsw_sp_acl_profile_ops *ops)
+ const struct mlxsw_sp_acl_profile_ops *ops,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -339,7 +318,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_init;
- err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
+ err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
+ tmplt_elusage);
if (err)
goto err_ops_ruleset_add;
@@ -409,7 +389,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
@@ -421,13 +401,14 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- enum mlxsw_sp_acl_profile profile)
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
@@ -436,7 +417,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
+ tmplt_elusage);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -487,7 +469,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority)
{
- rulei->priority = priority;
+ rulei->priority = priority >> 16;
}
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
@@ -536,18 +518,23 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct net_device *out_dev)
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u8 local_port;
bool in_port;
if (out_dev) {
- if (!mlxsw_sp_port_dev_check(out_dev))
+ if (!mlxsw_sp_port_dev_check(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
return -EINVAL;
+ }
mlxsw_sp_port = netdev_priv(out_dev);
- if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
+ if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
return -EINVAL;
+ }
local_port = mlxsw_sp_port->local_port;
in_port = false;
} else {
@@ -558,20 +545,22 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
in_port = true;
}
return mlxsw_afa_block_append_fwd(rulei->act_block,
- local_port, in_port);
+ local_port, in_port, extack);
}
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block,
- struct net_device *out_dev)
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_acl_block_binding *binding;
struct mlxsw_sp_port *in_port;
- if (!list_is_singular(&block->binding_list))
+ if (!list_is_singular(&block->binding_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
return -EOPNOTSUPP;
-
+ }
binding = list_first_entry(&block->binding_list,
struct mlxsw_sp_acl_block_binding, list);
in_port = binding->mlxsw_sp_port;
@@ -579,12 +568,14 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_mirror(rulei->act_block,
in_port->local_port,
out_dev,
- binding->ingress);
+ binding->ingress,
+ extack);
}
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u32 action, u16 vid, u16 proto, u8 prio)
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack)
{
u8 ethertype;
@@ -597,44 +588,50 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
ethertype = 1;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
proto);
return -EINVAL;
}
return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
- vid, prio, ethertype);
+ vid, prio, ethertype,
+ extack);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
return -EINVAL;
}
}
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
- &rulei->counter_index);
+ &rulei->counter_index, extack);
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u16 fid)
+ u16 fid, struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
+ return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- unsigned long cookie)
+ unsigned long cookie,
+ struct netlink_ext_ack *extack)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
struct mlxsw_sp_acl_rule *rule;
int err;
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
- rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
+ GFP_KERNEL);
if (!rule) {
err = -ENOMEM;
goto err_alloc;
@@ -825,20 +822,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
- const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
+ size_t alloc_size;
int err;
- acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
+ alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
+ acl = kzalloc(alloc_size, GFP_KERNEL);
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
- mlxsw_sp_afk_blocks,
- MLXSW_SP_AFK_BLOCKS_COUNT);
+ mlxsw_sp->afk_ops);
if (!acl->afk) {
err = -ENOMEM;
goto err_afk_create;
@@ -857,12 +854,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
- err = acl_ops->init(mlxsw_sp, acl->priv);
+ err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
- acl->ops = acl_ops;
-
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
mlxsw_sp_acl_rul_activity_update_work);
@@ -884,10 +879,9 @@ err_afk_create:
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
- acl_ops->fini(mlxsw_sp, acl->priv);
+ mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
new file mode 100644
index 000000000000..2dda028f94db
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_keys.h"
+
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+
+struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_lkey_id {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key;
+ refcount_t refcnt;
+ u32 id;
+};
+
+struct mlxsw_sp_acl_atcam_region_ops {
+ int (*init)(struct mlxsw_sp_acl_atcam_region *aregion);
+ void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
+ struct mlxsw_sp_acl_atcam_lkey_id *
+ (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
+};
+
+struct mlxsw_sp_acl_atcam_region_generic {
+ struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id;
+};
+
+struct mlxsw_sp_acl_atcam_region_12kb {
+ struct rhashtable lkey_ht;
+ unsigned int max_lkey_id;
+ unsigned long *used_lkey_id;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node),
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node),
+};
+
+static bool
+mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL);
+ if (!region_generic)
+ return -ENOMEM;
+
+ refcount_set(&region_generic->dummy_lkey_id.refcnt, 1);
+ aregion->priv = region_generic;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ kfree(aregion->priv);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = aregion->priv;
+ return &region_generic->dummy_lkey_id;
+}
+
+static void
+mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_generic_ops = {
+ .init = mlxsw_sp_acl_atcam_region_generic_init,
+ .fini = mlxsw_sp_acl_atcam_region_generic_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put,
+};
+
+static int
+mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
+ size_t alloc_size;
+ u64 max_lkey_id;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID))
+ return -EIO;
+
+ max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID);
+ region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL);
+ if (!region_12kb)
+ return -ENOMEM;
+
+ alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
+ region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
+ if (!region_12kb->used_lkey_id) {
+ err = -ENOMEM;
+ goto err_used_lkey_id_alloc;
+ }
+
+ err = rhashtable_init(&region_12kb->lkey_ht,
+ &mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ region_12kb->max_lkey_id = max_lkey_id;
+ aregion->priv = region_12kb;
+
+ return 0;
+
+err_rhashtable_init:
+ kfree(region_12kb->used_lkey_id);
+err_used_lkey_id_alloc:
+ kfree(region_12kb);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+
+ rhashtable_destroy(&region_12kb->lkey_ht);
+ kfree(region_12kb->used_lkey_id);
+ kfree(region_12kb);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ u32 id;
+ int err;
+
+ id = find_first_zero_bit(region_12kb->used_lkey_id,
+ region_12kb->max_lkey_id);
+ if (id < region_12kb->max_lkey_id)
+ __set_bit(id, region_12kb->used_lkey_id);
+ else
+ return ERR_PTR(-ENOBUFS);
+
+ lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL);
+ if (!lkey_id) {
+ err = -ENOMEM;
+ goto err_lkey_id_alloc;
+ }
+
+ lkey_id->id = id;
+ memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key));
+ refcount_set(&lkey_id->refcnt, 1);
+
+ err = rhashtable_insert_fast(&region_12kb->lkey_ht,
+ &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return lkey_id;
+
+err_rhashtable_insert:
+ kfree(lkey_id);
+err_lkey_id_alloc:
+ __clear_bit(id, region_12kb->used_lkey_id);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ u32 id = lkey_id->id;
+
+ rhashtable_remove_fast(&region_12kb->lkey_ht, &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ kfree(lkey_id);
+ __clear_bit(id, region_12kb->used_lkey_id);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 }};
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
+ NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ ht_key.erp_id = erp_id;
+ lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (lkey_id) {
+ refcount_inc(&lkey_id->refcnt);
+ return lkey_id;
+ }
+
+ return mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key);
+}
+
+static void
+mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ if (refcount_dec_and_test(&lkey_id->refcnt))
+ mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id);
+}
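lkey_id_get()/lkey_id_put() above follow the classic refcounted lookup-or-create pattern: an existing entry keyed by (encoded MSB key, ERP ID) is shared and refcounted, a miss creates one, and the last put destroys it. A generic userspace sketch of the pattern, with a linked list standing in for the rhashtable (all names illustrative):

#include <stdlib.h>
#include <string.h>

struct obj {
        struct obj *next;
        char key[16];
        unsigned int refcnt;
};

static struct obj *head;

static struct obj *obj_get(const char *key)
{
        struct obj *o;

        for (o = head; o; o = o->next)
                if (!strncmp(o->key, key, sizeof(o->key))) {
                        o->refcnt++;            /* existing entry: share it */
                        return o;
                }
        o = calloc(1, sizeof(*o));              /* miss: create */
        if (!o)
                return NULL;
        strncpy(o->key, key, sizeof(o->key) - 1);
        o->refcnt = 1;
        o->next = head;
        head = o;
        return o;
}

static void obj_put(struct obj *o)
{
        struct obj **p;

        if (--o->refcnt)
                return;
        for (p = &head; *p; p = &(*p)->next)    /* last put: unlink + free */
                if (*p == o) {
                        *p = o->next;
                        break;
                }
        free(o);
}

int main(void)
{
        struct obj *a = obj_get("erp0");
        struct obj *b = obj_get("erp0");        /* same object, refcnt == 2 */

        obj_put(b);
        obj_put(a);                             /* freed here */
        return 0;
}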
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_12kb_ops = {
+ .init = mlxsw_sp_acl_atcam_region_12kb_init,
+ .fini = mlxsw_sp_acl_atcam_region_12kb_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put,
+};
+
+static const struct mlxsw_sp_acl_atcam_region_ops *
+mlxsw_sp_acl_atcam_region_ops_arr[] = {
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] =
+ &mlxsw_sp_acl_atcam_region_12kb_ops,
+};
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id)
+{
+ char perar_pl[MLXSW_REG_PERAR_LEN];
+ /* For now, just assume that every region has 12 key blocks */
+ u16 hw_region = region_id * 3;
+ u64 max_regions;
+
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+ if (hw_region >= max_regions)
+ return -ENOBUFS;
+
+ mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
+}
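The factor of three above follows from the 12-key-block assumption: if, as seems implied, a single hardware region covers four key blocks, each logical region claims three consecutive hardware regions. That 4-blocks-per-hw-region figure is an inference, not stated in the patch; the mapping itself is just:

#include <stdio.h>

int main(void)
{
        unsigned int region_id;

        for (region_id = 0; region_id < 3; region_id++)
                printf("region %u -> hw regions %u..%u\n",
                       region_id, region_id * 3, region_id * 3 + 2);
        return 0;
}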
+
+static void
+mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ enum mlxsw_sp_acl_atcam_region_type region_type;
+ unsigned int blocks_count;
+
+ /* We already know that the blocks count cannot exceed the
+ * maximum blocks count.
+ */
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ if (blocks_count <= 2)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB;
+ else if (blocks_count <= 4)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB;
+ else if (blocks_count <= 8)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB;
+ else
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB;
+
+ aregion->type = region_type;
+ aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type];
+}
+
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ int err;
+
+ aregion->region = region;
+ aregion->atcam = atcam;
+ mlxsw_sp_acl_atcam_region_type_init(aregion);
+
+ err = rhashtable_init(&aregion->entries_ht,
+ &mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ return err;
+ err = aregion->ops->init(aregion);
+ if (err)
+ goto err_ops_init;
+ err = mlxsw_sp_acl_erp_region_init(aregion);
+ if (err)
+ goto err_erp_region_init;
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion,
+ region, ops);
+ if (err)
+ goto err_ctcam_region_init;
+
+ return 0;
+
+err_ctcam_region_init:
+ mlxsw_sp_acl_erp_region_fini(aregion);
+err_erp_region_init:
+ aregion->ops->fini(aregion);
+err_ops_init:
+ rhashtable_destroy(&aregion->entries_ht);
+ return err;
+}
+
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion);
+ mlxsw_sp_acl_erp_region_fini(aregion);
+ aregion->ops->fini(aregion);
+ rhashtable_destroy(&aregion->entries_ht);
+}
+
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority)
+{
+ mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk,
+ priority);
+}
+
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk)
+{
+ mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+ if (err)
+ return err;
+
+ lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ if (IS_ERR(lkey_id))
+ return PTR_ERR(lkey_id);
+ aentry->lkey_id = lkey_id;
+
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
+ priority, region->tcam_region_info,
+ aentry->ht_key.enc_key, erp_id,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ if (err)
+ goto err_ptce3_write;
+
+ return 0;
+
+err_ptce3_write:
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+
+ mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
+ region->tcam_region_info, aentry->ht_key.enc_key,
+ erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ lkey_id->id, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_erp *erp;
+ unsigned int blocks_count;
+ int err;
+
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values,
+ aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+
+ /* We can't insert identical rules into the A-TCAM, so fail and
+ * let the rule spill into the C-TCAM.
+ */
+ err = rhashtable_lookup_insert_fast(&aregion->entries_ht,
+ &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
+ rulei);
+ if (err)
+ goto err_rule_insert;
+
+ return 0;
+
+err_rule_insert:
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_put(aregion, erp);
+ return err;
+}
+
+static void
+__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei);
+ if (!err)
+ return 0;
+
+ /* It is possible that we failed to add the rule to the A-TCAM
+ * because the number of masks was exceeded. Try to spill into
+ * the C-TCAM.
+ */
+ return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry,
+ rulei, true);
+}
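The add path above is a try-then-spill policy: attempt the exact-match A-TCAM first and fall back to the C-TCAM on failure; deletion later asks mlxsw_sp_acl_atcam_is_centry() where the entry landed. The control flow, reduced to a standalone sketch with hypothetical helpers (not mlxsw API):

#include <stdio.h>

struct entry { int in_fallback; };

static int fast_insert(struct entry *e)
{
        (void)e;
        return -1;                      /* e.g. mask budget exceeded */
}

static int fallback_insert(struct entry *e)
{
        e->in_fallback = 1;             /* remember for deletion */
        return 0;
}

static int entry_add(struct entry *e)
{
        if (!fast_insert(e))
                return 0;
        return fallback_insert(e);      /* spill path */
}

int main(void)
{
        struct entry e = { 0 };

        printf("add=%d in_fallback=%d\n", entry_add(&e), e.in_fallback);
        return 0;
}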
+
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry);
+ else
+ __mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
+}
+
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam);
+}
+
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
new file mode 100644
index 000000000000..e3c6fe8b1d40
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+static int
+mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 new_size)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+ region->key_type, new_size, region->id,
+ region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 src_offset, u16 dst_offset, u16 size)
+{
+ char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+ mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+ region->tcam_region_info, src_offset,
+ region->tcam_region_info, dst_offset, size);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
+static int
+mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ unsigned int blocks_count;
+ char *act_set;
+ u32 priority;
+ char *mask;
+ char *key;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority,
+ fillup_priority);
+ if (err)
+ return err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info,
+ centry->parman_item.index, priority);
+ key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+ mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
+ blocks_count - 1);
+
+ err = cregion->ops->entry_insert(cregion, centry, mask);
+ if (err)
+ return err;
+
+ /* Only the first action set belongs here, the rest is in KVD */
+ act_set = mlxsw_afa_block_first_set(rulei->act_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ cregion->ops->entry_remove(cregion, centry);
+}
+
+static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+ mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region,
+ from_index, to_index, count);
+}
+
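+/* C-TCAM entries must be ordered by priority, so their placement is
+ * delegated to parman using its linear-sort algorithm; the callbacks
+ * above resize the region and move entries on parman's behalf.
+ */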
+static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
+ .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_acl_ctcam_region_parman_resize,
+ .move = mlxsw_sp_acl_ctcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ cregion->region = region;
+ cregion->ops = ops;
+ cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
+ cregion);
+ if (!cregion->parman)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ parman_destroy(cregion->parman);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority)
+{
+ parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk)
+{
+ parman_prio_fini(&cchunk->parman_prio);
+}
+
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ int err;
+
+ err = parman_item_add(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry,
+ rulei, fillup_priority);
+ if (err)
+ goto err_rule_insert;
+ return 0;
+
+err_rule_insert:
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ return err;
+}
+
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry);
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
new file mode 100644
index 000000000000..0a4fd3c8662a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */
+#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100
+#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16
+
+struct mlxsw_sp_acl_erp_core {
+ unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
+ struct gen_pool *erp_tables;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int num_erp_banks;
+};
+
+struct mlxsw_sp_acl_erp_key {
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+ bool ctcam;
+};
+
+struct mlxsw_sp_acl_erp {
+ struct mlxsw_sp_acl_erp_key key;
+ u8 id;
+ u8 index;
+ refcount_t refcnt;
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+};
+
+struct mlxsw_sp_acl_erp_master_mask {
+ DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN];
+};
+
+struct mlxsw_sp_acl_erp_table {
+ struct mlxsw_sp_acl_erp_master_mask master_mask;
+ DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ struct list_head atcam_erps_list;
+ struct rhashtable erp_ht;
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ const struct mlxsw_sp_acl_erp_table_ops *ops;
+ unsigned long base_index;
+ unsigned int num_atcam_erps;
+ unsigned int num_max_atcam_erps;
+ unsigned int num_ctcam_erps;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+};
+
+struct mlxsw_sp_acl_erp_table_ops {
+ struct mlxsw_sp_acl_erp *
+ (*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+ void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+};
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_second_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_first_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
+};
+
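+/* The ops structures above form a per-region state machine keyed on the
+ * number of masks in use: no mask, a single mask (master mask, no eRP
+ * table), two masks and multiple masks. The erp_create / erp_destroy
+ * callbacks perform the transitions by swapping erp_table->ops.
+ */
+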
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->key.ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->id;
+}
+
+static unsigned int
+mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+
+ return erp_core->erpt_entries_size[aregion->type];
+}
+
+static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_id)
+{
+ u8 id;
+
+ id = find_first_zero_bit(erp_table->erp_id_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) {
+ __set_bit(id, erp_table->erp_id_bitmap);
+ *p_id = id;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 id)
+{
+ __clear_bit(id, erp_table->erp_id_bitmap);
+}
+
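+/* The region's master mask is the logical OR of the masks of all the
+ * eRPs using the region. Each bit is reference counted, so a bit is only
+ * cleared from the master mask when the last eRP using it goes away.
+ */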
+static void
+mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (mask->count[bit]++ == 0)
+ __set_bit(bit, mask->bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (--mask->count[bit] == 0)
+ __clear_bit(bit, mask->bitmap);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+ char *master_mask;
+
+ mlxsw_reg_percr_pack(percr_pl, region->id);
+ master_mask = mlxsw_reg_percr_master_mask_data(percr_pl);
+ bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id);
+ if (err)
+ goto err_erp_id_get;
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ list_add(&erp->list, &erp_table->atcam_erps_list);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_atcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return erp;
+
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+err_erp_id_get:
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+ kfree(erp);
+}
+
+static int
+mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long *p_index)
+{
+ unsigned int num_rows, entry_size;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+ return -EINVAL;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
+ *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (*p_index == 0)
+ return -ENOBUFS;
+ *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long index)
+{
+ unsigned long base_index;
+ unsigned int entry_size;
+ size_t size;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+ size = num_erps / erp_core->num_erp_banks * entry_size;
+ gen_pool_free(erp_core->erp_tables, base_index, size);
+}
+
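+/* The master RP is the region's sole RP when it operates in master-mask
+ * mode; it only exists while the A-TCAM eRP list holds a single entry.
+ */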
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ if (!list_is_singular(&erp_table->atcam_erps_list))
+ return NULL;
+
+ return list_first_entry(&erp_table->atcam_erps_list,
+ struct mlxsw_sp_acl_erp, list);
+}
+
+static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_index)
+{
+ u8 index;
+
+ index = find_first_zero_bit(erp_table->erp_index_bitmap,
+ erp_table->num_max_atcam_erps);
+ if (index < erp_table->num_max_atcam_erps) {
+ __set_bit(index, erp_table->erp_index_bitmap);
+ *p_index = index;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 index)
+{
+ __clear_bit(index, erp_table->erp_index_bitmap);
+}
+
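+/* eRPs are spread across the eRP banks row by row: the bank is the eRP
+ * index modulo the number of banks and the row is the index divided by
+ * the number of banks.
+ */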
+static void
+mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp,
+ u8 *p_erpt_bank, u8 *p_erpt_index)
+{
+ unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table);
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned int row;
+
+ *p_erpt_bank = erp->index % erp_core->num_erp_banks;
+ row = erp->index / erp_core->num_erp_banks;
+ *p_erpt_index = erp_table->base_index + row * entry_size;
+}
+
+static int
+mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index,
+ erp->key.mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index, empty_mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table,
+ bool ctcam_le)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void
+mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ /* It is possible we do not have a master RP when the table is
+ * disabled: this happens when there are no rules left in the A-TCAM
+ * and the last C-TCAM rule is being deleted
+ */
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0,
+ master_rp ? master_rp->id : 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ list_for_each_entry(erp, &erp_table->atcam_erps_list, list) {
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+ }
+
+ return 0;
+
+err_table_erp_add:
+ list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list,
+ list)
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ return err;
+}
+
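+/* The eRP table grows one row at a time. A row spans all eRP banks, so
+ * each expansion adds num_erp_banks eRPs, up to the per-region maximum.
+ * A new table is allocated and the eRPs relocated into it before the old
+ * table is freed, so a failed expansion leaves the old table intact.
+ */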
+static int
+mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned long old_base_index = erp_table->base_index;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ int err;
+
+ if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps)
+ return 0;
+
+ if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION)
+ return -ENOBUFS;
+
+ num_erps = old_num_erps + erp_core->num_erp_banks;
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = num_erps;
+
+ err = mlxsw_sp_acl_erp_table_relocate(erp_table);
+ if (err)
+ goto err_table_relocate;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le);
+ if (err)
+ goto err_table_enable;
+
+ mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps,
+ erp_table->aregion->type, old_base_index);
+
+ return 0;
+
+err_table_enable:
+err_table_relocate:
+ erp_table->num_max_atcam_erps = old_num_erps;
+ mlxsw_sp_acl_erp_table_free(erp_core, num_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->base_index = old_base_index;
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+ int err;
+
+ /* Initially, allocate a single eRP row. Expand later as needed */
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = erp_core->num_erp_banks;
+
+ /* Transition the sole RP currently configured (the master RP)
+ * to the eRP table
+ */
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp) {
+ err = -EINVAL;
+ goto err_table_master_rp;
+ }
+
+ /* Maintain the same eRP bank for the master RP, so that we do not
+ * need to update the Bloom filter
+ */
+ master_rp->index = master_rp->index % erp_core->num_erp_banks;
+ __set_bit(master_rp->index, erp_table->erp_index_bitmap);
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
+ if (err)
+ goto err_table_master_rp_add;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
+ if (err)
+ goto err_table_enable;
+
+ return 0;
+
+err_table_enable:
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+err_table_master_rp_add:
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+err_table_master_rp:
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp)
+ return;
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+}
+
+static int
+mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* No need to re-enable lookup in the C-TCAM */
+ if (erp_table->num_ctcam_erps > 1)
+ return 0;
+
+ return mlxsw_sp_acl_erp_table_enable(erp_table, true);
+}
+
+static void
+mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* Only disable C-TCAM lookup when the last C-TCAM eRP is deleted */
+ if (erp_table->num_ctcam_erps > 1)
+ return;
+
+ mlxsw_sp_acl_erp_table_enable(erp_table, false);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ switch (erp_table->num_atcam_erps) {
+ case 2:
+ /* Keep using the eRP table, but correctly set the
+ * operations pointer so that when an A-TCAM eRP is
+ * deleted we will transition to use the master mask
+ */
+ erp_table->ops = &erp_two_masks_ops;
+ break;
+ case 1:
+ /* We only kept the eRP table because we had C-TCAM
+ * eRPs in use. Now that the last C-TCAM eRP is gone we
+ * can stop using the table and transition to use the
+ * master mask
+ */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ erp_table->ops = &erp_single_mask_ops;
+ break;
+ case 0:
+ /* There are no more eRPs of any kind used by the region
+ * so free its eRP table and transition to initial state
+ */
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ mlxsw_sp_acl_erp_table_free(erp_table->erp_core,
+ erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->ops = &erp_no_mask_ops;
+ break;
+ default:
+ break;
+ }
+}
+
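+/* C-TCAM eRPs are not programmed into the eRP table and consume no eRP
+ * index; creating one only updates the master mask and enables C-TCAM
+ * lookup for the region.
+ */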
+static struct mlxsw_sp_acl_erp *
+__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_ctcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
+ if (err)
+ goto err_erp_region_ctcam_enable;
+
+ /* When C-TCAM is used, the eRP table must be used */
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_erp_region_ctcam_enable:
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ /* There is a special situation where we need to spill rules
+ * into the C-TCAM, yet the region is still using a master
+ * mask and thus not performing a lookup in the C-TCAM. This
+ * can happen when two rules that only differ in priority - and
+ * thus share the same key - are programmed. In this case
+ * we transition the region to use an eRP table
+ */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ return erp;
+
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+
+ /* Once the last C-TCAM eRP is destroyed, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use
+ */
+ if (erp_table->num_ctcam_erps > 0)
+ return;
+ mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Expand the eRP table for the new eRP, if needed */
+ err = mlxsw_sp_acl_erp_table_expand(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ erp_table->ops = &erp_two_masks_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Transition to use eRP table instead of master mask */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_two_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ /* Transition to use master mask instead of eRP table */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+
+ erp_table->ops = &erp_single_mask_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+
+ if (key->ctcam)
+ return ERR_PTR(-EINVAL);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ erp_table->ops = &erp_single_mask_ops;
+
+ return erp;
+}
+
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ erp_table->ops = &erp_no_mask_ops;
+}
+
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ WARN_ON(1);
+}
+
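+/* eRPs are reference counted and keyed by { mask, ctcam }, so rules
+ * sharing the same mask also share an eRP.
+ */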
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key key;
+ struct mlxsw_sp_acl_erp *erp;
+
+ /* eRPs are allocated from a shared resource, but currently all
+ * allocations are done under RTNL.
+ */
+ ASSERT_RTNL();
+
+ memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+ key.ctcam = ctcam;
+ erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
+ mlxsw_sp_acl_erp_ht_params);
+ if (erp) {
+ refcount_inc(&erp->refcnt);
+ return erp;
+ }
+
+ return erp_table->ops->erp_create(erp_table, &key);
+}
+
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ ASSERT_RTNL();
+
+ if (!refcount_dec_and_test(&erp->refcnt))
+ return;
+
+ erp_table->ops->erp_destroy(erp_table, erp);
+}
+
+static struct mlxsw_sp_acl_erp_table *
+mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL);
+ if (!erp_table)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ erp_table->erp_core = aregion->atcam->erp_core;
+ erp_table->ops = &erp_no_mask_ops;
+ INIT_LIST_HEAD(&erp_table->atcam_erps_list);
+ erp_table->aregion = aregion;
+
+ return erp_table;
+
+err_rhashtable_init:
+ kfree(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ WARN_ON(!list_empty(&erp_table->atcam_erps_list));
+ rhashtable_destroy(&erp_table->erp_ht);
+ kfree(erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+
+ mlxsw_reg_percr_pack(percr_pl, aregion->region->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0,
+ 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = mlxsw_sp_acl_erp_table_create(aregion);
+ if (IS_ERR(erp_table))
+ return PTR_ERR(erp_table);
+ aregion->erp_table = erp_table;
+
+ /* Initialize the region's master mask to all zeroes */
+ err = mlxsw_sp_acl_erp_master_mask_init(aregion);
+ if (err)
+ goto err_erp_master_mask_init;
+
+ /* Initialize the region to not use the eRP table */
+ err = mlxsw_sp_acl_erp_region_param_init(aregion);
+ if (err)
+ goto err_erp_region_param_init;
+
+ return 0;
+
+err_erp_region_param_init:
+err_erp_master_mask_init:
+ mlxsw_sp_acl_erp_table_destroy(erp_table);
+ return err;
+}
+
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_erp_table_destroy(aregion->erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB))
+ return -EIO;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int erpt_bank_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS))
+ return -EIO;
+ erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANK_SIZE);
+ erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANKS);
+
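+ /* All regions carve their eRP tables out of a single shared pool.
+ * A row occupies the same index in every bank, so the pool only
+ * needs to track one bank's worth of entries.
+ */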
+ erp_core->erp_tables = gen_pool_create(0, -1);
+ if (!erp_core->erp_tables)
+ return -ENOMEM;
+ gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL);
+
+ err = gen_pool_add(erp_core->erp_tables,
+ MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size,
+ -1);
+ if (err)
+ goto err_gen_pool_add;
+
+ /* Regions of different types use masks of different sizes, so the
+ * eRP table entry size depends on the region type
+ */
+ err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_sizes_query;
+
+ return 0;
+
+err_erp_tables_sizes_query:
+err_gen_pool_add:
+ gen_pool_destroy(erp_core->erp_tables);
+ return err;
+}
+
+static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ gen_pool_destroy(erp_core->erp_tables);
+}
+
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ int err;
+
+ erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL);
+ if (!erp_core)
+ return -ENOMEM;
+ erp_core->mlxsw_sp = mlxsw_sp;
+ atcam->erp_core = erp_core;
+
+ err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_init;
+
+ return 0;
+
+err_erp_tables_init:
+ kfree(erp_core);
+ return err;
+}
+
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core);
+ kfree(atcam->erp_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 510ce48d87f7..e47d1d286e93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,46 +1,12 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include "spectrum_acl_flex_actions.h"
#include "core_acl_flex_actions.h"
#include "spectrum_span.h"
-#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
-
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
- char *enc_actions, bool is_first)
+ char *enc_actions, bool is_first, bool ca)
{
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
@@ -53,11 +19,11 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
if (is_first)
return 0;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
- &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, &kvdl_index);
if (err)
return err;
- mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
goto err_pefa_write;
@@ -65,10 +31,25 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
return 0;
err_pefa_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
return err;
}
+static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, false);
+}
+
+static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, true);
+}
+
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
bool is_first)
{
@@ -76,7 +57,29 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
if (is_first)
return;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
+}
+
+static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ int err;
+
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_unpack(pefa_pl, activity);
+ return 0;
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
@@ -87,7 +90,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
u32 kvdl_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, &kvdl_index);
if (err)
return err;
mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
@@ -98,7 +102,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
return 0;
err_ppbs_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
return err;
}
@@ -106,7 +111,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
struct mlxsw_sp *mlxsw_sp = priv;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
}
static int
@@ -154,22 +160,36 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
}
-static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
- .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
+const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
+};
+
+const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
.counter_index_get = mlxsw_sp_act_counter_index_get,
.counter_index_put = mlxsw_sp_act_counter_index_put,
.mirror_add = mlxsw_sp_act_mirror_add,
.mirror_del = mlxsw_sp_act_mirror_del,
+ .dummy_first_set = true,
};
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_ACTIONS_PER_SET),
- &mlxsw_sp_act_afa_ops, mlxsw_sp);
+ mlxsw_sp->afa_ops, mlxsw_sp);
return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
index bd6d552d95b9..fe436d816d0c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
new file mode 100644
index 000000000000..d409b09ba8df
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "spectrum.h"
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
+ MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
+ MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
+ MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
+ MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
+ MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
+ MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
+ MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
+ MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
+};
+
+#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
+
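+/* Spectrum-1 keys are laid out as fixed 16-byte blocks, so encoding a
+ * block is a plain copy into its slot in the output key.
+ */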
+static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
+ .blocks = mlxsw_sp1_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
+ .encode_block = mlxsw_sp1_afk_encode_block,
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+};
+
+static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36
+
+/* A block in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32|
+ * +-----------------------------------------------------------------------------------------------+
+ * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ */
+MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK);
+
+/* The key / mask block layout in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | block11_high |
+ * +-----------------------------------------------------------------------------------------------+
+ * | block11_low | block10_high |
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * ...
+ */
+
+struct mlxsw_sp2_afk_block_layout {
+ unsigned short offset;
+ struct mlxsw_item item;
+};
+
+#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \
+ { \
+ .offset = _offset, \
+ { \
+ .shift = _shift, \
+ .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \
+ .name = #_block, \
+ } \
+ } \
+
+static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
+};
+
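How to read this table (editorial note): block N occupies bits [36 * N, 36 * N + 35] counting from the least-significant end of the key, and each row expresses that position as the byte offset of a big-endian 64-bit word plus a bit shift into it, so (0x30 - offset) * 8 + shift == 36 * N holds for every row (e.g. block1: (0x30 - 0x2C) * 8 + 4 = 36). A hedged self-check, not part of the patch:

/* Editorial self-check: each layout entry must place block N at
 * bit 36 * N from the least-significant end of the key.
 */
static void mlxsw_sp2_afk_layout_selftest(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout); i++) {
		const struct mlxsw_sp2_afk_block_layout *b =
			&mlxsw_sp2_afk_blocks_layout[i];

		WARN_ON((0x30 - b->offset) * 8 + b->item.shift !=
			i * MLXSW_SP2_AFK_BITS_PER_BLOCK);
	}
}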
+static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+ const struct mlxsw_sp2_afk_block_layout *block_layout;
+
+ if (WARN_ON(block_index < 0 ||
+ block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout)))
+ return;
+
+ block_layout = &mlxsw_sp2_afk_blocks_layout[block_index];
+ __mlxsw_item_set64(output + block_layout->offset,
+ &block_layout->item, 0, block_value);
+}
+
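Hand-expanding one case may help (a sketch assuming, as the mlxsw item helpers do, that __mlxsw_item_set64() read-modify-writes a big-endian u64 at output + offset): encoding block 1 puts its 36-bit value into bits 4..39 of the word spanning bytes 0x2C..0x33 of the key.

/* Illustration only: the effect of encoding block 1 into a zeroed key. */
__be64 *word = (__be64 *)(output + 0x2C);

*word = cpu_to_be64(be64_to_cpu(*word) | (block_value << 4));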
+const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
+ .blocks = mlxsw_sp2_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
deleted file mode 100644
index fb8031828454..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-
-#include "core_acl_flex_keys.h"
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6),
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
- MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
- MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
- MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
-};
-
-static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
- MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
- MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
- MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
- MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
- MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
- MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
- MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
- MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
- MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
- MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
- MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
- MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
-};
-
-#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks)
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index ad1b548e3cac..e171513bb32a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -39,25 +8,25 @@
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
- unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
- unsigned int max_groups;
- unsigned int max_group_size;
-};
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->priv_size;
+}
-static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
u64 max_tcam_regions;
u64 max_regions;
u64 max_groups;
@@ -88,21 +57,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
return 0;
+err_tcam_init:
+ kfree(tcam->used_groups);
err_alloc_used_groups:
kfree(tcam->used_regions);
return err;
}
-static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ ops->fini(mlxsw_sp, tcam->priv);
kfree(tcam->used_groups);
kfree(tcam->used_regions);
}
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority)
+{
+ u64 max_priority;
+
+ if (!fillup_priority) {
+ *priority = 0;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
+ return -EIO;
+
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
+ if (rulei->priority > max_priority)
+ return -EINVAL;
+
+ /* Unlike in TC, in HW, higher number means higher priority. */
+ *priority = max_priority - rulei->priority;
+ return 0;
+}
+
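A quick worked example with a hypothetical resource value:

/* Hypothetical: KVD_SIZE reports 1024. A TC rule at priority 10 is
 * installed at HW priority 1024 - 10 = 1014, so the numerically
 * lowest (strongest) TC priority becomes the numerically highest
 * (strongest) HW priority and TC match order is preserved.
 */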
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
@@ -157,37 +158,25 @@ struct mlxsw_sp_acl_tcam_group {
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
-};
-
-struct mlxsw_sp_acl_tcam_region {
- struct list_head list; /* Member of a TCAM group */
- struct list_head chunk_list; /* List of chunks under this region */
- struct parman *parman;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_acl_tcam_group *group;
- u16 id; /* ACL ID and region ID - they are same */
- char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
- struct mlxsw_afk_key_info *key_info;
- struct {
- struct parman_prio parman_prio;
- struct parman_item parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- } catchall;
+ bool tmplt_elusage_set;
+ struct mlxsw_afk_element_usage tmplt_elusage;
};
struct mlxsw_sp_acl_tcam_chunk {
struct list_head list; /* Member of a TCAM region */
struct rhash_head ht_node; /* Member of a chunk HT */
unsigned int priority; /* Priority within the region and group */
- struct parman_prio parman_prio;
struct mlxsw_sp_acl_tcam_group *group;
struct mlxsw_sp_acl_tcam_region *region;
unsigned int ref_count;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
};
struct mlxsw_sp_acl_tcam_entry {
- struct parman_item parman_item;
struct mlxsw_sp_acl_tcam_chunk *chunk;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
};
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
@@ -216,13 +205,19 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_sp_acl_tcam_group *group,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
- unsigned int patterns_count)
+ unsigned int patterns_count,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
int err;
group->tcam = tcam;
group->patterns = patterns;
group->patterns_count = patterns_count;
+ if (tmplt_elusage) {
+ group->tmplt_elusage_set = true;
+ memcpy(&group->tmplt_elusage, tmplt_elusage,
+ sizeof(group->tmplt_elusage));
+ }
INIT_LIST_HEAD(&group->region_list);
err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
if (err)
@@ -431,6 +426,15 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
const struct mlxsw_sp_acl_tcam_pattern *pattern;
int i;
+ /* If the template is set, there is no need to look up the
+ * pattern; the template is used directly.
+ */
+ if (group->tmplt_elusage_set) {
+ memcpy(out, &group->tmplt_elusage, sizeof(*out));
+ WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
+ return;
+ }
+
for (i = 0; i < group->patterns_count; i++) {
pattern = &group->patterns[i];
mlxsw_afk_element_usage_fill(out, pattern->elements,
@@ -441,9 +445,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
memcpy(out, elusage, sizeof(*out));
}
-#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
-
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
@@ -455,6 +456,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+ region->key_type,
MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
region->id, region->tcam_region_info);
encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
@@ -477,24 +479,13 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
{
char ptar_pl[MLXSW_REG_PTAR_LEN];
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
+ region->key_type, 0, region->id,
region->tcam_region_info);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
static int
-mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 new_size)
-{
- char ptar_pl[MLXSW_REG_PTAR_LEN];
-
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
- new_size, region->id, region->tcam_region_info);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
-}
-
-static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
@@ -516,193 +507,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
-static int
-mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- char *act_set;
- char *mask;
- char *key;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
- mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
-
- /* Only the first action set belongs here, the rest is in KVD */
- act_set = mlxsw_afa_block_first_set(rulei->act_block);
- mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-
- mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static int
-mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- bool *activity)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- int err;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
- region->tcam_region_info, offset);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
- if (err)
- return err;
- *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
- return 0;
-}
-
-#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
-
-static int
-mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- int err;
-
- parman_prio_init(region->parman, parman_prio,
- MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
- err = parman_item_add(region->parman, parman_prio, parman_item);
- if (err)
- goto err_parman_item_add;
-
- rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
- if (IS_ERR(rulei)) {
- err = PTR_ERR(rulei);
- goto err_rulei_create;
- }
-
- err = mlxsw_sp_acl_rulei_act_continue(rulei);
- if (WARN_ON(err))
- goto err_rulei_act_continue;
-
- err = mlxsw_sp_acl_rulei_commit(rulei);
- if (err)
- goto err_rulei_commit;
-
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- parman_item->index, rulei);
- region->catchall.rulei = rulei;
- if (err)
- goto err_rule_insert;
-
- return 0;
-
-err_rule_insert:
-err_rulei_commit:
-err_rulei_act_continue:
- mlxsw_sp_acl_rulei_destroy(rulei);
-err_rulei_create:
- parman_item_remove(region->parman, parman_prio, parman_item);
-err_parman_item_add:
- parman_prio_fini(parman_prio);
- return err;
-}
-
-static void
-mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
-
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- parman_item->index);
- mlxsw_sp_acl_rulei_destroy(rulei);
- parman_item_remove(region->parman, parman_prio, parman_item);
- parman_prio_fini(parman_prio);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 src_offset, u16 dst_offset, u16 size)
-{
- char prcr_pl[MLXSW_REG_PRCR_LEN];
-
- mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
- region->tcam_region_info, src_offset,
- region->tcam_region_info, dst_offset, size);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
-}
-
-static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
-}
-
-static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
-
- mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
- from_index, to_index, count);
-}
-
-static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_acl_tcam_region_parman_resize,
- .move = mlxsw_sp_acl_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_tcam_region *region;
int err;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
if (!region)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&region->chunk_list);
region->mlxsw_sp = mlxsw_sp;
- region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
- region);
- if (!region->parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
-
region->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(region->key_info)) {
err = PTR_ERR(region->key_info);
@@ -713,6 +533,11 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_region_id_get;
+ err = ops->region_associate(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_associate;
+
+ region->key_type = ops->key_type;
err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
if (err)
goto err_tcam_region_alloc;
@@ -721,23 +546,22 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
- err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
+ err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
if (err)
- goto err_tcam_region_catchall_add;
+ goto err_tcam_region_init;
return region;
-err_tcam_region_catchall_add:
+err_tcam_region_init:
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
+err_tcam_region_associate:
mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
- parman_destroy(region->parman);
-err_parman_create:
kfree(region);
return ERR_PTR(err);
}
@@ -746,12 +570,13 @@ static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
- mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
mlxsw_afk_key_info_put(region->key_info);
- parman_destroy(region->parman);
kfree(region);
}
@@ -826,13 +651,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
return ERR_PTR(-EINVAL);
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
if (!chunk)
return ERR_PTR(-ENOMEM);
chunk->priority = priority;
@@ -844,7 +670,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_chunk_assoc;
- parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
+ ops->chunk_init(chunk->region->priv, chunk->priv, priority);
err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
@@ -854,7 +680,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
return chunk;
err_rhashtable_insert:
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
kfree(chunk);
@@ -865,11 +691,12 @@ static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *chunk)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_group *group = chunk->group;
rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
kfree(chunk);
}
@@ -903,11 +730,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}
+static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->entry_priv_size;
+}
+
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_entry *entry,
struct mlxsw_sp_acl_rule_info *rulei)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
struct mlxsw_sp_acl_tcam_region *region;
int err;
@@ -918,24 +753,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(chunk);
region = chunk->region;
- err = parman_item_add(region->parman, &chunk->parman_prio,
- &entry->parman_item);
- if (err)
- goto err_parman_item_add;
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- entry->parman_item.index,
- rulei);
+ err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
+ entry->priv, rulei);
if (err)
- goto err_rule_insert;
+ goto err_entry_add;
entry->chunk = chunk;
return 0;
-err_rule_insert:
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
-err_parman_item_add:
+err_entry_add:
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
return err;
}
@@ -943,13 +770,11 @@ err_parman_item_add:
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- entry->parman_item.index);
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
+ ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
@@ -958,22 +783,24 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
- entry->parman_item.index,
- activity);
+ return ops->entry_activity_get(mlxsw_sp, region->priv,
+ entry->priv, activity);
}
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -987,10 +814,14 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
@@ -1019,14 +850,16 @@ struct mlxsw_sp_acl_tcam_flower_rule {
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv)
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- struct mlxsw_sp_acl_tcam *tcam = priv;
return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
mlxsw_sp_acl_tcam_patterns,
- MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage);
}
static void
@@ -1070,6 +903,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
+static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
+ mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+}
+
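The size composition mirrors the memory layout (editorial sketch): the core hands the profile code a single rule_priv buffer in which the flower wrapper is immediately followed by the per-ASIC entry area.

/*
 * rule_priv buffer (conceptual layout):
 *
 * +--------------------------------------+----------------------------+
 * | struct mlxsw_sp_acl_tcam_flower_rule | entry priv                 |
 * | (fixed size)                         | (ops->entry_priv_size)     |
 * +--------------------------------------+----------------------------+
 */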
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
@@ -1107,7 +946,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
- .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+ .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
@@ -1118,7 +957,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = {
[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
-static const struct mlxsw_sp_acl_profile_ops *
+const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_acl_profile profile)
{
@@ -1131,10 +970,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
return NULL;
return ops;
}
-
-const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
- .priv_size = sizeof(struct mlxsw_sp_acl_tcam),
- .init = mlxsw_sp_acl_tcam_init,
- .fini = mlxsw_sp_acl_tcam_fini,
- .profile_ops = mlxsw_sp_acl_tcam_profile_ops,
-};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
new file mode 100644
index 000000000000..219a4e26c332
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H
+#define _MLXSW_SPECTRUM_ACL_TCAM_H
+
+#include <linux/list.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+ unsigned long *used_regions; /* bit array */
+ unsigned int max_regions;
+ unsigned long *used_groups; /* bit array */
+ unsigned int max_groups;
+ unsigned int max_group_size;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
+};
+
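The zero-length priv[0] tail is the usual single-allocation idiom: the caller allocates the fixed struct plus the variant's private size in one kzalloc() and the per-ASIC ops then treat priv as their scratch area. A minimal sketch of such a call site (the surrounding caller is assumed, not shown in this patch):

/* Sketch only: size the generic state plus the per-ASIC private area. */
struct mlxsw_sp_acl_tcam *tcam;
int err;

tcam = kzalloc(sizeof(*tcam) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp),
	       GFP_KERNEL);
if (!tcam)
	return -ENOMEM;
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, tcam);	/* ends in ops->init(priv) */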
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority);
+
+struct mlxsw_sp_acl_profile_ops {
+ size_t ruleset_priv_size;
+ int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage);
+ void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ u16 (*ruleset_group_id)(void *ruleset_priv);
+ size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp);
+ int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
+};
+
+const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile);
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
+#define MLXSW_SP_ACL_TCAM_MASK_LEN \
+ (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE)
+
+struct mlxsw_sp_acl_tcam_group;
+
+struct mlxsw_sp_acl_tcam_region {
+ struct list_head list; /* Member of a TCAM group */
+ struct list_head chunk_list; /* List of chunks under this region */
+ struct mlxsw_sp_acl_tcam_group *group;
+ enum mlxsw_reg_ptar_key_type key_type;
+ u16 id; /* ACL ID and region ID - they are same */
+ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+ struct mlxsw_afk_key_info *key_info;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
+};
+
+struct mlxsw_sp_acl_ctcam_region {
+ struct parman *parman;
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp_acl_ctcam_chunk {
+ struct parman_prio parman_prio;
+};
+
+struct mlxsw_sp_acl_ctcam_entry {
+ struct parman_item parman_item;
+};
+
+struct mlxsw_sp_acl_ctcam_region_ops {
+ int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask);
+ void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority);
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk);
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority);
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+static inline unsigned int
+mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return centry->parman_item.index;
+}
+
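Read together, the C-TCAM declarations above imply a strict nesting of lifetimes: region, then chunk, then entry. An editorial usage sketch built only from the signatures declared in this header (locals assumed, error handling elided):

/* Sketch: typical C-TCAM object lifecycle, error paths elided. */
err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &cregion, region, &my_ops);
mlxsw_sp_acl_ctcam_chunk_init(&cregion, &cchunk, priority);
err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &cregion, &cchunk, &centry,
				   rulei, true /* fillup_priority */);
/* The rule is now in HW; its slot is
 * mlxsw_sp_acl_ctcam_entry_offset(&centry).
 */
mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &cregion, &cchunk, &centry);
mlxsw_sp_acl_ctcam_chunk_fini(&cchunk);
mlxsw_sp_acl_ctcam_region_fini(&cregion);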
+enum mlxsw_sp_acl_atcam_region_type {
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB,
+ __MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX,
+};
+
+#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \
+ (__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 1)
+
+struct mlxsw_sp_acl_atcam {
+ struct mlxsw_sp_acl_erp_core *erp_core;
+};
+
+struct mlxsw_sp_acl_atcam_region {
+ struct rhashtable entries_ht; /* A-TCAM only */
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ const struct mlxsw_sp_acl_atcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_atcam *atcam;
+ enum mlxsw_sp_acl_atcam_region_type type;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ void *priv;
+};
+
+struct mlxsw_sp_acl_atcam_entry_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp_acl_atcam_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ struct mlxsw_sp_acl_erp *erp;
+};
+
+static inline struct mlxsw_sp_acl_atcam_region *
+mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion);
+}
+
+static inline struct mlxsw_sp_acl_atcam_entry *
+mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry);
+}
+
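These container_of() helpers let the generic C-TCAM code call back into A-TCAM-aware code without knowing about the embedding. A hypothetical callback (the function name is invented for illustration) climbs back like this:

/* Hypothetical: recover the A-TCAM wrappers from the embedded C-TCAM
 * structures handed over by the generic code.
 */
static int mlxsw_sp2_ctcam_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
					struct mlxsw_sp_acl_ctcam_entry *centry,
					const char *mask)
{
	struct mlxsw_sp_acl_atcam_region *aregion =
		mlxsw_sp_acl_tcam_cregion_aregion(cregion);
	struct mlxsw_sp_acl_atcam_entry *aentry =
		mlxsw_sp_acl_tcam_centry_aentry(centry);

	/* ... e.g. look up an eRP for @mask via mlxsw_sp_acl_erp_get() ... */
	return 0;
}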
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id);
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority);
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk);
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+struct mlxsw_sp_acl_erp;
+
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp);
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 0a9adc5962fb..4327487553c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 0f46775e0307..83c2e1e5f216 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index fd34d0a01073..81465e267b10 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkdis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_CNT_H
#define _MLXSW_SPECTRUM_CNT_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index b6ed7f7c531e..b25048c6c761 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/netdevice.h>
#include <linux/string.h>
@@ -255,6 +224,270 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
return 0;
}
+static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
+ struct dcb_app *app)
+{
+ int prio;
+
+ if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "APP entry with priority value %u is invalid\n",
+ app->priority);
+ return -EINVAL;
+ }
+
+ switch (app->selector) {
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= 64) {
+ netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ /* Warn about any DSCP APP entries with the same protocol ID (PID). */
+ prio = fls(dcb_ieee_getapp_mask(dev, app));
+ if (prio--) {
+ if (prio < app->priority)
+ netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
+ app->priority, app->protocol, prio);
+ else if (prio > app->priority)
+ netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
+ app->priority, app->protocol, prio);
+ }
+ break;
+
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol) {
+ netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ netdev_err(dev, "APP entries with selector %u not supported\n",
+ app->selector);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
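The prio = fls(...); if (prio--) construction above is dense: dcb_ieee_getapp_mask() returns a bitmask of the priorities already mapped to this protocol, fls() gives the 1-based position of the highest set bit (0 for an empty mask), and the postfix decrement both tests for "any existing entry" and converts to the 0-based priority. The same fls() pattern recurs in the map helpers below. In isolation, with illustrative values:

/* Illustration: priorities 2 and 5 are already mapped for this DSCP. */
u8 mask = BIT(5) | BIT(2);
int prio = fls(mask);	/* 6: 1-based index of the highest set bit */

if (prio--)		/* mask non-empty; prio is now 5, 0-based */
	pr_info("highest previously-active priority is %d\n", prio);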
+static u8
+mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u8 prio_mask;
+
+ prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev);
+ if (prio_mask)
+ /* Take the highest configured priority. */
+ return fls(prio_mask) - 1;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ int i;
+
+ dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i])
+ map->map[i] = fls(map->map[i]) - 1;
+ else
+ map->map[i] = default_prio;
+ }
+}
+
+static bool
+mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ bool have_dscp = false;
+ int i;
+
+ dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i]) {
+ map->map[i] = fls64(map->map[i]) - 1;
+ have_dscp = true;
+ }
+ }
+
+ return have_dscp;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpts_pl[MLXSW_REG_QPTS_LEN];
+
+ mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool rewrite_dscp)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qrwe_pl[MLXSW_REG_QRWE_LEN];
+
+ mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port,
+ false, rewrite_dscp);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP;
+ int err;
+
+ if (mlxsw_sp_port->dcb.trust_state == ts)
+ return 0;
+
+ err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp);
+ if (err)
+ goto err_update_qrwe;
+
+ mlxsw_sp_port->dcb.trust_state = ts;
+ return 0;
+
+err_update_qrwe:
+ mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port,
+ mlxsw_sp_port->dcb.trust_state);
+ return err;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);
+}
+
+static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct dcb_ieee_app_prio_map prio_map;
+ struct dcb_ieee_app_dscp_map dscp_map;
+ u8 default_prio;
+ bool have_dscp;
+ int err;
+
+ default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
+ &prio_map);
+
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
+ mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
+ &dscp_map);
+ err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
+ &dscp_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port,
+ &prio_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP);
+ if (err) {
+ /* A failure to set trust DSCP means that the QPDPM and QPDSM
+ * maps installed above are not in effect. And since we are here
+ * attempting to set trust DSCP, we couldn't have attempted to
+ * switch trust to PCP. Thus no cleanup is necessary.
+ */
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n");
+ return err;
+ }
+
+ return 0;
+}
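
/* Editor's note (illustration only, not part of the patch): the ordering
 * that mlxsw_sp_port_dcb_app_update() relies on. The maps are programmed
 * while the port still operates in its old trust state, so they only take
 * effect when the final QPTS write flips the port to trust-DSCP:
 *
 *   no DSCP APP entries?  -> fall back to trust-PCP (L2) and stop
 *   1. QPDPM: DSCP -> priority map (ingress classification)
 *   2. QPDSM: priority -> DSCP map (egress rewrite)
 *   3. QPTS:  switch the port to trust L3 (DSCP)
 */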
+
+static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_dcbnl_app_validate(dev, app);
+ if (err)
+ return err;
+
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ goto err_update;
+
+ return 0;
+
+err_update:
+ dcb_ieee_delapp(dev, app);
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to update DCB APP configuration\n");
+ return 0;
+}
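
For context, a sketch (editor's illustration, not part of the patch) of the APP table entry that ieee_setapp above would receive for "map DSCP 24 to priority 3"; struct dcb_app and the selector constant come from the dcbnl uapi:

struct dcb_app app = {
	.selector = IEEE_8021QAZ_APP_SEL_DSCP,	/* DSCP selector */
	.protocol = 24,				/* DSCP code point */
	.priority = 3,				/* internal switch priority */
};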
+
static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
@@ -394,6 +627,8 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
.ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlxsw_sp_dcbnl_ieee_setapp,
+ .ieee_delapp = mlxsw_sp_dcbnl_ieee_delapp,
.getdcbx = mlxsw_sp_dcbnl_getdcbx,
.setdcbx = mlxsw_sp_dcbnl_setdcbx,
@@ -467,6 +702,7 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
if (err)
goto err_port_pfc_init;
+ mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP;
mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index f56fa18d6b26..41e607a14846 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
index 815d543cf114..e689576231ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PIPELINE_H_
#define _MLXSW_PIPELINE_H_
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 54262af4e98f..715d24ff937e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 89dbf569dff5..ebd1b24ebaa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -48,7 +17,8 @@
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tcf_exts *exts)
+ struct tcf_exts *exts,
+ struct netlink_ext_ack *extack)
{
const struct tc_action *a;
LIST_HEAD(actions);
@@ -58,7 +28,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return 0;
/* Count action is inserted first */
- err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
if (err)
return err;
@@ -66,16 +36,22 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_ok(a)) {
err = mlxsw_sp_acl_rulei_act_terminate(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
return err;
+ }
} else if (is_tcf_gact_shot(a)) {
err = mlxsw_sp_acl_rulei_act_drop(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
+ }
} else if (is_tcf_gact_trap(a)) {
err = mlxsw_sp_acl_rulei_act_trap(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
return err;
+ }
} else if (is_tcf_gact_goto_chain(a)) {
u32 chain_index = tcf_gact_goto_chain_index(a);
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -89,8 +65,10 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
return err;
+ }
} else if (is_tcf_mirred_egress_redirect(a)) {
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
@@ -99,20 +77,21 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
- fid_index);
+ fid_index, extack);
if (err)
return err;
out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
- out_dev);
+ out_dev, extack);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
struct net_device *out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
- block, out_dev);
+ block, out_dev,
+ extack);
if (err)
return err;
} else if (is_tcf_vlan(a)) {
@@ -123,8 +102,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
action, vid,
- proto, prio);
+ proto, prio, extack);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
}
@@ -144,10 +124,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->mask);
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
- ntohl(key->src), ntohl(mask->src));
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
- ntohl(key->dst), ntohl(mask->dst));
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &key->src,
+ (char *) &mask->src, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &key->dst,
+ (char *) &mask->dst, 4);
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
@@ -161,24 +143,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->mask);
- size_t addr_half_size = sizeof(key->src) / 2;
-
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- &key->src.s6_addr[0],
- &mask->src.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- &key->src.s6_addr[addr_half_size],
- &mask->src.s6_addr[addr_half_size],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
- &key->dst.s6_addr[0],
- &mask->dst.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
- &key->dst.s6_addr[addr_half_size],
- &mask->dst.s6_addr[addr_half_size],
- addr_half_size);
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &key->src.s6_addr[0x0],
+ &mask->src.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &key->src.s6_addr[0x4],
+ &mask->src.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &key->src.s6_addr[0x8],
+ &mask->src.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &key->src.s6_addr[0xC],
+ &mask->src.s6_addr[0xC], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &key->dst.s6_addr[0x0],
+ &mask->dst.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &key->dst.s6_addr[0x4],
+ &mask->dst.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &key->dst.s6_addr[0x8],
+ &mask->dst.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &key->dst.s6_addr[0xC],
+ &mask->dst.s6_addr[0xC], 4);
}
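
/* Editor's note (illustration only, not part of the patch): s6_addr[]
 * holds the IPv6 address in network byte order, so s6_addr[0x0..0x3]
 * carries the most significant 32 bits and maps to the *_IP_96_127 key
 * element, continuing down to s6_addr[0xC..0xF] for *_IP_0_31.
 */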
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
@@ -192,6 +181,7 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return 0;
if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
return -EINVAL;
}
@@ -220,6 +210,7 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
return 0;
if (ip_proto != IPPROTO_TCP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
return -EINVAL;
}
@@ -246,6 +237,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
return 0;
if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
return -EINVAL;
}
@@ -290,6 +282,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
}
@@ -340,13 +333,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->mask);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_DMAC,
- key->dst, mask->dst,
- sizeof(key->dst));
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ key->dst, mask->dst, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_SMAC,
- key->src, mask->src,
- sizeof(key->src));
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ key->dst + 2, mask->dst + 2, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ key->src, mask->src, 2);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
+ key->src + 2, mask->src + 2, 4);
}
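
/* Editor's note (illustration only, not part of the patch): the 48-bit
 * MAC is sliced the same way as the IPv6 addresses above - the first two
 * bytes (bits 47..32) go to the *_32_47 element and the remaining four
 * bytes to *_0_31.
 */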
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -358,6 +355,11 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
+
+ if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
+ return -EOPNOTSUPP;
+ }
if (mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
@@ -387,7 +389,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
+ f->common.extack);
}
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -401,11 +404,12 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
- rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
+ f->common.extack);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_rule_create;
@@ -445,7 +449,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (IS_ERR(ruleset))
return;
@@ -471,7 +475,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
@@ -493,3 +497,40 @@ err_rule_get_stats:
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return err;
}
+
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule_info rulei;
+ int err;
+
+ memset(&rulei, 0, sizeof(rulei));
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
+ if (err)
+ return err;
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ &rulei.values.elusage);
+
+ /* keep the reference to the ruleset */
+ return PTR_ERR_OR_ZERO(ruleset);
+}
+
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
+ if (IS_ERR(ruleset))
+ return;
+ /* put the reference to the ruleset kept in create */
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+}
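
An editor's trace (illustration only, not part of the patch) of the ruleset reference counts across the template lifetime, assuming mlxsw_sp_acl_ruleset_get()/put() are refcounted as the comments above indicate:

/*
 * tmplt_create:   ruleset_get()   0 -> 1  (reference kept on purpose)
 * tmplt_destroy:  ruleset_get()   1 -> 2  (lookup)
 *                 ruleset_put()   2 -> 1  (drop the lookup reference)
 *                 ruleset_put()   1 -> 0  (drop the reference kept in
 *                                          create; ruleset is freed)
 */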
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 98d896c14b87..00db26c96bf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
- * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 6909d867bb59..bb5c4d4a5872 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
- * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_IPIP_H_
#define _MLXSW_IPIP_H_
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index fe4327f547d2..1e4cdee7bcd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -1,454 +1,75 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
#include "spectrum.h"
-#define MLXSW_SP_KVDL_SINGLE_BASE 0
-#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
-#define MLXSW_SP_KVDL_SINGLE_END \
- (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
-
-#define MLXSW_SP_KVDL_CHUNKS_BASE \
- (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
-#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
-#define MLXSW_SP_KVDL_CHUNKS_END \
- (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
- (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
- (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
- (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
-#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
-
-struct mlxsw_sp_kvdl_part_info {
- unsigned int part_index;
- unsigned int start_index;
- unsigned int end_index;
- unsigned int alloc_size;
- enum mlxsw_sp_resource_id resource_id;
-};
-
-enum mlxsw_sp_kvdl_part_id {
- MLXSW_SP_KVDL_PART_ID_SINGLE,
- MLXSW_SP_KVDL_PART_ID_CHUNKS,
- MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
-};
-
-#define MLXSW_SP_KVDL_PART_INFO(id) \
-[MLXSW_SP_KVDL_PART_ID_##id] = { \
- .start_index = MLXSW_SP_KVDL_##id##_BASE, \
- .end_index = MLXSW_SP_KVDL_##id##_END, \
- .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
- .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
-}
-
-static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
- MLXSW_SP_KVDL_PART_INFO(SINGLE),
- MLXSW_SP_KVDL_PART_INFO(CHUNKS),
- MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
-};
-
-#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
-
-struct mlxsw_sp_kvdl_part {
- struct mlxsw_sp_kvdl_part_info info;
- unsigned long usage[0]; /* Entries */
-};
-
struct mlxsw_sp_kvdl {
- struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
};
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
- unsigned int alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (alloc_size <= part->info.alloc_size &&
- (!min_part ||
- part->info.alloc_size <= min_part->info.alloc_size))
- min_part = part;
- }
-
- return min_part ?: ERR_PTR(-ENOBUFS);
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
-{
- struct mlxsw_sp_kvdl_part *part;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (kvdl_index >= part->info.start_index &&
- kvdl_index <= part->info.end_index)
- return part;
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-static u32
-mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
- unsigned int entry_index)
-{
- return info->start_index + entry_index * info->alloc_size;
-}
-
-static unsigned int
-mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
- u32 kvdl_index)
-{
- return (kvdl_index - info->start_index) / info->alloc_size;
-}
-
-static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
- u32 *p_kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index, nr_entries;
-
- nr_entries = (info->end_index - info->start_index + 1) /
- info->alloc_size;
- entry_index = find_first_zero_bit(part->usage, nr_entries);
- if (entry_index == nr_entries)
- return -ENOBUFS;
- __set_bit(entry_index, part->usage);
-
- *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
- u32 kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index;
-
- entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
- __clear_bit(entry_index, part->usage);
-}
-
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- /* Find partition with smallest allocation size satisfying the
- * requested size.
- */
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
-}
-
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
- if (IS_ERR(part))
- return;
- mlxsw_sp_kvdl_part_free(part, entry_index);
-}
-
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- *p_alloc_size = part->info.alloc_size;
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
- struct mlxsw_sp_kvdl_part *part_prev,
- unsigned int size)
-{
-
- if (!part_prev) {
- part->info.end_index = size - 1;
- } else {
- part->info.start_index = part_prev->info.end_index + 1;
- part->info.end_index = part->info.start_index + size - 1;
- }
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_kvdl_part_info *info,
- struct mlxsw_sp_kvdl_part *part_prev)
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl_part *part;
- bool need_update = true;
- unsigned int nr_entries;
- size_t usage_size;
- u64 resource_size;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
+ struct mlxsw_sp_kvdl *kvdl;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
- if (err) {
- need_update = false;
- resource_size = info->end_index - info->start_index + 1;
- }
-
- nr_entries = div_u64(resource_size, info->alloc_size);
- usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
- part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
- if (!part)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&part->info, info, sizeof(part->info));
-
- if (need_update)
- mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
- return part;
-}
-
-static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
-{
- kfree(part);
-}
-
-static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- const struct mlxsw_sp_kvdl_part_info *info;
- struct mlxsw_sp_kvdl_part *part_prev = NULL;
- int err, i;
+ kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
+ GFP_KERNEL);
+ if (!kvdl)
+ return -ENOMEM;
+ kvdl->kvdl_ops = kvdl_ops;
+ mlxsw_sp->kvdl = kvdl;
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- info = &mlxsw_sp_kvdl_parts_info[i];
- kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
- part_prev);
- if (IS_ERR(kvdl->parts[i])) {
- err = PTR_ERR(kvdl->parts[i]);
- goto err_kvdl_part_init;
- }
- part_prev = kvdl->parts[i];
- }
+ err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
+ if (err)
+ goto err_init;
return 0;
-err_kvdl_part_init:
- for (i--; i >= 0; i--)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
+err_init:
+ kfree(kvdl);
return err;
}
-static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
-}
-
-static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int nr_entries;
- int bit = -1;
- u64 occ = 0;
-
- nr_entries = (info->end_index -
- info->start_index + 1) /
- info->alloc_size;
- while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
- < nr_entries)
- occ += info->alloc_size;
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- u64 occ = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
-
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
+ kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ kfree(kvdl);
}
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- static struct devlink_resource_size_params size_params;
- u32 kvdl_max_size;
- int err;
-
- kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- return err;
+ return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
}
-int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl *kvdl;
- int err;
-
- kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
- if (!kvdl)
- return -ENOMEM;
- mlxsw_sp->kvdl = kvdl;
-
- err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
- if (err)
- goto err_kvdl_parts_init;
-
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp_kvdl_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp_kvdl_single_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp_kvdl_chunks_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp_kvdl_large_chunks_occ_get,
- mlxsw_sp);
-
- return 0;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
-err_kvdl_parts_init:
- kfree(mlxsw_sp->kvdl);
- return err;
+ kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
+ entry_count, entry_index);
}
-void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
- mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
- kfree(mlxsw_sp->kvdl);
+ return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_alloc_count);
}
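
For orientation, the backend shape implied by the kvdl_ops calls in this file, as an editor's sketch (field names inferred from the kvdl_ops-> uses above; "my_kvdl_*" are hypothetical names, the real definitions live elsewhere in the driver):

static const struct mlxsw_sp_kvdl_ops my_kvdl_ops = {
	.priv_size        = sizeof(struct my_kvdl_priv), /* kvdl->priv size */
	.init             = my_kvdl_init,
	.fini             = my_kvdl_fini,
	.alloc            = my_kvdl_alloc,
	.free             = my_kvdl_free,
	.alloc_size_query = my_kvdl_alloc_size_query,
};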
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index a82539609d49..54275624718b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/rhashtable.h>
#include <net/ipv6.h>
@@ -1075,6 +1044,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
cancel_delayed_work_sync(&mr->stats_update_dw);
- mr->mr_ops->fini(mr->priv);
+ mr->mr_ops->fini(mlxsw_sp, mr->priv);
kfree(mr);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
index 7c864a86811d..3cde3671fe35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_MCROUTER_H
#define _MLXSW_SPECTRUM_MCROUTER_H
@@ -46,15 +15,6 @@ enum mlxsw_sp_mr_route_action {
MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
};
-enum mlxsw_sp_mr_route_prio {
- MLXSW_SP_MR_ROUTE_PRIO_SG,
- MLXSW_SP_MR_ROUTE_PRIO_STARG,
- MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
- __MLXSW_SP_MR_ROUTE_PRIO_MAX
-};
-
-#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
-
struct mlxsw_sp_mr_route_key {
int vrid;
enum mlxsw_sp_l3proto proto;
@@ -101,7 +61,7 @@ struct mlxsw_sp_mr_ops {
u16 erif_index);
void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv);
- void (*fini)(void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
};
struct mlxsw_sp_mr;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 4f4c0d311883..346f4a5fe053 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -1,41 +1,9 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "spectrum_mr_tcam.h"
#include "reg.h"
@@ -43,15 +11,8 @@
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
-struct mlxsw_sp_mr_tcam_region {
- struct mlxsw_sp *mlxsw_sp;
- enum mlxsw_reg_rtar_key_type rtar_key_type;
- struct parman *parman;
- struct parman_prio *parman_prios;
-};
-
struct mlxsw_sp_mr_tcam {
- struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+ void *priv;
};
/* This struct maps to one RIGR2 register entry */
@@ -84,8 +45,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
INIT_LIST_HEAD(&erif_list->erif_sublists);
}
-#define MLXSW_SP_KVDL_RIGR2_SIZE 1
-
static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
@@ -96,8 +55,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
if (!erif_sublist)
return ERR_PTR(-ENOMEM);
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
- &erif_sublist->rigr2_kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, &erif_sublist->rigr2_kvdl_index);
if (err) {
kfree(erif_sublist);
return ERR_PTR(err);
@@ -112,7 +71,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
list_del(&erif_sublist->list);
- mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, erif_sublist->rigr2_kvdl_index);
kfree(erif_sublist);
}
@@ -221,12 +181,11 @@ struct mlxsw_sp_mr_tcam_route {
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
u32 counter_index;
- struct parman_item parman_item;
- struct parman_prio *parman_prio;
enum mlxsw_sp_mr_route_action action;
struct mlxsw_sp_mr_route_key key;
u16 irif_index;
u16 min_mtu;
+ void *priv;
};
static struct mlxsw_afa_block *
@@ -297,60 +256,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
mlxsw_afa_block_destroy(afa_block);
}
-static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
- struct parman_item *parman_item,
- struct mlxsw_sp_mr_route_key *key,
- struct mlxsw_afa_block *afa_block)
-{
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- ntohl(key->group.addr4),
- ntohl(key->group_mask.addr4),
- ntohl(key->source.addr4),
- ntohl(key->source_mask.addr4),
- mlxsw_afa_block_first_set(afa_block));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- key->group.addr6,
- key->group_mask.addr6,
- key->source.addr6,
- key->source_mask.addr6,
- mlxsw_afa_block_first_set(afa_block));
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
-static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
- struct mlxsw_sp_mr_route_key *key,
- struct parman_item *parman_item)
-{
- struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, 0, 0, 0, 0, NULL);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, zero_addr, zero_addr,
- zero_addr, zero_addr, NULL);
- break;
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list,
@@ -370,51 +275,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static struct mlxsw_sp_mr_tcam_region *
-mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
- enum mlxsw_sp_l3proto proto)
-{
- return &mr_tcam->tcam_regions[proto];
-}
-
-static int
-mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route,
- enum mlxsw_sp_mr_route_prio prio)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
- int err;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
- err = parman_item_add(tcam_region->parman,
- &tcam_region->parman_prios[prio],
- &route->parman_item);
- if (err)
- return err;
-
- route->parman_prio = &tcam_region->parman_prios[prio];
- return 0;
-}
-
-static void
-mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
-
- parman_item_remove(tcam_region->parman,
- route->parman_prio, &route->parman_item);
-}
-
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_params *route_params)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
int err;
@@ -448,22 +314,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_create;
}
- /* Allocate place in the TCAM */
- err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
- route_params->prio);
- if (err)
- goto err_parman_item_add;
+ route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
+ if (!route->priv) {
+ err = -ENOMEM;
+ goto err_route_priv_alloc;
+ }
/* Write the route to the TCAM */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, route->afa_block);
+ err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
+ &route->key, route->afa_block,
+ route_params->prio);
if (err)
- goto err_route_replace;
+ goto err_route_create;
return 0;
-err_route_replace:
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
-err_parman_item_add:
+err_route_create:
+ kfree(route->priv);
+err_route_priv_alloc:
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -476,12 +343,12 @@ err_counter_alloc:
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
void *priv, void *route_priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
- &route->key, &route->parman_item);
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+ ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
+ kfree(route->priv);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
@@ -502,6 +369,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv,
enum mlxsw_sp_mr_route_action route_action)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -516,8 +384,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -534,6 +401,7 @@ err:
static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 min_mtu)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -549,8 +417,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -596,6 +463,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 erif_index)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_erif_sublist *erif_sublist;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
@@ -630,8 +498,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -653,6 +520,7 @@ static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_info *route_info)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
@@ -677,8 +545,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -699,167 +566,36 @@ err_erif_populate:
return err;
}
-#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
-
-static int
-mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
- mr_tcam_region->rtar_key_type,
- MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void
-mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
- mr_tcam_region->rtar_key_type, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
- mr_tcam_region->rtar_key_type, new_count);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rrcr_pl[MLXSW_REG_RRCR_LEN];
-
- mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
- from_index, count,
- mr_tcam_region->rtar_key_type, to_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
-}
-
-static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_mr_tcam_region_parman_resize,
- .move = mlxsw_sp_mr_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
-static int
-mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
- enum mlxsw_reg_rtar_key_type rtar_key_type)
-{
- struct parman_prio *parman_prios;
- struct parman *parman;
- int err;
- int i;
-
- mr_tcam_region->rtar_key_type = rtar_key_type;
- mr_tcam_region->mlxsw_sp = mlxsw_sp;
-
- err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
- if (err)
- return err;
-
- parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
- mr_tcam_region);
- if (!parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
- mr_tcam_region->parman = parman;
-
- parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
- sizeof(*parman_prios), GFP_KERNEL);
- if (!parman_prios) {
- err = -ENOMEM;
- goto err_parman_prios_alloc;
- }
- mr_tcam_region->parman_prios = parman_prios;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_init(mr_tcam_region->parman,
- &mr_tcam_region->parman_prios[i], i);
- return 0;
-
-err_parman_prios_alloc:
- parman_destroy(parman);
-err_parman_create:
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
- return err;
-}
-
-static void
-mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- int i;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_fini(&mr_tcam_region->parman_prios[i]);
- kfree(mr_tcam_region->parman_prios);
- parman_destroy(mr_tcam_region->parman);
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
-}
-
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- u32 rtar_key;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
- !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
return -EIO;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV4],
- rtar_key);
- if (err)
- return err;
+ mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
+ if (!mr_tcam->priv)
+ return -ENOMEM;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV6],
- rtar_key);
+ err = ops->init(mlxsw_sp, mr_tcam->priv);
if (err)
- goto err_ipv6_region_init;
-
+ goto err_init;
return 0;
-err_ipv6_region_init:
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+err_init:
+ kfree(mr_tcam->priv);
return err;
}
-static void mlxsw_sp_mr_tcam_fini(void *priv)
+static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ ops->fini(mr_tcam->priv);
+ kfree(mr_tcam->priv);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
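For reference, the callback table that the rewritten functions above dispatch through can be sketched from its call sites alone; the following is a hedged reconstruction inferred from ops->init/fini/route_create/route_destroy/route_update uses in this file, not the verbatim definition from spectrum.h (field order and exact spellings are assumptions):

/* Reconstructed from the call sites above; the two sizes tell the
 * common code how much backend-private state to kzalloc() per TCAM
 * instance and per route.
 */
struct mlxsw_sp_mr_tcam_ops {
	size_t priv_size;
	size_t route_priv_size;
	int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
	void (*fini)(void *priv);
	int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
			    void *route_priv,
			    struct mlxsw_sp_mr_route_key *key,
			    struct mlxsw_afa_block *afa_block,
			    enum mlxsw_sp_mr_route_prio prio);
	void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
			      void *route_priv,
			      struct mlxsw_sp_mr_route_key *key);
	int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			    struct mlxsw_sp_mr_route_key *key,
			    struct mlxsw_afa_block *afa_block);
};

This indirection is what allows the parman/RTAR region management deleted above to move behind a per-ASIC backend instead of living in the common multicast routing code.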
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
index f9b59ee25406..3c84151b4c33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index cad603c35271..bdf53cf350f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 6aaaf3d9ba31..3a96307f51b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1,39 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
- * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -48,6 +14,7 @@
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
+#include <linux/if_macvlan.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -60,6 +27,7 @@
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
+#include <net/switchdev.h>
#include "spectrum.h"
#include "core.h"
@@ -163,7 +131,9 @@ struct mlxsw_sp_rif_ops {
const struct mlxsw_sp_rif_params *params);
int (*configure)(struct mlxsw_sp_rif *rif);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
- struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
+ void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
@@ -342,10 +312,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
struct mlxsw_sp_prefix_usage {
@@ -1109,7 +1075,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, &tunnel_index);
if (err)
return err;
@@ -1125,7 +1092,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
- mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, fib_entry->decap.tunnel_index);
}
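The two hunks above switch to a typed KVDL allocation API. The prototypes below are a sketch inferred from the call sites in this file (here and in the nexthop-group hunks further down), not the actual declarations from spectrum.h:

/* KVDL entries are now tagged with an entry type and an explicit
 * count, and the same count must be handed back on free so the
 * matching sub-allocator can reclaim the span.
 */
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
			enum mlxsw_sp_kvdl_entry_type type,
			unsigned int entry_count, u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
			enum mlxsw_sp_kvdl_entry_type type,
			unsigned int entry_count, int entry_index);
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
				    enum mlxsw_sp_kvdl_entry_type type,
				    unsigned int entry_count,
				    unsigned int *p_alloc_count);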
static struct mlxsw_sp_fib_node *
@@ -2434,17 +2402,48 @@ static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
kfree(net_work);
}
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ __mlxsw_sp_router_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_router_schedule_work(struct net *net,
+ struct notifier_block *nb,
+ void (*cb)(struct work_struct *))
+{
+ struct mlxsw_sp_netevent_work *net_work;
+ struct mlxsw_sp_router *router;
+
+ if (!net_eq(net, &init_net))
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ INIT_WORK(&net_work->work, cb);
+ net_work->mlxsw_sp = router->mlxsw_sp;
+ mlxsw_core_schedule_work(&net_work->work);
+ return NOTIFY_DONE;
+}
+
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp_router *router;
struct mlxsw_sp *mlxsw_sp;
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
- struct net *net;
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -2498,20 +2497,12 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
break;
case NETEVENT_IPV4_MPATH_HASH_UPDATE:
case NETEVENT_IPV6_MPATH_HASH_UPDATE:
- net = ptr;
-
- if (!net_eq(net, &init_net))
- return NOTIFY_DONE;
-
- net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
- if (!net_work)
- return NOTIFY_BAD;
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_mp_hash_event_work);
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
- INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
- net_work->mlxsw_sp = router->mlxsw_sp;
- mlxsw_core_schedule_work(&net_work->work);
- break;
+ case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_update_priority_work);
}
return NOTIFY_DONE;
@@ -3165,8 +3156,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
* by the device and make sure the request can be satisfied.
*/
mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
- err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
- &alloc_size);
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ *p_adj_grp_size, &alloc_size);
if (err)
return err;
/* It is possible the allocation results in more allocated
@@ -3278,7 +3270,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* No valid allocation size available. */
goto set_trap;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ ecmp_size, &adj_index);
if (err) {
/* We ran out of KVD linear space, just set the
* trap and let everything flow through kernel.
@@ -3313,7 +3306,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
old_adj_index, old_ecmp_size);
- mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ old_ecmp_size, old_adj_index);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
goto set_trap;
@@ -3335,7 +3329,8 @@ set_trap:
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
if (old_adj_index_valid)
- mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nh_grp->ecmp_size, nh_grp->adj_index);
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -4756,6 +4751,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
kfree(mlxsw_sp_rt6);
}
+static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
+{
+ /* RTF_CACHE routes are ignored */
+ return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+}
+
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
@@ -4765,11 +4766,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool append)
+ const struct fib6_info *nrt, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
- if (!append)
+ if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
return NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4784,7 +4785,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
break;
if (rt->fib6_metric < nrt->fib6_metric)
continue;
- if (rt->fib6_metric == nrt->fib6_metric)
+ if (rt->fib6_metric == nrt->fib6_metric &&
+ mlxsw_sp_fib6_rt_can_mp(rt))
return fib6_entry;
if (rt->fib6_metric > nrt->fib6_metric)
break;
@@ -5163,7 +5165,7 @@ static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
const struct fib6_info *nrt, bool replace)
{
- struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5172,13 +5174,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
continue;
if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
- if (replace && rt->fib6_metric == nrt->fib6_metric)
- return fib6_entry;
+ if (replace && rt->fib6_metric == nrt->fib6_metric) {
+ if (mlxsw_sp_fib6_rt_can_mp(rt) ==
+ mlxsw_sp_fib6_rt_can_mp(nrt))
+ return fib6_entry;
+ if (mlxsw_sp_fib6_rt_can_mp(nrt))
+ fallback = fallback ?: fib6_entry;
+ }
if (rt->fib6_metric > nrt->fib6_metric)
- return fib6_entry;
+ return fallback ?: fib6_entry;
}
- return NULL;
+ return fallback;
}
static int
@@ -5304,8 +5311,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info *rt, bool replace,
- bool append)
+ struct fib6_info *rt, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -5331,7 +5337,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
/* Before creating a new entry, try to append route to an existing
* multipath entry.
*/
- fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
+ fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
if (fib6_entry) {
err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
if (err)
@@ -5339,14 +5345,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
return 0;
}
- /* We received an append event, yet did not find any route to
- * append to.
- */
- if (WARN_ON(append)) {
- err = -EINVAL;
- goto err_fib6_entry_append;
- }
-
fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
if (IS_ERR(fib6_entry)) {
err = PTR_ERR(fib6_entry);
@@ -5364,7 +5362,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
err_fib6_node_entry_link:
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
-err_fib6_entry_append:
err_fib6_entry_nexthop_add:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
return err;
@@ -5715,7 +5712,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace, append;
+ bool replace;
int err;
rtnl_lock();
@@ -5726,10 +5723,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
err = mlxsw_sp_router_fib6_add(mlxsw_sp,
- fib_work->fen6_info.rt, replace,
- append);
+ fib_work->fen6_info.rt, replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
@@ -5967,7 +5962,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct mlxsw_sp_rif *
+struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
@@ -6024,6 +6019,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
!list_empty(&inet6_dev->addr_list))
addr_list_empty = false;
+ /* macvlans do not have a RIF, but rather piggyback on the
+ * RIF of their lower device.
+ */
+ if (netif_is_macvlan(dev) && addr_list_empty)
+ return true;
+
if (rif && addr_list_empty &&
!netif_is_l3_slave(rif->dev))
return true;
@@ -6125,6 +6126,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
+{
+ return rif->fid;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -6162,7 +6168,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->ops = ops;
if (ops->fid_get) {
- fid = ops->fid_get(rif);
+ fid = ops->fid_get(rif, extack);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto err_fid_get;
@@ -6267,7 +6273,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
}
/* FID was already created, just take a reference */
- fid = rif->ops->fid_get(rif);
+ fid = rif->ops->fid_get(rif, extack);
err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
if (err)
goto err_fid_port_vid_map;
@@ -6432,6 +6438,123 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
+{
+ u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp4, mask);
+}
+
+static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
+{
+ u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp6, mask);
+}
+
+static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const u8 *mac, bool adding)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u8 vrrp_id = adding ? mac[5] : 0;
+ int err;
+
+ if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
+ !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
+ return 0;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
+ mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
+ else
+ mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
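mlxsw_sp_rif_vrrp_op() above leans on the RFC 5798 virtual-router MAC convention: IPv4 VRRP groups own 00:00:5E:00:01:{VRID} and IPv6 groups 00:00:5E:00:02:{VRID}, so the VRID is simply the last octet of the MAC. A standalone illustration (hypothetical helper, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Return true and extract the VRID if mac is an IPv4 VRRP virtual MAC
 * (00:00:5e:00:01:XX); mirrors the masked compare the patch does with
 * ether_addr_equal_masked().
 */
static bool vrrp4_mac_to_vrid(const uint8_t mac[6], uint8_t *vrid)
{
	static const uint8_t prefix[5] = { 0x00, 0x00, 0x5e, 0x00, 0x01 };

	if (memcmp(mac, prefix, sizeof(prefix)))
		return false;
	*vrid = mac[5];
	return true;
}

On removal the patch writes VRID 0 back to the RITR register, which is why vrrp_id is forced to 0 when adding == false.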
+static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ if (!rif) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
+ macvlan_dev->dev_addr, true);
+ if (err)
+ goto err_rif_vrrp_add;
+
+ /* Make sure the bridge driver does not have this MAC pointing at
+ * some other port.
+ */
+ if (rif->ops->fdb_del)
+ rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
+
+ return 0;
+
+err_rif_vrrp_add:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ return err;
+}
+
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ /* If we do not have a RIF, then we already took care of
+ * removing the macvlan's MAC during RIF deletion.
+ */
+ if (!rif)
+ return;
+ mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
+ false);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp;
+
+ mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ break;
+ }
+
+ return 0;
+}
+
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -6444,6 +6567,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
+ else if (netif_is_macvlan(dev))
+ return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
else
return 0;
}
@@ -6684,7 +6809,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
int err = 0;
- if (!mlxsw_sp)
+ /* We do not create a RIF for a macvlan, but only use it to
+ * direct more MAC addresses to the router.
+ */
+ if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
switch (event) {
@@ -6705,6 +6833,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
+{
+ struct mlxsw_sp_rif *rif = data;
+
+ if (!netif_is_macvlan(dev))
+ return 0;
+
+ return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
+{
+ if (!netif_is_macvlan_port(rif->dev))
+ return 0;
+
+ netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
+ return netdev_walk_all_upper_dev_rcu(rif->dev,
+ __mlxsw_sp_rif_macvlan_flush, rif);
+}
+
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
@@ -6771,11 +6920,13 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_rif_subport_op(rif, false);
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
@@ -6857,6 +7008,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6865,19 +7017,49 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
- u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+ u16 vid;
+ int err;
+
+ if (is_vlan_dev(rif->dev)) {
+ vid = vlan_dev_vlan_id(rif->dev);
+ } else {
+ err = br_vlan_get_pvid(rif->dev, &vid);
+ if (err < 0 || !vid) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
+ return ERR_PTR(-EINVAL);
+ }
+ }
return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
+static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *br_dev;
+ struct net_device *dev;
+
+ br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
+ dev = br_fdb_find_port(br_dev, mac, vid);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
@@ -6929,6 +7111,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6937,17 +7120,33 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *dev;
+
+ dev = br_fdb_find_port(rif->dev, mac, 0);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
.type = MLXSW_SP_RIF_TYPE_FID,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_fid_configure,
.deconfigure = mlxsw_sp_rif_fid_deconfigure,
.fid_get = mlxsw_sp_rif_fid_fid_get,
+ .fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
static struct mlxsw_sp_rif_ipip_lb *
@@ -7172,6 +7371,7 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
@@ -7182,7 +7382,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
- mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
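The rgcr hunk above, together with the NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE case added to the netevent handler earlier, makes the RGCR "update switch priority" bit follow the net.ipv4.ip_forward_update_priority sysctl: flipping the sysctl fires the netevent, which schedules __mlxsw_sp_router_init() to re-pack RGCR. A condensed sketch of that path, assuming only the symbols visible in this diff:

/* Re-apply RGCR so the usp bit tracks the sysctl; both calls appear
 * verbatim in the hunks above, this merely strings them together.
 */
static void reapply_fwd_update_priority(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}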
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index a01edcf56797..1a60391daafa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_ROUTER_H_
#define _MLXSW_ROUTER_H_
@@ -66,6 +35,8 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry;
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -75,6 +46,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3d187d88cc7c..d965fd275c90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1,41 +1,11 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.c
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
+#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>
@@ -254,7 +224,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
struct list_head *iter;
netdev_for_each_lower_dev(lag_dev, dev, iter)
- if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
+ if (netif_carrier_ok(dev) &&
+ net_lag_port_dev_txable(dev) &&
+ mlxsw_sp_port_dev_check(dev))
return dev;
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 14a6de904db1..5e04252f2a11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -1,35 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.h
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_SPAN_H
#define _MLXSW_SPECTRUM_SPAN_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index eea5666a86b2..0d8444aaba01 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -1135,6 +1102,39 @@ err_port_vlan_set:
return err;
}
+static int
+mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ u16 pvid;
+ u16 vid;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
+ if (!rif)
+ return 0;
+ fid = mlxsw_sp_rif_fid(rif);
+ pvid = mlxsw_sp_fid_8021q_vid(fid);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
@@ -1146,8 +1146,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
- if (netif_is_bridge_master(orig_dev))
- return -EOPNOTSUPP;
+ if (netif_is_bridge_master(orig_dev)) {
+ int err = 0;
+
+ if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
+ br_vlan_enabled(orig_dev) &&
+ switchdev_trans_ph_prepare(trans))
+ err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
+ orig_dev, vlan);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
if (switchdev_trans_ph_prepare(trans))
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
index bc44d5effc28..c218e10bd835 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
@@ -1,35 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index c698ec4fd9d4..bcf2e79a21c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/switchib.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3922c1cfe5f5..2d4f213e154d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 399e9d6993f7..53020724c2f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -1,38 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/trap.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_TRAP_H
#define _MLXSW_TRAP_H
@@ -63,6 +31,7 @@ enum {
MLXSW_TRAP_ID_LBERROR = 0x54,
MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+ MLXSW_TRAP_ID_IPV4_VRRP = 0x59,
MLXSW_TRAP_ID_RPF = 0x5C,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
@@ -78,6 +47,7 @@ enum {
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
+ MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index fdf94720ca62..da51dd9d5e44 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/txheader.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index f3e9dd47b56f..0e9719fbc624 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -30,6 +30,7 @@
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -1078,7 +1079,7 @@ static void ks_stop_rx(struct ks_net *ks)
} /* ks_stop_rx */
-static unsigned long const ethernet_polynomial = 0x04c11db7U;
+static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
static unsigned long ether_gen_crc(int length, u8 *data)
{
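For reference, CRC32_POLY_BE is the classic Ethernet polynomial 0x04c11db7 processed MSB-first. A minimal standalone sketch of the big-endian CRC-32 this constant drives (illustration only, not part of the patch):

#include <stdint.h>
#include <stddef.h>

/* MSB-first CRC-32 using the Ethernet polynomial, the same value that
 * <linux/crc32poly.h> now exports as CRC32_POLY_BE (0x04c11db7). */
static uint32_t crc32_be_bitwise(uint32_t crc, const uint8_t *data, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint32_t)data[i] << 24;	/* feed next byte, MSB first */
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x04c11db7u
						  : crc << 1;
	}
	return crc;
}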
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index b72d1bd11296..ebbdfb908745 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port)
*/
static void port_set_link_speed(struct ksz_port *port)
{
- struct ksz_port_info *info;
struct ksz_hw *hw = port->hw;
u16 data;
u16 cfg;
@@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port)
int p;
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
- info = &hw->port_info[p];
-
port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 71dca8bd51ac..16bd3f44dbe8 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,6 +46,7 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
select PHYLIB
+ select CRC16
---help---
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 2e982cc249fb..538926d2b43f 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o
obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
obj-$(CONFIG_LAN743X) += lan743x.o
-lan743x-objs := lan743x_main.o
+lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
new file mode 100644
index 000000000000..07c1eb63415a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
+#include <linux/net_tstamp.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+/* eeprom */
+#define LAN743X_EEPROM_MAGIC (0x74A5)
+#define LAN743X_OTP_MAGIC (0x74F3)
+#define EEPROM_INDICATOR_1 (0xA5)
+#define EEPROM_INDICATOR_2 (0xAA)
+#define EEPROM_MAC_OFFSET (0x01)
+#define MAX_EEPROM_SIZE 512
+#define OTP_INDICATOR_1 (0xF3)
+#define OTP_INDICATOR_2 (0xF7)
+
+static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ unsigned long timeout;
+ u32 buf;
+ int i;
+
+ buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+
+ if (buf & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ lan743x_csr_write(adapter, OTP_PWR_DN, 0);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "timeout on OTP_PWR_DN completion\n");
+ return -EIO;
+ }
+ } while (buf & OTP_PWR_DN_PWRDN_N_);
+ }
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ for (i = 0; i < length; i++) {
+ lan743x_csr_write(adapter, OTP_ADDR1,
+ ((offset + i) >> 8) &
+ OTP_ADDR1_15_11_MASK_);
+ lan743x_csr_write(adapter, OTP_ADDR2,
+ ((offset + i) &
+ OTP_ADDR2_10_3_MASK_));
+ lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ buf = lan743x_csr_read(adapter, OTP_STATUS);
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Timeout on OTP_STATUS completion\n");
+ return -EIO;
+ }
+ } while (buf & OTP_STATUS_BUSY_);
+ }
+
+ return 0;
+}
+
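Both lan743x_otp_write() above and the EEPROM helpers below repeat the same jiffies-based busy-poll. A minimal sketch of that idiom, assuming the driver's lan743x_csr_read() plus <linux/delay.h> and <linux/jiffies.h> (hypothetical helper, not in the patch):

static int lan743x_poll_until_clear(struct lan743x_adapter *adapter,
				    int reg, u32 busy_mask)
{
	unsigned long timeout = jiffies + HZ;	/* one-second deadline */
	u32 val;

	do {
		udelay(1);
		val = lan743x_csr_read(adapter, reg);
		if (time_after(jiffies, timeout))
			return -EIO;	/* still busy after the deadline */
	} while (val & busy_mask);

	return 0;
}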
+static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_) ||
+ (val & E2P_CMD_EPC_TIMEOUT_))
+ break;
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "EEPROM read operation timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_confirm_not_busy(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_))
+ return 0;
+
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ netif_warn(adapter, drv, adapter->netdev, "EEPROM is busy\n");
+ return -EIO;
+}
+
+static int lan743x_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ val = lan743x_csr_read(adapter, E2P_DATA);
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ /* Issue write/erase enable command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, E2P_DATA, val);
+
+ /* Send "write" command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
+static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info,
+ pci_name(adapter->pdev), sizeof(info->bus_info));
+}
+
+static u32 lan743x_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void lan743x_ethtool_set_msglevel(struct net_device *netdev,
+ u32 msglevel)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglevel;
+}
+
+static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+ return MAX_EEPROM_SIZE;
+}
+
+static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+}
+
+static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = -EINVAL;
+
+ if (ee->magic == LAN743X_EEPROM_MAGIC)
+ ret = lan743x_eeprom_write(adapter, ee->offset, ee->len,
+ data);
+	/* Beware! OTP is One Time Programmable ONLY,
+	 * so apply strict sanity checks before writing anything.
+	 */
+ else if ((ee->magic == LAN743X_OTP_MAGIC) &&
+ (ee->offset == 0) &&
+ (ee->len == MAX_EEPROM_SIZE) &&
+ (data[0] == OTP_INDICATOR_1))
+ ret = lan743x_otp_write(adapter, ee->offset, ee->len, data);
+
+ return ret;
+}
+
+static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX FCS Errors",
+ "RX Alignment Errors",
+ "Rx Fragment Errors",
+ "RX Jabber Errors",
+ "RX Undersize Frame Errors",
+ "RX Oversize Frame Errors",
+ "RX Dropped Frames",
+ "RX Unicast Byte Count",
+ "RX Broadcast Byte Count",
+ "RX Multicast Byte Count",
+ "RX Unicast Frames",
+ "RX Broadcast Frames",
+ "RX Multicast Frames",
+ "RX Pause Frames",
+ "RX 64 Byte Frames",
+ "RX 65 - 127 Byte Frames",
+ "RX 128 - 255 Byte Frames",
+ "RX 256 - 511 Bytes Frames",
+ "RX 512 - 1023 Byte Frames",
+ "RX 1024 - 1518 Byte Frames",
+ "RX Greater 1518 Byte Frames",
+};
+
+static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Queue 0 Frames",
+ "RX Queue 1 Frames",
+ "RX Queue 2 Frames",
+ "RX Queue 3 Frames",
+};
+
+static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Total Frames",
+ "EEE RX LPI Transitions",
+ "EEE RX LPI Time",
+ "RX Counter Rollover Status",
+ "TX FCS Errors",
+ "TX Excess Deferral Errors",
+ "TX Carrier Errors",
+ "TX Bad Byte Count",
+ "TX Single Collisions",
+ "TX Multiple Collisions",
+ "TX Excessive Collision",
+ "TX Late Collisions",
+ "TX Unicast Byte Count",
+ "TX Broadcast Byte Count",
+ "TX Multicast Byte Count",
+ "TX Unicast Frames",
+ "TX Broadcast Frames",
+ "TX Multicast Frames",
+ "TX Pause Frames",
+ "TX 64 Byte Frames",
+ "TX 65 - 127 Byte Frames",
+ "TX 128 - 255 Byte Frames",
+ "TX 256 - 511 Bytes Frames",
+ "TX 512 - 1023 Byte Frames",
+ "TX 1024 - 1518 Byte Frames",
+ "TX Greater 1518 Byte Frames",
+ "TX Total Frames",
+ "EEE TX LPI Transitions",
+ "EEE TX LPI Time",
+ "TX Counter Rollover Status",
+};
+
+static const u32 lan743x_set0_hw_cnt_addr[] = {
+ STAT_RX_FCS_ERRORS,
+ STAT_RX_ALIGNMENT_ERRORS,
+ STAT_RX_FRAGMENT_ERRORS,
+ STAT_RX_JABBER_ERRORS,
+ STAT_RX_UNDERSIZE_FRAME_ERRORS,
+ STAT_RX_OVERSIZE_FRAME_ERRORS,
+ STAT_RX_DROPPED_FRAMES,
+ STAT_RX_UNICAST_BYTE_COUNT,
+ STAT_RX_BROADCAST_BYTE_COUNT,
+ STAT_RX_MULTICAST_BYTE_COUNT,
+ STAT_RX_UNICAST_FRAMES,
+ STAT_RX_BROADCAST_FRAMES,
+ STAT_RX_MULTICAST_FRAMES,
+ STAT_RX_PAUSE_FRAMES,
+ STAT_RX_64_BYTE_FRAMES,
+ STAT_RX_65_127_BYTE_FRAMES,
+ STAT_RX_128_255_BYTE_FRAMES,
+ STAT_RX_256_511_BYTES_FRAMES,
+ STAT_RX_512_1023_BYTE_FRAMES,
+ STAT_RX_1024_1518_BYTE_FRAMES,
+ STAT_RX_GREATER_1518_BYTE_FRAMES,
+};
+
+static const u32 lan743x_set2_hw_cnt_addr[] = {
+ STAT_RX_TOTAL_FRAMES,
+ STAT_EEE_RX_LPI_TRANSITIONS,
+ STAT_EEE_RX_LPI_TIME,
+ STAT_RX_COUNTER_ROLLOVER_STATUS,
+ STAT_TX_FCS_ERRORS,
+ STAT_TX_EXCESS_DEFERRAL_ERRORS,
+ STAT_TX_CARRIER_ERRORS,
+ STAT_TX_BAD_BYTE_COUNT,
+ STAT_TX_SINGLE_COLLISIONS,
+ STAT_TX_MULTIPLE_COLLISIONS,
+ STAT_TX_EXCESSIVE_COLLISION,
+ STAT_TX_LATE_COLLISIONS,
+ STAT_TX_UNICAST_BYTE_COUNT,
+ STAT_TX_BROADCAST_BYTE_COUNT,
+ STAT_TX_MULTICAST_BYTE_COUNT,
+ STAT_TX_UNICAST_FRAMES,
+ STAT_TX_BROADCAST_FRAMES,
+ STAT_TX_MULTICAST_FRAMES,
+ STAT_TX_PAUSE_FRAMES,
+ STAT_TX_64_BYTE_FRAMES,
+ STAT_TX_65_127_BYTE_FRAMES,
+ STAT_TX_128_255_BYTE_FRAMES,
+ STAT_TX_256_511_BYTES_FRAMES,
+ STAT_TX_512_1023_BYTE_FRAMES,
+ STAT_TX_1024_1518_BYTE_FRAMES,
+ STAT_TX_GREATER_1518_BYTE_FRAMES,
+ STAT_TX_TOTAL_FRAMES,
+ STAT_EEE_TX_LPI_TRANSITIONS,
+ STAT_EEE_TX_LPI_TIME,
+ STAT_TX_COUNTER_ROLLOVER_STATUS
+};
+
+static void lan743x_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, lan743x_set0_hw_cnt_strings,
+ sizeof(lan743x_set0_hw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings)],
+ lan743x_set1_sw_cnt_strings,
+ sizeof(lan743x_set1_sw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings)],
+ lan743x_set2_hw_cnt_strings,
+ sizeof(lan743x_set2_hw_cnt_strings));
+ break;
+ }
+}
+
+static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int data_index = 0;
+ u32 buf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lan743x_set0_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set0_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+ for (i = 0; i < ARRAY_SIZE(adapter->rx); i++)
+ data[data_index++] = (u64)(adapter->rx[i].frame_count);
+ for (i = 0; i < ARRAY_SIZE(lan743x_set2_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+}
+
+static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ {
+ int ret;
+
+ ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+ return ret;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
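The string tables, the register-address tables, and get_sset_count() must all agree on count and ordering, or ethtool output silently misaligns. A build-time check is one way to pin that down (sketch, assuming <linux/build_bug.h> and <linux/kernel.h>; not part of the patch):

static inline void lan743x_stats_tables_check(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(lan743x_set0_hw_cnt_strings) !=
		     ARRAY_SIZE(lan743x_set0_hw_cnt_addr));
	BUILD_BUG_ON(ARRAY_SIZE(lan743x_set2_hw_cnt_strings) !=
		     ARRAY_SIZE(lan743x_set2_hw_cnt_addr));
}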
+static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ rxnfc->data = 0;
+ switch (rxnfc->flow_type) {
+		case TCP_V4_FLOW:
+		case UDP_V4_FLOW:
+		case TCP_V6_FLOW:
+		case UDP_V6_FLOW:
+ rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case IPV4_FLOW: case IPV6_FLOW:
+ rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = LAN743X_USED_RX_CHANNELS;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev)
+{
+ return 40;
+}
+
+static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return 128;
+}
+
+static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (indir) {
+ int dw_index;
+ int byte_index = 0;
+
+ for (dw_index = 0; dw_index < 32; dw_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter, RFE_INDX(dw_index));
+
+ byte_index = dw_index << 2;
+ indir[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ indir[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ indir[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ indir[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (key) {
+ int dword_index;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter,
+ RFE_HASH_KEY(dword_index));
+
+ byte_index = dword_index << 2;
+ key[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ key[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ key[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ key[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (hfunc)
+ (*hfunc) = ETH_RSS_HASH_TOP;
+ return 0;
+}
+
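The RSS indirection table packs four 8-bit queue entries per 32-bit CSR: entry i lives in dword i >> 2 at byte lane i & 3, with lane 0 in bits 7:0. A hypothetical accessor that makes the mapping explicit (not in the patch):

static u8 lan743x_rss_indir_entry(struct lan743x_adapter *adapter,
				  unsigned int i)
{
	u32 dw = lan743x_csr_read(adapter, RFE_INDX(i >> 2));

	return (dw >> ((i & 3) * 8)) & 0xFF;	/* lane 0 = bits 7:0 */
}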
+static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ u32 indir_value = 0;
+ int dword_index = 0;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 32; dword_index++) {
+ byte_index = dword_index << 2;
+ indir_value =
+ (((indir[byte_index + 0] & 0x000000FF) << 0) |
+ ((indir[byte_index + 1] & 0x000000FF) << 8) |
+ ((indir[byte_index + 2] & 0x000000FF) << 16) |
+ ((indir[byte_index + 3] & 0x000000FF) << 24));
+ lan743x_csr_write(adapter, RFE_INDX(dword_index),
+ indir_value);
+ }
+ }
+ if (key) {
+ int dword_index = 0;
+ int byte_index = 0;
+ u32 key_value = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ byte_index = dword_index << 2;
+ key_value =
+ ((((u32)(key[byte_index + 0])) << 0) |
+ (((u32)(key[byte_index + 1])) << 8) |
+ (((u32)(key[byte_index + 2])) << 16) |
+ (((u32)(key[byte_index + 3])) << 24));
+ lan743x_csr_write(adapter, RFE_HASH_KEY(dword_index),
+ key_value);
+ }
+ }
+ return 0;
+}
+
+static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp.ptp_clock)
+ ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static int lan743x_ethtool_get_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 buf;
+ int ret;
+
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ ret = phy_ethtool_get_eee(phydev, eee);
+ if (ret < 0)
+ return ret;
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ if (buf & MAC_CR_EEE_EN_) {
+ eee->eee_enabled = true;
+ eee->eee_active = !!(eee->advertised & eee->lp_advertised);
+ eee->tx_lpi_enabled = true;
+		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same
+		 * microsecond unit
+		 */
+ buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ eee->tx_lpi_timer = buf;
+ } else {
+ eee->eee_enabled = false;
+ eee->eee_active = false;
+ eee->tx_lpi_enabled = false;
+ eee->tx_lpi_timer = 0;
+ }
+
+ return 0;
+}
+
+static int lan743x_ethtool_set_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+	struct lan743x_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev;
+	u32 buf;
+	int ret;
+
+ phydev = netdev->phydev;
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ if (eee->eee_enabled) {
+ ret = phy_init_eee(phydev, 0);
+ if (ret) {
+ netif_err(adapter, drv, adapter->netdev,
+ "EEE initialization failed\n");
+ return ret;
+ }
+
+ buf = (u32)eee->tx_lpi_timer;
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf |= MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ } else {
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ }
+
+ return phy_ethtool_set_eee(phydev, eee);
+}
+
+#ifdef CONFIG_PM
+static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+ phy_ethtool_get_wol(netdev->phydev, wol);
+
+ wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+ WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+
+ wol->wolopts |= adapter->wolopts;
+}
+
+static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->wolopts = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wolopts |= WAKE_UCAST;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wolopts |= WAKE_MCAST;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wolopts |= WAKE_BCAST;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wolopts |= WAKE_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wolopts |= WAKE_PHY;
+ if (wol->wolopts & WAKE_ARP)
+ adapter->wolopts |= WAKE_ARP;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+
+ phy_ethtool_set_wol(netdev->phydev, wol);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
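The six flag-by-flag tests in lan743x_ethtool_set_wol() amount to masking the request against the supported set; an equivalent helper, shown here only as a sketch of the design choice (assumes the WAKE_* flags from <linux/ethtool.h>):

static u32 lan743x_wol_filter(u32 requested)
{
	const u32 supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
			      WAKE_MAGIC | WAKE_PHY | WAKE_ARP;

	return requested & supported;	/* drop unsupported wake options */
}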
+
+const struct ethtool_ops lan743x_ethtool_ops = {
+ .get_drvinfo = lan743x_ethtool_get_drvinfo,
+ .get_msglevel = lan743x_ethtool_get_msglevel,
+ .set_msglevel = lan743x_ethtool_set_msglevel,
+ .get_link = ethtool_op_get_link,
+
+ .get_eeprom_len = lan743x_ethtool_get_eeprom_len,
+ .get_eeprom = lan743x_ethtool_get_eeprom,
+ .set_eeprom = lan743x_ethtool_set_eeprom,
+ .get_strings = lan743x_ethtool_get_strings,
+ .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats,
+ .get_sset_count = lan743x_ethtool_get_sset_count,
+ .get_rxnfc = lan743x_ethtool_get_rxnfc,
+ .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
+ .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
+ .get_rxfh = lan743x_ethtool_get_rxfh,
+ .set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_ts_info = lan743x_ethtool_get_ts_info,
+ .get_eee = lan743x_ethtool_get_eee,
+ .set_eee = lan743x_ethtool_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_PM
+ .get_wol = lan743x_ethtool_get_wol,
+ .set_wol = lan743x_ethtool_set_wol,
+#endif
+};
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
new file mode 100644
index 000000000000..d0d11a777a58
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_ETHTOOL_H
+#define _LAN743X_ETHTOOL_H
+
+#include "linux/ethtool.h"
+
+extern const struct ethtool_ops lan743x_ethtool_ops;
+
+#endif /* _LAN743X_ETHTOOL_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dd947e4dd3ce..e7dce79ff2c9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -11,7 +11,9 @@
#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
+#include <linux/crc16.h>
#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
@@ -53,13 +55,13 @@ return_error:
return ret;
}
-static u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
{
return ioread32(&adapter->csr.csr_address[offset]);
}
-static void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
- u32 data)
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
+ u32 data)
{
iowrite32(data, &adapter->csr.csr_address[offset]);
}
@@ -265,6 +267,10 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
lan743x_intr_software_isr(adapter);
int_sts &= ~INT_BIT_SW_GP_;
}
+ if (int_sts & INT_BIT_1588_) {
+ lan743x_ptp_isr(adapter);
+ int_sts &= ~INT_BIT_1588_;
+ }
}
if (int_sts)
lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
@@ -828,7 +834,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
}
if (!mac_address_valid)
- random_ether_addr(adapter->mac_address);
+ eth_random_addr(adapter->mac_address);
lan743x_mac_set_address(adapter, adapter->mac_address);
ether_addr_copy(netdev->dev_addr, adapter->mac_address);
return 0;
@@ -974,6 +980,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
ksettings.base.duplex,
local_advertisement,
remote_advertisement);
+ lan743x_ptp_update_latency(adapter, ksettings.base.speed);
}
}
@@ -1023,6 +1030,24 @@ return_error:
return ret;
}
+static void lan743x_rfe_open(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, RFE_RSS_CFG,
+ RFE_RSS_CFG_UDP_IPV6_EX_ |
+ RFE_RSS_CFG_TCP_IPV6_EX_ |
+ RFE_RSS_CFG_IPV6_EX_ |
+ RFE_RSS_CFG_UDP_IPV6_ |
+ RFE_RSS_CFG_TCP_IPV6_ |
+ RFE_RSS_CFG_IPV6_ |
+ RFE_RSS_CFG_UDP_IPV4_ |
+ RFE_RSS_CFG_TCP_IPV4_ |
+ RFE_RSS_CFG_IPV4_ |
+ RFE_RSS_CFG_VALID_HASH_BITS_ |
+ RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
+ RFE_RSS_CFG_RSS_HASH_STORE_ |
+ RFE_RSS_CFG_RSS_ENABLE_);
+}
+
static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
{
u8 *mac_addr;
@@ -1206,6 +1231,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx,
struct lan743x_tx_buffer_info *buffer_info = NULL;
struct lan743x_tx_descriptor *descriptor = NULL;
u32 descriptor_type = 0;
+ bool ignore_sync;
descriptor = &tx->ring_cpu_ptr[descriptor_index];
buffer_info = &tx->buffer_info[descriptor_index];
@@ -1236,11 +1262,27 @@ clean_up_data_descriptor:
buffer_info->dma_ptr = 0;
buffer_info->buffer_length = 0;
}
- if (buffer_info->skb) {
+ if (!buffer_info->skb)
+ goto clear_active;
+
+ if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
+ goto clear_skb;
}
+ if (cleanup) {
+ lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
+ dev_kfree_skb(buffer_info->skb);
+ } else {
+ ignore_sync = (buffer_info->flags &
+ TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
+ lan743x_ptp_tx_timestamp_skb(tx->adapter,
+ buffer_info->skb, ignore_sync);
+ }
+
+clear_skb:
+ buffer_info->skb = NULL;
+
clear_active:
buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
@@ -1301,10 +1343,25 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
return last_head - last_tail - 1;
}
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync)
+{
+ if (enable_timestamping)
+ tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ if (enable_onestep_sync)
+ tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
+}
+
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
unsigned char *first_buffer,
unsigned int first_buffer_length,
unsigned int frame_length,
+ bool time_stamp,
bool check_sum)
{
/* called only from within lan743x_tx_xmit_frame.
@@ -1342,6 +1399,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_ |
TX_DESC_DATA0_FS_ |
TX_DESC_DATA0_FCS_;
+ if (time_stamp)
+ tx->frame_data0 |= TX_DESC_DATA0_TSE_;
if (check_sum)
tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
@@ -1455,6 +1514,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
struct sk_buff *skb,
+ bool time_stamp,
bool ignore_sync)
{
/* called only from within lan743x_tx_xmit_frame
@@ -1472,6 +1532,8 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
buffer_info->skb = skb;
+ if (time_stamp)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
@@ -1500,6 +1562,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
unsigned int frame_length = 0;
unsigned int head_length = 0;
unsigned long irq_flags = 0;
+ bool do_timestamp = false;
bool ignore_sync = false;
int nr_frags = 0;
bool gso = false;
@@ -1521,6 +1584,14 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
/* space available, transmit skb */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
+ (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_timestamp = true;
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ ignore_sync = true;
+ }
head_length = skb_headlen(skb);
frame_length = skb_pagelen(skb);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1534,6 +1605,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (lan743x_tx_frame_start(tx,
skb->data, head_length,
start_frame_length,
+ do_timestamp,
skb->ip_summed == CHECKSUM_PARTIAL)) {
dev_kfree_skb(skb);
goto unlock;
@@ -1561,7 +1633,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
finish:
- lan743x_tx_frame_end(tx, skb, ignore_sync);
+ lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
unlock:
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
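The transmit path above follows the kernel's standard TX-timestamp handshake: the skb asks with SKBTX_HW_TSTAMP, the driver claims a hardware slot, marks SKBTX_IN_PROGRESS, and reports the timestamp later through skb_tstamp_tx(). A condensed sketch of that handshake (generic pattern, not this driver's code):

#include <linux/skbuff.h>

static bool claim_hw_tx_timestamp(struct sk_buff *skb, bool hw_slot_free)
{
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || !hw_slot_free)
		return false;	/* fall back to software timestamping */
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	return true;	/* hardware delivers the timestamp later */
}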
@@ -2390,6 +2462,8 @@ static int lan743x_netdev_close(struct net_device *netdev)
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
lan743x_rx_close(&adapter->rx[index]);
+ lan743x_ptp_close(adapter);
+
lan743x_phy_close(adapter);
lan743x_mac_close(adapter);
@@ -2417,6 +2491,12 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_mac;
+ ret = lan743x_ptp_open(adapter);
+ if (ret)
+ goto close_phy;
+
+ lan743x_rfe_open(adapter);
+
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
ret = lan743x_rx_open(&adapter->rx[index]);
if (ret)
@@ -2434,6 +2514,9 @@ close_rx:
if (adapter->rx[index].ring_cpu_ptr)
lan743x_rx_close(&adapter->rx[index]);
}
+ lan743x_ptp_close(adapter);
+
+close_phy:
lan743x_phy_close(adapter);
close_mac:
@@ -2461,6 +2544,8 @@ static int lan743x_netdev_ioctl(struct net_device *netdev,
{
if (!netif_running(netdev))
return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return lan743x_ptp_ioctl(netdev, ifr, cmd);
return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
@@ -2585,6 +2670,11 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
mutex_init(&adapter->dp_lock);
+
+ ret = lan743x_gpio_init(adapter);
+ if (ret)
+ return ret;
+
ret = lan743x_mac_init(adapter);
if (ret)
return ret;
@@ -2593,6 +2683,10 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
+ ret = lan743x_ptp_init(adapter);
+ if (ret)
+ return ret;
+
lan743x_rfe_update_mac_address(adapter);
ret = lan743x_dmac_init(adapter);
@@ -2689,6 +2783,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
goto cleanup_hardware;
adapter->netdev->netdev_ops = &lan743x_netdev_ops;
+ adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
adapter->netdev->hw_features = adapter->netdev->features;
@@ -2747,10 +2842,182 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
lan743x_netdev_close(netdev);
rtnl_unlock();
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
/* clean up lan743x portion */
lan743x_hardware_cleanup(adapter);
}
+#ifdef CONFIG_PM
+static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
+{
+ return bitrev16(crc16(0xFFFF, buf, len));
+}
+
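lan743x_pm_wakeframe_crc16() relies on <linux/crc16.h>'s reflected CRC-16 (polynomial 0x8005, processed LSB-first as 0xA001) and then bit-reverses the result for the wake-up filter hardware. A standalone user-space equivalent for sanity-checking the values written to MAC_WUF_CFG (illustrative sketch):

#include <stdint.h>
#include <stddef.h>

static uint16_t crc16_lsb(uint16_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;

	for (int bit = 0; bit < 16; bit++)
		if (x & (1u << bit))
			r |= 1u << (15 - bit);
	return r;
}

/* e.g. bitrev16(crc16_lsb(0xFFFF, (uint8_t[]){0x01, 0x00, 0x5E}, 3))
 * reproduces the IPv4-multicast filter CRC programmed below. */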
+static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+{
+ const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+ const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+ const u8 arp_type[2] = { 0x08, 0x06 };
+ int mask_index;
+ u32 pmtctl;
+ u32 wucsr;
+ u32 macrx;
+ u16 crc;
+
+ for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
+
+ /* clear wake settings */
+ pmtctl = lan743x_csr_read(adapter, PMT_CTL);
+ pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
+ PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
+ PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
+
+ macrx = lan743x_csr_read(adapter, MAC_RX);
+
+ wucsr = 0;
+ mask_index = 0;
+
+ pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
+
+ if (adapter->wolopts & WAKE_PHY) {
+ pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
+ }
+ if (adapter->wolopts & WAKE_MAGIC) {
+ wucsr |= MAC_WUCSR_MPEN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_UCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_BCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_MCAST) {
+ /* IPv4 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ /* IPv6 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_ARP) {
+ /* set MAC_WUF_CFG & WUF_MASK
+		 * for packet type (offset 12,13) = ARP (0x0806)
+ */
+ crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+
+ lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
+ lan743x_csr_write(adapter, PMT_CTL, pmtctl);
+ lan743x_csr_write(adapter, MAC_RX, macrx);
+}
+
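Each MAC_WUF_MASKn register is a byte-select bitmap: bit i pulls frame byte (offset + i) into the CRC-16, so mask 7 covers the three-byte IPv4 multicast prefix, mask 3 the two-byte IPv6 prefix, and 0x3000 bytes 12-13, the EtherType field matched against ARP. A sketch of how an expected CRC pairs with a mask (hypothetical helper, assuming <linux/crc16.h>, <linux/bitrev.h> and <linux/bits.h>):

static u16 lan743x_wuf_expected_crc(const u8 *frame, u32 mask0, int offset)
{
	u8 selected[32];
	int i, n = 0;

	for (i = 0; i < 32; i++)
		if (mask0 & BIT(i))	/* bit i selects frame byte offset+i */
			selected[n++] = frame[offset + i];

	return bitrev16(crc16(0xFFFF, selected, n));
}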
+static int lan743x_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_pcidev_shutdown(pdev);
+
+ /* clear all wakes */
+ lan743x_csr_write(adapter, MAC_WUCSR, 0);
+ lan743x_csr_write(adapter, MAC_WUCSR2, 0);
+ lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
+
+ if (adapter->wolopts)
+ lan743x_pm_set_wol(adapter);
+
+	/* Host sets PME_En, put the device into D3hot */
+	return pci_prepare_to_sleep(pdev);
+}
+
+static int lan743x_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ ret = lan743x_hardware_init(adapter, pdev);
+ if (ret) {
+ netif_err(adapter, probe, adapter->netdev,
+ "lan743x_hardware_init returned %d\n", ret);
+ }
+
+	/* Reopen the netdev on resume only if it was in the running state.
+	 * For instance, this is true when the system wakes up after
+	 * pm-suspend, but false when it wakes up after a suspend started
+	 * from a GUI menu.
+	 */
+ if (netif_running(netdev))
+ lan743x_netdev_open(netdev);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops lan743x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
+};
+#endif /* CONFIG_PM */
+
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
{ 0, }
@@ -2761,6 +3028,9 @@ static struct pci_driver lan743x_pcidev_driver = {
.id_table = lan743x_pcidev_tbl,
.probe = lan743x_pcidev_probe,
.remove = lan743x_pcidev_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &lan743x_pm_ops,
+#endif
.shutdown = lan743x_pcidev_shutdown,
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 73b463a9df61..0e82b6368798 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -4,12 +4,17 @@
#ifndef _LAN743X_H
#define _LAN743X_H
+#include "lan743x_ptp.h"
+
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
#define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver"
#define DRIVER_NAME "lan743x"
/* Register Definitions */
#define ID_REV (0x00)
+#define ID_REV_ID_MASK_ (0xFFFF0000)
+#define ID_REV_ID_LAN7430_ (0x74300000)
+#define ID_REV_ID_LAN7431_ (0x74310000)
#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
(((id_rev) & 0xFFF00000) == 0x74300000)
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
@@ -24,8 +29,18 @@
#define HW_CFG_LRST_ BIT(1)
#define PMT_CTL (0x014)
+#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27)
+#define PMT_CTL_MAC_D3_RX_CLK_OVR_ BIT(25)
+#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24)
+#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23)
+#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
+#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
+#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
+#define PMT_CTL_WOL_EN_ BIT(3)
+#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2)
+#define PMT_CTL_WUPS_MASK_ (0x00000003)
#define DP_SEL (0x024)
#define DP_SEL_DPRDY_ BIT(31)
@@ -42,6 +57,31 @@
#define DP_DATA_0 (0x030)
+#define E2P_CMD (0x040)
+#define E2P_CMD_EPC_BUSY_ BIT(31)
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_ BIT(10)
+#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF)
+
+#define E2P_DATA (0x044)
+
+#define GPIO_CFG0 (0x050)
+#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG1 (0x054)
+#define GPIO_CFG1_GPIOEN_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG1_GPIOBUF_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG2 (0x058)
+#define GPIO_CFG2_1588_POL_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG3 (0x05C)
+#define GPIO_CFG3_1588_CH_SEL_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG3_1588_OE_BIT_(bit) BIT(0 + (bit))
+
#define FCT_RX_CTL (0xAC)
#define FCT_RX_CTL_EN_(channel) BIT(28 + (channel))
#define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel))
@@ -62,6 +102,7 @@
((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_)
#define MAC_CR (0x100)
+#define MAC_CR_EEE_EN_ BIT(17)
#define MAC_CR_ADD_ BIT(12)
#define MAC_CR_ASD_ BIT(11)
#define MAC_CR_CNTR_RST_ BIT(5)
@@ -97,6 +138,40 @@
#define MAC_MII_DATA (0x124)
+#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
+
+#define MAC_WUCSR (0x140)
+#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_PFDA_EN_ BIT(3)
+#define MAC_WUCSR_WAKE_EN_ BIT(2)
+#define MAC_WUCSR_MPEN_ BIT(1)
+#define MAC_WUCSR_BCST_EN_ BIT(0)
+
+#define MAC_WK_SRC (0x144)
+
+#define MAC_WUF_CFG0 (0x150)
+#define MAC_NUM_OF_WUF_CFG (32)
+#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0)
+#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index)))
+#define MAC_WUF_CFG_EN_ BIT(31)
+#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000)
+#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000)
+#define MAC_WUF_CFG_OFFSET_SHIFT_ (16)
+#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF)
+
+#define MAC_WUF_MASK0_0 (0x200)
+#define MAC_WUF_MASK0_1 (0x204)
+#define MAC_WUF_MASK0_2 (0x208)
+#define MAC_WUF_MASK0_3 (0x20C)
+#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0)
+#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1)
+#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2)
+#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3)
+#define MAC_WUF_MASK0(index) (MAC_WUF_MASK0_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK1(index) (MAC_WUF_MASK1_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK2(index) (MAC_WUF_MASK2_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK3(index) (MAC_WUF_MASK3_BEGIN + (0x10 * (index)))
+
/* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */
#define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x)))
#define RFE_ADDR_FILT_HI_VALID_ BIT(31)
@@ -111,13 +186,35 @@
#define RFE_CTL_MCAST_HASH_ BIT(3)
#define RFE_CTL_DA_PERFECT_ BIT(1)
+#define RFE_RSS_CFG (0x554)
+#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16)
+#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15)
+#define RFE_RSS_CFG_IPV6_EX_ BIT(14)
+#define RFE_RSS_CFG_UDP_IPV6_ BIT(13)
+#define RFE_RSS_CFG_TCP_IPV6_ BIT(12)
+#define RFE_RSS_CFG_IPV6_ BIT(11)
+#define RFE_RSS_CFG_UDP_IPV4_ BIT(10)
+#define RFE_RSS_CFG_TCP_IPV4_ BIT(9)
+#define RFE_RSS_CFG_IPV4_ BIT(8)
+#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0)
+#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2)
+#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1)
+#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0)
+
+#define RFE_HASH_KEY(index) (0x558 + ((index) << 2))
+
+#define RFE_INDX(index) (0x580 + ((index) << 2))
+
+#define MAC_WUCSR2 (0x600)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
#define INT_BIT_DMA_TX_(channel) BIT(16 + (channel))
#define INT_BIT_ALL_TX_ (0x000F0000)
#define INT_BIT_SW_GP_ BIT(9)
-#define INT_BIT_ALL_OTHER_ (0x00000280)
+#define INT_BIT_1588_ BIT(7)
+#define INT_BIT_ALL_OTHER_ (INT_BIT_SW_GP_ | INT_BIT_1588_)
#define INT_BIT_MAS_ BIT(0)
#define INT_SET (0x784)
@@ -158,6 +255,71 @@
#define INT_MOD_CFG6 (0x7D8)
#define INT_MOD_CFG7 (0x7DC)
+#define PTP_CMD_CTL (0x0A00)
+#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
+#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5)
+#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
+#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3)
+#define PTP_CMD_CTL_PTP_ENABLE_ BIT(2)
+#define PTP_CMD_CTL_PTP_DISABLE_ BIT(1)
+#define PTP_CMD_CTL_PTP_RESET_ BIT(0)
+#define PTP_GENERAL_CONFIG (0x0A04)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0x7 << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (1)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (2)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0x7) << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
+
+#define PTP_INT_STS (0x0A08)
+#define PTP_INT_EN_SET (0x0A0C)
+#define PTP_INT_EN_CLR (0x0A10)
+#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13)
+#define PTP_INT_BIT_TX_TS_ BIT(12)
+#define PTP_INT_BIT_TIMER_B_ BIT(1)
+#define PTP_INT_BIT_TIMER_A_ BIT(0)
+
+#define PTP_CLOCK_SEC (0x0A14)
+#define PTP_CLOCK_NS (0x0A18)
+#define PTP_CLOCK_SUBNS (0x0A1C)
+#define PTP_CLOCK_RATE_ADJ (0x0A20)
+#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ (0x0A2C)
+#define PTP_CLOCK_STEP_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ_VALUE_MASK_ (0x3FFFFFFF)
+#define PTP_CLOCK_TARGET_SEC_X(channel) (0x0A30 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4))
+#define PTP_LATENCY (0x0A5C)
+#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16)
+#define PTP_LATENCY_RX_SET_(rx_latency) \
+ (((u32)(rx_latency)) & 0x0000FFFF)
+#define PTP_CAP_INFO (0x0A60)
+#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4)
+
+#define PTP_TX_MOD (0x0AA4)
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000)
+
+#define PTP_TX_MOD2 (0x0AA8)
+#define PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_ (0x00000001)
+
+#define PTP_TX_EGRESS_SEC (0x0AAC)
+#define PTP_TX_EGRESS_NS (0x0AB0)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_ (0xC0000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_ (0x00000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_ (0x40000000)
+#define PTP_TX_EGRESS_NS_TS_NS_MASK_ (0x3FFFFFFF)
+
+#define PTP_TX_MSG_HEADER (0x0AB4)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000)
+
#define DMAC_CFG (0xC00)
#define DMAC_CFG_COAL_EN_ BIT(16)
#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000)
@@ -288,9 +450,33 @@
#define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3)
#define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007)
+#define OTP_PWR_DN (0x1000)
+#define OTP_PWR_DN_PWRDN_N_ BIT(0)
+
+#define OTP_ADDR1 (0x1004)
+#define OTP_ADDR1_15_11_MASK_ (0x1F)
+
+#define OTP_ADDR2 (0x1008)
+#define OTP_ADDR2_10_3_MASK_ (0xFF)
+
+#define OTP_PRGM_DATA (0x1010)
+
+#define OTP_PRGM_MODE (0x1014)
+#define OTP_PRGM_MODE_BYTE_ BIT(0)
+
+#define OTP_TST_CMD (0x1024)
+#define OTP_TST_CMD_PRGVRFY_ BIT(3)
+
+#define OTP_CMD_GO (0x1028)
+#define OTP_CMD_GO_GO_ BIT(0)
+
+#define OTP_STATUS (0x1030)
+#define OTP_STATUS_BUSY_ BIT(0)
+
/* MAC statistics registers */
#define STAT_RX_FCS_ERRORS (0x1200)
#define STAT_RX_ALIGNMENT_ERRORS (0x1204)
+#define STAT_RX_FRAGMENT_ERRORS (0x1208)
#define STAT_RX_JABBER_ERRORS (0x120C)
#define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210)
#define STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214)
@@ -298,12 +484,26 @@
#define STAT_RX_UNICAST_BYTE_COUNT (0x121C)
#define STAT_RX_BROADCAST_BYTE_COUNT (0x1220)
#define STAT_RX_MULTICAST_BYTE_COUNT (0x1224)
+#define STAT_RX_UNICAST_FRAMES (0x1228)
+#define STAT_RX_BROADCAST_FRAMES (0x122C)
#define STAT_RX_MULTICAST_FRAMES (0x1230)
+#define STAT_RX_PAUSE_FRAMES (0x1234)
+#define STAT_RX_64_BYTE_FRAMES (0x1238)
+#define STAT_RX_65_127_BYTE_FRAMES (0x123C)
+#define STAT_RX_128_255_BYTE_FRAMES (0x1240)
+#define STAT_RX_256_511_BYTES_FRAMES (0x1244)
+#define STAT_RX_512_1023_BYTE_FRAMES (0x1248)
+#define STAT_RX_1024_1518_BYTE_FRAMES (0x124C)
+#define STAT_RX_GREATER_1518_BYTE_FRAMES (0x1250)
#define STAT_RX_TOTAL_FRAMES (0x1254)
+#define STAT_EEE_RX_LPI_TRANSITIONS (0x1258)
+#define STAT_EEE_RX_LPI_TIME (0x125C)
+#define STAT_RX_COUNTER_ROLLOVER_STATUS (0x127C)
#define STAT_TX_FCS_ERRORS (0x1280)
#define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284)
#define STAT_TX_CARRIER_ERRORS (0x1288)
+#define STAT_TX_BAD_BYTE_COUNT (0x128C)
#define STAT_TX_SINGLE_COLLISIONS (0x1290)
#define STAT_TX_MULTIPLE_COLLISIONS (0x1294)
#define STAT_TX_EXCESSIVE_COLLISION (0x1298)
@@ -311,8 +511,21 @@
#define STAT_TX_UNICAST_BYTE_COUNT (0x12A0)
#define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4)
#define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8)
+#define STAT_TX_UNICAST_FRAMES (0x12AC)
+#define STAT_TX_BROADCAST_FRAMES (0x12B0)
#define STAT_TX_MULTICAST_FRAMES (0x12B4)
+#define STAT_TX_PAUSE_FRAMES (0x12B8)
+#define STAT_TX_64_BYTE_FRAMES (0x12BC)
+#define STAT_TX_65_127_BYTE_FRAMES (0x12C0)
+#define STAT_TX_128_255_BYTE_FRAMES (0x12C4)
+#define STAT_TX_256_511_BYTES_FRAMES (0x12C8)
+#define STAT_TX_512_1023_BYTE_FRAMES (0x12CC)
+#define STAT_TX_1024_1518_BYTE_FRAMES (0x12D0)
+#define STAT_TX_GREATER_1518_BYTE_FRAMES (0x12D4)
#define STAT_TX_TOTAL_FRAMES (0x12D8)
+#define STAT_EEE_TX_LPI_TRANSITIONS (0x12DC)
+#define STAT_EEE_TX_LPI_TIME (0x12E0)
+#define STAT_TX_COUNTER_ROLLOVER_STATUS (0x12FC)
/* End of Register definitions */
@@ -415,8 +628,12 @@ struct lan743x_tx_buffer_info;
#define TX_FRAME_FLAG_IN_PROGRESS BIT(0)
+#define TX_TS_FLAG_TIMESTAMPING_ENABLED BIT(0)
+#define TX_TS_FLAG_ONE_STEP_SYNC BIT(1)
+
struct lan743x_tx {
struct lan743x_adapter *adapter;
+ u32 ts_flags;
u32 vector_flags;
int channel_number;
@@ -443,6 +660,10 @@ struct lan743x_tx {
struct sk_buff *overflow_skb;
};
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync);
+
/* RX */
struct lan743x_rx_descriptor;
struct lan743x_rx_buffer_info;
@@ -473,6 +694,9 @@ struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
int msg_enable;
+#ifdef CONFIG_PM
+ u32 wolopts;
+#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
struct lan743x_intr intr;
@@ -480,6 +704,9 @@ struct lan743x_adapter {
/* lock, used to prevent concurrent access to data port */
struct mutex dp_lock;
+ struct lan743x_gpio gpio;
+ struct lan743x_ptp ptp;
+
u8 mac_address[ETH_ALEN];
struct lan743x_phy phy;
@@ -530,6 +757,7 @@ struct lan743x_adapter {
#define TX_DESC_DATA0_IPE_ (0x00200000)
#define TX_DESC_DATA0_TPE_ (0x00100000)
#define TX_DESC_DATA0_FCS_ (0x00020000)
+#define TX_DESC_DATA0_TSE_ (0x00010000)
#define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF)
#define TX_DESC_DATA0_EXT_LSO_ (0x00200000)
#define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF)
@@ -543,6 +771,7 @@ struct lan743x_tx_descriptor {
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
+#define TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED BIT(1)
#define TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2)
#define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3)
struct lan743x_tx_buffer_info {
@@ -594,4 +823,7 @@ struct lan743x_rx_buffer_info {
#define RX_PROCESS_RESULT_PACKET_RECEIVED (1)
#define RX_PROCESS_RESULT_PACKET_DROPPED (2)
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
new file mode 100644
index 000000000000..ccdf9123f26f
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -0,0 +1,1159 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+#include "lan743x_main.h"
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+
+#include "lan743x_ptp.h"
+
+#define LAN743X_NUMBER_OF_GPIO (12)
+#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999)
+#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934)
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter);
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter);
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds);
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+
+ spin_lock_init(&gpio->gpio_lock);
+
+	gpio->gpio_cfg0 = 0;		/* set all directions to input, data = 0 */
+	gpio->gpio_cfg1 = 0x0FFF0000;	/* disable all GPIOs, set to open drain */
+	gpio->gpio_cfg2 = 0;		/* set all to 1588 low polarity level */
+	gpio->gpio_cfg3 = 0;		/* disable all 1588 outputs */
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ return 0;
+}
+
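GPIO_CFG0 packs direction bits at 16..27 and data bits at 0..11 for the twelve pins; the shadow copies in struct lan743x_gpio exist so read-modify-write sequences stay consistent under gpio_lock. A hypothetical pin-output helper built on that layout (assumes a set direction bit means output; not in the patch):

static void lan743x_gpio_set_output(struct lan743x_adapter *adapter,
				    int pin, bool value)
{
	struct lan743x_gpio *gpio = &adapter->gpio;
	unsigned long irq_flags;

	spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
	gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(pin);
	if (value)
		gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DATA_BIT_(pin);
	else
		gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin);
	lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
	spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
}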
+static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter,
+ u32 bit_mask)
+{
+ int timeout = 1000;
+ u32 data = 0;
+
+ while (timeout &&
+ (data = (lan743x_csr_read(adapter, PTP_CMD_CTL) &
+ bit_mask))) {
+ usleep_range(1000, 20000);
+ timeout--;
+ }
+ if (data) {
+ netif_err(adapter, drv, adapter->netdev,
+ "timeout waiting for cmd to be done, cmd = 0x%08X\n",
+ bit_mask);
+ }
+}
+
+static void lan743x_ptp_tx_ts_enqueue_ts(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 header)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_seconds_queue[ptp->tx_ts_queue_size] = seconds;
+ ptp->tx_ts_nseconds_queue[ptp->tx_ts_queue_size] = nano_seconds;
+ ptp->tx_ts_header_queue[ptp->tx_ts_queue_size] = header;
+ ptp->tx_ts_queue_size++;
+ } else {
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts queue overflow\n");
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_tx_ts_complete(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct skb_shared_hwtstamps tstamps;
+ u32 header, nseconds, seconds;
+ bool ignore_sync = false;
+ struct sk_buff *skb;
+ int c, i;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ c = ptp->tx_ts_skb_queue_size;
+
+ if (c > ptp->tx_ts_queue_size)
+ c = ptp->tx_ts_queue_size;
+ if (c <= 0)
+ goto done;
+
+ for (i = 0; i < c; i++) {
+ ignore_sync = ((ptp->tx_ts_ignore_sync_queue &
+ BIT(i)) != 0);
+ skb = ptp->tx_ts_skb_queue[i];
+ nseconds = ptp->tx_ts_nseconds_queue[i];
+ seconds = ptp->tx_ts_seconds_queue[i];
+ header = ptp->tx_ts_header_queue[i];
+
+ memset(&tstamps, 0, sizeof(tstamps));
+ tstamps.hwtstamp = ktime_set(seconds, nseconds);
+ if (!ignore_sync ||
+ ((header & PTP_TX_MSG_HEADER_MSG_TYPE_) !=
+ PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_))
+ skb_tstamp_tx(skb, &tstamps);
+
+ dev_kfree_skb(skb);
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+
+ /* shift queue */
+ ptp->tx_ts_ignore_sync_queue >>= c;
+ for (i = c; i < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; i++) {
+ ptp->tx_ts_skb_queue[i - c] = ptp->tx_ts_skb_queue[i];
+ ptp->tx_ts_seconds_queue[i - c] = ptp->tx_ts_seconds_queue[i];
+ ptp->tx_ts_nseconds_queue[i - c] = ptp->tx_ts_nseconds_queue[i];
+ ptp->tx_ts_header_queue[i - c] = ptp->tx_ts_header_queue[i];
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+ ptp->tx_ts_skb_queue_size -= c;
+ ptp->tx_ts_queue_size -= c;
+done:
+ ptp->pending_tx_timestamps -= c;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int result = -ENODEV;
+ int index = 0;
+
+ mutex_lock(&ptp->command_lock);
+ for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) {
+ if (!(test_bit(index, &ptp->used_event_ch))) {
+ ptp->used_event_ch |= BIT(index);
+ result = index;
+ break;
+ }
+ }
+ mutex_unlock(&ptp->command_lock);
+ return result;
+}
+
+static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (test_bit(event_channel, &ptp->used_event_ch)) {
+ ptp->used_event_ch &= ~BIT(event_channel);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "attempted release on a not used event_channel = %d\n",
+ event_channel);
+ }
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds);
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns);
+
+static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
+ int bit, int ptp_channel)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(bit);
+ int ret = -EBUSY;
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+
+ if (!(gpio->used_bits & bit_mask)) {
+ gpio->used_bits |= bit_mask;
+ gpio->output_bits |= bit_mask;
+ gpio->ptp_bits |= bit_mask;
+
+ /* set as output, and zero initial value */
+ gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* enable gpio, and set buffer type to push pull */
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* set 1588 polarity to high */
+ gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+
+ if (!ptp_channel) {
+ /* use channel A */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ } else {
+ /* use channel B */
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ }
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ ret = bit;
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+ return ret;
+}
+
+static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(bit);
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+ if (gpio->used_bits & bit_mask) {
+ gpio->used_bits &= ~bit_mask;
+ if (gpio->output_bits & bit_mask) {
+ gpio->output_bits &= ~bit_mask;
+
+ if (gpio->ptp_bits & bit_mask) {
+ gpio->ptp_bits &= ~bit_mask;
+ /* disable ptp output */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG3,
+ gpio->gpio_cfg3);
+ }
+ /* release gpio output */
+
+ /* disable gpio */
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* reset back to input */
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+ }
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+}
+
+static int lan743x_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 lan743x_rate_adj = 0;
+ bool positive = true;
+ u64 u64_delta = 0;
+
+ if ((scaled_ppm < (-LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM)) ||
+ scaled_ppm > LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM) {
+ return -EINVAL;
+ }
+ if (scaled_ppm > 0) {
+ u64_delta = (u64)scaled_ppm;
+ positive = true;
+ } else {
+ u64_delta = (u64)(-scaled_ppm);
+ positive = false;
+ }
+ u64_delta = (u64_delta << 19);
+ lan743x_rate_adj = div_u64(u64_delta, 1000000);
+
+ if (positive)
+ lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
+ lan743x_rate_adj);
+
+ return 0;
+}
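
The adjfine conversion above maps scaled_ppm (ppm with 16 fractional bits) to the rate register as |scaled_ppm| * 2^19 / 10^6, with the sign carried by PTP_CLOCK_RATE_ADJ_DIR_. A minimal stand-alone sketch of the same arithmetic with a hypothetical sample value (the adjfreq variant below is analogous, with a << 35 / 10^9 scaling):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the adjfine conversion: |scaled_ppm| << 19 / 10^6. */
	int main(void)
	{
		int64_t scaled_ppm = 65536;	/* hypothetical: exactly +1 ppm */
		uint64_t mag = scaled_ppm < 0 ? -scaled_ppm : scaled_ppm;
		uint32_t rate_adj = (uint32_t)((mag << 19) / 1000000);

		/* 65536 << 19 / 10^6 = 34359; the driver sets the direction
		 * bit for positive adjustments.
		 */
		printf("rate_adj = %u (0x%08x)\n", rate_adj, rate_adj);
		return 0;
	}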
+
+static int lan743x_ptpci_adjfreq(struct ptp_clock_info *ptpci, s32 delta_ppb)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 lan743x_rate_adj = 0;
+ bool positive = true;
+ u32 u32_delta = 0;
+ u64 u64_delta = 0;
+
+ if ((delta_ppb < (-LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB)) ||
+ delta_ppb > LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB) {
+ return -EINVAL;
+ }
+ if (delta_ppb > 0) {
+ u32_delta = (u32)delta_ppb;
+ positive = true;
+ } else {
+ u32_delta = (u32)(-delta_ppb);
+ positive = false;
+ }
+ u64_delta = (((u64)u32_delta) << 35);
+ lan743x_rate_adj = div_u64(u64_delta, 1000000000);
+
+ if (positive)
+ lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
+ lan743x_rate_adj);
+
+ return 0;
+}
+
+static int lan743x_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ lan743x_ptp_clock_step(adapter, delta);
+
+ return 0;
+}
+
+static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nano_seconds;
+
+ return 0;
+}
+
+static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
+ const struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ if (ts) {
+ if (ts->tv_sec > 0xFFFFFFFFLL ||
+ ts->tv_sec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_sec out of range, %lld\n",
+ ts->tv_sec);
+ return -ERANGE;
+ }
+ if (ts->tv_nsec >= 1000000000L ||
+ ts->tv_nsec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_nsec out of range, %ld\n",
+ ts->tv_nsec);
+ return -ERANGE;
+ }
+ seconds = ts->tv_sec;
+ nano_seconds = ts->tv_nsec;
+ lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 general_config = 0;
+
+ if (ptp->perout_gpio_bit >= 0) {
+ lan743x_gpio_release(adapter, ptp->perout_gpio_bit);
+ ptp->perout_gpio_bit = -1;
+ }
+
+ if (ptp->perout_event_ch >= 0) {
+ /* set target to far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ 0);
+
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (ptp->perout_event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+ lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch);
+ ptp->perout_event_ch = -1;
+ }
+}
+
+static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec = 0, period_nsec = 0;
+ u32 start_sec = 0, start_nsec = 0;
+ u32 general_config = 0;
+ int pulse_width = 0;
+ int perout_bit = 0;
+
+ if (!on) {
+ lan743x_ptp_perout_off(adapter);
+ return 0;
+ }
+
+ if (ptp->perout_event_ch >= 0 ||
+ ptp->perout_gpio_bit >= 0) {
+ /* already on, turn off first */
+ lan743x_ptp_perout_off(adapter);
+ }
+
+ ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter);
+ if (ptp->perout_event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel for PEROUT\n");
+ goto failed;
+ }
+
+ switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
+ case ID_REV_ID_LAN7430_:
+ perout_bit = 2; /* GPIO 2 is preferred on EVB LAN7430 */
+ break;
+ case ID_REV_ID_LAN7431_:
+ perout_bit = 4; /* GPIO 4 is preferred on EVB LAN7431 */
+ break;
+ }
+
+ ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter,
+ perout_bit,
+ ptp->perout_event_ch);
+
+ if (ptp->perout_gpio_bit < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve gpio %d for PEROUT\n",
+ perout_bit);
+ goto failed;
+ }
+
+ start_sec = perout->start.sec;
+ start_sec += perout->start.nsec / 1000000000;
+ start_nsec = perout->start.nsec % 1000000000;
+
+ period_sec = perout->period.sec;
+ period_sec += perout->period.nsec / 1000000000;
+ period_nsec = perout->period.nsec % 1000000000;
+
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+
+ /* turn off by setting target far in future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0);
+
+ /* Configure to pulse every period */
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (ptp->perout_event_ch));
+ general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (ptp->perout_event_ch, pulse_width);
+ general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (ptp->perout_event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+
+ /* set the reload to one toggle cycle */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch),
+ period_nsec);
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ start_nsec);
+
+ return 0;
+
+failed:
+ lan743x_ptp_perout_off(adapter);
+ return -ENODEV;
+}
+
+static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *request, int on)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ if (request) {
+ switch (request->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return -EINVAL;
+ case PTP_CLK_REQ_PEROUT:
+ if (request->perout.index == 0)
+ return lan743x_ptp_perout(adapter, on,
+ &request->perout);
+ return -EINVAL;
+ case PTP_CLK_REQ_PPS:
+ return -EINVAL;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "request->type == %d, Unknown\n",
+ request->type);
+ break;
+ }
+ } else {
+ netif_err(adapter, drv, adapter->netdev, "request == NULL\n");
+ }
+ return 0;
+}
+
+static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 cap_info, cause, header, nsec, seconds;
+ bool new_timestamp_available = false;
+ int count = 0;
+
+ while ((count < 100) &&
+ (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) {
+ count++;
+ cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+ if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+ seconds = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_SEC);
+ nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS);
+ cause = (nsec &
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+ header = lan743x_csr_read(adapter,
+ PTP_TX_MSG_HEADER);
+
+ if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+ nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+ lan743x_ptp_tx_ts_enqueue_ts(adapter,
+ seconds, nsec,
+ header);
+ new_timestamp_available = true;
+ } else if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Auto capture cause not supported\n");
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "unknown tx timestamp capture cause\n");
+ }
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "TX TS INT but no TX TS CNT\n");
+ }
+ lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_);
+ }
+
+ if (new_timestamp_available)
+ lan743x_ptp_tx_ts_complete(adapter);
+
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+
+ return -1;
+}
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (seconds)
+ (*seconds) = lan743x_csr_read(adapter, PTP_CLOCK_SEC);
+
+ if (nano_seconds)
+ (*nano_seconds) = lan743x_csr_read(adapter, PTP_CLOCK_NS);
+
+ if (sub_nano_seconds)
+ (*sub_nano_seconds) =
+ lan743x_csr_read(adapter, PTP_CLOCK_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 nano_seconds_step = 0;
+ u64 abs_time_step_ns = 0;
+ u32 unsigned_seconds = 0;
+ u32 nano_seconds = 0;
+ u32 remainder = 0;
+ s32 seconds = 0;
+
+ if (time_step_ns > 15000000000LL) {
+ /* convert to clock set */
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds += remainder;
+ if (nano_seconds >= 1000000000) {
+ unsigned_seconds++;
+ nano_seconds -= 1000000000;
+ }
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ } else if (time_step_ns < -15000000000LL) {
+ /* convert to clock set */
+ time_step_ns = -time_step_ns;
+
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds_step = remainder;
+ if (nano_seconds < nano_seconds_step) {
+ unsigned_seconds--;
+ nano_seconds += 1000000000;
+ }
+ nano_seconds -= nano_seconds_step;
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ }
+
+ /* do clock step */
+ if (time_step_ns >= 0) {
+ abs_time_step_ns = (u64)(time_step_ns);
+ seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder);
+ nano_seconds = (u32)remainder;
+ } else {
+ abs_time_step_ns = (u64)(-time_step_ns);
+ seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder));
+ nano_seconds = (u32)remainder;
+ if (nano_seconds > 0) {
+ /* subtracting nano seconds is not allowed
+ * convert to subtracting from seconds,
+ * and adding to nanoseconds
+ */
+ seconds--;
+ nano_seconds = (1000000000 - nano_seconds);
+ }
+ }
+
+ if (nano_seconds > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nano_seconds += 8;
+ }
+
+ if (nano_seconds >= 1000000000) {
+ /* carry into seconds */
+ seconds++;
+ nano_seconds -= 1000000000;
+ }
+
+ while (seconds) {
+ mutex_lock(&ptp->command_lock);
+ if (seconds > 0) {
+ u32 adjustment_value = (u32)seconds;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ adjustment_value);
+ seconds -= ((s32)adjustment_value);
+ } else {
+ u32 adjustment_value = (u32)(-seconds);
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ adjustment_value);
+ seconds += ((s32)adjustment_value);
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+ if (nano_seconds) {
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ (nano_seconds &
+ PTP_CLOCK_STEP_ADJ_VALUE_MASK_));
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+}
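
The sign handling in the "do clock step" branch above is the subtle part: a negative delta must become a seconds borrow plus a non-negative nanoseconds add, because the hardware cannot subtract nanoseconds. A stand-alone sketch of that decomposition, using a hypothetical delta:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t delta_ns = -1500000000LL;	/* hypothetical: -1.5 s */
		int32_t seconds = (int32_t)(delta_ns / 1000000000LL);
		int32_t nsec = (int32_t)(delta_ns % 1000000000LL);

		/* C division truncates toward zero, so a negative remainder
		 * needs a borrow from the seconds field, as in the driver.
		 */
		if (nsec < 0) {
			seconds--;
			nsec += 1000000000;
		}
		printf("seconds = %d, nsec = %d\n", seconds, nsec); /* -2, 500000000 */
		return 0;
	}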
+
+void lan743x_ptp_isr(void *context)
+{
+ struct lan743x_adapter *adapter = (struct lan743x_adapter *)context;
+ struct lan743x_ptp *ptp = NULL;
+ int enable_flag = 1;
+ u32 ptp_int_sts = 0;
+
+ ptp = &adapter->ptp;
+
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ ptp_int_sts &= lan743x_csr_read(adapter, PTP_INT_EN_SET);
+
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ enable_flag = 0; /* PTP aux worker will re-enable later */
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TX_SWTS_ERR_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "PTP TX Software Timestamp Error\n");
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_SWTS_ERR_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_B_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_B_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_A_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_A_);
+ }
+
+ if (enable_flag) {
+ /* re-enable isr */
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ }
+}
+
+static void lan743x_ptp_tx_ts_enqueue_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_skb_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_skb_queue[ptp->tx_ts_skb_queue_size] = skb;
+ if (ignore_sync)
+ ptp->tx_ts_ignore_sync_queue |=
+ BIT(ptp->tx_ts_skb_queue_size);
+ ptp->tx_ts_skb_queue_size++;
+ } else {
+ /* this should never happen, as long as the tx channel
+ * calls lan743x_ptp_request_tx_timestamp and honors the
+ * result it returns
+ */
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts skb queue overflow\n");
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_sync_to_system_clock(struct lan743x_adapter *adapter)
+{
+ struct timespec64 ts;
+
+ ktime_get_clocktai_ts64(&ts);
+
+ lan743x_ptp_clock_set(adapter, ts.tv_sec, ts.tv_nsec, 0);
+}
+
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed)
+{
+ switch (link_speed) {
+ case 10:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(0) |
+ PTP_LATENCY_RX_SET_(0));
+ break;
+ case 100:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(181) |
+ PTP_LATENCY_RX_SET_(594));
+ break;
+ case 1000:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(30) |
+ PTP_LATENCY_RX_SET_(525));
+ break;
+ }
+}
+
+int lan743x_ptp_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_init(&ptp->command_lock);
+ spin_lock_init(&ptp->tx_ts_lock);
+ ptp->used_event_ch = 0;
+ ptp->perout_event_ch = -1;
+ ptp->perout_gpio_bit = -1;
+ return 0;
+}
+
+int lan743x_ptp_open(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int ret = -ENODEV;
+ u32 temp;
+
+ lan743x_ptp_reset(adapter);
+ lan743x_ptp_sync_to_system_clock(adapter);
+ temp = lan743x_csr_read(adapter, PTP_TX_MOD2);
+ temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_;
+ lan743x_csr_write(adapter, PTP_TX_MOD2, temp);
+ lan743x_ptp_enable(adapter);
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET,
+ PTP_INT_BIT_TX_SWTS_ERR_ | PTP_INT_BIT_TX_TS_);
+ ptp->flags |= PTP_FLAG_ISR_ENABLED;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+ return 0;
+
+ snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0");
+ ptp->pin_config[0].index = 0;
+ ptp->pin_config[0].func = PTP_PF_PEROUT;
+ ptp->pin_config[0].chan = 0;
+
+ ptp->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(ptp->ptp_clock_info.name, 16, "%pm",
+ adapter->netdev->dev_addr);
+ ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
+ ptp->ptp_clock_info.n_alarm = 0;
+ ptp->ptp_clock_info.n_ext_ts = 0;
+ ptp->ptp_clock_info.n_per_out = 1;
+ ptp->ptp_clock_info.n_pins = 0;
+ ptp->ptp_clock_info.pps = 0;
+ ptp->ptp_clock_info.pin_config = NULL;
+ ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
+ ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
+ ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
+ ptp->ptp_clock_info.gettime64 = lan743x_ptpci_gettime64;
+ ptp->ptp_clock_info.getcrosststamp = NULL;
+ ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64;
+ ptp->ptp_clock_info.enable = lan743x_ptpci_enable;
+ ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work;
+ ptp->ptp_clock_info.verify = NULL;
+
+ ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info,
+ &adapter->pdev->dev);
+
+ if (IS_ERR(ptp->ptp_clock)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "ptp_clock_register failed\n");
+ goto done;
+ }
+ ptp->flags |= PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, ifup, adapter->netdev,
+ "successfully registered ptp clock\n");
+
+ return 0;
+done:
+ lan743x_ptp_close(adapter);
+ return ret;
+}
+
+void lan743x_ptp_close(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int index;
+
+ if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
+ ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, drv, adapter->netdev,
+ "ptp clock unregister\n");
+ }
+
+ if (ptp->flags & PTP_FLAG_ISR_ENABLED) {
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR,
+ PTP_INT_BIT_TX_SWTS_ERR_ |
+ PTP_INT_BIT_TX_TS_);
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+ ptp->flags &= ~PTP_FLAG_ISR_ENABLED;
+ }
+
+ /* clean up pending timestamp requests */
+ lan743x_ptp_tx_ts_complete(adapter);
+ spin_lock_bh(&ptp->tx_ts_lock);
+ for (index = 0;
+ index < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS;
+ index++) {
+ struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
+
+ if (skb)
+ dev_kfree_skb(skb);
+ ptp->tx_ts_skb_queue[index] = NULL;
+ ptp->tx_ts_seconds_queue[index] = 0;
+ ptp->tx_ts_nseconds_queue[index] = 0;
+ }
+ ptp->tx_ts_skb_queue_size = 0;
+ ptp->tx_ts_queue_size = 0;
+ ptp->pending_tx_timestamps = 0;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+
+ lan743x_ptp_disable(adapter);
+}
+
+void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter,
+ bool ts_insert_enable)
+{
+ u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD);
+
+ if (ts_insert_enable)
+ ptp_tx_mod |= PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+ else
+ ptp_tx_mod &= ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+
+ lan743x_csr_write(adapter, PTP_TX_MOD, ptp_tx_mod);
+}
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter)
+{
+ if (lan743x_csr_read(adapter, PTP_CMD_CTL) & PTP_CMD_CTL_PTP_ENABLE_)
+ return true;
+ return false;
+}
+
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already enabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (!lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already disabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_DISABLE_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Attempting reset while enabled\n");
+ goto done;
+ }
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_RESET_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_RESET_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CLOCK_SEC, seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_NS, nano_seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_SUBNS, sub_nano_seconds);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ mutex_unlock(&ptp->command_lock);
+}
+
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ bool result = false;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ /* request granted */
+ ptp->pending_tx_timestamps++;
+ result = true;
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+ return result;
+}
+
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps > 0)
+ ptp->pending_tx_timestamps--;
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "unrequest failed, pending_tx_timestamps==0\n");
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
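
request/unrequest above form a simple credit counter sized to the hardware's four timestamp slots. A hedged sketch of how a transmit path would be expected to use the pair (queue_descriptor() and example_xmit() are hypothetical; only the lan743x_ptp_* calls come from this file):

	/* Sketch only: queue_descriptor() is a hypothetical transmit helper. */
	static int example_xmit(struct lan743x_adapter *adapter, struct sk_buff *skb)
	{
		bool do_timestamp = false;

		if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    lan743x_ptp_request_tx_timestamp(adapter)) {
			/* credit taken: hardware will capture this egress time */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			do_timestamp = true;
		}

		if (queue_descriptor(adapter, skb, do_timestamp) < 0) {
			if (do_timestamp)
				/* descriptor never queued: return the credit */
				lan743x_ptp_unrequest_tx_timestamp(adapter);
			return -ENOSPC;
		}
		return 0;
	}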
+
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ lan743x_ptp_tx_ts_enqueue_skb(adapter, skb, ignore_sync);
+
+ lan743x_ptp_tx_ts_complete(adapter);
+}
+
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret = 0;
+ int index;
+
+ if (!ifr) {
+ netif_err(adapter, drv, adapter->netdev,
+ "SIOCSHWTSTAMP, ifr == NULL\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n",
+ config.flags);
+ }
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ false, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, true);
+
+ lan743x_ptp_set_sync_ts_insert(adapter, true);
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+ " tx_type = %d, UNKNOWN\n", config.tx_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+ return ret;
+}
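
The handler above is reached through the standard SIOCSHWTSTAMP path; a minimal user-space sketch of turning TX timestamping on (the interface name is hypothetical):

	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	int main(void)
	{
		struct hwtstamp_config cfg = { 0 };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		cfg.tx_type = HWTSTAMP_TX_ON;		/* handled above */
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* hypothetical */
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			perror("SIOCSHWTSTAMP");
		close(fd);
		return 0;
	}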
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
new file mode 100644
index 000000000000..5fc1b3cd5e33
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_PTP_H
+#define _LAN743X_PTP_H
+
+#include "linux/ptp_clock_kernel.h"
+#include "linux/netdevice.h"
+
+struct lan743x_adapter;
+
+/* GPIO */
+struct lan743x_gpio {
+ /* gpio_lock: used to prevent concurrent access to gpio settings */
+ spinlock_t gpio_lock;
+
+ int used_bits;
+ int output_bits;
+ int ptp_bits;
+ u32 gpio_cfg0;
+ u32 gpio_cfg1;
+ u32 gpio_cfg2;
+ u32 gpio_cfg3;
+};
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter);
+
+void lan743x_ptp_isr(void *context);
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync);
+int lan743x_ptp_init(struct lan743x_adapter *adapter);
+int lan743x_ptp_open(struct lan743x_adapter *adapter);
+void lan743x_ptp_close(struct lan743x_adapter *adapter);
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed);
+
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
+
+#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
+#define PTP_FLAG_ISR_ENABLED BIT(2)
+
+struct lan743x_ptp {
+ int flags;
+
+ /* command_lock: used to prevent concurrent ptp commands */
+ struct mutex command_lock;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_pin_desc pin_config[1];
+
+#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2)
+ unsigned long used_event_ch;
+
+ int perout_event_ch;
+ int perout_gpio_bit;
+
+ /* tx_ts_lock: used to prevent concurrent access to timestamp arrays */
+ spinlock_t tx_ts_lock;
+ int pending_tx_timestamps;
+ struct sk_buff *tx_ts_skb_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ unsigned int tx_ts_ignore_sync_queue;
+ int tx_ts_skb_queue_size;
+ u32 tx_ts_seconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_nseconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_header_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ int tx_ts_queue_size;
+};
+
+#endif /* _LAN743X_PTP_H */
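
Taken together, the API in this header implies an ordering between probe, open, and close; a hedged sketch of the expected call sequence (the example_* wrappers are hypothetical, standing in for the hooks in lan743x_main.c):

	/* Sketch only: example_* are hypothetical stand-ins for main.c hooks. */
	static int example_probe(struct lan743x_adapter *adapter)
	{
		int ret = lan743x_gpio_init(adapter);	/* once, at probe */

		if (ret)
			return ret;
		return lan743x_ptp_init(adapter);	/* locks and defaults only */
	}

	static int example_open(struct lan743x_adapter *adapter)
	{
		/* enables the clock, interrupts, and registers the PTP clock */
		return lan743x_ptp_open(adapter);
	}

	static void example_close(struct lan743x_adapter *adapter)
	{
		/* unregisters, masks interrupts, frees pending timestamp skbs */
		lan743x_ptp_close(adapter);
	}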
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index fb2c8f8071e6..1a4f2bb48ead 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
return 0;
}
+static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
+{
+ /* Select the VID to configure */
+ ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
+ ANA_TABLES_VLANTIDX);
+ /* Set the vlan port members mask and issue a write command */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
+ ANA_TABLES_VLANACCESS_CMD_WRITE,
+ ANA_TABLES_VLANACCESS);
+
+ return ocelot_vlant_wait_for_completion(ocelot);
+}
+
+static void ocelot_vlan_mode(struct ocelot_port *port,
+ netdev_features_t features)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u8 p = port->chip_port;
+ u32 val;
+
+ /* Filtering */
+ val = ocelot_read(ocelot, ANA_VLANMASK);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ val |= BIT(p);
+ else
+ val &= ~BIT(p);
+ ocelot_write(ocelot, val, ANA_VLANMASK);
+}
+
+static void ocelot_vlan_port_apply(struct ocelot *ocelot,
+ struct ocelot_port *port)
+{
+ u32 val;
+
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
+ if (port->vlan_aware)
+ val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VLAN_CFG_VLAN_VID_M |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+ ANA_PORT_VLAN_CFG, port->chip_port);
+
+ /* Drop frames with multicast source address */
+ val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
+ if (port->vlan_aware && !port->vid)
+ /* If port is vlan-aware and tagged, drop untagged and priority
+ * tagged frames.
+ */
+ val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
+ val = REW_TAG_CFG_TAG_TPID_CFG(0);
+
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CFG_TAG_CFG(1);
+ else
+ /* Tag all frames */
+ val |= REW_TAG_CFG_TAG_CFG(3);
+ }
+ ocelot_rmw_gix(ocelot, val,
+ REW_TAG_CFG_TAG_TPID_CFG_M |
+ REW_TAG_CFG_TAG_CFG_M,
+ REW_TAG_CFG, port->chip_port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID(port->vid);
+ ocelot_rmw_gix(ocelot, val,
+ REW_PORT_VLAN_CFG_PORT_TPID_M |
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port->chip_port);
+}
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
+
+ /* Make the port a member of the VLAN */
+ ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ /* Untagged egress vlan classification */
+ if (untagged)
+ port->vid = vid;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
+
+static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with the VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ /* Remove the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ /* Egress */
+ if (port->vid == vid)
+ port->vid = 0;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
+
static void ocelot_vlan_init(struct ocelot *ocelot)
{
+ u16 port, vid;
+
/* Clear VLAN table, by default all ports are members of all VLANs */
ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
ANA_TABLES_VLANACCESS);
ocelot_vlant_wait_for_completion(ocelot);
+
+ /* Configure the port VLAN memberships */
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ ocelot->vlan_mask[vid] = 0;
+ ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ }
+
+ /* Because VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded,
+ * but we can't rely on that since the module may not be loaded.
+ */
+ ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
+ ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
+
+ /* Set vlan ingress filter mask to all ports but the CPU port by
+ * default.
+ */
+ ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
+ ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
+ }
}
/* Watermark encode
@@ -344,10 +523,9 @@ static int ocelot_port_stop(struct net_device *dev)
static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
{
ifh[0] = IFH_INJ_BYPASS;
- ifh[1] = (0xff00 & info->port) >> 8;
+ ifh[1] = (0xf00 & info->port) >> 8;
ifh[2] = (0xff & info->port) << 24;
- ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
- (info->tag_type << 16) | info->vid;
+ ifh[3] = (info->tag_type << 16) | info->vid;
return 0;
}
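
The mask fix above (0xff00 -> 0xf00) matters because the one-hot port mask is split across two injection-header words; a quick stand-alone check with a hypothetical chip port:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t port = 1u << 9;	/* hypothetical: one-hot chip port 9 */
		uint32_t ifh1 = (0xf00 & port) >> 8;	/* ports 8..11 */
		uint32_t ifh2 = (0xff & port) << 24;	/* ports 0..7 */

		printf("ifh[1] = 0x%x, ifh[2] = 0x%08x\n", ifh1, ifh2); /* 0x2, 0 */
		return 0;
	}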
@@ -370,11 +548,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
info.port = BIT(port->chip_port);
- info.cpuq = 0xff;
+ info.tag_type = IFH_TAG_TYPE_C;
+ info.vid = skb_vlan_tag_get(skb);
ocelot_gen_ifh(ifh, &info);
for (i = 0; i < IFH_LEN; i++)
- ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+ ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+ QS_INJ_WR, grp);
count = (skb->len + 3) / 4;
last = skb->len % 4;
@@ -538,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
+ if (!vid) {
+ if (!port->vlan_aware)
+ /* If the bridge is not VLAN aware and no VID was
+ * provided, set it to pvid to ensure the MAC entry
+ * matches incoming untagged packets
+ */
+ vid = port->pvid;
+ else
+ /* If the bridge is VLAN aware a VID must be provided as
+ * otherwise the learnt entry wouldn't match any frame.
+ */
+ return -EINVAL;
+ }
+
return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
ENTRYTYPE_NORMAL);
}
@@ -689,6 +883,30 @@ end:
return ret;
}
+static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_add(dev, vid, false, true);
+}
+
+static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_del(dev, vid);
+}
+
+static int ocelot_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ocelot_vlan_mode(port, features);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -700,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_fdb_add = ocelot_fdb_add,
.ndo_fdb_del = ocelot_fdb_del,
.ndo_fdb_dump = ocelot_fdb_dump,
+ .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
+ .ndo_set_features = ocelot_set_features,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -779,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_get_strings,
.get_ethtool_stats = ocelot_get_ethtool_stats,
.get_sset_count = ocelot_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int ocelot_port_attr_get(struct net_device *dev,
@@ -913,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ocelot_port->vlan_aware = attr->u.vlan_filtering;
+ ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+ break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
break;
@@ -924,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev,
return err;
}
+static int ocelot_port_obj_add_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_add(dev, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_vlan_del_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_del(dev, vid);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
const unsigned char *addr,
u16 vid)
@@ -950,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
bool new = false;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -991,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
u16 vid = mdb->vid;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -1023,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_obj_add_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
trans);
@@ -1040,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_vlan_del_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
@@ -1085,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
+
+ /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
+ ocelot_port->vlan_aware = 0;
+ ocelot_port->pvid = 0;
+ ocelot_port->vid = 0;
+}
+
+static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
+{
+ int i, port, lag;
+
+ /* Reset destination and aggregation PGIDS */
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++)
+ ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_PGID_PGID, i);
+
+ /* Now, set PGIDs for each LAG */
+ for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+ unsigned long bond_mask;
+ int aggr_count = 0;
+ u8 aggr_idx[16];
+
+ bond_mask = ocelot->lags[lag];
+ if (!bond_mask)
+ continue;
+
+ for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+ /* Destination mask */
+ ocelot_write_rix(ocelot, bond_mask,
+ ANA_PGID_PGID, port);
+ aggr_idx[aggr_count] = port;
+ aggr_count++;
+ }
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++) {
+ u32 ac;
+
+ ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
+ ac &= ~bond_mask;
+ ac |= BIT(aggr_idx[i % aggr_count]);
+ ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
+ }
+ }
+}
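
The i % aggr_count indexing above round-robins the LAG members across the aggregation PGIDs, so flows hashed to different aggregation codes egress on different physical ports. A stand-alone sketch with a hypothetical two-port bond (the PGID_AGGR/PGID_SRC values are assumed here, matching a 16-entry aggregation table):

	#include <stdio.h>

	#define PGID_AGGR	64	/* assumed base of the aggregation PGIDs */
	#define PGID_SRC	80

	int main(void)
	{
		int aggr_idx[] = { 1, 2 };	/* hypothetical bond of ports 1 and 2 */
		int aggr_count = 2;
		int i;

		for (i = PGID_AGGR; i < PGID_SRC; i++)
			printf("PGID %d -> port %d\n", i, aggr_idx[i % aggr_count]);
		return 0;
	}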
+
+static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
+{
+ unsigned long bond_mask = ocelot->lags[lag];
+ unsigned int p;
+
+ for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
+ u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+
+ /* Use lag port as logical port for port i */
+ ocelot_write_gix(ocelot, port_cfg |
+ ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+ ANA_PORT_PORT_CFG, p);
+ }
+}
+
+static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ int lag, lp;
+ struct net_device *ndev;
+ u32 bond_mask = 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(bond, ndev) {
+ struct ocelot_port *port = netdev_priv(ndev);
+
+ bond_mask |= BIT(port->chip_port);
+ }
+ rcu_read_unlock();
+
+ lp = __ffs(bond_mask);
+
+ /* If the new port is the lowest one, use it as the logical port from
+ * now on
+ */
+ if (p == lp) {
+ lag = p;
+ ocelot->lags[p] = bond_mask;
+ bond_mask &= ~BIT(p);
+ if (bond_mask) {
+ lp = __ffs(bond_mask);
+ ocelot->lags[lp] = 0;
+ }
+ } else {
+ lag = lp;
+ ocelot->lags[lp] |= BIT(p);
+ }
+
+ ocelot_setup_lag(ocelot, lag);
+ ocelot_set_aggr_pgids(ocelot);
+
+ return 0;
+}
+
+static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ u32 port_cfg;
+ int i;
+
+ /* Remove port from any lag */
+ for (i = 0; i < ocelot->num_phys_ports; i++)
+ ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+
+ /* if it was the logical port of the lag, move the lag config to the
+ * next port
+ */
+ if (ocelot->lags[p]) {
+ int n = __ffs(ocelot->lags[p]);
+
+ ocelot->lags[n] = ocelot->lags[p];
+ ocelot->lags[p] = 0;
+
+ ocelot_setup_lag(ocelot, n);
+ }
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
+ ANA_PORT_PORT_CFG, p);
+
+ ocelot_set_aggr_pgids(ocelot);
}
/* Checks if the net_device instance given to us originates from our driver. */
@@ -1112,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
else
ocelot_port_bridge_leave(ocelot_port,
info->upper_dev);
+
+ ocelot_vlan_port_apply(ocelot_port->ocelot,
+ ocelot_port);
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = ocelot_port_lag_join(ocelot_port,
+ info->upper_dev);
+ else
+ ocelot_port_lag_leave(ocelot_port,
+ info->upper_dev);
}
break;
default:
@@ -1128,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
+ if (event == NETDEV_PRECHANGEUPPER &&
+ netif_is_lag_master(info->upper_dev)) {
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+ struct netlink_ext_ack *extack;
+
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ extack = netdev_notifier_info_to_extack(&info->info);
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
+ ret = -EINVAL;
+ goto notify;
+ }
+ }
+
if (netif_is_lag_master(dev)) {
struct net_device *slave;
struct list_head *iter;
@@ -1175,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->switchdev_ops = &ocelot_port_switchdev_ops;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
@@ -1186,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
goto err_register_netdev;
}
+ /* Basic L2 initialization */
+ ocelot_vlan_port_apply(ocelot, ocelot_port);
+
return 0;
err_register_netdev:
@@ -1200,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot)
int i, cpu = ocelot->num_phys_ports;
char queue_name[32];
+ ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(u32), GFP_KERNEL);
+ if (!ocelot->lags)
+ return -ENOMEM;
+
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 097bd12a10d4..616bec30dfa3 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -493,7 +493,7 @@ struct ocelot {
u8 num_cpu_ports;
struct ocelot_port **ports;
- u16 lags[16];
+ u32 *lags;
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 18df7d934e81..26bb3b18f3be 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
- info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16;
+ info->tag_type = (ifh[3] & BIT(16)) >> 16;
info->vid = ifh[3] & GENMASK(11, 0);
return 0;
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 71899009c468..c26e0f70c494 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -2,8 +2,8 @@
# Exar device configuration
#
-config NET_VENDOR_EXAR
- bool "Exar devices"
+config NET_VENDOR_NETERION
+ bool "Neterion (Exar) devices"
default y
depends on PCI
---help---
@@ -11,16 +11,19 @@ config NET_VENDOR_EXAR
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Exar cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about Neterion/Exar cards. If you say Y, you will be
+ asked for your specific card in the following questions.
-if NET_VENDOR_EXAR
+if NET_VENDOR_NETERION
config S2IO
- tristate "Exar Xframe 10Gb Ethernet Adapter"
+ tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+ These were originally released by S2IO, which renamed itself
+ Neterion. So, the adapters might be labeled as either one, depending
+ on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
@@ -29,11 +32,13 @@ config S2IO
will be called s2io.
config VXGE
- tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
+ tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
depends on PCI
---help---
This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter.
+ I/O Virtualized Server Adapter. These were originally released by
+ Neterion, which was later acquired by Exar. So, the adapters might be
+ labeled as either one, depending on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
@@ -50,4 +55,4 @@ config VXGE_DEBUG_TRACE_ALL
the vxge driver. By default only few debug trace statements are
enabled.
-endif # NET_VENDOR_EXAR
+endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 358ed6118881..398011c87643 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -14,7 +14,6 @@
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include "vxge-traffic.h"
@@ -1095,12 +1094,9 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
struct __vxge_hw_device *hldev;
struct list_head *p, *n;
- u16 ret;
- if (blockpool == NULL) {
- ret = 1;
- goto exit;
- }
+ if (!blockpool)
+ return;
hldev = blockpool->hldev;
@@ -1123,8 +1119,7 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
kfree((void *)p);
}
- ret = 0;
-exit:
+
return;
}
@@ -2260,14 +2255,11 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
dma_addr_t dma_addr;
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 req_out;
blockpool = &devh->block_pool;
if (block_addr == NULL) {
blockpool->req_out--;
- status = VXGE_HW_FAIL;
goto exit;
}
@@ -2277,7 +2269,6 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
blockpool->req_out--;
- status = VXGE_HW_FAIL;
goto exit;
}
@@ -2292,7 +2283,7 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
else
list_del(&entry->item);
- if (entry != NULL) {
+ if (entry) {
entry->length = length;
entry->memblock = block_addr;
entry->dma_addr = dma_addr;
@@ -2300,13 +2291,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
entry->dma_handle = dma_h;
list_add(&entry->item, &blockpool->free_block_list);
blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ }
blockpool->req_out--;
- req_out = blockpool->req_out;
exit:
return;
}
@@ -2358,7 +2346,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
struct __vxge_hw_blockpool_entry *entry = NULL;
struct __vxge_hw_blockpool *blockpool;
void *memblock = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
blockpool = &devh->block_pool;
@@ -2368,10 +2355,8 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
&dma_object->handle,
&dma_object->acc_handle);
- if (memblock == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ if (!memblock)
goto exit;
- }
dma_object->addr = pci_map_single(devh->pdev, memblock, size,
PCI_DMA_BIDIRECTIONAL);
@@ -2380,7 +2365,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
dma_object->addr))) {
vxge_os_dma_free(devh->pdev, memblock,
&dma_object->acc_handle);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
@@ -3784,17 +3768,20 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 2:
*data0 |=
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 3:
*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 4:
*data1 |=
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
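
The /* fall through */ annotations in the vxge hunk above mark each case that deliberately continues into the next one; compilers building with -Wimplicit-fallthrough accept such a comment as an explicit opt-out of the warning. A minimal standalone sketch of the pattern (hypothetical steps, not the vxge code):

#include <stdio.h>

/* Cases deliberately cascade: starting at 1 runs all four steps.
 * The comment tells -Wimplicit-fallthrough that the missing
 * "break" is intentional.
 */
static void run_from(int step)
{
	switch (step) {
	case 1:
		puts("step 1");
		/* fall through */
	case 2:
		puts("step 2");
		/* fall through */
	case 3:
		puts("step 3");
		/* fall through */
	case 4:
		puts("step 4");
		break;
	default:
		break;
	}
}
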
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index cb87fccb9f6a..2572a4b91c7c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -43,8 +43,6 @@
#include "fw.h"
#include "main.h"
-#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
-
#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
@@ -441,7 +439,10 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
}
if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
- nfp_bpf_event_output(bpf, skb);
+ if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
return;
}
@@ -465,3 +466,21 @@ err_unlock:
err_free:
dev_kfree_skb_any(skb);
}
+
+void
+nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+ const struct cmsg_hdr *hdr = data;
+
+ if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
+ cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
+ return;
+ }
+
+ if (hdr->type == CMSG_TYPE_BPF_EVENT)
+ nfp_bpf_event_output(bpf, data, len);
+ else
+ cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
+ hdr->type);
+}
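
The RX hunk above also switches to dev_consume_skb_any() when the event was delivered and keeps dev_kfree_skb_any() for failures; both free the skb the same way but feed different tracepoints, so only real drops show up in drop monitoring. A minimal sketch of the convention, with a hypothetical deliver() callback standing in for nfp_bpf_event_output():

#include <linux/skbuff.h>

/* Sketch of the consume-vs-free convention, assuming a handler that
 * returns 0 when the payload was delivered and a negative errno when
 * the skb must be dropped. Only tracing differs: consume_skb marks a
 * normal end of life, kfree_skb is visible to drop monitors.
 */
static void rx_dispatch(struct sk_buff *skb,
			int (*deliver)(const void *data, unsigned int len))
{
	if (!deliver(skb->data, skb->len))
		dev_consume_skb_any(skb);	/* delivered, not a drop */
	else
		dev_kfree_skb_any(skb);		/* dropped */
}
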
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 4c7972e3db63..e4f9b7ec8528 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -51,6 +51,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_MAPS = 3,
NFP_BPF_CAP_TYPE_RANDOM = 4,
NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
+ NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6,
};
struct nfp_bpf_cap_tlv_func {
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 8a92088df0d7..eff57f7d056a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -34,10 +34,11 @@
#define pr_fmt(fmt) "NFP net bpf: " fmt
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include "main.h"
@@ -416,6 +417,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst,
}
static void
+__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
+ enum mul_type type, enum mul_step step, u16 breg, bool swap,
+ bool wr_both, bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_MUL_BASE |
+ FIELD_PREP(OP_MUL_A_SRC, areg) |
+ FIELD_PREP(OP_MUL_B_SRC, breg) |
+ FIELD_PREP(OP_MUL_STEP, step) |
+ FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
+ FIELD_PREP(OP_MUL_SW, swap) |
+ FIELD_PREP(OP_MUL_TYPE, type) |
+ FIELD_PREP(OP_MUL_WR_AB, wr_both) |
+ FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
+ enum mul_step step, swreg rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ u16 areg;
+ int err;
+
+ if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
+ nfp_prog->error = -EINVAL;
+ return;
+ }
+
+ if (step == MUL_LAST || step == MUL_LAST_2) {
+ /* When type is step and the step number is LAST or LAST2,
+ * the left source is used as the destination.
+ */
+ err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
+ areg = reg.dst;
+ } else {
+ err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
+ areg = reg.areg;
+ }
+
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
+ reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
+}
+
+static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
bool zero, bool swap, bool wr_both,
@@ -670,7 +725,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
xfer_num = round_up(len, 4) / 4;
if (src_40bit_addr)
- addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
&off);
/* Setup PREV_ALU fields to override memory read length. */
@@ -1380,6 +1435,133 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
SHF_SC_R_ROT, 16);
}
+static void
+wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg, bool gen_high_half)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
+ if (gen_high_half)
+ emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
+ reg_none());
+ else
+ wrp_immed(nfp_prog, dst_hi, 0);
+}
+
+static void
+wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
+}
+
+static int
+wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ bool gen_high_half, bool ropnd_from_reg)
+{
+ swreg multiplier, multiplicand, dst_hi, dst_lo;
+ const struct bpf_insn *insn = &meta->insn;
+ u32 lopnd_max, ropnd_max;
+ u8 dst_reg;
+
+ dst_reg = insn->dst_reg;
+ multiplicand = reg_a(dst_reg * 2);
+ dst_hi = reg_both(dst_reg * 2 + 1);
+ dst_lo = reg_both(dst_reg * 2);
+ lopnd_max = meta->umax_dst;
+ if (ropnd_from_reg) {
+ multiplier = reg_b(insn->src_reg * 2);
+ ropnd_max = meta->umax_src;
+ } else {
+ u32 imm = insn->imm;
+
+ multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ ropnd_max = imm;
+ }
+ if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
+ wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
+ gen_high_half);
+ else
+ wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);
+
+ return 0;
+}
+
+static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
+{
+ swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
+ struct reciprocal_value_adv rvalue;
+ u8 pre_shift, exp;
+ swreg magic;
+
+ if (imm > U32_MAX) {
+ wrp_immed(nfp_prog, dst_both, 0);
+ return 0;
+ }
+
+ /* NOTE: because "reciprocal_value_adv" doesn't support
+ * "divisor > (1u << 31)", we need to JIT a separate NFP sequence for
+ * that case. The quotient is then 0 or 1, i.e. the result of the
+ * unsigned comparison "dst >= imm", which can be computed with the
+ * following NFP sequence:
+ *
+ * alu[--, dst, -, imm]
+ * immed[imm, 0]
+ * alu[dst, imm, +carry, 0]
+ *
+ */
+ if (imm > 1U << 31) {
+ swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+
+ emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
+ wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
+ emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
+ reg_imm(0));
+ return 0;
+ }
+
+ rvalue = reciprocal_value_adv(imm, 32);
+ exp = rvalue.exp;
+ if (rvalue.is_wide_m && !(imm & 1)) {
+ pre_shift = fls(imm & -imm) - 1;
+ rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
+ } else {
+ pre_shift = 0;
+ }
+ magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
+ if (imm == 1U << exp) {
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, exp);
+ } else if (rvalue.is_wide_m) {
+ wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
+ magic, true);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, 1);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, rvalue.sh - 1);
+ } else {
+ if (pre_shift)
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, pre_shift);
+ wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, rvalue.sh);
+ }
+
+ return 0;
+}
+
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
@@ -1460,6 +1642,51 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 ret_einval, end;
+ swreg plen, delta;
+
+ BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));
+
+ plen = imm_a(nfp_prog);
+ delta = reg_a(2 * 2);
+
+ ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
+ end = nfp_prog_current_offset(nfp_prog) + 11;
+
+ /* Calculate resulting length */
+ emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
+ /* delta == 0 is not allowed by the kernel, so the add must overflow
+ * (carry) for the length to become smaller.
+ */
+ emit_br(nfp_prog, BR_BCC, ret_einval, 0);
+
+ /* if (new_len < 14) then -EINVAL */
+ emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
+ emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_ADD, delta);
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_ADD, delta);
+
+ emit_br(nfp_prog, BR_UNC, end, 2);
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+ return -EINVAL;
+
+ wrp_immed(nfp_prog, reg_both(0), -22);
+ wrp_immed(nfp_prog, reg_both(1), ~0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+ return -EINVAL;
+
+ return 0;
+}
+
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1684,6 +1911,31 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, true);
+}
+
+static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, false);
+}
+
+static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
+}
+
+static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ /* NOTE: the verifier hook has already rejected cases where the
+ * verifier doesn't know whether the source operand is constant.
+ */
+ return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
+}
+
static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -1772,8 +2024,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shl_imm64(nfp_prog, dst, umin);
@@ -1881,8 +2133,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shr_imm64(nfp_prog, dst, umin);
@@ -1995,8 +2247,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __ashr_imm64(nfp_prog, dst, umin);
@@ -2097,6 +2349,26 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
+static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, true);
+}
+
+static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, false);
+}
+
+static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_reg64(nfp_prog, meta);
+}
+
+static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_imm64(nfp_prog, meta);
+}
+
static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
u8 dst = meta->insn.dst_reg * 2;
@@ -2814,6 +3086,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
switch (meta->insn.imm) {
case BPF_FUNC_xdp_adjust_head:
return adjust_head(nfp_prog, meta);
+ case BPF_FUNC_xdp_adjust_tail:
+ return adjust_tail(nfp_prog, meta);
case BPF_FUNC_map_lookup_elem:
case BPF_FUNC_map_update_elem:
case BPF_FUNC_map_delete_elem:
@@ -2848,6 +3122,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
+ [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
+ [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
+ [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
[BPF_ALU64 | BPF_NEG] = neg_reg64,
[BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
@@ -2867,6 +3145,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
+ [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
+ [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
+ [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
@@ -3299,7 +3581,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
return false;
- if (ld_meta->ptr.type != PTR_TO_PACKET)
+ if (ld_meta->ptr.type != PTR_TO_PACKET &&
+ ld_meta->ptr.type != PTR_TO_MAP_VALUE)
return false;
if (st_meta->ptr.type != PTR_TO_PACKET)
@@ -3647,6 +3930,7 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
struct nfp_insn_meta *meta1, *meta2;
struct nfp_bpf_map *nfp_map;
struct bpf_map *map;
+ u32 id;
nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
if (meta1->skip || meta2->skip)
@@ -3658,11 +3942,14 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
map = (void *)(unsigned long)((u32)meta1->insn.imm |
(u64)meta2->insn.imm << 32);
- if (bpf_map_offload_neutral(map))
- continue;
- nfp_map = map_to_offmap(map)->dev_priv;
+ if (bpf_map_offload_neutral(map)) {
+ id = map->id;
+ } else {
+ nfp_map = map_to_offmap(map)->dev_priv;
+ id = nfp_map->tid;
+ }
- meta1->insn.imm = nfp_map->tid;
+ meta1->insn.imm = id;
meta2->insn.imm = 0;
}
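
wrp_div_imm() above lowers unsigned division by a constant into reciprocal multiplication via reciprocal_value_adv(), with two fast paths: a power-of-two divisor becomes a plain right shift, and a divisor above 2^31 reduces to the unsigned comparison dst >= imm, since the quotient can only be 0 or 1. A userspace model of the emitted arithmetic, assuming magic values (m, sh, exp, is_wide_m) precomputed as by the kernel helper and omitting the even-divisor pre-shift for brevity:

#include <stdint.h>

/* Userspace model of the JITed sequences in wrp_div_imm(): for a
 * suitable magic pair (m, sh), n / d == ((uint64_t)n * m) >> (32 + sh).
 */
static uint32_t div_by_const(uint32_t n, uint32_t d,
			     uint32_t m, uint8_t sh, uint8_t exp,
			     int is_wide_m)
{
	if (d > (1U << 31))		/* quotient can only be 0 or 1 */
		return n >= d;
	if (d == 1U << exp)		/* power of two: plain shift */
		return n >> exp;
	if (is_wide_m) {		/* 33-bit magic: fix up via high half */
		uint32_t hi = ((uint64_t)n * m) >> 32;

		return (((n - hi) >> 1) + hi) >> (sh - 1);
	}
	return ((uint64_t)n * m) >> (32 + sh);	/* common case */
}
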
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index fcdfb8e7fdea..970af07f4656 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -45,8 +45,8 @@
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
.nelem_hint = 4,
- .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
- .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr),
+ .key_len = FIELD_SIZEOF(struct bpf_map, id),
+ .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id),
.head_offset = offsetof(struct nfp_bpf_neutral_map, l),
.automatic_shrinking = true,
};
@@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
bool running, xdp_running;
- int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
- xdp_running = running && nn->dp.bpf_offload_xdp;
+ xdp_running = running && nn->xdp_hw.prog;
if (!prog && !xdp_running)
return 0;
if (prog && running && !xdp_running)
return -EBUSY;
- ret = nfp_net_bpf_offload(nn, prog, running, extack);
- /* Stop offload if replace not possible */
- if (ret && prog)
- nfp_bpf_xdp_offload(app, nn, NULL, extack);
-
- nn->dp.bpf_offload_xdp = prog && !ret;
- return ret;
+ return nfp_net_bpf_offload(nn, prog, running, extack);
}
static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
@@ -206,7 +199,7 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_bpf_setup_tc_block_cb,
- nn, nn);
+ nn, nn, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_bpf_setup_tc_block_cb,
@@ -341,6 +334,14 @@ nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
return 0;
}
+static int
+nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ bpf->adjust_tail = true;
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -387,6 +388,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
+ if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -408,6 +414,20 @@ err_release_free:
return -EINVAL;
}
+static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
+}
+
+static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
+}
+
static int nfp_bpf_init(struct nfp_app *app)
{
struct nfp_app_bpf *bpf;
@@ -431,6 +451,11 @@ static int nfp_bpf_init(struct nfp_app *app)
if (err)
goto err_free_neutral_maps;
+ bpf->bpf_dev = bpf_offload_dev_create();
+ err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
+ if (err)
+ goto err_free_neutral_maps;
+
return 0;
err_free_neutral_maps:
@@ -449,6 +474,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
+ bpf_offload_dev_destroy(bpf->bpf_dev);
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
@@ -470,10 +496,14 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
+ .ndo_init = nfp_bpf_ndo_init,
+ .ndo_uninit = nfp_bpf_ndo_uninit,
+
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
+ .ctrl_msg_rx_raw = nfp_bpf_ctrl_msg_rx_raw,
.setup_tc = nfp_bpf_setup_tc,
.bpf = nfp_ndo_bpf,
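
The neutral-maps rhashtable above is now keyed by the stable u32 map ID (stored in the entry as map_id) rather than by the kernel pointer, so lookups pass a pointer to the ID value. A minimal sketch of an rhashtable keyed by an embedded u32, with hypothetical names:

#include <linux/rhashtable.h>

/* Minimal sketch mirroring nfp_bpf_maps_neutral_params: the key is a
 * u32 id embedded in the entry (names here are hypothetical).
 */
struct demo_entry {
	struct rhash_head node;
	u32 id;				/* the hash key */
	void *payload;
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct demo_entry, id),
	.head_offset	= offsetof(struct demo_entry, node),
	.automatic_shrinking = true,
};

/* Lookup by value: pass a pointer to the key, not to an entry. */
static struct demo_entry *demo_find(struct rhashtable *ht, u32 id)
{
	return rhashtable_lookup_fast(ht, &id, demo_params);
}
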
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 654fe7823e5e..dbd00982fd2b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -47,6 +47,8 @@
#include "../nfp_asm.h"
#include "fw.h"
+#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
/* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
@@ -110,6 +112,8 @@ enum pkt_vec {
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
*
+ * @bpf_dev: BPF offload device handle
+ *
* @tag_allocator: bitmap of control message tags in use
* @tag_alloc_next: next tag bit to allocate
* @tag_alloc_last: next tag bit to be freed
@@ -146,10 +150,13 @@ enum pkt_vec {
*
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
* @queue_select: BPF can set the RX queue ID in packet vector
+ * @adjust_tail: BPF can simply truncate the packet size for adjust tail
*/
struct nfp_app_bpf {
struct nfp_app *app;
+ struct bpf_offload_dev *bpf_dev;
+
DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
u16 tag_alloc_next;
u16 tag_alloc_last;
@@ -189,6 +196,7 @@ struct nfp_app_bpf {
bool pseudo_random;
bool queue_select;
+ bool adjust_tail;
};
enum nfp_bpf_map_use {
@@ -217,6 +225,7 @@ struct nfp_bpf_map {
struct nfp_bpf_neutral_map {
struct rhash_head l;
struct bpf_map *ptr;
+ u32 map_id;
u32 count;
};
@@ -263,8 +272,10 @@ struct nfp_bpf_reg_state {
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
- * @umin: copy of core verifier umin_value.
- * @umax: copy of core verifier umax_value.
+ * @umin_src: copy of core verifier umin_value for src operand.
+ * @umax_src: copy of core verifier umax_value for src operand.
+ * @umin_dst: copy of core verifier umin_value for dst operand.
+ * @umax_dst: copy of core verifier umax_value for dst operand.
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -300,12 +311,15 @@ struct nfp_insn_meta {
struct bpf_reg_state arg1;
struct nfp_bpf_reg_state arg2;
};
- /* We are interested in range info for some operands,
- * for example, the shift amount.
+ /* We are interested in range info for operands of ALU
+ * operations: for example, the shift amount, multiplicand
+ * and multiplier.
*/
struct {
- u64 umin;
- u64 umax;
+ u64 umin_src;
+ u64 umax_src;
+ u64 umin_dst;
+ u64 umax_dst;
};
};
unsigned int off;
@@ -339,6 +353,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
return BPF_MODE(meta->insn.code);
}
+static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
+}
+
static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
@@ -384,23 +403,14 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
-static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
- u8 code = meta->insn.code;
- bool is_alu, is_shift;
- u8 opclass, opcode;
-
- opclass = BPF_CLASS(code);
- is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
- if (!is_alu)
- return false;
-
- opcode = BPF_OP(code);
- is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
- if (!is_shift)
- return false;
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
+}
- return BPF_SRC(code) == BPF_X;
+static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
+{
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
/**
@@ -496,7 +506,11 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key);
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ unsigned int len);
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
+ unsigned int len);
#endif
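
The new is_mbpf_alu()/is_mbpf_mul()/is_mbpf_div() helpers above all rely on the standard decomposition of an eBPF opcode byte into class, source and operation. An illustrative helper, not part of the driver:

#include <linux/filter.h>

/* An eBPF opcode byte decomposes into class | source | operation; for
 * example BPF_ALU64 | BPF_MUL | BPF_X is "dst *= src" on 64-bit
 * registers. (Hypothetical helper for illustration only.)
 */
static bool opcode_is_alu_mul(u8 code)
{
	u8 class = BPF_CLASS(code);

	return (class == BPF_ALU64 || class == BPF_ALU) &&
	       BPF_OP(code) == BPF_MUL;
}
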
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 7eae4c0266f8..1ccd6371a15b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -67,7 +67,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
ASSERT_RTNL();
/* Reuse path - other offloaded program is already tracking this map. */
- record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params);
if (record) {
nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
@@ -89,6 +89,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
}
record->ptr = map;
+ record->map_id = map->id;
record->count = 1;
err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
@@ -190,8 +191,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
meta->insn = prog[i];
meta->n = i;
- if (is_mbpf_indir_shift(meta))
- meta->umin = U64_MAX;
+ if (is_mbpf_alu(meta)) {
+ meta->umin_src = U64_MAX;
+ meta->umin_dst = U64_MAX;
+ }
list_add_tail(&meta->l, &nfp_prog->insns);
}
@@ -377,11 +380,23 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
bpf->maps.max_elems - bpf->map_elems_in_use);
return -ENOMEM;
}
- if (offmap->map.key_size > bpf->maps.max_key_sz ||
- offmap->map.value_size > bpf->maps.max_val_sz ||
- round_up(offmap->map.key_size, 8) +
+
+ if (round_up(offmap->map.key_size, 8) +
round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
- pr_info("elements don't fit in device constraints\n");
+ pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
+ round_up(offmap->map.key_size, 8) +
+ round_up(offmap->map.value_size, 8),
+ bpf->maps.max_elem_sz);
+ return -ENOMEM;
+ }
+ if (offmap->map.key_size > bpf->maps.max_key_sz) {
+ pr_info("map key size %u, FW max is %u\n",
+ offmap->map.key_size, bpf->maps.max_key_sz);
+ return -ENOMEM;
+ }
+ if (offmap->map.value_size > bpf->maps.max_val_sz) {
+ pr_info("map value size %u, FW max is %u\n",
+ offmap->map.value_size, bpf->maps.max_val_sz);
return -ENOMEM;
}
@@ -451,43 +466,43 @@ nfp_bpf_perf_event_copy(void *dst, const void *src,
return 0;
}
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ unsigned int len)
{
- struct cmsg_bpf_event *cbe = (void *)skb->data;
- u32 pkt_size, data_size;
- struct bpf_map *map;
+ struct cmsg_bpf_event *cbe = (void *)data;
+ struct nfp_bpf_neutral_map *record;
+ u32 pkt_size, data_size, map_id;
+ u64 map_id_full;
- if (skb->len < sizeof(struct cmsg_bpf_event))
- goto err_drop;
+ if (len < sizeof(struct cmsg_bpf_event))
+ return -EINVAL;
pkt_size = be32_to_cpu(cbe->pkt_size);
data_size = be32_to_cpu(cbe->data_size);
- map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);
+ map_id_full = be64_to_cpu(cbe->map_ptr);
+ map_id = map_id_full;
- if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
- goto err_drop;
+ if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ return -EINVAL;
if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
- goto err_drop;
+ return -EINVAL;
rcu_read_lock();
- if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
- nfp_bpf_maps_neutral_params)) {
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
+ nfp_bpf_maps_neutral_params);
+ if (!record || map_id_full > U32_MAX) {
rcu_read_unlock();
- pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
- map);
- goto err_drop;
+ cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
+ map_id_full, map_id_full);
+ return -EINVAL;
}
- bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
+ bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
&cbe->data[round_up(pkt_size, 4)], data_size,
cbe->data, pkt_size, nfp_bpf_perf_event_copy);
rcu_read_unlock();
- dev_consume_skb_any(skb);
return 0;
-err_drop:
- dev_kfree_skb_any(skb);
- return -EINVAL;
}
static int
@@ -564,14 +579,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
{
int err;
- if (prog) {
- struct bpf_prog_offload *offload = prog->aux->offload;
-
- if (!offload)
- return -EINVAL;
- if (offload->netdev != nn->dp.netdev)
- return -EINVAL;
- }
+ if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
+ return -EINVAL;
if (prog && old_prog) {
u8 cap;
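
The split-up map checks above compare the 8-byte-rounded key and value sizes against the firmware's per-element limit before checking each size individually. A one-function sketch of that arithmetic (hypothetical name):

#include <linux/kernel.h>

/* Sketch of the element-size check: key and value are each padded to
 * 8-byte multiples before comparing with the FW limit, so e.g. a
 * 4-byte key with a 13-byte value needs 8 + 16 = 24 bytes of room.
 */
static bool elem_fits(u32 key_size, u32 value_size, u32 max_elem_sz)
{
	return round_up(key_size, 8) + round_up(value_size, 8) <= max_elem_sz;
}
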
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 4bfeba7b21b2..a6e9248669e1 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -178,6 +178,13 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
break;
+ case BPF_FUNC_xdp_adjust_tail:
+ if (!bpf->adjust_tail) {
+ pr_vlog(env, "adjust_tail not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
case BPF_FUNC_map_lookup_elem:
if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
bpf->helpers.map_lookup, reg1) ||
@@ -517,6 +524,82 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
static int
+nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *sreg =
+ cur_regs(env) + meta->insn.src_reg;
+ const struct bpf_reg_state *dreg =
+ cur_regs(env) + meta->insn.dst_reg;
+
+ meta->umin_src = min(meta->umin_src, sreg->umin_value);
+ meta->umax_src = max(meta->umax_src, sreg->umax_value);
+ meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
+ meta->umax_dst = max(meta->umax_dst, dreg->umax_value);
+
+ /* NFP supports u16 and u32 multiplication.
+ *
+ * For ALU64, if either operand is beyond u32's value range, we reject
+ * it. Note that if the source operand is BPF_K, we need to check the
+ * "imm" field directly, and reject it if it is negative, because for
+ * ALU64 "imm" (an s32) is sign extended to s64, which NFP mul doesn't
+ * support.
+ *
+ * For ALU32, it is fine for "imm" to be negative, because the result
+ * is 32 bits and the low half of the result is the same for signed
+ * and unsigned mul, so we get the correct result.
+ */
+ if (is_mbpf_mul(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "multiplier is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
+ pr_vlog(env, "multiplicand is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_class(meta) == BPF_ALU64 &&
+ mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+
+ /* NFP doesn't have divide instructions; we support divide by constant
+ * through reciprocal multiplication. Since NFP supports multiplication
+ * no wider than u32, we require the divisor and dividend to be within
+ * u32 range as well.
+ *
+ * Also, eBPF doesn't support signed divide and enforces this at the C
+ * language level by failing compilation. However, the LLVM assembler
+ * doesn't enforce it, so a negative constant could leak in as a BPF_K
+ * operand through assembly code; we reject such cases as well.
+ */
+ if (is_mbpf_div(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "dividend is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X) {
+ if (meta->umin_src != meta->umax_src) {
+ pr_vlog(env, "divisor is not constant\n");
+ return -EINVAL;
+ }
+ if (meta->umax_src > U32_MAX) {
+ pr_vlog(env, "divisor is not within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+ if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "divide by negative constant is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
@@ -551,13 +634,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);
- if (is_mbpf_indir_shift(meta)) {
- const struct bpf_reg_state *sreg =
- cur_regs(env) + meta->insn.src_reg;
-
- meta->umin = min(meta->umin, sreg->umin_value);
- meta->umax = max(meta->umax, sreg->umax_value);
- }
+ if (is_mbpf_alu(meta))
+ return nfp_bpf_check_alu(nfp_prog, meta, env);
return 0;
}
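
Since the checks above require every multiply/divide operand's tracked umax to fit in u32 (and the u16 multiply path needs even tighter bounds), a program can make itself offloadable by clamping a register before the operation. A hypothetical eBPF C fragment illustrating the idea:

/* Hypothetical eBPF fragment (clang -target bpf): the explicit clamp
 * lets the verifier prove umax(len) <= 0xffff, so the offload JIT can
 * use the shorter u16 multiply sequence; an unbounded 64-bit operand
 * above U32_MAX would be rejected outright.
 */
static unsigned int scaled_len(unsigned long len)
{
	if (len > 0xffff)	/* bound the operand for the verifier */
		return 0;
	return len * 3;		/* both operands now fit u16 */
}
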
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 4a6d2db75071..0ba0356ec4e6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -32,8 +32,10 @@
*/
#include <linux/bitfield.h>
+#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
+#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
@@ -44,6 +46,16 @@
#include "main.h"
#include "../nfp_net_repr.h"
+/* The kernel versions of TUNNEL_* are not ABI and are therefore
+ * subject to change. Such changes would break our FW ABI.
+ */
+#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
+#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
+#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
+ NFP_FL_TUNNEL_KEY | \
+ NFP_FL_TUNNEL_GENEVE_OPT)
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -226,7 +238,71 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
+ const struct tc_action *action)
+{
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ int opt_len, opt_cnt, act_start, tot_push_len;
+ u8 *src = ip_tunnel_info_opts(ip_tun);
+
+ /* We need to populate the options in reverse order for the HW,
+ * so first walk the options once to count them and compute the
+ * total push size, then fill them into the action list back to
+ * front.
+ */
+ opt_cnt = 0;
+ tot_push_len = 0;
+ opt_len = ip_tun->options_len;
+ while (opt_len > 0) {
+ struct geneve_opt *opt = (struct geneve_opt *)src;
+
+ opt_cnt++;
+ if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
+ return -EOPNOTSUPP;
+
+ tot_push_len += sizeof(struct nfp_fl_push_geneve) +
+ opt->length * 4;
+ if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
+ return -EOPNOTSUPP;
+
+ opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
+ src += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ act_start = *list_len;
+ *list_len += tot_push_len;
+ src = ip_tunnel_info_opts(ip_tun);
+ while (opt_cnt) {
+ struct geneve_opt *opt = (struct geneve_opt *)src;
+ struct nfp_fl_push_geneve *push;
+ size_t act_size, len;
+
+ opt_cnt--;
+ act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
+ tot_push_len -= act_size;
+ len = act_start + tot_push_len;
+
+ push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
+ push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
+ push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ push->reserved = 0;
+ push->class = opt->opt_class;
+ push->type = opt->type;
+ push->length = opt->length;
+ memcpy(&push->opt_data, opt->opt_data, opt->length * 4);
+
+ src += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ return 0;
+}
+
+static int
+nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
+ struct nfp_fl_set_ipv4_udp_tun *set_tun,
const struct tc_action *action,
struct nfp_fl_pre_tunnel *pre_tun,
enum nfp_flower_tun_type tun_type,
@@ -234,16 +310,19 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
{
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
- struct net *net;
- if (ip_tun->options_len)
+ BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
+ NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
+ NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
+ if (ip_tun->options_len &&
+ (tun_type != NFP_FL_TUNNEL_GENEVE ||
+ !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
return -EOPNOTSUPP;
- net = dev_net(netdev);
-
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -254,7 +333,42 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+
+ if (ip_tun->key.ttl) {
+ set_tun->ttl = ip_tun->key.ttl;
+ } else {
+ struct net *net = dev_net(netdev);
+ struct flowi4 flow = {};
+ struct rtable *rt;
+ int err;
+
+ /* Do a route lookup to determine the ttl - if it fails, use
+ * the default. Note that CONFIG_INET is a requirement of
+ * CONFIG_NET_SWITCHDEV, so it must be defined here.
+ */
+ flow.daddr = ip_tun->key.u.ipv4.dst;
+ flow.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(net, &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (!err) {
+ set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
+ ip_rt_put(rt);
+ } else {
+ set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ }
+ }
+
+ set_tun->tos = ip_tun->key.tos;
+
+ if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+ return -EOPNOTSUPP;
+ set_tun->tun_flags = ip_tun->key.tun_flags;
+
+ if (tun_type == NFP_FL_TUNNEL_GENEVE) {
+ set_tun->tun_proto = htons(ETH_P_TEB);
+ set_tun->tun_len = ip_tun->options_len / 4;
+ }
/* Complete pre_tunnel action. */
pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -398,8 +512,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
return 0;
}
+static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
+{
+ switch (ip_proto) {
+ case 0:
+ /* Filter doesn't force a protocol match;
+ * both TCP and UDP will be updated if encountered.
+ */
+ return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
+ case IPPROTO_TCP:
+ return TCA_CSUM_UPDATE_FLAG_TCP;
+ case IPPROTO_UDP:
+ return TCA_CSUM_UPDATE_FLAG_UDP;
+ default:
+ /* All other protocols will be ignored by FW */
+ return 0;
+ }
+}
+
static int
-nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
+ char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ip4_addrs set_ip_addr;
@@ -409,6 +542,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
int idx, nkeys, err;
size_t act_size;
u32 offset, cmd;
+ u8 ip_proto = 0;
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
@@ -451,6 +585,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
return err;
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *basic;
+
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ ip_proto = basic->ip_proto;
+ }
+
if (set_eth.head.len_lw) {
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
@@ -459,6 +602,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
@@ -471,18 +618,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_tport.head.len_lw) {
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
return 0;
@@ -493,12 +652,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
int err, prelag_size;
+ /* If csum_updated has not been reset by now, it means HW will
+ * incorrectly update csums when they are not requested.
+ */
+ if (*csum_updated)
+ return -EOPNOTSUPP;
+
if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -529,10 +694,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
@@ -545,14 +711,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
} else if (is_tcf_mirred_egress_redirect(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
@@ -592,9 +758,13 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
+ if (err)
+ return err;
+
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
- netdev);
+ err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
+ *tun_type, netdev);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
@@ -602,8 +772,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
/* Tunnel decap is handled by default so accept action. */
return 0;
} else if (is_tcf_pedit(a)) {
- if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
+ a_len, csum_updated))
+ return -EOPNOTSUPP;
+ } else if (is_tcf_csum(a)) {
+ /* csum action requests recalc of something we have not fixed */
+ if (tcf_csum_update_flags(a) & ~*csum_updated)
return -EOPNOTSUPP;
+ /* If we will correctly fix the csum, we can remove it from the
+ * csum update list, which is later used to check support.
+ */
+ *csum_updated &= ~tcf_csum_update_flags(a);
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
@@ -620,6 +799,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
int act_len, act_cnt, err, tun_out_cnt, out_cnt;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
+ u32 csum_updated = 0;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -632,8 +812,9 @@ int nfp_flower_compile_action(struct nfp_app *app,
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
- &tun_type, &tun_out_cnt, &out_cnt);
+ err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+ netdev, &tun_type, &tun_out_cnt,
+ &out_cnt, &csum_updated);
if (err)
return err;
act_cnt++;
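
nfp_fl_push_geneve_options() above works in two passes: the first walks the TC-provided options to count them and size-check the total push length, the second walks the same options forward but writes each one at a descending offset, so they end up reversed in the action list as the firmware expects. A compact userspace sketch of the offset arithmetic, with plain byte lengths standing in for the geneve headers:

#include <string.h>

/* Two-pass reverse packing: pass 1 totals the sizes; pass 2 walks the
 * input forward but writes each option at a descending offset, so the
 * last input option lands first in the output buffer.
 */
static void pack_reversed(unsigned char *dst, const unsigned char *src,
			  const unsigned int *len, int n)
{
	unsigned int tot = 0, off;
	int i;

	for (i = 0; i < n; i++)		/* pass 1: total size */
		tot += len[i];

	off = tot;
	for (i = 0; i < n; i++) {	/* pass 2: descending offsets */
		off -= len[i];
		memcpy(dst + off, src, len[i]);
		src += len[i];
	}
}
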
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4a7f3510a296..325954b829c8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -37,6 +37,7 @@
#include <linux/bitfield.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <net/geneve.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -51,6 +52,7 @@
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
+#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
@@ -81,6 +83,11 @@
#define NFP_FL_MAX_A_SIZ 1216
#define NFP_FL_LW_SIZ 2
+/* Maximum allowed geneve options */
+#define NFP_FL_MAX_GENEVE_OPT_ACT 32
+#define NFP_FL_MAX_GENEVE_OPT_CNT 64
+#define NFP_FL_MAX_GENEVE_OPT_KEY 32
+
/* Action opcodes */
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
@@ -94,6 +101,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
#define NFP_FL_ACTION_OPCODE_NUM 32
#define NFP_FL_OUT_FLAGS_LAST BIT(15)
@@ -203,10 +211,22 @@ struct nfp_fl_set_ipv4_udp_tun {
__be16 reserved;
__be64 tun_id __packed;
__be32 tun_type_index;
- __be16 reserved2;
+ __be16 tun_flags;
u8 ttl;
- u8 reserved3;
- __be32 extra[2];
+ u8 tos;
+ __be32 extra;
+ u8 tun_len;
+ u8 res2;
+ __be16 tun_proto;
+};
+
+struct nfp_fl_push_geneve {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be16 class;
+ u8 type;
+ u8 length;
+ u8 opt_data[];
};
/* Metadata with L2 (1W/4B)
@@ -346,7 +366,7 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | Reserved |
+ * | Reserved | tos | ttl |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -356,10 +376,17 @@ struct nfp_flower_ipv6 {
struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
- __be32 reserved[2];
+ __be16 reserved1;
+ u8 tos;
+ u8 ttl;
+ __be32 reserved2;
__be32 tun_id;
};
+struct nfp_flower_geneve_options {
+ u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
+};
+
#define NFP_FL_TUN_VNI_OFFSET 8
/* The base header for a control message packet.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 0c4c957717ea..bf10598f66ae 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
if (lag_upper_info &&
lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
(lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
- (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
- lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
+ (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
can_offload = false;
nfp_flower_cmsg_warn(priv->app,
"Unable to offload tx_type %u hash %u\n",
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 1decf3a1cad3..e57d23746585 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
return NFP_REPR_TYPE_VF;
}
- return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
+ return __NFP_REPR_TYPE_MAX;
}
static struct net_device *
@@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
u8 port = 0;
repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
+ if (repr_type > NFP_REPR_TYPE_MAX)
+ return NULL;
reprs = rcu_dereference(app->reprs[repr_type]);
if (!reprs)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index bbe5764d26cb..85f8209bf007 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -69,11 +69,12 @@ struct nfp_app;
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
+#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
- struct timespec64 *last_used;
+ ktime_t *last_used;
u8 init_unallocated;
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 91935405f586..a0c72f277faa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
NFP_FLOWER_MASK_MPLS_Q;
frame->mpls_lse = cpu_to_be32(t_mpls);
+ } else if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC)) {
+ /* Check for an mpls ether type and set the
+ * NFP_FLOWER_MASK_MPLS_Q bit, which indicates an mpls
+ * ether type without any mpls fields.
+ */
+ struct flow_dissector_key_basic *key_basic;
+
+ key_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+ key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+ frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
}
}
@@ -248,6 +262,21 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
+static int
+nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_enc_opts *opts;
+
+ opts = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS,
+ target);
+ memcpy(key_buf, opts->data, opts->len);
+
+ return 0;
+}
+
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
struct tc_cls_flower_offload *flow,
@@ -256,6 +285,7 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
+ struct flow_dissector_key_ip *ip;
memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
@@ -279,6 +309,14 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
frame->ip_src = tun_ips->src;
frame->ip_dst = tun_ips->dst;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ target);
+ frame->tos = ip->tos;
+ frame->ttl = ip->ttl;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
@@ -401,6 +439,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
nfp_flow->nfp_tun_ipv4_addr = tun_dst;
nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
}
+
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+ err = nfp_flower_compile_geneve_opt(ext, flow, false);
+ if (err)
+ return err;
+
+ err = nfp_flower_compile_geneve_opt(msk, flow, true);
+ if (err)
+ return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 93fb809f50d1..c098730544b7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
struct nfp_flower_priv *priv = app->priv;
struct circ_buf *ring;
- struct timespec64 now;
ring = &priv->mask_ids.mask_id_free_list;
/* Checking if buffer is full. */
@@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
(NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
- getnstimeofday64(&now);
- priv->mask_ids.last_used[mask_id] = now;
+ priv->mask_ids.last_used[mask_id] = ktime_get();
return 0;
}
@@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
struct nfp_flower_priv *priv = app->priv;
- struct timespec64 delta, now;
+ ktime_t reuse_timeout;
struct circ_buf *ring;
u8 temp_id, freed_id;
@@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
*mask_id = temp_id;
- getnstimeofday64(&now);
- delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]);
+ reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
+ NFP_FL_MASK_REUSE_TIME_NS);
- if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS)
+ if (ktime_before(ktime_get(), reuse_timeout))
goto err_not_found;
memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
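
The metadata.c hunks above replace the timespec64 subtraction with monotonic ktime: each freed mask ID is stamped with ktime_get(), and allocation refuses to recycle it until NFP_FL_MASK_REUSE_TIME_NS has elapsed. A minimal sketch of the reuse-guard pattern (hypothetical wrapper):

#include <linux/ktime.h>

/* Sketch of a monotonic reuse guard: an id freed at "last_used" may
 * be handed out again only once "timeout_ns" has passed.
 */
static bool reuse_allowed(ktime_t last_used, u64 timeout_ns)
{
	ktime_t reuse_at = ktime_add_ns(last_used, timeout_ns);

	return !ktime_before(ktime_get(), reuse_at);
}
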
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c42e64f32333..2edab01c3beb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -66,6 +66,8 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
@@ -74,7 +76,9 @@
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
@@ -139,6 +143,21 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+ u32 *key_layer_two, int *key_size)
+{
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ return -EOPNOTSUPP;
+
+ if (enc_opts->len > 0) {
+ *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
+ *key_size += sizeof(struct nfp_flower_geneve_options);
+ }
+
+ return 0;
+}
+
+static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
@@ -151,6 +170,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
u32 key_layer_two;
u8 key_layer;
int key_size;
+ int err;
if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
@@ -176,6 +196,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
struct flow_dissector_key_ports *mask_enc_ports = NULL;
+ struct flow_dissector_key_enc_opts *enc_op = NULL;
struct flow_dissector_key_ports *enc_ports = NULL;
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
@@ -212,11 +233,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ enc_op = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS,
+ flow->key);
+ }
+
switch (enc_ports->dst) {
case htons(NFP_FL_VXLAN_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (enc_op)
+ return -EOPNOTSUPP;
break;
case htons(NFP_FL_GENEVE_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
@@ -226,6 +257,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_size += sizeof(struct nfp_flower_ext_meta);
key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (!enc_op)
+ break;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
+ return -EOPNOTSUPP;
+ err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
+ &key_size);
+ if (err)
+ return err;
break;
default:
return -EOPNOTSUPP;
@@ -264,6 +304,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
+ case cpu_to_be16(ETH_P_MPLS_UC):
+ case cpu_to_be16(ETH_P_MPLS_MC):
+ if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+ key_layer |= NFP_FLOWER_LAYER_MAC;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
+ break;
+
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -576,9 +624,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
return nfp_flower_get_stats(app, netdev, flower, egress);
+ default:
+ return -EOPNOTSUPP;
}
-
- return -EOPNOTSUPP;
}
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
@@ -627,7 +675,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_flower_setup_tc_block_cb,
- repr, repr);
+ repr, repr, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_flower_setup_tc_block_cb,
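The whitelist masks extended above drive a single cheap support check: tc hands the driver a bitmap of the dissector keys a filter actually uses, and any bit outside the whitelist means the flower match cannot be offloaded. A stripped-down sketch of that test, with an illustrative mask (the real ones are the NFP_FLOWER_WHITELIST_* definitions above):

#define MY_WHITELIST	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
			 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
			 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))

static int check_supported_keys(struct tc_cls_flower_offload *flow)
{
	/* Any used key outside the whitelist cannot be expressed in HW */
	if (flow->dissector->used_keys & ~MY_WHITELIST)
		return -EOPNOTSUPP;
	return 0;
}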
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 78afe75129ab..382bb93cb090 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.dst_ipv4 = flow->daddr;
/* If entry has expired, send dst IP with all other fields 0. */
- if (!(neigh->nud_state & NUD_VALID)) {
+ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
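The fix above also treats a neighbour entry that is being torn down (neigh->dead) as unusable, not just one without a confirmed state. A minimal predicate capturing both conditions, as an assumption-level sketch (locking elided):

#include <net/neighbour.h>

/* Usable only when the entry is confirmed and not pending destruction. */
static bool neigh_usable(const struct neighbour *n)
{
	return (n->nud_state & NUD_VALID) && !n->dead;
}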
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index f28b244f4ee7..8607d09ab732 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -86,6 +86,23 @@ const char *nfp_app_mip_name(struct nfp_app *app)
return nfp_mip_name(app->pf->mip);
}
+int nfp_app_ndo_init(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app || !app->type->ndo_init)
+ return 0;
+ return app->type->ndo_init(app, netdev);
+}
+
+void nfp_app_ndo_uninit(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (app && app->type->ndo_uninit)
+ app->type->ndo_uninit(app, netdev);
+}
+
u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data)
{
if (!port || !port->app || !port->app->type->port_get_stats)
@@ -155,6 +172,8 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(!apps[id]->ctrl_msg_rx && apps[id]->ctrl_msg_rx_raw))
+ return ERR_PTR(-EINVAL);
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
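The WARN_ON added to nfp_app_alloc() above validates the ops table once at registration rather than on every message: a raw control-message handler without the skb-based one is rejected up front. A minimal sketch of the same idea, with hypothetical names:

#include <linux/bug.h>
#include <linux/errno.h>

struct my_app_type {
	void (*msg_rx)(void *app);	/* skb-based handler */
	void (*msg_rx_raw)(void *app);	/* raw-buffer handler */
};

/* Reject inconsistent ops tables once, instead of checking per message. */
static int my_app_check(const struct my_app_type *type)
{
	if (WARN_ON(!type->msg_rx && type->msg_rx_raw))
		return -EINVAL;
	return 0;
}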
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index ee74caacb015..4e1eb3395648 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -78,6 +78,8 @@ extern const struct nfp_app_type app_abm;
* @init: perform basic app checks and init
* @clean: clean app state
* @extra_cap: extra capabilities string
+ * @ndo_init: vNIC and repr netdev .ndo_init
+ * @ndo_uninit: vNIC and repr netdev .ndo_uninit
* @vnic_alloc: allocate vNICs (assign port types, etc.)
* @vnic_free: free up app's vNIC state
* @vnic_init: vNIC netdev was registered
@@ -96,6 +98,7 @@ extern const struct nfp_app_type app_abm;
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
+ * @ctrl_msg_rx_raw: handler for control messages from data queues
* @setup_tc: setup TC ndo
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
@@ -117,6 +120,9 @@ struct nfp_app_type {
const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
+ int (*ndo_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*ndo_uninit)(struct nfp_app *app, struct net_device *netdev);
+
int (*vnic_alloc)(struct nfp_app *app, struct nfp_net *nn,
unsigned int id);
void (*vnic_free)(struct nfp_app *app, struct nfp_net *nn);
@@ -145,6 +151,8 @@ struct nfp_app_type {
void (*stop)(struct nfp_app *app);
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
+ void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
+ unsigned int len);
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
@@ -200,6 +208,9 @@ static inline void nfp_app_clean(struct nfp_app *app)
app->type->clean(app);
}
+int nfp_app_ndo_init(struct net_device *netdev);
+void nfp_app_ndo_uninit(struct net_device *netdev);
+
static inline int nfp_app_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
unsigned int id)
{
@@ -310,6 +321,11 @@ static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
return app->type->ctrl_has_meta;
}
+static inline bool nfp_app_ctrl_uses_data_vnics(struct nfp_app *app)
+{
+ return app && app->type->ctrl_msg_rx_raw;
+}
+
static inline const char *nfp_app_extra_cap(struct nfp_app *app,
struct nfp_net *nn)
{
@@ -373,6 +389,16 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
app->type->ctrl_msg_rx(app, skb);
}
+static inline void
+nfp_app_ctrl_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
+{
+ if (!app || !app->type->ctrl_msg_rx_raw)
+ return;
+
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0, data, len);
+ app->type->ctrl_msg_rx_raw(app, data, len);
+}
+
static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
{
if (!app->type->eswitch_mode_get)
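nfp_app_ndo_init(), nfp_app_ndo_uninit() and nfp_app_ctrl_rx_raw() above all follow the same convention for optional hooks: a NULL app or NULL callback is a successful no-op, so call sites never special-case apps that don't implement a hook. A generic sketch of the pattern, with hypothetical names:

struct my_ops {
	int (*setup)(void *priv);	/* optional; may be NULL */
};

static int my_setup(const struct my_ops *ops, void *priv)
{
	/* Treat a missing hook as success so call sites stay unconditional */
	if (!ops || !ops->setup)
		return 0;
	return ops->setup(priv);
}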
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index f6677bc9875a..fad0e62a910c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -93,6 +93,7 @@ enum br_mask {
BR_BNE = 0x01,
BR_BMI = 0x02,
BR_BHS = 0x04,
+ BR_BCC = 0x05,
BR_BLO = 0x05,
BR_BGE = 0x08,
BR_BLT = 0x09,
@@ -426,4 +427,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
}
+enum mul_type {
+ MUL_TYPE_START = 0x00,
+ MUL_TYPE_STEP_24x8 = 0x01,
+ MUL_TYPE_STEP_16x16 = 0x02,
+ MUL_TYPE_STEP_32x32 = 0x03,
+};
+
+enum mul_step {
+ MUL_STEP_1 = 0x00,
+ MUL_STEP_NONE = MUL_STEP_1,
+ MUL_STEP_2 = 0x01,
+ MUL_STEP_3 = 0x02,
+ MUL_STEP_4 = 0x03,
+ MUL_LAST = 0x04,
+ MUL_LAST_2 = 0x05,
+};
+
+#define OP_MUL_BASE 0x0f800000000ULL
+#define OP_MUL_A_SRC 0x000000003ffULL
+#define OP_MUL_B_SRC 0x000000ffc00ULL
+#define OP_MUL_STEP 0x00000700000ULL
+#define OP_MUL_DST_AB 0x00000800000ULL
+#define OP_MUL_SW 0x00040000000ULL
+#define OP_MUL_TYPE 0x00180000000ULL
+#define OP_MUL_WR_AB 0x20000000000ULL
+#define OP_MUL_SRC_LMEXTN 0x40000000000ULL
+#define OP_MUL_DST_LMEXTN 0x80000000000ULL
+
#endif
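The OP_MUL_* defines above describe the multiply instruction as a base opcode plus shifted field masks; an encoder ORs values into those fields, and FIELD_PREP() from <linux/bitfield.h> does the shift-and-mask. An illustrative encoder over the masks above (the driver's real emitter lives in the BPF JIT and is not shown here):

#include <linux/bitfield.h>
#include <linux/types.h>

static u64 encode_mul(u16 a_src, u16 b_src, enum mul_step step,
		      enum mul_type type)
{
	u64 insn = OP_MUL_BASE;

	insn |= FIELD_PREP(OP_MUL_A_SRC, a_src);
	insn |= FIELD_PREP(OP_MUL_B_SRC, b_src);
	insn |= FIELD_PREP(OP_MUL_STEP, step);
	insn |= FIELD_PREP(OP_MUL_TYPE, type);

	return insn;
}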
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 46b76d5a726c..4a540c5e27fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -236,17 +236,20 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
int err;
pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
- if (!err)
- return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err) {
+ /* For backwards compatibility, allow all if symbol not found */
+ pf->limit_vfs = ~0;
+ if (err == -ENOENT)
+ return 0;
- pf->limit_vfs = ~0;
- pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
- /* Allow any setting for backwards compatibility if symbol not found */
- if (err == -ENOENT)
- return 0;
+ nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
+ return err;
+ }
- nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
- return err;
+ err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err)
+ nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err);
+ return 0;
}
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
@@ -668,7 +671,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
err = nfp_net_pci_probe(pf);
if (err)
- goto err_sriov_unlimit;
+ goto err_fw_unload;
err = nfp_hwmon_register(pf);
if (err) {
@@ -680,8 +683,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
err_net_remove:
nfp_net_pci_remove(pf);
-err_sriov_unlimit:
- pci_sriov_set_totalvfs(pf->pdev, 0);
err_fw_unload:
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
@@ -715,7 +716,6 @@ static void nfp_pci_remove(struct pci_dev *pdev)
nfp_hwmon_unregister(pf);
nfp_pcie_sriov_disable(pdev);
- pci_sriov_set_totalvfs(pf->pdev, 0);
nfp_net_pci_remove(pf);
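The probe fix above retargets an error label after the SR-IOV unlimit step moved; kernel probe paths conventionally unwind in exact reverse order of setup through a ladder of goto labels, so dropping a step means dropping its label too. A generic sketch of the ladder, with hypothetical helpers:

int step_a(void);  void undo_a(void);	/* hypothetical setup/teardown */
int step_b(void);  void undo_b(void);
int step_c(void);

static int example_probe(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_undo_a;
	err = step_c();
	if (err)
		goto err_undo_b;
	return 0;

err_undo_b:	/* undo in reverse order of setup */
	undo_b();
err_undo_a:
	undo_a();
	return err;
}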
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 2a71a9ffd095..439e6ffe2f05 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -250,7 +250,7 @@ struct nfp_net_tx_ring {
struct nfp_net_tx_desc *txds;
dma_addr_t dma;
- unsigned int size;
+ size_t size;
bool is_xdp;
} ____cacheline_aligned;
@@ -350,9 +350,9 @@ struct nfp_net_rx_buf {
* @qcp_fl: Pointer to base of the QCP freelist queue
* @rxbufs: Array of posted FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
+ * @xdp_rxq: RX-ring info avail for XDP
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @xdp_rxq: RX-ring info avail for XDP
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -364,14 +364,15 @@ struct nfp_net_rx_ring {
u32 idx;
int fl_qcidx;
- unsigned int size;
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
- dma_addr_t dma;
struct xdp_rxq_info xdp_rxq;
+
+ dma_addr_t dma;
+ size_t size;
} ____cacheline_aligned;
/**
@@ -485,7 +486,6 @@ struct nfp_stat_pair {
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
- * @bpf_offload_xdp: Offloaded BPF program is XDP
* @chained_metadata_format: Firmware will use new metadata format
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
@@ -510,7 +510,6 @@ struct nfp_net_dp {
struct net_device *netdev;
u8 is_vf:1;
- u8 bpf_offload_xdp:1;
u8 chained_metadata_format:1;
u8 rx_dma_dir;
@@ -553,8 +552,8 @@ struct nfp_net_dp {
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
- * @xdp_flags: Flags with which XDP prog was loaded
- * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes)
+ * @xdp: Information about the driver XDP program
+ * @xdp_hw: Information about the HW XDP program
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
@@ -610,8 +609,8 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
- u32 xdp_flags;
- struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
unsigned int max_tx_rings;
unsigned int max_rx_rings;
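Replacing the hand-rolled xdp_flags/xdp_prog pair with two struct xdp_attachment_info instances (driver path and HW-offload path) hands flag checking, prog refcounting, and query reporting to the core helpers, as the nfp_net_common.c hunks below show. A sketch of the helper flow, under the assumption that the actual datapath swap happens in between:

#include <net/xdp.h>

static int sketch_xdp_setup(struct xdp_attachment_info *info,
			    struct netdev_bpf *bpf)
{
	/* Refuse flag combinations that conflict with the current prog */
	if (!xdp_attachment_flags_ok(info, bpf))
		return -EBUSY;

	/* ... reconfigure the datapath for bpf->prog here ... */

	/* Record prog + flags; releases the reference to any old prog */
	xdp_attachment_setup(info, bpf);
	return 0;
}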
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d4c27f849f9b..a8b9fbab5f73 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -53,6 +53,8 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/mm.h>
+#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
@@ -945,11 +947,10 @@ err_free:
/**
* nfp_net_tx_complete() - Handle completed TX packets
- * @tx_ring: TX ring structure
- *
- * Return: Number of completed TX descriptors
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
*/
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
+static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
@@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
/* check for last gather fragment */
if (fidx == nr_frags - 1)
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
tx_ring->txbufs[idx].dma_addr = 0;
tx_ring->txbufs[idx].skb = NULL;
@@ -1077,7 +1078,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
* @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
- * Assumes that the device is stopped
+ * Assumes that the device is stopped, must be idempotent.
*/
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
@@ -1119,7 +1120,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p++;
}
- memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+ memset(tx_ring->txds, 0, tx_ring->size);
tx_ring->wr_p = 0;
tx_ring->rd_p = 0;
tx_ring->qcp_rd_p = 0;
@@ -1279,13 +1280,18 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
* nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
* @rx_ring: RX ring structure
*
- * Warning: Do *not* call if ring buffers were never put on the FW freelist
- * (i.e. device was not enabled)!
+ * Assumes that the device is stopped, must be idempotent.
*/
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
unsigned int wr_idx, last_idx;
+ /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
+ * kept at cnt - 1 FL bufs.
+ */
+ if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+ return;
+
/* Move the empty entry to the end of the list */
wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
last_idx = rx_ring->cnt - 1;
@@ -1294,7 +1300,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
rx_ring->rxbufs[last_idx].dma_addr = 0;
rx_ring->rxbufs[last_idx].frag = NULL;
- memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+ memset(rx_ring->rxds, 0, rx_ring->size);
rx_ring->wr_p = 0;
rx_ring->rd_p = 0;
}
@@ -1709,8 +1715,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
}
- if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- dp->bpf_offload_xdp) && !meta.portid) {
+ if (xdp_prog && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
int act;
@@ -1752,6 +1757,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
}
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_repr_get(nn->app, meta.portid);
+ if (unlikely(!netdev)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
@@ -1767,20 +1795,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
- if (likely(!meta.portid)) {
- netdev = dp->netdev;
- } else {
- struct nfp_net *nn;
-
- nn = netdev_priv(dp->netdev);
- netdev = nfp_app_repr_get(nn->app, meta.portid);
- if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
- continue;
- }
- nfp_repr_inc_rx_stats(netdev, pkt_len);
- }
-
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
@@ -1828,7 +1842,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
unsigned int pkts_polled = 0;
if (r_vec->tx_ring)
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, budget);
if (r_vec->rx_ring)
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
@@ -2062,7 +2076,7 @@ static void nfp_ctrl_poll(unsigned long arg)
struct nfp_net_r_vector *r_vec = (void *)arg;
spin_lock_bh(&r_vec->lock);
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, 0);
__nfp_ctrl_tx_queued(r_vec);
spin_unlock_bh(&r_vec->lock);
@@ -2121,7 +2135,7 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- kfree(tx_ring->txbufs);
+ kvfree(tx_ring->txbufs);
if (tx_ring->txds)
dma_free_coherent(dp->dev, tx_ring->size,
@@ -2145,18 +2159,17 @@ static int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- int sz;
tx_ring->cnt = dp->txd_cnt;
- tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
- sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
- tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
+ tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+ GFP_KERNEL);
if (!tx_ring->txbufs)
goto err_alloc;
@@ -2270,7 +2283,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
if (dp->netdev)
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- kfree(rx_ring->rxbufs);
+ kvfree(rx_ring->rxbufs);
if (rx_ring->rxds)
dma_free_coherent(dp->dev, rx_ring->size,
@@ -2293,7 +2306,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- int sz, err;
+ int err;
if (dp->netdev) {
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
@@ -2303,14 +2316,14 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
}
rx_ring->cnt = dp->rxd_cnt;
- rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
+ rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
- sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
- rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
+ rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
+ GFP_KERNEL);
if (!rx_ring->rxbufs)
goto err_alloc;
@@ -2508,6 +2521,8 @@ static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
+ *
+ * Warning: must be fully idempotent.
*/
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
@@ -3115,6 +3130,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nfp_net_netpoll(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int i;
+
+ /* nfp_net's NAPIs are statically allocated so even if there is a race
+ * with the reconfig path this will simply try to schedule some disabled
+ * NAPI instances.
+ */
+ for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
+ napi_schedule_irqoff(&nn->r_vecs[i].napi);
+}
+#endif
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3377,14 +3407,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
-static int
-nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
{
+ struct bpf_prog *prog = bpf->prog;
struct nfp_net_dp *dp;
+ int err;
+
+ if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
+ return -EBUSY;
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
+ xdp_attachment_setup(&nn->xdp, bpf);
return 0;
}
@@ -3398,38 +3432,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- return nfp_net_ring_reconfig(nn, dp, extack);
+ err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
+ if (err)
+ return err;
+
+ xdp_attachment_setup(&nn->xdp, bpf);
+ return 0;
}
-static int
-nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
- struct bpf_prog *drv_prog, *offload_prog;
int err;
- if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
+ if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
return -EBUSY;
- /* Load both when no flags set to allow easy activation of driver path
- * when program is replaced by one which can't be offloaded.
- */
- drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
- offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
-
- err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
+ err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
- err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
- if (err && flags & XDP_FLAGS_HW_MODE)
- return err;
-
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
- nn->xdp_prog = prog;
- nn->xdp_flags = flags;
-
+ xdp_attachment_setup(&nn->xdp_hw, bpf);
return 0;
}
@@ -3439,16 +3461,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
+ return nfp_net_xdp_setup_drv(nn, xdp);
case XDP_SETUP_PROG_HW:
- return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
- xdp->extack);
+ return nfp_net_xdp_setup_hw(nn, xdp);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->xdp_prog;
- if (nn->dp.bpf_offload_xdp)
- xdp->prog_attached = XDP_ATTACHED_HW;
- xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
- xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
- return 0;
+ return xdp_attachment_query(&nn->xdp, xdp);
+ case XDP_QUERY_PROG_HW:
+ return xdp_attachment_query(&nn->xdp_hw, xdp);
default:
return nfp_app_bpf(nn->app, nn, xdp);
}
@@ -3476,12 +3495,17 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
}
const struct net_device_ops nfp_net_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = nfp_net_netpoll,
+#endif
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
@@ -3840,6 +3864,9 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.mtu = NFP_NET_DEFAULT_MTU;
nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+ if (nfp_app_ctrl_uses_data_vnics(nn->app))
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;
+
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_init(nn);
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
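The ring-allocation hunks above replace open-coded sizeof() multiplications with array_size(), which saturates to SIZE_MAX on overflow so the allocation fails cleanly instead of being silently undersized, and swap kzalloc() for kvcalloc() so large rings can fall back to vmalloc. A condensed sketch of the pair, with a hypothetical descriptor type:

#include <linux/mm.h>
#include <linux/overflow.h>

struct desc { u64 raw; };		/* hypothetical descriptor */

struct ring {
	size_t size;			/* bytes, kept for the later free */
	struct desc *descs;
};

static int ring_alloc(struct ring *r, unsigned int cnt)
{
	/* array_size() returns SIZE_MAX on overflow, so kvcalloc() fails */
	r->size = array_size(cnt, sizeof(*r->descs));
	r->descs = kvcalloc(cnt, sizeof(*r->descs), GFP_KERNEL);
	return r->descs ? 0 : -ENOMEM;
}

static void ring_free(struct ring *r)
{
	kvfree(r->descs);		/* handles kmalloc or vmalloc backing */
}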
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index bb63c115537d..44d3ea75d043 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -127,6 +127,7 @@
#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
+#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 26d1cc4e2906..6a79c8e4a7a4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
- struct nfp_app *app;
-
- app = nfp_app_from_netdev(netdev);
- if (!app)
- return;
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+ sizeof(drvinfo->bus_info));
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
@@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
@@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
u64 tmp[NN_RVEC_GATHER_STATS];
unsigned int i, j;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
unsigned int start;
do {
@@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
return data;
}
-static unsigned int
-nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
+static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
- return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
+ return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}
static u8 *
-nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
- unsigned int tx_rings, bool repr)
+nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
int swap_off, i;
@@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i].name);
- for (i = 0; i < tx_rings; i++) {
- data = nfp_pr_et(data, "txq_%u_pkts", i);
- data = nfp_pr_et(data, "txq_%u_bytes", i);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
data = nfp_pr_et(data, "rxq_%u_pkts", i);
data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
}
return data;
}
static u64 *
-nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
- unsigned int rx_rings, unsigned int tx_rings)
+nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
unsigned int i;
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
*data++ = readq(mem + nfp_net_et_stats[i].off);
- for (i = 0; i < tx_rings; i++) {
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
}
return data;
@@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
- nn->dp.num_tx_rings,
+ data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
false);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
@@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
- nn->dp.num_rx_rings, nn->dp.num_tx_rings);
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
- nn->dp.num_tx_rings) +
+ nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
nfp_mac_get_stats_count(netdev) +
nfp_app_port_get_stats_count(nn->port);
default:
@@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
+ data = nfp_vnic_get_hw_stats_strings(data, 0, true);
else
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
@@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_port *port = nfp_port_from_netdev(netdev);
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
+ data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
else
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(port, data);
@@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- count = nfp_vnic_get_hw_stats_count(0, 0);
+ count = nfp_vnic_get_hw_stats_count(0);
else
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d7b712f6362f..18a09cdcd9c6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -262,6 +262,8 @@ err_port_disable:
}
const struct net_device_ops nfp_repr_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 749655c329b2..c8d0b1016a64 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp)
kfree(nfp);
}
-static void nfp6000_read_serial(struct device *dev, u8 *serial)
+static int nfp6000_read_serial(struct device *dev, u8 *serial)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
@@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial)
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
if (!pos) {
- memset(serial, 0, NFP_SERIAL_LEN);
- return;
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
}
pci_read_config_dword(pdev, pos + 4, &reg);
put_unaligned_be16(reg >> 16, serial + 4);
pci_read_config_dword(pdev, pos + 8, &reg);
put_unaligned_be32(reg, serial);
+
+ return 0;
}
-static u16 nfp6000_get_interface(struct device *dev)
+static int nfp6000_get_interface(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
u32 reg;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos)
- return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
+ if (!pos) {
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
+ }
pci_read_config_dword(pdev, pos + 4, &reg);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index b0da3d436850..c338d539fa96 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -364,8 +364,8 @@ struct nfp_cpp_operations {
int (*init)(struct nfp_cpp *cpp);
void (*free)(struct nfp_cpp *cpp);
- void (*read_serial)(struct device *dev, u8 *serial);
- u16 (*get_interface)(struct device *dev);
+ int (*read_serial)(struct device *dev, u8 *serial);
+ int (*get_interface)(struct device *dev);
int (*area_init)(struct nfp_cpp_area *area,
u32 dest, unsigned long long address,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index ef30597aa319..73de57a09800 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
{
const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
struct nfp_cpp *cpp;
+ int ifc, err;
u32 mask[2];
u32 xpbaddr;
size_t tgt;
- int err;
cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
if (!cpp) {
@@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
cpp->op = ops;
cpp->priv = priv;
- cpp->interface = ops->get_interface(parent);
- if (ops->read_serial)
- ops->read_serial(parent, cpp->serial);
+
+ ifc = ops->get_interface(parent);
+ if (ifc < 0) {
+ err = ifc;
+ goto err_free_cpp;
+ }
+ cpp->interface = ifc;
+ if (ops->read_serial) {
+ err = ops->read_serial(parent, cpp->serial);
+ if (err)
+ goto err_free_cpp;
+ }
+
rwlock_init(&cpp->resource_lock);
init_waitqueue_head(&cpp->waitq);
lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
@@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err = device_register(&cpp->dev);
if (err < 0) {
put_device(&cpp->dev);
- goto err_dev;
+ goto err_free_cpp;
}
dev_set_drvdata(&cpp->dev, cpp);
@@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err_out:
device_unregister(&cpp->dev);
-err_dev:
+err_free_cpp:
kfree(cpp);
err_malloc:
return ERR_PTR(err);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index cd34097b79f1..40510860341b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -207,7 +207,7 @@ nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
* nfp_nffw_info_open() - Acquire the lock on the NFFW table
* @cpp: NFP CPP handle
*
- * Return: 0, or -ERRNO
+ * Return: pointer to nfp_nffw_info object or ERR_PTR()
*/
struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
{
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
nfp_resource_address(state->res),
fwinf, sizeof(*fwinf));
- if (err < sizeof(*fwinf))
+ if (err < (int)sizeof(*fwinf))
goto err_release;
if (!nffw_res_flg_init_get(fwinf))
@@ -253,10 +253,8 @@ err_free:
}
/**
- * nfp_nffw_info_release() - Release the lock on the NFFW table
+ * nfp_nffw_info_close() - Release the lock on the NFFW table and free state
* @state: NFP FW info state
- *
- * Return: 0, or -ERRNO
*/
void nfp_nffw_info_close(struct nfp_nffw_info *state)
{
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 09f674ec0f9e..76efed058f33 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -155,7 +155,6 @@ struct nixge_priv {
int tx_irq;
int rx_irq;
- u32 last_link;
/* Buffer descriptors */
struct nixge_hw_dma_bd *tx_bd_v;
@@ -504,7 +503,6 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_skb->skb = skb;
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
/* Start the transfer */
@@ -740,22 +738,12 @@ static void nixge_dma_err_handler(unsigned long data)
cur_p->phys = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
cur_p->sw_id_offset = 0;
}
for (i = 0; i < RX_BD_NUM; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
}
lp->tx_bd_ci = 0;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7cbd0174459c..1d9b0d44ddb6 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5777,7 +5777,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
(np->rx_ring_size +
np->tx_ring_size),
&np->ring_addr,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!np->rx_ring.orig)
goto out_unmap;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -5786,7 +5786,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
sizeof(struct ring_desc_ex) *
(np->rx_ring_size +
np->tx_ring_size),
- &np->ring_addr, GFP_ATOMIC);
+ &np->ring_addr, GFP_KERNEL);
if (!np->rx_ring.ex)
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
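The forcedeth change above is a context fix: nv_probe() runs in process context where sleeping is allowed, so its coherent ring allocation should use GFP_KERNEL and leave the GFP_ATOMIC emergency reserves for paths that genuinely cannot sleep. A one-function sketch of the probe-time pattern:

#include <linux/dma-mapping.h>

/* Probe runs in process context, so the allocation may sleep/reclaim. */
static void *alloc_rings(struct device *dev, size_t bytes, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, bytes, dma, GFP_KERNEL);
}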
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
index 31288d4ad248..862de0f3bc41 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_PCH_GBE) += pch_gbe.o
pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
-pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
+pch_gbe-y += pch_gbe_main.o
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 697e29dd4bd3..44c2f291e766 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -326,32 +326,6 @@ struct pch_gbe_regs {
#define PCH_GBE_FC_FULL 3
#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
-
-struct pch_gbe_hw;
-/**
- * struct pch_gbe_functions - HAL APi function pointer
- * @get_bus_info: for pch_gbe_hal_get_bus_info
- * @init_hw: for pch_gbe_hal_init_hw
- * @read_phy_reg: for pch_gbe_hal_read_phy_reg
- * @write_phy_reg: for pch_gbe_hal_write_phy_reg
- * @reset_phy: for pch_gbe_hal_phy_hw_reset
- * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
- * @power_up_phy: for pch_gbe_hal_power_up_phy
- * @power_down_phy: for pch_gbe_hal_power_down_phy
- * @read_mac_addr: for pch_gbe_hal_read_mac_addr
- */
-struct pch_gbe_functions {
- void (*get_bus_info) (struct pch_gbe_hw *);
- s32 (*init_hw) (struct pch_gbe_hw *);
- s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
- s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
- void (*reset_phy) (struct pch_gbe_hw *);
- void (*sw_reset_phy) (struct pch_gbe_hw *);
- void (*power_up_phy) (struct pch_gbe_hw *hw);
- void (*power_down_phy) (struct pch_gbe_hw *hw);
- s32 (*read_mac_addr) (struct pch_gbe_hw *);
-};
-
/**
* struct pch_gbe_mac_info - MAC information
* @addr[6]: Store the MAC address
@@ -394,17 +368,6 @@ struct pch_gbe_phy_info {
/*!
* @ingroup Gigabit Ether driver Layer
- * @struct pch_gbe_bus_info
- * @brief Bus information
- */
-struct pch_gbe_bus_info {
- u8 type;
- u8 speed;
- u8 width;
-};
-
-/*!
- * @ingroup Gigabit Ether driver Layer
* @struct pch_gbe_hw
* @brief Hardware information
*/
@@ -414,10 +377,8 @@ struct pch_gbe_hw {
struct pch_gbe_regs __iomem *reg;
spinlock_t miim_lock;
- const struct pch_gbe_functions *func;
struct pch_gbe_mac_info mac;
struct pch_gbe_phy_info phy;
- struct pch_gbe_bus_info bus;
};
/**
@@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev);
/* pch_gbe_mac.c */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
u16 data);
#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
deleted file mode 100644
index 51250363566b..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "pch_gbe.h"
-#include "pch_gbe_phy.h"
-#include "pch_gbe_api.h"
-
-/* bus type values */
-#define pch_gbe_bus_type_unknown 0
-#define pch_gbe_bus_type_pci 1
-#define pch_gbe_bus_type_pcix 2
-#define pch_gbe_bus_type_pci_express 3
-#define pch_gbe_bus_type_reserved 4
-
-/* bus speed values */
-#define pch_gbe_bus_speed_unknown 0
-#define pch_gbe_bus_speed_33 1
-#define pch_gbe_bus_speed_66 2
-#define pch_gbe_bus_speed_100 3
-#define pch_gbe_bus_speed_120 4
-#define pch_gbe_bus_speed_133 5
-#define pch_gbe_bus_speed_2500 6
-#define pch_gbe_bus_speed_reserved 7
-
-/* bus width values */
-#define pch_gbe_bus_width_unknown 0
-#define pch_gbe_bus_width_pcie_x1 1
-#define pch_gbe_bus_width_pcie_x2 2
-#define pch_gbe_bus_width_pcie_x4 4
-#define pch_gbe_bus_width_32 5
-#define pch_gbe_bus_width_64 6
-#define pch_gbe_bus_width_reserved 7
-
-/**
- * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
-{
- hw->bus.type = pch_gbe_bus_type_pci_express;
- hw->bus.speed = pch_gbe_bus_speed_2500;
- hw->bus.width = pch_gbe_bus_width_pcie_x1;
-}
-
-/**
- * pch_gbe_plat_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * Negative value: Failed-EBUSY
- */
-static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
-{
- s32 ret_val;
-
- ret_val = pch_gbe_phy_get_id(hw);
- if (ret_val) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
- return ret_val;
- }
- pch_gbe_phy_init_setting(hw);
- /* Setup Mac interface option RGMII */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
- pch_gbe_phy_set_rgmii(hw);
-#endif
- return ret_val;
-}
-
-static const struct pch_gbe_functions pch_gbe_ops = {
- .get_bus_info = pch_gbe_plat_get_bus_info,
- .init_hw = pch_gbe_plat_init_hw,
- .read_phy_reg = pch_gbe_phy_read_reg_miic,
- .write_phy_reg = pch_gbe_phy_write_reg_miic,
- .reset_phy = pch_gbe_phy_hw_reset,
- .sw_reset_phy = pch_gbe_phy_sw_reset,
- .power_up_phy = pch_gbe_phy_power_up,
- .power_down_phy = pch_gbe_phy_power_down,
- .read_mac_addr = pch_gbe_mac_read_mac_addr
-};
-
-/**
- * pch_gbe_plat_init_function_pointers - Init func ptrs
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
-{
- /* Set PHY parameter */
- hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Set function pointers */
- hw->func = &pch_gbe_ops;
-}
-
-/**
- * pch_gbe_hal_setup_init_funcs - Initializes function pointers
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
-{
- if (!hw->reg) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: Registers not mapped\n");
- return -ENOSYS;
- }
- pch_gbe_plat_init_function_pointers(hw);
- return 0;
-}
-
-/**
- * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
-{
- if (!hw->func->get_bus_info) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->get_bus_info(hw);
-}
-
-/**
- * pch_gbe_hal_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
-{
- if (!hw->func->init_hw) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->init_hw(hw);
-}
-
-/**
- * pch_gbe_hal_read_phy_reg - Reads PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The buffer to store the 16-bit read.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 *data)
-{
- if (!hw->func->read_phy_reg)
- return 0;
- return hw->func->read_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_write_phy_reg - Writes PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The value to write.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 data)
-{
- if (!hw->func->write_phy_reg)
- return 0;
- return hw->func->write_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_phy_hw_reset - Hard PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_phy_sw_reset - Soft PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->sw_reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->sw_reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_read_mac_addr - Reads MAC address
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
-{
- if (!hw->func->read_mac_addr) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->read_mac_addr(hw);
-}
-
-/**
- * pch_gbe_hal_power_up_phy - Power up PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_up_phy)
- hw->func->power_up_phy(hw);
-}
-
-/**
- * pch_gbe_hal_power_down_phy - Power down PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_down_phy)
- hw->func->power_down_phy(hw);
-}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
deleted file mode 100644
index 91ce07c8306c..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _PCH_GBE_API_H_
-#define _PCH_GBE_API_H_
-
-#include "pch_gbe_phy.h"
-
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
-
-#endif
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 731ce1e419e4..adaa0024adfe 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -17,7 +17,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
/**
* pch_gbe_stats - Stats item information
@@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev,
u32 advertising;
int ret;
- pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+ pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
memcpy(&copy_ecmd, ecmd, sizeof(*ecmd));
@@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev,
*regs_buff++ = ioread32(&hw->reg->INT_ST + i);
/* PHY register */
for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
- pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
+ pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp);
*regs_buff++ = tmp;
}
}
@@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev,
err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
if (err)
goto err_setup_tx;
- /* save the new, restore the old in order to free it,
- * then restore the new back again */
-#ifdef RINGFREE
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
- pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
- kfree(tx_old);
- kfree(rx_old);
- adapter->rx_ring = rxdr;
- adapter->tx_ring = txdr;
-#else
pch_gbe_free_rx_resources(adapter, rx_old);
pch_gbe_free_tx_resources(adapter, tx_old);
kfree(tx_old);
kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
-#endif
err = pch_gbe_up(adapter);
}
return err;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 34a1581eda95..43c0c10dfeb7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -18,7 +18,7 @@
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
@@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_DMA_ALIGN 0
#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */
-#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
@@ -113,8 +112,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define MINNOW_PHY_RESET_GPIO 13
-static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
-
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
@@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
* Returns:
* 0: Successful.
*/
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
u32 adr1a, adr1b;
@@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
/* Read the MAC address. and store to the private data */
pch_gbe_mac_read_mac_addr(hw);
iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
-#ifdef PCH_GBE_MAC_IFOP_RGMII
iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
-#endif
pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
/* Setup the receive addresses */
pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
@@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
-
-/**
- * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
- * @hw: Pointer to the HW structure
- * @mc_addr_list: Array of multicast addresses to program
- * @mc_addr_count: Number of multicast addresses to program
- * @mar_used_count: The first MAC Address register free to program
- * @mar_total_num: Total number of supported MAC Address Registers
- */
-static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 mar_used_count, u32 mar_total_num)
-{
- u32 i, adrmask;
-
- /* Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = mar_used_count; i < mar_total_num; i++) {
- if (mc_addr_count) {
- pch_gbe_mac_mar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- /* Clear MAC address mask */
- adrmask = ioread32(&hw->reg->ADDR_MASK);
- iowrite32((adrmask | (0x0001 << i)),
- &hw->reg->ADDR_MASK);
- /* wait busy */
- pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
- /* Clear MAC address */
- iowrite32(0, &hw->reg->mac_adr[i].high);
- iowrite32(0, &hw->reg->mac_adr[i].low);
- }
- }
-}
-
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
@@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ s32 ret_val;
- pch_gbe_mac_reset_hw(&adapter->hw);
+ pch_gbe_mac_reset_hw(hw);
/* reprogram multicast address register after reset */
pch_gbe_set_multi(netdev);
/* Setup the receive address. */
- pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
- if (pch_gbe_hal_init_hw(&adapter->hw))
- netdev_err(netdev, "Hardware Error\n");
+ pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
+
+ ret_val = pch_gbe_phy_get_id(hw);
+ if (ret_val) {
+ netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
+ return;
+ }
+ pch_gbe_phy_init_setting(hw);
+ /* Setup Mac interface option RGMII */
+ pch_gbe_phy_set_rgmii(hw);
}
/**
@@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
unsigned long rgmii = 0;
/* Set the RGMII control. */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
switch (speed) {
case SPEED_10:
rgmii = (PCH_GBE_RGMII_RATE_2_5M |
@@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
break;
}
iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#else /* GMII */
- rgmii = 0;
- iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#endif
}
static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
u16 duplex)
@@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Initialize the hardware-specific values */
- if (pch_gbe_hal_setup_init_funcs(hw)) {
- netdev_err(netdev, "Hardware Initialization Failure\n");
- return -EIO;
- }
if (pch_gbe_alloc_queues(adapter)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev)
err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
if (err)
goto err_setup_rx;
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
err = pch_gbe_up(adapter);
if (err)
goto err_up;
@@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev)
err_up:
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
@@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev)
pch_gbe_down(adapter);
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
return 0;
@@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
- u8 *mta_list;
- u32 rctl;
- int i;
- int mc_count;
+ u32 rctl, adrmask;
+ int mc_count, i;
netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
- /* Check for Promiscuous and All Multicast modes */
+ /* By default enable address & multicast filtering */
rctl = ioread32(&hw->reg->RX_MODE);
+ rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
+
+ /* Promiscuous mode disables all hardware address filtering */
+ if (netdev->flags & IFF_PROMISC)
+ rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+
+ /* If we want to monitor more multicast addresses than the hardware can
+ * support then disable hardware multicast filtering.
+ */
mc_count = netdev_mc_count(netdev);
- if ((netdev->flags & IFF_PROMISC)) {
- rctl &= ~PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else if ((netdev->flags & IFF_ALLMULTI)) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
+ if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- if (mc_count >= PCH_GBE_MAR_ENTRIES) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
- }
- }
+
iowrite32(rctl, &hw->reg->RX_MODE);
- if (mc_count >= PCH_GBE_MAR_ENTRIES)
- return;
- mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC);
- if (!mta_list)
+ /* If we're not using multicast filtering then there's no point
+ * configuring the unused MAC address registers.
+ */
+ if (!(rctl & PCH_GBE_MLT_FIL_EN))
return;
- /* The shared function expects a packed array of only addresses. */
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == mc_count)
- break;
- memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ /* Load the first set of multicast addresses into MAC address registers
+ * for use by hardware filtering.
+ */
+ i = 1;
+ netdev_for_each_mc_addr(ha, netdev)
+ pch_gbe_mac_mar_set(hw, ha->addr, i++);
+
+ /* If there are spare MAC registers, mask & clear them */
+ for (; i < PCH_GBE_MAR_ENTRIES; i++) {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
+ /* wait for the busy bit to clear */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
}
- pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
- PCH_GBE_MAR_ENTRIES);
- kfree(mta_list);
netdev_dbg(netdev,
"RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
@@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake up status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
} else {
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
}
@@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device)
return err;
}
pci_set_master(pdev);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake on lan control and status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
free_netdev(netdev);
}
@@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "PHY initialize error\n");
goto err_free_adapter;
}
- pch_gbe_hal_get_bus_info(&adapter->hw);
 /* Read the MAC address and store it in the private data */
- ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
if (ret) {
dev_err(&pdev->dev, "MAC address Read Error\n");
goto err_free_adapter;
@@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
return 0;
err_free_adapter:
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
err_free_netdev:
free_netdev(netdev);
return ret;
@@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = {
.shutdown = pch_gbe_shutdown,
.err_handler = &pch_gbe_err_handler
};
-
-
-static int __init pch_gbe_init_module(void)
-{
- int ret;
-
- pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
- ret = pci_register_driver(&pch_gbe_driver);
- if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
- if (copybreak == 0) {
- pr_info("copybreak disabled\n");
- } else {
- pr_info("copybreak enabled for packets <= %u bytes\n",
- copybreak);
- }
- }
- return ret;
-}
-
-static void __exit pch_gbe_exit_module(void)
-{
- pci_unregister_driver(&pch_gbe_driver);
-}
-
-module_init(pch_gbe_init_module);
-module_exit(pch_gbe_exit_module);
+module_pci_driver(pch_gbe_driver);
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
@@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
-module_param(copybreak, uint, 0644);
-MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
-
/* pch_gbe_main.c */
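The open-coded init/exit pair removed above is exactly the boilerplate that module_pci_driver() generates, minus the copybreak banner (the copybreak module parameter is dropped by this patch as well). Roughly, the macro expands to:

static int __init pch_gbe_driver_init(void)
{
	return pci_register_driver(&pch_gbe_driver);
}
module_init(pch_gbe_driver_init);

static void __exit pch_gbe_driver_exit(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}
module_exit(pch_gbe_driver_exit);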
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index a5cad5ea9436..6b35b573beef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
* pch_gbe_phy_sw_reset - PHY software reset
* @hw: Pointer to the HW structure
*/
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
+static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
{
u16 phy_ctrl;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 95ad0151ad02..23ac38711619 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -21,12 +21,10 @@
#define PCH_GBE_PHY_REGS_LEN 32
#define PCH_GBE_PHY_RESET_DELAY_US 10
-#define PCH_GBE_MAC_IFOP_RGMII
s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index b5ea2a56106e..1df28f2edd1f 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -2,7 +2,7 @@
# Packet engine device configuration
#
-config NET_PACKET_ENGINE
+config NET_VENDOR_PACKET_ENGINES
bool "Packet Engine devices"
default y
depends on PCI
@@ -14,7 +14,7 @@ config NET_PACKET_ENGINE
the questions about packet engine devices. If you say Y, you will
be asked for your specific card in the following questions.
-if NET_PACKET_ENGINE
+if NET_VENDOR_PACKET_ENGINES
config HAMACHI
tristate "Packet Engines Hamachi GNIC-II support"
@@ -40,4 +40,4 @@ config YELLOWFIN
To compile this driver as a module, choose M here: the module
will be called yellowfin. This is recommended.
-endif # NET_PACKET_ENGINE
+endif # NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3157f97dd782..3c1be87cdfa5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -167,9 +167,9 @@ skip:
case NETXEN_BRDTYPE_P3_REF_QG:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
-
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
+ /* fall through */
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
@@ -198,6 +198,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
+ /* fall through */
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
supported |= SUPPORTED_FIBRE;
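The two /* fall through */ annotations above mark the missing-break cases as intentional, which keeps GCC's -Wimplicit-fallthrough warning quiet. A minimal sketch of the pattern (the board labels here are hypothetical; SUPPORTED_* come from linux/ethtool.h):

static u32 board_caps(int board)
{
	u32 supported = 0;

	switch (board) {
	case 0:		/* hypothetical copper board with autoneg */
		supported |= SUPPORTED_Autoneg;
		/* fall through - it advertises fibre modes too */
	case 1:		/* hypothetical fibre-only board */
		supported |= SUPPORTED_FIBRE;
		break;
	}

	return supported;
}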
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 1cd39c9a0345..52ad80621335 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -566,9 +566,8 @@ static int
netxen_send_cmd_descs(struct netxen_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- u32 i, producer, consumer;
+ u32 i, producer;
struct netxen_cmd_buffer *pbuf;
- struct cmd_desc_type0 *cmd_desc;
struct nx_host_tx_ring *tx_ring;
i = 0;
@@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
- consumer = tx_ring->sw_consumer;
if (nr_desc >= netxen_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
@@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
}
do {
- cmd_desc = &cmd_desc_arr[i];
-
pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
pbuf->frag_count = 0;
@@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
static int netxen_parse_md_template(struct netxen_adapter *adapter)
{
int num_of_entries, buff_level, e_cnt, esize;
- int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+ int rv = 0, sane_start = 0, sane_end = 0;
char *dbuff;
void *template_buff = adapter->mdump.md_template;
char *dump_buff = adapter->mdump.md_capture_buff;
@@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter)
break;
case RDEND:
entry->hdr.driver_flags |= NX_DUMP_SKIP;
- if (!sane_end)
- end_cnt = e_cnt;
sane_end += 1;
break;
case CNTRL:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8259e8309320..69aa7fc392c5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
u32 producer;
- int frag_count, no_of_desc;
+ int frag_count;
u32 num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = 1 + skb_shinfo(skb)->nr_frags;
}
- /* 4 fragments per cmd des */
- no_of_desc = (frag_count + 3) >> 2;
if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00db3401b898..a60e1c8d470a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -336,6 +336,10 @@ struct qed_hw_info {
*/
u8 num_active_tc;
u8 offload_tc;
+ bool offload_tc_set;
+
+ bool multi_tc_roce_en;
+#define IS_QED_MULTI_TC_ROCE(p_hwfn) (((p_hwfn)->hw_info.multi_tc_roce_en))
u32 concrete_fid;
u16 opaque_fid;
@@ -399,8 +403,8 @@ struct qed_qm_info {
u16 start_pq;
u8 start_vport;
u16 pure_lb_pq;
- u16 offload_pq;
- u16 low_latency_pq;
+ u16 first_ofld_pq;
+ u16 first_llt_pq;
u16 pure_ack_pq;
u16 ooo_pq;
u16 first_vf_pq;
@@ -502,6 +506,7 @@ enum BAR_ID {
struct qed_nvm_image_info {
u32 num_images;
struct bist_nvm_image_att *image_att;
+ bool valid;
};
#define DRV_MODULE_VERSION \
@@ -880,11 +885,14 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
#define PQ_FLAGS_OFLD (BIT(5))
#define PQ_FLAGS_VFS (BIT(6))
#define PQ_FLAGS_LLT (BIT(7))
+#define PQ_FLAGS_MTC (BIT(8))
/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
@@ -920,4 +928,6 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
enum qed_mfw_tlv_type type,
union qed_mfw_tlv_data *tlv_data);
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
#endif /* _QED_H */
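With PQ_FLAGS_MTC set, the offload and low-latency groups grow from one PQ each to one PQ per traffic class, which is why the single offload_pq/low_latency_pq indices become the first_ofld_pq/first_llt_pq ranges above. A worked count for a RoCE PF, assuming 4 HW TCs, no SR-IOV, and the usual MCOS | LB | OFLD | LLT flags (illustrative numbers only):

/* qed_init_qm_get_num_pqs() with PQ_FLAGS_MTC:
 *   MCOS: 4 (one per TC)   LB: 1
 *   OFLD: 4 (one per TC)   LLT: 4 (one per TC)   -> 13 PQs
 * and without it (multi_tc_roce_en cleared):
 *   MCOS: 4   LB: 1   OFLD: 1   LLT: 1           ->  7 PQs
 * which is the fallback qed_init_qm_sanity() drops to when the
 * requested PQs exceed RESC_NUM(p_hwfn, QED_PQ).
 */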
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index b5b5ff725426..f1977aa440e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1531,7 +1531,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
}
/* CM PF */
-void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
/* XCM pure-LB queue */
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index f0b01385d5cb..6bb76e6d3c14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -208,7 +208,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
/* QM reconf data */
if (p_info->personality == personality)
- p_info->offload_tc = tc;
+ qed_hw_info_set_offload_tc(p_info, tc);
}
/* Update app protocol data and hw_info fields with the TLV info */
@@ -221,7 +221,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
struct qed_hw_info *p_info = &p_hwfn->hw_info;
enum qed_pci_personality personality;
enum dcbx_protocol_type id;
- char *name;
int i;
for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
@@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
continue;
personality = qed_dcbx_app_update[i].personality;
- name = qed_dcbx_app_update[i].name;
qed_dcbx_set_params(p_data, p_info, enable,
prio, tc, type, personality);
@@ -709,9 +707,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
- ARRAY_SIZE(p_local->local_chassis_id));
+ sizeof(p_local->local_chassis_id));
memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
- ARRAY_SIZE(p_local->local_port_id));
+ sizeof(p_local->local_port_id));
}
static void
@@ -723,9 +721,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
- ARRAY_SIZE(p_remote->peer_chassis_id));
+ sizeof(p_remote->peer_chassis_id));
memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
- ARRAY_SIZE(p_remote->peer_port_id));
+ sizeof(p_remote->peer_port_id));
}
static int
@@ -869,7 +867,7 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
return rc;
}
-void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
+static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
{
struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
void *cookie = hwfn->cdev->ops_cookie;
@@ -991,6 +989,24 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
}
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri)
+{
+ struct qed_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+ if (pri >= QED_MAX_PFC_PRIORITIES) {
+ DP_ERR(p_hwfn, "Invalid priority %d\n", pri);
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ if (!dcbx_info->operational.valid) {
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Dcbx parameters not available\n");
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ return dcbx_info->operational.params.ets_pri_tc_tbl[pri];
+}
+
#ifdef CONFIG_DCB
static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
struct qed_dcbx_get *p_get,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 5feb90e049e0..a4d688c04e18 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -123,4 +123,7 @@ void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
+#define QED_DCBX_DEFAULT_TC 0
+
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri);
#endif
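qed_dcbx_get_priority_tc() is the lookup the RoCE path uses to steer a QP onto a traffic class by its VLAN PCP bits; a minimal caller sketch (mirroring qed_roce_get_qp_tc() added later in this patch; VLAN_PRIO_* come from linux/if_vlan.h):

	u8 pri, tc = QED_DCBX_DEFAULT_TC;

	if (qp->vlan_id) {
		pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
	}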
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index a14e48489029..1aa9fc1c5890 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
format_idx = header & MFW_TRACE_EVENTID_MASK;
/* Skip message if its index doesn't exist in the meta data */
- if (format_idx > s_mcp_trace_meta.formats_num) {
+ if (format_idx >= s_mcp_trace_meta.formats_num) {
u8 format_size =
(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
MFW_TRACE_PRM_SIZE_SHIFT);
@@ -7838,8 +7838,8 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
-int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
- enum qed_nvm_images image_id, u32 *length)
+static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id, u32 *length)
{
struct qed_nvm_image_att image_att;
int rc;
@@ -7854,8 +7854,9 @@ int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
- u32 *num_dumped_bytes, enum qed_nvm_images image_id)
+static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes,
+ enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
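The bounds-check fix in the mcp-trace parser above is a classic off-by-one: with formats_num == N the valid indices are 0..N-1, so the old 'format_idx > N' test let format_idx == N through and indexed one element past the end of the format array; '>=' rejects it.

/* formats_num == 4 -> valid format_idx: 0, 1, 2, 3
 *   old: (4 > 4)  == false -> formats[4] dereferenced (out of bounds)
 *   new: (4 >= 4) == true  -> message skipped
 */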
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 329781cda77f..016ca8a7ec8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -215,6 +215,8 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
break;
case QED_PCI_ETH_ROCE:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (IS_QED_MULTI_TC_ROCE(p_hwfn))
+ flags |= PQ_FLAGS_MTC;
break;
case QED_PCI_ETH_IWARP:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
@@ -230,20 +232,30 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
}
/* Getters for resource amounts necessary for qm initialization */
-u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
{
return p_hwfn->hw_info.num_hw_tc;
}
-u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
{
return IS_QED_SRIOV(p_hwfn->cdev) ?
p_hwfn->cdev->p_iov_info->total_vfs : 0;
}
+static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+ if (!(PQ_FLAGS_MTC & pq_flags))
+ return 1;
+
+ return qed_init_qm_get_num_tcs(p_hwfn);
+}
+
#define NUM_DEFAULT_RLS 1
-u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
{
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
@@ -261,7 +273,7 @@ u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
return num_pf_rls;
}
-u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -273,7 +285,7 @@ u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
}
/* calc amount of PQs according to the requested flags */
-u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -282,8 +294,11 @@ u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
(!!(PQ_FLAGS_MCOS & pq_flags)) *
qed_init_qm_get_num_tcs(p_hwfn) +
(!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
- (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
- (!!(PQ_FLAGS_LLT & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LLT & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
(!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
}
@@ -394,7 +409,25 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
#define PQ_INIT_DEFAULT_TC 0
-#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
+{
+ p_info->offload_tc = tc;
+ p_info->offload_tc_set = true;
+}
+
+static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.offload_tc_set;
+}
+
+static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
+{
+ if (qed_is_offload_tc_set(p_hwfn))
+ return p_hwfn->hw_info.offload_tc;
+
+ return PQ_INIT_DEFAULT_TC;
+}
static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info,
@@ -456,9 +489,9 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
case PQ_FLAGS_ACK:
return &qm_info->pure_ack_pq;
case PQ_FLAGS_OFLD:
- return &qm_info->offload_pq;
+ return &qm_info->first_ofld_pq;
case PQ_FLAGS_LLT:
- return &qm_info->low_latency_pq;
+ return &qm_info->first_llt_pq;
case PQ_FLAGS_VFS:
return &qm_info->first_vf_pq;
default:
@@ -507,14 +540,26 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}
-u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u16 first_ofld_pq, pq_offset;
+
+ first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
+
+ return first_ofld_pq + pq_offset;
+}
+
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
- u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+ u16 first_llt_pq, pq_offset;
- if (rl > max_rl)
- DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+ first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
- return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+ return first_llt_pq + pq_offset;
}
/* Functions for creating specific types of pqs */
@@ -548,7 +593,22 @@ static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
+{
+ u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc;
+
+ /* override pq's TC if offload TC is set */
+ for (tc = 0; tc < num_tcs; tc++)
+ qed_init_qm_pq(p_hwfn, qm_info,
+ qed_is_offload_tc_set(p_hwfn) ?
+ p_hwfn->hw_info.offload_tc : tc,
+ PQ_INIT_SHARE_VPORT);
}
static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
@@ -559,7 +619,7 @@ static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_mtc_pqs(p_hwfn);
}
static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
@@ -570,7 +630,7 @@ static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_mtc_pqs(p_hwfn);
}
static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
@@ -611,7 +671,8 @@ static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_PF_RL);
}
static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
@@ -652,12 +713,19 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
- if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
- DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
- return -EINVAL;
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
+
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
+ p_hwfn->hw_info.multi_tc_roce_en = 0;
+ DP_NOTICE(p_hwfn,
+ "multi-tc roce was disabled to reduce requested amount of pqs\n");
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
}
- return 0;
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return -EINVAL;
}
static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
@@ -671,11 +739,13 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
/* top level params */
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
qm_info->start_pq,
qm_info->start_vport,
qm_info->pure_lb_pq,
- qm_info->offload_pq, qm_info->pure_ack_pq);
+ qm_info->first_ofld_pq,
+ qm_info->first_llt_pq,
+ qm_info->pure_ack_pq);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
"ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
@@ -1719,14 +1789,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_params->p_tunn,
@@ -1804,7 +1874,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
DP_INFO(p_hwfn, "Failed to update driver state\n");
rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
- QED_OV_ESWITCH_VEB);
+ QED_OV_ESWITCH_NONE);
if (rc)
DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
@@ -2908,6 +2978,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ p_hwfn->hw_info.multi_tc_roce_en = 1;
+
p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
p_hwfn->hw_info.num_active_tc = 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index bee10c1781fb..8faceb691657 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12444,6 +12444,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+
#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
@@ -12543,6 +12545,15 @@ struct public_drv_mb {
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
+
/* Resource Allocation params - Driver version support */
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12596,6 +12607,9 @@ struct public_drv_mb {
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
@@ -12687,6 +12701,8 @@ struct mcp_public_data {
struct public_func func[MCP_GLOB_FUNC_MAX];
};
+#define MAX_I2C_TRANSACTION_SIZE 16
+
/* OCBB definitions */
enum tlvs {
/* Category 1: Device Properties */
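The new DRV_MB_PARAM_TRANSCEIVER_* fields pack one mailbox parameter per I2C read chunk; the bit layout, as consumed by qed_mcp_phy_sfp_read() later in this patch:

/* DRV_MSG_CODE_TRANSCEIVER_READ parameter:
 *   [1:0]   port                 (DRV_MB_PARAM_TRANSCEIVER_PORT_*)
 *   [7:2]   size, <= MAX_I2C_TRANSACTION_SIZE
 *   [15:8]  I2C address, e.g. 0xA0 for the SFP ID page (SFF-8472)
 *   [31:16] byte offset within the device
 */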
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d845badf9b90..d6430dfebd83 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1225,19 +1225,6 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
0);
}
-void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 rfs_cm_hdr_event_id;
-
- /* Set RFS event ID to be awakened i Tstorm By Prs */
- rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
-}
-
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 pf_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index c0d4a54a5edb..1135387bd99d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -873,8 +873,8 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
-void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_conn *p_conn)
+static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
{
qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 90a2b53096e2..17f3dfa2cc94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -377,7 +377,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
}
}
-const char *iwarp_state_names[] = {
+static const char *iwarp_state_names[] = {
"IDLE",
"RTS",
"TERMINATE",
@@ -942,7 +942,7 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
-void
+static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct mpa_v2_hdr *mpa_v2_params;
@@ -967,7 +967,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
mpa_data_size;
}
-void
+static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct qed_iwarp_cm_event_params params;
@@ -2500,7 +2500,7 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
/* The only slowpath for iwarp ll2 is unalign flush. When this completion
* is received, need to reset the FPDU.
*/
-void
+static void
qed_iwarp_ll2_slowpath(void *cxt,
u8 connection_handle,
u32 opaque_data_0, u32 opaque_data_1)
@@ -2803,8 +2803,9 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
-void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, u8 fw_return_code)
+static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ u8 fw_return_code)
{
struct qed_iwarp_cm_event_params params;
@@ -2824,8 +2825,9 @@ void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
ep->event_cb(ep->cb_context, &params);
}
-void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, int fw_ret_code)
+static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ int fw_ret_code)
{
struct qed_iwarp_cm_event_params params;
bool event_cb = false;
@@ -2954,7 +2956,7 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
}
}
-void
+static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
struct qed_iwarp_ep *ep, u8 fw_return_code)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 99973e10b179..82a1bd1f8a8c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
+ u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
- memset(bins, 0, sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ memset(bins, 0, sizeof(bins));
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport
*/
if (p_filter_cmd->opcode == QED_FILTER_ADD) {
for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
- u32 bit;
+ u32 bit, nbits;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- __set_bit(bit, bins);
+ nbits = sizeof(u32) * BITS_PER_BYTE;
+ bins[bit / nbits] |= 1 << (bit % nbits);
}
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
- u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
- p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
}
}
@@ -2189,16 +2188,17 @@ out:
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
int i;
memset(info, 0, sizeof(*info));
- info->num_tc = 1;
-
if (IS_PF(cdev)) {
int max_vf_vlan_filters = 0;
int max_vf_mac_filters = 0;
+ info->num_tc = p_hwfn->hw_info.num_hw_tc;
+
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
u16 num_queues = 0;
@@ -2249,6 +2249,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
} else {
u16 total_cids = 0;
+ info->num_tc = 1;
+
/* Determine queues & XDP support */
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -2555,7 +2557,7 @@ static int qed_start_txq(struct qed_dev *cdev,
rc = qed_eth_tx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- p_params, 0,
+ p_params, p_params->tc,
pbl_addr, pbl_size, ret_params);
if (rc) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 806a8da257e9..8d80f1095d17 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
- unsigned long bins[8];
+ u32 bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 012973d75ad0..14ac9cab2653 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -158,7 +158,8 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+static void qed_ll2b_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_buffer *buffer = data->cookie;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 5c10fd7210c3..2094d86a7a08 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
goto err2;
}
- DP_INFO(cdev, "qed_probe completed successffuly\n");
+ DP_INFO(cdev, "qed_probe completed successfully\n");
return cdev;
@@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+ if (is_kdump_kernel()) {
+ DP_INFO(cdev,
+ "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+ cdev->int_params.in.min_msix_cnt);
+ cdev->int_params.in.num_vectors =
+ cdev->int_params.in.min_msix_cnt;
+ }
+
rc = qed_set_int_mode(cdev, false);
if (rc) {
DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
@@ -940,13 +948,14 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->eth_pf_params.num_arfs_filters = 0;
/* In case we might support RDMA, don't allow qede to be greedy
- * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+ * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
+ * per hwfn.
*/
if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
u16 *num_cons;
num_cons = &params->eth_pf_params.num_cons;
- *num_cons = min_t(u16, *num_cons, 192);
+ *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
}
for (i = 0; i < cdev->num_hwfns; i++) {
@@ -2094,6 +2103,28 @@ out:
return status;
}
+static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
+ u8 dev_addr, u32 offset, u32 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
+ offset, len, buf);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -2136,6 +2167,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
+ .read_module_eeprom = &qed_read_module_eeprom,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
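The kdump clamp above falls back to the bare minimum of one slowpath plus one fastpath vector per hwfn, since crash kernels run with very little memory. For example (illustrative):

/* min_msix_cnt = cdev->num_hwfns * 2, so under kdump:
 *   single-hwfn adapter      -> 2 MSI-X vectors
 *   dual-hwfn (CMT) adapter  -> 4 MSI-X vectors
 */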
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 4e0b443c9519..d89a0e22f6e4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -570,12 +570,13 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
+static int
+qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
struct qed_mcp_mb_params mb_params;
int rc;
@@ -592,6 +593,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_resp = mb_params.mcp_resp;
*o_mcp_param = mb_params.mcp_param;
+ /* nvm_info needs to be updated */
+ p_hwfn->nvm_info.valid = false;
+
return 0;
}
@@ -1208,6 +1212,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
break;
default:
p_link->speed = 0;
+ p_link->link_up = 0;
}
if (p_link->link_up && p_link->speed)
@@ -1305,9 +1310,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
- if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
- if (params->eee.enable)
- phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+
+ /* Some MFWs advertise this capability even when it is not feasible.
+ * And since at the very least adv_caps would be set internally by
+ * qed, we want to make sure LFA would still work.
+ */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
if (params->eee.tx_lpi_enable)
phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
if (params->eee.adv_caps & QED_EEE_1G_ADV)
@@ -1541,7 +1552,8 @@ qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
- p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
+ p_hwfn->ufp_info.tc);
qed_qm_reconf(p_hwfn, p_ptt);
} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
@@ -2463,6 +2475,55 @@ out:
return rc;
}
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
+{
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
+ u32 resp, param;
+ int rc;
+
+ nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
+ nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
+
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ while (bytes_left > 0) {
+ bytes_to_copy = min_t(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
+ return -ENODEV;
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return -EINVAL;
+
+ offset += buf_size;
+ bytes_left -= buf_size;
+ }
+
+ return 0;
+}
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
@@ -2555,11 +2616,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
{
- struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+ struct qed_nvm_image_info nvm_info;
struct qed_ptt *p_ptt;
int rc;
u32 i;
+ if (p_hwfn->nvm_info.valid)
+ return 0;
+
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@ -2567,29 +2631,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
}
/* Acquire from MFW the amount of available images */
- nvm_info->num_images = 0;
+ nvm_info.num_images = 0;
rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
- p_ptt, &nvm_info->num_images);
+ p_ptt, &nvm_info.num_images);
if (rc == -EOPNOTSUPP) {
DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
goto out;
- } else if (rc || !nvm_info->num_images) {
+ } else if (rc || !nvm_info.num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
goto err0;
}
- nvm_info->image_att = kmalloc_array(nvm_info->num_images,
- sizeof(struct bist_nvm_image_att),
- GFP_KERNEL);
- if (!nvm_info->image_att) {
+ nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+ sizeof(struct bist_nvm_image_att),
+ GFP_KERNEL);
+ if (!nvm_info.image_att) {
rc = -ENOMEM;
goto err0;
}
/* Iterate over images and get their attributes */
- for (i = 0; i < nvm_info->num_images; i++) {
+ for (i = 0; i < nvm_info.num_images; i++) {
rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
- &nvm_info->image_att[i], i);
+ &nvm_info.image_att[i], i);
if (rc) {
DP_ERR(p_hwfn,
"Failed getting image index %d attributes\n", i);
@@ -2597,14 +2661,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
}
DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
- nvm_info->image_att[i].len);
+ nvm_info.image_att[i].len);
}
out:
+ /* Update hwfn's nvm_info */
+ if (nvm_info.num_images) {
+ p_hwfn->nvm_info.num_images = nvm_info.num_images;
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = nvm_info.image_att;
+ p_hwfn->nvm_info.valid = true;
+ }
+
qed_ptt_release(p_hwfn, p_ptt);
return 0;
err1:
- kfree(nvm_info->image_att);
+ kfree(nvm_info.image_att);
err0:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2641,6 +2713,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+ qed_mcp_nvm_info_populate(p_hwfn);
for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;
@@ -2937,7 +3010,7 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
return rc;
}
-int
+static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_lock_params *p_params)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 632a838f1fe3..047976d5c6e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -840,6 +840,22 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
/**
+ * @brief Read from sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to read into
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
+
+/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
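qed_mcp_phy_sfp_read() backs the new read_module_eeprom callback; note that reads are chunked, so a full page dump costs several mailbox round-trips:

/* Each DRV_MSG_CODE_TRANSCEIVER_READ moves at most
 * MAX_I2C_TRANSACTION_SIZE (16) bytes, so dumping a full
 * 256-byte SFP EEPROM page takes 256 / 16 = 16 commands.
 */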
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 101d677114f2..be941cfaa2d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -134,7 +134,7 @@ static bool qed_bmap_is_empty(struct qed_bmap *bmap)
return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
@@ -706,7 +706,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index b5ce1581645f..7d7a64c55ff1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -44,8 +44,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/if_vlan.h>
#include "qed.h"
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
@@ -157,7 +159,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
@@ -231,16 +233,33 @@ static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
+static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u8 pri, tc = 0;
+
+ if (qp->vlan_id) {
+ pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u tc: %u (vlan priority %s)\n",
+ qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
+
+ return tc;
+}
+
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
struct qed_sp_init_data init_data;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 regular_latency_queue;
enum protocol_type proto;
int rc;
+ u8 tc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -324,12 +343,17 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
- regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
- cpu_to_le16(regular_latency_queue);
+ cpu_to_le16(low_latency_queue);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
@@ -345,11 +369,6 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "rc = %d regular physical queue = 0x%x\n", rc,
- regular_latency_queue);
-
if (rc)
goto err;
@@ -375,12 +394,13 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_req_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
struct qed_sp_init_data init_data;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 regular_latency_queue;
enum protocol_type proto;
int rc;
+ u8 tc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -453,12 +473,17 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->cq_cid =
cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
- regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
- cpu_to_le16(regular_latency_queue);
+ cpu_to_le16(low_latency_queue);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
@@ -471,9 +496,6 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
-
if (rc)
goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f01bf52bc381..9b08a9d9e151 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -672,8 +672,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
-bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
- int vfid, bool b_fail_malicious)
+static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+ int vfid, bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -687,7 +687,7 @@ bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
return true;
}
-bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
p_data->update_approx_mcast_flg = 1;
memcpy(p_data->bins, p_mcast_tlv->bins,
- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
@@ -3979,7 +3979,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
+static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
int i;
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
struct qed_iov_vf_init_params params;
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
int i, j, rc;
if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
/* Initialize HW for VF access */
for_each_hwfn(cdev, j) {
- struct qed_hwfn *hwfn = &cdev->hwfns[j];
- struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ hwfn = &cdev->hwfns[j];
+ ptt = qed_ptt_acquire(hwfn);
/* Make sure not to use more than 16 queues per VF */
params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
goto err;
}
+ hwfn = QED_LEADING_HWFN(cdev);
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+ if (rc)
+ DP_INFO(cdev, "Failed to update eswitch mode\n");
+ qed_ptt_release(hwfn, ptt);
+
return num;
err:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 2d7fcd6a0777..3d4269659820 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -169,7 +169,7 @@ static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
p_qid_tlv->qid = p_cid->qid_usage_idx;
}
-int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
+static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
resp_size += sizeof(struct pfvf_def_resp_tlv);
memcpy(p_mcast_tlv->bins, p_params->bins,
- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
}
update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
u32 bit;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- __set_bit(bit, sp_params.bins);
+ sp_params.bins[bit / 32] |= 1 << (bit % 32);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 4f05d5eb3cf5..033409db86ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
- u64 bins[8];
+ /* There are only 256 approx bins, and in HSI they're divided into
+ * 32-bit values. As old VFs used to set bits in these values on their side,
+ * the upper half of the array is never expected to contain any data.
+ */
+ u64 bins[4];
+ u64 obsolete_bins[4];
};
struct vfpf_vport_update_accept_param_tlv {
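Both PF and VF now address the bins as plain 32-bit words, so the bit a VF sets lands in the same u32 the PF copies into the ramrod. A worked example (the bin number is illustrative):

/* Say qed_mcast_bin_from_mac() hashes some MAC to bin 77:
 *   word index: 77 / 32 = 2
 *   bit index:  77 % 32 = 13
 * so the VF does bins[2] |= 1 << 13. With the old
 * 'unsigned long bins[8]' plus __set_bit(), a 64-bit
 * big-endian host could land the bit in the wrong u32 half
 * of the long, corrupting the filter the PF programs.
 */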
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d7ed0d3dbf71..6a4d266fb8e2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -52,6 +52,9 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 33
#define QEDE_REVISION_VERSION 0
@@ -386,6 +389,15 @@ struct qede_tx_queue {
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \
+ ((idx) % QEDE_TSS_COUNT(edev)))
+#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev))
+#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
+ (txq)->cos) + (txq)->index)
+#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
+ (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
+ [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
+#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
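
A worked example of how these queue-id macros compose, with assumed values (QEDE_TSS_COUNT(edev) == 4, edev->fp_num_rx == 4, three traffic classes); the netdev Tx queue space enumerates all TC0 queues first, then TC1, and so on:

/* ndev txq id:  0  1  2  3 | 4  5  6  7 | 8  9 10 11
 * cos (TC):     0  0  0  0 | 1  1  1  1 | 2  2  2  2
 * txq->index:   0  1  2  3 | 0  1  2  3 | 0  1  2  3
 * fastpath id:  4  5  6  7 | 4  5  6  7 | 4  5  6  7
 *
 * QEDE_TXQ_TO_NDEV_TXQ_ID() computes cos * TSS_COUNT + index; the
 * NDEV_TXQ_ID_TO_* macros invert it with / and % to recover the CoS
 * and the owning fastpath from a netdev queue id.
 */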
/* Regular Tx requires skb + metadata for release purpose,
* while XDP requires the pages and the mapped address.
@@ -399,6 +411,8 @@ struct qede_tx_queue {
/* Slowpath; Should be kept in end [unless missing padding] */
void *handle;
+ u16 cos;
+ u16 ndev_txq_id;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -458,7 +472,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
u32 *rule_locs);
@@ -524,6 +538,8 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
@@ -541,5 +557,7 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define QEDE_RX_HDR_SIZE 256
#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
+#define for_each_cos_in_txq(edev, var) \
+ for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f4a0f8ff8261..19652cd27ca7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -222,7 +222,7 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
QEDE_TXQ_XDP_TO_IDX(edev, txq),
qede_tqstats_arr[i].string);
else
- sprintf(*buf, "%d: %s", txq->index,
+ sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
qede_tqstats_arr[i].string);
*buf += ETH_GSTRING_LEN;
}
@@ -262,8 +262,13 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_strings_stats_txq(edev, fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_strings_stats_txq(edev,
+ &fp->txq[cos], &buf);
+ }
}
/* Account for non-queue statistics */
@@ -338,8 +343,12 @@ static void qede_get_ethtool_stats(struct net_device *dev,
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_ethtool_stats_txq(fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
+ }
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
@@ -366,7 +375,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
num_stats--;
/* Account for the Regular Tx statistics */
- num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
+ edev->dev_info.num_tc;
/* Account for the Regular Rx statistics */
num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
@@ -741,9 +751,17 @@ static int qede_get_coalesce(struct net_device *dev,
}
for_each_queue(i) {
+ struct qede_tx_queue *txq;
+
fp = &edev->fp_array[i];
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
if (fp->type & QEDE_FASTPATH_TX) {
- tx_handle = fp->txq->handle;
+ txq = QEDE_FP_TC0_TXQ(fp);
+ tx_handle = txq->handle;
break;
}
}
@@ -801,9 +819,17 @@ static int qede_set_coalesce(struct net_device *dev,
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ struct qede_tx_queue *txq;
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
+ txq = QEDE_FP_TC0_TXQ(fp);
+
rc = edev->ops->common->set_coalesce(edev->cdev,
0, txc,
- fp->txq->handle);
+ txq->handle);
if (rc) {
DP_INFO(edev,
"Set TX coalesce error, rc = %d\n", rc);
@@ -1259,7 +1285,7 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
rc = qede_add_cls_rule(edev, info);
break;
case ETHTOOL_SRXCLSRLDEL:
- rc = qede_del_cls_rule(edev, info);
+ rc = qede_delete_flow_filter(edev, info->fs.location);
break;
default:
DP_INFO(edev, "Command parameters not supported\n");
@@ -1385,8 +1411,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
u16 val;
for_each_queue(i) {
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- txq = edev->fp_array[i].txq;
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ txq = QEDE_FP_TC0_TXQ(fp);
break;
}
}
@@ -1780,6 +1808,92 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
+static int qede_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u8 buf[4];
+ int rc;
+
+ /* Read the first 4 bytes to find the SFP type */
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0, 0, 4);
+ if (rc) {
+ DP_ERR(edev, "Failed reading EEPROM data %d\n", rc);
+ return rc;
+ }
+
+ switch (buf[0]) {
+ case 0x3: /* SFP, SFP+, SFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case 0xc: /* QSFP */
+ case 0xd: /* QSFP+ */
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case 0x11: /* QSFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qede_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 start_addr = ee->offset, size = 0;
+ u8 *buf = data;
+ int rc = 0;
+
+ /* Read A0 section */
+ if (ee->offset < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN)
+ size = ETH_MODULE_SFF_8079_LEN - ee->offset;
+ else
+ size = ee->len;
+
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0,
+ start_addr, size);
+ if (rc) {
+ DP_ERR(edev, "Failed reading A0 section %d\n", rc);
+ return rc;
+ }
+
+ buf += size;
+ start_addr += size;
+ }
+
+ /* Read A2 section */
+ if (start_addr >= ETH_MODULE_SFF_8079_LEN &&
+ start_addr < ETH_MODULE_SFF_8472_LEN) {
+ size = ee->len - size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + size > ETH_MODULE_SFF_8472_LEN)
+ size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A2,
+ start_addr, size);
+ if (rc) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Failed reading A2 section %d\n", rc);
+ return 0;
+ }
+ }
+
+ return rc;
+}
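
To make the A0/A2 split above concrete, a minimal sketch (the length constants are the standard ethtool.h values; the example request is an assumption) of how a read straddling the 256-byte A0 page is divided between the two I2C device addresses:

#include <stdio.h>

#define ETH_MODULE_SFF_8079_LEN 256	/* A0 page */
#define ETH_MODULE_SFF_8472_LEN 512	/* A0 + A2 pages */

int main(void)
{
	unsigned int offset = 200, len = 100;	/* example request */
	unsigned int a0 = 0, a2;

	if (offset < ETH_MODULE_SFF_8079_LEN)
		a0 = (offset + len > ETH_MODULE_SFF_8079_LEN) ?
		     ETH_MODULE_SFF_8079_LEN - offset : len;
	a2 = len - a0;	/* remainder is read from device address A2 */

	printf("A0 bytes: %u, A2 bytes: %u\n", a0, a2);	/* 56 and 44 */
	return 0;
}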
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
@@ -1813,6 +1927,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
+ .get_module_info = qede_get_module_info,
+ .get_module_eeprom = qede_get_module_eeprom,
.get_eee = qede_get_eee,
.set_eee = qede_set_eee,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index b823bfe2ea4d..9673d19308e6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -83,7 +83,7 @@ struct qede_arfs_fltr_node {
struct qede_arfs_tuple tuple;
u32 flow_id;
- u16 sw_id;
+ u64 sw_id;
u16 rxq_id;
u16 next_rxq_id;
u8 vfid;
@@ -138,7 +138,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->tuple.stringify(&n->tuple, tuple_buffer);
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
- "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+ "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
add_fltr ? "Adding" : "Deleting",
n->sw_id, tuple_buffer, n->vfid, rxq_id);
}
@@ -152,7 +152,10 @@ static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
kfree(fltr->data);
- clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
+ if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
+ clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
kfree(fltr);
}
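
The new bound check matters because sw_id now spans two namespaces (a summary, names as in the driver):

/* - ethtool aRFS/ntuple filters: sw_id lies in [0, QEDE_RFS_MAX_FLTR)
 *   and is allocated from (and must be returned to) arfs_fltr_bmap;
 * - tc flower filters: sw_id is the raw 64-bit tc cookie, which may
 *   exceed QEDE_RFS_MAX_FLTR and never touches the bitmap.
 */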
@@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
if (fw_rc) {
DP_NOTICE(edev,
- "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
fw_rc, fltr->flow_id, fltr->sw_id,
ntohs(fltr->tuple.src_port),
ntohs(fltr->tuple.dst_port), fltr->rxq_id);
@@ -1116,7 +1119,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!edev->xdp_prog;
xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
return 0;
default:
@@ -1349,7 +1351,7 @@ out:
}
static struct qede_arfs_fltr_node *
-qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
+qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
struct qede_arfs_fltr_node *fltr;
@@ -1600,6 +1602,69 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
return 0;
}
+static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t)
+{
+ /* The input must be exactly one of: a full 4-tuple, an L4
+ * destination port only, a source IP only, or a destination
+ * IP only.
+ */
+ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !t->src_ipv4 && !t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !t->dst_ipv4 && t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ t->dst_ipv4 && !t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv4_cmp;
+ t->build_hdr = qede_flow_build_ipv4_hdr;
+ t->stringify = qede_flow_stringify_ipv4_hdr;
+
+ return 0;
+}
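
The mode selection above (the IPv6 variant below applies the same table, using memcmp against the zero address) reads more easily as a truth table, where 1 means the field is present in the request:

/* src_port dst_port src_ip dst_ip  ->  mode
 *    1        1       1      1         QED_FILTER_CONFIG_MODE_5_TUPLE
 *    0        1       0      0         QED_FILTER_CONFIG_MODE_L4_PORT
 *    0        0       1      0         QED_FILTER_CONFIG_MODE_IP_SRC
 *    0        0       0      1         QED_FILTER_CONFIG_MODE_IP_DEST
 *  any other combination               rejected with -EOPNOTSUPP
 */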
+
+static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct in6_addr *zaddr)
+{
+ /* The input must be exactly one of: a full 4-tuple, an L4
+ * destination port only, a source IP only, or a destination
+ * IP only.
+ */
+ if (t->src_port && t->dst_port &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv6_cmp;
+ t->build_hdr = qede_flow_build_ipv6_hdr;
+
+ return 0;
+}
+
static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
struct qede_arfs_tuple *t,
struct ethtool_rx_flow_spec *fs)
@@ -1639,27 +1704,7 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
t->src_port = fs->h_u.tcp_ip4_spec.psrc;
t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
- /* We must either have a valid 4-tuple or only dst port
- * or only src ip as an input
- */
- if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
- } else if (!t->src_port && t->dst_port &&
- !t->src_ipv4 && !t->dst_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
- } else if (!t->src_port && !t->dst_port &&
- !t->dst_ipv4 && t->src_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
- } else {
- DP_INFO(edev, "Invalid N-tuple\n");
- return -EOPNOTSUPP;
- }
-
- t->ip_comp = qede_flow_spec_ipv4_cmp;
- t->build_hdr = qede_flow_build_ipv4_hdr;
- t->stringify = qede_flow_stringify_ipv4_hdr;
-
- return 0;
+ return qede_set_v4_tuple_to_profile(edev, t);
}
static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
@@ -1691,10 +1736,8 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
struct ethtool_rx_flow_spec *fs)
{
struct in6_addr zero_addr;
- void *p;
- p = &zero_addr;
- memset(p, 0, sizeof(zero_addr));
+ memset(&zero_addr, 0, sizeof(zero_addr));
if ((fs->h_u.tcp_ip6_spec.psrc &
fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
@@ -1721,30 +1764,7 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
t->src_port = fs->h_u.tcp_ip6_spec.psrc;
t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
- /* We must make sure we have a valid 4-tuple or only dest port
- * or only src ip as an input
- */
- if (t->src_port && t->dst_port &&
- memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
- memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
- } else if (!t->src_port && t->dst_port &&
- !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
- !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
- } else if (!t->src_port && !t->dst_port &&
- !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
- memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
- } else {
- DP_INFO(edev, "Invalid N-tuple\n");
- return -EOPNOTSUPP;
- }
-
- t->ip_comp = qede_flow_spec_ipv6_cmp;
- t->build_hdr = qede_flow_build_ipv6_hdr;
-
- return 0;
+ return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}
static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
@@ -1942,9 +1962,8 @@ unlock:
return rc;
}
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
- struct ethtool_rx_flow_spec *fsp = &info->fs;
struct qede_arfs_fltr_node *fltr = NULL;
int rc = -EPERM;
@@ -1953,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
goto unlock;
fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
- fsp->location);
+ cookie);
if (!fltr)
goto unlock;
@@ -1983,3 +2002,293 @@ unlock:
__qede_unlock(edev);
return count;
}
+
+static int qede_parse_actions(struct qede_dev *edev,
+ struct tcf_exts *exts)
+{
+ int rc = -EINVAL, num_act = 0;
+ const struct tc_action *a;
+ bool is_drop = false;
+ LIST_HEAD(actions);
+
+ if (!tcf_exts_has_actions(exts)) {
+ DP_NOTICE(edev, "No tc actions received\n");
+ return rc;
+ }
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ num_act++;
+
+ if (is_tcf_gact_shot(a))
+ is_drop = true;
+ }
+
+ if (num_act == 1 && is_drop)
+ return 0;
+
+ return rc;
+}
+
+static int
+qede_tc_parse_ports(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+
+ if ((key->src && mask->src != U16_MAX) ||
+ (key->dst && mask->dst != U16_MAX)) {
+ DP_NOTICE(edev, "Do not support ports masks\n");
+ return -EINVAL;
+ }
+
+ t->src_port = key->src;
+ t->dst_port = key->dst;
+ }
+
+ return 0;
+}
+
+static int
+qede_tc_parse_v6_common(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ struct in6_addr zero_addr, addr;
+
+ memset(&zero_addr, 0, sizeof(addr));
+ memset(&addr, 0xff, sizeof(addr));
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_dissector_key_ipv6_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+
+ if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
+ memcmp(&mask->src, &addr, sizeof(addr))) ||
+ (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
+ memcmp(&mask->dst, &addr, sizeof(addr)))) {
+ DP_NOTICE(edev,
+ "Do not support IPv6 address prefix/mask\n");
+ return -EINVAL;
+ }
+
+ memcpy(&t->src_ipv6, &key->src, sizeof(addr));
+ memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+ }
+
+ if (qede_tc_parse_ports(edev, f, t))
+ return -EINVAL;
+
+ return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+}
+
+static int
+qede_tc_parse_v4_common(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_dissector_key_ipv4_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ if ((key->src && mask->src != U32_MAX) ||
+ (key->dst && mask->dst != U32_MAX)) {
+ DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+ return -EINVAL;
+ }
+
+ t->src_ipv4 = key->src;
+ t->dst_ipv4 = key->dst;
+ }
+
+ if (qede_tc_parse_ports(edev, f, t))
+ return -EINVAL;
+
+ return qede_set_v4_tuple_to_profile(edev, t);
+}
+
+static int
+qede_tc_parse_tcp_v6(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_tcp_v4(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v6(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v4(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ int rc = -EINVAL;
+ u8 ip_proto = 0;
+
+ memset(tuple, 0, sizeof(*tuple));
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+ f->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (proto != htons(ETH_P_IP) &&
+ proto != htons(ETH_P_IPV6)) {
+ DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ ip_proto = key->ip_proto;
+ }
+
+ if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
+ rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+ else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
+ rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
+ rc = qede_tc_parse_udp_v4(edev, f, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
+ rc = qede_tc_parse_udp_v6(edev, f, tuple);
+ else
+ DP_NOTICE(edev, "Invalid tc protocol request\n");
+
+ return rc;
+}
+
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f)
+{
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc = -EINVAL;
+ struct qede_arfs_tuple t;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ /* parse flower attribute and prepare filter */
+ if (qede_parse_flower_attr(edev, proto, f, &t))
+ goto unlock;
+
+ /* Validate profile mode and number of filters */
+ if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+ edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
+ DP_NOTICE(edev,
+ "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+ t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ goto unlock;
+ }
+
+ /* Validate the tc actions; only a single drop action is supported */
+ if (qede_parse_actions(edev, f->exts))
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EEXIST;
+ goto unlock;
+ }
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ min_hlen = qede_flow_get_min_header_size(&t);
+
+ n->data = kzalloc(min_hlen, GFP_KERNEL);
+ if (!n->data) {
+ kfree(n);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+ n->buf_len = min_hlen;
+ n->b_is_drop = true;
+ n->sw_id = f->cookie;
+
+ n->tuple.build_hdr(&n->tuple, n->data);
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+ if (rc)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+ rc = qede_poll_arfs_filter_config(edev, n);
+
+unlock:
+ __qede_unlock(edev);
+ return rc;
+}
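
As a usage illustration (interface name assumed), a rule this path can offload and the constraints it enforces:

/* Example:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *      ip_proto tcp dst_port 80 action drop
 *
 * Only exact-match 4-tuple / L4-port / src-IP / dst-IP keys are parsed,
 * a single drop action is accepted, and the profile mode must match any
 * filters already configured.
 */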
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 6c702399b801..1a78027de071 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -408,12 +408,12 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
struct netdev_queue *netdev_txq;
u16 hw_bd_cons;
- unsigned int pkts_compl = 0, bytes_compl = 0;
int rc;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
@@ -1119,8 +1119,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
default:
bpf_warn_invalid_xdp_action(act);
+ /* Fall through */
case XDP_ABORTED:
trace_xdp_exception(edev->ndev, prog, act);
+ /* Fall through */
case XDP_DROP:
qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
}
@@ -1363,9 +1365,14 @@ static bool qede_poll_is_more_work(struct qede_fastpath *fp)
if (qede_txq_has_work(fp->xdp_tx))
return true;
- if (likely(fp->type & QEDE_FASTPATH_TX))
- if (qede_txq_has_work(fp->txq))
- return true;
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ return true;
+ }
+ }
return false;
}
@@ -1380,8 +1387,14 @@ int qede_poll(struct napi_struct *napi, int budget)
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
- if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
- qede_tx_int(edev, fp->txq);
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ qede_tx_int(edev, &fp->txq[cos]);
+ }
+ }
if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
qede_xdp_tx_int(edev, fp->xdp_tx);
@@ -1442,8 +1455,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
+ txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6a796040a32c..46d0f2eaa0c0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -536,6 +536,97 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ int cos, count, offset;
+
+ if (num_tc > edev->dev_info.num_tc)
+ return -EINVAL;
+
+ netdev_reset_tc(ndev);
+ netdev_set_num_tc(ndev, num_tc);
+
+ for_each_cos_in_txq(edev, cos) {
+ count = QEDE_TSS_COUNT(edev);
+ offset = cos * QEDE_TSS_COUNT(edev);
+ netdev_set_tc_queue(ndev, cos, count, offset);
+ }
+
+ return 0;
+}
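
For example, with assumed values QEDE_TSS_COUNT(edev) == 4 and num_tc == 2, the loop above programs the following queue stripes:

/* netdev_set_tc_queue(ndev, tc, count, offset):
 *   tc 0 -> queues [0..3]  (count 4, offset 0)
 *   tc 1 -> queues [4..7]  (count 4, offset 4)
 * Each traffic class owns one contiguous stripe of TSS queues, matching
 * the ndev-txq-id layout defined by the macros in qede.h.
 */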
+
+static int
+qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
+ __be16 proto)
+{
+ switch (f->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return qede_add_tc_flower_fltr(edev, proto, f);
+ case TC_CLSFLOWER_DESTROY:
+ return qede_delete_flow_filter(edev, f->cookie);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct tc_cls_flower_offload *f;
+ struct qede_dev *edev = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ f = type_data;
+ return qede_set_flower(edev, f, f->common.protocol);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_setup_tc_block(struct qede_dev *edev,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ qede_setup_tc_block_cb,
+ edev, edev, f->extack);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct tc_mqprio_qopt *mqprio;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return qede_setup_tc_block(edev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ mqprio = type_data;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ return qede_setup_tc(dev, mqprio->num_tc);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
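
Usage-wise (interface name and priority map assumed), this hook is what serves an mqprio configuration such as:

/*   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 hw 1
 *
 * mqprio->hw is forced to TC_MQPRIO_HW_OFFLOAD_TCS, so the TC-to-queue
 * striping is done by qede_setup_tc() rather than by the qdisc itself.
 */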
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -568,6 +659,7 @@ static const struct net_device_ops qede_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
+ .ndo_setup_tc = qede_setup_tc_offload,
};
static const struct net_device_ops qede_netdev_vf_ops = {
@@ -621,7 +713,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
struct qede_dev *edev;
ndev = alloc_etherdev_mqs(sizeof(*edev),
- info->num_queues, info->num_queues);
+ info->num_queues * info->num_tc,
+ info->num_queues);
if (!ndev) {
pr_err("etherdev allocation failed\n");
return NULL;
@@ -688,7 +781,7 @@ static void qede_init_ndev(struct qede_dev *edev)
/* user-changeble features */
hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
hw_features |= NETIF_F_NTUPLE;
@@ -830,7 +923,8 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+ fp->txq = kcalloc(edev->dev_info.num_tc,
+ sizeof(*fp->txq), GFP_KERNEL);
if (!fp->txq)
goto err;
}
@@ -879,10 +973,15 @@ static void qede_sp_task(struct work_struct *work)
static void qede_update_pf_params(struct qed_dev *cdev)
{
struct qed_pf_params pf_params;
+ u16 num_cons;
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
- pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+
+ /* 1 rx + 1 xdp + max tx cos */
+ num_cons = QED_MIN_L2_CONS;
+
+ pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
/* Same for VFs - make sure they'll have sufficient connections
* to support XDP Tx queues.
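
In other words, each status block now accounts for one Rx connection, one XDP Tx connection, and one Tx connection per supported CoS instead of the previous fixed three; QED_MIN_L2_CONS is the qed-side name for that sum:

/* old: num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3
 *       (1 rx + 1 tx + 1 xdp per status block)
 * new: num_cons = (MAX_SB_PER_PF_MIMD - 1) * QED_MIN_L2_CONS
 *       (1 rx + 1 xdp + max-Tx-CoS tx per status block)
 */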
@@ -1363,8 +1462,12 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
if (fp->type & QEDE_FASTPATH_XDP)
qede_free_mem_txq(edev, fp->xdp_tx);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_free_mem_txq(edev, fp->txq);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_free_mem_txq(edev, &fp->txq[cos]);
+ }
}
/* This function allocates all memory needed for a single fp (i.e. an entity
@@ -1391,9 +1494,13 @@ static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_alloc_mem_txq(edev, fp->txq);
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
+ if (rc)
+ goto out;
+ }
}
out:
@@ -1466,10 +1573,23 @@ static void qede_init_fp(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq->index = txq_index++;
- if (edev->dev_info.is_legacy)
- fp->txq->is_legacy = 1;
- fp->txq->dev = &edev->pdev->dev;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_tx_queue *txq = &fp->txq[cos];
+ u16 ndev_tx_id;
+
+ txq->cos = cos;
+ txq->index = txq_index;
+ ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
+ txq->ndev_txq_id = ndev_tx_id;
+
+ if (edev->dev_info.is_legacy)
+ txq->is_legacy = 1;
+ txq->dev = &edev->pdev->dev;
+ }
+
+ txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1483,7 +1603,9 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
{
int rc = 0;
- rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
+ rc = netif_set_real_num_tx_queues(edev->ndev,
+ QEDE_TSS_COUNT(edev) *
+ edev->dev_info.num_tc);
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc;
@@ -1685,9 +1807,13 @@ static int qede_stop_queues(struct qede_dev *edev)
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_drain_txq(edev, fp->txq, true);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_drain_txq(edev, &fp->txq[cos], true);
+ if (rc)
+ return rc;
+ }
}
if (fp->type & QEDE_FASTPATH_XDP) {
@@ -1703,9 +1829,13 @@ static int qede_stop_queues(struct qede_dev *edev)
/* Stop the Tx Queue(s) */
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_stop_txq(edev, fp->txq, i);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_stop_txq(edev, &fp->txq[cos], i);
+ if (rc)
+ return rc;
+ }
}
/* Stop the Rx Queue */
@@ -1758,6 +1888,7 @@ static int qede_start_txq(struct qede_dev *edev,
params.p_sb = fp->sb_info;
params.sb_idx = sb_idx;
+ params.tc = txq->cos;
rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
page_cnt, &ret_params);
@@ -1877,9 +2008,14 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
+ TX_PI(cos));
+ if (rc)
+ goto out;
+ }
}
}
@@ -1973,6 +2109,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
+ u8 num_tc;
int rc;
DP_INFO(edev, "Starting qede load\n");
@@ -2019,6 +2156,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+ num_tc = netdev_get_num_tc(edev->ndev);
+ num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
+ qede_setup_tc(edev->ndev, num_tc);
+
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
@@ -2143,7 +2284,7 @@ static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
if (netif_xmit_stopped(netdev_txq))
return true;
@@ -2208,9 +2349,11 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
for_each_queue(i) {
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+ struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
+
+ if (txq->sw_tx_cons != txq->sw_tx_prod)
etlv->txqs_empty = false;
- if (qede_is_txq_full(edev, fp->txq))
+ if (qede_is_txq_full(edev, txq))
etlv->num_txqs_full++;
}
if (fp->type & QEDE_FASTPATH_RX) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 02adb513f475..013ff567283c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
- if (!ptp)
- return -EIO;
+ if (!ptp) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+
+ return 0;
+ }
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 7f7deeaf1cf0..3b0adda7cc9c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -351,9 +351,9 @@ skip:
case QLCNIC_BRDTYPE_P3P_REF_QG:
case QLCNIC_BRDTYPE_P3P_4_GB:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
-
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
+ /* fall through */
case QLCNIC_BRDTYPE_P3P_10G_CX4:
case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
@@ -377,6 +377,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
+ /* fall through */
case QLCNIC_BRDTYPE_P3P_10G_XFP:
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 7848cf04b29a..822aa393c370 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -276,13 +276,6 @@ static const unsigned crb_hub_agt[64] = {
0,
};
-static const u32 msi_tgt_status[8] = {
- ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
- ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
- ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
- ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
-};
-
/* PCI Windowing for DDR regions. */
#define QLCNIC_PCIE_SEM_TIMEOUT 10000
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0c744b9c6e0a..77e386ebff09 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vp->max_tx_bw = MAX_BW;
vp->min_tx_bw = MIN_BW;
vp->spoofchk = false;
- random_ether_addr(vp->mac);
+ eth_random_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
vp->mac, i);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 891f03a7a33d..8d7b9bb910f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ return ret;
switch (data) {
case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index 4be65d6761b3..957c72985a06 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1176,6 +1176,7 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
ql_link_off(qdev);
+ /* Fall through */
case MB_CMD_SET_PORT_CFG:
/* Signal the resulting link up AEN
* that the frame routing and mac addr
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5803cd6db406..206f0266463e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
return ret;
}
- netif_start_queue(qca->net_dev);
+ /* The SPI thread takes care of the TX queue */
return 0;
}
@@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
qca->sync = QCASPI_SYNC_UNKNOWN;
+
+ if (qca->spi_thread)
+ wake_up_process(qca->spi_thread);
}
static int
@@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi)
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_info(&spi->dev, "Invalid clkspeed: %d\n",
- qcaspi_clkspeed);
+ dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+ qcaspi_clkspeed);
return -EINVAL;
}
if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
(qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
- dev_info(&spi->dev, "Invalid burst len: %d\n",
- qcaspi_burst_len);
+ dev_err(&spi->dev, "Invalid burst len: %d\n",
+ qcaspi_burst_len);
return -EINVAL;
}
if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
(qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
- dev_info(&spi->dev, "Invalid pluggable: %d\n",
- qcaspi_pluggable);
+ dev_err(&spi->dev, "Invalid pluggable: %d\n",
+ qcaspi_pluggable);
return -EINVAL;
}
@@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi)
}
if (register_netdev(qcaspi_devs)) {
- dev_info(&spi->dev, "Unable to register net device %s\n",
- qcaspi_devs->name);
+ dev_err(&spi->dev, "Unable to register net device %s\n",
+ qcaspi_devs->name);
free_netdev(qcaspi_devs);
return -EFAULT;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index b9a7548ec6a0..0afc3d335d56 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
- random_ether_addr(rmnet_dev->dev_addr);
+ eth_random_addr(rmnet_dev->dev_addr);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 7c69f4c8134d..96d1b9c08f1a 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -99,7 +99,8 @@ config R8169
depends on PCI
select FW_LOADER
select CRC32
- select MII
+ select PHYLIB
+ select REALTEK_PHY
---help---
Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 75dfac0248f4..0efa977c422d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -15,27 +15,22 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
+#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
-#include <linux/pci-aspm.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define RTL8169_VERSION "2.3LK-NAPI"
#define MODULENAME "r8169"
-#define PFX MODULENAME ": "
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
@@ -57,19 +52,6 @@
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
-#ifdef RTL8169_DEBUG
-#define assert(expr) \
- if (!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__func__,__LINE__); \
- }
-#define dprintk(fmt, args...) \
- do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
-#else
-#define assert(expr) do {} while (0)
-#define dprintk(fmt, args...) do {} while (0)
-#endif /* RTL8169_DEBUG */
-
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
@@ -95,7 +77,6 @@ static const int multicast_filter_limit = 32;
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL8169_TX_TIMEOUT (6*HZ)
-#define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */
#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
@@ -160,136 +141,70 @@ enum mac_version {
RTL_GIGA_MAC_NONE = 0xff,
};
-enum rtl_tx_desc_version {
- RTL_TD_0 = 0,
- RTL_TD_1 = 1,
-};
-
#define JUMBO_1K ETH_DATA_LEN
#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
-#define _R(NAME,TD,FW,SZ) { \
- .name = NAME, \
- .txd_version = TD, \
- .fw_name = FW, \
- .jumbo_max = SZ, \
-}
-
static const struct {
const char *name;
- enum rtl_tx_desc_version txd_version;
const char *fw_name;
- u16 jumbo_max;
} rtl_chip_infos[] = {
/* PCI devices. */
- [RTL_GIGA_MAC_VER_01] =
- _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_02] =
- _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_03] =
- _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_04] =
- _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_05] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_06] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
+ [RTL_GIGA_MAC_VER_01] = {"RTL8169" },
+ [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
+ [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
+ [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
+ [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
+ [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
/* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_08] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_09] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_10] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_11] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_12] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_13] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_14] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_15] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_16] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_18] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_19] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_20] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_21] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_22] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_23] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_24] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_25] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_26] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_27] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_28] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_29] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_30] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_31] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_32] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_33] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_34] =
- _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K),
- [RTL_GIGA_MAC_VER_35] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_36] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_37] =
- _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_38] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_39] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_40] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_41] =
- _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_42] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, JUMBO_9K),
- [RTL_GIGA_MAC_VER_43] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K),
- [RTL_GIGA_MAC_VER_44] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_45] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_46] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_47] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_48] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, JUMBO_1K),
- [RTL_GIGA_MAC_VER_49] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_50] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_51] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
+ [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_09] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_13] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_14] = {"RTL8100e" },
+ [RTL_GIGA_MAC_VER_15] = {"RTL8100e" },
+ [RTL_GIGA_MAC_VER_16] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
+ [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
+ [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
+ [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
+ [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
+ [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
+ [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
+ [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
+ [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
+ [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
+ [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
+ [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
+ [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
+ [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
+ [RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g", FIRMWARE_8168G_3},
+ [RTL_GIGA_MAC_VER_43] = {"RTL8106e", FIRMWARE_8106E_2},
+ [RTL_GIGA_MAC_VER_44] = {"RTL8411", FIRMWARE_8411_2 },
+ [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
+ [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
+ [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
+ [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
+ [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
};
-#undef _R
enum cfg_version {
RTL_CFG_0 = 0x00,
@@ -399,12 +314,6 @@ enum rtl_registers {
FuncForceEvent = 0xfc,
};
-enum rtl8110_registers {
- TBICSR = 0x64,
- TBI_ANAR = 0x68,
- TBI_LPAR = 0x6a,
-};
-
enum rtl8168_8101_registers {
CSIDR = 0x64,
CSIAR = 0x68,
@@ -571,14 +480,6 @@ enum rtl_register_content {
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
ASPM_en = (1 << 0), /* ASPM enable */
- /* TBICSR p.28 */
- TBIReset = 0x80000000,
- TBILoopback = 0x40000000,
- TBINwEnable = 0x20000000,
- TBINwRestart = 0x10000000,
- TBILinkOk = 0x02000000,
- TBINwComplete = 0x01000000,
-
/* CPlusCmd p.31 */
EnableBist = (1 << 15), // 8168 8101
Mac_dbgo_oe = (1 << 14), // 8168 8101
@@ -732,7 +633,6 @@ enum rtl_flag {
RTL_FLAG_TASK_ENABLED,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_PHY_PENDING,
RTL_FLAG_MAX
};
@@ -760,7 +660,6 @@ struct rtl8169_private {
dma_addr_t RxPhyAddr;
void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
- struct timer_list timer;
u16 cp_cmd;
u16 event_slow;
@@ -776,14 +675,7 @@ struct rtl8169_private {
void (*disable)(struct rtl8169_private *);
} jumbo_ops;
- int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_link_ksettings)(struct net_device *,
- struct ethtool_link_ksettings *);
- void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct rtl8169_private *tp);
- unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
- unsigned int (*link_ok)(struct rtl8169_private *tp);
- int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
struct {
@@ -792,7 +684,8 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- struct mii_if_info mii;
+ unsigned supports_gmii:1;
+ struct mii_bus *mii_bus;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -822,7 +715,6 @@ MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
-MODULE_VERSION(RTL8169_VERSION);
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
@@ -1143,21 +1035,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
-static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl_writephy(tp, location, val);
-}
-
-static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return rtl_readphy(tp, location);
-}
-
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1478,54 +1355,22 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
RTL_R8(tp, ChipCmd);
}
-static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBIReset;
-}
-
-static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
-{
- return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
-}
-
-static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBILinkOk;
-}
-
-static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R8(tp, PHYstatus) & LinkStatus;
-}
-
-static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
-{
- RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset);
-}
-
-static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
-{
- unsigned int val;
-
- val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
- rtl_writephy(tp, MII_BMCR, val & 0xffff);
-}
-
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
ERIAR_EXGMAC);
- } else if (RTL_R8(tp, PHYstatus) & _100bps) {
+ } else if (phydev->speed == SPEED_100) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1543,7 +1388,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1555,7 +1400,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
- if (RTL_R8(tp, PHYstatus) & _10bps) {
+ if (phydev->speed == SPEED_10) {
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
@@ -1567,25 +1412,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- struct device *d = tp_to_dev(tp);
-
- if (tp->link_ok(tp)) {
- rtl_link_chg_patch(tp);
- /* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(d);
- netif_carrier_on(dev);
- if (net_ratelimit())
- netif_info(tp, ifup, dev, "link up\n");
- } else {
- netif_carrier_off(dev);
- netif_info(tp, ifdown, dev, "link down\n");
- pm_runtime_idle(d);
- }
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -1626,21 +1452,11 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = tp_to_dev(tp);
-
- pm_runtime_get_noresume(d);
rtl_lock_work(tp);
-
wol->supported = WAKE_ANY;
- if (pm_runtime_active(d))
- wol->wolopts = __rtl8169_get_wol(tp);
- else
- wol->wolopts = tp->saved_wolopts;
-
+ wol->wolopts = tp->saved_wolopts;
rtl_unlock_work(tp);
-
- pm_runtime_put_noidle(d);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1716,18 +1532,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
pm_runtime_get_noresume(d);
rtl_lock_work(tp);
+ tp->saved_wolopts = wol->wolopts;
+
if (pm_runtime_active(d))
- __rtl8169_set_wol(tp, wol->wolopts);
- else
- tp->saved_wolopts = wol->wolopts;
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
rtl_unlock_work(tp);
- device_set_wakeup_enable(d, wol->wolopts);
+ device_set_wakeup_enable(d, tp->saved_wolopts);
pm_runtime_put_noidle(d);
@@ -1746,7 +1565,6 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl_fw *rtl_fw = tp->rtl_fw;
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
- strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (!IS_ERR_OR_NULL(rtl_fw))
@@ -1759,124 +1577,6 @@ static int rtl8169_get_regs_len(struct net_device *dev)
return R8169_REGS_SIZE;
}
-static int rtl8169_set_speed_tbi(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 ignored)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret = 0;
- u32 reg;
-
- reg = RTL_R32(tp, TBICSR);
- if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
- (duplex == DUPLEX_FULL)) {
- RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart));
- } else if (autoneg == AUTONEG_ENABLE)
- RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart);
- else {
- netif_warn(tp, link, dev,
- "incorrect speed setting refused in TBI mode\n");
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int rtl8169_set_speed_xmii(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 adv)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int giga_ctrl, bmcr;
- int rc = -EINVAL;
-
- rtl_writephy(tp, 0x1f, 0x0000);
-
- if (autoneg == AUTONEG_ENABLE) {
- int auto_nego;
-
- auto_nego = rtl_readphy(tp, MII_ADVERTISE);
- auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
-
- if (adv & ADVERTISED_10baseT_Half)
- auto_nego |= ADVERTISE_10HALF;
- if (adv & ADVERTISED_10baseT_Full)
- auto_nego |= ADVERTISE_10FULL;
- if (adv & ADVERTISED_100baseT_Half)
- auto_nego |= ADVERTISE_100HALF;
- if (adv & ADVERTISED_100baseT_Full)
- auto_nego |= ADVERTISE_100FULL;
-
- auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-
- giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
- giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-
- /* The 8100e/8101e/8102e do Fast Ethernet only. */
- if (tp->mii.supports_gmii) {
- if (adv & ADVERTISED_1000baseT_Half)
- giga_ctrl |= ADVERTISE_1000HALF;
- if (adv & ADVERTISED_1000baseT_Full)
- giga_ctrl |= ADVERTISE_1000FULL;
- } else if (adv & (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full)) {
- netif_info(tp, link, dev,
- "PHY does not support 1000Mbps\n");
- goto out;
- }
-
- bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
-
- rtl_writephy(tp, MII_ADVERTISE, auto_nego);
- rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
- } else {
- if (speed == SPEED_10)
- bmcr = 0;
- else if (speed == SPEED_100)
- bmcr = BMCR_SPEED100;
- else
- goto out;
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
- }
-
- rtl_writephy(tp, MII_BMCR, bmcr);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
- rtl_writephy(tp, 0x17, 0x2138);
- rtl_writephy(tp, 0x0e, 0x0260);
- } else {
- rtl_writephy(tp, 0x17, 0x2108);
- rtl_writephy(tp, 0x0e, 0x0000);
- }
- }
-
- rc = 0;
-out:
- return rc;
-}
-
-static int rtl8169_set_speed(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 advertising)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
- if (ret < 0)
- goto out;
-
- if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
- (advertising & ADVERTISED_1000baseT_Full) &&
- !pci_is_pcie(tp->pci_dev)) {
- mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
- }
-out:
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1940,76 +1640,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
-static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- u32 status;
- u32 supported, advertising;
-
- supported =
- SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->base.port = PORT_FIBRE;
-
- status = RTL_R32(tp, TBICSR);
- advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
- cmd->base.autoneg = !!(status & TBINwEnable);
-
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL; /* Always set */
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- mii_ethtool_get_link_ksettings(&tp->mii, cmd);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
-
- rtl_lock_work(tp);
- rc = tp->get_link_ksettings(dev, cmd);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
-static int rtl8169_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
- u32 advertising;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising))
- return -EINVAL;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
- cmd->base.duplex, advertising);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2185,13 +1815,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int rtl8169_nway_reset(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return mii_nway_restart(&tp->mii);
-}
-
/*
* Interrupt coalescing
*
@@ -2264,7 +1887,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
const struct rtl_coalesce_info *ci;
int rc;
- rc = rtl8169_get_link_ksettings(dev, &ecmd);
+ rc = phy_ethtool_get_link_ksettings(dev, &ecmd);
if (rc < 0)
return ERR_PTR(rc);
@@ -2422,9 +2045,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
- .nway_reset = rtl8169_nway_reset,
- .get_link_ksettings = rtl8169_get_link_ksettings,
- .set_link_ksettings = rtl8169_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
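Once phylib owns the PHY, the driver-private nway_reset and link_ksettings handlers become dead weight: the generic helpers operate directly on dev->phydev. A minimal sketch of the resulting ops table for a hypothetical driver:

#include <linux/ethtool.h>
#include <linux/phy.h>

static const struct ethtool_ops example_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};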
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -2537,15 +2160,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
"unknown MAC, using family default\n");
tp->mac_version = default_version;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_42 :
RTL_GIGA_MAC_VER_43;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_45 :
RTL_GIGA_MAC_VER_47;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_46 :
RTL_GIGA_MAC_VER_48;
}
@@ -2553,7 +2176,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
- dprintk("mac_version = 0x%02x\n", tp->mac_version);
+ netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
}
struct phy_reg {
@@ -4405,62 +4028,16 @@ static void rtl_hw_phy_config(struct net_device *dev)
}
}
-static void rtl_phy_work(struct rtl8169_private *tp)
-{
- struct timer_list *timer = &tp->timer;
- unsigned long timeout = RTL8169_PHY_TIMEOUT;
-
- assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
-
- if (tp->phy_reset_pending(tp)) {
- /*
- * A busy loop could burn quite a few cycles on today's CPUs.
- * Let's delay the execution of the timer for a few ticks.
- */
- timeout = HZ/10;
- goto out_mod_timer;
- }
-
- if (tp->link_ok(tp))
- return;
-
- netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
-
- tp->phy_reset_enable(tp);
-
-out_mod_timer:
- mod_timer(timer, jiffies + timeout);
-}
-
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
if (!test_and_set_bit(flag, tp->wk.flags))
schedule_work(&tp->wk.work);
}
-static void rtl8169_phy_timer(struct timer_list *t)
-{
- struct rtl8169_private *tp = from_timer(tp, t, timer);
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
-}
-
-DECLARE_RTL_COND(rtl_phy_reset_cond)
-{
- return tp->phy_reset_pending(tp);
-}
-
-static void rtl8169_phy_reset(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- tp->phy_reset_enable(tp);
- rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
-}
-
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
- (RTL_R8(tp, PHYstatus) & TBI_Enable);
+ (RTL_R8(tp, PHYstatus) & TBI_Enable);
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -4468,7 +4045,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
}
@@ -4478,23 +4056,18 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
- dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ netif_dbg(tp, drv, dev,
+ "Set PHY Reg 0x0bh = 0x00h\n");
rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
}
- rtl8169_phy_reset(dev, tp);
-
- rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0));
+ /* We may have called phy_speed_down() before */
+ phy_speed_up(dev->phydev);
- if (rtl_tbi_enabled(tp))
- netif_info(tp, link, dev, "TBI auto-negotiating\n");
+ genphy_soft_reset(dev->phydev);
}
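rtl8169_init_phy() no longer programs MII_ADVERTISE/MII_CTRL1000 by hand; it undoes a possible earlier phy_speed_down() and soft-resets the PHY through phylib. A minimal sketch of that tail, assuming the hypothetical function name:

#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_reinit_phy(struct net_device *dev)
{
	/* undo a phy_speed_down() done before suspend, if any */
	phy_speed_up(dev->phydev);

	/* BMCR soft reset issued through phylib */
	genphy_soft_reset(dev->phydev);
}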
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4539,34 +4112,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
-}
-
-static int rtl_xmii_ioctl(struct rtl8169_private *tp,
- struct mii_ioctl_data *data, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = 32; /* Internal PHY */
- return 0;
-
- case SIOCGMIIREG:
- data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
- return 0;
-
- case SIOCSMIIREG:
- rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
- return 0;
- }
- return -EOPNOTSUPP;
-}
+ if (!netif_running(dev))
+ return -ENODEV;
-static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
-{
- return -EOPNOTSUPP;
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
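The MII ioctl handler shrinks to a guard plus a call into phylib, which implements the standard MII ioctls against dev->phydev. The same shape in isolation (function name is a placeholder):

#include <linux/netdevice.h>
#include <linux/phy.h>

static int example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -ENODEV;

	/* handles SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG */
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}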
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
@@ -4594,30 +4143,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
-static void rtl_speed_down(struct rtl8169_private *tp)
-{
- u32 adv;
- int lpa;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- lpa = rtl_readphy(tp, MII_LPA);
-
- if (lpa & (LPA_10HALF | LPA_10FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
- else if (lpa & (LPA_100HALF | LPA_100FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
- else
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0);
-
- rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- adv);
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4639,56 +4164,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
- if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp))
return false;
- rtl_speed_down(tp);
+ phy_speed_down(tp->dev->phydev, false);
rtl_wol_suspend_quirk(tp);
return true;
}
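rtl_speed_down() and its manual MII_LPA decoding are gone: phy_speed_down() asks phylib to renegotiate the lowest speed both link partners support, so a Wake-on-LAN capable link survives suspend at minimal power; the second argument selects whether to block until renegotiation finishes. A minimal sketch of the usage, with a hypothetical function name:

#include <linux/netdevice.h>
#include <linux/phy.h>

static bool example_wol_power_down(struct net_device *dev, u32 wolopts)
{
	if (!netif_running(dev) || !wolopts)
		return false;

	/* renegotiate the lowest common speed; false = don't block */
	phy_speed_down(dev->phydev, false);
	return true;
}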
-static void r8168_phy_power_up(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0000);
- break;
- default:
- break;
- }
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
-
- /* give MAC/PHY some time to resume */
- msleep(20);
-}
-
-static void r8168_phy_power_down(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
- break;
-
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0200);
- default:
- rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
- break;
- }
-}
-
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
if (r8168_check_dash(tp))
@@ -4701,8 +4185,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (rtl_wol_pll_power_down(tp))
return;
- r8168_phy_power_down(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
@@ -4754,7 +4236,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
break;
}
- r8168_phy_power_up(tp);
+ phy_resume(tp->dev->phydev);
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_pll_power_down(struct rtl8169_private *tp)
@@ -5172,8 +4656,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03) {
- dprintk("Set MAC Reg C+CR Offset 0xe0. "
- "Bit-3 and bit-14 MUST be 1\n");
+ netif_dbg(tp, drv, tp->dev,
+ "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
tp->cp_cmd |= (1 << 14);
}
@@ -5236,12 +4720,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
rtl_csi_write(tp, 0x070c, csi | val << 24);
}
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x17);
-}
-
-static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
+static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
rtl_csi_access_enable(tp, 0x27);
}
@@ -5290,6 +4769,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config3, data);
}
+static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+{
+ if (enable) {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+ } else {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ }
+}
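The new rtl_hw_aspm_clkreq_enable() helper folds the repeated Config2/Config5 bit twiddling into one place; the 8168e/8168g/8168h/8168ep start routines below now bracket their EPHY programming with it. A sketch of the call pattern, mirrored from those routines (the function name is a placeholder):

/* ASPM/CLKREQ must be quiesced while the EPHY is programmed */
static void example_hw_start_8168x(struct rtl8169_private *tp)
{
	rtl_hw_aspm_clkreq_enable(tp, false);
	/* rtl_ephy_init(tp, e_info, ARRAY_SIZE(e_info)); */
	rtl_hw_aspm_clkreq_enable(tp, true);
}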
+
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5337,7 +4827,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{ 0x07, 0, 0x2000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
@@ -5346,7 +4836,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5359,7 +4849,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5383,7 +4873,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{ 0x06, 0x0080, 0x0000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
@@ -5399,7 +4889,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{ 0x03, 0x0400, 0x0220 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
@@ -5413,14 +4903,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
@@ -5435,7 +4925,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5453,7 +4943,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{ 0x0c, 0x0100, 0x0020 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5482,7 +4972,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{ 0x0a, 0x0000, 0x0040 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
@@ -5507,7 +4997,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{ 0x19, 0x0000, 0x0224 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
@@ -5536,11 +5026,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5611,7 +5103,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5646,9 +5138,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5681,9 +5173,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
@@ -5700,8 +5192,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
@@ -5711,7 +5202,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5780,6 +5271,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
r8168_mac_ocp_write(tp, 0xc094, 0x0000);
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
@@ -5793,7 +5286,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5831,11 +5324,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
rtl_hw_start_8168ep(tp);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
@@ -5847,14 +5341,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
rtl_hw_start_8168ep(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
@@ -5868,8 +5363,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
};
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
rtl_hw_start_8168ep(tp);
@@ -5889,6 +5383,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
data = r8168_mac_ocp_read(tp, 0xe860);
data |= 0x0080;
r8168_mac_ocp_write(tp, 0xe860, data);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168(struct rtl8169_private *tp)
@@ -6006,8 +5502,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
break;
default:
- printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
- tp->dev->name, tp->mac_version);
+ netif_err(tp, drv, tp->dev,
+ "unknown chipset (mac_version = %d)\n",
+ tp->mac_version);
break;
}
}
@@ -6026,7 +5523,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
};
u8 cfg1;
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, FIX_NAK_1);
@@ -6045,7 +5542,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -6100,7 +5597,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
{ 0x1e, 0, 0x4000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -6384,7 +5881,6 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_enable(&tp->napi);
rtl_hw_start(tp);
netif_wake_queue(dev);
- rtl8169_check_link_status(dev, tp);
}
static void rtl8169_tx_timeout(struct net_device *dev)
@@ -6958,20 +6454,15 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
- int handled = 0;
- u16 status;
+ u16 status = rtl_get_events(tp);
- status = rtl_get_events(tp);
- if (status && status != 0xffff) {
- status &= RTL_EVENT_NAPI | tp->event_slow;
- if (status) {
- handled = 1;
+ if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
+ return IRQ_NONE;
- rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
- }
- }
- return IRQ_RETVAL(handled);
+ rtl_irq_disable(tp);
+ napi_schedule_irqoff(&tp->napi);
+
+ return IRQ_HANDLED;
}
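The rewritten interrupt handler turns the nested status checks into early returns: bail out with IRQ_NONE when the device is gone (reads of a surprise-removed PCI device float high, hence the 0xffff check) or when none of the interesting event bits are set, otherwise mask the chip and kick NAPI. The same shape in isolation, with hypothetical private struct and helpers:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical private struct and helpers, for illustration only. */
struct example_priv {
	struct napi_struct napi;
	u16 irq_mask;
};
static u16 example_get_events(struct example_priv *priv);
static void example_irq_disable(struct example_priv *priv);

static irqreturn_t example_interrupt(int irq, void *dev_instance)
{
	struct example_priv *priv = dev_instance;
	u16 status = example_get_events(priv);

	/* 0xffff: the PCI device has disappeared */
	if (status == 0xffff || !(status & priv->irq_mask))
		return IRQ_NONE;

	example_irq_disable(priv);		/* mask chip interrupts */
	napi_schedule_irqoff(&priv->napi);	/* poll from softirq */

	return IRQ_HANDLED;
}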
/*
@@ -7001,7 +6492,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp);
+ phy_mac_interrupt(dev->phydev);
rtl_irq_enable_all(tp);
}
@@ -7015,7 +6506,6 @@ static void rtl_task(struct work_struct *work)
/* XXX - keep rtl_slow_event_work() as first element. */
{ RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
{ RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
- { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
};
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
@@ -7084,11 +6574,51 @@ static void rtl8169_rx_missed(struct net_device *dev)
RTL_W32(tp, RxMissed, 0);
}
+static void r8169_phylink_handler(struct net_device *ndev)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+
+ if (netif_carrier_ok(ndev)) {
+ rtl_link_chg_patch(tp);
+ pm_request_resume(&tp->pci_dev->dev);
+ } else {
+ pm_runtime_idle(&tp->pci_dev->dev);
+ }
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static int r8169_phy_connect(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+ phy_interface_t phy_mode;
+ int ret;
+
+ phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII;
+
+ ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
+ phy_mode);
+ if (ret)
+ return ret;
+
+ if (!tp->supports_gmii)
+ phy_set_max_speed(phydev, SPEED_100);
+
+ /* Make sure to advertise everything, incl. pause */
+ phydev->advertising = phydev->supported;
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
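r8169_phy_connect() is the standard phylib attach sequence: look up the PHY on the freshly registered bus, connect it with a link-change callback, clamp the speed for Fast-Ethernet-only chips, and advertise everything supported. A trimmed-down sketch of that sequence, assuming a bus with the PHY at address 0:

#include <linux/netdevice.h>
#include <linux/phy.h>

static int example_phy_connect(struct net_device *dev, struct mii_bus *bus,
			       void (*adjust_link)(struct net_device *),
			       bool gmii)
{
	struct phy_device *phydev = mdiobus_get_phy(bus, 0);
	int ret;

	if (!phydev)
		return -ENODEV;

	ret = phy_connect_direct(dev, phydev, adjust_link,
				 gmii ? PHY_INTERFACE_MODE_GMII :
					PHY_INTERFACE_MODE_MII);
	if (ret)
		return ret;

	if (!gmii)		/* Fast-Ethernet-only chip */
		phy_set_max_speed(phydev, SPEED_100);

	phy_attached_info(phydev);
	return 0;
}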
+
static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer);
+ phy_stop(dev->phydev);
napi_disable(&tp->napi);
netif_stop_queue(dev);
@@ -7129,6 +6659,8 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
+ phy_disconnect(dev->phydev);
+
pci_free_irq(pdev, 0, tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -7148,7 +6680,7 @@ static void rtl8169_netpoll(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
+ rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
}
#endif
@@ -7189,6 +6721,10 @@ static int rtl_open(struct net_device *dev)
if (retval < 0)
goto err_release_fw_2;
+ retval = r8169_phy_connect(tp);
+ if (retval)
+ goto err_free_irq;
+
rtl_lock_work(tp);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -7204,17 +6740,17 @@ static int rtl_open(struct net_device *dev)
if (!rtl8169_init_counter_offsets(tp))
netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ phy_start(dev->phydev);
netif_start_queue(dev);
rtl_unlock_work(tp);
- tp->saved_wolopts = 0;
pm_runtime_put_sync(&pdev->dev);
-
- rtl8169_check_link_status(dev, tp);
out:
return retval;
+err_free_irq:
+ pci_free_irq(pdev, 0, tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -7293,6 +6829,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
if (!netif_running(dev))
return;
+ phy_stop(dev->phydev);
netif_device_detach(dev);
netif_stop_queue(dev);
@@ -7323,6 +6860,9 @@ static void __rtl8169_resume(struct net_device *dev)
netif_device_attach(dev);
rtl_pll_power_up(tp);
+ rtl8169_init_phy(dev, tp);
+
+ phy_start(tp->dev->phydev);
rtl_lock_work(tp);
napi_enable(&tp->napi);
@@ -7336,9 +6876,6 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_init_phy(dev, tp);
if (netif_running(dev))
__rtl8169_resume(dev);
@@ -7352,13 +6889,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray) {
- rtl_pll_power_down(tp);
+ if (!tp->TxDescArray)
return 0;
- }
rtl_lock_work(tp);
- tp->saved_wolopts = __rtl8169_get_wol(tp);
__rtl8169_set_wol(tp, WAKE_ANY);
rtl_unlock_work(tp);
@@ -7383,11 +6917,8 @@ static int rtl8169_runtime_resume(struct device *device)
rtl_lock_work(tp);
__rtl8169_set_wol(tp, tp->saved_wolopts);
- tp->saved_wolopts = 0;
rtl_unlock_work(tp);
- rtl8169_init_phy(dev, tp);
-
__rtl8169_resume(dev);
return 0;
@@ -7455,7 +6986,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl8169_hw_reset(tp);
if (system_state == SYSTEM_POWER_OFF) {
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ if (tp->saved_wolopts) {
rtl_wol_suspend_quirk(tp);
rtl_wol_shutdown_quirk(tp);
}
@@ -7476,6 +7007,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
netif_napi_del(&tp->napi);
unregister_netdev(dev);
+ mdiobus_unregister(tp->mii_bus);
rtl_release_firmware(tp);
@@ -7539,12 +7071,20 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
flags = PCI_IRQ_LEGACY;
- } else {
+ break;
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
+ /* This version was reported to have issues with resume
+ * from suspend when using MSI-X
+ */
+ flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
+ break;
+ default:
flags = PCI_IRQ_ALL_TYPES;
}
@@ -7561,6 +7101,68 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
+static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ return rtl_readphy(tp, phyreg);
+}
+
+static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
+ int phyreg, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ rtl_writephy(tp, phyreg, val);
+
+ return 0;
+}
+
+static int r8169_mdio_register(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ struct phy_device *phydev;
+ struct mii_bus *new_bus;
+ int ret;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus)
+ return -ENOMEM;
+
+ new_bus->name = "r8169";
+ new_bus->priv = tp;
+ new_bus->parent = &pdev->dev;
+ new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
+ PCI_DEVID(pdev->bus->number, pdev->devfn));
+
+ new_bus->read = r8169_mdio_read_reg;
+ new_bus->write = r8169_mdio_write_reg;
+
+ ret = mdiobus_register(new_bus);
+ if (ret)
+ return ret;
+
+ phydev = mdiobus_get_phy(new_bus, 0);
+ if (!phydev) {
+ mdiobus_unregister(new_bus);
+ return -ENODEV;
+ }
+
+ /* PHY will be woken up in rtl_open() */
+ phy_suspend(phydev);
+
+ tp->mii_bus = new_bus;
+
+ return 0;
+}
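r8169_mdio_register() above is the usual devm MDIO bus bring-up: allocate, fill in name/priv/parent, give the bus a unique id, wire up the read/write hooks, register, then verify the expected PHY actually probed. In outline (callbacks and names are placeholders):

#include <linux/kernel.h>
#include <linux/phy.h>

/* Hypothetical MDIO accessors, for illustration only. */
static int example_mdio_read(struct mii_bus *bus, int addr, int regnum);
static int example_mdio_write(struct mii_bus *bus, int addr, int regnum,
			      u16 val);

static int example_mdio_register(struct device *parent, void *priv,
				 struct mii_bus **out)
{
	struct mii_bus *bus = devm_mdiobus_alloc(parent);
	int ret;

	if (!bus)
		return -ENOMEM;

	bus->name = "example";
	bus->priv = priv;
	bus->parent = parent;
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-0");
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;

	ret = mdiobus_register(bus);
	if (ret)
		return ret;

	if (!mdiobus_get_phy(bus, 0)) {	/* no PHY where expected */
		mdiobus_unregister(bus);
		return -ENODEV;
	}

	*out = bus;
	return 0;
}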
+
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
u32 data;
@@ -7614,19 +7216,48 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
}
}
+/* RTL8102e and RTL8168c onwards support csum_v2 */
+static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int rtl_jumbo_max(struct rtl8169_private *tp)
+{
+ /* Non-GBit versions don't support jumbo frames */
+ if (!tp->supports_gmii)
+ return JUMBO_1K;
+
+ switch (tp->mac_version) {
+ /* RTL8169 */
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ return JUMBO_7K;
+ /* RTL8168b */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ return JUMBO_4K;
+ /* RTL8168c */
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
+ return JUMBO_6K;
+ default:
+ return JUMBO_9K;
+ }
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
struct rtl8169_private *tp;
- struct mii_if_info *mii;
struct net_device *dev;
int chipset, region, i;
- int rc;
-
- if (netif_msg_drv(&debug)) {
- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
- MODULENAME, RTL8169_VERSION);
- }
+ int jumbo_max, rc;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
@@ -7638,19 +7269,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
- mii = &tp->mii;
- mii->dev = dev;
- mii->mdio_read = rtl_mdio_read;
- mii->mdio_write = rtl_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
- mii->supports_gmii = cfg->has_gmii;
-
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ tp->supports_gmii = cfg->has_gmii;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -7689,6 +7308,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, cfg->default_ver);
+ if (rtl_tbi_enabled(tp)) {
+ dev_err(&pdev->dev, "TBI fiber mode not supported\n");
+ return -ENODEV;
+ }
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if ((sizeof(dma_addr_t) > 4) &&
@@ -7734,24 +7358,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
- /* override BIOS settings, use userspace tools to enable WOL */
- __rtl8169_set_wol(tp, 0);
-
- if (rtl_tbi_enabled(tp)) {
- tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
- tp->phy_reset_enable = rtl8169_tbi_reset_enable;
- tp->phy_reset_pending = rtl8169_tbi_reset_pending;
- tp->link_ok = rtl8169_tbi_link_ok;
- tp->do_ioctl = rtl_tbi_ioctl;
- } else {
- tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
- tp->phy_reset_enable = rtl8169_xmii_reset_enable;
- tp->phy_reset_pending = rtl8169_xmii_reset_pending;
- tp->link_ok = rtl8169_xmii_link_ok;
- tp->do_ioctl = rtl_xmii_ioctl;
- }
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
mutex_init(&tp->wk.mutex);
u64_stats_init(&tp->rx_stats.syncp);
@@ -7789,6 +7396,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
tp->cp_cmd |= RxChkSum | RxVlan;
@@ -7800,16 +7408,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- switch (rtl_chip_infos[chipset].txd_version) {
- case RTL_TD_0:
- tp->tso_csum = rtl8169_tso_csum_v1;
- break;
- case RTL_TD_1:
+ if (rtl_chip_supports_csum_v2(tp)) {
tp->tso_csum = rtl8169_tso_csum_v2;
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- break;
- default:
- WARN_ON_ONCE(1);
+ } else {
+ tp->tso_csum = rtl8169_tso_csum_v1;
}
dev->hw_features |= NETIF_F_RXALL;
@@ -7817,14 +7420,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 60 - hw-specific max */
dev->min_mtu = ETH_ZLEN;
- dev->max_mtu = rtl_chip_infos[chipset].jumbo_max;
+ jumbo_max = rtl_jumbo_max(tp);
+ dev->max_mtu = jumbo_max;
tp->hw_start = cfg->hw_start;
tp->event_slow = cfg->event_slow;
tp->coalesce_info = cfg->coalesce_info;
- timer_setup(&tp->timer, rtl8169_phy_timer, 0);
-
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
@@ -7835,30 +7437,39 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- rc = register_netdev(dev);
- if (rc < 0)
+ rc = r8169_mdio_register(tp);
+ if (rc)
return rc;
+ /* chip gets powered up in rtl_open() */
+ rtl_pll_power_down(tp);
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_mdio_unregister;
+
netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr,
(u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
pci_irq_vector(pdev, 0));
- if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
- netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
- "tx checksumming: %s]\n",
- rtl_chip_infos[chipset].jumbo_max,
- tp->mac_version <= RTL_GIGA_MAC_VER_06 ? "ok" : "ko");
- }
+
+ if (jumbo_max > JUMBO_1K)
+ netif_info(tp, probe, dev,
+ "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
+ jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
+ "ok" : "ko");
if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
- netif_carrier_off(dev);
-
if (pci_dev_run_wake(pdev))
pm_runtime_put_sync(&pdev->dev);
return 0;
+
+err_mdio_unregister:
+ mdiobus_unregister(tp->mii_bus);
+ return rc;
}
static struct pci_driver rtl8169_pci_driver = {
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 27be51f0a421..f3f7477043ce 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS
config SH_ETH
tristate "Renesas SuperH Ethernet support"
- depends on HAS_DMA
depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
select CRC32
select MII
@@ -31,7 +30,6 @@ config SH_ETH
config RAVB
tristate "Renesas Ethernet AVB support"
- depends on HAS_DMA
depends on ARCH_RENESAS || COMPILE_TEST
select CRC32
select MII
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 68f122140966..c06f2df895c2 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
bool new_state = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable TX and RX here if the E-MAC link change is ignored */
+ if (priv->no_avb_link)
+ ravb_rcv_snd_disable(ndev);
if (phydev->link) {
if (phydev->duplex != priv->duplex) {
@@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_modify(ndev, ECMR, ECMR_TXF, 0);
new_state = true;
priv->link = phydev->link;
- if (priv->no_avb_link)
- ravb_rcv_snd_enable(ndev);
}
} else if (priv->link) {
new_state = true;
priv->link = 0;
priv->speed = 0;
priv->duplex = -1;
- if (priv->no_avb_link)
- ravb_rcv_snd_disable(ndev);
}
+ /* Enable TX and RX here if the E-MAC link change is ignored */
+ if (priv->no_avb_link && phydev->link)
+ ravb_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
}
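The ravb fix puts the whole adjust_link body under priv->lock and restructures the no_avb_link handling: RX/TX are unconditionally disabled on entry and re-enabled at the end only if the link is actually up, closing a window where the E-MAC could be left running across a link change. The locking skeleton, with hypothetical private struct and MAC helpers:

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

/* Hypothetical private struct and MAC helpers, for illustration. */
struct example_priv {
	spinlock_t lock;
};
static void example_rcv_snd_disable(struct net_device *ndev);
static void example_rcv_snd_enable(struct net_device *ndev);

static void example_adjust_link(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* stop the MAC while link parameters may be inconsistent */
	example_rcv_snd_disable(ndev);

	/* ... copy speed/duplex/link state from phydev ... */

	/* restart the MAC only if the link is actually up */
	if (phydev->link)
		example_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);
}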
@@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev)
return 0;
}
-static int ravb_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned long flags;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&priv->lock, flags);
- phy_ethtool_ksettings_get(ndev->phydev, cmd);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static int ravb_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned long flags;
- int error;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable TX and RX */
- ravb_rcv_snd_disable(ndev);
-
- error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
- if (error)
- goto error_exit;
-
- if (cmd->base.duplex == DUPLEX_FULL)
- priv->duplex = 1;
- else
- priv->duplex = 0;
-
- ravb_set_duplex(ndev);
-
-error_exit:
- mdelay(1);
-
- /* Enable TX and RX */
- ravb_rcv_snd_enable(ndev);
-
- mmiowb();
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return error;
-}
-
-static int ravb_nway_reset(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- int error = -ENODEV;
- unsigned long flags;
-
- if (ndev->phydev) {
- spin_lock_irqsave(&priv->lock, flags);
- error = phy_start_aneg(ndev->phydev);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- return error;
-}
-
static u32 ravb_get_msglevel(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -1226,7 +1167,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset)
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
int i = 0;
@@ -1258,7 +1199,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
break;
}
}
@@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops ravb_ethtool_ops = {
- .nway_reset = ravb_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_msglevel = ravb_get_msglevel,
.set_msglevel = ravb_set_msglevel,
.get_link = ethtool_op_get_link,
@@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.get_ringparam = ravb_get_ringparam,
.set_ringparam = ravb_set_ringparam,
.get_ts_info = ravb_get_ts_info,
- .get_link_ksettings = ravb_get_link_ksettings,
- .set_link_ksettings = ravb_set_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_wol = ravb_get_wol,
.set_wol = ravb_set_wol,
};
@@ -1623,7 +1564,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* TAG and timestamp required flag */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
skb_tx_timestamp(skb);
@@ -1656,7 +1597,8 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e9007b613f17..5573199c4536 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -439,10 +439,15 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
enum_index);
}
+static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
+{
+ return mdp->reg_offset[enum_index];
+}
+
static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
int enum_index)
{
- u16 offset = mdp->reg_offset[enum_index];
+ u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
return;
@@ -452,7 +457,7 @@ static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
- u16 offset = mdp->reg_offset[enum_index];
+ u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
return ~0U;
@@ -622,7 +627,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -672,7 +676,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -798,7 +801,6 @@ static struct sh_eth_cpu_data r8a77980_data = {
.hw_swap = 1,
.nbst = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -851,7 +853,6 @@ static struct sh_eth_cpu_data sh7724_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
@@ -898,7 +899,6 @@ static struct sh_eth_cpu_data sh7757_data = {
.hw_swap = 1,
.no_ade = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.rtrate = 1,
.dual_port = 1,
};
@@ -978,7 +978,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -1467,7 +1466,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
- sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
+ sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
/* all sh_eth int mask */
sh_eth_write(ndev, 0, EESIPR);
@@ -1527,9 +1526,9 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* mask reset */
if (mdp->cd->apr)
- sh_eth_write(ndev, APR_AP, APR);
+ sh_eth_write(ndev, 1, APR);
if (mdp->cd->mpr)
- sh_eth_write(ndev, MPR_MP, MPR);
+ sh_eth_write(ndev, 1, MPR);
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
@@ -1927,8 +1926,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
+ unsigned long flags;
int new_state = 0;
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* Disable TX and RX here if the E-MAC link change is ignored */
+ if (mdp->cd->no_psr || mdp->no_ether_link)
+ sh_eth_rcv_snd_disable(ndev);
+
if (phydev->link) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
@@ -1947,18 +1953,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
new_state = 1;
mdp->link = phydev->link;
- if (mdp->cd->no_psr || mdp->no_ether_link)
- sh_eth_rcv_snd_enable(ndev);
}
} else if (mdp->link) {
new_state = 1;
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
- if (mdp->cd->no_psr || mdp->no_ether_link)
- sh_eth_rcv_snd_disable(ndev);
}
+ /* Enable TX and RX here if the E-MAC link change is ignored */
+ if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
+ sh_eth_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
if (new_state && netif_msg_link(mdp))
phy_print_status(phydev);
}
@@ -2030,60 +2039,6 @@ static int sh_eth_phy_start(struct net_device *ndev)
return 0;
}
-static int sh_eth_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *cmd)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long flags;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&mdp->lock, flags);
- phy_ethtool_ksettings_get(ndev->phydev, cmd);
- spin_unlock_irqrestore(&mdp->lock, flags);
-
- return 0;
-}
-
-static int sh_eth_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long flags;
- int ret;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&mdp->lock, flags);
-
- /* disable tx and rx */
- sh_eth_rcv_snd_disable(ndev);
-
- ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
- if (ret)
- goto error_exit;
-
- if (cmd->base.duplex == DUPLEX_FULL)
- mdp->duplex = 1;
- else
- mdp->duplex = 0;
-
- if (mdp->cd->set_duplex)
- mdp->cd->set_duplex(ndev);
-
-error_exit:
- mdelay(1);
-
- /* enable tx and rx */
- sh_eth_rcv_snd_enable(ndev);
-
- spin_unlock_irqrestore(&mdp->lock, flags);
-
- return ret;
-}
-
/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
* version must be bumped as well. Just adding registers up to that
* limit is fine, as long as the existing register indices don't
@@ -2263,22 +2218,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
pm_runtime_put_sync(&mdp->pdev->dev);
}
-static int sh_eth_nway_reset(struct net_device *ndev)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long flags;
- int ret;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_start_aneg(ndev->phydev);
- spin_unlock_irqrestore(&mdp->lock, flags);
-
- return ret;
-}
-
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2429,7 +2368,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_regs_len = sh_eth_get_regs_len,
.get_regs = sh_eth_get_regs,
- .nway_reset = sh_eth_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_msglevel = sh_eth_get_msglevel,
.set_msglevel = sh_eth_set_msglevel,
.get_link = ethtool_op_get_link,
@@ -2438,8 +2377,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_sset_count = sh_eth_get_sset_count,
.get_ringparam = sh_eth_get_ringparam,
.set_ringparam = sh_eth_set_ringparam,
- .get_link_ksettings = sh_eth_get_link_ksettings,
- .set_link_ksettings = sh_eth_set_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_wol = sh_eth_get_wol,
.set_wol = sh_eth_set_wol,
};
@@ -2737,34 +2676,36 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
return 0;
}
-static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
+static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
const u8 *addr)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
u32 val;
val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
- iowrite32(val, reg);
+ iowrite32(val, mdp->tsu_addr + offset);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
val = addr[4] << 8 | addr[5];
- iowrite32(val, reg + 4);
+ iowrite32(val, mdp->tsu_addr + offset + 4);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
return 0;
}
-static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
+static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
u32 val;
- val = ioread32(reg);
+ val = ioread32(mdp->tsu_addr + offset);
addr[0] = (val >> 24) & 0xff;
addr[1] = (val >> 16) & 0xff;
addr[2] = (val >> 8) & 0xff;
addr[3] = val & 0xff;
- val = ioread32(reg + 4);
+ val = ioread32(mdp->tsu_addr + offset + 4);
addr[4] = (val >> 8) & 0xff;
addr[5] = val & 0xff;
}
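sh_eth switches the TSU entry helpers from passing raw ioremapped pointers to passing a u16 register offset, applying the mapping against mdp->tsu_addr in one place; the CAM-walking loops then just advance the offset by 8 per entry. A minimal sketch of the read side, assuming base is the ioremapped TSU window:

#include <linux/io.h>

/* Read one 6-byte CAM entry given its register offset. */
static void example_tsu_read_entry(void __iomem *base, u16 offset, u8 *addr)
{
	u32 val = ioread32(base + offset);

	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;

	val = ioread32(base + offset + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}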
@@ -2773,12 +2714,12 @@ static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
u8 c_addr[ETH_ALEN];
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
- sh_eth_tsu_read_entry(reg_offset, c_addr);
+ sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
if (ether_addr_equal(addr, c_addr))
return i;
}
@@ -2800,7 +2741,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int ret;
u8 blank[ETH_ALEN];
@@ -2817,7 +2758,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i, ret;
if (!mdp->cd->tsu)
@@ -2891,15 +2832,15 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
u8 addr[ETH_ALEN];
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
if (!mdp->cd->tsu)
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
- sh_eth_tsu_read_entry(reg_offset, addr);
+ sh_eth_tsu_read_entry(ndev, reg_offset, addr);
if (is_multicast_ether_addr(addr))
sh_eth_tsu_del_entry(ndev, addr);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 726c55a82dd7..f94be99cf400 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT {
/* APR */
enum APR_BIT {
- APR_AP = 0x00000001,
+ APR_AP = 0x0000ffff,
};
/* MPR */
enum MPR_BIT {
- MPR_MP = 0x00000001,
+ MPR_MP = 0x0000ffff,
};
/* TRSCER */
@@ -403,8 +403,7 @@ enum DESC_I_BIT {
/* RPADIR */
enum RPADIR_BIT {
- RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
- RPADIR_PADR = 0x0003f,
+ RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff,
};
/* FDR */
@@ -488,7 +487,6 @@ struct sh_eth_cpu_data {
u32 ecsipr_value;
u32 fdr_value;
u32 fcftr_value;
- u32 rpadir_value;
/* interrupt checking mask */
u32 tx_check;
@@ -560,10 +558,4 @@ struct sh_eth_private {
unsigned wol_enabled:1;
};
-static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
- int enum_index)
-{
- return mdp->tsu_addr + mdp->reg_offset[enum_index];
-}
-
#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 542b67d436df..c9aad0eda57f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -319,6 +319,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -329,6 +330,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3bac58d0f88b..c5c297e78d06 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
+
+obj-$(CONFIG_SFC_FALCON) += falcon/
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 23f0785c0573..7eeac3d6cfe8 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
return -EPROTONOSUPPORT;
}
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
+static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
bool is_mc_recip;
s32 rc;
- down_read(&efx->filter_sem);
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
table = efx->filter_state;
down_write(&table->lock);
@@ -4498,10 +4498,22 @@ out_unlock:
if (rss_locked)
mutex_unlock(&efx->rss_lock);
up_write(&table->lock);
- up_read(&efx->filter_sem);
return rc;
}
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ s32 ret;
+
+ down_read(&efx->filter_sem);
+ ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
+ up_read(&efx->filter_sem);
+
+ return ret;
+}
+
static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
/* no need to do anything here on EF10 */
@@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
if (rollback) {
netif_info(efx, drv, efx->net_dev,
@@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
"Broadcast filter insert failed rc=%d\n", rc);
@@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
if (vlan->vid != EFX_FILTER_VID_UNSPEC)
efx_filter_set_eth_local(&spec, vlan->vid, NULL);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
const char *um = multicast ? "Multicast" : "Unicast";
const char *encap_name = "";
@@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
"Broadcast filter insert failed rc=%d\n",
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 019cef1d3cf7..3d76fd1504c2 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
return -ENOMEM;
for (i = 0; i < efx->vf_count; i++) {
- random_ether_addr(nic_data->vf[i].mac);
+ eth_random_addr(nic_data->vf[i].mac);
nic_data->vf[i].efx = NULL;
nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
@@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf;
- u16 old_vlan, new_vlan;
+ u16 new_vlan;
int rc = 0, rc2 = 0;
if (vf_i >= efx->vf_count)
@@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
}
/* Do the actual vlan change */
- old_vlan = vf->vlan;
vf->vlan = new_vlan;
/* Restore everything in reverse order */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ad4a354ce570..330233286e78 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx)
static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->pkts_compl = 0;
tx_queue->bytes_compl = 0;
@@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
}
}
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
return spent;
}
@@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
+ channel->rx_list = NULL;
+
return 0;
fail:
@@ -1871,12 +1883,6 @@ static void efx_remove_filters(struct efx_nic *efx)
up_write(&efx->filter_sem);
}
-static void efx_restore_filters(struct efx_nic *efx)
-{
- down_read(&efx->filter_sem);
- efx->type->filter_table_restore(efx);
- up_read(&efx->filter_sem);
-}
/**************************************************************************
*
@@ -2688,6 +2694,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
mutex_lock(&efx->rss_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
method != RESET_TYPE_DATAPATH)
@@ -2745,9 +2752,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->type->rx_restore_rss_contexts)
efx->type->rx_restore_rss_contexts(efx);
mutex_unlock(&efx->rss_lock);
- down_read(&efx->filter_sem);
- efx_restore_filters(efx);
- up_read(&efx->filter_sem);
+ efx->type->filter_table_restore(efx);
+ up_write(&efx->filter_sem);
if (efx->type->sriov_reset)
efx->type->sriov_reset(efx);
@@ -2764,6 +2770,7 @@ fail:
efx->port_initialized = false;
mutex_unlock(&efx->rss_lock);
+ up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -3180,6 +3187,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
return true;
}
+static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
@@ -3472,7 +3480,9 @@ static int efx_pci_probe_main(struct efx_nic *efx)
efx_init_napi(efx);
+ down_write(&efx->filter_sem);
rc = efx->type->init(efx);
+ up_write(&efx->filter_sem);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise NIC\n");
@@ -3764,7 +3774,9 @@ static int efx_pm_resume(struct device *dev)
rc = efx->type->reset(efx, RESET_TYPE_ALL);
if (rc)
return rc;
+ down_write(&efx->filter_sem);
rc = efx->type->init(efx);
+ up_write(&efx->filter_sem);
if (rc)
return rc;
rc = efx_pm_thaw(dev);
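
Two things happen in efx.c above: filter_sem is now write-held across
the whole reset/init path, and each channel gains an rx_list so packets
collected during one NAPI poll are handed to the stack in a single
netif_receive_skb_list() call. A plain-C sketch of the batching idea
(struct pkt stands in for sk_buff, no kernel APIs):

#include <stdio.h>
#include <stddef.h>

struct pkt { int id; struct pkt *next; };

static struct pkt *rx_head, **rx_tail;	/* channel->rx_list analogue */

static void rx_deliver(struct pkt *p)
{
	p->next = NULL;
	if (rx_tail) {			/* batching active: queue it */
		*rx_tail = p;
		rx_tail = &p->next;
	} else {
		printf("pkt %d delivered immediately\n", p->id);
	}
}

static void process_channel(struct pkt *pkts, int n)
{
	rx_head = NULL;
	rx_tail = &rx_head;		/* prepare the batch list */

	for (int i = 0; i < n; i++)
		rx_deliver(&pkts[i]);

	for (struct pkt *p = rx_head; p; p = p->next)
		printf("pkt %d delivered from list\n", p->id);
	rx_tail = NULL;			/* batch window closed */
}

int main(void)
{
	struct pkt pkts[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	process_channel(pkts, 3);
	return 0;
}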
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 56049157a5af..1ccdb7a82e2a 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -963,6 +963,7 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
switch (info->flow_type) {
case TCP_V4_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 8edf20967c82..e045a5d6b938 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
if (!state)
return -ENOMEM;
efx->filter_state = state;
+ init_rwsem(&state->lock);
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 65568925c3ef..961b92979640 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -448,6 +448,7 @@ enum efx_sync_events_state {
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
* @sync_events_state: Current state of sync events on this channel
@@ -500,6 +501,8 @@ struct efx_channel {
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
+ struct list_head *rx_list;
+
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d2e254f2f72b..396ff01298cd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
return;
/* Pass the packet up */
- netif_receive_skb(skb);
+ if (channel->rx_list != NULL)
+ /* Add to list, will pass up later */
+ list_add_tail(&skb->list, channel->rx_list);
+ else
+ /* No list, so pass it up now */
+ netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 949aaef390b6..15c62c160953 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int card_idx = -1;
void __iomem *ioaddr;
int chip_idx = (int) ent->driver_data;
- int irq;
struct net_device *dev;
struct epic_private *ep;
int i, ret, option = 0, duplex = 0;
@@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_enable_device(pdev);
if (ret)
goto out;
- irq = pdev->irq;
if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
dev_err(&pdev->dev, "no PCI region space\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 05157442a980..b1b53f6c452f 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -74,7 +74,6 @@ static const char version[] =
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
-#include <linux/dma/pxa-dma.h>
#include <asm/io.h>
@@ -1795,7 +1794,6 @@ static int smc911x_probe(struct net_device *dev)
#ifdef SMC_USE_DMA
struct dma_slave_config config;
dma_cap_mask_t mask;
- struct pxad_param param;
#endif
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
@@ -1971,15 +1969,8 @@ static int smc911x_probe(struct net_device *dev)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- param.prio = PXAD_PRIO_LOWEST;
- param.drcmr = -1UL;
-
- lp->rxdma =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, &dev->dev, "rx");
- lp->txdma =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, &dev->dev, "tx");
+ lp->rxdma = dma_request_channel(mask, NULL, NULL);
+ lp->txdma = dma_request_channel(mask, NULL, NULL);
lp->rxdma_active = 0;
lp->txdma_active = 0;
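
Both SMSC hunks drop the pxad-specific filter function and parameter in
favour of a capability-mask-only request. A kernel-style sketch (not
standalone; it only builds in-tree) of the simplified call:

#include <linux/dmaengine.h>

static struct dma_chan *grab_any_slave_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* NULL filter and parameter: take the first capable channel */
	return dma_request_channel(mask, NULL, NULL);
}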
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 080428762858..b944828f9ea3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2019,17 +2019,10 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
# endif
if (lp->cfg.flags & SMC91X_USE_DMA) {
dma_cap_mask_t mask;
- struct pxad_param param;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- param.prio = PXAD_PRIO_LOWEST;
- param.drcmr = -1UL;
-
- lp->dma_chan =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, &dev->dev,
- "data");
+ lp->dma_chan = dma_request_channel(mask, NULL, NULL);
}
#endif
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index b337ee97e0c0..a27352229fc2 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -301,7 +301,6 @@ struct smc_local {
* as RX which can overrun memory and lose packets.
*/
#include <linux/dma-mapping.h>
-#include <linux/dma/pxa-dma.h>
#ifdef SMC_insl
#undef SMC_insl
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index e080d3e7c582..7aa5ebb6766c 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -232,8 +232,7 @@
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
-#define DESC_NUM 128
-#define NAPI_BUDGET (DESC_NUM / 2)
+#define DESC_NUM 256
#define DESC_SZ sizeof(struct netsec_de)
@@ -642,8 +641,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
tmp_skb = netsec_alloc_skb(priv, &td);
- dma_rmb();
-
tail = dring->tail;
if (!tmp_skb) {
@@ -657,8 +654,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
/* move tail ahead */
dring->tail = (dring->tail + 1) % DESC_NUM;
- dring->pkt_cnt--;
-
return skb;
}
@@ -731,25 +726,24 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- int done = 0, rx_num = 0;
+ int done = 0;
struct netsec_desc desc;
struct sk_buff *skb;
u16 len;
while (done < budget) {
- if (!rx_num) {
- rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
- dring->pkt_cnt += rx_num;
+ u16 idx = dring->tail;
+ struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
- /* move head 'rx_num' */
- dring->head = (dring->head + rx_num) % DESC_NUM;
+ if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+ break;
- rx_num = dring->pkt_cnt;
- if (!rx_num)
- break;
- }
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the netsec_de until we have
+ * verified the descriptor has been written back
+ */
+ dma_rmb();
done++;
- rx_num--;
skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
if (unlikely(!skb) || rx_info.err_flag) {
netif_err(priv, drv, priv->ndev,
@@ -780,11 +774,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
struct netsec_priv *priv;
- struct net_device *ndev;
int tx, rx, done, todo;
priv = container_of(napi, struct netsec_priv, napi);
- ndev = priv->ndev;
todo = budget;
do {
@@ -1666,7 +1658,7 @@ static int netsec_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision %d.%d\n",
hw_ver >> 16, hw_ver & 0xffff);
- netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
ndev->netdev_ops = &netsec_netdev_ops;
ndev->ethtool_ops = &netsec_ethtool_ops;
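
The netsec rewrite above stops trusting the hardware packet counter and
instead polls descriptor ownership directly. Condensed, the new RX loop
looks like this (a sketch following the hunks, not the exact code):

while (done < budget) {
	struct netsec_de *de = dring->vaddr + DESC_SZ * dring->tail;

	if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
		break;		/* still owned by the hardware */

	dma_rmb();		/* OWN check before any other field read */

	done++;
	/* ... read length/flags from *de, build and pass up the skb ... */
	dring->tail = (dring->tail + 1) % DESC_NUM;
}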
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 68e9e2640c62..99967a80a8c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -5,7 +5,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
- stmmac_tc.o $(stmmac-y)
+ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
+ $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 78fd0f8b8e81..1854f270ad66 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -36,12 +36,14 @@
#include "mmc.h"
/* Synopsys Core versions */
-#define DWMAC_CORE_3_40 0x34
-#define DWMAC_CORE_3_50 0x35
-#define DWMAC_CORE_4_00 0x40
-#define DWMAC_CORE_4_10 0x41
-#define DWMAC_CORE_5_00 0x50
-#define DWMAC_CORE_5_10 0x51
+#define DWMAC_CORE_3_40 0x34
+#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_4_00 0x40
+#define DWMAC_CORE_4_10 0x41
+#define DWMAC_CORE_5_00 0x50
+#define DWMAC_CORE_5_10 0x51
+#define DWXGMAC_CORE_2_10 0x21
+
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
/* These need to be power of two, and >= 4 */
@@ -398,6 +400,8 @@ struct mac_link {
u32 speed10;
u32 speed100;
u32 speed1000;
+ u32 speed2500;
+ u32 speed10000;
u32 duplex;
};
@@ -439,6 +443,7 @@ struct stmmac_rx_routing {
int dwmac100_setup(struct stmmac_priv *priv);
int dwmac1000_setup(struct stmmac_priv *priv);
int dwmac4_setup(struct stmmac_priv *priv);
+int dwxgmac2_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 3304095c934c..fad503820e04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -78,6 +78,8 @@ static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "snps,dwmac-4.00"},
{ .compatible = "snps,dwmac-4.10a"},
{ .compatible = "snps,dwmac"},
+ { .compatible = "snps,dwxgmac-2.10"},
+ { .compatible = "snps,dwxgmac"},
{ }
};
MODULE_DEVICE_TABLE(of, dwmac_generic_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index f08625a02cea..7b923362ee55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -61,6 +61,7 @@ struct rk_priv_data {
struct clk *mac_clk_tx;
struct clk *clk_mac_ref;
struct clk *clk_mac_refout;
+ struct clk *clk_mac_speed;
struct clk *aclk_mac;
struct clk *pclk_mac;
struct clk *clk_phy;
@@ -83,6 +84,64 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define PX30_GRF_GMAC_CON1 0x0904
+
+/* PX30_GRF_GMAC_CON1 */
+#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
+ GRF_BIT(6))
+#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define PX30_GMAC_SPEED_100M GRF_BIT(2)
+
+static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ int ret;
+
+ if (IS_ERR(bsp_priv->clk_mac_speed)) {
+ dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_10M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+ __func__, ret);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_100M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+ __func__, ret);
+
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops px30_ops = {
+ .set_to_rmii = px30_set_to_rmii,
+ .set_rmii_speed = px30_set_rmii_speed,
+};
+
#define RK3128_GRF_MAC_CON0 0x0168
#define RK3128_GRF_MAC_CON1 0x016c
@@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
}
}
+ bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
+ if (IS_ERR(bsp_priv->clk_mac_speed))
+ dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
+
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else {
@@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (!IS_ERR(bsp_priv->mac_clk_tx))
clk_prepare_enable(bsp_priv->mac_clk_tx);
+ if (!IS_ERR(bsp_priv->clk_mac_speed))
+ clk_prepare_enable(bsp_priv->clk_mac_speed);
+
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_prepare_enable(bsp_priv->clk_mac);
@@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
clk_disable_unprepare(bsp_priv->pclk_mac);
clk_disable_unprepare(bsp_priv->mac_clk_tx);
+
+ clk_disable_unprepare(bsp_priv->clk_mac_speed);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 2e6e2a96b4f2..f9a61f90cfbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -37,7 +37,7 @@
* is done in the "stmmac files"
*/
-/* struct emac_variant - Descrive dwmac-sun8i hardware variant
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
* @default_syscon_value: The default value of the EMAC register in syscon
* This value is used to properly disable the EMAC
* and used as a good starting value in case of the
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d37f17ca62fe..edb6053bd980 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,29 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
}
}
+static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+ if (qmode != MTL_QUEUE_AVB)
+ mtl_tx_op |= MTL_OP_MODE_TXQEN;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+}
+
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ value &= ~DMA_RBSZ_MASK;
+ value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -431,6 +454,8 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
+ .set_bfsize = dwmac4_set_bfsize,
};
const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
+ .set_bfsize = dwmac4_set_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index c63c1fe3f26b..22a4a6dbb1a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -120,6 +120,8 @@
/* DMA Rx Channel X Control register defines */
#define DMA_CONTROL_SR BIT(0)
+#define DMA_RBSZ_MASK GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT 1
/* Interrupt status per channel */
#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
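
The new DMA_RBSZ_MASK/DMA_RBSZ_SHIFT pair feeds the set_bfsize() helper
added in dwmac4_dma.c: clear the field, then OR in the shifted value,
masked so an oversized input cannot spill into neighbouring bits. A
self-contained userspace check of that read-modify-write:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))
#define RBSZ_MASK	GENMASK(14, 1)
#define RBSZ_SHIFT	1

static uint32_t set_bfsize(uint32_t reg, int bfsize)
{
	reg &= ~RBSZ_MASK;
	reg |= ((uint32_t)bfsize << RBSZ_SHIFT) & RBSZ_MASK;
	return reg;
}

int main(void)
{
	printf("%#x\n", set_bfsize(0xffff8001u, 1536)); /* 0xffff8c01 */
	return 0;
}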
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
new file mode 100644
index 000000000000..0a80fa25afe3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC definitions.
+ */
+
+#ifndef __STMMAC_DWXGMAC2_H__
+#define __STMMAC_DWXGMAC2_H__
+
+#include "common.h"
+
+/* Misc */
+#define XGMAC_JUMBO_LEN 16368
+
+/* MAC Registers */
+#define XGMAC_TX_CONFIG 0x00000000
+#define XGMAC_CONFIG_SS_OFF 29
+#define XGMAC_CONFIG_SS_MASK GENMASK(30, 29)
+#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500 (0x2 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_1000 (0x3 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SARC GENMASK(22, 20)
+#define XGMAC_CONFIG_SARC_SHIFT 20
+#define XGMAC_CONFIG_JD BIT(16)
+#define XGMAC_CONFIG_TE BIT(0)
+#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD)
+#define XGMAC_RX_CONFIG 0x00000004
+#define XGMAC_CONFIG_ARPEN BIT(31)
+#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
+#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_S2KP BIT(11)
+#define XGMAC_CONFIG_IPC BIT(9)
+#define XGMAC_CONFIG_JE BIT(8)
+#define XGMAC_CONFIG_WD BIT(7)
+#define XGMAC_CONFIG_GPSLCE BIT(6)
+#define XGMAC_CONFIG_CST BIT(2)
+#define XGMAC_CONFIG_ACS BIT(1)
+#define XGMAC_CONFIG_RE BIT(0)
+#define XGMAC_CORE_INIT_RX 0
+#define XGMAC_PACKET_FILTER 0x00000008
+#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_PM BIT(4)
+#define XGMAC_FILTER_HMC BIT(2)
+#define XGMAC_FILTER_PR BIT(0)
+#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
+#define XGMAC_RXQ_CTRL0 0x000000a0
+#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
+#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
+#define XGMAC_RXQ_CTRL2 0x000000a8
+#define XGMAC_RXQ_CTRL3 0x000000ac
+#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
+#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_PMTIS BIT(4)
+#define XGMAC_INT_EN 0x000000b4
+#define XGMAC_TSIE BIT(12)
+#define XGMAC_LPIIE BIT(5)
+#define XGMAC_PMTIE BIT(4)
+#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE)
+#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
+#define XGMAC_PT GENMASK(31, 16)
+#define XGMAC_PT_SHIFT 16
+#define XGMAC_TFE BIT(1)
+#define XGMAC_RX_FLOW_CTRL 0x00000090
+#define XGMAC_RFE BIT(0)
+#define XGMAC_PMT 0x000000c0
+#define XGMAC_GLBLUCAST BIT(9)
+#define XGMAC_RWKPKTEN BIT(2)
+#define XGMAC_MGKPKTEN BIT(1)
+#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_HW_FEATURE0 0x0000011c
+#define XGMAC_HWFEAT_SAVLANINS BIT(27)
+#define XGMAC_HWFEAT_RXCOESEL BIT(16)
+#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_TSSEL BIT(12)
+#define XGMAC_HWFEAT_AVSEL BIT(11)
+#define XGMAC_HWFEAT_RAVSEL BIT(10)
+#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MGKSEL BIT(7)
+#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_GMIISEL BIT(1)
+#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
+#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
+#define XGMAC_HW_FEATURE2 0x00000124
+#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24)
+#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18)
+#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
+#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
+#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_MDIO_ADDR 0x00000200
+#define XGMAC_MDIO_DATA 0x00000204
+#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_ADDR0_HIGH 0x00000300
+#define XGMAC_AE BIT(31)
+#define XGMAC_DCS GENMASK(19, 16)
+#define XGMAC_DCS_SHIFT 16
+#define XGMAC_ADDR0_LOW 0x00000304
+#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_TIMESTAMP_STATUS 0x00000d20
+#define XGMAC_TXTSC BIT(15)
+#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
+#define XGMAC_TXTSSTSLO GENMASK(30, 0)
+#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+
+/* MTL Registers */
+#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_ETSALG GENMASK(6, 5)
+#define XGMAC_WRR (0x0 << 5)
+#define XGMAC_WFQ (0x1 << 5)
+#define XGMAC_DWRR (0x2 << 5)
+#define XGMAC_RAA BIT(2)
+#define XGMAC_MTL_INT_STATUS 0x00001020
+#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
+#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
+#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
+#define XGMAC_TQS GENMASK(25, 16)
+#define XGMAC_TQS_SHIFT 16
+#define XGMAC_TTC GENMASK(6, 4)
+#define XGMAC_TTC_SHIFT 4
+#define XGMAC_TXQEN GENMASK(3, 2)
+#define XGMAC_TXQEN_SHIFT 2
+#define XGMAC_TSF BIT(1)
+#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x)))
+#define XGMAC_RQS GENMASK(25, 16)
+#define XGMAC_RQS_SHIFT 16
+#define XGMAC_EHFC BIT(7)
+#define XGMAC_RSF BIT(5)
+#define XGMAC_RTC GENMASK(1, 0)
+#define XGMAC_RTC_SHIFT 0
+#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
+#define XGMAC_RXOIE BIT(16)
+#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
+#define XGMAC_RXOVFIS BIT(16)
+#define XGMAC_ABPSIS BIT(1)
+#define XGMAC_TXUNFIS BIT(0)
+
+/* DMA Registers */
+#define XGMAC_DMA_MODE 0x00003000
+#define XGMAC_SWR BIT(0)
+#define XGMAC_DMA_SYSBUS_MODE 0x00003004
+#define XGMAC_WR_OSR_LMT GENMASK(29, 24)
+#define XGMAC_WR_OSR_LMT_SHIFT 24
+#define XGMAC_RD_OSR_LMT GENMASK(21, 16)
+#define XGMAC_RD_OSR_LMT_SHIFT 16
+#define XGMAC_EN_LPI BIT(15)
+#define XGMAC_LPI_XIT_PKT BIT(14)
+#define XGMAC_AAL BIT(12)
+#define XGMAC_BLEN GENMASK(7, 1)
+#define XGMAC_BLEN256 BIT(7)
+#define XGMAC_BLEN128 BIT(6)
+#define XGMAC_BLEN64 BIT(5)
+#define XGMAC_BLEN32 BIT(4)
+#define XGMAC_BLEN16 BIT(3)
+#define XGMAC_BLEN8 BIT(2)
+#define XGMAC_BLEN4 BIT(1)
+#define XGMAC_UNDEF BIT(0)
+#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_PBLx8 BIT(16)
+#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
+#define XGMAC_TxPBL GENMASK(21, 16)
+#define XGMAC_TxPBL_SHIFT 16
+#define XGMAC_TSE BIT(12)
+#define XGMAC_OSP BIT(4)
+#define XGMAC_TXST BIT(0)
+#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
+#define XGMAC_RxPBL GENMASK(21, 16)
+#define XGMAC_RxPBL_SHIFT 16
+#define XGMAC_RXST BIT(0)
+#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_RING_LEN(x) (0x00003130 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_RING_LEN(x) (0x00003134 + (0x80 * (x)))
+#define XGMAC_DMA_CH_INT_EN(x) (0x00003138 + (0x80 * (x)))
+#define XGMAC_NIE BIT(15)
+#define XGMAC_AIE BIT(14)
+#define XGMAC_RBUE BIT(7)
+#define XGMAC_RIE BIT(6)
+#define XGMAC_TIE BIT(0)
+#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
+ XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
+#define XGMAC_RWT GENMASK(7, 0)
+#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
+#define XGMAC_NIS BIT(15)
+#define XGMAC_AIS BIT(14)
+#define XGMAC_FBE BIT(12)
+#define XGMAC_RBU BIT(7)
+#define XGMAC_RI BIT(6)
+#define XGMAC_TPS BIT(1)
+#define XGMAC_TI BIT(0)
+
+/* Descriptors */
+#define XGMAC_TDES2_IOC BIT(31)
+#define XGMAC_TDES2_TTSE BIT(30)
+#define XGMAC_TDES2_B2L GENMASK(29, 16)
+#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_B1L GENMASK(13, 0)
+#define XGMAC_TDES3_OWN BIT(31)
+#define XGMAC_TDES3_CTXT BIT(30)
+#define XGMAC_TDES3_FD BIT(29)
+#define XGMAC_TDES3_LD BIT(28)
+#define XGMAC_TDES3_CPC GENMASK(27, 26)
+#define XGMAC_TDES3_CPC_SHIFT 26
+#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_THL GENMASK(22, 19)
+#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_CIC GENMASK(17, 16)
+#define XGMAC_TDES3_CIC_SHIFT 16
+#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES3_OWN BIT(31)
+#define XGMAC_RDES3_CTXT BIT(30)
+#define XGMAC_RDES3_IOC BIT(30)
+#define XGMAC_RDES3_LD BIT(28)
+#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_ES BIT(15)
+#define XGMAC_RDES3_PL GENMASK(13, 0)
+#define XGMAC_RDES3_TSD BIT(6)
+#define XGMAC_RDES3_TSA BIT(4)
+
+#endif /* __STMMAC_DWXGMAC2_H__ */
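
Several of the macros above carve a per-queue slice out of one shared
register: queue x owns 2 bits in RXQ_CTRL0 (RXQEN) and 8 bits in
RXQ_CTRL2/3 (PSRQ). A small userspace program to print the generated
masks, assuming the same GENMASK arithmetic:

#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))
#define RXQEN(x)	GENMASK((x) * 2 + 1, (x) * 2)
#define RXQEN_SHIFT(x)	((x) * 2)
#define PSRQ(x)		GENMASK((x) * 8 + 7, (x) * 8)

int main(void)
{
	for (int q = 0; q < 4; q++)
		printf("q%d: RXQEN=%#010x PSRQ=%#010x dcb_en=%#010x\n",
		       q, RXQEN(q), PSRQ(q), 0x2u << RXQEN_SHIFT(q));
	return 0;
}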
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
new file mode 100644
index 000000000000..d182f82f7b58
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include "stmmac.h"
+#include "dwxgmac2.h"
+
+static void dwxgmac2_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int mtu = dev->mtu;
+ u32 tx, rx;
+
+ tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ tx |= XGMAC_CORE_INIT_TX;
+ rx |= XGMAC_CORE_INIT_RX;
+
+ if (mtu >= 9000) {
+ rx |= XGMAC_CONFIG_GPSLCE;
+ rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT;
+ rx |= XGMAC_CONFIG_WD;
+ } else if (mtu > 2000) {
+ rx |= XGMAC_CONFIG_JE;
+ } else if (mtu > 1500) {
+ rx |= XGMAC_CONFIG_S2KP;
+ }
+
+ if (hw->ps) {
+ tx |= XGMAC_CONFIG_TE;
+ tx &= ~hw->link.speed_mask;
+
+ switch (hw->ps) {
+ case SPEED_10000:
+ tx |= hw->link.speed10000;
+ break;
+ case SPEED_2500:
+ tx |= hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ default:
+ tx |= hw->link.speed1000;
+ break;
+ }
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+ writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+}
+
+static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ if (enable) {
+ tx |= XGMAC_CONFIG_TE;
+ rx |= XGMAC_CONFIG_RE;
+ } else {
+ tx &= ~XGMAC_CONFIG_TE;
+ rx &= ~XGMAC_CONFIG_RE;
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ if (hw->rx_csum)
+ value |= XGMAC_CONFIG_IPC;
+ else
+ value &= ~XGMAC_CONFIG_IPC;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
+}
+
+static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+ writel(value, ioaddr + XGMAC_RXQ_CTRL0);
+}
+
+static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSRQ(queue);
+ value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_RAA;
+
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= XGMAC_RAA;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_ETSALG;
+
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= XGMAC_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= XGMAC_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= XGMAC_DWRR;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
+ u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_QxMDMACH(queue);
+ value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 stat, en;
+
+ en = readl(ioaddr + XGMAC_INT_EN);
+ stat = readl(ioaddr + XGMAC_INT_STATUS);
+
+ stat &= en;
+
+ if (stat & XGMAC_PMTIS) {
+ x->irq_receive_pmt_irq_n++;
+ readl(ioaddr + XGMAC_PMT);
+ }
+
+ return 0;
+}
+
+static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int ret = 0;
+ u32 status;
+
+ status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
+ if (status & BIT(chan)) {
+ u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+
+ if (chan_status & XGMAC_RXOVFIS)
+ ret |= CORE_IRQ_MTL_RX_OVERFLOW;
+
+ writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+ }
+
+ return ret;
+}
+
+static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 i;
+
+ if (fc & FLOW_RX)
+ writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
+ if (fc & FLOW_TX) {
+ for (i = 0; i < tx_cnt; i++) {
+ u32 value = XGMAC_TFE;
+
+ if (duplex)
+ value |= pause_time << XGMAC_PT_SHIFT;
+
+ writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
+ }
+ }
+}
+
+static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 val = 0x0;
+
+ if (mode & WAKE_MAGIC)
+ val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
+ if (mode & WAKE_UCAST)
+ val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
+ if (val) {
+ u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
+ cfg |= XGMAC_CONFIG_RE;
+ writel(cfg, ioaddr + XGMAC_RX_CONFIG);
+ }
+
+ writel(val, ioaddr + XGMAC_PMT);
+}
+
+static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (addr[5] << 8) | addr[4];
+ writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH);
+
+ value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(value, ioaddr + XGMAC_ADDR0_LOW);
+}
+
+static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH);
+ lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static void dwxgmac2_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 value = XGMAC_FILTER_RA;
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= XGMAC_FILTER_PR;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ value |= XGMAC_FILTER_PM;
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0));
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1));
+ }
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+}
+
+const struct stmmac_ops dwxgmac210_ops = {
+ .core_init = dwxgmac2_core_init,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxgmac2_rx_queue_enable,
+ .rx_queue_prio = dwxgmac2_rx_queue_prio,
+ .tx_queue_prio = NULL,
+ .rx_queue_routing = NULL,
+ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = NULL,
+ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
+ .config_cbs = NULL,
+ .dump_regs = NULL,
+ .host_irq_status = dwxgmac2_host_irq_status,
+ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
+ .flow_ctrl = dwxgmac2_flow_ctrl,
+ .pmt = dwxgmac2_pmt,
+ .set_umac_addr = dwxgmac2_set_umac_addr,
+ .get_umac_addr = dwxgmac2_get_umac_addr,
+ .set_eee_mode = NULL,
+ .reset_eee_mode = NULL,
+ .set_eee_timer = NULL,
+ .set_eee_pls = NULL,
+ .pcs_ctrl_ane = NULL,
+ .pcs_rane = NULL,
+ .pcs_get_adv_lp = NULL,
+ .debug = NULL,
+ .set_filter = dwxgmac2_set_filter,
+};
+
+int dwxgmac2_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tXGMAC2\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = 0;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+ mac->link.speed1000 = XGMAC_CONFIG_SS_1000;
+ mac->link.speed2500 = XGMAC_CONFIG_SS_2500;
+ mac->link.speed10000 = XGMAC_CONFIG_SS_10000;
+ mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
+
+ mac->mii.addr = XGMAC_MDIO_ADDR;
+ mac->mii.data = XGMAC_MDIO_DATA;
+ mac->mii.addr_shift = 16;
+ mac->mii.addr_mask = GENMASK(20, 16);
+ mac->mii.reg_shift = 0;
+ mac->mii.reg_mask = GENMASK(15, 0);
+ mac->mii.clk_csr_shift = 19;
+ mac->mii.clk_csr_mask = GENMASK(21, 19);
+
+ return 0;
+}
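
dwxgmac2_set_umac_addr() and dwxgmac2_get_umac_addr() above pack the
six MAC bytes into the ADDR0 register pair: bytes 0-3 little-endian in
the LOW word, bytes 4-5 in the HIGH word (which also carries the
Address Enable bit on write). A standalone round-trip check:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void mac_to_regs(const uint8_t *a, uint32_t *hi, uint32_t *lo)
{
	*hi = ((uint32_t)a[5] << 8) | a[4];
	*lo = ((uint32_t)a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0];
}

static void regs_to_mac(uint32_t hi, uint32_t lo, uint8_t *a)
{
	for (int i = 0; i < 4; i++)
		a[i] = (lo >> (8 * i)) & 0xff;
	a[4] = hi & 0xff;
	a[5] = (hi >> 8) & 0xff;
}

int main(void)
{
	uint8_t in[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 }, out[6];
	uint32_t hi, lo;

	mac_to_regs(in, &hi, &lo);
	regs_to_mac(hi, lo, out);
	printf("roundtrip %s\n", memcmp(in, out, 6) ? "failed" : "ok");
	return 0;
}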
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
new file mode 100644
index 000000000000..1d858fdec997
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+ int ret = tx_done;
+
+ if (unlikely(tdes3 & XGMAC_TDES3_OWN))
+ return tx_dma_own;
+ if (likely(!(tdes3 & XGMAC_TDES3_LD)))
+ return tx_not_ls;
+
+ return ret;
+}
+
+static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int ret = good_frame;
+
+ if (unlikely(rdes3 & XGMAC_RDES3_OWN))
+ return dma_own;
+ if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return discard_frame;
+ if (unlikely(rdes3 & XGMAC_RDES3_ES))
+ ret = discard_frame;
+
+ return ret;
+}
+
+static int dwxgmac2_get_tx_len(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L);
+}
+
+static int dwxgmac2_get_tx_owner(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0;
+}
+
+static void dwxgmac2_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN);
+}
+
+static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
+{
+ p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
+}
+
+static int dwxgmac2_get_tx_ls(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
+}
+
+static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
+}
+
+static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE);
+}
+
+static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return 0; /* Not supported */
+}
+
+static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns = 0;
+
+ ns += le32_to_cpu(p->des1) * 1000000000ULL;
+ ns += le32_to_cpu(p->des0);
+
+ *ts = ns;
+}
+
+static int dwxgmac2_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ bool desc_valid, ts_valid;
+
+ desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
+ ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
+
+ if (likely(desc_valid && ts_valid))
+ return 0;
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int ret = -EBUSY;
+
+ if (likely(rdes3 & XGMAC_RDES3_CDA)) {
+ ret = dwxgmac2_rx_check_timestamp(next_desc);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ dwxgmac2_set_rx_owner(p, disable_rx_ic);
+}
+
+static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls, unsigned int tot_pkt_len)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
+
+ tdes3 = tot_pkt_len & XGMAC_TDES3_FL;
+ if (is_fs)
+ tdes3 |= XGMAC_TDES3_FD;
+ else
+ tdes3 &= ~XGMAC_TDES3_FD;
+
+ if (csum_flag)
+ tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT;
+ else
+ tdes3 &= ~XGMAC_TDES3_CIC;
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the OWN bit for the first frame has to be set, all other
+ * descriptors for the same frame have to be written before it,
+ * to avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ if (len1)
+ p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
+ if (len2)
+ p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
+ XGMAC_TDES2_B2L);
+ if (is_fs) {
+ tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE;
+ tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) &
+ XGMAC_TDES3_THL;
+ tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL;
+ } else {
+ tdes3 &= ~XGMAC_TDES3_FD;
+ }
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the OWN bit for the first frame has to be set, all other
+ * descriptors for the same frame have to be written before it,
+ * to avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC);
+}
+
+static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = cpu_to_le32(mss);
+ p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
+}
+
+static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des0);
+}
+
+static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(addr);
+ p->des1 = 0;
+}
+
+static void dwxgmac2_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+const struct stmmac_desc_ops dwxgmac210_desc_ops = {
+ .tx_status = dwxgmac2_get_tx_status,
+ .rx_status = dwxgmac2_get_rx_status,
+ .get_tx_len = dwxgmac2_get_tx_len,
+ .get_tx_owner = dwxgmac2_get_tx_owner,
+ .set_tx_owner = dwxgmac2_set_tx_owner,
+ .set_rx_owner = dwxgmac2_set_rx_owner,
+ .get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_frame_len = dwxgmac2_get_rx_frame_len,
+ .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
+ .get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status,
+ .get_timestamp = dwxgmac2_get_timestamp,
+ .set_tx_ic = dwxgmac2_set_tx_ic,
+ .prepare_tx_desc = dwxgmac2_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwxgmac2_prepare_tso_tx_desc,
+ .release_tx_desc = dwxgmac2_release_tx_desc,
+ .init_rx_desc = dwxgmac2_init_rx_desc,
+ .init_tx_desc = dwxgmac2_init_tx_desc,
+ .set_mss = dwxgmac2_set_mss,
+ .get_addr = dwxgmac2_get_addr,
+ .set_addr = dwxgmac2_set_addr,
+ .clear = dwxgmac2_clear,
+};
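
The dma_wmb() in the two prepare_tx_desc helpers above enforces the
ownership hand-off order: every descriptor field is written first, the
barrier orders those stores, and only then does des3 carry the OWN bit,
so the DMA engine can never observe a half-written descriptor. Boiled
down to a hypothetical helper (same ordering, details elided):

static void publish_tx_desc(struct dma_desc *p, u32 len, u32 tdes3)
{
	p->des2 = cpu_to_le32(len & XGMAC_TDES2_B1L);	/* body fields */

	/* All non-OWN fields must be visible to the device before
	 * the word carrying OWN is stored.
	 */
	dma_wmb();

	p->des3 = cpu_to_le32(tdes3 | XGMAC_TDES3_OWN);
}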
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
new file mode 100644
index 000000000000..20909036e002
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/iopoll.h>
+#include "stmmac.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_MODE);
+
+ /* DMA SW reset */
+ writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
+
+ return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
+ !(value & XGMAC_SWR), 0, 100000);
+}
+
+static void dwxgmac2_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+
+ if (dma_cfg->aal)
+ value |= XGMAC_AAL;
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+}
+
+static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+
+ if (dma_cfg->pblx8)
+ value |= XGMAC_PBLx8;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RxPBL;
+ value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TxPBL;
+ value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
+ value |= XGMAC_OSP;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ int i;
+
+ if (axi->axi_lpi_en)
+ value |= XGMAC_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= XGMAC_LPI_XIT_PKT;
+
+ value &= ~XGMAC_WR_OSR_LMT;
+ value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
+ XGMAC_WR_OSR_LMT;
+
+ value &= ~XGMAC_RD_OSR_LMT;
+ value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
+ XGMAC_RD_OSR_LMT;
+
+ value &= ~XGMAC_BLEN;
+ for (i = 0; i < AXI_BLEN; i++) {
+ if (axi->axi_blen[i])
+ value &= ~XGMAC_UNDEF;
+
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= XGMAC_BLEN256;
+ break;
+ case 128:
+ value |= XGMAC_BLEN128;
+ break;
+ case 64:
+ value |= XGMAC_BLEN64;
+ break;
+ case 32:
+ value |= XGMAC_BLEN32;
+ break;
+ case 16:
+ value |= XGMAC_BLEN16;
+ break;
+ case 8:
+ value |= XGMAC_BLEN8;
+ break;
+ case 4:
+ value |= XGMAC_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+}
+
+static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+ unsigned int rqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_RSF;
+ } else {
+ value &= ~XGMAC_RSF;
+ value &= ~XGMAC_RTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_RTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_RTC_SHIFT;
+ else
+ value |= 0x3 << XGMAC_RTC_SHIFT;
+ }
+
+ value &= ~XGMAC_RQS;
+ value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+
+ writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+
+ /* Enable MTL RX overflow */
+ value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
+ writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
+}
+
+static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+ unsigned int tqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_TSF;
+ } else {
+ value &= ~XGMAC_TSF;
+ value &= ~XGMAC_TTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_TTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_TTC_SHIFT;
+ else if (mode <= 128)
+ value |= 0x3 << XGMAC_TTC_SHIFT;
+ else if (mode <= 192)
+ value |= 0x4 << XGMAC_TTC_SHIFT;
+ else if (mode <= 256)
+ value |= 0x5 << XGMAC_TTC_SHIFT;
+ else if (mode <= 384)
+ value |= 0x6 << XGMAC_TTC_SHIFT;
+ else
+ value |= 0x7 << XGMAC_TTC_SHIFT;
+ }
+
+ value &= ~XGMAC_TXQEN;
+ if (qmode != MTL_QUEUE_AVB)
+ value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ else
+ value |= 0x1 << XGMAC_TXQEN_SHIFT;
+
+ value &= ~XGMAC_TQS;
+ value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
+
+ writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+}
+
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value |= XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value |= XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value &= ~XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value |= XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ value |= XGMAC_CONFIG_RE;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ value &= ~XGMAC_CONFIG_RE;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan)
+{
+ u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ int ret = 0;
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & XGMAC_AIS)) {
+ if (unlikely(intr_status & XGMAC_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret |= tx_hard_error;
+ }
+ if (unlikely(intr_status & XGMAC_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret |= tx_hard_error;
+ }
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & XGMAC_NIS)) {
+ x->normal_irq_n++;
+
+ if (likely(intr_status & XGMAC_RI)) {
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ if (likely(value & XGMAC_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & XGMAC_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ }
+
+ /* Clear interrupts */
+ writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+
+ return ret;
+}
+
+static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap;
+
+ /* MAC HW feature 0 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
+ dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
+ dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
+ dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
+ dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+
+ /* MAC HW feature 1 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->tx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);
+
+ /* MAC HW feature 2 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
+ dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
+ dma_cap->number_tx_channel =
+ ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
+ dma_cap->number_rx_channel =
+ ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_queues =
+ ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
+ dma_cap->number_rx_queues =
+ ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+}
+
+static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
+{
+ u32 i;
+
+ for (i = 0; i < nchan; i++)
+ writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
+}
+
+static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_TSE;
+ else
+ value &= ~XGMAC_TSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+}
+
+static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
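+ /* The receive buffer size field starts at bit 1 of RX_CONTROL */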
+ value |= bfsize << 1;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+}
+
+const struct stmmac_dma_ops dwxgmac210_dma_ops = {
+ .reset = dwxgmac2_dma_reset,
+ .init = dwxgmac2_dma_init,
+ .init_chan = dwxgmac2_dma_init_chan,
+ .init_rx_chan = dwxgmac2_dma_init_rx_chan,
+ .init_tx_chan = dwxgmac2_dma_init_tx_chan,
+ .axi = dwxgmac2_dma_axi,
+ .dump_regs = NULL,
+ .dma_rx_mode = dwxgmac2_dma_rx_mode,
+ .dma_tx_mode = dwxgmac2_dma_tx_mode,
+ .enable_dma_irq = dwxgmac2_enable_dma_irq,
+ .disable_dma_irq = dwxgmac2_disable_dma_irq,
+ .start_tx = dwxgmac2_dma_start_tx,
+ .stop_tx = dwxgmac2_dma_stop_tx,
+ .start_rx = dwxgmac2_dma_start_rx,
+ .stop_rx = dwxgmac2_dma_stop_rx,
+ .dma_interrupt = dwxgmac2_dma_interrupt,
+ .get_hw_feature = dwxgmac2_get_hw_feature,
+ .rx_watchdog = dwxgmac2_rx_watchdog,
+ .set_rx_ring_len = dwxgmac2_set_rx_ring_len,
+ .set_tx_ring_len = dwxgmac2_set_tx_ring_len,
+ .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
+ .enable_tso = dwxgmac2_enable_tso,
+ .set_bfsize = dwxgmac2_set_bfsize,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 1f50e83cafb2..357309a6d6a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -72,6 +72,7 @@ static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
static const struct stmmac_hwif_entry {
bool gmac;
bool gmac4;
+ bool xgmac;
u32 min_id;
const struct stmmac_regs_off regs;
const void *desc;
@@ -87,6 +88,7 @@ static const struct stmmac_hwif_entry {
{
.gmac = false,
.gmac4 = false,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -103,6 +105,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = true,
.gmac4 = false,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -119,6 +122,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -135,6 +139,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_4_00,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -151,6 +156,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_4_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -167,6 +173,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_5_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -180,11 +187,29 @@ static const struct stmmac_hwif_entry {
.tc = &dwmac510_tc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
- }
+ }, {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = true,
+ .min_id = DWXGMAC_CORE_2_10,
+ .regs = {
+ .ptp_off = PTP_XGMAC_OFFSET,
+ .mmc_off = 0,
+ },
+ .desc = &dwxgmac210_desc_ops,
+ .dma = &dwxgmac210_dma_ops,
+ .mac = &dwxgmac210_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwxgmac2_setup,
+ .quirks = NULL,
+ },
};
int stmmac_hwif_init(struct stmmac_priv *priv)
{
+ bool needs_xgmac = priv->plat->has_xgmac;
bool needs_gmac4 = priv->plat->has_gmac4;
bool needs_gmac = priv->plat->has_gmac;
const struct stmmac_hwif_entry *entry;
@@ -195,7 +220,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
if (needs_gmac) {
id = stmmac_get_id(priv, GMAC_VERSION);
- } else if (needs_gmac4) {
+ } else if (needs_gmac4 || needs_xgmac) {
id = stmmac_get_id(priv, GMAC4_VERSION);
} else {
id = 0;
@@ -229,6 +254,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
continue;
if (needs_gmac4 ^ entry->gmac4)
continue;
+ if (needs_xgmac ^ entry->xgmac)
+ continue;
/* Use synopsys_id var because some setups can override this */
if (priv->synopsys_id < entry->min_id)
continue;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e44e7b26ce82..92b8944f26e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,8 @@ struct stmmac_dma_ops {
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+ void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
+ void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -235,6 +237,10 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
#define stmmac_enable_tso(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_dma_qmode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, qmode, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
struct mac_device_info;
struct net_device;
@@ -441,17 +447,22 @@ struct stmmac_mode_ops {
struct stmmac_priv;
struct tc_cls_u32_offload;
+struct tc_cbs_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
int (*setup_cls_u32)(struct stmmac_priv *priv,
struct tc_cls_u32_offload *cls);
+ int (*setup_cbs)(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
stmmac_do_callback(__priv, tc, init, __args)
#define stmmac_tc_setup_cls_u32(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+#define stmmac_tc_setup_cbs(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cbs, __args)
struct stmmac_regs_off {
u32 ptp_off;
@@ -468,6 +479,9 @@ extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cba46b62a1cd..ff1ffb46198a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -51,9 +51,10 @@
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
+#include "dwxgmac2.h"
#include "hwif.h"
-#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
@@ -262,6 +263,21 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
else
priv->clk_csr = 0;
}
+
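+ /* XGMAC uses its own CSR clock ranges to select the MDC divider */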
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ priv->clk_csr = 0x5;
+ else if (clk_rate > 350000000)
+ priv->clk_csr = 0x4;
+ else if (clk_rate > 300000000)
+ priv->clk_csr = 0x3;
+ else if (clk_rate > 250000000)
+ priv->clk_csr = 0x2;
+ else if (clk_rate > 150000000)
+ priv->clk_csr = 0x1;
+ else
+ priv->clk_csr = 0x0;
+ }
}
static void print_pkt(unsigned char *buf, int len)
@@ -498,7 +514,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (!priv->hwts_rx_en)
return;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4)
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
desc = np;
/* Check if timestamp is available */
@@ -540,6 +556,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
u32 value = 0;
u32 sec_inc;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -575,7 +594,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* PTP v1, UDP, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -610,7 +629,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -647,7 +666,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -718,7 +737,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* program Sub Second Increment reg */
stmmac_config_sub_second_increment(priv,
priv->ptpaddr, priv->plat->clk_ptp_rate,
- priv->plat->has_gmac4, &sec_inc);
+ xmac, &sec_inc);
temp = div_u64(1000000000ULL, sec_inc);
/* Store sub second increment and flags for later use */
@@ -755,12 +774,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
*/
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
priv->adv_ts = 0;
- /* Check if adv_ts can be enabled for dwmac 4.x core */
- if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
+ /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
+ if (xmac && priv->dma_cap.atime_stamp)
priv->adv_ts = 1;
/* Dwmac 3.x core with extend_desc can support adv_ts */
else if (priv->extend_desc && priv->dma_cap.atime_stamp)
@@ -1804,6 +1825,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
rxfifosz, qmode);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+ chan);
}
for (chan = 0; chan < tx_channels_count; chan++) {
@@ -2171,6 +2194,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
rx_q = &priv->rx_queue[chan];
@@ -2201,12 +2230,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
for (chan = 0; chan < dma_csr_ch; chan++)
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
- /* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
-
- if (priv->plat->axi)
- stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
-
return ret;
}
@@ -2524,9 +2547,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
netdev_warn(priv->dev, "%s: failed debugFS registration\n",
__func__);
#endif
- /* Start the ball rolling... */
- stmmac_start_all_dma(priv);
-
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
@@ -2547,6 +2567,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
+ /* Start the ball rolling... */
+ stmmac_start_all_dma(priv);
+
return 0;
}
@@ -3303,6 +3326,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int coe = priv->hw->rx_csum;
unsigned int next_entry;
unsigned int count = 0;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3404,7 +3430,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* in case of GMAC4 because it needs
* to refill the used descriptors, always.
*/
- if (unlikely(!priv->plat->has_gmac4 &&
+ if (unlikely(!xmac &&
((frame_len < priv->rx_copybreak) ||
stmmac_rx_threshold_count(rx_q)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
@@ -3640,7 +3666,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queues_count;
u32 queue;
+ bool xmac;
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
@@ -3659,7 +3687,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
/* To handle GMAC own interrupts */
- if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
+ if ((priv->plat->has_gmac) || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
int mtl_status;
@@ -3776,7 +3804,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
return 0;
@@ -3793,6 +3821,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_BLOCK:
return stmmac_setup_tc_block(priv, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return stmmac_tc_setup_cbs(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4265,6 +4295,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
ndev->max_mtu = JUMBO_LEN;
+ else if (priv->plat->has_xgmac)
+ ndev->max_mtu = XGMAC_JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
@@ -4286,7 +4318,8 @@ int stmmac_dvr_probe(struct device *device,
* has to be disable and this can be done by passing the
* riwt_off field from the platform.
*/
- if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 5df1a608e566..b72ef171477e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -29,6 +29,7 @@
#include <linux/phy.h>
#include <linux/slab.h>
+#include "dwxgmac2.h"
#include "stmmac.h"
#define MII_BUSY 0x00000001
@@ -39,6 +40,115 @@
#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+/* XGMAC defines */
+#define MII_XGMAC_SADDR BIT(18)
+#define MII_XGMAC_CMD_SHIFT 16
+#define MII_XGMAC_WRITE (1 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_READ (3 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_BUSY BIT(22)
+#define MII_XGMAC_MAX_C22ADDR 3
+#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+
+static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 tmp;
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set port as Clause 22 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~MII_XGMAC_C22P_MASK;
+ tmp |= BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
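+ /* PHY address lands in bits 16+, the 5-bit C22 register number in bits 4:0 */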
+ *hw_addr = (phyaddr << 16) | (phyreg & 0x1f);
+ return 0;
+}
+
+static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 tmp, addr, value = MII_XGMAC_BUSY;
+ int ret;
+
+ if (phyreg & MII_ADDR_C45) {
+ return -EOPNOTSUPP;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= MII_XGMAC_SADDR | MII_XGMAC_READ;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to read */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ return readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
+}
+
+static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 addr, tmp, value = MII_XGMAC_BUSY;
+ int ret;
+
+ if (phyreg & MII_ADDR_C45) {
+ return -EOPNOTSUPP;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= phydata | MII_XGMAC_SADDR;
+ value |= MII_XGMAC_WRITE;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ return readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000);
+}
+
/**
* stmmac_mdio_read
* @bus: points to the mii_bus structure
@@ -205,7 +315,7 @@ int stmmac_mdio_register(struct net_device *ndev)
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
- int addr, found;
+ int addr, found, max_addr;
if (!mdio_bus_data)
return 0;
@@ -223,8 +333,23 @@ int stmmac_mdio_register(struct net_device *ndev)
#endif
new_bus->name = "stmmac";
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+
+ if (priv->plat->has_xgmac) {
+ new_bus->read = &stmmac_xgmac2_mdio_read;
+ new_bus->write = &stmmac_xgmac2_mdio_write;
+
+ /* Right now only C22 phys are supported */
+ max_addr = MII_XGMAC_MAX_C22ADDR + 1;
+
+ /* Check if DT specified an unsupported phy addr */
+ if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
+ dev_err(dev, "Unsupported phy_addr (max=%d)\n",
+ MII_XGMAC_MAX_C22ADDR);
+ } else {
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ max_addr = PHY_MAX_ADDR;
+ }
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -243,7 +368,7 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_done;
found = 0;
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ for (addr = 0; addr < max_addr; addr++) {
struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
if (!phydev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 8d375e51a526..c54a50dbd5ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
- ret = pcim_enable_device(pdev);
+ ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
static void stmmac_pci_remove(struct pci_dev *pdev)
{
stmmac_dvr_remove(&pdev->dev);
+ pci_disable_device(pdev);
}
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
+static int __maybe_unused stmmac_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
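+ /* Allow the device to wake the system from D3 */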
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int __maybe_unused stmmac_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
/* synthetic ID, no official vendor */
#define PCI_VENDOR_ID_STMMAC 0x700
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6d141f3931eb..3609c7b696c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
/**
* stmmac_axi_setup - parse DT parameters for programming the AXI register
* @pdev: platform device
- * @priv: driver private struct.
* Description:
* if required, from device-tree the AXI internal register can be tuned
* by using platform parameters.
@@ -487,6 +486,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->force_sf_dma_mode = 1;
}
+ if (of_device_is_compatible(np, "snps,dwxgmac")) {
+ plat->has_xgmac = 1;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 0cb0e39a2be9..2293e21f789f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -71,6 +71,9 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (delta < 0) {
neg_adj = 1;
@@ -82,8 +85,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
nsec = reminder;
spin_lock_irqsave(&priv->ptp_lock, flags);
- stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj,
- priv->plat->has_gmac4);
+ stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index f4b31d69f60e..ecccf895fd7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -21,6 +21,7 @@
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__
+#define PTP_XGMAC_OFFSET 0xd00
#define PTP_GMAC4_OFFSET 0xb00
#define PTP_GMAC3_X_OFFSET 0x700
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 2258cd8cc844..1a96dd9c1091 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -289,7 +289,67 @@ static int tc_init(struct stmmac_priv *priv)
return 0;
}
+static int tc_setup_cbs(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue = qopt->queue;
+ u32 ptr, speed_div;
+ u32 mode_to_use;
+ u64 value;
+ int ret;
+
+ /* Queue 0 is not AVB capable */
+ if (queue <= 0 || queue >= tx_queues_count)
+ return -EINVAL;
+ if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
+ return -EOPNOTSUPP;
+
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ } else if (!qopt->enable) {
+ return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ }
+
+ /* Port Transmit Rate and Speed Divider */
+ ptr = (priv->speed == SPEED_100) ? 4 : 8;
+ speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
+
+ /* Scale the qdisc slope/credit values into the HW register units */
+ value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+
+ value = qopt->hicredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
+
+ value = qopt->locredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
+
+ ret = stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ if (ret)
+ return ret;
+
+ dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
+ queue, qopt->sendslope, qopt->idleslope,
+ qopt->hicredit, qopt->locredit);
+ return 0;
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
};
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a5dd627fe2f9..d42f47f6c632 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 88c12474a0c3..9319d84bf49f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
bmsr = err;
if (bmsr & BMSR_LSTATUS) {
- u16 adv, lpa;
-
- err = mii_read(np, np->phy_addr, MII_ADVERTISE);
- if (err < 0)
- goto out;
- adv = err;
-
- err = mii_read(np, np->phy_addr, MII_LPA);
- if (err < 0)
- goto out;
- lpa = err;
-
- err = mii_read(np, np->phy_addr, MII_ESTATUS);
- if (err < 0)
- goto out;
link_up = 1;
current_speed = SPEED_1000;
current_duplex = DUPLEX_FULL;
-
}
lp->active_speed = current_speed;
lp->active_duplex = current_duplex;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a94f50442613..12539b357a78 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 458a7844260a..99d86e39ff54 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/dcbnl.h>
#include "dwc-xlgmac.h"
@@ -193,7 +194,6 @@ static u32 xlgmac_vid_crc32_le(__le16 vid_le)
{
unsigned char *data = (unsigned char *)&vid_le;
unsigned char data_byte = 0;
- u32 poly = 0xedb88320;
u32 crc = ~0;
u32 temp = 0;
int i, bits;
@@ -208,7 +208,7 @@ static u32 xlgmac_vid_crc32_le(__le16 vid_le)
data_byte >>= 1;
if (temp)
- crc ^= poly;
+ crc ^= CRC32_POLY_LE;
}
return crc;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 163d8d16bc24..dc966ddb6d81 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
struct rx_map *dm;
struct rxf_fifo *f;
struct rxdb *db;
- struct sk_buff *skb;
int delta;
ENTER;
@@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
DBG("db=%p f=%p\n", db, f);
dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
DBG("dm=%p\n", dm);
- skb = dm->skb;
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
rxfd->va_lo = rxdd->va_lo;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 358edab9e72e..832bce07c385 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -39,12 +39,15 @@
#include <linux/sys_soc.h>
#include <linux/pinctrl/consumer.h>
+#include <net/pkt_cls.h>
#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"
+#include <net/pkt_sched.h>
+
#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
NETIF_MSG_DRV | NETIF_MSG_LINK | \
NETIF_MSG_IFUP | NETIF_MSG_INTR | \
@@ -153,6 +156,12 @@ do { \
#define IRQ_NUM 2
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
+#define CPSW_FIFO_SHAPE_EN_SHIFT 16
+#define CPSW_FIFO_RATE_EN_SHIFT 20
+#define CPSW_TC_NUM 4
+#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
+#define CPSW_PCT_MASK 0x7f
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
@@ -253,23 +262,24 @@ struct cpsw_ss_regs {
#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
/* Bit definitions for the CPSW2_CONTROL register */
-#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
-#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
-#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
-#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
-#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
-#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
-#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
-#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
-#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
-#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
-#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
-#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
-#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
-#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
-#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
-#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
+#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
+#define TS_107 BIT(15) /* Time Sync Dest IP Addr 107 enable */
+#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
+#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
@@ -281,7 +291,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
TS_LTYPE1_EN)
@@ -453,6 +463,9 @@ struct cpsw_priv {
u8 mac_addr[ETH_ALEN];
bool rx_pause;
bool tx_pause;
+ bool mqprio_hw;
+ int fifo_bw[CPSW_TC_NUM];
+ int shp_cfg_speed;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -552,40 +565,28 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
-#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \
- do { \
- if (!cpsw->data.dual_emac) \
- break; \
- if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
- ndev = cpsw->slaves[0].ndev; \
- skb->dev = ndev; \
- } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
- ndev = cpsw->slaves[1].ndev; \
- skb->dev = ndev; \
- } \
- } while (0)
-#define cpsw_add_mcast(cpsw, priv, addr) \
- do { \
- if (cpsw->data.dual_emac) { \
- struct cpsw_slave *slave = cpsw->slaves + \
- priv->emac_port; \
- int slave_port = cpsw_get_slave_port( \
- slave->slave_num); \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- 1 << slave_port | ALE_PORT_HOST, \
- ALE_VLAN, slave->port_vlan, 0); \
- } else { \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- ALE_ALL_PORTS, \
- 0, 0, 0); \
- } \
- } while (0)
-
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
+static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (cpsw->data.dual_emac) {
+ struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
+ int slave_port = cpsw_get_slave_port(slave->slave_num);
+
+ cpsw_ale_add_mcast(cpsw->ale, addr,
+ 1 << slave_port | ALE_PORT_HOST,
+ ALE_VLAN, slave->port_vlan, 0);
+ return;
+ }
+
+ cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
+}
+
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -693,7 +694,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
+ cpsw_add_mcast(priv, ha->addr);
}
}
}
@@ -785,10 +786,16 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct sk_buff *skb = token;
struct sk_buff *new_skb;
struct net_device *ndev = skb->dev;
- int ret = 0;
+ int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
+ if (cpsw->data.dual_emac) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port) {
+ ndev = cpsw->slaves[--port].ndev;
+ skb->dev = ndev;
+ }
+ }
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* In dual emac mode check for all interfaces */
@@ -967,8 +974,8 @@ static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
/* process every unprocessed channel */
ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
- for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) {
- if (!(ch_map & 0x01))
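+ /* TX vectors are allocated in reverse CPDMA channel order, so scan from bit 7 */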
+ for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+ if (!(ch_map & 0x80))
continue;
txv = &cpsw->txv[ch];
@@ -1077,6 +1084,38 @@ static void cpsw_set_slave_mac(struct cpsw_slave *slave,
slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
+static bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
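+ /* Each slave port owns a 3-bit shaper-enable field in the PTYPE register */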
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = 7 << shift;
+ val = val & mask;
+
+ return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = (1 << --fifo) << shift;
+ val = on ? val | mask : val & ~mask;
+
+ writel_relaxed(val, &cpsw->regs->ptype);
+}
+
static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct cpsw_priv *priv, bool *link)
{
@@ -1116,6 +1155,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
mac_control |= BIT(4);
*link = true;
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
} else {
mac_control = 0;
/* disable forwarding */
@@ -1577,6 +1622,231 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
soft_reset_slave(slave);
}
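+/* Map a tc to a FIFO: the last (lowest-priority) tc uses FIFO0, which cannot
+ * be shaped; the remaining tcs map to the shaper FIFOs in reverse order.
+ */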
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+ if (tc == num_tc - 1)
+ return 0;
+
+ return CPSW_FIFO_SHAPERS_NUM - tc;
+}
+
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 val = 0, send_pct, shift;
+ struct cpsw_slave *slave;
+ int pct = 0, i;
+
+ if (bw > priv->shp_cfg_speed * 1000)
+ goto err;
+
+ /* shaping has to stay enabled for the highest FIFOs linearly,
+ * and FIFO bw can be no more than the interface allows
+ */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ send_pct = slave_read(slave, SEND_PERCENT);
+ for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+ if (!bw) {
+ if (i >= fifo || !priv->fifo_bw[i])
+ continue;
+
+ dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+ continue;
+ }
+
+ if (!priv->fifo_bw[i] && i > fifo) {
+ dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+ return -EINVAL;
+ }
+
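+ /* Each shaper FIFO owns a byte-wide percentage lane in SEND_PERCENT */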
+ shift = (i - 1) * 8;
+ if (i == fifo) {
+ send_pct &= ~(CPSW_PCT_MASK << shift);
+ val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+ if (!val)
+ val = 1;
+
+ send_pct |= val << shift;
+ pct += val;
+ continue;
+ }
+
+ if (priv->fifo_bw[i])
+ pct += (send_pct >> shift) & CPSW_PCT_MASK;
+ }
+
+ if (pct >= 100)
+ goto err;
+
+ slave_write(slave, send_pct, SEND_PERCENT);
+ priv->fifo_bw[fifo] = bw;
+
+ dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+ DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+ return 0;
+err:
+ dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+ return -EINVAL;
+}
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 tx_in_ctl_rg, val;
+ int ret;
+
+ ret = cpsw_set_fifo_bw(priv, fifo, bw);
+ if (ret)
+ return ret;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+ if (!bw)
+ cpsw_fifo_shp_on(priv, fifo, bw);
+
+ val = slave_read(slave, tx_in_ctl_rg);
+ if (cpsw_shp_is_off(priv)) {
+ /* disable FIFOs rate limited queues */
+ val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+ /* set type of FIFO queues to normal priority mode */
+ val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+ /* set type of FIFO queues to be rate limited */
+ if (bw)
+ val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+ else
+ priv->shp_cfg_speed = 0;
+ }
+
+ /* toggle a FIFO rate limited queue */
+ if (bw)
+ val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ else
+ val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ slave_write(slave, val, tx_in_ctl_rg);
+
+ /* FIFO transmit shape enable */
+ cpsw_fifo_shp_on(priv, fifo, bw);
+ return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int prev_speed = 0;
+ int tc, ret, fifo;
+ u32 bw = 0;
+
+ tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+ /* enable channels in backward order, as the highest FIFOs must be rate
+ * limited first, and for compliance with CPDMA rate-limited channels,
+ * which are also used in backward order. FIFO0 cannot be rate limited.
+ */
+ fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+ if (!fifo) {
+ dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+ return -EINVAL;
+ }
+
+ /* do nothing, it's disabled anyway */
+ if (!qopt->enable && !priv->fifo_bw[fifo])
+ return 0;
+
+ /* shapers can be set if link speed is known */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ if (slave->phy && slave->phy->link) {
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed)
+ prev_speed = priv->shp_cfg_speed;
+
+ priv->shp_cfg_speed = slave->phy->speed;
+ }
+
+ if (!priv->shp_cfg_speed) {
+ dev_err(priv->dev, "Link speed is not known");
+ return -1;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ bw = qopt->enable ? qopt->idleslope : 0;
+ ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+ if (ret) {
+ priv->shp_cfg_speed = prev_speed;
+ prev_speed = 0;
+ }
+
+ if (bw && prev_speed)
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
+
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ int fifo, bw;
+
+ for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+ bw = priv->fifo_bw[fifo];
+ if (!bw)
+ continue;
+
+ cpsw_set_fifo_rlimit(priv, fifo, bw);
+ }
+}
+
+static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 tx_prio_map = 0;
+ int i, tc, fifo;
+ u32 tx_prio_rg;
+
+ if (!priv->mqprio_hw)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ tc = netdev_get_prio_tc_map(priv->ndev, i);
+ fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ /* restore MQPRIO offload */
+ for_each_slave(priv, cpsw_mqprio_resume, priv);
+
+ /* restore CBS offload */
+ for_each_slave(priv, cpsw_cbs_resume, priv);
+}
+
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1656,6 +1926,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
}
+ cpsw_restore(priv);
+
/* Enable Interrupt pacing if configured */
if (cpsw->coal_intvl != 0) {
struct ethtool_coalesce coal;
@@ -2086,14 +2358,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
int i;
for (i = 0; i < cpsw->data.slaves; i++) {
- if (vid == cpsw->slaves[i].port_vlan)
- return -EINVAL;
+ if (vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
}
}
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
pm_runtime_put(cpsw->dev);
return ret;
}
@@ -2119,22 +2393,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
for (i = 0; i < cpsw->data.slaves; i++) {
if (vid == cpsw->slaves[i].port_vlan)
- return -EINVAL;
+ goto err;
}
}
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
- if (ret != 0)
- return ret;
-
- ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
- HOST_PORT_NUM, ALE_VLAN, vid);
- if (ret != 0)
- return ret;
-
- ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
- 0, ALE_VLAN, vid);
+ ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+err:
pm_runtime_put(cpsw->dev);
return ret;
}
@@ -2190,6 +2459,78 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
return ret;
}
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int fifo, num_tc, count, offset;
+ struct cpsw_slave *slave;
+ u32 tx_prio_map = 0;
+ int i, tc, ret;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > CPSW_TC_NUM)
+ return -EINVAL;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ if (num_tc) {
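+ /* TX_PRI_MAP packs a 4-bit FIFO index for each of the 8 priorities */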
+ for (i = 0; i < 8; i++) {
+ tc = mqprio->qopt.prio_tc_map[i];
+ fifo = cpsw_tc_to_fifo(tc, num_tc);
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+ for (i = 0; i < num_tc; i++) {
+ count = mqprio->qopt.count[i];
+ offset = mqprio->qopt.offset[i];
+ netdev_set_tc_queue(ndev, i, count, offset);
+ }
+ }
+
+ if (!mqprio->qopt.hw) {
+ /* restore default configuration */
+ netdev_reset_tc(ndev);
+ tx_prio_map = TX_PRIORITY_MAPPING;
+ }
+
+ priv->mqprio_hw = mqprio->qopt.hw;
+
+ offset = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ slave_write(slave, tx_prio_map, offset);
+
+ pm_runtime_put_sync(cpsw->dev);
+
+ return 0;
+}
+
+static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return cpsw_set_cbs(ndev, type_data);
+
+ case TC_SETUP_QDISC_MQPRIO:
+ return cpsw_set_mqprio(ndev, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -2205,6 +2546,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
};
static int cpsw_get_regs_len(struct net_device *ndev)
@@ -2431,7 +2773,7 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
void (*handler)(void *, int, int);
struct netdev_queue *queue;
struct cpsw_vector *vec;
- int ret, *ch;
+ int ret, *ch, vch;
if (rx) {
ch = &cpsw->rx_ch_num;
@@ -2444,7 +2786,8 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
}
while (*ch < ch_num) {
- vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+ vch = rx ? *ch : 7 - *ch;
+ vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
queue = netdev_get_tx_queue(priv->ndev, *ch);
queue->tx_maxrate = 0;
@@ -2927,7 +3270,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
priv_sl2->mac_addr);
} else {
- random_ether_addr(priv_sl2->mac_addr);
+ eth_random_addr(priv_sl2->mac_addr);
dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
priv_sl2->mac_addr);
}
@@ -2935,7 +3278,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -2981,7 +3324,7 @@ static int cpsw_probe(struct platform_device *pdev)
u32 slave_offset, sliver_offset, slave_size;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
- int ret = 0, i;
+ int ret = 0, i, ch;
int irq;
cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
@@ -3156,7 +3499,8 @@ static int cpsw_probe(struct platform_device *pdev)
if (soc)
cpsw->quirk_irq = 1;
- cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
if (IS_ERR(cpsw->txv[0].ch)) {
dev_err(priv->dev, "error initializing tx dma channel\n");
ret = PTR_ERR(cpsw->txv[0].ch);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 93dc05c194d3..5766225a4ce1 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
- return -EINVAL;
+ return -ENOENT;
cpsw_ale_read(ale, idx, ale_entry);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 6f63c8729afc..b96b93c686bf 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
dev_consume_skb_any(skb);
dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
mtype, seqid);
- } else if (time_after(jiffies, skb_cb->tmo)) {
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
dev_dbg(cpts->dev,
"expiring tx timestamp mtype %u seqid %04x\n",
@@ -158,6 +161,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
*/
break;
}
+ /* fall through */
case CPTS_EV_PUSH:
case CPTS_EV_RX:
list_del_init(&event->list);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index cdbddf16dd29..4236dcdd5634 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
* devices (e.g. cpsw switches) use plain old memory. Descriptor pools
* abstract out these details
*/
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
struct cpdma_params *cpdma_params = &ctlr->params;
struct cpdma_desc_pool *pool;
@@ -406,37 +406,36 @@ static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
struct cpdma_chan *chan;
u32 old_rate = ch->rate;
u32 new_rmask = 0;
- int rlim = 1;
+ int rlim = 0;
int i;
- *prio_mode = 0;
for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
chan = ctlr->channels[i];
- if (!chan) {
- rlim = 0;
+ if (!chan)
continue;
- }
if (chan == ch)
chan->rate = rate;
if (chan->rate) {
- if (rlim) {
- new_rmask |= chan->mask;
- } else {
- ch->rate = old_rate;
- dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n",
- chan->chan_num);
- return -EINVAL;
- }
- } else {
- *prio_mode = 1;
- rlim = 0;
+ rlim = 1;
+ new_rmask |= chan->mask;
+ continue;
}
+
+ if (rlim)
+ goto err;
}
*rmask = new_rmask;
+ *prio_mode = rlim;
return 0;
+
+err:
+ ch->rate = old_rate;
+ dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
+ chan->chan_num);
+ return -EINVAL;
}
static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index a1a6445b5a7e..f270beebb428 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1387,6 +1387,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int match_first_device(struct device *dev, void *data)
{
+ if (dev->parent && dev->parent->of_node)
+ return of_device_is_compatible(dev->parent->of_node,
+ "ti,davinci_mdio");
+
return !strncmp(dev_name(dev), "davinci_mdio", 12);
}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index e40aa3e31af2..a1d335a3c5e4 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
return err;
}
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return 0;
-}
-
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
.ndo_tx_timeout = netcp_ndo_tx_timeout,
- .ndo_select_queue = netcp_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_setup_tc = netcp_setup_tc,
};
@@ -2052,7 +2045,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (is_valid_ether_addr(efuse_mac_addr))
ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
@@ -2061,7 +2054,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (mac_addr)
ether_addr_copy(ndev->dev_addr, mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
}
ret = of_property_read_string(node_interface, "rx-channel",
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index c769cd9d11e7..93d142867c2a 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -966,6 +966,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY: /* get address of MII PHY in use. */
data->phy_id = phy;
+ /* fall through */
case SIOCGMIIREG: /* read MII PHY register. */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 16c3bfbe1992..757a3b37ae8a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -218,6 +218,7 @@ issue:
ret = of_mdiobus_register(bus, np1);
if (ret) {
mdiobus_free(bus);
+ lp->mii_bus = NULL;
return ret;
}
return 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 2a0c06e0f730..42f1f518dad6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -70,7 +70,8 @@
#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
- * in the HW spec */
+ * in the HW spec
+ */
/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
@@ -94,11 +95,11 @@
-#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */
+#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
/* Set up to output the remaining data */
align_buffer = 0;
- to_u8_ptr = (u8 *) &align_buffer;
- from_u8_ptr = (u8 *) from_u16_ptr;
+ to_u8_ptr = (u8 *)&align_buffer;
+ from_u8_ptr = (u8 *)from_u16_ptr;
/* Output the remaining data */
for (; length > 0; length--)
@@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u32 align_buffer;
from_u32_ptr = src_ptr;
- to_u16_ptr = (u16 *) dest_ptr;
+ to_u16_ptr = (u16 *)dest_ptr;
for (; length > 3; length -= 4) {
/* Copy each word into the temporary buffer */
@@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u8 *to_u8_ptr, *from_u8_ptr;
/* Set up to read the remaining data */
- to_u8_ptr = (u8 *) to_u16_ptr;
+ to_u8_ptr = (u8 *)to_u16_ptr;
align_buffer = *from_u32_ptr++;
- from_u8_ptr = (u8 *) &align_buffer;
+ from_u8_ptr = (u8 *)&align_buffer;
/* Read the remaining data */
for (; length > 0; length--)
@@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
} else if (drvdata->tx_ping_pong != 0) {
/* If the expected buffer is full, try the other buffer,
- * if it is configured in HW */
+ * if it is configured in HW
+ */
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
return -1; /* Buffer was full, return failure */
/* Write the frame to the buffer */
- xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
+ xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
@@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
- * has been transmitted */
+ * has been transmitted
+ */
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
@@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* xemaclite_recv_data - Receive a frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Address where the data is to be received
+ * @maxlen: Maximum supported ethernet packet length
*
* This function is intended to be called from the interrupt context or
* with a wrapper which waits for the receive frame to be available.
@@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
/* The instance is out of sync, try other buffer if other
* buffer is configured, return 0 otherwise. If the instance is
* out of sync, do not update the 'next_rx_buf_to_use' since it
- * will correct on subsequent calls */
+ * will correct itself on subsequent calls
+ */
if (drvdata->rx_ping_pong != 0)
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
return 0; /* No data was available */
}
- /* Get the protocol type of the ethernet frame that arrived */
+	/* Get the protocol type of the ethernet frame that arrived */
proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
- * or an IP packet or an ARP packet */
+ * or an IP packet or an ARP packet
+ */
if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
@@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
else
/* Field contains type other than IP or ARP, use max
- * frame size and let user parse it */
+ * frame size and let user parse it
+ */
length = ETH_FRAME_LEN + ETH_FCS_LEN;
} else
/* Use the length in the frame, plus the header and trailer */
@@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = maxlen;
/* Read from the EmacLite device */
- xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
+ xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
@@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/* Determine the expected Tx buffer address */
addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
- xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
+ xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN);
xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
@@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/**
* xemaclite_set_mac_address - Set the MAC address for this device
* @dev: Pointer to the network device instance
- * @addr: Void pointer to the sockaddr structure
+ * @address: Void pointer to the sockaddr structure
*
 * This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
@@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
- if (lp->deferred_skb) {
- if (xemaclite_send_data(lp,
- (u8 *) lp->deferred_skb->data,
- lp->deferred_skb->len) != 0)
- return;
- else {
- dev->stats.tx_bytes += lp->deferred_skb->len;
- dev_kfree_skb_irq(lp->deferred_skb);
- lp->deferred_skb = NULL;
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
- }
- }
+
+ if (!lp->deferred_skb)
+ return;
+
+ if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data,
+ lp->deferred_skb->len))
+ return;
+
+ dev->stats.tx_bytes += lp->deferred_skb->len;
+ dev_kfree_skb_irq(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/**
@@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /*
- * A new skb should have the data halfword aligned, but this code is
+ /* A new skb should have the data halfword aligned, but this code is
* here just in case that isn't true. Calculate how many
* bytes we should reserve to get the data to start on a word
- * boundary */
+ * boundary
+ */
align = BUFFER_ALIGN(skb->data);
if (align)
skb_reserve(skb, align);
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
+ len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev)
* @dev_id: Void pointer to the network device instance used as callback
* reference
*
+ * Return: IRQ_HANDLED
+ *
* This function handles the Tx and Rx interrupts of the EmacLite device.
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
@@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp)
unsigned long end = jiffies + 2;
/* wait for the MDIO interface to not be busy or timeout
- after some time.
- */
+ * after some time.
+ */
while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
@@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ "%s(phy_id=%i, reg=%x) == %x\n", __func__,
phy_id, reg, rc);
return rc;
@@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
*
* This function waits till the device is ready to accept a new MDIO
* request and then writes the val to the MDIO Write Data register.
+ *
+ * Return: 0 upon success or a negative error upon failure
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 val)
@@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u32 ctrl_reg;
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ "%s(phy_id=%i, reg=%x, val=%x)\n", __func__,
phy_id, reg, val);
if (xemaclite_mdio_wait(lp))
@@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
/**
* xemaclite_mdio_setup - Register mii_bus for the Emaclite device
* @lp: Pointer to the Emaclite device private data
- * @ofdev: Pointer to OF device structure
+ * @dev: Pointer to OF device structure
*
* This function enables MDIO bus in the Emaclite device and registers a
* mii_bus.
@@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev)
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
* It also connects to the phy device, if MDIO is included in Emaclite device.
+ *
+ * Return: 0 on success. -ENODEV, if PHY cannot be connected.
+ * Non-zero error value on failure.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev)
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
* It also disconnects the phy device associated with the Emaclite device.
+ *
+ * Return: 0, always.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
new_skb = orig_skb;
spin_lock_irqsave(&lp->reset_lock, flags);
- if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
+ if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) {
/* If the Emaclite Tx buffer is busy, stop the Tx queue and
* defer the skb for transmission during the ISR, after the
- * current transmission is complete */
+ * current transmission is complete
+ */
netif_stop_queue(dev);
lp->deferred_skb = new_skb;
/* Take the time stamp now, since we can't do this in an ISR. */
@@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
- if (p) {
- return (bool)*p;
- } else {
- dev_warn(&ofdev->dev, "Parameter %s not found,"
- "defaulting to false\n", s);
+ if (!p) {
+ dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s);
return false;
}
+
+ return (bool)*p;
}
static const struct net_device_ops xemaclite_netdev_ops;
@@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops;
/**
* xemaclite_of_probe - Probe method for the Emaclite device.
* @ofdev: Pointer to OF device structure
- * @match: Pointer to the structure used for matching a device
*
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 750954be5a74..d3eae1239045 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1395,8 +1395,8 @@ static void fjes_watch_unshare_task(struct work_struct *work)
while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
(wait_time < 3000)) {
- for (epidx = 0; epidx < hw->max_epid; epidx++) {
- if (epidx == hw->my_epid)
+ for (epidx = 0; epidx < max_epid; epidx++) {
+ if (epidx == my_epid)
continue;
is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
@@ -1453,8 +1453,8 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
if (hw->hw_info.buffer_unshare_reserve_bit) {
- for (epidx = 0; epidx < hw->max_epid; epidx++) {
- if (epidx == hw->my_epid)
+ for (epidx = 0; epidx < max_epid; epidx++) {
+ if (epidx == my_epid)
continue;
if (test_bit(epidx,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 750eaa53bf0c..6acb6b5718b9 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -236,7 +236,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
/* Update tunnel dst according to Geneve options. */
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
- gnvh->options, gnvh->opt_len * 4);
+ gnvh->options, gnvh->opt_len * 4,
+ TUNNEL_GENEVE_OPT);
} else {
/* Drop packets w/ critical options,
* since we don't support any...
@@ -418,11 +419,12 @@ static int geneve_hlen(struct genevehdr *gh)
return sizeof(*gh) + gh->opt_len * 4;
}
-static struct sk_buff **geneve_gro_receive(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *geneve_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
- struct sk_buff *p, **pp = NULL;
+ struct sk_buff *pp = NULL;
+ struct sk_buff *p;
struct genevehdr *gh, *gh2;
unsigned int hlen, gh_len, off_gnv;
const struct packet_offload *ptype;
@@ -449,7 +451,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
goto out;
}
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -476,7 +478,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
out_unlock:
rcu_read_unlock();
out:
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final(skb, pp, flush);
return pp;
}
@@ -674,7 +676,8 @@ static void geneve_build_header(struct genevehdr *geneveh,
geneveh->proto_type = htons(ETH_P_TEB);
geneveh->rsvd2 = 0;
- ip_tunnel_info_opts_get(geneveh->options, info);
+ if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
+ ip_tunnel_info_opts_get(geneveh->options, info);
}
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
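ip_tunnel_info_opts_set() now takes the option-type flag and records it in the tunnel key itself, which is why geneve_build_header() must test TUNNEL_GENEVE_OPT before copying options back out. A sketch of the updated inline helper, with the body reconstructed from how it is called here (an assumption, not quoted from include/net/ip_tunnels.h):

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   __be16 flags)
{
	memcpy(ip_tunnel_info_opts(info), from, len);
	info->options_len = len;
	info->key.tun_flags |= flags;	/* e.g. TUNNEL_GENEVE_OPT */
}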
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ec629a730005..7a145172d503 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1255,7 +1255,7 @@ out:
return skb->len;
}
-static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
+static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_LINK] = { .type = NLA_U32, },
[GTPA_VERSION] = { .type = NLA_U32, },
[GTPA_TID] = { .type = NLA_U64, },
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 32f49c4ce457..d79a69dd2146 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -878,10 +878,8 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
{
- unsigned char channel;
int actual;
- channel = cmd & SIXP_CHN_MASK;
if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
/* RX and DCD flags can only be set in the same prio command,
@@ -933,10 +931,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
static void decode_std_command(struct sixpack *sp, unsigned char cmd)
{
- unsigned char checksum = 0, rest = 0, channel;
+ unsigned char checksum = 0, rest = 0;
short i;
- channel = cmd & SIXP_CHN_MASK;
switch (cmd & SIXP_CMD_MASK) { /* normal command */
case SIXP_SEOF:
if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 1a924b867b07..a32ded5b4f41 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
@@ -873,6 +873,17 @@ struct netvsc_ethtool_stats {
unsigned long wake_queue;
};
+struct netvsc_ethtool_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 vf_rx_packets;
+ u64 vf_rx_bytes;
+ u64 vf_tx_packets;
+ u64 vf_tx_bytes;
+};
+
struct netvsc_vf_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5d5bd513847f..31c3d77b4733 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
VM_PKT_DATA_INBAND, 0);
}
+/* Worker to set up sub-channels during initial setup.
+ * The initial hotplug event occurs in softirq context
+ * and cannot wait for the channels to come up.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+ struct netvsc_device *nvdev =
+ container_of(w, struct netvsc_device, subchan_work);
+ struct rndis_device *rdev;
+ int i, ret;
+
+ /* Avoid deadlock with device removal already under RTNL */
+ if (!rtnl_trylock()) {
+ schedule_work(w);
+ return;
+ }
+
+ rdev = nvdev->extension;
+ if (rdev) {
+ ret = rndis_set_subchannel(rdev->ndev, nvdev);
+ if (ret == 0) {
+ netif_device_attach(rdev->ndev);
+ } else {
+ /* fallback to only primary channel */
+ for (i = 1; i < nvdev->num_chn; i++)
+ netif_napi_del(&nvdev->chan_table[i].napi);
+
+ nvdev->max_chn = 1;
+ nvdev->num_chn = 1;
+ }
+ }
+
+ rtnl_unlock();
+}
+
static struct netvsc_device *alloc_net_device(void)
{
struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
init_completion(&net_device->channel_init_wait);
init_waitqueue_head(&net_device->subchan_open);
- INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+ INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
return net_device;
}
@@ -1239,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
struct hv_device *device = netvsc_channel_to_device(channel);
struct net_device *ndev = hv_get_drvdata(device);
int work_done = 0;
+ int ret;
/* If starting a new interval */
if (!nvchan->desc)
@@ -1250,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
- /* If send of pending receive completions suceeded
- * and did not exhaust NAPI budget this time
- * and not doing busy poll
+ /* Send any pending receive completions */
+ ret = send_recv_completions(ndev, net_device, nvchan);
+
+ /* If it did not exhaust NAPI budget this time
+ * and not doing busy poll
* then re-enable host interrupts
- * and reschedule if ring is not empty.
+ * and reschedule if ring is not empty
+ * or sending receive completion failed.
*/
- if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
- work_done < budget &&
+ if (work_done < budget &&
napi_complete_done(napi, work_done) &&
- hv_end_read(&channel->inbound) &&
+ (ret || hv_end_read(&channel->inbound)) &&
napi_schedule_prep(napi)) {
hv_begin_read(&channel->inbound);
__napi_schedule(napi);
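The rtnl_trylock()/schedule_work() dance in netvsc_subchan_work() sidesteps a deadlock against device removal, which already holds the RTNL while it flushes this work. Schematically (an illustration, not code from the patch):

/*
 *   removal path (holds RTNL)           subchannel worker
 *   -------------------------           -----------------
 *   rtnl_lock()
 *   cancel_work_sync(&subchan_work)     rtnl_lock()  <- would block forever
 *          ^--- waits for the worker, which waits for the RTNL ---^
 *
 * With rtnl_trylock() the worker backs off and reschedules itself,
 * so the flush on the removal path can complete.
 */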
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fe2256bf1d13..507f68190cb1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct net_device_context *ndc = netdev_priv(ndev);
@@ -343,9 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
if (vf_ops->ndo_select_queue)
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
else
- txq = fallback(vf_netdev, skb);
+ txq = fallback(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
if (IS_ERR(nvdev))
return PTR_ERR(nvdev);
- /* Note: enable and attach happen when sub-channels setup */
+ if (nvdev->num_chn > 1) {
+ ret = rndis_set_subchannel(ndev, nvdev);
+ /* if unavailable, just proceed with one queue */
+ if (ret) {
+ nvdev->max_chn = 1;
+ nvdev->num_chn = 1;
+ }
+ }
+
+ /* In any case device is now ready */
+ netif_device_attach(ndev);
+
+ /* Note: enable and attach happen when sub-channels setup */
netif_carrier_off(ndev);
if (netif_running(ndev)) {
@@ -1106,6 +1118,64 @@ static void netvsc_get_vf_stats(struct net_device *net,
}
}
+static void netvsc_get_pcpu_stats(struct net_device *net,
+ struct netvsc_ethtool_pcpu_stats *pcpu_tot)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+ int i;
+
+ /* fetch percpu stats of vf */
+ for_each_possible_cpu(i) {
+ const struct netvsc_vf_pcpu_stats *stats =
+ per_cpu_ptr(ndev_ctx->vf_stats, i);
+ struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ this_tot->vf_rx_packets = stats->rx_packets;
+ this_tot->vf_tx_packets = stats->tx_packets;
+ this_tot->vf_rx_bytes = stats->rx_bytes;
+ this_tot->vf_tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ this_tot->rx_packets = this_tot->vf_rx_packets;
+ this_tot->tx_packets = this_tot->vf_tx_packets;
+ this_tot->rx_bytes = this_tot->vf_rx_bytes;
+ this_tot->tx_bytes = this_tot->vf_tx_bytes;
+ }
+
+ /* fetch percpu stats of netvsc */
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+ const struct netvsc_stats *stats;
+ struct netvsc_ethtool_pcpu_stats *this_tot =
+ &pcpu_tot[nvchan->channel->target_cpu];
+ u64 packets, bytes;
+ unsigned int start;
+
+ stats = &nvchan->tx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ this_tot->tx_bytes += bytes;
+ this_tot->tx_packets += packets;
+
+ stats = &nvchan->rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ this_tot->rx_bytes += bytes;
+ this_tot->rx_packets += packets;
+ }
+}
+
static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
@@ -1203,6 +1273,23 @@ static const struct {
{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
+}, pcpu_stats[] = {
+ { "cpu%u_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
+ { "cpu%u_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
+ { "cpu%u_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
+ { "cpu%u_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
+ { "cpu%u_vf_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
+ { "cpu%u_vf_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
+ { "cpu%u_vf_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
+ { "cpu%u_vf_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
@@ -1214,6 +1301,9 @@ static const struct {
#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
+/* statistics per CPU (rx/tx packets/bytes) */
+#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
+
/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
@@ -1229,7 +1319,8 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set)
case ETH_SS_STATS:
return NETVSC_GLOBAL_STATS_LEN
+ NETVSC_VF_STATS_LEN
- + NETVSC_QUEUE_STATS_LEN(nvdev);
+ + NETVSC_QUEUE_STATS_LEN(nvdev)
+ + NETVSC_PCPU_STATS_LEN;
default:
return -EINVAL;
}
@@ -1243,9 +1334,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
const void *nds = &ndc->eth_stats;
const struct netvsc_stats *qstats;
struct netvsc_vf_pcpu_stats sum;
+ struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
- int i, j;
+ int i, j, cpu;
if (!nvdev)
return;
@@ -1277,6 +1369,19 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = packets;
data[i++] = bytes;
}
+
+	pcpu_sum = kvmalloc_array(num_possible_cpus(),
+				  sizeof(struct netvsc_ethtool_pcpu_stats),
+				  GFP_KERNEL);
+	if (!pcpu_sum)
+		return;
+
+	netvsc_get_pcpu_stats(dev, pcpu_sum);
+ for_each_present_cpu(cpu) {
+ struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
+
+ for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
+ data[i++] = *(u64 *)((void *)this_sum
+ + pcpu_stats[j].offset);
+ }
+ kvfree(pcpu_sum);
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1284,7 +1389,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
u8 *p = data;
- int i;
+ int i, cpu;
if (!nvdev)
return;
@@ -1312,6 +1417,13 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
+ for_each_present_cpu(cpu) {
+ for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
+ sprintf(p, pcpu_stats[i].name, cpu);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
break;
}
}
@@ -2089,6 +2201,9 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ if (nvdev->num_chn > 1)
+ schedule_work(&nvdev->subchan_work);
+
/* hw_features computed in rndis_netdev_set_hwcaps() */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_SG |
@@ -2188,6 +2303,9 @@ static struct hv_driver netvsc_drv = {
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
/*
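With these counters wired up, ethtool -S output on a netvsc interface gains one entry per present CPU for each pcpu_stats format string (cpu0_rx_packets, cpu0_vf_tx_bytes, and so on) alongside the existing global, per-VF and per-queue statistics.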
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 5428bb261102..2a5209f23f29 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
* This breaks overlap of processing the host message for the
* new primary channel with the initialization of sub-channels.
*/
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
{
- struct netvsc_device *nvdev
- = container_of(w, struct netvsc_device, subchan_work);
struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
- struct net_device_context *ndev_ctx;
- struct rndis_device *rdev;
- struct net_device *ndev;
- struct hv_device *hv_dev;
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct hv_device *hv_dev = ndev_ctx->device_ctx;
+ struct rndis_device *rdev = nvdev->extension;
int i, ret;
- if (!rtnl_trylock()) {
- schedule_work(w);
- return;
- }
-
- rdev = nvdev->extension;
- if (!rdev)
- goto unlock; /* device was removed */
-
- ndev = rdev->ndev;
- ndev_ctx = netdev_priv(ndev);
- hv_dev = ndev_ctx->device_ctx;
+ ASSERT_RTNL();
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret) {
netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
- goto failed;
+ return ret;
}
wait_for_completion(&nvdev->channel_init_wait);
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
netdev_err(ndev, "sub channel request failed\n");
- goto failed;
+ return -EIO;
}
nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
ndev_ctx->tx_table[i] = i % nvdev->num_chn;
- netif_device_attach(ndev);
- rtnl_unlock();
- return;
-
-failed:
- /* fallback to only primary channel */
- for (i = 1; i < nvdev->num_chn; i++)
- netif_napi_del(&nvdev->chan_table[i].napi);
-
- nvdev->max_chn = 1;
- nvdev->num_chn = 1;
-
- netif_device_attach(ndev);
-unlock:
- rtnl_unlock();
+ return 0;
}
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,20 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
netif_napi_add(net, &net_device->chan_table[i].napi,
netvsc_poll, NAPI_POLL_WEIGHT);
- if (net_device->num_chn > 1)
- schedule_work(&net_device->subchan_work);
+ return net_device;
out:
- /* if unavailable, just proceed with one queue */
- if (ret) {
- net_device->max_chn = 1;
- net_device->num_chn = 1;
- }
-
- /* No sub channels, device is ready */
- if (net_device->num_chn == 1)
- netif_device_attach(net);
-
+ /* setting up multiple channels failed */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
return net_device;
err_dev_remv:
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 8782f5655e3f..0e372f392cb1 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -115,3 +115,14 @@ config IEEE802154_MCR20A
This driver can also be built as a module. To do so, say M here.
the module will be called 'mcr20a'.
+
+config IEEE802154_HWSIM
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "Simulated radio testing tool for mac802154"
+ ---help---
+ This driver is a developer testing tool that can be used to test
+ IEEE 802.15.4 networking stack (mac802154) functionality. This is not
+ needed for normal wpan usage and is only for testing.
+
+	  This driver can also be built as a module. To do so, say M here.
+ The module will be called 'mac802154_hwsim'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 104744d5a668..0c78b6298060 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o
obj-$(CONFIG_IEEE802154_CA8210) += ca8210.o
obj-$(CONFIG_IEEE802154_MCR20A) += mcr20a.o
+obj-$(CONFIG_IEEE802154_HWSIM) += mac802154_hwsim.o
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 64f1b1e77bc0..23a52b9293f3 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -275,6 +275,8 @@ struct adf7242_local {
struct spi_message stat_msg;
struct spi_transfer stat_xfer;
struct dentry *debugfs_root;
+ struct delayed_work work;
+ struct workqueue_struct *wqueue;
unsigned long flags;
int tx_stat;
bool promiscuous;
@@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp)
/* Wait until the ACK is sent */
adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
adf7242_clear_irqstat(lp);
+ mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
return adf7242_cmd(lp, CMD_RC_RX);
}
+static void adf7242_rx_cal_work(struct work_struct *work)
+{
+ struct adf7242_local *lp =
+ container_of(work, struct adf7242_local, work.work);
+
+ /* Reissuing RC_RX every 400ms - to adjust for offset
+ * drift in receiver (datasheet page 61, OCL section)
+ */
+
+ if (!test_bit(FLAG_XMIT, &lp->flags)) {
+ adf7242_cmd(lp, CMD_RC_PHY_RDY);
+ adf7242_cmd_rx(lp);
+ }
+}
+
static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
{
struct adf7242_local *lp = hw->priv;
@@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
enable_irq(lp->spi->irq);
set_bit(FLAG_START, &lp->flags);
- return adf7242_cmd(lp, CMD_RC_RX);
+ return adf7242_cmd_rx(lp);
}
static void adf7242_stop(struct ieee802154_hw *hw)
@@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw)
struct adf7242_local *lp = hw->priv;
disable_irq(lp->spi->irq);
+ cancel_delayed_work_sync(&lp->work);
adf7242_cmd(lp, CMD_RC_IDLE);
clear_bit(FLAG_START, &lp->flags);
adf7242_clear_irqstat(lp);
@@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);
- return adf7242_cmd(lp, CMD_RC_RX);
+ if (test_bit(FLAG_START, &lp->flags))
+ return adf7242_cmd_rx(lp);
+ else
+ return adf7242_cmd(lp, CMD_RC_PHY_RDY);
}
static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
@@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
/* ensure existing instances of the IRQ handler have completed */
disable_irq(lp->spi->irq);
set_bit(FLAG_XMIT, &lp->flags);
+ cancel_delayed_work_sync(&lp->work);
reinit_completion(&lp->tx_complete);
adf7242_cmd(lp, CMD_RC_PHY_RDY);
adf7242_clear_irqstat(lp);
@@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
unsigned int xmit;
u8 irq1;
+ mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
@@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi)
spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);
spi_set_drvdata(spi, lp);
+ INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
+ lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
+ WQ_MEM_RECLAIM);
ret = adf7242_hw_init(lp);
if (ret)
@@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi)
if (!IS_ERR_OR_NULL(lp->debugfs_root))
debugfs_remove_recursive(lp->debugfs_root);
+ cancel_delayed_work_sync(&lp->work);
+ destroy_workqueue(lp->wqueue);
+
ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
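Since both adf7242_cmd_rx() and the interrupt handler call mod_delayed_work(), each receive event pushes the 400 ms deadline back, so the OCL recalibration in adf7242_rx_cal_work() fires only after the receiver has been idle for 400 ms, and the FLAG_XMIT test keeps it from disturbing a transmission in flight.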
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 77abedf0b524..3d9e91579866 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
static int
at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
}
@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for saddr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
__at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for pan id\n");
+ dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
__at86rf230_write(lp, RG_PAN_ID_0, pan);
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u8 i, addr[8];
memcpy(addr, &filt->ieee_addr, 8);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for panc change\n");
+ dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
if (filt->pan_coord)
at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
else
@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
-
static int
at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 0d673f7682ee..3b0588d7e702 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -49,7 +49,7 @@ struct fakelb_phy {
static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
@@ -254,6 +254,9 @@ static __init int fakelb_init_module(void)
{
ieee802154fake_dev = platform_device_register_simple(
"ieee802154fakelb", -1, NULL, 0);
+
+ pr_warn("fakelb driver is marked as deprecated, please use mac802154_hwsim!\n");
+
return platform_driver_register(&ieee802154fake_driver);
}
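fakelb hard-wires every radio to every other one; the mac802154_hwsim driver added below starts out the same way (hwsim_probe() registers two fully connected radios) but also lets the edges and their LQI values be reconfigured over netlink, which is what makes it the designated replacement.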
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
new file mode 100644
index 000000000000..bf70ab892e69
--- /dev/null
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -0,0 +1,937 @@
+/*
+ * HWSIM IEEE 802.15.4 interface
+ *
+ * (C) 2018 Mojatau, Alexander Aring <aring@mojatau.com>
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based on fakelb, original Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
+#include <linux/netdevice.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <net/mac802154.h>
+#include <net/cfg802154.h>
+#include <net/genetlink.h>
+#include "mac802154_hwsim.h"
+
+MODULE_DESCRIPTION("Software simulator of IEEE 802.15.4 radio(s) for mac802154");
+MODULE_LICENSE("GPL");
+
+static LIST_HEAD(hwsim_phys);
+static DEFINE_MUTEX(hwsim_phys_lock);
+
+static LIST_HEAD(hwsim_ifup_phys);
+
+static struct platform_device *mac802154hwsim_dev;
+
+/* MAC802154_HWSIM netlink family */
+static struct genl_family hwsim_genl_family;
+
+static int hwsim_radio_idx;
+
+enum hwsim_multicast_groups {
+ HWSIM_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group hwsim_mcgrps[] = {
+ [HWSIM_MCGRP_CONFIG] = { .name = "config", },
+};
+
+struct hwsim_pib {
+ u8 page;
+ u8 channel;
+
+ struct rcu_head rcu;
+};
+
+struct hwsim_edge_info {
+ u8 lqi;
+
+ struct rcu_head rcu;
+};
+
+struct hwsim_edge {
+ struct hwsim_phy *endpoint;
+ struct hwsim_edge_info __rcu *info;
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+struct hwsim_phy {
+ struct ieee802154_hw *hw;
+ u32 idx;
+
+ struct hwsim_pib __rcu *pib;
+
+ bool suspended;
+ struct list_head edges;
+
+ struct list_head list;
+ struct list_head list_ifup;
+};
+
+static int hwsim_add_one(struct genl_info *info, struct device *dev,
+ bool init);
+static void hwsim_del(struct hwsim_phy *phy);
+
+static int hwsim_hw_ed(struct ieee802154_hw *hw, u8 *level)
+{
+ *level = 0xbe;
+
+ return 0;
+}
+
+static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct hwsim_phy *phy = hw->priv;
+ struct hwsim_pib *pib, *pib_old;
+
+ pib = kzalloc(sizeof(*pib), GFP_KERNEL);
+ if (!pib)
+ return -ENOMEM;
+
+ pib->page = page;
+ pib->channel = channel;
+
+ pib_old = rtnl_dereference(phy->pib);
+ rcu_assign_pointer(phy->pib, pib);
+ kfree_rcu(pib_old, rcu);
+ return 0;
+}
+
+static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct hwsim_phy *current_phy = hw->priv;
+ struct hwsim_pib *current_pib, *endpoint_pib;
+ struct hwsim_edge_info *einfo;
+ struct hwsim_edge *e;
+
+ WARN_ON(current_phy->suspended);
+
+ rcu_read_lock();
+ current_pib = rcu_dereference(current_phy->pib);
+ list_for_each_entry_rcu(e, &current_phy->edges, list) {
+		/* Can change concurrently with rx_irqsafe, but this is
+		 * only a performance tweak: the receiving radio's
+		 * mac802154 stack drops the frame anyway, so we do not
+		 * need fully locked access to check the suspended state.
+		 */
+ if (e->endpoint->suspended)
+ continue;
+
+ endpoint_pib = rcu_dereference(e->endpoint->pib);
+ if (current_pib->page == endpoint_pib->page &&
+ current_pib->channel == endpoint_pib->channel) {
+ struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
+
+ einfo = rcu_dereference(e->info);
+ if (newskb)
+ ieee802154_rx_irqsafe(e->endpoint->hw, newskb,
+ einfo->lqi);
+ }
+ }
+ rcu_read_unlock();
+
+ ieee802154_xmit_complete(hw, skb, false);
+ return 0;
+}
+
+static int hwsim_hw_start(struct ieee802154_hw *hw)
+{
+ struct hwsim_phy *phy = hw->priv;
+
+ phy->suspended = false;
+ list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys);
+ synchronize_rcu();
+
+ return 0;
+}
+
+static void hwsim_hw_stop(struct ieee802154_hw *hw)
+{
+ struct hwsim_phy *phy = hw->priv;
+
+ phy->suspended = true;
+ list_del_rcu(&phy->list_ifup);
+ synchronize_rcu();
+}
+
+static int
+hwsim_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+ return 0;
+}
+
+static const struct ieee802154_ops hwsim_ops = {
+ .owner = THIS_MODULE,
+ .xmit_async = hwsim_hw_xmit,
+ .ed = hwsim_hw_ed,
+ .set_channel = hwsim_hw_channel,
+ .start = hwsim_hw_start,
+ .stop = hwsim_hw_stop,
+ .set_promiscuous_mode = hwsim_set_promiscuous_mode,
+};
+
+static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ return hwsim_add_one(info, &mac802154hwsim_dev->dev, false);
+}
+
+static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct hwsim_phy *phy, *tmp;
+ s64 idx = -1;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) {
+ if (idx == phy->idx) {
+ hwsim_del(phy);
+ mutex_unlock(&hwsim_phys_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&hwsim_phys_lock);
+
+ return -ENODEV;
+}
+
+static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy)
+{
+ struct nlattr *nl_edges, *nl_edge;
+ struct hwsim_edge_info *einfo;
+ struct hwsim_edge *e;
+ int ret;
+
+ ret = nla_put_u32(skb, MAC802154_HWSIM_ATTR_RADIO_ID, phy->idx);
+ if (ret < 0)
+ return ret;
+
+ rcu_read_lock();
+ if (list_empty(&phy->edges)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ nl_edges = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGES);
+ if (!nl_edges) {
+ rcu_read_unlock();
+ return -ENOBUFS;
+ }
+
+ list_for_each_entry_rcu(e, &phy->edges, list) {
+ nl_edge = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGE);
+ if (!nl_edge) {
+ rcu_read_unlock();
+ nla_nest_cancel(skb, nl_edges);
+ return -ENOBUFS;
+ }
+
+ ret = nla_put_u32(skb, MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID,
+ e->endpoint->idx);
+ if (ret < 0) {
+ rcu_read_unlock();
+ nla_nest_cancel(skb, nl_edge);
+ nla_nest_cancel(skb, nl_edges);
+ return ret;
+ }
+
+ einfo = rcu_dereference(e->info);
+ ret = nla_put_u8(skb, MAC802154_HWSIM_EDGE_ATTR_LQI,
+ einfo->lqi);
+ if (ret < 0) {
+ rcu_read_unlock();
+ nla_nest_cancel(skb, nl_edge);
+ nla_nest_cancel(skb, nl_edges);
+ return ret;
+ }
+
+ nla_nest_end(skb, nl_edge);
+ }
+ rcu_read_unlock();
+
+ nla_nest_end(skb, nl_edges);
+
+ return 0;
+}
+
+static int hwsim_get_radio(struct sk_buff *skb, struct hwsim_phy *phy,
+ u32 portid, u32 seq,
+ struct netlink_callback *cb, int flags)
+{
+ void *hdr;
+ int res = -EMSGSIZE;
+
+ hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
+ MAC802154_HWSIM_CMD_GET_RADIO);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr);
+
+ res = append_radio_msg(skb, phy);
+ if (res < 0)
+ goto out_err;
+
+ genlmsg_end(skb, hdr);
+ return 0;
+
+out_err:
+ genlmsg_cancel(skb, hdr);
+ return res;
+}
+
+static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct hwsim_phy *phy;
+ struct sk_buff *skb;
+ int idx, res = -ENODEV;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+ idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry(phy, &hwsim_phys, list) {
+ if (phy->idx != idx)
+ continue;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ res = -ENOMEM;
+ goto out_err;
+ }
+
+ res = hwsim_get_radio(skb, phy, info->snd_portid,
+ info->snd_seq, NULL, 0);
+ if (res < 0) {
+ nlmsg_free(skb);
+ goto out_err;
+ }
+
+ genlmsg_reply(skb, info);
+ break;
+ }
+
+out_err:
+ mutex_unlock(&hwsim_phys_lock);
+
+ return res;
+}
+
+static int hwsim_dump_radio_nl(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx = cb->args[0];
+ struct hwsim_phy *phy;
+ int res;
+
+ mutex_lock(&hwsim_phys_lock);
+
+ if (idx == hwsim_radio_idx)
+ goto done;
+
+ list_for_each_entry(phy, &hwsim_phys, list) {
+ if (phy->idx < idx)
+ continue;
+
+ res = hwsim_get_radio(skb, phy, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
+ if (res < 0)
+ break;
+
+ idx = phy->idx + 1;
+ }
+
+ cb->args[0] = idx;
+
+done:
+ mutex_unlock(&hwsim_phys_lock);
+ return skb->len;
+}
+
+/* caller needs to hold hwsim_phys_lock */
+static struct hwsim_phy *hwsim_get_radio_by_id(uint32_t idx)
+{
+ struct hwsim_phy *phy;
+
+ list_for_each_entry(phy, &hwsim_phys, list) {
+ if (phy->idx == idx)
+ return phy;
+ }
+
+ return NULL;
+}
+
+static const struct nla_policy hwsim_edge_policy[MAC802154_HWSIM_EDGE_ATTR_MAX + 1] = {
+ [MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] = { .type = NLA_U32 },
+ [MAC802154_HWSIM_EDGE_ATTR_LQI] = { .type = NLA_U8 },
+};
+
+static struct hwsim_edge *hwsim_alloc_edge(struct hwsim_phy *endpoint, u8 lqi)
+{
+ struct hwsim_edge_info *einfo;
+ struct hwsim_edge *e;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ kfree(e);
+ return NULL;
+ }
+
+	einfo->lqi = lqi;
+ rcu_assign_pointer(e->info, einfo);
+ e->endpoint = endpoint;
+
+ return e;
+}
+
+static void hwsim_free_edge(struct hwsim_edge *e)
+{
+ struct hwsim_edge_info *einfo;
+
+ rcu_read_lock();
+ einfo = rcu_dereference(e->info);
+ rcu_read_unlock();
+
+ kfree_rcu(einfo, rcu);
+ kfree_rcu(e, rcu);
+}
+
+static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
+ struct hwsim_phy *phy_v0, *phy_v1;
+ struct hwsim_edge *e;
+ u32 v0, v1;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
+ return -EINVAL;
+
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
+ info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
+ hwsim_edge_policy, NULL))
+ return -EINVAL;
+
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
+ return -EINVAL;
+
+ v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+ v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]);
+
+ if (v0 == v1)
+ return -EINVAL;
+
+ mutex_lock(&hwsim_phys_lock);
+ phy_v0 = hwsim_get_radio_by_id(v0);
+ if (!phy_v0) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ phy_v1 = hwsim_get_radio_by_id(v1);
+ if (!phy_v1) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy_v0->edges, list) {
+ if (e->endpoint->idx == v1) {
+ mutex_unlock(&hwsim_phys_lock);
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ }
+ rcu_read_unlock();
+
+ e = hwsim_alloc_edge(phy_v1, 0xff);
+ if (!e) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOMEM;
+ }
+ list_add_rcu(&e->list, &phy_v0->edges);
+	/* Wait until the changes are committed under hwsim_phys_lock;
+	 * this prevents a second call from seeing an edges list that
+	 * does not reflect the changes yet.
+	 */
+ synchronize_rcu();
+ mutex_unlock(&hwsim_phys_lock);
+
+ return 0;
+}
+
+static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
+ struct hwsim_phy *phy_v0;
+ struct hwsim_edge *e;
+ u32 v0, v1;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
+ return -EINVAL;
+
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
+ info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
+ hwsim_edge_policy, NULL))
+ return -EINVAL;
+
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
+ return -EINVAL;
+
+ v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+ v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]);
+
+ mutex_lock(&hwsim_phys_lock);
+ phy_v0 = hwsim_get_radio_by_id(v0);
+ if (!phy_v0) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy_v0->edges, list) {
+ if (e->endpoint->idx == v1) {
+ rcu_read_unlock();
+ list_del_rcu(&e->list);
+ hwsim_free_edge(e);
+ /* same again - wait until list changes are done */
+ synchronize_rcu();
+ mutex_unlock(&hwsim_phys_lock);
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
+ mutex_unlock(&hwsim_phys_lock);
+
+ return -ENOENT;
+}
+
+static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
+{
+ struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
+	struct hwsim_edge_info *einfo, *einfo_old;
+ struct hwsim_phy *phy_v0;
+ struct hwsim_edge *e;
+ u32 v0, v1;
+ u8 lqi;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
+ return -EINVAL;
+
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
+ info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
+ hwsim_edge_policy, NULL))
+ return -EINVAL;
+
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+ !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
+ return -EINVAL;
+
+ v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+ v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]);
+ lqi = nla_get_u8(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]);
+
+ mutex_lock(&hwsim_phys_lock);
+ phy_v0 = hwsim_get_radio_by_id(v0);
+ if (!phy_v0) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOMEM;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy_v0->edges, list) {
+ if (e->endpoint->idx == v1) {
+			einfo->lqi = lqi;
+			einfo_old = rcu_dereference(e->info);
+			rcu_assign_pointer(e->info, einfo);
+			rcu_read_unlock();
+			kfree_rcu(einfo_old, rcu);	/* don't leak the old info */
+			mutex_unlock(&hwsim_phys_lock);
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
+ kfree(einfo);
+ mutex_unlock(&hwsim_phys_lock);
+
+ return -ENOENT;
+}
+
+/* MAC802154_HWSIM netlink policy */
+
+static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] = {
+ [MAC802154_HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 },
+ [MAC802154_HWSIM_ATTR_RADIO_EDGE] = { .type = NLA_NESTED },
+ [MAC802154_HWSIM_ATTR_RADIO_EDGES] = { .type = NLA_NESTED },
+};
+
+/* Generic Netlink operations array */
+static const struct genl_ops hwsim_nl_ops[] = {
+ {
+ .cmd = MAC802154_HWSIM_CMD_NEW_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_new_radio_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_DEL_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_del_radio_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_GET_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_get_radio_nl,
+ .dumpit = hwsim_dump_radio_nl,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_NEW_EDGE,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_new_edge_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_DEL_EDGE,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_del_edge_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_SET_EDGE,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_set_edge_lqi,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+};
+
+static struct genl_family hwsim_genl_family __ro_after_init = {
+ .name = "MAC802154_HWSIM",
+ .version = 1,
+ .maxattr = MAC802154_HWSIM_ATTR_MAX,
+ .module = THIS_MODULE,
+ .ops = hwsim_nl_ops,
+ .n_ops = ARRAY_SIZE(hwsim_nl_ops),
+ .mcgrps = hwsim_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
+};
+
+static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
+ struct genl_info *info)
+{
+ if (info)
+ genl_notify(&hwsim_genl_family, mcast_skb, info,
+ HWSIM_MCGRP_CONFIG, GFP_KERNEL);
+ else
+ genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0,
+ HWSIM_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static void hwsim_mcast_new_radio(struct genl_info *info, struct hwsim_phy *phy)
+{
+ struct sk_buff *mcast_skb;
+ void *data;
+
+ mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!mcast_skb)
+ return;
+
+ data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0,
+ MAC802154_HWSIM_CMD_NEW_RADIO);
+ if (!data)
+ goto out_err;
+
+ if (append_radio_msg(mcast_skb, phy) < 0)
+ goto out_err;
+
+ genlmsg_end(mcast_skb, data);
+
+ hwsim_mcast_config_msg(mcast_skb, info);
+ return;
+
+out_err:
+ genlmsg_cancel(mcast_skb, data);
+ nlmsg_free(mcast_skb);
+}
+
+static void hwsim_edge_unsubscribe_me(struct hwsim_phy *phy)
+{
+ struct hwsim_phy *tmp;
+ struct hwsim_edge *e;
+
+ rcu_read_lock();
+	/* walk every phy's edge list and remove this phy from it */
+ list_for_each_entry(tmp, &hwsim_phys, list) {
+ list_for_each_entry_rcu(e, &tmp->edges, list) {
+ if (e->endpoint->idx == phy->idx) {
+ list_del_rcu(&e->list);
+ hwsim_free_edge(e);
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ synchronize_rcu();
+}
+
+static int hwsim_subscribe_all_others(struct hwsim_phy *phy)
+{
+ struct hwsim_phy *sub;
+ struct hwsim_edge *e;
+
+ list_for_each_entry(sub, &hwsim_phys, list) {
+ e = hwsim_alloc_edge(sub, 0xff);
+ if (!e)
+ goto me_fail;
+
+ list_add_rcu(&e->list, &phy->edges);
+ }
+
+ list_for_each_entry(sub, &hwsim_phys, list) {
+ e = hwsim_alloc_edge(phy, 0xff);
+ if (!e)
+ goto sub_fail;
+
+ list_add_rcu(&e->list, &sub->edges);
+ }
+
+ return 0;
+
+me_fail:
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy->edges, list) {
+ list_del_rcu(&e->list);
+ hwsim_free_edge(e);
+ }
+ rcu_read_unlock();
+sub_fail:
+ hwsim_edge_unsubscribe_me(phy);
+ return -ENOMEM;
+}
+
+static int hwsim_add_one(struct genl_info *info, struct device *dev,
+ bool init)
+{
+ struct ieee802154_hw *hw;
+ struct hwsim_phy *phy;
+ struct hwsim_pib *pib;
+ int idx;
+ int err;
+
+ idx = hwsim_radio_idx++;
+
+ hw = ieee802154_alloc_hw(sizeof(*phy), &hwsim_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ phy = hw->priv;
+ phy->hw = hw;
+
+ /* 868 MHz BPSK 802.15.4-2003 */
+ hw->phy->supported.channels[0] |= 1;
+ /* 915 MHz BPSK 802.15.4-2003 */
+ hw->phy->supported.channels[0] |= 0x7fe;
+ /* 2.4 GHz O-QPSK 802.15.4-2003 */
+ hw->phy->supported.channels[0] |= 0x7FFF800;
+ /* 868 MHz ASK 802.15.4-2006 */
+ hw->phy->supported.channels[1] |= 1;
+ /* 915 MHz ASK 802.15.4-2006 */
+ hw->phy->supported.channels[1] |= 0x7fe;
+ /* 868 MHz O-QPSK 802.15.4-2006 */
+ hw->phy->supported.channels[2] |= 1;
+ /* 915 MHz O-QPSK 802.15.4-2006 */
+ hw->phy->supported.channels[2] |= 0x7fe;
+ /* 2.4 GHz CSS 802.15.4a-2007 */
+ hw->phy->supported.channels[3] |= 0x3fff;
+ /* UWB Sub-gigahertz 802.15.4a-2007 */
+ hw->phy->supported.channels[4] |= 1;
+ /* UWB Low band 802.15.4a-2007 */
+ hw->phy->supported.channels[4] |= 0x1e;
+ /* UWB High band 802.15.4a-2007 */
+ hw->phy->supported.channels[4] |= 0xffe0;
+ /* 750 MHz O-QPSK 802.15.4c-2009 */
+ hw->phy->supported.channels[5] |= 0xf;
+ /* 750 MHz MPSK 802.15.4c-2009 */
+ hw->phy->supported.channels[5] |= 0xf0;
+ /* 950 MHz BPSK 802.15.4d-2009 */
+ hw->phy->supported.channels[6] |= 0x3ff;
+ /* 950 MHz GFSK 802.15.4d-2009 */
+ hw->phy->supported.channels[6] |= 0x3ffc00;
+
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+ /* hwsim phy channel 13 as default */
+ hw->phy->current_channel = 13;
+ pib = kzalloc(sizeof(*pib), GFP_KERNEL);
+ if (!pib) {
+ err = -ENOMEM;
+ goto err_pib;
+ }
+
+ rcu_assign_pointer(phy->pib, pib);
+ phy->idx = idx;
+ INIT_LIST_HEAD(&phy->edges);
+
+ hw->flags = IEEE802154_HW_PROMISCUOUS;
+ hw->parent = dev;
+
+ err = ieee802154_register_hw(hw);
+ if (err)
+ goto err_reg;
+
+ mutex_lock(&hwsim_phys_lock);
+ if (init) {
+ err = hwsim_subscribe_all_others(phy);
+ if (err < 0) {
+ mutex_unlock(&hwsim_phys_lock);
+			goto err_subscribe;
+ }
+ }
+ list_add_tail(&phy->list, &hwsim_phys);
+ mutex_unlock(&hwsim_phys_lock);
+
+ hwsim_mcast_new_radio(info, phy);
+
+ return idx;
+
+err_subscribe:
+	ieee802154_unregister_hw(phy->hw);
+err_reg:
+	kfree(pib);
+err_pib:
+ ieee802154_free_hw(phy->hw);
+ return err;
+}
+
+static void hwsim_del(struct hwsim_phy *phy)
+{
+ struct hwsim_pib *pib;
+
+ hwsim_edge_unsubscribe_me(phy);
+
+ list_del(&phy->list);
+
+ rcu_read_lock();
+ pib = rcu_dereference(phy->pib);
+ rcu_read_unlock();
+
+ kfree_rcu(pib, rcu);
+
+ ieee802154_unregister_hw(phy->hw);
+ ieee802154_free_hw(phy->hw);
+}
+
+static int hwsim_probe(struct platform_device *pdev)
+{
+ struct hwsim_phy *phy, *tmp;
+ int err, i;
+
+ for (i = 0; i < 2; i++) {
+ err = hwsim_add_one(NULL, &pdev->dev, true);
+ if (err < 0)
+ goto err_slave;
+ }
+
+ dev_info(&pdev->dev, "Added 2 mac802154 hwsim hardware radios\n");
+ return 0;
+
+err_slave:
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &hwsim_phys, list)
+ hwsim_del(phy);
+ mutex_unlock(&hwsim_phys_lock);
+ return err;
+}
+
+static int hwsim_remove(struct platform_device *pdev)
+{
+ struct hwsim_phy *phy, *tmp;
+
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &hwsim_phys, list)
+ hwsim_del(phy);
+ mutex_unlock(&hwsim_phys_lock);
+
+ return 0;
+}
+
+static struct platform_driver mac802154hwsim_driver = {
+ .probe = hwsim_probe,
+ .remove = hwsim_remove,
+ .driver = {
+ .name = "mac802154_hwsim",
+ },
+};
+
+static __init int hwsim_init_module(void)
+{
+ int rc;
+
+ rc = genl_register_family(&hwsim_genl_family);
+ if (rc)
+ return rc;
+
+ mac802154hwsim_dev = platform_device_register_simple("mac802154_hwsim",
+ -1, NULL, 0);
+ if (IS_ERR(mac802154hwsim_dev)) {
+ rc = PTR_ERR(mac802154hwsim_dev);
+ goto platform_dev;
+ }
+
+ rc = platform_driver_register(&mac802154hwsim_driver);
+ if (rc < 0)
+ goto platform_drv;
+
+ return 0;
+
+platform_drv:
+ genl_unregister_family(&hwsim_genl_family);
+platform_dev:
+ platform_device_unregister(mac802154hwsim_dev);
+ return rc;
+}
+
+static __exit void hwsim_remove_module(void)
+{
+ genl_unregister_family(&hwsim_genl_family);
+ platform_driver_unregister(&mac802154hwsim_driver);
+ platform_device_unregister(mac802154hwsim_dev);
+}
+
+module_init(hwsim_init_module);
+module_exit(hwsim_remove_module);
diff --git a/drivers/net/ieee802154/mac802154_hwsim.h b/drivers/net/ieee802154/mac802154_hwsim.h
new file mode 100644
index 000000000000..6c6e30e3871d
--- /dev/null
+++ b/drivers/net/ieee802154/mac802154_hwsim.h
@@ -0,0 +1,73 @@
+#ifndef __MAC802154_HWSIM_H
+#define __MAC802154_HWSIM_H
+
+/* mac802154 hwsim netlink commands
+ *
+ * @MAC802154_HWSIM_CMD_UNSPEC: unspecified command to catch errors
+ * @MAC802154_HWSIM_CMD_GET_RADIO: fetch information about existing radios
+ * @MAC802154_HWSIM_CMD_SET_RADIO: change radio parameters during runtime
+ * @MAC802154_HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters
+ *	returns the radio ID (>= 0) on success or a negative error code;
+ *	on success the result is also multicast
+ * @MAC802154_HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted
+ * @MAC802154_HWSIM_CMD_GET_EDGE: fetch information about existing edges
+ * @MAC802154_HWSIM_CMD_SET_EDGE: change edge parameters during runtime
+ * @MAC802154_HWSIM_CMD_DEL_EDGE: delete edges between radios
+ * @MAC802154_HWSIM_CMD_NEW_EDGE: create a new edge between two radios
+ * @__MAC802154_HWSIM_CMD_MAX: enum limit
+ */
+enum {
+ MAC802154_HWSIM_CMD_UNSPEC,
+
+ MAC802154_HWSIM_CMD_GET_RADIO,
+ MAC802154_HWSIM_CMD_SET_RADIO,
+ MAC802154_HWSIM_CMD_NEW_RADIO,
+ MAC802154_HWSIM_CMD_DEL_RADIO,
+
+ MAC802154_HWSIM_CMD_GET_EDGE,
+ MAC802154_HWSIM_CMD_SET_EDGE,
+ MAC802154_HWSIM_CMD_DEL_EDGE,
+ MAC802154_HWSIM_CMD_NEW_EDGE,
+
+ __MAC802154_HWSIM_CMD_MAX,
+};
+
+#define MAC802154_HWSIM_CMD_MAX (__MAC802154_HWSIM_CMD_MAX - 1)
+
+/* mac802154 hwsim netlink attributes
+ *
+ * @MAC802154_HWSIM_ATTR_UNSPEC: unspecified attribute to catch errors
+ * @MAC802154_HWSIM_ATTR_RADIO_ID: u32 attribute to identify the radio
+ * @MAC802154_HWSIM_ATTR_RADIO_EDGE: nested attribute describing a single edge
+ * @MAC802154_HWSIM_ATTR_RADIO_EDGES: list of nested attributes which contain
+ *	the edge information for the given radio id
+ * @__MAC802154_HWSIM_ATTR_MAX: enum limit
+ */
+enum {
+ MAC802154_HWSIM_ATTR_UNSPEC,
+ MAC802154_HWSIM_ATTR_RADIO_ID,
+ MAC802154_HWSIM_ATTR_RADIO_EDGE,
+ MAC802154_HWSIM_ATTR_RADIO_EDGES,
+ __MAC802154_HWSIM_ATTR_MAX,
+};
+
+#define MAC802154_HWSIM_ATTR_MAX (__MAC802154_HWSIM_ATTR_MAX - 1)
+
+/* mac802154 hwsim edge netlink attributes
+ *
+ * @MAC802154_HWSIM_EDGE_ATTR_UNSPEC: unspecified attribute to catch errors
+ * @MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID: radio id that the edge points to
+ * @MAC802154_HWSIM_EDGE_ATTR_LQI: LQI value which the endpoint radio will
+ * receive for this edge
+ * @__MAC802154_HWSIM_EDGE_ATTR_MAX: enum limit
+ */
+enum {
+ MAC802154_HWSIM_EDGE_ATTR_UNSPEC,
+ MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID,
+ MAC802154_HWSIM_EDGE_ATTR_LQI,
+ __MAC802154_HWSIM_EDGE_ATTR_MAX,
+};
+
+#define MAC802154_HWSIM_EDGE_ATTR_MAX (__MAC802154_HWSIM_EDGE_ATTR_MAX - 1)
+
+#endif /* __MAC802154_HWSIM_H */
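Editor's note (illustration, not part of the patch): a minimal userspace sketch of driving these enums with libnl-3. The generic netlink family name ("MAC802154_HWSIM") and the exact attribute layout NEW_EDGE expects are assumptions inferred from this header, not confirmed by the hunk:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "mac802154_hwsim.h"

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	struct nlattr *edge;
	int family;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "MAC802154_HWSIM"); /* assumed name */
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    MAC802154_HWSIM_CMD_NEW_EDGE, 1);
	nla_put_u32(msg, MAC802154_HWSIM_ATTR_RADIO_ID, 0);
	edge = nla_nest_start(msg, MAC802154_HWSIM_ATTR_RADIO_EDGE);
	nla_put_u32(msg, MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID, 1);
	nla_nest_end(msg, edge);

	if (nl_send_auto(sk, msg) < 0)
		return 1;
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}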
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index de0d7f28a181..e428277781ac 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -15,10 +15,11 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index d02f0a7c534e..4a949569ec4c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
{
struct ipvl_dev *ipvlan;
struct net_device *mdev = port->dev;
- int err = 0;
+ unsigned int flags;
+ int err;
ASSERT_RTNL();
if (port->mode != nval) {
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ flags = ipvlan->dev->flags;
+ if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+ err = dev_change_flags(ipvlan->dev,
+ flags | IFF_NOARP);
+ } else {
+ err = dev_change_flags(ipvlan->dev,
+ flags & ~IFF_NOARP);
+ }
+ if (unlikely(err))
+ goto fail;
+ }
if (nval == IPVLAN_MODE_L3S) {
/* New mode is L3S */
err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
mdev->priv_flags |= IFF_L3MDEV_MASTER;
} else
- return err;
+ goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
mdev->l3mdev_ops = NULL;
}
- list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
- ipvlan->dev->flags |= IFF_NOARP;
- else
- ipvlan->dev->flags &= ~IFF_NOARP;
- }
port->mode = nval;
}
+ return 0;
+
+fail:
+ /* Undo the flags changes that have been done so far. */
+ list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+ flags = ipvlan->dev->flags;
+ if (port->mode == IPVLAN_MODE_L3 ||
+ port->mode == IPVLAN_MODE_L3S)
+ dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+ else
+ dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+ }
+
return err;
}
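Editor's note (illustration, not part of the patch): the hunk above adopts a rollback pattern: apply a change to each list element, and on failure walk back over the elements already changed (list_for_each_entry_continue_reverse in the kernel) and restore their previous state. A standalone C sketch of the same idea over an array:

#include <stdio.h>

#define N 4

int main(void)
{
	int flags[N] = {0, 0, 0, 0};
	int i, failed_at = 2; /* pretend element 2 rejects the change */

	for (i = 0; i < N; i++) {
		if (i == failed_at)
			goto fail;
		flags[i] |= 1; /* stand-in for IFF_NOARP */
	}
	return 0;

fail:
	/* undo only what was changed, in reverse order */
	while (--i >= 0)
		flags[i] &= ~1;
	for (i = 0; i < N; i++)
		printf("flags[%d]=%d\n", i, flags[i]);
	return 1;
}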
@@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan->phy_dev = phy_dev;
ipvlan->dev = dev;
ipvlan->sfeatures = IPVLAN_FEATURES;
- ipvlan_adjust_mtu(ipvlan, phy_dev);
+ if (!tb[IFLA_MTU])
+ ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
spin_lock_init(&ipvlan->addrs_lock);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index adde8fc45588..cfda146f3b3b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
- void *accel_priv = NULL;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
@@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
return NET_XMIT_SUCCESS;
}
}
-
- /* For packets that are non-multicast and not bridged we will pass
- * the necessary information so that the lowerdev can distinguish
- * the source of the packets via the accel_priv value.
- */
- accel_priv = vlan->accel_priv;
xmit_world:
skb->dev = vlan->lowerdev;
- return dev_queue_xmit_accel(skb, accel_priv);
+ return dev_queue_xmit_accel(skb,
+ netdev_get_sb_channel(dev) ? dev : NULL);
}
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
@@ -1647,6 +1641,7 @@ static int macvlan_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
list_for_each_entry(vlan, &port->vlans, list)
netif_stacked_transfer_operstate(vlan->lowerdev,
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 4f390fa557e4..7ae1856d1f18 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
}
static u16 net_failover_select_queue(struct net_device *dev,
- struct sk_buff *skb, void *accel_priv,
+ struct sk_buff *skb,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct net_failover_info *nfo_info = netdev_priv(dev);
@@ -128,9 +129,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
if (ops->ndo_select_queue)
txq = ops->ndo_select_queue(primary_dev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
else
- txq = fallback(primary_dev, skb);
+ txq = fallback(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
@@ -219,14 +220,14 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu)
struct net_device *primary_dev, *standby_dev;
int ret = 0;
- primary_dev = rcu_dereference(nfo_info->primary_dev);
+ primary_dev = rtnl_dereference(nfo_info->primary_dev);
if (primary_dev) {
ret = dev_set_mtu(primary_dev, new_mtu);
if (ret)
return ret;
}
- standby_dev = rcu_dereference(nfo_info->standby_dev);
+ standby_dev = rtnl_dereference(nfo_info->standby_dev);
if (standby_dev) {
ret = dev_set_mtu(standby_dev, new_mtu);
if (ret) {
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 449b2a1a1800..0fee1d06c084 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -13,3 +13,7 @@ endif
ifneq ($(CONFIG_NET_DEVLINK),)
netdevsim-objs += devlink.o fib.o
endif
+
+ifneq ($(CONFIG_XFRM_OFFLOAD),)
+netdevsim-objs += ipsec.o
+endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 75c25306d234..81444208b216 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -92,7 +92,7 @@ static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
- return ns->xdp_prog_mode == XDP_ATTACHED_HW;
+ return ns->xdp_hw.prog;
}
static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
@@ -195,14 +195,14 @@ static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}
-static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+static int
+nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
+ struct xdp_attachment_info *xdp)
{
int err;
- if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) {
- NSIM_EA(bpf->extack, "program loaded with different flags");
+ if (!xdp_attachment_flags_ok(xdp, bpf))
return -EBUSY;
- }
if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
@@ -219,18 +219,7 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
return err;
}
- if (ns->xdp_prog)
- bpf_prog_put(ns->xdp_prog);
-
- ns->xdp_prog = bpf->prog;
- ns->xdp_flags = bpf->flags;
-
- if (!bpf->prog)
- ns->xdp_prog_mode = XDP_ATTACHED_NONE;
- else if (bpf->command == XDP_SETUP_PROG)
- ns->xdp_prog_mode = XDP_ATTACHED_DRV;
- else
- ns->xdp_prog_mode = XDP_ATTACHED_HW;
+ xdp_attachment_setup(xdp, bpf);
return 0;
}
@@ -249,8 +238,8 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
state->state = "verify";
/* Program id is not populated yet when we create the state. */
- sprintf(name, "%u", ns->prog_id_gen++);
- state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
+ sprintf(name, "%u", ns->sdev->prog_id_gen++);
+ state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
if (IS_ERR_OR_NULL(state->ddir)) {
kfree(state);
return -ENOMEM;
@@ -261,7 +250,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
- list_add_tail(&state->l, &ns->bpf_bound_progs);
+ list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);
prog->aux->offload->dev_priv = state;
@@ -290,10 +279,6 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
- if (nsim_xdp_offload_active(ns)) {
- NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog");
- return -EBUSY;
- }
return 0;
}
@@ -309,7 +294,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}
- if (bpf->prog->aux->offload->netdev != ns->netdev) {
+ if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
NSIM_EA(bpf->extack, "program bound to different dev");
return -EINVAL;
}
@@ -512,7 +497,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
}
offmap->dev_ops = &nsim_bpf_map_ops;
- list_add_tail(&nmap->l, &ns->bpf_bound_maps);
+ list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps);
return 0;
@@ -567,22 +552,21 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
nsim_bpf_destroy_prog(bpf->offload.prog);
return 0;
case XDP_QUERY_PROG:
- bpf->prog_attached = ns->xdp_prog_mode;
- bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0;
- bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0;
- return 0;
+ return xdp_attachment_query(&ns->xdp, bpf);
+ case XDP_QUERY_PROG_HW:
+ return xdp_attachment_query(&ns->xdp_hw, bpf);
case XDP_SETUP_PROG:
err = nsim_setup_prog_checks(ns, bpf);
if (err)
return err;
- return nsim_xdp_set_prog(ns, bpf);
+ return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
case XDP_SETUP_PROG_HW:
err = nsim_setup_prog_hw_checks(ns, bpf);
if (err)
return err;
- return nsim_xdp_set_prog(ns, bpf);
+ return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
case BPF_OFFLOAD_MAP_ALLOC:
if (!ns->bpf_map_accept)
return -EOPNOTSUPP;
@@ -598,8 +582,26 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
int nsim_bpf_init(struct netdevsim *ns)
{
- INIT_LIST_HEAD(&ns->bpf_bound_progs);
- INIT_LIST_HEAD(&ns->bpf_bound_maps);
+ int err;
+
+ if (ns->sdev->refcnt == 1) {
+ INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
+ INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);
+
+ ns->sdev->ddir_bpf_bound_progs =
+ debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
+ if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
+ return -ENOMEM;
+
+ ns->sdev->bpf_dev = bpf_offload_dev_create();
+ err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
+ if (err)
+ return err;
+ }
+
+ err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
+ if (err)
+ goto err_destroy_bdev;
debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
&ns->bpf_offloaded_id);
@@ -609,10 +611,6 @@ int nsim_bpf_init(struct netdevsim *ns)
&ns->bpf_bind_accept);
debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
&ns->bpf_bind_verifier_delay);
- ns->ddir_bpf_bound_progs =
- debugfs_create_dir("bpf_bound_progs", ns->ddir);
- if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
- return -ENOMEM;
ns->bpf_tc_accept = true;
debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
@@ -631,12 +629,23 @@ int nsim_bpf_init(struct netdevsim *ns)
&ns->bpf_map_accept);
return 0;
+
+err_destroy_bdev:
+ if (ns->sdev->refcnt == 1)
+ bpf_offload_dev_destroy(ns->sdev->bpf_dev);
+ return err;
}
void nsim_bpf_uninit(struct netdevsim *ns)
{
- WARN_ON(!list_empty(&ns->bpf_bound_progs));
- WARN_ON(!list_empty(&ns->bpf_bound_maps));
- WARN_ON(ns->xdp_prog);
+ WARN_ON(ns->xdp.prog);
+ WARN_ON(ns->xdp_hw.prog);
WARN_ON(ns->bpf_offloaded);
+ bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);
+
+ if (ns->sdev->refcnt == 1) {
+ WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
+ WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
+ bpf_offload_dev_destroy(ns->sdev->bpf_dev);
+ }
}
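Editor's note (illustration, not part of the patch): the netdevsim hunks above key shared-state setup and teardown off a reference count: the first user (refcnt == 1) creates the shared bpf offload state, the last one out destroys it. A minimal standalone C sketch of that pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct shared {
	unsigned int refcnt;
};

static struct shared *port_join(struct shared *sdev)
{
	if (!sdev) {
		sdev = calloc(1, sizeof(*sdev));
		if (!sdev)
			return NULL;
		sdev->refcnt = 1;
		printf("first user: init shared state\n");
	} else {
		sdev->refcnt++;
	}
	return sdev;
}

static void port_leave(struct shared *sdev)
{
	if (!--sdev->refcnt) {
		printf("last user: destroy shared state\n");
		free(sdev);
	}
}

int main(void)
{
	struct shared *sdev = port_join(NULL);
	struct shared *again = port_join(sdev);

	port_leave(again);
	port_leave(sdev);
	return 0;
}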
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
index ba663e5af168..5135fc371f01 100644
--- a/drivers/net/netdevsim/devlink.c
+++ b/drivers/net/netdevsim/devlink.c
@@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns)
struct net *net = nsim_to_net(ns);
bool *reg_devlink = net_generic(net, nsim_devlink_id);
+ devlink_resources_unregister(ns->devlink, NULL);
devlink_unregister(ns->devlink);
devlink_free(ns->devlink);
ns->devlink = NULL;
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
new file mode 100644
index 000000000000..2dcf6cc269d0
--- /dev/null
+++ b/drivers/net/netdevsim/ipsec.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
+
+#include <crypto/aead.h>
+#include <linux/debugfs.h>
+#include <net/xfrm.h>
+
+#include "netdevsim.h"
+
+#define NSIM_IPSEC_AUTH_BITS 128
+
+static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = filp->private_data;
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ size_t bufsize;
+ char *buf, *p;
+ int len;
+ int i;
+
+	/* each SA prints 3 lines of ~60 bytes; size the buffer for
+	 * 4 lines per SA plus one header line for slack
+	 */
+ bufsize = (ipsec->count * 4 * 60) + 60;
+ buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ p = buf;
+ p += snprintf(p, bufsize - (p - buf),
+ "SA count=%u tx=%u\n",
+ ipsec->count, ipsec->tx);
+
+ for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+ struct nsim_sa *sap = &ipsec->sa[i];
+
+ if (!sap->used)
+ continue;
+
+ p += snprintf(p, bufsize - (p - buf),
+ "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
+ i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
+ sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
+ p += snprintf(p, bufsize - (p - buf),
+ "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
+ i, be32_to_cpu(sap->xs->id.spi),
+ sap->xs->id.proto, sap->salt, sap->crypt);
+ p += snprintf(p, bufsize - (p - buf),
+ "sa[%i] key=0x%08x %08x %08x %08x\n",
+ i, sap->key[0], sap->key[1],
+ sap->key[2], sap->key[3]);
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
+
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations ipsec_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = nsim_dbg_netdev_ops_read,
+};
+
+static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
+{
+ u32 i;
+
+ if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search sa table */
+ for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->sa[i].used)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
+ u32 *mykey, u32 *mysalt)
+{
+ const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+ struct net_device *dev = xs->xso.dev;
+ unsigned char *key_data;
+ char *alg_name = NULL;
+ int key_len;
+
+ if (!xs->aead) {
+ netdev_err(dev, "Unsupported IPsec algorithm\n");
+ return -EINVAL;
+ }
+
+ if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) {
+ netdev_err(dev, "IPsec offload requires %d bit authentication\n",
+ NSIM_IPSEC_AUTH_BITS);
+ return -EINVAL;
+ }
+
+ key_data = &xs->aead->alg_key[0];
+ key_len = xs->aead->alg_key_len;
+ alg_name = xs->aead->alg_name;
+
+ if (strcmp(alg_name, aes_gcm_name)) {
+ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+ aes_gcm_name);
+ return -EINVAL;
+ }
+
+	/* a 160-bit key blob carries the 16-byte key plus a 4-byte salt */
+ if (key_len > NSIM_IPSEC_AUTH_BITS) {
+ *mysalt = ((u32 *)key_data)[4];
+ } else if (key_len == NSIM_IPSEC_AUTH_BITS) {
+ *mysalt = 0;
+ } else {
+ netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n");
+ return -EINVAL;
+ }
+ memcpy(mykey, key_data, 16);
+
+ return 0;
+}
+
+static int nsim_ipsec_add_sa(struct xfrm_state *xs)
+{
+ struct nsim_ipsec *ipsec;
+ struct net_device *dev;
+ struct netdevsim *ns;
+ struct nsim_sa sa;
+ u16 sa_idx;
+ int ret;
+
+ dev = xs->xso.dev;
+ ns = netdev_priv(dev);
+ ipsec = &ns->ipsec;
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+ xs->id.proto);
+ return -EINVAL;
+ }
+
+ if (xs->calg) {
+ netdev_err(dev, "Compression offload not supported\n");
+ return -EINVAL;
+ }
+
+ /* find the first unused index */
+ ret = nsim_ipsec_find_empty_idx(ipsec);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Rx table!\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.used = true;
+ sa.xs = xs;
+
+	if (sa.xs->id.proto == IPPROTO_ESP)
+ sa.crypt = xs->ealg || xs->aead;
+
+ /* get the key and salt */
+ ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for SA table\n");
+ return ret;
+ }
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ sa.rx = true;
+
+ if (xs->props.family == AF_INET6)
+ memcpy(sa.ipaddr, &xs->id.daddr.a6, 16);
+ else
+ memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4);
+ }
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa));
+
+ /* the XFRM stack doesn't like offload_handle == 0,
+ * so add a bitflag in case our array index is 0
+ */
+ xs->xso.offload_handle = sa_idx | NSIM_IPSEC_VALID;
+ ipsec->count++;
+
+ return 0;
+}
+
+static void nsim_ipsec_del_sa(struct xfrm_state *xs)
+{
+ struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ u16 sa_idx;
+
+ sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+ if (!ipsec->sa[sa_idx].used) {
+ netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n",
+ sa_idx);
+ return;
+ }
+
+ memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa));
+ ipsec->count--;
+}
+
+static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+ struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+
+ ipsec->ok++;
+
+ return true;
+}
+
+static const struct xfrmdev_ops nsim_xfrmdev_ops = {
+ .xdo_dev_state_add = nsim_ipsec_add_sa,
+ .xdo_dev_state_delete = nsim_ipsec_del_sa,
+ .xdo_dev_offload_ok = nsim_ipsec_offload_ok,
+};
+
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ struct xfrm_state *xs;
+ struct nsim_sa *tsa;
+ u32 sa_idx;
+
+ /* do we even need to check this packet? */
+ if (!skb->sp)
+ return true;
+
+ if (unlikely(!skb->sp->len)) {
+ netdev_err(ns->netdev, "no xfrm state len = %d\n",
+ skb->sp->len);
+ return false;
+ }
+
+ xs = xfrm_input_state(skb);
+ if (unlikely(!xs)) {
+ netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs);
+ return false;
+ }
+
+ sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+ if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) {
+ netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n",
+ sa_idx, NSIM_IPSEC_MAX_SA_COUNT);
+ return false;
+ }
+
+ tsa = &ipsec->sa[sa_idx];
+ if (unlikely(!tsa->used)) {
+ netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx);
+ return false;
+ }
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto);
+ return false;
+ }
+
+ ipsec->tx++;
+
+ return true;
+}
+
+void nsim_ipsec_init(struct netdevsim *ns)
+{
+ ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops;
+
+#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \
+ NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+ ns->netdev->features |= NSIM_ESP_FEATURES;
+ ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
+
+ ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns,
+ &ipsec_dbg_fops);
+}
+
+void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+
+ if (ipsec->count)
+ netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n",
+ ipsec->count);
+ debugfs_remove_recursive(ipsec->pfile);
+}
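Editor's note (illustration, not part of the patch): the offload_handle trick above sets BIT(31) so the handle is never 0 even when the SA sits at array index 0, and masks the bit off again to recover the index. A standalone C demonstration of the encode/decode:

#include <stdio.h>
#include <stdint.h>

#define NSIM_IPSEC_VALID (1u << 31)

int main(void)
{
	uint32_t sa_idx = 0;
	uint32_t handle = sa_idx | NSIM_IPSEC_VALID; /* nonzero even for idx 0 */
	uint32_t back = handle & ~NSIM_IPSEC_VALID;

	printf("handle=0x%08x -> sa_idx=%u\n", handle, back);
	return 0;
}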
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index ec68f38213d9..8d8e2b3f263e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -22,6 +22,7 @@
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
+#include <net/switchdev.h>
#include "netdevsim.h"
@@ -40,6 +41,9 @@ struct nsim_vf_config {
static u32 nsim_dev_id;
+static struct dentry *nsim_ddir;
+static struct dentry *nsim_sdev_ddir;
+
static int nsim_num_vf(struct device *dev)
{
struct netdevsim *ns = to_nsim(dev);
@@ -144,8 +148,29 @@ static struct device_type nsim_dev_type = {
.release = nsim_dev_release,
};
+static int
+nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(ns->sdev->switch_id);
+ memcpy(&attr->u.ppid.id, &ns->sdev->switch_id,
+ attr->u.ppid.id_len);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct switchdev_ops nsim_switchdev_ops = {
+ .switchdev_port_attr_get = nsim_port_attr_get,
+};
+
static int nsim_init(struct net_device *dev)
{
+	char sdev_ddir_name[11], sdev_link_name[32];
struct netdevsim *ns = netdev_priv(dev);
int err;
@@ -154,9 +179,32 @@ static int nsim_init(struct net_device *dev)
if (IS_ERR_OR_NULL(ns->ddir))
return -ENOMEM;
+ if (!ns->sdev) {
+ ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL);
+ if (!ns->sdev) {
+ err = -ENOMEM;
+ goto err_debugfs_destroy;
+ }
+ ns->sdev->refcnt = 1;
+ ns->sdev->switch_id = nsim_dev_id;
+ sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
+ ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name,
+ nsim_sdev_ddir);
+ if (IS_ERR_OR_NULL(ns->sdev->ddir)) {
+ err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL;
+ goto err_sdev_free;
+ }
+ } else {
+ sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
+ ns->sdev->refcnt++;
+ }
+
+ sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name);
+ debugfs_create_symlink("sdev", ns->ddir, sdev_link_name);
+
err = nsim_bpf_init(ns);
if (err)
- goto err_debugfs_destroy;
+ goto err_sdev_destroy;
ns->dev.id = nsim_dev_id++;
ns->dev.bus = &nsim_bus;
@@ -166,17 +214,26 @@ static int nsim_init(struct net_device *dev)
goto err_bpf_uninit;
SET_NETDEV_DEV(dev, &ns->dev);
+ SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops);
err = nsim_devlink_setup(ns);
if (err)
goto err_unreg_dev;
+ nsim_ipsec_init(ns);
+
return 0;
err_unreg_dev:
device_unregister(&ns->dev);
err_bpf_uninit:
nsim_bpf_uninit(ns);
+err_sdev_destroy:
+ if (!--ns->sdev->refcnt) {
+ debugfs_remove_recursive(ns->sdev->ddir);
+err_sdev_free:
+ kfree(ns->sdev);
+ }
err_debugfs_destroy:
debugfs_remove_recursive(ns->ddir);
return err;
@@ -186,9 +243,14 @@ static void nsim_uninit(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ nsim_ipsec_teardown(ns);
nsim_devlink_teardown(ns);
debugfs_remove_recursive(ns->ddir);
nsim_bpf_uninit(ns);
+ if (!--ns->sdev->refcnt) {
+ debugfs_remove_recursive(ns->sdev->ddir);
+ kfree(ns->sdev);
+ }
}
static void nsim_free(struct net_device *dev)
@@ -203,11 +265,15 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ if (!nsim_ipsec_tx(ns, skb))
+ goto out;
+
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
ns->tx_bytes += skb->len;
u64_stats_update_end(&ns->syncp);
+out:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -221,8 +287,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
{
struct netdevsim *ns = netdev_priv(dev);
- if (ns->xdp_prog_mode == XDP_ATTACHED_DRV &&
- new_mtu > NSIM_XDP_MAX_MTU)
+ if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
dev->mtu = new_mtu;
@@ -260,7 +325,7 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
- ns, ns);
+ ns, ns, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
return 0;
@@ -464,15 +529,46 @@ static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int nsim_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (tb[IFLA_LINK]) {
+ struct net_device *joindev;
+ struct netdevsim *joinns;
+
+ joindev = __dev_get_by_index(src_net,
+ nla_get_u32(tb[IFLA_LINK]));
+ if (!joindev)
+ return -ENODEV;
+ if (joindev->netdev_ops != &nsim_netdev_ops)
+ return -EINVAL;
+
+ joinns = netdev_priv(joindev);
+ if (!joinns->sdev || !joinns->sdev->refcnt)
+ return -EINVAL;
+ ns->sdev = joinns->sdev;
+ }
+
+ return register_netdevice(dev);
+}
+
+static void nsim_dellink(struct net_device *dev, struct list_head *head)
+{
+ unregister_netdevice_queue(dev, head);
+}
+
static struct rtnl_link_ops nsim_link_ops __read_mostly = {
.kind = DRV_NAME,
.priv_size = sizeof(struct netdevsim),
.setup = nsim_setup,
.validate = nsim_validate,
+ .newlink = nsim_newlink,
+ .dellink = nsim_dellink,
};
-struct dentry *nsim_ddir;
-
static int __init nsim_module_init(void)
{
int err;
@@ -481,9 +577,15 @@ static int __init nsim_module_init(void)
if (IS_ERR_OR_NULL(nsim_ddir))
return -ENOMEM;
+ nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
+ if (IS_ERR_OR_NULL(nsim_sdev_ddir)) {
+ err = -ENOMEM;
+ goto err_debugfs_destroy;
+ }
+
err = bus_register(&nsim_bus);
if (err)
- goto err_debugfs_destroy;
+ goto err_sdir_destroy;
err = nsim_devlink_init();
if (err)
@@ -499,6 +601,8 @@ err_dl_fini:
nsim_devlink_exit();
err_unreg_bus:
bus_unregister(&nsim_bus);
+err_sdir_destroy:
+ debugfs_remove_recursive(nsim_sdev_ddir);
err_debugfs_destroy:
debugfs_remove_recursive(nsim_ddir);
return err;
@@ -509,6 +613,7 @@ static void __exit nsim_module_exit(void)
rtnl_link_unregister(&nsim_link_ops);
nsim_devlink_exit();
bus_unregister(&nsim_bus);
+ debugfs_remove_recursive(nsim_sdev_ddir);
debugfs_remove_recursive(nsim_ddir);
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 8ca50b72c328..384c254fafc5 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/xdp.h>
#define DRV_NAME "netdevsim"
@@ -26,9 +27,46 @@
#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
struct bpf_prog;
+struct bpf_offload_dev;
struct dentry;
struct nsim_vf_config;
+struct netdevsim_shared_dev {
+ unsigned int refcnt;
+ u32 switch_id;
+
+ struct dentry *ddir;
+
+ struct bpf_offload_dev *bpf_dev;
+
+ struct dentry *ddir_bpf_bound_progs;
+ u32 prog_id_gen;
+
+ struct list_head bpf_bound_progs;
+ struct list_head bpf_bound_maps;
+};
+
+#define NSIM_IPSEC_MAX_SA_COUNT 33
+#define NSIM_IPSEC_VALID BIT(31)
+
+struct nsim_sa {
+ struct xfrm_state *xs;
+ __be32 ipaddr[4];
+ u32 key[4];
+ u32 salt;
+ bool used;
+ bool crypt;
+ bool rx;
+};
+
+struct nsim_ipsec {
+ struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT];
+ struct dentry *pfile;
+ u32 count;
+ u32 tx;
+ u32 ok;
+};
+
struct netdevsim {
struct net_device *netdev;
@@ -37,6 +75,7 @@ struct netdevsim {
struct u64_stats_sync syncp;
struct device dev;
+ struct netdevsim_shared_dev *sdev;
struct dentry *ddir;
@@ -46,16 +85,11 @@ struct netdevsim {
struct bpf_prog *bpf_offloaded;
u32 bpf_offloaded_id;
- u32 xdp_flags;
- int xdp_prog_mode;
- struct bpf_prog *xdp_prog;
-
- u32 prog_id_gen;
+ struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
bool bpf_bind_accept;
u32 bpf_bind_verifier_delay;
- struct dentry *ddir_bpf_bound_progs;
- struct list_head bpf_bound_progs;
bool bpf_tc_accept;
bool bpf_tc_non_bound_accept;
@@ -63,14 +97,12 @@ struct netdevsim {
bool bpf_xdpoffload_accept;
bool bpf_map_accept;
- struct list_head bpf_bound_maps;
#if IS_ENABLED(CONFIG_NET_DEVLINK)
struct devlink *devlink;
#endif
+ struct nsim_ipsec ipsec;
};
-extern struct dentry *nsim_ddir;
-
#ifdef CONFIG_BPF_SYSCALL
int nsim_bpf_init(struct netdevsim *ns);
void nsim_bpf_uninit(struct netdevsim *ns);
@@ -148,6 +180,25 @@ static inline void nsim_devlink_exit(void)
}
#endif
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+void nsim_ipsec_init(struct netdevsim *ns);
+void nsim_ipsec_teardown(struct netdevsim *ns);
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb);
+#else
+static inline void nsim_ipsec_init(struct netdevsim *ns)
+{
+}
+
+static inline void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+}
+
+static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+ return true;
+}
+#endif
+
static inline struct netdevsim *to_nsim(struct device *ptr)
{
return container_of(ptr, struct netdevsim, dev);
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 9f6f7ccd44f7..b12023bc2cab 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -430,7 +430,7 @@ static int ntb_netdev_probe(struct device *client_dev)
ndev->hw_features = ndev->features;
ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
- random_ether_addr(ndev->perm_addr);
+ eth_random_addr(ndev->perm_addr);
memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
ndev->netdev_ops = &ntb_netdev_ops;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 343989f9f9d9..82070792edbb 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -28,7 +28,7 @@ config MDIO_BCM_IPROC
config MDIO_BCM_UNIMAC
tristate "Broadcom UniMAC MDIO bus controller"
- depends on HAS_IOMEM && OF_MDIO
+ depends on HAS_IOMEM
help
This module provides a driver for the Broadcom UniMAC MDIO busses.
This hardware can be found in the Broadcom GENET Ethernet MAC
@@ -92,7 +92,8 @@ config MDIO_CAVIUM
config MDIO_GPIO
tristate "GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG && GPIOLIB
+ depends on MDIO_BITBANG
+ depends on GPIOLIB || COMPILE_TEST
---help---
Supports GPIO lib-based MDIO busses.
@@ -213,6 +214,7 @@ comment "MII PHY device drivers"
config SFP
tristate "SFP cage support"
depends on I2C && PHYLINK
+ depends on HWMON || HWMON=n
select MDIO_I2C
config AMD_PHY
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 01d2ff2f6241..b2b6307d64a4 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -229,6 +229,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
phy_read(phydev, MII_BMSR);
switch (rev) {
+ case 0xa0:
case 0xb0:
ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
break;
@@ -659,6 +660,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
+ BCM7XXX_28NM_GPHY(PHY_ID_BCM_OMEGA, "Broadcom Omega Combo GPHY"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 79e9b103188b..29aa8d772b0c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -757,13 +757,16 @@ static int decode_evnt(struct dp83640_private *dp83640,
phy_txts = data;
- switch (words) { /* fall through in every case */
+ switch (words) {
case 3:
dp83640->edata.sec_hi = phy_txts->sec_hi;
+ /* fall through */
case 2:
dp83640->edata.sec_lo = phy_txts->sec_lo;
+ /* fall through */
case 1:
dp83640->edata.ns_hi = phy_txts->ns_hi;
+ /* fall through */
case 0:
dp83640->edata.ns_lo = phy_txts->ns_lo;
}
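Editor's note (illustration, not part of the patch): the dp83640 hunk replaces one blanket comment with per-case /* fall through */ annotations, because higher word counts deliberately accumulate every field below them. A standalone C demonstration of that cascade:

#include <stdio.h>

int main(void)
{
	int words = 2;
	int sec_hi = 0, sec_lo = 0, ns_hi = 0, ns_lo = 0;

	switch (words) {
	case 3:
		sec_hi = 1;
		/* fall through */
	case 2:
		sec_lo = 1;
		/* fall through */
	case 1:
		ns_hi = 1;
		/* fall through */
	case 0:
		ns_lo = 1;
	}
	printf("%d %d %d %d\n", sec_hi, sec_lo, ns_hi, ns_lo); /* 0 1 1 1 */
	return 0;
}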
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 081d99aa3985..78cad134a79e 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -21,6 +21,7 @@
#define MII_DP83811_SGMII_CTRL 0x09
#define MII_DP83811_INT_STAT1 0x12
#define MII_DP83811_INT_STAT2 0x13
+#define MII_DP83811_INT_STAT3 0x18
#define MII_DP83811_RESET_CTRL 0x1f
#define DP83811_HW_RESET BIT(15)
@@ -44,6 +45,11 @@
#define DP83811_OVERVOLTAGE_INT_EN BIT(6)
#define DP83811_UNDERVOLTAGE_INT_EN BIT(7)
+/* INT_STAT3 bits */
+#define DP83811_LPS_INT_EN BIT(0)
+#define DP83811_NO_FRAME_INT_EN BIT(3)
+#define DP83811_POR_DONE_INT_EN BIT(4)
+
#define MII_DP83811_RXSOP1 0x04a5
#define MII_DP83811_RXSOP2 0x04a6
#define MII_DP83811_RXSOP3 0x04a7
@@ -81,6 +87,10 @@ static int dp83811_ack_interrupt(struct phy_device *phydev)
if (err < 0)
return err;
+ err = phy_read(phydev, MII_DP83811_INT_STAT3);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -216,13 +226,29 @@ static int dp83811_config_intr(struct phy_device *phydev)
DP83811_UNDERVOLTAGE_INT_EN);
err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status);
+ if (err < 0)
+ return err;
+
+ misr_status = phy_read(phydev, MII_DP83811_INT_STAT3);
+ if (misr_status < 0)
+ return misr_status;
+
+ misr_status |= (DP83811_LPS_INT_EN |
+ DP83811_NO_FRAME_INT_EN |
+ DP83811_POR_DONE_INT_EN);
+
+ err = phy_write(phydev, MII_DP83811_INT_STAT3, misr_status);
} else {
err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
if (err < 0)
return err;
- err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+ err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_DP83811_INT_STAT3, 0);
}
return err;
@@ -258,21 +284,19 @@ static int dp83811_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
- if (!(value & DP83811_SGMII_EN)) {
- err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
(DP83811_SGMII_EN | value));
- if (err < 0)
- return err;
- } else {
- err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
- (~DP83811_SGMII_EN & value));
- if (err < 0)
- return err;
- }
+ } else {
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ (~DP83811_SGMII_EN & value));
}
+	if (err < 0)
+		return err;
+
value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;
return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 001fe1df7557..67b260877f30 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -259,10 +259,8 @@ static int __init fixed_mdio_bus_init(void)
int ret;
pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
- goto err_pdev;
- }
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
fmb->mii_bus = mdiobus_alloc();
if (fmb->mii_bus == NULL) {
@@ -287,7 +285,6 @@ err_mdiobus_alloc:
mdiobus_free(fmb->mii_bus);
err_mdiobus_reg:
platform_device_unregister(pdev);
-err_pdev:
return ret;
}
module_init(fixed_mdio_bus_init);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b8f57e9b9379..f7c69ca34056 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -130,8 +130,9 @@
#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
-#define MII_88E1121_PHY_LED_CTRL 16
+#define MII_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_DEF 0x0030
+#define MII_88E1510_PHY_LED_DEF 0x1177
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -632,8 +633,40 @@ error:
return err;
}
+static void marvell_config_led(struct phy_device *phydev)
+{
+ u16 def_config;
+ int err;
+
+ switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
+ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
+ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
+ def_config = MII_88E1121_PHY_LED_DEF;
+ break;
+ /* Default PHY LED config:
+ * LED[0] .. 1000Mbps Link
+ * LED[1] .. 100Mbps Link
+ * LED[2] .. Blink, Activity
+ */
+ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
+ def_config = MII_88E1510_PHY_LED_DEF;
+ break;
+ default:
+ return;
+ }
+
+ err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
+ def_config);
+ if (err < 0)
+		pr_warn("Failed to configure Marvell PHY LED.\n");
+}
+
static int marvell_config_init(struct phy_device *phydev)
{
+	/* Set default LED */
+ marvell_config_led(phydev);
+
/* Set registers from marvell,reg-init DT property */
return marvell_of_reg_init(phydev);
}
@@ -646,7 +679,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- mdelay(500);
+ msleep(500);
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
@@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
-static int m88e1121_config_init(struct phy_device *phydev)
-{
- int err;
-
- /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
- err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
- MII_88E1121_PHY_LED_CTRL,
- MII_88E1121_PHY_LED_DEF);
- if (err < 0)
- return err;
-
- /* Set marvell,reg-init configuration from device tree */
- return marvell_config_init(phydev);
-}
-
static int m88e1318_config_init(struct phy_device *phydev)
{
if (phy_interrupt_is_valid(phydev)) {
@@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev)
return err;
}
- return m88e1121_config_init(phydev);
+ return marvell_config_init(phydev);
}
static int m88e1510_config_init(struct phy_device *phydev)
@@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
- .config_init = &m88e1121_config_init,
+ .config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0831b7142df7..c017486e9b86 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -13,7 +13,7 @@
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
-
+#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/of_mdio.h>
@@ -22,7 +22,14 @@
#include <linux/mdio-mux.h>
#include <linux/delay.h>
-#define MDIO_PARAM_OFFSET 0x00
+#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
+#define MDIO_RATE_ADJ_INT_OFFSET 0x004
+#define MDIO_RATE_ADJ_DIVIDENT_SHIFT 16
+
+#define MDIO_SCAN_CTRL_OFFSET 0x008
+#define MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR 28
+
+#define MDIO_PARAM_OFFSET 0x23c
#define MDIO_PARAM_MIIM_CYCLE 29
#define MDIO_PARAM_INTERNAL_SEL 25
#define MDIO_PARAM_BUS_ID 22
@@ -30,27 +37,56 @@
#define MDIO_PARAM_PHY_ID 16
#define MDIO_PARAM_PHY_DATA 0
-#define MDIO_READ_OFFSET 0x04
+#define MDIO_READ_OFFSET 0x240
#define MDIO_READ_DATA_MASK 0xffff
-#define MDIO_ADDR_OFFSET 0x08
+#define MDIO_ADDR_OFFSET 0x244
-#define MDIO_CTRL_OFFSET 0x0C
+#define MDIO_CTRL_OFFSET 0x248
#define MDIO_CTRL_WRITE_OP 0x1
#define MDIO_CTRL_READ_OP 0x2
-#define MDIO_STAT_OFFSET 0x10
+#define MDIO_STAT_OFFSET 0x24c
#define MDIO_STAT_DONE 1
#define BUS_MAX_ADDR 32
#define EXT_BUS_START_ADDR 16
+#define MDIO_REG_ADDR_SPACE_SIZE 0x250
+
+#define MDIO_OPERATING_FREQUENCY 11000000
+#define MDIO_RATE_ADJ_DIVIDENT 1
+
struct iproc_mdiomux_desc {
void *mux_handle;
void __iomem *base;
struct device *dev;
struct mii_bus *mii_bus;
+ struct clk *core_clk;
};
+static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
+{
+ u32 divisor;
+ u32 val;
+
+ /* Disable external mdio master access */
+ val = readl(md->base + MDIO_SCAN_CTRL_OFFSET);
+ val |= BIT(MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR);
+ writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);
+
+ if (md->core_clk) {
+		/* use the rate adjust registers to derive the MDIO
+		 * operating frequency from the specified core clock
+		 */
+ */
+ divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
+ divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1);
+ val = divisor;
+ val |= MDIO_RATE_ADJ_DIVIDENT << MDIO_RATE_ADJ_DIVIDENT_SHIFT;
+ writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET);
+ writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET);
+ }
+}
+
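Editor's note (illustration, not part of the patch): the rate-adjust math above is divisor = core_rate / MDIO_OPERATING_FREQUENCY, then scaled down by (MDIO_RATE_ADJ_DIVIDENT + 1). A standalone worked example; the 250 MHz core clock rate is a hypothetical value:

#include <stdio.h>

#define MDIO_OPERATING_FREQUENCY 11000000
#define MDIO_RATE_ADJ_DIVIDENT 1

int main(void)
{
	unsigned long core_rate = 250000000; /* assumed core clock rate */
	unsigned int divisor;

	divisor = core_rate / MDIO_OPERATING_FREQUENCY;   /* 22 */
	divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1); /* 11 */
	printf("divisor field = %u\n", divisor);
	return 0;
}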
static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
{
unsigned int timeout = 1000; /* loop for 1s */
@@ -169,18 +205,39 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
md->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res->start & 0xfff) {
+ /* For backward compatibility in case the
+ * base address is specified with an offset.
+ */
+ dev_info(&pdev->dev, "fix base address in dt-blob\n");
+ res->start &= ~0xfff;
+ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
+ }
md->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(md->base)) {
dev_err(&pdev->dev, "failed to ioremap register\n");
return PTR_ERR(md->base);
}
- md->mii_bus = mdiobus_alloc();
+ md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
return -ENOMEM;
}
+ md->core_clk = devm_clk_get(&pdev->dev, NULL);
+ if (md->core_clk == ERR_PTR(-ENOENT) ||
+ md->core_clk == ERR_PTR(-EINVAL))
+ md->core_clk = NULL;
+ else if (IS_ERR(md->core_clk))
+ return PTR_ERR(md->core_clk);
+
+ rc = clk_prepare_enable(md->core_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to enable core clk\n");
+ return rc;
+ }
+
bus = md->mii_bus;
bus->priv = md;
bus->name = "iProc MDIO mux bus";
@@ -194,7 +251,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
rc = mdiobus_register(bus);
if (rc) {
dev_err(&pdev->dev, "mdiomux registration failed\n");
- goto out;
+ goto out_clk;
}
platform_set_drvdata(pdev, md);
@@ -206,27 +263,55 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
goto out_register;
}
+ mdio_mux_iproc_config(md);
+
dev_info(md->dev, "iProc mdiomux registered\n");
return 0;
out_register:
mdiobus_unregister(bus);
-out:
- mdiobus_free(bus);
+out_clk:
+ clk_disable_unprepare(md->core_clk);
return rc;
}
static int mdio_mux_iproc_remove(struct platform_device *pdev)
{
- struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
mdio_mux_uninit(md->mux_handle);
mdiobus_unregister(md->mii_bus);
- mdiobus_free(md->mii_bus);
+ clk_disable_unprepare(md->core_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mdio_mux_iproc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(md->core_clk);
return 0;
}
+static int mdio_mux_iproc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+
+ clk_prepare_enable(md->core_clk);
+ mdio_mux_iproc_config(md);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mdio_mux_iproc_pm_ops,
+ mdio_mux_iproc_suspend, mdio_mux_iproc_resume);
+
static const struct of_device_id mdio_mux_iproc_match[] = {
{
.compatible = "brcm,mdio-mux-iproc",
@@ -239,6 +324,7 @@ static struct platform_driver mdiomux_iproc_driver = {
.driver = {
.name = "mdio-mux-iproc",
.of_match_table = mdio_mux_iproc_match,
+ .pm = &mdio_mux_iproc_pm_ops,
},
.probe = mdio_mux_iproc_probe,
.remove = mdio_mux_iproc_remove,
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 082ffef0dec4..bc90764a8b8d 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -20,23 +20,23 @@
struct mdio_mux_gpio_state {
struct gpio_descs *gpios;
void *mux_handle;
+ int values[];
};
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
struct mdio_mux_gpio_state *s = data;
- int values[s->gpios->ndescs];
unsigned int n;
if (current_child == desired_child)
return 0;
for (n = 0; n < s->gpios->ndescs; n++)
- values[n] = (desired_child >> n) & 1;
+ s->values[n] = (desired_child >> n) & 1;
gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
- values);
+ s->values);
return 0;
}
@@ -44,15 +44,21 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
struct mdio_mux_gpio_state *s;
+ struct gpio_descs *gpios;
int r;
- s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
- if (!s)
+ gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(gpios))
+ return PTR_ERR(gpios);
+
+ s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs),
+ GFP_KERNEL);
+ if (!s) {
+ gpiod_put_array(gpios);
return -ENOMEM;
+ }
- s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
- if (IS_ERR(s->gpios))
- return PTR_ERR(s->gpios);
+ s->gpios = gpios;
r = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
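Editor's note (illustration, not part of the patch): the mdio-mux-gpio hunk swaps a variable-length array on the stack for a flexible array member sized at allocation time; struct_size() from linux/overflow.h is the kernel helper for that arithmetic. A standalone C sketch of the same layout using plain sizeof math:

#include <stdio.h>
#include <stdlib.h>

struct state {
	unsigned int ndescs;
	int values[]; /* flexible array member, sized per allocation */
};

int main(void)
{
	unsigned int n = 3, i;
	struct state *s = calloc(1, sizeof(*s) + n * sizeof(s->values[0]));

	if (!s)
		return 1;
	s->ndescs = n;
	for (i = 0; i < n; i++)
		s->values[i] = (5 >> i) & 1; /* desired_child = 5 */
	for (i = 0; i < n; i++)
		printf("values[%u]=%d\n", i, s->values[i]);
	free(s);
	return 0;
}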
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 650c2667d523..84ca9ff40ae0 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -123,7 +123,7 @@ static const struct vsc8531_edge_rate_table edge_table[] = {
};
#endif /* CONFIG_OF_MDIO */
-static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
+static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page)
{
int rc;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 537297d2b4b4..1ee25877c4d1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -467,6 +467,20 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+static int phy_config_aneg(struct phy_device *phydev)
+{
+ if (phydev->drv->config_aneg)
+ return phydev->drv->config_aneg(phydev);
+
+ /* Clause 45 PHYs that don't implement Clause 22 registers are not
+ * allowed to call genphy_config_aneg()
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EOPNOTSUPP;
+
+ return genphy_config_aneg(phydev);
+}
+
/**
* phy_start_aneg_priv - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
@@ -493,10 +507,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
/* Invalidate LP advertising flags */
phydev->lp_advertising = 0;
- if (phydev->drv->config_aneg)
- err = phydev->drv->config_aneg(phydev);
- else
- err = genphy_config_aneg(phydev);
+ err = phy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
@@ -514,7 +525,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
* negotiation may already be done and aneg interrupt may not be
* generated.
*/
- if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+ if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
err = phy_aneg_done(phydev);
if (err > 0) {
trigger = true;
@@ -546,6 +557,84 @@ int phy_start_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_start_aneg);
+static int phy_poll_aneg_done(struct phy_device *phydev)
+{
+ unsigned int retries = 100;
+ int ret;
+
+ do {
+ msleep(100);
+ ret = phy_aneg_done(phydev);
+ } while (!ret && --retries);
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * phy_speed_down - set speed to lowest speed supported by both link partners
+ * @phydev: the phy_device struct
+ * @sync: perform action synchronously
+ *
+ * Description: Typically used to save energy when waiting for a WoL packet
+ *
+ * WARNING: Setting sync to false may leave the system unable to suspend if
+ * the PHY generates an interrupt when finishing the autonegotiation.
+ * This interrupt may wake up the system immediately after suspend.
+ * Therefore use sync = false only if you're sure it's safe with the respective
+ * network chip.
+ */
+int phy_speed_down(struct phy_device *phydev, bool sync)
+{
+ u32 adv = phydev->lp_advertising & phydev->supported;
+ u32 adv_old = phydev->advertising;
+ int ret;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return 0;
+
+ if (adv & PHY_10BT_FEATURES)
+ phydev->advertising &= ~(PHY_100BT_FEATURES |
+ PHY_1000BT_FEATURES);
+ else if (adv & PHY_100BT_FEATURES)
+ phydev->advertising &= ~PHY_1000BT_FEATURES;
+
+ if (phydev->advertising == adv_old)
+ return 0;
+
+ ret = phy_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ return sync ? phy_poll_aneg_done(phydev) : 0;
+}
+EXPORT_SYMBOL_GPL(phy_speed_down);
+
+/**
+ * phy_speed_up - (re)set advertised speeds to all supported speeds
+ * @phydev: the phy_device struct
+ *
+ * Description: Used to revert the effect of phy_speed_down
+ */
+int phy_speed_up(struct phy_device *phydev)
+{
+ u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
+ u32 adv_old = phydev->advertising;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return 0;
+
+ phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);
+
+ if (phydev->advertising == adv_old)
+ return 0;
+
+ return phy_config_aneg(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_speed_up);
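Editor's note (illustration, not part of the patch): phy_speed_down() clears the faster-speed advertising bits when the partner also supports a slower speed, and phy_speed_up() restores every supported bit within the same mask. A standalone C sketch of that mask arithmetic; the bit values are stand-ins, not the real PHY_*_FEATURES definitions:

#include <stdio.h>

#define F_10   0x1
#define F_100  0x2
#define F_1000 0x4

int main(void)
{
	unsigned int supported = F_10 | F_100 | F_1000;
	unsigned int adv = supported;
	unsigned int mask = F_10 | F_100 | F_1000;

	/* speed down: partner supports 10M, so drop 100M/1000M */
	adv &= ~(F_100 | F_1000);
	printf("down: adv=0x%x\n", adv);

	/* speed up: restore all supported speeds within the mask */
	adv = (adv & ~mask) | (supported & mask);
	printf("up:   adv=0x%x\n", adv);
	return 0;
}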
+
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
@@ -894,7 +983,7 @@ void phy_state_machine(struct work_struct *work)
needs_aneg = true;
break;
case PHY_NOLINK:
- if (phydev->irq != PHY_POLL)
+ if (!phy_polling_mode(phydev))
break;
err = phy_read_status(phydev);
@@ -935,7 +1024,7 @@ void phy_state_machine(struct work_struct *work)
/* Only register a CHANGE if we are polling and link changed
* since latest checking.
*/
- if (phydev->irq == PHY_POLL) {
+ if (phy_polling_mode(phydev)) {
old_link = phydev->link;
err = phy_read_status(phydev);
if (err)
@@ -1034,7 +1123,7 @@ void phy_state_machine(struct work_struct *work)
* PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
* between states from phy_mac_interrupt()
*/
- if (phydev->irq == PHY_POLL)
+ if (phy_polling_mode(phydev))
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
PHY_STATE_TIME * HZ);
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bd0f339f69fd..db1172db1e7c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1555,6 +1555,14 @@ int genphy_read_status(struct phy_device *phydev)
if (adv < 0)
return adv;
+ if (lpagb & LPA_1000MSFAIL) {
+ if (adv & CTL1000_ENABLE_MASTER)
+ phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
+ else
+ phydev_err(phydev, "Master/Slave resolution failed\n");
+ return -ENOLINK;
+ }
+
phydev->lp_advertising =
mii_stat1000_to_ethtool_lpa_t(lpagb);
common_adv_gb = lpagb & adv << 2;
@@ -1724,11 +1732,8 @@ EXPORT_SYMBOL(genphy_loopback);
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- /* The default values for phydev->supported are provided by the PHY
- * driver "features" member, we want to reset to sane defaults first
- * before supporting higher speeds.
- */
- phydev->supported &= PHY_DEFAULT_FEATURES;
+ phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
+ PHY_10BT_FEATURES);
switch (max_speed) {
default:
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index af4dc4425be2..3ba5cf2a8a5f 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1688,4 +1688,34 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
.disconnect_phy = phylink_sfp_disconnect_phy,
};
+/* Helpers for MAC drivers */
+
+/**
+ * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper
+ * @state: a pointer to a &struct phylink_link_state
+ *
+ * Inspect the interface mode, advertising mask or forced speed and
+ * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching
+ * the interface mode to suit. @state->interface is appropriately
+ * updated, and the advertising mask has the "other" baseX_Full flag
+ * cleared.
+ */
+void phylink_helper_basex_speed(struct phylink_link_state *state)
+{
+ if (phy_interface_mode_is_8023z(state->interface)) {
+ bool want_2500 = state->an_enabled ?
+ phylink_test(state->advertising, 2500baseX_Full) :
+ state->speed == SPEED_2500;
+
+ if (want_2500) {
+ phylink_clear(state->advertising, 1000baseX_Full);
+ state->interface = PHY_INTERFACE_MODE_2500BASEX;
+ } else {
+ phylink_clear(state->advertising, 2500baseX_Full);
+ state->interface = PHY_INTERFACE_MODE_1000BASEX;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 082fb40c656d..7fc8508b5231 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -37,6 +37,9 @@
#define RTL8201F_ISR 0x1e
#define RTL8201F_IER 0x13
+#define RTL8366RB_POWER_SAVE 0x15
+#define RTL8366RB_POWER_SAVE_ON BIT(12)
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
@@ -128,6 +131,37 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
}
+static int rtl8211_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Quirk was copied from vendor driver. Unfortunately it includes no
+ * description of the magic numbers.
+ */
+ if (phydev->speed == SPEED_100 && phydev->autoneg == AUTONEG_DISABLE) {
+ phy_write(phydev, 0x17, 0x2138);
+ phy_write(phydev, 0x0e, 0x0260);
+ } else {
+ phy_write(phydev, 0x17, 0x2108);
+ phy_write(phydev, 0x0e, 0x0000);
+ }
+
+ return 0;
+}
+
+static int rtl8211c_config_init(struct phy_device *phydev)
+{
+ /* RTL8211C has an issue when operating in Gigabit slave mode */
+ phy_set_bits(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+
+ return genphy_config_init(phydev);
+}
+
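
For reference, the two MII_CTRL1000 bits set above behave per 802.3 clause 40 rather than anything RTL8211C-specific:

/* CTL1000_ENABLE_MASTER (bit 12) enables manual master/slave
 * configuration and CTL1000_AS_MASTER (bit 11) selects master, so the
 * PHY can never end up in the problematic Gigabit slave mode.
 */
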
static int rtl8211f_config_init(struct phy_device *phydev)
{
int ret;
@@ -159,6 +193,24 @@ static int rtl8211b_resume(struct phy_device *phydev)
return genphy_resume(phydev);
}
+static int rtl8366rb_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE,
+ RTL8366RB_POWER_SAVE_ON);
+ if (ret) {
+ dev_err(&phydev->mdio.dev,
+ "error enabling power management\n");
+ }
+
+ return ret;
+}
+
static struct phy_driver realtek_drvs[] = {
{
.phy_id = 0x00008201,
@@ -179,6 +231,14 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ .phy_id = 0x001cc910,
+ .name = "RTL8211 Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .config_aneg = rtl8211_config_aneg,
+ .read_mmd = &genphy_read_mmd_unsupported,
+ .write_mmd = &genphy_write_mmd_unsupported,
+ }, {
.phy_id = 0x001cc912,
.name = "RTL8211B Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
@@ -191,6 +251,14 @@ static struct phy_driver realtek_drvs[] = {
.suspend = rtl8211b_suspend,
.resume = rtl8211b_resume,
}, {
+ .phy_id = 0x001cc913,
+ .name = "RTL8211C Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = rtl8211c_config_init,
+ .read_mmd = &genphy_read_mmd_unsupported,
+ .write_mmd = &genphy_write_mmd_unsupported,
+ }, {
.phy_id = 0x001cc914,
.name = "RTL8211DN Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
@@ -223,6 +291,15 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ }, {
+ .phy_id = 0x001cc961,
+ .name = "RTL8366RB Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &rtl8366rb_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
},
};
@@ -230,10 +307,13 @@ module_phy_driver(realtek_drvs);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc816, 0x001fffff },
+ { 0x001cc910, 0x001fffff },
{ 0x001cc912, 0x001fffff },
+ { 0x001cc913, 0x001fffff },
{ 0x001cc914, 0x001fffff },
{ 0x001cc915, 0x001fffff },
{ 0x001cc916, 0x001fffff },
+ { 0x001cc961, 0x001fffff },
{ }
};
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index d437f4f5ed52..740655261e5b 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus)
}
if (bus->started)
bus->socket_ops->start(bus->sfp);
- bus->netdev->sfp_bus = bus;
bus->registered = true;
return 0;
}
@@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
if (bus->phydev && ops && ops->disconnect_phy)
ops->disconnect_phy(bus->upstream);
}
- bus->netdev->sfp_bus = NULL;
bus->registered = false;
}
@@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_upstream_stop);
+static void sfp_upstream_clear(struct sfp_bus *bus)
+{
+ bus->upstream_ops = NULL;
+ bus->upstream = NULL;
+ bus->netdev->sfp_bus = NULL;
+ bus->netdev = NULL;
+}
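
The helper just added exists so the error path introduced below can undo the registration assignments:

/* sfp_upstream_clear() mirrors the four assignments performed in
 * sfp_register_upstream(), so a failed sfp_register_bus() call no
 * longer leaves a stale ndev->sfp_bus pointer behind.
 */
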
+
/**
* sfp_register_upstream() - Register the neighbouring device
* @fwnode: firmware node for the SFP bus
@@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
bus->upstream_ops = ops;
bus->upstream = upstream;
bus->netdev = ndev;
+ ndev->sfp_bus = bus;
- if (bus->sfp)
+ if (bus->sfp) {
ret = sfp_register_bus(bus);
+ if (ret)
+ sfp_upstream_clear(bus);
+ }
rtnl_unlock();
}
@@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
rtnl_lock();
if (bus->sfp)
sfp_unregister_bus(bus);
- bus->upstream = NULL;
- bus->netdev = NULL;
+ sfp_upstream_clear(bus);
rtnl_unlock();
sfp_bus_put(bus);
@@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
+static void sfp_socket_clear(struct sfp_bus *bus)
+{
+ bus->sfp_dev = NULL;
+ bus->sfp = NULL;
+ bus->socket_ops = NULL;
+}
+
struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
const struct sfp_socket_ops *ops)
{
@@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
bus->sfp = sfp;
bus->socket_ops = ops;
- if (bus->netdev)
+ if (bus->netdev) {
ret = sfp_register_bus(bus);
+ if (ret)
+ sfp_socket_clear(bus);
+ }
rtnl_unlock();
}
@@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus)
rtnl_lock();
if (bus->netdev)
sfp_unregister_bus(bus);
- bus->sfp_dev = NULL;
- bus->sfp = NULL;
- bus->socket_ops = NULL;
+ sfp_socket_clear(bus);
rtnl_unlock();
sfp_bus_put(bus);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index c4c92db86dfa..4637d980310e 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,5 +1,7 @@
+#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -58,6 +60,69 @@ enum {
SFP_S_TX_DISABLE,
};
+static const char * const mod_state_strings[] = {
+ [SFP_MOD_EMPTY] = "empty",
+ [SFP_MOD_PROBE] = "probe",
+ [SFP_MOD_HPOWER] = "hpower",
+ [SFP_MOD_PRESENT] = "present",
+ [SFP_MOD_ERROR] = "error",
+};
+
+static const char *mod_state_to_str(unsigned short mod_state)
+{
+ if (mod_state >= ARRAY_SIZE(mod_state_strings))
+ return "Unknown module state";
+ return mod_state_strings[mod_state];
+}
+
+static const char * const dev_state_strings[] = {
+ [SFP_DEV_DOWN] = "down",
+ [SFP_DEV_UP] = "up",
+};
+
+static const char *dev_state_to_str(unsigned short dev_state)
+{
+ if (dev_state >= ARRAY_SIZE(dev_state_strings))
+ return "Unknown device state";
+ return dev_state_strings[dev_state];
+}
+
+static const char * const event_strings[] = {
+ [SFP_E_INSERT] = "insert",
+ [SFP_E_REMOVE] = "remove",
+ [SFP_E_DEV_DOWN] = "dev_down",
+ [SFP_E_DEV_UP] = "dev_up",
+ [SFP_E_TX_FAULT] = "tx_fault",
+ [SFP_E_TX_CLEAR] = "tx_clear",
+ [SFP_E_LOS_HIGH] = "los_high",
+ [SFP_E_LOS_LOW] = "los_low",
+ [SFP_E_TIMEOUT] = "timeout",
+};
+
+static const char *event_to_str(unsigned short event)
+{
+ if (event >= ARRAY_SIZE(event_strings))
+ return "Unknown event";
+ return event_strings[event];
+}
+
+static const char * const sm_state_strings[] = {
+ [SFP_S_DOWN] = "down",
+ [SFP_S_INIT] = "init",
+ [SFP_S_WAIT_LOS] = "wait_los",
+ [SFP_S_LINK_UP] = "link_up",
+ [SFP_S_TX_FAULT] = "tx_fault",
+ [SFP_S_REINIT] = "reinit",
+	[SFP_S_TX_DISABLE] = "tx_disable",
+};
+
+static const char *sm_state_to_str(unsigned short sm_state)
+{
+ if (sm_state >= ARRAY_SIZE(sm_state_strings))
+ return "Unknown state";
+ return sm_state_strings[sm_state];
+}
+
static const char *gpio_of_names[] = {
"mod-def0",
"los",
@@ -131,6 +196,12 @@ struct sfp {
unsigned int sm_retries;
struct sfp_eeprom_id id;
+#if IS_ENABLED(CONFIG_HWMON)
+ struct sfp_diag diag;
+ struct device *hwmon_dev;
+ char *hwmon_name;
+#endif
+
};
static bool sff_module_supported(const struct sfp_eeprom_id *id)
@@ -316,6 +387,719 @@ static unsigned int sfp_check(void *buf, size_t len)
return check;
}
+/* hwmon */
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t sfp_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct sfp *sfp = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_lcrit_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_lcrit:
+ case hwmon_temp_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_min_alarm:
+ case hwmon_in_max_alarm:
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ case hwmon_in_min:
+ case hwmon_in_max:
+ case hwmon_in_lcrit:
+ case hwmon_in_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ case hwmon_curr_min:
+ case hwmon_curr_max:
+ case hwmon_curr_lcrit:
+ case hwmon_curr_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_power:
+ /* External calibration of receive power requires
+ * floating point arithmetic. Doing that in the kernel
+ * is not easy, so just skip it. If the module does
+ * not require external calibration, we can however
+ * show receiver power, since FP is then not needed.
+ */
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL &&
+ channel == 1)
+ return 0;
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_min_alarm:
+ case hwmon_power_max_alarm:
+ case hwmon_power_lcrit_alarm:
+ case hwmon_power_crit_alarm:
+ case hwmon_power_min:
+ case hwmon_power_max:
+ case hwmon_power_lcrit:
+ case hwmon_power_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
+{
+ __be16 val;
+ int err;
+
+ err = sfp_read(sfp, true, reg, &val, sizeof(val));
+ if (err < 0)
+ return err;
+
+ *value = be16_to_cpu(val);
+
+ return 0;
+}
+
+static void sfp_hwmon_to_rx_power(long *value)
+{
+ *value = DIV_ROUND_CLOSEST(*value, 100);
+}
+
+static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
+ long *value)
+{
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL)
+ *value = DIV_ROUND_CLOSEST(*value * slope, 256) + offset;
+}
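
The slope here is an SFF-8472 unsigned 8.8 fixed-point value, which is why the product is divided by 256. A quick worked example, with numbers invented purely for illustration:

/* raw = 8000, slope = 0x0180 (1.5 in 8.8 fixed point), offset = 100:
 * DIV_ROUND_CLOSEST(8000 * 384, 256) + 100 = 12000 + 100 = 12100
 */
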
+
+static void sfp_hwmon_calibrate_temp(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_t_slope),
+ be16_to_cpu(sfp->diag.cal_t_offset), value);
+
+ if (*value >= 0x8000)
+ *value -= 0x10000;
+
+ *value = DIV_ROUND_CLOSEST(*value * 1000, 256);
+}
+
+static void sfp_hwmon_calibrate_vcc(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_v_slope),
+ be16_to_cpu(sfp->diag.cal_v_offset), value);
+
+ *value = DIV_ROUND_CLOSEST(*value, 10);
+}
+
+static void sfp_hwmon_calibrate_bias(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txi_slope),
+ be16_to_cpu(sfp->diag.cal_txi_offset), value);
+
+ *value = DIV_ROUND_CLOSEST(*value, 500);
+}
+
+static void sfp_hwmon_calibrate_tx_power(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txpwr_slope),
+ be16_to_cpu(sfp->diag.cal_txpwr_offset), value);
+
+ *value = DIV_ROUND_CLOSEST(*value, 10);
+}
+
+static int sfp_hwmon_read_temp(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_temp(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_vcc(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_vcc(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_bias(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_bias(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_tx_power(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_rx_power(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_to_rx_power(value);
+
+ return 0;
+}
+
+static int sfp_hwmon_temp(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return sfp_hwmon_read_temp(sfp, SFP_TEMP, value);
+
+ case hwmon_temp_lcrit:
+ *value = be16_to_cpu(sfp->diag.temp_low_alarm);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+
+ case hwmon_temp_min:
+ *value = be16_to_cpu(sfp->diag.temp_low_warn);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+ case hwmon_temp_max:
+ *value = be16_to_cpu(sfp->diag.temp_high_warn);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+
+ case hwmon_temp_crit:
+ *value = be16_to_cpu(sfp->diag.temp_high_alarm);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+
+ case hwmon_temp_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TEMP_LOW);
+ return 0;
+
+ case hwmon_temp_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TEMP_LOW);
+ return 0;
+
+ case hwmon_temp_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TEMP_HIGH);
+ return 0;
+
+ case hwmon_temp_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TEMP_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_vcc(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_in_input:
+ return sfp_hwmon_read_vcc(sfp, SFP_VCC, value);
+
+ case hwmon_in_lcrit:
+ *value = be16_to_cpu(sfp->diag.volt_low_alarm);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_min:
+ *value = be16_to_cpu(sfp->diag.volt_low_warn);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_max:
+ *value = be16_to_cpu(sfp->diag.volt_high_warn);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_crit:
+ *value = be16_to_cpu(sfp->diag.volt_high_alarm);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_VCC_LOW);
+ return 0;
+
+ case hwmon_in_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_VCC_LOW);
+ return 0;
+
+ case hwmon_in_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_VCC_HIGH);
+ return 0;
+
+ case hwmon_in_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_VCC_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_bias(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ return sfp_hwmon_read_bias(sfp, SFP_TX_BIAS, value);
+
+ case hwmon_curr_lcrit:
+ *value = be16_to_cpu(sfp->diag.bias_low_alarm);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_min:
+ *value = be16_to_cpu(sfp->diag.bias_low_warn);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_max:
+ *value = be16_to_cpu(sfp->diag.bias_high_warn);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_crit:
+ *value = be16_to_cpu(sfp->diag.bias_high_alarm);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TX_BIAS_LOW);
+ return 0;
+
+ case hwmon_curr_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TX_BIAS_LOW);
+ return 0;
+
+ case hwmon_curr_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TX_BIAS_HIGH);
+ return 0;
+
+ case hwmon_curr_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TX_BIAS_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_tx_power(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_power_input:
+ return sfp_hwmon_read_tx_power(sfp, SFP_TX_POWER, value);
+
+ case hwmon_power_lcrit:
+ *value = be16_to_cpu(sfp->diag.txpwr_low_alarm);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_min:
+ *value = be16_to_cpu(sfp->diag.txpwr_low_warn);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_max:
+ *value = be16_to_cpu(sfp->diag.txpwr_high_warn);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_crit:
+ *value = be16_to_cpu(sfp->diag.txpwr_high_alarm);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TXPWR_LOW);
+ return 0;
+
+ case hwmon_power_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TXPWR_LOW);
+ return 0;
+
+ case hwmon_power_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TXPWR_HIGH);
+ return 0;
+
+ case hwmon_power_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TXPWR_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_rx_power(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_power_input:
+ return sfp_hwmon_read_rx_power(sfp, SFP_RX_POWER, value);
+
+ case hwmon_power_lcrit:
+ *value = be16_to_cpu(sfp->diag.rxpwr_low_alarm);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_min:
+ *value = be16_to_cpu(sfp->diag.rxpwr_low_warn);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_max:
+ *value = be16_to_cpu(sfp->diag.rxpwr_high_warn);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_crit:
+ *value = be16_to_cpu(sfp->diag.rxpwr_high_alarm);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM1_RXPWR_LOW);
+ return 0;
+
+ case hwmon_power_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN1_RXPWR_LOW);
+ return 0;
+
+ case hwmon_power_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN1_RXPWR_HIGH);
+ return 0;
+
+ case hwmon_power_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM1_RXPWR_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct sfp *sfp = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return sfp_hwmon_temp(sfp, attr, value);
+ case hwmon_in:
+ return sfp_hwmon_vcc(sfp, attr, value);
+ case hwmon_curr:
+ return sfp_hwmon_bias(sfp, attr, value);
+ case hwmon_power:
+ switch (channel) {
+ case 0:
+ return sfp_hwmon_tx_power(sfp, attr, value);
+ case 1:
+ return sfp_hwmon_rx_power(sfp, attr, value);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops sfp_hwmon_ops = {
+ .is_visible = sfp_hwmon_is_visible,
+ .read = sfp_hwmon_read,
+};
+
+static u32 sfp_hwmon_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_chip = {
+ .type = hwmon_chip,
+ .config = sfp_hwmon_chip_config,
+};
+
+static u32 sfp_hwmon_temp_config[] = {
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = {
+ .type = hwmon_temp,
+ .config = sfp_hwmon_temp_config,
+};
+
+static u32 sfp_hwmon_vcc_config[] = {
+ HWMON_I_INPUT |
+ HWMON_I_MAX | HWMON_I_MIN |
+ HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
+ HWMON_I_CRIT | HWMON_I_LCRIT |
+ HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = {
+ .type = hwmon_in,
+ .config = sfp_hwmon_vcc_config,
+};
+
+static u32 sfp_hwmon_bias_config[] = {
+ HWMON_C_INPUT |
+ HWMON_C_MAX | HWMON_C_MIN |
+ HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
+ HWMON_C_CRIT | HWMON_C_LCRIT |
+ HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = {
+ .type = hwmon_curr,
+ .config = sfp_hwmon_bias_config,
+};
+
+static u32 sfp_hwmon_power_config[] = {
+ /* Transmit power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ /* Receive power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_power_channel_info = {
+ .type = hwmon_power,
+ .config = sfp_hwmon_power_config,
+};
+
+static const struct hwmon_channel_info *sfp_hwmon_info[] = {
+ &sfp_hwmon_chip,
+ &sfp_hwmon_vcc_channel_info,
+ &sfp_hwmon_temp_channel_info,
+ &sfp_hwmon_bias_channel_info,
+ &sfp_hwmon_power_channel_info,
+ NULL,
+};
+
+static const struct hwmon_chip_info sfp_hwmon_chip_info = {
+ .ops = &sfp_hwmon_ops,
+ .info = sfp_hwmon_info,
+};
+
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+ int err, i;
+
+ if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
+ return 0;
+
+ if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
+ return 0;
+
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+ /* This driver in general does not support address
+ * change.
+ */
+ return 0;
+
+ err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
+ if (err < 0)
+ return err;
+
+ sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
+ if (!sfp->hwmon_name)
+ return -ENODEV;
+
+ for (i = 0; sfp->hwmon_name[i]; i++)
+ if (hwmon_is_bad_char(sfp->hwmon_name[i]))
+ sfp->hwmon_name[i] = '_';
+
+ sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev,
+ sfp->hwmon_name, sfp,
+ &sfp_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(sfp->hwmon_dev);
+}
+
+static void sfp_hwmon_remove(struct sfp *sfp)
+{
+ hwmon_device_unregister(sfp->hwmon_dev);
+ kfree(sfp->hwmon_name);
+}
+#else
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+ return 0;
+}
+
+static void sfp_hwmon_remove(struct sfp *sfp)
+{
+}
+#endif
+
/* Helpers */
static void sfp_module_tx_disable(struct sfp *sfp)
{
@@ -636,6 +1420,10 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
dev_warn(sfp->dev,
"module address swap to access page 0xA2 is not supported.\n");
+ ret = sfp_hwmon_insert(sfp);
+ if (ret < 0)
+ return ret;
+
ret = sfp_module_insert(sfp->sfp_bus, &sfp->id);
if (ret < 0)
return ret;
@@ -647,6 +1435,8 @@ static void sfp_sm_mod_remove(struct sfp *sfp)
{
sfp_module_remove(sfp->sfp_bus);
+ sfp_hwmon_remove(sfp);
+
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
@@ -661,8 +1451,11 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
{
mutex_lock(&sfp->sm_mutex);
- dev_dbg(sfp->dev, "SM: enter %u:%u:%u event %u\n",
- sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state, event);
+ dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
+ mod_state_to_str(sfp->sm_mod_state),
+ dev_state_to_str(sfp->sm_dev_state),
+ sm_state_to_str(sfp->sm_state),
+ event_to_str(event));
/* This state machine tracks the insert/remove state of
* the module, and handles probing the on-board EEPROM.
@@ -793,8 +1586,10 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
break;
}
- dev_dbg(sfp->dev, "SM: exit %u:%u:%u\n",
- sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state);
+ dev_dbg(sfp->dev, "SM: exit %s:%s:%s\n",
+ mod_state_to_str(sfp->sm_mod_state),
+ dev_state_to_str(sfp->sm_dev_state),
+ sm_state_to_str(sfp->sm_state));
mutex_unlock(&sfp->sm_mutex);
}
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index d9dd8fbfffc7..fbf9ad429593 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -72,6 +72,10 @@
#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8574 0x000704a0
#define PHY_ID_VSC8601 0x00070420
+#define PHY_ID_VSC7385 0x00070450
+#define PHY_ID_VSC7388 0x00070480
+#define PHY_ID_VSC7395 0x00070550
+#define PHY_ID_VSC7398 0x00070580
#define PHY_ID_VSC8662 0x00070660
#define PHY_ID_VSC8221 0x000fc550
#define PHY_ID_VSC8211 0x000fc4b0
@@ -116,6 +120,137 @@ static int vsc824x_config_init(struct phy_device *phydev)
return err;
}
+#define VSC73XX_EXT_PAGE_ACCESS 0x1f
+
+static int vsc73xx_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, VSC73XX_EXT_PAGE_ACCESS);
+}
+
+static int vsc73xx_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page);
+}
+
+static void vsc73xx_config_init(struct phy_device *phydev)
+{
+ /* Receiver init */
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x0c, 0x0300, 0x0200);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* Config LEDs 0x61 */
+ phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061);
+}
+
+static int vsc738x_config_init(struct phy_device *phydev)
+{
+ u16 rev;
+	/* This magic sequence appears in the application note
+ * "VSC7385/7388 PHY Configuration".
+ *
+ * Maybe one day we will get to know what it all means.
+ */
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0200);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x10, 0xb68a);
+ phy_modify(phydev, 0x12, 0xff07, 0x0003);
+ phy_modify(phydev, 0x11, 0x00ff, 0x00a2);
+ phy_write(phydev, 0x10, 0x968a);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* Read revision */
+ rev = phy_read(phydev, MII_PHYSID2);
+ rev &= 0x0f;
+
+ /* Special quirk for revision 0 */
+ if (rev == 0) {
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0200);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x12, 0x0000);
+ phy_write(phydev, 0x11, 0x0689);
+ phy_write(phydev, 0x10, 0x8f92);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x12, 0x0000);
+ phy_write(phydev, 0x11, 0x0e35);
+ phy_write(phydev, 0x10, 0x9786);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0000);
+ phy_write(phydev, 0x17, 0xff80);
+ phy_write(phydev, 0x17, 0x0000);
+ }
+
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_write(phydev, 0x12, 0x0048);
+
+ if (rev == 0) {
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_write(phydev, 0x14, 0x6600);
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_write(phydev, 0x18, 0xa24e);
+ } else {
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x16, 0x0fc0, 0x0240);
+ phy_modify(phydev, 0x14, 0x6000, 0x4000);
+		/* bits 14-15 in extended register 0x14 control the DACG
+		 * amplitude: 6 = -8%, 2 is the hardware default
+ */
+ phy_write(phydev, 0x1f, 0x0001);
+ phy_modify(phydev, 0x14, 0xe000, 0x6000);
+ phy_write(phydev, 0x1f, 0x0000);
+ }
+
+ vsc73xx_config_init(phydev);
+
+ return genphy_config_init(phydev);
+}
+
+static int vsc739x_config_init(struct phy_device *phydev)
+{
+ /* This magic sequence appears in the VSC7395 SparX-G5e application
+ * note "VSC7395/VSC7398 PHY Configuration"
+ *
+ * Maybe one day we will get to know what it all means.
+ */
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0200);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x10, 0xb68a);
+ phy_modify(phydev, 0x12, 0xff07, 0x0003);
+ phy_modify(phydev, 0x11, 0x00ff, 0x00a2);
+ phy_write(phydev, 0x10, 0x968a);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_write(phydev, 0x12, 0x0048);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x16, 0x0fc0, 0x0240);
+ phy_modify(phydev, 0x14, 0x6000, 0x4000);
+ phy_write(phydev, 0x1f, 0x0001);
+ phy_modify(phydev, 0x14, 0xe000, 0x6000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ vsc73xx_config_init(phydev);
+
+ return genphy_config_init(phydev);
+}
+
+static int vsc73xx_config_aneg(struct phy_device *phydev)
+{
+	/* The VSC73xx switches do not like to be instructed to
+	 * do autonegotiation in any way; they prefer that you just go
+ * with the power-on/reset defaults. Writing some registers will
+ * just make autonegotiation permanently fail.
+ */
+ return 0;
+}
+
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
@@ -319,6 +454,42 @@ static struct phy_driver vsc82xx_driver[] = {
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
+ .phy_id = PHY_ID_VSC7385,
+ .name = "Vitesse VSC7385",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
+ .phy_id = PHY_ID_VSC7388,
+ .name = "Vitesse VSC7388",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
+ .phy_id = PHY_ID_VSC7395,
+ .name = "Vitesse VSC7395",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
+ .phy_id = PHY_ID_VSC7398,
+ .name = "Vitesse VSC7398",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
@@ -358,6 +529,10 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
{ PHY_ID_VSC8574, 0x000ffff0 },
+ { PHY_ID_VSC7385, 0x000ffff0 },
+ { PHY_ID_VSC7388, 0x000ffff0 },
+ { PHY_ID_VSC7395, 0x000ffff0 },
+ { PHY_ID_VSC7398, 0x000ffff0 },
{ PHY_ID_VSC8662, 0x000ffff0 },
{ PHY_ID_VSC8221, 0x000ffff0 },
{ PHY_ID_VSC8211, 0x000ffff0 },
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 2e5150b0b8d5..74a8782313cf 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -33,17 +33,22 @@ struct gmii2rgmii {
struct phy_device *phy_dev;
struct phy_driver *phy_drv;
struct phy_driver conv_phy_drv;
- int addr;
+ struct mdio_device *mdio;
};
static int xgmiitorgmii_read_status(struct phy_device *phydev)
{
struct gmii2rgmii *priv = phydev->priv;
+ struct mii_bus *bus = priv->mdio->bus;
+ int addr = priv->mdio->addr;
u16 val = 0;
+ int err;
- priv->phy_drv->read_status(phydev);
+ err = priv->phy_drv->read_status(phydev);
+ if (err < 0)
+ return err;
- val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
+ val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG);
val &= ~XILINX_GMII2RGMII_SPEED_MASK;
if (phydev->speed == SPEED_1000)
@@ -53,7 +58,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
else
val |= BMCR_SPEED10;
- mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val);
+ mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val);
return 0;
}
@@ -81,7 +86,12 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
return -EPROBE_DEFER;
}
- priv->addr = mdiodev->addr;
+ if (!priv->phy_dev->drv) {
+ dev_info(dev, "Attached phy not ready\n");
+ return -EPROBE_DEFER;
+ }
+
+ priv->mdio = mdiodev;
priv->phy_drv = priv->phy_dev->drv;
memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
sizeof(struct phy_driver));
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 6c7fd98cb00a..a205750b431b 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -96,7 +96,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
*/
struct ppp_mppe_state {
struct crypto_skcipher *arc4;
- struct crypto_ahash *sha1;
+ struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -136,25 +136,16 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
- AHASH_REQUEST_ON_STACK(req, state->sha1);
- struct scatterlist sg[4];
- unsigned int nbytes;
-
- sg_init_table(sg, 4);
-
- nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
- nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
- sizeof(sha_pad->sha_pad1));
- nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
- nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
- sizeof(sha_pad->sha_pad2));
-
- ahash_request_set_tfm(req, state->sha1);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes);
-
- crypto_ahash_digest(req);
- ahash_request_zero(req);
+ crypto_shash_init(state->sha1);
+ crypto_shash_update(state->sha1, state->master_key,
+ state->keylen);
+ crypto_shash_update(state->sha1, sha_pad->sha_pad1,
+ sizeof(sha_pad->sha_pad1));
+ crypto_shash_update(state->sha1, state->session_key,
+ state->keylen);
+ crypto_shash_update(state->sha1, sha_pad->sha_pad2,
+ sizeof(sha_pad->sha_pad2));
+ crypto_shash_final(state->sha1, state->sha1_digest);
}
/*
@@ -200,6 +191,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
static void *mppe_alloc(unsigned char *options, int optlen)
{
struct ppp_mppe_state *state;
+ struct crypto_shash *shash;
unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
@@ -217,13 +209,21 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out_free;
}
- state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(state->sha1)) {
- state->sha1 = NULL;
+ shash = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(shash))
+ goto out_free;
+
+ state->sha1 = kmalloc(sizeof(*state->sha1) +
+ crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!state->sha1) {
+ crypto_free_shash(shash);
goto out_free;
}
+ state->sha1->tfm = shash;
+ state->sha1->flags = 0;
- digestsize = crypto_ahash_digestsize(state->sha1);
+ digestsize = crypto_shash_digestsize(shash);
if (digestsize < MPPE_MAX_KEY_LEN)
goto out_free;
@@ -246,7 +246,10 @@ static void *mppe_alloc(unsigned char *options, int optlen)
out_free:
kfree(state->sha1_digest);
- crypto_free_ahash(state->sha1);
+ if (state->sha1) {
+ crypto_free_shash(state->sha1->tfm);
+ kzfree(state->sha1);
+ }
crypto_free_skcipher(state->arc4);
kfree(state);
out:
@@ -261,7 +264,8 @@ static void mppe_free(void *arg)
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
if (state) {
kfree(state->sha1_digest);
- crypto_free_ahash(state->sha1);
+ crypto_free_shash(state->sha1->tfm);
+ kzfree(state->sha1);
crypto_free_skcipher(state->arc4);
kfree(state);
}
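
A self-contained sketch of the synchronous-hash setup pattern this conversion adopts (error reporting trimmed, helper name hypothetical); note that kernels of this vintage still carry a flags field in struct shash_desc:

#include <crypto/hash.h>
#include <linux/slab.h>

static struct shash_desc *alloc_sha1_desc(void)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
	struct shash_desc *desc;

	if (IS_ERR(tfm))
		return NULL;

	/* The descriptor is allocated with room for the transform state */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return NULL;
	}

	desc->tfm = tfm;
	desc->flags = 0;
	return desc;
}
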
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index de51e8f70f44..ce61231e96ea 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pppoe_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b070959737ff..6a047d30e8c6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -41,11 +41,6 @@
#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
-static struct team_port *team_port_get_rcu(const struct net_device *dev)
-{
- return rcu_dereference(dev->rx_handler_data);
-}
-
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
struct team_port *port = rtnl_dereference(dev->rx_handler_data);
@@ -1707,7 +1702,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a192a017cc68..ebd07ad82431 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -200,6 +200,7 @@ struct tun_flow_entry {
};
#define TUN_NUM_FLOW_ENTRIES 1024
+#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
struct tun_prog {
struct rcu_head rcu;
@@ -406,7 +407,7 @@ static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
static inline u32 tun_hashfn(u32 rxhash)
{
- return rxhash & 0x3ff;
+ return rxhash & TUN_MASK_FLOW_ENTRIES;
}
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
@@ -607,7 +608,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct tun_struct *tun = netdev_priv(dev);
u16 ret;
@@ -1268,7 +1270,6 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return tun_xdp_set(dev, xdp->prog, xdp->extack);
case XDP_QUERY_PROG:
xdp->prog_id = tun_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -1688,7 +1689,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
case XDP_TX:
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
- if (tun_xdp_tx(tun->dev, &xdp))
+ if (tun_xdp_tx(tun->dev, &xdp) < 0)
goto err_redirect;
rcu_read_unlock();
local_bh_enable();
@@ -3216,7 +3217,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
goto out;
if (on) {
- __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
+ __f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
tfile->flags |= TUN_FASYNC;
} else
tfile->flags &= ~TUN_FASYNC;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 3d4f7959dabb..b654f05b2ccd 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
priv->presvd_phy_advertise);
/* Restore BMCR */
+ if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
+ priv->presvd_phy_bmcr |= BMCR_ANRESTART;
+
asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
priv->presvd_phy_bmcr);
- mii_nway_restart(&dev->mii);
priv->presvd_phy_advertise = 0;
priv->presvd_phy_bmcr = 0;
}
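
A brief note on the restore logic above:

/* BMCR_ANRESTART is self-clearing: writing it together with
 * BMCR_ANENABLE kicks off a fresh autonegotiation in the same MDIO
 * write, which is why the separate mii_nway_restart() call went away.
 */
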
@@ -691,24 +693,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
u32 phyid;
struct asix_common_private *priv;
- usbnet_get_endpoints(dev,intf);
+ usbnet_get_endpoints(dev, intf);
- /* Get the MAC address */
- if (dev->driver_info->data & FLAG_EEPROM_MAC) {
- for (i = 0; i < (ETH_ALEN >> 1); i++) {
- ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i,
- 0, 2, buf + i * 2, 0);
- if (ret < 0)
- break;
- }
+ /* Maybe the boot loader passed the MAC address via device tree */
+ if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from device tree");
} else {
- ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
- 0, 0, ETH_ALEN, buf, 0);
- }
+ /* Try getting the MAC address from EEPROM */
+ if (dev->driver_info->data & FLAG_EEPROM_MAC) {
+ for (i = 0; i < (ETH_ALEN >> 1); i++) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM,
+ 0x04 + i, 0, 2, buf + i * 2,
+ 0);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
+ 0, 0, ETH_ALEN, buf, 0);
+ }
- if (ret < 0) {
- netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
- return ret;
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n",
+ ret);
+ return ret;
+ }
}
asix_set_netdev_dev_addr(dev, buf);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 18d36dff97ea..424053bd8b21 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -869,6 +869,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
default:
dev_warn(&intf->dev,
"Couldn't detect memory size, assuming 32k\n");
+ /* fall through */
case 0x87654321:
catc_set_reg(catc, TxBufCount, 4);
catc_set_reg(catc, RxBufCount, 16);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 288ecd999171..78b16eb9e58c 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -99,6 +99,7 @@ static void tx_complete(struct urb *req)
struct net_device *dev = skb->dev;
struct usbpn_dev *pnd = netdev_priv(dev);
int status = req->status;
+ unsigned long flags;
switch (status) {
case 0:
@@ -109,16 +110,17 @@ static void tx_complete(struct urb *req)
case -ECONNRESET:
case -ESHUTDOWN:
dev->stats.tx_aborted_errors++;
+ /* fall through */
default:
dev->stats.tx_errors++;
dev_dbg(&dev->dev, "TX error (%d)\n", status);
}
dev->stats.tx_packets++;
- spin_lock(&pnd->tx_lock);
+ spin_lock_irqsave(&pnd->tx_lock, flags);
pnd->tx_queue--;
netif_wake_queue(dev);
- spin_unlock(&pnd->tx_lock);
+ spin_unlock_irqrestore(&pnd->tx_lock, flags);
dev_kfree_skb_any(skb);
usb_free_urb(req);
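
The same irqsave conversion recurs in the URB completion handlers below. A minimal sketch of the pattern (names hypothetical), which is safe whether the HCD invokes the callback in hard-IRQ, softirq or threaded context:

static void example_complete(struct urb *urb)
{
	struct example_priv *priv = urb->context;
	unsigned long flags;

	/* Save and restore the IRQ state rather than assume it */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... update state shared with process context ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}
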
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index e53883ad6107..184c24baca15 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -999,6 +999,7 @@ static void read_bulk_callback(struct urb *urb)
struct hso_net *odev = urb->context;
struct net_device *net;
int result;
+ unsigned long flags;
int status = urb->status;
/* is al ok? (Filip: Who's Al ?) */
@@ -1028,11 +1029,11 @@ static void read_bulk_callback(struct urb *urb)
if (urb->actual_length) {
/* Handle the IP stream, add header and push it onto network
* stack if the packet is complete. */
- spin_lock(&odev->net_lock);
+ spin_lock_irqsave(&odev->net_lock, flags);
packetizeRx(odev, urb->transfer_buffer, urb->actual_length,
(urb->transfer_buffer_length >
urb->actual_length) ? 1 : 0);
- spin_unlock(&odev->net_lock);
+ spin_unlock_irqrestore(&odev->net_lock, flags);
}
/* We are done with this URB, resubmit it. Prep the USB to wait for
@@ -1193,6 +1194,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
{
struct hso_serial *serial = urb->context;
int status = urb->status;
+ unsigned long flags;
hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status);
@@ -1216,10 +1218,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
/* Valid data, handle RX data */
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
put_rxbuf_data_and_resubmit_bulk_urb(serial);
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
}
/*
@@ -1502,12 +1504,13 @@ static void tiocmget_intr_callback(struct urb *urb)
DUMP(serial_state_notification,
sizeof(struct hso_serial_state_notification));
} else {
+ unsigned long flags;
UART_state_bitmap = le16_to_cpu(serial_state_notification->
UART_state_bitmap);
prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap;
icount = &tiocmget->icount;
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
if ((UART_state_bitmap & B_OVERRUN) !=
(prev_UART_state_bitmap & B_OVERRUN))
icount->parity++;
@@ -1530,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb)
(prev_UART_state_bitmap & B_RX_CARRIER))
icount->dcd++;
tiocmget->prev_UART_state_bitmap = UART_state_bitmap;
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
tiocmget->intr_completed = 1;
wake_up_interruptible(&tiocmget->waitq);
}
@@ -1729,7 +1732,6 @@ static int hso_serial_ioctl(struct tty_struct *tty,
/* starts a transmit */
static void hso_kick_transmit(struct hso_serial *serial)
{
- u8 *temp;
unsigned long flags;
int res;
@@ -1745,14 +1747,12 @@ static void hso_kick_transmit(struct hso_serial *serial)
goto out;
/* Switch pointers around to avoid memcpy */
- temp = serial->tx_buffer;
- serial->tx_buffer = serial->tx_data;
- serial->tx_data = temp;
+ swap(serial->tx_buffer, serial->tx_data);
serial->tx_data_count = serial->tx_buffer_count;
serial->tx_buffer_count = 0;
- /* If temp is set, it means we switched buffers */
- if (temp && serial->write_data) {
+ /* If serial->tx_data is set, it means we switched buffers */
+ if (serial->tx_data && serial->write_data) {
res = serial->write_data(serial);
if (res >= 0)
serial->tx_urb_used = 1;
@@ -1852,6 +1852,7 @@ static void intr_callback(struct urb *urb)
struct hso_serial *serial;
unsigned char *port_req;
int status = urb->status;
+ unsigned long flags;
int i;
usb_mark_last_busy(urb->dev);
@@ -1879,7 +1880,7 @@ static void intr_callback(struct urb *urb)
if (serial != NULL) {
hso_dbg(0x1, "Pending read interrupt on port %d\n",
i);
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
if (serial->rx_state == RX_IDLE &&
serial->port.count > 0) {
/* Setup and send a ctrl req read on
@@ -1893,7 +1894,8 @@ static void intr_callback(struct urb *urb)
hso_dbg(0x1, "Already a read pending on port %d or port not open\n",
i);
}
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock,
+ flags);
}
}
}
@@ -1920,6 +1922,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
{
struct hso_serial *serial = urb->context;
int status = urb->status;
+ unsigned long flags;
/* sanity check */
if (!serial) {
@@ -1927,9 +1930,9 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
return;
}
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
serial->tx_urb_used = 0;
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
@@ -1971,14 +1974,15 @@ static void ctrl_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
struct usb_ctrlrequest *req;
int status = urb->status;
+ unsigned long flags;
/* sanity check */
if (!serial)
return;
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
serial->tx_urb_used = 0;
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
@@ -1994,9 +1998,9 @@ static void ctrl_callback(struct urb *urb)
(USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
/* response to a read command */
serial->rx_urb_filled[0] = 1;
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
put_rxbuf_data_and_resubmit_ctrl_urb(serial);
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
} else {
hso_put_activity(serial->parent);
tty_port_tty_wakeup(&serial->port);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f1605833c5cf..913e50bab0a2 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -587,7 +587,7 @@ static void kaweth_usb_receive(struct urb *urb)
struct kaweth_device *kaweth = urb->context;
struct net_device *net = kaweth->net;
int status = urb->status;
-
+ unsigned long flags;
int count = urb->actual_length;
int count2 = urb->transfer_buffer_length;
@@ -619,12 +619,12 @@ static void kaweth_usb_receive(struct urb *urb)
net->stats.rx_errors++;
dev_dbg(dev, "Status was -EOVERFLOW.\n");
}
- spin_lock(&kaweth->device_lock);
+ spin_lock_irqsave(&kaweth->device_lock, flags);
if (IS_BLOCKED(kaweth->status)) {
- spin_unlock(&kaweth->device_lock);
+ spin_unlock_irqrestore(&kaweth->device_lock, flags);
return;
}
- spin_unlock(&kaweth->device_lock);
+ spin_unlock_irqrestore(&kaweth->device_lock, flags);
if(status && status != -EREMOTEIO && count != 1) {
dev_err(&kaweth->intf->dev,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8dff87ec6d99..a9991c5f4736 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -64,6 +64,7 @@
#define DEFAULT_RX_CSUM_ENABLE (true)
#define DEFAULT_TSO_CSUM_ENABLE (true)
#define DEFAULT_VLAN_FILTER_ENABLE (true)
+#define DEFAULT_VLAN_RX_OFFLOAD (true)
#define TX_OVERHEAD (8)
#define RXW_PADDING 2
@@ -1241,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
mod_timer(&dev->stat_monitor,
jiffies + STAT_UPDATE_TIMER);
}
+
+ tasklet_schedule(&dev->bh);
}
return ret;
@@ -1646,7 +1649,7 @@ lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
struct lan78xx_net *dev = netdev_priv(netdev);
/* Read Device/MAC registers */
- for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
+ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
if (!netdev->phydev)
@@ -1720,7 +1723,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
"MAC address read from EEPROM");
} else {
/* generate random MAC */
- random_ether_addr(addr);
+ eth_random_addr(addr);
netif_dbg(dev, ifup, dev->net,
"MAC address set to random addr");
}
@@ -2298,7 +2301,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+ ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
netdev->mtu = new_mtu;
@@ -2364,6 +2367,11 @@ static int lan78xx_set_features(struct net_device *netdev,
}
if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+ else
+ pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
else
pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2595,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
buf |= FCT_TX_CTL_EN_;
ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
- ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+ ret = lan78xx_set_rx_max_frame_length(dev,
+ dev->net->mtu + VLAN_ETH_HLEN);
ret = lan78xx_read_reg(dev, MAC_RX, &buf);
buf |= MAC_RX_RXEN_;
@@ -2975,6 +2984,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
if (DEFAULT_TSO_CSUM_ENABLE)
dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+ if (DEFAULT_VLAN_RX_OFFLOAD)
+ dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (DEFAULT_VLAN_FILTER_ENABLE)
+ dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
dev->net->hw_features = dev->net->features;
ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3054,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
struct sk_buff *skb,
u32 rx_cmd_a, u32 rx_cmd_b)
{
+ /* HW Checksum offload appears to be flawed if used when not stripping
+ * VLAN headers. Drop back to S/W checksums under these conditions.
+ */
if (!(dev->net->features & NETIF_F_RXCSUM) ||
- unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+ unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+ ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+ !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
skb->ip_summed = CHECKSUM_NONE;
} else {
skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3068,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
}
}
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ u32 rx_cmd_a, u32 rx_cmd_b)
+{
+ if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (rx_cmd_a & RX_CMD_A_FVTG_))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ (rx_cmd_b & 0xffff));
+}
+
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
int status;
@@ -3112,6 +3142,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
if (skb->len == size) {
lan78xx_rx_csum_offload(dev, skb,
rx_cmd_a, rx_cmd_b);
+ lan78xx_rx_vlan_offload(dev, skb,
+ rx_cmd_a, rx_cmd_b);
skb_trim(skb, skb->len - 4); /* remove fcs */
skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3162,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
skb_set_tail_pointer(skb2, size);
lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+ lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
skb_trim(skb2, skb2->len - 4); /* remove fcs */
skb2->truesize = size + sizeof(struct sk_buff);
@@ -3313,6 +3346,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
pkt_cnt = 0;
count = 0;
length = 0;
+ spin_lock_irqsave(&tqp->lock, flags);
for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
if (skb_is_gso(skb)) {
if (pkt_cnt) {
@@ -3321,7 +3355,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
}
count = 1;
length = skb->len - TX_OVERHEAD;
- skb2 = skb_dequeue(tqp);
+ __skb_unlink(skb, tqp);
+ spin_unlock_irqrestore(&tqp->lock, flags);
goto gso_skb;
}
@@ -3330,6 +3365,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
pkt_cnt++;
}
+ spin_unlock_irqrestore(&tqp->lock, flags);
/* copy to a single skb */
skb = alloc_skb(skb_totallen, GFP_ATOMIC);
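
One detail worth calling out in the hunk above: skb_dequeue() acquires the queue lock internally, so it must not be called once the loop already holds tqp->lock.

/* skb_dequeue() is roughly lock + __skb_unlink() + unlock; with
 * tqp->lock already held, only the unlink step is wanted here.
 */
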
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6514c86f043e..f4247b275e09 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1067,7 +1067,7 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg1d, 0);
set_register(pegasus, Reg7b, 1);
- mdelay(100);
+ msleep(100);
if ((pegasus->features & HAS_HOME_PNA) && mii_mode)
set_register(pegasus, Reg7b, 0);
else
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8e8b51f171f4..cb0cc30c3d6a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1246,12 +1246,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 86f7196f9d91..97742708460b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1252,6 +1252,7 @@ static void read_bulk_callback(struct urb *urb)
int status = urb->status;
struct rx_agg *agg;
struct r8152 *tp;
+ unsigned long flags;
agg = urb->context;
if (!agg)
@@ -1281,9 +1282,9 @@ static void read_bulk_callback(struct urb *urb)
if (urb->actual_length < ETH_ZLEN)
break;
- spin_lock(&tp->rx_lock);
+ spin_lock_irqsave(&tp->rx_lock, flags);
list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock(&tp->rx_lock);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
napi_schedule(&tp->napi);
return;
case -ESHUTDOWN:
@@ -1311,6 +1312,7 @@ static void write_bulk_callback(struct urb *urb)
struct net_device *netdev;
struct tx_agg *agg;
struct r8152 *tp;
+ unsigned long flags;
int status = urb->status;
agg = urb->context;
@@ -1332,9 +1334,9 @@ static void write_bulk_callback(struct urb *urb)
stats->tx_bytes += agg->skb_len;
}
- spin_lock(&tp->tx_lock);
+ spin_lock_irqsave(&tp->tx_lock, flags);
list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock(&tp->tx_lock);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
usb_autopm_put_interface_async(tp->intf);
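Both r8152 hunks above move the URB completion callbacks from spin_lock() to spin_lock_irqsave(). A completion handler cannot assume which context it runs in or whether interrupts are already disabled, so saving and restoring the irq state is the safe form. A minimal sketch, with a hypothetical example_dev:

#include <linux/spinlock.h>
#include <linux/usb.h>

struct example_dev {			/* hypothetical driver state */
	spinlock_t lock;
	struct list_head done;
	struct list_head item;
};

static void example_complete(struct urb *urb)
{
	struct example_dev *dev = urb->context;
	unsigned long flags;

	/* irqsave form: correct whether or not irqs are already off */
	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&dev->item, &dev->done);
	spin_unlock_irqrestore(&dev->lock, flags);
}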
@@ -1374,6 +1376,7 @@ static void intr_callback(struct urb *urb)
case -ECONNRESET: /* unlink */
case -ESHUTDOWN:
netif_device_detach(tp->netdev);
+ /* fall through */
case -ENOENT:
case -EPROTO:
netif_info(tp, intr, tp->netdev,
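The new /* fall through */ comments are not cosmetic: with the kernel built at -Wimplicit-fallthrough=3, GCC treats such a comment immediately before the next case label as an explicit annotation and stays quiet; without it, the shared tail of the switch would be flagged as a possible missing break. The shape of the idiom, with hypothetical helpers:

static void example_status(int err)
{
	switch (err) {
	case -ESHUTDOWN:
		example_detach();	/* hypothetical helper */
		/* fall through */	/* -ESHUTDOWN also wants the log */
	case -ENOENT:
		example_log();		/* hypothetical helper */
		break;
	}
}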
@@ -2739,6 +2742,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
r8152_mdio_write(tp, MII_BMCR, data);
data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+ /* fall through */
default:
if (data != PHY_STAT_LAN_ON)
@@ -3962,7 +3966,8 @@ static int rtl8152_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
- napi_disable(&tp->napi);
+ if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+ napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
@@ -4410,7 +4415,6 @@ out1:
static int rtl8152_system_suspend(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
- int ret = 0;
netif_device_detach(netdev);
@@ -4425,7 +4429,7 @@ static int rtl8152_system_suspend(struct r8152 *tp)
napi_enable(napi);
}
- return ret;
+ return 0;
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 5f565bd574da..80373a9171dd 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -391,6 +391,7 @@ static void read_bulk_callback(struct urb *urb)
u16 rx_stat;
int status = urb->status;
int result;
+ unsigned long flags;
dev = urb->context;
if (!dev)
@@ -432,9 +433,9 @@ static void read_bulk_callback(struct urb *urb)
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += pkt_len;
- spin_lock(&dev->rx_pool_lock);
+ spin_lock_irqsave(&dev->rx_pool_lock, flags);
skb = pull_skb(dev);
- spin_unlock(&dev->rx_pool_lock);
+ spin_unlock_irqrestore(&dev->rx_pool_lock, flags);
if (!skb)
goto resched;
@@ -681,7 +682,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
(netdev->flags & IFF_ALLMULTI)) {
rx_creg &= 0xfffe;
rx_creg |= 0x0002;
- dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+ dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
} else {
/* ~RX_MULTICAST, ~RX_PROMISCUOUS */
rx_creg &= 0x00fc;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 7a6a1fe79309..05553d252446 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -82,6 +82,9 @@ static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
u32 *data, int in_pm)
{
@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
return -EIO;
}
+ /* PHY workaround for gigabit link */
+ smsc75xx_phy_gig_workaround(dev);
+
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
return -EIO;
}
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret = 0, timeout = 0;
+ u32 buf, link_up = 0;
+
+ /* Set the phy in Gig loopback */
+ smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+ /* Wait for the link up */
+ do {
+ link_up = smsc75xx_link_ok_nopm(dev);
+ usleep_range(10000, 20000);
+ timeout++;
+ } while ((!link_up) && (timeout < 1000));
+
+ if (timeout >= 1000) {
+ netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+ return -EIO;
+ }
+
+ /* phy reset */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ buf |= PMT_CTL_PHY_RST;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ timeout = 0;
+ do {
+ usleep_range(10000, 20000);
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+ ret);
+ return ret;
+ }
+ timeout++;
+ } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+ if (timeout >= 100) {
+ netdev_warn(dev->net, "Timeout waiting for PHY reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
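smsc75xx_phy_gig_workaround() above is two instances of the same poll-until-done idiom: sleep, re-read, give up after a bounded number of iterations. Factored out, the second loop looks like the sketch below; smsc75xx_read_reg() is the file's own register accessor, wait_reg_bit_clear() is hypothetical:

/* Poll a register until a bit clears, or give up after 100 tries. */
static int wait_reg_bit_clear(struct usbnet *dev, u32 reg, u32 bit)
{
	int ret, timeout = 0;
	u32 val;

	do {
		usleep_range(10000, 20000);
		ret = smsc75xx_read_reg(dev, reg, &val);
		if (ret < 0)
			return ret;
	} while ((val & bit) && (++timeout < 100));

	return (val & bit) ? -EIO : 0;
}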
static int smsc75xx_reset(struct usbnet *dev)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 2d316c1b851b..6ac232e52bf7 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -358,7 +358,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
/* power up and reset phy */
sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
/* at least 10 ms; use 20 ms to be safe */
- mdelay(20);
+ msleep(20);
sr_write_reg(dev, SR_PRR, 0);
/* at least 1 ms; use 2 ms so the following register reads return valid data */
udelay(2 * 1000);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a69ad39ee57e..8d679c8b7f25 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,22 +17,47 @@
#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
+#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/ptr_ring.h>
+#include <linux/bpf_trace.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
+#define VETH_XDP_FLAG BIT(0)
+#define VETH_RING_SIZE 256
+#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
+
+/* Flags distinguishing the two types of XDP xmit that may need flushing */
+#define VETH_XDP_TX BIT(0)
+#define VETH_XDP_REDIR BIT(1)
+
struct pcpu_vstats {
u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
};
+struct veth_rq {
+ struct napi_struct xdp_napi;
+ struct net_device *dev;
+ struct bpf_prog __rcu *xdp_prog;
+ struct xdp_mem_info xdp_mem;
+ bool rx_notify_masked;
+ struct ptr_ring xdp_ring;
+ struct xdp_rxq_info xdp_rxq;
+};
+
struct veth_priv {
struct net_device __rcu *peer;
atomic64_t dropped;
- unsigned requested_headroom;
+ struct bpf_prog *_xdp_prog;
+ struct veth_rq *rq;
+ unsigned int requested_headroom;
};
/*
@@ -98,11 +123,67 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_link_ksettings = veth_get_link_ksettings,
};
+/* general routines */
+
+static bool veth_is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & VETH_XDP_FLAG;
+}
+
+static void *veth_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
+}
+
+static void *veth_xdp_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+}
+
+static void veth_ptr_free(void *ptr)
+{
+ if (veth_is_xdp_frame(ptr))
+ xdp_return_frame(veth_ptr_to_xdp(ptr));
+ else
+ kfree_skb(ptr);
+}
+
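veth queues two kinds of objects on one ptr_ring: sk_buffs and xdp_frames. Kernel allocations are at least word aligned, so bit 0 of a valid pointer is always zero and can carry the type, which is what the helpers above implement. The same trick in standalone, runnable form:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define TAG 0x1UL	/* mirrors VETH_XDP_FLAG */

static void *tag(void *p)       { return (void *)((uintptr_t)p | TAG); }
static void *untag(void *p)     { return (void *)((uintptr_t)p & ~TAG); }
static int   is_tagged(void *p) { return (int)((uintptr_t)p & TAG); }

int main(void)
{
	/* malloc() results are at least pointer-aligned, so bit 0 is free */
	int *obj = malloc(sizeof(*obj));

	void *t = tag(obj);
	assert(is_tagged(t));
	assert(untag(t) == (void *)obj);
	assert(!is_tagged(obj));

	free(obj);
	return 0;
}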
+static void __veth_xdp_flush(struct veth_rq *rq)
+{
+ /* Write ptr_ring before reading rx_notify_masked */
+ smp_mb();
+ if (!rq->rx_notify_masked) {
+ rq->rx_notify_masked = true;
+ napi_schedule(&rq->xdp_napi);
+ }
+}
+
+static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
+{
+ if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
+ struct veth_rq *rq, bool xdp)
+{
+ return __dev_forward_skb(dev, skb) ?: xdp ?
+ veth_xdp_rx(rq, skb) :
+ netif_rx(skb);
+}
+
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct veth_priv *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
+ bool rcv_xdp = false;
+ int rxq;
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
@@ -111,7 +192,16 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
+ rcv_priv = netdev_priv(rcv);
+ rxq = skb_get_queue_mapping(skb);
+ if (rxq < rcv->real_num_rx_queues) {
+ rq = &rcv_priv->rq[rxq];
+ rcv_xdp = rcu_access_pointer(rq->xdp_prog);
+ if (rcv_xdp)
+ skb_record_rx_queue(skb, rxq);
+ }
+
+ if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
u64_stats_update_begin(&stats->syncp);
@@ -122,14 +212,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
atomic64_inc(&priv->dropped);
}
+
+ if (rcv_xdp)
+ __veth_xdp_flush(rq);
+
rcu_read_unlock();
+
return NETDEV_TX_OK;
}
-/*
- * general routines
- */
-
static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -179,18 +270,502 @@ static void veth_set_multicast_list(struct net_device *dev)
{
}
+static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
+ int buflen)
+{
+ struct sk_buff *skb;
+
+ if (!buflen) {
+ buflen = SKB_DATA_ALIGN(headroom + len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+ skb = build_skb(head, buflen);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+
+ return skb;
+}
+
+static int veth_select_rxq(struct net_device *dev)
+{
+ return smp_processor_id() % dev->real_num_rx_queues;
+}
+
+static int veth_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct net_device *rcv;
+ unsigned int max_len;
+ struct veth_rq *rq;
+ int i, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ rcv = rcu_dereference(priv->peer);
+ if (unlikely(!rcv))
+ return -ENXIO;
+
+ rcv_priv = netdev_priv(rcv);
+ rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
+ * side. This means an XDP program is loaded on the peer and the peer
+ * device is up.
+ */
+ if (!rcu_access_pointer(rq->xdp_prog))
+ return -ENXIO;
+
+ max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
+
+ spin_lock(&rq->xdp_ring.producer_lock);
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *frame = frames[i];
+ void *ptr = veth_xdp_to_ptr(frame);
+
+ if (unlikely(frame->len > max_len ||
+ __ptr_ring_produce(&rq->xdp_ring, ptr))) {
+ xdp_return_frame_rx_napi(frame);
+ drops++;
+ }
+ }
+ spin_unlock(&rq->xdp_ring.producer_lock);
+
+ if (flags & XDP_XMIT_FLUSH)
+ __veth_xdp_flush(rq);
+
+ return n - drops;
+}
+
+static void veth_xdp_flush(struct net_device *dev)
+{
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct net_device *rcv;
+ struct veth_rq *rq;
+
+ rcu_read_lock();
+ rcv = rcu_dereference(priv->peer);
+ if (unlikely(!rcv))
+ goto out;
+
+ rcv_priv = netdev_priv(rcv);
+ rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ /* xdp_ring is only initialized when an XDP program is attached on the receive side */
+ if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
+ goto out;
+
+ __veth_xdp_flush(rq);
+out:
+ rcu_read_unlock();
+}
+
+static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+
+ if (unlikely(!frame))
+ return -EOVERFLOW;
+
+ return veth_xdp_xmit(dev, 1, &frame, 0);
+}
+
+static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
+ struct xdp_frame *frame,
+ unsigned int *xdp_xmit)
+{
+ void *hard_start = frame->data - frame->headroom;
+ void *head = hard_start - sizeof(struct xdp_frame);
+ int len = frame->len, delta = 0;
+ struct xdp_frame orig_frame;
+ struct bpf_prog *xdp_prog;
+ unsigned int headroom;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (likely(xdp_prog)) {
+ struct xdp_buff xdp;
+ u32 act;
+
+ xdp.data_hard_start = hard_start;
+ xdp.data = frame->data;
+ xdp.data_end = frame->data + frame->len;
+ xdp.data_meta = frame->data - frame->metasize;
+ xdp.rxq = &rq->xdp_rxq;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ delta = frame->data - xdp.data;
+ len = xdp.data_end - xdp.data;
+ break;
+ case XDP_TX:
+ orig_frame = *frame;
+ xdp.data_hard_start = head;
+ xdp.rxq->mem = frame->mem;
+ if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ frame = &orig_frame;
+ goto err_xdp;
+ }
+ *xdp_xmit |= VETH_XDP_TX;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_REDIRECT:
+ orig_frame = *frame;
+ xdp.data_hard_start = head;
+ xdp.rxq->mem = frame->mem;
+ if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ frame = &orig_frame;
+ goto err_xdp;
+ }
+ *xdp_xmit |= VETH_XDP_REDIR;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ case XDP_DROP:
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
+
+ headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
+ skb = veth_build_skb(head, headroom, len, 0);
+ if (!skb) {
+ xdp_return_frame(frame);
+ goto err;
+ }
+
+ xdp_scrub_frame(frame);
+ skb->protocol = eth_type_trans(skb, rq->dev);
+err:
+ return skb;
+err_xdp:
+ rcu_read_unlock();
+ xdp_return_frame(frame);
+xdp_xmit:
+ return NULL;
+}
+
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
+ unsigned int *xdp_xmit)
+{
+ u32 pktlen, headroom, act, metalen;
+ void *orig_data, *orig_data_end;
+ struct bpf_prog *xdp_prog;
+ int mac_len, delta, off;
+ struct xdp_buff xdp;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (unlikely(!xdp_prog)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ mac_len = skb->data - skb_mac_header(skb);
+ pktlen = skb->len + mac_len;
+ headroom = skb_headroom(skb) - mac_len;
+
+ if (skb_shared(skb) || skb_head_is_locked(skb) ||
+ skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
+ struct sk_buff *nskb;
+ int size, head_off;
+ void *head, *start;
+ struct page *page;
+
+ size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (size > PAGE_SIZE)
+ goto drop;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ goto drop;
+
+ head = page_address(page);
+ start = head + VETH_XDP_HEADROOM;
+ if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
+ page_frag_free(head);
+ goto drop;
+ }
+
+ nskb = veth_build_skb(head,
+ VETH_XDP_HEADROOM + mac_len, skb->len,
+ PAGE_SIZE);
+ if (!nskb) {
+ page_frag_free(head);
+ goto drop;
+ }
+
+ skb_copy_header(nskb, skb);
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+ if (skb->sk)
+ skb_set_owner_w(nskb, skb->sk);
+ consume_skb(skb);
+ skb = nskb;
+ }
+
+ xdp.data_hard_start = skb->head;
+ xdp.data = skb_mac_header(skb);
+ xdp.data_end = xdp.data + pktlen;
+ xdp.data_meta = xdp.data;
+ xdp.rxq = &rq->xdp_rxq;
+ orig_data = xdp.data;
+ orig_data_end = xdp.data_end;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ get_page(virt_to_page(xdp.data));
+ consume_skb(skb);
+ xdp.rxq->mem = rq->xdp_mem;
+ if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ goto err_xdp;
+ }
+ *xdp_xmit |= VETH_XDP_TX;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_REDIRECT:
+ get_page(virt_to_page(xdp.data));
+ consume_skb(skb);
+ xdp.rxq->mem = rq->xdp_mem;
+ if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
+ goto err_xdp;
+ *xdp_xmit |= VETH_XDP_REDIR;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ case XDP_DROP:
+ goto drop;
+ }
+ rcu_read_unlock();
+
+ delta = orig_data - xdp.data;
+ off = mac_len + delta;
+ if (off > 0)
+ __skb_push(skb, off);
+ else if (off < 0)
+ __skb_pull(skb, -off);
+ skb->mac_header -= delta;
+ off = xdp.data_end - orig_data_end;
+ if (off != 0)
+ __skb_put(skb, off);
+ skb->protocol = eth_type_trans(skb, rq->dev);
+
+ metalen = xdp.data - xdp.data_meta;
+ if (metalen)
+ skb_metadata_set(skb, metalen);
+out:
+ return skb;
+drop:
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return NULL;
+err_xdp:
+ rcu_read_unlock();
+ page_frag_free(xdp.data);
+xdp_xmit:
+ return NULL;
+}
+
+static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
+{
+ int i, done = 0;
+
+ for (i = 0; i < budget; i++) {
+ void *ptr = __ptr_ring_consume(&rq->xdp_ring);
+ struct sk_buff *skb;
+
+ if (!ptr)
+ break;
+
+ if (veth_is_xdp_frame(ptr)) {
+ skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
+ xdp_xmit);
+ } else {
+ skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
+ }
+
+ if (skb)
+ napi_gro_receive(&rq->xdp_napi, skb);
+
+ done++;
+ }
+
+ return done;
+}
+
+static int veth_poll(struct napi_struct *napi, int budget)
+{
+ struct veth_rq *rq =
+ container_of(napi, struct veth_rq, xdp_napi);
+ unsigned int xdp_xmit = 0;
+ int done;
+
+ xdp_set_return_frame_no_direct();
+ done = veth_xdp_rcv(rq, budget, &xdp_xmit);
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ /* Write rx_notify_masked before reading ptr_ring */
+ smp_store_mb(rq->rx_notify_masked, false);
+ if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
+ rq->rx_notify_masked = true;
+ napi_schedule(&rq->xdp_napi);
+ }
+ }
+
+ if (xdp_xmit & VETH_XDP_TX)
+ veth_xdp_flush(rq->dev);
+ if (xdp_xmit & VETH_XDP_REDIR)
+ xdp_do_flush_map();
+ xdp_clear_return_frame_no_direct();
+
+ return done;
+}
+
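veth_poll() above follows the standard NAPI completion dance: napi_complete_done() re-arms the "interrupt" (here, the producer's napi_schedule() in __veth_xdp_flush()), so the ring must be re-checked afterwards to close the race with a producer that enqueued between the final consume and the re-arm. The generic shape, with hypothetical ring helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_consume(napi, budget);	/* hypothetical */

	if (done < budget && napi_complete_done(napi, done)) {
		/* Notifications are live again; anything produced since
		 * the last consume would otherwise be stranded.
		 */
		if (example_ring_nonempty(napi))	/* hypothetical */
			napi_schedule(napi);
	}

	return done;
}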
+static int veth_napi_add(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
+ if (err)
+ goto err_xdp_ring;
+ }
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&rq->xdp_napi);
+ }
+
+ return 0;
+err_xdp_ring:
+ for (i--; i >= 0; i--)
+ ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
+
+ return err;
+}
+
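The error path of veth_napi_add() is the usual partial-failure unwind: on a failure at index i, undo exactly the i entries already set up, in reverse order. As a generic idiom (thing_init()/thing_fini() are hypothetical):

static int init_all(struct thing *arr, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = thing_init(&arr[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* indices [0, i) succeeded */
		thing_fini(&arr[i]);
	return err;
}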
+static void veth_napi_del(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ napi_disable(&rq->xdp_napi);
+ napi_hash_del(&rq->xdp_napi);
+ }
+ synchronize_net();
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ netif_napi_del(&rq->xdp_napi);
+ rq->rx_notify_masked = false;
+ ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
+ }
+}
+
+static int veth_enable_xdp(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
+ if (err < 0)
+ goto err_rxq_reg;
+
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err < 0)
+ goto err_reg_mem;
+
+ /* Save original mem info as it can be overwritten */
+ rq->xdp_mem = rq->xdp_rxq.mem;
+ }
+
+ err = veth_napi_add(dev);
+ if (err)
+ goto err_rxq_reg;
+ }
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
+
+ return 0;
+err_reg_mem:
+ xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+err_rxq_reg:
+ for (i--; i >= 0; i--)
+ xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+
+ return err;
+}
+
+static void veth_disable_xdp(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
+ veth_napi_del(dev);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ rq->xdp_rxq.mem = rq->xdp_mem;
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ }
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
+ int err;
if (!peer)
return -ENOTCONN;
+ if (priv->_xdp_prog) {
+ err = veth_enable_xdp(dev);
+ if (err)
+ return err;
+ }
+
if (peer->flags & IFF_UP) {
netif_carrier_on(dev);
netif_carrier_on(peer);
}
+
return 0;
}
@@ -203,6 +778,9 @@ static int veth_close(struct net_device *dev)
if (peer)
netif_carrier_off(peer);
+ if (priv->_xdp_prog)
+ veth_disable_xdp(dev);
+
return 0;
}
@@ -211,16 +789,48 @@ static int is_valid_veth_mtu(int mtu)
return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}
+static int veth_alloc_queues(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
+ if (!priv->rq)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ priv->rq[i].dev = dev;
+
+ return 0;
+}
+
+static void veth_free_queues(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ kfree(priv->rq);
+}
+
static int veth_dev_init(struct net_device *dev)
{
+ int err;
+
dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
if (!dev->vstats)
return -ENOMEM;
+
+ err = veth_alloc_queues(dev);
+ if (err) {
+ free_percpu(dev->vstats);
+ return err;
+ }
+
return 0;
}
static void veth_dev_free(struct net_device *dev)
{
+ veth_free_queues(dev);
free_percpu(dev->vstats);
}
@@ -228,7 +838,7 @@ static void veth_dev_free(struct net_device *dev)
static void veth_poll_controller(struct net_device *dev)
{
/* veth only receives frames when its peer sends one
- * Since it's a synchronous operation, we are guaranteed
+ * Since delivery does not depend on irqs being disabled, we are guaranteed
* never to have pending data when we poll for it so
* there is nothing to do here.
*
@@ -253,6 +863,23 @@ static int veth_get_iflink(const struct net_device *dev)
return iflink;
}
+static netdev_features_t veth_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+
+ peer = rtnl_dereference(priv->peer);
+ if (peer) {
+ struct veth_priv *peer_priv = netdev_priv(peer);
+
+ if (peer_priv->_xdp_prog)
+ features &= ~NETIF_F_GSO_SOFTWARE;
+ }
+
+ return features;
+}
+
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
struct veth_priv *peer_priv, *priv = netdev_priv(dev);
@@ -276,6 +903,103 @@ out:
rcu_read_unlock();
}
+static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ struct net_device *peer;
+ unsigned int max_mtu;
+ int err;
+
+ old_prog = priv->_xdp_prog;
+ priv->_xdp_prog = prog;
+ peer = rtnl_dereference(priv->peer);
+
+ if (prog) {
+ if (!peer) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
+ err = -ENOTCONN;
+ goto err;
+ }
+
+ max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
+ peer->hard_header_len -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (peer->mtu > max_mtu) {
+ NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
+ err = -ERANGE;
+ goto err;
+ }
+
+ if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
+ err = -ENOSPC;
+ goto err;
+ }
+
+ if (dev->flags & IFF_UP) {
+ err = veth_enable_xdp(dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
+ goto err;
+ }
+ }
+
+ if (!old_prog) {
+ peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+ peer->max_mtu = max_mtu;
+ }
+ }
+
+ if (old_prog) {
+ if (!prog) {
+ if (dev->flags & IFF_UP)
+ veth_disable_xdp(dev);
+
+ if (peer) {
+ peer->hw_features |= NETIF_F_GSO_SOFTWARE;
+ peer->max_mtu = ETH_MAX_MTU;
+ }
+ }
+ bpf_prog_put(old_prog);
+ }
+
+ if ((!!old_prog ^ !!prog) && peer)
+ netdev_update_features(peer);
+
+ return 0;
+err:
+ priv->_xdp_prog = old_prog;
+
+ return err;
+}
+
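The max_mtu computation in veth_xdp_set() bounds the frame so that a linear copy plus metadata fits a single page, because the XDP receive path re-buffers oversized skbs into one page. Worked through with typical values (illustrative only; the skb_shared_info term is config dependent and NET_IP_ALIGN is 0 on some architectures):

/*
 *   PAGE_SIZE                                          4096
 * - VETH_XDP_HEADROOM (256 + NET_IP_ALIGN)            - 258
 * - peer->hard_header_len (ETH_HLEN)                   -  14
 * - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))    -~320
 * -----------------------------------------------------------
 * = max_mtu                                           ~3504 bytes
 */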
+static u32 veth_xdp_query(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ const struct bpf_prog *xdp_prog;
+
+ xdp_prog = priv->_xdp_prog;
+ if (xdp_prog)
+ return xdp_prog->aux->id;
+
+ return 0;
+}
+
+static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return veth_xdp_set(dev, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = veth_xdp_query(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -288,8 +1012,11 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_poll_controller = veth_poll_controller,
#endif
.ndo_get_iflink = veth_get_iflink,
+ .ndo_fix_features = veth_fix_features,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
+ .ndo_bpf = veth_xdp,
+ .ndo_xdp_xmit = veth_xdp_xmit,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
@@ -455,6 +1182,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
priv = netdev_priv(peer);
rcu_assign_pointer(priv->peer, dev);
+
return 0;
err_register_dev:
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b6c9a2af3732..765920905226 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,7 +30,7 @@
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
-#include <linux/netdevice.h>
+#include <linux/kernel.h>
#include <linux/pci.h>
#include <net/route.h>
#include <net/xdp.h>
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256
+/* Flags distinguishing the two types of XDP xmit that may need flushing */
+#define VIRTIO_XDP_TX BIT(0)
+#define VIRTIO_XDP_REDIR BIT(1)
+
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -78,25 +82,43 @@ struct virtnet_sq_stats {
struct u64_stats_sync syncp;
u64 packets;
u64 bytes;
+ u64 xdp_tx;
+ u64 xdp_tx_drops;
+ u64 kicks;
};
struct virtnet_rq_stats {
struct u64_stats_sync syncp;
u64 packets;
u64 bytes;
+ u64 drops;
+ u64 xdp_packets;
+ u64 xdp_tx;
+ u64 xdp_redirects;
+ u64 xdp_drops;
+ u64 kicks;
};
#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
- { "packets", VIRTNET_SQ_STAT(packets) },
- { "bytes", VIRTNET_SQ_STAT(bytes) },
+ { "packets", VIRTNET_SQ_STAT(packets) },
+ { "bytes", VIRTNET_SQ_STAT(bytes) },
+ { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
+ { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
+ { "kicks", VIRTNET_SQ_STAT(kicks) },
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
- { "packets", VIRTNET_RQ_STAT(packets) },
- { "bytes", VIRTNET_RQ_STAT(bytes) },
+ { "packets", VIRTNET_RQ_STAT(packets) },
+ { "bytes", VIRTNET_RQ_STAT(bytes) },
+ { "drops", VIRTNET_RQ_STAT(drops) },
+ { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
+ { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
+ { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
+ { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
+ { "kicks", VIRTNET_RQ_STAT(kicks) },
};
#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
@@ -443,22 +465,12 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
return 0;
}
-static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
- struct xdp_frame *xdpf)
+static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
{
- struct xdp_frame *xdpf_sent;
- struct send_queue *sq;
- unsigned int len;
unsigned int qp;
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
-
- /* Free up any pending old buffers before queueing new ones. */
- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
- xdp_return_frame(xdpf_sent);
-
- return __virtnet_xdp_xmit_one(vi, sq, xdpf);
+ return &vi->sq[qp];
}
static int virtnet_xdp_xmit(struct net_device *dev,
@@ -470,23 +482,28 @@ static int virtnet_xdp_xmit(struct net_device *dev,
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
- unsigned int qp;
int drops = 0;
- int err;
+ int kicks = 0;
+ int ret, err;
int i;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
+ sq = virtnet_xdp_sq(vi);
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
+ drops = n;
+ goto out;
+ }
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
- if (!xdp_prog)
- return -ENXIO;
+ if (!xdp_prog) {
+ ret = -ENXIO;
+ drops = n;
+ goto out;
+ }
/* Free up any pending old buffers before queueing new ones. */
while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
@@ -501,11 +518,20 @@ static int virtnet_xdp_xmit(struct net_device *dev,
drops++;
}
}
+ ret = n - drops;
- if (flags & XDP_XMIT_FLUSH)
- virtqueue_kick(sq->vq);
+ if (flags & XDP_XMIT_FLUSH) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
+ kicks = 1;
+ }
+out:
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.xdp_tx += n;
+ sq->stats.xdp_tx_drops += drops;
+ sq->stats.kicks += kicks;
+ u64_stats_update_end(&sq->stats.syncp);
- return n - drops;
+ return ret;
}
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@ -582,7 +608,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct receive_queue *rq,
void *buf, void *ctx,
unsigned int len,
- bool *xdp_xmit)
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
@@ -597,6 +624,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
int err;
len -= vi->hdr_len;
+ stats->bytes += len;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -638,6 +666,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ stats->xdp_packets++;
switch (act) {
case XDP_PASS:
@@ -646,26 +675,29 @@ static struct sk_buff *receive_small(struct net_device *dev,
len = xdp.data_end - xdp.data;
break;
case XDP_TX:
+ stats->xdp_tx++;
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_tx_xmit(vi, xdpf);
- if (unlikely(err)) {
+ err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+ if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
}
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_TX;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
+ stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err)
goto err_xdp;
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_REDIR;
rcu_read_unlock();
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
case XDP_DROP:
@@ -691,7 +723,8 @@ err:
err_xdp:
rcu_read_unlock();
- dev->stats.rx_dropped++;
+ stats->xdp_drops++;
+ stats->drops++;
put_page(page);
xdp_xmit:
return NULL;
@@ -701,18 +734,20 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
void *buf,
- unsigned int len)
+ unsigned int len,
+ struct virtnet_rq_stats *stats)
{
struct page *page = buf;
struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+ stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
goto err;
return skb;
err:
- dev->stats.rx_dropped++;
+ stats->drops++;
give_pages(rq, page);
return NULL;
}
@@ -723,7 +758,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
void *buf,
void *ctx,
unsigned int len,
- bool *xdp_xmit)
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -736,6 +772,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
int err;
head_skb = NULL;
+ stats->bytes += len - vi->hdr_len;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -784,6 +821,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ stats->xdp_packets++;
switch (act) {
case XDP_PASS:
@@ -808,37 +846,41 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
break;
case XDP_TX:
+ stats->xdp_tx++;
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_tx_xmit(vi, xdpf);
- if (unlikely(err)) {
+ err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+ if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
if (unlikely(xdp_page != page))
put_page(xdp_page);
goto err_xdp;
}
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_TX;
if (unlikely(xdp_page != page))
put_page(page);
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
+ stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err) {
if (unlikely(xdp_page != page))
put_page(xdp_page);
goto err_xdp;
}
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_REDIR;
if (unlikely(xdp_page != page))
put_page(page);
rcu_read_unlock();
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
@@ -873,6 +915,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_buf;
}
+ stats->bytes += len;
page = virt_to_head_page(buf);
truesize = mergeable_ctx_to_truesize(ctx);
@@ -918,6 +961,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
err_xdp:
rcu_read_unlock();
+ stats->xdp_drops++;
err_skb:
put_page(page);
while (num_buf-- > 1) {
@@ -928,23 +972,25 @@ err_skb:
dev->stats.rx_length_errors++;
break;
}
+ stats->bytes += len;
page = virt_to_head_page(buf);
put_page(page);
}
err_buf:
- dev->stats.rx_dropped++;
+ stats->drops++;
dev_kfree_skb(head_skb);
xdp_xmit:
return NULL;
}
-static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, unsigned int len, void **ctx,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
- int ret;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -956,23 +1002,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
} else {
put_page(virt_to_head_page(buf));
}
- return 0;
+ return;
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+ stats);
else if (vi->big_packets)
- skb = receive_big(dev, vi, rq, buf, len);
+ skb = receive_big(dev, vi, rq, buf, len, stats);
else
- skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
+ skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
if (unlikely(!skb))
- return 0;
+ return;
hdr = skb_vnet_hdr(skb);
- ret = skb->len;
-
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -989,12 +1034,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
ntohs(skb->protocol), skb->len, skb->pkt_type);
napi_gro_receive(&rq->napi, skb);
- return ret;
+ return;
frame_err:
dev->stats.rx_frame_errors++;
dev_kfree_skb(skb);
- return 0;
}
/* Unlike mergeable buffers, all buffers are allocated to the
@@ -1161,7 +1205,12 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
if (err)
break;
} while (rq->vq->num_free);
- virtqueue_kick(rq->vq);
+ if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.kicks++;
+ u64_stats_update_end(&rq->stats.syncp);
+ }
+
return !oom;
}
@@ -1232,25 +1281,28 @@ static void refill_work(struct work_struct *work)
}
}
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+ unsigned int *xdp_xmit)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
- unsigned int len, received = 0, bytes = 0;
+ struct virtnet_rq_stats stats = {};
+ unsigned int len;
void *buf;
+ int i;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
- while (received < budget &&
+ while (stats.packets < budget &&
(buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
- bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
- received++;
+ receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+ stats.packets++;
}
} else {
- while (received < budget &&
+ while (stats.packets < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
- received++;
+ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+ stats.packets++;
}
}
@@ -1260,11 +1312,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
}
u64_stats_update_begin(&rq->stats.syncp);
- rq->stats.bytes += bytes;
- rq->stats.packets += received;
+ for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ size_t offset = virtnet_rq_stats_desc[i].offset;
+ u64 *item;
+
+ item = (u64 *)((u8 *)&rq->stats + offset);
+ *item += *(u64 *)((u8 *)&stats + offset);
+ }
u64_stats_update_end(&rq->stats.syncp);
- return received;
+ return stats.packets;
}
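virtnet_receive() now accumulates a stack-local virtnet_rq_stats and folds it into the per-queue counters inside one u64_stats section, driven by the same offset table the ethtool strings use. The offset-table fold in standalone, runnable form:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rq_stats { uint64_t packets, bytes, drops; };

static const size_t rq_offs[] = {
	offsetof(struct rq_stats, packets),
	offsetof(struct rq_stats, bytes),
	offsetof(struct rq_stats, drops),
};

static void fold(struct rq_stats *dst, const struct rq_stats *src)
{
	for (size_t i = 0; i < sizeof(rq_offs) / sizeof(rq_offs[0]); i++) {
		uint64_t *d = (uint64_t *)((uint8_t *)dst + rq_offs[i]);

		*d += *(const uint64_t *)((const uint8_t *)src + rq_offs[i]);
	}
}

int main(void)
{
	struct rq_stats total = { 0 }, batch = { 64, 96000, 1 };

	fold(&total, &batch);
	printf("packets=%llu bytes=%llu drops=%llu\n",
	       (unsigned long long)total.packets,
	       (unsigned long long)total.bytes,
	       (unsigned long long)total.drops);
	return 0;
}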
static void free_old_xmit_skbs(struct send_queue *sq)
@@ -1320,8 +1377,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
struct virtnet_info *vi = rq->vq->vdev->priv;
struct send_queue *sq;
- unsigned int received, qp;
- bool xdp_xmit = false;
+ unsigned int received;
+ unsigned int xdp_xmit = 0;
virtnet_poll_cleantx(rq);
@@ -1331,12 +1388,16 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit) {
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
- smp_processor_id();
- sq = &vi->sq[qp];
- virtqueue_kick(sq->vq);
+ if (xdp_xmit & VIRTIO_XDP_REDIR)
xdp_do_flush_map();
+
+ if (xdp_xmit & VIRTIO_XDP_TX) {
+ sq = virtnet_xdp_sq(vi);
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
}
return received;
@@ -1498,8 +1559,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (kick || netif_xmit_stopped(txq))
- virtqueue_kick(sq->vq);
+ if (kick || netif_xmit_stopped(txq)) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ }
return NETDEV_TX_OK;
}
@@ -1603,7 +1669,7 @@ static void virtnet_stats(struct net_device *dev,
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- u64 tpackets, tbytes, rpackets, rbytes;
+ u64 tpackets, tbytes, rpackets, rbytes, rdrops;
struct receive_queue *rq = &vi->rq[i];
struct send_queue *sq = &vi->sq[i];
@@ -1617,17 +1683,18 @@ static void virtnet_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
rpackets = rq->stats.packets;
rbytes = rq->stats.bytes;
+ rdrops = rq->stats.drops;
} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
tot->rx_bytes += rbytes;
tot->tx_bytes += tbytes;
+ tot->rx_dropped += rdrops;
}
tot->tx_dropped = dev->stats.tx_dropped;
tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
@@ -1811,8 +1878,8 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- virtqueue_set_affinity(vi->rq[i].vq, -1);
- virtqueue_set_affinity(vi->sq[i].vq, -1);
+ virtqueue_set_affinity(vi->rq[i].vq, NULL);
+ virtqueue_set_affinity(vi->sq[i].vq, NULL);
}
vi->affinity_hint_set = false;
@@ -1821,28 +1888,41 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
static void virtnet_set_affinity(struct virtnet_info *vi)
{
- int i;
- int cpu;
+ cpumask_var_t mask;
+ int stragglers;
+ int group_size;
+ int i, j, cpu;
+ int num_cpu;
+ int stride;
- /* In multiqueue mode, when the number of cpu is equal to the number of
- * queue pairs, we let the queue pairs to be private to one cpu by
- * setting the affinity hint to eliminate the contention.
- */
- if (vi->curr_queue_pairs == 1 ||
- vi->max_queue_pairs != num_online_cpus()) {
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
virtnet_clean_affinity(vi, -1);
return;
}
- i = 0;
- for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vi->rq[i].vq, cpu);
- virtqueue_set_affinity(vi->sq[i].vq, cpu);
- netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
- i++;
+ num_cpu = num_online_cpus();
+ stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
+ stragglers = num_cpu >= vi->curr_queue_pairs ?
+ num_cpu % vi->curr_queue_pairs :
+ 0;
+ cpu = cpumask_next(-1, cpu_online_mask);
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ group_size = stride + (i < stragglers ? 1 : 0);
+
+ for (j = 0; j < group_size; j++) {
+ cpumask_set_cpu(cpu, mask);
+ cpu = cpumask_next_wrap(cpu, cpu_online_mask,
+ nr_cpu_ids, false);
+ }
+ virtqueue_set_affinity(vi->rq[i].vq, mask);
+ virtqueue_set_affinity(vi->sq[i].vq, mask);
+ __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
+ cpumask_clear(mask);
}
vi->affinity_hint_set = true;
+ free_cpumask_var(mask);
}
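The new virtnet_set_affinity() no longer requires a 1:1 CPU/queue match; it slices the online CPUs into near-equal contiguous groups, giving the first num_cpu % queues groups one extra CPU. The arithmetic in a runnable sketch:

#include <stdio.h>

int main(void)
{
	int num_cpu = 10, queues = 4, cpu = 0;
	int stride = num_cpu / queues > 0 ? num_cpu / queues : 1;
	int stragglers = num_cpu >= queues ? num_cpu % queues : 0;

	for (int i = 0; i < queues; i++) {
		int group_size = stride + (i < stragglers ? 1 : 0);

		printf("queue %d:", i);
		for (int j = 0; j < group_size; j++, cpu = (cpu + 1) % num_cpu)
			printf(" cpu%d", cpu);	/* wraps like cpumask_next_wrap() */
		printf("\n");
	}
	return 0;
}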
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
@@ -2335,7 +2415,6 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
case XDP_QUERY_PROG:
xdp->prog_id = virtnet_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index aee0e60471f1..ababba37d735 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
return vh;
}
-static struct sk_buff **vxlan_gro_receive(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
- struct sk_buff *p, **pp = NULL;
+ struct sk_buff *pp = NULL;
+ struct sk_buff *p;
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
int flush = 1;
@@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -623,9 +624,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
flush = 0;
out:
- skb_gro_remcsum_cleanup(skb, &grc);
- skb->remcsum_offload = 0;
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
return pp;
}
@@ -638,9 +637,62 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-/* Add new entry to forwarding table -- assumes lock held */
+static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
+ const u8 *mac, __u16 state,
+ __be32 src_vni, __u8 ndm_flags)
+{
+ struct vxlan_fdb *f;
+
+ f = kmalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+ f->state = state;
+ f->flags = ndm_flags;
+ f->updated = f->used = jiffies;
+ f->vni = src_vni;
+ INIT_LIST_HEAD(&f->remotes);
+ memcpy(f->eth_addr, mac, ETH_ALEN);
+
+ return f;
+}
+
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u8 ndm_flags,
+ struct vxlan_fdb **fdb)
+{
+ struct vxlan_rdst *rd = NULL;
+ struct vxlan_fdb *f;
+ int rc;
+
+ if (vxlan->cfg.addrmax &&
+ vxlan->addrcnt >= vxlan->cfg.addrmax)
+ return -ENOSPC;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+ f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+ if (!f)
+ return -ENOMEM;
+
+ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+ if (rc < 0) {
+ kfree(f);
+ return rc;
+ }
+
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->hlist,
+ vxlan_fdb_head(vxlan, mac, src_vni));
+
+ *fdb = f;
+
+ return 0;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
__u32 ifindex, __u8 ndm_flags)
@@ -689,37 +741,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (!(flags & NLM_F_CREATE))
return -ENOENT;
- if (vxlan->cfg.addrmax &&
- vxlan->addrcnt >= vxlan->cfg.addrmax)
- return -ENOSPC;
-
/* Disallow replace to add a multicast entry */
if ((flags & NLM_F_REPLACE) &&
(is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
return -EOPNOTSUPP;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- f = kmalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- return -ENOMEM;
-
- notify = 1;
- f->state = state;
- f->flags = ndm_flags;
- f->updated = f->used = jiffies;
- f->vni = src_vni;
- INIT_LIST_HEAD(&f->remotes);
- memcpy(f->eth_addr, mac, ETH_ALEN);
-
- rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
- if (rc < 0) {
- kfree(f);
+ rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+ vni, ifindex, ndm_flags, &f);
+ if (rc < 0)
return rc;
- }
-
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
+ notify = 1;
}
if (notify) {
@@ -743,13 +775,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
kfree(f);
}
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+ bool do_notify)
{
netdev_dbg(vxlan->dev,
"delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+ if (do_notify)
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@@ -865,7 +899,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EAFNOSUPPORT;
spin_lock_bh(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+ err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex, ndm->ndm_flags);
spin_unlock_bh(&vxlan->hash_lock);
@@ -899,7 +933,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
goto out;
}
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
out:
return 0;
@@ -1008,7 +1042,7 @@ static bool vxlan_snoop(struct net_device *dev,
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
- vxlan_fdb_create(vxlan, src_mac, src_ip,
+ vxlan_fdb_update(vxlan, src_mac, src_ip,
NUD_REACHABLE,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->cfg.dst_port,
@@ -2121,7 +2155,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
- if (info->options_len)
+ if (info->options_len &&
+ info->key.tun_flags & TUNNEL_VXLAN_OPT)
md = ip_tunnel_info_opts(info);
ttl = info->key.ttl;
tos = info->key.tos;
@@ -2366,7 +2401,7 @@ static void vxlan_cleanup(struct timer_list *t)
"garbage collect %pM\n",
f->eth_addr);
f->state = NUD_STALE;
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
@@ -2417,7 +2452,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
spin_lock_bh(&vxlan->hash_lock);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
spin_unlock_bh(&vxlan->hash_lock);
}
@@ -2471,7 +2506,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
}
}
spin_unlock_bh(&vxlan->hash_lock);
@@ -3162,6 +3197,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f = NULL;
int err;
err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3175,24 +3211,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&vxlan->default_dst.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
- NLM_F_EXCL | NLM_F_CREATE,
vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_ifindex,
- NTF_SELF);
+ NTF_SELF, &f);
if (err)
return err;
}
err = register_netdevice(dev);
+ if (err)
+ goto errout;
+
+ err = rtnl_configure_link(dev, NULL);
if (err) {
- vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
- return err;
+ unregister_netdevice(dev);
+ goto errout;
}
+ /* notify default fdb entry */
+ if (f)
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
+errout:
+ if (f)
+ vxlan_fdb_destroy(vxlan, f, false);
+ return err;
}
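The reworked __vxlan_dev_create() fixes a notification ordering bug: the default fdb entry used to be announced from inside the old vxlan_fdb_create() even though register_netdevice() could still fail afterwards. Splitting create (silent) from notify, plus the do_notify flag on destroy, enforces this sequence:

/*
 * 1. vxlan_fdb_create()              insert entry, no netlink message
 * 2. register_netdevice()            may fail
 * 3. rtnl_configure_link()           may fail
 * 4. vxlan_fdb_notify(RTM_NEWNEIGH)  only once the device exists
 *
 * errout: vxlan_fdb_destroy(vxlan, f, false) removes the entry without
 * RTM_DELNEIGH, so userspace never hears about an fdb entry on a device
 * that was never successfully created.
 */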
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3427,6 +3474,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct vxlan_rdst *dst = &vxlan->default_dst;
struct vxlan_rdst old_dst;
struct vxlan_config conf;
+ struct vxlan_fdb *f = NULL;
int err;
err = vxlan_nl2conf(tb, data,
@@ -3455,16 +3503,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
- NLM_F_CREATE | NLM_F_APPEND,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
- NTF_SELF);
+ NTF_SELF, &f);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
}
spin_unlock_bh(&vxlan->hash_lock);
}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index bd46b2552980..2a3f0f1a2b0a 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2134,7 +2134,6 @@ static void
fst_openport(struct fst_port_info *port)
{
int signals;
- int txq_length;
/* Only init things if card is actually running. This allows open to
* succeed for downloads etc.
@@ -2161,7 +2160,6 @@ fst_openport(struct fst_port_info *port)
else
netif_carrier_off(port_to_dev(port));
- txq_length = port->txqe - port->txqs;
port->txqe = 0;
port->txqs = 0;
}
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 9b09c9d0d0fb..5f0366a125e2 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -192,7 +192,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
- if (priv->ucc_pram_offset < 0) {
+ if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
ret = -ENOMEM;
goto free_tx_bd;
@@ -230,14 +230,14 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Alloc riptr, tiptr */
riptr = qe_muram_alloc(32, 32);
- if (riptr < 0) {
+ if (IS_ERR_VALUE(riptr)) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
ret = -ENOMEM;
goto free_tx_skbuff;
}
tiptr = qe_muram_alloc(32, 32);
- if (tiptr < 0) {
+ if (IS_ERR_VALUE(tiptr)) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
ret = -ENOMEM;
goto free_riptr;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 90a4ad9a2d08..4907453f17f5 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
case 0x001:
printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
break;
- case 0x010:
+ case 0x002:
printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
break;
default:
@@ -1491,7 +1491,6 @@ static int lmc_rx(struct net_device *dev)
lmc_softc_t *sc = dev_to_sc(dev);
int i;
int rx_work_limit = LMC_RXDESCS;
- unsigned int next_rx;
int rxIntLoopCnt; /* debug -baz */
int localLengthErrCnt = 0;
long stat;
@@ -1505,7 +1504,6 @@ static int lmc_rx(struct net_device *dev)
rxIntLoopCnt = 0; /* debug -baz */
i = sc->lmc_next_rx % LMC_RXDESCS;
- next_rx = sc->lmc_next_rx;
while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
{
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4c417903e9be..094cea775d0c 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -566,13 +566,12 @@ static void i2400m_msg_ack_hook(struct i2400m *i2400m,
{
int result;
struct device *dev = i2400m_dev(i2400m);
- unsigned ack_type, ack_status;
+ unsigned int ack_type;
char strerr[32];
/* Chew on the message, we might need some information from
* here */
ack_type = le16_to_cpu(l3l4_hdr->type);
- ack_status = le16_to_cpu(l3l4_hdr->status);
switch (ack_type) {
case I2400M_MT_CMD_ENTER_POWERSAVE:
/* This is just left here for the sake of example, as
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index a89b5685e68b..e9fc168bb734 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1552,7 +1552,6 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
int ret, itr;
struct device *dev = i2400m_dev(i2400m);
struct i2400m_fw *i2400m_fw;
- const struct i2400m_bcf_hdr *bcf; /* Firmware data */
const struct firmware *fw;
const char *fw_name;
@@ -1574,7 +1573,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
}
/* Load firmware files to memory. */
- for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) {
+ for (itr = 0, ret = -ENOENT; ; itr++) {
fw_name = i2400m->bus_fw_names[itr];
if (fw_name == NULL) {
dev_err(dev, "Could not find a usable firmware image\n");
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a654687b5fa2..9ab3f0fdfea4 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -535,14 +535,12 @@ void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
{
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
struct device *dev = i2400m_dev(i2400m);
- int protocol;
d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
i2400m, skb, skb->len, cs);
switch(cs) {
case I2400M_CS_IPV4_0:
case I2400M_CS_IPV4:
- protocol = ETH_P_IP;
i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
skb->data - ETH_HLEN,
cpu_to_be16(ETH_P_IP));
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 502c346aa790..529ebca1e9e1 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -130,12 +130,12 @@ retry:
dev_err(dev, "BM-CMD: too many stalls in "
"URB; resetting device\n");
usb_queue_reset_device(i2400mu->usb_iface);
- /* fallthrough */
} else {
usb_clear_halt(i2400mu->usb_dev, pipe);
msleep(10); /* give the device some time */
goto retry;
}
+ /* fall through */
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index 99ef81b3d5a5..3a0e7226768a 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -131,12 +131,12 @@ retry:
dev_err(dev, "BM-CMD: too many stalls in "
"URB; resetting device\n");
usb_queue_reset_device(i2400mu->usb_iface);
- /* fallthrough */
} else {
usb_clear_halt(i2400mu->usb_dev, usb_pipe);
msleep(10); /* give the device some time */
goto retry;
}
+ /* fall through */
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 84f071ac0d84..54ff5930126c 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,15 +1,15 @@
config ATH10K
- tristate "Atheros 802.11ac wireless cards support"
- depends on MAC80211 && HAS_DMA
+ tristate "Atheros 802.11ac wireless cards support"
+ depends on MAC80211 && HAS_DMA
select ATH_COMMON
select CRC32
select WANT_DEV_COREDUMP
select ATH10K_CE
- ---help---
- This module adds support for wireless adapters based on
- Atheros IEEE 802.11ac family of chipsets.
+ ---help---
+	  This module adds support for wireless adapters based on the
+	  Atheros IEEE 802.11ac family of chipsets.
- If you choose to build a module, it'll be called ath10k.
+ If you choose to build a module, it'll be called ath10k.
config ATH10K_CE
bool
@@ -41,12 +41,12 @@ config ATH10K_USB
work in progress and will not fully work.
config ATH10K_SNOC
- tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
- depends on ATH10K && ARCH_QCOM
- ---help---
- This module adds support for integrated WCN3990 chip connected
- to system NOC(SNOC). Currently work in progress and will not
- fully work.
+ tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
+ depends on ATH10K && ARCH_QCOM
+ ---help---
+	  This module adds support for the integrated WCN3990 chip connected
+	  to the system NOC (SNOC). Currently a work in progress and will not
+	  fully work.
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index fa39ffffd34d..c9bd0e2b5db7 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -133,11 +133,8 @@ static void ath10k_ahb_clock_deinit(struct ath10k *ar)
static int ath10k_ahb_clock_enable(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- struct device *dev;
int ret;
- dev = &ar_ahb->pdev->dev;
-
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
@@ -451,12 +448,10 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct platform_device *pdev;
- struct device *dev;
struct resource *res;
int ret;
pdev = ar_ahb->pdev;
- dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 3b96a43fbda4..18c709c484e7 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1512,7 +1512,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
+ (nentries * sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
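
This one-liner fixes a mismatched free in the 64-bit copy-engine error path: the ring was sized with struct ce_desc_64 at allocation time, so dma_free_coherent() must be handed the same byte count or the coherent-pool accounting goes wrong. A sketch of the invariant being restored:

        /* Compute the ring size once; pass the identical value to the
         * allocator and to every error-path free.
         */
        size_t ring_sz = nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN;

        vaddr = dma_alloc_coherent(ar->dev, ring_sz, &paddr, GFP_KERNEL);
        /* ... later, the shadow-base allocation fails ... */
        dma_free_coherent(ar->dev, ring_sz, vaddr, paddr);      /* same size */
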
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index dbeffaef6024..b8fb5382dede 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -383,4 +383,46 @@ static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
return CE_INTERRUPT_SUMMARY;
}
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
#endif /* _CE_H_ */
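
Moving these definitions from pci.h into ce.h lets non-PCI transports share one copy (the SNOC bus below is the immediate user). Because the structures cross the host/firmware boundary, the fields are declared __le32; a hedged sketch of how a host would fill one (the helper name is hypothetical):

        static void ce_pipe_config_fill(struct ce_pipe_config *cfg, u32 num,
                                        u32 dir, u32 nentries, u32 nbytes_max)
        {
                cfg->pipenum    = __cpu_to_le32(num);
                cfg->pipedir    = __cpu_to_le32(dir);   /* e.g. PIPEDIR_OUT */
                cfg->nentries   = __cpu_to_le32(nentries);
                cfg->nbytes_max = __cpu_to_le32(nbytes_max);
                cfg->flags      = __cpu_to_le32(CE_ATTR_FLAGS);
                cfg->reserved   = 0;
        }
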
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ad4f6e3c0737..c40cd129afe7 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -41,10 +41,8 @@ static bool uart_print;
static bool skip_otp;
static bool rawmode;
-/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
- * by default.
- */
-unsigned long ath10k_coredump_mask = 0x3;
+unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
+ BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
/* FIXME: most of these should be readonly */
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
@@ -82,6 +80,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -113,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -145,6 +145,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -176,6 +177,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -207,6 +209,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -238,6 +241,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -272,6 +276,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -309,6 +314,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
@@ -347,6 +353,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
/* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz
* or 2x2 160Mhz, long-guard-interval.
@@ -388,6 +395,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
/* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or
* 1x1 160Mhz, long-guard-interval.
@@ -423,6 +431,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -456,6 +465,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
@@ -2084,6 +2095,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->max_num_peers = TARGET_10_4_NUM_PEERS;
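
A note on the coredump-mask hunk near the top of core.c: the change is value-preserving. With ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA occupying bits 0 and 1, BIT(0) | BIT(1) is still 0x3, but the named form documents which dump sections are enabled by default. A compile-time guard along these lines (it would need function scope) could pin that assumption:

        BUILD_BUG_ON((BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
                      BIT(ATH10K_FW_CRASH_DUMP_CE_DATA)) != 0x3);
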
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 951dbdd1c9eb..9feea02e7d37 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -48,7 +48,8 @@
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH10K_NUM_CHANS 40
+#define ATH10K_NUM_CHANS 41
+#define ATH10K_MAX_5G_CHAN 173
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
@@ -185,6 +186,11 @@ struct ath10k_wmi {
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
+ u32 mgmt_max_num_pending_tx;
+
+ /* Protected by data_lock */
+ struct idr mgmt_pending_tx;
+
u32 num_mem_chunks;
u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0d98c93a3aba..0baaad90b8d1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1727,7 +1727,9 @@ int ath10k_debug_start(struct ath10k *ar)
ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
}
- if (ar->debug.nf_cal_period) {
+ if (ar->debug.nf_cal_period &&
+ !test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->cal_period,
ar->debug.nf_cal_period);
@@ -1744,7 +1746,9 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
- ath10k_debug_cal_data_fetch(ar);
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features))
+ ath10k_debug_cal_data_fetch(ar);
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
@@ -2293,6 +2297,52 @@ static const struct file_operations fops_tpc_stats_final = {
.llseek = default_llseek,
};
+static ssize_t ath10k_write_warm_hw_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ bool val;
+
+ if (kstrtobool_from_user(user_buf, count, &val))
+ return -EFAULT;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)))
+ ath10k_warn(ar, "wmi service for reset chip is not available\n");
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset,
+ WMI_RST_MODE_WARM_RESET);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_warm_hw_reset = {
+ .write = ath10k_write_warm_hw_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2367,15 +2417,18 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar,
&fops_fw_dbglog);
- debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
- &fops_cal_data);
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
+ &fops_cal_data);
+
+ debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
+ &fops_nf_cal_period);
+ }
debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ani_enable);
- debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
- &fops_nf_cal_period);
-
if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
ar, &fops_simulate_radar);
@@ -2418,6 +2471,9 @@ int ath10k_debug_register(struct ath10k *ar)
ar->debug.debugfs_phy, ar,
&fops_tpc_stats_final);
+ debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
+ &fops_warm_hw_reset);
+
return 0;
}
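
The new warm_hw_reset knob follows the stock shape of a write-only debugfs trigger: parse the user buffer with kstrtobool_from_user(), reject anything but an enable, do the work under conf_mutex, and return count so the write is consumed. A reduced sketch of that shape (names hypothetical):

        static ssize_t trigger_write(struct file *file, const char __user *ubuf,
                                     size_t count, loff_t *ppos)
        {
                bool val;

                if (kstrtobool_from_user(ubuf, count, &val))
                        return -EFAULT;
                if (!val)
                        return -EINVAL;         /* only "1"/"y" fires the action */

                /* ... lock, check device state, send the command ... */

                return count;                   /* success: consume the write */
        }
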
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 8902720b4e49..331b8d558791 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
- if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+ if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
- HTC_HOST_MAX_MSG_PER_BUNDLE);
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Extended ready message. RX bundle size: %d\n",
htc->max_msgs_per_htc_bundle);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 34877597dd6a..51fda6c23f69 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -50,7 +50,8 @@ struct ath10k;
* 4-byte aligned.
*/
-#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -58,6 +59,7 @@ enum ath10k_htc_tx_flags {
};
enum ath10k_htc_rx_flags {
+ ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index c72d8af122a2..4d1cd90d6d27 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
- spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
return ret;
}
@@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
ath10k_htt_get_rx_ring_size(htt),
@@ -1089,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
- __skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+ skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
@@ -2810,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
- __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+ skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2874,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
if (skb_queue_empty(&ar->htt.rx_msdus_q))
break;
- skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+ skb = skb_dequeue(&ar->htt.rx_msdus_q);
if (!skb)
break;
ath10k_process_rx(ar, skb);
@@ -2905,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
goto exit;
}
- while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
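
The htt_rx.c hunks are all lock-discipline fixes: the double-underscore skb queue helpers assume the caller already holds the queue lock, and these queues are touched from more than one context, so the patch switches to the self-locking variants and widens rx_ring.lock around the ring free. The distinction in brief:

        skb_queue_tail(&q, skb);        /* takes q.lock internally; safe anywhere */

        spin_lock_bh(&q.lock);          /* caller supplies the locking ... */
        __skb_queue_tail(&q, skb);      /* ... so the cheap variant is legal here */
        spin_unlock_bh(&q.lock);
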
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 5d8b97a0ccaa..7cff0d52338f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -208,10 +208,10 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
struct ath10k *ar = htt->ar;
int ret;
- lockdep_assert_held(&htt->tx_lock);
-
+ spin_lock_bh(&htt->tx_lock);
ret = idr_alloc(&htt->pending_tx, skb, 0,
htt->max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
@@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
return HTT_DATA_TX_EXT_TID_MGMT;
else if (cb->flags & ATH10K_SKB_F_QOS)
- return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
@@ -1077,9 +1077,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1161,9 +1159,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
struct htt_msdu_ext_desc *ext_desc = NULL;
struct htt_msdu_ext_desc *ext_desc_t = NULL;
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1202,7 +1198,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* pass through */
+ /* fall through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_32;
@@ -1363,9 +1359,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
struct htt_msdu_ext_desc_64 *ext_desc = NULL;
struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1404,7 +1398,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* pass through */
+ /* fall through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_64;
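
Besides folding tx_lock into ath10k_htt_tx_alloc_msdu_id() itself so callers no longer take it, the htt_tx.c hunks fix the TID extraction: IEEE80211_QOS_CTL_TID_MASK is 0xf, so the TID must be masked out, not reduced modulo the mask. The difference bites exactly at the top priority:

        /* 15 & 0xf == 15   correct TID
         * 15 % 0xf == 0    wrong: that is modulo 15, not 16
         */
        u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
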
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 23467e9fefeb..977f79ebb4fd 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -586,6 +586,9 @@ struct ath10k_hw_params {
/* target supporting retention restore on ddr */
bool rri_on_ddr;
+
+	/* Byte offset from the FFT report header to each sample's bins */
+ int spectral_bin_offset;
};
struct htt_rx_desc;
@@ -696,6 +699,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
/* Target specific defines for WMI-HL-1.0 firmware */
#define TARGET_HL_10_TLV_NUM_PEERS 14
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e9c2fb318c03..90f9372dec25 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -101,6 +101,8 @@ static struct ieee80211_rate ath10k_rates_rev2[] = {
#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+#define ath10k_wmi_legacy_rates ath10k_rates
+
static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -3085,6 +3087,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;
+ /* the firmware is ignoring the "radar" flag of the
+ * channel and is scanning actively using Probe Requests
+ * on "Radar detection"/DFS channels which are not
+ * marked as "available"
+ */
+ ch->passive |= ch->chan_radar;
+
ch->freq = channel->center_freq;
ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
@@ -4026,7 +4035,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
drv_priv);
/* Prevent aggressive sta/tid taking over tx queue */
- max = 16;
+ max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
ret = 0;
while (ath10k_mac_tx_can_push(hw, txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, txq);
@@ -4047,6 +4056,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
}
+EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
/************/
/* Scanning */
@@ -4287,7 +4297,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
int ret = 0;
- int max = 16;
+ int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
@@ -5438,8 +5448,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
- int ret = 0;
+ struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
+ u16 bitrate, hw_value;
+ u8 rate;
+ int rateidx, ret = 0;
+ enum nl80211_band band;
mutex_lock(&ar->conf_mutex);
@@ -5607,6 +5621,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
+ band = def.chan->band;
+ rateidx = vif->bss_conf.mcast_rate[band] - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
+ hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = ar->wmi.vdev_param->mcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->bcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -6058,8 +6110,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
ath10k_mac_max_vht_nss(vht_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
- sta->addr, bw);
+ enum wmi_phy_mode mode;
+
+ mode = chan_to_phymode(&def);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+ sta->addr, bw, mode);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_PHYMODE, mode);
+ if (err) {
+ ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, mode, err);
+ goto exit;
+ }
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_CHAN_WIDTH, bw);
@@ -6100,6 +6163,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr);
}
+exit:
mutex_unlock(&ar->conf_mutex);
}
@@ -6922,7 +6986,6 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
const struct cfg80211_bitrate_mask *mask,
u8 *rate, u8 *nss)
{
- struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
int rate_idx;
int i;
u16 bitrate;
@@ -6932,8 +6995,11 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
if (hweight32(mask->control[band].legacy) == 1) {
rate_idx = ffs(mask->control[band].legacy) - 1;
- hw_rate = sband->bitrates[rate_idx].hw_value;
- bitrate = sband->bitrates[rate_idx].bitrate;
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
+ bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
if (ath10k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
@@ -7725,7 +7791,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
return;
sinfo->rx_duration = arsta->rx_duration;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
if (!arsta->txrate.legacy && !arsta->txrate.nss)
return;
@@ -7738,7 +7804,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.bw = arsta->txrate.bw;
}
sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
static const struct ieee80211_ops ath10k_ops = {
@@ -7858,6 +7924,9 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(161, 5805, 0),
CHAN5G(165, 5825, 0),
CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+	/* If you add more, you may need to change ATH10K_MAX_5G_CHAN,
+	 * and you will definitely need to change ATH10K_NUM_CHANS in core.h.
+	 */
};
struct ath10k *ath10k_mac_create(size_t priv_size)
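
For the BSS_CHANGED_MCAST_RATE block added above, the index arithmetic is the subtle part: mac80211 hands over a 1-based index into the band's legacy rate table, and PHYs that report 11a capability index a table view without the leading CCK rows. Condensed, under the assumption that ATH10K_MAC_FIRST_OFDM_RATE_IDX counts those CCK entries:

        rateidx = vif->bss_conf.mcast_rate[band] - 1;   /* 1-based -> 0-based */
        if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
                rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;

        bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;     /* 100 kbps units */
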
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index e52fd83156b6..0ed436657108 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -86,48 +86,6 @@ struct pcie_state {
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
-/* Host software's Copy Engine configuration. */
-#define CE_ATTR_FLAGS 0
-
-/*
- * Configuration information for a Copy Engine pipe.
- * Passed from Host to Target during startup (one per CE).
- *
- * NOTE: Structure is shared between Host software and Target firmware!
- */
-struct ce_pipe_config {
- __le32 pipenum;
- __le32 pipedir;
- __le32 nentries;
- __le32 nbytes_max;
- __le32 flags;
- __le32 reserved;
-};
-
-/*
- * Directions for interconnect pipe configuration.
- * These definitions may be used during configuration and are shared
- * between Host and Target.
- *
- * Pipe Directions are relative to the Host, so PIPEDIR_IN means
- * "coming IN over air through Target to Host" as with a WiFi Rx operation.
- * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
- * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
- * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
- * over the interconnect.
- */
-#define PIPEDIR_NONE 0
-#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
-#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
-#define PIPEDIR_INOUT 3 /* bidirectional */
-
-/* Establish a mapping between a service/direction and a pipe. */
-struct service_to_pipe {
- __le32 service_id;
- __le32 pipedir;
- __le32 pipenum;
-};
-
/* Per-pipe state. */
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index d612ce8c9cff..7f61591ce0de 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -30,6 +30,7 @@
#include "debug.h"
#include "hif.h"
#include "htc.h"
+#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
@@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
int ret;
payload_len = le16_to_cpu(htc_hdr->len);
+ skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
if (trailer_present) {
trailer = skb->data + sizeof(*htc_hdr) +
@@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
+ int lookahead_idx = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
lookaheads_local = lookaheads;
n_lookahead_local = n_lookahead;
- id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
+ id = ((struct ath10k_htc_hdr *)
+ &lookaheads[lookahead_idx++])->eid;
if (id >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
@@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
/* Only read lookahead's from RX trailers
* for the last packet in a bundle.
*/
+ lookahead_idx--;
lookaheads_local = NULL;
n_lookahead_local = NULL;
}
@@ -505,11 +510,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
- if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
+ if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
- HTC_HOST_MAX_MSG_PER_BUNDLE);
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
return -ENOMEM;
}
@@ -600,6 +605,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
* ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
* packet skb's have been allocated in the previous step.
*/
+ if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
+ full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
+
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
act_len,
full_len,
@@ -1342,6 +1350,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
break;
} while (time_before(jiffies, timeout) && !done);
+ ath10k_mac_tx_push_pending(ar);
+
sdio_claim_host(ar_sdio->func);
if (ret && ret != -ECANCELED)
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 4ff7b545293b..453eb6263143 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -96,14 +96,14 @@
* way:
*
* Let's assume that each packet in a bundle of the maximum bundle size
- * (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set
- * to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE).
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
+ * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
- * (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's.
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
- (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE)
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index a3a7042fe13a..fa1843a7e0fd 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -14,19 +14,20 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/kernel.h>
-#include "debug.h"
-#include "hif.h"
-#include "htc.h"
-#include "ce.h"
-#include "snoc.h"
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/clk.h>
-#define WCN3990_CE_ATTR_FLAGS 0
+
+#include "ce.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "snoc.h"
+
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
@@ -449,7 +450,7 @@ static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k_pci *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+ struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
struct ath10k *ar = ar_snoc->ar;
ath10k_snoc_rx_post(ar);
@@ -820,7 +821,7 @@ static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
.write32 = ath10k_snoc_write32,
};
-int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
+static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int i;
@@ -868,7 +869,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
return done;
}
-void ath10k_snoc_init_napi(struct ath10k *ar)
+static void ath10k_snoc_init_napi(struct ath10k *ar)
{
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
ATH10K_NAPI_BUDGET);
@@ -1303,13 +1304,13 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
ar->ce_priv = &ar_snoc->ce;
- ath10k_snoc_resource_init(ar);
+ ret = ath10k_snoc_resource_init(ar);
if (ret) {
ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
goto err_core_destroy;
}
- ath10k_snoc_setup_resource(ar);
+ ret = ath10k_snoc_setup_resource(ar);
if (ret) {
ath10k_warn(ar, "failed to setup resource: %d\n", ret);
goto err_core_destroy;
@@ -1388,25 +1389,7 @@ static struct platform_driver ath10k_snoc_driver = {
.of_match_table = ath10k_snoc_dt_match,
},
};
-
-static int __init ath10k_snoc_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&ath10k_snoc_driver);
- if (ret)
- pr_err("failed to register ath10k snoc driver: %d\n",
- ret);
-
- return ret;
-}
-module_init(ath10k_snoc_init);
-
-static void __exit ath10k_snoc_exit(void)
-{
- platform_driver_unregister(&ath10k_snoc_driver);
-}
-module_exit(ath10k_snoc_exit);
+module_platform_driver(ath10k_snoc_driver);
MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
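
module_platform_driver() generates exactly the register/unregister boilerplate it replaces; the only behavioral loss is the custom pr_err() on registration failure. Roughly what the macro expands to:

        static int __init ath10k_snoc_driver_init(void)
        {
                return platform_driver_register(&ath10k_snoc_driver);
        }
        module_init(ath10k_snoc_driver_init);

        static void __exit ath10k_snoc_driver_exit(void)
        {
                platform_driver_unregister(&ath10k_snoc_driver);
        }
        module_exit(ath10k_snoc_driver_exit);
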
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index 05dc98f46ccd..f9e530189d48 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -19,7 +19,6 @@
#include "hw.h"
#include "ce.h"
-#include "pci.h"
struct ath10k_snoc_drv_priv {
enum ath10k_hw_rev hw_rev;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index af6995de7e00..653b6d013207 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -145,7 +145,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
bins = (u8 *)fftr;
- bins += sizeof(*fftr);
+ bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset;
fft_sample->tsf = __cpu_to_be64(tsf);
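
This consumes the spectral_bin_offset field introduced in hw.h above: on QCA9984/QCA9888 (offset 8) the FFT report carries extra bytes between the header and the first bin, and the samples are misaligned unless those are skipped. The layout being assumed:

        /*
         * [ fft report header ][ spectral_bin_offset pad ][ bins ... ]
         */
        bins = (u8 *)fftr + sizeof(*fftr) + ar->hw_params.spectral_bin_offset;
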
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 5ecce04005d2..7fd63bbf8e24 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -31,6 +31,8 @@ struct wmi_ops {
struct wmi_scan_ev_arg *arg);
int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
@@ -262,6 +264,16 @@ ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
{
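
wmi-ops.h guards every hook so firmware ABIs that lack an event report -EOPNOTSUPP instead of dereferencing a NULL pointer; the new pull_mgmt_tx_compl wrapper is one more instance. The generic pattern, reduced to its essentials:

        struct ops {
                int (*pull_ev)(void *ctx);      /* optional per-ABI hook */
        };

        static inline int call_pull_ev(const struct ops *ops, void *ctx)
        {
                if (!ops->pull_ev)
                        return -EOPNOTSUPP;     /* this ABI never sends it */
                return ops->pull_ev(ctx);
        }
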
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8c49a26fc571..cdc1e64d52ad 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -618,6 +618,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_TDLS_PEER_EVENTID:
ath10k_wmi_event_tdls_peer(ar, skb);
break;
+ case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_compl(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -659,6 +662,31 @@ static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
return 0;
}
+static int
+ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+
+ arg->desc_id = ev->desc_id;
+ arg->status = ev->status;
+ arg->pdev_id = ev->pdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
@@ -1076,6 +1104,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_5ghz_chan = reg->low_5ghz_chan;
+ arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
@@ -1584,6 +1614,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+ cfg->wmi_send_separate = __cpu_to_le32(0);
+ cfg->num_ocb_vdevs = __cpu_to_le32(0);
+ cfg->num_ocb_channels = __cpu_to_le32(0);
+ cfg->num_ocb_schedules = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(0);
ath10k_wmi_put_host_mem_chunks(ar, chunks);
@@ -1614,10 +1649,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
@@ -1785,7 +1820,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
{
struct wmi_tlv_vdev_start_cmd *cmd;
struct wmi_channel *ch;
- struct wmi_p2p_noa_descriptor *noa;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
@@ -1843,7 +1877,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = 0;
- noa = (void *)tlv->value;
/* Note: This is a nested TLV containing:
* [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
@@ -2605,6 +2638,30 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
return skb;
}
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+ dma_addr_t paddr)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ int ret;
+
+ pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+ if (!pkt_addr)
+ return -ENOMEM;
+
+ pkt_addr->vaddr = skb;
+ pkt_addr->paddr = paddr;
+
+ spin_lock_bh(&ar->data_lock);
+ ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+ wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
+ return ret;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
dma_addr_t paddr)
@@ -2616,9 +2673,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
u32 buf_len = msdu->len;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ int len, desc_id;
u32 vdev_id;
void *ptr;
- int len;
if (!cb->vif)
return ERR_PTR(-EINVAL);
@@ -2649,13 +2706,17 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if (!skb)
return ERR_PTR(-ENOMEM);
+ desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+ if (desc_id < 0)
+ goto err_free_skb;
+
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- cmd->desc_id = 0;
+ cmd->desc_id = __cpu_to_le32(desc_id);
cmd->chanfreq = 0;
cmd->buf_len = __cpu_to_le32(buf_len);
cmd->frame_len = __cpu_to_le32(msdu->len);
@@ -2672,6 +2733,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
memcpy(ptr, msdu->data, buf_len);
return skb;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+ return ERR_PTR(desc_id);
}
static struct sk_buff *
@@ -2700,7 +2765,8 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
static struct sk_buff *
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
- u32 log_level) {
+ u32 log_level)
+{
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
@@ -3835,6 +3901,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
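
The wmi-tlv.c side of management-tx-by-ref: every submitted frame gets a descriptor id from an IDR (allocated under data_lock, hence GFP_ATOMIC) and the id travels to firmware in cmd->desc_id; the completion handler in wmi.c below resolves it back to the skb. Lifecycle sketch:

        /* submit */
        spin_lock_bh(&ar->data_lock);
        id = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
                       wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
        spin_unlock_bh(&ar->data_lock);

        /* the completion event carries the same id back */
        spin_lock_bh(&ar->data_lock);
        pkt_addr = idr_find(&wmi->mgmt_pending_tx, id);
        idr_remove(&wmi->mgmt_pending_tx, id);
        spin_unlock_bh(&ar->data_lock);
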
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 3e1e340cd834..4f0c20c90642 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -320,6 +320,7 @@ enum wmi_tlv_event_id {
WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
WMI_TLV_BA_RSP_SSN_EVENTID,
@@ -1573,6 +1574,17 @@ struct wmi_tlv {
u8 value[0];
} __packed;
+struct ath10k_mgmt_tx_pkt_addr {
+ void *vaddr;
+ dma_addr_t paddr;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
#define WMI_TLV_MGMT_RX_NUM_RSSI 4
struct wmi_tlv_mgmt_rx_ev {
@@ -1670,6 +1682,11 @@ struct wmi_tlv_resource_config {
__le32 keep_alive_pattern_size;
__le32 max_tdls_concurrent_sleep_sta;
__le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 host_capab;
} __packed;
struct wmi_tlv_init_cmd {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index f97ab795cf2e..fd612d2905b0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1333,7 +1333,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
- .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
@@ -2313,6 +2313,59 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
return true;
}
+static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
+ u32 status)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *msdu;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
+ if (!pkt_addr) {
+ ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
+ desc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(msdu);
+ info->flags |= status;
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ ret = 0;
+
+out:
+ idr_remove(&wmi->mgmt_pending_tx, desc_id);
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
+ return ret;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
+ __le32_to_cpu(arg.status));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -2366,7 +2419,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
*/
if (channel >= 1 && channel <= 14) {
status->band = NL80211_BAND_2GHZ;
- } else if (channel >= 36 && channel <= 169) {
+ } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
@@ -4602,10 +4655,6 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_config_event *)skb->data;
- tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
- if (!tpc_stats)
- return;
-
num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
@@ -4614,6 +4663,10 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
num_tx_chain);
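
The tpc_config reorder above is a leak fix: previously tpc_stats was kzalloc'd before num_tx_chain was validated, and the early return on an oversized chain count dropped the allocation. The resulting rule of thumb:

        /* Validate untrusted event fields first; allocate afterwards, so
         * every early return happens while there is nothing to free yet.
         */
        if (num_tx_chain > WMI_TPC_TX_N_CHAIN)
                return;

        tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
        if (!tpc_stats)
                return;
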
@@ -5018,13 +5071,11 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
void *vaddr;
pool_size = num_units * round_up(unit_len, 4);
- vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+ vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
- memset(vaddr, 0, pool_size);
-
ar->wmi.mem_chunks[idx].vaddr = vaddr;
ar->wmi.mem_chunks[idx].paddr = paddr;
ar->wmi.mem_chunks[idx].len = pool_size;
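
dma_zalloc_coherent() is the zeroing variant of dma_alloc_coherent(), so the explicit memset() becomes redundant and the allocate-then-clear pair collapses into one call:

        /* before: allocate, then clear by hand */
        vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
        if (vaddr)
                memset(vaddr, 0, pool_size);

        /* after: one call, same result */
        vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
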
@@ -9075,6 +9126,11 @@ int ath10k_wmi_attach(struct ath10k *ar)
INIT_WORK(&ar->radar_confirmation_work,
ath10k_radar_confirmation_work);
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ idr_init(&ar->wmi.mgmt_pending_tx);
+ }
+
return 0;
}
@@ -9093,8 +9149,35 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0;
}
+static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+ void *ctx)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
+ struct ath10k *ar = ctx;
+ struct sk_buff *msdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "force cleanup mgmt msdu_id %hu\n", msdu_id);
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
void ath10k_wmi_detach(struct ath10k *ar)
{
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ spin_lock_bh(&ar->data_lock);
+ idr_for_each(&ar->wmi.mgmt_pending_tx,
+ ath10k_wmi_mgmt_tx_clean_up_pending, ar);
+ idr_destroy(&ar->wmi.mgmt_pending_tx);
+ spin_unlock_bh(&ar->data_lock);
+ }
+
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index b48db54e9865..36220258e3c7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -462,6 +462,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS);
SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
+ SVCSTR(WMI_SERVICE_RESET_CHIP);
default:
return NULL;
}
@@ -3934,7 +3935,11 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
- WMI_10X_PDEV_PARAM_CAL_PERIOD
+ WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ WMI_10X_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10X_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10X_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10X_PDEV_PARAM_PDEV_RESET
};
enum wmi_10_4_pdev_param {
@@ -6144,6 +6149,7 @@ enum wmi_peer_param {
WMI_PEER_NSS = 0x5,
WMI_PEER_USE_4ADDR = 0x6,
WMI_PEER_DEBUG = 0xa,
+ WMI_PEER_PHYMODE = 0xd,
WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
};
@@ -6500,6 +6506,15 @@ struct wmi_force_fw_hang_cmd {
__le32 delay_ms;
} __packed;
+enum wmi_pdev_reset_mode_type {
+ WMI_RST_MODE_TX_FLUSH = 1,
+ WMI_RST_MODE_WARM_RESET,
+ WMI_RST_MODE_COLD_RESET,
+ WMI_RST_MODE_WARM_RESET_RESTORE_CAL,
+ WMI_RST_MODE_COLD_RESET_RESTORE_CAL,
+ WMI_RST_MODE_MAX,
+};
+
enum ath10k_dbglog_level {
ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
ATH10K_DBGLOG_LEVEL_INFO = 1,
@@ -6599,6 +6614,12 @@ struct wmi_scan_ev_arg {
__le32 vdev_id;
};
+struct wmi_tlv_mgmt_tx_compl_ev_arg {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -7070,6 +7091,7 @@ int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index f23c851765df..05140d8baa36 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -670,6 +670,7 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
break;
case NL80211_IFTYPE_ADHOC:
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
+ /* fall through */
default:
/* On non-STA modes timer1 is used as next DMA
* beacon alert (DBA) timer and timer2 as next
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index b1b8bc326830..ae08572c4b58 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -483,7 +483,6 @@ static u32
ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
- u32 *rf;
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
const struct ath5k_rf_reg *rf_regs;
@@ -502,7 +501,6 @@ ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
if (ah->ah_rf_banks == NULL)
return 0;
- rf = ah->ah_rf_banks;
ah->ah_gain.g_f_corr = 0;
/* No VGA (Variable Gain Amplifier) override, skip */
@@ -549,13 +547,10 @@ ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
- u32 *rf;
if (ah->ah_rf_banks == NULL)
return false;
- rf = ah->ah_rf_banks;
-
if (ah->ah_radio == AR5K_RF5111) {
rf_regs = rf_regs_5111;
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index 334dbd834b3a..bde5a10d470c 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -534,7 +534,7 @@ int ath6kl_bmi_init(struct ath6kl *ar)
/* cmd + addr + len + data_size */
ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
- ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
+ ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_KERNEL);
if (!ar->bmi.cmd_buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 0687697d5e2d..e121187f371f 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1811,20 +1811,20 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->target_stats.rx_byte) {
sinfo->rx_bytes = vif->target_stats.rx_byte;
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_packets = vif->target_stats.rx_pkt;
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
}
if (vif->target_stats.tx_byte) {
sinfo->tx_bytes = vif->target_stats.tx_byte;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_packets = vif->target_stats.tx_pkt;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
}
sinfo->signal = vif->target_stats.cs_rssi;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
rate = vif->target_stats.tx_ucast_rate;
@@ -1857,12 +1857,12 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
if (test_bit(CONNECTED, &vif->flags) &&
test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
vif->nw_type == INFRA_NETWORK) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
@@ -3899,16 +3899,19 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
switch (ar->hw.cap) {
case WMI_11AN_CAP:
ht = true;
+ /* fall through */
case WMI_11A_CAP:
band_5gig = true;
break;
case WMI_11GN_CAP:
ht = true;
+ /* fall through */
case WMI_11G_CAP:
band_2gig = true;
break;
case WMI_11AGN_CAP:
ht = true;
+ /* fall through */
case WMI_11AG_CAP:
band_2gig = true;
band_5gig = true;
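
The BIT() to BIT_ULL() conversions in ath6kl_get_station() above matter because sinfo->filled is a u64 while BIT(n) expands to an unsigned long shift, which is 32 bits wide on 32-bit targets and undefined once the NL80211_STA_INFO_* indices grow past bit 31:

        /* BIT(n)     == (1UL  << n)  -- breaks for n >= 32 on 32-bit builds
         * BIT_ULL(n) == (1ULL << n)  -- well-defined across the whole u64
         */
        sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
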
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 546243e11737..434b66829646 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -746,10 +746,8 @@ static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
struct htc_endpoint *ep;
struct htc_packet *packet;
u8 ep_id, *netdata;
- u32 netlen;
netdata = skb->data;
- netlen = skb->len;
htc_hdr = (struct htc_frame_hdr *) netdata;
@@ -855,12 +853,8 @@ static int htc_process_trailer(struct htc_target *target, u8 *buffer,
{
struct htc_credit_report *report;
struct htc_record_hdr *record;
- u8 *record_buf, *orig_buf;
- int orig_len, status;
-
- orig_buf = buffer;
- orig_len = len;
- status = 0;
+ u8 *record_buf;
+ int status = 0;
while (len > 0) {
if (len < sizeof(struct htc_record_hdr)) {
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 808fb30be9ad..0c61dbaa62a4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -272,7 +272,7 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
{
struct ath6kl_dbglog_hdr debug_hdr;
struct ath6kl_dbglog_buf debug_buf;
- u32 address, length, dropped, firstbuf, debug_hdr_addr;
+ u32 address, length, firstbuf, debug_hdr_addr;
int ret, loop;
u8 *buf;
@@ -303,7 +303,6 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
address = TARG_VTOP(ar->target_type,
le32_to_cpu(debug_hdr.dbuf_addr));
firstbuf = address;
- dropped = le32_to_cpu(debug_hdr.dropped);
ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
if (ret)
goto out;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 2195b1b7a8a6..bb50680580f3 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1415,6 +1415,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x19))},
{},
};
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 618d12ed4b40..b22ed499f7ba 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1701,7 +1701,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
struct ath6kl_sta *sta;
struct aggr_info_conn *aggr_conn = NULL;
struct rxtid *rxtid;
- struct rxtid_stats *stats;
u16 hold_q_size;
u8 tid, aid;
@@ -1722,7 +1721,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
return;
rxtid = &aggr_conn->rx_tid[tid];
- stats = &aggr_conn->stat[tid];
if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2bd982c3a479..63019c3de034 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -19,6 +19,7 @@
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include "ath9k.h"
static const struct platform_device_id ath9k_platform_id_table[] = {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 7922550c2159..ef2dd68d3f77 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -583,12 +583,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
+ /* fall through */
case 0x3:
if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
+ /* else: fall through */
case 0x1:
case 0x2:
case 0x7:
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 50fcd343c41a..fd9db8ca99d7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -676,10 +676,10 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
return 0;
ah->cal_list_curr = currCal = currCal->calNext;
- if (currCal->calState == CAL_WAITING) {
+ if (currCal->calState == CAL_WAITING)
ath9k_hw_reset_calibration(ah, currCal);
- return 0;
- }
+
+ return 0;
}
/* Do NF cal only at longer intervals */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 61a9b85045d2..713291881208 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -119,6 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
aModeRefSel = 2;
if (aModeRefSel)
break;
+ /* else: fall through */
case 1:
default:
aModeRefSel = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index fe5102ca5010..98c5f524a360 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1800,6 +1800,8 @@ static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah)
{
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
/* Activate spectral scan */
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
AR_PHY_SPECTRAL_SCAN_ACTIVE);
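
The extra write makes the trigger path self-sufficient: it now sets AR_PHY_SPECTRAL_SCAN_ENABLE itself rather than relying on the config path having done so, presumably because writing ACTIVE starts nothing while ENABLE is clear. A redundant REG_SET_BIT is harmless when the bit is already set, so the shape is simply:

static void spectral_scan_trigger_sketch(struct ath_hw *ah)
{
	/* ACTIVE appears to be ignored while ENABLE is clear: force ENABLE first */
	REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
	REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ACTIVE);
}
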
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef0de4f1312c..21ba20981a80 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -342,7 +342,7 @@ struct ath_chanctx {
struct ath_beacon_config beacon;
struct ath9k_hw_cal_data caldata;
- struct timespec tsf_ts;
+ struct timespec64 tsf_ts;
u64 tsf_val;
u32 last_beacon;
@@ -1021,7 +1021,7 @@ struct ath_softc {
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
struct completion go_beacon;
- struct timespec last_event_time;
+ struct timespec64 last_event_time;
#endif
unsigned long driver_data;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 1b05b5d7a038..fd61ae4782b6 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
static u32 chanctx_event_delta(struct ath_softc *sc)
{
u64 ms;
- struct timespec ts, *old;
+ struct timespec64 ts, *old;
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
old = &sc->last_event_time;
ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
@@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
{
struct ath_chanctx *prev, *cur;
- struct timespec ts;
+ struct timespec64 ts;
u32 cur_tsf, prev_tsf, beacon_int;
s32 offset;
@@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
if (!prev->switch_after_beacon)
return;
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
cur_tsf = (u32) cur->tsf_val +
ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
@@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_chanctx *old_ctx;
- struct timespec ts;
+ struct timespec64 ts;
bool measure_time = false;
bool send_ps = false;
bool queues_stopped = false;
@@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_unlock_bh(&sc->chan_lock);
if (sc->next_chan == &sc->offchannel.chan) {
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
measure_time = true;
}
@@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_lock_bh(&sc->chan_lock);
if (sc->cur_chan != &sc->offchannel.chan) {
- getrawmonotonic(&sc->cur_chan->tsf_ts);
+ ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
}
}
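
All of these getrawmonotonic()/struct timespec conversions are part of the y2038 cleanup: tv_sec in struct timespec is a 32-bit time_t on 32-bit systems, while timespec64 and ktime_get_raw_ts64() keep a 64-bit seconds field everywhere. A minimal sketch of the replacement pattern, including the millisecond math used by chanctx_event_delta() above:

#include <linux/time64.h>
#include <linux/timekeeping.h>

static u64 raw_monotonic_ms(void)
{
	struct timespec64 ts;

	ktime_get_raw_ts64(&ts);	/* y2038-safe raw monotonic clock */
	return (u64)ts.tv_sec * MSEC_PER_SEC + ts.tv_nsec / NSEC_PER_MSEC;
}
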
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index f685843a2ff3..0a6eb8a8c1ed 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -538,7 +538,7 @@ static int read_file_interrupt(struct seq_file *file, void *data)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
PR_IS("RXLP", rxlp);
PR_IS("RXHP", rxhp);
- PR_IS("WATHDOG", bb_watchdog);
+ PR_IS("WATCHDOG", bb_watchdog);
} else {
PR_IS("RX", rxok);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index cb0eef13af1c..fb649d85b8fc 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev;
+ unsigned long flags;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
@@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
* If the URBs are being flushed, no need to complete
* this packet.
*/
- spin_lock(&hif_dev->tx.tx_lock);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
- spin_unlock(&hif_dev->tx.tx_lock);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
return;
}
- spin_unlock(&hif_dev->tx.tx_lock);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 585736a837ed..799010ed04e0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ unsigned long flags;
- spin_lock(&priv->rx.rxbuflock);
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
rxbuf = tmp_buf;
break;
}
}
- spin_unlock(&priv->rx.rxbuflock);
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
if (rxbuf == NULL) {
ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
- spin_lock(&priv->rx.rxbuflock);
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
rxbuf->skb = skb;
rxbuf->in_process = true;
- spin_unlock(&priv->rx.rxbuflock);
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
tasklet_schedule(&priv->rx_tasklet);
return;
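
This hunk, the hif_usb.c one above it, and the wmi.c one further below all make the same fix: these completion/RX paths can be entered from more than one context, so a plain spin_lock() risks deadlock if an interrupt that takes the same lock fires on the CPU already holding it. The irqsave variant disables local interrupts and restores the previous state on unlock. The general shape, as a self-contained sketch:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void queue_from_any_context(spinlock_t *lock, struct sk_buff_head *q,
				   struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* safe even in hardirq context */
	__skb_queue_tail(q, skb);		/* lockless helper: lock is held */
	spin_unlock_irqrestore(lock, flags);
}
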
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e60bea4604e4..bb319f22761f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah)
ath_err(common, "eeprom contains invalid mac address: %pM\n",
common->macaddr);
- random_ether_addr(common->macaddr);
+ eth_random_addr(common->macaddr);
ath_err(common, "random mac address will be used: %pM\n",
common->macaddr);
@@ -1835,13 +1835,13 @@ fail:
return -EINVAL;
}
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
{
- struct timespec ts;
+ struct timespec64 ts;
s64 usec;
if (!cur) {
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
cur = &ts;
}
@@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
- struct timespec tsf_ts;
+ struct timespec64 tsf_ts;
u32 tsf_offset;
u64 tsf = 0;
int r;
@@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
- getrawmonotonic(&tsf_ts);
+ ktime_get_raw_ts64(&tsf_ts);
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -2942,16 +2942,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
int chan_pwr, new_pwr;
+ u16 ctl = NO_CTL;
if (!chan)
return;
+ if (!test)
+ ctl = ath9k_regd_get_ctl(reg, chan);
+
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(reg, chan),
+ ah->eep_ops->set_txpower(ah, chan, ctl,
get_antenna_gain(ah, chan), new_pwr, test);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9804a24a2dc0..68956cdc8c9a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b6663c80e7dd..1049773378f2 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
tsf -= le64_to_cpu(avp->tsf_adjust);
- getrawmonotonic(&avp->chanctx->tsf_ts);
+ ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_settsf64(sc->sc_ah, tsf);
avp->chanctx->tsf_val = tsf;
@@ -1881,7 +1881,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- getrawmonotonic(&avp->chanctx->tsf_ts);
+ ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_reset_tsf(sc->sc_ah);
avp->chanctx->tsf_val = 0;
@@ -1928,6 +1928,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
flush = true;
+ /* fall through */
case IEEE80211_AMPDU_TX_STOP_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 645f0fbd9179..92b2dd396436 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,7 +18,6 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/module.h>
#include "ath9k.h"
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index b0b5579b7560..d1f6710ca63b 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
{
struct wmi *wmi = priv;
struct wmi_cmd_hdr *hdr;
+ unsigned long flags;
u16 cmd_id;
if (unlikely(wmi->stopped))
@@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
cmd_id = be16_to_cpu(hdr->command_id);
if (cmd_id & 0x1000) {
- spin_lock(&wmi->wmi_lock);
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
__skb_queue_tail(&wmi->wmi_event_queue, skb);
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
tasklet_schedule(&wmi->wmi_event_tasklet);
return;
}
/* Check if there has been a timeout. */
- spin_lock(&wmi->wmi_lock);
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
goto free_skb;
}
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
/* WMI command response */
ath9k_wmi_rsp_callback(wmi, skb);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 7fdb152be0bb..43b6c8508e49 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -62,7 +62,7 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- int seqno);
+ struct ath_buf *bf);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->status.status_driver_data[0];
- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_STATUS_EOSP)) {
ieee80211_tx_status(hw, skb);
return;
}
@@ -295,7 +296,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
if (fi->baw_tracked) {
- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
+ ath_tx_update_baw(sc, tid, bf);
sendbar = true;
}
@@ -311,10 +312,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- int seqno)
+ struct ath_buf *bf)
{
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u16 seqno = bf->bf_state.seqno;
int index, cindex;
+ if (!fi->baw_tracked)
+ return;
+
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
@@ -335,6 +341,9 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
u16 seqno = bf->bf_state.seqno;
int index, cindex;
+ if (fi->baw_tracked)
+ return;
+
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
__set_bit(cindex, tid->tx_buf);
@@ -611,7 +620,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -641,7 +650,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* run out of tx buf.
*/
if (!tbf) {
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, NULL, ts,
@@ -969,7 +978,8 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = bf;
tx_info = IEEE80211_SKB_CB(skb);
- tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ tx_info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
+ IEEE80211_TX_STATUS_EOSP);
/*
* No aggregation session is running, but there may be frames
@@ -1009,11 +1019,14 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
+ if (bf_isampdu(bf))
+ ath_tx_addto_baw(sc, tid, bf);
+
return bf;
}
@@ -1071,8 +1084,6 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_next = NULL;
/* link buffers of this frame to the aggregate */
- if (!fi->baw_tracked)
- ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.ndelim = ndelim;
list_add_tail(&bf->list, bf_q);
@@ -1659,6 +1670,22 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
}
}
+
+static void
+ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val)
+{
+ struct ieee80211_hdr *hdr;
+ u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ u16 mask_val = mask * val;
+
+ hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+ if ((hdr->frame_control & mask) != mask_val) {
+ hdr->frame_control = (hdr->frame_control & ~mask) | mask_val;
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ sizeof(*hdr), DMA_TO_DEVICE);
+ }
+}
+
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -1689,12 +1716,11 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (!bf)
break;
+ ath9k_set_moredata(sc, bf, true);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
- if (bf_isampdu(bf)) {
- ath_tx_addto_baw(sc, tid, bf);
+ if (bf_isampdu(bf))
bf->bf_state.bf_type &= ~BUF_AGGR;
- }
if (bf_tail)
bf_tail->bf_next = bf;
@@ -1712,6 +1738,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (list_empty(&bf_q))
return;
+ if (!more_data)
+ ath9k_set_moredata(sc, bf_tail, false);
+
info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
info->flags |= IEEE80211_TX_STATUS_EOSP;
@@ -2407,7 +2436,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
.txq = sc->beacon.cabq
};
struct ath_tx_info info = {};
- struct ieee80211_hdr *hdr;
struct ath_buf *bf_tail = NULL;
struct ath_buf *bf;
LIST_HEAD(bf_q);
@@ -2451,15 +2479,10 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (list_empty(&bf_q))
return;
- bf = list_first_entry(&bf_q, struct ath_buf, list);
- hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
-
- if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
- sizeof(*hdr), DMA_TO_DEVICE);
- }
+ bf = list_last_entry(&bf_q, struct ath_buf, list);
+ ath9k_set_moredata(sc, bf, false);
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
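
Two related fixes land in this file. First, the block-ack window helpers become idempotent: ath_tx_update_baw() bails out unless fi->baw_tracked is set, ath_tx_addto_baw() bails out if it already is, and frames now enter the window as soon as ath_tx_get_tid_subframe() hands them out, so retry and power-save release paths can no longer double-account a sequence number. Second, ath9k_set_moredata() centralizes More Data handling, and the CAB path clears the bit on the last queued frame (list_last_entry) instead of the first — More Data tells the client that further frames follow, so only the final frame of a burst should have it cleared. A simplified sketch of the guard pattern (the names stand in for the driver's fi->baw_tracked flag and tid->tx_buf bitmap):

#include <linux/bitops.h>

static void baw_add(unsigned long *window, bool *tracked, int cindex)
{
	if (*tracked)			/* already accounted: no-op */
		return;
	__set_bit(cindex, window);
	*tracked = true;
}

static void baw_remove(unsigned long *window, bool *tracked, int cindex)
{
	if (!*tracked)			/* never added: no-op */
		return;
	__clear_bit(cindex, window);
	*tracked = false;		/* simplified relative to the driver */
}
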
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index aeb5e6e806be..79998a3ddb7a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+ struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
@@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
- vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
break;
case WLAN_CIPHER_SUITE_CCMP:
vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
@@ -567,15 +567,19 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_conf->keyidx,
key_conf->keylen,
key);
+
if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
(WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
- sta_priv->is_data_encrypted = true;
- wcn36xx_smd_set_stakey(wcn,
- vif_priv->encrypt_type,
- key_conf->keyidx,
- key_conf->keylen,
- key,
- get_sta_index(vif, sta_priv));
+ list_for_each_entry(sta_priv,
+ &vif_priv->sta_list, list) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
}
}
break;
@@ -984,6 +988,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
+ INIT_LIST_HEAD(&vif_priv->sta_list);
list_add(&vif_priv->list, &wcn->vif_list);
wcn36xx_smd_add_sta_self(wcn, vif);
@@ -1005,6 +1010,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
spin_lock_init(&sta_priv->ampdu_lock);
sta_priv->vif = vif_priv;
+ list_add(&sta_priv->list, &vif_priv->sta_list);
+
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
* at this stage AID is not available yet.
@@ -1032,6 +1039,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
+ list_del(&sta_priv->list);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
sta_priv->vif = NULL;
@@ -1153,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = {
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
- int ret = 0;
-
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -1201,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
wiphy_ext_feature_set(wcn->hw->wiphy,
NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- return ret;
+ return 0;
}
static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
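
The WEP fix earlier in this file needed a way to reach every associated station from the vif, hence the new per-vif sta_list: it is initialized in add_interface, entries are linked in sta_add and unlinked in sta_remove, and the list is walked with list_for_each_entry() when a WEP group key must mark every station's data as encrypted (sta is NULL for group keys, which the old unconditional sta_priv dereference mishandled). The standard intrusive-list pattern, reduced to its essentials:

#include <linux/list.h>

struct demo_vif { struct list_head sta_list; };	/* INIT_LIST_HEAD() at vif setup */
struct demo_sta { struct list_head list; bool is_data_encrypted; };

static void demo_attach(struct demo_vif *vif, struct demo_sta *sta)
{
	list_add(&sta->list, &vif->sta_list);
}

static void demo_encrypt_all(struct demo_vif *vif)
{
	struct demo_sta *sta;

	list_for_each_entry(sta, &vif->sta_list, list)
		sta->is_data_encrypted = true;
}
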
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index b4dadf75d565..00098f24116d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -250,7 +250,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
- int ret = 0;
+ int ret;
unsigned long start;
struct wcn36xx_hal_msg_header *hdr =
(struct wcn36xx_hal_msg_header *)wcn->hal_buf;
@@ -446,7 +446,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
int wcn36xx_smd_start(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
- int ret = 0;
+ int ret;
int i;
size_t len;
@@ -493,7 +493,7 @@ out:
int wcn36xx_smd_stop(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_stop_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
@@ -520,7 +520,7 @@ out:
int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_init_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
@@ -549,7 +549,7 @@ out:
int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_start_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
@@ -579,7 +579,7 @@ out:
int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_end_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
@@ -610,7 +610,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_finish_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
@@ -732,7 +732,7 @@ out:
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
- int ret = 0;
+ int ret;
ret = wcn36xx_smd_rsp_status_check(buf, len);
if (ret)
@@ -747,7 +747,7 @@ int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
struct ieee80211_vif *vif, int ch)
{
struct wcn36xx_hal_switch_channel_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
@@ -860,7 +860,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
u8 *channels, size_t channel_count)
{
struct wcn36xx_hal_update_scan_params_req_ex msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
@@ -931,7 +931,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_add_sta_self_req msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
@@ -965,7 +965,7 @@ out:
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
{
struct wcn36xx_hal_del_sta_self_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
@@ -993,7 +993,7 @@ out:
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_delete_sta_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
@@ -1040,7 +1040,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len)
int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
{
struct wcn36xx_hal_join_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
@@ -1089,7 +1089,7 @@ int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
enum wcn36xx_hal_link_state state)
{
struct wcn36xx_hal_set_link_state_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
@@ -1215,7 +1215,7 @@ int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
{
struct wcn36xx_hal_config_sta_req_msg msg;
struct wcn36xx_hal_config_sta_params *sta_params;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
@@ -1414,7 +1414,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct wcn36xx_hal_config_bss_params *bss;
struct wcn36xx_hal_config_sta_params *sta_params;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
@@ -1579,7 +1579,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
u16 p2p_off)
{
struct wcn36xx_hal_send_beacon_req_msg msg_body;
- int ret = 0, pad, pvm_len;
+ int ret, pad, pvm_len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
@@ -1653,7 +1653,7 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
struct sk_buff *skb)
{
struct wcn36xx_hal_send_probe_resp_req_msg msg;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
@@ -1700,7 +1700,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_set_sta_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
@@ -1708,12 +1708,20 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
msg_body.set_sta_key_params.sta_index = sta_index;
msg_body.set_sta_key_params.enc_type = enc_type;
- msg_body.set_sta_key_params.key[0].id = keyidx;
- msg_body.set_sta_key_params.key[0].unicast = 1;
- msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
- msg_body.set_sta_key_params.key[0].pae_role = 0;
- msg_body.set_sta_key_params.key[0].length = keylen;
- memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ if (enc_type == WCN36XX_HAL_ED_WEP104 ||
+ enc_type == WCN36XX_HAL_ED_WEP40) {
+ /* Use bss key for wep (static) */
+ msg_body.set_sta_key_params.def_wep_idx = keyidx;
+ msg_body.set_sta_key_params.wep_type = 0;
+ } else {
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ }
+
msg_body.set_sta_key_params.single_tid_rc = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1741,7 +1749,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
u8 *key)
{
struct wcn36xx_hal_set_bss_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
@@ -1778,7 +1786,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
@@ -1810,7 +1818,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
u8 keyidx)
{
struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
@@ -1839,7 +1847,7 @@ int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_enter_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
@@ -1869,7 +1877,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_exit_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
@@ -1895,7 +1903,7 @@ out:
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
@@ -1930,7 +1938,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
{
struct wcn36xx_hal_keep_alive_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
@@ -1968,7 +1976,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5)
{
struct wcn36xx_hal_dump_cmd_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
@@ -2013,7 +2021,6 @@ void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
- int ret = 0;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
@@ -2022,8 +2029,8 @@ int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
arr_idx = cap / 32;
bit_idx = cap % 32;
- ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
- return ret;
+
+ return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
}
void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
@@ -2043,7 +2050,7 @@ void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
- int ret = 0, i;
+ int ret, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -2079,7 +2086,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_add_ba_session_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
@@ -2117,7 +2124,7 @@ out:
int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
{
struct wcn36xx_hal_add_ba_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
@@ -2145,7 +2152,7 @@ out:
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
{
struct wcn36xx_hal_del_ba_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
@@ -2185,7 +2192,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
@@ -2364,7 +2371,7 @@ int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
{
struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
size_t len;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
@@ -2399,7 +2406,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index 1279064a3b71..51a038022c8b 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 11e74015c79a..a58f313983b9 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -129,6 +129,8 @@ struct wcn36xx_vif {
u8 self_sta_index;
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
+
+ struct list_head sta_list;
};
/**
@@ -154,6 +156,7 @@ struct wcn36xx_vif {
* |______________|_____________|_______________|
*/
struct wcn36xx_sta {
+ struct list_head list;
struct wcn36xx_vif *vif;
u16 aid;
u16 tid;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 398edd2a7f2b..d3d61ae459e2 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
+wil6210-y += txrx_edma.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += fw.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 78946f28d0c7..f79c337105cb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -302,14 +302,14 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
sinfo->generation = wil->sinfo_gen;
- sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_BITRATE) |
- BIT(NL80211_STA_INFO_TX_BITRATE) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
@@ -322,7 +322,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
sinfo->tx_failed = stats->tx_errors;
if (test_bit(wil_vif_fwconnected, vif->status)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING,
wil->fw_capabilities))
sinfo->signal = reply.evt.rssi;
@@ -689,11 +689,12 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
- /* check we are client side */
+ /* scan is supported on client interfaces and on AP interface */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_AP:
break;
default:
return -EOPNOTSUPP;
@@ -1089,18 +1090,51 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
int rc;
bool tx_status;
- /* Note, currently we do not support the "wait" parameter, user-space
- * must call remain_on_channel before mgmt_tx or listen on a channel
- * another way (AP/PCP or connected station)
- * in addition we need to check if specified "chan" argument is
- * different from currently "listened" channel and fail if it is.
+ wil_dbg_misc(wil, "mgmt_tx: channel %d offchan %d, wait %d\n",
+ params->chan ? params->chan->hw_value : -1,
+ params->offchan,
+ params->wait);
+
+ /* Note, currently we support the "wait" parameter only on AP mode.
+ * In other modes, user-space must call remain_on_channel before
+ * mgmt_tx or listen on a channel other than active one.
*/
- rc = wmi_mgmt_tx(vif, buf, len);
- tx_status = (rc == 0);
+ if (params->chan && params->chan->hw_value == 0) {
+ wil_err(wil, "invalid channel\n");
+ return -EINVAL;
+ }
+ if (wdev->iftype != NL80211_IFTYPE_AP) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID on non-AP interfaces\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (!params->chan || params->chan->hw_value == vif->channel) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID for on-channel\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (params->offchan == 0) {
+ wil_err(wil,
+ "invalid channel params: current %d requested %d, off-channel not allowed\n",
+ vif->channel, params->chan->hw_value);
+ return -EBUSY;
+ }
+
+ /* use the wmi_mgmt_tx_ext only on AP mode and off-channel */
+ rc = wmi_mgmt_tx_ext(vif, buf, len, params->chan->hw_value,
+ params->wait);
+
+out:
+ tx_status = (rc == 0);
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
tx_status, GFP_KERNEL);
+
return rc;
}
@@ -1726,7 +1760,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int authorize;
int cid, i;
- struct vring_tx_data *txdata = NULL;
+ struct wil_ring_tx_data *txdata = NULL;
wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
mac, params->sta_flags_mask, params->sta_flags_set,
@@ -1746,20 +1780,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
return -ENOLINK;
}
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
- if (wil->vring2cid_tid[i][0] == cid) {
- txdata = &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
+ if (wil->ring2cid_tid[i][0] == cid) {
+ txdata = &wil->ring_tx_data[i];
break;
}
if (!txdata) {
- wil_err(wil, "vring data not found\n");
+ wil_err(wil, "ring data not found\n");
return -ENOLINK;
}
authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
txdata->dot1x_open = authorize ? 1 : 0;
- wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
+ wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
txdata->dot1x_open);
return 0;
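
The rewritten wil_cfg80211_mgmt_tx() earlier in this file turns a single unconditional wmi_mgmt_tx() into a three-way dispatch: non-AP interfaces and on-channel AP transmissions keep the plain WMI_SW_TX_REQ_CMDID path; AP-mode off-channel transmission is allowed only when the caller set params->offchan, and then goes through the extended command that carries the channel and the wait (dwell) duration. The control flow, condensed into a hedged sketch using the hunk's own identifiers:

	if (wdev->iftype != NL80211_IFTYPE_AP ||
	    !params->chan || params->chan->hw_value == vif->channel)
		rc = wmi_mgmt_tx(vif, buf, len);	/* on the current channel */
	else if (!params->offchan)
		rc = -EBUSY;				/* off-channel not allowed */
	else
		rc = wmi_mgmt_tx_ext(vif, buf, len,	/* off-channel + dwell */
				     params->chan->hw_value, params->wait);
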
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ebfdff4d328c..51c3330bc316 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -29,7 +29,10 @@
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
static u32 dbg_txdesc_index;
-static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_status_msg_index;
+/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
+static u32 dbg_sring_index;
enum dbg_off_type {
doff_u32 = 0,
@@ -47,20 +50,53 @@ struct dbg_off {
enum dbg_off_type type;
};
-static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
- const char *name, struct vring *vring,
- char _s, char _h)
+static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_ring *ring,
+ char _s, char _h, int idx)
{
- void __iomem *x = wmi_addr(wil, vring->hwtail);
+ u8 num_of_descs;
+ bool has_skb = false;
+
+ if (ring->is_rx) {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ seq_printf(s, "%c", (has_skb) ? _h : _s);
+ } else {
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[idx].tx.enhanced;
+
+ num_of_descs = (u8)d->mac.d[2];
+ has_skb = ring->ctx[idx].skb;
+ if (num_of_descs >= 1)
+ seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s);
+ else
+ /* num_of_descs == 0, it's a frag in a list of descs */
+ seq_printf(s, "%c", has_skb ? 'h' : _s);
+ }
+}
+
+static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct wil_ring *ring,
+ char _s, char _h)
+{
+ void __iomem *x = wmi_addr(wil, ring->hwtail);
u32 v;
- seq_printf(s, "VRING %s = {\n", name);
- seq_printf(s, " pa = %pad\n", &vring->pa);
- seq_printf(s, " va = 0x%p\n", vring->va);
- seq_printf(s, " size = %d\n", vring->size);
- seq_printf(s, " swtail = %d\n", vring->swtail);
- seq_printf(s, " swhead = %d\n", vring->swhead);
- seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ seq_printf(s, "RING %s = {\n", name);
+ seq_printf(s, " pa = %pad\n", &ring->pa);
+ seq_printf(s, " va = 0x%p\n", ring->va);
+ seq_printf(s, " size = %d\n", ring->size);
+ if (wil->use_enhanced_dma_hw && ring->is_rx)
+ seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va);
+ else
+ seq_printf(s, " swtail = %d\n", ring->swtail);
+ seq_printf(s, " swhead = %d\n", ring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
if (x) {
v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
@@ -68,41 +104,45 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
- if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
- for (i = 0; i < vring->size; i++) {
- volatile struct vring_tx_desc *d = &vring->va[i].tx;
-
- if ((i % 128) == 0 && (i != 0))
+ for (i = 0; i < ring->size; i++) {
+ if ((i % 128) == 0 && i != 0)
seq_puts(s, "\n");
- seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
- _s : (vring->ctx[i].skb ? _h : 'h'));
+ if (wil->use_enhanced_dma_hw) {
+ wil_print_desc_edma(s, wil, ring, _s, _h, i);
+ } else {
+ volatile struct vring_tx_desc *d =
+ &ring->va[i].tx.legacy;
+ seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+ _s : (ring->ctx[i].skb ? _h : 'h'));
+ }
}
seq_puts(s, "\n");
}
seq_puts(s, "}\n");
}
-static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+static int wil_ring_debugfs_show(struct seq_file *s, void *data)
{
uint i;
struct wil6210_priv *wil = s->private;
- wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
+ wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
- struct vring *vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
- if (vring->va) {
- int cid = wil->vring2cid_tid[i][0];
- int tid = wil->vring2cid_tid[i][1];
- u32 swhead = vring->swhead;
- u32 swtail = vring->swtail;
- int used = (vring->size + swhead - swtail)
- % vring->size;
- int avail = vring->size - used - 1;
+ if (ring->va) {
+ int cid = wil->ring2cid_tid[i][0];
+ int tid = wil->ring2cid_tid[i][1];
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+ int used = (ring->size + swhead - swtail)
+ % ring->size;
+ int avail = ring->size - used - 1;
char name[10];
char sidle[10];
/* performance monitoring */
@@ -137,20 +177,88 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
- wil_print_vring(s, wil, name, vring, '_', 'H');
+ wil_print_ring(s, wil, name, ring, '_', 'H');
+ }
+ }
+
+ return 0;
+}
+
+static int wil_ring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_ring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_ring = {
+ .open = wil_ring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ void __iomem *x = wmi_addr(wil, sring->hwtail);
+ int sring_idx = sring - wil->srings;
+ u32 v;
+
+ seq_printf(s, "Status Ring %s [ %d ] = {\n",
+ sring->is_rx ? "RX" : "TX", sring_idx);
+ seq_printf(s, " pa = %pad\n", &sring->pa);
+ seq_printf(s, " va = 0x%pK\n", sring->va);
+ seq_printf(s, " size = %d\n", sring->size);
+ seq_printf(s, " elem_size = %zu\n", sring->elem_size);
+ seq_printf(s, " swhead = %d\n", sring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
+ if (x) {
+ v = readl_relaxed(x);
+ seq_printf(s, "0x%08x = %d\n", v, v);
+ } else {
+ seq_puts(s, "???\n");
+ }
+ seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
+
+ if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ uint i;
+
+ for (i = 0; i < sring->size; i++) {
+ u32 *sdword_0 =
+ (u32 *)(sring->va + (sring->elem_size * i));
+
+ if ((i % 128) == 0 && i != 0)
+ seq_puts(s, "\n");
+ if (i == sring->swhead)
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ 'X' : 'x');
+ else
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ '1' : '0');
}
+ seq_puts(s, "\n");
}
+ seq_puts(s, "}\n");
+}
+
+static int wil_srings_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int i = 0;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
+ if (wil->srings[i].va)
+ wil_print_sring(s, wil, &wil->srings[i]);
return 0;
}
-static int wil_vring_seq_open(struct inode *inode, struct file *file)
+static int wil_srings_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, wil_vring_debugfs_show, inode->i_private);
+ return single_open(file, wil_srings_debugfs_show, inode->i_private);
}
-static const struct file_operations fops_vring = {
- .open = wil_vring_seq_open,
+static const struct file_operations fops_srings = {
+ .open = wil_srings_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
@@ -162,8 +270,8 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
}
-static void wil_print_ring(struct seq_file *s, const char *prefix,
- void __iomem *off)
+static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
{
struct wil6210_priv *wil = s->private;
struct wil6210_mbox_ring r;
@@ -249,9 +357,9 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
if (ret < 0)
return ret;
- wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx));
- wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx));
wil_pm_runtime_put(wil);
@@ -719,13 +827,13 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
if ((strcmp(cmd, "add") == 0) ||
(strcmp(cmd, "del_tx") == 0)) {
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
wil_err(wil, "BACK: invalid ring id %d\n", p1);
return -EINVAL;
}
- txdata = &wil->vring_tx_data[p1];
+ txdata = &wil->ring_tx_data[p1];
if (strcmp(cmd, "add") == 0) {
if (rc < 3) {
wil_err(wil, "BACK: add require at least 2 params\n");
@@ -972,54 +1080,93 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- struct vring *vring;
- bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+ struct wil_ring *ring;
+ bool tx;
+ int ring_idx = dbg_ring_index;
+ int txdesc_idx = dbg_txdesc_index;
+ volatile struct vring_tx_desc *d;
+ volatile u32 *u;
+ struct sk_buff *skb;
+
+ if (wil->use_enhanced_dma_hw) {
+ /* RX ring index == 0 */
+ if (ring_idx >= WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
+ } else {
+ /* RX ring index == WIL6210_MAX_TX_RINGS */
+ if (ring_idx > WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = (ring_idx < WIL6210_MAX_TX_RINGS);
+ }
- vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx;
+ ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
- if (!vring->va) {
+ if (!ring->va) {
if (tx)
- seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+ seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
else
- seq_puts(s, "No Rx VRING\n");
+ seq_puts(s, "No Rx RING\n");
return 0;
}
- if (dbg_txdesc_index < vring->size) {
- /* use struct vring_tx_desc for Rx as well,
- * only field used, .dma.length, is the same
- */
- volatile struct vring_tx_desc *d =
- &vring->va[dbg_txdesc_index].tx;
- volatile u32 *u = (volatile u32 *)d;
- struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
-
+ if (txdesc_idx >= ring->size) {
if (tx)
- seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
- dbg_txdesc_index);
+ seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+ ring_idx, txdesc_idx, ring->size);
else
- seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
- seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- u[0], u[1], u[2], u[3]);
- seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- u[4], u[5], u[6], u[7]);
- seq_printf(s, " SKB = 0x%p\n", skb);
+ seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+ txdesc_idx, ring->size);
+ return 0;
+ }
- if (skb) {
- skb_get(skb);
- wil_seq_print_skb(s, skb);
- kfree_skb(skb);
+ /* use struct vring_tx_desc for Rx as well,
+ * only field used, .dma.length, is the same
+ */
+ d = &ring->va[txdesc_idx].tx.legacy;
+ u = (volatile u32 *)d;
+ skb = NULL;
+
+ if (wil->use_enhanced_dma_hw) {
+ if (tx) {
+ skb = ring->ctx[txdesc_idx].skb;
+ } else {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[txdesc_idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ if (!wil_val_in_range(buff_id, 0,
+ wil->rx_buff_mgmt.size)) {
+ seq_printf(s, "invalid buff_id %d\n", buff_id);
+ return 0;
+ }
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
}
- seq_puts(s, "}\n");
} else {
- if (tx)
- seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
- dbg_vring_index, dbg_txdesc_index,
- vring->size);
- else
- seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
- dbg_txdesc_index, vring->size);
+ skb = ring->ctx[txdesc_idx].skb;
}
+ if (tx)
+ seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
+ txdesc_idx);
+ else
+ seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = 0x%p\n", skb);
+
+ if (skb) {
+ skb_get(skb);
+ wil_seq_print_skb(s, skb);
+ kfree_skb(skb);
+ }
+ seq_puts(s, "}\n");
return 0;
}
@@ -1036,6 +1183,115 @@ static const struct file_operations fops_txdesc = {
.llseek = seq_lseek,
};
+/*---------Tx/Rx status message------------*/
+static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int sring_idx = dbg_sring_index;
+ struct wil_status_ring *sring;
+ bool tx = sring_idx == wil->tx_sring_idx ? 1 : 0;
+ u32 status_msg_idx = dbg_status_msg_index;
+ u32 *u;
+
+ if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
+ seq_printf(s, "invalid status ring index %d\n", sring_idx);
+ return 0;
+ }
+
+ sring = &wil->srings[sring_idx];
+
+ if (!sring->va) {
+ seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
+ return 0;
+ }
+
+ if (status_msg_idx >= sring->size) {
+ seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
+ tx ? 'T' : 'R', status_msg_idx, sring->size);
+ return 0;
+ }
+
+ u = sring->va + (sring->elem_size * status_msg_idx);
+
+ seq_printf(s, "%cx[%d][%3d] = {\n",
+ tx ? 'T' : 'R', sring_idx, status_msg_idx);
+
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ if (!tx && !wil->use_compressed_rx_status)
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+
+ seq_puts(s, "}\n");
+
+ return 0;
+}
+
+static int wil_status_msg_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_status_msg_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_status_msg = {
+ .open = wil_status_msg_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
+{
+ struct wil_rx_buff *it;
+ int i = 0;
+
+ list_for_each_entry(it, lh, list) {
+ if ((i % 16) == 0 && i != 0)
+ seq_puts(s, "\n ");
+ seq_printf(s, "[%4d] ", it->id);
+ i++;
+ }
+ seq_printf(s, "\nNumber of buffers: %u\n", i);
+
+ return i;
+}
+
+static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
+ int num_active;
+ int num_free;
+
+ seq_printf(s, " size = %zu\n", rbm->size);
+ seq_printf(s, " free_list_empty_cnt = %lu\n",
+ rbm->free_list_empty_cnt);
+
+ /* Print active list */
+ seq_puts(s, " Active list:\n");
+ num_active = wil_print_rx_buff(s, &rbm->active);
+ seq_puts(s, "\n Free list:\n");
+ num_free = wil_print_rx_buff(s, &rbm->free);
+
+ seq_printf(s, " Total number of buffers: %u\n",
+ num_active + num_free);
+
+ return 0;
+}
+
+static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_rx_buff_mgmt_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_rx_buff_mgmt = {
+ .open = wil_rx_buff_mgmt_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
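
Each of the new debugfs entries in this file (fops_ring, fops_srings, fops_status_msg, fops_rx_buff_mgmt) repeats the same read-only seq_file idiom: a show() callback that prints into the seq_file, bound with single_open() so the output is generated in one pass. Reduced to a template (DEFINE_SHOW_ATTRIBUTE() in later kernels generates exactly this boilerplate):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *data)
{
	void *priv = s->private;	/* what was passed to debugfs_create_file() */

	seq_printf(s, "priv at %pK\n", priv);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations fops_example = {
	.open	 = example_open,
	.release = single_release,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
};
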
/*---------beamforming------------*/
static char *wil_bfstatus_str(u32 status)
{
@@ -1132,7 +1388,7 @@ static const struct file_operations fops_bf = {
};
/*---------temp------------*/
-static void print_temp(struct seq_file *s, const char *prefix, u32 t)
+static void print_temp(struct seq_file *s, const char *prefix, s32 t)
{
switch (t) {
case 0:
@@ -1140,7 +1396,8 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
seq_printf(s, "%s N/A\n", prefix);
break;
default:
- seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
+ seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""),
+ abs(t / 1000), abs(t % 1000));
break;
}
}
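
The u32-to-s32 change above fixes how sub-zero readings render: the firmware reports millidegrees, and with the old unsigned type a value of -1500 arrived as 4294965796 and printed as "4294965.796". A naive signed print is no better, since C's division and modulo both carry the sign, yielding "-1.-500"; printing the sign once and the absolute value of each part gives "-1.500". The arithmetic, runnable stand-alone:

#include <stdio.h>
#include <stdlib.h>

static void print_temp(int t)	/* t in millidegrees Celsius */
{
	printf("%s%d.%03d\n", t < 0 ? "-" : "", abs(t / 1000), abs(t % 1000));
}

/* print_temp(-1500) -> "-1.500"; print_temp(23250) -> "23.250" */
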
@@ -1148,7 +1405,7 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
static int wil_temp_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- u32 t_m, t_r;
+ s32 t_m, t_r;
int rc = wmi_get_temperature(wil, &t_m, &t_r);
if (rc) {
@@ -1384,6 +1641,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
+ unsigned long long drop_dup_mcast = r->drop_dup_mcast;
seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num);
for (i = 0; i < r->buf_size; i++) {
@@ -1393,9 +1651,9 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
}
seq_printf(s,
- "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
- r->total, drop_dup + drop_old, drop_dup, drop_old,
- r->ssn_last_drop);
+ "] total %llu drop %llu (dup %llu + old %llu + dup mcast %llu) last 0x%03x\n",
+ r->total, drop_dup + drop_old + drop_dup_mcast, drop_dup,
+ drop_old, drop_dup_mcast, r->ssn_last_drop);
}
static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
@@ -1477,6 +1735,12 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
p->stats.rx_short_frame,
p->stats.rx_large_frame,
p->stats.rx_replay);
+ seq_printf(s,
+ "mic error %lu, key error %lu, amsdu error %lu, csum error %lu\n",
+ p->stats.rx_mic_error,
+ p->stats.rx_key_error,
+ p->stats.rx_amsdu_error,
+ p->stats.rx_csum_err);
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1538,6 +1802,343 @@ static const struct file_operations fops_mids = {
.llseek = seq_lseek,
};
+static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, bin;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ u8 aid = 0;
+ u8 mid;
+
+ if (!p->tx_latency_bins)
+ continue;
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ aid = p->aid;
+ break;
+ }
+ mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
+ seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
+ mid, aid);
+
+ if (p->status == wil_sta_connected) {
+ u64 num_packets = 0;
+ u64 tx_latency_avg = p->stats.tx_latency_total_us;
+
+ seq_puts(s, "Tx/Latency bin:");
+ for (bin = 0; bin < WIL_NUM_LATENCY_BINS; bin++) {
+				seq_printf(s, " %llu",
+					   p->tx_latency_bins[bin]);
+ num_packets += p->tx_latency_bins[bin];
+ }
+ seq_puts(s, "\n");
+ if (!num_packets)
+ continue;
+ do_div(tx_latency_avg, num_packets);
+			seq_printf(s, "Tx/Latency min/avg/max (us): %u/%llu/%u",
+ p->stats.tx_latency_min_us,
+ tx_latency_avg,
+ p->stats.tx_latency_max_us);
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static int wil_tx_latency_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_tx_latency_debugfs_show,
+ inode->i_private);
+}
+
+static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int val, rc, i;
+ bool enable;
+
+ rc = kstrtoint_from_user(buf, len, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
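+	/* 0 disables measurements, 1 selects the default resolution,
+	 * any other value is the bin resolution in usec (50..1000)
+	 */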
+ if (val == 1)
+ /* default resolution */
+ val = 500;
+ if (val && (val < 50 || val > 1000)) {
+ wil_err(wil, "Invalid resolution %d\n", val);
+ return -EINVAL;
+ }
+
+ enable = !!val;
+ if (wil->tx_latency == enable)
+ return len;
+
+ wil_info(wil, "%s TX latency measurements (resolution %dusec)\n",
+ enable ? "Enabling" : "Disabling", val);
+
+ if (enable) {
+ size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
+
+ wil->tx_latency_res = val;
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *sta = &wil->sta[i];
+
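+			/* drop any previous histogram so stale counts
+			 * are not mixed with the new resolution
+			 */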
+ kfree(sta->tx_latency_bins);
+ sta->tx_latency_bins = kzalloc(sz, GFP_KERNEL);
+ if (!sta->tx_latency_bins)
+ return -ENOMEM;
+ sta->stats.tx_latency_min_us = U32_MAX;
+ sta->stats.tx_latency_max_us = 0;
+ sta->stats.tx_latency_total_us = 0;
+ }
+ }
+ wil->tx_latency = enable;
+
+ return len;
+}
+
+static const struct file_operations fops_tx_latency = {
+ .open = wil_tx_latency_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_tx_latency_write,
+ .llseek = seq_lseek,
+};
+
+static void wil_link_stats_print_basic(struct wil6210_vif *vif,
+ struct seq_file *s,
+ struct wmi_link_stats_basic *basic)
+{
+ char per[5] = "?";
+
+ if (basic->per_average != 0xff)
+ snprintf(per, sizeof(per), "%d%%", basic->per_average);
+
+ seq_printf(s, "CID %d {\n"
+ "\tTxMCS %d TxTpt %d\n"
+ "\tGoodput(rx:tx) %d:%d\n"
+ "\tRxBcastFrames %d\n"
+ "\tRSSI %d SQI %d SNR %d PER %s\n"
+ "\tRx RFC %d Ant num %d\n"
+ "\tSectors(rx:tx) my %d:%d peer %d:%d\n"
+ "}\n",
+ basic->cid,
+ basic->bf_mcs, le32_to_cpu(basic->tx_tpt),
+ le32_to_cpu(basic->rx_goodput),
+ le32_to_cpu(basic->tx_goodput),
+ le32_to_cpu(basic->rx_bcast_frames),
+ basic->rssi, basic->sqi, basic->snr, per,
+ basic->selected_rfc, basic->rx_effective_ant_num,
+ basic->my_rx_sector, basic->my_tx_sector,
+ basic->other_rx_sector, basic->other_tx_sector);
+}
+
+static void wil_link_stats_print_global(struct wil6210_priv *wil,
+ struct seq_file *s,
+ struct wmi_link_stats_global *global)
+{
+ seq_printf(s, "Frames(rx:tx) %d:%d\n"
+ "BA Frames(rx:tx) %d:%d\n"
+ "Beacons %d\n"
+ "Rx Errors (MIC:CRC) %d:%d\n"
+ "Tx Errors (no ack) %d\n",
+ le32_to_cpu(global->rx_frames),
+ le32_to_cpu(global->tx_frames),
+ le32_to_cpu(global->rx_ba_frames),
+ le32_to_cpu(global->tx_ba_frames),
+ le32_to_cpu(global->tx_beacons),
+ le32_to_cpu(global->rx_mic_errors),
+ le32_to_cpu(global->rx_crc_errors),
+ le32_to_cpu(global->tx_fail_no_ack));
+}
+
+static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
+ struct seq_file *s)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_basic *stats;
+ int i;
+
+ if (!vif->fw_stats_ready) {
+ seq_puts(s, "no statistics\n");
+ return;
+ }
+
+ seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (wil->sta[i].mid != vif->mid)
+ continue;
+
+ stats = &wil->sta[i].fw_stats_basic;
+ wil_link_stats_print_basic(vif, s, stats);
+ }
+}
+
+static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_vif *vif;
+ int i, rc;
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+	/* iterate over all MIDs and show per-cid statistics; the global
+	 * statistics are reported by the link_stats_global entry
+	 */
+ for (i = 0; i < wil->max_vifs; i++) {
+ vif = wil->vifs[i];
+
+ seq_printf(s, "MID %d ", i);
+ if (!vif) {
+ seq_puts(s, "unused\n");
+ continue;
+ }
+
+ wil_link_stats_debugfs_show_vif(vif, s);
+ }
+
+ mutex_unlock(&wil->vif_mutex);
+
+ return 0;
+}
+
+static int wil_link_stats_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_debugfs_show, inode->i_private);
+}
+
+static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int cid, interval, rc, i;
+ struct wil6210_vif *vif;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ /* specify cid (use -1 for all cids) and snapshot interval in ms */
+ rc = sscanf(kbuf, "%d %d", &cid, &interval);
+ kfree(kbuf);
+ if (rc < 0)
+ return rc;
+ if (rc < 2 || interval < 0)
+ return -EINVAL;
+
+ wil_info(wil, "request link statistics, cid %d interval %d\n",
+ cid, interval);
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < wil->max_vifs; i++) {
+ vif = wil->vifs[i];
+ if (!vif)
+ continue;
+
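+		/* cid 0xff requests statistics for all CIDs */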
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC,
+ (cid == -1 ? 0xff : cid), interval);
+ if (rc)
+ wil_err(wil, "link statistics failed for mid %d\n", i);
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ return len;
+}
+
+static const struct file_operations fops_link_stats = {
+ .open = wil_link_stats_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_write,
+ .llseek = seq_lseek,
+};
+
+static int
+wil_link_stats_global_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (!wil->fw_stats_global.ready)
+ return 0;
+
+ seq_printf(s, "TSF %lld\n", wil->fw_stats_global.tsf);
+ wil_link_stats_print_global(wil, s, &wil->fw_stats_global.stats);
+
+ return 0;
+}
+
+static int
+wil_link_stats_global_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_global_debugfs_show,
+ inode->i_private);
+}
+
+static ssize_t
+wil_link_stats_global_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int interval, rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+
+ /* specify snapshot interval in ms */
+ rc = kstrtoint_from_user(buf, len, 0, &interval);
+ if (rc || interval < 0) {
+ wil_err(wil, "Invalid argument\n");
+ return -EINVAL;
+ }
+
+ wil_info(wil, "request global link stats, interval %d\n", interval);
+
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_GLOBAL, 0, interval);
+ if (rc)
+ wil_err(wil, "global link stats failed %d\n", rc);
+
+ return rc ? rc : len;
+}
+
+static const struct file_operations fops_link_stats_global = {
+ .open = wil_link_stats_global_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_global_write,
+ .llseek = seq_lseek,
+};
+
static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1760,6 +2361,60 @@ static const struct file_operations fops_suspend_stats = {
.open = simple_open,
};
+/*---------compressed_rx_status---------*/
+static ssize_t wil_compressed_rx_status_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int compressed_rx_status;
+ int rc;
+
+ rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+
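+	/* the Rx status format is fixed when the rings are allocated,
+	 * so it may only change while all interfaces are down
+	 */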
+ if (wil_has_active_ifaces(wil, true, false)) {
+ wil_err(wil, "cannot change edma config after iface is up\n");
+ return -EPERM;
+ }
+
+ wil_info(wil, "%sable compressed_rx_status\n",
+ compressed_rx_status ? "En" : "Dis");
+
+ wil->use_compressed_rx_status = compressed_rx_status;
+
+ return len;
+}
+
+static int
+wil_compressed_rx_status_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "%d\n", wil->use_compressed_rx_status);
+
+ return 0;
+}
+
+static int
+wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_compressed_rx_status_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_compressed_rx_status = {
+ .open = wil_compressed_rx_status_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_compressed_rx_status_write,
+ .llseek = seq_lseek,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1790,7 +2445,7 @@ static const struct {
const struct file_operations *fops;
} dbg_files[] = {
{"mbox", 0444, &fops_mbox},
- {"vrings", 0444, &fops_vring},
+ {"rings", 0444, &fops_ring},
{"stations", 0444, &fops_sta},
{"mids", 0444, &fops_mids},
{"desc", 0444, &fops_txdesc},
@@ -1813,6 +2468,13 @@ static const struct {
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
{"suspend_stats", 0644, &fops_suspend_stats},
+ {"compressed_rx_status", 0644, &fops_compressed_rx_status},
+ {"srings", 0444, &fops_srings},
+ {"status_msg", 0444, &fops_status_msg},
+ {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt},
+ {"tx_latency", 0644, &fops_tx_latency},
+ {"link_stats", 0644, &fops_link_stats},
+ {"link_stats_global", 0644, &fops_link_stats_global},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1858,7 +2520,12 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
WIL_FIELD(wakeup_trigger, 0644, doff_u8),
- WIL_FIELD(vring_idle_trsh, 0644, doff_u32),
+ WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
+ WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
+ WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
+ WIL_FIELD(amsdu_en, 0644, doff_u8),
{},
};
@@ -1872,9 +2539,11 @@ static const struct dbg_off dbg_wil_regs[] = {
/* static parameters */
static const struct dbg_off dbg_statics[] = {
{"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
- {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
+ {"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32},
{"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
+ {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
+ {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
{},
};
@@ -1922,10 +2591,14 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
+ int i;
+
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
kfree(wil->dbg_data.data_arr);
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++)
+ kfree(wil->sta[i].tx_latency_bins);
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index e7ff41e623d2..a04c87ffd37b 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
if (ret < 0)
return ret;
- wil_configure_interrupt_moderation(wil);
+ wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 540fc20984d8..3e2bbbceca06 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +23,8 @@
MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT);
MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS);
MODULE_FIRMWARE(WIL_BOARD_FILE_NAME);
+MODULE_FIRMWARE(WIL_FW_NAME_TALYN);
+MODULE_FIRMWARE(WIL_BRD_NAME_TALYN);
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 718161b829c2..388b3d4717ca 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -145,7 +145,7 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
capabilities);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
memcpy(wil->fw_capabilities, rec->capabilities,
- min(sizeof(wil->fw_capabilities), capa_size));
+ min_t(size_t, sizeof(wil->fw_capabilities), capa_size));
wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
rec->capabilities, capa_size, false);
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 84e9840c1752..5d287a8e1b45 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -44,6 +44,8 @@
(~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ
+#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ
#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
ISR_MISC_MBOX_EVT | \
ISR_MISC_FW_ERROR)
@@ -87,12 +89,24 @@ static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
WIL6210_IRQ_DISABLE);
}
+static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
+static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
@@ -125,6 +139,12 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
WIL6210_IMC_TX);
}
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_TX_EDMA);
+}
+
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
@@ -133,6 +153,12 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_RX_EDMA);
+}
+
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
@@ -164,7 +190,9 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -179,13 +207,43 @@ void wil_unmask_irq(struct wil6210_priv *wil)
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
- wil6210_unmask_irq_tx(wil);
- wil6210_unmask_irq_rx(wil);
+ if (wil->use_enhanced_dma_hw) {
+ wil6210_unmask_irq_tx_edma(wil);
+ wil6210_unmask_irq_rx_edma(wil);
+ } else {
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ }
wil6210_unmask_irq_misc(wil, true);
}
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
+{
+ u32 moderation;
+
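+	/* set the interrupt moderation idle time limit and time unit */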
+ wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
+
+ wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
+
+ /* Update RX and TX moderation */
+ moderation = wil->rx_max_burst_duration |
+ (WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
+
+ /* Treat special events as regular
+ * (set bit 0 to 0x1 and clear bits 1-8)
+ */
+ wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
+ wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
+}
+
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
@@ -294,6 +352,97 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
return IRQ_HANDLED;
}
+static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx_edma(wil);
+
+ if (likely(isr & BIT_RX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "RX status ring\n");
+ isr &= ~BIT_RX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ if (likely(test_bit(wil_status_napi_en, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err(wil,
+ "Got Rx interrupt while stopping interface\n");
+ }
+ } else {
+ wil_err(wil, "Got Rx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ /* Rx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx_edma(wil);
+
+ if (likely(isr & BIT_TX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "TX status ring\n");
+ isr &= ~BIT_TX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err(wil, "Got Tx status ring IRQ while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ /* Tx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -476,6 +625,15 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil6210_unmask_irq_misc(wil, false);
+ /* in non-triple MSI case, this is done inside wil6210_thread_irq
+	 * because it has to be done after unmasking the pseudo-cause IRQ.
+ */
+ if (wil->n_msi == 3 && wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
return IRQ_HANDLED;
}
@@ -510,30 +668,53 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
*/
static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
{
+ u32 icm_rx, icr_rx, imv_rx;
+ u32 icm_tx, icr_tx, imv_tx;
+ u32 icm_misc, icr_misc, imv_misc;
+
if (!test_bit(wil_status_irqen, wil->status)) {
- u32 icm_rx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICM));
- u32 icr_rx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+ if (wil->use_enhanced_dma_hw) {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
offsetof(struct RGF_ICR, IMV));
- u32 icm_tx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICM));
- u32 icr_tx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ } else {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
offsetof(struct RGF_ICR, IMV));
- u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ }
+ icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
- u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+ imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
/* HALP interrupt can be unmasked when misc interrupts are
@@ -592,11 +773,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
* voting for wake thread - need at least 1 vote
*/
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
- (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
- (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
@@ -610,6 +791,40 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
return rc;
}
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+
+	/* IRQs are in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+ rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED,
+ WIL_NAME "_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED,
+ WIL_NAME "_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME "_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
@@ -624,6 +839,10 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
@@ -646,16 +865,28 @@ void wil6210_clear_halp(struct wil6210_priv *wil)
wil6210_unmask_halp(wil);
}
-int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{
int rc;
- wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
+ wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n",
+ wil->n_msi ? "MSI" : "INTx", wil->n_msi);
- rc = request_threaded_irq(irq, wil6210_hardirq,
- wil6210_thread_irq,
- use_msi ? 0 : IRQF_SHARED,
- WIL_NAME, wil);
+ if (wil->use_enhanced_dma_hw) {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
+ } else {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx;
+ }
+
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
return rc;
}
@@ -665,4 +896,8 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
wil_mask_irq(wil);
free_irq(irq, wil);
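+	/* in 3-MSI mode the Rx and Misc vectors were requested
+	 * separately and must be freed as well
+	 */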
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index e7006c2428a0..7debed6bec06 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,11 +21,14 @@
#include "wil6210.h"
#include "txrx.h"
+#include "txrx_edma.h"
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
@@ -110,9 +113,29 @@ MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
-#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
+enum {
+ WIL_BOOT_ERR,
+ WIL_BOOT_VANILLA,
+ WIL_BOOT_PRODUCTION,
+ WIL_BOOT_DEVELOPMENT,
+};
+
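+/* OTP signature status, bits 8..9 of RGF_USER_OTP_HW_RD_MACHINE_1 */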
+enum {
+ WIL_SIG_STATUS_VANILLA = 0x0,
+ WIL_SIG_STATUS_DEVELOPMENT = 0x1,
+ WIL_SIG_STATUS_PRODUCTION = 0x2,
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
+};
+
+#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
+#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
+
+#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
+/* round up to be above 2 ms total */
+#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
+
/*
* Due to a hardware issue,
* one has to read/write to/from NIC in 32-bit chunks;
@@ -160,6 +183,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
+static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct wil_ring *ring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ if (!ring->va)
+ return;
+
+ wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
+
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->mid = U8_MAX;
+ txdata->enabled = 0; /* no Tx can be in progress or start anew */
+ spin_unlock_bh(&txdata->lock);
+ /* napi_synchronize waits for completion of the current NAPI but will
+ * not prevent the next NAPI run.
+ * Add a memory barrier to guarantee that txdata->enabled is zeroed
+ * before napi_synchronize so that the next scheduled NAPI will not
+ * handle this vring
+ */
+ wmb();
+ /* make sure NAPI won't touch this vring */
+ if (test_bit(wil_status_napi_en, wil->status))
+ napi_synchronize(&wil->napi_tx);
+
+ wil->txrx_ops.ring_fini_tx(wil, ring);
+}
+
static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -219,12 +273,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
/* release vrings */
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
- if (wil->vring2cid_tid[i][0] == cid)
- wil_vring_fini_tx(wil, i);
+ for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ if (wil->ring2cid_tid[i][0] == cid)
+ wil_ring_fini_tx(wil, i);
}
/* statistics */
memset(&sta->stats, 0, sizeof(sta->stats));
+ sta->stats.tx_latency_min_us = U32_MAX;
}
static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
@@ -453,18 +508,19 @@ static void wil_fw_error_worker(struct work_struct *work)
mutex_unlock(&wil->mutex);
}
-static int wil_find_free_vring(struct wil6210_priv *wil)
+static int wil_find_free_ring(struct wil6210_priv *wil)
{
int i;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- if (!wil->vring_tx[i].va)
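+	/* skip ring ids below the minimum usable id for this HW */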
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (!wil->ring_tx[i].va)
return i;
}
return -EINVAL;
}
-int wil_tx_init(struct wil6210_vif *vif, int cid)
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc = -EINVAL, ringid;
@@ -473,16 +529,17 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
wil_err(wil, "No connection pending\n");
goto out;
}
- ringid = wil_find_free_vring(wil);
+ ringid = wil_find_free_ring(wil);
if (ringid < 0) {
wil_err(wil, "No free vring found\n");
goto out;
}
- wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n",
+ wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
cid, vif->mid, ringid);
- rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0);
+ rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
+ cid, 0);
if (rc)
wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
cid, vif->mid, ringid);
@@ -494,19 +551,19 @@ out:
int wil_bcast_init(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- int ri = vif->bcast_vring, rc;
+ int ri = vif->bcast_ring, rc;
- if ((ri >= 0) && wil->vring_tx[ri].va)
+ if (ri >= 0 && wil->ring_tx[ri].va)
return 0;
- ri = wil_find_free_vring(wil);
+ ri = wil_find_free_ring(wil);
if (ri < 0)
return ri;
- vif->bcast_vring = ri;
- rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order);
+ vif->bcast_ring = ri;
+ rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
if (rc)
- vif->bcast_vring = -1;
+ vif->bcast_ring = -1;
return rc;
}
@@ -514,13 +571,13 @@ int wil_bcast_init(struct wil6210_vif *vif)
void wil_bcast_fini(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- int ri = vif->bcast_vring;
+ int ri = vif->bcast_ring;
if (ri < 0)
return;
- vif->bcast_vring = -1;
- wil_vring_fini_tx(wil, ri);
+ vif->bcast_ring = -1;
+ wil_ring_fini_tx(wil, ri);
}
void wil_bcast_fini_all(struct wil6210_priv *wil)
@@ -548,7 +605,7 @@ int wil_priv_init(struct wil6210_priv *wil)
}
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
- spin_lock_init(&wil->vring_tx_data[i].lock);
+ spin_lock_init(&wil->ring_tx_data[i].lock);
mutex_init(&wil->mutex);
mutex_init(&wil->vif_mutex);
@@ -589,11 +646,30 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->vring_idle_trsh = 16;
+ wil->ring_idle_trsh = 16;
wil->reply_mid = U8_MAX;
wil->max_vifs = 1;
+ /* edma configuration can be updated via debugfs before allocation */
+ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
+ wil->use_compressed_rx_status = true;
+ wil->use_rx_hw_reordering = true;
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ /* Rx status ring size should be bigger than the number of RX buffers
+ * in order to prevent backpressure on the status ring, which may
+ * cause HW freeze.
+ */
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+ /* Number of RX buffer IDs should be bigger than the RX descriptor
+ * ring size as in HW reorder flow, the HW can consume additional
+ * buffers before releasing the previous ones.
+ */
+ wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
+
+ wil->amsdu_en = 1;
+
return 0;
out_wmi_wq:
@@ -736,14 +812,24 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil)
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
- wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
- wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
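+	/* Talyn-MB exposes the CPU reset controls at dedicated RGF
+	 * addresses
+	 */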
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
+ BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
+ BIT_USER_MAC_CPU_MAN_RST);
+ } else {
+ wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ }
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
- wil_w(wil, RGF_USER_USER_CPU_0, 1);
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
+ else
+ wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
@@ -767,11 +853,146 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
}
}
-static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
+ /* wait until device ready. */
+ if (no_flash) {
+ msleep(PMU_READY_DELAY_MS);
+
+ wil_dbg_misc(wil, "Reset completed\n");
+ } else {
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+ x1, x);
+ x1 = x;
+ }
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (x != BL_READY);
+
+ wil_dbg_misc(wil, "Reset completed in %d ms\n",
+ delay * RST_DELAY);
+ }
+
+ return 0;
+}
+
+static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
+{
+ u32 otp_hw;
+ u8 signature_status;
+ bool otp_signature_err;
+ bool hw_section_done;
+ u32 otp_qc_secured;
+ int delay = 0;
+
+ /* Wait for OTP signature test to complete */
+ usleep_range(2000, 2200);
+
+ wil->boot_config = WIL_BOOT_ERR;
+
+ /* Poll until OTP signature status is valid.
+ * In vanilla and development modes, when signature test is complete
+ * HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB.
+ * In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll
+ * for signature status change to 2 or 3.
+ */
+ do {
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ signature_status = WIL_GET_BITS(otp_hw, 8, 9);
+ otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
+
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_VANILLA) {
+ wil->boot_config = WIL_BOOT_VANILLA;
+ break;
+ }
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
+ wil->boot_config = WIL_BOOT_DEVELOPMENT;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_PRODUCTION) {
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status ==
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
+ /* Unrecognized OTP signature found. Possibly a
+ * corrupted production signature, access control
+ * is applied as in production mode, therefore
+ * do not fail
+ */
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (delay++ > OTP_HW_COUNT)
+ break;
+
+ usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
+ } while (!otp_signature_err && signature_status == 0);
+
+ if (wil->boot_config == WIL_BOOT_ERR) {
+ wil_err(wil,
+ "invalid boot config, signature_status %d otp_signature_err %d\n",
+ signature_status, otp_signature_err);
+ return -ETIME;
+ }
+
+ wil_dbg_misc(wil,
+ "signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
+ delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
+
+ if (wil->boot_config == WIL_BOOT_VANILLA)
+ /* Assuming not SPI boot (currently not supported) */
+ goto out;
+
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+ delay = 0;
+
+ while (!hw_section_done) {
+ msleep(RST_DELAY);
+
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "TO waiting for hw_section_done\n");
+ return -ETIME;
+ }
+ }
+
+ wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
+
+ otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
+ wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
+ wil_dbg_misc(wil, "secured boot is %sabled\n",
+ wil->secured_boot ? "en" : "dis");
+
+out:
+ wil_dbg_misc(wil, "Reset completed\n");
+
+ return 0;
+}
+
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+{
+ u32 x;
+ int rc;
+
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
@@ -811,10 +1032,17 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ } else {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ }
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
@@ -830,34 +1058,12 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- /* wait until device ready. typical time is 20..80 msec */
- if (no_flash)
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, USER_EXT_USER_PMU_3);
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
- x);
- return -ETIME;
- }
- } while ((x & BIT_PMU_DEVICE_RDY) == 0);
+ if (wil->hw_version == HW_VER_TALYN_MB)
+ rc = wil_wait_device_ready_talyn_mb(wil);
else
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0,
- boot_loader_ready));
- if (x1 != x) {
- wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
- x1, x);
- x1 = x;
- }
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
- x);
- return -ETIME;
- }
- } while (x != BL_READY);
+ rc = wil_wait_device_ready(wil, no_flash);
+ if (rc)
+ return rc;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -865,7 +1071,7 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
- if (no_flash) {
+ if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
/* Reset OTP HW vectors to fit 40MHz */
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
@@ -880,7 +1086,6 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
}
- wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
@@ -925,6 +1130,9 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
}
+ if (test_bit(WMI_FW_CAPABILITY_TX_REQ_EXT, wil->fw_capabilities))
+ wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX;
+
if (wil->platform_ops.set_features) {
features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
wil->fw_capabilities) &&
@@ -932,8 +1140,20 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wil->platform_capa)) ?
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+ if (wil->n_msi == 3)
+ features |= BIT(WIL_PLATFORM_FEATURE_TRIPLE_MSI);
+
wil->platform_ops.set_features(wil->platform_handle, features);
}
+
+ if (test_bit(WMI_FW_CAPABILITY_BACK_WIN_SIZE_64,
+ wil->fw_capabilities)) {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE_64;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE_128;
+ } else {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE;
+ }
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -945,6 +1165,28 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
le32_to_cpus(&r->head);
}
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+ const char *board_file;
+ const char *wil_talyn_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+
+ if (wil->board_file) {
+ board_file = wil->board_file;
+ } else {
+		/* if the Talyn-specific FW file is in use,
+		 * use the matching board file
+		 */
+ if (strcmp(wil->wil_fw_name, wil_talyn_fw_name) == 0)
+ board_file = WIL_BRD_NAME_TALYN;
+ else
+ board_file = WIL_BOARD_FILE_NAME;
+ }
+
+ strlcpy(buf, board_file, len);
+}
+
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
@@ -1042,8 +1284,14 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
struct net_device *ndev = wil->main_ndev;
struct wiphy *wiphy = wil_to_wiphy(wil);
u8 mac[8];
+ int mac_addr;
- wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ mac_addr = RGF_OTP_MAC_TALYN_MB;
+ else
+ mac_addr = RGF_OTP_MAC;
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
sizeof(mac));
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "Invalid MAC %pM\n", mac);
@@ -1060,7 +1308,7 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
- ulong to = msecs_to_jiffies(1000);
+ ulong to = msecs_to_jiffies(2000);
ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
if (0 == left) {
@@ -1147,8 +1395,13 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
/* it is W1C, clear by writing back same value */
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
- /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */
- wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ /* clear PAL_UNIT_ICR (potential D0->D3 leftover)
+ * In Talyn-MB host cannot access this register due to
+ * access control, hence PAL_UNIT_ICR is cleared by the FW
+ */
+ if (wil->hw_version < HW_VER_TALYN_MB)
+ wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
+ 0);
if (wil->fw_calib_result > 0) {
__le32 val = cpu_to_le32(wil->fw_calib_result |
@@ -1284,7 +1537,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
- wil_rx_fini(wil);
+ wil->txrx_ops.rx_fini(wil);
+ wil->txrx_ops.tx_fini(wil);
if (rc) {
if (!no_flash)
wil_bl_crash_info(wil, true);
@@ -1304,8 +1558,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
+ char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+ if (wil->secured_boot) {
+ wil_err(wil, "secured boot is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ board_file[0] = '\0';
+ wil_get_board_file(wil, board_file, sizeof(board_file));
wil_info(wil, "Use firmware <%s> + board <%s>\n",
- wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ wil->wil_fw_name, board_file);
if (!no_flash)
wil_bl_prepare_halt(wil);
@@ -1317,11 +1580,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (rc)
goto out;
if (wil->brd_file_addr)
- rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ rc = wil_request_board(wil, board_file);
else
- rc = wil_request_firmware(wil,
- WIL_BOARD_FILE_NAME,
- true);
+ rc = wil_request_firmware(wil, board_file, true);
if (rc)
goto out;
@@ -1337,7 +1598,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
if (load_fw) {
- wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */
@@ -1352,6 +1612,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ wil->txrx_ops.configure_interrupt_moderation(wil);
+
+ /* Enable OFU rdy valid bug fix, to prevent hang in oful34_rx
+ * while there is back-pressure from Host during RX
+ */
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_s(wil, RGF_DMA_MISC_CTL,
+ BIT_OFUL34_RDY_VALID_BUG_FIX_EN);
+
rc = wil_restore_vifs(wil);
if (rc) {
wil_err(wil, "failed to restore vifs, rc %d\n", rc);
@@ -1406,8 +1675,12 @@ int __wil_up(struct wil6210_priv *wil)
if (rc)
return rc;
- /* Rx VRING. After MAC and beacon */
- rc = wil_rx_init(wil, 1 << rx_ring_order);
+ /* Rx RING. After MAC and beacon */
+ rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
+ if (rc)
+ return rc;
+
+ rc = wil->txrx_ops.tx_init(wil);
if (rc)
return rc;
@@ -1568,3 +1841,11 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_unlock(&wil->halp.lock);
}
+
+void wil_init_txrx_ops(struct wil6210_priv *wil)
+{
+ if (wil->use_enhanced_dma_hw)
+ wil_init_txrx_ops_edma(wil);
+ else
+ wil_init_txrx_ops_legacy_dma(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index eb6c14ed65a4..7a78a06bd356 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
return done;
}
+static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_rx);
+ int quota = budget;
+ int done;
+
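+	/* the Rx handler decrements quota for each processed frame;
+	 * the difference from budget is the work done in this poll
+	 */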
+ wil_rx_handle_edma(wil, &quota);
+ done = budget - quota;
+
+ if (done < budget) {
+ napi_complete_done(napi, done);
+ wil6210_unmask_irq_rx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+ return done;
+}
+
static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
@@ -129,11 +150,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
/* always process ALL Tx complete, regardless budget - it is fast */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
struct wil6210_vif *vif;
- if (!vring->va || !txdata->enabled ||
+ if (!ring->va || !txdata->enabled ||
txdata->mid >= wil->max_vifs)
continue;
@@ -157,6 +178,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
return min(tx_done, budget);
}
+static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_tx);
+ int tx_done;
+	/* there is only one Tx status ring */
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ if (!sring->va)
+ return 0;
+
+ tx_done = wil_tx_sring_handler(wil, sring);
+
+ if (tx_done < budget) {
+ napi_complete(napi);
+ wil6210_unmask_irq_tx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI TX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+ return min(tx_done, budget);
+}
+
static void wil_dev_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -228,7 +273,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t)
static void wil_vif_init(struct wil6210_vif *vif)
{
- vif->bcast_vring = -1;
+ vif->bcast_ring = -1;
mutex_init(&vif->probe_client_mutex);
@@ -418,11 +463,21 @@ int wil_if_add(struct wil6210_priv *wil)
}
init_dummy_netdev(&wil->napi_ndev);
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
- netif_tx_napi_add(&wil->napi_ndev,
- &wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
+ if (wil->use_enhanced_dma_hw) {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx_edma,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx_edma,
+ WIL6210_NAPI_BUDGET);
+ } else {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+ }
wil_update_net_queues_bh(wil, vif, NULL, true);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 19cbc6add637..89119e7facd0 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -24,11 +24,11 @@
#include <linux/rtnetlink.h>
#include <linux/pm_runtime.h>
-static bool use_msi = true;
-module_param(use_msi, bool, 0444);
-MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
+static int n_msi = 3;
+module_param(n_msi, int, 0444);
+MODULE_PARM_DESC(n_msi, " Use MSI interrupts: 0 - use INTx, 1 - single MSI, 3 - three MSIs (default)");
-static bool ftm_mode;
+bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
@@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
break;
case JTAG_DEV_ID_TALYN:
- wil->hw_name = "Talyn";
+ wil->hw_name = "Talyn-MA";
wil->hw_version = HW_VER_TALYN;
memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
@@ -93,6 +93,25 @@ int wil_set_capabilities(struct wil6210_priv *wil)
if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
BIT_NO_FLASH_INDICATION)
set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
+ break;
+ case JTAG_DEV_ID_TALYN_MB:
+ wil->hw_name = "Talyn-MB";
+ wil->hw_version = HW_VER_TALYN_MB;
+ memcpy(fw_mapping, talyn_mb_fw_mapping,
+ sizeof(talyn_mb_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil->use_enhanced_dma_hw = true;
+ wil->use_rx_hw_reordering = true;
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
@@ -102,6 +121,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
return -EINVAL;
}
+ wil_init_txrx_ops(wil);
+
iccm_section = wil_find_fw_mapping("fw_code");
if (!iccm_section) {
wil_err(wil, "fw_code section not found in fw_mapping\n");
@@ -129,12 +150,24 @@ int wil_set_capabilities(struct wil6210_priv *wil)
void wil_disable_irq(struct wil6210_priv *wil)
{
- disable_irq(wil->pdev->irq);
+ int irq = wil->pdev->irq;
+
+ disable_irq(irq);
+ if (wil->n_msi == 3) {
+ disable_irq(irq + 1);
+ disable_irq(irq + 2);
+ }
}
void wil_enable_irq(struct wil6210_priv *wil)
{
- enable_irq(wil->pdev->irq);
+ int irq = wil->pdev->irq;
+
+ enable_irq(irq);
+ if (wil->n_msi == 3) {
+ enable_irq(irq + 1);
+ enable_irq(irq + 2);
+ }
}
static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
@@ -161,28 +194,47 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
* and only MSI should be used
*/
int msi_only = pdev->msi_enabled;
- bool _use_msi = use_msi;
wil_dbg_misc(wil, "if_pcie_enable\n");
pci_set_master(pdev);
- wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
+ /* how many MSI interrupts to request? */
+ switch (n_msi) {
+ case 3:
+ case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", n_msi);
+ break;
+ case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+ break;
+ default:
+ wil_err(wil, "Invalid n_msi=%d, default to 1\n", n_msi);
+ n_msi = 1;
+ }
+
+ if (n_msi == 3 &&
+ pci_alloc_irq_vectors(pdev, n_msi, n_msi, PCI_IRQ_MSI) < n_msi) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ n_msi = 1;
+ }
- if (use_msi && pci_enable_msi(pdev)) {
+ if (n_msi == 1 && pci_enable_msi(pdev)) {
wil_err(wil, "pci_enable_msi failed, use INTx\n");
- _use_msi = false;
+ n_msi = 0;
}
- if (!_use_msi && msi_only) {
+ wil->n_msi = n_msi;
+
+ if (wil->n_msi == 0 && msi_only) {
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
rc = -ENODEV;
goto stop_master;
}
- rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
+ rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
- goto stop_master;
+ goto release_vectors;
/* need reset here to obtain MAC */
mutex_lock(&wil->mutex);
@@ -195,8 +247,9 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
release_irq:
wil6210_fini_irq(wil, pdev->irq);
- /* safe to call if no MSI */
- pci_disable_msi(pdev);
+ release_vectors:
+ /* safe to call if no allocation */
+ pci_free_irq_vectors(pdev);
stop_master:
pci_clear_master(pdev);
return rc;
@@ -257,8 +310,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
- int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
- int i;
+ int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
+ int i, start_idx;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@@ -293,24 +346,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto if_free;
}
/* rollback to err_plat */
-
- /* device supports >32bit addresses */
- for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
- rc = dma_set_mask_and_coherent(dev,
- DMA_BIT_MASK(dma_addr_size[i]));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
- dma_addr_size[i], rc);
- continue;
- }
- dev_info(dev, "using dma mask %d", dma_addr_size[i]);
- wil->dma_addr_size = dma_addr_size[i];
- break;
- }
-
- if (wil->dma_addr_size == 0)
- goto err_plat;
-
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@@ -350,6 +385,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
goto err_iounmap;
}
+
+ /* device supports >32-bit addresses.
+ * For legacy DMA, start from the 48-bit mask.
+ */
+ start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
+
+ for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
+ rc = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(dma_addr_size[i]));
+ if (rc) {
+ dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+ dma_addr_size[i], rc);
+ continue;
+ }
+ dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+ wil->dma_addr_size = dma_addr_size[i];
+ break;
+ }
+
+ if (wil->dma_addr_size == 0)
+ goto err_iounmap;
+
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
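Moving the DMA-mask loop after wil_set_capabilities() lets the start index depend on use_enhanced_dma_hw. A sketch of the descending negotiation itself, under the same "keep descending order" assumption as the array above:

#include <linux/dma-mapping.h>

/* Sketch: accept the widest mask the platform supports; returns the
 * accepted width in bits, or 0 if even the narrowest mask failed. */
static int sketch_pick_dma_width(struct device *dev, const int *widths,
				 int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(widths[i])))
			return widths[i];

	return 0;
}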
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index ba81fb3ac96f..3a4194779ddf 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
goto reject_suspend;
}
- if (!wil_is_rx_idle(wil)) {
+ if (!wil->txrx_ops.is_rx_idle(wil)) {
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
@@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
- while (!wil_is_rx_idle(wil)) {
+ while (!wil->txrx_ops.is_rx_idle(wil)) {
if (time_after(jiffies, data_comp_to)) {
- if (wil_is_rx_idle(wil))
+ if (wil->txrx_ops.is_rx_idle(wil))
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 76f8084c1fd8..b608aa16b4f1 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct wil6210_vif *vif;
struct net_device *ndev;
- struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int tid = wil_rxdesc_tid(d);
- int cid = wil_rxdesc_cid(d);
- int mid = wil_rxdesc_mid(d);
- u16 seq = wil_rxdesc_seq(d);
- int mcast = wil_rxdesc_mcast(d);
- struct wil_sta_info *sta = &wil->sta[cid];
+ int tid, cid, mid, mcast, retry;
+ u16 seq;
+ struct wil_sta_info *sta;
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
+ wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
+ &mcast, &retry);
+ sta = &wil->sta[cid];
+
wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
mid, cid, tid, seq, mcast);
@@ -117,11 +117,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
ndev = vif_to_ndev(vif);
- if (unlikely(mcast)) {
- wil_netif_rx_any(skb, ndev);
- return;
- }
-
spin_lock(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
@@ -130,6 +125,19 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
goto out;
}
+ if (unlikely(mcast)) {
+ if (retry && seq == r->mcast_last_seq) {
+ r->drop_dup_mcast++;
+ wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n",
+ seq);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ r->mcast_last_seq = seq;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
r->total++;
hseq = r->head_seq_num;
@@ -262,6 +270,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
r->buf_size = size;
r->stored_mpdu_num = 0;
r->first_time = true;
+ r->mcast_last_seq = U16_MAX;
return r;
}
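The reorder context now remembers the last multicast sequence number, so a retransmitted group-addressed frame (retry bit set, same seq) is dropped instead of being duplicated up the stack. In isolation the filter reduces to:

/* Sketch of the duplicate-mcast test above. U16_MAX is the "nothing
 * seen yet" sentinel installed by wil_tid_ampdu_rx_alloc(), so the
 * first frame can never match. */
static bool sketch_dup_mcast(u16 *last_seq, u16 seq, bool retry)
{
	if (retry && seq == *last_seq)
		return true;	/* same MSDU retransmitted - drop */

	*last_seq = seq;
	return false;
}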
@@ -288,7 +297,7 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
/* ADDBA processing */
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
- u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
+ u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size /
(mtu_max + WIL_MAX_MPDU_OVERHEAD));
if (!req_agg_wsize)
@@ -315,7 +324,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
* bits 6..15: buffer size
*/
u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
- bool agg_amsdu = !!(param_set & BIT(0));
+ bool agg_amsdu = wil->use_enhanced_dma_hw &&
+ wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en && (param_set & BIT(0));
int ba_policy = param_set & BIT(1);
u16 status = WLAN_STATUS_SUCCESS;
u16 ssn = seq_ctrl >> 4;
@@ -352,16 +364,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (status == WLAN_STATUS_SUCCESS) {
if (req_agg_wsize == 0) {
wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
- WIL_MAX_AGG_WSIZE);
- agg_wsize = WIL_MAX_AGG_WSIZE;
+ wil->max_agg_wsize);
+ agg_wsize = wil->max_agg_wsize;
} else {
agg_wsize = min_t(u16,
- WIL_MAX_AGG_WSIZE, req_agg_wsize);
+ wil->max_agg_wsize, req_agg_wsize);
}
}
- rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status,
- agg_amsdu, agg_wsize, agg_timeout);
+ rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
+ status, agg_amsdu, agg_wsize,
+ agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
status);
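wil_agg_size() now derives its bound from per-device fields instead of compile-time constants. The clamp it applies is, in isolation:

#include <linux/kernel.h>

/* Sketch of the window clamp: bounded by the ring limit and by how
 * many worst-case MPDUs (mtu_max + WIL_MAX_MPDU_OVERHEAD) fit in one
 * A-MPDU; a zero request means "suggest the maximum". */
static u16 sketch_agg_size(u16 max_wsize, u32 max_ampdu, u32 mpdu,
			   u16 req)
{
	u16 max = min_t(u16, max_wsize, max_ampdu / mpdu);

	return req ? min_t(u16, max, req) : max;
}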
@@ -384,7 +397,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
u8 agg_wsize = wil_agg_size(wil, wsize);
u16 agg_timeout = 0;
- struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int rc = 0;
if (txdata->addba_in_progress) {
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index c4db2a9d9f7f..853abc3a73e4 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -187,6 +187,40 @@ TRACE_EVENT(wil6210_rx,
__entry->seq, __entry->type, __entry->subtype)
);
+TRACE_EVENT(wil6210_rx_status,
+ TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
+ void *msg),
+ TP_ARGS(wil, use_compressed, buff_id, msg),
+ TP_STRUCT__entry(__field(u8, use_compressed)
+ __field(u16, buff_id)
+ __field(unsigned int, len)
+ __field(u8, mid)
+ __field(u8, cid)
+ __field(u8, tid)
+ __field(u8, type)
+ __field(u8, subtype)
+ __field(u16, seq)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(__entry->use_compressed = use_compressed;
+ __entry->buff_id = buff_id;
+ __entry->len = wil_rx_status_get_length(msg);
+ __entry->mid = wil_rx_status_get_mid(msg);
+ __entry->cid = wil_rx_status_get_cid(msg);
+ __entry->tid = wil_rx_status_get_tid(msg);
+ __entry->type = wil_rx_status_get_frame_type(wil,
+ msg);
+ __entry->subtype = wil_rx_status_get_fc1(wil, msg);
+ __entry->seq = wil_rx_status_get_seq(wil, msg);
+ __entry->mcs = wil_rx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
+ __entry->use_compressed, __entry->buff_id, __entry->len,
+ __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+ __entry->seq, __entry->type, __entry->subtype)
+);
+
TRACE_EVENT(wil6210_tx,
TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
TP_ARGS(vring, index, len, frags),
@@ -226,6 +260,31 @@ TRACE_EVENT(wil6210_tx_done,
__entry->err)
);
+TRACE_EVENT(wil6210_tx_status,
+ TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
+ unsigned int len),
+ TP_ARGS(msg, index, len),
+ TP_STRUCT__entry(__field(u16, index)
+ __field(unsigned int, len)
+ __field(u8, num_descs)
+ __field(u8, ring_id)
+ __field(u8, status)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(__entry->index = index;
+ __entry->len = len;
+ __entry->num_descs = msg->num_descriptors;
+ __entry->ring_id = msg->ring_id;
+ __entry->status = msg->status;
+ __entry->mcs = wil_tx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
+ __entry->ring_id, __entry->index, __entry->len,
+ __entry->num_descs, __entry->status, __entry->mcs)
+);
+
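A hypothetical call site for the new Tx-status tracepoint, as it might appear in the enhanced-DMA completion path (the surrounding names are assumptions, not code from this patch):

/* After reaping one status message from the Tx status ring: */
trace_wil6210_tx_status(&msg, sring->swhead, dmalen);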
#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index b9a9fa828961..6a7943e487fb 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -28,6 +28,7 @@
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
+#include "txrx_edma.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
@@ -47,62 +48,28 @@ static inline uint wil_rx_snaplen(void)
return rx_align_2 ? 6 : 0;
}
-static inline int wil_vring_is_empty(struct vring *vring)
+/* wil_ring_wmark_low - low watermark for available descriptor space */
+static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
- return vring->swhead == vring->swtail;
+ return ring->size / 8;
}
-static inline u32 wil_vring_next_tail(struct vring *vring)
+/* wil_ring_wmark_high - high watermark for available descriptor space */
+static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
- return (vring->swtail + 1) % vring->size;
-}
-
-static inline void wil_vring_advance_head(struct vring *vring, int n)
-{
- vring->swhead = (vring->swhead + n) % vring->size;
-}
-
-static inline int wil_vring_is_full(struct vring *vring)
-{
- return wil_vring_next_tail(vring) == vring->swhead;
-}
-
-/* Used space in Tx Vring */
-static inline int wil_vring_used_tx(struct vring *vring)
-{
- u32 swhead = vring->swhead;
- u32 swtail = vring->swtail;
- return (vring->size + swhead - swtail) % vring->size;
-}
-
-/* Available space in Tx Vring */
-static inline int wil_vring_avail_tx(struct vring *vring)
-{
- return vring->size - wil_vring_used_tx(vring) - 1;
-}
-
-/* wil_vring_wmark_low - low watermark for available descriptor space */
-static inline int wil_vring_wmark_low(struct vring *vring)
-{
- return vring->size/8;
-}
-
-/* wil_vring_wmark_high - high watermark for available descriptor space */
-static inline int wil_vring_wmark_high(struct vring *vring)
-{
- return vring->size/4;
+ return ring->size / 4;
}
/* returns true if num avail descriptors is lower than wmark_low */
-static inline int wil_vring_avail_low(struct vring *vring)
+static inline int wil_ring_avail_low(struct wil_ring *ring)
{
- return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
+ return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}
/* returns true if num avail descriptors is higher than wmark_high */
-static inline int wil_vring_avail_high(struct vring *vring)
+static inline int wil_ring_avail_high(struct wil_ring *ring)
{
- return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
+ return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}
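The renamed watermark helpers keep the same hysteresis: queues stop when fewer than size/8 descriptors are free and wake only once more than size/4 are free, so the queue state does not flap around a single threshold. A sketch of the resulting flow control (illustrative; see __wil_update_net_queues() below for the real multi-ring logic):

static void sketch_flow_control(struct net_device *ndev,
				struct wil_ring *ring, bool stopped)
{
	if (!stopped && wil_ring_avail_low(ring))
		netif_tx_stop_all_queues(ndev);	/* below size/8 free */
	else if (stopped && wil_ring_avail_high(ring))
		netif_tx_wake_all_queues(ndev);	/* above size/4 free */
}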
/* returns true when all tx vrings are empty */
@@ -112,9 +79,10 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
unsigned long data_comp_to;
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *vring = &wil->vring_tx[i];
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ struct wil_ring *vring = &wil->ring_tx[i];
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[vring_index];
spin_lock(&txdata->lock);
@@ -126,7 +94,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
data_comp_to = jiffies + msecs_to_jiffies(
WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
if (time_after(jiffies, data_comp_to)) {
wil_dbg_pm(wil,
"TO waiting for idle tx\n");
@@ -150,13 +118,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
return true;
}
-/* wil_val_in_range - check if value in [min,max) */
-static inline bool wil_val_in_range(int val, int min, int max)
-{
- return val >= min && val < max;
-}
-
-static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
@@ -205,7 +167,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
* we can use any
*/
for (i = 0; i < vring->size; i++) {
- volatile struct vring_tx_desc *_d = &vring->va[i].tx;
+ volatile struct vring_tx_desc *_d =
+ &vring->va[i].tx.legacy;
_d->dma.status = TX_DMA_STATUS_DU;
}
@@ -216,9 +179,10 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return 0;
}
-static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
struct wil_ctx *ctx)
{
+ struct vring_tx_desc *d = &desc->legacy;
dma_addr_t pa = wil_desc_addr(&d->dma.addr);
u16 dmalen = le16_to_cpu(d->dma.length);
@@ -234,15 +198,14 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
}
}
-static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
- int tx)
+static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
lockdep_assert_held(&wil->mutex);
- if (tx) {
- int vring_index = vring - wil->vring_tx;
+ if (!vring->is_rx) {
+ int vring_index = vring - wil->ring_tx;
wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
vring_index, vring->size, vring->va,
@@ -253,33 +216,33 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
&vring->pa, vring->ctx);
}
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
dma_addr_t pa;
u16 dmalen;
struct wil_ctx *ctx;
- if (tx) {
+ if (!vring->is_rx) {
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
- &vring->va[vring->swtail].tx;
+ &vring->va[vring->swtail].tx.legacy;
ctx = &vring->ctx[vring->swtail];
if (!ctx) {
wil_dbg_txrx(wil,
"ctx(%d) was already completed\n",
vring->swtail);
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
continue;
}
*d = *_d;
- wil_txdesc_unmap(dev, d, ctx);
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
} else { /* rx */
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d =
- &vring->va[vring->swhead].rx;
+ &vring->va[vring->swhead].rx.legacy;
ctx = &vring->ctx[vring->swhead];
*d = *_d;
@@ -287,7 +250,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
dmalen = le16_to_cpu(d->dma.length);
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
kfree_skb(ctx->skb);
- wil_vring_advance_head(vring, 1);
+ wil_ring_advance_head(vring, 1);
}
}
dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
@@ -302,13 +265,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
*
* Safe to call from IRQ
*/
-static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
struct vring_rx_desc dd, *d = &dd;
- volatile struct vring_rx_desc *_d = &vring->va[i].rx;
+ volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
dma_addr_t pa;
struct sk_buff *skb = dev_alloc_skb(sz + headroom);
@@ -318,6 +281,12 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
skb_reserve(skb, headroom);
skb_put(skb, sz);
+ /*
+ * Make sure that the network stack calculates checksum for packets
+ * which failed the HW checksum calculation
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
kfree_skb(skb);
@@ -445,19 +414,12 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
}
}
-/* similar to ieee80211_ version, but FC contain only 1-st byte */
-static inline int wil_is_back_req(u8 fc)
-{
- return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
- (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
-}
-
-bool wil_is_rx_idle(struct wil6210_priv *wil)
+static bool wil_is_rx_idle(struct wil6210_priv *wil)
{
struct vring_rx_desc *_d;
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *ring = &wil->ring_rx;
- _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+ _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
if (_d->dma.status & RX_DMA_STATUS_DU)
return false;
@@ -472,7 +434,7 @@ bool wil_is_rx_idle(struct wil6210_priv *wil)
* Safe to call from IRQ
*/
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
- struct vring *vring)
+ struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
struct wil6210_vif *vif;
@@ -492,11 +454,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
again:
- if (unlikely(wil_vring_is_empty(vring)))
+ if (unlikely(wil_ring_is_empty(vring)))
return NULL;
i = (int)vring->swhead;
- _d = &vring->va[i].rx;
+ _d = &vring->va[i].rx.legacy;
if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
/* it is not error, we just reached end of Rx done area */
return NULL;
@@ -504,7 +466,7 @@ again:
skb = vring->ctx[i].skb;
vring->ctx[i].skb = NULL;
- wil_vring_advance_head(vring, 1);
+ wil_ring_advance_head(vring, 1);
if (!skb) {
wil_err(wil, "No Rx skb at [%d]\n", i);
goto again;
@@ -613,6 +575,8 @@ again:
* mis-calculates TCP checksum - if it should be 0x0,
* it writes 0xffff in violation of RFC 1624
*/
+ else
+ stats->rx_csum_err++;
}
if (snaplen) {
@@ -641,15 +605,15 @@ again:
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
struct net_device *ndev = wil->main_ndev;
- struct vring *v = &wil->vring_rx;
+ struct wil_ring *v = &wil->ring_rx;
u32 next_tail;
int rc = 0;
int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
WIL6210_RTAP_SIZE : 0;
- for (; next_tail = wil_vring_next_tail(v),
- (next_tail != v->swhead) && (count-- > 0);
- v->swtail = next_tail) {
+ for (; next_tail = wil_ring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
if (unlikely(rc)) {
wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
@@ -677,7 +641,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
* Cut'n'paste from original memcmp (see lib/string.c)
* with minimal modifications
*/
-static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
@@ -722,6 +686,30 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
return 0;
}
+static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
+ (d->dma.error & RX_DMA_ERROR_MIC)) {
+ stats->rx_mic_error++;
+ wil_dbg_txrx(wil, "MIC error, dropping packet\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ *security = wil_rxdesc_security(d);
+}
+
/*
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
@@ -733,15 +721,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
unsigned int len = skb->len;
- struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
- int security = wil_rxdesc_security(d);
+ int cid;
+ int security;
struct ethhdr *eth = (void *)skb->data;
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
*/
int mcast = is_multicast_ether_addr(eth->h_dest);
- struct wil_net_stats *stats = &wil->sta[cid].stats;
+ struct wil_net_stats *stats;
struct sk_buff *xmit_skb = NULL;
static const char * const gro_res_str[] = {
[GRO_MERGED] = "GRO_MERGED",
@@ -751,6 +738,10 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_DROP] = "GRO_DROP",
};
+ wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
+
+ stats = &wil->sta[cid].stats;
+
if (ndev->features & NETIF_F_RXHASH)
/* fake L4 to ensure it won't be re-calculated later
* set hash to any non-zero value to activate rps
@@ -761,13 +752,19 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb_orphan(skb);
- if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+ if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
rc = GRO_DROP;
dev_kfree_skb(skb);
stats->rx_replay++;
goto stats;
}
+ /* check errors reported by HW and update statistics */
+ if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
/* send multicast frames both to higher layers in
@@ -835,7 +832,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
- struct vring *v = &wil->vring_rx;
+ struct wil_ring *v = &wil->ring_rx;
struct sk_buff *skb;
if (unlikely(!v->va)) {
@@ -875,9 +872,9 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
}
}
-int wil_rx_init(struct wil6210_priv *wil, u16 size)
+static int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *vring = &wil->ring_rx;
int rc;
wil_dbg_misc(wil, "rx_init\n");
@@ -890,6 +887,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
wil_rx_buf_len_init(wil);
vring->size = size;
+ vring->is_rx = true;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
@@ -904,22 +902,46 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
return 0;
err_free:
- wil_vring_free(wil, vring, 0);
+ wil_vring_free(wil, vring);
return rc;
}
-void wil_rx_fini(struct wil6210_priv *wil)
+static void wil_rx_fini(struct wil6210_priv *wil)
{
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *vring = &wil->ring_rx;
wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
- wil_vring_free(wil, vring, 0);
+ wil_vring_free(wil, vring);
+}
+
+static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int vring_index)
+{
+ struct vring_tx_desc *d = &desc->legacy;
+
+ wil_desc_addr_set(&d->dma.addr, pa);
+ d->dma.ip_length = 0;
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
}
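wil_tx_desc_map() now takes the descriptor union so that both DMA generations share one signature; callers cast their concrete descriptor and the active implementation fills its own view, as in the Tx paths below:

/* Dispatch pattern used throughout this patch: */
wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len, ring_index);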
-static inline void wil_tx_data_init(struct vring_tx_data *txdata)
+void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = 0;
@@ -935,8 +957,8 @@ static inline void wil_tx_data_init(struct vring_tx_data *txdata)
spin_unlock_bh(&txdata->lock);
}
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
- int cid, int tid)
+static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
+ int cid, int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
@@ -966,8 +988,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -980,13 +1002,14 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
}
wil_tx_data_init(txdata);
+ vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
- wil->vring2cid_tid[id][0] = cid;
- wil->vring2cid_tid[id][1] = tid;
+ wil->ring2cid_tid[id][0] = cid;
+ wil->ring2cid_tid[id][1] = tid;
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1019,9 +1042,9 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
- wil_vring_free(wil, vring, 1);
- wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
- wil->vring2cid_tid[id][1] = 0;
+ wil_vring_free(wil, vring);
+ wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[id][1] = 0;
out:
@@ -1050,8 +1073,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -1064,13 +1087,14 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
}
wil_tx_data_init(txdata);
+ vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
- wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
- wil->vring2cid_tid[id][1] = 0; /* TID */
+ wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[id][1] = 0; /* TID */
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1101,62 +1125,32 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
txdata->enabled = 0;
txdata->dot1x_open = false;
spin_unlock_bh(&txdata->lock);
- wil_vring_free(wil, vring, 1);
+ wil_vring_free(wil, vring);
out:
return rc;
}
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
-{
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
-
- lockdep_assert_held(&wil->mutex);
-
- if (!vring->va)
- return;
-
- wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
-
- spin_lock_bh(&txdata->lock);
- txdata->dot1x_open = false;
- txdata->mid = U8_MAX;
- txdata->enabled = 0; /* no Tx can be in progress or start anew */
- spin_unlock_bh(&txdata->lock);
- /* napi_synchronize waits for completion of the current NAPI but will
- * not prevent the next NAPI run.
- * Add a memory barrier to guarantee that txdata->enabled is zeroed
- * before napi_synchronize so that the next scheduled NAPI will not
- * handle this vring
- */
- wmb();
- /* make sure NAPI won't touch this vring */
- if (test_bit(wil_status_napi_en, wil->status))
- napi_synchronize(&wil->napi_tx);
-
- wil_vring_free(wil, vring, 1);
-}
-
-static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
int i;
struct ethhdr *eth = (void *)skb->data;
int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
if (cid < 0)
return NULL;
/* TODO: fix for multiple TID */
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
- if (wil->vring2cid_tid[i][0] == cid) {
- struct vring *v = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ if (wil->ring2cid_tid[i][0] == cid) {
+ struct wil_ring *v = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
eth->h_dest, i);
@@ -1174,42 +1168,43 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
return NULL;
}
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb);
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
-static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v;
+ struct wil_ring *ring;
int i;
u8 cid;
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
* find 1-st vring eligible for this skb and use it.
*/
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
- if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ ring = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
- return v;
+ return ring;
}
- wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+ wil_dbg_txrx(wil, "Tx while no rings active?\n");
return NULL;
}
@@ -1225,22 +1220,22 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
* Use old strategy when new is not supported yet:
* - for PBSS
*/
-static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v;
- struct vring_tx_data *txdata;
- int i = vif->bcast_vring;
+ struct wil_ring *v;
+ struct wil_ring_tx_data *txdata;
+ int i = vif->bcast_ring;
if (i < 0)
return NULL;
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled)
return NULL;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
return NULL;
return v;
@@ -1250,35 +1245,36 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
struct ethhdr *eth = (void *)skb->data;
- int cid = wil->vring2cid_tid[vring_index][0];
+ int cid = wil->ring2cid_tid[vring_index][0];
ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}
-static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v, *v2;
+ struct wil_ring *v, *v2;
struct sk_buff *skb2;
int i;
u8 cid;
struct ethhdr *eth = (void *)skb->data;
char *src = eth->h_source;
- struct vring_tx_data *txdata, *txdata2;
+ struct wil_ring_tx_data *txdata, *txdata2;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
/* find 1-st vring eligible for data */
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -1298,15 +1294,15 @@ found:
/* find other active vrings and duplicate skb for each */
for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
- v2 = &wil->vring_tx[i];
- txdata2 = &wil->vring_tx_data[i];
+ v2 = &wil->ring_tx[i];
+ txdata2 = &wil->ring_tx_data[i];
if (!v2->va || txdata2->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1316,7 +1312,7 @@ found:
if (skb2) {
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
wil_set_da_for_vring(wil, skb2, i);
- wil_tx_vring(wil, vif, v2, skb2);
+ wil_tx_ring(wil, vif, v2, skb2);
} else {
wil_err(wil, "skb_copy failed\n");
}
@@ -1325,28 +1321,6 @@ found:
return v;
}
-static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
- int vring_index)
-{
- wil_desc_addr_set(&d->dma.addr, pa);
- d->dma.ip_length = 0;
- /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
- d->dma.b11 = 0/*14 | BIT(7)*/;
- d->dma.error = 0;
- d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
- d->dma.length = cpu_to_le16((u16)len);
- d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
- d->mac.d[0] = 0;
- d->mac.d[1] = 0;
- d->mac.d[2] = 0;
- d->mac.ucode_cmd = 0;
- /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
- d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
- (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
-
- return 0;
-}
-
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
@@ -1454,7 +1428,7 @@ static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+ struct wil_ring *vring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
@@ -1474,13 +1448,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
int sg_desc_cnt = 0; /* number of descriptors for current mss*/
u32 swhead = vring->swhead;
- int used, avail = wil_vring_avail_tx(vring);
+ int used, avail = wil_ring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
int min_desc_required = nr_frags + 1;
int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
int f, len, hdrlen, headlen;
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
uint i = swhead;
dma_addr_t pa;
const skb_frag_t *frag = NULL;
@@ -1548,7 +1522,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
tcp_hdr_len = tcp_hdrlen(skb);
skb_net_hdr_len = skb_network_header_len(skb);
- _hdr_desc = &vring->va[i].tx;
+ _hdr_desc = &vring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
@@ -1556,7 +1530,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
goto err_exit;
}
- wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
+ hdrlen, vring_index);
wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
tcp_hdr_len, skb_net_hdr_len);
wil_tx_last_desc(hdr_desc);
@@ -1613,7 +1588,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
goto mem_error;
}
- _desc = &vring->va[i].tx;
+ _desc = &vring->va[i].tx.legacy;
if (!_first_desc) {
_first_desc = _desc;
@@ -1623,7 +1598,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
d = &desc_mem;
}
- wil_tx_desc_map(d, pa, lenmss, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, lenmss, vring_index);
wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
is_ipv4, tcp_hdr_len,
skb_net_hdr_len);
@@ -1701,8 +1677,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
vring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
- used = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used, used + descs_used)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
@@ -1717,7 +1693,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
wmb();
/* advance swhead */
- wil_vring_advance_head(vring, descs_used);
+ wil_ring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
/* make sure all writes to descriptors (shared memory) are done before
@@ -1725,6 +1701,11 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
*/
wmb();
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
wil_w(wil, vring->hwtail, vring->swhead);
return 0;
@@ -1733,12 +1714,12 @@ mem_error:
struct wil_ctx *ctx;
i = (swhead + descs_used - 1) % vring->size;
- d = (struct vring_tx_desc *)&vring->va[i].tx;
- _desc = &vring->va[i].tx;
+ d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
+ _desc = &vring->va[i].tx.legacy;
*d = *_desc;
_desc->dma.status = TX_DMA_STATUS_DU;
ctx = &vring->ctx[i];
- wil_txdesc_unmap(dev, d, ctx);
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
memset(ctx, 0, sizeof(*ctx));
descs_used--;
}
@@ -1746,26 +1727,26 @@ err_exit:
return rc;
}
-static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d;
- u32 swhead = vring->swhead;
- int avail = wil_vring_avail_tx(vring);
+ u32 swhead = ring->swhead;
+ int avail = wil_ring_avail_tx(ring);
int nr_frags = skb_shinfo(skb)->nr_frags;
uint f = 0;
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
uint i = swhead;
dma_addr_t pa;
int used;
- bool mcast = (vring_index == vif->bcast_vring);
+ bool mcast = (ring_index == vif->bcast_ring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
- vring_index);
+ wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
+ skb->len, ring_index, nr_frags);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1773,23 +1754,24 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
if (unlikely(avail < 1 + nr_frags)) {
wil_err_ratelimited(wil,
"Tx ring[%2d] full. No space for %d fragments\n",
- vring_index, 1 + nr_frags);
+ ring_index, 1 + nr_frags);
return -ENOMEM;
}
- _d = &vring->va[i].tx;
+ _d = &ring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
+ wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
skb_headlen(skb), skb->data, &pa);
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
- vring->ctx[i].mapped_as = wil_mapped_as_single;
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
- wil_tx_desc_map(d, pa, len, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
+ ring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
@@ -1798,11 +1780,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
- vring_index);
+ ring_index);
goto dma_error;
}
- vring->ctx[i].nr_frags = nr_frags;
+ ring->ctx[i].nr_frags = nr_frags;
wil_tx_desc_set_nr_frags(d, nr_frags + 1);
/* middle segments */
@@ -1812,20 +1794,21 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
int len = skb_frag_size(frag);
*_d = *d;
- wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- i = (swhead + f + 1) % vring->size;
- _d = &vring->va[i].tx;
+ i = (swhead + f + 1) % ring->size;
+ _d = &ring->va[i].tx.legacy;
pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "Tx[%2d] failed to map fragment\n",
- vring_index);
+ ring_index);
goto dma_error;
}
- vring->ctx[i].mapped_as = wil_mapped_as_page;
- wil_tx_desc_map(d, pa, len, vring_index);
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, len, ring_index);
/* no need to check return code -
* if it succeeded for 1-st descriptor,
* it will succeed here too
@@ -1837,7 +1820,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
*_d = *d;
- wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
@@ -1845,15 +1828,15 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
- vring->ctx[i].skb = skb_get(skb);
+ ring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
- used = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used, used + nr_frags + 1)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
- vring_index, used, used + nr_frags + 1);
+ ring_index, used, used + nr_frags + 1);
}
/* Make sure to advance the head only after descriptor update is done.
@@ -1864,17 +1847,22 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
wmb();
/* advance swhead */
- wil_vring_advance_head(vring, nr_frags + 1);
- wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
- vring->swhead);
- trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
+ wil_ring_advance_head(ring, nr_frags + 1);
+ wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
+ ring->swhead);
+ trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
- wil_w(wil, vring->hwtail, vring->swhead);
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
return 0;
dma_error:
@@ -1883,12 +1871,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
for (f = 0; f < nr_frags; f++) {
struct wil_ctx *ctx;
- i = (swhead + f) % vring->size;
- ctx = &vring->ctx[i];
- _d = &vring->va[i].tx;
+ i = (swhead + f) % ring->size;
+ ctx = &ring->ctx[i];
+ _d = &ring->va[i].tx.legacy;
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
- wil_txdesc_unmap(dev, d, ctx);
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
memset(ctx, 0, sizeof(*ctx));
}
@@ -1896,11 +1886,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return -EINVAL;
}
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
{
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
int rc;
spin_lock(&txdata->lock);
@@ -1914,8 +1904,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return -EINVAL;
}
- rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
- (wil, vif, vring, skb);
+ rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
+ (wil, vif, ring, skb);
spin_unlock(&txdata->lock);
@@ -1941,7 +1931,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
*/
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
struct wil6210_vif *vif,
- struct vring *vring,
+ struct wil_ring *ring,
bool check_stop)
{
int i;
@@ -1949,9 +1939,9 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
if (unlikely(!vif))
return;
- if (vring)
+ if (ring)
wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
- (int)(vring - wil->vring_tx), vif->mid, check_stop,
+ (int)(ring - wil->ring_tx), vif->mid, check_stop,
vif->net_queue_stopped);
else
wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
@@ -1962,7 +1952,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
return;
if (check_stop) {
- if (!vring || unlikely(wil_vring_avail_low(vring))) {
+ if (!ring || unlikely(wil_ring_avail_low(ring))) {
/* not enough room in the vring */
netif_tx_stop_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = true;
@@ -1978,22 +1968,22 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
/* check wake */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *cur_vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ struct wil_ring *cur_ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
- if (txdata->mid != vif->mid || !cur_vring->va ||
- !txdata->enabled || cur_vring == vring)
+ if (txdata->mid != vif->mid || !cur_ring->va ||
+ !txdata->enabled || cur_ring == ring)
continue;
- if (wil_vring_avail_low(cur_vring)) {
- wil_dbg_txrx(wil, "vring %d full, can't wake\n",
- (int)(cur_vring - wil->vring_tx));
+ if (wil_ring_avail_low(cur_ring)) {
+ wil_dbg_txrx(wil, "ring %d full, can't wake\n",
+ (int)(cur_ring - wil->ring_tx));
return;
}
}
- if (!vring || wil_vring_avail_high(vring)) {
- /* enough room in the vring */
+ if (!ring || wil_ring_avail_high(ring)) {
+ /* enough room in the ring */
wil_dbg_txrx(wil, "calling netif_tx_wake\n");
netif_tx_wake_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = false;
@@ -2001,18 +1991,18 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
}
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop)
+ struct wil_ring *ring, bool check_stop)
{
spin_lock(&wil->net_queue_lock);
- __wil_update_net_queues(wil, vif, vring, check_stop);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock(&wil->net_queue_lock);
}
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop)
+ struct wil_ring *ring, bool check_stop)
{
spin_lock_bh(&wil->net_queue_lock);
- __wil_update_net_queues(wil, vif, vring, check_stop);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock_bh(&wil->net_queue_lock);
}
@@ -2022,7 +2012,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wil6210_priv *wil = vif_to_wil(vif);
struct ethhdr *eth = (void *)skb->data;
bool bcast = is_multicast_ether_addr(eth->h_dest);
- struct vring *vring;
+ struct wil_ring *ring;
static bool pr_once_fw;
int rc;
@@ -2048,36 +2038,36 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* find vring */
if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
/* in STA mode (ESS), all to same VRING (to AP) */
- vring = wil_find_tx_vring_sta(wil, vif, skb);
+ ring = wil_find_tx_ring_sta(wil, vif, skb);
} else if (bcast) {
if (vif->pbss)
/* in pbss, no bcast VRING - duplicate skb in
* all stations VRINGs
*/
- vring = wil_find_tx_bcast_2(wil, vif, skb);
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
/* AP has a dedicated bcast VRING */
- vring = wil_find_tx_bcast_1(wil, vif, skb);
+ ring = wil_find_tx_bcast_1(wil, vif, skb);
else
/* unexpected combination, fallback to duplicating
* the skb in all stations VRINGs
*/
- vring = wil_find_tx_bcast_2(wil, vif, skb);
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
} else {
/* unicast, find specific VRING by dest. address */
- vring = wil_find_tx_ucast(wil, vif, skb);
+ ring = wil_find_tx_ucast(wil, vif, skb);
}
- if (unlikely(!vring)) {
- wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
+ if (unlikely(!ring)) {
+ wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
goto drop;
}
/* set up vring entry */
- rc = wil_tx_vring(wil, vif, vring, skb);
+ rc = wil_tx_ring(wil, vif, ring, skb);
switch (rc) {
case 0:
/* shall we stop net queues? */
- wil_update_net_queues_bh(wil, vif, vring, true);
+ wil_update_net_queues_bh(wil, vif, ring, true);
/* statistics will be updated on the tx_complete */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2093,20 +2083,29 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NET_XMIT_DROP;
}
-static inline bool wil_need_txstat(struct sk_buff *skb)
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta)
{
- struct ethhdr *eth = (void *)skb->data;
+ int skb_time_us;
+ int bin;
- return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
- (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
-}
+ if (!wil->tx_latency)
+ return;
-static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
-{
- if (unlikely(wil_need_txstat(skb)))
- skb_complete_wifi_ack(skb, acked);
- else
- acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+ if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
+ return;
+
+ skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
+ bin = skb_time_us / wil->tx_latency_res;
+ bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
+
+ wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
+ sta->tx_latency_bins[bin]++;
+ sta->stats.tx_latency_total_us += skb_time_us;
+ if (skb_time_us < sta->stats.tx_latency_min_us)
+ sta->stats.tx_latency_min_us = skb_time_us;
+ if (skb_time_us > sta->stats.tx_latency_max_us)
+ sta->stats.tx_latency_max_us = skb_time_us;
}
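The binning arithmetic above is linear with clamping into the last bin. A worked example, with illustrative values:

/* Assume tx_latency_res = 500 us per bin. A frame that spent 1700 us
 * in flight lands in bin 1700 / 500 = 3; anything past the last bin
 * is clamped, so the histogram tail collects all outliers. */
bin = 1700 / 500;				/* = 3 */
bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);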
/**
@@ -2121,10 +2120,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct device *dev = wil_to_dev(wil);
- struct vring *vring = &wil->vring_tx[ringid];
- struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ struct wil_ring *vring = &wil->ring_tx[ringid];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int done = 0;
- int cid = wil->vring2cid_tid[ringid][0];
+ int cid = wil->ring2cid_tid[ringid][0];
struct wil_net_stats *stats = NULL;
volatile struct vring_tx_desc *_d;
int used_before_complete;
@@ -2142,12 +2141,12 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
- used_before_complete = wil_vring_used_tx(vring);
+ used_before_complete = wil_ring_used_tx(vring);
if (cid < WIL6210_MAX_CID)
stats = &wil->sta[cid].stats;
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
/**
@@ -2158,7 +2157,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */
- _d = &vring->va[lf].tx;
+ _d = &vring->va[lf].tx.legacy;
if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
break;
@@ -2170,7 +2169,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
ctx = &vring->ctx[vring->swtail];
skb = ctx->skb;
- _d = &vring->va[vring->swtail].tx;
+ _d = &vring->va[vring->swtail].tx.legacy;
*d = *_d;
@@ -2184,7 +2183,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- wil_txdesc_unmap(dev, d, ctx);
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
if (skb) {
if (likely(d->dma.error == 0)) {
@@ -2193,6 +2194,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
if (stats) {
stats->tx_packets++;
stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
}
} else {
ndev->stats.tx_errors++;
@@ -2203,7 +2207,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
}
memset(ctx, 0, sizeof(*ctx));
/* Make sure the ctx is zeroed before updating the tail
- * to prevent a case where wil_tx_vring will see
+ * to prevent a case where wil_tx_ring will see
* this descriptor as used and handle it before ctx zero
* is completed.
*/
@@ -2213,14 +2217,14 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
* so hardware will not try to process this desc.,
* - rest of descriptor will be initialized on Tx.
*/
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
done++;
}
}
/* performance monitoring */
- used_new = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used_new = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used_new, used_before_complete)) {
wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
ringid, used_before_complete, used_new);
@@ -2233,3 +2237,49 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
return done;
}
+
+static inline int wil_tx_init(struct wil6210_priv *wil)
+{
+ return 0;
+}
+
+static inline void wil_tx_fini(struct wil6210_priv *wil) {}
+
+static void wil_get_reorder_params(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *tid = wil_rxdesc_tid(d);
+ *cid = wil_rxdesc_cid(d);
+ *mid = wil_rxdesc_mid(d);
+ *seq = wil_rxdesc_seq(d);
+ *mcast = wil_rxdesc_mcast(d);
+ *retry = wil_rxdesc_retry(d);
+}
+
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation;
+ /* TX ops */
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
+ wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
+ wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
+ wil->txrx_ops.ring_fini_tx = wil_vring_free;
+ wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
+ wil->txrx_ops.tx_init = wil_tx_init;
+ wil->txrx_ops.tx_fini = wil_tx_fini;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
+ wil->txrx_ops.get_netif_rx_params =
+ wil_get_netif_rx_params;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
+ wil->txrx_ops.rx_fini = wil_rx_fini;
+}
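With the legacy table in place, selecting a generation becomes one branch at init time. A sketch of the dispatcher that wil_set_capabilities() calls above; the edma counterpart is assumed to live in txrx_edma.c:

/* Sketch of wil_init_txrx_ops(); the _edma variant is assumed. */
void wil_init_txrx_ops(struct wil6210_priv *wil)
{
	if (wil->use_enhanced_dma_hw)
		wil_init_txrx_ops_edma(wil);
	else
		wil_init_txrx_ops_legacy_dma(wil);
}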
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5f07717acc2c..9d83be481839 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -18,6 +18,9 @@
#ifndef WIL6210_TXRX_H
#define WIL6210_TXRX_H
+#include "wil6210.h"
+#include "txrx_edma.h"
+
#define BUF_SW_OWNED (1)
#define BUF_HW_OWNED (0)
@@ -29,19 +32,13 @@
/* Tx/Rx path */
-/* Common representation of physical address in Vring */
-struct vring_dma_addr {
- __le32 addr_low;
- __le16 addr_high;
-} __packed;
-
-static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
+static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
{
return le32_to_cpu(addr->addr_low) |
((u64)le16_to_cpu(addr->addr_high) << 32);
}
-static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
+static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
dma_addr_t pa)
{
addr->addr_low = cpu_to_le32(lower_32_bits(pa));
@@ -294,7 +291,7 @@ struct vring_tx_mac {
*/
struct vring_tx_dma {
u32 d0;
- struct vring_dma_addr addr;
+ struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11; /* 0..6: mac_length; 7:ip_version */
u8 error; /* 0..2: err; 3..7: reserved; */
@@ -428,7 +425,7 @@ struct vring_rx_mac {
struct vring_rx_dma {
u32 d0;
- struct vring_dma_addr addr;
+ struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11;
u8 error;
@@ -441,14 +438,24 @@ struct vring_tx_desc {
struct vring_tx_dma dma;
} __packed;
+union wil_tx_desc {
+ struct vring_tx_desc legacy;
+ struct wil_tx_enhanced_desc enhanced;
+} __packed;
+
struct vring_rx_desc {
struct vring_rx_mac mac;
struct vring_rx_dma dma;
} __packed;
-union vring_desc {
- struct vring_tx_desc tx;
- struct vring_rx_desc rx;
+union wil_rx_desc {
+ struct vring_rx_desc legacy;
+ struct wil_rx_enhanced_desc enhanced;
+} __packed;
+
+union wil_ring_desc {
+ union wil_tx_desc tx;
+ union wil_rx_desc rx;
} __packed;
static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
@@ -493,6 +500,11 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d0, 28, 31);
}
+static inline int wil_rxdesc_retry(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 31, 31);
+}
+
static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d1, 4, 5);
@@ -528,6 +540,76 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
return (void *)skb->cb;
}
+static inline int wil_ring_is_empty(struct wil_ring *ring)
+{
+ return ring->swhead == ring->swtail;
+}
+
+static inline u32 wil_ring_next_tail(struct wil_ring *ring)
+{
+ return (ring->swtail + 1) % ring->size;
+}
+
+static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
+{
+ ring->swhead = (ring->swhead + n) % ring->size;
+}
+
+static inline int wil_ring_is_full(struct wil_ring *ring)
+{
+ return wil_ring_next_tail(ring) == ring->swhead;
+}
+
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+}
+
+static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
+{
+ if (unlikely(wil_need_txstat(skb)))
+ skb_complete_wifi_ack(skb, acked);
+ else
+ acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+}
+
+/* Used space in Tx ring */
+static inline int wil_ring_used_tx(struct wil_ring *ring)
+{
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+
+ return (ring->size + swhead - swtail) % ring->size;
+}
+
+/* Available space in Tx ring */
+static inline int wil_ring_avail_tx(struct wil_ring *ring)
+{
+ return ring->size - wil_ring_used_tx(ring) - 1;
+}
+
+static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
+{
+ /* In Enhanced DMA ring 0 is reserved for RX */
+ return wil->use_enhanced_dma_hw ? 1 : 0;
+}
+
+/* similar to the ieee80211_ version, but FC contains only the first byte */
+static inline int wil_is_back_req(u8 fc)
+{
+ return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+ return val >= min && val < max;
+}
+
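The ring-index helpers above all reduce to modular arithmetic over swhead/swtail, with one slot deliberately sacrificed so a full ring is distinguishable from an empty one. A minimal user-space sketch of the same math (the demo_* names are illustrative, not part of the driver):

#include <assert.h>
#include <stdio.h>

/* Mirrors the swhead/swtail bookkeeping of struct wil_ring (sketch only). */
struct demo_ring { unsigned swhead, swtail, size; };

static unsigned demo_used(const struct demo_ring *r)
{
	return (r->size + r->swhead - r->swtail) % r->size;
}

static unsigned demo_avail(const struct demo_ring *r)
{
	return r->size - demo_used(r) - 1;	/* one slot kept unused */
}

int main(void)
{
	struct demo_ring r = { .swhead = 2, .swtail = 250, .size = 256 };

	/* head wrapped past the end: 256 + 2 - 250 = 8 used entries */
	assert(demo_used(&r) == 8);
	assert(demo_avail(&r) == 247);
	printf("used=%u avail=%u\n", demo_used(&r), demo_avail(&r));
	return 0;
}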
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
@@ -536,5 +618,9 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
struct wil_tid_ampdu_rx *r);
+void wil_tx_data_init(struct wil_ring_tx_data *txdata);
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta);
#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
new file mode 100644
index 000000000000..bca61cb44c37
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -0,0 +1,1608 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/prefetch.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "wil6210.h"
+#include "txrx_edma.h"
+#include "txrx.h"
+#include "trace.h"
+
+#define WIL_EDMA_MAX_DATA_OFFSET (2)
+/* RX buffer size must be aligned to 4 bytes */
+#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+
+static void wil_tx_desc_unmap_edma(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx)
+{
+ struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
+ dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static int wil_find_free_sring(struct wil6210_priv *wil)
+{
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
+ if (!wil->srings[i].va)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static void wil_sring_free(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+
+ if (!sring || !sring->va)
+ return;
+
+ sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
+ sz, sring->va, &sring->pa);
+
+ dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
+ sring->pa = 0;
+ sring->va = NULL;
+}
+
+static int wil_sring_alloc(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);
+
+ if (sz == 0) {
+ wil_err(wil, "Cannot allocate a zero size status ring\n");
+ return -EINVAL;
+ }
+
+ sring->swhead = 0;
+
+ /* Status messages are allocated and initialized to 0. This is necessary
+ * since the DR bit must be initialized to 0.
+ */
+ sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+ if (!sring->va)
+ return -ENOMEM;
+
+ wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
+ &sring->pa);
+
+ return 0;
+}
+
+static int wil_tx_init_edma(struct wil6210_priv *wil)
+{
+ int ring_id = wil_find_free_sring(wil);
+ struct wil_status_ring *sring;
+ int rc;
+ u16 status_ring_size;
+
+ if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->tx_status_ring_order;
+
+ wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
+ status_ring_size, ring_id);
+
+ if (ring_id < 0)
+ return ring_id;
+
+ /* Allocate Tx status ring. Tx descriptor rings will be
+ * allocated on WMI connect event
+ */
+ sring = &wil->srings[ring_id];
+
+ sring->is_rx = false;
+ sring->size = status_ring_size;
+ sring->elem_size = sizeof(struct wil_ring_tx_status);
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_tx_sring_cfg(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+ wil->tx_sring_idx = ring_id;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
+/* Allocate one skb for the Rx descriptor ring */
+static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
+ struct wil_ring *ring, u32 i)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ dma_addr_t pa;
+ u16 buff_id;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ struct wil_rx_buff *rx_buff;
+ struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
+ struct sk_buff *skb;
+ struct wil_rx_enhanced_desc dd, *d = &dd;
+ struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
+ &ring->va[i].rx.enhanced;
+
+ if (unlikely(list_empty(free))) {
+ wil->rx_buff_mgmt.free_list_empty_cnt++;
+ return -EAGAIN;
+ }
+
+ skb = dev_alloc_skb(sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_put(skb, sz);
+
+ /* Make sure the network stack calculates the checksum for packets
+ * which failed the HW checksum calculation
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ /* Get the buffer ID - the index of the rx buffer in the buff_arr */
+ rx_buff = list_first_entry(free, struct wil_rx_buff, list);
+ buff_id = rx_buff->id;
+
+ /* Move a buffer from the free list to the active list */
+ list_move(&rx_buff->list, active);
+
+ buff_arr[buff_id].skb = skb;
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+ d->dma.length = cpu_to_le16(sz);
+ d->mac.buff_id = cpu_to_le16(buff_id);
+ *_d = *d;
+
+ /* Save the physical address in skb->cb for later use in dma_unmap */
+ memcpy(skb->cb, &pa, sizeof(pa));
+
+ return 0;
+}
+
+static inline
+void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
+{
+ memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
+ sring->elem_size);
+}
+
+static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
+{
+ sring->swhead = (sring->swhead + 1) % sring->size;
+ if (sring->swhead == 0)
+ sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
+}
+
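wil_sring_advance_swhead() above encodes the status-ring convention used throughout the enhanced-DMA code: processed entries are never zeroed; instead the expected descriptor-ready (DR) polarity flips on every wrap, so a message left over from the previous lap fails the polarity test. A standalone model of the scheme, assuming HW that stamps each message with its current lap parity (names are illustrative):

#include <assert.h>
#include <stdbool.h>

struct demo_sring { unsigned swhead, size, desc_rdy_pol; };

/* A message is ready only if its DR bit matches the expected polarity. */
static bool demo_ready(const struct demo_sring *s, unsigned dr_bit)
{
	return dr_bit == s->desc_rdy_pol;
}

static void demo_advance(struct demo_sring *s)
{
	s->swhead = (s->swhead + 1) % s->size;
	if (s->swhead == 0)		/* wrapped: flip expected polarity */
		s->desc_rdy_pol = 1 - s->desc_rdy_pol;
}

int main(void)
{
	struct demo_sring s = { .swhead = 0, .size = 2, .desc_rdy_pol = 1 };
	unsigned lap0_dr = 1;			/* written by HW on lap 0 */

	assert(demo_ready(&s, lap0_dr));	/* fresh entry: consumed */
	demo_advance(&s);
	demo_advance(&s);			/* wrap: polarity is now 0 */
	assert(!demo_ready(&s, lap0_dr));	/* stale lap-0 entry ignored */
	return 0;
}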
+static int wil_rx_refill_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ u32 next_head;
+ int rc = 0;
+ u32 swtail = *ring->edma_rx_swtail.va;
+
+ for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
+ ring->swhead = next_head) {
+ rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
+ if (unlikely(rc)) {
+ if (rc == -EAGAIN)
+ wil_dbg_txrx(wil, "No free buffer ID found\n");
+ else
+ wil_err_ratelimited(wil,
+ "Error %d in refill desc[%d]\n",
+ rc, ring->swhead);
+ break;
+ }
+ }
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return rc;
+}
+
+static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ u32 next_tail;
+ u32 swhead = (ring->swhead + 1) % ring->size;
+ dma_addr_t pa;
+ u16 dmalen;
+
+ for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
+ ring->swtail = next_tail) {
+ struct wil_rx_enhanced_desc dd, *d = &dd;
+ struct wil_rx_enhanced_desc *_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[ring->swtail].rx.enhanced;
+ struct sk_buff *skb;
+ u16 buff_id;
+
+ *d = *_d;
+ pa = wil_rx_desc_get_addr_edma(&d->dma);
+ dmalen = le16_to_cpu(d->dma.length);
+ dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+
+ /* Extract the SKB from the rx_buff management array */
+ buff_id = __le16_to_cpu(d->mac.buff_id);
+ if (buff_id >= wil->rx_buff_mgmt.size) {
+ wil_err(wil, "invalid buff_id %d\n", buff_id);
+ continue;
+ }
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+ if (unlikely(!skb))
+ wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ else
+ kfree_skb(skb);
+
+ /* Move the buffer from the active to the free list */
+ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+ }
+}
+
+static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return;
+
+ /* Move all the buffers to the free list in case active list is
+ * not empty in order to release all SKBs before deleting the array
+ */
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+
+ kfree(wil->rx_buff_mgmt.buff_arr);
+ wil->rx_buff_mgmt.buff_arr = NULL;
+}
+
+static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
+ size_t size)
+{
+ struct wil_rx_buff *buff_arr;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ int i;
+
+ wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+ GFP_KERNEL);
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return -ENOMEM;
+
+ /* Set list heads */
+ INIT_LIST_HEAD(active);
+ INIT_LIST_HEAD(free);
+
+ /* Link all the buffers into the free list and assign their IDs */
+ buff_arr = wil->rx_buff_mgmt.buff_arr;
+ for (i = 0; i < size; i++) {
+ list_add(&buff_arr[i].list, free);
+ buff_arr[i].id = i;
+ }
+
+ wil->rx_buff_mgmt.size = size;
+
+ return 0;
+}
+
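wil_init_rx_buff_arr() gives every Rx buffer a fixed ID equal to its index in buff_arr; the HW later echoes only that ID in a status message, so a completed buffer is found by direct array lookup and the entry merely migrates between the free and active lists. A user-space sketch of the same ID-recycling idea, with a singly linked free list standing in for the kernel's list_head (demo_* names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct demo_buff { int id; struct demo_buff *next; };

struct demo_mgmt { struct demo_buff *arr, *free_head; size_t size; };

static int demo_init(struct demo_mgmt *m, size_t size)
{
	size_t i;

	m->arr = calloc(size, sizeof(*m->arr));
	if (!m->arr)
		return -1;
	m->size = size;
	m->free_head = NULL;
	for (i = 0; i < size; i++) {	/* ID == index, link into free list */
		m->arr[i].id = (int)i;
		m->arr[i].next = m->free_head;
		m->free_head = &m->arr[i];
	}
	return 0;
}

/* Take an ID for a newly posted buffer; -1 means the free list is empty. */
static int demo_get_id(struct demo_mgmt *m)
{
	struct demo_buff *b = m->free_head;

	if (!b)
		return -1;
	m->free_head = b->next;
	return b->id;
}

/* Completion path: the ID from the status message maps straight to a slot. */
static void demo_put_id(struct demo_mgmt *m, int id)
{
	m->arr[id].next = m->free_head;
	m->free_head = &m->arr[id];
}

int main(void)
{
	struct demo_mgmt m;
	int id;

	if (demo_init(&m, 4))
		return 1;
	id = demo_get_id(&m);	/* post buffer; HW later echoes this ID */
	printf("posted buff_id=%d\n", id);
	demo_put_id(&m, id);	/* completion: recycle the ID */
	free(m.arr);
	return 0;
}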
+static int wil_init_rx_sring(struct wil6210_priv *wil,
+ u16 status_ring_size,
+ size_t elem_size,
+ u16 ring_id)
+{
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+
+ wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
+ ring_id);
+
+ memset(&sring->rx_data, 0, sizeof(sring->rx_data));
+
+ sring->is_rx = true;
+ sring->size = status_ring_size;
+ sring->elem_size = elem_size;
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_sring_add(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
+static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = ring->size * sizeof(ring->va[0]);
+
+ wil_dbg_misc(wil, "alloc_desc_ring:\n");
+
+ BUILD_BUG_ON(sizeof(ring->va[0]) != 32);
+
+ ring->swhead = 0;
+ ring->swtail = 0;
+ ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
+ if (!ring->ctx)
+ goto err;
+
+ ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+ if (!ring->va)
+ goto err_free_ctx;
+
+ if (ring->is_rx) {
+ sz = sizeof(*ring->edma_rx_swtail.va);
+ ring->edma_rx_swtail.va =
+ dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+ GFP_KERNEL);
+ if (!ring->edma_rx_swtail.va)
+ goto err_free_va;
+ }
+
+ wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
+ ring->is_rx ? "RX" : "TX",
+ ring->size, ring->va, &ring->pa, ring->ctx);
+
+ return 0;
+err_free_va:
+ dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
+ (void *)ring->va, ring->pa);
+ ring->va = NULL;
+err_free_ctx:
+ kfree(ring->ctx);
+ ring->ctx = NULL;
+err:
+ return -ENOMEM;
+}
+
+static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+ int ring_index = 0;
+
+ if (!ring->va)
+ return;
+
+ sz = ring->size * sizeof(ring->va[0]);
+
+ lockdep_assert_held(&wil->mutex);
+ if (ring->is_rx) {
+ wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
+ ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+ goto out;
+ }
+
+ /* TX ring */
+ ring_index = ring - wil->ring_tx;
+
+ wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
+ ring_index, ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ while (!wil_ring_is_empty(ring)) {
+ struct wil_ctx *ctx;
+
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+
+ ctx = &ring->ctx[ring->swtail];
+ if (!ctx) {
+ wil_dbg_txrx(wil,
+ "ctx(%d) was already completed\n",
+ ring->swtail);
+ ring->swtail = wil_ring_next_tail(ring);
+ continue;
+ }
+ *d = *_d;
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ ring->swtail = wil_ring_next_tail(ring);
+ }
+
+out:
+ dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
+ kfree(ring->ctx);
+ ring->pa = 0;
+ ring->va = NULL;
+ ring->ctx = NULL;
+}
+
+static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
+ int status_ring_id)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+
+ wil_dbg_misc(wil, "init RX desc ring\n");
+
+ ring->size = desc_ring_size;
+ ring->is_rx = true;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+out_free:
+ wil_ring_free_edma(wil, ring);
+ return rc;
+}
+
+static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid,
+ int *cid, int *mid, u16 *seq,
+ int *mcast, int *retry)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *tid = wil_rx_status_get_tid(s);
+ *cid = wil_rx_status_get_cid(s);
+ *mid = wil_rx_status_get_mid(s);
+ *seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
+ *mcast = wil_rx_status_get_mcast(s);
+ *retry = wil_rx_status_get_retry(s);
+}
+
+static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *cid = wil_rx_status_get_cid(s);
+ *security = wil_rx_status_get_security(s);
+}
+
+static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wil_rx_status_extended *st;
+ int cid, tid, key_id, mc;
+ struct wil_sta_info *s;
+ struct wil_tid_crypto_rx *c;
+ struct wil_tid_crypto_rx_single *cc;
+ const u8 *pn;
+
+ /* In HW reorder, HW is responsible for crypto check */
+ if (wil->use_rx_hw_reordering)
+ return 0;
+
+ st = wil_skb_rxstatus(skb);
+
+ cid = wil_rx_status_get_cid(st);
+ tid = wil_rx_status_get_tid(st);
+ key_id = wil_rx_status_get_key_id(st);
+ mc = wil_rx_status_get_mcast(st);
+ s = &wil->sta[cid];
+ c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
+ cc = &c->key_id[key_id];
+ pn = (u8 *)&st->ext.pn_15_0;
+
+ if (!cc->key_set) {
+ wil_err_ratelimited(wil,
+ "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+ cid, tid, mc, key_id);
+ return -EINVAL;
+ }
+
+ if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+ wil_err_ratelimited(wil,
+ "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+ cid, tid, mc, key_id, pn, cc->pn);
+ return -EINVAL;
+ }
+ memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+ return 0;
+}
+
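The replay check above depends on byte order: the 48-bit GCMP PN sits in the status message least-significant byte first, so an arithmetic comparison must walk the bytes from the most-significant end, which is what reverse_memcmp provides. A standalone sketch of that comparison under the same layout assumption (the helper body here is a model, not the kernel's implementation):

#include <stddef.h>
#include <stdio.h>

#define DEMO_PN_LEN 6	/* as IEEE80211_GCMP_PN_LEN */

static int demo_reverse_memcmp(const unsigned char *a,
			       const unsigned char *b, size_t len)
{
	const unsigned char *pa = a + len - 1, *pb = b + len - 1;

	for (; len; len--, pa--, pb--) {	/* compare MSB first */
		if (*pa != *pb)
			return *pa - *pb;
	}
	return 0;
}

int main(void)
{
	/* little-endian PNs: byte 0 is least significant */
	unsigned char last[DEMO_PN_LEN] = { 0x10, 0, 0, 0, 0x00, 0 };
	unsigned char rx[DEMO_PN_LEN]   = { 0x0f, 0, 0, 0, 0x01, 0 };

	/* rx is larger at a more significant byte, so it is accepted even
	 * though its low byte is smaller than last's
	 */
	if (demo_reverse_memcmp(rx, last, DEMO_PN_LEN) <= 0)
		printf("replay: drop\n");
	else
		printf("PN advanced: accept\n");
	return 0;
}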
+static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring;
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u8 dr_bit;
+ int i;
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (!sring->va)
+ continue;
+
+ wil_get_next_rx_status_msg(sring, msg);
+ dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+ /* Check if there are unhandled RX status messages */
+ if (dr_bit == sring->desc_rdy_pol)
+ return false;
+ }
+
+ return true;
+}
+
+static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
+{
+ wil->rx_buf_len = rx_large_buf ?
+ WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
+}
+
+static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
+{
+ u16 status_ring_size;
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ size_t elem_size = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+ int i;
+ u16 max_rx_pl_per_desc;
+
+ /* In SW reorder one must use extended status messages */
+ if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
+ wil_err(wil,
+ "compressed RX status cannot be used with SW reorder\n");
+ return -EINVAL;
+ }
+
+ if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->rx_status_ring_order;
+
+ wil_dbg_misc(wil,
+ "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
+ desc_ring_size, status_ring_size, elem_size);
+
+ wil_rx_buf_len_init_edma(wil);
+
+ max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4);
+
+ /* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
+ if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
+ wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
+
+ wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
+ wil->num_rx_status_rings);
+
+ rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
+ if (rc)
+ return rc;
+
+ /* Allocate status ring */
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ int sring_id = wil_find_free_sring(wil);
+
+ if (sring_id < 0) {
+ rc = -EFAULT;
+ goto err_free_status;
+ }
+ rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
+ sring_id);
+ if (rc)
+ goto err_free_status;
+ }
+
+ /* Allocate descriptor ring */
+ rc = wil_init_rx_desc_ring(wil, desc_ring_size,
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+ if (rc)
+ goto err_free_status;
+
+ if (wil->rx_buff_id_count >= status_ring_size) {
+ wil_info(wil,
+ "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
+ wil->rx_buff_id_count, status_ring_size,
+ status_ring_size - 1);
+ wil->rx_buff_id_count = status_ring_size - 1;
+ }
+
+ /* Allocate Rx buffer array */
+ rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
+ if (rc)
+ goto err_free_desc;
+
+ /* Fill descriptor ring with credits */
+ rc = wil_rx_refill_edma(wil);
+ if (rc)
+ goto err_free_rx_buff_arr;
+
+ return 0;
+err_free_rx_buff_arr:
+ wil_free_rx_buff_arr(wil);
+err_free_desc:
+ wil_ring_free_edma(wil, ring);
+err_free_status:
+ for (i = 0; i < wil->num_rx_status_rings; i++)
+ wil_sring_free(wil, &wil->srings[i]);
+
+ return rc;
+}
+
+static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_dbg_misc(wil,
+ "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
+ ring_id, cid, tid, wil->tx_sring_idx);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = cid;
+ wil->ring2cid_tid[ring_id][1] = tid;
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
+ goto out_free;
+ }
+
+ if (txdata->dot1x_open && agg_wsize >= 0)
+ wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+ return 0;
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[ring_id][1] = 0;
+
+ out:
+ return rc;
+}
+
+/* This function is used only for RX SW reorder */
+static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
+ struct sk_buff *skb, struct wil_net_stats *stats)
+{
+ u8 ftype;
+ u8 fc1;
+ int mid;
+ int tid;
+ u16 seq;
+ struct wil6210_vif *vif;
+
+ ftype = wil_rx_status_get_frame_type(wil, msg);
+ if (ftype == IEEE80211_FTYPE_DATA)
+ return 0;
+
+ fc1 = wil_rx_status_get_fc1(wil, msg);
+ mid = wil_rx_status_get_mid(msg);
+ tid = wil_rx_status_get_tid(msg);
+ seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
+ vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
+ return -EAGAIN;
+ }
+
+ wil_dbg_txrx(wil,
+ "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ if (stats)
+ stats->rx_non_data_frame++;
+ if (wil_is_back_req(fc1)) {
+ wil_dbg_txrx(wil,
+ "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+ wil_rx_bar(wil, vif, cid, tid, seq);
+ } else {
+ u32 sz = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+
+ /* Print all the info again; one can enable only this print
+ * without the overhead of printing every Rx frame
+ */
+ wil_dbg_txrx(wil,
+ "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, sz, false);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+ }
+
+ return -EAGAIN;
+}
+
+static int wil_rx_error_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ int error;
+ int l2_rx_status;
+ int l3_rx_status;
+ int l4_rx_status;
+ void *msg = wil_skb_rxstatus(skb);
+
+ error = wil_rx_status_get_error(msg);
+ if (!error) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return 0;
+ }
+
+ l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
+ if (l2_rx_status != 0) {
+ wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
+ l2_rx_status);
+ /* Due to HW issue, KEY error will trigger a MIC error */
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
+ wil_dbg_txrx(wil,
+ "L2 MIC/KEY error, dropping packet\n");
+ stats->rx_mic_error++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
+ wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
+ stats->rx_key_error++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
+ wil_dbg_txrx(wil,
+ "L2 REPLAY error, dropping packet\n");
+ stats->rx_replay++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
+ wil_dbg_txrx(wil,
+ "L2 AMSDU error, dropping packet\n");
+ stats->rx_amsdu_error++;
+ }
+ return -EFAULT;
+ }
+
+ l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
+ l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
+ if (!l3_rx_status && !l4_rx_status)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* If HW reports a bad checksum, let the IP stack re-check it.
+ * For example, the HW doesn't understand the Microsoft IP stack,
+ * which mis-calculates the TCP checksum - if it should be 0x0,
+ * it writes 0xffff in violation of RFC 1624
+ */
+ else
+ stats->rx_csum_err++;
+
+ return 0;
+}
+
+static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u16 buff_id;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ struct wil_ring_rx_data *rxdata = &sring->rx_data;
+ unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ struct wil_net_stats *stats = NULL;
+ u16 dmalen;
+ int cid;
+ bool eop, headstolen;
+ int delta;
+ u8 dr_bit;
+ u8 data_offset;
+ struct wil_rx_status_extended *s;
+ u16 sring_idx = sring - wil->srings;
+
+ BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
+
+again:
+ wil_get_next_rx_status_msg(sring, msg);
+ dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+ /* Completed handling all the ready status messages */
+ if (dr_bit != sring->desc_rdy_pol)
+ return NULL;
+
+ /* Extract the buffer ID from the status message */
+ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
+ wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
+ buff_id, sring->swhead);
+ wil_sring_advance_swhead(sring);
+ goto again;
+ }
+
+ wil_sring_advance_swhead(sring);
+
+ /* Extract the SKB from the rx_buff management array */
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+ if (!skb) {
+ wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ goto again;
+ }
+
+ memcpy(&pa, skb->cb, sizeof(pa));
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
+
+ trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
+ msg);
+ wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
+ buff_id, sring_idx, dmalen);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
+ /* Move the buffer from the active list to the free list */
+ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+
+ eop = wil_rx_status_get_eop(msg);
+
+ cid = wil_rx_status_get_cid(msg);
+ if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
+ wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
+ cid, sring->swhead);
+ rxdata->skipping = true;
+ goto skipping;
+ }
+ stats = &wil->sta[cid].stats;
+
+ if (unlikely(skb->len < ETH_HLEN)) {
+ wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
+ stats->rx_short_frame++;
+ rxdata->skipping = true;
+ goto skipping;
+ }
+
+ if (unlikely(dmalen > sz)) {
+ wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ stats->rx_large_frame++;
+ rxdata->skipping = true;
+ }
+
+skipping:
+ /* skipping indicates whether a certain SKB should be dropped.
+ * It is set in case there is an error on the current SKB or in case
+ * of RX chaining: as long as we manage to merge the SKBs it remains
+ * false. Once we have a bad SKB, or we fail to merge SKBs, it is set
+ * to the !EOP value of the current SKB.
+ * This guarantees that all the following SKBs until EOP will also
+ * get dropped.
+ */
+ if (unlikely(rxdata->skipping)) {
+ kfree_skb(skb);
+ if (rxdata->skb) {
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ }
+ rxdata->skipping = !eop;
+ goto again;
+ }
+
+ skb_trim(skb, dmalen);
+
+ prefetch(skb->data);
+
+ if (!rxdata->skb) {
+ rxdata->skb = skb;
+ } else {
+ if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
+ &delta))) {
+ kfree_skb_partial(skb, headstolen);
+ } else {
+ wil_err(wil, "failed to merge skbs!\n");
+ kfree_skb(skb);
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ rxdata->skipping = !eop;
+ goto again;
+ }
+ }
+
+ if (!eop)
+ goto again;
+
+ /* reaching here rxdata->skb always contains a full packet */
+ skb = rxdata->skb;
+ rxdata->skb = NULL;
+ rxdata->skipping = false;
+
+ if (stats) {
+ stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
+ if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
+ stats->rx_per_mcs[stats->last_mcs_rx]++;
+ }
+
+ if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
+ wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
+ kfree_skb(skb);
+ goto again;
+ }
+
+ /* Compensate for the HW data alignment according to the status
+ * message
+ */
+ data_offset = wil_rx_status_get_data_offset(msg);
+ if (data_offset == 0xFF ||
+ data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
+ wil_err(wil, "Unexpected data offset %d\n", data_offset);
+ kfree_skb(skb);
+ goto again;
+ }
+
+ skb_pull(skb, data_offset);
+
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ /* Has to be done after dma_unmap_single as skb->cb is also
+ * used for holding the pa
+ */
+ s = wil_skb_rxstatus(skb);
+ memcpy(s, msg, sring->elem_size);
+
+ return skb;
+}
+
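wil_sring_reap_rx_edma() above implements Rx chaining: fragments are coalesced until a status message carries EOP, and any error poisons the remainder of the chain through the skipping flag. Its control flow reduces to this small user-space model (no skbs involved; demo_* names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct demo_frag { bool eop, bad; int len; };

static void demo_reap(const struct demo_frag *f, int n)
{
	int i, acc = 0;
	bool skipping = false;

	for (i = 0; i < n; i++) {
		if (skipping || f[i].bad) {
			acc = 0;		/* drop the partial chain */
			skipping = !f[i].eop;	/* keep dropping until EOP */
			continue;
		}
		acc += f[i].len;		/* merge this fragment */
		if (f[i].eop) {
			printf("packet of %d bytes\n", acc);
			acc = 0;
		}
	}
}

int main(void)
{
	const struct demo_frag frags[] = {
		{ false, false, 1000 }, { true, false, 500 },	/* 1500B ok */
		{ false, true,  700 }, { true, false, 300 },	/* dropped */
	};

	demo_reap(frags, 4);	/* prints only the first, intact packet */
	return 0;
}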
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
+{
+ struct net_device *ndev;
+ struct wil_ring *ring = &wil->ring_rx;
+ struct wil_status_ring *sring;
+ struct sk_buff *skb;
+ int i;
+
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "rx_handle\n");
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (unlikely(!sring->va)) {
+ wil_err(wil,
+ "Rx IRQ while Rx status ring %d not yet initialized\n",
+ i);
+ continue;
+ }
+
+ while ((*quota > 0) &&
+ (NULL != (skb =
+ wil_sring_reap_rx_edma(wil, sring)))) {
+ (*quota)--;
+ if (wil->use_rx_hw_reordering) {
+ void *msg = wil_skb_rxstatus(skb);
+ int mid = wil_rx_status_get_mid(msg);
+ struct wil6210_vif *vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil,
+ "RX desc invalid mid %d",
+ mid);
+ kfree_skb(skb);
+ continue;
+ }
+ ndev = vif_to_ndev(vif);
+ wil_netif_rx_any(skb, ndev);
+ } else {
+ wil_rx_reorder(wil, skb);
+ }
+ }
+
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+ }
+
+ wil_rx_refill_edma(wil);
+}
+
+static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
+ dma_addr_t pa,
+ u32 len,
+ int ring_index)
+{
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)&desc->enhanced;
+
+ memset(d, 0, sizeof(struct wil_tx_enhanced_desc));
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+
+ /* payload length, dword 3 bits 16..31 */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
+ * 3 - eth mode
+ */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
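wil_tx_desc_map_edma() spreads the 64-bit payload address across three descriptor fields: bits 0-31 in addr_low, 32-47 in addr_high and 48-63 in addr_high_high, per the dword layout documented in txrx_edma.h. Leaving the cpu_to_le conversions aside, the split round-trips losslessly, as in this sketch (demo_* names are illustrative):

#include <assert.h>
#include <stdint.h>

struct demo_addr { uint32_t lo; uint16_t hi; uint16_t hi_hi; };

static void demo_set(struct demo_addr *a, uint64_t pa)
{
	a->lo    = (uint32_t)pa;		/* bits  0..31 */
	a->hi    = (uint16_t)(pa >> 32);	/* bits 32..47 */
	a->hi_hi = (uint16_t)(pa >> 48);	/* bits 48..63 */
}

static uint64_t demo_get(const struct demo_addr *a)
{
	return (uint64_t)a->lo | ((uint64_t)a->hi << 32) |
	       ((uint64_t)a->hi_hi << 48);
}

int main(void)
{
	struct demo_addr a;
	uint64_t pa = 0x0000123456789abcULL;

	demo_set(&a, pa);
	assert(demo_get(&a) == pa);	/* round-trips losslessly */
	return 0;
}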
+static inline void
+wil_get_next_tx_status_msg(struct wil_status_ring *sring,
+ struct wil_ring_tx_status *msg)
+{
+ struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
+ (sring->va + (sring->elem_size * sring->swhead));
+
+ *msg = *_msg;
+}
+
+/**
+ * Clean up transmitted skbs from the Tx descriptor ring.
+ * Return the number of descriptors cleared.
+ */
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct net_device *ndev;
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ring *ring = NULL;
+ struct wil_ring_tx_data *txdata;
+ /* Total number of completed descriptors in all descriptor rings */
+ int desc_cnt = 0;
+ int cid;
+ struct wil_net_stats *stats = NULL;
+ struct wil_tx_enhanced_desc *_d;
+ unsigned int ring_id;
+ unsigned int num_descs;
+ int i;
+ u8 dr_bit; /* Descriptor Ready bit */
+ struct wil_ring_tx_status msg;
+ struct wil6210_vif *vif;
+ int used_before_complete;
+ int used_new;
+
+ wil_get_next_tx_status_msg(sring, &msg);
+ dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+
+ /* Process completion messages while DR bit has the expected polarity */
+ while (dr_bit == sring->desc_rdy_pol) {
+ num_descs = msg.num_descriptors;
+ if (!num_descs) {
+ wil_err(wil, "invalid num_descs 0\n");
+ goto again;
+ }
+
+ /* Find the corresponding descriptor ring */
+ ring_id = msg.ring_id;
+
+ if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
+ wil_err(wil, "invalid ring id %d\n", ring_id);
+ goto again;
+ }
+ ring = &wil->ring_tx[ring_id];
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Tx irq[%d]: ring not initialized\n",
+ ring_id);
+ goto again;
+ }
+ txdata = &wil->ring_tx_data[ring_id];
+ if (unlikely(!txdata->enabled)) {
+ wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
+ goto again;
+ }
+ vif = wil->vifs[txdata->mid];
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
+ txdata->mid, ring_id);
+ goto again;
+ }
+
+ ndev = vif_to_ndev(vif);
+
+ cid = wil->ring2cid_tid[ring_id][0];
+ if (cid < WIL6210_MAX_CID)
+ stats = &wil->sta[cid].stats;
+
+ wil_dbg_txrx(wil,
+ "tx_status: completed desc_ring (%d), num_descs (%d)\n",
+ ring_id, num_descs);
+
+ used_before_complete = wil_ring_used_tx(ring);
+
+ for (i = 0 ; i < num_descs; ++i) {
+ struct wil_ctx *ctx = &ring->ctx[ring->swtail];
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ u16 dmalen;
+ struct sk_buff *skb = ctx->skb;
+
+ _d = (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
+ wil_dbg_txrx(wil,
+ "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
+ ring_id, ring->swtail, dmalen,
+ msg.status);
+ wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)&msg, sizeof(msg),
+ false);
+
+ wil_tx_desc_unmap_edma(dev,
+ (union wil_tx_desc *)d,
+ ctx);
+
+ if (skb) {
+ if (likely(msg.status == 0)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ if (stats) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
+ }
+ } else {
+ ndev->stats.tx_errors++;
+ if (stats)
+ stats->tx_errors++;
+ }
+ wil_consume_skb(skb, msg.status == 0);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* Make sure the ctx is zeroed before updating the tail
+ * to prevent a case where wil_tx_ring will see
+ * this descriptor as used and handle it before ctx zero
+ * is completed.
+ */
+ wmb();
+
+ ring->swtail = wil_ring_next_tail(ring);
+
+ desc_cnt++;
+ }
+
+ /* performance monitoring */
+ used_new = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used_new, used_before_complete)) {
+ wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+ ring_id, used_before_complete, used_new);
+ txdata->last_idle = get_cycles();
+ }
+
+again:
+ wil_sring_advance_swhead(sring);
+
+ wil_get_next_tx_status_msg(sring, &msg);
+ dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+ }
+
+ /* shall we wake net queues? */
+ if (desc_cnt)
+ wil_update_net_queues(wil, vif, NULL, false);
+
+ /* Update the HW tail ptr (RD ptr) */
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+
+ return desc_cnt;
+}
+
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The protocol
+ * and header lengths are determined by the caller and passed in.
+ * @tso_desc_type is the TSO descriptor type: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
+ int tso_desc_type, bool is_ipv4,
+ int tcp_hdr_len,
+ int skb_net_hdr_len,
+ int mss)
+{
+ /* Number of descriptors */
+ d->mac.d[2] |= 1;
+ /* Maximum Segment Size */
+ d->mac.tso_mss |= cpu_to_le16(mss >> 2);
+ /* L4 header len: TCP header length */
+ d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
+ /* EOP, TSO desc type, Segmentation enable,
+ * Insert IPv4 and TCP / UDP Checksum
+ */
+ d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
+ tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
+ BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
+ /* Calculate pseudo-header */
+ d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
+ /* IP Header Length */
+ d->dma.ip_length |= skb_net_hdr_len;
+ /* MAC header length and IP address family*/
+ d->dma.b11 |= ETH_HLEN |
+ is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+}
+
+static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
+ int len, uint i, int tso_desc_type,
+ skb_frag_t *frag, struct wil_ring *ring,
+ struct sk_buff *skb, bool is_ipv4,
+ int tcp_hdr_len, int skb_net_hdr_len,
+ int mss, int *descs_used)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+ struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
+ int ring_index = ring - wil->ring_tx;
+ dma_addr_t pa;
+
+ if (len == 0)
+ return 0;
+
+ if (!frag) {
+ pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
+ } else {
+ pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ }
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: Skb DMA map error\n");
+ return -EINVAL;
+ }
+
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
+ len, ring_index);
+ wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
+ tcp_hdr_len,
+ skb_net_hdr_len, mss);
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ if (tso_desc_type == wil_tso_type_lst)
+ ring->ctx[i].skb = skb_get(skb);
+
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ *_desc = *d;
+ (*descs_used)++;
+
+ return 0;
+}
+
+static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct wil_ring *ring,
+ struct sk_buff *skb)
+{
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
+ int used, avail = wil_ring_avail_tx(ring);
+ int f, hdrlen, headlen;
+ int gso_type;
+ bool is_ipv4;
+ u32 swhead = ring->swhead;
+ int descs_used = 0; /* total number of used descriptors */
+ int rc = -EINVAL;
+ int tcp_hdr_len;
+ int skb_net_hdr_len;
+ int mss = skb_shinfo(skb)->gso_size;
+
+ wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
+ ring_index);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+ if (unlikely(avail < min_desc_required)) {
+ wil_err_ratelimited(wil,
+ "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+ ring_index, min_desc_required);
+ return -ENOMEM;
+ }
+
+ gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+ switch (gso_type) {
+ case SKB_GSO_TCPV4:
+ is_ipv4 = true;
+ break;
+ case SKB_GSO_TCPV6:
+ is_ipv4 = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EINVAL;
+
+ /* The TCP header length and the skb network header length are fixed
+ * for all of the packet's descriptors - read them once here
+ */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ skb_net_hdr_len = skb_network_header_len(skb);
+
+ /* First descriptor must contain the header only
+ * Header Length = MAC header len + IP header len + TCP header len
+ */
+ hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
+ wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
+ hdrlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
+ wil_tso_type_hdr, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ return -EINVAL;
+
+ /* Second descriptor contains the head */
+ headlen = skb_headlen(skb) - hdrlen;
+ wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
+ (swhead + descs_used) % ring->size,
+ (nr_frags != 0) ? wil_tso_type_first :
+ wil_tso_type_lst, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+
+ /* Rest of the descriptors are from the SKB fragments */
+ for (f = 0; f < nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ int len = frag->size;
+
+ wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
+ len, descs_used);
+
+ rc = wil_tx_tso_gen_desc(wil, NULL, len,
+ (swhead + descs_used) % ring->size,
+ (f != nr_frags - 1) ?
+ wil_tso_type_mid : wil_tso_type_lst,
+ frag, ring, skb, is_ipv4,
+ tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+ }
+
+ /* performance monitoring */
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used, used + descs_used)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ ring_index, used, used + descs_used);
+ }
+
+ /* advance swhead */
+ wil_ring_advance_head(ring, descs_used);
+ wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return 0;
+
+mem_error:
+ while (descs_used > 0) {
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ctx *ctx;
+ int i = (swhead + descs_used - 1) % ring->size;
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_desc =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+
+ *d = *_desc;
+ ctx = &ring->ctx[i];
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ memset(ctx, 0, sizeof(*ctx));
+ descs_used--;
+ }
+ return rc;
+}
+
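__wil_tx_ring_tso_edma() budgets nr_frags + 2 descriptors per TSO packet: one for the headers, one for the linear head, and one per page fragment, with the final descriptor typed wil_tso_type_lst so the skb reference is taken exactly once. A sketch of the resulting descriptor-type sequence (enum values here are illustrative, not the driver's):

#include <stdio.h>

enum demo_tso_type { DEMO_HDR, DEMO_FIRST, DEMO_MID, DEMO_LST };

static void demo_plan(int nr_frags)
{
	int f;

	printf("desc 0: type=%d (headers)\n", DEMO_HDR);
	printf("desc 1: type=%d (linear head)\n",
	       nr_frags ? DEMO_FIRST : DEMO_LST);
	for (f = 0; f < nr_frags; f++)		/* one desc per fragment */
		printf("desc %d: type=%d\n", 2 + f,
		       (f != nr_frags - 1) ? DEMO_MID : DEMO_LST);
}

int main(void)
{
	demo_plan(3);	/* nr_frags + 2 = 5 descriptors, last one typed LST */
	return 0;
}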
+static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
+ int size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
+ ring_id, wil->tx_sring_idx);
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ ring->is_rx = false;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[ring_id][1] = 0; /* TID */
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->enabled = 0;
+ txdata->dot1x_open = false;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+
+out:
+ return rc;
+}
+
+static void wil_tx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ wil_dbg_misc(wil, "free TX sring\n");
+
+ wil_sring_free(wil, sring);
+}
+
+static void wil_rx_data_free(struct wil_status_ring *sring)
+{
+ if (!sring)
+ return;
+
+ kfree_skb(sring->rx_data.skb);
+ sring->rx_data.skb = NULL;
+}
+
+static void wil_rx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int i;
+
+ wil_dbg_misc(wil, "rx_fini_edma\n");
+
+ wil_ring_free_edma(wil, ring);
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ wil_rx_data_free(&wil->srings[i]);
+ wil_sring_free(wil, &wil->srings[i]);
+ }
+
+ wil_free_rx_buff_arr(wil);
+}
+
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation_edma;
+ /* TX ops */
+ wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
+ wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
+ wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
+ wil->txrx_ops.tx_init = wil_tx_init_edma;
+ wil->txrx_ops.tx_fini = wil_tx_fini_edma;
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
+ wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init_edma;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
+ wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
+ wil->txrx_ops.rx_fini = wil_rx_fini_edma;
+}
+
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
new file mode 100644
index 000000000000..a7fe9292fda3
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_EDMA_H
+#define WIL6210_TXRX_EDMA_H
+
+#include "wil6210.h"
+
+/* limit the status ring size to the range [min ring size..max ring size] */
+#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
+#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
+/* RX sring order should be bigger than RX ring order */
+#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
+#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
+#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
+
+#define WIL_DEFAULT_RX_STATUS_RING_ID 0
+#define WIL_RX_DESC_RING_ID 0
+#define WIL_RX_STATUS_IRQ_IDX 0
+#define WIL_TX_STATUS_IRQ_IDX 1
+
+#define WIL_EDMA_AGG_WATERMARK (0xffff)
+#define WIL_EDMA_AGG_WATERMARK_POS (16)
+
+#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
+#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */
+
+/* Error field */
+#define WIL_RX_EDMA_ERROR_MIC (1)
+#define WIL_RX_EDMA_ERROR_KEY (2) /* Key missing */
+#define WIL_RX_EDMA_ERROR_REPLAY (3)
+#define WIL_RX_EDMA_ERROR_AMSDU (4)
+#define WIL_RX_EDMA_ERROR_FCS (7)
+
+#define WIL_RX_EDMA_ERROR_L3_ERR (BIT(0) | BIT(1))
+#define WIL_RX_EDMA_ERROR_L4_ERR (BIT(0) | BIT(1))
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_BIT BIT(11)
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK 0x7
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK 0xf
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS 2
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS 4
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
+
+#define WIL_RX_EDMA_MID_VALID_BIT BIT(22)
+
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
+
+#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0
+#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2
+
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1
+
+/* Enhanced Rx descriptor - MAC part
+ * [dword 0] : Reserved
+ * [dword 1] : Reserved
+ * [dword 2] : Reserved
+ * [dword 3]
+ * bit 0..15 : Buffer ID
+ * bit 16..31 : Reserved
+ */
+struct wil_ring_rx_enhanced_mac {
+ u32 d[3];
+ __le16 buff_id;
+ u16 reserved;
+} __packed;
+
+/* Enhanced Rx descriptor - DMA part
+ * [dword 0] - Reserved
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..31 : Reserved
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_rx_enhanced_dma {
+ u32 d0;
+ struct wil_ring_dma_addr addr;
+ u16 w5;
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+struct wil_rx_enhanced_desc {
+ struct wil_ring_rx_enhanced_mac mac;
+ struct wil_ring_rx_enhanced_dma dma;
+} __packed;
+
+/* Enhanced Tx descriptor - DMA part
+ * [dword 0]
+ * Same as legacy
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ * offload feature
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_tx_enhanced_dma {
+ u8 l4_hdr_len;
+ u8 cmd;
+ u16 w1;
+ struct wil_ring_dma_addr addr;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+/* Enhanced Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrupt_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..30 : reserved1:3
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5..14 : reserved0:10
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..23 : reserved0:2
+ * bit 24 : Dest ID extension:1
+ * bit 25..31 : reserved0:7
+ * [dword 3]
+ * bit 0..15 : tso_mss:16
+ * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode
+ */
+struct wil_ring_tx_enhanced_mac {
+ u32 d[3];
+ __le16 tso_mss;
+ u16 scratchpad;
+} __packed;
+
+struct wil_tx_enhanced_desc {
+ struct wil_ring_tx_enhanced_mac mac;
+ struct wil_ring_tx_enhanced_dma dma;
+} __packed;
+
+#define TX_STATUS_DESC_READY_POS 7
+
+/* Enhanced TX status message
+ * [dword 0]
+ * bit 0.. 7 : Number of Descriptors:8 - The number of descriptors that
+ * were used to form the packet. It is needed for WB when
+ * releasing the packet
+ * bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to
+ * the message
+ * bit 16..23 : Status:8 - The TX status Code
+ * 0x0 - A successful transmission
+ * 0x1 - Retry expired
+ * 0x2 - Lifetime Expired
+ * 0x3 - Released
+ * 0x4-0xFF - Reserved
+ * bit 24..30 : Reserved:7
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ * zero by the driver when the ring is created. It is set by the HW
+ * to one for each completed status message. On each wrap-around,
+ * the DR bit value is flipped.
+ * [dword 1]
+ * bit 0..31 : timestamp:32 - Set when MPDU is transmitted.
+ * [dword 2]
+ * bit 0.. 4 : MCS:5 - The transmitted MCS value
+ * bit 5 : Reserved:1
+ * bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide
+ * bit 8..12 : QID:5 - The QID that was used for the transmission
+ * bit 13..15 : Reserved:3
+ * bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation
+ * bit 21..22 : Reserved:2
+ * bit 23 : Retry:1 - An indication that the transmission was retried
+ * bit 24..31 : TX-Sector:8 - the antenna sector that was used for
+ * transmission
+ * [dword 3]
+ * bit 0..11 : Sequence number:12 - The Sequence Number that was used
+ * for the MPDU transmission
+ * bit 12..31 : Reserved:20
+ */
+struct wil_ring_tx_status {
+ u8 num_descriptors;
+ u8 ring_id;
+ u8 status;
+ u8 desc_ready; /* Only the DR bit (bit 7) is used */
+ u32 timestamp;
+ u32 d2;
+ u16 seq_number; /* Only the lower 12 bits are used */
+ u16 w7;
+} __packed;
+
+/* Enhanced Rx status message - compressed part
+ * [dword 0]
+ * bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status
+ * 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error,
+ * 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error
+ * bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum
+ * calculated, Bit1- L3Err - IPv4 Checksum Error
+ * bit 5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum
+ * calculated, Bit1- L4Err - TCP/UDP Checksum Error
+ * bit 7 : Reserved:1
+ * bit 8..19 : Flow ID:12 - MSDU flow ID
+ * bit 20..21 : MID:2 - The MAC ID
+ * bit 22 : MID_V:1 - The MAC ID field is valid
+ * bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
+ * bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
+ * bit 25 : BC:1 - The received MPDU is broadcast
+ * bit 26 : MC:1 - The received MPDU is multicast
+ * bit 27 : Raw:1 - The MPDU received with no translation
+ * bit 28 : Sec:1 - The FC control (b14) - Frame Protected
+ * bit 29 : Error:1 - An error is set when (L2 status != 0) ||
+ * (L3 status == 3) || (L4 status == 3)
+ * bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end
+ * of the transfer, otherwise the status indicates buffer
+ * only completion.
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ * zero by the driver when the ring is created. It is set
+ * by the HW to one for each completed status message.
+ * On each wrap-around, the DR bit value is flipped.
+ * [dword 1]
+ * bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header
+ * bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header
+ * bit 12..15 : I4Len:4 - The number of DW that are used for L4 header
+ * bit 16..21 : MCS:6 - The received MCS field from the PLCP Header
+ * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
+ * bit 24..27 : Data Offset:4 - The data offset, a code that describes the
+ * payload shift from the beginning of the buffer:
+ * 0 - 0 Bytes, 3 - 2 Bytes
+ * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
+ * bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
+ * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU
+ * bit 31 : Key ID:1 - The extracted Key ID from the encryption header
+ * [dword 2]
+ * bit 0..15 : Buffer ID:16 - The Buffer Identifier
+ * bit 16..31 : Length:16 - It indicates the valid bytes that are stored
+ * in the current descriptor buffer. For a multiple-buffer
+ * descriptor, SW needs to sum the descriptor lengths of
+ * all buffers to produce the packet length
+ * [dword 3]
+ * bit 0..31 : timestamp:32 - The MPDU Timestamp.
+ */
+struct wil_rx_status_compressed {
+ u32 d0;
+ u32 d1;
+ __le16 buff_id;
+ __le16 length;
+ u32 timestamp;
+} __packed;
+
+/* Enhanced Rx status message - extension part
+ * [dword 0]
+ * bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received
+ * from
+ * bit 5.. 7 : Reserved:3
+ * bit 8..11 : TID:4 - The QoS (b3-0) TID Field
+ * bit 12..15 : Source index:4 - The Source index that was found
+ * during parsing of the TA. This field is used to define the
+ * source of the packet
+ * bit 16..18 : Destination index:3 - The Destination index that
+ * was found during parsing of the RA.
+ * bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS
+ * bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an
+ * interrupt after it writes the packet
+ * bit 23 : ESOP:1 - The QoS (b4) ESOP field
+ * bit 24 : RDG:1
+ * bit 25..31 : Reserved:7
+ * [dword 1]
+ * bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 2.. 5 : Sub type:4 - The FC Control (b7-4) - Frame Subtype
+ * bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended
+ * Subtype
+ * bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields
+ * bit 14 : DECRYPT_BYP:1 - The MPDU is bypassed by the decryption unit
+ * bit 15..23 : Reserved:9
+ * bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received
+ * MPDU
+ * [dword 2]
+ * bit 0..11 : SN:12 - The received Sequence number field
+ * bit 12..15 : Reserved:4
+ * bit 16..31 : PN bits [15:0]:16
+ * [dword 3]
+ * bit 0..31 : PN bits [47:16]:32
+ */
+struct wil_rx_status_extension {
+ u32 d0;
+ u32 d1;
+ __le16 seq_num; /* only lower 12 bits */
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+struct wil_rx_status_extended {
+ struct wil_rx_status_compressed comp;
+ struct wil_rx_status_extension ext;
+} __packed;
+
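For illustration, the split PN fields recombine into the full 48-bit packet number; a sketch only (byte-order conversion elided, since pn_15_0/pn_47_16 are declared without __le annotations above):

	static inline u64 wil_rx_status_get_pn(struct wil_rx_status_extended *s)
	{
		/* pn_47_16 holds PN bits [47:16], pn_15_0 holds bits [15:0] */
		return ((u64)s->ext.pn_47_16 << 16) | s->ext.pn_15_0;
	}
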
+static inline void *wil_skb_rxstatus(struct sk_buff *skb)
+{
+ return (void *)skb->cb;
+}
+
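wil_skb_rxstatus() works because the RX path stashes the status message in the skb's 48-byte control buffer; a usage sketch (assumption: the copy happens at reap time and elem_size fits in sizeof(skb->cb), which holds for the 32-byte extended status):

	memcpy(skb->cb, msg, sring->elem_size);	/* at reap time */
	...
	u8 mcs = wil_rx_status_get_mcs(wil_skb_rxstatus(skb));
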
+static inline __le16 wil_rx_status_get_length(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->length;
+}
+
+static inline u8 wil_rx_status_get_mcs(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 16, 21);
+}
+
+static inline u16 wil_rx_status_get_flow_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 8, 19);
+}
+
+static inline u8 wil_rx_status_get_mcast(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 26, 26);
+}
+
+/**
+ * In case of a DLPF miss, the flow ID is parsed as follows:
+ * dest_id:2
+ * src_id :3 - cid
+ * tid:3
+ * Otherwise:
+ * tid:4
+ * cid:4
+ */
+
+static inline u8 wil_rx_status_get_cid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* CID is in bits 2..4 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* CID is in bits 4..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
+}
+
+static inline u8 wil_rx_status_get_tid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* TID is in bits 5..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* TID is in bits 0..3 */
+ return val & WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
+}
+
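A worked example of the two encodings (illustrative value): if the low byte of the flow ID is 0xae (0b10101110), a DLPF lookup miss decodes CID = (0xae >> 2) & 0x7 = 3 and TID = (0xae >> 5) & 0x7 = 5, while a lookup hit decodes TID = 0xae & 0xf = 14 and CID = (0xae >> 4) & 0xf = 10 (assuming the HIT masks select four bits each, per the layout comments above).
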
+static inline int wil_rx_status_get_desc_rdy_bit(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 31, 31);
+}
+
+static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 30, 30);
+}
+
+static inline __le16 wil_rx_status_get_buff_id(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->buff_id;
+}
+
+static inline u8 wil_rx_status_get_data_offset(void *msg)
+{
+ u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 24, 27);
+
+ switch (val) {
+ case 0: return 0;
+ case 3: return 2;
+ default: return 0xFF;
+ }
+}
+
+static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil,
+ void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return IEEE80211_FTYPE_DATA;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 1) << 2;
+}
+
+static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 5) << 2;
+}
+
+static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return ((struct wil_rx_status_extended *)msg)->ext.seq_num;
+}
+
+static inline u8 wil_rx_status_get_retry(void *msg)
+{
+ /* retry bit is missing in EDMA HW. return 1 to be on the safe side */
+ return 1;
+}
+
+static inline int wil_rx_status_get_mid(void *msg)
+{
+ if (!(((struct wil_rx_status_compressed *)msg)->d0 &
+ WIL_RX_EDMA_MID_VALID_BIT))
+ return 0; /* use the default MID */
+
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 20, 21);
+}
+
+static inline int wil_rx_status_get_error(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 29, 29);
+}
+
+static inline int wil_rx_status_get_l2_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 0, 2);
+}
+
+static inline int wil_rx_status_get_l3_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 3, 4);
+}
+
+static inline int wil_rx_status_get_l4_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 5, 6);
+}
+
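The L3/L4 status encodings (bit 0 = identified and checksum calculated, bit 1 = checksum error) map naturally onto the stack's checksum-offload states; a hedged sketch, not part of this patch, of how an RX path could consume them:

	static void wil_rx_csum_sketch(void *msg, struct sk_buff *skb)
	{
		int l3 = wil_rx_status_get_l3_rx_status(msg);
		int l4 = wil_rx_status_get_l4_rx_status(msg);

		/* value 1 == identified, checksum calculated, no error */
		if (l3 == 1 && l4 == 1)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}
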
+static inline int wil_rx_status_get_security(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 28, 28);
+}
+
+static inline u8 wil_rx_status_get_key_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 31, 31);
+}
+
+static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg)
+{
+ return WIL_GET_BITS(msg->d2, 0, 4);
+}
+
+static inline u32 wil_ring_next_head(struct wil_ring *ring)
+{
+ return (ring->swhead + 1) % ring->size;
+}
+
+static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
+ __le16 *addr_high_high,
+ dma_addr_t pa)
+{
+ addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+ addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+ *addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
+}
+
+static inline
+dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
+static inline
+dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
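A round-trip example of the address split (illustrative value): for pa = 0x123456789abcdef0, wil_desc_set_addr_edma() stores addr_low = 0x9abcdef0 (bits 31..0), addr.addr_high = 0x5678 (bits 47..32) and *addr_high_high = 0x1234 (bits 63..48); wil_tx_desc_get_addr_edma() then reassembles the original 64-bit value.
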
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring);
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota);
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
+
+#endif /* WIL6210_TXRX_EDMA_H */
+
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index b623510c6f6c..17c294b1ead1 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,7 @@
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
+#include <linux/irqreturn.h>
#include "wmi.h"
#include "wil_platform.h"
#include "fw.h"
@@ -36,6 +37,11 @@ extern bool rx_align_2;
extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
+extern bool ftm_mode;
+
+struct wil6210_priv;
+struct wil6210_vif;
+union wil_tx_desc;
#define WIL_NAME "wil6210"
@@ -45,11 +51,17 @@ extern bool disable_ap_sme;
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+#define WIL_FW_NAME_TALYN "wil6436.fw"
+#define WIL_FW_NAME_FTM_TALYN "wil6436_ftm.fw"
+#define WIL_BRD_NAME_TALYN "wil6436.brd"
+
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
+#define WIL_NUM_LATENCY_BINS 200
+
/* maximum number of virtual interfaces the driver supports
* (including the main interface)
*/
@@ -80,6 +92,10 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
+#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */
+#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */
+#define WIL6210_MAX_STATUS_RINGS (8)
+
/* Hardware offload block adds the following:
* 26 bytes - 3-address QoS data header
* 8 bytes - IV + EIV (for GCMP)
@@ -203,7 +219,9 @@ struct RGF_ICR {
#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
- #define BIT_NO_FLASH_INDICATION BIT(8)
+ #define BIT_OTP_SIGNATURE_ERR_TALYN_MB BIT(0)
+ #define BIT_OTP_HW_SECTION_DONE_TALYN_MB BIT(2)
+ #define BIT_NO_FLASH_INDICATION BIT(8)
#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
@@ -284,6 +302,8 @@ struct RGF_ICR {
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2)
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3)
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+#define RGF_DMA_MISC_CTL (0x881d6c)
+ #define BIT_OFUL34_RDY_VALID_BUG_FIX_EN BIT(7)
#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
@@ -305,20 +325,49 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+#define RGF_OTP_QC_SECURED (0x8a0038)
+ #define BIT_BOOT_FROM_ROM BIT(31)
+
+/* eDMA */
+#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
+
+#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
+#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004)
+#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8)
+
+#define RGF_INT_GEN_CTRL (0x8bc0ec)
+ #define BIT_CONTROL_0 BIT(0)
+
+/* eDMA status interrupts */
+#define RGF_INT_GEN_RX_ICR (0x8bc0f4)
+ #define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX)
+#define RGF_INT_GEN_TX_ICR (0x8bc110)
+ #define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX)
+#define RGF_INT_CTRL_RX_INT_MASK (0x8bc12c)
+#define RGF_INT_CTRL_TX_INT_MASK (0x8bc130)
+
+#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134)
+
#define USER_EXT_USER_PMU_3 (0x88d00c)
#define BIT_PMU_DEVICE_RDY BIT(0)
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW (0x2632072f)
#define JTAG_DEV_ID_TALYN (0x7e0e1)
+ #define JTAG_DEV_ID_TALYN_MB (0x1007e0e1)
#define RGF_USER_REVISION_ID (0x88afe4)
#define RGF_USER_REVISION_ID_MASK (3)
#define REVISION_ID_SPARROW_B0 (0x0)
#define REVISION_ID_SPARROW_D0 (0x3)
+#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
#define RGF_OTP_MAC (0x8a0620)
+/* Talyn-MB */
+#define RGF_USER_USER_CPU_0_TALYN_MB (0x8c0138)
+#define RGF_USER_MAC_CPU_0_TALYN_MB (0x8c0154)
+
/* crash codes for FW/Ucode stored here */
/* ASSERT RGFs */
@@ -332,6 +381,7 @@ enum {
HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
+ HW_VER_TALYN_MB /* JTAG_DEV_ID_TALYN_MB */
};
/* popular locations */
@@ -349,7 +399,14 @@ enum {
/* Hardware definitions end */
#define SPARROW_FW_MAPPING_TABLE_SIZE 10
#define TALYN_FW_MAPPING_TABLE_SIZE 13
-#define MAX_FW_MAPPING_TABLE_SIZE 13
+#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19
+#define MAX_FW_MAPPING_TABLE_SIZE 19
+
+/* Common representation of physical address in wil ring */
+struct wil_ring_dma_addr {
+ __le32 addr_low;
+ __le16 addr_high;
+} __packed;
struct fw_map {
u32 from; /* linker address - from, inclusive */
@@ -357,12 +414,14 @@ struct fw_map {
u32 host; /* PCI/Host address - BAR0 + 0x880000 */
const char *name; /* for debugfs */
bool fw; /* true if FW mapping, false if UCODE mapping */
+ bool crash_dump; /* true if should be dumped during crash dump */
};
/* array size should be in sync with actual definition in the wmi.c */
extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
extern const struct fw_map sparrow_d0_mac_rgf_ext;
extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE];
extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
/**
@@ -438,7 +497,7 @@ enum { /* for wil_ctx.mapped_as */
};
/**
- * struct wil_ctx - software context for Vring descriptor
+ * struct wil_ctx - software context for ring descriptor
*/
struct wil_ctx {
struct sk_buff *skb;
@@ -446,22 +505,123 @@ struct wil_ctx {
u8 mapped_as;
};
-union vring_desc;
+struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
+ u32 *va;
+ dma_addr_t pa;
+};
-struct vring {
+/**
+ * A general ring structure, used for RX and TX.
+ * In legacy DMA it represents the vring;
+ * in enhanced DMA it represents the descriptor ring (vrings are handled by FW)
+ */
+struct wil_ring {
dma_addr_t pa;
- volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
- u16 size; /* number of vring_desc elements */
+ volatile union wil_ring_desc *va;
+ u16 size; /* number of wil_ring_desc elements */
u32 swtail;
u32 swhead;
u32 hwtail; /* write here to inform hw */
struct wil_ctx *ctx; /* ctx[size] - software context */
+ struct wil_desc_ring_rx_swtail edma_rx_swtail;
+ bool is_rx;
+};
+
+/**
+ * Additional data for Rx ring.
+ * Used for enhanced DMA RX chaining.
+ */
+struct wil_ring_rx_data {
+ /* the skb being assembled */
+ struct sk_buff *skb;
+ /* true if we are skipping a bad fragmented packet */
+ bool skipping;
+ u16 buff_size;
+};
+
+/**
+ * Status ring structure, used for enhanced DMA completions for RX and TX.
+ */
+struct wil_status_ring {
+ dma_addr_t pa;
+ void *va; /* pointer to ring_[tr]x_status elements */
+ u16 size; /* number of status elements */
+ size_t elem_size; /* status element size in bytes */
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ bool is_rx;
+ u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
+ struct wil_ring_rx_data rx_data;
+};
+
+#define WIL_STA_TID_NUM (16)
+#define WIL_MCS_MAX (12) /* Maximum MCS supported */
+
+struct wil_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_errors;
+ u32 tx_latency_min_us;
+ u32 tx_latency_max_us;
+ u64 tx_latency_total_us;
+ unsigned long rx_dropped;
+ unsigned long rx_non_data_frame;
+ unsigned long rx_short_frame;
+ unsigned long rx_large_frame;
+ unsigned long rx_replay;
+ unsigned long rx_mic_error;
+ unsigned long rx_key_error; /* eDMA specific */
+ unsigned long rx_amsdu_error; /* eDMA specific */
+ unsigned long rx_csum_err;
+ u16 last_mcs_rx;
+ u64 rx_per_mcs[WIL_MCS_MAX + 1];
};
/**
- * Additional data for Tx Vring
+ * struct tx_rx_ops - different TX/RX ops for legacy and enhanced
+ * DMA flow
*/
-struct vring_tx_data {
+struct wil_txrx_ops {
+ void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
+ /* TX ops */
+ int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid);
+ void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
+ int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
+ int (*tx_init)(struct wil6210_priv *wil);
+ void (*tx_fini)(struct wil6210_priv *wil);
+ int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int ring_index);
+ void (*tx_desc_unmap)(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx);
+ int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
+ irqreturn_t (*irq_tx)(int irq, void *cookie);
+ /* RX ops */
+ int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
+ void (*rx_fini)(struct wil6210_priv *wil);
+ int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
+ void (*get_reorder_params)(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry);
+ void (*get_netif_rx_params)(struct sk_buff *skb,
+ int *cid, int *security);
+ int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
+ int (*rx_error_check)(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats);
+ bool (*is_rx_idle)(struct wil6210_priv *wil);
+ irqreturn_t (*irq_rx)(int irq, void *cookie);
+};
+
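The ops table keeps the common data path agnostic of the DMA flavor; its selection plausibly reduces to a single branch, along these lines (wil_init_txrx_ops_legacy_dma is an assumed name; only wil_init_txrx_ops_edma is declared in this patch):

	void wil_init_txrx_ops(struct wil6210_priv *wil)
	{
		if (wil->use_enhanced_dma_hw)
			wil_init_txrx_ops_edma(wil);
		else
			wil_init_txrx_ops_legacy_dma(wil); /* assumed name */
	}
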
+/**
+ * Additional data for Tx ring
+ */
+struct wil_ring_tx_data {
bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
@@ -503,6 +663,8 @@ struct pci_dev;
* @drop_dup: duplicate frames dropped for this reorder buffer
* @drop_old: old frames dropped for this reorder buffer
* @first_time: true when this buffer used 1-st time
+ * @mcast_last_seq: sequence number (SN) of last received multicast packet
+ * @drop_dup_mcast: duplicate multicast frames dropped for this reorder buffer
*/
struct wil_tid_ampdu_rx {
struct sk_buff **reorder_buf;
@@ -516,6 +678,8 @@ struct wil_tid_ampdu_rx {
unsigned long long drop_dup;
unsigned long long drop_old;
bool first_time; /* is it 1-st time this buffer used? */
+ u16 mcast_last_seq; /* multicast dup detection */
+ unsigned long long drop_dup_mcast;
};
/**
@@ -550,24 +714,6 @@ enum wil_sta_status {
wil_sta_connected = 2,
};
-#define WIL_STA_TID_NUM (16)
-#define WIL_MCS_MAX (12) /* Maximum MCS supported */
-
-struct wil_net_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long tx_errors;
- unsigned long rx_dropped;
- unsigned long rx_non_data_frame;
- unsigned long rx_short_frame;
- unsigned long rx_large_frame;
- unsigned long rx_replay;
- u16 last_mcs_rx;
- u64 rx_per_mcs[WIL_MCS_MAX + 1];
-};
-
/**
* struct wil_sta_info - data for peer
*
@@ -581,6 +727,14 @@ struct wil_sta_info {
u8 mid;
enum wil_sta_status status;
struct wil_net_stats stats;
+ /**
+ * WIL_NUM_LATENCY_BINS latency bins. The 1st bin counts packets with
+ * latency of 0..tx_latency_res, the last bin counts packets with
+ * latency of (WIL_NUM_LATENCY_BINS - 1) * tx_latency_res and above.
+ * tx_latency_res is configured from "tx_latency" debug-fs.
+ */
+ u64 *tx_latency_bins;
+ struct wmi_link_stats_basic fw_stats_basic;
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
spinlock_t tid_rx_lock; /* guarding tid_rx array */
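The binning rule in the tx_latency_bins comment amounts to a clamp-and-increment; a minimal sketch (assumes tx_latency_res is non-zero and bins holds WIL_NUM_LATENCY_BINS counters):

	static void wil_tx_latency_bin_sketch(u64 *bins, size_t res_us,
					      u32 latency_us)
	{
		size_t bin = latency_us / res_us;

		if (bin >= WIL_NUM_LATENCY_BINS)
			bin = WIL_NUM_LATENCY_BINS - 1;	/* overflow bin */
		bins[bin]++;
	}
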
@@ -681,7 +835,7 @@ struct wil6210_vif {
u8 hidden_ssid; /* relevant in AP mode */
u32 ap_isolate; /* no intra-BSS communication */
bool pbss;
- int bcast_vring;
+ int bcast_ring;
struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
int locally_generated_disc; /* relevant in STA mode */
struct timer_list connect_timer;
@@ -695,6 +849,39 @@ struct wil6210_vif {
struct mutex probe_client_mutex; /* protect @probe_client_pending */
struct work_struct probe_client_worker;
int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
+ bool fw_stats_ready; /* per-cid statistics are ready inside sta_info */
+ u64 fw_stats_tsf; /* measurement timestamp */
+};
+
+/**
+ * RX buffer allocated for enhanced DMA RX descriptors
+ */
+struct wil_rx_buff {
+ struct sk_buff *skb;
+ struct list_head list;
+ int id;
+};
+
+/**
+ * During Rx completion processing, the driver extracts a buffer ID which
+ * is used as an index into the rx_buff_mgmt.buff_arr array; the SKB is
+ * then handed to the network stack, and the buffer is moved from the
+ * 'active' list to the 'free' list.
+ * During Rx refill, SKBs are attached to free buffers and moved to the
+ * 'active' list.
+ */
+struct wil_rx_buff_mgmt {
+ struct wil_rx_buff *buff_arr;
+ size_t size; /* number of items in buff_arr */
+ struct list_head active;
+ struct list_head free;
+ unsigned long free_list_empty_cnt; /* statistics */
+};
+
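A sketch of the completion-side move described above (locking and bounds checking elided; buff_id is the identifier extracted from the RX status message):

	static struct sk_buff *
	wil_rx_buff_consume(struct wil_rx_buff_mgmt *mgmt, u16 buff_id)
	{
		struct wil_rx_buff *buff = &mgmt->buff_arr[buff_id];
		struct sk_buff *skb = buff->skb;

		buff->skb = NULL;
		list_move(&buff->list, &mgmt->free);	/* active -> free */
		return skb;
	}

Refill does the inverse: attach a fresh skb and list_move() the entry back to the 'active' list.
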
+struct wil_fw_stats_global {
+ bool ready;
+ u64 tsf; /* measurement timestamp */
+ struct wmi_link_stats_global stats;
};
struct wil6210_priv {
@@ -702,6 +889,7 @@ struct wil6210_priv {
u32 bar_size;
struct wiphy *wiphy;
struct net_device *main_ndev;
+ int n_msi;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
u8 fw_version[ETHTOOL_FWVERS_LEN];
@@ -761,14 +949,20 @@ struct wil6210_priv {
struct net_device napi_ndev; /* dummy net_device serving all VIFs */
/* DMA related */
- struct vring vring_rx;
+ struct wil_ring ring_rx;
unsigned int rx_buf_len;
- struct vring vring_tx[WIL6210_MAX_TX_RINGS];
- struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
- u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+ struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
+ struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
+ struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
+ u8 num_rx_status_rings;
+ int tx_sring_idx;
+ u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
- u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
+ u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */
u32 dma_addr_size; /* indicates dma addr size */
+ struct wil_rx_buff_mgmt rx_buff_mgmt;
+ bool use_enhanced_dma_hw;
+ struct wil_txrx_ops txrx_ops;
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
/* statistics */
@@ -781,6 +975,8 @@ struct wil6210_priv {
u8 wakeup_trigger;
struct wil_suspend_stats suspend_stats;
struct wil_debugfs_data dbg_data;
+ bool tx_latency; /* collect TX latency measurements */
+ size_t tx_latency_res; /* bin resolution in usec */
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -811,6 +1007,21 @@ struct wil6210_priv {
u32 rgf_fw_assert_code_addr;
u32 rgf_ucode_assert_code_addr;
u32 iccm_base;
+
+ /* relevant only for eDMA */
+ bool use_compressed_rx_status;
+ u32 rx_status_ring_order;
+ u32 tx_status_ring_order;
+ u32 rx_buff_id_count;
+ bool amsdu_en;
+ bool use_rx_hw_reordering;
+ bool secured_boot;
+ u8 boot_config;
+
+ struct wil_fw_stats_global fw_stats_global;
+
+ u32 max_agg_wsize;
+ u32 max_ampdu_size;
};
#define wil_to_wiphy(i) (i->wiphy)
@@ -894,6 +1105,8 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
@@ -990,7 +1203,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@@ -1010,13 +1223,14 @@ int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid);
int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
const u8 *mac, enum nl80211_iftype iftype);
int wmi_port_delete(struct wil6210_priv *wil, u8 mid);
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval);
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil_mask_irq(struct wil6210_priv *wil);
void wil_unmask_irq(struct wil6210_priv *wil);
@@ -1083,30 +1297,28 @@ void wil_probe_client_flush(struct wil6210_vif *vif);
void wil_probe_client_worker(struct work_struct *work);
void wil_disconnect_worker(struct work_struct *work);
-int wil_rx_init(struct wil6210_priv *wil, u16 size);
-void wil_rx_fini(struct wil6210_priv *wil);
+void wil_init_txrx_ops(struct wil6210_priv *wil);
/* TX API */
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
- int cid, int tid);
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
-int wil_tx_init(struct wil6210_vif *vif, int cid);
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
int wil_bcast_init(struct wil6210_vif *vif);
void wil_bcast_fini(struct wil6210_vif *vif);
void wil_bcast_fini_all(struct wil6210_priv *wil);
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool should_stop);
+ struct wil_ring *ring, bool should_stop);
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop);
+ struct wil_ring *ring, bool check_stop);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_vif *vif, int ringid);
void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
/* RX API */
void wil_rx_handle(struct wil6210_priv *wil, int *quota);
void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
@@ -1127,7 +1339,6 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
bool wil_is_tx_idle(struct wil6210_priv *wil);
-bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
@@ -1141,5 +1352,22 @@ int wmi_start_sched_scan(struct wil6210_priv *wil,
struct cfg80211_sched_scan_request *request);
int wmi_stop_sched_scan(struct wil6210_priv *wil);
int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms);
+
+int reverse_memcmp(const void *cs, const void *ct, size_t count);
+
+/* WMI for enhanced DMA */
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
+ u16 max_rx_pl_per_desc);
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid);
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1ed330674d9b..dc33a0b4c3fa 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
- if (!map->fw)
+ if (!map->crash_dump)
continue;
if (map->host < host_min)
@@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
- if (!map->fw)
+ if (!map->crash_dump)
continue;
data = (void * __force)wil->csr + HOSTADDR(map->host);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 177026e5323b..bca090611477 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -29,6 +29,7 @@ enum wil_platform_event {
enum wil_platform_features {
WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+ WIL_PLATFORM_FEATURE_TRIPLE_MSI = 1,
WIL_PLATFORM_FEATURE_MAX,
};
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 5d991243cdb5..42c02a20ec97 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -89,28 +89,28 @@ MODULE_PARM_DESC(led_id,
*/
const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
- {0x000000, 0x040000, 0x8c0000, "fw_code", true},
+ {0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
/* FW data RAM 32k */
- {0x800000, 0x808000, 0x900000, "fw_data", true},
+ {0x800000, 0x808000, 0x900000, "fw_data", true, true},
/* periph data 128k */
- {0x840000, 0x860000, 0x908000, "fw_peri", true},
+ {0x840000, 0x860000, 0x908000, "fw_peri", true, true},
/* various RGF 40k */
- {0x880000, 0x88a000, 0x880000, "rgf", true},
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
- {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
- {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 512b */
- {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true},
+ {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
/* upper area 548k */
- {0x8c0000, 0x949000, 0x8c0000, "upper", true},
+ {0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 128k */
- {0x000000, 0x020000, 0x920000, "uc_code", false},
+ {0x000000, 0x020000, 0x920000, "uc_code", false, false},
/* ucode data RAM 16k */
- {0x800000, 0x804000, 0x940000, "uc_data", false},
+ {0x800000, 0x804000, 0x940000, "uc_data", false, false},
};
/**
@@ -118,7 +118,7 @@ const struct fw_map sparrow_fw_mapping[] = {
* it is a bit larger to support extra features
*/
const struct fw_map sparrow_d0_mac_rgf_ext = {
- 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+ 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
};
/**
@@ -134,34 +134,89 @@ const struct fw_map sparrow_d0_mac_rgf_ext = {
*/
const struct fw_map talyn_fw_mapping[] = {
/* FW code RAM 1M */
- {0x000000, 0x100000, 0x900000, "fw_code", true},
+ {0x000000, 0x100000, 0x900000, "fw_code", true, true},
/* FW data RAM 128k */
- {0x800000, 0x820000, 0xa00000, "fw_data", true},
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
/* periph. data RAM 96k */
- {0x840000, 0x858000, 0xa20000, "fw_peri", true},
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
/* various RGF 40k */
- {0x880000, 0x88a000, 0x880000, "rgf", true},
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
- {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
- {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 1344b */
- {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+ {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
/* ext USER RGF 4k */
- {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
/* OTP 4k */
- {0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
/* DMA EXT RGF 64k */
- {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
/* upper area 1536k */
- {0x900000, 0xa80000, 0x900000, "upper", true},
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 256k */
- {0x000000, 0x040000, 0xa38000, "uc_code", false},
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
/* ucode data RAM 32k */
- {0x800000, 0x808000, 0xa78000, "uc_data", false},
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
+};
+
+/**
+ * @talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
+ *
+ * array size should be in sync with the declaration in wil6210.h
+ *
+ * Talyn MB memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_mb_fw_mapping[] = {
+ /* FW code RAM 768k */
+ {0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
+ /* mac_ext_rgf 2256b */
+ {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
+ /* SEC PKA 16k */
+ {0x890000, 0x894000, 0x890000, "sec_pka", true, true},
+ /* SEC KDF RGF 3096b */
+ {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
+ /* SEC MAIN 2124b */
+ {0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
+ /* DUM USER RGF 528b */
+ {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
+ /* DMA OFU 296b */
+ {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
+ /* ucode debug 4k */
+ {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
@@ -365,14 +420,16 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_DEL_STA_CMD";
case WMI_DISCONNECT_STA_CMDID:
return "WMI_DISCONNECT_STA_CMD";
- case WMI_VRING_BA_EN_CMDID:
- return "WMI_VRING_BA_EN_CMD";
- case WMI_VRING_BA_DIS_CMDID:
- return "WMI_VRING_BA_DIS_CMD";
+ case WMI_RING_BA_EN_CMDID:
+ return "WMI_RING_BA_EN_CMD";
+ case WMI_RING_BA_DIS_CMDID:
+ return "WMI_RING_BA_DIS_CMD";
case WMI_RCP_DELBA_CMDID:
return "WMI_RCP_DELBA_CMD";
case WMI_RCP_ADDBA_RESP_CMDID:
return "WMI_RCP_ADDBA_RESP_CMD";
+ case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
+ return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
case WMI_PS_DEV_PROFILE_CFG_CMDID:
return "WMI_PS_DEV_PROFILE_CFG_CMD";
case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
@@ -395,6 +452,22 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_START_SCHED_SCAN_CMD";
case WMI_STOP_SCHED_SCAN_CMDID:
return "WMI_STOP_SCHED_SCAN_CMD";
+ case WMI_TX_STATUS_RING_ADD_CMDID:
+ return "WMI_TX_STATUS_RING_ADD_CMD";
+ case WMI_RX_STATUS_RING_ADD_CMDID:
+ return "WMI_RX_STATUS_RING_ADD_CMD";
+ case WMI_TX_DESC_RING_ADD_CMDID:
+ return "WMI_TX_DESC_RING_ADD_CMD";
+ case WMI_RX_DESC_RING_ADD_CMDID:
+ return "WMI_RX_DESC_RING_ADD_CMD";
+ case WMI_BCAST_DESC_RING_ADD_CMDID:
+ return "WMI_BCAST_DESC_RING_ADD_CMD";
+ case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
+ case WMI_LINK_STATS_CMDID:
+ return "WMI_LINK_STATS_CMD";
+ case WMI_SW_TX_REQ_EXT_CMDID:
+ return "WMI_SW_TX_REQ_EXT_CMDID";
default:
return "Untracked CMD";
}
@@ -449,8 +522,8 @@ static const char *eventid2name(u16 eventid)
return "WMI_RCP_ADDBA_REQ_EVENT";
case WMI_DELBA_EVENTID:
return "WMI_DELBA_EVENT";
- case WMI_VRING_EN_EVENTID:
- return "WMI_VRING_EN_EVENT";
+ case WMI_RING_EN_EVENTID:
+ return "WMI_RING_EN_EVENT";
case WMI_DATA_PORT_OPEN_EVENTID:
return "WMI_DATA_PORT_OPEN_EVENT";
case WMI_AOA_MEAS_EVENTID:
@@ -519,6 +592,20 @@ static const char *eventid2name(u16 eventid)
return "WMI_STOP_SCHED_SCAN_EVENT";
case WMI_SCHED_SCAN_RESULT_EVENTID:
return "WMI_SCHED_SCAN_RESULT_EVENT";
+ case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
+ case WMI_LINK_STATS_CONFIG_DONE_EVENTID:
+ return "WMI_LINK_STATS_CONFIG_DONE_EVENT";
+ case WMI_LINK_STATS_EVENTID:
+ return "WMI_LINK_STATS_EVENT";
default:
return "Untracked EVENT";
}
@@ -906,7 +993,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
wil->sta[evt->cid].mid = vif->mid;
wil->sta[evt->cid].status = wil_sta_conn_pending;
- rc = wil_tx_init(vif, evt->cid);
+ rc = wil_ring_init_tx(vif, evt->cid);
if (rc) {
wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
evt->cid, rc);
@@ -1063,16 +1150,16 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
}
}
-static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
+static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- struct wmi_vring_en_event *evt = d;
- u8 vri = evt->vring_index;
+ struct wmi_ring_en_event *evt = d;
+ u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
- if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+ if (vri >= ARRAY_SIZE(wil->ring_tx)) {
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
@@ -1081,8 +1168,8 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
/* in AP mode with disable_ap_sme, this is done by
* wil_cfg80211_change_station()
*/
- wil->vring_tx_data[vri].dot1x_open = true;
- if (vri == vif->bcast_vring) /* no BA for bcast */
+ wil->ring_tx_data[vri].dot1x_open = true;
+ if (vri == vif->bcast_ring) /* no BA for bcast */
return;
if (agg_wsize >= 0)
wil_addba_tx_request(wil, vri, agg_wsize);
@@ -1093,7 +1180,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_ba_status_event *evt = d;
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
evt->ringid,
@@ -1112,7 +1199,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
evt->amsdu = 0;
}
- txdata = &wil->vring_tx_data[evt->ringid];
+ txdata = &wil->ring_tx_data[evt->ringid];
txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
txdata->agg_wsize = evt->agg_wsize;
@@ -1150,11 +1237,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (!evt->from_initiator) {
int i;
/* find Tx vring it belongs to */
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
- if ((wil->vring2cid_tid[i][0] == cid) &&
- (wil->vring2cid_tid[i][1] == tid)) {
- struct vring_tx_data *txdata =
- &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (wil->ring2cid_tid[i][0] == cid &&
+ wil->ring2cid_tid[i][1] == tid) {
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[i];
wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
txdata->agg_timeout = 0;
@@ -1164,7 +1251,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
break; /* max. 1 matching ring */
}
}
- if (i >= ARRAY_SIZE(wil->vring2cid_tid))
+ if (i >= ARRAY_SIZE(wil->ring2cid_tid))
wil_err(wil, "DELBA: unable to find Tx vring\n");
return;
}
@@ -1250,6 +1337,130 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
cfg80211_sched_scan_results(wiphy, 0);
}
+static void wil_link_stats_store_basic(struct wil6210_vif *vif,
+ struct wmi_link_stats_basic *basic)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 cid = basic->cid;
+ struct wil_sta_info *sta;
+
+ if (cid >= WIL6210_MAX_CID) { /* cid is u8, cannot be negative */
+ wil_err(wil, "invalid cid %d\n", cid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ sta->fw_stats_basic = *basic;
+}
+
+static void wil_link_stats_store_global(struct wil6210_vif *vif,
+ struct wmi_link_stats_global *global)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil->fw_stats_global.stats = *global;
+}
+
+static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
+ bool has_next, void *payload,
+ size_t payload_size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ size_t hdr_size = sizeof(struct wmi_link_stats_record);
+ size_t stats_size, record_size, expected_size;
+ struct wmi_link_stats_record *hdr;
+
+ if (payload_size < hdr_size) {
+ wil_err(wil, "link stats wrong event size %zu\n", payload_size);
+ return;
+ }
+
+ while (payload_size >= hdr_size) {
+ hdr = payload;
+ stats_size = le16_to_cpu(hdr->record_size);
+ record_size = hdr_size + stats_size;
+
+ if (payload_size < record_size) {
+ wil_err(wil, "link stats payload ended unexpectedly, size %zu < %zu\n",
+ payload_size, record_size);
+ return;
+ }
+
+ switch (hdr->record_type_id) {
+ case WMI_LINK_STATS_TYPE_BASIC:
+ expected_size = sizeof(struct wmi_link_stats_basic);
+ if (stats_size < expected_size) {
+ wil_err(wil, "link stats invalid basic record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+ if (vif->fw_stats_ready) {
+ /* clean old statistics */
+ vif->fw_stats_tsf = 0;
+ vif->fw_stats_ready = 0;
+ }
+
+ wil_link_stats_store_basic(vif, payload + hdr_size);
+
+ if (!has_next) {
+ vif->fw_stats_tsf = tsf;
+ vif->fw_stats_ready = 1;
+ }
+
+ break;
+ case WMI_LINK_STATS_TYPE_GLOBAL:
+ expected_size = sizeof(struct wmi_link_stats_global);
+ if (stats_size < expected_size) {
+ wil_err(wil, "link stats invalid global record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+
+ if (wil->fw_stats_global.ready) {
+ /* clean old statistics */
+ wil->fw_stats_global.tsf = 0;
+ wil->fw_stats_global.ready = 0;
+ }
+
+ wil_link_stats_store_global(vif, payload + hdr_size);
+
+ if (!has_next) {
+ wil->fw_stats_global.tsf = tsf;
+ wil->fw_stats_global.ready = 1;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ /* skip to next record */
+ payload += record_size;
+ payload_size -= record_size;
+ }
+}
+
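The payload parsed above is a concatenation of variable-size records, each prefixed by a wmi_link_stats_record header carrying record_type_id and record_size; schematically (field names per the code, layout illustrative):

	| hdr | basic stats | hdr | global stats | hdr | ... |

The loop consumes one full record per iteration and bails out early if the remaining bytes cannot hold the advertised record.
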
+static void
+wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_event *evt = d;
+ size_t payload_size;
+
+ if (len < offsetof(struct wmi_link_stats_event, payload)) {
+ wil_err(wil, "stats event way too short %d\n", len);
+ return;
+ }
+ payload_size = le16_to_cpu(evt->payload_size);
+ if (len < sizeof(struct wmi_link_stats_event) + payload_size) {
+ wil_err(wil, "stats event too short %d\n", len);
+ return;
+ }
+
+ wmi_link_stats_parse(vif, le64_to_cpu(evt->tsf), evt->has_next,
+ evt->payload, payload_size);
+}
+
/**
* Some events are ignored for purpose; and need not be interpreted as
* "unhandled events"
@@ -1277,9 +1488,10 @@ static const struct {
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
- {WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
+ {WMI_RING_EN_EVENTID, wmi_evt_ring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
{WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
+ {WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
};
/*
@@ -1909,7 +2121,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
return rc;
}
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
@@ -2063,29 +2275,32 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout)
{
- struct wmi_vring_ba_en_cmd cmd = {
- .ringid = ringid,
+ u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en;
+ struct wmi_ring_ba_en_cmd cmd = {
+ .ring_id = ringid,
.agg_max_wsize = size,
.ba_timeout = cpu_to_le16(timeout),
- .amsdu = 0,
+ .amsdu = amsdu,
};
- wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
- timeout);
+ wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
+ ringid, size, timeout, amsdu);
- return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
+ return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
{
- struct wmi_vring_ba_dis_cmd cmd = {
- .ringid = ringid,
+ struct wmi_ring_ba_dis_cmd cmd = {
+ .ring_id = ringid,
.reason = cpu_to_le16(reason),
};
wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
- return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
+ return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
@@ -2146,6 +2361,54 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
return rc;
}
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 token, u16 status, bool amsdu, u16 agg_wsize,
+ u16 timeout)
+{
+ int rc;
+ struct wmi_rcp_addba_resp_edma_cmd cmd = {
+ .cid = cid,
+ .tid = tid,
+ .dialog_token = token,
+ .status_code = cpu_to_le16(status),
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+ (agg_wsize << 6)),
+ .ba_timeout = cpu_to_le16(timeout),
+ /* route all the connections to status ring 0 */
+ .status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_rcp_addba_resp_sent_event evt;
+ } __packed reply = {
+ .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
+ };
+
+ wil_dbg_wmi(wil,
+ "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
+ cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+
+ rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
+ sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status) {
+ wil_err(wil, "ADDBA response failed with status %d\n",
+ le16_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
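For concreteness, the ba_param_set packing above gives, e.g. for amsdu = true, tid = 3 and agg_wsize = 64: 0x1 | (3 << 2) | (64 << 6) = 0x100d.
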
int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile)
{
@@ -2852,3 +3115,351 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
return rc;
}
+
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms)
+{
+ size_t total;
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_ext_cmd *cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ wil_dbg_wmi(wil, "mgmt_tx_ext mid %d channel %d duration %d\n",
+ vif->mid, channel, duration_ms);
+ wil_hex_dump_wmi("mgmt_tx_ext frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+
+ if (len < sizeof(struct ieee80211_hdr_3addr)) {
+ wil_err(wil, "short frame. len %zu\n", len);
+ return -EINVAL;
+ }
+
+ total = sizeof(*cmd) + len;
+ if (total < len) {
+ wil_err(wil, "mgmt_tx_ext invalid len %zu\n", len);
+ return -EINVAL;
+ }
+
+ cmd = kzalloc(total, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+ cmd->channel = channel - 1;
+ cmd->duration_ms = cpu_to_le16(duration_ms);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "mgmt_tx_ext failed with status %d\n",
+ evt.evt.status);
+ rc = -EINVAL;
+ }
+
+ kfree(cmd);
+
+ return rc;
+}
+
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
+{
+ int rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ struct wmi_tx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ },
+ .irq_index = WIL_TX_STATUS_IRQ_IDX
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_id = ring_id;
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct wmi_cfg_def_rx_offload_cmd cmd = {
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
+ .max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .l2_802_3_offload_ctrl = 0,
+ .l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_cfg_def_rx_offload_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+ struct wmi_rx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ .ring_id = ring_id,
+ },
+ .rx_msg_type = wil->use_compressed_rx_status ?
+ WMI_RX_MSG_TYPE_COMPRESSED :
+ WMI_RX_MSG_TYPE_EXTENDED,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ struct wmi_rx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = WIL_RX_DESC_RING_ID,
+ },
+ .status_ring_id = status_ring_id,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
+ rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+ struct wmi_tx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .status_ring_id = sring_id,
+ .cid = cid,
+ .tid = tid,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ }
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
+
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wmi_bcast_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .status_ring_id = wil->tx_sring_idx,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Broadcast Tx config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
+
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_cmd cmd = {
+ .record_type_mask = cpu_to_le32(type),
+ .cid = cid,
+ .action = WMI_LINK_STATS_SNAPSHOT,
+ .interval_msec = cpu_to_le32(interval),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_link_stats_config_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ rc = wmi_call(wil, WMI_LINK_STATS_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_LINK_STATS_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Link statistics config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
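
The wmi.c helpers above all follow one defensive idiom: the reply buffer's event status is pre-initialized to WMI_FW_STATUS_FAILURE, so a reply that never arrives, or arrives truncated, reads as a failure instead of stale success, and wmi_call() only overwrites it with the firmware's real answer. A minimal, hypothetical caller of wmi_link_stats_cfg() using the enums added to wmi.h below (error handling is illustrative, not driver code; the interval argument is presumably ignored for one-shot snapshots since the command hard-codes WMI_LINK_STATS_SNAPSHOT):

	int rc;

	/* request a one-shot snapshot of the basic per-CID statistics;
	 * per the wmi.h comment, cid 0xff selects all connected stations
	 */
	rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC,
				0xff /* all cids */, 0 /* interval */);
	if (rc)
		wil_err(wil, "link stats snapshot failed, rc %d\n", rc);
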
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index dc503d903786..139acb2caf92 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -53,6 +53,17 @@
* must always be kept equal to (WMI_RF_RX2TX_LENGTH+1)
*/
#define WMI_RF_RX2TX_CONF_LENGTH (4)
+/* QoS configuration */
+#define WMI_QOS_NUM_OF_PRIORITY (4)
+#define WMI_QOS_MIN_DEFAULT_WEIGHT (10)
+#define WMI_QOS_VRING_SLOT_MIN_MS (2)
+#define WMI_QOS_VRING_SLOT_MAX_MS (10)
+/* (WMI_QOS_MIN_DEFAULT_WEIGHT * WMI_QOS_VRING_SLOT_MAX_MS /
+ * WMI_QOS_VRING_SLOT_MIN_MS)
+ */
+#define WMI_QOS_MAX_WEIGHT 50
+#define WMI_QOS_SET_VIF_PRIORITY (0xFF)
+#define WMI_QOS_DEFAULT_PRIORITY (WMI_QOS_NUM_OF_PRIORITY)
/* Mailbox interface
* used for commands and events
@@ -86,6 +97,12 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_PNO = 15,
WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19,
+ WMI_FW_CAPABILITY_MULTI_VIFS = 20,
+ WMI_FW_CAPABILITY_FT_ROAMING = 21,
+ WMI_FW_CAPABILITY_BACK_WIN_SIZE_64 = 22,
+ WMI_FW_CAPABILITY_AMSDU = 23,
+ WMI_FW_CAPABILITY_RAW_MODE = 24,
+ WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
WMI_FW_CAPABILITY_MAX,
};
@@ -109,6 +126,9 @@ enum wmi_command_id {
WMI_SET_PROBED_SSID_CMDID = 0x0A,
/* deprecated */
WMI_SET_LISTEN_INT_CMDID = 0x0B,
+ WMI_FT_AUTH_CMDID = 0x0C,
+ WMI_FT_REASSOC_CMDID = 0x0D,
+ WMI_UPDATE_FT_IES_CMDID = 0x0E,
WMI_BCON_CTRL_CMDID = 0x0F,
WMI_ADD_CIPHER_KEY_CMDID = 0x16,
WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
@@ -117,6 +137,12 @@ enum wmi_command_id {
WMI_SET_WSC_STATUS_CMDID = 0x41,
WMI_PXMT_RANGE_CFG_CMDID = 0x42,
WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
+ WMI_RADAR_GENERAL_CONFIG_CMDID = 0x100,
+ WMI_RADAR_CONFIG_SELECT_CMDID = 0x101,
+ WMI_RADAR_PARAMS_CONFIG_CMDID = 0x102,
+ WMI_RADAR_SET_MODE_CMDID = 0x103,
+ WMI_RADAR_CONTROL_CMDID = 0x104,
+ WMI_RADAR_PCI_CONTROL_CMDID = 0x105,
WMI_MEM_READ_CMDID = 0x800,
WMI_MEM_WR_CMDID = 0x801,
WMI_ECHO_CMDID = 0x803,
@@ -148,8 +174,8 @@ enum wmi_command_id {
WMI_CFG_RX_CHAIN_CMDID = 0x820,
WMI_VRING_CFG_CMDID = 0x821,
WMI_BCAST_VRING_CFG_CMDID = 0x822,
- WMI_VRING_BA_EN_CMDID = 0x823,
- WMI_VRING_BA_DIS_CMDID = 0x824,
+ WMI_RING_BA_EN_CMDID = 0x823,
+ WMI_RING_BA_DIS_CMDID = 0x824,
WMI_RCP_ADDBA_RESP_CMDID = 0x825,
WMI_RCP_DELBA_CMDID = 0x826,
WMI_SET_SSID_CMDID = 0x827,
@@ -157,12 +183,17 @@ enum wmi_command_id {
WMI_SET_PCP_CHANNEL_CMDID = 0x829,
WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
WMI_SW_TX_REQ_CMDID = 0x82B,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
+ WMI_SW_TX_REQ_EXT_CMDID = 0x82C,
WMI_MLME_PUSH_CMDID = 0x835,
WMI_BEAMFORMING_MGMT_CMDID = 0x836,
WMI_BF_TXSS_MGMT_CMDID = 0x837,
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
@@ -205,7 +236,12 @@ enum wmi_command_id {
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
/* Power Save Configuration Commands */
WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ WMI_RS_ENABLE_CMDID = 0x91E,
+ WMI_RS_CFG_EX_CMDID = 0x91F,
+ WMI_GET_DETAILED_RS_RES_EX_CMDID = 0x920,
+ /* deprecated */
WMI_RS_CFG_CMDID = 0x921,
+ /* deprecated */
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924,
@@ -234,7 +270,15 @@ enum wmi_command_id {
WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5,
WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
+ /* deprecated */
WMI_BF_CONTROL_CMDID = 0x9AA,
+ WMI_BF_CONTROL_EX_CMDID = 0x9AB,
+ WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0,
+ WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1,
+ WMI_TX_DESC_RING_ADD_CMDID = 0x9C2,
+ WMI_RX_DESC_RING_ADD_CMDID = 0x9C3,
+ WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4,
+ WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5,
WMI_SCHEDULING_SCHEME_CMDID = 0xA01,
WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02,
WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03,
@@ -244,6 +288,11 @@ enum wmi_command_id {
WMI_GET_CCA_INDICATIONS_CMDID = 0xA07,
WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08,
WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B,
+ WMI_LINK_STATS_CMDID = 0xA0C,
+ WMI_SET_GRANT_MCS_CMDID = 0xA0E,
+ WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
+ WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -442,6 +491,30 @@ struct wmi_start_sched_scan_cmd {
struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
} __packed;
+/* WMI_FT_AUTH_CMDID */
+struct wmi_ft_auth_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_FT_REASSOC_CMDID */
+struct wmi_ft_reassoc_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+/* WMI_UPDATE_FT_IES_CMDID */
+struct wmi_update_ft_ies_cmd {
+ /* Length of the FT IEs */
+ __le16 ie_len;
+ u8 reserved[2];
+ u8 ie_info[0];
+} __packed;
+
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
@@ -498,6 +571,109 @@ struct wmi_pxmt_snr2_range_cfg_cmd {
s8 snr2range_arr[2];
} __packed;
+/* WMI_RADAR_GENERAL_CONFIG_CMDID */
+struct wmi_radar_general_config_cmd {
+ /* Number of pulses (CIRs) in FW FIFO to initiate pulse transfer
+ * from FW to Host
+ */
+ __le32 fifo_watermark;
+ /* In units of us, in the range [100, 1000000] */
+ __le32 t_burst;
+ /* Valid in the range [1, 32768], 0xFFFF means infinite */
+ __le32 n_bursts;
+ /* In units of the 330MHz clk, in the range [4, 2000]*330 */
+ __le32 t_pulse;
+ /* In the range of [1,4096] */
+ __le16 n_pulses;
+ /* Number of taps after cTap per CIR */
+ __le16 n_samples;
+ /* Offset from the main tap (0 = zero-distance). In the range of [0,
+ * 255]
+ */
+ u8 first_sample_offset;
+ /* Number of Pulses to average, 1, 2, 4, 8 */
+ u8 pulses_to_avg;
+ /* Number of adjacent taps to average, 1, 2, 4, 8 */
+ u8 samples_to_avg;
+ /* The index to config general params */
+ u8 general_index;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_CMDID */
+struct wmi_radar_config_select_cmd {
+ /* Select the general params index to use */
+ u8 general_index;
+ u8 reserved[3];
+ /* 0 means don't update burst_active_vector */
+ __le32 burst_active_vector;
+ /* 0 means don't update pulse_active_vector */
+ __le32 pulse_active_vector;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_CMDID */
+struct wmi_radar_params_config_cmd {
+ /* The burst index selected to config */
+ u8 burst_index;
+ /* 0-not active, 1-active */
+ u8 burst_en;
+ /* The pulse index selected to config */
+ u8 pulse_index;
+ /* 0-not active, 1-active */
+ u8 pulse_en;
+ /* TX RF to use on current pulse */
+ u8 tx_rfc_idx;
+ u8 tx_sector;
+ /* Offset from calibrated value (expected to be 0); the value is a
+ * row in the Gain-LUT, not dB
+ */
+ s8 tx_rf_gain_comp;
+ /* expected to be 0 */
+ s8 tx_bb_gain_comp;
+ /* RX RF to use on current pulse */
+ u8 rx_rfc_idx;
+ u8 rx_sector;
+ /* Offset from calibrated value (expected to be 0); the value is a
+ * row in the Gain-LUT, not dB
+ */
+ s8 rx_rf_gain_comp;
+ /* Value in dB.(expected to be 0) */
+ s8 rx_bb_gain_comp;
+ /* Offset from calibrated value.(expected to be 0) */
+ s8 rx_timing_offset;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_CMDID */
+struct wmi_radar_set_mode_cmd {
+ /* 0-disable/1-enable */
+ u8 enable;
+ /* enum wmi_channel */
+ u8 channel;
+ /* In the range of [0,7], 0xff means use default */
+ u8 tx_rfc_idx;
+ /* In the range of [0,7], 0xff means use default */
+ u8 rx_rfc_idx;
+} __packed;
+
+/* WMI_RADAR_CONTROL_CMDID */
+struct wmi_radar_control_cmd {
+ /* 0-stop/1-start */
+ u8 start;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_CMDID */
+struct wmi_radar_pci_control_cmd {
+ /* pcie host buffer start address */
+ __le64 base_addr;
+ /* pcie host control block address */
+ __le64 control_block_addr;
+ /* pcie host buffer size */
+ __le32 buffer_size;
+ __le32 reserved;
+} __packed;
+
/* WMI_RF_MGMT_CMDID */
enum wmi_rf_mgmt_type {
WMI_RF_MGMT_W_DISABLE = 0x00,
@@ -635,12 +811,18 @@ struct wmi_pcp_start_cmd {
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
u8 is_go;
- u8 reserved0[5];
+ /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */
+ u8 edmg_channel;
+ u8 raw_mode;
+ u8 reserved[3];
/* A-BFT length override if non-0 */
u8 abft_len;
/* enum wmi_ap_sme_offload_mode_e */
u8 ap_sme_offload_mode;
u8 network_type;
+ /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is
+ * the primary channel number
+ */
u8 channel;
u8 disable_sec_offload;
u8 disable_sec;
@@ -653,6 +835,17 @@ struct wmi_sw_tx_req_cmd {
u8 payload[0];
} __packed;
+/* WMI_SW_TX_REQ_EXT_CMDID */
+struct wmi_sw_tx_req_ext_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ __le16 duration_ms;
+ /* Channel to use, 0xFF for currently active channel */
+ u8 channel;
+ u8 reserved[5];
+ u8 payload[0];
+} __packed;
+
/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */
struct wmi_vring_switch_timing_config_cmd {
/* Set vring timing configuration:
@@ -679,6 +872,7 @@ struct wmi_vring_cfg_schd {
enum wmi_vring_cfg_encap_trans_type {
WMI_VRING_ENC_TYPE_802_3 = 0x00,
WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01,
+ WMI_VRING_ENC_TYPE_NONE = 0x02,
};
enum wmi_vring_cfg_ds_cfg {
@@ -736,7 +930,11 @@ struct wmi_vring_cfg {
u8 cid;
/* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
u8 tid;
- u8 reserved[2];
+ /* Update the vring's priority for QoS purposes. Set to
+ * WMI_QOS_DEFAULT_PRIORITY to use MID's QoS priority
+ */
+ u8 qos_priority;
+ u8 reserved;
} __packed;
enum wmi_vring_cfg_cmd_action {
@@ -767,6 +965,78 @@ struct wmi_bcast_vring_cfg_cmd {
struct wmi_bcast_vring_cfg vring_cfg;
} __packed;
+struct wmi_edma_ring_cfg {
+ __le64 ring_mem_base;
+ /* size in number of items */
+ __le16 ring_size;
+ u8 ring_id;
+ u8 reserved;
+} __packed;
+
+enum wmi_rx_msg_type {
+ WMI_RX_MSG_TYPE_COMPRESSED = 0x00,
+ WMI_RX_MSG_TYPE_EXTENDED = 0x01,
+};
+
+struct wmi_tx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_rx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* wmi_rx_msg_type */
+ u8 rx_msg_type;
+ u8 reserved[2];
+} __packed;
+
+struct wmi_cfg_def_rx_offload_cmd {
+ __le16 max_msdu_size;
+ __le16 max_rx_pl_per_desc;
+ u8 decap_trans_type;
+ u8 l2_802_3_offload_ctrl;
+ u8 l2_nwifi_offload_ctrl;
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+ u8 l3_l4_ctrl;
+ u8 reserved[6];
+} __packed;
+
+struct wmi_tx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 cid;
+ u8 tid;
+ u8 encap_trans_type;
+ u8 mac_ctrl;
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ u8 reserved[3];
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+struct wmi_rx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* 0-63 status rings */
+ u8 status_ring_id;
+ u8 reserved[2];
+ __le64 sw_tail_host_addr;
+} __packed;
+
+struct wmi_bcast_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 encap_trans_type;
+ u8 reserved[4];
+} __packed;
+
/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */
struct wmi_lo_power_calib_from_otp_cmd {
/* index to read from OTP. zero based */
@@ -781,18 +1051,18 @@ struct wmi_lo_power_calib_from_otp_event {
u8 reserved[3];
} __packed;
-/* WMI_VRING_BA_EN_CMDID */
-struct wmi_vring_ba_en_cmd {
- u8 ringid;
+/* WMI_RING_BA_EN_CMDID */
+struct wmi_ring_ba_en_cmd {
+ u8 ring_id;
u8 agg_max_wsize;
__le16 ba_timeout;
u8 amsdu;
u8 reserved[3];
} __packed;
-/* WMI_VRING_BA_DIS_CMDID */
-struct wmi_vring_ba_dis_cmd {
- u8 ringid;
+/* WMI_RING_BA_DIS_CMDID */
+struct wmi_ring_ba_dis_cmd {
+ u8 ring_id;
u8 reserved;
__le16 reason;
} __packed;
@@ -950,6 +1220,21 @@ struct wmi_rcp_addba_resp_cmd {
u8 reserved[2];
} __packed;
+/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
+struct wmi_rcp_addba_resp_edma_cmd {
+ u8 cid;
+ u8 tid;
+ u8 dialog_token;
+ u8 reserved;
+ __le16 status_code;
+ /* ieee80211_ba_parameterset field to send */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ u8 status_ring_id;
+ /* wmi_cfg_rx_chain_cmd_reorder_type */
+ u8 reorder_type;
+} __packed;
+
/* WMI_RCP_DELBA_CMDID */
struct wmi_rcp_delba_cmd {
/* Used for cid less than 8. For higher cid set
@@ -999,8 +1284,8 @@ struct wmi_echo_cmd {
} __packed;
/* WMI_DEEP_ECHO_CMDID
- * Check FW and ucode are alive
- * Returned event: WMI_ECHO_RSP_EVENTID
+ * Check FW and uCode are alive
+ * Returned event: WMI_DEEP_ECHO_RSP_EVENTID
*/
struct wmi_deep_echo_cmd {
__le32 value;
@@ -1324,6 +1609,10 @@ struct wmi_fixed_scheduling_config_complete_event {
u8 reserved[3];
} __packed;
+/* This value exists for backwards compatibility only.
+ * Do not use it in new commands.
+ * Use dynamic arrays where possible.
+ */
#define WMI_NUM_MCS (13)
/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */
@@ -1371,6 +1660,52 @@ struct wmi_set_multi_directed_omnis_config_event {
u8 reserved[3];
} __packed;
+/* WMI_RADAR_GENERAL_CONFIG_EVENTID */
+struct wmi_radar_general_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_EVENTID */
+struct wmi_radar_config_select_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ /* In unit of bytes */
+ __le32 fifo_size;
+ /* In unit of bytes */
+ __le32 pulse_size;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_EVENTID */
+struct wmi_radar_params_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_EVENTID */
+struct wmi_radar_set_mode_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONTROL_EVENTID */
+struct wmi_radar_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_EVENTID */
+struct wmi_radar_pci_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* WMI_SET_LONG_RANGE_CONFIG_CMDID */
struct wmi_set_long_range_config_cmd {
__le32 reserved;
@@ -1383,12 +1718,12 @@ struct wmi_set_long_range_config_complete_event {
u8 reserved[3];
} __packed;
-/* payload max size is 236 bytes: max event buffer size (256) - WMI headers
+/* payload max size is 1024 bytes: max event buffer size (1044) - WMI headers
* (16) - prev struct field size (4)
*/
-#define WMI_MAX_IOCTL_PAYLOAD_SIZE (236)
-#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (236)
-#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (236)
+#define WMI_MAX_IOCTL_PAYLOAD_SIZE (1024)
+#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (1024)
+#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (1024)
enum wmi_internal_fw_ioctl_code {
WMI_INTERNAL_FW_CODE_NONE = 0x0,
@@ -1428,7 +1763,37 @@ struct wmi_internal_fw_event_event {
__le32 payload[0];
} __packed;
-/* WMI_BF_CONTROL_CMDID */
+/* WMI_SET_VRING_PRIORITY_WEIGHT_CMDID */
+struct wmi_set_vring_priority_weight_cmd {
+ /* Array of weights. Valid values are
+ * WMI_QOS_MIN_DEFAULT_WEIGHT...WMI_QOS_MAX_WEIGHT. Weight #0 is
+ * hard-coded WMI_QOS_MIN_WEIGHT. This array provides the weights
+ * #1..#3
+ */
+ u8 weight[3];
+ u8 reserved;
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_vring_priority {
+ u8 vring_idx;
+ /* Weight index. Valid value is 0-3 */
+ u8 priority;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_set_vring_priority_cmd {
+ /* number of entries in vring_priority. Set to
+ * WMI_QOS_SET_VIF_PRIORITY to update the VIF's priority, and there
+ * will be only one entry in vring_priority
+ */
+ u8 num_of_vrings;
+ u8 reserved[3];
+ struct wmi_vring_priority vring_priority[0];
+} __packed;
+
+/* WMI_BF_CONTROL_CMDID - deprecated */
struct wmi_bf_control_cmd {
/* wmi_bf_triggers */
__le32 triggers;
@@ -1470,6 +1835,95 @@ struct wmi_bf_control_cmd {
u8 reserved2[2];
} __packed;
+/* BF configuration for each MCS */
+struct wmi_bf_control_ex_mcs {
+ /* Long term throughput threshold [Mbps] */
+ u8 long_term_mbps_th_tbl;
+ u8 reserved;
+ /* Long term timeout threshold table [msec] */
+ __le16 long_term_trig_timeout_per_mcs;
+} __packed;
+
+/* WMI_BF_CONTROL_EX_CMDID */
+struct wmi_bf_control_ex_cmd {
+ /* wmi_bf_triggers */
+ __le32 triggers;
+ /* enum wmi_edmg_tx_mode */
+ u8 tx_mode;
+ /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */
+ u8 txss_mode;
+ /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */
+ u8 brp_mode;
+ /* Max cts threshold (corresponds to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_thr;
+ /* Max cts threshold in dense (corresponds to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_dense_thr;
+ /* Max b-ack threshold (corresponds to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_thr;
+ /* Max b-ack threshold in dense (corresponds to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_dense_thr;
+ u8 reserved0;
+ /* Wrong sectors threshold */
+ __le32 wrong_sector_bis_thr;
+ /* BOOL to enable/disable long term trigger */
+ u8 long_term_enable;
+ /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and
+ * long_term_trig_timeout_per_mcs arrays, 0 = Ignore
+ */
+ u8 long_term_update_thr;
+ u8 each_mcs_cfg_size;
+ u8 reserved1;
+ /* Configuration for each MCS */
+ struct wmi_bf_control_ex_mcs each_mcs_cfg[0];
+} __packed;
+
+/* WMI_LINK_STATS_CMD */
+enum wmi_link_stats_action {
+ WMI_LINK_STATS_SNAPSHOT = 0x00,
+ WMI_LINK_STATS_PERIODIC = 0x01,
+ WMI_LINK_STATS_STOP_PERIODIC = 0x02,
+};
+
+/* WMI_LINK_STATS_EVENT record identifiers */
+enum wmi_link_stats_record_type {
+ WMI_LINK_STATS_TYPE_BASIC = 0x01,
+ WMI_LINK_STATS_TYPE_GLOBAL = 0x02,
+};
+
+/* WMI_LINK_STATS_CMDID */
+struct wmi_link_stats_cmd {
+ /* bitmask of required record types
+ * (wmi_link_stats_record_type_e)
+ */
+ __le32 record_type_mask;
+ /* 0xff for all cids */
+ u8 cid;
+ /* wmi_link_stats_action_e */
+ u8 action;
+ u8 reserved[6];
+ /* range = 100 - 10000 */
+ __le32 interval_msec;
+} __packed;
+
+/* WMI_SET_GRANT_MCS_CMDID */
+struct wmi_set_grant_mcs_cmd {
+ u8 mcs;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_CMDID */
+struct wmi_set_ap_slot_size_cmd {
+ __le32 slot_size;
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
@@ -1482,10 +1936,19 @@ enum wmi_event_id {
WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
+ WMI_FT_AUTH_STATUS_EVENTID = 0x100C,
+ WMI_FT_REASSOC_STATUS_EVENTID = 0x100D,
+ WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100,
+ WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101,
+ WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102,
+ WMI_RADAR_SET_MODE_EVENTID = 0x1103,
+ WMI_RADAR_CONTROL_EVENTID = 0x1104,
+ WMI_RADAR_PCI_CONTROL_EVENTID = 0x1105,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
WMI_FW_READY_EVENTID = 0x1801,
WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_DEEP_ECHO_RSP_EVENTID = 0x1804,
/* deprecated */
WMI_FS_TUNE_DONE_EVENTID = 0x180A,
/* deprecated */
@@ -1511,6 +1974,9 @@ enum wmi_event_id {
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
@@ -1535,7 +2001,7 @@ enum wmi_event_id {
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
- WMI_VRING_EN_EVENTID = 0x1865,
+ WMI_RING_EN_EVENTID = 0x1865,
WMI_GET_RF_STATUS_EVENTID = 0x1866,
WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868,
@@ -1558,7 +2024,12 @@ enum wmi_event_id {
WMI_PCP_FACTOR_EVENTID = 0x191A,
/* Power Save Configuration Events */
WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
+ WMI_RS_ENABLE_EVENTID = 0x191E,
+ WMI_RS_CFG_EX_EVENTID = 0x191F,
+ WMI_GET_DETAILED_RS_RES_EX_EVENTID = 0x1920,
+ /* deprecated */
WMI_RS_CFG_DONE_EVENTID = 0x1921,
+ /* deprecated */
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
@@ -1586,7 +2057,14 @@ enum wmi_event_id {
WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5,
WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
+ /* deprecated */
WMI_BF_CONTROL_EVENTID = 0x19AA,
+ WMI_BF_CONTROL_EX_EVENTID = 0x19AB,
+ WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0,
+ WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1,
+ WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2,
+ WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3,
+ WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5,
WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01,
WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02,
WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03,
@@ -1597,6 +2075,12 @@ enum wmi_event_id {
WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08,
WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A,
WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B,
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID = 0x1A0C,
+ WMI_LINK_STATS_EVENTID = 0x1A0D,
+ WMI_SET_GRANT_MCS_EVENTID = 0x1A0E,
+ WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
+ WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -1861,6 +2345,33 @@ struct wmi_scan_complete_event {
__le32 status;
} __packed;
+/* WMI_FT_AUTH_STATUS_EVENTID */
+struct wmi_ft_auth_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 ie_len;
+ u8 ie_info[0];
+} __packed;
+
+/* WMI_FT_REASSOC_STATUS_EVENTID */
+struct wmi_ft_reassoc_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* association id received from new AP */
+ u8 aid;
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 beacon_ie_len;
+ __le16 reassoc_req_ie_len;
+ __le16 reassoc_resp_ie_len;
+ u8 ie_info[0];
+} __packed;
+
/* wmi_rx_mgmt_info */
struct wmi_rx_mgmt_info {
u8 mcs;
@@ -1997,6 +2508,49 @@ struct wmi_rcp_addba_resp_sent_event {
u8 reserved2[2];
} __packed;
+/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_tx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_rx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
+struct wmi_cfg_def_rx_offload_done_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_tx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_rx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
/* WMI_RCP_ADDBA_REQ_EVENTID */
struct wmi_rcp_addba_req_event {
/* Used for cid less than 8. For higher cid set
@@ -2047,9 +2601,9 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
-/* WMI_VRING_EN_EVENTID */
-struct wmi_vring_en_event {
- u8 vring_index;
+/* WMI_RING_EN_EVENTID */
+struct wmi_ring_en_event {
+ u8 ring_index;
u8 reserved[3];
} __packed;
@@ -2174,6 +2728,11 @@ struct wmi_echo_rsp_event {
__le32 echoed_value;
} __packed;
+/* WMI_DEEP_ECHO_RSP_EVENTID */
+struct wmi_deep_echo_rsp_event {
+ __le32 echoed_value;
+} __packed;
+
/* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */
struct wmi_rf_pwr_on_delay_rsp_event {
/* wmi_fw_status */
@@ -2312,6 +2871,81 @@ struct wmi_rs_cfg {
__le32 mcs_en_vec;
} __packed;
+enum wmi_edmg_tx_mode {
+ WMI_TX_MODE_DMG = 0x0,
+ WMI_TX_MODE_EDMG_CB1 = 0x1,
+ WMI_TX_MODE_EDMG_CB2 = 0x2,
+ WMI_TX_MODE_EDMG_CB1_LONG_LDPC = 0x3,
+ WMI_TX_MODE_EDMG_CB2_LONG_LDPC = 0x4,
+ WMI_TX_MODE_MAX,
+};
+
+/* Rate search parameters common configuration */
+struct wmi_rs_cfg_ex_common {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ u8 reserved[3];
+ __le32 back_failure_mask;
+} __packed;
+
+/* Rate search parameters configuration per MCS */
+struct wmi_rs_cfg_ex_mcs {
+ /* The maximal allowed PER for each MCS
+ * MCS will be considered as failed if PER during RS is higher
+ */
+ u8 per_threshold;
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_CFG_EX_CMDID */
+struct wmi_rs_cfg_ex_cmd {
+ /* Configuration for all MCSs */
+ struct wmi_rs_cfg_ex_common common_cfg;
+ u8 each_mcs_cfg_size;
+ u8 reserved[3];
+ /* Configuration for each MCS */
+ struct wmi_rs_cfg_ex_mcs each_mcs_cfg[0];
+} __packed;
+
+/* WMI_RS_CFG_EX_EVENTID */
+struct wmi_rs_cfg_ex_event {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_ENABLE_CMDID */
+struct wmi_rs_enable_cmd {
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ u8 reserved[2];
+ __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_ENABLE_EVENTID */
+struct wmi_rs_enable_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* Slot types */
enum wmi_sched_scheme_slot_type {
WMI_SCHED_SLOT_SP = 0x0,
@@ -2404,7 +3038,7 @@ struct wmi_scheduling_scheme_event {
u8 reserved[1];
} __packed;
-/* WMI_RS_CFG_CMDID */
+/* WMI_RS_CFG_CMDID - deprecated */
struct wmi_rs_cfg_cmd {
/* connection id */
u8 cid;
@@ -2414,7 +3048,7 @@ struct wmi_rs_cfg_cmd {
struct wmi_rs_cfg rs_cfg;
} __packed;
-/* WMI_RS_CFG_DONE_EVENTID */
+/* WMI_RS_CFG_DONE_EVENTID - deprecated */
struct wmi_rs_cfg_done_event {
u8 cid;
/* enum wmi_fw_status */
@@ -2422,7 +3056,7 @@ struct wmi_rs_cfg_done_event {
u8 reserved[2];
} __packed;
-/* WMI_GET_DETAILED_RS_RES_CMDID */
+/* WMI_GET_DETAILED_RS_RES_CMDID - deprecated */
struct wmi_get_detailed_rs_res_cmd {
/* connection id */
u8 cid;
@@ -2447,7 +3081,7 @@ struct wmi_rs_results {
u8 mcs;
} __packed;
-/* WMI_GET_DETAILED_RS_RES_EVENTID */
+/* WMI_GET_DETAILED_RS_RES_EVENTID - deprecated */
struct wmi_get_detailed_rs_res_event {
u8 cid;
/* enum wmi_rs_results_status */
@@ -2457,6 +3091,45 @@ struct wmi_get_detailed_rs_res_event {
u8 reserved[3];
} __packed;
+/* WMI_GET_DETAILED_RS_RES_EX_CMDID */
+struct wmi_get_detailed_rs_res_ex_cmd {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_common {
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ u8 reserved[2];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_mcs {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt;
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EX_EVENTID */
+struct wmi_get_detailed_rs_res_ex_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ u8 reserved0[2];
+ struct wmi_rs_results_ex_common common_rs_results;
+ u8 each_mcs_results_size;
+ u8 reserved1[3];
+ /* Results for each MCS */
+ struct wmi_rs_results_ex_mcs each_mcs_results[0];
+} __packed;
+
/* BRP antenna limit mode */
enum wmi_brp_ant_limit_mode {
/* Disable BRP force antenna limit */
@@ -3207,13 +3880,20 @@ struct wmi_get_assoc_list_res_event {
u8 reserved[3];
} __packed;
-/* WMI_BF_CONTROL_EVENTID */
+/* WMI_BF_CONTROL_EVENTID - deprecated */
struct wmi_bf_control_event {
/* wmi_fw_status */
u8 status;
u8 reserved[3];
} __packed;
+/* WMI_BF_CONTROL_EX_EVENTID */
+struct wmi_bf_control_ex_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* WMI_COMMAND_NOT_SUPPORTED_EVENTID */
struct wmi_command_not_supported_event {
/* device id */
@@ -3283,4 +3963,96 @@ struct wmi_internal_fw_set_channel_event {
u8 reserved[3];
} __packed;
+/* WMI_LINK_STATS_CONFIG_DONE_EVENTID */
+struct wmi_link_stats_config_done_event {
+ /* wmi_fw_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_STATS_EVENTID */
+struct wmi_link_stats_event {
+ __le64 tsf;
+ __le16 payload_size;
+ u8 has_next;
+ u8 reserved[5];
+ /* a stream of wmi_link_stats_record_s */
+ u8 payload[0];
+} __packed;
+
+/* WMI_LINK_STATS_EVENT */
+struct wmi_link_stats_record {
+ /* wmi_link_stats_record_type_e */
+ u8 record_type_id;
+ u8 reserved;
+ __le16 record_size;
+ u8 record[0];
+} __packed;
+
+/* WMI_LINK_STATS_TYPE_BASIC */
+struct wmi_link_stats_basic {
+ u8 cid;
+ s8 rssi;
+ u8 sqi;
+ u8 bf_mcs;
+ u8 per_average;
+ u8 selected_rfc;
+ u8 rx_effective_ant_num;
+ u8 my_rx_sector;
+ u8 my_tx_sector;
+ u8 other_rx_sector;
+ u8 other_tx_sector;
+ u8 reserved[7];
+ /* 1/4 dB units */
+ __le16 snr;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le32 bf_count;
+ __le32 rx_bcast_frames;
+} __packed;
+
+/* WMI_LINK_STATS_TYPE_GLOBAL */
+struct wmi_link_stats_global {
+ /* all ack-able frames */
+ __le32 rx_frames;
+ /* all ack-able frames */
+ __le32 tx_frames;
+ __le32 rx_ba_frames;
+ __le32 tx_ba_frames;
+ __le32 tx_beacons;
+ __le32 rx_mic_errors;
+ __le32 rx_crc_errors;
+ __le32 tx_fail_no_ack;
+ u8 reserved[8];
+} __packed;
+
+/* WMI_SET_GRANT_MCS_EVENTID */
+struct wmi_set_grant_mcs_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_EVENTID */
+struct wmi_set_ap_slot_size_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID */
+struct wmi_set_vring_priority_weight_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_EVENTID */
+struct wmi_set_vring_priority_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
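
WMI_LINK_STATS_EVENTID delivers a self-describing stream: wmi_link_stats_event carries payload_size bytes of back-to-back wmi_link_stats_record entries, each prefixed with its record_type_id and record_size. A minimal sketch of walking that stream (hypothetical helper, with the bounds checks such a parser needs; the driver's real handler is not part of this hunk):

	static void parse_link_stats(struct wmi_link_stats_event *evt)
	{
		u16 payload_size = le16_to_cpu(evt->payload_size);
		u8 *p = evt->payload, *end = evt->payload + payload_size;

		while (p + sizeof(struct wmi_link_stats_record) <= end) {
			struct wmi_link_stats_record *rec = (void *)p;
			u16 rec_size = le16_to_cpu(rec->record_size);

			if (p + sizeof(*rec) + rec_size > end)
				break; /* truncated record, stop */
			switch (rec->record_type_id) {
			case WMI_LINK_STATS_TYPE_BASIC:
				/* rec->record is a wmi_link_stats_basic */
				break;
			case WMI_LINK_STATS_TYPE_GLOBAL:
				/* rec->record is a wmi_link_stats_global */
				break;
			}
			p += sizeof(*rec) + rec_size;
		}
	}
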
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index b01dc34d55af..74538085cfb7 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1399,6 +1399,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
return 0;
}
+#ifdef CONFIG_PROC_FS
static int atmel_proc_show(struct seq_file *m, void *v)
{
struct atmel_private *priv = m->private;
@@ -1481,6 +1482,7 @@ static int atmel_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Current state:\t\t%s\n", s);
return 0;
}
+#endif
static const struct net_device_ops atmel_netdev_ops = {
.ndo_open = atmel_open,
@@ -1516,10 +1518,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->present_callback = card_present;
priv->card = card;
priv->firmware = NULL;
- priv->firmware_id[0] = '\0';
priv->firmware_type = fw_type;
if (firmware) /* module parameter */
- strcpy(priv->firmware_id, firmware);
+ strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
priv->station_state = STATION_STATE_DOWN;
priv->do_rx_crc = 0;
@@ -2646,14 +2647,9 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
}
- if (!(new_firmware = kmalloc(com.len, GFP_KERNEL))) {
- rc = -ENOMEM;
- break;
- }
-
- if (copy_from_user(new_firmware, com.data, com.len)) {
- kfree(new_firmware);
- rc = -EFAULT;
+ new_firmware = memdup_user(com.data, com.len);
+ if (IS_ERR(new_firmware)) {
+ rc = PTR_ERR(new_firmware);
break;
}
@@ -3681,7 +3677,7 @@ static int probe_atmel_card(struct net_device *dev)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
- mdelay(500);
+ msleep(500);
if (atmel_read16(dev, MR2) == 0) {
/* No stored firmware so load a small stub which just
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index cb987c2ecc6b..87131f663292 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
atomic_set(&led->state, 0);
led->led_dev.name = led->name;
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
index fd4565389c77..bc922118b6ac 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
led->dev = dev;
led->index = led_index;
led->activelow = activelow;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
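
Both LED fixes above (and the firmware_id change in atmel.c earlier) are the same hardening: strncpy() does not NUL-terminate the destination when the source is as long as the buffer, whereas strlcpy() always terminates, truncating if needed. Illustration only:

	char buf[4];

	strncpy(buf, "abcdef", sizeof(buf));	/* buf = 'a','b','c','d', no NUL */
	strlcpy(buf, "abcdef", sizeof(buf));	/* buf = "abc" plus NUL */
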
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index 9d99eb42d917..6acba67bca07 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -60,7 +60,6 @@ config BRCMFMAC_PCIE
bool "PCIE bus interface support for FullMAC driver"
depends on BRCMFMAC
depends on PCI
- depends on HAS_DMA
select BRCMFMAC_PROTO_MSGBUF
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b6122aad639e..5444e6213d45 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2434,7 +2434,7 @@ static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si)
struct nl80211_sta_flag_update *sfu;
brcmf_dbg(TRACE, "flags %08x\n", fw_sta_flags);
- si->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ si->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS);
sfu = &si->sta_flags;
sfu->mask = BIT(NL80211_STA_FLAG_WME) |
BIT(NL80211_STA_FLAG_AUTHENTICATED) |
@@ -2470,7 +2470,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
brcmf_err("Failed to get bss info (%d)\n", err);
goto out_kfree;
}
- si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
si->bss_param.dtim_period = buf->bss_le.dtim_period;
capability = le16_to_cpu(buf->bss_le.capability);
@@ -2501,7 +2501,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err);
return err;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy = rate * 5;
memset(&scbval, 0, sizeof(scbval));
@@ -2512,7 +2512,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
return err;
}
rssi = le32_to_cpu(scbval.val);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt,
@@ -2521,10 +2521,10 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
brcmf_err("BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err);
return err;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->rx_packets = le32_to_cpu(pktcnt.rx_good_pkt);
sinfo->rx_dropped_misc = le32_to_cpu(pktcnt.rx_bad_pkt);
sinfo->tx_packets = le32_to_cpu(pktcnt.tx_good_pkt);
@@ -2571,7 +2571,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
}
brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver));
- sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
sta_flags = le32_to_cpu(sta_info_le.flags);
brcmf_convert_sta_flags(sta_flags, sinfo);
@@ -2581,33 +2581,33 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
else
sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
if (sta_flags & BRCMF_STA_ASSOC) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME);
sinfo->connected_time = le32_to_cpu(sta_info_le.in);
brcmf_fill_bss_param(ifp, sinfo);
}
if (sta_flags & BRCMF_STA_SCBSTATS) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures);
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts);
sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts);
sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
if (sinfo->tx_packets) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy =
le32_to_cpu(sta_info_le.tx_rate) / 100;
}
if (sinfo->rx_packets) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
sinfo->rxrate.legacy =
le32_to_cpu(sta_info_le.rx_rate) / 100;
}
if (le16_to_cpu(sta_info_le.ver) >= 4) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
}
total_rssi = 0;
@@ -2623,10 +2623,10 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
}
if (count_rssi) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
sinfo->chains = count_rssi;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
} else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -2639,7 +2639,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
} else {
rssi = le32_to_cpu(scb_val.val);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
@@ -6926,15 +6926,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg->d11inf.io_type = (u8)io_type;
brcmu_d11_attach(&cfg->d11inf);
- err = brcmf_setup_wiphy(wiphy, ifp);
- if (err < 0)
- goto priv_out;
-
/* regulatory notifier below needs access to cfg so
* assign it now.
*/
drvr->config = cfg;
+ err = brcmf_setup_wiphy(wiphy, ifp);
+ if (err < 0)
+ goto priv_out;
+
brcmf_dbg(INFO, "Registering custom regulatory\n");
wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
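
The BIT() to BIT_ULL() conversion above is a width fix: station_info::filled is a u64 bitmap, and BIT(nr) expands to (1UL << (nr)), which is only 32 bits wide on 32-bit architectures, so any NL80211_STA_INFO_* index of 32 or above would shift out of range. The relevant macros from include/linux/bits.h:

	#define BIT(nr)		(1UL << (nr))	/* width of unsigned long */
	#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64 bits */

	u64 filled = 0;
	filled |= BIT_ULL(40);	/* well-defined on every architecture */
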
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 72954fd6df3b..b1f702faff4f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -21,6 +21,7 @@
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <net/addrconf.h>
+#include <net/ieee80211_radiotap.h>
#include <net/ipv6.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -404,6 +405,30 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
netif_rx_ni(skb);
}
+void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_RADIOTAP)) {
+ /* Do nothing */
+ } else {
+ struct ieee80211_radiotap_header *radiotap;
+
+ /* TODO: use RX status to fill some radiotap data */
+ radiotap = skb_push(skb, sizeof(*radiotap));
+ memset(radiotap, 0, sizeof(*radiotap));
+ radiotap->it_len = cpu_to_le16(sizeof(*radiotap));
+
+ /* TODO: 4 bytes with receive status? */
+ skb->len -= 4;
+ }
+
+ skb->dev = ifp->ndev;
+ skb_reset_mac_header(skb);
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ brcmf_netif_rx(ifp, skb);
+}
+
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
struct brcmf_if **ifp)
{
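
When the firmware lacks BRCMF_FEAT_MONITOR_FMT_RADIOTAP, brcmf_netif_mon_rx() above prepends the smallest valid radiotap encapsulation: an all-zero header whose it_len covers only itself, i.e. version 0 with an empty it_present bitmap, so capture tools skip straight past it to the 802.11 frame. For reference, the header layout from include/net/ieee80211_radiotap.h (abridged):

	struct ieee80211_radiotap_header {
		u8 it_version;		/* always 0 */
		u8 it_pad;
		__le16 it_len;		/* length of the whole radiotap header */
		__le32 it_present;	/* bitmap of present fields, 0 here */
	} __packed;
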
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 401f50458686..dcf6e27cc16f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -121,6 +121,7 @@ struct brcmf_pub {
struct brcmf_if *iflist[BRCMF_MAX_IFS];
s32 if2bss[BRCMF_MAX_IFS];
+ struct brcmf_if *mon_if;
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
@@ -216,6 +217,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 800a423c7bc2..8347da632a5b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -48,6 +48,8 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MBSS, "mbss" },
{ BRCMF_FEAT_MCHAN, "mchan" },
{ BRCMF_FEAT_P2P, "p2p" },
+ { BRCMF_FEAT_MONITOR, "monitor" },
+ { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
};
#ifdef DEBUG
@@ -91,6 +93,42 @@ static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
}
#endif /* DEBUG */
+struct brcmf_feat_fwfeat {
+ const char * const fwid;
+ u32 feat_flags;
+};
+
+static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = {
+ /* brcmfmac43602-pcie.ap.bin from linux-firmware.git commit ea1178515b88 */
+ { "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) },
+ /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */
+ { "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) },
+};
+
+static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
+{
+ const struct brcmf_feat_fwfeat *e;
+ u32 feat_flags = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_feat_fwfeat_map); i++) {
+ e = &brcmf_feat_fwfeat_map[i];
+ if (!strcmp(e->fwid, drv->fwver)) {
+ feat_flags = e->feat_flags;
+ break;
+ }
+ }
+
+ if (!feat_flags)
+ return;
+
+ for (i = 0; i < BRCMF_FEAT_LAST; i++)
+ if (feat_flags & BIT(i))
+ brcmf_dbg(INFO, "enabling firmware feature: %s\n",
+ brcmf_feat_names[i]);
+ drv->feat_flags |= feat_flags;
+}
+
/**
* brcmf_feat_iovar_int_get() - determine feature through iovar query.
*
@@ -251,6 +289,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
}
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+ brcmf_feat_firmware_overrides(drvr);
+
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
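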
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index d1193825e559..0b4974df353a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -33,6 +33,8 @@
* MFP: 802.11w Management Frame Protection.
* GSCAN: enhanced scan offload feature.
* FWSUP: Firmware supplicant.
+ * MONITOR: firmware can pass monitor packets to host.
+ * MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -48,7 +50,9 @@
BRCMF_FEAT_DEF(WOWL_ARP_ND) \
BRCMF_FEAT_DEF(MFP) \
BRCMF_FEAT_DEF(GSCAN) \
- BRCMF_FEAT_DEF(FWSUP)
+ BRCMF_FEAT_DEF(FWSUP) \
+ BRCMF_FEAT_DEF(MONITOR) \
+ BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 4b290705e3e6..d5bb81e88762 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -32,11 +32,30 @@
#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
-#define BRCMF_STA_WME 0x00000002 /* WMM association */
-#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */
-#define BRCMF_STA_ASSOC 0x00000010 /* Associated */
-#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */
-#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */
+#define BRCMF_STA_WME 0x00000002 /* WMM association */
+#define BRCMF_STA_NONERP 0x00000004 /* No ERP */
+#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */
+#define BRCMF_STA_ASSOC 0x00000010 /* Associated */
+#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */
+#define BRCMF_STA_WDS 0x00000040 /* Wireless Distribution System */
+#define BRCMF_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */
+#define BRCMF_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */
+#define BRCMF_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */
+#define BRCMF_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */
+#define BRCMF_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */
+#define BRCMF_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */
+#define BRCMF_STA_N_CAP 0x00002000 /* STA 802.11n capable */
+#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define BRCMF_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */
+#define BRCMF_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */
+#define BRCMF_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */
+#define BRCMF_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */
+#define BRCMF_STA_RIFS_CAP 0x00080000 /* rifs enabled */
+#define BRCMF_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */
+#define BRCMF_STA_WPS 0x00200000 /* WPS state */
+#define BRCMF_STA_DWDS_CAP 0x01000000 /* DWDS CAP */
+#define BRCMF_STA_DWDS 0x02000000 /* DWDS active */
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -155,6 +174,8 @@
#define BRCMF_MFP_CAPABLE 1
#define BRCMF_MFP_REQUIRED 2
+#define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8
+
/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each
* ioctl. It is relatively small because firmware has small maximum size input
* payload restriction for ioctls.
@@ -531,6 +552,8 @@ struct brcmf_sta_info_le {
/* w/hi bit set if basic */
__le32 in; /* seconds elapsed since associated */
__le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+
+ /* Fields valid for ver >= 3 */
__le32 tx_pkts; /* # of packets transmitted */
__le32 tx_failures; /* # of packets failed */
__le32 rx_ucast_pkts; /* # of unicast packets received */
@@ -539,6 +562,8 @@ struct brcmf_sta_info_le {
__le32 rx_rate; /* Rate of last successful rx frame */
__le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
__le32 rx_decrypt_failures; /* # of packet decrypted failed */
+
+ /* Fields valid for ver >= 4 */
__le32 tx_tot_pkts; /* # of tx pkts (ucast + mcast) */
__le32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */
__le32 tx_mcast_pkts; /* # of mcast pkts txed */
@@ -575,6 +600,14 @@ struct brcmf_sta_info_le {
*/
__le32 rx_pkts_retried; /* # rx with retry bit set */
__le32 tx_rate_fallback; /* lowest fallback TX rate */
+
+ /* Fields valid for ver >= 5 */
+ struct {
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */
+ __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+ } rateset_adv;
};
struct brcmf_chanspec_list {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index c40ba8855cd5..4e8397a0cbc8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -69,6 +69,8 @@
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
@@ -1128,6 +1130,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
struct sk_buff *skb;
u16 data_offset;
u16 buflen;
+ u16 flags;
u32 idx;
struct brcmf_if *ifp;
@@ -1137,6 +1140,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
data_offset = le16_to_cpu(rx_complete->data_offset);
buflen = le16_to_cpu(rx_complete->data_len);
idx = le32_to_cpu(rx_complete->msg.request_id);
+ flags = le16_to_cpu(rx_complete->flags);
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->rx_pktids, idx);
@@ -1150,6 +1154,20 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
skb_trim(skb, buflen);
+ if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
+ BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
+ ifp = msgbuf->drvr->mon_if;
+
+ if (!ifp) {
+ brcmf_err("Received unexpected monitor pkt\n");
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
+ brcmf_netif_mon_rx(ifp, skb);
+ return;
+ }
+
ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
if (!ifp || !ifp->ndev) {
brcmf_err("Received pkt for invalid ifidx %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 45928b5b8d97..4fffa6988087 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
- fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus);
+ /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
+ fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
return fwreq;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index c99a191e8d69..a907d7b065fa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
if (bus) {
+ /* Stop watchdog task */
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 3a13d176b221..35e3b101e5cf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -159,7 +159,7 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
{
u16 data;
- if ((addr == RADIO_IDCODE))
+ if (addr == RADIO_IDCODE)
return 0xffff;
switch (pi->pubpi.phy_type) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 1a187557982e..bedec1606caa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16904,7 +16904,7 @@ static void wlc_phy_workarounds_nphy_rev3(struct brcms_phy *pi)
}
}
-void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi)
+static void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi)
{
static const u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
index b9672da24a9d..b24bc57ca91b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
@@ -213,7 +213,7 @@ static const s16 log_table[] = {
30498,
31267,
32024,
- 32768
+ 32767
};
#define LOG_TABLE_SIZE 32 /* log_table size */
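
The log_table change above is a signed-overflow repair. The hunk header shows the table is an array of s16, whose range is -32768..32767: the old final entry 32768 is not representable, and the conversion wraps (implementation-defined, typically to -32768), corrupting interpolation at the top of the table. 32767 is the nearest value that fits:

	s16 good = 32767;	/* S16_MAX, representable */
	s16 bad = 32768;	/* out of range; implementation-defined,
				 * typically wraps to -32768 */
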
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 72046e182745..04dd7a936593 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3419,7 +3419,7 @@ done:
static void airo_handle_tx(struct airo_info *ai, u16 status)
{
- int i, len = 0, index = -1;
+ int i, index = -1;
u16 fid;
if (test_bit(FLAG_MPI, &ai->flags)) {
@@ -3443,11 +3443,9 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
fid = IN4500(ai, TXCOMPLFID);
- for(i = 0; i < MAX_FIDS; i++) {
- if ((ai->fids[i] & 0xffff) == fid) {
- len = ai->fids[i] >> 16;
+ for (i = 0; i < MAX_FIDS; i++) {
+ if ((ai->fids[i] & 0xffff) == fid)
index = i;
- }
}
if (index != -1) {
diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c
index d9ed22b4cc6b..3718f958c0fc 100644
--- a/drivers/net/wireless/cisco/airo_cs.c
+++ b/drivers/net/wireless/cisco/airo_cs.c
@@ -102,11 +102,8 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int airo_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int ret;
- dev = link->priv;
-
dev_dbg(&link->dev, "airo_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index b8fd3cc90634..910db46db6a1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -692,7 +692,7 @@ static void printk_buf(int level, const u8 * data, u32 len)
static void schedule_reset(struct ipw2100_priv *priv)
{
- unsigned long now = get_seconds();
+ time64_t now = ktime_get_boottime_seconds();
/* If we haven't received a reset request within the backoff period,
* then we can reset the backoff interval so this reset occurs
@@ -701,10 +701,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
(now - priv->last_reset > priv->reset_backoff))
priv->reset_backoff = 0;
- priv->last_reset = get_seconds();
+ priv->last_reset = now;
if (!(priv->status & STATUS_RESET_PENDING)) {
- IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n",
+ IPW_DEBUG_INFO("%s: Scheduling firmware restart (%llds).\n",
priv->net_dev->name, priv->reset_backoff);
netif_carrier_off(priv->net_dev);
netif_stop_queue(priv->net_dev);
@@ -2079,7 +2079,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
memcpy(priv->bssid, bssid, ETH_ALEN);
priv->status |= STATUS_ASSOCIATING;
- priv->connect_start = get_seconds();
+ priv->connect_start = ktime_get_boottime_seconds();
schedule_delayed_work(&priv->wx_event_work, HZ / 10);
}
@@ -4070,8 +4070,8 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x)
if (priv->status & STATUS_ASSOCIATED)
- len += sprintf(buf + len, "connected: %lu\n",
- get_seconds() - priv->connect_start);
+ len += sprintf(buf + len, "connected: %llu\n",
+ ktime_get_boottime_seconds() - priv->connect_start);
else
len += sprintf(buf + len, "not connected\n");
@@ -4108,7 +4108,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
DUMP_VAR(txq_stat.lo, "d");
DUMP_VAR(ieee->scans, "d");
- DUMP_VAR(reset_backoff, "d");
+ DUMP_VAR(reset_backoff, "lld");
return len;
}
@@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
.host_command_length = ETH_ALEN
};
int err;
- int len;
IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
- len = ETH_ALEN;
/* The Firmware currently ignores the BSSID and just disassociates from
* the currently associated AP -- but in the off chance that a future
* firmware does use the BSSID provided here, we go ahead and try and
@@ -6437,7 +6435,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
pci_disable_device(pci_dev);
pci_set_power_state(pci_dev, PCI_D3hot);
- priv->suspend_at = get_seconds();
+ priv->suspend_at = ktime_get_boottime_seconds();
mutex_unlock(&priv->action_mutex);
@@ -6482,7 +6480,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
* the queue of needed */
netif_device_attach(dev);
- priv->suspend_time = get_seconds() - priv->suspend_at;
+ priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
/* Bring the device back up */
if (!(priv->status & STATUS_RF_KILL_SW))
@@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
struct libipw_device *ieee = priv->ieee;
struct lib80211_crypt_data *crypt;
struct iw_param *param = &wrqu->param;
- int ret = 0;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
@@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
/*
* wpa_supplicant will control these internally
*/
- ret = -EOPNOTSUPP;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
@@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
{
struct ipw2100_priv *priv = libipw_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- __le16 reason;
-
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
index ce3e35f6b60f..8c11c7fa2eef 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
@@ -491,7 +491,7 @@ struct ipw2100_priv {
/* Statistics */
int resets;
- int reset_backoff;
+ time64_t reset_backoff;
/* Context */
u8 essid[IW_ESSID_MAX_SIZE];
@@ -500,8 +500,8 @@ struct ipw2100_priv {
u8 channel;
int last_mode;
- unsigned long connect_start;
- unsigned long last_reset;
+ time64_t connect_start;
+ time64_t last_reset;
u32 channel_mask;
u32 fatal_error;
@@ -581,9 +581,9 @@ struct ipw2100_priv {
int user_requested_scan;
- /* Track time in suspend */
- unsigned long suspend_at;
- unsigned long suspend_time;
+ /* Track time in suspend, using CLOCK_BOOTTIME */
+ time64_t suspend_at;
+ time64_t suspend_time;
u32 interrupts;
int tx_interrupts;
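The ipw2100 hunks above replace get_seconds(), which returns wall-clock seconds in an unsigned long and overflows on 32-bit systems in 2038, with ktime_get_boottime_seconds(), which returns a time64_t based on CLOCK_BOOTTIME and therefore keeps counting across suspend; that is exactly what the suspend_at/suspend_time bookkeeping needs. A minimal userspace sketch of the same pattern, assuming a Linux system where CLOCK_BOOTTIME is available (the in-kernel helpers differ):

#include <stdio.h>
#include <time.h>

/* userspace analogue of ktime_get_boottime_seconds() */
static long long boottime_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_BOOTTIME, &ts);
	return (long long)ts.tv_sec;
}

int main(void)
{
	long long suspend_at = boottime_seconds();

	/* ... the system may suspend and resume here ... */

	long long suspend_time = boottime_seconds() - suspend_at;

	printf("time across suspend: %llds\n", suspend_time);
	return 0;
}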
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 8a858f7e36f4..9644e7b93645 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -7112,7 +7112,7 @@ static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
{
u32 ret = 0;
- if ((priv == NULL))
+ if (!priv)
return 0;
if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
@@ -11888,7 +11888,7 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
- priv->suspend_at = get_seconds();
+ priv->suspend_at = ktime_get_boottime_seconds();
return 0;
}
@@ -11925,7 +11925,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
* the queue of needed */
netif_device_attach(dev);
- priv->suspend_time = get_seconds() - priv->suspend_at;
+ priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
/* Bring the device back up */
schedule_work(&priv->up);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index aa301d1eee3c..f98ab1f71edd 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -1343,9 +1343,9 @@ struct ipw_priv {
s8 tx_power;
- /* Track time in suspend */
- unsigned long suspend_at;
- unsigned long suspend_time;
+ /* Track time in suspend using CLOCK_BOOTTIME */
+ time64_t suspend_at;
+ time64_t suspend_time;
#ifdef CONFIG_PM
u32 pm_state[16];
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index dd29f46d086b..d32d39fa2686 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
- struct lib80211_crypt_data *crypt;
struct libipw_security *sec = &ieee->sec;
LIBIPW_DEBUG_WX("GET_ENCODE\n");
@@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
} else
key = ieee->crypt_info.tx_keyidx;
- crypt = ieee->crypt_info.crypt[key];
erq->flags = key + 1;
if (!sec->enabled) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
index c1b4441fb8b2..a2960032be81 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
@@ -95,7 +95,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Rx - OFDM:");
pos +=
scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 62a9794f952b..57e3b6cca234 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il,
int txq_id = skb_get_queue_mapping(skb);
u16 len, idx, hdr_len;
u16 firstlen, secondlen;
- u8 id;
- u8 unicast;
u8 sta_id;
u8 tid = 0;
__le16 fc;
@@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il,
goto drop_unlock;
}
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
fc = hdr->frame_control;
#ifdef CONFIG_IWLEGACY_DEBUG
@@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il)
struct list_head *element;
struct il_rx_buf *rxb;
unsigned long flags;
- int write;
spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
@@ -2725,7 +2718,6 @@ void
il3945_post_associate(struct il_priv *il)
{
int rc = 0;
- struct ieee80211_conf *conf = NULL;
if (!il->vif || !il->is_open)
return;
@@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il)
il_scan_cancel_timeout(il, 200);
- conf = &il->hw->conf;
-
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
il3945_commit_rxon(il);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index dbf164d48ed3..3e568ce2fb20 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
{
struct il_channel_info *ch_info;
s8 max_power;
- u8 a_band;
u8 i;
if (il->tx_power_user_lmt == power) {
@@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
for (i = 0; i < il->channel_count; i++) {
ch_info = &il->channel_info[i];
- a_band = il_is_channel_a_band(ch_info);
/* find minimum power of all user and regulatory constraints
* (does not consider h/w clipping limitations) */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 562e94870a9c..280cd8ae1696 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
u32 *accum_stats;
u32 *delta, *max_delta;
struct stats_general_common *general, *accum_general;
- struct stats_tx *tx, *accum_tx;
prev_stats = (__le32 *) &il->_4965.stats;
accum_stats = (u32 *) &il->_4965.accum_stats;
size = sizeof(struct il_notif_stats);
general = &il->_4965.stats.general.common;
accum_general = &il->_4965.accum_stats.general.common;
- tx = &il->_4965.stats.tx;
- accum_tx = &il->_4965.accum_stats.tx;
delta = (u32 *) &il->_4965.delta_stats;
max_delta = (u32 *) &il->_4965.max_delta;
@@ -4784,7 +4781,6 @@ static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
struct il_priv *il = context;
- struct il_ucode_header *ucode;
int err;
struct il4965_firmware_pieces pieces;
const unsigned int api_max = il->cfg->ucode_api_max;
@@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
- ucode = (struct il_ucode_header *)ucode_raw->data;
-
err = il4965_load_firmware(il, ucode_raw, &pieces);
if (err)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 4d08d78c6b71..04e376cc898c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,13 +7,13 @@ iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index a63ca8820568..fedb108db68f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -63,6 +63,7 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d4ba66aecdc9..91ca77c7571c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -59,7 +59,7 @@
#define IWL_22000_UCODE_API_MAX 38
/* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN 24
+#define IWL_22000_UCODE_API_MIN 39
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
@@ -73,29 +73,48 @@
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
-#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
+ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_22000 10
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
.num_of_queues = 512,
+ .max_tfd_queue_size = 256,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_base_params iwl_22560_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .num_of_queues = 512,
+ .max_tfd_queue_size = 65536,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
-#define IWL_DEVICE_22000 \
+#define IWL_DEVICE_22000_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_22000, \
- .base_params = &iwl_22000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
.non_shared_ant = ANT_A, \
@@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
+ .ht_params = &iwl_22000_ht_params, \
+ .nvm_ver = IWL_22000_NVM_VERSION, \
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.use_tfh = true, \
.rf_id = true, \
.gen2 = true, \
@@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dbgc_supported = true, \
.min_umac_error_event_table = 0x400000
+#define IWL_DEVICE_22500 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22000, \
+ .base_params = &iwl_22000_base_params, \
+ .csr = &iwl_csr_v1
+
+#define IWL_DEVICE_22560 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22560, \
+ .base_params = &iwl_22560_base_params, \
+ .csr = &iwl_csr_v2
+
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
.cdb = true,
};
const struct iwl_cfg iwl22000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_JF_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_F0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
+ .name = "Intel(R) Dual Band Wireless AX 22560",
+ .fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
+ IWL_DEVICE_22560,
+ .cdb = true,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
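The 22000.c rework above is a pure macro refactor: fields shared by all family members move into IWL_DEVICE_22000_COMMON, and IWL_DEVICE_22500/IWL_DEVICE_22560 layer the family-specific device_family, base_params and CSR block on top, so each cfg shrinks to a name, a firmware prefix and one macro. The layering relies on C designated initializers; a self-contained sketch with hypothetical names (not the driver's):

#include <stdio.h>

struct cfg {
	const char *name;
	int family;
	int max_tfd_queue_size;
};

/* fields shared by every family member */
#define DEVICE_COMMON	.family = 22000

/* family-specific layers on top of the common part */
#define DEVICE_SMALLQ	DEVICE_COMMON, .max_tfd_queue_size = 256
#define DEVICE_BIGQ	DEVICE_COMMON, .max_tfd_queue_size = 65536

static const struct cfg cfg_a = {
	.name = "device A",
	DEVICE_SMALLQ,
};

static const struct cfg cfg_b = {
	.name = "device B",
	DEVICE_BIGQ,
};

int main(void)
{
	printf("%s: queue size %d\n", cfg_a.name, cfg_a.max_tfd_queue_size);
	printf("%s: queue size %d\n", cfg_b.name, cfg_b.max_tfd_queue_size);
	return 0;
}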
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index a224f1be1ec2..36151e61a26f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -53,6 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index dbcec7ce7863..b5d8274761d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -72,6 +72,7 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 69bfa827e82a..a62c8346f13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -123,6 +123,7 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 7262e973e0d6..c46fa712985b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -104,6 +104,7 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e20c30b29c03..24b2f7cbb308 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -95,6 +95,7 @@
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -178,6 +179,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
+const struct iwl_cfg iwl9260_killer_2ac_cfg = {
+ .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260A_FW_PRE,
@@ -267,6 +279,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.soc_latency = 5000,
};
+const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
+ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000A_FW_PRE,
@@ -327,6 +367,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
+const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
+ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 007bfe7656a4..08d3d8a190f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -187,20 +189,4 @@ struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
-/**
- * struct iwl_fseq_ver_mismatch_nty - Notification about version
- *
- * This notification does not have a direct impact on the init flow.
- * It means that another core (not WiFi) has initiated the FSEQ flow
- * and updated the FSEQ version. The driver only prints an error when
- * this occurs.
- *
- * @aux_read_fseq_ver: auxiliary read FSEQ version
- * @wifi_fseq_ver: FSEQ version (embedded in WiFi)
- */
-struct iwl_fseq_ver_mismatch_ntf {
- __le32 aux_read_fseq_ver;
- __le32 wifi_fseq_ver;
-} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
-
#endif /* __iwl_fw_api_alive_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index f285bacc8726..6dad748e5cdc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -193,7 +193,8 @@ enum iwl_legacy_cmds {
FW_GET_ITEM_CMD = 0x1a,
/**
- * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
+ * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+ * &struct iwl_tx_cmd_gen3,
* response in &struct iwl_mvm_tx_resp or
* &struct iwl_mvm_tx_resp_v3
*/
@@ -646,13 +647,6 @@ enum iwl_system_subcmd_ids {
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
-
- /**
- * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
- * mismatch during init. The format is specified in
- * &struct iwl_fseq_ver_mismatch_ntf.
- */
- FSEQ_VER_MISMATCH_NTF = 0xFF,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 5f6e855006dd..59b3c6e8f37b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +85,16 @@ enum iwl_data_path_subcmd_ids {
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
+ * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+ */
+ STA_HE_CTXT_CMD = 0x7,
+
+ /**
+ * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+ */
+ RFH_QUEUE_CONFIG_CMD = 0xD,
+
+ /**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
*/
TLC_MNG_CONFIG_CMD = 0xF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index f2e31e040a7b..55594c93b014 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -279,6 +281,10 @@ enum iwl_mac_filter_flags {
MAC_FILTER_OUT_BCAST = BIT(8),
MAC_FILTER_IN_CRC32 = BIT(11),
MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
+ /**
+ * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
+ */
+ MAC_FILTER_IN_11AX = BIT(14),
};
/**
@@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif {
__le32 num_recvd_beacons;
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+/**
+ * struct iwl_he_backoff_conf - used for backoff configuration
+ * Per trigger-based AC (set by the MU EDCA Parameter Set info-element),
+ * used for backoff configuration of TXF5..TXF8, trigger based.
+ * The MU-TIMER is reloaded with MU_TIME each time a frame from the AC is sent
+ * via trigger-based TX.
+ * @cwmin: CW min
+ * @cwmax: CW max
+ * @aifsn: AIFSN
+ * AIFSN=0, means that no backoff from the specified TRIG-BASED AC is
+ * allowed till the MU-TIMER is 0
+ * @mu_time: MU time in 8TU units
+ */
+struct iwl_he_backoff_conf {
+ __le16 cwmin;
+ __le16 cwmax;
+ __le16 aifsn;
+ __le16 mu_time;
+} __packed; /* AC_QOS_DOT11AX_API_S */
+
+#define MAX_HE_SUPP_NSS 2
+#define MAX_HE_CHANNEL_BW_INDX 4
+
+/**
+ * struct iwl_he_pkt_ext - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ * For each Nss/Bw define 2 QAM thresholds (0..5)
+ * For rates below the low_th, no need for PPE
+ * For rates between low_th and high_th, need 8us PPE
+ * For rates equal or higher than the high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S */
+
+/**
+ * enum iwl_he_sta_ctxt_flags - HE STA context flags
+ * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
+ * control frames such as TRIG, NDPA, BACK)
+ * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
+ * color for RX filter but use MAC header
+ * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
+ * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
+ * of 32-bits
+ * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
+ * and should be used
+ * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
+ * is enabled according to UORA element existence
+ * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
+ * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
+ * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
+ * parameter set, i.e. the backoff counters for trig-based ACs
+ */
+enum iwl_he_sta_ctxt_flags {
+ STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
+ STA_CTXT_HE_BSS_COLOR_DIS = BIT(5),
+ STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6),
+ STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7),
+ STA_CTXT_HE_PACKET_EXT = BIT(8),
+ STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9),
+ STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
+ STA_CTXT_HE_ACK_ENABLED = BIT(11),
+ STA_CTXT_HE_MU_EDCA_CW = BIT(12),
+};
+
+/**
+ * enum iwl_he_htc_flags - HE HTC support flags
+ * @IWL_HE_HTC_SUPPORT: HE-HTC support
+ * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
+ * support via A-control field
+ * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
+ * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
+ * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
+ */
+enum iwl_he_htc_flags {
+ IWL_HE_HTC_SUPPORT = BIT(0),
+ IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3),
+ IWL_HE_HTC_BSR_SUPP = BIT(4),
+ IWL_HE_HTC_OMI_SUPP = BIT(5),
+ IWL_HE_HTC_BQR_SUPP = BIT(6),
+};
+
+/*
+ * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
+ * response to HE MRQ and if the STA provides unsolicited HE MFB
+ */
+#define IWL_HE_HTC_LINK_ADAP_POS (1)
+#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0)
+#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
+#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
+
+/**
+ * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see %iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+
#endif /* __iwl_fw_api_mac_h__ */
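The iwl_he_pkt_ext comment above encodes a three-way rule: constellations below QAM_th1 need no packet extension, those at or above QAM_th1 but below QAM_th2 need 8us, and QAM_th2 or above need 16us. A direct transcription of that rule as a tiny helper (hypothetical function, not part of the driver):

#include <stdio.h>

/* Map a TX constellation index (0=BPSK .. 5=1024QAM) to the required
 * packet-extension duration in microseconds, given the two thresholds
 * carried per Nss x BW entry of the PPE thresholds element. */
static int he_ppe_duration_us(int qam_tx, int qam_th1, int qam_th2)
{
	if (qam_tx < qam_th1)
		return 0;	/* QAM_tx < QAM_th1            -> 0us  */
	if (qam_tx < qam_th2)
		return 8;	/* QAM_th1 <= QAM_tx < QAM_th2 -> 8us  */
	return 16;		/* QAM_th2 <= QAM_tx           -> 16us */
}

int main(void)
{
	/* thresholds 16QAM(2)/256QAM(4): a 64QAM(3) TX needs 8us of PE */
	printf("%dus\n", he_ppe_duration_us(3, 2, 4));
	return 0;
}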
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 8d6dc9189985..6c5338364794 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general {
* @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
* @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
* @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
- * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
* @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
* @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
* @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
@@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags {
NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
+ /**
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ */
NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 21e13a315421..087fae91baef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -314,8 +314,11 @@ enum {
IWL_RATE_MCS_8_INDEX,
IWL_RATE_MCS_9_INDEX,
IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+ IWL_RATE_MCS_10_INDEX,
+ IWL_RATE_MCS_11_INDEX,
+ IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
- IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
+ IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -440,8 +443,8 @@ enum {
#define RATE_LEGACY_RATE_MSK 0xff
/* Bit 10 - OFDM HE */
-#define RATE_MCS_OFDM_HE_POS 10
-#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS)
+#define RATE_MCS_HE_POS 10
+#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -482,15 +485,33 @@ enum {
#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
/*
- * Bit 20-21: HE guard interval and LTF type.
- * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
- * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 & SGI (bit 13) clear 4xLTF+3.2us
+ * 3 & SGI (bit 13) set 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * 3 (does not occur)
*/
#define RATE_MCS_HE_GI_LTF_POS 20
#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
#define RATE_MCS_HE_TYPE_POS 22
+#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
@@ -501,6 +522,9 @@ enum {
#define RATE_MCS_LDPC_POS 27
#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS 28
+#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
/* Link Quality definitions */
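With the renamed and extended definitions above, decoding a rate_n_flags word is plain masking: bit 10 flags an HE rate, bits 20-21 carry the GI/LTF code, and bits 22-23 the HE type, for which named values (RATE_MCS_HE_TYPE_SU and friends) now exist. A self-contained decode sketch; the constants mirror the header, the helper itself is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define RATE_MCS_HE_POS		10
#define RATE_MCS_HE_MSK		(1u << RATE_MCS_HE_POS)
#define RATE_MCS_HE_GI_LTF_POS	20
#define RATE_MCS_HE_GI_LTF_MSK	(3u << RATE_MCS_HE_GI_LTF_POS)
#define RATE_MCS_HE_TYPE_POS	22
#define RATE_MCS_HE_TYPE_MU	(2u << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK	(3u << RATE_MCS_HE_TYPE_POS)

static void decode(uint32_t rate_n_flags)
{
	if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
		printf("not an HE rate\n");
		return;
	}
	printf("HE type %u, GI/LTF code %u\n",
	       (rate_n_flags & RATE_MCS_HE_TYPE_MSK) >> RATE_MCS_HE_TYPE_POS,
	       (rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
			RATE_MCS_HE_GI_LTF_POS);
}

int main(void)
{
	decode(RATE_MCS_HE_MSK | RATE_MCS_HE_TYPE_MU |
	       (1u << RATE_MCS_HE_GI_LTF_POS));
	return 0;
}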
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 7e570c4a9df0..2f599353c885 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -343,6 +345,169 @@ enum iwl_rx_mpdu_mac_info {
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
};
+/*
+ * enum iwl_rx_he_phy - HE PHY data
+ */
+enum iwl_rx_he_phy {
+ IWL_RX_HE_PHY_BEAM_CHNG = BIT(0),
+ IWL_RX_HE_PHY_UPLINK = BIT(1),
+ IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc,
+ IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00,
+ IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12),
+ IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000,
+ IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20),
+ IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000,
+ IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23),
+ IWL_RX_HE_PHY_DOPPLER = BIT(24),
+ /* 6 bits reserved */
+ IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
+
+ /* second dword - MU data */
+ IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0),
+ IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
+ IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
+ IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
+ /* trigger encoded */
+ IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
+ IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL,
+ /* 1 bit reserved */
+ IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21),
+ IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
+ /* 8 bits reserved */
+};
+
+/**
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v1 {
+ /* DW7 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW8 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW9 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW10 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW11 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW12 & DW13 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v3 {
+ /* DW7 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW8 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW9 */
+ /**
+ * @partial_hash: 31:0 ip/tcp header hash
+ * w/o some fields (such as IP SRC addr)
+ */
+ __le32 partial_hash;
+ /* DW10 */
+ /**
+ * @raw_xsum: raw xsum value
+ */
+ __le32 raw_xsum;
+ /* DW11 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW12 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW13 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW14 & DW15 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+ /* DW16 & DW17 */
+ /**
+ * @reserved: reserved
+ */
+ __le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
*/
@@ -400,51 +565,14 @@ struct iwl_rx_mpdu_desc {
* @reorder_data: &enum iwl_rx_mpdu_reorder_data
*/
__le32 reorder_data;
- /* DW7 - carries rss_hash only when rpa_en == 1 */
- /**
- * @rss_hash: RSS hash value
- */
- __le32 rss_hash;
- /* DW8 - carries filter_match only when rpa_en == 1 */
- /**
- * @filter_match: filter match value
- */
- __le32 filter_match;
- /* DW9 */
- /**
- * @rate_n_flags: RX rate/flags encoding
- */
- __le32 rate_n_flags;
- /* DW10 */
- /**
- * @energy_a: energy chain A
- */
- u8 energy_a;
- /**
- * @energy_b: energy chain B
- */
- u8 energy_b;
- /**
- * @channel: channel number
- */
- u8 channel;
- /**
- * @mac_context: MAC context mask
- */
- u8 mac_context;
- /* DW11 */
- /**
- * @gp2_on_air_rise: GP2 timer value on air rise (INA)
- */
- __le32 gp2_on_air_rise;
- /* DW12 & DW13 */
- /**
- * @tsf_on_air_rise:
- * TSF value on air rise (INA), only valid if
- * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
- */
- __le64 tsf_on_air_rise;
-} __packed;
+
+ union {
+ struct iwl_rx_mpdu_desc_v1 v1;
+ struct iwl_rx_mpdu_desc_v3 v3;
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
struct iwl_frame_release {
u8 baid;
@@ -587,4 +715,36 @@ struct iwl_ba_window_status_notif {
__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+/**
+ * struct iwl_rfh_queue_data - RX queue configuration
+ * @q_num: Q num
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @fr_bd_cb: DMA address of freeRB table
+ * @ur_bd_cb: DMA address of used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+ u8 q_num;
+ u8 enable;
+ __le16 reserved;
+ __le64 urbd_stts_wrptr;
+ __le64 fr_bd_cb;
+ __le64 ur_bd_cb;
+ __le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: DMA addresses per-queue
+ */
+struct iwl_rfh_queue_config {
+ u8 num_queues;
+ u8 reserved[3];
+ struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
+
#endif /* __iwl_fw_api_rx_h__ */
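iwl_rfh_queue_config above ends in a flexible array of per-queue iwl_rfh_queue_data entries, so the command's size depends on num_queues. One plausible allocation pattern for such a layout, sketched with stand-in types and malloc in place of the kernel allocator (the kernel would typically size this with struct_size()):

#include <stdlib.h>
#include <stdint.h>

struct queue_data {		/* stand-in for struct iwl_rfh_queue_data */
	uint8_t q_num;
	uint8_t enable;
	uint16_t reserved;
	uint64_t urbd_stts_wrptr;
	uint64_t fr_bd_cb;
	uint64_t ur_bd_cb;
	uint32_t fr_bd_wid;
};

struct queue_config {		/* stand-in for struct iwl_rfh_queue_config */
	uint8_t num_queues;
	uint8_t reserved[3];
	struct queue_data data[];
};

static struct queue_config *alloc_queue_config(unsigned int n)
{
	/* size = fixed header plus n trailing per-queue entries */
	struct queue_config *cmd =
		calloc(1, sizeof(*cmd) + n * sizeof(cmd->data[0]));

	if (cmd)
		cmd->num_queues = n;
	return cmd;
}

int main(void)
{
	struct queue_config *cmd = alloc_queue_config(4);

	if (cmd) {
		cmd->data[0].q_num = 0;
		cmd->data[0].enable = 1;
		free(cmd);
	}
	return 0;
}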
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index a2a40b515a3c..514b86123d3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_7 */
+/**
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @ttl: time to live - packet lifetime limit. The FW should drop the
+ * frame if this time has passed.
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen3 {
+ __le16 len;
+ __le16 flags;
+ __le32 offload_assist;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ __le64 ttl;
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_8 */
+
/*
* TX response related data
*/
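iwl_tx_cmd_gen3 above keeps the gen2 shape (length and flags ahead of the 802.11 header) but adds dram_info and a 64-bit ttl, and since the struct ends in a zero-length hdr[] member, the header bytes follow the fixed fields directly. A sketch of computing the resulting command size, using a packed mock of the layout rather than the real struct:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* mock of the fixed part of a gen3-style TX command */
struct tx_cmd_gen3 {
	uint16_t len;
	uint16_t flags;
	uint32_t offload_assist;
	uint8_t  dram_info[16];	/* placeholder for struct iwl_dram_sec_info */
	uint32_t rate_n_flags;
	uint64_t ttl;
	uint8_t  hdr[];		/* 802.11 header bytes start here */
} __attribute__((packed));

int main(void)
{
	size_t hdr_len = 26;	/* e.g. a QoS data frame header */

	/* command size = fixed fields + the 802.11 header that follows */
	printf("cmd size: %zu bytes\n",
	       offsetof(struct tx_cmd_gen3, hdr) + hdr_len);
	return 0;
}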
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
deleted file mode 100644
index 6f75985eea66..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "iwl-drv.h"
-#include "runtime.h"
-#include "fw/api/commands.h"
-#include "fw/api/alive.h"
-
-static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
-
- IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
- __le32_to_cpu(fseq->aux_read_fseq_ver),
- __le32_to_cpu(fseq->wifi_fseq_ver));
-}
-
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
-
- switch (cmd) {
- case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
- iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
- break;
- default:
- break;
- }
-}
-IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa283285fcbe..a31a42e673c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
- /* Pull RXF1 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
- /* Pull RXF2 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
- RXF_DIFF_FROM_PREV, 1);
- /* Pull LMAC2 RXF1 */
- if (fwrt->smem_cfg.num_lmacs > 1)
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
- LMAC2_PRPH_OFFSET, 2);
-
- /* Pull TXF data from LMAC1 */
- for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
- /* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
- iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
- 0, i);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+ /* Pull RXF1 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[0].rxfifo1_size, 0, 0);
+ /* Pull RXF2 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+ RXF_DIFF_FROM_PREV, 1);
+ /* Pull LMAC2 RXF1 */
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[1].rxfifo1_size,
+ LMAC2_PRPH_OFFSET, 2);
}
- /* Pull TXF data from LMAC2 */
- if (fwrt->smem_cfg.num_lmacs > 1) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ /* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans,
- TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
- i);
+ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
iwl_fwrt_dump_txf(fwrt, dump_data,
- cfg->lmac[1].txfifo_size[i],
- LMAC2_PRPH_OFFSET,
- i + cfg->num_txfifo_entries);
+ cfg->lmac[0].txfifo_size[i], 0, i);
+ }
+
+ /* Pull TXF data from LMAC2 */
+ if (fwrt->smem_cfg.num_lmacs > 1) {
+ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+ i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(fwrt->trans,
+ TXF_LARC_NUM +
+ LMAC2_PRPH_OFFSET, i);
+ iwl_fwrt_dump_txf(fwrt, dump_data,
+ cfg->lmac[1].txfifo_size[i],
+ LMAC2_PRPH_OFFSET,
+ i + cfg->num_txfifo_entries);
+ }
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
@@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
fifo_data_len = 0;
- /* Count RXF2 size */
- if (mem_cfg->rxfifo2_size) {
- /* Add header info */
- fifo_data_len += mem_cfg->rxfifo2_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
-
- /* Count RXF1 sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- if (!mem_cfg->lmac[i].rxfifo1_size)
- continue;
-
- /* Add header info */
- fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
- /* Count TXF sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- int j;
+ /* Count RXF2 size */
+ if (mem_cfg->rxfifo2_size) {
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->rxfifo2_size +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
- for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
- if (!mem_cfg->lmac[i].txfifo_size[j])
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
fifo_data_len +=
- mem_cfg->lmac[i].txfifo_size[j] +
+ mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ size_t fifo_const_len = sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries;
+ j++) {
+ if (!mem_cfg->lmac[i].txfifo_size[j])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ fifo_const_len +
+ mem_cfg->lmac[i].txfifo_size[j];
+ }
+ }
+ }
+
+ if ((fwrt->fw->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Make room for PRPH registers */
- if (!fwrt->trans->cfg->gen2) {
+ if (!fwrt->trans->cfg->gen2 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
i++) {
/* The range includes both boundaries */
@@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
if (!fwrt->trans->cfg->gen2 &&
- fwrt->trans->cfg->mq_rx_supported) {
+ fwrt->trans->cfg->mq_rx_supported &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
@@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
}
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) +
- sizeof(*dump_data) * 3 +
- sizeof(*dump_smem_cfg) +
fifo_data_len +
prph_len +
- radio_len +
- sizeof(*dump_info);
-
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
- /* Make room for MEM segments */
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- le32_to_cpu(fw_dbg_mem[i].len);
+ radio_len;
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ file_len += sizeof(*dump_data) + sizeof(*dump_info);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+ file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ /* Make room for the SMEM, if it exists */
+ if (smem_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ smem_len;
+
+ /* Make room for the secondary SRAM, if it exists */
+ if (sram2_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ sram2_len;
+
+ /* Make room for MEM segments */
+ for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ le32_to_cpu(fw_dbg_mem[i].len);
+ }
}
/* Make room for fw's virtual image pages, if it exists */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
file_len += fwrt->num_of_paging_blk *
@@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
- if (fwrt->dump.desc)
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
- if (!fwrt->fw->n_dbg_mem_tlv)
- file_len += sram_len + sizeof(*dump_mem);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
+ !fwrt->fw->n_dbg_mem_tlv)
+ file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
dump_file = vzalloc(file_len);
if (!dump_file) {
@@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
- dump_data->len = cpu_to_le32(sizeof(*dump_info));
- dump_info = (void *)dump_data->data;
- dump_info->device_family =
- fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
- dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
- memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
- sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
- sizeof(dump_info->dev_human_readable));
- strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
- sizeof(dump_info->bus_human_readable));
-
- dump_data = iwl_fw_error_next_data(dump_data);
-
- /* Dump shared memory configuration */
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
- dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
- dump_smem_cfg = (void *)dump_data->data;
- dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
- dump_smem_cfg->num_txfifo_entries =
- cpu_to_le32(mem_cfg->num_txfifo_entries);
- for (i = 0; i < MAX_NUM_LMAC; i++) {
- int j;
-
- for (j = 0; j < TX_FIFO_MAX_NUM; j++)
- dump_smem_cfg->lmac[i].txfifo_size[j] =
- cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
- dump_smem_cfg->lmac[i].rxfifo1_size =
- cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
- }
- dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
- dump_smem_cfg->internal_txfifo_addr =
- cpu_to_le32(mem_cfg->internal_txfifo_addr);
- for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
- dump_smem_cfg->internal_txfifo_size[i] =
- cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+ dump_data->len = cpu_to_le32(sizeof(*dump_info));
+ dump_info = (void *)dump_data->data;
+ dump_info->device_family =
+ fwrt->trans->cfg->device_family ==
+ IWL_DEVICE_FAMILY_7000 ?
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+ dump_info->hw_step =
+ cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+ memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+ sizeof(dump_info->fw_human_readable));
+ strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+ sizeof(dump_info->dev_human_readable) - 1);
+ strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+ sizeof(dump_info->bus_human_readable) - 1);
+
+ dump_data = iwl_fw_error_next_data(dump_data);
}
- dump_data = iwl_fw_error_next_data(dump_data);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ /* Dump shared memory configuration */
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+ dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+ dump_smem_cfg = (void *)dump_data->data;
+ dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+ dump_smem_cfg->num_txfifo_entries =
+ cpu_to_le32(mem_cfg->num_txfifo_entries);
+ for (i = 0; i < MAX_NUM_LMAC; i++) {
+ int j;
+ u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+ for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+ dump_smem_cfg->lmac[i].txfifo_size[j] =
+ cpu_to_le32(txf_size[j]);
+ dump_smem_cfg->lmac[i].rxfifo1_size =
+ cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+ }
+ dump_smem_cfg->rxfifo2_size =
+ cpu_to_le32(mem_cfg->rxfifo2_size);
+ dump_smem_cfg->internal_txfifo_addr =
+ cpu_to_le32(mem_cfg->internal_txfifo_addr);
+ for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+ dump_smem_cfg->internal_txfifo_size[i] =
+ cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ }
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
/* We only dump the FIFOs if the FW is in error state */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
@@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
iwl_read_radio_regs(fwrt, &dump_data);
}
- if (fwrt->dump.desc) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
fwrt->dump.desc->len);
@@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (monitor_dump_only)
goto dump_trans_data;
- if (!fwrt->fw->n_dbg_mem_tlv) {
+ if (!fwrt->fw->n_dbg_mem_tlv &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
bool success;
+ if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
+ break;
+
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (smem_len) {
+ if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
@@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (sram2_len) {
+ if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
@@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Dump fw's virtual image */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
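The dbg.c hunks above wrap every dump section, both the size accounting and the data collection, in a test against fw->dbg_dump_mask, so firmware can opt individual sections out via a TLV while the default all-ones mask preserves the old behavior. A minimal sketch of the recurring pattern (the helper name is illustrative, not part of the patch):

    /* Hypothetical helper showing the gating pattern used throughout
     * iwl_fw_error_dump(): a section is sized and collected only if
     * its bit is set in the firmware-provided mask. */
    static bool iwl_fw_dump_wants(const struct iwl_fw *fw,
                                  enum iwl_fw_error_dump_type type)
    {
        return fw->dbg_dump_mask & BIT(type);
    }

    /* usage (illustrative):
     *   if (iwl_fw_dump_wants(fwrt->fw, IWL_FW_ERROR_DUMP_RXF))
     *       iwl_fwrt_dump_rxf(fwrt, dump_data, ...);
     */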
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9d939cbaf6c6..bbf2b265a06a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
+
+ /* TLVs 0x1000-0x2000 are for internal driver usage */
+ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
};
struct iwl_ucode_tlv {
@@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
- * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
-/**
- * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_fw_gscan_capabilities {
- __le32 max_scan_cache_size;
- __le32 max_scan_buckets;
- __le32 max_ap_cache_per_scan;
- __le32 max_rssi_sample_size;
- __le32 max_scan_reporting_threshold;
- __le32 max_hotlist_aps;
- __le32 max_significant_change_aps;
- __le32 max_bssid_history_entries;
- __le32 max_hotlist_ssids;
- __le32 max_number_epno_networks;
- __le32 max_number_epno_networks_by_ssid;
- __le32 max_number_of_white_listed_ssid;
- __le32 max_number_of_black_listed_ssid;
-} __packed;
-
#endif /* __iwl_fw_file_h__ */
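IWL_UCODE_TLV_FW_DBG_DUMP_LST is deliberately placed in the 0x1000-0x2000 range reserved for driver-internal use, so it cannot collide with TLV ids assigned to the firmware file format proper. Its payload is a single little-endian u32 bitmask over the IWL_FW_ERROR_DUMP_* section types; a hedged sketch of a mask that restricts dumping to the FIFOs:

    /* Sketch only: a dump-list payload limiting the error dump to
     * RX and TX FIFO sections (bit positions taken from the
     * IWL_FW_ERROR_DUMP_* enum). */
    __le32 dump_lst = cpu_to_le32(BIT(IWL_FW_ERROR_DUMP_RXF) |
                                  BIT(IWL_FW_ERROR_DUMP_TXF));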
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4912382b6af..0861b97c4233 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -193,41 +193,6 @@ struct iwl_fw_cscheme_list {
} __packed;
/**
- * struct iwl_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_gscan_capabilities {
- u32 max_scan_cache_size;
- u32 max_scan_buckets;
- u32 max_ap_cache_per_scan;
- u32 max_rssi_sample_size;
- u32 max_scan_reporting_threshold;
- u32 max_hotlist_aps;
- u32 max_significant_change_aps;
- u32 max_bssid_history_entries;
- u32 max_hotlist_ssids;
- u32 max_number_epno_networks;
- u32 max_number_epno_networks_by_ssid;
- u32 max_number_of_white_listed_ssid;
- u32 max_number_of_black_listed_ssid;
-};
-
-/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
@@ -298,7 +263,7 @@ struct iwl_fw {
size_t n_dbg_mem_tlv;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
- struct iwl_gscan_capabilities gscan_capa;
+ u32 dbg_dump_mask;
};
static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index d8db1dd100b0..ed23367f7088 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb);
-
#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index fb4b6442b4d7..ff85d69c2a8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
return;
pkt = cmd.resp_pkt;
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c503b26793f6..12fddcf15bab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -93,6 +93,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
+ IWL_DEVICE_FAMILY_22560,
};
/*
@@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
+ * @max_tfd_queue_size: max number of entries in tfd queue.
*/
struct iwl_base_params {
unsigned int wd_timeout;
@@ -191,6 +193,7 @@ struct iwl_base_params {
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
+ u32 max_tfd_queue_size; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
@@ -551,6 +554,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
@@ -558,17 +562,23 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
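Several hunks in this series relax device-family checks from == IWL_DEVICE_FAMILY_22000 to >=. That is only safe because enum iwl_device_family is declared in ascending hardware order, which is why IWL_DEVICE_FAMILY_22560 is appended directly after 22000 above. The smem.c change earlier in the series is a typical use:

    /* Relies on the declaration order of enum iwl_device_family;
     * any new family must keep the list ascending. */
    if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
        iwl_parse_shared_mem_22000(fwrt, pkt);  /* 22000 and 22560 */
    else
        iwl_parse_shared_mem(fwrt, pkt);        /* older families */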
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
new file mode 100644
index 000000000000..ebea99189ca9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -0,0 +1,286 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_context_info_file_gen3_h__
+#define __iwl_context_info_file_gen3_h__
+
+#include "iwl-context-info.h"
+
+#define CSR_CTXT_INFO_BOOT_CTRL 0x0
+#define CSR_CTXT_INFO_ADDR 0x118
+#define CSR_IML_DATA_ADDR 0x120
+#define CSR_IML_SIZE_ADDR 0x128
+#define CSR_IML_RESP_ADDR 0x12c
+
+/* Set bit for enabling automatic function boot */
+#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
+/* Set bit for initiating function boot */
+#define CSR_AUTO_FUNC_INIT BIT(7)
+
+/**
+ * enum iwl_prph_scratch_mtr_format - tfd size configuration
+ * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+enum iwl_prph_scratch_mtr_format {
+ IWL_PRPH_MTR_FORMAT_16B = 0x0,
+ IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+ IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+ IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+};
+
+/**
+ * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ * in hwm config.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ * multicomm.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
+ * completion descriptor, 1 for responses (legacy)
+ * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ * 3: 256 bit.
+ */
+enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
+ IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
+ IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
+ IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
+ IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
+ IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
+ IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+};
+
+/*
+ * struct iwl_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_version {
+ __le16 mac_id;
+ __le16 version;
+ __le16 size;
+ __le16 reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwl_prph_scratch_control - control structure
+ * @control_flags: context information flags see &enum iwl_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_control {
+ __le32 control_flags;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwl_prph_scratch_ror_cfg - ror config
+ * @ror_base_addr: ror start address
+ * @ror_size: ror size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_ror_cfg {
+ __le64 ror_base_addr;
+ __le32 ror_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_hwm_cfg {
+ __le64 hwm_base_addr;
+ __le32 hwm_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_rbd_cfg {
+ __le64 free_rbd_addr;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @ror_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwl_prph_scratch_ctrl_cfg {
+ struct iwl_prph_scratch_version version;
+ struct iwl_prph_scratch_control control;
+ struct iwl_prph_scratch_ror_cfg ror_cfg;
+ struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+ struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwl_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch {
+ struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+ __le32 reserved[16];
+ struct iwl_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwl_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwl_prph_info {
+ __le32 boot_stage_mirror;
+ __le32 ipc_status_mirror;
+ __le32 sleep_notif;
+ __le32 reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwl_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ * capability csr register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ * start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ * start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ * start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ * start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ * transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ * completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ * after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwl_context_info_gen3 {
+ __le16 version;
+ __le16 size;
+ __le32 config;
+ __le64 prph_info_base_addr;
+ __le64 cr_head_idx_arr_base_addr;
+ __le64 tr_tail_idx_arr_base_addr;
+ __le64 cr_tail_idx_arr_base_addr;
+ __le64 tr_head_idx_arr_base_addr;
+ __le16 cr_idx_arr_size;
+ __le16 tr_idx_arr_size;
+ __le64 mtr_base_addr;
+ __le64 mcr_base_addr;
+ __le16 mtr_size;
+ __le16 mcr_size;
+ __le16 mtr_doorbell_vec;
+ __le16 mcr_doorbell_vec;
+ __le16 mtr_msi_vec;
+ __le16 mcr_msi_vec;
+ u8 mtr_opt_header_size;
+ u8 mtr_opt_footer_size;
+ u8 mcr_opt_header_size;
+ u8 mcr_opt_footer_size;
+ __le16 msg_rings_ctrl_flags;
+ __le16 prph_info_msi_vec;
+ __le64 prph_scratch_base_addr;
+ __le32 prph_scratch_size;
+ __le32 reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_gen3_h__ */
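The new header only defines the DMA-visible layout; iwl_pcie_ctxt_info_gen3_init(), declared at the bottom, is expected to allocate these structures in coherent memory, publish their addresses through the CSR_CTXT_INFO_* registers, and trigger the automatic function boot. A rough sketch of that handshake, with error paths trimmed and the version value assumed:

    /* Rough sketch of the gen3 boot handshake (not the actual
     * implementation): allocate the context info coherently, point
     * the device at it, then kick the automatic function boot. */
    struct iwl_context_info_gen3 *ctxt;
    dma_addr_t phys;

    ctxt = dma_alloc_coherent(trans->dev, sizeof(*ctxt), &phys,
                              GFP_KERNEL);
    if (!ctxt)
        return -ENOMEM;

    ctxt->version = cpu_to_le16(1);              /* assumed value */
    ctxt->size = cpu_to_le16(sizeof(*ctxt) / 4); /* size is in DWs */

    iwl_write64(trans, CSR_CTXT_INFO_ADDR, phys);
    iwl_write32(trans, CSR_CTXT_INFO_BOOT_CTRL,
                CSR_AUTO_FUNC_BOOT_ENA | CSR_AUTO_FUNC_INIT);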
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index b870c0986744..4b6fdf3b15fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,5 +201,8 @@ struct iwl_context_info {
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram);
#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index ba971d3946e2..9019de99f077 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -339,6 +339,9 @@ enum {
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
+/* HW_RF CHIP STEP */
+#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
+
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -592,6 +595,8 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
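CSR_HW_RF_STEP() extracts the nibble directly below the chip-id field, and the aliased MSIX_HW_INT_CAUSES_REG_IPC reuses BIT(1) because, on the newer devices this series targets, the wakeup cause is presumably repurposed for IPC. A worked example of the two RF-id extractors (the input value is illustrative):

    /* For an illustrative hw_rf_id of 0x00105a10:
     *   CSR_HW_RF_ID_TYPE_CHIP_ID -> (0x00105a10 >> 12) & 0xFFF = 0x105
     *   CSR_HW_RF_STEP            -> (0x00105a10 >> 8)  & 0xF   = 0xa */
    u32 step = CSR_HW_RF_STEP(trans->hw_rf_id);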
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c59ce4f8a5ed..c0631255aee7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
return 0;
}
-static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
- const u32 len)
-{
- struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
- struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
-
- capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
- capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
- capa->max_ap_cache_per_scan =
- le32_to_cpu(fw_capa->max_ap_cache_per_scan);
- capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
- capa->max_scan_reporting_threshold =
- le32_to_cpu(fw_capa->max_scan_reporting_threshold);
- capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
- capa->max_significant_change_aps =
- le32_to_cpu(fw_capa->max_significant_change_aps);
- capa->max_bssid_history_entries =
- le32_to_cpu(fw_capa->max_bssid_history_entries);
- capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
- capa->max_number_epno_networks =
- le32_to_cpu(fw_capa->max_number_epno_networks);
- capa->max_number_epno_networks_by_ssid =
- le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
- capa->max_number_of_white_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
- capa->max_number_of_black_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
-}
-
/*
* Gets uCode section from tlv.
*/
@@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
u32 build, paging_mem_size;
int num_of_cpus;
bool usniffer_req = false;
- bool gscan_capa = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
break;
}
+ case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+ if (tlv_len != sizeof(u32)) {
+ IWL_ERR(drv,
+ "dbg lst mask size incorrect, skip\n");
+ break;
+ }
+
+ drv->fw.dbg_dump_mask =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ }
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
*usniffer_images = true;
iwl_store_ucode_sec(pieces, tlv_data,
@@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
paging_mem_size;
break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
- /*
- * Don't return an error in case of a shorter tlv_len
- * to enable loading of FW that has an old format
- * of GSCAN capabilities TLV.
- */
- if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
- break;
-
- iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
- gscan_capa = true;
+ /* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
@@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
- /*
- * If ucode advertises that it supports GSCAN but GSCAN
- * capabilities TLV is not present, or if it has an old format,
- * warn and continue without GSCAN.
- */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
- !gscan_capa) {
- IWL_DEBUG_INFO(drv,
- "GSCAN is supported but capabilities TLV is unavailable\n");
- __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
- capa->_capa);
- }
-
return 0;
invalid_tlv_len:
@@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+ /* dump all fw memory areas by default */
+ fw->dbg_dump_mask = 0xffffffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@@ -1787,7 +1748,8 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
- "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)");
+ "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+ "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
@@ -1856,3 +1818,7 @@ module_param_named(remove_when_gone,
0444);
MODULE_PARM_DESC(remove_when_gone,
"Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
+
+module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
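With the GSCAN capability parsing retired, iwl_parse_tlv_firmware() gains the dump-list TLV, and iwl_req_fw_callback() seeds dbg_dump_mask with all ones so images that do not carry the TLV keep dumping every section. The effective mask resolution, sketched:

    /* Sketch of the mask resolution in the TLV parser: the all-ones
     * default keeps every dump section enabled; a well-formed TLV
     * narrows it, and a malformed one is skipped with an error. */
    u32 mask = 0xffffffff;                       /* default: dump all */

    if (tlv_type == IWL_UCODE_TLV_FW_DBG_DUMP_LST &&
        tlv_len == sizeof(u32))
        mask = le32_to_cpup((__le32 *)tlv_data); /* firmware override */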
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 777f5df8a0c6..a4c96215933b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
if ((cfg->mq_rx_supported &&
- iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
+ iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 11789ffb6512..df0e9ffff706 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* RXF to DRAM.
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
-#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS_GEN3 0xA07824
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
-#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG_GEN3 0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -643,10 +645,13 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
+ TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
@@ -753,7 +758,7 @@ struct iwl_tfh_tfd {
* For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
- * For 22000 and on:
+ * For 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
@@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
+/**
+ * struct iwl_gen3_bc_tbl - scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
+ */
+struct iwl_gen3_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
+
#endif /* !__iwl_fh_h__ */
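TFD_QUEUE_CB_SIZE() stores the ring length as a power-of-two exponent biased by 3, which keeps the hardware field compact even as gen3 grows the ring from 256 to 65536 entries. Worked values:

    /* TFD_QUEUE_CB_SIZE(x) = ilog2(x) - 3:
     *   256   entries -> 8  - 3 = 5
     *   65536 entries -> 16 - 3 = 13 */
    int cb_size = TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX_GEN3); /* 13 */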
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index a7dd8a8cddf9..97072cf75bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -17,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -31,6 +30,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,8 @@ enum iwl_amsdu_size {
IWL_AMSDU_4K = 1,
IWL_AMSDU_8K = 2,
IWL_AMSDU_12K = 3,
+ /* Add 2K at the end to avoid breaking current API */
+ IWL_AMSDU_2K = 4,
};
enum iwl_uapsd_disable {
@@ -144,6 +146,10 @@ struct iwl_mod_params {
bool lar_disable;
bool fw_monitor;
bool disable_11ac;
+ /**
+ * @disable_11ax: disable HE capabilities, default = false
+ */
+ bool disable_11ax;
bool remove_when_gone;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b815ba38dbdb..b4c3a957c102 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
+ case IWL_AMSDU_2K:
+ if (cfg->mq_rx_supported)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ else
+ WARN(1, "RB size of 2K is not supported by this device\n");
+ break;
case IWL_AMSDU_4K:
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
@@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
+static struct ieee80211_sband_iftype_data iwl_he_capa = {
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+ .phy_cap_info[3] =
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+ .phy_cap_info[4] =
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+ .phy_cap_info[5] =
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
+ .phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+ .phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_7,
+ .phy_cap_info[8] =
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
+ },
+ /*
+ * Set default Tx/Rx HE MCS NSS Support field. Indicate support
+ * for up to 2 spatial streams and all MCS, without any special
+ * cases
+ */
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ /*
+ * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
+ * to 7
+ */
+ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
+ },
+};
+
+static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+ u8 tx_chains, u8 rx_chains)
+{
+ if (sband->band == NL80211_BAND_2GHZ ||
+ sband->band == NL80211_BAND_5GHZ)
+ sband->iftype_data = &iwl_he_capa;
+ else
+ return;
+
+ sband->n_iftype_data = 1;
+
+ /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
+ if ((tx_chains & rx_chains) != ANT_AB) {
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
+ }
+}
+
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_ch_flags, u8 tx_chains,
@@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
@@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
@@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
nvm->sku_cap_11n_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+ nvm->sku_cap_11ax_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
nvm->sku_cap_band_24ghz_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
nvm->sku_cap_band_52ghz_enable =
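The 0xfffa values in he_mcs_nss_supp are 2-bit-per-stream maps: 0xfffa is 0b1111111111111010, so streams 1 and 2 advertise value 2 (MCS 0-11) while streams 3-8 advertise 3 (not supported), matching the comment about two spatial streams. A decoding sketch (the helper is illustrative, not part of the patch):

    /* Hypothetical decoder for the HE MCS map: 2 bits per spatial
     * stream, stream 1 in the low bits; 3 means "not supported". */
    static u8 he_mcs_for_nss(u16 mcs_map, int nss) /* nss is 1-based */
    {
        return (mcs_map >> (2 * (nss - 1))) & 0x3;
    }

    /* he_mcs_for_nss(0xfffa, 1) == 2 (MCS 0-11)
     * he_mcs_for_nss(0xfffa, 3) == 3 (not supported) */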
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1b9c627ee34d..279dd7b7a3fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -350,6 +350,8 @@ static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
switch (rb_size) {
+ case IWL_AMSDU_2K:
+ return get_order(2 * 1024);
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
@@ -438,6 +440,20 @@ struct iwl_trans_txq_scd_cfg {
};
/**
+ * struct iwl_trans_rxq_dma_data - RX queue DMA data
+ * @fr_bd_cb: DMA address of free BD cyclic buffer
+ * @fr_bd_wid: Initial write index of the free BD cyclic buffer
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @ur_bd_cb: DMA address of used BD cyclic buffer
+ */
+struct iwl_trans_rxq_dma_data {
+ u64 fr_bd_cb;
+ u32 fr_bd_wid;
+ u64 urbd_stts_wrptr;
+ u64 ur_bd_cb;
+};
+
+/**
* struct iwl_trans_ops - transport specific operations
*
* All the handlers MUST be implemented
@@ -557,6 +573,8 @@ struct iwl_trans_ops {
int cmd_id, int size,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
+ int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
@@ -753,6 +771,7 @@ struct iwl_trans {
const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+ u32 dbg_dump_mask;
u8 dbg_dest_reg_num;
enum iwl_plat_pm_mode system_pm_mode;
@@ -945,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
cfg, queue_wdg_timeout);
}
+static inline int
+iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
+ return -ENOTSUPP;
+
+ return trans->ops->rxq_dma_data(trans, queue, data);
+}
+
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
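iwl_trans_get_rb_size_order() maps the AMSDU setting to a page-allocation order, so the new IWL_AMSDU_2K case shares order-0 pages with 4K on systems with 4 KiB pages. Worked values, assuming PAGE_SIZE == 4096:

    /* get_order() with 4 KiB pages:
     *   2K  -> order 0 (sub-page, still one page)
     *   4K  -> order 0
     *   8K  -> order 1
     *   12K -> order 2 (rounded up to 16K) */
    int order = iwl_trans_get_rb_size_order(IWL_AMSDU_2K); /* 0 */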
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3fcf489f3120..79bdae994822 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
+ /*
+ * TODO: this is needed because the firmware is not stopping
+ * the recording automatically before entering D3. This can
+ * be removed once the FW starts doing that.
+ */
+ iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1c4178f20441..05b77419953c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc;
int bin_len = count / 2;
int ret = -EINVAL;
+ size_t mpdu_cmd_hdr_size =
+ (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
@@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
goto out;
/* avoid invalid memory access */
- if (bin_len < sizeof(*pkt) + sizeof(*desc))
+ if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
goto out;
/* check this is RX packet */
@@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
/* check the length in metadata matches actual received length */
desc = (void *)pkt->data;
if (le16_to_cpu(desc->mpdu_len) !=
- (bin_len - sizeof(*desc) - sizeof(*pkt)))
+ (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
goto out;
local_bh_disable();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 866c91c923be..6bb1a99a197a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+ int i, num_queues, size;
+ struct iwl_rfh_queue_config *cmd;
+
+ /* Do not configure default queue, it is configured via context info */
+ num_queues = mvm->trans->num_rx_queues - 1;
+
+ size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->num_queues = num_queues;
+
+ for (i = 0; i < num_queues; i++) {
+ struct iwl_trans_rxq_dma_data data;
+
+ cmd->data[i].q_num = i + 1;
+ iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+ cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+ cmd->data[i].urbd_stts_wrptr =
+ cpu_to_le64(data.urbd_stts_wrptr);
+ cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+ cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ RFH_QUEUE_CONFIG_CMD),
+ 0, size, cmd);
+}
+
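iwl_configure_rxq() above builds a variable-length host command: a fixed header followed by one iwl_rfh_queue_data record per RX queue, skipping queue 0 because the default queue is configured through the context info. A hedged userspace sketch of the flexible-array sizing pattern (illustrative field set; real wire layout, packing and endianness omitted):

#include <stdint.h>
#include <stdlib.h>

struct queue_data { uint32_t q_num; uint64_t fr_bd_cb; };

struct queue_config_cmd {
	uint8_t num_queues;
	struct queue_data data[];	/* flexible array member */
};

/* equivalent of the open-coded sizeof(*cmd) + n * sizeof(...) above;
 * the kernel also offers struct_size(cmd, data, n) for this */
static struct queue_config_cmd *alloc_queue_cmd(int num_queues, size_t *size)
{
	*size = sizeof(struct queue_config_cmd) +
		num_queues * sizeof(struct queue_data);
	return calloc(1, *size);
}

Two things worth noting about the function as shown: it returns the result of iwl_mvm_send_cmd_pdu() directly, so the kzalloc'd cmd buffer is never freed, and the per-queue return value of iwl_trans_get_rxq_dma_data() is ignored; a user of this pattern would normally check the hook's result and kfree() the buffer once the command has been handed off.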
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -301,7 +336,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Init RSS configuration */
- /* TODO - remove 22000 disablement when we have RXQ config API */
- if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ ret = iwl_configure_rxq(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8ba16fc24e3a..b3fd20502abb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a6e072234398..b15b0d84bb7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -36,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
- u8 buf_size = params->buf_size;
+ u16 buf_size = params->buf_size;
bool amsdu = params->amsdu;
u16 timeout = params->timeout;
@@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
iwl_mvm_mu_mimo_iface_iterator, notif);
}
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+ /*
+ * If bit_num > 5, we have to combine bits with next byte.
+ * Calculate how many bits we need to take from current byte (called
+ * here "residue_bits"), and add them to bits from next byte.
+ */
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
+
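iwl_mvm_he_get_ppe_val() extracts 3-bit PPET fields (IEEE80211_PPE_THRES_INFO_PPET_SIZE) packed LSB-first; when a field starts at bit 6 or 7 it straddles a byte boundary, so the low residue_bits come from the current byte and the remaining high bits from the next one. A standalone copy with a worked example (bit position 7: LSB from byte 0, two MSBs from byte 1):

#include <assert.h>
#include <stdint.h>

#define PPET_SIZE 3	/* IEEE80211_PPE_THRES_INFO_PPET_SIZE */

static uint8_t get_ppe_val(const uint8_t *ppe, uint8_t pos)
{
	uint8_t byte_num = pos / 8, bit_num = pos % 8, residue;

	if (bit_num <= 8 - PPET_SIZE)	/* field fits inside one byte */
		return (ppe[byte_num] >> bit_num) & ((1 << PPET_SIZE) - 1);

	residue = 8 - bit_num;		/* bits taken from the current byte */
	return ((ppe[byte_num + 1] & ((1 << (PPET_SIZE - residue)) - 1))
			<< residue) |
	       ((ppe[byte_num] >> bit_num) & ((1 << residue) - 1));
}

int main(void)
{
	/* field at bit 7: bit 7 of byte 0 is the LSB (1), bits 0-1 of
	 * byte 1 are the upper bits (0b10) -> (0b10 << 1) | 1 = 5 */
	uint8_t ppe[2] = { 0x80, 0x02 };

	assert(get_ppe_val(ppe, 7) == 5);
	return 0;
}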
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 sta_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ .sta_id = sta_id,
+ .tid_limit = IWL_MAX_TID_COUNT,
+ .bss_color = vif->bss_conf.bss_color,
+ .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+ .frame_time_rts_th =
+ cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+ };
+ struct ieee80211_sta *sta;
+ u32 flags;
+ int i;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+ if (IS_ERR(sta)) {
+ rcu_read_unlock();
+ WARN(1, "Can't find STA to configure HE\n");
+ return;
+ }
+
+ if (!sta->he_cap.has_he) {
+ rcu_read_unlock();
+ return;
+ }
+
+ flags = 0;
+
+ /* HTC flags */
+ if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_HTC_HE)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+ if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+ else if (link_adap == 3)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+ }
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+ /* If PPE Thresholds exist, parse them into a FW-familiar format */
+ if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 ru_index_bitmap =
+ (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+ u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 ppe_pos_bit = 7; /* Starting after PPE header */
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 bw;
+
+ for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
+ ru_index_tmp >>= 1;
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ }
+ }
+
+ flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+ rcu_read_unlock();
+
+ /* Mark MU EDCA as enabled, unless it is missing on some AC */
+ flags |= STA_CTXT_HE_MU_EDCA_CW;
+ for (i = 0; i < AC_NUM; i++) {
+ struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &mvmvif->queue_params[i].mu_edca_param_rec;
+
+ if (!mvmvif->queue_params[i].mu_edca) {
+ flags &= ~STA_CTXT_HE_MU_EDCA_CW;
+ break;
+ }
+
+ sta_ctxt_cmd.trig_based_txf[i].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ sta_ctxt_cmd.trig_based_txf[i].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ sta_ctxt_cmd.trig_based_txf[i].aifsn =
+ cpu_to_le16(mu_edca->aifsn);
+ sta_ctxt_cmd.trig_based_txf[i].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
+
+ if (vif->bss_conf.multi_sta_back_32bit)
+ flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+ if (vif->bss_conf.ack_enabled)
+ flags |= STA_CTXT_HE_ACK_ENABLED;
+
+ if (vif->bss_conf.uora_exists) {
+ flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
+ sta_ctxt_cmd.rand_alloc_ecwmin =
+ vif->bss_conf.uora_ocw_range & 0x7;
+ sta_ctxt_cmd.rand_alloc_ecwmax =
+ (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+ }
+
+ /* TODO: support Multi BSSID IE */
+
+ sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
+ DATA_PATH_GROUP, 0),
+ 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+ IWL_ERR(mvm, "Failed to configure FW for HE!\n");
+}
+
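The MU EDCA block above unpacks ecw_min_max into two nibbles: ECWmin in bits 0-3 and ECWmax in bits 4-7. These are exponents, not window sizes; per 802.11 the actual contention window is CW = 2^ECW - 1. A trivial sketch of the decode:

#include <stdint.h>

static inline uint8_t ecw_min(uint8_t ecw_min_max) { return ecw_min_max & 0xf; }
static inline uint8_t ecw_max(uint8_t ecw_min_max) { return ecw_min_max >> 4; }
/* per 802.11: the contention window derived from the exponent */
static inline uint16_t ecw_to_cw(uint8_t ecw) { return (1u << ecw) - 1; }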
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* beacon interval, which was not known when the station interface was
* added.
*/
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ }
/*
* If we're not associated yet, take the (new) BSSID before associating
@@ -4216,7 +4410,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (mvmsta->avg_energy) {
sinfo->signal_avg = mvmsta->avg_energy;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
if (!fw_has_capa(&mvm->fw->ucode_capa,
@@ -4240,11 +4434,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
mvmvif->beacon_stats.accu_num_beacons;
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
if (mvmvif->beacon_stats.avg_signal) {
/* firmware only reports a value after RXing a few beacons */
sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
}
unlock:
mutex_unlock(&mvm->mutex);
@@ -4364,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
- /* TODO - remove this when we have RXQ config API */
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
- qmask = BIT(0);
- if (notif->sync)
- atomic_set(&mvm->queue_sync_counter, 1);
- }
-
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6a4ba160c59e..b3987a0a7018 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -654,7 +654,7 @@ struct iwl_mvm_tcm {
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
- u8 buf_size;
+ u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ff1e518096c5..0e26619fb330 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -448,6 +448,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(STA_HE_CTXT_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -620,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
- trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
+ trans->rx_mpdu_cmd_hdr_size =
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
} else {
op_mode->ops = &iwl_mvm_ops;
trans->rx_mpdu_cmd_hdr_size =
@@ -703,11 +709,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
/* the hardware splits the A-MSDU */
- if (mvm->cfg->mq_rx_supported)
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ trans_cfg.rx_buf_size = IWL_AMSDU_2K;
+ /* TODO: remove when balanced power mode is supported by the fw */
+ iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+ } else if (mvm->cfg->mq_rx_supported) {
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ }
trans->wide_cmd_header = true;
- trans_cfg.bc_table_dword = true;
+ trans_cfg.bc_table_dword =
+ mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -738,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+ trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -1003,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
schedule_work(&mvm->async_handlers_wk);
- return;
+ break;
}
-
- iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
}
static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index b8b2b819e8e7..8169d1450b3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
+ const struct ieee80211_sta_he_cap *he_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ int i;
+
+ for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+
+ cmd->ht_rates[i][0] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+ cmd->ht_rates[i][1] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+ }
+}
+
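The HE RX MCS maps (rx_mcs_80/rx_mcs_160) carry 2 bits per spatial stream, and rs_fw_he_ieee80211_mcs_to_rs_mcs() turns each 2-bit code into a contiguous rate bitmap with BIT(highest + 1) - 1. A standalone sketch of the decode (codes follow the 802.11ax MCS-support encoding):

#include <stdint.h>

enum { MCS_0_7, MCS_0_9, MCS_0_11, MCS_NONE };

static uint16_t mcs_code_to_mask(uint16_t code)
{
	switch (code) {
	case MCS_0_7:  return (1u << 8)  - 1;	/* MCS 0..7  */
	case MCS_0_9:  return (1u << 10) - 1;	/* MCS 0..9  */
	case MCS_0_11: return (1u << 12) - 1;	/* MCS 0..11 */
	default:       return 0;		/* not supported */
	}
}

static uint16_t mcs_mask_for_stream(uint16_t mcs_map, int nss_idx)
{
	/* 2 bits per stream, LSB-first, as in the loop above */
	return mcs_code_to_mask((mcs_map >> (2 * nss_idx)) & 0x3);
}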
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
@@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
unsigned long supp; /* must be unsigned long for for_each_set_bit */
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
/* non HT rates */
supp = 0;
@@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->non_ht_rates = cpu_to_le16(supp);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
- if (vht_cap && vht_cap->vht_supported) {
+ /* HT/VHT rates */
+ if (he_cap && he_cap->has_he) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+ } else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap && ht_cap->ht_supported) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 642da10b0b7f..30cfd7d50bc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx += 1;
if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK ||
+ rate_n_flags & RATE_MCS_HE_MSK) {
idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
idx += IWL_RATE_MCS_0_INDEX;
@@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx++;
if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
return idx;
+ if ((rate_n_flags & RATE_MCS_HE_MSK) &&
+ (idx <= IWL_LAST_HE_RATE))
+ return idx;
} else {
/* legacy rate format, search for match in table */
@@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
[LQ_HT_MIMO2] = "HT MIMO",
[LQ_VHT_SISO] = "VHT SISO",
[LQ_VHT_MIMO2] = "VHT MIMO",
+ [LQ_HE_SISO] = "HE SISO",
+ [LQ_HE_MIMO2] = "HE MIMO",
};
if (type < LQ_NONE || type >= LQ_MAX)
@@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
/* Legacy */
if (!(ucode_rate & RATE_MCS_HT_MSK) &&
- !(ucode_rate & RATE_MCS_VHT_MSK)) {
+ !(ucode_rate & RATE_MCS_VHT_MSK) &&
+ !(ucode_rate & RATE_MCS_HE_MSK)) {
if (num_of_ant == 1) {
if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
@@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
return 0;
}
- /* HT or VHT */
+ /* HT, VHT or HE */
if (ucode_rate & RATE_MCS_SGI_MSK)
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
@@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
} else {
WARN_ON_ONCE(1);
}
+ } else if (ucode_rate & RATE_MCS_HE_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ rate->type = LQ_HE_SISO;
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d", rate->stbc, rate->bfer);
+ } else if (nss == 2) {
+ rate->type = LQ_HE_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
}
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
- !is_vht(rate));
+ !is_he(rate) && !is_vht(rate));
return 0;
}
@@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
if (!(rate & RATE_MCS_HT_MSK) &&
- !(rate & RATE_MCS_VHT_MSK)) {
+ !(rate & RATE_MCS_VHT_MSK) &&
+ !(rate & RATE_MCS_HE_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
@@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
mcs = rate & RATE_HT_MCS_INDEX_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK)
>> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HE_MSK) {
+ type = "HE";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
@@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
[IWL_RATE_MCS_7_INDEX] = "MCS7",
[IWL_RATE_MCS_8_INDEX] = "MCS8",
[IWL_RATE_MCS_9_INDEX] = "MCS9",
+ [IWL_RATE_MCS_10_INDEX] = "MCS10",
+ [IWL_RATE_MCS_11_INDEX] = "MCS11",
};
char *buff, *pos, *endpos;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index cffb8c852934..d2cf484e2b73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -144,8 +144,13 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
+/*
+ * FIXME - various places in firmware API still use u8,
+ * e.g. LQ command and SCD config command.
+ * This should be 256 instead.
+ */
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -162,6 +167,8 @@ enum iwl_table_type {
LQ_HT_MIMO2,
LQ_VHT_SISO, /* VHT types */
LQ_VHT_MIMO2,
+ LQ_HE_SISO, /* HE types */
+ LQ_HE_MIMO2,
LQ_MAX,
};
@@ -183,11 +190,16 @@ struct rs_rate {
#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
-#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
-#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
+#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
+ is_type_he_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
+ is_type_vht_mimo2(type) || is_type_he_mimo2(type))
#define is_type_mimo(type) (is_type_mimo2(type))
#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
@@ -201,6 +213,7 @@ struct rs_rate {
#define is_mimo(rate) is_type_mimo((rate)->type)
#define is_ht(rate) is_type_ht((rate)->type)
#define is_vht(rate) is_type_vht((rate)->type)
+#define is_he(rate) is_type_he((rate)->type)
#define is_a_band(rate) is_type_a_band((rate)->type)
#define is_g_band(rate) is_type_g_band((rate)->type)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 129c4c09648d..b53148f972a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -196,22 +198,31 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct sk_buff *skb, int queue,
struct ieee80211_sta *sta)
{
- if (iwl_mvm_check_pn(mvm, skb, queue, sta))
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
kfree_skb(skb);
- else
+ } else {
+ unsigned int radiotap_len = 0;
+
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
+ __skb_push(skb, radiotap_len);
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+ }
}
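The delivery path above relies on an skb trick used throughout this RX rework: radiotap HE headers are appended with skb_put_data() while the frame is being built, then hidden with __skb_pull() so the descriptor/crypto/duplicate logic keeps seeing the raw MPDU at skb->data, and finally re-exposed with __skb_push() right before ieee80211_rx_napi(). A userspace model showing that only the data pointer moves, not the bytes:

#include <assert.h>
#include <string.h>

struct fake_skb { unsigned char buf[256]; unsigned char *data; unsigned len; };

static void *put_data(struct fake_skb *s, const void *p, unsigned n)
{
	void *at = s->data + s->len;

	memcpy(at, p, n);
	s->len += n;
	return at;
}
static void pull(struct fake_skb *s, unsigned n) { s->data += n; s->len -= n; }
static void push(struct fake_skb *s, unsigned n) { s->data -= n; s->len += n; }

int main(void)
{
	struct fake_skb skb = { .len = 0 };
	char radiotap[8] = "RADIOTAP";

	skb.data = skb.buf;
	put_data(&skb, radiotap, sizeof(radiotap));
	pull(&skb, sizeof(radiotap));	/* hide: parsers see only the MPDU */
	assert(skb.len == 0);
	push(&skb, sizeof(radiotap));	/* re-expose before mac80211 */
	assert(memcmp(skb.data, "RADIOTAP", 8) == 0);
	return 0;
}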
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
- struct iwl_rx_mpdu_desc *desc,
- struct ieee80211_rx_status *rx_status)
+ struct ieee80211_rx_status *rx_status,
+ u32 rate_n_flags, int energy_a,
+ int energy_b)
{
- int energy_a, energy_b, max_energy;
- u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
+ int max_energy;
+ u32 rate_flags = rate_n_flags;
- energy_a = desc->energy_a;
energy_a = energy_a ? -energy_a : S8_MIN;
- energy_b = desc->energy_b;
energy_b = energy_b ? -energy_b : S8_MIN;
max_energy = max(energy_a, energy_b);
@@ -356,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
tid = IWL_MAX_TID_COUNT;
/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
- sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
dup_data->last_seq[tid] == hdr->seq_ctrl &&
@@ -850,17 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
- struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
+ struct ieee80211_hdr *hdr;
u32 len = le16_to_cpu(desc->mpdu_len);
- u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+ u32 rate_n_flags, gp2_on_air_rise;
u16 phy_info = le16_to_cpu(desc->phy_info);
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 crypt_len = 0;
+ u8 crypt_len = 0, channel, energy_a, energy_b;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 he_type = 0xffffffff;
+ /* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+ u64 he_phy_data = HE_PHY_DATA_INVAL;
+ size_t desc_size;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ channel = desc->v3.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ energy_a = desc->v3.energy_a;
+ energy_b = desc->v3.energy_b;
+ desc_size = sizeof(*desc);
+ } else {
+ rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ channel = desc->v1.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ energy_a = desc->v1.energy_a;
+ energy_b = desc->v1.energy_b;
+ desc_size = IWL_RX_DESC_SIZE_V1;
+ }
+
+ hdr = (void *)(pkt->data + desc_size);
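From here on the RX path handles two descriptor layouts: a common prefix followed by a version-specific part, where the v1 size is effectively "offset of the end of the v1 member" and the 802.11 header sits after whichever size the device family uses. A hedged sketch of that shape (abbreviated fields, not the real iwl_rx_mpdu_desc):

#include <stddef.h>
#include <stdint.h>

struct rx_desc_v1 { uint32_t rate_n_flags; uint8_t channel; };
struct rx_desc_v3 { uint32_t rate_n_flags; uint8_t channel; uint64_t extra; };

struct rx_mpdu_desc {
	uint16_t mpdu_len;
	uint16_t phy_info;
	union {
		struct rx_desc_v1 v1;
		struct rx_desc_v3 v3;
	};
};

/* userspace stand-in for the kernel's offsetofend(..., v1) */
#define RX_DESC_SIZE_V1 \
	(offsetof(struct rx_mpdu_desc, v1) + sizeof(struct rx_desc_v1))

static const uint8_t *mpdu_hdr(const uint8_t *pkt_data, int new_family)
{
	size_t desc_size = new_family ? sizeof(struct rx_mpdu_desc)
				      : RX_DESC_SIZE_V1;

	return pkt_data + desc_size;	/* 802.11 header follows the desc */
}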
/* Don't use dev_alloc_skb(); we'll have enough headroom once the
* ieee80211_hdr is pulled.
*/
@@ -882,6 +918,51 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
+ if (rate_n_flags & RATE_MCS_HE_MSK) {
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+ };
+ unsigned int radiotap_len = 0;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ radiotap_len += sizeof(known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_type == RATE_MCS_HE_TYPE_MU) {
+ he_mu = skb_put_data(skb, &mu_known,
+ sizeof(mu_known));
+ radiotap_len += sizeof(mu_known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+ }
+
+ /* temporarily hide the radiotap data */
+ __skb_pull(skb, radiotap_len);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
@@ -904,20 +985,80 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
- rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+ u64 tsf_on_air_rise;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+ else
+ tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+ rx_status->mactime = tsf_on_air_rise;
/* TSF as indicated by the firmware is at INA time */
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ } else if (he_type == RATE_MCS_HE_TYPE_SU) {
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+ he_phy_data))
+ he->data3 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ mvm->ampdu_ref++;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+ } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
}
- rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
- rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
- NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
+ rx_status->device_timestamp = gp2_on_air_rise;
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
- iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+ energy_b);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -925,6 +1066,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
+
+ if (he_phy_data != HE_PHY_DATA_INVAL &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |=
+ RX_FLAG_AMPDU_EOF_BIT;
+ }
}
}
@@ -1033,7 +1183,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- /* Set up the HT phy flags */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
break;
@@ -1048,6 +1197,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ if (rate_n_flags & RATE_MCS_HE_MSK &&
+ phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the he_mu pointer,
+ * i.e. we don't have the phy data (due to the bits
+ * being used for TSF). This shouldn't happen though
+ * as management frames where we need the TSF/timers
+ * are not transmitted in HE-MU, I think.
+ */
+ u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |=
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+ if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+ } else if (he) {
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+ }
+
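The RU switch above decodes the 802.11ax RU-allocation index into a size class plus an offset within that class (indices 0-36 are 26-tone RUs, 37-52 are 52-tone, and so on). The same mapping, table-driven, returning tone counts for illustration (the driver's switch fills radiotap/nl80211 enums instead):

#include <stdint.h>

struct ru_range { uint8_t first, last; uint16_t tones; };

static const struct ru_range ru_ranges[] = {
	{  0, 36,   26 }, { 37, 52,   52 }, { 53, 60,  106 },
	{ 61, 64,  242 }, { 65, 66,  484 }, { 67, 67,  996 },
	{ 68, 68, 1992 },	/* 2x996 */
};

/* returns the tone count and the offset of this RU inside its class */
static uint16_t ru_decode(uint8_t ru, uint8_t *offs)
{
	unsigned int i;

	for (i = 0; i < sizeof(ru_ranges) / sizeof(ru_ranges[0]); i++) {
		if (ru >= ru_ranges[i].first && ru <= ru_ranges[i].last) {
			*offs = ru - ru_ranges[i].first;
			return ru_ranges[i].tones;
		}
	}
	return 0;	/* out of range */
}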
if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1072,6 +1285,119 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else if (he) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+ if (rate_n_flags & RATE_MCS_BF_POS)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 1:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 2:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case 3:
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ switch (he_type) {
+ case RATE_MCS_HE_TYPE_SU: {
+ u16 val;
+
+ /* LTF syms correspond to streams */
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ switch (rx_status->nss) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 3:
+ case 4:
+ val = 2;
+ break;
+ case 5:
+ case 6:
+ val = 3;
+ break;
+ case 7:
+ case 8:
+ val = 4;
+ break;
+ default:
+ WARN_ONCE(1, "invalid nss: %d\n",
+ rx_status->nss);
+ val = 0;
+ }
+ he->data5 |=
+ le16_encode_bits(val,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ }
+ break;
+ case RATE_MCS_HE_TYPE_MU: {
+ u16 val;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_phy_data == HE_PHY_DATA_INVAL)
+ break;
+
+ val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+ he_phy_data);
+
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data5 |=
+ cpu_to_le16(FIELD_PREP(
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
+ val));
+ }
+ break;
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ case RATE_MCS_HE_TYPE_TRIG:
+ /* not supported yet */
+ break;
+ }
} else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9263b9aa8b72..18db1ed92d9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
- u16 ssn, u8 buf_size)
+ u16 ssn, u16 buf_size)
{
int i;
@@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (start) {
cmd.add_immediate_ba_tid = (u8) tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
- cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
} else {
cmd.remove_immediate_ba_tid = (u8) tid;
}
@@ -2559,7 +2559,7 @@ out:
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1c43ea8dd8cc..0fc211108149 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -412,7 +412,7 @@ struct iwl_mvm_sta {
u32 tfd_queue_msk;
u32 mac_id_n_color;
u16 tid_disable_agg;
- u8 max_agg_bufsize;
+ u16 max_agg_bufsize;
enum iwl_sta_type sta_type;
enum ieee80211_sta_state sta_state;
bool bt_reduced_txpower;
@@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index cf2591f2ac23..ff193dca2020 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Make sure we zero enough of dev_cmd */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = 0;
+ u32 rate_n_flags = 0;
+ u16 flags = 0;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
- cmd->offload_assist |= cpu_to_le16(offload_assist);
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
- /* Total # bytes to be transmitted */
- cmd->len = cpu_to_le16((u16)skb->len);
+ /* For data packets rate info comes from the fw */
+ if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+ }
- /* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
- if (!info->control.hw_key)
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+ cmd->offload_assist |= cpu_to_le32(offload_assist);
- /* For data packets rate info comes from the fw */
- if (ieee80211_is_data(hdr->frame_control) && sta)
- goto out;
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
- cmd->rate_n_flags =
- cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ } else {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ cmd->flags = cpu_to_le32(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ }
goto out;
}
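The TX rework above computes offload_assist, flags and rate_n_flags once, then branches only to store them into the gen2 or gen3 command layout, which differ in field widths (as the cpu_to_le16/32 conversions show: gen2 has a 16-bit offload_assist and 32-bit flags, gen3 the reverse). A minimal sketch of that compute-once/fill-per-layout split (illustrative structs, endianness handling omitted):

#include <stdint.h>

struct tx_cmd_gen2 { uint16_t offload_assist; uint32_t flags; uint32_t rate_n_flags; };
struct tx_cmd_gen3 { uint32_t offload_assist; uint16_t flags; uint32_t rate_n_flags; };

static void fill_tx_cmd(void *payload, int gen3, uint16_t offload_assist,
			uint16_t flags, uint32_t rate_n_flags)
{
	/* common values were computed once by the caller */
	if (gen3) {
		struct tx_cmd_gen3 *c = payload;

		c->offload_assist |= offload_assist;
		c->flags = flags;
		c->rate_n_flags = rate_n_flags;
	} else {
		struct tx_cmd_gen2 *c = payload;

		c->offload_assist |= offload_assist;
		c->flags = flags;
		c->rate_n_flags = rate_n_flags;
	}
}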
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
new file mode 100644
index 000000000000..2146fda8da2f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_prph_scratch *prph_scratch;
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+ struct iwl_prph_info *prph_info;
+ void *iml_img;
+ u32 control_flags = 0;
+ int ret;
+
+ /* Allocate prph scratch */
+ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+ &trans_pcie->prph_scratch_dma_addr,
+ GFP_KERNEL);
+ if (!prph_scratch)
+ return -ENOMEM;
+
+ prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->version.version = 0;
+ prph_sc_ctrl->version.mac_id =
+ cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+
+ control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
+ IWL_PRPH_SCRATCH_MTR_MODE |
+ (IWL_PRPH_MTR_FORMAT_256B &
+ IWL_PRPH_SCRATCH_MTR_FORMAT) |
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+
+ /* initialize RX default queue */
+ prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+ cpu_to_le64(trans_pcie->rxq->bd_dma);
+
+ /* Configure debug, for integration */
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ prph_sc_ctrl->hwm_cfg.hwm_base_addr =
+ cpu_to_le64(trans_pcie->fw_mon_phys);
+ prph_sc_ctrl->hwm_cfg.hwm_size =
+ cpu_to_le32(trans_pcie->fw_mon_size);
+
+ /* allocate ucode sections in dram and set addresses */
+ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+ if (ret) {
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+ }
+
+ /* Allocate prph information
+ * currently we don't assign anything to the prph info, but it will
+ * get assigned later */
+ prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+ &trans_pcie->prph_info_dma_addr,
+ GFP_KERNEL);
+ if (!prph_info)
+ return -ENOMEM;
+
+ /* Allocate context info */
+ ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
+ sizeof(*ctxt_info_gen3),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info_gen3)
+ return -ENOMEM;
+
+ ctxt_info_gen3->prph_info_base_addr =
+ cpu_to_le64(trans_pcie->prph_info_dma_addr);
+ ctxt_info_gen3->prph_scratch_base_addr =
+ cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+ ctxt_info_gen3->prph_scratch_size =
+ cpu_to_le32(sizeof(*prph_scratch));
+ ctxt_info_gen3->cr_head_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+ ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+ ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
+ ctxt_info_gen3->cr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
+ ctxt_info_gen3->tr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+ ctxt_info_gen3->mtr_base_addr =
+ cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ ctxt_info_gen3->mcr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+ ctxt_info_gen3->mtr_size =
+ cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+ ctxt_info_gen3->mcr_size =
+ cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+
+ trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+ trans_pcie->prph_info = prph_info;
+ trans_pcie->prph_scratch = prph_scratch;
+
+ /* Allocate IML */
+ iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
+ &trans_pcie->iml_dma_addr, GFP_KERNEL);
+ if (!iml_img)
+ return -ENOMEM;
+
+ memcpy(iml_img, trans->iml, trans->iml_len);
+
+ iwl_enable_interrupts(trans);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR,
+ trans_pcie->ctxt_info_dma_addr);
+ iwl_write64(trans, CSR_IML_DATA_ADDR,
+ trans_pcie->iml_dma_addr);
+ iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+
+ return 0;
+}
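One thing worth noting in iwl_pcie_ctxt_info_gen3_init() as shown: the fw-section failure path frees prph_scratch, but the later prph_info, ctxt_info_gen3 and IML allocation failures return -ENOMEM directly, leaving the earlier DMA buffers allocated. The conventional shape for staged allocations is a goto-based unwind; a minimal userspace sketch of that pattern (plain malloc/free standing in for dma_alloc_coherent/dma_free_coherent):

#include <errno.h>
#include <stdlib.h>

static int staged_init(void **a, void **b, void **c)
{
	*a = malloc(32);
	if (!*a)
		return -ENOMEM;
	*b = malloc(32);
	if (!*b)
		goto err_a;
	*c = malloc(32);
	if (!*c)
		goto err_b;
	return 0;

err_b:	/* unwind in reverse allocation order */
	free(*b);
err_a:
	free(*a);
	return -ENOMEM;
}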
+
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ctxt_info_gen3)
+ return;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_dma_addr = 0;
+ trans_pcie->ctxt_info_gen3 = NULL;
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+ trans_pcie->prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ trans_pcie->prph_scratch_dma_addr = 0;
+ trans_pcie->prph_scratch = NULL;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
+ trans_pcie->prph_info,
+ trans_pcie->prph_info_dma_addr);
+ trans_pcie->prph_info_dma_addr = 0;
+ trans_pcie->prph_info = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 3fc4343581ee..b2cd7ef5fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,57 +57,6 @@
#include "internal.h"
#include "iwl-prph.h"
-static int iwl_pcie_get_num_sections(const struct fw_img *fw,
- int start)
-{
- int i = 0;
-
- while (start < fw->num_sec &&
- fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
- fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
- start++;
- i++;
- }
-
- return i;
-}
-
-static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
-static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- int i;
-
- if (!dram->fw) {
- WARN_ON(dram->fw_cnt);
- return;
- }
-
- for (i = 0; i < dram->fw_cnt; i++)
- dma_free_coherent(trans->dev, dram->fw[i].size,
- dram->fw[i].block, dram->fw[i].physical);
-
- kfree(dram->fw);
- dram->fw_cnt = 0;
- dram->fw = NULL;
-}
-
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
dram->paging = NULL;
}
-static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
- const struct fw_img *fw,
- struct iwl_context_info *ctxt_info)
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
if (WARN(dram->paging,
@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+ ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 38234bda9017..b150da4c6721 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -72,7 +72,6 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include "fw/acpi.h"
@@ -545,6 +544,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -554,6 +556,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
@@ -578,6 +581,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
@@ -604,6 +609,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -630,6 +637,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
@@ -656,6 +665,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -682,6 +693,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -708,6 +721,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -743,6 +758,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -771,6 +788,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -797,6 +816,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -806,19 +827,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
@@ -981,6 +1015,10 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
+ /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return 0;
+
/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
iwl_pcie_conf_msix_hw(trans_pcie);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45ea32796cda..b63d44b7cd7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -3,6 +3,7 @@
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -17,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -45,6 +45,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
+#include "iwl-drv.h"
/* We need 2 entries for the TX command and header, and another one might
* be needed for potential data in the SKB's head. The remaining ones can
@@ -59,6 +60,7 @@
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
+#define FIRST_RX_QUEUE 512
struct iwl_host_cmd;
@@ -71,6 +73,7 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @size: size used from the buffer
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 size;
};
/**
@@ -98,14 +102,121 @@ struct isr_statistics {
u32 unhandled;
};
+#define IWL_CD_STTS_OPTIMIZED_POS 0
+#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS 4
+#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
+
+/**
+ * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ * In sniffer mode, when split is used, set in the last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
+ * all CD completions. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+ IWL_CD_STTS_UNUSED,
+ IWL_CD_STTS_UNUSED_2,
+ IWL_CD_STTS_END_TRANSFER,
+ IWL_CD_STTS_OVERFLOW,
+ IWL_CD_STTS_ABORTED,
+ IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+ IWL_CD_STTS_VALID,
+ IWL_CD_STTS_FCS_ERR,
+ IWL_CD_STTS_SEC_KEY_ERR,
+ IWL_CD_STTS_DECRYPTION_ERR,
+ IWL_CD_STTS_DUP,
+ IWL_CD_STTS_ICV_MIC_ERR,
+ IWL_CD_STTS_INTERNAL_SNAP_ERR,
+ IWL_CD_STTS_SEC_PORT_FAIL,
+ IWL_CD_STTS_BA_OLD_SN,
+ IWL_CD_STTS_QOS_NULL,
+ IWL_CD_STTS_MAC_HDR_ERR,
+ IWL_CD_STTS_MAX_RETRANS,
+ IWL_CD_STTS_EX_LIFETIME,
+ IWL_CD_STTS_NOT_USED,
+ IWL_CD_STTS_REPLAY_ERR,
+};
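+
+/*
+ * Editor's sketch, not part of the original patch: the POS/MSK macros
+ * above could be used to extract the status sub-fields from a
+ * completion descriptor status byte; the helper names are hypothetical.
+ */
+static inline u8 iwl_cd_get_transfer_status(u8 status)
+{
+ return (status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
+ IWL_CD_STTS_TRANSFER_STATUS_POS;
+}
+
+static inline u8 iwl_cd_get_wifi_status(u8 status)
+{
+ return (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
+ IWL_CD_STTS_WIFI_STATUS_POS;
+}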
+
+#define IWL_RX_TD_TYPE_MSK 0xff000000
+#define IWL_RX_TD_SIZE_MSK 0x00ffffff
+#define IWL_RX_TD_SIZE_2K BIT(11)
+#define IWL_RX_TD_TYPE 0
+
+/**
+ * struct iwl_rx_transfer_desc - transfer descriptor
+ * @type_n_size: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * and buffer size
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwl_rx_transfer_desc {
+ __le32 type_n_size;
+ __le64 addr;
+ __le16 rbid;
+ __le16 reserved;
+} __packed;
+
+#define IWL_RX_CD_SIZE 0xffffff00
+
+/**
+ * struct iwl_rx_completion_desc - completion descriptor
+ * @type: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * @status: status of the completion
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @reserved2: reserved
+ */
+struct iwl_rx_completion_desc {
+ u8 type;
+ u8 status;
+ __le16 reserved1;
+ __le16 rbid;
+ __le32 size;
+ u8 reserved2[22];
+} __packed;
+
/**
* struct iwl_rxq - Rx queue
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
* In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc structs
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
+ * @tr_tail: driver's pointer to the transmission ring tail buffer
+ * @tr_tail_dma: physical address of the buffer for the transmission ring tail
+ * @cr_tail: driver's pointer to the completion ring tail buffer
+ * @cr_tail_dma: physical address of the buffer for the completion ring tail
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -125,8 +236,16 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- __le32 *used_bd;
+ union {
+ void *used_bd;
+ __le32 *bd_32;
+ struct iwl_rx_completion_desc *cd;
+ };
dma_addr_t used_bd_dma;
+ __le16 *tr_tail;
+ dma_addr_t tr_tail_dma;
+ __le16 *cr_tail;
+ dma_addr_t cr_tail_dma;
u32 read;
u32 write;
u32 free_count;
@@ -136,7 +255,7 @@ struct iwl_rxq {
struct list_head rx_free;
struct list_head rx_used;
bool need_update;
- struct iwl_rb_status *rb_stts;
+ void *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
struct napi_struct napi;
@@ -175,18 +294,36 @@ struct iwl_dma_ptr {
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
*/
-static inline int iwl_queue_inc_wrap(int index)
+static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
- return ++index & (TFD_QUEUE_SIZE_MAX - 1);
+ return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @rxq - the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ __le16 *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(*rb_stts);
+ } else {
+ struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(rb_stts->closed_rb_num);
+ }
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
*/
-static inline int iwl_queue_dec_wrap(int index)
+static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
- return --index & (TFD_QUEUE_SIZE_MAX - 1);
+ return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
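
/*
 * Editor's note (illustrative): this relies on max_tfd_queue_size being
 * a power of two (e.g. 256), so the mask wraps without a modulo:
 * iwl_queue_inc_wrap(trans, 255) == 0 and iwl_queue_dec_wrap(trans, 0) == 255.
 */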
struct iwl_cmd_meta {
@@ -315,6 +452,18 @@ enum iwl_shared_irq_flags {
};
/**
+ * enum iwl_image_response_code - image response values
+ * @IWL_IMAGE_RESP_DEF: the default value of the register
+ * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
+ * @IWL_IMAGE_RESP_FAIL: iml reading failed
+ */
+enum iwl_image_response_code {
+ IWL_IMAGE_RESP_DEF = 0,
+ IWL_IMAGE_RESP_SUCCESS = 1,
+ IWL_IMAGE_RESP_FAIL = 2,
+};
+
+/**
* struct iwl_dram_data
* @physical: page phy pointer
* @block: pointer to the allocated block/page
@@ -347,6 +496,12 @@ struct iwl_self_init_dram {
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
+ * @ctxt_info_gen3: context information for gen3 devices
+ * @prph_info: prph info for self init
+ * @prph_scratch: prph scratch for self init
+ * @prph_info_dma_addr: dma addr of prph info
+ * @prph_scratch_dma_addr: dma addr of prph scratch
* @ctxt_info_dma_addr: dma addr of context information
* @init_dram: DRAM data of firmware image (including paging).
* Context information addresses will be taken from here.
@@ -391,8 +546,16 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
- struct iwl_context_info *ctxt_info;
+ union {
+ struct iwl_context_info *ctxt_info;
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ };
+ struct iwl_prph_info *prph_info;
+ struct iwl_prph_scratch *prph_scratch;
dma_addr_t ctxt_info_dma_addr;
+ dma_addr_t prph_info_dma_addr;
+ dma_addr_t prph_scratch_dma_addr;
+ dma_addr_t iml_dma_addr;
struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
@@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
return (void *)trans->trans_specific;
}
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+ struct msix_entry *entry)
+{
+ /*
+ * Before sending the interrupt the HW disables it to prevent
+ * a nested interrupt. This is done by writing 1 to the corresponding
+ * bit in the mask register. After handling the interrupt, it should be
+ * re-enabled by clearing this bit. This register is defined as a
+ * write-1-to-clear (W1C) register, meaning that the bit is cleared
+ * by writing 1 to it.
+ */
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
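+
+/*
+ * Editor's note (illustrative): because the register is W1C, writing
+ * BIT(entry->entry) clears only that one bit; no read-modify-write is
+ * needed and the mask bits of the other vectors are left untouched.
+ */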
+
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
@@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq);
/*****************************************************
* ICT - interrupt handling
@@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
+#define IWL_NUM_OF_COMPLETION_RINGS 31
+#define IWL_NUM_OF_TRANSFER_RINGS 527
+
+static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
+ int start)
+{
+ int i = 0;
+
+ while (start < fw->num_sec &&
+ fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+ fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
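+
+/*
+ * Editor's note (illustrative): for an image laid out as
+ * [sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION, sec3],
+ * iwl_pcie_get_num_sections(fw, 0) returns 2, stopping at the
+ * separator (which is encoded in the section's offset field).
+ */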
+
+static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = dma_alloc_coherent(trans->dev, sec->len,
+ &dram->physical,
+ GFP_KERNEL);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
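+
+/*
+ * Editor's sketch (hypothetical caller, not part of the original patch):
+ * a context-info setup path would typically loop over the image, e.g.:
+ *
+ * for (i = 0; i < num; i++, dram->fw_cnt++) {
+ * ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+ * &dram->fw[dram->fw_cnt]);
+ * if (ret)
+ * return ret;
+ * }
+ */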
+
+static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ WARN_ON(dram->fw_cnt);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ dma_free_coherent(trans->dev, dram->fw[i].size,
+ dram->fw[i].block, dram->fw[i].physical);
+
+ kfree(dram->fw);
+ dram->fw_cnt = 0;
+ dram->fw = NULL;
+}
+
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
+static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
@@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
return txq->tfds + trans_pcie->tfd_size * idx;
}
+static inline const char *queue_name(struct device *dev,
+ struct iwl_trans_pcie *trans_p, int i)
+{
+ if (trans_p->shared_vec_mask) {
+ int vec = trans_p->shared_vec_mask &
+ IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+ if (i == 0)
+ return DRV_NAME ": shared IRQ";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i + vec);
+ }
+ if (i == 0)
+ return DRV_NAME ": default queue";
+
+ if (i == trans_p->alloc_vecs - 1)
+ return DRV_NAME ": exception";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i);
+}
+
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
+ int index = iwl_pcie_get_cmd_index(q, i);
+ int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
+ int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
}
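
/*
 * Editor's note (illustrative): the window check handles wrap-around;
 * with a 256-entry window, r == 250 and w == 5, entries 250..255 and
 * 0..4 are in use, so iwl_queue_used() is true for 252 and false for 100.
 */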
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
@@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(const struct iwl_txq *q);
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif
+/* common functions that are used by gen3 transport */
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
+
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d15f5ba2dc77..d017aa2a0a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -18,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -37,6 +36,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
+#include "iwl-context-info-gen3.h"
/******************************************************************************
*
@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->cfg->mq_rx_supported) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ /* TODO: remove this for 22560 once fw does it */
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+ return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else if (trans->cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->mq_rx_supported)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ (rxq->write_actual |
+ ((FIRST_RX_QUEUE + rxq->id) << 16)));
+ else if (trans->cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
}
}
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+ struct iwl_rxq *rxq,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+ bd[rxq->write].type_n_size =
+ cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+ ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+ bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+ bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+ } else {
+ __le64 *bd = rxq->bd;
+
+ bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ }
+}
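+
+/*
+ * Editor's note (illustrative): in the legacy (pre-22560) layout the
+ * vid can be OR'd into the low bits of the 64-bit descriptor because
+ * page_dma is page-aligned, i.e. its low 12 bits are zero (see the
+ * WARN_ON against DMA_BIT_MASK(12) below).
+ */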
+
/*
* iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
*/
@@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
spin_lock(&rxq->lock);
while (rxq->free_count) {
- __le64 *bd = (__le64 *)rxq->bd;
-
/* Get next free Rx buffer, remove from free list */
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
@@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
/* The first 12 bits are expected to be empty */
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
- bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
rxq->free_count--;
}
@@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
- struct iwl_rxq *rxq)
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
@@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
}
-static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
@@ -608,89 +634,174 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct device *dev = trans->dev;
- int i;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ struct iwl_rx_transfer_desc *rx_td;
- if (WARN_ON(trans_pcie->rxq))
- return -EINVAL;
+ if (use_rx_td)
+ return sizeof(*rx_td);
+ else
+ return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ sizeof(__le32);
+}
- trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
- GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -EINVAL;
+static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct device *dev = trans->dev;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
+ int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+ if (rxq->bd)
+ dma_free_coherent(trans->dev,
+ free_size * rxq->queue_size,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ use_rx_td ? sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->rb_stts_dma = 0;
+ rxq->rb_stts = NULL;
+
+ if (rxq->used_bd)
+ dma_free_coherent(trans->dev,
+ (use_rx_td ? sizeof(*rxq->cd) :
+ sizeof(__le32)) * rxq->queue_size,
+ rxq->used_bd, rxq->used_bd_dma);
+ rxq->used_bd_dma = 0;
+ rxq->used_bd = NULL;
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ return;
- spin_lock_init(&rba->lock);
+ if (rxq->tr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->tr_tail, rxq->tr_tail_dma);
+ rxq->tr_tail_dma = 0;
+ rxq->tr_tail = NULL;
+
+ if (rxq->cr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->cr_tail, rxq->cr_tail_dma);
+ rxq->cr_tail_dma = 0;
+ rxq->cr_tail = NULL;
+}
- for (i = 0; i < trans->num_rx_queues; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+ int i;
+ int free_size;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
- spin_lock_init(&rxq->lock);
- if (trans->cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
- else
- rxq->queue_size = RX_QUEUE_SIZE;
+ spin_lock_init(&rxq->lock);
+ if (trans->cfg->mq_rx_supported)
+ rxq->queue_size = MQ_RX_TABLE_SIZE;
+ else
+ rxq->queue_size = RX_QUEUE_SIZE;
- /*
- * Allocate the circular buffer of Read Buffer Descriptors
- * (RBDs)
- */
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err;
+ free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
- if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- sizeof(__le32) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
- if (!rxq->used_bd)
- goto err;
- }
+ /*
+ * Allocate the circular buffer of Read Buffer Descriptors
+ * (RBDs)
+ */
+ rxq->bd = dma_zalloc_coherent(dev,
+ free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err;
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma,
+ if (trans->cfg->mq_rx_supported) {
+ rxq->used_bd = dma_zalloc_coherent(dev,
+ (use_rx_td ?
+ sizeof(*rxq->cd) :
+ sizeof(__le32)) *
+ rxq->queue_size,
+ &rxq->used_bd_dma,
GFP_KERNEL);
- if (!rxq->rb_stts)
+ if (!rxq->used_bd)
goto err;
}
+
+ /* Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+ sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma,
+ GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err;
+
+ if (!use_rx_td)
+ return 0;
+
+ /* Allocate the driver's pointer to TR tail */
+ rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->tr_tail)
+ goto err;
+
+ /* Allocate the driver's pointer to CR tail */
+ rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->cr_tail)
+ goto err;
+ /*
+ * W/A for a 22560 device step Z0 bug: the CR tail must be non-zero.
+ * TODO: remove this once Z0 is no longer supported.
+ */
+ *rxq->cr_tail = cpu_to_le16(500);
+
return 0;
err:
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(dev, free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
-
- if (rxq->used_bd)
- dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
}
kfree(trans_pcie->rxq);
return -ENOMEM;
}
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i, ret;
+
+ if (WARN_ON(trans_pcie->rxq))
+ return -EINVAL;
+
+ trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq)
+ return -EINVAL;
+
+ spin_lock_init(&rba->lock);
+
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
int i;
switch (trans_pcie->rx_buf_size) {
+ case IWL_AMSDU_2K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+ break;
case IWL_AMSDU_4K:
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
break;
@@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
iwl_pcie_enable_rx_wake(trans, true);
}
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
lockdep_assert_held(&rxq->lock);
@@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
rxq->used_count = 0;
}
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
WARN_ON(1);
return 0;
@@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+ memset(rxq->rb_stts, 0,
+ (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
int i;
/*
@@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(trans->dev,
- free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(trans,
- "Free rxq->rb_stts which is NULL\n");
-
- if (rxq->used_bd)
- dma_free_coherent(trans->dev,
- sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
@@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+ struct iwl_rxq *rxq, int i)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_mem_buffer *rxb;
+ u16 vid;
+
+ if (!trans->cfg->mq_rx_supported) {
+ rxb = rxq->queue[i];
+ rxq->queue[i] = NULL;
+ return rxb;
+ }
+
+ /* used_bd entries are 32/16 bits wide, but only 12 bits carry the vid */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ else
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+
+ if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ goto out_err;
+
+ rxb = trans_pcie->global_table[vid - 1];
+ if (rxb->invalid)
+ goto out_err;
+
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+
+ rxb->invalid = true;
+
+ return rxb;
+
+out_err:
+ WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+ iwl_force_nmi(trans);
+ return NULL;
+}
+
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
@@ -1250,7 +1385,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
@@ -1266,30 +1401,9 @@ restart:
if (unlikely(rxq->used_count == rxq->queue_size / 2))
emergency = true;
- if (trans->cfg->mq_rx_supported) {
- /*
- * used_bd is a 32 bit but only 12 are used to retrieve
- * the vid
- */
- u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
-
- if (WARN(!vid ||
- vid > ARRAY_SIZE(trans_pcie->global_table),
- "Invalid rxb index from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb = trans_pcie->global_table[vid - 1];
- if (WARN(rxb->invalid,
- "Invalid rxb from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb->invalid = true;
- } else {
- rxb = rxq->queue[i];
- rxq->queue[i] = NULL;
- }
+ rxb = iwl_pcie_get_rxb(trans, rxq, i);
+ if (!rxb)
+ goto out;
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1331,6 +1445,9 @@ restart:
out:
/* Backtrack one entry */
rxq->read = i;
+ /* update cr tail with the rxq read pointer */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ *rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
/*
@@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
- struct msix_entry *entry)
-{
- /*
- * Before sending the interrupt the HW disables it to prevent
- * a nested interrupt. This is done by writing 1 to the corresponding
- * bit in the mask register. After handling the interrupt, it should be
- * re-enabled by clearing this bit. This register is defined as
- * write 1 clear (W1C) register, meaning that it's being clear
- * by writing 1 to the bit.
- */
- iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
-}
-
/*
* iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
* This interrupt handler should be used with RSS queue only.
@@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- /* uCode wakes up after power-down sleep */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+ inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+ /* Reflect IML transfer status */
+ int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+ IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+ if (res == IWL_IMAGE_RESP_FAIL) {
+ isr_stats->sw++;
+ iwl_pcie_irq_handle_error(trans);
+ }
+ } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ /* uCode wakes up after power-down sleep */
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_pcie_rxq_check_wrptr(trans);
iwl_pcie_txq_check_wrptrs(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index b8e8dac2895d..2bc67219ed3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -53,6 +53,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
+#include "iwl-context-info-gen3.h"
#include "internal.h"
/*
@@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
}
iwl_pcie_ctxt_info_free_paging(trans);
- iwl_pcie_ctxt_info_free(trans);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ iwl_pcie_ctxt_info_gen3_free(trans);
+ else
+ iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- ret = iwl_pcie_ctxt_info_init(trans, fw);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
+ else
+ ret = iwl_pcie_ctxt_info_init(trans, fw);
if (ret)
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7229991ae70d..7d319b6863fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -84,6 +84,7 @@
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
+#include "fw/dbg.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
trans_pcie->fw_mon_size = 0;
}
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page *page = NULL;
@@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static struct iwl_causes_list causes_list_v2[] = {
+ {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
+ {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
+ {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
+ {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
+ {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
+ {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
+ {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
+ {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
+ {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+ {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+ {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+ {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+ {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i;
+ int i, arr_size =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
+ for (i = 0; i < arr_size; i++) {
+ struct iwl_causes_list *causes =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ causes_list : causes_list_v2;
+
+ iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+ iwl_clear_bit(trans, causes[i].mask_reg,
+ causes[i].cause_num);
}
}
@@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_enable_rx_wake(trans, true);
- /*
- * Reconfigure IVAR table in case of MSIX or reset ict table in
- * MSI mode since HW reset erased it.
- * Also enables interrupts - none will happen as
- * the device doesn't know we're waking it up, only when
- * the opmode actually tells it after this call.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
- if (!trans_pcie->msix_enabled)
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
-
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
+ /*
+ * Reconfigure IVAR table in case of MSIX or reset ict table in
+ * MSI mode since HW reset erased it.
+ * Also enables interrupts - none will happen as
+ * the device doesn't know we're waking it up, only when
+ * the opmode actually tells it after this call.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+ if (!trans_pcie->msix_enabled)
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+
iwl_pcie_set_pwr(trans, false);
if (!reset) {
@@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
}
}
-static const char *queue_name(struct device *dev,
- struct iwl_trans_pcie *trans_p, int i)
-{
- if (trans_p->shared_vec_mask) {
- int vec = trans_p->shared_vec_mask &
- IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
- if (i == 0)
- return DRV_NAME ": shared IRQ";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i + vec);
- }
- if (i == 0)
- return DRV_NAME ": default queue";
-
- if (i == trans_p->alloc_vecs - 1)
- return DRV_NAME ": exception";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i);
-}
-
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
@@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+ return -EINVAL;
+
+ data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+ data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+ data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+ data->fr_bd_wid = 0;
+
+ return 0;
+}
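+
+/*
+ * Editor's sketch (hypothetical caller, not part of the original patch):
+ * an op mode could fetch the RX queue DMA addresses before handing them
+ * to the firmware.
+ */
+static int example_get_rxq_dma(struct iwl_trans *trans, int queue)
+{
+ struct iwl_trans_rxq_dma_data data;
+ int ret = iwl_trans_pcie_rxq_dma_data(trans, queue, &data);
+
+ if (ret)
+ return ret; /* queue out of range or rxq not allocated */
+ /* data.fr_bd_cb, data.urbd_stts_wrptr and data.ur_bd_cb are valid */
+ return 0;
+}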
+
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
rxq->free_count);
if (rxq->rb_stts) {
+ u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+ rxq));
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) &
- 0x0FFF);
+ r & 0x0FFF);
} else {
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: Not Allocated\n");
@@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs;
+ u32 len, num_rbs = 0;
u32 monitor_len;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->cfg->mq_rx_supported;
+ !trans->cfg->mq_rx_supported &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
/* transport dump header */
len = sizeof(*dump_data);
@@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data
}
if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+ if (!(trans->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+ return NULL;
+
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
@@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data
}
/* CSR registers */
- len += sizeof(*data) + IWL_CSR_TO_DUMP;
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
/* FH registers */
- if (trans->cfg->gen2)
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
- else
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+ if (trans->cfg->gen2)
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND_GEN2 -
+ FH_MEM_LOWER_BOUND_GEN2);
+ else
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND -
+ FH_MEM_LOWER_BOUND);
+ }
if (dump_rbs) {
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
- & 0x0FFF;
+ num_rbs =
+ le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+ & 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
@@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data
}
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2)
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data
len = 0;
data = (void *)dump_data->data;
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
- txcmd = (void *)data->data;
- spin_lock_bh(&cmdq->lock);
- ptr = cmdq->write_ptr;
- for (i = 0; i < cmdq->n_window; i++) {
- u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
- u32 caplen, cmdlen;
-
- cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
- trans_pcie->tfd_size * ptr);
- caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
- if (cmdlen) {
- len += sizeof(*txcmd) + caplen;
- txcmd->cmdlen = cpu_to_le32(cmdlen);
- txcmd->caplen = cpu_to_le32(caplen);
- memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
- txcmd = (void *)((u8 *)txcmd->data + caplen);
+
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+ u16 tfd_size = trans_pcie->tfd_size;
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+ cmdq->tfds +
+ tfd_size * ptr);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd,
+ caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(trans, ptr);
}
+ spin_unlock_bh(&cmdq->lock);
- ptr = iwl_queue_dec_wrap(ptr);
+ data->len = cpu_to_le32(len);
+ len += sizeof(*data);
+ data = iwl_fw_error_next_data(data);
}
- spin_unlock_bh(&cmdq->lock);
- data->len = cpu_to_le32(len);
- len += sizeof(*data);
- data = iwl_fw_error_next_data(data);
-
- len += iwl_trans_pcie_dump_csr(trans, &data);
- len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += iwl_trans_pcie_dump_csr(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
if (dump_rbs)
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2) {
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
dma_addr_t addr =
@@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data
len += sizeof(*data) + sizeof(*paging) + page_len;
}
}
-
- len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
@@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
.txq_free = iwl_trans_pcie_dyn_txq_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+ .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) {
+
+ if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
- if (hw_status & UMAG_GEN_HW_IS_FPGA)
- trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
- else
+ if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
+ /*
* B-step firmware is the same for the physical card and the FPGA
+ */
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+ CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+ } else {
+ /*
* A-step, no FPGA
+ */
trans->cfg = &iwl22000_2ac_cfg_hr;
+ }
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 48890a1c825f..b99f33ff9123 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +52,7 @@
*****************************************************************************/
#include <linux/pm_runtime.h>
#include <net/tso.h>
+#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
@@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
- len = DIV_ROUND_UP(len, 4);
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
return;
@@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ else
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
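
/*
 * Editor's note (illustrative): bc_ent packs the (possibly dword-rounded)
 * length into bits 0-11 and the fetch-chunk count into bits 12-15, which
 * is why the WARN_ON above rejects len > 0xFFF.
 */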
/*
@@ -355,52 +365,89 @@ out_err:
return -EINVAL;
}
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
- bool amsdu;
- int i, len, tb1_len, tb2_len, hdr_len;
+ int len;
void *tb1_addr;
- memset(tfd, 0, sizeof(*tfd));
+ tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+ len + IWL_FIRST_TB_SIZE,
+ hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int i, len, tb1_len, tb2_len;
+ void *tb1_addr;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
/* The first TB points to bi-directional DMA data */
- if (!amsdu)
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
- /* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
/*
* The second TB (tb1) points to the remainder of the TX command
* and the 802.11 header - dword aligned size
* (This calculation modifies the TX command, so do it before the
* setup of the first TB)
*/
- len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
- ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
- /* do not align A-MSDU to dword as the subframe header aligns it */
- if (amsdu)
- tb1_len = len;
- else
- tb1_len = ALIGN(len, 4);
+ tb1_len = ALIGN(len, 4);
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- if (amsdu) {
- if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
- tb1_len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /*
- * building the A-MSDU might have changed this data, so memcpy
- * it now
- */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
- return tfd;
- }
-
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@@ -467,13 +497,50 @@ out_err:
return NULL;
}
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (amsdu)
+ return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+
+ return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len);
+}
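
The dispatcher above chooses the build path from the frame's QoS control field. As a minimal, self-contained sketch of that test (frame_is_amsdu is a hypothetical name; the mac80211 helpers are the ones used in this patch):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

static bool frame_is_amsdu(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	/* A-MSDU presence is signalled by a bit in the QoS control field */
	return ieee80211_is_data_qos(hdr->frame_control) &&
	       (*ieee80211_get_qos_ctl(hdr) &
		IEEE80211_QOS_CTL_A_MSDU_PRESENT);
}
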
+
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ u16 cmd_len;
int idx;
void *tfd;
@@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
iwl_pcie_gen2_get_num_tbs(trans, tfd));
/* start timer if queue currently empty */
@@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
@@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
iwl_trans_ref(trans);
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
- wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
+ wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
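
Both this allocation and the one in iwl_pcie_tx_alloc() further down size the byte-count table by device family; a hedged sketch of that selection, using only the types named in this patch (bc_tbl_size is a hypothetical helper):

static size_t bc_tbl_size(const struct iwl_trans *trans)
{
	/* 22560 and later devices use the larger gen3 byte-count table */
	return trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 ?
	       sizeof(struct iwl_gen3_bc_tbl) :
	       sizeof(struct iwlagn_scd_bc_tbl);
}
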
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 473fe7ccb07c..93f0d387688a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,27 +71,28 @@
*
***************************************************/
-int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* To avoid ambiguity between empty and completely full queues, there
- * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
- * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < TFD_QUEUE_SIZE_MAX)
+ if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
max = q->n_window;
else
- max = TFD_QUEUE_SIZE_MAX - 1;
+ max = trans->cfg->base_params->max_tfd_queue_size - 1;
/*
- * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
- * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
*/
- used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->cfg->base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
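
The masking above relies on the queue size being a power of two, so unsigned subtraction plus a mask yields the occupancy even across pointer wrap. A standalone illustration (names are hypothetical):

static unsigned int used_entries(unsigned int write_ptr, unsigned int read_ptr,
				 unsigned int queue_size)
{
	/* queue_size is a power of two, so the mask implements
	 * (write_ptr - read_ptr) mod queue_size even after wrap */
	return (write_ptr - read_ptr) & (queue_size - 1);
}
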
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
+ u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
txq->need_update = false;
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ /* max_tfd_queue_size must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
/* Initialize queue's high/low-water marks, and head/tail indexes */
ret = iwl_queue_init(txq, slots_num);
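
The WARN_ONCE above uses the classic single-set-bit test; as a one-line illustration (is_pow2 is a hypothetical name):

static bool is_pow2(u32 v)
{
	/* a power of two has exactly one bit set, so v & (v - 1) clears it */
	return v && !(v & (v - 1));
}
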
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
- u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
+ sizeof(struct iwlagn_scd_bc_tbl);
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- scd_bc_tbls_size);
+ bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
- int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+ int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+ int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue */
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
- if (txq->read_ptr == tfd_num)
+ if (read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/* Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(tfd_num);
+ last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, last_to_free,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
- txq->read_ptr != tfd_num;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
+ read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+ read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
- txq->entries[idx].skb = NULL;
+ txq->entries[read_ptr].skb = NULL;
if (!trans->cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(txq) > txq->low_mark &&
+ if (iwl_queue_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) > txq->low_mark)
+ if (iwl_queue_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
+ u16 r;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
+ idx = iwl_pcie_get_cmd_index(txq, idx);
+ r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+ if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+ (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, idx,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+ r = iwl_queue_inc_wrap(trans, r)) {
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, txq->write_ptr, txq->read_ptr);
+ idx, txq->write_ptr, r);
iwl_force_nmi(trans);
}
}
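
iwl_pcie_get_cmd_index is used here to translate hardware ring pointers into the driver's smaller entries[] window; a sketch of what such a translation presumably does, assuming n_window is a power of two (get_cmd_index is a hypothetical name):

static unsigned int get_cmd_index(unsigned int index, unsigned int n_window)
{
	/* map a hardware queue pointer into the n_window-sized array */
	return index & (n_window - 1);
}
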
@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
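
Throughout this file the wrap helpers now take the transport so they can read max_tfd_queue_size instead of the old compile-time TFD_QUEUE_SIZE_MAX. A hedged sketch of size-parameterised equivalents (names are hypothetical):

static inline unsigned int queue_inc_wrap(unsigned int index,
					  unsigned int max_size)
{
	return (index + 1) & (max_size - 1);	/* max_size is a power of two */
}

static inline unsigned int queue_dec_wrap(unsigned int index,
					  unsigned int max_size)
{
	return (index - 1) & (max_size - 1);	/* unsigned wrap handles index 0 */
}
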
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index d1884b8913e7..0094b1d2b577 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -66,7 +66,7 @@ static void prism2_send_mgmt(struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
struct ap_data *ap = PDE_DATA(file_inode(m->file));
@@ -81,8 +81,7 @@ static int ap_debug_proc_show(struct seq_file *m, void *v)
seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
{
@@ -990,7 +989,7 @@ static void prism2_send_mgmt(struct net_device *dev,
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
+#ifdef CONFIG_PROC_FS
static int prism2_sta_proc_show(struct seq_file *m, void *v)
{
struct sta_info *sta = m->private;
@@ -1059,6 +1058,7 @@ static int prism2_sta_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static void handle_add_proc_queue(struct work_struct *work)
{
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 2720aa39f530..ad1aa65fee7f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -151,13 +151,6 @@ static int prism2_get_ram_size(local_info_t *local);
#define HFA384X_MAGIC 0x8A32
#endif
-
-static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
-{
- return HFA384X_INW(reg);
-}
-
-
static void hfa384x_read_regs(struct net_device *dev,
struct hfa384x_regs *regs)
{
@@ -2897,7 +2890,12 @@ static void hostap_tick_timer(struct timer_list *t)
}
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
+static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
+{
+ return HFA384X_INW(reg);
+}
+
static int prism2_registers_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -2951,8 +2949,7 @@ static int prism2_registers_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
struct set_tim_data {
struct list_head list;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 5b33ccab9188..703d74cea3c2 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -11,8 +11,7 @@
#define PROC_LIMIT (PAGE_SIZE - 80)
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int prism2_debug_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -43,9 +42,9 @@ static int prism2_debug_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
+#ifdef CONFIG_PROC_FS
static int prism2_stats_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -82,6 +81,7 @@ static int prism2_stats_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static int prism2_wds_proc_show(struct seq_file *m, void *v)
{
@@ -174,6 +174,7 @@ static const struct seq_operations prism2_bss_list_proc_seqops = {
.show = prism2_bss_list_proc_show,
};
+#ifdef CONFIG_PROC_FS
static int prism2_crypt_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -190,6 +191,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
}
return 0;
}
+#endif
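
The pattern across these hostap hunks is the same: proc show handlers are compiled only when procfs is available, so they never become defined-but-unused. A minimal sketch of the guard (example_proc_show is hypothetical):

#include <linux/seq_file.h>

#ifdef CONFIG_PROC_FS
static int example_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");	/* referenced only by procfs setup code */
	return 0;
}
#endif
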
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
size_t count, loff_t *_pos)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 18e819d964f1..998dfac0fcff 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2,6 +2,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -2517,6 +2518,123 @@ out_err:
nlmsg_free(mcast_skb);
}
+static const struct ieee80211_sband_iftype_data he_capa_2ghz = {
+ /* TODO: should we support other types, e.g., P2P?*/
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes unset, as
+ * DCM, beam forming, RU and PPE threshold information
+ * are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xffff),
+ .tx_mcs_160 = cpu_to_le16(0xffff),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ },
+};
+
+static const struct ieee80211_sband_iftype_data he_capa_5ghz = {
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes unset, as
+ * DCM, beam forming, RU and PPE threshold information
+ * are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+};
+
+static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
+{
+ if (sband->band == NL80211_BAND_2GHZ)
+ sband->iftype_data =
+ (struct ieee80211_sband_iftype_data *)&he_capa_2ghz;
+ else if (sband->band == NL80211_BAND_5GHZ)
+ sband->iftype_data =
+ (struct ieee80211_sband_iftype_data *)&he_capa_5ghz;
+ else
+ return;
+
+ sband->n_iftype_data = 1;
+}
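
The 0xfffa MCS maps above pack two bits per spatial stream: 0b10 (MCS 0-11 supported) for NSS 1-2 and 0b11 (not supported) for NSS 3-8. A hedged decoder sketch (he_mcs_for_nss is hypothetical):

/* returns 0..2 for the highest supported MCS group, 3 for unsupported */
static int he_mcs_for_nss(u16 mcs_map, int nss /* 1-based */)
{
	return (mcs_map >> (2 * (nss - 1))) & 0x3;
}

For example, he_mcs_for_nss(0xfffa, 1) yields 2 (MCS 0-11), while he_mcs_for_nss(0xfffa, 3) yields 3 (unsupported).
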
+
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -2678,6 +2796,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = &data->bands[band];
+
+ sband->band = band;
+
switch (band) {
case NL80211_BAND_2GHZ:
sband->channels = data->channels_2ghz;
@@ -2734,6 +2855,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.rx_mask[1] = 0xff;
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ mac80211_hwsim_he_capab(sband);
+
hw->wiphy->bands[band] = sband;
}
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index f99031cfdf86..57edfada0665 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1559,10 +1559,10 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
int ret;
size_t i;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->tx_bytes = priv->dev->stats.tx_bytes;
sinfo->tx_packets = priv->dev->stats.tx_packets;
sinfo->rx_bytes = priv->dev->stats.rx_bytes;
@@ -1572,14 +1572,14 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
ret = lbs_get_rssi(priv, &signal, &noise);
if (ret == 0) {
sinfo->signal = signal;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
/* Convert priv->cur_rate from hw_value to NL80211 value */
for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
if (priv->cur_rate == lbs_rates[i].hw_value) {
sinfo->txrate.legacy = lbs_rates[i].bitrate;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
break;
}
}
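
The BIT to BIT_ULL conversions matter because sinfo->filled is a u64: plain BIT() produces an unsigned long, which is 32 bits on 32-bit architectures, so any NL80211_STA_INFO_* index of 32 or above would be silently truncated. A minimal illustration (mark_filled is hypothetical):

#include <linux/bits.h>

static u64 mark_filled(u64 filled, unsigned int sta_info_bit)
{
	/* BIT_ULL() expands to 1ULL << n, wide enough for all 64 flags */
	return filled | BIT_ULL(sta_info_bit);
}
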
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index dd1ee1f0af48..469134930026 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -104,6 +104,7 @@ struct lbs_private {
u8 fw_ready;
u8 surpriseremoved;
u8 setup_fw_on_resume;
+ u8 power_up_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
int (*power_save) (struct lbs_private *priv);
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 2300e796c6ab..43743c26c071 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func)
static int if_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- int ret;
struct if_sdio_card *card = sdio_get_drvdata(func);
+ struct lbs_private *priv = card->priv;
+ int ret;
mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
+ priv->power_up_on_resume = false;
/* If we're powered off anyway, just let the mmc layer remove the
* card. */
- if (!lbs_iface_active(card->priv))
- return -ENOSYS;
+ if (!lbs_iface_active(priv)) {
+ if (priv->fw_ready) {
+ priv->power_up_on_resume = true;
+ if_sdio_power_off(card);
+ }
+
+ return 0;
+ }
dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
sdio_func_id(func), flags);
@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev)
/* If we aren't being asked to wake on anything, we should bail out
* and let the SD stack power down the card.
*/
- if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
- return -ENOSYS;
+ if (priv->fw_ready) {
+ priv->power_up_on_resume = true;
+ if_sdio_power_off(card);
+ }
+
+ return 0;
}
if (!(flags & MMC_PM_KEEP_POWER)) {
@@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev)
if (ret)
return ret;
- ret = lbs_suspend(card->priv);
+ ret = lbs_suspend(priv);
if (ret)
return ret;
@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev)
dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
+ if (card->priv->power_up_on_resume) {
+ if_sdio_power_on(card);
+ wait_event(card->pwron_waitq, card->priv->fw_ready);
+ }
+
ret = lbs_resume(card->priv);
return ret;
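
The suspend paths above no longer return -ENOSYS; they power the card off themselves and record that it must be re-powered on resume. A condensed, hedged sketch of the policy, with control flow simplified and using the names introduced in this patch:

static int sdio_suspend_policy(struct lbs_private *priv,
			       struct if_sdio_card *card)
{
	priv->power_up_on_resume = false;

	/* no active interface, or no wake criteria: power down cleanly */
	if (!lbs_iface_active(priv) ||
	    priv->wol_criteria == EHS_REMOVE_WAKEUP) {
		if (priv->fw_ready) {
			priv->power_up_on_resume = true;
			if_sdio_power_off(card);
		}
		return 0;	/* let the MMC layer power down the slot */
	}

	return lbs_suspend(priv);	/* normal wake-enabled suspend */
}
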
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index ffea610f67e2..c67a8e7be310 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -614,6 +614,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct if_usb_card *cardp,
struct lbs_private *priv)
{
+ unsigned long flags;
u8 i;
if (recvlength > LBS_CMD_BUFFER_SIZE) {
@@ -623,9 +624,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- BUG_ON(!in_interrupt());
-
- spin_lock(&priv->driver_lock);
+ spin_lock_irqsave(&priv->driver_lock, flags);
i = (priv->resp_idx == 0) ? 1 : 0;
BUG_ON(priv->resp_len[i]);
@@ -635,7 +634,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
kfree_skb(skb);
lbs_notify_command_response(priv, i);
- spin_unlock(&priv->driver_lock);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
lbs_deb_usbd(&cardp->udev->dev,
"Wake up main thread to handle cmd response\n");
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 5153922e7ce1..e92fc5001171 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -603,6 +603,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct if_usb_card *cardp,
struct lbtf_private *priv)
{
+ unsigned long flags;
+
if (recvlength > LBS_CMD_BUFFER_SIZE) {
lbtf_deb_usbd(&cardp->udev->dev,
"The receive buffer is too large\n");
@@ -610,14 +612,12 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- BUG_ON(!in_interrupt());
-
- spin_lock(&priv->driver_lock);
+ spin_lock_irqsave(&priv->driver_lock, flags);
memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
recvlength - MESSAGE_HEADER_LEN);
kfree_skb(skb);
lbtf_cmd_response_rx(priv);
- spin_unlock(&priv->driver_lock);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
}
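
Both libertas USB hunks replace spin_lock() plus a BUG_ON(!in_interrupt()) with spin_lock_irqsave(), which is correct from any calling context. A minimal illustration (names hypothetical):

#include <linux/spinlock.h>

static void consume_response(spinlock_t *lock, void (*handle)(void))
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* safe in IRQ or task context */
	handle();
	spin_unlock_irqrestore(lock, flags);	/* restores prior IRQ state */
}
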
/**
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 5d75c971004b..e2addd8b878b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
"Send delba to tid=%d, %pM\n",
tid, rx_reor_tbl_ptr->ta);
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
- goto exit;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
+ return;
}
}
-exit:
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 7ab44cd32a9d..8e63d14c1e1c 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
{
int pkt_to_send, i;
void *rx_tmp_ptr;
- unsigned long flags;
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
tbl->win_size;
for (i = 0; i < pkt_to_send; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
}
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
if (rx_tmp_ptr)
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
}
tbl->start_win = start_win;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
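
These mwifiex helpers now document a "caller must hold rx_reorder_tbl_lock" contract instead of taking the lock themselves. One way such a contract could be made checkable, as a hedged sketch:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static void dispatch_locked(spinlock_t *reorder_lock)
{
	lockdep_assert_held(reorder_lock);	/* warns if the caller forgot */
	/* ... walk the reorder table without re-taking the lock ... */
}
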
/*
@@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* The start window is adjusted automatically when a hole is located.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -157,21 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
{
int i, j, xchg;
void *rx_tmp_ptr;
- unsigned long flags;
for (i = 0; i < tbl->win_size; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
- if (!tbl->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+ if (!tbl->rx_reorder_ptr[i])
break;
- }
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -184,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
}
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
@@ -192,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
*
* The function stops the associated timer and dispatches all the
* pending packets in the Rx reorder table before deletion.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -217,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
-
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
@@ -234,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
/*
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
return tbl;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return NULL;
}
@@ -266,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
return;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN))
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
- }
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
@@ -282,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
- struct mwifiex_private *priv = ctx->priv;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
- if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
+ if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
return i;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return -1;
}
@@ -317,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t)
struct reorder_tmr_cnxt *ctx =
from_timer(ctx, t, timer);
int start_win, seq_num;
+ unsigned long flags;
ctx->timer_is_set = false;
+ spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
seq_num = mwifiex_11n_find_last_seq_num(ctx);
- if (seq_num < 0)
+ if (seq_num < 0) {
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
return;
+ }
mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -354,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
* If we get a TID, TA pair which is already present, dispatch all
* the packets and move the window start until the ssn
*/
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node)
@@ -569,16 +551,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int prev_start_win, start_win, end_win, win_size;
u16 pkt_index;
bool init_window_shift = false;
+ unsigned long flags;
int ret = 0;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (pkt_type != PKT_TYPE_BAR)
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
@@ -665,6 +651,8 @@ done:
if (!tbl->timer_context.timer_is_set ||
prev_start_win != tbl->start_win)
mwifiex_11n_rxreorder_timer_restart(tbl);
+
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
@@ -693,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, EVENT,
"event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
@@ -734,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
+ unsigned long flags;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -747,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return 0;
}
win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl) {
@@ -768,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
else
tbl->amsdu = false;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_dbg(priv->adapter, CMD,
"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -807,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
- &priv->rx_reorder_tbl_ptr, list) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ &priv->rx_reorder_tbl_ptr, list)
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -935,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
int tlv_buf_left = len;
int ret;
u8 *tmp;
+ unsigned long flags;
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
@@ -954,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
tlv_bitmap_len);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_reor_tbl_ptr =
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
tlv_rxba->mac);
if (!rx_reor_tbl_ptr) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, ERROR,
"Can not find rx_reorder_tbl!");
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
for (i = 0; i < tlv_bitmap_len; i++) {
for (j = 0 ; j < 8; j++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 4b5ae9098504..adc88433faa8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1353,17 +1353,17 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
{
u32 rate;
- sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_BITRATE) |
- BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_SIGNAL) | BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
if (!node)
return -ENOENT;
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->inactive_time =
jiffies_to_msecs(jiffies - node->stats.last_rx);
@@ -1413,7 +1413,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->txrate.legacy = rate * 5;
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap &
WLAN_CAPABILITY_SHORT_PREAMBLE)
@@ -2322,7 +2322,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
if (priv->scan_block)
priv->scan_block = false;
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: Ignore connection.\t"
"Card removed or FW in bad state\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9cfcdf6bec52..60db2b969e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -372,7 +372,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
adapter->ps_state = PS_STATE_SLEEP_CFM;
if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
- (adapter->is_hs_configured &&
+ (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
mwifiex_hs_activated_event(mwifiex_get_priv
@@ -564,25 +564,26 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: device in suspended state\n");
return -1;
}
- if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+ if (test_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags) &&
+ cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: host entering sleep state\n");
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: card is removed\n");
return -1;
}
- if (adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: FW is in bad state\n");
return -1;
@@ -789,7 +790,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
if (priv && (host_cmd->command !=
cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event(priv, false);
}
}
@@ -825,7 +827,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return -1;
}
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
@@ -927,7 +929,7 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
struct cmd_ctrl_node *cmd_node;
- adapter->is_cmd_timedout = 1;
+ set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
if (!adapter->curr_cmd) {
mwifiex_dbg(adapter, ERROR,
"cmd: empty curr_cmd\n");
@@ -953,7 +955,8 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
mwifiex_dbg(adapter, MSG,
"is_cmd_timedout = %d\n",
- adapter->is_cmd_timedout);
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags));
mwifiex_dbg(adapter, MSG,
"num_tx_timeout = %d\n",
adapter->dbg.num_tx_timeout);
@@ -1135,7 +1138,8 @@ void
mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
{
if (activated) {
- if (priv->adapter->is_hs_configured) {
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &priv->adapter->work_flags)) {
priv->adapter->hs_activated = true;
mwifiex_update_rxreor_flags(priv->adapter,
RXREOR_FORCE_NO_DROP);
@@ -1186,11 +1190,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
phs_cfg->params.hs_config.gap);
}
if (conditions != HS_CFG_CANCEL) {
- adapter->is_hs_configured = true;
+ set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->iface_type == MWIFIEX_USB)
mwifiex_hs_activated_event(priv, true);
} else {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->hs_activated)
mwifiex_hs_activated_event(priv, false);
}
@@ -1212,8 +1216,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
- adapter->is_hs_configured = false;
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
false);
@@ -1273,7 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
return;
}
adapter->pm_wakeup_card_req = true;
- if (adapter->is_hs_configured)
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
mwifiex_hs_activated_event(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
true);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 07453932f703..cce70252fd96 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -813,7 +813,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
MWIFIEX_SYNC_CMD, &hscfg);
mwifiex_enable_hs(priv->adapter);
- priv->adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &priv->adapter->work_flags);
ret = count;
done:
kfree(buf);
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index b10baacb51c9..75cbd609d606 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -355,8 +355,14 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
case WLAN_EID_HT_OPERATION:
case WLAN_EID_VHT_CAPABILITY:
case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_VENDOR_SPECIFIC:
break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ /* Skip only Microsoft WMM IE */
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ (const u8 *)hdr,
+ hdr->len + sizeof(struct ieee_types_header)))
+ break;
default:
memcpy(gen_ie->ie_buffer + ie_len, hdr,
hdr->len + sizeof(struct ieee_types_header));
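
The new case deliberately falls through to the default copy when the element is not the Microsoft WMM IE, so only WMM is filtered out. A hedged sketch of the test in isolation (ie_is_wmm is hypothetical; the search is confined to the single element's bytes):

static bool ie_is_wmm(const struct ieee_types_header *hdr)
{
	return cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
				       WLAN_OUI_TYPE_MICROSOFT_WMM,
				       (const u8 *)hdr,
				       hdr->len + sizeof(*hdr)) != NULL;
}
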
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index d239e9248c05..673e89dff0b5 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -233,7 +233,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->event_received = false;
adapter->data_received = false;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
@@ -270,7 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
@@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
- spin_lock_init(&priv->rx_pkt_lock);
spin_lock_init(&priv->wmm.ra_list_spinlock);
spin_lock_init(&priv->curr_bcn_buf_lock);
spin_lock_init(&priv->sta_list_spinlock);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 510f6b8e717d..20cee5c397fb 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -404,7 +404,8 @@ process_start:
!skb_queue_empty(&adapter->tx_data_q)) {
mwifiex_process_tx_queue(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -420,7 +421,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_process_bypass_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -435,7 +437,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -647,7 +650,7 @@ err_dnld_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
@@ -870,7 +873,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
"data: %lu BSS(%d-%d): Data <= kernel\n",
jiffies, priv->bss_type, priv->bss_num);
- if (priv->adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) {
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
@@ -1279,7 +1282,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
@@ -1371,7 +1375,7 @@ static void mwifiex_rx_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, rx_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_process_rx(adapter);
}
@@ -1387,7 +1391,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, main_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_main_process(adapter);
}
@@ -1404,7 +1408,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
adapter->int_status = 0;
@@ -1492,11 +1496,11 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
adapter->if_ops.up_dev(adapter);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
adapter->cmd_wait_q.status = 0;
@@ -1551,7 +1555,7 @@ err_init_fw:
adapter->if_ops.unregister_dev(adapter);
err_kmalloc:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
mwifiex_dbg(adapter, ERROR,
@@ -1648,9 +1652,9 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->fw_done = fw_done;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
@@ -1698,7 +1702,7 @@ err_init_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
err_registerdev:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 69ac0a22c28c..b025ba164412 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -517,6 +517,14 @@ enum mwifiex_iface_work_flags {
MWIFIEX_IFACE_WORK_CARD_RESET,
};
+enum mwifiex_adapter_work_flags {
+ MWIFIEX_SURPRISE_REMOVED,
+ MWIFIEX_IS_CMD_TIMEDOUT,
+ MWIFIEX_IS_SUSPENDED,
+ MWIFIEX_IS_HS_CONFIGURED,
+ MWIFIEX_IS_HS_ENABLING,
+};
+
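
Folding the scattered bool/u8 state into one work_flags word lets every reader and writer go through the atomic bitops, avoiding racy read-modify-write on adjacent fields. A self-contained sketch of the accessors (the struct name is hypothetical):

#include <linux/bitops.h>

struct adapter_state {
	unsigned long work_flags;	/* bits from mwifiex_adapter_work_flags */
};

static bool adapter_suspended(struct adapter_state *a)
{
	return test_bit(MWIFIEX_IS_SUSPENDED, &a->work_flags);
}

static void adapter_set_suspended(struct adapter_state *a, bool on)
{
	if (on)
		set_bit(MWIFIEX_IS_SUSPENDED, &a->work_flags);
	else
		clear_bit(MWIFIEX_IS_SUSPENDED, &a->work_flags);
}
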
struct mwifiex_band_config {
u8 chan_band:2;
u8 chan_width:2;
@@ -616,9 +624,6 @@ struct mwifiex_private {
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
- /* spin lock for Rx packets */
- spinlock_t rx_pkt_lock;
-
#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
u32 assoc_rsp_size;
@@ -875,7 +880,7 @@ struct mwifiex_adapter {
struct device *dev;
struct wiphy *wiphy;
u8 perm_addr[ETH_ALEN];
- bool surprise_removed;
+ unsigned long work_flags;
u32 fw_release_number;
u8 intf_hdr_len;
u16 init_wait_q_woken;
@@ -929,7 +934,6 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u8 is_cmd_timedout;
u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
@@ -979,13 +983,10 @@ struct mwifiex_adapter {
u16 pps_uapsd_mode;
u32 pm_wakeup_fw_try;
struct timer_list wakeup_timer;
- u8 is_hs_configured;
struct mwifiex_hs_config_param hs_cfg;
u8 hs_activated;
u16 hs_activate_wait_q_woken;
wait_queue_head_t hs_activate_wait_q;
- bool is_suspended;
- bool hs_enabling;
u8 event_body[MAX_EVENT_SIZE];
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
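
The main.h change above collapses five independent bool/u8 status fields into a single unsigned long bitmap driven by the atomic bitops API, so flag updates from different contexts go through atomic read-modify-write operations instead of plain stores to adjacent fields. A minimal sketch of the pattern, with invented names (struct foo_adapter and its helpers are illustrative, not part of the patch):

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum foo_work_flags {
            FOO_SURPRISE_REMOVED,
            FOO_IS_SUSPENDED,
    };

    struct foo_adapter {
            unsigned long work_flags;       /* one bit per former bool */
    };

    /* set_bit()/clear_bit() are atomic RMW operations, so concurrent
     * updates of different flags cannot corrupt each other.
     */
    static void foo_mark_removed(struct foo_adapter *a)
    {
            set_bit(FOO_SURPRISE_REMOVED, &a->work_flags);
    }

    static bool foo_io_allowed(struct foo_adapter *a)
    {
            return !test_bit(FOO_SURPRISE_REMOVED, &a->work_flags) &&
                   !test_bit(FOO_IS_SUSPENDED, &a->work_flags);
    }
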
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c42b7296ddd..3fe81b2a929a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -170,7 +170,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -178,8 +178,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
flush_workqueue(adapter->workqueue);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return 0;
}
@@ -207,13 +207,13 @@ static int mwifiex_pcie_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
@@ -2430,7 +2430,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
}
adapter = card->adapter;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
goto exit;
if (card->msix_enable)
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 895b806cdb03..8e483b0bc3b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1495,7 +1495,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
return -EBUSY;
}
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"Ignore scan. Card removed or firmware in bad state\n");
return -EFAULT;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index dfdcbc4f141a..d49fbd58afa7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,13 +181,13 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -260,7 +260,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
@@ -450,7 +450,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -460,8 +460,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 03a6492662ca..a327fc5b36e3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -224,7 +224,8 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
adapter->tx_lock_flag = false;
adapter->pps_uapsd_mode = false;
- if (adapter->is_cmd_timedout && adapter->curr_cmd)
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags) &&
+ adapter->curr_cmd)
return;
priv->media_connected = false;
mwifiex_dbg(adapter, MSG,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 5414b755cf82..b454b5f85503 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -419,7 +419,8 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
}
if (hs_cfg->is_invoke_hostcmd) {
if (hs_cfg->conditions == HS_CFG_CANCEL) {
- if (!adapter->is_hs_configured)
+ if (!test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags))
/* Already cancelled */
break;
/* Save previous condition */
@@ -535,7 +536,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
memset(&hscfg, 0, sizeof(hscfg));
hscfg.is_invoke_hostcmd = true;
- adapter->hs_enabling = true;
+ set_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_cancel_all_pending_cmd(adapter);
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
@@ -601,7 +602,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
else
info->wep_status = false;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->is_deep_sleep = adapter->is_deep_sleep;
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 620f8650a742..37c24b95e642 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -143,7 +143,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
int ret;
struct mwifiex_txinfo *tx_info = NULL;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return -1;
if (!priv->media_connected)
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 1e6a62c69ac5..a83c5afc256a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -289,32 +289,6 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
src_node->stats.rx_packets++;
}
- skb->dev = priv->netdev;
- skb->protocol = eth_type_trans(skb, priv->netdev);
- skb->ip_summed = CHECKSUM_NONE;
-
- /* This is required only in case of 11n and USB/PCIE as we alloc
- * a buffer of 4K only if its 11N (to be able to receive 4K
- * AMSDU packets). In case of SD we allocate buffers based
- * on the size of packet and hence this is not needed.
- *
- * Modifying the truesize here as our allocation for each
- * skb is 4K but we only receive 2K packets and this cause
- * the kernel to start dropping packets in case where
- * application has allocated buffer based on 2K size i.e.
- * if there a 64K packet received (in IP fragments and
- * application allocates 64K to receive this packet but
- * this packet would almost double up because we allocate
- * each 1.5K fragment in 4K and pass it up. As soon as the
- * 64K limit hits kernel will start to drop rest of the
- * fragments. Currently we fail the Filesndl-ht.scr script
- * for UDP, hence this fix
- */
- if ((adapter->iface_type == MWIFIEX_USB ||
- adapter->iface_type == MWIFIEX_PCIE) &&
- (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
- skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
-
if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
@@ -350,6 +324,32 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
return 0;
}
+ skb->dev = priv->netdev;
+ skb->protocol = eth_type_trans(skb, priv->netdev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* This is required only for 11n and USB/PCIE, where we allocate
+ * a 4K buffer only for 11N (to be able to receive 4K AMSDU
+ * packets). For SD we allocate buffers based on the packet size,
+ * so this is not needed.
+ *
+ * We adjust the truesize here because our allocation for each
+ * skb is 4K while we typically receive only 2K packets. That
+ * makes the kernel start dropping packets once an application
+ * has sized its receive buffer for 2K packets: e.g. if a 64K
+ * packet arrives in IP fragments and the application allocates
+ * 64K to receive it, the packet nearly doubles in size because
+ * each 1.5K fragment is carried in a 4K allocation, and as soon
+ * as the 64K limit is hit the kernel drops the remaining
+ * fragments. Without this fix the Filesndl-ht.scr script fails
+ * for UDP.
+ */
+ if ((adapter->iface_type == MWIFIEX_USB ||
+ adapter->iface_type == MWIFIEX_PCIE) &&
+ skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
+ skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
/* Forward multicast/broadcast packet to upper layer*/
if (in_interrupt())
netif_rx(skb);
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
ret = mwifiex_handle_uap_rx_forward(priv, skb);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
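
The hunk above also stretches rx_reorder_tbl_lock across the mwifiex_11n_get_rx_reorder_tbl() lookup, so a concurrent flush cannot free the table entry between the lookup and the branch taken on its result. A hedged sketch of that shape (the wrapper name is invented; the AMSDU special case from the real code is omitted):

    static int rx_forward_or_reorder(struct mwifiex_private *priv,
                                     struct sk_buff *skb, u8 prio, u8 *ta)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
            if (!mwifiex_11n_get_rx_reorder_tbl(priv, prio, ta)) {
                    /* No reorder table: forward while still holding the
                     * lock, exactly as the patched code does above.
                     */
                    ret = mwifiex_handle_uap_rx_forward(priv, skb);
                    spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
                                           flags);
                    return ret;
            }
            spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

            /* ... otherwise hand the skb to the reorder machinery ... */
            return 0;
    }
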
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6e3cf9817730..433c6a16870b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -181,7 +181,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
atomic_dec(&card->rx_data_urb_pending);
if (recv_length) {
- if (urb->status || (adapter->surprise_removed)) {
+ if (urb->status ||
+ test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"URB status is failed: %d\n", urb->status);
/* Do not free skb in case of command ep */
@@ -218,10 +219,10 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
dev_kfree_skb_any(skb);
}
} else if (urb->status) {
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, FATAL,
"Card is removed: %d\n", urb->status);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
}
dev_kfree_skb_any(skb);
return;
@@ -529,7 +530,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
- if (unlikely(adapter->is_suspended))
+ if (unlikely(test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)))
mwifiex_dbg(adapter, WARN,
"Device already suspended\n");
@@ -537,19 +538,19 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return -EFAULT;
}
- /* 'is_suspended' flag indicates device is suspended.
+ /* 'MWIFIEX_IS_SUSPENDED' bit indicates device is suspended.
* It must be set here before the usb_kill_urb() calls. Reason
* is in the complete handlers, urb->status(= -ENOENT) and
* this flag is used in combination to distinguish between a
* 'suspended' state and a 'disconnect' one.
*/
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
usb_kill_urb(card->rx_cmd.urb);
@@ -593,7 +594,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
}
adapter = card->adapter;
- if (unlikely(!adapter->is_suspended)) {
+ if (unlikely(!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
@@ -602,7 +603,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
/* Indicate device resumed. The netdev queue will be resumed only
* after the urbs have been re-submitted
*/
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
if (!atomic_read(&card->rx_data_urb_pending))
for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
@@ -644,11 +645,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
MWIFIEX_FUNC_SHUTDOWN);
}
- if (adapter->workqueue)
- flush_workqueue(adapter->workqueue);
-
- mwifiex_usb_free(card);
-
mwifiex_dbg(adapter, FATAL,
"%s: removing card\n", __func__);
mwifiex_remove_card(adapter);
@@ -1163,13 +1159,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
unsigned long flags;
int idx, ret;
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
return -1;
}
@@ -1356,6 +1352,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ mwifiex_usb_free(card);
+
mwifiex_usb_cleanup_tx_aggr(adapter);
card->adapter = NULL;
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6dd212898117..f9b71539d33e 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -197,9 +197,11 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->is_deep_sleep = adapter->is_deep_sleep;
info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->hs_activated = adapter->hs_activated;
- info->is_cmd_timedout = adapter->is_cmd_timedout;
+ info->is_cmd_timedout = test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags);
info->num_cmd_host_to_card_failure
= adapter->dbg.num_cmd_host_to_card_failure;
info->num_cmd_sleep_cfm_host_to_card_failure
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 936a0a841af8..407b9932ca4d 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -599,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
if (priv->adapter->if_ops.clean_pcie_ring &&
- !priv->adapter->surprise_removed)
+ !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index fc05d79c80d0..b6c5f17dca30 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,10 +1,37 @@
config MT76_CORE
tristate
+config MT76_USB
+ tristate
+ depends on MT76_CORE
+
+config MT76x2_COMMON
+ tristate
+ depends on MT76_CORE
+
+config MT76x0U
+ tristate "MediaTek MT76x0U (USB) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7610U-based wireless USB dongles.
+
config MT76x2E
tristate "MediaTek MT76x2E (PCIe) support"
select MT76_CORE
+ select MT76x2_COMMON
depends on MAC80211
depends on PCI
---help---
This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76_CORE
+ select MT76_USB
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index a0156bc01dea..158d10d2716c 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,15 +1,31 @@
obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76x0U) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+mt76-usb-y := usb.o usb_trace.o usb_mcu.o
+
CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+
+mt76x2-common-y := \
+ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
+ mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
+ mt76x2_debugfs.o
mt76x2e-y := \
mt76x2_pci.o mt76x2_dma.o \
- mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
- mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+ mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
+ mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
mt76x2_dfs.o mt76x2_trace.o
+mt76x2u-y := \
+ mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
+ mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
+
CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 1e8cdce919d9..73c8b2805c97 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -113,7 +113,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
if (nframes)
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
REORDER_TIMEOUT);
- mt76_rx_complete(dev, &frames, -1);
+ mt76_rx_complete(dev, &frames, NULL);
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3dbedcedc2c4..c51da2205b93 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -239,6 +239,80 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
iowrite32(q->head, &q->regs->cpu_idx);
}
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue_entry e;
+ struct mt76_txwi_cache *t;
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *iter;
+ dma_addr_t addr;
+ int len;
+ u32 tx_info = 0;
+ int n, ret;
+
+ t = mt76_get_txwi(dev);
+ if (!t) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+ &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto free;
+
+ len = skb->len - skb->data_len;
+ addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ n = 0;
+ buf[n].addr = t->dma_addr;
+ buf[n++].len = dev->drv->txwi_size;
+ buf[n].addr = addr;
+ buf[n++].len = len;
+
+ skb_walk_frags(skb, iter) {
+ if (n == ARRAY_SIZE(buf))
+ goto unmap;
+
+ addr = dma_map_single(dev->dev, iter->data, iter->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ goto unmap;
+
+ buf[n].addr = addr;
+ buf[n++].len = iter->len;
+ }
+
+ if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ goto unmap;
+
+ return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+ ret = -ENOMEM;
+ for (n--; n > 0; n--)
+ dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+ DMA_TO_DEVICE);
+
+free:
+ e.skb = skb;
+ e.txwi = t;
+ dev->drv->tx_complete_skb(dev, q, &e, true);
+ mt76_put_txwi(dev, t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
+
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
@@ -400,7 +474,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
do {
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
- mt76_rx_poll_complete(dev, qid);
+ mt76_rx_poll_complete(dev, qid, napi);
done += cur;
} while (cur && done < budget);
@@ -436,6 +510,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
.add_buf = mt76_dma_add_buf,
+ .tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
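
Wiring mt76_dma_tx_queue_skb into mt76_queue_ops as .tx_queue_skb turns the TX entry point into an indirect call, which is what allows the USB backend added in this series to substitute bulk-URB submission for DMA descriptors. A sketch of a caller, assuming the q_tx array mt76.h already provides (the wrapper name is illustrative):

    static int foo_tx(struct mt76_dev *dev, enum mt76_txq_id qid,
                      struct sk_buff *skb, struct mt76_wcid *wcid,
                      struct ieee80211_sta *sta)
    {
            struct mt76_queue *q = &dev->q_tx[qid];

            /* Backend-specific: DMA ring on PCIe, bulk URBs on USB. */
            return dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
    }
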
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1dad39697929..27248e24a19b 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -25,6 +25,39 @@
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_RX_RXWI_LEN 32
+
struct mt76_desc {
__le32 buf0;
__le32 ctrl;
@@ -32,6 +65,16 @@ struct mt76_desc {
__le32 info;
} __packed __aligned(4);
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
int mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
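
The TXD/FCE/MCU words above are plain 32-bit descriptors whose fields are declared with GENMASK(), so they compose naturally with the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. A small sketch (the two helper functions are illustrative, not from the patch):

    #include <linux/bitfield.h>

    /* Build the 4-byte MCU request header from the masks above. */
    static u32 foo_mcu_msg_hdr(u32 len, u32 seq, u32 cmd)
    {
            return FIELD_PREP(MT_MCU_MSG_LEN, len) |
                   FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
                   FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
                   FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
                   MT_MCU_MSG_TYPE_CMD;
    }

    /* Recover a field from the RX-side FCE info word. */
    static u32 foo_mcu_evt_type(u32 rx_info)
    {
            return FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rx_info);
    }
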
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index d62e34e7eadf..029d54bce9e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -303,14 +303,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
SET_IEEE80211_DEV(hw, dev->dev);
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->available_antennas_tx = dev->antenna_mask;
@@ -591,15 +583,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue)
+ struct napi_struct *napi)
{
- struct napi_struct *napi = NULL;
struct ieee80211_sta *sta;
struct sk_buff *skb;
- if (queue >= 0)
- napi = &dev->napi[queue];
-
spin_lock(&dev->rx_lock);
while ((skb = __skb_dequeue(frames)) != NULL) {
if (mt76_check_ccmp_pn(skb)) {
@@ -613,7 +601,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
spin_unlock(&dev->rx_lock);
}
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi)
{
struct sk_buff_head frames;
struct sk_buff *skb;
@@ -625,5 +614,6 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
mt76_rx_aggr_reorder(skb, &frames);
}
- mt76_rx_complete(dev, &frames, q);
+ mt76_rx_complete(dev, &frames, napi);
}
+EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
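
Replacing the queue index with an explicit napi pointer lets a bus backend that has no NAPI contexts (the USB path) pass NULL and still share the completion code. The two call shapes, as a sketch (MT_RXQ_MAIN and dev->napi[] are the existing mt76 names):

    /* DMA backend: a per-queue NAPI context exists. */
    mt76_rx_poll_complete(dev, MT_RXQ_MAIN, &dev->napi[MT_RXQ_MAIN]);

    /* USB backend: no NAPI instance; a NULL napi is handled downstream. */
    mt76_rx_complete(dev, &frames, NULL);
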
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index d2166fbf50ff..2eab35879163 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
#include "util.h"
@@ -30,6 +31,7 @@
#define MT_RX_BUF_SIZE 2048
struct mt76_dev;
+struct mt76_wcid;
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
@@ -62,12 +64,22 @@ struct mt76_queue_buf {
int len;
};
+struct mt76u_buf {
+ struct mt76_dev *dev;
+ struct urb *urb;
+ size_t len;
+ bool done;
+};
+
struct mt76_queue_entry {
union {
void *buf;
struct sk_buff *skb;
};
- struct mt76_txwi_cache *txwi;
+ union {
+ struct mt76_txwi_cache *txwi;
+ struct mt76u_buf ubuf;
+ };
bool schedule;
};
@@ -88,6 +100,7 @@ struct mt76_queue {
struct list_head swq;
int swq_queued;
+ u16 first;
u16 head;
u16 tail;
int ndesc;
@@ -110,6 +123,10 @@ struct mt76_queue_ops {
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi);
+ int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more);
@@ -187,9 +204,13 @@ struct mt76_rx_tid {
enum {
MT76_STATE_INITIALIZED,
MT76_STATE_RUNNING,
+ MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_RESET,
MT76_OFFCHANNEL,
+ MT76_REMOVED,
+ MT76_READING_STATS,
+ MT76_MORE_STATS,
};
struct mt76_hw_cap {
@@ -210,6 +231,8 @@ struct mt76_driver_ops {
void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+ bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -229,6 +252,64 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 0x1,
+ MT_VEND_WRITE = 0x2,
+ MT_VEND_MULTI_WRITE = 0x6,
+ MT_VEND_MULTI_READ = 0x7,
+ MT_VEND_READ_EEPROM = 0x9,
+ MT_VEND_WRITE_FCE = 0x42,
+ MT_VEND_WRITE_CFG = 0x46,
+ MT_VEND_READ_CFG = 0x47,
+};
+
+enum mt76u_in_ep {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt76u_out_ep {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+#define MT_SG_MAX_SIZE 8
+#define MT_NUM_TX_ENTRIES 256
+#define MT_NUM_RX_ENTRIES 128
+#define MCU_RESP_URB_SIZE 1024
+struct mt76_usb {
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ struct delayed_work stat_work;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ struct mt76u_mcu {
+ struct mutex mutex;
+ struct completion cmpl;
+ struct mt76u_buf res;
+ u32 msg_seq;
+ } mcu;
+};
+
struct mt76_dev {
struct ieee80211_hw *hw;
struct cfg80211_chan_def chandef;
@@ -271,6 +352,8 @@ struct mt76_dev {
char led_name[32];
bool led_al;
u8 led_pin;
+
+ struct mt76_usb usb;
};
enum mt76_phy_type {
@@ -390,6 +473,26 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+/* increment with wrap-around */
+static inline int mt76_incr(int val, int size)
+{
+ return (val + 1) & (size - 1);
+}
+
+/* decrement with wrap-around */
+static inline int mt76_decr(int val, int size)
+{
+ return (val - 1) & (size - 1);
+}
+
+/* Hardware uses mirrored order of queues with Q3
+ * having the highest priority
+ */
+static inline u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
@@ -409,9 +512,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
return container_of(ptr, struct ieee80211_sta, drv_priv);
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -442,10 +545,69 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
+struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue);
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+ struct napi_struct *napi);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+/* usb */
+static inline bool mt76u_urb_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN &&
+ urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
+static inline bool mt76u_check_sg(struct mt76_dev *dev)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ return (udev->bus->sg_tablesize > 0 &&
+ (udev->bus->no_sg_constraint ||
+ udev->speed == USB_SPEED_WIRELESS));
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len);
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp);
+void mt76u_buf_free(struct mt76u_buf *buf);
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+int mt76u_submit_rx_buffers(struct mt76_dev *dev);
+int mt76u_alloc_queues(struct mt76_dev *dev);
+void mt76u_stop_queues(struct mt76_dev *dev);
+void mt76u_stop_stat_wk(struct mt76_dev *dev);
+void mt76u_queues_deinit(struct mt76_dev *dev);
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset);
+void mt76u_mcu_complete_urb(struct urb *urb);
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+void mt76u_mcu_fw_reset(struct mt76_dev *dev);
+int mt76u_mcu_init_rx(struct mt76_dev *dev);
+
#endif
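
mt76_incr()/mt76_decr() above wrap with a mask rather than a modulo, so they are only correct when the ring size is a power of two. A tiny illustrative use (the ring structure is invented for the example):

    #define FOO_RING_SIZE 128   /* must be a power of two for the mask */

    struct foo_ring {
            int head, tail;
            void *slot[FOO_RING_SIZE];
    };

    static bool foo_ring_put(struct foo_ring *r, void *item)
    {
            int next = mt76_incr(r->head, FOO_RING_SIZE);

            if (next == r->tail)    /* full: one slot is left unused */
                    return false;
            r->slot[r->head] = item;
            r->head = next;
            return true;
    }
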
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 000000000000..7843908261ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MT76x0U) += mt76x0.o
+
+mt76x0-objs = \
+ usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
+ mac.o util.o debugfs.o tx.o core.o
+# ccflags-y := -DDEBUG
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
new file mode 100644
index 000000000000..892803fce842
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
+{
+ int i = 100;
+ u32 val;
+
+ do {
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
+ val = mt76_rr(dev, MT_MAC_CSR0);
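+ /* MAC_CSR0 reads as all-zeros or all-ones until the ASIC is ready */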
+ if (val && ~val)
+ return 0;
+
+ udelay(10);
+ } while (i--);
+
+ return -EIO;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
new file mode 100644
index 000000000000..e7a77a886068
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt76x0_dev *dev = data;
+
+ mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt76x0_dev *dev = data;
+
+ *val = mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i, j;
+
+#define stat_printf(grp, off, name) \
+ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+ stat_printf(rx_stat, 0, rx_crc_err);
+ stat_printf(rx_stat, 1, rx_phy_err);
+ stat_printf(rx_stat, 2, rx_false_cca);
+ stat_printf(rx_stat, 3, rx_plcp_err);
+ stat_printf(rx_stat, 4, rx_fifo_overflow);
+ stat_printf(rx_stat, 5, rx_duplicate);
+
+ stat_printf(tx_stat, 0, tx_fail_cnt);
+ stat_printf(tx_stat, 1, tx_bcn_cnt);
+ stat_printf(tx_stat, 2, tx_success);
+ stat_printf(tx_stat, 3, tx_retransmit);
+ stat_printf(tx_stat, 4, tx_zero_len);
+ stat_printf(tx_stat, 5, tx_underflow);
+
+ stat_printf(aggr_stat, 0, non_aggr_tx);
+ stat_printf(aggr_stat, 1, aggr_tx);
+
+ stat_printf(zero_len_del, 0, tx_zero_len_del);
+ stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+ seq_puts(file, "Aggregations stats:\n");
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%08llx ",
+ dev->stats.aggr_n[i * 8 + j]);
+ seq_putc(file, '\n');
+ }
+
+ seq_printf(file, "recent average AMPDU len: %d\n",
+ atomic_read(&dev->avg_ampdu_len));
+
+ return 0;
+}
+
+static int
+mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt76x0_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt76x0_eeprom_param_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i;
+
+ seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+ seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
+ dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
+ seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
+ dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
+ dev->ee->rssi_offset_5ghz[2]);
+ seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
+ seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
+ seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
+ dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
+ dev->ee->lna_gain_5ghz[2]);
+ seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
+ seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ dev->ee->reg.start + dev->ee->reg.num - 1);
+
+ seq_puts(file, "Per channel power:\n");
+ for (i = 0; i < 58; i++)
+ seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
+ dev->ee->tx_pwr_per_chan[i]);
+
+ seq_puts(file, "Per rate power 2GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_2g[i][0],
+ dev->ee->tx_pwr_cfg_2g[i][1]);
+
+ seq_puts(file, "Per rate power 5GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_5g[i][0],
+ dev->ee->tx_pwr_cfg_5g[i][1]);
+
+ return 0;
+}
+
+static int
+mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+ .open = mt76x0_eeprom_param_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
+ if (!dir)
+ return;
+
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+ &fops_eeprom_param);
+}
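
The regidx/regval pair gives raw register access from userspace: write an offset to regidx, then read (or write) regval. A userspace sketch in C (the phy0 path and the 0x1000 offset are setup-dependent examples, not fixed by the driver):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long val;
            FILE *f;

            f = fopen("/sys/kernel/debug/ieee80211/phy0/mt76x0/regidx", "w");
            if (!f)
                    return 1;
            fprintf(f, "%u\n", 0x1000u);    /* register offset to inspect */
            fclose(f);

            f = fopen("/sys/kernel/debug/ieee80211/phy0/mt76x0/regval", "r");
            if (!f)
                    return 1;
            if (fscanf(f, "%llx", &val) == 1)
                    printf("reg 0x1000 = 0x%08llx\n", val);
            fclose(f);
            return 0;
    }
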
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
new file mode 100644
index 000000000000..e2efb430419b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+ const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+ unsigned int hdrlen;
+
+ if (unlikely(len < 10))
+ return 0;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (unlikely(hdrlen > len))
+ return 0;
+ return hdrlen;
+}
+
+static struct sk_buff *
+mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+ struct sk_buff *skb;
+ u32 true_len, hdr_len = 0, copy, frag;
+
+ skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
+ if (!true_len || true_len > seg_len)
+ goto bad_frame;
+
+ hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+ if (!hdr_len)
+ goto bad_frame;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+
+ data += hdr_len + 2;
+ true_len -= hdr_len;
+ hdr_len = 0;
+ }
+
+ /* If not doing paged RX, the allocated skb always has enough space */
+ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+ frag = true_len - copy;
+
+ memcpy(skb_put(skb, copy), data, copy);
+ data += copy;
+
+ if (frag) {
+ skb_add_rx_frag(skb, 0, p, data - page_address(p),
+ frag, truesize);
+ get_page(p);
+ }
+
+ return skb;
+
+bad_frame:
+ dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
+ true_len, hdr_len);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
+ u32 seg_len, struct page *p)
+{
+ struct sk_buff *skb;
+ struct mt76x0_rxwi *rxwi;
+ u32 fce_info, truesize = seg_len;
+
+ /* DMA_INFO field at the beginning of the segment contains only some of
+ * the information, we need to read the FCE descriptor from the end.
+ */
+ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+ seg_len -= MT_FCE_INFO_LEN;
+
+ data += MT_DMA_HDR_LEN;
+ seg_len -= MT_DMA_HDR_LEN;
+
+ rxwi = (struct mt76x0_rxwi *) data;
+ data += sizeof(struct mt76x0_rxwi);
+ seg_len -= sizeof(struct mt76x0_rxwi);
+
+ if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
+ dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
+
+ trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
+
+ skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+ if (!skb)
+ return;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_rx(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
+{
+ u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+ sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
+ u16 dma_len = get_unaligned_le16(data);
+
+ if (data_len < min_seg_len ||
+ WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return 0;
+
+ return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
+{
+ u32 seg_len, data_len = e->urb->actual_length;
+ u8 *data = page_address(e->p);
+ struct page *new_p = NULL;
+ int cnt = 0;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ /* Copy if there is very little data in the buffer. */
+ if (data_len > 512)
+ new_p = dev_alloc_pages(MT_RX_ORDER);
+
+ while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
+ mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+ data_len -= seg_len;
+ data += seg_len;
+ cnt++;
+ }
+
+ if (cnt > 1)
+ trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
+
+ if (new_p) {
+ /* we have one extra ref from the allocator */
+ __free_pages(e->p, MT_RX_ORDER);
+
+ e->p = new_p;
+ }
+}
+
+static struct mt76x0_dma_buf_rx *
+mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
+{
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ struct mt76x0_dma_buf_rx *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (!q->pending)
+ goto out;
+
+ buf = &q->e[q->start];
+ q->pending--;
+ q->start = (q->start + 1) % q->entries;
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+ return buf;
+}
+
+static void mt76x0_complete_rx(struct urb *urb)
+{
+ struct mt76x0_dev *dev = urb->context;
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+ q->end = (q->end + 1) % q->entries;
+ q->pending++;
+ tasklet_schedule(&dev->rx_tasklet);
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt76x0_rx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct mt76x0_dma_buf_rx *e;
+
+ while ((e = mt76x0_rx_get_pending_entry(dev))) {
+ if (e->urb->status)
+ continue;
+
+ mt76x0_rx_process_entry(dev, e);
+ mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
+ }
+}
+
+static void mt76x0_complete_tx(struct urb *urb)
+{
+ struct mt76x0_tx_queue *q = urb->context;
+ struct mt76x0_dev *dev = q->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
+ trace_mt76x0_tx_dma_done(&dev->mt76, skb);
+
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+ tasklet_schedule(&dev->tx_tasklet);
+
+ if (q->used == q->entries - q->entries / 8)
+ ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+
+ q->start = (q->start + 1) % q->entries;
+ q->used--;
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void mt76x0_tx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct sk_buff_head skbs;
+ unsigned long flags;
+
+ __skb_queue_head_init(&skbs);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ set_bit(MT76_MORE_STATS, &dev->mt76.state);
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+
+ skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+
+ mt76x0_tx_status(dev, skb);
+ }
+}
+
+static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
+ struct sk_buff *skb, u8 ep)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
+ struct mt76x0_dma_buf_tx *e;
+ struct mt76x0_tx_queue *q = &dev->tx_q[ep];
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (WARN_ON_ONCE(q->entries <= q->used)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ e = &q->e[q->end];
+ e->skb = skb;
+ usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+ mt76x0_complete_tx, q);
+ ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+ if (ret) {
+ /* Special-handle ENODEV from TX urb submission because it will
+ * often be the first ENODEV we see after device is removed.
+ */
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ else
+ dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
+ ret);
+ goto out;
+ }
+
+ q->end = (q->end + 1) % q->entries;
+ q->used++;
+
+ if (q->used >= q->entries)
+ ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return ret;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+ if (ep == 5)
+ return MT_QSEL_MGMT;
+ return MT_QSEL_EDCA;
+}
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q)
+{
+ u8 ep = q2ep(hw_q);
+ u32 dma_flags;
+ int ret;
+
+ dma_flags = MT_TXD_PKT_INFO_80211;
+ if (wcid->hw_key_idx == 0xff)
+ dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+ ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+ if (ret)
+ return ret;
+
+ ret = mt76x0_dma_submit_tx(dev, skb, ep);
+
+ if (ret) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_kill_rx(struct mt76x0_dev *dev)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ int next = dev->rx_q.end;
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ usb_poison_urb(dev->rx_q.e[next].urb);
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ u8 *buf = page_address(e->p);
+ unsigned pipe;
+ int ret;
+
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
+
+ usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+ mt76x0_complete_rx, dev);
+
+ trace_mt76x0_submit_urb(&dev->mt76, e->urb);
+ ret = usb_submit_urb(e->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
+
+ return ret;
+}
+
+static int mt76x0_submit_rx(struct mt76x0_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+ usb_free_urb(dev->rx_q.e[i].urb);
+ }
+}
+
+static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+ dev->rx_q.dev = dev;
+ dev->rx_q.entries = N_RX_ENTRIES;
+
+ for (i = 0; i < N_RX_ENTRIES; i++) {
+ dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+ if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ WARN_ON(q->used);
+
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
+ usb_free_urb(q->e[i].urb);
+ }
+}
+
+static void mt76x0_free_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ mt76x0_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
+ struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ q->dev = dev;
+ q->entries = N_TX_ENTRIES;
+
+ for (i = 0; i < N_TX_ENTRIES; i++) {
+ q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!q->e[i].urb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
+ sizeof(*dev->tx_q), GFP_KERNEL);
+ if (!dev->tx_q)
+ return -ENOMEM;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt76x0_dma_init(struct mt76x0_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
+ tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
+
+ ret = mt76x0_alloc_tx(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_alloc_rx(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_submit_rx(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt76x0_dma_cleanup(dev);
+ return ret;
+}
+
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
+{
+ mt76x0_kill_rx(dev);
+
+ tasklet_kill(&dev->rx_tasklet);
+
+ mt76x0_free_rx(dev);
+ mt76x0_free_tx(dev);
+
+ tasklet_kill(&dev->tx_tasklet);
+}
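
The TX queues above keep the classic start/end/used triple: submission advances end, completion advances start, and used drives flow control (stop the mac80211 queue when full, wake it once roughly an eighth of the ring has drained). A compact sketch of the submit-side invariant (the helper is illustrative; locking as in the code above):

    /* Must be called with dev->tx_lock held, as mt76x0_dma_submit_tx()
     * does above: empty when used == 0, full when used == entries.
     */
    static bool foo_txq_claim_slot(struct mt76x0_tx_queue *q,
                                   struct urb **urb)
    {
            if (q->used >= q->entries)
                    return false;   /* caller stops the queue */
            *urb = q->e[q->end].urb;
            q->end = (q->end + 1) % q->entries;
            q->used++;
            return true;
    }
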
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
new file mode 100644
index 000000000000..891ce1c3461f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_DMA_H
+#define __MT76X0U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_SEQ GENMASK(19, 16)
+#define MT_TXD_CMD_TYPE GENMASK(26, 20)
+
+enum mt76_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+enum mt76_info_type {
+ DMA_PACKET,
+ DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
+#define MT_TXD_PKT_INFO_80211 BIT(19)
+#define MT_TXD_PKT_INFO_TSO BIT(20)
+#define MT_TXD_PKT_INFO_CSO BIT(21)
+#define MT_TXD_PKT_INFO_WIV BIT(24)
+#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+
+static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
+ enum mt76_msg_port d_port,
+ enum mt76_info_type type, u32 flags)
+{
+ u32 info;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+
+ info = flags |
+ FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+ FIELD_PREP(MT_TXD_INFO_TYPE, type);
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+ flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
+ return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR BIT(24)
+#define MT_RXD_INFO_QSEL GENMASK(26, 25)
+#define MT_RXD_INFO_PORT GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
+
+enum mt76_evt_type {
+ CMD_DONE,
+ CMD_ERROR,
+ CMD_RETRY,
+ EVENT_PWR_RSP,
+ EVENT_WOW_RSP,
+ EVENT_CARRIER_DETECT_RSP,
+ EVENT_DFS_DETECT_RSP,
+};
+
+#endif
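
mt76x0_dma_skb_wrap() above prepends the 4-byte TXINFO word, zero-pads the payload to a 4-byte boundary, and appends one more zero word, while the TXINFO length field carries only the rounded payload length. A worked check of that arithmetic (the helper is illustrative; round_up() is from <linux/kernel.h>):

    /* Total transfer length produced by the wrap for a given payload:
     * e.g. a 61-byte payload yields 4 + 64 + 4 = 72 bytes on the wire,
     * with the TXINFO length field set to 64.
     */
    static unsigned int foo_wrapped_len(unsigned int pkt_len)
    {
            return MT_DMA_HDR_LEN           /* TXINFO word */
                   + round_up(pkt_len, 4)   /* payload, zero-padded */
                   + 4;                     /* trailing zero word */
    }
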
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 000000000000..36da1e6bc21a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return val;
+}
+
+static inline int
+sign_extend(u32 val, unsigned int size)
+{
+ bool sign = val & BIT(size - 1);
+
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
+}
+
+static int
+mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
+ enum mt76x0_eeprom_access_modes mode)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
+ MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
+ * will not return valid data but it's ok.
+ */
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
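+/* Read the efuse usage map with physical accesses and count the span of
+ * free (zero) bytes: if almost the whole map is free, the efuse was never
+ * programmed and the device would need an external default EEPROM file.
+ */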
+static int
+mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+{
+ u8 data[MT_MAP_READS * 16];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ for (i = 0; i < MT_MAP_READS; i++) {
+ ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+ data + i * 16, MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+ dev_err(dev->mt76.dev, "Error: your device needs a default EEPROM file and this driver doesn't support it!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
+ u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
+ case BOARD_TYPE_5GHZ:
+ dev->ee->has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->ee->has_2ghz = true;
+ break;
+ default:
+ dev->ee->has_2ghz = true;
+ dev->ee->has_5ghz = true;
+ break;
+ }
+
+ dev_dbg(dev->mt76.dev, "Has 2GHz %d 5GHz %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);
+
+ if (!field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->mt76.dev,
+ "Error: this driver does not support HW RF ctrl\n");
+
+ if (!field_valid(nic_conf0 >> 8))
+ return;
+
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->mt76.dev,
+ "Error: device has more than 1 RX/TX stream!\n");
+
+ dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
+ dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
+}
+
+static int
+mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
+{
+ const void *src = eeprom + MT_EE_MAC_ADDR;
+
+ ether_addr_copy(dev->macaddr, src);
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ return 0;
+}
+
+static void
+mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 temp = eeprom[MT_EE_TEMP_OFFSET];
+
+ if (field_valid(temp))
+ dev->ee->temp_off = sign_extend(temp, 8);
+ else
+ dev->ee->temp_off = -10;
+}
+
+static void
+mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
+ * - comments in rtmp_def.h are incorrect (see rt_channel.c)
+ */
+ static const struct reg_channel_bounds chan_bounds[] = {
+ /* EEPROM country regions 0 - 7 */
+ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
+ { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
+ /* EEPROM country regions 32 - 33 */
+ { 1, 11 }, { 1, 14 }
+ };
+ u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
+ int idx = -1;
+
+ dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
+ if (val < 8)
+ idx = val;
+ if (val > 31 && val < 33)
+ idx = val - 32 + 8;
+
+ if (idx != -1)
+ dev_info(dev->mt76.dev,
+ "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ val, chan_bounds[idx].start,
+ chan_bounds[idx].start + chan_bounds[idx].num - 1);
+ else
+ idx = 5; /* channels 1 - 14 */
+
+ dev->ee->reg = chan_bounds[idx];
+
+ /* TODO: country region 33 is special - phy should be set to B-mode
+ * before entering channel 14 (see sta/connect.c)
+ */
+}
+
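+/* RF frequency offset plus its compensation byte: BIT(7) of the
+ * compensation value selects the sign, the low 7 bits the magnitude.
+ */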
+static void
+mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 comp;
+
+ dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+ comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+ if (comp & BIT(7))
+ dev->ee->rf_freq_off -= comp & 0x7f;
+ else
+ dev->ee->rf_freq_off += comp;
+}
+
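+/* 5 GHz LNA gain is stored per channel group; groups 1 and 2 fall back
+ * to the group 0 value when unprogrammed (0x00 or 0xff).
+ */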
+static void
+mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 gain;
+
+ dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
+ dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[1] = gain;
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[2] = gain;
+}
+
+static void
+mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
+
+ for (i = 0; i < 2; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+
+ rssi_offset = dev->ee->rssi_offset_5ghz;
+
+ for (i = 0; i < 3; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+}
+
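+/* Apply the BW40 delta to each of the four s6-encoded per-rate power
+ * values packed into a 32-bit TX_PWR_CFG word, clamping via int_to_s6().
+ */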
+static u32
+calc_bw40_power_rate(u32 value, int delta)
+{
+ u32 ret = 0;
+ int i, tmp;
+
+ for (i = 0; i < 4; i++) {
+ tmp = s6_to_int((value >> i*8) & 0xff) + delta;
+ ret |= (u32)(int_to_s6(tmp)) << i*8;
+ }
+
+ return ret;
+}
+
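+/* BW40 TX power delta encoding: BIT(7) enables the field, BIT(6) selects
+ * the sign and the low bits hold the magnitude, clamped to 8.
+ */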
+static s8
+get_delta(u8 val)
+{
+ s8 ret;
+
+ if (!field_valid(val) || !(val & BIT(7)))
+ return 0;
+
+ ret = val & 0x1f;
+ if (ret > 8)
+ ret = 8;
+ if (val & BIT(6))
+ ret = -ret;
+
+ return ret;
+}
+
+static void
+mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ s8 bw40_delta_2g, bw40_delta_5g;
+ u32 val;
+ int i;
+
+ bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+ bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
+
+ for (i = 0; i < 5; i++) {
+ val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+ /* Skip last 16 bits. */
+ if (i == 4)
+ val &= 0x0000ffff;
+
+ dev->ee->tx_pwr_cfg_2g[i][0] = val;
+ dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
+ }
+
+ /* Reading per rate tx power for 5 GHz band is a bit more complex. Note
+ * we mix 16 bit and 32 bit reads and sometimes do shifts.
+ */
+ val = get_unaligned_le16(eeprom + 0x120);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[0][0] = val;
+ dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le32(eeprom + 0x122);
+ dev->ee->tx_pwr_cfg_5g[1][0] = val;
+ dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0x126);
+ dev->ee->tx_pwr_cfg_5g[2][0] = val;
+ dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xec);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[3][0] = val;
+ dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xee);
+ dev->ee->tx_pwr_cfg_5g[4][0] = val;
+ dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+}
+
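+/* Per-channel TX power: EEPROM values outside the valid (0, 0x3f] range
+ * are replaced with a default of 5.
+ */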
+static void
+mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ u8 tx_pwr;
+
+ for (i = 0; i < 14; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[i] = 5;
+ }
+
+ for (i = 0; i < 40; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[14 + i] = 5;
+ }
+
+ dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
+ dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
+ dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
+ dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
+}
+
+int
+mt76x0_eeprom_init(struct mt76x0_dev *dev)
+{
+ u8 *eeprom;
+ int i, ret;
+
+ ret = mt76x0_efuse_physical_size_check(dev);
+ if (ret)
+ return ret;
+
+ dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
+ if (!dev->ee)
+ return -ENOMEM;
+
+ eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
+ ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+ if (ret)
+ goto out;
+ }
+
+ if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
+ dev_warn(dev->mt76.dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ eeprom[MT_EE_VERSION_EE]);
+ dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+ mt76x0_set_macaddr(dev, eeprom);
+ mt76x0_set_chip_cap(dev, eeprom);
+ mt76x0_set_country_reg(dev, eeprom);
+ mt76x0_set_rf_freq_off(dev, eeprom);
+ mt76x0_set_temp_offset(dev, eeprom);
+ mt76x0_set_lna_gain(dev, eeprom);
+ mt76x0_set_rssi_offset(dev, eeprom);
+ dev->chainmask = 0x0101;
+
+ mt76x0_set_tx_power_per_rate(dev, eeprom);
+ mt76x0_set_tx_power_per_chan(dev, eeprom);
+
+out:
+ kfree(eeprom);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 000000000000..e37b573aed7b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_EEPROM_H
+#define __MT76X0U_EEPROM_H
+
+struct mt76x0_dev;
+
+#define MT76X0U_EE_MAX_VER 0x0c
+#define MT76X0_EEPROM_SIZE 512
+
+#define MT76X0U_DEFAULT_TX_POWER 6
+
+enum mt76_eeprom_field {
+ MT_EE_CHIP_ID = 0x00,
+ MT_EE_VERSION_FAE = 0x02,
+ MT_EE_VERSION_EE = 0x03,
+ MT_EE_MAC_ADDR = 0x04,
+ MT_EE_NIC_CONF_0 = 0x34,
+ MT_EE_NIC_CONF_1 = 0x36,
+ MT_EE_COUNTRY_REGION_5GHZ = 0x38,
+ MT_EE_COUNTRY_REGION_2GHZ = 0x39,
+ MT_EE_FREQ_OFFSET = 0x3a,
+ MT_EE_NIC_CONF_2 = 0x42,
+
+ MT_EE_LNA_GAIN_2GHZ = 0x44,
+ MT_EE_LNA_GAIN_5GHZ_0 = 0x45,
+ MT_EE_RSSI_OFFSET = 0x46,
+ MT_EE_RSSI_OFFSET_5GHZ = 0x4a,
+ MT_EE_LNA_GAIN_5GHZ_1 = 0x49,
+ MT_EE_LNA_GAIN_5GHZ_2 = 0x4d,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x50,
+
+ MT_EE_TX_POWER_OFFSET_2GHZ = 0x52,
+
+ MT_EE_TX_TSSI_SLOPE = 0x6e,
+ MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
+ MT_EE_TX_TSSI_OFFSET = 0x76,
+
+ MT_EE_TX_POWER_OFFSET_5GHZ = 0x78,
+
+ MT_EE_TEMP_OFFSET = 0xd1,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
+ MT_EE_TX_POWER_BYRATE_BASE = 0xde,
+
+ MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
+ (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x0_eeprom_access_modes {
+ MT_EE_READ = 0,
+ MT_EE_PHYSICAL_READ = 1,
+};
+
+struct reg_channel_bounds {
+ u8 start;
+ u8 num;
+};
+
+struct mt76x0_eeprom_params {
+ u8 rf_freq_off;
+ s16 temp_off;
+ s8 rssi_offset_2ghz[2];
+ s8 rssi_offset_5ghz[3];
+ s8 lna_gain_2ghz;
+ s8 lna_gain_5ghz[3];
+ u8 pa_type;
+
+ /* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */
+ u32 tx_pwr_cfg_2g[5][2];
+ u32 tx_pwr_cfg_5g[5][2];
+
+ u8 tx_pwr_per_chan[58];
+
+ struct reg_channel_bounds reg;
+
+ bool has_2ghz;
+ bool has_5ghz;
+};
+
+int mt76x0_eeprom_init(struct mt76x0_dev *dev);
+
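+/* Helpers for the 6-bit two's complement (s6) values used in the per-rate
+ * TX power words: s6_to_int() sign-extends from bit 5, int_to_s6() clamps
+ * to the representable range [-0x20, 0x1f].
+ */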
+static inline u32 s6_validate(u32 reg)
+{
+ WARN_ON(reg & ~GENMASK(5, 0));
+ return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+ int s6;
+
+ s6 = s6_validate(reg);
+ if (s6 & BIT(5))
+ s6 -= BIT(6);
+
+ return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+ if (val < -0x20)
+ return 0x20;
+ if (val > 0x1f)
+ return 0x1f;
+
+ return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 000000000000..7cdb3e740522
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,720 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+#include "usb.h"
+
+#include "initvals.h"
+
+static void
+mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+{
+ int i;
+
+ /* Note: we don't turn off WLAN_CLK because that makes the device
+ * not respond properly on the probe path.
+ * In case anyone (PSM?) wants to use this function we can
+ * bring the clock stuff back and fixup the probe path.
+ */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ if (!enable)
+ return;
+
+ for (i = 200; i; i--) {
+ val = mt76_rr(dev, MT_CMB_CTRL);
+
+ if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+ break;
+
+ udelay(20);
+ }
+
+ /* Note: vendor driver tries to disable/enable wlan here and retry
+ * but the code which does it is so buggy it must have never
+ * triggered, so don't bother.
+ */
+ if (!i)
+ dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
+}
+
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x0_set_wlan_state(dev, val, enable);
+
+ mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_PBF_SYS_CTRL);
+ val &= ~0x2000;
+ mt76_wr(dev, MT_PBF_SYS_CTRL, val);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ msleep(200);
+}
+
+static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+ if (dev->in_max_packet == 512)
+ val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ val = mt76_rr(dev, MT_COM_REG0);
+ if (val & 1)
+ dev_dbg(dev->mt76.dev, "MCU not ready\n");
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+#define RANDOM_WRITE(dev, tab) \
+ mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab))
+
+static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+{
+ int ret, i;
+
+ ret = mt76x0_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ RANDOM_WRITE(dev, mt76x0_bbp_init_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+
+ RANDOM_WRITE(dev, mt76x0_dcoc_tab);
+
+ return 0;
+}
+
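+/* Each MT_BCN_OFFSET register packs four beacon slot offsets, one byte
+ * per slot, expressed in 64-byte units relative to MT_BEACON_BASE.
+ */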
+static void
+mt76_init_beacon_offsets(struct mt76x0_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+{
+ u32 reg;
+
+ RANDOM_WRITE(dev, common_mac_reg_table);
+
+ mt76_init_beacon_offsets(dev);
+
+ /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
+ RANDOM_WRITE(dev, mt76x0_mac_reg_table);
+
+ /* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
+ reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
+ reg &= ~0x3;
+ mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
+
+ if (is_mt7610e(dev)) {
+ /* Disable COEX_EN */
+ reg = mt76_rr(dev, MT_COEXCFG0);
+ reg &= 0xFFFFFFFE;
+ mt76_wr(dev, MT_COEXCFG0, reg);
+ }
+
+ /* Set 0x141C[15:12]=0xF */
+ reg = mt76_rr(dev, MT_EXT_CCA_CFG);
+ reg |= 0x0000F000;
+ mt76_wr(dev, MT_EXT_CCA_CFG, reg);
+
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ /*
+ * TxRing 9 is for Mgmt frame.
+ * TxRing 8 is for In-band command frame.
+ * WMM_RG0_TXQMA: This register setting is for FCE to define the rule of TxRing 9.
+ * WMM_RG1_TXQMA: This register setting is for FCE to define the rule of TxRing 8.
+ */
+ reg = mt76_rr(dev, MT_WMM_CTRL);
+ reg &= ~0x000003FF;
+ reg |= 0x00000201;
+ mt76_wr(dev, MT_WMM_CTRL, reg);
+
+ /* TODO: Probably not needed */
+ mt76_wr(dev, 0x7028, 0);
+ mt76_wr(dev, 0x7010, 0);
+ mt76_wr(dev, 0x7024, 0);
+ msleep(10);
+}
+
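+/* Initialise every on-chip WCID entry to the all-ones MAC address to mark
+ * the slot as unused.
+ */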
+static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS; i++) {
+ vals[i * 2] = 0xffffffff;
+ vals[i * 2 + 1] = 0x00ffffff;
+ }
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
+{
+ u32 vals[4] = {};
+
+ return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+ vals, ARRAY_SIZE(vals));
+}
+
+static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS * 2; i++)
+ vals[i] = 1;
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+{
+ mt76_rr(dev, MT_RX_STA_CNT0);
+ mt76_rr(dev, MT_RX_STA_CNT1);
+ mt76_rr(dev, MT_RX_STA_CNT2);
+ mt76_rr(dev, MT_TX_STA_CNT0);
+ mt76_rr(dev, MT_TX_STA_CNT1);
+ mt76_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt76x0_mac_start(struct mt76x0_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ return -ETIMEDOUT;
+
+ dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+ MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+ MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+ MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+ MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
+{
+ int i, ok;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
+
+ /* Page count on TxQ */
+ i = 200;
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ ok = 0;
+ i = 200;
+ while (i--) {
+ if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+ !mt76_rr(dev, 0x0a30) &&
+ !mt76_rr(dev, 0x0a34)) {
+ if (ok++ > 5)
+ break;
+ continue;
+ }
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt76x0_mac_stop(struct mt76x0_dev *dev)
+{
+ mt76x0_mac_stop_hw(dev);
+ flush_delayed_work(&dev->stat_work);
+ cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
+{
+ mt76x0_chip_onoff(dev, false, false);
+}
+
+int mt76x0_init_hardware(struct mt76x0_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+ /* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x0_chip_onoff(dev, true, true);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_mcu_init(dev);
+ if (ret)
+ goto err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Wait for ASIC ready after FW load. */
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ mt76x0_reset_csr_bbp(dev);
+ mt76x0_init_usb_dma(dev);
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
+ mt76_wr(dev, MT_TSO_CTRL, 0x0);
+
+ ret = mt76x0_mcu_cmd_init(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_dma_init(dev);
+ if (ret)
+ goto err_mcu;
+
+ mt76x0_init_mac_registers(dev);
+
+ if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
+ ret = -EIO;
+ goto err_rx;
+ }
+
+ ret = mt76x0_init_bbp(dev);
+ if (ret)
+ goto err_rx;
+
+ ret = mt76x0_init_wcid_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_key_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_wcid_attr_mem(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+
+ mt76x0_reset_counters(dev);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+ mt76_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ ret = mt76x0_eeprom_init(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76x0_phy_init(dev);
+ return 0;
+
+err_rx:
+ mt76x0_dma_cleanup(dev);
+err_mcu:
+ mt76x0_mcu_cmd_deinit(dev);
+err:
+ mt76x0_chip_onoff(dev, false, false);
+ return ret;
+}
+
+void mt76x0_cleanup(struct mt76x0_dev *dev)
+{
+ if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ mt76x0_stop_hardware(dev);
+ mt76x0_dma_cleanup(dev);
+ mt76x0_mcu_cmd_deinit(dev);
+}
+
+struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct mt76x0_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->mt76.dev = pdev;
+ dev->mt76.hw = hw;
+ mutex_init(&dev->usb_ctrl_mtx);
+ mutex_init(&dev->reg_atomic_mutex);
+ mutex_init(&dev->hw_atomic_mutex);
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->mt76.lock);
+ spin_lock_init(&dev->mac_lock);
+ spin_lock_init(&dev->con_mon_lock);
+ atomic_set(&dev->avg_ampdu_len, 1);
+ skb_queue_head_init(&dev->tx_skb_done);
+
+ dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
+ if (!dev->stat_wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+#define CHAN5G(_idx, _freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+ CHAN5G(36, 5180),
+ CHAN5G(40, 5200),
+ CHAN5G(44, 5220),
+ CHAN5G(46, 5230),
+ CHAN5G(48, 5240),
+ CHAN5G(52, 5260),
+ CHAN5G(56, 5280),
+ CHAN5G(60, 5300),
+ CHAN5G(64, 5320),
+
+ CHAN5G(100, 5500),
+ CHAN5G(104, 5520),
+ CHAN5G(108, 5540),
+ CHAN5G(112, 5560),
+ CHAN5G(116, 5580),
+ CHAN5G(120, 5600),
+ CHAN5G(124, 5620),
+ CHAN5G(128, 5640),
+ CHAN5G(132, 5660),
+ CHAN5G(136, 5680),
+ CHAN5G(140, 5700),
+};
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ void *chanlist;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[4] = 0x1;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
+
+ WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+ ARRAY_SIZE(mt76_channels_2ghz));
+
+ return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
+ mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
+ mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+static int
+mt76_init_sband_5g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
+
+ return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
+ mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
+ mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
+}
+
+int mt76x0_register_device(struct mt76x0_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ /* Reserve WCID 0 for multicast - this way the AP's WCID lands in
+ * entry no. 1, just like in the vendor driver.
+ */
+ dev->wcid_mask[0] |= 1;
+
+ /* init fake wcid for monitor interfaces */
+ dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
+ GFP_KERNEL);
+ if (!dev->mon_wcid)
+ return -ENOMEM;
+ dev->mon_wcid->idx = 0xff;
+ dev->mon_wcid->hw_key_idx = -1;
+
+ SET_IEEE80211_DEV(hw, dev->mt76.dev);
+
+ hw->queues = 4;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+
+ hw->sta_data_size = sizeof(struct mt76_sta);
+ hw->vif_data_size = sizeof(struct mt76_vif);
+
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ if (dev->ee->has_2ghz) {
+ ret = mt76_init_sband_2g(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ee->has_5ghz) {
+ ret = mt76_init_sband_5g(dev);
+ if (ret)
+ return ret;
+ }
+
+ dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
+ INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ mt76x0_init_debugfs(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 000000000000..24afcfd94b4e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,282 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_INITVALS_H
+#define __MT76X0U_INITVALS_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+#if 1
+ {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
+ {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
+#endif
+
+ {MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap*/
+ {MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/
+ {MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, , default Disable RX*/
+ {MT_RX_FILTR_CFG, 0x17f97}, /*0x1400 , RX filter control, */
+ {MT_BKOFF_SLOT_CFG, 0x209}, /* default set short slot time, CC_DELAY_TIME should be 2 */
+ /*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */
+ {MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */
+ {MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */
+ {MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */
+ /*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT*/
+ {MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT, Modify for 2860E, 2007-08-01*/
+ {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
+ {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/
+
+ {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/},
+ {MT_PBF_RX_MAX_PCNT, 0x9f},
+
+ /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/
+/* WMM_ACM_SUPPORT */
+/* {TX_RTY_CFG, 0x6bb80101}, sample*/
+ {MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/
+
+ {MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
+ {MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
+ {MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
+ {MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2*/
+ {MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
+ {MT_WPDMA_GLO_CFG, 0x00000030},
+ {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/
+ {MT_GF40_PROT_CFG, 0x03F44084},
+ {MT_MM20_PROT_CFG, 0x01744004},
+ {MT_TXOP_CTRL_CFG, 0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /*Extension channel backoff.*/
+ {MT_TX_RTS_CFG, 0x00092b20},
+
+ {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */
+ {MT_TXOP_HLDR_ET, 0x00000002},
+
+ /* Jerry comments 2008/01/16: we use SIFS = 10us for CCK by default, but
+ 10us seems too small for the INTEL 2200bg card. In MBSS mode the delta
+ time between beacon0 and beacon1 is SIFS (10us), so if an INTEL 2200bg
+ card connects to BSS0, pings are always lost. So we change the SIFS of
+ CCK from 10us to 16us. */
+ {MT_XIFS_TIME_CFG, 0x33a41010},
+ {MT_PWR_PIN_CFG, 0x00000000},
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+ /* {MT_IOCFG_6, 0xA0040080 }, */
+ {MT_PBF_SYS_CTRL, 0x00080c00 },
+ {MT_PBF_CFG, 0x77723c1f },
+ {MT_FCE_PSE_CTRL, 0x00000001 },
+
+ {MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 },
+
+ /* Delay bb_tx_pe for proper tx_mcs_pwr update */
+ {MT_TX_SW_CFG0, 0x00000601 },
+
+ /* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
+ {MT_TX_SW_CFG1, 0x00040000 },
+ {MT_TX_SW_CFG2, 0x00000000 },
+
+ /* disable Tx info report */
+ {0xa44, 0x0000000 },
+
+ {MT_HEADER_TRANS_CTRL_REG, 0x0},
+ {MT_TSO_CTRL, 0x0},
+
+ /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
+ {MT_BB_PA_MODE_CFG1, 0x00500055},
+
+ /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
+ {MT_RF_PA_MODE_CFG1, 0x00500055},
+
+ {MT_TX_ALC_CFG_0, 0x2F2F000C},
+ {MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */
+
+ {MT_TX_PWR_CFG_0, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_1, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_2, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_3, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_4, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_7, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_8, 0x3A},
+ {MT_TX_PWR_CFG_9, 0x3A},
+ /* Enable Tx length > 4095 byte */
+ {0x150C, 0x00000002},
+
+ /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
+ {0x1238, 0x001700C8},
+ /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
+ /* {MT_LDO_CTRL_0, 0x00A647B6}, */
+
+ /* Default LDO_DIG supply 1.26V, change to 1.2V */
+ {MT_LDO_CTRL_1, 0x6B006464 },
+/*
+ {MT_HT_BASIC_RATE, 0x00004003 },
+ {MT_HT_CTRL_CFG, 0x000001FF },
+*/
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+ {MT_BBP(CORE, 1), 0x00000002},
+ {MT_BBP(CORE, 4), 0x00000000},
+ {MT_BBP(CORE, 24), 0x00000000},
+ {MT_BBP(CORE, 32), 0x4003000a},
+ {MT_BBP(CORE, 42), 0x00000000},
+ {MT_BBP(CORE, 44), 0x00000000},
+
+ {MT_BBP(IBI, 11), 0x00000080},
+
+ /*
+ * 0x2300[5] Default Antenna:
+ * 0 for WIFI main antenna
+ * 1 for WIFI aux antenna
+ */
+ {MT_BBP(AGC, 0), 0x00021400},
+ {MT_BBP(AGC, 1), 0x00000003},
+ {MT_BBP(AGC, 2), 0x003A6464},
+ {MT_BBP(AGC, 15), 0x88A28CB8},
+ {MT_BBP(AGC, 22), 0x00001E21},
+ {MT_BBP(AGC, 23), 0x0000272C},
+ {MT_BBP(AGC, 24), 0x00002F3A},
+ {MT_BBP(AGC, 25), 0x8000005A},
+ {MT_BBP(AGC, 26), 0x007C2005},
+ {MT_BBP(AGC, 34), 0x000A0C0C},
+ {MT_BBP(AGC, 37), 0x2121262C},
+ {MT_BBP(AGC, 41), 0x38383E45},
+ {MT_BBP(AGC, 57), 0x00001010},
+ {MT_BBP(AGC, 59), 0xBAA20E96},
+ {MT_BBP(AGC, 63), 0x00000001},
+
+ {MT_BBP(TXC, 0), 0x00280403},
+ {MT_BBP(TXC, 1), 0x00000000},
+
+ {MT_BBP(RXC, 1), 0x00000012},
+ {MT_BBP(RXC, 2), 0x00000011},
+ {MT_BBP(RXC, 3), 0x00000005},
+ {MT_BBP(RXC, 4), 0x00000000},
+ {MT_BBP(RXC, 5), 0xF977C4EC},
+ {MT_BBP(RXC, 7), 0x00000090},
+
+ {MT_BBP(TXO, 8), 0x00000000},
+
+ {MT_BBP(TXBE, 0), 0x00000000},
+ {MT_BBP(TXBE, 4), 0x00000004},
+ {MT_BBP(TXBE, 6), 0x00000000},
+ {MT_BBP(TXBE, 8), 0x00000014},
+ {MT_BBP(TXBE, 9), 0x20000000},
+ {MT_BBP(TXBE, 10), 0x00000000},
+ {MT_BBP(TXBE, 12), 0x00000000},
+ {MT_BBP(TXBE, 13), 0x00000000},
+ {MT_BBP(TXBE, 14), 0x00000000},
+ {MT_BBP(TXBE, 15), 0x00000000},
+ {MT_BBP(TXBE, 16), 0x00000000},
+ {MT_BBP(TXBE, 17), 0x00000000},
+
+ {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
+ {MT_BBP(RXFE, 3), 0x00000000},
+ {MT_BBP(RXFE, 4), 0x00000000},
+
+ {MT_BBP(RXO, 13), 0x00000092},
+ {MT_BBP(RXO, 14), 0x00060612},
+ {MT_BBP(RXO, 15), 0xC8321B18},
+ {MT_BBP(RXO, 16), 0x0000001E},
+ {MT_BBP(RXO, 17), 0x00000000},
+ {MT_BBP(RXO, 18), 0xCC00A993},
+ {MT_BBP(RXO, 19), 0xB9CB9CB9},
+ {MT_BBP(RXO, 20), 0x26c00057},
+ {MT_BBP(RXO, 21), 0x00000001},
+ {MT_BBP(RXO, 24), 0x00000006},
+};
+
+static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}},
+ {RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXFE, 0), 0x895000E0}},
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+ {MT_BBP(CAL, 47), 0x000010F0 },
+ {MT_BBP(CAL, 48), 0x00008080 },
+ {MT_BBP(CAL, 49), 0x00000F07 },
+ {MT_BBP(CAL, 50), 0x00000040 },
+ {MT_BBP(CAL, 51), 0x00000404 },
+ {MT_BBP(CAL, 52), 0x00080803 },
+ {MT_BBP(CAL, 53), 0x00000704 },
+ {MT_BBP(CAL, 54), 0x00002828 },
+ {MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 000000000000..95d43efc1f3d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,772 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_PHY_INITVALS_H
+#define __MT76X0U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value) \
+ { (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
+/*
+ Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC
+*/
+ { MT_RF(0, 1), 0x01},
+ { MT_RF(0, 2), 0x11},
+
+ /*
+ R3 ~ R7: VCO Cal.
+ */
+ { MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */
+ { MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */
+ { MT_RF(0, 5), 0x00},
+ { MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */
+ { MT_RF(0, 7), 0x00},
+
+ /*
+ XO
+ */
+ { MT_RF(0, 8), 0x00},
+ { MT_RF(0, 9), 0x00},
+ { MT_RF(0, 10), 0x0C},
+ { MT_RF(0, 11), 0x00},
+ { MT_RF(0, 12), 0x00},
+
+ /*
+ BG
+ */
+ { MT_RF(0, 13), 0x00},
+ { MT_RF(0, 14), 0x00},
+ { MT_RF(0, 15), 0x00},
+
+ /*
+ LDO
+ */
+ { MT_RF(0, 19), 0x20},
+ /*
+ XO
+ */
+ { MT_RF(0, 20), 0x22},
+ { MT_RF(0, 21), 0x12},
+ { MT_RF(0, 23), 0x00},
+ { MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */
+ { MT_RF(0, 25), 0x00},
+
+ /*
+ PLL, See Freq Selection
+ */
+ { MT_RF(0, 26), 0x00},
+ { MT_RF(0, 27), 0x00},
+ { MT_RF(0, 28), 0x00},
+ { MT_RF(0, 29), 0x00},
+ { MT_RF(0, 30), 0x00},
+ { MT_RF(0, 31), 0x00},
+ { MT_RF(0, 32), 0x00},
+ { MT_RF(0, 33), 0x00},
+ { MT_RF(0, 34), 0x00},
+ { MT_RF(0, 35), 0x00},
+ { MT_RF(0, 36), 0x00},
+ { MT_RF(0, 37), 0x00},
+
+ /*
+ LO Buffer
+ */
+ { MT_RF(0, 38), 0x2F},
+
+ /*
+ Test Ports
+ */
+ { MT_RF(0, 64), 0x00},
+ { MT_RF(0, 65), 0x80},
+ { MT_RF(0, 66), 0x01},
+ { MT_RF(0, 67), 0x04},
+
+ /*
+ ADC/DAC
+ */
+ { MT_RF(0, 68), 0x00},
+ { MT_RF(0, 69), 0x08},
+ { MT_RF(0, 70), 0x08},
+ { MT_RF(0, 71), 0x40},
+ { MT_RF(0, 72), 0xD0},
+ { MT_RF(0, 73), 0x93},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
+/*
+ Bank 5 - Channel 0 2G RF registers
+*/
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBand6590 */
+
+ { MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */
+ { MT_RF(5, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(5, 4), 0x00},
+ { MT_RF(5, 5), 0x84},
+ { MT_RF(5, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(5, 7), 0x00},
+ { MT_RF(5, 8), 0x00},
+ { MT_RF(5, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(5, 10), 0x51},
+ { MT_RF(5, 11), 0x22},
+ { MT_RF(5, 12), 0x22},
+ { MT_RF(5, 13), 0x0F},
+ { MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */
+ { MT_RF(5, 15), 0x25},
+ { MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */
+ { MT_RF(5, 17), 0x00},
+ { MT_RF(5, 18), 0x00},
+ { MT_RF(5, 19), 0x30}, /* Improve max Pin */
+ { MT_RF(5, 20), 0x33},
+ { MT_RF(5, 21), 0x02},
+ { MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */
+ { MT_RF(5, 23), 0x00},
+ { MT_RF(5, 24), 0x25},
+ { MT_RF(5, 26), 0x00},
+ { MT_RF(5, 27), 0x12},
+ { MT_RF(5, 28), 0x0F},
+ { MT_RF(5, 29), 0x00},
+
+ /*
+ LOGEN
+ */
+ { MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */
+ { MT_RF(5, 31), 0x35},
+ { MT_RF(5, 32), 0x31},
+ { MT_RF(5, 33), 0x31},
+ { MT_RF(5, 34), 0x34},
+ { MT_RF(5, 35), 0x03},
+ { MT_RF(5, 36), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */
+ { MT_RF(5, 38), 0xB3},
+ { MT_RF(5, 39), 0x33},
+ { MT_RF(5, 40), 0xB1},
+ { MT_RF(5, 41), 0x71},
+ { MT_RF(5, 42), 0xF2},
+ { MT_RF(5, 43), 0x47},
+ { MT_RF(5, 44), 0x77},
+ { MT_RF(5, 45), 0x0E},
+ { MT_RF(5, 46), 0x10},
+ { MT_RF(5, 47), 0x00},
+ { MT_RF(5, 48), 0x53},
+ { MT_RF(5, 49), 0x03},
+ { MT_RF(5, 50), 0xEF},
+ { MT_RF(5, 51), 0xC7},
+ { MT_RF(5, 52), 0x62},
+ { MT_RF(5, 53), 0x62},
+ { MT_RF(5, 54), 0x00},
+ { MT_RF(5, 55), 0x00},
+ { MT_RF(5, 56), 0x0F},
+ { MT_RF(5, 57), 0x0F},
+ { MT_RF(5, 58), 0x16},
+ { MT_RF(5, 59), 0x16},
+ { MT_RF(5, 60), 0x10},
+ { MT_RF(5, 61), 0x10},
+ { MT_RF(5, 62), 0xD0},
+ { MT_RF(5, 63), 0x6C},
+ { MT_RF(5, 64), 0x58},
+ { MT_RF(5, 65), 0x58},
+ { MT_RF(5, 66), 0xF2},
+ { MT_RF(5, 67), 0xE8},
+ { MT_RF(5, 68), 0xF0},
+ { MT_RF(5, 69), 0xF0},
+ { MT_RF(5, 127), 0x04},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
+/*
+ Bank 6 - Channel 0 5G RF registers
+*/
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBandmt76x0 */
+
+ { MT_RF(6, 2), 0x0C},
+ { MT_RF(6, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(6, 4), 0x00},
+ { MT_RF(6, 5), 0x84},
+ { MT_RF(6, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(6, 7), 0x00},
+ { MT_RF(6, 8), 0x00},
+ { MT_RF(6, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(6, 10), 0x00},
+ { MT_RF(6, 11), 0x01},
+
+ { MT_RF(6, 13), 0x23},
+ { MT_RF(6, 14), 0x00},
+ { MT_RF(6, 15), 0x04},
+ { MT_RF(6, 16), 0x22},
+
+ { MT_RF(6, 18), 0x08},
+ { MT_RF(6, 19), 0x00},
+ { MT_RF(6, 20), 0x00},
+ { MT_RF(6, 21), 0x00},
+ { MT_RF(6, 22), 0xFB},
+
+ /*
+ LOGEN5G
+ */
+ { MT_RF(6, 25), 0x76},
+ { MT_RF(6, 26), 0x24},
+ { MT_RF(6, 27), 0x04},
+ { MT_RF(6, 28), 0x00},
+ { MT_RF(6, 29), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(6, 37), 0xBB},
+ { MT_RF(6, 38), 0xB3},
+
+ { MT_RF(6, 40), 0x33},
+ { MT_RF(6, 41), 0x33},
+
+ { MT_RF(6, 43), 0x03},
+ { MT_RF(6, 44), 0xB3},
+
+ { MT_RF(6, 46), 0x17},
+ { MT_RF(6, 47), 0x0E},
+ { MT_RF(6, 48), 0x10},
+ { MT_RF(6, 49), 0x07},
+
+ { MT_RF(6, 62), 0x00},
+ { MT_RF(6, 63), 0x00},
+ { MT_RF(6, 64), 0xF1},
+ { MT_RF(6, 65), 0x0F},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
+/*
+ Bank 7 - Channel 0 VGA RF registers
+*/
+ /* E3 CR */
+ { MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */
+ { MT_RF(7, 1), 0x00},
+ { MT_RF(7, 2), 0x00},
+ { MT_RF(7, 3), 0x00},
+ { MT_RF(7, 4), 0x00},
+
+ { MT_RF(7, 10), 0x13},
+ { MT_RF(7, 11), 0x0F},
+ { MT_RF(7, 12), 0x13}, /* For dcoc */
+ { MT_RF(7, 13), 0x13}, /* For dcoc */
+ { MT_RF(7, 14), 0x13}, /* For dcoc */
+ { MT_RF(7, 15), 0x20}, /* For dcoc */
+ { MT_RF(7, 16), 0x22}, /* For dcoc */
+
+ { MT_RF(7, 17), 0x7C},
+
+ { MT_RF(7, 18), 0x00},
+ { MT_RF(7, 19), 0x00},
+ { MT_RF(7, 20), 0x00},
+ { MT_RF(7, 21), 0xF1},
+ { MT_RF(7, 22), 0x11},
+ { MT_RF(7, 23), 0xC2},
+ { MT_RF(7, 24), 0x41},
+ { MT_RF(7, 25), 0x20},
+ { MT_RF(7, 26), 0x40},
+ { MT_RF(7, 27), 0xD7},
+ { MT_RF(7, 28), 0xA2},
+ { MT_RF(7, 29), 0x60},
+ { MT_RF(7, 30), 0x49},
+ { MT_RF(7, 31), 0x20},
+ { MT_RF(7, 32), 0x44},
+ { MT_RF(7, 33), 0xC1},
+ { MT_RF(7, 34), 0x60},
+ { MT_RF(7, 35), 0xC0},
+
+ { MT_RF(7, 61), 0x01},
+
+ { MT_RF(7, 72), 0x3C},
+ { MT_RF(7, 73), 0x34},
+ { MT_RF(7, 74), 0x00},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA},
+
+ { MT_RF(7, 76), RF_BW_20, 0x40},
+ { MT_RF(7, 76), RF_BW_40, 0x40},
+ { MT_RF(7, 76), RF_BW_80, 0x10},
+
+ { MT_RF(7, 77), RF_BW_20, 0x40},
+ { MT_RF(7, 77), RF_BW_40, 0x40},
+ { MT_RF(7, 77), RF_BW_80, 0x10},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 16), RF_G_BAND, 0x20},
+ { MT_RF(0, 16), RF_A_BAND, 0x20},
+
+ { MT_RF(0, 18), RF_G_BAND, 0x00},
+ { MT_RF(0, 18), RF_A_BAND, 0x00},
+
+ { MT_RF(0, 39), RF_G_BAND, 0x36},
+ { MT_RF(0, 39), RF_A_BAND_LB, 0x34},
+ { MT_RF(0, 39), RF_A_BAND_MB, 0x33},
+ { MT_RF(0, 39), RF_A_BAND_HB, 0x31},
+ { MT_RF(0, 39), RF_A_BAND_11J, 0x36},
+
+ { MT_RF(6, 12), RF_A_BAND_LB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_MB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_HB, 0x55},
+ { MT_RF(6, 12), RF_A_BAND_11J, 0x44},
+
+ { MT_RF(6, 17), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 17), RF_A_BAND_MB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_HB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 24), RF_A_BAND_LB, 0xA1},
+ { MT_RF(6, 24), RF_A_BAND_MB, 0x41},
+ { MT_RF(6, 24), RF_A_BAND_HB, 0x21},
+ { MT_RF(6, 24), RF_A_BAND_11J, 0xE1},
+
+ { MT_RF(6, 39), RF_A_BAND_LB, 0x36},
+ { MT_RF(6, 39), RF_A_BAND_MB, 0x34},
+ { MT_RF(6, 39), RF_A_BAND_HB, 0x32},
+ { MT_RF(6, 39), RF_A_BAND_11J, 0x37},
+
+ { MT_RF(6, 42), RF_A_BAND_LB, 0xFB},
+ { MT_RF(6, 42), RF_A_BAND_MB, 0xF3},
+ { MT_RF(6, 42), RF_A_BAND_HB, 0xEB},
+ { MT_RF(6, 42), RF_A_BAND_11J, 0xEB},
+
+ /* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */
+
+ { MT_RF(6, 127), RF_G_BAND, 0x84},
+ { MT_RF(6, 127), RF_A_BAND, 0x04},
+
+ { MT_RF(7, 5), RF_G_BAND, 0x40},
+ { MT_RF(7, 5), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 9), RF_G_BAND, 0x00},
+ { MT_RF(7, 9), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 70), RF_G_BAND, 0x00},
+ { MT_RF(7, 70), RF_A_BAND, 0x6D},
+
+ { MT_RF(7, 71), RF_G_BAND, 0x00},
+ { MT_RF(7, 71), RF_A_BAND, 0xB0},
+
+ { MT_RF(7, 78), RF_G_BAND, 0x00},
+ { MT_RF(7, 78), RF_A_BAND, 0x55},
+
+ { MT_RF(7, 79), RF_G_BAND, 0x00},
+ { MT_RF(7, 79), RF_A_BAND, 0x55},
+};
+
+static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */
+};
+
+static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */
+};
+
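+/* Channels that are tuned with the fractional-N (SDM) plan above; the
+ * remaining channels presumably use the integer mt76x0_frequency_plan.
+ */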
+static const u8 mt76x0_sdm_channel[] = {
+ 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
+ { MT_RF(6, 45), RF_A_BAND_LB, 0x63},
+ { MT_RF(6, 45), RF_A_BAND_MB, 0x43},
+ { MT_RF(6, 45), RF_A_BAND_HB, 0x33},
+ { MT_RF(6, 45), RF_A_BAND_11J, 0x73},
+
+ { MT_RF(6, 50), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 51), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 52), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 53), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 54), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 55), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 56), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 57), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 58), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 58), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 58), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 58), RF_A_BAND_11J, 0x07},
+
+ { MT_RF(6, 59), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 59), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 59), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 59), RF_A_BAND_11J, 0x07},
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
new file mode 100644
index 000000000000..91a84be36d3b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
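+ /* CCK indices >= 8 are the short-preamble variants of index - 8
+ * (cf. the RX path, which sets RX_ENC_FLAG_SHORTPRE for them).
+ */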
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+static void
+mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
+ struct mt76_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
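+ /* The hardware reports only the final rate and the total retry count;
+ * reconstruct the intermediate attempts assuming the rate index was
+ * stepped down by one on each retry.
+ */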
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ /* Guard against writing rate[-1] when there were no retries. */
+ if (last_rate > 0)
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
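+ /* For legacy rates, hw_value packs the PHY type in the high byte and
+ * the hardware rate index in the low byte.
+ */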
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+}
+
+struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
+{
+ struct mt76_tx_status stat = {};
+ u32 stat2, stat1;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return stat;
+}
+
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
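+ /* The wcid is embedded in struct mt76_sta, which in turn lives in
+ * ieee80211_sta's drv_priv; the void pointer lets container_of()
+ * step back out through both layers.
+ */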
+ void *priv;
+ priv = msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ }
+
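+ /* Coalesce A-MPDU status: identical consecutive reports for the same
+ * station are merely counted, and flushed to mac80211 once the
+ * rate/retry value changes or 32 frames have accumulated.
+ */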
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ spin_lock_bh(&dev->mac_lock);
+ ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
+ spin_unlock_bh(&dev->mac_lock);
+out:
+ rcu_read_unlock();
+}
+
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ bool ht_rts[4] = {};
+ int i;
+
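+ /* prot[0..5] are written to six consecutive protection registers
+ * starting at MT_CCK_PROT_CFG (CCK, OFDM, MM20, MM40, GF20 and GF40
+ * in the mt76 register map).
+ */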
+ prot[0] = MT_PROT_NAV_SHORT |
+ MT_PROT_TXOP_ALLOW_ALL |
+ MT_PROT_RTS_THR_EN;
+ prot[1] = prot[0];
+ if (legacy_prot)
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+ prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+ if (legacy_prot) {
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+ } else {
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ ht_rts[1] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+ }
+
+ if (non_gf)
+ ht_rts[2] = ht_rts[3] = true;
+
+ for (i = 0; i < 4; i++)
+ if (ht_rts[i])
+ prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+ for (i = 0; i < 6; i++)
+ mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+{
+ if (short_preamb)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+{
+ u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
+
+ val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ if (!enable) {
+ mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+ return;
+ }
+
+ val &= ~MT_BEACON_TIME_CFG_INTVAL;
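+ /* The interval field is presumably in units of 1/16 TU, hence the
+ * << 4 scaling of the TU-based interval.
+ */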
+ val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN;
+
+ /* Commit the new beacon timer configuration; without this write the
+ * value computed above would be discarded.
+ */
+ mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+{
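+ /* 0x10f4 has no named register definition; the bits checked below are
+ * presumably inherited from the vendor driver.
+ */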
+ u32 val = mt76_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt76x0_mac_work(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ mac_work.work);
+ struct {
+ u32 addr_base;
+ u32 span;
+ u64 *stat_base;
+ } spans[] = {
+ { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
+ { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
+ { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
+ { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
+ };
+ u32 sum, n;
+ int i, j, k;
+
+ /* Note: using MCU_RANDOM_READ is actually slower than reading all the
+ * registers by hand. The MCU takes ca. 20 ms to complete a read of 24
+ * registers, while reading them one by one takes roughly
+ * 24 * 200 us =~ 5 ms.
+ */
+
+ k = 0;
+ n = 0;
+ sum = 0;
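+ /* Each counter register packs two 16-bit values, so a span of N
+ * registers fills 2 * N stat slots. For the TX aggregation counters
+ * the low/high halves count AMPDUs of 2k+1 and 2k+2 subframes, which
+ * the weighted sum below relies on to compute the average length.
+ */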
+ for (i = 0; i < ARRAY_SIZE(spans); i++)
+ for (j = 0; j < spans[i].span; j++) {
+ u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
+
+ spans[i].stat_base[j * 2] += val & 0xffff;
+ spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+ /* Calculate average AMPDU length */
+ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+ spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+ continue;
+
+ n += (val >> 16) + (val & 0xffff);
+ sum += (val & 0xffff) * (1 + k * 2) +
+ (val >> 16) * (2 + k * 2);
+ k++;
+ }
+
+ atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+ mt76x0_check_mac_err(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ u8 zmac[ETH_ALEN] = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (mac)
+ memcpy(zmac, mac, sizeof(zmac));
+
+ mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+{
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ void *msta;
+ u8 min_factor = 3;
+ int i;
+
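+ /* ampdu_factor is the 802.11 exponent (maximum A-MPDU length is
+ * 2^(13 + factor) - 1 octets); start from the largest value and clamp
+ * it to the smallest factor advertised by any associated station.
+ */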
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+ min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ }
+ rcu_read_unlock();
+
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+ FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
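+ /* For VHT the rate index packs the MCS in bits 3:0 and NSS - 1 in
+ * bits 5:4 (see MT_RATE_INDEX_VHT_* in mac.h).
+ */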
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ u16 rate, int rssi)
+{
+ dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
+ dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
+}
+
+static int
+mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt76x0_rxwi *rxwi = rxi;
+ u32 len, ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ int rssi;
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ if (WARN_ON(len < 10))
+ return 0;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+ }
+
+ status->chains = BIT(0);
+ rssi = mt76x0_phy_get_rssi(dev, rxwi);
+ status->chain_signal[0] = status->signal = rssi;
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ mt76_mac_process_rate(status, rate);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ if (mt76x0_rx_is_our_beacon(dev, data)) {
+ mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
+ } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
+ if (dev->avg_rssi == 0)
+ dev->avg_rssi = rssi;
+ else
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
+
+ }
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_key(&dev->mt76, idx);
+
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ iv_data[3] = key->keyidx << 6;
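+ /* For TKIP and CCMP also set the Extended IV bit (bit 5 of IV
+ * byte 3, per 802.11).
+ */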
+ if (cipher >= MT_CIPHER_TKIP) {
+ /* Note: start with 1 to comply with spec,
+ * (see comment on common/cmm_wpa.c:4291).
+ */
+ iv_data[0] |= 1;
+ iv_data[3] |= 0x20;
+ }
+ }
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ val = mt76_rr(dev, MT_WCID_ATTR(idx));
+ val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+ val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val &= ~MT_WCID_ATTR_PAIRWISE;
+ val |= MT_WCID_ATTR_PAIRWISE *
+ !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ mt76_wr(dev, MT_WCID_ATTR(idx), val);
+
+ return 0;
+}
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+ key_data, sizeof(key_data));
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
new file mode 100644
index 000000000000..bea067b71c13
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+/* Note: on MT76X0U the values in the original "RSSI" and "SNR" fields are not
+ * what their names suggest; the names used by this driver are educated guesses
+ * (see vendor mac/ral_omac.c).
+ */
+struct mt76x0_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ s8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_U2M BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211 BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
+#define MT_RXWI_CTL_TID GENMASK(31, 28)
+
+#define MT_RXWI_FRAG GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC BIT(6)
+#define MT_RXWI_RATE_BW GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI BIT(9)
+#define MT_RXWI_RATE_STBC BIT(10)
+#define MT_RXWI_RATE_LDPC_ETXBF BIT(11)
+#define MT_RXWI_RATE_SND BIT(12)
+#define MT_RXWI_RATE_PHY GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+
+#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
+
+enum mt76_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+};
+
+struct mt76_txwi {
+ __le16 flags;
+ __le16 rate_ctl;
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+ __le32 iv;
+ __le32 eiv;
+ u8 aid;
+ u8 txstream;
+ u8 ctl2;
+ u8 pktid;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_RATE_MCS GENMASK(6, 0)
+#define MT_TXWI_RATE_BW BIT(7)
+#define MT_TXWI_RATE_SGI BIT(8)
+#define MT_TXWI_RATE_STBC GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
+#define MT_TXWI_CTL_PIFS_REV BIT(6)
+
+#define MT_TXWI_PKTID_PROBE BIT(7)
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi);
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 000000000000..cf6ffb1ba4a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt76x0_start(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x0_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x0_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt76x0_mac_stop(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+
+static int mt76x0_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int idx;
+
+ idx = ffs(~dev->vif_mask);
+ if (!idx || idx > 8)
+ return -ENOSPC;
+
+ idx--;
+ dev->vif_mask |= BIT(idx);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = GROUP_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+
+ return 0;
+}
+
+static void mt76x0_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+	struct mt76x0_dev *dev = hw->priv;
+	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+
+	/* Release the vif index allocated in mt76x0_add_interface();
+	 * clearing a GROUP_WCID bit in wcid_mask would index past the
+	 * 128-bit bitmap.
+	 */
+	dev->vif_mask &= ~BIT(mvif->idx);
+}
+
+static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u32 flags = 0;
+
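+/* rxfilter bits are "drop" flags: set the hardware bit when the
+ * corresponding FIF_* flag was not requested by mac80211.
+ */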
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_con_cal_onoff(dev, info);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+		/* Note: this is a hack because beacon_int is not changed
+		 * when we leave the BSS and no more appropriate event is
+		 * generated. rt2x00 doesn't seem to be bothered though.
+		 */
+ if (is_zero_ether_addr(info->bssid))
+ mt76x0_mac_config_tsf(dev, false, 0);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+ mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+ mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+ mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt76x0_mac_config_tsf(dev, true, info->beacon_int);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt76x0_mac_set_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_recalibrate_after_assoc(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ mt76x0_mac_set_ampdu_factor(dev);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
+ mt76x0_mac_set_ampdu_factor(dev);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt76x0_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x0_agc_save(dev);
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76x0_agc_restore(dev);
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static int
+mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+ struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+
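+	/* No station means a per-vif group key: program both the vif's
+	 * group wcid and the shared key slot.
+	 */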
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+ return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+	WARN_ON(msta->wcid.idx >= N_WCIDS);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ msta->agg_ssn[tid] = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates;
+ struct ieee80211_tx_rate rate = {};
+
+ rcu_read_lock();
+ rates = rcu_dereference(sta->rates);
+
+ if (!rates)
+ goto out;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+ rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt76x0_ops = {
+ .tx = mt76x0_tx,
+ .start = mt76x0_start,
+ .stop = mt76x0_stop,
+ .add_interface = mt76x0_add_interface,
+ .remove_interface = mt76x0_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76_configure_filter,
+ .bss_info_changed = mt76x0_bss_info_changed,
+ .sta_add = mt76x0_sta_add,
+ .sta_remove = mt76x0_sta_remove,
+ .sta_notify = mt76x0_sta_notify,
+ .set_key = mt76x0_set_key,
+ .conf_tx = mt76x0_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76_ampdu_action,
+ .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+ .set_rts_threshold = mt76x0_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
new file mode 100644
index 000000000000..8affacbab90a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
@@ -0,0 +1,656 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE 1024
+
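+/* The MCU signals that the firmware is up by setting MT_MCU_COM_REG0 to 1. */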
+static inline int firmware_running(struct mt76x0_dev *dev)
+{
+ return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
+ u8 seq, enum mcu_cmd cmd)
+{
+ WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+ FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
+ FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
+}
+
+static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
+ struct sk_buff *skb, bool need_resp)
+{
+ u32 i, csum = 0;
+
+ for (i = 0; i < skb->len / 4; i++)
+ csum ^= get_unaligned_le32(skb->data + i * 4);
+
+ trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+ }
+ return skb;
+}
+
+static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
+{
+ int i;
+ int n = dev->mcu.reg_pairs_len;
+ u8 *buf = dev->mcu.resp.buf;
+
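+	/* Skip the leading 4-byte RX FCE info word; the trailing 4 bytes
+	 * appear to be padding, hence len - 8 bytes of payload.
+	 */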
+ buf += 4;
+ len -= 8;
+
+ if (dev->mcu.burst_read) {
+ u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;
+
+ WARN_ON_ONCE(len/4 != n);
+ for (i = 0; i < n; i++) {
+ u32 val = get_unaligned_le32(buf + 4*i);
+
+ dev->mcu.reg_pairs[i].reg = reg++;
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ } else {
+ WARN_ON_ONCE(len/8 != n);
+ for (i = 0; i < n; i++) {
+ u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base;
+ u32 val = get_unaligned_le32(buf + 8*i + 4);
+
+ WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ }
+}
+
+static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
+{
+ struct urb *urb = dev->mcu.resp.urb;
+ u32 rxfce;
+ int urb_status, ret, try = 5;
+
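+	/* Retry up to five times: stale or mismatched responses are consumed
+	 * and the resp URB resubmitted until the expected sequence number
+	 * shows up.
+	 */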
+ while (try--) {
+ if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+ msecs_to_jiffies(300))) {
+ dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
+ continue;
+ }
+
+ /* Make copies of important data before reusing the urb */
+ rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+ urb_status = urb->status * mt76x0_urb_has_error(urb);
+
+ if (urb_status == 0 && dev->mcu.reg_pairs)
+ mt76x0_read_resp_regs(dev, urb->actual_length);
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb,
+ &dev->mcu.resp_cmpl);
+ if (ret)
+ return ret;
+
+ if (urb_status)
+ dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
+ urb_status);
+
+ if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ return 0;
+
+ dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+ dev->out_ep[MT_EP_OUT_INBAND_CMD]);
+ int sent, ret;
+ u8 seq = 0;
+
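+	/* Sequence number 0 means no response is expected, so pick a non-zero
+	 * 4-bit sequence number when we have to wait for one.
+	 */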
+ if (wait_resp)
+ while (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);
+
+ if (dev->mcu.resp_cmpl.done)
+ dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");
+
+ trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
+ trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);
+
+ ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
+ goto out;
+ }
+ if (sent != skb->len)
+ dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);
+
+ if (wait_resp)
+ ret = mt76x0_mcu_wait_resp(dev, seq);
+
+out:
+ return ret;
+}
+
+static int
+mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ int ret;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return 0;
+
+ mutex_lock(&dev->mcu.mutex);
+ ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
+ enum mcu_function func, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+	return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP,
+				   func == ATOMIC_TSSI_SETTING);
+}
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(cal),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
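+	/* Recurse for the remaining pairs; only the final chunk (cnt == n
+	 * above) waits for the MCU response.
+	 */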
+ return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
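+	/* Reads are not chunked: all requested pairs must fit in one command. */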
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = false;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+	return ret;
+}
+
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n)
+{
+ const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_regs_per_cmd, n);
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+ for (i = 0; i < cnt; i++)
+ skb_put_le32(skb, data[i]);
+
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
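+	/* Send the rest in a follow-up command, advancing the register offset
+	 * by 4 bytes per value already written.
+	 */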
+ return mt76x0_burst_write_regs(dev, offset + cnt * 4,
+ data + cnt, n - cnt);
+}
+
+#if 0
+static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, base + data[0].reg);
+ skb_put_le32(skb, n);
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = true;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+#endif
+
+struct mt76_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
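+/* Image layout: header, IVB, then ILM (ilm_len includes the IVB), then DLM. */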
+struct mt76_fw {
+ struct mt76_fw_header hdr;
+ u8 ivb[MT_MCU_IVB_SIZE];
+ u8 ilm[];
+};
+
+static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
+ const struct mt76x0_dma_buf *dma_buf,
+ const void *data, u32 len, u32 dst_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
+ __le32 reg;
+ u32 val;
+ int ret;
+
+ reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_TXD_INFO_LEN, len));
+ memcpy(buf.buf, &reg, sizeof(reg));
+ memcpy(buf.buf + sizeof(reg), data, len);
+ memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ if (ret)
+ return ret;
+ len = roundup(len, 4);
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+ if (ret)
+ return ret;
+
+ buf.len = MT_DMA_HDR_LEN + len + 4;
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+ &buf, GFP_KERNEL,
+ mt76x0_complete_urb, &cmpl);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+ dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
+ usb_kill_urb(buf.urb);
+ return -ETIMEDOUT;
+ }
+ if (mt76x0_urb_has_error(buf.urb)) {
+ dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
+ buf.urb->status);
+ return buf.urb->status;
+ }
+
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ msleep(5);
+
+ return 0;
+}
+
+static int
+mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
+ const void *data, int len, u32 dst_addr)
+{
+ int n, ret;
+
+ if (len == 0)
+ return 0;
+
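+	/* Upload the image in URB-sized chunks, advancing the DMA destination
+	 * address with each chunk.
+	 */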
+ n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+ ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
+ if (ret)
+ return ret;
+
+#if 0
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+ return -ETIMEDOUT;
+#endif
+
+ return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
+{
+ struct mt76x0_dma_buf dma_buf;
+ void *ivb;
+ u32 ilm_len, dlm_len;
+ int i, ret;
+
+ ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+ if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
+ ilm_len, sizeof(fw->ivb));
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+ if (ret)
+ goto error;
+
+ dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+ dlm_len, MT_MCU_DLM_OFFSET);
+ if (ret)
+ goto error;
+
+ ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ 0x12, 0, ivb, sizeof(fw->ivb));
+ if (ret < 0)
+ goto error;
+ ret = 0;
+
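+	/* Give the firmware up to a second to come up. */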
+ for (i = 100; i && !firmware_running(dev); i--)
+ msleep(10);
+ if (!i) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+error:
+ kfree(ivb);
+ mt76x0_usb_free_buf(dev, &dma_buf);
+
+ return ret;
+}
+
+static int mt76x0_load_firmware(struct mt76x0_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (firmware_running(dev))
+ return 0;
+
+ ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76_fw_header *) fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_dbg(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, 0x1004, 0x2c);
+
+ mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+ mt76x0_vendor_reset(dev);
+ msleep(5);
+/*
+ mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+*/
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+ val |= MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+ release_firmware(fw);
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->mt76.dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt76x0_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
+
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ init_completion(&dev->mcu.resp_cmpl);
+ if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return -ENOMEM;
+ }
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb, &dev->mcu.resp_cmpl);
+ if (ret) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
+{
+ usb_kill_urb(dev->mcu.resp.urb);
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 000000000000..8c2f77f4c3f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_MCU_H
+#define __MT76X0U_MCU_H
+
+struct mt76x0_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+#define MT_MCU_MEMMAP_WLAN 0x00410000
+/* We use the same address space for BBP as for MAC regs
+ * #define MT_MCU_MEMMAP_BBP	0x40000000
+ */
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+#define INBAND_PACKET_MAX_LEN 192
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+ MCU_CAL_RX_GROUP_DELAY,
+ MCU_CAL_TX_GROUP_DELAY,
+};
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev);
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
+
+int
+mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 000000000000..fc9857f61771
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT76X0U_H
+#define MT76X0U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT_CALIBRATE_INTERVAL (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
+
+#define MT_BBP_REG_VERSION 0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT	21 /* in units of 1024B */
+#define MT_USB_AGGR_TIMEOUT	0x80 /* in units of 33ns */
+#define MT_RX_ORDER 3
+#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt76x0_dma_buf {
+ struct urb *urb;
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+};
+
+struct mt76x0_mcu {
+ struct mutex mutex;
+
+ u8 msg_seq;
+
+ struct mt76x0_dma_buf resp;
+ struct completion resp_cmpl;
+
+ struct mt76_reg_pair *reg_pairs;
+ unsigned int reg_pairs_len;
+ u32 reg_base;
+ bool burst_read;
+};
+
+struct mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES 16
+struct mt76x0_rx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_rx {
+ struct urb *urb;
+ struct page *p;
+ } e[N_RX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int pending;
+};
+
+#define N_TX_ENTRIES 64
+
+struct mt76x0_tx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_tx {
+ struct urb *urb;
+ struct sk_buff *skb;
+ } e[N_TX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int used;
+ unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ * 0: mcast wcid
+ * 1: bssid wcid
+ * 2...: STAs
+ * ...7e: group wcids
+ * 7f: reserved
+ */
+#define N_WCIDS 128
+#define GROUP_WCID(idx) (254 - (idx))
+
+struct mt76x0_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE 39
+#define MT_FREQ_OFFSET_INVALID -128
+
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+
+enum mt_bw {
+ MT_BW_20,
+ MT_BW_40,
+};
+
+/**
+ * struct mt76x0_dev - adapter structure
+ * @mac_lock: locks out mac80211's tx status and rx paths.
+ * @tx_lock: protects @tx_q and changes of MT76_STATE_*_STATS
+ * flags in @state.
+ * @rx_lock: protects @rx_q.
+ * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex: ensures exclusive access from mac80211 callbacks.
+ * @reg_atomic_mutex: ensures atomicity of indirect register accesses
+ * (accesses to RF and BBP).
+ * @hw_atomic_mutex: ensures exclusive access to HW during critical
+ * operations (power management, channel switch).
+ */
+struct mt76x0_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mutex mutex;
+
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
+ unsigned long vif_mask;
+
+ struct mt76x0_mcu mcu;
+
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct workqueue_struct *stat_wq;
+ struct delayed_work stat_work;
+
+ struct mt76_wcid *mon_wcid;
+ struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+ spinlock_t mac_lock;
+
+ const u16 *beacon_offsets;
+
+ u8 macaddr[ETH_ALEN];
+ struct mt76x0_eeprom_params *ee;
+
+ struct mutex reg_atomic_mutex;
+ struct mutex hw_atomic_mutex;
+
+ u32 rxfilter;
+ u32 debugfs_reg;
+
+ /* TX */
+ spinlock_t tx_lock;
+ struct mt76x0_tx_queue *tx_q;
+ struct sk_buff_head tx_skb_done;
+
+ atomic_t avg_ampdu_len;
+
+ /* RX */
+ spinlock_t rx_lock;
+ struct mt76x0_rx_queue rx_q;
+
+ /* Connection monitoring things */
+ spinlock_t con_mon_lock;
+ u8 ap_bssid[ETH_ALEN];
+
+ s8 bcn_freq_off;
+ u8 bcn_phy_mode;
+
+ int avg_rssi; /* starts at 0 and converges */
+
+ u8 agc_save;
+ u16 chainmask;
+
+ struct mac_stats stats;
+};
+
+struct mt76x0_wcid {
+ u8 idx;
+ u8 hw_key_idx;
+
+ u16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 is_probe:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+struct mt76_sta {
+ struct mt76_wcid wcid;
+ struct mt76_tx_status status;
+ int n_frames;
+ u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+struct mt76x0_rxwi;
+
+extern const struct ieee80211_ops mt76x0_ops;
+
+static inline bool is_mt7610e(struct mt76x0_dev *dev)
+{
+ /* TODO */
+ return false;
+}
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev);
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len);
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len);
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n);
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
+int mt76x0_init_hardware(struct mt76x0_dev *dev);
+int mt76x0_register_device(struct mt76x0_dev *dev);
+void mt76x0_cleanup(struct mt76x0_dev *dev);
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x0_dev *dev);
+void mt76x0_mac_stop(struct mt76x0_dev *dev);
+
+/* PHY */
+void mt76x0_phy_init(struct mt76x0_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
+void mt76x0_agc_save(struct mt76x0_dev *dev);
+void mt76x0_agc_restore(struct mt76x0_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt76x0_mac_work(struct work_struct *work);
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
+
+/* TX */
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
+void mt76x0_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76x0_remove_hdr_pad(struct sk_buff *skb);
+int mt76x0_insert_hdr_pad(struct sk_buff *skb);
+
+int mt76x0_dma_init(struct mt76x0_dev *dev);
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 000000000000..5da7bfbe907f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1008 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "phy.h"
+#include "initvals.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static int
+mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+{
+ int ret = 0;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+ trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt76_rr(dev, MT_RF_CSR_CFG);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+ trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ .value = val,
+ };
+
+ return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ } else {
+ WARN_ON_ONCE(1);
+ return mt76x0_rf_csr_wr(dev, offset, val);
+ }
+}
+
+static int
+rf_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret;
+ u32 val;
+
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ };
+
+ ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ val = pair.value;
+ } else {
+ WARN_ON_ONCE(1);
+ ret = val = mt76x0_rf_csr_rr(dev, offset);
+ }
+
+ return (ret < 0) ? ret : val;
+}
+
+static int
+rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = rf_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ ret = rf_wr(dev, offset, val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int
+rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ return rf_rmw(dev, offset, 0, val);
+}
+
+#if 0
+static int
+rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+{
+ return rf_rmw(dev, offset, mask, 0);
+}
+#endif
+
+#define RF_RANDOM_WRITE(dev, tab) \
+	mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab))
+
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+{
+ int i = 20;
+ u32 val;
+
+ do {
+ val = mt76_rr(dev, MT_BBP(CORE, 0));
+		dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
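+		/* BBP is ready once its version register reads neither
+		 * all-zeros nor all-ones.
+		 */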
+ if (val && ~val)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
+ u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
+{
+ s8 lna_gain, rssi_offset;
+ int val;
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
+ lna_gain = dev->ee->lna_gain_2ghz;
+ rssi_offset = dev->ee->rssi_offset_2ghz[0];
+ } else {
+ lna_gain = dev->ee->lna_gain_5ghz[0];
+ rssi_offset = dev->ee->rssi_offset_5ghz[0];
+ }
+
+ val = rxwi->rssi[0] + rssi_offset - lna_gain;
+
+ return val;
+}
+
+static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+{
+ u8 val;
+
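+	/* Bail out unless B0.R04 bits [6:4] report the expected mode (0x3). */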
+ val = rf_rr(dev, MT_RF(0, 4));
+ if ((val & 0x70) != 0x30)
+ return;
+
+ /*
+ * Calibration Mode - Open loop, closed loop, and amplitude:
+ * B0.R06.[0]: 1
+ * B0.R06.[3:1] bp_close_code: 100
+ * B0.R05.[7:0] bp_open_code: 0x0
+ * B0.R04.[2:0] cal_bits: 000
+ * B0.R03.[2:0] startup_time: 011
+ * B0.R03.[6:4] settle_time:
+ * 80MHz channel: 110
+ * 40MHz channel: 101
+ * 20MHz channel: 100
+ */
+ val = rf_rr(dev, MT_RF(0, 6));
+ val &= ~0xf;
+ val |= 0x09;
+ rf_wr(dev, MT_RF(0, 6), val);
+
+ val = rf_rr(dev, MT_RF(0, 5));
+ if (val != 0)
+ rf_wr(dev, MT_RF(0, 5), 0x0);
+
+ val = rf_rr(dev, MT_RF(0, 4));
+ val &= ~0x07;
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ val = rf_rr(dev, MT_RF(0, 3));
+ val &= ~0x77;
+ if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
+ val |= 0x63;
+ } else if (channel == 3 || channel == 4 || channel == 10) {
+ val |= 0x53;
+ } else if (channel == 2 || channel == 5 || channel == 6 ||
+ channel == 8 || channel == 11 || channel == 12) {
+ val |= 0x43;
+ } else {
+ WARN(1, "Unknown channel %u\n", channel);
+ return;
+ }
+ rf_wr(dev, MT_RF(0, 3), val);
+
+ /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
+ val = rf_rr(dev, MT_RF(0, 4));
+ val = ((val & ~(0x80)) | 0x80);
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ msleep(2);
+}
+
+static void
+mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
+{
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+
+static void
+mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x45);
+ rf_wr(dev, MT_RF(6, 0), 0x44);
+
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
+ break;
+ case NL80211_BAND_5GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x44);
+ rf_wr(dev, MT_RF(6, 0), 0x45);
+
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
+ break;
+ default:
+ break;
+ }
+}
+
+#define EXT_PA_2G_5G 0x0
+#define EXT_PA_5G_ONLY 0x1
+#define EXT_PA_2G_ONLY 0x2
+#define INT_PA_2G_5G 0x3
+
+static void
+mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ u16 rf_band = rf_bw_band & 0xff00;
+ u16 rf_bw = rf_bw_band & 0x00ff;
+ u32 mac_reg;
+ u8 rf_val;
+ int i;
+ bool bSDM = false;
+ const struct mt76x0_freq_item *freq_item;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
+ if (channel == mt76x0_sdm_channel[i]) {
+ bSDM = true;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
+ if (channel == mt76x0_frequency_plan[i].channel) {
+ rf_band = mt76x0_frequency_plan[i].band;
+
+ if (bSDM)
+ freq_item = &(mt76x0_sdm_frequency_plan[i]);
+ else
+ freq_item = &(mt76x0_frequency_plan[i]);
+
+ rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+ rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+ rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+ rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+ rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR32_b7b5;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+			/* R32<4:0> pll_den: (Denominator - 8) */
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR32_b4b0;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+ /* R31<7:5> */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR31_b7b5;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+			/* R31<4:0> pll_k (Numerator) */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR31_b4b0;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+ /* R30<7> sdm_reset_n */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x80;
+ if (bSDM) {
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ rf_val |= 0x80;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ } else {
+ rf_val |= freq_item->pllR30_b7;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ }
+
+ /* R30<6:2> sdmmash_prbs,sin */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x7C;
+ rf_val |= freq_item->pllR30_b6b2;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<1> sdm_bp */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x02;
+ rf_val |= (freq_item->pllR30_b1 << 1);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<0> R29<7:0> (hex) pll_n */
+ rf_val = freq_item->pll_n & 0x00FF;
+ rf_wr(dev, MT_RF(0, 29), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x1;
+ rf_val |= ((freq_item->pll_n >> 8) & 0x0001);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R28<7:6> isi_iso */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0xC0;
+ rf_val |= freq_item->pllR28_b7b6;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<5:4> pfd_dly */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x30;
+ rf_val |= freq_item->pllR28_b5b4;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<3:2> clksel option */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x0C;
+ rf_val |= freq_item->pllR28_b3b2;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
+ rf_val = freq_item->pll_sdm_k & 0x000000FF;
+ rf_wr(dev, MT_RF(0, 26), rf_val);
+
+ rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF);
+ rf_wr(dev, MT_RF(0, 27), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x3;
+ rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003);
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R24<1:0> xo_div */
+ rf_val = rf_rr(dev, MT_RF(0, 24));
+ rf_val &= ~0x3;
+ rf_val |= freq_item->pllR24_b1b0;
+ rf_wr(dev, MT_RF(0, 24), rf_val);
+
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ } else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
+ (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
+ rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg &= ~0xC; /* Clear 0x518[3:2] */
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+
+ if (dev->ee->pa_type == INT_PA_2G_5G ||
+ (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
+ (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
+ ; /* Internal PA - nothing to do. */
+ } else {
+		/*
+		 * MT_RF_MISC (offset: 0x0518)
+		 * [2] 1'b1: enable external A band PA, 1'b0: disable it
+		 * [3] 1'b1: enable external G band PA, 1'b0: disable it
+		 */
+ if (rf_band & RF_A_BAND) {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x4;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ } else {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x8;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ }
+
+ /* External PA */
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
+ if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
+ rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+ mt76x0_rf_ext_pa_tab[i].value);
+ }
+
+ if (rf_band & RF_G_BAND) {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
+ /* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x896400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
+ /* Set Atten mode = 0 For Ext A band, Disable Tx Inc dcoc Cal. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x890400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ }
+}
+
+static void
+mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if ((rf_bw_band & item->bw_band) != rf_bw_band)
+ continue;
+
+ if (pair->reg == MT_BBP(AGC, 8)) {
+ u32 val = pair->value;
+ u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+
+			if (channel > 14) {
+				if (channel < 100)
+					gain -= dev->ee->lna_gain_5ghz[0] * 2;
+				else if (channel < 137)
+					gain -= dev->ee->lna_gain_5ghz[1] * 2;
+				else
+					gain -= dev->ee->lna_gain_5ghz[2] * 2;
+			} else {
+				gain -= dev->ee->lna_gain_2ghz * 2;
+			}
+
+ val &= ~MT_BBP_AGC_GAIN;
+ val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
+ mt76_wr(dev, pair->reg, val);
+ } else {
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+ }
+}
+
+#if 0
+static void
+mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
+ val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_7, val);
+
+ /* TODO: fix VHT */
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_8, val);
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
+{
+ u32 val;
+ int i;
+ int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
+
+ for (i = 0; i < 4; i++) {
+ if (channel <= 14)
+ val = dev->ee->tx_pwr_cfg_2g[i][bw];
+ else
+ val = dev->ee->tx_pwr_cfg_5g[i][bw];
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
+ }
+
+ mt76x0_extra_power_over_mac(dev);
+}
+#endif
+
+static void
+mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+{
+	enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4 };
+ int bw;
+
+ switch (width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bw = BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ bw = BW_10;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_5:
+		/* TODO: unsupported width, report an error */
+		return;
+ }
+
+ mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void
+mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
+{
+ static const int mt76x0_tx_pwr_ch_list[] = {
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,
+ 36,38,40,44,46,48,52,54,56,60,62,64,
+ 100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140,
+ 149,151,153,157,159,161,165,167,169,171,173,
+ 42,58,106,122,155
+ };
+ int i;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
+ if (mt76x0_tx_pwr_ch_list[i] == channel)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
+ return;
+
+ val = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ val &= ~0x3f3f;
+ val |= dev->ee->tx_pwr_per_chan[i];
+ val |= 0x2f2f << 16;
+ mt76_wr(dev, MT_TX_ALC_CFG_0, val);
+}
+
+static int
+__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
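+	/* Extension CCA configuration per control-channel group; the entry
+	 * matching ch_group_index below is programmed into MT_EXT_CCA_CFG.
+	 */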
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
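+ /* Each entry N programs CCA0 = N and enables BIT(N) in the CCA
+ * mask, one remap per position of the primary channel within the
+ * wide channel.
+ */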
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ int ch_group_index, freq, freq1;
+ u8 channel;
+ u32 val;
+ u16 rf_bw_band;
+
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chandef->chan->hw_value;
+ rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+
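+ /* Adjust the channel number to the center of the 40/80 MHz group;
+ * e.g. a 40 MHz chandef with primary channel 36 tunes the RF to
+ * channel 38.
+ */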
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ ch_group_index = 0;
+ else
+ ch_group_index = 1;
+ channel += 2 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_80;
+ break;
+ default:
+ ch_group_index = 0;
+ rf_bw_band |= RF_BW_20;
+ break;
+ }
+
+ mt76x0_bbp_set_bw(dev, chandef->width);
+ mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
+ mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ mt76x0_phy_set_band(dev, chandef->chan->band);
+ mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+
+ /* set Japan Tx filter at channel 14 */
+ val = mt76_rr(dev, MT_BBP(CORE, 1));
+ if (channel == 14)
+ val |= 0x20;
+ else
+ val &= ~0x20;
+ mt76_wr(dev, MT_BBP(CORE, 1), val);
+
+ mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
+
+ /* The vendor driver doesn't do this */
+ /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+
+ if (scan)
+ mt76x0_vco_cal(dev, channel);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+ mt76x0_phy_set_chan_pwr(dev, channel);
+
+ dev->mt76.chandef = *chandef;
+ return 0;
+}
+
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+ ret = __mt76x0_phy_set_channel(dev, chandef);
+ mutex_unlock(&dev->hw_atomic_mutex);
+
+ return ret;
+}
+
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+{
+ u32 tx_alc, reg_val;
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);
+
+ mt76x0_vco_cal(dev, channel);
+
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, 0x2124);
+ reg_val &= 0xffffff7e;
+ mt76_wr(dev, 0x2124, reg_val);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
+
+ mt76_wr(dev, 0x2124, reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ msleep(100);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+}
+
+void mt76x0_agc_save(struct mt76x0_dev *dev)
+{
+ /* Only one RX path */
+ dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
+}
+
+void mt76x0_agc_restore(struct mt76x0_dev *dev)
+{
+ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
+}
+
+static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+{
+ u8 rf_b7_73, rf_b0_66, rf_b0_73;
+ int cycle, temp;
+ u32 val;
+ s32 sval;
+
+ rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
+ rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
+ rf_b0_73 = rf_rr(dev, MT_RF(0, 73));
+
+ rf_wr(dev, MT_RF(7, 73), 0x02);
+ rf_wr(dev, MT_RF(0, 66), 0x23);
+ rf_wr(dev, MT_RF(0, 73), 0x01);
+
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
+
+ for (cycle = 0; cycle < 2000; cycle++) {
+ val = mt76_rr(dev, MT_BBP(CORE, 34));
+ if (!(val & 0x10))
+ break;
+ udelay(3);
+ }
+
+ if (cycle >= 2000) {
+ val &= 0x10;
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+ goto done;
+ }
+
+ sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+ if (!(sval & 0x80))
+ sval &= 0x7f; /* Positive */
+ else
+ sval |= 0xffffff00; /* Negative */
+
+ temp = (35 * (sval - dev->ee->temp_off)) / 10 + 25;
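+ /* Worked example with assumed values: temp_off = 8 and a raw
+ * reading of 18 give temp = 35 * (18 - 8) / 10 + 25 = 60 degrees C.
+ */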
+
+done:
+ rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+ rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+ rf_wr(dev, MT_RF(0, 73), rf_b0_73);
+}
+
+static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+{
+ u32 val, init_vga;
+
+ init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
+ if (dev->avg_rssi > -60)
+ init_vga -= 0x20;
+ else if (dev->avg_rssi > -70)
+ init_vga -= 0x10;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 8));
+ val &= 0xFFFF80FF;
+ val |= init_vga << 8;
+ mt76_wr(dev, MT_BBP(AGC, 8), val);
+}
+
+static void mt76x0_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ cal_work.work);
+
+ mt76x0_dynamic_vga_tuning(dev);
+ mt76x0_temp_sensor(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info)
+{
+ /* Start/stop collecting beacon data */
+ spin_lock_bh(&dev->con_mon_lock);
+ ether_addr_copy(dev->ap_bssid, info->bssid);
+ dev->avg_rssi = 0;
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+}
+
+static void
+mt76x0_set_rx_chains(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~(BIT(3) | BIT(4));
+
+ if (dev->chainmask & BIT(1))
+ val |= BIT(3);
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+
+ mb();
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+
+static void
+mt76x0_set_tx_dac(struct mt76x0_dev *dev)
+{
+ if (dev->chainmask & BIT(1))
+ mt76_set(dev, MT_BBP(TXBE, 5), 3);
+ else
+ mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x0_rf_init(struct mt76x0_dev *dev)
+{
+ int i;
+ u8 val;
+
+ RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
+
+ if (item->bw_band == RF_BW_20)
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
+ rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ /*
+ * Frequency calibration:
+ * E1: B0.R22<6:0>: xo_cxo<6:0>
+ * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
+ */
+ rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
+ val = rf_rr(dev, MT_RF(0, 22));
+
+ /*
+ * Reset the DAC during power up: set B0.R73<7>=1, then B0.R73<7>=0,
+ * and then B0.R73<7>=1 again.
+ */
+ val = rf_rr(dev, MT_RF(0, 73));
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val &= ~0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+
+ /*
+ * vcocal_en: initiate VCO calibration (the bit self-clears after
+ * completion). It should be the last step of the RF configuration.
+ */
+ rf_set(dev, MT_RF(0, 4), 0x80);
+}
+
+static void mt76x0_ant_select(struct mt76x0_dev *dev)
+{
+ /* Single antenna mode. */
+ mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+ mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+ mt76_clear(dev, MT_COEXCFG0, BIT(2));
+ mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
+}
+
+void mt76x0_phy_init(struct mt76x0_dev *dev)
+{
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
+
+ mt76x0_ant_select(dev);
+
+ mt76x0_rf_init(dev);
+
+ mt76x0_set_rx_chains(dev);
+ mt76x0_set_tx_dac(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 000000000000..2880a43c3cb0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,81 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MT76X0_PHY_H_
+#define _MT76X0_PHY_H_
+
+#define RF_G_BAND 0x0100
+#define RF_A_BAND 0x0200
+#define RF_A_BAND_LB 0x0400
+#define RF_A_BAND_MB 0x0800
+#define RF_A_BAND_HB 0x1000
+#define RF_A_BAND_11J 0x2000
+
+#define RF_BW_20 1
+#define RF_BW_40 2
+#define RF_BW_10 4
+#define RF_BW_80 8
+
+#define MT_RF(bank, reg) ((bank) << 16 | (reg))
+#define MT_RF_BANK(offset) ((offset) >> 16)
+#define MT_RF_REG(offset) ((offset) & 0xff)
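+/* Example: MT_RF(7, 73) encodes bank 7, register 73 as 0x70049;
+ * MT_RF_BANK() and MT_RF_REG() recover the two fields.
+ */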
+
+struct mt76x0_bbp_switch_item {
+ u16 bw_band;
+ struct mt76_reg_pair reg_pair;
+};
+
+struct mt76x0_rf_switch_item {
+ u32 rf_bank_reg;
+ u16 bw_band;
+ u8 value;
+};
+
+struct mt76x0_freq_item {
+ u8 channel;
+ u32 band;
+ u8 pllR37;
+ u8 pllR36;
+ u8 pllR35;
+ u8 pllR34;
+ u8 pllR33;
+ u8 pllR32_b7b5;
+ u8 pllR32_b4b0; /* PLL_DEN (Denominator - 8) */
+ u8 pllR31_b7b5;
+ u8 pllR31_b4b0; /* PLL_K (Numerator) */
+ u8 pllR30_b7; /* sdm_reset_n */
+ u8 pllR30_b6b2; /* sdmmash_prbs,sin */
+ u8 pllR30_b1; /* sdm_bp */
+ u16 pll_n; /* R30<0>, R29<7:0> (hex) */
+ u8 pllR28_b7b6; /* isi,iso */
+ u8 pllR28_b5b4; /* pfd_dly */
+ u8 pllR28_b3b2; /* clksel option */
+ u32 pll_sdm_k; /* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
+ u8 pllR24_b1b0; /* xo_div */
+};
+
+struct mt76x0_rate_pwr_item {
+ s8 mcs_power;
+ u8 rf_pa_mode;
+};
+
+struct mt76x0_rate_pwr_tab {
+ struct mt76x0_rate_pwr_item cck[4];
+ struct mt76x0_rate_pwr_item ofdm[8];
+ struct mt76x0_rate_pwr_item ht[8];
+ struct mt76x0_rate_pwr_item vht[10];
+ struct mt76x0_rate_pwr_item stbc[8];
+ struct mt76x0_rate_pwr_item mcs32;
+};
+
+#endif /* _MT76X0_PHY_H_ */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
new file mode 100644
index 000000000000..16bed4aaa242
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_COEXCFG3 0x004c
+
+#define MT_LDO_CTRL_0 0x006c
+#define MT_LDO_CTRL_1 0x0070
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_IOCFG_6 0x0124
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_WMM_CTRL 0x0230 /* MT76x0 */
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_USB_DMA_CFG 0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+#if 0
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#endif
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+#define MT_RING_SIZE 0x10
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_MISC 0x0518
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_COM_REG0 0x0730
+#define MT_COM_REG1 0x0734
+#define MT_COM_REG2 0x0738
+#define MT_COM_REG3 0x073C
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_MAC_CSR0 0x1000
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_LED_CFG 0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
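+/* Example: WCID 40 maps to MT_WCID_DROP_BASE + 4, bit 8. */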
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_HT_FBK_CFG0 0x1354
+#define MT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+#define MT_HT_CTRL_CFG 0x1410
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+
+#define MT_RX_STA_CNT0 0x1700
+#define MT_RX_STA_CNT1 0x1704
+#define MT_RX_STA_CNT2 0x1708
+#define MT_TX_STA_CNT0 0x170c
+#define MT_TX_STA_CNT1 0x1710
+#define MT_TX_STA_CNT2 0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
+ * MT_TX_STAT_FIFO_ETXBF BIT(27)
+ * MT_TX_STAT_FIFO_SND BIT(28)
+ * MT_TX_STAT_FIFO_ITXBF BIT(29)
+ * However, tests show that b16-31 have the same layout as TXWI rate_ctl
+ * with rate set to rate at which frame was acked.
+ */
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+
+#define MT_MPDU_DENSITY_CNT 0x1740
+
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
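+/* Example: MT_TX_AGG_CNT(3) = 0x172c, MT_TX_AGG_CNT(9) = 0x1750. */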
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
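+/* Example: MT_BBP(AGC, 8) = 0x2300 + (8 << 2) = 0x2320. */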
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) \
+ (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx) \
+ (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx) \
+ ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) \
+ (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) \
+ (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) \
+ ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
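+/* Example: BSS 1, key index 2 occupies bits 27:24 of
+ * MT_SKEY_MODE(1) = 0xb000.
+ */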
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
new file mode 100644
index 000000000000..8abdd3cd546d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
new file mode 100644
index 000000000000..8a752a09f2dc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76X0U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x0.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x0
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "%04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt76x0_submit_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \
+ struct urb u; \
+ u.pipe = __pipe; \
+ u.transfer_buffer_length = __len; \
+ trace_mt76x0_submit_urb(__dev, &u); \
+})
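+/* The helper above fakes an urb on the stack so that synchronous
+ * transfers can reuse the mt76x0_submit_urb tracepoint without a
+ * real URB.
+ */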
+
+TRACE_EVENT(mt76x0_mcu_msg_send,
+ TP_PROTO(struct mt76_dev *dev,
+ struct sk_buff *skb, u32 csum, bool resp),
+ TP_ARGS(dev, skb, csum, resp),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, info)
+ __field(u32, csum)
+ __field(bool, resp)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->info = *(u32 *)skb->data;
+ __entry->csum = csum;
+ __entry->resp = resp;
+ ),
+ TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+ DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt76x0_vend_req,
+ TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t buflen, int ret),
+ TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+ __field(u16, val) __field(u16, offset) __field(void*, buf)
+ __field(int, buflen) __field(int, ret)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = pipe;
+ __entry->req = req;
+ __entry->req_type = req_type;
+ __entry->val = val;
+ __entry->offset = offset;
+ __entry->buf = buf;
+ __entry->buflen = buflen;
+ __entry->ret = ret;
+ ),
+ TP_printk(DEV_PR_FMT
+ "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+ DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+ __entry->req_type, __entry->val, __entry->offset,
+ !!__entry->buf, __entry->buflen)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, bank)
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ __entry->bank = bank;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+ )
+);
+
+TRACE_EVENT(mt76x0_rx,
+ TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
+ TP_ARGS(dev, rxwi, f),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76x0_rxwi, rxwi)
+ __field(u32, fce_info)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->rxwi = *rxwi;
+ __entry->fce_info = f;
+ ),
+ TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
+ le32_to_cpu(__entry->rxwi.rxinfo),
+ le32_to_cpu(__entry->rxwi.ctl))
+);
+
+TRACE_EVENT(mt76x0_tx,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
+ struct mt76_sta *sta, struct mt76_txwi *h),
+ TP_ARGS(dev, skb, sta, h),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76_txwi, h)
+ __field(struct sk_buff *, skb)
+ __field(struct mt76_sta *, sta)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->h = *h;
+ __entry->skb = skb;
+ __entry->sta = sta;
+ ),
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+ __entry->skb, __entry->sta,
+ le16_to_cpu(__entry->h.flags),
+ le16_to_cpu(__entry->h.rate_ctl),
+ __entry->h.ack_ctl, __entry->h.wcid,
+ le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt76x0_tx_dma_done,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
+ TP_ARGS(dev, skb),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(struct sk_buff *, skb)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->skb = skb;
+ ),
+ TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt76x0_tx_status_cleaned,
+ TP_PROTO(struct mt76_dev *dev, int cleaned),
+ TP_ARGS(dev, cleaned),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, cleaned)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cleaned = cleaned;
+ ),
+ TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt76x0_tx_status,
+ TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
+ TP_ARGS(dev, stat1, stat2),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, stat1) __field(u32, stat2)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->stat1 = stat1;
+ __entry->stat2 = stat2;
+ ),
+ TP_printk(DEV_PR_FMT "%08x %08x",
+ DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt76x0_rx_dma_aggr,
+ TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
+ TP_ARGS(dev, cnt, paged),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, cnt)
+ __field(bool, paged)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cnt = cnt;
+ __entry->paged = paged;
+ ),
+ TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+ DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(mt76x0_set_shared_key,
+ TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
+ TP_ARGS(dev, vid, key),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, vid)
+ __field(u8, key)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->vid = vid;
+ __entry->key = key;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
new file mode 100644
index 000000000000..751b49c28ae5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ return q2hwq(qid);
+}
+
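+/* Undo the TX DMA framing: pop the TXWI plus the 4-byte DMA header,
+ * drop the alignment pad if the 802.11 header needed one, and trim
+ * the skb back to the length saved in the tx_info driver data.
+ */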
+static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+ struct ieee80211_tx_info *info)
+{
+ int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+ skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+ if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+ mt76x0_remove_hdr_pad(skb);
+
+ skb_trim(skb, pkt_len);
+}
+
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ mt76x0_tx_skb_remove_dma_overhead(skb, info);
+
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_tx_status(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76_txwi) + 4;
+ if (hdr_len % 4)
+ need_head += 2;
+
+ return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+ int pkt_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct mt76_txwi *txwi;
+ unsigned long flags;
+ u16 txwi_flags = 0;
+ u32 pkt_id;
+ u16 rate_ctl;
+ u8 nss;
+
+ txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ if (rate->idx < 0 || !rate->count) {
+ rate_ctl = wcid->tx_rate;
+ nss = wcid->tx_rate_nss;
+ } else {
+ rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
+ }
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+
+ txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ pkt_id = 1;
+ } else {
+ pkt_id = 0;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ pkt_id |= MT_TXWI_PKTID_PROBE;
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 7, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ ba_size = 0;
+ } else {
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU;
+ txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+ }
+
+ txwi->wcid = wcid->idx;
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(pkt_len);
+ txwi->pktid = pkt_id;
+
+ return txwi;
+}
+
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct mt76_sta *msta = NULL;
+ struct mt76_wcid *wcid = dev->mon_wcid;
+ struct mt76_txwi *txwi;
+ int pkt_len = skb->len;
+ int hw_q = skb2q(skb);
+
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+ if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return;
+ }
+
+ if (sta) {
+ msta = (struct mt76_sta *) sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ wcid = &mvif->group_wcid;
+ }
+
+ txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+ if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
+ return;
+
+ trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
+}
+
+void mt76x0_tx_stat(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ stat_work.work);
+ struct mt76_tx_status stat;
+ unsigned long flags;
+ int cleaned = 0;
+ u8 update = 1;
+
+ while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
+ stat = mt76x0_mac_fetch_tx_status(dev);
+ if (!stat.valid)
+ break;
+
+ mt76x0_send_tx_status(dev, &stat, &update);
+
+ cleaned++;
+ }
+ trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (cleaned)
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+ else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(20));
+ else
+ clear_bit(MT76_READING_STATS, &dev->mt76.state);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+ u32 val;
+
+ /* TODO: should we do funny things with the parameters?
+ * See what mt76x0_set_default_edca() used to do in init.c.
+ */
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
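+ /* e.g. cw_min = 15 stores the exponent fls(15) = 4, i.e. 2^4 - 1 = 15 */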
+
+ WARN_ON(params->txop > 0xff);
+ WARN_ON(params->aifs > 0xf);
+ WARN_ON(cw_min > 0xf);
+ WARN_ON(cw_max > 0xf);
+
+ val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ /* TODO: based on the user-controlled EnableTxBurst variable the
+ * vendor driver sets a really long TXOP on AC0 (see connect.c:2009),
+ * but only on connect? When not connected it should be 0.
+ */
+ if (!hw_q)
+ val |= 0x60;
+ else
+ val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
+ mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 000000000000..54ae1f113be2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt76x0.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt76x0_device_table[] = {
+ { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */
+ { USB_DEVICE(0x13B1, 0x003E) }, /* Linksys AE6000 */
+ { USB_DEVICE(0x0E8D, 0x7610) }, /* Sabrent NTWLAC */
+ { USB_DEVICE(0x7392, 0xa711) }, /* Edimax 7711mac */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax / Elecom */
+ { USB_DEVICE(0x148f, 0x761a) }, /* TP-Link TL-WDN5200 */
+ { USB_DEVICE(0x148f, 0x760a) }, /* TP-Link unknown */
+ { USB_DEVICE(0x0b05, 0x17d1) }, /* Asus USB-AC51 */
+ { USB_DEVICE(0x0b05, 0x17db) }, /* Asus USB-AC50 */
+ { USB_DEVICE(0x0df6, 0x0075) }, /* Sitecom WLA-3100 */
+ { USB_DEVICE(0x2019, 0xab31) }, /* Planex GW-450D */
+ { USB_DEVICE(0x2001, 0x3d02) }, /* D-LINK DWA-171 rev B1 */
+ { USB_DEVICE(0x0586, 0x3425) }, /* Zyxel NWD6505 */
+ { USB_DEVICE(0x07b8, 0x7610) }, /* AboCom AU7212 */
+ { USB_DEVICE(0x04bb, 0x0951) }, /* I-O DATA WN-AC433UK */
+ { USB_DEVICE(0x057c, 0x8502) }, /* AVM FRITZ!WLAN USB Stick AC 430 */
+ { USB_DEVICE(0x293c, 0x5702) }, /* Comcast Xfinity KXW02AAA */
+ { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */
+ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
+ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */
+ { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
+ { 0, }
+};
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ buf->len = len;
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+ return !buf->urb || !buf->buf;
+}
+
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+ usb_free_urb(buf->urb);
+}
+
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned pipe;
+ int ret;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
+ else
+ pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);
+
+ usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+ buf->urb->transfer_dma = buf->dma;
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
+ ret = usb_submit_urb(buf->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+ dir, ep_idx, ret);
+ return ret;
+}
+
+void mt76x0_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int i, ret;
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ const unsigned int pipe = (direction == USB_DIR_IN) ?
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ ret = usb_control_msg(usb_dev, pipe, req, req_type,
+ val, offset, buf, buflen,
+ MT_VEND_REQ_TOUT_MS);
+ trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
+ buf, buflen, ret);
+
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+
+ msleep(5);
+ }
+
+ dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+
+ return ret;
+}
+
+void mt76x0_vendor_reset(struct mt76x0_dev *dev)
+{
+ mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+ u32 val = ~0;
+
+ WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_READ, USB_DIR_IN,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ if (ret == MT_VEND_BUF)
+ val = get_unaligned_le32(mdev->data);
+ else if (ret > 0)
+ dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+ ret, offset);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ trace_mt76x0_reg_read(dev, offset, val);
+ return val;
+}
+
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ struct mt76x0_dev *mdev = dev;
+ int ret;
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val & 0xffff, offset, NULL, 0);
+ if (!ret)
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+
+ WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ put_unaligned_le32(val, mdev->data);
+ ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ trace_mt76x0_reg_write(dev, offset, val);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+}
+
+static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt76x0_rr(dev, offset) & ~mask;
+ mt76x0_wr(dev, offset, val);
+ return val;
+}
+
+static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+ WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+ mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
+}
+
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
+{
+ mt76_wr(dev, offset, get_unaligned_le32(addr));
+ mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
+ struct mt76x0_dev *dev)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+ unsigned i, ep_i = 0, ep_o = 0;
+
+ BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
+ BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ ep_i++ < __MT_EP_IN_MAX) {
+ dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
+ dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+ /* Note: this is ignored by the USB subsystem, but the
+ * vendor code does it. We can drop this at some point.
+ */
+ dev->in_ep[ep_i - 1] |= USB_DIR_IN;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ ep_o++ < __MT_EP_OUT_MAX) {
+ dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
+ dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+ }
+ }
+
+ if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+ dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
+ ep_i, ep_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mt76x0_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt76x0_dev *dev;
+ u32 asic_rev, mac_rev;
+ int ret;
+ static const struct mt76_bus_ops usb_ops = {
+ .rr = mt76x0_rr,
+ .wr = mt76x0_wr,
+ .rmw = mt76x0_rmw,
+ .copy = mt76x0_wr_copy,
+ };
+
+ dev = mt76x0_alloc_device(&usb_intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ dev->mt76.bus = &usb_ops;
+
+ ret = mt76x0_assign_pipes(usb_intf, dev);
+ if (ret)
+ goto err;
+
+ /* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
+ mt76x0_chip_onoff(dev, false, false);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt76_rr(dev, MT_MAC_CSR0);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
+ asic_rev, mac_rev);
+
+ /* Note: vendor driver skips this check for MT76X0U */
+ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_register_device(dev);
+ if (ret)
+ goto err_hw;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+err_hw:
+ mt76x0_cleanup(dev);
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+ return ret;
+}
+
+static void mt76x0_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ if (!initialized)
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt76x0_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+}
+
+static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt76x0_cleanup(dev);
+
+ return 0;
+}
+
+static int mt76x0_resume(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ int ret;
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt76x0_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x0_device_table,
+ .probe = mt76x0_probe,
+ .disconnect = mt76x0_disconnect,
+ .suspend = mt76x0_suspend,
+ .resume = mt76x0_resume,
+ .reset_resume = mt76x0_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x0_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
new file mode 100644
index 000000000000..492e431390a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_USB_H
+#define __MT76X0U_USB_H
+
+#include "mt76x0.h"
+
+#define MT7610_FIRMWARE "mediatek/mt7610u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+#define MT_VEND_BUF sizeof(__le32)
+
+static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
+{
+ return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
+}
+
+static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
+{
+ return interface_to_usbdev(to_usb_interface(mt76->dev));
+}
+
+static inline bool mt76x0_urb_has_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN;
+}
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf);
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+void mt76x0_complete_urb(struct urb *urb);
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen);
+void mt76x0_vendor_reset(struct mt76x0_dev *dev);
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
new file mode 100644
index 000000000000..7856dd760419
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+void mt76x0_remove_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
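+ /* drop the 2-byte pad between the 802.11 header and the
+ * payload: shift the header forward, then pull 2 bytes off the head
+ */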
+ memmove(skb->data + 2, skb->data, len);
+ skb_pull(skb, 2);
+}
+
+int mt76x0_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+ int ret;
+
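+ /* the payload must be 4-byte aligned; a header length already
+ * a multiple of four needs no pad
+ */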
+ if (len % 4 == 0)
+ return 0;
+
+ ret = skb_cow(skb, 2);
+ if (ret)
+ return ret;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index dc12bbdbb2ee..dca3209bf5f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -27,11 +27,15 @@
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/kfifo.h>
+#include <linux/average.h>
#define MT7662_FIRMWARE "mt7662.bin"
#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
#define MT7662_EEPROM_SIZE 512
+#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
+
#define MT76x2_RX_RING_SIZE 256
#define MT_RX_HEADROOM 32
@@ -47,11 +51,14 @@
#include "mt76x2_mac.h"
#include "mt76x2_dfs.h"
+DECLARE_EWMA(signal, 10, 8)
+
struct mt76x2_mcu {
struct mutex mutex;
wait_queue_head_t wait;
struct sk_buff_head res_q;
+ struct mt76u_buf res_u;
u32 msg_seq;
};
@@ -69,9 +76,8 @@ struct mt76x2_calibration {
u8 agc_gain_init[MT_MAX_CHAINS];
u8 agc_gain_cur[MT_MAX_CHAINS];
- int avg_rssi[MT_MAX_CHAINS];
- int avg_rssi_all;
-
+ u16 false_cca;
+ s8 avg_rssi_all;
s8 agc_gain_adjust;
s8 low_gain;
@@ -120,10 +126,13 @@ struct mt76x2_dev {
u8 beacon_mask;
u8 beacon_data_mask;
- u32 rxfilter;
+ u8 tbtt_count;
+ u16 beacon_int;
u16 chainmask;
+ u32 rxfilter;
+
struct mt76x2_calibration cal;
s8 target_power;
@@ -149,8 +158,28 @@ struct mt76x2_sta {
struct mt76x2_vif *vif;
struct mt76x2_tx_status status;
int n_frames;
+
+ struct ewma_signal rssi;
+ int inactive_count;
};
+static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+ int i;
+
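+ /* MT_MAC_CSR0 reads back as 0 or ~0 until the MAC comes out of reset */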
+ for (i = 0; i < 500; i++) {
+ switch (mt76_rr(dev, MT_MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+ return false;
+}
+
static inline bool is_mt7612(struct mt76x2_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612;
@@ -158,6 +187,14 @@ static inline bool is_mt7612(struct mt76x2_dev *dev)
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
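+ /* stay silent on radar channels until the DFS state machine marks them available */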
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
{
mt76x2_set_irq_mask(dev, 0, mask);
@@ -168,11 +205,29 @@ static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
mt76x2_set_irq_mask(dev, mask, 0);
}
+static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
+{
+ return mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 100);
+}
+
+static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
extern const struct ieee80211_ops mt76x2_ops;
+extern struct ieee80211_rate mt76x2_rates[12];
+
struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x2_dev *dev);
void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+void mt76x2_init_device(struct mt76x2_dev *dev);
irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
void mt76x2_phy_power_on(struct mt76x2_dev *dev);
@@ -186,7 +241,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
int mt76x2_phy_start(struct mt76x2_dev *dev);
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
struct cfg80211_chan_def *chandef);
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
void mt76x2_phy_calibrate(struct work_struct *work);
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
@@ -214,6 +269,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
u32 *tx_info);
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
void mt76x2_pre_tbtt_tasklet(unsigned long arg);
@@ -230,4 +286,45 @@ s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+int mt76x2_insert_hdr_pad(struct sk_buff *skb);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat);
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update);
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x2_dev *dev);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw);
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
new file mode 100644
index 000000000000..a2338ba139b4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ if (txq->sta) {
+ struct mt76x2_sta *sta;
+
+ sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+ mtxq->wcid = &sta->wcid;
+ } else {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+ }
+
+ mt76_txq_init(&dev->mt76, txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_txq_init);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
+
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76x2_txq_init(dev, sta->txq[i]);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ ewma_signal_init(&msta->rssi);
+
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_add);
+
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_wcid_free(dev->wcid_mask, idx);
+ mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
+
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76_txq_remove(&dev->mt76, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
+
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x2_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK; fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = true;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x2_set_key);
+
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue].hw_idx;
+
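+ /* the hardware takes contention windows as exponents
+ * (CW = 2^n - 1), hence the fls() conversion
+ */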
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
+
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u32 flags = 0;
+
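+ /* hardware filter bits drop frames while FIF_* flags request
+ * them, so set each bit only when its flag is absent
+ */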
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
+
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+ msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ void *rxwi = skb->data;
+
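+ /* MCU responses wake the thread waiting on the command
+ * queue; everything else is a regular RX frame
+ */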
+ if (q == MT_RXQ_MCU) {
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x2_rxwi));
+ if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(&dev->mt76, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
index 955ea3e692dd..77b5ff1be05f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -91,12 +91,20 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data)
struct mt76x2_dev *dev = file->private;
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ seq_printf(file, "allocated sequences:\t%d\n",
+ dfs_pd->seq_stats.seq_pool_len);
+ seq_printf(file, "used sequences:\t\t%d\n",
+ dfs_pd->seq_stats.seq_len);
+ seq_puts(file, "\n");
+
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
seq_printf(file, "engine: %d\n", i);
seq_printf(file, " hw pattern detected:\t%d\n",
dfs_pd->stats[i].hw_pattern);
seq_printf(file, " hw pulse discarded:\t%d\n",
dfs_pd->stats[i].hw_pulse_discarded);
+ seq_printf(file, " sw pattern detected:\t%d\n",
+ dfs_pd->stats[i].sw_pattern);
}
return 0;
@@ -115,6 +123,18 @@ static const struct file_operations fops_dfs_stat = {
.release = single_release,
};
+static int read_agc(struct seq_file *file, void *data)
+{
+ struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+ seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
+ seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
+ seq_printf(file, "false_cca: %d\n", dev->cal.false_cca);
+ seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust);
+
+ return 0;
+}
+
void mt76x2_init_debugfs(struct mt76x2_dev *dev)
{
struct dentry *dir;
@@ -130,4 +150,7 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev)
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
}
+EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
index f936dc9a5476..374cc655c11d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -159,6 +159,81 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
mt76_wr(dev, MT_BBP(DFS, 36), data);
}
+static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_sequence *seq)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ list_add(&seq->head, &dfs_pd->seq_pool);
+
+ dfs_pd->seq_stats.seq_pool_len++;
+ dfs_pd->seq_stats.seq_len--;
+}
+
+static
+struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->seq_pool)) {
+ seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
+ } else {
+ seq = list_first_entry(&dfs_pd->seq_pool,
+ struct mt76x2_dfs_sequence,
+ head);
+ list_del(&seq->head);
+ dfs_pd->seq_stats.seq_pool_len--;
+ }
+ if (seq)
+ dfs_pd->seq_stats.seq_len++;
+
+ return seq;
+}
+
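+/* Return how many times frac fits into val within +/- margin ticks,
+ * or 0 if val is not close to an integer multiple of frac.
+ */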
+static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
+{
+ int remainder, factor;
+
+ if (!frac)
+ return 0;
+
+ if (abs(val - frac) <= margin)
+ return 1;
+
+ factor = val / frac;
+ remainder = val % frac;
+
+ if (remainder > margin) {
+ if ((frac - remainder) <= margin)
+ factor++;
+ else
+ factor = 0;
+ }
+ return factor;
+}
+
+static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ int i;
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+ /* reset sw detector */
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ dfs_pd->event_rb[i].h_rb = 0;
+ dfs_pd->event_rb[i].t_rb = 0;
+ }
+
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ list_del_init(&seq->head);
+ mt76x2_dfs_seq_pool_put(dev, seq);
+ }
+}
+
static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
{
bool ret = false;
@@ -295,6 +370,256 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
return ret;
}
+static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ u32 data;
+
+ /* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2)
+ * 2nd: DFS_R37[21:0]: pulse time
+ * 3rd: DFS_R37[11:0]: pulse width
+ * 3rd: DFS_R37[25:16]: phase
+ * 4th: DFS_R37[12:0]: current pwr
+ * 4th: DFS_R37[21:16]: pwr stable counter
+ *
+ * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected
+ */
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ if (!MT_DFS_CHECK_EVENT(data))
+ return false;
+
+ event->engine = MT_DFS_EVENT_ENGINE(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->ts = MT_DFS_EVENT_TIMESTAMP(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->width = MT_DFS_EVENT_WIDTH(data);
+
+ return true;
+}
+
+static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ if (event->engine == 2) {
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+ u16 last_event_idx;
+ u32 delta_ts;
+
+ last_event_idx = mt76_decr(event_buff->t_rb,
+ MT_DFS_EVENT_BUFLEN);
+ delta_ts = event->ts - event_buff->data[last_event_idx].ts;
+ if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
+ event_buff->data[last_event_idx].width >= 200)
+ return false;
+ }
+ return true;
+}
+
+static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff;
+
+ /* add radar event to ring buffer */
+ event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+ event_buff->data[event_buff->t_rb] = *event;
+ event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
+
+ event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
+ if (event_buff->t_rb == event_buff->h_rb)
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+}
+
+static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event,
+ u16 cur_len)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sw_detector_params *sw_params;
+ u32 width_delta, width_sum, factor, cur_pri;
+ struct mt76x2_dfs_sequence seq, *seq_p;
+ struct mt76x2_dfs_event_rb *event_rb;
+ struct mt76x2_dfs_event *cur_event;
+ int i, j, end, pri;
+
+ event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+
+ i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
+ end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
+
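+ /* walk the ring buffer from newest to oldest, pairing the new
+ * event with each earlier one to seed candidate pulse sequences
+ */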
+ while (i != end) {
+ cur_event = &event_rb->data[i];
+ width_sum = event->width + cur_event->width;
+
+ sw_params = &dfs_pd->sw_dpd_params;
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ case NL80211_DFS_JP:
+ if (width_sum < 600)
+ width_delta = 8;
+ else
+ width_delta = width_sum >> 3;
+ break;
+ case NL80211_DFS_ETSI:
+ if (event->engine == 2)
+ width_delta = width_sum >> 6;
+ else if (width_sum < 620)
+ width_delta = 24;
+ else
+ width_delta = 8;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return -EINVAL;
+ }
+
+ pri = event->ts - cur_event->ts;
+ if (abs(event->width - cur_event->width) > width_delta ||
+ pri < sw_params->min_pri)
+ goto next;
+
+ if (pri > sw_params->max_pri)
+ break;
+
+ seq.pri = event->ts - cur_event->ts;
+ seq.first_ts = cur_event->ts;
+ seq.last_ts = event->ts;
+ seq.engine = event->engine;
+ seq.count = 2;
+
+ j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ while (j != end) {
+ cur_event = &event_rb->data[j];
+ cur_pri = event->ts - cur_event->ts;
+ factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq.first_ts = cur_event->ts;
+ seq.count++;
+ }
+
+ j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
+ }
+ if (seq.count <= cur_len)
+ goto next;
+
+ seq_p = mt76x2_dfs_seq_pool_get(dev);
+ if (!seq_p)
+ return -ENOMEM;
+
+ *seq_p = seq;
+ INIT_LIST_HEAD(&seq_p->head);
+ list_add(&seq_p->head, &dfs_pd->sequences);
+next:
+ i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ }
+ return 0;
+}
+
+static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sw_detector_params *sw_params;
+ struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ u16 max_seq_len = 0;
+ u32 factor, pri;
+
+ sw_params = &dfs_pd->sw_dpd_params;
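+ /* expire sequences that fell out of the observation window and
+ * extend the ones whose PRI this event still matches
+ */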
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
+ list_del_init(&seq->head);
+ mt76x2_dfs_seq_pool_put(dev, seq);
+ continue;
+ }
+
+ if (event->engine != seq->engine)
+ continue;
+
+ pri = event->ts - seq->last_ts;
+ factor = mt76x2_dfs_get_multiple(pri, seq->pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq->last_ts = event->ts;
+ seq->count++;
+ max_seq_len = max_t(u16, max_seq_len, seq->count);
+ }
+ }
+ return max_seq_len;
+}
+
+static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->sequences))
+ return false;
+
+ list_for_each_entry(seq, &dfs_pd->sequences, head) {
+ if (seq->count > MT_DFS_SEQUENCE_TH) {
+ dfs_pd->stats[seq->engine].sw_pattern++;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event event;
+ int i, seq_len;
+
+ /* disable debug mode */
+ mt76x2_dfs_set_capture_mode_ctrl(dev, false);
+ for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
+ if (!mt76x2_dfs_fetch_event(dev, &event))
+ break;
+
+ if (dfs_pd->last_event_ts > event.ts)
+ mt76x2_dfs_detector_reset(dev);
+ dfs_pd->last_event_ts = event.ts;
+
+ if (!mt76x2_dfs_check_event(dev, &event))
+ continue;
+
+ seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event);
+ mt76x2_dfs_create_sequence(dev, &event, seq_len);
+
+ mt76x2_dfs_queue_event(dev, &event);
+ }
+ mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+}
+
+static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff;
+ struct mt76x2_dfs_event *event;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ event_buff = &dfs_pd->event_rb[i];
+
+ while (event_buff->h_rb != event_buff->t_rb) {
+ event = &event_buff->data[event_buff->h_rb];
+
+ /* buffer is sorted by fetch time; stop at the first event still inside the window */
+ if (time_is_after_jiffies(event->fetch_ts +
+ MT_DFS_EVENT_WINDOW))
+ break;
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+ }
+ }
+}
+
static void mt76x2_dfs_tasklet(unsigned long arg)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
@@ -305,6 +630,24 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
if (test_bit(MT76_SCANNING, &dev->mt76.state))
goto out;
+ if (time_is_before_jiffies(dfs_pd->last_sw_check +
+ MT_DFS_SW_TIMEOUT)) {
+ bool radar_detected;
+
+ dfs_pd->last_sw_check = jiffies;
+
+ mt76x2_dfs_add_events(dev);
+ radar_detected = mt76x2_dfs_check_detection(dev);
+ if (radar_detected) {
+ /* sw detector rx radar pattern */
+ ieee80211_radar_detected(dev->mt76.hw);
+ mt76x2_dfs_detector_reset(dev);
+
+ return;
+ }
+ mt76x2_dfs_check_event_window(dev);
+ }
+
engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
if (!(engine_mask & 0xf))
goto out;
@@ -326,9 +669,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
ieee80211_radar_detected(dev->mt76.hw);
-
- /* reset hw detector */
- mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76x2_dfs_detector_reset(dev);
return;
}
@@ -340,6 +681,32 @@ out:
mt76x2_irq_enable(dev, MT_INT_GPTIMER);
}
+static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_ETSI:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
+ break;
+ case NL80211_DFS_JP:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ break;
+ }
+}
+
static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
{
u32 data;
@@ -462,6 +829,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->dfs_pd.region != NL80211_DFS_UNSET) {
+ mt76x2_dfs_init_sw_detector(dev);
mt76x2_dfs_set_bbp_params(dev);
/* enable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
@@ -486,7 +854,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
{
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ INIT_LIST_HEAD(&dfs_pd->sequences);
+ INIT_LIST_HEAD(&dfs_pd->seq_pool);
dfs_pd->region = NL80211_DFS_UNSET;
+ dfs_pd->last_sw_check = jiffies;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
(unsigned long)dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
index 8dbc783cc6bc..693f421bf096 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
@@ -33,6 +33,22 @@
#define MT_DFS_PKT_END_MASK 0
#define MT_DFS_CH_EN 0xf
+/* sw detector params */
+#define MT_DFS_EVENT_LOOP 64
+#define MT_DFS_SW_TIMEOUT (HZ / 20)
+#define MT_DFS_EVENT_WINDOW (HZ / 5)
+#define MT_DFS_SEQUENCE_WINDOW (200 * (1 << 20))
+#define MT_DFS_EVENT_TIME_MARGIN 2000
+#define MT_DFS_PRI_MARGIN 4
+#define MT_DFS_SEQUENCE_TH 6
+
+#define MT_DFS_FCC_MAX_PRI ((28570 << 1) + 1000)
+#define MT_DFS_FCC_MIN_PRI (3000 - 2)
+#define MT_DFS_JP_MAX_PRI ((80000 << 1) + 1000)
+#define MT_DFS_JP_MIN_PRI (28500 - 2)
+#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000)
+#define MT_DFS_ETSI_MIN_PRI (4500 - 20)
+
struct mt76x2_radar_specs {
u8 mode;
u16 avg_len;
@@ -50,6 +66,32 @@ struct mt76x2_radar_specs {
u16 pwr_jmp;
};
+#define MT_DFS_CHECK_EVENT(x) ((x) != GENMASK(31, 0))
+#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 2 : 0)
+#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0))
+#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0))
+struct mt76x2_dfs_event {
+ unsigned long fetch_ts;
+ u32 ts;
+ u16 width;
+ u8 engine;
+};
+
+#define MT_DFS_EVENT_BUFLEN 256
+struct mt76x2_dfs_event_rb {
+ struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN];
+ int h_rb, t_rb;
+};
+
+struct mt76x2_dfs_sequence {
+ struct list_head head;
+ u32 first_ts;
+ u32 last_ts;
+ u32 pri;
+ u16 count;
+ u8 engine;
+};
+
struct mt76x2_dfs_hw_pulse {
u8 engine;
u32 period;
@@ -58,9 +100,21 @@ struct mt76x2_dfs_hw_pulse {
u32 burst;
};
+struct mt76x2_dfs_sw_detector_params {
+ u32 min_pri;
+ u32 max_pri;
+ u32 pri_margin;
+};
+
struct mt76x2_dfs_engine_stats {
u32 hw_pattern;
u32 hw_pulse_discarded;
+ u32 sw_pattern;
+};
+
+struct mt76x2_dfs_seq_stats {
+ u32 seq_pool_len;
+ u32 seq_len;
};
struct mt76x2_dfs_pattern_detector {
@@ -69,6 +123,16 @@ struct mt76x2_dfs_pattern_detector {
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;
+ struct mt76x2_dfs_sw_detector_params sw_dpd_params;
+ struct mt76x2_dfs_event_rb event_rb[2];
+
+ struct list_head sequences;
+ struct list_head seq_pool;
+ struct mt76x2_dfs_seq_stats seq_stats;
+
+ unsigned long last_sw_check;
+ u32 last_event_ts;
+
struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
struct tasklet_struct dfs_tasklet;
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
index fd1ec4743e0b..6720a6a1313f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -66,27 +66,6 @@ mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
return 0;
}
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- void *rxwi = skb->data;
-
- if (q == MT_RXQ_MCU) {
- skb_queue_tail(&dev->mcu.res_q, skb);
- wake_up(&dev->mcu.wait);
- return;
- }
-
- skb_pull(skb, sizeof(struct mt76x2_rxwi));
- if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
- dev_kfree_skb(skb);
- return;
- }
-
- mt76_rx(&dev->mt76, q, skb);
-}
-
static int
mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
index e9d426bbf91a..da294558c268 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -19,34 +19,6 @@
#include "dma.h"
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_INFO_TX_BURST BIT(17)
-#define MT_TXD_INFO_80211 BIT(19)
-#define MT_TXD_INFO_TSO BIT(20)
-#define MT_TXD_INFO_CSO BIT(21)
-#define MT_TXD_INFO_WIV BIT(24)
-#define MT_TXD_INFO_QSEL GENMASK(26, 25)
-#define MT_TXD_INFO_DPORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
-#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
-#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
-#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
-#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
-#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
-#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
-
-/* MCU request message header */
-#define MT_MCU_MSG_LEN GENMASK(15, 0)
-#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
-#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
-#define MT_MCU_MSG_PORT GENMASK(29, 27)
-#define MT_MCU_MSG_TYPE GENMASK(31, 30)
-#define MT_MCU_MSG_TYPE_CMD BIT(30)
-
enum mt76x2_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -54,14 +26,4 @@ enum mt76x2_qsel {
MT_QSEL_EDCA_2,
};
-enum dma_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
-};
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
index 95d5f7d888f0..1753bcb36356 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -40,8 +40,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
return 0;
}
-static void
-mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
{
u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
@@ -58,6 +57,7 @@ mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
break;
}
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
static int
mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
@@ -415,6 +415,7 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
static s8
mt76x2_rate_power_val(u8 val)
@@ -482,6 +483,7 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
val >>= 8;
t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
{
@@ -493,6 +495,7 @@ int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
return ret;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
static void
mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
@@ -600,6 +603,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
t->delta_bw40 = mt76x2_rate_power_val(bw40);
t->delta_bw80 = mt76x2_rate_power_val(bw80);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
{
@@ -632,6 +636,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
{
@@ -642,6 +647,7 @@ bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
else
return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
}
+EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
int mt76x2_eeprom_init(struct mt76x2_dev *dev)
{
@@ -658,3 +664,6 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
index aa0b0c040375..0f3e4d2f4fee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -155,6 +155,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
static inline bool
mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 79ab93613e06..b814391f79ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -19,39 +19,6 @@
#include "mt76x2_eeprom.h"
#include "mt76x2_mcu.h"
-struct mt76x2_reg_pair {
- u32 reg;
- u32 value;
-};
-
-static bool
-mt76x2_wait_for_mac(struct mt76x2_dev *dev)
-{
- int i;
-
- for (i = 0; i < 500; i++) {
- switch (mt76_rr(dev, MT_MAC_CSR0)) {
- case 0:
- case ~0:
- break;
- default:
- return true;
- }
- usleep_range(5000, 10000);
- }
-
- return false;
-}
-
-static bool
-wait_for_wpdma(struct mt76x2_dev *dev)
-{
- return mt76_poll(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
- 0, 1000);
-}
-
static void
mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
{
@@ -71,107 +38,6 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
}
static void
-mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
- const struct mt76x2_reg_pair *data, int len)
-{
- while (len > 0) {
- mt76_wr(dev, data->reg, data->value);
- len--;
- data++;
- }
-}
-
-static void
-mt76_write_mac_initvals(struct mt76x2_dev *dev)
-{
-#define DEFAULT_PROT_CFG \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
- MT_PROT_CFG_RTS_THRESH)
-
-#define DEFAULT_PROT_CFG_20 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
-
-#define DEFAULT_PROT_CFG_40 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
-
- static const struct mt76x2_reg_pair vals[] = {
- /* Copied from MediaTek reference source */
- { MT_PBF_SYS_CTRL, 0x00080c00 },
- { MT_PBF_CFG, 0x1efebcff },
- { MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_MAC_SYS_CTRL, 0x0000000c },
- { MT_MAX_LEN_CFG, 0x003e3f00 },
- { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
- { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
- { MT_XIFS_TIME_CFG, 0x33a40d0a },
- { MT_BKOFF_SLOT_CFG, 0x00000209 },
- { MT_TBTT_SYNC_CFG, 0x00422010 },
- { MT_PWR_PIN_CFG, 0x00000000 },
- { 0x1238, 0x001700c8 },
- { MT_TX_SW_CFG0, 0x00101001 },
- { MT_TX_SW_CFG1, 0x00010000 },
- { MT_TX_SW_CFG2, 0x00000000 },
- { MT_TXOP_CTRL_CFG, 0x0400583f },
- { MT_TX_RTS_CFG, 0x00100020 },
- { MT_TX_TIMEOUT_CFG, 0x000a2290 },
- { MT_TX_RETRY_CFG, 0x47f01f0f },
- { MT_EXP_ACK_TIME, 0x002c00dc },
- { MT_TX_PROT_CFG6, 0xe3f42004 },
- { MT_TX_PROT_CFG7, 0xe3f42084 },
- { MT_TX_PROT_CFG8, 0xe3f42104 },
- { MT_PIFS_TX_CFG, 0x00060fff },
- { MT_RX_FILTR_CFG, 0x00015f97 },
- { MT_LEGACY_BASIC_RATE, 0x0000017f },
- { MT_HT_BASIC_RATE, 0x00004003 },
- { MT_PN_PAD_MODE, 0x00000003 },
- { MT_TXOP_HLDR_ET, 0x00000002 },
- { 0xa44, 0x00000000 },
- { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
- { MT_TSO_CTRL, 0x00000000 },
- { MT_AUX_CLK_CFG, 0x00000000 },
- { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
- { MT_TX_ALC_CFG_4, 0x00000000 },
- { MT_TX_ALC_VGA3, 0x00000000 },
- { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_8, 0x0000003a },
- { MT_TX_PWR_CFG_9, 0x0000003a },
- { MT_EFUSE_CTRL, 0x0000d000 },
- { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
- { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
- { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
- { MT_TX_SW_CFG3, 0x00000004 },
- { MT_HT_FBK_TO_LEGACY, 0x00001818 },
- { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
- { MT_PROT_AUTO_TX_CFG, 0x00830083 },
- { MT_HT_CTRL_CFG, 0x000001ff },
- };
- struct mt76x2_reg_pair prot_vals[] = {
- { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- };
-
- mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
- mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
-}
-
-static void
mt76x2_fixup_xtal(struct mt76x2_dev *dev)
{
u16 eep_val;
@@ -360,41 +226,6 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
- bool stopped = false;
- u32 rts_cfg;
- int i;
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
- rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
- /* Wait for MAC to become idle */
- for (i = 0; i < 300; i++) {
- if ((mt76_rr(dev, MT_MAC_STATUS) &
- (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
- mt76_rr(dev, MT_BBP(IBI, 12))) {
- udelay(1);
- continue;
- }
-
- stopped = true;
- break;
- }
-
- if (force && !stopped) {
- mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
- mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
- }
-
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-
void mt76x2_mac_resume(struct mt76x2_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
@@ -498,45 +329,6 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
MT_TX_TIMEOUT_CFG_ACKTO, ackto);
}
-static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
-{
- u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- if (enable)
- val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
- else
- val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-}
-
-static void
-mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
-{
- u32 val;
-
- val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
-
- if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
- val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- }
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- mt76x2_set_wlan_state(dev, enable);
-}
-
int mt76x2_init_hardware(struct mt76x2_dev *dev)
{
static const u16 beacon_offsets[16] = {
@@ -567,11 +359,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
(unsigned long) dev);
- dev->chainmask = 0x202;
- dev->global_wcid.idx = 255;
- dev->global_wcid.hw_key_idx = -1;
- dev->slottime = 9;
-
val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
MT_WPDMA_GLO_CFG_BIG_ENDIAN |
@@ -663,34 +450,6 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
mt76x2_dfs_set_domain(dev, request->dfs_region);
}
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
-}
-
-static struct ieee80211_rate mt76x2_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(0, 60),
- OFDM_RATE(1, 90),
- OFDM_RATE(2, 120),
- OFDM_RATE(3, 180),
- OFDM_RATE(4, 240),
- OFDM_RATE(5, 360),
- OFDM_RATE(6, 480),
- OFDM_RATE(7, 540),
-};
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@@ -767,37 +526,6 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
mt76x2_led_set_config(mt76, 0xff, 0);
}
-static void
-mt76x2_init_txpower(struct mt76x2_dev *dev,
- struct ieee80211_supported_band *sband)
-{
- struct ieee80211_channel *chan;
- struct mt76x2_tx_power_info txp;
- struct mt76_rate_power t = {};
- int target_power;
- int i;
-
- for (i = 0; i < sband->n_channels; i++) {
- chan = &sband->channels[i];
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- target_power = max_t(int, (txp.chain[0].target_power +
- txp.chain[0].delta),
- (txp.chain[1].target_power +
- txp.chain[1].delta));
-
- mt76x2_get_rate_power(dev, &t, chan);
-
- chan->max_power = mt76x2_get_max_rate_power(&t) +
- target_power;
- chan->max_power /= 2;
-
- /* convert to combined output power on 2x2 devices */
- chan->max_power += 3;
- }
-}
-
int mt76x2_register_device(struct mt76x2_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -812,20 +540,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
return -ENOMEM;
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+ mt76x2_init_device(dev);
ret = mt76x2_init_hardware(dev);
if (ret)
return ret;
- hw->queues = 4;
- hw->max_rates = 1;
- hw->max_report_rates = 7;
- hw->max_rate_tries = 1;
- hw->extra_tx_headroom = 2;
-
- hw->sta_data_size = sizeof(struct mt76x2_sta);
- hw->vif_data_size = sizeof(struct mt76x2_vif);
-
for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
u8 *addr = dev->macaddr_list[i].addr;
@@ -845,16 +568,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
wiphy->reg_notifier = mt76x2_regd_notifier;
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
- ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
- INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
mt76x2_dfs_init_detector(dev);
@@ -862,9 +584,6 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
- /* init antenna configuration */
- dev->mt76.antenna_mask = 3;
-
ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
ARRAY_SIZE(mt76x2_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
new file mode 100644
index 000000000000..324b2a4b8b67
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+struct ieee80211_rate mt76x2_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x2_rates);
+
+struct mt76x2_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+ const struct mt76x2_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_wr(dev, data->reg, data->value);
+ len--;
+ data++;
+ }
+}
+
+void mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+ static const struct mt76x2_reg_pair vals[] = {
+ /* Copied from MediaTek reference source */
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x1efebcff },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAX_LEN_CFG, 0x003e3f00 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
+ { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
+ { MT_XIFS_TIME_CFG, 0x33a40d0a },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TBTT_SYNC_CFG, 0x00422010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { 0x1238, 0x001700c8 },
+ { MT_TX_SW_CFG0, 0x00101001 },
+ { MT_TX_SW_CFG1, 0x00010000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_TXOP_CTRL_CFG, 0x0400583f },
+ { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2290 },
+ { MT_TX_RETRY_CFG, 0x47f01f0f },
+ { MT_EXP_ACK_TIME, 0x002c00dc },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_PIFS_TX_CFG, 0x00060fff },
+ { MT_RX_FILTR_CFG, 0x00015f97 },
+ { MT_LEGACY_BASIC_RATE, 0x0000017f },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_AUX_CLK_CFG, 0x00000000 },
+ { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
+ { MT_TX_ALC_CFG_4, 0x00000000 },
+ { MT_TX_ALC_VGA3, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_8, 0x0000003a },
+ { MT_TX_PWR_CFG_9, 0x0000003a },
+ { MT_EFUSE_CTRL, 0x0000d000 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
+ { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
+ { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
+ { MT_TX_SW_CFG3, 0x00000004 },
+ { MT_HT_FBK_TO_LEGACY, 0x00001818 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+ { MT_PROT_AUTO_TX_CFG, 0x00830083 },
+ { MT_HT_CTRL_CFG, 0x000001ff },
+ };
+ struct mt76x2_reg_pair prot_vals[] = {
+ { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
+ { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM },
+ { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ };
+
+ mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+ mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ hw->sta_data_size = sizeof(struct mt76x2_sta);
+ hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ dev->chainmask = 0x202;
+ dev->global_wcid.idx = 255;
+ dev->global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ /* init antenna configuration */
+ dev->mt76.antenna_mask = 3;
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_device);
+
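+/* Derive max_power for each channel from EEPROM data: the per-chain
+ * target power and per-rate offsets appear to be stored in half-dB
+ * steps, hence the division by two before the +3 dB adjustment for
+ * combining two TX chains.
+ */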
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76_rate_power t = {};
+ int target_power;
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ target_power = max_t(int, (txp.chain[0].target_power +
+ txp.chain[0].delta),
+ (txp.chain[1].target_power +
+ txp.chain[1].delta));
+
+ mt76x2_get_rate_power(dev, &t, chan);
+
+ chan->max_power = mt76x2_get_max_rate_power(&t) +
+ target_power;
+ chan->max_power /= 2;
+
+ /* convert to combined output power on 2x2 devices */
+ chan->max_power += 3;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index b49aea4da2d6..23cf437d14f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -28,500 +28,12 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
get_unaligned_le16(addr + 4));
}
-static int
-mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (idx >= 8)
- idx = 0;
-
- if (status->band == NL80211_BAND_2GHZ)
- idx += 4;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8) {
- idx -= 8;
- status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- }
-
- if (idx >= 4)
- idx = 0;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- status->rate_idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- status->encoding = RX_ENC_VHT;
- status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
- status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_LDPC)
- status->enc_flags |= RX_ENC_FLAG_LDPC;
-
- if (rate & MT_RXWI_RATE_SGI)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- if (rate & MT_RXWI_RATE_STBC)
- status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- status->bw = RATE_INFO_BW_40;
- break;
- case MT_PHY_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static __le16
-mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
- u16 rateval;
- u8 phy, rate_idx;
- u8 nss = 1;
- u8 bw = 0;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 4);
- phy = MT_PHY_TYPE_VHT;
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- bw = 2;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 3);
- phy = MT_PHY_TYPE_HT;
- if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
- phy = MT_PHY_TYPE_HT_GF;
- if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else {
- const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
- u16 val;
-
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- val = r->hw_value_short;
- else
- val = r->hw_value;
-
- phy = val >> 8;
- rate_idx = val & 0xff;
- bw = 0;
- }
-
- rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
- rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
- rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rateval |= MT_RXWI_RATE_SGI;
-
- *nss_val = nss;
- return cpu_to_le16(rateval);
-}
-
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
-{
- u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
- u32 bit = MT_WCID_DROP_MASK(idx);
-
- /* prevent unnecessary writes */
- if ((val & bit) != (bit * drop))
- mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
-}
-
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
-{
- spin_lock_bh(&dev->mt76.lock);
- wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
- wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->mt76.lock);
-}
-
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
- u16 txwi_flags = 0;
- u8 nss;
- s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8];
-
- memset(txwi, 0, sizeof(*txwi));
-
- if (wcid)
- txwi->wcid = wcid->idx;
- else
- txwi->wcid = 0xff;
-
- txwi->pktid = 1;
-
- if (wcid && wcid->sw_iv && key) {
- u64 pn = atomic64_inc_return(&key->tx_pn);
- ccmp_pn[0] = pn;
- ccmp_pn[1] = pn >> 8;
- ccmp_pn[2] = 0;
- ccmp_pn[3] = 0x20 | (key->keyidx << 6);
- ccmp_pn[4] = pn >> 16;
- ccmp_pn[5] = pn >> 24;
- ccmp_pn[6] = pn >> 32;
- ccmp_pn[7] = pn >> 40;
- txwi->iv = *((__le32 *)&ccmp_pn[0]);
- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
- }
-
- spin_lock_bh(&dev->mt76.lock);
- if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
- } else {
- txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
- max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
- }
- spin_unlock_bh(&dev->mt76.lock);
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
- max_txpwr_adj);
- txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E4)
- txwi->txstream = 0x13;
- else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
- !(txwi->rate & cpu_to_le16(rate_ht_mask)))
- txwi->txstream = 0x93;
-
- if (info->flags & IEEE80211_TX_CTL_LDPC)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
- if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- txwi_flags |= MT_TXWI_FLAGS_MMPS;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- txwi->pktid |= MT_TXWI_PKTID_PROBE;
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
- u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
- ba_size <<= sta->ht_cap.ampdu_factor;
- ba_size = min_t(int, 63, ba_size - 1);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- ba_size = 0;
- txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
- txwi_flags |= MT_TXWI_FLAGS_AMPDU |
- FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
- }
-
- if (ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control))
- txwi_flags |= MT_TXWI_FLAGS_TS;
-
- txwi->flags |= cpu_to_le16(txwi_flags);
- txwi->len_ctl = cpu_to_le16(skb->len);
-}
-
-static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
-{
- int hdrlen;
-
- if (!len)
- return;
-
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + len, skb->data, hdrlen);
- skb_pull(skb, len);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast)
-{
- struct mt76x2_sta *sta;
- struct mt76_wcid *wcid;
-
- if (idx >= ARRAY_SIZE(dev->wcid))
- return NULL;
-
- wcid = rcu_dereference(dev->wcid[idx]);
- if (unicast || !wcid)
- return wcid;
-
- sta = container_of(wcid, struct mt76x2_sta, wcid);
- return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
- struct mt76x2_rxwi *rxwi = rxi;
- u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
- u32 ctl = le32_to_cpu(rxwi->ctl);
- u16 rate = le16_to_cpu(rxwi->rate);
- u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
- bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0;
- u8 pn_len;
- u8 wcid;
- int len;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
-
- if (rxinfo & MT_RXINFO_L2PAD)
- pad_len += 2;
-
- if (rxinfo & MT_RXINFO_DECRYPT) {
- status->flag |= RX_FLAG_DECRYPTED;
- status->flag |= RX_FLAG_MMIC_STRIPPED;
- status->flag |= RX_FLAG_MIC_STRIPPED;
- status->flag |= RX_FLAG_IV_STRIPPED;
- }
-
- wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
- status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast);
-
- len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
- pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
- if (pn_len) {
- int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
- u8 *data = skb->data + offset;
-
- status->iv[0] = data[7];
- status->iv[1] = data[6];
- status->iv[2] = data[5];
- status->iv[3] = data[4];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- /*
- * Driver CCMP validation can't deal with fragments.
- * Let mac80211 take care of it.
- */
- if (rxinfo & MT_RXINFO_FRAG) {
- status->flag &= ~RX_FLAG_IV_STRIPPED;
- } else {
- pad_len += pn_len << 2;
- len -= pn_len << 2;
- }
- }
-
- mt76x2_remove_hdr_pad(skb, pad_len);
-
- if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
- status->aggr = true;
-
- if (WARN_ON_ONCE(len > skb->len))
- return -EINVAL;
-
- pskb_trim(skb, len);
- status->chains = BIT(0) | BIT(1);
- status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
- status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
- status->signal = max(status->chain_signal[0], status->chain_signal[1]);
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
-
- status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
- status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
-
- return mt76x2_mac_process_rate(status, rate);
-}
-
-static int
-mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
- enum nl80211_band band)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- txrate->idx = 0;
- txrate->flags = 0;
- txrate->count = 1;
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (band == NL80211_BAND_2GHZ)
- idx += 4;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8)
- idx -= 8;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
- case MT_PHY_TYPE_HT:
- txrate->flags |= IEEE80211_TX_RC_MCS;
- txrate->idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
- txrate->idx = idx;
- break;
- default:
- return -EINVAL;
- }
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- break;
- case MT_PHY_BW_80:
- txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_SGI)
- txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-
- return 0;
-}
-
-static void
-mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
- struct ieee80211_tx_info *info,
- struct mt76x2_tx_status *st, int n_frames)
-{
- struct ieee80211_tx_rate *rate = info->status.rates;
- int cur_idx, last_rate;
- int i;
-
- if (!n_frames)
- return;
-
- last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
- mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->mt76.chandef.chan->band);
- if (last_rate < IEEE80211_TX_MAX_RATES - 1)
- rate[last_rate + 1].idx = -1;
-
- cur_idx = rate[last_rate].idx + st->retry;
- for (i = 0; i <= last_rate; i++) {
- rate[i].flags = rate[last_rate].flags;
- rate[i].idx = max_t(int, 0, cur_idx - i);
- rate[i].count = 1;
- }
-
- if (last_rate > 0)
- rate[last_rate - 1].count = st->retry + 1 - last_rate;
-
- info->status.ampdu_len = n_frames;
- info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
- if (st->pktid & MT_TXWI_PKTID_PROBE)
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
- if (st->aggr)
- info->flags |= IEEE80211_TX_CTL_AMPDU |
- IEEE80211_TX_STAT_AMPDU;
-
- if (!st->ack_req)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else if (st->success)
- info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-static void
-mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
- u8 *update)
-{
- struct ieee80211_tx_info info = {};
- struct ieee80211_sta *sta = NULL;
- struct mt76_wcid *wcid = NULL;
- struct mt76x2_sta *msta = NULL;
-
- rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
- if (wcid) {
- void *priv;
-
- priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
- sta = container_of(priv, struct ieee80211_sta,
- drv_priv);
- }
-
- if (msta && stat->aggr) {
- u32 stat_val, stat_cache;
-
- stat_val = stat->rate;
- stat_val |= ((u32) stat->retry) << 16;
- stat_cache = msta->status.rate;
- stat_cache |= ((u32) msta->status.retry) << 16;
-
- if (*update == 0 && stat_val == stat_cache &&
- stat->wcid == msta->status.wcid && msta->n_frames < 32) {
- msta->n_frames++;
- goto out;
- }
-
- mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
-
- msta->status = *stat;
- msta->n_frames = 1;
- *update = 0;
- } else {
- mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
- *update = 1;
- }
-
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
-
-out:
- rcu_read_unlock();
-}
-
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
struct mt76x2_tx_status stat = {};
unsigned long flags;
u8 update = 1;
+ bool ret;
if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
return;
@@ -529,26 +41,13 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
trace_mac_txstat_poll(dev);
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- u32 stat1, stat2;
-
spin_lock_irqsave(&dev->irq_lock, flags);
- stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
- if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
- spin_unlock_irqrestore(&dev->irq_lock, flags);
- break;
- }
-
+ ret = mt76x2_mac_load_tx_status(dev, &stat);
spin_unlock_irqrestore(&dev->irq_lock, flags);
- stat.valid = 1;
- stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
- stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
- stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
- stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
- stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
- stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
- stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+ if (!ret)
+ break;
+
trace_mac_txstat_fetch(dev, &stat);
if (!irq) {
@@ -597,104 +96,6 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
dev_kfree_skb_any(e->skb);
}
-static enum mt76x2_cipher_type
-mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
- memset(key_data, 0, 32);
- if (!key)
- return MT_CIPHER_NONE;
-
- if (key->keylen > 32)
- return MT_CIPHER_NONE;
-
- memcpy(key_data, key->key, key->keylen);
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
- default:
- return MT_CIPHER_NONE;
- }
-}
-
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
- struct mt76_wcid_addr addr = {};
- u32 attr;
-
- attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
- mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
- mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
-
- if (idx >= 128)
- return;
-
- if (mac)
- memcpy(addr.macaddr, mac, ETH_ALEN);
-
- mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
-}
-
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u8 iv_data[8];
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
- mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
- memset(iv_data, 0, sizeof(iv_data));
- if (key) {
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
- iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP)
- iv_data[3] |= 0x20;
- }
-
- mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
- return 0;
-}
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u32 val;
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
- val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
- val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
- mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
- mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
- sizeof(key_data));
-
- return 0;
-}
-
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
@@ -704,7 +105,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
return -ENOSPC;
- mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+ mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
@@ -839,3 +240,33 @@ void mt76x2_mac_work(struct work_struct *work)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
+
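+/* A threshold of ~0 disables RTS/CTS protection: the CTRL and
+ * RTS_THRESH bits are then cleared in every protection register
+ * instead of set.
+ */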
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
+{
+ u32 data = 0;
+
+ if (val != ~0)
+ data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+ MT_PROT_CFG_RTS_THRESH;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+ mt76_rmw(dev, MT_CCK_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_OFDM_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG6,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG7,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG8,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
index c048cd06df6b..5af0107ba748 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -166,7 +166,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
void *rxi);
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+ struct ieee80211_sta *sta, int len);
void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
new file mode 100644
index 000000000000..6542644bc325
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
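+/* Stop MAC TX/RX and wait up to ~300us for pending frames to drain.
+ * When @force is set and the MAC fails to go idle, pulse the BBP reset
+ * bits as a last resort. The RTS retry limit is cleared across the
+ * wait, presumably so pending frames are not retried during the drain,
+ * and restored afterwards.
+ */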
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ udelay(1);
+ continue;
+ }
+
+ stopped = true;
+ break;
+ }
+
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
+
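+/* Fetch one entry from the TX status FIFO. Reading MT_TX_STAT_FIFO
+ * appears to pop the entry, which would explain why the EXT word is
+ * read first. Returns false once the FIFO is empty.
+ */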
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat)
+{
+ u32 stat1, stat2;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ if (!stat->valid)
+ return false;
+
+ stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
+
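+/* Reconstruct the mac80211 rate table from a single reported rate plus
+ * a retry count: the final rate comes from hardware, earlier slots are
+ * back-filled one index apart, on the assumption that the hardware
+ * falls back one rate index per retry.
+ */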
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x2_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+ rate[last_rate].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
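+/* Report TX status to mac80211. Consecutive A-MPDU status entries with
+ * an identical rate/retry signature are coalesced (up to 32 frames) and
+ * flushed as a single ieee80211_tx_status_noskb() call.
+ */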
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x2_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP)
+ iv_data[3] |= 0x20;
+ }
+
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
+
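+/* Encode a mac80211 TX rate into the hardware rate word: PHY type,
+ * rate/MCS index, bandwidth and short-GI flag, plus the derived stream
+ * count for the caller.
+ */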
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ spin_lock_bh(&dev->mt76.lock);
+ wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
+
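+/* Fill the TXWI descriptor for a frame: rate selection (per-WCID cached
+ * rate vs. the rate-control entry), TX power adjustment, optional
+ * software-generated CCMP PN, and A-MPDU/ack/sequence control flags.
+ * @len is now passed explicitly rather than derived from skb->len, for
+ * callers whose on-air length differs from the skb length.
+ */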
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8];
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((__le32 *)&ccmp_pn[0]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+ }
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* prevent unnecessary writes */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
+
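+/* Translate the RXWI rate word into mac80211 RX status fields: rate
+ * index (with the 2 GHz OFDM offset), encoding, bandwidth and the
+ * LDPC/SGI/STBC flags. Unknown PHY types are rejected with -EINVAL.
+ */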
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
+
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
+static struct mt76x2_sta *
+mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
+{
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (!wcid)
+ return NULL;
+
+ return container_of(wcid, struct mt76x2_sta, wcid);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
+ bool unicast)
+{
+ if (!sta)
+ return NULL;
+
+ if (unicast)
+ return &sta->wcid;
+ else
+ return &sta->vif->group_wcid;
+}
+
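+/* Main RX path: strip the L2 pad and (for unfragmented frames) the
+ * security PN, trim the skb to the MPDU length from the RXWI, fill in
+ * signal/rate status and update the per-station signal average.
+ */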
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76x2_rxwi *rxwi = rxi;
+ struct mt76x2_sta *sta;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int pad_len = 0;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return -EINVAL;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ sta = mt76x2_rx_get_sta(dev, wcid);
+ status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x2_remove_hdr_pad(skb, pad_len);
+
+ if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+ status->aggr = true;
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+ status->chains = BIT(0) | BIT(1);
+ status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
+ status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
+ status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ if (sta) {
+ ewma_signal_add(&sta->rssi, status->signal);
+ sta->inactive_count = 0;
+ }
+
+ return mt76x2_mac_process_rate(status, rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index ce90ff999b49..680a89f8aa87 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -53,30 +53,6 @@ mt76x2_stop(struct ieee80211_hw *hw)
mutex_unlock(&dev->mutex);
}
-static void
-mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *) txq->drv_priv;
- if (txq->sta) {
- struct mt76x2_sta *sta;
-
- sta = (struct mt76x2_sta *) txq->sta->drv_priv;
- mtxq->wcid = &sta->wcid;
- } else {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
- }
-
- mt76_txq_init(&dev->mt76, txq);
-}
-
static int
mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -111,14 +87,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
-static void
-mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct mt76x2_dev *dev = hw->priv;
-
- mt76_txq_remove(&dev->mt76, vif->txq);
-}
-
static int
mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
{
@@ -194,39 +162,6 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
static void
-mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
-{
- struct mt76x2_dev *dev = hw->priv;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
-
- mutex_lock(&dev->mutex);
-
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
- MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
- MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
- MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
- MT_RX_FILTR_CFG_CTS |
- MT_RX_FILTR_CFG_CFEND |
- MT_RX_FILTR_CFG_CFACK |
- MT_RX_FILTR_CFG_BA |
- MT_RX_FILTR_CFG_CTRL_RSV);
- MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
- *total_flags = flags;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
- mutex_unlock(&dev->mutex);
-}
-
-static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
@@ -238,10 +173,13 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BSSID)
mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
- if (changed & BSS_CHANGED_BEACON_INT)
+ if (changed & BSS_CHANGED_BEACON_INT) {
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
+ dev->beacon_int = info->beacon_int;
+ dev->tbtt_count = 0;
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
tasklet_disable(&dev->pre_tbtt_tasklet);
@@ -260,66 +198,6 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_unlock(&dev->mutex);
}
-static int
-mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- int ret = 0;
- int idx = 0;
- int i;
-
- mutex_lock(&dev->mutex);
-
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
-
- msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.hw_key_idx = -1;
- mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
- mt76x2_mac_wcid_set_drop(dev, idx, false);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76x2_txq_init(dev, sta->txq[i]);
-
- if (vif->type == NL80211_IFTYPE_AP)
- set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
-
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-
-out:
- mutex_unlock(&dev->mutex);
-
- return ret;
-}
-
-static int
-mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- int idx = msta->wcid.idx;
- int i;
-
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(&dev->mt76, sta->txq[i]);
- mt76x2_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->wcid_mask, idx);
- mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mutex);
-
- return 0;
-}
-
void
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
@@ -331,117 +209,6 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mt76x2_mac_wcid_set_drop(dev, idx, ps);
}
-static int
-mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- struct mt76x2_sta *msta;
- struct mt76_wcid *wcid;
- int idx = key->keyidx;
- int ret;
-
- /* fall back to sw encryption for unsupported ciphers */
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- /*
- * The hardware does not support per-STA RX GTK, fall back
- * to software mode for these.
- */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
- key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
- wcid = msta ? &msta->wcid : &mvif->group_wcid;
-
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
- wcid->sw_iv = true;
- }
- } else {
- if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- wcid->sw_iv = true;
- }
-
- key = NULL;
- }
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
- if (!msta) {
- if (key || wcid->hw_key_idx == idx) {
- ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
- if (ret)
- return ret;
- }
-
- return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
- }
-
- return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-
-static int
-mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct mt76x2_dev *dev = hw->priv;
- u8 cw_min = 5, cw_max = 10, qid;
- u32 val;
-
- qid = dev->mt76.q_tx[queue].hw_idx;
-
- if (params->cw_min)
- cw_min = fls(params->cw_min);
- if (params->cw_max)
- cw_max = fls(params->cw_max);
-
- val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
- FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
- FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
- FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
-
- val = mt76_rr(dev, MT_WMM_TXOP(qid));
- val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
- val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
- mt76_wr(dev, MT_WMM_TXOP(qid), val);
-
- val = mt76_rr(dev, MT_WMM_AIFSN);
- val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
- val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_AIFSN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMIN);
- val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
- val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMIN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMAX);
- val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
- val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMAX, val);
-
- return 0;
-}
-
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
@@ -480,75 +247,6 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
return 0;
}
-static int
-mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
-{
- enum ieee80211_ampdu_mlme_action action = params->action;
- struct ieee80211_sta *sta = params->sta;
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_txq *txq = sta->txq[params->tid];
- u16 tid = params->tid;
- u16 *ssn = &params->ssn;
- struct mt76_txq *mtxq;
-
- if (!txq)
- return -EINVAL;
-
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
- mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
- BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- }
-
- return 0;
-}
-
-static void
-mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
- struct ieee80211_tx_rate rate = {};
-
- if (!rates)
- return;
-
- rate.idx = rates->rate[0].idx;
- rate.flags = rates->rate[0].flags;
- mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
- msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
-}
-
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
@@ -600,6 +298,21 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
return 0;
}
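+/* mac80211 passes ~0 to disable the RTS threshold; any other value must
+ * fit the 16-bit hardware threshold field.
+ */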
+static int
+mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ if (val != ~0 && val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+ mt76x2_mac_set_tx_protection(dev, val);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x2_tx,
.start = mt76x2_start,
@@ -626,5 +339,6 @@ const struct ieee80211_ops mt76x2_ops = {
.set_tim = mt76x2_set_tim,
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76x2_get_antenna,
+ .set_rts_threshold = mt76x2_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
index dfd36d736b06..743da57760dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -23,23 +23,6 @@
#include "mt76x2_dma.h"
#include "mt76x2_eeprom.h"
-struct mt76x2_fw_header {
- __le32 ilm_len;
- __le32 dlm_len;
- __le16 build_ver;
- __le16 fw_ver;
- u8 pad[4];
- char build_time[16];
-};
-
-struct mt76x2_patch_header {
- char build_time[16];
- char platform[4];
- char hw_version[4];
- char patch_version[4];
- u8 pad[2];
-};
-
static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
index d7a7e83262ce..e40293f21417 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -146,6 +146,23 @@ struct mt76x2_tssi_comp {
u8 offset1;
} __packed __aligned(4);
+struct mt76x2_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x2_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
u32 param);
int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
index c1c38ca3330a..84c96c0415b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -19,172 +19,6 @@
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
-static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain -= offset / 2;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
-}
-
-static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain += offset;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
-}
-
-static void
-mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
-{
- s8 *gain_adj = dev->cal.rx.high_gain;
-
- mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
- mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
-
- mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
- mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
-}
-
-static u32
-mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
-{
- u32 val = 0;
-
- val |= (v1 & (BIT(6) - 1)) << 0;
- val |= (v2 & (BIT(6) - 1)) << 8;
- val |= (v3 & (BIT(6) - 1)) << 16;
- val |= (v4 & (BIT(6) - 1)) << 24;
- return val;
-}
-
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
-{
- struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
-
- rssi += cal->rssi_offset[chain];
- rssi -= cal->lna_gain;
-
- return rssi;
-}
-
-static void
-mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- r->all[i] += offset;
-}
-
-static void
-mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- if (r->all[i] > limit)
- r->all[i] = limit;
-}
-
-static int
-mt76x2_get_min_rate_power(struct mt76_rate_power *r)
-{
- int i;
- s8 ret = 0;
-
- for (i = 0; i < sizeof(r->all); i++) {
- if (!r->all[i])
- continue;
-
- if (ret)
- ret = min(ret, r->all[i]);
- else
- ret = r->all[i];
- }
-
- return ret;
-}
-
-
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
-{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- struct mt76x2_tx_power_info txp;
- int txp_0, txp_1, delta = 0;
- struct mt76_rate_power t = {};
- int base_power, gain;
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- if (width == NL80211_CHAN_WIDTH_40)
- delta = txp.delta_bw40;
- else if (width == NL80211_CHAN_WIDTH_80)
- delta = txp.delta_bw80;
-
- mt76x2_get_rate_power(dev, &t, chan);
- mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
- mt76x2_limit_rate_power(&t, dev->txpower_conf);
- dev->txpower_cur = mt76x2_get_max_rate_power(&t);
-
- base_power = mt76x2_get_min_rate_power(&t);
- delta += base_power - txp.chain[0].target_power;
- txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
- txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
-
- gain = min(txp_0, txp_1);
- if (gain < 0) {
- base_power -= gain;
- txp_0 -= gain;
- txp_1 -= gain;
- } else if (gain > 0x2f) {
- base_power -= gain - 0x2f;
- txp_0 = 0x2f;
- txp_1 = 0x2f;
- }
-
- mt76x2_add_rate_power_offset(&t, -base_power);
- dev->target_power = txp.chain[0].target_power;
- dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
- dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
- dev->rate_power = t;
-
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
-
- mt76_wr(dev, MT_TX_PWR_CFG_0,
- mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_1,
- mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_2,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
- mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
- mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
- mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
- mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
-}
-
-static bool
-mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
- return ((chan->flags & IEEE80211_CHAN_RADAR) &&
- chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
static bool
mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
{
@@ -243,140 +77,6 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
dev->cal.channel_cal_done = true;
}
-static void
-mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
-{
- u32 pa_mode[2];
- u32 pa_mode_adj;
-
- if (band == NL80211_BAND_2GHZ) {
- pa_mode[0] = 0x010055ff;
- pa_mode[1] = 0x00550055;
-
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
- } else {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
- }
- } else {
- pa_mode[0] = 0x0000ffff;
- pa_mode[1] = 0x00ff00ff;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
- } else {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
- }
-
- if (mt76x2_ext_pa_enabled(dev, band))
- pa_mode_adj = 0x04000000;
- else
- pa_mode_adj = 0;
-
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
- }
-
- mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- u32 val;
-
- if (band == NL80211_BAND_2GHZ)
- val = 0x3c3c023c;
- else
- val = 0x363c023c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
- } else {
- if (band == NL80211_BAND_2GHZ) {
- u32 val = 0x0f3c3c3c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
- } else {
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
- }
- }
-}
-
-static void
-mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
-{
- u32 cfg0, cfg1;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- cfg0 = bw ? 0x000b0c01 : 0x00101101;
- cfg1 = 0x00011414;
- } else {
- cfg0 = bw ? 0x000b0b01 : 0x00101001;
- cfg1 = 0x00021414;
- }
- mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
- mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
-
- mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
-}
-
-static void
-mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
-static void
-mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
- switch (band) {
- case NL80211_BAND_2GHZ:
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- case NL80211_BAND_5GHZ:
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- }
-
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-
void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
{
u32 val;
@@ -485,12 +185,14 @@ static void
mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
{
u32 false_cca;
- u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+ u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+ dev->cal.false_cca = false_cca;
if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
dev->cal.agc_gain_adjust += 2;
- else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+ else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+ (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
dev->cal.agc_gain_adjust -= 2;
else
return;
@@ -501,57 +203,65 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
static void
mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
{
- u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
- int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
- int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
u8 *gain = dev->cal.agc_gain_init;
- u8 gain_delta;
+ u8 low_gain_delta, gain_delta;
+ bool gain_change;
int low_gain;
+ u32 val;
- dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 +
- (rssi0 << 8) / 16;
- dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 +
- (rssi1 << 8) / 16;
- dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
- dev->cal.avg_rssi[1]) / 512;
+ dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
- if (dev->cal.low_gain == low_gain) {
+ gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
mt76x2_phy_adjust_vga_gain(dev);
return;
}
- dev->cal.low_gain = low_gain;
-
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
- else
+ val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
+ if (low_gain == 2)
+ val |= 0x3;
+ else
+ val |= 0x5;
+ mt76_wr(dev, MT_BBP(AGC, 26), val);
+ } else {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+ }
- if (low_gain) {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+ if (mt76x2_has_ext_lna(dev))
+ low_gain_delta = 10;
+ else
+ low_gain_delta = 14;
+
+ if (low_gain == 2) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
- if (mt76x2_has_ext_lna(dev))
- gain_delta = 10;
- else
- gain_delta = 14;
+ gain_delta = low_gain_delta;
+ dev->cal.agc_gain_adjust = 0;
} else {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
else
mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
+ dev->cal.agc_gain_adjust = low_gain_delta;
}
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
- dev->cal.agc_gain_adjust = 0;
mt76x2_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
}
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
new file mode 100644
index 000000000000..9fd6ab4cbb94
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain -= offset / 2;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain += offset;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+ s8 *gain_adj = dev->cal.rx.high_gain;
+
+ mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+ mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+ mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+ mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band)
+{
+ u32 pa_mode[2];
+ u32 pa_mode_adj;
+
+ if (band == NL80211_BAND_2GHZ) {
+ pa_mode[0] = 0x010055ff;
+ pa_mode[1] = 0x00550055;
+
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+ } else {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+ }
+ } else {
+ pa_mode[0] = 0x0000ffff;
+ pa_mode[1] = 0x00ff00ff;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+ } else {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+ }
+
+ if (mt76x2_ext_pa_enabled(dev, band))
+ pa_mode_adj = 0x04000000;
+ else
+ pa_mode_adj = 0;
+
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+ }
+
+ mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ u32 val;
+
+ if (band == NL80211_BAND_2GHZ)
+ val = 0x3c3c023c;
+ else
+ val = 0x363c023c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+ } else {
+ if (band == NL80211_BAND_2GHZ) {
+ u32 val = 0x0f3c3c3c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+
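+/* pack four per-rate power values, clamped to 6 bits, one per byte lane */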
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
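+ /* zero entries are unset rates and do not count towards the minimum */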
+ for (i = 0; i < sizeof(r->all); i++) {
+ if (!r->all[i])
+ continue;
+
+ if (ret)
+ ret = min(ret, r->all[i]);
+ else
+ ret = r->all[i];
+ }
+
+ return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+ enum nl80211_chan_width width = dev->mt76.chandef.width;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ int txp_0, txp_1, delta = 0;
+ struct mt76_rate_power t = {};
+ int base_power, gain;
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (width == NL80211_CHAN_WIDTH_40)
+ delta = txp.delta_bw40;
+ else if (width == NL80211_CHAN_WIDTH_80)
+ delta = txp.delta_bw80;
+
+ mt76x2_get_rate_power(dev, &t, chan);
+ mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
+ mt76x2_limit_rate_power(&t, dev->txpower_conf);
+ dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+
+ base_power = mt76x2_get_min_rate_power(&t);
+ delta += base_power - txp.chain[0].target_power;
+ txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+ txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
+ gain = min(txp_0, txp_1);
+ if (gain < 0) {
+ base_power -= gain;
+ txp_0 -= gain;
+ txp_1 -= gain;
+ } else if (gain > 0x2f) {
+ base_power -= gain - 0x2f;
+ txp_0 = 0x2f;
+ txp_1 = 0x2f;
+ }
+
+ mt76x2_add_rate_power_offset(&t, -base_power);
+ dev->target_power = txp.chain[0].target_power;
+ dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+ dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+ dev->rate_power = t;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw)
+{
+ u32 cfg0, cfg1;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ cfg0 = bw ? 0x000b0c01 : 0x00101101;
+ cfg1 = 0x00011414;
+ } else {
+ cfg0 = bw ? 0x000b0b01 : 0x00101001;
+ cfg1 = 0x00021414;
+ }
+ mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+ mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
+
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
+
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
+{
+ struct mt76x2_sta *sta;
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
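+ /* walk the wcid allocation bitmap and track the weakest per-station average */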
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ unsigned long mask = dev->wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->wcid[j]);
+ if (!wcid)
+ continue;
+
+ sta = container_of(wcid, struct mt76x2_sta, wcid);
+ spin_lock(&dev->mt76.rx_lock);
+ if (sta->inactive_count++ < 5)
+ cur_rssi = ewma_signal_read(&sta->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->mt76.rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
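+ /* no per-station data collected: fall back to a conservative default */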
+ if (!min_rssi)
+ return -75;
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
index b9c334d9e5b8..1551ea453180 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -75,6 +75,21 @@
#define MT_XO_CTRL7 0x011c
+#define MT_USB_U3DMA_CFG 0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
#define MT_WLAN_MTC_CTRL 0x10148
#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
@@ -150,6 +165,9 @@
#define MT_TX_HW_QUEUE_MCU 8
#define MT_TX_HW_QUEUE_MGMT 9
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
#define MT_PBF_SYS_CTRL 0x0400
#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
@@ -202,6 +220,11 @@
#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+#define MT_FCE_SKIP_FS 0x0a6c
+
#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
#define MT_MAC_CSR0 0x1000
@@ -214,6 +237,7 @@
#define MT_MAC_ADDR_DW0 0x1008
#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
#define MT_MAC_BSSID_DW0 0x1010
#define MT_MAC_BSSID_DW1 0x1014
@@ -351,6 +375,7 @@
#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
#define MT_VHT_HT_FBK_CFG1 0x1358
#define MT_PROT_CFG_RATE GENMASK(15, 0)
@@ -425,6 +450,7 @@
#define MT_RX_FILTR_CFG_BAR BIT(15)
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+#define MT_AUTO_RSP_CFG 0x1404
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
@@ -460,6 +486,10 @@
#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
+#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_1 0x1710
+#define MT_TX_STA_2 0x1714
+
#define MT_TX_STAT_FIFO 0x1718
#define MT_TX_STAT_FIFO_VALID BIT(0)
#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
index e46eafc4c436..4c907882e8b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -23,129 +23,6 @@ struct beacon_bc_data {
struct sk_buff *tail[8];
};
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x2_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_wcid *wcid = &dev->global_wcid;
-
- if (control->sta) {
- struct mt76x2_sta *msta;
-
- msta = (struct mt76x2_sta *) control->sta->drv_priv;
- wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != -1)
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) vif->drv_priv;
- wcid = &mvif->group_wcid;
- }
-
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ieee80211_free_txskb(mt76_hw(dev), skb);
- } else {
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status(mt76_hw(dev), skb);
- }
-}
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate)
-{
- s8 max_txpwr;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
- if (mcs == 8 || mcs == 9) {
- max_txpwr = dev->rate_power.vht[8];
- } else {
- u8 nss, idx;
-
- nss = ieee80211_rate_get_vht_nss(rate);
- idx = ((nss - 1) << 3) + mcs;
- max_txpwr = dev->rate_power.ht[idx & 0xf];
- }
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
- } else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
-
- if (band == NL80211_BAND_2GHZ) {
- const struct ieee80211_rate *r;
- struct wiphy *wiphy = mt76_hw(dev)->wiphy;
- struct mt76_rate_power *rp = &dev->rate_power;
-
- r = &wiphy->bands[band]->bitrates[rate->idx];
- if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
- max_txpwr = rp->cck[r->hw_value & 0x3];
- else
- max_txpwr = rp->ofdm[r->hw_value & 0x7];
- } else {
- max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
- }
- }
-
- return max_txpwr;
-}
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
- txpwr = min_t(s8, txpwr, dev->txpower_conf);
- txpwr -= (dev->target_power + dev->target_power_delta[0]);
- txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
- if (!dev->enable_tpc)
- return 0;
- else if (txpwr >= 0)
- return min_t(s8, txpwr, 7);
- else
- return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
- s8 txpwr_adj;
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
- dev->rate_power.ofdm[4]);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-
-static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- if (len % 4 == 0)
- return 0;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 2;
-}
-
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -159,7 +36,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
ret = mt76x2_insert_hdr_pad(skb);
if (ret < 0)
@@ -218,6 +95,37 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
data->tail[mvif->idx] = skb;
}
+static void
+mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
+{
+ u32 timer_val = dev->beacon_int << 4;
+
+ dev->tbtt_count++;
+
+ /*
+ * Beacon timer drifts by 1us every tick; the timer is configured
+ * in units of 1/16 TU (64us).
+ */
+ if (dev->tbtt_count < 62)
+ return;
+
+ if (dev->tbtt_count >= 64) {
+ dev->tbtt_count = 0;
+ return;
+ }
+
+ /*
+ * The updated beacon interval takes effect after two TBTTs, because
+ * at this point the original interval has already been loaded into
+ * the next TBTT_TIMER value.
+ */
+ if (dev->tbtt_count == 62)
+ timer_val -= 1;
+
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL, timer_val);
+}
+
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
@@ -226,6 +134,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
struct sk_buff *skb;
int i, nframes;
+ mt76x2_resync_beacon_timer(dev);
+
data.dev = dev;
__skb_queue_head_init(&data.q);
@@ -256,7 +166,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+ mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock_bh(&q->lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
new file mode 100644
index 000000000000..36afb166fa3f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x2_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->global_wcid;
+
+ if (control->sta) {
+ struct mt76x2_sta *msta;
+
+ msta = (struct mt76x2_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ /* sw encrypted frames */
+ if (!info->control.hw_key && wcid->hw_key_idx != -1)
+ control->sta = NULL;
+ }
+
+ if (vif && !control->sta) {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *)vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx);
+
+int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return 0;
+
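+ /* move the 802.11 header forward and zero the 2-byte gap before the payload */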
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+ struct mt76_rate_power *rp = &dev->rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
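+ /* encode the delta into the 4-bit tpc field: 0-7 positive, 8-15 negative */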
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+ dev->rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(mt76_hw(dev), skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(mt76_hw(dev), skb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
new file mode 100644
index 000000000000..1428cfdee579
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
+ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
+ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+ { USB_DEVICE(0x057c, 0x8503) }, /* AVM FRITZ!WLAN AC860 */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW-7722UAC */
+ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
+ { USB_DEVICE(0x045e, 0x02e6) }, /* Xbox One Wireless Adapter */
+ { },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev;
+ int err;
+
+ dev = mt76x2u_alloc_device(&intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
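+ /* take a reference on the usb device and reset it to a known state */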
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ err = mt76u_init(&dev->mt76, intf);
+ if (err < 0)
+ goto err;
+
+ dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+ err = mt76x2u_register_device(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ ieee80211_free_hw(mt76_hw(dev));
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+
+ return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ ieee80211_unregister_hw(hw);
+ mt76x2u_cleanup(dev);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+ pm_message_t state)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ mt76u_stop_queues(&dev->mt76);
+ mt76x2u_stop_hw(dev);
+ usb_kill_urb(usb->mcu.res.urb);
+
+ return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+ int err;
+
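+ /* repost the mcu response buffer and rx buffers before restarting the hw */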
+ reinit_completion(&usb->mcu.cmpl);
+ err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_rx_buffers(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ tasklet_enable(&usb->rx_tasklet);
+ tasklet_enable(&usb->tx_tasklet);
+
+ return mt76x2u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662U_FIRMWARE);
+MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+
+static struct usb_driver mt76x2u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x2u_device_table,
+ .probe = mt76x2u_probe,
+ .disconnect = mt76x2u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt76x2u_suspend,
+ .resume = mt76x2u_resume,
+ .reset_resume = mt76x2u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
new file mode 100644
index 000000000000..008092f0cd8a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_mcu.h"
+
+#define MT7612U_EEPROM_SIZE 512
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x2_dev *dev);
+int mt76x2u_init_hardware(struct mt76x2_dev *dev);
+void mt76x2u_cleanup(struct mt76x2_dev *dev);
+void mt76x2u_stop_hw(struct mt76x2_dev *dev);
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
+int mt76x2u_mac_reset(struct mt76x2_dev *dev);
+void mt76x2u_mac_resume(struct mt76x2_dev *dev);
+int mt76x2u_mac_start(struct mt76x2_dev *dev);
+int mt76x2u_mac_stop(struct mt76x2_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val);
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data);
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force);
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca);
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
+ u8 temp_level, u8 channel);
+int mt76x2u_mcu_init(struct mt76x2_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
+void mt76x2u_stop_queues(struct mt76x2_dev *dev);
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+ u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
new file mode 100644
index 000000000000..1ca5dd05b265
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "dma.h"
+
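+/* strip the txwi and usb dma header prepended on tx, plus the alignment pad */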
+static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
+{
+ int hdr_len;
+
+ skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
+ hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ if (hdr_len % 4) {
+ memmove(skb->data + 2, skb->data, hdr_len);
+ skb_pull(skb, 2);
+ }
+}
+
+static int
+mt76x2u_check_skb_rooms(struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
+ if (hdr_len % 4)
+ need_head += 2;
+ return skb_cow(skb, need_head);
+}
+
+static int
+mt76x2u_set_txinfo(struct sk_buff *skb,
+ struct mt76_wcid *wcid, u8 ep)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mt76x2_qsel qsel;
+ u32 flags;
+
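+ /* rate-probing and HCCA frames are sent through the management queue */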
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ ep == MT_EP_OUT_HCCA)
+ qsel = MT_QSEL_MGMT;
+ else
+ qsel = MT_QSEL_EDCA;
+
+ flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ flags |= MT_TXD_INFO_WIV;
+
+ return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
+}
+
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_tx_status stat;
+
+ if (!mt76x2_mac_load_tx_status(dev, &stat))
+ return false;
+
+ mt76x2_send_tx_status(dev, &stat, update);
+
+ return true;
+}
+
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_txwi *txwi;
+ int err, len = skb->len;
+
+ err = mt76x2u_check_skb_rooms(skb);
+ if (err < 0)
+ return -ENOMEM;
+
+ mt76x2_insert_hdr_pad(skb);
+
+ txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+ return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ mt76x2u_remove_dma_hdr(e->skb);
+ mt76x2_tx_complete(dev, e->skb);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
new file mode 100644
index 000000000000..9b81e7641c06
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_init_dma(struct mt76x2_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+
+ /* disable AGGR_BULK_RX in order to receive one
+ * frame in each rx urb and avoid copies
+ */
+ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
+static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+ mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+ udelay(1);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+ usleep_range(150, 200);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
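+ /* unit 1's control bits sit 8 bits higher in the same register */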
+ int shift = unit ? 8 : 0;
+ u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+ /* Enable RF BG */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+ usleep_range(10, 20);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+ usleep_range(10, 20);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+ usleep_range(10, 20);
+
+ mt76x2u_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+ MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+ usleep_range(10, 20);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ usleep_range(10, 20);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+ /* Release AD/DA power down */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+ mt76x2u_power_on_rf(dev, 0);
+ mt76x2u_power_on_rf(dev, 1);
+}
+
+static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
+{
+ u32 val, i;
+
+ dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+ MT7612U_EEPROM_SIZE,
+ GFP_KERNEL);
+ dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+ if (!dev->mt76.eeprom.data)
+ return -ENOMEM;
+
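+ /* copy the eeprom image out of the vendor register space, a dword at a time */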
+ for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+ val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+ put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+ }
+
+ mt76x2_eeprom_parse_hw_cap(dev);
+ return 0;
+}
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .tx_prepare_skb = mt76x2u_tx_prepare_skb,
+ .tx_complete_skb = mt76x2u_tx_complete_skb,
+ .tx_status_data = mt76x2u_tx_status_data,
+ .rx_skb = mt76x2_queue_rx_skb,
+ };
+ struct mt76x2_dev *dev;
+ struct mt76_dev *mdev;
+
+ mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
+ if (!mdev)
+ return NULL;
+
+ dev = container_of(mdev, struct mt76x2_dev, mt76);
+ mdev->dev = pdev;
+ mdev->drv = &drv_ops;
+
+ mutex_init(&dev->mutex);
+
+ return dev;
+}
+
+static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
+ mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
+ mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
+ mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
+}
+
+int mt76x2u_init_hardware(struct mt76x2_dev *dev)
+{
+ static const u16 beacon_offsets[] = {
+ /* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ const struct mt76_wcid_addr addr = {
+ .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .ba_mask = 0,
+ };
+ int i, err;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x2_reset_wlan(dev, true);
+ mt76x2u_power_on(dev);
+
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ err = mt76x2u_mcu_fw_init(dev);
+ if (err < 0)
+ return err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+ return -EIO;
+
+ /* wait for the ASIC to be ready after firmware load */
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
+ mt76_wr(dev, MT_TSO_CTRL, 0);
+
+ mt76x2u_init_dma(dev);
+
+ err = mt76x2u_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x2u_mac_reset(dev);
+ if (err < 0)
+ return err;
+
+ mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ mt76x2u_init_beacon_offsets(dev);
+
+ if (!mt76x2_wait_for_bbp(dev))
+ return -ETIMEDOUT;
+
+ /* reset wcid table */
+ for (i = 0; i < 254; i++)
+ mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
+ sizeof(struct mt76_wcid_addr));
+
+ /* reset shared key table and pairwise key table */
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
+ for (i = 0; i < 256; i++)
+ mt76_wr(dev, MT_WCID_ATTR(i), 1);
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+ err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+ if (err < 0)
+ return err;
+
+ mt76x2u_phy_set_rxpath(dev);
+ mt76x2u_phy_set_txdac(dev);
+
+ return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int err;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+ mt76x2_init_device(dev);
+
+ err = mt76x2u_init_eeprom(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76u_mcu_init_rx(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto fail;
+
+ err = mt76x2u_init_hardware(dev);
+ if (err < 0)
+ goto fail;
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+ ARRAY_SIZE(mt76x2_rates));
+ if (err)
+ goto fail;
+
+ /* check hw sg support in order to enable AMSDU */
+ if (mt76u_check_sg(&dev->mt76))
+ hw->max_tx_fragments = MT_SG_MAX_SIZE;
+ else
+ hw->max_tx_fragments = 1;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ mt76x2_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2u_cleanup(dev);
+ return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x2_dev *dev)
+{
+ mt76u_stop_stat_wk(&dev->mt76);
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x2_dev *dev)
+{
+ mt76x2u_mcu_set_radio_state(dev, false);
+ mt76x2u_stop_hw(dev);
+ mt76u_queues_deinit(&dev->mt76);
+ mt76x2u_mcu_deinit(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
new file mode 100644
index 000000000000..eab7ab297aa6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
+{
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
+}
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
+{
+ s8 offset = 0;
+ u16 eep_val;
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
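+ /* low byte is 7-bit signed-magnitude trim; 0xff means not programmed */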
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+ MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+ mt76_wr(dev, 0x504, 0x06000000);
+ mt76_wr(dev, 0x50c, 0x08800000);
+ mdelay(5);
+ mt76_wr(dev, 0x504, 0x0);
+
+ /* decrease SIFS from 16us to 13us */
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+ /* init fce */
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt76x2u_mac_reset(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+ /* init pbf regs */
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+ mt76_write_mac_initvals(dev);
+
+ mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+ mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+ mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
+
+ mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+ mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+ mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76x2u_mac_fixup_xtal(dev);
+
+ return 0;
+}
+
+int mt76x2u_mac_start(struct mt76x2_dev *dev)
+{
+ mt76x2u_mac_reset_counters(dev);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ return 0;
+}
+
+int mt76x2u_mac_stop(struct mt76x2_dev *dev)
+{
+ int i, count = 0, val;
+ bool stopped = false;
+ u32 rts_cfg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+
+ /* wait for tx dma to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ /* page count on TxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+ break;
+ usleep_range(10, 20);
+ }
+
+ /* disable tx-rx */
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 1000; i++) {
+ if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+ !mt76_rr(dev, MT_BBP(IBI, 12))) {
+ stopped = true;
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ if (!stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ /* page count on RxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+ !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+ ++count > 10)
+ break;
+ msleep(50);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+ dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+ /* wait for RX DMA to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+ return 0;
+}
+
+void mt76x2u_mac_resume(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
+}
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
+{
+ ether_addr_copy(dev->mt76.macaddr, addr);
+
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->mt76.macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
new file mode 100644
index 000000000000..7367ba111119
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x2u_mac_start(dev);
+ if (ret)
+ goto out;
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x2u_stop_hw(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt76x2u_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
+ unsigned int idx = 0;
+
+ if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
+ mt76x2u_mac_setaddr(dev, vif->addr);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = MT_VIF_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+ mt76x2_txq_init(dev, vif->txq);
+
+ return 0;
+}
+
+static int
+mt76x2u_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int err;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_set_channel(&dev->mt76);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+ mt76x2_mac_stop(dev, false);
+
+ err = mt76x2u_phy_set_channel(dev, chandef);
+
+ mt76x2u_mac_resume(dev);
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+ mt76_txq_schedule_all(&dev->mt76);
+
+ return err;
+}
+
+static void
+mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt76x2u_phy_channel_calibrate(dev);
+ mt76x2_apply_gain_adj(dev);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76_wr(dev, MT_MAC_BSSID_DW0,
+ get_unaligned_le32(info->bssid));
+ mt76_wr(dev, MT_MAC_BSSID_DW1,
+ get_unaligned_le16(info->bssid + 4));
+ }
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int err = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ mt76x2_phy_set_txpower(dev);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return err;
+}
+
+static void
+mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+ .tx = mt76x2_tx,
+ .start = mt76x2u_start,
+ .stop = mt76x2u_stop,
+ .add_interface = mt76x2u_add_interface,
+ .remove_interface = mt76x2_remove_interface,
+ .sta_add = mt76x2_sta_add,
+ .sta_remove = mt76x2_sta_remove,
+ .set_key = mt76x2_set_key,
+ .ampdu_action = mt76x2_ampdu_action,
+ .config = mt76x2u_config,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .bss_info_changed = mt76x2u_bss_info_changed,
+ .configure_filter = mt76x2_configure_filter,
+ .conf_tx = mt76x2_conf_tx,
+ .sw_scan_start = mt76x2u_sw_scan,
+ .sw_scan_complete = mt76x2u_sw_scan_complete,
+ .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
new file mode 100644
index 000000000000..22c16d638baa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+#define MT_CMD_HDR_LEN 4
+#define MT_INBAND_PACKET_MAX_LEN 192
+#define MT_MCU_MEMMAP_WLAN 0x410000
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD 2048
+
+#define MT76U_MCU_ILM_OFFSET 0x80000
+#define MT76U_MCU_DLM_OFFSET 0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
+
+static int
+mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
+ func != Q_SELECT);
+}
+
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
+{
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
+ false);
+}
+
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ struct sk_buff *skb;
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+ /* load the CR table for the given mode, temperature level and channel */
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
+}
+
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->chainmask),
+ };
+ struct sk_buff *skb;
+
+ /* first set the channel without the extension channel info */
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+
+ usleep_range(5000, 10000);
+
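+ /* then resend the command with the extension channel index filled in */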
+ msg.ext_chan = 0xe0 + bw_index;
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+ struct sk_buff *skb;
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
+}
+
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca)
+{
+ struct {
+ __le32 channel;
+ __le32 rssi_val;
+ __le32 false_cca_val;
+ } __packed __aligned(4) msg = {
+ .rssi_val = cpu_to_le32(rssi),
+ .false_cca_val = cpu_to_le32(false_cca),
+ };
+ struct sk_buff *skb;
+ u32 val = channel;
+
+ if (ap)
+ val |= BIT(31);
+ if (ext)
+ val |= BIT(30);
+ msg.channel = cpu_to_le32(val);
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
+}
+
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
+{
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ const u8 data[] = {
+ 0x6f, 0xfc, 0x08, 0x01,
+ 0x20, 0x04, 0x00, 0x00,
+ 0x00, 0x09, 0x00,
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ u8 data[] = {
+ 0x6f, 0xfc, 0x05, 0x01,
+ 0x07, 0x01, 0x00, 0x04
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
+{
+ bool rom_protect = !is_mt7612(dev);
+ struct mt76x2_patch_header *hdr;
+ u32 val, patch_mask, patch_reg;
+ const struct firmware *fw;
+ int err;
+
+ if (rom_protect &&
+ !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ return 0;
+ }
+
+ err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "failed to load firmware\n");
+ err = -EIO;
+ goto out;
+ }
+
+ hdr = (struct mt76x2_patch_header *)fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ fw->size - sizeof(*hdr),
+ MCU_ROM_PATCH_MAX_PAYLOAD,
+ MT76U_MCU_ROM_PATCH_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_enable_patch(dev);
+ mt76x2u_mcu_reset_wmt(dev);
+ mdelay(20);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+ dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+ err = -ETIMEDOUT;
+ }
+
+out:
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
+{
+ u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+ const struct mt76x2_fw_header *hdr;
+ int err, len, ilm_len, dlm_len;
+ const struct firmware *fw;
+
+ err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt76x2_fw_header *)fw->data;
+ ilm_len = le32_to_cpu(hdr->ilm_len);
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ len = sizeof(*hdr) + ilm_len + dlm_len;
+ if (fw->size != len) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ /* load ILM */
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT76U_MCU_ILM_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ /* load DLM */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ dlm_offset += 0x800;
+ err = mt76u_mcu_fw_send_data(&dev->mt76,
+ fw->data + sizeof(*hdr) + ilm_len,
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ dlm_offset);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_load_ivb(dev);
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+ dev_err(dev->mt76.dev, "firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ dev_dbg(dev->mt76.dev, "firmware running\n");
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_load_rom_patch(dev);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_set_radio_state(dev, true);
+}
+
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ usb_kill_urb(usb->mcu.res.urb);
+ mt76u_buf_free(&usb->mcu.res);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
new file mode 100644
index 000000000000..5158063d0c2e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~BIT(4);
+
+ switch (dev->chainmask & 0xf) {
+ case 2:
+ val |= BIT(3);
+ break;
+ default:
+ val &= ~BIT(3);
+ break;
+ }
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
+{
+ int txpath;
+
+ txpath = (dev->chainmask >> 8) & 0xf;
+ switch (txpath) {
+ case 2:
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ default:
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ }
+}
+
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ mt76x2u_mac_stop(dev);
+
+ if (is_5ghz)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+
+ mt76x2u_mac_resume(dev);
+}
+
+static void
+mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
+
+ if (!dev->cal.tssi_cal_done)
+ return;
+
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2u_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
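+ /* compensation data not ready: TSSI measurement still in progress */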
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
+
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
+
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2u_mcu_tssi_comp(dev, &t);
+
+ if (t.pa_mode || dev->cal.dpd_cal_done)
+ return;
+
+ usleep_range(10000, 20000);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+ dev->cal.dpd_cal_done = true;
+ }
+}
+
+static void
+mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int freq, freq1;
+ u32 false_cca;
+
+ freq = dev->mt76.chandef.chan->center_freq;
+ freq1 = dev->mt76.chandef.center_freq1;
+
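+ /* map the control channel to the center channel of the configured bandwidth */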
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80: {
+ int ch_group_index;
+
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ break;
+ }
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ channel += 2;
+ else
+ channel -= 2;
+ break;
+ default:
+ break;
+ }
+
+ dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+ mt76_rr(dev, MT_RX_STAT_1));
+
+ mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
+ dev->cal.avg_rssi_all, false_cca);
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x2_dev *dev;
+
+ dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ mt76x2u_phy_tssi_compensate(dev);
+ mt76x2u_phy_update_channel_gain(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ struct ieee80211_channel *chan = chandef->chan;
+ u8 channel = chan->hw_value, bw, bw_index;
+ int ch_group_index, freq, freq1, ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, chan->band);
+ mt76x2_configure_tx_delay(dev, chan->band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+ mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+ if (scan)
+ return 0;
+
+ if (mt76x2_tssi_enabled(dev)) {
+ /* init default values for temp compensation */
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ 0x38);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ 0x38);
+
+ /* init tssi calibration */
+ if (!mt76x2_channel_silent(dev)) {
+ struct ieee80211_channel *chan;
+ u32 flag = 0;
+
+ chan = dev->mt76.chandef.chan;
+ if (chan->band == NL80211_BAND_5GHZ)
+ flag |= BIT(0);
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ }
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e96956710fb2..af48d43bb7dc 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -51,7 +51,7 @@ __mt76_get_txwi(struct mt76_dev *dev)
return t;
}
-static struct mt76_txwi_cache *
+struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
@@ -91,80 +91,6 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
return txq->ac;
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct mt76_queue_entry e;
- struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
- struct sk_buff *iter;
- dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
-
- t = mt76_get_txwi(dev);
- if (!t) {
- ieee80211_free_txskb(dev->hw, skb);
- return -ENOMEM;
- }
-
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
-
- len = skb->len - skb->data_len;
- addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
- goto free;
- }
-
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
-
- skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
- goto unmap;
-
- addr = dma_map_single(dev->dev, iter->data, iter->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
- goto unmap;
-
- buf[n].addr = addr;
- buf[n++].len = iter->len;
- }
-
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
- goto unmap;
-
- return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
-
-unmap:
- ret = -ENOMEM;
- for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
-
-free:
- e.skb = skb;
- e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
- mt76_put_txwi(dev, t);
- return ret;
-}
-EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
-
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -185,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
q = &dev->q_tx[qid];
spin_lock_bh(&q->lock);
- mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8)
@@ -241,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
info->flags |= IEEE80211_TX_STATUS_EOSP;
mt76_skb_set_moredata(skb, !last);
- mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}
void
@@ -321,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
if (idx < 0)
return idx;
@@ -356,7 +282,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (cur_ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+ txq->sta);
if (idx < 0)
return idx;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 000000000000..7780b07543bb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "usb_trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+/* should be called with usb_ctrl_mtx locked */
+static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+ int i, ret;
+
+ pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+ : usb_sndctrlpipe(udev, 0);
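+ /* retry transient control-transfer failures; -ENODEV means the device is gone */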
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return -EIO;
+
+ ret = usb_control_msg(udev, pipe, req, req_type, val,
+ offset, buf, len, MT_VEND_REQ_TOUT_MS);
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+ usleep_range(5000, 10000);
+ }
+
+ dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+ return ret;
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_vendor_request(dev, req, req_type,
+ val, offset, buf, len);
+ trace_usb_reg_wr(dev, offset, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_vendor_request);
+
+/* should be called with usb_ctrl_mtx locked */
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 data = ~0;
+ u16 offset;
+ int ret;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_EEPROM:
+ req = MT_VEND_READ_EEPROM;
+ break;
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_READ_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_READ;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, offset, usb->data, sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_rr(dev, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+/* should be called with usb_ctrl_mtx locked */
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u16 offset;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_WRITE_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_WRITE;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR, 0,
+ offset, usb->data, sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= __mt76u_rr(dev, addr) & ~mask;
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ const u32 *val = data;
+ int i, ret;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ for (i = 0; i < (len / 4); i++) {
+ put_unaligned_le32(val[i], usb->data);
+ ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0, offset + i * 4, usb->data,
+ sizeof(__le32));
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val & 0xffff, offset, NULL, 0);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val >> 16, offset + 2, NULL, 0);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76u_single_wr);
+
+static int
+mt76u_set_endpoints(struct usb_interface *intf,
+ struct mt76_usb *usb)
+{
+ struct usb_host_interface *intf_desc = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, in_ep = 0, out_ep = 0;
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ in_ep < __MT_EP_IN_MAX) {
+ usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+ usb->in_max_packet = usb_endpoint_maxp(ep_desc);
+ in_ep++;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ out_ep < __MT_EP_OUT_MAX) {
+ usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+ usb->out_max_packet = usb_endpoint_maxp(ep_desc);
+ out_ep++;
+ }
+ }
+
+ if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
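+ /* back each scatterlist entry with a freshly allocated page fragment */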
+ for (i = 0; i < nsgs; i++) {
+ struct page *page;
+ void *data;
+ int offset;
+
+ data = netdev_alloc_frag(len);
+ if (!data)
+ break;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ sg_set_page(&urb->sg[i], page, sglen, offset);
+ }
+
+ if (i < nsgs) {
+ int j;
+
+ for (j = nsgs; j < urb->num_sgs; j++)
+ skb_free_frag(sg_virt(&urb->sg[j]));
+ urb->num_sgs = i;
+ }
+
+ urb->num_sgs = max_t(int, i, urb->num_sgs);
+ buf->len = urb->num_sgs * sglen;
+ sg_init_marker(urb->sg, urb->num_sgs);
+
+ return i ? : -ENOMEM;
+}
+
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp)
+{
+ buf->urb = usb_alloc_urb(0, gfp);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ gfp);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+
+ sg_init_table(buf->urb->sg, nsgs);
+ buf->dev = dev;
+
+ return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
+
+void mt76u_buf_free(struct mt76u_buf *buf)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
+ for (i = 0; i < urb->num_sgs; i++)
+ skb_free_frag(sg_virt(&urb->sg[i]));
+ usb_free_urb(buf->urb);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_free);
+
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+ else
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
+ complete_fn, context);
+
+ return usb_submit_urb(buf->urb, gfp);
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_buf);
+
+static inline struct mt76u_buf
+*mt76u_get_next_rx_entry(struct mt76_queue *q)
+{
+ struct mt76u_buf *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (q->queued > 0) {
+ buf = &q->entry[q->head].ubuf;
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return buf;
+}
+
+static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+{
+ u16 dma_len, min_len;
+
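+ /* the first 16 bits of the DMA header hold the 4-byte aligned payload length */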
+ dma_len = get_unaligned_le16(data);
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
+ MT_FCE_INFO_LEN;
+
+ if (data_len < min_len || WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return -EINVAL;
+ return dma_len;
+}
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ u8 *data = sg_virt(&urb->sg[0]);
+ int data_len, len, nsgs = 1;
+ struct sk_buff *skb;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ return 0;
+
+ len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ if (len < 0)
+ return 0;
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb)
+ return 0;
+
+ data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ if (skb->tail + data_len > skb->end) {
+ dev_kfree_skb(skb);
+ return 1;
+ }
+
+ __skb_put(skb, data_len);
+ len -= data_len;
+
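+ /* attach any remaining scatterlist pages to the skb as paged fragments */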
+ while (len > 0) {
+ data_len = min_t(int, len, urb->sg[nsgs].length);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ sg_page(&urb->sg[nsgs]),
+ urb->sg[nsgs].offset,
+ data_len, q->buf_size);
+ len -= data_len;
+ nsgs++;
+ }
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+ return nsgs;
+}
+
+static void mt76u_complete_rx(struct urb *urb)
+{
+ struct mt76_dev *dev = urb->context;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ goto out;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued++;
+ tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int err, nsgs, buf_len = q->buf_size;
+ struct mt76u_buf *buf;
+
+ rcu_read_lock();
+
+ while (true) {
+ buf = mt76u_get_next_rx_entry(q);
+ if (!buf)
+ break;
+
+ nsgs = mt76u_process_rx_entry(dev, buf->urb);
+ if (nsgs > 0) {
+ err = mt76u_fill_rx_sg(dev, buf, nsgs,
+ buf_len,
+ SKB_WITH_OVERHEAD(buf_len));
+ if (err < 0)
+ break;
+ }
+ mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ buf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ }
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+ rcu_read_unlock();
+}
+
+int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+ int i, err = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ &q->entry[i].ubuf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ if (err < 0)
+ break;
+ }
+ q->head = q->tail = 0;
+ q->queued = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
+
+static int mt76u_alloc_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, err, nsgs;
+
+ spin_lock_init(&q->lock);
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
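+ /* use scatter-gather RX buffers when the USB controller supports them */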
+ if (mt76u_check_sg(dev)) {
+ q->buf_size = MT_RX_BUF_SIZE;
+ nsgs = MT_SG_MAX_SIZE;
+ } else {
+ q->buf_size = PAGE_SIZE;
+ nsgs = 1;
+ }
+
+ for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
+ err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
+ nsgs, q->buf_size,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+ }
+ q->ndesc = MT_NUM_RX_ENTRIES;
+
+ return mt76u_submit_rx_buffers(dev);
+}
+
+static void mt76u_free_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ mt76u_buf_free(&q->entry[i].ubuf);
+}
+
+static void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ usb_kill_urb(q->entry[i].ubuf.urb);
+}
+
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 info, pad;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+ info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (unlikely(pad)) {
+ if (__skb_pad(last, pad, true))
+ return -ENOMEM;
+ __skb_put(last, pad);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+
+static void mt76u_tx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ bool wake;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+
+ spin_lock_bh(&q->lock);
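+ /* reap buffers completed by the URB callbacks, in submission order */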
+ while (true) {
+ buf = &q->entry[q->head].ubuf;
+ if (!buf->done || !q->queued)
+ break;
+
+ dev->drv->tx_complete_skb(dev, q,
+ &q->entry[q->head],
+ false);
+
+ if (q->entry[q->head].schedule) {
+ q->entry[q->head].schedule = false;
+ q->swq_queued--;
+ }
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ mt76_txq_schedule(dev, q);
+ wake = q->queued < q->ndesc - 8;
+ if (!q->queued)
+ wake_up(&dev->tx_wait);
+
+ spin_unlock_bh(&q->lock);
+
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw,
+ &dev->usb.stat_work,
+ msecs_to_jiffies(10));
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, i);
+ }
+}
+
+static void mt76u_tx_status_data(struct work_struct *work)
+{
+ struct mt76_usb *usb;
+ struct mt76_dev *dev;
+ u8 update = 1;
+ u16 count = 0;
+
+ usb = container_of(work, struct mt76_usb, stat_work.work);
+ dev = container_of(usb, struct mt76_dev, usb);
+
+ while (true) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ break;
+
+ if (!dev->drv->tx_status_data(dev, &update))
+ break;
+ count++;
+ }
+
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
+ msecs_to_jiffies(10));
+ else
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+
+static void mt76u_complete_tx(struct urb *urb)
+{
+ struct mt76u_buf *buf = urb->context;
+ struct mt76_dev *dev = buf->dev;
+
+ if (mt76u_urb_error(urb))
+ dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+ buf->done = true;
+
+ tasklet_schedule(&dev->usb.tx_tasklet);
+}
+
+static int
+mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
+{
+ int nsgs = 1 + skb_shinfo(skb)->nr_frags;
+ struct sk_buff *iter;
+
+ skb_walk_frags(skb, iter)
+ nsgs += 1 + skb_shinfo(iter)->nr_frags;
+
+ memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
+
+ nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
+ sg_init_marker(urb->sg, nsgs);
+ urb->num_sgs = nsgs;
+
+ return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
+}
+
+static int
+mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ u8 ep = q2ep(q->hw_idx);
+ struct mt76u_buf *buf;
+ u16 idx = q->tail;
+ unsigned int pipe;
+ int err;
+
+ if (q->queued == q->ndesc)
+ return -ENOSPC;
+
+ err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ if (err < 0)
+ return err;
+
+ buf = &q->entry[idx].ubuf;
+ buf->done = false;
+
+ err = mt76u_tx_build_sg(skb, buf->urb);
+ if (err < 0)
+ return err;
+
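+ /* no linear buffer: the URB transfers directly from the scatterlist */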
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
+ mt76u_complete_tx, buf);
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->entry[idx].skb = skb;
+ q->queued++;
+
+ return idx;
+}
+
+static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct mt76u_buf *buf;
+ int err;
+
+ while (q->first != q->tail) {
+ buf = &q->entry[q->first].ubuf;
+ err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ if (err < 0) {
+ if (err == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "tx urb submit failed:%d\n",
+ err);
+ break;
+ }
+ q->first = (q->first + 1) % q->ndesc;
+ }
+}
+
+static int mt76u_alloc_tx(struct mt76_dev *dev)
+{
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ size_t size;
+ int i, j;
+
+ size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->swq);
+ q->hw_idx = q2hwq(i);
+
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_TX_ENTRIES;
+ for (j = 0; j < q->ndesc; j++) {
+ buf = &q->entry[j].ubuf;
+ buf->dev = dev;
+
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void mt76u_free_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_free_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+static void mt76u_stop_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+void mt76u_stop_queues(struct mt76_dev *dev)
+{
+ tasklet_disable(&dev->usb.rx_tasklet);
+ tasklet_disable(&dev->usb.tx_tasklet);
+
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_queues);
+
+void mt76u_stop_stat_wk(struct mt76_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->usb.stat_work);
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+
+void mt76u_queues_deinit(struct mt76_dev *dev)
+{
+ mt76u_stop_queues(dev);
+
+ mt76u_free_rx(dev);
+ mt76u_free_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
+
+int mt76u_alloc_queues(struct mt76_dev *dev)
+{
+ int err;
+
+ err = mt76u_alloc_rx(dev);
+ if (err < 0)
+ goto err;
+
+ err = mt76u_alloc_tx(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+err:
+ mt76u_queues_deinit(dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+ .tx_queue_skb = mt76u_tx_queue_skb,
+ .kick = mt76u_tx_kick,
+};
+
+int mt76u_init(struct mt76_dev *dev,
+ struct usb_interface *intf)
+{
+ static const struct mt76_bus_ops mt76u_ops = {
+ .rr = mt76u_rr,
+ .wr = mt76u_wr,
+ .rmw = mt76u_rmw,
+ .copy = mt76u_copy,
+ };
+ struct mt76_usb *usb = &dev->usb;
+
+ tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+ INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+ skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+
+ init_completion(&usb->mcu.cmpl);
+ mutex_init(&usb->mcu.mutex);
+
+ mutex_init(&usb->usb_ctrl_mtx);
+ dev->bus = &mt76u_ops;
+ dev->queue_ops = &usb_queue_ops;
+
+ return mt76u_set_endpoints(intf, usb);
+}
+EXPORT_SYMBOL_GPL(mt76u_init);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
new file mode 100644
index 000000000000..070be803d463
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76.h"
+#include "dma.h"
+
+#define MT_CMD_HDR_LEN 4
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_CMD_HDR_LEN);
+ skb_put_data(skb, data, len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
+
+void mt76u_mcu_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
+
+static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+ struct mt76_usb *usb = &dev->usb;
+ struct mt76u_buf *buf = &usb->mcu.res;
+ int i, ret;
+ u32 rxfce;
+
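+ /* wait for the response URB and match its sequence number against ours */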
+ for (i = 0; i < 5; i++) {
+ if (!wait_for_completion_timeout(&usb->mcu.cmpl,
+ msecs_to_jiffies(300)))
+ continue;
+
+ if (buf->urb->status)
+ return -EIO;
+
+ rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
+ ret = mt76u_submit_buf(dev, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (ret)
+ return ret;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
+ return 0;
+
+ dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76_usb *usb = &dev->usb;
+ unsigned int pipe;
+ int ret, sent;
+ u8 seq = 0;
+ u32 info;
+
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&usb->mcu.mutex);
+
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
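+ /* sequence number 0 is reserved for commands that expect no response */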
+ if (wait_resp) {
+ seq = ++usb->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++usb->mcu.msg_seq & 0xf;
+ }
+
+ info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ MT_MCU_MSG_TYPE_CMD;
+ ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
+ if (ret)
+ goto out;
+
+ ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt76u_mcu_wait_resp(dev, seq);
+
+out:
+ mutex_unlock(&usb->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
+
+void mt76u_mcu_fw_reset(struct mt76_dev *dev)
+{
+ mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
+
+static int
+__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+ const void *fw_data, int len, u32 dst_addr)
+{
+ u8 *data = sg_virt(&buf->urb->sg[0]);
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ __le32 info;
+ u32 val;
+ int err;
+
+ info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, len) |
+ MT_MCU_MSG_TYPE_CMD);
+
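+ /* frame layout: TXINFO header, payload, then four bytes of zero padding */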
+ memcpy(data, &info, sizeof(info));
+ memcpy(data + sizeof(info), fw_data, len);
+ memset(data + sizeof(info) + len, 0, 4);
+
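+ /* program the FCE engine with the destination address and transfer length */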
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ len = roundup(len, 4);
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+
+ buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
+ err = mt76u_submit_buf(dev, USB_DIR_OUT,
+ MT_EP_OUT_INBAND_CMD,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb, &cmpl);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_timeout(&cmpl,
+ msecs_to_jiffies(1000))) {
+ dev_err(dev->dev, "firmware upload timed out\n");
+ usb_kill_urb(buf->urb);
+ return -ETIMEDOUT;
+ }
+
+ if (mt76u_urb_error(buf->urb)) {
+ dev_err(dev->dev, "firmware upload failed: %d\n",
+ buf->urb->status);
+ return buf->urb->status;
+ }
+
+ val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset)
+{
+ int err, len, pos = 0, max_len = max_payload - 8;
+ struct mt76u_buf buf;
+
+ err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ while (data_len > 0) {
+ len = min_t(int, data_len, max_len);
+ err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
+ len, offset + pos);
+ if (err < 0)
+ break;
+
+ data_len -= len;
+ pos += len;
+ usleep_range(5000, 10000);
+ }
+ mt76u_buf_free(&buf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
+
+int mt76u_mcu_init_rx(struct mt76_dev *dev)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int err;
+
+ err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
+ MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ mt76u_buf_free(&usb->mcu.res);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 000000000000..7e1f540f0b7a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "usb_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 000000000000..52db7012304a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_USB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76_usb
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE usb_trace
+
+#include <trace/define_trace.h>
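The DECLARE_EVENT_CLASS()/DEFINE_EVENT() pair above is expanded by <trace/define_trace.h> into trace_usb_reg_rr() and trace_usb_reg_wr() helpers. A sketch of a consumer; only the trace_usb_reg_rr() name follows from the declarations above, the accessor body is an assumption about the read path:

	static u32 example_usb_rr(struct mt76_dev *dev, u32 addr)
	{
		u32 val = dev->bus->rr(dev, addr);	/* assumed read hook */

		trace_usb_reg_rr(dev, addr, val);
		return val;
	}

With DEV_PR_FMT and REG_PR_FMT as defined above, the emitted trace line looks like "phy0 0004=00001234".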
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index d3b611aaf061..faea99b7a445 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -603,6 +603,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 7b21016012c3..0f1789020960 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -308,6 +308,17 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int ret;
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 9d2f9a776ef1..b804abd464ae 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
*/
spin_lock_bh(&dev->con_mon_lock);
avg_rssi = ewma_rssi_read(&dev->avg_rssi);
- WARN_ON_ONCE(avg_rssi == 0);
+ spin_unlock_bh(&dev->con_mon_lock);
+ if (avg_rssi == 0)
+ return;
+
avg_rssi = -avg_rssi;
if (avg_rssi <= -70)
val -= 0x20;
else if (avg_rssi <= -60)
val -= 0x10;
- spin_unlock_bh(&dev->con_mon_lock);
if (val != mt7601u_bbp_rr(dev, 66))
mt7601u_bbp_wr(dev, 66, val);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index 025fa6018550..8d1492a90bd1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -7,7 +7,7 @@ config QTNFMAC
config QTNFMAC_PEARL_PCIE
tristate "Quantenna QSR10g PCIe support"
default n
- depends on HAS_DMA && PCI && CFG80211
+ depends on PCI && CFG80211
select QTNFMAC
select FW_LOADER
select CRC32
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 220e2b710208..4aa332f4646b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
@@ -844,6 +843,88 @@ static int qtnf_set_mac_acl(struct wiphy *wiphy,
return ret;
}
+static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
+ QLINK_PM_OFF, timeout);
+ if (ret) {
+ pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (!wowlan) {
+ pr_debug("WoWLAN triggers are not enabled\n");
+ qtnf_virtual_intf_cleanup(vif->netdev);
+ goto exit;
+ }
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_wowlan_set(vif, wowlan);
+ if (ret) {
+ pr_err("MAC%u: failed to set WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int qtnf_resume(struct wiphy *wiphy)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = qtnf_cmd_send_wowlan_set(vif, NULL);
+ if (ret) {
+ pr_err("MAC%u: failed to reset WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_bus *bus = mac->bus;
+
+ device_set_wakeup_enable(bus->dev, enabled);
+}
+#endif
+
static struct cfg80211_ops qtn_cfg80211_ops = {
.add_virtual_intf = qtnf_add_virtual_intf,
.change_virtual_intf = qtnf_change_virtual_intf,
@@ -870,6 +951,12 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.channel_switch = qtnf_channel_switch,
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
+ .set_power_mgmt = qtnf_set_power_mgmt,
+#ifdef CONFIG_PM
+ .suspend = qtnf_suspend,
+ .resume = qtnf_resume,
+ .set_wakeup = qtnf_set_wakeup,
+#endif
};
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -922,6 +1009,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
qtn_cfg80211_ops.start_radar_detection = NULL;
+ if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+ qtn_cfg80211_ops.set_power_mgmt = NULL;
+
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
@@ -976,7 +1066,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->retry_long = macinfo->lretry_limit;
wiphy->coverage_class = macinfo->coverage_class;
- wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH;
+ wiphy->max_scan_ssids =
+ (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
@@ -995,6 +1086,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
@@ -1014,6 +1106,14 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT)
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
+ wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+
+#ifdef CONFIG_PM
+ if (macinfo->wowlan)
+ wiphy->wowlan = macinfo->wowlan;
+#endif
+
if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index c5d94a95e21a..ae9e77300533 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -640,83 +640,83 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
return;
if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
sinfo->inactive_time = le32_to_cpu(stats->inactive_time);
}
if (qtnf_sta_stat_avail(connected_time,
QLINK_STA_INFO_CONNECTED_TIME)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME);
sinfo->connected_time = le32_to_cpu(stats->connected_time);
}
if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = stats->signal - QLINK_RSSI_OFFSET;
}
if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET;
}
if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate);
}
if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate);
}
if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS);
qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags);
}
if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
}
if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
}
if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
}
if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
}
if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = le32_to_cpu(stats->rx_packets);
}
if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = le32_to_cpu(stats->tx_packets);
}
if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon);
}
if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc);
}
if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
}
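The BIT() to BIT_ULL() conversion in this hunk is a correctness fix rather than churn: sinfo->filled is a u64, while BIT(n) expands to (1UL << n). On 32-bit builds unsigned long is 32 bits wide, so any NL80211_STA_INFO_* index of 32 or above would be shifted out of range; newer indices do reach that, so the whole bitmap is converted for safety. In short:

	u64 filled = 0;

	filled |= 1UL << 35;	/* BIT(35): undefined on 32-bit targets */
	filled |= 1ULL << 35;	/* BIT_ULL(35): always a 64-bit shift   */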
@@ -1092,6 +1092,9 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
case QTN_TLV_ID_UBOOT_VER:
uboot_ver = (const void *)tlv->val;
break;
+ case QTN_TLV_ID_MAX_SCAN_SSIDS:
+ hwinfo->max_scan_ssids = *tlv->val;
+ break;
default:
break;
}
@@ -1135,6 +1138,37 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
return 0;
}
+static void
+qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
+ const struct qlink_wowlan_capab_data *wowlan)
+{
+ struct qtnf_mac_info *mac_info = &mac->macinfo;
+ const struct qlink_wowlan_support *data1;
+ struct wiphy_wowlan_support *supp;
+
+ supp = kzalloc(sizeof(*supp), GFP_KERNEL);
+ if (!supp)
+ return;
+
+ switch (le16_to_cpu(wowlan->version)) {
+ case 0x1:
+ data1 = (struct qlink_wowlan_support *)wowlan->data;
+
+ supp->flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT;
+ supp->n_patterns = le32_to_cpu(data1->n_patterns);
+ supp->pattern_max_len = le32_to_cpu(data1->pattern_max_len);
+ supp->pattern_min_len = le32_to_cpu(data1->pattern_min_len);
+
+ mac_info->wowlan = supp;
+ break;
+ default:
+ pr_warn("MAC%u: unsupported WoWLAN version 0x%x\n",
+ mac->macid, le16_to_cpu(wowlan->version));
+ kfree(supp);
+ break;
+ }
+}
+
static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const u8 *tlv_buf, size_t tlv_buf_size)
{
@@ -1144,6 +1178,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const struct qlink_iface_comb_num *comb_num;
const struct qlink_iface_limit_record *rec;
const struct qlink_iface_limit *lim;
+ const struct qlink_wowlan_capab_data *wowlan;
u16 rec_len;
u16 tlv_type;
u16 tlv_value_len;
@@ -1252,7 +1287,31 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
ext_capa_mask = (u8 *)tlv->val;
ext_capa_mask_len = tlv_value_len;
break;
+ case QTN_TLV_ID_WOWLAN_CAPAB:
+ if (tlv_value_len < sizeof(*wowlan))
+ return -EINVAL;
+
+ wowlan = (void *)tlv->val;
+ if (!le16_to_cpu(wowlan->len)) {
+ pr_warn("MAC%u: skip empty WoWLAN data\n",
+ mac->macid);
+ break;
+ }
+
+ rec_len = sizeof(*wowlan) + le16_to_cpu(wowlan->len);
+ if (unlikely(tlv_value_len != rec_len)) {
+ pr_warn("MAC%u: WoWLAN data size mismatch\n",
+ mac->macid);
+ return -EINVAL;
+ }
+
+ kfree(mac->macinfo.wowlan);
+ mac->macinfo.wowlan = NULL;
+ qtnf_parse_wowlan_info(mac, wowlan);
+ break;
default:
+ pr_warn("MAC%u: unknown TLV type %u\n",
+ mac->macid, tlv_type);
break;
}
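The WoWLAN capability TLV handled above is versioned and variable-length; the two length checks pin down the version-1 wire layout, reconstructed here from the struct definitions added to qlink.h later in this patch (byte counts assume those definitions):

	/* QTN_TLV_ID_WOWLAN_CAPAB, version 1:
	 *
	 *   qlink_tlv_hdr            { type, len }           4 bytes
	 *   qlink_wowlan_capab_data  { version = 1, len }    4 bytes
	 *   qlink_wowlan_support     { n_patterns,
	 *                              pattern_max_len,
	 *                              pattern_min_len }    12 bytes
	 *
	 * tlv_value_len must equal sizeof(capab_data) + capab_data.len;
	 * an unknown version is skipped with a warning, not treated as
	 * a fatal error.
	 */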
@@ -2234,6 +2293,22 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
qchan->chan.flags = cpu_to_le32(flags);
}
+static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
+ const u8 *mac_addr,
+ const u8 *mac_addr_mask)
+{
+ struct qlink_random_mac_addr *randmac;
+ struct qlink_tlv_hdr *hdr =
+ skb_put(cmd_skb, sizeof(*hdr) + sizeof(*randmac));
+
+ hdr->type = cpu_to_le16(QTN_TLV_ID_RANDOM_MAC_ADDR);
+ hdr->len = cpu_to_le16(sizeof(*randmac));
+ randmac = (struct qlink_random_mac_addr *)hdr->val;
+
+ memcpy(randmac->mac_addr, mac_addr, ETH_ALEN);
+ memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
+}
+
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
@@ -2244,11 +2319,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
int count = 0;
int ret;
- if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
- pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
- return -EINVAL;
- }
-
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
sizeof(struct qlink_cmd));
@@ -2291,6 +2361,15 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
}
}
+ if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
+ mac->macid,
+ scan_req->mac_addr, scan_req->mac_addr_mask);
+
+ qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr,
+ scan_req->mac_addr_mask);
+ }
+
ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
if (unlikely(ret))
@@ -2774,3 +2853,93 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
return ret;
}
+
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_pm_set *cmd;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_PM_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_pm_set *)cmd_skb->data;
+ cmd->pm_mode = pm_mode;
+ cmd->pm_standby_timer = cpu_to_le32(timeout);
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
+
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_wowlan_set *cmd;
+ u32 triggers = 0;
+ int count = 0;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_WOWLAN_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ qtnf_bus_lock(bus);
+
+ cmd = (struct qlink_cmd_wowlan_set *)cmd_skb->data;
+
+ if (wowl) {
+ if (wowl->disconnect)
+ triggers |= QLINK_WOWLAN_TRIG_DISCONNECT;
+
+ if (wowl->magic_pkt)
+ triggers |= QLINK_WOWLAN_TRIG_MAGIC_PKT;
+
+ if (wowl->n_patterns && wowl->patterns) {
+ triggers |= QLINK_WOWLAN_TRIG_PATTERN_PKT;
+ while (count < wowl->n_patterns) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb,
+ QTN_TLV_ID_WOWLAN_PATTERN,
+ wowl->patterns[count].pattern,
+ wowl->patterns[count].pattern_len);
+ count++;
+ }
+ }
+ }
+
+ cmd->triggers = cpu_to_le32(triggers);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index cf9274add26d..1ac41156c192 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,8 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
u32 cac_time_ms);
int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
const struct cfg80211_acl_data *params);
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index a6a450984f9a..19abbc4e23e0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -179,6 +179,30 @@ static void qtnf_netdev_tx_timeout(struct net_device *ndev)
}
}
+static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct sockaddr *sa = addr;
+ int ret;
+ unsigned char old_addr[ETH_ALEN];
+
+ memcpy(old_addr, ndev->dev_addr, sizeof(old_addr));
+
+ ret = eth_mac_addr(ndev, sa);
+ if (ret)
+ return ret;
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype,
+ sa->sa_data);
+
+ if (ret)
+ memcpy(ndev->dev_addr, old_addr, ETH_ALEN);
+
+ return ret;
+}
+
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
.ndo_open = qtnf_netdev_open,
@@ -186,6 +210,7 @@ const struct net_device_ops qtnf_netdev_ops = {
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
.ndo_get_stats64 = qtnf_netdev_get_stats64,
+ .ndo_set_mac_address = qtnf_netdev_set_mac_address,
};
static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -470,6 +495,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
qtnf_mac_iface_comb_free(mac);
kfree(mac->macinfo.extended_capabilities);
kfree(mac->macinfo.extended_capabilities_mask);
+ kfree(mac->macinfo.wowlan);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 214435448335..a1e338a1f055 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -40,7 +40,6 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
-#define QTNF_MAX_SSID_LIST_LENGTH 2
#define QTNF_MAX_VSIE_LEN 255
#define QTNF_MAX_INTF 8
#define QTNF_MAX_EVENT_QUEUE_LEN 255
@@ -111,6 +110,7 @@ struct qtnf_mac_info {
u8 *extended_capabilities;
u8 *extended_capabilities_mask;
u8 extended_capabilities_len;
+ struct wiphy_wowlan_support *wowlan;
};
struct qtnf_chan_stats {
@@ -145,6 +145,7 @@ struct qtnf_hw_info {
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
+ u8 max_scan_ssids;
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index f85deda703fb..99d37e3efba6 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -69,11 +69,16 @@ struct qlink_msg_header {
* associated STAs due to inactivity. Inactivity timeout period is taken
* from QLINK_CMD_START_AP parameters.
* @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
+ * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC address
+ * randomization in probe requests
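+ * @QLINK_HW_CAPAB_PWR_MGMT: device supports power management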
*/
enum qlink_hw_capab {
QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
+ QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
+ QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
};
enum qlink_iface_type {
@@ -253,6 +257,8 @@ enum qlink_cmd_type {
QLINK_CMD_CHAN_STATS = 0x0054,
QLINK_CMD_CONNECT = 0x0060,
QLINK_CMD_DISCONNECT = 0x0061,
+ QLINK_CMD_PM_SET = 0x0062,
+ QLINK_CMD_WOWLAN_SET = 0x0063,
};
/**
@@ -665,6 +671,54 @@ struct qlink_acl_data {
struct qlink_mac_address mac_addrs[0];
} __packed;
+/**
+ * enum qlink_pm_mode - Power Management mode
+ *
+ * @QLINK_PM_OFF: normal mode, no power saving enabled
+ * @QLINK_PM_AUTO_STANDBY: enable auto power save mode
+ */
+enum qlink_pm_mode {
+ QLINK_PM_OFF = 0,
+ QLINK_PM_AUTO_STANDBY = 1,
+};
+
+/**
+ * struct qlink_cmd_pm_set - data for QLINK_CMD_PM_SET command
+ *
+ * @pm_standby_timer: period of network inactivity in seconds before
+ * putting a radio in power save mode
+ * @pm_mode: power management mode
+ */
+struct qlink_cmd_pm_set {
+ struct qlink_cmd chdr;
+ __le32 pm_standby_timer;
+ u8 pm_mode;
+} __packed;
+
+/**
+ * enum qlink_wowlan_trigger
+ *
+ * @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
+ * @QLINK_WOWLAN_TRIG_MAGIC_PKT: wakeup on magic packet
+ * @QLINK_WOWLAN_TRIG_PATTERN_PKT: wakeup on user-defined packet
+ */
+enum qlink_wowlan_trigger {
+ QLINK_WOWLAN_TRIG_DISCONNECT = BIT(0),
+ QLINK_WOWLAN_TRIG_MAGIC_PKT = BIT(1),
+ QLINK_WOWLAN_TRIG_PATTERN_PKT = BIT(2),
+};
+
+/**
+ * struct qlink_cmd_wowlan_set - data for QLINK_CMD_WOWLAN_SET command
+ *
+ * @triggers: requested bitmask of WoWLAN triggers
+ */
+struct qlink_cmd_wowlan_set {
+ struct qlink_cmd chdr;
+ __le32 triggers;
+ u8 data[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -1062,6 +1116,8 @@ struct qlink_event_radar {
* @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
* &struct qlink_sta_stats. Valid values are marked as such in a bitmap
* carried by QTN_TLV_ID_STA_STATS_MAP.
+ * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
+ * for in any given scan.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1089,6 +1145,10 @@ enum qlink_tlv_id {
QTN_TLV_ID_HW_ID = 0x0405,
QTN_TLV_ID_CALIBRATION_VER = 0x0406,
QTN_TLV_ID_UBOOT_VER = 0x0407,
+ QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
+ QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
+ QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
+ QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
};
struct qlink_tlv_hdr {
@@ -1360,4 +1420,49 @@ struct qlink_sta_stats {
u8 rsvd[1];
};
+/**
+ * struct qlink_random_mac_addr - data for QTN_TLV_ID_RANDOM_MAC_ADDR TLV
+ *
+ * Specifies the MAC address value/mask pair used to generate a random
+ * MAC address during scan.
+ *
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ */
+struct qlink_random_mac_addr {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_wowlan_capab_data - data for QTN_TLV_ID_WOWLAN_CAPAB TLV
+ *
+ * WoWLAN capabilities supported by cards.
+ *
+ * @version: version of WoWLAN data structure, to ensure backward
+ * compatibility for firmwares with limited WoWLAN support
+ * @len: Total length of WoWLAN data
+ * @data: supported WoWLAN features
+ */
+struct qlink_wowlan_capab_data {
+ __le16 version;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct qlink_wowlan_support - supported WoWLAN capabilities
+ *
+ * @n_patterns: number of supported wakeup patterns
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ */
+struct qlink_wowlan_support {
+ __le32 n_patterns;
+ __le32 pattern_max_len;
+ __le32 pattern_min_len;
+} __packed;
+
#endif /* _QTN_QLINK_H_ */
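The mask convention documented for qlink_random_mac_addr above matches the cfg80211 one: a 1 bit in the mask pins the corresponding address bit, a 0 bit leaves it to be randomized. A minimal sketch of that combination (an illustrative helper, not part of qlink):

	static void example_randomize_addr(u8 *out, const u8 *addr,
					   const u8 *mask, const u8 *rnd)
	{
		int i;

		/* keep pinned bits from addr, fill the rest from rnd */
		for (i = 0; i < ETH_ALEN; i++)
			out[i] = (addr[i] & mask[i]) | (rnd[i] & ~mask[i]);
	}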
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index c380c1f56ba6..fa2fd64084ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -527,24 +527,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
-int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
-
-int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
-
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
index bc0ca5f58f38..283e2e607bba 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
@@ -28,12 +28,6 @@
#include <linux/pci.h>
/*
- * This variable should be used with the
- * pci_driver structure initialization.
- */
-#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
-
-/*
* PCI driver handlers.
*/
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index a7e0a17aa7e8..08c607c031bc 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -470,7 +470,6 @@ static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
static int ray_init(struct net_device *dev)
{
int i;
- UCHAR *p;
struct ccs __iomem *pccs;
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
@@ -513,12 +512,9 @@ static int ray_init(struct net_device *dev)
init_startup_params(local);
/* copy mac address to startup parameters */
- if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
- p = local->sparm.b4.a_mac_addr;
- } else {
+ if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
memcpy(&local->sparm.b4.a_mac_addr,
&local->startup_res.station_addr, ADDRLEN);
- p = local->sparm.b4.a_mac_addr;
}
clear_interrupt(local); /* Clear any interrupt from the card */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index fde89866fa8d..51e32df6120b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -363,7 +363,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
- rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+ rtl8187se_rf_writereg(dev, 0x02, 0x088D); msleep(221);
rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
@@ -386,7 +386,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
- rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); msleep(31);
rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 39c817eddd78..f4122c8fdd97 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
}
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
del_timer_sync(&rtlpriv->works.watchdog_timer);
- cancel_delayed_work(&rtlpriv->works.watchdog_wq);
- cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
- cancel_delayed_work(&rtlpriv->works.ps_work);
- cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
- cancel_delayed_work(&rtlpriv->works.fwevt_wq);
- cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
+ if (ips_wq)
+ cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+ else
+ cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.ps_work);
+ cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
}
EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
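The new ips_wq flag encodes a deadlock-avoidance rule: rtl_deinit_deferred_work() can be reached from inside the ips_nic_off_wq work item itself (rtl_ps_disable_nic() in the ps.c hunk below passes true), and cancel_delayed_work_sync() on the work item that is currently running this code would wait on itself forever. Schematically:

	/* Why ips_nic_off_wq keeps the asynchronous cancel:
	 *
	 *   ips_nic_off_wq callback runs
	 *     -> rtl_ps_disable_nic(hw)
	 *          -> rtl_deinit_deferred_work(hw, true)
	 *               -> cancel_delayed_work(&ips_nic_off_wq)
	 *                  a _sync cancel here would block on the very
	 *                  work item this call chain executes in
	 */

Every other work item is cancelled synchronously so none of them can run after teardown.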
@@ -1904,7 +1907,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
- (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+ (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
}
EXPORT_SYMBOL(rtl_rx_ampdu_apply);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 912f205779c3..a7ae40eaa3cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
void rtl_watch_dog_timer_callback(struct timer_list *t);
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index df3facc8e5a4..6597f7cb3411 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -2818,23 +2818,11 @@ static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -2949,23 +2937,11 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3008,23 +2984,11 @@ static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3071,23 +3035,11 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3121,23 +3073,11 @@ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3189,23 +3129,11 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3264,23 +3192,11 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -3336,23 +3252,11 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -3436,23 +3340,11 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index cfea57efa7f4..4bf7967590ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -130,7 +130,6 @@ found_alt:
firmware->size);
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
- rtlpriv->rtlhal.fwsize = firmware->size;
release_firmware(firmware);
}
@@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
/* reset sec info */
rtl_cam_reset_sec_info(hw);
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, false);
}
rtlpriv->intf_ops->adapter_stop(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index ae13bcfb3bf0..5d1fda16fc8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
ieee80211_unregister_hw(hw);
rtlmac->mac80211_registered = 0;
} else {
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, false);
rtlpriv->intf_ops->adapter_stop(hw);
}
rtlpriv->cfg->ops->disable_interrupt(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 71af24e2e051..479a4cfc245d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
/*<1> Stop all timer */
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, true);
/*<2> Disable Interrupt */
rtlpriv->cfg->ops->disable_interrupt(hw);
@@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate rtstate;
- cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
mutex_lock(&rtlpriv->locks.ips_mutex);
if (ppsc->inactiveps) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index f9faffc498bc..2ac5004d7a40 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
ieee80211_unregister_hw(hw);
rtlmac->mac80211_registered = 0;
} else {
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, false);
rtlpriv->intf_ops->adapter_stop(hw);
}
/*deinit rfkill */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 9935bd09db1f..51e4e92d95a0 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2480,7 +2480,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
if (ret == 0) {
sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
len = sizeof(rssi);
@@ -2488,7 +2488,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
&rssi, &len);
if (ret == 0) {
sinfo->signal = level_to_qual(le32_to_cpu(rssi));
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
}
@@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
+ if (buflen < le32_to_cpu(auth_req->length))
+ return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 0761e61591bd..01edf960ff3c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -26,6 +26,9 @@ static struct ta_metadata metadata_flash_content[] = {
{"flash_content", 0x00010000},
{"rsi/rs9113_wlan_qspi.rps", 0x00010000},
{"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000},
+ {"flash_content", 0x00010000},
+ {"rsi/rs9113_ap_bt_dual_mode.rps", 0x00010000},
+
};
int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
@@ -54,7 +57,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_vif *vif;
struct rsi_mgmt_desc *mgmt_desc;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss = NULL;
struct rsi_xtended_desc *xtend_desc = NULL;
u8 header_size;
u32 dword_align_bytes = 0;
@@ -88,7 +90,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
tx_params->internal_hdr_size = header_size;
memset(&skb->data[0], 0, header_size);
- bss = &vif->bss_conf;
wh = (struct ieee80211_hdr *)&skb->data[header_size];
mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
@@ -145,7 +146,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_hdr *wh = NULL;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss;
struct rsi_data_desc *data_desc;
struct rsi_xtended_desc *xtend_desc;
u8 ieee80211_size = MIN_802_11_HDR_LEN;
@@ -156,7 +156,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
- bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
@@ -246,7 +245,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
}
}
- data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff);
+ data_desc->mac_flags |= cpu_to_le16(seq_num & 0xfff);
data_desc->qid_tid = ((skb->priority & 0xf) |
((tx_params->tid & 0xf) << 4));
data_desc->sta_id = tx_params->sta_id;
@@ -285,7 +284,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
struct ieee80211_bss_conf *bss;
- struct ieee80211_hdr *wh;
int status = -EINVAL;
u8 header_size;
@@ -301,7 +299,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
header_size = tx_params->internal_hdr_size;
- wh = (struct ieee80211_hdr *)&skb->data[header_size];
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
@@ -744,13 +741,11 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size)
static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
u32 content_size)
{
- u8 cmd, *temp_flash_content;
+ u8 cmd;
u32 temp_content_size, num_flash, index;
u32 flash_start_address;
int status;
- temp_flash_content = flash_content;
-
if (content_size > MAX_FLASH_FILE_SIZE) {
rsi_dbg(ERR_ZONE,
"%s: Flash Content size is more than 400K %u\n",
@@ -842,7 +837,6 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
const struct firmware *fw_entry = NULL;
u32 regout_val = 0, content_size;
u16 tmp_regout_val = 0;
- u8 *flash_content = NULL;
struct ta_metadata *metadata_p;
int status;
@@ -904,28 +898,22 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
__func__, metadata_p->name);
return status;
}
- flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
- if (!flash_content) {
- rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__);
- status = -EIO;
- goto fail;
- }
content_size = fw_entry->size;
rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
/* Get the firmware version */
common->lmac_ver.ver.info.fw_ver[0] =
- flash_content[LMAC_VER_OFFSET] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
common->lmac_ver.ver.info.fw_ver[1] =
- flash_content[LMAC_VER_OFFSET + 1] & 0xFF;
- common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
+ common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
common->lmac_ver.release_num =
- flash_content[LMAC_VER_OFFSET + 3] & 0xFF;
- common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
+ common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
common->lmac_ver.patch_num = 0;
rsi_print_version(common);
- status = bl_write_header(adapter, flash_content, content_size);
+ status = bl_write_header(adapter, (u8 *)fw_entry->data, content_size);
if (status) {
rsi_dbg(ERR_ZONE,
"%s: RPS Image header loading failed\n",
@@ -967,7 +955,7 @@ fw_upgrade:
rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n");
- status = auto_fw_upgrade(adapter, flash_content, content_size);
+ status = auto_fw_upgrade(adapter, (u8 *)fw_entry->data, content_size);
if (status == 0) {
rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n");
goto load_image_cmd;
@@ -981,13 +969,11 @@ fw_upgrade:
success:
rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n");
- kfree(flash_content);
release_firmware(fw_entry);
return 0;
fail:
rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n");
- kfree(flash_content);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 2ca7464b7fa3..4e510cbe0a89 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -416,7 +416,8 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
/* Get free vap index */
for (i = 0; i < RSI_MAX_VIFS; i++) {
- if (!adapter->vifs[i]) {
+ if (!adapter->vifs[i] ||
+ !memcmp(vif->addr, adapter->vifs[i]->addr, ETH_ALEN)) {
vap_idx = i;
break;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 1485a0c89df2..01d99ed985ee 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -122,7 +122,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
u8 extended_desc)
{
struct ieee80211_tx_info *info;
- struct skb_info *rx_params;
struct sk_buff *skb = NULL;
u8 payload_offset;
struct ieee80211_vif *vif;
@@ -149,10 +148,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
vif = rsi_get_vif(common->priv, wh->addr1);
info = IEEE80211_SKB_CB(skb);
- rx_params = (struct skb_info *)info->driver_data;
- rx_params->rssi = rsi_get_rssi(buffer);
- rx_params->channel = rsi_get_connected_channel(vif);
-
return skb;
}
@@ -336,7 +331,6 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
spin_lock_init(&adapter->ps_lock);
timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
init_completion(&common->wlan_init_completion);
- common->init_done = true;
adapter->device_model = RSI_DEV_9113;
common->oper_mode = oper_mode;
@@ -374,6 +368,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
}
#endif
+ common->init_done = true;
return adapter;
err:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index d0e5937cad6d..1095df7d9573 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -334,20 +334,17 @@ static int rsi_load_radio_caps(struct rsi_common *common)
struct ieee80211_conf *conf = &hw->conf;
if (conf_is_ht40_plus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(LOWER_20_ENABLE |
+ (LOWER_20_ENABLE >> 12));
} else if (conf_is_ht40_minus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(UPPER_20_ENABLE |
+ (UPPER_20_ENABLE >> 12));
} else {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_40MHZ;
- radio_caps->radio_info =
- RSI_CMDDESC_FULL_40_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16((BW_40MHZ << 12) |
+ FULL40M_ENABLE);
}
}
}
@@ -749,7 +746,7 @@ int rsi_hal_load_key(struct rsi_common *common,
key_descriptor |= RSI_CIPHER_TKIP;
}
key_descriptor |= RSI_PROTECT_DATA_FRAMES;
- key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK);
+ key_descriptor |= (key_id << RSI_KEY_ID_OFFSET);
rsi_set_len_qno(&set_key->desc_dword0.len_qno,
(frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 416981d99229..5733e440ecaf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1394,10 +1394,7 @@ static const struct dev_pm_ops rsi_pm_ops = {
#endif
static const struct sdio_device_id rsi_dev_table[] = {
- { SDIO_DEVICE(0x303, 0x100) },
- { SDIO_DEVICE(0x041B, 0x0301) },
- { SDIO_DEVICE(0x041B, 0x0201) },
- { SDIO_DEVICE(0x041B, 0x9330) },
+ { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 6ce6b754df12..c0a163e40402 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -835,11 +835,7 @@ static int rsi_resume(struct usb_interface *intf)
#endif
static const struct usb_device_id rsi_dev_table[] = {
- { USB_DEVICE(0x0303, 0x0100) },
- { USB_DEVICE(0x041B, 0x0301) },
- { USB_DEVICE(0x041B, 0x0201) },
- { USB_DEVICE(0x041B, 0x9330) },
- { USB_DEVICE(0x1618, 0x9113) },
+ { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 14620935c925..359fbdf85739 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -22,7 +22,7 @@
#include "rsi_main.h"
#define MAX_MGMT_PKT_SIZE 512
-#define RSI_NEEDED_HEADROOM 80
+#define RSI_NEEDED_HEADROOM 84
#define RSI_RCV_BUFFER_LEN 2000
#define RSI_11B_MODE 0
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 353dbdf31e75..66dcd2ec9051 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,6 +28,9 @@
#include <linux/mmc/sdio_ids.h>
#include "rsi_main.h"
+#define RSI_SDIO_VID_9113 0x041B
+#define RSI_SDIO_PID_9113 0x9330
+
enum sdio_interrupt_type {
BUFFER_FULL = 0x0,
BUFFER_AVAILABLE = 0x2,
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index b6fe79f0a513..5b2eddd1a2ee 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,6 +22,9 @@
#include "rsi_main.h"
#include "rsi_common.h"
+#define RSI_USB_VID_9113 0x1618
+#define RSI_USB_PID_9113 0x9113
+
#define USB_INTERNAL_REG_1 0x25000
#define RSI_USB_READY_MAGIC_NUM 0xab
#define FW_STATUS_REG 0x41050012
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 22009e14a8fc..4a4f797bb10f 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -20,6 +20,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/err.h>
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 86ccf84ea0c6..597e934c4630 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -20,6 +20,8 @@
*
*/
+#include <linux/pm_runtime.h>
+
#include "../wlcore/debugfs.h"
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
@@ -276,15 +278,18 @@ static ssize_t radar_detection_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl18xx_cmd_radar_detection_debug(wl, channel);
if (ret < 0)
count = ret;
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -315,15 +320,18 @@ static ssize_t dynamic_fw_traces_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl18xx_acx_dynamic_fw_traces(wl);
if (ret < 0)
count = ret;
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -374,9 +382,11 @@ static ssize_t radar_debug_mode_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wlcore_cmd_generic_cfg(wl, wlvif,
@@ -384,7 +394,8 @@ static ssize_t radar_debug_mode_write(struct file *file,
wl->radar_debug_mode, 0);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ca0f936fc119..496b9b63cea1 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -20,6 +20,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 3ca9167d6146..7c83915a7c5e 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -31,7 +31,6 @@
#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
-#include "ps.h"
#include "hw_ops.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 761cf8573a80..903968735a74 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
@@ -191,6 +192,12 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+ goto free_vector;
+ }
+
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +229,9 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
} while (!event);
out:
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
+free_vector:
kfree(events_vector);
return ret;
}
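
This hunk is the template the rest of the series repeats: pm_runtime_get_sync() increments the usage count even when the resume fails, so the error path must drop the count with pm_runtime_put_noidle(), while the success path marks the device busy and releases it through the autosuspend timer. Condensed into one sketch:

#include <linux/pm_runtime.h>

static int ex_fw_op(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* the usage count was raised even on failure; undo it */
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... device is powered up, talk to the firmware ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
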
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index a2cb408be8aa..aeb74e74698e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -26,6 +26,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
@@ -65,9 +66,11 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (!wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
@@ -76,7 +79,8 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -118,14 +122,18 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value,
return;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+
return;
+ }
chip_op = arg;
chip_op(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
@@ -292,9 +300,11 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* In case we're already in PSM, trigger it again to set new timeout
* immediately without waiting for re-association
@@ -305,7 +315,8 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -359,9 +370,11 @@ static ssize_t forced_ps_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* In case we're already in PSM, trigger it again to switch mode
* immediately without waiting for re-association
@@ -374,7 +387,8 @@ static ssize_t forced_ps_write(struct file *file,
wl1271_ps_set_mode(wl, wlvif, ps_mode);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -838,15 +852,18 @@ static ssize_t rx_streaming_interval_write(struct file *file,
wl->conf.rx_streaming.interval = value;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -893,15 +910,18 @@ static ssize_t rx_streaming_always_write(struct file *file,
wl->conf.rx_streaming.always = value;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -940,15 +960,18 @@ static ssize_t beacon_filtering_write(struct file *file,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -1019,16 +1042,19 @@ static ssize_t sleep_auth_write(struct file *file,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_acx_sleep_auth(wl, value);
if (ret < 0)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -1083,7 +1109,7 @@ static ssize_t dev_mem_read(struct file *file,
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be read even if the FW crashed
*/
- wl1271_ps_elp_wakeup(wl);
+ pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1102,7 +1128,8 @@ read_err:
goto part_err;
part_err:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
skip_read:
mutex_unlock(&wl->mutex);
@@ -1164,7 +1191,7 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be read even if the FW crashed
*/
- wl1271_ps_elp_wakeup(wl);
+ pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1183,7 +1210,8 @@ write_err:
goto part_err;
part_err:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
skip_write:
mutex_unlock(&wl->mutex);
@@ -1247,8 +1275,9 @@ static ssize_t fw_logger_write(struct file *file,
}
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
count = ret;
goto out;
}
@@ -1257,7 +1286,8 @@ static ssize_t fw_logger_write(struct file *file,
ret = wl12xx_cmd_config_fwlog(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 3a51ab116e79..89b0d0fade9f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
@@ -43,6 +44,7 @@
#define WL1271_BOOT_RETRIES 3
#define WL1271_SUSPEND_SLEEP 100
+#define WL1271_WAKEUP_TIMEOUT 500
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
@@ -153,9 +155,11 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (!wl->conf.rx_streaming.interval)
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
@@ -166,7 +170,8 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -183,16 +188,19 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work)
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -229,9 +237,11 @@ static void wlcore_rc_update_work(struct work_struct *work)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (ieee80211_vif_is_mesh(vif)) {
ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
@@ -243,7 +253,8 @@ static void wlcore_rc_update_work(struct work_struct *work)
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -539,15 +550,16 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
while (!done && loopcount--) {
/*
* In order to avoid a race with the hardirq, clear the flag
- * before acknowledging the chip. Since the mutex is held,
- * wl1271_ps_elp_wakeup cannot be called concurrently.
+ * before acknowledging the chip.
*/
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_atomic();
@@ -641,7 +653,8 @@ static int wlcore_irq_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
return ret;
@@ -796,8 +809,6 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl)
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
- wl1271_ps_elp_wakeup(wl);
- wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
}
@@ -819,6 +830,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
u32 end_of_log = 0;
+ int error;
if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
return;
@@ -830,8 +842,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* Do not send a stop fwlog command if the fw is hanged or if
* dbgpins are used (due to some fw bug).
*/
- if (wl1271_ps_elp_wakeup(wl))
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(wl->dev);
return;
+ }
if (!wl->watchdog_recovery &&
wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
@@ -919,12 +934,20 @@ static void wl1271_recovery_work(struct work_struct *work)
container_of(work, struct wl1271, recovery_work);
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
+ int error;
mutex_lock(&wl->mutex);
if (wl->state == WLCORE_STATE_OFF || wl->plt)
goto out_unlock;
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0) {
+ wl1271_warning("Enable for recovery failed");
+ pm_runtime_put_noidle(wl->dev);
+ }
+ wlcore_disable_interrupts_nosync(wl);
+
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
wl12xx_read_fwlog_panic(wl);
@@ -958,6 +981,8 @@ static void wl1271_recovery_work(struct work_struct *work)
}
wlcore_op_stop_locked(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
ieee80211_restart_hw(wl->hw);
@@ -978,24 +1003,6 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
-static int wlcore_fw_sleep(struct wl1271 *wl)
-{
- int ret;
-
- mutex_lock(&wl->mutex);
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto out;
- }
- set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-out:
- mutex_unlock(&wl->mutex);
- mdelay(WL1271_SUSPEND_SLEEP);
-
- return 0;
-}
-
static int wl1271_setup(struct wl1271 *wl)
{
wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1184,7 +1191,6 @@ int wl1271_plt_stop(struct wl1271 *wl)
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
- cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
mutex_lock(&wl->mutex);
@@ -1719,6 +1725,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
+ unsigned long flags;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
@@ -1734,8 +1741,9 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
return ret;
}
@@ -1765,6 +1773,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
goto out_sleep;
out_sleep:
+ pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
if (ret < 0) {
@@ -1775,21 +1784,7 @@ out_sleep:
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
- /*
- * disable and re-enable interrupts in order to flush
- * the threaded_irq
- */
- wlcore_disable_interrupts(wl);
-
- /*
- * set suspended flag to avoid triggering a new threaded_irq
- * work. no need for spinlock as interrupts are disabled.
- */
- set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
-
- wlcore_enable_interrupts(wl);
flush_work(&wl->tx_work);
- flush_delayed_work(&wl->elp_work);
/*
* Cancel the watchdog even if above tx_flush failed. We will detect
@@ -1798,15 +1793,14 @@ out_sleep:
cancel_delayed_work(&wl->tx_watchdog_work);
/*
- * Use an immediate call for allowing the firmware to go into power
- * save during suspend.
- * Using a workqueue for this last write was only happening on resume
- * leaving the firmware with power save disabled during suspend,
- * while consuming full power during wowlan suspend.
+ * set suspended flag to avoid triggering a new threaded_irq
+ * work.
*/
- wlcore_fw_sleep(wl);
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
- return 0;
+ return pm_runtime_force_suspend(wl->dev);
}
static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
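
The suspend path now ends by delegating to runtime PM instead of open-coding the ELP write: pm_runtime_force_suspend() invokes the driver's runtime-suspend callback (the ELP entry added later in this diff) on behalf of system sleep, and pm_runtime_force_resume() is its counterpart on wakeup, as the resume hunk below shows. The pairing in isolation:

#include <linux/pm_runtime.h>

static int ex_sys_suspend(struct device *dev)
{
        /* reuse the runtime-suspend callback for system sleep */
        return pm_runtime_force_suspend(dev);
}

static int ex_sys_resume(struct device *dev)
{
        return pm_runtime_force_resume(dev);
}
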
@@ -1821,6 +1815,12 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
wl->wow_enabled);
WARN_ON(!wl->wow_enabled);
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+ goto out_sleep;
+ }
+
/*
* re-enable irq_work enqueuing, and call irq_work directly if
* there is a pending work.
@@ -1857,9 +1857,11 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -1878,7 +1880,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
wl->wow_enabled = false;
@@ -1945,7 +1948,6 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
- cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
/* let's notify MAC80211 about the remaining pending TX frames */
@@ -2060,13 +2062,16 @@ static void wlcore_channel_switch_work(struct work_struct *work)
vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_chswitch_done(vif, false);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_cmd_stop_channel_switch(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -2128,14 +2133,17 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
if (!time_after(time_spare, wlvif->pending_auth_reply_time))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* cancel the ROC if active */
wlcore_update_inconn_sta(wl, wlvif, NULL, false);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -2537,9 +2545,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl12xx_get_vif_count(hw, vif, &vif_count);
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_unlock;
/*
* in some very corner case HW recovery scenarios its possible to
@@ -2568,14 +2573,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl12xx_need_fw_change(wl, vif_count, true)) {
- wl12xx_force_active_psm(wl);
- set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
- mutex_unlock(&wl->mutex);
- wl1271_recovery_work(&wl->recovery_work);
- return 0;
- }
-
/*
* TODO: after the nvs issue will be solved, move this block
* to start(), and make sure here the driver is ON.
@@ -2592,6 +2589,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ /*
+ * Call runtime PM only after possible wl12xx_init_fw() above
+ * is done. Otherwise we do not have interrupts enabled.
+ */
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+ goto out_unlock;
+ }
+
+ if (wl12xx_need_fw_change(wl, vif_count, true)) {
+ wl12xx_force_active_psm(wl);
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+ mutex_unlock(&wl->mutex);
+ wl1271_recovery_work(&wl->recovery_work);
+ return 0;
+ }
+
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_enable(wl, vif->addr,
role_type, &wlvif->role_id);
@@ -2622,7 +2637,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
else
wl->sta_count++;
out:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
@@ -2677,9 +2693,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto deinit;
+ }
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
@@ -2697,7 +2715,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
goto deinit;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
deinit:
wl12xx_tx_reset_wlvif(wl, wlvif);
@@ -3121,9 +3140,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* configure each interface */
wl12xx_for_each_wlvif(wl, wlvif) {
@@ -3133,7 +3154,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3202,9 +3224,11 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -3247,7 +3271,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
*/
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3454,13 +3479,16 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_wake_queues;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_wake_queues;
+ }
ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_wake_queues:
if (might_change_spare)
@@ -3600,9 +3628,11 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
goto out_unlock;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_unlock;
+ }
wlvif->default_key = key_idx;
@@ -3616,7 +3646,8 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
@@ -3634,7 +3665,7 @@ void wlcore_regdomain_config(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0)
goto out;
@@ -3644,7 +3675,8 @@ void wlcore_regdomain_config(struct wl1271 *wl)
goto out;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -3678,9 +3710,11 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* fail if there is any role in ROC */
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
@@ -3691,7 +3725,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3718,9 +3753,11 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
ret = wl->ops->scan_stop(wl, wlvif);
@@ -3741,7 +3778,8 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
ieee80211_scan_completed(wl->hw, &info);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3766,9 +3804,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
@@ -3777,7 +3817,8 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
wl->sched_vif = wlvif;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
@@ -3797,13 +3838,16 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl->ops->sched_scan_stop(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3822,15 +3866,18 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_acx_frag_threshold(wl, value);
if (ret < 0)
wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3851,16 +3898,19 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_rts_threshold(wl, wlvif, value);
if (ret < 0)
wl1271_warning("set rts threshold failed: %d", ret);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4607,9 +4657,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if ((changed & BSS_CHANGED_TXPOWER) &&
bss_conf->txpower != wlvif->power_level) {
@@ -4626,7 +4678,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
else
wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4665,9 +4718,11 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -4690,7 +4745,8 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
}
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -4719,9 +4775,11 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wlvif->band = ctx->def.chan->band;
wlvif->channel = channel;
@@ -4737,7 +4795,8 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
wlvif->radar_enabled = true;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4768,9 +4827,11 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (wlvif->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
@@ -4778,7 +4839,8 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
wlvif->radar_enabled = false;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -4835,9 +4897,11 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
for (i = 0; i < n_vifs; i++) {
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
@@ -4847,7 +4911,8 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
goto out_sleep;
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4878,9 +4943,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/*
* the txop is configured in units of 32us by mac80211,
@@ -4899,7 +4966,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
0, 0);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4923,16 +4991,19 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
if (ret < 0)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5238,13 +5309,16 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
if (new_state < old_state)
@@ -5293,9 +5367,11 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ba_bitmap = &wl->links[hlid].ba_bitmap;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
tid, action);
@@ -5368,7 +5444,8 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5402,16 +5479,19 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate =
wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
out:
mutex_unlock(&wl->mutex);
@@ -5441,9 +5521,11 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* TODO: change mac80211 to pass vif as param */
@@ -5465,7 +5547,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5532,9 +5615,11 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
if (ret)
@@ -5543,7 +5628,8 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -5584,9 +5670,11 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
if (ret < 0)
@@ -5596,7 +5684,8 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
msecs_to_jiffies(duration));
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
@@ -5638,13 +5727,16 @@ static int wlcore_roc_completed(struct wl1271 *wl)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = __wlcore_roc_completed(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5719,19 +5811,22 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_sleep;
+ }
ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
if (ret < 0)
goto out_sleep;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi_dbm;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -6065,16 +6160,16 @@ static int wl1271_register_hw(struct wl1271 *wl)
}
if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
- wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
+ wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
if (!strcmp(pdev_data->family->name, "wl18xx")) {
- wl1271_warning("This default nvs file can be removed from the file system\n");
+ wl1271_warning("This default nvs file can be removed from the file system");
} else {
- wl1271_warning("Your device performance is not optimized.\n");
- wl1271_warning("Please use the calibrator tool to configure your device.\n");
+ wl1271_warning("Your device performance is not optimized.");
+ wl1271_warning("Please use the calibrator tool to configure your device.");
}
if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
- wl1271_warning("Fuse mac address is zero. using random mac\n");
+ wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
nic_addr = get_random_int();
@@ -6300,7 +6395,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
- INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
@@ -6575,6 +6669,99 @@ out:
complete_all(&wl->nvs_loading_complete);
}
+static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ struct wl12xx_vif *wlvif;
+ int error;
+
+ /* We do not enter elp sleep in PLT mode */
+ if (wl->plt)
+ return 0;
+
+ /* Nothing to do if no ELP mode requested */
+ if (wl->sleep_auth != WL1271_PSM_ELP)
+ return 0;
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return -EBUSY;
+ }
+
+ wl1271_debug(DEBUG_PSM, "chip to elp");
+ error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ if (error < 0) {
+ wl12xx_queue_recovery_work(wl);
+
+ return error;
+ }
+
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+
+ return 0;
+}
+
+static int __maybe_unused wlcore_runtime_resume(struct device *dev)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ DECLARE_COMPLETION_ONSTACK(compl);
+ unsigned long flags;
+ int ret;
+ unsigned long start_time = jiffies;
+ bool pending = false;
+
+ /* Nothing to do if no ELP mode requested */
+ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
+ return 0;
+
+ wl1271_debug(DEBUG_PSM, "waking up chip from elp");
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ pending = true;
+ else
+ wl->elp_compl = &compl;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto err;
+ }
+
+ if (!pending) {
+ ret = wait_for_completion_timeout(&compl,
+ msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
+ if (ret == 0) {
+ wl1271_error("ELP wakeup timeout!");
+ wl12xx_queue_recovery_work(wl);
+
+ /* Return no error for runtime PM for recovery */
+ return 0;
+ }
+ }
+
+ clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+
+ wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
+ jiffies_to_msecs(jiffies - start_time));
+
+ return 0;
+
+err:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->elp_compl = NULL;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return ret;
+}
+
+static const struct dev_pm_ops wlcore_pm_ops = {
+ SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
+ wlcore_runtime_resume,
+ NULL)
+};
+
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
@@ -6602,6 +6789,11 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
wlcore_nvs_cb(NULL, wl);
}
+ wl->dev->driver->pm = &wlcore_pm_ops;
+ pm_runtime_set_autosuspend_delay(wl->dev, 50);
+ pm_runtime_use_autosuspend(wl->dev);
+ pm_runtime_enable(wl->dev);
+
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -6610,6 +6802,13 @@ int wlcore_remove(struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl1271 *wl = platform_get_drvdata(pdev);
+ int error;
+
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0)
+ dev_warn(wl->dev, "PM runtime failed: %i\n", error);
+
+ wl->dev->driver->pm = NULL;
if (pdev_data->family && pdev_data->family->nvs_name)
wait_for_completion(&wl->nvs_loading_complete);
@@ -6621,6 +6820,11 @@ int wlcore_remove(struct platform_device *pdev)
disable_irq_wake(wl->irq);
}
wl1271_unregister_hw(wl);
+
+ pm_runtime_put_sync(wl->dev);
+ pm_runtime_dont_use_autosuspend(wl->dev);
+ pm_runtime_disable(wl->dev);
+
free_irq(wl->irq, wl);
wlcore_free_hw(wl);
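
The probe and remove hunks above bracket the device's runtime PM life cycle: probe installs wlcore_pm_ops by patching wl->dev->driver->pm (unusual; pm ops normally sit in the static driver definition, which here belongs to the SDIO/SPI glue drivers) and arms a 50 ms autosuspend window, while remove takes a final reference, unwinds autosuspend, and disables runtime PM. The enable sequence itself is the stock one:

#include <linux/pm_runtime.h>

static void ex_enable_runtime_pm(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 50);  /* ms idle before suspend */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}
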
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index b36133b739cb..9de843d1984b 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -26,152 +26,6 @@
#include "tx.h"
#include "debug.h"
-#define WL1271_WAKEUP_TIMEOUT 500
-
-#define ELP_ENTRY_DELAY 30
-#define ELP_ENTRY_DELAY_FORCE_PS 5
-
-void wl1271_elp_work(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct wl1271 *wl;
- struct wl12xx_vif *wlvif;
- int ret;
-
- dwork = to_delayed_work(work);
- wl = container_of(dwork, struct wl1271, elp_work);
-
- wl1271_debug(DEBUG_PSM, "elp work");
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state != WLCORE_STATE_ON))
- goto out;
-
- /* our work might have been already cancelled */
- if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
- goto out;
-
- if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
- goto out;
-
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- goto out;
- }
-
- wl1271_debug(DEBUG_PSM, "chip to elp");
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto out;
- }
-
- set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
-out:
- mutex_unlock(&wl->mutex);
-}
-
-/* Routines to toggle sleep mode while in ELP */
-void wl1271_ps_elp_sleep(struct wl1271 *wl)
-{
- struct wl12xx_vif *wlvif;
- u32 timeout;
-
- /* We do not enter elp sleep in PLT mode */
- if (wl->plt)
- return;
-
- if (wl->sleep_auth != WL1271_PSM_ELP)
- return;
-
- /* we shouldn't get consecutive sleep requests */
- if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
- return;
-
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- return;
- }
-
- timeout = wl->conf.conn.forced_ps ?
- ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY;
- ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(timeout));
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep);
-
-int wl1271_ps_elp_wakeup(struct wl1271 *wl)
-{
- DECLARE_COMPLETION_ONSTACK(compl);
- unsigned long flags;
- int ret;
- unsigned long start_time = jiffies;
- bool pending = false;
-
- /*
- * we might try to wake up even if we didn't go to sleep
- * before (e.g. on boot)
- */
- if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
- return 0;
-
- /* don't cancel_sync as it might contend for a mutex and deadlock */
- cancel_delayed_work(&wl->elp_work);
-
- if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
- return 0;
-
- wl1271_debug(DEBUG_PSM, "waking up chip from elp");
-
- /*
- * The spinlock is required here to synchronize both the work and
- * the completion variable in one entity.
- */
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- pending = true;
- else
- wl->elp_compl = &compl;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto err;
- }
-
- if (!pending) {
- ret = wait_for_completion_timeout(
- &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
- if (ret == 0) {
- wl1271_error("ELP wakeup timeout!");
- wl12xx_queue_recovery_work(wl);
- ret = -ETIMEDOUT;
- goto err;
- }
- }
-
- clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
- wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
- jiffies_to_msecs(jiffies - start_time));
- goto out;
-
-err:
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->elp_compl = NULL;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- return ret;
-
-out:
- return 0;
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup);
-
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode)
{
diff --git a/drivers/net/wireless/ti/wlcore/ps.h b/drivers/net/wireless/ti/wlcore/ps.h
index de4f9da8ed26..411727587f95 100644
--- a/drivers/net/wireless/ti/wlcore/ps.h
+++ b/drivers/net/wireless/ti/wlcore/ps.h
@@ -29,9 +29,6 @@
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode);
-void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl);
-void wl1271_elp_work(struct work_struct *work);
void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid, bool clean_queues);
void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f8..078a4940bc5c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
- u8 beacon)
+ u8 beacon, u8 probe_rsp)
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
}
+ if (beacon || probe_rsp)
+ status->boottime_ns = ktime_get_boot_ns();
+
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (ieee80211_is_data_present(hdr->frame_control))
is_data = 1;
- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+ ieee80211_is_probe_resp(hdr->frame_control));
wlcore_hw_set_rx_csum(wl, desc, skb);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
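
Stamping beacons and probe responses with CLOCK_BOOTTIME lets mac80211 report an accurate last-seen age for scan results, even across suspend. A reduced sketch of the hook above (ktime_get_boot_ns() is the helper of this era; it was later renamed ktime_get_boottime_ns()):

#include <linux/timekeeping.h>
#include <net/mac80211.h>

static void ex_stamp_scan_frame(struct ieee80211_rx_status *status,
                                bool beacon, bool probe_rsp)
{
        if (beacon || probe_rsp)
                status->boottime_ns = ktime_get_boot_ns();
}
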
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 5612f5916b4e..764e723e4ef9 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -22,13 +22,13 @@
*/
#include <linux/ieee80211.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
-#include "ps.h"
#include "tx.h"
void wl1271_scan_complete_work(struct work_struct *work)
@@ -67,17 +67,17 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.req = NULL;
wl->scan_wlvif = NULL;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
- wl1271_ps_elp_sleep(wl);
-
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
wl12xx_queue_recovery_work(wl);
@@ -85,6 +85,9 @@ void wl1271_scan_complete_work(struct work_struct *work)
wlcore_cmd_regdomain_config_locked(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
+
ieee80211_scan_completed(wl->hw, &info);
out:
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index d31eb775e023..7425ba9471d0 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -19,9 +19,11 @@
*
*/
+#include <linux/pm_runtime.h>
+
+#include "acx.h"
#include "wlcore.h"
#include "debug.h"
-#include "ps.h"
#include "sysfs.h"
static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
@@ -68,12 +70,15 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_acx_sg_enable(wl, wl->sg_enabled);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 009ec07c4cec..dcb2c8b0feb6 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -22,13 +22,13 @@
*/
#include "testmode.h"
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <net/genetlink.h>
#include "wlcore.h"
#include "debug.h"
#include "acx.h"
-#include "ps.h"
#include "io.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -97,9 +97,11 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
@@ -141,7 +143,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -169,9 +172,11 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -205,7 +210,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
out_free:
kfree(cmd);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 00e9b4624dcf..b6e19c2d66b0 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include "wlcore.h"
@@ -868,9 +869,11 @@ void wl1271_tx_work(struct work_struct *work)
int ret;
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
@@ -878,7 +881,8 @@ void wl1271_tx_work(struct work_struct *work)
goto out;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 5c0bcb1fe1a1..dbe78d8491ef 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -8,12 +8,13 @@
* version 2 as published by the Free Software Foundation.
*/
+#include <linux/pm_runtime.h>
+
#include <net/mac80211.h>
#include <net/netlink.h>
#include "wlcore.h"
#include "debug.h"
-#include "ps.h"
#include "hw_ops.h"
#include "vendor_cmd.h"
@@ -55,14 +56,17 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_start(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -87,13 +91,16 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_stop(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -131,16 +138,19 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_set_group_key(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),
nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]));
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 95fbedc8ea34..d4b1f66ef457 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -348,7 +348,6 @@ struct wl1271 {
enum nl80211_band band;
struct completion *elp_compl;
- struct delayed_work elp_work;
/* in dBm */
int power_level;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e840985385fc..32ec121ccac2 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -233,7 +233,6 @@ enum wl12xx_flags {
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
- WL1271_FLAG_ELP_REQUESTED,
WL1271_FLAG_IRQ_RUNNING,
WL1271_FLAG_FW_TX_BUSY,
WL1271_FLAG_DUMMY_PACKET_PENDING,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
index 07b94eda9604..dd6a86b899eb 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
@@ -1341,7 +1341,7 @@ int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
case ZD_LED_SCANNING:
ioreqs[0].value = FW_LINK_OFF;
ioreqs[1].value = v[1] & ~other_led;
- if (get_seconds() % 3 == 0) {
+ if ((u32)ktime_get_seconds() % 3 == 0) {
ioreqs[1].value &= ~chip->link_led;
} else {
ioreqs[1].value |= chip->link_led;
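
get_seconds() went away as part of the y2038 cleanup; ktime_get_seconds() returns monotonically increasing time64_t seconds, which suits a blink cadence better than wall-clock time anyway. The (u32) cast keeps the modulo a 32-bit operation, so 32-bit builds do not need a 64-bit division helper:

#include <linux/timekeeping.h>

/* LED held on for two seconds out of every three. */
static bool ex_link_led_on(void)
{
        return (u32)ktime_get_seconds() % 3 != 0;
}
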
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c30bf118c67d..c2cda3acd4af 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -371,25 +371,27 @@ static inline void handle_regs_int_override(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
+ unsigned long flags;
- spin_lock(&intr->lock);
+ spin_lock_irqsave(&intr->lock, flags);
if (atomic_read(&intr->read_regs_enabled)) {
atomic_set(&intr->read_regs_enabled, 0);
intr->read_regs_int_overridden = 1;
complete(&intr->read_regs.completion);
}
- spin_unlock(&intr->lock);
+ spin_unlock_irqrestore(&intr->lock, flags);
}
static inline void handle_regs_int(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
+ unsigned long flags;
int len;
u16 int_num;
ZD_ASSERT(in_interrupt());
- spin_lock(&intr->lock);
+ spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
if (int_num == CR_INTERRUPT) {
@@ -425,7 +427,7 @@ static inline void handle_regs_int(struct urb *urb)
}
out:
- spin_unlock(&intr->lock);
+ spin_unlock_irqrestore(&intr->lock, flags);
/* CR_INTERRUPT might override read_reg too. */
if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
@@ -665,6 +667,7 @@ static void rx_urb_complete(struct urb *urb)
struct zd_usb_rx *rx;
const u8 *buffer;
unsigned int length;
+ unsigned long flags;
switch (urb->status) {
case 0:
@@ -693,14 +696,14 @@ static void rx_urb_complete(struct urb *urb)
/* If there is an old first fragment, we don't care. */
dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
- spin_lock(&rx->lock);
+ spin_lock_irqsave(&rx->lock, flags);
memcpy(rx->fragment, buffer, length);
rx->fragment_length = length;
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
goto resubmit;
}
- spin_lock(&rx->lock);
+ spin_lock_irqsave(&rx->lock, flags);
if (rx->fragment_length > 0) {
/* We are on a second fragment, we believe */
ZD_ASSERT(length + rx->fragment_length <=
@@ -710,9 +713,9 @@ static void rx_urb_complete(struct urb *urb)
handle_rx_packet(usb, rx->fragment,
rx->fragment_length + length);
rx->fragment_length = 0;
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
} else {
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
handle_rx_packet(usb, buffer, length);
}
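All of the zd1211rw hunks above make the same conversion: these locks are taken from URB completion handlers, which can run in hard-interrupt context, so any path that may also run with interrupts enabled must use the irqsave variants to avoid self-deadlock. A minimal sketch of the pattern (the struct and names are illustrative):

	#include <linux/spinlock.h>

	struct demo_state {
		spinlock_t lock;
		unsigned int counter;
	};

	/* Safe whether called from process context or an interrupt handler. */
	static void demo_update(struct demo_state *st)
	{
		unsigned long flags;

		spin_lock_irqsave(&st->lock, flags);	/* disables local interrupts */
		st->counter++;
		spin_unlock_irqrestore(&st->lock, flags); /* restores prior IRQ state */
	}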
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 78ebe494fef0..92274c237200 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,14 +148,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
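Both xen drivers in this series adapt to the reworked ndo_select_queue hook: the opaque accel_priv pointer became a struct net_device *sb_dev, and select_queue_fallback_t gained the same third parameter. A hedged sketch of an adapted callback (demo_select_queue is illustrative, not a driver function):

	#include <linux/netdevice.h>

	static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
				     struct net_device *sb_dev,
				     select_queue_fallback_t fallback)
	{
		/* hand the subordinate device through to the core fallback */
		return fallback(dev, skb, sb_dev) % dev->real_num_tx_queues;
	}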
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a27daa23c9dc..3621e05a7494 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1603,9 +1603,9 @@ static void xenvif_ctrl_action(struct xenvif *vif)
static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
- return 1;
+ return true;
- return 0;
+ return false;
}
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 922ce0abf5cf..73f596a90c69 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,6 +87,7 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
struct netfront_stats {
@@ -545,7 +546,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
unsigned int num_queues = dev->real_num_tx_queues;
u32 hash;
@@ -893,7 +895,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
@@ -902,15 +903,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
RING_GET_RESPONSE(&queue->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
- if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+ if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
- BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
- skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ skb_frag_page(nfrag),
rx->offset, rx->status, PAGE_SIZE);
skb_shinfo(nskb)->nr_frags = 0;
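The shinfo rework above fixes a subtle use-after-realloc: __pskb_pull_tail() may expand the skb head, which moves the shared-info area, so a pointer cached from skb_shinfo(skb) before the pull can dangle. A hedged sketch of the safe pattern (demo names, not the driver's):

	#include <linux/skbuff.h>

	static void demo_add_frag(struct sk_buff *skb, struct page *page,
				  unsigned int off, unsigned int len,
				  unsigned int pull_to)
	{
		/* Do NOT cache skb_shinfo(skb) across the pull below. */
		__pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* may move head */

		/* Re-read the shared info through the skb after any reallocation. */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
				PAGE_SIZE);
	}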
@@ -1330,6 +1332,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
xenbus_switch_state(dev, XenbusStateInitialising);
+ wait_event(module_load_q,
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateClosed &&
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateUnknown);
return netdev;
exit:
@@ -1597,14 +1604,16 @@ static int xennet_init_queue(struct netfront_queue *queue)
{
unsigned short i;
int err = 0;
+ char *devid;
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
- snprintf(queue->name, sizeof(queue->name), "%s-q%u",
- queue->info->netdev->name, queue->id);
+ devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
+ snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
+ devid, queue->id);
/* Initialise tx_skbs as a free chain containing every entry. */
queue->tx_skb_freelist = 0;
@@ -1810,7 +1819,7 @@ static int talk_to_netback(struct xenbus_device *dev,
err = xen_net_read_mac(dev, info->netdev->dev_addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
- goto out;
+ goto out_unlocked;
}
rtnl_lock();
@@ -1925,6 +1934,7 @@ abort_transaction_no_dev_fatal:
xennet_destroy_queues(info);
out:
rtnl_unlock();
+out_unlocked:
device_unregister(&dev->dev);
return err;
}
@@ -1950,10 +1960,6 @@ static int xennet_connect(struct net_device *dev)
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;
- rtnl_lock();
- netdev_update_features(dev);
- rtnl_unlock();
-
if (dev->reg_state == NETREG_UNINITIALIZED) {
err = register_netdev(dev);
if (err) {
@@ -1963,6 +1969,10 @@ static int xennet_connect(struct net_device *dev)
}
}
+ rtnl_lock();
+ netdev_update_features(dev);
+ rtnl_unlock();
+
/*
* All public and private state should now be sane. Get
* ready to start sending and receiving packets and give the driver
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index bd35eab652be..945cc903d8f1 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -160,13 +160,14 @@ static void nfcmrvl_tx_complete(struct urb *urb)
struct nci_dev *ndev = (struct nci_dev *)skb->dev;
struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+ unsigned long flags;
nfc_info(priv->dev, "urb %p status %d count %d\n",
urb, urb->status, urb->actual_length);
- spin_lock(&drv_data->txlock);
+ spin_lock_irqsave(&drv_data->txlock, flags);
drv_data->tx_in_flight--;
- spin_unlock(&drv_data->txlock);
+ spin_unlock_irqrestore(&drv_data->txlock, flags);
kfree(urb->setup_packet);
kfree_skb(skb);
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index d5553c47014f..5d823e965883 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
struct sk_buff *skb = NULL;
if (!urb->status) {
- skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+ skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
if (!skb) {
nfc_err(&phy->udev->dev, "failed to alloc memory\n");
} else {
@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
/* request for response for sent packet directly */
- rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
if (rc)
goto error;
} else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
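The two pn533 hunks are a matched pair: allocation flags follow the calling context, not the data. pn533_recv_response() is a URB completion callback and may run in atomic context, so it must use GFP_ATOMIC, while the frame-send path runs in process context and can use the sleepable GFP_KERNEL. A hedged sketch of the rule (demo functions, not driver code):

	#include <linux/skbuff.h>
	#include <linux/usb.h>

	/* URB completion: may run in interrupt context, so no sleeping. */
	static void demo_complete(struct urb *urb)
	{
		struct sk_buff *skb = alloc_skb(urb->actual_length, GFP_ATOMIC);

		if (skb)
			kfree_skb(skb);		/* placeholder for real processing */
	}

	/* Submission from the transmit path: process context, sleeping is fine. */
	static int demo_submit(struct urb *urb)
	{
		return usb_submit_urb(urb, GFP_KERNEL);
	}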
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f624ae27eabe..5ee5f40b4dfc 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -19,6 +19,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ntb.h>
+#include <linux/pci.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
@@ -1487,7 +1488,7 @@ static int switchtec_ntb_add(struct device *dev,
stdev->sndev = NULL;
- if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
+ if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
return -ENODEV;
sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
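The class check above works because pdev->class packs the 24-bit class/subclass/prog-if triple, so the 16-bit PCI_CLASS_BRIDGE_OTHER constant must be shifted past the prog-if byte before comparing. A small stand-alone demonstration of the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_CLASS_BRIDGE_OTHER 0x0680	/* base class 0x06, subclass 0x80 */

	int main(void)
	{
		/* pdev->class layout: [base class][subclass][prog-if] */
		uint32_t expected = PCI_CLASS_BRIDGE_OTHER << 8;

		printf("expected pdev->class = 0x%06x\n", (unsigned)expected); /* 0x068000 */
		return 0;
	}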
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
index a59b6c4bb5b8..ad3d17c42e23 100644
--- a/drivers/nubus/bus.c
+++ b/drivers/nubus/bus.c
@@ -5,6 +5,7 @@
// Copyright (C) 2017 Finn Thain
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/nubus.h>
#include <linux/seq_file.h>
@@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board)
board->dev.release = nubus_device_release;
board->dev.bus = &nubus_bus_type;
dev_set_name(&board->dev, "slot.%X", board->slot);
+ board->dev.dma_mask = &board->dev.coherent_dma_mask;
+ dma_set_mask(&board->dev, DMA_BIT_MASK(32));
return device_register(&board->dev);
}
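The two added lines follow a common pattern for buses whose devices carry no firmware-assigned DMA mask: dma_set_mask() fails unless dev->dma_mask points at storage, and pointing it at coherent_dma_mask is the usual trick. A hedged sketch (demo_init_dma is an illustrative name):

	#include <linux/dma-mapping.h>

	static void demo_init_dma(struct device *dev)
	{
		/* give dma_mask backing storage before setting it */
		dev->dma_mask = &dev->coherent_dma_mask;
		if (dma_set_mask(dev, DMA_BIT_MASK(32)))
			dev_warn(dev, "no suitable DMA mask available\n");
	}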
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 85de8053aa34..0360c015f658 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1423,11 +1423,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- bool is_write, sector_t sector)
+ unsigned int op, sector_t sector)
{
int ret;
- if (!is_write) {
+ if (!op_is_write(op)) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
@@ -1464,7 +1464,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), iter.bi_sector);
+ bio_op(bio), iter.bi_sector);
if (err) {
dev_err(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
@@ -1483,16 +1483,16 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
unsigned int len;
len = hpage_nr_pages(page) * PAGE_SIZE;
- rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
+ rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
if (rc == 0)
- page_endio(page, is_write, 0);
+ page_endio(page, op_is_write(op), 0);
return rc;
}
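The btt conversion (and the matching pmem one below) threads the full request op down instead of a pre-collapsed bool, recovering the old boolean with op_is_write() only at the points that still need it. A hedged sketch of the dispatch shape (demo function, not driver code):

	#include <linux/blk_types.h>
	#include <linux/printk.h>

	static void demo_dispatch(struct bio *bio)
	{
		unsigned int op = bio_op(bio);	/* REQ_OP_READ, REQ_OP_WRITE, ... */

		if (!op_is_write(op))
			pr_debug("read path\n");
		else
			pr_debug("write path\n");
	}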
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 9d17abd9f8d0..98317e7ce5b5 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -397,16 +397,15 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
return false;
*start = jiffies;
- generic_start_io_acct(disk->queue, bio_data_dir(bio),
- bio_sectors(bio), &disk->part0);
+ generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
+ &disk->part0);
return true;
}
static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
struct gendisk *disk = bio->bi_disk;
- generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0,
- start);
+ generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f3096564acb2..c23649867696 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -120,7 +120,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
}
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, bool is_write,
+ unsigned int len, unsigned int off, unsigned int op,
sector_t sector)
{
blk_status_t rc = BLK_STS_OK;
@@ -131,7 +131,7 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (!is_write) {
+ if (!op_is_write(op)) {
if (unlikely(bad_pmem))
rc = BLK_STS_IOERR;
else {
@@ -180,8 +180,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, op_is_write(bio_op(bio)),
- iter.bi_sector);
+ bvec.bv_offset, bio_op(bio), iter.bi_sector);
if (rc) {
bio->bi_status = rc;
break;
@@ -198,13 +197,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
blk_status_t rc;
rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
- 0, is_write, sector);
+ 0, op, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
@@ -213,7 +212,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
* caused by double completion.
*/
if (rc == 0)
- page_endio(page, is_write, 0);
+ page_endio(page, op_is_write(op), 0);
return blk_status_to_errno(rc);
}
@@ -417,7 +416,8 @@ static int pmem_attach_disk(struct device *dev,
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ if (pmem->pfn_flags & PFN_MAP)
+ blk_queue_flag_set(QUEUE_FLAG_DAX, q);
q->queuedata = pmem;
disk = alloc_disk_node(0, nid);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46df030b2c3f..dd8ec1dd9219 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+ /*
+ * Revalidating a dead namespace sets capacity to 0. This will end
+ * buffered writers dirtying pages that can't be synced.
+ */
+ if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ return;
+ revalidate_disk(ns->disk);
+ blk_set_queue_dying(ns->queue);
+ /* Forcibly unquiesce queues to avoid blocking dispatch */
+ blk_mq_unquiesce_queue(ns->queue);
+}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
@@ -236,7 +252,8 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if (nvme_req_needs_failover(req, status)) {
+ if ((req->cmd_flags & REQ_NVME_MPATH) &&
+ blk_path_error(status)) {
nvme_failover_req(req);
return;
}
@@ -601,6 +618,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
+ } else if (req_op(req) == REQ_OP_WRITE) {
+ t10_pi_prepare(req, ns->pi_type);
}
switch (ns->pi_type) {
@@ -611,8 +630,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
- cmnd->rw.reftag = cpu_to_le32(
- nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
break;
}
}
@@ -622,6 +640,22 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
return 0;
}
+void nvme_cleanup_cmd(struct request *req)
+{
+ if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+ nvme_req(req)->status == 0) {
+ struct nvme_ns *ns = req->rq_disk->private_data;
+
+ t10_pi_complete(req, ns->pi_type,
+ blk_rq_bytes(req) >> ns->lba_shift);
+ }
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ kfree(page_address(req->special_vec.bv_page) +
+ req->special_vec.bv_offset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+
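With nvme_cleanup_cmd() now calling t10_pi_complete() (and nvme_setup_rw() above calling t10_pi_prepare()), the per-interval ref-tag remapping the PCIe driver used to open-code, removed from pci.c later in this diff, moves into the block layer. A stand-alone model of what that remap does for Type 1/2 protection information; all values are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t virt = 2048;		/* submitter-visible start sector */
		uint32_t phys = 34816;		/* after partition remapping */
		uint32_t ref_tags[4] = { 2048, 2049, 2050, 2051 };

		/* prepare (write) direction: virtual seed -> physical LBA */
		for (int i = 0; i < 4; i++)
			if (ref_tags[i] == virt + i)
				ref_tags[i] = phys + i;

		for (int i = 0; i < 4; i++)
			printf("interval %d: ref tag %u\n", i, ref_tags[i]);
		return 0;
	}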
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd)
{
@@ -652,10 +686,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
- if (ns)
- trace_nvme_setup_nvm_cmd(req->q->id, cmd);
- else
- trace_nvme_setup_admin_cmd(cmd);
+ trace_nvme_setup_cmd(req, cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -848,9 +879,6 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
- INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
- memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
- ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
@@ -1040,18 +1068,21 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
#define NVME_AEN_SUPPORTED \
- (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)
static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
- u32 result;
+ u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
int status;
- status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
- ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+ if (!supported_aens)
+ return;
+
+ status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+ NULL, 0, &result);
if (status)
dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
- ctrl->oaes & NVME_AEN_SUPPORTED);
+ supported_aens);
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1182,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns, *next;
- LIST_HEAD(rm_list);
+ struct nvme_ns *ns;
- down_write(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->disk && nvme_revalidate_disk(ns->disk)) {
- list_move_tail(&ns->list, &rm_list);
- }
- }
- up_write(&ctrl->namespaces_rwsem);
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ if (ns->disk && nvme_revalidate_disk(ns->disk))
+ nvme_set_queue_dying(ns);
+ up_read(&ctrl->namespaces_rwsem);
- list_for_each_entry_safe(ns, next, &rm_list, list)
- nvme_ns_remove(ns);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1245,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+ (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
0, &cmd.result, timeout);
nvme_passthru_end(ctrl, effects);
@@ -1457,6 +1484,12 @@ static void nvme_update_disk_info(struct gendisk *disk,
set_capacity(disk, capacity);
nvme_config_discard(ns);
+
+ if (id->nsattr & (1 << 0))
+ set_disk_ro(disk, true);
+ else
+ set_disk_ro(disk, false);
+
blk_mq_unfreeze_queue(disk->queue);
}
@@ -2255,21 +2288,16 @@ out_unlock:
return ret;
}
-int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 log_page, void *log,
- size_t size, u64 offset)
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+ void *log, size_t size, u64 offset)
{
struct nvme_command c = { };
unsigned long dwlen = size / 4 - 1;
c.get_log_page.opcode = nvme_admin_get_log_page;
-
- if (ns)
- c.get_log_page.nsid = cpu_to_le32(ns->head->ns_id);
- else
- c.get_log_page.nsid = cpu_to_le32(NVME_NSID_ALL);
-
+ c.get_log_page.nsid = cpu_to_le32(nsid);
c.get_log_page.lid = log_page;
+ c.get_log_page.lsp = lsp;
c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
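The numdl/numdu and lpol/lpou assignments above implement the Get Log Page encoding: the transfer length is expressed as a zero-based dword count split into 16-bit halves, and the byte offset into 32-bit halves. A stand-alone check of the arithmetic, with example values:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		size_t size = 4096;			/* log buffer, bytes */
		uint64_t offset = 0x100000;		/* byte offset into the log */
		unsigned long dwlen = size / 4 - 1;	/* zero-based dword count */

		uint16_t numdl = dwlen & ((1 << 16) - 1);
		uint16_t numdu = dwlen >> 16;
		uint32_t lpol = (uint32_t)offset;
		uint32_t lpou = (uint32_t)(offset >> 32);

		printf("numdl=%u numdu=%u lpol=0x%x lpou=0x%x\n",
		       numdl, numdu, lpol, lpou);
		return 0;
	}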
@@ -2278,12 +2306,6 @@ int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
-static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log,
- size_t size)
-{
- return nvme_get_log_ext(ctrl, NULL, log_page, log, size, 0);
-}
-
static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
{
int ret;
@@ -2294,8 +2316,8 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
if (!ctrl->effects)
return 0;
- ret = nvme_get_log(ctrl, NVME_LOG_CMD_EFFECTS, ctrl->effects,
- sizeof(*ctrl->effects));
+ ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
+ ctrl->effects, sizeof(*ctrl->effects), 0);
if (ret) {
kfree(ctrl->effects);
ctrl->effects = NULL;
@@ -2386,6 +2408,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
nvme_set_queue_limits(ctrl, ctrl->admin_q);
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
+ ctrl->max_namespaces = le32_to_cpu(id->mnan);
if (id->rtd3e) {
/* us -> s */
@@ -2445,8 +2468,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
}
+ ret = nvme_mpath_init(ctrl, id);
kfree(id);
+ if (ret < 0)
+ return ret;
+
if (ctrl->apst_enabled && !prev_apst_enabled)
dev_pm_qos_expose_latency_tolerance(ctrl->device);
else if (!ctrl->apst_enabled && prev_apst_enabled)
@@ -2665,6 +2692,10 @@ static struct attribute *nvme_ns_id_attrs[] = {
&dev_attr_nguid.attr,
&dev_attr_eui.attr,
&dev_attr_nsid.attr,
+#ifdef CONFIG_NVME_MULTIPATH
+ &dev_attr_ana_grpid.attr,
+ &dev_attr_ana_state.attr,
+#endif
NULL,
};
@@ -2687,6 +2718,14 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
return 0;
}
+#ifdef CONFIG_NVME_MULTIPATH
+ if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
+ if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
+ return 0;
+ if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
+ return 0;
+ }
+#endif
return a->mode;
}
@@ -3060,8 +3099,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_get_ctrl(ctrl);
- kfree(id);
-
device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_id_attr_group))
@@ -3071,8 +3108,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
ns->disk->disk_name);
- nvme_mpath_add_disk(ns->head);
+ nvme_mpath_add_disk(ns, id);
nvme_fault_inject_init(ns);
+ kfree(id);
+
return;
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
@@ -3138,7 +3177,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
down_write(&ctrl->namespaces_rwsem);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->head->ns_id > nsid)
+ if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
list_move_tail(&ns->list, &rm_list);
}
up_write(&ctrl->namespaces_rwsem);
@@ -3214,7 +3253,8 @@ static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
* raced with us in reading the log page, which could cause us to miss
* updates.
*/
- error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
+ log_size, 0);
if (error)
dev_warn(ctrl->device,
"reading changed ns log failed: %d\n", error);
@@ -3331,9 +3371,9 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
if (!log)
return;
- if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log)))
- dev_warn(ctrl->device,
- "Get FW SLOT INFO log error\n");
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
+ sizeof(*log), 0))
+ dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log);
}
@@ -3379,6 +3419,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
case NVME_AER_NOTICE_FW_ACT_STARTING:
queue_work(nvme_wq, &ctrl->fw_act_work);
break;
+#ifdef CONFIG_NVME_MULTIPATH
+ case NVME_AER_NOTICE_ANA:
+ if (!ctrl->ana_log_buf)
+ break;
+ queue_work(nvme_wq, &ctrl->ana_work);
+ break;
+#endif
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
@@ -3411,6 +3458,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_mpath_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
flush_work(&ctrl->scan_work);
@@ -3448,6 +3496,7 @@ static void nvme_free_ctrl(struct device *dev)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
kfree(ctrl->effects);
+ nvme_mpath_uninit(ctrl);
if (subsys) {
mutex_lock(&subsys->lock);
@@ -3484,6 +3533,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+ ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
+
ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out;
@@ -3542,19 +3595,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
if (ctrl->admin_q)
blk_mq_unquiesce_queue(ctrl->admin_q);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
- /*
- * Revalidating a dead namespace sets capacity to 0. This will
- * end buffered writers dirtying pages that can't be synced.
- */
- if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
- continue;
- revalidate_disk(ns->disk);
- blk_set_queue_dying(ns->queue);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ nvme_set_queue_dying(ns);
- /* Forcibly unquiesce queues to avoid blocking dispatch */
- blk_mq_unquiesce_queue(ns->queue);
- }
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 903eb4545e26..206d63cb1afc 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -474,7 +474,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
- if (ctrl->opts->max_reconnects != -1 &&
+ if (ctrl->opts->max_reconnects == -1 ||
ctrl->nr_reconnects < ctrl->opts->max_reconnects)
return true;
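The change above is a real logic fix, not a cleanup: max_reconnects == -1 means "retry forever", but the old conjunction made -1 return false, so such controllers never reconnected. A stand-alone truth check of the new predicate:

	#include <stdbool.h>
	#include <stdio.h>

	static bool should_reconnect(int max_reconnects, int nr_reconnects)
	{
		return max_reconnects == -1 || nr_reconnects < max_reconnects;
	}

	int main(void)
	{
		printf("%d\n", should_reconnect(-1, 100));	/* 1: unlimited */
		printf("%d\n", should_reconnect(3, 2));		/* 1: under limit */
		printf("%d\n", should_reconnect(3, 3));		/* 0: give up */
		return 0;
	}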
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
/*
* For a command we're not in a state to send to the device, the default action
* is to busy it and retry it after the controller state is recovered. However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
*
* Note: commands used to initialize the controller will be marked for failfast.
* Note: nvme cli/ioctl commands are marked for failfast.
*/
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *rq)
{
- if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ if (ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD &&
+ !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e1818a27aa2d..aa2fdb2a2e8f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 41d45a1b5c62..611e70cae754 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1737,6 +1737,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
@@ -2272,7 +2273,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvmf_fail_nonready_command(rq);
+ return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 41279da799ed..6fe5923c95d4 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -414,12 +414,6 @@ static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
/* Set compacted version for upper layers */
geo->version = NVM_OCSSD_SPEC_20;
- if (!(geo->major_ver_id == 2 && geo->minor_ver_id == 0)) {
- pr_err("nvm: OCSSD version not supported (v%d.%d)\n",
- geo->major_ver_id, geo->minor_ver_id);
- return -EINVAL;
- }
-
geo->num_ch = le16_to_cpu(id->num_grp);
geo->num_lun = le16_to_cpu(id->num_pu);
geo->all_luns = geo->num_ch * geo->num_lun;
@@ -583,7 +577,13 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
struct ppa_addr ppa;
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
size_t log_pos, offset, len;
- int ret, i;
+ int ret, i, max_len;
+
+ /*
+ * limit requests to a maximum of 256K to avoid issuing arbitrarily
+ * large requests when the device does not specify a maximum transfer
+ * size.
+ */
+ max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
/* Normalize lba address space to obtain log offset */
ppa.ppa = slba;
@@ -596,10 +596,11 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
offset = log_pos * sizeof(struct nvme_nvm_chk_meta);
while (left) {
- len = min_t(unsigned int, left, ctrl->max_hw_sectors << 9);
+ len = min_t(unsigned int, left, max_len);
- ret = nvme_get_log_ext(ctrl, ns, NVME_NVM_LOG_REPORT_CHUNK,
- dev_meta, len, offset);
+ ret = nvme_get_log(ctrl, ns->head->ns_id,
+ NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
+ offset);
if (ret) {
dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
break;
@@ -662,12 +663,10 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
- if (rqd->bio) {
+ if (rqd->bio)
blk_init_request_from_bio(rq, rqd->bio);
- } else {
+ else
rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
- rq->__data_len = 0;
- }
return rq;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1ffd3e8b13a1..5a9562881d4e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Christoph Hellwig.
+ * Copyright (c) 2017-2018 Christoph Hellwig.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,11 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
+}
+
/*
* If multipathing is enabled we need to always use the subsystem instance
* number for numbering our devices to avoid conflicts between subsystems that
@@ -45,6 +50,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
+ u16 status = nvme_req(req)->status;
unsigned long flags;
spin_lock_irqsave(&ns->head->requeue_lock, flags);
@@ -52,15 +58,35 @@ void nvme_failover_req(struct request *req)
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
blk_mq_end_request(req, 0);
- nvme_reset_ctrl(ns->ctrl);
- kblockd_schedule_work(&ns->head->requeue_work);
-}
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+ case NVME_SC_ANA_PERSISTENT_LOSS:
+ /*
+ * If we got back an ANA error, we know the controller is alive
+ * but not ready to serve this namespace. The spec suggests we
+ * should update our general state here, but because the admin
+ * and I/O queues are not serialized, that is fundamentally racy.
+ * So instead just clear the current path, mark the path as
+ * pending, and kick off a re-read of the ANA log page ASAP.
+ */
+ nvme_mpath_clear_current_path(ns);
+ if (ns->ctrl->ana_log_buf) {
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
+ }
+ break;
+ default:
+ /*
+ * Reset the controller for any non-ANA error as we don't know
+ * what caused the error.
+ */
+ nvme_reset_ctrl(ns->ctrl);
+ break;
+ }
-bool nvme_req_needs_failover(struct request *req, blk_status_t error)
-{
- if (!(req->cmd_flags & REQ_NVME_MPATH))
- return false;
- return blk_path_error(error);
+ kblockd_schedule_work(&ns->head->requeue_work);
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
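The switch in nvme_failover_req() masks the status with 0x7ff so that only the status-code and status-code-type bits are compared, ignoring flag bits such as DNR that a controller may set alongside an ANA error. A stand-alone illustration; the constant values shown are my reading of the nvme.h definitions this code uses:

	#include <stdint.h>
	#include <stdio.h>

	#define NVME_SC_DNR		0x4000	/* "do not retry" flag bit */
	#define NVME_SC_ANA_TRANSITION	0x303

	int main(void)
	{
		uint16_t status = NVME_SC_ANA_TRANSITION | NVME_SC_DNR;

		/* & 0x7ff keeps the code itself, dropping the flag bits */
		printf("raw=0x%04x masked=0x%03x\n", status, status & 0x7ff);
		return 0;
	}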
@@ -75,25 +101,51 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
up_read(&ctrl->namespaces_rwsem);
}
+static const char *nvme_ana_state_names[] = {
+ [0] = "invalid state",
+ [NVME_ANA_OPTIMIZED] = "optimized",
+ [NVME_ANA_NONOPTIMIZED] = "non-optimized",
+ [NVME_ANA_INACCESSIBLE] = "inaccessible",
+ [NVME_ANA_PERSISTENT_LOSS] = "persistent-loss",
+ [NVME_ANA_CHANGE] = "change",
+};
+
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
{
- struct nvme_ns *ns;
+ struct nvme_ns *ns, *fallback = NULL;
list_for_each_entry_rcu(ns, &head->list, siblings) {
- if (ns->ctrl->state == NVME_CTRL_LIVE) {
+ if (ns->ctrl->state != NVME_CTRL_LIVE ||
+ test_bit(NVME_NS_ANA_PENDING, &ns->flags))
+ continue;
+ switch (ns->ana_state) {
+ case NVME_ANA_OPTIMIZED:
rcu_assign_pointer(head->current_path, ns);
return ns;
+ case NVME_ANA_NONOPTIMIZED:
+ fallback = ns;
+ break;
+ default:
+ break;
}
}
- return NULL;
+ if (fallback)
+ rcu_assign_pointer(head->current_path, fallback);
+ return fallback;
+}
+
+static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
+{
+ return ns->ctrl->state == NVME_CTRL_LIVE &&
+ ns->ana_state == NVME_ANA_OPTIMIZED;
}
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);
- if (unlikely(!ns || ns->ctrl->state != NVME_CTRL_LIVE))
+ if (unlikely(!ns || !nvme_path_is_optimized(ns)))
ns = __nvme_find_path(head);
return ns;
}
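The selector above returns the first live optimized path immediately and otherwise falls back to the last live non-optimized path it saw. A simplified stand-alone model of that policy, with toy types in place of struct nvme_ns:

	#include <stdio.h>

	enum ana_state { OPTIMIZED, NONOPTIMIZED, INACCESSIBLE };

	struct path { int live; enum ana_state state; };

	static struct path *find_path(struct path *p, int n)
	{
		struct path *fallback = NULL;

		for (int i = 0; i < n; i++) {
			if (!p[i].live)
				continue;
			if (p[i].state == OPTIMIZED)
				return &p[i];		/* best case: stop here */
			if (p[i].state == NONOPTIMIZED)
				fallback = &p[i];	/* remember, keep looking */
		}
		return fallback;			/* may be NULL */
	}

	int main(void)
	{
		struct path paths[] = {
			{ 1, INACCESSIBLE }, { 1, NONOPTIMIZED }, { 1, OPTIMIZED },
		};

		printf("picked path %td\n", find_path(paths, 3) - paths); /* 2 */
		return 0;
	}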
@@ -142,7 +194,7 @@ static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
srcu_idx = srcu_read_lock(&head->srcu);
ns = srcu_dereference(head->current_path, &head->srcu);
- if (likely(ns && ns->ctrl->state == NVME_CTRL_LIVE))
+ if (likely(ns && nvme_path_is_optimized(ns)))
found = ns->queue->poll_fn(q, qc);
srcu_read_unlock(&head->srcu, srcu_idx);
return found;
@@ -176,6 +228,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
struct request_queue *q;
bool vwc = false;
+ mutex_init(&head->lock);
bio_list_init(&head->requeue_list);
spin_lock_init(&head->requeue_lock);
INIT_WORK(&head->requeue_work, nvme_requeue_work);
@@ -220,29 +273,232 @@ out:
return -ENOMEM;
}
-void nvme_mpath_add_disk(struct nvme_ns_head *head)
+static void nvme_mpath_set_live(struct nvme_ns *ns)
{
+ struct nvme_ns_head *head = ns->head;
+
+ lockdep_assert_held(&ns->head->lock);
+
if (!head->disk)
return;
- mutex_lock(&head->subsys->lock);
if (!(head->disk->flags & GENHD_FL_UP)) {
device_add_disk(&head->subsys->dev, head->disk);
if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
&nvme_ns_id_attr_group))
- pr_warn("%s: failed to create sysfs group for identification\n",
- head->disk->disk_name);
+ dev_warn(&head->subsys->dev,
+ "failed to create id group.\n");
+ }
+
+ kblockd_schedule_work(&ns->head->requeue_work);
+}
+
+static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
+ int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
+ void *))
+{
+ void *base = ctrl->ana_log_buf;
+ size_t offset = sizeof(struct nvme_ana_rsp_hdr);
+ int error, i;
+
+ lockdep_assert_held(&ctrl->ana_lock);
+
+ for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
+ struct nvme_ana_group_desc *desc = base + offset;
+ u32 nr_nsids = le32_to_cpu(desc->nnsids);
+ size_t nsid_buf_size = nr_nsids * sizeof(__le32);
+
+ if (WARN_ON_ONCE(desc->grpid == 0))
+ return -EINVAL;
+ if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
+ return -EINVAL;
+ if (WARN_ON_ONCE(desc->state == 0))
+ return -EINVAL;
+ if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
+ return -EINVAL;
+
+ offset += sizeof(*desc);
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
+ return -EINVAL;
+
+ error = cb(ctrl, desc, data);
+ if (error)
+ return error;
+
+ offset += nsid_buf_size;
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
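nvme_parse_ana_log() walks a variable-length layout: a fixed response header, then per group a fixed descriptor followed by that group's NSID list. A stand-alone model of the offset arithmetic; the 16-byte header and 32-byte descriptor sizes are my reading of the nvme.h structures and should be treated as assumptions:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HDR_SIZE	16	/* struct nvme_ana_rsp_hdr */
	#define DESC_SIZE	32	/* struct nvme_ana_group_desc, sans nsids[] */

	int main(void)
	{
		uint32_t nnsids[] = { 2, 0, 5 };	/* per-group NSID counts */
		size_t offset = HDR_SIZE;

		for (int i = 0; i < 3; i++) {
			offset += DESC_SIZE;			/* the descriptor */
			offset += nnsids[i] * sizeof(uint32_t);	/* its NSID list */
		}
		printf("log bytes consumed: %zu\n", offset);	/* 16+3*32+7*4 = 140 */
		return 0;
	}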
+static inline bool nvme_state_is_live(enum nvme_ana_state state)
+{
+ return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
+}
+
+static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
+ struct nvme_ns *ns)
+{
+ enum nvme_ana_state old;
+
+ mutex_lock(&ns->head->lock);
+ old = ns->ana_state;
+ ns->ana_grpid = le32_to_cpu(desc->grpid);
+ ns->ana_state = desc->state;
+ clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
+
+ if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+ nvme_mpath_set_live(ns);
+ mutex_unlock(&ns->head->lock);
+}
+
+static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ struct nvme_ana_group_desc *desc, void *data)
+{
+ u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
+ unsigned *nr_change_groups = data;
+ struct nvme_ns *ns;
+
+ dev_info(ctrl->device, "ANA group %d: %s.\n",
+ le32_to_cpu(desc->grpid),
+ nvme_ana_state_names[desc->state]);
+
+ if (desc->state == NVME_ANA_CHANGE)
+ (*nr_change_groups)++;
+
+ if (!nr_nsids)
+ return 0;
+
+ down_write(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
+ continue;
+ nvme_update_ns_ana_state(desc, ns);
+ if (++n == nr_nsids)
+ break;
+ }
+ up_write(&ctrl->namespaces_rwsem);
+ WARN_ON_ONCE(n < nr_nsids);
+ return 0;
+}
+
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+{
+ u32 nr_change_groups = 0;
+ int error;
+
+ mutex_lock(&ctrl->ana_lock);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
+ groups_only ? NVME_ANA_LOG_RGO : 0,
+ ctrl->ana_log_buf, ctrl->ana_log_size, 0);
+ if (error) {
+ dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
+ goto out_unlock;
+ }
+
+ error = nvme_parse_ana_log(ctrl, &nr_change_groups,
+ nvme_update_ana_state);
+ if (error)
+ goto out_unlock;
+
+ /*
+ * In theory we should have an ANATT timer per group as they might enter
+ * the change state at different times. But that is a lot of overhead
+ * just to protect against a target that keeps entering new change
+ * states while never finishing previous ones. But we'll still
+ * eventually time out once all groups are in change state, so this
+ * isn't a big deal.
+ *
+ * We also double the ANATT value to provide some slack for transports
+ * or AEN processing overhead.
+ */
+ if (nr_change_groups)
+ mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
+ else
+ del_timer_sync(&ctrl->anatt_timer);
+out_unlock:
+ mutex_unlock(&ctrl->ana_lock);
+ return error;
+}
+
+static void nvme_ana_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
+
+ nvme_read_ana_log(ctrl, false);
+}
+
+static void nvme_anatt_timeout(struct timer_list *t)
+{
+ struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
+
+ dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
+ nvme_reset_ctrl(ctrl);
+}
+
+void nvme_mpath_stop(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_ctrl_use_ana(ctrl))
+ return;
+ del_timer_sync(&ctrl->anatt_timer);
+ cancel_work_sync(&ctrl->ana_work);
+}
+
+static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
+}
+DEVICE_ATTR_RO(ana_grpid);
+
+static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+ return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
+}
+DEVICE_ATTR_RO(ana_state);
+
+static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+ struct nvme_ana_group_desc *desc, void *data)
+{
+ struct nvme_ns *ns = data;
+
+ if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
+ nvme_update_ns_ana_state(desc, ns);
+ return -ENXIO; /* just break out of the loop */
+ }
+
+ return 0;
+}
+
+void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ if (nvme_ctrl_use_ana(ns->ctrl)) {
+ mutex_lock(&ns->ctrl->ana_lock);
+ ns->ana_grpid = le32_to_cpu(id->anagrpid);
+ nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+ mutex_unlock(&ns->ctrl->ana_lock);
+ } else {
+ mutex_lock(&ns->head->lock);
+ ns->ana_state = NVME_ANA_OPTIMIZED;
+ nvme_mpath_set_live(ns);
+ mutex_unlock(&ns->head->lock);
}
- mutex_unlock(&head->subsys->lock);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group);
- del_gendisk(head->disk);
+ if (head->disk->flags & GENHD_FL_UP) {
+ sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
+ &nvme_ns_id_attr_group);
+ del_gendisk(head->disk);
+ }
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
@@ -250,3 +506,52 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
blk_cleanup_queue(head->disk->queue);
put_disk(head->disk);
}
+
+int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ int error;
+
+ if (!nvme_ctrl_use_ana(ctrl))
+ return 0;
+
+ ctrl->anacap = id->anacap;
+ ctrl->anatt = id->anatt;
+ ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
+ ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
+
+ mutex_init(&ctrl->ana_lock);
+ timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+ ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+ ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
+ if (!(ctrl->anacap & (1 << 6)))
+ ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+
+ if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
+ dev_err(ctrl->device,
+ "ANA log page size (%zd) larger than MDTS (%d).\n",
+ ctrl->ana_log_size,
+ ctrl->max_hw_sectors << SECTOR_SHIFT);
+ dev_err(ctrl->device, "disabling ANA support.\n");
+ return 0;
+ }
+
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+ ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf)
+ goto out;
+
+ error = nvme_read_ana_log(ctrl, true);
+ if (error)
+ goto out_free_ana_log_buf;
+ return 0;
+out_free_ana_log_buf:
+ kfree(ctrl->ana_log_buf);
+out:
+ return -ENOMEM;
+}
+
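The sizing in nvme_mpath_init() budgets for the worst case: one descriptor per possible ANA group plus, when the controller cannot be asked to omit them (ANACAP bit 6 clear), one 32-bit NSID per namespace the controller supports. A worked stand-alone example, reusing the header and descriptor sizes assumed above and made-up controller values:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t nanagrpid = 8, max_namespaces = 1024;
		uint8_t anacap = 0;		/* bit 6 clear: NSIDs are reported */
		size_t size = 16 + (size_t)nanagrpid * 32;	/* hdr + descriptors */

		if (!(anacap & (1 << 6)))
			size += (size_t)max_namespaces * sizeof(uint32_t);

		printf("ana_log_size = %zu bytes\n", size);	/* 4368 */
		return 0;
	}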
+void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+{
+ kfree(ctrl->ana_log_buf);
+}
+
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0c4a33df3b2f..bb4a2003c097 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -102,6 +102,7 @@ struct nvme_request {
u8 retries;
u8 flags;
u16 status;
+ struct nvme_ctrl *ctrl;
};
/*
@@ -119,6 +120,13 @@ static inline struct nvme_request *nvme_req(struct request *req)
return blk_mq_rq_to_pdu(req);
}
+static inline u16 nvme_req_qid(struct request *req)
+{
+ if (!req->rq_disk)
+ return 0;
+ return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
+}
+
/* The below value is the specific amount of delay needed before checking
* readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -175,6 +183,7 @@ struct nvme_ctrl {
u16 oacs;
u16 nssa;
u16 nr_streams;
+ u32 max_namespaces;
atomic_t abort_limit;
u8 vwc;
u32 vs;
@@ -197,6 +206,19 @@ struct nvme_ctrl {
struct work_struct fw_act_work;
unsigned long events;
+#ifdef CONFIG_NVME_MULTIPATH
+ /* asymmetric namespace access: */
+ u8 anacap;
+ u8 anatt;
+ u32 anagrpmax;
+ u32 nanagrpid;
+ struct mutex ana_lock;
+ struct nvme_ana_rsp_hdr *ana_log_buf;
+ size_t ana_log_size;
+ struct timer_list anatt_timer;
+ struct work_struct ana_work;
+#endif
+
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -261,6 +283,7 @@ struct nvme_ns_head {
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
+ struct mutex lock;
#endif
struct list_head list;
struct srcu_struct srcu;
@@ -287,6 +310,10 @@ struct nvme_ns {
struct nvme_ctrl *ctrl;
struct request_queue *queue;
struct gendisk *disk;
+#ifdef CONFIG_NVME_MULTIPATH
+ enum nvme_ana_state ana_state;
+ u32 ana_grpid;
+#endif
struct list_head siblings;
struct nvm_dev *ndev;
struct kref kref;
@@ -299,8 +326,9 @@ struct nvme_ns {
bool ext;
u8 pi_type;
unsigned long flags;
-#define NVME_NS_REMOVING 0
-#define NVME_NS_DEAD 1
+#define NVME_NS_REMOVING 0
+#define NVME_NS_DEAD 1
+#define NVME_NS_ANA_PENDING 2
u16 noiob;
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -356,14 +384,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-static inline void nvme_cleanup_cmd(struct request *req)
-{
- if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- kfree(page_address(req->special_vec.bv_page) +
- req->special_vec.bv_offset);
- }
-}
-
static inline void nvme_end_request(struct request *req, __le16 status,
union nvme_result result)
{
@@ -420,6 +440,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -435,21 +456,24 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 log_page, void *log, size_t size, u64 offset);
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+ void *log, size_t size, u64 offset);
extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
+bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
-bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
-void nvme_mpath_add_disk(struct nvme_ns_head *head);
+void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
+void nvme_mpath_stop(struct nvme_ctrl *ctrl);
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
@@ -468,7 +492,14 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
+extern struct device_attribute dev_attr_ana_grpid;
+extern struct device_attribute dev_attr_ana_state;
+
#else
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return false;
+}
/*
* Without the multipath code enabled, multiple controller per subsystems are
* visible as devices and thus we cannot use the subsystem instance.
@@ -482,11 +513,6 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
static inline void nvme_failover_req(struct request *req)
{
}
-static inline bool nvme_req_needs_failover(struct request *req,
- blk_status_t error)
-{
- return false;
-}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
@@ -495,7 +521,8 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
{
return 0;
}
-static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
+ struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -507,6 +534,17 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
+static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+ struct nvme_id_ctrl *id)
+{
+ return 0;
+}
+static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+{
+}
+static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ba943f211687..1b9951d2067e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -418,6 +418,8 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
+
+ nvme_req(req)->ctrl = &dev->ctrl;
return 0;
}
@@ -535,73 +537,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
mempool_free(iod->sg, dev->iod_mempool);
}
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
- if (be32_to_cpu(pi->ref_tag) == v)
- pi->ref_tag = cpu_to_be32(p);
-}
-
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
- if (be32_to_cpu(pi->ref_tag) == p)
- pi->ref_tag = cpu_to_be32(v);
-}
-
-/**
- * nvme_dif_remap - remaps ref tags to bip seed and physical lba
- *
- * The virtual start sector is the one that was originally submitted by the
- * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
- * start sector may be different. Remap protection information to match the
- * physical LBA on writes, and back to the original seed on reads.
- *
- * Type 0 and 3 do not have a ref tag, so no remapping required.
- */
-static void nvme_dif_remap(struct request *req,
- void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
- struct nvme_ns *ns = req->rq_disk->private_data;
- struct bio_integrity_payload *bip;
- struct t10_pi_tuple *pi;
- void *p, *pmap;
- u32 i, nlb, ts, phys, virt;
-
- if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
- return;
-
- bip = bio_integrity(req->bio);
- if (!bip)
- return;
-
- pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
-
- p = pmap;
- virt = bip_get_seed(bip);
- phys = nvme_block_nr(ns, blk_rq_pos(req));
- nlb = (blk_rq_bytes(req) >> ns->lba_shift);
- ts = ns->disk->queue->integrity.tuple_size;
-
- for (i = 0; i < nlb; i++, virt++, phys++) {
- pi = (struct t10_pi_tuple *)p;
- dif_swap(phys, virt, pi);
- p += ts;
- }
- kunmap_atomic(pmap);
-}
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_dif_remap(struct request *req,
- void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-}
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-#endif
-
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
int i;
@@ -827,9 +762,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
goto out_unmap;
- if (req_op(req) == REQ_OP_WRITE)
- nvme_dif_remap(req, nvme_dif_prep);
-
if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
goto out_unmap;
}
@@ -852,11 +784,8 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
if (iod->nents) {
dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
- if (blk_integrity_rq(req)) {
- if (req_op(req) == REQ_OP_READ)
- nvme_dif_remap(req, nvme_dif_complete);
+ if (blk_integrity_rq(req))
dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
- }
}
nvme_cleanup_cmd(req);
@@ -2556,11 +2485,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= check_vendor_combination_bug(pdev);
- result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
- quirks);
- if (result)
- goto release_pools;
-
/*
* Double check that our mempool alloc size will cover the biggest
* command we support.
@@ -2578,6 +2502,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
+ result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+ quirks);
+ if (result)
+ goto release_mempool;
+
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
nvme_get_ctrl(&dev->ctrl);
@@ -2585,6 +2514,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+ release_mempool:
+ mempool_destroy(dev->iod_mempool);
release_pools:
nvme_release_prp_pools(dev);
unmap:
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9544625c0b7d..dc042017c293 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -40,13 +40,14 @@
#define NVME_RDMA_MAX_SEGMENTS 256
-#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
+#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
struct nvme_rdma_device {
struct ib_device *dev;
struct ib_pd *pd;
struct kref ref;
struct list_head entry;
+ unsigned int num_inline_segments;
};
struct nvme_rdma_qe {
@@ -117,6 +118,7 @@ struct nvme_rdma_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
+ bool use_inline_data;
};
static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -249,7 +251,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
/* +1 for drain */
init_attr.cap.max_recv_wr = queue->queue_size + 1;
init_attr.cap.max_recv_sge = 1;
- init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
+ init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
init_attr.send_cq = queue->ib_cq;
@@ -286,6 +288,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct ib_device *ibdev = dev->dev;
int ret;
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
DMA_TO_DEVICE);
if (ret)
@@ -374,6 +377,8 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
goto out_free_pd;
}
+ ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
+ ndev->dev->attrs.max_send_sge - 1);
list_add(&ndev->entry, &device_list);
out_unlock:
mutex_unlock(&device_list_mutex);
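The capping added above reserves one send SGE for the NVMe command itself; whatever the HCA has left, up to the new 4-segment ceiling, can carry inline data. A stand-alone check of the min() logic:

	#include <stdio.h>

	#define NVME_RDMA_MAX_INLINE_SEGMENTS 4

	/* one send SGE always carries the command; the rest may carry inline data */
	static int num_inline_segments(int max_send_sge)
	{
		int n = max_send_sge - 1;

		return n < NVME_RDMA_MAX_INLINE_SEGMENTS ?
			n : NVME_RDMA_MAX_INLINE_SEGMENTS;
	}

	int main(void)
	{
		printf("%d\n", num_inline_segments(2));		/* 1 */
		printf("%d\n", num_inline_segments(32));	/* 4 */
		return 0;
	}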
@@ -732,8 +737,11 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_cleanup_queue(ctrl->ctrl.admin_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
}
- nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
+ if (ctrl->async_event_sqe.data) {
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ ctrl->async_event_sqe.data = NULL;
+ }
nvme_rdma_free_queue(&ctrl->queues[0]);
}
@@ -865,6 +873,31 @@ out_free_io_queues:
return ret;
}
+static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+{
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
+ &ctrl->ctrl);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_destroy_admin_queue(ctrl, remove);
+}
+
+static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+{
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
+ &ctrl->ctrl);
+ if (remove)
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_rdma_destroy_io_queues(ctrl, remove);
+ }
+}
+
static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -909,21 +942,44 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
}
}
-static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
- struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
- struct nvme_rdma_ctrl, reconnect_work);
+ int ret = -EINVAL;
bool changed;
- int ret;
- ++ctrl->ctrl.nr_reconnects;
-
- ret = nvme_rdma_configure_admin_queue(ctrl, false);
+ ret = nvme_rdma_configure_admin_queue(ctrl, new);
if (ret)
- goto requeue;
+ return ret;
+
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ dev_err(ctrl->ctrl.device,
+ "Mandatory keyed sgls are not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
+ }
+
+ if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+ dev_warn(ctrl->ctrl.device,
+ "sqsize %u > ctrl maxcmd %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+ ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
+ }
+
+ if (ctrl->ctrl.sgls & (1 << 20))
+ ctrl->use_inline_data = true;
if (ctrl->ctrl.queue_count > 1) {
- ret = nvme_rdma_configure_io_queues(ctrl, false);
+ ret = nvme_rdma_configure_io_queues(ctrl, new);
if (ret)
goto destroy_admin;
}
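
The sgls checks in the hunk above each test a single bit of the controller's SGL support field. A sketch of that decoding, under the assumptions this patch makes: bit 2 gates keyed SGL support (mandatory for the RDMA transport) and bit 20 is used here as the inline-data gate:

    /* Hypothetical sketch of the capability checks in nvme_rdma_setup_ctrl(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool keyed_sgls_supported(uint32_t sgls)
    {
            return sgls & (1 << 2);   /* mandatory for the RDMA transport */
    }

    static bool inline_data_supported(uint32_t sgls)
    {
            return sgls & (1 << 20);  /* gates ctrl->use_inline_data */
    }

    int main(void)
    {
            uint32_t sgls = (1 << 2) | (1 << 20);

            printf("keyed=%d inline=%d\n",
                   keyed_sgls_supported(sgls), inline_data_supported(sgls));
            return 0;
    }
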
@@ -932,10 +988,31 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
if (!changed) {
/* state change failure is ok if we're in DELETING state */
WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
- return;
+ ret = -EINVAL;
+ goto destroy_io;
}
nvme_start_ctrl(&ctrl->ctrl);
+ return 0;
+
+destroy_io:
+ if (ctrl->ctrl.queue_count > 1)
+ nvme_rdma_destroy_io_queues(ctrl, new);
+destroy_admin:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_rdma_destroy_admin_queue(ctrl, new);
+ return ret;
+}
+
+static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_rdma_ctrl, reconnect_work);
+
+ ++ctrl->ctrl.nr_reconnects;
+
+ if (nvme_rdma_setup_ctrl(ctrl, false))
+ goto requeue;
dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
ctrl->ctrl.nr_reconnects);
@@ -944,9 +1021,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
-destroy_admin:
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
ctrl->ctrl.nr_reconnects);
@@ -959,27 +1033,9 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl, err_work);
nvme_stop_keep_alive(&ctrl->ctrl);
-
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- nvme_rdma_stop_io_queues(ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, false);
- }
-
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- nvme_rdma_destroy_admin_queue(ctrl, false);
-
- /*
- * queues are not a live anymore, so restart the queues to fail fast
- * new IO
- */
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
+ nvme_rdma_teardown_admin_queue(ctrl, false);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we're in DELETING state */
@@ -1037,7 +1093,6 @@ static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req)
{
- struct ib_send_wr *bad_wr;
struct ib_send_wr wr = {
.opcode = IB_WR_LOCAL_INV,
.next = NULL,
@@ -1049,7 +1104,7 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
req->reg_cqe.done = nvme_rdma_inv_rkey_done;
wr.wr_cqe = &req->reg_cqe;
- return ib_post_send(queue->qp, &wr, &bad_wr);
+ return ib_post_send(queue->qp, &wr, NULL);
}
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
@@ -1087,19 +1142,27 @@ static int nvme_rdma_set_sg_null(struct nvme_command *c)
}
static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
- struct nvme_rdma_request *req, struct nvme_command *c)
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count)
{
struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+ struct scatterlist *sgl = req->sg_table.sgl;
+ struct ib_sge *sge = &req->sge[1];
+ u32 len = 0;
+ int i;
- req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
- req->sge[1].length = sg_dma_len(req->sg_table.sgl);
- req->sge[1].lkey = queue->device->pd->local_dma_lkey;
+ for (i = 0; i < count; i++, sgl++, sge++) {
+ sge->addr = sg_dma_address(sgl);
+ sge->length = sg_dma_len(sgl);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+ len += sge->length;
+ }
sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
- sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
+ sg->length = cpu_to_le32(len);
sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
- req->num_sge++;
+ req->num_sge += count;
return 0;
}
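
A stand-alone model of the fill loop above, with plain structs standing in for the kernel's scatterlist and ib_sge types: each DMA-mapped segment becomes one send SGE, and the in-capsule SGL descriptor advertises the summed length.

    /* Hypothetical model of nvme_rdma_map_sg_inline()'s SGE fill loop. */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_seg { uint64_t dma_addr; uint32_t dma_len; };
    struct fake_sge { uint64_t addr; uint32_t length; uint32_t lkey; };

    static uint32_t fill_inline_sges(const struct fake_seg *sgl, int count,
                                     struct fake_sge *sge, uint32_t lkey)
    {
            uint32_t len = 0;
            int i;

            for (i = 0; i < count; i++) {
                    sge[i].addr = sgl[i].dma_addr;
                    sge[i].length = sgl[i].dma_len;
                    sge[i].lkey = lkey;
                    len += sge[i].length;
            }
            return len;  /* becomes sg->length in the SGL descriptor */
    }

    int main(void)
    {
            struct fake_seg segs[3] = {
                    { 0x1000, 4096 }, { 0x3000, 4096 }, { 0x5000, 512 },
            };
            struct fake_sge sges[3];

            printf("inline bytes: %u\n", fill_inline_sges(segs, 3, sges, 0x42));
            return 0;
    }
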
@@ -1192,15 +1255,16 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
goto out_free_table;
}
- if (count == 1) {
+ if (count <= dev->num_inline_segments) {
if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+ queue->ctrl->use_inline_data &&
blk_rq_payload_bytes(rq) <=
nvme_rdma_inline_data_size(queue)) {
- ret = nvme_rdma_map_sg_inline(queue, req, c);
+ ret = nvme_rdma_map_sg_inline(queue, req, c, count);
goto out;
}
- if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
ret = nvme_rdma_map_sg_single(queue, req, c);
goto out;
}
@@ -1243,7 +1307,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
struct ib_send_wr *first)
{
- struct ib_send_wr wr, *bad_wr;
+ struct ib_send_wr wr;
int ret;
sge->addr = qe->dma;
@@ -1262,7 +1326,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
else
first = &wr;
- ret = ib_post_send(queue->qp, first, &bad_wr);
+ ret = ib_post_send(queue->qp, first, NULL);
if (unlikely(ret)) {
dev_err(queue->ctrl->ctrl.device,
"%s failed with error code %d\n", __func__, ret);
@@ -1273,7 +1337,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe)
{
- struct ib_recv_wr wr, *bad_wr;
+ struct ib_recv_wr wr;
struct ib_sge list;
int ret;
@@ -1288,7 +1352,7 @@ static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
wr.sg_list = &list;
wr.num_sge = 1;
- ret = ib_post_recv(queue->qp, &wr, &bad_wr);
+ ret = ib_post_recv(queue->qp, &wr, NULL);
if (unlikely(ret)) {
dev_err(queue->ctrl->ctrl.device,
"%s failed with error code %d\n", __func__, ret);
@@ -1571,6 +1635,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
nvme_rdma_destroy_queue_ib(queue);
+ /* fall through */
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
@@ -1636,7 +1701,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvmf_fail_nonready_command(rq);
+ return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,
@@ -1733,25 +1798,12 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- nvme_rdma_stop_io_queues(ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, shutdown);
- }
-
+ nvme_rdma_teardown_io_queues(ctrl, shutdown);
if (shutdown)
nvme_shutdown_ctrl(&ctrl->ctrl);
else
nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
-
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_destroy_admin_queue(ctrl, shutdown);
+ nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}
static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
@@ -1763,8 +1815,6 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl =
container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
- int ret;
- bool changed;
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -1775,25 +1825,9 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
return;
}
- ret = nvme_rdma_configure_admin_queue(ctrl, false);
- if (ret)
+ if (nvme_rdma_setup_ctrl(ctrl, false))
goto out_fail;
- if (ctrl->ctrl.queue_count > 1) {
- ret = nvme_rdma_configure_io_queues(ctrl, false);
- if (ret)
- goto out_fail;
- }
-
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- if (!changed) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
- return;
- }
-
- nvme_start_ctrl(&ctrl->ctrl);
-
return;
out_fail:
@@ -1956,49 +1990,10 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
WARN_ON_ONCE(!changed);
- ret = nvme_rdma_configure_admin_queue(ctrl, true);
+ ret = nvme_rdma_setup_ctrl(ctrl, true);
if (ret)
goto out_uninit_ctrl;
- /* sanity check icdoff */
- if (ctrl->ctrl.icdoff) {
- dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
- ret = -EINVAL;
- goto out_remove_admin_queue;
- }
-
- /* sanity check keyed sgls */
- if (!(ctrl->ctrl.sgls & (1 << 2))) {
- dev_err(ctrl->ctrl.device,
- "Mandatory keyed sgls are not supported!\n");
- ret = -EINVAL;
- goto out_remove_admin_queue;
- }
-
- /* only warn if argument is too large here, will clamp later */
- if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
- dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, clamping down\n",
- opts->queue_size, ctrl->ctrl.sqsize + 1);
- }
-
- /* warn if maxcmd is lower than sqsize+1 */
- if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
- dev_warn(ctrl->ctrl.device,
- "sqsize %u > ctrl maxcmd %u, clamping down\n",
- ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
- ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
- }
-
- if (opts->nr_io_queues) {
- ret = nvme_rdma_configure_io_queues(ctrl, true);
- if (ret)
- goto out_remove_admin_queue;
- }
-
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
-
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
@@ -2008,13 +2003,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
mutex_unlock(&nvme_rdma_ctrl_mutex);
- nvme_start_ctrl(&ctrl->ctrl);
-
return &ctrl->ctrl;
-out_remove_admin_queue:
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- nvme_rdma_destroy_admin_queue(ctrl, true);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 41944bbef835..25b0e310f4a8 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -128,3 +128,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_common(p, cdw10);
}
}
+
+const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (*name)
+ trace_seq_printf(p, "disk=%s, ", name);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 01390f0e1671..a490790d6691 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -50,13 +50,8 @@
nvme_admin_opcode_name(nvme_admin_security_recv), \
nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
-const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
- u8 *cdw10);
-#define __parse_nvme_admin_cmd(opcode, cdw10) \
- nvme_trace_parse_admin_cmd(p, opcode, cdw10)
-
#define nvme_opcode_name(opcode) { opcode, #opcode }
-#define show_opcode_name(val) \
+#define show_nvm_opcode_name(val) \
__print_symbolic(val, \
nvme_opcode_name(nvme_cmd_flush), \
nvme_opcode_name(nvme_cmd_write), \
@@ -70,85 +65,92 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
nvme_opcode_name(nvme_cmd_resv_acquire), \
nvme_opcode_name(nvme_cmd_resv_release))
-const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
- u8 *cdw10);
-#define __parse_nvme_cmd(opcode, cdw10) \
- nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
-
-TRACE_EVENT(nvme_setup_admin_cmd,
- TP_PROTO(struct nvme_command *cmd),
- TP_ARGS(cmd),
- TP_STRUCT__entry(
- __field(u8, opcode)
- __field(u8, flags)
- __field(u16, cid)
- __field(u64, metadata)
- __array(u8, cdw10, 24)
- ),
- TP_fast_assign(
- __entry->opcode = cmd->common.opcode;
- __entry->flags = cmd->common.flags;
- __entry->cid = cmd->common.command_id;
- __entry->metadata = le64_to_cpu(cmd->common.metadata);
- memcpy(__entry->cdw10, cmd->common.cdw10,
- sizeof(__entry->cdw10));
- ),
- TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
- __entry->cid, __entry->flags, __entry->metadata,
- show_admin_opcode_name(__entry->opcode),
- __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
-);
-
+#define show_opcode_name(qid, opcode) \
+ (qid ? show_nvm_opcode_name(opcode) : show_admin_opcode_name(opcode))
-TRACE_EVENT(nvme_setup_nvm_cmd,
- TP_PROTO(int qid, struct nvme_command *cmd),
- TP_ARGS(qid, cmd),
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+
+#define parse_nvme_cmd(qid, opcode, cdw10) \
+ (qid ? \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10))
+
+const char *nvme_trace_disk_name(struct trace_seq *p, char *name);
+#define __print_disk_name(name) \
+ nvme_trace_disk_name(p, name)
+
+#ifndef TRACE_HEADER_MULTI_READ
+static inline void __assign_disk_name(char *name, struct gendisk *disk)
+{
+ if (disk)
+ memcpy(name, disk->disk_name, DISK_NAME_LEN);
+ else
+ memset(name, 0, DISK_NAME_LEN);
+}
+#endif
+
+TRACE_EVENT(nvme_setup_cmd,
+ TP_PROTO(struct request *req, struct nvme_command *cmd),
+ TP_ARGS(req, cmd),
TP_STRUCT__entry(
- __field(int, qid)
- __field(u8, opcode)
- __field(u8, flags)
- __field(u16, cid)
- __field(u32, nsid)
- __field(u64, metadata)
- __array(u8, cdw10, 24)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, ctrl_id)
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
),
TP_fast_assign(
- __entry->qid = qid;
- __entry->opcode = cmd->common.opcode;
- __entry->flags = cmd->common.flags;
- __entry->cid = cmd->common.command_id;
- __entry->nsid = le32_to_cpu(cmd->common.nsid);
- __entry->metadata = le64_to_cpu(cmd->common.metadata);
- memcpy(__entry->cdw10, cmd->common.cdw10,
- sizeof(__entry->cdw10));
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __entry->qid = nvme_req_qid(req);
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ __assign_disk_name(__entry->disk, req->rq_disk);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
),
- TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
- __entry->qid, __entry->nsid, __entry->cid,
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
- show_opcode_name(__entry->opcode),
- __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+ show_opcode_name(__entry->qid, __entry->opcode),
+ parse_nvme_cmd(__entry->qid, __entry->opcode, __entry->cdw10))
);
TRACE_EVENT(nvme_complete_rq,
TP_PROTO(struct request *req),
TP_ARGS(req),
TP_STRUCT__entry(
- __field(int, qid)
- __field(int, cid)
- __field(u64, result)
- __field(u8, retries)
- __field(u8, flags)
- __field(u16, status)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, ctrl_id)
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
),
TP_fast_assign(
- __entry->qid = req->q->id;
- __entry->cid = req->tag;
- __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
- __entry->retries = nvme_req(req)->retries;
- __entry->flags = nvme_req(req)->flags;
- __entry->status = nvme_req(req)->status;
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __entry->qid = nvme_req_qid(req);
+ __entry->cid = req->tag;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ __assign_disk_name(__entry->disk, req->rq_disk);
),
- TP_printk("qid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result,
__entry->retries, __entry->flags, __entry->status)
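
The unified trace events above pick the opcode decoder by queue ID: qid 0 is the admin queue, anything else carries NVM I/O commands. A small sketch of that dispatch, with hypothetical decoder stubs in place of the real parse helpers:

    /* Hypothetical model of the parse_nvme_cmd()/show_opcode_name() dispatch. */
    #include <stdint.h>
    #include <stdio.h>

    static const char *parse_admin_cmd(uint8_t opcode) { return "admin-decoded"; }
    static const char *parse_nvm_cmd(uint8_t opcode)   { return "nvm-decoded"; }

    static const char *parse_cmd(int qid, uint8_t opcode)
    {
            /* qid 0 is the admin queue; all others carry NVM I/O commands. */
            return qid ? parse_nvm_cmd(opcode) : parse_admin_cmd(opcode);
    }

    int main(void)
    {
            printf("%s\n", parse_cmd(0, 0x06)); /* admin identify */
            printf("%s\n", parse_cmd(3, 0x02)); /* I/O read */
            return 0;
    }
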
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 38803576d5e1..a21caea1e080 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -19,6 +19,19 @@
#include <asm/unaligned.h>
#include "nvmet.h"
+/*
+ * This helper clears the AEN unless the RAE (Retain Asynchronous Event)
+ * bit is set in the command. Use it when processing the log pages that
+ * are associated with an AEN.
+ */
+static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
+{
+ int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & (1 << 15);
+
+ if (!rae)
+ clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
+}
+
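
The RAE test above isolates bit 15 of the Get Log Page command's CDW10. A minimal sketch of that extraction on a little-endian dword (le32_to_cpu modeled as a no-op on a little-endian host):

    /* Hypothetical sketch of the RAE (Retain Asynchronous Event) bit test. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool rae_set(uint32_t cdw10)
    {
            return cdw10 & (1u << 15);
    }

    int main(void)
    {
            printf("%d\n", rae_set(0x00008002)); /* RAE set: keep AEN masked */
            printf("%d\n", rae_set(0x00000002)); /* RAE clear: clear the AEN */
            return 0;
    }
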
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
@@ -128,6 +141,36 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+{
+ u16 status = NVME_SC_INTERNAL;
+ struct nvme_effects_log *log;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ goto out;
+
+ log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
+
+ log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -146,12 +189,76 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
if (!status)
status = nvmet_zero_sgl(req, len, req->data_len - len);
ctrl->nr_changed_ns = 0;
- clear_bit(NVME_AEN_CFG_NS_ATTR, &ctrl->aen_masked);
+ nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
mutex_unlock(&ctrl->lock);
out:
nvmet_req_complete(req, status);
}
+static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
+ struct nvme_ana_group_desc *desc)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ u32 count = 0;
+
+ if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ if (ns->anagrpid == grpid)
+ desc->nsids[count++] = cpu_to_le32(ns->nsid);
+ rcu_read_unlock();
+ }
+
+ desc->grpid = cpu_to_le32(grpid);
+ desc->nnsids = cpu_to_le32(count);
+ desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+ desc->state = req->port->ana_state[grpid];
+ memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
+ return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+}
+
+static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
+{
+ struct nvme_ana_rsp_hdr hdr = { 0, };
+ struct nvme_ana_group_desc *desc;
+ size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
+ size_t len;
+ u32 grpid;
+ u16 ngrps = 0;
+ u16 status;
+
+ status = NVME_SC_INTERNAL;
+ desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
+ NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+ if (!desc)
+ goto out;
+
+ down_read(&nvmet_ana_sem);
+ for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+ if (!nvmet_ana_group_enabled[grpid])
+ continue;
+ len = nvmet_format_ana_group(req, grpid, desc);
+ status = nvmet_copy_to_sgl(req, offset, desc, len);
+ if (status)
+ break;
+ offset += len;
+ ngrps++;
+ }
+
+ hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+ hdr.ngrps = cpu_to_le16(ngrps);
+ nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
+ up_read(&nvmet_ana_sem);
+
+ kfree(desc);
+
+ /* copy the header last once we know the number of groups */
+ status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
+out:
+ nvmet_req_complete(req, status);
+}
+
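
The ANA log page built above is a fixed header followed by one variable-size descriptor per enabled group; with RGO (return groups only) set, the per-group NSID lists are omitted. A sketch of the offset arithmetic, using stand-in sizes rather than the real struct definitions:

    /* Hypothetical model of the ANA log page layout computed above. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ANA_HDR_SIZE  16  /* stand-in for sizeof(struct nvme_ana_rsp_hdr) */
    #define ANA_DESC_SIZE 32  /* stand-in for sizeof(struct nvme_ana_group_desc) */

    static size_t ana_log_size(const uint32_t *nsids_per_grp, int ngrps, bool rgo)
    {
            size_t len = ANA_HDR_SIZE;
            int i;

            for (i = 0; i < ngrps; i++) {
                    len += ANA_DESC_SIZE;
                    if (!rgo)  /* RGO omits the NSID list in each descriptor */
                            len += nsids_per_grp[i] * sizeof(uint32_t);
            }
            return len;
    }

    int main(void)
    {
            uint32_t nsids[2] = { 3, 1 };

            printf("full: %zu, rgo: %zu\n",
                   ana_log_size(nsids, 2, false), ana_log_size(nsids, 2, true));
            return 0;
    }
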
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -183,8 +290,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
* the safest is to leave it as zeroes.
*/
- /* we support multiple ports and multiples hosts: */
- id->cmic = (1 << 0) | (1 << 1);
+ /* we support multiple ports, multiple hosts and ANA: */
+ id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -208,7 +315,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* first slot is read-only, only one slot supported */
id->frmw = (1 << 0) | (1 << 1);
- id->lpa = (1 << 0) | (1 << 2);
+ id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
id->npss = 0;
@@ -222,6 +329,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+ id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
NVME_CTRL_ONCS_WRITE_ZEROES);
@@ -238,19 +346,24 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->has_keyed_sgls)
id->sgls |= cpu_to_le32(1 << 2);
- if (ctrl->ops->sqe_inline_size)
+ if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
strcpy(id->subnqn, ctrl->subsys->subsysnqn);
/* Max command capsule size is sqe + single page of in-capsule data */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
- ctrl->ops->sqe_inline_size) / 16);
+ req->port->inline_data_size) / 16);
/* Max response capsule size is cqe */
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
id->msdbd = ctrl->ops->msdbd;
+ id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
+ id->anatt = 10; /* arbitrary value */
+ id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
+ id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
+
/*
* Meh, we don't really support any power state. Fake up the same
* values that qemu does.
@@ -259,6 +372,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->psd[0].entry_lat = cpu_to_le32(0x10);
id->psd[0].exit_lat = cpu_to_le32(0x4);
+ id->nwpc = 1 << 0; /* write protect and no write protect */
+
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
kfree(id);
@@ -292,8 +407,15 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
- id->ncap = id->nuse = id->nsze =
- cpu_to_le64(ns->size >> ns->blksize_shift);
+ id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
+ switch (req->port->ana_state[ns->anagrpid]) {
+ case NVME_ANA_INACCESSIBLE:
+ case NVME_ANA_PERSISTENT_LOSS:
+ break;
+ default:
+ id->nuse = id->nsze;
+ break;
+ }
/*
* We just provide a single LBA format that matches what the
@@ -307,11 +429,14 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
+ id->anagrpid = cpu_to_le32(ns->anagrpid);
- memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));
+ memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
id->lbaf[0].ds = ns->blksize_shift;
+ if (ns->readonly)
+ id->nsattr |= (1 << 0);
nvmet_put_namespace(ns);
done:
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -424,6 +549,52 @@ static void nvmet_execute_abort(struct nvmet_req *req)
nvmet_req_complete(req, 0);
}
+static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
+{
+ u16 status;
+
+ if (req->ns->file)
+ status = nvmet_file_flush(req);
+ else
+ status = nvmet_bdev_flush(req);
+
+ if (status)
+ pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
+ return status;
+}
+
+static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
+{
+ u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+
+ req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
+ if (unlikely(!req->ns))
+ return status;
+
+ mutex_lock(&subsys->lock);
+ switch (write_protect) {
+ case NVME_NS_WRITE_PROTECT:
+ req->ns->readonly = true;
+ status = nvmet_write_protect_flush_sync(req);
+ if (status)
+ req->ns->readonly = false;
+ break;
+ case NVME_NS_NO_WRITE_PROTECT:
+ req->ns->readonly = false;
+ status = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (!status)
+ nvmet_ns_changed(subsys, req->ns->nsid);
+ mutex_unlock(&subsys->lock);
+ return status;
+}
+
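
nvmet_set_feat_write_protect() above flips the readonly flag first and only keeps it if the flush succeeds. A stand-alone model of that set-then-verify flow, with a stub standing in for the flush:

    /* Hypothetical model of the write-protect set-feature state change. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NVME_NS_NO_WRITE_PROTECT 0
    #define NVME_NS_WRITE_PROTECT    1

    struct fake_ns { bool readonly; };

    static int flush_result;                            /* stub: 0 == success */
    static int flush_sync(struct fake_ns *ns) { return flush_result; }

    static int set_write_protect(struct fake_ns *ns, unsigned int val)
    {
            int status = -1;                   /* "feature not changeable" */

            switch (val) {
            case NVME_NS_WRITE_PROTECT:
                    ns->readonly = true;
                    status = flush_sync(ns);
                    if (status)                /* flush failed: roll back */
                            ns->readonly = false;
                    break;
            case NVME_NS_NO_WRITE_PROTECT:
                    ns->readonly = false;
                    status = 0;
                    break;
            }
            return status;
    }

    int main(void)
    {
            struct fake_ns ns = { false };

            flush_result = 0;
            printf("%d ro=%d\n",
                   set_write_protect(&ns, NVME_NS_WRITE_PROTECT), ns.readonly);
            flush_result = 1;                  /* simulate a failing flush */
            printf("%d ro=%d\n",
                   set_write_protect(&ns, NVME_NS_WRITE_PROTECT), ns.readonly);
            return 0;
    }
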
static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -454,6 +625,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
case NVME_FEAT_HOST_ID:
status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
break;
+ case NVME_FEAT_WRITE_PROTECT:
+ status = nvmet_set_feat_write_protect(req);
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
@@ -462,6 +636,26 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
+static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u32 result;
+
+ req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
+ if (!req->ns)
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ mutex_lock(&subsys->lock);
+ if (req->ns->readonly)
+ result = NVME_NS_WRITE_PROTECT;
+ else
+ result = NVME_NS_NO_WRITE_PROTECT;
+ nvmet_set_result(req, result);
+ mutex_unlock(&subsys->lock);
+
+ return 0;
+}
+
static void nvmet_execute_get_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -513,6 +707,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
sizeof(req->sq->ctrl->hostid));
break;
+ case NVME_FEAT_WRITE_PROTECT:
+ status = nvmet_get_feat_write_protect(req);
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
@@ -586,6 +783,12 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case NVME_LOG_CHANGED_NS:
req->execute = nvmet_execute_get_log_changed_ns;
return 0;
+ case NVME_LOG_CMD_EFFECTS:
+ req->execute = nvmet_execute_get_log_cmd_effects_ns;
+ return 0;
+ case NVME_LOG_ANA:
+ req->execute = nvmet_execute_get_log_page_ana;
+ return 0;
}
break;
case nvme_admin_identify:
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3ec4d1a..b37a8e3e3f80 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -218,6 +218,35 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, addr_trsvcid);
+static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
+}
+
+static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (port->enabled) {
+ pr_err("Cannot modify inline_data_size while port enabled\n");
+ pr_err("Disable the port before modifying\n");
+ return -EACCES;
+ }
+ ret = kstrtoint(page, 0, &port->inline_data_size);
+ if (ret) {
+ pr_err("Invalid value '%s' for inline_data_size\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
char *page)
{
@@ -282,6 +311,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
{
struct nvmet_ns *ns = to_nvmet_ns(item);
struct nvmet_subsys *subsys = ns->subsys;
+ size_t len;
int ret;
mutex_lock(&subsys->lock);
@@ -289,10 +319,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
if (ns->enabled)
goto out_unlock;
- kfree(ns->device_path);
+ ret = -EINVAL;
+ len = strcspn(page, "\n");
+ if (!len)
+ goto out_unlock;
+ kfree(ns->device_path);
ret = -ENOMEM;
- ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+ ns->device_path = kstrndup(page, len, GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
@@ -382,6 +416,39 @@ out_unlock:
CONFIGFS_ATTR(nvmet_ns_, device_nguid);
+static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
+}
+
+static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ u32 oldgrpid, newgrpid;
+ int ret;
+
+ ret = kstrtou32(page, 0, &newgrpid);
+ if (ret)
+ return ret;
+
+ if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
+ return -EINVAL;
+
+ down_write(&nvmet_ana_sem);
+ oldgrpid = ns->anagrpid;
+ nvmet_ana_group_enabled[newgrpid]++;
+ ns->anagrpid = newgrpid;
+ nvmet_ana_group_enabled[oldgrpid]--;
+ nvmet_ana_chgcnt++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_send_ana_event(ns->subsys, NULL);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
+
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
@@ -407,11 +474,41 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_ns_, enable);
+static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
+}
+
+static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ pr_err("disable ns before setting buffered_io value.\n");
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+
+ ns->buffered_io = val;
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
&nvmet_ns_attr_device_uuid,
+ &nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
+ &nvmet_ns_attr_buffered_io,
NULL,
};
@@ -858,6 +955,134 @@ static const struct config_item_type nvmet_referrals_type = {
.ct_group_ops = &nvmet_referral_group_ops,
};
+static struct {
+ enum nvme_ana_state state;
+ const char *name;
+} nvmet_ana_state_names[] = {
+ { NVME_ANA_OPTIMIZED, "optimized" },
+ { NVME_ANA_NONOPTIMIZED, "non-optimized" },
+ { NVME_ANA_INACCESSIBLE, "inaccessible" },
+ { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" },
+ { NVME_ANA_CHANGE, "change" },
+};
+
+static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+ enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
+ if (state != nvmet_ana_state_names[i].state)
+ continue;
+ return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
+ }
+
+ return sprintf(page, "\n");
+}
+
+static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
+ if (sysfs_streq(page, nvmet_ana_state_names[i].name))
+ goto found;
+ }
+
+ pr_err("Invalid value '%s' for ana_state\n", page);
+ return -EINVAL;
+
+found:
+ down_write(&nvmet_ana_sem);
+ grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
+ nvmet_ana_chgcnt++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
+
+static struct configfs_attribute *nvmet_ana_group_attrs[] = {
+ &nvmet_ana_group_attr_ana_state,
+ NULL,
+};
+
+static void nvmet_ana_group_release(struct config_item *item)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+
+ if (grp == &grp->port->ana_default_group)
+ return;
+
+ down_write(&nvmet_ana_sem);
+ grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
+ nvmet_ana_group_enabled[grp->grpid]--;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+ kfree(grp);
+}
+
+static struct configfs_item_operations nvmet_ana_group_item_ops = {
+ .release = nvmet_ana_group_release,
+};
+
+static const struct config_item_type nvmet_ana_group_type = {
+ .ct_item_ops = &nvmet_ana_group_item_ops,
+ .ct_attrs = nvmet_ana_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ana_groups_make_group(
+ struct config_group *group, const char *name)
+{
+ struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
+ struct nvmet_ana_group *grp;
+ u32 grpid;
+ int ret;
+
+ ret = kstrtou32(name, 0, &grpid);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
+ goto out;
+
+ ret = -ENOMEM;
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ goto out;
+ grp->port = port;
+ grp->grpid = grpid;
+
+ down_write(&nvmet_ana_sem);
+ nvmet_ana_group_enabled[grpid]++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+
+ config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
+ return &grp->group;
+out:
+ return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_ana_groups_group_ops = {
+ .make_group = nvmet_ana_groups_make_group,
+};
+
+static const struct config_item_type nvmet_ana_groups_type = {
+ .ct_group_ops = &nvmet_ana_groups_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
/*
* Ports definitions.
*/
@@ -865,6 +1090,7 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
+ kfree(port->ana_state);
kfree(port);
}
@@ -874,6 +1100,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_traddr,
&nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype,
+ &nvmet_attr_param_inline_data_size,
NULL,
};
@@ -892,6 +1119,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
{
struct nvmet_port *port;
u16 portid;
+ u32 i;
if (kstrtou16(name, 0, &portid))
return ERR_PTR(-EINVAL);
@@ -900,9 +1128,24 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
if (!port)
return ERR_PTR(-ENOMEM);
+ port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
+ sizeof(*port->ana_state), GFP_KERNEL);
+ if (!port->ana_state) {
+ kfree(port);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
+ if (i == NVMET_DEFAULT_ANA_GRPID)
+ port->ana_state[i] = NVME_ANA_OPTIMIZED;
+ else
+ port->ana_state[i] = NVME_ANA_INACCESSIBLE;
+ }
+
INIT_LIST_HEAD(&port->entry);
INIT_LIST_HEAD(&port->subsystems);
INIT_LIST_HEAD(&port->referrals);
+ port->inline_data_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
config_group_init_type_name(&port->group, name, &nvmet_port_type);
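
A sketch of the per-port ANA state initialization above, with the default group indexed by the loop variable: the default group comes up optimized, every other group starts inaccessible until it is explicitly created and configured. Enum values here are stand-ins, not the on-the-wire encodings.

    /* Hypothetical model of a port's initial ana_state[] contents. */
    #include <stdio.h>

    #define NVMET_MAX_ANAGRPS       128
    #define NVMET_DEFAULT_ANA_GRPID 1

    enum ana_state { ANA_OPTIMIZED, ANA_INACCESSIBLE };

    int main(void)
    {
            enum ana_state ana_state[NVMET_MAX_ANAGRPS + 1];
            unsigned int i;

            for (i = 1; i <= NVMET_MAX_ANAGRPS; i++)
                    ana_state[i] = (i == NVMET_DEFAULT_ANA_GRPID) ?
                                   ANA_OPTIMIZED : ANA_INACCESSIBLE;

            printf("grp1=%d grp2=%d\n", ana_state[1], ana_state[2]);
            return 0;
    }
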
@@ -915,6 +1158,18 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
"referrals", &nvmet_referrals_type);
configfs_add_default_group(&port->referrals_group, &port->group);
+ config_group_init_type_name(&port->ana_groups_group,
+ "ana_groups", &nvmet_ana_groups_type);
+ configfs_add_default_group(&port->ana_groups_group, &port->group);
+
+ port->ana_default_group.port = port;
+ port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
+ config_group_init_type_name(&port->ana_default_group.group,
+ __stringify(NVMET_DEFAULT_ANA_GRPID),
+ &nvmet_ana_group_type);
+ configfs_add_default_group(&port->ana_default_group.group,
+ &port->ana_groups_group);
+
return &port->group;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 74d4b785d2da..ebf3e7a6c49e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -18,6 +18,7 @@
#include "nvmet.h"
+struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
@@ -39,6 +40,10 @@ static DEFINE_IDA(cntlid_ida);
*/
DECLARE_RWSEM(nvmet_config_sem);
+u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+u64 nvmet_ana_chgcnt;
+DECLARE_RWSEM(nvmet_ana_sem);
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn);
@@ -175,7 +180,7 @@ out_unlock:
mutex_unlock(&ctrl->lock);
}
-static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ctrl *ctrl;
@@ -189,6 +194,33 @@ static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
}
}
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+ struct nvmet_port *port)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (port && ctrl->port != port)
+ continue;
+ if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
+ continue;
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_port_send_ana_event(struct nvmet_port *port)
+{
+ struct nvmet_subsys_link *p;
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry)
+ nvmet_send_ana_event(p->subsys, port);
+ up_read(&nvmet_config_sem);
+}
+
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
int ret = 0;
@@ -241,6 +273,10 @@ int nvmet_enable_port(struct nvmet_port *port)
return ret;
}
+ /* If the transport didn't set inline_data_size, then disable it. */
+ if (port->inline_data_size < 0)
+ port->inline_data_size = 0;
+
port->enabled = true;
return 0;
}
@@ -332,14 +368,18 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
- int ret = 0;
+ int ret;
mutex_lock(&subsys->lock);
+ ret = -EMFILE;
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+ ret = 0;
if (ns->enabled)
goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
- if (ret)
+ if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
if (ret)
goto out_unlock;
@@ -369,6 +409,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_add_tail_rcu(&ns->dev_link, &old->dev_link);
}
+ subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
@@ -409,6 +450,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
+ subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
out_unlock:
@@ -419,6 +461,10 @@ void nvmet_ns_free(struct nvmet_ns *ns)
{
nvmet_ns_disable(ns);
+ down_write(&nvmet_ana_sem);
+ nvmet_ana_group_enabled[ns->anagrpid]--;
+ up_write(&nvmet_ana_sem);
+
kfree(ns->device_path);
kfree(ns);
}
@@ -436,7 +482,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->nsid = nsid;
ns->subsys = subsys;
+
+ down_write(&nvmet_ana_sem);
+ ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
+ nvmet_ana_group_enabled[ns->anagrpid]++;
+ up_write(&nvmet_ana_sem);
+
uuid_gen(&ns->uuid);
+ ns->buffered_io = false;
return ns;
}
@@ -542,6 +595,35 @@ int nvmet_sq_init(struct nvmet_sq *sq)
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
+static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
+ struct nvmet_ns *ns)
+{
+ enum nvme_ana_state state = port->ana_state[ns->anagrpid];
+
+ if (unlikely(state == NVME_ANA_INACCESSIBLE))
+ return NVME_SC_ANA_INACCESSIBLE;
+ if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
+ return NVME_SC_ANA_PERSISTENT_LOSS;
+ if (unlikely(state == NVME_ANA_CHANGE))
+ return NVME_SC_ANA_TRANSITION;
+ return 0;
+}
+
+static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
+{
+ if (unlikely(req->ns->readonly)) {
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_flush:
+ break;
+ default:
+ return NVME_SC_NS_WRITE_PROTECTED;
+ }
+ }
+
+ return 0;
+}
+
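
The two inline checks above turn per-path ANA state and per-namespace write protection into NVMe status codes before any I/O is parsed. A compact model of that gating, using stand-in status values rather than the real NVME_SC_* constants:

    /* Hypothetical model of nvmet_check_ana_state()/nvmet_io_cmd_check_access(). */
    #include <stdio.h>

    enum state { OPTIMIZED, NONOPTIMIZED, INACCESSIBLE, PERSISTENT_LOSS, CHANGE };
    enum { SC_OK = 0, SC_ANA_INACCESSIBLE, SC_ANA_PERSISTENT_LOSS,
           SC_ANA_TRANSITION, SC_WRITE_PROTECTED };  /* stand-in codes */

    static int check_ana_state(enum state s)
    {
            if (s == INACCESSIBLE)
                    return SC_ANA_INACCESSIBLE;
            if (s == PERSISTENT_LOSS)
                    return SC_ANA_PERSISTENT_LOSS;
            if (s == CHANGE)
                    return SC_ANA_TRANSITION;
            return SC_OK;  /* optimized and non-optimized both allow I/O */
    }

    static int check_access(int readonly, int is_write)
    {
            /* reads and flushes pass; writes to a protected ns are refused */
            if (readonly && is_write)
                    return SC_WRITE_PROTECTED;
            return SC_OK;
    }

    int main(void)
    {
            printf("%d %d\n", check_ana_state(CHANGE), check_access(1, 1));
            return 0;
    }
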
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -554,6 +636,12 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
if (unlikely(!req->ns))
return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ ret = nvmet_check_ana_state(req->port, req->ns);
+ if (unlikely(ret))
+ return ret;
+ ret = nvmet_io_cmd_check_access(req);
+ if (unlikely(ret))
+ return ret;
if (req->ns->file)
return nvmet_file_parse_io_cmd(req);
@@ -870,6 +958,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
nvmet_init_cap(ctrl);
+ ctrl->port = req->port;
+
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
@@ -1109,6 +1199,15 @@ static int __init nvmet_init(void)
{
int error;
+ nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+
+ buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!buffered_io_wq) {
+ error = -ENOMEM;
+ goto out;
+ }
+
error = nvmet_init_discovery();
if (error)
goto out;
@@ -1129,6 +1228,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(buffered_io_wq);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 08656b849bd6..eae29f493a07 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -171,7 +171,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->has_keyed_sgls)
id->sgls |= cpu_to_le32(1 << 2);
- if (ctrl->ops->sqe_inline_size)
+ if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
strcpy(id->subnqn, ctrl->subsys->subsysnqn);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 408279cb6f2c..29b4b236afd8 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));
+/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
enum nvmet_fcp_datadir {
NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
struct nvme_fc_cmd_iu cmdiubuf;
struct nvme_fc_ersp_iu rspiubuf;
dma_addr_t rspdma;
+ struct scatterlist *next_sg;
struct scatterlist *data_sg;
int data_sg_cnt;
u32 offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->assoc_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
- newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
- template->max_sgl_segments);
+ newrec->max_sg_cnt = template->max_sgl_segments;
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
/* note: write from initiator perspective */
+ fod->next_sg = fod->data_sg;
return 0;
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod, u8 op)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct scatterlist *sg = fod->next_sg;
unsigned long flags;
- u32 tlen;
+ u32 remaininglen = fod->req.transfer_len - fod->offset;
+ u32 tlen = 0;
int ret;
fcpreq->op = op;
fcpreq->offset = fod->offset;
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
- tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
- (fod->req.transfer_len - fod->offset));
+ /*
+ * For the next sequence:
+ * break at an sg element boundary, and
+ * attempt to keep the sequence length capped at
+ * NVMET_FC_MAX_SEQ_LENGTH, but allow the sequence to
+ * be longer if a single sg element is larger
+ * than that amount. This is done to avoid creating
+ * a new sg list to use for the tgtport api.
+ */
+ fcpreq->sg = sg;
+ fcpreq->sg_cnt = 0;
+ while (tlen < remaininglen &&
+ fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+ tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+ fcpreq->sg_cnt++;
+ tlen += sg_dma_len(sg);
+ sg = sg_next(sg);
+ }
+ if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+ fcpreq->sg_cnt++;
+ tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+ sg = sg_next(sg);
+ }
+ if (tlen < remaininglen)
+ fod->next_sg = sg;
+ else
+ fod->next_sg = NULL;
+
fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
fcpreq->rsplen = 0;
- fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
- fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
/*
* If the last READDATA request: check if LLDD supports
* combined xfr with response.
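
A stand-alone model of the sequence-building loop above: walk the scatterlist greedily, stop at the element that would push the sequence past NVMET_FC_MAX_SEQ_LENGTH or past the device's SGE limit, and fall back to taking a single oversized element so progress is always made.

    /* Hypothetical model of the per-sequence sg walk in
     * nvmet_fc_transfer_fcp_data(). */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SEQ_LENGTH (256 * 1024)

    struct walk { int sg_cnt; uint32_t tlen; int next; };

    static struct walk build_seq(const uint32_t *sg_len, int nents, int start,
                                 uint32_t remaining, int max_sg_cnt)
    {
            struct walk w = { 0, 0, start };

            while (w.next < nents && w.tlen < remaining &&
                   w.sg_cnt < max_sg_cnt &&
                   w.tlen + sg_len[w.next] < MAX_SEQ_LENGTH) {
                    w.sg_cnt++;
                    w.tlen += sg_len[w.next++];
            }
            /* single element larger than the cap: take it anyway */
            if (w.tlen < remaining && w.sg_cnt == 0) {
                    w.sg_cnt++;
                    w.tlen += sg_len[w.next] < remaining ?
                              sg_len[w.next] : remaining;
                    w.next++;
            }
            return w;
    }

    int main(void)
    {
            uint32_t sg_len[2] = { 512 * 1024, 4096 }; /* first element > cap */
            struct walk w = build_seq(sg_len, 2, 0, 512 * 1024 + 4096, 32);

            printf("sg_cnt=%d tlen=%u next=%d\n", w.sg_cnt, w.tlen, w.next);
            return 0;
    }
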
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index e0b0f7df70c2..7bc9f6240432 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -124,6 +124,13 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
submit_bio(bio);
}
+u16 nvmet_bdev_flush(struct nvmet_req *req)
+{
+ if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return 0;
+}
+
static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
struct nvme_dsm_range *range, struct bio **bio)
{
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 8c42b3a8c420..81a9dc5290a8 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -16,6 +16,8 @@
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
if (ns->file) {
+ if (ns->buffered_io)
+ flush_workqueue(buffered_io_wq);
mempool_destroy(ns->bvec_pool);
ns->bvec_pool = NULL;
kmem_cache_destroy(ns->bvec_cache);
@@ -27,11 +29,14 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
- int ret;
+ int flags = O_RDWR | O_LARGEFILE;
struct kstat stat;
+ int ret;
+
+ if (!ns->buffered_io)
+ flags |= O_DIRECT;
- ns->file = filp_open(ns->device_path,
- O_RDWR | O_LARGEFILE | O_DIRECT, 0);
+ ns->file = filp_open(ns->device_path, flags, 0);
if (IS_ERR(ns->file)) {
pr_err("failed to open file %s: (%ld)\n",
ns->device_path, PTR_ERR(ns->file));
@@ -100,7 +105,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
iocb->ki_pos = pos;
iocb->ki_filp = req->ns->file;
- iocb->ki_flags = IOCB_DIRECT | ki_flags;
+ iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
ret = call_iter(iocb, &iter);
@@ -140,6 +145,12 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
return;
}
+ pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+ if (unlikely(pos + req->data_len > req->ns->size)) {
+ nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ return;
+ }
+
if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
GFP_KERNEL);
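
The early return added above converts the command's starting LBA into a byte offset and rejects the I/O before any bvec allocation if it would run past the backing file. A sketch of that arithmetic, assuming a namespace size in bytes and a block-size shift:

    /* Hypothetical model of the LBA range check in nvmet_file_execute_rw(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool rw_in_range(uint64_t slba, uint32_t data_len,
                            unsigned int blksize_shift, uint64_t ns_size)
    {
            uint64_t pos = slba << blksize_shift;

            return pos + data_len <= ns_size;
    }

    int main(void)
    {
            /* 1 MiB namespace, 512-byte blocks */
            printf("%d\n", rw_in_range(0, 4096, 9, 1 << 20));    /* in range */
            printf("%d\n", rw_in_range(2047, 4096, 9, 1 << 20)); /* too far  */
            return 0;
    }
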
@@ -155,8 +166,6 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
is_sync = true;
}
- pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
-
memset(&req->f.iocb, 0, sizeof(struct kiocb));
for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
@@ -189,14 +198,31 @@ out:
nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
}
-static void nvmet_file_flush_work(struct work_struct *w)
+static void nvmet_file_buffered_io_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
- int ret;
- ret = vfs_fsync(req->ns->file, 1);
+ nvmet_file_execute_rw(req);
+}
- nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+ queue_work(buffered_io_wq, &req->f.work);
+}
+
+u16 nvmet_file_flush(struct nvmet_req *req)
+{
+ if (vfs_fsync(req->ns->file, 1) < 0)
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return 0;
+}
+
+static void nvmet_file_flush_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+ nvmet_req_complete(req, nvmet_file_flush(req));
}
static void nvmet_file_execute_flush(struct nvmet_req *req)
@@ -209,22 +235,30 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
{
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
struct nvme_dsm_range range;
- loff_t offset;
- loff_t len;
- int i, ret;
+ loff_t offset, len;
+ u16 ret;
+ int i;
for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
- if (nvmet_copy_from_sgl(req, i * sizeof(range), &range,
- sizeof(range)))
+ ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ sizeof(range));
+ if (ret)
break;
+
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
- ret = vfs_fallocate(req->ns->file, mode, offset, len);
- if (ret)
+ if (offset + len > req->ns->size) {
+ ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
break;
+ }
+
+ if (vfs_fallocate(req->ns->file, mode, offset, len)) {
+ ret = NVME_SC_INTERNAL | NVME_SC_DNR;
+ break;
+ }
}
- nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+ nvmet_req_complete(req, ret);
}
static void nvmet_file_dsm_work(struct work_struct *w)
@@ -263,6 +297,11 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
req->ns->blksize_shift);
+ if (unlikely(offset + len > req->ns->size)) {
+ nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ return;
+ }
+
ret = vfs_fallocate(req->ns->file, mode, offset, len);
nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}
@@ -280,7 +319,10 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
switch (cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
- req->execute = nvmet_file_execute_rw;
+ if (req->ns->buffered_io)
+ req->execute = nvmet_file_execute_rw_buffered_io;
+ else
+ req->execute = nvmet_file_execute_rw;
req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d8d91f04bd7e..9908082b32c4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;
if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
- return nvmf_fail_nonready_command(req);
+ return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret)
@@ -227,6 +227,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
{
struct nvme_loop_ctrl *ctrl = set->driver_data;
+ nvme_req(req)->ctrl = &ctrl->ctrl;
return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 480dfe10fad9..ec9af4ee03b6 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -30,12 +30,11 @@
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
-
/*
* Supported optional AENs:
*/
#define NVMET_AEN_CFG_OPTIONAL \
- NVME_AEN_CFG_NS_ATTR
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
/*
* Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
@@ -59,12 +58,15 @@ struct nvmet_ns {
struct percpu_ref ref;
struct block_device *bdev;
struct file *file;
+ bool readonly;
u32 nsid;
u32 blksize_shift;
loff_t size;
u8 nguid[16];
uuid_t uuid;
+ u32 anagrpid;
+ bool buffered_io;
bool enabled;
struct nvmet_subsys *subsys;
const char *device_path;
@@ -97,6 +99,18 @@ struct nvmet_sq {
struct completion confirm_done;
};
+struct nvmet_ana_group {
+ struct config_group group;
+ struct nvmet_port *port;
+ u32 grpid;
+};
+
+static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_ana_group,
+ group);
+}
+
/**
* struct nvmet_port - Common structure to keep port
* information for the target.
@@ -114,8 +128,12 @@ struct nvmet_port {
struct list_head subsystems;
struct config_group referrals_group;
struct list_head referrals;
+ struct config_group ana_groups_group;
+ struct nvmet_ana_group ana_default_group;
+ enum nvme_ana_state *ana_state;
void *priv;
bool enabled;
+ int inline_data_size;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -124,6 +142,13 @@ static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
group);
}
+static inline struct nvmet_port *ana_groups_to_port(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_port,
+ ana_groups_group);
+}
+
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_cq **cqs;
@@ -138,6 +163,8 @@ struct nvmet_ctrl {
u16 cntlid;
u32 kato;
+ struct nvmet_port *port;
+
u32 aen_enabled;
unsigned long aen_masked;
struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
@@ -166,6 +193,7 @@ struct nvmet_subsys {
struct kref ref;
struct list_head namespaces;
+ unsigned int nr_namespaces;
unsigned int max_nsid;
struct list_head ctrls;
@@ -225,7 +253,6 @@ struct nvmet_req;
struct nvmet_fabrics_ops {
struct module *owner;
unsigned int type;
- unsigned int sqe_inline_size;
unsigned int msdbd;
bool has_keyed_sgls : 1;
void (*queue_response)(struct nvmet_req *req);
@@ -269,6 +296,8 @@ struct nvmet_req {
const struct nvmet_fabrics_ops *ops;
};
+extern struct workqueue_struct *buffered_io_wq;
+
static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
req->rsp->status = cpu_to_le16(status << 1);
@@ -337,6 +366,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+ struct nvmet_port *port);
+void nvmet_port_send_ana_event(struct nvmet_port *port);
+
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
@@ -357,6 +390,22 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd);
#define NVMET_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+
+/*
+ * Nice round number that makes a list of nsids fit into a page.
+ * Should become tunable at some point in the future.
+ */
+#define NVMET_MAX_NAMESPACES 1024
+
+/*
+ * 0 is not a valid ANA group ID, so we start numbering at 1.
+ *
+ * ANA Group 1 exists without manual intervention, has namespaces assigned to it
+ * by default, and is available in an optimized state through all ports.
+ */
+#define NVMET_MAX_ANAGRPS 128
+#define NVMET_DEFAULT_ANA_GRPID 1
+
#define NVMET_KAS 10
#define NVMET_DISC_KATO 120
@@ -370,6 +419,10 @@ extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;
+extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+extern u64 nvmet_ana_chgcnt;
+extern struct rw_semaphore nvmet_ana_sem;
+
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
const char *hostnqn);
@@ -377,6 +430,9 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
+u16 nvmet_bdev_flush(struct nvmet_req *req);
+u16 nvmet_file_flush(struct nvmet_req *req);
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 52e0c5d579a7..3533e918ea37 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -33,16 +33,17 @@
#include "nvmet.h"
/*
- * We allow up to a page of inline data to go with the SQE
+ * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
*/
-#define NVMET_RDMA_INLINE_DATA_SIZE PAGE_SIZE
+#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
+#define NVMET_RDMA_MAX_INLINE_SGE 4
+#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
struct nvmet_rdma_cmd {
- struct ib_sge sge[2];
+ struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
struct ib_cqe cqe;
struct ib_recv_wr wr;
- struct scatterlist inline_sg;
- struct page *inline_page;
+ struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
struct nvme_command *nvme_cmd;
struct nvmet_rdma_queue *queue;
};
@@ -116,6 +117,8 @@ struct nvmet_rdma_device {
size_t srq_size;
struct kref ref;
struct list_head entry;
+ int inline_data_size;
+ int inline_page_count;
};
static bool nvmet_rdma_use_srq;
@@ -138,6 +141,11 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+static int num_pages(int len)
+{
+ return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
+}
+
/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
@@ -184,6 +192,71 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
+static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c)
+{
+ struct scatterlist *sg;
+ struct ib_sge *sge;
+ int i;
+
+ if (!ndev->inline_data_size)
+ return;
+
+ sg = c->inline_sg;
+ sge = &c->sge[1];
+
+ for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+ if (sge->length)
+ ib_dma_unmap_page(ndev->device, sge->addr,
+ sge->length, DMA_FROM_DEVICE);
+ if (sg_page(sg))
+ __free_page(sg_page(sg));
+ }
+}
+
+static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c)
+{
+ struct scatterlist *sg;
+ struct ib_sge *sge;
+ struct page *pg;
+ int len;
+ int i;
+
+ if (!ndev->inline_data_size)
+ return 0;
+
+ sg = c->inline_sg;
+ sg_init_table(sg, ndev->inline_page_count);
+ sge = &c->sge[1];
+ len = ndev->inline_data_size;
+
+ for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg)
+ goto out_err;
+ sg_assign_page(sg, pg);
+ sge->addr = ib_dma_map_page(ndev->device,
+ pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, sge->addr))
+ goto out_err;
+ sge->length = min_t(int, len, PAGE_SIZE);
+ sge->lkey = ndev->pd->local_dma_lkey;
+ len -= sge->length;
+ }
+
+ return 0;
+out_err:
+ for (; i >= 0; i--, sg--, sge--) {
+ if (sge->length)
+ ib_dma_unmap_page(ndev->device, sge->addr,
+ sge->length, DMA_FROM_DEVICE);
+ if (sg_page(sg))
+ __free_page(sg_page(sg));
+ }
+ return -ENOMEM;
+}
+
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
@@ -200,33 +273,17 @@ static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
c->sge[0].length = sizeof(*c->nvme_cmd);
c->sge[0].lkey = ndev->pd->local_dma_lkey;
- if (!admin) {
- c->inline_page = alloc_pages(GFP_KERNEL,
- get_order(NVMET_RDMA_INLINE_DATA_SIZE));
- if (!c->inline_page)
- goto out_unmap_cmd;
- c->sge[1].addr = ib_dma_map_page(ndev->device,
- c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
- DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
- goto out_free_inline_page;
- c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
- c->sge[1].lkey = ndev->pd->local_dma_lkey;
- }
+ if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
+ goto out_unmap_cmd;
c->cqe.done = nvmet_rdma_recv_done;
c->wr.wr_cqe = &c->cqe;
c->wr.sg_list = c->sge;
- c->wr.num_sge = admin ? 1 : 2;
+ c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
return 0;
-out_free_inline_page:
- if (!admin) {
- __free_pages(c->inline_page,
- get_order(NVMET_RDMA_INLINE_DATA_SIZE));
- }
out_unmap_cmd:
ib_dma_unmap_single(ndev->device, c->sge[0].addr,
sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
@@ -240,12 +297,8 @@ out:
static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
- if (!admin) {
- ib_dma_unmap_page(ndev->device, c->sge[1].addr,
- NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
- __free_pages(c->inline_page,
- get_order(NVMET_RDMA_INLINE_DATA_SIZE));
- }
+ if (!admin)
+ nvmet_rdma_free_inline_pages(ndev, c);
ib_dma_unmap_single(ndev->device, c->sge[0].addr,
sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
kfree(c->nvme_cmd);
@@ -382,15 +435,21 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *cmd)
{
- struct ib_recv_wr *bad_wr;
+ int ret;
ib_dma_sync_single_for_device(ndev->device,
cmd->sge[0].addr, cmd->sge[0].length,
DMA_FROM_DEVICE);
if (ndev->srq)
- return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
- return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
+ ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ else
+ ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+
+ if (unlikely(ret))
+ pr_err("post_recv cmd failed\n");
+
+ return ret;
}
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
@@ -429,7 +488,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
}
- if (rsp->req.sg != &rsp->cmd->inline_sg)
+ if (rsp->req.sg != rsp->cmd->inline_sg)
sgl_free(rsp->req.sg);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
@@ -472,7 +531,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
struct nvmet_rdma_rsp *rsp =
container_of(req, struct nvmet_rdma_rsp, req);
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
- struct ib_send_wr *first_wr, *bad_wr;
+ struct ib_send_wr *first_wr;
if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
@@ -493,7 +552,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
rsp->send_sge.addr, rsp->send_sge.length,
DMA_TO_DEVICE);
- if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
+ if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
}
@@ -529,10 +588,25 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
u64 off)
{
- sg_init_table(&rsp->cmd->inline_sg, 1);
- sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
- rsp->req.sg = &rsp->cmd->inline_sg;
- rsp->req.sg_cnt = 1;
+ int sg_count = num_pages(len);
+ struct scatterlist *sg;
+ int i;
+
+ sg = rsp->cmd->inline_sg;
+ for (i = 0; i < sg_count; i++, sg++) {
+ if (i < sg_count - 1)
+ sg_unmark_end(sg);
+ else
+ sg_mark_end(sg);
+ sg->offset = off;
+ sg->length = min_t(int, len, PAGE_SIZE - off);
+ len -= sg->length;
+ if (!i)
+ off = 0;
+ }
+
+ rsp->req.sg = rsp->cmd->inline_sg;
+ rsp->req.sg_cnt = sg_count;
}
static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
@@ -544,7 +618,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
if (!nvme_is_write(rsp->req.cmd))
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
+ if (off + len > rsp->queue->dev->inline_data_size) {
pr_err("invalid inline data offset!\n");
return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
}
@@ -743,7 +817,7 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
srq_size = 4095; /* XXX: tune */
srq_attr.attr.max_wr = srq_size;
- srq_attr.attr.max_sge = 2;
+ srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
srq_attr.attr.srq_limit = 0;
srq_attr.srq_type = IB_SRQT_BASIC;
srq = ib_create_srq(ndev->pd, &srq_attr);
@@ -765,11 +839,16 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
ndev->srq = srq;
ndev->srq_size = srq_size;
- for (i = 0; i < srq_size; i++)
- nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+ for (i = 0; i < srq_size; i++) {
+ ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+ if (ret)
+ goto out_free_cmds;
+ }
return 0;
+out_free_cmds:
+ nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
ib_destroy_srq(srq);
return ret;
@@ -793,7 +872,10 @@ static void nvmet_rdma_free_dev(struct kref *ref)
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
+ struct nvmet_port *port = cm_id->context;
struct nvmet_rdma_device *ndev;
+ int inline_page_count;
+ int inline_sge_count;
int ret;
mutex_lock(&device_list_mutex);
@@ -807,6 +889,18 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
if (!ndev)
goto out_err;
+ inline_page_count = num_pages(port->inline_data_size);
+ inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
+ cm_id->device->attrs.max_recv_sge) - 1;
+ if (inline_page_count > inline_sge_count) {
+ pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
+ port->inline_data_size, cm_id->device->name,
+ inline_sge_count * PAGE_SIZE);
+ port->inline_data_size = inline_sge_count * PAGE_SIZE;
+ inline_page_count = inline_sge_count;
+ }
+ ndev->inline_data_size = port->inline_data_size;
+ ndev->inline_page_count = inline_page_count;
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -874,14 +968,14 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
- ndev->device->attrs.max_sge);
+ ndev->device->attrs.max_send_sge);
if (ndev->srq) {
qp_attr.srq = ndev->srq;
} else {
/* +1 for drain */
qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
- qp_attr.cap.max_recv_sge = 2;
+ qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
}
ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
@@ -899,13 +993,17 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
if (!ndev->srq) {
for (i = 0; i < queue->recv_queue_size; i++) {
queue->cmds[i].queue = queue;
- nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+ ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+ if (ret)
+ goto err_destroy_qp;
}
}
out:
return ret;
+err_destroy_qp:
+ rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
ib_free_cq(queue->cq);
goto out;
@@ -1379,6 +1477,15 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
return -EINVAL;
}
+ if (port->inline_data_size < 0) {
+ port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+ } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+ pr_warn("inline_data_size %u is too large, reducing to %u\n",
+ port->inline_data_size,
+ NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+ port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ }
+
ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
port->disc_addr.trsvcid, &addr);
if (ret) {
@@ -1456,7 +1563,6 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
- .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE,
.msdbd = 1,
.has_keyed_sgls = 1,
.add_port = nvmet_rdma_add_port,
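The num_pages() helper added above is a ceiling division of len by the page size, which is what bounds the number of inline SGEs per command. A stand-alone sketch of that arithmetic (user-space, assuming 4 KiB pages for illustration; the kernel derives these constants from PAGE_SHIFT):

#include <assert.h>
#include <stdio.h>

/* Assumed 4 KiB pages, for illustration only. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Same expression as the helper added to rdma.c: ceil(len / PAGE_SIZE). */
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

int main(void)
{
	assert(num_pages(1) == 1);
	assert(num_pages(4096) == 1);   /* exactly one page */
	assert(num_pages(4097) == 2);   /* spills into a second page */
	assert(num_pages(16384) == 4);  /* 16KB maximum -> 4 inline SGEs */
	printf("num_pages checks passed\n");
	return 0;
}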
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 54a3c298247b..0a7a470ee859 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -181,4 +181,15 @@ config RAVE_SP_EEPROM
help
Say y here to enable Rave SP EEPROM support.
+config SC27XX_EFUSE
+ tristate "Spreadtrum SC27XX eFuse Support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to read specified values from the eFuse
+ block on Spreadtrum SC27XX PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sc27xx-efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 27e96a8efd1c..4e8c61628f1a 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -39,4 +39,5 @@ obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
nvmem_snvs_lpgpr-y := snvs_lpgpr.o
obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
-
+obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
+nvmem-sc27xx-efuse-y := sc27xx-efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index b5b0cdc21d01..aa1657831b70 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -31,7 +31,6 @@ struct nvmem_device {
struct device dev;
int stride;
int word_size;
- int ncells;
int id;
int users;
size_t size;
@@ -389,7 +388,6 @@ int nvmem_add_cells(struct nvmem_device *nvmem,
nvmem_cell_add(cells[i]);
}
- nvmem->ncells = ncells;
/* remove tmp array */
kfree(cells);
@@ -936,6 +934,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
return cell;
}
+ /* NULL cell_id only allowed for device tree; invalid otherwise */
+ if (!cell_id)
+ return ERR_PTR(-EINVAL);
+
return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
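With the check above, nvmem_cell_get() only accepts a NULL cell_id for device-tree lookups; non-DT consumers must name the cell. A hedged consumer-side sketch (the cell name "calibration" is a placeholder, not taken from this patch):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>

static int read_calibration_cell(struct device *dev, void *buf, size_t buflen)
{
	/* A NULL cell_id now yields -EINVAL unless dev has an OF node. */
	struct nvmem_cell *cell = nvmem_cell_get(dev, "calibration");
	size_t len;
	void *data;

	if (IS_ERR(cell))
		return PTR_ERR(cell);

	data = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(data))
		return PTR_ERR(data);

	memcpy(buf, data, min(len, buflen));
	kfree(data);
	return 0;
}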
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 60816c856dd6..afb429a417fe 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -409,6 +409,12 @@ static const struct ocotp_params imx6sl_params = {
.set_timing = imx_ocotp_set_imx6_timing,
};
+static const struct ocotp_params imx6sll_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
static const struct ocotp_params imx6sx_params = {
.nregs = 128,
.bank_address_words = 0,
@@ -433,6 +439,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
{ .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
+ { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index b1af966206a6..a9534a6e8636 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index e66adf17a747..58c998b2e3bc 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 4f650baad983..fbb1f1df6fc7 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
index 50aeea6ec6cc..66699d44f73d 100644
--- a/drivers/nvmem/rave-sp-eeprom.c
+++ b/drivers/nvmem/rave-sp-eeprom.c
@@ -35,6 +35,7 @@ enum rave_sp_eeprom_header_size {
RAVE_SP_EEPROM_HEADER_SMALL = 4U,
RAVE_SP_EEPROM_HEADER_BIG = 5U,
};
+#define RAVE_SP_EEPROM_HEADER_MAX RAVE_SP_EEPROM_HEADER_BIG
#define RAVE_SP_EEPROM_PAGE_SIZE 32U
@@ -97,9 +98,12 @@ static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom,
const unsigned int rsp_size =
is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page);
unsigned int offset = 0;
- u8 cmd[cmd_size];
+ u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)];
int ret;
+ if (WARN_ON(cmd_size > sizeof(cmd)))
+ return -EINVAL;
+
cmd[offset++] = eeprom->address;
cmd[offset++] = 0;
cmd[offset++] = type;
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
new file mode 100644
index 000000000000..33185d8d82cf
--- /dev/null
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/hwspinlock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/nvmem-provider.h>
+
+/* PMIC global registers definition */
+#define SC27XX_MODULE_EN 0xc08
+#define SC27XX_EFUSE_EN BIT(6)
+
+/* Efuse controller registers definition */
+#define SC27XX_EFUSE_GLB_CTRL 0x0
+#define SC27XX_EFUSE_DATA_RD 0x4
+#define SC27XX_EFUSE_DATA_WR 0x8
+#define SC27XX_EFUSE_BLOCK_INDEX 0xc
+#define SC27XX_EFUSE_MODE_CTRL 0x10
+#define SC27XX_EFUSE_STATUS 0x14
+#define SC27XX_EFUSE_WR_TIMING_CTRL 0x20
+#define SC27XX_EFUSE_RD_TIMING_CTRL 0x24
+#define SC27XX_EFUSE_EFUSE_DEB_CTRL 0x28
+
+/* Mask definition for SC27XX_EFUSE_BLOCK_INDEX register */
+#define SC27XX_EFUSE_BLOCK_MASK GENMASK(4, 0)
+
+/* Bits definitions for SC27XX_EFUSE_MODE_CTRL register */
+#define SC27XX_EFUSE_PG_START BIT(0)
+#define SC27XX_EFUSE_RD_START BIT(1)
+#define SC27XX_EFUSE_CLR_RDDONE BIT(2)
+
+/* Bits definitions for SC27XX_EFUSE_STATUS register */
+#define SC27XX_EFUSE_PGM_BUSY BIT(0)
+#define SC27XX_EFUSE_READ_BUSY BIT(1)
+#define SC27XX_EFUSE_STANDBY BIT(2)
+#define SC27XX_EFUSE_GLOBAL_PROT BIT(3)
+#define SC27XX_EFUSE_RD_DONE BIT(4)
+
+/* Block number and block width (bytes) definitions */
+#define SC27XX_EFUSE_BLOCK_MAX 32
+#define SC27XX_EFUSE_BLOCK_WIDTH 2
+
+/* Timeout (ms) for acquiring the hardware spinlock */
+#define SC27XX_EFUSE_HWLOCK_TIMEOUT 5000
+
+/* Timeout (us) for polling the status */
+#define SC27XX_EFUSE_POLL_TIMEOUT 3000000
+#define SC27XX_EFUSE_POLL_DELAY_US 10000
+
+struct sc27xx_efuse {
+ struct device *dev;
+ struct regmap *regmap;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ u32 base;
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems access the single efuse
+ * controller, so one hardware spinlock is needed to synchronize access
+ * between them.
+ */
+static int sc27xx_efuse_lock(struct sc27xx_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SC27XX_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timed out acquiring the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sc27xx_efuse_unlock(struct sc27xx_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read_poll_timeout(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_STATUS,
+ val, (val & bits),
+ SC27XX_EFUSE_POLL_DELAY_US,
+ SC27XX_EFUSE_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timed out polling the efuse status\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sc27xx_efuse *efuse = context;
+ u32 buf;
+ int ret;
+
+ if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH)
+ return -EINVAL;
+
+ ret = sc27xx_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ /* Enable the efuse controller. */
+ ret = regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN,
+ SC27XX_EFUSE_EN, SC27XX_EFUSE_EN);
+ if (ret)
+ goto unlock_efuse;
+
+ /*
+ * Before reading, we should ensure the efuse controller is in
+ * standby state.
+ */
+ ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_STANDBY);
+ if (ret)
+ goto disable_efuse;
+
+ /* Set the block address to be read. */
+ ret = regmap_write(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
+ offset & SC27XX_EFUSE_BLOCK_MASK);
+ if (ret)
+ goto disable_efuse;
+
+ /* Start the read process from efuse memory. */
+ ret = regmap_update_bits(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_MODE_CTRL,
+ SC27XX_EFUSE_RD_START,
+ SC27XX_EFUSE_RD_START);
+ if (ret)
+ goto disable_efuse;
+
+ /*
+ * Poll the read-done status to make sure the read has completed,
+ * which means the data can now be read out.
+ */
+ ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_RD_DONE);
+ if (ret)
+ goto disable_efuse;
+
+ /* Read data from efuse memory. */
+ ret = regmap_read(efuse->regmap, efuse->base + SC27XX_EFUSE_DATA_RD,
+ &buf);
+ if (ret)
+ goto disable_efuse;
+
+ /* Clear the read done flag. */
+ ret = regmap_update_bits(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_MODE_CTRL,
+ SC27XX_EFUSE_CLR_RDDONE,
+ SC27XX_EFUSE_CLR_RDDONE);
+
+disable_efuse:
+ /* Disable the efuse controller after reading. */
+ regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN, SC27XX_EFUSE_EN, 0);
+unlock_efuse:
+ sc27xx_efuse_unlock(efuse);
+
+ if (!ret)
+ memcpy(val, &buf, bytes);
+
+ return ret;
+}
+
+static int sc27xx_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_config econfig = { };
+ struct nvmem_device *nvmem;
+ struct sc27xx_efuse *efuse;
+ int ret;
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!efuse->regmap) {
+ dev_err(&pdev->dev, "failed to get efuse regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &efuse->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get efuse base address\n");
+ return ret;
+ }
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwspinlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = hwspin_lock_request_specific(ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwspinlock\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ platform_set_drvdata(pdev, efuse);
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = true;
+ econfig.name = "sc27xx-efuse";
+ econfig.size = SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sc27xx_efuse_read;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem config\n");
+ hwspin_lock_free(efuse->hwlock);
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static int sc27xx_efuse_remove(struct platform_device *pdev)
+{
+ struct sc27xx_efuse *efuse = platform_get_drvdata(pdev);
+
+ hwspin_lock_free(efuse->hwlock);
+ return 0;
+}
+
+static const struct of_device_id sc27xx_efuse_of_match[] = {
+ { .compatible = "sprd,sc2731-efuse" },
+ { }
+};
+
+static struct platform_driver sc27xx_efuse_driver = {
+ .probe = sc27xx_efuse_probe,
+ .remove = sc27xx_efuse_remove,
+ .driver = {
+ .name = "sc27xx-efuse",
+ .of_match_table = sc27xx_efuse_of_match,
+ },
+};
+
+module_platform_driver(sc27xx_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC27xx efuse driver");
+MODULE_LICENSE("GPL v2");
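For reference, the addressing math behind sc27xx_efuse_read() and the econfig size above: 32 blocks of 2 bytes each, with block indices confined to the 5-bit SC27XX_EFUSE_BLOCK_MASK. A stand-alone check (GENMASK simplified here, assuming a 64-bit unsigned long):

#include <assert.h>

#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

#define SC27XX_EFUSE_BLOCK_MASK   GENMASK(4, 0)
#define SC27XX_EFUSE_BLOCK_MAX    32
#define SC27XX_EFUSE_BLOCK_WIDTH  2

int main(void)
{
	/* Total nvmem size exposed by the driver: 32 blocks x 2 bytes. */
	assert(SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH == 64);

	/* Every valid block index survives the 5-bit mask unchanged. */
	for (unsigned int blk = 0; blk < SC27XX_EFUSE_BLOCK_MAX; blk++)
		assert((blk & SC27XX_EFUSE_BLOCK_MASK) == blk);
	return 0;
}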
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
index 271f0b2ff86a..286910336ef6 100644
--- a/drivers/nvmem/uniphier-efuse.c
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 53349912ac75..7ddbf0a1ab86 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -846,7 +846,7 @@ EXPORT_SYMBOL(of_iomap);
* for a given device_node
* @device: the device whose io range will be mapped
* @index: index of the io range
- * @name: name of the resource
+ * @name: name "override" for the memory region request or NULL
*
* Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
* error code on failure. Usage example:
@@ -856,7 +856,7 @@ EXPORT_SYMBOL(of_iomap);
* return PTR_ERR(base);
*/
void __iomem *of_io_request_and_map(struct device_node *np, int index,
- const char *name)
+ const char *name)
{
struct resource res;
void __iomem *mem;
@@ -864,6 +864,8 @@ void __iomem *of_io_request_and_map(struct device_node *np, int index,
if (of_address_to_resource(np, index, &res))
return IOMEM_ERR_PTR(-EINVAL);
+ if (!name)
+ name = res.name;
if (!request_mem_region(res.start, resource_size(&res), name))
return IOMEM_ERR_PTR(-EBUSY);
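After this change a caller may pass NULL and have the memory region requested under the resource's own name. A minimal driver-side sketch (the node and index are placeholders, not taken from this patch):

#include <linux/err.h>
#include <linux/of_address.h>

static void __iomem *map_first_reg(struct device_node *np)
{
	/* NULL name: request_mem_region() now falls back to res.name. */
	void __iomem *base = of_io_request_and_map(np, 0, NULL);

	if (IS_ERR(base))
		return NULL; /* -EINVAL, -EBUSY, or mapping failure */
	return base;
}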
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 848f549164cd..466e3c8582f0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -102,7 +102,7 @@ static u32 phandle_cache_mask;
* - the phandle lookup overhead reduction provided by the cache
* will likely be less
*/
-static void of_populate_phandle_cache(void)
+void of_populate_phandle_cache(void)
{
unsigned long flags;
u32 cache_entries;
@@ -134,8 +134,7 @@ out:
raw_spin_unlock_irqrestore(&devtree_lock, flags);
}
-#ifndef CONFIG_MODULES
-static int __init of_free_phandle_cache(void)
+int of_free_phandle_cache(void)
{
unsigned long flags;
@@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void)
return 0;
}
+#if !defined(CONFIG_MODULES)
late_initcall_sync(of_free_phandle_cache);
#endif
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 33d85511d790..5957cd4fa262 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -127,20 +127,20 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
}
/*
- * Set default coherent_dma_mask to 32 bit. Drivers are expected to
- * setup the correct supported mask.
+ * If @dev is expected to be DMA-capable then the bus code that created
+ * it should have initialised its dma_mask pointer by this point. For
+ * now, we'll continue the legacy behaviour of coercing it to the
+ * coherent mask if not, but we'll no longer do so quietly.
*/
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- /*
- * Set it to coherent_dma_mask by default if the architecture
- * code has not set it.
- */
- if (!dev->dma_mask)
+ if (!dev->dma_mask) {
+ dev_warn(dev, "DMA mask not set\n");
dev->dma_mask = &dev->coherent_dma_mask;
+ }
- if (!size)
+ if (!size && dev->coherent_dma_mask)
size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ else if (!size)
+ size = 1ULL << 32;
dev->dma_pfn_offset = offset;
@@ -149,6 +149,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* set by the driver.
*/
mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+ dev->bus_dma_mask = mask;
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
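The mask computed at the end of of_dma_configure() caps both DMA masks at the highest byte the bus can address, and is now also recorded in bus_dma_mask. A stand-alone check of the arithmetic (assuming dma_addr = 0 and a 4 GiB range):

#include <assert.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Integer log2, standing in for the kernel's ilog2() on a runtime value. */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t dma_addr = 0, size = 1ULL << 32; /* assumed 4 GiB range */
	uint64_t mask = DMA_BIT_MASK(ilog2_u64(dma_addr + size - 1) + 1);

	/* Highest addressable byte is 0xFFFFFFFF -> a 32-bit mask. */
	assert(mask == DMA_BIT_MASK(32));
	return 0;
}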
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6da20b9688f7..800ad252cf9c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1034,14 +1034,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
bool hotpluggable;
/* We are scanning "memory" nodes only */
- if (type == NULL) {
- /*
- * The longtrail doesn't have a device_type on the
- * /memory node, so look for the node called /memory@0.
- */
- if (!IS_ENABLED(CONFIG_PPC32) || depth != 1 || strcmp(uname, "memory@0") != 0)
- return 0;
- } else if (strcmp(type, "memory") != 0)
+ if (type == NULL || strcmp(type, "memory") != 0)
return 0;
reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d963baf8e53a..e92391d6d1bd 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -367,14 +367,23 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
phy_interface_t iface;
struct device_node *phy_np;
struct phy_device *phy;
+ int ret;
iface = of_get_phy_mode(np);
if (iface < 0)
return NULL;
-
- phy_np = of_parse_phandle(np, "phy-handle", 0);
- if (!phy_np)
- return NULL;
+ if (of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+ if (ret < 0) {
+ netdev_err(dev, "broken fixed-link specification\n");
+ return NULL;
+ }
+ phy_np = of_node_get(np);
+ } else {
+ phy_np = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_np)
+ return NULL;
+ }
phy = of_phy_connect(dev, phy_np, hndlr, 0, iface);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 891d780c076a..216175d11d3d 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree);
#if defined(CONFIG_OF_OVERLAY)
void of_overlay_mutex_lock(void);
void of_overlay_mutex_unlock(void);
+int of_free_phandle_cache(void);
+void of_populate_phandle_cache(void);
#else
static inline void of_overlay_mutex_lock(void) {};
static inline void of_overlay_mutex_unlock(void) {};
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 7baa53e5b1d7..eda57ef12fd0 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
goto err_free_overlay_changeset;
}
+ of_populate_phandle_cache();
+
ret = __of_changeset_apply_notify(&ovcs->cset);
if (ret)
pr_err("overlay changeset entry notify error %d\n", ret);
@@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id)
list_del(&ovcs->ovcs_list);
+ /*
+ * Disable the phandle cache. This avoids a race condition that
+ * would arise from removing a cache entry while the associated
+ * node is being deleted.
+ */
+ of_free_phandle_cache();
+
ret_apply = 0;
ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+
+ of_populate_phandle_cache();
+
if (ret) {
if (ret_apply)
devicetree_state_flags |= DTSF_REVERT_FAIL;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 6925d993e1f0..7ba90c290a42 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -185,6 +185,9 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
goto err_clear_flag;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ if (!dev->dev.dma_mask)
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
of_msi_configure(&dev->dev, dev->dev.of_node);
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 2d1a5c737c6e..f12b9da69255 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -267,7 +267,7 @@ static void parport_ieee1284_terminate (struct parport *port)
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
- /* fall-though.. */
+ /* fall through */
default:
/* Terminate from all other modes. */
@@ -615,6 +615,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
parport_negotiate (port, IEEE1284_MODE_COMPAT);
+ /* fall through */
case IEEE1284_MODE_COMPAT:
DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
port->name);
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index ae9e01ef7599..461fd8a24278 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -58,6 +58,7 @@ enum parport_pc_pci_cards {
timedia_9079c,
wch_ch353_1s1p,
wch_ch353_2s1p,
+ wch_ch382_0s1p,
wch_ch382_2s1p,
brainboxes_5s1p,
sunix_2s1p,
@@ -147,6 +148,7 @@ static struct parport_pc_pci cards[] = {
/* timedia_9079c */ { 1, { { 2, 3 }, } },
/* wch_ch353_1s1p*/ { 1, { { 1, -1}, } },
/* wch_ch353_2s1p*/ { 1, { { 2, -1}, } },
+ /* wch_ch382_0s1p*/ { 1, { { 2, -1}, } },
/* wch_ch382_2s1p*/ { 1, { { 2, -1}, } },
/* brainboxes_5s1p */ { 1, { { 3, -1 }, } },
/* sunix_2s1p */ { 1, { { 3, -1 }, } },
@@ -252,6 +254,7 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
/* WCH CARDS */
{ 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+ { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p},
{ 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
/* BrainBoxes PX272/PX306 MIO card */
@@ -494,6 +497,12 @@ static struct pciserial_board pci_parport_serial_boards[] = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [wch_ch382_0s1p] = {
+ .flags = FL_BASE0,
+ .num_ports = 0,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[wch_ch382_2s1p] = {
.flags = FL_BASE0,
.num_ports = 2,
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 01cf1c1a841a..8de329546b82 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -286,12 +286,16 @@ static int bpp_probe(struct platform_device *op)
ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations),
GFP_KERNEL);
- if (!ops)
+ if (!ops) {
+ err = -ENOMEM;
goto out_unmap;
+ }
dprintk(("register_port\n"));
- if (!(p = parport_register_port((unsigned long)base, irq, dma, ops)))
+ if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) {
+ err = -ENOMEM;
goto out_free_ops;
+ }
p->size = size;
p->dev = &op->dev;
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 535201984b8b..1b2cfe51e8d7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
-obj-y += controller/
-obj-y += switch/
-
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
+obj-y += controller/
+obj-y += switch/
+
ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 4923a2a8e14b..5b78f3b1b918 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -273,6 +273,9 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
+ if (!pdev->eetlp_prefix_path)
+ return -EINVAL;
+
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 35b7fc87eac5..5cb40b2518f9 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -330,7 +330,7 @@ void pci_bus_add_device(struct pci_dev *dev)
return;
}
- dev->is_added = 1;
+ pci_dev_assign_added(dev, true);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);
@@ -347,14 +347,14 @@ void pci_bus_add_devices(const struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip already-added devices */
- if (dev->is_added)
+ if (pci_dev_is_added(dev))
continue;
pci_bus_add_device(dev);
}
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip if device attach failed */
- if (!dev->is_added)
+ if (!pci_dev_is_added(dev))
continue;
child = dev->subordinate;
if (child)
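The pci_dev_is_added()/pci_dev_assign_added() helpers used above replace the old dev->is_added bitfield; their bodies live in drivers/pci/pci.h and are not part of this hunk, and in the kernel they operate on a private flags word with set_bit()/test_bit(). A stand-alone sketch of that pattern using C11 atomics (names and layout assumed):

#include <assert.h>
#include <stdatomic.h>

/* Bit number assumed; the real definitions live in drivers/pci/pci.h. */
#define PCI_DEV_ADDED 0

struct fake_pci_dev {
	atomic_ulong priv_flags;
};

static void pci_dev_assign_added(struct fake_pci_dev *dev, int added)
{
	if (added)
		atomic_fetch_or(&dev->priv_flags, 1UL << PCI_DEV_ADDED);
	else
		atomic_fetch_and(&dev->priv_flags, ~(1UL << PCI_DEV_ADDED));
}

static int pci_dev_is_added(struct fake_pci_dev *dev)
{
	return !!(atomic_load(&dev->priv_flags) & (1UL << PCI_DEV_ADDED));
}

int main(void)
{
	struct fake_pci_dev dev;

	atomic_init(&dev.priv_flags, 0);
	assert(!pci_dev_is_added(&dev));
	pci_dev_assign_added(&dev, 1);
	assert(pci_dev_is_added(&dev));
	return 0;
}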
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 18fa09b3ac8f..028b287466fb 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -96,14 +96,13 @@ config PCI_HOST_GENERIC
depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
- select PCI_DOMAINS
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
+ depends on OF || COMPILE_TEST
help
Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -138,7 +137,6 @@ config PCI_VERSATILE
config PCIE_IPROC
tristate
- select PCI_DOMAINS
help
This enables the iProc PCIe core controller support for Broadcom's
iProc family of SoCs. An appropriate bus interface driver needs
@@ -176,7 +174,6 @@ config PCIE_IPROC_MSI
config PCIE_ALTERA
bool "Altera PCIe controller"
depends on ARM || NIOS2 || COMPILE_TEST
- select PCI_DOMAINS
help
Say Y here if you want to enable PCIe controller support on Altera
FPGA.
@@ -242,6 +239,16 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
+config PCIE_MOBIVEIL
+ bool "Mobiveil AXI PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want to enable support for the Mobiveil AXI PCIe
+ Soft IP, a PCIe Gen4 IP with up to 8 outbound and 8 inbound windows
+ for address translation.
+
config PCIE_TANGO_SMP8759
bool "Tango SMP8759 PCIe controller (DANGEROUS)"
depends on ARCH_TANGO && PCI_MSI && OF
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 24322b92f200..d56a507495c5 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 16f52c626b4b..91b0194240a5 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST
depends on PCI && PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCIE_DW_PLAT
- default y
help
Enables support for the PCIe controller in the Designware IP to
work in host mode. There are two instances of PCIe controller in
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 345aab56ce8b..ce9224a36f62 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -370,7 +370,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 4cc1e5df8c79..cee5f2f590e2 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -421,7 +421,6 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
}
}
- pp->root_bus_nr = -1;
pp->ops = &exynos_pcie_host_ops;
ret = dw_pcie_host_init(pp);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 80f604602783..4a9a673b4777 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -667,7 +667,6 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
}
}
- pp->root_bus_nr = -1;
pp->ops = &imx6_pcie_host_ops;
ret = dw_pcie_host_init(pp);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 3722a5f31e5e..e88bd221fffe 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -347,7 +347,6 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
}
}
- pp->root_bus_nr = -1;
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 072fd7ecc29f..0c389a30ef5d 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -172,7 +172,6 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct device *dev = &pdev->dev;
int ret;
- pp->root_bus_nr = -1;
pp->ops = &armada8k_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 321b56cfd5d0..dba83abfe764 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -399,7 +399,6 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
}
}
- pp->root_bus_nr = -1;
pp->ops = &artpec6_pcie_host_ops;
ret = dw_pcie_host_init(pp);
@@ -428,7 +427,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8650416f6f9e..1e7b02221eac 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -40,6 +40,39 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
__dw_pcie_ep_reset_bar(pci, bar, 0);
}
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+ u8 cap)
+{
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ reg = dw_pcie_readw_dbi(pci, cap_ptr);
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ cap_id = (reg & 0x00ff);
+
+ if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
+{
+ u8 next_cap_ptr;
+ u16 reg;
+
+ reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ if (!next_cap_ptr)
+ return 0;
+
+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr)
{
@@ -213,36 +246,84 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
{
- int val;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+
+ if (!ep->msi_cap)
+ return -EINVAL;
+
+ reg = ep->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+
+ if (!ep->msi_cap)
+ return -EINVAL;
+
+ reg = ep->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSI_FLAGS_QMASK;
+ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
- if (!(val & MSI_CAP_MSI_EN_MASK))
+ if (!ep->msix_cap)
return -EINVAL;
- val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val &= PCI_MSIX_FLAGS_QSIZE;
+
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
{
- int val;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+
+ if (!ep->msix_cap)
+ return -EINVAL;
- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
- val &= ~MSI_CAP_MMC_MASK;
- val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= interrupts;
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+ dw_pcie_writew_dbi(pci, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -282,32 +363,52 @@ static const struct pci_epc_ops epc_ops = {
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
.get_msi = dw_pcie_ep_get_msi,
+ .set_msix = dw_pcie_ep_set_msix,
+ .get_msix = dw_pcie_ep_get_msix,
.raise_irq = dw_pcie_ep_raise_irq,
.start = dw_pcie_ep_start,
.stop = dw_pcie_ep_stop,
};
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+ return -EINVAL;
+}
+
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
u16 msg_ctrl, msg_data;
- u32 msg_addr_lower, msg_addr_upper;
+ u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
bool has_upper;
int ret;
+ if (!ep->msi_cap)
+ return -EINVAL;
+
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+ reg = ep->msi_cap + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_readw_dbi(pci, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
if (has_upper) {
- msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
- msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
+ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+ reg = ep->msi_cap + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
} else {
msg_addr_upper = 0;
- msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
+ reg = ep->msi_cap + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
}
msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
@@ -322,6 +423,64 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+ u16 tbl_offset, bir;
+ u32 bar_addr_upper, bar_addr_lower;
+ u32 msg_addr_upper, msg_addr_lower;
+ u32 reg, msg_data, vec_ctrl;
+ u64 tbl_addr, msg_addr, reg_u64;
+ void __iomem *msix_tbl;
+ int ret;
+
+ reg = ep->msix_cap + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_readl_dbi(pci, reg);
+ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+ tbl_offset >>= 3;
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bir);
+ bar_addr_upper = 0;
+ bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+ if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
+
+ tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
+ tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
+ tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+ PCI_MSIX_ENTRY_SIZE);
+ if (!msix_tbl)
+ return -EINVAL;
+
+ msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
+ msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+ msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
+ vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+ iounmap(msix_tbl);
+
+ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
+ return -EPERM;
+
+ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ epc->mem->page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data, ep->msi_mem);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+ return 0;
+}
+
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -386,15 +545,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
ep->outbound_addr = addr;
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
return PTR_ERR(epc);
}
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
@@ -409,15 +571,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->page_size);
if (!ep->msi_mem) {
- dev_err(dev, "Failed to reserve memory for MSI\n");
+ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
}
+ ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
- epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
- EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+ ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
- ep->epc = epc;
- epc_set_drvdata(epc, ep);
dw_pcie_setup(pci);
return 0;
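The capability lookup added to pcie-designware-ep.c walks the standard PCI capability linked list through dbi reads. A stand-alone sketch of the same walk over a fake config space (iterative rather than recursive, and checking the ID before following the next pointer; layout invented for the test):

#include <assert.h>
#include <stdint.h>

#define PCI_CAPABILITY_LIST 0x34
#define PCI_CAP_ID_MSI      0x05
#define PCI_CAP_ID_MSIX     0x11
#define PCI_CAP_ID_MAX      0x15

static uint8_t cfg[256];

static uint16_t readw_cfg(uint8_t off)
{
	return (uint16_t)(cfg[off] | (cfg[off + 1] << 8));
}

/* Follow the next pointers until the wanted capability ID is found;
 * return its offset, or 0 if the chain ends or looks corrupt. */
static uint8_t find_capability(uint8_t cap)
{
	uint8_t ptr = cfg[PCI_CAPABILITY_LIST];

	while (ptr) {
		uint16_t reg = readw_cfg(ptr);
		uint8_t id = reg & 0xff;

		if (id > PCI_CAP_ID_MAX)
			return 0;
		if (id == cap)
			return ptr;
		ptr = reg >> 8;
	}
	return 0;
}

int main(void)
{
	/* Fake chain: 0x40 = MSI -> 0x50 = MSI-X -> end of list. */
	cfg[PCI_CAPABILITY_LIST] = 0x40;
	cfg[0x40] = PCI_CAP_ID_MSI;  cfg[0x41] = 0x50;
	cfg[0x50] = PCI_CAP_ID_MSIX; cfg[0x51] = 0x00;

	assert(find_capability(PCI_CAP_ID_MSI) == 0x40);
	assert(find_capability(PCI_CAP_ID_MSIX) == 0x50);
	assert(find_capability(0x01) == 0); /* PM capability absent */
	return 0;
}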
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 781aa03aeede..29a05759a294 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- ret = pci_remap_iospace(win->res, pp->io_base);
+ ret = devm_pci_remap_iospace(dev, win->res,
+ pp->io_base);
if (ret) {
dev_warn(dev, "Error %d: failed to map resource %pR\n",
ret, win->res);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 5937fed4c938..c12bf794d69c 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -70,24 +70,29 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
enum pci_barno bar;
for (bar = BAR_0; bar <= BAR_5; bar++)
dw_pcie_ep_reset_bar(pci, bar);
+
+ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
+ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type,
- u8 interrupt_num)
+ u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
- dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
- return -EINVAL;
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
}
@@ -118,7 +123,6 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
return pp->msi_irq;
}
- pp->root_bus_nr = -1;
pp->ops = &dw_plat_pcie_host_ops;
ret = dw_pcie_host_init(pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index bee4e2535a61..96126fd8403c 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -96,17 +96,6 @@
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
((0x3 << 20) | ((region) << 9) | (0x1 << 8))
-#define MSI_MESSAGE_CONTROL 0x52
-#define MSI_CAP_MMC_SHIFT 1
-#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
-#define MSI_CAP_MME_SHIFT 4
-#define MSI_CAP_MSI_EN_MASK 0x1
-#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
-#define MSI_MESSAGE_ADDR_L32 0x54
-#define MSI_MESSAGE_ADDR_U32 0x58
-#define MSI_MESSAGE_DATA_32 0x58
-#define MSI_MESSAGE_DATA_64 0x5C
-
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -191,7 +180,7 @@ enum dw_pcie_as_type {
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num);
+ enum pci_epc_irq_type type, u16 interrupt_num);
};
struct dw_pcie_ep {
@@ -208,6 +197,8 @@ struct dw_pcie_ep {
u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ops {
@@ -357,8 +348,11 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
@@ -374,12 +368,23 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return 0;
+}
+
static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
return 0;
}
+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 3611d6ce9a92..7b32e619b959 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -420,7 +420,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
phy_init(hipcie->phy);
}
- pp->root_bus_nr = -1;
pp->ops = &histb_pcie_host_ops;
platform_set_drvdata(pdev, hipcie);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d2970a009eb5..5352e0c3be82 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -430,6 +430,9 @@ static int kirin_pcie_host_init(struct pcie_port *pp)
{
kirin_pcie_establish_link(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
return 0;
}
@@ -445,9 +448,34 @@ static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
.host_init = kirin_pcie_host_init,
};
+static int kirin_pcie_add_msi(struct dw_pcie *pci,
+ struct platform_device *pdev)
+{
+ int irq;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev,
+ "failed to get MSI IRQ (%d)\n", irq);
+ return irq;
+ }
+
+ pci->pp.msi_irq = irq;
+ }
+
+ return 0;
+}
+
static int __init kirin_add_pcie_port(struct dw_pcie *pci,
struct platform_device *pdev)
{
+ int ret;
+
+ ret = kirin_pcie_add_msi(pci, pdev);
+ if (ret)
+ return ret;
+
pci->pp.ops = &kirin_pcie_host_ops;
return dw_pcie_host_init(&pci->pp);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index a1d0198081a6..4352c1cb926d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1251,7 +1251,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index ecb58f7b7566..7d0cdfd8138b 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -210,7 +210,6 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
return ret;
}
- pp->root_bus_nr = -1;
pp->ops = &spear13xx_pcie_host_ops;
ret = dw_pcie_host_init(pp);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index d3172d5d3d35..6b4555ff2548 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -111,24 +111,6 @@
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
-/* PCIe window configuration */
-#define OB_WIN_BASE_ADDR 0x4c00
-#define OB_WIN_BLOCK_SIZE 0x20
-#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
- OB_WIN_BLOCK_SIZE * (win) + \
- (offset))
-#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
-#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
-#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
-#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
-#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
-#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
-#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
-
-/* PCIe window types */
-#define OB_PCIE_MEM 0x0
-#define OB_PCIE_IO 0x4
-
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
@@ -247,34 +229,9 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
-/*
- * Set PCIe address window register which could be used for memory
- * mapping.
- */
-static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
- u32 win_num, u32 match_ms,
- u32 match_ls, u32 mask_ms,
- u32 mask_ls, u32 remap_ms,
- u32 remap_ls, u32 action)
-{
- advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
- advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
- advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
- advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
- advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
- advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
- advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
- advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
-}
-
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
- int i;
-
- /* Point PCIe unit MBUS decode windows to DRAM space */
- for (i = 0; i < 8; i++)
- advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
/* Set to Direct mode */
reg = advk_readl(pcie, CTRL_CONFIG_REG);
@@ -433,6 +390,15 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+ int devfn)
+{
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ return false;
+
+ return true;
+}
+
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -440,7 +406,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
- if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
+ if (!advk_pcie_valid_device(pcie, bus, devfn)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -494,7 +460,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int offset;
int ret;
- if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
if (where % size)
@@ -843,13 +809,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
switch (resource_type(res)) {
case IORESOURCE_IO:
- advk_pcie_set_ob_win(pcie, 1,
- upper_32_bits(res->start),
- lower_32_bits(res->start),
- 0, 0xF8000000, 0,
- lower_32_bits(res->start),
- OB_PCIE_IO);
- err = pci_remap_iospace(res, iobase);
+ err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
@@ -857,12 +817,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
}
break;
case IORESOURCE_MEM:
- advk_pcie_set_ob_win(pcie, 0,
- upper_32_bits(res->start),
- lower_32_bits(res->start),
- 0x0, 0xF8000000, 0,
- lower_32_bits(res->start),
- (2 << 20) | OB_PCIE_MEM);
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
@@ -889,7 +843,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
- struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int ret, irq;
@@ -943,21 +896,13 @@ static int advk_pcie_probe(struct platform_device *pdev)
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
- ret = pci_scan_root_bus_bridge(bridge);
+ ret = pci_host_probe(bridge);
if (ret < 0) {
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
return ret;
}
- bus = bridge->bus;
-
- pci_bus_assign_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(bus);
return 0;
}
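
The aardvark conversion above leans on pci_host_probe() subsuming the scan/assign/add sequence the driver used to open-code. A minimal sketch of the steps it consolidates, built from the same core helpers the removed code called (an illustration, not the kernel's exact implementation):

	static int host_probe_sketch(struct pci_host_bridge *bridge)
	{
		struct pci_bus *bus, *child;
		int ret;

		/* enumerate everything below the root bus */
		ret = pci_scan_root_bus_bridge(bridge);
		if (ret < 0)
			return ret;

		bus = bridge->bus;

		/* hand out BAR/window addresses, tune links, publish the buses */
		pci_bus_assign_resources(bus);
		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
		pci_bus_add_devices(bus);

		return 0;
	}
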
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index a1ebe9ed441f..bf5ece5d9291 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
dev_err(p->dev, "failed to get parent IRQ\n");
+ of_node_put(intc);
return irq ?: -EINVAL;
}
p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
&faraday_pci_irqdomain_ops, p);
+ of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
return -EINVAL;
@@ -501,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
dev_err(dev, "illegal IO mem size\n");
return -EINVAL;
}
- ret = pci_remap_iospace(io, io_base);
+ ret = devm_pci_remap_iospace(dev, io, io_base);
if (ret) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
ret, io);
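
The two of_node_put() additions in the ftpci100 hunk fix a device-node refcount leak: the lookup that produced the intc node took a reference, and it must be dropped on both the error path and the success path (the IRQ domain core takes its own reference on the node when the domain is created). A minimal sketch of the idiom, with lookup_child() as a hypothetical stand-in for whatever helper took the reference:

	struct device_node *intc = lookup_child(np);	/* takes a reference */
	int irq;

	if (!intc)
		return -ENODEV;

	irq = of_irq_get(intc, 0);
	if (irq <= 0) {
		of_node_put(intc);	/* drop the reference on the error path too */
		return irq ?: -EINVAL;
	}

	/* ... create the IRQ domain; it holds its own node reference ... */
	of_node_put(intc);
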
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6cc5036ac83c..c00f82cc54aa 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -45,6 +45,7 @@
#include <linux/irqdomain.h>
#include <asm/irqdomain.h>
#include <asm/apic.h>
+#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
@@ -1073,6 +1074,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct pci_bus *pbus;
struct pci_dev *pdev;
struct cpumask *dest;
+ unsigned long flags;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct {
@@ -1164,14 +1166,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* the channel callback directly when channel->target_cpu is
* the current CPU. When the higher level interrupt code
* calls us with interrupt enabled, let's add the
- * local_bh_disable()/enable() to avoid race.
+ * local_irq_save()/restore() to avoid the race:
+ * hv_pci_onchannelcallback() can also run in tasklet context.
*/
- local_bh_disable();
+ local_irq_save(flags);
if (hbus->hdev->channel->target_cpu == smp_processor_id())
hv_pci_onchannelcallback(hbus);
- local_bh_enable();
+ local_irq_restore(flags);
if (hpdev->state == hv_pcichild_ejecting) {
dev_err_once(&hbus->hdev->device,
@@ -1543,7 +1546,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
unsigned long flags;
int ret;
- hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC);
+ hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
if (!hpdev)
return NULL;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 23e270839e6a..50eb0729385b 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -125,6 +125,7 @@ struct mvebu_pcie {
struct platform_device *pdev;
struct mvebu_pcie_port *ports;
struct msi_controller *msi;
+ struct list_head resources;
struct resource io;
struct resource realio;
struct resource mem;
@@ -800,7 +801,7 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
- struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
int ret;
@@ -826,7 +827,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
- struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
int ret;
@@ -857,36 +858,6 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
-static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
-{
- struct mvebu_pcie *pcie = sys_to_pcie(sys);
- int err, i;
-
- pcie->mem.name = "PCI MEM";
- pcie->realio.name = "PCI I/O";
-
- if (resource_size(&pcie->realio) != 0)
- pci_add_resource_offset(&sys->resources, &pcie->realio,
- sys->io_offset);
-
- pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
- pci_add_resource(&sys->resources, &pcie->busn);
-
- err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
- if (err)
- return 0;
-
- for (i = 0; i < pcie->nports; i++) {
- struct mvebu_pcie_port *port = &pcie->ports[i];
-
- if (!port->base)
- continue;
- mvebu_pcie_setup_hw(port);
- }
-
- return 1;
-}
-
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
@@ -917,31 +888,6 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
return start;
}
-static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
-{
- struct hw_pci hw;
-
- memset(&hw, 0, sizeof(hw));
-
-#ifdef CONFIG_PCI_MSI
- hw.msi_ctrl = pcie->msi;
-#endif
-
- hw.nr_controllers = 1;
- hw.private_data = (void **)&pcie;
- hw.setup = mvebu_pcie_setup;
- hw.map_irq = of_irq_parse_and_map_pci;
- hw.ops = &mvebu_pcie_ops;
- hw.align_resource = mvebu_pcie_align_resource;
-
- pci_common_init_dev(&pcie->pdev->dev, &hw);
-}
-
-/*
- * Looks up the list of register addresses encoded into the reg =
- * <...> property for one that matches the given port/lane. Once
- * found, maps it.
- */
static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
struct device_node *np,
struct mvebu_pcie_port *port)
@@ -1190,46 +1136,79 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
clk_disable_unprepare(port->clk);
}
-static int mvebu_pcie_probe(struct platform_device *pdev)
+/*
+ * We can't use devm_of_pci_get_host_bridge_resources() because we
+ * need to parse our special DT properties encoding the MEM and IO
+ * apertures.
+ */
+static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
- struct device *dev = &pdev->dev;
- struct mvebu_pcie *pcie;
+ struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child;
- int num, i, ret;
+ unsigned int i;
+ int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
+ INIT_LIST_HEAD(&pcie->resources);
- pcie->pdev = pdev;
- platform_set_drvdata(pdev, pcie);
+ /* Get the bus range */
+ ret = of_pci_parse_bus_range(np, &pcie->busn);
+ if (ret) {
+ dev_err(dev, "failed to parse bus-range property: %d\n", ret);
+ return ret;
+ }
+ pci_add_resource(&pcie->resources, &pcie->busn);
- /* Get the PCIe memory and I/O aperture */
+ /* Get the PCIe memory aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
+ pcie->mem.name = "PCI MEM";
+ pci_add_resource(&pcie->resources, &pcie->mem);
+
+ /* Get the PCIe IO aperture */
mvebu_mbus_get_pcie_io_aperture(&pcie->io);
if (resource_size(&pcie->io) != 0) {
pcie->realio.flags = pcie->io.flags;
pcie->realio.start = PCIBIOS_MIN_IO;
pcie->realio.end = min_t(resource_size_t,
- IO_SPACE_LIMIT,
- resource_size(&pcie->io));
- } else
- pcie->realio = pcie->io;
+ IO_SPACE_LIMIT - SZ_64K,
+ resource_size(&pcie->io) - 1);
+ pcie->realio.name = "PCI I/O";
- /* Get the bus range */
- ret = of_pci_parse_bus_range(np, &pcie->busn);
- if (ret) {
- dev_err(dev, "failed to parse bus-range property: %d\n", ret);
- return ret;
+ for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
+ pci_ioremap_io(i, pcie->io.start + i);
+
+ pci_add_resource(&pcie->resources, &pcie->realio);
}
+ return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mvebu_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int num, i, ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ ret = mvebu_pcie_parse_request_resources(pcie);
+ if (ret)
+ return ret;
+
num = of_get_available_child_count(np);
pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
@@ -1272,20 +1251,24 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ mvebu_pcie_setup_hw(port);
mvebu_pcie_set_local_dev_nr(port, 1);
mvebu_sw_pci_bridge_init(port);
}
pcie->nports = i;
- for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
- pci_ioremap_io(i, pcie->io.start + i);
-
- mvebu_pcie_enable(pcie);
-
- platform_set_drvdata(pdev, pcie);
-
- return 0;
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = 0;
+ bridge->ops = &mvebu_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+ bridge->align_resource = mvebu_pcie_align_resource;
+ bridge->msi = pcie->msi;
+
+ return pci_host_probe(bridge);
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index 68b8bfbdb867..d219404bad92 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
v3->io_bus_addr = io->start - win->offset;
dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
io, &v3->io_bus_addr);
- ret = pci_remap_iospace(io, io_base);
+ ret = devm_pci_remap_iospace(dev, io, io_base);
if (ret) {
dev_warn(dev,
"error %d: failed to map resource %pR\n",
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index 994f32061b32..f59ad2728c0b 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
switch (resource_type(res)) {
case IORESOURCE_IO:
- err = pci_remap_iospace(res, iobase);
+ err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index d854d67e873c..ffda3e8b4742 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
case IORESOURCE_IO:
xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
res->start - window->offset);
- ret = pci_remap_iospace(res, io_base);
+ ret = devm_pci_remap_iospace(dev, res, io_base);
if (ret < 0)
return ret;
break;
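
Several hunks in this series switch pci_remap_iospace() to devm_pci_remap_iospace() so the I/O window is torn down automatically on probe failure or unbind. Conceptually, the managed variant pairs the mapping with a devres action, roughly as in this sketch (an illustration under that assumption, not the kernel's exact implementation):

	static void unmap_iospace(void *ptr)
	{
		struct resource *res = ptr;

		pci_unmap_iospace(res);
	}

	static int remap_iospace_sketch(struct device *dev, struct resource *res,
					phys_addr_t phys_addr)
	{
		int ret;

		ret = pci_remap_iospace(res, phys_addr);
		if (ret)
			return ret;

		/* undo the mapping automatically when @dev is unbound */
		return devm_add_action_or_reset(dev, unmap_iospace, res);
	}
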
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
index e3fe4124e3af..9e87dd7f9ac3 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/pcie-cadence-ep.c
@@ -238,7 +238,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
- u16 flags, mmc, mme;
+ u16 flags, mme;
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
@@ -249,7 +249,6 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
* Get the Multiple Message Enable bitfield from the Message Control
* register.
*/
- mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
return mme;
@@ -363,7 +362,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
}
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -439,6 +439,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
struct pci_epc *epc;
struct resource *res;
int ret;
+ int phy_count;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
if (!ep)
@@ -473,6 +474,12 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
if (!ep->ob_addr)
return -ENOMEM;
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, pcie);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -521,6 +528,10 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
err_get_sync:
pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(pcie);
+ phy_count = pcie->phy_count;
+ while (phy_count--)
+ device_link_del(pcie->link[phy_count]);
return ret;
}
@@ -528,6 +539,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_put_sync(dev);
@@ -536,13 +548,14 @@ static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
pm_runtime_disable(dev);
- /* The PCIe controller can't be disabled. */
+ cdns_pcie_disable_phy(pcie);
}
static struct platform_driver cdns_pcie_ep_driver = {
.driver = {
.name = "cdns-pcie-ep",
.of_match_table = cdns_pcie_ep_of_match,
+ .pm = &cdns_pcie_pm_ops,
},
.probe = cdns_pcie_ep_probe,
.shutdown = cdns_pcie_ep_shutdown,
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
index a4ebbd37b553..ec394f6a19c8 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/pcie-cadence-host.c
@@ -58,6 +58,11 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return pcie->reg_base + (where & 0xfff);
}
+ /* Check that the link is up */
+ if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
+ return NULL;
+ /* Clear AXI link-down status */
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
/* Update Output registers for AXI region 0. */
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
@@ -239,6 +244,7 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
struct cdns_pcie *pcie;
struct resource *res;
int ret;
+ int phy_count;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
if (!bridge)
@@ -290,6 +296,13 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
}
pcie->mem_res = res;
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, pcie);
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -322,15 +335,35 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
err_get_sync:
pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(pcie);
+ phy_count = pcie->phy_count;
+ while (phy_count--)
+ device_link_del(pcie->link[phy_count]);
return ret;
}
+static void cdns_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(pcie);
+}
+
static struct platform_driver cdns_pcie_host_driver = {
.driver = {
.name = "cdns-pcie-host",
.of_match_table = cdns_pcie_host_of_match,
+ .pm = &cdns_pcie_pm_ops,
},
.probe = cdns_pcie_host_probe,
+ .shutdown = cdns_pcie_shutdown,
};
builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
index 138d113eb45d..86f1b002c846 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/pcie-cadence.c
@@ -124,3 +124,126 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
+{
+ int i = pcie->phy_count;
+
+ while (i--) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(pcie->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+
+ return ret;
+}
+
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
+{
+ struct device_node *np = dev->of_node;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ int i;
+ int ret;
+ const char *name;
+
+ phy_count = of_property_count_strings(np, "phy-names");
+ if (phy_count < 1) {
+ dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ pcie->phy_count = 0;
+ return 0;
+ }
+
+ phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < phy_count; i++) {
+ of_property_read_string_index(np, "phy-names", i, &name);
+ phy[i] = devm_phy_optional_get(dev, name);
+ if (IS_ERR(phy[i]))
+ return PTR_ERR(phy[i]);
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
+ }
+
+ pcie->phy_count = phy_count;
+ pcie->phy = phy;
+ pcie->link = link;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret)
+ goto err_link;
+
+ return 0;
+
+err_link:
+ while (--i >= 0)
+ device_link_del(link[i]);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cdns_pcie_suspend_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+ cdns_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int cdns_pcie_resume_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+const struct dev_pm_ops cdns_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+ cdns_pcie_resume_noirq)
+};
diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/pcie-cadence.h
index 4bb27333b05c..ae6bf2a2b3d3 100644
--- a/drivers/pci/controller/pcie-cadence.h
+++ b/drivers/pci/controller/pcie-cadence.h
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/phy/phy.h>
/*
* Local Management Registers
@@ -165,6 +166,9 @@
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
+/* AXI link down register */
+#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
enum cdns_pcie_rp_bar {
RP_BAR0,
RP_BAR1,
@@ -229,6 +233,9 @@ struct cdns_pcie {
struct resource *mem_res;
bool is_rc;
u8 bus;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
};
/* Register access */
@@ -279,7 +286,7 @@ static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
}
static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
- u32 reg, u16 value)
+ u32 reg, u32 value)
{
writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
@@ -307,5 +314,9 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
u32 r, u64 cpu_addr);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+extern const struct dev_pm_ops cdns_pcie_pm_ops;
#endif /* _PCIE_CADENCE_H */
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 3c76c5fa4f32..3160e9342a2f 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -85,6 +85,8 @@
#define IMAP_VALID_SHIFT 0
#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
+#define IPROC_PCI_PM_CAP 0x48
+#define IPROC_PCI_PM_CAP_MASK 0xffff
#define IPROC_PCI_EXP_CAP 0xac
#define IPROC_PCIE_REG_INVALID 0xffff
@@ -375,6 +377,17 @@ static const u16 iproc_pcie_reg_paxc_v2[] = {
[IPROC_PCIE_CFG_DATA] = 0x1fc,
};
+/*
+ * List of device IDs of controllers that have a corrupted capability list
+ * and require a SW fixup
+ */
+static const u16 iproc_pcie_corrupt_cap_did[] = {
+ 0x16cd,
+ 0x16f0,
+ 0xd802,
+ 0xd804
+};
+
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
struct iproc_pcie *pcie = bus->sysdata;
@@ -495,6 +508,49 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
return data;
}
+static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
+{
+ u32 i, dev_id;
+
+ switch (where & ~0x3) {
+ case PCI_VENDOR_ID:
+ dev_id = *val >> 16;
+
+ /*
+ * Activate the fixup for those controllers that have corrupted
+ * capability list registers
+ */
+ for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
+ if (dev_id == iproc_pcie_corrupt_cap_did[i])
+ pcie->fix_paxc_cap = true;
+ break;
+
+ case IPROC_PCI_PM_CAP:
+ if (pcie->fix_paxc_cap) {
+ /* advertise PM, force next capability to PCIe */
+ *val &= ~IPROC_PCI_PM_CAP_MASK;
+ *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
+ }
+ break;
+
+ case IPROC_PCI_EXP_CAP:
+ if (pcie->fix_paxc_cap) {
+ /* advertise root port, version 2, terminate here */
+ *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
+ PCI_CAP_ID_EXP;
+ }
+ break;
+
+ case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
+ /* Don't advertise CRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ break;
+
+ default:
+ break;
+ }
+}
+
static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
@@ -509,13 +565,10 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
/* root complex access */
if (busno == 0) {
ret = pci_generic_config_read32(bus, devfn, where, size, val);
- if (ret != PCIBIOS_SUCCESSFUL)
- return ret;
+ if (ret == PCIBIOS_SUCCESSFUL)
+ iproc_pcie_fix_cap(pcie, where, val);
- /* Don't advertise CRS SV support */
- if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL)
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
- return PCIBIOS_SUCCESSFUL;
+ return ret;
}
cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
@@ -529,6 +582,25 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
if (size <= 2)
*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+ /*
+ * For PAXC and PAXCv2, the total number of PFs that one can enumerate
+ * depends on the firmware configuration. Unfortunately, due to an ASIC
+ * bug, unconfigured PFs cannot be properly hidden from the root
+ * complex. As a result, write access to these PFs will cause a bus
+ * lockup on the embedded processor.
+ *
+ * Since all unconfigured PFs are left with an incorrect, stale device
+ * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those
+ * accesses early here and reject them all.
+ */
+#define DEVICE_ID_MASK 0xffff0000
+#define DEVICE_ID_SHIFT 16
+ if (pcie->rej_unconfig_pf &&
+ (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
+ if ((*val & DEVICE_ID_MASK) ==
+ (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
return PCIBIOS_SUCCESSFUL;
}
@@ -628,7 +700,7 @@ static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
struct iproc_pcie *pcie = iproc_data(bus);
iproc_pcie_apb_err_disable(bus, true);
- if (pcie->type == IPROC_PCIE_PAXB_V2)
+ if (pcie->iproc_cfg_read)
ret = iproc_pcie_config_read(bus, devfn, where, size, val);
else
ret = pci_generic_config_read32(bus, devfn, where, size, val);
@@ -808,14 +880,14 @@ static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
- dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
- window_idx, oarr_offset, &axi_addr, &pci_addr);
- dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
- readl(pcie->base + oarr_offset),
- readl(pcie->base + oarr_offset + 4));
- dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
- readl(pcie->base + omap_offset),
- readl(pcie->base + omap_offset + 4));
+ dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+ window_idx, oarr_offset, &axi_addr, &pci_addr);
+ dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+ readl(pcie->base + oarr_offset),
+ readl(pcie->base + oarr_offset + 4));
+ dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
+ readl(pcie->base + omap_offset),
+ readl(pcie->base + omap_offset + 4));
return 0;
}
@@ -982,8 +1054,8 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
iproc_pcie_reg_is_invalid(imap_offset))
return -EINVAL;
- dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
- region_idx, iarr_offset, &axi_addr, &pci_addr);
+ dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+ region_idx, iarr_offset, &axi_addr, &pci_addr);
/*
* Program the IARR registers. The upper 32-bit IARR register is
@@ -993,9 +1065,9 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
pcie->base + iarr_offset);
writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
- dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
- readl(pcie->base + iarr_offset),
- readl(pcie->base + iarr_offset + 4));
+ dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+ readl(pcie->base + iarr_offset),
+ readl(pcie->base + iarr_offset + 4));
/*
* Now program the IMAP registers. Each IARR region may have one or
@@ -1009,10 +1081,10 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
writel(upper_32_bits(axi_addr),
pcie->base + imap_offset + ib_map->imap_addr_offset);
- dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
- window_idx, readl(pcie->base + imap_offset),
- readl(pcie->base + imap_offset +
- ib_map->imap_addr_offset));
+ dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+ window_idx, readl(pcie->base + imap_offset),
+ readl(pcie->base + imap_offset +
+ ib_map->imap_addr_offset));
imap_offset += ib_map->imap_window_offset;
axi_addr += size;
@@ -1144,10 +1216,22 @@ static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
return ret;
}
-static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
+ bool enable)
{
u32 val;
+ if (!enable) {
+ /*
+ * Disable PAXC MSI steering. All write transfers will be
+ * treated as non-MSI transfers
+ */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+ val &= ~MSI_ENABLE_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+ return;
+ }
+
/*
* Program bits [43:13] of address of GITS_TRANSLATER register into
* bits [30:0] of the MSI base address register. In fact, in all iProc
@@ -1201,7 +1285,7 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
return ret;
break;
case IPROC_PCIE_PAXC_V2:
- iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
+ iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
break;
default:
return -EINVAL;
@@ -1271,6 +1355,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB:
regs = iproc_pcie_reg_paxb;
+ pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_ob_map;
@@ -1293,10 +1378,14 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
case IPROC_PCIE_PAXC:
regs = iproc_pcie_reg_paxc;
pcie->ep_is_internal = true;
+ pcie->iproc_cfg_read = true;
+ pcie->rej_unconfig_pf = true;
break;
case IPROC_PCIE_PAXC_V2:
regs = iproc_pcie_reg_paxc_v2;
pcie->ep_is_internal = true;
+ pcie->iproc_cfg_read = true;
+ pcie->rej_unconfig_pf = true;
pcie->need_msi_steer = true;
break;
default:
@@ -1427,6 +1516,24 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
}
EXPORT_SYMBOL(iproc_pcie_remove);
+/*
+ * The MSI parsing logic in certain revisions of Broadcom PAXC-based root
+ * complexes does not work and needs to be disabled
+ */
+static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
+{
+ struct iproc_pcie *pcie = iproc_data(pdev->bus);
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
+ quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
+ quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
+ quirk_paxc_disable_msi_parsing);
+
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");
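
The iproc_pcie_fix_cap() fixup above works because the PCI capability list is a simple linked list in config space: each entry is a one-byte capability ID followed by a one-byte offset of the next entry, terminated by 0. The rewritten PM entry points at the PCIe entry at 0xac, which in turn ends the list. A hedged sketch of the walk the PCI core performs, with cfg_readb() as a hypothetical config-space accessor:

	u8 pos = cfg_readb(PCI_CAPABILITY_LIST);	/* offset of the first entry */

	while (pos) {
		u8 id = cfg_readb(pos + PCI_CAP_LIST_ID);
		u8 next = cfg_readb(pos + PCI_CAP_LIST_NEXT);

		/* a corrupted 'next' pointer derails this walk, hence the fixup */
		if (id == PCI_CAP_ID_EXP)
			break;
		pos = next;
	}
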
diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h
index 814b600b383a..4f03ea539805 100644
--- a/drivers/pci/controller/pcie-iproc.h
+++ b/drivers/pci/controller/pcie-iproc.h
@@ -58,8 +58,13 @@ struct iproc_msi;
* @phy: optional PHY device that controls the Serdes
* @map_irq: function callback to map interrupts
* @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @iproc_cfg_read: indicates the iProc config read function should be used
+ * @rej_unconfig_pf: indicates the root complex needs to detect and reject
+ * enumeration against unconfigured physical functions emulated in the ASIC
* @has_apb_err_disable: indicates the controller can be configured to prevent
* unsupported request from being forwarded as an APB bus error
+ * @fix_paxc_cap: indicates the controller has a corrupted capability list in
+ * its config space registers and requires a SW-based fixup
*
* @need_ob_cfg: indicates SW needs to configure the outbound mapping window
* @ob: outbound mapping related parameters
@@ -84,7 +89,10 @@ struct iproc_pcie {
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
bool ep_is_internal;
+ bool iproc_cfg_read;
+ bool rej_unconfig_pf;
bool has_apb_err_disable;
+ bool fix_paxc_cap;
bool need_ob_cfg;
struct iproc_pcie_ob ob;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 0baabe30858f..861dda69f366 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
if (err < 0)
return err;
- pci_remap_iospace(&pcie->pio, pcie->io.start);
+ devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
return 0;
}
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index 4d6c20e47bed..a939e8d31735 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -23,6 +23,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
/* register offsets and bit positions */
/*
@@ -107,7 +109,7 @@
#define CFG_WINDOW_TYPE 0
#define IO_WINDOW_TYPE 1
#define MEM_WINDOW_TYPE 2
-#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024)
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
#define MAX_PIO_WINDOWS 8
/* Parameters for the waiting for link up routine */
@@ -130,7 +132,7 @@ struct mobiveil_pcie {
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
- void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
struct irq_domain *intx_domain;
raw_spinlock_t intx_mask_lock;
int irq;
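
The IB_WIN_SIZE change above fixes a constant-expression overflow: 256 * 1024 * 1024 * 1024 is 2^38, but every operand is a 32-bit int, so the product overflows before it is ever widened. Casting the first operand to u64 forces the whole multiplication into 64-bit arithmetic. A userspace C illustration of the difference:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* all operands are int: the product overflows 32-bit arithmetic
		 * (undefined behavior; in practice it typically wraps to 0) */
		int64_t bad = 256 * 1024 * 1024 * 1024;

		/* widening the first operand makes the whole product 64-bit */
		int64_t good = (uint64_t)256 * 1024 * 1024 * 1024;

		printf("%lld %lld\n", (long long)bad, (long long)good);
		return 0;
	}
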
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index 874d75c9ee4a..c8febb009454 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
if (err)
return err;
- return phy_power_on(pcie->phy);
+ err = phy_power_on(pcie->phy);
+ if (err)
+ phy_exit(pcie->phy);
+
+ return err;
}
static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
if (rcar_pcie_hw_init(pcie)) {
dev_info(dev, "PCIe link down\n");
err = -ENODEV;
- goto err_clk_disable;
+ goto err_phy_shutdown;
}
data = rcar_pci_read_reg(pcie, MACSR);
@@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
dev_err(dev,
"failed to enable MSI support: %d\n",
err);
- goto err_clk_disable;
+ goto err_phy_shutdown;
}
}
@@ -1191,6 +1195,12 @@ err_msi_teardown:
if (IS_ENABLED(CONFIG_PCI_MSI))
rcar_pcie_teardown_msi(pcie);
+err_phy_shutdown:
+ if (pcie->phy) {
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+ }
+
err_clk_disable:
clk_disable_unprepare(pcie->bus_clk);
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 6beba8ed7b84..b8163c56a142 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -472,7 +472,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
enum pci_epc_irq_type type,
- u8 interrupt_num)
+ u16 interrupt_num)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 6a4bbb5b3de0..fb32840ce8e6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
PCI_NUM_INTX,
&legacy_domain_ops,
pcie);
-
+ of_node_put(legacy_intc_node);
if (!pcie->legacy_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index b110a3a814e3..7b1389d8e2a5 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops,
port);
+ of_node_put(pcie_intc_node);
if (!port->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 942b64fc7f1f..fd2dbd7eed7b 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -197,9 +197,20 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
int i, best = 1;
unsigned long flags;
- if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1)
+ if (vmd->msix_count == 1)
return &vmd->irqs[0];
+ /*
+ * Whitelist for fast-interrupt handlers. All others will share the
+ * "slow" interrupt vector.
+ */
+ switch (msi_desc_to_pci_dev(desc)->class) {
+ case PCI_CLASS_STORAGE_EXPRESS:
+ break;
+ default:
+ return &vmd->irqs[0];
+ }
+
raw_spin_lock_irqsave(&list_lock, flags);
for (i = 1; i < vmd->msix_count; i++)
if (vmd->irqs[i].count < vmd->irqs[best].count)
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 63ed706445b9..3e86fa3c7da3 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -18,13 +18,16 @@
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>
+#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_MSI 1
+#define IRQ_TYPE_MSIX 2
+
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
-#define MSI_NUMBER_SHIFT 2
-#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
-#define COMMAND_READ BIT(8)
-#define COMMAND_WRITE BIT(9)
-#define COMMAND_COPY BIT(10)
+#define COMMAND_RAISE_MSIX_IRQ BIT(2)
+#define COMMAND_READ BIT(3)
+#define COMMAND_WRITE BIT(4)
+#define COMMAND_COPY BIT(5)
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
@@ -45,6 +48,7 @@ struct pci_epf_test {
struct pci_epf *epf;
enum pci_barno test_reg_bar;
bool linkup_notifier;
+ bool msix_available;
struct delayed_work cmd_handler;
};
@@ -56,6 +60,8 @@ struct pci_epf_test_reg {
u64 dst_addr;
u32 size;
u32 checksum;
+ u32 irq_type;
+ u32 irq_number;
} __packed;
static struct pci_epf_header test_header = {
@@ -244,31 +250,42 @@ err:
return ret;
}
-static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
+ u16 irq)
{
- u8 msi_count;
struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
reg->status |= STATUS_IRQ_RAISED;
- msi_count = pci_epc_get_msi(epc, epf->func_no);
- if (irq > msi_count || msi_count <= 0)
+
+ switch (irq_type) {
+ case IRQ_TYPE_LEGACY:
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
- else
+ break;
+ case IRQ_TYPE_MSI:
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ break;
+ case IRQ_TYPE_MSIX:
+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
+ break;
+ default:
+ dev_err(dev, "Failed to raise IRQ, unknown type\n");
+ break;
+ }
}
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
int ret;
- u8 irq;
- u8 msi_count;
+ int count;
u32 command;
struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
cmd_handler.work);
struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -280,7 +297,10 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->command = 0;
reg->status = 0;
- irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+ if (reg->irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Failed to detect IRQ type\n");
+ goto reset_handler;
+ }
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
@@ -294,7 +314,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_WRITE_FAIL;
else
reg->status |= STATUS_WRITE_SUCCESS;
- pci_epf_test_raise_irq(epf_test, irq);
+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
+ reg->irq_number);
goto reset_handler;
}
@@ -304,7 +325,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_READ_SUCCESS;
else
reg->status |= STATUS_READ_FAIL;
- pci_epf_test_raise_irq(epf_test, irq);
+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
+ reg->irq_number);
goto reset_handler;
}
@@ -314,16 +336,28 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_COPY_SUCCESS;
else
reg->status |= STATUS_COPY_FAIL;
- pci_epf_test_raise_irq(epf_test, irq);
+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
+ reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSI_IRQ) {
- msi_count = pci_epc_get_msi(epc, epf->func_no);
- if (irq > msi_count || msi_count <= 0)
+ count = pci_epc_get_msi(epc, epf->func_no);
+ if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
+ reg->irq_number);
+ goto reset_handler;
+ }
+
+ if (command & COMMAND_RAISE_MSIX_IRQ) {
+ count = pci_epc_get_msix(epc, epf->func_no);
+ if (reg->irq_number > count || count <= 0)
+ goto reset_handler;
+ reg->status = STATUS_IRQ_RAISED;
+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
+ reg->irq_number);
goto reset_handler;
}
@@ -440,6 +474,8 @@ static int pci_epf_test_bind(struct pci_epf *epf)
else
epf_test->linkup_notifier = true;
+ epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
+
epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
ret = pci_epc_write_header(epc, epf->func_no, header);
@@ -457,8 +493,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return ret;
ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
- if (ret)
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
return ret;
+ }
+
+ if (epf_test->msix_available) {
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
+ if (ret) {
+ dev_err(dev, "MSI-X configuration failed\n");
+ return ret;
+ }
+ }
if (!epf_test->linkup_notifier)
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
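
With the reworked register layout, the host-side tester selects the interrupt type and vector through the new irq_type and irq_number fields instead of packing an MSI number into the command word. A hedged sketch of how a host driver might kick a read test over the test BAR (bar0 and the use of offsetof against struct pci_epf_test_reg are assumptions based on the layout above):

	writel(IRQ_TYPE_MSIX, bar0 + offsetof(struct pci_epf_test_reg, irq_type));
	writel(3, bar0 + offsetof(struct pci_epf_test_reg, irq_number)); /* vector 3 */
	writel(COMMAND_READ, bar0 + offsetof(struct pci_epf_test_reg, command));
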
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 018ea3433cb5..d1288a0bd530 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -286,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
to_pci_epf_group(item)->epf->msi_interrupts);
}
+static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ u16 val;
+ int ret;
+
+ ret = kstrtou16(page, 0, &val);
+ if (ret)
+ return ret;
+
+ to_pci_epf_group(item)->epf->msix_interrupts = val;
+
+ return len;
+}
+
+static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ to_pci_epf_group(item)->epf->msix_interrupts);
+}
+
PCI_EPF_HEADER_R(vendorid)
PCI_EPF_HEADER_W_u16(vendorid)
@@ -327,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id);
CONFIGFS_ATTR(pci_epf_, subsys_id);
CONFIGFS_ATTR(pci_epf_, interrupt_pin);
CONFIGFS_ATTR(pci_epf_, msi_interrupts);
+CONFIGFS_ATTR(pci_epf_, msix_interrupts);
static struct configfs_attribute *pci_epf_attrs[] = {
&pci_epf_attr_vendorid,
@@ -340,6 +363,7 @@ static struct configfs_attribute *pci_epf_attrs[] = {
&pci_epf_attr_subsys_id,
&pci_epf_attr_interrupt_pin,
&pci_epf_attr_msi_interrupts,
+ &pci_epf_attr_msix_interrupts,
NULL,
};
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index b0ee42739c3c..094dcc3203b8 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -131,13 +131,13 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
* @func_no: the endpoint function number in the EPC device
- * @type: specify the type of interrupt; legacy or MSI
- * @interrupt_num: the MSI interrupt number
+ * @type: specify the type of interrupt; legacy, MSI or MSI-X
+ * @interrupt_num: the MSI or MSI-X interrupt number
*
- * Invoke to raise an MSI or legacy interrupt
+ * Invoke to raise a legacy, MSI or MSI-X interrupt
*/
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
unsigned long flags;
@@ -201,7 +201,8 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
u8 encode_int;
unsigned long flags;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ interrupts > 32)
return -EINVAL;
if (!epc->ops->set_msi)
@@ -218,6 +219,63 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
+ * pci_epc_get_msix() - get the number of MSI-X interrupts allocated
+ * @epc: the EPC device to which MSI-X interrupts were requested
+ * @func_no: the endpoint function number in the EPC device
+ *
+ * Invoke to get the number of MSI-X interrupts allocated by the RC
+ */
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
+{
+ int interrupt;
+ unsigned long flags;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return 0;
+
+ if (!epc->ops->get_msix)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ interrupt = epc->ops->get_msix(epc, func_no);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ if (interrupt < 0)
+ return 0;
+
+ return interrupt + 1;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_msix);
+
+/**
+ * pci_epc_set_msix() - set the number of MSI-X interrupts required
+ * @epc: the EPC device on which MSI-X has to be configured
+ * @func_no: the endpoint function number in the EPC device
+ * @interrupts: number of MSI-X interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI-X interrupts.
+ */
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ interrupts < 1 || interrupts > 2048)
+ return -EINVAL;
+
+ if (!epc->ops->set_msix)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_msix);
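
The bounds checks above mirror how the two capabilities encode their vector counts: MSI advertises a power-of-two count as a 3-bit log2 field (1..32, hence the interrupts > 32 rejection), while the MSI-X table size field stores N - 1 (1..2048), which is why set_msix hands interrupts - 1 down to the controller. A hedged sketch of the two encodings (hypothetical helpers, not kernel API):

	#include <linux/log2.h>

	static u8 msi_mme_encode(unsigned int nvec)	/* 1..32 -> 0..5 */
	{
		return order_base_2(nvec);	/* rounds up to the next power of two */
	}

	static u16 msix_table_size_encode(unsigned int nvec)	/* 1..2048 -> 0..2047 */
	{
		return nvec - 1;
	}
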
+
+/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
* @func_no: the endpoint function number in the EPC device
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 523a8cab3bfb..825fa24427a3 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -137,6 +137,20 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
}
EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+{
+ struct config_group *group, *tmp;
+
+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+ return;
+
+ mutex_lock(&pci_epf_mutex);
+ list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+ pci_ep_cfs_remove_epf_group(group);
+ list_del(&driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+}
+
/**
* pci_epf_unregister_driver() - unregister the PCI EPF driver
* @driver: the PCI EPF driver that has to be unregistered
@@ -145,17 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
*/
void pci_epf_unregister_driver(struct pci_epf_driver *driver)
{
- struct config_group *group;
-
- mutex_lock(&pci_epf_mutex);
- list_for_each_entry(group, &driver->epf_group, group_entry)
- pci_ep_cfs_remove_epf_group(group);
- list_del(&driver->epf_group);
- mutex_unlock(&pci_epf_mutex);
+ pci_epf_remove_cfs(driver);
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
+{
+ struct config_group *group;
+ const struct pci_epf_device_id *id;
+
+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+ return 0;
+
+ INIT_LIST_HEAD(&driver->epf_group);
+
+ id = driver->id_table;
+ while (id->name[0]) {
+ group = pci_ep_cfs_add_epf_group(id->name);
+ if (IS_ERR(group)) {
+ pci_epf_remove_cfs(driver);
+ return PTR_ERR(group);
+ }
+
+ mutex_lock(&pci_epf_mutex);
+ list_add_tail(&group->group_entry, &driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+ id++;
+ }
+
+ return 0;
+}
+
/**
* __pci_epf_register_driver() - register a new PCI EPF driver
* @driver: structure representing PCI EPF driver
@@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner)
{
int ret;
- struct config_group *group;
- const struct pci_epf_device_id *id;
if (!driver->ops)
return -EINVAL;
@@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (ret)
return ret;
- INIT_LIST_HEAD(&driver->epf_group);
-
- id = driver->id_table;
- while (id->name[0]) {
- group = pci_ep_cfs_add_epf_group(id->name);
- mutex_lock(&pci_epf_mutex);
- list_add_tail(&group->group_entry, &driver->epf_group);
- mutex_unlock(&pci_epf_mutex);
- id++;
- }
+ pci_epf_add_cfs(driver);
return 0;
}
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 3979f89b250a..6b7c1ed58e7e 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -7,7 +7,6 @@
* All rights reserved.
*
* Send feedback to <kristen.c.accardi@intel.com>
- *
*/
#include <linux/module.h>
@@ -75,23 +74,34 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
/*
- * Per PCI firmware specification, we should run the ACPI _OSC
- * method to get control of hotplug hardware before using it. If
- * an _OSC is missing, we look for an OSHP to do the same thing.
- * To handle different BIOS behavior, we look for _OSC on a root
- * bridge preferentially (according to PCI fw spec). Later for
- * OSHP within the scope of the hotplug controller and its parents,
- * up to the host bridge under which this controller exists.
+ * If there's no ACPI host bridge (i.e., ACPI support is compiled
+ * into the kernel but the hardware platform doesn't support ACPI),
+ * there's nothing to do here.
*/
- if (shpchp_is_native(pdev))
- return 0;
-
- /* If _OSC exists, we should not evaluate OSHP */
host = pci_find_host_bridge(pdev->bus);
root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
- if (root->osc_support_set)
- goto no_control;
+ if (!root)
+ return 0;
+ /*
+ * If _OSC exists, it determines whether we're allowed to manage
+ * the SHPC. We executed it while enumerating the host bridge.
+ */
+ if (root->osc_support_set) {
+ if (host->native_shpc_hotplug)
+ return 0;
+ return -ENODEV;
+ }
+
+ /*
+ * In the absence of _OSC, we're always allowed to manage the SHPC.
+ * However, if an OSHP method is present, we must execute it so the
+ * firmware can transfer control to the OS, e.g., direct interrupts
+ * to the OS instead of to the firmware.
+ *
+ * N.B. The PCI Firmware Spec (r3.2, sec 4.8) does not endorse
+ * searching up the ACPI hierarchy, so the loops below are suspect.
+ */
handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
@@ -120,7 +130,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
if (ACPI_FAILURE(status))
break;
}
-no_control:
+
pci_info(pdev, "Cannot get control of SHPC hotplug\n");
kfree(string.pointer);
return -ENODEV;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 12b5655fd390..ad32ffbc4b91 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -254,20 +254,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-/**
- * release_slot - free up the memory used by a slot
- * @hotplug_slot: slot to free
- */
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
/* callback routine to initialize 'struct slot' for each slot */
int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
unsigned int sun)
@@ -287,7 +273,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
slot->hotplug_slot->info = &slot->info;
slot->hotplug_slot->private = slot;
- slot->hotplug_slot->release = &release_slot;
slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
slot->acpi_slot = acpiphp_slot;
@@ -324,13 +309,12 @@ error:
void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
{
struct slot *slot = acpiphp_slot->slot;
- int retval = 0;
pr_info("Slot [%s] unregistered\n", slot_name(slot));
- retval = pci_hp_deregister(slot->hotplug_slot);
- if (retval)
- pr_err("pci_hp_deregister failed with error %d\n", retval);
+ pci_hp_deregister(slot->hotplug_slot);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
}
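
The acpiphp change above shows the pattern this series applies across the hotplug drivers: drop the ->release callback and free the slot memory directly once pci_hp_deregister() returns, which these patches rely on being synchronous. In sketch form:

	pci_hp_deregister(slot->hotplug_slot);	/* no further callbacks after this */
	kfree(slot->hotplug_slot);
	kfree(slot);
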
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3a17b290df5d..ef0b1b6ba86f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -509,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Assume that newly added devices are powered on already. */
- if (!dev->is_added)
+ if (!pci_dev_is_added(dev))
dev->current_state = PCI_D0;
}
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 07b533adc9df..52a339baf06c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -195,10 +195,8 @@ get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
+static void release_slot(struct slot *slot)
{
- struct slot *slot = hotplug_slot->private;
-
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
pci_dev_put(slot->dev);
@@ -253,7 +251,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
hotplug_slot->ops = &cpci_hotplug_slot_ops;
/*
@@ -308,12 +305,8 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
slots--;
dbg("deregistering slot %s", slot_name(slot));
- status = pci_hp_deregister(slot->hotplug_slot);
- if (status) {
- err("pci_hp_deregister failed with error %d",
- status);
- break;
- }
+ pci_hp_deregister(slot->hotplug_slot);
+ release_slot(slot);
}
}
up_write(&list_rwsem);
@@ -623,6 +616,7 @@ cleanup_slots(void)
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
+ release_slot(slot);
}
cleanup_null:
up_write(&list_rwsem);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 1797e36ec586..5a06636e910a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -266,17 +266,6 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
return previous;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
static int ctrl_slot_cleanup(struct controller *ctrl)
{
struct slot *old_slot, *next_slot;
@@ -285,9 +274,11 @@ static int ctrl_slot_cleanup(struct controller *ctrl)
ctrl->slot = NULL;
while (old_slot) {
- /* memory will be freed by the release_slot callback */
next_slot = old_slot->next;
pci_hp_deregister(old_slot->hotplug_slot);
+ kfree(old_slot->hotplug_slot->info);
+ kfree(old_slot->hotplug_slot);
+ kfree(old_slot);
old_slot = next_slot;
}
@@ -678,7 +669,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
/* register this slot with the hotplug pci core */
- hotplug_slot->release = &release_slot;
hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 1869b0411ce0..4ea57e9019f1 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -673,7 +673,20 @@ static void free_slots(void)
list_for_each_entry_safe(slot_cur, next, &ibmphp_slot_head,
ibm_slot_list) {
- pci_hp_deregister(slot_cur->hotplug_slot);
+ pci_hp_del(slot_cur->hotplug_slot);
+ slot_cur->ctrl = NULL;
+ slot_cur->bus_on = NULL;
+
+ /*
+ * We don't want to actually remove the resources,
+ * since ibmphp_free_resources() will do just that.
+ */
+ ibmphp_unconfigure_card(&slot_cur, -1);
+
+ pci_hp_destroy(slot_cur->hotplug_slot);
+ kfree(slot_cur->hotplug_slot->info);
+ kfree(slot_cur->hotplug_slot);
+ kfree(slot_cur);
}
debug("%s -- exit\n", __func__);
}
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 64549aa24c0f..6f8e90e3ec08 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -699,25 +699,6 @@ static int fillslotinfo(struct hotplug_slot *hotplug_slot)
return rc;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot;
-
- if (!hotplug_slot || !hotplug_slot->private)
- return;
-
- slot = hotplug_slot->private;
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- slot->ctrl = NULL;
- slot->bus_on = NULL;
-
- /* we don't want to actually remove the resources, since free_resources will do just that */
- ibmphp_unconfigure_card(&slot, -1);
-
- kfree(slot);
-}
-
static struct pci_driver ibmphp_driver;
/*
@@ -941,7 +922,6 @@ static int __init ebda_rsrc_controller(void)
tmp_slot->hotplug_slot = hp_slot_ptr;
hp_slot_ptr->private = tmp_slot;
- hp_slot_ptr->release = release_slot;
rc = fillslotinfo(hp_slot_ptr);
if (rc)
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index af92fed46ab7..90fde5f106d8 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -396,8 +396,9 @@ static struct hotplug_slot *get_slot_from_name(const char *name)
* @owner: caller module owner
* @mod_name: caller module name
*
- * Registers a hotplug slot with the pci hotplug subsystem, which will allow
- * userspace interaction to the slot.
+ * Prepares a hotplug slot for in-kernel use and publishes it to user space
+ * in one go. Drivers may alternatively carry out the two steps separately
+ * by invoking pci_hp_initialize() and pci_hp_add().
*
* Returns 0 if successful, anything else for an error.
*/
@@ -406,45 +407,91 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
struct module *owner, const char *mod_name)
{
int result;
+
+ result = __pci_hp_initialize(slot, bus, devnr, name, owner, mod_name);
+ if (result)
+ return result;
+
+ result = pci_hp_add(slot);
+ if (result)
+ pci_hp_destroy(slot);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(__pci_hp_register);
+
+/**
+ * __pci_hp_initialize - prepare hotplug slot for in-kernel use
+ * @slot: pointer to the &struct hotplug_slot to initialize
+ * @bus: bus this slot is on
+ * @devnr: slot number
+ * @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
+ *
+ * Allocate and fill in a PCI slot for use by a hotplug driver. Once this has
+ * been called, the driver may invoke hotplug_slot_name() to get the slot's
+ * unique name. The driver must be prepared to handle a ->reset_slot callback
+ * from this point on.
+ *
+ * Returns 0 on success or a negative int on error.
+ */
+int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name, struct module *owner,
+ const char *mod_name)
+{
struct pci_slot *pci_slot;
if (slot == NULL)
return -ENODEV;
if ((slot->info == NULL) || (slot->ops == NULL))
return -EINVAL;
- if (slot->release == NULL) {
- dbg("Why are you trying to register a hotplug slot without a proper release function?\n");
- return -EINVAL;
- }
slot->ops->owner = owner;
slot->ops->mod_name = mod_name;
- mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
pci_slot = pci_create_slot(bus, devnr, name, slot);
- if (IS_ERR(pci_slot)) {
- result = PTR_ERR(pci_slot);
- goto out;
- }
+ if (IS_ERR(pci_slot))
+ return PTR_ERR(pci_slot);
slot->pci_slot = pci_slot;
pci_slot->hotplug = slot;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pci_hp_initialize);
- list_add(&slot->slot_list, &pci_hotplug_slot_list);
+/**
+ * pci_hp_add - publish hotplug slot to user space
+ * @slot: pointer to the &struct hotplug_slot to publish
+ *
+ * Make a hotplug slot's sysfs interface available and inform user space of its
+ * addition by sending a uevent. The hotplug driver must be prepared to handle
+ * all &struct hotplug_slot_ops callbacks from this point on.
+ *
+ * Returns 0 on success or a negative int on error.
+ */
+int pci_hp_add(struct hotplug_slot *slot)
+{
+ struct pci_slot *pci_slot = slot->pci_slot;
+ int result;
result = fs_add_slot(pci_slot);
+ if (result)
+ return result;
+
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
- dbg("Added slot %s to the list\n", name);
-out:
+ mutex_lock(&pci_hp_mutex);
+ list_add(&slot->slot_list, &pci_hotplug_slot_list);
mutex_unlock(&pci_hp_mutex);
- return result;
+ dbg("Added slot %s to the list\n", hotplug_slot_name(slot));
+ return 0;
}
-EXPORT_SYMBOL_GPL(__pci_hp_register);
+EXPORT_SYMBOL_GPL(pci_hp_add);
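
A minimal sketch of the split registration path added here, mirroring the
pciehp conversion further down; the function name and error handling are
hypothetical:

    /* Sketch only: assumes a fully populated struct hotplug_slot. */
    static int example_register(struct hotplug_slot *hotplug, struct pci_bus *bus)
    {
            int rc;

            rc = pci_hp_initialize(hotplug, bus, 0, "example");
            if (rc)
                    return rc;
            /* hotplug_slot_name() and ->reset_slot are live from here */
            rc = pci_hp_add(hotplug);
            if (rc)
                    pci_hp_destroy(hotplug);        /* roll back */
            return rc;
    }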
/**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
@@ -455,35 +502,62 @@ EXPORT_SYMBOL_GPL(__pci_hp_register);
*/
-int pci_hp_deregister(struct hotplug_slot *slot)
+void pci_hp_deregister(struct hotplug_slot *slot)
+{
+ pci_hp_del(slot);
+ pci_hp_destroy(slot);
+}
+EXPORT_SYMBOL_GPL(pci_hp_deregister);
+
+/**
+ * pci_hp_del - unpublish hotplug slot from user space
+ * @slot: pointer to the &struct hotplug_slot to unpublish
+ *
+ * Remove a hotplug slot's sysfs interface.
+ */
+void pci_hp_del(struct hotplug_slot *slot)
{
struct hotplug_slot *temp;
- struct pci_slot *pci_slot;
- if (!slot)
- return -ENODEV;
+ if (WARN_ON(!slot))
+ return;
mutex_lock(&pci_hp_mutex);
temp = get_slot_from_name(hotplug_slot_name(slot));
- if (temp != slot) {
+ if (WARN_ON(temp != slot)) {
mutex_unlock(&pci_hp_mutex);
- return -ENODEV;
+ return;
}
list_del(&slot->slot_list);
-
- pci_slot = slot->pci_slot;
- fs_remove_slot(pci_slot);
+ mutex_unlock(&pci_hp_mutex);
dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
+ fs_remove_slot(slot->pci_slot);
+}
+EXPORT_SYMBOL_GPL(pci_hp_del);
- slot->release(slot);
+/**
+ * pci_hp_destroy - remove hotplug slot from in-kernel use
+ * @slot: pointer to the &struct hotplug_slot to destroy
+ *
+ * Destroy a PCI slot used by a hotplug driver. Once this has been called,
+ * the driver may no longer invoke hotplug_slot_name() to get the slot's
+ * unique name. The driver no longer needs to handle a ->reset_slot callback
+ * from this point on.
+ */
+void pci_hp_destroy(struct hotplug_slot *slot)
+{
+ struct pci_slot *pci_slot = slot->pci_slot;
+
+ slot->pci_slot = NULL;
pci_slot->hotplug = NULL;
pci_destroy_slot(pci_slot);
- mutex_unlock(&pci_hp_mutex);
-
- return 0;
}
-EXPORT_SYMBOL_GPL(pci_hp_deregister);
+EXPORT_SYMBOL_GPL(pci_hp_destroy);
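
Teardown is symmetric. A sketch consolidating pciehp_remove() and
cleanup_slot() below; the quiescing step in the middle is driver-specific:

    pci_hp_del(hotplug);            /* unpublish from user space */
    /* ... driver-specific quiescing, e.g. pcie_shutdown_notification() ... */
    pci_hp_destroy(hotplug);        /* end in-kernel use */
    kfree(hotplug->info);           /* with ->release gone, memory is the driver's to free */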
/**
* pci_hp_change_slot_info - changes the slot's information structure in the core
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 5f892065585e..811cf83f956d 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/sched/signal.h> /* signal_pending() */
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include "../pcie/portdrv.h"
@@ -57,49 +58,111 @@ do { \
dev_warn(&ctrl->pcie->device, format, ## arg)
#define SLOT_NAME_SIZE 10
+
+/**
+ * struct slot - PCIe hotplug slot
+ * @state: current state machine position
+ * @ctrl: pointer to the slot's controller structure
+ * @hotplug_slot: pointer to the structure registered with the PCI hotplug core
+ * @work: work item to turn the slot on or off after 5 seconds in response to
+ * an Attention Button press
+ * @lock: protects reads and writes of @state;
+ * protects scheduling, execution and cancellation of @work
+ */
struct slot {
u8 state;
struct controller *ctrl;
struct hotplug_slot *hotplug_slot;
- struct delayed_work work; /* work for button event */
+ struct delayed_work work;
struct mutex lock;
- struct mutex hotplug_lock;
- struct workqueue_struct *wq;
-};
-
-struct event_info {
- u32 event_type;
- struct slot *p_slot;
- struct work_struct work;
};
+/**
+ * struct controller - PCIe hotplug controller
+ * @ctrl_lock: serializes writes to the Slot Control register
+ * @pcie: pointer to the controller's PCIe port service device
+ * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
+ * Link Status register and to the Presence Detect State bit in the Slot
+ * Status register during a slot reset which may cause them to flap
+ * @slot: pointer to the controller's slot structure
+ * @queue: wait queue to wake up on reception of a Command Completed event,
+ * used for synchronous writes to the Slot Control register
+ * @slot_cap: cached copy of the Slot Capabilities register
+ * @slot_ctrl: cached copy of the Slot Control register
+ * @poll_thread: thread to poll for slot events if no IRQ is available,
+ * enabled with pciehp_poll_mode module parameter
+ * @cmd_started: jiffies when the Slot Control register was last written;
+ * the next write is allowed 1 second later, absent a Command Completed
+ * interrupt (PCIe r4.0, sec 6.7.3.2)
+ * @cmd_busy: flag set on Slot Control register write, cleared by IRQ handler
+ * on reception of a Command Completed event
+ * @link_active_reporting: cached copy of Data Link Layer Link Active Reporting
+ * Capable bit in Link Capabilities register; if this bit is zero, the
+ * Data Link Layer Link Active bit in the Link Status register will never
+ * be set and the driver must therefore wait 1 second before assuming
+ * the link to a hotplugged device is up and accessing it
+ * @notification_enabled: whether the IRQ was requested successfully
+ * @power_fault_detected: whether a power fault was detected by the hardware
+ * that has not yet been cleared by the user
+ * @pending_events: used by the IRQ handler to save events retrieved from the
+ * Slot Status register for later consumption by the IRQ thread
+ * @request_result: result of last user request submitted to the IRQ thread
+ * @requester: wait queue to wake up on completion of user request,
+ * used for synchronous slot enable/disable request via sysfs
+ */
struct controller {
- struct mutex ctrl_lock; /* controller lock */
- struct pcie_device *pcie; /* PCI Express port service */
+ struct mutex ctrl_lock;
+ struct pcie_device *pcie;
+ struct rw_semaphore reset_lock;
struct slot *slot;
- wait_queue_head_t queue; /* sleep & wake process */
+ wait_queue_head_t queue;
u32 slot_cap;
u16 slot_ctrl;
- struct timer_list poll_timer;
+ struct task_struct *poll_thread;
unsigned long cmd_started; /* jiffies */
unsigned int cmd_busy:1;
unsigned int link_active_reporting:1;
unsigned int notification_enabled:1;
unsigned int power_fault_detected;
+ atomic_t pending_events;
+ int request_result;
+ wait_queue_head_t requester;
};
-#define INT_PRESENCE_ON 1
-#define INT_PRESENCE_OFF 2
-#define INT_POWER_FAULT 3
-#define INT_BUTTON_PRESS 4
-#define INT_LINK_UP 5
-#define INT_LINK_DOWN 6
-
-#define STATIC_STATE 0
+/**
+ * DOC: Slot state
+ *
+ * @OFF_STATE: slot is powered off, no subordinate devices are enumerated
+ * @BLINKINGON_STATE: slot will be powered on after the 5 second delay,
+ * green led is blinking
+ * @BLINKINGOFF_STATE: slot will be powered off after the 5 second delay,
+ * green led is blinking
+ * @POWERON_STATE: slot is currently powering on
+ * @POWEROFF_STATE: slot is currently powering off
+ * @ON_STATE: slot is powered on, subordinate devices have been enumerated
+ */
+#define OFF_STATE 0
#define BLINKINGON_STATE 1
#define BLINKINGOFF_STATE 2
#define POWERON_STATE 3
#define POWEROFF_STATE 4
+#define ON_STATE 5
+
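+
The transitions between these states, as implemented by the handlers in
pciehp_ctrl.c below (informal sketch):

    OFF_STATE --button--> BLINKINGON_STATE  --5 s--> POWERON_STATE  --> ON_STATE
    ON_STATE  --button--> BLINKINGOFF_STATE --5 s--> POWEROFF_STATE --> OFF_STATE

A second button press while blinking cancels the pending transition, and a
presence or link change skips the blinking phase, moving OFF_STATE or
ON_STATE directly to POWERON_STATE or POWEROFF_STATE.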
+/**
+ * DOC: Flags to request an action from the IRQ thread
+ *
+ * These are stored together with events read from the Slot Status register,
+ * hence must be greater than its 16-bit width.
+ *
+ * %DISABLE_SLOT: Disable the slot in response to a user request via sysfs or
+ * an Attention Button press after the 5 second delay
+ * %RERUN_ISR: Used by the IRQ handler to inform the IRQ thread that the
+ * hotplug port was inaccessible when the interrupt occurred, requiring
+ * that the IRQ handler is rerun by the IRQ thread after it has made the
+ * hotplug port accessible by runtime resuming its parents to D0
+ */
+#define DISABLE_SLOT (1 << 16)
+#define RERUN_ISR (1 << 17)
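
An illustrative sketch of how the two kinds of bits share @pending_events;
these are the calls this patch adds in pciehp_hpc.c and pciehp_ctrl.c:

    atomic_or(PCI_EXP_SLTSTA_PDC, &ctrl->pending_events);  /* ISR: hardware event */
    atomic_or(DISABLE_SLOT, &ctrl->pending_events);        /* pciehp_request() */
    events = atomic_xchg(&ctrl->pending_events, 0);        /* IRQ thread consumes both */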
#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP)
#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP)
@@ -113,15 +176,17 @@ struct controller {
int pciehp_sysfs_enable_slot(struct slot *slot);
int pciehp_sysfs_disable_slot(struct slot *slot);
-void pciehp_queue_interrupt_event(struct slot *slot, u32 event_type);
+void pciehp_request(struct controller *ctrl, int action);
+void pciehp_handle_button_press(struct slot *slot);
+void pciehp_handle_disable_request(struct slot *slot);
+void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events);
int pciehp_configure_device(struct slot *p_slot);
-int pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
-int pciehp_enable_slot(struct slot *p_slot);
-int pciehp_disable_slot(struct slot *p_slot);
-void pcie_reenable_notification(struct controller *ctrl);
+void pcie_shutdown_notification(struct controller *ctrl);
+void pcie_clear_hotplug_events(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 44a6a63802d5..ec48c9433ae5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -26,11 +26,12 @@
#include <linux/interrupt.h>
#include <linux/time.h>
+#include "../pci.h"
+
/* Global variables */
bool pciehp_debug;
bool pciehp_poll_mode;
int pciehp_poll_time;
-static bool pciehp_force;
/*
* not really modular, but the easiest way to keep compat with existing
@@ -39,11 +40,9 @@ static bool pciehp_force;
module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
-module_param(pciehp_force, bool, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
#define PCIE_MODULE_NAME "pciehp"
@@ -56,17 +55,6 @@ static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
static int reset_slot(struct hotplug_slot *slot, int probe);
-/**
- * release_slot - free up the memory used by a slot
- * @hotplug_slot: slot to free
- */
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- kfree(hotplug_slot->ops);
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
-}
-
static int init_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
@@ -107,15 +95,14 @@ static int init_slot(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug->info = info;
hotplug->private = slot;
- hotplug->release = &release_slot;
hotplug->ops = ops;
slot->hotplug_slot = hotplug;
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
- retval = pci_hp_register(hotplug,
- ctrl->pcie->port->subordinate, 0, name);
+ retval = pci_hp_initialize(hotplug,
+ ctrl->pcie->port->subordinate, 0, name);
if (retval)
- ctrl_err(ctrl, "pci_hp_register failed: error %d\n", retval);
+ ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
out:
if (retval) {
kfree(ops);
@@ -127,7 +114,12 @@ out:
static void cleanup_slot(struct controller *ctrl)
{
- pci_hp_deregister(ctrl->slot->hotplug_slot);
+ struct hotplug_slot *hotplug_slot = ctrl->slot->hotplug_slot;
+
+ pci_hp_destroy(hotplug_slot);
+ kfree(hotplug_slot->ops);
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
}
/*
@@ -136,8 +128,11 @@ static void cleanup_slot(struct controller *ctrl)
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_set_attention_status(slot, status);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
@@ -160,8 +155,11 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_get_power_status(slot, value);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
@@ -176,16 +174,22 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_get_latch_status(slot, value);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_get_adapter_status(slot, value);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
@@ -196,12 +200,40 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
return pciehp_reset_slot(slot, probe);
}
+/**
+ * pciehp_check_presence() - synthesize event if presence has changed
+ * @ctrl: controller to check
+ *
+ * On probe and resume, an explicit presence check is necessary to bring up an
+ * occupied slot or bring down an unoccupied slot. This can't be triggered by
+ * events in the Slot Status register; they may be stale and are therefore
+ * cleared. Secondly, sending an interrupt for "events that occur while
+ * interrupt generation is disabled [when] interrupt generation is subsequently
+ * enabled" is optional per PCIe r4.0, sec 6.7.3.4.
+ */
+static void pciehp_check_presence(struct controller *ctrl)
+{
+ struct slot *slot = ctrl->slot;
+ u8 occupied;
+
+ down_read(&ctrl->reset_lock);
+ mutex_lock(&slot->lock);
+
+ pciehp_get_adapter_status(slot, &occupied);
+ if ((occupied && (slot->state == OFF_STATE ||
+ slot->state == BLINKINGON_STATE)) ||
+ (!occupied && (slot->state == ON_STATE ||
+ slot->state == BLINKINGOFF_STATE)))
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+
+ mutex_unlock(&slot->lock);
+ up_read(&ctrl->reset_lock);
+}
+
static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
struct slot *slot;
- u8 occupied, poweron;
/* If this is not a "hotplug" service, we have no business here. */
if (dev->service != PCIE_PORT_SERVICE_HP)
@@ -238,21 +270,20 @@ static int pciehp_probe(struct pcie_device *dev)
goto err_out_free_ctrl_slot;
}
- /* Check if slot is occupied */
+ /* Publish to user space */
slot = ctrl->slot;
- pciehp_get_adapter_status(slot, &occupied);
- pciehp_get_power_status(slot, &poweron);
- if (occupied && pciehp_force) {
- mutex_lock(&slot->hotplug_lock);
- pciehp_enable_slot(slot);
- mutex_unlock(&slot->hotplug_lock);
+ rc = pci_hp_add(slot->hotplug_slot);
+ if (rc) {
+ ctrl_err(ctrl, "Publication to user space failed (%d)\n", rc);
+ goto err_out_shutdown_notification;
}
- /* If empty slot's power status is on, turn power off */
- if (!occupied && poweron && POWER_CTRL(ctrl))
- pciehp_power_off_slot(slot);
+
+ pciehp_check_presence(ctrl);
return 0;
+err_out_shutdown_notification:
+ pcie_shutdown_notification(ctrl);
err_out_free_ctrl_slot:
cleanup_slot(ctrl);
err_out_release_ctlr:
@@ -264,6 +295,8 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
+ pci_hp_del(ctrl->slot->hotplug_slot);
+ pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
@@ -274,27 +307,28 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
-static int pciehp_resume(struct pcie_device *dev)
+static int pciehp_resume_noirq(struct pcie_device *dev)
{
- struct controller *ctrl;
- struct slot *slot;
- u8 status;
+ struct controller *ctrl = get_service_data(dev);
+ struct slot *slot = ctrl->slot;
- ctrl = get_service_data(dev);
+ /* pci_restore_state() just wrote to the Slot Control register */
+ ctrl->cmd_started = jiffies;
+ ctrl->cmd_busy = true;
- /* reinitialize the chipset's event detection logic */
- pcie_reenable_notification(ctrl);
+ /* clear spurious events from rediscovery of inserted card */
+ if (slot->state == ON_STATE || slot->state == BLINKINGOFF_STATE)
+ pcie_clear_hotplug_events(ctrl);
- slot = ctrl->slot;
+ return 0;
+}
+
+static int pciehp_resume(struct pcie_device *dev)
+{
+ struct controller *ctrl = get_service_data(dev);
+
+ pciehp_check_presence(ctrl);
- /* Check if slot is occupied */
- pciehp_get_adapter_status(slot, &status);
- mutex_lock(&slot->hotplug_lock);
- if (status)
- pciehp_enable_slot(slot);
- else
- pciehp_disable_slot(slot);
- mutex_unlock(&slot->hotplug_lock);
return 0;
}
#endif /* PM */
@@ -309,6 +343,7 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
#ifdef CONFIG_PM
.suspend = pciehp_suspend,
+ .resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
#endif /* PM */
};
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index c684faa43387..da7c72372ffc 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -17,28 +17,11 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
-static void interrupt_event_handler(struct work_struct *work);
-
-void pciehp_queue_interrupt_event(struct slot *p_slot, u32 event_type)
-{
- struct event_info *info;
-
- info = kmalloc(sizeof(*info), GFP_ATOMIC);
- if (!info) {
- ctrl_err(p_slot->ctrl, "dropped event %d (ENOMEM)\n", event_type);
- return;
- }
-
- INIT_WORK(&info->work, interrupt_event_handler);
- info->event_type = event_type;
- info->p_slot = p_slot;
- queue_work(p_slot->wq, &info->work);
-}
-
/* The following routines constitute the bulk of the
hotplug controller logic
*/
@@ -119,14 +102,11 @@ err_exit:
* remove_board - Turns off slot and LEDs
* @p_slot: slot where board is being removed
*/
-static int remove_board(struct slot *p_slot)
+static void remove_board(struct slot *p_slot)
{
- int retval;
struct controller *ctrl = p_slot->ctrl;
- retval = pciehp_unconfigure_device(p_slot);
- if (retval)
- return retval;
+ pciehp_unconfigure_device(p_slot);
if (POWER_CTRL(ctrl)) {
pciehp_power_off_slot(p_slot);
@@ -141,86 +121,30 @@ static int remove_board(struct slot *p_slot)
/* turn off Green LED */
pciehp_green_led_off(p_slot);
- return 0;
}
-struct power_work_info {
- struct slot *p_slot;
- struct work_struct work;
- unsigned int req;
-#define DISABLE_REQ 0
-#define ENABLE_REQ 1
-};
-
-/**
- * pciehp_power_thread - handle pushbutton events
- * @work: &struct work_struct describing work to be done
- *
- * Scheduled procedure to handle blocking stuff for the pushbuttons.
- * Handles all pending events and exits.
- */
-static void pciehp_power_thread(struct work_struct *work)
-{
- struct power_work_info *info =
- container_of(work, struct power_work_info, work);
- struct slot *p_slot = info->p_slot;
- int ret;
-
- switch (info->req) {
- case DISABLE_REQ:
- mutex_lock(&p_slot->hotplug_lock);
- pciehp_disable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- mutex_unlock(&p_slot->lock);
- break;
- case ENABLE_REQ:
- mutex_lock(&p_slot->hotplug_lock);
- ret = pciehp_enable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- if (ret)
- pciehp_green_led_off(p_slot);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- mutex_unlock(&p_slot->lock);
- break;
- default:
- break;
- }
-
- kfree(info);
-}
+static int pciehp_enable_slot(struct slot *slot);
+static int pciehp_disable_slot(struct slot *slot);
-static void pciehp_queue_power_work(struct slot *p_slot, int req)
+void pciehp_request(struct controller *ctrl, int action)
{
- struct power_work_info *info;
-
- p_slot->state = (req == ENABLE_REQ) ? POWERON_STATE : POWEROFF_STATE;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "no memory to queue %s request\n",
- (req == ENABLE_REQ) ? "poweron" : "poweroff");
- return;
- }
- info->p_slot = p_slot;
- INIT_WORK(&info->work, pciehp_power_thread);
- info->req = req;
- queue_work(p_slot->wq, &info->work);
+ atomic_or(action, &ctrl->pending_events);
+ if (!pciehp_poll_mode)
+ irq_wake_thread(ctrl->pcie->irq, ctrl);
}
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
struct slot *p_slot = container_of(work, struct slot, work.work);
+ struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
+ pciehp_request(ctrl, DISABLE_SLOT);
break;
case BLINKINGON_STATE:
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
break;
default:
break;
@@ -228,18 +152,15 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
mutex_unlock(&p_slot->lock);
}
-/*
- * Note: This function must be called with slot->lock held
- */
-static void handle_button_press_event(struct slot *p_slot)
+void pciehp_handle_button_press(struct slot *p_slot)
{
struct controller *ctrl = p_slot->ctrl;
- u8 getstatus;
+ mutex_lock(&p_slot->lock);
switch (p_slot->state) {
- case STATIC_STATE:
- pciehp_get_power_status(p_slot, &getstatus);
- if (getstatus) {
+ case OFF_STATE:
+ case ON_STATE:
+ if (p_slot->state == ON_STATE) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
slot_name(p_slot));
@@ -251,7 +172,7 @@ static void handle_button_press_event(struct slot *p_slot)
/* blink green LED and turn off amber */
pciehp_green_led_blink(p_slot);
pciehp_set_attention_status(p_slot, 0);
- queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ schedule_delayed_work(&p_slot->work, 5 * HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -262,118 +183,104 @@ static void handle_button_press_event(struct slot *p_slot)
*/
ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE)
+ if (p_slot->state == BLINKINGOFF_STATE) {
+ p_slot->state = ON_STATE;
pciehp_green_led_on(p_slot);
- else
+ } else {
+ p_slot->state = OFF_STATE;
pciehp_green_led_off(p_slot);
+ }
pciehp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
slot_name(p_slot));
- p_slot->state = STATIC_STATE;
- break;
- case POWEROFF_STATE:
- case POWERON_STATE:
- /*
- * Ignore if the slot is on power-on or power-off state;
- * this means that the previous attention button action
- * to hot-add or hot-remove is undergoing
- */
- ctrl_info(ctrl, "Slot(%s): Button ignored\n",
- slot_name(p_slot));
break;
default:
ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
slot_name(p_slot), p_slot->state);
break;
}
+ mutex_unlock(&p_slot->lock);
}
-/*
- * Note: This function must be called with slot->lock held
- */
-static void handle_link_event(struct slot *p_slot, u32 event)
+void pciehp_handle_disable_request(struct slot *slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = slot->ctrl;
- switch (p_slot->state) {
+ mutex_lock(&slot->lock);
+ switch (slot->state) {
case BLINKINGON_STATE:
case BLINKINGOFF_STATE:
- cancel_delayed_work(&p_slot->work);
- /* Fall through */
- case STATIC_STATE:
- pciehp_queue_power_work(p_slot, event == INT_LINK_UP ?
- ENABLE_REQ : DISABLE_REQ);
- break;
- case POWERON_STATE:
- if (event == INT_LINK_UP) {
- ctrl_info(ctrl, "Slot(%s): Link Up event ignored; already powering on\n",
- slot_name(p_slot));
- } else {
- ctrl_info(ctrl, "Slot(%s): Link Down event queued; currently getting powered on\n",
- slot_name(p_slot));
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
- }
- break;
- case POWEROFF_STATE:
- if (event == INT_LINK_UP) {
- ctrl_info(ctrl, "Slot(%s): Link Up event queued; currently getting powered off\n",
- slot_name(p_slot));
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
- } else {
- ctrl_info(ctrl, "Slot(%s): Link Down event ignored; already powering off\n",
- slot_name(p_slot));
- }
- break;
- default:
- ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ cancel_delayed_work(&slot->work);
break;
}
+ slot->state = POWEROFF_STATE;
+ mutex_unlock(&slot->lock);
+
+ ctrl->request_result = pciehp_disable_slot(slot);
}
-static void interrupt_event_handler(struct work_struct *work)
+void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
{
- struct event_info *info = container_of(work, struct event_info, work);
- struct slot *p_slot = info->p_slot;
- struct controller *ctrl = p_slot->ctrl;
-
- mutex_lock(&p_slot->lock);
- switch (info->event_type) {
- case INT_BUTTON_PRESS:
- handle_button_press_event(p_slot);
- break;
- case INT_POWER_FAULT:
- if (!POWER_CTRL(ctrl))
- break;
- pciehp_set_attention_status(p_slot, 1);
- pciehp_green_led_off(p_slot);
- break;
- case INT_PRESENCE_ON:
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
+ struct controller *ctrl = slot->ctrl;
+ bool link_active;
+ u8 present;
+
+ /*
+ * If the slot is on and presence or link has changed, turn it off.
+ * Even if it's occupied again, we cannot assume the card is the same.
+ */
+ mutex_lock(&slot->lock);
+ switch (slot->state) {
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&slot->work);
+ /* fall through */
+ case ON_STATE:
+ slot->state = POWEROFF_STATE;
+ mutex_unlock(&slot->lock);
+ if (events & PCI_EXP_SLTSTA_DLLSC)
+ ctrl_info(ctrl, "Slot(%s): Link Down\n",
+ slot_name(slot));
+ if (events & PCI_EXP_SLTSTA_PDC)
+ ctrl_info(ctrl, "Slot(%s): Card not present\n",
+ slot_name(slot));
+ pciehp_disable_slot(slot);
break;
- case INT_PRESENCE_OFF:
- /*
- * Regardless of surprise capability, we need to
- * definitely remove a card that has been pulled out!
- */
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
+ default:
+ mutex_unlock(&slot->lock);
break;
- case INT_LINK_UP:
- case INT_LINK_DOWN:
- handle_link_event(p_slot, info->event_type);
+ }
+
+ /* Turn the slot on if it's occupied or link is up */
+ mutex_lock(&slot->lock);
+ pciehp_get_adapter_status(slot, &present);
+ link_active = pciehp_check_link_active(ctrl);
+ if (!present && !link_active) {
+ mutex_unlock(&slot->lock);
+ return;
+ }
+
+ switch (slot->state) {
+ case BLINKINGON_STATE:
+ cancel_delayed_work(&slot->work);
+ /* fall through */
+ case OFF_STATE:
+ slot->state = POWERON_STATE;
+ mutex_unlock(&slot->lock);
+ if (present)
+ ctrl_info(ctrl, "Slot(%s): Card present\n",
+ slot_name(slot));
+ if (link_active)
+ ctrl_info(ctrl, "Slot(%s): Link Up\n",
+ slot_name(slot));
+ ctrl->request_result = pciehp_enable_slot(slot);
break;
default:
+ mutex_unlock(&slot->lock);
break;
}
- mutex_unlock(&p_slot->lock);
-
- kfree(info);
}
-/*
- * Note: This function must be called with slot->hotplug_lock held
- */
-int pciehp_enable_slot(struct slot *p_slot)
+static int __pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
struct controller *ctrl = p_slot->ctrl;
@@ -404,17 +311,29 @@ int pciehp_enable_slot(struct slot *p_slot)
return board_added(p_slot);
}
-/*
- * Note: This function must be called with slot->hotplug_lock held
- */
-int pciehp_disable_slot(struct slot *p_slot)
+static int pciehp_enable_slot(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+ int ret;
+
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
+ ret = __pciehp_enable_slot(slot);
+ if (ret && ATTN_BUTTN(ctrl))
+ pciehp_green_led_off(slot); /* may be blinking */
+ pm_runtime_put(&ctrl->pcie->port->dev);
+
+ mutex_lock(&slot->lock);
+ slot->state = ret ? OFF_STATE : ON_STATE;
+ mutex_unlock(&slot->lock);
+
+ return ret;
+}
+
+static int __pciehp_disable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
struct controller *ctrl = p_slot->ctrl;
- if (!p_slot->ctrl)
- return 1;
-
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (!getstatus) {
@@ -424,32 +343,50 @@ int pciehp_disable_slot(struct slot *p_slot)
}
}
- return remove_board(p_slot);
+ remove_board(p_slot);
+ return 0;
+}
+
+static int pciehp_disable_slot(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+ int ret;
+
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
+ ret = __pciehp_disable_slot(slot);
+ pm_runtime_put(&ctrl->pcie->port->dev);
+
+ mutex_lock(&slot->lock);
+ slot->state = OFF_STATE;
+ mutex_unlock(&slot->lock);
+
+ return ret;
}
int pciehp_sysfs_enable_slot(struct slot *p_slot)
{
- int retval = -ENODEV;
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGON_STATE:
- cancel_delayed_work(&p_slot->work);
- case STATIC_STATE:
- p_slot->state = POWERON_STATE;
+ case OFF_STATE:
mutex_unlock(&p_slot->lock);
- mutex_lock(&p_slot->hotplug_lock);
- retval = pciehp_enable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- break;
+ /*
+ * The IRQ thread becomes a no-op if the user pulls out the
+ * card before the thread wakes up, so initialize to -ENODEV.
+ */
+ ctrl->request_result = -ENODEV;
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+ wait_event(ctrl->requester,
+ !atomic_read(&ctrl->pending_events));
+ return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
slot_name(p_slot));
break;
case BLINKINGOFF_STATE:
+ case ON_STATE:
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
@@ -461,32 +398,28 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
}
mutex_unlock(&p_slot->lock);
- return retval;
+ return -ENODEV;
}
int pciehp_sysfs_disable_slot(struct slot *p_slot)
{
- int retval = -ENODEV;
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
- cancel_delayed_work(&p_slot->work);
- case STATIC_STATE:
- p_slot->state = POWEROFF_STATE;
+ case ON_STATE:
mutex_unlock(&p_slot->lock);
- mutex_lock(&p_slot->hotplug_lock);
- retval = pciehp_disable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- break;
+ pciehp_request(ctrl, DISABLE_SLOT);
+ wait_event(ctrl->requester,
+ !atomic_read(&ctrl->pending_events));
+ return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
slot_name(p_slot));
break;
case BLINKINGON_STATE:
+ case OFF_STATE:
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
slot_name(p_slot));
@@ -498,5 +431,5 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
}
mutex_unlock(&p_slot->lock);
- return retval;
+ return -ENODEV;
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 718b6073afad..7136e3430925 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -17,8 +17,9 @@
#include <linux/types.h>
#include <linux/signal.h>
#include <linux/jiffies.h>
-#include <linux/timer.h>
+#include <linux/kthread.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/slab.h>
@@ -31,47 +32,24 @@ static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
return ctrl->pcie->port;
}
-static irqreturn_t pcie_isr(int irq, void *dev_id);
-static void start_int_poll_timer(struct controller *ctrl, int sec);
-
-/* This is the interrupt polling timeout function. */
-static void int_poll_timeout(struct timer_list *t)
-{
- struct controller *ctrl = from_timer(ctrl, t, poll_timer);
-
- /* Poll for interrupt events. regs == NULL => polling */
- pcie_isr(0, ctrl);
-
- if (!pciehp_poll_time)
- pciehp_poll_time = 2; /* default polling interval is 2 sec */
-
- start_int_poll_timer(ctrl, pciehp_poll_time);
-}
-
-/* This function starts the interrupt polling timer. */
-static void start_int_poll_timer(struct controller *ctrl, int sec)
-{
- /* Clamp to sane value */
- if ((sec <= 0) || (sec > 60))
- sec = 2;
-
- ctrl->poll_timer.expires = jiffies + sec * HZ;
- add_timer(&ctrl->poll_timer);
-}
+static irqreturn_t pciehp_isr(int irq, void *dev_id);
+static irqreturn_t pciehp_ist(int irq, void *dev_id);
+static int pciehp_poll(void *data);
static inline int pciehp_request_irq(struct controller *ctrl)
{
int retval, irq = ctrl->pcie->irq;
- /* Install interrupt polling timer. Start with 10 sec delay */
if (pciehp_poll_mode) {
- timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
- start_int_poll_timer(ctrl, 10);
- return 0;
+ ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
+ "pciehp_poll-%s",
+ slot_name(ctrl->slot));
+ return PTR_ERR_OR_ZERO(ctrl->poll_thread);
}
/* Installs the interrupt handler */
- retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
+ retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
+ IRQF_SHARED, MY_NAME, ctrl);
if (retval)
ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
irq);
@@ -81,7 +59,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
static inline void pciehp_free_irq(struct controller *ctrl)
{
if (pciehp_poll_mode)
- del_timer_sync(&ctrl->poll_timer);
+ kthread_stop(ctrl->poll_thread);
else
free_irq(ctrl->pcie->irq, ctrl);
}
@@ -293,6 +271,11 @@ int pciehp_check_link_status(struct controller *ctrl)
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
+ /* ignore link or presence changes up to this point */
+ if (found)
+ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
+ &ctrl->pending_events);
+
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
@@ -339,7 +322,9 @@ int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_ctrl;
+ pci_config_pm_runtime_get(pdev);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ pci_config_pm_runtime_put(pdev);
*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
return 0;
}
@@ -350,7 +335,9 @@ void pciehp_get_attention_status(struct slot *slot, u8 *status)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
+ pci_config_pm_runtime_get(pdev);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ pci_config_pm_runtime_put(pdev);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
@@ -425,9 +412,12 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ pci_config_pm_runtime_get(pdev);
pcie_write_cmd_nowait(ctrl, status << 6,
PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
@@ -539,20 +529,35 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
- struct pci_bus *subordinate = pdev->subordinate;
- struct pci_dev *dev;
- struct slot *slot = ctrl->slot;
+ struct device *parent = pdev->dev.parent;
u16 status, events;
- u8 present;
- bool link;
- /* Interrupts cannot originate from a controller that's asleep */
+ /*
+ * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
+ */
if (pdev->current_state == PCI_D3cold)
return IRQ_NONE;
+ /*
+ * Keep the port accessible by holding a runtime PM ref on its parent.
+ * Defer resume of the parent to the IRQ thread if it's suspended.
+ * Mask the interrupt until then.
+ */
+ if (parent) {
+ pm_runtime_get_noresume(parent);
+ if (!pm_runtime_active(parent)) {
+ pm_runtime_put(parent);
+ disable_irq_nosync(irq);
+ atomic_or(RERUN_ISR, &ctrl->pending_events);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
+ if (parent)
+ pm_runtime_put(parent);
return IRQ_NONE;
}
@@ -571,86 +576,119 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
if (ctrl->power_fault_detected)
events &= ~PCI_EXP_SLTSTA_PFD;
- if (!events)
+ if (!events) {
+ if (parent)
+ pm_runtime_put(parent);
return IRQ_NONE;
-
- /* Capture link status before clearing interrupts */
- if (events & PCI_EXP_SLTSTA_DLLSC)
- link = pciehp_check_link_active(ctrl);
+ }
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
+ if (parent)
+ pm_runtime_put(parent);
- /* Check Command Complete Interrupt Pending */
+ /*
+ * Command Completed notifications are not deferred to the
+ * IRQ thread because it may be waiting for their arrival.
+ */
if (events & PCI_EXP_SLTSTA_CC) {
ctrl->cmd_busy = 0;
smp_mb();
wake_up(&ctrl->queue);
+
+ if (events == PCI_EXP_SLTSTA_CC)
+ return IRQ_HANDLED;
+
+ events &= ~PCI_EXP_SLTSTA_CC;
+ }
+
+ if (pdev->ignore_hotplug) {
+ ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
+ return IRQ_HANDLED;
}
- if (subordinate) {
- list_for_each_entry(dev, &subordinate->devices, bus_list) {
- if (dev->ignore_hotplug) {
- ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
- events, pci_name(dev));
- return IRQ_HANDLED;
- }
+ /* Save pending events for consumption by IRQ thread. */
+ atomic_or(events, &ctrl->pending_events);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pciehp_ist(int irq, void *dev_id)
+{
+ struct controller *ctrl = (struct controller *)dev_id;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ struct slot *slot = ctrl->slot;
+ irqreturn_t ret;
+ u32 events;
+
+ pci_config_pm_runtime_get(pdev);
+
+ /* rerun pciehp_isr() if the port was inaccessible on interrupt */
+ if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
+ ret = pciehp_isr(irq, dev_id);
+ enable_irq(irq);
+ if (ret != IRQ_WAKE_THREAD) {
+ pci_config_pm_runtime_put(pdev);
+ return ret;
}
}
+ synchronize_hardirq(irq);
+ events = atomic_xchg(&ctrl->pending_events, 0);
+ if (!events) {
+ pci_config_pm_runtime_put(pdev);
+ return IRQ_NONE;
+ }
+
/* Check Attention Button Pressed */
if (events & PCI_EXP_SLTSTA_ABP) {
ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
slot_name(slot));
- pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
+ pciehp_handle_button_press(slot);
}
/*
- * Check Link Status Changed at higher precedence than Presence
- * Detect Changed. The PDS value may be set to "card present" from
- * out-of-band detection, which may be in conflict with a Link Down
- * and cause the wrong event to queue.
+ * Disable requests have higher priority than Presence Detect Changed
+ * or Data Link Layer State Changed events.
*/
- if (events & PCI_EXP_SLTSTA_DLLSC) {
- ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
- link ? "Up" : "Down");
- pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
- INT_LINK_DOWN);
- } else if (events & PCI_EXP_SLTSTA_PDC) {
- present = !!(status & PCI_EXP_SLTSTA_PDS);
- ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
- present ? "" : "not ");
- pciehp_queue_interrupt_event(slot, present ? INT_PRESENCE_ON :
- INT_PRESENCE_OFF);
- }
+ down_read(&ctrl->reset_lock);
+ if (events & DISABLE_SLOT)
+ pciehp_handle_disable_request(slot);
+ else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
+ pciehp_handle_presence_or_link_change(slot, events);
+ up_read(&ctrl->reset_lock);
/* Check Power Fault Detected */
if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
- pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
+ pciehp_set_attention_status(slot, 1);
+ pciehp_green_led_off(slot);
}
+ pci_config_pm_runtime_put(pdev);
+ wake_up(&ctrl->requester);
return IRQ_HANDLED;
}
-static irqreturn_t pcie_isr(int irq, void *dev_id)
+static int pciehp_poll(void *data)
{
- irqreturn_t rc, handled = IRQ_NONE;
+ struct controller *ctrl = data;
- /*
- * To guarantee that all interrupt events are serviced, we need to
- * re-inspect Slot Status register after clearing what is presumed
- * to be the last pending interrupt.
- */
- do {
- rc = pciehp_isr(irq, dev_id);
- if (rc == IRQ_HANDLED)
- handled = IRQ_HANDLED;
- } while (rc == IRQ_HANDLED);
+ schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
+
+ while (!kthread_should_stop()) {
+ /* poll for interrupt events or user requests */
+ while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
+ atomic_read(&ctrl->pending_events))
+ pciehp_ist(IRQ_NOTCONNECTED, ctrl);
- /* Return IRQ_HANDLED if we handled one or more events */
- return handled;
+ if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
+ pciehp_poll_time = 2; /* clamp to sane value */
+
+ schedule_timeout_idle(pciehp_poll_time * HZ);
+ }
+
+ return 0;
}
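
Since pciehp is built in rather than modular (see the comment in
pciehp_core.c above), the poll thread is enabled from the kernel command
line, for example:

    pciehp.pciehp_poll_mode=1 pciehp.pciehp_poll_time=5

Out-of-range intervals are clamped to the 2 second default as shown above.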
static void pcie_enable_notification(struct controller *ctrl)
@@ -691,17 +729,6 @@ static void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
-void pcie_reenable_notification(struct controller *ctrl)
-{
- /*
- * Clear both Presence and Data Link Layer Changed to make sure
- * those events still fire after we have re-enabled them.
- */
- pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
- pcie_enable_notification(ctrl);
-}
-
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
@@ -715,6 +742,12 @@ static void pcie_disable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
}
+void pcie_clear_hotplug_events(struct controller *ctrl)
+{
+ pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+}
+
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
* bus reset of the bridge, but at the same time we want to ensure that it is
@@ -728,10 +761,13 @@ int pciehp_reset_slot(struct slot *slot, int probe)
struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 stat_mask = 0, ctrl_mask = 0;
+ int rc;
if (probe)
return 0;
+ down_write(&ctrl->reset_lock);
+
if (!ATTN_BUTTN(ctrl)) {
ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
stat_mask |= PCI_EXP_SLTSTA_PDC;
@@ -742,18 +778,16 @@ int pciehp_reset_slot(struct slot *slot, int probe)
pcie_write_cmd(ctrl, 0, ctrl_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
- if (pciehp_poll_mode)
- del_timer_sync(&ctrl->poll_timer);
- pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+ rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
- if (pciehp_poll_mode)
- int_poll_timeout(&ctrl->poll_timer);
- return 0;
+
+ up_write(&ctrl->reset_lock);
+ return rc;
}
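
Taken together, the new reset_lock usage follows the usual rwsem pattern
(sketch consolidating the call sites in this patch):

    down_read(&ctrl->reset_lock);   /* pciehp_ist(), pciehp_check_presence() */
    /* ... react to PDC/DLLSC events and disable requests ... */
    up_read(&ctrl->reset_lock);

    down_write(&ctrl->reset_lock);  /* pciehp_reset_slot() */
    /* ... mask PDC/DLLSC, reset the secondary bus, unmask ... */
    up_write(&ctrl->reset_lock);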
int pcie_init_notification(struct controller *ctrl)
@@ -765,7 +799,7 @@ int pcie_init_notification(struct controller *ctrl)
return 0;
}
-static void pcie_shutdown_notification(struct controller *ctrl)
+void pcie_shutdown_notification(struct controller *ctrl)
{
if (ctrl->notification_enabled) {
pcie_disable_notification(ctrl);
@@ -776,32 +810,29 @@ static void pcie_shutdown_notification(struct controller *ctrl)
static int pcie_init_slot(struct controller *ctrl)
{
+ struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
struct slot *slot;
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- slot->wq = alloc_ordered_workqueue("pciehp-%u", 0, PSN(ctrl));
- if (!slot->wq)
- goto abort;
+ down_read(&pci_bus_sem);
+ slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
+ up_read(&pci_bus_sem);
slot->ctrl = ctrl;
mutex_init(&slot->lock);
- mutex_init(&slot->hotplug_lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
-abort:
- kfree(slot);
- return -ENOMEM;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
- cancel_delayed_work(&slot->work);
- destroy_workqueue(slot->wq);
+
+ cancel_delayed_work_sync(&slot->work);
kfree(slot);
}
@@ -826,6 +857,7 @@ struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
u32 slot_cap, link_cap;
+ u8 occupied, poweron;
struct pci_dev *pdev = dev->port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -847,6 +879,8 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
+ init_rwsem(&ctrl->reset_lock);
+ init_waitqueue_head(&ctrl->requester);
init_waitqueue_head(&ctrl->queue);
dbg_ctrl(ctrl);
@@ -855,16 +889,11 @@ struct controller *pcie_init(struct pcie_device *dev)
if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
ctrl->link_active_reporting = 1;
- /*
- * Clear all remaining event bits in Slot Status register except
- * Presence Detect Changed. We want to make sure possible
- * hotplug event is triggered when the interrupt is unmasked so
- * that we don't lose that event.
- */
+ /* Clear all remaining event bits in Slot Status register. */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
- PCI_EXP_SLTSTA_DLLSC);
+ PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
@@ -883,6 +912,19 @@ struct controller *pcie_init(struct pcie_device *dev)
if (pcie_init_slot(ctrl))
goto abort_ctrl;
+ /*
+ * If empty slot's power status is on, turn power off. The IRQ isn't
+ * requested yet, so avoid triggering a notification with this command.
+ */
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_adapter_status(ctrl->slot, &occupied);
+ pciehp_get_power_status(ctrl->slot, &poweron);
+ if (!occupied && poweron) {
+ pcie_disable_notification(ctrl);
+ pciehp_power_off_slot(ctrl->slot);
+ }
+ }
+
return ctrl;
abort_ctrl:
@@ -893,7 +935,6 @@ abort:
void pciehp_release_ctrl(struct controller *ctrl)
{
- pcie_shutdown_notification(ctrl);
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 3f518dea856d..5c58c22e0c08 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -62,9 +62,8 @@ int pciehp_configure_device(struct slot *p_slot)
return ret;
}
-int pciehp_unconfigure_device(struct slot *p_slot)
+void pciehp_unconfigure_device(struct slot *p_slot)
{
- int rc = 0;
u8 presence = 0;
struct pci_dev *dev, *temp;
struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
@@ -107,5 +106,4 @@ int pciehp_unconfigure_device(struct slot *p_slot)
}
pci_unlock_rescan_remove();
- return rc;
}
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
deleted file mode 100644
index c19694a04d2c..000000000000
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ /dev/null
@@ -1,348 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * PCI Hot Plug Controller Skeleton Driver - 0.3
- *
- * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
- * Copyright (C) 2001,2003 IBM Corp.
- *
- * All rights reserved.
- *
- * This driver is to be used as a skeleton driver to show how to interface
- * with the pci hotplug core easily.
- *
- * Send feedback to <greg@kroah.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
-#include <linux/init.h>
-
-#define SLOT_NAME_SIZE 10
-struct slot {
- u8 number;
- struct hotplug_slot *hotplug_slot;
- struct list_head slot_list;
- char name[SLOT_NAME_SIZE];
-};
-
-static LIST_HEAD(slot_list);
-
-#define MY_NAME "pcihp_skeleton"
-
-#define dbg(format, arg...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format "\n", \
- MY_NAME, ## arg); \
- } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg)
-
-/* local variables */
-static bool debug;
-static int num_slots;
-
-#define DRIVER_VERSION "0.3"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
-#define DRIVER_DESC "Hot Plug PCI Controller Skeleton Driver"
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-
-static int enable_slot(struct hotplug_slot *slot);
-static int disable_slot(struct hotplug_slot *slot);
-static int set_attention_status(struct hotplug_slot *slot, u8 value);
-static int hardware_test(struct hotplug_slot *slot, u32 value);
-static int get_power_status(struct hotplug_slot *slot, u8 *value);
-static int get_attention_status(struct hotplug_slot *slot, u8 *value);
-static int get_latch_status(struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-
-static struct hotplug_slot_ops skel_hotplug_slot_ops = {
- .enable_slot = enable_slot,
- .disable_slot = disable_slot,
- .set_attention_status = set_attention_status,
- .hardware_test = hardware_test,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
-};
-
-static int enable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in code here to enable the specified slot
- */
-
- return retval;
-}
-
-static int disable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in code here to disable the specified slot
- */
-
- return retval;
-}
-
-static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- switch (status) {
- case 0:
- /*
- * Fill in code here to turn light off
- */
- break;
-
- case 1:
- default:
- /*
- * Fill in code here to turn light on
- */
- break;
- }
-
- return retval;
-}
-
-static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- switch (value) {
- case 0:
- /* Specify a test here */
- break;
- case 1:
- /* Specify another test here */
- break;
- }
-
- return retval;
-}
-
-static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current power status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current attention status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current latch status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current adapter status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
-static void make_slot_name(struct slot *slot)
-{
- /*
- * Stupid way to make a filename out of the slot name.
- * replace this if your hardware provides a better way to name slots.
- */
- snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", slot->number);
-}
-
-/**
- * init_slots - initialize 'struct slot' structures for each slot
- *
- */
-static int __init init_slots(void)
-{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
- int retval;
- int i;
-
- /*
- * Create a structure for each slot, and register that slot
- * with the pci_hotplug subsystem.
- */
- for (i = 0; i < num_slots; ++i) {
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot) {
- retval = -ENOMEM;
- goto error;
- }
-
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- retval = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- retval = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
-
- slot->number = i;
-
- hotplug_slot->name = slot->name;
- hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
- make_slot_name(slot);
- hotplug_slot->ops = &skel_hotplug_slot_ops;
-
- /*
- * Initialize the slot info structure with some known
- * good values.
- */
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
-
- dbg("registering slot %d\n", i);
- retval = pci_hp_register(slot->hotplug_slot);
- if (retval) {
- err("pci_hp_register failed with error %d\n", retval);
- goto error_info;
- }
-
- /* add slot to our internal list */
- list_add(&slot->slot_list, &slot_list);
- }
-
- return 0;
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
-error_slot:
- kfree(slot);
-error:
- return retval;
-}
-
-static void __exit cleanup_slots(void)
-{
- struct slot *slot, *next;
-
- /*
- * Unregister all of our slots with the pci_hotplug subsystem.
- * Memory will be freed in release_slot() callback after slot's
- * lifespan is finished.
- */
- list_for_each_entry_safe(slot, next, &slot_list, slot_list) {
- list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
- }
-}
-
-static int __init pcihp_skel_init(void)
-{
- int retval;
-
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
- /*
- * Do specific initialization stuff for your driver here
- * like initializing your controller hardware (if any) and
- * determining the number of slots you have in the system
- * right now.
- */
- num_slots = 5;
-
- return init_slots();
-}
-
-static void __exit pcihp_skel_exit(void)
-{
- /*
- * Clean everything up.
- */
- cleanup_slots();
-}
-
-module_init(pcihp_skel_init);
-module_exit(pcihp_skel_exit);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 6c2e8d7307c6..3276a5e4c430 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -538,9 +538,8 @@ static struct hotplug_slot_ops php_slot_ops = {
.disable_slot = pnv_php_disable_slot,
};
-static void pnv_php_release(struct hotplug_slot *slot)
+static void pnv_php_release(struct pnv_php_slot *php_slot)
{
- struct pnv_php_slot *php_slot = slot->private;
unsigned long flags;
/* Remove from global or child list */
@@ -596,7 +595,6 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
php_slot->power_state_check = false;
php_slot->slot.ops = &php_slot_ops;
php_slot->slot.info = &php_slot->slot_info;
- php_slot->slot.release = pnv_php_release;
php_slot->slot.private = php_slot;
INIT_LIST_HEAD(&php_slot->children);
@@ -924,6 +922,7 @@ static void pnv_php_unregister_one(struct device_node *dn)
php_slot->state = PNV_PHP_STATE_OFFLINE;
pci_hp_deregister(&php_slot->slot);
+ pnv_php_release(php_slot);
pnv_php_put_slot(php_slot);
}
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index fb5e0845429d..857c358b727b 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -404,13 +404,13 @@ static void __exit cleanup_slots(void)
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
- * memory will be freed in release_slot callback.
*/
list_for_each_entry_safe(slot, next, &rpaphp_slot_head,
rpaphp_slot_list) {
list_del(&slot->rpaphp_slot_list);
pci_hp_deregister(slot->hotplug_slot);
+ dealloc_slot_struct(slot);
}
return;
}
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 3840a2075e6a..b916c8e4372d 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -19,12 +19,6 @@
#include "rpaphp.h"
/* free up the memory used by a slot */
-static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = (struct slot *) hotplug_slot->private;
- dealloc_slot_struct(slot);
-}
-
void dealloc_slot_struct(struct slot *slot)
{
kfree(slot->hotplug_slot->info);
@@ -56,7 +50,6 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot->power_domain = power_domain;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
- slot->hotplug_slot->release = &rpaphp_release_slot;
return (slot);
@@ -90,10 +83,8 @@ int rpaphp_deregister_slot(struct slot *slot)
__func__, slot->name);
list_del(&slot->rpaphp_slot_list);
-
- retval = pci_hp_deregister(php_slot);
- if (retval)
- err("Problem unregistering a slot %s\n", slot->name);
+ pci_hp_deregister(php_slot);
+ dealloc_slot_struct(slot);
dbg("%s - Exit: rc[%d]\n", __func__, retval);
return retval;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index ffdc2977395d..93b5341d282c 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -130,15 +130,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
static struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -175,7 +166,6 @@ int zpci_init_slot(struct zpci_dev *zdev)
hotplug_slot->info = info;
hotplug_slot->ops = &s390_hotplug_slot_ops;
- hotplug_slot->release = &release_slot;
get_power_status(hotplug_slot, &info->power_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
@@ -209,5 +199,8 @@ void zpci_exit_slot(struct zpci_dev *zdev)
continue;
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
}
}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 78b6bdbb3a39..babd23409f61 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -628,7 +628,6 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
goto alloc_err;
}
bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
- bss_hotplug_slot->release = &sn_release_slot;
rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
if (rc)
@@ -656,8 +655,10 @@ alloc_err:
sn_release_slot(bss_hotplug_slot);
/* destroy anything else on the list */
- while ((bss_hotplug_slot = sn_hp_destroy()))
+ while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
+ sn_release_slot(bss_hotplug_slot);
+ }
return rc;
}
@@ -703,8 +704,10 @@ static void __exit sn_pci_hotplug_exit(void)
{
struct hotplug_slot *bss_hotplug_slot;
- while ((bss_hotplug_slot = sn_hp_destroy()))
+ while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
+ sn_release_slot(bss_hotplug_slot);
+ }
if (!list_empty(&sn_hp_list))
printk(KERN_ERR "%s: internal list is not empty\n", __FILE__);
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index e91be287f292..97cee23f3d51 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -61,22 +61,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.get_adapter_status = get_adapter_status,
};
-/**
- * release_slot - free up the memory used by a slot
- * @hotplug_slot: slot to free
- */
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
static int init_slots(struct controller *ctrl)
{
struct slot *slot;
@@ -125,7 +109,6 @@ static int init_slots(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
@@ -171,6 +154,9 @@ void cleanup_slots(struct controller *ctrl)
cancel_delayed_work(&slot->work);
destroy_workqueue(slot->wq);
pci_hp_deregister(slot->hotplug_slot);
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
}
}
@@ -270,11 +256,30 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
+static bool shpc_capable(struct pci_dev *bridge)
+{
+ /*
+ * It is assumed that AMD GOLAM chips support SHPC but they do not
+ * have SHPC capability.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_AMD &&
+ bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
+ return true;
+
+ if (pci_find_capability(bridge, PCI_CAP_ID_SHPC))
+ return true;
+
+ return false;
+}
+
static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
struct controller *ctrl;
+ if (!shpc_capable(pdev))
+ return -ENODEV;
+
if (acpi_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
@@ -303,6 +308,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_cleanup_slots;
+ pdev->shpc_managed = 1;
return 0;
err_cleanup_slots:
@@ -319,6 +325,7 @@ static void shpc_remove(struct pci_dev *dev)
{
struct controller *ctrl = pci_get_drvdata(dev);
+ dev->shpc_managed = 0;
shpchp_remove_ctrl_files(ctrl);
ctrl->hpc_ops->release_ctlr(ctrl);
kfree(ctrl);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 1047b56e5730..1267dcc5a531 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -654,6 +654,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&p_slot->work);
+ /* fall through */
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
@@ -689,6 +690,7 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&p_slot->work);
+ /* fall through */
case STATIC_STATE:
p_slot->state = POWEROFF_STATE;
mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index d0d73dbbd5ca..c5f3cd4ed766 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -575,6 +575,22 @@ void pci_iov_release(struct pci_dev *dev)
}
/**
+ * pci_iov_remove - clean up SR-IOV state after PF driver is detached
+ * @dev: the PCI device
+ */
+void pci_iov_remove(struct pci_dev *dev)
+{
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!dev->is_physfn)
+ return;
+
+ iov->driver_max_VFs = iov->total_VFs;
+ if (iov->num_VFs)
+ pci_warn(dev, "driver left SR-IOV enabled after remove\n");
+}
+
+/**
* pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
@@ -802,15 +818,15 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
if (!dev->is_physfn)
return -ENOSYS;
+
if (numvfs > dev->sriov->total_VFs)
return -EINVAL;
/* Shouldn't change if VFs already enabled */
if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
return -EBUSY;
- else
- dev->sriov->driver_max_VFs = numvfs;
+ dev->sriov->driver_max_VFs = numvfs;
return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 2a808e10645f..a1de501a2729 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -86,13 +86,17 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
va_list ap;
int ret;
char *devname;
+ unsigned long irqflags = IRQF_SHARED;
+
+ if (!handler)
+ irqflags |= IRQF_ONESHOT;
va_start(ap, fmt);
devname = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
- IRQF_SHARED, devname, dev_id);
+ irqflags, devname, dev_id);
if (ret)
kfree(devname);
return ret;
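For illustration (not part of the patch; the "foo" driver is hypothetical), a purely threaded handler can now rely on the core adding IRQF_ONESHOT itself:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;	/* hypothetical driver state */

	/* runs in sleepable thread context */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct pci_dev *pdev, struct foo_priv *priv)
{
	/* NULL primary handler: pci_request_irq() ORs in IRQF_ONESHOT */
	return pci_request_irq(pdev, 0, NULL, foo_thread_fn, priv,
			       "foo[%s]", pci_name(pdev));
}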
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4d88afdfc843..f2ef896464b3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1446,6 +1446,9 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
info->flags |= MSI_FLAG_MUST_REACTIVATE;
+ /* PCI-MSI is oneshot-safe */
+ info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
+
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index d088c9147f10..1836b8ddf292 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -266,7 +266,7 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
struct list_head *resources, resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
- struct resource *res;
+ struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
@@ -320,18 +320,16 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
- res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
+ err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
+ if (err)
+ continue;
+
+ res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
goto failed;
}
- err = of_pci_range_to_resource(&range, dev_node, res);
- if (err) {
- devm_kfree(dev, res);
- continue;
- }
-
if (resource_type(res) == IORESOURCE_IO) {
if (!io_base) {
dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
@@ -612,7 +610,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
switch (resource_type(res)) {
case IORESOURCE_IO:
- err = pci_remap_iospace(res, iobase);
+ err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 65113b6eed14..c2ab57705043 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -403,24 +403,7 @@ bool pciehp_is_native(struct pci_dev *bridge)
*/
bool shpchp_is_native(struct pci_dev *bridge)
{
- const struct pci_host_bridge *host;
-
- if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
- return false;
-
- /*
- * It is assumed that AMD GOLAM chips support SHPC but they do not
- * have SHPC capability.
- */
- if (bridge->vendor == PCI_VENDOR_ID_AMD &&
- bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
- return true;
-
- if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
- return false;
-
- host = pci_find_host_bridge(bridge->bus);
- return host->native_shpc_hotplug;
+ return bridge->shpc_managed;
}
/**
@@ -629,6 +612,16 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ /*
+ * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
+ * system-wide suspend/resume confuses the platform firmware, so avoid
+ * doing that. According to Section 16.1.6 of ACPI 6.2, endpoint
+ * devices are expected to be in D3 before invoking the S3 entry path
+ * from the firmware, so they should not be affected by this issue.
+ */
+ if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
+ return true;
+
if (!adev || !acpi_device_power_manageable(adev))
return false;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c125d53033c6..bef17c3fca67 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev)
}
pcibios_free_irq(pci_dev);
pci_dev->driver = NULL;
+ pci_iov_remove(pci_dev);
}
/* Undo the runtime PM settings in local_pci_probe() */
@@ -1667,7 +1668,7 @@ static int __init pci_driver_init(void)
if (ret)
return ret;
#endif
-
+ dma_debug_add_bus(&pci_bus_type);
return 0;
}
postcore_initcall(pci_driver_init);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 0c4653c1d2ce..9ecfe13157c0 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
-#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
@@ -1449,7 +1448,9 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
if (val != 1)
return -EINVAL;
+ pm_runtime_get_sync(dev);
result = pci_reset_function(pdev);
+ pm_runtime_put(dev);
if (result < 0)
return result;
@@ -1746,6 +1747,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#endif
&pci_bridge_attr_group,
&pcie_dev_attr_group,
+#ifdef CONFIG_PCIEAER
+ &aer_stats_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 97acba712e4e..29ff9619b5fa 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
-#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
@@ -115,6 +114,9 @@ static bool pcie_ari_disabled;
/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;
+/* If set, the PCI config space of each device is printed during boot. */
+bool pci_early_dump;
+
bool pci_ats_disabled(void)
{
return pcie_ats_disabled;
@@ -191,6 +193,168 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
+/**
+ * pci_dev_str_match_path - test if a path string matches a device
+ * @dev: the PCI device to test
+ * @p: string to match the device against
+ * @endptr: pointer to the string after the match
+ *
+ * Test if a string (typically from a kernel parameter) formatted as a
+ * path of device/function addresses matches a PCI device. The string must
+ * be of the form:
+ *
+ * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
+ *
+ * A path for a device can be obtained using 'lspci -t'. Using a path
+ * is more robust against bus renumbering than using only a single bus,
+ * device and function address.
+ *
+ * Returns 1 if the string matches the device, 0 if it does not and
+ * a negative error code if it fails to parse the string.
+ */
+static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
+ const char **endptr)
+{
+ int ret;
+ int seg, bus, slot, func;
+ char *wpath, *p;
+ char end;
+
+ *endptr = strchrnul(path, ';');
+
+ wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
+ if (!wpath)
+ return -ENOMEM;
+
+ while (1) {
+ p = strrchr(wpath, '/');
+ if (!p)
+ break;
+ ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto free_and_exit;
+ }
+
+ if (dev->devfn != PCI_DEVFN(slot, func)) {
+ ret = 0;
+ goto free_and_exit;
+ }
+
+ /*
+ * Note: we don't need to get a reference to the upstream
+ * bridge because we hold a reference to the top level
+ * device which should hold a reference to the bridge,
+ * and so on.
+ */
+ dev = pci_upstream_bridge(dev);
+ if (!dev) {
+ ret = 0;
+ goto free_and_exit;
+ }
+
+ *p = 0;
+ }
+
+ ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
+ &func, &end);
+ if (ret != 4) {
+ seg = 0;
+ ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto free_and_exit;
+ }
+ }
+
+ ret = (seg == pci_domain_nr(dev->bus) &&
+ bus == dev->bus->number &&
+ dev->devfn == PCI_DEVFN(slot, func));
+
+free_and_exit:
+ kfree(wpath);
+ return ret;
+}
+
+/**
+ * pci_dev_str_match - test if a string matches a device
+ * @dev: the PCI device to test
+ * @p: string to match the device against
+ * @endptr: pointer to the string after the match
+ *
+ * Test if a string (typically from a kernel parameter) matches a specified
+ * PCI device. The string may be of one of the following formats:
+ *
+ * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
+ * pci:<vendor>:<device>[:<subvendor>:<subdevice>]
+ *
+ * The first format specifies a PCI bus/device/function address which
+ * may change if new hardware is inserted, if motherboard firmware changes,
+ * or due to changes caused in kernel parameters. If the domain is
+ * left unspecified, it is taken to be 0. In order to be robust against
+ * bus renumbering issues, a path of PCI device/function numbers may be used
+ * to address the specific device. The path for a device can be determined
+ * through the use of 'lspci -t'.
+ *
+ * The second format matches devices using IDs in the configuration
+ * space which may match multiple devices in the system. A value of 0
+ * for any field will match all devices. (Note: this differs from
+ * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
+ * legacy reasons and convenience so users don't have to specify
+ * FFFFFFFFs on the command line.)
+ *
+ * Returns 1 if the string matches the device, 0 if it does not and
+ * a negative error code if the string cannot be parsed.
+ */
+static int pci_dev_str_match(struct pci_dev *dev, const char *p,
+ const char **endptr)
+{
+ int ret;
+ int count;
+ unsigned short vendor, device, subsystem_vendor, subsystem_device;
+
+ if (strncmp(p, "pci:", 4) == 0) {
+ /* PCI vendor/device (subvendor/subdevice) IDs are specified */
+ p += 4;
+ ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
+ &subsystem_vendor, &subsystem_device, &count);
+ if (ret != 4) {
+ ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
+ if (ret != 2)
+ return -EINVAL;
+
+ subsystem_vendor = 0;
+ subsystem_device = 0;
+ }
+
+ p += count;
+
+ if ((!vendor || vendor == dev->vendor) &&
+ (!device || device == dev->device) &&
+ (!subsystem_vendor ||
+ subsystem_vendor == dev->subsystem_vendor) &&
+ (!subsystem_device ||
+ subsystem_device == dev->subsystem_device))
+ goto found;
+ } else {
+ /*
+ * PCI Bus, Device, Function IDs are specified
+ * (optionally, may include a path of devfns following it)
+ */
+ ret = pci_dev_str_match_path(dev, p, &p);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto found;
+ }
+
+ *endptr = p;
+ return 0;
+
+found:
+ *endptr = p;
+ return 1;
+}
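As a hedged illustration (not in the patch), a caller inside this file could walk a ';'-separated list such as "0000:00:1c.0/00.0;pci:8086:1533" the same way pci_disable_acs_redir() below does; pci_dev_str_match() is static, so the sketch only makes sense within pci.c:

static bool pci_dev_in_list(struct pci_dev *dev, const char *list)
{
	const char *p = list;
	int ret;

	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0)
			break;		/* parse error, give up */
		if (ret == 1)
			return true;	/* this entry matches @dev */
		if (*p != ';' && *p != ',')
			break;		/* end of parameter */
		p++;
	}
	return false;
}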
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap, int *ttl)
@@ -1171,6 +1335,33 @@ static void pci_restore_config_space(struct pci_dev *pdev)
}
}
+static void pci_restore_rebar_state(struct pci_dev *pdev)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ struct resource *res;
+ int bar_idx, size;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
+ res = pdev->resource + bar_idx;
+ size = order_base_2((resource_size(res) >> 20) | 1);
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
+ }
+}
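The size value written back above is the Resizable BAR encoding from the PCIe spec: the field holds log2 of the BAR size in MB, so 0 selects 1 MB and 8 selects 256 MB. A sketch of the arithmetic, assuming the power-of-two sizes BARs use:

#include <linux/log2.h>

static int rebar_size_from_bytes(resource_size_t bytes)
{
	/* 1 MB -> 0, 2 MB -> 1, 256 MB -> 8; equals ilog2(bytes) - 20 */
	return order_base_2((bytes >> 20) | 1);
}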
+
/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
@@ -1186,6 +1377,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_pri_state(dev);
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
+ pci_restore_rebar_state(dev);
pci_cleanup_aer_error_status_regs(dev);
@@ -2045,6 +2237,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
case PCI_D2:
if (pci_no_d1d2(dev))
break;
+ /* else: fall through */
default:
target_state = state;
}
@@ -2290,7 +2483,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
* @bridge: Bridge to check
*
* This function checks if it is possible to move the bridge to D3.
- * Currently we only allow D3 for recent enough PCIe ports.
+ * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -2305,18 +2498,27 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * Hotplug interrupts cannot be delivered if the link is down,
- * so parents of a hotplug port must stay awake. In addition,
- * hotplug ports handled by firmware in System Management Mode
+ * Hotplug ports handled by firmware in System Management Mode
* may not be put into D3 by the OS (Thunderbolt on non-Macs).
- * For simplicity, disallow in general for now.
*/
- if (bridge->is_hotplug_bridge)
+ if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
return false;
if (pci_bridge_d3_force)
return true;
+ /* Even the oldest 2010 Thunderbolt controller supports D3. */
+ if (bridge->is_thunderbolt)
+ return true;
+
+ /*
+ * Hotplug ports handled natively by the OS were not validated
+ * by vendors for runtime D3 at least until 2018 because there
+ * was no OS support.
+ */
+ if (bridge->is_hotplug_bridge)
+ return false;
+
/*
* It should be safe to put PCIe ports from 2015 or newer
* to D3.
@@ -2820,6 +3022,66 @@ void pci_request_acs(void)
pci_acs_enable = 1;
}
+static const char *disable_acs_redir_param;
+
+/**
+ * pci_disable_acs_redir - disable ACS redirect capabilities
+ * @dev: the PCI device
+ *
+ * Applies only to devices specified in the disable_acs_redir parameter.
+ */
+static void pci_disable_acs_redir(struct pci_dev *dev)
+{
+ int ret = 0;
+ const char *p;
+ int pos;
+ u16 ctrl;
+
+ if (!disable_acs_redir_param)
+ return;
+
+ p = disable_acs_redir_param;
+ while (*p) {
+ ret = pci_dev_str_match(dev, p, &p);
+ if (ret < 0) {
+ pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
+ disable_acs_redir_param);
+
+ break;
+ } else if (ret == 1) {
+ /* Found a match */
+ break;
+ }
+
+ if (*p != ';' && *p != ',') {
+ /* End of param or invalid format */
+ break;
+ }
+ p++;
+ }
+
+ if (ret != 1)
+ return;
+
+ if (!pci_dev_specific_disable_acs_redir(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos) {
+ pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
+ return;
+ }
+
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* P2P Request & Completion Redirect */
+ ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+
+ pci_info(dev, "disabled ACS redirect\n");
+}
+
/**
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilites
* @dev: the PCI device
@@ -2859,12 +3121,22 @@ static void pci_std_enable_acs(struct pci_dev *dev)
void pci_enable_acs(struct pci_dev *dev)
{
if (!pci_acs_enable)
- return;
+ goto disable_acs_redir;
if (!pci_dev_specific_enable_acs(dev))
- return;
+ goto disable_acs_redir;
pci_std_enable_acs(dev);
+
+disable_acs_redir:
+ /*
+ * Note: pci_disable_acs_redir() must be called even if ACS was not
+ * enabled by the kernel because it may have been enabled by
+ * platform firmware. So if we are told to disable it, we should
+ * always disable it after setting the kernel's default
+ * preferences.
+ */
+ pci_disable_acs_redir(dev);
}
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -3070,7 +3342,7 @@ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
return pos;
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
+ return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
}
/**
@@ -3093,7 +3365,7 @@ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
- ctrl |= size << 8;
+ ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
return 0;
}
@@ -3579,6 +3851,44 @@ void pci_unmap_iospace(struct resource *res)
}
EXPORT_SYMBOL(pci_unmap_iospace);
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+ struct resource **res = ptr;
+
+ pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr)
+{
+ const struct resource **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pci_remap_iospace(res, phys_addr);
+ if (error) {
+ devres_free(ptr);
+ } else {
+ *ptr = res;
+ devres_add(dev, ptr);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
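A minimal probe-path sketch of the new managed helper (the driver and its resource setup are hypothetical and elided):

static int foo_host_probe(struct platform_device *pdev)
{
	struct resource io_res;		/* filled from DT ranges, elided */
	phys_addr_t io_phys = 0;	/* CPU address backing the I/O window */
	int err;

	err = devm_pci_remap_iospace(&pdev->dev, &io_res, io_phys);
	if (err)
		return err;

	/* no matching pci_unmap_iospace(): devres undoes it on detach */
	return 0;
}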
+
/**
* devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
* @dev: Generic device to remap IO address for
@@ -4039,7 +4349,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
* Returns true if the device advertises support for PCIe function level
* resets.
*/
-static bool pcie_has_flr(struct pci_dev *dev)
+bool pcie_has_flr(struct pci_dev *dev)
{
u32 cap;
@@ -4049,6 +4359,7 @@ static bool pcie_has_flr(struct pci_dev *dev)
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
return cap & PCI_EXP_DEVCAP_FLR;
}
+EXPORT_SYMBOL_GPL(pcie_has_flr);
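With pcie_has_flr() exported, a driver can probe for FLR support before issuing one; a minimal sketch (the function name is made up):

static int foo_reset_function(struct pci_dev *pdev)
{
	if (!pcie_has_flr(pdev))
		return -ENOTTY;

	return pcie_flr(pdev);	/* resets and waits for the device */
}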
/**
* pcie_flr - initiate a PCIe function level reset
@@ -4224,19 +4535,18 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
}
/**
- * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
* @dev: Bridge device
*
* Use the bridge control register to assert reset on the secondary bus.
* Devices on the secondary bus are left in power-on state.
*/
-int pci_reset_bridge_secondary_bus(struct pci_dev *dev)
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
pcibios_reset_secondary_bus(dev);
return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
}
-EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
@@ -4253,9 +4563,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
if (probe)
return 0;
- pci_reset_bridge_secondary_bus(dev->bus->self);
-
- return 0;
+ return pci_bridge_secondary_bus_reset(dev->bus->self);
}
static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
@@ -4787,7 +5095,7 @@ int pci_probe_reset_slot(struct pci_slot *slot)
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
/**
- * pci_reset_slot - reset a PCI slot
+ * __pci_reset_slot - Try to reset a PCI slot
* @slot: PCI slot to reset
*
* A PCI bus may host multiple slots, each slot may support a reset mechanism
@@ -4799,33 +5107,9 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
* through this function. PCI config space of all devices in the slot and
* behind the slot is saved before and restored after reset.
*
- * Return 0 on success, non-zero on error.
- */
-int pci_reset_slot(struct pci_slot *slot)
-{
- int rc;
-
- rc = pci_slot_reset(slot, 1);
- if (rc)
- return rc;
-
- pci_slot_save_and_disable(slot);
-
- rc = pci_slot_reset(slot, 0);
-
- pci_slot_restore(slot);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(pci_reset_slot);
-
-/**
- * pci_try_reset_slot - Try to reset a PCI slot
- * @slot: PCI slot to reset
- *
* Same as above except return -EAGAIN if the slot cannot be locked
*/
-int pci_try_reset_slot(struct pci_slot *slot)
+static int __pci_reset_slot(struct pci_slot *slot)
{
int rc;
@@ -4846,10 +5130,11 @@ int pci_try_reset_slot(struct pci_slot *slot)
return rc;
}
-EXPORT_SYMBOL_GPL(pci_try_reset_slot);
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
+ int ret;
+
if (!bus->self || !pci_bus_resetable(bus))
return -ENOTTY;
@@ -4860,11 +5145,11 @@ static int pci_bus_reset(struct pci_bus *bus, int probe)
might_sleep();
- pci_reset_bridge_secondary_bus(bus->self);
+ ret = pci_bridge_secondary_bus_reset(bus->self);
pci_bus_unlock(bus);
- return 0;
+ return ret;
}
/**
@@ -4880,15 +5165,12 @@ int pci_probe_reset_bus(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
/**
- * pci_reset_bus - reset a PCI bus
+ * __pci_reset_bus - Try to reset a PCI bus
* @bus: top level PCI bus to reset
*
- * Do a bus reset on the given bus and any subordinate buses, saving
- * and restoring state of all devices.
- *
- * Return 0 on success, non-zero on error.
+ * Same as above except return -EAGAIN if the bus cannot be locked
*/
-int pci_reset_bus(struct pci_bus *bus)
+static int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
@@ -4898,42 +5180,30 @@ int pci_reset_bus(struct pci_bus *bus)
pci_bus_save_and_disable(bus);
- rc = pci_bus_reset(bus, 0);
+ if (pci_bus_trylock(bus)) {
+ might_sleep();
+ rc = pci_bridge_secondary_bus_reset(bus->self);
+ pci_bus_unlock(bus);
+ } else
+ rc = -EAGAIN;
pci_bus_restore(bus);
return rc;
}
-EXPORT_SYMBOL_GPL(pci_reset_bus);
/**
- * pci_try_reset_bus - Try to reset a PCI bus
- * @bus: top level PCI bus to reset
+ * pci_reset_bus - Try to reset a PCI bus
+ * @pdev: top level PCI device to reset via slot/bus
*
* Same as above except return -EAGAIN if the bus cannot be locked
*/
-int pci_try_reset_bus(struct pci_bus *bus)
+int pci_reset_bus(struct pci_dev *pdev)
{
- int rc;
-
- rc = pci_bus_reset(bus, 1);
- if (rc)
- return rc;
-
- pci_bus_save_and_disable(bus);
-
- if (pci_bus_trylock(bus)) {
- might_sleep();
- pci_reset_bridge_secondary_bus(bus->self);
- pci_bus_unlock(bus);
- } else
- rc = -EAGAIN;
-
- pci_bus_restore(bus);
-
- return rc;
+ return pci_probe_reset_slot(pdev->slot) ?
+ __pci_reset_bus(pdev->bus) : __pci_reset_slot(pdev->slot);
}
-EXPORT_SYMBOL_GPL(pci_try_reset_bus);
+EXPORT_SYMBOL_GPL(pci_reset_bus);
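Callers of the removed pci_try_reset_slot()/pci_try_reset_bus() now pass a device and let the core pick the reset scope; an illustrative conversion (the "vdev" naming is hypothetical):

	/* before: pci_try_reset_slot(pdev->slot) or pci_try_reset_bus(pdev->bus) */
	ret = pci_reset_bus(vdev->pdev);	/* slot reset when possible, else bus */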
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
@@ -5222,6 +5492,7 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
return PCI_SPEED_UNKNOWN;
}
+EXPORT_SYMBOL(pcie_get_speed_cap);
/**
* pcie_get_width_cap - query for the PCI device's link width capability
@@ -5240,6 +5511,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
return PCIE_LNK_WIDTH_UNKNOWN;
}
+EXPORT_SYMBOL(pcie_get_width_cap);
/**
* pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
@@ -5264,14 +5536,16 @@ u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
}
/**
- * pcie_print_link_status - Report the PCI device's link speed and width
+ * __pcie_print_link_status - Report the PCI device's link speed and width
* @dev: PCI device to query
+ * @verbose: Print info even when enough bandwidth is available
*
- * Report the available bandwidth at the device. If this is less than the
- * device is capable of, report the device's maximum possible bandwidth and
- * the upstream link that limits its performance to less than that.
+ * If the available bandwidth at the device is less than the device is
+ * capable of, report the device's maximum possible bandwidth and the
+ * upstream link that limits its performance. If @verbose, always print
+ * the available bandwidth, even if the device isn't constrained.
*/
-void pcie_print_link_status(struct pci_dev *dev)
+void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
enum pcie_link_width width, width_cap;
enum pci_bus_speed speed, speed_cap;
@@ -5281,11 +5555,11 @@ void pcie_print_link_status(struct pci_dev *dev)
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
- if (bw_avail >= bw_cap)
+ if (bw_avail >= bw_cap && verbose)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
bw_cap / 1000, bw_cap % 1000,
PCIE_SPEED2STR(speed_cap), width_cap);
- else
+ else if (bw_avail < bw_cap)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
bw_avail / 1000, bw_avail % 1000,
PCIE_SPEED2STR(speed), width,
@@ -5293,6 +5567,17 @@ void pcie_print_link_status(struct pci_dev *dev)
bw_cap / 1000, bw_cap % 1000,
PCIE_SPEED2STR(speed_cap), width_cap);
}
+
+/**
+ * pcie_print_link_status - Report the PCI device's link speed and width
+ * @dev: PCI device to query
+ *
+ * Report the available bandwidth at the device.
+ */
+void pcie_print_link_status(struct pci_dev *dev)
+{
+ __pcie_print_link_status(dev, true);
+}
EXPORT_SYMBOL(pcie_print_link_status);
/**
@@ -5387,8 +5672,19 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
* @dev: the PCI device for which alias is added
* @devfn: alias slot and function
*
- * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
- * It should be called early, preferably as PCI fixup header quirk.
+ * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
+ * which is used to program permissible bus-devfn source addresses for DMA
+ * requests in an IOMMU. These aliases factor into IOMMU group creation
+ * and are useful for devices generating DMA requests beyond or different
+ * from their logical bus-devfn. Examples include device quirks where the
+ * device simply uses the wrong devfn, as well as non-transparent bridges
+ * where the alias may be a proxy for devices in another domain.
+ *
+ * IOMMU group creation is performed during device discovery or addition,
+ * prior to any potential DMA mapping and therefore prior to driver probing
+ * (especially for userspace assigned devices where IOMMU group definition
+ * cannot be left as a userspace activity). DMA aliases should therefore
+ * be configured via quirks, such as the PCI fixup header quirk.
*/
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
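A hedged example of such a fixup header quirk (the IDs and helper are invented for illustration):

static void foo_dma_alias_quirk(struct pci_dev *dev)
{
	/* device also masters DMA as function 1 of its own slot */
	pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, foo_dma_alias_quirk);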
@@ -5454,10 +5750,10 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
bool *resize)
{
- int seg, bus, slot, func, align_order, count;
- unsigned short vendor, device, subsystem_vendor, subsystem_device;
+ int align_order, count;
resource_size_t align = pcibios_default_alignment();
- char *p;
+ const char *p;
+ int ret;
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
@@ -5477,58 +5773,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
} else {
align_order = -1;
}
- if (strncmp(p, "pci:", 4) == 0) {
- /* PCI vendor/device (subvendor/subdevice) ids are specified */
- p += 4;
- if (sscanf(p, "%hx:%hx:%hx:%hx%n",
- &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
- if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
- printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
- p);
- break;
- }
- subsystem_vendor = subsystem_device = 0;
- }
- p += count;
- if ((!vendor || (vendor == dev->vendor)) &&
- (!device || (device == dev->device)) &&
- (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
- (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
- *resize = true;
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
- /* Found */
- break;
- }
- }
- else {
- if (sscanf(p, "%x:%x:%x.%x%n",
- &seg, &bus, &slot, &func, &count) != 4) {
- seg = 0;
- if (sscanf(p, "%x:%x.%x%n",
- &bus, &slot, &func, &count) != 3) {
- /* Invalid format */
- printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
- p);
- break;
- }
- }
- p += count;
- if (seg == pci_domain_nr(dev->bus) &&
- bus == dev->bus->number &&
- slot == PCI_SLOT(dev->devfn) &&
- func == PCI_FUNC(dev->devfn)) {
- *resize = true;
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
- /* Found */
- break;
- }
+
+ ret = pci_dev_str_match(dev, p, &p);
+ if (ret == 1) {
+ *resize = true;
+ if (align_order == -1)
+ align = PAGE_SIZE;
+ else
+ align = 1 << align_order;
+ break;
+ } else if (ret < 0) {
+ pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
+ p);
+ break;
}
+
if (*p != ';' && *p != ',') {
/* End of param or invalid format */
break;
@@ -5805,6 +6064,8 @@ static int __init pci_setup(char *str)
pcie_ats_disabled = true;
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
+ } else if (!strcmp(str, "earlydump")) {
+ pci_early_dump = true;
} else if (!strncmp(str, "realloc=", 8)) {
pci_realloc_get_opt(str + 8);
} else if (!strncmp(str, "realloc", 7)) {
@@ -5841,6 +6102,8 @@ static int __init pci_setup(char *str)
pcie_bus_config = PCIE_BUS_PEER2PEER;
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
+ } else if (!strncmp(str, "disable_acs_redir=", 18)) {
+ disable_acs_redir_param = str + 18;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c358e7a07f3f..6e0d1528d471 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -7,6 +7,7 @@
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
extern const unsigned char pcie_link_speed[];
+extern bool pci_early_dump;
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
@@ -33,6 +34,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
int pci_probe_reset_function(struct pci_dev *dev);
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
@@ -225,6 +227,10 @@ enum pci_bar_type {
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int crs_timeout);
+bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
+ int crs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
@@ -259,6 +265,7 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
+void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
/* Single Root I/O Virtualization */
struct pci_sriov {
@@ -288,6 +295,7 @@ struct pci_sriov {
/* pci_dev priv_flags */
#define PCI_DEV_DISCONNECTED 0
+#define PCI_DEV_ADDED 1
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
@@ -300,6 +308,44 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
}
+static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
+{
+ assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
+}
+
+static inline bool pci_dev_is_added(const struct pci_dev *dev)
+{
+ return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
+}
+
+#ifdef CONFIG_PCIEAER
+#include <linux/aer.h>
+
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
+
+struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+
+ unsigned int id:16;
+
+ unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
+ unsigned int __pad1:5;
+ unsigned int multi_error_valid:1;
+
+ unsigned int first_error:5;
+ unsigned int __pad2:2;
+ unsigned int tlp_header_valid:1;
+
+ unsigned int status; /* COR/UNCOR Error Status */
+ unsigned int mask; /* COR/UNCOR Error Mask */
+ struct aer_header_log_regs tlp; /* TLP Header */
+};
+
+int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+#endif /* CONFIG_PCIEAER */
+
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
@@ -311,6 +357,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
+void pci_iov_remove(struct pci_dev *dev);
void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
@@ -325,6 +372,9 @@ static inline void pci_iov_release(struct pci_dev *dev)
{
}
+static inline void pci_iov_remove(struct pci_dev *dev)
+{
+}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
@@ -352,6 +402,25 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
}
void pci_enable_acs(struct pci_dev *dev);
+#ifdef CONFIG_PCI_QUIRKS
+int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
+int pci_dev_specific_enable_acs(struct pci_dev *dev);
+int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
+#else
+static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
+ u16 acs_flags)
+{
+ return -ENOTTY;
+}
+static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ return -ENOTTY;
+}
+static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+{
+ return -ENOTTY;
+}
+#endif
/* PCI error reporting and recovery */
void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
@@ -452,4 +521,19 @@ static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
}
#endif
+#ifdef CONFIG_PCIEAER
+void pci_no_aer(void);
+void pci_aer_init(struct pci_dev *dev);
+void pci_aer_exit(struct pci_dev *dev);
+extern const struct attribute_group aer_stats_attr_group;
+void pci_aer_clear_fatal_status(struct pci_dev *dev);
+void pci_aer_clear_device_status(struct pci_dev *dev);
+#else
+static inline void pci_no_aer(void) { }
+static inline void pci_aer_init(struct pci_dev *d) { }
+static inline void pci_aer_exit(struct pci_dev *d) { }
+static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
+static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index a2e88386af28..83180edd6ed4 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -31,26 +31,9 @@
#include "portdrv.h"
#define AER_ERROR_SOURCES_MAX 100
-#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
-struct aer_err_info {
- struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
- int error_dev_num;
-
- unsigned int id:16;
-
- unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
- unsigned int __pad1:5;
- unsigned int multi_error_valid:1;
-
- unsigned int first_error:5;
- unsigned int __pad2:2;
- unsigned int tlp_header_valid:1;
-
- unsigned int status; /* COR/UNCOR Error Status */
- unsigned int mask; /* COR/UNCOR Error Mask */
- struct aer_header_log_regs tlp; /* TLP Header */
-};
+#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
+#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS */
struct aer_err_source {
unsigned int status;
@@ -76,6 +59,42 @@ struct aer_rpc {
*/
};
+/* AER stats for the device */
+struct aer_stats {
+
+ /*
+ * Fields for all AER capable devices. They indicate the errors
+ * "as seen by this device". Note that this may mean that if an
+ * end point is causing problems, the AER counters may increment
+ * at its link partner (e.g. root port) because the errors will be
+ * "seen" by the link partner and not the the problematic end point
+ * itself (which may report all counters as 0 as it never saw any
+ * problems).
+ */
+ /* Counters for different type of correctable errors */
+ u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
+ /* Counters for different type of fatal uncorrectable errors */
+ u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
+ /* Counters for different type of nonfatal uncorrectable errors */
+ u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
+ /* Total number of ERR_COR sent by this device */
+ u64 dev_total_cor_errs;
+ /* Total number of ERR_FATAL sent by this device */
+ u64 dev_total_fatal_errs;
+ /* Total number of ERR_NONFATAL sent by this device */
+ u64 dev_total_nonfatal_errs;
+
+ /*
+ * Fields for Root ports & root complex event collectors only, these
+ * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
+ * messages received by the root port / event collector, INCLUDING the
+ * ones that are generated internally (by the rootport itself)
+ */
+ u64 rootport_total_cor_errs;
+ u64 rootport_total_fatal_errs;
+ u64 rootport_total_nonfatal_errs;
+};
+
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
PCI_ERR_UNC_ECRC| \
PCI_ERR_UNC_UNSUP| \
@@ -303,12 +322,13 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return 0;
+ if (pcie_ports_native)
+ return 0;
+
if (!dev->__aer_firmware_first_valid)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
}
-#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
- PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
static bool aer_firmware_first;
@@ -323,6 +343,9 @@ bool aer_acpi_firmware_first(void)
.firmware_first = 0,
};
+ if (pcie_ports_native)
+ return false;
+
if (!parsed) {
apei_hest_parse(aer_hest_parse, &info);
aer_firmware_first = info.firmware_first;
@@ -357,16 +380,30 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
+void pci_aer_clear_device_status(struct pci_dev *dev)
+{
+ u16 sta;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
+ pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
+}
+
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
int pos;
- u32 status;
+ u32 status, sev;
pos = dev->aer_cap;
if (!pos)
return -EIO;
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
+ /* Clear status bits for ERR_NONFATAL errors only */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ status &= ~sev;
if (status)
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
@@ -374,6 +411,26 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+void pci_aer_clear_fatal_status(struct pci_dev *dev)
+{
+ int pos;
+ u32 status, sev;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ if (pcie_aer_get_firmware_first(dev))
+ return;
+
+ /* Clear status bits for ERR_FATAL errors only */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ status &= sev;
+ if (status)
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
int pos;
@@ -387,6 +444,9 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
if (!pos)
return -EIO;
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
@@ -402,10 +462,20 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
-int pci_aer_init(struct pci_dev *dev)
+void pci_aer_init(struct pci_dev *dev)
{
dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- return pci_cleanup_aer_error_status_regs(dev);
+
+ if (dev->aer_cap)
+ dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+
+ pci_cleanup_aer_error_status_regs(dev);
+}
+
+void pci_aer_exit(struct pci_dev *dev)
+{
+ kfree(dev->aer_stats);
+ dev->aer_stats = NULL;
}
#define AER_AGENT_RECEIVER 0
@@ -458,52 +528,52 @@ static const char *aer_error_layer[] = {
"Transaction Layer"
};
-static const char *aer_correctable_error_string[] = {
- "Receiver Error", /* Bit Position 0 */
+static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
+ "RxErr", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
- "Bad TLP", /* Bit Position 6 */
- "Bad DLLP", /* Bit Position 7 */
- "RELAY_NUM Rollover", /* Bit Position 8 */
+ "BadTLP", /* Bit Position 6 */
+ "BadDLLP", /* Bit Position 7 */
+ "Rollover", /* Bit Position 8 */
NULL,
NULL,
NULL,
- "Replay Timer Timeout", /* Bit Position 12 */
- "Advisory Non-Fatal", /* Bit Position 13 */
- "Corrected Internal Error", /* Bit Position 14 */
- "Header Log Overflow", /* Bit Position 15 */
+ "Timeout", /* Bit Position 12 */
+ "NonFatalErr", /* Bit Position 13 */
+ "CorrIntErr", /* Bit Position 14 */
+ "HeaderOF", /* Bit Position 15 */
};
-static const char *aer_uncorrectable_error_string[] = {
+static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
"Undefined", /* Bit Position 0 */
NULL,
NULL,
NULL,
- "Data Link Protocol", /* Bit Position 4 */
- "Surprise Down Error", /* Bit Position 5 */
+ "DLP", /* Bit Position 4 */
+ "SDES", /* Bit Position 5 */
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
- "Poisoned TLP", /* Bit Position 12 */
- "Flow Control Protocol", /* Bit Position 13 */
- "Completion Timeout", /* Bit Position 14 */
- "Completer Abort", /* Bit Position 15 */
- "Unexpected Completion", /* Bit Position 16 */
- "Receiver Overflow", /* Bit Position 17 */
- "Malformed TLP", /* Bit Position 18 */
+ "TLP", /* Bit Position 12 */
+ "FCP", /* Bit Position 13 */
+ "CmpltTO", /* Bit Position 14 */
+ "CmpltAbrt", /* Bit Position 15 */
+ "UnxCmplt", /* Bit Position 16 */
+ "RxOF", /* Bit Position 17 */
+ "MalfTLP", /* Bit Position 18 */
"ECRC", /* Bit Position 19 */
- "Unsupported Request", /* Bit Position 20 */
- "ACS Violation", /* Bit Position 21 */
- "Uncorrectable Internal Error", /* Bit Position 22 */
- "MC Blocked TLP", /* Bit Position 23 */
- "AtomicOp Egress Blocked", /* Bit Position 24 */
- "TLP Prefix Blocked Error", /* Bit Position 25 */
+ "UnsupReq", /* Bit Position 20 */
+ "ACSViol", /* Bit Position 21 */
+ "UncorrIntErr", /* Bit Position 22 */
+ "BlockedTLP", /* Bit Position 23 */
+ "AtomicOpBlocked", /* Bit Position 24 */
+ "TLPBlockedErr", /* Bit Position 25 */
};
static const char *aer_agent_string[] = {
@@ -513,6 +583,144 @@ static const char *aer_agent_string[] = {
"Transmitter ID"
};
+#define aer_stats_dev_attr(name, stats_array, strings_array, \
+ total_string, total_field) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ unsigned int i; \
+ char *str = buf; \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ u64 *stats = pdev->aer_stats->stats_array; \
+ \
+ for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ if (strings_array[i]) \
+ str += sprintf(str, "%s %llu\n", \
+ strings_array[i], stats[i]); \
+ else if (stats[i]) \
+ str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
+ i, stats[i]); \
+ } \
+ str += sprintf(str, "TOTAL_%s %llu\n", total_string, \
+ pdev->aer_stats->total_field); \
+ return str-buf; \
+} \
+static DEVICE_ATTR_RO(name)
+
+aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
+ aer_correctable_error_string, "ERR_COR",
+ dev_total_cor_errs);
+aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
+ aer_uncorrectable_error_string, "ERR_FATAL",
+ dev_total_fatal_errs);
+aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
+ aer_uncorrectable_error_string, "ERR_NONFATAL",
+ dev_total_nonfatal_errs);
+
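The aer_stats_dev_attr() macro above stamps out one sysfs show() routine per counter group by token-pasting the attribute name. A minimal standalone C sketch of the same pattern; the names and the plain-buffer interface here are illustrative stand-ins, not the kernel sysfs API:

#include <stdio.h>

#define DEFINE_STATS_SHOW(name, nr)                                     \
static int name##_show(char *buf, const char *const strings[],          \
                       const unsigned long long stats[])                 \
{                                                                        \
	char *str = buf;                                                 \
	unsigned int i;                                                  \
	for (i = 0; i < (nr); i++) {                                     \
		if (strings[i])                                          \
			str += sprintf(str, "%s %llu\n",                 \
				       strings[i], stats[i]);            \
		else if (stats[i])                                       \
			str += sprintf(str, "bit[%u] %llu\n",            \
				       i, stats[i]);                     \
	}                                                                \
	return (int)(str - buf);                                         \
}

DEFINE_STATS_SHOW(demo, 3)

int main(void)
{
	const char *const names[3] = { "RxErr", NULL, "BadTLP" };
	const unsigned long long stats[3] = { 4, 1, 2 };
	char out[256];
	int n = demo_show(out, names, stats);

	fwrite(out, 1, (size_t)n, stdout);
	return 0;
}

Each expansion walks a strings[]/stats[] pair, printing named counters and falling back to a raw bit index for reserved bits that still accumulated events, exactly the shape of the show functions generated above.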
+#define aer_stats_rootport_attr(name, field) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ return sprintf(buf, "%llu\n", pdev->aer_stats->field); \
+} \
+static DEVICE_ATTR_RO(name)
+
+aer_stats_rootport_attr(aer_rootport_total_err_cor,
+ rootport_total_cor_errs);
+aer_stats_rootport_attr(aer_rootport_total_err_fatal,
+ rootport_total_fatal_errs);
+aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
+ rootport_total_nonfatal_errs);
+
+static struct attribute *aer_stats_attrs[] __ro_after_init = {
+ &dev_attr_aer_dev_correctable.attr,
+ &dev_attr_aer_dev_fatal.attr,
+ &dev_attr_aer_dev_nonfatal.attr,
+ &dev_attr_aer_rootport_total_err_cor.attr,
+ &dev_attr_aer_rootport_total_err_fatal.attr,
+ &dev_attr_aer_rootport_total_err_nonfatal.attr,
+ NULL
+};
+
+static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->aer_stats)
+ return 0;
+
+ if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
+ a == &dev_attr_aer_rootport_total_err_fatal.attr ||
+ a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group aer_stats_attr_group = {
+ .attrs = aer_stats_attrs,
+ .is_visible = aer_stats_attrs_are_visible,
+};
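The group relies on the standard .is_visible hook so that a single static attribute list can be registered for every PCI device while per-device state decides what actually appears; returning 0 hides an entry. A simplified standalone sketch of that filtering decision (types and names are stand-ins, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

enum pcie_type { TYPE_ENDPOINT, TYPE_ROOT_PORT };

struct fake_dev {
	const void *aer_stats;	/* NULL when there is no AER capability */
	enum pcie_type type;
};

/* 0 hides the attribute; a nonzero mode shows it (0444 = read-only). */
static unsigned int stats_attr_mode(const struct fake_dev *d,
				    bool rootport_only)
{
	if (!d->aer_stats)
		return 0;
	if (rootport_only && d->type != TYPE_ROOT_PORT)
		return 0;
	return 0444;
}

int main(void)
{
	struct fake_dev ep = { .aer_stats = &ep, .type = TYPE_ENDPOINT };
	struct fake_dev rp = { .aer_stats = &rp, .type = TYPE_ROOT_PORT };

	printf("endpoint rootport-attr mode: %o\n",
	       stats_attr_mode(&ep, true));	/* 0: hidden */
	printf("rootport rootport-attr mode: %o\n",
	       stats_attr_mode(&rp, true));	/* 444: visible */
	return 0;
}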
+
+static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_info *info)
+{
+ int status, i, max = -1;
+ u64 *counter = NULL;
+ struct aer_stats *aer_stats = pdev->aer_stats;
+
+ if (!aer_stats)
+ return;
+
+ switch (info->severity) {
+ case AER_CORRECTABLE:
+ aer_stats->dev_total_cor_errs++;
+ counter = &aer_stats->dev_cor_errs[0];
+ max = AER_MAX_TYPEOF_COR_ERRS;
+ break;
+ case AER_NONFATAL:
+ aer_stats->dev_total_nonfatal_errs++;
+ counter = &aer_stats->dev_nonfatal_errs[0];
+ max = AER_MAX_TYPEOF_UNCOR_ERRS;
+ break;
+ case AER_FATAL:
+ aer_stats->dev_total_fatal_errs++;
+ counter = &aer_stats->dev_fatal_errs[0];
+ max = AER_MAX_TYPEOF_UNCOR_ERRS;
+ break;
+ }
+
+ status = (info->status & ~info->mask);
+ for (i = 0; i < max; i++)
+ if (status & (1 << i))
+ counter[i]++;
+}
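The counting step is a plain walk of the unmasked status bits: each set bit below the per-severity maximum bumps its own counter. A tiny standalone illustration with made-up status/mask values:

#include <stdio.h>

int main(void)
{
	unsigned long long counter[16] = { 0 };
	unsigned int status = 0x1041;	/* pretend raw error status */
	unsigned int mask   = 0x0040;	/* pretend bit 6 is masked */
	unsigned int bits = status & ~mask;
	int i;

	for (i = 0; i < 16; i++)
		if (bits & (1u << i))
			counter[i]++;

	for (i = 0; i < 16; i++)
		if (counter[i])
			printf("bit %2d: %llu\n", i, counter[i]);
	return 0;	/* bits 0 and 12 counted; masked bit 6 skipped */
}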
+
+static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_source *e_src)
+{
+ struct aer_stats *aer_stats = pdev->aer_stats;
+
+ if (!aer_stats)
+ return;
+
+ if (e_src->status & PCI_ERR_ROOT_COR_RCV)
+ aer_stats->rootport_total_cor_errs++;
+
+ if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
+ if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
+ aer_stats->rootport_total_fatal_errs++;
+ else
+ aer_stats->rootport_total_nonfatal_errs++;
+ }
+}
+
static void __print_tlp_header(struct pci_dev *dev,
struct aer_header_log_regs *t)
{
@@ -545,9 +753,10 @@ static void __aer_print_error(struct pci_dev *dev,
pci_err(dev, " [%2d] Unknown Error Bit%s\n",
i, info->first_error == i ? " (First)" : "");
}
+ pci_dev_aer_stats_incr(dev, info);
}
-static void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
int layer, agent;
int id = ((dev->bus->number << 8) | dev->devfn);
@@ -799,6 +1008,7 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
+ pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
pcie_do_nonfatal_recovery(dev);
else if (info->severity == AER_FATAL)
@@ -876,7 +1086,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif
/**
- * get_device_error_info - read error status from dev and store it to info
+ * aer_get_device_error_info - read error status from dev and store it to info
* @dev: pointer to the device expected to have a error record
* @info: pointer to structure to store the error record
*
@@ -884,7 +1094,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
-static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int pos, temp;
@@ -942,11 +1152,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
/* Report all errors before handling them, so records are not lost to resets etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info->dev[i], e_info))
handle_error_source(e_info->dev[i], e_info);
}
}
@@ -962,6 +1172,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
struct pci_dev *pdev = rpc->rpd;
struct aer_err_info *e_info = &rpc->e_info;
+ pci_rootport_aer_stats_incr(pdev, e_src);
+
/*
* There is a possibility that both a correctable error and an
* uncorrectable error are logged. Report the correctable error first.
@@ -1305,6 +1517,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
u32 reg32;
int pos;
+ int rc;
pos = dev->aer_cap;
@@ -1313,7 +1526,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- pci_reset_bridge_secondary_bus(dev);
+ rc = pci_bridge_secondary_bus_reset(dev);
pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
@@ -1325,7 +1538,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- return PCI_ERS_RESULT_RECOVERED;
+ return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
/**
@@ -1336,20 +1549,8 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
*/
static void aer_error_resume(struct pci_dev *dev)
{
- int pos;
- u32 status, mask;
- u16 reg16;
-
- /* Clean up Root device status */
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16);
- pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
-
- /* Clean AER Root Error Status */
- pos = dev->aer_cap;
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
- status &= ~mask; /* Clear corresponding nonfatal bits */
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_aer_clear_device_status(dev);
+ pci_cleanup_aer_uncorrect_error_status(dev);
}
static struct pcie_port_service_driver aerdriver = {
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index c687c817b47d..5326916715d2 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1127,11 +1127,9 @@ static int pcie_aspm_set_policy(const char *val,
if (aspm_disabled)
return -EPERM;
- for (i = 0; i < ARRAY_SIZE(policy_str); i++)
- if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
- break;
- if (i >= ARRAY_SIZE(policy_str))
- return -EINVAL;
+ i = sysfs_match_string(policy_str, val);
+ if (i < 0)
+ return i;
if (i == aspm_policy)
return 0;
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 921ed979109d..f03279fc87cd 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -6,6 +6,7 @@
* Copyright (C) 2016 Intel Corp.
*/
+#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -16,10 +17,8 @@
struct dpc_dev {
struct pcie_device *dev;
- struct work_struct work;
u16 cap_pos;
bool rp_extensions;
- u32 rp_pio_status;
u8 rp_log_size;
};
@@ -65,19 +64,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
return 0;
}
-static void dpc_wait_link_inactive(struct dpc_dev *dpc)
-{
- struct pci_dev *pdev = dpc->dev->port;
-
- pcie_wait_for_link(pdev, false);
-}
-
static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
struct dpc_dev *dpc;
struct pcie_device *pciedev;
struct device *devdpc;
- u16 cap, ctl;
+
+ u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
@@ -92,34 +85,17 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
* Wait until the Link is inactive, then clear DPC Trigger Status
* to allow the Port to leave DPC.
*/
- dpc_wait_link_inactive(dpc);
+ pcie_wait_for_link(pdev, false);
if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
return PCI_ERS_RESULT_DISCONNECT;
- if (dpc->rp_extensions && dpc->rp_pio_status) {
- pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
- dpc->rp_pio_status);
- dpc->rp_pio_status = 0;
- }
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
- ctl | PCI_EXP_DPC_CTL_INT_EN);
-
return PCI_ERS_RESULT_RECOVERED;
}
-static void dpc_work(struct work_struct *work)
-{
- struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
- struct pci_dev *pdev = dpc->dev->port;
-
- /* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
-}
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
@@ -134,8 +110,6 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
status, mask);
- dpc->rp_pio_status = status;
-
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
@@ -146,15 +120,14 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
first_error = (dpc_status & 0x1f00) >> 8;
- status &= ~mask;
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
- if (status & (1 << i))
+ if ((status & ~mask) & (1 << i))
dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
first_error == i ? " (First)" : "");
}
if (dpc->rp_log_size < 4)
- return;
+ goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
&dw0);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
@@ -167,7 +140,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
dw0, dw1, dw2, dw3);
if (dpc->rp_log_size < 5)
- return;
+ goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log);
@@ -176,43 +149,26 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
+ clear_status:
+ pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
-static irqreturn_t dpc_irq(int irq, void *context)
+static irqreturn_t dpc_handler(int irq, void *context)
{
- struct dpc_dev *dpc = (struct dpc_dev *)context;
+ struct aer_err_info info;
+ struct dpc_dev *dpc = context;
struct pci_dev *pdev = dpc->dev->port;
struct device *dev = &dpc->dev->device;
- u16 cap = dpc->cap_pos, ctl, status, source, reason, ext_reason;
-
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
-
- if (!(ctl & PCI_EXP_DPC_CTL_INT_EN) || ctl == (u16)(~0))
- return IRQ_NONE;
+ u16 cap = dpc->cap_pos, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
-
- if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT))
- return IRQ_NONE;
-
- if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
- PCI_EXP_DPC_STATUS_INTERRUPT);
- return IRQ_HANDLED;
- }
-
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
- ctl & ~PCI_EXP_DPC_CTL_INT_EN);
-
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
- &source);
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n",
- status, source);
+ status, source);
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
-
dev_warn(dev, "DPC %s detected, remove downstream devices\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
@@ -220,15 +176,36 @@ static irqreturn_t dpc_irq(int irq, void *context)
(ext_reason == 0) ? "RP PIO error" :
(ext_reason == 1) ? "software trigger" :
"reserved error");
+
/* show RP PIO error detail information */
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(dpc);
+ else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
+ aer_print_error(pdev, &info);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ }
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
- PCI_EXP_DPC_STATUS_INTERRUPT);
+ /* We configure DPC so it only triggers on ERR_FATAL */
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dpc_irq(int irq, void *context)
+{
+ struct dpc_dev *dpc = (struct dpc_dev *)context;
+ struct pci_dev *pdev = dpc->dev->port;
+ u16 cap = dpc->cap_pos, status;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- schedule_work(&dpc->work);
+ if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0))
+ return IRQ_NONE;
+ pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+ if (status & PCI_EXP_DPC_STATUS_TRIGGER)
+ return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
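The interrupt path is now split between a hard handler that only acknowledges and classifies the event and a threaded handler that performs the slow recovery work in process context, replacing the old workqueue. A kernel-style sketch of the split; this is not standalone-buildable, and event_pending(), ack_event() and do_slow_recovery() are hypothetical helpers standing in for the DPC status checks and recovery above:

/* Hard IRQ context: just acknowledge and classify, never sleep. */
static irqreturn_t demo_hardirq(int irq, void *data)
{
	if (!event_pending(data))	/* hypothetical helper */
		return IRQ_NONE;	/* not ours (shared line) */
	ack_event(data);		/* hypothetical helper */
	return IRQ_WAKE_THREAD;		/* defer to demo_thread() */
}

/* Thread context: config-space access and recovery may sleep. */
static irqreturn_t demo_thread(int irq, void *data)
{
	do_slow_recovery(data);		/* hypothetical helper */
	return IRQ_HANDLED;
}

/*
 * Registration mirrors the dpc_probe() change:
 *   devm_request_threaded_irq(device, irq, demo_hardirq, demo_thread,
 *                             IRQF_SHARED, "demo", data);
 */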
@@ -250,11 +227,11 @@ static int dpc_probe(struct pcie_device *dev)
dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
dpc->dev = dev;
- INIT_WORK(&dpc->work, dpc_work);
set_service_data(dev, dpc);
- status = devm_request_irq(device, dev->irq, dpc_irq, IRQF_SHARED,
- "pcie-dpc", dpc);
+ status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
+ dpc_handler, IRQF_SHARED,
+ "pcie-dpc", dpc);
if (status) {
dev_warn(device, "request IRQ%d failed: %d\n", dev->irq,
status);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index f7ce0cb0b0b7..708fd3a0d646 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -175,9 +175,11 @@ out:
*/
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
- pci_reset_bridge_secondary_bus(dev);
+ int rc;
+
+ rc = pci_bridge_secondary_bus_reset(dev);
pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
- return PCI_ERS_RESULT_RECOVERED;
+ return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
@@ -252,6 +254,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
dev->error_state = state;
pci_walk_bus(dev->subordinate, cb, &result_data);
if (cb == report_resume) {
+ pci_aer_clear_device_status(dev);
pci_cleanup_aer_uncorrect_error_status(dev);
dev->error_state = pci_channel_io_normal;
}
@@ -259,15 +262,10 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
/*
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
+ * The error is non-fatal, so the bus is OK; just invoke
+ * the callback for the function that logged the error.
*/
- if (state == pci_channel_io_normal)
- /*
- * the error is non fatal so the bus is ok, just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- else
- pci_walk_bus(dev->bus, cb, &result_data);
+ cb(dev, &result_data);
}
return result_data.result;
@@ -295,6 +293,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
parent = udev->subordinate;
pci_lock_rescan_remove();
+ pci_dev_get(dev);
list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
bus_list) {
pci_dev_get(pdev);
@@ -316,7 +315,8 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
* do error recovery on all subordinates of the bridge instead
* of the bridge and clear the error status of the bridge.
*/
- pci_cleanup_aer_uncorrect_error_status(dev);
+ pci_aer_clear_fatal_status(dev);
+ pci_aer_clear_device_status(dev);
}
if (result == PCI_ERS_RESULT_RECOVERED) {
@@ -328,6 +328,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
pci_info(dev, "Device recovery from fatal error failed\n");
}
+ pci_dev_put(dev);
pci_unlock_rescan_remove();
}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 6ffc797a0dc1..d59afa42fc14 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -50,6 +50,7 @@ struct pcie_port_service_driver {
int (*probe) (struct pcie_device *dev);
void (*remove) (struct pcie_device *dev);
int (*suspend) (struct pcie_device *dev);
+ int (*resume_noirq) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
/* Device driver may resume normal operations */
@@ -82,6 +83,7 @@ extern struct bus_type pcie_port_bus_type;
int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
int pcie_port_device_suspend(struct device *dev);
+int pcie_port_device_resume_noirq(struct device *dev);
int pcie_port_device_resume(struct device *dev);
#endif
void pcie_port_device_remove(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e0261ad4bcdd..7c37d815229e 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -353,14 +353,19 @@ error_disable:
}
#ifdef CONFIG_PM
-static int suspend_iter(struct device *dev, void *data)
+typedef int (*pcie_pm_callback_t)(struct pcie_device *);
+
+static int pm_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
+ size_t offset = *(size_t *)data;
+ pcie_pm_callback_t cb;
if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
service_driver = to_service_driver(dev->driver);
- if (service_driver->suspend)
- service_driver->suspend(to_pcie_device(dev));
+ cb = *(pcie_pm_callback_t *)((void *)service_driver + offset);
+ if (cb)
+ return cb(to_pcie_device(dev));
}
return 0;
}
@@ -371,20 +376,14 @@ static int suspend_iter(struct device *dev, void *data)
*/
int pcie_port_device_suspend(struct device *dev)
{
- return device_for_each_child(dev, NULL, suspend_iter);
+ size_t off = offsetof(struct pcie_port_service_driver, suspend);
+ return device_for_each_child(dev, &off, pm_iter);
}
-static int resume_iter(struct device *dev, void *data)
+int pcie_port_device_resume_noirq(struct device *dev)
{
- struct pcie_port_service_driver *service_driver;
-
- if ((dev->bus == &pcie_port_bus_type) &&
- (dev->driver)) {
- service_driver = to_service_driver(dev->driver);
- if (service_driver->resume)
- service_driver->resume(to_pcie_device(dev));
- }
- return 0;
+ size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
+ return device_for_each_child(dev, &off, pm_iter);
}
/**
@@ -393,7 +392,8 @@ static int resume_iter(struct device *dev, void *data)
*/
int pcie_port_device_resume(struct device *dev)
{
- return device_for_each_child(dev, NULL, resume_iter);
+ size_t off = offsetof(struct pcie_port_service_driver, resume);
+ return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */
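pm_iter() collapses one iterator per callback into a single generic one: the offsetof() of the desired function pointer within the ops structure is passed down as the iteration argument and dereferenced at that offset. A standalone sketch of the same dispatch trick:

#include <stdio.h>
#include <stddef.h>

struct ops {
	int (*suspend)(void);
	int (*resume)(void);
};

typedef int (*cb_t)(void);

static int do_suspend(void) { puts("suspend"); return 0; }
static int do_resume(void)  { puts("resume");  return 0; }

/* Fetch the callback stored 'offset' bytes into *ops and invoke it. */
static int call_by_offset(struct ops *ops, size_t offset)
{
	cb_t cb = *(cb_t *)((char *)ops + offset);

	return cb ? cb() : 0;
}

int main(void)
{
	struct ops ops = { .suspend = do_suspend, .resume = do_resume };

	call_by_offset(&ops, offsetof(struct ops, suspend));
	call_by_offset(&ops, offsetof(struct ops, resume));
	return 0;
}

Adding resume_noirq then costs only a new offsetof() call site rather than another near-identical iterator, which is exactly what the pcie_port_device_resume_noirq() addition exploits.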
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 973f1b80a038..eef22dc29140 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -42,17 +42,6 @@ __setup("pcie_ports=", pcie_port_setup);
/* global data */
-static int pcie_portdrv_restore_config(struct pci_dev *dev)
-{
- int retval;
-
- retval = pci_enable_device(dev);
- if (retval)
- return retval;
- pci_set_master(dev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
@@ -76,10 +65,12 @@ static int pcie_port_runtime_idle(struct device *dev)
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
+ .resume_noirq = pcie_port_device_resume_noirq,
.resume = pcie_port_device_resume,
.freeze = pcie_port_device_suspend,
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
+ .restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_runtime_resume,
@@ -160,19 +151,6 @@ static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
return PCI_ERS_RESULT_RECOVERED;
}
-static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
-{
- /* If fatal, restore cfg space for possible link reset at upstream */
- if (dev->error_state == pci_channel_io_frozen) {
- dev->state_saved = true;
- pci_restore_state(dev);
- pcie_portdrv_restore_config(dev);
- pci_enable_pcie_error_reporting(dev);
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
static int resume_iter(struct device *device, void *data)
{
struct pcie_device *pcie_device;
@@ -208,7 +186,6 @@ static const struct pci_device_id port_pci_ids[] = { {
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
.mmio_enabled = pcie_portdrv_mmio_enabled,
- .slot_reset = pcie_portdrv_slot_reset,
.resume = pcie_portdrv_err_resume,
};
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ac876e32de4b..ec784009a36b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
-#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/hypervisor.h>
@@ -1549,6 +1548,20 @@ static int pci_intx_mask_broken(struct pci_dev *dev)
return 0;
}
+static void early_dump_pci_device(struct pci_dev *pdev)
+{
+ u32 value[256 / 4];
+ int i;
+
+ pci_info(pdev, "config space:\n");
+
+ for (i = 0; i < 256; i += 4)
+ pci_read_config_dword(pdev, i, &value[i / 4]);
+
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ value, 256, false);
+}
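early_dump_pci_device() reads the first 256 bytes of config space dword by dword and hex-dumps them for early boot debugging. The same bytes are also exported through sysfs, so an equivalent dump can be produced from user space; a sketch, where the device path is only an example BDF:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:00:00.0/config"; /* example */
	unsigned char buf[256];
	FILE *f = fopen(path, "rb");
	int i;

	if (!f)
		return 1;
	if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	for (i = 0; i < 256; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}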
+
/**
* pci_setup_device - Fill in class and map information of a device
* @dev: the device structure to fill
@@ -1598,6 +1611,9 @@ int pci_setup_device(struct pci_dev *dev)
pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
+ if (pci_early_dump)
+ early_dump_pci_device(dev);
+
/* Need to have dev->class ready */
dev->cfg_size = pci_cfg_space_size(dev);
@@ -1725,11 +1741,15 @@ int pci_setup_device(struct pci_dev *dev)
static void pci_configure_mps(struct pci_dev *dev)
{
struct pci_dev *bridge = pci_upstream_bridge(dev);
- int mps, p_mps, rc;
+ int mps, mpss, p_mps, rc;
if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
return;
+ /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
+ if (dev->is_virtfn)
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
@@ -1749,6 +1769,14 @@ static void pci_configure_mps(struct pci_dev *dev)
if (pcie_bus_config != PCIE_BUS_DEFAULT)
return;
+ mpss = 128 << dev->pcie_mpss;
+ if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
+ pcie_set_mps(bridge, mpss);
+ pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
+ mpss, p_mps, 128 << bridge->pcie_mpss);
+ p_mps = pcie_get_mps(bridge);
+ }
+
rc = pcie_set_mps(dev, p_mps);
if (rc) {
pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
@@ -1757,7 +1785,7 @@ static void pci_configure_mps(struct pci_dev *dev)
}
pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
- p_mps, mps, 128 << dev->pcie_mpss);
+ p_mps, mps, mpss);
}
static struct hpp_type0 pci_default_type0 = {
@@ -2042,6 +2070,29 @@ static void pci_configure_ltr(struct pci_dev *dev)
#endif
}
+static void pci_configure_eetlp_prefix(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_PASID
+ struct pci_dev *bridge;
+ u32 cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
+ return;
+
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ dev->eetlp_prefix_path = 1;
+ else {
+ bridge = pci_upstream_bridge(dev);
+ if (bridge && bridge->eetlp_prefix_path)
+ dev->eetlp_prefix_path = 1;
+ }
+#endif
+}
+
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
@@ -2051,6 +2102,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_extended_tags(dev, NULL);
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
+ pci_configure_eetlp_prefix(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
@@ -2064,6 +2116,7 @@ static void pci_configure_device(struct pci_dev *dev)
static void pci_release_capabilities(struct pci_dev *dev)
{
+ pci_aer_exit(dev);
pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
@@ -2156,8 +2209,8 @@ static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
return true;
}
-bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
- int timeout)
+bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
+ int timeout)
{
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
return false;
@@ -2172,6 +2225,24 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
return true;
}
+
+bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
+ int timeout)
+{
+#ifdef CONFIG_PCI_QUIRKS
+ struct pci_dev *bridge = bus->self;
+
+ /*
+ * Certain IDT switches have an issue where they improperly trigger
+ * ACS Source Validation errors on completions for config reads.
+ */
+ if (bridge && bridge->vendor == PCI_VENDOR_ID_IDT &&
+ bridge->device == 0x80b5)
+ return pci_idt_bus_quirk(bus, devfn, l, timeout);
+#endif
+
+ return pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
+}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
/*
@@ -2205,6 +2276,25 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
return dev;
}
+static void pcie_report_downtraining(struct pci_dev *dev)
+{
+ if (!pci_is_pcie(dev))
+ return;
+
+ /* Look from the device up to avoid downstream ports with no devices */
+ if ((pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_LEG_END) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM))
+ return;
+
+ /* Multi-function PCIe devices share the same link/status */
+ if (PCI_FUNC(dev->devfn) != 0 || dev->is_virtfn)
+ return;
+
+ /* Print link status only if the device is constrained by the fabric */
+ __pcie_print_link_status(dev, false);
+}
+
static void pci_init_capabilities(struct pci_dev *dev)
{
/* Enhanced Allocation */
@@ -2240,6 +2330,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Advanced Error Reporting */
pci_aer_init(dev);
+ pcie_report_downtraining(dev);
+
if (pci_probe_reset_function(dev) == 0)
dev->reset_fn = 1;
}
@@ -2433,13 +2525,13 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
dev = pci_scan_single_device(bus, devfn);
if (!dev)
return 0;
- if (!dev->is_added)
+ if (!pci_dev_is_added(dev))
nr++;
for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
- if (!dev->is_added)
+ if (!pci_dev_is_added(dev))
nr++;
dev->multifunction = 1;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f439de848658..ef7143a274e0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -25,8 +25,10 @@
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
+#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
+#include <linux/switchtec.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -64,9 +66,15 @@ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
f->vendor == (u16) PCI_ANY_ID) &&
(f->device == dev->device ||
f->device == (u16) PCI_ANY_ID)) {
- calltime = fixup_debug_start(dev, f->hook);
- f->hook(dev);
- fixup_debug_report(dev, calltime, f->hook);
+ void (*hook)(struct pci_dev *dev);
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ hook = offset_to_ptr(&f->hook_offset);
+#else
+ hook = f->hook;
+#endif
+ calltime = fixup_debug_start(dev, hook);
+ hook(dev);
+ fixup_debug_report(dev, calltime, hook);
}
}
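With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS the fixup table stores a 32-bit self-relative offset instead of an absolute function pointer, shrinking the table on 64-bit and avoiding load-time relocations; offset_to_ptr() rebuilds the pointer from the field's own address. A standalone sketch of the scheme; note the offset is computed at run time here purely for the demo, whereas the kernel records it at build time via relocations:

#include <stdint.h>
#include <stdio.h>

static void hello(void) { puts("fixup hook called"); }

struct fixup {
	int32_t hook_offset;	/* = target - &hook_offset */
};

static struct fixup f;		/* static: same loaded image as hello() */

/* Mirrors the kernel's offset_to_ptr(): field address + stored offset. */
static void *offset_to_ptr(const int32_t *off)
{
	return (void *)((uintptr_t)off + (uintptr_t)(intptr_t)*off);
}

int main(void)
{
	void (*hook)(void);

	/* Demo-only packing; assumes the distance fits in 32 bits,
	 * which holds for two symbols within one loaded image. */
	f.hook_offset = (int32_t)((intptr_t)(void *)hello -
				  (intptr_t)(void *)&f.hook_offset);

	hook = (void (*)(void))offset_to_ptr(&f.hook_offset);
	hook();
	return 0;
}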
@@ -460,6 +468,7 @@ static void quirk_nfp6000(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
@@ -2105,6 +2114,7 @@ static void quirk_netmos(struct pci_dev *dev)
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
+ /* else: fall through */
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
@@ -2352,6 +2362,9 @@ static void quirk_paxc_bridge(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
#endif
/*
@@ -3664,6 +3677,108 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
+/*
+ * The Samsung SM961/PM961 controller can sometimes enter a fatal state after
+ * FLR where config space reads from the device return -1. We seem to be
+ * able to avoid this condition if we disable the NVMe controller prior to
+ * FLR. This quirk is generic for any NVMe class device requiring similar
+ * assistance to quiesce the device prior to FLR.
+ *
+ * NVMe specification: https://nvmexpress.org/resources/specifications/
+ * Revision 1.0e:
+ * Chapter 2: Required and optional PCI config registers
+ * Chapter 3: NVMe control registers
+ * Chapter 7.3: Reset behavior
+ */
+static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
+{
+ void __iomem *bar;
+ u16 cmd;
+ u32 cfg;
+
+ if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
+ !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
+ if (!bar)
+ return -ENOTTY;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
+
+ cfg = readl(bar + NVME_REG_CC);
+
+ /* Disable controller if enabled */
+ if (cfg & NVME_CC_ENABLE) {
+ u32 cap = readl(bar + NVME_REG_CAP);
+ unsigned long timeout;
+
+ /*
+ * Per nvme_disable_ctrl() skip shutdown notification as it
+ * could complete commands to the admin queue. We only intend
+ * to quiesce the device before reset.
+ */
+ cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
+
+ writel(cfg, bar + NVME_REG_CC);
+
+ /*
+ * Some controllers require an additional delay here, see
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY. None of those are yet
+ * supported by this quirk.
+ */
+
+ /* Cap register provides max timeout in 500ms increments */
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+
+ for (;;) {
+ u32 status = readl(bar + NVME_REG_CSTS);
+
+ /* Ready status becomes zero once the disable completes */
+ if (!(status & NVME_CSTS_RDY))
+ break;
+
+ msleep(100);
+
+ if (time_after(jiffies, timeout)) {
+ pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
+ break;
+ }
+ }
+ }
+
+ pci_iounmap(dev, bar);
+
+ pcie_flr(dev);
+
+ return 0;
+}
+
+/*
+ * Intel DC P3700 NVMe controller will timeout waiting for ready status
+ * to change after NVMe enable if the driver starts interacting with the
+ * device too soon after FLR. A 250ms delay after FLR has heuristically
+ * proven to produce reliably working results for device assignment cases.
+ */
+static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
+{
+ if (!pcie_has_flr(dev))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pcie_flr(dev);
+
+ msleep(250);
+
+ return 0;
+}
+
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@@ -3671,6 +3786,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
+ { PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
+ { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
@@ -3740,6 +3857,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c134 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
@@ -4553,27 +4673,79 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
return 0;
}
-static const struct pci_dev_enable_acs {
+static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
+{
+ int pos;
+ u32 cap, ctrl;
+
+ if (!pci_quirk_intel_spt_pch_acs_match(dev))
+ return -ENOTTY;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return -ENOTTY;
+
+ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+ ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
+
+ pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
+
+ pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
+
+ return 0;
+}
+
+static const struct pci_dev_acs_ops {
u16 vendor;
u16 device;
int (*enable_acs)(struct pci_dev *dev);
-} pci_dev_enable_acs[] = {
- { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
- { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
- { 0 }
+ int (*disable_acs_redir)(struct pci_dev *dev);
+} pci_dev_acs_ops[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ .enable_acs = pci_quirk_enable_intel_pch_acs,
+ },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
+ .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
+ },
};
int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
- const struct pci_dev_enable_acs *i;
- int ret;
+ const struct pci_dev_acs_ops *p;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
+ p = &pci_dev_acs_ops[i];
+ if ((p->vendor == dev->vendor ||
+ p->vendor == (u16)PCI_ANY_ID) &&
+ (p->device == dev->device ||
+ p->device == (u16)PCI_ANY_ID) &&
+ p->enable_acs) {
+ ret = p->enable_acs(dev);
+ if (ret >= 0)
+ return ret;
+ }
+ }
- for (i = pci_dev_enable_acs; i->enable_acs; i++) {
- if ((i->vendor == dev->vendor ||
- i->vendor == (u16)PCI_ANY_ID) &&
- (i->device == dev->device ||
- i->device == (u16)PCI_ANY_ID)) {
- ret = i->enable_acs(dev);
+ return -ENOTTY;
+}
+
+int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+{
+ const struct pci_dev_acs_ops *p;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
+ p = &pci_dev_acs_ops[i];
+ if ((p->vendor == dev->vendor ||
+ p->vendor == (u16)PCI_ANY_ID) &&
+ (p->device == dev->device ||
+ p->device == (u16)PCI_ANY_ID) &&
+ p->disable_acs_redir) {
+ ret = p->disable_acs_redir(dev);
if (ret >= 0)
return ret;
}
@@ -4753,3 +4925,197 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+
+/*
+ * Some IDT switches incorrectly flag an ACS Source Validation error on
+ * completions for config read requests even though PCIe r4.0, sec
+ * 6.12.1.1, says that completions are never affected by ACS Source
+ * Validation. Here's the text of IDT 89H32H8G3-YC, erratum #36:
+ *
+ * Item #36 - Downstream port applies ACS Source Validation to Completions
+ * Section 6.12.1.1 of the PCI Express Base Specification 3.1 states that
+ * completions are never affected by ACS Source Validation. However,
+ * completions received by a downstream port of the PCIe switch from a
+ * device that has not yet captured a PCIe bus number are incorrectly
+ * dropped by ACS Source Validation by the switch downstream port.
+ *
+ * The workaround suggested by IDT is to issue a config write to the
+ * downstream device before issuing the first config read. This allows the
+ * downstream device to capture its bus and device numbers (see PCIe r4.0,
+ * sec 2.2.9), thus avoiding the ACS error on the completion.
+ *
+ * However, we don't know when the device is ready to accept the config
+ * write, so we do config reads until we receive a non-Config Request Retry
+ * Status, then do the config write.
+ *
+ * To avoid hitting the erratum when doing the config reads, we disable ACS
+ * SV around this process.
+ */
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
+{
+ int pos;
+ u16 ctrl = 0;
+ bool found;
+ struct pci_dev *bridge = bus->self;
+
+ pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS);
+
+ /* Disable ACS SV before initial config reads */
+ if (pos) {
+ pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
+ if (ctrl & PCI_ACS_SV)
+ pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
+ ctrl & ~PCI_ACS_SV);
+ }
+
+ found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
+
+ /* Write Vendor ID (read-only) so the endpoint latches its bus/dev */
+ if (found)
+ pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);
+
+ /* Re-enable ACS_SV if it was previously enabled */
+ if (ctrl & PCI_ACS_SV)
+ pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);
+
+ return found;
+}
+
+/*
+ * Microsemi Switchtec NTB uses devfn proxy IDs to move TLPs between
+ * NT endpoints via the internal switch fabric. These IDs replace the
+ * originating requestor ID TLPs which access host memory on peer NTB
+ * ports. Therefore, all proxy IDs must be aliased to the NTB device
+ * to permit access when the IOMMU is turned on.
+ */
+static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
+{
+ void __iomem *mmio;
+ struct ntb_info_regs __iomem *mmio_ntb;
+ struct ntb_ctrl_regs __iomem *mmio_ctrl;
+ struct sys_info_regs __iomem *mmio_sys_info;
+ u64 partition_map;
+ u8 partition;
+ int pp;
+
+ if (pci_enable_device(pdev)) {
+ pci_err(pdev, "Cannot enable Switchtec device\n");
+ return;
+ }
+
+ mmio = pci_iomap(pdev, 0, 0);
+ if (mmio == NULL) {
+ pci_disable_device(pdev);
+ pci_err(pdev, "Cannot iomap Switchtec device\n");
+ return;
+ }
+
+ pci_info(pdev, "Setting Switchtec proxy ID aliases\n");
+
+ mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
+ mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
+ mmio_sys_info = mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
+
+ partition = ioread8(&mmio_ntb->partition_id);
+
+ partition_map = ioread32(&mmio_ntb->ep_map);
+ partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
+ partition_map &= ~(1ULL << partition);
+
+ for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
+ struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
+ u32 table_sz = 0;
+ int te;
+
+ if (!(partition_map & (1ULL << pp)))
+ continue;
+
+ pci_dbg(pdev, "Processing partition %d\n", pp);
+
+ mmio_peer_ctrl = &mmio_ctrl[pp];
+
+ table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
+ if (!table_sz) {
+ pci_warn(pdev, "Partition %d table_sz 0\n", pp);
+ continue;
+ }
+
+ if (table_sz > 512) {
+ pci_warn(pdev,
+ "Invalid Switchtec partition %d table_sz %d\n",
+ pp, table_sz);
+ continue;
+ }
+
+ for (te = 0; te < table_sz; te++) {
+ u32 rid_entry;
+ u8 devfn;
+
+ rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
+ devfn = (rid_entry >> 1) & 0xFF;
+ pci_dbg(pdev,
+ "Aliasing Partition %d Proxy ID %02x.%d\n",
+ pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ pci_add_dma_alias(pdev, devfn);
+ }
+ }
+
+ pci_iounmap(pdev, mmio);
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
+ quirk_switchtec_ntb_dma_alias);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 6f072eae4f7a..461e7fd2756f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/pci-aspm.h>
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
@@ -19,11 +18,12 @@ static void pci_stop_dev(struct pci_dev *dev)
{
pci_pme_active(dev, false);
- if (dev->is_added) {
+ if (pci_dev_is_added(dev)) {
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- dev->is_added = 0;
+
+ pci_dev_assign_added(dev, false);
}
if (dev->bus->self)
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index a7b5c37a85ec..137bf0cee897 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -80,7 +80,8 @@ EXPORT_SYMBOL_GPL(pci_disable_rom);
* The PCI window size could be much larger than the
* actual image size.
*/
-size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+static size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom,
+ size_t size)
{
void __iomem *image;
int last_image;
@@ -106,8 +107,14 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
length = readw(pds + 16);
image += length * 512;
/* Avoid iterating through memory outside the resource window */
- if (image > rom + size)
+ if (image >= rom + size)
break;
+ if (!last_image) {
+ if (readw(image) != 0xAA55) {
+ pci_info(pdev, "No more image in the PCI ROM\n");
+ break;
+ }
+ }
} while (length && !last_image);
/* never return a size larger than the PCI resource window */
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 47cd0c037433..9940cc70f38b 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -641,7 +641,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
- if (reg != MICROSEMI_VENDOR_ID)
+ if (reg != PCI_VENDOR_ID_MICROSEMI)
break;
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
@@ -1203,7 +1203,7 @@ static void init_pff(struct switchtec_dev *stdev)
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
- if (reg != MICROSEMI_VENDOR_ID)
+ if (reg != PCI_VENDOR_ID_MICROSEMI)
break;
}
@@ -1267,7 +1267,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
struct switchtec_dev *stdev;
int rc;
- if (pdev->class == MICROSEMI_NTB_CLASSCODE)
+ if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
request_module_nowait("ntb_hw_switchtec");
stdev = stdev_create(pdev);
@@ -1321,19 +1321,19 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
#define SWITCHTEC_PCI_DEVICE(device_id) \
{ \
- .vendor = MICROSEMI_VENDOR_ID, \
+ .vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
- .class = MICROSEMI_MGMT_CLASSCODE, \
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
}, \
{ \
- .vendor = MICROSEMI_VENDOR_ID, \
+ .vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
- .class = MICROSEMI_NTB_CLASSCODE, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
}
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 8617565ba561..4963c2e2bd4c 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -146,7 +146,7 @@ static int pci_vpd_wait(struct pci_dev *dev)
if (!vpd->busy)
return 0;
- while (time_before(jiffies, timeout)) {
+ do {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
@@ -160,10 +160,13 @@ static int pci_vpd_wait(struct pci_dev *dev)
if (fatal_signal_pending(current))
return -EINTR;
+ if (time_after(jiffies, timeout))
+ break;
+
usleep_range(10, max_sleep);
if (max_sleep < 1024)
max_sleep *= 2;
- }
+ } while (true);
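The rewritten loop polls first and checks the deadline only after each read, so a completion that races with the timeout is still observed, and the sleep doubles up to a cap between attempts. A standalone sketch of the same poll-with-backoff shape; the 2-second budget and the op_done() stand-in are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool op_done(int iter) { return iter >= 3; } /* stand-in check */

int main(void)
{
	time_t deadline = time(NULL) + 2;	/* ~2 s budget */
	useconds_t sleep_us = 10, max_us = 1024;
	int iter = 0;

	do {
		if (op_done(iter++)) {
			puts("completed");
			return 0;
		}
		if (time(NULL) > deadline)
			break;
		usleep(sleep_us);
		if (sleep_us < max_us)
			sleep_us *= 2;		/* exponential backoff */
	} while (true);

	puts("timed out");
	return 1;
}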
pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
return -ETIMEDOUT;
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 34aad895a239..18802096148e 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -712,42 +712,6 @@ int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
EXPORT_SYMBOL(pcmcia_request_irq);
-/**
- * pcmcia_request_exclusive_irq() - attempt to request an exclusive IRQ first
- * @p_dev: the associated PCMCIA device
- * @handler: IRQ handler to register
- *
- * pcmcia_request_exclusive_irq() is a wrapper around request_irq() which
- * attempts first to request an exclusive IRQ. If it fails, it also accepts
- * a shared IRQ, but prints out a warning. PCMCIA drivers should allow for
- * IRQ sharing and either use request_irq directly (then they need to call
- * free_irq() themselves, too), or the pcmcia_request_irq() function.
- */
-int __must_check
-__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
- irq_handler_t handler)
-{
- int ret;
-
- if (!p_dev->irq)
- return -EINVAL;
-
- ret = request_irq(p_dev->irq, handler, 0, p_dev->devname, p_dev->priv);
- if (ret) {
- ret = pcmcia_request_irq(p_dev, handler);
- dev_warn(&p_dev->dev, "pcmcia: request for exclusive IRQ could not be fulfilled\n");
- dev_warn(&p_dev->dev, "pcmcia: the driver needs updating to supported shared IRQ lines\n");
- }
- if (ret)
- dev_info(&p_dev->dev, "request_irq() failed\n");
- else
- p_dev->_irq = 1;
-
- return ret;
-} /* pcmcia_request_exclusive_irq */
-EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);
-
-
#ifdef CONFIG_PCMCIA_PROBE
/* mask of IRQs already reserved by other cards, we should avoid using them */
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 0d09d8e669cd..1bfeb160c5b1 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -53,6 +53,16 @@ enum {
CCI_IF_MAX,
};
+#define NUM_HW_CNTRS_CII_4XX 4
+#define NUM_HW_CNTRS_CII_5XX 8
+#define NUM_HW_CNTRS_MAX NUM_HW_CNTRS_CII_5XX
+
+#define FIXED_HW_CNTRS_CII_4XX 1
+#define FIXED_HW_CNTRS_CII_5XX 0
+#define FIXED_HW_CNTRS_MAX FIXED_HW_CNTRS_CII_4XX
+
+#define HW_CNTRS_MAX (NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
+
struct event_range {
u32 min;
u32 max;
@@ -633,8 +643,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
{
int i;
struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
-
- DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
+ DECLARE_BITMAP(mask, HW_CNTRS_MAX);
bitmap_zero(mask, cci_pmu->num_cntrs);
for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
@@ -940,7 +949,7 @@ static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
int i;
- DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
+ DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
bitmap_zero(saved_mask, cci_pmu->num_cntrs);
pmu_save_counters(cci_pmu, saved_mask);
@@ -1245,7 +1254,7 @@ static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
- unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
+ unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
struct cci_pmu_hw_events fake_pmu = {
/*
* Initialise the fake PMU. We only need to populate the
@@ -1403,6 +1412,11 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
char *name = model->name;
u32 num_cntrs;
+ if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
+ return -EINVAL;
+ if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
+ return -EINVAL;
+
pmu_event_attr_group.attrs = model->event_attrs;
pmu_format_attr_group.attrs = model->format_attrs;
@@ -1455,8 +1469,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI400_PMU
[CCI400_R0] = {
.name = "CCI_400",
- .fixed_hw_cntrs = 1, /* Cycle counter */
- .num_hw_cntrs = 4,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
.event_attrs = cci400_r0_pmu_event_attrs,
@@ -1475,8 +1489,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
},
[CCI400_R1] = {
.name = "CCI_400_r1",
- .fixed_hw_cntrs = 1, /* Cycle counter */
- .num_hw_cntrs = 4,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
.event_attrs = cci400_r1_pmu_event_attrs,
@@ -1497,8 +1511,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI5xx_PMU
[CCI500_R0] = {
.name = "CCI_500",
- .fixed_hw_cntrs = 0,
- .num_hw_cntrs = 8,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
.cntr_size = SZ_64K,
.format_attrs = cci5xx_pmu_format_attrs,
.event_attrs = cci5xx_pmu_event_attrs,
@@ -1521,8 +1535,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
},
[CCI550_R0] = {
.name = "CCI_550",
- .fixed_hw_cntrs = 0,
- .num_hw_cntrs = 8,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
.cntr_size = SZ_64K,
.format_attrs = cci5xx_pmu_format_attrs,
.event_attrs = cci5xx_pmu_event_attrs,
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index b416ee18e6bb..7dd850e02f19 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -1485,17 +1486,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ccn);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- if (!devm_request_mem_region(ccn->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- ccn->base = devm_ioremap(ccn->dev, res->start,
- resource_size(res));
- if (!ccn->base)
- return -EFAULT;
+ ccn->base = devm_ioremap_resource(ccn->dev, res);
+ if (IS_ERR(ccn->base))
+ return PTR_ERR(ccn->base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index a6347d487635..7f01f6f60b87 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -28,6 +28,14 @@
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
+static inline u64 arm_pmu_event_max_period(struct perf_event *event)
+{
+ if (event->hw.flags & ARMPMU_EVT_64BIT)
+ return GENMASK_ULL(63, 0);
+ else
+ return GENMASK_ULL(31, 0);
+}
+
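The helper selects the counter width per event, and that width is what makes wraparound-safe deltas work: (new - prev) masked to the counter width yields the elapsed count even when the hardware counter overflowed between reads. A standalone check of the arithmetic, with a local GENMASK_ULL matching the kernel macro's semantics:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

int main(void)
{
	uint64_t max_period = GENMASK_ULL(31, 0);	/* 32-bit event */
	uint64_t prev = 0xfffffff0ULL;	/* just before wrap */
	uint64_t now  = 0x10ULL;	/* just after wrap */
	uint64_t delta = (now - prev) & max_period;

	printf("delta = %llu\n", (unsigned long long)delta); /* 32 */
	return 0;
}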
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -114,8 +122,10 @@ int armpmu_event_set_period(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
+ u64 max_period;
int ret = 0;
+ max_period = arm_pmu_event_max_period(event);
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
@@ -136,12 +146,12 @@ int armpmu_event_set_period(struct perf_event *event)
* effect we are reducing max_period to account for
* interrupt latency (and we are being very conservative).
*/
- if (left > (armpmu->max_period >> 1))
- left = armpmu->max_period >> 1;
+ if (left > (max_period >> 1))
+ left = (max_period >> 1);
local64_set(&hwc->prev_count, (u64)-left);
- armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+ armpmu->write_counter(event, (u64)(-left) & max_period);
perf_event_update_userpage(event);
@@ -153,6 +163,7 @@ u64 armpmu_event_update(struct perf_event *event)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
+ u64 max_period = arm_pmu_event_max_period(event);
again:
prev_raw_count = local64_read(&hwc->prev_count);
@@ -162,7 +173,7 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+ delta = (new_raw_count - prev_raw_count) & max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
@@ -227,11 +238,10 @@ armpmu_del(struct perf_event *event, int flags)
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
- clear_bit(idx, hw_events->used_mask);
- if (armpmu->clear_event_idx)
- armpmu->clear_event_idx(hw_events, event);
-
+ armpmu->clear_event_idx(hw_events, event);
perf_event_update_userpage(event);
+ /* Clear the allocated counter */
+ hwc->idx = -1;
}
static int
@@ -360,6 +370,7 @@ __hw_perf_event_init(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int mapping;
+ hwc->flags = 0;
mapping = armpmu->map_event(event);
if (mapping < 0) {
@@ -402,7 +413,7 @@ __hw_perf_event_init(struct perf_event *event)
* is far less likely to overtake the previous one unless
* you have some serious IRQ latency issues.
*/
- hwc->sample_period = armpmu->max_period >> 1;
+ hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@@ -654,14 +665,9 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
int idx;
for (idx = 0; idx < armpmu->num_events; idx++) {
- /*
- * If the counter is not used skip it, there is no
- * need of stopping/restarting it.
- */
- if (!test_bit(idx, hw_events->used_mask))
- continue;
-
event = hw_events->events[idx];
+ if (!event)
+ continue;
switch (cmd) {
case CPU_PM_ENTER:
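
The arm_pmu.c hunks above drop the global armpmu->max_period in favour of a per-event value keyed off ARMPMU_EVT_64BIT, so 64-bit (e.g. chained) and 32-bit counters can coexist. A standalone userspace sketch of the masked-delta arithmetic used by armpmu_event_update(), with all counter values purely illustrative:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* GENMASK_ULL(31, 0): the 32-bit case of arm_pmu_event_max_period() */
	uint64_t max_period = 0xffffffffULL;
	uint64_t prev = 0xfffffff0ULL;	/* last snapshot, just before a wrap */
	uint64_t now = 0x00000010ULL;	/* counter read after the wrap */

	/* same expression as the patched delta computation; the mask keeps
	 * the delta correct across the 32-bit wrap */
	uint64_t delta = (now - prev) & max_period;

	assert(delta == 0x20);	/* 0x10 events before the wrap + 0x10 after */
	return 0;
}
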
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 971ff336494a..96075cecb0ae 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -160,7 +160,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
static int armpmu_request_irqs(struct arm_pmu *armpmu)
{
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
- int cpu, err;
+ int cpu, err = 0;
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 44df61397a38..9efd2413240c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -350,19 +350,21 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
/*
* Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID
- * is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is
- * in MPIDR[aff1]. If this changes in future, this shall be updated.
+ * If multi-threading is supported, CCL_ID is the low 3 bits of MPIDR[Aff2]
+ * and SCCL_ID is the upper 5 bits of the Aff2 field; if not, SCCL_ID
+ * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
*/
static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
{
u64 mpidr = read_cpuid_mpidr();
if (mpidr & MPIDR_MT_BITMASK) {
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+
if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ *sccl_id = aff2 >> 3;
if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ *ccl_id = aff2 & 0x7;
} else {
if (sccl_id)
*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
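
A quick userspace check of the Aff2 split introduced above (the value 0x2b is purely illustrative):

#include <assert.h>

int main(void)
{
	int aff2 = 0x2b;	/* binary 00101 011 */

	assert((aff2 >> 3) == 5);	/* SCCL_ID: upper 5 bits of Aff2 */
	assert((aff2 & 0x7) == 3);	/* CCL_ID: low 3 bits of Aff2 */
	return 0;
}
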
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 6bdb1dad805f..0e31f1392a53 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
case PMU_TYPE_IOB:
return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
case PMU_TYPE_IOB_SLOW:
- return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id);
+ return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
case PMU_TYPE_MCB:
return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
case PMU_TYPE_MC:
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 97d27b0d5cc7..8786a9674471 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -80,3 +80,13 @@ config PHY_BRCM_USB
This driver is required by the USB XHCI, EHCI and OHCI
drivers.
If unsure, say N.
+
+config PHY_BCM_SR_PCIE
+ tristate "Broadcom Stingray PCIe PHY driver"
+ depends on OF && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select GENERIC_PHY
+ select MFD_SYSCON
+ default ARCH_BCM_IPROC
+ help
+ Enable this to support the Broadcom Stingray PCIe PHY.
+ If unsure, say N.
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index 13e000c1a43a..0f60184e6662 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -9,3 +9,5 @@ obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_BRCM_USB) += phy-brcm-usb-dvr.o
phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
+
+obj-$(CONFIG_PHY_BCM_SR_PCIE) += phy-bcm-sr-pcie.o
diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
new file mode 100644
index 000000000000..c10e95f86de5
--- /dev/null
+++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Broadcom
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* we have up to 8 PAXB-based RCs; the 9th core is always PAXC */
+#define SR_NR_PCIE_PHYS 9
+#define SR_PAXC_PHY_IDX (SR_NR_PCIE_PHYS - 1)
+
+#define PCIE_PIPEMUX_CFG_OFFSET 0x10c
+#define PCIE_PIPEMUX_SELECT_STRAP 0xf
+
+#define CDRU_STRAP_DATA_LSW_OFFSET 0x5c
+#define PCIE_PIPEMUX_SHIFT 19
+#define PCIE_PIPEMUX_MASK 0xf
+
+#define MHB_MEM_PW_PAXC_OFFSET 0x1c0
+#define MHB_PWR_ARR_POWERON 0x8
+#define MHB_PWR_ARR_POWEROK 0x4
+#define MHB_PWR_POWERON 0x2
+#define MHB_PWR_POWEROK 0x1
+#define MHB_PWR_STATUS_MASK (MHB_PWR_ARR_POWERON | \
+ MHB_PWR_ARR_POWEROK | \
+ MHB_PWR_POWERON | \
+ MHB_PWR_POWEROK)
+
+struct sr_pcie_phy_core;
+
+/**
+ * struct sr_pcie_phy - Stingray PCIe PHY
+ *
+ * @core: pointer to the Stingray PCIe PHY core control
+ * @index: PHY index
+ * @phy: pointer to the kernel PHY device
+ */
+struct sr_pcie_phy {
+ struct sr_pcie_phy_core *core;
+ unsigned int index;
+ struct phy *phy;
+};
+
+/**
+ * struct sr_pcie_phy_core - Stingray PCIe PHY core control
+ *
+ * @dev: pointer to device
+ * @base: base register of PCIe SS
+ * @cdru: regmap to the CDRU device
+ * @mhb: regmap to the MHB device
+ * @pipemux: PIPEMUX strap setting
+ * @phys: array of PCIe PHYs
+ */
+struct sr_pcie_phy_core {
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *cdru;
+ struct regmap *mhb;
+ u32 pipemux;
+ struct sr_pcie_phy phys[SR_NR_PCIE_PHYS];
+};
+
+/*
+ * PCIe PIPEMUX lookup table
+ *
+ * Each array index represents a PIPEMUX strap setting.
+ * Each array element is a bitmap in which a set bit means the PCIe
+ * core and its associated SerDes have been enabled as RC and are available
+ * for use
+ */
+static const u8 pipemux_table[] = {
+ /* PIPEMUX = 0, EP 1x16 */
+ 0x00,
+ /* PIPEMUX = 1, EP 2x8 */
+ 0x00,
+ /* PIPEMUX = 2, EP 4x4 */
+ 0x00,
+ /* PIPEMUX = 3, RC 2x8, cores 0, 7 */
+ 0x81,
+ /* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
+ 0xc3,
+ /* PIPEMUX = 5, RC 8x2, all 8 cores */
+ 0xff,
+ /* PIPEMUX = 6, RC 3x4 + 2x2, cores 0, 2, 3, 6, 7 */
+ 0xcd,
+ /* PIPEMUX = 7, RC 1x4 + 6x2, cores 0, 2, 3, 4, 5, 6, 7 */
+ 0xfd,
+ /* PIPEMUX = 8, EP 1x8 + RC 4x2, cores 4, 5, 6, 7 */
+ 0xf0,
+ /* PIPEMUX = 9, EP 1x8 + RC 2x4, cores 6, 7 */
+ 0xc0,
+ /* PIPEMUX = 10, EP 2x4 + RC 2x4, cores 1, 6 */
+ 0x42,
+ /* PIPEMUX = 11, EP 2x4 + RC 4x2, cores 2, 3, 4, 5 */
+ 0x3c,
+ /* PIPEMUX = 12, EP 1x4 + RC 6x2, cores 2, 3, 4, 5, 6, 7 */
+ 0xfc,
+ /* PIPEMUX = 13, RC 2x4 + RC 1x4 + 2x2, cores 2, 3, 6 */
+ 0x4c,
+};
+
+/*
+ * Return true if the strap setting is valid
+ */
+static bool pipemux_strap_is_valid(u32 pipemux)
+{
+ return !!(pipemux < ARRAY_SIZE(pipemux_table));
+}
+
+/*
+ * Read the PCIe PIPEMUX from strap
+ */
+static u32 pipemux_strap_read(struct sr_pcie_phy_core *core)
+{
+ u32 pipemux;
+
+ /*
+ * Read the PIPEMUX configuration register to determine the PIPEMUX
+ * setting
+ *
+ * If the register value selects the HW strap, fall back to reading the
+ * strap from the CDRU register
+ */
+ pipemux = readl(core->base + PCIE_PIPEMUX_CFG_OFFSET);
+ pipemux &= PCIE_PIPEMUX_MASK;
+ if (pipemux == PCIE_PIPEMUX_SELECT_STRAP) {
+ regmap_read(core->cdru, CDRU_STRAP_DATA_LSW_OFFSET, &pipemux);
+ pipemux >>= PCIE_PIPEMUX_SHIFT;
+ pipemux &= PCIE_PIPEMUX_MASK;
+ }
+
+ return pipemux;
+}
+
+/*
+ * Given a PIPEMUX strap and PCIe core index, return true if the PCIe core
+ * is configured as a root complex and needs to be enabled
+ */
+static bool pcie_core_is_for_rc(struct sr_pcie_phy *phy)
+{
+ struct sr_pcie_phy_core *core = phy->core;
+ unsigned int core_idx = phy->index;
+
+ return !!((pipemux_table[core->pipemux] >> core_idx) & 0x1);
+}
+
+static int sr_pcie_phy_init(struct phy *p)
+{
+ struct sr_pcie_phy *phy = phy_get_drvdata(p);
+
+ /*
+ * Check whether this PHY belongs to a root complex. If so, return
+ * zero so the host driver can proceed to enumeration. If not, return
+ * an error, which forces the host driver to bail out
+ */
+ if (pcie_core_is_for_rc(phy))
+ return 0;
+
+ return -ENODEV;
+}
+
+static int sr_paxc_phy_init(struct phy *p)
+{
+ struct sr_pcie_phy *phy = phy_get_drvdata(p);
+ struct sr_pcie_phy_core *core = phy->core;
+ unsigned int core_idx = phy->index;
+ u32 val;
+
+ if (core_idx != SR_PAXC_PHY_IDX)
+ return -EINVAL;
+
+ regmap_read(core->mhb, MHB_MEM_PW_PAXC_OFFSET, &val);
+ if ((val & MHB_PWR_STATUS_MASK) != MHB_PWR_STATUS_MASK) {
+ dev_err(core->dev, "PAXC is not powered up\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct phy_ops sr_pcie_phy_ops = {
+ .init = sr_pcie_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static const struct phy_ops sr_paxc_phy_ops = {
+ .init = sr_paxc_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *sr_pcie_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct sr_pcie_phy_core *core;
+ int phy_idx;
+
+ core = dev_get_drvdata(dev);
+ if (!core)
+ return ERR_PTR(-EINVAL);
+
+ phy_idx = args->args[0];
+
+ if (WARN_ON(phy_idx >= SR_NR_PCIE_PHYS))
+ return ERR_PTR(-ENODEV);
+
+ return core->phys[phy_idx].phy;
+}
+
+static int sr_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct sr_pcie_phy_core *core;
+ struct resource *res;
+ struct phy_provider *provider;
+ unsigned int phy_idx = 0;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ core->base = devm_ioremap_resource(core->dev, res);
+ if (IS_ERR(core->base))
+ return PTR_ERR(core->base);
+
+ core->cdru = syscon_regmap_lookup_by_phandle(node, "brcm,sr-cdru");
+ if (IS_ERR(core->cdru)) {
+ dev_err(core->dev, "unable to find CDRU device\n");
+ return PTR_ERR(core->cdru);
+ }
+
+ core->mhb = syscon_regmap_lookup_by_phandle(node, "brcm,sr-mhb");
+ if (IS_ERR(core->mhb)) {
+ dev_err(core->dev, "unable to find MHB device\n");
+ return PTR_ERR(core->mhb);
+ }
+
+ /* read the PCIe PIPEMUX strap setting */
+ core->pipemux = pipemux_strap_read(core);
+ if (!pipemux_strap_is_valid(core->pipemux)) {
+ dev_err(core->dev, "invalid PCIe PIPEMUX strap %u\n",
+ core->pipemux);
+ return -EIO;
+ }
+
+ for (phy_idx = 0; phy_idx < SR_NR_PCIE_PHYS; phy_idx++) {
+ struct sr_pcie_phy *p = &core->phys[phy_idx];
+ const struct phy_ops *ops;
+
+ if (phy_idx == SR_PAXC_PHY_IDX)
+ ops = &sr_paxc_phy_ops;
+ else
+ ops = &sr_pcie_phy_ops;
+
+ p->phy = devm_phy_create(dev, NULL, ops);
+ if (IS_ERR(p->phy)) {
+ dev_err(dev, "failed to create PCIe PHY\n");
+ return PTR_ERR(p->phy);
+ }
+
+ p->core = core;
+ p->index = phy_idx;
+ phy_set_drvdata(p->phy, p);
+ }
+
+ dev_set_drvdata(dev, core);
+
+ provider = devm_of_phy_provider_register(dev, sr_pcie_phy_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(provider);
+ }
+
+ dev_info(dev, "Stingray PCIe PHY driver initialized\n");
+
+ return 0;
+}
+
+static const struct of_device_id sr_pcie_phy_match_table[] = {
+ { .compatible = "brcm,sr-pcie-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sr_pcie_phy_match_table);
+
+static struct platform_driver sr_pcie_phy_driver = {
+ .driver = {
+ .name = "sr-pcie-phy",
+ .of_match_table = sr_pcie_phy_match_table,
+ },
+ .probe = sr_pcie_phy_probe,
+};
+module_platform_driver(sr_pcie_phy_driver);
+
+MODULE_AUTHOR("Ray Jui <ray.jui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Stingray PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 1b7febc43da9..29d2c3b1913a 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
{
void __iomem *ctrl = params->ctrl_regs;
+ USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+
if (BRCM_ID(params->family_id) == 0x7366) {
/*
* The PHY3_SOFT_RESETB bits default to the wrong state.
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index 2c7a57f2d595..c1bb6725e48f 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin SATA PHY driver
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/phy/marvell/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c
index 8f2b5cae360f..a43df63007c5 100644
--- a/drivers/phy/marvell/phy-berlin-usb.c
+++ b/drivers/phy/marvell/phy-berlin-usb.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
* Jisheng Zhang <jszhang@marvell.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/io.h>
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 4ef429250d7b..86a5f7b9448b 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/io.h>
diff --git a/drivers/phy/mediatek/Makefile b/drivers/phy/mediatek/Makefile
index e5074b607d3d..ee49edc97ee9 100644
--- a/drivers/phy/mediatek/Makefile
+++ b/drivers/phy/mediatek/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the phy drivers.
#
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 38c281b5abbb..3eb8e1bd7b78 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <dt-bindings/phy/phy.h>
@@ -50,6 +42,12 @@
#define PA0_RG_U2PLL_FORCE_ON BIT(15)
#define PA0_RG_USB20_INTR_EN BIT(5)
+#define U3P_USBPHYACR1 0x004
+#define PA1_RG_VRT_SEL GENMASK(14, 12)
+#define PA1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12)
+#define PA1_RG_TERM_SEL GENMASK(10, 8)
+#define PA1_RG_TERM_SEL_VAL(x) ((0x7 & (x)) << 8)
+
#define U3P_USBPHYACR2 0x008
#define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18)
@@ -103,6 +101,9 @@
#define P2C_RG_AVALID BIT(2)
#define P2C_RG_IDDIG BIT(1)
+#define U3P_U2PHYBC12C 0x080
+#define P2C_RG_CHGDT_EN BIT(0)
+
#define U3P_U3_CHIP_GPIO_CTLD 0x0c
#define P3C_REG_IP_SW_RST BIT(31)
#define P3C_MCU_BUS_CK_GATE_EN BIT(30)
@@ -296,6 +297,10 @@ struct mtk_phy_instance {
struct clk *ref_clk; /* reference clock of analog phy */
u32 index;
u8 type;
+ int eye_src;
+ int eye_vrt;
+ int eye_term;
+ bool bc12_en;
};
struct mtk_tphy {
@@ -320,6 +325,10 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
int fm_out;
u32 tmp;
+ /* use force value */
+ if (instance->eye_src)
+ return;
+
/* enable USB ring oscillator */
tmp = readl(com + U3P_USBPHYACR5);
tmp |= PA5_RG_U2_HSTX_SRCAL_EN;
@@ -826,6 +835,61 @@ static void phy_v2_banks_init(struct mtk_tphy *tphy,
}
}
+static void phy_parse_property(struct mtk_tphy *tphy,
+ struct mtk_phy_instance *instance)
+{
+ struct device *dev = &instance->phy->dev;
+
+ if (instance->type != PHY_TYPE_USB2)
+ return;
+
+ instance->bc12_en = device_property_read_bool(dev, "mediatek,bc12");
+ device_property_read_u32(dev, "mediatek,eye-src",
+ &instance->eye_src);
+ device_property_read_u32(dev, "mediatek,eye-vrt",
+ &instance->eye_vrt);
+ device_property_read_u32(dev, "mediatek,eye-term",
+ &instance->eye_term);
+ dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d\n",
+ instance->bc12_en, instance->eye_src,
+ instance->eye_vrt, instance->eye_term);
+}
+
+static void u2_phy_props_set(struct mtk_tphy *tphy,
+ struct mtk_phy_instance *instance)
+{
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ void __iomem *com = u2_banks->com;
+ u32 tmp;
+
+ if (instance->bc12_en) {
+ tmp = readl(com + U3P_U2PHYBC12C);
+ tmp |= P2C_RG_CHGDT_EN; /* BC1.2 path Enable */
+ writel(tmp, com + U3P_U2PHYBC12C);
+ }
+
+ if (instance->eye_src) {
+ tmp = readl(com + U3P_USBPHYACR5);
+ tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
+ tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src);
+ writel(tmp, com + U3P_USBPHYACR5);
+ }
+
+ if (instance->eye_vrt) {
+ tmp = readl(com + U3P_USBPHYACR1);
+ tmp &= ~PA1_RG_VRT_SEL;
+ tmp |= PA1_RG_VRT_SEL_VAL(instance->eye_vrt);
+ writel(tmp, com + U3P_USBPHYACR1);
+ }
+
+ if (instance->eye_term) {
+ tmp = readl(com + U3P_USBPHYACR1);
+ tmp &= ~PA1_RG_TERM_SEL;
+ tmp |= PA1_RG_TERM_SEL_VAL(instance->eye_term);
+ writel(tmp, com + U3P_USBPHYACR1);
+ }
+}
+
static int mtk_phy_init(struct phy *phy)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
@@ -847,6 +911,7 @@ static int mtk_phy_init(struct phy *phy)
switch (instance->type) {
case PHY_TYPE_USB2:
u2_phy_instance_init(tphy, instance);
+ u2_phy_props_set(tphy, instance);
break;
case PHY_TYPE_USB3:
u3_phy_instance_init(tphy, instance);
@@ -959,6 +1024,8 @@ static struct phy *mtk_phy_xlate(struct device *dev,
return ERR_PTR(-EINVAL);
}
+ phy_parse_property(tphy, instance);
+
return instance->phy;
}
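
The eye-diagram knobs added above boil down to masked field writes. A hedged userspace check of the PA1_RG_VRT_SEL helpers, confirming that eye_vrt = 5 lands in bits [14:12] and leaves the neighbouring bits untouched:

#include <assert.h>
#include <stdint.h>

#define PA1_RG_VRT_SEL		(0x7u << 12)	/* GENMASK(14, 12) */
#define PA1_RG_VRT_SEL_VAL(x)	((0x7u & (x)) << 12)

int main(void)
{
	uint32_t tmp = 0xffffffffu;	/* illustrative register contents */

	tmp &= ~PA1_RG_VRT_SEL;
	tmp |= PA1_RG_VRT_SEL_VAL(5);

	assert(((tmp >> 12) & 0x7) == 5);		/* field now holds 5 */
	assert((tmp | PA1_RG_VRT_SEL) == 0xffffffffu);	/* neighbours intact */
	return 0;
}
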
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 23705e1a0023..0075fb0bef8c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;
- error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+ error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
ddata->status_gpios->desc,
values);
if (error)
return;
- for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+ for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
val |= values[i] << i;
dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
__func__, i, values[i], val);
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 2d0c70b5589f..abbbe75070da 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -55,6 +55,7 @@ static int qcom_usb_hs_phy_set_mode(struct phy *phy, enum phy_mode mode)
case PHY_MODE_USB_OTG:
case PHY_MODE_USB_HOST:
val |= ULPI_INT_IDGRD;
+ /* fall through */
case PHY_MODE_USB_DEVICE:
val |= ULPI_INT_SESS_VALID;
default:
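
The /* fall through */ comment added above marks the missing break as intentional, which keeps -Wimplicit-fallthrough quiet (later kernels spell this with the fallthrough pseudo-keyword). A self-contained re-creation of the cascade, with illustrative names and bit values:

#include <assert.h>

#define INT_IDGRD	(1u << 4)	/* illustrative bit values */
#define INT_SESS_VALID	(1u << 3)

enum mode { MODE_OTG, MODE_HOST, MODE_DEVICE, MODE_NONE };

static unsigned int mode_bits(enum mode mode)
{
	unsigned int val = 0;

	switch (mode) {
	case MODE_OTG:
	case MODE_HOST:
		val |= INT_IDGRD;
		/* fall through: OTG/host also need session-valid */
	case MODE_DEVICE:
		val |= INT_SESS_VALID;
		break;
	default:
		break;
	}
	return val;
}

int main(void)
{
	assert(mode_bits(MODE_HOST) == (INT_IDGRD | INT_SESS_VALID));
	assert(mode_bits(MODE_DEVICE) == INT_SESS_VALID);
	return 0;
}
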
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index c845facacb06..4bd390c79d21 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -8,6 +8,13 @@ config PHY_RCAR_GEN2
help
Support for USB PHY found on Renesas R-Car generation 2 SoCs.
+config PHY_RCAR_GEN3_PCIE
+ tristate "Renesas R-Car generation 3 PCIe PHY driver"
+ depends on ARCH_RENESAS
+ select GENERIC_PHY
+ help
+ Support for the PCIe PHY found on Renesas R-Car generation 3 SoCs.
+
config PHY_RCAR_GEN3_USB2
tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
depends on ARCH_RENESAS
diff --git a/drivers/phy/renesas/Makefile b/drivers/phy/renesas/Makefile
index 8b6025916a93..4b76fc439ed6 100644
--- a/drivers/phy/renesas/Makefile
+++ b/drivers/phy/renesas/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o
+obj-$(CONFIG_PHY_RCAR_GEN3_PCIE) += phy-rcar-gen3-pcie.o
obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o
obj-$(CONFIG_PHY_RCAR_GEN3_USB3) += phy-rcar-gen3-usb3.o
diff --git a/drivers/phy/renesas/phy-rcar-gen3-pcie.c b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
new file mode 100644
index 000000000000..c4e4aa216936
--- /dev/null
+++ b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car Gen3 PCIe PHY driver
+ *
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define PHY_CTRL 0x4000 /* R8A77980 only */
+
+/* PHY control register (PHY_CTRL) */
+#define PHY_CTRL_PHY_PWDN BIT(2)
+
+struct rcar_gen3_phy {
+ struct phy *phy;
+ spinlock_t lock;
+ void __iomem *base;
+};
+
+static void rcar_gen3_phy_pcie_modify_reg(struct phy *p, unsigned int reg,
+ u32 clear, u32 set)
+{
+ struct rcar_gen3_phy *phy = phy_get_drvdata(p);
+ void __iomem *base = phy->base;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&phy->lock, flags);
+
+ value = readl(base + reg);
+ value &= ~clear;
+ value |= set;
+ writel(value, base + reg);
+
+ spin_unlock_irqrestore(&phy->lock, flags);
+}
+
+static int r8a77980_phy_pcie_power_on(struct phy *p)
+{
+ /* Power on the PCIe PHY */
+ rcar_gen3_phy_pcie_modify_reg(p, PHY_CTRL, PHY_CTRL_PHY_PWDN, 0);
+
+ return 0;
+}
+
+static int r8a77980_phy_pcie_power_off(struct phy *p)
+{
+ /* Power off the PCIe PHY */
+ rcar_gen3_phy_pcie_modify_reg(p, PHY_CTRL, 0, PHY_CTRL_PHY_PWDN);
+
+ return 0;
+}
+
+static const struct phy_ops r8a77980_phy_pcie_ops = {
+ .power_on = r8a77980_phy_pcie_power_on,
+ .power_off = r8a77980_phy_pcie_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id rcar_gen3_phy_pcie_match_table[] = {
+ { .compatible = "renesas,r8a77980-pcie-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rcar_gen3_phy_pcie_match_table);
+
+static int rcar_gen3_phy_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct rcar_gen3_phy *phy;
+ struct resource *res;
+ void __iomem *base;
+ int error;
+
+ if (!dev->of_node) {
+ dev_err(dev,
+ "This driver must only be instantiated from the device tree\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ spin_lock_init(&phy->lock);
+
+ phy->base = base;
+
+ /*
+ * devm_phy_create() will call pm_runtime_enable(&phy->dev);
+ * the PHY core will then manage runtime PM for this device.
+ */
+ pm_runtime_enable(dev);
+
+ phy->phy = devm_phy_create(dev, NULL, &r8a77980_phy_pcie_ops);
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "Failed to create PCIe PHY\n");
+ error = PTR_ERR(phy->phy);
+ goto error;
+ }
+ phy_set_drvdata(phy->phy, phy);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "Failed to register PHY provider\n");
+ error = PTR_ERR(provider);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ pm_runtime_disable(dev);
+
+ return error;
+}
+
+static int rcar_gen3_phy_pcie_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+};
+
+static struct platform_driver rcar_gen3_phy_driver = {
+ .driver = {
+ .name = "phy_rcar_gen3_pcie",
+ .of_match_table = rcar_gen3_phy_pcie_match_table,
+ },
+ .probe = rcar_gen3_phy_pcie_probe,
+ .remove = rcar_gen3_phy_pcie_remove,
+};
+
+module_platform_driver(rcar_gen3_phy_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 PCIe PHY");
+MODULE_AUTHOR("Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index dd50371225bc..e86752be1f19 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -161,10 +161,10 @@ config PINCTRL_MCP23S08
select REGMAP_SPI if SPI_MASTER
select GENERIC_PINCONF
help
- SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
- I/O expanders.
- This provides a GPIO interface supporting inputs and outputs.
- The I2C versions of the chips can be used as interrupt-controller.
+ SPI/I2C driver for Microchip MCP23S08 / MCP23S17 / MCP23S18 /
+ MCP23008 / MCP23017 / MCP23018 I/O expanders.
+ This provides a GPIO interface supporting inputs and outputs and a
+ corresponding interrupt-controller.
config PINCTRL_OXNAS
bool
@@ -332,6 +332,7 @@ config PINCTRL_OCELOT
depends on OF
depends on MSCC_OCELOT || COMPILE_TEST
select GPIOLIB
+ select GPIOLIB_IRQCHIP
select GENERIC_PINCONF
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
@@ -360,6 +361,7 @@ source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
source "drivers/pinctrl/zte/Kconfig"
source "drivers/pinctrl/meson/Kconfig"
+source "drivers/pinctrl/cirrus/Kconfig"
config PINCTRL_XWAY
bool
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index de40863e7297..46ef9bd52096 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -64,3 +64,4 @@ obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-y += mediatek/
obj-$(CONFIG_PINCTRL_ZX) += zte/
+obj-y += cirrus/
diff --git a/drivers/pinctrl/actions/Kconfig b/drivers/pinctrl/actions/Kconfig
index 490927b4ea76..2397cb0f6011 100644
--- a/drivers/pinctrl/actions/Kconfig
+++ b/drivers/pinctrl/actions/Kconfig
@@ -5,6 +5,7 @@ config PINCTRL_OWL
select PINCONF
select GENERIC_PINCONF
select GPIOLIB
+ select GPIOLIB_IRQCHIP
help
Say Y here to enable Actions Semi OWL pinctrl driver
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index b5c880b50bb3..9d18c02f192b 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -45,6 +46,9 @@ struct owl_pinctrl {
struct clk *clk;
const struct owl_pinctrl_soc_data *soc;
void __iomem *base;
+ struct irq_chip irq_chip;
+ unsigned int num_irq;
+ unsigned int *irq;
};
static void owl_update_bits(void __iomem *base, u32 mask, u32 val)
@@ -701,10 +705,213 @@ static int owl_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static void irq_set_type(struct owl_pinctrl *pctrl, int gpio, unsigned int type)
+{
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int offset, value, irq_type = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ /*
+ * Since the hardware doesn't support interrupts on both edges,
+ * emulate it in software by arming a single-edge
+ * interrupt and switching to the opposite edge while ACKing
+ * the interrupt
+ */
+ if (owl_gpio_get(&pctrl->chip, gpio))
+ irq_type = OWL_GPIO_INT_EDGE_FALLING;
+ else
+ irq_type = OWL_GPIO_INT_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = OWL_GPIO_INT_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = OWL_GPIO_INT_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = OWL_GPIO_INT_LEVEL_HIGH;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = OWL_GPIO_INT_LEVEL_LOW;
+ break;
+
+ default:
+ break;
+ }
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ offset = (gpio < 16) ? 4 : 0;
+ value = readl_relaxed(gpio_base + port->intc_type + offset);
+ value &= ~(OWL_GPIO_INT_MASK << ((gpio % 16) * 2));
+ value |= irq_type << ((gpio % 16) * 2);
+ writel_relaxed(value, gpio_base + port->intc_type + offset);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void owl_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int gpio = data->hwirq;
+ u32 val;
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_gpio_update_reg(gpio_base + port->intc_msk, gpio, false);
+
+ /* disable the port interrupt if no GPIO interrupt remains unmasked */
+ val = readl_relaxed(gpio_base + port->intc_msk);
+ if (val == 0)
+ owl_gpio_update_reg(gpio_base + port->intc_ctl,
+ OWL_GPIO_CTLR_ENABLE, false);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void owl_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int gpio = data->hwirq;
+ u32 value;
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* enable port interrupt */
+ value = readl_relaxed(gpio_base + port->intc_ctl);
+ value |= BIT(OWL_GPIO_CTLR_ENABLE) | BIT(OWL_GPIO_CTLR_SAMPLE_CLK_24M);
+ writel_relaxed(value, gpio_base + port->intc_ctl);
+
+ /* enable GPIO interrupt */
+ owl_gpio_update_reg(gpio_base + port->intc_msk, gpio, true);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void owl_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int gpio = data->hwirq;
+
+ /*
+ * When emulating both edges, switch the interrupt edge to the
+ * opposite of the edge that just triggered
+ */
+ if (irqd_get_trigger_type(data) == IRQ_TYPE_EDGE_BOTH) {
+ if (owl_gpio_get(gc, gpio))
+ irq_set_type(pctrl, gpio, IRQ_TYPE_EDGE_FALLING);
+ else
+ irq_set_type(pctrl, gpio, IRQ_TYPE_EDGE_RISING);
+ }
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_gpio_update_reg(gpio_base + port->intc_ctl,
+ OWL_GPIO_CTLR_PENDING, true);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int owl_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ irq_set_handler_locked(data, handle_level_irq);
+ else
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ irq_set_type(pctrl, data->hwirq, type);
+
+ return 0;
+}
+
+static void owl_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct owl_pinctrl *pctrl = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain = pctrl->chip.irq.domain;
+ unsigned int parent = irq_desc_get_irq(desc);
+ const struct owl_gpio_port *port;
+ void __iomem *base;
+ unsigned int pin, irq, offset = 0, i;
+ unsigned long pending_irq;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < pctrl->soc->nports; i++) {
+ port = &pctrl->soc->ports[i];
+ base = pctrl->base + port->offset;
+
+ /* skip ports that are not associated with this irq */
+ if (parent != pctrl->irq[i])
+ goto skip;
+
+ pending_irq = readl_relaxed(base + port->intc_pd);
+
+ for_each_set_bit(pin, &pending_irq, port->pins) {
+ irq = irq_find_mapping(domain, offset + pin);
+ generic_handle_irq(irq);
+
+ /* clear pending interrupt */
+ owl_gpio_update_reg(base + port->intc_pd, pin, true);
+ }
+
+skip:
+ offset += port->pins;
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
static int owl_gpio_init(struct owl_pinctrl *pctrl)
{
struct gpio_chip *chip;
- int ret;
+ struct gpio_irq_chip *gpio_irq;
+ int ret, i, j, offset;
chip = &pctrl->chip;
chip->base = -1;
@@ -714,6 +921,35 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
chip->owner = THIS_MODULE;
chip->of_node = pctrl->dev->of_node;
+ pctrl->irq_chip.name = chip->of_node->name;
+ pctrl->irq_chip.irq_ack = owl_gpio_irq_ack;
+ pctrl->irq_chip.irq_mask = owl_gpio_irq_mask;
+ pctrl->irq_chip.irq_unmask = owl_gpio_irq_unmask;
+ pctrl->irq_chip.irq_set_type = owl_gpio_irq_set_type;
+
+ gpio_irq = &chip->irq;
+ gpio_irq->chip = &pctrl->irq_chip;
+ gpio_irq->handler = handle_simple_irq;
+ gpio_irq->default_type = IRQ_TYPE_NONE;
+ gpio_irq->parent_handler = owl_gpio_irq_handler;
+ gpio_irq->parent_handler_data = pctrl;
+ gpio_irq->num_parents = pctrl->num_irq;
+ gpio_irq->parents = pctrl->irq;
+
+ gpio_irq->map = devm_kcalloc(pctrl->dev, chip->ngpio,
+ sizeof(*gpio_irq->map), GFP_KERNEL);
+ if (!gpio_irq->map)
+ return -ENOMEM;
+
+ for (i = 0, offset = 0; i < pctrl->soc->nports; i++) {
+ const struct owl_gpio_port *port = &pctrl->soc->ports[i];
+
+ for (j = 0; j < port->pins; j++)
+ gpio_irq->map[offset + j] = gpio_irq->parents[i];
+
+ offset += port->pins;
+ }
+
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to register gpiochip\n");
@@ -728,7 +964,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
{
struct resource *res;
struct owl_pinctrl *pctrl;
- int ret;
+ int ret, i;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
@@ -772,14 +1008,40 @@ int owl_pinctrl_probe(struct platform_device *pdev,
&owl_pinctrl_desc, pctrl);
if (IS_ERR(pctrl->pctrldev)) {
dev_err(&pdev->dev, "could not register Actions OWL pinmux driver\n");
- return PTR_ERR(pctrl->pctrldev);
+ ret = PTR_ERR(pctrl->pctrldev);
+ goto err_exit;
+ }
+
+ ret = platform_irq_count(pdev);
+ if (ret < 0)
+ goto err_exit;
+
+ pctrl->num_irq = ret;
+
+ pctrl->irq = devm_kcalloc(&pdev->dev, pctrl->num_irq,
+ sizeof(*pctrl->irq), GFP_KERNEL);
+ if (!pctrl->irq) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ for (i = 0; i < pctrl->num_irq ; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ goto err_exit;
+ pctrl->irq[i] = ret;
}
ret = owl_gpio_init(pctrl);
if (ret)
- return ret;
+ goto err_exit;
platform_set_drvdata(pdev, pctrl);
return 0;
+
+err_exit:
+ clk_disable_unprepare(pctrl->clk);
+
+ return ret;
}
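
The both-edge emulation above (arm one edge in irq_set_type(), flip it in owl_gpio_irq_ack()) can be modelled in a few lines of userspace C: always watch the edge opposite the current pin level, and re-arm on every acknowledge, so each transition raises exactly one interrupt.

#include <stdbool.h>
#include <stdio.h>

enum edge { EDGE_RISING, EDGE_FALLING };

/* a high pin can only fall next; a low pin can only rise */
static enum edge arm_for(bool level_is_high)
{
	return level_is_high ? EDGE_FALLING : EDGE_RISING;
}

int main(void)
{
	bool level = false;
	int i;

	for (i = 0; i < 4; i++) {
		printf("level=%d -> watching %s edge\n", level,
		       arm_for(level) == EDGE_RISING ? "rising" : "falling");
		level = !level;	/* the armed edge fires; the ack re-arms */
	}
	return 0;
}
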
diff --git a/drivers/pinctrl/actions/pinctrl-owl.h b/drivers/pinctrl/actions/pinctrl-owl.h
index 74342378937c..a724d1d406d4 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.h
+++ b/drivers/pinctrl/actions/pinctrl-owl.h
@@ -29,6 +29,18 @@ enum owl_pinconf_drv {
OWL_PINCONF_DRV_12MA,
};
+/* GPIO CTRL Bit Definition */
+#define OWL_GPIO_CTLR_PENDING 0
+#define OWL_GPIO_CTLR_ENABLE 1
+#define OWL_GPIO_CTLR_SAMPLE_CLK_24M 2
+
+/* GPIO TYPE Bit Definition */
+#define OWL_GPIO_INT_LEVEL_HIGH 0
+#define OWL_GPIO_INT_LEVEL_LOW 1
+#define OWL_GPIO_INT_EDGE_RISING 2
+#define OWL_GPIO_INT_EDGE_FALLING 3
+#define OWL_GPIO_INT_MASK 3
+
/**
* struct owl_pullctl - Actions pad pull control register
* @reg: offset to the pull control register
@@ -121,6 +133,10 @@ struct owl_pinmux_func {
* @outen: offset of the output enable register.
* @inen: offset of the input enable register.
* @dat: offset of the data register.
+ * @intc_ctl: offset of the interrupt control register.
+ * @intc_pd: offset of the interrupt pending register.
+ * @intc_msk: offset of the interrupt mask register.
+ * @intc_type: offset of the interrupt type register.
*/
struct owl_gpio_port {
unsigned int offset;
@@ -128,6 +144,10 @@ struct owl_gpio_port {
unsigned int outen;
unsigned int inen;
unsigned int dat;
+ unsigned int intc_ctl;
+ unsigned int intc_pd;
+ unsigned int intc_msk;
+ unsigned int intc_type;
};
/**
@@ -140,7 +160,7 @@ struct owl_gpio_port {
* @ngroups: number of entries in @groups.
* @padinfo: array describing the pad info of this SoC.
* @ngpios: number of pingroups the driver should expose as GPIOs.
- * @port: array describing all GPIO ports of this SoC.
+ * @ports: array describing all GPIO ports of this SoC.
* @nports: number of GPIO ports in this SoC.
*/
struct owl_pinctrl_soc_data {
diff --git a/drivers/pinctrl/actions/pinctrl-s900.c b/drivers/pinctrl/actions/pinctrl-s900.c
index 5503c7945764..ea67b14ef93b 100644
--- a/drivers/pinctrl/actions/pinctrl-s900.c
+++ b/drivers/pinctrl/actions/pinctrl-s900.c
@@ -1821,22 +1821,27 @@ static struct owl_padinfo s900_padinfo[NUM_PADS] = {
[SGPIO3] = PAD_INFO_PULLCTL_ST(SGPIO3)
};
-#define OWL_GPIO_PORT(port, base, count, _outen, _inen, _dat) \
- [OWL_GPIO_PORT_##port] = { \
- .offset = base, \
- .pins = count, \
- .outen = _outen, \
- .inen = _inen, \
- .dat = _dat, \
+#define OWL_GPIO_PORT(port, base, count, _outen, _inen, _dat, \
+ _intc_ctl, _intc_pd, _intc_msk, _intc_type) \
+ [OWL_GPIO_PORT_##port] = { \
+ .offset = base, \
+ .pins = count, \
+ .outen = _outen, \
+ .inen = _inen, \
+ .dat = _dat, \
+ .intc_ctl = _intc_ctl, \
+ .intc_pd = _intc_pd, \
+ .intc_msk = _intc_msk, \
+ .intc_type = _intc_type, \
}
static const struct owl_gpio_port s900_gpio_ports[] = {
- OWL_GPIO_PORT(A, 0x0000, 32, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(B, 0x000C, 32, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(C, 0x0018, 12, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(D, 0x0024, 30, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(E, 0x0030, 32, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(F, 0x00F0, 8, 0x0, 0x4, 0x8)
+ OWL_GPIO_PORT(A, 0x0000, 32, 0x0, 0x4, 0x8, 0x204, 0x208, 0x20C, 0x240),
+ OWL_GPIO_PORT(B, 0x000C, 32, 0x0, 0x4, 0x8, 0x534, 0x204, 0x208, 0x23C),
+ OWL_GPIO_PORT(C, 0x0018, 12, 0x0, 0x4, 0x8, 0x52C, 0x200, 0x204, 0x238),
+ OWL_GPIO_PORT(D, 0x0024, 30, 0x0, 0x4, 0x8, 0x524, 0x1FC, 0x200, 0x234),
+ OWL_GPIO_PORT(E, 0x0030, 32, 0x0, 0x4, 0x8, 0x51C, 0x1F8, 0x1FC, 0x230),
+ OWL_GPIO_PORT(F, 0x00F0, 8, 0x0, 0x4, 0x8, 0x460, 0x140, 0x144, 0x178)
};
static struct owl_pinctrl_soc_data s900_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 7f13ce8450a3..aefe3c33dffd 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -95,7 +95,7 @@ static inline void aspeed_sig_desc_print_val(
*
* @desc: The signal descriptor of interest
* @enabled: True to query the enabled state, false to query disabled state
- * @regmap: The IP block's regmap instance
+ * @map: The IP block's regmap instance
*
* Return: 1 if the descriptor's bitfield is configured to the state
* selected by @enabled, 0 if not, and less than zero if an unrecoverable
@@ -594,7 +594,7 @@ static inline const struct aspeed_pin_config *find_pinconf_config(
/**
* @param: pinconf configuration parameter
* @arg: The supported argument for @param, or -1 if any value is supported
- * @value: The register value to write to configure @arg for @param
+ * @val: The register value to write to configure @arg for @param
*
* The map is to be used in conjunction with the configuration array supplied
* by the driver implementation.
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 35c17653c694..87618a4e90e4 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
const struct nsp_pin_function *func;
const struct nsp_pin_group *grp;
- if (grp_select > pinctrl->num_groups ||
- func_select > pinctrl->num_functions)
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
return -EINVAL;
func = &pinctrl->functions[func_select];
@@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
return PTR_ERR(pinctrl->base0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -EINVAL;
pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
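
The comparison fix above is a classic off-by-one: with num_groups == N, valid indices are 0..N-1, so the old "> N" test let index N through and read one element past the array. A minimal userspace model of the corrected check:

#include <assert.h>

static int lookup(const int *arr, unsigned int n, unsigned int idx)
{
	if (idx >= n)	/* was: idx > n -- off by one */
		return -1;
	return arr[idx];
}

int main(void)
{
	int groups[3] = { 10, 20, 30 };

	assert(lookup(groups, 3, 2) == 30);	/* last valid index */
	assert(lookup(groups, 3, 3) == -1);	/* now rejected */
	return 0;
}
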
diff --git a/drivers/pinctrl/berlin/Kconfig b/drivers/pinctrl/berlin/Kconfig
index 8fe6ad7795dc..0dd60278e973 100644
--- a/drivers/pinctrl/berlin/Kconfig
+++ b/drivers/pinctrl/berlin/Kconfig
@@ -5,6 +5,11 @@ config PINCTRL_BERLIN
select PINMUX
select REGMAP_MMIO
+config PINCTRL_AS370
+ bool "Synaptics as370 pin controller driver"
+ depends on OF
+ select PINCTRL_BERLIN
+
config PINCTRL_BERLIN_BG2
def_bool MACH_BERLIN_BG2
depends on OF
diff --git a/drivers/pinctrl/berlin/Makefile b/drivers/pinctrl/berlin/Makefile
index 6f641ce2c830..00c53ca3676d 100644
--- a/drivers/pinctrl/berlin/Makefile
+++ b/drivers/pinctrl/berlin/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PINCTRL_BERLIN_BG2) += berlin-bg2.o
obj-$(CONFIG_PINCTRL_BERLIN_BG2CD) += berlin-bg2cd.o
obj-$(CONFIG_PINCTRL_BERLIN_BG2Q) += berlin-bg2q.o
obj-$(CONFIG_PINCTRL_BERLIN_BG4CT) += berlin-bg4ct.o
+obj-$(CONFIG_PINCTRL_AS370) += pinctrl-as370.o
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index d6d183e9db17..b5903fffb3d0 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -216,10 +216,8 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
}
/* we will reallocate later */
- pctrl->functions = devm_kcalloc(&pdev->dev,
- max_functions,
- sizeof(*pctrl->functions),
- GFP_KERNEL);
+ pctrl->functions = kcalloc(max_functions,
+ sizeof(*pctrl->functions), GFP_KERNEL);
if (!pctrl->functions)
return -ENOMEM;
@@ -257,8 +255,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
function++;
}
- if (!found)
+ if (!found) {
+ kfree(pctrl->functions);
return -EINVAL;
+ }
if (!function->groups) {
function->groups =
@@ -267,8 +267,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
sizeof(char *),
GFP_KERNEL);
- if (!function->groups)
+ if (!function->groups) {
+ kfree(pctrl->functions);
return -ENOMEM;
+ }
}
groups = function->groups;
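
The switch from devm_kcalloc() to plain kcalloc() above matters because the buffer is worst-case scratch space: it is reallocated once the real function count is known, and devres-managed memory cannot be passed to krealloc() or freed early without corrupting the devres list. A hedged kernel-side sketch of the pattern, with illustrative names:

#include <linux/slab.h>

static int *build_table_sketch(size_t max_entries, size_t used)
{
	int *tbl, *trimmed;

	tbl = kcalloc(max_entries, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return NULL;

	/* ... fill the first 'used' entries ... */

	trimmed = krealloc(tbl, used * sizeof(*tbl), GFP_KERNEL);
	if (!trimmed) {
		kfree(tbl);	/* plain kfree is fine here; devm memory is not */
		return NULL;
	}
	return trimmed;
}
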
diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c
new file mode 100644
index 000000000000..d2bb811fc5fa
--- /dev/null
+++ b/drivers/pinctrl/berlin/pinctrl-as370.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics AS370 pinctrl driver
+ *
+ * Copyright (C) 2018 Synaptics Incorporated
+ *
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "berlin.h"
+
+static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
+ BERLIN_PINCTRL_GROUP("I2S1_BCLKIO", 0x0, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO0 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* BCLKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG0 */
+ BERLIN_PINCTRL_GROUP("I2S1_LRCKIO", 0x0, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO1 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* LRCKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG1 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO0", 0x0, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* 1P8V RSTB*/
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO0 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO2 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG2 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO1", 0x0, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* 3P3V RSTB */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO1 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO3 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG3 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO2", 0x0, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* CORE RSTB */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO4 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG4 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO3", 0x0, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
+ BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO6 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* MCLK */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG6 */
+ BERLIN_PINCTRL_GROUP("I2S2_BCLKIO", 0x0, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO7 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* BCLKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG7 */
+ BERLIN_PINCTRL_GROUP("I2S2_LRCKIO", 0x0, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO8 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* LRCKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG8 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI0", 0x0, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO9 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI0 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"),
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG9 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI1", 0x4, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO10 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI1 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"),
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG10 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI2", 0x4, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO11 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdific"), /* SPDIFIC */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG11 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI3", 0x4, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO12 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifia"), /* SPDIFIA */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG12 */
+ BERLIN_PINCTRL_GROUP("PDM_CLKO", 0x4, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO13 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* CLKO */
+ BERLIN_PINCTRL_FUNCTION(0x2, "i2s2"), /* MCLK */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG13 */
+ BERLIN_PINCTRL_GROUP("PDM_DI0", 0x4, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO14 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI0 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG14 */
+ BERLIN_PINCTRL_GROUP("PDM_DI1", 0x4, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO15 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI1 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG15 */
+ BERLIN_PINCTRL_GROUP("PDM_DI2", 0x4, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO16 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifid"), /* SPDIFID */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG16 */
+ BERLIN_PINCTRL_GROUP("PDM_DI3", 0x4, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO17 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifi"), /* SPDIFI */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG17 */
+ BERLIN_PINCTRL_GROUP("NAND_IO0", 0x4, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO0 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA0 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie0")), /* MDIO */
+ BERLIN_PINCTRL_GROUP("NAND_IO1", 0x4, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO1 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA1 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie0")), /* MDC */
+ BERLIN_PINCTRL_GROUP("NAND_IO2", 0x8, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO2 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA2 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie1")), /* MDIO */
+ BERLIN_PINCTRL_GROUP("NAND_IO3", 0x8, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO3 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA3 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie1")), /* MDC */
+ BERLIN_PINCTRL_GROUP("NAND_IO4", 0x8, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO4 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA4 */
+ BERLIN_PINCTRL_GROUP("NAND_IO5", 0x8, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO5 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA5 */
+ BERLIN_PINCTRL_GROUP("NAND_IO6", 0x8, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO6 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA6 */
+ BERLIN_PINCTRL_GROUP("NAND_IO7", 0x8, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO7 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA7 */
+ BERLIN_PINCTRL_GROUP("NAND_ALE", 0x8, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* ALE */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO18 */
+ BERLIN_PINCTRL_GROUP("NAND_CLE", 0x8, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CLE */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO19 */
+ BERLIN_PINCTRL_GROUP("NAND_WEn", 0x8, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WEn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO20 */
+ BERLIN_PINCTRL_GROUP("NAND_REn", 0x8, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* REn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO21 */
+ BERLIN_PINCTRL_GROUP("NAND_WPn", 0xc, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WPn */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* CLK */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO22 */
+ BERLIN_PINCTRL_GROUP("NAND_CEn", 0xc, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CEn */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* RSTn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO23 */
+ BERLIN_PINCTRL_GROUP("NAND_RDY", 0xc, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* RDY */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* CMD */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO24 */
+ BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO25 */
+ BERLIN_PINCTRL_GROUP("SPI1_SS1n", 0xc, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS1n */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO26 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")),
+ BERLIN_PINCTRL_GROUP("SPI1_SS2n", 0xc, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RXD */
+ BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS2n */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO27 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")),
+ BERLIN_PINCTRL_GROUP("SPI1_SS3n", 0xc, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* TXD */
+ BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS3n */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO28 */
+ BERLIN_PINCTRL_GROUP("SPI1_SCLK", 0xc, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SCLK */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO29 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm4")),
+ BERLIN_PINCTRL_GROUP("SPI1_SDO", 0xc, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDO */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO30 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm5")),
+ BERLIN_PINCTRL_GROUP("SPI1_SDI", 0xc, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO31 */
+ BERLIN_PINCTRL_GROUP("USB0_DRV_VBUS", 0x10, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "usb0"), /* VBUS */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO32 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "refclko")), /* 25M */
+ BERLIN_PINCTRL_GROUP("TW1_SCL", 0x10, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO33 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw1")), /* SCL */
+ BERLIN_PINCTRL_GROUP("TW1_SDA", 0x10, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO34 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw1")), /* SDA */
+ BERLIN_PINCTRL_GROUP("TW0_SCL", 0x10, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO35 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SCL */
+ BERLIN_PINCTRL_GROUP("TW0_SDA", 0x10, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO36 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SDA */
+ BERLIN_PINCTRL_GROUP("TMS", 0x10, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TMS */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
+ BERLIN_PINCTRL_GROUP("TDI", 0x10, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDI */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO38 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm1")),
+ BERLIN_PINCTRL_GROUP("TDO", 0x10, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDO */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO39 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
+ BERLIN_PINCTRL_GROUP("PWM6", 0x10, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO40 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm6")),
+ BERLIN_PINCTRL_GROUP("PWM7", 0x10, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO41 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm7")),
+ BERLIN_PINCTRL_GROUP("PWM0", 0x14, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* VDDCPUSOC RSTB */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm0"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO42 */
+ BERLIN_PINCTRL_GROUP("PWM1", 0x14, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO43 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm1")),
+ BERLIN_PINCTRL_GROUP("PWM2", 0x14, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO44 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm2")),
+ BERLIN_PINCTRL_GROUP("PWM3", 0x14, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO45 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm3")),
+ BERLIN_PINCTRL_GROUP("PWM4", 0x14, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO46 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm4")),
+ BERLIN_PINCTRL_GROUP("PWM5", 0x14, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO47 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm5")),
+ BERLIN_PINCTRL_GROUP("URT1_RTSn", 0x14, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO48 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RTSn */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SCL */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG0 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG18 */
+ BERLIN_PINCTRL_GROUP("URT1_CTSn", 0x14, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO49 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* CTSn */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SDA */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG1 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG19 */
+ BERLIN_PINCTRL_GROUP("URT1_RXD", 0x14, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO50 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RXD */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG2 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG20 */
+ BERLIN_PINCTRL_GROUP("URT1_TXD", 0x14, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO51 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* TXD */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG3 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG21 */
+ BERLIN_PINCTRL_GROUP("I2S3_DI", 0x18, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO52 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3"), /* DI */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG22 */
+ BERLIN_PINCTRL_GROUP("I2S3_DO", 0x18, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO53 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3"), /* DO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG23 */
+ BERLIN_PINCTRL_GROUP("I2S3_BCLKIO", 0x18, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO54 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3"), /* BCLKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "clk")), /* DBG */
+ BERLIN_PINCTRL_GROUP("I2S3_LRCKIO", 0x18, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO55 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3")), /* LRCKIO */
+ BERLIN_PINCTRL_GROUP("SD0_DAT0", 0x18, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "cpupll"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT0 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO56 */
+ BERLIN_PINCTRL_GROUP("SD0_DAT1", 0x18, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "syspll"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT1 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO57 */
+ BERLIN_PINCTRL_GROUP("SD0_CLK", 0x18, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO58 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0")), /* CLK */
+ BERLIN_PINCTRL_GROUP("SD0_DAT2", 0x18, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "mempll"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO59 */
+ BERLIN_PINCTRL_GROUP("SD0_DAT3", 0x18, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "apll0"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO60 */
+ BERLIN_PINCTRL_GROUP("SD0_CMD", 0x18, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "apll1"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CMD */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO61 */
+ BERLIN_PINCTRL_GROUP("SD0_CDn", 0x1c, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO62 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CDn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")),
+ BERLIN_PINCTRL_GROUP("SD0_WP", 0x1c, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO63 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* WP */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")),
+};
+
+static const struct berlin_pinctrl_desc as370_soc_pinctrl_data = {
+ .groups = as370_soc_pinctrl_groups,
+ .ngroups = ARRAY_SIZE(as370_soc_pinctrl_groups),
+};
+
+static const struct of_device_id as370_pinctrl_match[] = {
+ {
+ .compatible = "syna,as370-soc-pinctrl",
+ .data = &as370_soc_pinctrl_data,
+ },
+ {}
+};
+
+static int as370_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match =
+ of_match_device(as370_pinctrl_match, &pdev->dev);
+ struct regmap_config *rmconfig;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+
+ rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL);
+ if (!rmconfig)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rmconfig->reg_bits = 32;
+ rmconfig->val_bits = 32;
+ rmconfig->reg_stride = 4;
+ rmconfig->max_register = resource_size(res);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
+}
+
+static struct platform_driver as370_pinctrl_driver = {
+ .probe = as370_pinctrl_probe,
+ .driver = {
+ .name = "as370-pinctrl",
+ .of_match_table = as370_pinctrl_match,
+ },
+};
+builtin_platform_driver(as370_pinctrl_driver);
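
Reading the as370 group table above, each BERLIN_PINCTRL_GROUP() entry encodes a name, a mux register offset, a field width in bits, and a field shift. The following is a minimal sketch, not part of the patch, of how a Berlin-style core could program one such field, assuming regmap-backed mux registers (the helper name is hypothetical):

    /* Sketch only: program one mux field described by a group entry. */
    static int berlin_mux_sketch(struct regmap *regmap, unsigned int offset,
                                 unsigned int width, unsigned int shift,
                                 unsigned int func)
    {
            unsigned int mask = GENMASK(shift + width - 1, shift);

            /* e.g. "SD0_WP" above: offset 0x1c, width 3, shift 0x03; func 0x3 selects pwm3 */
            return regmap_update_bits(regmap, offset, mask, func << shift);
    }
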
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
new file mode 100644
index 000000000000..27013e5949bc
--- /dev/null
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -0,0 +1,14 @@
+# This is all selected by the Madera MFD driver Kconfig options
+config PINCTRL_MADERA
+ tristate
+ select PINMUX
+ select GENERIC_PINCONF
+
+config PINCTRL_CS47L35
+ bool
+
+config PINCTRL_CS47L85
+ bool
+
+config PINCTRL_CS47L90
+ bool
diff --git a/drivers/pinctrl/cirrus/Makefile b/drivers/pinctrl/cirrus/Makefile
new file mode 100644
index 000000000000..6e4938cde9e3
--- /dev/null
+++ b/drivers/pinctrl/cirrus/Makefile
@@ -0,0 +1,13 @@
+# Cirrus Logic pinctrl drivers
+pinctrl-madera-objs := pinctrl-madera-core.o
+ifeq ($(CONFIG_PINCTRL_CS47L35),y)
+pinctrl-madera-objs += pinctrl-cs47l35.o
+endif
+ifeq ($(CONFIG_PINCTRL_CS47L85),y)
+pinctrl-madera-objs += pinctrl-cs47l85.o
+endif
+ifeq ($(CONFIG_PINCTRL_CS47L90),y)
+pinctrl-madera-objs += pinctrl-cs47l90.o
+endif
+
+obj-$(CONFIG_PINCTRL_MADERA) += pinctrl-madera.o
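
This Makefile composes a single pinctrl-madera module object from the shared core plus whichever chip-specific tables are enabled; the bool symbols above are not user-visible and are selected by the Madera MFD driver's Kconfig, so only tables for codecs that can actually be present are linked in.
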
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs47l35.c b/drivers/pinctrl/cirrus/pinctrl-cs47l35.c
new file mode 100644
index 000000000000..06b59160783d
--- /dev/null
+++ b/drivers/pinctrl/cirrus/pinctrl-cs47l35.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl for Cirrus Logic CS47L35
+ *
+ * Copyright (C) 2016-2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/madera/core.h>
+
+#include "pinctrl-madera.h"
+
+/*
+ * The alt func groups are the most commonly used functions; we place these
+ * at the lower function indexes for convenience, and the less commonly used
+ * gpio functions at higher indexes.
+ *
+ * To stay consistent with the datasheet, the function names are the same as
+ * the group names for that function's pins.
+ *
+ * Note: all pin numbers are 1 less than in the datasheet because these are
+ * zero-indexed, i.e. datasheet GPIO1 is pin 0 here.
+ */
+static const unsigned int cs47l35_aif3_pins[] = { 0, 1, 2, 3 };
+static const unsigned int cs47l35_spk_pins[] = { 4, 5 };
+static const unsigned int cs47l35_aif1_pins[] = { 7, 8, 9, 10 };
+static const unsigned int cs47l35_aif2_pins[] = { 11, 12, 13, 14 };
+static const unsigned int cs47l35_mif1_pins[] = { 6, 15 };
+
+static const struct madera_pin_groups cs47l35_pin_groups[] = {
+ { "aif1", cs47l35_aif1_pins, ARRAY_SIZE(cs47l35_aif1_pins) },
+ { "aif2", cs47l35_aif2_pins, ARRAY_SIZE(cs47l35_aif2_pins) },
+ { "aif3", cs47l35_aif3_pins, ARRAY_SIZE(cs47l35_aif3_pins) },
+ { "mif1", cs47l35_mif1_pins, ARRAY_SIZE(cs47l35_mif1_pins) },
+ { "pdmspk1", cs47l35_spk_pins, ARRAY_SIZE(cs47l35_spk_pins) },
+};
+
+const struct madera_pin_chip cs47l35_pin_chip = {
+ .n_pins = CS47L35_NUM_GPIOS,
+ .pin_groups = cs47l35_pin_groups,
+ .n_pin_groups = ARRAY_SIZE(cs47l35_pin_groups),
+};
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs47l85.c b/drivers/pinctrl/cirrus/pinctrl-cs47l85.c
new file mode 100644
index 000000000000..0a322e2a0fde
--- /dev/null
+++ b/drivers/pinctrl/cirrus/pinctrl-cs47l85.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl for Cirrus Logic CS47L85
+ *
+ * Copyright (C) 2016-2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/madera/core.h>
+
+#include "pinctrl-madera.h"
+
+/*
+ * The alt func groups are the most commonly used functions; we place these
+ * at the lower function indexes for convenience, and the less commonly used
+ * gpio functions at higher indexes.
+ *
+ * To stay consistent with the datasheet, the function names are the same as
+ * the group names for that function's pins.
+ *
+ * Note: all pin numbers are 1 less than in the datasheet because these are
+ * zero-indexed, i.e. datasheet GPIO1 is pin 0 here.
+ */
+static const unsigned int cs47l85_mif1_pins[] = { 8, 9 };
+static const unsigned int cs47l85_mif2_pins[] = { 10, 11 };
+static const unsigned int cs47l85_mif3_pins[] = { 12, 13 };
+static const unsigned int cs47l85_aif1_pins[] = { 14, 15, 16, 17 };
+static const unsigned int cs47l85_aif2_pins[] = { 18, 19, 20, 21 };
+static const unsigned int cs47l85_aif3_pins[] = { 22, 23, 24, 25 };
+static const unsigned int cs47l85_aif4_pins[] = { 26, 27, 28, 29 };
+static const unsigned int cs47l85_dmic4_pins[] = { 30, 31 };
+static const unsigned int cs47l85_dmic5_pins[] = { 32, 33 };
+static const unsigned int cs47l85_dmic6_pins[] = { 34, 35 };
+static const unsigned int cs47l85_spk1_pins[] = { 36, 38 };
+static const unsigned int cs47l85_spk2_pins[] = { 37, 39 };
+
+static const struct madera_pin_groups cs47l85_pin_groups[] = {
+ { "aif1", cs47l85_aif1_pins, ARRAY_SIZE(cs47l85_aif1_pins) },
+ { "aif2", cs47l85_aif2_pins, ARRAY_SIZE(cs47l85_aif2_pins) },
+ { "aif3", cs47l85_aif3_pins, ARRAY_SIZE(cs47l85_aif3_pins) },
+ { "aif4", cs47l85_aif4_pins, ARRAY_SIZE(cs47l85_aif4_pins) },
+ { "mif1", cs47l85_mif1_pins, ARRAY_SIZE(cs47l85_mif1_pins) },
+ { "mif2", cs47l85_mif2_pins, ARRAY_SIZE(cs47l85_mif2_pins) },
+ { "mif3", cs47l85_mif3_pins, ARRAY_SIZE(cs47l85_mif3_pins) },
+ { "dmic4", cs47l85_dmic4_pins, ARRAY_SIZE(cs47l85_dmic4_pins) },
+ { "dmic5", cs47l85_dmic5_pins, ARRAY_SIZE(cs47l85_dmic5_pins) },
+ { "dmic6", cs47l85_dmic6_pins, ARRAY_SIZE(cs47l85_dmic6_pins) },
+ { "pdmspk1", cs47l85_spk1_pins, ARRAY_SIZE(cs47l85_spk1_pins) },
+ { "pdmspk2", cs47l85_spk2_pins, ARRAY_SIZE(cs47l85_spk2_pins) },
+};
+
+const struct madera_pin_chip cs47l85_pin_chip = {
+ .n_pins = CS47L85_NUM_GPIOS,
+ .pin_groups = cs47l85_pin_groups,
+ .n_pin_groups = ARRAY_SIZE(cs47l85_pin_groups),
+};
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs47l90.c b/drivers/pinctrl/cirrus/pinctrl-cs47l90.c
new file mode 100644
index 000000000000..fc38f579f492
--- /dev/null
+++ b/drivers/pinctrl/cirrus/pinctrl-cs47l90.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl for Cirrus Logic CS47L90
+ *
+ * Copyright (C) 2016-2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/madera/core.h>
+
+#include "pinctrl-madera.h"
+
+/*
+ * The alt func groups are the most commonly used functions; we place these
+ * at the lower function indexes for convenience, and the less commonly used
+ * gpio functions at higher indexes.
+ *
+ * To stay consistent with the datasheet, the function names are the same as
+ * the group names for that function's pins.
+ *
+ * Note: all pin numbers are 1 less than in the datasheet because these are
+ * zero-indexed, i.e. datasheet GPIO1 is pin 0 here.
+ */
+static const unsigned int cs47l90_mif1_pins[] = { 8, 9 };
+static const unsigned int cs47l90_mif2_pins[] = { 10, 11 };
+static const unsigned int cs47l90_mif3_pins[] = { 12, 13 };
+static const unsigned int cs47l90_aif1_pins[] = { 14, 15, 16, 17 };
+static const unsigned int cs47l90_aif2_pins[] = { 18, 19, 20, 21 };
+static const unsigned int cs47l90_aif3_pins[] = { 22, 23, 24, 25 };
+static const unsigned int cs47l90_aif4_pins[] = { 26, 27, 28, 29 };
+static const unsigned int cs47l90_dmic4_pins[] = { 30, 31 };
+static const unsigned int cs47l90_dmic5_pins[] = { 32, 33 };
+static const unsigned int cs47l90_dmic3_pins[] = { 34, 35 };
+static const unsigned int cs47l90_spk1_pins[] = { 36, 37 };
+
+static const struct madera_pin_groups cs47l90_pin_groups[] = {
+ { "aif1", cs47l90_aif1_pins, ARRAY_SIZE(cs47l90_aif1_pins) },
+ { "aif2", cs47l90_aif2_pins, ARRAY_SIZE(cs47l90_aif2_pins) },
+ { "aif3", cs47l90_aif3_pins, ARRAY_SIZE(cs47l90_aif3_pins) },
+ { "aif4", cs47l90_aif4_pins, ARRAY_SIZE(cs47l90_aif4_pins) },
+ { "mif1", cs47l90_mif1_pins, ARRAY_SIZE(cs47l90_mif1_pins) },
+ { "mif2", cs47l90_mif2_pins, ARRAY_SIZE(cs47l90_mif2_pins) },
+ { "mif3", cs47l90_mif3_pins, ARRAY_SIZE(cs47l90_mif3_pins) },
+ { "dmic3", cs47l90_dmic3_pins, ARRAY_SIZE(cs47l90_dmic3_pins) },
+ { "dmic4", cs47l90_dmic4_pins, ARRAY_SIZE(cs47l90_dmic4_pins) },
+ { "dmic5", cs47l90_dmic5_pins, ARRAY_SIZE(cs47l90_dmic5_pins) },
+ { "pdmspk1", cs47l90_spk1_pins, ARRAY_SIZE(cs47l90_spk1_pins) },
+};
+
+const struct madera_pin_chip cs47l90_pin_chip = {
+ .n_pins = CS47L90_NUM_GPIOS,
+ .pin_groups = cs47l90_pin_groups,
+ .n_pin_groups = ARRAY_SIZE(cs47l90_pin_groups),
+};
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
new file mode 100644
index 000000000000..ece41fb2848f
--- /dev/null
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2016-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/registers.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-madera.h"
+
+/*
+ * Use pin GPIO names for consistency
+ * NOTE: IDs are zero-indexed for coding convenience
+ */
+static const struct pinctrl_pin_desc madera_pins[] = {
+ PINCTRL_PIN(0, "gpio1"),
+ PINCTRL_PIN(1, "gpio2"),
+ PINCTRL_PIN(2, "gpio3"),
+ PINCTRL_PIN(3, "gpio4"),
+ PINCTRL_PIN(4, "gpio5"),
+ PINCTRL_PIN(5, "gpio6"),
+ PINCTRL_PIN(6, "gpio7"),
+ PINCTRL_PIN(7, "gpio8"),
+ PINCTRL_PIN(8, "gpio9"),
+ PINCTRL_PIN(9, "gpio10"),
+ PINCTRL_PIN(10, "gpio11"),
+ PINCTRL_PIN(11, "gpio12"),
+ PINCTRL_PIN(12, "gpio13"),
+ PINCTRL_PIN(13, "gpio14"),
+ PINCTRL_PIN(14, "gpio15"),
+ PINCTRL_PIN(15, "gpio16"),
+ PINCTRL_PIN(16, "gpio17"),
+ PINCTRL_PIN(17, "gpio18"),
+ PINCTRL_PIN(18, "gpio19"),
+ PINCTRL_PIN(19, "gpio20"),
+ PINCTRL_PIN(20, "gpio21"),
+ PINCTRL_PIN(21, "gpio22"),
+ PINCTRL_PIN(22, "gpio23"),
+ PINCTRL_PIN(23, "gpio24"),
+ PINCTRL_PIN(24, "gpio25"),
+ PINCTRL_PIN(25, "gpio26"),
+ PINCTRL_PIN(26, "gpio27"),
+ PINCTRL_PIN(27, "gpio28"),
+ PINCTRL_PIN(28, "gpio29"),
+ PINCTRL_PIN(29, "gpio30"),
+ PINCTRL_PIN(30, "gpio31"),
+ PINCTRL_PIN(31, "gpio32"),
+ PINCTRL_PIN(32, "gpio33"),
+ PINCTRL_PIN(33, "gpio34"),
+ PINCTRL_PIN(34, "gpio35"),
+ PINCTRL_PIN(35, "gpio36"),
+ PINCTRL_PIN(36, "gpio37"),
+ PINCTRL_PIN(37, "gpio38"),
+ PINCTRL_PIN(38, "gpio39"),
+ PINCTRL_PIN(39, "gpio40"),
+};
+
+/*
+ * All single-pin functions can be mapped to any GPIO; however, pinmux applies
+ * functions to pin groups, and only to those groups declared as supporting
+ * that function. To make this work we must put each pin in its own dummy
+ * group so that the functions can be described as applying to all pins.
+ * Since these do not correspond to anything in the actual hardware - they are
+ * merely an adaptation to pinctrl's view of the world - we use the same name
+ * as the pin to avoid confusion when comparing with datasheet instructions.
+ */
+static const char * const madera_pin_single_group_names[] = {
+ "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40",
+};
+
+/* set of pin numbers for single-pin groups, zero-indexed */
+static const unsigned int madera_pin_single_group_pins[] = {
+ 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39,
+};
+
+static const char * const madera_aif1_group_names[] = { "aif1" };
+static const char * const madera_aif2_group_names[] = { "aif2" };
+static const char * const madera_aif3_group_names[] = { "aif3" };
+static const char * const madera_aif4_group_names[] = { "aif4" };
+static const char * const madera_mif1_group_names[] = { "mif1" };
+static const char * const madera_mif2_group_names[] = { "mif2" };
+static const char * const madera_mif3_group_names[] = { "mif3" };
+static const char * const madera_dmic3_group_names[] = { "dmic3" };
+static const char * const madera_dmic4_group_names[] = { "dmic4" };
+static const char * const madera_dmic5_group_names[] = { "dmic5" };
+static const char * const madera_dmic6_group_names[] = { "dmic6" };
+static const char * const madera_spk1_group_names[] = { "pdmspk1" };
+static const char * const madera_spk2_group_names[] = { "pdmspk2" };
+
+/*
+ * Alt functions always apply to a single pin group; the other functions
+ * always apply to all pins.
+ */
+static const struct {
+ const char *name;
+ const char * const *group_names;
+ u32 func;
+} madera_mux_funcs[] = {
+ {
+ .name = "aif1",
+ .group_names = madera_aif1_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "aif2",
+ .group_names = madera_aif2_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "aif3",
+ .group_names = madera_aif3_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "aif4",
+ .group_names = madera_aif4_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "mif1",
+ .group_names = madera_mif1_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "mif2",
+ .group_names = madera_mif2_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "mif3",
+ .group_names = madera_mif3_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "dmic3",
+ .group_names = madera_dmic3_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "dmic4",
+ .group_names = madera_dmic4_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "dmic5",
+ .group_names = madera_dmic5_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "dmic6",
+ .group_names = madera_dmic6_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "pdmspk1",
+ .group_names = madera_spk1_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "pdmspk2",
+ .group_names = madera_spk2_group_names,
+ .func = 0x000
+ },
+ {
+ .name = "io",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x001
+ },
+ {
+ .name = "dsp-gpio",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x002
+ },
+ {
+ .name = "irq1",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x003
+ },
+ {
+ .name = "irq2",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x004
+ },
+ {
+ .name = "fll1-clk",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x010
+ },
+ {
+ .name = "fll2-clk",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x011
+ },
+ {
+ .name = "fll3-clk",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x012
+ },
+ {
+ .name = "fllao-clk",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x013
+ },
+ {
+ .name = "fll1-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x018
+ },
+ {
+ .name = "fll2-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x019
+ },
+ {
+ .name = "fll3-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x01a
+ },
+ {
+ .name = "fllao-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x01b
+ },
+ {
+ .name = "opclk",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x040
+ },
+ {
+ .name = "opclk-async",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x041
+ },
+ {
+ .name = "pwm1",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x048
+ },
+ {
+ .name = "pwm2",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x049
+ },
+ {
+ .name = "spdif",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x04c
+ },
+ {
+ .name = "asrc1-in1-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x088
+ },
+ {
+ .name = "asrc1-in2-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x089
+ },
+ {
+ .name = "asrc2-in1-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x08a
+ },
+ {
+ .name = "asrc2-in2-lock",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x08b
+ },
+ {
+ .name = "spkl-short-circuit",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x0b6
+ },
+ {
+ .name = "spkr-short-circuit",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x0b7
+ },
+ {
+ .name = "spk-shutdown",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x0e0
+ },
+ {
+ .name = "spk-overheat-shutdown",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x0e1
+ },
+ {
+ .name = "spk-overheat-warn",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x0e2
+ },
+ {
+ .name = "timer1-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x140
+ },
+ {
+ .name = "timer2-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x141
+ },
+ {
+ .name = "timer3-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x142
+ },
+ {
+ .name = "timer4-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x143
+ },
+ {
+ .name = "timer5-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x144
+ },
+ {
+ .name = "timer6-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x145
+ },
+ {
+ .name = "timer7-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x146
+ },
+ {
+ .name = "timer8-sts",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x147
+ },
+ {
+ .name = "log1-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x150
+ },
+ {
+ .name = "log2-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x151
+ },
+ {
+ .name = "log3-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x152
+ },
+ {
+ .name = "log4-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x153
+ },
+ {
+ .name = "log5-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x154
+ },
+ {
+ .name = "log6-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x155
+ },
+ {
+ .name = "log7-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x156
+ },
+ {
+ .name = "log8-fifo-ne",
+ .group_names = madera_pin_single_group_names,
+ .func = 0x157
+ },
+};
+
+static u16 madera_pin_make_drv_str(struct madera_pin_private *priv,
+ unsigned int milliamps)
+{
+ switch (milliamps) {
+ case 4:
+ return 0;
+ case 8:
+ return 2 << MADERA_GP1_DRV_STR_SHIFT;
+ default:
+ break;
+ }
+
+ dev_warn(priv->dev, "%u mA not a valid drive strength\n", milliamps);
+
+ return 0;
+}
+
+static unsigned int madera_pin_unmake_drv_str(struct madera_pin_private *priv,
+ u16 regval)
+{
+ regval = (regval & MADERA_GP1_DRV_STR_MASK) >> MADERA_GP1_DRV_STR_SHIFT;
+
+ switch (regval) {
+ case 0:
+ return 4;
+ case 2:
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+static int madera_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ /* Number of alt function groups plus number of single-pin groups */
+ return priv->chip->n_pin_groups + priv->chip->n_pins;
+}
+
+static const char *madera_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ if (selector < priv->chip->n_pin_groups)
+ return priv->chip->pin_groups[selector].name;
+
+ selector -= priv->chip->n_pin_groups;
+ return madera_pin_single_group_names[selector];
+}
+
+static int madera_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ if (selector < priv->chip->n_pin_groups) {
+ *pins = priv->chip->pin_groups[selector].pins;
+ *num_pins = priv->chip->pin_groups[selector].n_pins;
+ } else {
+ /* return the dummy group for a single pin */
+ selector -= priv->chip->n_pin_groups;
+ *pins = &madera_pin_single_group_pins[selector];
+ *num_pins = 1;
+ }
+ return 0;
+}
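+
+/*
+ * Worked example of this selector layout (using the CS47L35 tables above,
+ * and assuming CS47L35_NUM_GPIOS is 16 to match its pin lists):
+ * n_pin_groups is 5 and n_pins is 16, so selectors 0-4 name the
+ * alt-function groups "aif1" through "pdmspk1", while selectors 5-20 name
+ * the dummy single-pin groups "gpio1" through "gpio16", with selector 5
+ * resolving to pin 0.
+ */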
+
+static void madera_pin_dbg_show_fn(struct madera_pin_private *priv,
+ struct seq_file *s,
+ unsigned int pin, unsigned int fn)
+{
+ const struct madera_pin_chip *chip = priv->chip;
+ int i, g_pin;
+
+ if (fn != 0) {
+ for (i = 0; i < ARRAY_SIZE(madera_mux_funcs); ++i) {
+ if (madera_mux_funcs[i].func == fn) {
+ seq_printf(s, " FN=%s",
+ madera_mux_funcs[i].name);
+ return;
+ }
+ }
+ return; /* ignore unknown function values */
+ }
+
+ /* alt function */
+ for (i = 0; i < chip->n_pin_groups; ++i) {
+ for (g_pin = 0; g_pin < chip->pin_groups[i].n_pins; ++g_pin) {
+ if (chip->pin_groups[i].pins[g_pin] == pin) {
+ seq_printf(s, " FN=%s",
+ chip->pin_groups[i].name);
+ return;
+ }
+ }
+ }
+}
+
+static void __maybe_unused madera_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int pin)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int conf[2];
+ unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * pin);
+ unsigned int fn;
+ int ret;
+
+ ret = regmap_read(priv->madera->regmap, reg, &conf[0]);
+ if (ret)
+ return;
+
+ ret = regmap_read(priv->madera->regmap, reg + 1, &conf[1]);
+ if (ret)
+ return;
+
+ seq_printf(s, "%04x:%04x", conf[0], conf[1]);
+
+ fn = (conf[0] & MADERA_GP1_FN_MASK) >> MADERA_GP1_FN_SHIFT;
+ madera_pin_dbg_show_fn(priv, s, pin, fn);
+
+ /* State of direction bit is only relevant if function==1 */
+ if (fn == 1) {
+ if (conf[1] & MADERA_GP1_DIR_MASK)
+ seq_puts(s, " IN");
+ else
+ seq_puts(s, " OUT");
+ }
+
+ if (conf[1] & MADERA_GP1_PU_MASK)
+ seq_puts(s, " PU");
+
+ if (conf[1] & MADERA_GP1_PD_MASK)
+ seq_puts(s, " PD");
+
+ if (conf[0] & MADERA_GP1_DB_MASK)
+ seq_puts(s, " DB");
+
+ if (conf[0] & MADERA_GP1_OP_CFG_MASK)
+ seq_puts(s, " OD");
+ else
+ seq_puts(s, " CMOS");
+
+ seq_printf(s, " DRV=%umA", madera_pin_unmake_drv_str(priv, conf[1]));
+
+ if (conf[0] & MADERA_GP1_IP_CFG_MASK)
+ seq_puts(s, "SCHMITT");
+}
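+
+/*
+ * Taken together, the code above renders each debugfs pin line in the form
+ *   <ctrl1>:<ctrl2> FN=<func> [IN|OUT] [PU] [PD] [DB] [OD|CMOS] DRV=<n>mA[ SCHMITT]
+ * where the two hex words are the raw GPIOn_CTRL_1/2 register values.
+ */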
+
+static const struct pinctrl_ops madera_pin_group_ops = {
+ .get_groups_count = madera_get_groups_count,
+ .get_group_name = madera_get_group_name,
+ .get_group_pins = madera_get_group_pins,
+#if IS_ENABLED(CONFIG_OF)
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+#endif
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .pin_dbg_show = madera_pin_dbg_show,
+#endif
+};
+
+static int madera_mux_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(madera_mux_funcs);
+}
+
+static const char *madera_mux_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return madera_mux_funcs[selector].name;
+}
+
+static int madera_mux_get_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = madera_mux_funcs[selector].group_names;
+
+ if (madera_mux_funcs[selector].func == 0) {
+ /* alt func always maps to a single group */
+ *num_groups = 1;
+ } else {
+ /* other funcs map to all available gpio pins */
+ *num_groups = priv->chip->n_pins;
+ }
+
+ return 0;
+}
+
+static int madera_mux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned int group)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ struct madera *madera = priv->madera;
+ const struct madera_pin_groups *pin_group = priv->chip->pin_groups;
+ unsigned int n_chip_groups = priv->chip->n_pin_groups;
+ const char *func_name = madera_mux_funcs[selector].name;
+ unsigned int reg;
+ int i, ret;
+
+ dev_dbg(priv->dev, "%s selecting %u (%s) for group %u (%s)\n",
+ __func__, selector, func_name, group,
+ madera_get_group_name(pctldev, group));
+
+ if (madera_mux_funcs[selector].func == 0) {
+ /* alt func pin assignments are codec-specific */
+ for (i = 0; i < n_chip_groups; ++i) {
+ if (strcmp(func_name, pin_group->name) == 0)
+ break;
+
+ ++pin_group;
+ }
+
+ if (i == n_chip_groups)
+ return -EINVAL;
+
+ for (i = 0; i < pin_group->n_pins; ++i) {
+ reg = MADERA_GPIO1_CTRL_1 + (2 * pin_group->pins[i]);
+
+ dev_dbg(priv->dev, "%s setting 0x%x func bits to 0\n",
+ __func__, reg);
+
+ ret = regmap_update_bits(madera->regmap, reg,
+ MADERA_GP1_FN_MASK, 0);
+ if (ret)
+ break;
+ }
+ } else {
+ /*
+ * for other funcs the group selector is the gpio number,
+ * offset by the number of chip-specific groups at the
+ * start of the group list
+ */
+ group -= n_chip_groups;
+ reg = MADERA_GPIO1_CTRL_1 + (2 * group);
+
+ dev_dbg(priv->dev, "%s setting 0x%x func bits to 0x%x\n",
+ __func__, reg, madera_mux_funcs[selector].func);
+
+ ret = regmap_update_bits(madera->regmap,
+ reg,
+ MADERA_GP1_FN_MASK,
+ madera_mux_funcs[selector].func);
+ }
+
+ if (ret)
+ dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret);
+
+ return ret;
+}
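+
+/*
+ * Worked example of the arithmetic in the else branch (numbers
+ * illustrative): selecting function "io" (func 0x001) for dummy group
+ * "gpio5" on a chip with n_chip_groups == 5 means group == 9 on entry;
+ * after group -= n_chip_groups the pin index is 4, so
+ * reg = MADERA_GPIO1_CTRL_1 + 8, i.e. GPIO5's first control register,
+ * whose FN field is then set to 0x001.
+ */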
+
+static int madera_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset,
+ bool input)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ struct madera *madera = priv->madera;
+ unsigned int reg = MADERA_GPIO1_CTRL_2 + (2 * offset);
+ unsigned int val;
+ int ret;
+
+ if (input)
+ val = MADERA_GP1_DIR;
+ else
+ val = 0;
+
+ ret = regmap_update_bits(madera->regmap, reg, MADERA_GP1_DIR_MASK, val);
+ if (ret)
+ dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret);
+
+ return ret;
+}
+
+static int madera_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ struct madera *madera = priv->madera;
+ unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * offset);
+ int ret;
+
+ /* put the pin into GPIO mode */
+ ret = regmap_update_bits(madera->regmap, reg, MADERA_GP1_FN_MASK, 1);
+ if (ret)
+ dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret);
+
+ return ret;
+}
+
+static void madera_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ struct madera *madera = priv->madera;
+ unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * offset);
+ int ret;
+
+ /* disable GPIO by setting to GPIO IN */
+ madera_gpio_set_direction(pctldev, range, offset, true);
+
+ ret = regmap_update_bits(madera->regmap, reg, MADERA_GP1_FN_MASK, 1);
+ if (ret)
+ dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret);
+}
+
+static const struct pinmux_ops madera_pin_mux_ops = {
+ .get_functions_count = madera_mux_get_funcs_count,
+ .get_function_name = madera_mux_get_func_name,
+ .get_function_groups = madera_mux_get_groups,
+ .set_mux = madera_mux_set_mux,
+ .gpio_request_enable = madera_gpio_request_enable,
+ .gpio_disable_free = madera_gpio_disable_free,
+ .gpio_set_direction = madera_gpio_set_direction,
+ .strict = true, /* GPIO and other functions are exclusive */
+};
+
+static int madera_pin_conf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ unsigned int result = 0;
+ unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * pin);
+ unsigned int conf[2];
+ int ret;
+
+ ret = regmap_read(priv->madera->regmap, reg, &conf[0]);
+ if (!ret)
+ ret = regmap_read(priv->madera->regmap, reg + 1, &conf[1]);
+
+ if (ret) {
+ dev_err(priv->dev, "Failed to read GP%d conf (%d)\n",
+ pin + 1, ret);
+ return ret;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ if (conf[1] == (MADERA_GP1_PU | MADERA_GP1_PD))
+ result = 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ if (!conf[1])
+ result = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ if (conf[1] == MADERA_GP1_PD_MASK)
+ result = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ if (conf[1] == MADERA_GP1_PU_MASK)
+ result = 1;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (conf[0] & MADERA_GP1_OP_CFG_MASK)
+ result = 1;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (!(conf[0] & MADERA_GP1_OP_CFG_MASK))
+ result = 1;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ result = madera_pin_unmake_drv_str(priv, conf[1]);
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ if (conf[0] & MADERA_GP1_DB_MASK)
+ result = 1;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* the DIR bit lives in the second control register */
+ if (conf[1] & MADERA_GP1_DIR_MASK)
+ result = 1;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT:
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (conf[0] & MADERA_GP1_IP_CFG_MASK)
+ result = 1;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ /* DIR is clear when the pin is an output */
+ if (!(conf[1] & MADERA_GP1_DIR_MASK) &&
+ (conf[0] & MADERA_GP1_LVL_MASK))
+ result = 1;
+ break;
+ default:
+ break;
+ }
+
+ *config = pinconf_to_config_packed(param, result);
+
+ return 0;
+}
+
+static int madera_pin_conf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ u16 conf[2] = {0, 0};
+ u16 mask[2] = {0, 0};
+ unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * pin);
+ unsigned int val;
+ int ret;
+
+ while (num_configs) {
+ dev_dbg(priv->dev, "%s config 0x%lx\n", __func__, *configs);
+
+ switch (pinconf_to_config_param(*configs)) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ conf[1] |= MADERA_GP1_PU | MADERA_GP1_PD;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ conf[1] &= ~(MADERA_GP1_PU | MADERA_GP1_PD);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ conf[1] |= MADERA_GP1_PD;
+ conf[1] &= ~MADERA_GP1_PU;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK;
+ conf[1] |= MADERA_GP1_PU;
+ conf[1] &= ~MADERA_GP1_PD;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ mask[0] |= MADERA_GP1_OP_CFG_MASK;
+ conf[0] |= MADERA_GP1_OP_CFG;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ mask[0] |= MADERA_GP1_OP_CFG_MASK;
+ conf[0] &= ~MADERA_GP1_OP_CFG;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val = pinconf_to_config_argument(*configs);
+ mask[1] |= MADERA_GP1_DRV_STR_MASK;
+ conf[1] &= ~MADERA_GP1_DRV_STR_MASK;
+ conf[1] |= madera_pin_make_drv_str(priv, val);
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ mask[0] |= MADERA_GP1_DB_MASK;
+
+ /*
+ * we can't configure debounce time per-pin so value
+ * is just a flag
+ */
+ val = pinconf_to_config_argument(*configs);
+ if (val)
+ conf[0] |= MADERA_GP1_DB;
+ else
+ conf[0] &= ~MADERA_GP1_DB;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ val = pinconf_to_config_argument(*configs);
+ mask[1] |= MADERA_GP1_DIR_MASK;
+ if (val)
+ conf[1] |= MADERA_GP1_DIR;
+ else
+ conf[1] &= ~MADERA_GP1_DIR;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT:
+ val = pinconf_to_config_argument(*configs);
+ mask[0] |= MADERA_GP1_IP_CFG;
+ if (val)
+ conf[0] |= MADERA_GP1_IP_CFG;
+ else
+ conf[0] &= ~MADERA_GP1_IP_CFG;
+
+ mask[1] |= MADERA_GP1_DIR_MASK;
+ conf[1] |= MADERA_GP1_DIR;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ mask[0] |= MADERA_GP1_IP_CFG;
+ conf[0] |= MADERA_GP1_IP_CFG;
+ mask[1] |= MADERA_GP1_DIR_MASK;
+ conf[1] |= MADERA_GP1_DIR;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ val = pinconf_to_config_argument(*configs);
+ mask[0] |= MADERA_GP1_LVL_MASK;
+ if (val)
+ conf[0] |= MADERA_GP1_LVL;
+ else
+ conf[0] &= ~MADERA_GP1_LVL;
+
+ mask[1] |= MADERA_GP1_DIR_MASK;
+ conf[1] &= ~MADERA_GP1_DIR;
+ break;
+ default:
+ break;
+ }
+
+ ++configs;
+ --num_configs;
+ }
+
+ dev_dbg(priv->dev,
+ "%s gpio%d 0x%x:0x%x 0x%x:0x%x\n",
+ __func__, pin + 1, reg, conf[0], reg + 1, conf[1]);
+
+ ret = regmap_update_bits(priv->madera->regmap, reg, mask[0], conf[0]);
+ if (ret)
+ goto err;
+
+ ++reg;
+ ret = regmap_update_bits(priv->madera->regmap, reg, mask[1], conf[1]);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(priv->dev,
+ "Failed to write GPIO%d conf (%d) reg 0x%x\n",
+ pin + 1, ret, reg);
+
+ return ret;
+}
+
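For reference, callers reach madera_pin_conf_set() with an array of packed (param, argument) words built by the generic pinconf layer; a minimal sketch under that assumption (the example function and the chosen pin are hypothetical):

    /* Sketch only: request pull-up and 8 mA drive strength on pin 3 (gpio4). */
    static int madera_conf_sketch(struct pinctrl_dev *pctldev)
    {
            unsigned long configs[] = {
                    pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 0),
                    pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, 8),
            };

            return madera_pin_conf_set(pctldev, 3, configs, ARRAY_SIZE(configs));
    }
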
+static int madera_pin_conf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev);
+ const struct madera_pin_groups *pin_group;
+ unsigned int n_groups = priv->chip->n_pin_groups;
+ int i, ret;
+
+ dev_dbg(priv->dev, "%s setting group %s\n", __func__,
+ madera_get_group_name(pctldev, selector));
+
+ if (selector >= n_groups) {
+ /* group is a single pin, convert to pin number and set */
+ return madera_pin_conf_set(pctldev,
+ selector - n_groups,
+ configs,
+ num_configs);
+ } else {
+ pin_group = &priv->chip->pin_groups[selector];
+
+ for (i = 0; i < pin_group->n_pins; ++i) {
+ ret = madera_pin_conf_set(pctldev,
+ pin_group->pins[i],
+ configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops madera_pin_conf_ops = {
+ .pin_config_get = madera_pin_conf_get,
+ .pin_config_set = madera_pin_conf_set,
+ .pin_config_group_set = madera_pin_conf_group_set,
+};
+
+static struct pinctrl_desc madera_pin_desc = {
+ .name = "madera-pinctrl",
+ .pins = madera_pins,
+ .pctlops = &madera_pin_group_ops,
+ .pmxops = &madera_pin_mux_ops,
+ .confops = &madera_pin_conf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int madera_pin_probe(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+ const struct madera_pdata *pdata = dev_get_platdata(madera->dev);
+ struct madera_pin_private *priv;
+ int ret;
+
+ BUILD_BUG_ON(ARRAY_SIZE(madera_pin_single_group_names) !=
+ ARRAY_SIZE(madera_pin_single_group_pins));
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->madera = madera;
+ pdev->dev.of_node = madera->dev->of_node;
+
+ switch (madera->type) {
+ case CS47L35:
+ if (IS_ENABLED(CONFIG_PINCTRL_CS47L35))
+ priv->chip = &cs47l35_pin_chip;
+ break;
+ case CS47L85:
+ case WM1840:
+ if (IS_ENABLED(CONFIG_PINCTRL_CS47L85))
+ priv->chip = &cs47l85_pin_chip;
+ break;
+ case CS47L90:
+ case CS47L91:
+ if (IS_ENABLED(CONFIG_PINCTRL_CS47L90))
+ priv->chip = &cs47l90_pin_chip;
+ break;
+ default:
+ break;
+ }
+
+ if (!priv->chip)
+ return -ENODEV;
+
+ madera_pin_desc.npins = priv->chip->n_pins;
+
+ ret = devm_pinctrl_register_and_init(&pdev->dev,
+ &madera_pin_desc,
+ priv,
+ &priv->pctl);
+ if (ret) {
+ dev_err(priv->dev, "Failed pinctrl register (%d)\n", ret);
+ return ret;
+ }
+
+ /* if the configuration is provided through pdata, apply it */
+ if (pdata) {
+ ret = pinctrl_register_mappings(pdata->gpio_configs,
+ pdata->n_gpio_configs);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to register pdata mappings (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = pinctrl_enable(priv->pctl);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable pinctrl (%d)\n", ret);
+ return ret;
+ }
+
+ dev_dbg(priv->dev, "pinctrl probed ok\n");
+
+ return 0;
+}
+
+static struct platform_driver madera_pin_driver = {
+ .probe = madera_pin_probe,
+ .driver = {
+ .name = "madera-pinctrl",
+ },
+};
+
+module_platform_driver(madera_pin_driver);
+
+MODULE_DESCRIPTION("Madera pinctrl driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera.h b/drivers/pinctrl/cirrus/pinctrl-madera.h
new file mode 100644
index 000000000000..8000f4f832a1
--- /dev/null
+++ b/drivers/pinctrl/cirrus/pinctrl-madera.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2016-2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#ifndef PINCTRL_MADERA_H
+#define PINCTRL_MADERA_H
+
+struct madera_pin_groups {
+ const char *name;
+ const unsigned int *pins;
+ unsigned int n_pins;
+};
+
+struct madera_pin_chip {
+ unsigned int n_pins;
+
+ const struct madera_pin_groups *pin_groups;
+ unsigned int n_pin_groups;
+};
+
+struct madera_pin_private {
+ struct madera *madera;
+
+ const struct madera_pin_chip *chip; /* chip-specific groups */
+
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+};
+
+extern const struct madera_pin_chip cs47l35_pin_chip;
+extern const struct madera_pin_chip cs47l85_pin_chip;
+extern const struct madera_pin_chip cs47l90_pin_chip;
+
+#endif
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e5a303002021..a3dd777e3ce8 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/list.h>
-#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/consumer.h>
@@ -617,6 +616,26 @@ struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
}
EXPORT_SYMBOL_GPL(pinctrl_generic_get_group);
+static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev,
+ const char *function)
+{
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ int ngroups = ops->get_groups_count(pctldev);
+ int selector = 0;
+
+ /* See if this pctldev has this group */
+ while (selector < ngroups) {
+ const char *gname = ops->get_group_name(pctldev, selector);
+
+ if (!strcmp(function, gname))
+ return selector;
+
+ selector++;
+ }
+
+ return -EINVAL;
+}
+
/**
* pinctrl_generic_add_group() - adds a new pin group
* @pctldev: pin controller device
@@ -631,6 +650,16 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
int *pins, int num_pins, void *data)
{
struct group_desc *group;
+ int selector;
+
+ if (!name)
+ return -EINVAL;
+
+ selector = pinctrl_generic_group_name_to_selector(pctldev, name);
+ if (selector >= 0)
+ return selector;
+
+ selector = pctldev->num_groups;
group = devm_kzalloc(pctldev->dev, sizeof(*group), GFP_KERNEL);
if (!group)
@@ -641,12 +670,11 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
group->num_pins = num_pins;
group->data = data;
- radix_tree_insert(&pctldev->pin_group_tree, pctldev->num_groups,
- group);
+ radix_tree_insert(&pctldev->pin_group_tree, selector, group);
pctldev->num_groups++;
- return 0;
+ return selector;
}
EXPORT_SYMBOL_GPL(pinctrl_generic_add_group);
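
With this change pinctrl_generic_add_group() returns the selector of the new (or already-existing) group rather than 0 on success, so callers can use the return value directly; a sketch under that assumption (name, pins and npins are hypothetical):

    /* Sketch: the return value now doubles as the group selector. */
    selector = pinctrl_generic_add_group(pctldev, "mygrp", pins, npins, NULL);
    if (selector < 0)
            return selector;        /* -EINVAL (bad name) or allocation failure */
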
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 8cf2eba17c8c..4a0526e567df 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -218,12 +218,6 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
unsigned int group_selector);
-static inline int
-pinctrl_generic_remove_last_group(struct pinctrl_dev *pctldev)
-{
- return pinctrl_generic_remove_group(pctldev, pctldev->num_groups - 1);
-}
-
#endif /* CONFIG_GENERIC_PINCTRL_GROUPS */
struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index c4aa411f5935..2969ff3162c3 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -111,17 +111,24 @@ static int dt_to_map_one_config(struct pinctrl *p,
int ret;
struct pinctrl_map *map;
unsigned num_maps;
+ bool allow_default = false;
/* Find the pin controller containing np_config */
np_pctldev = of_node_get(np_config);
for (;;) {
+ if (!allow_default)
+ allow_default = of_property_read_bool(np_pctldev,
+ "pinctrl-use-default");
+
np_pctldev = of_get_next_parent(np_pctldev);
if (!np_pctldev || of_node_is_root(np_pctldev)) {
- dev_info(p->dev, "could not find pctldev for node %pOF, deferring probe\n",
- np_config);
of_node_put(np_pctldev);
- /* OK let's just assume this will appear later then */
- return -EPROBE_DEFER;
+ ret = driver_deferred_probe_check_state(p->dev);
+ /* keep deferring if modules are enabled unless we've timed out */
+ if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+
+ return ret;
}
/* If we're creating a hog we can use the passed pctldev */
if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 0d8ba1ef5329..dccf64c55498 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -117,6 +117,13 @@ config PINCTRL_IMX7ULP
help
Say Y here to enable the imx7ulp pinctrl driver
+config PINCTRL_IMX8MQ
+ bool "IMX8MQ pinctrl driver"
+ depends on SOC_IMX8MQ
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8mq pinctrl driver
+
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
depends on SOC_VF610
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 368be8cfc9b1..73175b3e7c9c 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o
obj-$(CONFIG_PINCTRL_IMX6UL) += pinctrl-imx6ul.o
obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o
obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o
+obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 1c6bb15579e1..b04edc22dad7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -383,7 +383,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
const char *name;
int i, ret;
- if (group > pctldev->num_groups)
+ if (group >= pctldev->num_groups)
return;
seq_puts(s, "\n");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index c3bdd90b1422..deb7870b3d1a 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -429,7 +429,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
const char *name;
int i, ret;
- if (group > info->ngroups)
+ if (group >= info->ngroups)
return;
seq_puts(s, "\n");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
new file mode 100644
index 000000000000..8d39af541d5f
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8mq_pads {
+ MX8MQ_PAD_RESERVE0 = 0,
+ MX8MQ_PAD_RESERVE1 = 1,
+ MX8MQ_PAD_RESERVE2 = 2,
+ MX8MQ_PAD_RESERVE3 = 3,
+ MX8MQ_PAD_RESERVE4 = 4,
+ MX8MQ_IOMUXC_PMIC_STBY_REQ_CCMSRCGPCMIX = 5,
+ MX8MQ_IOMUXC_PMIC_ON_REQ_SNVSMIX = 6,
+ MX8MQ_IOMUXC_ONOFF_SNVSMIX = 7,
+ MX8MQ_IOMUXC_POR_B_SNVSMIX = 8,
+ MX8MQ_IOMUXC_RTC_RESET_B_SNVSMIX = 9,
+ MX8MQ_IOMUXC_GPIO1_IO00 = 10,
+ MX8MQ_IOMUXC_GPIO1_IO01 = 11,
+ MX8MQ_IOMUXC_GPIO1_IO02 = 12,
+ MX8MQ_IOMUXC_GPIO1_IO03 = 13,
+ MX8MQ_IOMUXC_GPIO1_IO04 = 14,
+ MX8MQ_IOMUXC_GPIO1_IO05 = 15,
+ MX8MQ_IOMUXC_GPIO1_IO06 = 16,
+ MX8MQ_IOMUXC_GPIO1_IO07 = 17,
+ MX8MQ_IOMUXC_GPIO1_IO08 = 18,
+ MX8MQ_IOMUXC_GPIO1_IO09 = 19,
+ MX8MQ_IOMUXC_GPIO1_IO10 = 20,
+ MX8MQ_IOMUXC_GPIO1_IO11 = 21,
+ MX8MQ_IOMUXC_GPIO1_IO12 = 22,
+ MX8MQ_IOMUXC_GPIO1_IO13 = 23,
+ MX8MQ_IOMUXC_GPIO1_IO14 = 24,
+ MX8MQ_IOMUXC_GPIO1_IO15 = 25,
+ MX8MQ_IOMUXC_ENET_MDC = 26,
+ MX8MQ_IOMUXC_ENET_MDIO = 27,
+ MX8MQ_IOMUXC_ENET_TD3 = 28,
+ MX8MQ_IOMUXC_ENET_TD2 = 29,
+ MX8MQ_IOMUXC_ENET_TD1 = 30,
+ MX8MQ_IOMUXC_ENET_TD0 = 31,
+ MX8MQ_IOMUXC_ENET_TX_CTL = 32,
+ MX8MQ_IOMUXC_ENET_TXC = 33,
+ MX8MQ_IOMUXC_ENET_RX_CTL = 34,
+ MX8MQ_IOMUXC_ENET_RXC = 35,
+ MX8MQ_IOMUXC_ENET_RD0 = 36,
+ MX8MQ_IOMUXC_ENET_RD1 = 37,
+ MX8MQ_IOMUXC_ENET_RD2 = 38,
+ MX8MQ_IOMUXC_ENET_RD3 = 39,
+ MX8MQ_IOMUXC_SD1_CLK = 40,
+ MX8MQ_IOMUXC_SD1_CMD = 41,
+ MX8MQ_IOMUXC_SD1_DATA0 = 42,
+ MX8MQ_IOMUXC_SD1_DATA1 = 43,
+ MX8MQ_IOMUXC_SD1_DATA2 = 44,
+ MX8MQ_IOMUXC_SD1_DATA3 = 45,
+ MX8MQ_IOMUXC_SD1_DATA4 = 46,
+ MX8MQ_IOMUXC_SD1_DATA5 = 47,
+ MX8MQ_IOMUXC_SD1_DATA6 = 48,
+ MX8MQ_IOMUXC_SD1_DATA7 = 49,
+ MX8MQ_IOMUXC_SD1_RESET_B = 50,
+ MX8MQ_IOMUXC_SD1_STROBE = 51,
+ MX8MQ_IOMUXC_SD2_CD_B = 52,
+ MX8MQ_IOMUXC_SD2_CLK = 53,
+ MX8MQ_IOMUXC_SD2_CMD = 54,
+ MX8MQ_IOMUXC_SD2_DATA0 = 55,
+ MX8MQ_IOMUXC_SD2_DATA1 = 56,
+ MX8MQ_IOMUXC_SD2_DATA2 = 57,
+ MX8MQ_IOMUXC_SD2_DATA3 = 58,
+ MX8MQ_IOMUXC_SD2_RESET_B = 59,
+ MX8MQ_IOMUXC_SD2_WP = 60,
+ MX8MQ_IOMUXC_NAND_ALE = 61,
+ MX8MQ_IOMUXC_NAND_CE0_B = 62,
+ MX8MQ_IOMUXC_NAND_CE1_B = 63,
+ MX8MQ_IOMUXC_NAND_CE2_B = 64,
+ MX8MQ_IOMUXC_NAND_CE3_B = 65,
+ MX8MQ_IOMUXC_NAND_CLE = 66,
+ MX8MQ_IOMUXC_NAND_DATA00 = 67,
+ MX8MQ_IOMUXC_NAND_DATA01 = 68,
+ MX8MQ_IOMUXC_NAND_DATA02 = 69,
+ MX8MQ_IOMUXC_NAND_DATA03 = 70,
+ MX8MQ_IOMUXC_NAND_DATA04 = 71,
+ MX8MQ_IOMUXC_NAND_DATA05 = 72,
+ MX8MQ_IOMUXC_NAND_DATA06 = 73,
+ MX8MQ_IOMUXC_NAND_DATA07 = 74,
+ MX8MQ_IOMUXC_NAND_DQS = 75,
+ MX8MQ_IOMUXC_NAND_RE_B = 76,
+ MX8MQ_IOMUXC_NAND_READY_B = 77,
+ MX8MQ_IOMUXC_NAND_WE_B = 78,
+ MX8MQ_IOMUXC_NAND_WP_B = 79,
+ MX8MQ_IOMUXC_SAI5_RXFS = 80,
+ MX8MQ_IOMUXC_SAI5_RXC = 81,
+ MX8MQ_IOMUXC_SAI5_RXD0 = 82,
+ MX8MQ_IOMUXC_SAI5_RXD1 = 83,
+ MX8MQ_IOMUXC_SAI5_RXD2 = 84,
+ MX8MQ_IOMUXC_SAI5_RXD3 = 85,
+ MX8MQ_IOMUXC_SAI5_MCLK = 86,
+ MX8MQ_IOMUXC_SAI1_RXFS = 87,
+ MX8MQ_IOMUXC_SAI1_RXC = 88,
+ MX8MQ_IOMUXC_SAI1_RXD0 = 89,
+ MX8MQ_IOMUXC_SAI1_RXD1 = 90,
+ MX8MQ_IOMUXC_SAI1_RXD2 = 91,
+ MX8MQ_IOMUXC_SAI1_RXD3 = 92,
+ MX8MQ_IOMUXC_SAI1_RXD4 = 93,
+ MX8MQ_IOMUXC_SAI1_RXD5 = 94,
+ MX8MQ_IOMUXC_SAI1_RXD6 = 95,
+ MX8MQ_IOMUXC_SAI1_RXD7 = 96,
+ MX8MQ_IOMUXC_SAI1_TXFS = 97,
+ MX8MQ_IOMUXC_SAI1_TXC = 98,
+ MX8MQ_IOMUXC_SAI1_TXD0 = 99,
+ MX8MQ_IOMUXC_SAI1_TXD1 = 100,
+ MX8MQ_IOMUXC_SAI1_TXD2 = 101,
+ MX8MQ_IOMUXC_SAI1_TXD3 = 102,
+ MX8MQ_IOMUXC_SAI1_TXD4 = 103,
+ MX8MQ_IOMUXC_SAI1_TXD5 = 104,
+ MX8MQ_IOMUXC_SAI1_TXD6 = 105,
+ MX8MQ_IOMUXC_SAI1_TXD7 = 106,
+ MX8MQ_IOMUXC_SAI1_MCLK = 107,
+ MX8MQ_IOMUXC_SAI2_RXFS = 108,
+ MX8MQ_IOMUXC_SAI2_RXC = 109,
+ MX8MQ_IOMUXC_SAI2_RXD0 = 110,
+ MX8MQ_IOMUXC_SAI2_TXFS = 111,
+ MX8MQ_IOMUXC_SAI2_TXC = 112,
+ MX8MQ_IOMUXC_SAI2_TXD0 = 113,
+ MX8MQ_IOMUXC_SAI2_MCLK = 114,
+ MX8MQ_IOMUXC_SAI3_RXFS = 115,
+ MX8MQ_IOMUXC_SAI3_RXC = 116,
+ MX8MQ_IOMUXC_SAI3_RXD = 117,
+ MX8MQ_IOMUXC_SAI3_TXFS = 118,
+ MX8MQ_IOMUXC_SAI3_TXC = 119,
+ MX8MQ_IOMUXC_SAI3_TXD = 120,
+ MX8MQ_IOMUXC_SAI3_MCLK = 121,
+ MX8MQ_IOMUXC_SPDIF_TX = 122,
+ MX8MQ_IOMUXC_SPDIF_RX = 123,
+ MX8MQ_IOMUXC_SPDIF_EXT_CLK = 124,
+ MX8MQ_IOMUXC_ECSPI1_SCLK = 125,
+ MX8MQ_IOMUXC_ECSPI1_MOSI = 126,
+ MX8MQ_IOMUXC_ECSPI1_MISO = 127,
+ MX8MQ_IOMUXC_ECSPI1_SS0 = 128,
+ MX8MQ_IOMUXC_ECSPI2_SCLK = 129,
+ MX8MQ_IOMUXC_ECSPI2_MOSI = 130,
+ MX8MQ_IOMUXC_ECSPI2_MISO = 131,
+ MX8MQ_IOMUXC_ECSPI2_SS0 = 132,
+ MX8MQ_IOMUXC_I2C1_SCL = 133,
+ MX8MQ_IOMUXC_I2C1_SDA = 134,
+ MX8MQ_IOMUXC_I2C2_SCL = 135,
+ MX8MQ_IOMUXC_I2C2_SDA = 136,
+ MX8MQ_IOMUXC_I2C3_SCL = 137,
+ MX8MQ_IOMUXC_I2C3_SDA = 138,
+ MX8MQ_IOMUXC_I2C4_SCL = 139,
+ MX8MQ_IOMUXC_I2C4_SDA = 140,
+ MX8MQ_IOMUXC_UART1_RXD = 141,
+ MX8MQ_IOMUXC_UART1_TXD = 142,
+ MX8MQ_IOMUXC_UART2_RXD = 143,
+ MX8MQ_IOMUXC_UART2_TXD = 144,
+ MX8MQ_IOMUXC_UART3_RXD = 145,
+ MX8MQ_IOMUXC_UART3_TXD = 146,
+ MX8MQ_IOMUXC_UART4_RXD = 147,
+ MX8MQ_IOMUXC_UART4_TXD = 148,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8mq_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_PMIC_STBY_REQ_CCMSRCGPCMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_PMIC_ON_REQ_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ONOFF_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_POR_B_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_RTC_RESET_B_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE0_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE1_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE2_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE3_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART4_TXD),
+};
+
+static const struct imx_pinctrl_soc_info imx8mq_pinctrl_info = {
+ .pins = imx8mq_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8mq_pinctrl_pads),
+ .gpr_compatible = "fsl,imx8mq-iomuxc-gpr",
+};
+
+static const struct of_device_id imx8mq_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8mq-iomuxc", .data = &imx8mq_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imx8mq_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx8mq_pinctrl_info);
+}
+
+static struct platform_driver imx8mq_pinctrl_driver = {
+ .driver = {
+ .name = "imx8mq-pinctrl",
+ .of_match_table = of_match_ptr(imx8mq_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8mq_pinctrl_probe,
+};
+
+static int __init imx8mq_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8mq_pinctrl_driver);
+}
+arch_initcall(imx8mq_pinctrl_init);
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 4aea1b8504f7..452a14f78707 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -1,6 +1,6 @@
-#
+# SPDX-License-Identifier: GPL-2.0
# Intel pin control drivers
-#
+
if (X86 || COMPILE_TEST)
config PINCTRL_BAYTRAIL
@@ -90,6 +90,14 @@ config PINCTRL_GEMINILAKE
 This pinctrl driver provides an interface for configuring Intel
 Gemini Lake SoC pins and using them as GPIOs.
+config PINCTRL_ICELAKE
+ tristate "Intel Ice Lake PCH pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface for configuring Intel
+ Ice Lake PCH pins and using them as GPIOs.
+
config PINCTRL_LEWISBURG
tristate "Intel Lewisburg pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index fadfe3ea2b04..cb491e655749 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
+obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 6b52ea1440a6..f38d596efa05 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Pinctrl GPIO driver for Intel Baytrail
- * Copyright (c) 2012-2013, Intel Corporation.
*
+ * Copyright (c) 2012-2013, Intel Corporation
* Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kernel.h>
@@ -1542,11 +1534,13 @@ static void byt_irq_unmask(struct irq_data *d)
switch (irqd_get_trigger_type(d)) {
case IRQ_TYPE_LEVEL_HIGH:
value |= BYT_TRIG_LVL;
+ /* fall through */
case IRQ_TYPE_EDGE_RISING:
value |= BYT_TRIG_POS;
break;
case IRQ_TYPE_LEVEL_LOW:
value |= BYT_TRIG_LVL;
+ /* fall through */
case IRQ_TYPE_EDGE_FALLING:
value |= BYT_TRIG_NEG;
break;
@@ -1691,7 +1685,8 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
value = readl(reg);
if (value)
dev_err(&vg->pdev->dev,
- "GPIO interrupt error, pins misconfigured\n");
+ "GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n",
+ base / 32, value);
}
}
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index e6e6fd112585..8b1c7b59ad3e 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Broxton SoC pinctrl/GPIO driver
*
* Copyright (C) 2015, 2016 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index 6243e7d95e7e..fb1afe55bf53 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Cannon Lake PCH pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -447,12 +444,8 @@ static const struct intel_function cnlh_functions[] = {
static const struct intel_community cnlh_communities[] = {
CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
- /*
- * ACPI MMIO resources are returned in reverse order for
- * communities 3 and 4.
- */
- CNL_COMMUNITY(3, 155, 248, cnlh_community3_gpps),
- CNL_COMMUNITY(2, 249, 298, cnlh_community4_gpps),
+ CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+ CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnlh_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
index 59216b0533d9..c788e37e338e 100644
--- a/drivers/pinctrl/intel/pinctrl-cedarfork.c
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Cedar Fork PCH pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -240,51 +237,51 @@ static const struct pinctrl_pin_desc cdf_pins[] = {
PINCTRL_PIN(179, "GBE_GPIO10"),
PINCTRL_PIN(180, "GBE_GPIO11"),
PINCTRL_PIN(181, "GBE_GPIO12"),
- PINCTRL_PIN(182, "SATA0_LED_N"),
- PINCTRL_PIN(183, "SATA1_LED_N"),
- PINCTRL_PIN(184, "SATA_PDETECT0"),
- PINCTRL_PIN(185, "SATA_PDETECT1"),
- PINCTRL_PIN(186, "SATA0_SDOUT"),
- PINCTRL_PIN(187, "SATA1_SDOUT"),
- PINCTRL_PIN(188, "SATA2_LED_N"),
- PINCTRL_PIN(189, "SATA_PDETECT2"),
- PINCTRL_PIN(190, "SATA2_SDOUT"),
+ PINCTRL_PIN(182, "PECI_SMB_DATA"),
+ PINCTRL_PIN(183, "SATA0_LED_N"),
+ PINCTRL_PIN(184, "SATA1_LED_N"),
+ PINCTRL_PIN(185, "SATA_PDETECT0"),
+ PINCTRL_PIN(186, "SATA_PDETECT1"),
+ PINCTRL_PIN(187, "SATA0_SDOUT"),
+ PINCTRL_PIN(188, "SATA1_SDOUT"),
+ PINCTRL_PIN(189, "SATA2_LED_N"),
+ PINCTRL_PIN(190, "SATA_PDETECT2"),
+ PINCTRL_PIN(191, "SATA2_SDOUT"),
/* EAST3 */
- PINCTRL_PIN(191, "ESPI_IO0"),
- PINCTRL_PIN(192, "ESPI_IO1"),
- PINCTRL_PIN(193, "ESPI_IO2"),
- PINCTRL_PIN(194, "ESPI_IO3"),
- PINCTRL_PIN(195, "ESPI_CLK"),
- PINCTRL_PIN(196, "ESPI_RST_N"),
- PINCTRL_PIN(197, "ESPI_CS0_N"),
- PINCTRL_PIN(198, "ESPI_ALRT0_N"),
- PINCTRL_PIN(199, "ESPI_CS1_N"),
- PINCTRL_PIN(200, "ESPI_ALRT1_N"),
- PINCTRL_PIN(201, "ESPI_CLK_LOOPBK"),
+ PINCTRL_PIN(192, "ESPI_IO0"),
+ PINCTRL_PIN(193, "ESPI_IO1"),
+ PINCTRL_PIN(194, "ESPI_IO2"),
+ PINCTRL_PIN(195, "ESPI_IO3"),
+ PINCTRL_PIN(196, "ESPI_CLK"),
+ PINCTRL_PIN(197, "ESPI_RST_N"),
+ PINCTRL_PIN(198, "ESPI_CS0_N"),
+ PINCTRL_PIN(199, "ESPI_ALRT0_N"),
+ PINCTRL_PIN(200, "ESPI_CS1_N"),
+ PINCTRL_PIN(201, "ESPI_ALRT1_N"),
+ PINCTRL_PIN(202, "ESPI_CLK_LOOPBK"),
/* EAST0 */
- PINCTRL_PIN(202, "SPI_CS0_N"),
- PINCTRL_PIN(203, "SPI_CS1_N"),
- PINCTRL_PIN(204, "SPI_MOSI_IO0"),
- PINCTRL_PIN(205, "SPI_MISO_IO1"),
- PINCTRL_PIN(206, "SPI_IO2"),
- PINCTRL_PIN(207, "SPI_IO3"),
- PINCTRL_PIN(208, "SPI_CLK"),
- PINCTRL_PIN(209, "SPI_CLK_LOOPBK"),
- PINCTRL_PIN(210, "SUSPWRDNACK"),
- PINCTRL_PIN(211, "PMU_SUSCLK"),
- PINCTRL_PIN(212, "ADR_COMPLETE"),
- PINCTRL_PIN(213, "ADR_TRIGGER_N"),
- PINCTRL_PIN(214, "PMU_SLP_S45_N"),
- PINCTRL_PIN(215, "PMU_SLP_S3_N"),
- PINCTRL_PIN(216, "PMU_WAKE_N"),
- PINCTRL_PIN(217, "PMU_PWRBTN_N"),
- PINCTRL_PIN(218, "PMU_RESETBUTTON_N"),
- PINCTRL_PIN(219, "PMU_PLTRST_N"),
- PINCTRL_PIN(220, "SUS_STAT_N"),
- PINCTRL_PIN(221, "PMU_I2C_CLK"),
- PINCTRL_PIN(222, "PMU_I2C_DATA"),
- PINCTRL_PIN(223, "PECI_SMB_CLK"),
- PINCTRL_PIN(224, "PECI_SMB_DATA"),
+ PINCTRL_PIN(203, "SPI_CS0_N"),
+ PINCTRL_PIN(204, "SPI_CS1_N"),
+ PINCTRL_PIN(205, "SPI_MOSI_IO0"),
+ PINCTRL_PIN(206, "SPI_MISO_IO1"),
+ PINCTRL_PIN(207, "SPI_IO2"),
+ PINCTRL_PIN(208, "SPI_IO3"),
+ PINCTRL_PIN(209, "SPI_CLK"),
+ PINCTRL_PIN(210, "SPI_CLK_LOOPBK"),
+ PINCTRL_PIN(211, "SUSPWRDNACK"),
+ PINCTRL_PIN(212, "PMU_SUSCLK"),
+ PINCTRL_PIN(213, "ADR_COMPLETE"),
+ PINCTRL_PIN(214, "ADR_TRIGGER_N"),
+ PINCTRL_PIN(215, "PMU_SLP_S45_N"),
+ PINCTRL_PIN(216, "PMU_SLP_S3_N"),
+ PINCTRL_PIN(217, "PMU_WAKE_N"),
+ PINCTRL_PIN(218, "PMU_PWRBTN_N"),
+ PINCTRL_PIN(219, "PMU_RESETBUTTON_N"),
+ PINCTRL_PIN(220, "PMU_PLTRST_N"),
+ PINCTRL_PIN(221, "SUS_STAT_N"),
+ PINCTRL_PIN(222, "PMU_I2C_CLK"),
+ PINCTRL_PIN(223, "PMU_I2C_DATA"),
+ PINCTRL_PIN(224, "PECI_SMB_CLK"),
PINCTRL_PIN(225, "PECI_SMB_ALRT_N"),
/* EMMC */
PINCTRL_PIN(226, "EMMC_CMD"),
@@ -315,9 +312,9 @@ static const struct intel_padgroup cdf_community0_gpps[] = {
};
static const struct intel_padgroup cdf_community1_gpps[] = {
- CDF_GPP(0, 168, 190), /* EAST2 */
- CDF_GPP(1, 191, 201), /* EAST3 */
- CDF_GPP(2, 202, 225), /* EAST0 */
+ CDF_GPP(0, 168, 191), /* EAST2 */
+ CDF_GPP(1, 192, 202), /* EAST3 */
+ CDF_GPP(2, 203, 225), /* EAST0 */
CDF_GPP(3, 226, 236), /* EMMC */
};
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 0f1019ae3993..6d31ad799987 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cherryview/Braswell pinctrl driver
*
@@ -7,10 +8,6 @@
* This driver is based on the original Cherryview GPIO driver by
* Ning Li <ning.li@intel.com>
* Alan Cox <alan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dmi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index 6572550cfe78..f321ab0d76e5 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Denverton SoC pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-geminilake.c b/drivers/pinctrl/intel/pinctrl-geminilake.c
index a6b94c930007..5c4c96752fc1 100644
--- a/drivers/pinctrl/intel/pinctrl-geminilake.c
+++ b/drivers/pinctrl/intel/pinctrl-geminilake.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Gemini Lake SoC pinctrl/GPIO driver
*
* Copyright (C) 2017 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-icelake.c b/drivers/pinctrl/intel/pinctrl-icelake.c
new file mode 100644
index 000000000000..630b966ce081
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-icelake.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Ice Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2018, Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define ICL_PAD_OWN 0x020
+#define ICL_PADCFGLOCK 0x080
+#define ICL_HOSTSW_OWN 0x0b0
+#define ICL_GPI_IE 0x110
+
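+/*
+ * ICL_GPP() describes one pad group: its register number, the first
+ * and last pad in the group, and the group's GPIO base number.
+ */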
+#define ICL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
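+/* gpio_base for pad groups that are not exposed as GPIOs */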
+#define ICL_NO_GPIO -1
+
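+/*
+ * ICL_COMMUNITY() builds one pad community from the register offsets
+ * above plus the given BAR number, pin range and pad groups.
+ */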
+#define ICL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = ICL_PAD_OWN, \
+ .padcfglock_offset = ICL_PADCFGLOCK, \
+ .hostown_offset = ICL_HOSTSW_OWN, \
+ .ie_offset = ICL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Ice Lake-LP */
+static const struct pinctrl_pin_desc icllp_pins[] = {
+ /* GPP_G */
+ PINCTRL_PIN(0, "SD3_CMD"),
+ PINCTRL_PIN(1, "SD3_D0"),
+ PINCTRL_PIN(2, "SD3_D1"),
+ PINCTRL_PIN(3, "SD3_D2"),
+ PINCTRL_PIN(4, "SD3_D3"),
+ PINCTRL_PIN(5, "SD3_CDB"),
+ PINCTRL_PIN(6, "SD3_CLK"),
+ PINCTRL_PIN(7, "SD3_WP"),
+ /* GPP_B */
+ PINCTRL_PIN(8, "CORE_VID_0"),
+ PINCTRL_PIN(9, "CORE_VID_1"),
+ PINCTRL_PIN(10, "VRALERTB"),
+ PINCTRL_PIN(11, "CPU_GP_2"),
+ PINCTRL_PIN(12, "CPU_GP_3"),
+ PINCTRL_PIN(13, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(14, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(15, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(16, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(17, "I2C5_SDA"),
+ PINCTRL_PIN(18, "I2C5_SCL"),
+ PINCTRL_PIN(19, "PMCALERTB"),
+ PINCTRL_PIN(20, "SLP_S0B"),
+ PINCTRL_PIN(21, "PLTRSTB"),
+ PINCTRL_PIN(22, "SPKR"),
+ PINCTRL_PIN(23, "GSPI0_CS0B"),
+ PINCTRL_PIN(24, "GSPI0_CLK"),
+ PINCTRL_PIN(25, "GSPI0_MISO"),
+ PINCTRL_PIN(26, "GSPI0_MOSI"),
+ PINCTRL_PIN(27, "GSPI1_CS0B"),
+ PINCTRL_PIN(28, "GSPI1_CLK"),
+ PINCTRL_PIN(29, "GSPI1_MISO"),
+ PINCTRL_PIN(30, "GSPI1_MOSI"),
+ PINCTRL_PIN(31, "SML1ALERTB"),
+ PINCTRL_PIN(32, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(33, "GSPI1_CLK_LOOPBK"),
+ /* GPP_A */
+ PINCTRL_PIN(34, "ESPI_IO_0"),
+ PINCTRL_PIN(35, "ESPI_IO_1"),
+ PINCTRL_PIN(36, "ESPI_IO_2"),
+ PINCTRL_PIN(37, "ESPI_IO_3"),
+ PINCTRL_PIN(38, "ESPI_CSB"),
+ PINCTRL_PIN(39, "ESPI_CLK"),
+ PINCTRL_PIN(40, "ESPI_RESETB"),
+ PINCTRL_PIN(41, "I2S2_SCLK"),
+ PINCTRL_PIN(42, "I2S2_SFRM"),
+ PINCTRL_PIN(43, "I2S2_TXD"),
+ PINCTRL_PIN(44, "I2S2_RXD"),
+ PINCTRL_PIN(45, "SATA_DEVSLP_2"),
+ PINCTRL_PIN(46, "SATAXPCIE_1"),
+ PINCTRL_PIN(47, "SATAXPCIE_2"),
+ PINCTRL_PIN(48, "USB2_OCB_1"),
+ PINCTRL_PIN(49, "USB2_OCB_2"),
+ PINCTRL_PIN(50, "USB2_OCB_3"),
+ PINCTRL_PIN(51, "DDSP_HPD_C"),
+ PINCTRL_PIN(52, "DDSP_HPD_B"),
+ PINCTRL_PIN(53, "DDSP_HPD_1"),
+ PINCTRL_PIN(54, "DDSP_HPD_2"),
+ PINCTRL_PIN(55, "I2S5_TXD"),
+ PINCTRL_PIN(56, "I2S5_RXD"),
+ PINCTRL_PIN(57, "I2S1_SCLK"),
+ PINCTRL_PIN(58, "ESPI_CLK_LOOPBK"),
+ /* GPP_H */
+ PINCTRL_PIN(59, "SD_1P8_SEL"),
+ PINCTRL_PIN(60, "SD_PWR_EN_B"),
+ PINCTRL_PIN(61, "GPPC_H_2"),
+ PINCTRL_PIN(62, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(63, "I2C2_SDA"),
+ PINCTRL_PIN(64, "I2C2_SCL"),
+ PINCTRL_PIN(65, "I2C3_SDA"),
+ PINCTRL_PIN(66, "I2C3_SCL"),
+ PINCTRL_PIN(67, "I2C4_SDA"),
+ PINCTRL_PIN(68, "I2C4_SCL"),
+ PINCTRL_PIN(69, "SRCCLKREQB_4"),
+ PINCTRL_PIN(70, "SRCCLKREQB_5"),
+ PINCTRL_PIN(71, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(72, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(73, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(74, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(75, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(76, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(77, "CPU_VCCIO_PWR_GATEB"),
+ PINCTRL_PIN(78, "TIME_SYNC_0"),
+ PINCTRL_PIN(79, "IMGCLKOUT_1"),
+ PINCTRL_PIN(80, "IMGCLKOUT_2"),
+ PINCTRL_PIN(81, "IMGCLKOUT_3"),
+ PINCTRL_PIN(82, "IMGCLKOUT_4"),
+ /* GPP_D */
+ PINCTRL_PIN(83, "ISH_GP_0"),
+ PINCTRL_PIN(84, "ISH_GP_1"),
+ PINCTRL_PIN(85, "ISH_GP_2"),
+ PINCTRL_PIN(86, "ISH_GP_3"),
+ PINCTRL_PIN(87, "IMGCLKOUT_0"),
+ PINCTRL_PIN(88, "SRCCLKREQB_0"),
+ PINCTRL_PIN(89, "SRCCLKREQB_1"),
+ PINCTRL_PIN(90, "SRCCLKREQB_2"),
+ PINCTRL_PIN(91, "SRCCLKREQB_3"),
+ PINCTRL_PIN(92, "ISH_SPI_CSB"),
+ PINCTRL_PIN(93, "ISH_SPI_CLK"),
+ PINCTRL_PIN(94, "ISH_SPI_MISO"),
+ PINCTRL_PIN(95, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(96, "ISH_UART0_RXD"),
+ PINCTRL_PIN(97, "ISH_UART0_TXD"),
+ PINCTRL_PIN(98, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(99, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(100, "ISH_GP_4"),
+ PINCTRL_PIN(101, "ISH_GP_5"),
+ PINCTRL_PIN(102, "I2S_MCLK"),
+ PINCTRL_PIN(103, "GSPI2_CLK_LOOPBK"),
+ /* GPP_F */
+ PINCTRL_PIN(104, "CNV_BRI_DT"),
+ PINCTRL_PIN(105, "CNV_BRI_RSP"),
+ PINCTRL_PIN(106, "CNV_RGI_DT"),
+ PINCTRL_PIN(107, "CNV_RGI_RSP"),
+ PINCTRL_PIN(108, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(109, "EMMC_HIP_MON"),
+ PINCTRL_PIN(110, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(111, "EMMC_CMD"),
+ PINCTRL_PIN(112, "EMMC_DATA0"),
+ PINCTRL_PIN(113, "EMMC_DATA1"),
+ PINCTRL_PIN(114, "EMMC_DATA2"),
+ PINCTRL_PIN(115, "EMMC_DATA3"),
+ PINCTRL_PIN(116, "EMMC_DATA4"),
+ PINCTRL_PIN(117, "EMMC_DATA5"),
+ PINCTRL_PIN(118, "EMMC_DATA6"),
+ PINCTRL_PIN(119, "EMMC_DATA7"),
+ PINCTRL_PIN(120, "EMMC_RCLK"),
+ PINCTRL_PIN(121, "EMMC_CLK"),
+ PINCTRL_PIN(122, "EMMC_RESETB"),
+ PINCTRL_PIN(123, "A4WP_PRESENT"),
+ /* vGPIO */
+ PINCTRL_PIN(124, "CNV_BTEN"),
+ PINCTRL_PIN(125, "CNV_WCEN"),
+ PINCTRL_PIN(126, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(127, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(128, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(129, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(130, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(131, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(132, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(133, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(134, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(135, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(136, "vUART0_TXD"),
+ PINCTRL_PIN(137, "vUART0_RXD"),
+ PINCTRL_PIN(138, "vUART0_CTS_B"),
+ PINCTRL_PIN(139, "vUART0_RTS_B"),
+ PINCTRL_PIN(140, "vISH_UART0_TXD"),
+ PINCTRL_PIN(141, "vISH_UART0_RXD"),
+ PINCTRL_PIN(142, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(143, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(144, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(145, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(146, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(147, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(148, "vI2S2_SCLK"),
+ PINCTRL_PIN(149, "vI2S2_SFRM"),
+ PINCTRL_PIN(150, "vI2S2_TXD"),
+ PINCTRL_PIN(151, "vI2S2_RXD"),
+ PINCTRL_PIN(152, "vSD3_CD_B"),
+ /* GPP_C */
+ PINCTRL_PIN(153, "SMBCLK"),
+ PINCTRL_PIN(154, "SMBDATA"),
+ PINCTRL_PIN(155, "SMBALERTB"),
+ PINCTRL_PIN(156, "SML0CLK"),
+ PINCTRL_PIN(157, "SML0DATA"),
+ PINCTRL_PIN(158, "SML0ALERTB"),
+ PINCTRL_PIN(159, "SML1CLK"),
+ PINCTRL_PIN(160, "SML1DATA"),
+ PINCTRL_PIN(161, "UART0_RXD"),
+ PINCTRL_PIN(162, "UART0_TXD"),
+ PINCTRL_PIN(163, "UART0_RTSB"),
+ PINCTRL_PIN(164, "UART0_CTSB"),
+ PINCTRL_PIN(165, "UART1_RXD"),
+ PINCTRL_PIN(166, "UART1_TXD"),
+ PINCTRL_PIN(167, "UART1_RTSB"),
+ PINCTRL_PIN(168, "UART1_CTSB"),
+ PINCTRL_PIN(169, "I2C0_SDA"),
+ PINCTRL_PIN(170, "I2C0_SCL"),
+ PINCTRL_PIN(171, "I2C1_SDA"),
+ PINCTRL_PIN(172, "I2C1_SCL"),
+ PINCTRL_PIN(173, "UART2_RXD"),
+ PINCTRL_PIN(174, "UART2_TXD"),
+ PINCTRL_PIN(175, "UART2_RTSB"),
+ PINCTRL_PIN(176, "UART2_CTSB"),
+ /* HVCMOS */
+ PINCTRL_PIN(177, "L_BKLTEN"),
+ PINCTRL_PIN(178, "L_BKLTCTL"),
+ PINCTRL_PIN(179, "L_VDDEN"),
+ PINCTRL_PIN(180, "SYS_PWROK"),
+ PINCTRL_PIN(181, "SYS_RESETB"),
+ PINCTRL_PIN(182, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(183, "SATAXPCIE_0"),
+ PINCTRL_PIN(184, "SPI1_IO_2"),
+ PINCTRL_PIN(185, "SPI1_IO_3"),
+ PINCTRL_PIN(186, "CPU_GP_0"),
+ PINCTRL_PIN(187, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(188, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(189, "GPPC_E_6"),
+ PINCTRL_PIN(190, "CPU_GP_1"),
+ PINCTRL_PIN(191, "SATA_LEDB"),
+ PINCTRL_PIN(192, "USB2_OCB_0"),
+ PINCTRL_PIN(193, "SPI1_CSB"),
+ PINCTRL_PIN(194, "SPI1_CLK"),
+ PINCTRL_PIN(195, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(196, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(197, "DDSP_HPD_A"),
+ PINCTRL_PIN(198, "ISH_GP_6"),
+ PINCTRL_PIN(199, "ISH_GP_7"),
+ PINCTRL_PIN(200, "DISP_MISC_4"),
+ PINCTRL_PIN(201, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(202, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(203, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(204, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(205, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(206, "DDPA_CTRLDATA"),
+ /* JTAG */
+ PINCTRL_PIN(207, "JTAG_TDO"),
+ PINCTRL_PIN(208, "JTAGX"),
+ PINCTRL_PIN(209, "PRDYB"),
+ PINCTRL_PIN(210, "PREQB"),
+ PINCTRL_PIN(211, "CPU_TRSTB"),
+ PINCTRL_PIN(212, "JTAG_TDI"),
+ PINCTRL_PIN(213, "JTAG_TMS"),
+ PINCTRL_PIN(214, "JTAG_TCK"),
+ PINCTRL_PIN(215, "ITP_PMODE"),
+ /* GPP_R */
+ PINCTRL_PIN(216, "HDA_BCLK"),
+ PINCTRL_PIN(217, "HDA_SYNC"),
+ PINCTRL_PIN(218, "HDA_SDO"),
+ PINCTRL_PIN(219, "HDA_SDI_0"),
+ PINCTRL_PIN(220, "HDA_RSTB"),
+ PINCTRL_PIN(221, "HDA_SDI_1"),
+ PINCTRL_PIN(222, "I2S1_TXD"),
+ PINCTRL_PIN(223, "I2S1_RXD"),
+ /* GPP_S */
+ PINCTRL_PIN(224, "SNDW1_CLK"),
+ PINCTRL_PIN(225, "SNDW1_DATA"),
+ PINCTRL_PIN(226, "SNDW2_CLK"),
+ PINCTRL_PIN(227, "SNDW2_DATA"),
+ PINCTRL_PIN(228, "SNDW3_CLK"),
+ PINCTRL_PIN(229, "SNDW3_DATA"),
+ PINCTRL_PIN(230, "SNDW4_CLK"),
+ PINCTRL_PIN(231, "SNDW4_DATA"),
+ /* SPI */
+ PINCTRL_PIN(232, "SPI0_IO_2"),
+ PINCTRL_PIN(233, "SPI0_IO_3"),
+ PINCTRL_PIN(234, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(235, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(236, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(237, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(238, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(239, "SPI0_CLK"),
+ PINCTRL_PIN(240, "SPI0_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup icllp_community0_gpps[] = {
+ ICL_GPP(0, 0, 7, 0), /* GPP_G */
+ ICL_GPP(1, 8, 33, 32), /* GPP_B */
+ ICL_GPP(2, 34, 58, 64), /* GPP_A */
+};
+
+static const struct intel_padgroup icllp_community1_gpps[] = {
+ ICL_GPP(0, 59, 82, 96), /* GPP_H */
+ ICL_GPP(1, 83, 103, 128), /* GPP_D */
+ ICL_GPP(2, 104, 123, 160), /* GPP_F */
+ ICL_GPP(3, 124, 152, 192), /* vGPIO */
+};
+
+static const struct intel_padgroup icllp_community4_gpps[] = {
+ ICL_GPP(0, 153, 176, 224), /* GPP_C */
+ ICL_GPP(1, 177, 182, ICL_NO_GPIO), /* HVCMOS */
+ ICL_GPP(2, 183, 206, 256), /* GPP_E */
+ ICL_GPP(3, 207, 215, ICL_NO_GPIO), /* JTAG */
+};
+
+static const struct intel_padgroup icllp_community5_gpps[] = {
+ ICL_GPP(0, 216, 223, 288), /* GPP_R */
+ ICL_GPP(1, 224, 231, 320), /* GPP_S */
+ ICL_GPP(2, 232, 240, ICL_NO_GPIO), /* SPI */
+};
+
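+/*
+ * The *_gpps arrays are named after the hardware community numbers;
+ * communities 2 and 3 are not handled here, so the four communities
+ * below use consecutive BAR numbers 0-3.
+ */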
+static const struct intel_community icllp_communities[] = {
+ ICL_COMMUNITY(0, 0, 58, icllp_community0_gpps),
+ ICL_COMMUNITY(1, 59, 152, icllp_community1_gpps),
+ ICL_COMMUNITY(2, 153, 215, icllp_community4_gpps),
+ ICL_COMMUNITY(3, 216, 240, icllp_community5_gpps),
+};
+
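+/* Each *_modes[] entry selects the native mux mode of the matching pin */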
+static const unsigned int icllp_spi0_pins[] = { 22, 23, 24, 25, 26 };
+static const unsigned int icllp_spi0_modes[] = { 3, 1, 1, 1, 1 };
+static const unsigned int icllp_spi1_pins[] = { 27, 28, 29, 30, 31 };
+static const unsigned int icllp_spi1_modes[] = { 1, 1, 1, 1, 3 };
+static const unsigned int icllp_spi2_pins[] = { 92, 93, 94, 95, 98 };
+static const unsigned int icllp_spi2_modes[] = { 3, 3, 3, 3, 2 };
+
+static const unsigned int icllp_i2c0_pins[] = { 169, 170 };
+static const unsigned int icllp_i2c1_pins[] = { 171, 172 };
+static const unsigned int icllp_i2c2_pins[] = { 63, 64 };
+static const unsigned int icllp_i2c3_pins[] = { 65, 66 };
+static const unsigned int icllp_i2c4_pins[] = { 67, 68 };
+
+static const unsigned int icllp_uart0_pins[] = { 161, 162, 163, 164 };
+static const unsigned int icllp_uart1_pins[] = { 165, 166, 167, 168 };
+static const unsigned int icllp_uart2_pins[] = { 173, 174, 175, 176 };
+
+static const struct intel_pingroup icllp_groups[] = {
+ PIN_GROUP("spi0_grp", icllp_spi0_pins, icllp_spi0_modes),
+ PIN_GROUP("spi1_grp", icllp_spi1_pins, icllp_spi1_modes),
+ PIN_GROUP("spi2_grp", icllp_spi2_pins, icllp_spi2_modes),
+ PIN_GROUP("i2c0_grp", icllp_i2c0_pins, 1),
+ PIN_GROUP("i2c1_grp", icllp_i2c1_pins, 1),
+ PIN_GROUP("i2c2_grp", icllp_i2c2_pins, 1),
+ PIN_GROUP("i2c3_grp", icllp_i2c3_pins, 1),
+ PIN_GROUP("i2c4_grp", icllp_i2c4_pins, 1),
+ PIN_GROUP("uart0_grp", icllp_uart0_pins, 1),
+ PIN_GROUP("uart1_grp", icllp_uart1_pins, 1),
+ PIN_GROUP("uart2_grp", icllp_uart2_pins, 1),
+};
+
+static const char * const icllp_spi0_groups[] = { "spi0_grp" };
+static const char * const icllp_spi1_groups[] = { "spi1_grp" };
+static const char * const icllp_spi2_groups[] = { "spi2_grp" };
+static const char * const icllp_i2c0_groups[] = { "i2c0_grp" };
+static const char * const icllp_i2c1_groups[] = { "i2c1_grp" };
+static const char * const icllp_i2c2_groups[] = { "i2c2_grp" };
+static const char * const icllp_i2c3_groups[] = { "i2c3_grp" };
+static const char * const icllp_i2c4_groups[] = { "i2c4_grp" };
+static const char * const icllp_uart0_groups[] = { "uart0_grp" };
+static const char * const icllp_uart1_groups[] = { "uart1_grp" };
+static const char * const icllp_uart2_groups[] = { "uart2_grp" };
+
+static const struct intel_function icllp_functions[] = {
+ FUNCTION("spi0", icllp_spi0_groups),
+ FUNCTION("spi1", icllp_spi1_groups),
+ FUNCTION("spi2", icllp_spi2_groups),
+ FUNCTION("i2c0", icllp_i2c0_groups),
+ FUNCTION("i2c1", icllp_i2c1_groups),
+ FUNCTION("i2c2", icllp_i2c2_groups),
+ FUNCTION("i2c3", icllp_i2c3_groups),
+ FUNCTION("i2c4", icllp_i2c4_groups),
+ FUNCTION("uart0", icllp_uart0_groups),
+ FUNCTION("uart1", icllp_uart1_groups),
+ FUNCTION("uart2", icllp_uart2_groups),
+};
+
+static const struct intel_pinctrl_soc_data icllp_soc_data = {
+ .pins = icllp_pins,
+ .npins = ARRAY_SIZE(icllp_pins),
+ .groups = icllp_groups,
+ .ngroups = ARRAY_SIZE(icllp_groups),
+ .functions = icllp_functions,
+ .nfunctions = ARRAY_SIZE(icllp_functions),
+ .communities = icllp_communities,
+ .ncommunities = ARRAY_SIZE(icllp_communities),
+};
+
+static int icl_pinctrl_probe(struct platform_device *pdev)
+{
+ return intel_pinctrl_probe(pdev, &icllp_soc_data);
+}
+
+static const struct dev_pm_ops icl_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static const struct acpi_device_id icl_pinctrl_acpi_match[] = {
+ { "INT3455" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, icl_pinctrl_acpi_match);
+
+static struct platform_driver icl_pinctrl_driver = {
+ .probe = icl_pinctrl_probe,
+ .driver = {
+ .name = "icelake-pinctrl",
+ .acpi_match_table = icl_pinctrl_acpi_match,
+ .pm = &icl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(icl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Ice Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e24a6b8a64e..62b009b27eda 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel pinctrl/GPIO core driver.
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -875,6 +872,36 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
return -EINVAL;
}
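+/*
+ * These callbacks lock the pad as an IRQ line when the interrupt is
+ * requested and release it again when the interrupt is freed.
+ */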
+static int intel_gpio_irq_reqres(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ int pin;
+ int ret;
+
+ pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+ if (pin >= 0) {
+ ret = gpiochip_lock_as_irq(gc, pin);
+ if (ret) {
+ dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
+ pin);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void intel_gpio_irq_relres(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ int pin;
+
+ pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+ if (pin >= 0)
+ gpiochip_unlock_as_irq(gc, pin);
+}
+
static void intel_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1090,6 +1117,8 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
static struct irq_chip intel_gpio_irqchip = {
.name = "intel-gpio",
+ .irq_request_resources = intel_gpio_irq_reqres,
+ .irq_release_resources = intel_gpio_irq_relres,
.irq_enable = intel_gpio_irq_enable,
.irq_ack = intel_gpio_irq_ack,
.irq_mask = intel_gpio_irq_mask,
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 98fdf9adf623..1785abf157e4 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Core pinctrl/GPIO driver for Intel GPIO controllers
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef PINCTRL_INTEL_H
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 14d56ea6cfdc..99894647eddd 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Lewisburg pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index d9357054d41d..4fa69f988c7b 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -1,18 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Merrifield SoC pinctrl driver
*
* Copyright (C) 2016, Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index fee3435a6f15..7984392104fe 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Sunrisepoint PCH pinctrl/GPIO driver
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index 30f3316747e2..a613e546717a 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index e3f1ab2290fc..6f931b85701b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1263,6 +1263,7 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
MTK_DISABLE);
if (err)
goto err;
+ /* else: fall through */
case PIN_CONFIG_INPUT_ENABLE:
case PIN_CONFIG_SLEW_RATE:
reg = (param == PIN_CONFIG_SLEW_RATE) ?
@@ -1424,7 +1425,7 @@ static struct pinctrl_desc mtk_desc = {
static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
{
- struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
int value, err;
err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
@@ -1436,7 +1437,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
- struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
}
@@ -1508,11 +1509,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
if (ret < 0)
return ret;
- ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
- chip->ngpio);
- if (ret < 0) {
- gpiochip_remove(chip);
- return ret;
+ /*
+ * Keep this only for backward compatibility with old pinctrl nodes
+ * that lack the "gpio-ranges" property. Calling
+ * gpiochip_add_pin_range() directly from a DeviceTree-supported
+ * pinctrl driver is otherwise DEPRECATED.
+ * Please see Section 2.1 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+ * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+ */
+ if (!of_find_property(np, "gpio-ranges", NULL)) {
+ ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+ chip->ngpio);
+ if (ret < 0) {
+ gpiochip_remove(chip);
+ return ret;
+ }
}
return 0;
@@ -1528,7 +1538,7 @@ static int mtk_build_groups(struct mtk_pinctrl *hw)
err = pinctrl_generic_add_group(hw->pctrl, group->name,
group->pins, group->num_pins,
group->data);
- if (err) {
+ if (err < 0) {
dev_err(hw->dev, "Failed to register group %s\n",
group->name);
return err;
@@ -1549,7 +1559,7 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
func->group_names,
func->num_group_names,
func->data);
- if (err) {
+ if (err < 0) {
dev_err(hw->dev, "Failed to register function %s\n",
func->name);
return err;
@@ -1695,15 +1705,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
mtk_desc.custom_conf_items = mtk_conf_items;
#endif
- hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw);
- if (IS_ERR(hw->pctrl))
- return PTR_ERR(hw->pctrl);
+ err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+ &hw->pctrl);
+ if (err)
+ return err;
/* Setup groups descriptions per SoC types */
err = mtk_build_groups(hw);
if (err) {
dev_err(&pdev->dev, "Failed to build groups\n");
- return 0;
+ return err;
}
/* Setup functions descriptions per SoC types */
@@ -1713,17 +1724,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
return err;
}
- err = mtk_build_gpiochip(hw, pdev->dev.of_node);
- if (err) {
- dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+ /*
+ * For pinctrl_claim_hogs() to work, pinctrl must not be enabled
+ * until all groups and functions have been added.
+ */
+ err = pinctrl_enable(hw->pctrl);
+ if (err)
return err;
- }
err = mtk_build_eint(hw, pdev);
if (err)
dev_warn(&pdev->dev,
"Failed to add EINT, but pinctrl still can work\n");
+ /* The gpiochip must be built after pinctrl_enable() is done */
+ err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+ return err;
+ }
+
platform_set_drvdata(pdev, hw);
return 0;
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 46a0918bd284..ad502eda4afa 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -672,6 +672,9 @@ static const unsigned int jtag_ao_tdo_pins[] = {GPIOAO_4};
static const unsigned int jtag_ao_clk_pins[] = {GPIOAO_5};
static const unsigned int jtag_ao_tms_pins[] = {GPIOAO_7};
+/* gen_clk */
+static const unsigned int gen_clk_ee_pins[] = {GPIOAO_13};
+
static struct meson_pmx_group meson_axg_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
@@ -718,6 +721,7 @@ static struct meson_pmx_group meson_axg_aobus_groups[] = {
GROUP(jtag_ao_tdo, 4),
GROUP(jtag_ao_clk, 4),
GROUP(jtag_ao_tms, 4),
+ GROUP(gen_clk_ee, 4),
};
static const char * const gpio_periphs_groups[] = {
@@ -947,6 +951,10 @@ static const char * const tdmb_groups[] = {
"tdmb_din2", "tdmb_dout2", "tdmb_din3", "tdmb_dout3",
};
+static const char * const gen_clk_ee_groups[] = {
+ "gen_clk_ee",
+};
+
static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
@@ -992,6 +1000,7 @@ static struct meson_pmx_func meson_axg_aobus_functions[] = {
FUNCTION(pwm_ao_c),
FUNCTION(pwm_ao_d),
FUNCTION(jtag_ao),
+ FUNCTION(gen_clk_ee),
};
static struct meson_bank meson_axg_periphs_banks[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 2c97a2e07a5f..4ceb06f8a33c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -243,6 +243,8 @@ static const unsigned int i2s_out_ch67_y_pins[] = { GPIOY_10 };
static const unsigned int spdif_out_y_pins[] = { GPIOY_12 };
+static const unsigned int gen_clk_out_pins[] = { GPIOY_15 };
+
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
MESON_PIN(GPIOAO_0),
MESON_PIN(GPIOAO_1),
@@ -453,6 +455,7 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(i2s_out_ch45_y, 1, 6),
GROUP(i2s_out_ch67_y, 1, 7),
GROUP(spdif_out_y, 1, 9),
+ GROUP(gen_clk_out, 6, 15),
/* Bank Z */
GROUP(eth_mdio, 6, 1),
@@ -706,6 +709,10 @@ static const char * const spdif_out_groups[] = {
"spdif_out_y",
};
+static const char * const gen_clk_out_groups[] = {
+ "gen_clk_out",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -790,6 +797,7 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(hdmi_i2c),
FUNCTION(i2s_out),
FUNCTION(spdif_out),
+ FUNCTION(gen_clk_out),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 53cf800688e9..aa48b3f23c7f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -80,6 +80,18 @@ struct armada_37xx_pmx_func {
unsigned int ngroups;
};
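+/* GPIO and pinmux register state saved across suspend/resume */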
+struct armada_37xx_pm_state {
+ u32 out_en_l;
+ u32 out_en_h;
+ u32 out_val_l;
+ u32 out_val_h;
+ u32 irq_en_l;
+ u32 irq_en_h;
+ u32 irq_pol_l;
+ u32 irq_pol_h;
+ u32 selection;
+};
+
struct armada_37xx_pinctrl {
struct regmap *regmap;
void __iomem *base;
@@ -94,6 +106,7 @@ struct armada_37xx_pinctrl {
unsigned int ngroups;
struct armada_37xx_pmx_func *funcs;
unsigned int nfuncs;
+ struct armada_37xx_pm_state pm;
};
#define PIN_GRP(_name, _start, _nr, _mask, _func1, _func2) \
@@ -996,6 +1009,110 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
return 0;
}
+#if defined(CONFIG_PM)
+static int armada_3700_pinctrl_suspend(struct device *dev)
+{
+ struct armada_37xx_pinctrl *info = dev_get_drvdata(dev);
+
+ /* Save GPIO state */
+ regmap_read(info->regmap, OUTPUT_EN, &info->pm.out_en_l);
+ regmap_read(info->regmap, OUTPUT_EN + sizeof(u32), &info->pm.out_en_h);
+ regmap_read(info->regmap, OUTPUT_VAL, &info->pm.out_val_l);
+ regmap_read(info->regmap, OUTPUT_VAL + sizeof(u32),
+ &info->pm.out_val_h);
+
+ info->pm.irq_en_l = readl(info->base + IRQ_EN);
+ info->pm.irq_en_h = readl(info->base + IRQ_EN + sizeof(u32));
+ info->pm.irq_pol_l = readl(info->base + IRQ_POL);
+ info->pm.irq_pol_h = readl(info->base + IRQ_POL + sizeof(u32));
+
+ /* Save pinctrl state */
+ regmap_read(info->regmap, SELECTION, &info->pm.selection);
+
+ return 0;
+}
+
+static int armada_3700_pinctrl_resume(struct device *dev)
+{
+ struct armada_37xx_pinctrl *info = dev_get_drvdata(dev);
+ struct gpio_chip *gc;
+ struct irq_domain *d;
+ int i;
+
+ /* Restore GPIO state */
+ regmap_write(info->regmap, OUTPUT_EN, info->pm.out_en_l);
+ regmap_write(info->regmap, OUTPUT_EN + sizeof(u32),
+ info->pm.out_en_h);
+ regmap_write(info->regmap, OUTPUT_VAL, info->pm.out_val_l);
+ regmap_write(info->regmap, OUTPUT_VAL + sizeof(u32),
+ info->pm.out_val_h);
+
+ /*
+ * Input levels may change during suspend, which is not monitored at
+ * that time. GPIOs used for both-edge IRQs may not be synchronized
+ * anymore with their polarities (rising/falling edge) and must be
+ * re-configured manually.
+ */
+ gc = &info->gpio_chip;
+ d = gc->irq.domain;
+ for (i = 0; i < gc->ngpio; i++) {
+ u32 irq_bit = BIT(i % GPIO_PER_REG);
+ u32 mask, *irq_pol, input_reg, virq, type, level;
+
+ if (i < GPIO_PER_REG) {
+ mask = info->pm.irq_en_l;
+ irq_pol = &info->pm.irq_pol_l;
+ input_reg = INPUT_VAL;
+ } else {
+ mask = info->pm.irq_en_h;
+ irq_pol = &info->pm.irq_pol_h;
+ input_reg = INPUT_VAL + sizeof(u32);
+ }
+
+ if (!(mask & irq_bit))
+ continue;
+
+ virq = irq_find_mapping(d, i);
+ type = irq_get_trigger_type(virq);
+
+ /*
+ * Synchronize level and polarity for both-edge irqs:
+ * - a high input level expects a falling edge,
+ * - a low input level expects a rising edge.
+ */
+ if ((type & IRQ_TYPE_SENSE_MASK) ==
+ IRQ_TYPE_EDGE_BOTH) {
+ regmap_read(info->regmap, input_reg, &level);
+ if ((*irq_pol ^ level) & irq_bit)
+ *irq_pol ^= irq_bit;
+ }
+ }
+
+ writel(info->pm.irq_en_l, info->base + IRQ_EN);
+ writel(info->pm.irq_en_h, info->base + IRQ_EN + sizeof(u32));
+ writel(info->pm.irq_pol_l, info->base + IRQ_POL);
+ writel(info->pm.irq_pol_h, info->base + IRQ_POL + sizeof(u32));
+
+ /* Restore pinctrl state */
+ regmap_write(info->regmap, SELECTION, info->pm.selection);
+
+ return 0;
+}
+
+/*
+ * Since pinctrl is an infrastructure module, it must resume before the
+ * other I/O drivers that depend on it.
+ */
+static const struct dev_pm_ops armada_3700_pinctrl_pm_ops = {
+ .suspend_late = armada_3700_pinctrl_suspend,
+ .resume_early = armada_3700_pinctrl_resume,
+};
+
+#define PINCTRL_ARMADA_37XX_DEV_PM_OPS (&armada_3700_pinctrl_pm_ops)
+#else
+#define PINCTRL_ARMADA_37XX_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
{
.compatible = "marvell,armada3710-sb-pinctrl",
@@ -1049,6 +1166,7 @@ static struct platform_driver armada_37xx_pinctrl_driver = {
.driver = {
.name = "armada-37xx-pinctrl",
.of_match_table = armada_37xx_pinctrl_of_match,
+ .pm = PINCTRL_ARMADA_37XX_DEV_PM_OPS,
},
};
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index aa592ef23a29..e3689cc62a41 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -101,15 +101,16 @@ static int abx500_gpio_get_bit(struct gpio_chip *chip, u8 reg,
reg += offset / 8;
ret = abx500_get_register_interruptible(pct->dev,
AB8500_MISC, reg, &val);
-
- *bit = !!(val & BIT(pos));
-
- if (ret < 0)
+ if (ret < 0) {
dev_err(pct->dev,
"%s read reg =%x, offset=%x failed (%d)\n",
__func__, reg, offset, ret);
+ return ret;
+ }
- return ret;
+ *bit = !!(val & BIT(pos));
+
+ return 0;
}
static int abx500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 04ae139671c8..41ccc759b8b8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -247,16 +247,16 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
+ u8 level = (pin_reg >> ACTIVE_LEVEL_OFF) &
+ ACTIVE_LEVEL_MASK;
interrupt_enable = "interrupt is enabled|";
- if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
- !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
- active_level = "Active low|";
- else if (pin_reg & BIT(ACTIVE_LEVEL_OFF) &&
- !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
+ if (level == ACTIVE_LEVEL_HIGH)
active_level = "Active high|";
- else if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
- pin_reg & BIT(ACTIVE_LEVEL_OFF + 1))
+ else if (level == ACTIVE_LEVEL_LOW)
+ active_level = "Active low|";
+ else if (!(pin_reg & BIT(LEVEL_TRIG_OFF)) &&
+ level == ACTIVE_LEVEL_BOTH)
active_level = "Active on both|";
else
active_level = "Unknown Active level|";
@@ -552,7 +552,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
/* Each status bit covers four pins */
for (i = 0; i < 4; i++) {
regval = readl(regs + i);
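+ /* Skip the pin unless its interrupt is both pending and unmasked */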
- if (!(regval & PIN_IRQ_PENDING))
+ if (!(regval & PIN_IRQ_PENDING) ||
+ !(regval & BIT(INTERRUPT_MASK_OFF)))
continue;
irq = irq_find_mapping(gc->irq.domain, irqnr + i);
generic_handle_irq(irq);
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 8fa453a59da5..22af7edfdb38 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -54,6 +54,10 @@
#define ACTIVE_LEVEL_MASK 0x3UL
#define DRV_STRENGTH_SEL_MASK 0x3UL
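+
+/* Values of the 2-bit ACTIVE_LEVEL field */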
+#define ACTIVE_LEVEL_HIGH 0x0UL
+#define ACTIVE_LEVEL_LOW 0x1UL
+#define ACTIVE_LEVEL_BOTH 0x2UL
+
#define DB_TYPE_NO_DEBOUNCE 0x0UL
#define DB_TYPE_PRESERVE_LOW_GLITCH 0x1UL
#define DB_TYPE_PRESERVE_HIGH_GLITCH 0x2UL
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 67e4d9ffa6b1..ef7ab208b951 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -14,6 +14,7 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/pinctrl/at91.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
/* FIXME: needed for gpio_to_irq(), get rid of this */
@@ -49,6 +50,8 @@
#define ATMEL_PIO_IFSCEN_MASK BIT(13)
#define ATMEL_PIO_OPD_MASK BIT(14)
#define ATMEL_PIO_SCHMITT_MASK BIT(15)
+#define ATMEL_PIO_DRVSTR_MASK GENMASK(17, 16)
+#define ATMEL_PIO_DRVSTR_OFFSET 16
#define ATMEL_PIO_CFGR_EVTSEL_MASK GENMASK(26, 24)
#define ATMEL_PIO_CFGR_EVTSEL_FALLING (0 << 24)
#define ATMEL_PIO_CFGR_EVTSEL_RISING (1 << 24)
@@ -75,6 +78,9 @@
#define ATMEL_GET_PIN_FUNC(pinfunc) ((pinfunc >> 16) & 0xf)
#define ATMEL_GET_PIN_IOSET(pinfunc) ((pinfunc >> 20) & 0xf)
+/* Custom pinconf parameters */
+#define ATMEL_PIN_CONFIG_DRIVE_STRENGTH (PIN_CONFIG_END + 1)
+
struct atmel_pioctrl_data {
unsigned nbanks;
};
@@ -139,6 +145,10 @@ static const char * const atmel_functions[] = {
"GPIO", "A", "B", "C", "D", "E", "F", "G"
};
+static const struct pinconf_generic_params atmel_custom_bindings[] = {
+ {"atmel,drive-strength", ATMEL_PIN_CONFIG_DRIVE_STRENGTH, 0},
+};
+
/* --- GPIO --- */
static unsigned int atmel_gpio_read(struct atmel_pioctrl *atmel_pioctrl,
unsigned int bank, unsigned int reg)
@@ -692,6 +702,11 @@ static int atmel_conf_pin_config_group_get(struct pinctrl_dev *pctldev,
return -EINVAL;
arg = 1;
break;
+ case ATMEL_PIN_CONFIG_DRIVE_STRENGTH:
+ if (!(res & ATMEL_PIO_DRVSTR_MASK))
+ return -EINVAL;
+ arg = (res & ATMEL_PIO_DRVSTR_MASK) >> ATMEL_PIO_DRVSTR_OFFSET;
+ break;
default:
return -ENOTSUPP;
}
@@ -777,6 +792,18 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
ATMEL_PIO_SODR);
}
break;
+ case ATMEL_PIN_CONFIG_DRIVE_STRENGTH:
+ switch (arg) {
+ case ATMEL_PIO_DRVSTR_LO:
+ case ATMEL_PIO_DRVSTR_ME:
+ case ATMEL_PIO_DRVSTR_HI:
+ conf &= (~ATMEL_PIO_DRVSTR_MASK);
+ conf |= arg << ATMEL_PIO_DRVSTR_OFFSET;
+ break;
+ default:
+ dev_warn(pctldev->dev, "drive strength not updated (incorrect value)\n");
+ }
+ break;
default:
dev_warn(pctldev->dev,
"unsupported configuration parameter: %u\n",
@@ -816,6 +843,19 @@ static void atmel_conf_pin_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "%s ", "open-drain");
if (conf & ATMEL_PIO_SCHMITT_MASK)
seq_printf(s, "%s ", "schmitt");
+ if (conf & ATMEL_PIO_DRVSTR_MASK) {
+ switch ((conf & ATMEL_PIO_DRVSTR_MASK) >> ATMEL_PIO_DRVSTR_OFFSET) {
+ case ATMEL_PIO_DRVSTR_ME:
+ seq_printf(s, "%s ", "medium-drive");
+ break;
+ case ATMEL_PIO_DRVSTR_HI:
+ seq_printf(s, "%s ", "high-drive");
+ break;
+ /* ATMEL_PIO_DRVSTR_LO, and 0, which is the default value at reset */
+ default:
+ seq_printf(s, "%s ", "low-drive");
+ }
+ }
}
static const struct pinconf_ops atmel_confops = {
@@ -931,10 +971,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "unable to get atmel pinctrl resource\n");
- return -EINVAL;
- }
atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(atmel_pioctrl->reg_base))
return -EINVAL;
@@ -958,6 +994,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
atmel_pinctrl_desc.pins = pin_desc;
atmel_pinctrl_desc.npins = atmel_pioctrl->npins;
+ atmel_pinctrl_desc.num_custom_params = ARRAY_SIZE(atmel_custom_bindings);
+ atmel_pinctrl_desc.custom_params = atmel_custom_bindings;
/* One pin is one group since a pin can achieve all functions. */
group_names = devm_kcalloc(dev,
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index a52779f33ad4..afd0b533c40a 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -316,7 +316,7 @@ static const struct pinctrl_ops axp20x_pctrl_ops = {
.get_group_pins = axp20x_group_pins,
};
-static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+static int axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
unsigned int mask_len,
struct axp20x_pinctrl_function *func,
const struct pinctrl_pin_desc *pins)
@@ -331,18 +331,22 @@ static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
func->groups = devm_kcalloc(dev,
ngroups, sizeof(const char *),
GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
group = func->groups;
for_each_set_bit(bit, &mask_cpy, mask_len) {
*group = pins[bit].name;
group++;
}
}
+
+ return 0;
}
-static void axp20x_build_funcs_groups(struct platform_device *pdev)
+static int axp20x_build_funcs_groups(struct platform_device *pdev)
{
struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
- int i, pin, npins = pctl->desc->npins;
+ int i, ret, pin, npins = pctl->desc->npins;
pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
@@ -366,13 +370,19 @@ static void axp20x_build_funcs_groups(struct platform_device *pdev)
pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
}
- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
npins, &pctl->funcs[AXP20X_FUNC_LDO],
pctl->desc->pins);
+ if (ret)
+ return ret;
- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
npins, &pctl->funcs[AXP20X_FUNC_ADC],
pctl->desc->pins);
+ if (ret)
+ return ret;
+
+ return 0;
}
static const struct of_device_id axp20x_pctl_match[] = {
@@ -424,7 +434,11 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pctl);
- axp20x_build_funcs_groups(pdev);
+ ret = axp20x_build_funcs_groups(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to build groups\n");
+ return ret;
+ }
pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
if (!pctrl_desc)
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 8c9970ae8505..fa7d998e1d5a 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -1696,6 +1696,7 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.name = "gmii_gmac0_grp",
.pins = gmii_gmac0_3516_pins,
.num_pins = ARRAY_SIZE(gmii_gmac0_3516_pins),
+ .mask = GEMINI_GMAC_IOSEL_MASK,
.driving_mask = GENMASK(17, 16),
},
{
@@ -1703,6 +1704,7 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.pins = gmii_gmac1_3516_pins,
.num_pins = ARRAY_SIZE(gmii_gmac1_3516_pins),
/* Bring out RGMII on the GMAC1 pins */
+ .mask = GEMINI_GMAC_IOSEL_MASK,
.value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
.driving_mask = GENMASK(19, 18),
},
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index a1d7156d0a43..6a1b6058b991 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
}
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 022307dd4b54..4a8a8efadefa 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -666,7 +666,7 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
* can be used to fix state for MCP23xxx, that temporary
* lost its power supply.
*/
-#define MCP23S08_CONFIG_REGS 8
+#define MCP23S08_CONFIG_REGS 7
static int __check_mcp23s08_reg_cache(struct mcp23s08 *mcp)
{
int cached[MCP23S08_CONFIG_REGS];
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 15bb1cb8729b..f9fc2b3a8731 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -132,7 +133,7 @@ OCELOT_P(0, SG0, NONE, NONE);
OCELOT_P(1, SG0, NONE, NONE);
OCELOT_P(2, SG0, NONE, NONE);
OCELOT_P(3, SG0, NONE, NONE);
-OCELOT_P(4, IRQ0_IN, IRQ0_OUT, TWI);
+OCELOT_P(4, IRQ0_IN, IRQ0_OUT, TWI_SCL_M);
OCELOT_P(5, IRQ1_IN, IRQ1_OUT, PCI_WAKE);
OCELOT_P(6, UART, TWI_SCL_M, NONE);
OCELOT_P(7, UART, TWI_SCL_M, NONE);
@@ -427,11 +428,98 @@ static const struct gpio_chip ocelot_gpiolib_chip = {
.owner = THIS_MODULE,
};
+static void ocelot_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ regmap_update_bits(info->map, OCELOT_GPIO_INTR_ENA, BIT(gpio), 0);
+}
+
+static void ocelot_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ regmap_update_bits(info->map, OCELOT_GPIO_INTR_ENA, BIT(gpio),
+ BIT(gpio));
+}
+
+static void ocelot_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ regmap_write_bits(info->map, OCELOT_GPIO_INTR, BIT(gpio), BIT(gpio));
+}
+
+static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
+
+static struct irq_chip ocelot_eoi_irqchip = {
+ .name = "gpio",
+ .irq_mask = ocelot_irq_mask,
+ .irq_eoi = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask,
+ .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+ .irq_set_type = ocelot_irq_set_type,
+};
+
+static struct irq_chip ocelot_irqchip = {
+ .name = "gpio",
+ .irq_mask = ocelot_irq_mask,
+ .irq_ack = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask,
+ .irq_set_type = ocelot_irq_set_type,
+};
+
+static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
+ handle_fasteoi_irq, NULL);
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
+ handle_edge_irq, NULL);
+
+ return 0;
+}
+
+static void ocelot_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *parent_chip = irq_desc_get_chip(desc);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int reg = 0, irq;
+ unsigned long irqs;
+
+ regmap_read(info->map, OCELOT_GPIO_INTR_IDENT, &reg);
+ if (!reg)
+ return;
+
+ chained_irq_enter(parent_chip, desc);
+
+ irqs = reg;
+
+ for_each_set_bit(irq, &irqs, OCELOT_PINS) {
+ generic_handle_irq(irq_linear_revmap(chip->irq.domain, irq));
+ }
+
+ chained_irq_exit(parent_chip, desc);
+}
+
static int ocelot_gpiochip_register(struct platform_device *pdev,
struct ocelot_pinctrl *info)
{
struct gpio_chip *gc;
- int ret;
+ int ret, irq;
info->gpio_chip = ocelot_gpiolib_chip;
@@ -446,7 +534,17 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
if (ret)
return ret;
- /* TODO: this can be used as an irqchip but no board is using that */
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (irq <= 0)
+ return irq;
+
+ ret = gpiochip_irqchip_add(gc, &ocelot_irqchip, 0, handle_edge_irq,
+ IRQ_TYPE_NONE);
+ if (ret)
+ return ret;
+
+ gpiochip_set_chained_irqchip(gc, &ocelot_irqchip, irq,
+ ocelot_irq_handler);
return 0;
}
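With the chained irqchip registered above, Ocelot GPIO lines can now back ordinary device interrupts. A minimal consumer sketch (hypothetical driver; gpiod, my_handler and priv are assumptions, not part of this patch):

	/*
	 * Request a level-high interrupt on an Ocelot GPIO line.  The
	 * hwirq resolves through chip->irq.domain, and the threaded
	 * IRQF_ONESHOT form matches the IRQCHIP_EOI_THREADED fasteoi
	 * path installed by ocelot_irq_set_type().
	 */
	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return irq;
	ret = devm_request_threaded_irq(dev, irq, NULL, my_handler,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"my-device", priv);
	if (ret)
		return ret;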
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 717c0f4449a0..f76edf664539 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -1006,6 +1006,7 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
const char *grpname;
const char **fngrps;
int ret, npins;
+ int gsel, fsel;
npins = rza1_dt_node_pin_count(np);
if (npins < 0) {
@@ -1055,18 +1056,19 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
fngrps[0] = grpname;
mutex_lock(&rza1_pctl->mutex);
- ret = pinctrl_generic_add_group(pctldev, grpname, grpins, npins,
- NULL);
- if (ret) {
+ gsel = pinctrl_generic_add_group(pctldev, grpname, grpins, npins,
+ NULL);
+ if (gsel < 0) {
mutex_unlock(&rza1_pctl->mutex);
- return ret;
+ return gsel;
}
- ret = pinmux_generic_add_function(pctldev, grpname, fngrps, 1,
- mux_confs);
- if (ret)
+ fsel = pinmux_generic_add_function(pctldev, grpname, fngrps, 1,
+ mux_confs);
+ if (fsel < 0) {
+ ret = fsel;
goto remove_group;
- mutex_unlock(&rza1_pctl->mutex);
+ }
dev_info(rza1_pctl->dev, "Parsed function and group %s with %d pins\n",
grpname, npins);
@@ -1083,15 +1085,15 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
*num_maps = 1;
+ mutex_unlock(&rza1_pctl->mutex);
return 0;
remove_function:
- mutex_lock(&rza1_pctl->mutex);
- pinmux_generic_remove_last_function(pctldev);
+ pinmux_generic_remove_function(pctldev, fsel);
remove_group:
- pinctrl_generic_remove_last_group(pctldev);
+ pinctrl_generic_remove_group(pctldev, gsel);
mutex_unlock(&rza1_pctl->mutex);
dev_info(rza1_pctl->dev, "Unable to parse function and group %s\n",
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index e5647dac0818..7ec72ff2419a 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -747,38 +747,44 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
/**
* pcs_add_function() - adds a new function to the function list
* @pcs: pcs driver instance
- * @np: device node of the mux entry
+ * @fcn: pointer used to return the newly allocated function
* @name: name of the function
* @vals: array of mux register value pairs used by the function
* @nvals: number of mux register value pairs
* @pgnames: array of pingroup names for the function
* @npgnames: number of pingroup names
+ *
+ * Caller must take care of locking.
*/
-static struct pcs_function *pcs_add_function(struct pcs_device *pcs,
- struct device_node *np,
- const char *name,
- struct pcs_func_vals *vals,
- unsigned nvals,
- const char **pgnames,
- unsigned npgnames)
+static int pcs_add_function(struct pcs_device *pcs,
+ struct pcs_function **fcn,
+ const char *name,
+ struct pcs_func_vals *vals,
+ unsigned int nvals,
+ const char **pgnames,
+ unsigned int npgnames)
{
struct pcs_function *function;
- int res;
+ int selector;
function = devm_kzalloc(pcs->dev, sizeof(*function), GFP_KERNEL);
if (!function)
- return NULL;
+ return -ENOMEM;
function->vals = vals;
function->nvals = nvals;
- res = pinmux_generic_add_function(pcs->pctl, name,
- pgnames, npgnames,
- function);
- if (res)
- return NULL;
+ selector = pinmux_generic_add_function(pcs->pctl, name,
+ pgnames, npgnames,
+ function);
+ if (selector < 0) {
+ devm_kfree(pcs->dev, function);
+ *fcn = NULL;
+ } else {
+ *fcn = function;
+ }
- return function;
+ return selector;
}
/**
@@ -979,8 +985,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
{
const char *name = "pinctrl-single,pins";
struct pcs_func_vals *vals;
- int rows, *pins, found = 0, res = -ENOMEM, i;
- struct pcs_function *function;
+ int rows, *pins, found = 0, res = -ENOMEM, i, fsel, gsel;
+ struct pcs_function *function = NULL;
rows = pinctrl_count_index_with_args(np, name);
if (rows <= 0) {
@@ -1030,21 +1036,25 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
}
pgnames[0] = np->name;
- function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1);
- if (!function) {
- res = -ENOMEM;
+ mutex_lock(&pcs->mutex);
+ fsel = pcs_add_function(pcs, &function, np->name, vals, found,
+ pgnames, 1);
+ if (fsel < 0) {
+ res = fsel;
goto free_pins;
}
- res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
- if (res < 0)
+ gsel = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
+ if (gsel < 0) {
+ res = gsel;
goto free_function;
+ }
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
- if (PCS_HAS_PINCONF) {
+ if (PCS_HAS_PINCONF && function) {
res = pcs_parse_pinconf(pcs, np, function, map);
if (res)
goto free_pingroups;
@@ -1052,15 +1062,17 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
} else {
*num_maps = 1;
}
+ mutex_unlock(&pcs->mutex);
+
return 0;
free_pingroups:
- pinctrl_generic_remove_last_group(pcs->pctl);
+ pinctrl_generic_remove_group(pcs->pctl, gsel);
*num_maps = 1;
free_function:
- pinmux_generic_remove_last_function(pcs->pctl);
-
+ pinmux_generic_remove_function(pcs->pctl, fsel);
free_pins:
+ mutex_unlock(&pcs->mutex);
devm_kfree(pcs->dev, pins);
free_vals:
@@ -1077,9 +1089,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
{
const char *name = "pinctrl-single,bits";
struct pcs_func_vals *vals;
- int rows, *pins, found = 0, res = -ENOMEM, i;
+ int rows, *pins, found = 0, res = -ENOMEM, i, fsel, gsel;
int npins_in_row;
- struct pcs_function *function;
+ struct pcs_function *function = NULL;
rows = pinctrl_count_index_with_args(np, name);
if (rows <= 0) {
@@ -1166,15 +1178,19 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
}
pgnames[0] = np->name;
- function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1);
- if (!function) {
- res = -ENOMEM;
+ mutex_lock(&pcs->mutex);
+ fsel = pcs_add_function(pcs, &function, np->name, vals, found,
+ pgnames, 1);
+ if (fsel < 0) {
+ res = fsel;
goto free_pins;
}
- res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
- if (res < 0)
+ gsel = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
+ if (gsel < 0) {
+ res = gsel;
goto free_function;
+ }
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
(*map)->data.mux.group = np->name;
@@ -1186,14 +1202,17 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
}
*num_maps = 1;
+ mutex_unlock(&pcs->mutex);
+
return 0;
free_pingroups:
- pinctrl_generic_remove_last_group(pcs->pctl);
+ pinctrl_generic_remove_group(pcs->pctl, gsel);
*num_maps = 1;
free_function:
- pinmux_generic_remove_last_function(pcs->pctl);
+ pinmux_generic_remove_function(pcs->pctl, fsel);
free_pins:
+ mutex_unlock(&pcs->mutex);
devm_kfree(pcs->dev, pins);
free_vals:
@@ -1598,19 +1617,19 @@ static int pcs_save_context(struct pcs_device *pcs)
switch (pcs->width) {
case 64:
- regsl = (u64 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- regsl[i] = pcs->read(pcs->base + i * mux_bytes);
+ regsl = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ *regsl++ = pcs->read(pcs->base + i);
break;
case 32:
- regsw = (u32 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- regsw[i] = pcs->read(pcs->base + i * mux_bytes);
+ regsw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ *regsw++ = pcs->read(pcs->base + i);
break;
case 16:
- regshw = (u16 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- regshw[i] = pcs->read(pcs->base + i * mux_bytes);
+ regshw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ *regshw++ = pcs->read(pcs->base + i);
break;
}
@@ -1628,19 +1647,19 @@ static void pcs_restore_context(struct pcs_device *pcs)
switch (pcs->width) {
case 64:
- regsl = (u64 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- pcs->write(regsl[i], pcs->base + i * mux_bytes);
+ regsl = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ pcs->write(*regsl++, pcs->base + i);
break;
case 32:
- regsw = (u32 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- pcs->write(regsw[i], pcs->base + i * mux_bytes);
+ regsw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ pcs->write(*regsw++, pcs->base + i);
break;
case 16:
- regshw = (u16 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- pcs->write(regshw[i], pcs->base + i * mux_bytes);
+ regshw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ pcs->write(*regshw++, pcs->base + i);
break;
}
}
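The rewritten save/restore loops are behaviour-preserving: stepping the byte offset by mux_bytes visits exactly the offsets the old index form computed, and dropping the casts is safe because pcs->saved_vals is a void pointer. A worked check, assuming pcs->size = 16 and mux_bytes = 4:

	/*
	 * old: i = 0..3      -> offset = i * mux_bytes = 0, 4, 8, 12
	 * new: i = 0,4,8,12  -> offset = i             = 0, 4, 8, 12
	 * Same registers, stored into consecutive slots via *regsw++.
	 */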
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 9cc80a500880..2b1a61dba224 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index b8e9bda8ec98..5780442c068b 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -22,7 +22,6 @@
#include <linux/err.h>
#include <linux/list.h>
#include <linux/string.h>
-#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/machine.h>
@@ -308,7 +307,6 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
selector++;
}
- dev_err(pctldev->dev, "function '%s' not supported\n", function);
return -EINVAL;
}
@@ -775,6 +773,16 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
void *data)
{
struct function_desc *function;
+ int selector;
+
+ if (!name)
+ return -EINVAL;
+
+ selector = pinmux_func_name_to_selector(pctldev, name);
+ if (selector >= 0)
+ return selector;
+
+ selector = pctldev->num_functions;
function = devm_kzalloc(pctldev->dev, sizeof(*function), GFP_KERNEL);
if (!function)
@@ -785,12 +793,11 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
function->num_group_names = num_groups;
function->data = data;
- radix_tree_insert(&pctldev->pin_function_tree, pctldev->num_functions,
- function);
+ radix_tree_insert(&pctldev->pin_function_tree, selector, function);
pctldev->num_functions++;
- return 0;
+ return selector;
}
EXPORT_SYMBOL_GPL(pinmux_generic_add_function);
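Returning the selector here (and taking one in pinmux_generic_remove_function()) replaces the remove-last helpers, which were racy: two concurrent dt_node_to_map() calls could each "remove last" and delete the other's most recently added entry. Note the function is also idempotent per name now: if the name is already registered, the existing selector is returned. A minimal caller sketch under the new contract, mirroring the pcs/rza1 conversions above (drv->mutex and the surrounding names are assumptions):

	mutex_lock(&drv->mutex);
	fsel = pinmux_generic_add_function(pctldev, name, grps, ngrps, data);
	if (fsel < 0) {
		mutex_unlock(&drv->mutex);
		return fsel;
	}
	gsel = pinctrl_generic_add_group(pctldev, name, pins, npins, drv);
	if (gsel < 0) {
		/* roll back exactly what this caller added, by selector */
		pinmux_generic_remove_function(pctldev, fsel);
		mutex_unlock(&drv->mutex);
		return gsel;
	}
	mutex_unlock(&drv->mutex);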
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index a331fcdbedd9..3319535c76cb 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -150,13 +150,6 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
unsigned int selector);
-static inline int
-pinmux_generic_remove_last_function(struct pinctrl_dev *pctldev)
-{
- return pinmux_generic_remove_function(pctldev,
- pctldev->num_functions - 1);
-}
-
void pinmux_generic_free_functions(struct pinctrl_dev *pctldev);
#else
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 0e22f52b2a19..2155a30c282b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -250,22 +250,30 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
/* Convert register value to pinconf value */
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- arg = arg == MSM_NO_PULL;
+ if (arg != MSM_NO_PULL)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = arg == MSM_PULL_DOWN;
+ if (arg != MSM_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_BUS_HOLD:
if (pctrl->soc->pull_no_keeper)
return -ENOTSUPP;
- arg = arg == MSM_KEEPER;
+ if (arg != MSM_KEEPER)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
if (pctrl->soc->pull_no_keeper)
arg = arg == MSM_PULL_UP_NO_KEEPER;
else
arg = arg == MSM_PULL_UP;
+ if (!arg)
+ return -EINVAL;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = msm_regval_to_drive(arg);
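Returning -EINVAL for an unconfigured bias follows the pinconf core convention for config-get callbacks: -EINVAL means "this option is not set", and the generic debugfs dump skips such options instead of printing them with a zero argument. In other words:

	/*
	 * before: ret = 0, arg = (reg matches param) -> option always
	 *         listed, sometimes with arg == 0
	 * after:  ret = -EINVAL when not set         -> option omitted;
	 *         ret = 0, arg = 1 only when it is set
	 */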
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 3e66e0d10010..cf82db78e69e 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -390,31 +390,47 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_DRIVE_PUSH_PULL:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_CMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_CMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_SOURCE:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = pad->pullup == PMIC_GPIO_PULL_DOWN;
+ if (pad->pullup != PMIC_GPIO_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_DISABLE:
- arg = pad->pullup = PMIC_GPIO_PULL_DISABLE;
+ if (pad->pullup != PMIC_GPIO_PULL_DISABLE)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- arg = pad->pullup == PMIC_GPIO_PULL_UP_30;
+ if (pad->pullup != PMIC_GPIO_PULL_UP_30)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- arg = !pad->is_enabled;
+ if (pad->is_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_POWER_SOURCE:
arg = pad->power_source;
break;
case PIN_CONFIG_INPUT_ENABLE:
- arg = pad->input_enabled;
+ if (!pad->input_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_OUTPUT:
arg = pad->out_value;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index d82820fc349a..44c6b753f692 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -616,16 +616,22 @@ static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5260_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5260_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
},
};
@@ -842,30 +848,40 @@ static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5420_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5420_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos5420_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 4 data */
.pin_banks = exynos5420_pin_banks4,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos4_audio_retention_data,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index a263ddd94945..f49ea3d92aa1 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -25,6 +25,7 @@
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <dt-bindings/pinctrl/samsung.h>
@@ -37,6 +38,8 @@ struct exynos_irq_chip {
u32 eint_con;
u32 eint_mask;
u32 eint_pend;
+ u32 eint_wake_mask_value;
+ u32 eint_wake_mask_reg;
};
static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
@@ -215,6 +218,7 @@ static struct exynos_irq_chip exynos_gpio_irq_chip = {
.eint_con = EXYNOS_GPIO_ECON_OFFSET,
.eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
.eint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ /* eint_wake_mask_value not used */
};
static int exynos_eint_irq_map(struct irq_domain *h, unsigned int virq,
@@ -330,6 +334,8 @@ u32 exynos_get_eint_wake_mask(void)
static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
{
+ struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+ struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
@@ -339,6 +345,7 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
exynos_eint_wake_mask |= bit;
else
exynos_eint_wake_mask &= ~bit;
+ our_chip->eint_wake_mask_value = exynos_eint_wake_mask;
return 0;
}
@@ -346,6 +353,25 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
/*
* irq_chip for wakeup interrupts
*/
+static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
+ .chip = {
+ .name = "s5pv210_wkup_irq_chip",
+ .irq_unmask = exynos_irq_unmask,
+ .irq_mask = exynos_irq_mask,
+ .irq_ack = exynos_irq_ack,
+ .irq_set_type = exynos_irq_set_type,
+ .irq_set_wake = exynos_wkup_irq_set_wake,
+ .irq_request_resources = exynos_irq_request_resources,
+ .irq_release_resources = exynos_irq_release_resources,
+ },
+ .eint_con = EXYNOS_WKUP_ECON_OFFSET,
+ .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+ .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+	/* Only difference from exynos4210_wkup_irq_chip: */
+ .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
+};
+
static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.chip = {
.name = "exynos4210_wkup_irq_chip",
@@ -360,6 +386,8 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.eint_con = EXYNOS_WKUP_ECON_OFFSET,
.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+ .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
};
static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
@@ -376,10 +404,14 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.eint_con = EXYNOS7_WKUP_ECON_OFFSET,
.eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
+ .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+ .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
};
/* list of external wakeup controllers supported */
static const struct of_device_id exynos_wkup_irq_ids[] = {
+ { .compatible = "samsung,s5pv210-wakeup-eint",
+ .data = &s5pv210_wkup_irq_chip },
{ .compatible = "samsung,exynos4210-wakeup-eint",
.data = &exynos4210_wkup_irq_chip },
{ .compatible = "samsung,exynos7-wakeup-eint",
@@ -542,6 +574,27 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
+static void
+exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+{
+ struct regmap *pmu_regs;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+			 "No retention data configured for bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+ pmu_regs = drvdata->retention_ctrl->priv;
+ dev_info(drvdata->dev,
+ "Setting external wakeup interrupt mask: 0x%x\n",
+ irq_chip->eint_wake_mask_value);
+
+ regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
+ irq_chip->eint_wake_mask_value);
+}
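The mask written here is accumulated bit by bit in exynos_wkup_irq_set_wake(); device drivers only need to mark their interrupt as a wakeup source. Hypothetical consumer sketch (client->irq is an assumption):

	/*
	 * Marking an EINT as a wakeup source sets its bit in
	 * eint_wake_mask_value through the irq_set_wake callback; the
	 * accumulated mask is committed to the PMU register above at
	 * suspend time.
	 */
	ret = enable_irq_wake(client->irq);
	if (ret)
		dev_warn(&client->dev, "failed to enable wakeup\n");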
+
static void exynos_pinctrl_suspend_bank(
struct samsung_pinctrl_drv_data *drvdata,
struct samsung_pin_bank *bank)
@@ -564,11 +617,24 @@ static void exynos_pinctrl_suspend_bank(
void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_bank *bank = drvdata->pin_banks;
+ struct exynos_irq_chip *irq_chip = NULL;
int i;
- for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
+ for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
if (bank->eint_type == EINT_TYPE_GPIO)
exynos_pinctrl_suspend_bank(drvdata, bank);
+ else if (bank->eint_type == EINT_TYPE_WKUP) {
+ if (!irq_chip) {
+ irq_chip = bank->irq_chip;
+ exynos_pinctrl_set_eint_wakeup_mask(drvdata,
+ irq_chip);
+ } else if (bank->irq_chip != irq_chip) {
+ dev_warn(drvdata->dev,
+				 "More than one external wakeup interrupt chip configured (bank: %s). This is not supported by the hardware or the driver.\n",
+ bank->name);
+ }
+ }
+ }
}
static void exynos_pinctrl_resume_bank(
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index f0cda9424dfe..e571bbd7139b 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -223,6 +223,13 @@ struct samsung_retention_data {
* interrupts for the controller.
* @eint_wkup_init: platform specific callback to setup the external wakeup
* interrupts for the controller.
+ * @suspend: platform specific suspend callback, executed during pin controller
+ * device suspend, see samsung_pinctrl_suspend()
+ * @resume: platform specific resume callback, executed during pin controller
+ * device resume, see samsung_pinctrl_resume()
+ *
+ * Controllers with external wakeup interrupts must define at least
+ * eint_wkup_init, retention_data and suspend for suspend/resume to
+ * work properly.
*/
struct samsung_pin_ctrl {
const struct samsung_pin_bank_data *pin_banks;
@@ -255,6 +262,10 @@ struct samsung_pin_ctrl {
* @pin_base: starting system wide pin number.
* @nr_pins: number of pins supported by the controller.
* @retention_ctrl: retention control runtime data.
+ * @suspend: platform specific suspend callback, executed during pin controller
+ * device suspend, see samsung_pinctrl_suspend()
+ * @resume: platform specific resume callback, executed during pin controller
+ * device resume, see samsung_pinctrl_resume()
*/
struct samsung_pinctrl_drv_data {
struct list_head node;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index d2bbee656381..cfd7de67e3e3 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -1758,6 +1758,263 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int hscif2_data_a_mux[] = {
+ HRX2_A_MARK, HTX2_A_MARK,
+};
+
+static const unsigned int hscif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 10),
+};
+
+static const unsigned int hscif2_clk_a_mux[] = {
+ HSCK2_A_MARK,
+};
+
+static const unsigned int hscif2_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+
+static const unsigned int hscif2_ctrl_a_mux[] = {
+ HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+
+static const unsigned int hscif2_data_b_mux[] = {
+ HRX2_B_MARK, HTX2_B_MARK,
+};
+
+static const unsigned int hscif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+
+static const unsigned int hscif2_clk_b_mux[] = {
+ HSCK2_B_MARK,
+};
+
+static const unsigned int hscif2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+
+static const unsigned int hscif2_ctrl_b_mux[] = {
+ HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+
+static const unsigned int hscif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+
+static const unsigned int hscif2_data_c_mux[] = {
+ HRX2_C_MARK, HTX2_C_MARK,
+};
+
+static const unsigned int hscif2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 24),
+};
+
+static const unsigned int hscif2_clk_c_mux[] = {
+ HSCK2_C_MARK,
+};
+
+static const unsigned int hscif2_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
+};
+
+static const unsigned int hscif2_ctrl_c_mux[] = {
+ HRTS2_N_C_MARK, HCTS2_N_C_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+
+static const unsigned int hscif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+
+static const unsigned int hscif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+
+static const unsigned int hscif3_data_c_mux[] = {
+ HRX3_C_MARK, HTX3_C_MARK,
+};
+
+static const unsigned int hscif3_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+
+static const unsigned int hscif3_data_d_mux[] = {
+ HRX3_D_MARK, HTX3_D_MARK,
+};
+
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+
+static const unsigned int hscif4_data_a_mux[] = {
+ HRX4_A_MARK, HTX4_A_MARK,
+};
+
+static const unsigned int hscif4_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 11),
+};
+
+static const unsigned int hscif4_clk_mux[] = {
+ HSCK4_MARK,
+};
+
+static const unsigned int hscif4_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+
+static const unsigned int hscif4_ctrl_mux[] = {
+ HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+
+static const unsigned int hscif4_data_b_mux[] = {
+ HRX4_B_MARK, HTX4_B_MARK,
+};
+
/* - I2C -------------------------------------------------------------------- */
static const unsigned int i2c1_a_pins[] = {
/* SDA, SCL */
@@ -3169,6 +3426,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
SH_PFC_PIN_GROUP(i2c1_a),
SH_PFC_PIN_GROUP(i2c1_b),
SH_PFC_PIN_GROUP(i2c2_a),
@@ -3379,6 +3664,49 @@ static const char * const du_groups[] = {
"du_disp",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data_a",
+ "hscif2_clk_a",
+ "hscif2_ctrl_a",
+ "hscif2_data_b",
+ "hscif2_clk_b",
+ "hscif2_ctrl_b",
+ "hscif2_data_c",
+ "hscif2_clk_c",
+ "hscif2_ctrl_c",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_b",
+ "hscif3_data_c",
+ "hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+ "hscif4_data_a",
+ "hscif4_clk",
+ "hscif4_ctrl",
+ "hscif4_data_b",
+};
+
static const char * const i2c1_groups[] = {
"i2c1_a",
"i2c1_b",
@@ -3651,6 +3979,11 @@ static const char * const usb30_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index b02caf316711..eeb58b3bbc9a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -21,15 +21,13 @@
#include "core.h"
#include "sh_pfc.h"
-#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
-
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS), \
- PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_6(4, fn, sfx, CFG_FLAGS), \
- PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
+ PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_28(1, fn, sfx), \
+ PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_6(4, fn, sfx), \
+ PORT_GP_15(5, fn, sfx)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index a68fd658aada..b81c807ac54d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -277,7 +277,7 @@
#define IP11_15_12 FM(TX0_A) FM(HTX1_A) FM(SSI_WS2_A) FM(RIF1_D0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_19_16 FM(CTS0_N_A) FM(NFDATA14_A) FM(AUDIO_CLKOUT_A) FM(RIF1_D1) FM(SCIF_CLK_A) FM(FMCLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_23_20 FM(RTS0_N_TANS_A) FM(NFDATA15_A) FM(AUDIO_CLKOUT1_A) FM(RIF1_CLK) FM(SCL2_A) FM(FMIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N_TANS) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB1_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N_TANS) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB0_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_31_28 FM(RX1) FM(HRX2_B) FM(SSI_SCK9_B) FM(AUDIO_CLKOUT1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
@@ -1082,7 +1082,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N_TANS),
PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP11_27_24, FMCLK_C, SEL_FM_2),
- PINMUX_IPSR_GPSR(IP11_27_24, USB1_ID),
+ PINMUX_IPSR_GPSR(IP11_27_24, USB0_ID),
PINMUX_IPSR_GPSR(IP11_31_28, RX1),
PINMUX_IPSR_MSEL(IP11_31_28, HRX2_B, SEL_HSCIF2_1),
@@ -1784,6 +1784,53 @@ static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_a_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int usb0_a_mux[] = {
+ USB0_PWEN_A_MARK, USB0_OVC_A_MARK,
+};
+
+static const unsigned int usb0_b_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+
+static const unsigned int usb0_b_mux[] = {
+ USB0_PWEN_B_MARK, USB0_OVC_B_MARK,
+};
+
+static const unsigned int usb0_id_pins[] = {
+ /* ID */
+ RCAR_GP_PIN(5, 0)
+};
+
+static const unsigned int usb0_id_mux[] = {
+ USB0_ID_MARK,
+};
+
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+
+static const unsigned int usb30_id_pins[] = {
+ /* ID */
+ RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int usb30_id_mux[] = {
+ USB3HS0_ID_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_link),
SH_PFC_PIN_GROUP(avb_magic),
@@ -1837,6 +1884,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_data_c),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(usb0_a),
+ SH_PFC_PIN_GROUP(usb0_b),
+ SH_PFC_PIN_GROUP(usb0_id),
+ SH_PFC_PIN_GROUP(usb30),
+ SH_PFC_PIN_GROUP(usb30_id),
};
static const char * const avb_groups[] = {
@@ -1933,6 +1985,17 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const usb0_groups[] = {
+ "usb0_a",
+ "usb0_b",
+ "usb0_id",
+};
+
+static const char * const usb30_groups[] = {
+ "usb30",
+ "usb30_id",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(i2c1),
@@ -1948,6 +2011,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb30),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
index 3cdad8bc8f93..5702b6704137 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "pinctrl-sprd.h"
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index dfed60982a8a..a9bec6e6fdd1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -46,6 +46,8 @@
#define STM32_GPIO_PINS_PER_BANK 16
#define STM32_GPIO_IRQ_LINE 16
+#define SYSCFG_IRQMUX_MASK GENMASK(3, 0)
+
#define gpio_range_to_bank(chip) \
container_of(chip, struct stm32_gpio_bank, range)
@@ -73,6 +75,7 @@ struct stm32_gpio_bank {
struct fwnode_handle *fwnode;
struct irq_domain *domain;
u32 bank_nr;
+ u32 bank_ioport_nr;
};
struct stm32_pinctrl {
@@ -298,7 +301,7 @@ static int stm32_gpio_domain_activate(struct irq_domain *d,
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
- regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
+ regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_ioport_nr);
return 0;
}
@@ -638,6 +641,11 @@ static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
}
range = pinctrl_find_gpio_range_from_pin(pctldev, g->pin);
+ if (!range) {
+ dev_err(pctl->dev, "No gpio range defined.\n");
+ return -EINVAL;
+ }
+
bank = gpiochip_get_data(range->gc);
pin = stm32_gpio_pin(g->pin);
@@ -806,11 +814,17 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
unsigned int pin, enum pin_config_param param,
enum pin_config_param arg)
{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct pinctrl_gpio_range *range;
struct stm32_gpio_bank *bank;
int offset, ret = 0;
range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
+ if (!range) {
+ dev_err(pctl->dev, "No gpio range defined.\n");
+ return -EINVAL;
+ }
+
bank = gpiochip_get_data(range->gc);
offset = stm32_gpio_pin(pin);
@@ -892,6 +906,9 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
bool val;
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ if (!range)
+ return;
+
bank = gpiochip_get_data(range->gc);
offset = stm32_gpio_pin(pin);
@@ -948,6 +965,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
struct device_node *np)
{
struct stm32_gpio_bank *bank = &pctl->banks[pctl->nbanks];
+ int bank_ioport_nr;
struct pinctrl_gpio_range *range = &bank->range;
struct of_phandle_args args;
struct device *dev = pctl->dev;
@@ -998,12 +1016,17 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
pinctrl_add_gpio_range(pctl->pctl_dev,
&pctl->banks[bank_nr].range);
}
+
+ if (of_property_read_u32(np, "st,bank-ioport", &bank_ioport_nr))
+ bank_ioport_nr = bank_nr;
+
bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
bank->gpio_chip.ngpio = npins;
bank->gpio_chip.of_node = np;
bank->gpio_chip.parent = dev;
bank->bank_nr = bank_nr;
+ bank->bank_ioport_nr = bank_ioport_nr;
spin_lock_init(&bank->lock);
/* create irq hierarchical domain */
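A hypothetical device-tree usage of the new property (the value is illustrative): a bank whose EXTI IOPORT index differs from its GPIO bank number carries the property below, and stm32_gpio_domain_activate() then programs bank_ioport_nr into the irqmux.

	/* In the bank's DT node (sketch, expressed here as a comment):
	 *     st,bank-ioport = <11>;
	 * Without the property, bank_nr is used as before.
	 */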
@@ -1033,6 +1056,7 @@ static int stm32_pctrl_dt_setup_irq(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct regmap *rm;
int offset, ret, i;
+ int mask, mask_width;
parent = of_irq_find_parent(np);
if (!parent)
@@ -1052,12 +1076,21 @@ static int stm32_pctrl_dt_setup_irq(struct platform_device *pdev,
if (ret)
return ret;
+ ret = of_property_read_u32_index(np, "st,syscfg", 2, &mask);
+ if (ret)
+ mask = SYSCFG_IRQMUX_MASK;
+
+ mask_width = fls(mask);
+
for (i = 0; i < STM32_GPIO_PINS_PER_BANK; i++) {
struct reg_field mux;
mux.reg = offset + (i / 4) * 4;
- mux.lsb = (i % 4) * 4;
- mux.msb = mux.lsb + 3;
+ mux.lsb = (i % 4) * mask_width;
+ mux.msb = mux.lsb + mask_width - 1;
+
+ dev_dbg(dev, "irqmux%d: reg:%#x, lsb:%d, msb:%d\n",
+ i, mux.reg, mux.lsb, mux.msb);
pctl->irqmux[i] = devm_regmap_field_alloc(dev, rm, mux);
if (IS_ERR(pctl->irqmux[i]))
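Worked example for the field layout, assuming the default SYSCFG_IRQMUX_MASK = GENMASK(3, 0) (a wider mask read from the third st,syscfg cell widens every field accordingly):

	/*
	 * mask = 0xf, so mask_width = fls(0xf) = 4.  For EXTI line i = 5:
	 *   mux.reg = offset + (5 / 4) * 4 = offset + 4  (second register)
	 *   mux.lsb = (5 % 4) * 4 = 4
	 *   mux.msb = 4 + 4 - 1 = 7                      (field bits [7:4])
	 */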
@@ -1166,7 +1199,7 @@ int stm32_pctl_probe(struct platform_device *pdev)
return PTR_ERR(pctl->pctl_dev);
}
- for_each_child_of_node(np, child)
+ for_each_available_child_of_node(np, child)
if (of_property_read_bool(child, "gpio-controller"))
banks++;
@@ -1179,7 +1212,7 @@ int stm32_pctl_probe(struct platform_device *pdev)
if (!pctl->banks)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
if (of_property_read_bool(child, "gpio-controller")) {
ret = stm32_gpiolib_register_bank(pctl, child);
if (ret)
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index f974eee29a19..1aba75897d14 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -629,12 +629,12 @@ static void tegra_pinctrl_clear_parked_bits(struct tegra_pmx *pmx)
}
}
-static bool gpio_node_has_range(void)
+static bool gpio_node_has_range(const char *compatible)
{
struct device_node *np;
bool has_prop = false;
- np = of_find_compatible_node(NULL, NULL, "nvidia,tegra30-gpio");
+ np = of_find_compatible_node(NULL, NULL, compatible);
if (!np)
return has_prop;
@@ -728,7 +728,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
tegra_pinctrl_clear_parked_bits(pmx);
- if (!gpio_node_has_range())
+ if (!gpio_node_has_range(pmx->soc->gpio_compatible))
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
platform_set_drvdata(pdev, pmx);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index aa33c20766c4..44c71941b5f8 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -189,6 +189,7 @@ struct tegra_pingroup {
*/
struct tegra_pinctrl_soc_data {
unsigned ngpios;
+ const char *gpio_compatible;
const struct pinctrl_pin_desc *pins;
unsigned npins;
struct tegra_function *functions;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
index 56b33fca1bfc..d43c209e9c30 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra114.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
@@ -1839,6 +1839,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra114_pins,
.npins = ARRAY_SIZE(tegra114_pins),
.functions = tegra114_functions,
@@ -1867,4 +1868,9 @@ static struct platform_driver tegra114_pinctrl_driver = {
},
.probe = tegra114_pinctrl_probe,
};
-builtin_platform_driver(tegra114_pinctrl_driver);
+
+static int __init tegra114_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra114_pinctrl_driver);
+}
+arch_initcall(tegra114_pinctrl_init);
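builtin_platform_driver() registers at device_initcall level; hoisting the five Tegra pinctrl drivers to arch_initcall makes the pin controller available earlier in boot, before the devices that depend on it probe. For reference, a sketch of what the replaced macro expanded to (equivalent form, not literal kernel source):

	static int __init drv_init(void)
	{
		return platform_driver_register(&drv);
	}
	device_initcall(drv_init);	/* builtin_platform_driver(drv) */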
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
index 7bc998ace0d5..5b07a5834d15 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra124.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
@@ -2051,6 +2051,7 @@ static const struct tegra_pingroup tegra124_groups[] = {
static const struct tegra_pinctrl_soc_data tegra124_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra124_pins,
.npins = ARRAY_SIZE(tegra124_pins),
.functions = tegra124_functions,
@@ -2079,4 +2080,9 @@ static struct platform_driver tegra124_pinctrl_driver = {
},
.probe = tegra124_pinctrl_probe,
};
-builtin_platform_driver(tegra124_pinctrl_driver);
+
+static int __init tegra124_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra124_pinctrl_driver);
+}
+arch_initcall(tegra124_pinctrl_init);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index b6dd939d32cc..1fc82a9576e0 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -2221,6 +2221,7 @@ static const struct tegra_pingroup tegra20_groups[] = {
static const struct tegra_pinctrl_soc_data tegra20_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra20-gpio",
.pins = tegra20_pins,
.npins = ARRAY_SIZE(tegra20_pins),
.functions = tegra20_functions,
@@ -2276,4 +2277,9 @@ static struct platform_driver tegra20_pinctrl_driver = {
},
.probe = tegra20_pinctrl_probe,
};
-builtin_platform_driver(tegra20_pinctrl_driver);
+
+static int __init tegra20_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra20_pinctrl_driver);
+}
+arch_initcall(tegra20_pinctrl_init);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
index c244e5b17bd6..3e77f5474dd8 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra210.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
@@ -1553,6 +1553,7 @@ static const struct tegra_pingroup tegra210_groups[] = {
static const struct tegra_pinctrl_soc_data tegra210_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra210_pins,
.npins = ARRAY_SIZE(tegra210_pins),
.functions = tegra210_functions,
@@ -1581,4 +1582,9 @@ static struct platform_driver tegra210_pinctrl_driver = {
},
.probe = tegra210_pinctrl_probe,
};
-builtin_platform_driver(tegra210_pinctrl_driver);
+
+static int __init tegra210_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra210_pinctrl_driver);
+}
+arch_initcall(tegra210_pinctrl_init);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
index 1f180a20f2ab..10e617003e9c 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra30.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
@@ -2474,6 +2474,7 @@ static const struct tegra_pingroup tegra30_groups[] = {
static const struct tegra_pinctrl_soc_data tegra30_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra30_pins,
.npins = ARRAY_SIZE(tegra30_pins),
.functions = tegra30_functions,
@@ -2502,4 +2503,9 @@ static struct platform_driver tegra30_pinctrl_driver = {
},
.probe = tegra30_pinctrl_probe,
};
-builtin_platform_driver(tegra30_pinctrl_driver);
+
+static int __init tegra30_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra30_pinctrl_driver);
+}
+arch_initcall(tegra30_pinctrl_init);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 58825f68b58b..280dca725d6e 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -517,6 +518,10 @@ static const int i2c4_muxvals[] = {1, 1};
static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17};
static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {56, 57, 58, 59};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {169, 170, 171, 172};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17};
static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -596,6 +601,8 @@ static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c3),
UNIPHIER_PINCTRL_GROUP(i2c4),
UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -632,6 +639,8 @@ static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const i2c4_groups[] = {"i2c4"};
static const char * const nand_groups[] = {"nand"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0"};
@@ -657,6 +666,8 @@ static const struct uniphier_pinmux_function uniphier_ld11_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(i2c4),
UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index 9f449b35e300..d2d56c985c83 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -606,6 +607,14 @@ static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned sd_pins[] = {10, 11, 12, 13, 14, 15, 16, 17};
static const int sd_muxvals[] = {3, 3, 3, 3, 3, 3, 3, 3}; /* No SDVOLC */
+static const unsigned spi0_pins[] = {56, 57, 58, 59};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {169, 170, 171, 172};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
+static const unsigned spi2_pins[] = {86, 87, 88, 89};
+static const int spi2_muxvals[] = {1, 1, 1, 1};
+static const unsigned spi3_pins[] = {74, 75, 76, 77};
+static const int spi3_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17};
static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -685,6 +694,10 @@ static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c4),
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
+ UNIPHIER_PINCTRL_GROUP(spi2),
+ UNIPHIER_PINCTRL_GROUP(spi3),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -722,6 +735,10 @@ static const char * const i2c3_groups[] = {"i2c3"};
static const char * const i2c4_groups[] = {"i2c4"};
static const char * const nand_groups[] = {"nand"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
+static const char * const spi2_groups[] = {"spi2"};
+static const char * const spi3_groups[] = {"spi3"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0"};
@@ -751,6 +768,10 @@ static const struct uniphier_pinmux_function uniphier_ld20_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c4),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
+ UNIPHIER_PINMUX_FUNCTION(spi2),
+ UNIPHIER_PINMUX_FUNCTION(spi3),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
index 0b10ebc07eb8..03d87ad82726 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -576,6 +577,8 @@ static const unsigned nand_cs1_pins[] = {22, 23};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {44, 45, 46, 47, 48, 49, 50, 51, 52};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {135, 136, 137, 138};
+static const int spi0_muxvals[] = {12, 12, 12, 12};
static const unsigned system_bus_pins[] = {16, 17, 18, 19, 20, 165, 166, 167,
168, 169, 170, 171, 172, 173};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1,
@@ -640,6 +643,7 @@ static const struct uniphier_pinctrl_group uniphier_ld4_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
@@ -667,6 +671,7 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs0",
"system_bus_cs1",
@@ -690,6 +695,7 @@ static const struct uniphier_pinmux_function uniphier_ld4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
index 8e4d45fea885..31f36ea53911 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -769,6 +770,10 @@ static const unsigned nand_cs1_pins[] = {37, 38};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {199, 200, 201, 202};
+static const int spi0_muxvals[] = {8, 8, 8, 8};
+static const unsigned spi1_pins[] = {93, 94, 95, 96};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -851,6 +856,8 @@ static const struct uniphier_pinctrl_group uniphier_ld6b_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
@@ -882,6 +889,8 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1",
"system_bus_cs2",
@@ -907,6 +916,8 @@ static const struct uniphier_pinmux_function uniphier_ld6b_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index 24788a74c254..60722898d5c7 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -1050,6 +1051,10 @@ static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326,
327};
static const int sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {199, 200, 201, 202};
+static const int spi0_muxvals[] = {11, 11, 11, 11};
+static const unsigned spi1_pins[] = {195, 196, 197, 198, 235, 238, 239};
+static const int spi1_muxvals[] = {11, 11, 11, 11, 11, 11, 11};
static const unsigned system_bus_pins[] = {25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1138,6 +1143,8 @@ static const struct uniphier_pinctrl_group uniphier_pro4_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
UNIPHIER_PINCTRL_GROUP(sd1),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
@@ -1171,6 +1178,8 @@ static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
static const char * const sd1_groups[] = {"sd1"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs0",
"system_bus_cs1",
@@ -1202,6 +1211,8 @@ static const struct uniphier_pinmux_function uniphier_pro4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
UNIPHIER_PINMUX_FUNCTION(sd1),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
index d5d5e579cb08..ae7981530141 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -818,6 +819,12 @@ static const unsigned nand_cs1_pins[] = {26, 27};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {250, 251, 252, 253, 254, 255, 256, 257, 258};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {120, 121, 122, 123};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {134, 139, 85, 86};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
+static const unsigned spi2_pins[] = {55, 56, 57, 58, 82, 83, 84};
+static const int spi2_muxvals[] = {0, 0, 0, 0, 1, 1, 1};
static const unsigned system_bus_pins[] = {4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -904,6 +911,9 @@ static const struct uniphier_pinctrl_group uniphier_pro5_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c5c),
UNIPHIER_PINCTRL_GROUP(i2c6),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
+ UNIPHIER_PINCTRL_GROUP(spi2),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
@@ -934,6 +944,9 @@ static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
+static const char * const spi2_groups[] = {"spi2"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs0",
"system_bus_cs1",
@@ -961,6 +974,9 @@ static const struct uniphier_pinmux_function uniphier_pro5_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c6),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
+ UNIPHIER_PINMUX_FUNCTION(spi2),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
index 032619ad0e73..7975bd7f99c8 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -778,6 +779,10 @@ static const unsigned nand_cs1_pins[] = {37, 38};
static const int nand_cs1_muxvals[] = {8, 8};
static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
static const int sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8};
+static const unsigned spi0_pins[] = {199, 200, 201, 202};
+static const int spi0_muxvals[] = {8, 8, 8, 8};
+static const unsigned spi1_pins[] = {93, 94, 95, 96};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13};
static const int system_bus_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
@@ -861,6 +866,8 @@ static const struct uniphier_pinctrl_group uniphier_pxs2_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -897,6 +904,8 @@ static const char * const i2c5_groups[] = {"i2c5"};
static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0", "uart0b", "uart0b_ctsrts"};
@@ -928,6 +937,8 @@ static const struct uniphier_pinmux_function uniphier_pxs2_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c6),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
index 535bb2e935e4..b16ce283695b 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -808,6 +809,10 @@ static const unsigned int nand_pins[] = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned int sd_pins[] = {43, 44, 45, 46, 47, 48, 49, 50, 51};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {100, 101, 102, 103};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {112, 113, 114, 115};
+static const int spi1_muxvals[] = {2, 2, 2, 2};
static const unsigned int system_bus_pins[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -886,6 +891,8 @@ static const struct uniphier_pinctrl_group uniphier_pxs3_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c3),
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -913,6 +920,8 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0", "uart0_ctsrts"};
@@ -936,6 +945,8 @@ static const struct uniphier_pinmux_function uniphier_pxs3_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
index 0f921a653164..cb44568fcbbc 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -504,6 +505,8 @@ static const unsigned nand_cs1_pins[] = {22, 23};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {118, 119, 120, 121};
+static const int spi0_muxvals[] = {3, 3, 3, 3};
static const unsigned system_bus_pins[] = {136, 137, 138, 139, 140, 141, 142,
143, 144, 145, 146, 147, 148, 149};
static const int system_bus_muxvals[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -570,6 +573,7 @@ static const struct uniphier_pinctrl_group uniphier_sld8_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
@@ -598,6 +602,7 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1",
"system_bus_cs2",
@@ -622,6 +627,7 @@ static const struct uniphier_pinmux_function uniphier_sld8_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
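Reading the pinmux tables added across these files: each UNIPHIER_PINCTRL_GROUP(x) pairs an x_pins[] array with an x_muxvals[] array of the same length, one mux value per pin, and a muxval of -1 (seen in some system_bus tables) appears to mean the pin needs no mux programming. A minimal sketch of the shape these macros record (illustrative struct, not the driver's actual definition from pinctrl-uniphier.h):

/* Illustrative only: roughly what a pin group carries. */
struct example_pin_group {
	const char *name;
	const unsigned *pins;	/* e.g. spi0_pins[] */
	const int *muxvals;	/* parallel array; -1 = leave pin alone */
	unsigned num_pins;
};

/* LD4 spi0 from above: pins 135..138, all muxed to function value 12. */
static const struct example_pin_group ld4_spi0 = {
	.name		= "spi0",
	.pins		= (const unsigned []){ 135, 136, 137, 138 },
	.muxvals	= (const int []){ 12, 12, 12, 12 },
	.num_pins	= 4,
};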
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb0df9eb3e0f..16b1615958aa 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -52,6 +52,26 @@ config CHROMEOS_TBMC
config CROS_EC_CTL
tristate
+config CROS_EC_I2C
+ tristate "ChromeOS Embedded Controller (I2C)"
+ depends on MFD_CROS_EC && I2C
+
+ help
+ If you say Y here, you get support for talking to the ChromeOS
+ EC through an I2C bus. This uses a simple byte-level protocol with
+ a checksum. Failing accesses will be retried three times to
+ improve reliability.
+
+config CROS_EC_SPI
+ tristate "ChromeOS Embedded Controller (SPI)"
+ depends on MFD_CROS_EC && SPI
+
+ ---help---
+ If you say Y here, you get support for talking to the ChromeOS EC
+ through a SPI bus, using a byte-level protocol. Since the EC's
+ response time cannot be guaranteed, we support ignoring
+ 'pre-amble' bytes before the response actually starts.
+
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
depends on MFD_CROS_EC && ACPI && (X86 || COMPILE_TEST)
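The CROS_EC_I2C help text above promises three retries for failing accesses. A minimal sketch of that retry shape, assuming a hypothetical single-attempt transfer helper (the real implementation lives in cros_ec_i2c.c, moved into this directory below):

#define EC_I2C_RETRIES	3

/* Sketch only: cros_ec_i2c_xfer_once() is a hypothetical name. */
static int cros_ec_i2c_xfer_retry(struct cros_ec_device *ec_dev,
				  struct cros_ec_command *msg)
{
	int i, ret = -EIO;

	for (i = 0; i < EC_I2C_RETRIES; i++) {
		ret = cros_ec_i2c_xfer_once(ec_dev, msg);
		if (ret >= 0)
			break;	/* transfer (and checksum) succeeded */
	}
	return ret;
}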
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index e44c37a63fa9..cd591bf872bb 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o
cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \
cros_ec_vbc.o cros_ec_debugfs.o
obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o
+obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o
+obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
diff --git a/drivers/platform/chrome/chromeos_tbmc.c b/drivers/platform/chrome/chromeos_tbmc.c
index b935df6a9694..1e81f8144c0d 100644
--- a/drivers/platform/chrome/chromeos_tbmc.c
+++ b/drivers/platform/chrome/chromeos_tbmc.c
@@ -1,8 +1,16 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
// Driver to detect Tablet Mode for ChromeOS convertible.
//
// Copyright (C) 2017 Google, Inc.
// Author: Gwendal Grignou <gwendal@chromium.org>
+//
+// On Chromebooks using ACPI, this device listens for notifications
+// from GOOG0006 and issues method TBMC to retrieve the status.
+//
+// GOOG0006 issues the notification when it receives EC_HOST_EVENT_MODE_CHANGE
+// from the EC.
+// Method TBMC reads EC_ACPI_MEM_DEVICE_ORIENTATION byte from the shared
+// memory region.
#include <linux/acpi.h>
#include <linux/input.h>
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index ef9b4763356f..ef9b4763356f 100644
--- a/drivers/mfd/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 8350ca2311c7..398393ab5df8 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -506,10 +506,31 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
+static int get_next_event_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg,
+ int version, uint32_t size)
+{
+ int ret;
+
+ msg->version = version;
+ msg->command = EC_CMD_GET_NEXT_EVENT;
+ msg->insize = size;
+ msg->outsize = 0;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret > 0) {
+ ec_dev->event_size = ret - 1;
+ memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size);
+ }
+
+ return ret;
+}
+
static int get_next_event(struct cros_ec_device *ec_dev)
{
u8 buffer[sizeof(struct cros_ec_command) + sizeof(ec_dev->event_data)];
struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+ static int cmd_version = 1;
int ret;
if (ec_dev->suspended) {
@@ -517,18 +538,19 @@ static int get_next_event(struct cros_ec_device *ec_dev)
return -EHOSTDOWN;
}
- msg->version = 0;
- msg->command = EC_CMD_GET_NEXT_EVENT;
- msg->insize = sizeof(ec_dev->event_data);
- msg->outsize = 0;
+ if (cmd_version == 1) {
+ ret = get_next_event_xfer(ec_dev, msg, cmd_version,
+ sizeof(struct ec_response_get_next_event_v1));
+ if (ret < 0 || msg->result != EC_RES_INVALID_VERSION)
+ return ret;
- ret = cros_ec_cmd_xfer(ec_dev, msg);
- if (ret > 0) {
- ec_dev->event_size = ret - 1;
- memcpy(&ec_dev->event_data, msg->data,
- sizeof(ec_dev->event_data));
+ /* Fall back to version 0 for future send attempts */
+ cmd_version = 0;
}
+ ret = get_next_event_xfer(ec_dev, msg, cmd_version,
+ sizeof(struct ec_response_get_next_event));
+
return ret;
}
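The rework above probes the newer command version exactly once and caches the verdict in a function-local static, so an old EC costs only a single failed v1 transfer over the device's lifetime. The pattern, reduced to its shape (try_xfer() is a hypothetical stand-in for get_next_event_xfer()):

static int xfer_with_fallback(struct cros_ec_device *ec_dev,
			      struct cros_ec_command *msg)
{
	static int version = 1;	/* newest first; sticks at 0 once demoted */
	int ret;

	if (version == 1) {
		ret = try_xfer(ec_dev, msg, 1);
		if (ret < 0 || msg->result != EC_RES_INVALID_VERSION)
			return ret;	/* v1 worked, or a hard error */
		version = 0;		/* EC too old; don't probe again */
	}
	return try_xfer(ec_dev, msg, 0);
}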
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 2060d1483043..2060d1483043 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index fefbb8370da0..479031aa4f88 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -10,11 +10,6 @@ menuconfig GOLDFISH
if GOLDFISH
-config GOLDFISH_BUS
- bool "Goldfish platform bus"
- ---help---
- This is a virtual bus to host Goldfish Android Virtual Devices.
-
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
---help---
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index d3487125838c..e0c202df9674 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -1,5 +1,4 @@
#
# Makefile for Goldfish platform specific drivers
#
-obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o
obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 3e32a4c14d5f..2da567540c2d 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -48,6 +48,7 @@
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -645,7 +646,7 @@ static void goldfish_interrupt_task(unsigned long unused)
wake_up_interruptible(&pipe->wake_queue);
}
}
-DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
+static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
/*
* The general idea of the interrupt handling:
diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
deleted file mode 100644
index dd9ea463c2a4..000000000000
--- a/drivers/platform/goldfish/pdev_bus.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (C) 2011 Intel, Inc.
- * Copyright (C) 2013 Intel, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#define PDEV_BUS_OP_DONE (0x00)
-#define PDEV_BUS_OP_REMOVE_DEV (0x04)
-#define PDEV_BUS_OP_ADD_DEV (0x08)
-
-#define PDEV_BUS_OP_INIT (0x00)
-
-#define PDEV_BUS_OP (0x00)
-#define PDEV_BUS_GET_NAME (0x04)
-#define PDEV_BUS_NAME_LEN (0x08)
-#define PDEV_BUS_ID (0x0c)
-#define PDEV_BUS_IO_BASE (0x10)
-#define PDEV_BUS_IO_SIZE (0x14)
-#define PDEV_BUS_IRQ (0x18)
-#define PDEV_BUS_IRQ_COUNT (0x1c)
-#define PDEV_BUS_GET_NAME_HIGH (0x20)
-
-struct pdev_bus_dev {
- struct list_head list;
- struct platform_device pdev;
- struct resource resources[0];
-};
-
-static void goldfish_pdev_worker(struct work_struct *work);
-
-static void __iomem *pdev_bus_base;
-static unsigned long pdev_bus_addr;
-static unsigned long pdev_bus_len;
-static u32 pdev_bus_irq;
-static LIST_HEAD(pdev_bus_new_devices);
-static LIST_HEAD(pdev_bus_registered_devices);
-static LIST_HEAD(pdev_bus_removed_devices);
-static DECLARE_WORK(pdev_bus_worker, goldfish_pdev_worker);
-
-
-static void goldfish_pdev_worker(struct work_struct *work)
-{
- int ret;
- struct pdev_bus_dev *pos, *n;
-
- list_for_each_entry_safe(pos, n, &pdev_bus_removed_devices, list) {
- list_del(&pos->list);
- platform_device_unregister(&pos->pdev);
- kfree(pos);
- }
- list_for_each_entry_safe(pos, n, &pdev_bus_new_devices, list) {
- list_del(&pos->list);
- ret = platform_device_register(&pos->pdev);
- if (ret)
- pr_err("goldfish_pdev_worker failed to register device, %s\n",
- pos->pdev.name);
- list_add_tail(&pos->list, &pdev_bus_registered_devices);
- }
-}
-
-static void goldfish_pdev_remove(void)
-{
- struct pdev_bus_dev *pos, *n;
- u32 base;
-
- base = readl(pdev_bus_base + PDEV_BUS_IO_BASE);
-
- list_for_each_entry_safe(pos, n, &pdev_bus_new_devices, list) {
- if (pos->resources[0].start == base) {
- list_del(&pos->list);
- kfree(pos);
- return;
- }
- }
- list_for_each_entry_safe(pos, n, &pdev_bus_registered_devices, list) {
- if (pos->resources[0].start == base) {
- list_del(&pos->list);
- list_add_tail(&pos->list, &pdev_bus_removed_devices);
- schedule_work(&pdev_bus_worker);
- return;
- }
- };
- pr_err("goldfish_pdev_remove could not find device at %x\n", base);
-}
-
-static int goldfish_new_pdev(void)
-{
- struct pdev_bus_dev *dev;
- u32 name_len;
- u32 irq = -1, irq_count;
- int resource_count = 2;
- u32 base;
- char *name;
-
- base = readl(pdev_bus_base + PDEV_BUS_IO_BASE);
-
- irq_count = readl(pdev_bus_base + PDEV_BUS_IRQ_COUNT);
- name_len = readl(pdev_bus_base + PDEV_BUS_NAME_LEN);
- if (irq_count)
- resource_count++;
-
- dev = kzalloc(sizeof(*dev) +
- sizeof(struct resource) * resource_count +
- name_len + 1 + sizeof(*dev->pdev.dev.dma_mask), GFP_ATOMIC);
- if (dev == NULL)
- return -ENOMEM;
-
- dev->pdev.num_resources = resource_count;
- dev->pdev.resource = (struct resource *)(dev + 1);
- dev->pdev.name = name = (char *)(dev->pdev.resource + resource_count);
- dev->pdev.dev.coherent_dma_mask = ~0;
- dev->pdev.dev.dma_mask = (void *)(dev->pdev.name + name_len + 1);
- *dev->pdev.dev.dma_mask = ~0;
-
-#ifdef CONFIG_64BIT
- writel((u32)((u64)name>>32), pdev_bus_base + PDEV_BUS_GET_NAME_HIGH);
-#endif
- writel((u32)(unsigned long)name, pdev_bus_base + PDEV_BUS_GET_NAME);
- name[name_len] = '\0';
- dev->pdev.id = readl(pdev_bus_base + PDEV_BUS_ID);
- dev->pdev.resource[0].start = base;
- dev->pdev.resource[0].end = base +
- readl(pdev_bus_base + PDEV_BUS_IO_SIZE) - 1;
- dev->pdev.resource[0].flags = IORESOURCE_MEM;
- if (irq_count) {
- irq = readl(pdev_bus_base + PDEV_BUS_IRQ);
- dev->pdev.resource[1].start = irq;
- dev->pdev.resource[1].end = irq + irq_count - 1;
- dev->pdev.resource[1].flags = IORESOURCE_IRQ;
- }
-
- pr_debug("goldfish_new_pdev %s at %x irq %d\n", name, base, irq);
- list_add_tail(&dev->list, &pdev_bus_new_devices);
- schedule_work(&pdev_bus_worker);
-
- return 0;
-}
-
-static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
-{
- irqreturn_t ret = IRQ_NONE;
-
- while (1) {
- u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
-
- switch (op) {
- case PDEV_BUS_OP_REMOVE_DEV:
- goldfish_pdev_remove();
- ret = IRQ_HANDLED;
- break;
-
- case PDEV_BUS_OP_ADD_DEV:
- goldfish_new_pdev();
- ret = IRQ_HANDLED;
- break;
-
- case PDEV_BUS_OP_DONE:
- default:
- return ret;
- }
- }
-}
-
-static int goldfish_pdev_bus_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL)
- return -EINVAL;
-
- pdev_bus_addr = r->start;
- pdev_bus_len = resource_size(r);
-
- pdev_bus_base = ioremap(pdev_bus_addr, pdev_bus_len);
- if (pdev_bus_base == NULL) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "unable to map Goldfish MMIO.\n");
- goto free_resources;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (r == NULL) {
- ret = -ENOENT;
- goto free_map;
- }
-
- pdev_bus_irq = r->start;
-
- ret = request_irq(pdev_bus_irq, goldfish_pdev_bus_interrupt,
- IRQF_SHARED, "goldfish_pdev_bus", pdev);
- if (ret) {
- dev_err(&pdev->dev, "unable to request Goldfish IRQ\n");
- goto free_map;
- }
-
- writel(PDEV_BUS_OP_INIT, pdev_bus_base + PDEV_BUS_OP);
- return 0;
-
-free_map:
- iounmap(pdev_bus_base);
-free_resources:
- release_mem_region(pdev_bus_addr, pdev_bus_len);
- return ret;
-}
-
-static struct platform_driver goldfish_pdev_bus_driver = {
- .probe = goldfish_pdev_bus_probe,
- .driver = {
- .name = "goldfish_pdev_bus"
- }
-};
-builtin_platform_driver(goldfish_pdev_bus_driver);
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 591bccdeaff9..cd8a90846063 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -23,4 +23,15 @@ config MLXREG_HOTPLUG
This driver handles hot-plug events for the power suppliers, power
cables and fans on the wide range Mellanox IB and Ethernet systems.
+config MLXREG_IO
+ tristate "Mellanox platform register access driver support"
+ depends on REGMAP
+ depends on HWMON
+ help
+ This driver allows access to Mellanox programmable device register
+ space through a sysfs interface. The sets of registers for sysfs
+ access are defined per system type and include the registers
+ related to system reset operation, system reset cause monitoring
+ and some kinds of mux selection.
+
endif # MELLANOX_PLATFORM
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index 7c8385e497a8..57074d9c722c 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -4,3 +4,4 @@
# Mellanox Platform-Specific Drivers
#
obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
+obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index ac97aa020db3..b6d44550d98c 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -50,9 +50,8 @@
#define MLXREG_HOTPLUG_MASK_OFF 2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF 1
-/* ASIC health parameters. */
-#define MLXREG_HOTPLUG_HEALTH_MASK 0x02
-#define MLXREG_HOTPLUG_RST_CNTR 3
+/* ASIC good health mask. */
+#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
#define MLXREG_HOTPLUG_ATTRS_MAX 24
#define MLXREG_HOTPLUG_NOT_ASSERT 3
@@ -103,6 +102,9 @@ static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
{
struct mlxreg_core_hotplug_platform_data *pdata;
+ /* Notify user by sending hwmon uevent. */
+ kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);
+
/*
* Return if adapter number is negative. It can happen when a hotplug
* event is not associated with a hotplug device.
@@ -134,8 +136,13 @@ static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
return 0;
}
-static void mlxreg_hotplug_device_destroy(struct mlxreg_core_data *data)
+static void
+mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
+ struct mlxreg_core_data *data)
{
+ /* Notify user by sending hwmon uevent. */
+ kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);
+
if (data->hpdev.client) {
i2c_unregister_device(data->hpdev.client);
data->hpdev.client = NULL;
@@ -278,14 +285,14 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
data = item->data + bit;
if (regval & BIT(bit)) {
if (item->inversed)
- mlxreg_hotplug_device_destroy(data);
+ mlxreg_hotplug_device_destroy(priv, data);
else
mlxreg_hotplug_device_create(priv, data);
} else {
if (item->inversed)
mlxreg_hotplug_device_create(priv, data);
else
- mlxreg_hotplug_device_destroy(data);
+ mlxreg_hotplug_device_destroy(priv, data);
}
}
@@ -325,21 +332,40 @@ mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
goto out;
regval &= data->mask;
- item->cache = regval;
- if (regval == MLXREG_HOTPLUG_HEALTH_MASK) {
- if ((data->health_cntr++ == MLXREG_HOTPLUG_RST_CNTR) ||
- !priv->after_probe) {
+
+ if (item->cache == regval)
+ goto ack_event;
+
+ /*
+ * ASIC health indication is provided through two bits. The value
+ * 0x2 indicates that the ASIC reached good health, the value 0x0
+ * indicates bad health or a dormant state, and the value 0x3
+ * indicates the booting state. During ASIC reset it should pass
+ * through the following states: dormant -> booting -> good.
+ */
+ if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
+ if (!data->attached) {
+ /*
+ * ASIC is in steady state. Connect associated
+ * device, if configured.
+ */
mlxreg_hotplug_device_create(priv, data);
data->attached = true;
}
} else {
if (data->attached) {
- mlxreg_hotplug_device_destroy(data);
+ /*
+ * ASIC health is failed after ASIC has been
+ * in steady state. Disconnect associated
+ * device, if it has been connected.
+ */
+ mlxreg_hotplug_device_destroy(priv, data);
data->attached = false;
data->health_cntr = 0;
}
}
-
+ item->cache = regval;
+ack_event:
/* Acknowledge event. */
ret = regmap_write(priv->regmap, data->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
@@ -551,7 +577,7 @@ static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
/* Remove all the attached devices in group. */
count = item->count;
for (j = 0; j < count; j++, data++)
- mlxreg_hotplug_device_destroy(data);
+ mlxreg_hotplug_device_destroy(priv, data);
}
}
@@ -616,10 +642,6 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
disable_irq(priv->irq);
spin_lock_init(&priv->lock);
INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
- /* Perform initial interrupts setup. */
- mlxreg_hotplug_set_irq(priv);
-
- priv->after_probe = true;
dev_set_drvdata(&pdev->dev, priv);
err = mlxreg_hotplug_attr_init(priv);
@@ -637,6 +659,10 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
return PTR_ERR(priv->hwmon);
}
+ /* Perform initial interrupts setup. */
+ mlxreg_hotplug_set_irq(priv);
+ priv->after_probe = true;
+
return 0;
}
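For reference, the two-bit health encoding the reworked helper keys on, spelled out (values taken from the comment in the hunk above; the macro names are illustrative, not defines from the patch):

#define ASIC_HEALTH_BAD_OR_DORMANT	0x0	/* bad health or dormant */
#define ASIC_HEALTH_GOOD		0x2	/* steady state reached */
#define ASIC_HEALTH_BOOTING		0x3	/* reset in progress */

/* Expected walk during reset: 0x0 -> 0x3 -> 0x2. The helper only acts
 * when item->cache != regval, i.e. on an actual state transition. */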
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
new file mode 100644
index 000000000000..acfaf64ffde6
--- /dev/null
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox register access driver
+ *
+ * Copyright (C) 2018 Mellanox Technologies
+ * Copyright (C) 2018 Vadim Pasternak <vadimp@mellanox.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* Attribute parameters. */
+#define MLXREG_IO_ATT_SIZE 10
+#define MLXREG_IO_ATT_NUM 48
+
+/**
+ * struct mlxreg_io_priv_data - driver's private data:
+ *
+ * @pdev: platform device;
+ * @pdata: platform data;
+ * @hwmon: hwmon device;
+ * @mlxreg_io_attr: sysfs attributes array;
+ * @mlxreg_io_dev_attr: sysfs sensor device attribute array;
+ * @group: sysfs attribute group;
+ * @groups: list of sysfs attribute group for hwmon registration;
+ */
+struct mlxreg_io_priv_data {
+ struct platform_device *pdev;
+ struct mlxreg_core_platform_data *pdata;
+ struct device *hwmon;
+ struct attribute *mlxreg_io_attr[MLXREG_IO_ATT_NUM + 1];
+ struct sensor_device_attribute mlxreg_io_dev_attr[MLXREG_IO_ATT_NUM];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+};
+
+static int
+mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
+ bool rw_flag, u32 *regval)
+{
+ int ret;
+
+ ret = regmap_read(regmap, data->reg, regval);
+ if (ret)
+ goto access_error;
+
+ /*
+ * There are three kinds of attributes: single bit, full register's
+ * bits and bit sequence. For the first kind, the field mask marks
+ * the unrelated bits and the field bit is set to zero. For the
+ * second kind, the field mask is zero and the field bit has all
+ * bits set; such attributes need no special handling - the value
+ * is passed as is. For the third kind, the field mask marks the
+ * related bits and the field bit holds the number (from 1 to 32)
+ * of the first bit in the sequence.
+ */
+ if (!data->bit) {
+ /* Single bit. */
+ if (rw_flag) {
+ /* For show: expose effective bit value as 0 or 1. */
+ *regval = !!(*regval & ~data->mask);
+ } else {
+ /* For store: set effective bit value. */
+ *regval &= data->mask;
+ if (in_val)
+ *regval |= ~data->mask;
+ }
+ } else if (data->mask) {
+ /* Bit sequence. */
+ if (rw_flag) {
+ /* For show: mask and shift right. */
+ *regval = ror32(*regval & data->mask, (data->bit - 1));
+ } else {
+ /* For store: shift to the position and mask. */
+ in_val = rol32(in_val, data->bit - 1) & data->mask;
+ /* Clear relevant bits and set them to new value. */
+ *regval = (*regval & ~data->mask) | in_val;
+ }
+ }
+
+access_error:
+ return ret;
+}
+
+static ssize_t
+mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct mlxreg_core_data *data = priv->pdata->data + index;
+ u32 regval = 0;
+ int ret;
+
+ ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true, &regval);
+ if (ret)
+ goto access_error;
+
+ return sprintf(buf, "%u\n", regval);
+
+access_error:
+ return ret;
+}
+
+static ssize_t
+mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct mlxreg_core_data *data = priv->pdata->data + index;
+ u32 input_val, regval;
+ int ret;
+
+ if (len > MLXREG_IO_ATT_SIZE)
+ return -EINVAL;
+
+ /* Convert buffer to input value. */
+ ret = kstrtou32(buf, len, &input_val);
+ if (ret)
+ return ret;
+
+ ret = mlxreg_io_get_reg(priv->pdata->regmap, data, input_val, false,
+ &regval);
+ if (ret)
+ goto access_error;
+
+ ret = regmap_write(priv->pdata->regmap, data->reg, regval);
+ if (ret)
+ goto access_error;
+
+ return len;
+
+access_error:
+ dev_err(&priv->pdev->dev, "Bus access error\n");
+ return ret;
+}
+
+static struct device_attribute mlxreg_io_devattr_rw = {
+ .show = mlxreg_io_attr_show,
+ .store = mlxreg_io_attr_store,
+};
+
+static int mlxreg_io_attr_init(struct mlxreg_io_priv_data *priv)
+{
+ int i;
+
+ priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
+ priv->pdata->counter,
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!priv->group.attrs)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->pdata->counter; i++) {
+ priv->mlxreg_io_attr[i] =
+ &priv->mlxreg_io_dev_attr[i].dev_attr.attr;
+ memcpy(&priv->mlxreg_io_dev_attr[i].dev_attr,
+ &mlxreg_io_devattr_rw, sizeof(struct device_attribute));
+
+ /* Set attribute name as a label. */
+ priv->mlxreg_io_attr[i]->name =
+ devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
+ priv->pdata->data[i].label);
+
+ if (!priv->mlxreg_io_attr[i]->name) {
+ dev_err(&priv->pdev->dev, "Memory allocation failed for sysfs attribute %d.\n",
+ i + 1);
+ return -ENOMEM;
+ }
+
+ priv->mlxreg_io_dev_attr[i].dev_attr.attr.mode =
+ priv->pdata->data[i].mode;
+ priv->mlxreg_io_dev_attr[i].dev_attr.attr.name =
+ priv->mlxreg_io_attr[i]->name;
+ priv->mlxreg_io_dev_attr[i].index = i;
+ sysfs_attr_init(&priv->mlxreg_io_dev_attr[i].dev_attr.attr);
+ }
+
+ priv->group.attrs = priv->mlxreg_io_attr;
+ priv->groups[0] = &priv->group;
+ priv->groups[1] = NULL;
+
+ return 0;
+}
+
+static int mlxreg_io_probe(struct platform_device *pdev)
+{
+ struct mlxreg_io_priv_data *priv;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdata = dev_get_platdata(&pdev->dev);
+ if (!priv->pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ priv->pdev = pdev;
+
+ err = mlxreg_io_attr_init(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to allocate attributes: %d\n",
+ err);
+ return err;
+ }
+
+ priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "mlxreg_io",
+ priv,
+ priv->groups);
+ if (IS_ERR(priv->hwmon)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
+ PTR_ERR(priv->hwmon));
+ return PTR_ERR(priv->hwmon);
+ }
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_io_driver = {
+ .driver = {
+ .name = "mlxreg-io",
+ },
+ .probe = mlxreg_io_probe,
+};
+
+module_platform_driver(mlxreg_io_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox regmap I/O access driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlxreg-io");
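A worked example of the bit-sequence case in mlxreg_io_get_reg() above, with numbers chosen purely for illustration: a 3-bit field occupying register bits 4..6 would be described with mask = 0x70 and bit = 5 (the 1-based number of the field's first bit), so both paths shift by bit - 1 = 4:

/* show path: mask the field, rotate it down to bit 0 */
u32 field = ror32(regval & 0x70, 4);	/* regval 0x50 -> field 0x5 */

/* store path: rotate the new value into place, mask, then merge */
u32 in = rol32(0x5, 4) & 0x70;		/* 0x5 -> 0x50 */
regval = (regval & ~0x70) | in;		/* untouched bits preserved */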
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 322de58eebaf..f66521c7f846 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -30,7 +30,8 @@ int loongson3_cpu_temp(int cpu)
case PRID_REV_LOONGSON3B_R2:
reg = ((reg >> 8) & 0xff) - 100;
break;
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
reg = (reg & 0xffff)*731/0x4000 - 273;
break;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ac4d48830415..0c1aa6c314f5 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1196,16 +1196,16 @@ config INTEL_TURBO_MAX_3
This driver is only required when the system is not using Hardware
P-States (HWP). In HWP mode, priority can be read from ACPI tables.
-config SILEAD_DMI
- bool "Tablets with Silead touchscreens"
+config TOUCHSCREEN_DMI
+ bool "DMI based touchscreen configuration info"
depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD
---help---
- Certain ACPI based tablets with Silead touchscreens do not have
- enough data in ACPI tables for the touchscreen driver to handle
- the touchscreen properly, as OEMs expected the data to be baked
- into the tablet model specific version of the driver shipped
- with the OS-image for the device. This option supplies the missing
- information. Enable this for x86 tablets with Silead touchscreens.
+ Certain ACPI based tablets with e.g. Silead or Chipone touchscreens
+ do not have enough data in ACPI tables for the touchscreen driver to
+ handle the touchscreen properly, as OEMs expect the data to be baked
+ into the tablet model specific version of the driver shipped with
+ the OS-image for the device. This option supplies the missing info.
+ Enable this for x86 tablets with Silead or Chipone touchscreens.
config INTEL_CHTDC_TI_PWRBTN
tristate "Intel Cherry Trail Dollar Cove TI power button driver"
@@ -1218,6 +1218,17 @@ config INTEL_CHTDC_TI_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_chtdc_ti_pwrbtn.
+config I2C_MULTI_INSTANTIATE
+ tristate "I2C multi instantiate pseudo device driver"
+ depends on I2C && ACPI
+ help
+ Some ACPI-based systems list multiple i2c-devices in a single ACPI
+ firmware-node. This driver will instantiate separate i2c-clients
+ for each device in the firmware-node.
+
+ To compile this driver as a module, choose M here: the module
+ will be called i2c-multi-instantiate.
+
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 2ba6cb795338..e6d1becf81ce 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -78,7 +78,7 @@ obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
-obj-$(CONFIG_SILEAD_DMI) += silead_dmi.o
+obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o
obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
@@ -91,3 +91,4 @@ obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 8952173dd380..fcfeadd1301f 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -672,10 +672,7 @@ static void __init find_quirks(void)
static bool has_cap(u32 cap)
{
- if ((interface->capability & cap) != 0)
- return 1;
-
- return 0;
+ return interface->capability & cap;
}
/*
@@ -2216,7 +2213,7 @@ static int __init acer_wmi_init(void)
if (wmi_has_guid(AMW0_GUID1) &&
!dmi_check_system(amw0_whitelist) &&
quirks == &quirk_unknown) {
- pr_err("Unsupported machine has AMW0_GUID1, unable to load\n");
+ pr_debug("Unsupported machine has AMW0_GUID1, unable to load\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 136ff2b4cce5..db2af09067db 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -496,6 +496,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
+ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
{ KE_END, 0},
};
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 6afd011de9e5..7458f7602d5e 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -52,13 +52,12 @@ static const struct acpi_device_id device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, device_ids);
-static u64 asus_wireless_method(acpi_handle handle, const char *method,
- int param)
+static acpi_status asus_wireless_method(acpi_handle handle, const char *method,
+ int param, u64 *ret)
{
struct acpi_object_list p;
union acpi_object obj;
acpi_status s;
- u64 ret;
acpi_handle_debug(handle, "Evaluating method %s, parameter %#x\n",
method, param);
@@ -67,24 +66,27 @@ static u64 asus_wireless_method(acpi_handle handle, const char *method,
p.count = 1;
p.pointer = &obj;
- s = acpi_evaluate_integer(handle, (acpi_string) method, &p, &ret);
+ s = acpi_evaluate_integer(handle, (acpi_string) method, &p, ret);
if (ACPI_FAILURE(s))
acpi_handle_err(handle,
"Failed to eval method %s, param %#x (%d)\n",
method, param, s);
- acpi_handle_debug(handle, "%s returned %#llx\n", method, ret);
- return ret;
+ else
+ acpi_handle_debug(handle, "%s returned %#llx\n", method, *ret);
+
+ return s;
}
static enum led_brightness led_state_get(struct led_classdev *led)
{
struct asus_wireless_data *data;
- int s;
+ acpi_status s;
+ u64 ret;
data = container_of(led, struct asus_wireless_data, led);
s = asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
- data->hswc_params->status);
- if (s == data->hswc_params->on)
+ data->hswc_params->status, &ret);
+ if (ACPI_SUCCESS(s) && ret == data->hswc_params->on)
return LED_FULL;
return LED_OFF;
}
@@ -92,10 +94,11 @@ static enum led_brightness led_state_get(struct led_classdev *led)
static void led_state_update(struct work_struct *work)
{
struct asus_wireless_data *data;
+ u64 ret;
data = container_of(work, struct asus_wireless_data, led_work);
asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
- data->led_state);
+ data->led_state, &ret);
}
static void led_state_set(struct led_classdev *led, enum led_brightness value)
@@ -167,6 +170,7 @@ static int asus_wireless_add(struct acpi_device *adev)
data->led.brightness_get = led_state_get;
data->led.flags = LED_CORE_SUSPENDRESUME;
data->led.max_brightness = 1;
+ data->led.default_trigger = "rfkill-none";
err = devm_led_classdev_register(&adev->dev, &data->led);
if (err)
destroy_workqueue(data->wq);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 3d523ca64694..2d6e272315a8 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -67,6 +67,7 @@ MODULE_LICENSE("GPL");
#define NOTIFY_BRNDOWN_MAX 0x2e
#define NOTIFY_KBD_BRTUP 0xc4
#define NOTIFY_KBD_BRTDWN 0xc5
+#define NOTIFY_KBD_BRTTOGGLE 0xc7
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
@@ -470,6 +471,7 @@ static void kbd_led_update(struct work_struct *work)
ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
+ led_classdev_notify_brightness_hw_changed(&asus->kbd_led, asus->kbd_led_wk);
}
static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
@@ -500,15 +502,16 @@ static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
return retval;
}
-static void kbd_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
{
struct asus_wmi *asus;
+ int max_level;
asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+ max_level = asus->kbd_led.max_brightness;
- if (value > asus->kbd_led.max_brightness)
- value = asus->kbd_led.max_brightness;
+ if (value > max_level)
+ value = max_level;
else if (value < 0)
value = 0;
@@ -516,6 +519,12 @@ static void kbd_led_set(struct led_classdev *led_cdev,
queue_work(asus->led_workqueue, &asus->kbd_led_work);
}
+static void kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ do_kbd_led_set(led_cdev, value);
+}
+
static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
{
struct asus_wmi *asus;
@@ -666,6 +675,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
asus->kbd_led_wk = led_val;
asus->kbd_led.name = "asus::kbd_backlight";
+ asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
asus->kbd_led.brightness_set = kbd_led_set;
asus->kbd_led.brightness_get = kbd_led_get;
asus->kbd_led.max_brightness = 3;
@@ -858,12 +868,6 @@ static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static void asus_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
-{
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
-}
-
static struct hotplug_slot_ops asus_hotplug_slot_ops = {
.owner = THIS_MODULE,
.get_adapter_status = asus_get_adapter_status,
@@ -905,7 +909,6 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
goto error_info;
asus->hotplug_slot->private = asus;
- asus->hotplug_slot->release = &asus_cleanup_pci_hotplug;
asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
asus_get_adapter_status(asus->hotplug_slot,
&asus->hotplug_slot->info->adapter_status);
@@ -1051,8 +1054,11 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
* asus_unregister_rfkill_notifier()
*/
asus_rfkill_hotplug(asus);
- if (asus->hotplug_slot)
+ if (asus->hotplug_slot) {
pci_hp_deregister(asus->hotplug_slot);
+ kfree(asus->hotplug_slot->info);
+ kfree(asus->hotplug_slot);
+ }
if (asus->hotplug_workqueue)
destroy_workqueue(asus->hotplug_workqueue);
@@ -1758,6 +1764,22 @@ static void asus_wmi_notify(u32 value, void *context)
}
}
+ if (code == NOTIFY_KBD_BRTUP) {
+ do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk + 1);
+ goto exit;
+ }
+ if (code == NOTIFY_KBD_BRTDWN) {
+ do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk - 1);
+ goto exit;
+ }
+ if (code == NOTIFY_KBD_BRTTOGGLE) {
+ if (asus->kbd_led_wk == asus->kbd_led.max_brightness)
+ do_kbd_led_set(&asus->kbd_led, 0);
+ else
+ do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk + 1);
+ goto exit;
+ }
+
if (is_display_toggle(code) &&
asus->driver->quirks->no_display_toggle)
goto exit;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f1fa8612db40..06978c14c83b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2185,7 +2185,7 @@ static int __init dell_init(void)
dell_fill_request(&buffer, token->location, 0, 0, 0);
ret = dell_send_request(&buffer,
CLASS_TOKEN_READ, SELECT_TOKEN_AC);
- if (ret)
+ if (ret == 0)
max_intensity = buffer.output[3];
}
diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
index 9dc282ed5a9e..0537d44d45a6 100644
--- a/drivers/platform/x86/dell-smbios-base.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -212,6 +212,12 @@ int dell_smbios_call_filter(struct device *d,
if ((buffer->cmd_class == CLASS_TOKEN_READ ||
buffer->cmd_class == CLASS_TOKEN_WRITE) &&
buffer->cmd_select < 3) {
+ /* tokens enabled? */
+ if (!da_tokens) {
+ dev_dbg(d, "no token support on this system\n");
+ return -EINVAL;
+ }
+
/* find the matching token ID */
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].location != buffer->input[0])
@@ -315,6 +321,9 @@ struct calling_interface_token *dell_smbios_find_token(int tokenid)
{
int i;
+ if (!da_tokens)
+ return NULL;
+
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
return &da_tokens[i];
@@ -565,11 +574,6 @@ static int __init dell_smbios_init(void)
dmi_walk(find_tokens, NULL);
- if (!da_tokens) {
- pr_info("Unable to find dmi tokens\n");
- return -ENODEV;
- }
-
ret = platform_driver_register(&platform_driver);
if (ret)
goto fail_platform_driver;
@@ -583,13 +587,6 @@ static int __init dell_smbios_init(void)
if (ret)
goto fail_platform_device_add;
- /* duplicate tokens will cause problems building sysfs files */
- zero_duplicates(&platform_device->dev);
-
- ret = build_tokens_sysfs(platform_device);
- if (ret)
- goto fail_create_group;
-
/* register backends */
wmi = init_dell_smbios_wmi();
if (wmi)
@@ -600,7 +597,16 @@ static int __init dell_smbios_init(void)
if (wmi && smm) {
pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
wmi, smm);
- goto fail_sysfs;
+ goto fail_create_group;
+ }
+
+ if (da_tokens) {
+ /* duplicate tokens will cause problems building sysfs files */
+ zero_duplicates(&platform_device->dev);
+
+ ret = build_tokens_sysfs(platform_device);
+ if (ret)
+ goto fail_sysfs;
}
return 0;
@@ -628,7 +634,8 @@ static void __exit dell_smbios_exit(void)
exit_dell_smbios_smm();
mutex_lock(&smbios_mutex);
if (platform_device) {
- free_group(platform_device);
+ if (da_tokens)
+ free_group(platform_device);
platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
}
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
index e9e9da556318..97a90bebc360 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -24,7 +24,7 @@
static int da_command_address;
static int da_command_code;
static struct calling_interface_buffer *buffer;
-struct platform_device *platform_device;
+static struct platform_device *platform_device;
static DEFINE_MUTEX(smm_mutex);
static const struct dmi_system_id dell_device_table[] __initconst = {
@@ -82,7 +82,7 @@ static void find_cmd_address(const struct dmi_header *dm, void *dummy)
}
}
-int dell_smbios_smm_call(struct calling_interface_buffer *input)
+static int dell_smbios_smm_call(struct calling_interface_buffer *input)
{
struct smi_cmd command;
size_t size;
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index fbefedb1c172..88afe5651d24 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -82,7 +82,7 @@ static int run_smbios_call(struct wmi_device *wdev)
return 0;
}
-int dell_smbios_wmi_call(struct calling_interface_buffer *buffer)
+static int dell_smbios_wmi_call(struct calling_interface_buffer *buffer)
{
struct wmi_smbios_priv *priv;
size_t difference;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 4c38904a8a32..a4bbf6ecd1f0 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -726,12 +726,6 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
-{
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
-}
-
static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
.owner = THIS_MODULE,
.get_adapter_status = eeepc_get_adapter_status,
@@ -758,7 +752,6 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
goto error_info;
eeepc->hotplug_slot->private = eeepc;
- eeepc->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
eeepc_get_adapter_status(eeepc->hotplug_slot,
&eeepc->hotplug_slot->info->adapter_status);
@@ -837,8 +830,11 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
eeepc->wlan_rfkill = NULL;
}
- if (eeepc->hotplug_slot)
+ if (eeepc->hotplug_slot) {
pci_hp_deregister(eeepc->hotplug_slot);
+ kfree(eeepc->hotplug_slot->info);
+ kfree(eeepc->hotplug_slot);
+ }
if (eeepc->bluetooth_rfkill) {
rfkill_unregister(eeepc->bluetooth_rfkill);
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
new file mode 100644
index 000000000000..5456581b473c
--- /dev/null
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * I2C multi-instantiate driver, pseudo driver to instantiate multiple
+ * i2c-clients from a single fwnode.
+ *
+ * Copyright 2018 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+struct i2c_inst_data {
+ const char *type;
+ int gpio_irq_idx;
+};
+
+struct i2c_multi_inst_data {
+ int num_clients;
+ struct i2c_client *clients[0];
+};
+
+static int i2c_multi_inst_probe(struct platform_device *pdev)
+{
+ struct i2c_multi_inst_data *multi;
+ const struct acpi_device_id *match;
+ const struct i2c_inst_data *inst_data;
+ struct i2c_board_info board_info = {};
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ char name[32];
+ int i, ret;
+
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match) {
+ dev_err(dev, "Error ACPI match data is missing\n");
+ return -ENODEV;
+ }
+ inst_data = (const struct i2c_inst_data *)match->driver_data;
+
+ adev = ACPI_COMPANION(dev);
+
+ /* Count number of clients to instantiate */
+ for (i = 0; inst_data[i].type; i++) {}
+
+ multi = devm_kmalloc(dev,
+ offsetof(struct i2c_multi_inst_data, clients[i]),
+ GFP_KERNEL);
+ if (!multi)
+ return -ENOMEM;
+
+ multi->num_clients = i;
+
+ for (i = 0; i < multi->num_clients; i++) {
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE);
+ snprintf(name, sizeof(name), "%s-%s", match->id,
+ inst_data[i].type);
+ board_info.dev_name = name;
+ board_info.irq = 0;
+ if (inst_data[i].gpio_irq_idx != -1) {
+ ret = acpi_dev_gpio_irq_get(adev,
+ inst_data[i].gpio_irq_idx);
+ if (ret < 0) {
+ dev_err(dev, "Error requesting irq at index %d: %d\n",
+ inst_data[i].gpio_irq_idx, ret);
+ goto error;
+ }
+ board_info.irq = ret;
+ }
+ multi->clients[i] = i2c_acpi_new_device(dev, i, &board_info);
+ if (!multi->clients[i]) {
+ dev_err(dev, "Error creating i2c-client, idx %d\n", i);
+ ret = -ENODEV;
+ goto error;
+ }
+ }
+
+ platform_set_drvdata(pdev, multi);
+ return 0;
+
+error:
+ while (--i >= 0)
+ i2c_unregister_device(multi->clients[i]);
+
+ return ret;
+}
+
+static int i2c_multi_inst_remove(struct platform_device *pdev)
+{
+ struct i2c_multi_inst_data *multi = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < multi->num_clients; i++)
+ i2c_unregister_device(multi->clients[i]);
+
+ return 0;
+}
+
+static const struct i2c_inst_data bsg1160_data[] = {
+ { "bmc150_accel", 0 },
+ { "bmc150_magn", -1 },
+ { "bmg160", -1 },
+ {}
+};
+
+/*
+ * Note new device-ids must also be added to i2c_multi_instantiate_ids in
+ * drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
+ */
+static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
+ { "BSG1160", (unsigned long)bsg1160_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
+
+static struct platform_driver i2c_multi_inst_driver = {
+ .driver = {
+ .name = "I2C multi instantiate pseudo device driver",
+ .acpi_match_table = ACPI_PTR(i2c_multi_inst_acpi_ids),
+ },
+ .probe = i2c_multi_inst_probe,
+ .remove = i2c_multi_inst_remove,
+};
+module_platform_driver(i2c_multi_inst_driver);
+
+MODULE_DESCRIPTION("I2C multi instantiate pseudo device driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 45b7cb01f410..d4f1259ff5a2 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1133,10 +1133,17 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
- .ident = "Lenovo Legion Y520-15IKBN",
+ .ident = "Lenovo Legion Y520-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
+ },
+ },
+ {
+ .ident = "Lenovo Y520-15IKBM",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBM"),
},
},
{
@@ -1154,6 +1161,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Y720-15IKBM",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBM"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index b5adba227783..6cf9b7fa5bf0 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -96,13 +96,140 @@ struct intel_hid_priv {
bool wakeup_mode;
};
-static int intel_hid_set_enable(struct device *device, bool enable)
+#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
+
+enum intel_hid_dsm_fn_codes {
+ INTEL_HID_DSM_FN_INVALID,
+ INTEL_HID_DSM_BTNL_FN,
+ INTEL_HID_DSM_HDMM_FN,
+ INTEL_HID_DSM_HDSM_FN,
+ INTEL_HID_DSM_HDEM_FN,
+ INTEL_HID_DSM_BTNS_FN,
+ INTEL_HID_DSM_BTNE_FN,
+ INTEL_HID_DSM_HEBC_V1_FN,
+ INTEL_HID_DSM_VGBS_FN,
+ INTEL_HID_DSM_HEBC_V2_FN,
+ INTEL_HID_DSM_FN_MAX
+};
+
+static const char *intel_hid_dsm_fn_to_method[INTEL_HID_DSM_FN_MAX] = {
+ NULL,
+ "BTNL",
+ "HDMM",
+ "HDSM",
+ "HDEM",
+ "BTNS",
+ "BTNE",
+ "HEBC",
+ "VGBS",
+ "HEBC"
+};
+
+static unsigned long long intel_hid_dsm_fn_mask;
+static guid_t intel_dsm_guid;
+
+static bool intel_hid_execute_method(acpi_handle handle,
+ enum intel_hid_dsm_fn_codes fn_index,
+ unsigned long long arg)
{
+ union acpi_object *obj, argv4, req;
acpi_status status;
+ char *method_name;
- status = acpi_execute_simple_method(ACPI_HANDLE(device), "HDSM",
- enable);
- if (ACPI_FAILURE(status)) {
+ if (fn_index <= INTEL_HID_DSM_FN_INVALID ||
+ fn_index >= INTEL_HID_DSM_FN_MAX)
+ return false;
+
+ method_name = (char *)intel_hid_dsm_fn_to_method[fn_index];
+
+ if (!(intel_hid_dsm_fn_mask & BIT(fn_index)))
+ goto skip_dsm_exec;
+
+ /* All methods expect a package with one integer element */
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = arg;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &req;
+
+ obj = acpi_evaluate_dsm(handle, &intel_dsm_guid, 1, fn_index, &argv4);
+ if (obj) {
+ acpi_handle_debug(handle, "Exec DSM Fn code: %d[%s] success\n",
+ fn_index, method_name);
+ ACPI_FREE(obj);
+ return true;
+ }
+
+skip_dsm_exec:
+ status = acpi_execute_simple_method(handle, method_name, arg);
+ if (ACPI_SUCCESS(status))
+ return true;
+
+ return false;
+}
+
+static bool intel_hid_evaluate_method(acpi_handle handle,
+ enum intel_hid_dsm_fn_codes fn_index,
+ unsigned long long *result)
+{
+ union acpi_object *obj;
+ acpi_status status;
+ char *method_name;
+
+ if (fn_index <= INTEL_HID_DSM_FN_INVALID ||
+ fn_index >= INTEL_HID_DSM_FN_MAX)
+ return false;
+
+ method_name = (char *)intel_hid_dsm_fn_to_method[fn_index];
+
+ if (!(intel_hid_dsm_fn_mask & BIT(fn_index)))
+ goto skip_dsm_eval;
+
+ obj = acpi_evaluate_dsm_typed(handle, &intel_dsm_guid,
+ 1, fn_index,
+ NULL, ACPI_TYPE_INTEGER);
+ if (obj) {
+ *result = obj->integer.value;
+ acpi_handle_debug(handle,
+ "Eval DSM Fn code: %d[%s] results: 0x%llx\n",
+ fn_index, method_name, *result);
+ ACPI_FREE(obj);
+ return true;
+ }
+
+skip_dsm_eval:
+ status = acpi_evaluate_integer(handle, method_name, NULL, result);
+ if (ACPI_SUCCESS(status))
+ return true;
+
+ return false;
+}
+
+static void intel_hid_init_dsm(acpi_handle handle)
+{
+ union acpi_object *obj;
+
+ guid_parse(HID_EVENT_FILTER_UUID, &intel_dsm_guid);
+
+ obj = acpi_evaluate_dsm_typed(handle, &intel_dsm_guid, 1, 0, NULL,
+ ACPI_TYPE_BUFFER);
+ if (obj) {
+ intel_hid_dsm_fn_mask = *obj->buffer.pointer;
+ ACPI_FREE(obj);
+ }
+
+ acpi_handle_debug(handle, "intel_hid_dsm_fn_mask = %llx\n",
+ intel_hid_dsm_fn_mask);
+}
+
+static int intel_hid_set_enable(struct device *device, bool enable)
+{
+ acpi_handle handle = ACPI_HANDLE(device);
+
+ /* Enable|disable features - power button is always enabled */
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN,
+ enable)) {
dev_warn(device, "failed to %sable hotkeys\n",
enable ? "en" : "dis");
return -EIO;
@@ -129,9 +256,8 @@ static void intel_button_array_enable(struct device *device, bool enable)
}
/* Enable|disable features - power button is always enabled */
- status = acpi_execute_simple_method(handle, "BTNE",
- enable ? button_cap : 1);
- if (ACPI_FAILURE(status))
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_BTNE_FN,
+ enable ? button_cap : 1))
dev_warn(device, "failed to set button capability\n");
}
@@ -217,7 +343,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
unsigned long long ev_index;
- acpi_status status;
if (priv->wakeup_mode) {
/*
@@ -269,8 +394,8 @@ wakeup:
return;
}
- status = acpi_evaluate_integer(handle, "HDEM", NULL, &ev_index);
- if (ACPI_FAILURE(status)) {
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_HDEM_FN,
+ &ev_index)) {
dev_warn(&device->dev, "failed to get event index\n");
return;
}
@@ -284,17 +409,24 @@ static bool button_array_present(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
unsigned long long event_cap;
- acpi_status status;
- bool supported = false;
- status = acpi_evaluate_integer(handle, "HEBC", NULL, &event_cap);
- if (ACPI_SUCCESS(status) && (event_cap & 0x20000))
- supported = true;
+ if (intel_hid_evaluate_method(handle, INTEL_HID_DSM_HEBC_V2_FN,
+ &event_cap)) {
+ /* Check for the 5-button array or the v2 power button */
+ if (event_cap & 0x60000)
+ return true;
+ }
+
+ if (intel_hid_evaluate_method(handle, INTEL_HID_DSM_HEBC_V1_FN,
+ &event_cap)) {
+ if (event_cap & 0x20000)
+ return true;
+ }
if (dmi_check_system(button_array_table))
- supported = true;
+ return true;
- return supported;
+ return false;
}
static int intel_hid_probe(struct platform_device *device)
@@ -305,8 +437,9 @@ static int intel_hid_probe(struct platform_device *device)
acpi_status status;
int err;
- status = acpi_evaluate_integer(handle, "HDMM", NULL, &mode);
- if (ACPI_FAILURE(status)) {
+ intel_hid_init_dsm(handle);
+
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_HDMM_FN, &mode)) {
dev_warn(&device->dev, "failed to read mode\n");
return -ENODEV;
}
@@ -352,13 +485,16 @@ static int intel_hid_probe(struct platform_device *device)
goto err_remove_notify;
if (priv->array) {
+ unsigned long long dummy;
+
intel_button_array_enable(&device->dev, true);
/* Call button load method to enable HID power button */
- status = acpi_evaluate_object(handle, "BTNL", NULL, NULL);
- if (ACPI_FAILURE(status))
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN,
+ &dummy)) {
dev_warn(&device->dev,
"failed to enable HID power button\n");
+ }
}
device_init_wakeup(&device->dev, true);
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index c13780b8dabb..06cd7e818ed5 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -17,6 +17,7 @@
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG 0x40
+#define DOCK_MODE_FLAG 0x80
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AceLan Kao");
@@ -38,6 +39,8 @@ static const struct key_entry intel_vbtn_keymap[] = {
{ KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
{ KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */
{ KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */
+ { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
+ { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
{ KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
{ KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
{ KE_END },
@@ -121,6 +124,8 @@ static void detect_tablet_mode(struct platform_device *device)
m = !(obj->integer.value & TABLET_MODE_FLAG);
input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
+ m = (obj->integer.value & DOCK_MODE_FLAG) ? 1 : 0;
+ input_report_switch(priv->input_dev, SW_DOCK, m);
out:
kfree(vgbs_output.pointer);
}
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c
index ea865d4ca220..227943a20212 100644
--- a/drivers/platform/x86/intel_bxtwc_tmu.c
+++ b/drivers/platform/x86/intel_bxtwc_tmu.c
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_soc_pmic.h>
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 014fc1634a3d..c5ece7ef08c6 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -858,10 +858,7 @@ static u16 read_mgtv(struct ips_driver *ips)
static u16 read_ptv(struct ips_driver *ips)
{
- u16 val, slope, offset;
-
- slope = (ips->pta_val & PTA_SLOPE_MASK) >> PTA_SLOPE_SHIFT;
- offset = ips->pta_val & PTA_OFFSET_MASK;
+ u16 val;
val = thm_readw(THM_PTV) & PTV_MASK;
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 43bbe74743d9..2d272a3e0176 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -196,9 +196,67 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{}
};
+static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
+ {"AUDIO_D3", BIT(0)},
+ {"OTG_D3", BIT(1)},
+ {"XHCI_D3", BIT(2)},
+ {"LPIO_D3", BIT(3)},
+ {"SDX_D3", BIT(4)},
+ {"SATA_D3", BIT(5)},
+ {"UFS0_D3", BIT(6)},
+ {"UFS1_D3", BIT(7)},
+ {"EMMC_D3", BIT(8)},
+ {}
+};
+
+static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
+ {"SDIO_PLL_OFF", BIT(0)},
+ {"USB2_PLL_OFF", BIT(1)},
+ {"AUDIO_PLL_OFF", BIT(2)},
+ {"OC_PLL_OFF", BIT(3)},
+ {"MAIN_PLL_OFF", BIT(4)},
+ {"XOSC_OFF", BIT(5)},
+ {"LPC_CLKS_GATED", BIT(6)},
+ {"PCIE_CLKREQS_IDLE", BIT(7)},
+ {"AUDIO_ROSC_OFF", BIT(8)},
+ {"HPET_XOSC_CLK_REQ", BIT(9)},
+ {"PMC_ROSC_SLOW_CLK", BIT(10)},
+ {"AON2_ROSC_GATED", BIT(11)},
+ {"CLKACKS_DEASSERTED", BIT(12)},
+ {}
+};
+
+static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
+ {"MPHY_CORE_GATED", BIT(0)},
+ {"CSME_GATED", BIT(1)},
+ {"USB2_SUS_GATED", BIT(2)},
+ {"DYN_FLEX_IO_IDLE", BIT(3)},
+ {"GBE_NO_LINK", BIT(4)},
+ {"THERM_SEN_DISABLED", BIT(5)},
+ {"PCIE_LOW_POWER", BIT(6)},
+ {"ISH_VNNAON_REQ_ACT", BIT(7)},
+ {"ISH_VNN_REQ_ACT", BIT(8)},
+ {"CNV_VNNAON_REQ_ACT", BIT(9)},
+ {"CNV_VNN_REQ_ACT", BIT(10)},
+ {"NPK_VNNON_REQ_ACT", BIT(11)},
+ {"PMSYNC_STATE_IDLE", BIT(12)},
+ {"ALST_GT_THRES", BIT(13)},
+ {"PMC_ARC_PG_READY", BIT(14)},
+ {}
+};
+
+static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
+ cnp_slps0_dbg0_map,
+ cnp_slps0_dbg1_map,
+ cnp_slps0_dbg2_map,
+ NULL,
+};
+
static const struct pmc_reg_map cnp_reg_map = {
.pfear_sts = cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.regmap_length = CNP_PMC_MMIO_REG_LEN,
.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
@@ -252,6 +310,8 @@ static int pmc_core_check_read_lock_bit(void)
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
+static bool slps0_dbg_latch;
+
static void pmc_core_display_map(struct seq_file *s, int index,
u8 pf_reg, const struct pmc_bit_map *pf_map)
{
@@ -481,6 +541,57 @@ static const struct file_operations pmc_core_ltr_ignore_ops = {
.release = single_release,
};
+static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
+{
+ const struct pmc_reg_map *map = pmcdev->map;
+ u32 fd;
+
+ mutex_lock(&pmcdev->lock);
+
+ if (!reset && !slps0_dbg_latch)
+ goto out_unlock;
+
+ fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
+ if (reset)
+ fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
+ else
+ fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
+ pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);
+
+ slps0_dbg_latch = false;
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+}
+
+static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
+ const struct pmc_bit_map *map;
+ int offset;
+ u32 data;
+
+ pmc_core_slps0_dbg_latch(pmcdev, false);
+ offset = pmcdev->map->slps0_dbg_offset;
+ while (*maps) {
+ map = *maps;
+ data = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ while (map->name) {
+ seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ?
+ "Yes" : "No");
+ ++map;
+ }
+ ++maps;
+ }
+ pmc_core_slps0_dbg_latch(pmcdev, true);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
+
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -514,6 +625,15 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
0444, dir, pmcdev,
&pmc_core_mphy_pg_ops);
+ if (pmcdev->map->slps0_dbg_maps) {
+ debugfs_create_file("slp_s0_debug_status", 0444,
+ dir, pmcdev,
+ &pmc_core_slps0_dbg_fops);
+
+ debugfs_create_bool("slp_s0_dbg_latch", 0644,
+ dir, &slps0_dbg_latch);
+ }
+
return 0;
}
#else
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index 5fa5f97870aa..93a7e99e1f8b 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -127,12 +127,14 @@ enum ppfear_regs {
#define CNP_PMC_SLP_S0_RES_COUNTER_OFFSET 0x193C
#define CNP_PMC_LTR_IGNORE_OFFSET 0x1B0C
#define CNP_PMC_PM_CFG_OFFSET 0x1818
+#define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
/* Cannonlake: PGD PFET Enable Ack Status Register(s) start */
#define CNP_PMC_HOST_PPFEAR0A 0x1D90
#define CNP_PMC_MMIO_REG_LEN 0x2000
#define CNP_PPFEAR_NUM_ENTRIES 8
#define CNP_PMC_READ_DISABLE_BIT 22
+#define CNP_PMC_LATCH_SLPS0_EVENTS BIT(31)
struct pmc_bit_map {
const char *name;
@@ -145,6 +147,7 @@ struct pmc_bit_map {
* @pfear_sts: Maps name of IP block to PPFEAR* bit
* @mphy_sts: Maps name of MPHY lane to MPHY status lane status bit
* @pll_sts: Maps name of PLL to corresponding bit status
+ * @slps0_dbg_maps: Array of SLP_S0_DBG* registers containing debug info
* @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency
* @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit
* @regmap_length: Length of memory to map from PWRMBASE address to access
@@ -153,6 +156,7 @@ struct pmc_bit_map {
* PPFEAR
* @pm_cfg_offset: PWRMBASE offset to PM_CFG register
* @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE
+ * @slps0_dbg_offset: PWRMBASE offset to SLP_S0_DEBUG_REG*
*
* Each PCH has unique set of register offsets and bit indexes. This structure
* captures them to have a common implementation.
@@ -161,6 +165,7 @@ struct pmc_reg_map {
const struct pmc_bit_map *pfear_sts;
const struct pmc_bit_map *mphy_sts;
const struct pmc_bit_map *pll_sts;
+ const struct pmc_bit_map **slps0_dbg_maps;
const u32 slp_s0_offset;
const u32 ltr_ignore_offset;
const int regmap_length;
@@ -168,6 +173,7 @@ struct pmc_reg_map {
const int ppfear_buckets;
const u32 pm_cfg_offset;
const int pm_read_disable_bit;
+ const u32 slps0_dbg_offset;
};
/**
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index b5b890127479..2efeab650345 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -12,11 +12,13 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <asm/intel_punit_ipc.h>
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index a0fd9aa6d932..d89936c93ba0 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -47,15 +47,26 @@
/* LPC bus IO offsets */
#define MLXPLAT_CPLD_LPC_I2C_BASE_ADRR 0x2000
#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET 0x00
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET 0x01
+#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_LED1_OFFSET 0x20
#define MLXPLAT_CPLD_LPC_REG_LED2_OFFSET 0x21
#define MLXPLAT_CPLD_LPC_REG_LED3_OFFSET 0x22
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
+#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
+#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
+#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
+#define MLXPLAT_CPLD_LPC_REG_WP2_OFFSET 0x33
+#define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37
#define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a
#define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET 0x40
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
+#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
+#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
+#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
#define MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET 0x59
#define MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET 0x5a
@@ -65,9 +76,23 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
+#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
+#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
+#define MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET 0xe6
+#define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
+#define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
+#define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
+#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xea
+#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xeb
+#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xec
+#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xed
+#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xee
+#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xef
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH1_OFF) | \
@@ -77,17 +102,20 @@
MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
+#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
#define MLXPLAT_CPLD_AGGR_PSU_MASK_DEF 0x08
#define MLXPLAT_CPLD_AGGR_PWR_MASK_DEF 0x08
#define MLXPLAT_CPLD_AGGR_FAN_MASK_DEF 0x40
-#define MLXPLAT_CPLD_AGGR_MASK_DEF (MLXPLAT_CPLD_AGGR_PSU_MASK_DEF | \
+#define MLXPLAT_CPLD_AGGR_MASK_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_PSU_MASK_DEF | \
MLXPLAT_CPLD_AGGR_FAN_MASK_DEF)
+#define MLXPLAT_CPLD_AGGR_ASIC_MASK_NG 0x01
#define MLXPLAT_CPLD_AGGR_MASK_NG_DEF 0x04
-#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc0
-#define MLXPLAT_CPLD_AGGR_MASK_MSN21XX 0x04
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
@@ -122,12 +150,16 @@
* @pdev_mux - array of mux platform devices
* @pdev_hotplug - hotplug platform devices
* @pdev_led - led platform devices
+ * @pdev_io_regs - register access platform devices
+ * @pdev_fan - FAN platform devices
*/
struct mlxplat_priv {
struct platform_device *pdev_i2c;
struct platform_device *pdev_mux[MLXPLAT_CPLD_LPC_MUX_DEVS];
struct platform_device *pdev_hotplug;
struct platform_device *pdev_led;
+ struct platform_device *pdev_io_regs;
+ struct platform_device *pdev_fan;
};
/* Regions for LPC I2C controller and LPC base register space */
@@ -288,6 +320,15 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_items_data[] = {
},
};
+static struct mlxreg_core_data mlxplat_mlxcpld_default_asic_items_data[] = {
+ {
+ .label = "asic1",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
{
.data = mlxplat_mlxcpld_default_psu_items_data,
@@ -316,6 +357,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
.inversed = 1,
.health = false,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
};
static
@@ -324,6 +374,8 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
@@ -352,6 +404,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn21xx_items[] = {
.inversed = 0,
.health = false,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
};
static
@@ -454,6 +515,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn274x_items[] = {
.inversed = 1,
.health = false,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
};
static
@@ -492,6 +562,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = {
.inversed = 0,
.health = false,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
};
static
@@ -589,6 +668,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_ng_items[] = {
.inversed = 1,
.health = false,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
};
static
@@ -813,6 +901,278 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
};
+/* Platform register access default */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_main_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_fw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_hotswap_or_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "select_iio",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_regs_io_data = {
+ .data = mlxplat_mlxcpld_default_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_regs_io_data),
+};
+
+/* Platform register access for the MSN21xx, MSN201x and MSN274x system families */
+static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_main_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_hotswap_or_halt",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_msn21xx_regs_io_data = {
+ .data = mlxplat_mlxcpld_msn21xx_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_regs_io_data),
+};
+
+/* Platform FAN default */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
+ {
+ .label = "pwm1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET,
+ },
+ {
+ .label = "tacho1",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho2",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho3",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho4",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho5",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho6",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho7",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho8",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho9",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho10",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho11",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+ {
+ .label = "tacho12",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET,
+ .mask = GENMASK(7, 0),
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
+ .data = mlxplat_mlxcpld_default_fan_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data),
+};
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
@@ -822,14 +1182,22 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
}
return false;
@@ -838,15 +1206,25 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -856,6 +1234,20 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
}
return false;
@@ -864,15 +1256,23 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -882,11 +1282,31 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
}
return false;
}
+static const struct reg_default mlxplat_mlxcpld_regmap_default[] = {
+ { MLXPLAT_CPLD_LPC_REG_WP1_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WP2_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
@@ -919,6 +1339,8 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config = {
.writeable_reg = mlxplat_mlxcpld_writeable_reg,
.readable_reg = mlxplat_mlxcpld_readable_reg,
.volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_default),
.reg_read = mlxplat_mlxcpld_reg_read,
.reg_write = mlxplat_mlxcpld_reg_write,
};
@@ -930,6 +1352,8 @@ static struct resource mlxplat_mlxcpld_resources[] = {
static struct platform_device *mlxplat_dev;
static struct mlxreg_core_hotplug_platform_data *mlxplat_hotplug;
static struct mlxreg_core_platform_data *mlxplat_led;
+static struct mlxreg_core_platform_data *mlxplat_regs_io;
+static struct mlxreg_core_platform_data *mlxplat_fan;
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
@@ -944,6 +1368,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_hotplug->deferred_nr =
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
+ mlxplat_regs_io = &mlxplat_default_regs_io_data;
return 1;
};
@@ -961,6 +1386,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
+ mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
return 1;
};
@@ -978,6 +1404,7 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
+ mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
return 1;
};
@@ -995,6 +1422,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_hotplug->deferred_nr =
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
return 1;
};
@@ -1012,6 +1440,7 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
return 1;
};
@@ -1163,7 +1592,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
static int __init mlxplat_init(void)
{
struct mlxplat_priv *priv;
- int i, nr, err;
+ int i, j, nr, err;
if (!dmi_check_system(mlxplat_dmi_table))
return -ENODEV;
@@ -1233,6 +1662,15 @@ static int __init mlxplat_init(void)
goto fail_platform_mux_register;
}
+ /* Set default registers. */
+ for (j = 0; j < mlxplat_mlxcpld_regmap_config.num_reg_defaults; j++) {
+ err = regmap_write(mlxplat_hotplug->regmap,
+ mlxplat_mlxcpld_regmap_default[j].reg,
+ mlxplat_mlxcpld_regmap_default[j].def);
+ if (err)
+ goto fail_platform_mux_register;
+ }
+
/* Add LED driver. */
mlxplat_led->regmap = mlxplat_hotplug->regmap;
priv->pdev_led = platform_device_register_resndata(
@@ -1244,14 +1682,48 @@ static int __init mlxplat_init(void)
goto fail_platform_hotplug_register;
}
+ /* Add register I/O access driver. */
+ if (mlxplat_regs_io) {
+ mlxplat_regs_io->regmap = mlxplat_hotplug->regmap;
+ priv->pdev_io_regs = platform_device_register_resndata(
+ &mlxplat_dev->dev, "mlxreg-io",
+ PLATFORM_DEVID_NONE, NULL, 0,
+ mlxplat_regs_io,
+ sizeof(*mlxplat_regs_io));
+ if (IS_ERR(priv->pdev_io_regs)) {
+ err = PTR_ERR(priv->pdev_io_regs);
+ goto fail_platform_led_register;
+ }
+ }
+
+ /* Add FAN driver. */
+ if (mlxplat_fan) {
+ mlxplat_fan->regmap = mlxplat_hotplug->regmap;
+ priv->pdev_fan = platform_device_register_resndata(
+ &mlxplat_dev->dev, "mlxreg-fan",
+ PLATFORM_DEVID_NONE, NULL, 0,
+ mlxplat_fan,
+ sizeof(*mlxplat_fan));
+ if (IS_ERR(priv->pdev_fan)) {
+ err = PTR_ERR(priv->pdev_fan);
+ goto fail_platform_io_regs_register;
+ }
+ }
+
/* Sync registers with hardware. */
regcache_mark_dirty(mlxplat_hotplug->regmap);
err = regcache_sync(mlxplat_hotplug->regmap);
if (err)
- goto fail_platform_led_register;
+ goto fail_platform_fan_register;
return 0;
+fail_platform_fan_register:
+ if (mlxplat_fan)
+ platform_device_unregister(priv->pdev_fan);
+fail_platform_io_regs_register:
+ if (mlxplat_regs_io)
+ platform_device_unregister(priv->pdev_io_regs);
fail_platform_led_register:
platform_device_unregister(priv->pdev_led);
fail_platform_hotplug_register:
@@ -1272,6 +1744,10 @@ static void __exit mlxplat_exit(void)
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
int i;
+ if (priv->pdev_fan)
+ platform_device_unregister(priv->pdev_fan);
+ if (priv->pdev_io_regs)
+ platform_device_unregister(priv->pdev_io_regs);
platform_device_unregister(priv->pdev_led);
platform_device_unregister(priv->pdev_hotplug);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index cae9b0595692..fde08a997557 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -57,6 +57,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>
@@ -335,6 +336,7 @@ static struct {
u32 second_fan:1;
u32 beep_needs_two_args:1;
u32 mixer_no_level_control:1;
+ u32 battery_force_primary:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
u32 platform_drv_attrs_registered:1;
@@ -343,7 +345,6 @@ static struct {
u32 sensors_pdev_attrs_registered:1;
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
- u32 battery:1;
} tp_features;
static struct {
@@ -358,9 +359,9 @@ struct thinkpad_id_data {
char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
- u16 bios_model; /* 1Y = 0x5931, 0 = unknown */
- u16 ec_model;
- u16 bios_release; /* 1ZETK1WW = 0x314b, 0 = unknown */
+ u32 bios_model; /* 1Y = 0x3159, 0 = unknown */
+ u32 ec_model;
+ u16 bios_release; /* 1ZETK1WW = 0x4b31, 0 = unknown */
u16 ec_release;
char *model_str; /* ThinkPad T43 */
@@ -444,17 +445,20 @@ do { \
/*
* Quirk handling helpers
*
- * ThinkPad IDs and versions seen in the field so far
- * are two-characters from the set [0-9A-Z], i.e. base 36.
+ * ThinkPad IDs and versions seen in the field so far are
+ * two or three characters from the set [0-9A-Z], i.e. base 36.
*
* We use values well outside that range as specials.
*/
-#define TPACPI_MATCH_ANY 0xffffU
+#define TPACPI_MATCH_ANY 0xffffffffU
+#define TPACPI_MATCH_ANY_VERSION 0xffffU
#define TPACPI_MATCH_UNKNOWN 0U
-/* TPID('1', 'Y') == 0x5931 */
-#define TPID(__c1, __c2) (((__c2) << 8) | (__c1))
+/* TPID('1', 'Y') == 0x3159 */
+#define TPID(__c1, __c2) (((__c1) << 8) | (__c2))
+#define TPID3(__c1, __c2, __c3) (((__c1) << 16) | ((__c2) << 8) | (__c3))
+#define TPVER TPID
#define TPACPI_Q_IBM(__id1, __id2, __quirk) \
{ .vendor = PCI_VENDOR_ID_IBM, \
@@ -468,6 +472,12 @@ do { \
.ec = TPACPI_MATCH_ANY, \
.quirks = (__quirk) }
+#define TPACPI_Q_LNV3(__id1, __id2, __id3, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPID3(__id1, __id2, __id3), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
#define TPACPI_QEC_LNV(__id1, __id2, __quirk) \
{ .vendor = PCI_VENDOR_ID_LENOVO, \
.bios = TPACPI_MATCH_ANY, \
@@ -476,8 +486,8 @@ do { \
struct tpacpi_quirk {
unsigned int vendor;
- u16 bios;
- u16 ec;
+ u32 bios;
+ u32 ec;
unsigned long quirks;
};
@@ -1647,16 +1657,16 @@ static void tpacpi_remove_driver_attributes(struct device_driver *drv)
{ .vendor = (__v), \
.bios = TPID(__id1, __id2), \
.ec = TPACPI_MATCH_ANY, \
- .quirks = TPACPI_MATCH_ANY << 16 \
- | (__bv1) << 8 | (__bv2) }
+ .quirks = TPACPI_MATCH_ANY_VERSION << 16 \
+ | TPVER(__bv1, __bv2) }
#define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2, \
__eid, __ev1, __ev2) \
{ .vendor = (__v), \
.bios = TPID(__bid1, __bid2), \
.ec = __eid, \
- .quirks = (__ev1) << 24 | (__ev2) << 16 \
- | (__bv1) << 8 | (__bv2) }
+ .quirks = TPVER(__ev1, __ev2) << 16 \
+ | TPVER(__bv1, __bv2) }
#define TPV_QI0(__id1, __id2, __bv1, __bv2) \
TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2)
@@ -1798,7 +1808,7 @@ static void __init tpacpi_check_outdated_fw(void)
/* note that unknown versions are set to 0x0000 and we use that */
if ((bios_version > thinkpad_id.bios_release) ||
(ec_version > thinkpad_id.ec_release &&
- ec_version != TPACPI_MATCH_ANY)) {
+ ec_version != TPACPI_MATCH_ANY_VERSION)) {
/*
* The changelogs would let us track down the exact
* reason, but it is just too much of a pain to track
@@ -1928,7 +1938,7 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
/* first new observed key (star, favorites) is 0x1311 */
TP_ACPI_HOTKEYSCAN_STAR = 69,
TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL2,
- TP_ACPI_HOTKEYSCAN_UNK25,
+ TP_ACPI_HOTKEYSCAN_CALCULATOR,
TP_ACPI_HOTKEYSCAN_BLUETOOTH,
TP_ACPI_HOTKEYSCAN_KEYBOARD,
@@ -3449,7 +3459,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_FAVORITES, /* Favorite app, 0x311 */
KEY_RESERVED, /* Clipping tool */
- KEY_RESERVED,
+ KEY_CALC, /* Calculator (above numpad, P52) */
KEY_BLUETOOTH, /* Bluetooth */
KEY_KEYBOARD /* Keyboard, 0x315 */
},
@@ -9365,7 +9375,9 @@ static int tpacpi_battery_probe(int battery)
{
int ret = 0;
- memset(&battery_info, 0, sizeof(struct tpacpi_battery_driver_data));
+ memset(&battery_info.batteries[battery], 0,
+ sizeof(battery_info.batteries[battery]));
+
/*
* 1) Get the current start threshold
* 2) Check for support
@@ -9420,7 +9432,8 @@ static int tpacpi_battery_probe(int battery)
static int tpacpi_battery_get_id(const char *battery_name)
{
- if (strcmp(battery_name, "BAT0") == 0)
+ if (strcmp(battery_name, "BAT0") == 0 ||
+ tp_features.battery_force_primary)
return BAT_PRIMARY;
if (strcmp(battery_name, "BAT1") == 0)
return BAT_SECONDARY;
@@ -9596,8 +9609,26 @@ static struct acpi_battery_hook battery_hook = {
/* Subdriver init/exit */
+static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ /*
+ * Individual addressing is broken on models that expose the
+ * primary battery as BAT1.
+ */
+ TPACPI_Q_LNV('J', '7', true), /* B5400 */
+ TPACPI_Q_LNV('J', 'I', true), /* ThinkPad 11e */
+ TPACPI_Q_LNV3('R', '0', 'B', true), /* ThinkPad 11e gen 3 */
+ TPACPI_Q_LNV3('R', '0', 'C', true), /* ThinkPad 13 */
+ TPACPI_Q_LNV3('R', '0', 'J', true), /* ThinkPad 13 gen 2 */
+};
+
static int __init tpacpi_battery_init(struct ibm_init_struct *ibm)
{
+ memset(&battery_info, 0, sizeof(battery_info));
+
+ tp_features.battery_force_primary = tpacpi_check_quirks(
+ battery_quirk_table,
+ ARRAY_SIZE(battery_quirk_table));
+
battery_hook_register(&battery_hook);
return 0;
}
@@ -9808,36 +9839,37 @@ err_out:
/* Probing */
-static bool __pure __init tpacpi_is_fw_digit(const char c)
+static char __init tpacpi_parse_fw_id(const char * const s,
+ u32 *model, u16 *release)
{
- return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
-}
+ int i;
+
+ if (!s || strlen(s) < 8)
+ goto invalid;
+
+ for (i = 0; i < 8; i++)
+ if (!((s[i] >= '0' && s[i] <= '9') ||
+ (s[i] >= 'A' && s[i] <= 'Z')))
+ goto invalid;
-static bool __pure __init tpacpi_is_valid_fw_id(const char * const s,
- const char t)
-{
/*
* Most models: xxyTkkWW (#.##c)
* Ancient 570/600 and -SL lacks (#.##c)
*/
- if (s && strlen(s) >= 8 &&
- tpacpi_is_fw_digit(s[0]) &&
- tpacpi_is_fw_digit(s[1]) &&
- s[2] == t &&
- (s[3] == 'T' || s[3] == 'N') &&
- tpacpi_is_fw_digit(s[4]) &&
- tpacpi_is_fw_digit(s[5]))
- return true;
+ if (s[3] == 'T' || s[3] == 'N') {
+ *model = TPID(s[0], s[1]);
+ *release = TPVER(s[4], s[5]);
+ return s[2];
/* New models: xxxyTkkW (#.##c); T550 and some others */
- return s && strlen(s) >= 8 &&
- tpacpi_is_fw_digit(s[0]) &&
- tpacpi_is_fw_digit(s[1]) &&
- tpacpi_is_fw_digit(s[2]) &&
- s[3] == t &&
- (s[4] == 'T' || s[4] == 'N') &&
- tpacpi_is_fw_digit(s[5]) &&
- tpacpi_is_fw_digit(s[6]);
+ } else if (s[4] == 'T' || s[4] == 'N') {
+ *model = TPID3(s[0], s[1], s[2]);
+ *release = TPVER(s[5], s[6]);
+ return s[3];
+ }
+
+invalid:
+ return '\0';
}
/* returns 0 - probe ok, or < 0 - probe error.
@@ -9849,6 +9881,7 @@ static int __must_check __init get_thinkpad_model_data(
const struct dmi_device *dev = NULL;
char ec_fw_string[18];
char const *s;
+ char t;
if (!tp)
return -EINVAL;
@@ -9868,15 +9901,11 @@ static int __must_check __init get_thinkpad_model_data(
return -ENOMEM;
/* Really ancient ThinkPad 240X will fail this, which is fine */
- if (!(tpacpi_is_valid_fw_id(tp->bios_version_str, 'E') ||
- tpacpi_is_valid_fw_id(tp->bios_version_str, 'C')))
+ t = tpacpi_parse_fw_id(tp->bios_version_str,
+ &tp->bios_model, &tp->bios_release);
+ if (t != 'E' && t != 'C')
return 0;
- tp->bios_model = tp->bios_version_str[0]
- | (tp->bios_version_str[1] << 8);
- tp->bios_release = (tp->bios_version_str[4] << 8)
- | tp->bios_version_str[5];
-
/*
* ThinkPad T23 or newer, A31 or newer, R50e or newer,
* X32 or newer, all Z series; Some models must have an
@@ -9895,12 +9924,9 @@ static int __must_check __init get_thinkpad_model_data(
if (!tp->ec_version_str)
return -ENOMEM;
- if (tpacpi_is_valid_fw_id(ec_fw_string, 'H')) {
- tp->ec_model = ec_fw_string[0]
- | (ec_fw_string[1] << 8);
- tp->ec_release = (ec_fw_string[4] << 8)
- | ec_fw_string[5];
- } else {
+ t = tpacpi_parse_fw_id(ec_fw_string,
+ &tp->ec_model, &tp->ec_release);
+ if (t != 'H') {
pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
ec_fw_string);
pr_notice("please report this to %s\n",
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index eef76bfa5d73..e366977bda41 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -34,6 +34,7 @@
#define TOSHIBA_ACPI_VERSION "0.24"
#define PROC_INTERFACE_VERSION 1
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -1682,7 +1683,7 @@ static const struct file_operations keys_proc_fops = {
.write = keys_proc_write,
};
-static int version_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused version_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "driver: %s\n", TOSHIBA_ACPI_VERSION);
seq_printf(m, "proc_interface: %d\n", PROC_INTERFACE_VERSION);
@@ -1836,6 +1837,7 @@ static ssize_t kbd_backlight_mode_store(struct device *dev,
return ret;
toshiba->kbd_mode = mode;
+ toshiba_acpi->kbd_mode = mode;
/*
* Some laptop models with the second generation backlit
@@ -1852,7 +1854,7 @@ static ssize_t kbd_backlight_mode_store(struct device *dev,
* event via genetlink.
*/
if (toshiba->kbd_type == 2 &&
- !toshiba_acpi->kbd_event_generated)
+ !toshiba->kbd_event_generated)
schedule_work(&kbd_bl_work);
}
@@ -2413,16 +2415,21 @@ static const struct attribute_group toshiba_attr_group = {
static void toshiba_acpi_kbd_bl_work(struct work_struct *work)
{
- struct acpi_device *acpi_dev = toshiba_acpi->acpi_dev;
-
/* Update the sysfs entries */
- if (sysfs_update_group(&acpi_dev->dev.kobj,
+ if (sysfs_update_group(&toshiba_acpi->acpi_dev->dev.kobj,
&toshiba_attr_group))
pr_err("Unable to update sysfs entries\n");
+ /* Notify LED subsystem about keyboard backlight change */
+ if (toshiba_acpi->kbd_type == 2 &&
+ toshiba_acpi->kbd_mode != SCI_KBD_MODE_AUTO)
+ led_classdev_notify_brightness_hw_changed(&toshiba_acpi->kbd_led,
+ (toshiba_acpi->kbd_mode == SCI_KBD_MODE_ON) ?
+ LED_FULL : LED_OFF);
+
/* Emulate the keyboard backlight event */
- acpi_bus_generate_netlink_event(acpi_dev->pnp.device_class,
- dev_name(&acpi_dev->dev),
+ acpi_bus_generate_netlink_event(toshiba_acpi->acpi_dev->pnp.device_class,
+ dev_name(&toshiba_acpi->acpi_dev->dev),
0x92, 0);
}
@@ -3119,9 +3126,12 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
/*
* Only register the LED if KBD illumination is supported
* and the keyboard backlight operation mode is set to FN-Z
+ * or we detect a second gen keyboard backlight
*/
- if (dev->kbd_illum_supported && dev->kbd_mode == SCI_KBD_MODE_FNZ) {
+ if (dev->kbd_illum_supported &&
+ (dev->kbd_mode == SCI_KBD_MODE_FNZ || dev->kbd_type == 2)) {
dev->kbd_led.name = "toshiba::kbd_backlight";
+ dev->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
dev->kbd_led.max_brightness = 1;
dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
@@ -3237,11 +3247,16 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
pr_info("SATA power event received %x\n", event);
break;
case 0x92: /* Keyboard backlight mode changed */
- toshiba_acpi->kbd_event_generated = true;
+ dev->kbd_event_generated = true;
/* Update sysfs entries */
if (sysfs_update_group(&acpi_dev->dev.kobj,
&toshiba_attr_group))
pr_err("Unable to update sysfs entries\n");
+ /* Notify LED subsystem about keyboard backlight change */
+ if (dev->kbd_type == 2 && dev->kbd_mode != SCI_KBD_MODE_AUTO)
+ led_classdev_notify_brightness_hw_changed(&dev->kbd_led,
+ (dev->kbd_mode == SCI_KBD_MODE_ON) ?
+ LED_FULL : LED_OFF);
break;
case 0x85: /* Unknown */
case 0x8d: /* Unknown */
diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 853a7ce4601c..cb204f973491 100644
--- a/drivers/platform/x86/silead_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -1,5 +1,5 @@
/*
- * Silead touchscreen driver DMI based configuration code
+ * Touchscreen driver DMI based configuration code
*
* Copyright (c) 2017 Red Hat Inc.
*
@@ -20,95 +20,147 @@
#include <linux/property.h>
#include <linux/string.h>
-struct silead_ts_dmi_data {
+struct ts_dmi_data {
const char *acpi_name;
const struct property_entry *properties;
};
-static const struct property_entry cube_iwork8_air_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 900),
+/* NOTE: Please keep all entries sorted alphabetically */
+
+static const struct property_entry chuwi_hi8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
- PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi8.fw"),
{ }
};
-static const struct silead_ts_dmi_data cube_iwork8_air_data = {
+static const struct ts_dmi_data chuwi_hi8_data = {
+ .acpi_name = "MSSL0001:00",
+ .properties = chuwi_hi8_props,
+};
+
+static const struct property_entry chuwi_hi8_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi8_pro_data = {
.acpi_name = "MSSL1680:00",
- .properties = cube_iwork8_air_props,
+ .properties = chuwi_hi8_pro_props,
};
-static const struct property_entry jumper_ezpad_mini3_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1700),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
+static const struct property_entry chuwi_vi8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1724),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data jumper_ezpad_mini3_data = {
- .acpi_name = "MSSL1680:00",
- .properties = jumper_ezpad_mini3_props,
+static const struct ts_dmi_data chuwi_vi8_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_vi8_props,
};
-static const struct property_entry jumper_ezpad_6_pro_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"),
+static const struct property_entry chuwi_vi10_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1858),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data jumper_ezpad_6_pro_data = {
+static const struct ts_dmi_data chuwi_vi10_data = {
+ .acpi_name = "MSSL0002:00",
+ .properties = chuwi_vi10_props,
+};
+
+static const struct property_entry connect_tablet9_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 9),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 878),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data connect_tablet9_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = connect_tablet9_props,
+};
+
+static const struct property_entry cube_iwork8_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 900),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data cube_iwork8_air_data = {
.acpi_name = "MSSL1680:00",
- .properties = jumper_ezpad_6_pro_props,
+ .properties = cube_iwork8_air_props,
};
-static const struct property_entry dexp_ursus_7w_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
+static const struct property_entry cube_knote_i1101_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1961),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1513),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data dexp_ursus_7w_data = {
+static const struct ts_dmi_data cube_knote_i1101_data = {
.acpi_name = "MSSL1680:00",
- .properties = dexp_ursus_7w_props,
+ .properties = cube_knote_i1101_props,
};
-static const struct property_entry surftab_twin_10_1_st10432_8_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
- PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+static const struct property_entry dexp_ursus_7w_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data surftab_twin_10_1_st10432_8_data = {
+static const struct ts_dmi_data dexp_ursus_7w_data = {
.acpi_name = "MSSL1680:00",
- .properties = surftab_twin_10_1_st10432_8_props,
+ .properties = dexp_ursus_7w_props,
};
-static const struct property_entry surftab_wintron70_st70416_6_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
+static const struct property_entry digma_citi_e200_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-surftab-wintron70-st70416-6.fw"),
+ "gsl1686-digma_citi_e200.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data surftab_wintron70_st70416_6_data = {
+static const struct ts_dmi_data digma_citi_e200_data = {
.acpi_name = "MSSL1680:00",
- .properties = surftab_wintron70_st70416_6_props,
+ .properties = digma_citi_e200_props,
};
static const struct property_entry gp_electronic_t701_props[] = {
@@ -121,162 +173,181 @@ static const struct property_entry gp_electronic_t701_props[] = {
{ }
};
-static const struct silead_ts_dmi_data gp_electronic_t701_data = {
+static const struct ts_dmi_data gp_electronic_t701_data = {
.acpi_name = "MSSL1680:00",
.properties = gp_electronic_t701_props,
};
-static const struct property_entry pipo_w2s_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
- PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+static const struct property_entry itworks_tw891_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1600),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 890),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-pipo-w2s.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
{ }
};
-static const struct silead_ts_dmi_data pipo_w2s_data = {
+static const struct ts_dmi_data itworks_tw891_data = {
.acpi_name = "MSSL1680:00",
- .properties = pipo_w2s_props,
+ .properties = itworks_tw891_props,
};
-static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
- PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+static const struct property_entry jumper_ezpad_6_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data pov_mobii_wintab_p800w_v20_data = {
+static const struct ts_dmi_data jumper_ezpad_6_pro_data = {
.acpi_name = "MSSL1680:00",
- .properties = pov_mobii_wintab_p800w_v20_props,
+ .properties = jumper_ezpad_6_pro_props,
};
-static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1800),
+static const struct property_entry jumper_ezpad_mini3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1700),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p800w.fw"),
- PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
-static const struct silead_ts_dmi_data pov_mobii_wintab_p800w_v21_data = {
+static const struct ts_dmi_data jumper_ezpad_mini3_data = {
.acpi_name = "MSSL1680:00",
- .properties = pov_mobii_wintab_p800w_v21_props,
+ .properties = jumper_ezpad_mini3_props,
};
-static const struct property_entry itworks_tw891_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1600),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 890),
+static const struct property_entry onda_obook_20_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data itworks_tw891_data = {
+static const struct ts_dmi_data onda_obook_20_plus_data = {
.acpi_name = "MSSL1680:00",
- .properties = itworks_tw891_props,
+ .properties = onda_obook_20_plus_props,
};
-static const struct property_entry chuwi_hi8_pro_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+static const struct property_entry onda_v820w_32g_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-onda-v820w-32g.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data chuwi_hi8_pro_data = {
+static const struct ts_dmi_data onda_v820w_32g_data = {
.acpi_name = "MSSL1680:00",
- .properties = chuwi_hi8_pro_props,
+ .properties = onda_v820w_32g_props,
};
-static const struct property_entry digma_citi_e200_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
- PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+static const struct property_entry onda_v891w_v1_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 46),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-digma_citi_e200.fw"),
+ "gsl3680-onda-v891w-v1.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data digma_citi_e200_data = {
+static const struct ts_dmi_data onda_v891w_v1_data = {
.acpi_name = "MSSL1680:00",
- .properties = digma_citi_e200_props,
+ .properties = onda_v891w_v1_props,
};
-static const struct property_entry onda_obook_20_plus_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
- PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+static const struct property_entry onda_v891w_v3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 35),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl3676-onda-v891w-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data onda_obook_20_plus_data = {
+static const struct ts_dmi_data onda_v891w_v3_data = {
.acpi_name = "MSSL1680:00",
- .properties = onda_obook_20_plus_props,
+ .properties = onda_v891w_v3_props,
};
-static const struct property_entry chuwi_hi8_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+static const struct property_entry pipo_w2s_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_BOOL("silead,home-button"),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi8.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-pipo-w2s.fw"),
{ }
};
-static const struct silead_ts_dmi_data chuwi_hi8_data = {
- .acpi_name = "MSSL0001:00",
- .properties = chuwi_hi8_props,
+static const struct ts_dmi_data pipo_w2s_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w2s_props,
};
-static const struct property_entry chuwi_vi8_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1724),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data chuwi_vi8_data = {
- .acpi_name = "MSSL1680:00",
- .properties = chuwi_vi8_props,
+static const struct ts_dmi_data pov_mobii_wintab_p800w_v20_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pov_mobii_wintab_p800w_v20_props,
};
-static const struct property_entry trekstor_primebook_c13_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
+static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1800),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c13.fw"),
+ "gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pov_mobii_wintab_p800w_v21_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pov_mobii_wintab_p800w_v21_props,
+};
+
+static const struct property_entry teclast_x3_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data trekstor_primebook_c13_data = {
+static const struct ts_dmi_data teclast_x3_plus_data = {
.acpi_name = "MSSL1680:00",
- .properties = trekstor_primebook_c13_props,
+ .properties = teclast_x3_plus_props,
};
static const struct property_entry teclast_x98plus2_props[] = {
@@ -290,156 +361,162 @@ static const struct property_entry teclast_x98plus2_props[] = {
{ }
};
-static const struct silead_ts_dmi_data teclast_x98plus2_data = {
+static const struct ts_dmi_data teclast_x98plus2_data = {
.acpi_name = "MSSL1680:00",
.properties = teclast_x98plus2_props,
};
-static const struct property_entry teclast_x3_plus_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
- PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"),
+static const struct property_entry trekstor_primebook_c13_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-trekstor-primebook-c13.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data teclast_x3_plus_data = {
+static const struct ts_dmi_data trekstor_primebook_c13_data = {
.acpi_name = "MSSL1680:00",
- .properties = teclast_x3_plus_props,
+ .properties = trekstor_primebook_c13_props,
};
-static const struct property_entry onda_v891w_v1_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-min-x", 46),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
+static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-onda-v891w-v1.fw"),
+ "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_twin_10_1_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_surftab_twin_10_1_props,
+};
+
+static const struct property_entry trekstor_surftab_wintron70_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
-static const struct silead_ts_dmi_data onda_v891w_v1_data = {
+static const struct ts_dmi_data trekstor_surftab_wintron70_data = {
.acpi_name = "MSSL1680:00",
- .properties = onda_v891w_v1_props,
+ .properties = trekstor_surftab_wintron70_props,
};
-static const struct dmi_system_id silead_ts_dmi_table[] = {
+/* NOTE: Please keep this table sorted alphabetically */
+static const struct dmi_system_id touchscreen_dmi_table[] = {
{
- /* CUBE iwork8 Air */
- .driver_data = (void *)&cube_iwork8_air_data,
+ /* Chuwi Hi8 */
+ .driver_data = (void *)&chuwi_hi8_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "cube"),
- DMI_MATCH(DMI_PRODUCT_NAME, "i1-TF"),
- DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
},
},
{
- /* Jumper EZpad mini3 */
- .driver_data = (void *)&jumper_ezpad_mini3_data,
+ /* Chuwi Hi8 (H1D_S806_206) */
+ .driver_data = (void *)&chuwi_hi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
- /* jumperx.T87.KFBNEEA02 with the version-nr dropped */
- DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ DMI_MATCH(DMI_BIOS_VERSION, "H1D_S806_206"),
},
},
{
- /* Jumper EZpad 6 Pro */
- .driver_data = (void *)&jumper_ezpad_6_pro_data,
+ /* Chuwi Hi8 Pro (CWI513) */
+ .driver_data = (void *)&chuwi_hi8_pro_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
- DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
- /* Above matches are too generic, add bios-date match */
- DMI_MATCH(DMI_BIOS_DATE, "08/18/2017"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
},
},
{
- /* DEXP Ursus 7W */
- .driver_data = (void *)&dexp_ursus_7w_data,
+ /* Chuwi Vi8 (CWI506) */
+ .driver_data = (void *)&chuwi_vi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
- DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.D86JLBNR"),
},
},
{
- /* TrekStor SurfTab twin 10.1 ST10432-8 */
- .driver_data = (void *)&surftab_twin_10_1_st10432_8_data,
+ /* Chuwi Vi10 (CWI505) */
+ .driver_data = (void *)&chuwi_vi10_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab twin 10.1"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-PF02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S165"),
},
},
{
- /* Trekstor Surftab Wintron 7.0 ST70416-6 */
- .driver_data = (void *)&surftab_wintron70_st70416_6_data,
+ /* Connect Tablet 9 */
+ .driver_data = (void *)&connect_tablet9_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ST70416-6"),
- /* Exact match, different versions need different fw */
- DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA04"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Connect"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
},
},
{
- /* Trekstor Surftab Wintron 7.0 ST70416-6, newer BIOS */
- .driver_data = (void *)&surftab_wintron70_st70416_6_data,
+ /* CUBE iwork8 Air */
+ .driver_data = (void *)&cube_iwork8_air_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "SurfTab wintron 7.0 ST70416-6"),
- /* Exact match, different versions need different fw */
- DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
+ DMI_MATCH(DMI_SYS_VENDOR, "cube"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i1-TF"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
{
- /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
- .driver_data = (void *)&surftab_wintron70_st70416_6_data,
+ /* Cube KNote i1101 */
+ .driver_data = (void *)&cube_knote_i1101_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Shenzhen PLOYER"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MOMO7W"),
- /* Exact match, different versions need different fw */
- DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "L1W6_I1101"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ALLDOCUBE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i1101"),
},
},
{
- /* GP-electronic T701 */
- .driver_data = (void *)&gp_electronic_t701_data,
+ /* DEXP Ursus 7W */
+ .driver_data = (void *)&dexp_ursus_7w_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
- DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
},
},
{
- /* Pipo W2S */
- .driver_data = (void *)&pipo_w2s_data,
+ /* Digma Citi E200 */
+ .driver_data = (void *)&digma_citi_e200_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Digma"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
{
- /* Point of View mobii wintab p800w (v2.0) */
- .driver_data = (void *)&pov_mobii_wintab_p800w_v20_data,
+ /* GP-electronic T701 */
+ .driver_data = (void *)&gp_electronic_t701_data,
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
- DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1014"),
- /* Above matches are too generic, add bios-date match */
- DMI_MATCH(DMI_BIOS_DATE, "10/24/2014"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
},
},
{
- /* Point of View mobii wintab p800w (v2.1) */
- .driver_data = (void *)&pov_mobii_wintab_p800w_v21_data,
+ /* I.T.Works TW701 (same hardware as the Trekstor ST70416-6) */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
- DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
- /* Above matches are too generic, add bios-date match */
- DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i71c"),
+ DMI_MATCH(DMI_BIOS_VERSION, "itWORKS.G.WI71C.JGBMRB"),
},
},
{
@@ -451,20 +528,23 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
},
},
{
- /* Chuwi Hi8 Pro */
- .driver_data = (void *)&chuwi_hi8_pro_data,
+ /* Jumper EZpad 6 Pro */
+ .driver_data = (void *)&jumper_ezpad_6_pro_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "08/18/2017"),
},
},
{
- /* Digma Citi E200 */
- .driver_data = (void *)&digma_citi_e200_data,
+ /* Jumper EZpad mini3 */
+ .driver_data = (void *)&jumper_ezpad_mini3_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Digma"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"),
- DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ /* jumperx.T87.KFBNEEA02 with the version-nr dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
},
},
{
@@ -476,45 +556,71 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
},
},
{
- /* Chuwi Hi8 */
- .driver_data = (void *)&chuwi_hi8_data,
+ /* ONDA V820w DualOS */
+ .driver_data = (void *)&onda_v820w_32g_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
- DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ONDA"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "V820w DualOS"),
},
},
{
- /* Chuwi Hi8 (H1D_S806_206) */
- .driver_data = (void *)&chuwi_hi8_data,
+ /* ONDA V891w revision P891WBEBV1B00 aka v1 */
+ .driver_data = (void *)&onda_v891w_v1_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
- DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
- DMI_MATCH(DMI_BIOS_VERSION, "H1D_S806_206"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ONDA"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONDA Tablet"),
+ DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V001"),
+ /* Exact match, different versions need different fw */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "ONDA.W89EBBN08"),
},
},
{
- /* Chuwi Vi8 (CWI506) */
- .driver_data = (void *)&chuwi_vi8_data,
+ /* ONDA V891w Dual OS P891DCF2V1A01274 64GB */
+ .driver_data = (void *)&onda_v891w_v3_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
- DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
- DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.D86JLBNR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ONDA Tablet"),
+ DMI_MATCH(DMI_BIOS_VERSION, "ONDA.D890HBBNR0A"),
},
},
{
- /* Trekstor Primebook C13 */
- .driver_data = (void *)&trekstor_primebook_c13_data,
+ /* Pipo W2S */
+ .driver_data = (void *)&pipo_w2s_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
},
},
{
- /* Teclast X98 Plus II */
- .driver_data = (void *)&teclast_x98plus2_data,
+ /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X98 Plus II"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Shenzhen PLOYER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MOMO7W"),
+ /* Exact match, different versions need different fw */
+ DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"),
+ },
+ },
+ {
+ /* Point of View mobii wintab p800w (v2.0) */
+ .driver_data = (void *)&pov_mobii_wintab_p800w_v20_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1014"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "10/24/2014"),
+ },
+ },
+ {
+ /* Point of View mobii wintab p800w (v2.1) */
+ .driver_data = (void *)&pov_mobii_wintab_p800w_v21_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
},
},
{
@@ -527,52 +633,77 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
},
},
{
- /* I.T.Works TW701 */
- .driver_data = (void *)&surftab_wintron70_st70416_6_data,
+ /* Teclast X98 Plus II */
+ .driver_data = (void *)&teclast_x98plus2_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
- DMI_MATCH(DMI_PRODUCT_NAME, "i71c"),
- DMI_MATCH(DMI_BIOS_VERSION, "itWORKS.G.WI71C.JGBMRB"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X98 Plus II"),
},
},
{
- /* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
- .driver_data = (void *)&chuwi_vi8_data,
+ /* Trekstor Primebook C13 */
+ .driver_data = (void *)&trekstor_primebook_c13_data,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "YOURS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Y8W81"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
},
},
{
- /* ONDA V891w revision P891WBEBV1B00 aka v1 */
- .driver_data = (void *)&onda_v891w_v1_data,
+ /* TrekStor SurfTab twin 10.1 ST10432-8 */
+ .driver_data = (void *)&trekstor_surftab_twin_10_1_data,
.matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ONDA"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONDA Tablet"),
- DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V001"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab twin 10.1"),
+ },
+ },
+ {
+ /* Trekstor Surftab Wintron 7.0 ST70416-6 */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ST70416-6"),
/* Exact match, different versions need different fw */
- DMI_EXACT_MATCH(DMI_BIOS_VERSION, "ONDA.W89EBBN08"),
+ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA04"),
+ },
+ },
+ {
+ /* Trekstor Surftab Wintron 7.0 ST70416-6, newer BIOS */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "SurfTab wintron 7.0 ST70416-6"),
+ /* Exact match, different versions need different fw */
+ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
+ },
+ },
+ {
+ /* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "YOURS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Y8W81"),
},
},
{ },
};
-static const struct silead_ts_dmi_data *silead_ts_data;
+static const struct ts_dmi_data *ts_data;
-static void silead_ts_dmi_add_props(struct i2c_client *client)
+static void ts_dmi_add_props(struct i2c_client *client)
{
struct device *dev = &client->dev;
int error;
if (has_acpi_companion(dev) &&
- !strncmp(silead_ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
- error = device_add_properties(dev, silead_ts_data->properties);
+ !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+ error = device_add_properties(dev, ts_data->properties);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
}
}
-static int silead_ts_dmi_notifier_call(struct notifier_block *nb,
+static int ts_dmi_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
@@ -582,7 +713,7 @@ static int silead_ts_dmi_notifier_call(struct notifier_block *nb,
case BUS_NOTIFY_ADD_DEVICE:
client = i2c_verify_client(dev);
if (client)
- silead_ts_dmi_add_props(client);
+ ts_dmi_add_props(client);
break;
default:
@@ -592,22 +723,22 @@ static int silead_ts_dmi_notifier_call(struct notifier_block *nb,
return 0;
}
-static struct notifier_block silead_ts_dmi_notifier = {
- .notifier_call = silead_ts_dmi_notifier_call,
+static struct notifier_block ts_dmi_notifier = {
+ .notifier_call = ts_dmi_notifier_call,
};
-static int __init silead_ts_dmi_init(void)
+static int __init ts_dmi_init(void)
{
const struct dmi_system_id *dmi_id;
int error;
- dmi_id = dmi_first_match(silead_ts_dmi_table);
+ dmi_id = dmi_first_match(touchscreen_dmi_table);
if (!dmi_id)
return 0; /* Not an error */
- silead_ts_data = dmi_id->driver_data;
+ ts_data = dmi_id->driver_data;
- error = bus_register_notifier(&i2c_bus_type, &silead_ts_dmi_notifier);
+ error = bus_register_notifier(&i2c_bus_type, &ts_dmi_notifier);
if (error)
pr_err("%s: failed to register i2c bus notifier: %d\n",
__func__, error);
@@ -620,4 +751,4 @@ static int __init silead_ts_dmi_init(void)
* itself is ready (which happens at postcore initcall level), but before
* ACPI starts enumerating devices (at subsys initcall level).
*/
-arch_initcall(silead_ts_dmi_init);
+arch_initcall(ts_dmi_init);
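
For orientation, a minimal sketch (editorial, not part of this patch): once ts_dmi_add_props() has attached the properties, the i2c touchscreen driver reads them back through the generic linux/property.h accessors. The function below is illustrative only; the property names are the ones used in the tables above.

#include <linux/device.h>
#include <linux/property.h>

static void example_read_ts_props(struct device *dev)
{
	u32 size_x;

	/* returns 0 on success, a negative errno if the property is absent */
	if (!device_property_read_u32(dev, "touchscreen-size-x", &size_x))
		dev_info(dev, "touchscreen width: %u\n", size_x);

	/* boolean properties test for presence only */
	if (device_property_read_bool(dev, "touchscreen-swapped-x-y"))
		dev_info(dev, "x/y axes are swapped\n");
}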
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 8e3d0146ff8c..04791ea5d97b 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -895,7 +895,6 @@ static int wmi_dev_probe(struct device *dev)
struct wmi_driver *wdriver =
container_of(dev->driver, struct wmi_driver, driver);
int ret = 0;
- int count;
char *buf;
if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
@@ -917,9 +916,8 @@ static int wmi_dev_probe(struct device *dev)
goto probe_failure;
}
- count = get_order(wblock->req_buf_size);
- wblock->handler_data = (void *)__get_free_pages(GFP_KERNEL,
- count);
+ wblock->handler_data = kmalloc(wblock->req_buf_size,
+ GFP_KERNEL);
if (!wblock->handler_data) {
ret = -ENOMEM;
goto probe_failure;
@@ -964,8 +962,7 @@ static int wmi_dev_remove(struct device *dev)
if (wdriver->filter_callback) {
misc_deregister(&wblock->char_dev);
kfree(wblock->char_dev.name);
- free_pages((unsigned long)wblock->handler_data,
- get_order(wblock->req_buf_size));
+ kfree(wblock->handler_data);
}
if (wdriver->remove)
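
An editorial gloss on the allocation change above: __get_free_pages() can only hand out power-of-two numbers of whole pages, so the request had to be rounded up through get_order(), whereas kmalloc() takes the byte count directly and pairs with the plain kfree() now used on the remove path. A sketch of the two patterns; the 4097-byte size is an arbitrary example, assuming 4 KiB pages:

#include <linux/gfp.h>
#include <linux/slab.h>

static void alloc_example(void)
{
	unsigned long page_buf;
	void *slab_buf;

	/* page path: get_order(4097) == 1, i.e. 2 pages = 8192 bytes */
	page_buf = __get_free_pages(GFP_KERNEL, get_order(4097));
	free_pages(page_buf, get_order(4097));	/* free_pages(0, ...) is a no-op */

	/* slab path: the allocator tracks the object, a plain kfree() suffices */
	slab_buf = kmalloc(4097, GFP_KERNEL);
	kfree(slab_buf);			/* kfree(NULL) is safe */
}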
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index cb0237143dbe..1360a7fa542c 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -18,6 +18,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index df58fc878b3e..6533aa560aa1 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -104,6 +104,17 @@ config POWER_RESET_MSM
help
Power off and restart support for Qualcomm boards.
+config POWER_RESET_QCOM_PON
+ tristate "Qualcomm power-on driver"
+ depends on ARCH_QCOM
+ depends on MFD_SPMI_PMIC
+ select REBOOT_MODE
+ help
+ Power On support for Qualcomm boards.
+ If you have a Qualcomm platform and need support for
+	  the power-on and reboot reason, say Y.
+	  If unsure, say N.
+
config POWER_RESET_OCELOT_RESET
bool "Microsemi Ocelot reset driver"
depends on MSCC_OCELOT || COMPILE_TEST
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 7778c7485cf1..0aebee954ac1 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c
index 2ac291af1265..90e35c07240a 100644
--- a/drivers/power/reset/gemini-poweroff.c
+++ b/drivers/power/reset/gemini-poweroff.c
@@ -130,7 +130,17 @@ static int gemini_poweroff_probe(struct platform_device *pdev)
val |= GEMINI_CTRL_ENABLE;
writel(val, gpw->base + GEMINI_PWC_CTRLREG);
- /* Now that the state machine is active, clear the IRQ */
+ /* Clear the IRQ */
+ val = readl(gpw->base + GEMINI_PWC_CTRLREG);
+ val |= GEMINI_CTRL_IRQ_CLR;
+ writel(val, gpw->base + GEMINI_PWC_CTRLREG);
+
+ /* Wait for this to clear */
+ val = readl(gpw->base + GEMINI_PWC_STATREG);
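+	/*
+	 * 0x70 covers the three power-trigger status bits (power button,
+	 * RTC, CIR), inferred from this driver's GEMINI_STAT_* masks.
+	 */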
+ while (val & 0x70U)
+ val = readl(gpw->base + GEMINI_PWC_STATREG);
+
+ /* Clear the IRQ again */
val = readl(gpw->base + GEMINI_PWC_CTRLREG);
val |= GEMINI_CTRL_IRQ_CLR;
writel(val, gpw->base + GEMINI_PWC_CTRLREG);
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index bfcd6fba6363..c484584745bc 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -2,7 +2,7 @@
* LTC2952 (PowerPath) driver
*
* Copyright (C) 2014, Xsens Technologies BV <info@xsens.com>
- * Maintainer: René Moll <linux@r-moll.nl>
+ * Maintainer: René Moll <linux@r-moll.nl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -62,6 +62,7 @@
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/reboot.h>
@@ -318,6 +319,6 @@ static struct platform_driver ltc2952_poweroff_driver = {
module_platform_driver(ltc2952_poweroff_driver);
-MODULE_AUTHOR("René Moll <rene.moll@xsens.com>");
+MODULE_AUTHOR("René Moll <rene.moll@xsens.com>");
MODULE_DESCRIPTION("LTC PowerPath power-off driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
new file mode 100644
index 000000000000..0c4caaa7e88f
--- /dev/null
+++ b/drivers/power/reset/qcom-pon.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-18 Linaro Limited
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/reboot-mode.h>
+#include <linux/regmap.h>
+
+#define PON_SOFT_RB_SPARE 0x8f
+
+struct pm8916_pon {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 baseaddr;
+ struct reboot_mode_driver reboot_mode;
+};
+
+static int pm8916_reboot_mode_write(struct reboot_mode_driver *reboot,
+ unsigned int magic)
+{
+	struct pm8916_pon *pon = container_of(reboot, struct pm8916_pon,
+					      reboot_mode);
+ int ret;
+
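+	/*
+	 * The reboot-mode magic is stored in bits [7:2] of SOFT_RB_SPARE;
+	 * the 0xfc mask leaves bits [1:0] untouched.
+	 */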
+ ret = regmap_update_bits(pon->regmap,
+ pon->baseaddr + PON_SOFT_RB_SPARE,
+ 0xfc, magic << 2);
+ if (ret < 0)
+ dev_err(pon->dev, "update reboot mode bits failed\n");
+
+ return ret;
+}
+
+static int pm8916_pon_probe(struct platform_device *pdev)
+{
+ struct pm8916_pon *pon;
+ int error;
+
+ pon = devm_kzalloc(&pdev->dev, sizeof(*pon), GFP_KERNEL);
+ if (!pon)
+ return -ENOMEM;
+
+ pon->dev = &pdev->dev;
+
+ pon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pon->regmap) {
+ dev_err(&pdev->dev, "failed to locate regmap\n");
+ return -ENODEV;
+ }
+
+ error = of_property_read_u32(pdev->dev.of_node, "reg",
+ &pon->baseaddr);
+ if (error)
+ return error;
+
+ pon->reboot_mode.dev = &pdev->dev;
+ pon->reboot_mode.write = pm8916_reboot_mode_write;
+ error = devm_reboot_mode_register(&pdev->dev, &pon->reboot_mode);
+ if (error) {
+ dev_err(&pdev->dev, "can't register reboot mode\n");
+ return error;
+ }
+
+ platform_set_drvdata(pdev, pon);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id pm8916_pon_id_table[] = {
+ { .compatible = "qcom,pm8916-pon" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pm8916_pon_id_table);
+
+static struct platform_driver pm8916_pon_driver = {
+ .probe = pm8916_pon_probe,
+ .driver = {
+ .name = "pm8916-pon",
+ .of_match_table = of_match_ptr(pm8916_pon_id_table),
+ },
+};
+module_platform_driver(pm8916_pon_driver);
+
+MODULE_DESCRIPTION("pm8916 Power On driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 102f95a09460..e9e749f87517 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -35,6 +35,7 @@ static void vexpress_reset_do(struct device *dev, const char *what)
}
static struct device *vexpress_power_off_device;
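+/* The restart notifier is global: count users so it is registered only once */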
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
static void vexpress_power_off(void)
{
@@ -99,10 +100,13 @@ static int _vexpress_register_restart_handler(struct device *dev)
int err;
vexpress_restart_device = dev;
- err = register_restart_handler(&vexpress_restart_nb);
- if (err) {
- dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return err;
+ if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+ err = register_restart_handler(&vexpress_restart_nb);
+ if (err) {
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+ atomic_dec(&vexpress_restart_nb_refcnt);
+ return err;
+ }
}
device_create_file(dev, &dev_attr_active);
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index c03e96e6a041..186901c96c01 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -51,6 +51,7 @@ static int zx_reboot_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "zte,zx296702-pcu");
pcu_base = of_iomap(np, 0);
+ of_node_put(np);
if (!pcu_base) {
iounmap(base);
WARN(1, "failed to map pcu_base address");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 428b426842f4..ff6dab0bf0dd 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -75,6 +75,17 @@ config BATTERY_88PM860X
help
Say Y here to enable battery monitor for Marvell 88PM860x chip.
+config CHARGER_ADP5061
+ tristate "ADP5061 battery charger driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here to enable support for the ADP5061 standalone battery
+ charger.
+
+ This driver can be built as a module. If so, the module will be
+ called adp5061.
+
config BATTERY_ACT8945A
tristate "Active-semi ACT8945A charger driver"
depends on MFD_ACT8945A || COMPILE_TEST
@@ -92,7 +103,7 @@ config BATTERY_CPCAP
config BATTERY_DS2760
tristate "DS2760 battery driver (HP iPAQ & others)"
- depends on W1 && W1_SLAVE_DS2760
+ depends on W1
help
Say Y here to enable support for batteries with ds2760 chip.
@@ -624,4 +635,14 @@ config CHARGER_RT9455
help
Say Y to enable support for Richtek RT9455 battery charger.
+config CHARGER_CROS_USBPD
+ tristate "ChromeOS EC based USBPD charger"
+ depends on MFD_CROS_EC
+ default n
+ help
+	  Say Y here to enable the ChromeOS EC based USBPD charger
+ driver. This driver gets various bits of information about
+ what is connected to USB PD ports from the EC and converts
+ that into power_supply properties.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index e83aa843bcc6..a26b402c45d9 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
obj-$(CONFIG_TEST_POWER) += test_power.o
obj-$(CONFIG_BATTERY_88PM860X) += 88pm860x_battery.o
+obj-$(CONFIG_CHARGER_ADP5061) += adp5061.o
obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
obj-$(CONFIG_BATTERY_AXP20X) += axp20x_battery.o
obj-$(CONFIG_CHARGER_AXP20X) += axp20x_ac_power.o
@@ -83,3 +84,4 @@ obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
+obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index d9c6c7bedd85..02356f9b5f22 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -379,15 +379,13 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
*/
static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
{
- struct timespec64 ts64;
+ time64_t now = ktime_get_boottime_seconds();
struct ab8500_fg_avg_cap *avg = &di->avg_cap;
- getnstimeofday64(&ts64);
-
do {
avg->sum += sample - avg->samples[avg->pos];
avg->samples[avg->pos] = sample;
- avg->time_stamps[avg->pos] = ts64.tv_sec;
+ avg->time_stamps[avg->pos] = now;
avg->pos++;
if (avg->pos == NBR_AVG_SAMPLES)
@@ -400,7 +398,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
* Check the time stamp for each sample. If too old,
* replace with latest sample
*/
- } while (ts64.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+ } while (now - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
avg->avg = avg->sum / avg->nbr_samples;
@@ -439,14 +437,14 @@ static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
{
int i;
- struct timespec64 ts64;
+ time64_t now;
struct ab8500_fg_avg_cap *avg = &di->avg_cap;
- getnstimeofday64(&ts64);
+ now = ktime_get_boottime_seconds();
for (i = 0; i < NBR_AVG_SAMPLES; i++) {
avg->samples[i] = sample;
- avg->time_stamps[i] = ts64.tv_sec;
+ avg->time_stamps[i] = now;
}
avg->pos = 0;
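
An editorial note on the clock swap above: ktime_get_boottime_seconds() reads the boot-time clock, which is monotonic, keeps counting across suspend, and is unaffected by settimeofday()/NTP steps, so the sample-age test against VALID_CAPACITY_SEC can no longer be confused by a wall-clock jump. The before/after pattern as a minimal sketch:

#include <linux/time64.h>
#include <linux/timekeeping.h>

static void clock_example(void)
{
	struct timespec64 ts64;
	time64_t now;

	/* old: wall clock; can jump when the system time is set */
	getnstimeofday64(&ts64);	/* seconds in ts64.tv_sec */

	/* new: seconds since boot; monotonic and suspend-aware */
	now = ktime_get_boottime_seconds();
	(void)now;
}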
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
new file mode 100644
index 000000000000..939fd3d8fb1a
--- /dev/null
+++ b/drivers/power/supply/adp5061.c
@@ -0,0 +1,745 @@
+/*
+ * ADP5061 I2C Programmable Linear Battery Charger
+ *
+ * Copyright 2018 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/power_supply.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+/* ADP5061 registers definition */
+#define ADP5061_ID 0x00
+#define ADP5061_REV 0x01
+#define ADP5061_VINX_SET 0x02
+#define ADP5061_TERM_SET 0x03
+#define ADP5061_CHG_CURR 0x04
+#define ADP5061_VOLTAGE_TH 0x05
+#define ADP5061_TIMER_SET 0x06
+#define ADP5061_FUNC_SET_1 0x07
+#define ADP5061_FUNC_SET_2 0x08
+#define ADP5061_INT_EN 0x09
+#define ADP5061_INT_ACT 0x0A
+#define ADP5061_CHG_STATUS_1 0x0B
+#define ADP5061_CHG_STATUS_2 0x0C
+#define ADP5061_FAULT 0x0D
+#define ADP5061_BATTERY_SHORT 0x10
+#define ADP5061_IEND 0x11
+
+/* ADP5061_VINX_SET */
+#define ADP5061_VINX_SET_ILIM_MSK GENMASK(3, 0)
+#define ADP5061_VINX_SET_ILIM_MODE(x) (((x) & 0x0F) << 0)
+
+/* ADP5061_TERM_SET */
+#define ADP5061_TERM_SET_VTRM_MSK GENMASK(7, 2)
+#define ADP5061_TERM_SET_VTRM_MODE(x) (((x) & 0x3F) << 2)
+#define ADP5061_TERM_SET_CHG_VLIM_MSK GENMASK(1, 0)
+#define ADP5061_TERM_SET_CHG_VLIM_MODE(x) (((x) & 0x03) << 0)
+
+/* ADP5061_CHG_CURR */
+#define ADP5061_CHG_CURR_ICHG_MSK GENMASK(6, 2)
+#define ADP5061_CHG_CURR_ICHG_MODE(x) (((x) & 0x1F) << 2)
+#define ADP5061_CHG_CURR_ITRK_DEAD_MSK GENMASK(1, 0)
+#define ADP5061_CHG_CURR_ITRK_DEAD_MODE(x) (((x) & 0x03) << 0)
+
+/* ADP5061_VOLTAGE_TH */
+#define ADP5061_VOLTAGE_TH_DIS_RCH_MSK BIT(7)
+#define ADP5061_VOLTAGE_TH_DIS_RCH_MODE(x) (((x) & 0x01) << 7)
+#define ADP5061_VOLTAGE_TH_VRCH_MSK GENMASK(6, 5)
+#define ADP5061_VOLTAGE_TH_VRCH_MODE(x) (((x) & 0x03) << 5)
+#define ADP5061_VOLTAGE_TH_VTRK_DEAD_MSK GENMASK(4, 3)
+#define ADP5061_VOLTAGE_TH_VTRK_DEAD_MODE(x) (((x) & 0x03) << 3)
+#define ADP5061_VOLTAGE_TH_VWEAK_MSK GENMASK(2, 0)
+#define ADP5061_VOLTAGE_TH_VWEAK_MODE(x) (((x) & 0x07) << 0)
+
+/* ADP5061_CHG_STATUS_1 */
+#define ADP5061_CHG_STATUS_1_VIN_OV(x) (((x) >> 7) & 0x1)
+#define ADP5061_CHG_STATUS_1_VIN_OK(x) (((x) >> 6) & 0x1)
+#define ADP5061_CHG_STATUS_1_VIN_ILIM(x) (((x) >> 5) & 0x1)
+#define ADP5061_CHG_STATUS_1_THERM_LIM(x) (((x) >> 4) & 0x1)
+#define ADP5061_CHG_STATUS_1_CHDONE(x) (((x) >> 3) & 0x1)
+#define ADP5061_CHG_STATUS_1_CHG_STATUS(x) (((x) >> 0) & 0x7)
+
+/* ADP5061_CHG_STATUS_2 */
+#define ADP5061_CHG_STATUS_2_THR_STATUS(x) (((x) >> 5) & 0x7)
+#define ADP5061_CHG_STATUS_2_RCH_LIM_INFO(x) (((x) >> 3) & 0x1)
+#define ADP5061_CHG_STATUS_2_BAT_STATUS(x) (((x) >> 0) & 0x7)
+
+/* ADP5061_IEND */
+#define ADP5061_IEND_IEND_MSK GENMASK(7, 5)
+#define ADP5061_IEND_IEND_MODE(x) (((x) & 0x07) << 5)
+
+#define ADP5061_NO_BATTERY 0x01
+#define ADP5061_ICHG_MAX	1300 /* mA */
+
+enum adp5061_chg_status {
+ ADP5061_CHG_OFF,
+ ADP5061_CHG_TRICKLE,
+ ADP5061_CHG_FAST_CC,
+ ADP5061_CHG_FAST_CV,
+ ADP5061_CHG_COMPLETE,
+ ADP5061_CHG_LDO_MODE,
+ ADP5061_CHG_TIMER_EXP,
+ ADP5061_CHG_BAT_DET,
+};
+
+static const int adp5061_chg_type[4] = {
+ [ADP5061_CHG_OFF] = POWER_SUPPLY_CHARGE_TYPE_NONE,
+ [ADP5061_CHG_TRICKLE] = POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
+ [ADP5061_CHG_FAST_CC] = POWER_SUPPLY_CHARGE_TYPE_FAST,
+ [ADP5061_CHG_FAST_CV] = POWER_SUPPLY_CHARGE_TYPE_FAST,
+};
+
+static const int adp5061_vweak_th[8] = {
+ 2700, 2800, 2900, 3000, 3100, 3200, 3300, 3400,
+};
+
+static const int adp5061_prechg_current[4] = {
+ 5, 10, 20, 80,
+};
+
+static const int adp5061_vmin[4] = {
+ 2000, 2500, 2600, 2900,
+};
+
+static const int adp5061_const_chg_vmax[4] = {
+ 3200, 3400, 3700, 3800,
+};
+
+static const int adp5061_const_ichg[24] = {
+ 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650,
+ 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1200, 1300,
+};
+
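+/*
+ * VTRM register field values 0x0F..0x32 map onto this table, hence the
+ * +/- 0x0F adjustments in the max-voltage helpers below.
+ */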
+static const int adp5061_vmax[36] = {
+ 3800, 3820, 3840, 3860, 3880, 3900, 3920, 3940, 3960, 3980,
+ 4000, 4020, 4040, 4060, 4080, 4100, 4120, 4140, 4160, 4180,
+ 4200, 4220, 4240, 4260, 4280, 4300, 4320, 4340, 4360, 4380,
+ 4400, 4420, 4440, 4460, 4480, 4500,
+};
+
+static const int adp5061_in_current_lim[16] = {
+ 100, 150, 200, 250, 300, 400, 500, 600, 700,
+ 800, 900, 1000, 1200, 1500, 1800, 2100,
+};
+
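+/* Termination current steps, in uA (the current tables above are in mA) */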
+static const int adp5061_iend[8] = {
+ 12500, 32500, 52500, 72500, 92500, 117500, 142500, 170000,
+};
+
+struct adp5061_state {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct power_supply *psy;
+};
+
+static int adp5061_get_array_index(const int *array, u8 size, int val)
+{
+ int i;
+
+ for (i = 1; i < size; i++) {
+ if (val < array[i])
+ break;
+ }
+
+	return i - 1;
+}
+
+static int adp5061_get_status(struct adp5061_state *st,
+ u8 *status1, u8 *status2)
+{
+ u8 buf[2];
+ int ret;
+
+ /* CHG_STATUS1 and CHG_STATUS2 are adjacent regs */
+ ret = regmap_bulk_read(st->regmap, ADP5061_CHG_STATUS_1,
+ &buf[0], 2);
+ if (ret < 0)
+ return ret;
+
+ *status1 = buf[0];
+ *status2 = buf[1];
+
+ return ret;
+}
+
+static int adp5061_get_input_current_limit(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int mode, ret;
+
+ ret = regmap_read(st->regmap, ADP5061_VINX_SET, &regval);
+ if (ret < 0)
+ return ret;
+
+ mode = ADP5061_VINX_SET_ILIM_MODE(regval);
+ val->intval = adp5061_in_current_lim[mode] * 1000;
+
+ return ret;
+}
+
+static int adp5061_set_input_current_limit(struct adp5061_state *st, int val)
+{
+ int index;
+
+ /* Convert from uA to mA */
+ val /= 1000;
+ index = adp5061_get_array_index(adp5061_in_current_lim,
+ ARRAY_SIZE(adp5061_in_current_lim),
+ val);
+ if (index < 0)
+ return index;
+
+ return regmap_update_bits(st->regmap, ADP5061_VINX_SET,
+ ADP5061_VINX_SET_ILIM_MSK,
+ ADP5061_VINX_SET_ILIM_MODE(index));
+}
+
+static int adp5061_set_min_voltage(struct adp5061_state *st, int val)
+{
+ int index;
+
+ /* Convert from uV to mV */
+ val /= 1000;
+ index = adp5061_get_array_index(adp5061_vmin,
+ ARRAY_SIZE(adp5061_vmin),
+ val);
+ if (index < 0)
+ return index;
+
+ return regmap_update_bits(st->regmap, ADP5061_VOLTAGE_TH,
+ ADP5061_VOLTAGE_TH_VTRK_DEAD_MSK,
+ ADP5061_VOLTAGE_TH_VTRK_DEAD_MODE(index));
+}
+
+static int adp5061_get_min_voltage(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADP5061_VOLTAGE_TH, &regval);
+ if (ret < 0)
+ return ret;
+
+ regval = ((regval & ADP5061_VOLTAGE_TH_VTRK_DEAD_MSK) >> 3);
+ val->intval = adp5061_vmin[regval] * 1000;
+
+ return ret;
+}
+
+static int adp5061_get_chg_volt_lim(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int mode, ret;
+
+ ret = regmap_read(st->regmap, ADP5061_TERM_SET, &regval);
+ if (ret < 0)
+ return ret;
+
+ mode = ADP5061_TERM_SET_CHG_VLIM_MODE(regval);
+ val->intval = adp5061_const_chg_vmax[mode] * 1000;
+
+ return ret;
+}
+
+static int adp5061_get_max_voltage(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADP5061_TERM_SET, &regval);
+ if (ret < 0)
+ return ret;
+
+ regval = ((regval & ADP5061_TERM_SET_VTRM_MSK) >> 2) - 0x0F;
+ if (regval >= ARRAY_SIZE(adp5061_vmax))
+ regval = ARRAY_SIZE(adp5061_vmax) - 1;
+
+ val->intval = adp5061_vmax[regval] * 1000;
+
+ return ret;
+}
+
+static int adp5061_set_max_voltage(struct adp5061_state *st, int val)
+{
+ int vmax_index;
+
+ /* Convert from uV to mV */
+ val /= 1000;
+ if (val > 4500)
+ val = 4500;
+
+ vmax_index = adp5061_get_array_index(adp5061_vmax,
+ ARRAY_SIZE(adp5061_vmax), val);
+ if (vmax_index < 0)
+ return vmax_index;
+
+ vmax_index += 0x0F;
+
+ return regmap_update_bits(st->regmap, ADP5061_TERM_SET,
+ ADP5061_TERM_SET_VTRM_MSK,
+ ADP5061_TERM_SET_VTRM_MODE(vmax_index));
+}
+
+static int adp5061_set_const_chg_vmax(struct adp5061_state *st, int val)
+{
+ int index;
+
+ /* Convert from uV to mV */
+ val /= 1000;
+ index = adp5061_get_array_index(adp5061_const_chg_vmax,
+ ARRAY_SIZE(adp5061_const_chg_vmax),
+ val);
+ if (index < 0)
+ return index;
+
+ return regmap_update_bits(st->regmap, ADP5061_TERM_SET,
+ ADP5061_TERM_SET_CHG_VLIM_MSK,
+ ADP5061_TERM_SET_CHG_VLIM_MODE(index));
+}
+
+static int adp5061_set_const_chg_current(struct adp5061_state *st, int val)
+{
+	int index;
+
+ /* Convert from uA to mA */
+ val /= 1000;
+ if (val > ADP5061_ICHG_MAX)
+ val = ADP5061_ICHG_MAX;
+
+ index = adp5061_get_array_index(adp5061_const_ichg,
+ ARRAY_SIZE(adp5061_const_ichg),
+ val);
+ if (index < 0)
+ return index;
+
+ return regmap_update_bits(st->regmap, ADP5061_CHG_CURR,
+ ADP5061_CHG_CURR_ICHG_MSK,
+ ADP5061_CHG_CURR_ICHG_MODE(index));
+}
+
+static int adp5061_get_const_chg_current(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADP5061_CHG_CURR, &regval);
+ if (ret < 0)
+ return ret;
+
+ regval = ((regval & ADP5061_CHG_CURR_ICHG_MSK) >> 2);
+ if (regval >= ARRAY_SIZE(adp5061_const_ichg))
+ regval = ARRAY_SIZE(adp5061_const_ichg) - 1;
+
+ val->intval = adp5061_const_ichg[regval] * 1000;
+
+ return ret;
+}
+
+static int adp5061_get_prechg_current(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADP5061_CHG_CURR, &regval);
+ if (ret < 0)
+ return ret;
+
+ regval &= ADP5061_CHG_CURR_ITRK_DEAD_MSK;
+ val->intval = adp5061_prechg_current[regval] * 1000;
+
+ return ret;
+}
+
+static int adp5061_set_prechg_current(struct adp5061_state *st, int val)
+{
+ int index;
+
+ /* Convert from uA to mA */
+ val /= 1000;
+ index = adp5061_get_array_index(adp5061_prechg_current,
+ ARRAY_SIZE(adp5061_prechg_current),
+ val);
+ if (index < 0)
+ return index;
+
+ return regmap_update_bits(st->regmap, ADP5061_CHG_CURR,
+ ADP5061_CHG_CURR_ITRK_DEAD_MSK,
+ ADP5061_CHG_CURR_ITRK_DEAD_MODE(index));
+}
+
+static int adp5061_get_vweak_th(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADP5061_VOLTAGE_TH, &regval);
+ if (ret < 0)
+ return ret;
+
+ regval &= ADP5061_VOLTAGE_TH_VWEAK_MSK;
+ val->intval = adp5061_vweak_th[regval] * 1000;
+
+ return ret;
+}
+
+static int adp5061_set_vweak_th(struct adp5061_state *st, int val)
+{
+ int index;
+
+ /* Convert from uV to mV */
+ val /= 1000;
+ index = adp5061_get_array_index(adp5061_vweak_th,
+ ARRAY_SIZE(adp5061_vweak_th),
+ val);
+ if (index < 0)
+ return index;
+
+ return regmap_update_bits(st->regmap, ADP5061_VOLTAGE_TH,
+ ADP5061_VOLTAGE_TH_VWEAK_MSK,
+ ADP5061_VOLTAGE_TH_VWEAK_MODE(index));
+}
+
+static int adp5061_get_chg_type(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ u8 status1, status2;
+ int chg_type, ret;
+
+ ret = adp5061_get_status(st, &status1, &status2);
+ if (ret < 0)
+ return ret;
+
+	/* the 3-bit status field can exceed the 4-entry table; guard the lookup */
+	chg_type = ADP5061_CHG_STATUS_1_CHG_STATUS(status1);
+	if (chg_type >= ARRAY_SIZE(adp5061_chg_type))
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+	else
+		val->intval = adp5061_chg_type[chg_type];
+
+ return ret;
+}
+
+static int adp5061_get_charger_status(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ u8 status1, status2;
+ int ret;
+
+ ret = adp5061_get_status(st, &status1, &status2);
+ if (ret < 0)
+ return ret;
+
+ switch (ADP5061_CHG_STATUS_1_CHG_STATUS(status1)) {
+ case ADP5061_CHG_OFF:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case ADP5061_CHG_TRICKLE:
+ case ADP5061_CHG_FAST_CC:
+ case ADP5061_CHG_FAST_CV:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case ADP5061_CHG_COMPLETE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case ADP5061_CHG_TIMER_EXP:
+ /* The battery must be discharging if there is a charge fault */
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ return ret;
+}
+
+static int adp5061_get_battery_status(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ u8 status1, status2;
+ int ret;
+
+ ret = adp5061_get_status(st, &status1, &status2);
+ if (ret < 0)
+ return ret;
+
+ switch (ADP5061_CHG_STATUS_2_BAT_STATUS(status2)) {
+ case 0x0: /* Battery monitor off */
+ case 0x1: /* No battery */
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ break;
+ case 0x2: /* VBAT < VTRK */
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ break;
+ case 0x3: /* VTRK < VBAT_SNS < VWEAK */
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ break;
+ case 0x4: /* VBAT_SNS > VWEAK */
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ break;
+	default: /* remaining 3-bit codes are not defined by the cases above */
+		val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+		break;
+	}
+
+ return ret;
+}
+
+static int adp5061_get_termination_current(struct adp5061_state *st,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADP5061_IEND, &regval);
+ if (ret < 0)
+ return ret;
+
+ regval = (regval & ADP5061_IEND_IEND_MSK) >> 5;
+ val->intval = adp5061_iend[regval];
+
+ return ret;
+}
+
+static int adp5061_set_termination_current(struct adp5061_state *st, int val)
+{
+ int index;
+
+ index = adp5061_get_array_index(adp5061_iend,
+ ARRAY_SIZE(adp5061_iend),
+ val);
+ if (index < 0)
+ return index;
+
+ return regmap_update_bits(st->regmap, ADP5061_IEND,
+ ADP5061_IEND_IEND_MSK,
+ ADP5061_IEND_IEND_MODE(index));
+}
+
+static int adp5061_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct adp5061_state *st = power_supply_get_drvdata(psy);
+ u8 status1, status2;
+ int mode, ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = adp5061_get_status(st, &status1, &status2);
+ if (ret < 0)
+ return ret;
+
+ mode = ADP5061_CHG_STATUS_2_BAT_STATUS(status2);
+ if (mode == ADP5061_NO_BATTERY)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ return adp5061_get_chg_type(st, val);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ /* This property is used to indicate the input current
+ * limit into VINx (ILIM)
+ */
+ return adp5061_get_input_current_limit(st, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ /* This property is used to indicate the termination
+ * voltage (VTRM)
+ */
+ return adp5061_get_max_voltage(st, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ /*
+ * This property is used to indicate the trickle to fast
+ * charge threshold (VTRK_DEAD)
+ */
+ return adp5061_get_min_voltage(st, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ /* This property is used to indicate the charging
+ * voltage limit (CHG_VLIM)
+ */
+ return adp5061_get_chg_volt_lim(st, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ /*
+ * This property is used to indicate the value of the constant
+ * current charge (ICHG)
+ */
+ return adp5061_get_const_chg_current(st, val);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ /*
+ * This property is used to indicate the value of the trickle
+ * and weak charge currents (ITRK_DEAD)
+ */
+ return adp5061_get_prechg_current(st, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ /*
+ * This property is used to indicate the VWEAK threshold;
+ * below this value, weak charge mode is entered,
+ * above this value, fast charge mode is entered
+ */
+ return adp5061_get_vweak_th(st, val);
+ case POWER_SUPPLY_PROP_STATUS:
+ /*
+ * Indicate the charger status in relation to power
+ * supply status property
+ */
+ return adp5061_get_charger_status(st, val);
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ /*
+ * Indicate the battery status in relation to power
+ * supply capacity level property
+ */
+ return adp5061_get_battery_status(st, val);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ /* Indicate the values of the termination current */
+ return adp5061_get_termination_current(st, val);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adp5061_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct adp5061_state *st = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return adp5061_set_input_current_limit(st, val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return adp5061_set_max_voltage(st, val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return adp5061_set_min_voltage(st, val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ return adp5061_set_const_chg_vmax(st, val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return adp5061_set_const_chg_current(st, val->intval);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return adp5061_set_prechg_current(st, val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ return adp5061_set_vweak_th(st, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return adp5061_set_termination_current(st, val->intval);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adp5061_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static enum power_supply_property adp5061_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+};
+
+static const struct regmap_config adp5061_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct power_supply_desc adp5061_desc = {
+ .name = "adp5061",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .get_property = adp5061_get_property,
+ .set_property = adp5061_set_property,
+ .property_is_writeable = adp5061_prop_writeable,
+ .properties = adp5061_props,
+ .num_properties = ARRAY_SIZE(adp5061_props),
+};
+
+static int adp5061_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct power_supply_config psy_cfg = {};
+ struct adp5061_state *st;
+
+ st = devm_kzalloc(&client->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->client = client;
+ st->regmap = devm_regmap_init_i2c(client,
+ &adp5061_regmap_config);
+ if (IS_ERR(st->regmap)) {
+ dev_err(&client->dev, "Failed to initialize register map\n");
+ return -EINVAL;
+ }
+
+ i2c_set_clientdata(client, st);
+ psy_cfg.drv_data = st;
+
+ st->psy = devm_power_supply_register(&client->dev,
+ &adp5061_desc,
+ &psy_cfg);
+
+ if (IS_ERR(st->psy)) {
+ dev_err(&client->dev, "Failed to register power supply\n");
+ return PTR_ERR(st->psy);
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id adp5061_id[] = {
+ { "adp5061", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp5061_id);
+
+static struct i2c_driver adp5061_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = adp5061_probe,
+ .id_table = adp5061_id,
+};
+module_i2c_driver(adp5061_driver);
+
+MODULE_DESCRIPTION("Analog Devices adp5061 battery charger driver");
+MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 44f70dcea61e..42001df4bd13 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -222,6 +222,7 @@ static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
case 100000:
if (power->axp20x_id == AXP221_ID)
return -EINVAL;
+ /* fall through */
case 500000:
case 900000:
val = (900000 - intval) / 400000;
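The added comment is functional, not cosmetic: GCC's -Wimplicit-fallthrough only stays quiet when a fall-through annotation sits directly before the next case label, and here it documents that the 100000 case intentionally shares the computation with the 500000/900000 cases. The shape of the idiom, reduced to a sketch with placeholder names:

	switch (sel) {
	case A:
		do_a();
		/* fall through */
	case B:
		do_b();
		break;
	}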
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 6e1bc14c3304..735658ee1c60 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -718,7 +718,7 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
}
/* Determine charge current limit */
- cc = (ret & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
+ cc = (val & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS;
cc = (cc * CHRG_CCCV_CC_LSB_RES) + CHRG_CCCV_CC_OFFSET;
info->cc = cc;
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index d44ed8e17c47..f022e1b550df 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -26,7 +26,6 @@
* http://www.ti.com/product/bq27510-g1
* http://www.ti.com/product/bq27510-g2
* http://www.ti.com/product/bq27510-g3
- * http://www.ti.com/product/bq27520-g4
* http://www.ti.com/product/bq27520-g1
* http://www.ti.com/product/bq27520-g2
* http://www.ti.com/product/bq27520-g3
@@ -40,7 +39,9 @@
* http://www.ti.com/product/bq27545-g1
* http://www.ti.com/product/bq27421-g1
* http://www.ti.com/product/bq27425-g1
+ * http://www.ti.com/product/bq27426
* http://www.ti.com/product/bq27411-g1
+ * http://www.ti.com/product/bq27441-g1
* http://www.ti.com/product/bq27621-g1
*/
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
new file mode 100644
index 000000000000..688a16bacfbb
--- /dev/null
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Power supply driver for ChromeOS EC based USB PD Charger.
+ *
+ * Copyright (c) 2014 - 2018 Google, Inc
+ */
+
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#define CHARGER_DIR_NAME "CROS_USBPD_CHARGER%d"
+#define CHARGER_DIR_NAME_LENGTH sizeof(CHARGER_DIR_NAME)
+#define CHARGER_CACHE_UPDATE_DELAY msecs_to_jiffies(500)
+#define CHARGER_MANUFACTURER_MODEL_LENGTH 32
+
+#define DRV_NAME "cros-usbpd-charger"
+
+struct port_data {
+ int port_number;
+ char name[CHARGER_DIR_NAME_LENGTH];
+ char manufacturer[CHARGER_MANUFACTURER_MODEL_LENGTH];
+ char model_name[CHARGER_MANUFACTURER_MODEL_LENGTH];
+ struct power_supply *psy;
+ struct power_supply_desc psy_desc;
+ int psy_usb_type;
+ int psy_online;
+ int psy_status;
+ int psy_current_max;
+ int psy_voltage_max_design;
+ int psy_voltage_now;
+ int psy_power_max;
+ struct charger_data *charger;
+ unsigned long last_update;
+};
+
+struct charger_data {
+ struct device *dev;
+ struct cros_ec_dev *ec_dev;
+ struct cros_ec_device *ec_device;
+ int num_charger_ports;
+ int num_registered_psy;
+ struct port_data *ports[EC_USB_PD_MAX_PORTS];
+ struct notifier_block notifier;
+};
+
+static enum power_supply_property cros_usbpd_charger_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_USB_TYPE
+};
+
+static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_C,
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_PD_DRP,
+ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID
+};
+
+static int cros_usbpd_charger_ec_command(struct charger_data *charger,
+ unsigned int version,
+ unsigned int command,
+ void *outdata,
+ unsigned int outsize,
+ void *indata,
+ unsigned int insize)
+{
+ struct cros_ec_dev *ec_dev = charger->ec_dev;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = ec_dev->cmd_offset + command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(charger->ec_device, msg);
+ if (ret >= 0 && insize)
+ memcpy(indata, msg->data, insize);
+
+ kfree(msg);
+ return ret;
+}
+
+static int cros_usbpd_charger_get_num_ports(struct charger_data *charger)
+{
+ struct ec_response_usb_pd_ports resp;
+ int ret;
+
+ ret = cros_usbpd_charger_ec_command(charger, 0, EC_CMD_USB_PD_PORTS,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(charger->dev,
+ "Unable to get the number or ports (err:0x%x)\n", ret);
+ return ret;
+ }
+
+ return resp.num_ports;
+}
+
+static int cros_usbpd_charger_get_discovery_info(struct port_data *port)
+{
+ struct charger_data *charger = port->charger;
+ struct ec_params_usb_pd_discovery_entry resp;
+ struct ec_params_usb_pd_info_request req;
+ int ret;
+
+ req.port = port->port_number;
+
+ ret = cros_usbpd_charger_ec_command(charger, 0,
+ EC_CMD_USB_PD_DISCOVERY,
+ &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(charger->dev,
+ "Unable to query discovery info (err:0x%x)\n", ret);
+ return ret;
+ }
+
+ dev_dbg(charger->dev, "Port %d: VID = 0x%x, PID=0x%x, PTYPE=0x%x\n",
+ port->port_number, resp.vid, resp.pid, resp.ptype);
+
+ snprintf(port->manufacturer, sizeof(port->manufacturer), "%x",
+ resp.vid);
+ snprintf(port->model_name, sizeof(port->model_name), "%x", resp.pid);
+
+ return 0;
+}
+
+static int cros_usbpd_charger_get_power_info(struct port_data *port)
+{
+ struct charger_data *charger = port->charger;
+ struct ec_response_usb_pd_power_info resp;
+ struct ec_params_usb_pd_power_info req;
+ int last_psy_status, last_psy_usb_type;
+ struct device *dev = charger->dev;
+ int ret;
+
+ req.port = port->port_number;
+ ret = cros_usbpd_charger_ec_command(charger, 0,
+ EC_CMD_USB_PD_POWER_INFO,
+ &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(dev, "Unable to query PD power info (err:0x%x)\n", ret);
+ return ret;
+ }
+
+ last_psy_status = port->psy_status;
+ last_psy_usb_type = port->psy_usb_type;
+
+ switch (resp.role) {
+ case USB_PD_PORT_POWER_DISCONNECTED:
+ port->psy_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ port->psy_online = 0;
+ break;
+ case USB_PD_PORT_POWER_SOURCE:
+ port->psy_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ port->psy_online = 0;
+ break;
+ case USB_PD_PORT_POWER_SINK:
+ port->psy_status = POWER_SUPPLY_STATUS_CHARGING;
+ port->psy_online = 1;
+ break;
+ case USB_PD_PORT_POWER_SINK_NOT_CHARGING:
+ port->psy_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ port->psy_online = 1;
+ break;
+ default:
+ dev_err(dev, "Unknown role %d\n", resp.role);
+ break;
+ }
+
+ port->psy_voltage_max_design = resp.meas.voltage_max;
+ port->psy_voltage_now = resp.meas.voltage_now;
+ port->psy_current_max = resp.meas.current_max;
+ port->psy_power_max = resp.max_power;
+
+ switch (resp.type) {
+ case USB_CHG_TYPE_BC12_SDP:
+ case USB_CHG_TYPE_VBUS:
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case USB_CHG_TYPE_NONE:
+ /*
+ * For dual-role devices when we are a source, the firmware
+ * reports the type as NONE. Report such chargers as type
+ * USB_PD_DRP.
+ */
+ if (resp.role == USB_PD_PORT_POWER_SOURCE && resp.dualrole)
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_PD_DRP;
+ else
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case USB_CHG_TYPE_OTHER:
+ case USB_CHG_TYPE_PROPRIETARY:
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID;
+ break;
+ case USB_CHG_TYPE_C:
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_C;
+ break;
+ case USB_CHG_TYPE_BC12_DCP:
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case USB_CHG_TYPE_BC12_CDP:
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case USB_CHG_TYPE_PD:
+ if (resp.dualrole)
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_PD_DRP;
+ else
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_PD;
+ break;
+ case USB_CHG_TYPE_UNKNOWN:
+ /*
+ * While the EC is trying to determine the type of charger that
+ * has been plugged in, it will report the charger type as
+ * unknown. Additionally, since the power capabilities are
+ * unknown, report the max current and voltage as zero.
+ */
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ port->psy_voltage_max_design = 0;
+ port->psy_current_max = 0;
+ break;
+ default:
+ dev_err(dev, "Port %d: default case!\n", port->port_number);
+ port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ }
+
+ port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
+
+ dev_dbg(dev,
+ "Port %d: type=%d vmax=%d vnow=%d cmax=%d clim=%d pmax=%d\n",
+ port->port_number, resp.type, resp.meas.voltage_max,
+ resp.meas.voltage_now, resp.meas.current_max,
+ resp.meas.current_lim, resp.max_power);
+
+ /*
+ * If power supply type or status changed, explicitly call
+ * power_supply_changed. This results in udev event getting generated
+ * and allows user mode apps to react quicker instead of waiting for
+ * their next poll of power supply status.
+ */
+ if (last_psy_usb_type != port->psy_usb_type ||
+ last_psy_status != port->psy_status)
+ power_supply_changed(port->psy);
+
+ return 0;
+}
+
+static int cros_usbpd_charger_get_port_status(struct port_data *port,
+ bool ratelimit)
+{
+ int ret;
+
+ if (ratelimit &&
+ time_is_after_jiffies(port->last_update +
+ CHARGER_CACHE_UPDATE_DELAY))
+ return 0;
+
+ ret = cros_usbpd_charger_get_power_info(port);
+ if (ret < 0)
+ return ret;
+
+ ret = cros_usbpd_charger_get_discovery_info(port);
+ port->last_update = jiffies;
+
+ return ret;
+}
+
+static void cros_usbpd_charger_power_changed(struct power_supply *psy)
+{
+ struct port_data *port = power_supply_get_drvdata(psy);
+ struct charger_data *charger = port->charger;
+ int i;
+
+ for (i = 0; i < charger->num_registered_psy; i++)
+ cros_usbpd_charger_get_port_status(charger->ports[i], false);
+}
+
+static int cros_usbpd_charger_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct port_data *port = power_supply_get_drvdata(psy);
+ struct charger_data *charger = port->charger;
+ struct cros_ec_device *ec_device = charger->ec_device;
+ struct device *dev = charger->dev;
+ int ret;
+
+ /* Only refresh ec_port_status for dynamic properties */
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ /*
+ * If mkbp_event_supported, the driver's cached state for the
+ * online property is guaranteed to be consistent with the
+ * hardware. If we aren't event driven, however, skipping the
+ * ec_port_status read and returning only the cached online
+ * value would delay detection of a cable attach until one of
+ * the other properties is read.
+ *
+ * So, when events are not supported and we are not already
+ * online, allow an ec_port_status refresh on the online
+ * property check to pick up plug events.
+ */
+ if (ec_device->mkbp_event_supported || port->psy_online)
+ break;
+ /* fall through */
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = cros_usbpd_charger_get_port_status(port, true);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get port status (err:0x%x)\n",
+ ret);
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = port->psy_online;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = port->psy_status;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = port->psy_current_max * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = port->psy_voltage_max_design * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = port->psy_voltage_now * 1000;
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = port->psy_usb_type;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = port->model_name;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = port->manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cros_usbpd_charger_ec_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec_device;
+ struct charger_data *charger;
+ struct device *dev;
+ u32 host_event;
+
+ charger = container_of(nb, struct charger_data, notifier);
+ ec_device = charger->ec_device;
+ dev = charger->dev;
+
+ host_event = cros_ec_get_host_event(ec_device);
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
+ cros_usbpd_charger_power_changed(charger->ports[0]->psy);
+ return NOTIFY_OK;
+ } else {
+ return NOTIFY_DONE;
+ }
+}
+
+static void cros_usbpd_charger_unregister_notifier(void *data)
+{
+ struct charger_data *charger = data;
+ struct cros_ec_device *ec_device = charger->ec_device;
+
+ blocking_notifier_chain_unregister(&ec_device->event_notifier,
+ &charger->notifier);
+}
+
+static int cros_usbpd_charger_probe(struct platform_device *pd)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
+ struct cros_ec_device *ec_device = ec_dev->ec_dev;
+ struct power_supply_desc *psy_desc;
+ struct device *dev = &pd->dev;
+ struct charger_data *charger;
+ struct power_supply *psy;
+ struct port_data *port;
+ int ret = -EINVAL;
+ int i;
+
+ charger = devm_kzalloc(dev, sizeof(struct charger_data),
+ GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->dev = dev;
+ charger->ec_dev = ec_dev;
+ charger->ec_device = ec_device;
+
+ platform_set_drvdata(pd, charger);
+
+ charger->num_charger_ports = cros_usbpd_charger_get_num_ports(charger);
+ if (charger->num_charger_ports <= 0) {
+ /*
+ * This can happen on a system that doesn't support USB PD.
+ * Log a message, but no need to warn.
+ */
+ dev_info(dev, "No charging ports found\n");
+ ret = -ENODEV;
+ goto fail_nowarn;
+ }
+
+ for (i = 0; i < charger->num_charger_ports; i++) {
+ struct power_supply_config psy_cfg = {};
+
+ port = devm_kzalloc(dev, sizeof(struct port_data), GFP_KERNEL);
+ if (!port) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ port->charger = charger;
+ port->port_number = i;
+ sprintf(port->name, CHARGER_DIR_NAME, i);
+
+ psy_desc = &port->psy_desc;
+ psy_desc->name = port->name;
+ psy_desc->type = POWER_SUPPLY_TYPE_USB;
+ psy_desc->get_property = cros_usbpd_charger_get_prop;
+ psy_desc->external_power_changed =
+ cros_usbpd_charger_power_changed;
+ psy_desc->properties = cros_usbpd_charger_props;
+ psy_desc->num_properties =
+ ARRAY_SIZE(cros_usbpd_charger_props);
+ psy_desc->usb_types = cros_usbpd_charger_usb_types;
+ psy_desc->num_usb_types =
+ ARRAY_SIZE(cros_usbpd_charger_usb_types);
+ psy_cfg.drv_data = port;
+
+ psy = devm_power_supply_register_no_ws(dev, psy_desc,
+ &psy_cfg);
+ if (IS_ERR(psy)) {
+ dev_err(dev, "Failed to register power supply\n");
+ continue;
+ }
+ port->psy = psy;
+
+ charger->ports[charger->num_registered_psy++] = port;
+ }
+
+ if (!charger->num_registered_psy) {
+ ret = -ENODEV;
+ dev_err(dev, "No power supplies registered\n");
+ goto fail;
+ }
+
+ if (ec_device->mkbp_event_supported) {
+ /* Get PD events from the EC */
+ charger->notifier.notifier_call = cros_usbpd_charger_ec_event;
+ ret = blocking_notifier_chain_register(
+ &ec_device->event_notifier,
+ &charger->notifier);
+ if (ret < 0) {
+ dev_warn(dev, "failed to register notifier\n");
+ } else {
+ ret = devm_add_action_or_reset(dev,
+ cros_usbpd_charger_unregister_notifier,
+ charger);
+ if (ret < 0)
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ WARN(1, "%s: Failing probe (err:0x%x)\n", dev_name(dev), ret);
+
+fail_nowarn:
+ dev_info(dev, "Failing probe (err:0x%x)\n", ret);
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_usbpd_charger_resume(struct device *dev)
+{
+ struct charger_data *charger = dev_get_drvdata(dev);
+ int i;
+
+ if (!charger)
+ return 0;
+
+ for (i = 0; i < charger->num_registered_psy; i++) {
+ power_supply_changed(charger->ports[i]->psy);
+ charger->ports[i]->last_update =
+ jiffies - CHARGER_CACHE_UPDATE_DELAY;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_usbpd_charger_pm_ops, NULL,
+ cros_usbpd_charger_resume);
+
+static struct platform_driver cros_usbpd_charger_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_usbpd_charger_pm_ops,
+ },
+ .probe = cros_usbpd_charger_probe
+};
+
+module_platform_driver(cros_usbpd_charger_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC USBPD charger");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index ae180dc929c9..11bed88a89fa 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -27,9 +27,64 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-
+#include <linux/suspend.h>
#include <linux/w1.h>
-#include "../../w1/slaves/w1_ds2760.h"
+#include <linux/of.h>
+
+static unsigned int cache_time = 1000;
+module_param(cache_time, uint, 0644);
+MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+
+static bool pmod_enabled;
+module_param(pmod_enabled, bool, 0644);
+MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
+
+static unsigned int rated_capacity;
+module_param(rated_capacity, uint, 0644);
+MODULE_PARM_DESC(rated_capacity, "rated battery capacity, 10*mAh or index");
+
+static unsigned int current_accum;
+module_param(current_accum, uint, 0644);
+MODULE_PARM_DESC(current_accum, "current accumulator value");
+
+#define W1_FAMILY_DS2760 0x30
+
+/* Known commands to the DS2760 chip */
+#define W1_DS2760_SWAP 0xAA
+#define W1_DS2760_READ_DATA 0x69
+#define W1_DS2760_WRITE_DATA 0x6C
+#define W1_DS2760_COPY_DATA 0x48
+#define W1_DS2760_RECALL_DATA 0xB8
+#define W1_DS2760_LOCK 0x6A
+
+/* Number of valid register addresses */
+#define DS2760_DATA_SIZE 0x40
+
+#define DS2760_PROTECTION_REG 0x00
+
+#define DS2760_STATUS_REG 0x01
+#define DS2760_STATUS_IE (1 << 2)
+#define DS2760_STATUS_SWEN (1 << 3)
+#define DS2760_STATUS_RNAOP (1 << 4)
+#define DS2760_STATUS_PMOD (1 << 5)
+
+#define DS2760_EEPROM_REG 0x07
+#define DS2760_SPECIAL_FEATURE_REG 0x08
+#define DS2760_VOLTAGE_MSB 0x0c
+#define DS2760_VOLTAGE_LSB 0x0d
+#define DS2760_CURRENT_MSB 0x0e
+#define DS2760_CURRENT_LSB 0x0f
+#define DS2760_CURRENT_ACCUM_MSB 0x10
+#define DS2760_CURRENT_ACCUM_LSB 0x11
+#define DS2760_TEMP_MSB 0x18
+#define DS2760_TEMP_LSB 0x19
+#define DS2760_EEPROM_BLOCK0 0x20
+#define DS2760_ACTIVE_FULL 0x20
+#define DS2760_EEPROM_BLOCK1 0x30
+#define DS2760_STATUS_WRITE_REG 0x31
+#define DS2760_RATED_CAPACITY 0x32
+#define DS2760_CURRENT_OFFSET_BIAS 0x33
+#define DS2760_ACTIVE_EMPTY 0x3b
struct ds2760_device_info {
struct device *dev;
@@ -55,28 +110,113 @@ struct ds2760_device_info {
int full_counter;
struct power_supply *bat;
struct power_supply_desc bat_desc;
- struct device *w1_dev;
struct workqueue_struct *monitor_wqueue;
struct delayed_work monitor_work;
struct delayed_work set_charged_work;
+ struct notifier_block pm_notifier;
};
-static unsigned int cache_time = 1000;
-module_param(cache_time, uint, 0644);
-MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+static int w1_ds2760_io(struct device *dev, char *buf, int addr, size_t count,
+ int io)
+{
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
-static bool pmod_enabled;
-module_param(pmod_enabled, bool, 0644);
-MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
+ if (!dev)
+ return 0;
-static unsigned int rated_capacity;
-module_param(rated_capacity, uint, 0644);
-MODULE_PARM_DESC(rated_capacity, "rated battery capacity, 10*mAh or index");
+ mutex_lock(&sl->master->bus_mutex);
-static unsigned int current_accum;
-module_param(current_accum, uint, 0644);
-MODULE_PARM_DESC(current_accum, "current accumulator value");
+ if (addr > DS2760_DATA_SIZE || addr < 0) {
+ count = 0;
+ goto out;
+ }
+ if (addr + count > DS2760_DATA_SIZE)
+ count = DS2760_DATA_SIZE - addr;
+
+ if (!w1_reset_select_slave(sl)) {
+ if (!io) {
+ w1_write_8(sl->master, W1_DS2760_READ_DATA);
+ w1_write_8(sl->master, addr);
+ count = w1_read_block(sl->master, buf, count);
+ } else {
+ w1_write_8(sl->master, W1_DS2760_WRITE_DATA);
+ w1_write_8(sl->master, addr);
+ w1_write_block(sl->master, buf, count);
+ /* w1_write_block() returns void, so a short write cannot be detected */
+ }
+ }
+
+out:
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+static int w1_ds2760_read(struct device *dev,
+ char *buf, int addr,
+ size_t count)
+{
+ return w1_ds2760_io(dev, buf, addr, count, 0);
+}
+
+static int w1_ds2760_write(struct device *dev,
+ char *buf,
+ int addr, size_t count)
+{
+ return w1_ds2760_io(dev, buf, addr, count, 1);
+}
+
+static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd)
+{
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_reset_select_slave(sl) == 0) {
+ w1_write_8(sl->master, cmd);
+ w1_write_8(sl->master, addr);
+ }
+
+ mutex_unlock(&sl->master->bus_mutex);
+ return 0;
+}
+
+static int w1_ds2760_store_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA);
+}
+
+static int w1_ds2760_recall_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
+}
+
+static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ return w1_ds2760_read(dev, buf, off, count);
+}
+
+static BIN_ATTR_RO(w1_slave, DS2760_DATA_SIZE);
+
+static struct bin_attribute *w1_ds2760_bin_attrs[] = {
+ &bin_attr_w1_slave,
+ NULL,
+};
+
+static const struct attribute_group w1_ds2760_group = {
+ .bin_attrs = w1_ds2760_bin_attrs,
+};
+static const struct attribute_group *w1_ds2760_groups[] = {
+ &w1_ds2760_group,
+ NULL,
+};
/* Some batteries have their rated capacity stored a N * 10 mAh, while
* others use an index into this table. */
static int rated_capacities[] = {
@@ -138,10 +278,10 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
count = DS2760_TEMP_LSB - start + 1;
}
- ret = w1_ds2760_read(di->w1_dev, di->raw + start, start, count);
+ ret = w1_ds2760_read(di->dev, di->raw + start, start, count);
if (ret != count) {
dev_warn(di->dev, "call to w1_ds2760_read failed (0x%p)\n",
- di->w1_dev);
+ di->dev);
return 1;
}
@@ -242,7 +382,7 @@ static void ds2760_battery_set_current_accum(struct ds2760_device_info *di,
acr[0] = acr_val >> 8;
acr[1] = acr_val & 0xff;
- if (w1_ds2760_write(di->w1_dev, acr, DS2760_CURRENT_ACCUM_MSB, 2) < 2)
+ if (w1_ds2760_write(di->dev, acr, DS2760_CURRENT_ACCUM_MSB, 2) < 2)
dev_warn(di->dev, "ACR write failed\n");
}
@@ -297,9 +437,9 @@ static void ds2760_battery_write_status(struct ds2760_device_info *di,
if (status == di->raw[DS2760_STATUS_REG])
return;
- w1_ds2760_write(di->w1_dev, &status, DS2760_STATUS_WRITE_REG, 1);
- w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
- w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_write(di->dev, &status, DS2760_STATUS_WRITE_REG, 1);
+ w1_ds2760_store_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
}
static void ds2760_battery_write_rated_capacity(struct ds2760_device_info *di,
@@ -308,9 +448,9 @@ static void ds2760_battery_write_rated_capacity(struct ds2760_device_info *di,
if (rated_capacity == di->raw[DS2760_RATED_CAPACITY])
return;
- w1_ds2760_write(di->w1_dev, &rated_capacity, DS2760_RATED_CAPACITY, 1);
- w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
- w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_write(di->dev, &rated_capacity, DS2760_RATED_CAPACITY, 1);
+ w1_ds2760_store_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
}
static void ds2760_battery_write_active_full(struct ds2760_device_info *di,
@@ -325,9 +465,9 @@ static void ds2760_battery_write_active_full(struct ds2760_device_info *di,
tmp[1] == di->raw[DS2760_ACTIVE_FULL + 1])
return;
- w1_ds2760_write(di->w1_dev, tmp, DS2760_ACTIVE_FULL, sizeof(tmp));
- w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0);
- w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0);
+ w1_ds2760_write(di->dev, tmp, DS2760_ACTIVE_FULL, sizeof(tmp));
+ w1_ds2760_store_eeprom(di->dev, DS2760_EEPROM_BLOCK0);
+ w1_ds2760_recall_eeprom(di->dev, DS2760_EEPROM_BLOCK0);
/* Write to the di->raw[] buffer directly - the DS2760_ACTIVE_FULL
* values won't be read back by ds2760_battery_read_status() */
@@ -383,9 +523,9 @@ static void ds2760_battery_set_charged_work(struct work_struct *work)
dev_dbg(di->dev, "%s: bias = %d\n", __func__, bias);
- w1_ds2760_write(di->w1_dev, &bias, DS2760_CURRENT_OFFSET_BIAS, 1);
- w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
- w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_write(di->dev, &bias, DS2760_CURRENT_OFFSET_BIAS, 1);
+ w1_ds2760_store_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
/* Write to the di->raw[] buffer directly - the CURRENT_OFFSET_BIAS
* value won't be read back by ds2760_battery_read_status() */
@@ -504,24 +644,55 @@ static enum power_supply_property ds2760_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
};
-static int ds2760_battery_probe(struct platform_device *pdev)
+static int ds2760_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event,
+ void *unused)
+{
+ struct ds2760_device_info *di =
+ container_of(notifier, struct ds2760_device_info, pm_notifier);
+
+ switch (pm_event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+
+ case PM_POST_RESTORE:
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ power_supply_changed(di->bat);
+ mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
+
+ break;
+
+ case PM_RESTORE_PREPARE:
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int w1_ds2760_add_slave(struct w1_slave *sl)
{
struct power_supply_config psy_cfg = {};
- char status;
- int retval = 0;
struct ds2760_device_info *di;
+ struct device *dev = &sl->dev;
+ int retval = 0;
+ char name[32];
+ char status;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
if (!di) {
retval = -ENOMEM;
goto di_alloc_failed;
}
- platform_set_drvdata(pdev, di);
+ snprintf(name, sizeof(name), "ds2760-battery.%d", dev->id);
- di->dev = &pdev->dev;
- di->w1_dev = pdev->dev.parent;
- di->bat_desc.name = dev_name(&pdev->dev);
+ di->dev = dev;
+ di->bat_desc.name = name;
di->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
di->bat_desc.properties = ds2760_battery_props;
di->bat_desc.num_properties = ARRAY_SIZE(ds2760_battery_props);
@@ -533,10 +704,30 @@ static int ds2760_battery_probe(struct platform_device *pdev)
di->bat_desc.external_power_changed =
ds2760_battery_external_power_changed;
- psy_cfg.drv_data = di;
+ psy_cfg.drv_data = di;
+
+ if (dev->of_node) {
+ u32 tmp;
+
+ psy_cfg.of_node = dev->of_node;
+
+ if (of_property_read_bool(dev->of_node, "maxim,pmod-enabled"))
+ pmod_enabled = true;
+
+ if (!of_property_read_u32(dev->of_node,
+ "maxim,cache-time-ms", &tmp))
+ cache_time = tmp;
+
+ if (!of_property_read_u32(dev->of_node,
+ "rated-capacity-microamp-hours",
+ &tmp))
+ rated_capacity = tmp / 10; /* property is in mAh */
+ }
di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ sl->family_data = di;
+
/* enable sleep mode feature */
ds2760_battery_read_status(di);
status = di->raw[DS2760_STATUS_REG];
@@ -547,7 +738,7 @@ static int ds2760_battery_probe(struct platform_device *pdev)
ds2760_battery_write_status(di, status);
- /* set rated capacity from module param */
+ /* set rated capacity from module param or device tree */
if (rated_capacity)
ds2760_battery_write_rated_capacity(di, rated_capacity);
@@ -556,7 +747,7 @@ static int ds2760_battery_probe(struct platform_device *pdev)
if (current_accum)
ds2760_battery_set_current_accum(di, current_accum);
- di->bat = power_supply_register(&pdev->dev, &di->bat_desc, &psy_cfg);
+ di->bat = power_supply_register(dev, &di->bat_desc, &psy_cfg);
if (IS_ERR(di->bat)) {
dev_err(di->dev, "failed to register battery\n");
retval = PTR_ERR(di->bat);
@@ -566,14 +757,16 @@ static int ds2760_battery_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
INIT_DELAYED_WORK(&di->set_charged_work,
ds2760_battery_set_charged_work);
- di->monitor_wqueue = alloc_ordered_workqueue(dev_name(&pdev->dev),
- WQ_MEM_RECLAIM);
+ di->monitor_wqueue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!di->monitor_wqueue) {
retval = -ESRCH;
goto workqueue_failed;
}
queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ * 1);
+ di->pm_notifier.notifier_call = ds2760_pm_notifier;
+ register_pm_notifier(&di->pm_notifier);
+
goto success;
workqueue_failed:
@@ -584,65 +777,40 @@ success:
return retval;
}
-static int ds2760_battery_remove(struct platform_device *pdev)
+static void w1_ds2760_remove_slave(struct w1_slave *sl)
{
- struct ds2760_device_info *di = platform_get_drvdata(pdev);
+ struct ds2760_device_info *di = sl->family_data;
+ unregister_pm_notifier(&di->pm_notifier);
cancel_delayed_work_sync(&di->monitor_work);
cancel_delayed_work_sync(&di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
power_supply_unregister(di->bat);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int ds2760_battery_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct ds2760_device_info *di = platform_get_drvdata(pdev);
-
- di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
-
- return 0;
-}
-
-static int ds2760_battery_resume(struct platform_device *pdev)
-{
- struct ds2760_device_info *di = platform_get_drvdata(pdev);
-
- di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
- power_supply_changed(di->bat);
-
- mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
-
- return 0;
}
-#else
-
-#define ds2760_battery_suspend NULL
-#define ds2760_battery_resume NULL
-
-#endif /* CONFIG_PM */
-
-MODULE_ALIAS("platform:ds2760-battery");
+#ifdef CONFIG_OF
+static const struct of_device_id w1_ds2760_of_ids[] = {
+ { .compatible = "maxim,ds2760" },
+ {}
+};
+#endif
-static struct platform_driver ds2760_battery_driver = {
- .driver = {
- .name = "ds2760-battery",
- },
- .probe = ds2760_battery_probe,
- .remove = ds2760_battery_remove,
- .suspend = ds2760_battery_suspend,
- .resume = ds2760_battery_resume,
+static struct w1_family_ops w1_ds2760_fops = {
+ .add_slave = w1_ds2760_add_slave,
+ .remove_slave = w1_ds2760_remove_slave,
+ .groups = w1_ds2760_groups,
};
-module_platform_driver(ds2760_battery_driver);
+static struct w1_family w1_ds2760_family = {
+ .fid = W1_FAMILY_DS2760,
+ .fops = &w1_ds2760_fops,
+ .of_match_table = of_match_ptr(w1_ds2760_of_ids),
+};
+module_w1_family(w1_ds2760_family);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, "
"Matt Reimer <mreimer@vpop.net>, "
"Anton Vorontsov <cbou@mail.ru>");
-MODULE_DESCRIPTION("ds2760 battery driver");
+MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2760));
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index 28dc056eaafa..bc462d1ec963 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -241,10 +241,10 @@ static int gab_probe(struct platform_device *pdev)
struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg = {};
struct gab_platform_data *pdata = pdev->dev.platform_data;
- enum power_supply_property *properties;
int ret = 0;
int chan;
- int index = 0;
+ int index = ARRAY_SIZE(gab_props);
+ bool any = false;
adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL);
if (!adc_bat) {
@@ -278,8 +278,6 @@ static int gab_probe(struct platform_device *pdev)
}
memcpy(psy_desc->properties, gab_props, sizeof(gab_props));
- properties = (enum power_supply_property *)
- ((char *)psy_desc->properties + sizeof(gab_props));
/*
* getting channel from iio and copying the battery properties
@@ -293,15 +291,22 @@ static int gab_probe(struct platform_device *pdev)
adc_bat->channel[chan] = NULL;
} else {
/* copying properties for supported channels only */
- memcpy(properties + sizeof(*(psy_desc->properties)) * index,
- &gab_dyn_props[chan],
- sizeof(gab_dyn_props[chan]));
- index++;
+ int index2;
+
+ for (index2 = 0; index2 < index; index2++) {
+ if (psy_desc->properties[index2] ==
+ gab_dyn_props[chan])
+ break; /* already known */
+ }
+ if (index2 == index) /* really new */
+ psy_desc->properties[index++] =
+ gab_dyn_props[chan];
+ any = true;
}
}
/* none of the channels are supported so let's bail out */
- if (index == 0) {
+ if (!any) {
ret = -ENODEV;
goto second_mem_fail;
}
@@ -312,7 +317,7 @@ static int gab_probe(struct platform_device *pdev)
* as some channels may not be supported by the device. So
* we need to take care of that.
*/
- psy_desc->num_properties = ARRAY_SIZE(gab_props) + index;
+ psy_desc->num_properties = index;
adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg);
if (IS_ERR(adc_bat->psy)) {
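The old append advanced a typed pointer past gab_props and then added a sizeof()-scaled offset on top of it, so every copy after the first landed out of bounds; it could also list the same property twice. The replacement indexes psy_desc->properties directly and skips values that are already present. The pattern, reduced to a hypothetical helper:

	/* append prop to props[0..n-1] unless already present; returns new n */
	static int add_unique(enum power_supply_property *props, int n,
			      enum power_supply_property prop)
	{
		int i;

		for (i = 0; i < n; i++)
			if (props[i] == prop)
				return n;
		props[n] = prop;
		return n + 1;
	}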
diff --git a/drivers/power/supply/lego_ev3_battery.c b/drivers/power/supply/lego_ev3_battery.c
index 7b993d669f7f..1ae3710909b7 100644
--- a/drivers/power/supply/lego_ev3_battery.c
+++ b/drivers/power/supply/lego_ev3_battery.c
@@ -39,7 +39,7 @@ static int lego_ev3_battery_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct lego_ev3_battery *batt = power_supply_get_drvdata(psy);
- int val2;
+ int ret, val2;
switch (psp) {
case POWER_SUPPLY_PROP_TECHNOLOGY:
@@ -47,11 +47,18 @@ static int lego_ev3_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
/* battery voltage is iio channel * 2 + Vce of transistor */
- iio_read_channel_processed(batt->iio_v, &val->intval);
+ ret = iio_read_channel_processed(batt->iio_v, &val->intval);
+ if (ret)
+ return ret;
+
val->intval *= 2000;
- val->intval += 200000;
+ val->intval += 50000;
+
/* plus adjust for shunt resistor drop */
- iio_read_channel_processed(batt->iio_i, &val2);
+ ret = iio_read_channel_processed(batt->iio_i, &val2);
+ if (ret)
+ return ret;
+
val2 *= 1000;
val2 /= 15;
val->intval += val2;
@@ -64,7 +71,10 @@ static int lego_ev3_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
/* battery current is iio channel / 15 / 0.05 ohms */
- iio_read_channel_processed(batt->iio_i, &val->intval);
+ ret = iio_read_channel_processed(batt->iio_i, &val->intval);
+ if (ret)
+ return ret;
+
val->intval *= 20000;
val->intval /= 15;
break;
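The scaling in the VOLTAGE_NOW branch is easier to follow with numbers. Assuming the IIO channels report millivolts, a hypothetical iio_v reading of 3600 becomes 3600 * 2000 = 7200000 uV across the 1:2 divider plus the 50000 uV transistor drop, and a 150 mV amplified shunt reading adds 150 * 1000 / 15 = 10000 uV:

	int iio_v = 3600, iio_i = 150;	/* hypothetical readings, in mV */
	int uV;

	uV = iio_v * 2000 + 50000;	/* 1:2 divider plus Vce drop */
	uV += iio_i * 1000 / 15;	/* undo the 15x shunt amplifier */
	/* uV == 7260000, i.e. about 7.26 V */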
diff --git a/drivers/power/supply/max1721x_battery.c b/drivers/power/supply/max1721x_battery.c
index 9ee601a03d9b..9ca895b0dabb 100644
--- a/drivers/power/supply/max1721x_battery.c
+++ b/drivers/power/supply/max1721x_battery.c
@@ -372,7 +372,7 @@ static int devm_w1_max1721x_add_device(struct w1_slave *sl)
}
if (!info->rsense) {
- dev_warn(info->w1_dev, "RSenese not calibrated, set 10 mOhms!\n");
+ dev_warn(info->w1_dev, "RSense not calibrated, set 10 mOhms!\n");
info->rsense = 1000; /* in regs in 10^-5 */
}
dev_info(info->w1_dev, "RSense: %d mOhms.\n", info->rsense / 100);
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index 6c78884bad5e..749c7926e3c9 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -567,6 +567,7 @@ static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg
case 4800000:
case 4900000:
data = (uvolt - 4700000) / 100000;
+ break;
default:
dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n");
return -EINVAL;
diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index b64cf0f14142..cad7d1a8feec 100644
--- a/drivers/power/supply/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 3bc2eea7b3b7..6da79ae14860 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/device.h>
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index d21f478741c1..e85361878450 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/err.h>
@@ -140,8 +141,13 @@ static void power_supply_deferred_register_work(struct work_struct *work)
struct power_supply *psy = container_of(work, struct power_supply,
deferred_register_work.work);
- if (psy->dev.parent)
- mutex_lock(&psy->dev.parent->mutex);
+ if (psy->dev.parent) {
+ while (!mutex_trylock(&psy->dev.parent->mutex)) {
+ if (psy->removing)
+ return;
+ msleep(10);
+ }
+ }
power_supply_changed(psy);
@@ -1082,6 +1088,7 @@ EXPORT_SYMBOL_GPL(devm_power_supply_register_no_ws);
void power_supply_unregister(struct power_supply *psy)
{
WARN_ON(atomic_dec_return(&psy->use_cnt));
+ psy->removing = true;
cancel_work_sync(&psy->changed_work);
cancel_delayed_work_sync(&psy->deferred_register_work);
sysfs_remove_link(&psy->dev.kobj, "powers");
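The trylock loop avoids the deferred work blocking forever on a parent whose mutex is held across power_supply_unregister(): the old unconditional mutex_lock() could leave cancel_delayed_work_sync() waiting on a work item that itself waits on the caller's lock. Polling with mutex_trylock() and checking the new 'removing' flag lets the work item bail out instead. The shape of the pattern, reduced:

	while (!mutex_trylock(&parent->mutex)) {
		if (psy->removing)
			return;		/* teardown in flight; give up */
		msleep(10);		/* back off and retry */
	}
	/* ... work that needs the parent's mutex ... */
	mutex_unlock(&parent->mutex);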
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 83d7b4115857..8ba6abf584de 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/power/sbs-battery.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
@@ -156,6 +157,9 @@ static enum power_supply_property sbs_properties[] = {
POWER_SUPPLY_PROP_MODEL_NAME
};
+/* Supports special manufacturer commands from TI BQ20Z75 IC. */
+#define SBS_FLAGS_TI_BQ20Z75 BIT(0)
+
struct sbs_info {
struct i2c_client *client;
struct power_supply *power_supply;
@@ -168,6 +172,7 @@ struct sbs_info {
u32 poll_retry_count;
struct delayed_work work;
struct mutex mode_lock;
+ u32 flags;
};
static char model_name[I2C_SMBUS_BLOCK_MAX + 1];
@@ -316,16 +321,40 @@ static int sbs_get_battery_presence_and_health(
struct i2c_client *client, enum power_supply_property psp,
union power_supply_propval *val)
{
+ int ret;
+
+ if (psp == POWER_SUPPLY_PROP_PRESENT) {
+ /* Dummy command; if it succeeds, battery is present. */
+ ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
+ if (ret < 0)
+ val->intval = 0; /* battery disconnected */
+ else
+ val->intval = 1; /* battery present */
+ } else { /* POWER_SUPPLY_PROP_HEALTH */
+ /* SBS spec doesn't have a general health command. */
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int sbs_get_ti_battery_presence_and_health(
+ struct i2c_client *client, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
s32 ret;
/*
* Write to ManufacturerAccess with ManufacturerAccess command
- * and then read the status. Do not check for error on the write
- * since not all batteries implement write access to this command,
- * while others mandate it.
+ * and then read the status.
*/
- sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
- MANUFACTURER_ACCESS_STATUS);
+ ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_STATUS);
+ if (ret < 0) {
+ if (psp == POWER_SUPPLY_PROP_PRESENT)
+ val->intval = 0; /* battery removed */
+ return ret;
+ }
ret = sbs_read_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr);
if (ret < 0) {
@@ -600,7 +629,12 @@ static int sbs_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
- ret = sbs_get_battery_presence_and_health(client, psp, val);
+ if (client->flags & SBS_FLAGS_TI_BQ20Z75)
+ ret = sbs_get_ti_battery_presence_and_health(client,
+ psp, val);
+ else
+ ret = sbs_get_battery_presence_and_health(client, psp,
+ val);
if (psp == POWER_SUPPLY_PROP_PRESENT)
return 0;
break;
@@ -806,6 +840,7 @@ static int sbs_probe(struct i2c_client *client,
if (!chip)
return -ENOMEM;
+ chip->flags = (u32)(uintptr_t)of_device_get_match_data(&client->dev);
chip->client = client;
chip->enable_detection = false;
psy_cfg.of_node = client->dev.of_node;
@@ -911,16 +946,19 @@ static int sbs_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct sbs_info *chip = i2c_get_clientdata(client);
+ int ret;
if (chip->poll_time > 0)
cancel_delayed_work_sync(&chip->work);
- /*
- * Write to manufacturer access with sleep command.
- * Support is manufacturer dependend, so ignore errors.
- */
- sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
- MANUFACTURER_ACCESS_SLEEP);
+ if (chip->flags & SBS_FLAGS_TI_BQ20Z75) {
+ /* Write to manufacturer access with sleep command. */
+ ret = sbs_write_word_data(client,
+ sbs_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_SLEEP);
+ if (chip->is_present && ret < 0)
+ return ret;
+ }
return 0;
}
@@ -941,7 +979,10 @@ MODULE_DEVICE_TABLE(i2c, sbs_id);
static const struct of_device_id sbs_dt_ids[] = {
{ .compatible = "sbs,sbs-battery" },
- { .compatible = "ti,bq20z75" },
+ {
+ .compatible = "ti,bq20z75",
+ .data = (void *)SBS_FLAGS_TI_BQ20Z75,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sbs_dt_ids);
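The TI-specific behavior now travels from the match table to the driver through the .data pointer: of_device_get_match_data() returns whatever the matched entry carried, and the cast through uintptr_t recovers the flag bits in probe. The same pattern with hypothetical names:

	#define FLAG_QUIRKY	BIT(0)

	static const struct of_device_id my_ids[] = {
		{ .compatible = "vendor,chip-a", .data = (void *)FLAG_QUIRKY },
		{ .compatible = "vendor,chip-b" },	/* no quirks */
		{ }
	};

	/* in probe */
	chip->flags = (u32)(uintptr_t)of_device_get_match_data(&client->dev);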
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index 1f5234098aaf..814c2b81fdfe 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -1,20 +1,8 @@
-/*
- * Battery charger driver for TI's tps65217
- *
- * Copyright (c) 2015, Collabora Ltd.
-
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
-
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Battery charger driver for TI's tps65217
+//
+// Copyright (C) 2015 Collabora Ltd.
+// Author: Enric Balletbo i Serra <enric.balletbo@collabora.com>
/*
* Battery charger driver for TI's tps65217
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index a2740cf57ad3..15c0ca15e2aa 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -230,7 +230,8 @@ static irqreturn_t wm8350_charger_handler(int irq, void *data)
case WM8350_IRQ_EXT_USB_FB:
case WM8350_IRQ_EXT_WALL_FB:
wm8350_charger_config(wm8350, policy);
- case WM8350_IRQ_EXT_BAT_FB: /* Fall through */
+ /* Fall through */
+ case WM8350_IRQ_EXT_BAT_FB:
power_supply_changed(power->battery);
power_supply_changed(power->usb);
power_supply_changed(power->ac);
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index 85727ef6ce8e..6ac27e5908f5 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -29,4 +29,14 @@ config INTEL_RAPL
controller, CPU core (Power Plane 0), graphics uncore (Power Plane
1), etc.
+config IDLE_INJECT
+ bool "Idle injection framework"
+ depends on CPU_IDLE
+ default n
+ help
+ This enables support for the idle injection framework. It
+ provides a way to force idle periods on a set of specified
+ CPUs for power capping. Idle periods can be injected
+ synchronously on a set of specified CPUs or, alternatively,
+ on a per-CPU basis.
endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
index 0a21ef31372b..1b328854b36e 100644
--- a/drivers/powercap/Makefile
+++ b/drivers/powercap/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
+obj-$(CONFIG_IDLE_INJECT) += idle_inject.o
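The new file below exposes a small API: register a cpumask, set the run/idle durations, then start and later stop injection. A hypothetical consumer, error handling omitted:

	struct idle_inject_device *ii_dev;

	ii_dev = idle_inject_register(cpumask);		/* CPUs to throttle */
	idle_inject_set_duration(ii_dev, 100, 10);	/* 100 ms run, 10 ms idle */
	idle_inject_start(ii_dev);
	...
	idle_inject_stop(ii_dev);
	idle_inject_unregister(ii_dev);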
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
new file mode 100644
index 000000000000..24ff2a068978
--- /dev/null
+++ b/drivers/powercap/idle_inject.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * The idle injection framework provides a way to force CPUs to enter idle
+ * states for a specified fraction of time over a specified period.
+ *
+ * It relies on the smpboot kthreads feature providing common code for CPU
+ * hotplug and thread [un]parking.
+ *
+ * All of the kthreads used for idle injection are created at init time.
+ *
+ * Next, users of the idle injection framework provide a cpumask via
+ * the register function. The kthreads will be synchronized with respect to
+ * this cpumask.
+ *
+ * The idle and run durations are specified via separate helpers, after which
+ * idle injection can be started.
+ *
+ * The idle injection kthreads will call play_idle() with the idle duration
+ * specified as per the above.
+ *
+ * After all of them have been woken up, a timer is set to start the next idle
+ * injection cycle.
+ *
+ * The timer interrupt handler will wake up the idle injection kthreads for
+ * all of the CPUs in the cpumask provided by the user.
+ *
+ * Idle injection is stopped synchronously; it is guaranteed that there is no
+ * leftover idle injection kthread activity after its completion.
+ *
+ * It is up to the user of this framework to provide a lock for higher-level
+ * synchronization to prevent race conditions like starting idle injection
+ * while unregistering from the framework.
+ */
+#define pr_fmt(fmt) "ii_dev: " fmt
+
+#include <linux/cpu.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smpboot.h>
+
+#include <uapi/linux/sched/types.h>
+
+/**
+ * struct idle_inject_thread - task on/off switch structure
+ * @tsk: task injecting the idle cycles
+ * @should_run: whether or not to run the task (for the smpboot kthread API)
+ */
+struct idle_inject_thread {
+ struct task_struct *tsk;
+ int should_run;
+};
+
+/**
+ * struct idle_inject_device - idle injection data
+ * @timer: idle injection period timer
+ * @idle_duration_ms: duration of CPU idle time to inject
+ * @run_duration_ms: duration of CPU run time to allow
+ * @cpumask: mask of CPUs affected by idle injection
+ */
+struct idle_inject_device {
+ struct hrtimer timer;
+ unsigned int idle_duration_ms;
+ unsigned int run_duration_ms;
+ unsigned long int cpumask[0];
+};
+
+static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
+static DEFINE_PER_CPU(struct idle_inject_device *, idle_inject_device);
+
+/**
+ * idle_inject_wakeup - Wake up idle injection threads
+ * @ii_dev: target idle injection device
+ *
+ * Every idle injection task associated with the given idle injection device
+ * and running on an online CPU will be woken up.
+ */
+static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
+{
+ struct idle_inject_thread *iit;
+ unsigned int cpu;
+
+ for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
+ iit = per_cpu_ptr(&idle_inject_thread, cpu);
+ iit->should_run = 1;
+ wake_up_process(iit->tsk);
+ }
+}
+
+/**
+ * idle_inject_timer_fn - idle injection timer function
+ * @timer: idle injection hrtimer
+ *
+ * This function is called when the idle injection timer expires. It wakes up
+ * idle injection tasks associated with the timer and they, in turn, invoke
+ * play_idle() to inject a specified amount of CPU idle time.
+ *
+ * Return: HRTIMER_RESTART.
+ */
+static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
+{
+ unsigned int duration_ms;
+ struct idle_inject_device *ii_dev =
+ container_of(timer, struct idle_inject_device, timer);
+
+ duration_ms = READ_ONCE(ii_dev->run_duration_ms);
+ duration_ms += READ_ONCE(ii_dev->idle_duration_ms);
+
+ idle_inject_wakeup(ii_dev);
+
+ hrtimer_forward_now(timer, ms_to_ktime(duration_ms));
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * idle_inject_fn - idle injection work function
+ * @cpu: the CPU owning the task
+ *
+ * This function calls play_idle() to inject a specified amount of CPU idle
+ * time.
+ */
+static void idle_inject_fn(unsigned int cpu)
+{
+ struct idle_inject_device *ii_dev;
+ struct idle_inject_thread *iit;
+
+ ii_dev = per_cpu(idle_inject_device, cpu);
+ iit = per_cpu_ptr(&idle_inject_thread, cpu);
+
+ /*
+ * Let the smpboot main loop know that the task should not run again.
+ */
+ iit->should_run = 0;
+
+ play_idle(READ_ONCE(ii_dev->idle_duration_ms));
+}
+
+/**
+ * idle_inject_set_duration - idle and run duration update helper
+ * @ii_dev: idle injection control device structure
+ * @run_duration_ms: CPU run time to allow in milliseconds
+ * @idle_duration_ms: CPU idle time to inject in milliseconds
+ */
+void idle_inject_set_duration(struct idle_inject_device *ii_dev,
+ unsigned int run_duration_ms,
+ unsigned int idle_duration_ms)
+{
+ if (run_duration_ms && idle_duration_ms) {
+ WRITE_ONCE(ii_dev->run_duration_ms, run_duration_ms);
+ WRITE_ONCE(ii_dev->idle_duration_ms, idle_duration_ms);
+ }
+}
+
+/**
+ * idle_inject_get_duration - idle and run duration retrieval helper
+ * @ii_dev: idle injection control device structure
+ * @run_duration_ms: memory location to store the current CPU run time
+ * @idle_duration_ms: memory location to store the current CPU idle time
+ */
+void idle_inject_get_duration(struct idle_inject_device *ii_dev,
+ unsigned int *run_duration_ms,
+ unsigned int *idle_duration_ms)
+{
+ *run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);
+ *idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);
+}
+
+/**
+ * idle_inject_start - start idle injections
+ * @ii_dev: idle injection control device structure
+ *
+ * The function starts idle injection by first waking up all of the idle
+ * injection kthreads associated with @ii_dev to let them inject CPU idle time
+ * and then sets up a timer to start the next idle injection period.
+ *
+ * Return: -EINVAL if the CPU idle or CPU run time is not set or 0 on success.
+ */
+int idle_inject_start(struct idle_inject_device *ii_dev)
+{
+ unsigned int idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);
+ unsigned int run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);
+
+ if (!idle_duration_ms || !run_duration_ms)
+ return -EINVAL;
+
+ pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
+ cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
+
+ idle_inject_wakeup(ii_dev);
+
+ hrtimer_start(&ii_dev->timer,
+ ms_to_ktime(idle_duration_ms + run_duration_ms),
+ HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+/**
+ * idle_inject_stop - stop idle injections
+ * @ii_dev: idle injection control device structure
+ *
+ * The function stops idle injection and waits for the threads to finish work.
+ * If CPU idle time is being injected when this function runs, then it will
+ * wait until the end of the cycle.
+ *
+ * When it returns, there is no more idle injection kthread activity. The
+ * kthreads are scheduled out and the periodic timer is off.
+ */
+void idle_inject_stop(struct idle_inject_device *ii_dev)
+{
+ struct idle_inject_thread *iit;
+ unsigned int cpu;
+
+ pr_debug("Stopping idle injection on CPUs '%*pbl'\n",
+ cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
+
+ hrtimer_cancel(&ii_dev->timer);
+
+ /*
+ * Stopping idle injection requires all of the idle injection kthreads
+ * associated with the given cpumask to be parked and stay that way, so
+ * prevent CPUs from going online at this point. Any CPU that goes
+ * online after the loop below is covered anyway, because its should_run
+ * flag has been cleared, which makes the smpboot main loop schedule its
+ * kthread out again.
+ */
+ cpu_hotplug_disable();
+
+ /*
+ * Iterate over all (online and offline) CPUs here in case one of them
+ * went offline with the should_run flag set, to prevent its idle
+ * injection kthread from running when the CPU goes online again after
+ * the ii_dev has been freed.
+ */
+ for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
+ iit = per_cpu_ptr(&idle_inject_thread, cpu);
+ iit->should_run = 0;
+
+ wait_task_inactive(iit->tsk, 0);
+ }
+
+ cpu_hotplug_enable();
+}
+
+/**
+ * idle_inject_setup - prepare the current task for idle injection
+ * @cpu: not used
+ *
+ * Called once per kthread, this function sets the current task's
+ * scheduler parameters to make it an RT (SCHED_FIFO) task.
+ */
+static void idle_inject_setup(unsigned int cpu)
+{
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+}
+
+/**
+ * idle_inject_should_run - helper function for the smpboot API
+ * @cpu: CPU the kthread is running on
+ *
+ * Return: whether or not the thread can run.
+ */
+static int idle_inject_should_run(unsigned int cpu)
+{
+ struct idle_inject_thread *iit =
+ per_cpu_ptr(&idle_inject_thread, cpu);
+
+ return iit->should_run;
+}
+
+/**
+ * idle_inject_register - initialize idle injection on a set of CPUs
+ * @cpumask: CPUs to be affected by idle injection
+ *
+ * This function creates an idle injection control device structure for the
+ * given set of CPUs and initializes the timer associated with it. It does not
+ * start any injection cycles.
+ *
+ * Return: NULL if memory allocation fails, idle injection control device
+ * pointer on success.
+ */
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
+{
+ struct idle_inject_device *ii_dev;
+ int cpu, cpu_rb;
+
+ ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);
+ if (!ii_dev)
+ return NULL;
+
+ cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
+ hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ii_dev->timer.function = idle_inject_timer_fn;
+
+ for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
+
+ if (per_cpu(idle_inject_device, cpu)) {
+ pr_err("cpu%d is already registered\n", cpu);
+ goto out_rollback;
+ }
+
+ per_cpu(idle_inject_device, cpu) = ii_dev;
+ }
+
+ return ii_dev;
+
+out_rollback:
+ for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) {
+ if (cpu == cpu_rb)
+ break;
+ per_cpu(idle_inject_device, cpu_rb) = NULL;
+ }
+
+ kfree(ii_dev);
+
+ return NULL;
+}
+
+/**
+ * idle_inject_unregister - unregister idle injection control device
+ * @ii_dev: idle injection control device to unregister
+ *
+ * The function stops idle injection for the given control device,
+ * unregisters its kthreads and frees memory allocated when that device was
+ * created.
+ */
+void idle_inject_unregister(struct idle_inject_device *ii_dev)
+{
+ unsigned int cpu;
+
+ idle_inject_stop(ii_dev);
+
+ for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
+ per_cpu(idle_inject_device, cpu) = NULL;
+
+ kfree(ii_dev);
+}
+
+static struct smp_hotplug_thread idle_inject_threads = {
+ .store = &idle_inject_thread.tsk,
+ .setup = idle_inject_setup,
+ .thread_fn = idle_inject_fn,
+ .thread_comm = "idle_inject/%u",
+ .thread_should_run = idle_inject_should_run,
+};
+
+static int __init idle_inject_init(void)
+{
+ return smpboot_register_percpu_thread(&idle_inject_threads);
+}
+early_initcall(idle_inject_init);
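For reference, a minimal sketch of how a consumer (a thermal or powercap driver, say) would drive the API added above. The 80 ms window, the use of the online-CPU mask, and the linux/idle_inject.h include are illustrative assumptions, not guaranteed by this hunk:

/* Hypothetical consumer of the idle injection API (sketch only). */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/idle_inject.h>

static struct idle_inject_device *ii_dev;

static int example_start(void)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	ii_dev = idle_inject_register(mask);	/* the mask is copied internally */
	free_cpumask_var(mask);
	if (!ii_dev)
		return -ENOMEM;

	/* 25% idle: 20 ms idle in every 80 ms window (60 ms run + 20 ms idle). */
	idle_inject_set_duration(ii_dev, 60, 20);

	ret = idle_inject_start(ii_dev);
	if (ret)
		idle_inject_unregister(ii_dev);
	return ret;
}

static void example_stop(void)
{
	idle_inject_stop(ii_dev);
	idle_inject_unregister(ii_dev);
}

Note that idle_inject_unregister() already calls idle_inject_stop(), so the explicit stop in example_stop() is only for clarity.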
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 474c988d2e95..d137c480db46 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE
config PTP_1588_CLOCK_QORIQ
tristate "Freescale QorIQ 1588 timer as PTP clock"
- depends on GIANFAR
+ depends on GIANFAR || FSL_DPAA_ETH
depends on PTP_1588_CLOCK
default y
help
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 547dbdac9d54..01b0e2bb3319 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
case PTP_PF_PHYSYNC:
if (chan != 0)
return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 6edd3b9c7f01..a7dc43368df4 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index e8652c148c52..fdd49c26bbcc 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -29,6 +29,7 @@
#include <linux/of_platform.h>
#include <linux/timex.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/fsl/ptp_qoriq.h>
@@ -39,11 +40,12 @@
/* Caller must hold qoriq_ptp->lock. */
static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
u64 ns;
u32 lo, hi;
- lo = qoriq_read(&qoriq_ptp->regs->tmr_cnt_l);
- hi = qoriq_read(&qoriq_ptp->regs->tmr_cnt_h);
+ lo = qoriq_read(&regs->ctrl_regs->tmr_cnt_l);
+ hi = qoriq_read(&regs->ctrl_regs->tmr_cnt_h);
ns = ((u64) hi) << 32;
ns |= lo;
return ns;
@@ -52,16 +54,18 @@ static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
/* Caller must hold qoriq_ptp->lock. */
static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
u32 hi = ns >> 32;
u32 lo = ns & 0xffffffff;
- qoriq_write(&qoriq_ptp->regs->tmr_cnt_l, lo);
- qoriq_write(&qoriq_ptp->regs->tmr_cnt_h, hi);
+ qoriq_write(&regs->ctrl_regs->tmr_cnt_l, lo);
+ qoriq_write(&regs->ctrl_regs->tmr_cnt_h, hi);
}
/* Caller must hold qoriq_ptp->lock. */
static void set_alarm(struct qoriq_ptp *qoriq_ptp)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
u64 ns;
u32 lo, hi;
@@ -70,16 +74,18 @@ static void set_alarm(struct qoriq_ptp *qoriq_ptp)
ns -= qoriq_ptp->tclk_period;
hi = ns >> 32;
lo = ns & 0xffffffff;
- qoriq_write(&qoriq_ptp->regs->tmr_alarm1_l, lo);
- qoriq_write(&qoriq_ptp->regs->tmr_alarm1_h, hi);
+ qoriq_write(&regs->alarm_regs->tmr_alarm1_l, lo);
+ qoriq_write(&regs->alarm_regs->tmr_alarm1_h, hi);
}
/* Caller must hold qoriq_ptp->lock. */
static void set_fipers(struct qoriq_ptp *qoriq_ptp)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+
set_alarm(qoriq_ptp);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
+ qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
+ qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
}
/*
@@ -89,16 +95,17 @@ static void set_fipers(struct qoriq_ptp *qoriq_ptp)
static irqreturn_t isr(int irq, void *priv)
{
struct qoriq_ptp *qoriq_ptp = priv;
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
struct ptp_clock_event event;
u64 ns;
u32 ack = 0, lo, hi, mask, val;
- val = qoriq_read(&qoriq_ptp->regs->tmr_tevent);
+ val = qoriq_read(&regs->ctrl_regs->tmr_tevent);
if (val & ETS1) {
ack |= ETS1;
- hi = qoriq_read(&qoriq_ptp->regs->tmr_etts1_h);
- lo = qoriq_read(&qoriq_ptp->regs->tmr_etts1_l);
+ hi = qoriq_read(&regs->etts_regs->tmr_etts1_h);
+ lo = qoriq_read(&regs->etts_regs->tmr_etts1_l);
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = ((u64) hi) << 32;
@@ -108,8 +115,8 @@ static irqreturn_t isr(int irq, void *priv)
if (val & ETS2) {
ack |= ETS2;
- hi = qoriq_read(&qoriq_ptp->regs->tmr_etts2_h);
- lo = qoriq_read(&qoriq_ptp->regs->tmr_etts2_l);
+ hi = qoriq_read(&regs->etts_regs->tmr_etts2_h);
+ lo = qoriq_read(&regs->etts_regs->tmr_etts2_l);
event.type = PTP_CLOCK_EXTTS;
event.index = 1;
event.timestamp = ((u64) hi) << 32;
@@ -130,16 +137,16 @@ static irqreturn_t isr(int irq, void *priv)
hi = ns >> 32;
lo = ns & 0xffffffff;
spin_lock(&qoriq_ptp->lock);
- qoriq_write(&qoriq_ptp->regs->tmr_alarm2_l, lo);
- qoriq_write(&qoriq_ptp->regs->tmr_alarm2_h, hi);
+ qoriq_write(&regs->alarm_regs->tmr_alarm2_l, lo);
+ qoriq_write(&regs->alarm_regs->tmr_alarm2_h, hi);
spin_unlock(&qoriq_ptp->lock);
qoriq_ptp->alarm_value = ns;
} else {
- qoriq_write(&qoriq_ptp->regs->tmr_tevent, ALM2);
+ qoriq_write(&regs->ctrl_regs->tmr_tevent, ALM2);
spin_lock(&qoriq_ptp->lock);
- mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
mask &= ~ALM2EN;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
spin_unlock(&qoriq_ptp->lock);
qoriq_ptp->alarm_value = 0;
qoriq_ptp->alarm_interval = 0;
@@ -153,7 +160,7 @@ static irqreturn_t isr(int irq, void *priv)
}
if (ack) {
- qoriq_write(&qoriq_ptp->regs->tmr_tevent, ack);
+ qoriq_write(&regs->ctrl_regs->tmr_tevent, ack);
return IRQ_HANDLED;
} else
return IRQ_NONE;
@@ -169,6 +176,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
u32 tmr_add;
int neg_adj = 0;
struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
if (scaled_ppm < 0) {
neg_adj = 1;
@@ -186,7 +194,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
- qoriq_write(&qoriq_ptp->regs->tmr_add, tmr_add);
+ qoriq_write(&regs->ctrl_regs->tmr_add, tmr_add);
return 0;
}
@@ -250,6 +258,7 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
unsigned long flags;
u32 bit, mask;
@@ -266,23 +275,23 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
return -EINVAL;
}
spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
if (on)
mask |= bit;
else
mask &= ~bit;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
return 0;
case PTP_CLK_REQ_PPS:
spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
if (on)
mask |= PP1EN;
else
mask &= ~PP1EN;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
return 0;
@@ -309,20 +318,123 @@ static const struct ptp_clock_info ptp_qoriq_caps = {
.enable = ptp_qoriq_enable,
};
+/**
+ * qoriq_ptp_nominal_freq - calculate the nominal frequency from the
+ * reference clock frequency
+ *
+ * @clk_src: reference clock frequency
+ *
+ * The nominal frequency is the desired clock frequency. It must be lower
+ * than the reference clock frequency and must divide 1000 MHz evenly.
+ *
+ * Return: the nominal frequency
+ */
+static u32 qoriq_ptp_nominal_freq(u32 clk_src)
+{
+ u32 remainder = 0;
+
+ clk_src /= 1000000;
+ remainder = clk_src % 100;
+ if (remainder) {
+ clk_src -= remainder;
+ clk_src += 100;
+ }
+
+ do {
+ clk_src -= 100;
+
+ } while (1000 % clk_src);
+
+ return clk_src * 1000000;
+}
+
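To make the rounding concrete: 600 MHz is already a multiple of 100, the loop steps down once to 500, and 1000 % 500 == 0, so the nominal frequency is 500 MHz; 466 MHz rounds up to 500 and then steps down to 200 MHz. A standalone re-implementation of the same arithmetic (illustrative only, not part of the patch):

/* Userspace re-implementation of the rounding above, for illustration. */
#include <assert.h>

static unsigned int nominal_mhz(unsigned int clk_src_hz)
{
	unsigned int mhz = clk_src_hz / 1000000;

	if (mhz % 100)			/* round up to a multiple of 100 */
		mhz += 100 - mhz % 100;

	do {				/* step down until it divides 1000 */
		mhz -= 100;
	} while (1000 % mhz);

	return mhz;
}

int main(void)
{
	assert(nominal_mhz(600000000) == 500);	/* 600 -> 500 MHz */
	assert(nominal_mhz(466000000) == 200);	/* 466 -> 500 -> 200 MHz */
	return 0;
}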
+/**
+ * qoriq_ptp_auto_config - calculate a set of default configurations
+ *
+ * @qoriq_ptp: pointer to qoriq_ptp
+ * @node: pointer to device_node
+ *
+ * If the device tree properties below are not provided, this function
+ * is called to calculate a set of default configurations for them:
+ * "fsl,tclk-period"
+ * "fsl,tmr-prsc"
+ * "fsl,tmr-add"
+ * "fsl,tmr-fiper1"
+ * "fsl,tmr-fiper2"
+ * "fsl,max-adj"
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
+ struct device_node *node)
+{
+ struct clk *clk;
+ u64 freq_comp;
+ u64 max_adj;
+ u32 nominal_freq;
+ u32 remainder = 0;
+ u32 clk_src = 0;
+
+ qoriq_ptp->cksel = DEFAULT_CKSEL;
+
+ clk = of_clk_get(node, 0);
+ if (!IS_ERR(clk)) {
+ clk_src = clk_get_rate(clk);
+ clk_put(clk);
+ }
+
+ if (clk_src <= 100000000UL) {
+ pr_err("error reference clock value, or lower than 100MHz\n");
+ return -EINVAL;
+ }
+
+ nominal_freq = qoriq_ptp_nominal_freq(clk_src);
+ if (!nominal_freq)
+ return -EINVAL;
+
+ qoriq_ptp->tclk_period = 1000000000UL / nominal_freq;
+ qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC;
+
+ /* Calculate initial frequency compensation value for TMR_ADD register.
+ * freq_comp = ceil(2^32 / freq_ratio)
+ * freq_ratio = reference_clock_freq / nominal_freq
+ */
+ freq_comp = ((u64)1 << 32) * nominal_freq;
+ freq_comp = div_u64_rem(freq_comp, clk_src, &remainder);
+ if (remainder)
+ freq_comp++;
+
+ qoriq_ptp->tmr_add = freq_comp;
+ qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period;
+ qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period;
+
+ /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
+ * freq_ratio = reference_clock_freq / nominal_freq
+ */
+ max_adj = 1000000000ULL * (clk_src - nominal_freq);
+ max_adj = div_u64(max_adj, nominal_freq) - 1;
+ qoriq_ptp->caps.max_adj = max_adj;
+
+ return 0;
+}
+
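Plugging the same illustrative 600 MHz reference / 500 MHz nominal pair into the two formulas above gives freq_comp = ceil(2^32 * 500/600) = 3579139414 and max_adj = 10^9 * (600 - 500)/500 - 1 = 199999999. A userspace check of the same math (sketch only):

/* Worked check of the ceil-division and max_adj math above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_src = 600000000, nominal = 500000000;
	uint64_t freq_comp, max_adj;

	/* freq_comp = ceil(2^32 * nominal / clk_src) */
	freq_comp = (((uint64_t)1 << 32) * nominal + clk_src - 1) / clk_src;
	/* max_adj = 10^9 * (clk_src - nominal) / nominal - 1 */
	max_adj = 1000000000ULL * (clk_src - nominal) / nominal - 1;

	/* prints: freq_comp=3579139414 max_adj=199999999 */
	printf("freq_comp=%llu max_adj=%llu\n",
	       (unsigned long long)freq_comp, (unsigned long long)max_adj);
	return 0;
}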
static int qoriq_ptp_probe(struct platform_device *dev)
{
struct device_node *node = dev->dev.of_node;
struct qoriq_ptp *qoriq_ptp;
+ struct qoriq_ptp_registers *regs;
struct timespec64 now;
int err = -ENOMEM;
u32 tmr_ctrl;
unsigned long flags;
+ void __iomem *base;
qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL);
if (!qoriq_ptp)
goto no_memory;
- err = -ENODEV;
+ err = -EINVAL;
qoriq_ptp->caps = ptp_qoriq_caps;
@@ -341,17 +453,21 @@ static int qoriq_ptp_probe(struct platform_device *dev)
"fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) ||
of_property_read_u32(node,
"fsl,max-adj", &qoriq_ptp->caps.max_adj)) {
- pr_err("device tree node missing required elements\n");
- goto no_node;
+ pr_warn("device tree node missing required elements, trying automatic configuration\n");
+
+ if (qoriq_ptp_auto_config(qoriq_ptp, node))
+ goto no_config;
}
+ err = -ENODEV;
+
qoriq_ptp->irq = platform_get_irq(dev, 0);
if (qoriq_ptp->irq < 0) {
pr_err("irq not in device tree\n");
goto no_node;
}
- if (request_irq(qoriq_ptp->irq, isr, 0, DRIVER, qoriq_ptp)) {
+ if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) {
pr_err("request_irq failed\n");
goto no_node;
}
@@ -368,12 +484,27 @@ static int qoriq_ptp_probe(struct platform_device *dev)
spin_lock_init(&qoriq_ptp->lock);
- qoriq_ptp->regs = ioremap(qoriq_ptp->rsrc->start,
- resource_size(qoriq_ptp->rsrc));
- if (!qoriq_ptp->regs) {
+ base = ioremap(qoriq_ptp->rsrc->start,
+ resource_size(qoriq_ptp->rsrc));
+ if (!base) {
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
+
+ qoriq_ptp->base = base;
+
+ if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) {
+ qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET;
+ qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET;
+ qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET;
+ qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET;
+ } else {
+ qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
+ qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
+ qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET;
+ qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET;
+ }
+
ktime_get_real_ts64(&now);
ptp_qoriq_settime(&qoriq_ptp->caps, &now);
@@ -383,13 +514,14 @@ static int qoriq_ptp_probe(struct platform_device *dev)
spin_lock_irqsave(&qoriq_ptp->lock, flags);
- qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl);
- qoriq_write(&qoriq_ptp->regs->tmr_add, qoriq_ptp->tmr_add);
- qoriq_write(&qoriq_ptp->regs->tmr_prsc, qoriq_ptp->tmr_prsc);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
+ regs = &qoriq_ptp->regs;
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
+ qoriq_write(&regs->ctrl_regs->tmr_add, qoriq_ptp->tmr_add);
+ qoriq_write(&regs->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc);
+ qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
+ qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
set_alarm(qoriq_ptp);
- qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
@@ -405,11 +537,12 @@ static int qoriq_ptp_probe(struct platform_device *dev)
return 0;
no_clock:
- iounmap(qoriq_ptp->regs);
+ iounmap(qoriq_ptp->base);
no_ioremap:
release_resource(qoriq_ptp->rsrc);
no_resource:
free_irq(qoriq_ptp->irq, qoriq_ptp);
+no_config:
no_node:
kfree(qoriq_ptp);
no_memory:
@@ -419,12 +552,13 @@ no_memory:
static int qoriq_ptp_remove(struct platform_device *dev)
{
struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev);
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, 0);
- qoriq_write(&qoriq_ptp->regs->tmr_ctrl, 0);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, 0);
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, 0);
ptp_clock_unregister(qoriq_ptp->clock);
- iounmap(qoriq_ptp->regs);
+ iounmap(qoriq_ptp->base);
release_resource(qoriq_ptp->rsrc);
free_irq(qoriq_ptp->irq, qoriq_ptp);
kfree(qoriq_ptp);
@@ -434,6 +568,7 @@ static int qoriq_ptp_remove(struct platform_device *dev)
static const struct of_device_id match_table[] = {
{ .compatible = "fsl,etsec-ptp" },
+ { .compatible = "fsl,fman-ptp-timer" },
{},
};
MODULE_DEVICE_TABLE(of, match_table);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index a4d262db9945..504d252716f2 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -286,7 +286,7 @@ config PWM_MTK_DISP
config PWM_MEDIATEK
tristate "MediaTek PWM support"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_MEDIATEK || RALINK || COMPILE_TEST
help
Generic PWM framework driver for MediaTek ARM SoCs.
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 771859aca4be..7c8d6a168ceb 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -21,8 +21,18 @@
#define BERLIN_PWM_EN 0x0
#define BERLIN_PWM_ENABLE BIT(0)
#define BERLIN_PWM_CONTROL 0x4
-#define BERLIN_PWM_PRESCALE_MASK 0x7
-#define BERLIN_PWM_PRESCALE_MAX 4096
+/*
+ * The prescaler claims to support 8 different moduli, configured using the
+ * low three bits of PWM_CONTROL. (Sequentially, they are 1, 4, 8, 16, 64,
+ * 256, 1024, and 4096.) However, the moduli from 4 to 1024 appear to be
+ * implemented by internally shifting TCNT left without adding additional
+ * bits. So, the max TCNT that actually works for a modulus of 4 is 0x3fff;
+ * for 8, 0x1fff; and so on. This means that those moduli are entirely
+ * useless, as we could just do the shift ourselves. The 4096 modulus is
+ * implemented with a real prescaler, so we do use that, but we treat it
+ * as a flag instead of pretending the modulus is actually configurable.
+ */
+#define BERLIN_PWM_PRESCALE_4096 0x7
#define BERLIN_PWM_INVERT_POLARITY BIT(3)
#define BERLIN_PWM_DUTY 0x8
#define BERLIN_PWM_TCNT 0xc
@@ -46,10 +56,6 @@ static inline struct berlin_pwm_chip *to_berlin_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct berlin_pwm_chip, chip);
}
-static const u32 prescaler_table[] = {
- 1, 4, 8, 16, 64, 256, 1024, 4096
-};
-
static inline u32 berlin_pwm_readl(struct berlin_pwm_chip *chip,
unsigned int channel, unsigned long offset)
{
@@ -86,33 +92,32 @@ static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm_dev,
int duty_ns, int period_ns)
{
struct berlin_pwm_chip *pwm = to_berlin_pwm_chip(chip);
- unsigned int prescale;
+ bool prescale_4096 = false;
u32 value, duty, period;
- u64 cycles, tmp;
+ u64 cycles;
cycles = clk_get_rate(pwm->clk);
cycles *= period_ns;
do_div(cycles, NSEC_PER_SEC);
- for (prescale = 0; prescale < ARRAY_SIZE(prescaler_table); prescale++) {
- tmp = cycles;
- do_div(tmp, prescaler_table[prescale]);
+ if (cycles > BERLIN_PWM_MAX_TCNT) {
+ prescale_4096 = true;
+ cycles >>= 12; /* prescaled by 4096 */
- if (tmp <= BERLIN_PWM_MAX_TCNT)
- break;
+ if (cycles > BERLIN_PWM_MAX_TCNT)
+ return -ERANGE;
}
- if (tmp > BERLIN_PWM_MAX_TCNT)
- return -ERANGE;
-
- period = tmp;
- cycles = tmp * duty_ns;
+ period = cycles;
+ cycles *= duty_ns;
do_div(cycles, period_ns);
duty = cycles;
value = berlin_pwm_readl(pwm, pwm_dev->hwpwm, BERLIN_PWM_CONTROL);
- value &= ~BERLIN_PWM_PRESCALE_MASK;
- value |= prescale;
+ if (prescale_4096)
+ value |= BERLIN_PWM_PRESCALE_4096;
+ else
+ value &= ~BERLIN_PWM_PRESCALE_4096;
berlin_pwm_writel(pwm, pwm_dev->hwpwm, value, BERLIN_PWM_CONTROL);
berlin_pwm_writel(pwm, pwm_dev->hwpwm, duty, BERLIN_PWM_DUTY);
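A worked example of the new path, assuming a 16-bit counter (BERLIN_PWM_MAX_TCNT == 0xffff — an assumption of this sketch, since the constant's definition is outside this hunk) and an illustrative 25 MHz clock: a 10 ms period gives 250000 cycles, which overflows TCNT, so the 4096 prescaler is used and a 50% duty cycle comes out as period = 61, duty = 30:

/* Standalone sketch of the period/duty computation above. */
#include <stdbool.h>
#include <stdint.h>

#define MAX_TCNT 0xffff		/* assumed 16-bit counter */

static int berlin_like_config(uint64_t rate_hz, int duty_ns, int period_ns,
			      uint32_t *period, uint32_t *duty,
			      bool *prescale_4096)
{
	uint64_t cycles = rate_hz * period_ns / 1000000000ULL;

	*prescale_4096 = false;
	if (cycles > MAX_TCNT) {
		*prescale_4096 = true;
		cycles >>= 12;		/* prescale by 4096 */
		if (cycles > MAX_TCNT)
			return -1;	/* period out of range */
	}

	*period = cycles;
	*duty = cycles * duty_ns / period_ns;
	return 0;
}
/* berlin_like_config(25000000, 5000000, 10000000, ...) -> period 61, duty 30 */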
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 9c13694eaa24..98f6ac6cf6ab 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2016 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2, as published by
- * the Free Software Foundation.
- *
* Expose a PWM controlled by the ChromeOS EC to the host processor.
+ *
+ * Copyright (C) 2016 Google, Inc.
*/
#include <linux/module.h>
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 557b4ea16796..883378d055c6 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pwm.h>
@@ -75,6 +76,10 @@ enum fsl_pwm_clk {
FSL_PWM_CLK_MAX
};
+struct fsl_ftm_soc {
+ bool has_enable_bits;
+};
+
struct fsl_pwm_chip {
struct pwm_chip chip;
@@ -87,7 +92,10 @@ struct fsl_pwm_chip {
int period_ns;
+ struct clk *ipg_clk;
struct clk *clk[FSL_PWM_CLK_MAX];
+
+ const struct fsl_ftm_soc *soc;
};
static inline struct fsl_pwm_chip *to_fsl_chip(struct pwm_chip *chip)
@@ -97,16 +105,32 @@ static inline struct fsl_pwm_chip *to_fsl_chip(struct pwm_chip *chip)
static int fsl_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ int ret;
struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
- return clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+ ret = clk_prepare_enable(fpc->ipg_clk);
+ if (!ret && fpc->soc->has_enable_bits) {
+ mutex_lock(&fpc->lock);
+ regmap_update_bits(fpc->regmap, FTM_SC, BIT(pwm->hwpwm + 16),
+ BIT(pwm->hwpwm + 16));
+ mutex_unlock(&fpc->lock);
+ }
+
+ return ret;
}
static void fsl_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+ if (fpc->soc->has_enable_bits) {
+ mutex_lock(&fpc->lock);
+ regmap_update_bits(fpc->regmap, FTM_SC, BIT(pwm->hwpwm + 16),
+ 0);
+ mutex_unlock(&fpc->lock);
+ }
+
+ clk_disable_unprepare(fpc->ipg_clk);
}
static int fsl_pwm_calculate_default_ps(struct fsl_pwm_chip *fpc,
@@ -363,7 +387,7 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
{
int ret;
- ret = clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+ ret = clk_prepare_enable(fpc->ipg_clk);
if (ret)
return ret;
@@ -371,7 +395,7 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
regmap_write(fpc->regmap, FTM_OUTINIT, 0x00);
regmap_write(fpc->regmap, FTM_OUTMASK, 0xFF);
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+ clk_disable_unprepare(fpc->ipg_clk);
return 0;
}
@@ -408,6 +432,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
mutex_init(&fpc->lock);
+ fpc->soc = of_device_get_match_data(&pdev->dev);
fpc->chip.dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -441,6 +466,15 @@ static int fsl_pwm_probe(struct platform_device *pdev)
if (IS_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]))
return PTR_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ /*
+ * ipg_clk is the interface clock for the IP. If not provided, use the
+ * ftm_sys clock as the default.
+ */
+ fpc->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fpc->ipg_clk))
+ fpc->ipg_clk = fpc->clk[FSL_PWM_CLK_SYS];
+
fpc->chip.ops = &fsl_pwm_ops;
fpc->chip.of_xlate = of_pwm_xlate_with_flags;
fpc->chip.of_pwm_n_cells = 3;
@@ -480,7 +514,7 @@ static int fsl_pwm_suspend(struct device *dev)
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
continue;
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+ clk_disable_unprepare(fpc->ipg_clk);
if (!pwm_is_enabled(pwm))
continue;
@@ -503,7 +537,7 @@ static int fsl_pwm_resume(struct device *dev)
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
continue;
- clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+ clk_prepare_enable(fpc->ipg_clk);
if (!pwm_is_enabled(pwm))
continue;
@@ -524,8 +558,17 @@ static const struct dev_pm_ops fsl_pwm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fsl_pwm_suspend, fsl_pwm_resume)
};
+static const struct fsl_ftm_soc vf610_ftm_pwm = {
+ .has_enable_bits = false,
+};
+
+static const struct fsl_ftm_soc imx8qm_ftm_pwm = {
+ .has_enable_bits = true,
+};
+
static const struct of_device_id fsl_pwm_dt_ids[] = {
- { .compatible = "fsl,vf610-ftm-pwm", },
+ { .compatible = "fsl,vf610-ftm-pwm", .data = &vf610_ftm_pwm },
+ { .compatible = "fsl,imx8qm-ftm-pwm", .data = &imx8qm_ftm_pwm },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_pwm_dt_ids);
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 08cbe8120588..1d5242c9cde0 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* simple driver for PWM (Pulse Width Modulator) controller
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Derived from pxa PWM driver by eric miao <eric.miao@marvell.com>
*/
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 328c124773b2..eb6674ce995f 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -57,6 +57,7 @@ static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
struct mtk_pwm_platform_data {
unsigned int num_pwms;
bool pwm45_fixup;
+ bool has_clks;
};
/**
@@ -86,6 +87,9 @@ static int mtk_pwm_clk_enable(struct pwm_chip *chip, struct pwm_device *pwm)
struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
int ret;
+ if (!pc->soc->has_clks)
+ return 0;
+
ret = clk_prepare_enable(pc->clks[MTK_CLK_TOP]);
if (ret < 0)
return ret;
@@ -112,6 +116,9 @@ static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ if (!pc->soc->has_clks)
+ return;
+
clk_disable_unprepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]);
clk_disable_unprepare(pc->clks[MTK_CLK_MAIN]);
clk_disable_unprepare(pc->clks[MTK_CLK_TOP]);
@@ -239,7 +246,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- for (i = 0; i < data->num_pwms + 2; i++) {
+ for (i = 0; i < data->num_pwms + 2 && pc->soc->has_clks; i++) {
pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
if (IS_ERR(pc->clks[i])) {
dev_err(&pdev->dev, "clock: %s fail: %ld\n",
@@ -274,22 +281,32 @@ static int mtk_pwm_remove(struct platform_device *pdev)
static const struct mtk_pwm_platform_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
+ .has_clks = true,
};
static const struct mtk_pwm_platform_data mt7622_pwm_data = {
.num_pwms = 6,
.pwm45_fixup = false,
+ .has_clks = true,
};
static const struct mtk_pwm_platform_data mt7623_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = true,
+ .has_clks = true,
+};
+
+static const struct mtk_pwm_platform_data mt7628_pwm_data = {
+ .num_pwms = 4,
+ .pwm45_fixup = true,
+ .has_clks = false,
};
static const struct of_device_id mtk_pwm_of_match[] = {
{ .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
{ .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
{ .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+ { .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 822860b4801a..c1ed641b3e26 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -458,7 +458,6 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
struct meson_pwm_channel *channels)
{
struct device *dev = meson->chip.dev;
- struct device_node *np = dev->of_node;
struct clk_init_data init;
unsigned int i;
char name[255];
@@ -467,7 +466,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
for (i = 0; i < meson->chip.npwm; i++) {
struct meson_pwm_channel *channel = &channels[i];
- snprintf(name, sizeof(name), "%pOF#mux%u", np, i);
+ snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
init.name = name;
init.ops = &clk_mux_ops;
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index a6017ad9926c..04c0f6b95c1a 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -1,12 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/clk.h>
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 665da3c8fbce..f45798679e3c 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -264,8 +264,9 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
timer_pdata = dev_get_platdata(&timer_pdev->dev);
if (!timer_pdata) {
- dev_err(&pdev->dev, "dmtimer pdata structure NULL\n");
- ret = -EINVAL;
+ dev_dbg(&pdev->dev,
+ "dmtimer pdata structure NULL, deferring probe\n");
+ ret = -EPROBE_DEFER;
goto put;
}
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 7c13e2505080..0059b24cfdc3 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -217,10 +217,8 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
static int stm32_pwm_lp_remove(struct platform_device *pdev)
{
struct stm32_pwm_lp *priv = platform_get_drvdata(pdev);
- unsigned int i;
- for (i = 0; i < priv->chip.npwm; i++)
- pwm_disable(&priv->chip.pwms[i]);
+ pwm_disable(&priv->chip.pwms[0]);
return pwmchip_remove(&priv->chip);
}
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 4c22cb395040..f7b8a86fa5c5 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -33,10 +33,6 @@
#define TBCTL 0x00
#define TBPRD 0x0A
-#define TBCTL_RUN_MASK (BIT(15) | BIT(14))
-#define TBCTL_STOP_NEXT 0
-#define TBCTL_STOP_ON_CYCLE BIT(14)
-#define TBCTL_FREE_RUN (BIT(15) | BIT(14))
#define TBCTL_PRDLD_MASK BIT(3)
#define TBCTL_PRDLD_SHDW 0
#define TBCTL_PRDLD_IMDT BIT(3)
@@ -360,7 +356,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Channels polarity can be configured from action qualifier module */
configure_polarity(pc, pwm->hwpwm);
- /* Enable TBCLK before enabling PWM device */
+ /* Enable TBCLK */
ret = clk_enable(pc->tbclk);
if (ret) {
dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n",
@@ -368,9 +364,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return ret;
}
- /* Enable time counter for free_run */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
-
return 0;
}
@@ -388,6 +381,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
aqcsfrc_mask = AQCSFRC_CSFA_MASK;
}
+ /* Update shadow register first before modifying active register */
+ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
* Action Qualifier control on PWM output from next TBCLK
@@ -400,9 +395,6 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Disabling TBCLK on PWM disable */
clk_disable(pc->tbclk);
- /* Stop Time base counter */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
-
/* Disable clock on PWM disable */
pm_runtime_put_sync(chip->dev);
}
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index a8cb8d2f2abb..cbe467ff1aba 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1006,7 +1006,6 @@ out_free:
static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
struct mport_cdev_priv *priv;
- struct mport_dev *md;
struct rio_async_tx_wait w_param;
struct mport_dma_req *req;
dma_cookie_t cookie;
@@ -1016,7 +1015,6 @@ static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
int ret;
priv = (struct mport_cdev_priv *)filp->private_data;
- md = priv->md;
if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
return -EFAULT;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 5dbccf5f3037..329cdd33ed62 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -180,9 +180,9 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
-config REGULATOR_BD71837
+config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
- depends on MFD_BD71837
+ depends on MFD_ROHM_BD718XX
help
This driver supports voltage regulators on ROHM BD71837 PMIC.
This will enable support for the software controllable buck
@@ -633,12 +633,12 @@ config REGULATOR_PCF50633
on PCF50633
config REGULATOR_PFUZE100
- tristate "Freescale PFUZE100/200/3000 regulator driver"
+ tristate "Freescale PFUZE100/200/3000/3001 regulator driver"
depends on I2C
select REGMAP_I2C
help
Say y here to support the regulators found on the Freescale
- PFUZE100/200/3000 PMIC.
+ PFUZE100/200/3000/3001 PMIC.
config REGULATOR_PV88060
tristate "Powerventure Semiconductor PV88060 regulator"
@@ -682,6 +682,15 @@ config REGULATOR_QCOM_RPM
Qualcomm RPM as a module. The module will be named
"qcom_rpm-regulator".
+config REGULATOR_QCOM_RPMH
+ tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
+ depends on QCOM_RPMH || COMPILE_TEST
+ help
+ This driver supports control of PMIC regulators via the RPMh hardware
+ block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
+ control allows for voting on regulator state between multiple
+ processors within the SoC.
+
config REGULATOR_QCOM_SMD_RPM
tristate "Qualcomm SMD based RPM regulator driver"
depends on QCOM_SMD_RPM
@@ -950,6 +959,14 @@ config REGULATOR_TWL4030
This driver supports the voltage regulators provided by
this family of companion chips.
+config REGULATOR_UNIPHIER
+ tristate "UniPhier regulator driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && MFD_SYSCON
+ default ARCH_UNIPHIER
+ help
+ Support for regulators implemented on Socionext UniPhier SoCs.
+
config REGULATOR_VCTRL
tristate "Voltage controlled regulators"
depends on OF
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bd818ceb7c72..801d9a34a203 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
-obj-$(CONFIG_REGULATOR_BD71837) += bd71837-regulator.o
+obj-$(CONFIG_REGULATOR_BD718XX) += bd71837-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
@@ -78,6 +78,7 @@ obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
obj-$(CONFIG_REGULATOR_TPS65132) += tps65132-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
+obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index f6d6a4ad9e8a..e976d073f28d 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -36,6 +36,8 @@ struct arizona_ldo1 {
struct regulator_consumer_supply supply;
struct regulator_init_data init_data;
+
+ struct gpio_desc *ena_gpiod;
};
static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev,
@@ -253,12 +255,17 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
}
}
- /* We assume that high output = regulator off */
- config.ena_gpiod = devm_gpiod_get_optional(&pdev->dev, "wlf,ldoena",
- GPIOD_OUT_HIGH);
+ /* We assume that high output = regulator off.
+ * Don't use devm: the GPIO must be requested against the parent
+ * device, so devm cleanup would happen at the wrong time.
+ */
+ config.ena_gpiod = gpiod_get_optional(parent_dev, "wlf,ldoena",
+ GPIOD_OUT_LOW);
if (IS_ERR(config.ena_gpiod))
return PTR_ERR(config.ena_gpiod);
+ ldo1->ena_gpiod = config.ena_gpiod;
+
if (pdata->init_data)
config.init_data = pdata->init_data;
else
@@ -276,6 +283,9 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
of_node_put(config.of_node);
if (IS_ERR(ldo1->regulator)) {
+ if (config.ena_gpiod)
+ gpiod_put(config.ena_gpiod);
+
ret = PTR_ERR(ldo1->regulator);
dev_err(&pdev->dev, "Failed to register LDO1 supply: %d\n",
ret);
@@ -334,8 +344,19 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
return ret;
}
+static int arizona_ldo1_remove(struct platform_device *pdev)
+{
+ struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
+
+ if (ldo1->ena_gpiod)
+ gpiod_put(ldo1->ena_gpiod);
+
+ return 0;
+}
+
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
+ .remove = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
},
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c
index 6eae4d0432a2..0f8ac8dec3e1 100644
--- a/drivers/regulator/bd71837-regulator.c
+++ b/drivers/regulator/bd71837-regulator.c
@@ -2,19 +2,18 @@
// Copyright (C) 2018 ROHM Semiconductors
// bd71837-regulator.c ROHM BD71837MWV regulator driver
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd718x7.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/mfd/bd71837.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
struct bd71837_pmic {
struct regulator_desc descs[BD71837_REGULATOR_CNT];
@@ -39,7 +38,7 @@ static int bd71837_buck1234_set_ramp_delay(struct regulator_dev *rdev,
int id = rdev->desc->id;
unsigned int ramp_value = BUCK_RAMPRATE_10P00MV;
- dev_dbg(&(pmic->pdev->dev), "Buck[%d] Set Ramp = %d\n", id + 1,
+ dev_dbg(&pmic->pdev->dev, "Buck[%d] Set Ramp = %d\n", id + 1,
ramp_delay);
switch (ramp_delay) {
case 1 ... 1250:
@@ -73,14 +72,10 @@ static int bd71837_buck1234_set_ramp_delay(struct regulator_dev *rdev,
static int bd71837_set_voltage_sel_restricted(struct regulator_dev *rdev,
unsigned int sel)
{
- int ret;
-
- ret = regulator_is_enabled_regmap(rdev);
- if (!ret)
- ret = regulator_set_voltage_sel_regmap(rdev, sel);
- else if (ret == 1)
- ret = -EBUSY;
- return ret;
+ if (regulator_is_enabled_regmap(rdev))
+ return -EBUSY;
+
+ return regulator_set_voltage_sel_regmap(rdev, sel);
}
static struct regulator_ops bd71837_ldo_regulator_ops = {
@@ -195,7 +190,7 @@ static const struct regulator_linear_range bd71837_ldo1_voltage_ranges[] = {
* LDO2
* 0.8 or 0.9V
*/
-const unsigned int ldo_2_volts[] = {
+static const unsigned int ldo_2_volts[] = {
900000, 800000
};
@@ -495,7 +490,6 @@ struct reg_init {
static int bd71837_probe(struct platform_device *pdev)
{
struct bd71837_pmic *pmic;
- struct bd71837_board *pdata;
struct regulator_config config = { 0 };
struct reg_init pmic_regulator_inits[] = {
{
@@ -548,8 +542,7 @@ static int bd71837_probe(struct platform_device *pdev)
int i, err;
- pmic = devm_kzalloc(&pdev->dev, sizeof(struct bd71837_pmic),
- GFP_KERNEL);
+ pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -564,7 +557,6 @@ static int bd71837_probe(struct platform_device *pdev)
goto err;
}
platform_set_drvdata(pdev, pmic);
- pdata = dev_get_platdata(pmic->mfd->dev);
/* Register LOCK release */
err = regmap_update_bits(pmic->mfd->regmap, BD71837_REG_REGLOCK,
@@ -573,8 +565,8 @@ static int bd71837_probe(struct platform_device *pdev)
dev_err(&pmic->pdev->dev, "Failed to unlock PMIC (%d)\n", err);
goto err;
} else {
- dev_dbg(&pmic->pdev->dev, "%s: Unlocked lock register 0x%x\n",
- __func__, BD71837_REG_REGLOCK);
+ dev_dbg(&pmic->pdev->dev, "Unlocked lock register 0x%x\n",
+ BD71837_REG_REGLOCK);
}
for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
@@ -584,9 +576,6 @@ static int bd71837_probe(struct platform_device *pdev)
desc = &pmic->descs[i];
- if (pdata)
- config.init_data = pdata->init_data[i];
-
config.dev = pdev->dev.parent;
config.driver_data = pmic;
config.regmap = pmic->mfd->regmap;
@@ -619,8 +608,6 @@ static int bd71837_probe(struct platform_device *pdev)
pmic->rdev[i] = rdev;
}
- return 0;
-
err:
return err;
}
@@ -628,7 +615,6 @@ err:
static struct platform_driver bd71837_regulator = {
.driver = {
.name = "bd71837-pmic",
- .owner = THIS_MODULE,
},
.probe = bd71837_probe,
};
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index be574eb444eb..274c5ed7cd73 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -30,6 +30,7 @@ struct bd9571mwv_reg {
/* DDR Backup Power */
u8 bkup_mode_cnt_keepon; /* from "rohm,ddr-backup-power" */
u8 bkup_mode_cnt_saved;
+ bool bkup_mode_enabled;
/* Power switch type */
bool rstbmode_level;
@@ -171,13 +172,60 @@ static int bd9571mwv_bkup_mode_write(struct bd9571mwv *bd, unsigned int mode)
return 0;
}
+static ssize_t backup_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
+}
+
+static ssize_t backup_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ if (!count)
+ return 0;
+
+ ret = kstrtobool(buf, &bdreg->bkup_mode_enabled);
+ if (ret)
+ return ret;
+
+ if (!bdreg->rstbmode_level)
+ return count;
+
+ /*
+ * Configure DDR Backup Mode, to change the role of the accessory power
+ * switch from a power switch to a wake-up switch, or vice versa
+ */
+ ret = bd9571mwv_bkup_mode_read(bdreg->bd, &mode);
+ if (ret)
+ return ret;
+
+ mode &= ~BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK;
+ if (bdreg->bkup_mode_enabled)
+ mode |= bdreg->bkup_mode_cnt_keepon;
+
+ ret = bd9571mwv_bkup_mode_write(bdreg->bd, mode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(backup_mode);
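With the attribute in place, user space can flip the mode at runtime by writing to backup_mode (kstrtobool() accepts the usual "on"/"off"/"1"/"0" spellings), e.g. enabling it before suspend on systems with a level-type power switch.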
+
static int bd9571mwv_suspend(struct device *dev)
{
struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
unsigned int mode;
int ret;
- if (!device_may_wakeup(dev))
+ if (!bdreg->bkup_mode_enabled)
return 0;
/* Save DDR Backup Mode */
@@ -204,7 +252,7 @@ static int bd9571mwv_resume(struct device *dev)
{
struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
- if (!device_may_wakeup(dev))
+ if (!bdreg->bkup_mode_enabled)
return 0;
/* Restore DDR Backup Mode */
@@ -215,9 +263,15 @@ static const struct dev_pm_ops bd9571mwv_pm = {
SET_SYSTEM_SLEEP_PM_OPS(bd9571mwv_suspend, bd9571mwv_resume)
};
+static int bd9571mwv_regulator_remove(struct platform_device *pdev)
+{
+ device_remove_file(&pdev->dev, &dev_attr_backup_mode);
+ return 0;
+}
#define DEV_PM_OPS &bd9571mwv_pm
#else
#define DEV_PM_OPS NULL
+#define bd9571mwv_regulator_remove NULL
#endif /* CONFIG_PM_SLEEP */
static int bd9571mwv_regulator_probe(struct platform_device *pdev)
@@ -270,14 +324,21 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
+#ifdef CONFIG_PM_SLEEP
if (bdreg->bkup_mode_cnt_keepon) {
- device_set_wakeup_capable(&pdev->dev, true);
+ int ret;
+
/*
- * Wakeup is enabled by default in pulse mode, but needs
+ * Backup mode is enabled by default in pulse mode, but needs
* explicit user setup in level mode.
*/
- device_set_wakeup_enable(&pdev->dev, bdreg->rstbmode_pulse);
+ bdreg->bkup_mode_enabled = bdreg->rstbmode_pulse;
+
+ ret = device_create_file(&pdev->dev, &dev_attr_backup_mode);
+ if (ret)
+ return ret;
}
+#endif /* CONFIG_PM_SLEEP */
return 0;
}
@@ -294,6 +355,7 @@ static struct platform_driver bd9571mwv_regulator_driver = {
.pm = DEV_PM_OPS,
},
.probe = bd9571mwv_regulator_probe,
+ .remove = bd9571mwv_regulator_remove,
.id_table = bd9571mwv_regulator_id_table,
};
module_platform_driver(bd9571mwv_regulator_driver);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 6ed568b96c0e..bb1324f93143 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1740,6 +1740,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
rdev->use_count = 0;
}
+ device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
+
return regulator;
}
@@ -1829,9 +1831,21 @@ static void _regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
- /* remove any sysfs entries */
- if (regulator->dev)
+ if (regulator->dev) {
+ int count = 0;
+ struct regulator *r;
+
+ list_for_each_entry(r, &rdev->consumer_list, list)
+ if (r->dev == regulator->dev)
+ count++;
+
+ if (count == 1)
+ device_link_remove(regulator->dev, &rdev->dev);
+
+ /* remove any sysfs entries */
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+ }
+
regulator_lock(rdev);
list_del(&regulator->list);
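The consumer count above is needed because device_link_add() reuses the existing link when the same consumer/supplier pair is linked again, so a device holding several regulators from one rdev shares a single link; that link must only be removed when the device's last regulator is put.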
@@ -4441,7 +4455,7 @@ void regulator_unregister(struct regulator_dev *rdev)
EXPORT_SYMBOL_GPL(regulator_unregister);
#ifdef CONFIG_SUSPEND
-static int _regulator_suspend_late(struct device *dev, void *data)
+static int _regulator_suspend(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
suspend_state_t *state = data;
@@ -4455,20 +4469,20 @@ static int _regulator_suspend_late(struct device *dev, void *data)
}
/**
- * regulator_suspend_late - prepare regulators for system wide suspend
+ * regulator_suspend - prepare regulators for system wide suspend
* @state: system suspend state
*
* Configure each regulator with its suspend operating parameters for state.
*/
-static int regulator_suspend_late(struct device *dev)
+static int regulator_suspend(struct device *dev)
{
suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend_late);
+ _regulator_suspend);
}
-static int _regulator_resume_early(struct device *dev, void *data)
+static int _regulator_resume(struct device *dev, void *data)
{
int ret = 0;
struct regulator_dev *rdev = dev_to_rdev(dev);
@@ -4481,35 +4495,35 @@ static int _regulator_resume_early(struct device *dev, void *data)
regulator_lock(rdev);
- if (rdev->desc->ops->resume_early &&
+ if (rdev->desc->ops->resume &&
(rstate->enabled == ENABLE_IN_SUSPEND ||
rstate->enabled == DISABLE_IN_SUSPEND))
- ret = rdev->desc->ops->resume_early(rdev);
+ ret = rdev->desc->ops->resume(rdev);
regulator_unlock(rdev);
return ret;
}
-static int regulator_resume_early(struct device *dev)
+static int regulator_resume(struct device *dev)
{
suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_resume_early);
+ _regulator_resume);
}
#else /* !CONFIG_SUSPEND */
-#define regulator_suspend_late NULL
-#define regulator_resume_early NULL
+#define regulator_suspend NULL
+#define regulator_resume NULL
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_PM
static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
- .suspend_late = regulator_suspend_late,
- .resume_early = regulator_resume_early,
+ .suspend = regulator_suspend,
+ .resume = regulator_resume,
};
#endif
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index bd910fe123d9..2131457937b7 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -271,6 +271,29 @@ static struct regulator_ops cpcap_regulator_ops = {
};
static const unsigned int unknown_val_tbl[] = { 0, };
+static const unsigned int sw2_sw4_val_tbl[] = { 612500, 625000, 637500,
+ 650000, 662500, 675000,
+ 687500, 700000, 712500,
+ 725000, 737500, 750000,
+ 762500, 775000, 787500,
+ 800000, 812500, 825000,
+ 837500, 850000, 862500,
+ 875000, 887500, 900000,
+ 912500, 925000, 937500,
+ 950000, 962500, 975000,
+ 987500, 1000000, 1012500,
+ 1025000, 1037500, 1050000,
+ 1062500, 1075000, 1087500,
+ 1100000, 1112500, 1125000,
+ 1137500, 1150000, 1162500,
+ 1175000, 1187500, 1200000,
+ 1212500, 1225000, 1237500,
+ 1250000, 1262500, 1275000,
+ 1287500, 1300000, 1312500,
+ 1325000, 1337500, 1350000,
+ 1362500, 1375000, 1387500,
+ 1400000, 1412500, 1425000,
+ 1437500, 1450000, 1462500, };
static const unsigned int sw5_val_tbl[] = { 0, 5050000, };
static const unsigned int vcam_val_tbl[] = { 2600000, 2700000, 2800000,
2900000, };
@@ -389,6 +412,82 @@ static struct cpcap_regulator omap4_regulators[] = {
{ /* sentinel */ },
};
+static struct cpcap_regulator xoom_regulators[] = {
+ CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW1_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW2_SEL, sw2_sw4_val_tbl,
+ 0xf00, 0x7f, 0, 0x800, 0, 120),
+ CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW3_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW4_SEL, sw2_sw4_val_tbl,
+ 0xf00, 0x7f, 0, 0x900, 0, 100),
+ CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW5_SEL, sw5_val_tbl,
+ 0x2a, 0, 0, 0x22, 0, 0),
+ CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW6_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
+ 0x87, 0x30, 4, 0x7, 0, 420),
+ CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
+ 0x47, 0x10, 4, 0x7, 0, 350),
+ CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
+ 0x87, 0x30, 4, 0x3, 0, 420),
+ CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
+ 0x87, 0x30, 4, 0x5, 0, 420),
+ CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
+ 0x80, 0xf, 0, 0x80, 0, 420),
+ CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
+ 0x17, 0, 0, 0x2, 0, 0),
+ CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
+ 0x87, 0x38, 3, 0x2, 0, 420),
+ CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
+ 0x43, 0x18, 3, 0x1, 0, 420),
+ CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
+ 0xac, 0x2, 1, 0xc, 0, 10),
+ CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
+ 0x23, 0x8, 3, 0x3, 0, 10),
+ CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
+ 0x23, 0x8, 3, 0x3, 0, 420),
+ CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
+ 0x47, 0x10, 4, 0x5, 0, 420),
+ CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
+ 0x20c, 0xc0, 6, 0x8, 0, 420),
+ CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+ 0xffff, vsim_val_tbl,
+ 0x23, 0x8, 3, 0x3, 0, 420),
+ CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+ 0xffff, vsimcard_val_tbl,
+ 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+ CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
+ 0x1, 0xc, 2, 0, 0x1, 500),
+ CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
+ 0x11c, 0x40, 6, 0xc, 0, 0),
+ CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
+ CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
+ 0x16, 0x1, 0, 0x4, 0, 0),
+ { /* sentinel */ },
+};
+
static const struct of_device_id cpcap_regulator_id_table[] = {
{
.compatible = "motorola,cpcap-regulator",
@@ -397,6 +496,10 @@ static const struct of_device_id cpcap_regulator_id_table[] = {
.compatible = "motorola,mapphone-cpcap-regulator",
.data = omap4_regulators,
},
+ {
+ .compatible = "motorola,xoom-cpcap-regulator",
+ .data = xoom_regulators,
+ },
{},
};
MODULE_DEVICE_TABLE(of, cpcap_regulator_id_table);
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 2df26f36c687..8cbcd2a3eb20 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -98,7 +98,7 @@ struct da9063_regulator_info {
struct da9063_dev_model {
const struct da9063_regulator_info *regulator_info;
unsigned n_regulators;
- unsigned dev_model;
+ enum da9063_type type;
};
/* Single regulator settings */
@@ -530,6 +530,32 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
DA9063_BMEM_ILIM_MASK),
},
{
+ DA9063_LDO(DA9063, LDO3, 900, 20, 3440),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO3_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM),
+ },
+ {
+ DA9063_LDO(DA9063, LDO7, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO7_CONT, DA9063_VLDO7_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM),
+ },
+ {
+ DA9063_LDO(DA9063, LDO8, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO8_CONT, DA9063_VLDO8_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM),
+ },
+ {
+ DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
+ },
+ {
+ DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO11_CONT, DA9063_VLDO11_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM),
+ },
+
+ /* The following LDOs are present only on DA9063, not on DA9063L */
+ {
DA9063_LDO(DA9063, LDO1, 600, 20, 1860),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO1_SEL),
},
@@ -538,11 +564,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO2_SEL),
},
{
- DA9063_LDO(DA9063, LDO3, 900, 20, 3440),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO3_SEL),
- .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM),
- },
- {
DA9063_LDO(DA9063, LDO4, 900, 20, 3440),
.suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VLDO4_SEL),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO4_LIM),
@@ -555,29 +576,11 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
DA9063_LDO(DA9063, LDO6, 900, 50, 3600),
.suspend = BFIELD(DA9063_REG_LDO6_CONT, DA9063_VLDO6_SEL),
},
- {
- DA9063_LDO(DA9063, LDO7, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO7_CONT, DA9063_VLDO7_SEL),
- .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM),
- },
- {
- DA9063_LDO(DA9063, LDO8, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO8_CONT, DA9063_VLDO8_SEL),
- .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM),
- },
- {
- DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
- },
+
{
DA9063_LDO(DA9063, LDO10, 900, 50, 3600),
.suspend = BFIELD(DA9063_REG_LDO10_CONT, DA9063_VLDO10_SEL),
},
- {
- DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO11_CONT, DA9063_VLDO11_SEL),
- .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM),
- },
};
/* Link chip model with regulators info table */
@@ -585,7 +588,12 @@ static struct da9063_dev_model regulators_models[] = {
{
.regulator_info = da9063_regulator_info,
.n_regulators = ARRAY_SIZE(da9063_regulator_info),
- .dev_model = PMIC_DA9063,
+ .type = PMIC_TYPE_DA9063,
+ },
+ {
+ .regulator_info = da9063_regulator_info,
+ .n_regulators = ARRAY_SIZE(da9063_regulator_info) - 6,
+ .type = PMIC_TYPE_DA9063L,
},
{ }
};
@@ -641,28 +649,34 @@ static struct of_regulator_match da9063_matches[] = {
[DA9063_ID_BPERI] = { .name = "bperi", },
[DA9063_ID_BCORES_MERGED] = { .name = "bcores-merged" },
[DA9063_ID_BMEM_BIO_MERGED] = { .name = "bmem-bio-merged", },
+ [DA9063_ID_LDO3] = { .name = "ldo3", },
+ [DA9063_ID_LDO7] = { .name = "ldo7", },
+ [DA9063_ID_LDO8] = { .name = "ldo8", },
+ [DA9063_ID_LDO9] = { .name = "ldo9", },
+ [DA9063_ID_LDO11] = { .name = "ldo11", },
+ /* The following LDOs are present only on DA9063, not on DA9063L */
[DA9063_ID_LDO1] = { .name = "ldo1", },
[DA9063_ID_LDO2] = { .name = "ldo2", },
- [DA9063_ID_LDO3] = { .name = "ldo3", },
[DA9063_ID_LDO4] = { .name = "ldo4", },
[DA9063_ID_LDO5] = { .name = "ldo5", },
[DA9063_ID_LDO6] = { .name = "ldo6", },
- [DA9063_ID_LDO7] = { .name = "ldo7", },
- [DA9063_ID_LDO8] = { .name = "ldo8", },
- [DA9063_ID_LDO9] = { .name = "ldo9", },
[DA9063_ID_LDO10] = { .name = "ldo10", },
- [DA9063_ID_LDO11] = { .name = "ldo11", },
};
static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
struct platform_device *pdev,
struct of_regulator_match **da9063_reg_matches)
{
+ struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
struct da9063_regulators_pdata *pdata;
struct da9063_regulator_data *rdata;
struct device_node *node;
+ int da9063_matches_len = ARRAY_SIZE(da9063_matches);
int i, n, num;
+ if (da9063->type == PMIC_TYPE_DA9063L)
+ da9063_matches_len -= 6;
+
node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!node) {
dev_err(&pdev->dev, "Regulators device node not found\n");
@@ -670,7 +684,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
}
num = of_regulator_match(&pdev->dev, node, da9063_matches,
- ARRAY_SIZE(da9063_matches));
+ da9063_matches_len);
of_node_put(node);
if (num < 0) {
dev_err(&pdev->dev, "Failed to match regulators\n");
@@ -689,7 +703,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
pdata->n_regulators = num;
n = 0;
- for (i = 0; i < ARRAY_SIZE(da9063_matches); i++) {
+ for (i = 0; i < da9063_matches_len; i++) {
if (!da9063_matches[i].init_data)
continue;
@@ -741,12 +755,12 @@ static int da9063_regulator_probe(struct platform_device *pdev)
/* Find regulators set for particular device model */
for (model = regulators_models; model->regulator_info; model++) {
- if (model->dev_model == da9063->model)
+ if (model->type == da9063->type)
break;
}
if (!model->regulator_info) {
dev_err(&pdev->dev, "Chip model not recognised (%u)\n",
- da9063->model);
+ da9063->type);
return -ENODEV;
}
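
Editor's note: the "- 6" above must always equal the number of DA9063-only entries kept at the tail of the table (ldo1, ldo2, ldo4, ldo5, ldo6, ldo10). A hedged refinement, not part of the patch, would name that count once so the model table and the DT-match trimming cannot drift apart:

	#define DA9063L_MISSING_REGULATORS	6	/* ldo1/2/4/5/6/10 */

	static const struct da9063_dev_model regulators_models_sketch[] = {
		{
			.regulator_info	= da9063_regulator_info,
			.n_regulators	= ARRAY_SIZE(da9063_regulator_info),
			.type		= PMIC_TYPE_DA9063,
		},
		{
			.regulator_info	= da9063_regulator_info,
			.n_regulators	= ARRAY_SIZE(da9063_regulator_info)
					  - DA9063L_MISSING_REGULATORS,
			.type		= PMIC_TYPE_DA9063L,
		},
		{ }
	};
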
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 0db288ce319c..bc7f4751bf9c 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -1,19 +1,9 @@
-/*
- * max14577.c - Regulator driver for the Maxim 14577/77836
- *
- * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577.c - Regulator driver for the Maxim 14577/77836
+//
+// Copyright (C) 2013,2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index c301f3733475..bee060937f56 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -1,26 +1,12 @@
-/*
- * max77686.c - Regulator driver for the Maxim 77686
- *
- * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@samsung.com>
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77686.c - Regulator driver for the Maxim 77686
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chiwoong Byun <woong.byun@samsung.com>
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+//
+// This driver is based on max8997.c
#include <linux/kernel.h>
#include <linux/bug.h>
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c
index e7000e777292..077ecbbfdf76 100644
--- a/drivers/regulator/max77693-regulator.c
+++ b/drivers/regulator/max77693-regulator.c
@@ -1,26 +1,12 @@
-/*
- * max77693.c - Regulator driver for the Maxim 77693 and 77843
- *
- * Copyright (C) 2013-2015 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max77686.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693.c - Regulator driver for the Maxim 77693 and 77843
+//
+// Copyright (C) 2013-2015 Samsung Electronics
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+// Krzysztof Kozlowski <krzk@kernel.org>
+//
+// This driver is based on max77686.c
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index b6261903818c..c30cf5c9f2de 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -1,25 +1,15 @@
-/*
- * max77802.c - Regulator driver for the Maxim 77802
- *
- * Copyright (C) 2013-2014 Google, Inc
- * Simon Glass <sjg@chromium.org>
- *
- * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@samsung.com>
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77802.c - Regulator driver for the Maxim 77802
+//
+// Copyright (C) 2013-2014 Google, Inc
+// Simon Glass <sjg@chromium.org>
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chiwoong Byun <woong.byun@samsung.com>
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+//
+// This driver is based on max8997.c
#include <linux/kernel.h>
#include <linux/bug.h>
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index a8ea30ee18a6..ad0c806b0737 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -1,25 +1,11 @@
-/*
- * max8997.c - Regulator driver for the Maxim 8997/8966
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997.c - Regulator driver for the Maxim 8997/8966
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998.c
#include <linux/bug.h>
#include <linux/err.h>
@@ -165,8 +151,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
int rid = rdev_get_id(rdev);
int val;
- if (rid >= ARRAY_SIZE(reg_voltage_map) ||
- rid < 0)
+ if (rid < 0 || rid >= ARRAY_SIZE(reg_voltage_map))
return -EINVAL;
desc = reg_voltage_map[rid];
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 6b9f262ebbb0..271bb736f3f5 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -1,24 +1,10 @@
-/*
- * max8998.c - Voltage regulator driver for the Maxim 8998
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- * Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998.c - Voltage regulator driver for the Maxim 8998
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// Kyungmin Park <kyungmin.park@samsung.com>
+// Marek Szyprowski <m.szyprowski@samsung.com>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8d9dbcc775ea..31c3a236120a 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/regmap.h>
+#define PFUZE_FLAG_DISABLE_SW BIT(1)
+
#define PFUZE_NUMREGS 128
#define PFUZE100_VOL_OFFSET 0
#define PFUZE100_STANDBY_OFFSET 1
@@ -44,16 +46,18 @@
#define PFUZE100_VGEN5VOL 0x70
#define PFUZE100_VGEN6VOL 0x71
-enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
+enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3, PFUZE3001 = 0x31, };
struct pfuze_regulator {
struct regulator_desc desc;
unsigned char stby_reg;
unsigned char stby_mask;
+ bool sw_reg;
};
struct pfuze_chip {
int chip_id;
+ int flags;
struct regmap *regmap;
struct device *dev;
struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
@@ -92,6 +96,7 @@ static const struct i2c_device_id pfuze_device_id[] = {
{.name = "pfuze100", .driver_data = PFUZE100},
{.name = "pfuze200", .driver_data = PFUZE200},
{.name = "pfuze3000", .driver_data = PFUZE3000},
+ {.name = "pfuze3001", .driver_data = PFUZE3001},
{ }
};
MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -100,6 +105,7 @@ static const struct of_device_id pfuze_dt_ids[] = {
{ .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
{ .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
{ .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
+ { .compatible = "fsl,pfuze3001", .data = (void *)PFUZE3001},
{ }
};
MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -108,10 +114,28 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
+ bool reg_has_ramp_delay;
unsigned int ramp_bits;
int ret;
- if (id < PFUZE100_SWBST) {
+ switch (pfuze100->chip_id) {
+ case PFUZE3001:
+ /* no dynamic voltage scaling for PF3001 */
+ reg_has_ramp_delay = false;
+ break;
+ case PFUZE3000:
+ reg_has_ramp_delay = (id < PFUZE3000_SWBST);
+ break;
+ case PFUZE200:
+ reg_has_ramp_delay = (id < PFUZE200_SWBST);
+ break;
+ case PFUZE100:
+ default:
+ reg_has_ramp_delay = (id < PFUZE100_SWBST);
+ break;
+ }
+
+ if (reg_has_ramp_delay) {
ramp_delay = 12500 / ramp_delay;
ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3);
ret = regmap_update_bits(pfuze100->regmap,
@@ -119,8 +143,9 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
0xc0, ramp_bits << 6);
if (ret < 0)
dev_err(pfuze100->dev, "ramp failed, err %d\n", ret);
- } else
+ } else {
ret = -EACCES;
+ }
return ret;
}
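
Editor's note: the two-shift expression above encodes the requested slew rate into the 2-bit field at bits 7:6 (mask 0xc0). A standalone sanity check, assuming ramp_delay arrives in uV/us as the regulator core passes it to set_ramp_delay(), shows each field step roughly halving the rate:

	#include <stdio.h>

	int main(void)
	{
		/* requested slew rates in uV/us */
		const int rates[] = { 12500, 6250, 3125, 1563 };

		for (int i = 0; i < 4; i++) {
			int q = 12500 / rates[i];		 /* quotients 1, 2, 4, 7 */
			unsigned int bits = (q >> 1) - (q >> 3); /* fields 0, 1, 2, 3 */
			printf("%5d uV/us -> ramp_bits %u\n", rates[i], bits);
		}
		return 0;
	}

Only rates down to roughly 1.6 mV/us land in the valid 0..3 range of the 2-bit field; the code leaves slower requests to the hardware's clamping behaviour.
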
@@ -142,6 +167,14 @@ static const struct regulator_ops pfuze100_fixed_regulator_ops = {
};
static const struct regulator_ops pfuze100_sw_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pfuze100_set_ramp_delay,
+};
+
+static const struct regulator_ops pfuze100_sw_disable_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -192,13 +225,11 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
.vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
.vsel_mask = 0x3f, \
.enable_reg = (base) + PFUZE100_MODE_OFFSET, \
- .enable_val = 0xc, \
- .disable_val = 0x0, \
.enable_mask = 0xf, \
- .enable_time = 500, \
}, \
.stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
.stby_mask = 0x3f, \
+ .sw_reg = true, \
}
#define PFUZE100_SWB_REG(_chip, _name, base, mask, voltages) \
@@ -361,6 +392,19 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
+static struct pfuze_regulator pfuze3001_regulators[] = {
+ PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
+ PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE3000_VCC_REG(PFUZE3001, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
+ PFUZE3000_VCC_REG(PFUZE3001, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
#ifdef CONFIG_OF
/* PFUZE100 */
static struct of_regulator_match pfuze100_matches[] = {
@@ -418,6 +462,21 @@ static struct of_regulator_match pfuze3000_matches[] = {
{ .name = "vldo4", },
};
+/* PFUZE3001 */
+static struct of_regulator_match pfuze3001_matches[] = {
+ { .name = "sw1", },
+ { .name = "sw2", },
+ { .name = "sw3", },
+ { .name = "vsnvs", },
+ { .name = "vldo1", },
+ { .name = "vldo2", },
+ { .name = "vccsd", },
+ { .name = "v33", },
+ { .name = "vldo3", },
+ { .name = "vldo4", },
+};
+
static struct of_regulator_match *pfuze_matches;
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -430,6 +489,9 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
if (!np)
return -EINVAL;
+ if (of_property_read_bool(np, "fsl,pfuze-support-disable-sw"))
+ chip->flags |= PFUZE_FLAG_DISABLE_SW;
+
parent = of_get_child_by_name(np, "regulators");
if (!parent) {
dev_err(dev, "regulators node not found\n");
@@ -437,6 +499,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
}
switch (chip->chip_id) {
+ case PFUZE3001:
+ pfuze_matches = pfuze3001_matches;
+ ret = of_regulator_match(dev, parent, pfuze3001_matches,
+ ARRAY_SIZE(pfuze3001_matches));
+ break;
case PFUZE3000:
pfuze_matches = pfuze3000_matches;
ret = of_regulator_match(dev, parent, pfuze3000_matches,
@@ -508,7 +575,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
*/
dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
} else if ((value & 0x0f) != pfuze_chip->chip_id &&
- (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
+ (value & 0xf0) >> 4 != pfuze_chip->chip_id &&
+ (value != pfuze_chip->chip_id)) {
/* device id NOT match with your setting */
dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
return -ENODEV;
@@ -588,6 +656,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
/* use the right regulators after identify the right device */
switch (pfuze_chip->chip_id) {
+ case PFUZE3001:
+ pfuze_chip->pfuze_regulators = pfuze3001_regulators;
+ regulator_num = ARRAY_SIZE(pfuze3001_regulators);
+ sw_check_start = PFUZE3001_SW2;
+ sw_check_end = PFUZE3001_SW2;
+ sw_hi = 1 << 3;
+ break;
case PFUZE3000:
pfuze_chip->pfuze_regulators = pfuze3000_regulators;
regulator_num = ARRAY_SIZE(pfuze3000_regulators);
@@ -611,7 +686,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
}
dev_info(&client->dev, "pfuze%s found.\n",
(pfuze_chip->chip_id == PFUZE100) ? "100" :
- ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
+ (((pfuze_chip->chip_id == PFUZE200) ? "200" :
+ ((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
sizeof(pfuze_chip->regulator_descs));
@@ -636,7 +712,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
if (i >= sw_check_start && i <= sw_check_end) {
regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
if (val & sw_hi) {
- if (pfuze_chip->chip_id == PFUZE3000) {
+ if (pfuze_chip->chip_id == PFUZE3000 ||
+ pfuze_chip->chip_id == PFUZE3001) {
desc->volt_table = pfuze3000_sw2hi;
desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
} else {
@@ -647,6 +724,21 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
}
}
+ /*
+	 * Allow SW regulators to turn off. Checking it through a flag is
+	 * a workaround to keep backward compatibility with existing old
+	 * dtbs, which may rely on the switched regulators never having
+	 * been disabled before.
+ */
+ if (pfuze_chip->flags & PFUZE_FLAG_DISABLE_SW) {
+ if (pfuze_chip->regulator_descs[i].sw_reg) {
+ desc->ops = &pfuze100_sw_disable_regulator_ops;
+ desc->enable_val = 0x8;
+ desc->disable_val = 0x0;
+ desc->enable_time = 500;
+ }
+ }
+
config.dev = &client->dev;
config.init_data = init_data;
config.driver_data = pfuze_chip;
@@ -675,5 +767,5 @@ static struct i2c_driver pfuze_driver = {
module_i2c_driver(pfuze_driver);
MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
-MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/200/3000 PMIC");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/200/3000/3001 PMIC");
MODULE_LICENSE("GPL v2");
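
Editor's note: the new disable behaviour is strictly opt-in via the device tree. A hedged illustration of where the property sits, based on the parsing code above, which reads it from the PMIC node itself (the node carrying the "regulators" child); the unit address is hypothetical:

	/*
	 * pmic: pfuze3001@8 {
	 *         compatible = "fsl,pfuze3001";
	 *         reg = <0x08>;
	 *         fsl,pfuze-support-disable-sw;
	 *
	 *         regulators {
	 *                 sw1 { ... };
	 *         };
	 * };
	 */
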
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
new file mode 100644
index 000000000000..9f27daebd8c8
--- /dev/null
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+/**
+ * enum rpmh_regulator_type - supported RPMh accelerator types
+ * @VRM:	RPMh VRM accelerator which supports voting on enable, voltage,
+ *		and mode of LDO, SMPS, and BOB type PMIC regulators.
+ * @XOB:	RPMh XOB accelerator which supports voting on the enable state
+ *		of PMIC regulators.
+ */
+enum rpmh_regulator_type {
+ VRM,
+ XOB,
+};
+
+#define RPMH_REGULATOR_REG_VRM_VOLTAGE 0x0
+#define RPMH_REGULATOR_REG_ENABLE 0x4
+#define RPMH_REGULATOR_REG_VRM_MODE 0x8
+
+#define PMIC4_LDO_MODE_RETENTION 4
+#define PMIC4_LDO_MODE_LPM 5
+#define PMIC4_LDO_MODE_HPM 7
+
+#define PMIC4_SMPS_MODE_RETENTION 4
+#define PMIC4_SMPS_MODE_PFM 5
+#define PMIC4_SMPS_MODE_AUTO 6
+#define PMIC4_SMPS_MODE_PWM 7
+
+#define PMIC4_BOB_MODE_PASS 0
+#define PMIC4_BOB_MODE_PFM 1
+#define PMIC4_BOB_MODE_AUTO 2
+#define PMIC4_BOB_MODE_PWM 3
+
+/**
+ * struct rpmh_vreg_hw_data - RPMh regulator hardware configurations
+ * @regulator_type: RPMh accelerator type used to manage this
+ * regulator
+ * @ops: Pointer to regulator ops callback structure
+ * @voltage_range: The single range of voltages supported by this
+ * PMIC regulator type
+ * @n_voltages: The number of unique voltage set points defined
+ * by voltage_range
+ * @hpm_min_load_uA: Minimum load current in microamps that requires
+ * high power mode (HPM) operation. This is used
+ * for LDO hardware type regulators only.
+ * @pmic_mode_map: Array indexed by regulator framework mode
+ * containing PMIC hardware modes. Must be large
+ * enough to index all framework modes supported
+ * by this regulator hardware type.
+ * @of_map_mode: Maps an RPMH_REGULATOR_MODE_* mode value defined
+ * in device tree to a regulator framework mode
+ */
+struct rpmh_vreg_hw_data {
+ enum rpmh_regulator_type regulator_type;
+ const struct regulator_ops *ops;
+ const struct regulator_linear_range voltage_range;
+ int n_voltages;
+ int hpm_min_load_uA;
+ const int *pmic_mode_map;
+ unsigned int (*of_map_mode)(unsigned int mode);
+};
+
+/**
+ * struct rpmh_vreg - individual RPMh regulator data structure encapsulating a
+ * single regulator device
+ * @dev: Device pointer for the top-level PMIC RPMh
+ * regulator parent device. This is used as a
+ * handle in RPMh write requests.
+ * @addr: Base address of the regulator resource within
+ * an RPMh accelerator
+ * @rdesc: Regulator descriptor
+ * @hw_data: PMIC regulator configuration data for this RPMh
+ * regulator
+ * @always_wait_for_ack: Boolean flag indicating if a request must always
+ * wait for an ACK from RPMh before continuing even
+ * if it corresponds to a strictly lower power
+ * state (e.g. enabled --> disabled).
+ * @enabled: Flag indicating if the regulator is enabled or
+ * not
+ * @bypassed: Boolean indicating if the regulator is in
+ * bypass (pass-through) mode or not. This is
+ * only used by BOB rpmh-regulator resources.
+ * @voltage_selector: Selector used for get_voltage_sel() and
+ * set_voltage_sel() callbacks
+ * @mode: RPMh VRM regulator current framework mode
+ */
+struct rpmh_vreg {
+ struct device *dev;
+ u32 addr;
+ struct regulator_desc rdesc;
+ const struct rpmh_vreg_hw_data *hw_data;
+ bool always_wait_for_ack;
+
+ int enabled;
+ bool bypassed;
+ int voltage_selector;
+ unsigned int mode;
+};
+
+/**
+ * struct rpmh_vreg_init_data - initialization data for an RPMh regulator
+ * @name: Name for the regulator which also corresponds
+ * to the device tree subnode name of the regulator
+ * @resource_name: RPMh regulator resource name format string.
+ * This must include exactly one field: '%s' which
+ * is filled at run-time with the PMIC ID provided
+ * by device tree property qcom,pmic-id. Example:
+ * "ldo%s1" for RPMh resource "ldoa1".
+ * @supply_name: Parent supply regulator name
+ * @hw_data: Configuration data for this PMIC regulator type
+ */
+struct rpmh_vreg_init_data {
+ const char *name;
+ const char *resource_name;
+ const char *supply_name;
+ const struct rpmh_vreg_hw_data *hw_data;
+};
+
+/**
+ * rpmh_regulator_send_request() - send the request to RPMh
+ * @vreg: Pointer to the RPMh regulator
+ * @cmd: Pointer to the RPMh command to send
+ * @wait_for_ack: Boolean indicating if execution must wait until the
+ * request has been acknowledged as complete
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_send_request(struct rpmh_vreg *vreg,
+ struct tcs_cmd *cmd, bool wait_for_ack)
+{
+ int ret;
+
+ if (wait_for_ack || vreg->always_wait_for_ack)
+ ret = rpmh_write(vreg->dev, RPMH_ACTIVE_ONLY_STATE, cmd, 1);
+ else
+ ret = rpmh_write_async(vreg->dev, RPMH_ACTIVE_ONLY_STATE, cmd,
+ 1);
+
+ return ret;
+}
+
+static int _rpmh_regulator_vrm_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector, bool wait_for_ack)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ struct tcs_cmd cmd = {
+ .addr = vreg->addr + RPMH_REGULATOR_REG_VRM_VOLTAGE,
+ };
+ int ret;
+
+ /* VRM voltage control register is set with voltage in millivolts. */
+ cmd.data = DIV_ROUND_UP(regulator_list_voltage_linear_range(rdev,
+ selector), 1000);
+
+ ret = rpmh_regulator_send_request(vreg, &cmd, wait_for_ack);
+ if (!ret)
+ vreg->voltage_selector = selector;
+
+ return ret;
+}
+
+static int rpmh_regulator_vrm_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ if (vreg->enabled == -EINVAL) {
+ /*
+ * Cache the voltage and send it later when the regulator is
+ * enabled or disabled.
+ */
+ vreg->voltage_selector = selector;
+ return 0;
+ }
+
+ return _rpmh_regulator_vrm_set_voltage_sel(rdev, selector,
+ selector > vreg->voltage_selector);
+}
+
+static int rpmh_regulator_vrm_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->voltage_selector;
+}
+
+static int rpmh_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->enabled;
+}
+
+static int rpmh_regulator_set_enable_state(struct regulator_dev *rdev,
+ bool enable)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ struct tcs_cmd cmd = {
+ .addr = vreg->addr + RPMH_REGULATOR_REG_ENABLE,
+ .data = enable,
+ };
+ int ret;
+
+ if (vreg->enabled == -EINVAL &&
+ vreg->voltage_selector != -ENOTRECOVERABLE) {
+ ret = _rpmh_regulator_vrm_set_voltage_sel(rdev,
+ vreg->voltage_selector, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rpmh_regulator_send_request(vreg, &cmd, enable);
+ if (!ret)
+ vreg->enabled = enable;
+
+ return ret;
+}
+
+static int rpmh_regulator_enable(struct regulator_dev *rdev)
+{
+ return rpmh_regulator_set_enable_state(rdev, true);
+}
+
+static int rpmh_regulator_disable(struct regulator_dev *rdev)
+{
+ return rpmh_regulator_set_enable_state(rdev, false);
+}
+
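
Editor's note: a short timeline of the deferred-voltage handshake implemented by the callbacks above, inferred from the sentinel values assigned in rpmh_regulator_init_vreg() later in this file (editorial sketch, not driver code):

	/*
	 * init:        vreg->enabled = -EINVAL;            (state unknown)
	 *              vreg->voltage_selector = -ENOTRECOVERABLE;
	 * set_voltage: enabled == -EINVAL, so only cache the selector;
	 *              no RPMh request is sent yet.
	 * enable:      a cached selector exists, so first send the
	 *              VRM_VOLTAGE request (waiting for the ACK), then
	 *              send the ENABLE request; both fields now track
	 *              real hardware state.
	 */
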
+static int rpmh_regulator_vrm_set_mode_bypass(struct rpmh_vreg *vreg,
+ unsigned int mode, bool bypassed)
+{
+ struct tcs_cmd cmd = {
+ .addr = vreg->addr + RPMH_REGULATOR_REG_VRM_MODE,
+ };
+ int pmic_mode;
+
+ if (mode > REGULATOR_MODE_STANDBY)
+ return -EINVAL;
+
+ pmic_mode = vreg->hw_data->pmic_mode_map[mode];
+ if (pmic_mode < 0)
+ return pmic_mode;
+
+ if (bypassed)
+ cmd.data = PMIC4_BOB_MODE_PASS;
+ else
+ cmd.data = pmic_mode;
+
+ return rpmh_regulator_send_request(vreg, &cmd, true);
+}
+
+static int rpmh_regulator_vrm_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (mode == vreg->mode)
+ return 0;
+
+ ret = rpmh_regulator_vrm_set_mode_bypass(vreg, mode, vreg->bypassed);
+ if (!ret)
+ vreg->mode = mode;
+
+ return ret;
+}
+
+static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->mode;
+}
+
+/**
+ * rpmh_regulator_vrm_set_load() - set the regulator mode based upon the load
+ * current requested
+ * @rdev: Regulator device pointer for the rpmh-regulator
+ * @load_uA: Aggregated load current in microamps
+ *
+ * This function is used in the regulator_ops for VRM type RPMh regulator
+ * devices.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_vrm_set_load(struct regulator_dev *rdev, int load_uA)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ unsigned int mode;
+
+ if (load_uA >= vreg->hw_data->hpm_min_load_uA)
+ mode = REGULATOR_MODE_NORMAL;
+ else
+ mode = REGULATOR_MODE_IDLE;
+
+ return rpmh_regulator_vrm_set_mode(rdev, mode);
+}
+
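
Editor's note: a worked example for the threshold above, using the hpm_min_load_uA values defined later in this file (10000 for pLDOs, 30000 for nLDOs):

	/*
	 * pmic4_pldo (hpm_min_load_uA = 10000):
	 *     set_load(5000)  -> REGULATOR_MODE_IDLE   (LPM)
	 *     set_load(15000) -> REGULATOR_MODE_NORMAL (HPM)
	 * pmic4_nldo (hpm_min_load_uA = 30000):
	 *     set_load(15000) -> REGULATOR_MODE_IDLE   (LPM)
	 *     set_load(30000) -> REGULATOR_MODE_NORMAL (HPM)
	 */
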
+static int rpmh_regulator_vrm_set_bypass(struct regulator_dev *rdev,
+ bool enable)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (vreg->bypassed == enable)
+ return 0;
+
+ ret = rpmh_regulator_vrm_set_mode_bypass(vreg, vreg->mode, enable);
+ if (!ret)
+ vreg->bypassed = enable;
+
+ return ret;
+}
+
+static int rpmh_regulator_vrm_get_bypass(struct regulator_dev *rdev,
+ bool *enable)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ *enable = vreg->bypassed;
+
+ return 0;
+}
+
+static const struct regulator_ops rpmh_regulator_vrm_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+ .set_voltage_sel = rpmh_regulator_vrm_set_voltage_sel,
+ .get_voltage_sel = rpmh_regulator_vrm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_mode = rpmh_regulator_vrm_set_mode,
+ .get_mode = rpmh_regulator_vrm_get_mode,
+};
+
+static const struct regulator_ops rpmh_regulator_vrm_drms_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+ .set_voltage_sel = rpmh_regulator_vrm_set_voltage_sel,
+ .get_voltage_sel = rpmh_regulator_vrm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_mode = rpmh_regulator_vrm_set_mode,
+ .get_mode = rpmh_regulator_vrm_get_mode,
+ .set_load = rpmh_regulator_vrm_set_load,
+};
+
+static const struct regulator_ops rpmh_regulator_vrm_bypass_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+ .set_voltage_sel = rpmh_regulator_vrm_set_voltage_sel,
+ .get_voltage_sel = rpmh_regulator_vrm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_mode = rpmh_regulator_vrm_set_mode,
+ .get_mode = rpmh_regulator_vrm_get_mode,
+ .set_bypass = rpmh_regulator_vrm_set_bypass,
+ .get_bypass = rpmh_regulator_vrm_get_bypass,
+};
+
+static const struct regulator_ops rpmh_regulator_xob_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+};
+
+/**
+ * rpmh_regulator_init_vreg() - initialize all attributes of an rpmh-regulator
+ * @vreg:		Pointer to the individual rpmh-regulator resource
+ * @dev:		Pointer to the top level rpmh-regulator PMIC device
+ * @node:		Pointer to the individual rpmh-regulator resource
+ *			device node
+ * @pmic_id:		String used to identify the top level rpmh-regulator
+ *			PMIC device on the board
+ * @pmic_rpmh_data:	Pointer to a null-terminated array of rpmh-regulator
+ *			resources defined for the top level PMIC device
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
+ struct device_node *node, const char *pmic_id,
+ const struct rpmh_vreg_init_data *pmic_rpmh_data)
+{
+ struct regulator_config reg_config = {};
+ char rpmh_resource_name[20] = "";
+ const struct rpmh_vreg_init_data *rpmh_data;
+ struct regulator_init_data *init_data;
+ struct regulator_dev *rdev;
+ int ret;
+
+ vreg->dev = dev;
+
+ for (rpmh_data = pmic_rpmh_data; rpmh_data->name; rpmh_data++)
+ if (!strcmp(rpmh_data->name, node->name))
+ break;
+
+ if (!rpmh_data->name) {
+ dev_err(dev, "Unknown regulator %s\n", node->name);
+ return -EINVAL;
+ }
+
+ scnprintf(rpmh_resource_name, sizeof(rpmh_resource_name),
+ rpmh_data->resource_name, pmic_id);
+
+ vreg->addr = cmd_db_read_addr(rpmh_resource_name);
+ if (!vreg->addr) {
+ dev_err(dev, "%s: could not find RPMh address for resource %s\n",
+ node->name, rpmh_resource_name);
+ return -ENODEV;
+ }
+
+ vreg->rdesc.name = rpmh_data->name;
+ vreg->rdesc.supply_name = rpmh_data->supply_name;
+ vreg->hw_data = rpmh_data->hw_data;
+
+ vreg->enabled = -EINVAL;
+ vreg->voltage_selector = -ENOTRECOVERABLE;
+ vreg->mode = REGULATOR_MODE_INVALID;
+
+ if (rpmh_data->hw_data->n_voltages) {
+ vreg->rdesc.linear_ranges = &rpmh_data->hw_data->voltage_range;
+ vreg->rdesc.n_linear_ranges = 1;
+ vreg->rdesc.n_voltages = rpmh_data->hw_data->n_voltages;
+ }
+
+ vreg->always_wait_for_ack = of_property_read_bool(node,
+ "qcom,always-wait-for-ack");
+
+ vreg->rdesc.owner = THIS_MODULE;
+ vreg->rdesc.type = REGULATOR_VOLTAGE;
+ vreg->rdesc.ops = vreg->hw_data->ops;
+ vreg->rdesc.of_map_mode = vreg->hw_data->of_map_mode;
+
+ init_data = of_get_regulator_init_data(dev, node, &vreg->rdesc);
+ if (!init_data)
+ return -ENOMEM;
+
+ if (rpmh_data->hw_data->regulator_type == XOB &&
+ init_data->constraints.min_uV &&
+ init_data->constraints.min_uV == init_data->constraints.max_uV) {
+ vreg->rdesc.fixed_uV = init_data->constraints.min_uV;
+ vreg->rdesc.n_voltages = 1;
+ }
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.of_node = node;
+ reg_config.driver_data = vreg;
+
+ rdev = devm_regulator_register(dev, &vreg->rdesc, &reg_config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "%s: devm_regulator_register() failed, ret=%d\n",
+ node->name, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s regulator registered for RPMh resource %s @ 0x%05X\n",
+ node->name, rpmh_resource_name, vreg->addr);
+
+ return 0;
+}
+
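
Editor's note: the XOB special case in init_vreg above is easiest to see with concrete constraints (sketch; the 1.8 V value is illustrative):

	/*
	 * For an XOB resource (e.g. an LVS switch) whose DT constraints
	 * pin the rail, say regulator-min-microvolt ==
	 * regulator-max-microvolt == 1800000, the code above sets:
	 *
	 *     vreg->rdesc.fixed_uV   = 1800000;
	 *     vreg->rdesc.n_voltages = 1;
	 *
	 * so regulator_get_voltage() works even though XOB offers no
	 * voltage control register.
	 */
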
+static const int pmic_mode_map_pmic4_ldo[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = PMIC4_LDO_MODE_RETENTION,
+ [REGULATOR_MODE_IDLE] = PMIC4_LDO_MODE_LPM,
+ [REGULATOR_MODE_NORMAL] = PMIC4_LDO_MODE_HPM,
+ [REGULATOR_MODE_FAST] = -EINVAL,
+};
+
+static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode)
+{
+ unsigned int mode;
+
+ switch (rpmh_mode) {
+ case RPMH_REGULATOR_MODE_HPM:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case RPMH_REGULATOR_MODE_LPM:
+ mode = REGULATOR_MODE_IDLE;
+ break;
+ case RPMH_REGULATOR_MODE_RET:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ default:
+ mode = REGULATOR_MODE_INVALID;
+ }
+
+ return mode;
+}
+
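
Editor's note: end-to-end mode resolution for a PMIC4 LDO, combining of_map_mode above with pmic_mode_map_pmic4_ldo; the RPMH_REGULATOR_MODE_* constants come from the dt-bindings header included at the top of this file:

	/*
	 * DT:   regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
	 * core: of_map_mode(RPMH_REGULATOR_MODE_LPM) -> REGULATOR_MODE_IDLE
	 * ops:  set_mode(REGULATOR_MODE_IDLE)
	 *       -> pmic_mode_map_pmic4_ldo[REGULATOR_MODE_IDLE]
	 *       == PMIC4_LDO_MODE_LPM (5), written to the VRM_MODE register.
	 */
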
+static const int pmic_mode_map_pmic4_smps[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = PMIC4_SMPS_MODE_RETENTION,
+ [REGULATOR_MODE_IDLE] = PMIC4_SMPS_MODE_PFM,
+ [REGULATOR_MODE_NORMAL] = PMIC4_SMPS_MODE_AUTO,
+ [REGULATOR_MODE_FAST] = PMIC4_SMPS_MODE_PWM,
+};
+
+static unsigned int
+rpmh_regulator_pmic4_smps_of_map_mode(unsigned int rpmh_mode)
+{
+ unsigned int mode;
+
+ switch (rpmh_mode) {
+ case RPMH_REGULATOR_MODE_HPM:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case RPMH_REGULATOR_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case RPMH_REGULATOR_MODE_LPM:
+ mode = REGULATOR_MODE_IDLE;
+ break;
+ case RPMH_REGULATOR_MODE_RET:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ default:
+ mode = REGULATOR_MODE_INVALID;
+ }
+
+ return mode;
+}
+
+static const int pmic_mode_map_pmic4_bob[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = -EINVAL,
+ [REGULATOR_MODE_IDLE] = PMIC4_BOB_MODE_PFM,
+ [REGULATOR_MODE_NORMAL] = PMIC4_BOB_MODE_AUTO,
+ [REGULATOR_MODE_FAST] = PMIC4_BOB_MODE_PWM,
+};
+
+static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode)
+{
+ unsigned int mode;
+
+ switch (rpmh_mode) {
+ case RPMH_REGULATOR_MODE_HPM:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case RPMH_REGULATOR_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case RPMH_REGULATOR_MODE_LPM:
+ mode = REGULATOR_MODE_IDLE;
+ break;
+ default:
+ mode = REGULATOR_MODE_INVALID;
+ }
+
+ return mode;
+}
+
+static const struct rpmh_vreg_hw_data pmic4_pldo = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
+ .n_voltages = 256,
+ .hpm_min_load_uA = 10000,
+ .pmic_mode_map = pmic_mode_map_pmic4_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_pldo_lv = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
+ .n_voltages = 128,
+ .hpm_min_load_uA = 10000,
+ .pmic_mode_map = pmic_mode_map_pmic4_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_nldo = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ .n_voltages = 128,
+ .hpm_min_load_uA = 30000,
+ .pmic_mode_map = pmic_mode_map_pmic4_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .n_voltages = 216,
+ .pmic_mode_map = pmic_mode_map_pmic4_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
+ .n_voltages = 259,
+ .pmic_mode_map = pmic_mode_map_pmic4_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_bob = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_bypass_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
+ .n_voltages = 84,
+ .pmic_mode_map = pmic_mode_map_pmic4_bob,
+ .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_lvs = {
+ .regulator_type = XOB,
+ .ops = &rpmh_regulator_xob_ops,
+ /* LVS hardware does not support voltage or mode configuration. */
+};
+
+#define RPMH_VREG(_name, _resource_name, _hw_data, _supply_name) \
+{ \
+ .name = _name, \
+ .resource_name = _resource_name, \
+ .hw_data = _hw_data, \
+ .supply_name = _supply_name, \
+}
+
+static const struct rpmh_vreg_init_data pm8998_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_hfsmps3, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic4_hfsmps3, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic4_hfsmps3, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic4_ftsmps426, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic4_ftsmps426, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic4_ftsmps426, "vdd-s8"),
+ RPMH_VREG("smps9", "smp%s9", &pmic4_ftsmps426, "vdd-s9"),
+ RPMH_VREG("smps10", "smp%s10", &pmic4_ftsmps426, "vdd-s10"),
+ RPMH_VREG("smps11", "smp%s11", &pmic4_ftsmps426, "vdd-s11"),
+ RPMH_VREG("smps12", "smp%s12", &pmic4_ftsmps426, "vdd-s12"),
+ RPMH_VREG("smps13", "smp%s13", &pmic4_ftsmps426, "vdd-s13"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic4_nldo, "vdd-l1-l27"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic4_nldo, "vdd-l2-l8-l17"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic4_nldo, "vdd-l3-l11"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic4_nldo, "vdd-l4-l5"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic4_nldo, "vdd-l4-l5"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic4_pldo, "vdd-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic4_nldo, "vdd-l2-l8-l17"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic4_pldo, "vdd-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic4_pldo, "vdd-l10-l23-l25"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic4_nldo, "vdd-l3-l11"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic4_pldo, "vdd-l13-l19-l21"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic4_pldo, "vdd-l16-l28"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic4_nldo, "vdd-l2-l8-l17"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic4_pldo, "vdd-l18-l22"),
+ RPMH_VREG("ldo19", "ldo%s19", &pmic4_pldo, "vdd-l13-l19-l21"),
+ RPMH_VREG("ldo20", "ldo%s20", &pmic4_pldo, "vdd-l20-l24"),
+ RPMH_VREG("ldo21", "ldo%s21", &pmic4_pldo, "vdd-l13-l19-l21"),
+ RPMH_VREG("ldo22", "ldo%s22", &pmic4_pldo, "vdd-l18-l22"),
+ RPMH_VREG("ldo23", "ldo%s23", &pmic4_pldo, "vdd-l10-l23-l25"),
+ RPMH_VREG("ldo24", "ldo%s24", &pmic4_pldo, "vdd-l20-l24"),
+ RPMH_VREG("ldo25", "ldo%s25", &pmic4_pldo, "vdd-l10-l23-l25"),
+ RPMH_VREG("ldo26", "ldo%s26", &pmic4_nldo, "vdd-l26"),
+ RPMH_VREG("ldo27", "ldo%s27", &pmic4_nldo, "vdd-l1-l27"),
+ RPMH_VREG("ldo28", "ldo%s28", &pmic4_pldo, "vdd-l16-l28"),
+ RPMH_VREG("lvs1", "vs%s1", &pmic4_lvs, "vin-lvs-1-2"),
+ RPMH_VREG("lvs2", "vs%s2", &pmic4_lvs, "vin-lvs-1-2"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pmi8998_vreg_data[] = {
+ RPMH_VREG("bob", "bob%s1", &pmic4_bob, "vdd-bob"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic4_ftsmps426, "vdd-s4"),
+ {},
+};
+
+static int rpmh_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct rpmh_vreg_init_data *vreg_data;
+ struct device_node *node;
+ struct rpmh_vreg *vreg;
+ const char *pmic_id;
+ int ret;
+
+ vreg_data = of_device_get_match_data(dev);
+ if (!vreg_data)
+ return -ENODEV;
+
+ ret = of_property_read_string(dev->of_node, "qcom,pmic-id", &pmic_id);
+ if (ret < 0) {
+ dev_err(dev, "qcom,pmic-id missing in DT node\n");
+ return ret;
+ }
+
+ for_each_available_child_of_node(dev->of_node, node) {
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+
+ ret = rpmh_regulator_init_vreg(vreg, dev, node, pmic_id,
+ vreg_data);
+ if (ret < 0) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rpmh_regulator_match_table[] = {
+ {
+ .compatible = "qcom,pm8998-rpmh-regulators",
+ .data = pm8998_vreg_data,
+ },
+ {
+ .compatible = "qcom,pmi8998-rpmh-regulators",
+ .data = pmi8998_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm8005-rpmh-regulators",
+ .data = pm8005_vreg_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
+
+static struct platform_driver rpmh_regulator_driver = {
+ .driver = {
+ .name = "qcom-rpmh-regulator",
+ .of_match_table = of_match_ptr(rpmh_regulator_match_table),
+ },
+ .probe = rpmh_regulator_probe,
+};
+module_platform_driver(rpmh_regulator_driver);
+
+MODULE_DESCRIPTION("Qualcomm RPMh regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 9817f1a75342..53a61fb65642 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -1060,7 +1060,7 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
#define SAW3_AVS_CTL_TGGL_MASK 0x8000000
#define SAW3_AVS_CTL_CLEAR_MASK 0x7efc00
-static struct regmap *saw_regmap = NULL;
+static struct regmap *saw_regmap;
static void spmi_saw_set_vdd(void *data)
{
@@ -1728,7 +1728,7 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ "s2", 0x1700, "vdd_s2", },
{ "s3", 0x1a00, "vdd_s3", },
{ "l1", 0x4000, "vdd_l1", },
- { }
+ { }
};
static const struct of_device_id qcom_spmi_regulator_match[] = {
@@ -1752,7 +1752,8 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
const char *name;
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
- struct device_node *syscon;
+ struct device_node *syscon, *reg_node;
+ struct property *reg_prop;
int ret, lenp;
struct list_head *vreg_list;
@@ -1774,16 +1775,19 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
syscon = of_parse_phandle(node, "qcom,saw-reg", 0);
saw_regmap = syscon_node_to_regmap(syscon);
of_node_put(syscon);
- if (IS_ERR(regmap))
+ if (IS_ERR(saw_regmap))
dev_err(dev, "ERROR reading SAW regmap\n");
}
for (reg = match->data; reg->name; reg++) {
- if (saw_regmap && \
- of_find_property(of_find_node_by_name(node, reg->name), \
- "qcom,saw-slave", &lenp)) {
- continue;
+ if (saw_regmap) {
+ reg_node = of_get_child_by_name(node, reg->name);
+ reg_prop = of_find_property(reg_node, "qcom,saw-slave",
+ &lenp);
+ of_node_put(reg_node);
+ if (reg_prop)
+ continue;
}
vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
@@ -1816,13 +1820,17 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
if (ret)
continue;
- if (saw_regmap && \
- of_find_property(of_find_node_by_name(node, reg->name), \
- "qcom,saw-leader", &lenp)) {
- spmi_saw_ops = *(vreg->desc.ops);
- spmi_saw_ops.set_voltage_sel = \
- spmi_regulator_saw_set_voltage;
- vreg->desc.ops = &spmi_saw_ops;
+ if (saw_regmap) {
+ reg_node = of_get_child_by_name(node, reg->name);
+ reg_prop = of_find_property(reg_node, "qcom,saw-leader",
+ &lenp);
+ of_node_put(reg_node);
+ if (reg_prop) {
+ spmi_saw_ops = *(vreg->desc.ops);
+ spmi_saw_ops.set_voltage_sel =
+ spmi_regulator_saw_set_voltage;
+ vreg->desc.ops = &spmi_saw_ops;
+ }
}
config.dev = dev;
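
Editor's note: the lookup change above matters for two reasons: of_find_node_by_name() searches the whole tree from the given node (and drops a reference on it), whereas of_get_child_by_name() only walks direct children and returns a properly counted reference that must be put. The leak-free shape, in general form:

	#include <linux/of.h>

	static bool node_has_prop(struct device_node *parent,
				  const char *child, const char *prop)
	{
		struct device_node *np;
		bool found;

		np = of_get_child_by_name(parent, child); /* takes a reference */
		found = np && of_property_read_bool(np, prop);
		of_node_put(np);	/* of_node_put(NULL) is a safe no-op */

		return found;
	}
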
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 48f0ca90743c..095d25f3d2ea 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -1,13 +1,7 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/bug.h>
#include <linux/err.h>
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d1207ec683db..5bb6f4ca48db 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1,20 +1,7 @@
-/*
- * s2mps11.c
- *
- * Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/bug.h>
#include <linux/err.h>
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 0cbc980753c2..667d16dc83ce 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -1,15 +1,7 @@
-/*
- * s5m8767.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2011 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/err.h>
#include <linux/of_gpio.h>
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index fc12badf3805..d84fab616abf 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -232,6 +232,8 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
tps->strobes = devm_kcalloc(&pdev->dev,
TPS65217_NUM_REGULATOR, sizeof(u8),
GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
platform_set_drvdata(pdev, tps);
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index a4921a70da55..276faeddc370 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -18,6 +18,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
new file mode 100644
index 000000000000..abf22acbd13e
--- /dev/null
+++ b/drivers/regulator/uniphier-regulator.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulator controller driver for UniPhier SoC
+// Copyright 2018 Socionext Inc.
+// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/reset.h>
+
+#define MAX_CLKS 2
+#define MAX_RSTS 2
+
+struct uniphier_regulator_soc_data {
+ int nclks;
+ const char * const *clock_names;
+ int nrsts;
+ const char * const *reset_names;
+ const struct regulator_desc *desc;
+ const struct regmap_config *regconf;
+};
+
+struct uniphier_regulator_priv {
+ struct clk_bulk_data clk[MAX_CLKS];
+ struct reset_control *rst[MAX_RSTS];
+ const struct uniphier_regulator_soc_data *data;
+};
+
+static struct regulator_ops uniphier_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static int uniphier_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_regulator_priv *priv;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+ const char *name;
+ int i, ret, nr;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for (i = 0; i < priv->data->nclks; i++)
+ priv->clk[i].id = priv->data->clock_names[i];
+ ret = devm_clk_bulk_get(dev, priv->data->nclks, priv->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < priv->data->nrsts; i++) {
+ name = priv->data->reset_names[i];
+ priv->rst[i] = devm_reset_control_get_shared(dev, name);
+ if (IS_ERR(priv->rst[i]))
+ return PTR_ERR(priv->rst[i]);
+ }
+
+ ret = clk_bulk_prepare_enable(priv->data->nclks, priv->clk);
+ if (ret)
+ return ret;
+
+ for (nr = 0; nr < priv->data->nrsts; nr++) {
+ ret = reset_control_deassert(priv->rst[nr]);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base, priv->data->regconf);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ config.dev = dev;
+ config.driver_data = priv;
+ config.of_node = dev->of_node;
+ config.regmap = regmap;
+ config.init_data = of_get_regulator_init_data(dev, dev->of_node,
+ priv->data->desc);
+ rdev = devm_regulator_register(dev, priv->data->desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ goto out_rst_assert;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+out_rst_assert:
+ while (nr--)
+ reset_control_assert(priv->rst[nr]);
+
+ clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+
+ return ret;
+}
+
+static int uniphier_regulator_remove(struct platform_device *pdev)
+{
+ struct uniphier_regulator_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < priv->data->nrsts; i++)
+ reset_control_assert(priv->rst[i]);
+
+ clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+
+ return 0;
+}
+
+/* USB3 controller data */
+#define USB3VBUS_OFFSET 0x0
+#define USB3VBUS_REG BIT(4)
+#define USB3VBUS_REG_EN BIT(3)
+static const struct regulator_desc uniphier_usb3_regulator_desc = {
+ .name = "vbus",
+ .of_match = of_match_ptr("vbus"),
+ .ops = &uniphier_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = USB3VBUS_OFFSET,
+ .enable_mask = USB3VBUS_REG_EN | USB3VBUS_REG,
+ .enable_val = USB3VBUS_REG_EN | USB3VBUS_REG,
+ .disable_val = USB3VBUS_REG_EN,
+};
+
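
Editor's note: the enable bits above rely on a subtlety of the regmap helpers named in uniphier_regulator_ops: enable writes enable_val and disable writes disable_val, both under enable_mask. A simplified paraphrase of the core behaviour (not code from this driver):

	/*
	 * enable():  regmap_update_bits(map, USB3VBUS_OFFSET,
	 *                enable_mask = USB3VBUS_REG_EN | USB3VBUS_REG,
	 *                enable_val  = USB3VBUS_REG_EN | USB3VBUS_REG);
	 *
	 * disable(): regmap_update_bits(map, USB3VBUS_OFFSET,
	 *                enable_mask = USB3VBUS_REG_EN | USB3VBUS_REG,
	 *                disable_val = USB3VBUS_REG_EN);
	 *
	 * so "disable" clears only USB3VBUS_REG and keeps register-based
	 * VBUS control (USB3VBUS_REG_EN) selected.
	 */
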
+static const struct regmap_config uniphier_usb3_regulator_regconf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 1,
+};
+
+static const char * const uniphier_pro4_clock_reset_names[] = {
+ "gio", "link",
+};
+
+static const struct uniphier_regulator_soc_data uniphier_pro4_usb3_data = {
+ .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
+ .clock_names = uniphier_pro4_clock_reset_names,
+ .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
+ .reset_names = uniphier_pro4_clock_reset_names,
+ .desc = &uniphier_usb3_regulator_desc,
+ .regconf = &uniphier_usb3_regulator_regconf,
+};
+
+static const char * const uniphier_pxs2_clock_reset_names[] = {
+ "link",
+};
+
+static const struct uniphier_regulator_soc_data uniphier_pxs2_usb3_data = {
+ .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
+ .clock_names = uniphier_pxs2_clock_reset_names,
+ .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
+ .reset_names = uniphier_pxs2_clock_reset_names,
+ .desc = &uniphier_usb3_regulator_desc,
+ .regconf = &uniphier_usb3_regulator_regconf,
+};
+
+static const struct of_device_id uniphier_regulator_match[] = {
+ /* USB VBUS */
+ {
+ .compatible = "socionext,uniphier-pro4-usb3-regulator",
+ .data = &uniphier_pro4_usb3_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-regulator",
+ .data = &uniphier_pxs2_usb3_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-regulator",
+ .data = &uniphier_pxs2_usb3_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-regulator",
+ .data = &uniphier_pxs2_usb3_data,
+ },
+ { /* Sentinel */ },
+};
+
+static struct platform_driver uniphier_regulator_driver = {
+ .probe = uniphier_regulator_probe,
+ .remove = uniphier_regulator_remove,
+ .driver = {
+ .name = "uniphier-regulator",
+ .of_match_table = uniphier_regulator_match,
+ },
+};
+module_platform_driver(uniphier_regulator_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier Regulator Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index cd1c168fd188..052d4dd347f9 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -93,6 +93,7 @@ config QCOM_ADSP_PIL
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
select QCOM_MDT_LOADER
+ select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
help
@@ -102,6 +103,11 @@ config QCOM_ADSP_PIL
config QCOM_RPROC_COMMON
tristate
+config QCOM_Q6V5_COMMON
+ tristate
+ depends on ARCH_QCOM
+ depends on QCOM_SMEM
+
config QCOM_Q6V5_PIL
tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
depends on OF && ARCH_QCOM
@@ -110,12 +116,29 @@ config QCOM_Q6V5_PIL
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
+ select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
help
 Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based remote processors.
+config QCOM_Q6V5_WCSS
+ tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
+ depends on OF && ARCH_QCOM
+ depends on QCOM_SMEM
+ depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+ depends on QCOM_SYSMON || QCOM_SYSMON=n
+ select MFD_SYSCON
+ select QCOM_MDT_LOADER
+ select QCOM_Q6V5_COMMON
+ select QCOM_RPROC_COMMON
+ select QCOM_SCM
+ help
+ Say y here to support the Qualcomm Peripheral Image Loader for the
+ Hexagon V5 based WCSS remote processors.
+
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
depends on RPMSG
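
As a usage note, a hypothetical .config fragment enabling the new WCSS loader as a module (QCOM_Q6V5_COMMON is normally pulled in automatically via the select above, shown here only for clarity):

    CONFIG_QCOM_SMEM=y
    CONFIG_QCOM_Q6V5_WCSS=m
    # selected implicitly:
    CONFIG_QCOM_Q6V5_COMMON=m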
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 02627ede8d4a..03332fa7e2ee 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -16,7 +16,9 @@ obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
obj-$(CONFIG_QCOM_ADSP_PIL) += qcom_adsp_pil.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
+obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
+obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index b668e32996e2..e230bef71be1 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -20,8 +21,6 @@
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
-#include <mach/clock.h> /* for davinci_clk_reset_assert/deassert() */
-
#include "remoteproc_internal.h"
static char *da8xx_fw_name;
@@ -72,6 +71,7 @@ struct da8xx_rproc {
struct da8xx_rproc_mem *mem;
int num_mems;
struct clk *dsp_clk;
+ struct reset_control *dsp_reset;
void (*ack_fxn)(struct irq_data *data);
struct irq_data *irq_data;
void __iomem *chipsig;
@@ -138,6 +138,7 @@ static int da8xx_rproc_start(struct rproc *rproc)
struct device *dev = rproc->dev.parent;
struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
struct clk *dsp_clk = drproc->dsp_clk;
+ struct reset_control *dsp_reset = drproc->dsp_reset;
int ret;
/* hw requires the start (boot) address be on 1KB boundary */
@@ -155,7 +156,12 @@ static int da8xx_rproc_start(struct rproc *rproc)
return ret;
}
- davinci_clk_reset_deassert(dsp_clk);
+ ret = reset_control_deassert(dsp_reset);
+ if (ret) {
+ dev_err(dev, "reset_control_deassert() failed: %d\n", ret);
+ clk_disable_unprepare(dsp_clk);
+ return ret;
+ }
return 0;
}
@@ -163,8 +169,15 @@ static int da8xx_rproc_start(struct rproc *rproc)
static int da8xx_rproc_stop(struct rproc *rproc)
{
struct da8xx_rproc *drproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ ret = reset_control_assert(drproc->dsp_reset);
+ if (ret) {
+ dev_err(dev, "reset_control_assert() failed: %d\n", ret);
+ return ret;
+ }
- davinci_clk_reset_assert(drproc->dsp_clk);
clk_disable_unprepare(drproc->dsp_clk);
return 0;
@@ -232,6 +245,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
struct resource *bootreg_res;
struct resource *chipsig_res;
struct clk *dsp_clk;
+ struct reset_control *dsp_reset;
void __iomem *chipsig;
void __iomem *bootreg;
int irq;
@@ -268,6 +282,15 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
return PTR_ERR(dsp_clk);
}
+ dsp_reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(dsp_reset)) {
+ if (PTR_ERR(dsp_reset) != -EPROBE_DEFER)
+ dev_err(dev, "unable to get reset control: %ld\n",
+ PTR_ERR(dsp_reset));
+
+ return PTR_ERR(dsp_reset);
+ }
+
if (dev->of_node) {
ret = of_reserved_mem_device_init(dev);
if (ret) {
@@ -284,9 +307,13 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
goto free_mem;
}
+ /* error recovery is not supported at present */
+ rproc->recovery_disabled = true;
+
drproc = rproc->priv;
drproc->rproc = rproc;
drproc->dsp_clk = dsp_clk;
+ drproc->dsp_reset = dsp_reset;
rproc->has_iommu = false;
ret = da8xx_rproc_get_internal_memories(pdev, drproc);
@@ -309,7 +336,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
* *not* in reset, but da8xx_rproc_start() needs the DSP to be
* held in reset at the time it is called.
*/
- ret = davinci_clk_reset_assert(drproc->dsp_clk);
+ ret = reset_control_assert(dsp_reset);
if (ret)
goto free_rproc;
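
The hunks above move da8xx from the davinci-private davinci_clk_reset_assert()/deassert() calls to the generic reset framework. A condensed sketch of the consumer pattern the driver now follows (error handling trimmed; the NULL id assumes a single reset line described in the device tree):

    struct reset_control *rst;
    int ret;

    rst = devm_reset_control_get_exclusive(dev, NULL);
    if (IS_ERR(rst))
            return PTR_ERR(rst);          /* may be -EPROBE_DEFER */

    ret = reset_control_assert(rst);      /* hold the DSP in reset */
    /* ... load firmware ... */
    ret = reset_control_deassert(rst);    /* release it when booting */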
diff --git a/drivers/remoteproc/qcom_adsp_pil.c b/drivers/remoteproc/qcom_adsp_pil.c
index 89a86ce07f99..d4339a6da616 100644
--- a/drivers/remoteproc/qcom_adsp_pil.c
+++ b/drivers/remoteproc/qcom_adsp_pil.c
@@ -31,6 +31,7 @@
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
+#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
struct adsp_data {
@@ -48,14 +49,7 @@ struct qcom_adsp {
struct device *dev;
struct rproc *rproc;
- int wdog_irq;
- int fatal_irq;
- int ready_irq;
- int handover_irq;
- int stop_ack_irq;
-
- struct qcom_smem_state *state;
- unsigned stop_bit;
+ struct qcom_q6v5 q6v5;
struct clk *xo;
struct clk *aggre2_clk;
@@ -96,6 +90,8 @@ static int adsp_start(struct rproc *rproc)
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int ret;
+ qcom_q6v5_prepare(&adsp->q6v5);
+
ret = clk_prepare_enable(adsp->xo);
if (ret)
return ret;
@@ -119,16 +115,14 @@ static int adsp_start(struct rproc *rproc)
goto disable_px_supply;
}
- ret = wait_for_completion_timeout(&adsp->start_done,
- msecs_to_jiffies(5000));
- if (!ret) {
+ ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000));
+ if (ret == -ETIMEDOUT) {
dev_err(adsp->dev, "start timed out\n");
qcom_scm_pas_shutdown(adsp->pas_id);
- ret = -ETIMEDOUT;
goto disable_px_supply;
}
- ret = 0;
+ return 0;
disable_px_supply:
regulator_disable(adsp->px_supply);
@@ -142,28 +136,34 @@ disable_xo_clk:
return ret;
}
+static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
+{
+ struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
+
+ regulator_disable(adsp->px_supply);
+ regulator_disable(adsp->cx_supply);
+ clk_disable_unprepare(adsp->aggre2_clk);
+ clk_disable_unprepare(adsp->xo);
+}
+
static int adsp_stop(struct rproc *rproc)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int handover;
int ret;
- qcom_smem_state_update_bits(adsp->state,
- BIT(adsp->stop_bit),
- BIT(adsp->stop_bit));
-
- ret = wait_for_completion_timeout(&adsp->stop_done,
- msecs_to_jiffies(5000));
- if (ret == 0)
+ ret = qcom_q6v5_request_stop(&adsp->q6v5);
+ if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
- qcom_smem_state_update_bits(adsp->state,
- BIT(adsp->stop_bit),
- 0);
-
ret = qcom_scm_pas_shutdown(adsp->pas_id);
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
+ handover = qcom_q6v5_unprepare(&adsp->q6v5);
+ if (handover)
+ qcom_pas_handover(&adsp->q6v5);
+
return ret;
}
@@ -187,53 +187,6 @@ static const struct rproc_ops adsp_ops = {
.load = adsp_load,
};
-static irqreturn_t adsp_wdog_interrupt(int irq, void *dev)
-{
- struct qcom_adsp *adsp = dev;
-
- rproc_report_crash(adsp->rproc, RPROC_WATCHDOG);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t adsp_fatal_interrupt(int irq, void *dev)
-{
- struct qcom_adsp *adsp = dev;
- size_t len;
- char *msg;
-
- msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, adsp->crash_reason_smem, &len);
- if (!IS_ERR(msg) && len > 0 && msg[0])
- dev_err(adsp->dev, "fatal error received: %s\n", msg);
-
- rproc_report_crash(adsp->rproc, RPROC_FATAL_ERROR);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t adsp_ready_interrupt(int irq, void *dev)
-{
- return IRQ_HANDLED;
-}
-
-static irqreturn_t adsp_handover_interrupt(int irq, void *dev)
-{
- struct qcom_adsp *adsp = dev;
-
- complete(&adsp->start_done);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t adsp_stop_ack_interrupt(int irq, void *dev)
-{
- struct qcom_adsp *adsp = dev;
-
- complete(&adsp->stop_done);
-
- return IRQ_HANDLED;
-}
-
static int adsp_init_clock(struct qcom_adsp *adsp)
{
int ret;
@@ -272,29 +225,6 @@ static int adsp_init_regulator(struct qcom_adsp *adsp)
return PTR_ERR_OR_ZERO(adsp->px_supply);
}
-static int adsp_request_irq(struct qcom_adsp *adsp,
- struct platform_device *pdev,
- const char *name,
- irq_handler_t thread_fn)
-{
- int ret;
-
- ret = platform_get_irq_byname(pdev, name);
- if (ret < 0) {
- dev_err(&pdev->dev, "no %s IRQ defined\n", name);
- return ret;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, ret,
- NULL, thread_fn,
- IRQF_ONESHOT,
- "adsp", adsp);
- if (ret)
- dev_err(&pdev->dev, "request %s IRQ failed\n", name);
-
- return ret;
-}
-
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
struct device_node *node;
@@ -348,13 +278,9 @@ static int adsp_probe(struct platform_device *pdev)
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
adsp->pas_id = desc->pas_id;
- adsp->crash_reason_smem = desc->crash_reason_smem;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
platform_set_drvdata(pdev, adsp);
- init_completion(&adsp->start_done);
- init_completion(&adsp->stop_done);
-
ret = adsp_alloc_memory_region(adsp);
if (ret)
goto free_rproc;
@@ -367,37 +293,10 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- ret = adsp_request_irq(adsp, pdev, "wdog", adsp_wdog_interrupt);
- if (ret < 0)
- goto free_rproc;
- adsp->wdog_irq = ret;
-
- ret = adsp_request_irq(adsp, pdev, "fatal", adsp_fatal_interrupt);
- if (ret < 0)
- goto free_rproc;
- adsp->fatal_irq = ret;
-
- ret = adsp_request_irq(adsp, pdev, "ready", adsp_ready_interrupt);
- if (ret < 0)
- goto free_rproc;
- adsp->ready_irq = ret;
-
- ret = adsp_request_irq(adsp, pdev, "handover", adsp_handover_interrupt);
- if (ret < 0)
- goto free_rproc;
- adsp->handover_irq = ret;
-
- ret = adsp_request_irq(adsp, pdev, "stop-ack", adsp_stop_ack_interrupt);
- if (ret < 0)
- goto free_rproc;
- adsp->stop_ack_irq = ret;
-
- adsp->state = qcom_smem_state_get(&pdev->dev, "stop",
- &adsp->stop_bit);
- if (IS_ERR(adsp->state)) {
- ret = PTR_ERR(adsp->state);
+ ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
+ qcom_pas_handover);
+ if (ret)
goto free_rproc;
- }
qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
@@ -422,7 +321,6 @@ static int adsp_remove(struct platform_device *pdev)
{
struct qcom_adsp *adsp = platform_get_drvdata(pdev);
- qcom_smem_state_put(adsp->state);
rproc_del(adsp->rproc);
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index acfc99f82fb8..6f77840140bf 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -33,7 +33,7 @@
static BLOCKING_NOTIFIER_HEAD(ssr_notifiers);
-static int glink_subdev_probe(struct rproc_subdev *subdev)
+static int glink_subdev_start(struct rproc_subdev *subdev)
{
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
@@ -42,7 +42,7 @@ static int glink_subdev_probe(struct rproc_subdev *subdev)
return PTR_ERR_OR_ZERO(glink->edge);
}
-static void glink_subdev_remove(struct rproc_subdev *subdev, bool crashed)
+static void glink_subdev_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
@@ -64,7 +64,10 @@ void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink)
return;
glink->dev = dev;
- rproc_add_subdev(rproc, &glink->subdev, glink_subdev_probe, glink_subdev_remove);
+ glink->subdev.start = glink_subdev_start;
+ glink->subdev.stop = glink_subdev_stop;
+
+ rproc_add_subdev(rproc, &glink->subdev);
}
EXPORT_SYMBOL_GPL(qcom_add_glink_subdev);
@@ -126,7 +129,7 @@ int qcom_register_dump_segments(struct rproc *rproc,
}
EXPORT_SYMBOL_GPL(qcom_register_dump_segments);
-static int smd_subdev_probe(struct rproc_subdev *subdev)
+static int smd_subdev_start(struct rproc_subdev *subdev)
{
struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
@@ -135,7 +138,7 @@ static int smd_subdev_probe(struct rproc_subdev *subdev)
return PTR_ERR_OR_ZERO(smd->edge);
}
-static void smd_subdev_remove(struct rproc_subdev *subdev, bool crashed)
+static void smd_subdev_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
@@ -157,7 +160,10 @@ void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
return;
smd->dev = dev;
- rproc_add_subdev(rproc, &smd->subdev, smd_subdev_probe, smd_subdev_remove);
+ smd->subdev.start = smd_subdev_start;
+ smd->subdev.stop = smd_subdev_stop;
+
+ rproc_add_subdev(rproc, &smd->subdev);
}
EXPORT_SYMBOL_GPL(qcom_add_smd_subdev);
@@ -202,11 +208,6 @@ void qcom_unregister_ssr_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
-static int ssr_notify_start(struct rproc_subdev *subdev)
-{
- return 0;
-}
-
static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
@@ -227,8 +228,9 @@ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name)
{
ssr->name = ssr_name;
+ ssr->subdev.stop = ssr_notify_stop;
- rproc_add_subdev(rproc, &ssr->subdev, ssr_notify_start, ssr_notify_stop);
+ rproc_add_subdev(rproc, &ssr->subdev);
}
EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
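
All three Qualcomm subdevices now follow the same convention: populate only the hooks you need on the rproc_subdev, then register it. A minimal hedged sketch (my_start/my_stop are hypothetical names):

    static int my_start(struct rproc_subdev *subdev) { return 0; }
    static void my_stop(struct rproc_subdev *subdev, bool crashed) { }

    subdev->start = my_start;   /* optional */
    subdev->stop = my_stop;     /* optional */
    rproc_add_subdev(rproc, subdev);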
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
new file mode 100644
index 000000000000..61a760ee4aac
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Peripheral Image Loader for Q6V5
+ *
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/remoteproc.h>
+#include "qcom_q6v5.h"
+
+/**
+ * qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start
+ * @q6v5: reference to qcom_q6v5 context to be reinitialized
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5)
+{
+ reinit_completion(&q6v5->start_done);
+ reinit_completion(&q6v5->stop_done);
+
+ q6v5->running = true;
+ q6v5->handover_issued = false;
+
+ enable_irq(q6v5->handover_irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_prepare);
+
+/**
+ * qcom_q6v5_unprepare() - unprepare the qcom_q6v5 context after stop
+ * @q6v5: reference to qcom_q6v5 context to be unprepared
+ *
+ * Return: 0 on success, 1 if handover hasn't yet been called
+ */
+int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5)
+{
+ disable_irq(q6v5->handover_irq);
+
+ return !q6v5->handover_issued;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_unprepare);
+
+static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+ size_t len;
+ char *msg;
+
+ /* Sometimes the stop triggers a watchdog rather than a stop-ack */
+ if (!q6v5->running) {
+ complete(&q6v5->stop_done);
+ return IRQ_HANDLED;
+ }
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(q6v5->dev, "watchdog received: %s\n", msg);
+ else
+ dev_err(q6v5->dev, "watchdog without message\n");
+
+ rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+ size_t len;
+ char *msg;
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(q6v5->dev, "fatal error received: %s\n", msg);
+ else
+ dev_err(q6v5->dev, "fatal error without message\n");
+
+ rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_ready_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+
+ complete(&q6v5->start_done);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * qcom_q6v5_wait_for_start() - wait for remote processor start signal
+ * @q6v5: reference to qcom_q6v5 context
+ * @timeout: timeout to wait for the event, in jiffies
+ *
+ * qcom_q6v5_unprepare() should not be called when this function fails.
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout
+ */
+int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&q6v5->start_done, timeout);
+ if (!ret)
+ disable_irq(q6v5->handover_irq);
+
+ return !ret ? -ETIMEDOUT : 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start);
+
+static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+
+ if (q6v5->handover)
+ q6v5->handover(q6v5);
+
+ q6v5->handover_issued = true;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_stop_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+
+ complete(&q6v5->stop_done);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * qcom_q6v5_request_stop() - request the remote processor to stop
+ * @q6v5: reference to qcom_q6v5 context
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
+{
+ int ret;
+
+ q6v5->running = false;
+
+ qcom_smem_state_update_bits(q6v5->state,
+ BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+
+ ret = wait_for_completion_timeout(&q6v5->stop_done, 5 * HZ);
+
+ qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), 0);
+
+ return ret == 0 ? -ETIMEDOUT : 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop);
+
+/**
+ * qcom_q6v5_init() - initializer of the q6v5 common struct
+ * @q6v5: handle to be initialized
+ * @pdev: platform_device reference for acquiring resources
+ * @rproc: associated remoteproc instance
+ * @crash_reason: SMEM id for crash reason string, or 0 if none
+ * @handover: function to be called when proxy resources should be released
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
+ struct rproc *rproc, int crash_reason,
+ void (*handover)(struct qcom_q6v5 *q6v5))
+{
+ int ret;
+
+ q6v5->rproc = rproc;
+ q6v5->dev = &pdev->dev;
+ q6v5->crash_reason = crash_reason;
+ q6v5->handover = handover;
+
+ init_completion(&q6v5->start_done);
+ init_completion(&q6v5->stop_done);
+
+ q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
+ NULL, q6v5_wdog_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 wdog", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire wdog IRQ\n");
+ return ret;
+ }
+
+ q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
+ NULL, q6v5_fatal_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 fatal", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire fatal IRQ\n");
+ return ret;
+ }
+
+ q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
+ NULL, q6v5_ready_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 ready", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire ready IRQ\n");
+ return ret;
+ }
+
+ q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
+ NULL, q6v5_handover_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 handover", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire handover IRQ\n");
+ return ret;
+ }
+ disable_irq(q6v5->handover_irq);
+
+ q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
+ NULL, q6v5_stop_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 stop", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire stop-ack IRQ\n");
+ return ret;
+ }
+
+ q6v5->state = qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
+ if (IS_ERR(q6v5->state)) {
+ dev_err(&pdev->dev, "failed to acquire stop state\n");
+ return PTR_ERR(q6v5->state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Q6V5");
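
Taken together, the helper gives PIL drivers a shared start/stop handshake. A hedged sketch of the intended call sequence (driver-side names such as priv, MY_CRASH_REASON_SMEM and my_handover are hypothetical; the qcom_q6v5_* calls are the API added above):

    /* probe */
    ret = qcom_q6v5_init(&priv->q6v5, pdev, rproc,
                         MY_CRASH_REASON_SMEM, my_handover);

    /* rproc .start() */
    qcom_q6v5_prepare(&priv->q6v5);
    /* ... power up and boot the Hexagon core ... */
    ret = qcom_q6v5_wait_for_start(&priv->q6v5, msecs_to_jiffies(5000));

    /* rproc .stop() */
    ret = qcom_q6v5_request_stop(&priv->q6v5);
    /* ... shut the core down ... */
    if (qcom_q6v5_unprepare(&priv->q6v5))
            my_handover(&priv->q6v5);   /* handover never fired: drop proxy votes */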
diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h
new file mode 100644
index 000000000000..7ac92c1e0f49
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_Q6V5_H__
+#define __QCOM_Q6V5_H__
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+
+struct rproc;
+struct qcom_smem_state;
+
+struct qcom_q6v5 {
+ struct device *dev;
+ struct rproc *rproc;
+
+ struct qcom_smem_state *state;
+ unsigned stop_bit;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_irq;
+
+ bool handover_issued;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ int crash_reason;
+
+ bool running;
+
+ void (*handover)(struct qcom_q6v5 *q6v5);
+};
+
+int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
+ struct rproc *rproc, int crash_reason,
+ void (*handover)(struct qcom_q6v5 *q6v5));
+
+int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5);
+int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5);
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5);
+int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 2bf8e7c49f2a..d7a4b9eca5d2 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -30,12 +30,11 @@
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
-#include <linux/soc/qcom/smem.h>
-#include <linux/soc/qcom/smem_state.h>
#include <linux/iopoll.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
+#include "qcom_q6v5.h"
#include <linux/qcom_scm.h>
@@ -151,12 +150,7 @@ struct q6v5 {
struct reset_control *mss_restart;
- struct qcom_smem_state *state;
- unsigned stop_bit;
-
- int handover_irq;
-
- bool proxy_unvoted;
+ struct qcom_q6v5 q6v5;
struct clk *active_clks[8];
struct clk *reset_clks[4];
@@ -170,8 +164,6 @@ struct q6v5 {
int active_reg_count;
int proxy_reg_count;
- struct completion start_done;
- struct completion stop_done;
bool running;
phys_addr_t mba_phys;
@@ -798,9 +790,7 @@ static int q6v5_start(struct rproc *rproc)
int xfermemop_ret;
int ret;
- qproc->proxy_unvoted = false;
-
- enable_irq(qproc->handover_irq);
+ qcom_q6v5_prepare(&qproc->q6v5);
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
@@ -875,11 +865,9 @@ static int q6v5_start(struct rproc *rproc)
if (ret)
goto reclaim_mpss;
- ret = wait_for_completion_timeout(&qproc->start_done,
- msecs_to_jiffies(5000));
- if (ret == 0) {
+ ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
+ if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "start timed out\n");
- ret = -ETIMEDOUT;
goto reclaim_mpss;
}
@@ -933,7 +921,7 @@ disable_proxy_reg:
qproc->proxy_reg_count);
disable_irqs:
- disable_irq(qproc->handover_irq);
+ qcom_q6v5_unprepare(&qproc->q6v5);
return ret;
}
@@ -946,16 +934,10 @@ static int q6v5_stop(struct rproc *rproc)
qproc->running = false;
- qcom_smem_state_update_bits(qproc->state,
- BIT(qproc->stop_bit), BIT(qproc->stop_bit));
-
- ret = wait_for_completion_timeout(&qproc->stop_done,
- msecs_to_jiffies(5000));
- if (ret == 0)
+ ret = qcom_q6v5_request_stop(&qproc->q6v5);
+ if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
- qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);
-
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
@@ -976,9 +958,8 @@ static int q6v5_stop(struct rproc *rproc)
q6v5_reset_assert(qproc);
- disable_irq(qproc->handover_irq);
-
- if (!qproc->proxy_unvoted) {
+ ret = qcom_q6v5_unprepare(&qproc->q6v5);
+ if (ret) {
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
@@ -1014,74 +995,14 @@ static const struct rproc_ops q6v5_ops = {
.load = q6v5_load,
};
-static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
-{
- struct q6v5 *qproc = dev;
- size_t len;
- char *msg;
-
- /* Sometimes the stop triggers a watchdog rather than a stop-ack */
- if (!qproc->running) {
- complete(&qproc->stop_done);
- return IRQ_HANDLED;
- }
-
- msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
- if (!IS_ERR(msg) && len > 0 && msg[0])
- dev_err(qproc->dev, "watchdog received: %s\n", msg);
- else
- dev_err(qproc->dev, "watchdog without message\n");
-
- rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
-{
- struct q6v5 *qproc = dev;
- size_t len;
- char *msg;
-
- msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
- if (!IS_ERR(msg) && len > 0 && msg[0])
- dev_err(qproc->dev, "fatal error received: %s\n", msg);
- else
- dev_err(qproc->dev, "fatal error without message\n");
-
- rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t q6v5_ready_interrupt(int irq, void *dev)
-{
- struct q6v5 *qproc = dev;
-
- complete(&qproc->start_done);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
+static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
- struct q6v5 *qproc = dev;
+ struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
-
- qproc->proxy_unvoted = true;
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
-{
- struct q6v5 *qproc = dev;
-
- complete(&qproc->stop_done);
- return IRQ_HANDLED;
}
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
@@ -1154,30 +1075,6 @@ static int q6v5_init_reset(struct q6v5 *qproc)
return 0;
}
-static int q6v5_request_irq(struct q6v5 *qproc,
- struct platform_device *pdev,
- const char *name,
- irq_handler_t thread_fn)
-{
- int irq;
- int ret;
-
- irq = platform_get_irq_byname(pdev, name);
- if (irq < 0) {
- dev_err(&pdev->dev, "no %s IRQ defined\n", name);
- return irq;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, irq,
- NULL, thread_fn,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "q6v5", qproc);
- if (ret)
- dev_err(&pdev->dev, "request %s IRQ failed\n", name);
-
- return ret ? : irq;
-}
-
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
struct device_node *child;
@@ -1247,9 +1144,6 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->rproc = rproc;
platform_set_drvdata(pdev, qproc);
- init_completion(&qproc->start_done);
- init_completion(&qproc->stop_done);
-
ret = q6v5_init_mem(qproc, pdev);
if (ret)
goto free_rproc;
@@ -1305,33 +1199,12 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->version = desc->version;
qproc->has_alt_reset = desc->has_alt_reset;
qproc->need_mem_protection = desc->need_mem_protection;
- ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
- if (ret < 0)
- goto free_rproc;
- ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
- if (ret < 0)
- goto free_rproc;
-
- ret = q6v5_request_irq(qproc, pdev, "ready", q6v5_ready_interrupt);
- if (ret < 0)
- goto free_rproc;
-
- ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
- if (ret < 0)
- goto free_rproc;
- qproc->handover_irq = ret;
- disable_irq(qproc->handover_irq);
-
- ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
- if (ret < 0)
+ ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
+ qcom_msa_handover);
+ if (ret)
goto free_rproc;
- qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
- if (IS_ERR(qproc->state)) {
- ret = PTR_ERR(qproc->state);
- goto free_rproc;
- }
qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
@@ -1370,7 +1243,6 @@ static const struct rproc_hexagon_res sdm845_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
"xo",
- "axis2",
"prng",
NULL
},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
new file mode 100644
index 000000000000..f93e1e4a1cc0
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include "qcom_common.h"
+#include "qcom_q6v5.h"
+
+#define WCSS_CRASH_REASON 421
+
+/* Q6SS Register Offsets */
+#define Q6SS_RESET_REG 0x014
+#define Q6SS_GFMUX_CTL_REG 0x020
+#define Q6SS_PWR_CTL_REG 0x030
+#define Q6SS_MEM_PWR_CTL 0x0B0
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+
+#define HALT_ACK_TIMEOUT_MS 100
+
+/* Q6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENABLE BIT(2)
+
+/* Q6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENABLE BIT(1)
+
+/* Q6SS_PWR_CTL */
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+
+/* Q6SS parameters */
+#define Q6SS_LDO_BYP BIT(25)
+#define Q6SS_BHS_ON BIT(24)
+#define Q6SS_CLAMP_WL BIT(21)
+#define Q6SS_CLAMP_QMC_MEM BIT(22)
+#define HALT_CHECK_MAX_LOOPS 200
+#define Q6SS_XO_CBCR GENMASK(5, 3)
+
+/* Q6SS config/status registers */
+#define TCSR_GLOBAL_CFG0 0x0
+#define TCSR_GLOBAL_CFG1 0x4
+#define SSCAON_CONFIG 0x8
+#define SSCAON_STATUS 0xc
+#define Q6SS_BHS_STATUS 0x78
+#define Q6SS_RST_EVB 0x10
+
+#define BHS_EN_REST_ACK BIT(0)
+#define SSCAON_ENABLE BIT(13)
+#define SSCAON_BUS_EN BIT(15)
+#define SSCAON_BUS_MUX_MASK GENMASK(18, 16)
+
+#define MEM_BANKS 19
+#define TCSR_WCSS_CLK_MASK 0x1F
+#define TCSR_WCSS_CLK_ENABLE 0x14
+
+struct q6v5_wcss {
+ struct device *dev;
+
+ void __iomem *reg_base;
+ void __iomem *rmb_base;
+
+ struct regmap *halt_map;
+ u32 halt_q6;
+ u32 halt_wcss;
+ u32 halt_nc;
+
+ struct reset_control *wcss_aon_reset;
+ struct reset_control *wcss_reset;
+ struct reset_control *wcss_q6_reset;
+
+ struct qcom_q6v5 q6v5;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+};
+
+static int q6v5_wcss_reset(struct q6v5_wcss *wcss)
+{
+ int ret;
+ u32 val;
+ int i;
+
+ /* Assert resets, stop core */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+	/* BHS requires the XO CBCR to be enabled */
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val |= 0x1;
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+
+	/* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /* Put LDO in bypass mode */
+ val |= Q6SS_LDO_BYP;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Deassert Q6 compiler memory clamp */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_CLAMP_QMC_MEM;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* Turn on L1, L2, ETB and JU memories one at a time */
+ val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ for (i = MEM_BANKS; i >= 0; i--) {
+ val |= BIT(i);
+ writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ /*
+ * Read back value to ensure the write is done then
+ * wait for 1us for both memory peripheral and data
+ * array to turn on.
+ */
+ val |= readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ udelay(1);
+ }
+ /* Remove word line clamp */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_CLAMP_WL;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Bring core out of reset */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Turn on core clock */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* Start core execution */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ return 0;
+}
+
+static int q6v5_wcss_start(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ qcom_q6v5_prepare(&wcss->q6v5);
+
+ /* Release Q6 and WCSS reset */
+ ret = reset_control_deassert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(wcss->wcss_q6_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_q6_reset failed\n");
+ goto wcss_reset;
+ }
+
+ /* Lithium configuration - clock gating and bus arbitration */
+ ret = regmap_update_bits(wcss->halt_map,
+ wcss->halt_nc + TCSR_GLOBAL_CFG0,
+ TCSR_WCSS_CLK_MASK,
+ TCSR_WCSS_CLK_ENABLE);
+ if (ret)
+ goto wcss_q6_reset;
+
+ ret = regmap_update_bits(wcss->halt_map,
+ wcss->halt_nc + TCSR_GLOBAL_CFG1,
+ 1, 0);
+ if (ret)
+ goto wcss_q6_reset;
+
+ /* Write bootaddr to EVB so that Q6WCSS will jump there after reset */
+ writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
+
+ ret = q6v5_wcss_reset(wcss);
+ if (ret)
+ goto wcss_q6_reset;
+
+ ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
+ if (ret == -ETIMEDOUT)
+ dev_err(wcss->dev, "start timed out\n");
+
+ return ret;
+
+wcss_q6_reset:
+ reset_control_assert(wcss->wcss_q6_reset);
+
+wcss_reset:
+ reset_control_assert(wcss->wcss_reset);
+
+ return ret;
+}
+
+static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
+ struct regmap *halt_map,
+ u32 offset)
+{
+ unsigned long timeout;
+ unsigned int val;
+ int ret;
+
+ /* Check if we're already idle */
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (!ret && val)
+ return;
+
+ /* Assert halt request */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
+
+ /* Wait for halt */
+ timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
+ for (;;) {
+ ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
+ if (ret || val || time_after(jiffies, timeout))
+ break;
+
+ msleep(1);
+ }
+
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (ret || !val)
+		dev_err(wcss->dev, "port failed to halt\n");
+
+ /* Clear halt request (port will remain halted until reset) */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
+}
+
+static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss)
+{
+ int ret;
+ u32 val;
+
+ /* 1 - Assert WCSS/Q6 HALTREQ */
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
+
+ /* 2 - Enable WCSSAON_CONFIG */
+ val = readl(wcss->rmb_base + SSCAON_CONFIG);
+ val |= SSCAON_ENABLE;
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+ /* 3 - Set SSCAON_CONFIG */
+ val |= SSCAON_BUS_EN;
+ val &= ~SSCAON_BUS_MUX_MASK;
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+	/* 4 - Set SSCAON_CONFIG bit 1 */
+ val |= BIT(1);
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+ /* 5 - wait for SSCAON_STATUS */
+ ret = readl_poll_timeout(wcss->rmb_base + SSCAON_STATUS,
+ val, (val & 0xffff) == 0x400, 1000,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+		dev_err(wcss->dev,
+			"can't get SSCAON_STATUS (rc:%d)\n", ret);
+ return ret;
+ }
+
+	/* 6 - Assert WCSS_AON reset */
+ reset_control_assert(wcss->wcss_aon_reset);
+
+ /* 7 - Disable WCSSAON_CONFIG 13 */
+ val = readl(wcss->rmb_base + SSCAON_CONFIG);
+ val &= ~SSCAON_ENABLE;
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+	/* 8 - Assert WCSS reset */
+ reset_control_assert(wcss->wcss_reset);
+
+ return 0;
+}
+
+static int q6v5_q6_powerdown(struct q6v5_wcss *wcss)
+{
+ int ret;
+ u32 val;
+ int i;
+
+ /* 1 - Halt Q6 bus interface */
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_q6);
+
+ /* 2 - Disable Q6 Core clock */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* 3 - Clamp I/O */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_IO;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 4 - Turn on QDSS BHS */
+ val |= QDSS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 5 - Clear L2 data standby */
+ val &= ~Q6SS_L2DATA_STBY_N;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 6 - Clear sleep retention */
+ val &= ~Q6SS_SLP_RET_N;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* 7 - turn off Q6 memory foot/head switch one bank at a time */
+ for (i = 0; i < 20; i++) {
+ val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ val &= ~BIT(i);
+ writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ mdelay(1);
+ }
+
+ /* 8 - Assert QMC memory RTN */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_QMC_MEM;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* 9 - Turn off BHS */
+ val &= ~Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /* 10 - Wait till BHS Reset is done */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_BHS_STATUS,
+ val, !(val & BHS_EN_REST_ACK), 1000,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev, "BHS_STATUS not OFF (rc:%d)\n", ret);
+ return ret;
+ }
+
+ /* 11 - Assert WCSS reset */
+ reset_control_assert(wcss->wcss_reset);
+
+ /* 12 - Assert Q6 reset */
+ reset_control_assert(wcss->wcss_q6_reset);
+
+ return 0;
+}
+
+static int q6v5_wcss_stop(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ /* WCSS powerdown */
+ ret = qcom_q6v5_request_stop(&wcss->q6v5);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "timed out on wait\n");
+ return ret;
+ }
+
+ ret = q6v5_wcss_powerdown(wcss);
+ if (ret)
+ return ret;
+
+ /* Q6 Power down */
+ ret = q6v5_q6_powerdown(wcss);
+ if (ret)
+ return ret;
+
+ qcom_q6v5_unprepare(&wcss->q6v5);
+
+ return 0;
+}
+
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int offset;
+
+ offset = da - wcss->mem_reloc;
+ if (offset < 0 || offset + len > wcss->mem_size)
+ return NULL;
+
+ return wcss->mem_region + offset;
+}
+
+static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+
+ return qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
+ 0, wcss->mem_region, wcss->mem_phys,
+ wcss->mem_size, &wcss->mem_reloc);
+}
+
+static const struct rproc_ops q6v5_wcss_ops = {
+ .start = q6v5_wcss_start,
+ .stop = q6v5_wcss_stop,
+ .da_to_va = q6v5_wcss_da_to_va,
+ .load = q6v5_wcss_load,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+};
+
+static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss)
+{
+ struct device *dev = wcss->dev;
+
+ wcss->wcss_aon_reset = devm_reset_control_get(dev, "wcss_aon_reset");
+ if (IS_ERR(wcss->wcss_aon_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_aon_reset\n");
+ return PTR_ERR(wcss->wcss_aon_reset);
+ }
+
+ wcss->wcss_reset = devm_reset_control_get(dev, "wcss_reset");
+ if (IS_ERR(wcss->wcss_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_reset\n");
+ return PTR_ERR(wcss->wcss_reset);
+ }
+
+ wcss->wcss_q6_reset = devm_reset_control_get(dev, "wcss_q6_reset");
+ if (IS_ERR(wcss->wcss_q6_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
+ return PTR_ERR(wcss->wcss_q6_reset);
+ }
+
+ return 0;
+}
+
+static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
+ struct platform_device *pdev)
+{
+ struct of_phandle_args args;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
+ wcss->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcss->reg_base))
+ return PTR_ERR(wcss->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
+ wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcss->rmb_base))
+ return PTR_ERR(wcss->rmb_base);
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,halt-regs", 3, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ wcss->halt_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(wcss->halt_map))
+ return PTR_ERR(wcss->halt_map);
+
+ wcss->halt_q6 = args.args[0];
+ wcss->halt_wcss = args.args[1];
+ wcss->halt_nc = args.args[2];
+
+ return 0;
+}
+
+static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
+{
+ struct reserved_mem *rmem = NULL;
+ struct device_node *node;
+ struct device *dev = wcss->dev;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (!rmem) {
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ wcss->mem_phys = rmem->base;
+ wcss->mem_reloc = rmem->base;
+ wcss->mem_size = rmem->size;
+ wcss->mem_region = devm_ioremap_wc(dev, wcss->mem_phys, wcss->mem_size);
+ if (!wcss->mem_region) {
+ dev_err(dev, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int q6v5_wcss_probe(struct platform_device *pdev)
+{
+ struct q6v5_wcss *wcss;
+ struct rproc *rproc;
+ int ret;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_wcss_ops,
+ "IPQ8074/q6_fw.mdt", sizeof(*wcss));
+ if (!rproc) {
+ dev_err(&pdev->dev, "failed to allocate rproc\n");
+ return -ENOMEM;
+ }
+
+ wcss = rproc->priv;
+ wcss->dev = &pdev->dev;
+
+ ret = q6v5_wcss_init_mmio(wcss, pdev);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_alloc_memory_region(wcss);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_wcss_init_reset(wcss);
+ if (ret)
+ goto free_rproc;
+
+ ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, WCSS_CRASH_REASON, NULL);
+ if (ret)
+ goto free_rproc;
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ return 0;
+
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int q6v5_wcss_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ rproc_del(rproc);
+ rproc_free(rproc);
+
+ return 0;
+}
+
+static const struct of_device_id q6v5_wcss_of_match[] = {
+ { .compatible = "qcom,ipq8074-wcss-pil" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
+
+static struct platform_driver q6v5_wcss_driver = {
+ .probe = q6v5_wcss_probe,
+ .remove = q6v5_wcss_remove,
+ .driver = {
+ .name = "qcom-q6v5-wcss-pil",
+ .of_match_table = q6v5_wcss_of_match,
+ },
+};
+module_platform_driver(q6v5_wcss_driver);
+
+MODULE_DESCRIPTION("Hexagon WCSS Peripheral Image Loader");
+MODULE_LICENSE("GPL v2");
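
Several steps in this driver wait on status bits via readl_poll_timeout(addr, val, cond, sleep_us, timeout_us), which returns 0 once cond holds or -ETIMEDOUT once timeout_us elapses. For illustration, a rough open-coded equivalent of the CLKOFF wait in q6v5_wcss_reset() (a sketch of the semantics, not the macro's exact implementation):

    unsigned long timeout = jiffies + usecs_to_jiffies(HALT_CHECK_MAX_LOOPS);
    u32 val;

    for (;;) {
            val = readl(wcss->reg_base + Q6SS_XO_CBCR);
            if (!(val & BIT(31)))           /* CLKOFF low: clock running */
                    return 0;
            if (time_after(jiffies, timeout))
                    return -ETIMEDOUT;
            udelay(1);                      /* sleep_us between reads */
    }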
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index f085545d7da5..e976a602b015 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -469,7 +469,10 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
- rproc_add_subdev(rproc, &sysmon->subdev, sysmon_start, sysmon_stop);
+ sysmon->subdev.start = sysmon_start;
+ sysmon->subdev.stop = sysmon_stop;
+
+ rproc_add_subdev(rproc, &sysmon->subdev);
sysmon->nb.notifier_call = sysmon_notify;
blocking_notifier_chain_register(&sysmon_notifiers, &sysmon->nb);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index a9609d971f7f..aa6206706fe3 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -241,7 +241,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
if (notifyid > rproc->max_notifyid)
rproc->max_notifyid = notifyid;
- dev_dbg(dev, "vring%d: va %p dma %pad size 0x%x idr %d\n",
+ dev_dbg(dev, "vring%d: va %pK dma %pad size 0x%x idr %d\n",
i, va, &dma, size, notifyid);
rvring->va = va;
@@ -301,14 +301,14 @@ void rproc_free_vring(struct rproc_vring *rvring)
rsc->vring[idx].notifyid = -1;
}
-static int rproc_vdev_do_probe(struct rproc_subdev *subdev)
+static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
return rproc_add_virtio_dev(rvdev, rvdev->id);
}
-static void rproc_vdev_do_remove(struct rproc_subdev *subdev, bool crashed)
+static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
@@ -399,8 +399,10 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
list_add_tail(&rvdev->node, &rproc->rvdevs);
- rproc_add_subdev(rproc, &rvdev->subdev,
- rproc_vdev_do_probe, rproc_vdev_do_remove);
+ rvdev->subdev.start = rproc_vdev_do_start;
+ rvdev->subdev.stop = rproc_vdev_do_stop;
+
+ rproc_add_subdev(rproc, &rvdev->subdev);
return 0;
@@ -497,7 +499,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
rproc->num_traces++;
- dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n",
+ dev_dbg(dev, "%s added: va %pK, da 0x%x, len 0x%x\n",
name, ptr, rsc->da, rsc->len);
return 0;
@@ -635,7 +637,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
goto free_carv;
}
- dev_dbg(dev, "carveout va %p, dma %pad, len 0x%x\n",
+ dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
va, &dma, rsc->len);
/*
@@ -774,32 +776,72 @@ static int rproc_handle_resources(struct rproc *rproc,
return ret;
}
-static int rproc_probe_subdevices(struct rproc *rproc)
+static int rproc_prepare_subdevices(struct rproc *rproc)
{
struct rproc_subdev *subdev;
int ret;
list_for_each_entry(subdev, &rproc->subdevs, node) {
- ret = subdev->probe(subdev);
- if (ret)
- goto unroll_registration;
+ if (subdev->prepare) {
+ ret = subdev->prepare(subdev);
+ if (ret)
+ goto unroll_preparation;
+ }
+ }
+
+ return 0;
+
+unroll_preparation:
+ list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->unprepare)
+ subdev->unprepare(subdev);
+ }
+
+ return ret;
+}
+
+static int rproc_start_subdevices(struct rproc *rproc)
+{
+ struct rproc_subdev *subdev;
+ int ret;
+
+ list_for_each_entry(subdev, &rproc->subdevs, node) {
+ if (subdev->start) {
+ ret = subdev->start(subdev);
+ if (ret)
+ goto unroll_registration;
+ }
}
return 0;
unroll_registration:
- list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node)
- subdev->remove(subdev, true);
+ list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->stop)
+ subdev->stop(subdev, true);
+ }
return ret;
}
-static void rproc_remove_subdevices(struct rproc *rproc, bool crashed)
+static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
{
struct rproc_subdev *subdev;
- list_for_each_entry_reverse(subdev, &rproc->subdevs, node)
- subdev->remove(subdev, crashed);
+ list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->stop)
+ subdev->stop(subdev, crashed);
+ }
+}
+
+static void rproc_unprepare_subdevices(struct rproc *rproc)
+{
+ struct rproc_subdev *subdev;
+
+ list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->unprepare)
+ subdev->unprepare(subdev);
+ }
}
/**
@@ -894,20 +936,26 @@ static int rproc_start(struct rproc *rproc, const struct firmware *fw)
rproc->table_ptr = loaded_table;
}
+ ret = rproc_prepare_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to prepare subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto reset_table_ptr;
+ }
+
/* power up the remote processor */
ret = rproc->ops->start(rproc);
if (ret) {
dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
- return ret;
+ goto unprepare_subdevices;
}
- /* probe any subdevices for the remote processor */
- ret = rproc_probe_subdevices(rproc);
+ /* Start any subdevices for the remote processor */
+ ret = rproc_start_subdevices(rproc);
if (ret) {
dev_err(dev, "failed to probe subdevices for %s: %d\n",
rproc->name, ret);
- rproc->ops->stop(rproc);
- return ret;
+ goto stop_rproc;
}
rproc->state = RPROC_RUNNING;
@@ -915,6 +963,15 @@ static int rproc_start(struct rproc *rproc, const struct firmware *fw)
dev_info(dev, "remote processor %s is now up\n", rproc->name);
return 0;
+
+stop_rproc:
+ rproc->ops->stop(rproc);
+unprepare_subdevices:
+ rproc_unprepare_subdevices(rproc);
+reset_table_ptr:
+ rproc->table_ptr = rproc->cached_table;
+
+ return ret;
}
/*
@@ -1014,8 +1071,8 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
struct device *dev = &rproc->dev;
int ret;
- /* remove any subdevices for the remote processor */
- rproc_remove_subdevices(rproc, crashed);
+ /* Stop any subdevices for the remote processor */
+ rproc_stop_subdevices(rproc, crashed);
/* the installed resource table is no longer accessible */
rproc->table_ptr = rproc->cached_table;
@@ -1027,6 +1084,8 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
return ret;
}
+ rproc_unprepare_subdevices(rproc);
+
rproc->state = RPROC_OFFLINE;
dev_info(dev, "stopped remote processor %s\n", rproc->name);
@@ -1657,17 +1716,11 @@ EXPORT_SYMBOL(rproc_del);
* rproc_add_subdev() - add a subdevice to a remoteproc
* @rproc: rproc handle to add the subdevice to
* @subdev: subdev handle to register
- * @probe: function to call when the rproc boots
- * @remove: function to call when the rproc shuts down
+ *
+ * Caller is responsible for populating optional subdevice function pointers.
*/
-void rproc_add_subdev(struct rproc *rproc,
- struct rproc_subdev *subdev,
- int (*probe)(struct rproc_subdev *subdev),
- void (*remove)(struct rproc_subdev *subdev, bool crashed))
+void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
- subdev->probe = probe;
- subdev->remove = remove;
-
list_add_tail(&subdev->node, &rproc->subdevs);
}
EXPORT_SYMBOL(rproc_add_subdev);
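
With all four hooks now optional members of struct rproc_subdev, a subdevice that needs the full lifecycle looks roughly like this (hypothetical names; ordering is prepare -> ops->start -> start on boot, and stop -> ops->stop -> unprepare on shutdown):

    static int my_prepare(struct rproc_subdev *s) { return 0; }  /* before ops->start() */
    static int my_start(struct rproc_subdev *s) { return 0; }    /* after the core is up */
    static void my_stop(struct rproc_subdev *s, bool crashed) { }
    static void my_unprepare(struct rproc_subdev *s) { }         /* after ops->stop() */

    subdev->prepare = my_prepare;
    subdev->start = my_start;
    subdev->stop = my_stop;
    subdev->unprepare = my_unprepare;
    rproc_add_subdev(rproc, subdev);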
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index a20488336aa0..a5c29f2764a3 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -231,7 +231,7 @@ static int rproc_rsc_table_show(struct seq_file *seq, void *p)
}
break;
default:
- seq_printf(seq, "Unknown resource type found: %d [hdr: %p]\n",
+ seq_printf(seq, "Unknown resource type found: %d [hdr: %pK]\n",
hdr->type, hdr);
break;
}
@@ -260,7 +260,7 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p)
list_for_each_entry(carveout, &rproc->carveouts, node) {
seq_puts(seq, "Carveout memory entry:\n");
- seq_printf(seq, "\tVirtual address: %p\n", carveout->va);
+ seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len);
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index b0633fd4c041..bbecd44df7e8 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -96,7 +96,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
size = vring_size(len, rvring->align);
memset(addr, 0, size);
- dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
+ dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
id, addr, len, rvring->notifyid);
/*
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 1ffb1f0c43d6..d711d9430a4f 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -195,7 +195,8 @@ static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va);
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%pK\n",
+ da, len, va);
return va;
}
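
The %p -> %pK conversions in the hunks above follow the usual hardening convention: %pK output is governed by the kptr_restrict sysctl, so these debug prints no longer expose raw kernel addresses to unprivileged readers. For example:

    /* prints a real, hashed or zeroed address depending on kptr_restrict */
    dev_dbg(dev, "vring va %pK dma %pad\n", va, &dma);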
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index c0b292be1b72..13d28fdbdbb5 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -73,6 +73,13 @@ config RESET_MESON
help
This enables the reset driver for Amlogic Meson SoCs.
+config RESET_MESON_AUDIO_ARB
+ tristate "Meson Audio Memory Arbiter Reset Driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+	  This enables the reset driver for the Audio Memory Arbiter of
+	  Amlogic's A113-based SoCs.
+
config RESET_OXNAS
bool
@@ -82,6 +89,15 @@ config RESET_PISTACHIO
help
This enables the reset driver for ImgTec Pistachio SoCs.
+config RESET_QCOM_AOSS
+ bool "Qcom AOSS Reset Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ This enables the AOSS (always on subsystem) reset driver
+ for Qualcomm SDM845 SoCs. Say Y if you want to control
+ reset signals provided by AOSS for Modem, Venus, ADSP,
+	  GPU, Camera, Wireless and Display subsystems. Otherwise, say N.
+
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
@@ -138,6 +154,16 @@ config RESET_UNIPHIER
Say Y if you want to control reset signals provided by System Control
block, Media I/O block, Peripheral Block.
+config RESET_UNIPHIER_USB3
+ tristate "USB3 reset driver for UniPhier SoCs"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ default ARCH_UNIPHIER
+ select RESET_SIMPLE
+ help
+ Support for the USB3 core reset on UniPhier SoCs.
+ Say Y if you want to control reset signals provided by
+	  the USB3 glue layer.
+
config RESET_ZYNQ
bool "ZYNQ Reset Driver" if COMPILE_TEST
default ARCH_ZYNQ
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index c1261dcfe9ad..4243c38228e2 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -12,13 +12,16 @@ obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
+obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
+obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index 2674880e5492..a7455916e396 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reboot.h>
diff --git a/drivers/reset/reset-axs10x.c b/drivers/reset/reset-axs10x.c
index afb298e46bd9..a854ef41364d 100644
--- a/drivers/reset/reset-axs10x.c
+++ b/drivers/reset/reset-axs10x.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 4db177bc89bc..97d9f08271c5 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -16,6 +16,7 @@
*/
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/regmap.h>
@@ -80,7 +81,7 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
{
struct imx7_src *imx7src = to_imx7_src(rcdev);
const struct imx7_src_signal *signal = &imx7_src_signals[id];
- unsigned int value = 0;
+ unsigned int value = assert ? signal->bit : 0;
switch (id) {
case IMX7_RESET_PCIEPHY:
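The one-line change above is the whole fix: the function ends by passing value to regmap_update_bits(), and with value hard-coded to 0 an assert silently cleared the reset bit instead of setting it, so asserts were no-ops. A minimal sketch of the intended semantics; the "example_" names are illustrative, regmap_update_bits() is the real API:

#include <linux/regmap.h>
#include <linux/types.h>

static int example_reset_set(struct regmap *map, unsigned int offset,
			     unsigned int bit, bool assert)
{
	/* With the fix, the bit is set on assert and cleared on deassert. */
	unsigned int value = assert ? bit : 0;

	/* Read-modify-write: only the bits in the mask argument change. */
	return regmap_update_bits(map, offset, bit, value);
}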
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
new file mode 100644
index 000000000000..91751617b37a
--- /dev/null
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/reset/amlogic,meson-axg-audio-arb.h>
+
+struct meson_audio_arb_data {
+ struct reset_controller_dev rstc;
+ void __iomem *regs;
+ struct clk *clk;
+ const unsigned int *reset_bits;
+ spinlock_t lock;
+};
+
+#define ARB_GENERAL_BIT 31
+
+static const unsigned int axg_audio_arb_reset_bits[] = {
+ [AXG_ARB_TODDR_A] = 0,
+ [AXG_ARB_TODDR_B] = 1,
+ [AXG_ARB_TODDR_C] = 2,
+ [AXG_ARB_FRDDR_A] = 4,
+ [AXG_ARB_FRDDR_B] = 5,
+ [AXG_ARB_FRDDR_C] = 6,
+};
+
+static int meson_audio_arb_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ u32 val;
+ struct meson_audio_arb_data *arb =
+ container_of(rcdev, struct meson_audio_arb_data, rstc);
+
+ spin_lock(&arb->lock);
+ val = readl(arb->regs);
+
+ if (assert)
+ val &= ~BIT(arb->reset_bits[id]);
+ else
+ val |= BIT(arb->reset_bits[id]);
+
+ writel(val, arb->regs);
+ spin_unlock(&arb->lock);
+
+ return 0;
+}
+
+static int meson_audio_arb_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ u32 val;
+ struct meson_audio_arb_data *arb =
+ container_of(rcdev, struct meson_audio_arb_data, rstc);
+
+ val = readl(arb->regs);
+
+ return !(val & BIT(arb->reset_bits[id]));
+}
+
+static int meson_audio_arb_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson_audio_arb_update(rcdev, id, true);
+}
+
+static int meson_audio_arb_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson_audio_arb_update(rcdev, id, false);
+}
+
+static const struct reset_control_ops meson_audio_arb_rstc_ops = {
+ .assert = meson_audio_arb_assert,
+ .deassert = meson_audio_arb_deassert,
+ .status = meson_audio_arb_status,
+};
+
+static const struct of_device_id meson_audio_arb_of_match[] = {
+ { .compatible = "amlogic,meson-axg-audio-arb", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_audio_arb_of_match);
+
+static int meson_audio_arb_remove(struct platform_device *pdev)
+{
+ struct meson_audio_arb_data *arb = platform_get_drvdata(pdev);
+
+ /* Disable all access */
+ spin_lock(&arb->lock);
+ writel(0, arb->regs);
+ spin_unlock(&arb->lock);
+
+ clk_disable_unprepare(arb->clk);
+
+ return 0;
+}
+
+static int meson_audio_arb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_audio_arb_data *arb;
+ struct resource *res;
+ int ret;
+
+ arb = devm_kzalloc(dev, sizeof(*arb), GFP_KERNEL);
+ if (!arb)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, arb);
+
+ arb->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(arb->clk)) {
+ if (PTR_ERR(arb->clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(arb->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ arb->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(arb->regs))
+ return PTR_ERR(arb->regs);
+
+ spin_lock_init(&arb->lock);
+ arb->reset_bits = axg_audio_arb_reset_bits;
+ arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
+ arb->rstc.ops = &meson_audio_arb_rstc_ops;
+ arb->rstc.of_node = dev->of_node;
+
+ /*
+ * Enable the general bit:
+ * in the initial state, all memory interfaces are disabled
+ * and only the general bit is set.
+ */
+ ret = clk_prepare_enable(arb->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable arb clock\n");
+ return ret;
+ }
+ writel(BIT(ARB_GENERAL_BIT), arb->regs);
+
+ /* Register reset controller */
+ ret = devm_reset_controller_register(dev, &arb->rstc);
+ if (ret) {
+ dev_err(dev, "failed to register arb reset controller\n");
+ meson_audio_arb_remove(pdev);
+ }
+
+ return ret;
+}
+
+static struct platform_driver meson_audio_arb_pdrv = {
+ .probe = meson_audio_arb_probe,
+ .remove = meson_audio_arb_remove,
+ .driver = {
+ .name = "meson-audio-arb-reset",
+ .of_match_table = meson_audio_arb_of_match,
+ },
+};
+module_platform_driver(meson_audio_arb_pdrv);
+
+MODULE_DESCRIPTION("Amlogic A113 Audio Memory Arbiter");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-qcom-aoss.c b/drivers/reset/reset-qcom-aoss.c
new file mode 100644
index 000000000000..36db96750450
--- /dev/null
+++ b/drivers/reset/reset-qcom-aoss.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <dt-bindings/reset/qcom,sdm845-aoss.h>
+
+struct qcom_aoss_reset_map {
+ unsigned int reg;
+};
+
+struct qcom_aoss_desc {
+ const struct qcom_aoss_reset_map *resets;
+ size_t num_resets;
+};
+
+struct qcom_aoss_reset_data {
+ struct reset_controller_dev rcdev;
+ void __iomem *base;
+ const struct qcom_aoss_desc *desc;
+};
+
+static const struct qcom_aoss_reset_map sdm845_aoss_resets[] = {
+ [AOSS_CC_MSS_RESTART] = {0x10000},
+ [AOSS_CC_CAMSS_RESTART] = {0x11000},
+ [AOSS_CC_VENUS_RESTART] = {0x12000},
+ [AOSS_CC_GPU_RESTART] = {0x13000},
+ [AOSS_CC_DISPSS_RESTART] = {0x14000},
+ [AOSS_CC_WCSS_RESTART] = {0x20000},
+ [AOSS_CC_LPASS_RESTART] = {0x30000},
+};
+
+static const struct qcom_aoss_desc sdm845_aoss_desc = {
+ .resets = sdm845_aoss_resets,
+ .num_resets = ARRAY_SIZE(sdm845_aoss_resets),
+};
+
+static inline struct qcom_aoss_reset_data *to_qcom_aoss_reset_data(
+ struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct qcom_aoss_reset_data, rcdev);
+}
+
+static int qcom_aoss_control_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ struct qcom_aoss_reset_data *data = to_qcom_aoss_reset_data(rcdev);
+ const struct qcom_aoss_reset_map *map = &data->desc->resets[idx];
+
+ writel(1, data->base + map->reg);
+ /* Wait 6 32kHz sleep cycles for reset */
+ usleep_range(200, 300);
+ return 0;
+}
+
+static int qcom_aoss_control_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ struct qcom_aoss_reset_data *data = to_qcom_aoss_reset_data(rcdev);
+ const struct qcom_aoss_reset_map *map = &data->desc->resets[idx];
+
+ writel(0, data->base + map->reg);
+ /* Wait 6 32kHz sleep cycles for reset */
+ usleep_range(200, 300);
+ return 0;
+}
+
+static int qcom_aoss_control_reset(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ qcom_aoss_control_assert(rcdev, idx);
+
+ return qcom_aoss_control_deassert(rcdev, idx);
+}
+
+static const struct reset_control_ops qcom_aoss_reset_ops = {
+ .reset = qcom_aoss_control_reset,
+ .assert = qcom_aoss_control_assert,
+ .deassert = qcom_aoss_control_deassert,
+};
+
+static int qcom_aoss_reset_probe(struct platform_device *pdev)
+{
+ struct qcom_aoss_reset_data *data;
+ struct device *dev = &pdev->dev;
+ const struct qcom_aoss_desc *desc;
+ struct resource *res;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->desc = desc;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.ops = &qcom_aoss_reset_ops;
+ data->rcdev.nr_resets = desc->num_resets;
+ data->rcdev.of_node = dev->of_node;
+
+ return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static const struct of_device_id qcom_aoss_reset_of_match[] = {
+ { .compatible = "qcom,sdm845-aoss-cc", .data = &sdm845_aoss_desc },
+ {}
+};
+
+static struct platform_driver qcom_aoss_reset_driver = {
+ .probe = qcom_aoss_reset_probe,
+ .driver = {
+ .name = "qcom_aoss_reset",
+ .of_match_table = qcom_aoss_reset_of_match,
+ },
+};
+
+builtin_platform_driver(qcom_aoss_reset_driver);
+
+MODULE_DESCRIPTION("Qualcomm AOSS Reset Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index f7ce8910a392..a91107fc9e27 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -87,6 +87,7 @@ const struct reset_control_ops reset_simple_ops = {
.deassert = reset_simple_deassert,
.status = reset_simple_status,
};
+EXPORT_SYMBOL_GPL(reset_simple_ops);
/**
* struct reset_simple_devdata - simple reset controller properties
diff --git a/drivers/reset/reset-uniphier-usb3.c b/drivers/reset/reset-uniphier-usb3.c
new file mode 100644
index 000000000000..ffa1b19b594d
--- /dev/null
+++ b/drivers/reset/reset-uniphier-usb3.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// reset-uniphier-usb3.c - USB3 reset driver for UniPhier
+// Copyright 2018 Socionext Inc.
+// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "reset-simple.h"
+
+#define MAX_CLKS 2
+#define MAX_RSTS 2
+
+struct uniphier_usb3_reset_soc_data {
+ int nclks;
+ const char * const *clock_names;
+ int nrsts;
+ const char * const *reset_names;
+};
+
+struct uniphier_usb3_reset_priv {
+ struct clk_bulk_data clk[MAX_CLKS];
+ struct reset_control *rst[MAX_RSTS];
+ struct reset_simple_data rdata;
+ const struct uniphier_usb3_reset_soc_data *data;
+};
+
+static int uniphier_usb3_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_usb3_reset_priv *priv;
+ struct resource *res;
+ resource_size_t size;
+ const char *name;
+ int i, ret, nr;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data || priv->data->nclks > MAX_CLKS ||
+ priv->data->nrsts > MAX_RSTS))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ size = resource_size(res);
+ priv->rdata.membase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->rdata.membase))
+ return PTR_ERR(priv->rdata.membase);
+
+ for (i = 0; i < priv->data->nclks; i++)
+ priv->clk[i].id = priv->data->clock_names[i];
+ ret = devm_clk_bulk_get(dev, priv->data->nclks, priv->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < priv->data->nrsts; i++) {
+ name = priv->data->reset_names[i];
+ priv->rst[i] = devm_reset_control_get_shared(dev, name);
+ if (IS_ERR(priv->rst[i]))
+ return PTR_ERR(priv->rst[i]);
+ }
+
+ ret = clk_bulk_prepare_enable(priv->data->nclks, priv->clk);
+ if (ret)
+ return ret;
+
+ for (nr = 0; nr < priv->data->nrsts; nr++) {
+ ret = reset_control_deassert(priv->rst[nr]);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ spin_lock_init(&priv->rdata.lock);
+ priv->rdata.rcdev.owner = THIS_MODULE;
+ priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE;
+ priv->rdata.rcdev.ops = &reset_simple_ops;
+ priv->rdata.rcdev.of_node = dev->of_node;
+ priv->rdata.active_low = true;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_reset_controller_register(dev, &priv->rdata.rcdev);
+ if (ret)
+ goto out_rst_assert;
+
+ return 0;
+
+out_rst_assert:
+ while (nr--)
+ reset_control_assert(priv->rst[nr]);
+
+ clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+
+ return ret;
+}
+
+static int uniphier_usb3_reset_remove(struct platform_device *pdev)
+{
+ struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < priv->data->nrsts; i++)
+ reset_control_assert(priv->rst[i]);
+
+ clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+
+ return 0;
+}
+
+static const char * const uniphier_pro4_clock_reset_names[] = {
+ "gio", "link",
+};
+
+static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = {
+ .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
+ .clock_names = uniphier_pro4_clock_reset_names,
+ .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
+ .reset_names = uniphier_pro4_clock_reset_names,
+};
+
+static const char * const uniphier_pxs2_clock_reset_names[] = {
+ "link",
+};
+
+static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = {
+ .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
+ .clock_names = uniphier_pxs2_clock_reset_names,
+ .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
+ .reset_names = uniphier_pxs2_clock_reset_names,
+};
+
+static const struct of_device_id uniphier_usb3_reset_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-usb3-reset",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-reset",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-reset",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-reset",
+ .data = &uniphier_pxs2_data,
+ },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match);
+
+static struct platform_driver uniphier_usb3_reset_driver = {
+ .probe = uniphier_usb3_reset_probe,
+ .remove = uniphier_usb3_reset_remove,
+ .driver = {
+ .name = "uniphier-usb3-reset",
+ .of_match_table = uniphier_usb3_reset_match,
+ },
+};
+module_platform_driver(uniphier_usb3_reset_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier USB3 Reset Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index e9030ff1bf2f..5605745663ae 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -202,6 +202,12 @@ static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
#define UNIPHIER_PERI_RESET_FI2C(id, ch) \
UNIPHIER_RESETX((id), 0x114, 24 + (ch))
+#define UNIPHIER_PERI_RESET_SCSSI(id) \
+ UNIPHIER_RESETX((id), 0x110, 17)
+
+#define UNIPHIER_PERI_RESET_MCSSI(id) \
+ UNIPHIER_RESETX((id), 0x114, 14)
+
static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_UART(0, 0),
UNIPHIER_PERI_RESET_UART(1, 1),
@@ -212,6 +218,7 @@ static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_I2C(6, 2),
UNIPHIER_PERI_RESET_I2C(7, 3),
UNIPHIER_PERI_RESET_I2C(8, 4),
+ UNIPHIER_PERI_RESET_SCSSI(11),
UNIPHIER_RESET_END,
};
@@ -227,6 +234,8 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_FI2C(8, 4),
UNIPHIER_PERI_RESET_FI2C(9, 5),
UNIPHIER_PERI_RESET_FI2C(10, 6),
+ UNIPHIER_PERI_RESET_SCSSI(11),
+ UNIPHIER_PERI_RESET_MCSSI(12),
UNIPHIER_RESET_END,
};
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index f505f58b797d..e2ce4e638258 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -40,7 +40,7 @@ struct glink_msg {
* struct glink_defer_cmd - deferred incoming control message
* @node: list node
* @msg: message header
- * data: payload of the message
+ * @data: payload of the message
*
* Copy of a received control message, to be added to @rx_queue and processed
* by @rx_work of @qcom_glink.
@@ -56,12 +56,13 @@ struct glink_defer_cmd {
* struct glink_core_rx_intent - RX intent
* RX intent
*
- * data: pointer to the data (may be NULL for zero-copy)
- * id: remote or local intent ID
- * size: size of the original intent (do not modify)
- * reuse: To mark if the intent can be reused after first use
- * in_use: To mark if intent is already in use for the channel
- * offset: next write offset (initially 0)
+ * @data: pointer to the data (may be NULL for zero-copy)
+ * @id: remote or local intent ID
+ * @size: size of the original intent (do not modify)
+ * @reuse: To mark if the intent can be reused after first use
+ * @in_use: To mark if intent is already in use for the channel
+ * @offset: next write offset (initially 0)
+ * @node: list node
*/
struct glink_core_rx_intent {
void *data;
@@ -89,10 +90,14 @@ struct glink_core_rx_intent {
* @idr_lock: synchronizes @lcids and @rcids modifications
* @lcids: idr of all channels with a known local channel id
* @rcids: idr of all channels with a known remote channel id
+ * @features: remote features
+ * @intentless: flag to indicate that the transport does not use rx intents
*/
struct qcom_glink {
struct device *dev;
+ const char *name;
+
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
@@ -512,8 +517,8 @@ static void qcom_glink_rx_done(struct qcom_glink *glink,
* qcom_glink_receive_version() - receive version/features from remote system
*
* @glink: pointer to transport interface
- * @r_version: remote version
- * @r_features: remote features
+ * @version: remote version
+ * @features: remote features
*
* This function is called in response to a remote-initiated version/feature
* negotiation sequence.
@@ -538,8 +543,8 @@ static void qcom_glink_receive_version(struct qcom_glink *glink,
* qcom_glink_receive_version_ack() - receive negotiation ack from remote system
*
* @glink: pointer to transport interface
- * @r_version: remote version response
- * @r_features: remote features response
+ * @version: remote version response
+ * @features: remote features response
*
* This function is called in response to a local-initiated version/feature
* negotiation sequence and is the counter-offer from the remote side based
@@ -567,7 +572,7 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
/**
* qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
- wire format and transmit
+ * wire format and transmit
* @glink: The transport to transmit on.
* @channel: The glink channel
* @granted: The request response to encode.
@@ -594,7 +599,7 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
* transmit
* @glink: The transport to transmit on.
* @channel: The local channel
- * @size: The intent to pass on to remote.
+ * @intent: The intent to pass on to remote.
*
* Return: 0 on success or standard Linux error code.
*/
@@ -603,11 +608,11 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink,
struct glink_core_rx_intent *intent)
{
struct command {
- u16 id;
- u16 lcid;
- u32 count;
- u32 size;
- u32 liid;
+ __le16 id;
+ __le16 lcid;
+ __le32 count;
+ __le32 size;
+ __le32 liid;
} __packed;
struct command cmd;
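With the fields declared __le16/__le32, CPU-endian values must pass through the byte-order helpers when the struct is filled; the function continues along these lines (a sketch of the follow-on code, which is not shown in this hunk):

	/* cpu_to_le*() keeps the wire format fixed regardless of host
	 * endianness; "cmd" is the struct command declared above. */
	cmd.id = cpu_to_le16(RPM_CMD_INTENT);
	cmd.lcid = cpu_to_le16(channel->lcid);
	cmd.count = cpu_to_le32(1);
	cmd.size = cpu_to_le32(intent->size);
	cmd.liid = cpu_to_le32(intent->id);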
@@ -698,9 +703,9 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
/**
* qcom_glink_handle_intent_req() - Receive a request for rx_intent
* from remote side
- * if_ptr: Pointer to the transport interface
- * rcid: Remote channel ID
- * size: size of the intent
+ * @glink: Pointer to the transport interface
+ * @cid: Remote channel ID
+ * @size: size of the intent
*
* The function searches for the local channel to which the request for
* rx_intent has arrived and allocates and notifies the remote back
@@ -1572,6 +1577,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
idr_init(&glink->lcids);
idr_init(&glink->rcids);
+ ret = of_property_read_string(dev->of_node, "label", &glink->name);
+ if (ret < 0)
+ glink->name = dev->of_node->name;
+
glink->mbox_client.dev = dev;
glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 6437bbeebc91..8da83a4ebadc 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>
@@ -93,6 +94,8 @@ static const struct {
/**
* struct qcom_smd_edge - representing a remote processor
+ * @dev: device associated with this edge
+ * @name: name of this edge
* @of_node: of_node handle for information related to this edge
* @edge_id: identifier of this edge
* @remote_pid: identifier of remote processor
@@ -106,6 +109,7 @@ static const struct {
* @channels_lock: guard for modifications of @channels
* @allocated: array of bitmaps representing already allocated channels
* @smem_available: last available amount of smem triggering a channel scan
+ * @new_channel_event: wait queue for new channel events
* @scan_work: work item for discovering new channels
* @state_work: work item for edge state changes
*/
@@ -172,10 +176,12 @@ struct qcom_smd_endpoint {
/**
* struct qcom_smd_channel - smd channel struct
* @edge: qcom_smd_edge this channel is living on
- * @qsdev: reference to a associated smd client device
+ * @qsept: reference to an associated smd endpoint
+ * @registered: flag to indicate if the channel is registered
* @name: name of the channel
* @state: local state of the channel
* @remote_state: remote state of the channel
+ * @state_change_event: wait queue for channel state change events
* @info: byte aligned outgoing/incoming channel info
* @info_word: word aligned outgoing/incoming channel info
* @tx_lock: lock to make writes to the channel mutually exclusive
@@ -187,6 +193,7 @@ struct qcom_smd_endpoint {
* @cb: callback function registered for this channel
* @recv_lock: guard for rx info modifications and cb pointer
* @pkt_size: size of the currently handled packet
+ * @drvdata: driver private data
* @list: list entry for @channels in qcom_smd_edge
*/
struct qcom_smd_channel {
@@ -726,6 +733,7 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
* @channel: channel handle
* @data: buffer of data to write
* @len: number of bytes to write
+ * @wait: flag to indicate if the write can wait
*
* This is a blocking write of len bytes into the channel's tx ring buffer and
* signal the remote end. It will sleep until there is enough space available
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 76a4477c6364..a76b963a7e50 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -285,6 +285,7 @@ static const struct file_operations rpmsg_eptdev_fops = {
.write = rpmsg_eptdev_write,
.poll = rpmsg_eptdev_poll,
.unlocked_ioctl = rpmsg_eptdev_ioctl,
+ .compat_ioctl = rpmsg_eptdev_ioctl,
};
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
@@ -445,6 +446,7 @@ static const struct file_operations rpmsg_ctrldev_fops = {
.open = rpmsg_ctrldev_open,
.release = rpmsg_ctrldev_release,
.unlocked_ioctl = rpmsg_ctrldev_ioctl,
+ .compat_ioctl = rpmsg_ctrldev_ioctl,
};
static void rpmsg_ctrldev_release_device(struct device *dev)
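Reusing the native handler for .compat_ioctl is safe here because the rpmsg ioctl payload uses only fixed-width types, so 32- and 64-bit user space see the same structure layout. Reproduced for reference from include/uapi/linux/rpmsg.h as of this kernel:

#include <linux/types.h>

struct rpmsg_endpoint_info {
	char name[32];
	__u32 src;
	__u32 dst;
};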
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index b714a543a91d..8122807db380 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/rpmsg.h>
#include <linux/of_device.h>
+#include <linux/pm_domain.h>
#include <linux/slab.h>
#include "rpmsg_internal.h"
@@ -449,6 +450,10 @@ static int rpmsg_dev_probe(struct device *dev)
struct rpmsg_endpoint *ept = NULL;
int err;
+ err = dev_pm_domain_attach(dev, true);
+ if (err)
+ goto out;
+
if (rpdrv->callback) {
strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
chinfo.src = rpdev->src;
@@ -490,6 +495,8 @@ static int rpmsg_dev_remove(struct device *dev)
rpdrv->remove(rpdev);
+ dev_pm_domain_detach(dev, true);
+
if (rpdev->ept)
rpmsg_destroy_ept(rpdev->ept);
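dev_pm_domain_attach()/dev_pm_domain_detach() bracket the driver's probe and remove so the device's power domain is up before .probe() runs and released after .remove(). A bus-agnostic sketch of the pairing; the "example_" functions are illustrative:

#include <linux/device.h>
#include <linux/pm_domain.h>

static int example_driver_probe(struct device *dev)
{
	return 0;	/* hypothetical driver-specific probe */
}

static int example_bus_probe(struct device *dev)
{
	int err;

	/* "true": power the domain on now; may return -EPROBE_DEFER. */
	err = dev_pm_domain_attach(dev, true);
	if (err)
		return err;

	err = example_driver_probe(dev);
	if (err)
		dev_pm_domain_detach(dev, true);

	return err;
}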
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a2ba5db36145..7d7be60a2413 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -244,15 +244,6 @@ config RTC_DRV_DS1307
This driver can also be built as a module. If so, the module
will be called rtc-ds1307.
-config RTC_DRV_DS1307_HWMON
- bool "HWMON support for rtc-ds1307"
- depends on RTC_DRV_DS1307 && HWMON
- depends on !(RTC_DRV_DS1307=y && HWMON=m)
- default y
- help
- Say Y here if you want to expose temperature sensor data on
- rtc-ds1307 (only DS3231)
-
config RTC_DRV_DS1307_CENTURY
bool "Century bit support for rtc-ds1307"
depends on RTC_DRV_DS1307
@@ -1027,18 +1018,6 @@ config RTC_DS1685_PROC_REGS
Unless you are debugging this driver, choose N.
-config RTC_DS1685_SYSFS_REGS
- bool "SysFS access to RTC register bits"
- depends on RTC_DRV_DS1685_FAMILY && SYSFS
- help
- Enable this to provide access to the RTC control register bits
- in /sys. Some of the bits are read-write, others are read-only.
-
- Keep in mind that reading Control C's bits automatically clears
- all pending IRQ flags - this can cause lost interrupts.
-
- If you know that you need access to these bits, choose Y, Else N.
-
config RTC_DRV_DS1742
tristate "Maxim/Dallas DS1742/1743"
depends on HAS_IOMEM
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index d37588f08055..0fca4d74c76b 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -68,7 +68,7 @@ static int rtc_suspend(struct device *dev)
return 0;
}
- getnstimeofday64(&old_system);
+ ktime_get_real_ts64(&old_system);
old_rtc.tv_sec = rtc_tm_to_time64(&tm);
@@ -110,7 +110,7 @@ static int rtc_resume(struct device *dev)
return 0;
/* snapshot the current rtc and system time at resume */
- getnstimeofday64(&new_system);
+ ktime_get_real_ts64(&new_system);
err = rtc_read_time(rtc, &tm);
if (err < 0) {
pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
@@ -172,7 +172,6 @@ static struct rtc_device *rtc_allocate_device(void)
mutex_init(&rtc->ops_lock);
spin_lock_init(&rtc->irq_lock);
- spin_lock_init(&rtc->irq_task_lock);
init_waitqueue_head(&rtc->irq_queue);
/* Init timerqueue */
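ktime_get_real_ts64() is the y2038-safe replacement for getnstimeofday64(), so the suspend/resume bookkeeping works on struct timespec64 throughout. A sketch of the delta computation rtc_resume() performs, assuming the four snapshots have already been taken:

#include <linux/time64.h>

/* Suspension length = RTC delta minus the elapsed system-clock delta. */
static struct timespec64 example_sleep_delta(struct timespec64 old_rtc,
					     struct timespec64 new_rtc,
					     struct timespec64 old_system,
					     struct timespec64 new_system)
{
	struct timespec64 sleep_time;

	sleep_time = timespec64_sub(new_rtc, old_rtc);
	sleep_time = timespec64_sub(sleep_time,
				    timespec64_sub(new_system, old_system));
	return sleep_time;
}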
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 6d4012dd6922..3d577e259e91 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
/* full-function RTCs won't have such missing fields */
- if (rtc_valid_tm(&alarm->time) == 0)
+ if (rtc_valid_tm(&alarm->time) == 0) {
+ rtc_add_offset(rtc, &alarm->time);
return 0;
+ }
/* get the "after" timestamp, to detect wrapped fields */
err = rtc_read_time(rtc, &now);
@@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
if (err)
return err;
- rtc_subtract_offset(rtc, &alarm->time);
scheduled = rtc_tm_to_time64(&alarm->time);
/* Make sure we're not setting alarms in the past */
@@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
* over right here, before we set the alarm.
*/
+ rtc_subtract_offset(rtc, &alarm->time);
+
if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->set_alarm)
@@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
mutex_unlock(&rtc->ops_lock);
- rtc_add_offset(rtc, &alarm->time);
return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
@@ -605,12 +607,6 @@ void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
spin_unlock_irqrestore(&rtc->irq_lock, flags);
- /* call the task func */
- spin_lock_irqsave(&rtc->irq_task_lock, flags);
- if (rtc->irq_task)
- rtc->irq_task->func(rtc->irq_task->private_data);
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
wake_up_interruptible(&rtc->irq_queue);
kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
@@ -719,39 +715,6 @@ void rtc_class_close(struct rtc_device *rtc)
}
EXPORT_SYMBOL_GPL(rtc_class_close);
-int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
-{
- int retval = -EBUSY;
-
- if (task == NULL || task->func == NULL)
- return -EINVAL;
-
- /* Cannot register while the char dev is in use */
- if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
- return -EBUSY;
-
- spin_lock_irq(&rtc->irq_task_lock);
- if (rtc->irq_task == NULL) {
- rtc->irq_task = task;
- retval = 0;
- }
- spin_unlock_irq(&rtc->irq_task_lock);
-
- clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(rtc_irq_register);
-
-void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
-{
- spin_lock_irq(&rtc->irq_task_lock);
- if (rtc->irq_task == task)
- rtc->irq_task = NULL;
- spin_unlock_irq(&rtc->irq_task_lock);
-}
-EXPORT_SYMBOL_GPL(rtc_irq_unregister);
-
static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
{
/*
@@ -783,71 +746,45 @@ static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
* Context: any
*
* Note that rtc_irq_set_freq() should previously have been used to
- * specify the desired frequency of periodic IRQ task->func() callbacks.
+ * specify the desired frequency of the periodic IRQ.
*/
-int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled)
+int rtc_irq_set_state(struct rtc_device *rtc, int enabled)
{
int err = 0;
- unsigned long flags;
-retry:
- spin_lock_irqsave(&rtc->irq_task_lock, flags);
- if (rtc->irq_task != NULL && task == NULL)
- err = -EBUSY;
- else if (rtc->irq_task != task)
- err = -EACCES;
- else {
- if (rtc_update_hrtimer(rtc, enabled) < 0) {
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
- cpu_relax();
- goto retry;
- }
- rtc->pie_enabled = enabled;
- }
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+ while (rtc_update_hrtimer(rtc, enabled) < 0)
+ cpu_relax();
+
+ rtc->pie_enabled = enabled;
trace_rtc_irq_set_state(enabled, err);
return err;
}
-EXPORT_SYMBOL_GPL(rtc_irq_set_state);
/**
* rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ
* @rtc: the rtc device
- * @task: currently registered with rtc_irq_register()
- * @freq: positive frequency with which task->func() will be called
+ * @freq: positive frequency
* Context: any
*
* Note that rtc_irq_set_state() is used to enable or disable the
* periodic IRQs.
*/
-int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
+int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
{
int err = 0;
- unsigned long flags;
if (freq <= 0 || freq > RTC_MAX_FREQ)
return -EINVAL;
-retry:
- spin_lock_irqsave(&rtc->irq_task_lock, flags);
- if (rtc->irq_task != NULL && task == NULL)
- err = -EBUSY;
- else if (rtc->irq_task != task)
- err = -EACCES;
- else {
- rtc->irq_freq = freq;
- if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) {
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
- cpu_relax();
- goto retry;
- }
- }
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+
+ rtc->irq_freq = freq;
+ while (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0)
+ cpu_relax();
trace_rtc_irq_set_freq(freq, err);
return err;
}
-EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
/**
* rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
@@ -977,8 +914,8 @@ again:
timerqueue_del(&rtc->timerqueue, &timer->node);
trace_rtc_timer_dequeue(timer);
timer->enabled = 0;
- if (timer->task.func)
- timer->task.func(timer->task.private_data);
+ if (timer->func)
+ timer->func(timer->private_data);
trace_rtc_timer_fired(timer);
/* Re-add/fwd periodic timers */
@@ -1033,8 +970,8 @@ void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data)
{
timerqueue_init(&timer->node);
timer->enabled = 0;
- timer->task.func = f;
- timer->task.private_data = data;
+ timer->func = f;
+ timer->private_data = data;
}
/* rtc_timer_start - Sets an rtc_timer to fire in the future
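With struct rtc_task gone, periodic callbacks hang directly off struct rtc_timer via the func/private_data members seen above. A sketch of the new initialization; the callback name is hypothetical, rtc_timer_init() is the real API:

#include <linux/rtc.h>

static void example_timer_cb(void *data)
{
	/* Runs from the rtc timerqueue when this timer fires. */
}

static void example_setup(struct rtc_timer *timer, void *data)
{
	/* func/private_data now live on the rtc_timer itself. */
	rtc_timer_init(timer, example_timer_cb, data);
}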
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 1e4978c96ffd..bde53c8ccee2 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -30,6 +30,8 @@
#define RTC_IRQ_FREQ_1HZ BIT(2)
#define RTC_CCR 0x18
#define RTC_CCR_MODE BIT(15)
+#define RTC_CONF_TEST 0x1C
+#define RTC_NOMINAL_TIMING BIT(13)
#define RTC_TIME 0xC
#define RTC_ALARM1 0x10
@@ -75,6 +77,7 @@ struct armada38x_rtc {
void __iomem *regs_soc;
spinlock_t lock;
int irq;
+ bool initialized;
struct value_to_freq *val_to_freq;
struct armada38x_rtc_data *data;
};
@@ -226,6 +229,23 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
return 0;
}
+static void armada38x_rtc_reset(struct armada38x_rtc *rtc)
+{
+ u32 reg;
+
+ reg = rtc->data->read_rtc_reg(rtc, RTC_CONF_TEST);
+ /* If bits [7:0] are non-zero, assume RTC was uninitialized */
+ if (reg & 0xff) {
+ rtc_delayed_write(0, rtc, RTC_CONF_TEST);
+ msleep(500); /* Oscillator startup time */
+ rtc_delayed_write(0, rtc, RTC_TIME);
+ rtc_delayed_write(SOC_RTC_ALARM1 | SOC_RTC_ALARM2, rtc,
+ RTC_STATUS);
+ rtc_delayed_write(RTC_NOMINAL_TIMING, rtc, RTC_CCR);
+ }
+ rtc->initialized = true;
+}
+
static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct armada38x_rtc *rtc = dev_get_drvdata(dev);
@@ -237,6 +257,9 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
if (ret)
goto out;
+ if (!rtc->initialized)
+ armada38x_rtc_reset(rtc);
+
spin_lock_irqsave(&rtc->lock, flags);
rtc_delayed_write(time, rtc, RTC_TIME);
spin_unlock_irqrestore(&rtc->lock, flags);
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index d768f6747961..113493b52149 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -162,6 +162,10 @@ static int bq4802_probe(struct platform_device *pdev)
} else if (p->r->flags & IORESOURCE_MEM) {
p->regs = devm_ioremap(&pdev->dev, p->r->start,
resource_size(p->r));
+ if (!p->regs){
+ err = -ENOMEM;
+ goto out;
+ }
p->read = bq4802_read_mem;
p->write = bq4802_write_mem;
} else {
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 2fc517498a5d..fc5cf5c44ae7 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -8,6 +8,7 @@
*/
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/rtc.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 0abf98983e13..ccc17a2e293d 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -40,9 +40,23 @@ static inline void rtc_proc_del_device(struct rtc_device *rtc)
#ifdef CONFIG_RTC_INTF_SYSFS
const struct attribute_group **rtc_get_dev_attribute_groups(void);
+int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp);
+int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps);
#else
static inline const struct attribute_group **rtc_get_dev_attribute_groups(void)
{
return NULL;
}
+
+static inline
+int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp)
+{
+ return 0;
+}
+
+static inline
+int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
+{
+ return 0;
+}
#endif
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index a8856f2b9bc2..6b477174a82f 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -24,6 +24,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index efa221e8bc22..43d962a9c210 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -341,11 +341,11 @@ static long rtc_dev_ioctl(struct file *file,
return rtc_set_time(rtc, &tm);
case RTC_PIE_ON:
- err = rtc_irq_set_state(rtc, NULL, 1);
+ err = rtc_irq_set_state(rtc, 1);
break;
case RTC_PIE_OFF:
- err = rtc_irq_set_state(rtc, NULL, 0);
+ err = rtc_irq_set_state(rtc, 0);
break;
case RTC_AIE_ON:
@@ -365,7 +365,7 @@ static long rtc_dev_ioctl(struct file *file,
return rtc_update_irq_enable(rtc, 0);
case RTC_IRQP_SET:
- err = rtc_irq_set_freq(rtc, NULL, arg);
+ err = rtc_irq_set_freq(rtc, arg);
break;
case RTC_IRQP_READ:
@@ -427,7 +427,7 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
/* Keep ioctl until all drivers are converted */
rtc_dev_ioctl(file, RTC_UIE_OFF, 0);
rtc_update_irq_enable(rtc, 0);
- rtc_irq_set_state(rtc, NULL, 0);
+ rtc_irq_set_state(rtc, 0);
clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
return 0;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index e9ec4160d7f6..4b2b4627daeb 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -44,6 +44,7 @@ enum ds_type {
ds_3231,
m41t0,
m41t00,
+ m41t11,
mcp794xx,
rx_8025,
rx_8130,
@@ -227,6 +228,11 @@ static const struct chip_desc chips[last_ds_type] = {
.irq_handler = rx8130_irq,
.rtc_ops = &rx8130_rtc_ops,
},
+ [m41t11] = {
+ /* this is battery backed SRAM */
+ .nvram_offset = 8,
+ .nvram_size = 56,
+ },
[mcp794xx] = {
.alarm = 1,
/* this is battery backed SRAM */
@@ -249,6 +255,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "ds3231", ds_3231 },
{ "m41t0", m41t0 },
{ "m41t00", m41t00 },
+ { "m41t11", m41t11 },
{ "mcp7940x", mcp794xx },
{ "mcp7941x", mcp794xx },
{ "pt7c4338", ds_1307 },
@@ -299,13 +306,17 @@ static const struct of_device_id ds1307_of_match[] = {
},
{
.compatible = "st,m41t0",
- .data = (void *)m41t00
+ .data = (void *)m41t0
},
{
.compatible = "st,m41t00",
.data = (void *)m41t00
},
{
+ .compatible = "st,m41t11",
+ .data = (void *)m41t11
+ },
+ {
.compatible = "microchip,mcp7940x",
.data = (void *)mcp794xx
},
@@ -347,6 +358,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = {
{ .id = "DS3231", .driver_data = ds_3231 },
{ .id = "M41T0", .driver_data = m41t0 },
{ .id = "M41T00", .driver_data = m41t00 },
+ { .id = "M41T11", .driver_data = m41t11 },
{ .id = "MCP7940X", .driver_data = mcp794xx },
{ .id = "MCP7941X", .driver_data = mcp794xx },
{ .id = "PT7C4338", .driver_data = ds_1307 },
@@ -1030,7 +1042,7 @@ static u8 ds1307_trickle_init(struct ds1307 *ds1307,
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_RTC_DRV_DS1307_HWMON
+#if IS_REACHABLE(CONFIG_HWMON)
/*
* Temperature sensor support for ds3231 devices.
@@ -1576,6 +1588,7 @@ read_rtc:
case ds_1307:
case m41t0:
case m41t00:
+ case m41t11:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1307_BIT_CH) {
regmap_write(ds1307->regmap, DS1307_REG_SECS, 0);
@@ -1641,6 +1654,7 @@ read_rtc:
case ds_1340:
case m41t0:
case m41t00:
+ case m41t11:
/*
* NOTE: ignores century bits; fix before deploying
* systems that will run through year 2100.
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 5c0db6c8134c..6f39f683a98c 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -1188,552 +1188,6 @@ ds1685_rtc_sysfs_misc_grp = {
.attrs = ds1685_rtc_sysfs_misc_attrs,
};
-#ifdef CONFIG_RTC_DS1685_SYSFS_REGS
-/**
- * struct ds1685_rtc_ctrl_regs.
- * @name: char pointer for the bit name.
- * @reg: control register the bit is in.
- * @bit: the bit's offset in the register.
- */
-struct ds1685_rtc_ctrl_regs {
- const char *name;
- const u8 reg;
- const u8 bit;
-};
-
-/*
- * Ctrl register bit lookup table.
- */
-static const struct ds1685_rtc_ctrl_regs
-ds1685_ctrl_regs_table[] = {
- { "uip", RTC_CTRL_A, RTC_CTRL_A_UIP },
- { "dv2", RTC_CTRL_A, RTC_CTRL_A_DV2 },
- { "dv1", RTC_CTRL_A, RTC_CTRL_A_DV1 },
- { "dv0", RTC_CTRL_A, RTC_CTRL_A_DV0 },
- { "rs3", RTC_CTRL_A, RTC_CTRL_A_RS3 },
- { "rs2", RTC_CTRL_A, RTC_CTRL_A_RS2 },
- { "rs1", RTC_CTRL_A, RTC_CTRL_A_RS1 },
- { "rs0", RTC_CTRL_A, RTC_CTRL_A_RS0 },
- { "set", RTC_CTRL_B, RTC_CTRL_B_SET },
- { "pie", RTC_CTRL_B, RTC_CTRL_B_PIE },
- { "aie", RTC_CTRL_B, RTC_CTRL_B_AIE },
- { "uie", RTC_CTRL_B, RTC_CTRL_B_UIE },
- { "sqwe", RTC_CTRL_B, RTC_CTRL_B_SQWE },
- { "dm", RTC_CTRL_B, RTC_CTRL_B_DM },
- { "2412", RTC_CTRL_B, RTC_CTRL_B_2412 },
- { "dse", RTC_CTRL_B, RTC_CTRL_B_DSE },
- { "irqf", RTC_CTRL_C, RTC_CTRL_C_IRQF },
- { "pf", RTC_CTRL_C, RTC_CTRL_C_PF },
- { "af", RTC_CTRL_C, RTC_CTRL_C_AF },
- { "uf", RTC_CTRL_C, RTC_CTRL_C_UF },
- { "vrt", RTC_CTRL_D, RTC_CTRL_D_VRT },
- { "vrt2", RTC_EXT_CTRL_4A, RTC_CTRL_4A_VRT2 },
- { "incr", RTC_EXT_CTRL_4A, RTC_CTRL_4A_INCR },
- { "pab", RTC_EXT_CTRL_4A, RTC_CTRL_4A_PAB },
- { "rf", RTC_EXT_CTRL_4A, RTC_CTRL_4A_RF },
- { "wf", RTC_EXT_CTRL_4A, RTC_CTRL_4A_WF },
- { "kf", RTC_EXT_CTRL_4A, RTC_CTRL_4A_KF },
-#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
- { "bme", RTC_EXT_CTRL_4A, RTC_CTRL_4A_BME },
-#endif
- { "abe", RTC_EXT_CTRL_4B, RTC_CTRL_4B_ABE },
- { "e32k", RTC_EXT_CTRL_4B, RTC_CTRL_4B_E32K },
- { "cs", RTC_EXT_CTRL_4B, RTC_CTRL_4B_CS },
- { "rce", RTC_EXT_CTRL_4B, RTC_CTRL_4B_RCE },
- { "prs", RTC_EXT_CTRL_4B, RTC_CTRL_4B_PRS },
- { "rie", RTC_EXT_CTRL_4B, RTC_CTRL_4B_RIE },
- { "wie", RTC_EXT_CTRL_4B, RTC_CTRL_4B_WIE },
- { "kse", RTC_EXT_CTRL_4B, RTC_CTRL_4B_KSE },
- { NULL, 0, 0 },
-};
-
-/**
- * ds1685_rtc_sysfs_ctrl_regs_lookup - ctrl register bit lookup function.
- * @name: ctrl register bit to look up in ds1685_ctrl_regs_table.
- */
-static const struct ds1685_rtc_ctrl_regs*
-ds1685_rtc_sysfs_ctrl_regs_lookup(const char *name)
-{
- const struct ds1685_rtc_ctrl_regs *p = ds1685_ctrl_regs_table;
-
- for (; p->name != NULL; ++p)
- if (strcmp(p->name, name) == 0)
- return p;
-
- return NULL;
-}
-
-/**
- * ds1685_rtc_sysfs_ctrl_regs_show - reads a ctrl register bit via sysfs.
- * @dev: pointer to device structure.
- * @attr: pointer to device_attribute structure.
- * @buf: pointer to char array to hold the output.
- */
-static ssize_t
-ds1685_rtc_sysfs_ctrl_regs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 tmp;
- struct ds1685_priv *rtc = dev_get_drvdata(dev);
- const struct ds1685_rtc_ctrl_regs *reg_info =
- ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
-
- /* Make sure we actually matched something. */
- if (!reg_info)
- return -EINVAL;
-
- /* No spinlock during a read -- mutex is already held. */
- ds1685_rtc_switch_to_bank1(rtc);
- tmp = rtc->read(rtc, reg_info->reg) & reg_info->bit;
- ds1685_rtc_switch_to_bank0(rtc);
-
- return sprintf(buf, "%d\n", (tmp ? 1 : 0));
-}
-
-/**
- * ds1685_rtc_sysfs_ctrl_regs_store - writes a ctrl register bit via sysfs.
- * @dev: pointer to device structure.
- * @attr: pointer to device_attribute structure.
- * @buf: pointer to char array to hold the output.
- * @count: number of bytes written.
- */
-static ssize_t
-ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ds1685_priv *rtc = dev_get_drvdata(dev);
- u8 reg = 0, bit = 0, tmp;
- unsigned long flags;
- long int val = 0;
- const struct ds1685_rtc_ctrl_regs *reg_info =
- ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
-
- /* We only accept numbers. */
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
-
- /* bits are binary, 0 or 1 only. */
- if ((val != 0) && (val != 1))
- return -ERANGE;
-
- /* Make sure we actually matched something. */
- if (!reg_info)
- return -EINVAL;
-
- reg = reg_info->reg;
- bit = reg_info->bit;
-
- /* Safe to spinlock during a write. */
- ds1685_rtc_begin_ctrl_access(rtc, &flags);
- tmp = rtc->read(rtc, reg);
- rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
- ds1685_rtc_end_ctrl_access(rtc, flags);
-
- return count;
-}
-
-/**
- * DS1685_RTC_SYSFS_CTRL_REG_RO - device_attribute for read-only register bit.
- * @bit: bit to read.
- */
-#define DS1685_RTC_SYSFS_CTRL_REG_RO(bit) \
- static DEVICE_ATTR(bit, S_IRUGO, \
- ds1685_rtc_sysfs_ctrl_regs_show, NULL)
-
-/**
- * DS1685_RTC_SYSFS_CTRL_REG_RW - device_attribute for read-write register bit.
- * @bit: bit to read or write.
- */
-#define DS1685_RTC_SYSFS_CTRL_REG_RW(bit) \
- static DEVICE_ATTR(bit, S_IRUGO | S_IWUSR, \
- ds1685_rtc_sysfs_ctrl_regs_show, \
- ds1685_rtc_sysfs_ctrl_regs_store)
-
-/*
- * Control Register A bits.
- */
-DS1685_RTC_SYSFS_CTRL_REG_RO(uip);
-DS1685_RTC_SYSFS_CTRL_REG_RW(dv2);
-DS1685_RTC_SYSFS_CTRL_REG_RW(dv1);
-DS1685_RTC_SYSFS_CTRL_REG_RO(dv0);
-DS1685_RTC_SYSFS_CTRL_REG_RW(rs3);
-DS1685_RTC_SYSFS_CTRL_REG_RW(rs2);
-DS1685_RTC_SYSFS_CTRL_REG_RW(rs1);
-DS1685_RTC_SYSFS_CTRL_REG_RW(rs0);
-
-static struct attribute*
-ds1685_rtc_sysfs_ctrla_attrs[] = {
- &dev_attr_uip.attr,
- &dev_attr_dv2.attr,
- &dev_attr_dv1.attr,
- &dev_attr_dv0.attr,
- &dev_attr_rs3.attr,
- &dev_attr_rs2.attr,
- &dev_attr_rs1.attr,
- &dev_attr_rs0.attr,
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_ctrla_grp = {
- .name = "ctrla",
- .attrs = ds1685_rtc_sysfs_ctrla_attrs,
-};
-
-
-/*
- * Control Register B bits.
- */
-DS1685_RTC_SYSFS_CTRL_REG_RO(set);
-DS1685_RTC_SYSFS_CTRL_REG_RW(pie);
-DS1685_RTC_SYSFS_CTRL_REG_RW(aie);
-DS1685_RTC_SYSFS_CTRL_REG_RW(uie);
-DS1685_RTC_SYSFS_CTRL_REG_RW(sqwe);
-DS1685_RTC_SYSFS_CTRL_REG_RO(dm);
-DS1685_RTC_SYSFS_CTRL_REG_RO(2412);
-DS1685_RTC_SYSFS_CTRL_REG_RO(dse);
-
-static struct attribute*
-ds1685_rtc_sysfs_ctrlb_attrs[] = {
- &dev_attr_set.attr,
- &dev_attr_pie.attr,
- &dev_attr_aie.attr,
- &dev_attr_uie.attr,
- &dev_attr_sqwe.attr,
- &dev_attr_dm.attr,
- &dev_attr_2412.attr,
- &dev_attr_dse.attr,
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_ctrlb_grp = {
- .name = "ctrlb",
- .attrs = ds1685_rtc_sysfs_ctrlb_attrs,
-};
-
-/*
- * Control Register C bits.
- *
- * Reading Control C clears these bits! Reading them individually can
- * possibly cause an interrupt to be missed. Use the /proc interface
- * to see all the bits in this register simultaneously.
- */
-DS1685_RTC_SYSFS_CTRL_REG_RO(irqf);
-DS1685_RTC_SYSFS_CTRL_REG_RO(pf);
-DS1685_RTC_SYSFS_CTRL_REG_RO(af);
-DS1685_RTC_SYSFS_CTRL_REG_RO(uf);
-
-static struct attribute*
-ds1685_rtc_sysfs_ctrlc_attrs[] = {
- &dev_attr_irqf.attr,
- &dev_attr_pf.attr,
- &dev_attr_af.attr,
- &dev_attr_uf.attr,
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_ctrlc_grp = {
- .name = "ctrlc",
- .attrs = ds1685_rtc_sysfs_ctrlc_attrs,
-};
-
-/*
- * Control Register D bits.
- */
-DS1685_RTC_SYSFS_CTRL_REG_RO(vrt);
-
-static struct attribute*
-ds1685_rtc_sysfs_ctrld_attrs[] = {
- &dev_attr_vrt.attr,
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_ctrld_grp = {
- .name = "ctrld",
- .attrs = ds1685_rtc_sysfs_ctrld_attrs,
-};
-
-/*
- * Control Register 4A bits.
- */
-DS1685_RTC_SYSFS_CTRL_REG_RO(vrt2);
-DS1685_RTC_SYSFS_CTRL_REG_RO(incr);
-DS1685_RTC_SYSFS_CTRL_REG_RW(pab);
-DS1685_RTC_SYSFS_CTRL_REG_RW(rf);
-DS1685_RTC_SYSFS_CTRL_REG_RW(wf);
-DS1685_RTC_SYSFS_CTRL_REG_RW(kf);
-#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
-DS1685_RTC_SYSFS_CTRL_REG_RO(bme);
-#endif
-
-static struct attribute*
-ds1685_rtc_sysfs_ctrl4a_attrs[] = {
- &dev_attr_vrt2.attr,
- &dev_attr_incr.attr,
- &dev_attr_pab.attr,
- &dev_attr_rf.attr,
- &dev_attr_wf.attr,
- &dev_attr_kf.attr,
-#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
- &dev_attr_bme.attr,
-#endif
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_ctrl4a_grp = {
- .name = "ctrl4a",
- .attrs = ds1685_rtc_sysfs_ctrl4a_attrs,
-};
-
-/*
- * Control Register 4B bits.
- */
-DS1685_RTC_SYSFS_CTRL_REG_RW(abe);
-DS1685_RTC_SYSFS_CTRL_REG_RW(e32k);
-DS1685_RTC_SYSFS_CTRL_REG_RO(cs);
-DS1685_RTC_SYSFS_CTRL_REG_RW(rce);
-DS1685_RTC_SYSFS_CTRL_REG_RW(prs);
-DS1685_RTC_SYSFS_CTRL_REG_RW(rie);
-DS1685_RTC_SYSFS_CTRL_REG_RW(wie);
-DS1685_RTC_SYSFS_CTRL_REG_RW(kse);
-
-static struct attribute*
-ds1685_rtc_sysfs_ctrl4b_attrs[] = {
- &dev_attr_abe.attr,
- &dev_attr_e32k.attr,
- &dev_attr_cs.attr,
- &dev_attr_rce.attr,
- &dev_attr_prs.attr,
- &dev_attr_rie.attr,
- &dev_attr_wie.attr,
- &dev_attr_kse.attr,
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_ctrl4b_grp = {
- .name = "ctrl4b",
- .attrs = ds1685_rtc_sysfs_ctrl4b_attrs,
-};
-
-
-/**
- * struct ds1685_rtc_ctrl_regs.
- * @name: char pointer for the bit name.
- * @reg: control register the bit is in.
- * @bit: the bit's offset in the register.
- */
-struct ds1685_rtc_time_regs {
- const char *name;
- const u8 reg;
- const u8 mask;
- const u8 min;
- const u8 max;
-};
-
-/*
- * Time/Date register lookup tables.
- */
-static const struct ds1685_rtc_time_regs
-ds1685_time_regs_bcd_table[] = {
- { "seconds", RTC_SECS, RTC_SECS_BCD_MASK, 0, 59 },
- { "minutes", RTC_MINS, RTC_MINS_BCD_MASK, 0, 59 },
- { "hours", RTC_HRS, RTC_HRS_24_BCD_MASK, 0, 23 },
- { "wday", RTC_WDAY, RTC_WDAY_MASK, 1, 7 },
- { "mday", RTC_MDAY, RTC_MDAY_BCD_MASK, 1, 31 },
- { "month", RTC_MONTH, RTC_MONTH_BCD_MASK, 1, 12 },
- { "year", RTC_YEAR, RTC_YEAR_BCD_MASK, 0, 99 },
- { "century", RTC_CENTURY, RTC_CENTURY_MASK, 0, 99 },
- { "alarm_seconds", RTC_SECS_ALARM, RTC_SECS_BCD_MASK, 0, 59 },
- { "alarm_minutes", RTC_MINS_ALARM, RTC_MINS_BCD_MASK, 0, 59 },
- { "alarm_hours", RTC_HRS_ALARM, RTC_HRS_24_BCD_MASK, 0, 23 },
- { "alarm_mday", RTC_MDAY_ALARM, RTC_MDAY_ALARM_MASK, 1, 31 },
- { NULL, 0, 0, 0, 0 },
-};
-
-static const struct ds1685_rtc_time_regs
-ds1685_time_regs_bin_table[] = {
- { "seconds", RTC_SECS, RTC_SECS_BIN_MASK, 0x00, 0x3b },
- { "minutes", RTC_MINS, RTC_MINS_BIN_MASK, 0x00, 0x3b },
- { "hours", RTC_HRS, RTC_HRS_24_BIN_MASK, 0x00, 0x17 },
- { "wday", RTC_WDAY, RTC_WDAY_MASK, 0x01, 0x07 },
- { "mday", RTC_MDAY, RTC_MDAY_BIN_MASK, 0x01, 0x1f },
- { "month", RTC_MONTH, RTC_MONTH_BIN_MASK, 0x01, 0x0c },
- { "year", RTC_YEAR, RTC_YEAR_BIN_MASK, 0x00, 0x63 },
- { "century", RTC_CENTURY, RTC_CENTURY_MASK, 0x00, 0x63 },
- { "alarm_seconds", RTC_SECS_ALARM, RTC_SECS_BIN_MASK, 0x00, 0x3b },
- { "alarm_minutes", RTC_MINS_ALARM, RTC_MINS_BIN_MASK, 0x00, 0x3b },
- { "alarm_hours", RTC_HRS_ALARM, RTC_HRS_24_BIN_MASK, 0x00, 0x17 },
- { "alarm_mday", RTC_MDAY_ALARM, RTC_MDAY_ALARM_MASK, 0x01, 0x1f },
- { NULL, 0, 0, 0x00, 0x00 },
-};
-
-/**
- * ds1685_rtc_sysfs_time_regs_bcd_lookup - time/date reg bit lookup function.
- * @name: register bit to look up in ds1685_time_regs_bcd_table.
- */
-static const struct ds1685_rtc_time_regs*
-ds1685_rtc_sysfs_time_regs_lookup(const char *name, bool bcd_mode)
-{
- const struct ds1685_rtc_time_regs *p;
-
- if (bcd_mode)
- p = ds1685_time_regs_bcd_table;
- else
- p = ds1685_time_regs_bin_table;
-
- for (; p->name != NULL; ++p)
- if (strcmp(p->name, name) == 0)
- return p;
-
- return NULL;
-}
-
-/**
- * ds1685_rtc_sysfs_time_regs_show - reads a time/date register via sysfs.
- * @dev: pointer to device structure.
- * @attr: pointer to device_attribute structure.
- * @buf: pointer to char array to hold the output.
- */
-static ssize_t
-ds1685_rtc_sysfs_time_regs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 tmp;
- struct ds1685_priv *rtc = dev_get_drvdata(dev);
- const struct ds1685_rtc_time_regs *bcd_reg_info =
- ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, true);
- const struct ds1685_rtc_time_regs *bin_reg_info =
- ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
-
- /* Make sure we actually matched something. */
- if (!bcd_reg_info || !bin_reg_info)
- return -EINVAL;
-
- /* bcd_reg_info->reg == bin_reg_info->reg. */
- ds1685_rtc_begin_data_access(rtc);
- tmp = rtc->read(rtc, bcd_reg_info->reg);
- ds1685_rtc_end_data_access(rtc);
-
- tmp = ds1685_rtc_bcd2bin(rtc, tmp, bcd_reg_info->mask,
- bin_reg_info->mask);
-
- return sprintf(buf, "%d\n", tmp);
-}
-
-/**
- * ds1685_rtc_sysfs_time_regs_store - writes a time/date register via sysfs.
- * @dev: pointer to device structure.
- * @attr: pointer to device_attribute structure.
- * @buf: pointer to char array to hold the output.
- * @count: number of bytes written.
- */
-static ssize_t
-ds1685_rtc_sysfs_time_regs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- long int val = 0;
- struct ds1685_priv *rtc = dev_get_drvdata(dev);
- const struct ds1685_rtc_time_regs *bcd_reg_info =
- ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, true);
- const struct ds1685_rtc_time_regs *bin_reg_info =
- ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
-
- /* We only accept numbers. */
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
-
- /* Make sure we actually matched something. */
- if (!bcd_reg_info || !bin_reg_info)
- return -EINVAL;
-
- /* Check for a valid range. */
- if (rtc->bcd_mode) {
- if ((val < bcd_reg_info->min) || (val > bcd_reg_info->max))
- return -ERANGE;
- } else {
- if ((val < bin_reg_info->min) || (val > bin_reg_info->max))
- return -ERANGE;
- }
-
- val = ds1685_rtc_bin2bcd(rtc, val, bin_reg_info->mask,
- bcd_reg_info->mask);
-
- /* bcd_reg_info->reg == bin_reg_info->reg. */
- ds1685_rtc_begin_data_access(rtc);
- rtc->write(rtc, bcd_reg_info->reg, val);
- ds1685_rtc_end_data_access(rtc);
-
- return count;
-}
-
-/**
- * DS1685_RTC_SYSFS_REG_RW - device_attribute for a read-write time register.
- * @reg: time/date register to read or write.
- */
-#define DS1685_RTC_SYSFS_TIME_REG_RW(reg) \
- static DEVICE_ATTR(reg, S_IRUGO | S_IWUSR, \
- ds1685_rtc_sysfs_time_regs_show, \
- ds1685_rtc_sysfs_time_regs_store)
-
-/*
- * Time/Date Register bits.
- */
-DS1685_RTC_SYSFS_TIME_REG_RW(seconds);
-DS1685_RTC_SYSFS_TIME_REG_RW(minutes);
-DS1685_RTC_SYSFS_TIME_REG_RW(hours);
-DS1685_RTC_SYSFS_TIME_REG_RW(wday);
-DS1685_RTC_SYSFS_TIME_REG_RW(mday);
-DS1685_RTC_SYSFS_TIME_REG_RW(month);
-DS1685_RTC_SYSFS_TIME_REG_RW(year);
-DS1685_RTC_SYSFS_TIME_REG_RW(century);
-DS1685_RTC_SYSFS_TIME_REG_RW(alarm_seconds);
-DS1685_RTC_SYSFS_TIME_REG_RW(alarm_minutes);
-DS1685_RTC_SYSFS_TIME_REG_RW(alarm_hours);
-DS1685_RTC_SYSFS_TIME_REG_RW(alarm_mday);
-
-static struct attribute*
-ds1685_rtc_sysfs_time_attrs[] = {
- &dev_attr_seconds.attr,
- &dev_attr_minutes.attr,
- &dev_attr_hours.attr,
- &dev_attr_wday.attr,
- &dev_attr_mday.attr,
- &dev_attr_month.attr,
- &dev_attr_year.attr,
- &dev_attr_century.attr,
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_time_grp = {
- .name = "datetime",
- .attrs = ds1685_rtc_sysfs_time_attrs,
-};
-
-static struct attribute*
-ds1685_rtc_sysfs_alarm_attrs[] = {
- &dev_attr_alarm_seconds.attr,
- &dev_attr_alarm_minutes.attr,
- &dev_attr_alarm_hours.attr,
- &dev_attr_alarm_mday.attr,
- NULL,
-};
-
-static const struct attribute_group
-ds1685_rtc_sysfs_alarm_grp = {
- .name = "alarm",
- .attrs = ds1685_rtc_sysfs_alarm_attrs,
-};
-#endif /* CONFIG_RTC_DS1685_SYSFS_REGS */
-
-
/**
* ds1685_rtc_sysfs_register - register sysfs files.
* @dev: pointer to device structure.
@@ -1752,39 +1206,6 @@ ds1685_rtc_sysfs_register(struct device *dev)
if (ret)
return ret;
-#ifdef CONFIG_RTC_DS1685_SYSFS_REGS
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrla_grp);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlb_grp);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlc_grp);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrld_grp);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4a_grp);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4b_grp);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_time_grp);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &ds1685_rtc_sysfs_alarm_grp);
- if (ret)
- return ret;
-#endif
return 0;
}
@@ -1798,17 +1219,6 @@ ds1685_rtc_sysfs_unregister(struct device *dev)
sysfs_remove_bin_file(&dev->kobj, &ds1685_rtc_sysfs_nvram_attr);
sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_misc_grp);
-#ifdef CONFIG_RTC_DS1685_SYSFS_REGS
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrla_grp);
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlb_grp);
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrlc_grp);
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrld_grp);
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4a_grp);
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_ctrl4b_grp);
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_time_grp);
- sysfs_remove_group(&dev->kobj, &ds1685_rtc_sysfs_alarm_grp);
-#endif
-
return 0;
}
#endif /* CONFIG_SYSFS */
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index 61f798c6101f..8f1dd88fa827 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/clk.h>
#define DRV_NAME "rtc-ftrtc010"
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 1a2c38cc0178..ea18a8f4bce0 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -14,6 +14,8 @@
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
+#include "rtc-core.h"
+#include <linux/of_irq.h>
/* Register map */
/* rtc section */
@@ -33,13 +35,16 @@
#define ISL1208_REG_SR_ARST (1<<7) /* auto reset */
#define ISL1208_REG_SR_XTOSCB (1<<6) /* crystal oscillator */
#define ISL1208_REG_SR_WRTC (1<<4) /* write rtc */
+#define ISL1208_REG_SR_EVT (1<<3) /* event */
#define ISL1208_REG_SR_ALM (1<<2) /* alarm */
#define ISL1208_REG_SR_BAT (1<<1) /* battery */
#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */
#define ISL1208_REG_INT 0x08
#define ISL1208_REG_INT_ALME (1<<6) /* alarm enable */
#define ISL1208_REG_INT_IM (1<<7) /* interrupt/alarm mode */
-#define ISL1208_REG_09 0x09 /* reserved */
+#define ISL1219_REG_EV 0x09
+#define ISL1219_REG_EV_EVEN (1<<4) /* event detection enable */
+#define ISL1219_REG_EV_EVIENB (1<<7) /* event in pull-up disable */
#define ISL1208_REG_ATR 0x0a
#define ISL1208_REG_DTR 0x0b
@@ -57,8 +62,24 @@
#define ISL1208_REG_USR2 0x13
#define ISL1208_USR_SECTION_LEN 2
+/* event section */
+#define ISL1219_REG_SCT 0x14
+#define ISL1219_REG_MNT 0x15
+#define ISL1219_REG_HRT 0x16
+#define ISL1219_REG_DTT 0x17
+#define ISL1219_REG_MOT 0x18
+#define ISL1219_REG_YRT 0x19
+#define ISL1219_EVT_SECTION_LEN 6
+
static struct i2c_driver isl1208_driver;
+/* ISL1208 various variants */
+enum {
+ TYPE_ISL1208 = 0,
+ TYPE_ISL1218,
+ TYPE_ISL1219,
+};
+
/* block read */
static int
isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[],
@@ -80,8 +101,8 @@ isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[],
};
int ret;
- BUG_ON(reg > ISL1208_REG_USR2);
- BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
+ WARN_ON(reg > ISL1219_REG_YRT);
+ WARN_ON(reg + len > ISL1219_REG_YRT + 1);
ret = i2c_transfer(client->adapter, msgs, 2);
if (ret > 0)
@@ -104,8 +125,8 @@ isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[],
};
int ret;
- BUG_ON(reg > ISL1208_REG_USR2);
- BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
+ WARN_ON(reg > ISL1219_REG_YRT);
+ WARN_ON(reg + len > ISL1219_REG_YRT + 1);
i2c_buf[0] = reg;
memcpy(&i2c_buf[1], &buf[0], len);
@@ -493,6 +514,73 @@ isl1208_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return isl1208_i2c_set_alarm(to_i2c_client(dev), alarm);
}
+static ssize_t timestamp0_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = dev_get_drvdata(dev);
+ int sr;
+
+ sr = isl1208_i2c_get_sr(client);
+ if (sr < 0) {
+ dev_err(dev, "%s: reading SR failed\n", __func__);
+ return sr;
+ }
+
+ sr &= ~ISL1208_REG_SR_EVT;
+
+ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
+ if (sr < 0)
+ dev_err(dev, "%s: writing SR failed\n",
+ __func__);
+
+ return count;
+}
+
+static ssize_t timestamp0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = dev_get_drvdata(dev);
+ u8 regs[ISL1219_EVT_SECTION_LEN] = { 0, };
+ struct rtc_time tm;
+ int sr;
+
+ sr = isl1208_i2c_get_sr(client);
+ if (sr < 0) {
+ dev_err(dev, "%s: reading SR failed\n", __func__);
+ return sr;
+ }
+
+ if (!(sr & ISL1208_REG_SR_EVT))
+ return 0;
+
+ sr = isl1208_i2c_read_regs(client, ISL1219_REG_SCT, regs,
+ ISL1219_EVT_SECTION_LEN);
+ if (sr < 0) {
+ dev_err(dev, "%s: reading event section failed\n",
+ __func__);
+ return 0;
+ }
+
+ /* MSB of each alarm register is an enable bit */
+ tm.tm_sec = bcd2bin(regs[ISL1219_REG_SCT - ISL1219_REG_SCT] & 0x7f);
+ tm.tm_min = bcd2bin(regs[ISL1219_REG_MNT - ISL1219_REG_SCT] & 0x7f);
+ tm.tm_hour = bcd2bin(regs[ISL1219_REG_HRT - ISL1219_REG_SCT] & 0x3f);
+ tm.tm_mday = bcd2bin(regs[ISL1219_REG_DTT - ISL1219_REG_SCT] & 0x3f);
+ tm.tm_mon =
+ bcd2bin(regs[ISL1219_REG_MOT - ISL1219_REG_SCT] & 0x1f) - 1;
+ tm.tm_year = bcd2bin(regs[ISL1219_REG_YRT - ISL1219_REG_SCT]) + 100;
+
+ sr = rtc_valid_tm(&tm);
+ if (sr)
+ return sr;
+
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)rtc_tm_to_time64(&tm));
+}
+
+static DEVICE_ATTR_RW(timestamp0);
+
static irqreturn_t
isl1208_rtc_interrupt(int irq, void *data)
{
@@ -538,6 +626,13 @@ isl1208_rtc_interrupt(int irq, void *data)
return err;
}
+ if (sr & ISL1208_REG_SR_EVT) {
+ sysfs_notify(&rtc->dev.kobj, NULL,
+ dev_attr_timestamp0.attr.name);
+ dev_warn(&client->dev, "event detected\n");
+ handled = 1;
+ }
+
return handled ? IRQ_HANDLED : IRQ_NONE;
}
@@ -623,11 +718,39 @@ static const struct attribute_group isl1208_rtc_sysfs_files = {
.attrs = isl1208_rtc_attrs,
};
+static struct attribute *isl1219_rtc_attrs[] = {
+ &dev_attr_timestamp0.attr,
+ NULL
+};
+
+static const struct attribute_group isl1219_rtc_sysfs_files = {
+ .attrs = isl1219_rtc_attrs,
+};
+
+static int isl1208_setup_irq(struct i2c_client *client, int irq)
+{
+ int rc = devm_request_threaded_irq(&client->dev, irq, NULL,
+ isl1208_rtc_interrupt,
+ IRQF_SHARED | IRQF_ONESHOT,
+ isl1208_driver.driver.name,
+ client);
+ if (!rc) {
+ device_init_wakeup(&client->dev, 1);
+ enable_irq_wake(irq);
+ } else {
+ dev_err(&client->dev,
+ "Unable to request irq %d, no alarm support\n",
+ irq);
+ }
+ return rc;
+}
+
static int
isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int rc = 0;
struct rtc_device *rtc;
+ int evdet_irq = -1;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -642,6 +765,7 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
rtc->ops = &isl1208_rtc_ops;
i2c_set_clientdata(client, rtc);
+ dev_set_drvdata(&rtc->dev, client);
rc = isl1208_i2c_get_sr(client);
if (rc < 0) {
@@ -653,26 +777,46 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_warn(&client->dev, "rtc power failure detected, "
"please set clock.\n");
+ if (id->driver_data == TYPE_ISL1219) {
+ struct device_node *np = client->dev.of_node;
+ u32 evienb;
+
+ rc = i2c_smbus_read_byte_data(client, ISL1219_REG_EV);
+ if (rc < 0) {
+ dev_err(&client->dev, "failed to read EV reg\n");
+ return rc;
+ }
+ rc |= ISL1219_REG_EV_EVEN;
+ if (!of_property_read_u32(np, "isil,ev-evienb", &evienb)) {
+ if (evienb)
+ rc |= ISL1219_REG_EV_EVIENB;
+ else
+ rc &= ~ISL1219_REG_EV_EVIENB;
+ }
+ rc = i2c_smbus_write_byte_data(client, ISL1219_REG_EV, rc);
+ if (rc < 0) {
+ dev_err(&client->dev, "could not enable tamper detection\n");
+ return rc;
+ }
+ rc = rtc_add_group(rtc, &isl1219_rtc_sysfs_files);
+ if (rc)
+ return rc;
+ evdet_irq = of_irq_get_byname(np, "evdet");
+ }
+
rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
if (rc)
return rc;
- if (client->irq > 0) {
- rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- isl1208_rtc_interrupt,
- IRQF_SHARED | IRQF_ONESHOT,
- isl1208_driver.driver.name,
- client);
- if (!rc) {
- device_init_wakeup(&client->dev, 1);
- enable_irq_wake(client->irq);
- } else {
- dev_err(&client->dev,
- "Unable to request irq %d, no alarm support\n",
- client->irq);
- client->irq = 0;
- }
- }
+ if (client->irq > 0)
+ rc = isl1208_setup_irq(client, client->irq);
+ if (rc)
+ return rc;
+
+ if (evdet_irq > 0 && evdet_irq != client->irq)
+ rc = isl1208_setup_irq(client, evdet_irq);
+ if (rc)
+ return rc;
return rtc_register_device(rtc);
}
@@ -686,8 +830,9 @@ isl1208_remove(struct i2c_client *client)
}
static const struct i2c_device_id isl1208_id[] = {
- { "isl1208", 0 },
- { "isl1218", 0 },
+ { "isl1208", TYPE_ISL1208 },
+ { "isl1218", TYPE_ISL1218 },
+ { "isl1219", TYPE_ISL1219 },
{ }
};
MODULE_DEVICE_TABLE(i2c, isl1208_id);
@@ -695,6 +840,7 @@ MODULE_DEVICE_TABLE(i2c, isl1208_id);
static const struct of_device_id isl1208_of_match[] = {
{ .compatible = "isil,isl1208" },
{ .compatible = "isil,isl1218" },
+ { .compatible = "isil,isl1219" },
{ }
};
MODULE_DEVICE_TABLE(of, isl1208_of_match);
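
The new timestamp0 attribute follows the usual sysfs timestamp convention: a read returns the latched event time in seconds since the epoch (an empty read if no event is pending), and any write clears the EVT flag so the next event can be latched. A minimal userspace sketch; the rtc0 path is an assumption (the index depends on probe order), not something this patch defines:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; the rtc index depends on probe order. */
	const char *path = "/sys/class/rtc/rtc0/timestamp0";
	unsigned long long ts;
	FILE *f = fopen(path, "r");

	if (f && fscanf(f, "%llu", &ts) == 1)
		printf("tamper event at %llu seconds since the epoch\n", ts);
	if (f)
		fclose(f);

	f = fopen(path, "w");	/* writing anything clears the event flag */
	if (f) {
		fputs("0\n", f);
		fclose(f);
	}
	return 0;
}
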
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 1053a406b3aa..ac9ca1042889 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -373,7 +373,6 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
struct m48t59_private *m48t59 = NULL;
struct resource *res;
int ret = -ENOMEM;
- char *name;
const struct rtc_class_ops *ops;
struct nvmem_config nvmem_cfg = {
.name = "m48t59-",
@@ -448,17 +447,14 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
}
switch (pdata->type) {
case M48T59RTC_TYPE_M48T59:
- name = "m48t59";
ops = &m48t59_rtc_ops;
pdata->offset = 0x1ff0;
break;
case M48T59RTC_TYPE_M48T02:
- name = "m48t02";
ops = &m48t02_rtc_ops;
pdata->offset = 0x7f0;
break;
case M48T59RTC_TYPE_M48T08:
- name = "m48t08";
ops = &m48t02_rtc_ops;
pdata->offset = 0x1ff0;
break;
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index cefde273fae6..8a60900d6b8b 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -1,16 +1,10 @@
-/*
- * RTC driver for Maxim MAX77686 and MAX77802
- *
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- *
- * based on rtc-max8997.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// RTC driver for Maxim MAX77686 and MAX77802
+//
+// Copyright (C) 2012 Samsung Electronics Co.Ltd
+//
+// based on rtc-max8997.c
#include <linux/i2c.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index e8cee123e8aa..08c661a332ec 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -1,16 +1,10 @@
-/*
- * RTC driver for Maxim MAX8997
- *
- * Copyright (C) 2013 Samsung Electronics Co.Ltd
- *
- * based on rtc-max8998.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// RTC driver for Maxim MAX8997
+//
+// Copyright (C) 2013 Samsung Electronics Co.Ltd
+//
+// based on rtc-max8998.c
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index d8c0f9b3f87d..c873b4509b3c 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -1,16 +1,10 @@
-/*
- * RTC driver for Maxim MAX8998
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Minkyu Kang <mk7.kang@samsung.com>
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// RTC driver for Maxim MAX8998
+//
+// Copyright (C) 2010 Samsung Electronics Co.Ltd
+// Author: Minkyu Kang <mk7.kang@samsung.com>
+// Author: Joonyoung Shim <jy0922.shim@samsung.com>
#include <linux/module.h>
#include <linux/i2c.h>
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 1f892b238ddb..0fa33708fc49 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 097a4d4e2aba..1925aaf09093 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
}
retval = rtc_register_device(mrst_rtc.rtc);
- if (retval) {
- retval = PTR_ERR(mrst_rtc.rtc);
+ if (retval)
goto cleanup0;
- }
dev_dbg(dev, "initialised\n");
return 0;
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index c75f26dc8fcc..007879a5042d 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 39086398833e..323ff55cc165 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -449,6 +449,7 @@ static void omap_rtc_power_off(void)
if (tm2bcd(&tm) < 0) {
dev_err(&rtc->rtc->dev, "power off failed\n");
+ rtc->type->lock(rtc);
return;
}
@@ -582,9 +583,7 @@ static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
u32 val;
u16 arg = 0;
- rtc->type->unlock(rtc);
val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
- rtc->type->lock(rtc);
switch (param) {
case PIN_CONFIG_INPUT_ENABLE:
@@ -614,9 +613,7 @@ static int rtc_pinconf_set(struct pinctrl_dev *pctldev,
u32 param_val;
int i;
- rtc->type->unlock(rtc);
val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
- rtc->type->lock(rtc);
/* active low by default */
val |= OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
@@ -861,13 +858,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
goto err;
}
- if (rtc->is_pmic_controller) {
- if (!pm_power_off) {
- omap_rtc_power_off_rtc = rtc;
- pm_power_off = omap_rtc_power_off;
- }
- }
-
/* Support ext_wakeup pinconf */
rtc_pinctrl_desc.name = dev_name(&pdev->dev);
@@ -880,12 +870,21 @@ static int omap_rtc_probe(struct platform_device *pdev)
ret = rtc_register_device(rtc->rtc);
if (ret)
- goto err;
+ goto err_deregister_pinctrl;
rtc_nvmem_register(rtc->rtc, &omap_rtc_nvmem_config);
+ if (rtc->is_pmic_controller) {
+ if (!pm_power_off) {
+ omap_rtc_power_off_rtc = rtc;
+ pm_power_off = omap_rtc_power_off;
+ }
+ }
+
return 0;
+err_deregister_pinctrl:
+ pinctrl_unregister(rtc->pctldev);
err:
clk_disable_unprepare(rtc->clk);
device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index e83be1852c2f..9f99a0966550 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -36,6 +36,11 @@
#define PCF2127_REG_MO (0x08)
#define PCF2127_REG_YR (0x09)
+/* the pcf2127 has 512 bytes of nvmem, the pcf2129 doesn't */
+#define PCF2127_REG_RAM_addr_MSB 0x1a
+#define PCF2127_REG_RAM_wrt_cmd 0x1c
+#define PCF2127_REG_RAM_rd_cmd 0x1d
+
#define PCF2127_OSF BIT(7) /* Oscillator Fail flag */
struct pcf2127 {
@@ -183,10 +188,47 @@ static const struct rtc_class_ops pcf2127_rtc_ops = {
.set_time = pcf2127_rtc_set_time,
};
+static int pcf2127_nvmem_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct pcf2127 *pcf2127 = priv;
+ int ret;
+ unsigned char offsetbuf[] = { offset >> 8, offset };
+
+ ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_addr_MSB,
+ offsetbuf, 2);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_rd_cmd,
+ val, bytes);
+
+ return ret ?: bytes;
+}
+
+static int pcf2127_nvmem_write(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct pcf2127 *pcf2127 = priv;
+ int ret;
+ unsigned char offsetbuf[] = { offset >> 8, offset };
+
+ ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_addr_MSB,
+ offsetbuf, 2);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_wrt_cmd,
+ val, bytes);
+
+ return ret ?: bytes;
+}
+
static int pcf2127_probe(struct device *dev, struct regmap *regmap,
- const char *name)
+ const char *name, bool has_nvmem)
{
struct pcf2127 *pcf2127;
+ int ret = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -200,8 +242,21 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc = devm_rtc_device_register(dev, name, &pcf2127_rtc_ops,
THIS_MODULE);
+ if (IS_ERR(pcf2127->rtc))
+ return PTR_ERR(pcf2127->rtc);
+
+ if (has_nvmem) {
+ struct nvmem_config nvmem_cfg = {
+ .priv = pcf2127,
+ .reg_read = pcf2127_nvmem_read,
+ .reg_write = pcf2127_nvmem_write,
+ .size = 512,
+ };
+
+ ret = rtc_nvmem_register(pcf2127->rtc, &nvmem_cfg);
+ }
- return PTR_ERR_OR_ZERO(pcf2127->rtc);
+ return ret;
}
#ifdef CONFIG_OF
@@ -309,11 +364,11 @@ static int pcf2127_i2c_probe(struct i2c_client *client,
}
return pcf2127_probe(&client->dev, regmap,
- pcf2127_i2c_driver.driver.name);
+ pcf2127_i2c_driver.driver.name, id->driver_data);
}
static const struct i2c_device_id pcf2127_i2c_id[] = {
- { "pcf2127", 0 },
+ { "pcf2127", 1 },
{ "pcf2129", 0 },
{ }
};
@@ -372,11 +427,12 @@ static int pcf2127_spi_probe(struct spi_device *spi)
return PTR_ERR(regmap);
}
- return pcf2127_probe(&spi->dev, regmap, pcf2127_spi_driver.driver.name);
+ return pcf2127_probe(&spi->dev, regmap, pcf2127_spi_driver.driver.name,
+ spi_get_device_id(spi)->driver_data);
}
static const struct spi_device_id pcf2127_spi_id[] = {
- { "pcf2127", 0 },
+ { "pcf2127", 1 },
{ "pcf2129", 0 },
{ }
};
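
The RAM access above is an address-then-data protocol: each nvmem operation first writes the 16-bit offset to the RAM address registers, then streams data through the read or write command register. Once rtc_nvmem_register() has run, the 512 bytes appear as an ordinary nvmem device. A userspace read sketch; the device path is an assumption about how the nvmem core names the entry, not taken from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[512];	/* the pcf2127 exposes 512 bytes of RAM */
	ssize_t n;
	/* Hypothetical path; the actual name is assigned by the nvmem core. */
	int fd = open("/sys/bus/nvmem/devices/rtc-pcf2127-0/nvmem", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes of battery-backed RAM\n", n);
	close(fd);
	return 0;
}
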
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 49bcbb3d4a69..283c2335b01b 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -43,37 +43,38 @@ static struct i2c_driver pcf85063_driver;
static int pcf85063_stop_clock(struct i2c_client *client, u8 *ctrl1)
{
- s32 ret;
+ int rc;
+ u8 reg;
- ret = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
- if (ret < 0) {
+ rc = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
+ if (rc < 0) {
dev_err(&client->dev, "Failing to stop the clock\n");
return -EIO;
}
/* stop the clock */
- ret |= PCF85063_REG_CTRL1_STOP;
+ reg = rc | PCF85063_REG_CTRL1_STOP;
- ret = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, ret);
- if (ret < 0) {
+ rc = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, reg);
+ if (rc < 0) {
dev_err(&client->dev, "Failing to stop the clock\n");
return -EIO;
}
- *ctrl1 = ret;
+ *ctrl1 = reg;
return 0;
}
static int pcf85063_start_clock(struct i2c_client *client, u8 ctrl1)
{
- s32 ret;
+ int rc;
/* start the clock */
ctrl1 &= ~PCF85063_REG_CTRL1_STOP;
- ret = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, ctrl1);
- if (ret < 0) {
+ rc = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, ctrl1);
+ if (rc < 0) {
dev_err(&client->dev, "Failing to start the clock\n");
return -EIO;
}
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
index 169704b2ce13..1943c8151152 100644
--- a/drivers/rtc/rtc-r7301.c
+++ b/drivers/rtc/rtc-r7301.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 8428455432ca..6495f84f7428 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -1,19 +1,9 @@
-/*
- * Copyright (c) 2013-2014 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * Copyright (C) 2013 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013-2014 Samsung Electronics Co., Ltd
+// http://www.samsung.com
+//
+// Copyright (C) 2013 Google, Inc
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index ed71d1113627..304d905cb23f 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -224,7 +224,6 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
info->rtc = rtc;
rtc->max_user_freq = RTC_FREQ;
- rtc_irq_set_freq(rtc, NULL, RTC_FREQ);
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 4f98543d1ea5..51ba414798a8 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -15,6 +15,7 @@
* for more details.
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
@@ -142,8 +143,6 @@ static int __sh_rtc_alarm(struct sh_rtc *rtc)
static int __sh_rtc_periodic(struct sh_rtc *rtc)
{
- struct rtc_device *rtc_dev = rtc->rtc_dev;
- struct rtc_task *irq_task;
unsigned int tmp, pending;
tmp = readb(rtc->regbase + RCR2);
@@ -160,14 +159,7 @@ static int __sh_rtc_periodic(struct sh_rtc *rtc)
else {
if (rtc->periodic_freq & PF_HP)
rtc->periodic_freq |= PF_COUNT;
- if (rtc->periodic_freq & PF_KOU) {
- spin_lock(&rtc_dev->irq_task_lock);
- irq_task = rtc_dev->irq_task;
- if (irq_task)
- irq_task->func(irq_task->private_data);
- spin_unlock(&rtc_dev->irq_task_lock);
- } else
- rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
}
return pending;
@@ -223,81 +215,6 @@ static irqreturn_t sh_rtc_shared(int irq, void *dev_id)
return IRQ_RETVAL(ret);
}
-static int sh_rtc_irq_set_state(struct device *dev, int enable)
-{
- struct sh_rtc *rtc = dev_get_drvdata(dev);
- unsigned int tmp;
-
- spin_lock_irq(&rtc->lock);
-
- tmp = readb(rtc->regbase + RCR2);
-
- if (enable) {
- rtc->periodic_freq |= PF_KOU;
- tmp &= ~RCR2_PEF; /* Clear PES bit */
- tmp |= (rtc->periodic_freq & ~PF_HP); /* Set PES2-0 */
- } else {
- rtc->periodic_freq &= ~PF_KOU;
- tmp &= ~(RCR2_PESMASK | RCR2_PEF);
- }
-
- writeb(tmp, rtc->regbase + RCR2);
-
- spin_unlock_irq(&rtc->lock);
-
- return 0;
-}
-
-static int sh_rtc_irq_set_freq(struct device *dev, int freq)
-{
- struct sh_rtc *rtc = dev_get_drvdata(dev);
- int tmp, ret = 0;
-
- spin_lock_irq(&rtc->lock);
- tmp = rtc->periodic_freq & PF_MASK;
-
- switch (freq) {
- case 0:
- rtc->periodic_freq = 0x00;
- break;
- case 1:
- rtc->periodic_freq = 0x60;
- break;
- case 2:
- rtc->periodic_freq = 0x50;
- break;
- case 4:
- rtc->periodic_freq = 0x40;
- break;
- case 8:
- rtc->periodic_freq = 0x30 | PF_HP;
- break;
- case 16:
- rtc->periodic_freq = 0x30;
- break;
- case 32:
- rtc->periodic_freq = 0x20 | PF_HP;
- break;
- case 64:
- rtc->periodic_freq = 0x20;
- break;
- case 128:
- rtc->periodic_freq = 0x10 | PF_HP;
- break;
- case 256:
- rtc->periodic_freq = 0x10;
- break;
- default:
- ret = -ENOTSUPP;
- }
-
- if (ret == 0)
- rtc->periodic_freq |= tmp;
-
- spin_unlock_irq(&rtc->lock);
- return ret;
-}
-
static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
@@ -674,8 +591,6 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
/* everything disabled by default */
- sh_rtc_irq_set_freq(&pdev->dev, 0);
- sh_rtc_irq_set_state(&pdev->dev, 0);
sh_rtc_setaie(&pdev->dev, 0);
sh_rtc_setcie(&pdev->dev, 0);
@@ -707,8 +622,6 @@ static int __exit sh_rtc_remove(struct platform_device *pdev)
{
struct sh_rtc *rtc = platform_get_drvdata(pdev);
- sh_rtc_irq_set_state(&pdev->dev, 0);
-
sh_rtc_setaie(&pdev->dev, 0);
sh_rtc_setcie(&pdev->dev, 0);
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 8a75cc3af6e7..b2483a749ac4 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -40,49 +40,83 @@ struct snvs_rtc_data {
struct clk *clk;
};
+/* Read 64 bit timer register, which could be in inconsistent state */
+static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
+{
+ u32 msb, lsb;
+
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
+ return (u64)msb << 32 | lsb;
+}
+
+/* Read the secure real time counter, taking care to deal with the case
+ * where the counter updates while it is being read.
+ */
static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
{
u64 read1, read2;
- u32 val;
+ unsigned int timeout = 100;
+ /* The registers might update between the read of the LSB reg and the
+ * MSB reg, and a single register might even be caught in a partially
+ * modified state.
+ */
+ read1 = rtc_read_lpsrt(data);
do {
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
- read1 = val;
- read1 <<= 32;
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
- read1 |= val;
-
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
- read2 = val;
- read2 <<= 32;
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
- read2 |= val;
- } while (read1 != read2);
+ read2 = read1;
+ read1 = rtc_read_lpsrt(data);
+ } while (read1 != read2 && --timeout);
+ if (!timeout)
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
/* Convert 47-bit counter to 32-bit raw second count */
return (u32) (read1 >> CNTR_TO_SECS_SH);
}
-static void rtc_write_sync_lp(struct snvs_rtc_data *data)
+/* Just read the lsb from the counter, dealing with inconsistent state */
+static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
+{
+ u32 count1, count2;
+ unsigned int timeout = 100;
+
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ do {
+ count2 = count1;
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ } while (count1 != count2 && --timeout);
+ if (!timeout) {
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ return -ETIMEDOUT;
+ }
+
+ *lsb = count1;
+ return 0;
+}
+
+static int rtc_write_sync_lp(struct snvs_rtc_data *data)
{
- u32 count1, count2, count3;
- int i;
-
- /* Wait for 3 CKIL cycles */
- for (i = 0; i < 3; i++) {
- do {
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
- } while (count1 != count2);
-
- /* Now wait until counter value changes */
- do {
- do {
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
- } while (count2 != count3);
- } while (count3 == count1);
+ u32 count1, count2;
+ u32 elapsed;
+ unsigned int timeout = 1000;
+ int ret;
+
+ ret = rtc_read_lp_counter_lsb(data, &count1);
+ if (ret)
+ return ret;
+
+ /* Wait for 3 CKIL cycles, about 61.0-91.5 µs */
+ do {
+ ret = rtc_read_lp_counter_lsb(data, &count2);
+ if (ret)
+ return ret;
+ elapsed = count2 - count1; /* wrap around _is_ handled! */
+ } while (elapsed < 3 && --timeout);
+ if (!timeout) {
+ dev_err(&data->rtc->dev, "Timeout waiting for LPSRT Counter to change\n");
+ return -ETIMEDOUT;
}
+ return 0;
}
static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
@@ -166,9 +200,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
- rtc_write_sync_lp(data);
-
- return 0;
+ return rtc_write_sync_lp(data);
}
static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -176,11 +208,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &alrm->time;
unsigned long time;
+ int ret;
rtc_tm_to_time(alrm_tm, &time);
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
- rtc_write_sync_lp(data);
+ ret = rtc_write_sync_lp(data);
+ if (ret)
+ return ret;
regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
/* Clear alarm interrupt status bit */
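
The rework replaces the unbounded agree-twice loops with a bounded retry: sample the counter, re-sample until two consecutive values match, and give up after a fixed number of attempts instead of spinning forever on broken hardware. The core pattern as a standalone sketch (the callback indirection is illustrative, not how the driver is structured):

#include <stdbool.h>
#include <stdint.h>

static bool read_stable_u64(uint64_t (*sample)(void *ctx), void *ctx,
			    uint64_t *out)
{
	unsigned int timeout = 100;
	uint64_t prev, cur;

	cur = sample(ctx);
	do {
		prev = cur;
		cur = sample(ctx);
	} while (cur != prev && --timeout);

	*out = cur;
	return timeout != 0;	/* false: never saw two equal samples */
}
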
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index d578e40d5a50..b76318fd5bb0 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -288,10 +288,22 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc_data);
- err = stmp_reset_block(rtc_data->io);
- if (err) {
- dev_err(&pdev->dev, "stmp_reset_block failed: %d\n", err);
- return err;
+ /*
+ * Resetting the rtc stops the watchdog timer that is potentially
+ * running. So (assuming it is running on purpose) don't reset if the
+ * watchdog is enabled.
+ */
+ if (readl(rtc_data->io + STMP3XXX_RTC_CTRL) &
+ STMP3XXX_RTC_CTRL_WATCHDOGEN) {
+ dev_info(&pdev->dev,
+ "Watchdog is running, skip resetting rtc\n");
+ } else {
+ err = stmp_reset_block(rtc_data->io);
+ if (err) {
+ dev_err(&pdev->dev, "stmp_reset_block failed: %d\n",
+ err);
+ return err;
+ }
}
/*
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 454da38c6012..f1ff30ade534 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -317,3 +317,46 @@ const struct attribute_group **rtc_get_dev_attribute_groups(void)
{
return rtc_attr_groups;
}
+
+int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
+{
+ size_t old_cnt = 0, add_cnt = 0, new_cnt;
+ const struct attribute_group **groups, **old;
+
+ if (rtc->registered)
+ return -EINVAL;
+ if (!grps)
+ return -EINVAL;
+
+ groups = rtc->dev.groups;
+ if (groups)
+ for (; *groups; groups++)
+ old_cnt++;
+
+ for (groups = grps; *groups; groups++)
+ add_cnt++;
+
+ new_cnt = old_cnt + add_cnt + 1;
+ groups = devm_kcalloc(&rtc->dev, new_cnt, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+ memcpy(groups, rtc->dev.groups, old_cnt * sizeof(*groups));
+ memcpy(groups + old_cnt, grps, add_cnt * sizeof(*groups));
+ groups[old_cnt + add_cnt] = NULL;
+
+ old = rtc->dev.groups;
+ rtc->dev.groups = groups;
+ if (old && old != rtc_attr_groups)
+ devm_kfree(&rtc->dev, old);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtc_add_groups);
+
+int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp)
+{
+ const struct attribute_group *groups[] = { grp, NULL };
+
+ return rtc_add_groups(rtc, groups);
+}
+EXPORT_SYMBOL(rtc_add_group);
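
rtc_add_group() splices extra attribute groups into rtc->dev.groups before the device is registered, so the files are created together with the device and there is no window in which userspace can see the device without them; this is what the isl1208 hunk above relies on for the isl1219 attributes. A sketch of the intended call pattern (driver names are illustrative):

#include <linux/rtc.h>

static struct attribute *foo_attrs[] = {
	/* some driver-specific attribute, declared via DEVICE_ATTR_RW() */
	&dev_attr_timestamp0.attr,
	NULL
};

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
};

static int foo_probe(struct device *dev)
{
	struct rtc_device *rtc;
	int ret;

	rtc = devm_rtc_allocate_device(dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* Must precede rtc_register_device(): rtc_add_groups() rejects an
	 * already-registered device with -EINVAL.
	 */
	ret = rtc_add_group(rtc, &foo_group);
	if (ret)
		return ret;

	return rtc_register_device(rtc);
}
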
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 66efff60c4d5..8dc48fe7fc35 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 8469256edc2a..ade6a82709be 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -22,7 +22,7 @@ struct rtc_test_data {
bool alarm_en;
};
-struct platform_device *pdev[MAX_RTC_TEST];
+static struct platform_device *pdev[MAX_RTC_TEST];
static int test_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d3a38c421503..a23e7d394a0a 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -41,6 +41,15 @@
#define DASD_DIAG_MOD "dasd_diag_mod"
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
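
With 0444 permissions the values are visible but read-only under /sys/module; they can only be set at load time, e.g. (assuming the usual dasd_mod module name) "modprobe dasd_mod queue_depth=64 nr_hw_queues=8", or "dasd_mod.queue_depth=64" on the kernel command line when the driver is built in.
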
/*
* SECTION: exported variables of dasd.c
*/
@@ -64,8 +73,8 @@ static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
-static void dasd_device_tasklet(struct dasd_device *);
-static void dasd_block_tasklet(struct dasd_block *);
+static void dasd_device_tasklet(unsigned long);
+static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
@@ -116,8 +125,7 @@ struct dasd_device *dasd_alloc_device(void)
dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
spin_lock_init(&device->mem_lock);
atomic_set(&device->tasklet_scheduled, 0);
- tasklet_init(&device->tasklet,
- (void (*)(unsigned long)) dasd_device_tasklet,
+ tasklet_init(&device->tasklet, dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
timer_setup(&device->timer, dasd_device_timeout, 0);
@@ -157,8 +165,7 @@ struct dasd_block *dasd_alloc_block(void)
atomic_set(&block->open_count, -1);
atomic_set(&block->tasklet_scheduled, 0);
- tasklet_init(&block->tasklet,
- (void (*)(unsigned long)) dasd_block_tasklet,
+ tasklet_init(&block->tasklet, dasd_block_tasklet,
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
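
Calling the tasklet handlers through a cast to void (*)(unsigned long) invoked them via a mismatched function-pointer type, which is undefined behaviour and trips control-flow-integrity checkers; the fix gives the handlers the exact signature tasklet_init() expects and moves the cast inside the handler. The resulting idiom, sketched with illustrative names:

#include <linux/interrupt.h>

struct foo {
	struct tasklet_struct tasklet;
	/* ... */
};

/* The handler takes the opaque unsigned long and recovers the typed
 * pointer itself, so no function-pointer cast is needed to register it.
 */
static void foo_tasklet(unsigned long data)
{
	struct foo *foo = (struct foo *) data;

	/* process foo ... */
}

static void foo_init(struct foo *foo)
{
	tasklet_init(&foo->tasklet, foo_tasklet, (unsigned long) foo);
}
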
@@ -2055,8 +2062,9 @@ EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
/*
* Acquire the device lock and process queues for the device.
*/
-static void dasd_device_tasklet(struct dasd_device *device)
+static void dasd_device_tasklet(unsigned long data)
{
+ struct dasd_device *device = (struct dasd_device *) data;
struct list_head final_queue;
atomic_set (&device->tasklet_scheduled, 0);
@@ -2774,8 +2782,9 @@ static void __dasd_block_start_head(struct dasd_block *block)
* block layer request queue, creates ccw requests, enqueues them on
* a dasd_device and processes ccw requests that have been returned.
*/
-static void dasd_block_tasklet(struct dasd_block *block)
+static void dasd_block_tasklet(unsigned long data)
{
+ struct dasd_block *block = (struct dasd_block *) data;
struct list_head final_queue;
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
@@ -3115,9 +3124,10 @@ static int dasd_alloc_queue(struct dasd_block *block)
block->tag_set.ops = &dasd_mq_ops;
block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
- block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
- block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+ block->tag_set.nr_hw_queues = nr_hw_queues;
+ block->tag_set.queue_depth = queue_depth;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index e36a114354fc..b9ce93e9df89 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -708,7 +708,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
struct ccw1 *ccw;
cqr = lcu->rsu_cqr;
- strncpy((char *) &cqr->magic, "ECKD", 4);
+ memcpy((char *) &cqr->magic, "ECKD", 4);
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index b9ebb565ee2c..fab35c6170cc 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features)
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
- strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+ strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
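
The strncpy() -> strlcpy() conversions matter when the source can fill the whole buffer: strncpy() leaves the destination without a NUL terminator in that case, while strlcpy() always terminates, truncating if necessary. A contrast sketch with a 4-byte buffer:

#include <linux/string.h>

static void demo(void)
{
	char a[4], b[4];

	strncpy(a, "abcd", sizeof(a));	/* a = {'a','b','c','d'}: no NUL */
	strlcpy(b, "abcd", sizeof(b));	/* b = "abc": truncated, terminated */
}
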
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bbf95b78ef5d..4e7b55a14b1a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
struct dasd_eckd_private *private = device->private;
int i;
+ if (!private)
+ return;
+
dasd_alias_disconnect_device_from_lcu(device);
private->ned = NULL;
private->sneq = NULL;
@@ -2035,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
- cancel_work_sync(&device->reload_device);
- cancel_work_sync(&device->kick_validate);
+ if (cancel_work_sync(&device->reload_device))
+ dasd_put_device(device);
+ if (cancel_work_sync(&device->kick_validate))
+ dasd_put_device(device);
+
return 0;
};
@@ -3535,7 +3541,7 @@ static int prepare_itcw(struct itcw *itcw,
dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
&pfxdata, sizeof(pfxdata), total_data_size);
- return PTR_RET(dcw);
+ return PTR_ERR_OR_ZERO(dcw);
}
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
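
The online_to_ready change plugs a reference leak: the code that schedules reload_device/kick_validate takes a device reference on behalf of the work item, and the work function drops it when it runs. When cancel_work_sync() returns true the work was still pending and never ran, so the canceller must drop that reference itself. The general shape, with hypothetical foo_get()/foo_put() refcount helpers:

#include <linux/workqueue.h>

struct foo {
	struct work_struct work;
	/* ... refcounted object ... */
};

void foo_get(struct foo *foo);	/* hypothetical refcount helpers */
void foo_put(struct foo *foo);

static void foo_schedule(struct foo *foo)
{
	foo_get(foo);			/* reference owned by the work item */
	if (!schedule_work(&foo->work))
		foo_put(foo);		/* already queued: drop the extra ref */
}

static void foo_teardown(struct foo *foo)
{
	/* cancel_work_sync() returns true iff the work was pending and so
	 * never got the chance to drop its own reference.
	 */
	if (cancel_work_sync(&foo->work))
		foo_put(foo);
}
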
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 6ef8714dc693..93bb09da7fdc 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strncpy(header.busid, dev_name(&device->cdev->dev),
+ strlcpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
@@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strncpy(header.busid, dev_name(&device->cdev->dev),
+ strlcpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 976b6bd4fb05..de6b96036aa4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -228,14 +228,6 @@ struct dasd_ccw_req {
#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
-/*
- * There is no reliable way to determine the number of available CPUs on
- * LPAR but there is no big performance difference between 1 and the
- * maximum CPU number.
- * 64 is a good trade off performance wise.
- */
-#define DASD_NR_HW_QUEUES 64
-#define DASD_MAX_LCU_DEV 256
#define DASD_REQ_PER_DEV 4
/* Signature for error recovery functions. */
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index b1fcb76dd272..98f66b7b6794 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -455,6 +455,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ bdev->tag_set.numa_node = NUMA_NO_NODE;
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
if (ret)
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0a4c13e1e76e..c6ab34f94b1b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -12,11 +12,6 @@ GCOV_PROFILE_sclp_early_core.o := n
KCOV_INSTRUMENT_sclp_early_core.o := n
UBSAN_SANITIZE_sclp_early_core.o := n
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o += -march=z900
-endif
-
CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 79eb60958015..567aedc03c76 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -39,8 +39,34 @@ static const int kbd_max_vals[] = {
};
static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
-static unsigned char ret_diacr[NR_DEAD] = {
- '`', '\'', '^', '~', '"', ','
+static const unsigned char ret_diacr[NR_DEAD] = {
+ '`', /* dead_grave */
+ '\'', /* dead_acute */
+ '^', /* dead_circumflex */
+ '~', /* dead_tilde */
+ '"', /* dead_diaeresis */
+ ',', /* dead_cedilla */
+ '_', /* dead_macron */
+ 'U', /* dead_breve */
+ '.', /* dead_abovedot */
+ '*', /* dead_abovering */
+ '=', /* dead_doubleacute */
+ 'c', /* dead_caron */
+ 'k', /* dead_ogonek */
+ 'i', /* dead_iota */
+ '#', /* dead_voiced_sound */
+ 'o', /* dead_semivoiced_sound */
+ '!', /* dead_belowdot */
+ '?', /* dead_hook */
+ '+', /* dead_horn */
+ '-', /* dead_stroke */
+ ')', /* dead_abovecomma */
+ '(', /* dead_abovereversedcomma */
+ ':', /* dead_doublegrave */
+ 'n', /* dead_invertedbreve */
+ ';', /* dead_belowcomma */
+ '$', /* dead_currency */
+ '@', /* dead_greek */
};
/*
@@ -334,37 +360,41 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
int cmd, int perm)
{
struct kbentry tmp;
+ unsigned long kb_index, kb_table;
ushort *key_map, val, ov;
if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
return -EFAULT;
+ kb_index = (unsigned long) tmp.kb_index;
#if NR_KEYS < 256
- if (tmp.kb_index >= NR_KEYS)
+ if (kb_index >= NR_KEYS)
return -EINVAL;
#endif
+ kb_table = (unsigned long) tmp.kb_table;
#if MAX_NR_KEYMAPS < 256
- if (tmp.kb_table >= MAX_NR_KEYMAPS)
+ if (kb_table >= MAX_NR_KEYMAPS)
return -EINVAL;
+ kb_table = array_index_nospec(kb_table, MAX_NR_KEYMAPS);
#endif
switch (cmd) {
case KDGKBENT:
- key_map = kbd->key_maps[tmp.kb_table];
+ key_map = kbd->key_maps[kb_table];
if (key_map) {
- val = U(key_map[tmp.kb_index]);
+ val = U(key_map[kb_index]);
if (KTYP(val) >= KBD_NR_TYPES)
val = K_HOLE;
} else
- val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
+ val = (kb_index ? K_HOLE : K_NOSUCHMAP);
return put_user(val, &user_kbe->kb_value);
case KDSKBENT:
if (!perm)
return -EPERM;
- if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
+ if (!kb_index && tmp.kb_value == K_NOSUCHMAP) {
/* disallocate map */
- key_map = kbd->key_maps[tmp.kb_table];
+ key_map = kbd->key_maps[kb_table];
if (key_map) {
- kbd->key_maps[tmp.kb_table] = NULL;
+ kbd->key_maps[kb_table] = NULL;
kfree(key_map);
}
break;
@@ -375,18 +405,18 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
return -EINVAL;
- if (!(key_map = kbd->key_maps[tmp.kb_table])) {
+ if (!(key_map = kbd->key_maps[kb_table])) {
int j;
key_map = kmalloc(sizeof(plain_map),
GFP_KERNEL);
if (!key_map)
return -ENOMEM;
- kbd->key_maps[tmp.kb_table] = key_map;
+ kbd->key_maps[kb_table] = key_map;
for (j = 0; j < NR_KEYS; j++)
key_map[j] = U(K_HOLE);
}
- ov = U(key_map[tmp.kb_index]);
+ ov = U(key_map[kb_index]);
if (tmp.kb_value == ov)
break; /* nothing to do */
/*
@@ -395,7 +425,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
- key_map[tmp.kb_index] = U(tmp.kb_value);
+ key_map[kb_index] = U(tmp.kb_value);
break;
}
return 0;
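
The ioctl rework is the standard Spectre-v1 hardening recipe: copy the untrusted index into a local, bounds-check it, then clamp it with array_index_nospec() so a mispredicted branch cannot speculatively use an out-of-range value. Reduced to its essentials (the helper is illustrative):

#include <linux/errno.h>
#include <linux/nospec.h>

static int lookup(const int *table, unsigned long idx, unsigned long size)
{
	if (idx >= size)
		return -EINVAL;
	/* Clamp idx after the bounds check so speculation past the branch
	 * cannot load table[idx] with an out-of-range index.
	 */
	idx = array_index_nospec(idx, size);
	return table[idx];
}
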
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 76c158c41510..4f1a69c9d81d 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -61,7 +61,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
struct appldata_product_id id;
int rc;
- strncpy(id.prod_nr, "LNXAPPL", 7);
+ memcpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index ee6f3b563728..e69b12a40636 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -64,42 +64,18 @@ static struct notifier_block call_home_panic_nb = {
.priority = INT_MAX,
};
-static int proc_handler_callhome(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *count,
- loff_t *ppos)
-{
- unsigned long val;
- int len, rc;
- char buf[3];
-
- if (!*count || (*ppos && !write)) {
- *count = 0;
- return 0;
- }
- if (!write) {
- len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
- rc = copy_to_user(buffer, buf, sizeof(buf));
- if (rc != 0)
- return -EFAULT;
- } else {
- len = *count;
- rc = kstrtoul_from_user(buffer, len, 0, &val);
- if (rc)
- return rc;
- if (val != 0 && val != 1)
- return -EINVAL;
- callhome_enabled = val;
- }
- *count = len;
- *ppos += len;
- return 0;
-}
+static int zero;
+static int one = 1;
static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
+ .data = &callhome_enabled,
+ .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_handler_callhome,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
},
{}
};
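
Switching to proc_dointvec_minmax drops the hand-rolled copy_to_user/offset bookkeeping: the generic handler reads and writes the int behind .data directly and clamps writes to [*extra1, *extra2]. The same pattern works for any 0/1 sysctl knob, sketched here with illustrative names:

#include <linux/sysctl.h>

static int my_flag;
static int range_min;		/* 0 */
static int range_max = 1;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_flag",
		.data		= &my_flag,	/* backing variable */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &range_min,	/* smallest accepted value */
		.extra2		= &range_max,	/* largest accepted value */
	},
	{}
};
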
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 37e65a05517f..cdcde18e7220 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -113,16 +113,16 @@ static int crypt_enabled(struct tape_device *device)
static void ext_to_int_kekl(struct tape390_kekl *in,
struct tape3592_kekl *out)
{
- int i;
+ int len;
memset(out, 0, sizeof(*out));
if (in->type == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x40;
if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x80;
- strncpy(out->label, in->label, 64);
- for (i = strlen(in->label); i < sizeof(out->label); i++)
- out->label[i] = ' ';
+ len = min(sizeof(out->label), strlen(in->label));
+ memcpy(out->label, in->label, len);
+ memset(out->label + len, ' ', sizeof(out->label) - len);
ASCEBC(out->label, sizeof(out->label));
}
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index a07102472ce9..b58df0dd0039 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev(
if (!tcd)
return ERR_PTR(-ENOMEM);
- strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+ strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
- strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+ strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
@@ -77,7 +77,7 @@ struct tape_class_device *register_tape_dev(
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
- rc = PTR_RET(tcd->class_device);
+ rc = PTR_ERR_OR_ZERO(tcd->class_device);
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 948ce82a7725..0fa1b6b1491a 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -68,7 +68,7 @@ static void vmcp_response_alloc(struct vmcp_session *session)
* anymore the system won't work anyway.
*/
if (order > 2)
- page = cma_alloc(vmcp_cma, nr_pages, 0, GFP_KERNEL);
+ page = cma_alloc(vmcp_cma, nr_pages, 0, false);
if (page) {
session->response = (char *)page_to_phys(page);
session->cma_alloc = 1;
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index afbdee74147d..51038ec309c1 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -471,14 +471,17 @@ int chp_new(struct chp_id chpid)
{
struct channel_subsystem *css = css_by_id(chpid.cssid);
struct channel_path *chp;
- int ret;
+ int ret = 0;
+ mutex_lock(&css->mutex);
if (chp_is_registered(chpid))
- return 0;
- chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
- if (!chp)
- return -ENOMEM;
+ goto out;
+ chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+ if (!chp) {
+ ret = -ENOMEM;
+ goto out;
+ }
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
@@ -505,21 +508,20 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
- mutex_lock(&css->mutex);
+
if (css->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
device_unregister(&chp->dev);
- mutex_unlock(&css->mutex);
goto out;
}
}
css->chps[chpid.id] = chp;
- mutex_unlock(&css->mutex);
goto out;
out_free:
kfree(chp);
out:
+ mutex_unlock(&css->mutex);
return ret;
}
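
Taking css->mutex across the whole of chp_new() turns the earlier check-then-create race (two callers could both see the chpid as unregistered) into a serialized check-and-create, which is also why the chp_is_registered() pre-checks at the call sites can go away, as in the crw handler below. The generic shape of the fix, with hypothetical helpers:

#include <linux/mutex.h>

static DEFINE_MUTEX(registry_lock);

int already_registered(int id);	/* hypothetical registry helpers */
int do_register(int id);

static int register_once(int id)
{
	int ret = 0;

	mutex_lock(&registry_lock);
	/* Check and create under one lock so a second caller cannot slip
	 * in between the lookup and the registration.
	 */
	if (!already_registered(id))
		ret = do_register(id);
	mutex_unlock(&registry_lock);
	return ret;
}
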
@@ -585,8 +587,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
case CRW_ERC_INIT:
- if (!chp_is_registered(chpid))
- chp_new(chpid);
+ chp_new(chpid);
chsc_chp_online(chpid);
break;
case CRW_ERC_PERRI: /* Path has gone. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9029804dcd22..a0baee25134c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -91,7 +91,7 @@ struct chsc_ssd_area {
u16 sch; /* subchannel */
u8 chpid[8]; /* chpids 0-7 */
u16 fla[8]; /* full link addresses 0-7 */
-} __attribute__ ((packed));
+} __packed __aligned(PAGE_SIZE);
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
@@ -319,7 +319,7 @@ struct chsc_sei {
struct chsc_sei_nt2_area nt2_area;
u8 nt_area[PAGE_SIZE - 24];
} u;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
/*
* Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
@@ -841,7 +841,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 : 4;
u32 fmt : 4;
u32 : 16;
- } __attribute__ ((packed)) *secm_area;
+ } *secm_area;
unsigned long flags;
int ret, ccode;
@@ -1014,7 +1014,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 cmg : 8;
u32 zeroes3;
u32 data[NR_MEASUREMENT_CHARS];
- } __attribute__ ((packed)) *scmc_area;
+ } *scmc_area;
chp->shared = -1;
chp->cmg = -1;
@@ -1142,7 +1142,7 @@ int __init chsc_get_cssid(int idx)
u8 cssid;
u32 : 24;
} list[0];
- } __packed *sdcal_area;
+ } *sdcal_area;
int ret;
spin_lock_irq(&chsc_page_lock);
@@ -1192,7 +1192,7 @@ chsc_determine_css_characteristics(void)
u32 reserved4;
u32 general_char[510];
u32 chsc_char[508];
- } __attribute__ ((packed)) *scsc_area;
+ } *scsc_area;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1236,7 +1236,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
unsigned int rsvd3[3];
u64 clock_delta;
unsigned int rsvd4[2];
- } __attribute__ ((packed)) *rr;
+ } *rr;
int rc;
memset(page, 0, PAGE_SIZE);
@@ -1261,7 +1261,7 @@ int chsc_sstpi(void *page, void *result, size_t size)
unsigned int rsvd0[3];
struct chsc_header response;
char data[];
- } __attribute__ ((packed)) *rr;
+ } *rr;
int rc;
memset(page, 0, PAGE_SIZE);
@@ -1284,7 +1284,7 @@ int chsc_siosl(struct subchannel_id schid)
u32 word3;
struct chsc_header response;
u32 word[11];
- } __attribute__ ((packed)) *siosl_area;
+ } *siosl_area;
unsigned long flags;
int ccode;
int rc;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 5c9f0dd33f4e..78aba8d94eec 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -15,12 +15,12 @@
#define NR_MEASUREMENT_CHARS 5
struct cmg_chars {
u32 values[NR_MEASUREMENT_CHARS];
-} __attribute__ ((packed));
+};
#define NR_MEASUREMENT_ENTRIES 8
struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
-} __attribute__ ((packed));
+};
struct channel_path_desc_fmt1 {
u8 flags;
@@ -38,7 +38,7 @@ struct channel_path_desc_fmt1 {
u8 s:1;
u8 f:1;
u32 zeros[2];
-} __attribute__ ((packed));
+};
struct channel_path_desc_fmt3 {
struct channel_path_desc_fmt1 fmt1_desc;
@@ -59,7 +59,7 @@ struct css_chsc_char {
u32:7;
u32 pnso:1; /* bit 116 */
u32:11;
-}__attribute__((packed));
+} __packed;
extern struct css_chsc_char css_chsc_characteristics;
@@ -82,7 +82,7 @@ struct chsc_ssqd_area {
struct chsc_header response;
u32:32;
struct qdio_ssqd_desc qdio_ssqd;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_scssc_area {
struct chsc_header request;
@@ -102,7 +102,7 @@ struct chsc_scssc_area {
u32 reserved[1004];
struct chsc_header response;
u32:32;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_scpd {
struct chsc_header request;
@@ -120,7 +120,7 @@ struct chsc_scpd {
struct chsc_header response;
u32:32;
u8 data[0];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_sda_area {
struct chsc_header request;
@@ -199,7 +199,7 @@ struct chsc_scm_info {
u32 reserved2[10];
u64 restok;
struct sale scmal[248];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
@@ -243,7 +243,7 @@ struct chsc_pnso_area {
struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
struct qdio_brinfo_entry_l2 l2[0];
} entries;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
int chsc_pnso_brinfo(struct subchannel_id schid,
struct chsc_pnso_area *brinfo_area,
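
The __aligned(PAGE_SIZE) annotations make the type itself carry what was previously only a calling convention: these request/response areas live in a dedicated page (chsc_page). At the same time the redundant __packed is dropped where the layout has no implicit padding anyway. A quick standalone check of what the attribute does to a type, assuming GCC/Clang semantics and a 4 KiB page:

#include <assert.h>
#include <stdalign.h>

struct area {
	unsigned int request;
	unsigned int response;
} __attribute__((aligned(4096)));

/* aligned() raises both the required alignment and, via tail padding,
 * the size, so arrays of the type stay page-aligned as well.
 */
static_assert(alignof(struct area) == 4096, "page-aligned type");
static_assert(sizeof(struct area) % 4096 == 0, "size padded to a page");
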
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5130d7c67239..de744ca158fd 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -526,76 +526,6 @@ int cio_disable_subchannel(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
-static int cio_check_devno_blacklisted(struct subchannel *sch)
-{
- if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
- /*
- * This device must not be known to Linux. So we simply
- * say that there is no device and return ENODEV.
- */
- CIO_MSG_EVENT(6, "Blacklisted device detected "
- "at devno %04X, subchannel set %x\n",
- sch->schib.pmcw.dev, sch->schid.ssid);
- return -ENODEV;
- }
- return 0;
-}
-
-/**
- * cio_validate_subchannel - basic validation of subchannel
- * @sch: subchannel structure to be filled out
- * @schid: subchannel id
- *
- * Find out subchannel type and initialize struct subchannel.
- * Return codes:
- * 0 on success
- * -ENXIO for non-defined subchannels
- * -ENODEV for invalid subchannels or blacklisted devices
- * -EIO for subchannels in an invalid subchannel set
- */
-int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
-{
- char dbf_txt[15];
- int ccode;
- int err;
-
- sprintf(dbf_txt, "valsch%x", schid.sch_no);
- CIO_TRACE_EVENT(4, dbf_txt);
-
- /*
- * The first subchannel that is not-operational (ccode==3)
- * indicates that there aren't any more devices available.
- * If stsch gets an exception, it means the current subchannel set
- * is not valid.
- */
- ccode = stsch(schid, &sch->schib);
- if (ccode) {
- err = (ccode == 3) ? -ENXIO : ccode;
- goto out;
- }
- sch->st = sch->schib.pmcw.st;
- sch->schid = schid;
-
- switch (sch->st) {
- case SUBCHANNEL_TYPE_IO:
- case SUBCHANNEL_TYPE_MSG:
- if (!css_sch_is_valid(&sch->schib))
- err = -ENODEV;
- else
- err = cio_check_devno_blacklisted(sch);
- break;
- default:
- err = 0;
- }
- if (err)
- goto out;
-
- CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
- sch->schid.ssid, sch->schid.sch_no, sch->st);
-out:
- return err;
-}
-
/*
* do_cio_interrupt() handles all normal I/O device IRQ's
*/
@@ -719,6 +649,7 @@ struct subchannel *cio_probe_console(void)
{
struct subchannel_id schid;
struct subchannel *sch;
+ struct schib schib;
int sch_no, ret;
sch_no = cio_get_console_sch_no();
@@ -728,7 +659,11 @@ struct subchannel *cio_probe_console(void)
}
init_subchannel_id(&schid);
schid.sch_no = sch_no;
- sch = css_alloc_subchannel(schid);
+ ret = stsch(schid, &schib);
+ if (ret)
+ return ERR_PTR(-ENODEV);
+
+ sch = css_alloc_subchannel(schid, &schib);
if (IS_ERR(sch))
return sch;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 94cd813bdcfe..9811fd8a0c73 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -119,7 +119,6 @@ DECLARE_PER_CPU(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
-extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
extern int cio_enable_subchannel(struct subchannel *, u32);
extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9263a0fb3858..aea502922646 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -25,6 +25,7 @@
#include "css.h"
#include "cio.h"
+#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
@@ -168,18 +169,53 @@ static void css_subchannel_release(struct device *dev)
kfree(sch);
}
-struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
+static int css_validate_subchannel(struct subchannel_id schid,
+ struct schib *schib)
+{
+ int err;
+
+ switch (schib->pmcw.st) {
+ case SUBCHANNEL_TYPE_IO:
+ case SUBCHANNEL_TYPE_MSG:
+ if (!css_sch_is_valid(schib))
+ err = -ENODEV;
+ else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
+ CIO_MSG_EVENT(6, "Blacklisted device detected "
+ "at devno %04X, subchannel set %x\n",
+ schib->pmcw.dev, schid.ssid);
+ err = -ENODEV;
+ } else
+ err = 0;
+ break;
+ default:
+ err = 0;
+ }
+ if (err)
+ goto out;
+
+ CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+ schid.ssid, schid.sch_no, schib->pmcw.st);
+out:
+ return err;
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+ struct schib *schib)
{
struct subchannel *sch;
int ret;
+ ret = css_validate_subchannel(schid, schib);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
if (!sch)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel(sch, schid);
- if (ret < 0)
- goto err;
+ sch->schid = schid;
+ sch->schib = *schib;
+ sch->st = schib->pmcw.st;
ret = css_sch_create_locks(sch);
if (ret)
@@ -244,8 +280,7 @@ static void ssd_register_chpids(struct chsc_ssd_info *ssd)
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd->path_mask & mask)
- if (!chp_is_registered(ssd->chpid[i]))
- chp_new(ssd->chpid[i]);
+ chp_new(ssd->chpid[i]);
}
}
@@ -382,12 +417,12 @@ int css_register_subchannel(struct subchannel *sch)
return ret;
}
-static int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
struct subchannel *sch;
int ret;
- sch = css_alloc_subchannel(schid);
+ sch = css_alloc_subchannel(schid, schib);
if (IS_ERR(sch))
return PTR_ERR(sch);
@@ -436,23 +471,23 @@ EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
struct schib schib;
+ int ccode;
if (!slow) {
/* Will be done on the slow path. */
return -EAGAIN;
}
- if (stsch(schid, &schib)) {
- /* Subchannel is not provided. */
- return -ENXIO;
- }
- if (!css_sch_is_valid(&schib)) {
- /* Unusable - ignore. */
- return 0;
- }
- CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
- schid.sch_no);
+ /*
+	 * The first subchannel that is not operational (ccode==3)
+	 * indicates that there aren't any more devices available.
+ * If stsch gets an exception, it means the current subchannel set
+ * is not valid.
+ */
+ ccode = stsch(schid, &schib);
+ if (ccode)
+ return (ccode == 3) ? -ENXIO : ccode;
- return css_probe_device(schid);
+ return css_probe_device(schid, &schib);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
@@ -1081,6 +1116,11 @@ static int __init channel_subsystem_init(void)
if (ret)
goto out_wq;
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
+ /* Start initial subchannel evaluation. */
+ css_schedule_eval_all();
+
return ret;
out_wq:
destroy_workqueue(cio_work_q);
@@ -1120,10 +1160,6 @@ int css_complete_work(void)
*/
static int __init channel_subsystem_init_sync(void)
{
- /* Register subchannels which are already in use. */
- cio_register_early_subchannels();
- /* Start initial subchannel evaluation. */
- css_schedule_eval_all();
css_complete_work();
return 0;
}
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 30357cbf350a..8d832900a63d 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -103,7 +103,8 @@ extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
extern int css_register_subchannel(struct subchannel *);
-extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id,
+ struct schib *schib);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index f4ca72dd862f..9c7d9da42ba0 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -631,21 +631,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
unsigned long phys_aob = 0;
if (!q->use_cq)
- goto out;
+ return 0;
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
}
-out:
+ q->sbal_state[bufnr].flags = 0;
return phys_aob;
}
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 1f8d1c1e566d..0ebb29b6fd6d 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -30,6 +30,17 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__field(u16, schno)
__field(u16, devno)
__field_struct(struct schib, schib)
+ __field(u8, pmcw_ena)
+ __field(u8, pmcw_st)
+ __field(u8, pmcw_dnv)
+ __field(u16, pmcw_dev)
+ __field(u8, pmcw_lpm)
+ __field(u8, pmcw_pnom)
+ __field(u8, pmcw_lpum)
+ __field(u8, pmcw_pim)
+ __field(u8, pmcw_pam)
+ __field(u8, pmcw_pom)
+ __field(u64, pmcw_chpid)
__field(int, cc)
),
TP_fast_assign(
@@ -38,18 +49,29 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__entry->schno = schid.sch_no;
__entry->devno = schib->pmcw.dev;
__entry->schib = *schib;
+ __entry->pmcw_ena = schib->pmcw.ena;
+		__entry->pmcw_st = schib->pmcw.st;
+ __entry->pmcw_dnv = schib->pmcw.dnv;
+ __entry->pmcw_dev = schib->pmcw.dev;
+ __entry->pmcw_lpm = schib->pmcw.lpm;
+ __entry->pmcw_pnom = schib->pmcw.pnom;
+ __entry->pmcw_lpum = schib->pmcw.lpum;
+ __entry->pmcw_pim = schib->pmcw.pim;
+ __entry->pmcw_pam = schib->pmcw.pam;
+ __entry->pmcw_pom = schib->pmcw.pom;
+ memcpy(&__entry->pmcw_chpid, &schib->pmcw.chpid, 8);
__entry->cc = cc;
),
TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x "
"lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x "
"pom=0x%02x chpids=%016llx",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- __entry->schib.pmcw.ena, __entry->schib.pmcw.st,
- __entry->schib.pmcw.dnv, __entry->schib.pmcw.dev,
- __entry->schib.pmcw.lpm, __entry->schib.pmcw.pnom,
- __entry->schib.pmcw.lpum, __entry->schib.pmcw.pim,
- __entry->schib.pmcw.pam, __entry->schib.pmcw.pom,
- *((u64 *) __entry->schib.pmcw.chpid)
+ __entry->pmcw_ena, __entry->pmcw_st,
+ __entry->pmcw_dnv, __entry->pmcw_dev,
+ __entry->pmcw_lpm, __entry->pmcw_pnom,
+ __entry->pmcw_lpum, __entry->pmcw_pim,
+ __entry->pmcw_pam, __entry->pmcw_pom,
+ __entry->pmcw_chpid
)
);
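The same pattern repeats through the rest of trace.h: every scalar that
TP_printk() needs is copied into a flat __field() in TP_fast_assign(), and the
format string no longer dereferences the embedded structure or calls helper
functions (presumably so binary trace decoders can resolve the fields without
evaluating kernel helpers at read time). A minimal sketch of the pattern, with
a hypothetical event name:

	TRACE_EVENT(s390_cio_sample,
		TP_PROTO(struct schib *schib),
		TP_ARGS(schib),
		TP_STRUCT__entry(
			__field_struct(struct schib, schib)	/* full copy kept */
			__field(u16, dev)			/* flat copy for printing */
		),
		TP_fast_assign(
			__entry->schib = *schib;
			__entry->dev = schib->pmcw.dev;
		),
		TP_printk("dev=%04x", __entry->dev)	/* no helper calls here */
	);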
@@ -89,6 +111,13 @@ TRACE_EVENT(s390_cio_tsch,
__field(u8, ssid)
__field(u16, schno)
__field_struct(struct irb, irb)
+ __field(u8, scsw_dcc)
+ __field(u8, scsw_pno)
+ __field(u8, scsw_fctl)
+ __field(u8, scsw_actl)
+ __field(u8, scsw_stctl)
+ __field(u8, scsw_dstat)
+ __field(u8, scsw_cstat)
__field(int, cc)
),
TP_fast_assign(
@@ -96,15 +125,22 @@ TRACE_EVENT(s390_cio_tsch,
__entry->ssid = schid.ssid;
__entry->schno = schid.sch_no;
__entry->irb = *irb;
+ __entry->scsw_dcc = scsw_cc(&irb->scsw);
+ __entry->scsw_pno = scsw_pno(&irb->scsw);
+ __entry->scsw_fctl = scsw_fctl(&irb->scsw);
+ __entry->scsw_actl = scsw_actl(&irb->scsw);
+ __entry->scsw_stctl = scsw_stctl(&irb->scsw);
+ __entry->scsw_dstat = scsw_dstat(&irb->scsw);
+ __entry->scsw_cstat = scsw_cstat(&irb->scsw);
__entry->cc = cc;
),
TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x "
"stctl=0x%x dstat=0x%x cstat=0x%x",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- scsw_cc(&__entry->irb.scsw), scsw_pno(&__entry->irb.scsw),
- scsw_fctl(&__entry->irb.scsw), scsw_actl(&__entry->irb.scsw),
- scsw_stctl(&__entry->irb.scsw),
- scsw_dstat(&__entry->irb.scsw), scsw_cstat(&__entry->irb.scsw)
+ __entry->scsw_dcc, __entry->scsw_pno,
+ __entry->scsw_fctl, __entry->scsw_actl,
+ __entry->scsw_stctl,
+ __entry->scsw_dstat, __entry->scsw_cstat
)
);
@@ -122,6 +158,9 @@ TRACE_EVENT(s390_cio_tpi,
__field(u8, cssid)
__field(u8, ssid)
__field(u16, schno)
+ __field(u8, adapter_IO)
+ __field(u8, isc)
+ __field(u8, type)
),
TP_fast_assign(
__entry->cc = cc;
@@ -136,11 +175,14 @@ TRACE_EVENT(s390_cio_tpi,
__entry->cssid = __entry->tpi_info.schid.cssid;
__entry->ssid = __entry->tpi_info.schid.ssid;
__entry->schno = __entry->tpi_info.schid.sch_no;
+ __entry->adapter_IO = __entry->tpi_info.adapter_IO;
+ __entry->isc = __entry->tpi_info.isc;
+ __entry->type = __entry->tpi_info.type;
),
TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- __entry->tpi_info.adapter_IO, __entry->tpi_info.isc,
- __entry->tpi_info.type
+ __entry->adapter_IO, __entry->isc,
+ __entry->type
)
);
@@ -299,16 +341,20 @@ TRACE_EVENT(s390_cio_interrupt,
__field(u8, cssid)
__field(u8, ssid)
__field(u16, schno)
+ __field(u8, isc)
+ __field(u8, type)
),
TP_fast_assign(
__entry->tpi_info = *tpi_info;
- __entry->cssid = __entry->tpi_info.schid.cssid;
- __entry->ssid = __entry->tpi_info.schid.ssid;
- __entry->schno = __entry->tpi_info.schid.sch_no;
+ __entry->cssid = tpi_info->schid.cssid;
+ __entry->ssid = tpi_info->schid.ssid;
+ __entry->schno = tpi_info->schid.sch_no;
+ __entry->isc = tpi_info->isc;
+ __entry->type = tpi_info->type;
),
TP_printk("schid=%x.%x.%04x isc=%d type=%d",
__entry->cssid, __entry->ssid, __entry->schno,
- __entry->tpi_info.isc, __entry->tpi_info.type
+ __entry->isc, __entry->type
)
);
@@ -321,11 +367,13 @@ TRACE_EVENT(s390_cio_adapter_int,
TP_ARGS(tpi_info),
TP_STRUCT__entry(
__field_struct(struct tpi_info, tpi_info)
+ __field(u8, isc)
),
TP_fast_assign(
__entry->tpi_info = *tpi_info;
+ __entry->isc = tpi_info->isc;
),
- TP_printk("isc=%d", __entry->tpi_info.isc)
+ TP_printk("isc=%d", __entry->isc)
);
/**
@@ -339,16 +387,30 @@ TRACE_EVENT(s390_cio_stcrw,
TP_STRUCT__entry(
__field_struct(struct crw, crw)
__field(int, cc)
+ __field(u8, slct)
+ __field(u8, oflw)
+ __field(u8, chn)
+ __field(u8, rsc)
+ __field(u8, anc)
+ __field(u8, erc)
+ __field(u16, rsid)
),
TP_fast_assign(
__entry->crw = *crw;
__entry->cc = cc;
+ __entry->slct = crw->slct;
+ __entry->oflw = crw->oflw;
+ __entry->chn = crw->chn;
+ __entry->rsc = crw->rsc;
+ __entry->anc = crw->anc;
+ __entry->erc = crw->erc;
+ __entry->rsid = crw->rsid;
),
TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x "
"rsid=0x%x",
- __entry->cc, __entry->crw.slct, __entry->crw.oflw,
- __entry->crw.chn, __entry->crw.rsc, __entry->crw.anc,
- __entry->crw.erc, __entry->crw.rsid
+ __entry->cc, __entry->slct, __entry->oflw,
+ __entry->chn, __entry->rsc, __entry->anc,
+ __entry->erc, __entry->rsid
)
);
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
deleted file mode 100644
index 16b59ce5e01d..000000000000
--- a/drivers/s390/crypto/ap_asm.h
+++ /dev/null
@@ -1,236 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2016
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * Adjunct processor bus inline assemblies.
- */
-
-#ifndef _AP_ASM_H_
-#define _AP_ASM_H_
-
-#include <asm/isc.h>
-
-/**
- * ap_intructions_available() - Test if AP instructions are available.
- *
- * Returns 0 if the AP instructions are installed.
- */
-static inline int ap_instructions_available(void)
-{
- register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
- register unsigned long reg1 asm ("1") = -ENODEV;
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_tapq(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
-{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- if (info)
- *info = reg2;
- return reg1;
-}
-
-/**
- * ap_pqap_rapq(): Reset adjunct processor queue.
- * @qid: The AP queue number
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
-{
- register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(RAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_aqic(): Control interruption for a specific AP.
- * @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- */
-static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind)
-{
- register unsigned long reg0 asm ("0") = qid | (3UL << 24);
- register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
- register struct ap_queue_status reg1_out asm ("1");
- register void *reg2 asm ("2") = ind;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(AQIC) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- :
- : "cc");
- return reg1_out;
-}
-
-/**
- * ap_qci(): Get AP configuration data
- *
- * Returns 0 on success, or -EOPNOTSUPP.
- */
-static inline int ap_qci(void *config)
-{
- register unsigned long reg0 asm ("0") = 0x04000000UL;
- register unsigned long reg1 asm ("1") = -EINVAL;
- register void *reg2 asm ("2") = (void *) config;
-
- asm volatile(
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2)
- :
- : "cc", "memory");
-
- return reg1;
-}
-
-/*
- * union ap_qact_ap_info - used together with the
- * ap_aqic() function to provide a convenient way
- * to handle the ap info needed by the qact function.
- */
-union ap_qact_ap_info {
- unsigned long val;
- struct {
- unsigned int : 3;
- unsigned int mode : 3;
- unsigned int : 26;
- unsigned int cat : 8;
- unsigned int : 8;
- unsigned char ver[2];
- };
-};
-
-/**
- * ap_qact(): Query AP combatibility type.
- * @qid: The AP queue number
- * @apinfo: On input the info about the AP queue. On output the
- * alternate AP queue info provided by the qact function
- * in GR2 is stored in.
- *
- * Returns AP queue status. Check response_code field for failures.
- */
-static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
- union ap_qact_ap_info *apinfo)
-{
- register unsigned long reg0 asm ("0") = qid | (5UL << 24)
- | ((ifbit & 0x01) << 22);
- register unsigned long reg1_in asm ("1") = apinfo->val;
- register struct ap_queue_status reg1_out asm ("1");
- register unsigned long reg2 asm ("2") = 0;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(QACT) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- : : "cc");
- apinfo->val = reg2;
- return reg1_out;
-}
-
-/**
- * ap_nqap(): Send message to adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: The program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on NQAP can't happen because the L bit is 1.
- * Condition code 2 on NQAP also means the send is incomplete,
- * because a segment boundary was reached. The NQAP is repeated.
- */
-static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
- unsigned long long psmid,
- void *msg, size_t length)
-{
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
- asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5)
- : "cc", "memory");
- return reg1;
-}
-
-/**
- * ap_dqap(): Receive message from adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on DQAP means the receive has taken place
- * but only partially. The response is incomplete, hence the
- * DQAP is repeated.
- * Condition code 2 on DQAP also means the receive is incomplete,
- * this time because a segment boundary was reached. Again, the
- * DQAP is repeated.
- * Note that gpr2 is used by the DQAP instruction to keep track of
- * any 'residual' length, in case the instruction gets interrupted.
- * Hence it gets zeroed before the instruction.
- */
-static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
- unsigned long long *psmid,
- void *msg, size_t length)
-{
- register unsigned long reg0 asm("0") = qid | 0x80000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm("2") = 0UL;
- register unsigned long reg4 asm("4") = (unsigned long) msg;
- register unsigned long reg5 asm("5") = (unsigned long) length;
- register unsigned long reg6 asm("6") = 0UL;
- register unsigned long reg7 asm("7") = 0UL;
-
-
- asm volatile(
- "0: .long 0xb2ae0064\n" /* DQAP */
- " brc 6,0b\n"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2),
- "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
- : : "cc", "memory");
- *psmid = (((unsigned long long) reg6) << 32) + reg7;
- return reg1;
-}
-
-#endif /* _AP_ASM_H_ */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 35a0c2b52f82..ec891bc7d10a 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -34,9 +34,9 @@
#include <linux/crypto.h>
#include <linux/mod_devicetable.h>
#include <linux/debugfs.h>
+#include <linux/ctype.h>
#include "ap_bus.h"
-#include "ap_asm.h"
#include "ap_debug.h"
/*
@@ -44,19 +44,34 @@
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
-module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
+module_param_named(domain, ap_domain_index, int, 0440);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);
-static int ap_thread_flag = 0;
-module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
+static int ap_thread_flag;
+module_param_named(poll_thread, ap_thread_flag, int, 0440);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
+static char *apm_str;
+module_param_named(apmask, apm_str, charp, 0440);
+MODULE_PARM_DESC(apmask, "AP bus adapter mask.");
+
+static char *aqm_str;
+module_param_named(aqmask, aqm_str, charp, 0440);
+MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
+
static struct device *ap_root_device;
DEFINE_SPINLOCK(ap_list_lock);
LIST_HEAD(ap_card_list);
+/* Default permissions (card and domain masking) */
+static struct ap_perms {
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+} ap_perms;
+static DEFINE_MUTEX(ap_perms_mutex);
+
static struct ap_config_info *ap_configuration;
static bool initialised;
@@ -79,22 +94,26 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
static void ap_tasklet_fn(unsigned long);
static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
-static struct task_struct *ap_poll_kthread = NULL;
+static struct task_struct *ap_poll_kthread;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;
-/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
- * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
+/*
+ * In LPAR, poll with 4kHz frequency, i.e. every 250000 nanoseconds.
+ * Under z/VM, change to 1500000 nanoseconds to adjust to z/VM polling.
+ */
static unsigned long long poll_timeout = 250000;
/* Suspend flag */
static int ap_suspend_flag;
/* Maximum domain id */
static int ap_max_domain_id;
-/* Flag to check if domain was set through module parameter domain=. This is
+/*
+ * Flag to check if domain was set through module parameter domain=. This is
 * important when suspend and resume are done in a z/VM environment where the
- * domain might change. */
-static int user_set_domain = 0;
+ * domain might change.
+ */
+static int user_set_domain;
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
@@ -174,24 +193,6 @@ static inline int ap_qact_available(void)
return 0;
}
-/**
- * ap_test_queue(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info)
-{
- if (tbit)
- qid |= 1UL << 23; /* set T bit*/
- return ap_tapq(qid, info);
-}
-EXPORT_SYMBOL(ap_test_queue);
-
/*
* ap_query_configuration(): Fetch cryptographic config info
*
@@ -200,7 +201,7 @@ EXPORT_SYMBOL(ap_test_queue);
* is returned, e.g. if the PQAP(QCI) instruction is not
* available, the return value will be -EOPNOTSUPP.
*/
-int ap_query_configuration(struct ap_config_info *info)
+static inline int ap_query_configuration(struct ap_config_info *info)
{
if (!ap_configuration_available())
return -EOPNOTSUPP;
@@ -493,7 +494,7 @@ static int ap_poll_thread_start(void)
return 0;
mutex_lock(&ap_poll_thread_mutex);
ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
- rc = PTR_RET(ap_poll_kthread);
+ rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
if (rc)
ap_poll_kthread = NULL;
mutex_unlock(&ap_poll_thread_mutex);
@@ -550,7 +551,7 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
* It sets up a single environment variable DEV_TYPE which contains the
* hardware device type.
*/
-static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
+static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct ap_device *ap_dev = to_ap_dev(dev);
int retval = 0;
@@ -589,7 +590,7 @@ static int ap_dev_resume(struct device *dev)
static void ap_bus_suspend(void)
{
- AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n");
+ AP_DBF(DBF_DEBUG, "%s running\n", __func__);
ap_suspend_flag = 1;
/*
@@ -626,7 +627,7 @@ static void ap_bus_resume(void)
{
int rc;
- AP_DBF(DBF_DEBUG, "ap_bus_resume running\n");
+ AP_DBF(DBF_DEBUG, "%s running\n", __func__);
/* remove all queue devices */
bus_for_each_dev(&ap_bus_type, NULL, NULL,
@@ -685,11 +686,97 @@ static struct bus_type ap_bus_type = {
.pm = &ap_bus_pm_ops,
};
+static int __ap_revise_reserved(struct device *dev, void *dummy)
+{
+ int rc, card, queue, devres, drvres;
+
+ if (is_queue_dev(dev)) {
+ card = AP_QID_CARD(to_ap_queue(dev)->qid);
+ queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
+ mutex_lock(&ap_perms_mutex);
+ devres = test_bit_inv(card, ap_perms.apm)
+ && test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_perms_mutex);
+ drvres = to_ap_drv(dev->driver)->flags
+ & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres) {
+ AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n",
+ card, queue);
+ rc = device_reprobe(dev);
+ }
+ }
+
+ return 0;
+}
+
+static void ap_bus_revise_bindings(void)
+{
+ bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
+}
+
+int ap_owned_by_def_drv(int card, int queue)
+{
+ int rc = 0;
+
+ if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
+ return -EINVAL;
+
+ mutex_lock(&ap_perms_mutex);
+
+ if (test_bit_inv(card, ap_perms.apm)
+ && test_bit_inv(queue, ap_perms.aqm))
+ rc = 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(ap_owned_by_def_drv);
+
+int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
+ unsigned long *aqm)
+{
+ int card, queue, rc = 0;
+
+ mutex_lock(&ap_perms_mutex);
+
+ for (card = 0; !rc && card < AP_DEVICES; card++)
+ if (test_bit_inv(card, apm) &&
+ test_bit_inv(card, ap_perms.apm))
+ for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
+ if (test_bit_inv(queue, aqm) &&
+ test_bit_inv(queue, ap_perms.aqm))
+ rc = 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
+
static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
- int rc;
+ int card, queue, devres, drvres, rc;
+
+ if (is_queue_dev(dev)) {
+ /*
+ * If the apqn is marked as reserved/used by ap bus and
+ * default drivers, only probe with drivers with the default
+ * flag set. If it is not marked, only probe with drivers
+ * with the default flag not set.
+ */
+ card = AP_QID_CARD(to_ap_queue(dev)->qid);
+ queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
+ mutex_lock(&ap_perms_mutex);
+ devres = test_bit_inv(card, ap_perms.apm)
+ && test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_perms_mutex);
+ drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres)
+ return -ENODEV;
+ }
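Put differently, the probe only proceeds when the reservation status of the
APQN and the driver's DEFAULT flag agree. An illustrative decision table (not
part of the patch):

	/*
	 *   APQN reserved (devres)   driver DEFAULT flag (drvres)   result
	 *   ----------------------   ----------------------------   -------
	 *   yes                      yes                            probe
	 *   yes                      no                             -ENODEV
	 *   no                       yes                            -ENODEV
	 *   no                       no                             probe
	 */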
/* Add queue/card to list of active queues/cards */
spin_lock_bh(&ap_list_lock);
@@ -770,8 +857,163 @@ void ap_bus_force_rescan(void)
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
+ * hex2bitmap() - parse hex mask string and set bitmap.
+ * Valid strings are "0x012345678" with at least one valid hex number.
+ * The rest of the bitmap to the right is padded with 0. No spaces are
+ * allowed within the string; the leading 0x may be omitted.
+ * On success the bitmap holds exactly the bits given by the hex string
+ * (both in big endian order) and 0 is returned; on a parse error,
+ * -EINVAL is returned.
+ */
+static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+{
+ int i, n, b;
+
+ /* bits needs to be a multiple of 8 */
+ if (bits & 0x07)
+ return -EINVAL;
+
+ memset(bitmap, 0, bits / 8);
+
+ if (str[0] == '0' && str[1] == 'x')
+ str++;
+ if (*str == 'x')
+ str++;
+
+ for (i = 0; isxdigit(*str) && i < bits; str++) {
+ b = hex_to_bin(*str);
+ for (n = 0; n < 4; n++)
+ if (b & (0x08 >> n))
+ set_bit_inv(i + n, bitmap);
+ i += 4;
+ }
+
+ if (*str == '\n')
+ str++;
+ if (*str)
+ return -EINVAL;
+ return 0;
+}
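A usage sketch under these semantics (the variable names are made up): since
set_bit_inv() numbers bits from the most significant end on s390, the parsed
mask reads left to right exactly like the hex string.

	DECLARE_BITMAP(m, 64);

	if (!hex2bitmap("0x81", m, 64)) {
		/*
		 * Bits 0 and 7 (big endian numbering) are set, the rest is
		 * padded with 0, i.e. m[0] == 0x8100000000000000UL here.
		 */
	}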
+
+/*
+ * str2clrsetmasks() - parse bitmask argument and set the clear and
+ * the set bitmap mask. A concatenation (done with ',') of these terms
+ * is recognized:
+ * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
+ * <bitnr> may be any valid number (hex, decimal or octal) in the range
+ * 0...bits-1; the leading + or - is required. Here are some examples:
+ * +0-15,+32,-128,-0xFF
+ * -0-255,+1-16,+0x128
+ * +1,+2,+3,+4,-5,-7-10
+ * Returns a clear and a set bitmask. Every positive value in the string
+ * results in a bit set in the set mask and every negative value in the
+ * string results in a bit set in the clear mask. As a bit may be touched
+ * more than once, the last 'operation' wins: +0-255,-128 = all but bit
+ * 128 set in the set mask, only bit 128 set in the clear mask.
+ */
+static int str2clrsetmasks(const char *str,
+ unsigned long *clrmap,
+ unsigned long *setmap,
+ int bits)
+{
+ int a, i, z;
+ char *np, sign;
+
+ /* bits needs to be a multiple of 8 */
+ if (bits & 0x07)
+ return -EINVAL;
+
+ memset(clrmap, 0, bits / 8);
+ memset(setmap, 0, bits / 8);
+
+ while (*str) {
+ sign = *str++;
+ if (sign != '+' && sign != '-')
+ return -EINVAL;
+ a = z = simple_strtoul(str, &np, 0);
+ if (str == np || a >= bits)
+ return -EINVAL;
+ str = np;
+ if (*str == '-') {
+ z = simple_strtoul(++str, &np, 0);
+ if (str == np || a > z || z >= bits)
+ return -EINVAL;
+ str = np;
+ }
+ for (i = a; i <= z; i++)
+ if (sign == '+') {
+ set_bit_inv(i, setmap);
+ clear_bit_inv(i, clrmap);
+ } else {
+ clear_bit_inv(i, setmap);
+ set_bit_inv(i, clrmap);
+ }
+ while (*str == ',' || *str == '\n')
+ str++;
+ }
+
+ return 0;
+}
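A worked example of the "last operation wins" rule described above (names are
illustrative):

	DECLARE_BITMAP(clr, 256);
	DECLARE_BITMAP(set, 256);

	if (!str2clrsetmasks("+0-255,-128", clr, set, 256)) {
		/*
		 * All bits except 128 are set in the set mask and only bit
		 * 128 is set in the clear mask, because "-128" came last.
		 */
	}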
+
+/*
+ * process_mask_arg() - parse a bitmap string and clear/set the
+ * bits in the bitmap accordingly. The string may be given as an
+ * absolute value, a hex string like "0x1F2E3D4C5B6A", simply
+ * overwriting the current content of the bitmap, or as a relative
+ * string like "+1-16,-32,-0x40,+128" where only single bits or
+ * ranges of bits are cleared or set. The distinction is made on the
+ * very first character, which is '+' or '-' for a relative string
+ * and is otherwise assumed to start an absolute value string. If
+ * parsing fails a negative errno value is returned. All arguments
+ * and bitmaps are in big endian order.
+ */
+static int process_mask_arg(const char *str,
+ unsigned long *bitmap, int bits,
+ struct mutex *lock)
+{
+ int i;
+
+ /* bits needs to be a multiple of 8 */
+ if (bits & 0x07)
+ return -EINVAL;
+
+ if (*str == '+' || *str == '-') {
+ DECLARE_BITMAP(clrm, bits);
+ DECLARE_BITMAP(setm, bits);
+
+ i = str2clrsetmasks(str, clrm, setm, bits);
+ if (i)
+ return i;
+ if (mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ for (i = 0; i < bits; i++) {
+ if (test_bit_inv(i, clrm))
+ clear_bit_inv(i, bitmap);
+ if (test_bit_inv(i, setm))
+ set_bit_inv(i, bitmap);
+ }
+ } else {
+ DECLARE_BITMAP(setm, bits);
+
+ i = hex2bitmap(str, setm, bits);
+ if (i)
+ return i;
+ if (mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ for (i = 0; i < bits; i++)
+ if (test_bit_inv(i, setm))
+ set_bit_inv(i, bitmap);
+ else
+ clear_bit_inv(i, bitmap);
+ }
+ mutex_unlock(lock);
+
+ return 0;
+}
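Both accepted forms side by side, as a sketch (mask and mutex names are
hypothetical):

	/* absolute form: overwrite the whole mask with the hex string */
	process_mask_arg("0xffff000000000000", mask, 64, &mask_mutex);

	/* relative form: flip the listed bits/ranges, keep the rest */
	process_mask_arg("+0-3,-16", mask, 64, &mask_mutex);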
+
+/*
* AP bus attributes.
*/
+
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
@@ -783,7 +1025,8 @@ static ssize_t ap_domain_store(struct bus_type *bus,
int domain;
if (sscanf(buf, "%i\n", &domain) != 1 ||
- domain < 0 || domain > ap_max_domain_id)
+ domain < 0 || domain > ap_max_domain_id ||
+ !test_bit_inv(domain, ap_perms.aqm))
return -EINVAL;
spin_lock_bh(&ap_domain_lock);
ap_domain_index = domain;
@@ -794,7 +1037,7 @@ static ssize_t ap_domain_store(struct bus_type *bus,
return count;
}
-static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store);
+static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
@@ -809,8 +1052,7 @@ static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
ap_configuration->adm[6], ap_configuration->adm[7]);
}
-static BUS_ATTR(ap_control_domain_mask, 0444,
- ap_control_domain_mask_show, NULL);
+static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
@@ -825,13 +1067,7 @@ static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
ap_configuration->aqm[6], ap_configuration->aqm[7]);
}
-static BUS_ATTR(ap_usage_domain_mask, 0444,
- ap_usage_domain_mask_show, NULL);
-
-static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
-}
+static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
@@ -839,10 +1075,15 @@ static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
ap_using_interrupts() ? 1 : 0);
}
-static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
+static BUS_ATTR_RO(ap_interrupts);
-static ssize_t ap_config_time_store(struct bus_type *bus,
- const char *buf, size_t count)
+static ssize_t config_time_show(struct bus_type *bus, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+}
+
+static ssize_t config_time_store(struct bus_type *bus,
+ const char *buf, size_t count)
{
int time;
@@ -853,15 +1094,15 @@ static ssize_t ap_config_time_store(struct bus_type *bus,
return count;
}
-static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
+static BUS_ATTR_RW(config_time);
-static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
+static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}
-static ssize_t ap_poll_thread_store(struct bus_type *bus,
- const char *buf, size_t count)
+static ssize_t poll_thread_store(struct bus_type *bus,
+ const char *buf, size_t count)
{
int flag, rc;
@@ -876,7 +1117,7 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus,
return count;
}
-static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
+static BUS_ATTR_RW(poll_thread);
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
@@ -905,7 +1146,7 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
return count;
}
-static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
+static BUS_ATTR_RW(poll_timeout);
static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
{
@@ -918,7 +1159,69 @@ static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
}
-static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
+static BUS_ATTR_RO(ap_max_domain_id);
+
+static ssize_t apmask_show(struct bus_type *bus, char *buf)
+{
+ int rc;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+ rc = snprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.apm[0], ap_perms.apm[1],
+ ap_perms.apm[2], ap_perms.apm[3]);
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t apmask_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ int rc;
+
+ rc = process_mask_arg(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ ap_bus_revise_bindings();
+
+ return count;
+}
+
+static BUS_ATTR_RW(apmask);
+
+static ssize_t aqmask_show(struct bus_type *bus, char *buf)
+{
+ int rc;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+ rc = snprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.aqm[0], ap_perms.aqm[1],
+ ap_perms.aqm[2], ap_perms.aqm[3]);
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ int rc;
+
+ rc = process_mask_arg(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ ap_bus_revise_bindings();
+
+ return count;
+}
+
+static BUS_ATTR_RW(aqmask);
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
@@ -929,6 +1232,8 @@ static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_interrupts,
&bus_attr_poll_timeout,
&bus_attr_ap_max_domain_id,
+ &bus_attr_apmask,
+ &bus_attr_aqmask,
NULL,
};
@@ -957,7 +1262,8 @@ static int ap_select_domain(void)
best_domain = -1;
max_count = 0;
for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_domain(i))
+ if (!ap_test_config_domain(i) ||
+ !test_bit_inv(i, ap_perms.aqm))
continue;
count = 0;
for (j = 0; j < AP_DEVICES; j++) {
@@ -975,7 +1281,7 @@ static int ap_select_domain(void)
best_domain = i;
}
}
- if (best_domain >= 0){
+ if (best_domain >= 0) {
ap_domain_index = best_domain;
AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
spin_unlock_bh(&ap_domain_lock);
@@ -1057,7 +1363,7 @@ static void ap_scan_bus(struct work_struct *unused)
unsigned int func = 0;
int rc, id, dom, borked, domains, defdomdevs = 0;
- AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
+ AP_DBF(DBF_DEBUG, "%s running\n", __func__);
ap_query_configuration(ap_configuration);
if (ap_select_domain() != 0)
@@ -1182,7 +1488,8 @@ static void ap_scan_bus(struct work_struct *unused)
} /* end device loop */
if (defdomdevs < 1)
- AP_DBF(DBF_INFO, "no queue device with default domain %d available\n",
+ AP_DBF(DBF_INFO,
+ "no queue device with default domain %d available\n",
ap_domain_index);
out:
@@ -1206,6 +1513,27 @@ static int __init ap_debug_init(void)
return 0;
}
+static void __init ap_perms_init(void)
+{
+	/* all resources usable if no kernel parameter string given */
+ memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
+ memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
+
+ /* apm kernel parameter string */
+ if (apm_str) {
+ memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
+ process_mask_arg(apm_str, ap_perms.apm, AP_DEVICES,
+ &ap_perms_mutex);
+ }
+
+ /* aqm kernel parameter string */
+ if (aqm_str) {
+ memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
+ process_mask_arg(aqm_str, ap_perms.aqm, AP_DOMAINS,
+ &ap_perms_mutex);
+ }
+}
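To make the effect concrete, a hedged example (the ap.apmask= and ap.aqmask=
command line spellings are assumptions, not taken from this patch):

	/*
	 *   ap.apmask=0xffffffffffffffff   reserve only cards 0-63
	 *   ap.aqmask=+0-15                reserve only domains 0-15
	 *
	 * Shorter hex strings are zero-padded to the right, so the apm
	 * mask above covers cards 0-63 only; without either parameter
	 * the 0xFF presets keep every card and domain reserved for the
	 * default drivers.
	 */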
+
/**
* ap_module_init(): The module initialization code.
*
@@ -1220,11 +1548,14 @@ static int __init ap_module_init(void)
if (rc)
return rc;
- if (ap_instructions_available() != 0) {
+ if (!ap_instructions_available()) {
pr_warn("The hardware system does not support AP instructions\n");
return -ENODEV;
}
+ /* set up the AP permissions (ap and aq masks) */
+ ap_perms_init();
+
/* Get AP configuration data if available */
ap_init_configuration();
@@ -1233,7 +1564,9 @@ static int __init ap_module_init(void)
ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
else
max_domain_id = 15;
- if (ap_domain_index < -1 || ap_domain_index > max_domain_id) {
+ if (ap_domain_index < -1 || ap_domain_index > max_domain_id ||
+ (ap_domain_index >= 0 &&
+ !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
pr_warn("%d is not a valid cryptographic domain\n",
ap_domain_index);
ap_domain_index = -1;
@@ -1261,7 +1594,7 @@ static int __init ap_module_init(void)
/* Create /sys/devices/ap. */
ap_root_device = root_device_register("ap");
- rc = PTR_RET(ap_root_device);
+ rc = PTR_ERR_OR_ZERO(ap_root_device);
if (rc)
goto out_bus;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6a273c5ebca5..5246cd8c16a6 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright IBM Corp. 2006, 2012
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/types.h>
+#include <asm/isc.h>
#include <asm/ap.h>
#define AP_DEVICES 256 /* Number of AP devices. */
@@ -116,9 +117,18 @@ enum ap_wait {
struct ap_device;
struct ap_message;
+/*
+ * The ap driver struct includes a flags field which holds some info for
+ * the ap bus about the driver. Currently only one flag is supported and
+ * used: the DEFAULT flag marks an ap driver as a default driver which is
+ * used together with the apmask and aqmask whitelisting of the ap bus.
+ */
+#define AP_DRIVER_FLAG_DEFAULT 0x0001
+
struct ap_driver {
struct device_driver driver;
struct ap_device_id *ids;
+ unsigned int flags;
int (*probe)(struct ap_device *);
void (*remove)(struct ap_device *);
@@ -166,7 +176,7 @@ struct ap_queue {
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
int total_request_count; /* # requests ever for this AP device.*/
- int request_timeout; /* Request timout in jiffies. */
+ int request_timeout; /* Request timeout in jiffies. */
struct timer_list timeout; /* Timer for request timeouts. */
struct list_head pendingq; /* List of message sent to AP queue. */
struct list_head requestq; /* List of message yet to be sent. */
@@ -247,4 +257,27 @@ void ap_queue_resume(struct ap_device *ap_dev);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
+/*
+ * Check whether an APQN is owned/reserved by the ap bus and the
+ * default driver(s), i.e. whether this APQN is or will be in use
+ * by the ap bus and the default set of drivers.
+ * If yes, returns 1; if not, returns 0. On error a negative
+ * errno value is returned.
+ */
+int ap_owned_by_def_drv(int card, int queue);
+
+/*
+ * Check a 'matrix' of APQNs for being owned/reserved by the ap
+ * bus and default driver(s), i.e. whether at least one APQN in
+ * the given 'matrix' is marked as owned/reserved by the ap bus
+ * and default driver(s).
+ * If such an APQN is found the return value is 1, otherwise
+ * 0 is returned. On error a negative errno value is returned.
+ * The parameter apm is a bitmask which should be declared as
+ * DECLARE_BITMAP(apm, AP_DEVICES); the aqm parameter is similar
+ * and should be declared as DECLARE_BITMAP(aqm, AP_DOMAINS).
+ */
+int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
+ unsigned long *aqm);
+
#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 2c726df210f6..63b4cc6cd7e5 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -14,40 +14,39 @@
#include <asm/facility.h>
#include "ap_bus.h"
-#include "ap_asm.h"
/*
* AP card related attributes.
*/
-static ssize_t ap_hwtype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
}
-static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+static DEVICE_ATTR_RO(hwtype);
-static ssize_t ap_raw_hwtype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t raw_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
}
-static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
+static DEVICE_ATTR_RO(raw_hwtype);
-static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
}
-static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
+static DEVICE_ATTR_RO(depth);
static ssize_t ap_functions_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -59,9 +58,9 @@ static ssize_t ap_functions_show(struct device *dev,
static DEVICE_ATTR_RO(ap_functions);
-static ssize_t ap_req_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ap_card *ac = to_ap_card(dev);
unsigned int req_cnt;
@@ -73,9 +72,9 @@ static ssize_t ap_req_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
-static ssize_t ap_req_count_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t request_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
@@ -89,10 +88,10 @@ static ssize_t ap_req_count_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
+static DEVICE_ATTR_RW(request_count);
-static ssize_t ap_requestq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
@@ -106,10 +105,10 @@ static ssize_t ap_requestq_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
-static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+static DEVICE_ATTR_RO(requestq_count);
-static ssize_t ap_pendingq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
@@ -123,15 +122,15 @@ static ssize_t ap_pendingq_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
-static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+static DEVICE_ATTR_RO(pendingq_count);
-static ssize_t ap_modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}
-static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
+static DEVICE_ATTR_RO(modalias);
static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_hwtype.attr,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba3a2e13b0eb..66f7334bcb03 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -14,26 +14,6 @@
#include <asm/facility.h>
#include "ap_bus.h"
-#include "ap_asm.h"
-
-/**
- * ap_queue_irq_ctrl(): Control interruption on a AP queue.
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- *
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
- */
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind)
-{
- return ap_aqic(qid, qirqctrl, ind);
-}
-EXPORT_SYMBOL(ap_queue_irq_ctrl);
/**
* ap_queue_enable_interruption(): Enable interruption on an AP queue.
@@ -482,9 +462,9 @@ EXPORT_SYMBOL(ap_queue_resume);
/*
* AP queue related attributes.
*/
-static ssize_t ap_req_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int req_cnt;
@@ -495,9 +475,9 @@ static ssize_t ap_req_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
-static ssize_t ap_req_count_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t request_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
@@ -508,10 +488,10 @@ static ssize_t ap_req_count_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
+static DEVICE_ATTR_RW(request_count);
-static ssize_t ap_requestq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int reqq_cnt = 0;
@@ -522,10 +502,10 @@ static ssize_t ap_requestq_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
-static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+static DEVICE_ATTR_RO(requestq_count);
-static ssize_t ap_pendingq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int penq_cnt = 0;
@@ -536,10 +516,10 @@ static ssize_t ap_pendingq_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
-static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+static DEVICE_ATTR_RO(pendingq_count);
-static ssize_t ap_reset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
@@ -561,10 +541,10 @@ static ssize_t ap_reset_show(struct device *dev,
return rc;
}
-static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
+static DEVICE_ATTR_RO(reset);
-static ssize_t ap_interrupt_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t interrupt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
@@ -580,7 +560,7 @@ static ssize_t ap_interrupt_show(struct device *dev,
return rc;
}
-static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
+static DEVICE_ATTR_RO(interrupt);
static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_request_count.attr,
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 3929c8be8098..1b4001e0285f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -82,20 +82,20 @@ static int check_secaeskeytoken(const u8 *token, int keybitsize)
if (t->type != 0x01) {
DEBUG_ERR(
- "check_secaeskeytoken secure token check failed, type mismatch 0x%02x != 0x01\n",
- (int) t->type);
+ "%s secure token check failed, type mismatch 0x%02x != 0x01\n",
+ __func__, (int) t->type);
return -EINVAL;
}
if (t->version != 0x04) {
DEBUG_ERR(
- "check_secaeskeytoken secure token check failed, version mismatch 0x%02x != 0x04\n",
- (int) t->version);
+ "%s secure token check failed, version mismatch 0x%02x != 0x04\n",
+ __func__, (int) t->version);
return -EINVAL;
}
if (keybitsize > 0 && t->bitsize != keybitsize) {
DEBUG_ERR(
- "check_secaeskeytoken secure token check failed, bitsize mismatch %d != %d\n",
- (int) t->bitsize, keybitsize);
+ "%s secure token check failed, bitsize mismatch %d != %d\n",
+ __func__, (int) t->bitsize, keybitsize);
return -EINVAL;
}
@@ -270,8 +270,8 @@ int pkey_genseckey(u16 cardnr, u16 domain,
break;
default:
DEBUG_ERR(
- "pkey_genseckey unknown/unsupported keytype %d\n",
- keytype);
+ "%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
rc = -EINVAL;
goto out;
}
@@ -290,15 +290,16 @@ int pkey_genseckey(u16 cardnr, u16 domain,
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
- "pkey_genseckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- (int) cardnr, (int) domain, rc);
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ __func__, (int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
- "pkey_genseckey secure key generate failure, card response %d/%d\n",
+ "%s secure key generate failure, card response %d/%d\n",
+ __func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
@@ -315,8 +316,8 @@ int pkey_genseckey(u16 cardnr, u16 domain,
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
DEBUG_ERR(
- "pkey_genseckey secure token size mismatch %d != %d bytes\n",
- seckeysize, SECKEYBLOBSIZE);
+ "%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -407,8 +408,8 @@ int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
break;
default:
DEBUG_ERR(
- "pkey_clr2seckey unknown/unsupported keytype %d\n",
- keytype);
+ "%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
rc = -EINVAL;
goto out;
}
@@ -427,15 +428,16 @@ int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
- "pkey_clr2seckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- (int) cardnr, (int) domain, rc);
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ __func__, (int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
- "pkey_clr2seckey clear key import failure, card response %d/%d\n",
+ "%s clear key import failure, card response %d/%d\n",
+ __func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
@@ -452,8 +454,8 @@ int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
DEBUG_ERR(
- "pkey_clr2seckey secure token size mismatch %d != %d bytes\n",
- seckeysize, SECKEYBLOBSIZE);
+ "%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -553,15 +555,16 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
- "pkey_sec2protkey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- (int) cardnr, (int) domain, rc);
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ __func__, (int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
- "pkey_sec2protkey unwrap secure key failure, card response %d/%d\n",
+ "%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
@@ -569,7 +572,8 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
}
if (prepcblk->ccp_rscode != 0) {
DEBUG_WARN(
- "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
+ "%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
}
@@ -581,8 +585,8 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
/* check the returned keyblock */
if (prepparm->lv3.keyblock.version != 0x01) {
DEBUG_ERR(
- "pkey_sec2protkey reply param keyblock version mismatch 0x%02x != 0x01\n",
- (int) prepparm->lv3.keyblock.version);
+ "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
+ __func__, (int) prepparm->lv3.keyblock.version);
rc = -EIO;
goto out;
}
@@ -599,8 +603,8 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
protkey->type = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("pkey_sec2protkey unknown/unsupported keytype %d\n",
- prepparm->lv3.keyblock.keylen);
+ DEBUG_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, prepparm->lv3.keyblock.keylen);
rc = -EIO;
goto out;
}
@@ -638,8 +642,8 @@ int pkey_clr2protkey(u32 keytype,
fc = CPACF_PCKMO_ENC_AES_256_KEY;
break;
default:
- DEBUG_ERR("pkey_clr2protkey unknown/unsupported keytype %d\n",
- keytype);
+ DEBUG_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
return -EINVAL;
}
@@ -699,7 +703,7 @@ static int query_crypto_facility(u16 cardnr, u16 domain,
/* fill request cprb param block with FQ request */
preqparm = (struct fqreqparm *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
- strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
preqparm->lv1.len = sizeof(preqparm->lv1);
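An aside on the strncpy() to memcpy() change above: rule_array is a
fixed-width field rather than a NUL-terminated string, so copying exactly
sizeof(preqparm->rule_array) bytes matches the intended format; the caller
must supply a keyword buffer padded to at least that size. Sketch of the
difference (illustrative):

	/* stops at src's NUL and zero-fills the remainder */
	strncpy(dst, src, sizeof(dst));

	/* always copies the full field width, no string semantics */
	memcpy(dst, src, sizeof(dst));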
@@ -713,15 +717,16 @@ static int query_crypto_facility(u16 cardnr, u16 domain,
rc = _zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
- "query_crypto_facility zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- (int) cardnr, (int) domain, rc);
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ __func__, (int) cardnr, (int) domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR(
- "query_crypto_facility unwrap secure key failure, card response %d/%d\n",
+ "%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
(int) prepcblk->ccp_rtcode,
(int) prepcblk->ccp_rscode);
rc = -EIO;
@@ -993,7 +998,7 @@ int pkey_skey2pkey(const struct pkey_seckey *seckey,
}
if (rc)
- DEBUG_DBG("pkey_skey2pkey failed rc=%d\n", rc);
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1030,7 +1035,7 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
if (rc)
goto out;
if (t->mkvp == mkvp[1]) {
- DEBUG_DBG("pkey_verifykey secure key has old mkvp\n");
+ DEBUG_DBG("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
}
@@ -1041,7 +1046,7 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
*pdomain = domain;
out:
- DEBUG_DBG("pkey_verifykey rc=%d\n", rc);
+ DEBUG_DBG("%s rc=%d\n", __func__, rc);
return rc;
}
EXPORT_SYMBOL(pkey_verifykey);
@@ -1064,7 +1069,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_genseckey(kgs.cardnr, kgs.domain,
kgs.keytype, &kgs.seckey);
- DEBUG_DBG("pkey_ioctl pkey_genseckey()=%d\n", rc);
+ DEBUG_DBG("%s pkey_genseckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
@@ -1079,7 +1084,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
&kcs.clrkey, &kcs.seckey);
- DEBUG_DBG("pkey_ioctl pkey_clr2seckey()=%d\n", rc);
+ DEBUG_DBG("%s pkey_clr2seckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
@@ -1095,7 +1100,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
&ksp.seckey, &ksp.protkey);
- DEBUG_DBG("pkey_ioctl pkey_sec2protkey()=%d\n", rc);
+ DEBUG_DBG("%s pkey_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1110,7 +1115,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_clr2protkey(kcp.keytype,
&kcp.clrkey, &kcp.protkey);
- DEBUG_DBG("pkey_ioctl pkey_clr2protkey()=%d\n", rc);
+ DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucp, &kcp, sizeof(kcp)))
@@ -1126,7 +1131,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_findcard(&kfc.seckey,
&kfc.cardnr, &kfc.domain, 1);
- DEBUG_DBG("pkey_ioctl pkey_findcard()=%d\n", rc);
+ DEBUG_DBG("%s pkey_findcard()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
@@ -1140,7 +1145,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
- DEBUG_DBG("pkey_ioctl pkey_skey2pkey()=%d\n", rc);
+ DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1155,7 +1160,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
&kvk.keysize, &kvk.attributes);
- DEBUG_DBG("pkey_ioctl pkey_verifykey()=%d\n", rc);
+ DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(uvk, &kvk, sizeof(kvk)))
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index febcdb5135bf..e6854127b434 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -50,7 +50,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
static int zcrypt_hwrng_seed = 1;
-module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
+module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
DEFINE_SPINLOCK(zcrypt_list_lock);
@@ -182,7 +182,8 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
struct zcrypt_card *pref_zc,
- unsigned weight, unsigned pref_weight)
+ unsigned int weight,
+ unsigned int pref_weight)
{
if (!pref_zc)
return false;
@@ -196,7 +197,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
struct zcrypt_queue *pref_zq,
- unsigned weight, unsigned pref_weight)
+ unsigned int weight,
+ unsigned int pref_weight)
{
if (!pref_zq)
return false;
@@ -792,6 +794,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
case ICARSAMODEXPO: {
struct ica_rsa_modexpo __user *umex = (void __user *) arg;
struct ica_rsa_modexpo mex;
+
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
do {
@@ -811,6 +814,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
case ICARSACRT: {
struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
struct ica_rsa_modexpo_crt crt;
+
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
do {
@@ -830,6 +834,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
case ZSECSENDCPRB: {
struct ica_xcRB __user *uxcRB = (void __user *) arg;
struct ica_xcRB xcRB;
+
if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
return -EFAULT;
do {
@@ -849,6 +854,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
case ZSENDEP11CPRB: {
struct ep11_urb __user *uxcrb = (void __user *)arg;
struct ep11_urb xcrb;
+
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
@@ -1037,7 +1043,7 @@ static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
return -EFAULT;
crt64.inputdata = compat_ptr(crt32.inputdata);
crt64.inputdatalength = crt32.inputdatalength;
- crt64.outputdata= compat_ptr(crt32.outputdata);
+ crt64.outputdata = compat_ptr(crt32.outputdata);
crt64.outputdatalength = crt32.outputdatalength;
crt64.bp_key = compat_ptr(crt32.bp_key);
crt64.bq_key = compat_ptr(crt32.bq_key);
@@ -1063,20 +1069,20 @@ struct compat_ica_xcRB {
unsigned int user_defined;
unsigned short request_ID;
unsigned int request_control_blk_length;
- unsigned char padding1[16 - sizeof (compat_uptr_t)];
+ unsigned char padding1[16 - sizeof(compat_uptr_t)];
compat_uptr_t request_control_blk_addr;
unsigned int request_data_length;
- char padding2[16 - sizeof (compat_uptr_t)];
+ char padding2[16 - sizeof(compat_uptr_t)];
compat_uptr_t request_data_address;
unsigned int reply_control_blk_length;
- char padding3[16 - sizeof (compat_uptr_t)];
+ char padding3[16 - sizeof(compat_uptr_t)];
compat_uptr_t reply_control_blk_addr;
unsigned int reply_data_length;
- char padding4[16 - sizeof (compat_uptr_t)];
+ char padding4[16 - sizeof(compat_uptr_t)];
compat_uptr_t reply_data_addr;
unsigned short priority_window;
unsigned int status;
-} __attribute__((packed));
+} __packed;
static long trans_xcRB32(struct file *filp, unsigned int cmd,
unsigned long arg)
@@ -1120,7 +1126,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
xcRB32.reply_data_length = xcRB64.reply_data_length;
xcRB32.status = xcRB64.status;
if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
- return -EFAULT;
+ return -EFAULT;
return rc;
}
@@ -1182,10 +1188,10 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
rc = zcrypt_rng((char *) zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
- zcrypt_rng_buffer_index = rc / sizeof *data;
+ zcrypt_rng_buffer_index = rc / sizeof(*data);
}
*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
- return sizeof *data;
+ return sizeof(*data);
}
static struct hwrng zcrypt_rng_dev = {
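The module_param_named() change swaps the symbolic mode S_IRUSR|S_IRGRP for the equivalent octal 0440 (owner and group may read, nobody may write), which is the spelling checkpatch.pl asks for. A minimal module sketch of the same declaration, assuming only the standard module headers:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int hwrng_seed_demo = 1;

    /* 0440 == S_IRUSR | S_IRGRP: visible in sysfs, read-only. */
    module_param_named(hwrng_seed, hwrng_seed_demo, int, 0440);
    MODULE_PARM_DESC(hwrng_seed, "demo parameter with octal permissions");
    MODULE_LICENSE("GPL");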
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index f149a8fee60d..a848625c1a5a 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* zcrypt 2.1.0
*
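The SPDX hunks in the header files switch the identifier from // to /* */ comment style: the kernel's license-rules documentation requires C-style /* */ SPDX comments in headers, with // reserved for the first line of .c sources. The resulting header skeleton looks like this (EXAMPLE_H is a hypothetical guard, for illustration):

    /* SPDX-License-Identifier: GPL-2.0+ */
    #ifndef EXAMPLE_H
    #define EXAMPLE_H

    /* declarations ... */

    #endif /* EXAMPLE_H */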
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 233e1e695208..40cd4c1c2de8 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -38,28 +38,28 @@
* Device attributes common for all crypto card devices.
*/
-static ssize_t zcrypt_card_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
}
-static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL);
+static DEVICE_ATTR_RO(type);
-static ssize_t zcrypt_card_online_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
}
-static ssize_t zcrypt_card_online_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
struct zcrypt_queue *zq;
@@ -80,12 +80,23 @@ static ssize_t zcrypt_card_online_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
- zcrypt_card_online_store);
+static DEVICE_ATTR_RW(online);
+
+static ssize_t load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+}
+
+static DEVICE_ATTR_RO(load);
static struct attribute *zcrypt_card_attrs[] = {
&dev_attr_type.attr,
&dev_attr_online.attr,
+ &dev_attr_load.attr,
NULL,
};
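The attribute conversions above rely on the naming contract of the DEVICE_ATTR_* helpers: DEVICE_ATTR_RO(load) defines dev_attr_load and wires it to load_show(), and DEVICE_ATTR_RW(online) expects both online_show() and online_store(), which is why the show/store functions are renamed. A reduced sketch of the pattern (the demo_* names are hypothetical):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* DEVICE_ATTR_RO(load) requires a function named load_show(). */
    static ssize_t load_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "%d\n", 0);
    }
    static DEVICE_ATTR_RO(load);    /* defines dev_attr_load, mode 0444 */

    static struct attribute *demo_attrs[] = {
            &dev_attr_load.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(demo);         /* defines demo_groups */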
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 011d61d8a4ae..e5b5c02c9d67 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* zcrypt 2.1.0
*
@@ -31,7 +31,7 @@ struct cca_token_hdr {
unsigned char version;
unsigned short token_length;
unsigned char reserved[4];
-} __attribute__((packed));
+} __packed;
#define CCA_TKN_HDR_ID_EXT 0x1E
@@ -51,7 +51,7 @@ struct cca_public_sec {
unsigned short exponent_len;
unsigned short modulus_bit_len;
unsigned short modulus_byte_len; /* In a private key, this is 0 */
-} __attribute__((packed));
+} __packed;
/**
* mapping for the cca private CRT key 'token'
@@ -85,7 +85,7 @@ struct cca_pvt_ext_CRT_sec {
unsigned short pad_len;
unsigned char reserved4[52];
unsigned char confounder[8];
-} __attribute__((packed));
+} __packed;
#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
@@ -99,7 +99,7 @@ struct cca_pvt_ext_CRT_sec {
* @mex: pointer to user input data
* @p: pointer to memory area for the key
*
- * Returns the size of the key area or -EFAULT
+ * Returns the size of the key area or a negative errno value.
*/
static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
{
@@ -114,10 +114,19 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
struct cca_token_hdr pubHdr;
struct cca_public_sec pubSec;
char exponent[0];
- } __attribute__((packed)) *key = p;
+ } __packed *key = p;
unsigned char *temp;
int i;
+ /*
+	 * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > 512))
+ return -EINVAL;
+
memset(key, 0, sizeof(*key));
key->pubHdr = static_pub_hdr;
@@ -174,10 +183,19 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
struct cca_token_hdr token;
struct cca_pvt_ext_CRT_sec pvt;
char key_parts[0];
- } __attribute__((packed)) *key = p;
+ } __packed *key = p;
struct cca_public_sec *pub;
int short_len, long_len, pad_len, key_len, size;
+ /*
+	 * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > 512))
+ return -EINVAL;
+
memset(key, 0, sizeof(*key));
short_len = (crt->inputdatalength + 1) / 2;
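Both key builders now cap the user-controlled inputdatalength before the later copy_from_user() writes into the fixed-size key area; WARN_ON_ONCE() makes a violation loud in testing while still failing safely with -EINVAL. The guard pattern in isolation (a kernel-style sketch, with the 512-byte limit taken from the structs above):

    #include <linux/bug.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    static int copy_key_material(void *dst, const void __user *src, u32 len)
    {
            /* Never trust a user-supplied length against a fixed buffer. */
            if (WARN_ON_ONCE(len > 512))
                    return -EINVAL;
            if (copy_from_user(dst, src, len))
                    return -EFAULT;
            return 0;
    }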
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index e701194d3611..f4ae5fa30ec9 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -145,6 +145,7 @@ static struct ap_driver zcrypt_cex2a_card_driver = {
.probe = zcrypt_cex2a_card_probe,
.remove = zcrypt_cex2a_card_remove,
.ids = zcrypt_cex2a_card_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
};
/**
@@ -208,6 +209,7 @@ static struct ap_driver zcrypt_cex2a_queue_driver = {
.suspend = ap_queue_suspend,
.resume = ap_queue_resume,
.ids = zcrypt_cex2a_queue_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
};
int __init zcrypt_cex2a_init(void)
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index c3c116777c93..66d58bc87c66 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* zcrypt 2.1.0
*
@@ -30,7 +30,7 @@ struct type50_hdr {
unsigned char reserved2;
unsigned char ignored;
unsigned short reserved3;
-} __attribute__((packed));
+} __packed;
#define TYPE50_TYPE_CODE 0x50
@@ -49,7 +49,7 @@ struct type50_meb1_msg {
unsigned char exponent[128];
unsigned char modulus[128];
unsigned char message[128];
-} __attribute__((packed));
+} __packed;
/* Mod-Exp, with a large modulus */
struct type50_meb2_msg {
@@ -59,7 +59,7 @@ struct type50_meb2_msg {
unsigned char exponent[256];
unsigned char modulus[256];
unsigned char message[256];
-} __attribute__((packed));
+} __packed;
/* Mod-Exp, with a larger modulus */
struct type50_meb3_msg {
@@ -69,7 +69,7 @@ struct type50_meb3_msg {
unsigned char exponent[512];
unsigned char modulus[512];
unsigned char message[512];
-} __attribute__((packed));
+} __packed;
/* CRT, with a small modulus */
struct type50_crb1_msg {
@@ -82,7 +82,7 @@ struct type50_crb1_msg {
unsigned char dq[64];
unsigned char u[64];
unsigned char message[128];
-} __attribute__((packed));
+} __packed;
/* CRT, with a large modulus */
struct type50_crb2_msg {
@@ -95,7 +95,7 @@ struct type50_crb2_msg {
unsigned char dq[128];
unsigned char u[128];
unsigned char message[256];
-} __attribute__((packed));
+} __packed;
/* CRT, with a larger modulus */
struct type50_crb3_msg {
@@ -108,7 +108,7 @@ struct type50_crb3_msg {
unsigned char dq[256];
unsigned char u[256];
unsigned char message[512];
-} __attribute__((packed));
+} __packed;
/**
* The type 80 response family is associated with a CEX2A card.
@@ -128,7 +128,7 @@ struct type80_hdr {
unsigned char code; /* 0x00 */
unsigned char reserved2[3];
unsigned char reserved3[8];
-} __attribute__((packed));
+} __packed;
int zcrypt_cex2a_init(void);
void zcrypt_cex2a_exit(void);
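All the __attribute__((packed)) annotations become __packed, the kernel's preferred shorthand (pulled in via <linux/compiler.h>); the layout guarantee is identical, only the notation changes. For example, with a hypothetical on-wire header:

    #include <linux/compiler.h>

    struct demo_hdr {
            unsigned char   msg_type_code;
            unsigned char   reserved1;
            unsigned short  reserved3;
    } __packed;                     /* == __attribute__((packed)) */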
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f305538334ad..35d58dbbc4da 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -214,6 +214,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
.probe = zcrypt_cex4_card_probe,
.remove = zcrypt_cex4_card_remove,
.ids = zcrypt_cex4_card_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
};
/**
@@ -283,6 +284,7 @@ static struct ap_driver zcrypt_cex4_queue_driver = {
.suspend = ap_queue_suspend,
.resume = ap_queue_resume,
.ids = zcrypt_cex4_queue_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
};
int __init zcrypt_cex4_init(void)
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 01598d83c60a..6f7ebc1dbe10 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* zcrypt 2.1.0
*
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index afe1b2bcd7ec..f159662c907b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -27,13 +27,14 @@
#include "zcrypt_error.h"
#include "zcrypt_msgtype50.h"
-#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
+/* 4096 bits */
+#define CEX3A_MAX_MOD_SIZE 512
-#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
+/* max outputdatalength + type80_hdr */
+#define CEX2A_MAX_RESPONSE_SIZE 0x110
-#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
- * (max outputdatalength) +
- * type80_hdr*/
+/* 512-bit modulus (max outputdatalength) + type80_hdr */
+#define CEX3A_MAX_RESPONSE_SIZE 0x210
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
@@ -209,6 +210,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
if (mod_len <= 128) {
struct type50_meb1_msg *meb1 = ap_msg->message;
+
memset(meb1, 0, sizeof(*meb1));
ap_msg->length = sizeof(*meb1);
meb1->header.msg_type_code = TYPE50_TYPE_CODE;
@@ -219,6 +221,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
inp = meb1->message + sizeof(meb1->message) - mod_len;
} else if (mod_len <= 256) {
struct type50_meb2_msg *meb2 = ap_msg->message;
+
memset(meb2, 0, sizeof(*meb2));
ap_msg->length = sizeof(*meb2);
meb2->header.msg_type_code = TYPE50_TYPE_CODE;
@@ -229,6 +232,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
inp = meb2->message + sizeof(meb2->message) - mod_len;
} else if (mod_len <= 512) {
struct type50_meb3_msg *meb3 = ap_msg->message;
+
memset(meb3, 0, sizeof(*meb3));
ap_msg->length = sizeof(*meb3);
meb3->header.msg_type_code = TYPE50_TYPE_CODE;
@@ -274,6 +278,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
*/
if (mod_len <= 128) { /* up to 1024 bit key size */
struct type50_crb1_msg *crb1 = ap_msg->message;
+
memset(crb1, 0, sizeof(*crb1));
ap_msg->length = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
@@ -287,6 +292,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
inp = crb1->message + sizeof(crb1->message) - mod_len;
} else if (mod_len <= 256) { /* up to 2048 bit key size */
struct type50_crb2_msg *crb2 = ap_msg->message;
+
memset(crb2, 0, sizeof(*crb2));
ap_msg->length = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
@@ -301,6 +307,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
(zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
struct type50_crb3_msg *crb3 = ap_msg->message;
+
memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 0a36545cfb8e..8530f652ea4f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* zcrypt 2.1.0
*
@@ -17,10 +17,10 @@
#define MSGTYPE50_NAME "zcrypt_msgtype50"
#define MSGTYPE50_VARIANT_DEFAULT 0
-#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
-#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
+#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
+#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /* sizeof(struct type50_crb3_msg) */
-#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
+#define MSGTYPE_ADJUSTMENT 0x08 /* type04 extension (not needed in type50) */
unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 97d4bacbc442..2101776a8148 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -246,7 +246,7 @@ int speed_idx_ep11(int req_type)
* @ap_msg: pointer to AP message
* @mex: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or a negative errno value.
*/
static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
@@ -272,6 +272,14 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
} __packed * msg = ap_msg->message;
int size;
+ /*
+	 * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
/* VUD.ciphertext */
msg->length = mex->inputdatalength + 2;
if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
@@ -307,7 +315,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
* @ap_msg: pointer to AP message
* @crt: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or a negative errno value.
*/
static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
@@ -334,6 +342,14 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
} __packed * msg = ap_msg->message;
int size;
+ /*
+	 * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
/* VUD.ciphertext */
msg->length = crt->inputdatalength + 2;
if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
@@ -405,8 +421,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
- /* Overflow check
- sum must be greater (or equal) than the largest operand */
+ /*
+	 * Overflow check:
+	 * the sum must be greater than (or equal to) the largest operand
+ */
req_sumlen = CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
if ((CEIL4(xcRB->request_control_blk_length) <=
@@ -426,8 +444,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
if (replylen > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
- /* Overflow check
- sum must be greater (or equal) than the largest operand */
+ /*
+	 * Overflow check:
+	 * the sum must be greater than (or equal to) the largest operand
+ */
resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length;
if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
@@ -438,7 +458,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
- memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
+ memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
if (xcRB->request_data_length) {
msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
@@ -790,8 +810,10 @@ static int convert_response_ica(struct zcrypt_queue *zq,
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
- /* Fall through, no break, incorrect cprb version is an unknown
- * response */
+ /*
+	 * Fall through, no break; an incorrect cprb version is an unknown
+ * response
+ */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -824,8 +846,10 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zq, reply, xcRB);
- /* Fall through, no break, incorrect cprb version is an unknown
- * response */
+ /*
+	 * Fall through, no break; an incorrect cprb version is an unknown
+ * response
+ */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
@@ -885,8 +909,10 @@ static int convert_response_rng(struct zcrypt_queue *zq,
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
- /* Fall through, no break, incorrect cprb version is an unknown
- * response */
+ /*
+	 * Fall through, no break; an incorrect cprb version is an unknown
+ * response
+ */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -988,7 +1014,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
}
} else {
memcpy(msg->message, reply->message, sizeof(error_reply));
- }
+ }
out:
complete(&(resp_type->work));
}
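The reflowed overflow comments describe a standard unsigned-wraparound test: for unsigned a + b, the sum wrapped if and only if it ended up smaller than the larger operand, so requiring sum >= max(a, b) rejects overflow. The check, extracted into a stand-alone function:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if a + b wraps in 32-bit unsigned arithmetic. */
    static bool add_overflows_u32(uint32_t a, uint32_t b)
    {
            uint32_t sum = a + b;

            return sum < (a > b ? a : b);
    }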
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index d314f4525518..e4c2f37d7ad9 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* zcrypt 2.1.0
*
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 159b0a0dd211..94d9f7224aea 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -95,7 +95,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
- } __attribute__((packed)) *reply;
+ } __packed *reply;
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
@@ -104,7 +104,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
char rule[8];
short int verb_length;
short int key_length;
- } __packed * msg;
+ } __packed *msg;
int rc, i;
ap_init_message(&ap_msg);
@@ -223,6 +223,7 @@ static struct ap_driver zcrypt_pcixcc_card_driver = {
.probe = zcrypt_pcixcc_card_probe,
.remove = zcrypt_pcixcc_card_remove,
.ids = zcrypt_pcixcc_card_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
};
/**
@@ -286,6 +287,7 @@ static struct ap_driver zcrypt_pcixcc_queue_driver = {
.suspend = ap_queue_suspend,
.resume = ap_queue_resume,
.ids = zcrypt_pcixcc_queue_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
};
int __init zcrypt_pcixcc_init(void)
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index d678a3af83a7..cf73a0f91e9c 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* zcrypt 2.1.0
*
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 720434e18007..8df82c6ef66e 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -38,18 +38,18 @@
* Device attributes common for all crypto queue devices.
*/
-static ssize_t zcrypt_queue_online_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
}
-static ssize_t zcrypt_queue_online_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
struct zcrypt_card *zc = zq->zcard;
@@ -72,11 +72,22 @@ static ssize_t zcrypt_queue_online_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
- zcrypt_queue_online_store);
+static DEVICE_ATTR_RW(online);
+
+static ssize_t load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+}
+
+static DEVICE_ATTR_RO(load);
static struct attribute *zcrypt_queue_attrs[] = {
&dev_attr_online.attr,
+ &dev_attr_load.attr,
NULL,
};
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index c7e484f70654..7c5a25ddf832 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -95,4 +95,14 @@ config CCWGROUP
tristate
default (LCS || CTCM || QETH)
+config ISM
+ tristate "Support for ISM vPCI Adapter"
+ depends on PCI && SMC
+ default n
+ help
+ Select this option if you want to use the Internal Shared Memory
+ vPCI Adapter.
+
+	  To compile as a module, choose M. The module name is ism.
+ If unsure, choose N.
endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 513b7ae64980..f2d6bbe57a6f 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -15,3 +15,6 @@ qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
obj-$(CONFIG_QETH_L3) += qeth_l3.o
+
+ism-y := ism_drv.o
+obj-$(CONFIG_ISM) += ism.o
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
new file mode 100644
index 000000000000..0aab90817326
--- /dev/null
+++ b/drivers/s390/net/ism.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_ISM_H
+#define S390_ISM_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <net/smc.h>
+
+#define UTIL_STR_LEN 16
+
+/*
+ * Do not use the first word of the DMB bits to ensure 8-byte-aligned access.
+ */
+#define ISM_DMB_WORD_OFFSET 1
+#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)
+#define ISM_NR_DMBS 1920
+
+#define ISM_REG_SBA 0x1
+#define ISM_REG_IEQ 0x2
+#define ISM_READ_GID 0x3
+#define ISM_ADD_VLAN_ID 0x4
+#define ISM_DEL_VLAN_ID 0x5
+#define ISM_SET_VLAN 0x6
+#define ISM_RESET_VLAN 0x7
+#define ISM_QUERY_INFO 0x8
+#define ISM_QUERY_RGID 0x9
+#define ISM_REG_DMB 0xA
+#define ISM_UNREG_DMB 0xB
+#define ISM_SIGNAL_IEQ 0xE
+#define ISM_UNREG_SBA 0x11
+#define ISM_UNREG_IEQ 0x12
+
+#define ISM_ERROR 0xFFFF
+
+struct ism_req_hdr {
+ u32 cmd;
+ u16 : 16;
+ u16 len;
+};
+
+struct ism_resp_hdr {
+ u32 cmd;
+ u16 ret;
+ u16 len;
+};
+
+union ism_reg_sba {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 sba;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_reg_ieq {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 ieq;
+ u64 len;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_read_gid {
+ struct {
+ struct ism_req_hdr hdr;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ u64 gid;
+ } response;
+} __aligned(16);
+
+union ism_qi {
+ struct {
+ struct ism_req_hdr hdr;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ u32 version;
+ u32 max_len;
+ u64 ism_state;
+ u64 my_gid;
+ u64 sba;
+ u64 ieq;
+ u32 ieq_len;
+ u32 : 32;
+ u32 dmbs_owned;
+ u32 dmbs_used;
+ u32 vlan_required;
+ u32 vlan_nr_ids;
+ u16 vlan_id[64];
+ } response;
+} __aligned(64);
+
+union ism_query_rgid {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 rgid;
+ u32 vlan_valid;
+ u32 vlan_id;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_reg_dmb {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 dmb;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ u64 rgid;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ u64 dmb_tok;
+ } response;
+} __aligned(32);
+
+union ism_sig_ieq {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 rgid;
+ u32 trigger_irq;
+ u32 event_code;
+ u64 info;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(32);
+
+union ism_unreg_dmb {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 dmb_tok;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_cmd_simple {
+ struct {
+ struct ism_req_hdr hdr;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(8);
+
+union ism_set_vlan_id {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 vlan_id;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+struct ism_eq_header {
+ u64 idx;
+ u64 ieq_len;
+ u64 entry_len;
+ u64 : 64;
+};
+
+struct ism_eq {
+ struct ism_eq_header header;
+ struct smcd_event entry[15];
+};
+
+struct ism_sba {
+ u32 s : 1; /* summary bit */
+ u32 e : 1; /* event bit */
+ u32 : 30;
+ u32 dmb_bits[ISM_NR_DMBS / 32];
+ u32 reserved[3];
+ u16 dmbe_mask[ISM_NR_DMBS];
+};
+
+struct ism_dev {
+ spinlock_t lock;
+ struct pci_dev *pdev;
+ struct smcd_dev *smcd;
+
+ void __iomem *ctl;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ int ieq_idx;
+};
+
+#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
+ ((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+
+static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
+ unsigned int size)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
+
+ return zpci_write_block(req, data, dmb_req);
+}
+
+#endif /* S390_ISM_H */
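ISM_CREATE_REQ() simply ORs the DMB token with the DMB-entry index (shifted to bit 24), the signal flag (bit 23) and the byte offset, producing the 64-bit request word that __ism_move() hands to zpci_write_block(). Reading the macro this way assumes the token's low bits and the offset never collide, which the PAGE_SIZE chunking in the driver arranges. A stand-alone illustration with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    #define ISM_CREATE_REQ(dmb, idx, sf, offset) \
            ((dmb) | (idx) << 24 | (sf) << 23 | (offset))

    int main(void)
    {
            /* Hypothetical token (low bits clear), entry 5, signal flag
             * set, offset 0x100 into the DMB. */
            uint64_t req = ISM_CREATE_REQ(0x123400000000ULL, 5ULL, 1ULL,
                                          0x100ULL);

            printf("req = 0x%llx\n", (unsigned long long)req);
            return 0;
    }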
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
new file mode 100644
index 000000000000..c0631895154e
--- /dev/null
+++ b/drivers/s390/net/ism_drv.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISM driver for s390.
+ *
+ * Copyright IBM Corp. 2018
+ */
+#define KMSG_COMPONENT "ism"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/err.h>
+#include <net/smc.h>
+
+#include <asm/debug.h>
+
+#include "ism.h"
+
+MODULE_DESCRIPTION("ISM driver for s390");
+MODULE_LICENSE("GPL");
+
+#define PCI_DEVICE_ID_IBM_ISM 0x04ED
+#define DRV_NAME "ism"
+
+static const struct pci_device_id ism_device_table[] = {
+ { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ism_device_table);
+
+static debug_info_t *ism_debug_info;
+
+static int ism_cmd(struct ism_dev *ism, void *cmd)
+{
+ struct ism_req_hdr *req = cmd;
+ struct ism_resp_hdr *resp = cmd;
+
+ memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
+ memcpy_toio(ism->ctl, req, sizeof(*req));
+
+ WRITE_ONCE(resp->ret, ISM_ERROR);
+
+ memcpy_fromio(resp, ism->ctl, sizeof(*resp));
+ if (resp->ret) {
+ debug_text_event(ism_debug_info, 0, "cmd failure");
+ debug_event(ism_debug_info, 0, resp, sizeof(*resp));
+ goto out;
+ }
+ memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
+ resp->len - sizeof(*resp));
+out:
+ return resp->ret;
+}
+
+static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
+{
+ union ism_cmd_simple cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = cmd_code;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int query_info(struct ism_dev *ism)
+{
+ union ism_qi cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_INFO;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ if (ism_cmd(ism, &cmd))
+ goto out;
+
+ debug_text_event(ism_debug_info, 3, "query info");
+ debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
+out:
+ return 0;
+}
+
+static int register_sba(struct ism_dev *ism)
+{
+ union ism_reg_sba cmd;
+ dma_addr_t dma_handle;
+ struct ism_sba *sba;
+
+ sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+ &dma_handle, GFP_KERNEL);
+ if (!sba)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_REG_SBA;
+ cmd.request.hdr.len = sizeof(cmd.request);
+ cmd.request.sba = dma_handle;
+
+ if (ism_cmd(ism, &cmd)) {
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
+ return -EIO;
+ }
+
+ ism->sba = sba;
+ ism->sba_dma_addr = dma_handle;
+
+ return 0;
+}
+
+static int register_ieq(struct ism_dev *ism)
+{
+ union ism_reg_ieq cmd;
+ dma_addr_t dma_handle;
+ struct ism_eq *ieq;
+
+ ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+ &dma_handle, GFP_KERNEL);
+ if (!ieq)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_REG_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+ cmd.request.ieq = dma_handle;
+ cmd.request.len = sizeof(*ieq);
+
+ if (ism_cmd(ism, &cmd)) {
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
+ return -EIO;
+ }
+
+ ism->ieq = ieq;
+ ism->ieq_idx = -1;
+ ism->ieq_dma_addr = dma_handle;
+
+ return 0;
+}
+
+static int unregister_sba(struct ism_dev *ism)
+{
+ if (!ism->sba)
+ return 0;
+
+ if (ism_cmd_simple(ism, ISM_UNREG_SBA))
+ return -EIO;
+
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+ ism->sba, ism->sba_dma_addr);
+
+ ism->sba = NULL;
+ ism->sba_dma_addr = 0;
+
+ return 0;
+}
+
+static int unregister_ieq(struct ism_dev *ism)
+{
+ if (!ism->ieq)
+ return 0;
+
+ if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
+ return -EIO;
+
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+ ism->ieq, ism->ieq_dma_addr);
+
+ ism->ieq = NULL;
+ ism->ieq_dma_addr = 0;
+
+ return 0;
+}
+
+static int ism_read_local_gid(struct ism_dev *ism)
+{
+ union ism_read_gid cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_READ_GID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ ret = ism_cmd(ism, &cmd);
+ if (ret)
+ goto out;
+
+ ism->smcd->local_gid = cmd.response.gid;
+out:
+ return ret;
+}
+
+static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+ u32 vid)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_query_rgid cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.vlan_valid = vid_valid;
+ cmd.request.vlan_id = vid;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+ dmb->cpu_addr, dmb->dma_addr);
+}
+
+static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+ unsigned long bit;
+
+ if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
+ return -EINVAL;
+
+ if (!dmb->sba_idx) {
+ bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
+ ISM_DMB_BIT_OFFSET);
+ if (bit == ISM_NR_DMBS)
+ return -ENOMEM;
+
+ dmb->sba_idx = bit;
+ }
+ if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
+ test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+ return -EINVAL;
+
+ dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+ &dmb->dma_addr, GFP_KERNEL |
+ __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_COMP | __GFP_NORETRY);
+ if (!dmb->cpu_addr)
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+
+ return dmb->cpu_addr ? 0 : -ENOMEM;
+}
+
+static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_reg_dmb cmd;
+ int ret;
+
+ ret = ism_alloc_dmb(ism, dmb);
+ if (ret)
+ goto out;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_REG_DMB;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.dmb = dmb->dma_addr;
+ cmd.request.dmb_len = dmb->dmb_len;
+ cmd.request.sba_idx = dmb->sba_idx;
+ cmd.request.vlan_valid = dmb->vlan_valid;
+ cmd.request.vlan_id = dmb->vlan_id;
+ cmd.request.rgid = dmb->rgid;
+
+ ret = ism_cmd(ism, &cmd);
+ if (ret) {
+ ism_free_dmb(ism, dmb);
+ goto out;
+ }
+ dmb->dmb_tok = cmd.response.dmb_tok;
+out:
+ return ret;
+}
+
+static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_unreg_dmb cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_UNREG_DMB;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.dmb_tok = dmb->dmb_tok;
+
+ ret = ism_cmd(ism, &cmd);
+ if (ret)
+ goto out;
+
+ ism_free_dmb(ism, dmb);
+out:
+ return ret;
+}
+
+static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_set_vlan_id cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.vlan_id = vlan_id;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_set_vlan_id cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.vlan_id = vlan_id;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int ism_set_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
+}
+
+static int ism_reset_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+}
+
+static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_sig_ieq cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.trigger_irq = trigger_irq;
+ cmd.request.event_code = event_code;
+ cmd.request.info = info;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static unsigned int max_bytes(unsigned int start, unsigned int len,
+ unsigned int boundary)
+{
+ return min(boundary - (start & (boundary - 1)), len);
+}
+
+static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data, unsigned int size)
+{
+ struct ism_dev *ism = smcd->priv;
+ unsigned int bytes;
+ u64 dmb_req;
+ int ret;
+
+ while (size) {
+ bytes = max_bytes(offset, size, PAGE_SIZE);
+ dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
+ offset);
+
+ ret = __ism_move(ism, dmb_req, data, bytes);
+ if (ret)
+ return ret;
+
+ size -= bytes;
+ data += bytes;
+ offset += bytes;
+ }
+
+ return 0;
+}
+
+static void ism_handle_event(struct ism_dev *ism)
+{
+ struct smcd_event *entry;
+
+ while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
+ if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
+ ism->ieq_idx = 0;
+
+ entry = &ism->ieq->entry[ism->ieq_idx];
+ debug_event(ism_debug_info, 2, entry, sizeof(*entry));
+ smcd_handle_event(ism->smcd, entry);
+ }
+}
+
+static irqreturn_t ism_handle_irq(int irq, void *data)
+{
+ struct ism_dev *ism = data;
+ unsigned long bit, end;
+ unsigned long *bv;
+
+ bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
+ end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
+
+ spin_lock(&ism->lock);
+ ism->sba->s = 0;
+ barrier();
+ for (bit = 0;;) {
+ bit = find_next_bit_inv(bv, end, bit);
+ if (bit >= end)
+ break;
+
+ clear_bit_inv(bit, bv);
+ barrier();
+ smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
+ ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
+ }
+
+ if (ism->sba->e) {
+ ism->sba->e = 0;
+ barrier();
+ ism_handle_event(ism);
+ }
+ spin_unlock(&ism->lock);
+ return IRQ_HANDLED;
+}
+
+static const struct smcd_ops ism_ops = {
+ .query_remote_gid = ism_query_rgid,
+ .register_dmb = ism_register_dmb,
+ .unregister_dmb = ism_unregister_dmb,
+ .add_vlan_id = ism_add_vlan_id,
+ .del_vlan_id = ism_del_vlan_id,
+ .set_vlan_required = ism_set_vlan_required,
+ .reset_vlan_required = ism_reset_vlan_required,
+ .signal_event = ism_signal_ieq,
+ .move_data = ism_move,
+};
+
+static int ism_dev_init(struct ism_dev *ism)
+{
+ struct pci_dev *pdev = ism->pdev;
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret <= 0)
+ goto out;
+
+ ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
+ pci_name(pdev), ism);
+ if (ret)
+ goto free_vectors;
+
+ ret = register_sba(ism);
+ if (ret)
+ goto free_irq;
+
+ ret = register_ieq(ism);
+ if (ret)
+ goto unreg_sba;
+
+ ret = ism_read_local_gid(ism);
+ if (ret)
+ goto unreg_ieq;
+
+ ret = smcd_register_dev(ism->smcd);
+ if (ret)
+ goto unreg_ieq;
+
+ query_info(ism);
+ return 0;
+
+unreg_ieq:
+ unregister_ieq(ism);
+unreg_sba:
+ unregister_sba(ism);
+free_irq:
+ free_irq(pci_irq_vector(pdev, 0), ism);
+free_vectors:
+ pci_free_irq_vectors(pdev);
+out:
+ return ret;
+}
+
+static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ism_dev *ism;
+ int ret;
+
+ ism = kzalloc(sizeof(*ism), GFP_KERNEL);
+ if (!ism)
+ return -ENOMEM;
+
+ spin_lock_init(&ism->lock);
+ dev_set_drvdata(&pdev->dev, ism);
+ ism->pdev = pdev;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret)
+ goto err;
+
+ ret = pci_request_mem_regions(pdev, DRV_NAME);
+ if (ret)
+ goto err_disable;
+
+ ism->ctl = pci_iomap(pdev, 2, 0);
+ if (!ism->ctl)
+ goto err_resource;
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ goto err_unmap;
+
+ pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
+ pci_set_dma_max_seg_size(pdev, SZ_1M);
+ pci_set_master(pdev);
+
+ ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
+ ISM_NR_DMBS);
+ if (!ism->smcd)
+ goto err_unmap;
+
+ ism->smcd->priv = ism;
+ ret = ism_dev_init(ism);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ smcd_free_dev(ism->smcd);
+err_unmap:
+ pci_iounmap(pdev, ism->ctl);
+err_resource:
+ pci_release_mem_regions(pdev);
+err_disable:
+ pci_disable_device(pdev);
+err:
+ kfree(ism);
+ dev_set_drvdata(&pdev->dev, NULL);
+ return ret;
+}
+
+static void ism_dev_exit(struct ism_dev *ism)
+{
+ struct pci_dev *pdev = ism->pdev;
+
+ smcd_unregister_dev(ism->smcd);
+ unregister_ieq(ism);
+ unregister_sba(ism);
+ free_irq(pci_irq_vector(pdev, 0), ism);
+ pci_free_irq_vectors(pdev);
+}
+
+static void ism_remove(struct pci_dev *pdev)
+{
+ struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+
+ ism_dev_exit(ism);
+
+ smcd_free_dev(ism->smcd);
+ pci_iounmap(pdev, ism->ctl);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+ dev_set_drvdata(&pdev->dev, NULL);
+ kfree(ism);
+}
+
+static int ism_suspend(struct device *dev)
+{
+ struct ism_dev *ism = dev_get_drvdata(dev);
+
+ ism_dev_exit(ism);
+ return 0;
+}
+
+static int ism_resume(struct device *dev)
+{
+ struct ism_dev *ism = dev_get_drvdata(dev);
+
+ return ism_dev_init(ism);
+}
+
+static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
+
+static struct pci_driver ism_driver = {
+ .name = DRV_NAME,
+ .id_table = ism_device_table,
+ .probe = ism_probe,
+ .remove = ism_remove,
+ .driver = {
+ .pm = &ism_pm_ops,
+ },
+};
+
+static int __init ism_init(void)
+{
+ int ret;
+
+ ism_debug_info = debug_register("ism", 2, 1, 16);
+ if (!ism_debug_info)
+ return -ENODEV;
+
+ debug_register_view(ism_debug_info, &debug_hex_ascii_view);
+ ret = pci_register_driver(&ism_driver);
+ if (ret)
+ debug_unregister(ism_debug_info);
+
+ return ret;
+}
+
+static void __exit ism_exit(void)
+{
+ pci_unregister_driver(&ism_driver);
+ debug_unregister(ism_debug_info);
+}
+
+module_init(ism_init);
+module_exit(ism_exit);
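ism_move() splits a transfer so that no single zpci_write_block() crosses a PAGE_SIZE boundary: max_bytes() returns the distance to the next boundary, capped by the bytes remaining, and the signal flag sf is only applied on the final chunk (size == bytes). The arithmetic, extracted into a userspace sketch:

    #include <stdio.h>

    #define DEMO_PAGE_SIZE 4096u

    /* Bytes that fit before 'start' would cross the next 'boundary'. */
    static unsigned int max_bytes(unsigned int start, unsigned int len,
                                  unsigned int boundary)
    {
            unsigned int room = boundary - (start & (boundary - 1));

            return room < len ? room : len;
    }

    int main(void)
    {
            unsigned int offset = 4000, size = 600;

            /* Prints a 96-byte chunk (up to the page end at 4096),
             * then the remaining 504 bytes. */
            while (size) {
                    unsigned int bytes = max_bytes(offset, size,
                                                   DEMO_PAGE_SIZE);

                    printf("write %u bytes at offset %u\n", bytes, offset);
                    size -= bytes;
                    offset += bytes;
            }
            return 0;
    }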
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 2a5fec55bf60..34e0d476c5c6 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -104,6 +104,7 @@ struct qeth_dbf_info {
struct qeth_perf_stats {
unsigned int bufs_rec;
unsigned int bufs_sent;
+ unsigned int buf_elements_sent;
unsigned int skbs_sent_pack;
unsigned int bufs_sent_pack;
@@ -137,7 +138,6 @@ struct qeth_perf_stats {
unsigned int large_send_bytes;
unsigned int large_send_cnt;
unsigned int sg_skbs_sent;
- unsigned int sg_frags_sent;
/* initial values when measuring starts */
unsigned long initial_rx_packets;
unsigned long initial_tx_packets;
@@ -235,6 +235,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
#define QETH_BUFSIZE 4096
+#define CCW_CMD_WRITE 0x01
+#define CCW_CMD_READ 0x02
/**
* some more defs
@@ -465,7 +467,6 @@ struct qeth_qdio_out_buffer {
struct sk_buff_head skb_list;
int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
- struct qaob *aob;
struct qeth_qdio_out_q *q;
struct qeth_qdio_out_buffer *next_pending;
};
@@ -593,7 +594,7 @@ static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
*/
struct qeth_channel {
enum qeth_channel_states state;
- struct ccw1 ccw;
+ struct ccw1 *ccw;
spinlock_t iob_lock;
wait_queue_head_t wait_q;
struct ccw_device *ccwdev;
@@ -659,12 +660,8 @@ struct qeth_card_info {
char mcl_level[QETH_MCL_LENGTH + 1];
int guestlan;
int mac_bits;
- int portno;
enum qeth_card_types type;
enum qeth_link_types link_type;
- int is_multicast_different;
- int initial_mtu;
- int max_mtu;
int broadcast_capable;
int unique_id;
bool layer_enforced;
@@ -829,6 +826,17 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+ unsigned int elements)
+{
+ unsigned int i;
+
+ for (i = 0; i < elements; i++)
+ memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+ buf->element[14].sflags = 0;
+ buf->element[15].sflags = 0;
+}
+
/**
* qeth_get_elements_for_range() - find number of SBALEs to cover range.
* @start: Start of the address range.
@@ -924,6 +932,19 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
data, QETH_PROT_IPV6);
}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
+ int ipv);
+static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
+ struct sk_buff *skb,
+ int ipv, int cast_type)
+{
+ if (IS_IQD(card) && cast_type != RTN_UNICAST)
+ return card->qdio.out_qs[card->qdio.no_out_queues - 1];
+ if (!card->qdio.do_prio_queueing)
+ return card->qdio.out_qs[card->qdio.default_out_queue];
+ return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
+}
+
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -944,6 +965,7 @@ extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
+struct net_device *qeth_clone_netdev(struct net_device *orig);
void qeth_set_recovery_task(struct qeth_card *);
void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
@@ -961,7 +983,6 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
void *);
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
-int qeth_query_setadapterparms(struct qeth_card *);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
@@ -974,24 +995,18 @@ void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
struct net_device_stats *qeth_get_stats(struct net_device *);
-int qeth_change_mtu(struct net_device *, int);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
-void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob);
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
-int qeth_bridgeport_query_ports(struct qeth_card *card,
- enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
-int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
-int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
-int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
@@ -1015,7 +1030,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
-int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
@@ -1029,13 +1043,15 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
__u16, __u16,
enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
int qeth_vm_request_mac(struct qeth_card *card);
-int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
+int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr **hdr, unsigned int hdr_len,
+ unsigned int proto_len, unsigned int *elements);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
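The new qeth_get_tx_queue() helper centralizes a three-tier queue pick: on IQD cards all non-unicast traffic is forced onto the last output queue, cards without priority queueing use the configured default queue, and everything else is dispatched via qeth_get_priority_queue(). The decision reduced to plain ints (a hypothetical simplification, not the driver's types):

    /* cast_type == 0 stands in for RTN_UNICAST. */
    static int pick_tx_queue(int is_iqd, int cast_type, int do_prio_queueing,
                             int no_out_queues, int default_queue,
                             int prio_queue)
    {
            if (is_iqd && cast_type != 0)
                    return no_out_queues - 1;       /* mcast/bcast queue */
            if (!do_prio_queueing)
                    return default_queue;
            return prio_queue;      /* from qeth_get_priority_queue() */
    }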
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8e1474f1ffac..49f64eb3eab0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -65,7 +65,6 @@ static struct mutex qeth_mod_mutex;
static void qeth_send_control_data_cb(struct qeth_channel *,
struct qeth_cmd_buffer *);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
-static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_buffers(struct qeth_card *);
@@ -73,9 +72,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf,
- enum qeth_qdio_buffer_states newbufstate);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
struct workqueue_struct *qeth_wq;
@@ -476,7 +472,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
QETH_QDIO_BUF_HANDLED_DELAYED)) {
/* for recovery situations */
- q->bufs[bidx]->aob = q->bufstates[bidx].aob;
qeth_init_qdio_out_buf(q, bidx);
QETH_CARD_TEXT(q->card, 2, "clprecov");
}
@@ -489,6 +484,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
struct qaob *aob;
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
+ unsigned int i;
aob = (struct qaob *) phys_to_virt(phys_aob_addr);
QETH_CARD_TEXT(card, 5, "haob");
@@ -512,11 +508,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
}
qeth_notify_skbs(buffer->q, buffer, notification);
- buffer->aob = NULL;
- qeth_clear_output_buffer(buffer->q, buffer,
- QETH_QDIO_BUF_HANDLED_DELAYED);
+ /* Free dangling allocations. The attached skbs are handled by
+ * qeth_cleanup_handled_pending().
+ */
+ for (i = 0;
+ i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+ i++) {
+ if (aob->sba[i] && buffer->is_header[i])
+ kmem_cache_free(qeth_core_header_cache,
+ (void *) aob->sba[i]);
+ }
+ atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
- /* from here on: do not touch buffer anymore */
qdio_release_aob(aob);
}
@@ -528,15 +531,24 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
+static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
+{
+ ccw->cmd_code = cmd_code;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = len;
+ ccw->cda = (__u32) __pa(data);
+}
+
static int __qeth_issue_next_read(struct qeth_card *card)
{
- int rc;
+ struct qeth_channel *channel = &card->read;
struct qeth_cmd_buffer *iob;
+ int rc;
QETH_CARD_TEXT(card, 5, "issnxrd");
- if (card->read.state != CH_STATE_UP)
+ if (channel->state != CH_STATE_UP)
return -EIO;
- iob = qeth_get_buffer(&card->read);
+ iob = qeth_get_buffer(channel);
if (!iob) {
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
@@ -544,14 +556,14 @@ static int __qeth_issue_next_read(struct qeth_card *card)
"available\n", dev_name(&card->gdev->dev));
return -ENOMEM;
}
- qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
+ qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
QETH_CARD_TEXT(card, 6, "noirqpnd");
- rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
+ rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
if (rc) {
QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
"rc=%i\n", dev_name(&card->gdev->dev), rc);
- atomic_set(&card->read.irq_pending, 0);
+ atomic_set(&channel->irq_pending, 0);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -649,8 +661,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
cmd->hdr.return_code, card);
}
card->lan_online = 0;
- if (card->dev && netif_carrier_ok(card->dev))
- netif_carrier_off(card->dev);
+ netif_carrier_off(card->dev);
return NULL;
case IPA_CMD_STARTLAN:
dev_info(&card->gdev->dev,
@@ -741,21 +752,6 @@ static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
return card;
}
-static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
- __u32 len)
-{
- struct qeth_card *card;
-
- card = CARD_FROM_CDEV(channel->ccwdev);
- QETH_CARD_TEXT(card, 4, "setupccw");
- if (channel == &card->read)
- memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
- else
- memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
- channel->ccw.count = len;
- channel->ccw.cda = (__u32) __pa(iob);
-}
-
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
__u8 index;
@@ -903,11 +899,22 @@ out:
qeth_release_buffer(channel, iob);
}
-static int qeth_setup_channel(struct qeth_channel *channel)
+static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
{
int cnt;
QETH_DBF_TEXT(SETUP, 2, "setupch");
+
+ channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ if (!channel->ccw)
+ return -ENOMEM;
+ channel->state = CH_STATE_DOWN;
+ atomic_set(&channel->irq_pending, 0);
+ init_waitqueue_head(&channel->wait_q);
+
+ if (!alloc_buffers)
+ return 0;
+
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
channel->iob[cnt].data =
kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
@@ -919,15 +926,14 @@ static int qeth_setup_channel(struct qeth_channel *channel)
channel->iob[cnt].rc = 0;
}
if (cnt < QETH_CMD_BUFFER_NO) {
+ kfree(channel->ccw);
while (cnt-- > 0)
kfree(channel->iob[cnt].data);
return -ENOMEM;
}
channel->io_buf_no = 0;
- atomic_set(&channel->irq_pending, 0);
spin_lock_init(&channel->iob_lock);
- init_waitqueue_head(&channel->wait_q);
return 0;
}
@@ -1261,8 +1267,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf,
- enum qeth_qdio_buffer_states newbufstate)
+ struct qeth_qdio_out_buffer *buf)
{
int i;
@@ -1270,23 +1275,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
atomic_dec(&queue->set_pci_flags_count);
- if (newbufstate == QETH_QDIO_BUF_EMPTY) {
- qeth_release_skbs(buf);
- }
+ qeth_release_skbs(buf);
+
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
buf->buffer->element[i].addr);
buf->is_header[i] = 0;
- buf->buffer->element[i].length = 0;
- buf->buffer->element[i].addr = NULL;
- buf->buffer->element[i].eflags = 0;
- buf->buffer->element[i].sflags = 0;
}
- buf->buffer->element[15].eflags = 0;
- buf->buffer->element[15].sflags = 0;
+
+ qeth_scrub_qdio_buffer(buf->buffer,
+ QETH_MAX_BUFFER_ELEMENTS(queue->card));
buf->next_element_to_fill = 0;
- atomic_set(&buf->state, newbufstate);
+ atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
@@ -1297,7 +1298,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
if (!q->bufs[j])
continue;
qeth_cleanup_handled_pending(q, j, 1);
- qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
+ qeth_clear_output_buffer(q, q->bufs[j]);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
q->bufs[j] = NULL;
@@ -1339,6 +1340,7 @@ static void qeth_clean_channel(struct qeth_channel *channel)
QETH_DBF_TEXT(SETUP, 2, "freech");
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
kfree(channel->iob[cnt].data);
+ kfree(channel->ccw);
}
static void qeth_set_single_write_queues(struct qeth_card *card)
@@ -1395,6 +1397,10 @@ static void qeth_init_qdio_info(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ card->qdio.no_out_queues = QETH_MAX_QUEUES;
+
/* inbound */
card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
@@ -1407,12 +1413,10 @@ static void qeth_init_qdio_info(struct qeth_card *card)
INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}
-static void qeth_set_intial_options(struct qeth_card *card)
+static void qeth_set_initial_options(struct qeth_card *card)
{
card->options.route4.type = NO_ROUTER;
card->options.route6.type = NO_ROUTER;
- card->options.fake_broadcast = 0;
- card->options.performance_stats = 0;
card->options.rx_sg_cb = QETH_RX_SG_CB;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
@@ -1455,19 +1459,13 @@ static void qeth_start_kernel_thread(struct work_struct *work)
}
static void qeth_buffer_reclaim_work(struct work_struct *);
-static int qeth_setup_card(struct qeth_card *card)
+static void qeth_setup_card(struct qeth_card *card)
{
-
QETH_DBF_TEXT(SETUP, 2, "setupcrd");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- card->read.state = CH_STATE_DOWN;
- card->write.state = CH_STATE_DOWN;
- card->data.state = CH_STATE_DOWN;
+ card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
- card->lan_online = 0;
- card->read_or_write_problem = 0;
- card->dev = NULL;
spin_lock_init(&card->mclock);
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
@@ -1475,24 +1473,15 @@ static int qeth_setup_card(struct qeth_card *card)
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
mutex_init(&card->vid_list_mutex);
- card->thread_start_mask = 0;
- card->thread_allowed_mask = 0;
- card->thread_running_mask = 0;
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
- /* initial options */
- qeth_set_intial_options(card);
+ qeth_set_initial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
- card->ipato.enabled = false;
- card->ipato.invert4 = false;
- card->ipato.invert6 = false;
- /* init QDIO stuff */
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
- return 0;
}
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
@@ -1509,19 +1498,23 @@ static struct qeth_card *qeth_alloc_card(void)
struct qeth_card *card;
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
- card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- if (qeth_setup_channel(&card->read))
+ if (qeth_setup_channel(&card->read, true))
goto out_ip;
- if (qeth_setup_channel(&card->write))
+ if (qeth_setup_channel(&card->write, true))
goto out_channel;
+ if (qeth_setup_channel(&card->data, false))
+ goto out_data;
card->options.layer2 = -1;
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
+out_data:
+ qeth_clean_channel(&card->write);
out_channel:
qeth_clean_channel(&card->read);
out_ip:
@@ -1530,19 +1523,6 @@ out:
return NULL;
}
-static void qeth_determine_card_type(struct qeth_card *card)
-{
- QETH_DBF_TEXT(SETUP, 2, "detcdtyp");
-
- card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
- card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- card->info.type = CARD_RDEV(card)->id.driver_info;
- card->qdio.no_out_queues = QETH_MAX_QUEUES;
- if (card->info.type == QETH_CARD_TYPE_IQD)
- card->info.is_multicast_different = 0x0103;
- qeth_update_from_chp_desc(card);
-}
-
static int qeth_clear_channel(struct qeth_channel *channel)
{
unsigned long flags;
@@ -1683,13 +1663,10 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
if (!rcd_buf)
return -ENOMEM;
- channel->ccw.cmd_code = ciw->cmd;
- channel->ccw.cda = (__u32) __pa(rcd_buf);
- channel->ccw.count = ciw->count;
- channel->ccw.flags = CCW_FLAG_SLI;
+ qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
channel->state = CH_STATE_RCD;
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
QETH_RCD_PARM, LPM_ANYPATH, 0,
QETH_RCD_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1860,15 +1837,13 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
if (!iob)
return -ENOMEM;
iob->callback = idx_reply_cb;
- memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
- channel->ccw.count = QETH_BUFSIZE;
- channel->ccw.cda = (__u32) __pa(iob->data);
+ qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
wait_event(card->wait_q,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1911,9 +1886,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
if (!iob)
return -ENOMEM;
iob->callback = idx_reply_cb;
- memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
- channel->ccw.count = IDX_ACTIVATE_SIZE;
- channel->ccw.cda = (__u32) __pa(iob->data);
+ qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
+ iob->data);
if (channel == &card->write) {
memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
@@ -1924,7 +1898,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
&card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
}
- tmp = ((__u8)card->info.portno) | 0x80;
+ tmp = ((u8)card->dev->dev_port) | 0x80;
memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
@@ -1939,7 +1913,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1991,20 +1965,20 @@ static void qeth_idx_write_cb(struct qeth_channel *channel,
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
- dev_err(&card->write.ccwdev->dev,
+ dev_err(&channel->ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
else
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
" negative reply\n",
- dev_name(&card->write.ccwdev->dev));
+ dev_name(&channel->ccwdev->dev));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
"function level mismatch (sent: 0x%x, received: "
- "0x%x)\n", dev_name(&card->write.ccwdev->dev),
+ "0x%x)\n", dev_name(&channel->ccwdev->dev),
card->info.func_level, temp);
goto out;
}
@@ -2032,20 +2006,20 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
case QETH_IDX_ACT_ERR_EXCL:
- dev_err(&card->write.ccwdev->dev,
+ dev_err(&channel->ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
break;
case QETH_IDX_ACT_ERR_AUTH:
case QETH_IDX_ACT_ERR_AUTH_USER:
- dev_err(&card->read.ccwdev->dev,
+ dev_err(&channel->ccwdev->dev,
"Setting the device online failed because of "
"insufficient authorization\n");
break;
default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
- dev_name(&card->read.ccwdev->dev));
+ dev_name(&channel->ccwdev->dev));
}
QETH_CARD_TEXT_(card, 2, "idxread%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
@@ -2056,7 +2030,7 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
if (temp != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
"level mismatch (sent: 0x%x, received: 0x%x)\n",
- dev_name(&card->read.ccwdev->dev),
+ dev_name(&channel->ccwdev->dev),
card->info.func_level, temp);
goto out;
}
@@ -2073,7 +2047,7 @@ out:
void qeth_prepare_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob)
{
- qeth_setup_ccw(&card->write, iob->data, len);
+ qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
iob->callback = qeth_release_buffer;
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
@@ -2120,6 +2094,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long cb_cmd),
void *reply_param)
{
+ struct qeth_channel *channel = iob->channel;
int rc;
unsigned long flags;
struct qeth_reply *reply = NULL;
@@ -2129,7 +2104,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT(card, 2, "sendctl");
if (card->read_or_write_problem) {
- qeth_release_buffer(iob->channel, iob);
+ qeth_release_buffer(channel, iob);
return -EIO;
}
reply = qeth_alloc_reply(card);
@@ -2141,7 +2116,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
init_waitqueue_head(&reply->wait_q);
- while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
+ while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
if (IS_IPA(iob->data)) {
cmd = __ipa_cmd(iob);
@@ -2161,21 +2136,21 @@ int qeth_send_control_data(struct qeth_card *card, int len,
timeout = jiffies + event_timeout;
QETH_CARD_TEXT(card, 6, "noirqpnd");
- spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, event_timeout);
- spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
"ccw_device_start rc = %i\n",
- dev_name(&card->write.ccwdev->dev), rc);
+ dev_name(&channel->ccwdev->dev), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irqsave(&card->lock, flags);
list_del_init(&reply->list);
qeth_put_reply(reply);
spin_unlock_irqrestore(&card->lock, flags);
- qeth_release_buffer(iob->channel, iob);
- atomic_set(&card->write.irq_pending, 0);
+ qeth_release_buffer(channel, iob);
+ atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
return rc;
}
@@ -2282,19 +2257,42 @@ static int qeth_cm_setup(struct qeth_card *card)
}
-static int qeth_get_initial_mtu_for_card(struct qeth_card *card)
+static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
- switch (card->info.type) {
- case QETH_CARD_TYPE_IQD:
- return card->info.max_mtu;
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSX:
- if (!card->options.layer2)
- return ETH_DATA_LEN - 8; /* L3: allow for LLC + SNAP */
- /* fall through */
- default:
- return ETH_DATA_LEN;
+ struct net_device *dev = card->dev;
+ unsigned int new_mtu;
+
+ if (!max_mtu) {
+ /* IQD needs accurate max MTU to set up its RX buffers: */
+ if (IS_IQD(card))
+ return -EINVAL;
+ /* tolerate quirky HW: */
+ max_mtu = ETH_MAX_MTU;
+ }
+
+ rtnl_lock();
+ if (IS_IQD(card)) {
+ /* move any device with default MTU to new max MTU: */
+ new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
+
+ /* adjust RX buffer size to new max MTU: */
+ card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
+ if (dev->max_mtu && dev->max_mtu != max_mtu)
+ qeth_free_qdio_buffers(card);
+ } else {
+ if (dev->mtu)
+ new_mtu = dev->mtu;
+ /* default MTUs for first setup: */
+ else if (card->options.layer2)
+ new_mtu = ETH_DATA_LEN;
+ else
+ new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
}
+
+ dev->max_mtu = max_mtu;
+ dev->mtu = min(new_mtu, max_mtu);
+ rtnl_unlock();
+ return 0;
}
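
[Editor's note: qeth_update_max_mtu() now concentrates the MTU policy that was previously spread over qeth_get_initial_mtu_for_card(), qeth_mtu_is_valid() and the ULP_ENABLE callback. A standalone worked example of the IQD branch, assuming a hypothetical max_mtu of 8192 reported by the card and a 4096-byte page, both values illustrative:]

	#include <stdio.h>

	#define PAGE_SIZE_MODEL 4096

	int main(void)
	{
		unsigned int max_mtu = 8192;	/* assumed ULP_ENABLE result */
		unsigned int dev_mtu = 0;	/* fresh device, never online */
		unsigned int dev_max = 0;	/* dev->max_mtu before first init */
		unsigned int new_mtu;

		/* device still at its default? then track the new maximum: */
		new_mtu = (dev_mtu == dev_max) ? max_mtu : dev_mtu;

		printf("mtu=%u max_mtu=%u in_buf_size=%u\n",
		       new_mtu < max_mtu ? new_mtu : max_mtu,	/* min() */
		       max_mtu,
		       max_mtu + 2 * PAGE_SIZE_MODEL);		/* 16384 */
		return 0;
	}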
static int qeth_get_mtu_outof_framesize(int framesize)
@@ -2313,21 +2311,6 @@ static int qeth_get_mtu_outof_framesize(int framesize)
}
}
-static int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
-{
- switch (card->info.type) {
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSM:
- case QETH_CARD_TYPE_OSX:
- case QETH_CARD_TYPE_IQD:
- return ((mtu >= 576) &&
- (mtu <= card->info.max_mtu));
- case QETH_CARD_TYPE_OSN:
- default:
- return 1;
- }
-}
-
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
@@ -2346,29 +2329,10 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
if (card->info.type == QETH_CARD_TYPE_IQD) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
- if (!mtu) {
- iob->rc = -EINVAL;
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
- return 0;
- }
- if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
- /* frame size has changed */
- if (card->dev &&
- ((card->dev->mtu == card->info.initial_mtu) ||
- (card->dev->mtu > mtu)))
- card->dev->mtu = mtu;
- qeth_free_qdio_buffers(card);
- }
- card->info.initial_mtu = mtu;
- card->info.max_mtu = mtu;
- card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
} else {
- card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
- iob->data);
- card->info.initial_mtu = min(card->info.max_mtu,
- qeth_get_initial_mtu_for_card(card));
- card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+ mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
}
+ *(u16 *)reply->param = mtu;
memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
@@ -2382,11 +2346,19 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
return 0;
}
+static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
+{
+ if (IS_OSN(card))
+ return QETH_PROT_OSN2;
+ return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
+}
+
static int qeth_ulp_enable(struct qeth_card *card)
{
- int rc;
- char prot_type;
+ u8 prot_type = qeth_mpc_select_prot_type(card);
struct qeth_cmd_buffer *iob;
+ u16 max_mtu;
+ int rc;
/*FIXME: trace view callbacks*/
QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
@@ -2394,25 +2366,17 @@ static int qeth_ulp_enable(struct qeth_card *card)
iob = qeth_wait_for_buffer(&card->write);
memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
- *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
- (__u8) card->info.portno;
- if (card->options.layer2)
- if (card->info.type == QETH_CARD_TYPE_OSN)
- prot_type = QETH_PROT_OSN2;
- else
- prot_type = QETH_PROT_LAYER2;
- else
- prot_type = QETH_PROT_TCPIP;
-
+ *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
- qeth_ulp_enable_cb, NULL);
- return rc;
-
+ qeth_ulp_enable_cb, &max_mtu);
+ if (rc)
+ return rc;
+ return qeth_update_max_mtu(card, max_mtu);
}
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
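
[Editor's note: instead of the callback writing card->info.max_mtu directly, the parsed MTU now travels back to qeth_ulp_enable() through reply->param, the &max_mtu argument above. A toy standalone model of that data flow; all names are illustrative and the real command completes asynchronously:]

	#include <stdio.h>

	typedef int (*reply_cb_t)(void *reply_param);

	static int ulp_enable_cb_model(void *reply_param)
	{
		*(unsigned short *)reply_param = 8192;	/* parsed from response */
		return 0;
	}

	static int send_control_data_model(reply_cb_t cb, void *reply_param)
	{
		/* the real qeth_send_control_data() invokes the callback from
		 * IRQ context once the reply arrives; this model is synchronous */
		return cb(reply_param);
	}

	int main(void)
	{
		unsigned short max_mtu = 0;

		if (!send_control_data_model(ulp_enable_cb_model, &max_mtu))
			printf("max_mtu=%u\n", max_mtu); /* then: qeth_update_max_mtu() */
		return 0;
	}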
@@ -2467,32 +2431,20 @@ static int qeth_ulp_setup(struct qeth_card *card)
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
- int rc;
struct qeth_qdio_out_buffer *newbuf;
- rc = 0;
newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
- if (!newbuf) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!newbuf)
+ return -ENOMEM;
+
newbuf->buffer = q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
newbuf->q = q;
- newbuf->aob = NULL;
newbuf->next_pending = q->bufs[bidx];
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
- if (q->bufstates) {
- q->bufstates[bidx].user = newbuf;
- QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
- QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
- QETH_CARD_TEXT_(q->card, 2, "%lx",
- (long) newbuf->next_pending);
- }
-out:
- return rc;
+ return 0;
}
static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
@@ -2902,8 +2854,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
QDIO_MAX_BUFFERS_PER_Q);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
qeth_clear_output_buffer(card->qdio.out_qs[i],
- card->qdio.out_qs[i]->bufs[j],
- QETH_QDIO_BUF_EMPTY);
+ card->qdio.out_qs[i]->bufs[j]);
}
card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->next_buf_to_fill = 0;
@@ -2936,7 +2887,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
/* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
- cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+ cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
if (card->options.layer2)
cmd->hdr.prim_version_no = 2;
else
@@ -2966,9 +2917,10 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- char prot_type)
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
{
+ u8 prot_type = qeth_mpc_select_prot_type(card);
+
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -2988,18 +2940,9 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
void *reply_param)
{
int rc;
- char prot_type;
QETH_CARD_TEXT(card, 4, "sendipa");
-
- if (card->options.layer2)
- if (card->info.type == QETH_CARD_TYPE_OSN)
- prot_type = QETH_PROT_OSN2;
- else
- prot_type = QETH_PROT_LAYER2;
- else
- prot_type = QETH_PROT_TCPIP;
- qeth_prepare_ipa_cmd(card, iob, prot_type);
+ qeth_prepare_ipa_cmd(card, iob);
rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
iob, reply_cb, reply_param);
if (rc == -ETIME) {
@@ -3070,7 +3013,7 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
return iob;
}
-int qeth_query_setadapterparms(struct qeth_card *card)
+static int qeth_query_setadapterparms(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -3083,7 +3026,6 @@ int qeth_query_setadapterparms(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
static int qeth_query_ipassists_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -3123,7 +3065,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return 0;
}
-int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
+static int qeth_query_ipassists(struct qeth_card *card,
+ enum qeth_prot_versions prot)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -3135,7 +3078,6 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_query_ipassists);
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -3174,7 +3116,6 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
-EXPORT_SYMBOL_GPL(qeth_query_switch_attributes);
static int qeth_query_setdiagass_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -3524,13 +3465,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
+ atomic_add(count, &queue->used_buffers);
+
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
if (queue->card->options.performance_stats)
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
queue->card->perf_stats.outbound_do_qdio_start_time;
- atomic_add(count, &queue->used_buffers);
if (rc) {
queue->card->stats.tx_errors += count;
/* ignore temporary SIGA errors without busy condition */
@@ -3595,7 +3537,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev && (card->dev->flags & IFF_UP))
+ if (card->dev->flags & IFF_UP)
napi_schedule(&card->napi);
}
@@ -3628,10 +3570,10 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
-
-static void qeth_qdio_cq_handler(struct qeth_card *card,
- unsigned int qdio_err,
- unsigned int queue, int first_element, int count) {
+static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
+ unsigned int queue, int first_element,
+ int count)
+{
struct qeth_qdio_q *cq = card->qdio.c_q;
int i;
int rc;
@@ -3657,25 +3599,17 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
for (i = first_element; i < first_element + count; ++i) {
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
- int e;
+ int e = 0;
- e = 0;
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
unsigned long phys_aob_addr;
phys_aob_addr = (unsigned long) buffer->element[e].addr;
qeth_qdio_handle_aob(card, phys_aob_addr);
- buffer->element[e].addr = NULL;
- buffer->element[e].eflags = 0;
- buffer->element[e].sflags = 0;
- buffer->element[e].length = 0;
-
++e;
}
-
- buffer->element[15].eflags = 0;
- buffer->element[15].sflags = 0;
+ qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
}
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
card->qdio.c_q->next_buf_to_init,
@@ -3754,11 +3688,11 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_notify_skbs(queue, buffer,
TX_NOTIFY_PENDING);
}
- buffer->aob = queue->bufstates[bidx].aob;
QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
- QETH_CARD_TEXT(queue->card, 5, "aob");
- QETH_CARD_TEXT_(queue->card, 5, "%lx",
- virt_to_phys(buffer->aob));
+
+ /* prepare the queue slot for re-use: */
+ qeth_scrub_qdio_buffer(buffer->buffer,
+ QETH_MAX_BUFFER_ELEMENTS(card));
if (qeth_init_qdio_out_buf(queue, bidx)) {
QETH_CARD_TEXT(card, 2, "outofbuf");
qeth_schedule_recovery(card);
@@ -3772,8 +3706,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_notify_skbs(queue, buffer, n);
}
- qeth_clear_output_buffer(queue, buffer,
- QETH_QDIO_BUF_EMPTY);
+ qeth_clear_output_buffer(queue, buffer);
}
qeth_cleanup_handled_pending(queue, bidx, 0);
}
@@ -3800,15 +3733,11 @@ static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
* Note: Function assumes that we have 4 outbound queues.
*/
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv, int cast_type)
+ int ipv)
{
__be16 *tci;
u8 tos;
- if (cast_type && card->info.is_multicast_different)
- return card->info.is_multicast_different &
- (card->qdio.no_out_queues - 1);
-
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
@@ -3872,6 +3801,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
+static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
+{
+ unsigned int elements = qeth_get_elements_for_frags(skb);
+ addr_t end = (addr_t)skb->data + skb_headlen(skb);
+ addr_t start = (addr_t)skb->data + data_offset;
+
+ if (start != end)
+ elements += qeth_get_elements_for_range(start, end);
+ return elements;
+}
+
/**
* qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
* @card: qeth card structure, to check max. elems.
@@ -3887,12 +3827,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
int qeth_get_elements_no(struct qeth_card *card,
struct sk_buff *skb, int extra_elems, int data_offset)
{
- addr_t end = (addr_t)skb->data + skb_headlen(skb);
- int elements = qeth_get_elements_for_frags(skb);
- addr_t start = (addr_t)skb->data + data_offset;
-
- if (start != end)
- elements += qeth_get_elements_for_range(start, end);
+ int elements = qeth_count_elements(skb, data_offset);
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
@@ -3926,32 +3861,87 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
/**
- * qeth_push_hdr() - push a qeth_hdr onto an skb.
- * @skb: skb that the qeth_hdr should be pushed onto.
+ * qeth_add_hw_header() - add a HW header to an skb.
+ * @skb: skb that the HW header should be added to.
* @hdr: double pointer to a qeth_hdr. When returning with >= 0,
* it contains a valid pointer to a qeth_hdr.
- * @len: length of the hdr that needs to be pushed on.
+ * @hdr_len: length of the HW header.
+ * @proto_len: length of protocol headers that need to be in same page as the
+ * HW header.
*
* Returns the pushed length. If the header can't be pushed on
* (e.g. because it would cross a page boundary), it is allocated from
* the cache instead and 0 is returned.
+ * The number of needed buffer elements is returned in @elements.
* Errors during hdr creation are indicated by returning with < 0.
*/
-int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
-{
- if (skb_headroom(skb) >= len &&
- qeth_get_elements_for_range((addr_t)skb->data - len,
- (addr_t)skb->data) == 1) {
- *hdr = skb_push(skb, len);
- return len;
+int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr **hdr, unsigned int hdr_len,
+ unsigned int proto_len, unsigned int *elements)
+{
+ const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ const unsigned int contiguous = proto_len ? proto_len : 1;
+ unsigned int __elements;
+ addr_t start, end;
+ bool push_ok;
+ int rc;
+
+check_layout:
+ start = (addr_t)skb->data - hdr_len;
+ end = (addr_t)skb->data;
+
+ if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
+ /* Push HW header into same page as first protocol header. */
+ push_ok = true;
+ __elements = qeth_count_elements(skb, 0);
+ } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
+ /* Push HW header into a new page. */
+ push_ok = true;
+ __elements = 1 + qeth_count_elements(skb, 0);
+ } else {
+ /* Use header cache, copy protocol headers up. */
+ push_ok = false;
+ __elements = 1 + qeth_count_elements(skb, proto_len);
+ }
+
+ /* Compress skb to fit into one IO buffer: */
+ if (__elements > max_elements) {
+ if (!skb_is_nonlinear(skb)) {
+ /* Drop it, no easy way of shrinking it further. */
+ QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
+ max_elements, __elements, skb->len);
+ return -E2BIG;
+ }
+
+ rc = skb_linearize(skb);
+ if (card->options.performance_stats) {
+ if (rc)
+ card->perf_stats.tx_linfail++;
+ else
+ card->perf_stats.tx_lin++;
+ }
+ if (rc)
+ return rc;
+
+ /* Linearization changed the layout, re-evaluate: */
+ goto check_layout;
+ }
+
+ *elements = __elements;
+ /* Add the header: */
+ if (push_ok) {
+ *hdr = skb_push(skb, hdr_len);
+ return hdr_len;
}
/* fall back */
*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!*hdr)
return -ENOMEM;
+ /* Copy protocol headers behind HW header: */
+ skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_push_hdr);
+EXPORT_SYMBOL_GPL(qeth_add_hw_header);
static void __qeth_fill_buffer(struct sk_buff *skb,
struct qeth_qdio_out_buffer *buf,
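
[Editor's note: the kernel-doc above defines a three-way contract that both TX paths later in this patch depend on: a negative return is an error, a positive return means the header was pushed into the skb's headroom and travels with the skb, and 0 means it came from the header cache, so the caller must free it itself if the send fails. A standalone caller-side model with stand-in types and names:]

	#include <stdlib.h>

	#define ENOMEM_MODEL 12

	struct hdr_model { unsigned char bytes[32]; };

	/* stand-in for qeth_add_hw_header(): <0 error, ==0 cache-allocated,
	 * >0 pushed into the skb's headroom */
	static int add_hw_header_model(struct hdr_model **hdr, int have_headroom)
	{
		static struct hdr_model in_headroom;

		if (have_headroom) {
			*hdr = &in_headroom;		/* models skb_push() */
			return (int)sizeof(**hdr);
		}
		*hdr = malloc(sizeof(**hdr));		/* models the header cache */
		return *hdr ? 0 : -ENOMEM_MODEL;
	}

	static int xmit_model(int have_headroom, int send_fails)
	{
		struct hdr_model *hdr;
		int push_len = add_hw_header_model(&hdr, have_headroom);

		if (push_len < 0)
			return push_len;
		if (send_fails && !push_len)
			free(hdr);   /* mirrors: if (!push_len) kmem_cache_free() */
		return send_fails ? -1 : 0;
	}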
@@ -4231,24 +4221,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
-int qeth_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct qeth_card *card;
- char dbf_text[15];
-
- card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "chgmtu");
- sprintf(dbf_text, "%8x", new_mtu);
- QETH_CARD_TEXT(card, 4, dbf_text);
-
- if (!qeth_mtu_is_valid(card, new_mtu))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL_GPL(qeth_change_mtu);
-
struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
struct qeth_card *card;
@@ -4824,9 +4796,6 @@ int qeth_vm_request_mac(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
- if (!card->dev)
- return -ENODEV;
-
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
if (!request || !response) {
@@ -4834,7 +4803,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_RDEV(card), &id);
+ ccw_device_get_id(CARD_DDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
@@ -5067,11 +5036,11 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
-
QETH_DBF_TEXT(SETUP, 2, "freecrd");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
+ qeth_clean_channel(&card->data);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
@@ -5706,6 +5675,53 @@ static void qeth_clear_dbf_list(void)
mutex_unlock(&qeth_dbf_list_mutex);
}
+static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
+{
+ struct net_device *dev;
+
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_IQD:
+ dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+ break;
+ case QETH_CARD_TYPE_OSN:
+ dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
+ break;
+ default:
+ dev = alloc_etherdev(0);
+ }
+
+ if (!dev)
+ return NULL;
+
+ dev->ml_priv = card;
+ dev->watchdog_timeo = QETH_TX_TIMEOUT;
+ dev->min_mtu = IS_OSN(card) ? 64 : 576;
+ /* initialized when device first goes online: */
+ dev->max_mtu = 0;
+ dev->mtu = 0;
+ SET_NETDEV_DEV(dev, &card->gdev->dev);
+ netif_carrier_off(dev);
+
+ if (!IS_OSN(card)) {
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+ }
+
+ return dev;
+}
+
+struct net_device *qeth_clone_netdev(struct net_device *orig)
+{
+ struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
+
+ if (!clone)
+ return NULL;
+
+ clone->dev_port = orig->dev_port;
+ return clone;
+}
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
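
[Editor's note: qeth_clone_netdev() exists so the layer2/layer3 switch in qeth_dev_layer2_store(), see the qeth_core_sys.c hunk below, can hand the new discipline a pristine net_device while preserving the user-configured port number. The key ordering, allocate the replacement before tearing anything down, modelled standalone with stand-in types:]

	#include <stdlib.h>

	struct netdev_model { int dev_port; };

	static struct netdev_model *clone_model(const struct netdev_model *orig)
	{
		struct netdev_model *clone = calloc(1, sizeof(*clone));

		if (clone)
			clone->dev_port = orig->dev_port; /* only setting kept */
		return clone;
	}

	static int switch_discipline_model(struct netdev_model **dev)
	{
		struct netdev_model *ndev = clone_model(*dev);

		if (!ndev)
			return -1;	/* old netdev stays intact on failure */
		free(*dev);		/* mirrors free_netdev(card->dev) */
		*dev = ndev;
		return 0;
	}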
@@ -5748,12 +5764,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
gdev->cdev[1]->handler = qeth_irq;
gdev->cdev[2]->handler = qeth_irq;
- qeth_determine_card_type(card);
- rc = qeth_setup_card(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ qeth_setup_card(card);
+ qeth_update_from_chp_desc(card);
+
+ card->dev = qeth_alloc_netdev(card);
+ if (!card->dev)
goto err_card;
- }
qeth_determine_capabilities(card);
enforced_disc = qeth_enforce_discipline(card);
@@ -5765,7 +5781,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
card->info.layer_enforced = true;
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
- goto err_card;
+ goto err_load;
gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
? card->discipline->devtype
@@ -5783,6 +5799,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_disc:
qeth_core_free_discipline(card);
+err_load:
+ free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
err_dev:
@@ -5805,10 +5823,10 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_del(&card->list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+ free_netdev(card->dev);
qeth_core_free_card(card);
dev_set_drvdata(&gdev->dev, NULL);
put_device(&gdev->dev);
- return;
}
static int qeth_core_set_online(struct ccwgroup_device *gdev)
@@ -5877,31 +5895,13 @@ static int qeth_core_restore(struct ccwgroup_device *gdev)
return 0;
}
-static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "qeth",
- },
- .ccw_driver = &qeth_ccw_driver,
- .setup = qeth_core_probe_device,
- .remove = qeth_core_remove_device,
- .set_online = qeth_core_set_online,
- .set_offline = qeth_core_set_offline,
- .shutdown = qeth_core_shutdown,
- .prepare = NULL,
- .complete = NULL,
- .freeze = qeth_core_freeze,
- .thaw = qeth_core_thaw,
- .restore = qeth_core_restore,
-};
-
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
size_t count)
{
int err;
- err = ccwgroup_create_dev(qeth_core_root_dev,
- &qeth_core_ccwgroup_driver, 3, buf);
+ err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
+ buf);
return err ? err : count;
}
@@ -5919,6 +5919,25 @@ static const struct attribute_group *qeth_drv_attr_groups[] = {
NULL,
};
+static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
+ .driver = {
+ .groups = qeth_drv_attr_groups,
+ .owner = THIS_MODULE,
+ .name = "qeth",
+ },
+ .ccw_driver = &qeth_ccw_driver,
+ .setup = qeth_core_probe_device,
+ .remove = qeth_core_remove_device,
+ .set_online = qeth_core_set_online,
+ .set_offline = qeth_core_set_offline,
+ .shutdown = qeth_core_shutdown,
+ .prepare = NULL,
+ .complete = NULL,
+ .freeze = qeth_core_freeze,
+ .thaw = qeth_core_thaw,
+ .restore = qeth_core_restore,
+};
+
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
@@ -5985,7 +6004,7 @@ static struct {
{"tx skbs packing"},
{"tx buffers packing"},
{"tx sg skbs"},
- {"tx sg frags"},
+ {"tx buffer elements"},
/* 10 */{"rx sg skbs"},
{"rx sg frags"},
{"rx sg page allocs"},
@@ -6044,7 +6063,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[6] = card->perf_stats.skbs_sent_pack;
data[7] = card->perf_stats.bufs_sent_pack;
data[8] = card->perf_stats.sg_skbs_sent;
- data[9] = card->perf_stats.sg_frags_sent;
+ data[9] = card->perf_stats.buf_elements_sent;
data[10] = card->perf_stats.sg_skbs_rx;
data[11] = card->perf_stats.sg_frags_rx;
data[12] = card->perf_stats.sg_alloc_page_rx;
@@ -6459,28 +6478,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
NETIF_F_IPV6_CSUM)
/**
- * qeth_recover_features() - Restore device features after recovery
- * @dev: the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev: a net_device
*/
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
{
- netdev_features_t features = dev->features;
struct qeth_card *card = dev->ml_priv;
+ netdev_features_t features;
+ rtnl_lock();
+ features = dev->features;
/* force-off any feature that needs an IPA sequence.
* netdev_update_features() will restart them.
*/
dev->features &= ~QETH_HW_FEATURES;
netdev_update_features(dev);
-
- if (features == dev->features)
- return;
- dev_warn(&card->gdev->dev,
- "Device recovery failed to restore all offload features\n");
+ if (features != dev->features)
+ dev_warn(&card->gdev->dev,
+ "Device recovery failed to restore all offload features\n");
+ rtnl_unlock();
}
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
@@ -6611,7 +6629,6 @@ static int __init qeth_core_init(void)
rc = ccw_driver_register(&qeth_ccw_driver);
if (rc)
goto ccw_err;
- qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups;
rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
if (rc)
goto ccwgroup_err;
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 22428b769f9b..5bcb8dafc3ee 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -146,17 +146,6 @@ unsigned char IPA_PDU_HEADER[] = {
};
EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
-unsigned char WRITE_CCW[] = {
- 0x01, CCW_FLAG_SLI, 0, 0,
- 0, 0, 0, 0
-};
-
-unsigned char READ_CCW[] = {
- 0x02, CCW_FLAG_SLI, 0, 0,
- 0, 0, 0, 0
-};
-
-
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
char *msg;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 878e62f35169..aa8b9196b089 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -64,6 +64,9 @@ enum qeth_card_types {
QETH_CARD_TYPE_OSX = 2,
};
+#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
+#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
+
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
enum qeth_link_types {
@@ -815,10 +818,6 @@ extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
/* END OF IP Assist related definitions */
/*****************************************************************************/
-
-extern unsigned char WRITE_CCW[];
-extern unsigned char READ_CCW[];
-
extern unsigned char CM_ENABLE[];
#define CM_ENABLE_SIZE 0x63
#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index c3f18afb368b..25d0be25bcb3 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -112,7 +112,7 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
if (!card)
return -EINVAL;
- return sprintf(buf, "%i\n", card->info.portno);
+ return sprintf(buf, "%i\n", card->dev->dev_port);
}
static ssize_t qeth_dev_portno_store(struct device *dev,
@@ -143,9 +143,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
rc = -EINVAL;
goto out;
}
- card->info.portno = portno;
- if (card->dev)
- card->dev->dev_port = portno;
+ card->dev->dev_port = portno;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -388,6 +386,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ struct net_device *ndev;
char *tmp;
int i, rc = 0;
enum qeth_discipline_id newdis;
@@ -424,8 +423,19 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
card->info.mac_bits = 0;
if (card->discipline) {
+ /* start with a new, pristine netdevice: */
+ ndev = qeth_clone_netdev(card->dev);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
card->discipline->remove(card->gdev);
qeth_core_free_discipline(card);
+ card->options.layer2 = -1;
+
+ free_netdev(card->dev);
+ card->dev = ndev;
}
rc = qeth_core_load_discipline(card, newdis);
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index f2130051ca11..ddc615b431a8 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -14,6 +14,11 @@ extern const struct attribute_group *qeth_l2_attr_groups[];
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+ enum qeth_sbp_roles *role,
+ enum qeth_sbp_states *state);
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a7cb37da6a21..710fa74892ae 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -26,7 +26,6 @@
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
-static void qeth_l2_set_rx_mode(struct net_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -140,7 +139,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
- enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
int rc;
@@ -157,7 +156,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
{
- enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
int rc;
@@ -186,12 +185,12 @@ static void qeth_l2_del_all_macs(struct qeth_card *card)
static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
if (card->info.type == QETH_CARD_TYPE_OSN)
- return RTN_UNSPEC;
+ return RTN_UNICAST;
if (is_broadcast_ether_addr(skb->data))
return RTN_BROADCAST;
if (is_multicast_ether_addr(skb->data))
return RTN_MULTICAST;
- return RTN_UNSPEC;
+ return RTN_UNICAST;
}
static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
@@ -344,7 +343,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
}
- qeth_l2_set_rx_mode(card->dev);
return rc;
}
@@ -501,27 +499,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
+ /* avoid racing against concurrent state change: */
+ if (!mutex_trylock(&card->conf_mutex))
+ return -EAGAIN;
+
if (!qeth_card_hw_is_reachable(card)) {
ether_addr_copy(dev->dev_addr, addr->sa_data);
- return 0;
+ goto out_unlock;
}
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
+ goto out_unlock;
/* add the new address, switch over, drop the old */
rc = qeth_l2_send_setmac(card, addr->sa_data);
if (rc)
- return rc;
+ goto out_unlock;
ether_addr_copy(old_addr, dev->dev_addr);
ether_addr_copy(dev->dev_addr, addr->sa_data);
if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
qeth_l2_remove_mac(card, old_addr);
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- return 0;
+
+out_unlock:
+ mutex_unlock(&card->conf_mutex);
+ return rc;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -636,97 +641,58 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_promisc_to_bridge(card);
}
-static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int cast_type)
+static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int cast_type, int ipv)
{
- unsigned int data_offset = ETH_HLEN;
- struct qeth_hdr *hdr;
- int rc;
-
- hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
- if (!hdr)
- return -ENOMEM;
- qeth_l2_fill_header(hdr, skb, cast_type, skb->len);
- skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
- data_offset);
-
- if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
- rc = -E2BIG;
- goto out;
- }
- rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
- sizeof(*hdr) + data_offset);
-out:
- if (rc)
- kmem_cache_free(qeth_core_header_cache, hdr);
- return rc;
-}
-
-static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int cast_type,
- int ipv)
-{
- int push_len = sizeof(struct qeth_hdr);
- unsigned int elements, nr_frags;
- unsigned int hdr_elements = 0;
+ const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+ const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+ unsigned int frame_len = skb->len;
+ unsigned int data_offset = 0;
struct qeth_hdr *hdr = NULL;
unsigned int hd_len = 0;
- int rc;
-
- /* fix hardware limitation: as long as we do not have sbal
- * chaining we can not send long frag lists
- */
- if (!qeth_get_elements_no(card, skb, 0, 0)) {
- rc = skb_linearize(skb);
-
- if (card->options.performance_stats) {
- if (rc)
- card->perf_stats.tx_linfail++;
- else
- card->perf_stats.tx_lin++;
- }
- if (rc)
- return rc;
- }
- nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int elements;
+ int push_len, rc;
+ bool is_sg;
- rc = skb_cow_head(skb, push_len);
+ rc = skb_cow_head(skb, hw_hdr_len);
if (rc)
return rc;
- push_len = qeth_push_hdr(skb, &hdr, push_len);
+
+ push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
+ &elements);
if (push_len < 0)
return push_len;
if (!push_len) {
- /* hdr was allocated from cache */
- hd_len = sizeof(*hdr);
- hdr_elements = 1;
+ /* HW header needs its own buffer element. */
+ hd_len = hw_hdr_len + proto_len;
+ data_offset = proto_len;
}
- qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
+ qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
}
- elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
- if (!elements) {
- rc = -E2BIG;
- goto out;
+ is_sg = skb_is_nonlinear(skb);
+ if (IS_IQD(card)) {
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+ hd_len);
+ } else {
+ /* TODO: drop skb_orphan() once TX completion is fast enough */
+ skb_orphan(skb);
+ rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+ hd_len, elements);
}
- elements += hdr_elements;
- /* TODO: remove the skb_orphan() once TX completion is fast enough */
- skb_orphan(skb);
- rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
-out:
if (!rc) {
- if (card->options.performance_stats && nr_frags) {
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent += nr_frags + 1;
+ if (card->options.performance_stats) {
+ card->perf_stats.buf_elements_sent += elements;
+ if (is_sg)
+ card->perf_stats.sg_skbs_sent++;
}
} else {
- if (hd_len)
+ if (!push_len)
kmem_cache_free(qeth_core_header_cache, hdr);
if (rc == -EBUSY)
/* roll back to ETH header */
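
[Editor's note: for IQD devices the merged qeth_l2_xmit() passes proto_len = ETH_HLEN, so when the HW header cannot be pushed into the skb's headroom, qeth_add_hw_header() copies the Ethernet header into the cache element directly behind the HW header (hd_len = hw_hdr_len + proto_len) and the skb is then mapped from data_offset = proto_len onwards. A standalone model of that fallback layout; the sizes are illustrative:]

	#include <stdio.h>
	#include <string.h>

	#define HW_HDR_LEN 32		/* stands in for sizeof(struct qeth_hdr) */
	#define ETH_HLEN_MODEL 14

	int main(void)
	{
		unsigned char skb_data[64] = "eth-header+payload";
		unsigned char element[HW_HDR_LEN + ETH_HLEN_MODEL]; /* header cache */
		unsigned int data_offset = ETH_HLEN_MODEL;
		unsigned int hd_len = sizeof(element);

		/* qeth_add_hw_header() copies proto_len bytes behind the HW hdr: */
		memcpy(element + HW_HDR_LEN, skb_data, ETH_HLEN_MODEL);

		/* the skb itself is mapped from data_offset onwards: */
		printf("hd_len=%u, first mapped byte='%c'\n", hd_len,
		       skb_data[data_offset]);
		return 0;
	}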
@@ -763,34 +729,23 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
- if (card->qdio.do_prio_queueing || (cast_type &&
- card->info.is_multicast_different))
- queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
- ipv, cast_type)];
- else
- queue = card->qdio.out_qs[card->qdio.default_out_queue];
-
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
card->stats.tx_carrier_errors++;
goto tx_drop;
}
+ queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
if (card->options.performance_stats) {
card->perf_stats.outbound_cnt++;
card->perf_stats.outbound_start_time = qeth_get_micros();
}
netif_stop_queue(dev);
- switch (card->info.type) {
- case QETH_CARD_TYPE_OSN:
+ if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
- break;
- case QETH_CARD_TYPE_IQD:
- rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
- break;
- default:
- rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv);
- }
+ else
+ rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);
if (!rc) {
card->stats.tx_packets++;
@@ -899,13 +854,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
-
- if (card->dev) {
- unregister_netdev(card->dev);
- free_netdev(card->dev);
- card->dev = NULL;
- }
- return;
+ unregister_netdev(card->dev);
}
static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -934,7 +883,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_set_mac_address = qeth_l2_set_mac_address,
- .ndo_change_mtu = qeth_change_mtu,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -944,35 +892,19 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
- switch (card->info.type) {
- case QETH_CARD_TYPE_IQD:
- card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
- ether_setup);
- break;
- case QETH_CARD_TYPE_OSN:
- card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
- ether_setup);
- break;
- default:
- card->dev = alloc_etherdev(0);
- }
+ int rc;
- if (!card->dev)
- return -ENODEV;
+ if (card->dev->netdev_ops)
+ return 0;
- card->dev->ml_priv = card;
card->dev->priv_flags |= IFF_UNICAST_FLT;
- card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
- card->dev->mtu = card->info.initial_mtu;
- card->dev->min_mtu = 64;
- card->dev->max_mtu = ETH_MAX_MTU;
- card->dev->dev_port = card->info.portno;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
if (card->info.type == QETH_CARD_TYPE_OSN) {
card->dev->ethtool_ops = &qeth_l2_osn_ops;
card->dev->flags |= IFF_NOARP;
} else {
card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+ card->dev->needed_headroom = sizeof(struct qeth_hdr);
}
if (card->info.type == QETH_CARD_TYPE_OSM)
@@ -980,14 +912,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
else
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- if (card->info.type != QETH_CARD_TYPE_OSN &&
- card->info.type != QETH_CARD_TYPE_IQD) {
- card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- card->dev->needed_headroom = sizeof(struct qeth_hdr);
- card->dev->hw_features |= NETIF_F_SG;
- card->dev->vlan_features |= NETIF_F_SG;
- }
-
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
@@ -1006,12 +930,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
- card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
- SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- netif_carrier_off(card->dev);
- return register_netdev(card->dev);
+ rc = register_netdev(card->dev);
+ if (rc)
+ card->dev->netdev_ops = NULL;
+ return rc;
}
static int qeth_l2_start_ipassists(struct qeth_card *card)
@@ -1057,10 +981,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
- if (!card->dev && qeth_l2_setup_netdev(card)) {
- rc = -ENODEV;
+ rc = qeth_l2_setup_netdev(card);
+ if (rc)
goto out_remove;
- }
if (card->info.type != QETH_CARD_TYPE_OSN &&
!qeth_l2_send_setmac(card, card->dev->dev_addr))
@@ -1112,20 +1035,18 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
netif_carrier_off(card->dev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+ qeth_enable_hw_features(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
if (recovery_mode &&
card->info.type != QETH_CARD_TYPE_OSN) {
__qeth_l2_open(card->dev);
+ qeth_l2_set_rx_mode(card->dev);
} else {
rtnl_lock();
dev_open(card->dev);
rtnl_unlock();
}
- /* this also sets saved unicast addresses */
- qeth_l2_set_rx_mode(card->dev);
- rtnl_lock();
- qeth_recover_features(card->dev);
- rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1165,8 +1086,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- if (card->dev && netif_carrier_ok(card->dev))
- netif_carrier_off(card->dev);
+ netif_carrier_off(card->dev);
recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -1239,8 +1159,7 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->dev)
- netif_device_detach(card->dev);
+ netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
@@ -1273,8 +1192,7 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
rc = __qeth_l2_set_online(card->gdev, 0);
out:
qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (card->dev)
- netif_device_attach(card->dev);
+ netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
@@ -1321,25 +1239,26 @@ EXPORT_SYMBOL_GPL(qeth_l2_discipline);
static int qeth_osn_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob)
{
+ struct qeth_channel *channel = iob->channel;
unsigned long flags;
int rc = 0;
QETH_CARD_TEXT(card, 5, "osndctrd");
wait_event(card->wait_q,
- atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
+ atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
QETH_CARD_TEXT(card, 6, "osnoirqp");
- spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
- spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
"ccw_device_start rc = %i\n", rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
- qeth_release_buffer(iob->channel, iob);
- atomic_set(&card->write.irq_pending, 0);
+ qeth_release_buffer(channel, iob);
+ atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
}
return rc;
@@ -1352,7 +1271,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "osndipa");
- qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
+ qeth_prepare_ipa_cmd(card, iob);
s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
s2 = (u16)data_len;
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
@@ -1871,7 +1790,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
return rc;
return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
}
-EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
static int qeth_bridgeport_set_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -2019,7 +1937,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
return qeth_anset_makerc(card, rc, response);
}
-EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set);
static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
{
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e7fa479adf47..7175086677fb 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -117,9 +117,9 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
int rc = 0;
if (!card->ipato.enabled)
- return 0;
+ return false;
if (addr->type != QETH_IP_TYPE_NORMAL)
- return 0;
+ return false;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
(addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -1978,17 +1978,17 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
return cast_type;
- return RTN_UNSPEC;
+ return RTN_UNICAST;
}
rcu_read_unlock();
/* no neighbour (e.g. AF_PACKET), fall back to target's IP address ... */
if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
- RTN_MULTICAST : RTN_UNSPEC;
+ RTN_MULTICAST : RTN_UNICAST;
else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
- RTN_MULTICAST : RTN_UNSPEC;
+ RTN_MULTICAST : RTN_UNICAST;
/* ... and MAC address */
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
@@ -1997,22 +1997,21 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
return RTN_MULTICAST;
/* default to unicast */
- return RTN_UNSPEC;
+ return RTN_UNICAST;
}
-static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
- struct qeth_hdr *hdr, struct sk_buff *skb)
+static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
+ unsigned int data_len)
{
char daddr[16];
struct af_iucv_trans_hdr *iucv_hdr;
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- hdr->hdr.l3.ext_flags = 0;
- hdr->hdr.l3.length = skb->len - ETH_HLEN;
+ hdr->hdr.l3.length = data_len;
hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
+ iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
memset(daddr, 0, sizeof(daddr));
daddr[0] = 0xfe;
daddr[1] = 0x80;
@@ -2051,6 +2050,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
}
+ if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
+
/* OSA only: */
if (!ipv) {
hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
@@ -2156,106 +2161,121 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
return elements;
}
-static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int ipv,
+ int cast_type)
{
- int rc;
- __be16 *tag;
+ const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+ unsigned int frame_len, elements;
+ unsigned char eth_hdr[ETH_HLEN];
struct qeth_hdr *hdr = NULL;
- int hdr_elements = 0;
- int elements;
- struct qeth_card *card = dev->ml_priv;
- struct sk_buff *new_skb = NULL;
- int ipv = qeth_get_ip_version(skb);
- int cast_type = qeth_l3_get_cast_type(skb);
- struct qeth_qdio_out_q *queue =
- card->qdio.out_qs[card->qdio.do_prio_queueing
- || (cast_type && card->info.is_multicast_different) ?
- qeth_get_priority_queue(card, skb, ipv, cast_type) :
- card->qdio.default_out_queue];
- int tx_bytes = skb->len;
unsigned int hd_len = 0;
- bool use_tso;
- int data_offset = -1;
- unsigned int nr_frags;
-
- if (((card->info.type == QETH_CARD_TYPE_IQD) &&
- (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
- ((card->options.cq == QETH_CQ_ENABLED) &&
- (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) ||
- card->options.sniffer)
- goto tx_drop;
+ int push_len, rc;
+ bool is_sg;
- if ((card->state != CARD_STATE_UP) || !card->lan_online) {
- card->stats.tx_carrier_errors++;
- goto tx_drop;
+ /* re-use the L2 header area for the HW header: */
+ rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
+ if (rc)
+ return rc;
+ skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
+ skb_pull(skb, ETH_HLEN);
+ frame_len = skb->len;
+
+ push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
+ &elements);
+ if (push_len < 0)
+ return push_len;
+ if (!push_len) {
+ /* hdr was added discontiguously from skb->data */
+ hd_len = hw_hdr_len;
}
- if ((cast_type == RTN_BROADCAST) &&
- (card->info.broadcast_capable == 0))
- goto tx_drop;
+ if (skb->protocol == htons(ETH_P_AF_IUCV))
+ qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
+ else
+ qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
- if (card->options.performance_stats) {
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
+ is_sg = skb_is_nonlinear(skb);
+ if (IS_IQD(card)) {
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
+ } else {
+ /* TODO: drop skb_orphan() once TX completion is fast enough */
+ skb_orphan(skb);
+ rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
+ elements);
}
+ if (!rc) {
+ if (card->options.performance_stats) {
+ card->perf_stats.buf_elements_sent += elements;
+ if (is_sg)
+ card->perf_stats.sg_skbs_sent++;
+ }
+ } else {
+ if (!push_len)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+ if (rc == -EBUSY) {
+ /* roll back to ETH header */
+ skb_pull(skb, push_len);
+ skb_push(skb, ETH_HLEN);
+ skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
+ }
+ }
+ return rc;
+}
+
+static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+{
+ int elements, len, rc;
+ __be16 *tag;
+ struct qeth_hdr *hdr = NULL;
+ int hdr_elements = 0;
+ struct sk_buff *new_skb = NULL;
+ int tx_bytes = skb->len;
+ unsigned int hd_len;
+ bool use_tso, is_sg;
+
/* Ignore segment size from skb_is_gso(), 1 page is always used. */
use_tso = skb_is_gso(skb) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- new_skb = skb;
- data_offset = ETH_HLEN;
- hd_len = sizeof(*hdr);
- hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
- if (!hdr)
- goto tx_drop;
- hdr_elements++;
- } else {
- /* create a clone with writeable headroom */
- new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
- + VLAN_HLEN);
- if (!new_skb)
- goto tx_drop;
-
- if (ipv == 4) {
- skb_pull(new_skb, ETH_HLEN);
- }
+ /* create a clone with writeable headroom */
+ new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
+ VLAN_HLEN);
+ if (!new_skb)
+ return -ENOMEM;
- if (ipv != 4 && skb_vlan_tag_present(new_skb)) {
- skb_push(new_skb, VLAN_HLEN);
- skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
- skb_copy_to_linear_data_offset(new_skb, 4,
- new_skb->data + 8, 4);
- skb_copy_to_linear_data_offset(new_skb, 8,
- new_skb->data + 12, 4);
- tag = (__be16 *)(new_skb->data + 12);
- *tag = cpu_to_be16(ETH_P_8021Q);
- *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
- }
+ if (ipv == 4) {
+ skb_pull(new_skb, ETH_HLEN);
+ } else if (skb_vlan_tag_present(new_skb)) {
+ skb_push(new_skb, VLAN_HLEN);
+ skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
+ skb_copy_to_linear_data_offset(new_skb, 4,
+ new_skb->data + 8, 4);
+ skb_copy_to_linear_data_offset(new_skb, 8,
+ new_skb->data + 12, 4);
+ tag = (__be16 *)(new_skb->data + 12);
+ *tag = cpu_to_be16(ETH_P_8021Q);
+ *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
}
- netif_stop_queue(dev);
-
/* fix hardware limitation: as long as we do not have sbal
* chaining we can not send long frag lists
*/
- if ((card->info.type != QETH_CARD_TYPE_IQD) &&
- ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
- (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
- int lin_rc = skb_linearize(new_skb);
+ if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
+ (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
+ rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
- if (lin_rc)
+ if (rc)
card->perf_stats.tx_linfail++;
else
card->perf_stats.tx_lin++;
}
- if (lin_rc)
- goto tx_drop;
+ if (rc)
+ goto out;
}
- nr_frags = skb_shinfo(new_skb)->nr_frags;
if (use_tso) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
@@ -2265,97 +2285,112 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
qeth_tso_fill_header(card, hdr, new_skb);
hdr_elements++;
} else {
- if (data_offset < 0) {
- hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
- qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
- new_skb->len -
- sizeof(struct qeth_hdr));
- } else {
- if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
- qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
- else {
- qeth_l3_fill_header(card, hdr, new_skb, ipv,
- cast_type,
- new_skb->len - data_offset);
- }
- }
-
- if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
- }
+ hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
+ qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+ new_skb->len - sizeof(struct qeth_hdr));
}
elements = use_tso ?
qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
- qeth_get_elements_no(card, new_skb, hdr_elements,
- (data_offset > 0) ? data_offset : 0);
+ qeth_get_elements_no(card, new_skb, hdr_elements, 0);
if (!elements) {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
- goto tx_drop;
+ rc = -E2BIG;
+ goto out;
}
elements += hdr_elements;
- if (card->info.type != QETH_CARD_TYPE_IQD) {
- int len;
- if (use_tso) {
- hd_len = sizeof(struct qeth_hdr_tso) +
- ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
- len = hd_len;
- } else {
- len = sizeof(struct qeth_hdr_layer3);
- }
+ if (use_tso) {
+ hd_len = sizeof(struct qeth_hdr_tso) +
+ ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
+ len = hd_len;
+ } else {
+ hd_len = 0;
+ len = sizeof(struct qeth_hdr_layer3);
+ }
- if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
- goto tx_drop;
- rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
- hd_len, elements);
- } else
- rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset,
- hd_len);
+ if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ is_sg = skb_is_nonlinear(new_skb);
+ rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
+ elements);
+out:
if (!rc) {
- card->stats.tx_packets++;
- card->stats.tx_bytes += tx_bytes;
if (new_skb != skb)
dev_kfree_skb_any(skb);
if (card->options.performance_stats) {
+ card->perf_stats.buf_elements_sent += elements;
+ if (is_sg)
+ card->perf_stats.sg_skbs_sent++;
if (use_tso) {
card->perf_stats.large_send_bytes += tx_bytes;
card->perf_stats.large_send_cnt++;
}
- if (nr_frags) {
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent += nr_frags + 1;
- }
}
- rc = NETDEV_TX_OK;
} else {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
+ if (new_skb != skb)
+ dev_kfree_skb_any(new_skb);
+ }
+ return rc;
+}
- if (rc == -EBUSY) {
- if (new_skb != skb)
- dev_kfree_skb_any(new_skb);
- return NETDEV_TX_BUSY;
- } else
+static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int cast_type = qeth_l3_get_cast_type(skb);
+ struct qeth_card *card = dev->ml_priv;
+ int ipv = qeth_get_ip_version(skb);
+ struct qeth_qdio_out_q *queue;
+ int tx_bytes = skb->len;
+ int rc;
+
+ if (IS_IQD(card)) {
+ if (card->options.sniffer)
+ goto tx_drop;
+ if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
+ (card->options.cq == QETH_CQ_ENABLED &&
+ skb->protocol != htons(ETH_P_AF_IUCV)))
goto tx_drop;
}
- netif_wake_queue(dev);
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
- return rc;
+ if (card->state != CARD_STATE_UP || !card->lan_online) {
+ card->stats.tx_carrier_errors++;
+ goto tx_drop;
+ }
+
+ if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
+ goto tx_drop;
+
+ queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
+ if (card->options.performance_stats) {
+ card->perf_stats.outbound_cnt++;
+ card->perf_stats.outbound_start_time = qeth_get_micros();
+ }
+ netif_stop_queue(dev);
+
+ if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
+ rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
+ else
+ rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+
+ if (!rc) {
+ card->stats.tx_packets++;
+ card->stats.tx_bytes += tx_bytes;
+ if (card->options.performance_stats)
+ card->perf_stats.outbound_time += qeth_get_micros() -
+ card->perf_stats.outbound_start_time;
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+ } else if (rc == -EBUSY) {
+ return NETDEV_TX_BUSY;
+ } /* else fall through */
tx_drop:
card->stats.tx_dropped++;
card->stats.tx_errors++;
- if ((new_skb != skb) && new_skb)
- dev_kfree_skb_any(new_skb);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
@@ -2449,7 +2484,6 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
- .ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
@@ -2466,7 +2500,6 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
- .ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
@@ -2479,6 +2512,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
{
int rc;
+ if (card->dev->netdev_ops)
+ return 0;
+
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -2487,9 +2523,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
}
- card->dev = alloc_etherdev(0);
- if (!card->dev)
- return -ENODEV;
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
/*IPv6 address autoconfiguration stuff*/
@@ -2497,9 +2530,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
card->dev->dev_id = card->info.unique_id & 0xffff;
- card->dev->hw_features |= NETIF_F_SG;
- card->dev->vlan_features |= NETIF_F_SG;
-
if (!card->info.guestlan) {
card->dev->features |= NETIF_F_SG;
card->dev->hw_features |= NETIF_F_TSO |
@@ -2513,38 +2543,35 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
- card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
- ether_setup);
- if (!card->dev)
- return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
+
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
- return rc;
+ goto out;
+
if (card->options.hsuid[0])
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
return -ENODEV;
- card->dev->ml_priv = card;
- card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
- card->dev->mtu = card->info.initial_mtu;
- card->dev->min_mtu = 64;
- card->dev->max_mtu = ETH_MAX_MTU;
- card->dev->dev_port = card->info.portno;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
+ card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
+
netif_keep_dst(card->dev);
- netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
- PAGE_SIZE);
+ if (card->dev->hw_features & NETIF_F_TSO)
+ netif_set_gso_max_size(card->dev,
+ PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
- SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- netif_carrier_off(card->dev);
- return register_netdev(card->dev);
+ rc = register_netdev(card->dev);
+out:
+ if (rc)
+ card->dev->netdev_ops = NULL;
+ return rc;
}
static const struct device_type qeth_l3_devtype = {
@@ -2582,15 +2609,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
- if (card->dev) {
- unregister_netdev(card->dev);
- free_netdev(card->dev);
- card->dev = NULL;
- }
-
+ unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
- return;
}
static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
@@ -2612,10 +2633,9 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (!card->dev && qeth_l3_setup_netdev(card)) {
- rc = -ENODEV;
+ rc = qeth_l3_setup_netdev(card);
+ if (rc)
goto out_remove;
- }
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -2662,14 +2682,16 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
netif_carrier_on(card->dev);
else
netif_carrier_off(card->dev);
+
+ qeth_enable_hw_features(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
rtnl_lock();
- if (recovery_mode)
+ if (recovery_mode) {
__qeth_l3_open(card->dev);
- else
+ qeth_l3_set_rx_mode(card->dev);
+ } else {
dev_open(card->dev);
- qeth_l3_set_rx_mode(card->dev);
- qeth_recover_features(card->dev);
+ }
rtnl_unlock();
}
qeth_trace_features(card);
@@ -2710,8 +2732,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- if (card->dev && netif_carrier_ok(card->dev))
- netif_carrier_off(card->dev);
+ netif_carrier_off(card->dev);
recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -2779,8 +2800,7 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->dev)
- netif_device_detach(card->dev);
+ netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
@@ -2813,8 +2833,7 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
rc = __qeth_l3_set_online(card->gdev, 0);
out:
qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (card->dev)
- netif_device_attach(card->dev);
+ netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index f61192a048f4..45ac6d8705c6 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -299,8 +299,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (strlen(tmp) == 0) {
/* delete ip address only */
card->options.hsuid[0] = '\0';
- if (card->dev)
- memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
qeth_configure_cq(card, QETH_CQ_DISABLED);
return count;
}
@@ -311,8 +310,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
snprintf(card->options.hsuid, sizeof(card->options.hsuid),
"%-8s", tmp);
ASCEBC(card->options.hsuid, 8);
- if (card->dev)
- memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
rc = qeth_l3_modify_hsuid(card, true);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index a3a8c8d9d717..94f4d8fe85e0 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -101,7 +101,7 @@ static void __init zfcp_init_device_setup(char *devstr)
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
- strncpy(busid, token, ZFCP_BUS_ID_SIZE);
+ strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
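The zfcp change above swaps strncpy() for strlcpy() because strncpy() leaves the destination unterminated when the source is at least ZFCP_BUS_ID_SIZE bytes long, while strlcpy() always NUL-terminates. A self-contained illustration with a local strlcpy() (ISO C has no strlcpy, so this sketch defines its own):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len < size - 1 ? len : size - 1;

                memcpy(dst, src, n);
                dst[n] = '\0';          /* always terminated */
        }
        return len;                     /* full source length, as strlcpy reports */
}

int main(void)
{
        char busid[8];

        my_strlcpy(busid, "0.0.4711-too-long", sizeof(busid));
        printf("'%s'\n", busid);        /* a safely terminated prefix */
        return 0;
}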
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 99ba4a770406..27521fc3ef5a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twa_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_DISABLE_INTERRUPTS(tw_dev);
/* Initialize the card */
- if (twa_reset_sequence(tw_dev, 0))
+ if (twa_reset_sequence(tw_dev, 0)) {
+ retval = -ENOMEM;
goto out_iounmap;
+ }
/* Set host specific parameters */
if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index cf9f2a09b47d..40c1e6e64f58 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twl_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_iomap(pdev, 1, 0);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+ retval = -ENOMEM;
goto out_iounmap;
}
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f6179e3d6953..471366945bd4 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1925,7 +1925,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
if (test_bit(TW_IN_RESET, &tw_dev->flags))
return SCSI_MLQUEUE_HOST_BUSY;
- /* Save done function into Scsi_Cmnd struct */
+ /* Save done function into struct scsi_cmnd */
SCpnt->scsi_done = done;
/* Queue the command and get a request id */
@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (tw_initialize_device_extension(tw_dev)) {
printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_resource_start(pdev, 0);
if (!tw_dev->base_addr) {
printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
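The 3ware probe fixes above all have the same shape: a failure branch jumped to the unwind labels without setting retval, so probe could return stale, even zero, status on error. A compact userspace sketch of the corrected idiom (fake_probe and fail_step are illustrative names):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int fake_probe(int fail_step)
{
        int retval;
        void *ext = NULL, *mmio = NULL;

        ext = malloc(16);
        if (!ext || fail_step == 1) {
                retval = -ENOMEM;       /* the assignments the patch adds */
                goto out_free_ext;
        }
        mmio = malloc(16);
        if (!mmio || fail_step == 2) {
                retval = -ENOMEM;
                goto out_free_mmio;
        }
        retval = 0;
out_free_mmio:
        free(mmio);                     /* free(NULL) is a no-op */
out_free_ext:
        free(ext);
        return retval;
}

int main(void)
{
        printf("%d %d %d\n", fake_probe(0), fake_probe(1), fake_probe(2));
        return 0;
}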
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 35c909bbf8ba..8fc851a9e116 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -49,6 +49,7 @@ config SCSI_NETLINK
config SCSI_MQ_DEFAULT
bool "SCSI: use blk-mq I/O path by default"
+ default y
depends on SCSI
---help---
This option enables the new blk-mq based I/O path for SCSI
@@ -841,18 +842,6 @@ config SCSI_IZIP_SLOW_CTR
Generally, saying N is fine.
-config SCSI_NCR_D700
- tristate "NCR Dual 700 MCA SCSI support"
- depends on MCA && SCSI
- select SCSI_SPI_ATTRS
- help
- This is a driver for the MicroChannel Dual 700 card produced by
- NCR and commonly used in 345x/35xx/4100 class machines. It always
- tries to negotiate sync and uses tag command queueing.
-
- Unless you have an NCR manufactured machine, the chances are that
- you do not have this SCSI card, so say N.
-
config SCSI_LASI700
tristate "HP Lasi SCSI support for 53c700/710"
depends on GSC && SCSI
@@ -1000,21 +989,9 @@ config SCSI_ZALON
used on the add-in Bluefish, Barracuda & Shrike SCSI cards.
Say Y here if you have one of these machines or cards.
-config SCSI_NCR_Q720
- tristate "NCR Quad 720 MCA SCSI support"
- depends on MCA && SCSI
- select SCSI_SPI_ATTRS
- help
- This is a driver for the MicroChannel Quad 720 card produced by
- NCR and commonly used in 345x/35xx/4100 class machines. It always
- tries to negotiate sync and uses tag command queueing.
-
- Unless you have an NCR manufactured machine, the chances are that
- you do not have this SCSI card, so say N.
-
config SCSI_NCR53C8XX_DEFAULT_TAGS
int "default tagged command queue depth"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "8"
---help---
"Tagged command queuing" is a feature of SCSI-2 which improves
@@ -1040,7 +1017,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS
config SCSI_NCR53C8XX_MAX_TAGS
int "maximum number of queued commands"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "32"
---help---
This option allows you to specify the maximum number of commands
@@ -1057,7 +1034,7 @@ config SCSI_NCR53C8XX_MAX_TAGS
config SCSI_NCR53C8XX_SYNC
int "synchronous transfers frequency in MHz"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "20"
---help---
The SCSI Parallel Interface-2 Standard defines 5 classes of transfer
@@ -1091,7 +1068,7 @@ config SCSI_NCR53C8XX_SYNC
config SCSI_NCR53C8XX_NO_DISCONNECT
bool "not allow targets to disconnect"
- depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
+ depends on SCSI_ZALON && SCSI_NCR53C8XX_DEFAULT_TAGS=0
help
This option is only provided for safety if you suspect some SCSI
device of yours to not support properly the target-disconnect
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 80aca2456353..6d71b2a9592b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -21,6 +21,7 @@ CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_SCSI) += scsi_mod.o
+obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
@@ -76,8 +77,6 @@ obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
-obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
@@ -156,7 +155,6 @@ obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \
scsicam.o scsi_error.o scsi_lib.o
-scsi_mod-y += scsi_common.o
scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
@@ -180,7 +178,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
-DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
-NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
deleted file mode 100644
index b39a2409a507..000000000000
--- a/drivers/scsi/NCR_D700.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Dual 700 MCA SCSI Driver
- *
- * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-
-/* Notes:
- *
- * Most of the work is done in the chip specific module, 53c700.o
- *
- * TODO List:
- *
- * 1. Extract the SCSI ID from the voyager CMOS table (necessary to
- * support multi-host environments.
- *
- * */
-
-
-/* CHANGELOG
- *
- * Version 2.2
- *
- * Added mca_set_adapter_name().
- *
- * Version 2.1
- *
- * Modularise the driver into a Board piece (this file) and a chip
- * piece 53c700.[ch] and 53c700.scr, added module options. You can
- * now specify the scsi id by the parameters
- *
- * NCR_D700=slot:<n> [siop:<n>] id:<n> ....
- *
- * They need to be comma separated if compiled into the kernel
- *
- * Version 2.0
- *
- * Initial implementation of TCQ (Tag Command Queueing). TCQ is full
- * featured and uses the clock algorithm to keep track of outstanding
- * tags and guard against individual tag starvation. Also fixed a bug
- * in all of the 1.x versions where the D700_data_residue() function
- * was returning results off by 32 bytes (and thus causing the same 32
- * bytes to be written twice corrupting the data block). It turns out
- * the 53c700 only has a 6 bit DBC and DFIFO registers not 7 bit ones
- * like the 53c710 (The 710 is the only data manual still available,
- * which I'd been using to program the 700).
- *
- * Version 1.2
- *
- * Much improved message handling engine
- *
- * Version 1.1
- *
- * Add code to handle selection reasonably correctly. By the time we
- * get the selection interrupt, we've already responded, but drop off the
- * bus and hope the selector will go away.
- *
- * Version 1.0:
- *
- * Initial release. Fully functional except for procfs and tag
- * command queueing. Has only been tested on cards with 53c700-66
- * chips and only single ended. Features are
- *
- * 1. Synchronous data transfers to offset 8 (limit of 700-66) and
- * 100ns (10MHz) limit of SCSI-2
- *
- * 2. Disconnection and reselection
- *
- * Testing:
- *
- * I've only really tested this with the 700-66 chip, but have done
- * soak tests in multi-device environments to verify that
- * disconnections and reselections are being processed correctly.
- * */
-
-#define NCR_D700_VERSION "2.2"
-
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mca.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_transport.h>
-#include <scsi/scsi_transport_spi.h>
-
-#include "53c700.h"
-#include "NCR_D700.h"
-
-static char *NCR_D700; /* command line from insmod */
-
-MODULE_AUTHOR("James Bottomley");
-MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
-MODULE_LICENSE("GPL");
-module_param(NCR_D700, charp, 0);
-
-static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] =
- { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
-
-#ifdef MODULE
-#define ARG_SEP ' '
-#else
-#define ARG_SEP ','
-#endif
-
-static int __init
-param_setup(char *string)
-{
- char *pos = string, *next;
- int slot = -1, siop = -1;
-
- while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
- int val = (int)simple_strtoul(++next, NULL, 0);
-
- if(!strncmp(pos, "slot:", 5))
- slot = val;
- else if(!strncmp(pos, "siop:", 5))
- siop = val;
- else if(!strncmp(pos, "id:", 3)) {
- if(slot == -1) {
- printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n");
- } else if(slot > MCA_MAX_SLOT_NR) {
- printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val);
- } else {
- if(siop != 0 && siop != 1) {
- id_array[slot*2] = val;
- id_array[slot*2 + 1] =val;
- } else {
- id_array[slot*2 + siop] = val;
- }
- }
- }
- if((pos = strchr(pos, ARG_SEP)) != NULL)
- pos++;
- }
- return 1;
-}
-
-/* Host template. The 53c700 routine NCR_700_detect will
- * fill in all of the missing routines */
-static struct scsi_host_template NCR_D700_driver_template = {
- .module = THIS_MODULE,
- .name = "NCR Dual 700 MCA",
- .proc_name = "NCR_D700",
- .this_id = 7,
-};
-
-/* We needs this helper because we have two hosts per struct device */
-struct NCR_D700_private {
- struct device *dev;
- struct Scsi_Host *hosts[2];
- char name[30];
- char pad;
-};
-
-static int
-NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
- int slot, u32 region, int differential)
-{
- struct NCR_700_Host_Parameters *hostdata;
- struct Scsi_Host *host;
- int ret;
-
- hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
- "data, detatching\n", siop);
- return -ENOMEM;
- }
-
- if (!request_region(region, 64, "NCR_D700")) {
- printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
- region);
- ret = -ENODEV;
- goto region_failed;
- }
-
- /* Fill in the three required pieces of hostdata */
- hostdata->base = ioport_map(region, 64);
- hostdata->differential = (((1<<siop) & differential) != 0);
- hostdata->clock = NCR_D700_CLOCK_MHZ;
- hostdata->burst_length = 8;
-
- /* and register the siop */
- host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
- if (!host) {
- ret = -ENOMEM;
- goto detect_failed;
- }
-
- p->hosts[siop] = host;
- /* FIXME: read this from SUS */
- host->this_id = id_array[slot * 2 + siop];
- host->irq = irq;
- host->base = region;
- scsi_scan_host(host);
-
- return 0;
-
- detect_failed:
- release_region(region, 64);
- region_failed:
- kfree(hostdata);
-
- return ret;
-}
-
-static irqreturn_t
-NCR_D700_intr(int irq, void *data)
-{
- struct NCR_D700_private *p = (struct NCR_D700_private *)data;
- int i, found = 0;
-
- for (i = 0; i < 2; i++)
- if (p->hosts[i] &&
- NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED)
- found++;
-
- return found ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/* Detect a D700 card. Note, because of the setup --- the chips are
- * essentially connectecd to the MCA bus independently, it is easier
- * to set them up as two separate host adapters, rather than one
- * adapter with two channels */
-static int
-NCR_D700_probe(struct device *dev)
-{
- struct NCR_D700_private *p;
- int differential;
- static int banner = 1;
- struct mca_device *mca_dev = to_mca_device(dev);
- int slot = mca_dev->slot;
- int found = 0;
- int irq, i;
- int pos3j, pos3k, pos3a, pos3b, pos4;
- __u32 base_addr, offset_addr;
-
- /* enable board interrupt */
- pos4 = mca_device_read_pos(mca_dev, 4);
- pos4 |= 0x4;
- mca_device_write_pos(mca_dev, 4, pos4);
-
- mca_device_write_pos(mca_dev, 6, 9);
- pos3j = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 10);
- pos3k = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 0);
- pos3a = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 1);
- pos3b = mca_device_read_pos(mca_dev, 3);
-
- base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0;
- offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70;
-
- irq = (pos4 & 0x3) + 11;
- if(irq >= 13)
- irq++;
- if(banner) {
- printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n"
- "NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n"
- "NCR D700:\n");
- banner = 0;
- }
- /* now do the bus related transforms */
- irq = mca_device_transform_irq(mca_dev, irq);
- base_addr = mca_device_transform_ioport(mca_dev, base_addr);
- offset_addr = mca_device_transform_ioport(mca_dev, offset_addr);
-
- printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr);
-
- /*outb(BOARD_RESET, base_addr);*/
-
- /* clear any pending interrupts */
- (void)inb(base_addr + 0x08);
- /* get modctl, used later for setting diff bits */
- switch(differential = (inb(base_addr + 0x08) >> 6)) {
- case 0x00:
- /* only SIOP1 differential */
- differential = 0x02;
- break;
- case 0x01:
- /* Both SIOPs differential */
- differential = 0x03;
- break;
- case 0x03:
- /* No SIOPs differential */
- differential = 0x00;
- break;
- default:
- printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n",
- differential);
- differential = 0x00;
- break;
- }
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->dev = dev;
- snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
- if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
- printk(KERN_ERR "D700: request_irq failed\n");
- kfree(p);
- return -EBUSY;
- }
- /* plumb in both 700 chips */
- for (i = 0; i < 2; i++) {
- int err;
-
- if ((err = NCR_D700_probe_one(p, i, irq, slot,
- offset_addr + (0x80 * i),
- differential)) != 0)
- printk("D700: SIOP%d: probe failed, error = %d\n",
- i, err);
- else
- found++;
- }
-
- if (!found) {
- kfree(p);
- return -ENODEV;
- }
-
- mca_device_set_claim(mca_dev, 1);
- mca_device_set_name(mca_dev, "NCR_D700");
- dev_set_drvdata(dev, p);
- return 0;
-}
-
-static void
-NCR_D700_remove_one(struct Scsi_Host *host)
-{
- scsi_remove_host(host);
- NCR_700_release(host);
- kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]);
- free_irq(host->irq, host);
- release_region(host->base, 64);
-}
-
-static int
-NCR_D700_remove(struct device *dev)
-{
- struct NCR_D700_private *p = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < 2; i++)
- NCR_D700_remove_one(p->hosts[i]);
-
- kfree(p);
- return 0;
-}
-
-static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 };
-
-static struct mca_driver NCR_D700_driver = {
- .id_table = NCR_D700_id_table,
- .driver = {
- .name = "NCR_D700",
- .bus = &mca_bus_type,
- .probe = NCR_D700_probe,
- .remove = NCR_D700_remove,
- },
-};
-
-static int __init NCR_D700_init(void)
-{
-#ifdef MODULE
- if (NCR_D700)
- param_setup(NCR_D700);
-#endif
-
- return mca_register_driver(&NCR_D700_driver);
-}
-
-static void __exit NCR_D700_exit(void)
-{
- mca_unregister_driver(&NCR_D700_driver);
-}
-
-module_init(NCR_D700_init);
-module_exit(NCR_D700_exit);
-
-__setup("NCR_D700=", param_setup);
diff --git a/drivers/scsi/NCR_D700.h b/drivers/scsi/NCR_D700.h
deleted file mode 100644
index eb675d782ef6..000000000000
--- a/drivers/scsi/NCR_D700.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Dual 700 MCA SCSI Driver
- *
- * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
- */
-
-#ifndef _NCR_D700_H
-#define _NCR_D700_H
-
-/* Don't turn on debugging messages */
-#undef NCR_D700_DEBUG
-
-/* The MCA identifier */
-#define NCR_D700_MCA_ID 0x0092
-
-/* Defines for the Board registers */
-#define BOARD_RESET 0x80 /* board level reset */
-#define ADD_PARENB 0x04 /* Address Parity Enabled */
-#define DAT_PARENB 0x01 /* Data Parity Enabled */
-#define SFBK_ENB 0x10 /* SFDBK Interrupt Enabled */
-#define LED0GREEN 0x20 /* Led 0 (red 0; green 1) */
-#define LED1GREEN 0x40 /* Led 1 (red 0; green 1) */
-#define LED0RED 0xDF /* Led 0 (red 0; green 1) */
-#define LED1RED 0xBF /* Led 1 (red 0; green 1) */
-
-#define NCR_D700_CLOCK_MHZ 50
-
-#endif
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
deleted file mode 100644
index 54e7d26908ee..000000000000
--- a/drivers/scsi/NCR_Q720.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Quad 720 MCA SCSI Driver
- *
- * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
- */
-
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mca.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-#include "ncr53c8xx.h"
-
-#include "NCR_Q720.h"
-
-static struct ncr_chip q720_chip __initdata = {
- .revision_id = 0x0f,
- .burst_max = 3,
- .offset_max = 8,
- .nr_divisor = 4,
- .features = FE_WIDE | FE_DIFF | FE_VARCLK,
-};
-
-MODULE_AUTHOR("James Bottomley");
-MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver");
-MODULE_LICENSE("GPL");
-
-#define NCR_Q720_VERSION "0.9"
-
-/* We needs this helper because we have up to four hosts per struct device */
-struct NCR_Q720_private {
- struct device *dev;
- void __iomem * mem_base;
- __u32 phys_mem_base;
- __u32 mem_size;
- __u8 irq;
- __u8 siops;
- __u8 irq_enable;
- struct Scsi_Host *hosts[4];
-};
-
-static struct scsi_host_template NCR_Q720_tpnt = {
- .module = THIS_MODULE,
- .proc_name = "NCR_Q720",
-};
-
-static irqreturn_t
-NCR_Q720_intr(int irq, void *data)
-{
- struct NCR_Q720_private *p = (struct NCR_Q720_private *)data;
- __u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4;
- __u8 siop;
-
- sir |= ~p->irq_enable;
-
- if(sir == 0xff)
- return IRQ_NONE;
-
-
- while((siop = ffz(sir)) < p->siops) {
- sir |= 1<<siop;
- ncr53c8xx_intr(irq, p->hosts[siop]);
- }
- return IRQ_HANDLED;
-}
-
-static int __init
-NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop,
- int irq, int slot, __u32 paddr, void __iomem *vaddr)
-{
- struct ncr_device device;
- __u8 scsi_id;
- static int unit = 0;
- __u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
- __u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20;
- __u8 version;
- int error;
-
- scsi_id = scsr1 >> 4;
- /* enable burst length 16 (FIXME: should allow this) */
- scsr1 |= 0x02;
- /* force a siop reset */
- scsr1 |= 0x04;
- writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
- udelay(10);
- version = readb(vaddr + 0x18) >> 4;
-
- memset(&device, 0, sizeof(struct ncr_device));
- /* Initialise ncr_device structure with items required by ncr_attach. */
- device.chip = q720_chip;
- device.chip.revision_id = version;
- device.host_id = scsi_id;
- device.dev = p->dev;
- device.slot.base = paddr;
- device.slot.base_c = paddr;
- device.slot.base_v = vaddr;
- device.slot.irq = irq;
- device.differential = differential ? 2 : 0;
- printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop,
- (unsigned long)paddr, differential, version);
-
- p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device);
-
- if (!p->hosts[siop])
- goto fail;
-
- p->irq_enable |= (1<<siop);
- scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
- /* clear the disable interrupt bit */
- scsr1 &= ~0x01;
- writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
-
- error = scsi_add_host(p->hosts[siop], p->dev);
- if (error)
- ncr53c8xx_release(p->hosts[siop]);
- else
- scsi_scan_host(p->hosts[siop]);
- return error;
-
- fail:
- return -ENODEV;
-}
-
-/* Detect a Q720 card. Note, because of the setup --- the chips are
- * essentially connectecd to the MCA bus independently, it is easier
- * to set them up as two separate host adapters, rather than one
- * adapter with two channels */
-static int __init
-NCR_Q720_probe(struct device *dev)
-{
- struct NCR_Q720_private *p;
- static int banner = 1;
- struct mca_device *mca_dev = to_mca_device(dev);
- int slot = mca_dev->slot;
- int found = 0;
- int irq, i, siops;
- __u8 pos2, pos4, asr2, asr9, asr10;
- __u16 io_base;
- __u32 base_addr, mem_size;
- void __iomem *mem_base;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- pos2 = mca_device_read_pos(mca_dev, 2);
- /* enable device */
- pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
- mca_device_write_pos(mca_dev, 2, pos2);
-
- io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT;
-
-
- if(banner) {
- printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n"
- "NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n"
- "NCR Q720:\n");
- banner = 0;
- }
- io_base = mca_device_transform_ioport(mca_dev, io_base);
-
- /* OK, this is phase one of the bootstrap, we now know the
- * I/O space base address. All the configuration registers
- * are mapped here (including pos) */
-
- /* sanity check I/O mapping */
- i = inb(io_base) | (inb(io_base+1)<<8);
- if(i != NCR_Q720_MCA_ID) {
- printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i);
- kfree(p);
- return -ENODEV;
- }
-
- /* Phase II, find the ram base and memory map the board register */
- pos4 = inb(io_base + 4);
- /* enable streaming data */
- pos4 |= 0x01;
- outb(pos4, io_base + 4);
- base_addr = (pos4 & 0x7e) << 20;
- base_addr += (pos4 & 0x80) << 23;
- asr10 = inb(io_base + 0x12);
- base_addr += (asr10 & 0x80) << 24;
- base_addr += (asr10 & 0x70) << 23;
-
- /* OK, got the base addr, now we need to find the ram size,
- * enable and map it */
- asr9 = inb(io_base + 0x11);
- i = (asr9 & 0xc0) >> 6;
- if(i == 0)
- mem_size = 1024;
- else
- mem_size = 1 << (19 + i);
-
- /* enable the sram mapping */
- asr9 |= 0x20;
-
- /* disable the rom mapping */
- asr9 &= ~0x10;
-
- outb(asr9, io_base + 0x11);
-
- if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) {
- printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx\n-0x%lx",
- (unsigned long)base_addr,
- (unsigned long)(base_addr + mem_size));
- goto out_free;
- }
-
- if (dma_declare_coherent_memory(dev, base_addr, base_addr,
- mem_size, 0)) {
- printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
- goto out_release_region;
- }
-
- /* The first 1k of the memory buffer is a memory map of the registers
- */
- mem_base = dma_mark_declared_memory_occupied(dev, base_addr,
- 1024);
- if (IS_ERR(mem_base)) {
- printk("NCR_Q720 failed to reserve memory mapped region\n");
- goto out_release;
- }
-
- /* now also enable accesses in asr 2 */
- asr2 = inb(io_base + 0x0a);
-
- asr2 |= 0x01;
-
- outb(asr2, io_base + 0x0a);
-
- /* get the number of SIOPs (this should be 2 or 4) */
- siops = ((asr2 & 0xe0) >> 5) + 1;
-
- /* sanity check mapping (again) */
- i = readw(mem_base);
- if(i != NCR_Q720_MCA_ID) {
- printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i);
- goto out_release;
- }
-
- irq = readb(mem_base + 5) & 0x0f;
-
-
- /* now do the bus related transforms */
- irq = mca_device_transform_irq(mca_dev, irq);
-
- printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops);
- printk(KERN_NOTICE "NCR Q720: On board ram %dk\n", mem_size/1024);
-
- p->dev = dev;
- p->mem_base = mem_base;
- p->phys_mem_base = base_addr;
- p->mem_size = mem_size;
- p->irq = irq;
- p->siops = siops;
-
- if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
- printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
- goto out_release;
- }
- /* disable all the siop interrupts */
- for(i = 0; i < siops; i++) {
- void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1;
- __u8 scsr1 = readb(reg_scsr1);
- scsr1 |= 0x01;
- writeb(scsr1, reg_scsr1);
- }
-
- /* plumb in all 720 chips */
- for (i = 0; i < siops; i++) {
- void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- __u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- __u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- int err;
-
- outb(0xff, port + 0x40);
- outb(0x07, port + 0x41);
- if ((err = NCR_Q720_probe_one(p, i, irq, slot,
- siop_p_base, siop_v_base)) != 0)
- printk("Q720: SIOP%d: probe failed, error = %d\n",
- i, err);
- else
- found++;
- }
-
- if (!found) {
- kfree(p);
- return -ENODEV;
- }
-
- mca_device_set_claim(mca_dev, 1);
- mca_device_set_name(mca_dev, "NCR_Q720");
- dev_set_drvdata(dev, p);
-
- return 0;
-
- out_release:
- dma_release_declared_memory(dev);
- out_release_region:
- release_mem_region(base_addr, mem_size);
- out_free:
- kfree(p);
-
- return -ENODEV;
-}
-
-static void __exit
-NCR_Q720_remove_one(struct Scsi_Host *host)
-{
- scsi_remove_host(host);
- ncr53c8xx_release(host);
-}
-
-static int __exit
-NCR_Q720_remove(struct device *dev)
-{
- struct NCR_Q720_private *p = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < p->siops; i++)
- if(p->hosts[i])
- NCR_Q720_remove_one(p->hosts[i]);
-
- dma_release_declared_memory(dev);
- release_mem_region(p->phys_mem_base, p->mem_size);
- free_irq(p->irq, p);
- kfree(p);
- return 0;
-}
-
-static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
-
-static struct mca_driver NCR_Q720_driver = {
- .id_table = NCR_Q720_id_table,
- .driver = {
- .name = "NCR_Q720",
- .bus = &mca_bus_type,
- .probe = NCR_Q720_probe,
- .remove = NCR_Q720_remove,
- },
-};
-
-static int __init
-NCR_Q720_init(void)
-{
- int ret = ncr53c8xx_init();
- if (!ret)
- ret = mca_register_driver(&NCR_Q720_driver);
- if (ret)
- ncr53c8xx_exit();
- return ret;
-}
-
-static void __exit
-NCR_Q720_exit(void)
-{
- mca_unregister_driver(&NCR_Q720_driver);
- ncr53c8xx_exit();
-}
-
-module_init(NCR_Q720_init);
-module_exit(NCR_Q720_exit);
diff --git a/drivers/scsi/NCR_Q720.h b/drivers/scsi/NCR_Q720.h
deleted file mode 100644
index d5f46cdb736e..000000000000
--- a/drivers/scsi/NCR_Q720.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Quad 720 MCA SCSI Driver
- *
- * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
- */
-
-#ifndef _NCR_Q720_H
-#define _NCR_Q720_H
-
-/* The MCA identifier */
-#define NCR_Q720_MCA_ID 0x0720
-
-#define NCR_Q720_CLOCK_MHZ 30
-
-#define NCR_Q720_POS2_BOARD_ENABLE 0x01
-#define NCR_Q720_POS2_INTERRUPT_ENABLE 0x02
-#define NCR_Q720_POS2_PARITY_DISABLE 0x04
-#define NCR_Q720_POS2_IO_MASK 0xf8
-#define NCR_Q720_POS2_IO_SHIFT 8
-
-#define NCR_Q720_CHIP_REGISTER_OFFSET 0x200
-#define NCR_Q720_SCSR_OFFSET 0x070
-#define NCR_Q720_SIOP_SHIFT 0x080
-
-#endif
-
-
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index b2942ec3d455..23b17621b6d2 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -143,7 +143,7 @@ static u8 wait_chip_ready(struct orc_host * host)
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
return 1;
- mdelay(100);
+ msleep(100);
}
return 0;
}
@@ -155,7 +155,7 @@ static u8 wait_firmware_ready(struct orc_host * host)
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
return 1;
- mdelay(100); /* wait 100ms before try again */
+ msleep(100); /* wait 100ms before try again */
}
return 0;
}
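The a100u2w change above replaces mdelay(), which busy-spins the CPU, with msleep(), which yields to the scheduler during the up-to-one-second poll. A userspace analogue using nanosleep(); hw_ready() is a hypothetical stand-in for the inb() status check:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool hw_ready(int tries) { return tries >= 3; }  /* stand-in for inb() */

static bool wait_ready(void)
{
        const struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

        for (int i = 0; i < 10; i++) {  /* ~1 s budget, as in wait_chip_ready() */
                if (hw_ready(i))
                        return true;
                nanosleep(&ts, NULL);   /* sleep, don't burn cycles */
        }
        return false;
}

int main(void)
{
        printf("ready=%d\n", wait_ready());
        return 0;
}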
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a9831bd37a73..6e356325d8d9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -115,8 +115,6 @@
#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
#define ASENCODE_OVERLAPPED_COMMAND 0x00
-#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
-
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
@@ -1974,7 +1972,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
u32 lun_count, nexus;
u32 i, bus, target;
u8 expose_flag, attribs;
- u8 devtype;
lun_count = aac_get_safw_phys_lun_count(dev);
@@ -1992,23 +1989,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
continue;
if (expose_flag != 0) {
- devtype = AAC_DEVTYPE_RAID_MEMBER;
- goto update_devtype;
+ dev->hba_map[bus][target].devtype =
+ AAC_DEVTYPE_RAID_MEMBER;
+ continue;
}
if (nexus != 0 && (attribs & 8)) {
- devtype = AAC_DEVTYPE_NATIVE_RAW;
+ dev->hba_map[bus][target].devtype =
+ AAC_DEVTYPE_NATIVE_RAW;
dev->hba_map[bus][target].rmw_nexus =
nexus;
} else
- devtype = AAC_DEVTYPE_ARC_RAW;
+ dev->hba_map[bus][target].devtype =
+ AAC_DEVTYPE_ARC_RAW;
dev->hba_map[bus][target].scan_counter = dev->scan_counter;
aac_set_safw_target_qd(dev, bus, target);
-
-update_devtype:
- dev->hba_map[bus][target].devtype = devtype;
}
}
@@ -2962,7 +2959,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case SYNCHRONIZE_CACHE:
if (((aac_cache & 6) == 6) && dev->cache_protected) {
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
/* Issue FIB to tell Firmware to flush it's cache */
@@ -2990,7 +2988,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x80) {
/* unit serial number page */
arr[3] = setinqserial(dev, &arr[4],
@@ -3001,7 +3001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
char *sno = (char *)&inq_data;
@@ -3010,7 +3012,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else {
/* vpd page not implemented */
scsicmd->result = DID_OK << 16 |
@@ -3041,7 +3045,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
if (dev->in_reset)
@@ -3090,7 +3095,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
@@ -3116,7 +3122,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
@@ -3195,7 +3202,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd,
mode_buf_length);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
case MODE_SENSE_10:
@@ -3272,7 +3280,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(char *)&mpd10,
mode_buf_length);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
case REQUEST_SENSE:
@@ -3281,7 +3290,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
sizeof(struct sense_data));
memset(&dev->fsa_dev[cid].sense_data, 0,
sizeof(struct sense_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
case ALLOW_MEDIUM_REMOVAL:
@@ -3291,7 +3301,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
else
fsa_dev_ptr[cid].locked = 0;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
/*
* These commands are all No-Ops
@@ -3315,7 +3326,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
case START_STOP:
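The aacraid hunks above drop the AAC_STAT_GOOD macro and open-code the SCSI result word. The value packs three bytes: host byte, message byte, and SCSI status, all zero for the success case. A small sketch of the packing and unpacking (constants copied from the SCSI midlayer definitions):

#include <stdio.h>
#include <stdint.h>

#define DID_OK            0x00  /* host byte: no error */
#define COMMAND_COMPLETE  0x00  /* message byte */
#define SAM_STAT_GOOD     0x00  /* SCSI status byte */

int main(void)
{
        uint32_t result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

        printf("result=0x%08x host=0x%02x msg=0x%02x status=0x%02x\n",
               result, (result >> 16) & 0xff, (result >> 8) & 0xff,
               result & 0xff);
        return 0;
}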
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d62ddd63f4fe..6e1b022a823d 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -514,7 +514,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
* The only invalid cases are if the caller requests to wait and
* does not request a response and if the caller does not want a
* response and the Fib is not allocated from pool. If a response
- * is not requesed the Fib will just be deallocaed by the DPC
+ * is not requested the Fib will just be deallocated by the DPC
* routine when the response comes back from the adapter. No
* further processing will be done besides deleting the Fib. We
* will have a debug mode where the adapter can notify the host
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 417ba349e10e..ddc69738375f 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -65,7 +65,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
- * back to the system. If no response was requesed we just
+ * back to the system. If no response was requested we just
* deallocate the Fib here and continue.
*/
while(aac_consumer_get(dev, q, &entry))
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 620166694171..576cdf9cc120 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -319,7 +319,7 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
union aac_init *init;
init = dev->init;
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
// We can only use a 32 bit address here
rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 882f40353b96..efa96c1c6aa3 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -251,7 +251,7 @@ static void aac_sa_start_adapter(struct aac_dev *dev)
* Fill in the remaining pieces of the init.
*/
init = dev->init;
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 4ebb35a29caa..7a51ccfa8662 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -409,7 +409,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)
init = dev->init;
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
- init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r8.host_elapsed_seconds =
+ cpu_to_le32(ktime_get_real_seconds());
src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
lower_32_bits(dev->init_pa),
upper_32_bits(dev->init_pa),
@@ -417,7 +418,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)
(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
0, 0, 0, NULL, NULL, NULL, NULL, NULL);
} else {
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds =
+ cpu_to_le32(ktime_get_real_seconds());
// We can only use a 32 bit address here
src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
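The get_seconds() conversions above are part of the y2038 cleanup: get_seconds() returns unsigned long, which is 32 bits on 32-bit machines, while ktime_get_real_seconds() returns time64_t everywhere. The wire format here stays le32, so the truncation still happens, but at a single explicit point. A minimal sketch of the pattern, assuming the firmware field remains 32-bit:

    #include <linux/ktime.h>

    static __le32 host_elapsed_le32(void)
    {
            time64_t now = ktime_get_real_seconds();  /* 64-bit, y2038-safe */

            /* wire format is le32, so truncate explicitly at the boundary */
            return cpu_to_le32(lower_32_bits(now));
    }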
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 24e57e770432..713f69033f20 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2416,8 +2416,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %u, host_no %d,\n",
- atomic_read(&s->host_busy), s->host_no);
+ printk(" host_busy %d, host_no %d,\n",
+ scsi_host_busy(s), s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3182,8 +3182,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n",
- atomic_read(&shost->host_busy), shost->max_id,
+ " host_busy %d, max_id %u, max_lun %llu, max_channel %u\n",
+ scsi_host_busy(shost), shost->max_id,
shost->max_lun, shost->max_channel);
seq_printf(m,
@@ -8466,7 +8466,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp)
}
/*
- * Execute a single 'Scsi_Cmnd'.
+ * Execute a single 'struct scsi_cmnd'.
*/
static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
{
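The advansys hunks stop reading shost->host_busy directly and use the scsi_host_busy() accessor instead; the format specifier changes from %u to %d because the helper returns int. A hypothetical debug helper showing the intended call shape:

    /* Hypothetical helper, not from the patch. */
    static void dump_host_load(struct Scsi_Host *shost)
    {
            shost_printk(KERN_DEBUG, shost, "busy commands: %d\n",
                         scsi_host_busy(shost));
    }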
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index bc0058df31c6..4d7b0e0adbf7 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -422,16 +422,16 @@ enum aha152x_state {
*
*/
struct aha152x_hostdata {
- Scsi_Cmnd *issue_SC;
+ struct scsi_cmnd *issue_SC;
/* pending commands to issue */
- Scsi_Cmnd *current_SC;
+ struct scsi_cmnd *current_SC;
/* current command on the bus */
- Scsi_Cmnd *disconnected_SC;
+ struct scsi_cmnd *disconnected_SC;
/* commands that disconnected */
- Scsi_Cmnd *done_SC;
+ struct scsi_cmnd *done_SC;
/* command that was completed */
spinlock_t lock;
@@ -510,7 +510,7 @@ struct aha152x_hostdata {
*
*/
struct aha152x_scdata {
- Scsi_Cmnd *next; /* next sc in queue */
+ struct scsi_cmnd *next; /* next sc in queue */
struct completion *done;/* semaphore to block on */
struct scsi_eh_save ses;
};
@@ -633,7 +633,7 @@ static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
static void done(struct Scsi_Host *shpnt, int error);
/* diagnostics */
-static void show_command(Scsi_Cmnd * ptr);
+static void show_command(struct scsi_cmnd * ptr);
static void show_queues(struct Scsi_Host *shpnt);
static void disp_enintr(struct Scsi_Host *shpnt);
@@ -642,9 +642,9 @@ static void disp_enintr(struct Scsi_Host *shpnt);
* queue services:
*
*/
-static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
{
- Scsi_Cmnd *end;
+ struct scsi_cmnd *end;
SCNEXT(new_SC) = NULL;
if (!*SC)
@@ -656,9 +656,9 @@ static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
}
}
-static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
+static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd ** SC)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
ptr = *SC;
if (ptr) {
@@ -668,9 +668,10 @@ static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
return ptr;
}
-static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
+static inline struct scsi_cmnd *remove_lun_SC(struct scsi_cmnd ** SC,
+ int target, int lun)
{
- Scsi_Cmnd *ptr, *prev;
+ struct scsi_cmnd *ptr, *prev;
for (ptr = *SC, prev = NULL;
ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
@@ -689,9 +690,10 @@ static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
return ptr;
}
-static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
+static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC,
+ struct scsi_cmnd *SCp)
{
- Scsi_Cmnd *ptr, *prev;
+ struct scsi_cmnd *ptr, *prev;
for (ptr = *SC, prev = NULL;
ptr && SCp!=ptr;
@@ -912,8 +914,9 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
/*
* Queue a command and setup interrupts for a free bus.
*/
-static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
- int phase, void (*done)(Scsi_Cmnd *))
+static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
+ struct completion *complete,
+ int phase, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
@@ -987,7 +990,8 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
* queue a command
*
*/
-static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha152x_queue_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
return aha152x_internal_queue(SCpnt, NULL, 0, done);
}
@@ -998,7 +1002,7 @@ static DEF_SCSI_QCMD(aha152x_queue)
/*
*
*/
-static void reset_done(Scsi_Cmnd *SCpnt)
+static void reset_done(struct scsi_cmnd *SCpnt)
{
if(SCSEM(SCpnt)) {
complete(SCSEM(SCpnt));
@@ -1011,10 +1015,10 @@ static void reset_done(Scsi_Cmnd *SCpnt)
* Abort a command
*
*/
-static int aha152x_abort(Scsi_Cmnd *SCpnt)
+static int aha152x_abort(struct scsi_cmnd *SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
DO_LOCK(flags);
@@ -1052,7 +1056,7 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
* Reset a device
*
*/
-static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
+static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
DECLARE_COMPLETION(done);
@@ -1110,13 +1114,14 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
return ret;
}
-static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
+static void free_hard_reset_SCs(struct Scsi_Host *shpnt,
+ struct scsi_cmnd **SCs)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
ptr=*SCs;
while(ptr) {
- Scsi_Cmnd *next;
+ struct scsi_cmnd *next;
if(SCDATA(ptr)) {
next = SCNEXT(ptr);
@@ -1171,7 +1176,7 @@ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
* Reset the bus
*
*/
-static int aha152x_bus_reset(Scsi_Cmnd *SCpnt)
+static int aha152x_bus_reset(struct scsi_cmnd *SCpnt)
{
return aha152x_bus_reset_host(SCpnt->device->host);
}
@@ -1436,7 +1441,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
if(!(DONE_SC->SCp.phase & not_issued)) {
struct aha152x_scdata *sc;
- Scsi_Cmnd *ptr = DONE_SC;
+ struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
sc = SCDATA(ptr);
@@ -1451,7 +1456,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
}
if(DONE_SC && DONE_SC->scsi_done) {
- Scsi_Cmnd *ptr = DONE_SC;
+ struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
/* turn led off, when no commands are in the driver */
@@ -2247,13 +2252,13 @@ static void parerr_run(struct Scsi_Host *shpnt)
*/
static void rsti_run(struct Scsi_Host *shpnt)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n");
ptr=DISCONNECTED_SC;
while(ptr) {
- Scsi_Cmnd *next = SCNEXT(ptr);
+ struct scsi_cmnd *next = SCNEXT(ptr);
if (!ptr->device->soft_reset) {
remove_SC(&DISCONNECTED_SC, ptr);
@@ -2438,7 +2443,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
/*
* Show the command data of a command
*/
-static void show_command(Scsi_Cmnd *ptr)
+static void show_command(struct scsi_cmnd *ptr)
{
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
@@ -2462,7 +2467,7 @@ static void show_command(Scsi_Cmnd *ptr)
*/
static void show_queues(struct Scsi_Host *shpnt)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
DO_LOCK(flags);
@@ -2484,7 +2489,7 @@ static void show_queues(struct Scsi_Host *shpnt)
disp_enintr(shpnt);
}
-static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
+static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
int i;
@@ -2813,7 +2818,7 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
{
int i;
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
seq_puts(m, AHA152X_REVID "\n");
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index b48d5436f094..786bf7f32c64 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -207,11 +207,11 @@ static int aha1740_test_port(unsigned int base)
static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
{
struct Scsi_Host *host = (struct Scsi_Host *) dev_id;
- void (*my_done)(Scsi_Cmnd *);
+ void (*my_done)(struct scsi_cmnd *);
int errstatus, adapstat;
int number_serviced;
struct ecb *ecbptr;
- Scsi_Cmnd *SCtmp;
+ struct scsi_cmnd *SCtmp;
unsigned int base;
unsigned long flags;
int handled = 0;
@@ -311,7 +311,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
@@ -520,7 +521,7 @@ static int aha1740_biosparam(struct scsi_device *sdev,
return 0;
}
-static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
+static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy)
{
/*
* From Alan Cox :
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h
index dfdaa4d3ea4e..6eeed6da0b54 100644
--- a/drivers/scsi/aha1740.h
+++ b/drivers/scsi/aha1740.h
@@ -135,8 +135,8 @@ struct ecb { /* Enhanced Control Block 6.1 */
/* Hardware defined portion ends here, rest is driver defined */
u8 sense[MAX_SENSE]; /* Sense area */
u8 status[MAX_STATUS]; /* Status area */
- Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
- void (*done) (Scsi_Cmnd *); /* Completion Function */
+ struct scsi_cmnd *SCpnt; /* Link to the SCSI Command Block */
+ void (*done) (struct scsi_cmnd *); /* Completion Function */
};
#define AHA1740CMD_NOP 0x00 /* No OP */
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 80e5b283fd81..1391e5f35918 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
aic94xx_transport_template =
sas_domain_attach_transport(&aic94xx_transport_functions);
- if (!aic94xx_transport_template)
+ if (!aic94xx_transport_template) {
+ err = -ENOMEM;
goto out_destroy_caches;
+ }
err = pci_register_driver(&aic94xx_pci_driver);
if (err)
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 2e51ccc510e8..9c397a2794d6 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.40.00.05-20180309"
+#define ARCMSR_DRIVER_VERSION "v1.40.00.09-20180709"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 732b5d9242f1..12316ef4c893 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1061,6 +1061,13 @@ static int arcmsr_resume(struct pci_dev *pdev)
pci_set_master(pdev);
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_E) {
+ writel(0, &acb->pmuE->host_int_status);
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ acb->doneq_index = 0;
+ }
arcmsr_iop_init(acb);
arcmsr_init_get_devmap_timer(acb);
if (set_date_time)
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index b46997cf77e2..8996d2329e11 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1055,7 +1055,7 @@ static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on)
udelay(2); /* 2 deskew delay(45ns*2=90ns) */
val &= 0x007f; /* no bsy */
atp_writew_io(dev, 0, 0x1c, val);
- mdelay(128);
+ msleep(128);
val &= 0x00fb; /* after 1ms no msg */
atp_writew_io(dev, 0, 0x1c, val);
while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0)
@@ -1286,9 +1286,9 @@ static void atp870_init(struct Scsi_Host *shpnt)
k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10;
atp_writeb_base(atpdev, 0x3a, k);
atp_writeb_base(atpdev, 0x3a, k & 0xdf);
- mdelay(32);
+ msleep(32);
atp_writeb_base(atpdev, 0x3a, k);
- mdelay(32);
+ msleep(32);
atp_set_host_id(atpdev, 0, host_id);
tscam(shpnt, wide_chip, scam_on);
@@ -1370,9 +1370,9 @@ static void atp880_init(struct Scsi_Host *shpnt)
k = atp_readb_base(atpdev, 0x38) & 0x80;
atp_writeb_base(atpdev, 0x38, k);
atp_writeb_base(atpdev, 0x3b, 0x20);
- mdelay(32);
+ msleep(32);
atp_writeb_base(atpdev, 0x3b, 0);
- mdelay(32);
+ msleep(32);
atp_readb_io(atpdev, 0, 0x1b);
atp_readb_io(atpdev, 0, 0x17);
@@ -1454,10 +1454,10 @@ static void atp885_init(struct Scsi_Host *shpnt)
atp_writeb_base(atpdev, 0x28, k);
atp_writeb_pci(atpdev, 0, 1, 0x80);
atp_writeb_pci(atpdev, 1, 1, 0x80);
- mdelay(100);
+ msleep(100);
atp_writeb_pci(atpdev, 0, 1, 0);
atp_writeb_pci(atpdev, 1, 1, 0);
- mdelay(1000);
+ msleep(1000);
atp_readb_io(atpdev, 0, 0x1b);
atp_readb_io(atpdev, 0, 0x17);
atp_readb_io(atpdev, 1, 0x1b);
@@ -1473,7 +1473,7 @@ static void atp885_init(struct Scsi_Host *shpnt)
k = (k & 0x07) | 0x40;
atp_set_host_id(atpdev, 1, k);
- mdelay(600); /* this delay used to be called tscam_885() */
+ msleep(600); /* this delay used to be called tscam_885() */
dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n");
atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7);
atp_writeb_io(atpdev, 0, 0x16, 0x80);
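The atp870u conversions swap mdelay() for msleep() in these init and reset paths, which run in process context: mdelay() busy-waits for the whole interval, while msleep() puts the task to sleep and frees the CPU. A rule-of-thumb sketch (illustrative, not from the patch):

    #include <linux/delay.h>

    static void example_wait(bool atomic_context)
    {
            if (atomic_context)
                    mdelay(100);    /* spins; the only option when sleeping is illegal */
            else
                    msleep(100);    /* yields the CPU; may sleep slightly longer */
    }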
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 2eb66df3e3d6..c10aac4dbc5e 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1545,7 +1545,7 @@ int beiscsi_set_host_data(struct beiscsi_hba *phba)
snprintf((char *)ioctl->param.req.param_data,
sizeof(ioctl->param.req.param_data),
"Linux iSCSI v%s", BUILD_STR);
- ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4);
+ ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
ret = be_mbox_notify(ctrl);
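The be_cmds.c one-liner fixes an off-by-one: the length being rounded did not count the string's terminating NUL, so when the length was already a multiple of 4 the terminator fell outside the reported size. A compile-time sketch of the arithmetic, using a local ALIGN_UP stand-in for the kernel's ALIGN macro:

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    _Static_assert(ALIGN_UP(8, 4) == 8,      "NUL at offset 8 not covered");
    _Static_assert(ALIGN_UP(8 + 1, 4) == 12, "NUL at offset 8 covered");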
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a398c54139aa..c8f0a2144b44 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,11 +1,14 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 818d185d63f0..3660059784f7 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,11 +1,22 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful. ALL EXPRESS
+ * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+ * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
+ * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 66ca967f2850..8fdc07b6c686 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,11 +1,22 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful. ALL EXPRESS
+ * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+ * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
+ * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c05d6e91e4bd..c4a33317d344 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -70,21 +70,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
host_status = DID_ERROR;
}
}
- cmnd->result = ScsiResult(host_status, scsi_status);
+ cmnd->result = host_status << 16 | scsi_status;
break;
case BFI_IOIM_STS_TIMEDOUT:
- host_status = DID_TIME_OUT;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_TIME_OUT << 16;
break;
case BFI_IOIM_STS_PATHTOV:
- host_status = DID_TRANSPORT_DISRUPTED;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
default:
- host_status = DID_ERROR;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_ERROR << 16;
}
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
@@ -117,7 +114,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
struct bfad_itnim_data_s *itnim_data;
struct bfad_itnim_s *itnim;
- cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);
+ cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
@@ -144,7 +141,7 @@ bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
struct bfad_s *bfad = drv;
- cmnd->result = ScsiResult(DID_ERROR, 0);
+ cmnd->result = DID_ERROR << 16;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
@@ -1253,14 +1250,14 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
printk(KERN_WARNING
"bfad%d, queuecommand %p %x failed, BFA stopped\n",
bfad->inst_no, cmnd, cmnd->cmnd[0]);
- cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+ cmnd->result = DID_NO_CONNECT << 16;
goto out_fail_cmd;
}
itnim = itnim_data->itnim;
if (!itnim) {
- cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
+ cmnd->result = DID_IMM_RETRY << 16;
goto out_fail_cmd;
}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index af66275570c3..e61ed8dad0b4 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -44,7 +44,6 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa);
#define MAX_FCP_LUN 16384
#define BFAD_TARGET_RESET_TMO 60
#define BFAD_LUN_RESET_TMO 60
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
/*
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 8f03a869ac98..e9e669a6c2bc 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2727,6 +2727,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
goto arm_cq;
}
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index c535c52e72e5..1c5051b1c125 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -199,7 +199,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
- if (driver_byte(result) & DRIVER_SENSE) {
+ if (driver_byte(result) == DRIVER_SENSE) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
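The ch.c change tightens a flag test into an equality test: DRIVER_SENSE (0x08) is a value carried in the driver byte, not a lone bit to mask against the whole result, so comparing the extracted byte for equality states the real intent. A sketch of the extraction, matching the scsi.h helpers of this era:

    /* driver byte lives in bits 31:24 of cmd->result */
    #define drv_byte(result)        (((result) >> 24) & 0xff)

    /* old: drv_byte(result) & DRIVER_SENSE   -- worked because DRIVER_SENSE
     *                                           was usually the only bit set
     * new: drv_byte(result) == DRIVER_SENSE  -- checks the whole byte
     */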
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index a10cf25ee7f9..23d07e9f87d0 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -761,27 +761,116 @@ out:
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
+ /* Table for non-Numonix supported flash parts. Numonix parts are left
+ * to the preexisting code. All flash parts have 64KB sectors.
+ */
+ static struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+ } supported_flash[] = {
+ { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
+ u32 part, manufacturer;
+ u32 density, size = 0;
+ u32 flashid = 0;
int ret;
- uint32_t info = 0;
ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid);
csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
- if (ret != 0)
+ if (ret)
return ret;
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
- return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- hw->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- hw->params.sf_nsec = 64;
- else
- return -EINVAL;
- hw->params.sf_size = 1 << info;
+ /* Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ hw->params.sf_size = supported_flash[part].size_mb;
+ hw->params.sf_nsec =
+ hw->params.sf_size / SF_SEC_SIZE;
+ goto found;
+ }
+
+ /* Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /* This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14 ... 0x19: /* 1MB - 32MB */
+ size = 1 << density;
+ break;
+ case 0x20: /* 64MB */
+ size = 1 << 26;
+ break;
+ case 0x21: /* 128MB */
+ size = 1 << 27;
+ break;
+ case 0x22: /* 256MB */
+ size = 1 << 28;
+ }
+ break;
+ }
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /* This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16: /* 32 MB */
+ size = 1 << 25;
+ break;
+ case 0x17: /* 64MB */
+ size = 1 << 26;
+ }
+ break;
+ }
+ case 0xc2: /* Macronix */
+ case 0xef: /* Winbond */ {
+ /* This Density -> Size decoding table is taken from
+ * Macronix and Winbond Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17: /* 8MB */
+ case 0x18: /* 16MB */
+ size = 1 << density;
+ }
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /* Store decoded Flash size */
+ hw->params.sf_size = size;
+ hw->params.sf_nsec = size / SF_SEC_SIZE;
+found:
+ if (hw->params.sf_size < FLASH_MIN_SIZE)
+ csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, hw->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
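The rewritten csio_hw_get_flash_params() keys everything off the JEDEC Read ID word: the manufacturer sits in bits 7:0 and, for the vendors handled here, a density code in bits 23:16 that usually encodes log2 of the part size in bytes. A condensed sketch of the decode, covering only the Micron and Macronix/Winbond rows above:

    static unsigned int flash_size_from_jedec_id(unsigned int flashid)
    {
            unsigned int density = (flashid >> 16) & 0xff;

            switch (flashid & 0xff) {
            case 0x20:                              /* Micron/Numonix */
                    if (density >= 0x14 && density <= 0x19)
                            return 1U << density;       /* 1MB .. 32MB */
                    if (density >= 0x20 && density <= 0x22)
                            return 1U << (density - 6); /* 64/128/256MB */
                    break;
            case 0xc2:                              /* Macronix */
            case 0xef:                              /* Winbond */
                    if (density == 0x17 || density == 0x18)
                            return 1U << density;       /* 8MB, 16MB */
                    break;
            }
            return 1U << 22;        /* unrecognized: assume the 4MB minimum */
    }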
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index faa357b62c61..5022e82ccc4f 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -39,6 +39,7 @@
#include <asm/page.h>
#include <linux/cache.h>
+#include "t4_values.h"
#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
@@ -1309,8 +1310,11 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
struct csio_sge *sge = &wrm->sge;
uint32_t clsz = L1_CACHE_BYTES;
uint32_t s_hps = PAGE_SHIFT - 10;
- uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64;
+ u32 fl_align = clsz < 32 ? 32 : clsz;
+ u32 pack_align;
+ u32 ingpad, ingpack;
+ int pcie_cap;
csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
@@ -1318,14 +1322,82 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
SGE_HOST_PAGE_SIZE_A);
- sge->csio_fl_align = clsz < 32 ? 32 : clsz;
- ingpad = ilog2(sge->csio_fl_align) - 5;
+ /* T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
+ */
+ pack_align = fl_align;
+ pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ u32 mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ pci_read_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
+
+ /* T5/T6 have a special interpretation of the "0"
+ * value for the Packing Boundary. This corresponds to 16
+ * bytes instead of the expected 32 bytes.
+ */
+ if (pack_align <= 16) {
+ ingpack = INGPACKBOUNDARY_16B_X;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = INGPACKBOUNDARY_64B_X;
+ fl_align = 64;
+ } else {
+ u32 pack_align_log = fls(pack_align) - 1;
+
+ ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
+ fl_align = pack_align;
+ }
+
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ ingpad = INGPADBOUNDARY_32B_X;
+ else
+ ingpad = T6_INGPADBOUNDARY_8B_X;
csio_set_reg_field(hw, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY_V(ingpad) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
+ csio_set_reg_field(hw, SGE_CONTROL2_A,
+ INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+ INGPACKBOUNDARY_V(ingpack));
/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
@@ -1337,14 +1409,16 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ fl_align - 1) & ~(fl_align - 1),
SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ fl_align - 1) & ~(fl_align - 1),
SGE_FL_BUFFER_SIZE3_A);
}
+ sge->csio_fl_align = fl_align;
+
csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
/* default value of rx_dma_offset of the NIC driver */
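The packing-boundary logic above raises pack_align to at least the PCIe Maximum Payload Size, which PCI_EXP_DEVCTL encodes in bits 7:5 as a power of two starting at 128 bytes. A small sketch of that decode, equivalent to the 1 << (code + 7) form in the patch:

    static unsigned int pcie_mps_bytes(unsigned short devctl)
    {
            unsigned int code = (devctl & 0x00e0) >> 5; /* PCI_EXP_DEVCTL_PAYLOAD */

            return 128U << code;    /* 128, 256, ... up to 4096 bytes */
    }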
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 2a3977823812..a39be94d110c 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -107,12 +107,12 @@ cxlflash_assign_ops(struct dev_dependent_vals *ddv)
{
const struct cxlflash_backend_ops *ops = NULL;
-#ifdef CONFIG_OCXL
+#ifdef CONFIG_OCXL_BASE
if (ddv->flags & CXLFLASH_OCXL_DEV)
ops = &cxlflash_ocxl_ops;
#endif
-#ifdef CONFIG_CXL
+#ifdef CONFIG_CXL_BASE
if (!(ddv->flags & CXLFLASH_OCXL_DEV))
ops = &cxlflash_cxl_ops;
#endif
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 0a95b5f25380..37b8dc60f5f6 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -88,10 +88,8 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
- struct inode *inode = NULL;
+ struct inode *inode;
int rc;
if (fops->owner && !try_module_get(fops->owner)) {
@@ -116,33 +114,18 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
goto err3;
}
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
- if (!path.dentry) {
- dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
- rc = -ENOMEM;
- goto err4;
- }
-
- path.mnt = mntget(ocxlflash_vfs_mount);
- d_instantiate(path.dentry, inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file)) {
rc = PTR_ERR(file);
dev_err(dev, "%s: alloc_file failed rc=%d\n",
__func__, rc);
- goto err5;
+ goto err4;
}
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
out:
return file;
-err5:
- path_put(&path);
err4:
iput(inode);
err3:
@@ -1158,7 +1141,7 @@ static int afu_release(struct inode *inode, struct file *file)
*
* Return: 0 on success, -errno on failure
*/
-static int ocxlflash_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxlflash_context *ctx = vma->vm_file->private_data;
@@ -1181,8 +1164,7 @@ static int ocxlflash_mmap_fault(struct vm_fault *vmf)
mmio_area = ctx->psn_phys;
mmio_area += offset;
- vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
- return VM_FAULT_NOPAGE;
+ return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}
static const struct vm_operations_struct ocxlflash_vmops = {
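The fault-handler conversion follows the tree-wide vm_fault_t work: vm_insert_pfn() returned an errno the caller had to translate into VM_FAULT_* codes, while vmf_insert_pfn() returns a vm_fault_t (VM_FAULT_NOPAGE on success) that can be passed straight back. A minimal sketch of the converted shape; compute_pfn() is a hypothetical stand-in for the driver-specific MMIO math:

    static vm_fault_t example_mmap_fault(struct vm_fault *vmf)
    {
            unsigned long pfn = compute_pfn(vmf);   /* hypothetical helper */

            /* VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS etc. on error */
            return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
    }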
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index e489d89cbb45..acac6152f50b 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -339,7 +339,6 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
struct scsi_sense_hdr sshdr;
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
- u8 *sense_buf = NULL;
int rc = 0;
int result = 0;
int retry_cnt = 0;
@@ -348,8 +347,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
retry:
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+ if (unlikely(!cmd_buf || !scsi_cmd)) {
rc = -ENOMEM;
goto out;
}
@@ -364,7 +362,7 @@ retry:
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES,
+ CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
@@ -395,7 +393,6 @@ retry:
if (retry_cnt++ < 1) {
kfree(cmd_buf);
kfree(scsi_cmd);
- kfree(sense_buf);
goto retry;
}
}
@@ -426,7 +423,6 @@ retry:
out:
kfree(cmd_buf);
kfree(scsi_cmd);
- kfree(sense_buf);
dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
__func__, gli->max_lba, gli->blk_len, rc);
@@ -1108,7 +1104,7 @@ out:
*
* Return: 0 on success, VM_FAULT_SIGBUS on failure
*/
-static int cxlflash_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
@@ -1119,7 +1115,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int rc = 0;
+ vm_fault_t rc = 0;
int ctxid;
ctxid = cfg->ops->process_element(ctx);
@@ -1159,7 +1155,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
out:
if (likely(ctxi))
put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
return rc;
err:
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 66e445a17d6c..2c904bf16b65 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -426,7 +426,6 @@ static int write_same16(struct scsi_device *sdev,
{
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
- u8 *sense_buf = NULL;
int rc = 0;
int result = 0;
u64 offset = lba;
@@ -440,8 +439,7 @@ static int write_same16(struct scsi_device *sdev,
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+ if (unlikely(!cmd_buf || !scsi_cmd)) {
rc = -ENOMEM;
goto out;
}
@@ -457,7 +455,7 @@ static int write_same16(struct scsi_device *sdev,
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, NULL, to,
+ CMD_BUFSIZE, NULL, NULL, to,
CMD_RETRIES, 0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
@@ -482,7 +480,6 @@ static int write_same16(struct scsi_device *sdev,
out:
kfree(cmd_buf);
kfree(scsi_cmd);
- kfree(sense_buf);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 60ef8df42b95..1ed2cd82129d 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3473,9 +3473,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/*if( srb->cmd->cmnd[0] == INQUIRY && */
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
- if ((cmd->result == (DID_OK << 16)
- || status_byte(cmd->result) &
- CHECK_CONDITION)) {
+ if ((cmd->result == (DID_OK << 16) ||
+ status_byte(cmd->result) == CHECK_CONDITION)) {
if (!dcb->init_tcq_flag) {
add_dev(acb, dcb, ptr);
dcb->init_tcq_flag = 1;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index ea23c8dffc25..54da3166da8d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
case ELS_LOGO:
if (fip->mode == FIP_MODE_VN2VN) {
if (fip->state != FIP_ST_VNMP_UP)
- return -EINVAL;
+ goto drop;
if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
- return -EINVAL;
+ goto drop;
} else {
if (fip->state != FIP_ST_ENABLED)
return 0;
@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
fip->send(fip, skb);
return -EINPROGRESS;
drop:
- kfree_skb(skb);
LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
op, ntoh24(fh->fh_d_id));
+ kfree_skb(skb);
return -EINVAL;
}
EXPORT_SYMBOL(fcoe_ctlr_els_send);
@@ -2175,15 +2175,13 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
- rcu_read_lock();
+ mutex_lock(&lport->disc.disc_mutex);
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
- rcu_read_unlock();
- mutex_lock(&lport->disc.disc_mutex);
lport->disc.disc_callback = NULL;
mutex_unlock(&lport->disc.disc_mutex);
}
@@ -2712,7 +2710,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
unsigned long deadline;
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
- rcu_read_lock();
+ mutex_lock(&lport->disc.disc_mutex);
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
@@ -2733,7 +2731,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
next_time = deadline;
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
+ mutex_unlock(&lport->disc.disc_mutex);
return next_time;
}
@@ -3080,8 +3078,6 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
mutex_lock(&disc->disc_mutex);
callback = disc->pending ? disc->disc_callback : NULL;
disc->pending = 0;
- mutex_unlock(&disc->disc_mutex);
- rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
@@ -3090,7 +3086,7 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
fc_rport_login(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
+ mutex_unlock(&disc->disc_mutex);
if (callback)
callback(lport, DISC_EV_SUCCESS);
}
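The fcoe_ctlr hunks replace rcu_read_lock() with disc_mutex around these rport walks: fc_rport_logoff() and fc_rport_login() can sleep, which is not allowed inside an RCU read-side section, and the mutex also excludes concurrent list mutation. The generic shape of the walk, as used above (the sleepable work is a placeholder):

    mutex_lock(&disc->disc_mutex);
    list_for_each_entry_rcu(rdata, &disc->rports, peers) {
            if (!kref_get_unless_zero(&rdata->kref))
                    continue;               /* entry already being torn down */
            do_sleepable_work(rdata);       /* placeholder for logoff/login */
            kref_put(&rdata->kref, fc_rport_destroy);
    }
    mutex_unlock(&disc->disc_mutex);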
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 85604795d8ee..16709735b546 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -146,14 +146,14 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id);
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex);
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- Scsi_Cmnd *scp);
+ struct scsi_cmnd *scp);
static int gdth_async_event(gdth_ha_str *ha);
static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
+static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
-static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
+static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
u16 idx, gdth_evt_data *evt);
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
@@ -161,10 +161,11 @@ static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
gdth_evt_str *estr);
static void gdth_clear_events(void);
-static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count);
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ u16 hdrive);
static void gdth_enable_int(gdth_ha_str *ha);
static int gdth_test_busy(gdth_ha_str *ha);
@@ -446,7 +447,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
int timeout, u32 *info)
{
gdth_ha_str *ha = shost_priv(sdev->host);
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
struct gdth_cmndinfo cmndinfo;
DECLARE_COMPLETION_ONSTACK(wait);
int rval;
@@ -1982,11 +1983,11 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
/* command queueing/sending functions */
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
+static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
{
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- register Scsi_Cmnd *pscp;
- register Scsi_Cmnd *nscp;
+ register struct scsi_cmnd *pscp;
+ register struct scsi_cmnd *nscp;
unsigned long flags;
TRACE(("gdth_putq() priority %d\n",priority));
@@ -2000,11 +2001,11 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
scp->SCp.ptr = NULL;
} else { /* queue not empty */
pscp = ha->req_first;
- nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
/* priority: 0-highest,..,0xff-lowest */
while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
pscp = nscp;
- nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
}
pscp->SCp.ptr = (char *)scp;
scp->SCp.ptr = (char *)nscp;
@@ -2013,7 +2014,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
#ifdef GDTH_STATISTICS
flags = 0;
- for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++flags;
if (max_rq < flags) {
max_rq = flags;
@@ -2024,8 +2025,8 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
static void gdth_next(gdth_ha_str *ha)
{
- register Scsi_Cmnd *pscp;
- register Scsi_Cmnd *nscp;
+ register struct scsi_cmnd *pscp;
+ register struct scsi_cmnd *nscp;
u8 b, t, l, firsttime;
u8 this_cmd, next_cmd;
unsigned long flags = 0;
@@ -2040,10 +2041,10 @@ static void gdth_next(gdth_ha_str *ha)
next_cmd = gdth_polling ? FALSE:TRUE;
cmd_index = 0;
- for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
+ for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
- if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
- pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
+ pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
if (!nscp_cmndinfo->internal_command) {
b = nscp->device->channel;
t = nscp->device->id;
@@ -2250,7 +2251,7 @@ static void gdth_next(gdth_ha_str *ha)
if (!this_cmd)
break;
if (nscp == ha->req_first)
- ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
+ ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
else
pscp->SCp.ptr = nscp->SCp.ptr;
if (!next_cmd)
@@ -2275,7 +2276,7 @@ static void gdth_next(gdth_ha_str *ha)
* gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
* buffers, kmap_atomic() as needed.
*/
-static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count)
{
u16 cpcount,i, max_sg = scsi_sg_count(scp);
@@ -2317,7 +2318,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
}
}
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
u8 t;
gdth_inq_data inq;
@@ -2419,7 +2420,8 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
return 0;
}
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ u16 hdrive)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
@@ -2594,7 +2596,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
return cmd_index;
}
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
{
register gdth_cmd_str *cmdp;
u16 i;
@@ -2767,7 +2769,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
return cmd_index;
}
-static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
@@ -2958,7 +2960,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
gdt6_dpram_str __iomem *dp6_ptr;
gdt2_dpram_str __iomem *dp2_ptr;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
int rval, i;
u8 IStatus;
u16 Service;
@@ -3217,7 +3219,7 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id)
}
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- Scsi_Cmnd *scp)
+ struct scsi_cmnd *scp)
{
gdth_msg_str *msg;
gdth_cmd_str *cmdp;
@@ -3708,7 +3710,7 @@ static u8 gdth_timer_running;
static void gdth_timeout(struct timer_list *unused)
{
u32 i;
- Scsi_Cmnd *nscp;
+ struct scsi_cmnd *nscp;
gdth_ha_str *ha;
unsigned long flags;
@@ -3724,7 +3726,8 @@ static void gdth_timeout(struct timer_list *unused)
if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
++act_stats;
- for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ for (act_rq=0,
+ nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++act_rq;
TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
@@ -3909,12 +3912,12 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
}
-static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
+static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
int i;
unsigned long flags;
- Scsi_Cmnd *cmnd;
+ struct scsi_cmnd *cmnd;
u8 b;
TRACE2(("gdth_eh_bus_reset()\n"));
@@ -4465,7 +4468,7 @@ free_fail:
static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
gdth_ha_str *ha;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
unsigned long flags;
char cmnd[MAX_COMMAND_SIZE];
void __user *argp = (void __user *)arg;
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index e6e5ccb1e0f3..ee6ffcf388e8 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -162,9 +162,9 @@
#define BIGSECS 63 /* mapping 255*63 */
/* special command ptr. */
-#define UNUSED_CMND ((Scsi_Cmnd *)-1)
-#define INTERNAL_CMND ((Scsi_Cmnd *)-2)
-#define SCREEN_CMND ((Scsi_Cmnd *)-3)
+#define UNUSED_CMND ((struct scsi_cmnd *)-1)
+#define INTERNAL_CMND ((struct scsi_cmnd *)-2)
+#define SCREEN_CMND ((struct scsi_cmnd *)-3)
#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
/* controller services */
@@ -867,7 +867,7 @@ typedef struct {
u16 service; /* service/firmware ver./.. */
u32 info;
u32 info2; /* additional info */
- Scsi_Cmnd *req_first; /* top of request queue */
+ struct scsi_cmnd *req_first; /* top of request queue */
struct {
u8 present; /* Flag: host drive present? */
u8 is_logdrv; /* Flag: log. drive (master)? */
@@ -896,7 +896,7 @@ typedef struct {
u32 id_list[MAXID]; /* IDs of the phys. devices */
} raw[MAXBUS]; /* SCSI channels */
struct {
- Scsi_Cmnd *cmnd; /* pending request */
+ struct scsi_cmnd *cmnd; /* pending request */
u16 service; /* service */
} cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
struct gdth_cmndinfo { /* per-command private info */
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 20add49cdd32..3a9751a80225 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -626,7 +626,7 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
{
unsigned long flags;
int i;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
struct gdth_cmndinfo *cmndinfo;
u8 b, t;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 7052a5d45f7f..6c7d2e201abe 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/lcm.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -199,17 +200,17 @@ struct hisi_sas_slot {
int dlvry_queue_slot;
int cmplt_queue;
int cmplt_queue_slot;
- int idx;
int abort;
int ready;
- void *buf;
- dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
- struct work_struct abort_slot;
struct timer_list internal_abort_timer;
bool is_internal;
struct hisi_sas_tmf_task *tmf;
+ /* Do not reorder/change members after here */
+ void *buf;
+ dma_addr_t buf_dma;
+ int idx;
};
struct hisi_sas_hw {
@@ -277,6 +278,7 @@ struct hisi_hba {
int n_phy;
spinlock_t lock;
+ struct semaphore sem;
struct timer_list timer;
struct workqueue_struct *wq;
@@ -298,7 +300,6 @@ struct hisi_hba {
int queue_count;
- struct dma_pool *buffer_pool;
struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES];
struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES];
dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
@@ -319,6 +320,7 @@ struct hisi_hba {
const struct hisi_sas_hw *hw; /* Low level hw interface */
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
+ u32 phy_state;
};
/* Generic HW DMA host memory structures */
@@ -479,4 +481,6 @@ extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
+extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
+extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 6f562974f8f6..a4e2e6aa9a6b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -242,20 +242,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->data_dir);
}
- if (slot->buf)
- dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
spin_lock_irqsave(&dq->lock, flags);
list_del_init(&slot->entry);
spin_unlock_irqrestore(&dq->lock, flags);
- slot->buf = NULL;
- slot->task = NULL;
- slot->port = NULL;
+
+ memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
+
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot->idx);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
-
- /* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
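The slot free path above depends on the layout change in hisi_sas.h: the members that must survive reuse (buf, buf_dma, idx) move behind the "Do not reorder" marker, so a single memset up to offsetof(..., buf) wipes all per-command state while preserving the allocation, and slot->idx is still valid for the index free that follows. The general pattern, illustrative:

    #include <linux/string.h>

    struct reusable {
            int state;              /* wiped on every free */
            void *cookie;           /* wiped on every free */
            /* Do not reorder/change members after here */
            void *buf;              /* allocated once, persists */
            int idx;                /* identity, persists */
    };

    static void recycle(struct reusable *r)
    {
            memset(r, 0, offsetof(struct reusable, buf));
    }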
@@ -285,40 +281,6 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
device_id, abort_flag, tag_to_abort);
}
-/*
- * This function will issue an abort TMF regardless of whether the
- * task is in the sdev or not. Then it will do the task complete
- * cleanup and callbacks.
- */
-static void hisi_sas_slot_abort(struct work_struct *work)
-{
- struct hisi_sas_slot *abort_slot =
- container_of(work, struct hisi_sas_slot, abort_slot);
- struct sas_task *task = abort_slot->task;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct scsi_cmnd *cmnd = task->uldd_task;
- struct hisi_sas_tmf_task tmf_task;
- struct scsi_lun lun;
- struct device *dev = hisi_hba->dev;
- int tag = abort_slot->idx;
-
- if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
- dev_err(dev, "cannot abort slot for non-ssp task\n");
- goto out;
- }
-
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
-out:
- /* Do cleanup for this task */
- hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
- if (task->task_done)
- task->task_done(task);
-}
-
static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq **dq_pointer,
bool is_tmf, struct hisi_sas_tmf_task *tmf,
@@ -334,8 +296,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
- unsigned long flags, flags_dq;
struct hisi_sas_dq *dq;
+ unsigned long flags;
int wr_q_index;
if (!sas_port) {
@@ -430,30 +392,22 @@ static int hisi_sas_task_prep(struct sas_task *task,
goto err_out_dma_unmap;
slot = &hisi_hba->slot_info[slot_idx];
- memset(slot, 0, sizeof(struct hisi_sas_slot));
-
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
- spin_lock_irqsave(&dq->lock, flags_dq);
+ spin_lock_irqsave(&dq->lock, flags);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ spin_unlock_irqrestore(&dq->lock, flags);
rc = -EAGAIN;
- goto err_out_buf;
+ goto err_out_tag;
}
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&dq->lock, flags);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
- slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -464,7 +418,6 @@ static int hisi_sas_task_prep(struct sas_task *task,
slot->tmf = tmf;
slot->is_internal = is_tmf;
task->lldd_task = slot;
- INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
@@ -488,21 +441,15 @@ static int hisi_sas_task_prep(struct sas_task *task,
break;
}
- spin_lock_irqsave(&dq->lock, flags);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&dq->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
++(*pass);
- slot->ready = 1;
+ WRITE_ONCE(slot->ready, 1);
return 0;
-err_out_buf:
- dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -536,8 +483,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
struct device *dev = hisi_hba->dev;
struct hisi_sas_dq *dq = NULL;
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
- return -EINVAL;
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
+ if (in_softirq())
+ return -EINVAL;
+
+ down(&hisi_hba->sem);
+ up(&hisi_hba->sem);
+ }
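The down()/up() pair above takes the semaphore only to release it again: it is a sleep-until-reset-finishes barrier, not a critical section, because the reset path holds hisi_hba->sem for the whole reset window. Softirq callers cannot sleep, so they bail out with -EINVAL instead. A minimal userspace analogue of the idiom, using POSIX semaphores (all names illustrative):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static sem_t reset_sem;        /* plays the role of hisi_hba->sem */
    static atomic_int reject;      /* plays the role of HISI_SAS_REJECT_CMD_BIT */

    static void *submit_io(void *arg)
    {
        if (reject) {
            sem_wait(&reset_sem);  /* block until the resetter posts... */
            sem_post(&reset_sem);  /* ...then immediately let others through */
        }
        printf("I/O submitted after the reset window closed\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&reset_sem, 0, 1);
        sem_wait(&reset_sem);      /* reset begins: hold the semaphore */
        reject = 1;

        pthread_create(&t, NULL, submit_io, NULL);
        sleep(1);                  /* stand-in for the actual HW reset */

        reject = 0;
        sem_post(&reset_sem);      /* reset done: wake waiting submitters */
        pthread_join(t, NULL);
        return 0;
    }

Build with cc -pthread; the atomic flag only mirrors the bit test, the real driver gets its ordering from the surrounding locking.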
/* protect task_prep and start_delivery sequence */
rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
@@ -819,6 +771,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
for (i = 0; i < HISI_PHYES_NUM; i++)
INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
+
+ spin_lock_init(&phy->lock);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -862,7 +816,6 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
hisi_sas_slot_task_free(hisi_hba, task, slot);
}
-/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
struct domain_device *device)
{
@@ -914,7 +867,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
hisi_sas_dereg_device(hisi_hba, device);
+ down(&hisi_hba->sem);
hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ up(&hisi_hba->sem);
device->lldd_dev = NULL;
}
@@ -1351,21 +1306,12 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
}
}
-static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- u32 old_state, state;
- int rc;
-
- if (!hisi_hba->hw->soft_reset)
- return -1;
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- return -1;
-
- dev_info(dev, "controller resetting...\n");
- old_state = hisi_hba->hw->get_phys_state(hisi_hba);
+ down(&hisi_hba->sem);
+ hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
@@ -1374,34 +1320,61 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- rc = hisi_hba->hw->soft_reset(hisi_hba);
- if (rc) {
- dev_warn(dev, "controller reset failed (%d)\n", rc);
- clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- scsi_unblock_requests(shost);
- goto out;
- }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
- clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
+{
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 state;
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
hisi_sas_refresh_port_id(hisi_hba);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
state = hisi_hba->hw->get_phys_state(hisi_hba);
- hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_info(dev, "controller reset complete\n");
+ hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
-out:
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ int rc;
- return rc;
+ if (!hisi_hba->hw->soft_reset)
+ return -1;
+
+ if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ return -1;
+
+ dev_info(dev, "controller resetting...\n");
+ hisi_sas_controller_reset_prepare(hisi_hba);
+
+ rc = hisi_hba->hw->soft_reset(hisi_hba);
+ if (rc) {
+ dev_warn(dev, "controller reset failed (%d)\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ return rc;
+ }
+
+ hisi_sas_controller_reset_done(hisi_hba);
+ dev_info(dev, "controller reset complete\n");
+
+ return 0;
}
static int hisi_sas_abort_task(struct sas_task *task)
@@ -1644,14 +1617,32 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+ int rc, i;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
- if (r.done)
- return TMF_RESP_FUNC_COMPLETE;
+ if (!r.done)
+ return TMF_RESP_FUNC_FAILED;
+
+ for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
- return TMF_RESP_FUNC_FAILED;
+ if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
+ DEV_IS_EXPANDER(device->dev_type))
+ continue;
+
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
+ sas_dev->device_id, rc);
+ }
+
+ hisi_sas_release_tasks(hisi_hba);
+
+ return TMF_RESP_FUNC_COMPLETE;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1723,21 +1714,13 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_unlock_irqrestore(&hisi_hba->lock, flags);
slot = &hisi_hba->slot_info[slot_idx];
- memset(slot, 0, sizeof(struct hisi_sas_slot));
-
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
spin_lock_irqsave(&dq->lock, flags_dq);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
spin_unlock_irqrestore(&dq->lock, flags_dq);
rc = -EAGAIN;
- goto err_out_buf;
+ goto err_out_tag;
}
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags_dq);
@@ -1745,7 +1728,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
- slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -1767,7 +1749,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- slot->ready = 1;
+ WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
@@ -1776,9 +1758,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
return 0;
-err_out_buf:
- dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -1919,7 +1898,8 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
} else {
struct hisi_sas_port *port = phy->port;
- if (phy->in_reset) {
+ if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
+ phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
}
@@ -2014,8 +1994,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
struct device *dev = hisi_hba->dev;
- int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int max_command_entries_ru, sz_slot_buf_ru;
+ int blk_cnt, slots_per_blk;
+ sema_init(&hisi_hba->sem, 1);
spin_lock_init(&hisi_hba->lock);
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_init(hisi_hba, i);
@@ -2045,29 +2028,27 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
/* Delivery queue */
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
- &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
+ hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
+ &hisi_hba->cmd_hdr_dma[i],
+ GFP_KERNEL);
if (!hisi_hba->cmd_hdr[i])
goto err_out;
/* Completion queue */
s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
- &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
+ hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
+ &hisi_hba->complete_hdr_dma[i],
+ GFP_KERNEL);
if (!hisi_hba->complete_hdr[i])
goto err_out;
}
- s = sizeof(struct hisi_sas_slot_buf_table);
- hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
- if (!hisi_hba->buffer_pool)
- goto err_out;
-
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
- GFP_KERNEL);
+ hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
+ GFP_KERNEL);
if (!hisi_hba->itct)
goto err_out;
+ memset(hisi_hba->itct, 0, s);
hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
sizeof(struct hisi_sas_slot),
@@ -2075,15 +2056,45 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->slot_info)
goto err_out;
+ /* roundup to avoid overly large block size */
+ max_command_entries_ru = roundup(max_command_entries, 64);
+ sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
+ s = lcm(max_command_entries_ru, sz_slot_buf_ru);
+ blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
+ slots_per_blk = s / sz_slot_buf_ru;
+ for (i = 0; i < blk_cnt; i++) {
+ struct hisi_sas_slot_buf_table *buf;
+ dma_addr_t buf_dma;
+ int slot_index = i * slots_per_blk;
+
+ buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
+ if (!buf)
+ goto err_out;
+ memset(buf, 0, s);
+
+ for (j = 0; j < slots_per_blk; j++, slot_index++) {
+ struct hisi_sas_slot *slot;
+
+ slot = &hisi_hba->slot_info[slot_index];
+ slot->buf = buf;
+ slot->buf_dma = buf_dma;
+ slot->idx = slot_index;
+
+ buf++;
+ buf_dma += sizeof(*buf);
+ }
+ }
+
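This block replaces the per-slot dma_pool: both counts are rounded up to multiples of 64 and the block size is their lcm, so every DMA block holds a whole number of slot buffers and blk_cnt * slots_per_blk lands exactly on the rounded command count. A runnable check of the arithmetic with assumed sizes (the real max_command_entries and slot-buffer size come from the hw layer):

    #include <stdio.h>

    /* gcd/lcm as used via the kernel's lcm() helper above */
    static long gcd(long a, long b) { return b ? gcd(b, a % b) : a; }
    static long lcm(long a, long b) { return a / gcd(a, b) * b; }

    int main(void)
    {
        /* illustrative values only */
        long max_cmds_ru = ((4096 + 63) / 64) * 64;  /* roundup(max_command_entries, 64) */
        long slot_buf_ru = ((4588 + 63) / 64) * 64;  /* roundup(sizeof(slot_buf_table), 64) */
        long s = lcm(max_cmds_ru, slot_buf_ru);
        long blk_cnt = (max_cmds_ru * slot_buf_ru) / s;
        long slots_per_blk = s / slot_buf_ru;

        printf("block size %ld, %ld blocks, %ld slots/block (total %ld)\n",
               s, blk_cnt, slots_per_blk, blk_cnt * slots_per_blk);
        return 0;
    }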
s = max_command_entries * sizeof(struct hisi_sas_iost);
- hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
- GFP_KERNEL);
+ hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
+ GFP_KERNEL);
if (!hisi_hba->iost)
goto err_out;
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
- hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
- &hisi_hba->breakpoint_dma, GFP_KERNEL);
+ hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
+ &hisi_hba->breakpoint_dma,
+ GFP_KERNEL);
if (!hisi_hba->breakpoint)
goto err_out;
@@ -2094,14 +2105,16 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
- hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
- &hisi_hba->initial_fis_dma, GFP_KERNEL);
+ hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
+ &hisi_hba->initial_fis_dma,
+ GFP_KERNEL);
if (!hisi_hba->initial_fis)
goto err_out;
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
- hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
- &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
+ hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
+ &hisi_hba->sata_breakpoint_dma,
+ GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
goto err_out;
hisi_sas_init_mem(hisi_hba);
@@ -2122,54 +2135,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_alloc);
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
- int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
-
- for (i = 0; i < hisi_hba->queue_count; i++) {
- s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- if (hisi_hba->cmd_hdr[i])
- dma_free_coherent(dev, s,
- hisi_hba->cmd_hdr[i],
- hisi_hba->cmd_hdr_dma[i]);
-
- s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- if (hisi_hba->complete_hdr[i])
- dma_free_coherent(dev, s,
- hisi_hba->complete_hdr[i],
- hisi_hba->complete_hdr_dma[i]);
- }
-
- dma_pool_destroy(hisi_hba->buffer_pool);
-
- s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- if (hisi_hba->itct)
- dma_free_coherent(dev, s,
- hisi_hba->itct, hisi_hba->itct_dma);
-
- s = max_command_entries * sizeof(struct hisi_sas_iost);
- if (hisi_hba->iost)
- dma_free_coherent(dev, s,
- hisi_hba->iost, hisi_hba->iost_dma);
-
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
- if (hisi_hba->breakpoint)
- dma_free_coherent(dev, s,
- hisi_hba->breakpoint,
- hisi_hba->breakpoint_dma);
-
-
- s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
- if (hisi_hba->initial_fis)
- dma_free_coherent(dev, s,
- hisi_hba->initial_fis,
- hisi_hba->initial_fis_dma);
-
- s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
- if (hisi_hba->sata_breakpoint)
- dma_free_coherent(dev, s,
- hisi_hba->sata_breakpoint,
- hisi_hba->sata_breakpoint_dma);
-
if (hisi_hba->wq)
destroy_workqueue(hisi_hba->wq);
}
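hisi_sas_free() above shrinks to just destroying the workqueue because every dma_alloc_coherent()/dma_zalloc_coherent() in hisi_sas_alloc() became the device-managed dmam_alloc_coherent(), which the driver core releases automatically on unbind. A sketch of the managed-DMA idiom, assuming a fictional foo platform driver:

    /* kernel-style sketch only; foo_* names are illustrative */
    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    struct foo_priv {
        void *ring;
        dma_addr_t ring_dma;
    };

    static int foo_probe(struct platform_device *pdev)
    {
        struct foo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;

        /* freed automatically when the device is unbound -- no remove()
         * counterpart needed, which is why hisi_sas_free() could shrink */
        priv->ring = dmam_alloc_coherent(&pdev->dev, 4096,
                                         &priv->ring_dma, GFP_KERNEL);
        if (!priv->ring)
            return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        return 0;
    }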
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 89ab18c1959c..8f60f0e04599 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -903,23 +903,28 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+	 * Ensure that slot memory written on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
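The smp_rmb() added to all three start_delivery_*_hw() variants pairs with WRITE_ONCE(slot->ready, 1) in the prep paths: the flag is published last, and the reader must order its flag check before consuming the slot fields. A runnable userspace analogue using C11 acquire/release atomics (the kernel primitives differ in spelling, not in the guarantee sketched here):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct slot { int payload; atomic_int ready; };
    static struct slot s;

    static void *producer(void *arg)
    {
        s.payload = 42;                       /* build the slot first */
        atomic_store_explicit(&s.ready, 1, memory_order_release);
        return NULL;
    }

    static void *consumer(void *arg)
    {
        while (!atomic_load_explicit(&s.ready, memory_order_acquire))
            ;                                 /* spin until published */
        /* acquire/release ordering makes payload visible here, the role
         * WRITE_ONCE + smp_rmb play in the driver */
        printf("payload = %d\n", s.payload);
        return NULL;
    }

    int main(void)
    {
        pthread_t p, c;
        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
    }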
@@ -1296,11 +1301,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
slot_err_v1_hw(hisi_hba, task, slot);
- if (unlikely(slot->abort)) {
- queue_work(hisi_hba->wq, &slot->abort_slot);
- /* immediately return and do not complete */
+ if (unlikely(slot->abort))
return ts->stat;
- }
goto out;
}
@@ -1469,7 +1471,8 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 213c530e63f2..9c5c5a601332 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1665,23 +1665,28 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+	 * Ensure that slot memory written on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
@@ -2840,7 +2845,8 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if (bcast_status & RX_BCAST_CHG_MSK)
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
@@ -3234,8 +3240,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
if (fis->status & ATA_ERR) {
dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
fis->status);
- disable_phy_v2_hw(hisi_hba, phy_no);
- enable_phy_v2_hw(hisi_hba, phy_no);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
res = IRQ_NONE;
goto end;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 9f1e2d03f914..08b503e274b8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -51,7 +51,6 @@
#define CFG_ABT_SET_IPTT_DONE 0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF 0
#define HGC_IOMB_PROC1_STATUS 0x104
-#define CFG_1US_TIMER_TRSH 0xcc
#define CHNL_INT_STATUS 0x148
#define HGC_AXI_FIFO_ERR_INFO 0x154
#define AXI_ERR_INFO_OFF 0
@@ -121,6 +120,8 @@
#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF 2
#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PHY_CFG_PHY_RST_OFF 3
+#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
@@ -131,6 +132,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF 17
#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -206,6 +210,8 @@
#define AXI_MASTER_CFG_BASE (0x5000)
#define AM_CTRL_GLOBAL (0x0)
+#define AM_CTRL_SHUTDOWN_REQ_OFF 0
+#define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
#define AM_CURR_TRANS_RETURN (0x150)
#define AM_CFG_MAX_TRANS (0x5010)
@@ -425,7 +431,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
(u32)((1ULL << hisi_hba->queue_count) - 1));
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
- hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -486,6 +491,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
+ hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
@@ -758,15 +764,25 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
cfg |= PHY_CFG_ENA_MSK;
+ cfg &= ~PHY_CFG_PHY_RST_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+ u32 state;
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+
+ mdelay(50);
+
+ state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ if (state & BIT(phy_no)) {
+ cfg |= PHY_CFG_PHY_RST_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+ }
}
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -866,23 +882,28 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+	 * Ensure that slot memory written on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
@@ -1170,6 +1191,16 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
+
+ /* check ERR bit of Status Register */
+ if (fis->status & ATA_ERR) {
+ dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
+ phy_no, fis->status);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ res = IRQ_NONE;
+ goto end;
+ }
+
sas_phy->oob_mode = SATA_OOB_MODE;
attached_sas_addr[0] = 0x50;
attached_sas_addr[7] = phy_no;
@@ -1256,9 +1287,13 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -1327,11 +1362,77 @@ static const struct hisi_sas_hw_error port_axi_error[] = {
},
};
-static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
- struct hisi_hba *hisi_hba = p;
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
+ u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
struct device *dev = hisi_hba->dev;
+ int i;
+
+ irq_value &= ~irq_msk;
+ if (!irq_value)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
+ const struct hisi_sas_hw_error *error = &port_axi_error[i];
+
+ if (!(irq_value & error->irq_msk))
+ continue;
+
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
+}
+
+static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct pci_dev *pci_dev = hisi_hba->pci_dev;
+ struct device *dev = hisi_hba->dev;
+
+ irq_value &= ~irq_msk;
+ if (!irq_value)
+ return;
+
+ if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n", phy_no);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
+ (pci_dev->revision == 0x20)) {
+ u32 reg_value;
+ int rc;
+
+ rc = hisi_sas_read32_poll_timeout_atomic(
+ HILINK_ERR_DFX, reg_value,
+ !((reg_value >> 8) & BIT(phy_no)),
+ 1000, 10000);
+ if (rc)
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
+}
+
+static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
u32 irq_msk;
int phy_no = 0;
@@ -1341,84 +1442,12 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
while (irq_msk) {
u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
- u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1_MSK);
- u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2_MSK);
-
- irq_value1 &= ~irq_msk1;
- irq_value2 &= ~irq_msk2;
-
- if ((irq_msk & (4 << (phy_no * 4))) &&
- irq_value1) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
- const struct hisi_sas_hw_error *error =
- &port_axi_error[i];
-
- if (!(irq_value1 & error->irq_msk))
- continue;
-
- dev_err(dev, "%s error (phy%d 0x%x) found!\n",
- error->msg, phy_no, irq_value1);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
- struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ if (irq_msk & (4 << (phy_no * 4)))
+ handle_chl_int1_v3_hw(hisi_hba, phy_no);
- if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
- dev_warn(dev, "phy%d identify timeout\n",
- phy_no);
- hisi_sas_notify_phy_event(phy,
- HISI_PHYE_LINK_RESET);
-
- }
-
- if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
- u32 reg_value = hisi_sas_phy_read32(hisi_hba,
- phy_no, STP_LINK_TIMEOUT_STATE);
-
- dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
- phy_no, reg_value);
- if (reg_value & BIT(4))
- hisi_sas_notify_phy_event(phy,
- HISI_PHYE_LINK_RESET);
- }
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
-
- if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
- (pci_dev->revision == 0x20)) {
- u32 reg_value;
- int rc;
-
- rc = hisi_sas_read32_poll_timeout_atomic(
- HILINK_ERR_DFX, reg_value,
- !((reg_value >> 8) & BIT(phy_no)),
- 1000, 10000);
- if (rc) {
- disable_phy_v3_hw(hisi_hba, phy_no);
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2,
- BIT(CHL_INT2_RX_INVLD_DW_OFF));
- hisi_sas_phy_read32(hisi_hba, phy_no,
- ERR_CNT_INVLD_DW);
- mdelay(1);
- enable_phy_v3_hw(hisi_hba, phy_no);
- }
- }
- }
+ if (irq_msk & (8 << (phy_no * 4)))
+ handle_chl_int2_v3_hw(hisi_hba, phy_no);
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
hisi_sas_phy_write32(hisi_hba, phy_no,
@@ -1964,11 +1993,11 @@ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
}
-static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ u32 status, reg_val;
int rc;
- u32 status;
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
@@ -1978,14 +2007,32 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
mdelay(10);
- hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
/* wait until bus idle */
rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
AM_CURR_TRANS_RETURN, status,
status == 0x3, 10, 100);
if (rc) {
- dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ rc = disable_host_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
return rc;
}
@@ -2433,6 +2480,41 @@ static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ dev_info(dev, "FLR prepare\n");
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ hisi_sas_controller_reset_prepare(hisi_hba);
+
+ rc = disable_host_v3_hw(hisi_hba);
+ if (rc)
+ dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
+}
+
+static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ hisi_sas_init_mem(hisi_hba);
+
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
+ return;
+ }
+
+ hisi_sas_controller_reset_done(hisi_hba);
+ dev_info(dev, "FLR done\n");
+}
+
enum {
/* instances of the controller */
hip08,
@@ -2444,38 +2526,24 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- u32 device_state, status;
+ u32 device_state;
int rc;
- u32 reg_val;
if (!pdev->pm_cap) {
dev_err(dev, "PCI PM not supported\n");
return -ENODEV;
}
- set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ return -1;
+
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
- /* disable DQ/PHY/bus */
- interrupt_disable_v3_hw(hisi_hba);
- hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
- hisi_sas_kill_tasklets(hisi_hba);
-
- hisi_sas_stop_phys(hisi_hba);
- reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
- AM_CTRL_GLOBAL);
- reg_val |= 0x1;
- hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
- AM_CTRL_GLOBAL, reg_val);
-
- /* wait until bus idle */
- rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
- AM_CURR_TRANS_RETURN, status,
- status == 0x3, 10, 100);
+ rc = disable_host_v3_hw(hisi_hba);
if (rc) {
- dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
@@ -2538,6 +2606,8 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
.error_detected = hisi_sas_error_detected_v3_hw,
.mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
.slot_reset = hisi_sas_slot_reset_v3_hw,
+ .reset_prepare = hisi_sas_reset_prepare_v3_hw,
+ .reset_done = hisi_sas_reset_done_v3_hw,
};
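The two new hooks tie the controller reset helpers into the PCI core, which calls .reset_prepare before issuing a function-level reset and .reset_done afterwards. A skeleton of the wiring, with illustrative foo names:

    /* sketch: where FLR hooks live in a PCI driver */
    #include <linux/pci.h>

    static void foo_reset_prepare(struct pci_dev *pdev)
    {
        /* quiesce I/O before the PCI core triggers the FLR, as v3 hw
         * does via hisi_sas_controller_reset_prepare() */
    }

    static void foo_reset_done(struct pci_dev *pdev)
    {
        /* re-init the HW and release blocked I/O, as v3 hw does via
         * hisi_sas_controller_reset_done() */
    }

    static const struct pci_error_handlers foo_err_handler = {
        .reset_prepare = foo_reset_prepare,
        .reset_done    = foo_reset_done,
    };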
static struct pci_driver sas_v3_pci_driver = {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3771e59a9fae..f02dcc875a09 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -563,6 +563,38 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
+struct scsi_host_mq_in_flight {
+ int cnt;
+};
+
+static void scsi_host_check_in_flight(struct request *rq, void *data,
+ bool reserved)
+{
+ struct scsi_host_mq_in_flight *in_flight = data;
+
+ if (blk_mq_request_started(rq))
+ in_flight->cnt++;
+}
+
+/**
+ * scsi_host_busy - Return the host busy counter
+ * @shost: Pointer to the Scsi_Host to check.
+ **/
+int scsi_host_busy(struct Scsi_Host *shost)
+{
+ struct scsi_host_mq_in_flight in_flight = {
+ .cnt = 0,
+ };
+
+ if (!shost->use_blk_mq)
+ return atomic_read(&shost->host_busy);
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
+ &in_flight);
+ return in_flight.cnt;
+}
+EXPORT_SYMBOL(scsi_host_busy);
+
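With blk-mq the host busy count is no longer a maintained atomic; scsi_host_busy() derives it on demand by walking the tag set and counting started requests. A hedged sketch of a caller (foo_host_is_saturated is illustrative, not part of the patch):

    #include <scsi/scsi_host.h>

    static bool foo_host_is_saturated(struct Scsi_Host *shost)
    {
        /* blk-mq: counts started requests via blk_mq_tagset_busy_iter();
         * legacy path: reads the old atomic host_busy counter */
        return scsi_host_busy(shost) >= shost->can_queue;
    }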
/**
* scsi_host_put - dec a Scsi_Host ref count
* @shost: Pointer to Scsi_Host to dec.
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 15c7f3b6f35e..58bb70b886d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3440,11 +3440,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
u16 bmic_device_index = 0;
- bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
-
- encl_dev->sas_address =
+ encl_dev->eli =
hpsa_get_enclosure_logical_identifier(h, scsi3addr);
+ bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+
if (encl_dev->target == -1 || encl_dev->lun == -1) {
rc = IO_OK;
goto out;
@@ -9697,7 +9697,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
- *identifier = rphy->identify.sas_address;
+ struct Scsi_Host *shost = phy_to_shost(rphy);
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *sd;
+
+ if (!shost)
+ return -ENXIO;
+
+ h = shost_to_hba(shost);
+
+ if (!h)
+ return -ENXIO;
+
+ sd = hpsa_find_device_by_sas_rphy(h, rphy);
+ if (!sd)
+ return -ENXIO;
+
+ *identifier = sd->eli;
+
return 0;
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index fb9f5e7f8209..59e023696fff 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -68,6 +68,7 @@ struct hpsa_scsi_dev_t {
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
unsigned char device_id[16]; /* from inquiry pg. 0x83 */
u64 sas_address;
+	u64 eli; /* enclosure logical identifier, from report diags */
unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
unsigned char model[16]; /* bytes 16-31 of inquiry data */
unsigned char rev; /* byte 2 of inquiry data */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index daefe8172b04..b64ca977825d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1322,7 +1322,7 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
/**
* ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
- * @scmd: Scsi_Cmnd with the scatterlist
+ * @scmd: struct scsi_cmnd with the scatterlist
* @evt: ibmvfc event struct
* @vfc_cmd: vfc_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 17df76f0be3c..9df8a1a2299c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);
@@ -262,7 +262,7 @@ static void gather_partition_info(void)
ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
- strncpy(partition_name, ppartition_name,
+ strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)
@@ -681,7 +681,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
/**
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
- * @cmd: Scsi_Cmnd with the scatterlist
+ * @cmd: struct scsi_cmnd with the scatterlist
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
*
@@ -1274,14 +1274,12 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
if (hostdata->client_migrated)
hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
- strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+ strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
- hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(hostdata->dev);
- strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
- hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+ strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
req->buffer = cpu_to_be64(hostdata->caps_addr);
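The strncpy→strlcpy conversions above also drop the manual '\0' terminator lines: strncpy leaves the destination unterminated when the source fills it, while strlcpy always terminates. A userspace demonstration, assuming libbsd's strlcpy (the kernel carries its own):

    #include <stdio.h>
    #include <string.h>
    #include <bsd/string.h>   /* userspace strlcpy; link with -lbsd */

    int main(void)
    {
        char dst1[8], dst2[8];

        strncpy(dst1, "0123456789", sizeof(dst1));   /* NOT terminated */
        strlcpy(dst2, "0123456789", sizeof(dst2));   /* always terminated */

        printf("strlcpy result: \"%s\" (%zu chars)\n", dst2, strlen(dst2));
        /* printing dst1 with %s here would read past the buffer */
        return 0;
    }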
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c3a76af9f5fa..fac377320158 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2233,7 +2233,7 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
return -ENOMEM;
}
- nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
+ nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
TARGET_PROT_NORMAL, name, nexus,
NULL);
if (IS_ERR(nexus->se_sess)) {
@@ -2267,8 +2267,7 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(se_sess);
- transport_deregister_session(se_sess);
+ target_remove_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);
@@ -3928,7 +3927,6 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
const char *name)
{
struct ibmvscsis_tport *tport =
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 87c94191033b..8c6627bc8a39 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -892,7 +892,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
/* Check for optional message byte */
if (imm_wait(dev) == (unsigned char) 0xb8)
imm_in(dev, &h, 1);
- cmd->result = (DID_OK << 16) + (l & STATUS_MASK);
+ cmd->result = (DID_OK << 16) | (l & STATUS_MASK);
}
if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
w_ctr(ppb, 0x4);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0a9b8b387bd2..f2ec80b0ffc0 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->hrrq[i].allow_interrupts = 0;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
- wmb();
/* Set interrupt mask to stop all new interrupts */
if (ioa_cfg->sis64)
@@ -2413,6 +2412,28 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
}
/**
+ * ipr_log_sis64_service_required_error - Log a sis64 service required error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_41_error *error;
+
+ error = &hostrcb->hcam.u.error64.u.type_41_error;
+
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
+ ipr_log_hex_data(ioa_cfg, error->data,
+ be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb_error, u) +
+ offsetof(struct ipr_hostrcb_type_41_error, data)));
+}
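The length handed to ipr_log_hex_data() is the hostrcb payload minus the two fixed prefixes that sit in front of data[]. A runnable check of that offsetof() arithmetic against mocked layouts (the field sets and the 900-byte length are illustrative, not the kernel definitions):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* mocked layouts mirroring the shape, not the exact kernel structs */
    struct type_41_error {
        uint8_t  failure_reason[64];
        uint32_t data[200];
    };

    struct hostrcb_error {
        uint32_t fd_ioasc;
        uint8_t  fd_res_addr[4];
        uint32_t fd_res_handle;
        uint32_t prc;
        union { struct type_41_error type_41; } u;
    };

    int main(void)
    {
        size_t hcam_len = 900;          /* illustrative hostrcb length */
        size_t hdr = offsetof(struct hostrcb_error, u) +
                     offsetof(struct type_41_error, data);
        printf("hex dump covers %zu of %zu bytes\n", hcam_len - hdr, hcam_len);
        return 0;
    }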
+/**
* ipr_log_generic_error - Log an adapter error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
@@ -2587,6 +2608,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_30:
ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
break;
+ case IPR_HOST_RCB_OVERLAY_ID_41:
+ ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
+ break;
case IPR_HOST_RCB_OVERLAY_ID_1:
case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
default:
@@ -8403,7 +8427,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
ioa_cfg->hrrq[i].allow_interrupts = 1;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
- wmb();
if (ioa_cfg->sis64) {
/* Set the adapter to the correct endian mode. */
writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 93570734cbfb..68afbbde54d3 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1135,6 +1135,11 @@ struct ipr_hostrcb_type_30_error {
struct ipr_hostrcb64_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_41_error {
+ u8 failure_reason[64];
+ __be32 data[200];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_error {
__be32 fd_ioasc;
struct ipr_res_addr fd_res_addr;
@@ -1173,6 +1178,7 @@ struct ipr_hostrcb64_error {
struct ipr_hostrcb_type_23_error type_23_error;
struct ipr_hostrcb_type_24_error type_24_error;
struct ipr_hostrcb_type_30_error type_30_error;
+ struct ipr_hostrcb_type_41_error type_41_error;
} u;
}__attribute__((packed, aligned (8)));
@@ -1218,6 +1224,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
+#define IPR_HOST_RCB_OVERLAY_ID_41 0x41
#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
u8 reserved1[3];
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 3f3569ec5ce3..f969a71348ef 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -59,34 +59,25 @@ static void fc_disc_restart(struct fc_disc *);
/**
* fc_disc_stop_rports() - Delete all the remote ports associated with the lport
* @disc: The discovery job to stop remote ports on
- *
- * Locking Note: This function expects that the lport mutex is locked before
- * calling it.
*/
static void fc_disc_stop_rports(struct fc_disc *disc)
{
- struct fc_lport *lport;
struct fc_rport_priv *rdata;
- lport = fc_disc_lport(disc);
+ lockdep_assert_held(&disc->disc_mutex);
- rcu_read_lock();
- list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ list_for_each_entry(rdata, &disc->rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
- rcu_read_unlock();
}
/**
* fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
* @disc: The discovery object to which the RSCN applies
* @fp: The RSCN frame
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
{
@@ -101,6 +92,8 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
LIST_HEAD(disc_ports);
struct fc_disc_port *dp, *next;
+ lockdep_assert_held(&disc->disc_mutex);
+
lport = fc_disc_lport(disc);
FC_DISC_DBG(disc, "Received an RSCN event\n");
@@ -220,12 +213,11 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_disc_restart() - Restart discovery
* @disc: The discovery object to be restarted
- *
- * Locking Note: This function expects that the disc mutex
- * is already locked.
*/
static void fc_disc_restart(struct fc_disc *disc)
{
+ lockdep_assert_held(&disc->disc_mutex);
+
if (!disc->disc_callback)
return;
@@ -271,16 +263,13 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
* fc_disc_done() - Discovery has been completed
* @disc: The discovery context
* @event: The discovery completion status
- *
- * Locking Note: This function expects that the disc mutex is locked before
- * it is called. The discovery callback is then made with the lock released,
- * and the lock is re-taken before returning from this function
*/
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
struct fc_lport *lport = fc_disc_lport(disc);
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&disc->disc_mutex);
FC_DISC_DBG(disc, "Discovery complete\n");
disc->pending = 0;
@@ -294,9 +283,11 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
* discovery, reverify or log them in. Otherwise, log them out.
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
+ *
+ * We don't need to use the _rcu variant here as the rport list
+ * is protected by the disc mutex which is already held on entry.
*/
- rcu_read_lock();
- list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ list_for_each_entry(rdata, &disc->rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
if (rdata->disc_id) {
@@ -307,7 +298,6 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
}
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
@@ -360,15 +350,14 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
/**
* fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
* @lport: The discovery context
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
struct fc_frame *fp;
struct fc_lport *lport = fc_disc_lport(disc);
+ lockdep_assert_held(&disc->disc_mutex);
+
WARN_ON(!fc_lport_test_ready(lport));
disc->pending = 1;
@@ -658,8 +647,6 @@ out:
* @lport: The local port to initiate discovery on
* @rdata: remote port private data
*
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
* On failure, an error code is returned.
*/
static int fc_disc_gpn_id_req(struct fc_lport *lport,
@@ -667,6 +654,7 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->disc.disc_mutex);
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
sizeof(struct fc_ns_fid));
if (!fp)
@@ -683,14 +671,13 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
* fc_disc_single() - Discover the directory information for a single target
* @lport: The local port the remote port is associated with
* @dp: The port to rediscover
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
@@ -708,7 +695,9 @@ static void fc_disc_stop(struct fc_lport *lport)
if (disc->pending)
cancel_delayed_work_sync(&disc->disc_work);
+ mutex_lock(&disc->disc_mutex);
fc_disc_stop_rports(disc);
+ mutex_unlock(&disc->disc_mutex);
}
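Throughout libfc this series converts "Locking Note:" comments into lockdep_assert_held() calls, turning documentation into a runtime check on lockdep-enabled kernels while compiling away elsewhere. The shape of the conversion, sketched with an illustrative foo_mutex:

    /* kernel-style sketch of documentation-to-assertion */
    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(foo_mutex);

    static void foo_must_hold_mutex(void)
    {
        /* no-op without CONFIG_LOCKDEP; splats if the caller forgot to
         * take foo_mutex, instead of relying on a silent comment */
        lockdep_assert_held(&foo_mutex);
        /* ... touch data protected by foo_mutex ... */
    }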
/**
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 21be672679fb..be83590ed955 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -237,14 +237,13 @@ static const char *fc_lport_state(struct fc_lport *lport)
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->ptp_rdata) {
fc_rport_logoff(lport->ptp_rdata);
kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
@@ -403,12 +402,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
* fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
* @lport: Fibre Channel local port receiving the RLIR
* @fp: The RLIR request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
@@ -420,9 +418,6 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
* fc_lport_recv_echo_req() - Handle received ECHO request
* @lport: The local port receiving the ECHO
* @fp: ECHO request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_echo_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -432,6 +427,8 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
void *pp;
void *dp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
fc_lport_state(lport));
@@ -456,9 +453,6 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
* fc_lport_recv_rnid_req() - Handle received Request Node ID data request
* @lport: The local port receiving the RNID
* @fp: The RNID request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -474,6 +468,8 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
u8 fmt;
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
fc_lport_state(lport));
@@ -515,12 +511,11 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
* fc_lport_recv_logo_req() - Handle received fabric LOGO request
* @lport: The local port receiving the LOGO
* @fp: The LOGO request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
@@ -553,11 +548,11 @@ EXPORT_SYMBOL(fc_fabric_login);
/**
* __fc_linkup() - Handler for transport linkup events
* @lport: The lport whose link is up
- *
- * Locking: must be called with the lp_mutex held
*/
void __fc_linkup(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (!lport->link_up) {
lport->link_up = 1;
@@ -584,11 +579,11 @@ EXPORT_SYMBOL(fc_linkup);
/**
* __fc_linkdown() - Handler for transport linkdown events
* @lport: The lport whose link is down
- *
- * Locking: must be called with the lp_mutex held
*/
void __fc_linkdown(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->link_up) {
lport->link_up = 0;
fc_lport_enter_reset(lport);
@@ -722,12 +717,11 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
/**
* fc_lport_enter_ready() - Enter the ready state and start discovery
* @lport: The local port that is ready
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered READY from state %s\n",
fc_lport_state(lport));
@@ -745,13 +739,12 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
* @lport: The local port which will have its Port ID set.
* @port_id: The new port ID.
* @fp: The frame containing the incoming request, or NULL.
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (port_id)
printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
lport->host->host_no, port_id);
@@ -801,9 +794,6 @@ EXPORT_SYMBOL(fc_lport_set_local_id);
* A received FLOGI request indicates a point-to-point connection.
* Accept it with the common service parameters indicating our N port.
* Set up to do a PLOGI if we have the higher-number WWPN.
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -816,6 +806,8 @@ static void fc_lport_recv_flogi_req(struct fc_lport *lport,
u32 remote_fid;
u32 local_fid;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
fc_lport_state(lport));
@@ -1006,12 +998,11 @@ EXPORT_SYMBOL(fc_lport_reset);
/**
* fc_lport_reset_locked() - Reset the local port w/ the lport lock held
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->dns_rdata) {
fc_rport_logoff(lport->dns_rdata);
lport->dns_rdata = NULL;
@@ -1035,12 +1026,11 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
/**
* fc_lport_enter_reset() - Reset the local port
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_reset(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
fc_lport_state(lport));
@@ -1065,12 +1055,11 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
/**
* fc_lport_enter_disabled() - Disable the local port
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
fc_lport_state(lport));
@@ -1321,14 +1310,13 @@ err:
/**
* fc_lport_enter_scr() - Send a SCR (State Change Register) request
* @lport: The local port to register for state changes
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_scr(struct fc_lport *lport)
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
fc_lport_state(lport));
@@ -1349,9 +1337,6 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
/**
* fc_lport_enter_ns() - register some object with the name server
* @lport: Fibre Channel local port to register
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1360,6 +1345,8 @@ static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
int size = sizeof(struct fc_ct_hdr);
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@@ -1419,14 +1406,13 @@ static struct fc_rport_operations fc_lport_rport_ops = {
/**
* fc_lport_enter_dns() - Create a fc_rport for the name server
* @lport: The local port requesting a remote port for the name server
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
fc_lport_state(lport));
@@ -1449,9 +1435,6 @@ err:
/**
* fc_lport_enter_ms() - management server commands
* @lport: Fibre Channel local port to register
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1461,6 +1444,8 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
size_t len;
int numattrs;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@@ -1536,14 +1521,13 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
/**
* fc_lport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
fc_lport_state(lport));
@@ -1668,15 +1652,14 @@ EXPORT_SYMBOL(fc_lport_logo_resp);
/**
* fc_lport_enter_logo() - Logout of the fabric
* @lport: The local port to be logged out
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_logo(struct fc_lport *lport)
{
struct fc_frame *fp;
struct fc_els_logo *logo;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
fc_lport_state(lport));
@@ -1811,14 +1794,13 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
/**
* fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
* @lport: Fibre Channel local port to be logged in to the fabric
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
fc_lport_state(lport));
@@ -1962,9 +1944,6 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* @job: The BSG Passthrough job
* @lport: The local port sending the request
* @did: The destination port id
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
@@ -1976,6 +1955,8 @@ static int fc_lport_els_request(struct bsg_job *job,
char *pp;
int len;
+ lockdep_assert_held(&lport->lp_mutex);
+
fp = fc_frame_alloc(lport, job->request_payload.payload_len);
if (!fp)
return -ENOMEM;
@@ -2023,9 +2004,6 @@ static int fc_lport_els_request(struct bsg_job *job,
* @lport: The local port sending the request
* @did: The destination FC-ID
* @tov: The timeout period to wait for the response
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
@@ -2036,6 +2014,8 @@ static int fc_lport_ct_request(struct bsg_job *job,
struct fc_ct_req *ct;
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
job->request_payload.payload_len);
if (!fp)
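
The fc_lport.c hunks above all apply one pattern: free-form "Locking Note"
comments are dropped in favor of lockdep_assert_held(), which states the same
contract in code and enforces it at runtime on CONFIG_LOCKDEP kernels. A
minimal sketch of the idiom, using a hypothetical structure rather than
libfc's actual types:

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	struct demo_port {
		struct mutex lock;
		int state;
	};

	/* Caller must hold demo_port->lock; the assert replaces the old comment. */
	static void demo_port_enter_state(struct demo_port *p, int new_state)
	{
		lockdep_assert_held(&p->lock);	/* compiles to a no-op without lockdep */

		p->state = new_state;
	}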
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 31d31aad3de1..372387a450df 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -136,13 +136,13 @@ EXPORT_SYMBOL(fc_rport_lookup);
* @ids: The identifiers for the new remote port
*
* The remote port will start in the INIT state.
- *
- * Locking note: must be called with the disc_mutex held.
*/
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@@ -184,6 +184,7 @@ void fc_rport_destroy(struct kref *kref)
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
+ WARN_ON(!list_empty(&rdata->peers));
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);
@@ -245,12 +246,12 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
* fc_rport_state_enter() - Change the state of a remote port
* @rdata: The remote port whose state should change
* @new: The new state
- *
- * Locking Note: Called with the rport lock held
*/
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
enum fc_rport_state new)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (rdata->rp_state != new)
rdata->retries = 0;
rdata->rp_state = new;
@@ -469,8 +470,6 @@ EXPORT_SYMBOL(fc_rport_login);
* @rdata: The remote port to be deleted
* @event: The event to report as the reason for deletion
*
- * Locking Note: Called with the rport lock held.
- *
* Allow state change into DELETE only once.
*
* Call queue_work only if there's no event already pending.
@@ -483,6 +482,8 @@ EXPORT_SYMBOL(fc_rport_login);
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (rdata->rp_state == RPORT_ST_DELETE)
return;
@@ -546,13 +547,12 @@ EXPORT_SYMBOL(fc_rport_logoff);
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
* @rdata: The remote port that is ready
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
fc_rport_state_enter(rdata, RPORT_ST_READY);
FC_RPORT_DBG(rdata, "Port is Ready\n");
@@ -615,15 +615,14 @@ static void fc_rport_timeout(struct work_struct *work)
* @rdata: The remote port on which the error happened
* @err: The error code
*
- * Locking Note: The rport lock is expected to be held before
- * calling this routine
- *
* Reference counting: does not modify kref
*/
static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
-err, fc_rport_state(rdata), rdata->retries);
@@ -662,15 +661,14 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
* If the error was an exchange timeout, retry immediately;
* otherwise wait for E_D_TOV.
*
- * Locking Note: The rport lock is expected to be held before
- * calling this routine
- *
* Reference counting: increments kref when scheduling retry_work
*/
static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
+ lockdep_assert_held(&rdata->rp_mutex);
+
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (err == -FC_EX_CLOSED)
goto out;
@@ -822,9 +820,6 @@ bad:
* fc_rport_enter_flogi() - Send a FLOGI request to the remote port (point-to-multipoint)
* @rdata: The remote port to send a FLOGI to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
@@ -832,6 +827,8 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (!lport->point_to_multipoint)
return fc_rport_enter_plogi(rdata);
@@ -1071,9 +1068,6 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
* fc_rport_enter_plogi() - Send Port Login (PLOGI) request
* @rdata: The remote port to send a PLOGI to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
@@ -1081,6 +1075,8 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (!fc_rport_compatible_roles(lport, rdata)) {
FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n");
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
@@ -1232,9 +1228,6 @@ put:
* fc_rport_enter_prli() - Send Process Login (PRLI) request
* @rdata: The remote port to send the PRLI request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
@@ -1247,6 +1240,8 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc4_prov *prov;
+ lockdep_assert_held(&rdata->rp_mutex);
+
/*
* If the rport is one of the well known addresses
* we skip PRLI and RTV and go straight to READY.
@@ -1372,9 +1367,6 @@ put:
* fc_rport_enter_rtv() - Send a Read Timeout Value (RTV) request
* @rdata: The remote port to send the RTV request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
@@ -1382,6 +1374,8 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc_lport *lport = rdata->local_port;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
fc_rport_state(rdata));
@@ -1406,8 +1400,6 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
* fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
* @rdata: The remote port that sent the RTV request
* @in_fp: The RTV request frame
- *
- * Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@@ -1417,6 +1409,9 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_els_rtv_acc *rtv;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_RPORT_DBG(rdata, "Received RTV request\n");
fp = fc_frame_alloc(lport, sizeof(*rtv));
@@ -1460,9 +1455,6 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_logo() - Send a logout (LOGO) request
* @rdata: The remote port to send the LOGO request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
@@ -1470,6 +1462,8 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
fc_rport_state(rdata));
@@ -1548,9 +1542,6 @@ put:
* fc_rport_enter_adisc() - Send Address Discover (ADISC) request
* @rdata: The remote port to send the ADISC request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
@@ -1558,6 +1549,8 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
fc_rport_state(rdata));
@@ -1581,8 +1574,6 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
* fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
* @rdata: The remote port that sent the ADISC request
* @in_fp: The ADISC request frame
- *
- * Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@@ -1592,6 +1583,9 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_els_adisc *adisc;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_RPORT_DBG(rdata, "Received ADISC request\n");
adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
@@ -1618,9 +1612,6 @@ drop:
* fc_rport_recv_rls_req() - Handle received Read Link Status request
* @rdata: The remote port that sent the RLS request
* @rx_fp: The RLS request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -1634,6 +1625,8 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc_host_statistics *hst;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
fc_rport_state(rdata));
@@ -1687,8 +1680,6 @@ out:
* Handle incoming ELS requests that require port login.
* The ELS opcode has already been validated by the caller.
*
- * Locking Note: Called with the lport lock held.
- *
* Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -1696,6 +1687,8 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
+ lockdep_assert_held(&lport->lp_mutex);
+
rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
if (!rdata) {
FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
@@ -1783,14 +1776,14 @@ busy:
* @lport: The local port that received the request
* @fp: The request frame
*
- * Locking Note: Called with the lport lock held.
- *
* Reference counting: does not modify kref
*/
void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
+ lockdep_assert_held(&lport->lp_mutex);
+
/*
* Handle FLOGI, PLOGI and LOGO requests separately, since they
* don't require prior login.
@@ -1831,8 +1824,6 @@ EXPORT_SYMBOL(fc_rport_recv_req);
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
*
- * Locking Note: The rport lock is held before calling this function.
- *
* Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
@@ -1845,6 +1836,8 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_seq_els_data rjt_data;
u32 sid;
+ lockdep_assert_held(&lport->lp_mutex);
+
sid = fc_frame_sid(fp);
FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
@@ -1955,9 +1948,6 @@ reject:
* fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
* @rdata: The remote port that sent the PRLI request
* @rx_fp: The PRLI request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -1976,6 +1966,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
@@ -2072,9 +2064,6 @@ drop:
* fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
* @rdata: The remote port that sent the PRLO request
* @rx_fp: The PRLO request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -2091,6 +2080,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
unsigned int plen;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
fc_rport_state(rdata));
@@ -2144,9 +2135,6 @@ drop:
* @lport: The local port that received the LOGO request
* @fp: The LOGO request frame
*
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
- *
* Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -2154,6 +2142,8 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
u32 sid;
+ lockdep_assert_held(&lport->lp_mutex);
+
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
@@ -2164,6 +2154,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
+ rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
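
The fc_rport.c hunks lean on the kref lifetime pattern visible above:
fc_rport_destroy() runs only when the final kref_put() drops the count to
zero, and kfree_rcu() defers the actual free until concurrent RCU readers are
done. The same shape on an invented object, as a hedged sketch:

	#include <linux/kref.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_rport {
		struct kref kref;
		struct rcu_head rcu;
	};

	/* Called by kref_put() when the last reference goes away. */
	static void demo_rport_destroy(struct kref *kref)
	{
		struct demo_rport *r = container_of(kref, struct demo_rport, kref);

		kfree_rcu(r, rcu);	/* free only after RCU read-side critical sections */
	}

	/* Every lookup/get is paired with one of these puts. */
	static void demo_rport_put(struct demo_rport *r)
	{
		kref_put(&r->kref, demo_rport_destroy);
	}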
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d6093838f5f2..93c66ebad907 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x/%x itt "
+ "task [op %x itt "
"0x%x/0x%x] "
"rejected.\n",
- task->hdr->opcode, opcode,
- task->itt, task->hdr_itt);
+ opcode, task->itt,
+ task->hdr_itt);
return -EACCES;
}
/*
@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (conn->session->fast_abort) {
iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x/%x itt "
+ "task [op %x itt "
"0x%x/0x%x] fast abort.\n",
- task->hdr->opcode, opcode,
- task->itt, task->hdr_itt);
+ opcode, task->itt,
+ task->hdr_itt);
return -EACCES;
}
break;
@@ -1705,6 +1705,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
sc->result = DID_NO_CONNECT << 16;
break;
}
+ /* fall through */
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1832,6 +1833,7 @@ static void iscsi_tmf_timedout(struct timer_list *t)
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
struct iscsi_tm *hdr, int age,
int timeout)
+ __must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
struct iscsi_task *task;
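
The __must_hold(&session->frwd_lock) added to iscsi_exec_task_mgmt_fn() is a
sparse context annotation rather than a runtime check: it tells the static
checker (make C=1) that the function must be entered and left with that lock
held, complementing the lockdep_assert_held() calls used in the libfc hunks.
A small, hypothetical illustration:

	#include <linux/spinlock.h>

	/* sparse warns about any caller that does not hold *lock across this call */
	static int demo_bump(spinlock_t *lock, int *counter)
		__must_hold(lock)
	{
		return ++(*counter);
	}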
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 369ef8f23b24..4fcb9e65be57 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -695,7 +695,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
struct scsi_data_buffer *sdb = scsi_in(task->sc);
/*
- * Setup copy of Data-In into the Scsi_Cmnd
+ * Setup copy of Data-In into the struct scsi_cmnd
* Scatterlist case:
* We set up the iscsi_segment to point to the next
* scatterlist entry to copy to. As we go along,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index ff1d612f6fb9..64a958a99f6a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -176,7 +176,6 @@ qc_already_gone:
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
- unsigned long flags;
struct sas_task *task;
struct scatterlist *sg;
int ret = AC_ERR_SYSTEM;
@@ -187,10 +186,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
struct Scsi_Host *host = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(host->transportt);
- /* TODO: audit callers to ensure they are ready for qc_issue to
- * unconditionally re-enable interrupts
- */
- local_irq_save(flags);
+ /* TODO: we should try to remove that unlock */
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
@@ -252,7 +248,6 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
out:
spin_lock(ap->lock);
- local_irq_restore(flags);
return ret;
}
@@ -557,34 +552,46 @@ int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
struct Scsi_Host *shost = ha->core.shost;
+ struct ata_host *ata_host;
struct ata_port *ap;
int rc;
- ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
- ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
- &sata_port_info,
- shost);
+ ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
+ if (!ata_host) {
+ SAS_DPRINTK("ata host alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ ata_host_init(ata_host, ha->dev, &sas_sata_ops);
+
+ ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
if (!ap) {
SAS_DPRINTK("ata_sas_port_alloc failed.\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_host;
}
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
rc = ata_sas_port_init(ap);
- if (rc) {
- ata_sas_port_destroy(ap);
- return rc;
- }
- rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
- if (rc) {
- ata_sas_port_destroy(ap);
- return rc;
- }
+ if (rc)
+ goto destroy_port;
+
+ rc = ata_sas_tport_add(ata_host->dev, ap);
+ if (rc)
+ goto destroy_port;
+
+ found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;
return 0;
+
+destroy_port:
+ ata_sas_port_destroy(ap);
+free_host:
+ ata_host_put(ata_host);
+ return rc;
}
void sas_ata_task_abort(struct sas_task *task)
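
The rewritten sas_ata_init() above follows the kernel's standard goto-unwind
shape: allocate first, and let each later failure jump to a label that tears
down everything acquired so far, in reverse order, ending with ata_host_put()
on the kzalloc'd host (the matching release on the success path lands in
sas_free_device(), per the sas_discover.c hunk that follows). A hedged sketch
with invented helpers:

	#include <linux/slab.h>

	/* Hypothetical types and helpers standing in for the ata_* calls above. */
	struct demo_host { int unused; };
	struct demo_dev { struct demo_host *host; };
	int demo_port_init(struct demo_host *h);
	int demo_port_add(struct demo_host *h);
	void demo_port_destroy(struct demo_host *h);
	void demo_host_put(struct demo_host *h);

	static int demo_init(struct demo_dev *dev)
	{
		struct demo_host *host;
		int rc;

		host = kzalloc(sizeof(*host), GFP_KERNEL);
		if (!host)
			return -ENOMEM;

		rc = demo_port_init(host);
		if (rc)
			goto free_host;

		rc = demo_port_add(host);
		if (rc)
			goto destroy_port;

		dev->host = host;	/* ownership transfers only on full success */
		return 0;

	destroy_port:
		demo_port_destroy(host);
	free_host:
		demo_host_put(host);
		return rc;
	}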
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 1ffca28fe6a8..0148ae62a52a 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -316,6 +316,8 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
ata_sas_port_destroy(dev->sata_dev.ap);
+ ata_host_put(dev->sata_dev.ata_host);
+ dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index ceab5e5c41c2..33229348dcb6 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -759,7 +759,7 @@ retry:
spin_unlock_irq(shost->host_lock);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
- __func__, atomic_read(&shost->host_busy), shost->host_failed);
+ __func__, scsi_host_busy(shost), shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
@@ -801,7 +801,7 @@ out:
goto retry;
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
- __func__, atomic_read(&shost->host_busy),
+ __func__, scsi_host_busy(shost),
shost->host_failed, tries);
}
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index cb6aa802c48e..092a971d066b 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,8 +1,8 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
-# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+# * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 20b249a649dd..e0d0da5f43d6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -840,8 +840,7 @@ struct lpfc_hba {
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
- uint32_t nvme_embed_pbde;
- uint32_t fcp_embed_pbde;
+ uint32_t cfg_enable_pbde;
uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 729d343861f4..5a25553415f8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -64,6 +64,9 @@
#define LPFC_MIN_MRQ_POST 512
#define LPFC_MAX_MRQ_POST 2048
+#define LPFC_MAX_NVME_INFO_TMP_LEN 100
+#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
+
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -158,14 +161,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
char *statep;
int i;
int len = 0;
+ char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
- len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n");
+ len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
return len;
}
if (phba->nvmet_support) {
if (!phba->targetport) {
- len = snprintf(buf, PAGE_SIZE,
+ len = scnprintf(buf, PAGE_SIZE,
"NVME Target: x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
@@ -175,135 +179,169 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
statep = "REGISTERED";
else
statep = "INIT";
- len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME Target Enabled State %s\n",
- statep);
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
- "NVME Target: lpfc",
- phba->brd_no,
- wwn_to_u64(vport->fc_portname.u.wwn),
- wwn_to_u64(vport->fc_nodename.u.wwn),
- phba->targetport->port_id);
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "\nNVME Target: Statistics\n");
+ scnprintf(tmp, sizeof(tmp),
+ "NVME Target Enabled State %s\n",
+ statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
+ "NVME Target: lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ phba->targetport->port_id);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
+ >= PAGE_SIZE)
+ goto buffer_done;
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Rcv %08x Drop %08x Abort %08x\n",
- atomic_read(&tgtp->rcv_ls_req_in),
- atomic_read(&tgtp->rcv_ls_req_drop),
- atomic_read(&tgtp->xmt_ls_abort));
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Rcv %08x Drop %08x Abort %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_drop),
+ atomic_read(&tgtp->xmt_ls_abort));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
if (atomic_read(&tgtp->rcv_ls_req_in) !=
atomic_read(&tgtp->rcv_ls_req_out)) {
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Rcv LS: in %08x != out %08x\n",
- atomic_read(&tgtp->rcv_ls_req_in),
- atomic_read(&tgtp->rcv_ls_req_out));
+ scnprintf(tmp, sizeof(tmp),
+ "Rcv LS: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_out));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x\n",
- atomic_read(&tgtp->xmt_ls_rsp),
- atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "LS: RSP Abort %08x xb %08x Err %08x\n",
- atomic_read(&tgtp->xmt_ls_rsp_aborted),
- atomic_read(&tgtp->xmt_ls_rsp_xb_set),
- atomic_read(&tgtp->xmt_ls_rsp_error));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP: Rcv %08x Defer %08x Release %08x "
- "Drop %08x\n",
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_defer),
- atomic_read(&tgtp->xmt_fcp_release),
- atomic_read(&tgtp->rcv_fcp_cmd_drop));
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp),
+ atomic_read(&tgtp->xmt_ls_drop),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
+ atomic_read(&tgtp->xmt_ls_rsp_error));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
+ atomic_read(&tgtp->xmt_fcp_release),
+ atomic_read(&tgtp->rcv_fcp_cmd_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
atomic_read(&tgtp->rcv_fcp_cmd_out)) {
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Rcv FCP: in %08x != out %08x\n",
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_out));
+ scnprintf(tmp, sizeof(tmp),
+ "Rcv FCP: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
- "drop %08x\n",
- atomic_read(&tgtp->xmt_fcp_read),
- atomic_read(&tgtp->xmt_fcp_read_rsp),
- atomic_read(&tgtp->xmt_fcp_write),
- atomic_read(&tgtp->xmt_fcp_rsp),
- atomic_read(&tgtp->xmt_fcp_drop));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
- atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
- atomic_read(&tgtp->xmt_fcp_rsp_error),
- atomic_read(&tgtp->xmt_fcp_rsp_drop));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
- atomic_read(&tgtp->xmt_fcp_rsp_aborted),
- atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
- atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "ABORT: Xmt %08x Cmpl %08x\n",
- atomic_read(&tgtp->xmt_fcp_abort),
- atomic_read(&tgtp->xmt_fcp_abort_cmpl));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
- atomic_read(&tgtp->xmt_abort_sol),
- atomic_read(&tgtp->xmt_abort_unsol),
- atomic_read(&tgtp->xmt_abort_rsp),
- atomic_read(&tgtp->xmt_abort_rsp_error));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "DELAY: ctx %08x fod %08x wqfull %08x\n",
- atomic_read(&tgtp->defer_ctx),
- atomic_read(&tgtp->defer_fod),
- atomic_read(&tgtp->defer_wqfull));
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
+ "drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_read),
+ atomic_read(&tgtp->xmt_fcp_read_rsp),
+ atomic_read(&tgtp->xmt_fcp_write),
+ atomic_read(&tgtp->xmt_fcp_rsp),
+ atomic_read(&tgtp->xmt_fcp_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
+ atomic_read(&tgtp->xmt_fcp_rsp_error),
+ atomic_read(&tgtp->xmt_fcp_rsp_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "ABORT: Xmt %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_fcp_abort),
+ atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_abort_sol),
+ atomic_read(&tgtp->xmt_abort_unsol),
+ atomic_read(&tgtp->xmt_abort_rsp),
+ atomic_read(&tgtp->xmt_abort_rsp_error));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "DELAY: ctx %08x fod %08x wqfull %08x\n",
+ atomic_read(&tgtp->defer_ctx),
+ atomic_read(&tgtp->defer_fod),
+ atomic_read(&tgtp->defer_wqfull));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* Calculate outstanding IOs */
tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
- len += snprintf(buf + len, PAGE_SIZE - len,
- "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
- "CTX Outstanding %08llx\n",
- phba->sli4_hba.nvmet_xri_cnt,
- phba->sli4_hba.nvmet_io_wait_cnt,
- phba->sli4_hba.nvmet_io_wait_total,
- tot);
-
- len += snprintf(buf+len, PAGE_SIZE-len, "\n");
- return len;
+ scnprintf(tmp, sizeof(tmp),
+ "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+ "CTX Outstanding %08llx\n\n",
+ phba->sli4_hba.nvmet_xri_cnt,
+ phba->sli4_hba.nvmet_io_wait_cnt,
+ phba->sli4_hba.nvmet_io_wait_total,
+ tot);
+ strlcat(buf, tmp, PAGE_SIZE);
+ goto buffer_done;
}
localport = vport->localport;
if (!localport) {
- len = snprintf(buf, PAGE_SIZE,
+ len = scnprintf(buf, PAGE_SIZE,
"NVME Initiator x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
lport = (struct lpfc_nvme_lport *)localport->private;
- len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
-
- spin_lock_irq(shost->host_lock);
- len += snprintf(buf + len, PAGE_SIZE - len,
- "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
- phba->brd_no,
- phba->sli4_hba.max_cfg_param.max_xri,
- phba->sli4_hba.nvme_xri_max,
- phba->sli4_hba.scsi_xri_max,
- lpfc_sli4_get_els_iocb_cnt(phba));
+ if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ rcu_read_lock();
+ scnprintf(tmp, sizeof(tmp),
+ "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+ phba->brd_no,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.nvme_xri_max,
+ phba->sli4_hba.scsi_xri_max,
+ lpfc_sli4_get_els_iocb_cnt(phba));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -311,13 +349,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
else
statep = "UNKNOWN ";
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
- "NVME LPORT lpfc",
- phba->brd_no,
- wwn_to_u64(vport->fc_portname.u.wwn),
- wwn_to_u64(vport->fc_nodename.u.wwn),
- localport->port_id, statep);
+ scnprintf(tmp, sizeof(tmp),
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
+ "NVME LPORT lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ localport->port_id, statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
rport = lpfc_ndlp_get_nrport(ndlp);
@@ -343,56 +383,77 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
/* Tab in to show lport ownership. */
- len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME RPORT ");
- if (phba->brd_no >= 10)
- len += snprintf(buf + len, PAGE_SIZE - len, " ");
-
- len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ",
- nrport->port_name);
- len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ",
- nrport->node_name);
- len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
- nrport->port_id);
+ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ if (phba->brd_no >= 10) {
+ if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
+ nrport->port_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
+ nrport->node_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "DID x%06x ",
+ nrport->port_id);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* An NVME rport can have multiple roles. */
- if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "INITIATOR ");
- if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "TARGET ");
- if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "DISCSRVC ");
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
+ if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
+ if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
+ if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
- FC_PORT_ROLE_NVME_DISCOVERY))
- len += snprintf(buf + len, PAGE_SIZE - len,
- "UNKNOWN ROLE x%x",
- nrport->port_role);
-
- len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
- /* Terminate the string. */
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ FC_PORT_ROLE_NVME_DISCOVERY)) {
+ scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
+ nrport->port_role);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "%s\n", statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- spin_unlock_irq(shost->host_lock);
+ rcu_read_unlock();
if (!lport)
- return len;
-
- len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %010x Cmpl %010x Abort %08x\n",
- atomic_read(&lport->fc4NvmeLsRequests),
- atomic_read(&lport->fc4NvmeLsCmpls),
- atomic_read(&lport->xmt_ls_abort));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
- atomic_read(&lport->xmt_ls_err),
- atomic_read(&lport->cmpl_ls_xb),
- atomic_read(&lport->cmpl_ls_err));
+ goto buffer_done;
+
+ if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
+ atomic_read(&lport->fc4NvmeLsRequests),
+ atomic_read(&lport->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
totin = 0;
totout = 0;
@@ -405,25 +466,46 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Total FCP Cmpl %016llx Issue %016llx "
- "OutIO %016llx\n",
- totin, totout, totout - totin);
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- " abort %08x noxri %08x nondlp %08x qdepth %08x "
- "wqerr %08x err %08x\n",
- atomic_read(&lport->xmt_fcp_abort),
- atomic_read(&lport->xmt_fcp_noxri),
- atomic_read(&lport->xmt_fcp_bad_ndlp),
- atomic_read(&lport->xmt_fcp_qdepth),
- atomic_read(&lport->xmt_fcp_err),
- atomic_read(&lport->xmt_fcp_wqerr));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "FCP CMPL: xb %08x Err %08x\n",
- atomic_read(&lport->cmpl_fcp_xb),
- atomic_read(&lport->cmpl_fcp_err));
+ scnprintf(tmp, sizeof(tmp),
+ "Total FCP Cmpl %016llx Issue %016llx "
+ "OutIO %016llx\n",
+ totin, totout, totout - totin);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x err %08x\n",
+ atomic_read(&lport->xmt_fcp_abort),
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_err),
+ atomic_read(&lport->xmt_fcp_wqerr));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+ strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+ len = strnlen(buf, PAGE_SIZE);
+
+ if (unlikely(len >= (PAGE_SIZE - 1))) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+ "6314 Catching potential buffer "
+ "overflow > PAGE_SIZE = %lu bytes\n",
+ PAGE_SIZE);
+ strlcpy(buf + PAGE_SIZE - 1 -
+ strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
+ LPFC_NVME_INFO_MORE_STR,
+ strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
+ + 1);
+ }
+
return len;
}
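
The lpfc_nvme_info_show() rewrite above swaps the open-coded
"len += snprintf(buf + len, PAGE_SIZE - len, ...)" arithmetic for a safer
pattern: snprintf() returns the length that *would* have been written, so the
running sum can walk past the buffer, while scnprintf() returns the bytes
actually stored and strlcat() reports when the PAGE_SIZE sysfs buffer would
overflow. A reduced sketch of the pattern, with made-up counters:

	#include <linux/device.h>
	#include <linux/string.h>

	static ssize_t demo_info_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
	{
		char tmp[64];

		buf[0] = '\0';	/* sysfs hands us a page; start the string empty */

		scnprintf(tmp, sizeof(tmp), "counter A: %u\n", 1u);
		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
			goto done;	/* truncated: stop appending */

		scnprintf(tmp, sizeof(tmp), "counter B: %u\n", 2u);
		strlcat(buf, tmp, PAGE_SIZE);
	done:
		return strnlen(buf, PAGE_SIZE);
	}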
@@ -5836,6 +5918,24 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
+ } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
+ switch (phba->fc_linkspeed) {
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_100GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
@@ -5891,7 +5991,6 @@ lpfc_get_stats(struct Scsi_Host *shost)
struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
- unsigned long seconds;
int rc = 0;
/*
@@ -5992,12 +6091,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->dumped_frames = -1;
- seconds = get_seconds();
- if (seconds < psli->stats_start)
- hs->seconds_since_last_reset = seconds +
- ((unsigned long)-1 - psli->stats_start);
- else
- hs->seconds_since_last_reset = seconds - psli->stats_start;
+ hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6076,7 +6170,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
else
lso->link_events = (phba->fc_eventTag >> 1);
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
mempool_free(pmboxq, phba->mbox_mem_pool);
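
The two stats hunks above can drop the manual wraparound guard because of the
clock they read: get_seconds() returns wall-clock time, which settimeofday()
or NTP can step backwards, whereas ktime_get_seconds() is monotonic since
boot, so elapsed time is a plain subtraction. The same idiom in isolation:

	#include <linux/timekeeping.h>

	static time64_t demo_start;

	static void demo_reset_stats(void)
	{
		demo_start = ktime_get_seconds();	/* monotonic: never goes backwards */
	}

	static time64_t demo_seconds_since_reset(void)
	{
		return ktime_get_seconds() - demo_start;	/* no underflow check needed */
	}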
@@ -6454,6 +6548,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_auto_imax = 0;
phba->initial_imax = phba->cfg_fcp_imax;
+ phba->cfg_enable_pbde = 0;
+
/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
index 931db52692f5..9659a8fff971 100644
--- a/drivers/scsi/lpfc/lpfc_attr.h
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index edb1a18a6414..90745feca808 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e7d95a4e8042..32347c87e3b4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 6b32b0ae7506..43cf46a3a71f 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4ae9ba425e78..bea24bc4410a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -469,7 +469,6 @@ int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d4a200ae5a6f..1cbdc892ff95 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f32eaeb2225a..30efc7bf91bd 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 5a7547f9d8d8..28e2b60fc5c0 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -150,6 +150,9 @@ struct lpfc_node_rrq {
unsigned long rrq_stop_time;
};
+#define lpfc_ndlp_check_qdepth(phba, ndlp) \
+ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri)
+
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6d84a10fef07..4dda969e947c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -5640,8 +5640,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
" mbx status x%x\n",
shdr_status, shdr_add_status, mb->mbxStatus);
- if (mb->mbxStatus && !(shdr_status &&
- shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
+ if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
+ (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
+ (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
mempool_free(pmb, phba->mbox_mem_pool);
goto error;
}
@@ -5661,6 +5662,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lcb_res = (struct fc_lcb_res_frame *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
icmd = &elsiocb->iocb;
icmd->ulpContext = lcb_context->rx_id;
icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
@@ -5669,7 +5671,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->lcb_sub_command = lcb_context->sub_command;
lcb_res->lcb_type = lcb_context->type;
+ lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
+ lcb_res->lcb_duration = lcb_context->duration;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -5712,6 +5716,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
uint32_t beacon_state)
{
struct lpfc_hba *phba = vport->phba;
+ union lpfc_sli4_cfg_shdr *cfg_shdr;
LPFC_MBOXQ_t *mbox = NULL;
uint32_t len;
int rc;
@@ -5720,6 +5725,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
if (!mbox)
return 1;
+ cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
len = sizeof(struct lpfc_mbx_set_beacon_config) -
sizeof(struct lpfc_sli4_cfg_mhdr);
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -5732,8 +5738,40 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
phba->sli4_hba.physical_port);
bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
beacon_state);
- bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
- bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
+ mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
+
+ /*
+ * Check bv1s bit before issuing the mailbox
+ * if bv1s == 1, LCB V1 supported
+ * else, LCB V0 supported
+ */
+
+ if (phba->sli4_hba.pc_sli4_params.bv1s) {
+ /* COMMON_SET_BEACON_CONFIG_V1 */
+ cfg_shdr->request.word9 = BEACON_VERSION_V1;
+ lcb_context->capability |= LCB_CAPABILITY_DURATION;
+ bf_set(lpfc_mbx_set_beacon_port_type,
+ &mbox->u.mqe.un.beacon_config, 0);
+ bf_set(lpfc_mbx_set_beacon_duration_v1,
+ &mbox->u.mqe.un.beacon_config,
+ be16_to_cpu(lcb_context->duration));
+ } else {
+ /* COMMON_SET_BEACON_CONFIG_V0 */
+ if (be16_to_cpu(lcb_context->duration) != 0) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ cfg_shdr->request.word9 = BEACON_VERSION_V0;
+ lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
+ bf_set(lpfc_mbx_set_beacon_state,
+ &mbox->u.mqe.un.beacon_config, beacon_state);
+ bf_set(lpfc_mbx_set_beacon_port_type,
+ &mbox->u.mqe.un.beacon_config, 1);
+ bf_set(lpfc_mbx_set_beacon_duration,
+ &mbox->u.mqe.un.beacon_config,
+ be16_to_cpu(lcb_context->duration));
+ }
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -5784,24 +5822,16 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
beacon->lcb_frequency,
be16_to_cpu(beacon->lcb_duration));
- if (phba->sli_rev < LPFC_SLI_REV4 ||
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2)) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
-
- if (phba->hba_flag & HBA_FCOE_MODE) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
if (beacon->lcb_sub_command != LPFC_LCB_ON &&
beacon->lcb_sub_command != LPFC_LCB_OFF) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
- if (beacon->lcb_sub_command == LPFC_LCB_ON &&
- be16_to_cpu(beacon->lcb_duration) != 0) {
+
+ if (phba->sli_rev < LPFC_SLI_REV4 ||
+ phba->hba_flag & HBA_FCOE_MODE ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+ LPFC_SLI_INTF_IF_TYPE_2)) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
@@ -5814,8 +5844,10 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
lcb_context->sub_command = beacon->lcb_sub_command;
+ lcb_context->capability = 0;
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
+ lcb_context->duration = beacon->lcb_duration;
lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
lcb_context->rx_id = cmdiocb->iocb.ulpContext;
lcb_context->ndlp = lpfc_nlp_get(ndlp);
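
Throughout the LCB changes above, lcb_duration is kept in wire (big-endian)
byte order and converted with be16_to_cpu() at each point of use, since the
ELS frame structures are declared in big-endian format (see the lpfc_hw.h
hunk below). A minimal illustration of the convention:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_wire_frame {
		__be16 duration;	/* FC frames carry big-endian fields */
	};

	static u16 demo_duration(const struct demo_wire_frame *f)
	{
		return be16_to_cpu(f->duration);	/* convert before CPU-side math */
	}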
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2fef54fab86d..eb71877f12f8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 08a3f1520159..009aa0eee040 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1065,14 +1065,17 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
struct fc_lcb_request_frame {
uint32_t lcb_command; /* ELS command opcode (0x81) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
-#define LPFC_LCB_ON 0x1
-#define LPFC_LCB_OFF 0x2
- uint8_t reserved[3];
-
+#define LPFC_LCB_ON 0x1
+#define LPFC_LCB_OFF 0x2
+ uint8_t reserved[2];
+ uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
-#define LPFC_LCB_GREEN 0x1
-#define LPFC_LCB_AMBER 0x2
+#define LPFC_LCB_GREEN 0x1
+#define LPFC_LCB_AMBER 0x2
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
+#define LCB_CAPABILITY_DURATION 1
+#define BEACON_VERSION_V1 1
+#define BEACON_VERSION_V0 0
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
};
@@ -1082,7 +1085,8 @@ struct fc_lcb_request_frame {
struct fc_lcb_res_frame {
uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
- uint8_t reserved[3];
+ uint8_t reserved[2];
+ uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f43f0bacb77a..083f8c8706e5 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1790,9 +1790,12 @@ struct lpfc_mbx_set_beacon_config {
#define lpfc_mbx_set_beacon_duration_SHIFT 16
#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF
#define lpfc_mbx_set_beacon_duration_WORD word4
-#define lpfc_mbx_set_beacon_status_duration_SHIFT 24
-#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF
-#define lpfc_mbx_set_beacon_status_duration_WORD word4
+
+/* COMMON_SET_BEACON_CONFIG_V1 */
+#define lpfc_mbx_set_beacon_duration_v1_SHIFT 16
+#define lpfc_mbx_set_beacon_duration_v1_MASK 0x0000FFFF
+#define lpfc_mbx_set_beacon_duration_v1_WORD word4
+ uint32_t word5; /* RESERVED */
};
struct lpfc_id_range {
@@ -2243,6 +2246,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
*/
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
+#define ADD_STATUS_INVALID_REQUEST 0x4B
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -3392,7 +3396,41 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19
-#define LPFC_NODELAY_MAX_IO 32
+
+#define cfg_bv1s_SHIFT 10
+#define cfg_bv1s_MASK 0x00000001
+#define cfg_bv1s_WORD word19
+
+ uint32_t word20;
+#define cfg_max_tow_xri_SHIFT 0
+#define cfg_max_tow_xri_MASK 0x0000ffff
+#define cfg_max_tow_xri_WORD word20
+
+ uint32_t word21; /* RESERVED */
+ uint32_t word22; /* RESERVED */
+ uint32_t word23; /* RESERVED */
+
+ uint32_t word24;
+#define cfg_frag_field_offset_SHIFT 0
+#define cfg_frag_field_offset_MASK 0x0000ffff
+#define cfg_frag_field_offset_WORD word24
+
+#define cfg_frag_field_size_SHIFT 16
+#define cfg_frag_field_size_MASK 0x0000ffff
+#define cfg_frag_field_size_WORD word24
+
+ uint32_t word25;
+#define cfg_sgl_field_offset_SHIFT 0
+#define cfg_sgl_field_offset_MASK 0x0000ffff
+#define cfg_sgl_field_offset_WORD word25
+
+#define cfg_sgl_field_size_SHIFT 16
+#define cfg_sgl_field_size_MASK 0x0000ffff
+#define cfg_sgl_field_size_WORD word25
+
+ uint32_t word26; /* Chain SGE initial value LOW */
+ uint32_t word27; /* Chain SGE initial value HIGH */
+#define LPFC_NODELAY_MAX_IO 32
};
#define LPFC_SET_UE_RECOVERY 0x10
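
The new cfg_bv1s and cfg_max_tow_xri fields follow lpfc's bitfield accessor
convention: every field defines _SHIFT, _MASK and _WORD constants, and the
driver's bf_get()/bf_set() macros paste those tokens together to read or
update the field within its 32-bit word. The macros below sketch that scheme
(an illustration in the same spirit, not copied from lpfc_hw4.h):

	/* Field "speed" occupies bits 16..23 of member word4. */
	#define demo_speed_SHIFT	16
	#define demo_speed_MASK		0x000000FF
	#define demo_speed_WORD		word4

	#define demo_bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
	#define demo_bf_set(name, ptr, value) \
		((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
			((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

	struct demo_params {
		unsigned int word4;
	};

	/* Usage: demo_bf_set(demo_speed, &p, 3); x = demo_bf_get(demo_speed, &p); */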
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 07ee34017d88..d48414e295a0 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 52cae87da0d2..f3cae733ae2d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10387,6 +10387,11 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
!nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+ if (!nvmet_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6424 NVMET XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
if (!nvme_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6100 NVME XRI exchange busy "
@@ -10639,6 +10644,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
+ sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -10668,18 +10674,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
}
- /* Only embed PBDE for if_type 6 */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_6) {
- phba->fcp_embed_pbde = 1;
- phba->nvme_embed_pbde = 1;
- }
-
- /* PBDE support requires xib be set */
- if (!bf_get(cfg_xib, mbx_sli4_parameters)) {
- phba->fcp_embed_pbde = 0;
- phba->nvme_embed_pbde = 0;
- }
+ /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
+ phba->cfg_enable_pbde = 0;
/*
* To support Suppress Response feature we must satisfy 3 conditions.
@@ -10713,10 +10711,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcp_embed_io = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
- "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n",
+ "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
- phba->fcp_embed_pbde, phba->fcp_embed_io,
- phba->nvme_support, phba->nvme_embed_pbde,
+ phba->cfg_enable_pbde,
+ phba->fcp_embed_io, phba->nvme_support,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 3b654ad08d1f..ea10f03437f5 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 47c02da11f01..deb094fdbb79 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 0758edb9dfe2..9c22a2c93462 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index b93e78f671fb..95d60ab5ebf9 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1a803975bcbc..bd9bce9d9974 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1062,6 +1062,9 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
+ if (vport->phba->sli_rev == LPFC_SLI_REV3)
+ ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;
/* software abort outstanding PLOGI */
lpfc_els_abort(vport->phba, ndlp);
@@ -1982,12 +1985,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_disc, nvpr))
ndlp->nlp_type |= NLP_NVME_DISCOVERY;
- /* This node is an NVME target. Adjust the command
- * queue depth on this node to not exceed the available
- * xris.
- */
- ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
-
/*
* If prli_fba is set, the Target supports FirstBurst.
* If prli_fb_sz is 0, the FirstBurst size is unlimited,
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 76a5a99605aa..028462e5994d 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1135,9 +1135,6 @@ out_err:
else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
- atomic_dec(&ndlp->cmd_pending);
-
/* Update stats and complete the IO. There is
* no need for dma unprep because the nvme_transport
* owns the dma address.
@@ -1279,6 +1276,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 9 */
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+ /* Words 13 14 15 are for PBDE support */
+
pwqeq->vport = vport;
return 0;
}
@@ -1378,7 +1377,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
data_sg = sg_next(data_sg);
sgl++;
}
- if (phba->nvme_embed_pbde) {
+ if (phba->cfg_enable_pbde) {
/* Use PBDE support for first SGL only, offset == 0 */
/* Words 13-15 */
bde = (struct ulp_bde64 *)
@@ -1394,10 +1393,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
}
- } else {
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
- memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
+ } else {
/* For this clause to be valid, the payload_length
* and sg_cnt must be zero.
*/
@@ -1546,17 +1543,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
- !expedite) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6174 Fail IO, ndlp qdepth exceeded: "
- "idx %d DID %x pend %d qdepth %d\n",
- lpfc_queue_info->index, ndlp->nlp_DID,
- atomic_read(&ndlp->cmd_pending),
- ndlp->cmd_qdepth);
- atomic_inc(&lport->xmt_fcp_qdepth);
- ret = -EBUSY;
- goto out_fail;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6174 Fail IO, ndlp qdepth exceeded: "
+ "idx %d DID %x pend %d qdepth %d\n",
+ lpfc_queue_info->index, ndlp->nlp_DID,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->cmd_qdepth);
+ atomic_inc(&lport->xmt_fcp_qdepth);
+ ret = -EBUSY;
+ goto out_fail;
+ }
}
lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
@@ -1614,8 +1613,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_free_nvme_buf;
}
- atomic_inc(&ndlp->cmd_pending);
-
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_queue_info->index, ndlp->nlp_DID);
@@ -1623,7 +1620,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_inc(&lport->xmt_fcp_wqerr);
- atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 Fail IO, Could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
@@ -2378,6 +2374,11 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
+ }
return lpfc_ncmd;
}
@@ -2396,7 +2397,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
unsigned long iflag = 0;
+ if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
+ atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
+
lpfc_ncmd->nonsg_phys = 0;
+ lpfc_ncmd->ndlp = NULL;
+ lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
+
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
@@ -2687,7 +2694,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *oldrport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
- struct lpfc_nodelist *prev_ndlp;
+ struct lpfc_nodelist *prev_ndlp = NULL;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2736,23 +2743,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (oldrport) {
+ /* New remoteport record does not guarantee valid
+ * host private memory area.
+ */
+ prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
- /* Same remoteport. Just reuse. */
+ /* Same remoteport - ndlp should match.
+ * Just reuse.
+ */
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
"remoteport %p wwpn 0x%llx, "
- "Data: x%x x%x %p x%x x%06x\n",
+ "Data: x%x x%x %p %p x%x x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
+ prev_ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
return 0;
}
- prev_ndlp = rport->ndlp;
/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
@@ -2786,13 +2799,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Remoteport %p WWNN 0x%llx, "
+ "lport %p Remoteport %p rport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x, ndlp %p\n",
- lport, remote_port,
+ "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+ lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
- ndlp);
+ ndlp, prev_ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2970,7 +2983,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
u32 i, wait_cnt = 0;
- if (phba->sli_rev < LPFC_SLI_REV4)
+ if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
return;
/* Cycle through all NVME rings and make sure all outstanding
@@ -2979,6 +2992,9 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring;
+ if (!pring)
+ continue;
+
/* Retrieve everything on the txcmplq */
while (!list_empty(&pring->txcmplq)) {
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 04bd463dd043..cfd4719be25c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -86,6 +86,7 @@ struct lpfc_nvme_buf {
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint16_t cpu;
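The LPFC_BUMP_QDEPTH flag added above exists so a buffer's release path only decrements the per-node pending count when its get path actually incremented it. A minimal userspace sketch of that balanced get/release pattern (names are stand-ins, and the threshold check is an assumption, not lpfc_ndlp_check_qdepth()'s real test):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BUMP_QDEPTH 0x2         /* mirrors LPFC_BUMP_QDEPTH above */

struct node { atomic_int cmd_pending; int cmd_qdepth; };
struct buf  { unsigned int flags; struct node *ndlp; };

/* Stand-in for lpfc_ndlp_check_qdepth(): the real test is not shown in
 * this hunk, so assume "meter once the node is past half its depth". */
static bool node_check_qdepth(struct node *n)
{
        return atomic_load(&n->cmd_pending) > n->cmd_qdepth / 2;
}

static void get_buf(struct buf *b, struct node *n)
{
        b->ndlp = n;
        if (node_check_qdepth(n)) {
                atomic_fetch_add(&n->cmd_pending, 1);
                b->flags |= BUMP_QDEPTH;        /* remember we bumped */
        }
}

static void release_buf(struct buf *b)
{
        /* decrement only if this buffer bumped the counter, keeping
         * get/release balanced even if metering turned on in between */
        if ((b->flags & BUMP_QDEPTH) && b->ndlp)
                atomic_fetch_sub(&b->ndlp->cmd_pending, 1);
        b->flags &= ~BUMP_QDEPTH;
        b->ndlp = NULL;
}

int main(void)
{
        struct node n = { .cmd_qdepth = 4 };
        struct buf b = { 0 };

        atomic_store(&n.cmd_pending, 3);
        get_buf(&b, &n);        /* bumps: 3 > 4/2 */
        release_buf(&b);        /* restores the count */
        printf("pending=%d\n", atomic_load(&n.cmd_pending));
        return 0;
}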
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 7271c9d885dd..b766afe10d3d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -402,6 +402,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
return;
@@ -1116,8 +1117,17 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
ctxp->oxid, ctxp->size, smp_processor_id());
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6425 Defer rcv: no buffer xri x%x: "
+ "flg %x ste %x\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
+ return;
+ }
+
tgtp = phba->targetport->private;
- atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+ if (tgtp)
+ atomic_inc(&tgtp->rcv_fcp_cmd_defer);
/* Free the nvmebuf since a new buffer already replaced it */
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
@@ -1732,9 +1742,12 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- if (!nvmebuf || !phba->targetport) {
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
+ if (!phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO\n");
+ "6154 LS Drop IO x%x\n", oxid);
oxid = 0;
size = 0;
sid = 0;
@@ -1744,9 +1757,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
@@ -1759,8 +1770,7 @@ dropit:
lpfc_nvmeio_data(phba, "NVMET LS DROP: "
"xri x%x sz %d from %06x\n",
oxid, size, sid);
- if (nvmebuf)
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
}
ctxp->phba = phba;
@@ -1803,8 +1813,7 @@ dropit:
ctxp->oxid, rc);
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
- if (nvmebuf)
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
atomic_inc(&tgtp->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
@@ -2492,7 +2501,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
/* Word 11 - set pbde later */
- if (phba->nvme_embed_pbde) {
+ if (phba->cfg_enable_pbde) {
do_pbde = 1;
} else {
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
@@ -2607,16 +2616,19 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(cnt);
- if (do_pbde && i == 0) {
+ if (i == 0) {
bde = (struct ulp_bde64 *)&wqe->words[13];
- memset(bde, 0, sizeof(struct ulp_bde64));
- /* Words 13-15 (PBDE)*/
- bde->addrLow = sgl->addr_lo;
- bde->addrHigh = sgl->addr_hi;
- bde->tus.f.bdeSize =
- le32_to_cpu(sgl->sge_len);
- bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bde->tus.w = cpu_to_le32(bde->tus.w);
+ if (do_pbde) {
+ /* Words 13-15 (PBDE) */
+ bde->addrLow = sgl->addr_lo;
+ bde->addrHigh = sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ } else {
+ memset(bde, 0, sizeof(struct ulp_bde64));
+ }
}
sgl++;
ctxp->offset += cnt;
@@ -3105,11 +3117,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}
aerr:
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+ if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+ list_del(&ctxp->list);
+ ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
return 1;
}
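Several hunks above gate the PBDE words the same way: when cfg_enable_pbde is on, the first (offset 0) SGL entry is mirrored into WQE words 13-15; otherwise those words must be zeroed. A simplified sketch of that choice (struct shape reduced, flags and byte-swapping omitted for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reduced stand-in for ulp_bde64: three 32-bit words written into WQE
 * words 13-15. */
struct bde64 { uint32_t addr_lo, addr_hi, size_and_flags; };
struct sge   { uint32_t addr_lo, addr_hi, len; };

static void fill_pbde(uint32_t *wqe_w13, const struct sge *first_sgl,
                      int do_pbde)
{
        struct bde64 *bde = (struct bde64 *)wqe_w13;

        if (do_pbde) {
                /* mirror the first (offset 0) SGL entry into the WQE */
                bde->addr_lo = first_sgl->addr_lo;
                bde->addr_hi = first_sgl->addr_hi;
                bde->size_and_flags = first_sgl->len;
        } else {
                /* PBDE off: words 13-15 must be left clean */
                memset(bde, 0, sizeof(*bde));
        }
}

int main(void)
{
        uint32_t wqe[16] = { 0 };
        struct sge first = { 0x10000000u, 0x0u, 512u };

        fill_pbde(&wqe[13], &first, 1);
        printf("w13=0x%x w14=0x%x w15=0x%x\n",
               (unsigned int)wqe[13], (unsigned int)wqe[14],
               (unsigned int)wqe[15]);
        return 0;
}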
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 81f520abfd64..1aaff63f1f41 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a94fb9f8bb44..5c7858e735c9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -995,6 +995,11 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock(&phba->scsi_buf_list_put_lock);
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
/**
@@ -1044,6 +1049,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found)
return NULL;
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
/**
@@ -1134,7 +1144,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
+ if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
+ atomic_dec(&psb->ndlp->cmd_pending);
+ psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
phba->lpfc_release_scsi_buf(phba, psb);
}
@@ -3017,8 +3030,8 @@ out:
if (err_type == BGS_GUARD_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
@@ -3028,8 +3041,8 @@ out:
} else if (err_type == BGS_REFTAG_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3040,8 +3053,8 @@ out:
} else if (err_type == BGS_APPTAG_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3096,7 +3109,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
spin_unlock(&_dump_buf_lock);
if (lpfc_bgs_get_invalid_prof(bgstat)) {
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9072 BLKGRD: Invalid BG Profile in cmd"
" 0x%x lba 0x%llx blk cnt 0x%x "
@@ -3108,7 +3121,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
}
if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9073 BLKGRD: Invalid BG PDIF Block in cmd"
" 0x%x lba 0x%llx blk cnt 0x%x "
@@ -3124,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9055 BLKGRD: Guard Tag error in cmd"
@@ -3140,8 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3157,8 +3170,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3311,12 +3324,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/*
* Setup the first Payload BDE. For FCoE we just key off
- * Performance Hints, for FC we utilize fcp_embed_pbde.
+ * Performance Hints, for FC we use lpfc_enable_pbde.
+ * We populate words 13-15 of IOCB/WQE.
*/
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
- phba->fcp_embed_pbde) {
+ phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &(iocb_cmd->unsli3.sli3Words[5]);
bde->addrLow = first_data_sgl->addr_lo;
bde->addrHigh = first_data_sgl->addr_hi;
bde->tus.f.bdeSize =
@@ -3330,6 +3344,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
+
+ if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
+ phba->cfg_enable_pbde) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ memset(bde, 0, (sizeof(uint32_t) * 3));
+ }
}
/*
@@ -3866,7 +3887,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
out:
- cmnd->result = ScsiResult(host_status, scsi_status);
+ cmnd->result = host_status << 16 | scsi_status;
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
@@ -4019,7 +4040,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
- cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
break;
@@ -4053,14 +4074,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
lpfc_cmd->result ==
IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
- cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ cmd->result = DID_NO_CONNECT << 16;
break;
}
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = ScsiResult(DID_REQUEUE, 0);
+ cmd->result = DID_REQUEUE << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4094,16 +4115,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/* else: fall through */
default:
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
break;
}
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
- cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
- SAM_STAT_BUSY);
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
+ SAM_STAT_BUSY;
} else
- cmd->result = ScsiResult(DID_OK, 0);
+ cmd->result = DID_OK << 16;
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -4122,7 +4143,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4135,8 +4155,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_change_time = jiffies;
}
spin_unlock_irqrestore(shost->host_lock, flags);
- } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
@@ -4530,6 +4548,11 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
int err;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+
+ /* sanity check on references */
+ if (unlikely(!rdata) || unlikely(!rport))
+ goto out_fail_command;
+
err = fc_remote_port_chkready(rport);
if (err) {
cmnd->result = err;
@@ -4555,33 +4578,36 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
goto out_tgt_busy;
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
- "3377 Target Queue Full, scsi Id:%d Qdepth:%d"
- " Pending command:%d"
- " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
- " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- ndlp->nlp_sid, ndlp->cmd_qdepth,
- atomic_read(&ndlp->cmd_pending),
- ndlp->nlp_nodename.u.wwn[0],
- ndlp->nlp_nodename.u.wwn[1],
- ndlp->nlp_nodename.u.wwn[2],
- ndlp->nlp_nodename.u.wwn[3],
- ndlp->nlp_nodename.u.wwn[4],
- ndlp->nlp_nodename.u.wwn[5],
- ndlp->nlp_nodename.u.wwn[6],
- ndlp->nlp_nodename.u.wwn[7],
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
- goto out_tgt_busy;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
+ "3377 Target Queue Full, scsi Id:%d "
+ "Qdepth:%d Pending command:%d"
+ " WWNN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x, "
+ " WWPN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x",
+ ndlp->nlp_sid, ndlp->cmd_qdepth,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->nlp_nodename.u.wwn[0],
+ ndlp->nlp_nodename.u.wwn[1],
+ ndlp->nlp_nodename.u.wwn[2],
+ ndlp->nlp_nodename.u.wwn[3],
+ ndlp->nlp_nodename.u.wwn[4],
+ ndlp->nlp_nodename.u.wwn[5],
+ ndlp->nlp_nodename.u.wwn[6],
+ ndlp->nlp_nodename.u.wwn[7],
+ ndlp->nlp_portname.u.wwn[0],
+ ndlp->nlp_portname.u.wwn[1],
+ ndlp->nlp_portname.u.wwn[2],
+ ndlp->nlp_portname.u.wwn[3],
+ ndlp->nlp_portname.u.wwn[4],
+ ndlp->nlp_portname.u.wwn[5],
+ ndlp->nlp_portname.u.wwn[6],
+ ndlp->nlp_portname.u.wwn[7]);
+ goto out_tgt_busy;
+ }
}
- atomic_inc(&ndlp->cmd_pending);
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -4599,6 +4625,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->ndlp = ndlp;
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
@@ -4681,7 +4708,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
- atomic_dec(&ndlp->cmd_pending);
return SCSI_MLQUEUE_HOST_BUSY;
out_tgt_busy:
@@ -4714,7 +4740,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_buf *lpfc_cmd;
IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
- struct lpfc_sli_ring *pring_s4;
+ struct lpfc_sli_ring *pring_s4 = NULL;
int ret_val;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -4744,8 +4770,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
iocb = &lpfc_cmd->cur_iocbq;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (!(phba->cfg_fof) ||
+ (!(iocb->iocb_flag & LPFC_IO_FOF))) {
+ pring_s4 =
+ phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
+ } else {
+ iocb->hba_wqidx = 0;
+ pring_s4 = phba->sli4_hba.oas_wq->pring;
+ }
+ if (!pring_s4) {
+ ret = FAILED;
+ goto out_unlock;
+ }
+ spin_lock(&pring_s4->ring_lock);
+ }
/* the command is in process of being cancelled */
if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
@@ -4759,6 +4802,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
* see the completion before the eh fired. Just return SUCCESS.
*/
if (lpfc_cmd->pCmd != cmnd) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3170 SCSI Layer abort requested I/O has been "
"completed by LLD.\n");
@@ -4771,6 +4816,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3389 SCSI Layer I/O Abort Request is pending\n");
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
goto wait_for_cmpl;
}
@@ -4778,6 +4825,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb = __lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
ret = FAILED;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
goto out_unlock;
}
@@ -4815,14 +4864,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
+ lpfc_cmd->waitq = &waitq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
- if (pring_s4 == NULL) {
- ret = FAILED;
- goto out_unlock;
- }
/* Note: both hbalock and ring_lock must be set here */
- spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocb, 0);
spin_unlock(&pring_s4->ring_lock);
@@ -4835,6 +4879,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (ret_val == IOCB_ERROR) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring_s4->ring_lock, flags);
+ else
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* Indicate the IO is not being aborted by the driver. */
+ iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->waitq = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
@@ -4845,7 +4900,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
- lpfc_cmd->waitq = &waitq;
/* Wait for abort to complete */
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
@@ -5006,6 +5060,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->ndlp = pnode;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
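The ScsiResult() removals above spell out the SCSI midlayer result word directly: status byte in bits 7:0, host byte in bits 23:16, driver byte in bits 31:24. A small standalone check of that packing (constants copied from the values used in the hunks; they match include/scsi/scsi.h):

#include <stdio.h>

#define DID_ABORT                0x05
#define DRIVER_SENSE             0x08
#define SAM_STAT_CHECK_CONDITION 0x02

int main(void)
{
        unsigned int result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
                              SAM_STAT_CHECK_CONDITION;

        /* unpack to show each byte landed in its lane */
        printf("result=0x%08x driver=0x%02x host=0x%02x status=0x%02x\n",
               result, (result >> 24) & 0xff, (result >> 16) & 0xff,
               result & 0xff);
        return 0;
}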
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c38e4da71f5f..cc99859774ff 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -134,11 +134,13 @@ struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
uint32_t timeout;
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6f3c00a233ec..9830bdb6e072 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -145,6 +145,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
uint32_t idx;
uint32_t i = 0;
uint8_t *tmp;
+ u32 if_type;
/* sanity check on queue memory */
if (unlikely(!q))
@@ -199,8 +200,14 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
q->queue_id);
} else {
bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
- bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+
+ /* Leave bits <23:16> clear for if_type 6 dpp */
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &q->phba->sli4_hba.sli_intf);
+ if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell,
+ host_index);
}
} else if (q->db_format == LPFC_DB_RING_FORMAT) {
bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
@@ -4591,7 +4598,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
/* Give the INITFF and Post time to settle. */
mdelay(100);
@@ -4638,7 +4645,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
/* Reset HBA AER if it was enabled, note hba_flag was reset above */
if (hba_aer_enabled)
@@ -9110,8 +9117,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/* Note, word 10 is already initialized to 0 */
- /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
- if (phba->fcp_embed_pbde)
+ /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
+ if (phba->cfg_enable_pbde)
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
else
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
@@ -9174,8 +9181,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/* Note, word 10 is already initialized to 0 */
- /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
- if (phba->fcp_embed_pbde)
+ /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
+ if (phba->cfg_enable_pbde)
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
else
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
@@ -10696,6 +10703,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
+ irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
+ spin_unlock_irq(&phba->hbalock);
+ goto release_iocb;
+ }
if (abort_iotag != 0 &&
abort_iotag <= phba->sli.last_iotag)
abort_iocb =
@@ -10717,6 +10730,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&phba->hbalock);
}
+release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -10773,6 +10787,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iabt = NULL;
int retval;
unsigned long iflags;
+ struct lpfc_nodelist *ndlp;
lockdep_assert_held(&phba->hbalock);
@@ -10803,9 +10818,13 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli_rev == LPFC_SLI_REV4) {
iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
iabt->un.acxri.abortContextTag = cmdiocb->iotag;
- }
- else
+ } else {
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ if (pring->ringno == LPFC_ELS_RING) {
+ ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
+ iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
+ }
+ }
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
@@ -11084,10 +11103,11 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd;
int rc = 1;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP))
+ if (iocbq->vport != vport)
return rc;
- if (iocbq->vport != vport)
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
return rc;
lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
@@ -11097,13 +11117,13 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
switch (ctx_cmd) {
case LPFC_CTX_LUN:
- if ((lpfc_cmd->rdata->pnode) &&
+ if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
(scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
rc = 0;
break;
case LPFC_CTX_TGT:
- if ((lpfc_cmd->rdata->pnode) &&
+ if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
rc = 0;
break;
@@ -11218,6 +11238,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
int errcnt = 0, ret_val = 0;
int i;
+ /* all I/Os are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
+ return errcnt;
+
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
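The lpfc_sli4_wq_put() hunk above keeps doorbell bits 23:16 clear on if_type 6 so they remain available for DPP. A hypothetical sketch of that conditional packing (the field placement here is illustrative; the real layout lives in the lpfc_wq_db_* macros):

#include <stdint.h>
#include <stdio.h>

#define LPFC_SLI_INTF_IF_TYPE_6 6       /* value assumed for illustration */

/* Hypothetical packing: posted count in 31:24, index in 23:16,
 * queue id in 15:0. */
static uint32_t wq_doorbell(int if_type, unsigned int num_posted,
                            unsigned int host_index, unsigned int queue_id)
{
        uint32_t db = 0;

        db |= (num_posted & 0xff) << 24;
        db |= queue_id & 0xffff;
        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                db |= (host_index & 0xff) << 16;  /* else leave 23:16 clear */
        return db;
}

int main(void)
{
        printf("if_type4=0x%08x if_type6=0x%08x\n",
               (unsigned int)wq_doorbell(4, 1, 0x2a, 0x10),
               (unsigned int)wq_doorbell(6, 1, 0x2a, 0x10));
        return 0;
}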
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 431754195505..34b7ab69b9b4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -339,7 +339,7 @@ struct lpfc_sli {
struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
size_t iocbq_lookup_len; /* current length of the array */
uint16_t last_iotag; /* last allocated IOTAG */
- unsigned long stats_start; /* in seconds */
+ time64_t stats_start; /* in seconds */
struct lpfc_lnk_stat lnk_stat_offsets;
};
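stats_start moving from unsigned long to time64_t pairs with the get_seconds() to ktime_get_seconds() switch above: a signed 64-bit seconds counter that neither overflows in 2038 nor jumps when the wall clock is stepped. A userspace analogue of the same idea:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t time64_t;       /* the kernel type is likewise 64-bit */

static time64_t stats_start;

static void stats_reset(void)
{
        struct timespec ts;

        /* monotonic clock: no 2038 overflow in the 64-bit field, and no
         * jumps when the wall clock changes */
        clock_gettime(CLOCK_MONOTONIC, &ts);
        stats_start = ts.tv_sec;
}

int main(void)
{
        stats_reset();
        printf("stats_start=%lld s on the monotonic clock\n",
               (long long)stats_start);
        return 0;
}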
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index cf64aca82bd0..399c0015c546 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -490,6 +490,7 @@ struct lpfc_pc_sli4_params {
uint8_t eqav;
uint8_t cqav;
uint8_t wqsize;
+ uint8_t bv1s;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -774,7 +775,9 @@ struct lpfc_rdp_context {
struct lpfc_lcb_context {
uint8_t sub_command;
uint8_t type;
+ uint8_t capability;
uint8_t frequency;
+ uint16_t duration;
uint16_t ox_id;
uint16_t rx_id;
struct lpfc_nodelist *ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 18c23afcf46b..501249509af4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.4"
+#define LPFC_DRIVER_VERSION "12.0.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -33,5 +33,5 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \
- "Reserved. The term \"Broadcom\" refers to Broadcom Limited " \
+ "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 81bc12dedf41..1ff0f7de9105 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 62295971f66c..f4b8528dd2e7 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8e8cf1145d7f..8c7154143a4e 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -371,7 +371,7 @@ mega_runpendq(adapter_t *adapter)
* The command queuing entry point for the mid-layer.
*/
static int
-megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
+megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
adapter_t *adapter;
scb_t *scb;
@@ -425,7 +425,7 @@ static DEF_SCSI_QCMD(megaraid_queue)
* commands.
*/
static inline scb_t *
-mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd)
+mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
{
struct list_head *head = &adapter->free_list;
scb_t *scb;
@@ -457,7 +457,7 @@ mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd)
* and the channel number.
*/
static inline int
-mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
+mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
{
int tgt;
int ldrv_num;
@@ -520,7 +520,7 @@ mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
* boot settings.
*/
static scb_t *
-mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
+mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
mega_ext_passthru *epthru;
mega_passthru *pthru;
@@ -951,8 +951,8 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
* prepare a command for the scsi physical devices.
*/
static mega_passthru *
-mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
- int channel, int target)
+mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
+ int channel, int target)
{
mega_passthru *pthru;
@@ -1015,8 +1015,9 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
* commands for devices which can take extended CDBs (>10 bytes)
*/
static mega_ext_passthru *
-mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
- int channel, int target)
+mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
+ struct scsi_cmnd *cmd,
+ int channel, int target)
{
mega_ext_passthru *epthru;
@@ -1417,7 +1418,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
{
mega_ext_passthru *epthru = NULL;
struct scatterlist *sgl;
- Scsi_Cmnd *cmd = NULL;
+ struct scsi_cmnd *cmd = NULL;
mega_passthru *pthru = NULL;
mbox_t *mbox = NULL;
u8 c;
@@ -1652,14 +1653,14 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
static void
mega_rundoneq (adapter_t *adapter)
{
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
struct list_head *pos;
list_for_each(pos, &adapter->completed_list) {
struct scsi_pointer* spos = (struct scsi_pointer *)pos;
- cmd = list_entry(spos, Scsi_Cmnd, SCp);
+ cmd = list_entry(spos, struct scsi_cmnd, SCp);
cmd->scsi_done(cmd);
}
@@ -1722,7 +1723,7 @@ static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
struct scatterlist *sg;
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
int sgcnt;
int idx;
@@ -1869,7 +1870,7 @@ megaraid_info(struct Scsi_Host *host)
* aborted. All the commands issued to the F/W must complete.
*/
static int
-megaraid_abort(Scsi_Cmnd *cmd)
+megaraid_abort(struct scsi_cmnd *cmd)
{
adapter_t *adapter;
int rval;
@@ -1933,7 +1934,7 @@ megaraid_reset(struct scsi_cmnd *cmd)
* issued to the controller, abort/reset it. Otherwise return failure
*/
static int
-megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
{
struct list_head *pos, *next;
scb_t *scb;
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 18e85d9267ff..cce23a086fbe 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -191,7 +191,7 @@ typedef struct {
u32 dma_type;
u32 dma_direction;
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
dma_addr_t dma_h_bulkdata;
dma_addr_t dma_h_sgdata;
@@ -942,7 +942,7 @@ static int issue_scb(adapter_t *, scb_t *);
static int mega_setup_mailbox(adapter_t *);
static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *);
-static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *);
+static scb_t * mega_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
static void __mega_runpendq(adapter_t *);
static int issue_scb_block(adapter_t *, u_char *);
@@ -951,9 +951,9 @@ static irqreturn_t megaraid_isr_iomapped(int, void *);
static void mega_free_scb(adapter_t *, scb_t *);
-static int megaraid_abort(Scsi_Cmnd *);
-static int megaraid_reset(Scsi_Cmnd *);
-static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int);
+static int megaraid_abort(struct scsi_cmnd *);
+static int megaraid_reset(struct scsi_cmnd *);
+static int megaraid_abort_and_reset(adapter_t *, struct scsi_cmnd *, int);
static int megaraid_biosparam(struct scsi_device *, struct block_device *,
sector_t, int []);
@@ -983,9 +983,9 @@ static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t);
static int mega_support_ext_cdb(adapter_t *);
static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *,
- Scsi_Cmnd *, int, int);
+ struct scsi_cmnd *, int, int);
static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *,
- scb_t *, Scsi_Cmnd *, int, int);
+ scb_t *, struct scsi_cmnd *, int, int);
static void mega_enum_raid_scsi(adapter_t *);
static void mega_get_boot_drv(adapter_t *);
static int mega_support_random_del(adapter_t *);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 75dc25f78336..67d356d84717 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.705.02.00-rc1"
-#define MEGASAS_RELDATE "April 4, 2018"
+#define MEGASAS_VERSION "07.706.03.00-rc1"
+#define MEGASAS_RELDATE "May 21, 2018"
/*
* Device IDs
@@ -709,7 +709,8 @@ struct MR_TARGET_PROPERTIES {
u32 max_io_size_kb;
u32 device_qdepth;
u32 sector_size;
- u8 reserved[500];
+ u8 reset_tmo;
+ u8 reserved[499];
} __packed;
/*
@@ -1400,6 +1401,19 @@ struct megasas_ctrl_info {
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
+
+ u32 size;
+ u32 pad1;
+
+ u8 reserved6[64];
+
+ u32 rsvdForAdptOp[64];
+
+ u8 reserved7[3];
+
+ u8 TaskAbortTO; /* Timeout value in seconds used by Abort Task TM */
+ u8 MaxResetTO; /* Max Supported Reset timeout in seconds. */
+ u8 reserved8[3];
} __packed;
/*
@@ -1472,6 +1486,7 @@ enum FW_BOOT_CONTEXT {
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
+#define MEGASAS_DEFAULT_TM_TIMEOUT 50
/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
@@ -1915,7 +1930,9 @@ struct MR_PRIV_DEVICE {
bool is_tm_capable;
bool tm_busy;
atomic_t r1_ldio_hint;
- u8 interface_type;
+ u8 interface_type;
+ u8 task_abort_tmo;
+ u8 target_reset_tmo;
};
struct megasas_cmd;
@@ -2291,6 +2308,8 @@ struct megasas_instance {
u8 adapter_type;
bool consistent_mask_64bit;
bool support_nvme_passthru;
+ u8 task_abort_tmo;
+ u8 max_reset_tmo;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2512,7 +2531,11 @@ int megasas_get_ctrl_info(struct megasas_instance *instance);
/* PD sequence */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
-void megasas_set_dynamic_target_properties(struct scsi_device *sdev);
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
+ bool is_target_prop);
+int megasas_get_target_prop(struct megasas_instance *instance,
+ struct scsi_device *sdev);
+
int megasas_set_crash_dump_params(struct megasas_instance *instance,
u8 crash_buf_state);
void megasas_free_host_crash_buffer(struct megasas_instance *instance);
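The new reset_tmo/TaskAbortTO/MaxResetTO plumbing boils down to one rule, applied later in megasas_set_dynamic_target_properties(): prefer the firmware's per-target value, clamp it to the controller-wide maximum, and fall back to MEGASAS_DEFAULT_TM_TIMEOUT otherwise. A sketch of that selection (semantics assumed from the hunks below):

#include <stdio.h>

#define MEGASAS_DEFAULT_TM_TIMEOUT 50   /* seconds, as defined above */

static int pick_reset_tmo(unsigned char fw_tmo, unsigned char ctrl_max)
{
        if (!fw_tmo)                    /* firmware gave no value */
                return MEGASAS_DEFAULT_TM_TIMEOUT;
        /* min_t(u8, instance->max_reset_tmo, tgt_prop->reset_tmo) */
        return fw_tmo < ctrl_max ? fw_tmo : ctrl_max;
}

int main(void)
{
        printf("none=%d fw=%d clamped=%d\n",
               pick_reset_tmo(0, 60),
               pick_reset_tmo(30, 60),
               pick_reset_tmo(90, 60));
        return 0;
}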
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71d97573a667..9aa9590c5373 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -120,8 +120,7 @@ static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
struct scsi_device *sdev);
-static int megasas_get_target_prop(struct megasas_instance *instance,
- struct scsi_device *sdev);
+
/*
* PCI ID table for all supported controllers
*/
@@ -1794,7 +1793,8 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
*
* Returns void
*/
-void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
+ bool is_target_prop)
{
u16 pd_index = 0, ld;
u32 device_id;
@@ -1834,6 +1834,22 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
mr_device_priv_data->is_tm_capable =
pd_sync->seq[pd_index].capability.tmCapable;
}
+
+ if (is_target_prop && instance->tgt_prop->reset_tmo) {
+ /*
+ * If FW provides a target reset timeout value, driver will use
+ * it. If not set, fallback to default values.
+ */
+ mr_device_priv_data->target_reset_tmo =
+ min_t(u8, instance->max_reset_tmo,
+ instance->tgt_prop->reset_tmo);
+ mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
+ } else {
+ mr_device_priv_data->target_reset_tmo =
+ MEGASAS_DEFAULT_TM_TIMEOUT;
+ mr_device_priv_data->task_abort_tmo =
+ MEGASAS_DEFAULT_TM_TIMEOUT;
+ }
}
/*
@@ -1967,10 +1983,10 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->reset_mutex);
-
/* This sdev property may change post OCR */
- megasas_set_dynamic_target_properties(sdev);
+ megasas_set_dynamic_target_properties(sdev, is_target_prop);
+
+ mutex_unlock(&instance->reset_mutex);
return 0;
}
@@ -2818,7 +2834,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
"SCSI command pointer: (%p)\t SCSI host state: %d\t"
" SCSI host busy: %d\t FW outstanding: %d\n",
scmd, scmd->device->host->shost_state,
- atomic_read((atomic_t *)&scmd->device->host->host_busy),
+ scsi_host_busy(scmd->device->host),
atomic_read(&instance->fw_outstanding));
/*
@@ -4720,6 +4736,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapter_operations4.support_pd_map_target_id;
instance->support_nvme_passthru =
ci->adapter_operations4.support_nvme_passthru;
+ instance->task_abort_tmo = ci->TaskAbortTO;
+ instance->max_reset_tmo = ci->MaxResetTO;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4738,6 +4756,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->secure_jbod_support ? "Yes" : "No");
dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
instance->support_nvme_passthru ? "Yes" : "No");
+ dev_info(&instance->pdev->dev,
+ "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
+ instance->task_abort_tmo, instance->max_reset_tmo);
+
break;
case DCMD_TIMEOUT:
@@ -4755,14 +4777,15 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
__func__, __LINE__);
break;
}
+ break;
case DCMD_FAILED:
megaraid_sas_kill_hba(instance);
break;
}
- megasas_return_cmd(instance, cmd);
-
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -5831,7 +5854,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
*
* Returns 0 on success non-zero on failure.
*/
-static int
+int
megasas_get_target_prop(struct megasas_instance *instance,
struct scsi_device *sdev)
{
@@ -6789,6 +6812,9 @@ megasas_resume(struct pci_dev *pdev)
goto fail_init_mfi;
}
+ if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
+ goto fail_init_mfi;
+
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
@@ -6842,12 +6868,12 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance)
{
int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
int i;
-
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
- return 1;
+ u8 adp_state;
for (i = 0; i < wait_time; i++) {
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+ adp_state = atomic_read(&instance->adprecovery);
+ if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
+ (adp_state == MEGASAS_HW_CRITICAL_ERROR))
break;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
@@ -6856,9 +6882,10 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance)
msleep(1000);
}
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
- __func__);
+ if (adp_state != MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev,
+ "%s HBA failed to become operational, adp_state %d\n",
+ __func__, adp_state);
return 1;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 94c23ad51179..c7f95bace353 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -4108,7 +4108,8 @@ megasas_tm_response_code(struct megasas_instance *instance,
*/
static int
megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
- uint channel, uint id, u16 smid_task, u8 type)
+ uint channel, uint id, u16 smid_task, u8 type,
+ struct MR_PRIV_DEVICE *mr_device_priv_data)
{
struct MR_TASK_MANAGE_REQUEST *mr_request;
struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
@@ -4119,6 +4120,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
struct fusion_context *fusion = NULL;
struct megasas_cmd_fusion *scsi_lookup;
int rc;
+ int timeout = MEGASAS_DEFAULT_TM_TIMEOUT;
struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
fusion = instance->ctrl_context;
@@ -4170,7 +4172,16 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
init_completion(&cmd_fusion->done);
megasas_fire_cmd_fusion(instance, req_desc);
- timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ timeout = mr_device_priv_data->task_abort_tmo;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ timeout = mr_device_priv_data->target_reset_tmo;
+ break;
+ }
+
+ timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ);
if (!timeleft) {
dev_err(&instance->pdev->dev,
@@ -4363,7 +4374,8 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
@@ -4435,7 +4447,8 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
out:
@@ -4490,6 +4503,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
u32 io_timeout_in_crash_mode = 0;
struct scsi_cmnd *scmd_local = NULL;
struct scsi_device *sdev;
+ int ret_target_prop = DCMD_FAILED;
+ bool is_target_prop = false;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -4661,9 +4676,6 @@ transition_to_ready:
megasas_setup_jbod_map(instance);
- shost_for_each_device(sdev, shost)
- megasas_set_dynamic_target_properties(sdev);
-
/* reset stream detection array */
if (instance->adapter_type == VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
@@ -4677,6 +4689,16 @@ transition_to_ready:
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
instance->instancet->enable_intr(instance);
+
+ shost_for_each_device(sdev, shost) {
+ if ((instance->tgt_prop) &&
+ (instance->nvme_page_size))
+ ret_target_prop = megasas_get_target_prop(instance, sdev);
+
+ is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+ megasas_set_dynamic_target_properties(sdev, is_target_prop);
+ }
+
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
dev_info(&instance->pdev->dev, "Interrupts are enabled and"
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 1753e42826dd..82e01dbe90af 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -594,9 +594,9 @@ static void mesh_done(struct mesh_state *ms, int start_next)
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
- cmd->result = (ms->stat << 16) + cmd->SCp.Status;
+ cmd->result = (ms->stat << 16) | cmd->SCp.Status;
if (ms->stat == DID_OK)
- cmd->result += (cmd->SCp.Message << 8);
+ cmd->result |= cmd->SCp.Message << 8;
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 569392d0d4c9..59d7844ee022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -102,8 +102,39 @@ static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
/**
+ * mpt3sas_base_check_cmd_timeout - check for command timeout or
+ * termination due to host reset.
+ *
+ * @ioc: per adapter object.
+ * @status: status of the issued command.
+ * @mpi_request: mf request pointer.
+ * @sz: size of buffer.
+ *
+ * Return: 1 if a host reset should be issued (timeout), 0 if the
+ * command was terminated by an in-progress host reset.
+ */
+u8
+mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ u8 status, void *mpi_request, int sz)
+{
+ u8 issue_reset = 0;
+
+ if (!(status & MPT3_CMD_RESET))
+ issue_reset = 1;
+
+ pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
+ ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+ _debug_dump_mf(mpi_request, sz);
+
+ return issue_reset;
+}
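Later hunks in this patch (SAS IO-unit control, SEP, and config requests) replace their open-coded timeout prints with this helper; the caller pattern is:

    /* Caller pattern, as used in mpt3sas_base_sas_iounit_control() below. */
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
        issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
            ioc->base_cmds.status, mpi_request,
            sizeof(Mpi2SasIoUnitControlRequest_t)/4);
        goto issue_host_reset;
    }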
+
+/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ * @val: ?
+ * @kp: ?
*
+ * Return: ?
*/
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
@@ -132,8 +163,6 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
* @ioc: per adapter object
* @reply: reply message frame(lower 32bit addr)
* @index: System request message index.
- *
- * @Returns - Nothing
*/
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
@@ -156,7 +185,7 @@ _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
* _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
* to system/BAR0 region.
*
- * @dst_iomem: Pointer to the destinaltion location in BAR0 space.
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
* @src: Pointer to the Source data.
* @size: Size of data to be copied.
*/
@@ -197,7 +226,7 @@ _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * @Return: chain address.
+ * Return: the chain address.
*/
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -223,7 +252,7 @@ _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * @Return - Physical chain address.
+ * Return: Physical chain address.
*/
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -248,7 +277,7 @@ _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
*
- * @Returns - Pointer to buffer location in BAR0.
+ * Return: Pointer to buffer location in BAR0.
*/
static void __iomem *
@@ -270,7 +299,7 @@ _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * @Returns - Pointer to buffer location in BAR0.
+ * Return: Pointer to buffer location in BAR0.
*/
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -291,7 +320,7 @@ _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @chain_buffer_dma: Chain buffer dma address.
*
- * @Returns - Pointer to chain buffer. Or Null on Failure.
+ * Return: Pointer to chain buffer. Or Null on Failure.
*/
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
@@ -322,8 +351,6 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object.
* @mpi_request: mf request pointer.
* @smid: system request message index.
- *
- * @Returns: Nothing.
*/
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
void *mpi_request, u16 smid)
@@ -496,8 +523,9 @@ eob_clone_chain:
* mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
* @arg: input argument, used to derive ioc
*
- * Return 0 if controller is removed from pci subsystem.
- * Return -1 for other case.
+ * Return:
+ * 0 if controller is removed from pci subsystem.
+ * -1 for other case.
*/
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
@@ -517,9 +545,8 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
static void
_base_fault_reset_work(struct work_struct *work)
@@ -610,9 +637,8 @@ _base_fault_reset_work(struct work_struct *work)
/**
* mpt3sas_base_start_watchdog - start the fault_reset_work_q
* @ioc: per adapter object
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
@@ -633,7 +659,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->fault_reset_work_q) {
pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
ioc->name, __func__, __LINE__);
- return;
+ return;
}
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->fault_reset_work_q)
@@ -646,9 +672,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
* @ioc: per adapter object
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
@@ -671,8 +696,6 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_fault_info - verbose translation of firmware FAULT code
* @ioc: per adapter object
* @fault_code: fault code
- *
- * Return nothing.
*/
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
@@ -721,8 +744,6 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @request_hdr: request mf
- *
- * Return nothing.
*/
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
@@ -945,8 +966,6 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
 * _base_display_event_data - verbose translation of firmware async events
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
- *
- * Return nothing.
*/
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
@@ -1065,8 +1084,6 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* _base_sas_log_info - verbose translation of firmware log info
* @ioc: per adapter object
* @log_info: log info
- *
- * Return nothing.
*/
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
@@ -1124,8 +1141,6 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
* @smid: system request message index
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
- *
- * Return nothing.
*/
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1167,8 +1182,9 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return:
+ * 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1200,8 +1216,9 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return:
+ * 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
@@ -1279,7 +1296,7 @@ _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Return callback index.
+ * Return: callback index.
*/
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -1312,8 +1329,6 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
*
* Disabling ResetIRQ, Reply and Doorbell Interrupts
- *
- * Return nothing.
*/
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
@@ -1332,8 +1347,6 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Enabling only Reply Interrupts
- *
- * Return nothing.
*/
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
@@ -1358,9 +1371,8 @@ union reply_descriptor {
* _base_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
- * @r: pt_regs pointer (not used)
*
- * Return IRQ_HANDLE if processed, else IRQ_NONE.
+ * Return: IRQ_HANDLED if processed, else IRQ_NONE.
*/
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
@@ -1535,6 +1547,7 @@ _base_interrupt(int irq, void *bus_id)
 * _base_is_controller_msix_enabled - whether the controller supports multi-reply queues
* @ioc: per adapter object
*
+ * Return: Whether or not MSI/X is enabled.
*/
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
@@ -1549,8 +1562,6 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
 * Context: non-ISR context
*
* Called when a Task Management request has completed.
- *
- * Return nothing.
*/
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
@@ -1577,8 +1588,6 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_release_callback_handler - clear interrupt callback handler
* @cb_idx: callback index
- *
- * Return nothing.
*/
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
@@ -1590,7 +1599,7 @@ mpt3sas_base_release_callback_handler(u8 cb_idx)
* mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
* @cb_func: callback function
*
- * Returns cb_func.
+ * Return: Index of @cb_func.
*/
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
@@ -1607,8 +1616,6 @@ mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
/**
* mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
- *
- * Return nothing.
*/
void
mpt3sas_base_initialize_callback_handler(void)
@@ -1628,8 +1635,6 @@ mpt3sas_base_initialize_callback_handler(void)
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
* something to use if the target device goes brain dead and tries
* to send data even when none is asked for.
- *
- * Return nothing.
*/
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
@@ -1646,8 +1651,6 @@ _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
* @paddr: virtual address for SGE
* @flags_length: SGE flags and data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
@@ -1666,8 +1669,6 @@ _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @paddr: virtual address for SGE
* @flags_length: SGE flags and data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
@@ -1685,7 +1686,7 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @ioc: per adapter object
* @scmd: SCSI commands of the IO request
*
- * Returns chain tracker from chain_lookup table using key as
+ * Return: chain tracker from chain_lookup table using key as
* smid and smid's chain_offset.
*/
static struct chain_tracker *
@@ -1715,8 +1716,6 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Return nothing.
*/
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
@@ -1777,7 +1776,7 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* describes the first data memory segment, and PRP2 contains a pointer to a PRP
* list located elsewhere in memory to describe the remaining data memory
* segments. The PRP list will be contiguous.
-
+ *
* The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
* physical memory segments as a single memory buffer, just as a SGL does. Note
@@ -1820,8 +1819,6 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Returns nothing.
*/
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -1836,6 +1833,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u32 offset, entry_len;
u32 page_mask_result, page_mask;
size_t length;
+ struct mpt3sas_nvme_cmd *nvme_cmd =
+ (void *)nvme_encap_request->NVMe_Command;
/*
* Not all commands require a data transfer. If no data, just return
@@ -1843,15 +1842,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*/
if (!data_in_sz && !data_out_sz)
return;
- /*
- * Set pointers to PRP1 and PRP2, which are in the NVMe command.
- * PRP1 is located at a 24 byte offset from the start of the NVMe
- * command. Then set the current PRP entry pointer to PRP1.
- */
- prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
- NVME_CMD_PRP1_OFFSET);
- prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
- NVME_CMD_PRP2_OFFSET);
+ prp1_entry = &nvme_cmd->prp1;
+ prp2_entry = &nvme_cmd->prp2;
prp_entry = prp1_entry;
/*
* For the PRP entries, use the specially allocated buffer of
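Instead of computing PRP1/PRP2 at raw byte offsets 24 and 32 via NVME_CMD_PRP1_OFFSET/NVME_CMD_PRP2_OFFSET, the patch overlays the NVMe command with a small struct (added to mpt3sas_base.h further down in this diff), letting the compiler do the offset arithmetic:

    struct mpt3sas_nvme_cmd {
        u8     rsvd[24];  /* bytes 0..23 of the NVMe SQE */
        __le64 prp1;      /* byte offset 24 */
        __le64 prp2;      /* byte offset 32 */
    };

    /* Usage, as in the hunk above: */
    struct mpt3sas_nvme_cmd *nvme_cmd =
            (void *)nvme_encap_request->NVMe_Command;
    prp1_entry = &nvme_cmd->prp1;
    prp2_entry = &nvme_cmd->prp2;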
@@ -1992,7 +1984,7 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @smid: msg Index
* @sge_count: scatter gather element count.
*
- * Returns: true: PRPs are built
+ * Return: true: PRPs are built
 * false: IEEE SGLs need to be built
*/
static void
@@ -2127,11 +2119,9 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
u32 data_length = 0;
- struct scatterlist *sg_scmd;
bool build_prp = true;
data_length = scsi_bufflen(scmd);
- sg_scmd = scsi_sglist(scmd);
/* If DataLength is <= 16K and the number of SGE entries is <= 2,
 * we build an IEEE SGL
@@ -2155,18 +2145,16 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
* @scmd: scsi command
* @pcie_device: points to the PCIe device's info
*
- * Returns 0 if native SGL was built, 1 if no SGL was built
+ * Return: 0 if native SGL was built, 1 if no SGL was built
*/
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
struct _pcie_device *pcie_device)
{
- struct scatterlist *sg_scmd;
int sges_left;
/* Get the SG list pointer and info. */
- sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
@@ -2201,8 +2189,6 @@ out:
* @chain_offset: number of 128 byte elements from start of segment
* @length: data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
@@ -2224,8 +2210,6 @@ _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
* something to use if the target device goes brain dead and tries
* to send data even when none is asked for.
- *
- * Return nothing.
*/
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
@@ -2249,7 +2233,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
* The main routine that builds scatter gather table from a given
* scsi request sent via the .queuecommand main handler.
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error
*/
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
@@ -2394,7 +2378,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
* The main routine that builds scatter gather table from a given
* scsi request sent via the .queuecommand main handler.
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error
*/
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
@@ -2525,8 +2509,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Return nothing.
*/
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
@@ -2576,7 +2558,7 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
* @ioc: per adapter object
* @pdev: PCI device struct
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
@@ -2924,10 +2906,9 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->combined_reply_queue) {
- kfree(ioc->replyPostRegisterIndex);
- ioc->replyPostRegisterIndex = NULL;
- }
+ kfree(ioc->replyPostRegisterIndex);
+ ioc->replyPostRegisterIndex = NULL;
+
if (ioc->chip_phys) {
iounmap(ioc->chip);
@@ -2945,7 +2926,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
@@ -3034,7 +3015,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
- if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
+ if (ioc->combined_reply_queue) {
/* Determine the Supplemental Reply Post Host Index Registers
 * Addresses. Supplemental Reply Post Host Index Registers
 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -3058,8 +3039,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
- } else
- ioc->combined_reply_queue = 0;
+ }
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -3097,7 +3077,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @smid: system request message index(smid zero is invalid)
*
- * Returns virt pointer to message frame.
+ * Return: virt pointer to message frame.
*/
void *
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3110,7 +3090,7 @@ mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns virt pointer to sense buffer.
+ * Return: virt pointer to sense buffer.
*/
void *
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3123,7 +3103,7 @@ mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns phys pointer to the low 32bit address of the sense buffer.
+ * Return: phys pointer to the low 32bit address of the sense buffer.
*/
__le32
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3137,7 +3117,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns virt pointer to a PCIe SGL.
+ * Return: virt pointer to a PCIe SGL.
*/
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3150,7 +3130,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns phys pointer to the address of the PCIe buffer.
+ * Return: phys pointer to the address of the PCIe buffer.
*/
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3184,7 +3164,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @cb_idx: callback index
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
@@ -3216,7 +3196,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
* @cb_idx: callback index
* @scmd: pointer to scsi command object
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
@@ -3239,7 +3219,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
* @ioc: per adapter object
* @cb_idx: callback index
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
@@ -3270,7 +3250,7 @@ _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
* See _wait_for_commands_to_complete() call with regards to this code.
*/
if (ioc->shost_recovery && ioc->pending_io_count) {
- ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ ioc->pending_io_count = scsi_host_busy(ioc->shost);
if (ioc->pending_io_count == 0)
wake_up(&ioc->reset_wq);
}
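This is one of several conversions in the patch (also in megasas_reset_bus_host above and mpt3sas_wait_for_commands_to_complete below) from reading the midlayer-private shost->host_busy counter to the scsi_host_busy() accessor exported by the SCSI core:

    /* Before: reaches into the midlayer's private atomic counter. */
    ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);

    /* After: the supported accessor. */
    ioc->pending_io_count = scsi_host_busy(ioc->shost);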
@@ -3284,14 +3264,13 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
st->cb_idx = 0xFF;
st->direct_io = 0;
atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
+ st->smid = 0;
}
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3343,18 +3322,16 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
spinlock_t *writeq_lock)
{
unsigned long flags;
- __u64 data_out = b;
spin_lock_irqsave(writeq_lock, flags);
- writel((u32)(data_out), addr);
- writel((u32)(data_out >> 32), (addr + 4));
+ __raw_writel((u32)(b), addr);
+ __raw_writel((u32)(b >> 32), (addr + 4));
mmiowb();
spin_unlock_irqrestore(writeq_lock, flags);
}
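Both writeq paths now use the relaxed __raw_writel()/__raw_writeq() accessors with a single mmiowb() before the lock is released: writel() implies a full barrier per 32-bit half, while one mmiowb() is the ordering point needed between posted MMIO writes and the spinlock handoff to another CPU. A condensed sketch of the split-write variant (helper name hypothetical):

    static inline void mmio_write64_split(u64 b, void __iomem *addr,
                                          spinlock_t *lock)
    {
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        __raw_writel((u32)b, addr);             /* low dword */
        __raw_writel((u32)(b >> 32), addr + 4); /* high dword */
        mmiowb();          /* order MMIO before the unlock is visible */
        spin_unlock_irqrestore(lock, flags);
    }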
/**
* _base_writeq - 64 bit write to MMIO
- * @ioc: per adapter object
* @b: data payload
* @addr: address in MMIO space
* @writeq_lock: spin lock
@@ -3367,7 +3344,8 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
- writeq(b, addr);
+ __raw_writeq(b, addr);
+ mmiowb();
}
#else
static inline void
@@ -3382,8 +3360,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
static void
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
@@ -3412,8 +3388,6 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
static void
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
@@ -3436,8 +3410,6 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3461,7 +3433,6 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
 * @msix_task: same as the msix of the I/O in case of task abort, else 0.
- * Return nothing.
*/
void
mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3472,11 +3443,8 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u64 *request;
if (ioc->is_mcpu_endpoint) {
- MPI2RequestHeader_t *request_hdr;
-
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
- request_hdr = (MPI2RequestHeader_t *)mfp;
/* TBD 256 is offset within sys register. */
mpi_req_iomem = (void __force *)ioc->chip
+ MPI_FRAME_START_OFFSET
@@ -3507,8 +3475,6 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* firmware
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3530,8 +3496,6 @@ mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* mpt3sas_base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3539,13 +3503,10 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
Mpi2RequestDescriptorUnion_t descriptor;
void *mpi_req_iomem;
u64 *request;
- MPI2RequestHeader_t *request_hdr;
if (ioc->is_mcpu_endpoint) {
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
- request_hdr = (MPI2RequestHeader_t *)mfp;
-
_clone_sg_entries(ioc, (void *) mfp, smid);
/* TBD 256 is offset within sys register */
mpi_req_iomem = (void __force *)ioc->chip +
@@ -3571,8 +3532,6 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
@@ -3833,7 +3792,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
* version from FW Image Header.
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
@@ -3930,8 +3889,6 @@ out:
/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
@@ -4047,8 +4004,6 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
* @device_missing_delay: amount of time till device is reported missing
* @io_missing_delay: interval IO is returned when there is a missing device
*
- * Return nothing.
- *
* Passed on the command line, this function will modify the device missing
* delay, as well as the io missing delay. This should be called at driver
* load time.
@@ -4131,11 +4086,10 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
out:
kfree(sas_iounit_pg1);
}
+
/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
@@ -4207,8 +4161,6 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
 * Free memory allocated during enclosure add.
- *
- * Return nothing.
*/
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -4228,8 +4180,6 @@ mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Free memory allocated from _base_allocate_memory_pools.
- *
- * Return nothing.
*/
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
@@ -4350,9 +4300,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
*
- * Returns 1 if reply queues in a set have a same upper 32bits
- * in their base memory address,
- * else 0
+ * Return: 1 if the reply queues in a set have the same upper 32 bits in
+ * their base memory address, else 0.
*/
static int
@@ -4373,7 +4322,7 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error.
*/
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
@@ -4975,7 +4924,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @cooked: Request raw or cooked IOC state
*
- * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Return: all IOC Doorbell register bits if cooked==0, else just the
* Doorbell bits in MPI_IOC_STATE_MASK.
*/
u32
@@ -4990,10 +4939,11 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
/**
* _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc: ?
* @ioc_state: controller state { READY, OPERATIONAL, or RESET }
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
@@ -5021,9 +4971,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
* _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
* a write to the doorbell)
* @ioc: per adapter object
- * @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
* Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
*/
@@ -5090,7 +5039,7 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @ioc: per adapter object
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
* Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
* doorbell.
@@ -5137,8 +5086,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @ioc: per adapter object
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
- *
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5173,7 +5121,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
@@ -5222,7 +5170,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
* @reply: pointer to reply payload
* @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
@@ -5268,7 +5216,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* send message 32-bits at a time */
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
- writel((u32)(request[i]), &ioc->chip->Doorbell);
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 5)))
failed = 1;
}
@@ -5289,7 +5237,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = (u16)(readl(&ioc->chip->Doorbell)
+ reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5298,7 +5246,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
ioc->name, __LINE__);
return -EFAULT;
}
- reply[1] = (u16)(readl(&ioc->chip->Doorbell)
+ reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@@ -5312,7 +5260,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
if (i >= reply_bytes/2) /* overflow case */
readl(&ioc->chip->Doorbell);
else
- reply[i] = (u16)(readl(&ioc->chip->Doorbell)
+ reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
@@ -5346,7 +5294,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
* identifying information about the device, in addition allows the host to
* remove IOC resources associated with the device.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
@@ -5355,7 +5303,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- bool issue_reset = false;
+ u8 issue_reset = 0;
int rc;
void *request;
u16 wait_state_count;
@@ -5414,12 +5362,10 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc->ioc_link_reset_in_progress)
ioc->ioc_link_reset_in_progress = 0;
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SasIoUnitControlRequest_t)/4);
- if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = true;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5449,7 +5395,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
* The SCSI Enclosure Processor request message causes the IOC to
* communicate with SES devices to control LED status signals.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
@@ -5457,7 +5403,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- bool issue_reset = false;
+ u8 issue_reset = 0;
int rc;
void *request;
u16 wait_state_count;
@@ -5510,12 +5456,10 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SepRequest_t)/4);
- if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = false;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5539,8 +5483,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
/**
* _base_get_port_facts - obtain port facts reply and save in ioc
* @ioc: per adapter object
+ * @port: ?
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
@@ -5583,7 +5528,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
* @ioc: per adapter object
 * @timeout: timeout in seconds
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5637,7 +5582,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
* _base_get_ioc_facts - obtain ioc facts reply and save in ioc
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
@@ -5681,6 +5626,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
facts->WhoInit = mpi_reply.WhoInit;
facts->NumberOfPorts = mpi_reply.NumberOfPorts;
facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ if (ioc->msix_enable && (facts->MaxMSIxVectors <=
+ MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
+ ioc->combined_reply_queue = 0;
facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
facts->MaxReplyDescriptorPostQueueDepth =
le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
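Combined with the MAX_COMBINED_MSIX_VECTORS() macro added to mpt3sas_base.h below (16 for gen3.5 controllers, 8 otherwise), this disables the combined reply-queue feature at IOC-facts time whenever the controller cannot expose more MSI-X vectors than a single replyPostRegister set covers; it is also what lets the map_resources hunk above drop its reply_queue_count > 8 check:

    /* Sketch of the gating added in _base_get_ioc_facts(). */
    if (ioc->msix_enable &&
        facts->MaxMSIxVectors <=
            MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))
        ioc->combined_reply_queue = 0;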
@@ -5736,7 +5684,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
* _base_send_ioc_init - send ioc_init to firmware
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
@@ -5837,8 +5785,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -5883,7 +5831,7 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* _base_send_port_enable - send port_enable(discovery stuff) to firmware
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
@@ -5950,7 +5898,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
@@ -5990,7 +5938,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
* Decide whether to wait on discovery to complete. Used to either
* locate boot device, or report volumes ahead of physical devices.
*
- * Returns 1 for wait, 0 for don't wait
+ * Return: 1 for wait, 0 for don't wait.
*/
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
@@ -6062,7 +6010,7 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
* _base_event_notification - send event notification
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
@@ -6119,7 +6067,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_validate_event_type - validating event types
* @ioc: per adapter object
- * @event: firmware event
+ * @event_type: firmware event
*
 * This will turn on firmware event notification when the application
 * asks for that event. We don't mask events that are already enabled.
@@ -6157,7 +6105,7 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
* _base_diag_reset - the "big hammer" start of day reset
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -6271,7 +6219,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
@@ -6340,7 +6288,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
* _base_make_ioc_operational - put controller in OPERATIONAL state
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
@@ -6513,8 +6461,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_free_resources - free resources controller resources
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
@@ -6540,7 +6486,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_attach - attach controller instance
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
@@ -6797,8 +6743,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_detach - remove controller instance
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
@@ -6830,68 +6774,72 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_reset_handler - reset callback handler (for base)
+ * _base_pre_reset_handler - pre reset handler
* @ioc: per adapter object
- * @reset_phase: phase
- *
- * The handler for doing any required cleanup or initialization.
- *
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
- *
- * Return nothing.
*/
-static void
-_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- mpt3sas_scsih_reset_handler(ioc, reset_phase);
- mpt3sas_ctl_reset_handler(ioc, reset_phase);
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
- ioc->transport_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
- complete(&ioc->transport_cmds.done);
- }
- if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- ioc->base_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
- complete(&ioc->base_cmds.done);
- }
- if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- ioc->port_enable_failed = 1;
- ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
- if (ioc->is_driver_loading) {
- ioc->start_scan_failed =
- MPI2_IOCSTATUS_INTERNAL_ERROR;
- ioc->start_scan = 0;
- ioc->port_enable_cmds.status =
- MPT3_CMD_NOT_USED;
- } else
- complete(&ioc->port_enable_cmds.done);
- }
- if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
- ioc->config_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
- ioc->config_cmds.smid = USHRT_MAX;
- complete(&ioc->config_cmds.done);
+ mpt3sas_scsih_pre_reset_handler(ioc);
+ mpt3sas_ctl_pre_reset_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+}
+
+/**
+ * _base_after_reset_handler - after reset handler
+ * @ioc: per adapter object
+ */
+static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_after_reset_handler(ioc);
+ mpt3sas_ctl_after_reset_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT3_CMD_NOT_USED;
+ } else {
+ complete(&ioc->port_enable_cmds.done);
}
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- break;
+ }
+ if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
}
}
/**
+ * _base_reset_done_handler - reset done handler
+ * @ioc: per adapter object
+ */
+static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_reset_done_handler(ioc);
+ mpt3sas_ctl_reset_done_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+}
+
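With the reset_phase switch gone, each phase is a dedicated function and mpt3sas_base_hard_reset_handler() (see the hunks below) calls them in order:

    /* Phase ordering in mpt3sas_base_hard_reset_handler(). */
    _base_pre_reset_handler(ioc);        /* notify scsih and ctl */
    mpt3sas_wait_for_commands_to_complete(ioc);
    _base_mask_interrupts(ioc);
    r = _base_make_ioc_ready(ioc, type);
    if (r)
        goto out;
    _base_after_reset_handler(ioc);      /* fail back pending internal cmds */
    /* ... port-enable handling elided ... */
    r = _base_make_ioc_operational(ioc);
    if (!r)
        _base_reset_done_handler(ioc);   /* links re-initialized */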
+/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands to complete
* @ioc: Pointer to MPT_ADAPTER structure
*
@@ -6910,7 +6858,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ ioc->pending_io_count = scsi_host_busy(ioc->shost);
if (!ioc->pending_io_count)
return;
@@ -6924,7 +6872,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
@@ -6949,14 +6897,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_halt_firmware(ioc);
/* wait for an active reset in progress to complete */
- if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
- do {
- ssleep(1);
- } while (ioc->shost_recovery == 1);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
- return ioc->ioc_reset_in_progress_status;
- }
+ mutex_lock(&ioc->reset_in_progress_mutex);
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 1;
@@ -6971,13 +6912,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
is_fault = 1;
}
- _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
+ _base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
_base_mask_interrupts(ioc);
r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
- _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
+ _base_after_reset_handler(ioc);
/* If this hard reset is called while port enable is active, then
* there is no reason to call make_ioc_operational
@@ -6998,14 +6939,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
r = _base_make_ioc_operational(ioc);
if (!r)
- _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
+ _base_reset_done_handler(ioc);
out:
dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- ioc->ioc_reset_in_progress_status = r;
ioc->shost_recovery = 0;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_count++;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index f02974c0be4a..96dc15e90bd8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -74,8 +74,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "25.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 25
+#define MPT3SAS_DRIVER_VERSION "26.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 26
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -143,21 +143,17 @@
* NVMe defines
*/
#define NVME_PRP_SIZE 8 /* PRP size */
-#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
-#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
#define NVME_TASK_ABORT_MIN_TIMEOUT 6
#define NVME_TASK_ABORT_MAX_TIMEOUT 60
#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010)
#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
-
-/*
- * reset phases
- */
-#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
-#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */
-#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */
+struct mpt3sas_nvme_cmd {
+ u8 rsvd[24];
+ __le64 prp1;
+ __le64 prp2;
+};
/*
* logging format
@@ -323,6 +319,7 @@
* There are twelve Supplemental Reply Post Host Index Registers
* and each register is at offset 0x10 bytes from the previous one.
*/
+#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 16 : 8)
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
@@ -1162,7 +1159,6 @@ struct MPT3SAS_ADAPTER {
struct mutex reset_in_progress_mutex;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
- u8 ioc_reset_in_progress_status;
u8 ignore_loginfos;
u8 remove_host;
@@ -1482,13 +1478,17 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
+u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ u8 status, void *mpi_request, int sz);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
-void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
@@ -1615,7 +1615,9 @@ void mpt3sas_ctl_init(ushort hbas_to_enumerate);
void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
-void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
u8 msix_index, u32 reply);
void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index e87c76a832f6..d29a2dcc7d0e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -198,7 +198,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*
* A wrapper for obtaining dma-able memory for config page request.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
@@ -230,7 +230,7 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
*
* A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static void
_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
@@ -251,8 +251,8 @@ _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
*
* The callback handler when using _config_request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -295,7 +295,7 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
*
* The callback index is set inside `ioc->config_cb_idx.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
@@ -406,10 +406,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2ConfigRequest_t)/4);
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->config_cmds.status, mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
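Every timeout branch touched by this patch previously open-coded the same three steps: log the timeout, dump the message frame, and decide from the MPT3_CMD_RESET bit whether a host reset is needed. The new helper centralizes that; a plausible body, reconstructed from the call sites it replaces (the real definition belongs in mpt3sas_base.c and is not shown in this diff):

	u8
	mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
		u8 status, void *mpi_request, int sz)
	{
		u8 issue_reset = 0;

		/* MPT3_CMD_RESET set means the command was terminated by a
		 * host reset already in progress, so no new reset is needed.
		 */
		if (!(status & MPT3_CMD_RESET))
			issue_reset = 1;

		pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
		    issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
		_debug_dump_mf(mpi_request, sz);

		return issue_reset;
	}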
@@ -519,7 +518,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -556,7 +555,7 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
@@ -593,7 +592,7 @@ mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
@@ -630,7 +629,7 @@ mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
@@ -667,7 +666,7 @@ mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
@@ -708,7 +707,7 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
@@ -744,7 +743,7 @@ mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -780,7 +779,7 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -816,7 +815,7 @@ mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -852,7 +851,7 @@ mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -889,7 +888,7 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc,
@@ -924,7 +923,7 @@ mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
@@ -960,7 +959,7 @@ mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
@@ -998,7 +997,7 @@ mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1039,7 +1038,7 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1080,7 +1079,7 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1121,7 +1120,7 @@ out:
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
@@ -1159,7 +1158,7 @@ out:
* @num_phys: pointer returned with the number of phys
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
@@ -1209,7 +1208,7 @@ mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1250,7 +1249,7 @@ mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1291,7 +1290,7 @@ mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1333,7 +1332,7 @@ mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1373,7 +1372,7 @@ mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1416,7 +1415,7 @@ mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1455,7 +1454,7 @@ mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @phy_number: phy number
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1495,7 +1494,7 @@ mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @phy_number: phy number
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1536,7 +1535,7 @@ mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: volume handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1574,7 +1573,7 @@ mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
* @num_pds: returns pds count
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -1626,7 +1625,7 @@ mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1665,7 +1664,7 @@ mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
* @form_specific: specific to the form
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1704,7 +1703,7 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @volume_handle: volume handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
@@ -1794,7 +1793,7 @@ mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
* @wwid: volume wwid
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle,
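Most hunks in this file are mechanical kernel-doc conversions: the free-form "Returns ..." line becomes the "Return:" section that scripts/kernel-doc recognizes and renders as a distinct block. For reference, the target shape (the function name here is hypothetical):

	/**
	 * example_get_page - fetch a config page (illustrative only)
	 * @ioc: per adapter object
	 * @config_page: contents of the config page
	 *
	 * Return: 0 for success, non-zero for failure.
	 */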
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 3269ef43f07e..5e8c059ce2c9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -253,8 +253,8 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*
* The callback handler when using ioc->ctl_cb_idx.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -317,7 +317,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 * The bitmask in ioc->event_type[] indicates which events should
 * be saved in the driver event_log. This bitmask is set by the application.
*
- * Returns 1 when event should be captured, or zero means no match.
+ * Return: 1 when event should be captured, or zero means no match.
*/
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
@@ -339,8 +339,6 @@ _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
* mpt3sas_ctl_add_to_event_log - add event
* @ioc: per adapter object
* @mpi_reply: reply message frame
- *
- * Return nothing.
*/
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
@@ -395,8 +393,8 @@ mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
@@ -412,12 +410,12 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
/**
* _ctl_verify_adapter - validates ioc_number passed from application
- * @ioc: per adapter object
+ * @ioc_number: ?
* @iocpp: The ioc pointer is returned in this.
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
* MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*
- * Return (-1) means error, else ioc_number.
+ * Return: (-1) means error, else ioc_number.
*/
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
@@ -460,65 +458,74 @@ out:
/**
 * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
* @ioc: per adapter object
- * @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
- *
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
*/
-void
-mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
int i;
u8 issue_reset;
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
- mpt3sas_send_diag_release(ioc, i, &issue_reset);
- }
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+}
+
+/**
+ * mpt3sas_ctl_after_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
- ioc->ctl_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
- complete(&ioc->ctl_cmds.done);
- }
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+}
+
+/**
+ * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
- ioc->diag_buffer_status[i] |=
- MPT3_DIAG_BUFFER_IS_DIAG_RESET;
- }
- break;
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
}
}
/**
* _ctl_fasync -
- * @fd -
- * @filep -
- * @mode -
+ * @fd: ?
+ * @filep: ?
+ * @mode: ?
*
 * Called when an application requests the fasync callback handler.
*/
@@ -530,8 +537,8 @@ _ctl_fasync(int fd, struct file *filep, int mode)
/**
* _ctl_poll -
- * @file -
- * @wait -
+ * @filep: ?
+ * @wait: ?
*
*/
static __poll_t
@@ -556,10 +563,10 @@ _ctl_poll(struct file *filep, poll_table *wait)
/**
* _ctl_set_task_mid - assign an active smid to tm request
* @ioc: per adapter object
- * @karg - (struct mpt3_ioctl_command)
- * @tm_request - pointer to mf from user space
+ * @karg: (struct mpt3_ioctl_command)
+ * @tm_request: pointer to mf from user space
*
- * Returns 0 when an smid if found, else fail.
+ * Return: 0 when an smid is found, else fail;
 * on failure, the reply frame is filled.
*/
static int
@@ -634,8 +641,8 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
/**
* _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
* @ioc: per adapter object
- * @karg - (struct mpt3_ioctl_command)
- * @mf - pointer to mf in user space
+ * @karg: (struct mpt3_ioctl_command)
+ * @mf: pointer to mf in user space
*/
static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
@@ -970,6 +977,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
/* drop to default case for posting the request */
}
+ /* fall through */
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
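The added /* fall through */ comment documents that, when the fast-path case above does not break out, control intentionally drops into the default: arm; it also satisfies GCC's -Wimplicit-fallthrough checking. The general shape, as a self-contained sketch:

	/* Illustrative only: an annotated intentional fallthrough. */
	static int build_sge(int function, int fast_path)
	{
		int built = 0;

		switch (function) {
		case 1:				/* e.g. a SCSI-IO request */
			if (fast_path) {
				built = 1;	/* posted directly, done */
				break;
			}
			/* fall through */	/* use the generic path */
		default:
			built = 2;		/* generic SGL built here */
		}
		return built;
	}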
@@ -995,11 +1003,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->ignore_loginfos = 0;
}
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request, karg.data_sge_offset);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ karg.data_sge_offset);
goto issue_host_reset;
}
@@ -1114,7 +1121,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/**
* _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1168,7 +1175,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1199,7 +1206,7 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1237,7 +1244,7 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1281,7 +1288,7 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_do_reset - main handler for MPT3HARDRESET opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1419,7 +1426,7 @@ _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
/**
* _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1621,12 +1628,10 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
goto issue_host_reset;
}
@@ -1719,7 +1724,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
/**
* _ctl_diag_register - application register with driver
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
* This will allow the driver to setup any required buffers that will be
* needed by firmware to communicate with the driver.
@@ -1743,7 +1748,7 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_unregister - application unregister with driver
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
* This will allow the driver to cleanup any memory allocated for diag
* messages and to free up any resources.
@@ -1816,7 +1821,7 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_query - query relevant info associated with diag buffers
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
* The application will send only buffer_type and unique_id. Driver will
* inspect unique_id first, if valid, fill in all the info. If unique_id is
@@ -1903,8 +1908,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* mpt3sas_send_diag_release - Diag Release Message
* @ioc: per adapter object
- * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
- * @issue_reset - specifies whether host reset is required.
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset: specifies whether host reset is required.
*
*/
int
@@ -1968,12 +1973,9 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagReleaseRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- *issue_reset = 1;
+ *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
rc = -EFAULT;
goto out;
}
@@ -2009,7 +2011,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
/**
* _ctl_diag_release - request to send Diag Release Message to firmware
- * @arg - user space buffer containing ioctl content
+ * @ioc: ?
+ * @arg: user space buffer containing ioctl content
*
 * This allows ownership of the specified buffer to be returned to the driver,
* allowing an application to read the buffer without fear that firmware is
@@ -2098,7 +2101,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_read_buffer - request for copy of the diag buffer
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -2235,12 +2238,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
goto issue_host_reset;
}
@@ -2284,8 +2285,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
* @ioc: per adapter object
- * @cmd - ioctl opcode
- * @arg - (struct mpt3_ioctl_command32)
+ * @cmd: ioctl opcode
+ * @arg: (struct mpt3_ioctl_command32)
*
* MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
*/
@@ -2328,10 +2329,10 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
/**
* _ctl_ioctl_main - main ioctl entry point
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg - user space data buffer
- * @compat - handles 32 bit applications in 64bit os
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: user space data buffer
+ * @compat: handles 32 bit applications in 64bit os
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
* MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*/
@@ -2462,9 +2463,9 @@ out_unlock_pciaccess:
/**
* _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: ?
*/
static long
_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -2482,9 +2483,9 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/**
* _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: ?
*/
static long
_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -2500,9 +2501,9 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
/**
 * _ctl_ioctl_compat - main ioctl entry point (compat)
- * @file -
- * @cmd -
- * @arg -
+ * @file: ?
+ * @cmd: ?
+ * @arg: ?
*
* This routine handles 32 bit applications in 64bit os.
*/
@@ -2518,9 +2519,9 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
/**
 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
- * @file -
- * @cmd -
- * @arg -
+ * @file: ?
+ * @cmd: ?
+ * @arg: ?
*
* This routine handles 32 bit applications in 64bit os.
*/
@@ -2537,8 +2538,9 @@ _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
/* scsi host attributes */
/**
* _ctl_version_fw_show - firmware version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2559,8 +2561,9 @@ static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
/**
* _ctl_version_bios_show - bios version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2583,8 +2586,9 @@ static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
/**
* _ctl_version_mpi_show - MPI (message passing interface) version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2602,8 +2606,9 @@ static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
/**
* _ctl_version_product_show - product name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2620,8 +2625,9 @@ static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
/**
 * _ctl_version_nvdata_persistent_show - nvdata persistent version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2640,8 +2646,9 @@ static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
/**
* _ctl_version_nvdata_default_show - nvdata default version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2660,8 +2667,9 @@ static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
/**
* _ctl_board_name_show - board name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2678,8 +2686,9 @@ static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
/**
* _ctl_board_assembly_show - board assembly name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2696,8 +2705,9 @@ static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
/**
* _ctl_board_tracer_show - board tracer number
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2714,8 +2724,9 @@ static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
/**
* _ctl_io_delay_show - io missing delay
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the firmware implementation for debouncing device
* removal events.
@@ -2735,8 +2746,9 @@ static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
/**
* _ctl_device_delay_show - device missing delay
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the firmware implementation for debouncing device
* removal events.
@@ -2756,8 +2768,9 @@ static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
/**
* _ctl_fw_queue_depth_show - global credits
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the firmware queue depth limit
*
@@ -2776,8 +2789,9 @@ static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
/**
* _ctl_sas_address_show - sas address
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the controller sas address
*
@@ -2799,8 +2813,9 @@ static DEVICE_ATTR(host_sas_address, S_IRUGO,
/**
* _ctl_logging_level_show - logging level
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -2834,8 +2849,9 @@ static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
/**
* _ctl_fwfault_debug_show - show/store fwfault_debug
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* mpt3sas_fwfault_debug is command line option
* A sysfs 'read/write' shost attribute.
@@ -2870,8 +2886,9 @@ static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
/**
* _ctl_ioc_reset_count_show - ioc reset count
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the ioc reset count
*
@@ -2890,8 +2907,9 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
/**
* _ctl_ioc_reply_queue_count_show - number of reply queues
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the number of reply queues
*
@@ -2918,8 +2936,9 @@ static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
/**
* _ctl_BRM_status_show - Backup Rail Monitor Status
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the Backup Rail Monitor status
*
@@ -3004,8 +3023,9 @@ struct DIAG_BUFFER_START {
/**
* _ctl_host_trace_buffer_size_show - host buffer size (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -3049,8 +3069,9 @@ static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
/**
* _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*
@@ -3114,8 +3135,9 @@ static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
/**
* _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*
@@ -3200,8 +3222,9 @@ static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3224,8 +3247,10 @@ _ctl_diag_trigger_master_show(struct device *cdev,
/**
* _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3255,8 +3280,9 @@ static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3278,8 +3304,10 @@ _ctl_diag_trigger_event_show(struct device *cdev,
/**
* _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3309,8 +3337,9 @@ static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3332,8 +3361,10 @@ _ctl_diag_trigger_scsi_show(struct device *cdev,
/**
* _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3362,8 +3393,9 @@ static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
/**
 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3385,8 +3417,10 @@ _ctl_diag_trigger_mpi_show(struct device *cdev,
/**
* _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3450,8 +3484,9 @@ struct device_attribute *mpt3sas_host_attrs[] = {
/**
* _ctl_device_sas_address_show - sas address
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @dev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the sas address for the target
*
@@ -3471,8 +3506,9 @@ static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
/**
* _ctl_device_handle_show - device handle
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @dev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the firmware assigned device handle
*
@@ -3492,8 +3528,9 @@ static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
/**
* _ctl_device_ncq_io_prio_show - send prioritized io commands to device
- * @dev - pointer to embedded device
- * @buf - the buffer returned
+ * @dev: pointer to embedded device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' sdev attribute, only works with SATA
*/
@@ -3573,7 +3610,7 @@ static struct miscdevice gen2_ctl_dev = {
/**
* mpt3sas_ctl_init - main entry point for ctl.
- *
+ * @hbas_to_enumerate: ?
*/
void
mpt3sas_ctl_init(ushort hbas_to_enumerate)
@@ -3601,7 +3638,7 @@ mpt3sas_ctl_init(ushort hbas_to_enumerate)
/**
* mpt3sas_ctl_exit - exit point for ctl
- *
+ * @hbas_to_enumerate: ?
*/
void
mpt3sas_ctl_exit(ushort hbas_to_enumerate)
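A large share of the ctl.c hunks normalize parameter lines from the "@buf -" form to the "@buf:" form kernel-doc requires, and add the @attr argument that sysfs show/store handlers take but the old comments omitted. The documented shape of such a handler, with hypothetical names:

	/**
	 * example_show - example 'read-only' shost attribute (illustrative)
	 * @cdev: pointer to embedded class device
	 * @attr: the device attribute descriptor
	 * @buf: the buffer returned
	 *
	 * Return: number of bytes written to @buf.
	 */
	static ssize_t
	example_show(struct device *cdev, struct device_attribute *attr,
		char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%u\n", 42U);
	}
	static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);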
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b8d131a455d0..53133cfd420f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -284,6 +284,8 @@ struct _scsi_io_transfer {
/**
* _scsih_set_debug_level - global setting of ioc->logging_level.
+ * @val: ?
+ * @kp: ?
*
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
@@ -311,7 +313,7 @@ module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
* @sas_address: sas address
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
@@ -325,7 +327,7 @@ _scsih_srch_boot_sas_address(u64 sas_address,
 * @device_name: device name specified in the IDENTIFY frame
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_device_name(u64 device_name,
@@ -340,7 +342,7 @@ _scsih_srch_boot_device_name(u64 device_name,
* @slot_number: slot number
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
@@ -356,11 +358,11 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
* @sas_address: sas address
 * @device_name: device name specified in the IDENTIFY frame
* @enclosure_logical_id: enclosure logical id
- * @slot_number: slot number
+ * @slot: slot number
* @form: specifies boot device form
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
@@ -398,10 +400,11 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name,
/**
* _scsih_get_sas_address - set the sas_address for given device handle
+ * @ioc: ?
* @handle: device handle
* @sas_address: sas address
*
- * Returns 0 success, non-zero when failure
+ * Return: 0 success, non-zero when failure
*/
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -710,8 +713,6 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @sas_device: per sas device object
* @sdev: scsi device struct
* @starget: scsi target struct
- *
- * Returns nothing.
*/
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
@@ -806,8 +807,6 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
* _scsih_device_remove_by_handle - removing device object by handle
* @ioc: per adapter object
* @handle: device handle
- *
- * Return nothing.
*/
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -835,8 +834,6 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_device_remove_by_sas_address - removing device object by sas address
* @ioc: per adapter object
* @sas_address: device sas_address
- *
- * Return nothing.
*/
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
@@ -1109,8 +1106,6 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
* _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
* @ioc: per adapter object
* @handle: device handle
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -1273,7 +1268,7 @@ mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* _scsih_raid_device_find_by_wwid - raid device search
* @ioc: per adapter object
- * @handle: sas device handle (assigned by firmware)
+ * @wwid: ?
* Context: Calling function should acquire ioc->raid_device_lock
*
 * This searches for raid_device based on wwid, then returns the raid_device
@@ -1418,8 +1413,6 @@ mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
* Context: This function will acquire ioc->sas_node_lock.
*
* Adding new object to the ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
@@ -1437,7 +1430,7 @@ _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Returns 1 if end device.
+ * Return: 1 if end device.
*/
static int
_scsih_is_end_device(u32 device_info)
@@ -1456,7 +1449,7 @@ _scsih_is_end_device(u32 device_info)
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Returns 1 if nvme device.
+ * Return: 1 if nvme device.
*/
static int
_scsih_is_nvme_device(u32 device_info)
@@ -1473,7 +1466,7 @@ _scsih_is_nvme_device(u32 device_info)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns the smid stored scmd pointer.
+ * Return: the scmd pointer stored for the given smid.
 * The function then dereferences the stored scmd pointer.
*/
struct scsi_cmnd *
@@ -1489,7 +1482,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
scmd = scsi_host_find_tag(ioc->shost, unique_tag);
if (scmd) {
st = scsi_cmd_priv(scmd);
- if (st->cb_idx == 0xFF)
+ if (st->cb_idx == 0xFF || st->smid == 0)
scmd = NULL;
}
}
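The lookup now also treats smid == 0 as "not ours": scsi_host_find_tag() can return a command whose driver-private tracker was never bound to an outstanding message frame, and treating it as active would be wrong. Read as a predicate (st is the struct scsiio_tracker that scsi_cmd_priv() returns in this driver; the helper itself is illustrative, not part of the patch):

	static bool _scsih_tracker_is_active(struct scsiio_tracker *st)
	{
		/* cb_idx == 0xFF: no callback owner registered;
		 * smid == 0: tracker not bound to a message frame.
		 */
		return st && st->cb_idx != 0xFF && st->smid != 0;
	}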
@@ -1501,7 +1494,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @sdev: scsi device struct
* @qdepth: requested queue depth
*
- * Returns queue depth.
+ * Return: queue depth.
*/
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
@@ -1549,7 +1542,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* scsih_target_alloc - target add routine
* @starget: scsi target struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -1640,8 +1633,6 @@ scsih_target_alloc(struct scsi_target *starget)
/**
* scsih_target_destroy - target destroy routine
* @starget: scsi target struct
- *
- * Returns nothing.
*/
static void
scsih_target_destroy(struct scsi_target *starget)
@@ -1653,7 +1644,6 @@ scsih_target_destroy(struct scsi_target *starget)
struct _raid_device *raid_device;
struct _pcie_device *pcie_device;
unsigned long flags;
- struct sas_rphy *rphy;
sas_target_priv_data = starget->hostdata;
if (!sas_target_priv_data)
@@ -1693,7 +1683,6 @@ scsih_target_destroy(struct scsi_target *starget)
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- rphy = dev_to_rphy(starget->dev.parent);
sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
@@ -1720,7 +1709,7 @@ scsih_target_destroy(struct scsi_target *starget)
* scsih_slave_alloc - device add routine
* @sdev: scsi device struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -1800,8 +1789,6 @@ scsih_slave_alloc(struct scsi_device *sdev)
/**
* scsih_slave_destroy - device destroy routine
* @sdev: scsi device struct
- *
- * Returns nothing.
*/
static void
scsih_slave_destroy(struct scsi_device *sdev)
@@ -1907,7 +1894,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/**
* scsih_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
*/
static int
scsih_is_raid(struct device *dev)
@@ -1930,7 +1917,7 @@ scsih_is_nvme(struct device *dev)
/**
* scsih_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
scsih_get_resync(struct device *dev)
@@ -1991,7 +1978,7 @@ scsih_get_resync(struct device *dev)
/**
* scsih_get_state - get raid volume level
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
scsih_get_state(struct device *dev)
@@ -2057,6 +2044,7 @@ scsih_get_state(struct device *dev)
/**
* _scsih_set_level - set raid level
+ * @ioc: ?
* @sdev: scsi device struct
* @volume_type: volume type
*/
@@ -2098,9 +2086,9 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_get_volume_capabilities - volume capabilities
* @ioc: per adapter object
- * @sas_device: the raid_device object
+ * @raid_device: the raid_device object
*
- * Returns 0 for success, else 1
+ * Return: 0 for success, else 1
*/
static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
@@ -2192,7 +2180,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
* scsih_slave_configure - device configure routine.
* @sdev: scsi device struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -2256,7 +2244,7 @@ scsih_slave_configure(struct scsi_device *sdev)
ds = "SSP";
} else {
qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
- if (raid_device->device_info &
+ if (raid_device->device_info &
MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "SATA";
else
@@ -2365,13 +2353,14 @@ scsih_slave_configure(struct scsi_device *sdev)
"connector name( %s)\n", ds,
pcie_device->enclosure_level,
pcie_device->connector_name);
- pcie_device_put(pcie_device);
- spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- scsih_change_queue_depth(sdev, qdepth);
if (pcie_device->nvme_mdts)
blk_queue_max_hw_sectors(sdev->request_queue,
pcie_device->nvme_mdts/512);
+
+ pcie_device_put(pcie_device);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ scsih_change_queue_depth(sdev, qdepth);
/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
** merged and can eliminate holes created during merging
** operation.
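The reordering in this hunk is a correctness fix: pcie_device->nvme_mdts was previously read after pcie_device_put() had dropped the reference and the list lock had been released, a potential use-after-free. The rule the new ordering follows, sketched as a fragment with the same names (assumes the surrounding slave-configure context):

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* ... look up pcie_device under the lock ... */

	/* read every refcounted field while the reference is still held */
	if (pcie_device->nvme_mdts)
		blk_queue_max_hw_sectors(sdev->request_queue,
		    pcie_device->nvme_mdts / 512);

	/* only then drop the reference and release the lock */
	pcie_device_put(pcie_device);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);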
@@ -2450,8 +2439,6 @@ scsih_slave_configure(struct scsi_device *sdev)
* params[0] number of heads (max 255)
* params[1] number of sectors (max 63)
* params[2] number of cylinders
- *
- * Return nothing.
*/
static int
scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
@@ -2493,8 +2480,6 @@ scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
* _scsih_response_code - translation of device response code
* @ioc: per adapter object
* @response_code: response code returned by the device
- *
- * Return nothing.
*/
static void
_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
@@ -2544,8 +2529,8 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
*
* The callback handler when using scsih_issue_tm.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -2640,7 +2625,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* The callback index is set inside `ioc->tm_cb_idx`.
* The caller is responsible to check for outstanding commands.
*
- * Return SUCCESS or FAILED.
+ * Return: SUCCESS or FAILED.
*/
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
@@ -2708,11 +2693,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SCSITaskManagementRequest_t)/4);
- if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
+ if (mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->tm_cmds.status, mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
rc = mpt3sas_base_hard_reset_handler(ioc,
FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
@@ -2846,7 +2829,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
* scsih_abort - eh threads main abort routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_abort(struct scsi_cmnd *scmd)
@@ -2914,7 +2897,7 @@ scsih_abort(struct scsi_cmnd *scmd)
* scsih_dev_reset - eh threads main device reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
@@ -2992,7 +2975,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
* scsih_target_reset - eh threads main target reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_target_reset(struct scsi_cmnd *scmd)
@@ -3069,7 +3052,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
* scsih_host_reset - eh threads main host reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_host_reset(struct scsi_cmnd *scmd)
@@ -3105,8 +3088,6 @@ out:
*
* This adds the firmware event object into link list, then queues it up to
* be processed from user context.
- *
- * Return nothing.
*/
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -3133,8 +3114,6 @@ _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
* Context: This function will acquire ioc->fw_event_lock.
*
* If the fw_event is on the fw_event_list, remove it and do a put.
- *
- * Return nothing.
*/
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
@@ -3155,8 +3134,6 @@ _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
* mpt3sas_send_trigger_data_event - send event for processing trigger data
* @ioc: per adapter object
* @event_data: trigger event data
- *
- * Return nothing.
*/
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
@@ -3181,8 +3158,6 @@ mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_error_recovery_delete_devices - remove devices not responding
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -3203,8 +3178,6 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_port_enable_complete - port enable completed (fake event)
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
@@ -3242,8 +3215,6 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
*
* Walk the firmware event queue, either killing timers, or waiting
* for outstanding events to complete
- *
- * Return nothing.
*/
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
@@ -3369,7 +3340,7 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_ublock_io_device - prepare device to be deleted
* @ioc: per adapter object
- * @sas_addr: sas address
+ * @sas_address: sas address
*
* unblock then put device in offline state
*/
@@ -3395,7 +3366,6 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
/**
* _scsih_block_io_all_device - set the device state to SDEV_BLOCK
* @ioc: per adapter object
- * @handle: device handle
*
* During device pull we need to appropriately set the sdev state.
*/
@@ -3730,8 +3700,8 @@ out:
* handshake protocol with controller firmware.
* It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -3822,8 +3792,8 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* This code is part of the code to initiate the device removal
* handshake protocol with controller firmware.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3909,8 +3879,8 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @reply: reply message frame(lower 32bit addr)
* Context: interrupt time.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4004,19 +3974,19 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
u16 smid, u16 handle)
- {
- Mpi2SasIoUnitControlRequest_t *mpi_request;
- u32 ioc_state;
- int i = smid - ioc->internal_smid;
- unsigned long flags;
+{
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u32 ioc_state;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host has been removed\n",
__func__, ioc->name));
- return;
- } else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host in pci error recovery\n",
__func__, ioc->name));
return;
@@ -4059,8 +4029,8 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
* This will check delayed internal messages list, and process the
* next request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4098,8 +4068,8 @@ mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* This will check delayed target reset list, and feed the
* next request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4139,8 +4109,6 @@ _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* This handles the case where driver receives multiple expander
* add and delete events in a single shot. When there is a delete event
* the routine will void any pending add events waiting in the event queue.
- *
- * Return nothing.
*/
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4222,8 +4190,6 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
* or device add and delete events in a single shot. When there
* is a delete event the routine will void any pending add
* events waiting in the event queue.
- *
- * Return nothing.
*/
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4348,8 +4314,6 @@ _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
* volume has been deleted or removed. When the target reset is sent
* to volume, the PD target resets need to be queued to start upon
* completion of the volume target reset.
- *
- * Return nothing.
*/
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4433,8 +4397,6 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
* This will handle the case when the cable connected to entire volume is
* pulled. We will take care of setting the deleted flag so normal IO will
* not be sent.
- *
- * Return nothing.
*/
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4456,8 +4418,6 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: the temp threshold event data
* Context: interrupt time.
- *
- * Return nothing.
*/
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4496,8 +4456,6 @@ static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
*
* The flushing out of all pending scmd commands following host reset,
* where all IO is dropped to the floor.
- *
- * Return nothing.
*/
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
@@ -4533,8 +4491,6 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
* @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
- *
- * Returns nothing
*/
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
@@ -4568,7 +4524,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
- cpu_to_be32(scsi_prot_ref_tag(scmd));
+ cpu_to_be32(t10_pi_ref_tag(scmd->request));
break;
case SCSI_PROT_DIF_TYPE3:
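
The old scsi_prot_ref_tag() took the reference tag from the scsi_cmnd; t10_pi_ref_tag() derives it from the block request instead. Roughly, the tag is the low 32 bits of the request's starting LBA expressed in protection-interval units; a sketch of that computation (interval_exp is an assumed parameter here, where the real helper reads it from the queue's integrity profile):

#include <linux/blkdev.h>

/* Sketch: DIF reference tag derived from a block request. */
static inline u32 example_ref_tag(struct request *rq, unsigned int interval_exp)
{
        /* blk_rq_pos() is in 512-byte sectors; rescale to intervals. */
        return lower_32_bits(blk_rq_pos(rq) >> (interval_exp - 9));
}
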
@@ -4593,8 +4549,6 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* _scsih_eedp_error_handling - return sense code for EEDP errors
* @scmd: pointer to scsi command object
* @ioc_status: ioc status
- *
- * Returns nothing
*/
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
@@ -4623,12 +4577,12 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
/**
* scsih_qcmd - main scsi request entry point
+ * @shost: SCSI host pointer
* @scmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* The callback index is set inside `ioc->scsi_io_cb_idx`.
*
- * Returns 0 on success. If there's a failure, return either:
+ * Return: 0 on success. If there's a failure, return either:
* SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
@@ -4674,19 +4628,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
}
- /* host recovery or link resets sent via IOCTLs */
- if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
+ /* host recovery or link resets sent via IOCTLs */
return SCSI_MLQUEUE_HOST_BUSY;
-
- /* device has been deleted */
- else if (sas_target_priv_data->deleted) {
+ } else if (sas_target_priv_data->deleted) {
+ /* device has been deleted */
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
- /* device busy with task management */
} else if (sas_target_priv_data->tm_busy ||
- sas_device_priv_data->block)
+ sas_device_priv_data->block) {
+ /* device busy with task management */
return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
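
The reshuffle moves each comment inside the branch it describes without changing behavior; the return values follow the midlayer's queuecommand contract. A compressed sketch of that contract, with hypothetical predicates:

/* Sketch: queuecommand's three outcomes. */
static int example_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
        if (host_is_recovering(shost))          /* hypothetical */
                return SCSI_MLQUEUE_HOST_BUSY;  /* requeue, whole host */
        if (device_is_gone(scmd->device)) {     /* hypothetical */
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);          /* completed, not requeued */
                return 0;
        }
        if (device_is_busy(scmd->device))       /* hypothetical */
                return SCSI_MLQUEUE_DEVICE_BUSY; /* requeue, this device */
        return dispatch_to_hw(scmd);            /* hypothetical */
}
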
/*
* Bug work around for firmware SATL handling. The loop
@@ -4791,8 +4745,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
* @sense_buffer: sense data returned by target
* @data: normalized skey/asc/ascq
- *
- * Return nothing.
*/
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
@@ -4815,12 +4767,11 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
+ * @smid: ?
*
* scsi_status - SCSI Status code returned from target device
* scsi_state - state info associated with SCSI_IO determined by ioc
* ioc_status - ioc supplied status info
- *
- * Return nothing.
*/
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
@@ -5044,8 +4995,6 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* @ioc: per adapter object
* @handle: device handle
* Context: process
- *
- * Return nothing.
*/
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5089,8 +5038,6 @@ out:
* @ioc: per adapter object
* @sas_device: sas device whose PFA LED has to turned off
* Context: process
- *
- * Return nothing.
*/
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
@@ -5128,8 +5075,6 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5151,8 +5096,6 @@ _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5228,8 +5171,8 @@ out_unlock:
*
* Callback handler when using _scsih_qcmd.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -5416,6 +5359,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
scsi_set_resid(scmd, 0);
+ /* fall through */
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI2_IOCSTATUS_SUCCESS:
scmd->result = (DID_OK << 16) | scsi_status;
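
The added /* fall through */ comments mark the missing break as intentional, which is what GCC's -Wimplicit-fallthrough accepts as an annotation (later kernels use the fallthrough pseudo-keyword instead). A minimal sketch with made-up status values:

/* Sketch: annotated fall-through in a status switch. */
static void example_status(struct scsi_cmnd *scmd, int ioc_status)
{
        switch (ioc_status) {
        case 1: /* hypothetical: data overrun */
                scsi_set_resid(scmd, 0);
                /* fall through - an overrun is then treated as success */
        case 0: /* hypothetical: success */
                scmd->result = DID_OK << 16;
                break;
        default:
                scmd->result = DID_ERROR << 16;
        }
}
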
@@ -5468,8 +5412,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* During port enable, fw will send topology events for every device. It's
* possible that the handles may change from the previous setting, so this
* code keeps the handles updated when they change.
- *
- * Return nothing.
*/
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
@@ -5523,8 +5465,6 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Creating host side data object, stored in ioc->sas_hba
- *
- * Return nothing.
*/
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
@@ -5672,7 +5612,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
*
* Creating expander object, stored in ioc->sas_expander_list.
*
- * Return 0 for success, else error.
+ * Return: 0 for success, else error.
*/
static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5812,7 +5752,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
_scsih_expander_node_add(ioc, sas_expander);
- return 0;
+ return 0;
out_fail:
@@ -5827,8 +5767,6 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_expander_remove - removing expander object
* @ioc: per adapter object
* @sas_address: expander sas_address
- *
- * Return nothing.
*/
void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
@@ -5857,8 +5795,8 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
* Callback handler when sending internal generated SCSI_IO.
* The callback index passed is `ioc->scsih_cb_idx`
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -5892,9 +5830,9 @@ _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* @ioc: per adapter object
* @sas_address: sas address
* @handle: sas device handle
- * @access_flags: errors returned during discovery of the device
+ * @access_status: errors returned during discovery of the device
*
- * Return 0 for success, else failure
+ * Return: 0 for success, else failure
*/
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -5956,10 +5894,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
* @ioc: per adapter object
* @parent_sas_address: sas address of parent expander or sas host
* @handle: attached device handle
- * @phy_numberv: phy number
+ * @phy_number: phy number
* @link_rate: new link rate
- *
- * Returns nothing.
*/
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
@@ -6076,7 +6012,7 @@ out_unlock:
*
* Creating end device object, stored in ioc->sas_device_list.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
@@ -6208,9 +6144,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/**
* _scsih_remove_device - removing sas device object
* @ioc: per adapter object
- * @sas_device_delete: the sas_device object
- *
- * Return nothing.
+ * @sas_device: the sas_device object
*/
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
@@ -6446,6 +6380,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
+ /* fall through */
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6475,10 +6410,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: ?
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -6546,8 +6480,6 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -6608,9 +6540,9 @@ out:
* @ioc: per adapter object
* @wwid: wwid
* @handle: sas device handle
- * @access_flags: errors returned during discovery of the device
+ * @access_status: errors returned during discovery of the device
*
- * Return 0 for success, else failure
+ * Return: 0 for success, else failure
*/
static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
@@ -6695,8 +6627,6 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
* from SML and free up associated memory
* @ioc: per adapter object
* @pcie_device: the pcie_device object
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
@@ -6770,8 +6700,6 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
* _scsih_pcie_check_device - checking device responsiveness
* @ioc: per adapter object
* @handle: attached device handle
- *
- * Returns nothing.
*/
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -6863,7 +6791,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
*
* Creating end device object, stored in ioc->pcie_device_list.
*
- * Return 1 means queue the event later, 0 means complete the event
+ * Return: 1 means queue the event later, 0 means complete the event
*/
static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -6873,7 +6801,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
Mpi2ConfigReply_t mpi_reply;
struct _pcie_device *pcie_device;
struct _enclosure_node *enclosure_dev;
- u32 pcie_device_type;
u32 ioc_status;
u64 wwid;
@@ -6935,8 +6862,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device->port_num = pcie_device_pg0.PortNum;
pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
- pcie_device_type = pcie_device->device_info &
- MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
pcie_device->enclosure_handle =
le16_to_cpu(pcie_device_pg0.EnclosureHandle);
@@ -7165,6 +7090,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
+ /* fall through */
case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
if (ioc->shost_recovery)
break;
@@ -7190,12 +7116,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _scsih_pcie_device_status_change_event_debug - debug for
- * device event
+ * _scsih_pcie_device_status_change_event_debug - debug for device event
+ * @ioc: ?
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -7262,8 +7186,6 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7314,8 +7236,6 @@ out:
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -7348,8 +7268,6 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7416,8 +7334,6 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7483,6 +7399,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_VOLUME)
continue;
+ /* skip PCIe devices */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_PCIE_DEVICE)
+ continue;
handle = sas_device_priv_data->sas_target->handle;
lun = sas_device_priv_data->lun;
@@ -7580,8 +7500,6 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7617,8 +7535,6 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7654,8 +7570,6 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7684,7 +7598,7 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle for physical disk
* @phys_disk_num: physical disk number
*
- * Return 0 for success, else failure.
+ * Return: 0 for success, else failure.
*/
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
@@ -7736,10 +7650,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2RaidActionRequest_t)/4);
rc = -EFAULT;
goto out;
}
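
The open-coded timeout message plus MPT3_CMD_RESET test is replaced by mpt3sas_base_check_cmd_timeout(), introduced by this series, which centralizes the diagnostics and reports whether a host reset should follow. A simplified sketch of the shape such a helper takes, not the driver's exact body:

/* Sketch: returns 1 when the caller should escalate to a host reset. */
static u8 example_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
        u8 status, void *mpi_request, int sz)
{
        pr_err(MPT3SAS_FMT "Command timeout\n", ioc->name);
        if (status & MPT3_CMD_RESET)
                return 0;       /* an IOC reset already flushed the command */
        _debug_dump_mf(mpi_request, sz);        /* dump the request frame */
        return 1;
}
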
@@ -7794,8 +7708,6 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
@@ -7852,8 +7764,6 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @handle: volume device handle
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -7887,8 +7797,6 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
@@ -7929,8 +7837,6 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
@@ -7980,8 +7886,6 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
@@ -7997,8 +7901,6 @@ _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
@@ -8050,8 +7952,6 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -8130,8 +8030,6 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8202,8 +8100,6 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8286,8 +8182,6 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8372,8 +8266,6 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -8414,8 +8306,6 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8473,8 +8363,6 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_sas_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
@@ -8569,8 +8457,6 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
* _scsih_create_enclosure_list_after_reset - Free Existing list,
* And create enclosure list by scanning all Enclosure Page(0)s
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -8617,8 +8503,6 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8661,8 +8545,6 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponding_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
@@ -8736,8 +8618,6 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8785,8 +8665,6 @@ out:
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_raid_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
@@ -8842,8 +8720,6 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8914,8 +8790,6 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_expanders.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
@@ -8968,8 +8842,6 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
@@ -9009,8 +8881,6 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_remove_unresponding_devices - removing unresponding devices
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -9136,8 +9006,6 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_scan_for_devices_after_reset - scan for devices after host reset
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -9421,60 +9289,68 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
}
+
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
- * @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+}
+
+/**
+ * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
*
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
- *
- * Return nothing.
+ * The handler for doing any required cleanup or initialization.
*/
void
-mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
- ioc->scsih_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
- complete(&ioc->scsih_cmds.done);
- }
- if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
- ioc->tm_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
- complete(&ioc->tm_cmds.done);
- }
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
- memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
- memset(ioc->device_remove_in_progress, 0,
- ioc->device_remove_in_progress_sz);
- _scsih_fw_event_cleanup_queue(ioc);
- _scsih_flush_running_cmds(ioc);
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+}
+
+/**
+ * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void
+mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
- !ioc->sas_hba.num_phys)) {
- _scsih_prep_device_scan(ioc);
- _scsih_create_enclosure_list_after_reset(ioc);
- _scsih_search_responding_sas_devices(ioc);
- _scsih_search_responding_pcie_devices(ioc);
- _scsih_search_responding_raid_devices(ioc);
- _scsih_search_responding_expanders(ioc);
- _scsih_error_recovery_delete_devices(ioc);
- }
- break;
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_create_enclosure_list_after_reset(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_pcie_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
}
}
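
The single phase-switch callback becomes three entry points, so the base driver can call each stage directly instead of dispatching on the MPT3_IOC_PRE_RESET/AFTER_RESET/DONE_RESET constants. A sketch of the presumed caller side, not verbatim driver code:

/* Sketch: how a host reset would invoke the split handlers. */
static void example_host_reset(struct MPT3SAS_ADAPTER *ioc)
{
        mpt3sas_scsih_pre_reset_handler(ioc);   /* log/quiesce */
        /* ... hard reset of the IOC ... */
        mpt3sas_scsih_after_reset_handler(ioc); /* flush pending commands */
        /* ... firmware re-initialization ... */
        mpt3sas_scsih_reset_done_handler(ioc);  /* rescan for devices */
}
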
@@ -9483,8 +9359,6 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -9519,7 +9393,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
ioc->start_scan = 0;
- if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -9577,13 +9451,10 @@ out:
/**
* _firmware_event_work
- * @ioc: per adapter object
* @work: The fw_event_work object
* Context: user.
*
* wrappers for the work thread handling firmware events
- *
- * Return nothing.
*/
static void
@@ -9605,8 +9476,8 @@ _firmware_event_work(struct work_struct *work)
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
@@ -9791,8 +9662,6 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
*
* Removing object and freeing associated memory from the
* ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
@@ -9841,8 +9710,6 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
*
* Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
* the host system is shutting down.
- *
- * Return nothing.
*/
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
@@ -9914,7 +9781,6 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
* @pdev: PCI device struct
*
* Routine called when unloading the driver.
- * Return nothing.
*/
static void scsih_remove(struct pci_dev *pdev)
{
@@ -9996,8 +9862,6 @@ static void scsih_remove(struct pci_dev *pdev)
/**
* scsih_shutdown - routine call during system shutdown
* @pdev: PCI device struct
- *
- * Return nothing.
*/
static void
scsih_shutdown(struct pci_dev *pdev)
@@ -10220,7 +10084,7 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
*
* Get the next pcie device from pcie_device_init_list list.
*
- * Returns pcie device structure if pcie_device_init_list list is not empty
+ * Return: pcie device structure if pcie_device_init_list list is not empty
* otherwise returns NULL
*/
static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
@@ -10390,7 +10254,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
if (time >= (300 * HZ)) {
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
pr_info(MPT3SAS_FMT
"port enable: FAILED with timeout (timeout=300s)\n",
ioc->name);
@@ -10412,7 +10276,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
if (ioc->wait_for_discovery_to_complete) {
ioc->wait_for_discovery_to_complete = 0;
@@ -10568,7 +10432,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
* @pdev: PCI device struct
* @id: pci device id
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -10818,7 +10682,7 @@ out_add_shost_fail:
* @pdev: PCI device struct
* @state: PM state change to (usually PCI_D3)
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -10845,7 +10709,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
* scsih_resume - power management resume main entry point
* @pdev: PCI device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_resume(struct pci_dev *pdev)
@@ -10881,8 +10745,7 @@ scsih_resume(struct pci_dev *pdev)
*
* Description: Called when a PCI error is detected.
*
- * Return value:
- * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
*/
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
@@ -11143,7 +11006,7 @@ static struct pci_driver mpt3sas_driver = {
/**
* scsih_init - main entry point for this driver.
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_init(void)
@@ -11193,7 +11056,7 @@ scsih_init(void)
/**
* scsih_exit - exit point for this driver (when it is a module).
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static void
scsih_exit(void)
@@ -11223,7 +11086,7 @@ scsih_exit(void)
/**
* _mpt3sas_init - main entry point for this driver.
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int __init
_mpt3sas_init(void)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 3a143bb5ca72..f8cc2677c1cd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -134,7 +134,7 @@ _transport_convert_phy_link_rate(u8 link_rate)
*
* Populates sas identify info.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -226,8 +226,8 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* Callback handler when sending internal generated transport cmds.
* The callback index passed is `ioc->transport_cb_idx`
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -287,7 +287,7 @@ struct rep_manu_reply {
*
* Fills in the sas_expander_device object when SMP port is created.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
@@ -460,8 +460,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
* _transport_delete_port - helper function to removing a port
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
- *
- * Returns nothing.
*/
static void
_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
@@ -489,8 +487,6 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
@@ -513,8 +509,6 @@ _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
@@ -538,8 +532,6 @@ _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
* @sas_address: sas address of device/expander where phy needs to be added to
- *
- * Returns nothing.
*/
static void
_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
@@ -563,7 +555,7 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
return;
}
_transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
- return;
+ return;
}
}
@@ -573,8 +565,6 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
@@ -635,7 +625,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
*
* Adding new port object to the sas_node->sas_port_list.
*
- * Returns mpt3sas_port.
+ * Return: mpt3sas_port.
*/
struct _sas_port *
mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -794,8 +784,6 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*
* Removing object and freeing associated memory from the
* ioc->sas_port_list.
- *
- * Return nothing.
*/
void
mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -860,7 +848,7 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
* @phy_pg0: sas phy page 0
* @parent_dev: parent device class object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
@@ -928,7 +916,7 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @expander_pg1: expander page 1
* @parent_dev: parent device class object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
@@ -995,10 +983,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @ioc: per adapter object
* @sas_address: sas address of parent expander or sas host
* @handle: attached device handle
- * @phy_numberv: phy number
+ * @phy_number: phy number
* @link_rate: new link rate
- *
- * Returns nothing.
*/
void
mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
@@ -1090,7 +1076,7 @@ struct phy_error_log_reply {
* @ioc: per adapter object
* @phy: The sas phy object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1262,7 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
* _transport_get_linkerrors - return phy counters for both hba and expanders
* @phy: The sas phy object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1311,10 +1297,11 @@ _transport_get_linkerrors(struct sas_phy *phy)
/**
* _transport_get_enclosure_identifier -
- * @phy: The sas phy object
+ * @rphy: The sas phy object
+ * @identifier: ?
*
* Obtain the enclosure logical id for an expander.
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
@@ -1342,9 +1329,9 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
/**
* _transport_get_bay_identifier -
- * @phy: The sas phy object
+ * @rphy: The sas phy object
*
- * Returns the slot id for a device that resides inside an enclosure.
+ * Return: the slot id for a device that resides inside an enclosure.
*/
static int
_transport_get_bay_identifier(struct sas_rphy *rphy)
@@ -1400,8 +1387,9 @@ struct phy_control_reply {
* _transport_expander_phy_control - expander phy control
* @ioc: per adapter object
* @phy: The sas phy object
+ * @phy_operation: ?
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1571,7 +1559,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
* @phy: The sas phy object
* @hard_reset:
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1623,7 +1611,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
* @enable: enable phy when true
*
* Only support sas_host direct attached phys.
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_enable(struct sas_phy *phy, int enable)
@@ -1761,7 +1749,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
* @rates: rates defined in sas_phy_linkrates
*
* Only support sas_host direct attached phys.
- * Returns 0 for success, non-zero for failure.
+ *
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
@@ -1904,9 +1893,9 @@ _transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
/**
* _transport_smp_handler - transport portal for smp passthru
+ * @job: ?
* @shost: shost object
* @rphy: sas transport rphy object
- * @req:
*
* This is used primarily for smp_utils.
* Example:
@@ -1936,12 +1925,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
rc = -EFAULT;
- goto out;
+ goto job_done;
}
rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
if (rc)
- goto out;
+ goto job_done;
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
@@ -2066,6 +2055,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
mutex_unlock(&ioc->transport_cmds.mutex);
+job_done:
bsg_job_done(job, rc, reslen);
}
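
Failing before the transport mutex is taken must still complete the bsg job, but must not run the out: cleanup that releases state the early paths never set up; the new job_done label separates the two. The skeleton, with hypothetical helpers:

/* Sketch: early exits complete the job but skip resource cleanup. */
static void example_smp_handler(struct bsg_job *job)
{
        int rc;

        rc = early_check();             /* hypothetical: host busy, etc. */
        if (rc)
                goto job_done;          /* nothing locked yet */

        rc = do_smp_passthrough(job);   /* hypothetical */
        release_transport_resources();  /* hypothetical: the old out: path */
job_done:
        bsg_job_done(job, rc, 0);
}
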
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index b60fd7a3b571..cae7c1eaef34 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -62,7 +62,7 @@
/**
* _mpt3sas_raise_sigio - notify app
* @ioc: per adapter object
- * @event_data:
+ * @event_data: ?
*/
static void
_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
@@ -107,7 +107,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
/**
* mpt3sas_process_trigger_data - process the event data for the trigger
* @ioc: per adapter object
- * @event_data:
+ * @event_data: ?
*/
void
mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
@@ -209,8 +209,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
/**
* mpt3sas_trigger_event - Event trigger handler
* @ioc: per adapter object
- * @event:
- * @log_entry_qualifier:
+ * @event: ?
+ * @log_entry_qualifier: ?
*
*/
void
@@ -288,9 +288,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
/**
* mpt3sas_trigger_scsi - SCSI trigger handler
* @ioc: per adapter object
- * @sense_key:
- * @asc:
- * @ascq:
+ * @sense_key: ?
+ * @asc: ?
+ * @ascq: ?
*
*/
void
@@ -364,8 +364,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
/**
* mpt3sas_trigger_mpi - MPI trigger handler
* @ioc: per adapter object
- * @ioc_status:
- * @loginfo:
+ * @ioc_status: ?
+ * @loginfo: ?
*
*/
void
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 45aa94915cbf..b4927f2b7677 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -267,9 +267,6 @@ out_error:
* @scmd: pointer to scsi command object
* @raid_device: pointer to raid device data structure
* @mpi_request: pointer to the SCSI_IO request message frame
- * @smid: system request message index
- *
- * Returns nothing
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index dc4e801b2cef..6cd3e289ef99 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4611,7 +4611,7 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
* in order to keep it alive.
*/
if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
- cmd->result = ScsiResult(DID_RESET, 0);
+ cmd->result = DID_RESET << 16;
ncr_queue_done_cmd(np, cmd);
}
@@ -4957,7 +4957,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Check condition code
*/
- cmd->result = ScsiResult(DID_OK, S_CHECK_COND);
+ cmd->result = DID_OK << 16 | S_CHECK_COND;
/*
** Copy back sense data to caller's buffer.
@@ -4978,7 +4978,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Reservation Conflict condition code
*/
- cmd->result = ScsiResult(DID_OK, S_CONFLICT);
+ cmd->result = DID_OK << 16 | S_CONFLICT;
} else if ((cp->host_status == HS_COMPLETE)
&& (cp->scsi_status == S_BUSY ||
@@ -8043,7 +8043,7 @@ printk("ncr53c8xx_queue_command\n");
spin_lock_irqsave(&np->smp_lock, flags);
if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
- cmd->result = ScsiResult(sts, 0);
+ cmd->result = sts << 16;
#ifdef DEBUG_NCR53C8XX
printk("ncr53c8xx : command not queued - result=%d\n", sts);
#endif
@@ -8234,7 +8234,7 @@ static void process_waiting_list(struct ncb *np, int sts)
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
#endif
- wcmd->result = ScsiResult(sts, 0);
+ wcmd->result = sts << 16;
ncr_queue_done_cmd(np, wcmd);
}
}
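
ScsiResult() was a driver-local macro packing the host byte into bits 16-23 of the result word, with the SCSI status byte in bits 0-7; these conversions just spell the packing out using the standard DID_* codes. Along the lines of:

/* The layout the conversions rely on (sketch of the old local macro):
 * bits 16-23 host byte (DID_*), bits 0-7 SCSI status byte. */
#define EXAMPLE_SCSI_RESULT(host, status) (((host) << 16) | (status))

/* hence ScsiResult(DID_RESET, 0)       -> DID_RESET << 16
 * and  ScsiResult(DID_OK, S_CHECK_COND) -> DID_OK << 16 | S_CHECK_COND */
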
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
index 58806f432a16..4f1d4bf9c775 100644
--- a/drivers/scsi/nsp32_debug.c
+++ b/drivers/scsi/nsp32_debug.c
@@ -137,7 +137,7 @@ static void print_commandk (unsigned char *command)
printk("\n");
}
-static void show_command(Scsi_Cmnd *SCpnt)
+static void show_command(struct scsi_cmnd *SCpnt)
{
print_commandk(SCpnt->cmnd);
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 90394cef0f41..0a5dd5595dd3 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3295,6 +3295,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
init_completion(&qedf->flogi_compl);
+ status = qed_ops->common->update_drv_state(qedf->cdev, true);
+ if (status)
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to send drv state to MFW.\n");
+
memset(&link_params, 0, sizeof(struct qed_link_params));
link_params.link_up = true;
status = qed_ops->common->set_link(qedf->cdev, &link_params);
@@ -3343,6 +3348,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void __qedf_remove(struct pci_dev *pdev, int mode)
{
struct qedf_ctx *qedf;
+ int rc;
if (!pdev) {
QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3437,6 +3443,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
pci_set_drvdata(pdev, NULL);
}
+
+ rc = qed_ops->common->update_drv_state(qedf->cdev, false);
+ if (rc)
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to send drv state to MFW.\n");
+
qed_ops->common->slowpath_stop(qedf->cdev);
qed_ops->common->remove(qedf->cdev);
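
Probe and remove now tell the management firmware (MFW) whether the driver is loaded, via the common update_drv_state() op; the notification is advisory, so a failure is logged and otherwise ignored. A sketch of that usage, factored into a helper purely for illustration:

/* Sketch: advisory driver-state notification to the MFW. */
static void example_set_drv_state(struct qedf_ctx *qedf, bool active)
{
        int rc = qed_ops->common->update_drv_state(qedf->cdev, active);

        if (rc) /* advisory only: never fail probe/remove over it */
                QEDF_ERR(&qedf->dbg_ctx, "Failed to send drv state to MFW.\n");
}
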
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index a269da1a6c75..387dc87e4d22 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -126,22 +126,24 @@ static void init_sqe(struct iscsi_task_params *task_params,
sgl_task_params,
dif_task_params);
- if (scsi_is_slow_sgl(sgl_task_params->num_sges,
- sgl_task_params->small_mid_sge))
- num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
- else
- num_sges = min(sgl_task_params->num_sges,
- (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
- }
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
- SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
- SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
- buf_size);
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+ buf_size);
- if (GET_FIELD(pdu_header->hdr_second_dword,
- ISCSI_CMD_HDR_TOTAL_AHS_LEN))
- SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
- cmd_params->extended_cdb_sge.sge_len);
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
}
break;
case ISCSI_TASK_TYPE_INITIATOR_READ:
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cf274a79e77a..aa96bccb5a96 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -524,7 +524,7 @@ static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+ id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
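
The table backs a bitmap, and the bitmap helpers read and write the store one unsigned long at a time, so it must be sized in longs: BITS_TO_LONGS(size) * sizeof(long) rounds up correctly on both 32- and 64-bit, whereas DIV_ROUND_UP(size, 32) * 4 bytes can come up one half-word short on 64-bit. The idiom:

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Sketch: allocate an nbits-wide bitmap sized in whole longs. */
static unsigned long *example_alloc_bitmap(unsigned int nbits)
{
        return kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long),
                       GFP_KERNEL);
}
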
@@ -888,7 +888,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
- snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+ snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en;
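
Bounding the snprintf() by sizeof(tgt->iscsi_name) ties the limit to the destination array itself, so it cannot drift from the field's declared size if the NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN constant and the declaration ever disagree. The general pattern, with a hypothetical field size:

/* Sketch: bound string copies by the destination, not a named constant. */
struct example_tgt {
        char iscsi_name[64];    /* hypothetical field size */
};

static void example_copy_name(struct example_tgt *tgt, const char *src)
{
        snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n", src);
}
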
@@ -2273,6 +2273,7 @@ kset_free:
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+ int rval;
if (qedi->tmf_thread) {
flush_workqueue(qedi->tmf_thread);
@@ -2302,6 +2303,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
if (mode == QEDI_MODE_NORMAL)
qedi_free_iscsi_pf_param(qedi);
+ rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
+ if (rval)
+ QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");
+
if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
qedi_ops->common->slowpath_stop(qedi->cdev);
qedi_ops->common->remove(qedi->cdev);
@@ -2576,6 +2581,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
if (qedi_setup_boot_info(qedi))
QEDI_ERR(&qedi->dbg_ctx,
"No iSCSI boot target configured\n");
+
+ rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
+ if (rc)
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to send drv state to MFW\n");
+
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 89a4999fa631..4888b999e82f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -518,6 +518,9 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -570,7 +573,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
return 0;
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
return 0;
rval = qla2x00_read_sfp_dev(vha, buf, count);
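
Several of these sysfs handlers gain the same guards before touching hardware: bail out when the PCI channel is offline, and when qla2x00_chip_is_down() (new in this series) says the ISP is unusable, a stronger condition than the old qla2x00_reset_active() test. Condensed, with a hypothetical body:

/* Sketch: the common guard pattern for these handlers. */
static ssize_t example_sysfs_read(scsi_qla_host_t *vha, char *buf)
{
        if (unlikely(pci_channel_offline(vha->hw->pdev)))
                return 0;
        if (qla2x00_chip_is_down(vha))
                return 0;       /* chip down/resetting: report nothing */
        return do_hw_read(vha, buf);    /* hypothetical */
}
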
@@ -733,6 +736,15 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
int type;
port_id_t did;
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return 0;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
type = simple_strtol(buf, NULL, 10);
did.b.domain = (type & 0x00ff0000) >> 16;
@@ -771,6 +783,12 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
return 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
if (ha->xgmac_data)
goto do_read;
@@ -825,6 +843,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (ha->dcbx_tlv)
goto do_read;
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
@@ -1036,7 +1057,7 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
vha->device_flags & DFLG_NO_CABLE)
len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
- qla2x00_reset_active(vha))
+ qla2x00_chip_is_down(vha))
len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -1163,7 +1184,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
@@ -1350,7 +1371,7 @@ qla2x00_thermal_temp_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
- if (qla2x00_reset_active(vha)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
}
@@ -1381,7 +1402,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
@@ -1840,7 +1861,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (unlikely(pci_channel_offline(ha->pdev)))
goto done;
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
goto done;
stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
@@ -2141,6 +2162,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
msleep(1000);
qla24xx_disable_vp(vha);
+ qla2x00_wait_for_sess_deletion(vha);
vha->flags.delete_progress = 1;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 5fd44c50bbac..c7533fa7f46e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1130,6 +1130,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ha->fw_dump);
goto qla24xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp24;
qla2xxx_prep_dump(ha, ha->fw_dump);
@@ -1384,6 +1385,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ha->fw_dump);
goto qla25xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
ha->fw_dump->version = htonl(2);
@@ -2036,6 +2038,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"request...\n", ha->fw_dump);
goto qla83xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9442e18aef6f..a9dc9c4a6382 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -313,6 +313,7 @@ struct srb_cmd {
#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
+#define SRB_WAKEUP_ON_COMP BIT_6
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
@@ -361,6 +362,8 @@ struct ct_arg {
dma_addr_t rsp_dma;
u32 req_size;
u32 rsp_size;
+ u32 req_allocated_size;
+ u32 rsp_allocated_size;
void *req;
void *rsp;
port_id_t id;
@@ -377,6 +380,7 @@ struct srb_iocb {
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
#define SRB_LOGIN_NVME_PRLI BIT_3
+#define SRB_LOGIN_PRLI_ONLY BIT_4
uint16_t data[2];
u32 iop[2];
} logio;
@@ -396,6 +400,8 @@ struct srb_iocb {
struct completion comp;
struct els_plogi_payload *els_plogi_pyld;
struct els_plogi_payload *els_resp_pyld;
+ u32 tx_size;
+ u32 rx_size;
dma_addr_t els_plogi_pyld_dma;
dma_addr_t els_resp_pyld_dma;
uint32_t fw_status[3];
@@ -2310,6 +2316,7 @@ enum fcport_mgt_event {
FCME_ADISC_DONE,
FCME_GNNID_DONE,
FCME_GFPNID_DONE,
+ FCME_ELS_PLOGI_DONE,
};
enum rscn_addr_format {
@@ -2406,6 +2413,7 @@ typedef struct fc_port {
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
enum login_state fw_login_state;
+ unsigned long dm_login_expire;
unsigned long plogi_nack_done_deadline;
u32 login_gen, last_login_gen;
@@ -2416,7 +2424,8 @@ typedef struct fc_port {
u8 iocb[IOCB_SIZE];
u8 current_login_state;
u8 last_login_state;
- struct completion n2n_done;
+ u16 n2n_link_reset_cnt;
+ u16 n2n_chip_reset;
} fc_port_t;
#define QLA_FCPORT_SCAN 1
@@ -3226,6 +3235,7 @@ enum qla_work_type {
QLA_EVT_GFPNID,
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
+ QLA_EVT_ELS_PLOGI,
};
@@ -3597,6 +3607,8 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
uint32_t rida_fmt2:1;
+ uint32_t purge_mbox:1;
+ uint32_t n2n_bigger:1;
} flags;
uint16_t max_exchg;
@@ -3842,6 +3854,10 @@ struct qla_hw_data {
int port_down_retry_count;
uint8_t mbx_count;
uint8_t aen_mbx_count;
+ atomic_t num_pend_mbx_stage1;
+ atomic_t num_pend_mbx_stage2;
+ atomic_t num_pend_mbx_stage3;
+ uint16_t frame_payload_size;
uint32_t login_retry_count;
/* SNS command interfaces. */
@@ -3901,6 +3917,9 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
+ /* n2n */
+ struct els_plogi_payload plogi_els_payld;
+
void *swl;
/* These are used by mailbox operations. */
@@ -4155,6 +4174,7 @@ struct qla_hw_data {
struct work_struct board_disable;
struct mr_data_fx00 mr;
+ uint32_t chip_reset;
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
@@ -4236,7 +4256,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
-#define FREE_BIT 21
+#define N2N_LINK_RESET 21
#define PORT_UPDATE_NEEDED 22
#define FX00_RESET_RECOVERY 23
#define FX00_TARGET_SCAN 24
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 5d8688e5bc7c..50c1e6c62e31 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1366,6 +1366,11 @@ struct vp_rpt_id_entry_24xx {
/* format 1 fabric */
uint8_t vpstat1_subcode; /* vp_status=1 subcode */
uint8_t flags;
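+/* connection topology, encoded in bits 3:1 of 'flags' */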
+#define TOPO_MASK 0xE
+#define TOPO_FL 0x2
+#define TOPO_N2N 0x4
+#define TOPO_F 0x6
+
uint16_t fip_flags;
uint8_t rsv2[12];
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f68eb6096559..178974896b5c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,8 +45,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *,
- port_id_t);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
@@ -118,6 +117,7 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
+int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
/*
* Global Data in qla_os.c source file.
*/
@@ -212,8 +212,9 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_async_abort_cmd(srb_t *, bool);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
/*
* Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4bc2b66b299f..a0038d879b9d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -556,7 +556,7 @@ err2:
/* please ignore kernel warning. otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
@@ -564,7 +564,7 @@ err2:
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -1954,7 +1962,6 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
void *entries;
struct ct_fdmiv2_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
struct new_utsname *p_sysid = NULL;
/* Issue RHBA */
@@ -2134,9 +2141,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
+ eiter->a.max_ct_len = ha->frame_payload_size;
eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
eiter->len = cpu_to_be16(4 + 4);
size += 4 + 4;
@@ -3386,19 +3391,40 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ struct srb_iocb *c = &sp->u.iocb_cmd;
+
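+	/* Free per-type DMA buffers; the sizes recorded at allocation
+	 * time keep each free symmetric with its alloc. */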
+ switch (sp->type) {
+ case SRB_ELS_DCMD:
+ if (c->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ c->u.els_plogi.tx_size,
+ c->u.els_plogi.els_plogi_pyld,
+ c->u.els_plogi.els_plogi_pyld_dma);
+
+ if (c->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ c->u.els_plogi.rx_size,
+ c->u.els_plogi.els_resp_pyld,
+ c->u.els_plogi.els_resp_pyld_dma);
+ break;
+ case SRB_CT_PTHRU_CMD:
+ default:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+ break;
}
sp->free(sp);
@@ -3475,6 +3501,14 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->rscn_gen++;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
+ if (fcport->login_retry == 0) {
+ fcport->login_retry =
+ vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
switch (fcport->disc_state) {
case DSC_LOGIN_COMPLETE:
/* recheck session is still intact. */
@@ -3596,14 +3630,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
/* please ignore kernel warning. otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3654,6 +3688,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"Failed to allocate ct_sns request.\n");
@@ -3663,6 +3698,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"Failed to allocate ct_sns request.\n");
@@ -3698,6 +3734,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
return rval;
done_free_sp:
+ spin_lock_irqsave(&vha->hw->vport_slock, flags);
+ list_del(&sp->elem);
+ spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt),
@@ -3967,6 +4007,14 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
} else {
if (fcport->rscn_rcvd ||
fcport->disc_state != DSC_LOGIN_COMPLETE) {
+ if (fcport->login_retry == 0) {
+ fcport->login_retry =
+ vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
fcport->rscn_rcvd = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -4142,14 +4190,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
*/
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4179,14 +4227,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
/* please ignore kernel warning. Otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4281,14 +4329,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
done_free_sp:
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4349,6 +4397,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
&sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xffff,
"Failed to allocate ct_sns request.\n");
@@ -4366,6 +4415,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
&vha->hw->pdev->dev, rspsz,
&sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
"Failed to allocate ct_sns request.\n");
@@ -4425,14 +4475,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
done_free_sp:
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 7b675243bd16..b934977c5c26 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -50,16 +50,15 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
- scsi_qla_host_t *vha = sp->vha;
struct req_que *req;
unsigned long flags;
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- req = vha->hw->req_q_map[0];
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ req = sp->qpair->req;
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
iocb->timeout(sp);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
void
@@ -100,6 +99,8 @@ qla2x00_async_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
+ int rc, h;
+ unsigned long flags;
if (fcport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
@@ -114,11 +115,26 @@ qla2x00_async_iocb_timeout(void *data)
switch (sp->type) {
case SRB_LOGIN_CMD:
- /* Retry as needed. */
- lio->u.logio.data[0] = MBS_COMMAND_ERROR;
- lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED : 0;
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
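+		/* Attempt a FW-level abort first; only if the abort cannot
+		 * be issued, reap the handle and complete the command with
+		 * a timeout here. */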
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ /* Retry as needed. */
+ lio->u.logio.data[0] = MBS_COMMAND_ERROR;
+ lio->u.logio.data[1] =
+ lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
+ h++) {
+ if (sp->qpair->req->outstanding_cmds[h] ==
+ sp) {
+ sp->qpair->req->outstanding_cmds[h] =
+ NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
break;
case SRB_LOGOUT_CMD:
case SRB_CT_PTHRU_CMD:
@@ -127,7 +143,21 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
case SRB_CTRL_VP:
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
+ h++) {
+ if (sp->qpair->req->outstanding_cmds[h] ==
+ sp) {
+ sp->qpair->req->outstanding_cmds[h] =
+ NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
break;
}
}
@@ -160,6 +190,22 @@ qla2x00_async_login_sp_done(void *ptr, int res)
sp->free(sp);
}
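+/*
+ * N2N: the side with the larger WWPN initiates the login; these helpers
+ * compare the remote port's WWPN against the local port's.
+ */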
+static inline bool
+fcport_is_smaller(fc_port_t *fcport)
+{
+ if (wwn_to_u64(fcport->port_name) <
+ wwn_to_u64(fcport->vha->port_name))
+ return true;
+ else
+ return false;
+}
+
+static inline bool
+fcport_is_bigger(fc_port_t *fcport)
+{
+ return !fcport_is_smaller(fcport);
+}
+
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
@@ -189,13 +235,16 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_login_sp_done;
- lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
+ lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
+ } else {
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
- lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+
+ }
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
@@ -370,6 +419,19 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__qla24xx_handle_gpdb_event(vha, ea);
}
+int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
+ return qla2x00_post_work(vha, e);
+}
+
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
@@ -382,7 +444,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
"Async done-%s res %x %8phC\n",
sp->name, res, sp->fcport->port_name);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.event = FCME_ADISC_DONE;
@@ -418,6 +480,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_adisc_sp_done;
@@ -464,7 +528,6 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
- fcport->login_retry = vha->hw->login_retry_count;
ql_dbg(ql_dbg_disc, vha, 0x20de,
"GNL failed Port login retry %8phN, retry cnt=%d.\n",
fcport->port_name, fcport->login_retry);
@@ -497,35 +560,51 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
for (i = 0; i < n; i++) {
e = &vha->gnl.l[i];
wwn = wwn_to_u64(e->port_name);
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
continue;
+ if (IS_SW_RESV_ADDR(id))
+ continue;
+
found = 1;
- id.b.domain = e->port_id[2];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[0];
- id.b.rsvd_1 = 0;
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
+ if (fcport->fc4f_nvme)
+ current_login_state = e->current_login_state >> 4;
+ else
+ current_login_state = e->current_login_state & 0xf;
+
ql_dbg(ql_dbg_disc, vha, 0x20e2,
- "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
__func__, fcport->port_name,
e->current_login_state, fcport->fw_login_state,
- id.b.domain, id.b.area, id.b.al_pa,
+ fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
- if ((id.b24 != fcport->d_id.b24) ||
- ((fcport->loop_id != FC_NO_LOOP_ID) &&
- (fcport->loop_id != loop_id))) {
- ql_dbg(ql_dbg_disc, vha, 0x20e3,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- return;
+ switch (fcport->disc_state) {
+ case DSC_DELETE_PEND:
+ case DSC_DELETED:
+ break;
+ default:
+ if ((id.b24 != fcport->d_id.b24 &&
+ fcport->d_id.b24) ||
+ (fcport->loop_id != FC_NO_LOOP_ID &&
+ fcport->loop_id != loop_id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ break;
}
fcport->loop_id = loop_id;
@@ -544,66 +623,148 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport->login_pause = 1;
}
- if (fcport->fc4f_nvme)
- current_login_state = e->current_login_state >> 4;
- else
- current_login_state = e->current_login_state & 0xf;
-
- switch (current_login_state) {
- case DSC_LS_PRLI_COMP:
- ql_dbg(ql_dbg_disc, vha, 0x20e4,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
+ switch (vha->hw->current_topology) {
+ default:
+ switch (current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0x20e4, "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
- if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_LS_PORT_UNAVAIL:
+ default:
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(vha, fcport, data);
- break;
- case DSC_LS_PORT_UNAVAIL:
- default:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
+ fcport->loop_id = loop_id;
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
}
- ql_dbg(ql_dbg_disc, vha, 0x20e5,
- "%s %d %8phC\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
break;
- }
+ case ISP_CFG_N:
+ fcport->fw_login_state = current_login_state;
+ fcport->d_id = id;
+ switch (current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_LS_PLOGI_COMP:
+ if (fcport_is_bigger(fcport)) {
+ /* local adapter is smaller */
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
+
+ fcport->loop_id = loop_id;
+ qla24xx_fcport_handle_login(vha,
+ fcport);
+ break;
+ }
+ /* fall through */
+ default:
+ if (fcport_is_smaller(fcport)) {
+ /* local adapter is bigger */
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
+
+ fcport->loop_id = loop_id;
+ qla24xx_fcport_handle_login(vha,
+ fcport);
+ }
+ break;
+ }
+ break;
+ } /* switch (ha->current_topology) */
}
if (!found) {
- /* fw has no record of this port */
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
- qlt_schedule_sess_for_deletion
- (conflict_fcport);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_F:
+ case ISP_CFG_FL:
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ if (conflict_fcport) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0x20e5,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
+ }
+ }
+ /*
+ * FW already picked this loop id for
+ * another fcport
+ */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
-
- /* FW already picked this loop id for another fcport */
- if (fcport->loop_id == loop_id)
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case ISP_CFG_N:
+ fcport->disc_state = DSC_DELETED;
+ if (time_after_eq(jiffies, fcport->dm_login_expire)) {
+ if (fcport->n2n_link_reset_cnt < 2) {
+ fcport->n2n_link_reset_cnt++;
+ /*
+ * remote port is not sending PLOGI.
+ * Reset the link to kick-start its
+ * state machine.
+ */
+ set_bit(N2N_LINK_RESET,
+ &vha->dpc_flags);
+ } else {
+ if (fcport->n2n_chip_reset < 1) {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Chip reset to bring laser down\n");
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ fcport->n2n_chip_reset++;
+ } else {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Remote port %8ph is not coming back\n",
+ fcport->port_name);
+ fcport->scan_state = 0;
+ }
+ }
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /*
+ * The remote port is supposed to send PLOGI.
+ * Give it more time; FW will catch it.
+ */
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ break;
+ default:
+ break;
}
- qla24xx_fcport_handle_login(vha, fcport);
}
} /* gnl_event */
@@ -909,9 +1070,9 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
}
ql_dbg(ql_dbg_disc, vha, 0x211b,
- "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
- fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b24, fcport->login_retry);
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+ fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
+ fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
return rval;
@@ -1053,8 +1214,9 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
- fcport->disc_state, pd->current_login_state, ea->rc);
+ "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
+ ea->rc);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
@@ -1072,9 +1234,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
case PDS_PLOGI_COMPLETE:
case PDS_PRLI_PENDING:
case PDS_PRLI2_PENDING:
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
- __func__, __LINE__, fcport->port_name);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ /* Set discovery state back to GNL for a relogin attempt */
+ if (qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) {
+ fcport->disc_state = DSC_GNL;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
return;
case PDS_LOGO_PENDING:
case PDS_PORT_UNAVAILABLE:
@@ -1172,39 +1337,80 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
- if (fcport->login_retry > 0)
- fcport->login_retry--;
-
switch (fcport->disc_state) {
case DSC_DELETED:
wwn = wwn_to_u64(fcport->node_name);
- if (wwn == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post GNNID\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnnid_work(vha, fcport);
- } else if (fcport->loop_id == FC_NO_LOOP_ID) {
- ql_dbg(ql_dbg_disc, vha, 0x20bd,
- "%s %d %8phC post gnl\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnl_work(vha, fcport);
- } else {
- qla_chk_n2n_b4_login(vha, fcport);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ if (fcport_is_smaller(fcport)) {
+ /* this adapter is bigger */
+ if (fcport->login_retry) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha,
+ fcport);
+ fcport->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ }
+ fcport->login_retry--;
+ qla_post_els_plogi_work(vha, fcport);
+ } else {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Unable to reach remote port %8phC\n",
+ fcport->port_name);
+ }
+ } else {
+ qla24xx_post_gnl_work(vha, fcport);
+ }
+ break;
+ default:
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_disc, vha, 0x20bd,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnl_work(vha, fcport);
+ } else {
+ qla_chk_n2n_b4_login(vha, fcport);
+ }
+ break;
}
break;
case DSC_GNL:
- if (fcport->login_pause) {
- fcport->last_rscn_gen = fcport->rscn_gen;
- fcport->last_login_gen = fcport->login_gen;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
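+			/* current_login_state 0x6 == FCP PRLI complete */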
+ if ((fcport->current_login_state & 0xf) == 0x6) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post GPDB work\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->chip_reset =
+ vha->hw->base_qpair->chip_reset;
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post NVMe PRLI\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+ break;
+ default:
+ if (fcport->login_pause) {
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->last_login_gen = fcport->login_gen;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ qla_chk_n2n_b4_login(vha, fcport);
break;
}
-
- qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
+ fcport->login_retry--;
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
@@ -1219,6 +1425,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20d1,
"%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
+ fcport->login_retry--;
data[0] = data[1] = 0;
qla2x00_post_async_adisc_work(vha, fcport, data);
break;
@@ -1302,17 +1509,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
}
}
- if (fcport->flags & FCF_ASYNC_SENT) {
- fcport->login_retry++;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- return;
- }
-
- if (fcport->disc_state == DSC_DELETE_PEND) {
- fcport->login_retry++;
- return;
- }
-
if (fcport->last_rscn_gen != fcport->rscn_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
@@ -1324,6 +1520,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
}
+
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post PRLI\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+}
+
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *f, *tf;
@@ -1425,6 +1630,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFPNID_DONE:
qla24xx_handle_gfpnid_event(vha, ea);
break;
+ case FCME_ELS_PLOGI_DONE:
+ qla_handle_els_plogi_done(vha, ea);
+ break;
default:
BUG_ON(1);
break;
@@ -1487,11 +1695,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
wait_for_completion(&tm_iocb->u.tmf.comp);
- rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ rval = tm_iocb->u.tmf.data;
- if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
- ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8030,
"TM IOCB failed (%x).\n", rval);
}
@@ -1519,7 +1726,7 @@ qla24xx_abort_iocb_timeout(void *data)
struct srb_iocb *abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = CS_TIMEOUT;
- complete(&abt->u.abt.comp);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
static void
@@ -1528,12 +1735,16 @@ qla24xx_abort_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
- if (del_timer(&sp->u.iocb_cmd.timer))
- complete(&abt->u.abt.comp);
+ if (del_timer(&sp->u.iocb_cmd.timer)) {
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&abt->u.abt.comp);
+ else
+ sp->free(sp);
+ }
}
int
-qla24xx_async_abort_cmd(srb_t *cmd_sp)
+qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
fc_port_t *fcport = cmd_sp->fcport;
@@ -1548,6 +1759,8 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1571,10 +1784,11 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
"Abort command issued - hdl=%x, target_id=%x\n",
cmd_sp->handle, fcport->tgt_id);
- wait_for_completion(&abt_iocb->u.abt.comp);
-
- rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ if (wait) {
+ wait_for_completion(&abt_iocb->u.abt.comp);
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ }
done_free_sp:
sp->free(sp);
@@ -1610,7 +1824,7 @@ qla24xx_async_abort_command(srb_t *sp)
return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
FXDISC_ABORT_IOCTL);
- return qla24xx_async_abort_cmd(sp);
+ return qla24xx_async_abort_cmd(sp, true);
}
static void
@@ -1798,7 +2012,6 @@ void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
qlt_logo_completion_handler(fcport, data[0]);
fcport->login_gen++;
fcport->flags &= ~FCF_ASYNC_ACTIVE;
@@ -4049,7 +4262,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_update_host_map(vha, id);
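+	/* topo 2 == N2N; with n2n_bigger set the driver-initiated login
+	 * assigns the ID later (assumption), so skip the map update */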
+ if (!(topo == 2 && ha->flags.n2n_bigger))
+ qlt_update_host_map(vha, id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
@@ -4307,7 +4521,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
while (cnt--)
*dptr1++ = *dptr2++;
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/* Use alternate WWN? */
if (nv->host_p[1] & BIT_7) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
@@ -4590,20 +4804,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
- if (ha->flags.rida_fmt2) {
- /* With Rida Format 2, the login is already triggered.
- * We know who is on the other side of the wire.
- * No need to login to do login to find out or drop into
- * qla2x00_configure_local_loop().
- */
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
clear_bit(LOCAL_LOOP_UPDATE, &flags);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
- if (qla_tgt_mode_enabled(vha)) {
- /* allow the other side to start the login */
- clear_bit(LOCAL_LOOP_UPDATE, &flags);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- }
}
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
@@ -4687,110 +4891,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
}
/*
- * N2N Login
- * Updates Fibre Channel Device Database with local loop devices.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Returns:
- */
-static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
- fc_port_t *fcport)
-{
- struct qla_hw_data *ha = vha->hw;
- int res = QLA_SUCCESS, rval;
- int greater_wwpn = 0;
- int logged_in = 0;
-
- if (ha->current_topology != ISP_CFG_N)
- return res;
-
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(vha->n2n_port_name)) {
- ql_dbg(ql_dbg_disc, vha, 0x2002,
- "HBA WWPN is greater %llx > target %llx\n",
- wwn_to_u64(vha->port_name),
- wwn_to_u64(vha->n2n_port_name));
- greater_wwpn = 1;
- fcport->d_id.b24 = vha->n2n_id;
- }
-
- fcport->loop_id = vha->loop_id;
- fcport->fc4f_nvme = 0;
- fcport->query = 1;
-
- ql_dbg(ql_dbg_disc, vha, 0x4001,
- "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
- fcport->d_id.b24, vha->loop_id);
-
- /* Fill in member data. */
- if (!greater_wwpn) {
- rval = qla2x00_get_port_database(vha, fcport, 0);
- ql_dbg(ql_dbg_disc, vha, 0x1051,
- "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
- fcport->current_login_state, fcport->last_login_state,
- fcport->d_id.b24, fcport->loop_id, rval);
-
- if (((fcport->current_login_state & 0xf) == 0x4) ||
- ((fcport->current_login_state & 0xf) == 0x6))
- logged_in = 1;
- }
-
- if (logged_in || greater_wwpn) {
- if (!vha->nvme_local_port && vha->flags.nvme_enabled)
- qla_nvme_register_hba(vha);
-
- /* Set connected N_Port d_id */
- if (vha->flags.nvme_enabled)
- fcport->fc4f_nvme = 1;
-
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- fcport->disc_state = DSC_GNL;
- fcport->n2n_flag = 1;
- fcport->flags = 3;
- vha->hw->flags.gpsc_supported = 0;
-
- if (greater_wwpn) {
- ql_dbg(ql_dbg_disc, vha, 0x20e5,
- "%s %d PLOGI ELS %8phC\n",
- __func__, __LINE__, fcport->port_name);
-
- res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
- fcport, fcport->d_id);
- }
-
- if (res != QLA_SUCCESS) {
- ql_log(ql_log_info, vha, 0xd04d,
- "PLOGI Failed: portid=%06x - retrying\n",
- fcport->d_id.b24);
- res = QLA_SUCCESS;
- } else {
- /* State 0x6 means FCP PRLI complete */
- if ((fcport->current_login_state & 0xf) == 0x6) {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post GPDB work\n",
- __func__, __LINE__, fcport->port_name);
- fcport->chip_reset =
- vha->hw->base_qpair->chip_reset;
- qla24xx_post_gpdb_work(vha, fcport, 0);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post NVMe PRLI\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_prli_work(vha, fcport);
- }
- }
- } else {
- /* Wait for next database change */
- set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- }
-
- return res;
-}
-
-/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
*
@@ -4816,6 +4916,31 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ /* Initiate N2N login. */
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ /* Borrow the init_cb buffer to fetch the FW port login (PLOGI) template. */
+ u32 *bp, i, sz;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct els_plogi_payload),
+ ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ (void *)ha->init_cb, sz);
+ if (rval == QLA_SUCCESS) {
+ bp = (uint32_t *)ha->init_cb;
+ for (i = 0; i < sz / 4; i++, bp++)
+ *bp = cpu_to_be32(*bp);
+
+ memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
+ sizeof(ha->plogi_els_payld.data));
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ }
+ return QLA_SUCCESS;
+ }
+
found_devs = 0;
new_fcport = NULL;
entries = MAX_FIBRE_DEVICES_LOOP;
@@ -4847,14 +4972,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /* Inititae N2N login. */
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- rval = qla24xx_n2n_handle_login(vha, new_fcport);
- if (rval != QLA_SUCCESS)
- goto cleanup_allocation;
- return QLA_SUCCESS;
- }
-
/* Add devices to port list. */
id_iter = (char *)ha->gid_list;
for (index = 0; index < entries; index++) {
@@ -5053,6 +5170,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport *rport;
unsigned long flags;
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ return;
+
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -5108,25 +5228,28 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
- ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
- __func__, fcport->port_name);
-
- if (IS_QLAFX00(vha->hw)) {
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- } else {
- fcport->login_retry = 0;
- fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- fcport->deleted = 0;
- fcport->logout_on_delete = 1;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- }
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ case ISP_CFG_NL:
+ fcport->keep_nport_handle = 1;
+ break;
+ default:
+ break;
+ }
+
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
return;
}
@@ -5167,6 +5290,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla24xx_post_gpsc_work(vha, fcport);
}
}
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
}
/*
@@ -5667,6 +5791,34 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
}
+/* FW does not reserve a loop ID for the management server (FFFFFAh) */
+int
+qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
+{
+ int loop_id = FC_NO_LOOP_ID;
+ int lid = NPH_MGMT_SERVER - vha->vp_idx;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == 0) {
+ set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
+ return NPH_MGMT_SERVER;
+ }
+
+ /* pick id from high and work down to low */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ for (; lid > 0; lid--) {
+ if (!test_bit(lid, vha->hw->loop_id_map)) {
+ set_bit(lid, vha->hw->loop_id_map);
+ loop_id = lid;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return loop_id;
+}
+
/*
* qla2x00_fabric_login
* Issue fabric login command.
@@ -6334,6 +6486,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x00af,
"Performing ISP error recovery - ha=%p.\n", ha);
+ ha->flags.purge_mbox = 1;
/* For ISP82XX, reset_chip is just disabling interrupts.
* Driver waits for the completion of the commands.
* the interrupts need to be enabled.
@@ -6348,13 +6501,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->current_topology = 0;
ha->flags.fw_started = 0;
ha->flags.fw_init_done = 0;
- ha->base_qpair->chip_reset++;
+ ha->chip_reset++;
+ ha->base_qpair->chip_reset = ha->chip_reset;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i])
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
}
+ /* purge MBox commands */
+ if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+
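+	/* bounded wait (50 x 20ms, about one second) for pending mailbox
+	 * commands in any stage to drain */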
+ i = 0;
+ while (atomic_read(&ha->num_pend_mbx_stage3) ||
+ atomic_read(&ha->num_pend_mbx_stage2) ||
+ atomic_read(&ha->num_pend_mbx_stage1)) {
+ msleep(20);
+ i++;
+ if (i > 50)
+ break;
+ }
+ ha->flags.purge_mbox = 0;
+
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -6860,7 +7031,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
(uint8_t *)&icb->interrupt_delay_timer;
while (cnt--)
*dptr1++ = *dptr2++;
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/*
* Setup driver NVRAM options.
*/
@@ -6959,6 +7130,9 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (ql2xloginretrycount)
ha->login_retry_count = ql2xloginretrycount;
+ /* N2N: driver will initiate Login instead of FW */
+ icb->firmware_options_3 |= BIT_8;
+
/* Enable ZIO. */
if (!vha->flags.init_done) {
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
@@ -7068,7 +7242,7 @@ check_valid_image:
ha->active_image = QLA27XX_SECONDARY_IMAGE;
}
- ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
ha->active_image == 0 ? "default bootld and fw" :
ha->active_image == 1 ? "primary" :
ha->active_image == 2 ? "secondary" :
@@ -7916,7 +8090,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Use extended-initialization control block. */
memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/*
* Setup driver NVRAM options.
*/
@@ -8041,8 +8215,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
/* enable RIDA Format2 */
- if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
- icb->firmware_options_3 |= BIT_0;
+ icb->firmware_options_3 |= BIT_0;
+
+ /* N2N: driver will initiate Login instead of FW */
+ icb->firmware_options_3 |= BIT_8;
if (IS_QLA27XX(ha)) {
icb->firmware_options_3 |= BIT_8;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 37ae0f6d8ae5..4351736b2426 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -58,14 +58,12 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
- unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
- local_irq_save(flags);
+
if (IS_P3P_TYPE(ha))
qla82xx_poll(0, rsp);
else
ha->isp_ops->intr_handler(0, rsp);
- local_irq_restore(flags);
}
static inline uint8_t *
@@ -204,6 +202,12 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
+static inline int
+qla2x00_chip_is_down(scsi_qla_host_t *vha)
+{
+ return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
+}
+
static inline srb_t *
qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
{
@@ -222,6 +226,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
sp->fcport = fcport;
sp->iocbs = 1;
sp->vha = qpair->vha;
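+	/* init the list head so error paths can safely list_del() an sp
+	 * that was never queued */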
+ INIT_LIST_HEAD(&sp->elem);
+
done:
if (!sp)
QLA_QPAIR_MARK_NOT_BUSY(qpair);
@@ -276,8 +282,6 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
- if (sp->type == SRB_ELS_DCMD)
- init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
add_timer(&sp->u.iocb_cmd.timer);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a91cca52b5d5..42ac8e097419 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
req_cnt = 1;
handle = 0;
- if (!sp)
- goto skip_cmd_array;
-
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds) {
- ql_log(ql_log_warn, vha, 0x700b,
- "No room on outstanding cmd array.\n");
- goto queuing_error;
- }
-
- /* Prep command array. */
- req->current_outstanding_cmd = handle;
- req->outstanding_cmds[handle] = sp;
- sp->handle = handle;
-
- /* Adjust entry-counts as needed. */
- if (sp->type != SRB_SCSI_CMD)
+ if (sp && (sp->type != SRB_SCSI_CMD)) {
+ /* Adjust entry-counts as needed. */
req_cnt = sp->iocbs;
+ }
-skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
if (qpair->use_shadow_reg)
@@ -2183,6 +2160,28 @@ skip_cmd_array:
if (req->cnt < req_cnt + 2)
goto queuing_error;
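+	/* Claim an outstanding-cmd slot only after ring space is assured,
+	 * so no handle is leaked on queuing_error. */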
+ if (sp) {
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds) {
+ ql_log(ql_log_warn, vha, 0x700b,
+ "No room on outstanding cmd array.\n");
+ goto queuing_error;
+ }
+
+ /* Prep command array. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ }
+
/* Prep packet */
req->cnt -= req_cnt;
pkt = req->ring_ptr;
@@ -2195,6 +2194,8 @@ skip_cmd_array:
pkt->handle = handle;
}
+ return pkt;
+
queuing_error:
qpair->tgt_counters.num_alloc_iocb_failed++;
return pkt;
@@ -2240,12 +2241,15 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
- logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
-
- if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
- logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
- if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
- logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
+ } else {
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
+ logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
+ logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ }
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2462,6 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
sp->done = qla2x00_els_dcmd_sp_done;
sp->free = qla2x00_els_dcmd_sp_free;
@@ -2509,7 +2514,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
scsi_qla_host_t *vha = sp->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- uint32_t dsd_len = 24;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2532,20 +2536,21 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->control_flags = 0;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
- els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->tx_byte_count = els_iocb->tx_len =
+ sizeof(struct els_plogi_payload);
els_iocb->tx_address[0] =
cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
els_iocb->tx_address[1] =
cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
- els_iocb->tx_len = dsd_len;
els_iocb->rx_dsd_count = 1;
- els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->rx_byte_count = els_iocb->rx_len =
+ sizeof(struct els_plogi_payload);
els_iocb->rx_address[0] =
cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
els_iocb->rx_address[1] =
cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
- els_iocb->rx_len = dsd_len;
+
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
ql_dump_buffer(ql_log_info, vha, 0x0109,
@@ -2568,33 +2573,12 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
}
static void
-qla2x00_els_dcmd2_sp_free(void *data)
-{
- srb_t *sp = data;
- struct srb_iocb *elsio = &sp->u.iocb_cmd;
-
- if (elsio->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
- elsio->u.els_plogi.els_plogi_pyld,
- elsio->u.els_plogi.els_plogi_pyld_dma);
-
- if (elsio->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
- elsio->u.els_plogi.els_resp_pyld,
- elsio->u.els_plogi.els_resp_pyld_dma);
-
- del_timer(&elsio->timer);
- qla2x00_rel_sp(sp);
-}
-
-static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
unsigned long flags = 0;
int res;
@@ -2610,7 +2594,7 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
(res == QLA_SUCCESS) ? "successful" : "failed");
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- complete(&lio->u.els_plogi.comp);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
static void
@@ -2620,17 +2604,55 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
+ struct qla_work_evt *e;
+
+ ql_dbg(ql_dbg_disc, vha, 0x3072,
+ "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
+ sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
- ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8phC\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
+ del_timer(&sp->u.iocb_cmd.timer);
- complete(&lio->u.els_plogi.comp);
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&lio->u.els_plogi.comp);
+ else {
+ if (res) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.rc = res;
+ ea.event = FCME_ELS_PLOGI_DONE;
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
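+		/* Defer DMA buffer teardown to the UNMAP work item; free
+		 * inline only if no work element is available. */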
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+ if (!e) {
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.tx_size,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.rx_size,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+ sp->free(sp);
+ return;
+ }
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ }
}
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
- fc_port_t *fcport, port_id_t remote_did)
+ fc_port_t *fcport, bool wait)
{
srb_t *sp;
struct srb_iocb *elsio = NULL;
@@ -2648,23 +2670,23 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
elsio = &sp->u.iocb_cmd;
- fcport->d_id.b.domain = remote_did.b.domain;
- fcport->d_id.b.area = remote_did.b.area;
- fcport->d_id.b.al_pa = remote_did.b.al_pa;
-
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
init_completion(&elsio->u.els_plogi.comp);
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
+
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
sp->done = qla2x00_els_dcmd2_sp_done;
- sp->free = qla2x00_els_dcmd2_sp_free;
+ elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
@@ -2689,33 +2711,52 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
memset(ptr, 0, sizeof(struct els_plogi_payload));
memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
+ memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
+ &ha->plogi_els_payld.data,
+ sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
- qla24xx_get_port_login_templ(vha, ptr_dma + 4,
- &elsio->u.els_plogi.els_plogi_pyld->data[0],
- sizeof(struct els_plogi_payload));
- ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
+ sp->name, sp->handle, fcport->loop_id,
+ fcport->d_id.b24, vha->d_id.b24);
}
- ql_dbg(ql_dbg_io, vha, 0x3074,
- "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
- sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
-
- wait_for_completion(&elsio->u.els_plogi.comp);
+ if (wait) {
+ wait_for_completion(&elsio->u.els_plogi.comp);
- if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
- rval = QLA_FUNCTION_FAILED;
+ if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ goto done;
+ }
out:
+ fcport->flags &= ~(FCF_ASYNC_SENT);
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.tx_size,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.rx_size,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+
sp->free(sp);
+done:
return rval;
}
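
The hunk above splits ELS PLOGI completion into two modes: when the issuer sets SRB_WAKEUP_ON_COMP it blocks on a completion and owns the cleanup, otherwise the done-callback posts the event and schedules the unmap work itself. Below is a minimal userspace sketch of that wait/no-wait idiom, assuming nothing beyond POSIX threads; all names (struct cmd, done_callback) are illustrative stand-ins, not qla2xxx symbols.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		bool done;
	};

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = true;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	struct cmd {
		bool wakeup_on_comp;		/* models SRB_WAKEUP_ON_COMP */
		struct completion comp;
	};

	static void *done_callback(void *arg)
	{
		struct cmd *cmd = arg;

		if (cmd->wakeup_on_comp)
			complete(&cmd->comp);	/* blocked issuer owns cleanup */
		else
			printf("async mode: callback schedules its own cleanup\n");
		return NULL;
	}

	int main(void)
	{
		static struct cmd cmd = {
			.wakeup_on_comp = true,
			.comp = { PTHREAD_MUTEX_INITIALIZER,
				  PTHREAD_COND_INITIALIZER, false },
		};
		pthread_t t;

		pthread_create(&t, NULL, done_callback, &cmd);
		wait_for_completion(&cmd.comp);	/* sync mode only */
		pthread_join(t, NULL);
		printf("sync mode: issuer resumes, frees DMA buffers here\n");
		return 0;
	}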
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9fa5a2557f2c..36cbb29c84f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
unsigned long flags;
fc_port_t *fcport = NULL;
+ if (!vha->hw->flags.fw_started)
+ return;
+
/* Setup to process RIO completion. */
handle_cnt = 0;
if (IS_CNA_CAPABLE(ha))
@@ -908,7 +911,8 @@ skip_rio:
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ if (!N2N_TOPO(ha))
+ qla2x00_mark_all_devices_lost(vha, 1);
}
if (vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7e875f575229..2c6c2cd5a0d0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -59,6 +59,7 @@ static struct rom_cmd {
{ MBC_IOCB_COMMAND_A64 },
{ MBC_GET_ADAPTER_LOOP_ID },
{ MBC_READ_SFP },
+ { MBC_GET_RNID_PARAMS },
};
static int is_rom_cmd(uint16_t cmd)
@@ -110,6 +111,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ u32 chip_reset;
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
@@ -140,7 +142,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_SUCCESS;
abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-
+ chip_reset = ha->chip_reset;
if (ha->flags.pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x1003,
@@ -167,6 +169,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ atomic_inc(&ha->num_pend_mbx_stage1);
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -177,8 +180,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_log(ql_log_warn, vha, 0xd035,
"Cmd access timeout, cmd=0x%x, Exiting.\n",
mcp->mb[0]);
+ atomic_dec(&ha->num_pend_mbx_stage1);
return QLA_FUNCTION_TIMEOUT;
}
+ atomic_dec(&ha->num_pend_mbx_stage1);
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
ha->flags.mbox_busy = 1;
/* Save mailbox command for debug */
@@ -189,6 +198,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ rval = QLA_ABORTED;
+ ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ goto premature_exit;
+ }
+
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
@@ -231,7 +247,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"jiffies=%lx.\n", jiffies);
/* Wait for mbx cmd completion until timeout */
-
+ atomic_inc(&ha->num_pend_mbx_stage2);
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
@@ -241,6 +257,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -254,6 +271,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
+ atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -261,7 +279,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ } else if (ha->flags.purge_mbox ||
+ chip_reset != ha->chip_reset) {
+ ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ atomic_dec(&ha->num_pend_mbx_stage3);
+ rval = QLA_ABORTED;
+ goto premature_exit;
}
+ atomic_dec(&ha->num_pend_mbx_stage3);
+
if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
command, jiffies_to_msecs(jiffies - wait_time));
@@ -275,6 +303,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -289,6 +318,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
while (!ha->flags.mbox_int) {
+ if (ha->flags.purge_mbox ||
+ chip_reset != ha->chip_reset) {
+ ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
+
if (time_after(jiffies, wait_time))
break;
@@ -312,6 +349,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Waited %d sec.\n",
(uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
+ atomic_dec(&ha->num_pend_mbx_stage2);
/* Check whether we timed out */
if (ha->flags.mbox_int) {
@@ -390,7 +428,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Capture FW dump only, if PCI device active */
if (!pci_channel_offline(vha->hw->pdev)) {
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- if (w == 0xffff || ictrl == 0xffffffff) {
+ if (w == 0xffff || ictrl == 0xffffffff ||
+ (chip_reset != ha->chip_reset)) {
/* This is special case if there is unload
* of driver happening and if PCI device go
* into bad state due to PCI error condition
@@ -497,7 +536,11 @@ premature_exit:
complete(&ha->mbx_cmd_comp);
mbx_done:
- if (rval) {
+ if (rval == QLA_ABORTED) {
+ ql_log(ql_log_info, vha, 0xd035,
+ "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
+ mcp->mb[0]);
+ } else if (rval) {
if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
dev_name(&ha->pdev->dev), 0x1020+0x800,
@@ -2177,7 +2220,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mcp->out_mb = MBX_2|MBX_1|MBX_0;
} else if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
- mcp->mb[1] = BIT_6;
+ if (N2N_TOPO(vha->hw))
+ mcp->mb[1] = BIT_4; /* re-init */
+ else
+ mcp->mb[1] = BIT_6; /* LIP */
mcp->mb[2] = 0;
mcp->mb[3] = vha->hw->loop_reset_delay;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3797,30 +3843,68 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Format 1: WWPN %8phC.\n",
vha->port_name);
- /* N2N. direct connect */
- if (IS_QLA27XX(ha) &&
- ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
- /* if our portname is higher then initiate N2N login */
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(rptid_entry->u.f1.port_name)) {
- // ??? qlt_update_host_map(vha, id);
- vha->n2n_id = 0x1;
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Setting n2n_update_needed for id %d\n",
- vha->n2n_id);
+ switch (rptid_entry->u.f1.flags & TOPO_MASK) {
+ case TOPO_N2N:
+ ha->current_topology = ISP_CFG_N;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f1.port_name, 1);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->dm_login_expire = jiffies + 3*HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ set_bit(RELOGIN_NEEDED,
+ &vha->dpc_flags);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
} else {
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Remote login - Waiting for WWPN %8phC.\n",
- rptid_entry->u.f1.port_name);
+ id.b24 = 0;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ vha->d_id.b24 = 0;
+ vha->d_id.b.al_pa = 1;
+ ha->flags.n2n_bigger = 1;
+
+ id.b.al_pa = 2;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: assign local id %x remote id %x\n",
+ vha->d_id.b24, id.b24);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ ha->flags.n2n_bigger = 0;
+ }
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f1.port_name,
+ rptid_entry->u.f1.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
}
- memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
- WWN_SIZE);
+ /* if our port name is higher, then initiate N2N login */
+
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
- set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
ha->flags.n2n_ae = 1;
return;
+ break;
+ case TOPO_FL:
+ ha->current_topology = ISP_CFG_FL;
+ break;
+ case TOPO_F:
+ ha->current_topology = ISP_CFG_F;
+ break;
+ default:
+ break;
}
ha->flags.gpsc_supported = 1;
@@ -3909,30 +3993,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name, 1);
if (fcport) {
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->plogi_nack_done_deadline = jiffies + HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- qlt_schedule_sess_for_deletion(fcport);
- break;
- }
- } else {
- id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
- id.b.area = rptid_entry->u.f2.remote_nport_id[1];
- id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
- qla24xx_post_newsess_work(vha, &id,
- rptid_entry->u.f2.port_name,
- rptid_entry->u.f2.node_name,
- NULL,
- FC4_TYPE_UNKNOWN);
}
}
}
@@ -4220,6 +4283,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ if (!ha->flags.fw_started)
+ return QLA_SUCCESS;
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
"Entered %s.\n", __func__);
@@ -4289,6 +4355,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ if (!ha->flags.fw_started)
+ return QLA_SUCCESS;
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
"Entered %s.\n", __func__);
@@ -4657,7 +4726,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__);
bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++)
- *bp = cpu_to_be32(*bp);
+ *bp = le32_to_cpu(*bp);
}
return rval;
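
qla2x00_mailbox_command() now snapshots ha->chip_reset before each sleep and returns QLA_ABORTED if the counter moved, since a reset that ran while the caller slept invalidates the mailbox state. Here is a compilable toy version of that generation-counter check, with a fake reset injected between snapshot and re-read; the names are stand-ins, not driver symbols.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint chip_reset;		/* bumped once per ISP reset */

	enum mbx_status { MBX_OK, MBX_ABORTED };

	static enum mbx_status issue_mbx(int reset_races_us)
	{
		unsigned int snap = atomic_load(&chip_reset);

		/* stand-in for sleeping on the mailbox-interrupt completion */
		if (reset_races_us)
			atomic_fetch_add(&chip_reset, 1);

		if (snap != atomic_load(&chip_reset))
			return MBX_ABORTED;	/* reply belongs to old firmware */
		return MBX_OK;
	}

	int main(void)
	{
		printf("quiet run:      %s\n",
		       issue_mbx(0) == MBX_OK ? "MBX_OK" : "MBX_ABORTED");
		printf("reset mid-wait: %s\n",
		       issue_mbx(1) == MBX_OK ? "MBX_OK" : "MBX_ABORTED");
		return 0;
	}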
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f6f0a759a7c2..d620f4bebcd0 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -152,11 +152,18 @@ int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
unsigned long flags;
- int ret;
+ int ret = QLA_SUCCESS;
+ fc_port_t *fcport;
+
+ if (vha->hw->flags.fw_started)
+ ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
- ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->logout_on_delete = 0;
+
+ qla2x00_mark_all_devices_lost(vha, 0);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -485,7 +492,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
+ vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
vha->dpc_flags = 0L;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index c5a963c2c86e..20d9dc39f0fb 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -30,6 +30,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return 0;
}
+ if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
+ return 0;
+
if (!(fcport->nvme_prli_service_param &
(NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
@@ -676,15 +679,15 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
}
}
-void qla_nvme_register_hba(struct scsi_qla_host *vha)
+int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret;
+ int ret = EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
- return;
+ return ret;
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
@@ -711,7 +714,9 @@ void qla_nvme_register_hba(struct scsi_qla_host *vha)
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
- return;
+ } else {
+ vha->nvme_local_port->private = vha;
}
- vha->nvme_local_port->private = vha;
+
+ return ret;
}
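
qla_nvme_register_hba() now returns a status so qla_nvme_register_remote() can create the local port lazily on the first remote-port registration and bail out if that fails. A small sketch of the register-on-first-use shape, under invented names (register_hba and register_remote here are not the nvme-fc API):

	#include <stdio.h>

	static void *local_port;	/* NULL until first successful registration */

	static int register_hba(void)
	{
		if (local_port)
			return 0;		/* already registered */
		local_port = &local_port;	/* pretend the transport handed one back */
		return 0;
	}

	static int register_remote(const char *wwpn)
	{
		/* create the local port on first use; bail if that fails */
		if (!local_port && register_hba() != 0)
			return -1;
		printf("remote %s attached\n", wwpn);
		return 0;
	}

	int main(void)
	{
		register_remote("21:00:00:24:ff:7e:f5:00");	/* triggers HBA setup */
		register_remote("21:00:00:24:ff:7e:f5:01");	/* reuses local port */
		return 0;
	}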
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 816854ada654..4941d107fb1c 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -142,7 +142,7 @@ struct pt_ls4_rx_unsol {
/*
* Global functions prototype in qla_nvme.c source file.
*/
-void qla_nvme_register_hba(struct scsi_qla_host *);
+int qla_nvme_register_hba(struct scsi_qla_host *);
int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
void qla_nvme_delete(struct scsi_qla_host *);
void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e881fce7477a..42b8f0d3e580 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
+
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
.name = QLA2XXX_DRIVER_NAME,
@@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
* qla2x00_wait_for_sess_deletion can only be called from remove_one.
* it has dependency on UNLOADING flag to stop device discovery
*/
-static void
+void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
qla2x00_mark_all_devices_lost(vha, 0);
@@ -2815,6 +2816,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
+ atomic_set(&ha->num_pend_mbx_stage1, 0);
+ atomic_set(&ha->num_pend_mbx_stage2, 0);
+ atomic_set(&ha->num_pend_mbx_stage3, 0);
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3045,7 +3049,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
+ base_vha->mgmt_svr_loop_id =
+ qla2x00_reserve_mgmt_server_loop_id(base_vha);
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3180,6 +3185,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
+ ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
"Failed to initialize adapter - Adapter flags %x.\n",
@@ -3216,8 +3223,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
-
if (ha->mqenable) {
bool mq = false;
bool startit = false;
@@ -3603,6 +3608,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
+ ql_log(ql_log_info, base_vha, 0xb079,
+ "Removing driver\n");
/* Indicate device removal to prevent future board_disable and wait
* until any pending board_disable has completed. */
@@ -3625,6 +3632,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(base_vha);
+ } else if (!IS_QLAFX00(ha)) {
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+ "Clearing fcoe driver presence.\n");
+ if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+ "Error while clearing DRV-Presence.\n");
+ }
+
+ qla2x00_try_to_stop_firmware(base_vha);
+ }
+
qla2x00_wait_for_sess_deletion(base_vha);
/*
@@ -3648,14 +3670,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_delete_all_vps(ha, base_vha);
- if (IS_QLA8031(ha)) {
- ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
- "Clearing fcoe driver presence.\n");
- if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
- ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
- "Error while clearing DRV-Presence.\n");
- }
-
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
qla2x00_dfs_remove(base_vha);
@@ -3715,24 +3729,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
qla2x00_stop_timer(vha);
qla25xx_delete_queues(vha);
-
- if (ha->flags.fce_enabled)
- qla2x00_disable_fce_trace(vha, NULL, NULL);
-
- if (ha->eft)
- qla2x00_disable_eft_trace(vha);
-
- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
- if (ha->flags.fw_started)
- qla2x00_abort_isp_cleanup(vha);
- } else {
- if (ha->flags.fw_started) {
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
- ha->flags.fw_started = 0;
- }
- }
-
vha->flags.online = 0;
/* turn-off interrupts on the card */
@@ -3838,14 +3834,6 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
return;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-
- if (fcport->login_retry == 0) {
- fcport->login_retry = vha->hw->login_retry_count;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a3,
- "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
- fcport->port_name, fcport->loop_id, fcport->login_retry);
- }
}
/*
@@ -4793,7 +4781,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
- u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s %d %8phC enter\n",
@@ -4821,10 +4808,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
@@ -4866,9 +4853,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (N2N_TOPO(vha->hw))
- fcport->flags &= ~FCF_FABRIC_DEVICE;
-
fcport->id_changed = 1;
fcport->scan_state = QLA_FCPORT_FOUND;
memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
@@ -4929,12 +4913,22 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (dfcp)
qlt_schedule_sess_for_deletion(tfcp);
- wwn = wwn_to_u64(fcport->node_name);
- if (!wwn)
- qla24xx_async_gnnid(vha, fcport);
- else
- qla24xx_async_gnl(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ if (N2N_TOPO(vha->hw)) {
+ if (vha->flags.nvme_enabled) {
+ fcport->fc4f_nvme = 1;
+ fcport->n2n_flag = 1;
+ }
+ fcport->fw_login_state = 0;
+ /*
+ * Wait for link init to complete before sending login.
+ */
+ } else {
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
}
}
@@ -5069,6 +5063,10 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_IIDMA:
qla_do_iidma_work(vha, e->u.fcport.fcport);
break;
+ case QLA_EVT_ELS_PLOGI:
+ qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+ e->u.fcport.fcport, false);
+ break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -5098,7 +5096,7 @@ int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
void qla2x00_relogin(struct scsi_qla_host *vha)
{
fc_port_t *fcport;
- int status;
+ int status, relogin_needed = 0;
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -5107,47 +5105,59 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
* to it if we haven't run out of retries.
*/
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry &&
- !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
- if (vha->hw->current_topology != ISP_CFG_NL) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
- "%s %8phC DS %d LS %d\n", __func__,
- fcport->port_name, fcport->disc_state,
- fcport->fw_login_state);
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_RELOGIN;
- ea.fcport = fcport;
- qla2x00_fcport_event_handler(vha, &ea);
- } else if (vha->hw->current_topology == ISP_CFG_NL) {
- fcport->login_retry--;
- status = qla2x00_local_device_login(vha,
- fcport);
- if (status == QLA_SUCCESS) {
- fcport->old_loop_id = fcport->loop_id;
- ql_dbg(ql_dbg_disc, vha, 0x2003,
- "Port login OK: logged in ID 0x%x.\n",
- fcport->loop_id);
- qla2x00_update_fcport(vha, fcport);
- } else if (status == 1) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- /* retry the login again */
- ql_dbg(ql_dbg_disc, vha, 0x2007,
- "Retrying %d login again loop_id 0x%x.\n",
- fcport->login_retry,
- fcport->loop_id);
- } else {
- fcport->login_retry = 0;
- }
+ fcport->login_retry) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_LOGIN_COMPLETE)
+ continue;
+
+ if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
+ fcport->disc_state == DSC_DELETE_PEND) {
+ relogin_needed = 1;
+ } else {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RELOGIN;
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+ } else if (vha->hw->current_topology ==
+ ISP_CFG_NL) {
+ fcport->login_retry--;
+ status =
+ qla2x00_local_device_login(vha,
+ fcport);
+ if (status == QLA_SUCCESS) {
+ fcport->old_loop_id =
+ fcport->loop_id;
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
+ qla2x00_update_fcport
+ (vha, fcport);
+ } else if (status == 1) {
+ set_bit(RELOGIN_NEEDED,
+ &vha->dpc_flags);
+ /* retry the login again */
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry,
+ fcport->loop_id);
+ } else {
+ fcport->login_retry = 0;
+ }
- if (fcport->login_retry == 0 &&
- status != QLA_SUCCESS)
- qla2x00_clear_loop_id(fcport);
+ if (fcport->login_retry == 0 &&
+ status != QLA_SUCCESS)
+ qla2x00_clear_loop_id(fcport);
+ }
}
}
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+ if (relogin_needed)
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
ql_dbg(ql_dbg_disc, vha, 0x400e,
"Relogin end.\n");
}
@@ -6028,8 +6038,9 @@ qla2x00_do_dpc(void *data)
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
}
- if (test_and_clear_bit(ISP_ABORT_NEEDED,
- &base_vha->dpc_flags)) {
+ if (test_and_clear_bit
+ (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+ !test_bit(UNLOADING, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
@@ -6186,6 +6197,11 @@ intr_on_check:
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
+ if (test_and_clear_bit(N2N_LINK_RESET,
+ &base_vha->dpc_flags)) {
+ qla2x00_lip_reset(base_vha);
+ }
+
ha->dpc_active = 0;
end_loop:
set_current_state(TASK_INTERRUPTIBLE);
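
The relogin rework above stops retrying ports that are mid-async-operation or pending delete; instead it accumulates one relogin_needed flag across the walk and sets RELOGIN_NEEDED once at the end, so the next DPC pass revisits them. A reduced sketch of that accumulate-then-flag loop (struct port and its fields are illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	enum disc_state { FOUND, DELETE_PEND, LOGIN_COMPLETE };

	struct port {
		const char *name;
		enum disc_state state;
		bool async_busy;	/* models FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE */
	};

	int main(void)
	{
		struct port ports[] = {
			{ "p0", FOUND, true },		 /* busy: defer */
			{ "p1", LOGIN_COMPLETE, false }, /* done: skip */
			{ "p2", FOUND, false },		 /* eligible: relogin now */
		};
		bool relogin_needed = false;

		for (unsigned int i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
			struct port *p = &ports[i];

			if (p->state == LOGIN_COMPLETE)
				continue;
			if (p->async_busy || p->state == DELETE_PEND) {
				relogin_needed = true;	/* revisit next DPC pass */
				continue;
			}
			printf("relogin %s now\n", p->name);
		}
		if (relogin_needed)
			printf("set RELOGIN_NEEDED once for the deferred ports\n");
		return 0;
	}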
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 04458eb19d38..4499c787165f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
if (IS_P3P_TYPE(ha))
return QLA_SUCCESS;
+ if (!ha->flags.fw_started)
+ return QLA_SUCCESS;
+
ha->beacon_blink_led = 0;
if (IS_QLA2031(ha) || IS_QLA27XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0fea2e2326be..8c811b251d42 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -805,6 +805,10 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
list_for_each_entry(pla, &vha->plogi_ack_list, list) {
if (pla->id.b24 == id->b24) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
+ "%s %d %8phC Term INOT due to new INOT",
+ __func__, __LINE__,
+ pla->iocb.u.isp24.port_name);
qlt_send_term_imm_notif(vha, &pla->iocb, 1);
memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
return pla;
@@ -982,8 +986,9 @@ void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ if (!own)
+ qlt_send_first_logo(vha, &logo);
sess->send_els_logo = 0;
- qlt_send_first_logo(vha, &logo);
}
if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
@@ -1053,7 +1058,6 @@ void qlt_free_session_done(struct work_struct *work)
sess->disc_state = DSC_DELETED;
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
sess->deleted = QLA_SESS_DELETED;
- sess->login_retry = vha->hw->login_retry_count;
if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1073,6 +1077,7 @@ void qlt_free_session_done(struct work_struct *work)
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
+ own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
if (con) {
iocb = &con->iocb;
@@ -1156,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- qla2x00_mark_device_lost(vha, sess, 1, 1);
+ qla2x00_mark_device_lost(vha, sess, 0, 0);
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
sess->disc_state = DSC_DELETE_PEND;
@@ -1224,7 +1229,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
- struct qla_hw_data *ha = sess->vha->hw;
unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1245,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
return;
}
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
return;
}
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
sess->disc_state = DSC_DELETE_PEND;
@@ -3783,7 +3787,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
return;
}
cmd->jiffies_at_free = get_jiffies_64();
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -4146,7 +4150,7 @@ out_term:
qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4277,9 +4281,9 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
{
struct se_session *se_sess = sess->se_sess;
struct qla_tgt_cmd *cmd;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return NULL;
@@ -4292,6 +4296,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
@@ -4715,6 +4720,10 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s %d %8phC Term INOT due to mem alloc fail",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
qlt_send_term_imm_notif(vha, iocb, 1);
goto out;
}
@@ -5294,7 +5303,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct fc_port *sess;
struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
- int tag;
+ int tag, cpu;
unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
@@ -5326,7 +5335,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
se_sess = sess->se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return;
@@ -5357,6 +5366,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
+ cmd->se_cmd.map_cpu = cpu;
if (qfull) {
cmd->q_full = 1;
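
qla_target.c moves from percpu_ida_alloc()/percpu_ida_free() to sbitmap_queue_get() plus target_free_tag(), which hands back a (tag, cpu) pair instead of a bare tag. The toy allocator below only models the tag half -- one slot per tag, scan-for-free on alloc, clear on free -- and is not the kernel sbitmap API:

	#include <stdio.h>

	#define NTAGS 8
	static unsigned char used[NTAGS];	/* one slot per tag */

	static int tag_alloc(void)
	{
		for (int t = 0; t < NTAGS; t++)
			if (!used[t]) {
				used[t] = 1;
				return t;
			}
		return -1;	/* exhausted: caller bails, as qlt_get_tag() does */
	}

	static void tag_free(int t)
	{
		used[t] = 0;
	}

	int main(void)
	{
		int a = tag_alloc(), b = tag_alloc();

		printf("got tags %d and %d\n", a, b);
		tag_free(a);
		printf("after free, next alloc reuses %d\n", tag_alloc());
		tag_free(b);
		return 0;
	}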
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 731ca0d8520a..0ccd06f11f12 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -571,6 +571,15 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
}
break;
+ case T268_BUF_TYPE_REQ_MIRROR:
+ case T268_BUF_TYPE_RSP_MIRROR:
+ /*
+ * Mirror pointers are not implemented in the
+ * driver; shadow pointers are used instead.
+ * Skip these entries.
+ */
+ qla27xx_skip_entry(ent, buf);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
@@ -1028,8 +1037,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd300,
"Firmware has been previously dumped (%p),"
" -- ignoring request\n", vha->hw->fw_dump);
- else
+ else {
+ QLA_FW_STOPPED(vha->hw);
qla27xx_execute_fwdt_template(vha);
+ }
#ifndef __CHECKER__
if (!hardware_locked)
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 1ad7582220c3..3850b28518e5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.07-k"
+#define QLA2XXX_VERSION "10.00.00.08-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7732e9336d43..e03d12a5f986 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1049,10 +1049,8 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
NULL,
};
-static struct se_portal_group *tcm_qla2xxx_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
@@ -1171,10 +1169,8 @@ static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
NULL,
};
-static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
@@ -1465,8 +1461,7 @@ static void tcm_qla2xxx_free_session(struct fc_port *sess)
}
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(se_sess);
}
static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
@@ -1543,7 +1538,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
* Locate our struct se_node_acl either from an explict NodeACL created
* via ConfigFS, or via running in TPG demo mode.
*/
- se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+ se_sess = target_setup_session(&tpg->se_tpg, num_tags,
sizeof(struct qla_tgt_cmd),
TARGET_PROT_ALL, port_name,
qlat_sess, tcm_qla2xxx_session_cb);
@@ -1624,9 +1619,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
sess->conf_compl_supported = conf_compl_supported;
- /* Reset logout parameters to default */
- sess->logout_on_delete = 1;
- sess->keep_nport_handle = 0;
}
/*
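
The tcm_qla2xxx hunks replace the two-step teardown (transport_deregister_session_configfs() then transport_deregister_session()) with a single target_remove_session() call, and session setup likewise moves to target_setup_session(). A sketch of why folding an ordered teardown behind one entry point helps; every name below is a stand-in, not the target-core API:

	#include <stdio.h>

	struct session { int registered; };

	static void deregister_configfs(struct session *s) { (void)s; /* step 1 */ }
	static void deregister_transport(struct session *s) { s->registered = 0; }

	/* one entry point owns the whole ordered teardown */
	static void remove_session(struct session *s)
	{
		deregister_configfs(s);
		deregister_transport(s);
	}

	int main(void)
	{
		struct session s = { .registered = 1 };

		remove_session(&s);	/* callers can no longer get the order wrong */
		printf("registered=%d\n", s.registered);
		return 0;
	}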
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 8578e566ab41..9d09228eee28 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -959,7 +959,7 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
/* Temporary workaround until bug is found and fixed (one bug has been found
already, but fixing it makes things even worse) -jj */
int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
- host->can_queue = atomic_read(&host->host_busy) + num_free;
+ host->can_queue = scsi_host_busy(host) + num_free;
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4c60c260c5da..fc1356d101b0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -162,12 +162,12 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
(level > 1)) {
scsi_print_result(cmd, "Done", disposition);
scsi_print_command(cmd);
- if (status_byte(cmd->result) & CHECK_CONDITION)
+ if (status_byte(cmd->result) == CHECK_CONDITION)
scsi_print_sense(cmd);
if (level > 3)
scmd_printk(KERN_INFO, cmd,
"scsi host busy %d failed %d\n",
- atomic_read(&cmd->device->host->host_busy),
+ scsi_host_busy(cmd->device->host),
cmd->device->host->host_failed);
}
}
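
Several hunks in this series replace tests like `status_byte(result) & CHECK_CONDITION` and `driver_byte(result) & DRIVER_SENSE` with equality checks: these byte fields hold enumerated values, not single-bit masks, so `&` can false-positive whenever another status shares a bit. A two-line demonstration, using the classic shifted encodings that status_byte() produced at the time (CHECK_CONDITION 0x01, COMMAND_TERMINATED 0x11):

	#include <stdio.h>

	#define CHECK_CONDITION    0x01	/* enumerated value, not a mask */
	#define COMMAND_TERMINATED 0x11	/* shares bit 0 with CHECK_CONDITION */

	int main(void)
	{
		unsigned char status = COMMAND_TERMINATED;

		printf("& test : %s\n", (status & CHECK_CONDITION) ?
		       "matches (false positive)" : "no match");
		printf("== test: %s\n", (status == CHECK_CONDITION) ?
		       "matches" : "no match (correct)");
		return 0;
	}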
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index 6dcc4c685d1d..4fd75a3aff66 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -43,7 +43,4 @@ struct scsi_device;
struct scsi_target;
struct scatterlist;
-/* obsolete typedef junk. */
-#include "scsi_typedefs.h"
-
#endif /* _SCSI_H */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 24d7496cd9e2..60bcc6df97a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -164,29 +164,29 @@ static const char *sdebug_version_date = "20180128";
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
+#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
SDEBUG_OPT_SHORT_TRANSFER | \
- SDEBUG_OPT_HOST_BUSY)
+ SDEBUG_OPT_HOST_BUSY | \
+ SDEBUG_OPT_CMD_ABORT)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_RECOVERED_ERR is set.
* - a TRANSPORT_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_TRANSPORT_ERR is set.
+ * - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
+ * CMD_ABORT
*
- * When "every_nth" < 0 then after "- every_nth" commands:
- * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
- * - a RECOVERED_ERROR is simulated on successful read and write
- * commands if SDEBUG_OPT_RECOVERED_ERR is set.
- * - a TRANSPORT_ERROR is simulated on successful read and write
- * commands if _DEBUG_OPT_TRANSPORT_ERR is set.
- * This will continue on every subsequent command until some other action
- * occurs (e.g. the user * writing a new value (other than -1 or 1) to
- * every_nth via sysfs).
+ * When "every_nth" < 0 then after "- every_nth" commands the selected
+ * error will be injected. The error will be injected on every subsequent
+ * command until some other action occurs; for example, the user writing
+ * a new value (other than -1 or 1) to every_nth:
+ * echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
*/
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
@@ -281,6 +281,7 @@ struct sdebug_defer {
int issuing_cpu;
bool init_hrt;
bool init_wq;
+ bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -296,6 +297,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dix:1;
unsigned int inj_short:1;
unsigned int inj_host_busy:1;
+ unsigned int inj_cmd_abort:1;
};
struct sdebug_queue {
@@ -3792,6 +3794,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
+ bool aborted = sd_dp->aborted;
int qc_idx;
int retiring = 0;
unsigned long iflags;
@@ -3801,6 +3804,8 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct sdebug_dev_info *devip;
sd_dp->defer_t = SDEB_DEFER_NONE;
+ if (unlikely(aborted))
+ sd_dp->aborted = false;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3852,6 +3857,11 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
atomic_set(&retired_max_queue, k + 1);
}
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (unlikely(aborted)) {
+ if (sdebug_verbose)
+ pr_info("bypassing scsi_done() due to aborted cmd\n");
+ return;
+ }
scp->scsi_done(scp); /* callback to mid level */
}
@@ -4312,7 +4322,8 @@ static void setup_inject(struct sdebug_queue *sqp,
if (sdebug_every_nth > 0)
sqcp->inj_recovered = sqcp->inj_transport
= sqcp->inj_dif
- = sqcp->inj_dix = sqcp->inj_short = 0;
+ = sqcp->inj_dix = sqcp->inj_short
+ = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
return;
}
sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
@@ -4321,6 +4332,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
+ sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4458,7 +4470,14 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
sd_dp->defer_t = SDEB_DEFER_WQ;
+ if (unlikely(sqcp->inj_cmd_abort))
+ sd_dp->aborted = true;
schedule_work(&sd_dp->ew.work);
+ if (unlikely(sqcp->inj_cmd_abort)) {
+ sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
+ cmnd->request->tag);
+ blk_abort_request(cmnd->request);
+ }
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
(scsi_result == device_qfull_result)))
@@ -4844,12 +4863,11 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
(unsigned long)sdebug_dev_size_mb *
1048576;
- fake_storep = vmalloc(sz);
+ fake_storep = vzalloc(sz);
if (NULL == fake_storep) {
pr_err("out of memory, 9\n");
return -ENOMEM;
}
- memset(fake_storep, 0, sz);
}
sdebug_fake_rw = n;
}
@@ -5391,13 +5409,12 @@ static int __init scsi_debug_init(void)
}
if (sdebug_fake_rw == 0) {
- fake_storep = vmalloc(sz);
+ fake_storep = vzalloc(sz);
if (NULL == fake_storep) {
pr_err("out of memory, 1\n");
ret = -ENOMEM;
goto free_q_arr;
}
- memset(fake_storep, 0, sz);
if (sdebug_num_parts > 0)
sdebug_build_parts(fake_storep, sz);
}
@@ -5507,9 +5524,9 @@ static void __exit scsi_debug_exit(void)
int k = sdebug_add_host;
stop_all_queued();
- free_all_queued();
for (; k; k--)
sdebug_remove_adapter();
+ free_all_queued();
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -5790,11 +5807,13 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
fini:
if (F_DELAY_OVERR & flags)
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
- else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
+ else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
+ sdebug_ndelay > 10000)) {
/*
- * If any delay is active, for F_SSU_DELAY want at least 1
- * second and if sdebug_jdelay>0 want a long delay of that
- * many seconds; for F_SYNC_DELAY want 1/20 of that.
+ * Skip long delays if ndelay <= 10 microseconds. Otherwise
+ * for Start Stop Unit (SSU) want at least 1 second delay and
+ * if sdebug_jdelay>1 want a long delay of that many seconds.
+ * For Synchronize Cache want 1/20 of SSU's delay.
*/
int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
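
The rewritten every_nth comment above describes two injection regimes: a positive value fires the fault on each multiple of n, a negative value fires from command |n| onward until the knob is rewritten via sysfs. A toy version of that rule (not the scsi_debug implementation):

	#include <stdbool.h>
	#include <stdio.h>

	static bool inject(long cmd_count, int every_nth)
	{
		if (every_nth > 0)
			return cmd_count % every_nth == 0;
		if (every_nth < 0)
			return cmd_count >= -every_nth;	/* sticky from then on */
		return false;
	}

	int main(void)
	{
		for (long c = 1; c <= 6; c++)
			printf("cmd %ld: n=3 -> %d, n=-3 -> %d\n",
			       c, inject(c, 3), inject(c, -3));
		return 0;
	}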
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 8932ae81a15a..b7a8fdfeb2f4 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -66,7 +66,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
{
lockdep_assert_held(shost->host_lock);
- if (atomic_read(&shost->host_busy) == shost->host_failed) {
+ if (scsi_host_busy(shost) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
rtn = host->hostt->eh_timed_out(scmd);
if (rtn == BLK_EH_DONE) {
+ /*
+ * For blk-mq, we must set the request state to complete now
+ * before sending the request to the scsi error handler. This
+ * will prevent a use-after-free in the event the LLD manages
+ * to complete the request before the error handler finishes
+ * processing this timed out request.
+ *
+ * If the request was already completed, then the LLD beat the
+ * time out handler from transferring the request to the scsi
+ * error handler. In that case we can return immediately as no
+ * further action is required.
+ */
+ if (req->q->mq_ops && !blk_mq_mark_complete(req))
+ return rtn;
if (scsi_abort_command(scmd) != SUCCESS) {
set_host_byte(scmd, DID_TIME_OUT);
scsi_eh_scmd_add(scmd);
@@ -2155,7 +2169,7 @@ int scsi_error_handler(void *data)
break;
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
- shost->host_failed != atomic_read(&shost->host_busy)) {
+ shost->host_failed != scsi_host_busy(shost)) {
SCSI_LOG_ERROR_RECOVERY(1,
shost_printk(KERN_INFO, shost,
"scsi_eh_%d: sleeping\n",
@@ -2170,7 +2184,7 @@ int scsi_error_handler(void *data)
"scsi_eh_%d: waking up %d/%d/%d\n",
shost->host_no, shost->host_eh_scheduled,
shost->host_failed,
- atomic_read(&shost->host_busy)));
+ scsi_host_busy(shost)));
/*
* We have a host that is failing for some reason. Figure out
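
The scsi_times_out() change relies on blk_mq_mark_complete() to let exactly one of two racing paths -- the timeout handler and the LLD's normal completion -- claim the request, closing the use-after-free described in the new comment. The sketch below models that claim with a C11 compare-exchange; the kernel flips the blk-mq request state instead, but the win/lose shape is the same:

	#include <stdatomic.h>
	#include <stdio.h>

	enum rq_state { RQ_IN_FLIGHT, RQ_COMPLETE };

	static _Atomic enum rq_state state = RQ_IN_FLIGHT;

	/* returns 1 if this caller won the right to complete the request */
	static int mark_complete(const char *who)
	{
		enum rq_state expected = RQ_IN_FLIGHT;

		if (atomic_compare_exchange_strong(&state, &expected, RQ_COMPLETE)) {
			printf("%s wins, owns the request\n", who);
			return 1;
		}
		printf("%s lost the race, backs off\n", who);
		return 0;
	}

	int main(void)
	{
		mark_complete("timeout handler");	/* hands cmd to the EH */
		mark_complete("LLD completion");	/* must not touch it again */
		return 0;
	}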
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 0a875491f5a7..cc30fccc1a2e 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -100,8 +100,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
- if ((driver_byte(result) & DRIVER_SENSE) &&
- (scsi_sense_valid(&sshdr))) {
+ if (driver_byte(result) == DRIVER_SENSE &&
+ scsi_sense_valid(&sshdr)) {
switch (sshdr.sense_key) {
case ILLEGAL_REQUEST:
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 41e9ac9fc138..0adfb3bce0fd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -238,7 +238,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
/**
- * scsi_execute - insert request and wait for the result
+ * __scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
@@ -255,7 +255,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* Returns the scsi_cmnd result field if a command was executed, or a negative
* Linux error code if we didn't get that far.
*/
-int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags, req_flags_t rq_flags,
@@ -309,7 +309,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
return ret;
}
-EXPORT_SYMBOL(scsi_execute);
+EXPORT_SYMBOL(__scsi_execute);
/*
* Function: scsi_init_cmd_errh()
@@ -345,7 +345,8 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
unsigned long flags;
rcu_read_lock();
- atomic_dec(&shost->host_busy);
+ if (!shost->use_blk_mq)
+ atomic_dec(&shost->host_busy);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -371,7 +372,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
static void scsi_kick_queue(struct request_queue *q)
{
if (q->mq_ops)
- blk_mq_start_hw_queues(q);
+ blk_mq_run_hw_queues(q, false);
else
blk_run_queue(q);
}
@@ -444,7 +445,12 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if (shost->can_queue > 0 &&
+ /*
+ * blk-mq can handle host queue busy efficiently via host-wide driver
+ * tag allocation
+ */
+
+ if (!shost->use_blk_mq && shost->can_queue > 0 &&
atomic_read(&shost->host_busy) >= shost->can_queue)
return true;
if (atomic_read(&shost->host_blocked) > 0)
@@ -662,6 +668,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
cmd->request->next_rq->special = NULL;
}
+/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes, unsigned int bidi_bytes)
{
@@ -760,161 +767,39 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
}
}
-/*
- * Function: scsi_io_completion()
- *
- * Purpose: Completion processing for block device I/O requests.
- *
- * Arguments: cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: We will finish off the specified number of sectors. If we
- * are done, the command block will be released and the queue
- * function will be goosed. If we are not done then we have to
- * figure out what to do next:
- *
- * a) We can call scsi_requeue_command(). The request
- * will be unprepared and put back on the queue. Then
- * a new command will be created for it. This should
- * be used if we made forward progress, or if we want
- * to switch from READ(10) to READ(6) for example.
- *
- * b) We can call __scsi_queue_insert(). The request will
- * be put back on the queue and retried using the same
- * command as before, possibly after a delay.
- *
- * c) We can call scsi_end_request() with -EIO to fail
- * the remainder of the request.
- */
-void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+/* Helper for scsi_io_completion() when "reprep" action required. */
+static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
+ struct request_queue *q)
+{
+ /* A new command will be prepared and issued. */
+ if (q->mq_ops) {
+ scsi_mq_requeue_cmd(cmd);
+ } else {
+ /* Unprep request and put it back at head of the queue. */
+ scsi_release_buffers(cmd);
+ scsi_requeue_command(q, cmd);
+ }
+}
+
+/* Helper for scsi_io_completion() when special action required. */
+static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
- blk_status_t error = BLK_STS_OK;
- struct scsi_sense_hdr sshdr;
- bool sense_valid = false;
- int sense_deferred = 0, level = 0;
+ int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
+ struct scsi_sense_hdr sshdr;
+ bool sense_valid;
+ bool sense_current = true; /* false implies "deferred sense" */
+ blk_status_t blk_stat;
- if (result) {
- sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
- if (sense_valid)
- sense_deferred = scsi_sense_is_deferred(&sshdr);
- }
-
- if (blk_rq_is_passthrough(req)) {
- if (result) {
- if (sense_valid) {
- /*
- * SG_IO wants current and deferred errors
- */
- scsi_req(req)->sense_len =
- min(8 + cmd->sense_buffer[7],
- SCSI_SENSE_BUFFERSIZE);
- }
- if (!sense_deferred)
- error = scsi_result_to_blk_status(cmd, result);
- }
- /*
- * scsi_result_to_blk_status may have reset the host_byte
- */
- scsi_req(req)->result = cmd->result;
- scsi_req(req)->resid_len = scsi_get_resid(cmd);
-
- if (scsi_bidi_cmnd(cmd)) {
- /*
- * Bidi commands Must be complete as a whole,
- * both sides at once.
- */
- scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
- if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
- blk_rq_bytes(req->next_rq)))
- BUG();
- return;
- }
- } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
- /*
- * Flush commands do not transfers any data, and thus cannot use
- * good_bytes != blk_rq_bytes(req) as the signal for an error.
- * This sets the error explicitly for the problem case.
- */
- error = scsi_result_to_blk_status(cmd, result);
- }
-
- /* no bidi support for !blk_rq_is_passthrough yet */
- BUG_ON(blk_bidi_rq(req));
-
- /*
- * Next deal with any sectors which we were able to correctly
- * handle.
- */
- SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
- "%u sectors total, %d bytes done.\n",
- blk_rq_sectors(req), good_bytes));
-
- /*
- * Recovered errors need reporting, but they're always treated as
- * success, so fiddle the result code here. For passthrough requests
- * we already took a copy of the original into sreq->result which
- * is what gets returned to the user
- */
- if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
- /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
- * print since caller wants ATA registers. Only occurs on
- * SCSI ATA PASS_THROUGH commands when CK_COND=1
- */
- if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
- ;
- else if (!(req->rq_flags & RQF_QUIET))
- scsi_print_sense(cmd);
- result = 0;
- /* for passthrough error may be set */
- error = BLK_STS_OK;
- }
- /*
- * Another corner case: the SCSI status byte is non-zero but 'good'.
- * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
- * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
- * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
- * intermediate statuses (both obsolete in SAM-4) as good.
- */
- if (status_byte(result) && scsi_status_is_good(result)) {
- result = 0;
- error = BLK_STS_OK;
- }
-
- /*
- * special case: failed zero length commands always need to
- * drop down into the retry code. Otherwise, if we finished
- * all bytes in the request we are done now.
- */
- if (!(blk_rq_bytes(req) == 0 && error) &&
- !scsi_end_request(req, error, good_bytes, 0))
- return;
-
- /*
- * Kill remainder if no retrys.
- */
- if (error && scsi_noretry_cmd(cmd)) {
- if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
- BUG();
- return;
- }
-
- /*
- * If there had been no error, but we have leftover bytes in the
- * requeues just queue the command up again.
- */
- if (result == 0)
- goto requeue;
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_current = !scsi_sense_is_deferred(&sshdr);
- error = scsi_result_to_blk_status(cmd, result);
+ blk_stat = scsi_result_to_blk_status(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -922,7 +807,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* happens.
*/
action = ACTION_RETRY;
- } else if (sense_valid && !sense_deferred) {
+ } else if (sense_valid && sense_current) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
if (cmd->device->removable) {
@@ -958,18 +843,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_REPREP;
} else if (sshdr.asc == 0x10) /* DIX */ {
action = ACTION_FAIL;
- error = BLK_STS_PROTECTION;
+ blk_stat = BLK_STS_PROTECTION;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
action = ACTION_FAIL;
- error = BLK_STS_TARGET;
+ blk_stat = BLK_STS_TARGET;
} else
action = ACTION_FAIL;
break;
case ABORTED_COMMAND:
action = ACTION_FAIL;
if (sshdr.asc == 0x10) /* DIF */
- error = BLK_STS_PROTECTION;
+ blk_stat = BLK_STS_PROTECTION;
break;
case NOT_READY:
/* If the device is in the process of becoming
@@ -1022,8 +907,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
DEFAULT_RATELIMIT_BURST);
if (unlikely(scsi_logging_level))
- level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
- SCSI_LOG_MLCOMPLETE_BITS);
+ level =
+ SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
+ SCSI_LOG_MLCOMPLETE_BITS);
/*
* if logging is enabled the failure will be printed
@@ -1031,25 +917,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (!level && __ratelimit(&_rs)) {
scsi_print_result(cmd, NULL, FAILED);
- if (driver_byte(result) & DRIVER_SENSE)
+ if (driver_byte(result) == DRIVER_SENSE)
scsi_print_sense(cmd);
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
+ if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0))
return;
/*FALLTHRU*/
case ACTION_REPREP:
- requeue:
- /* Unprep the request and put it back at the head of the queue.
- * A new command will be prepared and issued.
- */
- if (q->mq_ops) {
- scsi_mq_requeue_cmd(cmd);
- } else {
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
- }
+ scsi_io_completion_reprep(cmd, q);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -1062,6 +939,185 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
}
+/*
+ * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
+ * new result that may suppress further error checking. Also modifies
+ * *blk_statp in some cases.
+ */
+static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
+ blk_status_t *blk_statp)
+{
+ bool sense_valid;
+ bool sense_current = true; /* false implies "deferred sense" */
+ struct request *req = cmd->request;
+ struct scsi_sense_hdr sshdr;
+
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_current = !scsi_sense_is_deferred(&sshdr);
+
+ if (blk_rq_is_passthrough(req)) {
+ if (sense_valid) {
+ /*
+ * SG_IO wants current and deferred errors
+ */
+ scsi_req(req)->sense_len =
+ min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
+ }
+ if (sense_current)
+ *blk_statp = scsi_result_to_blk_status(cmd, result);
+ } else if (blk_rq_bytes(req) == 0 && sense_current) {
+ /*
+ * Flush commands do not transfer any data, and thus cannot use
+ * good_bytes != blk_rq_bytes(req) as the signal for an error.
+ * This sets *blk_statp explicitly for the problem case.
+ */
+ *blk_statp = scsi_result_to_blk_status(cmd, result);
+ }
+ /*
+ * Recovered errors need reporting, but they're always treated as
+ * success, so fiddle the result code here. For passthrough requests
+ * we already took a copy of the original into sreq->result which
+ * is what gets returned to the user
+ */
+ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+ bool do_print = true;
+ /*
+ * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
+ * skip print since caller wants ATA registers. Only occurs
+ * on SCSI ATA PASS_THROUGH commands when CK_COND=1
+ */
+ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+ do_print = false;
+ else if (req->rq_flags & RQF_QUIET)
+ do_print = false;
+ if (do_print)
+ scsi_print_sense(cmd);
+ result = 0;
+ /* for passthrough, *blk_statp may be set */
+ *blk_statp = BLK_STS_OK;
+ }
+ /*
+ * Another corner case: the SCSI status byte is non-zero but 'good'.
+ * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
+ * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
+ * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
+ * intermediate statuses (both obsolete in SAM-4) as good.
+ */
+ if (status_byte(result) && scsi_status_is_good(result)) {
+ result = 0;
+ *blk_statp = BLK_STS_OK;
+ }
+ return result;
+}
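
This helper, and the series-wide tightening of "driver_byte(result) &
DRIVER_SENSE" into "driver_byte(result) == DRIVER_SENSE", both rest on
how cmd->result packs four fields into a single int. A minimal sketch,
assuming the accessor layout from include/scsi/scsi.h of this era:

#define status_byte(result)  (((result) >> 1) & 0x7f)  /* SAM status, sans vendor bit */
#define msg_byte(result)     (((result) >> 8) & 0xff)
#define host_byte(result)    (((result) >> 16) & 0xff) /* DID_* codes */
#define driver_byte(result)  (((result) >> 24) & 0xff) /* DRIVER_* codes */

DRIVER_SENSE (0x08) is in practice the only driver-byte value the
midlayer sets, so the equality test is equivalent to the old bitwise
test while reading less ambiguously.
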
+
+/*
+ * Function: scsi_io_completion()
+ *
+ * Purpose: Completion processing for block device I/O requests.
+ *
+ * Arguments: cmd - command that is finished.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: We will finish off the specified number of sectors. If we
+ * are done, the command block will be released and the queue
+ * function will be goosed. If we are not done then we have to
+ * figure out what to do next:
+ *
+ * a) We can call scsi_requeue_command(). The request
+ * will be unprepared and put back on the queue. Then
+ * a new command will be created for it. This should
+ * be used if we made forward progress, or if we want
+ * to switch from READ(10) to READ(6) for example.
+ *
+ * b) We can call __scsi_queue_insert(). The request will
+ * be put back on the queue and retried using the same
+ * command as before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with blk_stat other than
+ * BLK_STS_OK, to fail the remainder of the request.
+ */
+void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+{
+ int result = cmd->result;
+ struct request_queue *q = cmd->device->request_queue;
+ struct request *req = cmd->request;
+ blk_status_t blk_stat = BLK_STS_OK;
+
+ if (unlikely(result)) /* a nz result may or may not be an error */
+ result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
+
+ if (unlikely(blk_rq_is_passthrough(req))) {
+ /*
+ * scsi_result_to_blk_status may have reset the host_byte
+ */
+ scsi_req(req)->result = cmd->result;
+ scsi_req(req)->resid_len = scsi_get_resid(cmd);
+
+ if (unlikely(scsi_bidi_cmnd(cmd))) {
+ /*
+ * Bidi commands must be completed as a whole,
+ * both sides at once.
+ */
+ scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
+ if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ WARN_ONCE(true,
+ "Bidi command with remaining bytes");
+ return;
+ }
+ }
+
+ /* no bidi support yet, other than in pass-through */
+ if (unlikely(blk_bidi_rq(req))) {
+ WARN_ONCE(true, "Only support bidi command in passthrough");
+ scmd_printk(KERN_ERR, cmd, "Killing bidi command\n");
+ if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ WARN_ONCE(true, "Bidi command with remaining bytes");
+ return;
+ }
+
+ /*
+ * Next, deal with any sectors that we were able to correctly
+ * handle.
+ */
+ SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
+ "%u sectors total, %d bytes done.\n",
+ blk_rq_sectors(req), good_bytes));
+
+ /*
+ * Failed, zero length commands always need to drop down
+ * to retry code. Fast path should return in this block.
+ */
+ if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
+ if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0)))
+ return; /* no bytes remaining */
+ }
+
+ /* Kill remainder if no retries. */
+ if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
+ if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0))
+ WARN_ONCE(true,
+ "Bytes remaining after failed, no-retry command");
+ return;
+ }
+
+ /*
+ * If there was no error, but we have leftover bytes in the
+ * request, just queue the command up again.
+ */
+ if (likely(result == 0))
+ scsi_io_completion_reprep(cmd, q);
+ else
+ scsi_io_completion_action(cmd, result);
+}
+
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
int count;
@@ -1550,7 +1606,10 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
if (scsi_host_in_recovery(shost))
return 0;
- busy = atomic_inc_return(&shost->host_busy) - 1;
+ if (!shost->use_blk_mq)
+ busy = atomic_inc_return(&shost->host_busy) - 1;
+ else
+ busy = 0;
if (atomic_read(&shost->host_blocked) > 0) {
if (busy)
goto starved;
@@ -1566,7 +1625,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (shost->can_queue > 0 && busy >= shost->can_queue)
+ if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1652,7 +1711,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
* with the locks as normal issue path does.
*/
atomic_inc(&sdev->device_busy);
- atomic_inc(&shost->host_busy);
+
+ if (!shost->use_blk_mq)
+ atomic_inc(&shost->host_busy);
if (starget->can_queue > 0)
atomic_inc(&starget->target_busy);
@@ -2555,7 +2616,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
* ILLEGAL REQUEST if the code page isn't supported */
if (use_10_for_ms && !scsi_status_is_good(result) &&
- (driver_byte(result) & DRIVER_SENSE)) {
+ driver_byte(result) == DRIVER_SENSE) {
if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0880d975eed3..78ca63dfba4a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -614,7 +614,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* INQUIRY should not yield UNIT_ATTENTION
* but many buggy devices do so anyway.
*/
- if ((driver_byte(result) & DRIVER_SENSE) &&
+ if (driver_byte(result) == DRIVER_SENSE &&
scsi_sense_valid(&sshdr)) {
if ((sshdr.sense_key == UNIT_ATTENTION) &&
((sshdr.asc == 0x28) ||
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7943b762c12d..3aee9464a7bf 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -382,7 +382,7 @@ static ssize_t
show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
- return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
+ return snprintf(buf, 20, "%d\n", scsi_host_busy(shost));
}
static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
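
scsi_host_busy() is introduced elsewhere in this series to replace
direct reads of shost->host_busy, which the scsi-mq path no longer
maintains (see the use_blk_mq checks in scsi_lib.c above). A hedged
sketch of its likely shape; the iterator callback name here is an
assumption, not shown in this diff:

int scsi_host_busy(struct Scsi_Host *shost)
{
        int cnt = 0;

        if (shost->use_blk_mq)
                /* count in-flight requests via the tag set instead */
                blk_mq_tagset_busy_iter(&shost->tag_set,
                                        scsi_host_check_in_flight, &cnt);
        else
                cnt = atomic_read(&shost->host_busy);
        return cnt;
}
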
@@ -722,8 +722,24 @@ static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- if (device_remove_file_self(dev, attr))
- scsi_remove_device(to_scsi_device(dev));
+ struct kernfs_node *kn;
+
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ WARN_ON_ONCE(!kn);
+ /*
+ * Concurrent writes into the "delete" sysfs attribute may trigger
+ * concurrent calls to device_remove_file() and scsi_remove_device().
+ * device_remove_file() handles concurrent removal calls by
+ * serializing these and by ignoring the second and later removal
+ * attempts. Concurrent calls of scsi_remove_device() are
+ * serialized. The second and later calls of scsi_remove_device() are
+ * ignored because the first call of that function changes the device
+ * state into SDEV_DEL.
+ */
+ device_remove_file(dev, attr);
+ scsi_remove_device(to_scsi_device(dev));
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
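
The pattern above generalizes to any sysfs attribute whose store()
method removes its own device. A minimal sketch, assuming the
sysfs_break_active_protection()/sysfs_unbreak_active_protection() pair
exported by this same series:

static ssize_t self_delete_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct kernfs_node *kn;

        /* Drop the active reference so that removing the attribute
         * below cannot deadlock waiting for this store() to return.
         */
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        device_remove_file(dev, attr);
        /* ... tear down the device itself here ... */
        if (kn)
                sysfs_unbreak_active_protection(kn);
        return count;
}
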
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 13948102ca29..381668fa135d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -567,7 +567,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
- event->seconds = get_seconds();
+ event->seconds = ktime_get_real_seconds();
event->vendor_id = 0;
event->host_no = shost->host_no;
event->event_datalen = sizeof(u32); /* bytes */
@@ -635,7 +635,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
- event->seconds = get_seconds();
+ event->seconds = ktime_get_real_seconds();
event->vendor_id = vendor_id;
event->host_no = shost->host_no;
event->event_datalen = data_len; /* bytes */
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 2ca150b16764..40b85b752b79 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -136,7 +136,7 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
0, NULL);
- if (!(driver_byte(result) & DRIVER_SENSE) ||
+ if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
}
diff --git a/drivers/scsi/scsi_typedefs.h b/drivers/scsi/scsi_typedefs.h
deleted file mode 100644
index 2ed4c5cb7088..000000000000
--- a/drivers/scsi/scsi_typedefs.h
+++ /dev/null
@@ -1,2 +0,0 @@
-
-typedef struct scsi_cmnd Scsi_Cmnd;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9421d9877730..a58cee7a85f2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1119,7 +1119,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
SCpnt->cmnd[0] = WRITE_6;
if (blk_integrity_rq(rq))
- sd_dif_prepare(SCpnt);
+ t10_pi_prepare(SCpnt->request, sdkp->protection_type);
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
@@ -1635,7 +1635,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
- if (driver_byte(res) & DRIVER_SENSE)
+ if (driver_byte(res) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, sshdr);
/* we need to evaluate the error return */
@@ -1737,8 +1737,8 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
&sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
- if ((driver_byte(result) & DRIVER_SENSE) &&
- (scsi_sense_valid(&sshdr))) {
+ if (driver_byte(result) == DRIVER_SENSE &&
+ scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
scsi_print_sense_hdr(sdev, NULL, &sshdr);
}
@@ -2028,7 +2028,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
} else {
sdkp->device->no_write_same = 1;
sd_config_write_same(sdkp);
- req->__data_len = blk_rq_bytes(req);
req->rq_flags |= RQF_QUIET;
}
break;
@@ -2047,8 +2046,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
"sd_done: completed %d of %d bytes\n",
good_bytes, scsi_bufflen(SCpnt)));
- if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
- sd_dif_complete(SCpnt, good_bytes);
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
+ good_bytes)
+ t10_pi_complete(SCpnt->request, sdkp->protection_type,
+ good_bytes / scsi_prot_interval(SCpnt));
return good_bytes;
}
@@ -2095,10 +2096,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
retries++;
} while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
- ((driver_byte(the_result) & DRIVER_SENSE) &&
+ ((driver_byte(the_result) == DRIVER_SENSE) &&
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
- if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
+ if (driver_byte(the_result) != DRIVER_SENSE) {
/* no sense, TUR either succeeded or failed
* with a status error */
if(!spintime && !scsi_status_is_good(the_result)) {
@@ -2224,7 +2225,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr *sshdr, int sense_valid,
int the_result)
{
- if (driver_byte(the_result) & DRIVER_SENSE)
+ if (driver_byte(the_result) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, sshdr);
else
sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
@@ -3490,7 +3491,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
- if (driver_byte(res) & DRIVER_SENSE)
+ if (driver_byte(res) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 392c7d078ae3..a7d4f50b67d4 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -254,21 +254,12 @@ static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_config_host(struct scsi_disk *);
-extern void sd_dif_prepare(struct scsi_cmnd *scmd);
-extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void sd_dif_config_host(struct scsi_disk *disk)
{
}
-static inline int sd_dif_prepare(struct scsi_cmnd *scmd)
-{
- return 0;
-}
-static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
-{
-}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 9035380c0dda..db72c82486e3 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -95,116 +95,3 @@ out:
blk_integrity_register(disk, &bi);
}
-/*
- * The virtual start sector is the one that was originally submitted
- * by the block layer. Due to partitioning, MD/DM cloning, etc. the
- * actual physical start sector is likely to be different. Remap
- * protection information to match the physical LBA.
- *
- * From a protocol perspective there's a slight difference between
- * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
- * reference tag is seeded in the CDB. This gives us the potential to
- * avoid virt->phys remapping during write. However, at read time we
- * don't know whether the virt sector is the same as when we wrote it
- * (we could be reading from real disk as opposed to MD/DM device. So
- * we always remap Type 2 making it identical to Type 1.
- *
- * Type 3 does not have a reference tag so no remapping is required.
- */
-void sd_dif_prepare(struct scsi_cmnd *scmd)
-{
- const int tuple_sz = sizeof(struct t10_pi_tuple);
- struct bio *bio;
- struct scsi_disk *sdkp;
- struct t10_pi_tuple *pi;
- u32 phys, virt;
-
- sdkp = scsi_disk(scmd->request->rq_disk);
-
- if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
- phys = scsi_prot_ref_tag(scmd);
-
- __rq_for_each_bio(bio, scmd->request) {
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_vec iv;
- struct bvec_iter iter;
- unsigned int j;
-
- /* Already remapped? */
- if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
- break;
-
- virt = bip_get_seed(bip) & 0xffffffff;
-
- bip_for_each_vec(iv, bip, iter) {
- pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
-
- for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
-
- if (be32_to_cpu(pi->ref_tag) == virt)
- pi->ref_tag = cpu_to_be32(phys);
-
- virt++;
- phys++;
- }
-
- kunmap_atomic(pi);
- }
-
- bip->bip_flags |= BIP_MAPPED_INTEGRITY;
- }
-}
-
-/*
- * Remap physical sector values in the reference tag to the virtual
- * values expected by the block layer.
- */
-void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
-{
- const int tuple_sz = sizeof(struct t10_pi_tuple);
- struct scsi_disk *sdkp;
- struct bio *bio;
- struct t10_pi_tuple *pi;
- unsigned int j, intervals;
- u32 phys, virt;
-
- sdkp = scsi_disk(scmd->request->rq_disk);
-
- if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0)
- return;
-
- intervals = good_bytes / scsi_prot_interval(scmd);
- phys = scsi_prot_ref_tag(scmd);
-
- __rq_for_each_bio(bio, scmd->request) {
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_vec iv;
- struct bvec_iter iter;
-
- virt = bip_get_seed(bip) & 0xffffffff;
-
- bip_for_each_vec(iv, bip, iter) {
- pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
-
- for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
-
- if (intervals == 0) {
- kunmap_atomic(pi);
- return;
- }
-
- if (be32_to_cpu(pi->ref_tag) == phys)
- pi->ref_tag = cpu_to_be32(virt);
-
- virt++;
- phys++;
- intervals--;
- }
-
- kunmap_atomic(pi);
- }
- }
-}
-
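
The remapping loops deleted above were lifted into the block layer as
t10_pi_prepare()/t10_pi_complete(); the sd.c hunks earlier in this diff
show the resulting call sites:

        if (blk_integrity_rq(rq))
                t10_pi_prepare(SCpnt->request, sdkp->protection_type);

        if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
            good_bytes)
                t10_pi_complete(SCpnt->request, sdkp->protection_type,
                                good_bytes / scsi_prot_interval(SCpnt));
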
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index a14fef11776e..412c1787dcd9 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -148,12 +148,6 @@ int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = sdkp->device->sector_size;
cmd->allowed = 0;
- /*
- * Report may return less bytes than requested. Make sure
- * to report completion on the entire initial request.
- */
- rq->__data_len = nr_bytes;
-
return BLKPREP_OK;
}
@@ -391,7 +385,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
* Check that all zones of the device are equal. The last zone can however
* be smaller. The zone size must also be a power of two number of LBAs.
*
- * Returns the zone size in bytes upon success or an error code upon failure.
+ * Returns the zone size in number of blocks upon success or an error code
+ * upon failure.
*/
static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
@@ -401,7 +396,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
unsigned char *rec;
unsigned int buf_len;
unsigned int list_length;
- int ret;
+ s64 ret;
u8 same;
/* Get a buffer */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 53ae52dbff84..8a254bb46a9b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
+#include <linux/cred.h> /* for sg_check_file_access() */
#include "scsi.h"
#include <scsi/scsi_dbg.h>
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref);
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
+/*
+ * The SCSI interfaces that use read() and write() as an asynchronous variant of
+ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
+ * to trigger read() and write() calls from various contexts with elevated
+ * privileges. This can lead to kernel memory corruption (e.g. if these
+ * interfaces are called through splice()) and privilege escalation inside
+ * userspace (e.g. if a process with access to such a device passes a file
+ * descriptor to a SUID binary as stdin/stdout/stderr).
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static int sg_check_file_access(struct file *filp, const char *caller)
+{
+ if (filp->f_cred != current_real_cred()) {
+ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ caller, task_tgid_vnr(current), current->comm);
+ return -EPERM;
+ }
+ if (uaccess_kernel()) {
+ pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
+ caller, task_tgid_vnr(current), current->comm);
+ return -EACCES;
+ }
+ return 0;
+}
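
An illustration of the descriptor-passing case the f_cred check above
rejects; the device path and program name are hypothetical:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/sg0", O_RDWR);      /* opened with our creds */

        if (fd < 0)
                return 1;
        dup2(fd, 0);                            /* sg fd becomes stdin */
        execl("/usr/bin/some-suid-tool", "some-suid-tool", (char *)0);
        /* Any read() the SUID program now issues on stdin reaches
         * sg_read() with filp->f_cred != current_real_cred(), which
         * sg_check_file_access() rejects with -EPERM.
         */
        return 0;
}
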
+
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
struct sg_header *old_hdr = NULL;
int retval = 0;
+ /*
+ * This could cause a response to be stranded. Close the associated
+ * file descriptor to free up any resources being held.
+ */
+ retval = sg_check_file_access(filp, __func__);
+ if (retval)
+ return retval;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
+ int retval;
- if (unlikely(uaccess_kernel()))
- return -EINVAL;
+ retval = sg_check_file_access(filp, __func__);
+ if (retval)
+ return retval;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
@@ -1065,15 +1103,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- if (read_only) {
- unsigned char opcode = WRITE_6;
- Scsi_Ioctl_Command __user *siocp = p;
-
- if (copy_from_user(&opcode, siocp->data, 1))
- return -EFAULT;
- if (sg_allow_access(filp, &opcode))
- return -EPERM;
- }
return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
@@ -1703,15 +1732,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
- * preallocated requests are already in use, then using GFP_ATOMIC with
- * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
- * will cause blk_get_request() to sleep until an active command
- * completes, freeing up a request. Neither option is ideal, but
- * GFP_KERNEL is the better choice to prevent userspace from getting an
- * unexpected EWOULDBLOCK.
- *
- * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
- * does not sleep except under memory pressure.
+ * preallocated requests are already in use, then blk_get_request()
+ * will sleep until an active command completes, freeing up a request.
+ * Although waiting in an asynchronous interface is less than ideal, we
+ * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
+ * not expect an EWOULDBLOCK from this condition.
*/
rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
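
The alternative that the comment above rejects would pass
BLK_MQ_REQ_NOWAIT (from include/linux/blk-mq.h) as the third argument;
a sketch only:

        rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
                             REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
                             BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
                return PTR_ERR(rq);     /* -EWOULDBLOCK when no request is free */

Passing 0 instead lets blk_get_request() sleep until a preallocated
request frees up, trading latency for an error code userspace actually
expects.
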
@@ -1850,7 +1875,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
- gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
@@ -1880,9 +1905,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- gfp_mask |= __GFP_ZERO;
-
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
@@ -1893,7 +1915,7 @@ retry:
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
- schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
+ schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
@@ -2147,6 +2169,7 @@ sg_add_sfp(Sg_device * sdp)
write_lock_irqsave(&sdp->sfd_lock, iflags);
if (atomic_read(&sdp->detaching)) {
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ kfree(sfp);
return ERR_PTR(-ENODEV);
}
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index dc3a0542a2e8..e97bf2670315 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -483,6 +483,8 @@ struct pqi_raid_error_info {
#define CISS_CMD_STATUS_TMF 0xd
#define CISS_CMD_STATUS_AIO_DISABLED 0xe
+#define PQI_CMD_STATUS_ABORTED CISS_CMD_STATUS_ABORTED
+
#define PQI_NUM_EVENT_QUEUE_ELEMENTS 32
#define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response)
@@ -581,8 +583,8 @@ struct pqi_admin_queues_aligned {
struct pqi_admin_queues {
void *iq_element_array;
void *oq_element_array;
- volatile pqi_index_t *iq_ci;
- volatile pqi_index_t *oq_pi;
+ pqi_index_t *iq_ci;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_element_array_bus_addr;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t iq_ci_bus_addr;
@@ -606,8 +608,8 @@ struct pqi_queue_group {
dma_addr_t oq_element_array_bus_addr;
__le32 __iomem *iq_pi[2];
pqi_index_t iq_pi_copy[2];
- volatile pqi_index_t *iq_ci[2];
- volatile pqi_index_t *oq_pi;
+ pqi_index_t __iomem *iq_ci[2];
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_ci_bus_addr[2];
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
@@ -620,7 +622,7 @@ struct pqi_event_queue {
u16 oq_id;
u16 int_msg_num;
void *oq_element_array;
- volatile pqi_index_t *oq_pi;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b78d20b74ed8..2112ea6723c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.1.4-115"
+#define DRIVER_VERSION "1.1.4-130"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 4
-#define DRIVER_REVISION 115
+#define DRIVER_REVISION 130
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -1197,20 +1197,30 @@ no_buffer:
device->volume_offline = volume_offline;
}
+#define PQI_INQUIRY_PAGE0_RETRIES 3
+
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
+ unsigned int retries;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Send an inquiry to the device to see what it is. */
- rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
- if (rc)
- goto out;
+ for (retries = 0;;) {
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
+ buffer, 64);
+ if (rc == 0)
+ break;
+ if (pqi_is_logical_device(device) ||
+ rc != PQI_CMD_STATUS_ABORTED ||
+ ++retries > PQI_INQUIRY_PAGE0_RETRIES)
+ goto out;
+ }
scsi_sanitize_inquiry_string(&buffer[8], 8);
scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -2693,7 +2703,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
oq_ci = queue_group->oq_ci_copy;
while (1) {
- oq_pi = *queue_group->oq_pi;
+ oq_pi = readl(queue_group->oq_pi);
if (oq_pi == oq_ci)
break;
@@ -2784,7 +2794,7 @@ static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
iq_pi = queue_group->iq_pi_copy[RAID_PATH];
- iq_ci = *queue_group->iq_ci[RAID_PATH];
+ iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
if (pqi_num_elements_free(iq_pi, iq_ci,
ctrl_info->num_elements_per_iq))
@@ -2943,7 +2953,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
oq_ci = event_queue->oq_ci_copy;
while (1) {
- oq_pi = *event_queue->oq_pi;
+ oq_pi = readl(event_queue->oq_pi);
if (oq_pi == oq_ci)
break;
@@ -3167,7 +3177,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
size_t element_array_length_per_iq;
size_t element_array_length_per_oq;
void *element_array;
- void *next_queue_index;
+ void __iomem *next_queue_index;
void *aligned_pointer;
unsigned int num_inbound_queues;
unsigned int num_outbound_queues;
@@ -3263,7 +3273,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
PQI_EVENT_OQ_ELEMENT_LENGTH;
- next_queue_index = PTR_ALIGN(element_array,
+ next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
@@ -3271,21 +3281,24 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
queue_group->iq_ci[RAID_PATH] = next_queue_index;
queue_group->iq_ci_bus_addr[RAID_PATH] =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
queue_group->iq_ci[AIO_PATH] = next_queue_index;
queue_group->iq_ci_bus_addr[AIO_PATH] =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
queue_group->oq_pi = next_queue_index;
queue_group->oq_pi_bus_addr =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
@@ -3294,7 +3307,8 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->event_queue.oq_pi = next_queue_index;
ctrl_info->event_queue.oq_pi_bus_addr =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
return 0;
}
@@ -3368,7 +3382,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
admin_queues->oq_element_array =
&admin_queues_aligned->oq_element_array;
admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
- admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
+ admin_queues->oq_pi =
+ (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
admin_queues->iq_element_array_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
@@ -3384,8 +3399,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->admin_queue_memory_base);
admin_queues->oq_pi_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
- ((void *)admin_queues->oq_pi -
- ctrl_info->admin_queue_memory_base);
+ ((void __iomem *)admin_queues->oq_pi -
+ (void __iomem *)ctrl_info->admin_queue_memory_base);
return 0;
}
@@ -3486,7 +3501,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
- oq_pi = *admin_queues->oq_pi;
+ oq_pi = readl(admin_queues->oq_pi);
if (oq_pi != oq_ci)
break;
if (time_after(jiffies, timeout)) {
@@ -3545,7 +3560,7 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
DIV_ROUND_UP(iu_length,
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
- iq_ci = *queue_group->iq_ci[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
ctrl_info->num_elements_per_iq))
@@ -3621,29 +3636,24 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_submit_raid_request_synchronous_with_io_request(
- struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
- unsigned long timeout_msecs)
+static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
+ *error_info)
{
- int rc = 0;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int rc = -EIO;
- io_request->io_complete_callback = pqi_raid_synchronous_complete;
- io_request->context = &wait;
-
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
- io_request);
-
- if (timeout_msecs == NO_TIMEOUT) {
- pqi_wait_for_completion_io(ctrl_info, &wait);
- } else {
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(timeout_msecs))) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "command timed out\n");
- rc = -ETIMEDOUT;
- }
+ switch (error_info->data_out_result) {
+ case PQI_DATA_IN_OUT_GOOD:
+ if (error_info->status == SAM_STAT_GOOD)
+ rc = 0;
+ break;
+ case PQI_DATA_IN_OUT_UNDERFLOW:
+ if (error_info->status == SAM_STAT_GOOD ||
+ error_info->status == SAM_STAT_CHECK_CONDITION)
+ rc = 0;
+ break;
+ case PQI_DATA_IN_OUT_ABORTED:
+ rc = PQI_CMD_STATUS_ABORTED;
+ break;
}
return rc;
@@ -3653,11 +3663,12 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
- int rc;
+ int rc = 0;
struct pqi_io_request *io_request;
unsigned long start_jiffies;
unsigned long msecs_blocked;
size_t iu_length;
+ DECLARE_COMPLETION_ONSTACK(wait);
/*
* Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
@@ -3686,11 +3697,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_ctrl_busy(ctrl_info);
timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
if (timeout_msecs == 0) {
+ pqi_ctrl_unbusy(ctrl_info);
rc = -ETIMEDOUT;
goto out;
}
if (pqi_ctrl_offline(ctrl_info)) {
+ pqi_ctrl_unbusy(ctrl_info);
rc = -ENXIO;
goto out;
}
@@ -3708,8 +3721,25 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
PQI_REQUEST_HEADER_LENGTH;
memcpy(io_request->iu, request, iu_length);
- rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
- io_request, timeout_msecs);
+ io_request->io_complete_callback = pqi_raid_synchronous_complete;
+ io_request->context = &wait;
+
+ pqi_start_io(ctrl_info,
+ &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ io_request);
+
+ pqi_ctrl_unbusy(ctrl_info);
+
+ if (timeout_msecs == NO_TIMEOUT) {
+ pqi_wait_for_completion_io(ctrl_info, &wait);
+ } else {
+ if (!wait_for_completion_io_timeout(&wait,
+ msecs_to_jiffies(timeout_msecs))) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "command timed out\n");
+ rc = -ETIMEDOUT;
+ }
+ }
if (error_info) {
if (io_request->error_info)
@@ -3718,25 +3748,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
else
memset(error_info, 0, sizeof(*error_info));
} else if (rc == 0 && io_request->error_info) {
- u8 scsi_status;
- struct pqi_raid_error_info *raid_error_info;
-
- raid_error_info = io_request->error_info;
- scsi_status = raid_error_info->status;
-
- if (scsi_status == SAM_STAT_CHECK_CONDITION &&
- raid_error_info->data_out_result ==
- PQI_DATA_IN_OUT_UNDERFLOW)
- scsi_status = SAM_STAT_GOOD;
-
- if (scsi_status != SAM_STAT_GOOD)
- rc = -EIO;
+ rc = pqi_process_raid_io_error_synchronous(
+ io_request->error_info);
}
pqi_free_io_request(io_request);
out:
- pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
@@ -5041,7 +5059,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
iq_pi = queue_group->iq_pi_copy[path];
while (1) {
- iq_ci = *queue_group->iq_ci[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
if (iq_ci == iq_pi)
break;
pqi_check_ctrl_health(ctrl_info);
@@ -6230,20 +6248,20 @@ static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
admin_queues = &ctrl_info->admin_queues;
admin_queues->iq_pi_copy = 0;
admin_queues->oq_ci_copy = 0;
- *admin_queues->oq_pi = 0;
+ writel(0, admin_queues->oq_pi);
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
ctrl_info->queue_groups[i].oq_ci_copy = 0;
- *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
- *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
- *ctrl_info->queue_groups[i].oq_pi = 0;
+ writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
+ writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
+ writel(0, ctrl_info->queue_groups[i].oq_pi);
}
event_queue = &ctrl_info->event_queue;
- *event_queue->oq_pi = 0;
+ writel(0, event_queue->oq_pi);
event_queue->oq_ci_copy = 0;
}
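
The smartpqi conversions above all follow one pattern: device-written
queue indices lose their "volatile" qualifier, gain an __iomem
annotation, and are accessed through the MMIO accessors:

        /* before: compiler-dependent ordering, no address-space checking */
        oq_pi = *queue_group->oq_pi;
        *admin_queues->oq_pi = 0;

        /* after: architecture-defined MMIO semantics, sparse-checkable */
        oq_pi = readl(queue_group->oq_pi);
        writel(0, admin_queues->oq_pi);
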
@@ -6826,6 +6844,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0110)
},
{
@@ -6950,6 +6980,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADVANTECH, 0x8312)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_DELL, 0x1fe0)
},
{
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 269ddf791a73..0abe17c1a73b 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -200,7 +200,7 @@ snic_stats_show(struct seq_file *sfp, void *data)
{
struct snic *snic = (struct snic *) sfp->private;
struct snic_stats *stats = &snic->s_stats;
- struct timespec last_isr_tms, last_ack_tms;
+ struct timespec64 last_isr_tms, last_ack_tms;
u64 maxio_tm;
int i;
@@ -312,12 +312,12 @@ snic_stats_show(struct seq_file *sfp, void *data)
"\t\t Other Statistics\n"
"\n---------------------------------------------\n");
- jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
- jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);
+ jiffies_to_timespec64(stats->misc.last_isr_time, &last_isr_tms);
+ jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms);
seq_printf(sfp,
- "Last ISR Time : %llu (%8lu.%8lu)\n"
- "Last Ack Time : %llu (%8lu.%8lu)\n"
+ "Last ISR Time : %llu (%8llu.%09lu)\n"
+ "Last Ack Time : %llu (%8llu.%09lu)\n"
"Ack ISRs : %llu\n"
"IO Cmpl ISRs : %llu\n"
"Err Notify ISRs : %llu\n"
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index f00ebf4717e0..fc60c933d6c0 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -65,12 +65,12 @@ static int
snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
{
int len = 0;
- struct timespec tmspec;
+ struct timespec64 tmspec;
- jiffies_to_timespec(td->ts, &tmspec);
+ jiffies_to_timespec64(td->ts, &tmspec);
len += snprintf(buf, buf_sz,
- "%lu.%10lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
+ "%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
tmspec.tv_sec,
tmspec.tv_nsec,
td->fn,
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 3f3cb72e0c0c..d0389b20574d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
struct scsi_cd *cd;
+ struct scsi_device *sdev;
int ret = -ENXIO;
+ cd = scsi_cd_get(bdev->bd_disk);
+ if (!cd)
+ goto out;
+
+ sdev = cd->device;
+ scsi_autopm_get_device(sdev);
check_disk_change(bdev);
mutex_lock(&sr_mutex);
- cd = scsi_cd_get(bdev->bd_disk);
- if (cd) {
- ret = cdrom_open(&cd->cdi, bdev, mode);
- if (ret)
- scsi_cd_put(cd);
- }
+ ret = cdrom_open(&cd->cdi, bdev, mode);
mutex_unlock(&sr_mutex);
+
+ scsi_autopm_put_device(sdev);
+ if (ret)
+ scsi_cd_put(cd);
+
+out:
return ret;
}
@@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (ret)
goto out;
+ scsi_autopm_get_device(sdev);
+
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to cdrom/block level.
@@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
ret = scsi_ioctl(sdev, cmd, argp);
- goto out;
+ goto put;
}
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
- goto out;
+ goto put;
ret = scsi_ioctl(sdev, cmd, argp);
+put:
+ scsi_autopm_put_device(sdev);
+
out:
mutex_unlock(&sr_mutex);
return ret;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 35fab1e18adc..ffcf902da390 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -186,14 +186,13 @@ static int sr_play_trkind(struct cdrom_device_info *cdi,
int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
{
struct scsi_device *SDev;
- struct scsi_sense_hdr sshdr;
+ struct scsi_sense_hdr local_sshdr, *sshdr = &local_sshdr;
int result, err = 0, retries = 0;
- unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
SDev = cd->device;
- if (cgc->sense)
- senseptr = sense_buffer;
+ if (cgc->sshdr)
+ sshdr = cgc->sshdr;
retry:
if (!scsi_block_when_processing_errors(SDev)) {
@@ -202,15 +201,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
}
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
- cgc->buffer, cgc->buflen, senseptr, &sshdr,
+ cgc->buffer, cgc->buflen, NULL, sshdr,
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
- if (cgc->sense)
- memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
-
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
- switch (sshdr.sense_key) {
+ switch (sshdr->sense_key) {
case UNIT_ATTENTION:
SDev->changed = 1;
if (!cgc->quiet)
@@ -221,8 +217,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
err = -ENOMEDIUM;
break;
case NOT_READY: /* This happens if there is no disc in drive */
- if (sshdr.asc == 0x04 &&
- sshdr.ascq == 0x01) {
+ if (sshdr->asc == 0x04 &&
+ sshdr->ascq == 0x01) {
/* sense: Logical unit is in process of becoming ready */
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
@@ -245,8 +241,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
break;
case ILLEGAL_REQUEST:
err = -EIO;
- if (sshdr.asc == 0x20 &&
- sshdr.ascq == 0x00)
+ if (sshdr->asc == 0x20 &&
+ sshdr->ascq == 0x00)
/* sense: Invalid command operation code */
err = -EDRIVE_CANT_DO_THIS;
break;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 50c66ccc4b41..307df2fa39a3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -828,11 +828,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
static int flush_buffer(struct scsi_tape *STp, int seek_next)
{
int backspace, result;
- struct st_buffer *STbuffer;
struct st_partstat *STps;
- STbuffer = STp->buffer;
-
/*
* If there was a bus reset, block further access
* to this device.
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 33a4a4dad324..f03dc03a42c3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1935,6 +1935,9 @@ static struct hv_driver storvsc_drv = {
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index 190770bdc194..91db17727963 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -295,10 +295,8 @@ static void
sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw)
{
struct sym_fw1a_scr *scripta0;
- struct sym_fw1b_scr *scriptb0;
scripta0 = (struct sym_fw1a_scr *) np->scripta0;
- scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
/*
* Fill variable parts in scripts.
@@ -319,10 +317,8 @@ static void
sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw)
{
struct sym_fw2a_scr *scripta0;
- struct sym_fw2b_scr *scriptb0;
scripta0 = (struct sym_fw2a_scr *) np->scripta0;
- scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
/*
* Fill variable parts in scripts.
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 7320d5fe4cbc..5f10aa9bad9b 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -252,7 +252,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
scsi_set_resid(cmd, resid);
- cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
+ cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status;
}
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index 805369521df8..e34801ae5d69 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -256,7 +256,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd)
static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
{
scsi_set_resid(cmd, resid);
- cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
+ cmd->result = (DID_OK << 16) | (cp->ssss_status & 0x7f);
}
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 378af306fda1..bd3f6e2d6834 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3855,7 +3855,7 @@ out_reject:
int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
{
- int dp_sg, dp_sgmin, resid = 0;
+ int dp_sg, resid = 0;
int dp_ofs = 0;
/*
@@ -3902,7 +3902,6 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
* We are now fully comfortable in the computation
* of the data residual (2's complement).
*/
- dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
resid = -cp->ext_ofs;
for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e27b4d4e6ae2..e09fe6ab3572 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -100,3 +100,12 @@ config SCSI_UFS_QCOM
Select this if you have UFS controller on QCOM chipset.
If unsure, say N.
+
+config SCSI_UFS_HISI
+ tristate "Hisilicon specific hooks to UFS controller platform driver"
+ depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ ---help---
+ This selects the Hisilicon specific additions to the UFSHCD platform driver.
+
+ Select this if you have a UFS controller on a Hisilicon chipset.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 918f5791202d..2c50f03d8c4a 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-objs := ufshcd.o ufs-sysfs.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
new file mode 100644
index 000000000000..46df707e6f2c
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -0,0 +1,619 @@
+/*
+ * HiSilicon Hixxxx UFS Driver
+ *
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "ufshcd.h"
+#include "ufshcd-pltfrm.h"
+#include "unipro.h"
+#include "ufs-hisi.h"
+#include "ufshci.h"
+
+static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 tx_fsm_val_0 = 0;
+ u32 tx_fsm_val_1 = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
+ tx_fsm_val_1 == TX_FSM_HIBERN8))
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ /*
+ * we might have been scheduled out for a long time during
+ * polling, so check the state again.
+ */
+ if (time_after(jiffies, timeout)) {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ }
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
+ tx_fsm_val_1 != TX_FSM_HIBERN8) {
+ err = -1;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
+ __func__, tx_fsm_val_0, tx_fsm_val_1);
+ }
+
+ return err;
+}
+
+static void ufs_hi3660_clk_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
+ mdelay(1);
+ /* use abb clk */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
+ /* open mphy ref clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+}
+
+static void ufs_hi3660_soc_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+ u32 reg;
+
+ if (!IS_ERR(host->rst))
+ reset_control_assert(host->rst);
+
+ /* HC_PSW powerup */
+ ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
+ udelay(10);
+ /* notify PWR ready */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
+ UFS_DEVICE_RESET_CTRL);
+
+ reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
+ reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
+ /* set cfg clk freq */
+ ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
+ /* set ref clk freq */
+ ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
+ /* bypass ufs clk gate */
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
+
+ /* open psw clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
+ /* disable ufshc iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
+ /* disable phy iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
+ /* notice iso disable */
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
+
+ /* disable lp_reset_n */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
+ mdelay(1);
+
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
+ UFS_DEVICE_RESET_CTRL);
+
+ msleep(20);
+
+ /*
+ * enable the fix for linereset recovery,
+ * and enable the rx_reset/tx_reset beat.
+ * enable ref_clk_en override (bit 5) and
+ * override value = 1 (bit 4), with mask
+ */
+ ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
+
+ if (!IS_ERR(host->rst))
+ reset_control_deassert(host->rst);
+}
+
+static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
+{
+ int err;
+ uint32_t value;
+ uint32_t reg;
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
+ /* PA_HSSeries */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
+ /* MPHY CBRATESEL */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
+ /* MPHY CBOVRCTRL2 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
+ /* MPHY CBOVRCTRL3 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* MPHY RXOVRCTRL4 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
+ /* MPHY RXOVRCTRL4 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
+ /* MPHY RXOVRCTRL5 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
+ /* MPHY RXOVRCTRL5 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
+ /* MPHY RXSQCONTROL rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+ /* MPHY RXSQCONTROL rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
+ if (value != 0x1)
+ dev_info(hba->dev,
+ "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
+ err = ufs_hisi_check_hibern8(hba);
+ if (err)
+ dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
+
+ ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+
+ /* disable auto H8 */
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ reg = reg & (~UFS_AHIT_AH8ITV_MASK);
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* Unipro PA_Local_TX_LCC_Enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
+ /* close Unipro VS_Mk2ExtnSupport */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
+ if (value != 0) {
+ /* Ensure close success */
+ dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
+ }
+
+ return err;
+}
+
+static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ /* Unipro DL_AFC0CreditThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
+ /* Unipro DL_TC0OutAckThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
+ /* Unipro DL_TC0TXFCThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
+
+ /* not bypass ufs clk gate */
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
+ UFS_SYSCTRL);
+
+ /* select received symbol cnt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
+ /* reset counter0 and enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
+
+ return 0;
+}
+
+static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_hisi_link_startup_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ err = ufs_hisi_link_startup_post_change(hba);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+struct ufs_hisi_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+static int ufs_hisi_get_pwr_dev_param(
+ struct ufs_hisi_dev_params *hisi_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_hisi_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_hisi_max_hs = false;
+
+ if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (hisi_param->desired_working_mode == FAST) {
+ is_hisi_max_hs = true;
+ min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
+ hisi_param->hs_tx_gear);
+ } else {
+ min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
+ hisi_param->pwm_tx_gear);
+ }
+
+ /*
+ * device doesn't support HS but
+ * hisi_param->desired_working_mode is HS,
+ * thus device and hisi_param don't agree
+ */
+ if (!is_dev_sup_hs && is_hisi_max_hs) {
+ pr_err("%s: device not support HS\n", __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_hisi_max_hs) {
+ /*
+ * since device supports HS, it supports FAST_MODE.
+ * since hisi_param->desired_working_mode is also HS
+ * then final decision (FAST/FASTAUTO) is done according
+ * to hisi_params as it is the restricting factor
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hisi_param->rx_pwr_hs;
+ } else {
+ /*
+ * here hisi_param->desired_working_mode is PWM.
+ * it doesn't matter whether device supports HS or PWM,
+ * in both cases hisi_param->desired_working_mode will
+ * determine the mode
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hisi_param->rx_pwr_pwm;
+ }
+
+ /*
+ * we would like tx to work in the minimum number of lanes
+ * between device capability and vendor preferences.
+ * the same decision will be made for rx
+ */
+ agreed_pwr->lane_tx =
+ min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
+ agreed_pwr->lane_rx =
+ min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+ /*
+ * if device capabilities and vendor pre-defined preferences are
+ * both HS or both PWM, then set the minimum gear to be the chosen
+ * working gear.
+ * if one is PWM and one is HS, then the one that is PWM gets to
+ * decide what the gear is, as it is the one that also decided
+ * previously what pwr the device will be configured to.
+ */
+ if ((is_dev_sup_hs && is_hisi_max_hs) ||
+ (!is_dev_sup_hs && !is_hisi_max_hs))
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx =
+ min_t(u32, min_dev_gear, min_hisi_gear);
+ else
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
+
+ agreed_pwr->hs_rate = hisi_param->hs_rate;
+
+ pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
+ agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
+ agreed_pwr->hs_rate);
+ return 0;
+}
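+
+/*
+ * Note: once the HS/PWM question is settled, the agreement above
+ * reduces to element-wise minima. Worked example (illustrative
+ * values, not taken from any specific device):
+ * device supports HS-G1 on 2 lanes, vendor limit is HS-G3 on 2 lanes
+ *   => agreed gear  = min(min(1, 1), min(3, 3)) = 1 (HS-G1)
+ *   => agreed lanes = min(2, 2) = 2
+ * and the power mode comes from the vendor side (FAST_MODE here).
+ */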
+
+static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
+{
+ hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
+ hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
+ hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX;
+ hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX;
+ hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX;
+ hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX;
+ hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM;
+ hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM;
+ hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS;
+ hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS;
+ hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE;
+ hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE;
+}
+
+static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
+{
+ /* update */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
+ /* PA_TxSkip */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
+ /* PA_PWRModeUserData0 = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+ /* PA_PWRModeUserData1 = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+ /* PA_PWRModeUserData2 = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+ /* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+ /* DME_TC0ReplayTimeOutVal = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+ /* DME_AFC0ReqTimeOutVal = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+ /* PA_PWRModeUserData3 = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+ /* PA_PWRModeUserData4 = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+ /* PA_PWRModeUserData5 = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+ /* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+ /* DME_TC1ReplayTimeOutVal = 65535, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+ /* DME_AFC1ReqTimeOutVal = 32767, default is 0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+}
+
+static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_hisi_dev_params ufs_hisi_cap;
+ int ret = 0;
+
+ if (!dev_req_params) {
+ dev_err(hba->dev,
+ "%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_hisi_set_dev_cap(&ufs_hisi_cap);
+ ret = ufs_hisi_get_pwr_dev_param(
+ &ufs_hisi_cap, dev_max_params, dev_req_params);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: failed to determine capabilities\n", __func__);
+ goto out;
+ }
+
+ ufs_hisi_pwr_change_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_is_runtime_pm(pm_op))
+ return 0;
+
+ if (host->in_suspend) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ udelay(10);
+ /* set ref_dig_clk override of PHY PCS to 0 */
+ ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
+
+ host->in_suspend = true;
+
+ return 0;
+}
+
+static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (!host->in_suspend)
+ return 0;
+
+ /* set ref_dig_clk override of PHY PCS to 1 */
+ ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
+ udelay(10);
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+
+ host->in_suspend = false;
+ return 0;
+}
+
+static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
+{
+ struct resource *mem_res;
+ struct device *dev = host->hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* get resource of ufs sys ctrl */
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(host->ufs_sys_ctrl))
+ return PTR_ERR(host->ufs_sys_ctrl);
+
+ return 0;
+}
+
+static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_1;
+ hba->spm_lvl = UFS_PM_LVL_3;
+}
+
+/**
+ * ufs_hisi_init_common - allocate host data and acquire vendor resources
+ * @hba: host controller instance
+ */
+static int ufs_hisi_init_common(struct ufs_hba *hba)
+{
+ int err = 0;
+ struct device *dev = hba->dev;
+ struct ufs_hisi_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ host->rst = devm_reset_control_get(dev, "rst");
+
+ ufs_hisi_set_pm_lvl(hba);
+
+ err = ufs_hisi_get_resource(host);
+ if (err) {
+ ufshcd_set_variant(hba, NULL);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ufs_hi3660_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+
+ ret = ufs_hisi_init_common(hba);
+ if (ret) {
+ dev_err(dev, "%s: ufs common init fail\n", __func__);
+ return ret;
+ }
+
+ ufs_hi3660_clk_init(hba);
+
+ ufs_hi3660_soc_init(hba);
+
+ return 0;
+}
+
+static struct ufs_hba_variant_ops ufs_hba_hisi_vops = {
+ .name = "hi3660",
+ .init = ufs_hi3660_init,
+ .link_startup_notify = ufs_hi3660_link_startup_notify,
+ .pwr_change_notify = ufs_hi3660_pwr_change_notify,
+ .suspend = ufs_hisi_suspend,
+ .resume = ufs_hisi_resume,
+};
+
+static int ufs_hisi_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops);
+}
+
+static int ufs_hisi_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id ufs_hisi_of_match[] = {
+ { .compatible = "hisilicon,hi3660-ufs" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
+
+static const struct dev_pm_ops ufs_hisi_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufs_hisi_pltform = {
+ .probe = ufs_hisi_probe,
+ .remove = ufs_hisi_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-hisi",
+ .pm = &ufs_hisi_pm_ops,
+ .of_match_table = of_match_ptr(ufs_hisi_of_match),
+ },
+};
+module_platform_driver(ufs_hisi_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ufshcd-hisi");
+MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
new file mode 100644
index 000000000000..3df9cd7acc29
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hisi.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, HiSilicon. All rights reserved.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef UFS_HISI_H_
+#define UFS_HISI_H_
+
+#define HBRN8_POLL_TOUT_MS 1000
+
+/*
+ * ufs sysctrl specific define
+ */
+#define PSW_POWER_CTRL (0x04)
+#define PHY_ISO_EN (0x08)
+#define HC_LP_CTRL (0x0C)
+#define PHY_CLK_CTRL (0x10)
+#define PSW_CLK_CTRL (0x14)
+#define CLOCK_GATE_BYPASS (0x18)
+#define RESET_CTRL_EN (0x1C)
+#define UFS_SYSCTRL (0x5C)
+#define UFS_DEVICE_RESET_CTRL (0x60)
+
+#define BIT_UFS_PSW_ISO_CTRL (1 << 16)
+#define BIT_UFS_PSW_MTCMOS_EN (1 << 0)
+#define BIT_UFS_REFCLK_ISO_EN (1 << 16)
+#define BIT_UFS_PHY_ISO_CTRL (1 << 0)
+#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16)
+#define BIT_SYSCTRL_PWR_READY (1 << 8)
+#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24)
+#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8)
+#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF)
+#define UFS_FREQ_CFG_CLK (0x39)
+#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4)
+#define MASK_UFS_CLK_GATE_BYPASS (0x3F)
+#define BIT_SYSCTRL_LP_RESET_N (1 << 0)
+#define BIT_UFS_REFCLK_SRC_SEl (1 << 0)
+#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16)
+#define MASK_UFS_DEVICE_RESET (0x1 << 16)
+#define BIT_UFS_DEVICE_RESET (0x1)
+
+/*
+ * M-TX Configuration Attributes for Hixxxx
+ */
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_HIBERN8 0x1
+
+/*
+ * Hixxxx UFS HC specific Registers
+ */
+enum {
+ UFS_REG_OCPTHRTL = 0xc0,
+ UFS_REG_OOCPR = 0xc4,
+
+ UFS_REG_CDACFG = 0xd0,
+ UFS_REG_CDATX1 = 0xd4,
+ UFS_REG_CDATX2 = 0xd8,
+ UFS_REG_CDARX1 = 0xdc,
+ UFS_REG_CDARX2 = 0xe0,
+ UFS_REG_CDASTA = 0xe4,
+
+ UFS_REG_LBMCFG = 0xf0,
+ UFS_REG_LBMSTA = 0xf4,
+ UFS_REG_UFSMODE = 0xf8,
+
+ UFS_REG_HCLKDIV = 0xfc,
+};
+
+/* AHIT - Auto-Hibernate Idle Timer */
+#define UFS_AHIT_AH8ITV_MASK 0x3FF
+
+/* REG UFS_REG_HCLKDIV definition */
+#define UFS_HCLKDIV_NORMAL_VALUE 0xE4
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_HISI_LIMIT_NUM_LANES_RX 2
+#define UFS_HISI_LIMIT_NUM_LANES_TX 2
+#define UFS_HISI_LIMIT_HSGEAR_RX UFS_HS_G3
+#define UFS_HISI_LIMIT_HSGEAR_TX UFS_HS_G3
+#define UFS_HISI_LIMIT_PWMGEAR_RX UFS_PWM_G4
+#define UFS_HISI_LIMIT_PWMGEAR_TX UFS_PWM_G4
+#define UFS_HISI_LIMIT_RX_PWR_PWM SLOW_MODE
+#define UFS_HISI_LIMIT_TX_PWR_PWM SLOW_MODE
+#define UFS_HISI_LIMIT_RX_PWR_HS FAST_MODE
+#define UFS_HISI_LIMIT_TX_PWR_HS FAST_MODE
+#define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B
+#define UFS_HISI_LIMIT_DESIRED_MODE FAST
+
+struct ufs_hisi_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_sys_ctrl;
+
+ struct reset_control *rst;
+
+ uint64_t caps;
+
+ bool in_suspend;
+};
+
+#define ufs_sys_ctrl_writel(host, val, reg) \
+ writel((val), (host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_set_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel( \
+ (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel((host), \
+ ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
+ (reg))
+#endif /* UFS_HISI_H_ */
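
The ufs_sys_ctrl_set_bits()/ufs_sys_ctrl_clr_bits() helpers above are plain, non-atomic read-modify-write accessors over the ioremapped sys-ctrl block; expanded by hand, a set_bits call is roughly the following sketch (no locking is implied, so callers that share a sys-ctrl register across contexts need their own serialization):

    u32 v = readl(host->ufs_sys_ctrl + PHY_CLK_CTRL);
    writel(v | BIT_SYSCTRL_REF_CLOCK_EN, host->ufs_sys_ctrl + PHY_CLK_CTRL);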
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 221820a7c78b..75ee5906b966 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -50,19 +50,10 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
-static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
- char *prefix)
-{
- print_hex_dump(KERN_ERR, prefix,
- len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
- 16, 4, (void __force *)hba->mmio_base + offset,
- len * 4, false);
-}
-
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- char *prefix, void *priv)
+ const char *prefix, void *priv)
{
- ufs_qcom_dump_regs(hba, offset, len, prefix);
+ ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
@@ -1431,7 +1422,7 @@ out:
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, char *str, void *priv))
+ int offset, int num_regs, const char *str, void *priv))
{
u32 reg;
struct ufs_qcom_host *host;
@@ -1613,7 +1604,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
- ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
}
static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
@@ -1639,8 +1630,8 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
- ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
- "HCI Vendor Specific Registers ");
+ ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
+ "HCI Vendor Specific Registers ");
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 397081d320b1..9d5d2ca7fc4f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -99,8 +99,29 @@
_ret; \
})
-#define ufshcd_hex_dump(prefix_str, buf, len) \
-print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+#define ufshcd_hex_dump(prefix_str, buf, len) do { \
+ size_t __len = (len); \
+ print_hex_dump(KERN_ERR, prefix_str, \
+ __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
+ 16, 4, buf, __len, false); \
+} while (0)
+
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix)
+{
+ u8 *regs;
+
+ regs = kzalloc(len, GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ memcpy_fromio(regs, hba->mmio_base + offset, len);
+ ufshcd_hex_dump(prefix, regs, len);
+ kfree(regs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
@@ -321,18 +342,19 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
sector_t lba = -1;
u8 opcode = 0;
u32 intr, doorbell;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int transfer_len = -1;
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
-
- if (!trace_ufshcd_command_enabled())
+ if (!trace_ufshcd_command_enabled()) {
+ /* trace the UPIU without tracing the command */
+ if (lrbp->cmd)
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
return;
-
- lrbp = &hba->lrb[tag];
+ }
if (lrbp->cmd) { /* data phase exists */
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
opcode = (u8)(*lrbp->cmd->cmnd);
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
@@ -386,15 +408,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
- /*
- * hex_dump reads its data without the readl macro. This might
- * cause inconsistency issues on some platform, as the printed
- * values may be from cache and not the most recent value.
- * To know whether you are looking at an un-cached version verify
- * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
- * during platform/pci probe function.
- */
- ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
hba->ufs_version, hba->capabilities);
dev_err(hba->dev,
@@ -7290,7 +7304,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (driver_byte(ret) & DRIVER_SENSE)
+ if (driver_byte(ret) == DRIVER_SENSE)
scsi_print_sense_hdr(sdp, NULL, &sshdr);
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f51758f1e5cc..33fdd3f281ae 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1043,4 +1043,7 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix);
+
#endif /* End of Header */
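
With the declaration above exported, a vendor driver can snapshot any window of its register space through the core helper instead of hex-dumping hba->mmio_base directly. A minimal caller sketch (the wrapper name and offset are hypothetical; the length argument is in bytes, matching the qcom callers above):

    /* Snapshot 16 vendor registers at offset 0xC0; ufshcd_dump_regs()
     * copies via memcpy_fromio(), so the dump reflects the device
     * rather than a cached view of the MMIO space.
     */
    static void my_vendor_dump(struct ufs_hba *hba)
    {
        ufshcd_dump_regs(hba, 0xC0, 16 * 4, "vendor regs: ");
    }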
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 6dc8891ccb74..1c72db94270e 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -513,12 +513,12 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
- blk_rq_sectors(rq) *
- bi->tuple_size);
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
else if (sc->sc_data_direction == DMA_FROM_DEVICE)
cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
- blk_rq_sectors(rq) *
- bi->tuple_size);
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
}
#endif
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 777e5f1e52d1..0cd947f78b5b 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
(btstat == BTSTAT_SUCCESS ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
- cmd->result = (DID_OK << 16) | sdstat;
- if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
- cmd->result |= (DRIVER_SENSE << 24);
+ if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+ cmd->result = (DID_RESET << 16);
+ } else {
+ cmd->result = (DID_OK << 16) | sdstat;
+ if (sdstat == SAM_STAT_CHECK_CONDITION &&
+ cmd->sense_buffer)
+ cmd->result |= (DRIVER_SENSE << 24);
+ }
} else
switch (btstat) {
case BTSTAT_SUCCESS:
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 2e45988d1259..e5d7fb81ad66 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -300,8 +300,8 @@ static void maple_send(void)
mutex_unlock(&maple_wlist_lock);
if (maple_packets > 0) {
for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
- sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+ PAGE_SIZE);
}
finish:
@@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work)
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
mdev = mq->dev;
recvbuf = mq->recvbuf->buf;
- sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+ __flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+ 0x400);
code = recvbuf[0];
kfree(mq->sendbuf);
list_del_init(&mq->list);
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index ea7ef982968b..46b4cda36bac 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -5,6 +5,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index 16590dfaafa4..f8c08fb9891d 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -215,26 +215,26 @@ static void siox_poll(struct siox_master *smaster)
siox_status_clean(status,
sdevice->status_written_lastcycle);
- /* Check counter bits */
- if (siox_device_counter_error(sdevice, status_clean)) {
- bool prev_counter_error;
+ /* Check counter and type bits */
+ if (siox_device_counter_error(sdevice, status_clean) ||
+ siox_device_type_error(sdevice, status_clean)) {
+ bool prev_error;
synced = false;
/* only report a new error if the last cycle was ok */
- prev_counter_error =
+ prev_error =
siox_device_counter_error(sdevice,
- prev_status_clean);
- if (!prev_counter_error) {
+ prev_status_clean) ||
+ siox_device_type_error(sdevice,
+ prev_status_clean);
+
+ if (!prev_error) {
sdevice->status_errors++;
sysfs_notify_dirent(sdevice->status_errors_kn);
}
}
- /* Check type bits */
- if (siox_device_type_error(sdevice, status_clean))
- synced = false;
-
/* If the device is unsynced report the watchdog as active */
if (!synced) {
status &= ~SIOX_STATUS_WDG;
@@ -715,17 +715,17 @@ int siox_master_register(struct siox_master *smaster)
dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
+ mutex_init(&smaster->lock);
+ INIT_LIST_HEAD(&smaster->devices);
+
smaster->last_poll = jiffies;
- smaster->poll_thread = kthread_create(siox_poll_thread, smaster,
- "siox-%d", smaster->busno);
+ smaster->poll_thread = kthread_run(siox_poll_thread, smaster,
+ "siox-%d", smaster->busno);
if (IS_ERR(smaster->poll_thread)) {
smaster->active = 0;
return PTR_ERR(smaster->poll_thread);
}
- mutex_init(&smaster->lock);
- INIT_LIST_HEAD(&smaster->devices);
-
ret = device_add(&smaster->dev);
if (ret)
kthread_stop(smaster->poll_thread);
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index 1a632fad597e..9d73ad806698 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -15,10 +15,20 @@ if SLIMBUS
# SLIMbus controllers
config SLIM_QCOM_CTRL
tristate "Qualcomm SLIMbus Manager Component"
- depends on SLIMBUS
depends on HAS_IOMEM
help
Select driver if Qualcomm's SLIMbus Manager Component is
programmed using Linux kernel.
+config SLIM_QCOM_NGD_CTRL
+ tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
+ depends on QCOM_QMI_HELPERS
+ depends on HAS_IOMEM && DMA_ENGINE
+ help
+ Select this driver if Qualcomm's SLIMbus Satellite Non-Generic
+ Device Component is programmed using the Linux kernel.
+ This is a lightweight SLIMbus controller driver responsible for
+ communicating with slave HW directly over the bus using the
+ messaging interface, and with the master component residing on
+ the ADSP for bandwidth and data-channel management.
endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
index a35a3da4eb78..d9aa011b6804 100644
--- a/drivers/slimbus/Makefile
+++ b/drivers/slimbus/Makefile
@@ -3,8 +3,11 @@
# Makefile for kernel SLIMbus framework.
#
obj-$(CONFIG_SLIMBUS) += slimbus.o
-slimbus-y := core.o messaging.o sched.o
+slimbus-y := core.o messaging.o sched.o stream.o
#Controllers
obj-$(CONFIG_SLIM_QCOM_CTRL) += slim-qcom-ctrl.o
slim-qcom-ctrl-y := qcom-ctrl.o
+
+obj-$(CONFIG_SLIM_QCOM_NGD_CTRL) += slim-qcom-ngd-ctrl.o
+slim-qcom-ngd-ctrl-y := qcom-ngd-ctrl.o
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 7ddfc675b131..95b00d28ad6e 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -114,6 +114,8 @@ static int slim_add_device(struct slim_controller *ctrl,
sbdev->dev.release = slim_dev_release;
sbdev->dev.driver = NULL;
sbdev->ctrl = ctrl;
+ INIT_LIST_HEAD(&sbdev->stream_list);
+ spin_lock_init(&sbdev->stream_list_lock);
if (node)
sbdev->dev.of_node = of_node_get(node);
@@ -356,6 +358,45 @@ struct slim_device *slim_get_device(struct slim_controller *ctrl,
}
EXPORT_SYMBOL_GPL(slim_get_device);
+static int of_slim_match_dev(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+ struct slim_device *sbdev = to_slim_device(dev);
+
+ return (sbdev->dev.of_node == np);
+}
+
+static struct slim_device *of_find_slim_device(struct slim_controller *ctrl,
+ struct device_node *np)
+{
+ struct slim_device *sbdev;
+ struct device *dev;
+
+ dev = device_find_child(ctrl->dev, np, of_slim_match_dev);
+ if (dev) {
+ sbdev = to_slim_device(dev);
+ return sbdev;
+ }
+
+ return NULL;
+}
+
+/**
+ * of_slim_get_device() - get handle to a device using dt node.
+ *
+ * @ctrl: Controller on which this device is to be queried
+ * @np: node pointer to device
+ *
+ * Return: pointer to the slim_device if the device has already been
+ * reported, NULL otherwise.
+ */
+struct slim_device *of_slim_get_device(struct slim_controller *ctrl,
+ struct device_node *np)
+{
+ return of_find_slim_device(ctrl, np);
+}
+EXPORT_SYMBOL_GPL(of_slim_get_device);
+
static int slim_device_alloc_laddr(struct slim_device *sbdev,
bool report_present)
{
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 457ea1f8db30..d5879142dbef 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -29,22 +29,19 @@ void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
spin_lock_irqsave(&ctrl->txn_lock, flags);
txn = idr_find(&ctrl->tid_idr, tid);
- if (txn == NULL) {
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+ if (txn == NULL)
return;
- }
msg = txn->msg;
if (msg == NULL || msg->rbuf == NULL) {
dev_err(ctrl->dev, "Got response to invalid TID:%d, len:%d\n",
tid, len);
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
return;
}
- idr_remove(&ctrl->tid_idr, tid);
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
-
+ slim_free_txn_tid(ctrl, txn);
memcpy(msg->rbuf, reply, len);
if (txn->comp)
complete(txn->comp);
@@ -56,6 +53,48 @@ void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
EXPORT_SYMBOL_GPL(slim_msg_response);
/**
+ * slim_alloc_txn_tid() - Allocate a tid to txn
+ *
+ * @ctrl: Controller handle
+ * @txn: transaction to be allocated with tid.
+ *
+ * Return: zero on success with valid txn->tid and error code on failures.
+ */
+int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+ SLIM_MAX_TIDS, GFP_ATOMIC);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return ret;
+ }
+ txn->tid = ret;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_alloc_txn_tid);
+
+/**
+ * slim_free_txn_tid() - Free the tid of a txn
+ *
+ * @ctrl: Controller handle
+ * @txn: transaction whose tid should be freed
+ */
+void slim_free_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ idr_remove(&ctrl->tid_idr, txn->tid);
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+}
+EXPORT_SYMBOL_GPL(slim_free_txn_tid);
+
+/**
* slim_do_transfer() - Process a SLIMbus-messaging transaction
*
* @ctrl: Controller handle
@@ -72,8 +111,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
bool need_tid = false, clk_pause_msg = false;
- unsigned long flags;
- int ret, tid, timeout;
+ int ret, timeout;
/*
* do not vote for runtime-PM if the transactions are part of clock
@@ -97,34 +135,26 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
need_tid = slim_tid_txn(txn->mt, txn->mc);
if (need_tid) {
- spin_lock_irqsave(&ctrl->txn_lock, flags);
- tid = idr_alloc(&ctrl->tid_idr, txn, 0,
- SLIM_MAX_TIDS, GFP_ATOMIC);
- txn->tid = tid;
+ ret = slim_alloc_txn_tid(ctrl, txn);
+ if (ret)
+ return ret;
if (!txn->msg->comp)
txn->comp = &done;
else
+ txn->comp = txn->msg->comp;
-
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
-
- if (tid < 0)
- return tid;
}
ret = ctrl->xfer_msg(ctrl, txn);
- if (ret && need_tid && !txn->msg->comp) {
+ if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
timeout = wait_for_completion_timeout(txn->comp,
msecs_to_jiffies(ms));
if (!timeout) {
ret = -ETIMEDOUT;
- spin_lock_irqsave(&ctrl->txn_lock, flags);
- idr_remove(&ctrl->tid_idr, tid);
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ slim_free_txn_tid(ctrl, txn);
}
}
@@ -139,7 +169,7 @@ slim_xfer_err:
* if there was error during this transaction
*/
pm_runtime_mark_last_busy(ctrl->dev);
- pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
}
return ret;
}
@@ -246,6 +276,7 @@ static void slim_fill_msg(struct slim_val_inf *msg, u32 addr,
msg->num_bytes = count;
msg->rbuf = rbuf;
msg->wbuf = wbuf;
+ msg->comp = NULL;
}
/**
@@ -307,7 +338,7 @@ int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
{
struct slim_val_inf msg;
- slim_fill_msg(&msg, addr, count, val, NULL);
+ slim_fill_msg(&msg, addr, count, NULL, val);
return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_CHANGE_VALUE);
}
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
new file mode 100644
index 000000000000..8be4d6786c61
--- /dev/null
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/slimbus.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/soc/qcom/qmi.h>
+#include <net/sock.h>
+#include "slimbus.h"
+
+/* NGD (Non-ported Generic Device) registers */
+#define NGD_CFG 0x0
+#define NGD_CFG_ENABLE BIT(0)
+#define NGD_CFG_RX_MSGQ_EN BIT(1)
+#define NGD_CFG_TX_MSGQ_EN BIT(2)
+#define NGD_STATUS 0x4
+#define NGD_LADDR BIT(1)
+#define NGD_RX_MSGQ_CFG 0x8
+#define NGD_INT_EN 0x10
+#define NGD_INT_RECFG_DONE BIT(24)
+#define NGD_INT_TX_NACKED_2 BIT(25)
+#define NGD_INT_MSG_BUF_CONTE BIT(26)
+#define NGD_INT_MSG_TX_INVAL BIT(27)
+#define NGD_INT_IE_VE_CHG BIT(28)
+#define NGD_INT_DEV_ERR BIT(29)
+#define NGD_INT_RX_MSG_RCVD BIT(30)
+#define NGD_INT_TX_MSG_SENT BIT(31)
+#define NGD_INT_STAT 0x14
+#define NGD_INT_CLR 0x18
+#define DEF_NGD_INT_MASK (NGD_INT_TX_NACKED_2 | NGD_INT_MSG_BUF_CONTE | \
+ NGD_INT_MSG_TX_INVAL | NGD_INT_IE_VE_CHG | \
+ NGD_INT_DEV_ERR | NGD_INT_TX_MSG_SENT | \
+ NGD_INT_RX_MSG_RCVD)
+
+/* Slimbus QMI service */
+#define SLIMBUS_QMI_SVC_ID 0x0301
+#define SLIMBUS_QMI_SVC_V1 1
+#define SLIMBUS_QMI_INS_ID 0
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
+#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
+#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14
+#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+/* QMI response timeout of 1000 ms */
+#define SLIMBUS_QMI_RESP_TOUT 1000
+
+/* User defined commands */
+#define SLIM_USR_MC_GENERIC_ACK 0x25
+#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
+#define SLIM_USR_MC_REPORT_SATELLITE 0x1
+#define SLIM_USR_MC_ADDR_QUERY 0xD
+#define SLIM_USR_MC_ADDR_REPLY 0xE
+#define SLIM_USR_MC_DEFINE_CHAN 0x20
+#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
+#define SLIM_USR_MC_CHAN_CTRL 0x23
+#define SLIM_USR_MC_RECONFIG_NOW 0x24
+#define SLIM_USR_MC_REQ_BW 0x28
+#define SLIM_USR_MC_CONNECT_SRC 0x2C
+#define SLIM_USR_MC_CONNECT_SINK 0x2D
+#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE 0x0
+
+#define QCOM_SLIM_NGD_AUTOSUSPEND MSEC_PER_SEC
+#define SLIM_RX_MSGQ_TIMEOUT_VAL 0x10000
+
+#define SLIM_LA_MGR 0xFF
+#define SLIM_ROOT_FREQ 24576000
+#define LADDR_RETRY 5
+
+/* Per spec, max 40 bytes per received message */
+#define SLIM_MSGQ_BUF_LEN 40
+#define QCOM_SLIM_NGD_DESC_NUM 32
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+ ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
+
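+/*
+ * Worked example of SLIM_MSG_ASM_FIRST_WORD() above (arbitrary values):
+ * l = 7, mt = 0 (core), mc = 0x1, dt = 0 (logical address), ad = 0xFF:
+ *   7 | (0 << 5) | (0x1 << 8) | (0 << 15) | (0xFF << 16) = 0x00ff0107
+ */
+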
+#define INIT_MX_RETRIES 10
+#define DEF_RETRY_MS 10
+#define SAT_MAGIC_LSB 0xD9
+#define SAT_MAGIC_MSB 0xC5
+#define SAT_MSG_VER 0x1
+#define SAT_MSG_PROT 0x1
+#define to_ngd(d) container_of(d, struct qcom_slim_ngd, dev)
+
+struct ngd_reg_offset_data {
+ u32 offset, size;
+};
+
+static const struct ngd_reg_offset_data ngd_v1_5_offset_info = {
+ .offset = 0x1000,
+ .size = 0x1000,
+};
+
+enum qcom_slim_ngd_state {
+ QCOM_SLIM_NGD_CTRL_AWAKE,
+ QCOM_SLIM_NGD_CTRL_IDLE,
+ QCOM_SLIM_NGD_CTRL_ASLEEP,
+ QCOM_SLIM_NGD_CTRL_DOWN,
+};
+
+struct qcom_slim_ngd_qmi {
+ struct qmi_handle qmi;
+ struct sockaddr_qrtr svc_info;
+ struct qmi_handle svc_event_hdl;
+ struct qmi_response_type_v01 resp;
+ struct qmi_handle *handle;
+ struct completion qmi_comp;
+};
+
+struct qcom_slim_ngd_ctrl;
+struct qcom_slim_ngd;
+
+struct qcom_slim_ngd_dma_desc {
+ struct dma_async_tx_descriptor *desc;
+ struct qcom_slim_ngd_ctrl *ctrl;
+ struct completion *comp;
+ dma_cookie_t cookie;
+ dma_addr_t phys;
+ void *base;
+};
+
+struct qcom_slim_ngd {
+ struct platform_device *pdev;
+ void __iomem *base;
+ int id;
+};
+
+struct qcom_slim_ngd_ctrl {
+ struct slim_framer framer;
+ struct slim_controller ctrl;
+ struct qcom_slim_ngd_qmi qmi;
+ struct qcom_slim_ngd *ngd;
+ struct device *dev;
+ void __iomem *base;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+ struct qcom_slim_ngd_dma_desc rx_desc[QCOM_SLIM_NGD_DESC_NUM];
+ struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM];
+ struct completion reconf;
+ struct work_struct m_work;
+ struct workqueue_struct *mwq;
+ spinlock_t tx_buf_lock;
+ enum qcom_slim_ngd_state state;
+ dma_addr_t rx_phys_base;
+ dma_addr_t tx_phys_base;
+ void *rx_base;
+ void *tx_base;
+ int tx_tail;
+ int tx_head;
+ u32 ver;
+};
+
+enum slimbus_mode_enum_type_v01 {
+ /* To force a 32 bit signed enum. Do not change or use */
+ SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+ SLIMBUS_MODE_SATELLITE_V01 = 1,
+ SLIMBUS_MODE_MASTER_V01 = 2,
+ SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_pm_enum_type_v01 {
+ /* To force a 32 bit signed enum. Do not change or use */
+ SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+ SLIMBUS_PM_INACTIVE_V01 = 1,
+ SLIMBUS_PM_ACTIVE_V01 = 2,
+ SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_resp_enum_type_v01 {
+ SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+ SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
+ SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+struct slimbus_select_inst_req_msg_v01 {
+ uint32_t instance;
+ uint8_t mode_valid;
+ enum slimbus_mode_enum_type_v01 mode;
+};
+
+struct slimbus_select_inst_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_power_req_msg_v01 {
+ enum slimbus_pm_enum_type_v01 pm_req;
+ uint8_t resp_type_valid;
+ enum slimbus_resp_enum_type_v01 resp_type;
+};
+
+struct slimbus_power_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ instance),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ mode_valid),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(enum slimbus_mode_enum_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ mode),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(enum slimbus_pm_enum_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct slimbus_power_req_msg_v01,
+ pm_req),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_power_req_msg_v01,
+ resp_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum slimbus_resp_enum_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_power_req_msg_v01,
+ resp_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static int qcom_slim_qmi_send_select_inst_req(struct qcom_slim_ngd_ctrl *ctrl,
+ struct slimbus_select_inst_req_msg_v01 *req)
+{
+ struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
+ struct qmi_txn txn;
+ int rc;
+
+ rc = qmi_txn_init(ctrl->qmi.handle, &txn,
+ slimbus_select_inst_resp_msg_v01_ei, &resp);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
+ return rc;
+ }
+
+ rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
+ SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01,
+ SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN,
+ slimbus_select_inst_req_msg_v01_ei, req);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
+ qmi_txn_cancel(&txn);
+ return rc;
+ }
+
+ rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
+ return rc;
+ }
+ /* Check the response */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ dev_err(ctrl->dev, "QMI request failed 0x%x\n",
+ resp.resp.result);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static void qcom_slim_qmi_power_resp_cb(struct qmi_handle *handle,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct slimbus_power_resp_msg_v01 *resp;
+
+ resp = (struct slimbus_power_resp_msg_v01 *)data;
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
+ pr_err("QMI power request failed 0x%x\n",
+ resp->resp.result);
+
+ complete(&txn->completion);
+}
+
+static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl,
+ struct slimbus_power_req_msg_v01 *req)
+{
+ struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
+ struct qmi_txn txn;
+ int rc;
+
+ rc = qmi_txn_init(ctrl->qmi.handle, &txn,
+ slimbus_power_resp_msg_v01_ei, &resp);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
+ return rc;
+ }
+
+ rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
+ SLIMBUS_QMI_POWER_REQ_V01,
+ SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
+ slimbus_power_req_msg_v01_ei, req);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
+ qmi_txn_cancel(&txn);
+ return rc;
+ }
+
+ rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
+ return rc;
+ }
+
+ /* Check the response */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ dev_err(ctrl->dev, "QMI request failed 0x%x\n",
+ resp.resp.result);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id = SLIMBUS_QMI_POWER_RESP_V01,
+ .ei = slimbus_power_resp_msg_v01_ei,
+ .decoded_size = sizeof(struct slimbus_power_resp_msg_v01),
+ .fn = qcom_slim_qmi_power_resp_cb,
+ },
+ {}
+};
+
+static int qcom_slim_qmi_init(struct qcom_slim_ngd_ctrl *ctrl,
+ bool apps_is_master)
+{
+ struct slimbus_select_inst_req_msg_v01 req;
+ struct qmi_handle *handle;
+ int rc;
+
+ handle = devm_kzalloc(ctrl->dev, sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ rc = qmi_handle_init(handle, SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
+ NULL, qcom_slim_qmi_msg_handlers);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI client init failed: %d\n", rc);
+ goto qmi_handle_init_failed;
+ }
+
+ rc = kernel_connect(handle->sock,
+ (struct sockaddr *)&ctrl->qmi.svc_info,
+ sizeof(ctrl->qmi.svc_info), 0);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "Remote Service connect failed: %d\n", rc);
+ goto qmi_connect_to_service_failed;
+ }
+
+ /* Instance is 0 based */
+ req.instance = (ctrl->ngd->id >> 1);
+ req.mode_valid = 1;
+
+ /* Mode indicates the role of the ADSP */
+ if (apps_is_master)
+ req.mode = SLIMBUS_MODE_SATELLITE_V01;
+ else
+ req.mode = SLIMBUS_MODE_MASTER_V01;
+
+ ctrl->qmi.handle = handle;
+
+ rc = qcom_slim_qmi_send_select_inst_req(ctrl, &req);
+ if (rc) {
+ dev_err(ctrl->dev, "failed to select h/w instance\n");
+ goto qmi_select_instance_failed;
+ }
+
+ return 0;
+
+qmi_select_instance_failed:
+ ctrl->qmi.handle = NULL;
+qmi_connect_to_service_failed:
+ qmi_handle_release(handle);
+qmi_handle_init_failed:
+ devm_kfree(ctrl->dev, handle);
+ return rc;
+}
+
+static void qcom_slim_qmi_exit(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ if (!ctrl->qmi.handle)
+ return;
+
+ qmi_handle_release(ctrl->qmi.handle);
+ devm_kfree(ctrl->dev, ctrl->qmi.handle);
+ ctrl->qmi.handle = NULL;
+}
+
+static int qcom_slim_qmi_power_request(struct qcom_slim_ngd_ctrl *ctrl,
+ bool active)
+{
+ struct slimbus_power_req_msg_v01 req;
+
+ if (active)
+ req.pm_req = SLIMBUS_PM_ACTIVE_V01;
+ else
+ req.pm_req = SLIMBUS_PM_INACTIVE_V01;
+
+ req.resp_type_valid = 0;
+
+ return qcom_slim_qmi_send_power_request(ctrl, &req);
+}
+
+static u32 *qcom_slim_ngd_tx_msg_get(struct qcom_slim_ngd_ctrl *ctrl, int len,
+ struct completion *comp)
+{
+ struct qcom_slim_ngd_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+
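+ /* the ring is full when advancing tail would land on head; one slot stays free */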
+ if ((ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM == ctrl->tx_head) {
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+ return NULL;
+ }
+ desc = &ctrl->txdesc[ctrl->tx_tail];
+ desc->base = ctrl->tx_base + ctrl->tx_tail * SLIM_MSGQ_BUF_LEN;
+ desc->comp = comp;
+ ctrl->tx_tail = (ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM;
+
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+ return desc->base;
+}
+
+static void qcom_slim_ngd_tx_msg_dma_cb(void *args)
+{
+ struct qcom_slim_ngd_dma_desc *desc = args;
+ struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+
+ if (desc->comp) {
+ complete(desc->comp);
+ desc->comp = NULL;
+ }
+
+ ctrl->tx_head = (ctrl->tx_head + 1) % QCOM_SLIM_NGD_DESC_NUM;
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+}
+
+static int qcom_slim_ngd_tx_msg_post(struct qcom_slim_ngd_ctrl *ctrl,
+ void *buf, int len)
+{
+ struct qcom_slim_ngd_dma_desc *desc;
+ unsigned long flags;
+ int index, offset;
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+ offset = buf - ctrl->tx_base;
+ index = offset/SLIM_MSGQ_BUF_LEN;
+
+ desc = &ctrl->txdesc[index];
+ desc->phys = ctrl->tx_phys_base + offset;
+ desc->base = ctrl->tx_base + offset;
+ desc->ctrl = ctrl;
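+ /* round the DMA length up to the next 4-byte multiple */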
+ len = (len + 3) & 0xfc;
+
+ desc->desc = dmaengine_prep_slave_single(ctrl->dma_tx_channel,
+ desc->phys, len,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!desc->desc) {
+ dev_err(ctrl->dev, "unable to prepare channel\n");
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+ return -EINVAL;
+ }
+
+ desc->desc->callback = qcom_slim_ngd_tx_msg_dma_cb;
+ desc->desc->callback_param = desc;
+ desc->desc->cookie = dmaengine_submit(desc->desc);
+ dma_async_issue_pending(ctrl->dma_tx_channel);
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+ return 0;
+}
+
+static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
+{
+ u8 mc, mt, len;
+
+ mt = SLIM_HEADER_GET_MT(buf[0]);
+ len = SLIM_HEADER_GET_RL(buf[0]);
+ mc = SLIM_HEADER_GET_MC(buf[1]);
+
+ if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+ queue_work(ctrl->mwq, &ctrl->m_work);
+
+ if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+ mc == SLIM_MSG_MC_REPLY_VALUE || (mc == SLIM_USR_MC_ADDR_REPLY &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER) ||
+ (mc == SLIM_USR_MC_GENERIC_ACK &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
+ slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ }
+}
+
+static void qcom_slim_ngd_rx_msgq_cb(void *args)
+{
+ struct qcom_slim_ngd_dma_desc *desc = args;
+ struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
+
+ qcom_slim_ngd_rx(ctrl, (u8 *)desc->base);
+ /* Add descriptor back to the queue */
+ desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
+ desc->phys, SLIM_MSGQ_BUF_LEN,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc->desc) {
+ dev_err(ctrl->dev, "Unable to prepare rx channel\n");
+ return;
+ }
+
+ desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
+ desc->desc->callback_param = desc;
+ desc->desc->cookie = dmaengine_submit(desc->desc);
+ dma_async_issue_pending(ctrl->dma_rx_channel);
+}
+
+static int qcom_slim_ngd_post_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct qcom_slim_ngd_dma_desc *desc;
+ int i;
+
+ for (i = 0; i < QCOM_SLIM_NGD_DESC_NUM; i++) {
+ desc = &ctrl->rx_desc[i];
+ desc->phys = ctrl->rx_phys_base + i * SLIM_MSGQ_BUF_LEN;
+ desc->ctrl = ctrl;
+ desc->base = ctrl->rx_base + i * SLIM_MSGQ_BUF_LEN;
+ desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
+ desc->phys, SLIM_MSGQ_BUF_LEN,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc->desc) {
+ dev_err(ctrl->dev, "Unable to prepare rx channel\n");
+ return -EINVAL;
+ }
+
+ desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
+ desc->desc->callback_param = desc;
+ desc->desc->cookie = dmaengine_submit(desc->desc);
+ }
+ dma_async_issue_pending(ctrl->dma_rx_channel);
+
+ return 0;
+}
+
+static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ int ret, size;
+
+ ctrl->dma_rx_channel = dma_request_slave_channel(dev, "rx");
+ if (!ctrl->dma_rx_channel) {
+ dev_err(dev, "Failed to request rx dma channel\n");
+ return -EINVAL;
+ }
+
+ size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
+ ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base,
+ GFP_KERNEL);
+ if (!ctrl->rx_base) {
+ dev_err(dev, "dma_alloc_coherent failed\n");
+ ret = -ENOMEM;
+ goto rel_rx;
+ }
+
+ ret = qcom_slim_ngd_post_rx_msgq(ctrl);
+ if (ret) {
+ dev_err(dev, "post_rx_msgq() failed 0x%x\n", ret);
+ goto rx_post_err;
+ }
+
+ return 0;
+
+rx_post_err:
+ dma_free_coherent(dev, size, ctrl->rx_base, ctrl->rx_phys_base);
+rel_rx:
+ dma_release_channel(ctrl->dma_rx_channel);
+ return ret;
+}
+
+static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ unsigned long flags;
+ int ret = 0;
+ int size;
+
+ ctrl->dma_tx_channel = dma_request_slave_channel(dev, "tx");
+ if (!ctrl->dma_tx_channel) {
+ dev_err(dev, "Failed to request tx dma channel\n");
+ return -EINVAL;
+ }
+
+ size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
+ ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base,
+ GFP_KERNEL);
+ if (!ctrl->tx_base) {
+ dev_err(dev, "dma_alloc_coherent failed\n");
+ ret = -ENOMEM;
+ goto rel_tx;
+ }
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+ ctrl->tx_tail = 0;
+ ctrl->tx_head = 0;
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+ return 0;
+rel_tx:
+ dma_release_channel(ctrl->dma_tx_channel);
+ return ret;
+}
+
+static int qcom_slim_ngd_init_dma(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ int ret = 0;
+
+ ret = qcom_slim_ngd_init_rx_msgq(ctrl);
+ if (ret) {
+ dev_err(ctrl->dev, "rx dma init failed\n");
+ return ret;
+ }
+
+ ret = qcom_slim_ngd_init_tx_msgq(ctrl);
+ if (ret)
+ dev_err(ctrl->dev, "tx dma init failed\n");
+
+ return ret;
+}
+
+static irqreturn_t qcom_slim_ngd_interrupt(int irq, void *d)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = d;
+ void __iomem *base = ctrl->ngd->base;
+ u32 stat = readl(base + NGD_INT_STAT);
+
+ if ((stat & NGD_INT_MSG_BUF_CONTE) ||
+ (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
+ (stat & NGD_INT_TX_NACKED_2)) {
+ dev_err(ctrl->dev, "Error Interrupt received 0x%x\n", stat);
+ }
+
+ writel(stat, base + NGD_INT_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
+ struct slim_msg_txn *txn)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+ DECLARE_COMPLETION_ONSTACK(tx_sent);
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret, timeout, i;
+ u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ u8 rbuf[SLIM_MSGQ_BUF_LEN];
+ u32 *pbuf;
+ u8 *puc;
+ u8 la = txn->la;
+ bool usr_msg = false;
+
+ if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
+ return -EPROTONOSUPPORT;
+
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+ txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+ return 0;
+
+ if (txn->dt == SLIM_MSG_DEST_ENUMADDR)
+ return -EPROTONOSUPPORT;
+
+ if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN ||
+ txn->rl > SLIM_MSGQ_BUF_LEN) {
+ dev_err(ctrl->dev, "msg exeeds HW limit\n");
+ return -EINVAL;
+ }
+
+ pbuf = qcom_slim_ngd_tx_msg_get(ctrl, txn->rl, &tx_sent);
+ if (!pbuf) {
+ dev_err(ctrl->dev, "Message buffer unavailable\n");
+ return -ENOMEM;
+ }
+
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+ txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+ txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+ txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ switch (txn->mc) {
+ case SLIM_MSG_MC_CONNECT_SOURCE:
+ txn->mc = SLIM_USR_MC_CONNECT_SRC;
+ break;
+ case SLIM_MSG_MC_CONNECT_SINK:
+ txn->mc = SLIM_USR_MC_CONNECT_SINK;
+ break;
+ case SLIM_MSG_MC_DISCONNECT_PORT:
+ txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ usr_msg = true;
+ i = 0;
+ wbuf[i++] = txn->la;
+ la = SLIM_LA_MGR;
+ wbuf[i++] = txn->msg->wbuf[0];
+ if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+ wbuf[i++] = txn->msg->wbuf[1];
+
+ txn->comp = &done;
+ ret = slim_alloc_txn_tid(sctrl, txn);
+ if (ret) {
+ dev_err(ctrl->dev, "Unable to allocate TID\n");
+ return ret;
+ }
+
+ wbuf[i++] = txn->tid;
+
+ txn->msg->num_bytes = i;
+ txn->msg->wbuf = wbuf;
+ txn->msg->rbuf = rbuf;
+ txn->rl = txn->msg->num_bytes + 4;
+ }
+
+ /* HW expects length field to be excluded */
+ txn->rl--;
+ puc = (u8 *)pbuf;
+ *pbuf = 0;
+ if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
+ la);
+ puc += 3;
+ } else {
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
+ la);
+ puc += 2;
+ }
+
+ if (slim_tid_txn(txn->mt, txn->mc))
+ *(puc++) = txn->tid;
+
+ if (slim_ec_txn(txn->mt, txn->mc)) {
+ *(puc++) = (txn->ec & 0xFF);
+ *(puc++) = (txn->ec >> 8) & 0xFF;
+ }
+
+ if (txn->msg && txn->msg->wbuf)
+ memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
+
+ ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl);
+ if (ret)
+ return ret;
+
+ timeout = wait_for_completion_timeout(&tx_sent, HZ);
+ if (!timeout) {
+ dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x\n", txn->mc,
+ txn->mt);
+ if (usr_msg)
+ slim_free_txn_tid(sctrl, txn);
+ return -ETIMEDOUT;
+ }
+
+ if (usr_msg) {
+ timeout = wait_for_completion_timeout(&done, HZ);
+ if (!timeout) {
+ dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x\n",
+ txn->mc, txn->mt);
+ slim_free_txn_tid(sctrl, txn);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
+ struct slim_msg_txn *txn)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret, timeout;
+
+ pm_runtime_get_sync(ctrl->dev);
+
+ txn->comp = &done;
+
+ ret = qcom_slim_ngd_xfer_msg(ctrl, txn);
+ if (ret)
+ goto pm_put;
+
+ timeout = wait_for_completion_timeout(&done, HZ);
+ if (!timeout) {
+ dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x\n", txn->mc,
+ txn->mt);
+ ret = -ETIMEDOUT;
+ goto pm_put;
+ }
+ return 0;
+pm_put:
+ /* drop the runtime-PM reference taken above on failure paths */
+ pm_runtime_put(ctrl->dev);
+ return ret;
+}
+
+static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt)
+{
+ struct slim_device *sdev = rt->dev;
+ struct slim_controller *ctrl = sdev->ctrl;
+ struct slim_val_inf msg = {0};
+ u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ u8 rbuf[SLIM_MSGQ_BUF_LEN];
+ struct slim_msg_txn txn = {0,};
+ int i, ret;
+
+ txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.la = SLIM_LA_MGR;
+ txn.ec = 0;
+ txn.msg = &msg;
+ txn.msg->num_bytes = 0;
+ txn.msg->wbuf = wbuf;
+ txn.msg->rbuf = rbuf;
+
+ for (i = 0; i < rt->num_ports; i++) {
+ struct slim_port *port = &rt->ports[i];
+
+ if (txn.msg->num_bytes == 0) {
+ int seg_interval = SLIM_SLOTS_PER_SUPERFRAME/rt->ratem;
+ int exp;
+
+ wbuf[txn.msg->num_bytes++] = sdev->laddr;
+ wbuf[txn.msg->num_bytes] = rt->bps >> 2 |
+ (port->ch.aux_fmt << 6);
+
+ /* Data channel segment interval not multiple of 3 */
+ exp = seg_interval % 3;
+ if (exp)
+ wbuf[txn.msg->num_bytes] |= BIT(5);
+
+ txn.msg->num_bytes++;
+ wbuf[txn.msg->num_bytes++] = exp << 4 | rt->prot;
+
+ if (rt->prot == SLIM_PROTO_ISO)
+ wbuf[txn.msg->num_bytes++] =
+ port->ch.prrate |
+ SLIM_CHANNEL_CONTENT_FL;
+ else
+ wbuf[txn.msg->num_bytes++] = port->ch.prrate;
+
+ ret = slim_alloc_txn_tid(ctrl, &txn);
+ if (ret) {
+ dev_err(&sdev->dev, "Fail to allocate TID\n");
+ return -ENXIO;
+ }
+ wbuf[txn.msg->num_bytes++] = txn.tid;
+ }
+ wbuf[txn.msg->num_bytes++] = port->ch.id;
+ }
+
+ txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
+ txn.rl = txn.msg->num_bytes + 4;
+ ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+ if (ret) {
+ slim_free_txn_tid(ctrl, &txn);
+ dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x\n", txn.mc,
+ txn.mt);
+ return ret;
+ }
+
+ txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+ txn.msg->num_bytes = 2;
+ wbuf[1] = sdev->laddr;
+ txn.rl = txn.msg->num_bytes + 4;
+
+ ret = slim_alloc_txn_tid(ctrl, &txn);
+ if (ret) {
+ dev_err(ctrl->dev, "Fail to allocate TID\n");
+ return ret;
+ }
+
+ wbuf[0] = txn.tid;
+ ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+ if (ret) {
+ slim_free_txn_tid(ctrl, &txn);
+ dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x\n", txn.mc,
+ txn.mt);
+ }
+
+ return ret;
+}
+
+static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
+ struct slim_eaddr *ea, u8 *laddr)
+{
+ struct slim_val_inf msg = {0};
+ struct slim_msg_txn txn;
+ u8 wbuf[10] = {0};
+ u8 rbuf[10] = {0};
+ int ret;
+
+ txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.la = SLIM_LA_MGR;
+ txn.ec = 0;
+
+ txn.mc = SLIM_USR_MC_ADDR_QUERY;
+ txn.rl = 11;
+ txn.msg = &msg;
+ txn.msg->num_bytes = 7;
+ txn.msg->wbuf = wbuf;
+ txn.msg->rbuf = rbuf;
+
+ ret = slim_alloc_txn_tid(ctrl, &txn);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = (u8)txn.tid;
+ memcpy(&wbuf[1], ea, sizeof(*ea));
+
+ ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+ if (ret) {
+ slim_free_txn_tid(ctrl, &txn);
+ return ret;
+ }
+
+ *laddr = rbuf[6];
+
+ return ret;
+}
+
+static int qcom_slim_ngd_exit_dma(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ if (ctrl->dma_rx_channel) {
+ dmaengine_terminate_sync(ctrl->dma_rx_channel);
+ dma_release_channel(ctrl->dma_rx_channel);
+ }
+
+ if (ctrl->dma_tx_channel) {
+ dmaengine_terminate_sync(ctrl->dma_tx_channel);
+ dma_release_channel(ctrl->dma_tx_channel);
+ }
+
+ ctrl->dma_tx_channel = ctrl->dma_rx_channel = NULL;
+
+ return 0;
+}
+
+static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ u32 cfg = readl_relaxed(ctrl->ngd->base);
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+ qcom_slim_ngd_init_dma(ctrl);
+
+ /* By default enable message queues */
+ cfg |= NGD_CFG_RX_MSGQ_EN;
+ cfg |= NGD_CFG_TX_MSGQ_EN;
+
+ /* Enable NGD if it's not already enabled */
+ if (!(cfg & NGD_CFG_ENABLE))
+ cfg |= NGD_CFG_ENABLE;
+
+ writel_relaxed(cfg, ctrl->ngd->base);
+}
+
+static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ enum qcom_slim_ngd_state cur_state = ctrl->state;
+ struct qcom_slim_ngd *ngd = ctrl->ngd;
+ u32 laddr, rx_msgq;
+ int timeout, ret = 0;
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
+ timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
+ if (!timeout)
+ return -EREMOTEIO;
+ }
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP ||
+ ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
+ ret = qcom_slim_qmi_power_request(ctrl, true);
+ if (ret) {
+ dev_err(ctrl->dev, "SLIM QMI power request failed:%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ctrl->ver = readl_relaxed(ctrl->base);
+ /* Version info in 16 MSbits */
+ ctrl->ver >>= 16;
+
+ laddr = readl_relaxed(ngd->base + NGD_STATUS);
+ if (laddr & NGD_LADDR) {
+ /*
+ * External MDM restart case where the ADSP itself was the
+ * active framer; for example, the modem restarted while
+ * playback was active.
+ */
+ if (cur_state == QCOM_SLIM_NGD_CTRL_AWAKE) {
+ dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
+ return 0;
+ }
+ return 0;
+ }
+
+ writel_relaxed(DEF_NGD_INT_MASK, ngd->base + NGD_INT_EN);
+ rx_msgq = readl_relaxed(ngd->base + NGD_RX_MSGQ_CFG);
+
+ writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
+ ngd->base + NGD_RX_MSGQ_CFG);
+ qcom_slim_ngd_setup(ctrl);
+
+ timeout = wait_for_completion_timeout(&ctrl->reconf, HZ);
+ if (!timeout) {
+ dev_err(ctrl->dev, "capability exchange timed-out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void qcom_slim_ngd_notify_slaves(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct slim_device *sbdev;
+ struct device_node *node;
+
+ for_each_child_of_node(ctrl->ngd->pdev->dev.of_node, node) {
+ sbdev = of_slim_get_device(&ctrl->ctrl, node);
+ if (!sbdev)
+ continue;
+
+ if (slim_get_logical_addr(sbdev))
+ dev_err(ctrl->dev, "Failed to get logical address\n");
+ }
+}
+
+static void qcom_slim_ngd_master_worker(struct work_struct *work)
+{
+ struct qcom_slim_ngd_ctrl *ctrl;
+ struct slim_msg_txn txn;
+ struct slim_val_inf msg = {0};
+ int retries = 0;
+ u8 wbuf[8];
+ int ret = 0;
+
+ ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work);
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.ec = 0;
+ txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
+ txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+ txn.la = SLIM_LA_MGR;
+ wbuf[0] = SAT_MAGIC_LSB;
+ wbuf[1] = SAT_MAGIC_MSB;
+ wbuf[2] = SAT_MSG_VER;
+ wbuf[3] = SAT_MSG_PROT;
+ txn.msg = &msg;
+ txn.msg->wbuf = wbuf;
+ txn.msg->num_bytes = 4;
+ txn.rl = 8;
+
+ dev_info(ctrl->dev, "SLIM SAT: Rcvd master capability\n");
+
+capability_retry:
+ ret = qcom_slim_ngd_xfer_msg(&ctrl->ctrl, &txn);
+ if (!ret) {
+ if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
+ complete(&ctrl->reconf);
+ else
+ dev_err(ctrl->dev, "unexpected state:%d\n",
+ ctrl->state);
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+ qcom_slim_ngd_notify_slaves(ctrl);
+
+ } else if (ret == -EIO) {
+ dev_err(ctrl->dev, "capability message NACKed, retrying\n");
+ if (retries < INIT_MX_RETRIES) {
+ msleep(DEF_RETRY_MS);
+ retries++;
+ goto capability_retry;
+ }
+ } else {
+ dev_err(ctrl->dev, "SLIM: capability TX failed:%d\n", ret);
+ }
+}
+
+static int qcom_slim_ngd_runtime_resume(struct device *dev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
+ ret = qcom_slim_ngd_power_up(ctrl);
+ if (ret) {
+ /* Did SSR cause this power-up failure? */
+ if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN)
+ ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
+ else
+ dev_err(ctrl->dev, "HW wakeup attempt during SSR\n");
+ } else {
+ ctrl->state = QCOM_SLIM_NGD_CTRL_AWAKE;
+ }
+
+ return 0;
+}
+
+static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
+{
+ if (enable) {
+ int ret = qcom_slim_qmi_init(ctrl, false);
+
+ if (ret) {
+ dev_err(ctrl->dev, "qmi init fail, ret:%d, state:%d\n",
+ ret, ctrl->state);
+ return ret;
+ }
+ /* controller state should be in sync with framework state */
+ complete(&ctrl->qmi.qmi_comp);
+ if (!pm_runtime_enabled(ctrl->dev) ||
+ !pm_runtime_suspended(ctrl->dev))
+ qcom_slim_ngd_runtime_resume(ctrl->dev);
+ else
+ pm_runtime_resume(ctrl->dev);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put(ctrl->dev);
+ } else {
+ qcom_slim_qmi_exit(ctrl);
+ }
+
+ return 0;
+}
+
+static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl,
+ struct qmi_service *service)
+{
+ struct qcom_slim_ngd_qmi *qmi =
+ container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+ struct qcom_slim_ngd_ctrl *ctrl =
+ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
+
+ qmi->svc_info.sq_family = AF_QIPCRTR;
+ qmi->svc_info.sq_node = service->node;
+ qmi->svc_info.sq_port = service->port;
+
+ qcom_slim_ngd_enable(ctrl, true);
+
+ return 0;
+}
+
+static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
+ struct qmi_service *service)
+{
+ struct qcom_slim_ngd_qmi *qmi =
+ container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+
+ qmi->svc_info.sq_node = 0;
+ qmi->svc_info.sq_port = 0;
+}
+
+static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
+ .new_server = qcom_slim_ngd_qmi_new_server,
+ .del_server = qcom_slim_ngd_qmi_del_server,
+};
+
+static int qcom_slim_ngd_qmi_svc_event_init(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct qcom_slim_ngd_qmi *qmi = &ctrl->qmi;
+ int ret;
+
+ ret = qmi_handle_init(&qmi->svc_event_hdl, 0,
+ &qcom_slim_ngd_qmi_svc_event_ops, NULL);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "qmi_handle_init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = qmi_add_lookup(&qmi->svc_event_hdl, SLIMBUS_QMI_SVC_ID,
+ SLIMBUS_QMI_SVC_V1, SLIMBUS_QMI_INS_ID);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "qmi_add_lookup failed: %d\n", ret);
+ qmi_handle_release(&qmi->svc_event_hdl);
+ }
+ return ret;
+}
+
+static void qcom_slim_ngd_qmi_svc_event_deinit(struct qcom_slim_ngd_qmi *qmi)
+{
+ qmi_handle_release(&qmi->svc_event_hdl);
+}
+
+static struct platform_driver qcom_slim_ngd_driver;
+#define QCOM_SLIM_NGD_DRV_NAME "qcom,slim-ngd"
+
+static const struct of_device_id qcom_slim_ngd_dt_match[] = {
+ {
+ .compatible = "qcom,slim-ngd-v1.5.0",
+ .data = &ngd_v1_5_offset_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match);
+
+static int of_qcom_slim_ngd_register(struct device *parent,
+ struct qcom_slim_ngd_ctrl *ctrl)
+{
+ const struct ngd_reg_offset_data *data;
+ struct qcom_slim_ngd *ngd;
+ struct device_node *node;
+ u32 id;
+
+ data = of_match_node(qcom_slim_ngd_dt_match, parent->of_node)->data;
+
+ for_each_available_child_of_node(parent->of_node, node) {
+ if (of_property_read_u32(node, "reg", &id))
+ continue;
+
+ ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
+ if (!ngd)
+ return -ENOMEM;
+
+ ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
+ if (!ngd->pdev) {
+ kfree(ngd);
+ return -ENOMEM;
+ }
+ ngd->id = id;
+ ngd->pdev->dev.parent = parent;
+ ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
+ ngd->pdev->dev.of_node = node;
+ ctrl->ngd = ngd;
+ platform_set_drvdata(ngd->pdev, ctrl);
+
+ platform_device_add(ngd->pdev);
+ ngd->base = ctrl->base + ngd->id * data->offset +
+ (ngd->id - 1) * data->size;
+ platform_driver_register(&qcom_slim_ngd_driver);
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int qcom_slim_ngd_probe(struct platform_device *pdev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ctrl->ctrl.dev = dev;
+ ret = slim_register_controller(&ctrl->ctrl);
+ if (ret) {
+ dev_err(dev, "error adding slim controller\n");
+ return ret;
+ }
+
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_noresume(dev);
+ ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
+ goto err;
+ }
+
+ INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
+ ctrl->mwq = create_singlethread_workqueue("ngd_master");
+ if (!ctrl->mwq) {
+ dev_err(&pdev->dev, "Failed to start master worker\n");
+ ret = -ENOMEM;
+ goto wq_err;
+ }
+
+ return 0;
+wq_err:
+ qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
+err:
+ slim_unregister_controller(&ctrl->ctrl);
+
+ return ret;
+}
+
+static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_slim_ngd_ctrl *ctrl;
+ struct resource *res;
+ int ret;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ctrl);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt,
+ IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "request IRQ failed\n");
+ return ret;
+ }
+
+ ctrl->dev = dev;
+ ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+ ctrl->framer.superfreq =
+ ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+
+ ctrl->ctrl.a_framer = &ctrl->framer;
+ ctrl->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+ ctrl->ctrl.get_laddr = qcom_slim_ngd_get_laddr;
+ ctrl->ctrl.enable_stream = qcom_slim_ngd_enable_stream;
+ ctrl->ctrl.xfer_msg = qcom_slim_ngd_xfer_msg;
+ ctrl->ctrl.wakeup = NULL;
+ ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
+
+ spin_lock_init(&ctrl->tx_buf_lock);
+ init_completion(&ctrl->reconf);
+ init_completion(&ctrl->qmi.qmi_comp);
+
+ return of_qcom_slim_ngd_register(dev, ctrl);
+}
+
+static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev)
+{
+ platform_driver_unregister(&qcom_slim_ngd_driver);
+
+ return 0;
+}
+
+static int qcom_slim_ngd_remove(struct platform_device *pdev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ slim_unregister_controller(&ctrl->ctrl);
+ qcom_slim_ngd_exit_dma(ctrl);
+ qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
+ if (ctrl->mwq)
+ destroy_workqueue(ctrl->mwq);
+
+ kfree(ctrl->ngd);
+ ctrl->ngd = NULL;
+ return 0;
+}
+
+static int qcom_slim_ngd_runtime_idle(struct device *dev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_AWAKE)
+ ctrl->state = QCOM_SLIM_NGD_CTRL_IDLE;
+ pm_request_autosuspend(dev);
+ return -EAGAIN;
+}
+
+#ifdef CONFIG_PM
+static int qcom_slim_ngd_runtime_suspend(struct device *dev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = qcom_slim_qmi_power_request(ctrl, false);
+ if (ret && ret != -EBUSY)
+ dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
+ if (!ret || ret == -ETIMEDOUT)
+ ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(
+ qcom_slim_ngd_runtime_suspend,
+ qcom_slim_ngd_runtime_resume,
+ qcom_slim_ngd_runtime_idle
+ )
+};
+
+static struct platform_driver qcom_slim_ngd_ctrl_driver = {
+ .probe = qcom_slim_ngd_ctrl_probe,
+ .remove = qcom_slim_ngd_ctrl_remove,
+ .driver = {
+ .name = "qcom,slim-ngd-ctrl",
+ .of_match_table = qcom_slim_ngd_dt_match,
+ },
+};
+
+static struct platform_driver qcom_slim_ngd_driver = {
+ .probe = qcom_slim_ngd_probe,
+ .remove = qcom_slim_ngd_remove,
+ .driver = {
+ .name = QCOM_SLIM_NGD_DRV_NAME,
+ .pm = &qcom_slim_ngd_dev_pm_ops,
+ },
+};
+
+module_platform_driver(qcom_slim_ngd_ctrl_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SLIMBus NGD controller");
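The logical-address query at the top of this file illustrates the TID-tracked
transaction pattern that the new slim_alloc_txn_tid()/slim_free_txn_tid()
helpers (exported from slimbus.h below) exist to support. A minimal sketch of
the pattern; xfer_sync() is an illustrative stand-in for a controller-specific
synchronous transfer such as qcom_slim_ngd_xfer_msg_sync():

	/* Sketch only: TID-tracked request over SLIMbus */
	static int slim_txn_with_tid_example(struct slim_controller *ctrl,
					     struct slim_msg_txn *txn, u8 *wbuf)
	{
		int ret;

		ret = slim_alloc_txn_tid(ctrl, txn);	/* reserve a transaction ID */
		if (ret < 0)
			return ret;

		wbuf[0] = (u8)txn->tid;		/* first payload byte carries the TID */

		ret = xfer_sync(ctrl, txn);	/* illustrative transport call */
		if (ret)
			slim_free_txn_tid(ctrl, txn);	/* drop the TID on failure; a
							 * successful reply is matched
							 * by TID on the RX path */
		return ret;
	}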
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 79f8e05d92dd..4399d1873e2d 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -17,6 +17,8 @@
/* SLIMbus message types. Related to interpretation of message code. */
#define SLIM_MSG_MT_CORE 0x0
+#define SLIM_MSG_MT_DEST_REFERRED_USER 0x2
+#define SLIM_MSG_MT_SRC_REFERRED_USER 0x6
/*
* SLIM Broadcast header format
@@ -43,11 +45,28 @@
#define SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS 0x2
#define SLIM_MSG_MC_REPORT_ABSENT 0xF
+/* Data channel management messages */
+#define SLIM_MSG_MC_CONNECT_SOURCE 0x10
+#define SLIM_MSG_MC_CONNECT_SINK 0x11
+#define SLIM_MSG_MC_DISCONNECT_PORT 0x14
+#define SLIM_MSG_MC_CHANGE_CONTENT 0x18
+
/* Clock pause Reconfiguration messages */
#define SLIM_MSG_MC_BEGIN_RECONFIGURATION 0x40
#define SLIM_MSG_MC_NEXT_PAUSE_CLOCK 0x4A
+#define SLIM_MSG_MC_NEXT_DEFINE_CHANNEL 0x50
+#define SLIM_MSG_MC_NEXT_DEFINE_CONTENT 0x51
+#define SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL 0x54
+#define SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL 0x55
+#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58
#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F
+/*
+ * Clock pause flag to indicate that the reconfig message
+ * corresponds to the clock pause sequence
+ */
+#define SLIM_MSG_CLK_PAUSE_SEQ_FLG (1U << 8)
+
/* Clock pause values per SLIMbus spec */
#define SLIM_CLK_FAST 0
#define SLIM_CLK_CONST_PHASE 1
@@ -61,7 +80,15 @@
/* Standard values per SLIMbus spec needed by controllers and devices */
#define SLIM_MAX_CLK_GEAR 10
#define SLIM_MIN_CLK_GEAR 1
+#define SLIM_SLOT_LEN_BITS 4
+
+/* Indicate that the frequency of the flow and the bus frequency are locked */
+#define SLIM_CHANNEL_CONTENT_FL BIT(7)
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME 6144
+#define SLIM_SLOTS_PER_SUPERFRAME (SLIM_CL_PER_SUPERFRAME >> 2)
+#define SLIM_SL_PER_SUPERFRAME (SLIM_CL_PER_SUPERFRAME >> 2)
/* Manager's logical address is set to 0xFF per spec */
#define SLIM_LA_MANAGER 0xFF
@@ -160,6 +187,169 @@ struct slim_sched {
};
/**
+ * enum slim_port_direction: SLIMbus port direction
+ *
+ * @SLIM_PORT_SINK: SLIMbus port is a sink
+ * @SLIM_PORT_SOURCE: SLIMbus port is a source
+ */
+enum slim_port_direction {
+ SLIM_PORT_SINK = 0,
+ SLIM_PORT_SOURCE,
+};
+/**
+ * enum slim_port_state: SLIMbus Port/Endpoint state machine
+ * according to SLIMbus Spec 2.0
+ * @SLIM_PORT_DISCONNECTED: SLIMbus port is disconnected
+ * entered from the unconfigured/configured state after a
+ * DISCONNECT_PORT or REMOVE_CHANNEL core command
+ * @SLIM_PORT_UNCONFIGURED: SLIMbus port is in unconfigured state,
+ * entered from the disconnected state after a CONNECT_SOURCE/SINK core command
+ * @SLIM_PORT_CONFIGURED: SLIMbus port is in configured state.
+ * entered from unconfigured state after DEFINE_CHANNEL, DEFINE_CONTENT
+ * and ACTIVATE_CHANNEL core commands. Ready for data transmission.
+ */
+enum slim_port_state {
+ SLIM_PORT_DISCONNECTED = 0,
+ SLIM_PORT_UNCONFIGURED,
+ SLIM_PORT_CONFIGURED,
+};
+
+/**
+ * enum slim_channel_state: SLIMbus channel state machine used by core.
+ * @SLIM_CH_STATE_DISCONNECTED: SLIMbus channel is disconnected
+ * @SLIM_CH_STATE_ALLOCATED: SLIMbus channel is allocated
+ * @SLIM_CH_STATE_ASSOCIATED: SLIMbus channel is associated with port
+ * @SLIM_CH_STATE_DEFINED: SLIMbus channel parameters are defined
+ * @SLIM_CH_STATE_CONTENT_DEFINED: SLIMbus channel content is defined
+ * @SLIM_CH_STATE_ACTIVE: SLIMbus channel is active and ready for data
+ * @SLIM_CH_STATE_REMOVED: SLIMbus channel is inactive and removed
+ */
+enum slim_channel_state {
+ SLIM_CH_STATE_DISCONNECTED = 0,
+ SLIM_CH_STATE_ALLOCATED,
+ SLIM_CH_STATE_ASSOCIATED,
+ SLIM_CH_STATE_DEFINED,
+ SLIM_CH_STATE_CONTENT_DEFINED,
+ SLIM_CH_STATE_ACTIVE,
+ SLIM_CH_STATE_REMOVED,
+};
+
+/**
+ * enum slim_ch_data_fmt: SLIMbus channel data Type identifiers according to
+ * Table 60 of SLIMbus Spec 1.01.01
+ * @SLIM_CH_DATA_FMT_NOT_DEFINED: Undefined
+ * @SLIM_CH_DATA_FMT_LPCM_AUDIO: LPCM audio
+ * @SLIM_CH_DATA_FMT_IEC61937_COMP_AUDIO: IEC61937 Compressed audio
+ * @SLIM_CH_DATA_FMT_PACKED_PDM_AUDIO: Packed PDM audio
+ */
+enum slim_ch_data_fmt {
+ SLIM_CH_DATA_FMT_NOT_DEFINED = 0,
+ SLIM_CH_DATA_FMT_LPCM_AUDIO = 1,
+ SLIM_CH_DATA_FMT_IEC61937_COMP_AUDIO = 2,
+ SLIM_CH_DATA_FMT_PACKED_PDM_AUDIO = 3,
+};
+
+/**
+ * enum slim_ch_aux_fmt: SLIMbus channel Aux Field format IDs according to
+ * Table 63 of SLIMbus Spec 2.0
+ * @SLIM_CH_AUX_FMT_NOT_APPLICABLE: Undefined
+ * @SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958: ZCUV for tunneling IEC60958
+ * @SLIM_CH_AUX_FMT_USER_DEFINED: User defined
+ */
+enum slim_ch_aux_bit_fmt {
+ SLIM_CH_AUX_FMT_NOT_APPLICABLE = 0,
+ SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958 = 1,
+ SLIM_CH_AUX_FMT_USER_DEFINED = 0xF,
+};
+
+/**
+ * struct slim_channel - SLIMbus channel, used for state machine
+ *
+ * @id: ID of channel
+ * @prrate: Presence rate of channel from Table 66 of SLIMbus 2.0 Specs
+ * @seg_dist: segment distribution code from Table 20 of SLIMbus 2.0 Specs
+ * @data_fmt: Data format of channel.
+ * @aux_fmt: Aux format for this channel.
+ * @state: channel state machine
+ */
+struct slim_channel {
+ int id;
+ int prrate;
+ int seg_dist;
+ enum slim_ch_data_fmt data_fmt;
+ enum slim_ch_aux_bit_fmt aux_fmt;
+ enum slim_channel_state state;
+};
+
+/**
+ * struct slim_port - SLIMbus port
+ *
+ * @id: Port id
+ * @direction: Port direction, Source or Sink.
+ * @state: state machine of port.
+ * @ch: channel associated with this port.
+ */
+struct slim_port {
+ int id;
+ enum slim_port_direction direction;
+ enum slim_port_state state;
+ struct slim_channel ch;
+};
+
+/**
+ * enum slim_transport_protocol: SLIMbus Transport protocol list from
+ * Table 47 of SLIMbus 2.0 specs.
+ * @SLIM_PROTO_ISO: Isochronous Protocol, no flow control as the data rate
+ * matches the channel rate; flow control is embedded in the data.
+ * @SLIM_PROTO_PUSH: Pushed Protocol, includes flow control; used to carry
+ * data whose rate is equal to, or lower than, the channel rate.
+ * @SLIM_PROTO_PULL: Pulled Protocol, similar usage to the pushed protocol,
+ * but pull is a unicast.
+ * @SLIM_PROTO_LOCKED: Locked Protocol
+ * @SLIM_PROTO_ASYNC_SMPLX: Asynchronous Protocol-Simplex
+ * @SLIM_PROTO_ASYNC_HALF_DUP: Asynchronous Protocol-Half-duplex
+ * @SLIM_PROTO_EXT_SMPLX: Extended Asynchronous Protocol-Simplex
+ * @SLIM_PROTO_EXT_HALF_DUP: Extended Asynchronous Protocol-Half-duplex
+ */
+enum slim_transport_protocol {
+ SLIM_PROTO_ISO = 0,
+ SLIM_PROTO_PUSH,
+ SLIM_PROTO_PULL,
+ SLIM_PROTO_LOCKED,
+ SLIM_PROTO_ASYNC_SMPLX,
+ SLIM_PROTO_ASYNC_HALF_DUP,
+ SLIM_PROTO_EXT_SMPLX,
+ SLIM_PROTO_EXT_HALF_DUP,
+};
+
+/**
+ * struct slim_stream_runtime - SLIMbus stream runtime instance
+ *
+ * @name: Name of the stream
+ * @dev: SLIM Device instance associated with this stream
+ * @direction: direction of stream
+ * @prot: Transport protocol used in this stream
+ * @rate: Data rate of samples
+ * @bps: bits per sample
+ * @ratem: rate multiplier, which is the data rate divided by the superframe rate
+ * @num_ports: number of ports
+ * @ports: pointer to instance of ports
+ * @node: list head for stream associated with slim device.
+ */
+struct slim_stream_runtime {
+ const char *name;
+ struct slim_device *dev;
+ int direction;
+ enum slim_transport_protocol prot;
+ unsigned int rate;
+ unsigned int bps;
+ unsigned int ratem;
+ int num_ports;
+ struct slim_port *ports;
+ struct list_head node;
+};
+
+/**
* struct slim_controller - Controls every instance of SLIMbus
* (similar to 'master' on SPI)
* @dev: Device interface to this driver
@@ -188,6 +378,10 @@ struct slim_sched {
* @wakeup: This function pointer implements controller-specific procedure
* to wake it up from clock-pause. Framework will call this to bring
* the controller out of clock pause.
+ * @enable_stream: This function pointer implements controller-specific procedure
+ * to enable a stream.
+ * @disable_stream: This function pointer implements controller-specific procedure
+ * to disable a stream.
*
* 'Manager device' is responsible for device management, bandwidth
* allocation, channel setup, and port associations per channel.
@@ -229,6 +423,8 @@ struct slim_controller {
struct slim_eaddr *ea, u8 laddr);
int (*get_laddr)(struct slim_controller *ctrl,
struct slim_eaddr *ea, u8 *laddr);
+ int (*enable_stream)(struct slim_stream_runtime *rt);
+ int (*disable_stream)(struct slim_stream_runtime *rt);
int (*wakeup)(struct slim_controller *ctrl);
};
@@ -240,6 +436,8 @@ int slim_unregister_controller(struct slim_controller *ctrl);
void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 l);
int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn);
int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart);
+int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn);
+void slim_free_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn);
static inline bool slim_tid_txn(u8 mt, u8 mc)
{
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
new file mode 100644
index 000000000000..2fa05324ed07
--- /dev/null
+++ b/drivers/slimbus/stream.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/slimbus.h>
+#include <uapi/sound/asound.h>
+#include "slimbus.h"
+
+/**
+ * struct segdist_code - Segment Distributions code from
+ * Table 20 of SLIMbus Specs Version 2.0
+ *
+ * @ratem: Channel Rate Multiplier (Segments per Superframe)
+ * @seg_interval: Number of slots between the first Slot of Segment
+ * and the first slot of the next consecutive Segment.
+ * @segdist_code: Segment Distribution Code SD[11:0]
+ * @seg_offset_mask: Segment offset mask in SD[11:0]
+ * @segdist_codes: List of all possible Segment Distribution codes.
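+ * For example, a rate multiplier of 6 maps to SD code 0xd00 with a
+ * 256-slot segment interval (see the table below).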
+ */
+static const struct segdist_code {
+ int ratem;
+ int seg_interval;
+ int segdist_code;
+ u32 seg_offset_mask;
+
+} segdist_codes[] = {
+ {1, 1536, 0x200, 0xdff},
+ {2, 768, 0x100, 0xcff},
+ {4, 384, 0x080, 0xc7f},
+ {8, 192, 0x040, 0xc3f},
+ {16, 96, 0x020, 0xc1f},
+ {32, 48, 0x010, 0xc0f},
+ {64, 24, 0x008, 0xc07},
+ {128, 12, 0x004, 0xc03},
+ {256, 6, 0x002, 0xc01},
+ {512, 3, 0x001, 0xc00},
+ {3, 512, 0xe00, 0x1ff},
+ {6, 256, 0xd00, 0x0ff},
+ {12, 128, 0xc80, 0x07f},
+ {24, 64, 0xc40, 0x03f},
+ {48, 32, 0xc20, 0x01f},
+ {96, 16, 0xc10, 0x00f},
+ {192, 8, 0xc08, 0x007},
+ {384, 4, 0xc04, 0x003},
+ {768, 2, 0xc02, 0x001},
+};
+
+/*
+ * Presence Rate table for all Natural Frequencies
+ * The Presence rate of a constant bitrate stream is the mean flow rate of the
+ * stream expressed in occupied Segments of that Data Channel per second.
+ * Table 66 from SLIMbus 2.0 Specs
+ *
+ * Index of the table corresponds to Presence rate code for the respective rate
+ * in the table.
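+ * For example, 48000 sits at index 3, so its Presence Rate code is 3.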
+ */
+static const int slim_presence_rate_table[] = {
+ 0, /* Not Indicated */
+ 12000,
+ 24000,
+ 48000,
+ 96000,
+ 192000,
+ 384000,
+ 768000,
+ 0, /* Reserved */
+ 110250,
+ 220500,
+ 441000,
+ 882000,
+ 176400,
+ 352800,
+ 705600,
+ 4000,
+ 8000,
+ 16000,
+ 32000,
+ 64000,
+ 128000,
+ 256000,
+ 512000,
+};
+
+/*
+ * slim_stream_allocate() - Allocate a new SLIMbus Stream
+ * @dev: SLIM device to be associated with
+ * @name: name of the stream
+ *
+ * This is the very first call for SLIMbus streaming; it allocates a new
+ * SLIMbus stream and returns a valid stream runtime pointer for the client
+ * to use in subsequent stream APIs. The state of the stream is set to ALLOCATED.
+ *
+ * Return: valid pointer on success and error code on failure.
+ * From ASoC DPCM framework, this state is linked to startup() operation.
+ */
+struct slim_stream_runtime *slim_stream_allocate(struct slim_device *dev,
+ const char *name)
+{
+ struct slim_stream_runtime *rt;
+
+ rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return ERR_PTR(-ENOMEM);
+
+ rt->name = kasprintf(GFP_KERNEL, "slim-%s", name);
+ if (!rt->name) {
+ kfree(rt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rt->dev = dev;
+ spin_lock(&dev->stream_list_lock);
+ list_add_tail(&rt->node, &dev->stream_list);
+ spin_unlock(&dev->stream_list_lock);
+
+ return rt;
+}
+EXPORT_SYMBOL_GPL(slim_stream_allocate);
+
+static int slim_connect_port_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[2];
+ struct slim_val_inf msg = {0, 2, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_CONNECT_SOURCE;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 6, stream->dev->laddr, &msg);
+
+ if (port->direction == SLIM_PORT_SINK)
+ txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+
+ wbuf[0] = port->id;
+ wbuf[1] = port->ch.id;
+ port->ch.state = SLIM_CH_STATE_ASSOCIATED;
+ port->state = SLIM_PORT_UNCONFIGURED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_disconnect_port(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[1];
+ struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_DISCONNECT_PORT;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+
+ wbuf[0] = port->id;
+ port->ch.state = SLIM_CH_STATE_DISCONNECTED;
+ port->state = SLIM_PORT_DISCONNECTED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_deactivate_remove_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[1];
+ struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+ int ret;
+
+ wbuf[0] = port->ch.id;
+ ret = slim_do_transfer(sdev->ctrl, &txn);
+ if (ret)
+ return ret;
+
+ txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
+ port->ch.state = SLIM_CH_STATE_REMOVED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_get_prate_code(int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(slim_presence_rate_table); i++) {
+ if (rate == slim_presence_rate_table[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * slim_stream_prepare() - Prepare a SLIMbus Stream
+ *
+ * @rt: instance of slim stream runtime to configure
+ * @cfg: new configuration for the stream
+ *
+ * This API will configure the SLIMbus stream with config parameters from cfg.
+ *
+ * Return: zero on success and error code on failure. From the ASoC DPCM
+ * framework, this state is linked to the hw_params() operation.
+ */
+int slim_stream_prepare(struct slim_stream_runtime *rt,
+ struct slim_stream_config *cfg)
+{
+ struct slim_controller *ctrl = rt->dev->ctrl;
+ struct slim_port *port;
+ int num_ports, i, port_id;
+
+ if (rt->ports) {
+ dev_err(&rt->dev->dev, "Stream already Prepared\n");
+ return -EINVAL;
+ }
+
+ num_ports = hweight32(cfg->port_mask);
+ rt->ports = kcalloc(num_ports, sizeof(*port), GFP_KERNEL);
+ if (!rt->ports)
+ return -ENOMEM;
+
+ rt->num_ports = num_ports;
+ rt->rate = cfg->rate;
+ rt->bps = cfg->bps;
+ rt->direction = cfg->direction;
+
+ if (cfg->rate % ctrl->a_framer->superfreq) {
+ /*
+ * data rate not exactly multiple of super frame,
+ * use PUSH/PULL protocol
+ */
+ if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ rt->prot = SLIM_PROTO_PUSH;
+ else
+ rt->prot = SLIM_PROTO_PULL;
+ } else {
+ rt->prot = SLIM_PROTO_ISO;
+ }
+
+ rt->ratem = cfg->rate/ctrl->a_framer->superfreq;
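+ /*
+ * e.g. with the usual 4 kHz superframe rate, 48000 Hz divides evenly
+ * (ISO, ratem 12) while 44100 Hz does not (PUSH/PULL).
+ */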
+
+ i = 0;
+ for_each_set_bit(port_id, &cfg->port_mask, SLIM_DEVICE_MAX_PORTS) {
+ port = &rt->ports[i];
+ port->state = SLIM_PORT_DISCONNECTED;
+ port->id = port_id;
+ port->ch.prrate = slim_get_prate_code(cfg->rate);
+ port->ch.id = cfg->chs[i];
+ port->ch.data_fmt = SLIM_CH_DATA_FMT_NOT_DEFINED;
+ port->ch.aux_fmt = SLIM_CH_AUX_FMT_NOT_APPLICABLE;
+ port->ch.state = SLIM_CH_STATE_ALLOCATED;
+
+ if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ port->direction = SLIM_PORT_SINK;
+ else
+ port->direction = SLIM_PORT_SOURCE;
+
+ slim_connect_port_channel(rt, port);
+ i++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_prepare);
+
+static int slim_define_channel_content(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[4];
+ struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
+
+ wbuf[0] = port->ch.id;
+ wbuf[1] = port->ch.prrate;
+
+ /* Frequency Locked for ISO Protocol */
+ if (stream->prot != SLIM_PROTO_ISO)
+ wbuf[1] |= SLIM_CHANNEL_CONTENT_FL;
+
+ wbuf[2] = port->ch.data_fmt | (port->ch.aux_fmt << 4);
+ wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
+ port->ch.state = SLIM_CH_STATE_CONTENT_DEFINED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_get_segdist_code(int ratem)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(segdist_codes); i++) {
+ if (segdist_codes[i].ratem == ratem)
+ return segdist_codes[i].segdist_code;
+ }
+
+ return -EINVAL;
+}
+
+static int slim_define_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[4];
+ struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
+
+ port->ch.seg_dist = slim_get_segdist_code(stream->ratem);
+
+ wbuf[0] = port->ch.id;
+ wbuf[1] = port->ch.seg_dist & 0xFF;
+ wbuf[2] = (stream->prot << 4) | ((port->ch.seg_dist & 0xF00) >> 8);
+ if (stream->prot == SLIM_PROTO_ISO)
+ wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
+ else
+ wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS + 1;
+
+ port->ch.state = SLIM_CH_STATE_DEFINED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_activate_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[1];
+ struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+
+ txn.msg->num_bytes = 1;
+ txn.msg->wbuf = wbuf;
+ wbuf[0] = port->ch.id;
+ port->ch.state = SLIM_CH_STATE_ACTIVE;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+/*
+ * slim_stream_enable() - Enable a prepared SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to enable
+ *
+ * This API will enable all the ports and channels associated with the
+ * SLIMbus stream
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to trigger() start operation.
+ */
+int slim_stream_enable(struct slim_stream_runtime *stream)
+{
+ DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+ 3, SLIM_LA_MANAGER, NULL);
+ struct slim_controller *ctrl = stream->dev->ctrl;
+ int ret, i;
+
+ if (ctrl->enable_stream) {
+ ret = ctrl->enable_stream(stream);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < stream->num_ports; i++)
+ stream->ports[i].ch.state = SLIM_CH_STATE_ACTIVE;
+
+ return ret;
+ }
+
+ ret = slim_do_transfer(ctrl, &txn);
+ if (ret)
+ return ret;
+
+ /* define channels first before activating them */
+ for (i = 0; i < stream->num_ports; i++) {
+ struct slim_port *port = &stream->ports[i];
+
+ slim_define_channel(stream, port);
+ slim_define_channel_content(stream, port);
+ }
+
+ for (i = 0; i < stream->num_ports; i++) {
+ struct slim_port *port = &stream->ports[i];
+
+ slim_activate_channel(stream, port);
+ port->state = SLIM_PORT_CONFIGURED;
+ }
+ txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+
+ return slim_do_transfer(ctrl, &txn);
+}
+EXPORT_SYMBOL_GPL(slim_stream_enable);
+
+/*
+ * slim_stream_disable() - Disable a SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to disable
+ *
+ * This API will disable all the ports and channels associated with the
+ * SLIMbus stream
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to trigger() pause operation.
+ */
+int slim_stream_disable(struct slim_stream_runtime *stream)
+{
+ DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+ 3, SLIM_LA_MANAGER, NULL);
+ struct slim_controller *ctrl = stream->dev->ctrl;
+ int ret, i;
+
+ if (ctrl->disable_stream)
+ ctrl->disable_stream(stream);
+
+ ret = slim_do_transfer(ctrl, &txn);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < stream->num_ports; i++)
+ slim_deactivate_remove_channel(stream, &stream->ports[i]);
+
+ txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+
+ return slim_do_transfer(ctrl, &txn);
+}
+EXPORT_SYMBOL_GPL(slim_stream_disable);
+
+/*
+ * slim_stream_unprepare() - Un-prepare a SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to unprepare
+ *
+ * This API will deallocate all the ports and channels associated with the
+ * SLIMbus stream
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to trigger() stop operation.
+ */
+int slim_stream_unprepare(struct slim_stream_runtime *stream)
+{
+ int i;
+
+ for (i = 0; i < stream->num_ports; i++)
+ slim_disconnect_port(stream, &stream->ports[i]);
+
+ kfree(stream->ports);
+ stream->ports = NULL;
+ stream->num_ports = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_unprepare);
+
+/*
+ * slim_stream_free() - Free a SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to free
+ *
+ * This API will free all the memory associated with the slim stream
+ * runtime; the user must not dereference the stream after this call.
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to shutdown() operation.
+ */
+int slim_stream_free(struct slim_stream_runtime *stream)
+{
+ struct slim_device *sdev = stream->dev;
+
+ spin_lock(&sdev->stream_list_lock);
+ list_del(&stream->node);
+ spin_unlock(&sdev->stream_list_lock);
+
+ kfree(stream->name);
+ kfree(stream);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_free);
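Taken together, the exported calls above form the client-side stream
lifecycle: allocate -> prepare -> enable -> disable -> unprepare -> free.
A minimal usage sketch, assuming a struct slim_stream_config (declared in
include/linux/slimbus.h, not part of this hunk) carrying the fields that
slim_stream_prepare() reads (rate, bps, direction, port_mask and chs):

	/* Sketch only: one playback port/channel, error handling abbreviated */
	static int slim_stream_example(struct slim_device *sdev, u8 *chan_ids)
	{
		struct slim_stream_config cfg = {
			.rate = 48000,		/* Presence Rate code 3 */
			.bps = 16,
			.direction = SNDRV_PCM_STREAM_PLAYBACK,
			.port_mask = BIT(0),	/* one port in use */
			.chs = chan_ids,	/* channel ids from the codec; field type assumed */
		};
		struct slim_stream_runtime *rt;
		int ret;

		rt = slim_stream_allocate(sdev, "example");
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ret = slim_stream_prepare(rt, &cfg);
		if (!ret)
			ret = slim_stream_enable(rt);
		if (ret) {
			slim_stream_unprepare(rt);
			slim_stream_free(rt);
			return ret;
		}

		/* ... stream runs; on shutdown: */
		slim_stream_disable(rt);
		slim_stream_unprepare(rt);
		return slim_stream_free(rt);
	}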
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index dcf8c8065508..a5577dd5eb08 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -628,10 +628,26 @@ static const struct of_device_id ddr_shimphy_dt_ids[] = {
static const struct of_device_id brcmstb_memc_of_match[] = {
{
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
+ .data = &ddr_seq,
+ },
+ {
.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
.data = &ddr_seq_b22,
},
{
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
+ .data = &ddr_seq_b22,
+ },
+ {
.compatible = "brcm,brcmstb-memc-ddr",
.data = &ddr_seq,
},
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 7a9fb9baa66d..8f80e8bbf29e 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -1,7 +1,9 @@
#
-# Freescale SOC drivers
+# NXP/Freescale QorIQ series SOC drivers
#
+menu "NXP/Freescale QorIQ SoC drivers"
+
source "drivers/soc/fsl/qbman/Kconfig"
source "drivers/soc/fsl/qe/Kconfig"
@@ -16,3 +18,14 @@ config FSL_GUTS
Initially only reading SVR and registering soc device are supported.
Other guts accesses, such as reading RCW, should eventually be moved
into this driver as well.
+
+config FSL_MC_DPIO
+ tristate "QorIQ DPAA2 DPIO driver"
+ depends on FSL_MC_BUS
+ help
+ Driver for the DPAA2 DPIO object. A DPIO provides queue and
+ buffer management facilities for software to interact with
+ other DPAA2 objects. This driver does not expose the DPIO
+ objects individually, but groups them under a service layer
+ API.
+endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 44b3bebef24a..803ef1bfb5ff 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
obj-$(CONFIG_FSL_GUTS) += guts.o
+obj-$(CONFIG_FSL_MC_DPIO) += dpio/
diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/soc/fsl/dpio/Makefile
index b9ff24c76582..b9ff24c76582 100644
--- a/drivers/staging/fsl-mc/bus/dpio/Makefile
+++ b/drivers/soc/fsl/dpio/Makefile
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index ab8f82ee7ee5..ab8f82ee7ee5 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index 11a90a90d827..b60b77bfaffa 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/fsl/mc.h>
-#include "../../include/dpaa2-io.h"
+#include <soc/fsl/dpaa2-io.h>
#include "qbman-portal.h"
#include "dpio.h"
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 14ed2beb7432..9b17f72349ed 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -6,7 +6,7 @@
*/
#include <linux/types.h>
#include <linux/fsl/mc.h>
-#include "../../include/dpaa2-io.h"
+#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index ff37c80e11a0..ff37c80e11a0 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index 49194c8e45f1..49194c8e45f1 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 116fafb28640..cf1d448ea468 100644
--- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -8,7 +8,7 @@
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include "../../include/dpaa2-global.h"
+#include <soc/fsl/dpaa2-global.h>
#include "qbman-portal.h"
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index 69db3c818742..89d1dd9969b6 100644
--- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -7,7 +7,7 @@
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H
-#include "../../include/dpaa2-fd.h"
+#include <soc/fsl/dpaa2-fd.h>
struct dpaa2_dq;
struct qbman_swp;
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index fb4e6bf0a0c4..d570cb5fd381 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,5 +1,5 @@
menuconfig FSL_DPAA
- bool "Freescale DPAA 1.x support"
+ bool "QorIQ DPAA1 framework support"
depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
select GENERIC_ALLOCATOR
help
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index 73a2e08b47ef..fabba17e9d65 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -3,7 +3,7 @@
#
config QUICC_ENGINE
- bool "Freescale QUICC Engine (QE) Support"
+ bool "QUICC Engine (QE) framework support"
depends on FSL_SOC && PPC32
select GENERIC_ALLOCATOR
select CRC32
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 3b27075c21a7..819bed0f5667 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -83,6 +83,33 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
+static void qe_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
+ if (__test_and_clear_bit(i, mask)) {
+ if (test_bit(i, bits))
+ qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ else
+ qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ }
+ }
+
+ out_be32(&regs->cpdata, qe_gc->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
@@ -298,6 +325,7 @@ static int __init qe_add_gpiochips(void)
gc->direction_output = qe_gpio_dir_out;
gc->get = qe_gpio_get;
gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
if (ret)
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 32f0748fd067..b3da635970ea 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -27,9 +27,16 @@
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0
+#define GPC_PGC_PCI_PDN 0x200
+#define GPC_PGC_PCI_SR 0x20c
+
#define GPC_PGC_GPU_PDN 0x260
#define GPC_PGC_GPU_PUPSCR 0x264
#define GPC_PGC_GPU_PDNSCR 0x268
+#define GPC_PGC_GPU_SR 0x26c
+
+#define GPC_PGC_DISP_PDN 0x240
+#define GPC_PGC_DISP_SR 0x24c
#define GPU_VPU_PUP_REQ BIT(1)
#define GPU_VPU_PDN_REQ BIT(0)
@@ -47,7 +54,6 @@ struct imx_pm_domain {
unsigned int reg_offs;
signed char cntr_pdn_bit;
unsigned int ipg_rate_mhz;
- unsigned int flags;
};
static inline struct imx_pm_domain *
@@ -62,9 +68,6 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
int iso, iso2sw;
u32 val;
- if (pd->flags & PGC_DOMAIN_FLAG_NO_PD)
- return -EBUSY;
-
/* Read ISO and ISO2SW power down delays */
regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
iso = val & 0x3f;
@@ -202,7 +205,7 @@ static int imx_pgc_power_domain_probe(struct platform_device *pdev)
goto genpd_err;
}
- device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
return 0;
@@ -288,26 +291,31 @@ static struct imx_pm_domain imx_gpc_domains[] = {
struct imx_gpc_dt_data {
int num_domains;
bool err009619_present;
+ bool err006287_present;
};
static const struct imx_gpc_dt_data imx6q_dt_data = {
.num_domains = 2,
.err009619_present = false,
+ .err006287_present = false,
};
static const struct imx_gpc_dt_data imx6qp_dt_data = {
.num_domains = 2,
.err009619_present = true,
+ .err006287_present = false,
};
static const struct imx_gpc_dt_data imx6sl_dt_data = {
.num_domains = 3,
.err009619_present = false,
+ .err006287_present = true,
};
static const struct imx_gpc_dt_data imx6sx_dt_data = {
.num_domains = 4,
.err009619_present = false,
+ .err006287_present = false,
};
static const struct of_device_id imx_gpc_dt_ids[] = {
@@ -318,10 +326,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = {
{ }
};
+static const struct regmap_range yes_ranges[] = {
+ regmap_reg_range(GPC_CNTR, GPC_CNTR),
+ regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
+ regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
+ regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
+};
+
+static const struct regmap_access_table access_table = {
+ .yes_ranges = yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(yes_ranges),
+};
+
static const struct regmap_config imx_gpc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .rd_table = &access_table,
+ .wr_table = &access_table,
.max_register = 0x2ac,
};
@@ -413,8 +435,13 @@ static int imx_gpc_probe(struct platform_device *pdev)
/* Disable PU power down in normal operation if ERR009619 is present */
if (of_id_data->err009619_present)
- imx_gpc_domains[GPC_PGC_DOMAIN_PU].flags |=
- PGC_DOMAIN_FLAG_NO_PD;
+ imx_gpc_domains[GPC_PGC_DOMAIN_PU].base.flags |=
+ GENPD_FLAG_ALWAYS_ON;
+
+ /* Keep DISP always on if ERR006287 is present */
+ if (of_id_data->err006287_present)
+ imx_gpc_domains[GPC_PGC_DOMAIN_DISPLAY].base.flags |=
+ GENPD_FLAG_ALWAYS_ON;
if (!pgc_node) {
ret = imx_gpc_old_dt_init(&pdev->dev, regmap,
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index f4e3bd40c72e..6ef18cf8f243 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -39,10 +39,15 @@
#define GPC_M4_PU_PDN_FLG 0x1bc
-
-#define PGC_MIPI 4
-#define PGC_PCIE 5
-#define PGC_USB_HSIC 8
+/*
+ * The PGC offset values in the Reference Manual
+ * (Rev. 1, 01/2018 and older) GPC chapter's
+ * GPC_PGC memory map are incorrect; the offset
+ * values below are taken from the design RTL.
+ */
+#define PGC_MIPI 16
+#define PGC_PCIE 17
+#define PGC_USB_HSIC 20
#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 2afae64061d8..4e931fdf4d09 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -146,6 +146,21 @@ static const u32 mt6397_regs[] = {
[PWRAP_DEW_CIPHER_SWRST] = 0xbc24,
};
+static const u32 mt6351_regs[] = {
+ [PWRAP_DEW_DIO_EN] = 0x02F2,
+ [PWRAP_DEW_READ_TEST] = 0x02F4,
+ [PWRAP_DEW_WRITE_TEST] = 0x02F6,
+ [PWRAP_DEW_CRC_EN] = 0x02FA,
+ [PWRAP_DEW_CRC_VAL] = 0x02FC,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0300,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x0302,
+ [PWRAP_DEW_CIPHER_EN] = 0x0304,
+ [PWRAP_DEW_CIPHER_RDY] = 0x0306,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0308,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x030A,
+ [PWRAP_DEW_RDDMY_NO] = 0x030C,
+};
+
enum pwrap_regs {
PWRAP_MUX_SEL,
PWRAP_WRAP_EN,
@@ -366,6 +381,39 @@ static int mt2701_regs[] = {
[PWRAP_ADC_RDATA_ADDR2] = 0x154,
};
+static int mt6797_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xC,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9C,
+ [PWRAP_WACS2_CMD] = 0xA0,
+ [PWRAP_WACS2_RDATA] = 0xA4,
+ [PWRAP_WACS2_VLDCLR] = 0xA8,
+ [PWRAP_INT_EN] = 0xC0,
+ [PWRAP_INT_FLG_RAW] = 0xC4,
+ [PWRAP_INT_FLG] = 0xC8,
+ [PWRAP_INT_CLR] = 0xCC,
+ [PWRAP_TIMER_EN] = 0xF4,
+ [PWRAP_WDT_UNIT] = 0xFC,
+ [PWRAP_WDT_SRC_EN] = 0x100,
+ [PWRAP_DCM_EN] = 0x1CC,
+ [PWRAP_DCM_DBC_PRD] = 0x1D4,
+};
+
static int mt7622_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -635,12 +683,14 @@ static int mt8135_regs[] = {
enum pmic_type {
PMIC_MT6323,
+ PMIC_MT6351,
PMIC_MT6380,
PMIC_MT6397,
};
enum pwrap_type {
PWRAP_MT2701,
+ PWRAP_MT6797,
PWRAP_MT7622,
PWRAP_MT8135,
PWRAP_MT8173,
@@ -1067,6 +1117,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 1, PWRAP_CIPHER_START);
break;
case PWRAP_MT2701:
+ case PWRAP_MT6797:
case PWRAP_MT8173:
pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
break;
@@ -1080,8 +1131,6 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x0);
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_KEY_SEL], 0x1);
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_IV_SEL], 0x2);
- pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_LOAD], 0x1);
- pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_START], 0x1);
switch (wrp->slave->type) {
case PMIC_MT6397:
@@ -1091,6 +1140,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
0x1);
break;
case PMIC_MT6323:
+ case PMIC_MT6351:
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
0x1);
break;
@@ -1367,6 +1417,15 @@ static const struct pwrap_slv_type pmic_mt6397 = {
.pwrap_write = pwrap_write16,
};
+static const struct pwrap_slv_type pmic_mt6351 = {
+ .dew_regs = mt6351_regs,
+ .type = PMIC_MT6351,
+ .regmap = &pwrap_regmap_config16,
+ .caps = 0,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
static const struct of_device_id of_slave_match_tbl[] = {
{
.compatible = "mediatek,mt6323",
@@ -1381,6 +1440,9 @@ static const struct of_device_id of_slave_match_tbl[] = {
.compatible = "mediatek,mt6397",
.data = &pmic_mt6397,
}, {
+ .compatible = "mediatek,mt6351",
+ .data = &pmic_mt6351,
+ }, {
/* sentinel */
}
};
@@ -1398,6 +1460,18 @@ static const struct pmic_wrapper_type pwrap_mt2701 = {
.init_soc_specific = pwrap_mt2701_init_soc_specific,
};
+static const struct pmic_wrapper_type pwrap_mt6797 = {
+ .regs = mt6797_regs,
+ .type = PWRAP_MT6797,
+ .arb_en_all = 0x01fff,
+ .int_en_all = 0xffffffc6,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct pmic_wrapper_type pwrap_mt7622 = {
.regs = mt7622_regs,
.type = PWRAP_MT7622,
@@ -1439,6 +1513,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt2701-pwrap",
.data = &pwrap_mt2701,
}, {
+ .compatible = "mediatek,mt6797-pwrap",
+ .data = &pwrap_mt6797,
+ }, {
.compatible = "mediatek,mt7622-pwrap",
.data = &pwrap_mt7622,
}, {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 9dc02f390ba3..ba79b609aca2 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers"
config QCOM_COMMAND_DB
bool "Qualcomm Command DB"
- depends on (ARCH_QCOM && OF) || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on OF_RESERVED_MEM
help
Command DB queries shared memory by key string for shared system
resources. Platform drivers that require to set state of a shared
@@ -39,6 +40,23 @@ config QCOM_GSBI
functions for connecting the underlying serial UART, SPI, and I2C
devices to the output pins.
+config QCOM_LLCC
+ tristate "Qualcomm Technologies, Inc. LLCC driver"
+ depends on ARCH_QCOM
+ help
+ Qualcomm Technologies, Inc. platform specific
+ Last Level Cache Controller (LLCC) driver. This provides interfaces
+ to clients that use the LLCC. Say yes here to enable LLCC slice
+ driver.
+
+config QCOM_SDM845_LLCC
+ tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
+ depends on QCOM_LLCC
+ help
+ Say yes here to enable the LLCC driver for SDM845. This provides
+ data required to configure LLCC so that clients can start using the
+ LLCC slices.
+
config QCOM_MDT_LOADER
tristate
select QCOM_SCM
@@ -74,6 +92,16 @@ config QCOM_RMTFS_MEM
Say y here if you intend to boot the modem remoteproc.
+config QCOM_RPMH
+ bool "Qualcomm RPM-Hardened (RPMH) Communication"
+ depends on ARCH_QCOM && ARM64 && OF || COMPILE_TEST
+ help
+ Support for communication with the hardened-RPM blocks in
+ Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
+ internal bus to transmit state requests for shared resources. A set
+ of hardware components aggregate requests for these resources and
+ help apply the aggregated state on the resource.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 19dcf957cb3a..f25b54cd6cf8 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+CFLAGS_rpmh-rsc.o := -I$(src)
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
@@ -8,6 +9,9 @@ obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
+qcom_rpmh-y += rpmh-rsc.o
+qcom_rpmh-y += rpmh.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
@@ -15,3 +19,5 @@ obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
+obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
new file mode 100644
index 000000000000..2e1e4f0a5db8
--- /dev/null
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+/*
+ * SCT (System Cache Table) entry consists of the following members:
+ * usecase_id: Unique id for the client's use case
+ * slice_id: llcc slice id for each client
+ * max_cap: The maximum capacity of the cache slice provided in KB
+ * priority: Priority of the client used to select victim line for replacement
+ * fixed_size: Boolean indicating if the slice has a fixed capacity
+ * bonus_ways: Bonus ways are additional ways to be used for any slice,
+ * if client ends up using more than reserved cache ways. Bonus
+ * ways are allocated only if they are not reserved for some
+ * other client.
+ * res_ways: Reserved ways for the cache slice, the reserved ways cannot
+ * be used by any other client than the one its assigned to.
+ * cache_mode: Each slice operates as a cache; this controls the mode of the
+ * slice: normal or TCM (Tightly Coupled Memory)
+ * probe_target_ways: Determines what ways to probe for access hit. When
+ * configured to 1 only bonus and reserved ways are probed.
+ * When configured to 0 all ways in llcc are probed.
+ * dis_cap_alloc: Disable capacity based allocation for a client
+ * retain_on_pc: If this bit is set and client has maintained active vote
+ * then the ways assigned to this client are not flushed on power
+ * collapse.
+ * activate_on_init: Activate the slice immediately after the SCT is programmed
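+ *
+ * For example, the LLCC_CPUSS entry below reads: slice id 1, 2816 KB max
+ * capacity, priority 1, not fixed-size, bonus ways 0xffc, reserved way 0x2,
+ * normal cache mode, probing all ways, and activated as soon as the SCT is
+ * programmed.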
+ */
+#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
+ { \
+ .usecase_id = uid, \
+ .slice_id = sid, \
+ .max_cap = mc, \
+ .priority = p, \
+ .fixed_size = fs, \
+ .bonus_ways = bway, \
+ .res_ways = rway, \
+ .cache_mode = cmod, \
+ .probe_target_ways = ptw, \
+ .dis_cap_alloc = dca, \
+ .retain_on_pc = rp, \
+ .activate_on_init = a, \
+ }
+
+static struct llcc_slice_config sdm845_data[] = {
+ SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
+ SCT_ENTRY(LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1),
+ SCT_ENTRY(LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
+};
+
+static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
+{
+ return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
+}
+
+static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sdm845-llcc", },
+ { }
+};
+
+static struct platform_driver sdm845_qcom_llcc_driver = {
+ .driver = {
+ .name = "sdm845-llcc",
+ .of_match_table = sdm845_qcom_llcc_of_match,
+ },
+ .probe = sdm845_qcom_llcc_probe,
+};
+module_platform_driver(sdm845_qcom_llcc_driver);
+
+MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
new file mode 100644
index 000000000000..54063a31132f
--- /dev/null
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define ACTIVATE BIT(0)
+#define DEACTIVATE BIT(1)
+#define ACT_CTRL_OPCODE_ACTIVATE BIT(0)
+#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1)
+#define ACT_CTRL_ACT_TRIG BIT(0)
+#define ACT_CTRL_OPCODE_SHIFT 0x01
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
+#define ATTR1_FIXED_SIZE_SHIFT 0x03
+#define ATTR1_PRIORITY_SHIFT 0x04
+#define ATTR1_MAX_CAP_SHIFT 0x10
+#define ATTR0_RES_WAYS_MASK GENMASK(11, 0)
+#define ATTR0_BONUS_WAYS_MASK GENMASK(27, 16)
+#define ATTR0_BONUS_WAYS_SHIFT 0x10
+#define LLCC_STATUS_READ_DELAY 100
+
+#define CACHE_LINE_SIZE_SHIFT 6
+
+#define LLCC_COMMON_STATUS0 0x0003000c
+#define LLCC_LB_CNT_MASK GENMASK(31, 28)
+#define LLCC_LB_CNT_SHIFT 28
+
+#define MAX_CAP_TO_BYTES(n) (n * SZ_1K)
+#define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K)
+#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
+#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
+#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+
+#define BANK_OFFSET_STRIDE 0x80000
+
+static struct llcc_drv_data *drv_data;
+
+static const struct regmap_config llcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id for the client
+ *
+ * A pointer to the llcc slice descriptor will be returned on success, and
+ * an error pointer is returned on failure
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ const struct llcc_slice_config *cfg;
+ struct llcc_slice_desc *desc;
+ u32 sz, count;
+
+ cfg = drv_data->cfg;
+ sz = drv_data->cfg_size;
+
+ for (count = 0; cfg && count < sz; count++, cfg++)
+ if (cfg->usecase_id == uid)
+ break;
+
+ if (count == sz || !cfg)
+ return ERR_PTR(-ENODEV);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->slice_id = cfg->slice_id;
+ desc->slice_size = cfg->max_cap;
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_getd);
+
+/**
+ * llcc_slice_putd - put the llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+ kfree(desc);
+}
+EXPORT_SYMBOL_GPL(llcc_slice_putd);
+
+static int llcc_update_act_ctrl(u32 sid,
+ u32 act_ctrl_reg_val, u32 status)
+{
+ u32 act_ctrl_reg;
+ u32 status_reg;
+ u32 slice_status;
+ int ret;
+
+ act_ctrl_reg = drv_data->bcast_off + LLCC_TRP_ACT_CTRLn(sid);
+ status_reg = drv_data->bcast_off + LLCC_TRP_STATUSn(sid);
+
+ /* Set the ACTIVE trigger */
+ act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ /* Clear the ACTIVE trigger */
+ act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(drv_data->regmap, status_reg,
+ slice_status, !(slice_status & status),
+ 0, LLCC_STATUS_READ_DELAY);
+ return ret;
+}
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ int ret;
+ u32 act_ctrl_val;
+
+ mutex_lock(&drv_data->lock);
+ if (test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+
+ act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ DEACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __set_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_activate);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ u32 act_ctrl_val;
+ int ret;
+
+ mutex_lock(&drv_data->lock);
+ if (!test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+ act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ ACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __clear_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_deactivate);
+
+/**
+ * llcc_get_slice_id - return the slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ return desc->slice_id;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_id);
+
+/**
+ * llcc_get_slice_size - return the slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ return desc->slice_size;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_size);
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev)
+{
+ int i;
+ u32 attr1_cfg;
+ u32 attr0_cfg;
+ u32 attr1_val;
+ u32 attr0_val;
+ u32 max_cap_cacheline;
+ u32 sz;
+ int ret = 0;
+ const struct llcc_slice_config *llcc_table;
+ struct llcc_slice_desc desc;
+ u32 bcast_off = drv_data->bcast_off;
+
+ sz = drv_data->cfg_size;
+ llcc_table = drv_data->cfg;
+
+ for (i = 0; i < sz; i++) {
+ attr1_cfg = bcast_off +
+ LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
+ attr0_cfg = bcast_off +
+ LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
+
+ attr1_val = llcc_table[i].cache_mode;
+ attr1_val |= llcc_table[i].probe_target_ways <<
+ ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= llcc_table[i].fixed_size <<
+ ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= llcc_table[i].priority <<
+ ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
+
+ /*
+ * LLCC instances can vary for each target.
+ * The SW writes to the broadcast register, which gets propagated
+ * to each llcc instance (llcc0,..llccN).
+ * Since the size of the memory is divided equally amongst the
+ * llcc instances, we need to configure the max cap accordingly.
+ */
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+ attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+ ret = regmap_write(drv_data->regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+ ret = regmap_write(drv_data->regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+ if (llcc_table[i].activate_on_init) {
+ desc.slice_id = llcc_table[i].slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+ }
+ return ret;
+}
+
+int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *llcc_cfg, u32 sz)
+{
+ u32 num_banks;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ int ret, i;
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drv_data->regmap = devm_regmap_init_mmio(dev, base,
+ &llcc_regmap_config);
+ if (IS_ERR(drv_data->regmap))
+ return PTR_ERR(drv_data->regmap);
+
+ ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
+ &num_banks);
+ if (ret)
+ return ret;
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
+ drv_data->num_banks = num_banks;
+
+ for (i = 0; i < sz; i++)
+ if (llcc_cfg[i].slice_id > drv_data->max_slices)
+ drv_data->max_slices = llcc_cfg[i].slice_id;
+
+ drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
+ GFP_KERNEL);
+ if (!drv_data->offsets)
+ return -ENOMEM;
+
+ for (i = 0; i < num_banks; i++)
+ drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
+
+ drv_data->bcast_off = num_banks * BANK_OFFSET_STRIDE;
+
+ drv_data->bitmap = devm_kcalloc(dev,
+ BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!drv_data->bitmap)
+ return -ENOMEM;
+
+ drv_data->cfg = llcc_cfg;
+ drv_data->cfg_size = sz;
+ mutex_init(&drv_data->lock);
+ platform_set_drvdata(pdev, drv_data);
+
+ return qcom_llcc_cfg_program(pdev);
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_probe);
+
+MODULE_LICENSE("GPL v2");
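
For context, a typical client of this new API looks up a slice descriptor by usecase id, activates it, and releases it when done. The following is a minimal sketch, not part of the patch; the usecase id passed in is a placeholder for a real id from include/linux/soc/qcom/llcc-qcom.h, and the slice size is assumed to be in max_cap units (kilobytes).

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/soc/qcom/llcc-qcom.h>

static int example_use_llcc_slice(u32 uid)	/* uid: placeholder usecase id */
{
	struct llcc_slice_desc *desc;
	int ret;

	desc = llcc_slice_getd(uid);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = llcc_slice_activate(desc);
	if (ret)
		goto out;

	pr_info("slice %d active, size %zu\n",
		llcc_get_slice_id(desc), llcc_get_slice_size(desc));

	ret = llcc_slice_deactivate(desc);
out:
	llcc_slice_putd(desc);
	return ret;
}
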
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index dc09d7ac905f..1c488024c698 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -74,23 +74,10 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw)
}
EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
-/**
- * qcom_mdt_load() - load the firmware which header is loaded as fw
- * @dev: device handle to associate resources with
- * @fw: firmware object for the mdt file
- * @firmware: name of the firmware, for construction of segment file names
- * @pas_id: PAS identifier
- * @mem_region: allocated memory region to load firmware into
- * @mem_phys: physical address of allocated memory region
- * @mem_size: size of the allocated memory region
- * @reloc_base: adjusted physical address after relocation
- *
- * Returns 0 on success, negative errno otherwise.
- */
-int qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *firmware, int pas_id, void *mem_region,
- phys_addr_t mem_phys, size_t mem_size,
- phys_addr_t *reloc_base)
+static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base, bool pas_init)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
@@ -121,10 +108,12 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (!fw_name)
return -ENOMEM;
- ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size);
- if (ret) {
- dev_err(dev, "invalid firmware metadata\n");
- goto out;
+ if (pas_init) {
+ ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size);
+ if (ret) {
+ dev_err(dev, "invalid firmware metadata\n");
+ goto out;
+ }
}
for (i = 0; i < ehdr->e_phnum; i++) {
@@ -144,10 +133,13 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
}
if (relocate) {
- ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
- if (ret) {
- dev_err(dev, "unable to setup relocation\n");
- goto out;
+ if (pas_init) {
+ ret = qcom_scm_pas_mem_setup(pas_id, mem_phys,
+ max_addr - min_addr);
+ if (ret) {
+ dev_err(dev, "unable to setup relocation\n");
+ goto out;
+ }
}
/*
@@ -202,7 +194,52 @@ out:
return ret;
}
+
+/**
+ * qcom_mdt_load() - load the firmware whose header is loaded as fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, true);
+}
EXPORT_SYMBOL_GPL(qcom_mdt_load);
+/**
+ * qcom_mdt_load_no_init() - load the firmware whose header is loaded as fw, without PAS init
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, false);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
+
MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
MODULE_LICENSE("GPL v2");
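
For readers unfamiliar with the split introduced here: qcom_mdt_load_no_init() is for callers that authenticate the image and set up the memory region by some other means, so the SCM PAS calls are skipped. A minimal usage sketch, assuming a placeholder firmware name and a memory region already carved out by the caller:

#include <linux/firmware.h>
#include <linux/soc/qcom/mdt_loader.h>

static int example_mdt_load(struct device *dev, void *mem_region,
			    phys_addr_t mem_phys, size_t mem_size)
{
	const struct firmware *fw;
	phys_addr_t reloc_base;
	int ret;

	ret = request_firmware(&fw, "example.mdt", dev); /* placeholder name */
	if (ret)
		return ret;

	/* No qcom_scm_pas_init_image()/qcom_scm_pas_mem_setup() calls made */
	ret = qcom_mdt_load_no_init(dev, fw, "example.mdt", 0 /* pas_id */,
				    mem_region, mem_phys, mem_size,
				    &reloc_base);
	release_firmware(fw);
	return ret;
}
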
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index c8999e38b005..8a3678c2e83c 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -184,6 +184,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
device_initialize(&rmtfs_mem->dev);
rmtfs_mem->dev.parent = &pdev->dev;
rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
rmtfs_mem->size, MEMREMAP_WC);
@@ -206,8 +207,6 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
goto put_device;
}
- rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
-
ret = of_property_read_u32(node, "qcom,vmid", &vmid);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
new file mode 100644
index 000000000000..a7bbbb67991c
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __RPM_INTERNAL_H__
+#define __RPM_INTERNAL_H__
+
+#include <linux/bitmap.h>
+#include <soc/qcom/tcs.h>
+
+#define TCS_TYPE_NR 4
+#define MAX_CMDS_PER_TCS 16
+#define MAX_TCS_PER_TYPE 3
+#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
+
+struct rsc_drv;
+
+/**
+ * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
+ * to the controller
+ *
+ * @drv: the controller
+ * @type: type of the TCS in this group - active, sleep, wake
+ * @mask: mask of the TCSes relative to all the TCSes in the RSC
+ * @offset: start of the TCS group relative to the TCSes in the RSC
+ * @num_tcs: number of TCSes in this type
+ * @ncpt: number of commands in each TCS
+ * @lock: lock for synchronizing writes to this TCS
+ * @req: requests that are sent from the TCS
+ * @cmd_cache: flattened cache of cmds in sleep/wake TCS
+ * @slots: indicates which entries in @cmd_cache are occupied
+ */
+struct tcs_group {
+ struct rsc_drv *drv;
+ int type;
+ u32 mask;
+ u32 offset;
+ int num_tcs;
+ int ncpt;
+ spinlock_t lock;
+ const struct tcs_request *req[MAX_TCS_PER_TYPE];
+ u32 *cmd_cache;
+ DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
+};
+
+/**
+ * struct rpmh_request: the message to be sent to rpmh-rsc
+ *
+ * @msg: the request
+ * @cmd: the payload that will be part of the @msg
+ * @completion: triggered when request is done
+ * @dev: the device making the request
+ * @err: err return from the controller
+ * @needs_free: whether the request object was dynamically allocated and must be freed
+ */
+struct rpmh_request {
+ struct tcs_request msg;
+ struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
+ struct completion *completion;
+ const struct device *dev;
+ int err;
+ bool needs_free;
+};
+
+/**
+ * struct rpmh_ctrlr: our representation of the controller
+ *
+ * @cache: the list of cached requests
+ * @cache_lock: synchronize access to the cache data
+ * @dirty: was the cache updated since flush
+ * @batch_cache: Cache sleep and wake requests sent as batch
+ */
+struct rpmh_ctrlr {
+ struct list_head cache;
+ spinlock_t cache_lock;
+ bool dirty;
+ struct list_head batch_cache;
+};
+
+/**
+ * struct rsc_drv: the Direct Resource Voter (DRV) of the
+ * Resource State Coordinator controller (RSC)
+ *
+ * @name: controller identifier
+ * @tcs_base: start address of the TCS registers in this controller
+ * @id: instance id in the controller (Direct Resource Voter)
+ * @num_tcs: number of TCSes in this DRV
+ * @tcs: TCS groups
+ * @tcs_in_use: s/w state of the TCS
+ * @lock: synchronize state of the controller
+ * @client: handle to the DRV's client.
+ */
+struct rsc_drv {
+ const char *name;
+ void __iomem *tcs_base;
+ int id;
+ int num_tcs;
+ struct tcs_group tcs[TCS_TYPE_NR];
+ DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+ spinlock_t lock;
+ struct rpmh_ctrlr client;
+};
+
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
+ const struct tcs_request *msg);
+int rpmh_rsc_invalidate(struct rsc_drv *drv);
+
+void rpmh_tx_done(const struct tcs_request *msg, int r);
+
+#endif /* __RPM_INTERNAL_H__ */
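
The sleep/wake bookkeeping above flattens all command slots of a TCS group into the single @slots bitmap and the @cmd_cache array; a flattened slot index maps back to a TCS and a command position through the commands-per-TCS count (ncpt). A small illustrative helper, not part of the patch, mirroring the arithmetic rpmh-rsc.c uses in find_slots():

/* Illustrative only: map a flattened slot index to (tcs_id, cmd_id). */
static void slot_to_tcs_cmd(const struct tcs_group *tcs, int slot,
			    int *tcs_id, int *cmd_id)
{
	*tcs_id = tcs->offset + slot / tcs->ncpt; /* which TCS in the group */
	*cmd_id = slot % tcs->ncpt;		  /* command within that TCS */
}
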
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
new file mode 100644
index 000000000000..ee75da66d64b
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+#include "rpmh-internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace-rpmh.h"
+
+#define RSC_DRV_TCS_OFFSET 672
+#define RSC_DRV_CMD_OFFSET 20
+
+/* DRV Configuration Information Register */
+#define DRV_PRNT_CHLD_CONFIG 0x0C
+#define DRV_NUM_TCS_MASK 0x3F
+#define DRV_NUM_TCS_SHIFT 6
+#define DRV_NCPT_MASK 0x1F
+#define DRV_NCPT_SHIFT 27
+
+/* Register offsets */
+#define RSC_DRV_IRQ_ENABLE 0x00
+#define RSC_DRV_IRQ_STATUS 0x04
+#define RSC_DRV_IRQ_CLEAR 0x08
+#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
+#define RSC_DRV_CONTROL 0x14
+#define RSC_DRV_STATUS 0x18
+#define RSC_DRV_CMD_ENABLE 0x1C
+#define RSC_DRV_CMD_MSGID 0x30
+#define RSC_DRV_CMD_ADDR 0x34
+#define RSC_DRV_CMD_DATA 0x38
+#define RSC_DRV_CMD_STATUS 0x3C
+#define RSC_DRV_CMD_RESP_DATA 0x40
+
+#define TCS_AMC_MODE_ENABLE BIT(16)
+#define TCS_AMC_MODE_TRIGGER BIT(24)
+
+/* TCS CMD register bit mask */
+#define CMD_MSGID_LEN 8
+#define CMD_MSGID_RESP_REQ BIT(8)
+#define CMD_MSGID_WRITE BIT(16)
+#define CMD_STATUS_ISSUED BIT(8)
+#define CMD_STATUS_COMPL BIT(16)
+
+static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+{
+ return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+ RSC_DRV_CMD_OFFSET * cmd_id);
+}
+
+static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
+ u32 data)
+{
+ writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+ RSC_DRV_CMD_OFFSET * cmd_id);
+}
+
+static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
+{
+ writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+}
+
+static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
+{
+ writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+ for (;;) {
+ if (data == readl(drv->tcs_base + reg +
+ RSC_DRV_TCS_OFFSET * tcs_id))
+ break;
+ udelay(1);
+ }
+}
+
+static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+{
+ return !test_bit(tcs_id, drv->tcs_in_use) &&
+ read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+}
+
+static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
+{
+ return &drv->tcs[type];
+}
+
+static int tcs_invalidate(struct rsc_drv *drv, int type)
+{
+ int m;
+ struct tcs_group *tcs;
+
+ tcs = get_tcs_of_type(drv, type);
+
+ spin_lock(&tcs->lock);
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
+ spin_unlock(&tcs->lock);
+ return 0;
+ }
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+ if (!tcs_is_free(drv, m)) {
+ spin_unlock(&tcs->lock);
+ return -EAGAIN;
+ }
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
+ }
+ bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+ spin_unlock(&tcs->lock);
+
+ return 0;
+}
+
+/**
+ * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
+ *
+ * @drv: the RSC controller
+ */
+int rpmh_rsc_invalidate(struct rsc_drv *drv)
+{
+ int ret;
+
+ ret = tcs_invalidate(drv, SLEEP_TCS);
+ if (!ret)
+ ret = tcs_invalidate(drv, WAKE_TCS);
+
+ return ret;
+}
+
+static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
+ const struct tcs_request *msg)
+{
+ int type, ret;
+ struct tcs_group *tcs;
+
+ switch (msg->state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ type = ACTIVE_TCS;
+ break;
+ case RPMH_WAKE_ONLY_STATE:
+ type = WAKE_TCS;
+ break;
+ case RPMH_SLEEP_STATE:
+ type = SLEEP_TCS;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * If we are making an active request on a RSC that does not have a
+ * dedicated TCS for active state use, then re-purpose a wake TCS to
+ * send active votes.
+ * NOTE: The driver must be aware that this RSC does not have a
+ * dedicated AMC, and therefore would invalidate the sleep and wake
+ * TCSes before making an active state request.
+ */
+ tcs = get_tcs_of_type(drv, type);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
+ tcs = get_tcs_of_type(drv, WAKE_TCS);
+ if (tcs->num_tcs) {
+ ret = rpmh_rsc_invalidate(drv);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+
+ return tcs;
+}
+
+static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
+ int tcs_id)
+{
+ struct tcs_group *tcs;
+ int i;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[i];
+ if (tcs->mask & BIT(tcs_id))
+ return tcs->req[tcs_id - tcs->offset];
+ }
+
+ return NULL;
+}
+
+/**
+ * tcs_tx_done: TX Done interrupt handler
+ */
+static irqreturn_t tcs_tx_done(int irq, void *p)
+{
+ struct rsc_drv *drv = p;
+ int i, j, err = 0;
+ unsigned long irq_status;
+ const struct tcs_request *req;
+ struct tcs_cmd *cmd;
+
+ irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
+
+ for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
+ req = get_req_from_tcs(drv, i);
+ if (!req) {
+ WARN_ON(1);
+ goto skip;
+ }
+
+ err = 0;
+ for (j = 0; j < req->num_cmds; j++) {
+ u32 sts;
+
+ cmd = &req->cmds[j];
+ sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
+ if (!(sts & CMD_STATUS_ISSUED) ||
+ ((req->wait_for_compl || cmd->wait) &&
+ !(sts & CMD_STATUS_COMPL))) {
+ pr_err("Incomplete request: %s: addr=%#x data=%#x",
+ drv->name, cmd->addr, cmd->data);
+ err = -EIO;
+ }
+ }
+
+ trace_rpmh_tx_done(drv, i, req, err);
+skip:
+ /* Reclaim the TCS */
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
+ write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+ spin_lock(&drv->lock);
+ clear_bit(i, drv->tcs_in_use);
+ spin_unlock(&drv->lock);
+ if (req)
+ rpmh_tx_done(req, err);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
+ const struct tcs_request *msg)
+{
+ u32 msgid, cmd_msgid;
+ u32 cmd_enable = 0;
+ u32 cmd_complete;
+ struct tcs_cmd *cmd;
+ int i, j;
+
+ cmd_msgid = CMD_MSGID_LEN;
+ cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
+ cmd_msgid |= CMD_MSGID_WRITE;
+
+ cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+
+ for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
+ cmd = &msg->cmds[i];
+ cmd_enable |= BIT(j);
+ cmd_complete |= cmd->wait << j;
+ msgid = cmd_msgid;
+ msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
+
+ write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
+ write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
+ write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
+ trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+ }
+
+ write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
+ cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
+}
+
+static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
+{
+ u32 enable;
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+}
+
+static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
+{
+ unsigned long curr_enabled;
+ u32 addr;
+ int i, j, k;
+ int tcs_id = tcs->offset;
+
+ for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
+ if (tcs_is_free(drv, tcs_id))
+ continue;
+
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+
+ for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+ for (k = 0; k < msg->num_cmds; k++) {
+ if (addr == msg->cmds[k].addr)
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int find_free_tcs(struct tcs_group *tcs)
+{
+ int i;
+
+ for (i = 0; i < tcs->num_tcs; i++) {
+ if (tcs_is_free(tcs->drv, tcs->offset + i))
+ return tcs->offset + i;
+ }
+
+ return -EBUSY;
+}
+
+static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id;
+ unsigned long flags;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&tcs->lock, flags);
+ spin_lock(&drv->lock);
+ /*
+ * The h/w does not like it if we send a request to the same address
+ * while one is already in-flight or being processed.
+ */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret) {
+ spin_unlock(&drv->lock);
+ goto done_write;
+ }
+
+ tcs_id = find_free_tcs(tcs);
+ if (tcs_id < 0) {
+ ret = tcs_id;
+ spin_unlock(&drv->lock);
+ goto done_write;
+ }
+
+ tcs->req[tcs_id - tcs->offset] = msg;
+ set_bit(tcs_id, drv->tcs_in_use);
+ spin_unlock(&drv->lock);
+
+ __tcs_buffer_write(drv, tcs_id, 0, msg);
+ __tcs_trigger(drv, tcs_id);
+
+done_write:
+ spin_unlock_irqrestore(&tcs->lock, flags);
+ return ret;
+}
+
+/**
+ * rpmh_rsc_send_data: Validate the incoming message and write to the
+ * appropriate TCS block.
+ *
+ * @drv: the controller
+ * @msg: the data to be sent
+ *
+ * Return: 0 on success, -EINVAL on error.
+ * Note: This call blocks until valid data is written to the TCS.
+ */
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ int ret;
+
+ if (!msg || !msg->cmds || !msg->num_cmds ||
+ msg->num_cmds > MAX_RPMH_PAYLOAD) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ do {
+ ret = tcs_write(drv, msg);
+ if (ret == -EBUSY) {
+ pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
+ msg->cmds[0].addr);
+ udelay(10);
+ }
+ } while (ret == -EBUSY);
+
+ return ret;
+}
+
+static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
+ int len)
+{
+ int i, j;
+
+ /* Check for already cached commands */
+ for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
+ if (tcs->cmd_cache[i] != cmd[0].addr)
+ continue;
+ if (i + len >= tcs->num_tcs * tcs->ncpt)
+ goto seq_err;
+ for (j = 0; j < len; j++) {
+ if (tcs->cmd_cache[i + j] != cmd[j].addr)
+ goto seq_err;
+ }
+ return i;
+ }
+
+ return -ENODATA;
+
+seq_err:
+ WARN(1, "Message does not match previous sequence.\n");
+ return -EINVAL;
+}
+
+static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
+ int *tcs_id, int *cmd_id)
+{
+ int slot, offset;
+ int i = 0;
+
+ /* Find if we already have the msg in our TCS */
+ slot = find_match(tcs, msg->cmds, msg->num_cmds);
+ if (slot >= 0)
+ goto copy_data;
+
+ /* Keep retrying until we can fit the full payload in a TCS */
+ do {
+ slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
+ i, msg->num_cmds, 0);
+ if (slot == tcs->num_tcs * tcs->ncpt)
+ return -ENOMEM;
+ i += tcs->ncpt;
+ } while (slot + msg->num_cmds - 1 >= i);
+
+copy_data:
+ bitmap_set(tcs->slots, slot, msg->num_cmds);
+ /* Copy the addresses of the resources over to the slots */
+ for (i = 0; i < msg->num_cmds; i++)
+ tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
+
+ offset = slot / tcs->ncpt;
+ *tcs_id = offset + tcs->offset;
+ *cmd_id = slot % tcs->ncpt;
+
+ return 0;
+}
+
+static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id = 0, cmd_id = 0;
+ unsigned long flags;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&tcs->lock, flags);
+ /* find the TCS id and the command in the TCS to write to */
+ ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
+ if (!ret)
+ __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
+ spin_unlock_irqrestore(&tcs->lock, flags);
+
+ return ret;
+}
+
+/**
+ * rpmh_rsc_write_ctrl_data: Write request to the controller
+ *
+ * @drv: the controller
+ * @msg: the data to be written to the controller
+ *
+ * There is no response returned for writing the request to the controller.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ if (!msg || !msg->cmds || !msg->num_cmds ||
+ msg->num_cmds > MAX_RPMH_PAYLOAD) {
+ pr_err("Payload error\n");
+ return -EINVAL;
+ }
+
+ /* Data sent to this API will not be sent immediately */
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE)
+ return -EINVAL;
+
+ return tcs_ctrl_write(drv, msg);
+}
+
+static int rpmh_probe_tcs_config(struct platform_device *pdev,
+ struct rsc_drv *drv)
+{
+ struct tcs_type_config {
+ u32 type;
+ u32 n;
+ } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
+ struct device_node *dn = pdev->dev.of_node;
+ u32 config, max_tcs, ncpt, offset;
+ int i, ret, n, st = 0;
+ struct tcs_group *tcs;
+ struct resource *res;
+ void __iomem *base;
+ char drv_id[10] = {0};
+
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
+ if (ret)
+ return ret;
+ drv->tcs_base = base + offset;
+
+ config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
+
+ max_tcs = config;
+ max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
+ max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
+
+ ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
+ ncpt = ncpt >> DRV_NCPT_SHIFT;
+
+ n = of_property_count_u32_elems(dn, "qcom,tcs-config");
+ if (n != 2 * TCS_TYPE_NR)
+ return -EINVAL;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2, &tcs_cfg[i].type);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].type >= TCS_TYPE_NR)
+ return -EINVAL;
+
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2 + 1, &tcs_cfg[i].n);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[tcs_cfg[i].type];
+ if (tcs->drv)
+ return -EINVAL;
+ tcs->drv = drv;
+ tcs->type = tcs_cfg[i].type;
+ tcs->num_tcs = tcs_cfg[i].n;
+ tcs->ncpt = ncpt;
+ spin_lock_init(&tcs->lock);
+
+ if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
+ continue;
+
+ if (st + tcs->num_tcs > max_tcs ||
+ st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
+ return -EINVAL;
+
+ tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
+ tcs->offset = st;
+ st += tcs->num_tcs;
+
+ /*
+ * Allocate memory to cache sleep and wake requests to
+ * avoid reading TCS register memory.
+ */
+ if (tcs->type == ACTIVE_TCS)
+ continue;
+
+ tcs->cmd_cache = devm_kcalloc(&pdev->dev,
+ tcs->num_tcs * ncpt, sizeof(u32),
+ GFP_KERNEL);
+ if (!tcs->cmd_cache)
+ return -ENOMEM;
+ }
+
+ drv->num_tcs = st;
+
+ return 0;
+}
+
+static int rpmh_rsc_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct rsc_drv *drv;
+ int ret, irq;
+
+ /*
+ * Even though RPMh doesn't directly use cmd-db, all of its children
+ * do. To avoid adding this check to our children we'll do it now.
+ */
+ ret = cmd_db_ready();
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Command DB not available (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
+ if (ret)
+ return ret;
+
+ drv->name = of_get_property(dn, "label", NULL);
+ if (!drv->name)
+ drv->name = dev_name(&pdev->dev);
+
+ ret = rpmh_probe_tcs_config(pdev, drv);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&drv->lock);
+ bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+
+ irq = platform_get_irq(pdev, drv->id);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+ drv->name, drv);
+ if (ret)
+ return ret;
+
+ /* Enable the active TCS to send requests immediately */
+ write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
+
+ spin_lock_init(&drv->client.cache_lock);
+ INIT_LIST_HEAD(&drv->client.cache);
+ INIT_LIST_HEAD(&drv->client.batch_cache);
+
+ dev_set_drvdata(&pdev->dev, drv);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id rpmh_drv_match[] = {
+ { .compatible = "qcom,rpmh-rsc", },
+ { }
+};
+
+static struct platform_driver rpmh_driver = {
+ .probe = rpmh_rsc_probe,
+ .driver = {
+ .name = "rpmh",
+ .of_match_table = rpmh_drv_match,
+ },
+};
+
+static int __init rpmh_driver_init(void)
+{
+ return platform_driver_register(&rpmh_driver);
+}
+arch_initcall(rpmh_driver_init);
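
To make the register interface above concrete: a caller hands this driver a tcs_request holding an array of tcs_cmd entries; the driver picks a free TCS, writes one MSGID/ADDR/DATA register triple per command, then triggers the TCS. A hedged sketch of such a message, using placeholder address and data values rather than real command-db resources:

#include <soc/qcom/tcs.h>
#include "rpmh-internal.h"

static int example_rsc_send(struct rsc_drv *drv)
{
	struct tcs_cmd cmd = {
		.addr = 0x30000,	/* placeholder resource address */
		.data = 0x1,		/* placeholder vote value */
		.wait = true,		/* wait for this command to complete */
	};
	struct tcs_request msg = {
		.state = RPMH_ACTIVE_ONLY_STATE,
		.wait_for_compl = true,
		.num_cmds = 1,
		.cmds = &cmd,
	};

	/* Retries internally while all matching TCSes are busy */
	return rpmh_rsc_send_data(drv, &msg);
}
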
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 000000000000..c7beb6841289
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/rpmh.h>
+
+#include "rpmh-internal.h"
+
+#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
+
+#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
+ struct rpmh_request name = { \
+ .msg = { \
+ .state = s, \
+ .cmds = name.cmd, \
+ .num_cmds = 0, \
+ .wait_for_compl = true, \
+ }, \
+ .cmd = { { 0 } }, \
+ .completion = q, \
+ .dev = dev, \
+ .needs_free = false, \
+ }
+
+#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
+
+/**
+ * struct cache_req: the request object for caching
+ *
+ * @addr: the address of the resource
+ * @sleep_val: the sleep vote
+ * @wake_val: the wake vote
+ * @list: linked list obj
+ */
+struct cache_req {
+ u32 addr;
+ u32 sleep_val;
+ u32 wake_val;
+ struct list_head list;
+};
+
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list obj
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+struct batch_cache_req {
+ struct list_head list;
+ int count;
+ struct rpmh_request rpm_msgs[];
+};
+
+static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
+{
+ struct rsc_drv *drv = dev_get_drvdata(dev->parent);
+
+ return &drv->client;
+}
+
+void rpmh_tx_done(const struct tcs_request *msg, int r)
+{
+ struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
+ msg);
+ struct completion *compl = rpm_msg->completion;
+
+ rpm_msg->err = r;
+
+ if (r)
+ dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
+ rpm_msg->msg.cmds[0].addr, r);
+
+ if (!compl)
+ goto exit;
+
+ /* Signal the blocking thread we are done */
+ complete(compl);
+
+exit:
+ if (rpm_msg->needs_free)
+ kfree(rpm_msg);
+}
+
+static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
+{
+ struct cache_req *p, *req = NULL;
+
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (p->addr == addr) {
+ req = p;
+ break;
+ }
+ }
+
+ return req;
+}
+
+static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+ enum rpmh_state state,
+ struct tcs_cmd *cmd)
+{
+ struct cache_req *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ req = __find_req(ctrlr, cmd->addr);
+ if (req)
+ goto existing;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ req = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ req->addr = cmd->addr;
+ req->sleep_val = req->wake_val = UINT_MAX;
+ INIT_LIST_HEAD(&req->list);
+ list_add_tail(&req->list, &ctrlr->cache);
+
+existing:
+ switch (state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ if (req->sleep_val != UINT_MAX)
+ req->wake_val = cmd->data;
+ break;
+ case RPMH_WAKE_ONLY_STATE:
+ req->wake_val = cmd->data;
+ break;
+ case RPMH_SLEEP_STATE:
+ req->sleep_val = cmd->data;
+ break;
+ default:
+ break;
+ }
+
+ ctrlr->dirty = true;
+unlock:
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return req;
+}
+
+/**
+ * __rpmh_write: Cache and send the RPMH request
+ *
+ * @dev: The device making the request
+ * @state: Active/Sleep request type
+ * @rpm_msg: The data that needs to be sent (cmds).
+ *
+ * Cache the RPMH request and send if the state is ACTIVE_ONLY.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
+ */
+static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+ struct rpmh_request *rpm_msg)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret = -EINVAL;
+ struct cache_req *req;
+ int i;
+
+ rpm_msg->msg.state = state;
+
+ /* Cache the request in our store and link the payload */
+ for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
+ req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ rpm_msg->msg.state = state;
+
+ if (state == RPMH_ACTIVE_ONLY_STATE) {
+ WARN_ON(irqs_disabled());
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+ } else {
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ /* Clean up our call by spoofing tx_done */
+ rpmh_tx_done(&rpm_msg->msg, ret);
+ }
+
+ return ret;
+}
+
+static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(req->cmd, cmd, n * sizeof(*cmd));
+
+ req->msg.state = state;
+ req->msg.cmds = req->cmd;
+ req->msg.num_cmds = n;
+
+ return 0;
+}
+
+/**
+ * rpmh_write_async: Write a set of RPMH commands
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a set of RPMH commands; the order of commands is maintained
+ * and they will be sent as a single shot.
+ */
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ struct rpmh_request *rpm_msg;
+ int ret;
+
+ rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
+ if (!rpm_msg)
+ return -ENOMEM;
+ rpm_msg->needs_free = true;
+
+ ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
+ if (ret) {
+ kfree(rpm_msg);
+ return ret;
+ }
+
+ return __rpmh_write(dev, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
+
+/**
+ * rpmh_write: Write a set of RPMH commands and block until response
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in @cmd
+ *
+ * May sleep. Do not call from atomic contexts.
+ */
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ DECLARE_COMPLETION_ONSTACK(compl);
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
+ int ret;
+
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
+ rpm_msg.msg.num_cmds = n;
+
+ ret = __rpmh_write(dev, state, &rpm_msg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
+ WARN_ON(!ret);
+ return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(rpmh_write);
+
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_add_tail(&req->list, &ctrlr->batch_cache);
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req;
+ const struct rpmh_request *rpm_msg;
+ unsigned long flags;
+ int ret = 0;
+ int i;
+
+ /* Send Sleep/Wake requests to the controller, expect no response */
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry(req, &ctrlr->batch_cache, list) {
+ for (i = 0; i < req->count; i++) {
+ rpm_msg = req->rpm_msgs + i;
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ if (ret)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return ret;
+}
+
+static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The array of counts of elements in each batch, zero-terminated.
+ *
+ * Write a request to the RSC controller without caching. If the request
+ * state is ACTIVE, then the requests are treated as completion requests
+ * and sent to the controller immediately. The function waits until all the
+ * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
+ * request is sent as fire-n-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{
+ struct batch_cache_req *req;
+ struct rpmh_request *rpm_msgs;
+ DECLARE_COMPLETION_ONSTACK(compl);
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ unsigned long time_left;
+ int count = 0;
+ int ret, i, j;
+
+ if (!cmd || !n)
+ return -EINVAL;
+
+ while (n[count] > 0)
+ count++;
+ if (!count)
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+ GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->count = count;
+ rpm_msgs = req->rpm_msgs;
+
+ for (i = 0; i < count; i++) {
+ __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+ cmd += n[i];
+ }
+
+ if (state != RPMH_ACTIVE_ONLY_STATE) {
+ cache_batch(ctrlr, req);
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ rpm_msgs[i].completion = &compl;
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+ if (ret) {
+ pr_err("Error(%d) sending RPMH message addr=%#x\n",
+ ret, rpm_msgs[i].msg.cmds[0].addr);
+ for (j = i; j < count; j++)
+ rpmh_tx_done(&rpm_msgs[j].msg, ret);
+ break;
+ }
+ }
+
+ time_left = RPMH_TIMEOUT_MS;
+ for (i = 0; i < count; i++) {
+ time_left = wait_for_completion_timeout(&compl, time_left);
+ if (!time_left) {
+ /*
+ * Better hope they never finish because they'll signal
+ * the completion on our stack and that's bad once
+ * we've returned from the function.
+ */
+ WARN_ON(1);
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ }
+
+exit:
+ kfree(req);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
+
+static int is_req_valid(struct cache_req *req)
+{
+ return (req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX &&
+ req->sleep_val != req->wake_val);
+}
+
+static int send_single(const struct device *dev, enum rpmh_state state,
+ u32 addr, u32 data)
+{
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+
+ /* Wake sets are always complete and sleep sets are not */
+ rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = data;
+ rpm_msg.msg.num_cmds = 1;
+
+ return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush: Flushes the buffered active and sleep sets to TCS
+ *
+ * @dev: The device making the request
+ *
+ * Return: -EBUSY if the controller is busy, probably waiting on a response
+ * to a RPMH request sent earlier.
+ *
+ * This function is always called from the sleep code on the last CPU
+ * that is powering down the entire system. Since no other RPMH API would be
+ * executing at this time, it is safe to run lockless.
+ */
+int rpmh_flush(const struct device *dev)
+{
+ struct cache_req *p;
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret;
+
+ if (!ctrlr->dirty) {
+ pr_debug("Skipping flush, TCS has latest data.\n");
+ return 0;
+ }
+
+ /* First flush the cached batch requests */
+ ret = flush_batch(ctrlr);
+ if (ret)
+ return ret;
+
+ /*
+ * Nobody else should be calling this function other than system PM,
+ * hence we can run without locks.
+ */
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (!is_req_valid(p)) {
+ pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
+ __func__, p->addr, p->sleep_val, p->wake_val);
+ continue;
+ }
+ ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+ if (ret)
+ return ret;
+ ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
+ p->addr, p->wake_val);
+ if (ret)
+ return ret;
+ }
+
+ ctrlr->dirty = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(rpmh_flush);
+
+/**
+ * rpmh_invalidate: Invalidate all sleep and active sets
+ *
+ * @dev: The device making the request
+ *
+ * Invalidate the sleep and active values in the TCS blocks.
+ */
+int rpmh_invalidate(const struct device *dev)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret;
+
+ invalidate_batch(ctrlr);
+ ctrlr->dirty = true;
+
+ do {
+ ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_invalidate);
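
Putting the library API together: active-only votes go to the hardware immediately (and block for the ack), while sleep and wake votes are only cached until rpmh_flush() runs on the suspend path. A minimal consumer sketch, assuming the caller is a child device of the RSC and using placeholder address/data values:

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

static int example_rpmh_vote(const struct device *dev)
{
	struct tcs_cmd cmd = {
		.addr = 0x30000,	/* placeholder resource address */
		.data = 0x1,		/* placeholder vote value */
	};
	int ret;

	/* Sent immediately; blocks until the controller acks */
	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
	if (ret)
		return ret;

	/* Cached only; rpmh_flush() sends it when the system suspends */
	return rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);
}
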
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 70b2ee80d6bd..bf4bd71ab53f 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -364,11 +364,6 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
end = phdr_to_last_uncached_entry(phdr);
cached = phdr_to_last_cached_entry(phdr);
- if (smem->global_partition) {
- dev_err(smem->dev, "Already found the global partition\n");
- return -EINVAL;
- }
-
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY)
goto bad_canary;
@@ -736,6 +731,11 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
bool found = false;
int i;
+ if (smem->global_partition) {
+ dev_err(smem->dev, "Already found the global partition\n");
+ return -EINVAL;
+ }
+
ptable = qcom_smem_get_ptable(smem);
if (IS_ERR(ptable))
return PTR_ERR(ptable);
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
new file mode 100644
index 000000000000..feb0cb455e37
--- /dev/null
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#include <linux/tracepoint.h>
+#include "rpmh-internal.h"
+
+TRACE_EVENT(rpmh_tx_done,
+
+ TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r, int e),
+
+ TP_ARGS(d, m, r, e),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->addr = r->cmds[0].addr;
+ __entry->data = r->cmds[0].data;
+ __entry->err = e;
+ ),
+
+ TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x errno: %d",
+ __get_str(name), __entry->m, __entry->addr, __entry->data,
+ __entry->err)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+ TP_PROTO(struct rsc_drv *d, int m, int n, u32 h,
+ const struct tcs_cmd *c),
+
+ TP_ARGS(d, m, n, h, c),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(int, n)
+ __field(u32, hdr)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(bool, wait)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->n = n;
+ __entry->hdr = h;
+ __entry->addr = c->addr;
+ __entry->data = c->data;
+ __entry->wait = c->wait;
+ ),
+
+ TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d",
+ __get_str(name), __entry->m, __entry->n, __entry->hdr,
+ __entry->addr, __entry->data, __entry->wait)
+);
+
+#endif /* _TRACE_RPMH_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-rpmh
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 7dc0f20d7907..c37b0803c1b6 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -18,6 +18,9 @@ obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
+ifdef CONFIG_SMP
+obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
+endif
# Family
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
diff --git a/drivers/soc/renesas/r9a06g032-smp.c b/drivers/soc/renesas/r9a06g032-smp.c
new file mode 100644
index 000000000000..a1926e8d73f2
--- /dev/null
+++ b/drivers/soc/renesas/r9a06g032-smp.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R9A06G032 Second CA7 enabler.
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ * Derived from actions,s500-smp
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+
+/*
+ * The second CPU is parked in ROM at boot time. It is woken by writing its
+ * start address into the BOOTADDR register of sysctrl.
+ *
+ * So the default value of the "cpu-release-addr" corresponds to BOOTADDR...
+ *
+ * *However* the BOOTADDR register is not available when the kernel
+ * starts in NONSEC mode.
+ *
+ * So for NONSEC mode, the bootloader re-parks the second CPU into a pen
+ * in SRAM, and changes the "cpu-release-addr" of linux's DT to a SRAM address,
+ * which is not restricted.
+ */
+
+static void __iomem *cpu_bootaddr;
+
+static DEFINE_SPINLOCK(cpu_lock);
+
+static int
+r9a06g032_smp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ if (!cpu_bootaddr)
+ return -ENODEV;
+
+ spin_lock(&cpu_lock);
+
+ writel(__pa_symbol(secondary_startup), cpu_bootaddr);
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ spin_unlock(&cpu_lock);
+
+ return 0;
+}
+
+static void __init r9a06g032_smp_prepare_cpus(unsigned int max_cpus)
+{
+ struct device_node *dn;
+ int ret = -EINVAL, dns;
+ u32 bootaddr;
+
+ dn = of_get_cpu_node(1, NULL);
+ if (!dn) {
+ pr_err("CPU#1: missing device tree node\n");
+ return;
+ }
+ /*
+ * Determine the address from which the CPU is polling.
+ * The bootloader *does* change this property.
+ * Note: The property can be either 64 or 32 bits, so handle both cases
+ */
+ if (of_find_property(dn, "cpu-release-addr", &dns)) {
+ if (dns == sizeof(u64)) {
+ u64 temp;
+
+ ret = of_property_read_u64(dn,
+ "cpu-release-addr", &temp);
+ bootaddr = temp;
+ } else {
+ ret = of_property_read_u32(dn,
+ "cpu-release-addr",
+ &bootaddr);
+ }
+ }
+ of_node_put(dn);
+ if (ret) {
+ pr_err("CPU#1: invalid cpu-release-addr property\n");
+ return;
+ }
+ pr_info("CPU#1: cpu-release-addr %08x\n", bootaddr);
+
+ cpu_bootaddr = ioremap(bootaddr, sizeof(bootaddr));
+}
+
+static const struct smp_operations r9a06g032_smp_ops __initconst = {
+ .smp_prepare_cpus = r9a06g032_smp_prepare_cpus,
+ .smp_boot_secondary = r9a06g032_smp_boot_secondary,
+};
+
+CPU_METHOD_OF_DECLARE(r9a06g032_smp,
+ "renesas,r9a06g032-smp", &r9a06g032_smp_ops);
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 95120acc4d80..029188e8be6e 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -58,6 +58,12 @@
#define RCAR_PD_ALWAYS_ON 32 /* Always-on power area */
+struct rcar_sysc_ch {
+ u16 chan_offs;
+ u8 chan_bit;
+ u8 isr_bit;
+};
+
static void __iomem *rcar_sysc_base;
static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
@@ -143,12 +149,12 @@ static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
return ret;
}
-int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
+static int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
{
return rcar_sysc_power(sysc_ch, false);
}
-int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
+static int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
{
return rcar_sysc_power(sysc_ch, true);
}
@@ -194,11 +200,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
static bool has_cpg_mstp;
-static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
struct dev_power_governor *gov = &simple_qos_governor;
+ int error;
if (pd->flags & PD_CPU) {
/*
@@ -251,7 +258,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
rcar_sysc_power_up(&pd->ch);
finalize:
- pm_genpd_init(genpd, gov, false);
+ error = pm_genpd_init(genpd, gov, false);
+ if (error)
+ pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+ return error;
}
static const struct of_device_id rcar_sysc_matches[] __initconst = {
@@ -310,6 +321,8 @@ struct rcar_pm_domains {
struct generic_pm_domain *domains[RCAR_PD_ALWAYS_ON + 1];
};
+static struct genpd_onecell_data *rcar_sysc_onecell_data;
+
static int __init rcar_sysc_pd_init(void)
{
const struct rcar_sysc_info *info;
@@ -321,9 +334,6 @@ static int __init rcar_sysc_pd_init(void)
unsigned int i;
int error;
- if (rcar_sysc_base)
- return 0;
-
np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match);
if (!np)
return -ENODEV;
@@ -356,6 +366,7 @@ static int __init rcar_sysc_pd_init(void)
domains->onecell_data.domains = domains->domains;
domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+ rcar_sysc_onecell_data = &domains->onecell_data;
for (i = 0, syscier = 0; i < info->num_areas; i++)
syscier |= BIT(info->areas[i].isr_bit);
@@ -375,6 +386,9 @@ static int __init rcar_sysc_pd_init(void)
pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
iowrite32(syscier, base + SYSCIER);
+ /*
+ * First, create all PM domains
+ */
for (i = 0; i < info->num_areas; i++) {
const struct rcar_sysc_area *area = &info->areas[i];
struct rcar_sysc_pd *pd;
@@ -397,14 +411,29 @@ static int __init rcar_sysc_pd_init(void)
pd->ch.isr_bit = area->isr_bit;
pd->flags = area->flags;
- rcar_sysc_pd_setup(pd);
- if (area->parent >= 0)
- pm_genpd_add_subdomain(domains->domains[area->parent],
- &pd->genpd);
+ error = rcar_sysc_pd_setup(pd);
+ if (error)
+ goto out_put;
domains->domains[area->isr_bit] = &pd->genpd;
}
+ /*
+ * Second, link all PM domains to their parents
+ */
+ for (i = 0; i < info->num_areas; i++) {
+ const struct rcar_sysc_area *area = &info->areas[i];
+
+ if (!area->name || area->parent < 0)
+ continue;
+
+ error = pm_genpd_add_subdomain(domains->domains[area->parent],
+ domains->domains[area->isr_bit]);
+ if (error)
+ pr_warn("Failed to add PM subdomain %s to parent %u\n",
+ area->name, area->parent);
+ }
+
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
@@ -425,27 +454,39 @@ void __init rcar_sysc_nullify(struct rcar_sysc_area *areas,
}
}
-void __init rcar_sysc_init(phys_addr_t base, u32 syscier)
+#ifdef CONFIG_ARCH_R8A7779
+static int rcar_sysc_power_cpu(unsigned int idx, bool on)
{
- u32 syscimr;
+ struct generic_pm_domain *genpd;
+ struct rcar_sysc_pd *pd;
+ unsigned int i;
- if (!rcar_sysc_pd_init())
- return;
+ if (!rcar_sysc_onecell_data)
+ return -ENODEV;
- rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
+ for (i = 0; i < rcar_sysc_onecell_data->num_domains; i++) {
+ genpd = rcar_sysc_onecell_data->domains[i];
+ if (!genpd)
+ continue;
- /*
- * Mask all interrupt sources to prevent the CPU from receiving them.
- * Make sure not to clear reserved bits that were set before.
- */
- syscimr = ioread32(rcar_sysc_base + SYSCIMR);
- syscimr |= syscier;
- pr_debug("%s: syscimr = 0x%08x\n", __func__, syscimr);
- iowrite32(syscimr, rcar_sysc_base + SYSCIMR);
+ pd = to_rcar_pd(genpd);
+ if (!(pd->flags & PD_CPU) || pd->ch.chan_bit != idx)
+ continue;
- /*
- * SYSC needs all interrupt sources enabled to control power.
- */
- pr_debug("%s: syscier = 0x%08x\n", __func__, syscier);
- iowrite32(syscier, rcar_sysc_base + SYSCIER);
+ return on ? rcar_sysc_power_up(&pd->ch)
+ : rcar_sysc_power_down(&pd->ch);
+ }
+
+ return -ENOENT;
+}
+
+int rcar_sysc_power_down_cpu(unsigned int cpu)
+{
+ return rcar_sysc_power_cpu(cpu, false);
+}
+
+int rcar_sysc_power_up_cpu(unsigned int cpu)
+{
+ return rcar_sysc_power_cpu(cpu, true);
}
+#endif /* CONFIG_ARCH_R8A7779 */
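
The two helpers added at the end are intended for the R8A7779 CPU hotplug path; the wiring itself lives outside this patch. A hedged sketch of how a boot_secondary/cpu_kill implementation might call them:

/* Illustrative only: power handling around secondary CPU bringup. */
static int example_boot_secondary(unsigned int cpu)
{
	int ret;

	ret = rcar_sysc_power_up_cpu(cpu);	/* power the CPU area on */
	if (ret)
		return ret;

	/* ...normal architecture wake-up of the core follows here... */
	return 0;
}

static int example_cpu_kill(unsigned int cpu)
{
	return rcar_sysc_power_down_cpu(cpu);	/* power the CPU area off */
}
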
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 882be5ed7e84..b4b0f3480bd3 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -17,6 +17,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/soc/sunxi/sunxi_sram.h>
@@ -63,6 +64,12 @@ static struct sunxi_sram_desc sun4i_a10_sram_a3_a4 = {
SUNXI_SRAM_MAP(1, 1, "emac")),
};
+static struct sunxi_sram_desc sun4i_a10_sram_c1 = {
+ .data = SUNXI_SRAM_DATA("C1", 0x0, 0x0, 31,
+ SUNXI_SRAM_MAP(0, 0, "cpu"),
+ SUNXI_SRAM_MAP(0x7fffffff, 1, "ve")),
+};
+
static struct sunxi_sram_desc sun4i_a10_sram_d = {
.data = SUNXI_SRAM_DATA("D", 0x4, 0x0, 1,
SUNXI_SRAM_MAP(0, 0, "cpu"),
@@ -81,6 +88,10 @@ static const struct of_device_id sunxi_sram_dt_ids[] = {
.data = &sun4i_a10_sram_a3_a4.data,
},
{
+ .compatible = "allwinner,sun4i-a10-sram-c1",
+ .data = &sun4i_a10_sram_c1.data,
+ },
+ {
.compatible = "allwinner,sun4i-a10-sram-d",
.data = &sun4i_a10_sram_d.data,
},
@@ -281,13 +292,51 @@ int sunxi_sram_release(struct device *dev)
}
EXPORT_SYMBOL(sunxi_sram_release);
+struct sunxi_sramc_variant {
+ bool has_emac_clock;
+};
+
+static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
+ /* Nothing special */
+};
+
+static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
+ .has_emac_clock = true,
+};
+
+#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
+static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
+ unsigned int reg)
+{
+ if (reg == SUNXI_SRAM_EMAC_CLOCK_REG)
+ return true;
+ return false;
+}
+
+static struct regmap_config sunxi_sram_emac_clock_regmap = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ /* last defined register */
+ .max_register = SUNXI_SRAM_EMAC_CLOCK_REG,
+ /* other devices have no business accessing other registers */
+ .readable_reg = sunxi_sram_regmap_accessible_reg,
+ .writeable_reg = sunxi_sram_regmap_accessible_reg,
+};
+
static int sunxi_sram_probe(struct platform_device *pdev)
{
struct resource *res;
struct dentry *d;
+ struct regmap *emac_clock;
+ const struct sunxi_sramc_variant *variant;
sram_dev = &pdev->dev;
+ variant = of_device_get_match_data(&pdev->dev);
+ if (!variant)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
@@ -300,12 +349,46 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!d)
return -ENOMEM;
+ if (variant->has_emac_clock) {
+ emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
+ &sunxi_sram_emac_clock_regmap);
+
+ if (IS_ERR(emac_clock))
+ return PTR_ERR(emac_clock);
+ }
+
return 0;
}
static const struct of_device_id sunxi_sram_dt_match[] = {
- { .compatible = "allwinner,sun4i-a10-sram-controller" },
- { .compatible = "allwinner,sun50i-a64-sram-controller" },
+ {
+ .compatible = "allwinner,sun4i-a10-sram-controller",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-sram-controller",
+ .data = &sun50i_a64_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-system-control",
+ .data = &sun50i_a64_sramc_variant,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index 92770d84a288..be4570baad96 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -1,3 +1,17 @@
+# 64-bit ARM SoCs from TI
+if ARM64
+
+if ARCH_K3
+
+config ARCH_K3_AM6_SOC
+ bool "K3 AM6 SoC"
+ help
+	  Enable support for TI's AM6 SoC family.
+
+endif
+
+endif
+
#
# TI SOC drivers
#
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index 652739c7f718..d0dab323651f 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -41,6 +41,8 @@ static struct am33xx_pm_sram_addr *pm_sram;
static struct device *pm33xx_dev;
static struct wkup_m3_ipc *m3_ipc;
+static unsigned long suspend_wfi_flags;
+
static u32 sram_suspend_address(unsigned long addr)
{
return ((unsigned long)am33xx_do_wfi_sram +
@@ -53,7 +55,7 @@ static int am33xx_pm_suspend(suspend_state_t suspend_state)
int i, ret = 0;
ret = pm_ops->soc_suspend((unsigned long)suspend_state,
- am33xx_do_wfi_sram);
+ am33xx_do_wfi_sram, suspend_wfi_flags);
if (ret) {
dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
@@ -227,6 +229,7 @@ static int am33xx_push_sram_idle(void)
ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
ro_sram_data.amx3_pm_sram_data_phys =
gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
+ ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
/* Save physical address to calculate resume offset during pm init */
am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
@@ -310,6 +313,17 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_set_ops(&am33xx_pm_ops);
#endif /* CONFIG_SUSPEND */
+ /*
+ * For a system suspend we must flush the caches, we want
+ * the DDR in self-refresh, we want to save the context
+	 * of the EMIF, and we want the wkup_m3 to handle the low-power
+	 * transition.
+ */
+ suspend_wfi_flags |= WFI_FLAG_FLUSH_CACHE;
+ suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH;
+ suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF;
+ suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
+
ret = pm_ops->init();
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
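Each WFI_FLAG_* is a single bit OR'ed into the word handed to soc_suspend(); the SRAM-resident WFI code tests them one by one. A sketch of the flag layout — the bit positions here are assumptions, the authoritative definitions live in include/linux/platform_data/pm33xx.h:

#include <linux/bits.h>

#define WFI_FLAG_FLUSH_CACHE	BIT(0)	/* assumed: flush caches before WFI */
#define WFI_FLAG_SELF_REFRESH	BIT(1)	/* assumed: put DDR into self-refresh */
#define WFI_FLAG_SAVE_EMIF	BIT(2)	/* assumed: save/restore EMIF context */
#define WFI_FLAG_WAKE_M3	BIT(3)	/* assumed: let wkup_m3 run the transition */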
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 369aef5e7228..f5cb8c0af09f 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -46,6 +46,7 @@
#define M3_BASELINE_VERSION 0x191
#define M3_STATUS_RESP_MASK (0xffff << 16)
#define M3_FW_VERSION_MASK 0xffff
+#define M3_WAKE_SRC_MASK 0xff
#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
@@ -55,6 +56,23 @@
static struct wkup_m3_ipc *m3_ipc_state;
+static const struct wkup_m3_wakeup_src wakeups[] = {
+ {.irq_nr = 35, .src = "USB0_PHY"},
+ {.irq_nr = 36, .src = "USB1_PHY"},
+ {.irq_nr = 40, .src = "I2C0"},
+ {.irq_nr = 41, .src = "RTC Timer"},
+ {.irq_nr = 42, .src = "RTC Alarm"},
+ {.irq_nr = 43, .src = "Timer0"},
+ {.irq_nr = 44, .src = "Timer1"},
+ {.irq_nr = 45, .src = "UART"},
+ {.irq_nr = 46, .src = "GPIO0"},
+ {.irq_nr = 48, .src = "MPU_WAKE"},
+ {.irq_nr = 49, .src = "WDT0"},
+ {.irq_nr = 50, .src = "WDT1"},
+ {.irq_nr = 51, .src = "ADC_TSC"},
+ {.irq_nr = 0, .src = "Unknown"},
+};
+
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
writel(AM33XX_M3_TXEV_ACK,
@@ -329,12 +347,45 @@ static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
return 0;
}
+/**
+ * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
+{
+ unsigned int wakeup_src_idx;
+ int j, val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);
+
+ wakeup_src_idx = val & M3_WAKE_SRC_MASK;
+
+ for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
+ if (wakeups[j].irq_nr == wakeup_src_idx)
+ return wakeups[j].src;
+ }
+ return wakeups[j].src;
+}
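A sketch of how a client such as pm33xx could surface this on resume; the call site is hypothetical, but the ops pointer is the one installed in ipc_ops below:

static void example_report_wakeup(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc->ops && m3_ipc->ops->request_wake_src)
		pr_info("PM: wakeup source: %s\n",
			m3_ipc->ops->request_wake_src(m3_ipc));
}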
+
+/**
+ * wkup_m3_set_rtc_only - Set the rtc_only flag
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
+{
+ if (m3_ipc_state)
+ m3_ipc_state->is_rtc_only = true;
+}
+
static struct wkup_m3_ipc_ops ipc_ops = {
.set_mem_type = wkup_m3_set_mem_type,
.set_resume_address = wkup_m3_set_resume_address,
.prepare_low_power = wkup_m3_prepare_low_power,
.finish_low_power = wkup_m3_finish_low_power,
.request_pm_status = wkup_m3_request_pm_status,
+ .request_wake_src = wkup_m3_request_wake_src,
+ .set_rtc_only = wkup_m3_set_rtc_only,
};
/**
@@ -484,6 +535,30 @@ static int wkup_m3_ipc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
+{
+ /*
+	 * Nothing needs to be done on suspend, even with the rtc_only flag set.
+ */
+ return 0;
+}
+
+static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
+{
+ if (m3_ipc_state->is_rtc_only) {
+ rproc_shutdown(m3_ipc_state->rproc);
+ rproc_boot(m3_ipc_state->rproc);
+ }
+
+ m3_ipc_state->is_rtc_only = false;
+
+ return 0;
+}
+
+static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
+};
+
static const struct of_device_id wkup_m3_ipc_of_match[] = {
{ .compatible = "ti,am3352-wkup-m3-ipc", },
{ .compatible = "ti,am4372-wkup-m3-ipc", },
@@ -497,6 +572,7 @@ static struct platform_driver wkup_m3_ipc_driver = {
.driver = {
.name = "wkup_m3_ipc",
.of_match_table = wkup_m3_ipc_of_match,
+ .pm = &wkup_m3_ipc_pm_ops,
},
};
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ad5d68e1dab7..671d078349cc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -688,6 +688,19 @@ config SPI_TXX9
help
SPI driver for Toshiba TXx9 MIPS SoCs
+config SPI_UNIPHIER
+ tristate "Socionext UniPhier SPI Controller"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ help
+ This enables a driver for the Socionext UniPhier SoC SCSSI SPI controller.
+
+ UniPhier SoCs have SCSSI and MCSSI SPI controllers.
+	  Every UniPhier SoC has an SCSSI, which supports a single channel.
+	  The older UniPhier Pro4/Pro5 also have an MCSSI, which supports multiple channels.
+ This driver supports SCSSI only.
+
+ If your SoC supports SCSSI, say Y here.
+
config SPI_XCOMM
tristate "Analog Devices AD-FMCOMMS1-EBZ SPI-I2C-bridge driver"
depends on I2C
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index cb1f4378b87c..a90d55970036 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -101,6 +101,7 @@ spi-thunderx-objs := spi-cavium.o spi-cavium-thunderx.o
obj-$(CONFIG_SPI_THUNDERX) += spi-thunderx.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
+obj-$(CONFIG_SPI_UNIPHIER) += spi-uniphier.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XLP) += spi-xlp.o
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 0719bd484891..3f6b657394de 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -176,7 +176,7 @@ static void ath79_spi_cleanup(struct spi_device *spi)
}
static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
- u32 word, u8 bits)
+ u32 word, u8 bits, unsigned flags)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
u32 ioc = sp->ioc_base;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 3aa9e6e3dac8..f29176000b8d 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -49,22 +49,26 @@
struct spi_bitbang_cs {
unsigned nsecs; /* (clock cycle time)/2 */
u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs,
- u32 word, u8 bits);
+ u32 word, u8 bits, unsigned flags);
unsigned (*txrx_bufs)(struct spi_device *,
u32 (*txrx_word)(
struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
- unsigned, struct spi_transfer *);
+ u32 word, u8 bits,
+ unsigned flags),
+ unsigned, struct spi_transfer *,
+ unsigned);
};
static unsigned bitbang_txrx_8(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
+ u32 word, u8 bits,
+ unsigned flags),
unsigned ns,
- struct spi_transfer *t
+ struct spi_transfer *t,
+ unsigned flags
) {
unsigned bits = t->bits_per_word;
unsigned count = t->len;
@@ -76,7 +80,7 @@ static unsigned bitbang_txrx_8(
if (tx)
word = *tx++;
- word = txrx_word(spi, ns, word, bits);
+ word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 1;
@@ -88,9 +92,11 @@ static unsigned bitbang_txrx_16(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
+ u32 word, u8 bits,
+ unsigned flags),
unsigned ns,
- struct spi_transfer *t
+ struct spi_transfer *t,
+ unsigned flags
) {
unsigned bits = t->bits_per_word;
unsigned count = t->len;
@@ -102,7 +108,7 @@ static unsigned bitbang_txrx_16(
if (tx)
word = *tx++;
- word = txrx_word(spi, ns, word, bits);
+ word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 2;
@@ -114,9 +120,11 @@ static unsigned bitbang_txrx_32(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
+ u32 word, u8 bits,
+ unsigned flags),
unsigned ns,
- struct spi_transfer *t
+ struct spi_transfer *t,
+ unsigned flags
) {
unsigned bits = t->bits_per_word;
unsigned count = t->len;
@@ -128,7 +136,7 @@ static unsigned bitbang_txrx_32(
if (tx)
word = *tx++;
- word = txrx_word(spi, ns, word, bits);
+ word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 4;
@@ -235,8 +243,24 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct spi_bitbang_cs *cs = spi->controller_state;
unsigned nsecs = cs->nsecs;
+ struct spi_bitbang *bitbang;
+
+ bitbang = spi_master_get_devdata(spi->master);
+ if (bitbang->set_line_direction) {
+ int err;
- return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t);
+ err = bitbang->set_line_direction(spi, !!(t->tx_buf));
+ if (err < 0)
+ return err;
+ }
+
+ if (spi->mode & SPI_3WIRE) {
+ unsigned flags;
+
+ flags = t->tx_buf ? SPI_MASTER_NO_RX : SPI_MASTER_NO_TX;
+ return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags);
+ }
+ return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0);
}
/*----------------------------------------------------------------------*/
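With the flags argument threaded through, a board can now declare a 3-wire client on a bitbanged bus; for a tx-only transfer the core passes SPI_MASTER_NO_RX down to txrx_word(). A sketch of such a declaration (the board data and modalias are illustrative):

#include <linux/spi/spi.h>

static struct spi_board_info example_3wire_dev __initdata = {
	.modalias	= "example-sensor",	/* hypothetical client driver */
	.max_speed_hz	= 1000000,
	.bus_num	= 1,
	.chip_select	= 0,
	.mode		= SPI_3WIRE | SPI_MODE_0,
};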
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 22a31e4a1a11..1a3510215841 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -144,9 +144,9 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
static u32
butterfly_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word,
- u8 bits)
+ u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
/*----------------------------------------------------------------------*/
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index f3dad6fcdc35..7c88f74f7f47 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
*/
if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
CDNS_SPI_IXR_TXFULL)
- usleep_range(10, 20);
+ udelay(10);
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
@@ -739,7 +739,7 @@ static int __maybe_unused cnds_runtime_resume(struct device *dev)
ret = clk_prepare_enable(xspi->ref_clk);
if (ret) {
dev_err(dev, "Cannot enable device clock.\n");
- clk_disable(xspi->pclk);
+ clk_disable_unprepare(xspi->pclk);
return ret;
}
return 0;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 577084bb911b..a02099c90c5c 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
pdata = &dspi->pdata;
/* program delay transfers if tx_delay is non zero */
- if (spicfg->wdelay)
+ if (spicfg && spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
/*
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index d25cc4037e23..e80f60ed6fdf 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -15,11 +15,13 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#include "spi-dw.h"
@@ -28,10 +30,90 @@
struct dw_spi_mmio {
struct dw_spi dws;
struct clk *clk;
+ void *priv;
};
+#define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
+#define OCELOT_IF_SI_OWNER_MASK GENMASK(5, 4)
+#define OCELOT_IF_SI_OWNER_OFFSET 4
+#define MSCC_IF_SI_OWNER_SISL 0
+#define MSCC_IF_SI_OWNER_SIBM 1
+#define MSCC_IF_SI_OWNER_SIMC 2
+
+#define MSCC_SPI_MST_SW_MODE 0x14
+#define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE BIT(13)
+#define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x) (x << 5)
+
+struct dw_spi_mscc {
+ struct regmap *syscon;
+ void __iomem *spi_mst;
+};
+
+/*
+ * The Designware SPI controller (referred to as master in the documentation)
+ * automatically deasserts chip select when the tx fifo is empty. The chip
+ * selects then need to be either driven as GPIOs or, for the first 4, through
+ * the SPI boot controller registers. The final chip select is an OR gate
+ * between the Designware SPI controller and the SPI boot controller.
+ */
+static void dw_spi_mscc_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
+ struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
+ u32 cs = spi->chip_select;
+
+ if (cs < 4) {
+ u32 sw_mode = MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE;
+
+ if (!enable)
+ sw_mode |= MSCC_SPI_MST_SW_MODE_SW_SPI_CS(BIT(cs));
+
+ writel(sw_mode, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
+ }
+
+ dw_spi_set_cs(spi, enable);
+}
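A worked example of the register value written above, for deasserting (enable == false) chip select 2, using the macros from this patch:

/*
 *   sw_mode = MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE
 *           | MSCC_SPI_MST_SW_MODE_SW_SPI_CS(BIT(2))
 *           = BIT(13) | (BIT(2) << 5)
 *           = 0x2000  | 0x0080 = 0x2080
 */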
+
+static int dw_spi_mscc_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ struct dw_spi_mscc *dwsmscc;
+ struct resource *res;
+
+ dwsmscc = devm_kzalloc(&pdev->dev, sizeof(*dwsmscc), GFP_KERNEL);
+ if (!dwsmscc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dwsmscc->spi_mst = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dwsmscc->spi_mst)) {
+ dev_err(&pdev->dev, "SPI_MST region map failed\n");
+ return PTR_ERR(dwsmscc->spi_mst);
+ }
+
+ dwsmscc->syscon = syscon_regmap_lookup_by_compatible("mscc,ocelot-cpu-syscon");
+ if (IS_ERR(dwsmscc->syscon))
+ return PTR_ERR(dwsmscc->syscon);
+
+ /* Deassert all CS */
+ writel(0, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
+
+ /* Select the owner of the SI interface */
+ regmap_update_bits(dwsmscc->syscon, MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL,
+ OCELOT_IF_SI_OWNER_MASK,
+ MSCC_IF_SI_OWNER_SIMC << OCELOT_IF_SI_OWNER_OFFSET);
+
+ dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
+ dwsmmio->priv = dwsmscc;
+
+ return 0;
+}
+
static int dw_spi_mmio_probe(struct platform_device *pdev)
{
+ int (*init_func)(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio);
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
struct resource *mem;
@@ -99,6 +181,13 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
}
}
+ init_func = device_get_match_data(&pdev->dev);
+ if (init_func) {
+ ret = init_func(pdev, dwsmmio);
+ if (ret)
+ goto out;
+ }
+
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
goto out;
@@ -123,6 +212,7 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "snps,dw-apb-ssi", },
+ { .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_init},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index f693bfe95ab9..ac2eb89ef7a5 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -133,7 +133,7 @@ static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
}
#endif /* CONFIG_DEBUG_FS */
-static void dw_spi_set_cs(struct spi_device *spi, bool enable)
+void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip = spi_get_ctldata(spi);
@@ -145,6 +145,7 @@ static void dw_spi_set_cs(struct spi_device *spi, bool enable)
if (!enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
}
+EXPORT_SYMBOL_GPL(dw_spi_set_cs);
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
@@ -485,6 +486,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+ spi_controller_set_devdata(master, dws);
+
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
master);
if (ret < 0) {
@@ -505,6 +508,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->dev.of_node = dev->of_node;
master->flags = SPI_MASTER_GPIO_SS;
+ if (dws->set_cs)
+ master->set_cs = dws->set_cs;
+
/* Basic HW init */
spi_hw_init(dev, dws);
@@ -518,7 +524,6 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
}
}
- spi_controller_set_devdata(master, dws);
ret = devm_spi_register_controller(dev, master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 2cde2473b3e9..0168b08364d5 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -112,6 +112,7 @@ struct dw_spi {
u32 reg_io_width; /* DR I/O width in bytes */
u16 bus_num;
u16 num_cs; /* supported slave numbers */
+ void (*set_cs)(struct spi_device *spi, bool enable);
/* Current message transfer state info */
size_t len;
@@ -244,6 +245,7 @@ struct dw_spi_chip {
void (*cs_control)(u32 command);
};
+extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 0630962ce442..7cb3ab0a35a0 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,17 +1,9 @@
-/*
- * drivers/spi/spi-fsl-dspi.c
- *
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * Freescale DSPI driver
- * This file contains a driver for the Freescale DSPI
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2013 Freescale Semiconductor, Inc.
+//
+// Freescale DSPI driver
+// This file contains a driver for the Freescale DSPI
#include <linux/clk.h>
#include <linux/delay.h>
@@ -38,10 +30,6 @@
#define DRIVER_NAME "fsl-dspi"
-#define TRAN_STATE_RX_VOID 0x01
-#define TRAN_STATE_TX_VOID 0x02
-#define TRAN_STATE_WORD_ODD_NUM 0x04
-
#define DSPI_FIFO_SIZE 4
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
@@ -50,6 +38,7 @@
#define SPI_MCR_PCSIS (0x3F << 16)
#define SPI_MCR_CLR_TXF (1 << 11)
#define SPI_MCR_CLR_RXF (1 << 10)
+#define SPI_MCR_XSPI (1 << 3)
#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
@@ -86,11 +75,16 @@
#define SPI_RSER_TCFQE 0x80000000
#define SPI_PUSHR 0x34
-#define SPI_PUSHR_CONT (1 << 31)
-#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
-#define SPI_PUSHR_EOQ (1 << 27)
-#define SPI_PUSHR_CTCNT (1 << 26)
-#define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16)
+#define SPI_PUSHR_CMD_CONT (1 << 15)
+#define SPI_PUSHR_CONT (SPI_PUSHR_CMD_CONT << 16)
+#define SPI_PUSHR_CMD_CTAS(x) (((x) & 0x0003) << 12)
+#define SPI_PUSHR_CTAS(x) (SPI_PUSHR_CMD_CTAS(x) << 16)
+#define SPI_PUSHR_CMD_EOQ (1 << 11)
+#define SPI_PUSHR_EOQ (SPI_PUSHR_CMD_EOQ << 16)
+#define SPI_PUSHR_CMD_CTCNT (1 << 10)
+#define SPI_PUSHR_CTCNT (SPI_PUSHR_CMD_CTCNT << 16)
+#define SPI_PUSHR_CMD_PCS(x) ((1 << x) & 0x003f)
+#define SPI_PUSHR_PCS(x) (SPI_PUSHR_CMD_PCS(x) << 16)
#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
#define SPI_PUSHR_SLAVE 0x34
@@ -107,21 +101,31 @@
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88
+#define SPI_CTARE(x) (0x11c + (((x) & 0x3) * 4))
+#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
+#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
+
+#define SPI_SREX 0x13c
+
#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
+#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
+#define SPI_FRAME_EBITS_MASK SPI_CTARE_FMSZE(1)
+
+/* Register offsets for regmap_pushr */
+#define PUSHR_CMD 0x0
+#define PUSHR_TX 0x2
+
#define SPI_CS_INIT 0x01
#define SPI_CS_ASSERT 0x02
#define SPI_CS_DROP 0x04
-#define SPI_TCR_TCNT_MAX 0x10000
-
#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
struct chip_data {
- u32 mcr_val;
u32 ctar_val;
u16 void_write_data;
};
@@ -135,6 +139,7 @@ enum dspi_trans_mode {
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
+ bool xspi_mode;
};
static const struct fsl_dspi_devtype_data vf610_data = {
@@ -145,6 +150,7 @@ static const struct fsl_dspi_devtype_data vf610_data = {
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
.trans_mode = DSPI_TCFQ_MODE,
.max_clock_factor = 8,
+ .xspi_mode = true,
};
static const struct fsl_dspi_devtype_data ls2085a_data = {
@@ -179,6 +185,7 @@ struct fsl_dspi {
struct platform_device *pdev;
struct regmap *regmap;
+ struct regmap *regmap_pushr;
int irq;
struct clk *clk;
@@ -186,32 +193,62 @@ struct fsl_dspi {
struct spi_message *cur_msg;
struct chip_data *cur_chip;
size_t len;
- void *tx;
- void *tx_end;
+ const void *tx;
void *rx;
void *rx_end;
- char dataflags;
- u8 cs;
u16 void_write_data;
- u32 cs_change;
+ u16 tx_cmd;
+ u8 bits_per_word;
+ u8 bytes_per_word;
const struct fsl_dspi_devtype_data *devtype_data;
wait_queue_head_t waitq;
u32 waitflags;
- u32 spi_tcnt;
struct fsl_dspi_dma *dma;
};
-static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
+static u32 dspi_pop_tx(struct fsl_dspi *dspi)
+{
+ u32 txdata = 0;
+
+ if (dspi->tx) {
+ if (dspi->bytes_per_word == 1)
+ txdata = *(u8 *)dspi->tx;
+ else if (dspi->bytes_per_word == 2)
+ txdata = *(u16 *)dspi->tx;
+ else /* dspi->bytes_per_word == 4 */
+ txdata = *(u32 *)dspi->tx;
+ dspi->tx += dspi->bytes_per_word;
+ }
+ dspi->len -= dspi->bytes_per_word;
+ return txdata;
+}
-static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
- unsigned int val;
+ u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
- regmap_read(dspi->regmap, SPI_CTAR(0), &val);
+ if (dspi->len > 0)
+ cmd |= SPI_PUSHR_CMD_CONT;
+ return cmd << 16 | data;
+}
- return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
+static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
+{
+ if (!dspi->rx)
+ return;
+
+ /* Mask of undefined bits */
+ rxdata &= (1 << dspi->bits_per_word) - 1;
+
+ if (dspi->bytes_per_word == 1)
+ *(u8 *)dspi->rx = rxdata;
+ else if (dspi->bytes_per_word == 2)
+ *(u16 *)dspi->rx = rxdata;
+ else /* dspi->bytes_per_word == 4 */
+ *(u32 *)dspi->rx = rxdata;
+ dspi->rx += dspi->bytes_per_word;
}
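A worked example of the combined word these helpers produce: an 8-bit transfer of 0xa5 on chip select 1 with more data pending. The values follow directly from the SPI_PUSHR_CMD_* macros defined above:

/*
 *   cmd  = SPI_PUSHR_CMD_CTAS(0) | SPI_PUSHR_CMD_PCS(1) | SPI_PUSHR_CMD_CONT
 *        = 0x0000 | 0x0002 | 0x8000
 *        = 0x8002
 *   word = cmd << 16 | 0xa5 = 0x800200a5
 */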
static void dspi_tx_dma_callback(void *arg)
@@ -226,19 +263,11 @@ static void dspi_rx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
- int rx_word;
int i;
- u16 d;
-
- rx_word = is_double_byte_mode(dspi);
- if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
- for (i = 0; i < dma->curr_xfer_len; i++) {
- d = dspi->dma->rx_dma_buf[i];
- rx_word ? (*(u16 *)dspi->rx = d) :
- (*(u8 *)dspi->rx = d);
- dspi->rx += rx_word + 1;
- }
+ if (dspi->rx) {
+ for (i = 0; i < dma->curr_xfer_len; i++)
+ dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
}
complete(&dma->cmd_rx_complete);
@@ -249,16 +278,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
struct fsl_dspi_dma *dma = dspi->dma;
struct device *dev = &dspi->pdev->dev;
int time_left;
- int tx_word;
int i;
- tx_word = is_double_byte_mode(dspi);
-
- for (i = 0; i < dma->curr_xfer_len; i++) {
- dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
- if ((dspi->cs_change) && (!dspi->len))
- dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
- }
+ for (i = 0; i < dma->curr_xfer_len; i++)
+ dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
dma->tx_dma_phys,
@@ -327,18 +350,17 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
struct fsl_dspi_dma *dma = dspi->dma;
struct device *dev = &dspi->pdev->dev;
+ struct spi_message *message = dspi->cur_msg;
int curr_remaining_bytes;
int bytes_per_buffer;
- int word = 1;
int ret = 0;
- if (is_double_byte_mode(dspi))
- word = 2;
curr_remaining_bytes = dspi->len;
bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
while (curr_remaining_bytes) {
/* Check if current transfer fits the DMA buffer */
- dma->curr_xfer_len = curr_remaining_bytes / word;
+ dma->curr_xfer_len = curr_remaining_bytes
+ / dspi->bytes_per_word;
if (dma->curr_xfer_len > bytes_per_buffer)
dma->curr_xfer_len = bytes_per_buffer;
@@ -348,7 +370,10 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
goto exit;
} else {
- curr_remaining_bytes -= dma->curr_xfer_len * word;
+ const int len =
+ dma->curr_xfer_len * dspi->bytes_per_word;
+ curr_remaining_bytes -= len;
+ message->actual_length += len;
if (curr_remaining_bytes < 0)
curr_remaining_bytes = 0;
}
@@ -534,125 +559,91 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns,
}
}
-static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
+static void fifo_write(struct fsl_dspi *dspi)
{
- u16 d16;
-
- if (!(dspi->dataflags & TRAN_STATE_TX_VOID))
- d16 = tx_word ? *(u16 *)dspi->tx : *(u8 *)dspi->tx;
- else
- d16 = dspi->void_write_data;
-
- dspi->tx += tx_word + 1;
- dspi->len -= tx_word + 1;
-
- return SPI_PUSHR_TXDATA(d16) |
- SPI_PUSHR_PCS(dspi->cs) |
- SPI_PUSHR_CTAS(0) |
- SPI_PUSHR_CONT;
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}
-static void dspi_data_from_popr(struct fsl_dspi *dspi, int rx_word)
+static void cmd_fifo_write(struct fsl_dspi *dspi)
{
- u16 d;
- unsigned int val;
-
- regmap_read(dspi->regmap, SPI_POPR, &val);
- d = SPI_POPR_RXDATA(val);
+ u16 cmd = dspi->tx_cmd;
- if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
- rx_word ? (*(u16 *)dspi->rx = d) : (*(u8 *)dspi->rx = d);
-
- dspi->rx += rx_word + 1;
+ if (dspi->len > 0)
+ cmd |= SPI_PUSHR_CMD_CONT;
+ regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
}
-static int dspi_eoq_write(struct fsl_dspi *dspi)
+static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
{
- int tx_count = 0;
- int tx_word;
- u32 dspi_pushr = 0;
+ regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
+}
- tx_word = is_double_byte_mode(dspi);
+static void dspi_tcfq_write(struct fsl_dspi *dspi)
+{
+ /* Clear transfer count */
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
- while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
- /* If we are in word mode, only have a single byte to transfer
- * switch to byte mode temporarily. Will switch back at the
- * end of the transfer.
+ if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
+ /* Write two TX FIFO entries first, and then the corresponding
+ * CMD FIFO entry.
*/
- if (tx_word && (dspi->len == 1)) {
- dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- regmap_update_bits(dspi->regmap, SPI_CTAR(0),
- SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
- tx_word = 0;
- }
-
- dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
-
- if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
- /* last transfer in the transfer */
- dspi_pushr |= SPI_PUSHR_EOQ;
- if ((dspi->cs_change) && (!dspi->len))
- dspi_pushr &= ~SPI_PUSHR_CONT;
- } else if (tx_word && (dspi->len == 1))
- dspi_pushr |= SPI_PUSHR_EOQ;
+ u32 data = dspi_pop_tx(dspi);
- regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
-
- tx_count++;
+ if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
+ /* LSB */
+ tx_fifo_write(dspi, data & 0xFFFF);
+ tx_fifo_write(dspi, data >> 16);
+ } else {
+ /* MSB */
+ tx_fifo_write(dspi, data >> 16);
+ tx_fifo_write(dspi, data & 0xFFFF);
+ }
+ cmd_fifo_write(dspi);
+ } else {
+ /* Write one entry to both TX FIFO and CMD FIFO
+ * simultaneously.
+ */
+ fifo_write(dspi);
}
-
- return tx_count * (tx_word + 1);
}
-static int dspi_eoq_read(struct fsl_dspi *dspi)
+static u32 fifo_read(struct fsl_dspi *dspi)
{
- int rx_count = 0;
- int rx_word = is_double_byte_mode(dspi);
-
- while ((dspi->rx < dspi->rx_end)
- && (rx_count < DSPI_FIFO_SIZE)) {
- if (rx_word && (dspi->rx_end - dspi->rx) == 1)
- rx_word = 0;
+ u32 rxdata = 0;
- dspi_data_from_popr(dspi, rx_word);
- rx_count++;
- }
-
- return rx_count;
+ regmap_read(dspi->regmap, SPI_POPR, &rxdata);
+ return rxdata;
}
-static int dspi_tcfq_write(struct fsl_dspi *dspi)
+static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
- int tx_word;
- u32 dspi_pushr = 0;
-
- tx_word = is_double_byte_mode(dspi);
+ dspi_push_rx(dspi, fifo_read(dspi));
+}
- if (tx_word && (dspi->len == 1)) {
- dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- regmap_update_bits(dspi->regmap, SPI_CTAR(0),
- SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
- tx_word = 0;
+static void dspi_eoq_write(struct fsl_dspi *dspi)
+{
+ int fifo_size = DSPI_FIFO_SIZE;
+
+ /* Fill TX FIFO with as many transfers as possible */
+ while (dspi->len && fifo_size--) {
+ /* Request EOQF for last transfer in FIFO */
+ if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
+ /* Clear transfer count for first transfer in FIFO */
+ if (fifo_size == (DSPI_FIFO_SIZE - 1))
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
+ /* Write combined TX FIFO and CMD FIFO entry */
+ fifo_write(dspi);
}
-
- dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
-
- if ((dspi->cs_change) && (!dspi->len))
- dspi_pushr &= ~SPI_PUSHR_CONT;
-
- regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
-
- return tx_word + 1;
}
-static void dspi_tcfq_read(struct fsl_dspi *dspi)
+static void dspi_eoq_read(struct fsl_dspi *dspi)
{
- int rx_word = is_double_byte_mode(dspi);
-
- if (rx_word && (dspi->rx_end - dspi->rx) == 1)
- rx_word = 0;
+ int fifo_size = DSPI_FIFO_SIZE;
- dspi_data_from_popr(dspi, rx_word);
+	/* Read one FIFO entry at a time and push to the rx buffer */
+ while ((dspi->rx < dspi->rx_end) && fifo_size--)
+ dspi_push_rx(dspi, fifo_read(dspi));
}
static int dspi_transfer_one_message(struct spi_master *master,
@@ -663,10 +654,6 @@ static int dspi_transfer_one_message(struct spi_master *master,
struct spi_transfer *transfer;
int status = 0;
enum dspi_trans_mode trans_mode;
- u32 spi_tcr;
-
- regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
- dspi->spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
message->actual_length = 0;
@@ -674,32 +661,51 @@ static int dspi_transfer_one_message(struct spi_master *master,
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
dspi->cur_chip = spi_get_ctldata(spi);
- dspi->cs = spi->chip_select;
- dspi->cs_change = 0;
+ /* Prepare command word for CMD FIFO */
+ dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
+ SPI_PUSHR_CMD_PCS(spi->chip_select);
if (list_is_last(&dspi->cur_transfer->transfer_list,
- &dspi->cur_msg->transfers) || transfer->cs_change)
- dspi->cs_change = 1;
+ &dspi->cur_msg->transfers)) {
+ /* Leave PCS activated after last transfer when
+ * cs_change is set.
+ */
+ if (transfer->cs_change)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
+ } else {
+			/* Keep PCS active between transfers in the same message
+ * when cs_change is not set, and de-activate PCS
+ * between transfers in the same message when
+ * cs_change is set.
+ */
+ if (!transfer->cs_change)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
+ }
+
dspi->void_write_data = dspi->cur_chip->void_write_data;
- dspi->dataflags = 0;
- dspi->tx = (void *)transfer->tx_buf;
- dspi->tx_end = dspi->tx + transfer->len;
+ dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->rx_end = dspi->rx + transfer->len;
dspi->len = transfer->len;
+ /* Validated transfer specific frame size (defaults applied) */
+ dspi->bits_per_word = transfer->bits_per_word;
+ if (transfer->bits_per_word <= 8)
+ dspi->bytes_per_word = 1;
+ else if (transfer->bits_per_word <= 16)
+ dspi->bytes_per_word = 2;
+ else
+ dspi->bytes_per_word = 4;
- if (!dspi->rx)
- dspi->dataflags |= TRAN_STATE_RX_VOID;
-
- if (!dspi->tx)
- dspi->dataflags |= TRAN_STATE_TX_VOID;
-
- regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
regmap_update_bits(dspi->regmap, SPI_MCR,
- SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
- SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
regmap_write(dspi->regmap, SPI_CTAR(0),
- dspi->cur_chip->ctar_val);
+ dspi->cur_chip->ctar_val |
+ SPI_FRAME_BITS(transfer->bits_per_word));
+ if (dspi->devtype_data->xspi_mode)
+ regmap_write(dspi->regmap, SPI_CTARE(0),
+ SPI_FRAME_EBITS(transfer->bits_per_word)
+ | SPI_CTARE_DTCP(1));
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
@@ -750,16 +756,9 @@ static int dspi_setup(struct spi_device *spi)
struct fsl_dspi_platform_data *pdata;
u32 cs_sck_delay = 0, sck_cs_delay = 0;
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
- unsigned char pasc = 0, asc = 0, fmsz = 0;
+ unsigned char pasc = 0, asc = 0;
unsigned long clkrate;
- if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
- fmsz = spi->bits_per_word - 1;
- } else {
- pr_err("Invalid wordsize\n");
- return -ENODEV;
- }
-
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
@@ -781,9 +780,6 @@ static int dspi_setup(struct spi_device *spi)
sck_cs_delay = pdata->sck_cs_delay;
}
- chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
- SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
-
chip->void_write_data = 0;
clkrate = clk_get_rate(dspi->clk);
@@ -795,8 +791,7 @@ static int dspi_setup(struct spi_device *spi)
/* Set After SCK delay scale values */
ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
- chip->ctar_val = SPI_CTAR_FMSZ(fmsz)
- | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
+ chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
| SPI_CTAR_PCSSCK(pcssck)
@@ -827,36 +822,20 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
struct spi_message *msg = dspi->cur_msg;
enum dspi_trans_mode trans_mode;
u32 spi_sr, spi_tcr;
- u32 spi_tcnt, tcnt_diff;
- int tx_word;
+ u16 spi_tcnt;
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
- tx_word = is_double_byte_mode(dspi);
-
+ /* Get transfer counter (in number of SPI transfers). It was
+ * reset to 0 when transfer(s) were started.
+ */
regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
- /*
- * The width of SPI Transfer Counter in SPI_TCR is 16bits,
- * so the max couner is 65535. When the counter reach 65535,
- * it will wrap around, counter reset to zero.
- * spi_tcnt my be less than dspi->spi_tcnt, it means the
- * counter already wrapped around.
- * SPI Transfer Counter is a counter of transmitted frames.
- * The size of frame maybe two bytes.
- */
- tcnt_diff = ((spi_tcnt + SPI_TCR_TCNT_MAX) - dspi->spi_tcnt)
- % SPI_TCR_TCNT_MAX;
- tcnt_diff *= (tx_word + 1);
- if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
- tcnt_diff--;
-
- msg->actual_length += tcnt_diff;
-
- dspi->spi_tcnt = spi_tcnt;
+ /* Update total number of bytes that were transferred */
+ msg->actual_length += spi_tcnt * dspi->bytes_per_word;
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
@@ -873,14 +852,6 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
}
if (!dspi->len) {
- if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
- regmap_update_bits(dspi->regmap,
- SPI_CTAR(0),
- SPI_FRAME_BITS_MASK,
- SPI_FRAME_BITS(16));
- dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
- }
-
dspi->waitflags = 1;
wake_up_interruptible(&dspi->waitq);
} else {
@@ -943,16 +914,62 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+static const struct regmap_range dspi_volatile_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_TCR),
+ regmap_reg_range(SPI_SR, SPI_SR),
+ regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
+};
+
+static const struct regmap_access_table dspi_volatile_table = {
+ .yes_ranges = dspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
+};
+
static const struct regmap_config dspi_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x88,
+ .volatile_table = &dspi_volatile_table,
+};
+
+static const struct regmap_range dspi_xspi_volatile_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_TCR),
+ regmap_reg_range(SPI_SR, SPI_SR),
+ regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_xspi_volatile_table = {
+ .yes_ranges = dspi_xspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
+};
+
+static const struct regmap_config dspi_xspi_regmap_config[] = {
+ {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x13c,
+ .volatile_table = &dspi_xspi_volatile_table,
+ },
+ {
+ .name = "pushr",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x2,
+ },
};
static void dspi_init(struct fsl_dspi *dspi)
{
+ regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
+ (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+ if (dspi->devtype_data->xspi_mode)
+ regmap_write(dspi->regmap, SPI_CTARE(0),
+ SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}
static int dspi_probe(struct platform_device *pdev)
@@ -961,6 +978,7 @@ static int dspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct fsl_dspi *dspi;
struct resource *res;
+ const struct regmap_config *regmap_config;
void __iomem *base;
struct fsl_dspi_platform_data *pdata;
int ret = 0, cs_num, bus_num;
@@ -980,8 +998,6 @@ static int dspi_probe(struct platform_device *pdev)
master->cleanup = dspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
- SPI_BPW_MASK(16);
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
@@ -1013,6 +1029,11 @@ static int dspi_probe(struct platform_device *pdev)
}
}
+ if (dspi->devtype_data->xspi_mode)
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ else
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
@@ -1020,8 +1041,11 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
}
- dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
- &dspi_regmap_config);
+ if (dspi->devtype_data->xspi_mode)
+ regmap_config = &dspi_xspi_regmap_config[0];
+ else
+ regmap_config = &dspi_regmap_config;
+ dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
if (IS_ERR(dspi->regmap)) {
dev_err(&pdev->dev, "failed to init regmap: %ld\n",
PTR_ERR(dspi->regmap));
@@ -1029,30 +1053,43 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
}
+ if (dspi->devtype_data->xspi_mode) {
+ dspi->regmap_pushr = devm_regmap_init_mmio(
+ &pdev->dev, base + SPI_PUSHR,
+ &dspi_xspi_regmap_config[1]);
+ if (IS_ERR(dspi->regmap_pushr)) {
+ dev_err(&pdev->dev,
+ "failed to init pushr regmap: %ld\n",
+ PTR_ERR(dspi->regmap_pushr));
+ ret = PTR_ERR(dspi->regmap_pushr);
+ goto out_master_put;
+ }
+ }
+
+ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+ if (IS_ERR(dspi->clk)) {
+ ret = PTR_ERR(dspi->clk);
+ dev_err(&pdev->dev, "unable to get clock\n");
+ goto out_master_put;
+ }
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ goto out_master_put;
+
dspi_init(dspi);
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
ret = dspi->irq;
- goto out_master_put;
+ goto out_clk_put;
}
ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
- goto out_master_put;
- }
-
- dspi->clk = devm_clk_get(&pdev->dev, "dspi");
- if (IS_ERR(dspi->clk)) {
- ret = PTR_ERR(dspi->clk);
- dev_err(&pdev->dev, "unable to get clock\n");
- goto out_master_put;
+ goto out_clk_put;
}
- ret = clk_prepare_enable(dspi->clk);
- if (ret)
- goto out_master_put;
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 1d332e23f6ed..1e8ff6256079 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -547,8 +547,11 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
dev_err(espi->dev,
"Transfer done but SPIE_DON isn't set!\n");
- if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE)
+ if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
+ dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
+ SPIE_RXCNT(events), SPIE_TXCNT(events));
+ }
complete(&espi->done);
}
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 6ae92d4dca19..0626e6e3ea0c 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -121,7 +121,10 @@ static inline int getmiso(const struct spi_device *spi)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
- return !!gpiod_get_value_cansleep(spi_gpio->miso);
+ if (spi->mode & SPI_3WIRE)
+ return !!gpiod_get_value_cansleep(spi_gpio->mosi);
+ else
+ return !!gpiod_get_value_cansleep(spi_gpio->miso);
}
/*
@@ -149,27 +152,27 @@ static inline int getmiso(const struct spi_device *spi)
*/
static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*
@@ -183,30 +186,30 @@ static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
*/
static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
@@ -250,6 +253,16 @@ static int spi_gpio_setup(struct spi_device *spi)
return status;
}
+static int spi_gpio_set_direction(struct spi_device *spi, bool output)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ if (output)
+ return gpiod_direction_output(spi_gpio->mosi, 1);
+ else
+ return gpiod_direction_input(spi_gpio->mosi);
+}
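In 3-wire mode the single data line is turned around between transfers; a sketch of the resulting sequence for one command/response exchange (illustrative only — the bitbang core issues these calls via set_line_direction):

/*
 *   spi_gpio_set_direction(spi, true);   // tx transfer: drive MOSI
 *   ... clock out the command bits ...
 *   spi_gpio_set_direction(spi, false);  // rx transfer: release MOSI
 *   ... sample the response; getmiso() now reads the MOSI gpio ...
 */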
+
static void spi_gpio_cleanup(struct spi_device *spi)
{
spi_bitbang_cleanup(spi);
@@ -395,6 +408,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL;
master->flags = master_flags;
master->bus_num = pdev->id;
/* The master needs to think there is a chipselect even if not connected */
@@ -407,6 +421,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
spi_gpio->bitbang.master = master;
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
+ spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 7a37090dabbe..e6eb979f1b8a 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -419,6 +419,9 @@ static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
u32 val;
val = spfi_readl(spfi, SPFI_PORT_STATE);
+ val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
+ SPFI_PORT_STATE_DEV_SEL_SHIFT);
+ val |= msg->spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
if (msg->spi->mode & SPI_CPHA)
val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
else
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d3b21faf6b1f..08dd3a31a3e5 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -94,8 +94,7 @@ struct spi_imx_data {
void *rx_buf;
const void *tx_buf;
unsigned int txfifo; /* number of words pushed in tx FIFO */
- unsigned int dynamic_burst, read_u32;
- unsigned int word_mask;
+ unsigned int dynamic_burst;
/* Slave mode */
bool slave_mode;
@@ -140,6 +139,8 @@ static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
*(type *)spi_imx->rx_buf = val; \
spi_imx->rx_buf += sizeof(type); \
} \
+ \
+ spi_imx->remainder -= sizeof(type); \
}
#define MXC_SPI_BUF_TX(type) \
@@ -203,7 +204,12 @@ out:
static int spi_imx_bytes_per_word(const int bits_per_word)
{
- return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
+ if (bits_per_word <= 8)
+ return 1;
+ else if (bits_per_word <= 16)
+ return 2;
+ else
+ return 4;
}
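The rounding above mirrors the FIFO entry sizes, so word widths that are not a power-of-two number of bytes are no longer rejected by the DMA path:

/*
 *   bits_per_word  1..8  ->  1 byte per FIFO entry
 *   bits_per_word  9..16 ->  2 bytes per FIFO entry
 *   bits_per_word 17..32 ->  4 bytes per FIFO entry (24-bit words included)
 */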
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
@@ -220,17 +226,11 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
- if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
- return false;
-
for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
if (!(transfer->len % (i * bytes_per_word)))
break;
}
- if (i == 0)
- return false;
-
spi_imx->wml = i;
spi_imx->dynamic_burst = 0;
@@ -291,26 +291,39 @@ static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
else if (bytes_per_word == 2)
val = (val << 16) | (val >> 16);
#endif
- val &= spi_imx->word_mask;
*(u32 *)spi_imx->rx_buf = val;
spi_imx->rx_buf += sizeof(u32);
}
+
+ spi_imx->remainder -= sizeof(u32);
}
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
- unsigned int bytes_per_word;
+ int unaligned;
+ u32 val;
- bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
- if (spi_imx->read_u32) {
+ unaligned = spi_imx->remainder % 4;
+
+ if (!unaligned) {
spi_imx_buf_rx_swap_u32(spi_imx);
return;
}
- if (bytes_per_word == 1)
- spi_imx_buf_rx_u8(spi_imx);
- else if (bytes_per_word == 2)
+ if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
spi_imx_buf_rx_u16(spi_imx);
+ return;
+ }
+
+ val = readl(spi_imx->base + MXC_CSPIRXDATA);
+
+ while (unaligned--) {
+ if (spi_imx->rx_buf) {
+ *(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
+ spi_imx->rx_buf++;
+ }
+ spi_imx->remainder--;
+ }
}
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
@@ -322,7 +335,6 @@ static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
if (spi_imx->tx_buf) {
val = *(u32 *)spi_imx->tx_buf;
- val &= spi_imx->word_mask;
spi_imx->tx_buf += sizeof(u32);
}
@@ -340,40 +352,30 @@ static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
- u32 ctrl, val;
- unsigned int bytes_per_word;
+ int unaligned;
+ u32 val = 0;
- if (spi_imx->count == spi_imx->remainder) {
- ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
- ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
- if (spi_imx->count > MX51_ECSPI_CTRL_MAX_BURST) {
- spi_imx->remainder = spi_imx->count %
- MX51_ECSPI_CTRL_MAX_BURST;
- val = MX51_ECSPI_CTRL_MAX_BURST * 8 - 1;
- } else if (spi_imx->count >= sizeof(u32)) {
- spi_imx->remainder = spi_imx->count % sizeof(u32);
- val = (spi_imx->count - spi_imx->remainder) * 8 - 1;
- } else {
- spi_imx->remainder = 0;
- val = spi_imx->bits_per_word - 1;
- spi_imx->read_u32 = 0;
- }
+ unaligned = spi_imx->count % 4;
- ctrl |= (val << MX51_ECSPI_CTRL_BL_OFFSET);
- writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+ if (!unaligned) {
+ spi_imx_buf_tx_swap_u32(spi_imx);
+ return;
}
- if (spi_imx->count >= sizeof(u32)) {
- spi_imx_buf_tx_swap_u32(spi_imx);
+ if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
+ spi_imx_buf_tx_u16(spi_imx);
return;
}
- bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
+ while (unaligned--) {
+ if (spi_imx->tx_buf) {
+ val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
+ spi_imx->tx_buf++;
+ }
+ spi_imx->count--;
+ }
- if (bytes_per_word == 1)
- spi_imx_buf_tx_u8(spi_imx);
- else if (bytes_per_word == 2)
- spi_imx_buf_tx_u16(spi_imx);
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
}
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
@@ -392,6 +394,8 @@ static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
spi_imx->rx_buf += n_bytes;
spi_imx->slave_burst -= n_bytes;
}
+
+ spi_imx->remainder -= sizeof(u32);
}
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
@@ -1001,12 +1005,52 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active)
gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
}
+static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
+ ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
+ unsigned int burst_len, fifo_words;
+
+ if (spi_imx->dynamic_burst)
+ fifo_words = 4;
+ else
+ fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
+ /*
+	 * Reload the FIFO when no bytes remain to be transferred in the
+	 * current burst. This only applies when bits_per_word is a
+ * multiple of 8.
+ */
+ if (!spi_imx->remainder) {
+ if (spi_imx->dynamic_burst) {
+
+			/* We need to deal with unaligned data first */
+ burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
+
+ if (!burst_len)
+ burst_len = MX51_ECSPI_CTRL_MAX_BURST;
+
+ spi_imx_set_burst_len(spi_imx, burst_len * 8);
+
+ spi_imx->remainder = burst_len;
+ } else {
+ spi_imx->remainder = fifo_words;
+ }
+ }
+
while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
if (!spi_imx->count)
break;
- if (spi_imx->txfifo && (spi_imx->count == spi_imx->remainder))
+ if (spi_imx->dynamic_burst &&
+ spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
+ fifo_words))
break;
spi_imx->tx(spi_imx);
spi_imx->txfifo++;
@@ -1102,27 +1146,20 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->bits_per_word = t->bits_per_word;
spi_imx->speed_hz = t->speed_hz;
- /* Initialize the functions for transfer */
- if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode) {
- u32 mask;
-
- spi_imx->dynamic_burst = 0;
- spi_imx->remainder = 0;
- spi_imx->read_u32 = 1;
+ /*
+	 * Initialize the functions for transfer. To transfer non byte-aligned
+	 * words, we have to use multiple word-size bursts; we can't use
+	 * dynamic_burst in that case.
+ */
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
+ (spi_imx->bits_per_word == 8 ||
+ spi_imx->bits_per_word == 16 ||
+ spi_imx->bits_per_word == 32)) {
- mask = (1 << spi_imx->bits_per_word) - 1;
spi_imx->rx = spi_imx_buf_rx_swap;
spi_imx->tx = spi_imx_buf_tx_swap;
spi_imx->dynamic_burst = 1;
- spi_imx->remainder = t->len;
-
- if (spi_imx->bits_per_word <= 8)
- spi_imx->word_mask = mask << 24 | mask << 16
- | mask << 8 | mask;
- else if (spi_imx->bits_per_word <= 16)
- spi_imx->word_mask = mask << 16 | mask;
- else
- spi_imx->word_mask = mask;
+
} else {
if (spi_imx->bits_per_word <= 8) {
spi_imx->rx = spi_imx_buf_rx_u8;
@@ -1134,6 +1171,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->rx = spi_imx_buf_rx_u32;
spi_imx->tx = spi_imx_buf_tx_u32;
}
+ spi_imx->dynamic_burst = 0;
}
if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
@@ -1317,6 +1355,7 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
spi_imx->rx_buf = transfer->rx_buf;
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
reinit_completion(&spi_imx->xfer_done);
@@ -1354,6 +1393,7 @@ static int spi_imx_pio_transfer_slave(struct spi_device *spi,
spi_imx->rx_buf = transfer->rx_buf;
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
reinit_completion(&spi_imx->xfer_done);
spi_imx->slave_aborted = false;
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 61ee0f4269ae..4549efd792da 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -188,9 +188,10 @@ static void lm70_chipselect(struct spi_device *spi, int value)
/*
* Our actual bitbanger routine.
*/
-static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
+static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static void spi_lm70llp_attach(struct parport *p)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 990770dfa5cf..e43842c7a31a 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -311,6 +311,24 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
/**
+ * spi_mem_get_name() - Return the SPI mem device name to be used by the
+ * upper layer if necessary
+ * @mem: the SPI memory
+ *
+ * This function allows SPI mem users to retrieve the SPI mem device name.
+ * It is useful if the upper layer needs to expose a custom name for
+ * compatibility reasons.
+ *
+ * Return: a string containing the name of the memory device to be used
+ * by the SPI mem user
+ */
+const char *spi_mem_get_name(struct spi_mem *mem)
+{
+ return mem->name;
+}
+EXPORT_SYMBOL_GPL(spi_mem_get_name);
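A sketch of a consumer — the probe function below is hypothetical and not part of this patch — showing a flash shim that reuses the controller-provided name so device identifiers stay stable across kernel versions:

static int example_flash_probe(struct spi_mem *spimem)
{
	const char *name = spi_mem_get_name(spimem);

	/* e.g. hand the stable name to MTD so partitions keep matching */
	pr_info("registering flash as %s\n", name);
	return 0;
}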
+
+/**
* spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
* match controller limitations
* @mem: the SPI memory
@@ -344,6 +362,7 @@ static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_controller *ctlr = spi->controller;
struct spi_mem *mem;
mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
@@ -351,6 +370,15 @@ static int spi_mem_probe(struct spi_device *spi)
return -ENOMEM;
mem->spi = spi;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->get_name)
+ mem->name = ctlr->mem_ops->get_name(mem);
+ else
+ mem->name = dev_name(&spi->dev);
+
+ if (IS_ERR_OR_NULL(mem->name))
+ return PTR_ERR(mem->name);
+
spi_set_drvdata(spi, mem);
return memdrv->probe(mem);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 6c628a54e946..508c61c669e7 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -398,11 +398,9 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
- unsigned int count;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- count = xfer->len;
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
@@ -582,7 +580,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi_dma *mcspi_dma;
unsigned int count;
- u32 l;
u8 *rx;
const u8 *tx;
struct dma_slave_config cfg;
@@ -595,8 +592,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- l = mcspi_cached_chconf0(spi);
-
if (cs->word_len <= 8) {
width = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -676,7 +671,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct omap2_mcspi *mcspi;
struct omap2_mcspi_cs *cs = spi->controller_state;
unsigned int count, c;
u32 l;
@@ -686,7 +680,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
void __iomem *chstat_reg;
int word_len;
- mcspi = spi_master_get_devdata(spi->master);
count = xfer->len;
c = count;
word_len = cs->word_len;
@@ -883,13 +876,11 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
{
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
- struct spi_master *spi_cntrl;
u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
u8 word_len = spi->bits_per_word;
u32 speed_hz = spi->max_speed_hz;
mcspi = spi_master_get_devdata(spi->master);
- spi_cntrl = mcspi->master;
if (t != NULL && t->bits_per_word)
word_len = t->bits_per_word;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index d01a6adc726e..47ef6b1a2e76 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/clk.h>
#include <linux/sizes.h>
#include <linux/gpio.h>
@@ -681,9 +682,9 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out_rel_axi_clk;
}
- /* Scan all SPI devices of this controller for direct mapped devices */
for_each_available_child_of_node(pdev->dev.of_node, np) {
u32 cs;
+ int cs_gpio;
/* Get chip-select number from the "reg" property */
status = of_property_read_u32(np, "reg", &cs);
@@ -695,6 +696,44 @@ static int orion_spi_probe(struct platform_device *pdev)
}
/*
+ * Initialize the CS GPIO:
+ * - properly request the actual GPIO signal
+ * - de-assert the logical signal so that all GPIO CS lines
+ * are inactive when probing for slaves
+ * - find an unused physical CS which will be driven for any
+ * slave which uses a CS GPIO
+ */
+ cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", cs);
+ if (cs_gpio > 0) {
+ char *gpio_name;
+ int cs_flags;
+
+ if (spi->unused_hw_gpio == -1) {
+ dev_info(&pdev->dev,
+ "Selected unused HW CS#%d for any GPIO CSes\n",
+ cs);
+ spi->unused_hw_gpio = cs;
+ }
+
+ gpio_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%s-CS%d", dev_name(&pdev->dev), cs);
+ if (!gpio_name) {
+ status = -ENOMEM;
+ goto out_rel_axi_clk;
+ }
+
+ cs_flags = of_property_read_bool(np, "spi-cs-high") ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
+ status = devm_gpio_request_one(&pdev->dev, cs_gpio,
+ cs_flags, gpio_name);
+ if (status) {
+ dev_err(&pdev->dev,
+ "Can't request GPIO for CS %d\n", cs);
+ goto out_rel_axi_clk;
+ }
+ }
+
+ /*
* Check if an address is configured for this SPI device. If
* not, the MBus mapping via the 'ranges' property in the 'soc'
* node is not configured and this device should not use the
@@ -740,44 +779,8 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status < 0)
goto out_rel_pm;
- if (master->cs_gpios) {
- int i;
- for (i = 0; i < master->num_chipselect; ++i) {
- char *gpio_name;
-
- if (!gpio_is_valid(master->cs_gpios[i])) {
- continue;
- }
-
- gpio_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%s-CS%d", dev_name(&pdev->dev), i);
- if (!gpio_name) {
- status = -ENOMEM;
- goto out_rel_master;
- }
-
- status = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i], gpio_name);
- if (status) {
- dev_err(&pdev->dev,
- "Can't request GPIO for CS %d\n",
- master->cs_gpios[i]);
- goto out_rel_master;
- }
- if (spi->unused_hw_gpio == -1) {
- dev_info(&pdev->dev,
- "Selected unused HW CS#%d for any GPIO CSes\n",
- i);
- spi->unused_hw_gpio = i;
- }
- }
- }
-
-
return status;
-out_rel_master:
- spi_unregister_master(master);
out_rel_pm:
pm_runtime_disable(&pdev->dev);
out_rel_axi_clk:
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 0b2d60d30f69..14f4ea59caff 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1391,6 +1391,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
+ /* ICL-LP */
+ { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0e74cbf9929d..539d6d1a277a 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -49,6 +49,7 @@ struct sh_msiof_spi_priv {
struct platform_device *pdev;
struct sh_msiof_spi_info *info;
struct completion done;
+ struct completion done_txdma;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
unsigned int min_div_pow;
@@ -649,19 +650,21 @@ static int sh_msiof_slave_abort(struct spi_master *master)
p->slave_aborted = true;
complete(&p->done);
+ complete(&p->done_txdma);
return 0;
}
-static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p)
+static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
+ struct completion *x)
{
if (spi_controller_is_slave(p->master)) {
- if (wait_for_completion_interruptible(&p->done) ||
+ if (wait_for_completion_interruptible(x) ||
p->slave_aborted) {
dev_dbg(&p->pdev->dev, "interrupted\n");
return -EINTR;
}
} else {
- if (!wait_for_completion_timeout(&p->done, HZ)) {
+ if (!wait_for_completion_timeout(x, HZ)) {
dev_err(&p->pdev->dev, "timeout\n");
return -ETIMEDOUT;
}
@@ -711,7 +714,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
}
/* wait for tx fifo to be emptied / rx fifo to be filled */
- ret = sh_msiof_wait_for_completion(p);
+ ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
@@ -740,10 +743,7 @@ stop_ier:
static void sh_msiof_dma_complete(void *arg)
{
- struct sh_msiof_spi_priv *p = arg;
-
- sh_msiof_write(p, IER, 0);
- complete(&p->done);
+ complete(arg);
}
static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
@@ -764,7 +764,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
return -EAGAIN;
desc_rx->callback = sh_msiof_dma_complete;
- desc_rx->callback_param = p;
+ desc_rx->callback_param = &p->done;
cookie = dmaengine_submit(desc_rx);
if (dma_submit_error(cookie))
return cookie;
@@ -782,13 +782,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto no_dma_tx;
}
- if (rx) {
- /* No callback */
- desc_tx->callback = NULL;
- } else {
- desc_tx->callback = sh_msiof_dma_complete;
- desc_tx->callback_param = p;
- }
+ desc_tx->callback = sh_msiof_dma_complete;
+ desc_tx->callback_param = &p->done_txdma;
cookie = dmaengine_submit(desc_tx);
if (dma_submit_error(cookie)) {
ret = cookie;
@@ -805,6 +800,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
sh_msiof_write(p, IER, ier_bits);
reinit_completion(&p->done);
+ if (tx)
+ reinit_completion(&p->done_txdma);
p->slave_aborted = false;
/* Now start DMA */
@@ -819,17 +816,24 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto stop_dma;
}
- /* wait for tx/rx DMA completion */
- ret = sh_msiof_wait_for_completion(p);
- if (ret)
- goto stop_reset;
+ if (tx) {
+ /* wait for tx DMA completion */
+ ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
+ if (ret)
+ goto stop_reset;
+ }
- if (!rx) {
- reinit_completion(&p->done);
- sh_msiof_write(p, IER, IER_TEOFE);
+ if (rx) {
+ /* wait for rx DMA completion */
+ ret = sh_msiof_wait_for_completion(p, &p->done);
+ if (ret)
+ goto stop_reset;
+ sh_msiof_write(p, IER, 0);
+ } else {
/* wait for tx fifo to be emptied */
- ret = sh_msiof_wait_for_completion(p);
+ sh_msiof_write(p, IER, IER_TEOFE);
+ ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
}
@@ -1327,6 +1331,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p->min_div_pow = chipdata->min_div_pow;
init_completion(&p->done);
+ init_completion(&p->done_txdma);
p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index a9beeeed812c..393701cfca3c 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -80,27 +80,31 @@ static inline u32 getmiso(struct spi_device *dev)
#include "spi-bitbang-txrx.h"
static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index 197d4b0d81af..df5960bddfe6 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -459,13 +459,13 @@ static int sprd_adi_probe(struct platform_device *pdev)
sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
sadi->ctlr = ctlr;
sadi->dev = &pdev->dev;
- ret = of_hwspin_lock_get_id(np, 0);
+ ret = of_hwspin_lock_get_id_byname(np, "adi");
if (ret < 0) {
dev_err(&pdev->dev, "can not get the hardware spinlock\n");
goto put_ctlr;
}
- sadi->hwlock = hwspin_lock_request_specific(ret);
+ sadi->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!sadi->hwlock) {
ret = -ENXIO;
goto put_ctlr;
@@ -483,7 +483,7 @@ static int sprd_adi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "failed to register SPI controller\n");
- goto free_hwlock;
+ goto put_ctlr;
}
sadi->restart_handler.notifier_call = sprd_adi_restart_handler;
@@ -491,13 +491,11 @@ static int sprd_adi_probe(struct platform_device *pdev)
ret = register_restart_handler(&sadi->restart_handler);
if (ret) {
dev_err(&pdev->dev, "can not register restart handler\n");
- goto free_hwlock;
+ goto put_ctlr;
}
return 0;
-free_hwlock:
- hwspin_lock_free(sadi->hwlock);
put_ctlr:
spi_controller_put(ctlr);
return ret;
@@ -509,7 +507,6 @@ static int sprd_adi_remove(struct platform_device *pdev)
struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
unregister_restart_handler(&sadi->restart_handler);
- hwspin_lock_free(sadi->hwlock);
return 0;
}
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
new file mode 100644
index 000000000000..5a6137fe172d
--- /dev/null
+++ b/drivers/spi/spi-uniphier.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+// spi-uniphier.c - Socionext UniPhier SPI controller driver
+// Copyright 2012 Panasonic Corporation
+// Copyright 2016-2018 Socionext Inc.
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+#define SSI_TIMEOUT_MS 2000
+#define SSI_MAX_CLK_DIVIDER 254
+#define SSI_MIN_CLK_DIVIDER 4
+
+struct uniphier_spi_priv {
+ void __iomem *base;
+ struct clk *clk;
+ struct spi_master *master;
+ struct completion xfer_done;
+
+ int error;
+ unsigned int tx_bytes;
+ unsigned int rx_bytes;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+
+ bool is_save_param;
+ u8 bits_per_word;
+ u16 mode;
+ u32 speed_hz;
+};
+
+#define SSI_CTL 0x00
+#define SSI_CTL_EN BIT(0)
+
+#define SSI_CKS 0x04
+#define SSI_CKS_CKRAT_MASK GENMASK(7, 0)
+#define SSI_CKS_CKPHS BIT(14)
+#define SSI_CKS_CKINIT BIT(13)
+#define SSI_CKS_CKDLY BIT(12)
+
+#define SSI_TXWDS 0x08
+#define SSI_TXWDS_WDLEN_MASK GENMASK(13, 8)
+#define SSI_TXWDS_TDTF_MASK GENMASK(7, 6)
+#define SSI_TXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_RXWDS 0x0c
+#define SSI_RXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_FPS 0x10
+#define SSI_FPS_FSPOL BIT(15)
+#define SSI_FPS_FSTRT BIT(14)
+
+#define SSI_SR 0x14
+#define SSI_SR_RNE BIT(0)
+
+#define SSI_IE 0x18
+#define SSI_IE_RCIE BIT(3)
+#define SSI_IE_RORIE BIT(0)
+
+#define SSI_IS 0x1c
+#define SSI_IS_RXRS BIT(9)
+#define SSI_IS_RCID BIT(3)
+#define SSI_IS_RORID BIT(0)
+
+#define SSI_IC 0x1c
+#define SSI_IC_TCIC BIT(4)
+#define SSI_IC_RCIC BIT(3)
+#define SSI_IC_RORIC BIT(0)
+
+#define SSI_FC 0x20
+#define SSI_FC_TXFFL BIT(12)
+#define SSI_FC_TXFTH_MASK GENMASK(11, 8)
+#define SSI_FC_RXFFL BIT(4)
+#define SSI_FC_RXFTH_MASK GENMASK(3, 0)
+
+#define SSI_TXDR 0x24
+#define SSI_RXDR 0x24
+
+#define SSI_FIFO_DEPTH 8U
+
+static inline unsigned int bytes_per_word(unsigned int bits)
+{
+ return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
+}
+
+static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_IE);
+ val |= mask;
+ writel(val, priv->base + SSI_IE);
+}
+
+static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_IE);
+ val &= ~mask;
+ writel(val, priv->base + SSI_IE);
+}
+
+static void uniphier_spi_set_mode(struct spi_device *spi)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val1, val2;
+
+ /*
+ * clock setting
+ * CKPHS capture timing. 0:rising edge, 1:falling edge
+ * CKINIT clock initial level. 0:low, 1:high
+ * CKDLY clock delay. 0:no delay, 1:delay depending on FSTRT
+ * (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
+ *
+ * frame setting
+	 * FSPOL	frame signal polarity. 0: low, 1: high
+ * FSTRT start frame timing
+ * 0: rising edge of clock, 1: falling edge of clock
+ */
+ switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ /* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
+ val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
+ val2 = 0;
+ break;
+ case SPI_MODE_1:
+ /* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
+ val1 = 0;
+ val2 = SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_2:
+ /* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
+ val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
+ val2 = SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_3:
+ /* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
+ val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
+ val2 = 0;
+ break;
+ }
+
+ if (!(spi->mode & SPI_CS_HIGH))
+ val2 |= SSI_FPS_FSPOL;
+
+ writel(val1, priv->base + SSI_CKS);
+ writel(val2, priv->base + SSI_FPS);
+
+ val1 = 0;
+ if (spi->mode & SPI_LSB_FIRST)
+ val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
+ writel(val1, priv->base + SSI_TXWDS);
+ writel(val1, priv->base + SSI_RXWDS);
+}
+
+static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_TXWDS);
+ val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
+ val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
+ val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_TXWDS);
+
+ val = readl(priv->base + SSI_RXWDS);
+ val &= ~SSI_RXWDS_DTLEN_MASK;
+ val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_RXWDS);
+}
+
+static void uniphier_spi_set_baudrate(struct spi_device *spi,
+ unsigned int speed)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val, ckdiv;
+
+ /*
+	 * The supported dividers are even numbers from 4 to 254 (4, 6, 8, ..., 254).
+	 * Round the divider up so the resulting speed is equal to or lower
+	 * than the one requested.
+ */
+ ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
+ ckdiv = round_up(ckdiv, 2);
+
+ val = readl(priv->base + SSI_CKS);
+ val &= ~SSI_CKS_CKRAT_MASK;
+ val |= ckdiv & SSI_CKS_CKRAT_MASK;
+ writel(val, priv->base + SSI_CKS);
+}
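A worked example of the divider math above, as a standalone sketch; the 50 MHz parent clock and 4 MHz request are assumed values, not from the driver:

#include <stdio.h>

static unsigned int example_ckdiv(unsigned long rate, unsigned int speed)
{
	unsigned int ckdiv = (rate + speed - 1) / speed;	/* DIV_ROUND_UP */

	return (ckdiv + 1) & ~1U;				/* round_up(ckdiv, 2) */
}

int main(void)
{
	/* 50000000 / 4000000 rounds up to 13, then to the even divider 14,
	 * giving ~3.57 MHz, equal to or lower than the requested speed */
	printf("ckdiv = %u\n", example_ckdiv(50000000UL, 4000000U));
	return 0;
}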
+
+static void uniphier_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ priv->error = 0;
+ priv->tx_buf = t->tx_buf;
+ priv->rx_buf = t->rx_buf;
+ priv->tx_bytes = priv->rx_bytes = t->len;
+
+ if (!priv->is_save_param || priv->mode != spi->mode) {
+ uniphier_spi_set_mode(spi);
+ priv->mode = spi->mode;
+ }
+
+ if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
+ uniphier_spi_set_transfer_size(spi, t->bits_per_word);
+ priv->bits_per_word = t->bits_per_word;
+ }
+
+ if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
+ uniphier_spi_set_baudrate(spi, t->speed_hz);
+ priv->speed_hz = t->speed_hz;
+ }
+
+ if (!priv->is_save_param)
+ priv->is_save_param = true;
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_send(struct uniphier_spi_priv *priv)
+{
+ int wsize;
+ u32 val = 0;
+
+ wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
+ priv->tx_bytes -= wsize;
+
+ if (priv->tx_buf) {
+ switch (wsize) {
+ case 1:
+ val = *priv->tx_buf;
+ break;
+ case 2:
+ val = get_unaligned_le16(priv->tx_buf);
+ break;
+ case 4:
+ val = get_unaligned_le32(priv->tx_buf);
+ break;
+ }
+
+ priv->tx_buf += wsize;
+ }
+
+ writel(val, priv->base + SSI_TXDR);
+}
+
+static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
+{
+ int rsize;
+ u32 val;
+
+ rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
+ priv->rx_bytes -= rsize;
+
+ val = readl(priv->base + SSI_RXDR);
+
+ if (priv->rx_buf) {
+ switch (rsize) {
+ case 1:
+ *priv->rx_buf = val;
+ break;
+ case 2:
+ put_unaligned_le16(val, priv->rx_buf);
+ break;
+ case 4:
+ put_unaligned_le32(val, priv->rx_buf);
+ break;
+ }
+
+ priv->rx_buf += rsize;
+ }
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+ unsigned int tx_count;
+ u32 val;
+
+ tx_count = DIV_ROUND_UP(priv->tx_bytes,
+ bytes_per_word(priv->bits_per_word));
+ tx_count = min(tx_count, SSI_FIFO_DEPTH);
+
+ /* set fifo threshold */
+ val = readl(priv->base + SSI_FC);
+ val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, tx_count);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, tx_count);
+ writel(val, priv->base + SSI_FC);
+
+ while (tx_count--)
+ uniphier_spi_send(priv);
+}
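The refill strategy above splits a transfer into FIFO-sized bursts; a standalone sketch with an assumed 10-byte transfer at 8 bits_per_word (one byte per FIFO word):

#include <stdio.h>

#define SSI_FIFO_DEPTH 8U

int main(void)
{
	unsigned int words_left = 10;	/* assumed t->len, 8 bpw */
	unsigned int burst;

	/* mirrors uniphier_spi_fill_tx_fifo(): fill at most the FIFO
	 * depth, then wait for the RX-complete interrupt to refill */
	while (words_left) {
		burst = words_left < SSI_FIFO_DEPTH ?
				words_left : SSI_FIFO_DEPTH;
		printf("fill %u words, wait for RX-complete IRQ\n", burst);
		words_left -= burst;
	}
	return 0;
}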
+
+static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_FPS);
+
+ if (enable)
+ val |= SSI_FPS_FSPOL;
+ else
+ val &= ~SSI_FPS_FSPOL;
+
+ writel(val, priv->base + SSI_FPS);
+}
+
+static int uniphier_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	unsigned long time_left;
+
+ uniphier_spi_setup_transfer(spi, t);
+
+ reinit_completion(&priv->xfer_done);
+
+ uniphier_spi_fill_tx_fifo(priv);
+
+ uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+
+	time_left = wait_for_completion_timeout(&priv->xfer_done,
+					msecs_to_jiffies(SSI_TIMEOUT_MS));
+
+	uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+
+	/* wait_for_completion_timeout() returns 0 on timeout, never < 0 */
+	if (!time_left)
+		return -ETIMEDOUT;
+
+ return priv->error;
+}
+
+static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ writel(SSI_CTL_EN, priv->base + SSI_CTL);
+
+ return 0;
+}
+
+static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ writel(0, priv->base + SSI_CTL);
+
+ return 0;
+}
+
+static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
+{
+ struct uniphier_spi_priv *priv = dev_id;
+ u32 val, stat;
+
+ stat = readl(priv->base + SSI_IS);
+ val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
+ writel(val, priv->base + SSI_IC);
+
+ /* rx fifo overrun */
+ if (stat & SSI_IS_RORID) {
+ priv->error = -EIO;
+ goto done;
+ }
+
+ /* rx complete */
+ if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
+ while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
+ (priv->rx_bytes - priv->tx_bytes) > 0)
+ uniphier_spi_recv(priv);
+
+ if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
+ (priv->rx_bytes != priv->tx_bytes)) {
+ priv->error = -EIO;
+ goto done;
+		} else if (priv->rx_bytes == 0) {
+			goto done;
+		}
+
+ /* next tx transfer */
+ uniphier_spi_fill_tx_fifo(priv);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+
+done:
+ complete(&priv->xfer_done);
+ return IRQ_HANDLED;
+}
+
+static int uniphier_spi_probe(struct platform_device *pdev)
+{
+ struct uniphier_spi_priv *priv;
+ struct spi_master *master;
+ struct resource *res;
+ unsigned long clk_rate;
+ int irq;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+ priv->is_save_param = false;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_master_put;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_master_put;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ ret = irq;
+ goto out_disable_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
+ 0, "uniphier-spi", priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto out_disable_clk;
+ }
+
+ init_completion(&priv->xfer_done);
+
+ clk_rate = clk_get_rate(priv->clk);
+
+ master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
+ master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+
+ master->set_cs = uniphier_spi_set_cs;
+ master->transfer_one = uniphier_spi_transfer_one;
+ master->prepare_transfer_hardware
+ = uniphier_spi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware
+ = uniphier_spi_unprepare_transfer_hardware;
+ master->num_chipselect = 1;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto out_disable_clk;
+
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+out_master_put:
+ spi_master_put(master);
+ return ret;
+}
+
+static int uniphier_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_spi_match[] = {
+ { .compatible = "socionext,uniphier-scssi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_spi_match);
+
+static struct platform_driver uniphier_spi_driver = {
+ .probe = uniphier_spi_probe,
+ .remove = uniphier_spi_remove,
+ .driver = {
+ .name = "uniphier-spi",
+ .of_match_table = uniphier_spi_match,
+ },
+};
+module_platform_driver(uniphier_spi_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index be6155cba9de..8ce04f829a80 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -54,7 +54,7 @@ static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
}
static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
- u32 v, u8 bits)
+ u32 v, u8 bits, unsigned flags)
{
struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index c574dd210500..df30e1323252 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -89,27 +89,6 @@ config SSB_HOST_SOC
If unsure, say N
-config SSB_SILENT
- bool "No SSB kernel messages"
- depends on SSB && EXPERT
- help
- This option turns off all Sonics Silicon Backplane printks.
- Note that you won't be able to identify problems, once
- messages are turned off.
- This might only be desired for production kernels on
- embedded devices to reduce the kernel size.
-
- Say N
-
-config SSB_DEBUG
- bool "SSB debugging"
- depends on SSB && !SSB_SILENT
- help
- This turns on additional runtime checks and debugging
- messages. Turn this on for SSB troubleshooting.
-
- If unsure, say N
-
config SSB_SERIAL
bool
depends on SSB
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index bed2fedeb057..9c7316b5685f 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -10,12 +10,12 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/ssb/ssb.h>
-#include "ssb_private.h"
-
static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) },
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
index d70568ea02d5..f51f150307df 100644
--- a/drivers/ssb/bridge_pcmcia_80211.c
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -6,6 +6,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -15,8 +17,6 @@
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
-#include "ssb_private.h"
-
static const struct pcmcia_device_id ssb_host_pcmcia_tbl[] = {
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
@@ -70,7 +70,7 @@ err_disable:
err_kfree_ssb:
kfree(ssb);
out_error:
- ssb_err("Initialization failed (%d, %d)\n", res, err);
+ dev_err(&dev->dev, "Initialization failed (%d, %d)\n", res, err);
return err;
}
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 7cb7d2c8fd86..99a4656d113d 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -9,14 +9,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/bcm47xx_wdt.h>
-#include "ssb_private.h"
-
/* Clock sources */
enum ssb_clksrc {
@@ -56,7 +56,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
if (cc->capabilities & SSB_CHIPCO_CAP_PMU)
return; /* PMU controls clockmode, separated function needed */
- SSB_WARN_ON(ccdev->id.revision >= 20);
+ WARN_ON(ccdev->id.revision >= 20);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
if (ccdev->id.revision < 6)
@@ -111,7 +111,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
}
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
@@ -164,7 +164,7 @@ static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max)
divisor = 32;
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
} else if (cc->dev->id.revision < 10) {
switch (clocksrc) {
@@ -277,7 +277,7 @@ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
minfreq = chipco_pctl_clockfreqlimit(cc, 0);
pll_on_delay = chipco_read32(cc, SSB_CHIPCO_PLLONDELAY);
tmp = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq;
- SSB_WARN_ON(tmp & ~0xFFFF);
+ WARN_ON(tmp & ~0xFFFF);
cc->fast_pwrup_delay = tmp;
}
@@ -354,7 +354,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
- ssb_dbg("chipcommon status is 0x%x\n", cc->status);
+ dev_dbg(cc->dev->dev, "chipcommon status is 0x%x\n", cc->status);
if (cc->dev->id.revision >= 20) {
chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index c5352ea4821e..0f60e90ded26 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
@@ -17,8 +19,6 @@
#include <linux/bcm47xx_nvram.h>
#endif
-#include "ssb_private.h"
-
static u32 ssb_chipco_pll_read(struct ssb_chipcommon *cc, u32 offset)
{
chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset);
@@ -110,7 +110,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
return;
}
- ssb_info("Programming PLL to %u.%03u MHz\n",
+ dev_info(cc->dev->dev, "Programming PLL to %u.%03u MHz\n",
crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */
@@ -128,7 +128,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
~(1 << SSB_PMURES_5354_BB_PLL_PU));
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
for (i = 1500; i; i--) {
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
@@ -138,7 +138,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
}
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
- ssb_emerg("Failed to turn the PLL off!\n");
+ dev_emerg(cc->dev->dev, "Failed to turn the PLL off!\n");
/* Set PDIV in PLL control 0. */
pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
@@ -249,7 +249,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
return;
}
- ssb_info("Programming PLL to %u.%03u MHz\n",
+ dev_info(cc->dev->dev, "Programming PLL to %u.%03u MHz\n",
crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */
@@ -265,7 +265,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
buffer_strength = 0x222222;
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
for (i = 1500; i; i--) {
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
@@ -275,7 +275,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
}
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
- ssb_emerg("Failed to turn the PLL off!\n");
+ dev_emerg(cc->dev->dev, "Failed to turn the PLL off!\n");
/* Set p1div and p2div. */
pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
@@ -349,7 +349,7 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
case 43222:
break;
default:
- ssb_err("ERROR: PLL init unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PLL init unknown for device %04X\n",
bus->chip_id);
}
}
@@ -471,7 +471,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
max_msk = 0xFFFFF;
break;
default:
- ssb_err("ERROR: PMU resource config unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU resource config unknown for device %04X\n",
bus->chip_id);
}
@@ -501,7 +501,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
~(depend_tab[i].depend));
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
}
@@ -524,7 +524,7 @@ void ssb_pmu_init(struct ssb_chipcommon *cc)
pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
- ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n",
+ dev_dbg(cc->dev->dev, "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
if (cc->pmu.rev == 1)
@@ -568,12 +568,12 @@ void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
mask = 0x3F;
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
return;
}
break;
case 0x4312:
- if (SSB_WARN_ON(id != LDO_PAREF))
+ if (WARN_ON(id != LDO_PAREF))
return;
addr = 0;
shift = 21;
@@ -636,7 +636,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
case 0x5354:
return ssb_pmu_get_alp_clock_clk0(cc);
default:
- ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU alp clock unknown for device %04X\n",
bus->chip_id);
return 0;
}
@@ -651,7 +651,7 @@ u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
/* 5354 chip uses a non programmable PLL of frequency 240MHz */
return 240000000;
default:
- ssb_err("ERROR: PMU cpu clock unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU cpu clock unknown for device %04X\n",
bus->chip_id);
return 0;
}
@@ -665,7 +665,7 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
case 0x5354:
return 120000000;
default:
- ssb_err("ERROR: PMU controlclock unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU controlclock unknown for device %04X\n",
bus->chip_id);
return 0;
}
@@ -705,9 +705,9 @@ void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
break;
default:
- ssb_printk(KERN_ERR PFX
- "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
- cc->dev->bus->chip_id);
+ dev_err(cc->dev->dev,
+ "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+ cc->dev->bus->chip_id);
return;
}
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
index 937fc31971a7..fac0e6828288 100644
--- a/drivers/ssb/driver_chipcommon_sflash.c
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -5,10 +5,10 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
-#include <linux/ssb/ssb.h>
-
#include "ssb_private.h"
+#include <linux/ssb/ssb.h>
+
static struct resource ssb_sflash_resource = {
.name = "ssb_sflash",
.start = SSB_FLASH2,
@@ -80,7 +80,7 @@ static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode)
return;
cpu_relax();
}
- pr_err("SFLASH control command failed (timeout)!\n");
+ dev_err(cc->dev->dev, "SFLASH control command failed (timeout)!\n");
}
/* Initialize serial flash access */
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index 59385fdab5b0..06b68dd6e022 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -10,12 +10,12 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
-#include "ssb_private.h"
-
static inline u32 extif_read32(struct ssb_extif *extif, u16 offset)
{
diff --git a/drivers/ssb/driver_gige.c b/drivers/ssb/driver_gige.c
index e9734051e3c4..ebee6b0e3c34 100644
--- a/drivers/ssb/driver_gige.c
+++ b/drivers/ssb/driver_gige.c
@@ -242,7 +242,7 @@ static int ssb_gige_probe(struct ssb_device *sdev,
bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
{
if (!pdev->resource[0].name)
- return 0;
+ return false;
return (strcmp(pdev->resource[0].name, SSB_GIGE_MEM_RES_NAME) == 0);
}
EXPORT_SYMBOL(pdev_is_ssb_gige_core);
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 796e22037bc4..e809dae4c470 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -15,8 +17,6 @@
#include <linux/export.h>
#include <linux/ssb/ssb.h>
-#include "ssb_private.h"
-
/**************************************************
* Shared
@@ -461,7 +461,7 @@ int ssb_gpio_init(struct ssb_bus *bus)
else if (ssb_extif_available(&bus->extif))
return ssb_gpio_extif_init(bus);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
return -1;
}
@@ -473,7 +473,7 @@ int ssb_gpio_unregister(struct ssb_bus *bus)
gpiochip_remove(&bus->gpio);
return 0;
} else {
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return -1;
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index f87efef42252..1ca2ac5ef2b8 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/mtd/physmap.h>
@@ -19,8 +21,6 @@
#include <linux/bcm47xx_nvram.h>
#endif
-#include "ssb_private.h"
-
static const char * const part_probes[] = { "bcm47xxpart", NULL };
static struct physmap_flash_data ssb_pflash_data = {
@@ -170,14 +170,15 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
ssb_write32(mdev, SSB_IPSFLAG, irqflag);
}
- ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
+ dev_dbg(dev->dev, "set_irq: core 0x%04x, irq %d => %d\n",
dev->id.coreid, oldirq+2, irq+2);
}
static void print_irq(struct ssb_device *dev, unsigned int irq)
{
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
- ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
+ dev_dbg(dev->dev,
+ "core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
dev->id.coreid,
irq_name[0], irq == 0 ? "*" : " ",
irq_name[1], irq == 1 ? "*" : " ",
@@ -229,11 +230,11 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
case SSB_CHIPCO_FLASHT_STSER:
case SSB_CHIPCO_FLASHT_ATSER:
- pr_debug("Found serial flash\n");
+ dev_dbg(mcore->dev->dev, "Found serial flash\n");
ssb_sflash_init(&bus->chipco);
break;
case SSB_CHIPCO_FLASHT_PARA:
- pr_debug("Found parallel flash\n");
+ dev_dbg(mcore->dev->dev, "Found parallel flash\n");
pflash->present = true;
pflash->window = SSB_FLASH2;
pflash->window_size = SSB_FLASH2_SZ;
@@ -299,7 +300,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
if (!mcore->dev)
return; /* We don't have a MIPS core */
- ssb_dbg("Initializing MIPS core...\n");
+ dev_dbg(mcore->dev->dev, "Initializing MIPS core...\n");
bus = mcore->dev->bus;
hz = ssb_clockspeed(bus);
@@ -347,7 +348,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
break;
}
}
- ssb_dbg("after irq reconfiguration\n");
+ dev_dbg(mcore->dev->dev, "after irq reconfiguration\n");
dump_irq(bus);
ssb_mips_serial_init(mcore);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 5fe1c22e289b..6a5622e0ded5 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -8,14 +8,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/ssb/ssb_embedded.h>
-#include "ssb_private.h"
-
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address);
static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data);
static u16 ssb_pcie_mdio_read(struct ssb_pcicore *pc, u8 device, u8 address);
@@ -115,7 +115,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc,
u32 addr, val;
void __iomem *mmio;
- SSB_WARN_ON(!pc->hostmode);
+ WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
addr = get_cfgspace_addr(pc, bus, dev, func, off);
@@ -161,7 +161,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc,
u32 addr, val = 0;
void __iomem *mmio;
- SSB_WARN_ON(!pc->hostmode);
+ WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
addr = get_cfgspace_addr(pc, bus, dev, func, off);
@@ -263,7 +263,7 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d)
return -ENODEV;
}
- ssb_info("PCI: Fixing up device %s\n", pci_name(d));
+ dev_info(&d->dev, "PCI: Fixing up device %s\n", pci_name(d));
/* Fix up interrupt lines */
d->irq = ssb_mips_irq(extpci_core->dev) + 2;
@@ -284,12 +284,12 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0)
return;
- ssb_info("PCI: Fixing up bridge %s\n", pci_name(dev));
+ dev_info(&dev->dev, "PCI: Fixing up bridge %s\n", pci_name(dev));
/* Enable PCI bridge bus mastering and memory space */
pci_set_master(dev);
if (pcibios_enable_device(dev, ~0) < 0) {
- ssb_err("PCI: SSB bridge enable failed\n");
+ dev_err(&dev->dev, "PCI: SSB bridge enable failed\n");
return;
}
@@ -298,7 +298,8 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
/* Make sure our latency is high enough to handle the devices behind us */
lat = 168;
- ssb_info("PCI: Fixing latency timer of device %s to %u\n",
+ dev_info(&dev->dev,
+ "PCI: Fixing latency timer of device %s to %u\n",
pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
@@ -322,7 +323,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
return;
extpci_core = pc;
- ssb_dbg("PCIcore in host mode found\n");
+ dev_dbg(pc->dev->dev, "PCIcore in host mode found\n");
/* Reset devices on the external PCI bus */
val = SSB_PCICORE_CTL_RST_OE;
val |= SSB_PCICORE_CTL_CLK_OE;
@@ -337,7 +338,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
udelay(1); /* Assertion time demanded by the PCI standard */
if (pc->dev->bus->has_cardbus_slot) {
- ssb_dbg("CardBus slot detected\n");
+ dev_dbg(pc->dev->dev, "CardBus slot detected\n");
pc->cardbusmode = 1;
/* GPIO 1 resets the bridge */
ssb_gpio_out(pc->dev->bus, 1, 1);
@@ -701,7 +702,7 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
/* Calculate the "coremask" for the device. */
coremask = (1 << dev->core_index);
- SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
+ WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
if (err)
goto out;
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index 55e101115038..8254ed25e063 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -9,6 +9,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/ssb/ssb.h>
@@ -17,8 +19,6 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/pci.h>
-#include "ssb_private.h"
-
int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
{
@@ -57,8 +57,8 @@ int ssb_watchdog_register(struct ssb_bus *bus)
bus->busnumber, &wdt,
sizeof(wdt));
if (IS_ERR(pdev)) {
- ssb_dbg("can not register watchdog device, err: %li\n",
- PTR_ERR(pdev));
+ pr_debug("can not register watchdog device, err: %li\n",
+ PTR_ERR(pdev));
return PTR_ERR(pdev);
}
@@ -77,7 +77,7 @@ u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_in(&bus->extif, mask);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -95,7 +95,7 @@ u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_out(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -113,7 +113,7 @@ u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_outen(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -145,7 +145,7 @@ u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_intmask(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -163,7 +163,7 @@ u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_polarity(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
diff --git a/drivers/ssb/host_soc.c b/drivers/ssb/host_soc.c
index d62992dc08b2..3b438480515c 100644
--- a/drivers/ssb/host_soc.c
+++ b/drivers/ssb/host_soc.c
@@ -8,11 +8,11 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/bcm47xx_nvram.h>
#include <linux/ssb/ssb.h>
-#include "ssb_private.h"
-
static u8 ssb_host_soc_read8(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
@@ -61,7 +61,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u16): {
__le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
@@ -72,7 +72,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u32): {
__le32 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
*buf = (__force __le32)__raw_readl(addr);
buf++;
@@ -81,7 +81,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
#endif /* CONFIG_SSB_BLOCKIO */
@@ -134,7 +134,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u16): {
const __le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
@@ -145,7 +145,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u32): {
const __le32 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
__raw_writel((__force u32)(*buf), addr);
buf++;
@@ -154,7 +154,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
#endif /* CONFIG_SSB_BLOCKIO */
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 116594413f66..0a26984acb2c 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -209,7 +209,7 @@ int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx)
memset(ctx, 0, sizeof(*ctx));
ctx->bus = bus;
- SSB_WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen));
+ WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen));
for (i = 0; i < bus->nr_devices; i++) {
sdev = ssb_device_get(&bus->devices[i]);
@@ -220,7 +220,7 @@ int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx)
continue;
}
sdrv = drv_to_ssb_drv(sdev->dev->driver);
- if (SSB_WARN_ON(!sdrv->remove))
+ if (WARN_ON(!sdrv->remove))
continue;
sdrv->remove(sdev);
ctx->device_frozen[i] = 1;
@@ -248,15 +248,16 @@ int ssb_devices_thaw(struct ssb_freeze_context *ctx)
continue;
sdev = &bus->devices[i];
- if (SSB_WARN_ON(!sdev->dev || !sdev->dev->driver))
+ if (WARN_ON(!sdev->dev || !sdev->dev->driver))
continue;
sdrv = drv_to_ssb_drv(sdev->dev->driver);
- if (SSB_WARN_ON(!sdrv || !sdrv->probe))
+ if (WARN_ON(!sdrv || !sdrv->probe))
continue;
err = sdrv->probe(sdev, &sdev->id);
if (err) {
- ssb_err("Failed to thaw device %s\n",
+ dev_err(sdev->dev,
+ "Failed to thaw device %s\n",
dev_name(sdev->dev));
result = err;
}
@@ -431,9 +432,9 @@ void ssb_bus_unregister(struct ssb_bus *bus)
err = ssb_gpio_unregister(bus);
if (err == -EBUSY)
- ssb_dbg("Some GPIOs are still in use\n");
+ pr_debug("Some GPIOs are still in use\n");
else if (err)
- ssb_dbg("Can not unregister GPIO driver: %i\n", err);
+ pr_debug("Can not unregister GPIO driver: %i\n", err);
ssb_buses_lock();
ssb_devices_unregister(bus);
@@ -518,7 +519,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
sdev->dev = dev;
err = device_register(dev);
if (err) {
- ssb_err("Could not register %s\n", dev_name(dev));
+ pr_err("Could not register %s\n", dev_name(dev));
/* Set dev to NULL to not unregister
* dev on error unwinding. */
sdev->dev = NULL;
@@ -576,9 +577,9 @@ static int ssb_attach_queued_buses(void)
err = ssb_gpio_init(bus);
if (err == -ENOTSUPP)
- ssb_dbg("GPIO driver not activated\n");
+ pr_debug("GPIO driver not activated\n");
else if (err)
- ssb_dbg("Error registering GPIO driver: %i\n", err);
+ pr_debug("Error registering GPIO driver: %i\n", err);
ssb_bus_may_powerdown(bus);
@@ -707,10 +708,12 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci)
err = ssb_bus_register(bus, ssb_pci_get_invariants, 0);
if (!err) {
- ssb_info("Sonics Silicon Backplane found on PCI device %s\n",
+ dev_info(&host_pci->dev,
+ "Sonics Silicon Backplane found on PCI device %s\n",
dev_name(&host_pci->dev));
} else {
- ssb_err("Failed to register PCI version of SSB with error %d\n",
+ dev_err(&host_pci->dev,
+ "Failed to register PCI version of SSB with error %d\n",
err);
}
@@ -731,7 +734,8 @@ int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr);
if (!err) {
- ssb_info("Sonics Silicon Backplane found on PCMCIA device %s\n",
+ dev_info(&pcmcia_dev->dev,
+ "Sonics Silicon Backplane found on PCMCIA device %s\n",
pcmcia_dev->devname);
}
@@ -752,7 +756,8 @@ int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
if (!err) {
- ssb_info("Sonics Silicon Backplane found on SDIO device %s\n",
+ dev_info(&func->dev,
+ "Sonics Silicon Backplane found on SDIO device %s\n",
sdio_func_id(func));
}
@@ -771,8 +776,8 @@ int ssb_bus_host_soc_register(struct ssb_bus *bus, unsigned long baseaddr)
err = ssb_bus_register(bus, ssb_host_soc_get_invariants, baseaddr);
if (!err) {
- ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n",
- baseaddr);
+ pr_info("Sonics Silicon Backplane found at address 0x%08lX\n",
+ baseaddr);
}
return err;
@@ -856,13 +861,13 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
case SSB_PLLTYPE_2: /* 48Mhz, 4 dividers */
n1 += SSB_CHIPCO_CLK_T2_BIAS;
n2 += SSB_CHIPCO_CLK_T2_BIAS;
- SSB_WARN_ON(!((n1 >= 2) && (n1 <= 7)));
- SSB_WARN_ON(!((n2 >= 5) && (n2 <= 23)));
+ WARN_ON(!((n1 >= 2) && (n1 <= 7)));
+ WARN_ON(!((n2 >= 5) && (n2 <= 23)));
break;
case SSB_PLLTYPE_5: /* 25Mhz, 4 dividers */
return 100000000;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
switch (plltype) {
@@ -911,9 +916,9 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
m1 += SSB_CHIPCO_CLK_T2_BIAS;
m2 += SSB_CHIPCO_CLK_T2M2_BIAS;
m3 += SSB_CHIPCO_CLK_T2_BIAS;
- SSB_WARN_ON(!((m1 >= 2) && (m1 <= 7)));
- SSB_WARN_ON(!((m2 >= 3) && (m2 <= 10)));
- SSB_WARN_ON(!((m3 >= 2) && (m3 <= 7)));
+ WARN_ON(!((m1 >= 2) && (m1 <= 7)));
+ WARN_ON(!((m2 >= 3) && (m2 <= 10)));
+ WARN_ON(!((m3 >= 2) && (m3 <= 7)));
if (!(mc & SSB_CHIPCO_CLK_T2MC_M1BYP))
clock /= m1;
@@ -923,7 +928,7 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
clock /= m3;
return clock;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return 0;
}
@@ -1057,9 +1062,9 @@ static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
}
udelay(10);
}
- printk(KERN_ERR PFX "Timeout waiting for bitmask %08X on "
- "register %04X to %s.\n",
- bitmask, reg, (set ? "set" : "clear"));
+ dev_err(dev->dev,
+ "Timeout waiting for bitmask %08X on register %04X to %s\n",
+ bitmask, reg, set ? "set" : "clear");
return -ETIMEDOUT;
}
@@ -1164,12 +1169,10 @@ int ssb_bus_may_powerdown(struct ssb_bus *bus)
if (err)
goto error;
out:
-#ifdef CONFIG_SSB_DEBUG
bus->powered_up = 0;
-#endif
return err;
error:
- ssb_err("Bus powerdown failed\n");
+ pr_err("Bus powerdown failed\n");
goto out;
}
EXPORT_SYMBOL(ssb_bus_may_powerdown);
@@ -1183,16 +1186,14 @@ int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl)
if (err)
goto error;
-#ifdef CONFIG_SSB_DEBUG
bus->powered_up = 1;
-#endif
mode = dynamic_pctl ? SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST;
ssb_chipco_set_clockmode(&bus->chipco, mode);
return 0;
error:
- ssb_err("Bus powerup failed\n");
+ pr_err("Bus powerup failed\n");
return err;
}
EXPORT_SYMBOL(ssb_bus_powerup);
@@ -1237,15 +1238,15 @@ u32 ssb_admatch_base(u32 adm)
base = (adm & SSB_ADM_BASE0);
break;
case SSB_ADM_TYPE1:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
base = (adm & SSB_ADM_BASE1);
break;
case SSB_ADM_TYPE2:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
base = (adm & SSB_ADM_BASE2);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return base;
@@ -1261,15 +1262,15 @@ u32 ssb_admatch_size(u32 adm)
size = ((adm & SSB_ADM_SZ0) >> SSB_ADM_SZ0_SHIFT);
break;
case SSB_ADM_TYPE1:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
size = ((adm & SSB_ADM_SZ1) >> SSB_ADM_SZ1_SHIFT);
break;
case SSB_ADM_TYPE2:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
size = ((adm & SSB_ADM_SZ2) >> SSB_ADM_SZ2_SHIFT);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
size = (1 << (size + 1));
@@ -1300,19 +1301,19 @@ static int __init ssb_modinit(void)
err = b43_pci_ssb_bridge_init();
if (err) {
- ssb_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
+ pr_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
err = ssb_host_pcmcia_init();
if (err) {
- ssb_err("PCMCIA host initialization failed\n");
+ pr_err("PCMCIA host initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
err = ssb_gige_init();
if (err) {
- ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
+ pr_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 77b551da5728..84807a9b4b13 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -15,14 +15,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ssb_private.h"
-
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_PCICORESWITCH_DEBUG 0
@@ -56,7 +56,7 @@ int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
}
return 0;
error:
- ssb_err("Failed to switch to core %u\n", coreidx);
+ pr_err("Failed to switch to core %u\n", coreidx);
return -ENODEV;
}
@@ -67,9 +67,8 @@ int ssb_pci_switch_core(struct ssb_bus *bus,
unsigned long flags;
#if SSB_VERBOSE_PCICORESWITCH_DEBUG
- ssb_info("Switching to %s core, index %d\n",
- ssb_core_name(dev->id.coreid),
- dev->core_index);
+ pr_info("Switching to %s core, index %d\n",
+ ssb_core_name(dev->id.coreid), dev->core_index);
#endif
spin_lock_irqsave(&bus->bar_lock, flags);
@@ -161,7 +160,7 @@ out:
return err;
err_pci:
- printk(KERN_ERR PFX "Error: ssb_pci_xtal() could not access PCI config space!\n");
+ pr_err("Error: ssb_pci_xtal() could not access PCI config space!\n");
err = -EBUSY;
goto out;
}
@@ -286,7 +285,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
u32 spromctl;
u16 size = bus->sprom_size;
- ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
+ pr_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
if (err)
goto err_ctlreg;
@@ -294,17 +293,17 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl);
if (err)
goto err_ctlreg;
- ssb_notice("[ 0%%");
+ pr_notice("[ 0%%");
msleep(500);
for (i = 0; i < size; i++) {
if (i == size / 4)
- ssb_cont("25%%");
+ pr_cont("25%%");
else if (i == size / 2)
- ssb_cont("50%%");
+ pr_cont("50%%");
else if (i == (size * 3) / 4)
- ssb_cont("75%%");
+ pr_cont("75%%");
else if (i % 2)
- ssb_cont(".");
+ pr_cont(".");
writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
mmiowb();
msleep(20);
@@ -317,12 +316,12 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
if (err)
goto err_ctlreg;
msleep(500);
- ssb_cont("100%% ]\n");
- ssb_notice("SPROM written\n");
+ pr_cont("100%% ]\n");
+ pr_notice("SPROM written\n");
return 0;
err_ctlreg:
- ssb_err("Could not access SPROM control register.\n");
+ pr_err("Could not access SPROM control register.\n");
return err;
}
@@ -816,7 +815,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
memset(out, 0, sizeof(*out));
out->revision = in[size - 1] & 0x00FF;
- ssb_dbg("SPROM revision %d detected\n", out->revision);
+ pr_debug("SPROM revision %d detected\n", out->revision);
memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
memset(out->et1mac, 0xFF, 6);
@@ -825,7 +824,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
* number stored in the SPROM.
* Always extract r1. */
out->revision = 1;
- ssb_dbg("SPROM treated as revision %d\n", out->revision);
+ pr_debug("SPROM treated as revision %d\n", out->revision);
}
switch (out->revision) {
@@ -842,8 +841,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
sprom_extract_r8(out, in);
break;
default:
- ssb_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
- out->revision);
+ pr_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
+ out->revision);
out->revision = 1;
sprom_extract_r123(out, in);
}
@@ -863,7 +862,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
u16 *buf;
if (!ssb_is_sprom_available(bus)) {
- ssb_err("No SPROM available!\n");
+ pr_err("No SPROM available!\n");
return -ENODEV;
}
if (bus->chipco.dev) { /* can be unavailable! */
@@ -882,7 +881,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
} else {
bus->sprom_offset = SSB_SPROM_BASE1;
}
- ssb_dbg("SPROM offset is 0x%x\n", bus->sprom_offset);
+ pr_debug("SPROM offset is 0x%x\n", bus->sprom_offset);
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
@@ -907,16 +906,16 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
* available for this device in some other storage */
err = ssb_fill_sprom_with_fallback(bus, sprom);
if (err) {
- ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
- err);
+ pr_warn("WARNING: Using fallback SPROM failed (err %d)\n",
+ err);
goto out_free;
} else {
- ssb_dbg("Using SPROM revision %d provided by platform\n",
- sprom->revision);
+ pr_debug("Using SPROM revision %d provided by platform\n",
+ sprom->revision);
err = 0;
goto out_free;
}
- ssb_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
+ pr_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
}
}
err = sprom_extract(bus, sprom, buf, bus->sprom_size);
@@ -947,14 +946,12 @@ out:
return err;
}
-#ifdef CONFIG_SSB_DEBUG
static int ssb_pci_assert_buspower(struct ssb_bus *bus)
{
if (likely(bus->powered_up))
return 0;
- printk(KERN_ERR PFX "FATAL ERROR: Bus powered down "
- "while accessing PCI MMIO space\n");
+ pr_err("FATAL ERROR: Bus powered down while accessing PCI MMIO space\n");
if (bus->power_warn_count <= 10) {
bus->power_warn_count++;
dump_stack();
@@ -962,12 +959,6 @@ static int ssb_pci_assert_buspower(struct ssb_bus *bus)
return -ENODEV;
}
-#else /* DEBUG */
-static inline int ssb_pci_assert_buspower(struct ssb_bus *bus)
-{
- return 0;
-}
-#endif /* DEBUG */
static u8 ssb_pci_read8(struct ssb_device *dev, u16 offset)
{
@@ -1026,15 +1017,15 @@ static void ssb_pci_block_read(struct ssb_device *dev, void *buffer,
ioread8_rep(addr, buffer, count);
break;
case sizeof(u16):
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
ioread16_rep(addr, buffer, count >> 1);
break;
case sizeof(u32):
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
ioread32_rep(addr, buffer, count >> 2);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return;
@@ -1100,15 +1091,15 @@ static void ssb_pci_block_write(struct ssb_device *dev, const void *buffer,
iowrite8_rep(addr, buffer, count);
break;
case sizeof(u16):
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
iowrite16_rep(addr, buffer, count >> 1);
break;
case sizeof(u32):
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
iowrite32_rep(addr, buffer, count >> 2);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
#endif /* CONFIG_SSB_BLOCKIO */
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index f03422bbf087..567013f8a8be 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -18,8 +20,6 @@
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
-#include "ssb_private.h"
-
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 0
@@ -143,7 +143,7 @@ int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
return 0;
error:
- ssb_err("Failed to switch to core %u\n", coreidx);
+ pr_err("Failed to switch to core %u\n", coreidx);
return err;
}
@@ -152,9 +152,8 @@ static int ssb_pcmcia_switch_core(struct ssb_bus *bus, struct ssb_device *dev)
int err;
#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG
- ssb_info("Switching to %s core, index %d\n",
- ssb_core_name(dev->id.coreid),
- dev->core_index);
+ pr_info("Switching to %s core, index %d\n",
+ ssb_core_name(dev->id.coreid), dev->core_index);
#endif
err = ssb_pcmcia_switch_coreidx(bus, dev->core_index);
@@ -170,7 +169,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
int err;
u8 val;
- SSB_WARN_ON((seg != 0) && (seg != 1));
+ WARN_ON((seg != 0) && (seg != 1));
while (1) {
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_MEMSEG, seg);
if (err)
@@ -190,7 +189,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
return 0;
error:
- ssb_err("Failed to switch pcmcia segment\n");
+ pr_err("Failed to switch pcmcia segment\n");
return err;
}
@@ -300,7 +299,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u16): {
__le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
@@ -311,7 +310,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u32): {
__le16 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
@@ -322,7 +321,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
unlock:
spin_unlock_irqrestore(&bus->bar_lock, flags);
@@ -400,7 +399,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u16): {
const __le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
@@ -411,7 +410,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u32): {
const __le16 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
@@ -422,7 +421,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
unlock:
mmiowb();
@@ -547,39 +546,39 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
bool failed = 0;
size_t size = SSB_PCMCIA_SPROM_SIZE;
- ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
+ pr_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN);
if (err) {
- ssb_notice("Could not enable SPROM write access\n");
+ pr_notice("Could not enable SPROM write access\n");
return -EBUSY;
}
- ssb_notice("[ 0%%");
+ pr_notice("[ 0%%");
msleep(500);
for (i = 0; i < size; i++) {
if (i == size / 4)
- ssb_cont("25%%");
+ pr_cont("25%%");
else if (i == size / 2)
- ssb_cont("50%%");
+ pr_cont("50%%");
else if (i == (size * 3) / 4)
- ssb_cont("75%%");
+ pr_cont("75%%");
else if (i % 2)
- ssb_cont(".");
+ pr_cont(".");
err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
if (err) {
- ssb_notice("Failed to write to SPROM\n");
+ pr_notice("Failed to write to SPROM\n");
failed = 1;
break;
}
}
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
if (err) {
- ssb_notice("Could not disable SPROM write access\n");
+ pr_notice("Could not disable SPROM write access\n");
failed = 1;
}
msleep(500);
if (!failed) {
- ssb_cont("100%% ]\n");
- ssb_notice("SPROM written\n");
+ pr_cont("100%% ]\n");
+ pr_notice("SPROM written\n");
}
return failed ? -EBUSY : 0;
@@ -693,9 +692,8 @@ static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
return -ENOSPC; /* continue with next entry */
error:
- ssb_err(
- "PCMCIA: Failed to fetch device invariants: %s\n",
- error_description);
+ pr_err("PCMCIA: Failed to fetch device invariants: %s\n",
+ error_description);
return -ENODEV;
}
@@ -715,8 +713,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
ssb_pcmcia_get_mac, sprom);
if (res != 0) {
- ssb_err(
- "PCMCIA: Failed to fetch MAC address\n");
+ pr_err("PCMCIA: Failed to fetch MAC address\n");
return -ENODEV;
}
@@ -726,8 +723,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
if ((res == 0) || (res == -ENOSPC))
return 0;
- ssb_err(
- "PCMCIA: Failed to fetch device invariants\n");
+ pr_err("PCMCIA: Failed to fetch device invariants\n");
return -ENODEV;
}
@@ -836,6 +832,6 @@ int ssb_pcmcia_init(struct ssb_bus *bus)
return 0;
error:
- ssb_err("Failed to initialize PCMCIA host device\n");
+ pr_err("Failed to initialize PCMCIA host device\n");
return err;
}
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index b9429df583eb..6ceee98ed6ff 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -12,6 +12,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/pci.h>
@@ -20,8 +22,6 @@
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
-#include "ssb_private.h"
-
const char *ssb_core_name(u16 coreid)
{
@@ -125,7 +125,7 @@ static u16 pcidev_to_chipid(struct pci_dev *pci_dev)
chipid_fallback = 0x4401;
break;
default:
- ssb_err("PCI-ID not in fallback list\n");
+ dev_err(&pci_dev->dev, "PCI-ID not in fallback list\n");
}
return chipid_fallback;
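
One conversion in this file differs from the rest: with a pci_dev in scope,
dev_err() is preferred over pr_err() because it tags the message with the
specific device instead of the bare "ssb:" prefix. Output shape, illustrative
only:

  dev_err(&pci_dev->dev, "PCI-ID not in fallback list\n");
  /* e.g. "ssb 0000:02:00.0: PCI-ID not in fallback list" */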
@@ -151,7 +151,7 @@ static u8 chipid_to_nrcores(u16 chipid)
case 0x4704:
return 9;
default:
- ssb_err("CHIPID not in nrcores fallback list\n");
+ pr_err("CHIPID not in nrcores fallback list\n");
}
return 1;
@@ -210,7 +210,7 @@ void ssb_iounmap(struct ssb_bus *bus)
#ifdef CONFIG_SSB_PCIHOST
pci_iounmap(bus->host_pci, bus->mmio);
#else
- SSB_BUG_ON(1); /* Can't reach this code. */
+ WARN_ON(1); /* Can't reach this code. */
#endif
break;
case SSB_BUSTYPE_SDIO:
@@ -236,7 +236,7 @@ static void __iomem *ssb_ioremap(struct ssb_bus *bus,
#ifdef CONFIG_SSB_PCIHOST
mmio = pci_iomap(bus->host_pci, 0, ~0UL);
#else
- SSB_BUG_ON(1); /* Can't reach this code. */
+ WARN_ON(1); /* Can't reach this code. */
#endif
break;
case SSB_BUSTYPE_SDIO:
@@ -318,13 +318,13 @@ int ssb_bus_scan(struct ssb_bus *bus,
bus->chip_package = 0;
}
}
- ssb_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
- bus->chip_id, bus->chip_rev, bus->chip_package);
+ pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
+ bus->chip_id, bus->chip_rev, bus->chip_package);
if (!bus->nr_devices)
bus->nr_devices = chipid_to_nrcores(bus->chip_id);
if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
- ssb_err("More than %d ssb cores found (%d)\n",
- SSB_MAX_NR_CORES, bus->nr_devices);
+ pr_err("More than %d ssb cores found (%d)\n",
+ SSB_MAX_NR_CORES, bus->nr_devices);
goto err_unmap;
}
if (bus->bustype == SSB_BUSTYPE_SSB) {
@@ -355,18 +355,16 @@ int ssb_bus_scan(struct ssb_bus *bus,
dev->bus = bus;
dev->ops = bus->ops;
- printk(KERN_DEBUG PFX
- "Core %d found: %s "
- "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n",
- i, ssb_core_name(dev->id.coreid),
- dev->id.coreid, dev->id.revision, dev->id.vendor);
+ pr_debug("Core %d found: %s (cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n",
+ i, ssb_core_name(dev->id.coreid),
+ dev->id.coreid, dev->id.revision, dev->id.vendor);
switch (dev->id.coreid) {
case SSB_DEV_80211:
nr_80211_cores++;
if (nr_80211_cores > 1) {
if (!we_support_multiple_80211_cores(bus)) {
- ssb_dbg("Ignoring additional 802.11 core\n");
+ pr_debug("Ignoring additional 802.11 core\n");
continue;
}
}
@@ -374,7 +372,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
case SSB_DEV_EXTIF:
#ifdef CONFIG_SSB_DRIVER_EXTIF
if (bus->extif.dev) {
- ssb_warn("WARNING: Multiple EXTIFs found\n");
+ pr_warn("WARNING: Multiple EXTIFs found\n");
break;
}
bus->extif.dev = dev;
@@ -382,7 +380,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
break;
case SSB_DEV_CHIPCOMMON:
if (bus->chipco.dev) {
- ssb_warn("WARNING: Multiple ChipCommon found\n");
+ pr_warn("WARNING: Multiple ChipCommon found\n");
break;
}
bus->chipco.dev = dev;
@@ -391,7 +389,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
case SSB_DEV_MIPS_3302:
#ifdef CONFIG_SSB_DRIVER_MIPS
if (bus->mipscore.dev) {
- ssb_warn("WARNING: Multiple MIPS cores found\n");
+ pr_warn("WARNING: Multiple MIPS cores found\n");
break;
}
bus->mipscore.dev = dev;
@@ -412,7 +410,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
}
}
if (bus->pcicore.dev) {
- ssb_warn("WARNING: Multiple PCI(E) cores found\n");
+ pr_warn("WARNING: Multiple PCI(E) cores found\n");
break;
}
bus->pcicore.dev = dev;
diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
index 2278e43614bd..7fe0afb42234 100644
--- a/drivers/ssb/sdio.c
+++ b/drivers/ssb/sdio.c
@@ -12,14 +12,14 @@
*
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <linux/mmc/sdio_func.h>
-#include "ssb_private.h"
-
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 0
@@ -316,18 +316,18 @@ static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer,
break;
}
case sizeof(u16): {
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
error = sdio_readsb(bus->host_sdio, buffer, offset, count);
break;
}
case sizeof(u32): {
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
error = sdio_readsb(bus->host_sdio, buffer, offset, count);
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
if (!error)
goto out;
@@ -423,18 +423,18 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
(void *)buffer, count);
break;
case sizeof(u16):
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
error = sdio_writesb(bus->host_sdio, offset,
(void *)buffer, count);
break;
case sizeof(u32):
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
error = sdio_writesb(bus->host_sdio, offset,
(void *)buffer, count);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
if (!error)
goto out;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index e753fbe302a7..4f028a80d6c4 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -127,13 +127,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
goto out_kfree;
err = ssb_devices_freeze(bus, &freeze);
if (err) {
- ssb_err("SPROM write: Could not freeze all devices\n");
+ pr_err("SPROM write: Could not freeze all devices\n");
goto out_unlock;
}
res = sprom_write(bus, sprom);
err = ssb_devices_thaw(&freeze);
if (err)
- ssb_err("SPROM write: Could not thaw all devices\n");
+ pr_err("SPROM write: Could not thaw all devices\n");
out_unlock:
mutex_unlock(&bus->sprom_mutex);
out_kfree:
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index ef9ac8efcab4..5f31bdfbe77f 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -2,47 +2,14 @@
#ifndef LINUX_SSB_PRIVATE_H_
#define LINUX_SSB_PRIVATE_H_
+#define PFX "ssb: "
+#define pr_fmt(fmt) PFX fmt
+
#include <linux/ssb/ssb.h>
#include <linux/types.h>
#include <linux/bcm47xx_wdt.h>
-#define PFX "ssb: "
-
-#ifdef CONFIG_SSB_SILENT
-# define ssb_printk(fmt, ...) \
- do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
-#else
-# define ssb_printk(fmt, ...) \
- printk(fmt, ##__VA_ARGS__)
-#endif /* CONFIG_SSB_SILENT */
-
-#define ssb_emerg(fmt, ...) ssb_printk(KERN_EMERG PFX fmt, ##__VA_ARGS__)
-#define ssb_err(fmt, ...) ssb_printk(KERN_ERR PFX fmt, ##__VA_ARGS__)
-#define ssb_warn(fmt, ...) ssb_printk(KERN_WARNING PFX fmt, ##__VA_ARGS__)
-#define ssb_notice(fmt, ...) ssb_printk(KERN_NOTICE PFX fmt, ##__VA_ARGS__)
-#define ssb_info(fmt, ...) ssb_printk(KERN_INFO PFX fmt, ##__VA_ARGS__)
-#define ssb_cont(fmt, ...) ssb_printk(KERN_CONT fmt, ##__VA_ARGS__)
-
-/* dprintk: Debugging printk; vanishes for non-debug compilation */
-#ifdef CONFIG_SSB_DEBUG
-# define ssb_dbg(fmt, ...) \
- ssb_printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__)
-#else
-# define ssb_dbg(fmt, ...) \
- do { if (0) printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__); } while (0)
-#endif
-
-#ifdef CONFIG_SSB_DEBUG
-# define SSB_WARN_ON(x) WARN_ON(x)
-# define SSB_BUG_ON(x) BUG_ON(x)
-#else
-static inline int __ssb_do_nothing(int x) { return x; }
-# define SSB_WARN_ON(x) __ssb_do_nothing(unlikely(!!(x)))
-# define SSB_BUG_ON(x) __ssb_do_nothing(unlikely(!!(x)))
-#endif
-
-
/* pci.c */
#ifdef CONFIG_SSB_PCIHOST
extern int ssb_pci_switch_core(struct ssb_bus *bus,
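
With the ssb_printk() family and the SSB_WARN_ON()/SSB_BUG_ON() wrappers
deleted, every ssb source file gets the same two behaviours, provided it
includes this header before anything that drags in <linux/printk.h>. A hedged
sketch of the net effect:

  #include "ssb_private.h"        /* defines pr_fmt() -> "ssb: " prefix */
  #include <linux/ssb/ssb.h>

  static void demo(void)
  {
          pr_info("scanning\n");  /* printed as "ssb: scanning" */
          WARN_ON(1);             /* checked in every build; the old
                                   * SSB_WARN_ON() compiled to nothing
                                   * without CONFIG_SSB_DEBUG */
  }

A side effect worth noting: the blanket CONFIG_SSB_SILENT switch disappears
along with ssb_printk(), and debug output is now governed by the generic
pr_debug() machinery (dynamic debug or DEBUG) instead of CONFIG_SSB_DEBUG.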
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 75a480497d22..1abf76be2aa8 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -84,16 +84,12 @@ source "drivers/staging/dgnc/Kconfig"
source "drivers/staging/gs_fpgaboot/Kconfig"
-source "drivers/staging/skein/Kconfig"
-
source "drivers/staging/unisys/Kconfig"
source "drivers/staging/clocking-wizard/Kconfig"
source "drivers/staging/fbtft/Kconfig"
-source "drivers/staging/fsl-mc/Kconfig"
-
source "drivers/staging/fsl-dpaa2/Kconfig"
source "drivers/staging/wilc1000/Kconfig"
@@ -106,15 +102,13 @@ source "drivers/staging/greybus/Kconfig"
source "drivers/staging/vc04_services/Kconfig"
-source "drivers/staging/typec/Kconfig"
-
source "drivers/staging/vboxvideo/Kconfig"
source "drivers/staging/pi433/Kconfig"
-source "drivers/staging/mt7621-pinctrl/Kconfig"
+source "drivers/staging/mt7621-pci/Kconfig"
-source "drivers/staging/mt7621-gpio/Kconfig"
+source "drivers/staging/mt7621-pinctrl/Kconfig"
source "drivers/staging/mt7621-spi/Kconfig"
@@ -126,4 +120,10 @@ source "drivers/staging/mt7621-eth/Kconfig"
source "drivers/staging/mt7621-dts/Kconfig"
+source "drivers/staging/gasket/Kconfig"
+
+source "drivers/staging/axis-fifo/Kconfig"
+
+source "drivers/staging/erofs/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index e84959a8a684..ab0cbe8815b1 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -2,7 +2,6 @@
# Makefile for staging directory
obj-y += media/
-obj-y += typec/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
@@ -33,11 +32,9 @@ obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_DGNC) += dgnc/
obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
-obj-$(CONFIG_CRYPTO_SKEIN) += skein/
obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
-obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
@@ -48,9 +45,11 @@ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_PI433) += pi433/
obj-$(CONFIG_SOC_MT7621) += mt7621-pci/
obj-$(CONFIG_SOC_MT7621) += mt7621-pinctrl/
-obj-$(CONFIG_SOC_MT7621) += mt7621-gpio/
obj-$(CONFIG_SOC_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
obj-$(CONFIG_SOC_MT7621) += mt7621-mmc/
obj-$(CONFIG_SOC_MT7621) += mt7621-eth/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
+obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
+obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
+obj-$(CONFIG_EROFS_FS) += erofs/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a1a0025b59e0..a880b5c6c6c3 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -178,7 +178,7 @@ static int range_alloc(struct ashmem_area *asma,
struct ashmem_range *range;
range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
- if (unlikely(!range))
+ if (!range)
return -ENOMEM;
range->asma = asma;
@@ -246,11 +246,11 @@ static int ashmem_open(struct inode *inode, struct file *file)
int ret;
ret = generic_file_open(inode, file);
- if (unlikely(ret))
+ if (ret)
return ret;
asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
- if (unlikely(!asma))
+ if (!asma)
return -ENOMEM;
INIT_LIST_HEAD(&asma->unpinned_list);
@@ -361,14 +361,20 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&ashmem_mutex);
/* user needs to SET_SIZE before mapping */
- if (unlikely(!asma->size)) {
+ if (!asma->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* requested mapping size larger than object size */
+ if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
ret = -EINVAL;
goto out;
}
/* requested protection bits must match our allowed protection mask */
- if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
- calc_vm_prot_bits(PROT_MASK, 0))) {
+ if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
+ calc_vm_prot_bits(PROT_MASK, 0)) {
ret = -EPERM;
goto out;
}
@@ -402,6 +408,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
fput(asma->file);
goto out;
}
+ } else {
+ vma_set_anonymous(vma);
}
if (vma->vm_file)
@@ -444,9 +452,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
- vfs_fallocate(range->asma->file,
- FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- start, end - start);
+ range->asma->file->f_op->fallocate(range->asma->file,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
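
The shrinker now calls the file's ->fallocate op directly instead of going
through vfs_fallocate(). The hunk itself doesn't state why; a plausible
reading (an assumption, not taken from the patch) is that the VFS wrapper's
permission checks and write/freeze protection are unwanted in reclaim
context. The shape of the direct call, as used above:

  struct file *f = range->asma->file;

  if (f->f_op->fallocate)         /* ashmem files are shmem-backed */
          f->f_op->fallocate(f,
                             FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                             start, end - start);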
@@ -486,7 +494,7 @@ static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
mutex_lock(&ashmem_mutex);
/* the user can only remove, not add, protection bits */
- if (unlikely((asma->prot_mask & prot) != prot)) {
+ if ((asma->prot_mask & prot) != prot) {
ret = -EINVAL;
goto out;
}
@@ -524,7 +532,7 @@ static int set_name(struct ashmem_area *asma, void __user *name)
local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file))
+ if (asma->file)
ret = -EINVAL;
else
strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
@@ -563,7 +571,7 @@ static int get_name(struct ashmem_area *asma, void __user *name)
* Now we are just copying from the stack variable to userland
* No lock held
*/
- if (unlikely(copy_to_user(name, local_name, len)))
+ if (copy_to_user(name, local_name, len))
ret = -EFAULT;
return ret;
}
@@ -701,25 +709,25 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
size_t pgstart, pgend;
int ret = -EINVAL;
- if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+ if (copy_from_user(&pin, p, sizeof(pin)))
return -EFAULT;
mutex_lock(&ashmem_mutex);
- if (unlikely(!asma->file))
+ if (!asma->file)
goto out_unlock;
/* per custom, you can pass zero for len to mean "everything onward" */
if (!pin.len)
pin.len = PAGE_ALIGN(asma->size) - pin.offset;
- if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+ if ((pin.offset | pin.len) & ~PAGE_MASK)
goto out_unlock;
- if (unlikely(((__u32)-1) - pin.offset < pin.len))
+ if (((__u32)-1) - pin.offset < pin.len)
goto out_unlock;
- if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+ if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
goto out_unlock;
pgstart = pin.offset / PAGE_SIZE;
@@ -856,7 +864,7 @@ static int __init ashmem_init(void)
ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
sizeof(struct ashmem_area),
0, 0, NULL);
- if (unlikely(!ashmem_area_cachep)) {
+ if (!ashmem_area_cachep) {
pr_err("failed to create slab cache\n");
goto out;
}
@@ -864,13 +872,13 @@ static int __init ashmem_init(void)
ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
sizeof(struct ashmem_range),
0, 0, NULL);
- if (unlikely(!ashmem_range_cachep)) {
+ if (!ashmem_range_cachep) {
pr_err("failed to create slab cache\n");
goto out_free1;
}
ret = misc_register(&ashmem_misc);
- if (unlikely(ret)) {
+ if (ret) {
pr_err("failed to register misc device!\n");
goto out_free2;
}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 9d1109e43ed4..99073325b0c0 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -201,7 +201,7 @@ struct ion_dma_buf_attachment {
struct list_head list;
};
-static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
+static int ion_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct ion_dma_buf_attachment *a;
@@ -219,7 +219,7 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
}
a->table = table;
- a->dev = dev;
+ a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
attachment->priv = a;
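
This tracks a dma-buf API change: the .attach callback no longer receives a
struct device * argument, and implementations read the attaching device from
the attachment itself. Sketch of the new callback shape (names other than the
dma-buf types are illustrative):

  static int my_attach(struct dma_buf *dmabuf,
                       struct dma_buf_attachment *attachment)
  {
          struct my_attachment *a;

          a = kzalloc(sizeof(*a), GFP_KERNEL);
          if (!a)
                  return -ENOMEM;

          a->dev = attachment->dev;   /* device now comes from here */
          attachment->priv = a;
          return 0;
  }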
@@ -375,8 +375,6 @@ static const struct dma_buf_ops dma_buf_ops = {
.detach = ion_dma_buf_detatch,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
- .map_atomic = ion_dma_buf_kmap,
- .unmap_atomic = ion_dma_buf_kunmap,
.map = ion_dma_buf_kmap,
.unmap = ion_dma_buf_kunmap,
};
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 49718c96bf9e..3fafd013d80a 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,7 +39,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
- pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
+ pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
if (!pages)
return -ENOMEM;
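
cma_alloc()'s final parameter changed type as well: it is now a bool (no_warn,
suppressing the allocation-failure warning) rather than gfp flags, so the old
GFP_KERNEL argument would have silently read as "true". Passing false keeps
the warning behaviour:

  pages = cma_alloc(cma_heap->cma, nr_pages, align, false /* no_warn */);
  if (!pages)
          return -ENOMEM;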
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index e8c440329708..31db510018a9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
struct page **tmp = pages;
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
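
Returning ERR_PTR(-ENOMEM) instead of NULL changes the contract for callers
of ion_heap_map_kernel(): failures must now be detected with IS_ERR(), not a
NULL check. A conforming caller looks like:

  void *vaddr = ion_heap_map_kernel(heap, buffer);

  if (IS_ERR(vaddr))
          return PTR_ERR(vaddr);  /* propagates -ENOMEM etc. */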
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 806beda1040b..22571abcaa4e 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -405,7 +405,7 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
int ret = 0;
struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
atomic_t *address = NULL;
- struct timespec ts;
+ ktime_t wake_time;
/* Ensure that the offset is aligned */
if (arg->offset & (sizeof(uint32_t) - 1))
@@ -433,14 +433,13 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
* We do things this way to flatten differences between 32 bit
* and 64 bit timespecs.
*/
- ts.tv_sec = arg->wake_time_sec;
- ts.tv_nsec = arg->wake_time_nsec;
-
- if (!timespec_valid(&ts))
+ if (arg->wake_time_nsec >= NSEC_PER_SEC)
return -EINVAL;
+ wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);
+
hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
- hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts),
+ hrtimer_set_expires_range_ns(&to->timer, wake_time,
current->timer_slack_ns);
hrtimer_init_sleeper(to, current);
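
With the intermediate timespec gone, the only range check left is that the
nanosecond part stays below NSEC_PER_SEC; ktime_set() then folds the two
fields into a single 64-bit ktime_t, which is what flattens the 32/64-bit
layout differences the retained comment refers to. The whole path reduces to:

  if (arg->wake_time_nsec >= NSEC_PER_SEC)
          return -EINVAL;

  wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);
  hrtimer_set_expires_range_ns(&to->timer, wake_time,
                               current->timer_slack_ns);

Unlike timespec_valid(), this does not reject a negative seconds value;
presumably wake_time_sec is unsigned in the ioctl ABI (not visible in this
hunk).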
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
new file mode 100644
index 000000000000..687537203d9c
--- /dev/null
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -0,0 +1,9 @@
+#
+# "Xilinx AXI-Stream FIFO IP core driver"
+#
+config XIL_AXIS_FIFO
+ tristate "Xilinx AXI-Stream FIFO IP core driver"
+ default n
+ help
+	  This adds a driver for the Xilinx AXI-Stream FIFO IP core.
diff --git a/drivers/staging/axis-fifo/Makefile b/drivers/staging/axis-fifo/Makefile
new file mode 100644
index 000000000000..fe62cd1ac5de
--- /dev/null
+++ b/drivers/staging/axis-fifo/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. b/drivers/staging/axis-fifo/README
index e69de29bb2d1..e69de29bb2d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
+++ b/drivers/staging/axis-fifo/README
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
new file mode 100644
index 000000000000..abeee0ecc122
--- /dev/null
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
+ *
+ * Copyright (C) 2018 Jacob Feder
+ *
+ * Authors: Jacob Feder <jacobsfeder@gmail.com>
+ *
+ * See Xilinx PG080 document for IP details
+ */
+
+/* ----------------------------
+ * includes
+ * ----------------------------
+ */
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/spinlock_types.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/param.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/jiffies.h>
+
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+/* ----------------------------
+ * driver parameters
+ * ----------------------------
+ */
+
+#define DRIVER_NAME "axis_fifo"
+
+#define READ_BUF_SIZE 128U /* read buffer length in words */
+#define WRITE_BUF_SIZE 128U /* write buffer length in words */
+
+/* ----------------------------
+ * IP register offsets
+ * ----------------------------
+ */
+
+#define XLLF_ISR_OFFSET 0x00000000 /* Interrupt Status */
+#define XLLF_IER_OFFSET 0x00000004 /* Interrupt Enable */
+
+#define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */
+#define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */
+#define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */
+#define XLLF_TLR_OFFSET 0x00000014 /* Transmit Length */
+
+#define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */
+#define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */
+#define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */
+#define XLLF_RLR_OFFSET 0x00000024 /* Receive Length */
+#define XLLF_SRR_OFFSET 0x00000028 /* Local Link Reset */
+#define XLLF_TDR_OFFSET 0x0000002C /* Transmit Destination */
+#define XLLF_RDR_OFFSET 0x00000030 /* Receive Destination */
+
+/* ----------------------------
+ * reset register masks
+ * ----------------------------
+ */
+
+#define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */
+#define XLLF_TDFR_RESET_MASK 0x000000a5 /* Transmit reset value */
+#define XLLF_SRR_RESET_MASK 0x000000a5 /* Local Link reset value */
+
+/* ----------------------------
+ * interrupt masks
+ * ----------------------------
+ */
+
+#define XLLF_INT_RPURE_MASK 0x80000000 /* Receive under-read */
+#define XLLF_INT_RPORE_MASK 0x40000000 /* Receive over-read */
+#define XLLF_INT_RPUE_MASK 0x20000000 /* Receive underrun (empty) */
+#define XLLF_INT_TPOE_MASK 0x10000000 /* Transmit overrun */
+#define XLLF_INT_TC_MASK 0x08000000 /* Transmit complete */
+#define XLLF_INT_RC_MASK 0x04000000 /* Receive complete */
+#define XLLF_INT_TSE_MASK 0x02000000 /* Transmit length mismatch */
+#define XLLF_INT_TRC_MASK 0x01000000 /* Transmit reset complete */
+#define XLLF_INT_RRC_MASK 0x00800000 /* Receive reset complete */
+#define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full */
+#define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */
+#define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full */
+#define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */
+#define XLLF_INT_ALL_MASK 0xfff80000 /* All the ints */
+#define XLLF_INT_ERROR_MASK 0xf2000000 /* Error status ints */
+#define XLLF_INT_RXERROR_MASK 0xe0000000 /* Receive Error status ints */
+#define XLLF_INT_TXERROR_MASK 0x12000000 /* Transmit Error status ints */
+
+/* ----------------------------
+ * globals
+ * ----------------------------
+ */
+
+static struct class *axis_fifo_driver_class; /* char device class */
+
+static int read_timeout = 1000; /* ms to wait before read() times out */
+static int write_timeout = 1000; /* ms to wait before write() times out */
+
+/* ----------------------------
+ * module command-line arguments
+ * ----------------------------
+ */
+
+module_param(read_timeout, int, 0444);
+MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout");
+module_param(write_timeout, int, 0444);
+MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout");
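+
+/* Usage sketch (illustrative; the module name follows the Makefile):
+ *
+ *   modprobe axis-fifo read_timeout=-1 write_timeout=500
+ *
+ * both parameters are mode 0444, so they are load-time only; a value
+ * of -1 selects MAX_SCHEDULE_TIMEOUT below, i.e. block indefinitely.
+ */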
+
+/* ----------------------------
+ * types
+ * ----------------------------
+ */
+
+struct axis_fifo {
+ int irq; /* interrupt */
+ struct resource *mem; /* physical memory */
+ void __iomem *base_addr; /* kernel space memory */
+
+ unsigned int rx_fifo_depth; /* max words in the receive fifo */
+ unsigned int tx_fifo_depth; /* max words in the transmit fifo */
+ int has_rx_fifo; /* whether the IP has the rx fifo enabled */
+ int has_tx_fifo; /* whether the IP has the tx fifo enabled */
+
+	wait_queue_head_t read_queue; /* wait queue for asynchronous read */
+	spinlock_t read_queue_lock; /* lock for reading waitqueue */
+	wait_queue_head_t write_queue; /* wait queue for asynchronous write */
+	spinlock_t write_queue_lock; /* lock for writing waitqueue */
+ unsigned int write_flags; /* write file flags */
+ unsigned int read_flags; /* read file flags */
+
+ struct device *dt_device; /* device created from the device tree */
+ struct device *device; /* device associated with char_device */
+ dev_t devt; /* our char device number */
+ struct cdev char_device; /* our char device */
+};
+
+/* ----------------------------
+ * sysfs entries
+ * ----------------------------
+ */
+
+static ssize_t sysfs_write(struct device *dev, const char *buf,
+ size_t count, unsigned int addr_offset)
+{
+ struct axis_fifo *fifo = dev_get_drvdata(dev);
+ unsigned long tmp;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc < 0)
+ return rc;
+
+ iowrite32(tmp, fifo->base_addr + addr_offset);
+
+ return count;
+}
+
+static ssize_t sysfs_read(struct device *dev, char *buf,
+ unsigned int addr_offset)
+{
+ struct axis_fifo *fifo = dev_get_drvdata(dev);
+ unsigned int read_val;
+ unsigned int len;
+ char tmp[32];
+
+ read_val = ioread32(fifo->base_addr + addr_offset);
+ len = snprintf(tmp, sizeof(tmp), "0x%x\n", read_val);
+ memcpy(buf, tmp, len);
+
+ return len;
+}
+
+static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET);
+}
+
+static ssize_t isr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_read(dev, buf, XLLF_ISR_OFFSET);
+}
+
+static DEVICE_ATTR_RW(isr);
+
+static ssize_t ier_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_IER_OFFSET);
+}
+
+static ssize_t ier_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_read(dev, buf, XLLF_IER_OFFSET);
+}
+
+static DEVICE_ATTR_RW(ier);
+
+static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET);
+}
+
+static DEVICE_ATTR_WO(tdfr);
+
+static ssize_t tdfv_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_read(dev, buf, XLLF_TDFV_OFFSET);
+}
+
+static DEVICE_ATTR_RO(tdfv);
+
+static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET);
+}
+
+static DEVICE_ATTR_WO(tdfd);
+
+static ssize_t tlr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET);
+}
+
+static DEVICE_ATTR_WO(tlr);
+
+static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET);
+}
+
+static DEVICE_ATTR_WO(rdfr);
+
+static ssize_t rdfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_read(dev, buf, XLLF_RDFO_OFFSET);
+}
+
+static DEVICE_ATTR_RO(rdfo);
+
+static ssize_t rdfd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_read(dev, buf, XLLF_RDFD_OFFSET);
+}
+
+static DEVICE_ATTR_RO(rdfd);
+
+static ssize_t rlr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_read(dev, buf, XLLF_RLR_OFFSET);
+}
+
+static DEVICE_ATTR_RO(rlr);
+
+static ssize_t srr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET);
+}
+
+static DEVICE_ATTR_WO(srr);
+
+static ssize_t tdr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET);
+}
+
+static DEVICE_ATTR_WO(tdr);
+
+static ssize_t rdr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_read(dev, buf, XLLF_RDR_OFFSET);
+}
+
+static DEVICE_ATTR_RO(rdr);
+
+static struct attribute *axis_fifo_attrs[] = {
+ &dev_attr_isr.attr,
+ &dev_attr_ier.attr,
+ &dev_attr_tdfr.attr,
+ &dev_attr_tdfv.attr,
+ &dev_attr_tdfd.attr,
+ &dev_attr_tlr.attr,
+ &dev_attr_rdfr.attr,
+ &dev_attr_rdfo.attr,
+ &dev_attr_rdfd.attr,
+ &dev_attr_rlr.attr,
+ &dev_attr_srr.attr,
+ &dev_attr_tdr.attr,
+ &dev_attr_rdr.attr,
+ NULL,
+};
+
+static const struct attribute_group axis_fifo_attrs_group = {
+ .name = "ip_registers",
+ .attrs = axis_fifo_attrs,
+};
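+
+/* Access sketch for the group above (paths illustrative; the class and
+ * device names are created in probe() and module init):
+ *
+ *   cat /sys/class/<class>/axis_fifo_<addr>/ip_registers/rdfo
+ *   echo 0xa5 > /sys/class/<class>/axis_fifo_<addr>/ip_registers/srr
+ *
+ * reads return "0x..." per sysfs_read(); writes accept any value
+ * kstrtoul() can parse, per sysfs_write().
+ */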
+
+/* ----------------------------
+ * implementation
+ * ----------------------------
+ */
+
+static void reset_ip_core(struct axis_fifo *fifo)
+{
+ iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
+ iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
+ iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
+ iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
+ XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
+ XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
+ fifo->base_addr + XLLF_IER_OFFSET);
+ iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
+}
+
+/* reads a single packet from the fifo as dictated by the tlast signal */
+static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
+ size_t bytes_available;
+ unsigned int words_available;
+ unsigned int copied;
+ unsigned int copy;
+ unsigned int i;
+ int ret;
+ u32 tmp_buf[READ_BUF_SIZE];
+
+ if (fifo->read_flags & O_NONBLOCK) {
+ /* opened in non-blocking mode
+ * return if there are no packets available
+ */
+ if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET))
+ return -EAGAIN;
+ } else {
+ /* opened in blocking mode
+ * wait for a packet available interrupt (or timeout)
+ * if nothing is currently available
+ */
+ spin_lock_irq(&fifo->read_queue_lock);
+ ret = wait_event_interruptible_lock_irq_timeout(
+ fifo->read_queue,
+ ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
+ fifo->read_queue_lock,
+ (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
+ MAX_SCHEDULE_TIMEOUT);
+ spin_unlock_irq(&fifo->read_queue_lock);
+
+ if (ret == 0) {
+ /* timeout occurred */
+			dev_dbg(fifo->dt_device, "read timeout\n");
+ return -EAGAIN;
+ } else if (ret == -ERESTARTSYS) {
+ /* signal received */
+ return -ERESTARTSYS;
+ } else if (ret < 0) {
+			dev_err(fifo->dt_device, "wait_event_interruptible_lock_irq_timeout() error in read (ret=%i)\n",
+				ret);
+ return ret;
+ }
+ }
+
+ bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
+ if (!bytes_available) {
+ dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
+ reset_ip_core(fifo);
+ return -EIO;
+ }
+
+ if (bytes_available > len) {
+ dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
+ bytes_available, len);
+ reset_ip_core(fifo);
+ return -EINVAL;
+ }
+
+ if (bytes_available % sizeof(u32)) {
+ /* this probably can't happen unless IP
+ * registers were previously mishandled
+ */
+ dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
+ reset_ip_core(fifo);
+ return -EIO;
+ }
+
+ words_available = bytes_available / sizeof(u32);
+
+ /* read data into an intermediate buffer, copying the contents
+ * to userspace when the buffer is full
+ */
+ copied = 0;
+ while (words_available > 0) {
+ copy = min(words_available, READ_BUF_SIZE);
+
+ for (i = 0; i < copy; i++) {
+ tmp_buf[i] = ioread32(fifo->base_addr +
+ XLLF_RDFD_OFFSET);
+ }
+
+ if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
+ copy * sizeof(u32))) {
+ reset_ip_core(fifo);
+ return -EFAULT;
+ }
+
+ copied += copy;
+ words_available -= copy;
+ }
+
+ return bytes_available;
+}
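+
+/* Userspace sketch for the read path above (fd and MAX_WORDS are
+ * hypothetical): one read() returns exactly one AXI-Stream packet, and
+ * a buffer smaller than the pending packet resets the core and yields
+ * -EINVAL, so size it for the largest packet expected:
+ *
+ *   uint32_t pkt[MAX_WORDS];
+ *   ssize_t n = read(fd, pkt, sizeof(pkt));  // n = packet size, bytes
+ */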
+
+static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
+ unsigned int words_to_write;
+ unsigned int copied;
+ unsigned int copy;
+ unsigned int i;
+ int ret;
+ u32 tmp_buf[WRITE_BUF_SIZE];
+
+ if (len % sizeof(u32)) {
+ dev_err(fifo->dt_device,
+ "tried to send a packet that isn't word-aligned\n");
+ return -EINVAL;
+ }
+
+ words_to_write = len / sizeof(u32);
+
+ if (!words_to_write) {
+ dev_err(fifo->dt_device,
+ "tried to send a packet of length 0\n");
+ return -EINVAL;
+ }
+
+ if (words_to_write > fifo->tx_fifo_depth) {
+ dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
+ words_to_write, fifo->tx_fifo_depth);
+ return -EINVAL;
+ }
+
+ if (fifo->write_flags & O_NONBLOCK) {
+ /* opened in non-blocking mode
+ * return if there is not enough room available in the fifo
+ */
+ if (words_to_write > ioread32(fifo->base_addr +
+ XLLF_TDFV_OFFSET)) {
+ return -EAGAIN;
+ }
+ } else {
+ /* opened in blocking mode */
+
+ /* wait for an interrupt (or timeout) if there isn't
+ * currently enough room in the fifo
+ */
+ spin_lock_irq(&fifo->write_queue_lock);
+ ret = wait_event_interruptible_lock_irq_timeout(
+ fifo->write_queue,
+ ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
+ >= words_to_write,
+ fifo->write_queue_lock,
+ (write_timeout >= 0) ? msecs_to_jiffies(write_timeout) :
+ MAX_SCHEDULE_TIMEOUT);
+ spin_unlock_irq(&fifo->write_queue_lock);
+
+ if (ret == 0) {
+ /* timeout occurred */
+ dev_dbg(fifo->dt_device, "write timeout\n");
+ return -EAGAIN;
+ } else if (ret == -ERESTARTSYS) {
+ /* signal received */
+ return -ERESTARTSYS;
+ } else if (ret < 0) {
+ /* unknown error */
+			dev_err(fifo->dt_device,
+				"wait_event_interruptible_lock_irq_timeout() error in write (ret=%i)\n",
+				ret);
+ return ret;
+ }
+ }
+
+ /* write data from an intermediate buffer into the fifo IP, refilling
+ * the buffer with userspace data as needed
+ */
+ copied = 0;
+ while (words_to_write > 0) {
+ copy = min(words_to_write, WRITE_BUF_SIZE);
+
+ if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
+ copy * sizeof(u32))) {
+ reset_ip_core(fifo);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < copy; i++)
+ iowrite32(tmp_buf[i], fifo->base_addr +
+ XLLF_TDFD_OFFSET);
+
+ copied += copy;
+ words_to_write -= copy;
+ }
+
+ /* write packet size to fifo */
+ iowrite32(copied * sizeof(u32), fifo->base_addr + XLLF_TLR_OFFSET);
+
+ return (ssize_t)copied * sizeof(u32);
+}
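+
+/* Userspace sketch for the write path above (fd is hypothetical): len
+ * must be a non-zero multiple of 4 bytes and fit within tx_fifo_depth
+ * words; one successful write() queues exactly one packet:
+ *
+ *   uint32_t pkt[64];
+ *   ...fill pkt...
+ *   ssize_t n = write(fd, pkt, sizeof(pkt));
+ */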
+
+static irqreturn_t axis_fifo_irq(int irq, void *dw)
+{
+ struct axis_fifo *fifo = (struct axis_fifo *)dw;
+ unsigned int pending_interrupts;
+
+ do {
+ pending_interrupts = ioread32(fifo->base_addr +
+ XLLF_IER_OFFSET) &
+ ioread32(fifo->base_addr
+ + XLLF_ISR_OFFSET);
+ if (pending_interrupts & XLLF_INT_RC_MASK) {
+ /* packet received */
+
+ /* wake the reader process if it is waiting */
+ wake_up(&fifo->read_queue);
+
+ /* clear interrupt */
+ iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_TC_MASK) {
+ /* packet sent */
+
+ /* wake the writer process if it is waiting */
+ wake_up(&fifo->write_queue);
+
+ iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
+ /* transmit fifo programmable full */
+
+ iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
+ /* transmit fifo programmable empty */
+
+ iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
+ /* receive fifo programmable full */
+
+ iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
+ /* receive fifo programmable empty */
+
+ iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_TRC_MASK) {
+ /* transmit reset complete interrupt */
+
+ iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_RRC_MASK) {
+ /* receive reset complete interrupt */
+
+ iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
+ /* receive fifo under-read error interrupt */
+ dev_err(fifo->dt_device,
+ "receive under-read interrupt\n");
+
+ iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
+ /* receive over-read error interrupt */
+ dev_err(fifo->dt_device,
+ "receive over-read interrupt\n");
+
+ iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
+ /* receive underrun error interrupt */
+ dev_err(fifo->dt_device,
+ "receive underrun error interrupt\n");
+
+ iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
+ /* transmit overrun error interrupt */
+ dev_err(fifo->dt_device,
+ "transmit overrun error interrupt\n");
+
+ iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts & XLLF_INT_TSE_MASK) {
+ /* transmit length mismatch error interrupt */
+ dev_err(fifo->dt_device,
+ "transmit length mismatch error interrupt\n");
+
+ iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ } else if (pending_interrupts) {
+ /* unknown interrupt type */
+ dev_err(fifo->dt_device,
+ "unknown interrupt(s) 0x%x\n",
+ pending_interrupts);
+
+ iowrite32(XLLF_INT_ALL_MASK,
+ fifo->base_addr + XLLF_ISR_OFFSET);
+ }
+ } while (pending_interrupts);
+
+ return IRQ_HANDLED;
+}
+
+static int axis_fifo_open(struct inode *inod, struct file *f)
+{
+	struct axis_fifo *fifo = container_of(inod->i_cdev,
+					      struct axis_fifo, char_device);
+ f->private_data = fifo;
+
+ if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
+ ((f->f_flags & O_ACCMODE) == O_RDWR)) {
+ if (fifo->has_tx_fifo) {
+ fifo->write_flags = f->f_flags;
+ } else {
+ dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
+ return -EPERM;
+ }
+ }
+
+ if (((f->f_flags & O_ACCMODE) == O_RDONLY) ||
+ ((f->f_flags & O_ACCMODE) == O_RDWR)) {
+ if (fifo->has_rx_fifo) {
+ fifo->read_flags = f->f_flags;
+ } else {
+ dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static int axis_fifo_close(struct inode *inod, struct file *f)
+{
+ f->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = axis_fifo_open,
+ .release = axis_fifo_close,
+ .read = axis_fifo_read,
+ .write = axis_fifo_write
+};
+
+/* read named property from the device tree */
+static int get_dts_property(struct axis_fifo *fifo,
+ char *name, unsigned int *var)
+{
+ int rc;
+
+ rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
+ if (rc) {
+		dev_err(fifo->dt_device, "couldn't read IP dts property '%s'\n",
+			name);
+ return rc;
+ }
+ dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
+ name, *var);
+
+ return 0;
+}
+
+static int axis_fifo_probe(struct platform_device *pdev)
+{
+ struct resource *r_irq; /* interrupt resources */
+ struct resource *r_mem; /* IO mem resources */
+ struct device *dev = &pdev->dev; /* OS device (from device tree) */
+ struct axis_fifo *fifo = NULL;
+
+ char device_name[32];
+
+ int rc = 0; /* error return value */
+
+ /* IP properties from device tree */
+ unsigned int rxd_tdata_width;
+ unsigned int txc_tdata_width;
+ unsigned int txd_tdata_width;
+ unsigned int tdest_width;
+ unsigned int tid_width;
+ unsigned int tuser_width;
+ unsigned int data_interface_type;
+ unsigned int has_tdest;
+ unsigned int has_tid;
+ unsigned int has_tkeep;
+ unsigned int has_tstrb;
+ unsigned int has_tuser;
+ unsigned int rx_fifo_depth;
+ unsigned int rx_programmable_empty_threshold;
+ unsigned int rx_programmable_full_threshold;
+ unsigned int axi_id_width;
+ unsigned int axi4_data_width;
+ unsigned int select_xpm;
+ unsigned int tx_fifo_depth;
+ unsigned int tx_programmable_empty_threshold;
+ unsigned int tx_programmable_full_threshold;
+ unsigned int use_rx_cut_through;
+ unsigned int use_rx_data;
+ unsigned int use_tx_control;
+ unsigned int use_tx_cut_through;
+ unsigned int use_tx_data;
+
+ /* ----------------------------
+ * init wrapper device
+ * ----------------------------
+ */
+
+ /* allocate device wrapper memory */
+ fifo = devm_kmalloc(dev, sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, fifo);
+ fifo->dt_device = dev;
+
+ init_waitqueue_head(&fifo->read_queue);
+ init_waitqueue_head(&fifo->write_queue);
+
+ spin_lock_init(&fifo->read_queue_lock);
+ spin_lock_init(&fifo->write_queue_lock);
+
+ /* ----------------------------
+ * init device memory space
+ * ----------------------------
+ */
+
+ /* get iospace for the device */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_mem) {
+ dev_err(fifo->dt_device, "invalid address\n");
+ rc = -ENODEV;
+ goto err_initial;
+ }
+
+ fifo->mem = r_mem;
+
+ /* request physical memory */
+ if (!request_mem_region(fifo->mem->start, resource_size(fifo->mem),
+ DRIVER_NAME)) {
+ dev_err(fifo->dt_device,
+ "couldn't lock memory region at 0x%pa\n",
+ &fifo->mem->start);
+ rc = -EBUSY;
+ goto err_initial;
+ }
+ dev_dbg(fifo->dt_device, "got memory location [0x%pa - 0x%pa]\n",
+ &fifo->mem->start, &fifo->mem->end);
+
+ /* map physical memory to kernel virtual address space */
+ fifo->base_addr = ioremap(fifo->mem->start, resource_size(fifo->mem));
+ if (!fifo->base_addr) {
+ dev_err(fifo->dt_device, "couldn't map physical memory\n");
+ rc = -ENOMEM;
+ goto err_mem;
+ }
+ dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);
+
+ /* create unique device name */
+ snprintf(device_name, sizeof(device_name), "%s_%pa",
+ DRIVER_NAME, &fifo->mem->start);
+
+ dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);
+
+ /* ----------------------------
+ * init IP
+ * ----------------------------
+ */
+
+ /* retrieve device tree properties */
+ rc = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width",
+ &rxd_tdata_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,axi-str-txc-tdata-width",
+ &txc_tdata_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width",
+ &txd_tdata_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,axis-tdest-width", &tdest_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,axis-tid-width", &tid_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,axis-tuser-width", &tuser_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,data-interface-type",
+ &data_interface_type);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,has-axis-tdest", &has_tdest);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,has-axis-tid", &has_tid);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,has-axis-tkeep", &has_tkeep);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,has-axis-tstrb", &has_tstrb);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,has-axis-tuser", &has_tuser);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,rx-fifo-depth", &rx_fifo_depth);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,rx-fifo-pe-threshold",
+ &rx_programmable_empty_threshold);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,rx-fifo-pf-threshold",
+ &rx_programmable_full_threshold);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,s-axi-id-width", &axi_id_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,s-axi4-data-width", &axi4_data_width);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,select-xpm", &select_xpm);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,tx-fifo-depth", &tx_fifo_depth);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,tx-fifo-pe-threshold",
+ &tx_programmable_empty_threshold);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,tx-fifo-pf-threshold",
+ &tx_programmable_full_threshold);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,use-rx-cut-through",
+ &use_rx_cut_through);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,use-rx-data", &use_rx_data);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,use-tx-ctrl", &use_tx_control);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,use-tx-cut-through",
+ &use_tx_cut_through);
+ if (rc)
+ goto err_unmap;
+ rc = get_dts_property(fifo, "xlnx,use-tx-data", &use_tx_data);
+ if (rc)
+ goto err_unmap;
+
+ /* check validity of device tree properties */
+ if (rxd_tdata_width != 32) {
+ dev_err(fifo->dt_device,
+ "rxd_tdata_width width [%u] unsupported\n",
+ rxd_tdata_width);
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (txd_tdata_width != 32) {
+ dev_err(fifo->dt_device,
+ "txd_tdata_width width [%u] unsupported\n",
+ txd_tdata_width);
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (has_tdest) {
+ dev_err(fifo->dt_device, "tdest not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (has_tid) {
+ dev_err(fifo->dt_device, "tid not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (has_tkeep) {
+ dev_err(fifo->dt_device, "tkeep not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (has_tstrb) {
+ dev_err(fifo->dt_device, "tstrb not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (has_tuser) {
+ dev_err(fifo->dt_device, "tuser not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (use_rx_cut_through) {
+ dev_err(fifo->dt_device, "rx cut-through not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (use_tx_cut_through) {
+ dev_err(fifo->dt_device, "tx cut-through not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+ if (use_tx_control) {
+ dev_err(fifo->dt_device, "tx control not supported\n");
+ rc = -EIO;
+ goto err_unmap;
+ }
+
+ /* TODO
+ * these exist in the device tree but it's unclear what they do
+ * - select-xpm
+ * - data-interface-type
+ */
+
+ /* set device wrapper properties based on IP config */
+ fifo->rx_fifo_depth = rx_fifo_depth;
+ /* IP sets TDFV to fifo depth - 4 so we will do the same */
+ fifo->tx_fifo_depth = tx_fifo_depth - 4;
+ fifo->has_rx_fifo = use_rx_data;
+ fifo->has_tx_fifo = use_tx_data;
+
+ reset_ip_core(fifo);
+
+ /* ----------------------------
+ * init device interrupts
+ * ----------------------------
+ */
+
+ /* get IRQ resource */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r_irq) {
+ dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n",
+ &fifo->mem->start);
+ rc = -EIO;
+ goto err_unmap;
+ }
+
+ /* request IRQ */
+ fifo->irq = r_irq->start;
+ rc = request_irq(fifo->irq, &axis_fifo_irq, 0, DRIVER_NAME, fifo);
+ if (rc) {
+ dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
+ fifo->irq);
+ goto err_unmap;
+ }
+
+ /* ----------------------------
+ * init char device
+ * ----------------------------
+ */
+
+ /* allocate device number */
+ rc = alloc_chrdev_region(&fifo->devt, 0, 1, DRIVER_NAME);
+ if (rc < 0)
+ goto err_irq;
+ dev_dbg(fifo->dt_device, "allocated device number major %i minor %i\n",
+ MAJOR(fifo->devt), MINOR(fifo->devt));
+
+ /* create driver file */
+ fifo->device = device_create(axis_fifo_driver_class, NULL, fifo->devt,
+ NULL, device_name);
+ if (IS_ERR(fifo->device)) {
+ dev_err(fifo->dt_device,
+ "couldn't create driver file\n");
+ rc = PTR_ERR(fifo->device);
+ goto err_chrdev_region;
+ }
+ dev_set_drvdata(fifo->device, fifo);
+
+ /* create character device */
+ cdev_init(&fifo->char_device, &fops);
+ rc = cdev_add(&fifo->char_device, fifo->devt, 1);
+ if (rc < 0) {
+ dev_err(fifo->dt_device, "couldn't create character device\n");
+ goto err_dev;
+ }
+
+ /* create sysfs entries */
+ rc = sysfs_create_group(&fifo->device->kobj, &axis_fifo_attrs_group);
+ if (rc < 0) {
+ dev_err(fifo->dt_device, "couldn't register sysfs group\n");
+ goto err_cdev;
+ }
+
+ dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i, major=%i, minor=%i\n",
+ &fifo->mem->start, &fifo->base_addr, fifo->irq,
+ MAJOR(fifo->devt), MINOR(fifo->devt));
+
+ return 0;
+
+err_cdev:
+ cdev_del(&fifo->char_device);
+err_dev:
+ device_destroy(axis_fifo_driver_class, fifo->devt);
+err_chrdev_region:
+ unregister_chrdev_region(fifo->devt, 1);
+err_irq:
+ free_irq(fifo->irq, fifo);
+err_unmap:
+ iounmap(fifo->base_addr);
+err_mem:
+ release_mem_region(fifo->mem->start, resource_size(fifo->mem));
+err_initial:
+ dev_set_drvdata(dev, NULL);
+ return rc;
+}
+
+static int axis_fifo_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct axis_fifo *fifo = dev_get_drvdata(dev);
+
+ sysfs_remove_group(&fifo->device->kobj, &axis_fifo_attrs_group);
+ cdev_del(&fifo->char_device);
+ dev_set_drvdata(fifo->device, NULL);
+ device_destroy(axis_fifo_driver_class, fifo->devt);
+ unregister_chrdev_region(fifo->devt, 1);
+ free_irq(fifo->irq, fifo);
+ iounmap(fifo->base_addr);
+ release_mem_region(fifo->mem->start, resource_size(fifo->mem));
+ dev_set_drvdata(dev, NULL);
+ return 0;
+}
+
+static const struct of_device_id axis_fifo_of_match[] = {
+ { .compatible = "xlnx,axi-fifo-mm-s-4.1", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, axis_fifo_of_match);
+
+static struct platform_driver axis_fifo_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = axis_fifo_of_match,
+ },
+ .probe = axis_fifo_probe,
+ .remove = axis_fifo_remove,
+};
+
+static int __init axis_fifo_init(void)
+{
+ pr_info("axis-fifo driver loaded with parameters read_timeout = %i, write_timeout = %i\n",
+ read_timeout, write_timeout);
+ axis_fifo_driver_class = class_create(THIS_MODULE, DRIVER_NAME);
+ return platform_driver_register(&axis_fifo_driver);
+}
+
+module_init(axis_fifo_init);
+
+static void __exit axis_fifo_exit(void)
+{
+ platform_driver_unregister(&axis_fifo_driver);
+ class_destroy(axis_fifo_driver_class);
+}
+
+module_exit(axis_fifo_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>");
+MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver");
diff --git a/drivers/staging/axis-fifo/axis-fifo.txt b/drivers/staging/axis-fifo/axis-fifo.txt
new file mode 100644
index 000000000000..85d88c010e72
--- /dev/null
+++ b/drivers/staging/axis-fifo/axis-fifo.txt
@@ -0,0 +1,89 @@
+Xilinx AXI-Stream FIFO v4.1 IP core
+
+This IP core has read and write AXI-Stream FIFOs, the contents of which can
+be accessed from the AXI4 memory-mapped interface. This is useful for
+transferring data from a processor into the FPGA fabric. The driver creates
+a character device that can be read from and written to with the standard
+open/read/write/close calls.
+
+See Xilinx PG080 document for IP details.
+
+Currently supports only store-forward mode with a 32-bit
+AXI4-Lite interface. DOES NOT support:
+ - cut-through mode
+ - AXI4 (non-lite)
+
+Required properties:
+- compatible: Should be "xlnx,axi-fifo-mm-s-4.1"
+- interrupt-names: Should be "interrupt"
+- interrupt-parent: Should be <&intc>
+- interrupts: Should contain the interrupt line.
+- reg: Should contain registers location and length.
+- xlnx,axi-str-rxd-protocol: Should be "XIL_AXI_STREAM_ETH_DATA"
+- xlnx,axi-str-rxd-tdata-width: Should be <0x20>
+- xlnx,axi-str-txc-protocol: Should be "XIL_AXI_STREAM_ETH_CTRL"
+- xlnx,axi-str-txc-tdata-width: Should be <0x20>
+- xlnx,axi-str-txd-protocol: Should be "XIL_AXI_STREAM_ETH_DATA"
+- xlnx,axi-str-txd-tdata-width: Should be <0x20>
+- xlnx,axis-tdest-width: AXI-Stream TDEST width
+- xlnx,axis-tid-width: AXI-Stream TID width
+- xlnx,axis-tuser-width: AXI-Stream TUSER width
+- xlnx,data-interface-type: Should be <0x0>
+- xlnx,has-axis-tdest: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tid: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tkeep: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tstrb: Should be <0x0> (this feature isn't supported)
+- xlnx,has-axis-tuser: Should be <0x0> (this feature isn't supported)
+- xlnx,rx-fifo-depth: Depth of RX FIFO in words
+- xlnx,rx-fifo-pe-threshold: RX programmable empty interrupt threshold
+- xlnx,rx-fifo-pf-threshold: RX programmable full interrupt threshold
+- xlnx,s-axi-id-width: Should be <0x4>
+- xlnx,s-axi4-data-width: Should be <0x20>
+- xlnx,select-xpm: Should be <0x0>
+- xlnx,tx-fifo-depth: Depth of TX FIFO in words
+- xlnx,tx-fifo-pe-threshold: TX programmable empty interrupt threshold
+- xlnx,tx-fifo-pf-threshold: TX programmable full interrupt threshold
+- xlnx,use-rx-cut-through: Should be <0x0> (this feature isn't supported)
+- xlnx,use-rx-data: <0x1> if RX FIFO is enabled, <0x0> otherwise
+- xlnx,use-tx-ctrl: Should be <0x0> (this feature isn't supported)
+- xlnx,use-tx-cut-through: Should be <0x0> (this feature isn't supported)
+- xlnx,use-tx-data: <0x1> if TX FIFO is enabled, <0x0> otherwise
+
+Example:
+
+axi_fifo_mm_s_0: axi_fifo_mm_s@43c00000 {
+ compatible = "xlnx,axi-fifo-mm-s-4.1";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&intc>;
+ interrupts = <0 29 4>;
+ reg = <0x43c00000 0x10000>;
+ xlnx,axi-str-rxd-protocol = "XIL_AXI_STREAM_ETH_DATA";
+ xlnx,axi-str-rxd-tdata-width = <0x20>;
+ xlnx,axi-str-txc-protocol = "XIL_AXI_STREAM_ETH_CTRL";
+ xlnx,axi-str-txc-tdata-width = <0x20>;
+ xlnx,axi-str-txd-protocol = "XIL_AXI_STREAM_ETH_DATA";
+ xlnx,axi-str-txd-tdata-width = <0x20>;
+ xlnx,axis-tdest-width = <0x4>;
+ xlnx,axis-tid-width = <0x4>;
+ xlnx,axis-tuser-width = <0x4>;
+ xlnx,data-interface-type = <0x0>;
+ xlnx,has-axis-tdest = <0x0>;
+ xlnx,has-axis-tid = <0x0>;
+ xlnx,has-axis-tkeep = <0x0>;
+ xlnx,has-axis-tstrb = <0x0>;
+ xlnx,has-axis-tuser = <0x0>;
+ xlnx,rx-fifo-depth = <0x200>;
+ xlnx,rx-fifo-pe-threshold = <0x2>;
+ xlnx,rx-fifo-pf-threshold = <0x1fb>;
+ xlnx,s-axi-id-width = <0x4>;
+ xlnx,s-axi4-data-width = <0x20>;
+ xlnx,select-xpm = <0x0>;
+ xlnx,tx-fifo-depth = <0x8000>;
+ xlnx,tx-fifo-pe-threshold = <0x200>;
+ xlnx,tx-fifo-pf-threshold = <0x7ffb>;
+ xlnx,use-rx-cut-through = <0x0>;
+ xlnx,use-rx-data = <0x0>;
+ xlnx,use-tx-ctrl = <0x0>;
+ xlnx,use-tx-cut-through = <0x0>;
+ xlnx,use-tx-data = <0x1>;
+};
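
A minimal userspace sketch of the read/write flow the binding above ends up
exposing: the driver is used through the character device it creates, where a
write() is intended to queue one AXI-Stream packet and a read() to return one
received packet. The device node name below is an assumption for illustration
(the actual name is assigned when the driver creates the device), and error
handling is abbreviated:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* hypothetical node name; check /dev after the driver probes */
            int fd = open("/dev/axis_fifo_0x43c00000", O_RDWR);
            uint32_t tx[4] = { 0xdeadbeef, 1, 2, 3 };
            uint32_t rx[4];
            ssize_t n;

            if (fd < 0)
                    return 1;

            /* one write() queues one packet into the TX FIFO */
            if (write(fd, tx, sizeof(tx)) < 0)
                    perror("write");

            /* one read() returns one received packet (or times out) */
            n = read(fd, rx, sizeof(rx));
            if (n > 0)
                    printf("received %zd bytes\n", n);

            close(fd);
            return 0;
    }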
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
index 357af02c562c..aa57a5865556 100644
--- a/drivers/staging/clocking-wizard/Kconfig
+++ b/drivers/staging/clocking-wizard/Kconfig
@@ -5,5 +5,5 @@
config COMMON_CLK_XLNX_CLKWZRD
tristate "Xilinx Clocking Wizard"
depends on COMMON_CLK && OF
- ---help---
+ help
Support for the Xilinx Clocking Wizard IP core clock generator.
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 4218fc0e17f1..583bce9bb18e 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -75,14 +75,6 @@ config COMEDI_PARPORT
To compile this driver as a module, choose M here: the module will be
called comedi_parport.
-config COMEDI_SERIAL2002
- tristate "Driver for serial connected hardware"
- ---help---
- Enable support for serial connected hardware
-
- To compile this driver as a module, choose M here: the module will be
- called serial2002.
-
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index c0bc413f7fe0..bb961ac79b7e 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: LGPL-2.0+
+/* SPDX-License-Identifier: LGPL-2.0+ */
/*
* comedi.h
* header file for COMEDI user API
diff --git a/drivers/staging/comedi/comedi_compat32.h b/drivers/staging/comedi/comedi_compat32.h
index 3980e6e1bd0d..dc3e2a9442c7 100644
--- a/drivers/staging/comedi/comedi_compat32.h
+++ b/drivers/staging/comedi/comedi_compat32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* comedi/comedi_compat32.h
* 32-bit ioctl compatibility for 64-bit comedi kernel module.
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index c13772a0df58..e18b61cdbdeb 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -60,7 +60,7 @@ struct comedi_file {
struct comedi_subdevice *read_subdev;
struct comedi_subdevice *write_subdev;
unsigned int last_detach_count;
- bool last_attached:1;
+ unsigned int last_attached:1;
};
#define COMEDI_NUM_MINORS 0x100
@@ -79,8 +79,8 @@ MODULE_PARM_DESC(comedi_default_buf_size_kb,
"default asynchronous buffer size in KiB (default "
__MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB) ")");
-unsigned int comedi_default_buf_maxsize_kb
- = CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB;
+unsigned int comedi_default_buf_maxsize_kb =
+ CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB;
module_param(comedi_default_buf_maxsize_kb, uint, 0644);
MODULE_PARM_DESC(comedi_default_buf_maxsize_kb,
"default maximum size of asynchronous buffer in KiB (default "
diff --git a/drivers/staging/comedi/comedi_pci.h b/drivers/staging/comedi/comedi_pci.h
index 647a72441b8a..4e069440cbdc 100644
--- a/drivers/staging/comedi/comedi_pci.h
+++ b/drivers/staging/comedi/comedi_pci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* comedi_pci.h
* header file for Comedi PCI drivers
diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/drivers/staging/comedi/comedi_pcmcia.h
index c7d37b38e730..f2f6e779645b 100644
--- a/drivers/staging/comedi/comedi_pcmcia.h
+++ b/drivers/staging/comedi/comedi_pcmcia.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* comedi_pcmcia.h
* header file for Comedi PCMCIA drivers
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index c54ac94d89d2..5775a93917f4 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -542,8 +542,8 @@ struct comedi_device {
const char *board_name;
const void *board_ptr;
- bool attached:1;
- bool ioenabled:1;
+ unsigned int attached:1;
+ unsigned int ioenabled:1;
spinlock_t spinlock; /* generic spin-lock for low-level driver */
struct mutex mutex; /* generic mutex for COMEDI core */
struct rw_semaphore attach_lock;
diff --git a/drivers/staging/comedi/comedilib.h b/drivers/staging/comedi/comedilib.h
index e98cb9752dbc..0223c9cd9215 100644
--- a/drivers/staging/comedi/comedilib.h
+++ b/drivers/staging/comedi/comedilib.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* comedilib.h
* Header file for kcomedilib
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 9d733471ca2e..57dd63d548b7 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -473,21 +473,21 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
+ unsigned long long scans_left;
+ unsigned long long samples_left;
- if (cmd->stop_src == TRIG_COUNT) {
- unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
- unsigned int scan_pos =
- comedi_bytes_to_samples(s, async->scan_progress);
- unsigned long long samples_left = 0;
-
- if (scans_left) {
- samples_left = ((unsigned long long)scans_left *
- cmd->scan_end_arg) - scan_pos;
- }
+ if (cmd->stop_src != TRIG_COUNT)
+ return nsamples;
- if (samples_left < nsamples)
- nsamples = samples_left;
- }
+ scans_left = __comedi_nscans_left(s, cmd->stop_arg);
+ if (!scans_left)
+ return 0;
+
+ samples_left = scans_left * cmd->scan_end_arg -
+ comedi_bytes_to_samples(s, async->scan_progress);
+
+ if (samples_left < nsamples)
+ return samples_left;
return nsamples;
}
EXPORT_SYMBOL_GPL(comedi_nsamples_left);
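
The refactored accounting above reduces to: with a TRIG_COUNT stop source, the
samples still to acquire are scans_left * scan_end_arg minus the samples
already taken in the current scan, clamped to the caller's nsamples. A
standalone sketch of that arithmetic with illustrative numbers (plain C,
outside the comedi API):

    #include <stdio.h>

    /* mirrors the samples-left clamp in comedi_nsamples_left() above */
    static unsigned long long samples_left(unsigned long long scans_left,
                                           unsigned int scan_end_arg,
                                           unsigned int scan_pos,
                                           unsigned int nsamples)
    {
            unsigned long long left;

            if (!scans_left)
                    return 0;
            left = scans_left * scan_end_arg - scan_pos;
            return left < nsamples ? left : nsamples;
    }

    int main(void)
    {
            /* 3 scans of 16 samples remain, 5 samples into the current scan */
            printf("%llu\n", samples_left(3, 16, 5, 64)); /* prints 43 */
            return 0;
    }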
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 736e7e55219d..98b42b47dfe1 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_COMEDI_ISADMA) += comedi_isadma.o
obj-$(CONFIG_COMEDI_BOND) += comedi_bond.o
obj-$(CONFIG_COMEDI_TEST) += comedi_test.o
obj-$(CONFIG_COMEDI_PARPORT) += comedi_parport.o
-obj-$(CONFIG_COMEDI_SERIAL2002) += serial2002.o
# Comedi ISA drivers
obj-$(CONFIG_COMEDI_AMPLC_DIO200_ISA) += amplc_dio200.o
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.h b/drivers/staging/comedi/drivers/amplc_dio200.h
index 88c1d1063d5d..4c3e4c37c4c5 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.h
+++ b/drivers/staging/comedi/drivers/amplc_dio200.h
@@ -32,9 +32,9 @@ struct dio200_board {
unsigned short n_subdevs; /* number of subdevices */
unsigned char sdtype[DIO200_MAX_SUBDEVS]; /* enum dio200_sdtype */
unsigned char sdinfo[DIO200_MAX_SUBDEVS]; /* depends on sdtype */
- bool has_int_sce:1; /* has interrupt enable/status reg */
- bool has_clk_gat_sce:1; /* has clock/gate selection registers */
- bool is_pcie:1; /* has enhanced features */
+ unsigned int has_int_sce:1; /* has interrupt enable/status reg */
+ unsigned int has_clk_gat_sce:1; /* has clock/gate selection registers */
+ unsigned int is_pcie:1; /* has enhanced features */
};
int amplc_dio200_common_attach(struct comedi_device *dev, unsigned int irq,
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 82bd41d92509..8697dc02ffb4 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -96,7 +96,7 @@ struct dio200_subdev_intr {
unsigned int ofs;
unsigned int valid_isns;
unsigned int enabled_isns;
- bool active:1;
+ unsigned int active:1;
};
static unsigned char dio200_read8(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 15fc7f19051a..08ffe26c5d43 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -444,7 +444,7 @@ struct pci230_board {
unsigned char ai_bits;
unsigned char ao_bits;
unsigned char min_hwver; /* Minimum hardware version supported. */
- bool have_dio:1;
+ unsigned int have_dio:1;
};
static const struct pci230_board pci230_boards[] = {
@@ -490,11 +490,11 @@ struct pci230_private {
unsigned short adcg; /* ADCG register value */
unsigned char ier; /* Interrupt enable bits */
unsigned char res_owned[NUM_OWNERS]; /* Owned resources */
- bool intr_running:1; /* Flag set in interrupt routine */
- bool ai_bipolar:1; /* Flag AI range is bipolar */
- bool ao_bipolar:1; /* Flag AO range is bipolar */
- bool ai_cmd_started:1; /* Flag AI command started */
- bool ao_cmd_started:1; /* Flag AO command started */
+ unsigned int intr_running:1; /* Flag set in interrupt routine */
+ unsigned int ai_bipolar:1; /* Flag AI range is bipolar */
+ unsigned int ao_bipolar:1; /* Flag AO range is bipolar */
+ unsigned int ai_cmd_started:1; /* Flag AI command started */
+ unsigned int ao_cmd_started:1; /* Flag AO command started */
};
/* PCI230 clock source periods in ns */
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index b33203f6a990..21fc7b3c5f60 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -188,7 +188,5 @@ static struct pci_driver cb_pcimdda_driver_pci_driver = {
module_comedi_pci_driver(cb_pcimdda_driver, cb_pcimdda_driver_pci_driver);
MODULE_AUTHOR("Calin A. Culianu <calin@rtlab.org>");
-MODULE_DESCRIPTION("Comedi low-level driver for the Computerboards PCIM-DDA "
- "series. Currently only supports PCIM-DDA06-16 (which "
- "also happens to be the only board in this series. :) ) ");
+MODULE_DESCRIPTION("Comedi low-level driver for the Computerboards PCIM-DDA series. Currently only supports PCIM-DDA06-16 (which also happens to be the only board in this series. :) ) ");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 03f98b0287c8..aabcda3f9fc8 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -240,7 +240,7 @@ enum db2k_boardid {
struct db2k_boardtype {
const char *name;
- bool has_2_ao:1; /* false: 4 AO chans; true: 2 AO chans */
+ unsigned int has_2_ao:1;/* false: 4 AO chans; true: 2 AO chans */
};
static const struct db2k_boardtype db2k_boardtypes[] = {
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index e40a2c0a9543..4dee2fc37aed 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -4294,7 +4294,7 @@ static int pack_ad8842(int addr, int val, int *bitstring)
struct caldac_struct {
int n_chans;
int n_bits;
- int (*packbits)(int, int, int *);
+ int (*packbits)(int address, int value, int *bitstring);
};
static struct caldac_struct caldacs[] = {
@@ -5446,11 +5446,11 @@ static int ni_E_init(struct comedi_device *dev,
/* Digital I/O (PFI) subdevice */
s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->maxdata = 1;
if (devpriv->is_m_series) {
s->n_chan = 16;
s->insn_bits = ni_pfi_insn_bits;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
ni_writew(dev, s->state, NI_M_PFI_DO_REG);
for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
@@ -5459,6 +5459,7 @@ static int ni_E_init(struct comedi_device *dev,
}
} else {
s->n_chan = 10;
+ s->subdev_flags = SDF_INTERNAL;
}
s->insn_config = ni_pfi_insn_config;
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index d722079f3327..d87cf6d4a161 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -282,7 +282,7 @@ static int check_channel_list(struct comedi_device *dev,
unsigned int chanlen)
{
unsigned int chansegment[16];
- unsigned int i, nowmustbechan, seglen, segpos;
+ unsigned int i, nowmustbechan, seglen;
/* correct channel and range number check itself comedi/range.c */
if (chanlen < 1) {
@@ -312,7 +312,7 @@ static int check_channel_list(struct comedi_device *dev,
}
/* check whole chanlist */
- for (i = 0, segpos = 0; i < chanlen; i++) {
+ for (i = 0; i < chanlen; i++) {
if (chanlist[i] != chansegment[i % seglen]) {
dev_dbg(dev->class_dev,
"bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index eebb49751713..0af5315d4357 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -573,7 +573,7 @@ static int check_channel_list(struct comedi_device *dev,
unsigned int *chanlist, unsigned int n_chan)
{
unsigned int chansegment[16];
- unsigned int i, nowmustbechan, seglen, segpos;
+ unsigned int i, nowmustbechan, seglen;
/* correct channel and range number check itself comedi/range.c */
if (n_chan < 1) {
@@ -605,7 +605,7 @@ static int check_channel_list(struct comedi_device *dev,
}
/* check whole chanlist */
- for (i = 0, segpos = 0; i < n_chan; i++) {
+ for (i = 0; i < n_chan; i++) {
if (chanlist[i] != chansegment[i % seglen]) {
dev_dbg(dev->class_dev,
"bad channel or range number! chanlist[%i]=%d,%d,%d and not %d,%d,%d!\n",
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index ea194aa01a64..257b0daff01f 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
/* Make sure D/A update mode is direct update */
outb(0, dev->iobase + DAQP_AUX_REG);
- for (i = 0; i > insn->n; i++) {
+ for (i = 0; i < insn->n; i++) {
unsigned int val = data[i];
int ret;
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
deleted file mode 100644
index 5471b2212a62..000000000000
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ /dev/null
@@ -1,778 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * serial2002.c
- * Comedi driver for serial connected hardware
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2002 Anders Blomdell <anders.blomdell@control.lth.se>
- */
-
-/*
- * Driver: serial2002
- * Description: Driver for serial connected hardware
- * Devices:
- * Author: Anders Blomdell
- * Updated: Fri, 7 Jun 2002 12:56:45 -0700
- * Status: in development
- */
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/ktime.h>
-
-#include <linux/termios.h>
-#include <asm/ioctls.h>
-#include <linux/serial.h>
-#include <linux/poll.h>
-
-struct serial2002_range_table_t {
- /* HACK... */
- int length;
- struct comedi_krange range;
-};
-
-struct serial2002_private {
- int port; /* /dev/ttyS<port> */
- int speed; /* baudrate */
- struct file *tty;
- unsigned int ao_readback[32];
- unsigned char digital_in_mapping[32];
- unsigned char digital_out_mapping[32];
- unsigned char analog_in_mapping[32];
- unsigned char analog_out_mapping[32];
- unsigned char encoder_in_mapping[32];
- struct serial2002_range_table_t in_range[32], out_range[32];
-};
-
-struct serial_data {
- enum { is_invalid, is_digital, is_channel } kind;
- int index;
- unsigned long value;
-};
-
-/*
- * The configuration serial_data.value read from the device is
- * a bitmask that defines specific options of a channel:
- *
- * 4:0 - the channel to configure
- * 7:5 - the kind of channel
- * 9:8 - the command used to configure the channel
- *
- * The remaining bits vary in use depending on the command:
- *
- * BITS 15:10 - the channel bits (maxdata)
- * MIN/MAX 12:10 - the units multiplier for the scale
- * 13 - the sign of the scale
- * 33:14 - the base value for the range
- */
-#define S2002_CFG_CHAN(x) ((x) & 0x1f)
-#define S2002_CFG_KIND(x) (((x) >> 5) & 0x7)
-#define S2002_CFG_KIND_INVALID 0
-#define S2002_CFG_KIND_DIGITAL_IN 1
-#define S2002_CFG_KIND_DIGITAL_OUT 2
-#define S2002_CFG_KIND_ANALOG_IN 3
-#define S2002_CFG_KIND_ANALOG_OUT 4
-#define S2002_CFG_KIND_ENCODER_IN 5
-#define S2002_CFG_CMD(x) (((x) >> 8) & 0x3)
-#define S2002_CFG_CMD_BITS 0
-#define S2002_CFG_CMD_MIN 1
-#define S2002_CFG_CMD_MAX 2
-#define S2002_CFG_BITS(x) (((x) >> 10) & 0x3f)
-#define S2002_CFG_UNITS(x) (((x) >> 10) & 0x7)
-#define S2002_CFG_SIGN(x) (((x) >> 13) & 0x1)
-#define S2002_CFG_BASE(x) (((x) >> 14) & 0xfffff)
-
-static long serial2002_tty_ioctl(struct file *f, unsigned int op,
- unsigned long param)
-{
- if (f->f_op->unlocked_ioctl)
- return f->f_op->unlocked_ioctl(f, op, param);
-
- return -ENOTTY;
-}
-
-static int serial2002_tty_write(struct file *f, unsigned char *buf, int count)
-{
- loff_t pos = 0;
-
- return kernel_write(f, buf, count, &pos);
-}
-
-static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
-{
- struct poll_wqueues table;
- ktime_t start, now;
-
- start = ktime_get();
- poll_initwait(&table);
- while (1) {
- long elapsed;
- __poll_t mask;
-
- mask = vfs_poll(f, &table.pt);
- if (mask & (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN |
- EPOLLHUP | EPOLLERR)) {
- break;
- }
- now = ktime_get();
- elapsed = ktime_us_delta(now, start);
- if (elapsed > timeout)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(((timeout - elapsed) * HZ) / 10000);
- }
- poll_freewait(&table);
-}
-
-static int serial2002_tty_read(struct file *f, int timeout)
-{
- unsigned char ch;
- int result;
- loff_t pos = 0;
-
- result = -1;
- if (!IS_ERR(f)) {
- if (file_can_poll(f)) {
- serial2002_tty_read_poll_wait(f, timeout);
-
- if (kernel_read(f, &ch, 1, &pos) == 1)
- result = ch;
- } else {
- /* Device does not support poll, busy wait */
- int retries = 0;
-
- while (1) {
- retries++;
- if (retries >= timeout)
- break;
-
- if (kernel_read(f, &ch, 1, &pos) == 1) {
- result = ch;
- break;
- }
- usleep_range(100, 1000);
- }
- }
- }
- return result;
-}
-
-static void serial2002_tty_setspeed(struct file *f, int speed)
-{
- struct termios termios;
- struct serial_struct serial;
- mm_segment_t oldfs;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
-
- /* Set speed */
- serial2002_tty_ioctl(f, TCGETS, (unsigned long)&termios);
- termios.c_iflag = 0;
- termios.c_oflag = 0;
- termios.c_lflag = 0;
- termios.c_cflag = CLOCAL | CS8 | CREAD;
- termios.c_cc[VMIN] = 0;
- termios.c_cc[VTIME] = 0;
- switch (speed) {
- case 2400:
- termios.c_cflag |= B2400;
- break;
- case 4800:
- termios.c_cflag |= B4800;
- break;
- case 9600:
- termios.c_cflag |= B9600;
- break;
- case 19200:
- termios.c_cflag |= B19200;
- break;
- case 38400:
- termios.c_cflag |= B38400;
- break;
- case 57600:
- termios.c_cflag |= B57600;
- break;
- case 115200:
- termios.c_cflag |= B115200;
- break;
- default:
- termios.c_cflag |= B9600;
- break;
- }
- serial2002_tty_ioctl(f, TCSETS, (unsigned long)&termios);
-
- /* Set low latency */
- serial2002_tty_ioctl(f, TIOCGSERIAL, (unsigned long)&serial);
- serial.flags |= ASYNC_LOW_LATENCY;
- serial2002_tty_ioctl(f, TIOCSSERIAL, (unsigned long)&serial);
-
- set_fs(oldfs);
-}
-
-static void serial2002_poll_digital(struct file *f, int channel)
-{
- char cmd;
-
- cmd = 0x40 | (channel & 0x1f);
- serial2002_tty_write(f, &cmd, 1);
-}
-
-static void serial2002_poll_channel(struct file *f, int channel)
-{
- char cmd;
-
- cmd = 0x60 | (channel & 0x1f);
- serial2002_tty_write(f, &cmd, 1);
-}
-
-static struct serial_data serial2002_read(struct file *f, int timeout)
-{
- struct serial_data result;
- int length;
-
- result.kind = is_invalid;
- result.index = 0;
- result.value = 0;
- length = 0;
- while (1) {
- int data = serial2002_tty_read(f, timeout);
-
- length++;
- if (data < 0) {
- break;
- } else if (data & 0x80) {
- result.value = (result.value << 7) | (data & 0x7f);
- } else {
- if (length == 1) {
- switch ((data >> 5) & 0x03) {
- case 0:
- result.value = 0;
- result.kind = is_digital;
- break;
- case 1:
- result.value = 1;
- result.kind = is_digital;
- break;
- }
- } else {
- result.value =
- (result.value << 2) | ((data & 0x60) >> 5);
- result.kind = is_channel;
- }
- result.index = data & 0x1f;
- break;
- }
- }
- return result;
-}
-
-static void serial2002_write(struct file *f, struct serial_data data)
-{
- if (data.kind == is_digital) {
- unsigned char ch =
- ((data.value << 5) & 0x20) | (data.index & 0x1f);
- serial2002_tty_write(f, &ch, 1);
- } else {
- unsigned char ch[6];
- int i = 0;
-
- if (data.value >= (1L << 30)) {
- ch[i] = 0x80 | ((data.value >> 30) & 0x03);
- i++;
- }
- if (data.value >= (1L << 23)) {
- ch[i] = 0x80 | ((data.value >> 23) & 0x7f);
- i++;
- }
- if (data.value >= (1L << 16)) {
- ch[i] = 0x80 | ((data.value >> 16) & 0x7f);
- i++;
- }
- if (data.value >= (1L << 9)) {
- ch[i] = 0x80 | ((data.value >> 9) & 0x7f);
- i++;
- }
- ch[i] = 0x80 | ((data.value >> 2) & 0x7f);
- i++;
- ch[i] = ((data.value << 5) & 0x60) | (data.index & 0x1f);
- i++;
- serial2002_tty_write(f, ch, i);
- }
-}
-
-struct config_t {
- short int kind;
- short int bits;
- int min;
- int max;
-};
-
-static int serial2002_setup_subdevice(struct comedi_subdevice *s,
- struct config_t *cfg,
- struct serial2002_range_table_t *range,
- unsigned char *mapping,
- int kind)
-{
- const struct comedi_lrange **range_table_list = NULL;
- unsigned int *maxdata_list;
- int j, chan;
-
- for (chan = 0, j = 0; j < 32; j++) {
- if (cfg[j].kind == kind)
- chan++;
- }
- s->n_chan = chan;
- s->maxdata = 0;
- kfree(s->maxdata_list);
- maxdata_list = kmalloc_array(s->n_chan, sizeof(unsigned int),
- GFP_KERNEL);
- if (!maxdata_list)
- return -ENOMEM;
- s->maxdata_list = maxdata_list;
- kfree(s->range_table_list);
- s->range_table = NULL;
- s->range_table_list = NULL;
- if (kind == 1 || kind == 2) {
- s->range_table = &range_digital;
- } else if (range) {
- range_table_list = kmalloc_array(s->n_chan, sizeof(*range),
- GFP_KERNEL);
- if (!range_table_list)
- return -ENOMEM;
- s->range_table_list = range_table_list;
- }
- for (chan = 0, j = 0; j < 32; j++) {
- if (cfg[j].kind == kind) {
- if (mapping)
- mapping[chan] = j;
- if (range && range_table_list) {
- range[j].length = 1;
- range[j].range.min = cfg[j].min;
- range[j].range.max = cfg[j].max;
- range_table_list[chan] =
- (const struct comedi_lrange *)&range[j];
- }
- if (cfg[j].bits < 32)
- maxdata_list[chan] = (1u << cfg[j].bits) - 1;
- else
- maxdata_list[chan] = 0xffffffff;
- chan++;
- }
- }
- return 0;
-}
-
-static int serial2002_setup_subdevs(struct comedi_device *dev)
-{
- struct serial2002_private *devpriv = dev->private;
- struct config_t *di_cfg;
- struct config_t *do_cfg;
- struct config_t *ai_cfg;
- struct config_t *ao_cfg;
- struct config_t *cfg;
- struct comedi_subdevice *s;
- int result = 0;
- int i;
-
- /* Allocate the temporary structs to hold the configuration data */
- di_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- do_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- ai_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- ao_cfg = kcalloc(32, sizeof(*cfg), GFP_KERNEL);
- if (!di_cfg || !do_cfg || !ai_cfg || !ao_cfg) {
- result = -ENOMEM;
- goto err_alloc_configs;
- }
-
- /* Read the configuration from the connected device */
- serial2002_tty_setspeed(devpriv->tty, devpriv->speed);
- serial2002_poll_channel(devpriv->tty, 31);
- while (1) {
- struct serial_data data = serial2002_read(devpriv->tty, 1000);
- int kind = S2002_CFG_KIND(data.value);
- int channel = S2002_CFG_CHAN(data.value);
- int range = S2002_CFG_BASE(data.value);
- int cmd = S2002_CFG_CMD(data.value);
-
- if (data.kind != is_channel || data.index != 31 ||
- kind == S2002_CFG_KIND_INVALID)
- break;
-
- switch (kind) {
- case S2002_CFG_KIND_DIGITAL_IN:
- cfg = di_cfg;
- break;
- case S2002_CFG_KIND_DIGITAL_OUT:
- cfg = do_cfg;
- break;
- case S2002_CFG_KIND_ANALOG_IN:
- cfg = ai_cfg;
- break;
- case S2002_CFG_KIND_ANALOG_OUT:
- cfg = ao_cfg;
- break;
- case S2002_CFG_KIND_ENCODER_IN:
- cfg = ai_cfg;
- break;
- default:
- cfg = NULL;
- break;
- }
- if (!cfg)
- continue; /* unknown kind, skip it */
-
- cfg[channel].kind = kind;
-
- switch (cmd) {
- case S2002_CFG_CMD_BITS:
- cfg[channel].bits = S2002_CFG_BITS(data.value);
- break;
- case S2002_CFG_CMD_MIN:
- case S2002_CFG_CMD_MAX:
- switch (S2002_CFG_UNITS(data.value)) {
- case 0:
- range *= 1000000;
- break;
- case 1:
- range *= 1000;
- break;
- case 2:
- range *= 1;
- break;
- }
- if (S2002_CFG_SIGN(data.value))
- range = -range;
- if (cmd == S2002_CFG_CMD_MIN)
- cfg[channel].min = range;
- else
- cfg[channel].max = range;
- break;
- }
- }
-
- /* Fill in subdevice data */
- for (i = 0; i <= 4; i++) {
- unsigned char *mapping = NULL;
- struct serial2002_range_table_t *range = NULL;
- int kind = 0;
-
- s = &dev->subdevices[i];
-
- switch (i) {
- case 0:
- cfg = di_cfg;
- mapping = devpriv->digital_in_mapping;
- kind = S2002_CFG_KIND_DIGITAL_IN;
- break;
- case 1:
- cfg = do_cfg;
- mapping = devpriv->digital_out_mapping;
- kind = S2002_CFG_KIND_DIGITAL_OUT;
- break;
- case 2:
- cfg = ai_cfg;
- mapping = devpriv->analog_in_mapping;
- range = devpriv->in_range;
- kind = S2002_CFG_KIND_ANALOG_IN;
- break;
- case 3:
- cfg = ao_cfg;
- mapping = devpriv->analog_out_mapping;
- range = devpriv->out_range;
- kind = S2002_CFG_KIND_ANALOG_OUT;
- break;
- case 4:
- cfg = ai_cfg;
- mapping = devpriv->encoder_in_mapping;
- range = devpriv->in_range;
- kind = S2002_CFG_KIND_ENCODER_IN;
- break;
- }
-
- if (serial2002_setup_subdevice(s, cfg, range, mapping, kind))
- break; /* err handled below */
- }
- if (i <= 4) {
- /*
- * Failed to allocate maxdata_list or range_table_list
- * for a subdevice that needed it.
- */
- result = -ENOMEM;
- for (i = 0; i <= 4; i++) {
- s = &dev->subdevices[i];
- kfree(s->maxdata_list);
- s->maxdata_list = NULL;
- kfree(s->range_table_list);
- s->range_table_list = NULL;
- }
- }
-
-err_alloc_configs:
- kfree(di_cfg);
- kfree(do_cfg);
- kfree(ai_cfg);
- kfree(ao_cfg);
-
- if (result) {
- if (devpriv->tty) {
- filp_close(devpriv->tty, NULL);
- devpriv->tty = NULL;
- }
- }
-
- return result;
-}
-
-static int serial2002_open(struct comedi_device *dev)
-{
- struct serial2002_private *devpriv = dev->private;
- int result;
- char port[20];
-
- sprintf(port, "/dev/ttyS%d", devpriv->port);
- devpriv->tty = filp_open(port, O_RDWR, 0);
- if (IS_ERR(devpriv->tty)) {
- result = (int)PTR_ERR(devpriv->tty);
- dev_err(dev->class_dev, "file open error = %d\n", result);
- } else {
- result = serial2002_setup_subdevs(dev);
- }
- return result;
-}
-
-static void serial2002_close(struct comedi_device *dev)
-{
- struct serial2002_private *devpriv = dev->private;
-
- if (!IS_ERR(devpriv->tty) && devpriv->tty)
- filp_close(devpriv->tty, NULL);
-}
-
-static int serial2002_di_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->digital_in_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data read;
-
- serial2002_poll_digital(devpriv->tty, chan);
- while (1) {
- read = serial2002_read(devpriv->tty, 1000);
- if (read.kind != is_digital || read.index == chan)
- break;
- }
- data[n] = read.value;
- }
- return n;
-}
-
-static int serial2002_do_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->digital_out_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data write;
-
- write.kind = is_digital;
- write.index = chan;
- write.value = data[n];
- serial2002_write(devpriv->tty, write);
- }
- return n;
-}
-
-static int serial2002_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->analog_in_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data read;
-
- serial2002_poll_channel(devpriv->tty, chan);
- while (1) {
- read = serial2002_read(devpriv->tty, 1000);
- if (read.kind != is_channel || read.index == chan)
- break;
- }
- data[n] = read.value;
- }
- return n;
-}
-
-static int serial2002_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->analog_out_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data write;
-
- write.kind = is_channel;
- write.index = chan;
- write.value = data[n];
- serial2002_write(devpriv->tty, write);
- devpriv->ao_readback[chan] = data[n];
- }
- return n;
-}
-
-static int serial2002_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
-
- return n;
-}
-
-static int serial2002_encoder_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct serial2002_private *devpriv = dev->private;
- int n;
- int chan;
-
- chan = devpriv->encoder_in_mapping[CR_CHAN(insn->chanspec)];
- for (n = 0; n < insn->n; n++) {
- struct serial_data read;
-
- serial2002_poll_channel(devpriv->tty, chan);
- while (1) {
- read = serial2002_read(devpriv->tty, 1000);
- if (read.kind != is_channel || read.index == chan)
- break;
- }
- data[n] = read.value;
- }
- return n;
-}
-
-static int serial2002_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct serial2002_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- devpriv->port = it->options[0];
- devpriv->speed = it->options[1];
-
- ret = comedi_alloc_subdevices(dev, 5);
- if (ret)
- return ret;
-
- /* digital input subdevice */
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_read = serial2002_di_insn_read;
-
- /* digital output subdevice */
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_write = serial2002_do_insn_write;
-
- /* analog input subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_read = serial2002_ai_insn_read;
-
- /* analog output subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_write = serial2002_ao_insn_write;
- s->insn_read = serial2002_ao_insn_read;
-
- /* encoder input subdevice */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_read = serial2002_encoder_insn_read;
-
- dev->open = serial2002_open;
- dev->close = serial2002_close;
-
- return 0;
-}
-
-static void serial2002_detach(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int i;
-
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- kfree(s->maxdata_list);
- kfree(s->range_table_list);
- }
-}
-
-static struct comedi_driver serial2002_driver = {
- .driver_name = "serial2002",
- .module = THIS_MODULE,
- .attach = serial2002_attach,
- .detach = serial2002_detach,
-};
-module_comedi_driver(serial2002_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 9f9b9a5b4b27..f91eaa1c3b67 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -883,10 +883,9 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
* touched safely, the close routine will signal the
* ch_flags_wait to wake us back up.
*/
- rc = wait_event_interruptible(
- ch->ch_flags_wait,
- (((ch->ch_tun.un_flags |
- ch->ch_pun.un_flags) & UN_CLOSING) == 0));
+ rc = wait_event_interruptible(ch->ch_flags_wait,
+ !((ch->ch_tun.un_flags |
+ ch->ch_pun.un_flags) & UN_CLOSING));
/* If ret is non-zero, user ctrl-c'ed us */
if (rc)
return -EINTR;
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
new file mode 100644
index 000000000000..96f614934df1
--- /dev/null
+++ b/drivers/staging/erofs/Kconfig
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config EROFS_FS
+ tristate "EROFS filesystem support"
+ depends on BROKEN
+ help
+	  EROFS (Enhanced Read-Only File System) is a lightweight
+	  read-only file system with modern designs (e.g. page-sized
+	  blocks, inline xattrs/data, etc.) for scenarios which need
+	  high-performance read-only solutions, e.g. firmware in
+	  mobile phones or live CDs.
+
+	  It also provides VLE compression support, focusing on
+	  random read improvements while keeping relatively low
+	  compression ratios, which is useful for high-performance
+	  devices with limited memory and ROM space.
+
+ If unsure, say N.
+
+config EROFS_FS_DEBUG
+ bool "EROFS debugging feature"
+ depends on EROFS_FS
+ help
+ Print EROFS debugging messages and enable more BUG_ONs
+ which check the filesystem consistency aggressively.
+
+ For daily use, say N.
+
+config EROFS_FS_XATTR
+ bool "EROFS extended attributes"
+ depends on EROFS_FS
+ default y
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ If unsure, say N.
+
+config EROFS_FS_POSIX_ACL
+ bool "EROFS Access Control Lists"
+ depends on EROFS_FS_XATTR
+ select FS_POSIX_ACL
+ default y
+ help
+ Posix Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
+
+config EROFS_FS_SECURITY
+ bool "EROFS Security Labels"
+ depends on EROFS_FS_XATTR
+ help
+ Security labels provide an access control facility to support Linux
+ Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
+ Linux. This option enables an extended attribute handler for file
+	  security labels in the erofs filesystem, which requires extended
+	  attribute support to be enabled in advance.
+
+ If you are not using a security module, say N.
+
+config EROFS_FS_USE_VM_MAP_RAM
+ bool "EROFS VM_MAP_RAM Support"
+ depends on EROFS_FS
+ help
+	  Use vm_map_ram/vm_unmap_ram instead of vmap/vunmap.
+
+ If you don't know what these are, say N.
+
+config EROFS_FAULT_INJECTION
+ bool "EROFS fault injection facility"
+ depends on EROFS_FS
+ help
+ Test EROFS to inject faults such as ENOMEM, EIO, and so on.
+ If unsure, say N.
+
+config EROFS_FS_ZIP
+ bool "EROFS Data Compresssion Support"
+ depends on EROFS_FS
+ help
+ Currently we support VLE Compression only.
+ Play at your own risk.
+
+ If you don't want to use compression feature, say N.
+
+config EROFS_FS_CLUSTER_PAGE_LIMIT
+ int "EROFS Cluster Pages Hard Limit"
+ depends on EROFS_FS_ZIP
+ range 1 256
+ default "1"
+ help
+	  Indicates the hard limit on the number of VLE compressed
+	  pages in a compressed cluster.
+
+	  For example, if the files of an image are compressed
+	  in 8k units, the hard limit should not be less
+	  than 2. Otherwise, the image cannot be mounted
+	  correctly on this kernel.
+
+choice
+ prompt "EROFS VLE Data Decompression mode"
+ depends on EROFS_FS_ZIP
+ default EROFS_FS_ZIP_CACHE_BIPOLAR
+ help
+	  EROFS supports three options for VLE decompression.
+	  "In-place Decompression Only" consumes the minimum memory
+	  with the lowest random read performance.
+
+	  "Bipolar Cached Decompression" consumes the maximum memory
+	  with the highest random read performance.
+
+	  If unsure, select "Bipolar Cached Decompression".
+
+config EROFS_FS_ZIP_NO_CACHE
+ bool "In-place Decompression Only"
+ help
+ Read compressed data into page cache and do in-place
+ decompression directly.
+
+config EROFS_FS_ZIP_CACHE_UNIPOLAR
+ bool "Unipolar Cached Decompression"
+ help
+ For each request, it caches the last compressed page
+ for further reading.
+	  It still decompresses in place for the remaining compressed pages.
+
+config EROFS_FS_ZIP_CACHE_BIPOLAR
+ bool "Bipolar Cached Decompression"
+ help
+	  For each request, it caches both end compressed pages
+	  for further reading.
+	  It still decompresses in place for the remaining compressed pages.
+
+ Recommended for performance priority.
+
+endchoice
+
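
A quick worked check of the EROFS_FS_CLUSTER_PAGE_LIMIT help text above: the
hard limit is the compressed cluster size divided by the page size, rounded
up. A minimal sketch, assuming 4KB pages (min_cluster_page_limit() is a
hypothetical helper for illustration, not an erofs function):

    /* smallest valid cluster page limit for a given compressed cluster
     * size, assuming 4KB pages
     */
    static unsigned int min_cluster_page_limit(unsigned int cluster_bytes)
    {
            const unsigned int page_size = 4096;

            return (cluster_bytes + page_size - 1) / page_size;
    }

    /* e.g. min_cluster_page_limit(8192) == 2, matching the help text */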
diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile
new file mode 100644
index 000000000000..9a766eb7ed75
--- /dev/null
+++ b/drivers/staging/erofs/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+EROFS_VERSION = "1.0pre1"
+
+ccflags-y += -Wall -DEROFS_VERSION=\"$(EROFS_VERSION)\"
+
+obj-$(CONFIG_EROFS_FS) += erofs.o
+# staging requirement: to be self-contained in its own directory
+ccflags-y += -I$(src)/include
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
+erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += unzip_vle.o unzip_lz4.o unzip_vle_lz4.o
+
diff --git a/drivers/staging/erofs/TODO b/drivers/staging/erofs/TODO
new file mode 100644
index 000000000000..f99ddb842f99
--- /dev/null
+++ b/drivers/staging/erofs/TODO
@@ -0,0 +1,45 @@
+
+EROFS is still a work in progress, thus it is not suitable
+for production use. Play at your own risk :)
+
+TODO List:
+ - add the missing error handling code
+   (mainly in the xattr and decompression submodules);
+
+ - finalize the erofs on-disk format design (which means that
+ minor on-disk revisions could happen later);
+
+ - documentation and detailed technical analysis;
+
+ - general code review and clean up
+ (including confusing variable names and code snippets);
+
+ - support larger compressed cluster sizes for selection
+ (currently erofs only works as expected with the page-sized
+ compressed cluster configuration, usually 4KB);
+
+ - support more lossless data compression algorithms
+ in addition to LZ4 algorithms in VLE approach;
+
+ - data deduplication and other useful features.
+
+erofs-mkfs (preview version) binaries for i386 / x86_64 are available at:
+
+ https://github.com/hsiangkao/erofs_mkfs_binary
+
+Opening the mkfs source code to the public is still in progress;
+in any case, an open-source mkfs will be released in the near future.
+
+
+Code, suggestions, etc., are welcome. Please feel free to
+ask questions and send patches,
+
+To:
+ linux-erofs mailing list <linux-erofs@lists.ozlabs.org>
+ Gao Xiang <gaoxiang25@huawei.com>
+ Chao Yu <yuchao0@huawei.com>
+
+Cc: (for linux-kernel upstream patches)
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ linux-staging mailing list <devel@driverdev.osuosl.org>
+
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
new file mode 100644
index 000000000000..ac263a180253
--- /dev/null
+++ b/drivers/staging/erofs/data.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/data.c
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include "internal.h"
+#include <linux/prefetch.h>
+
+#include <trace/events/erofs.h>
+
+static inline void read_endio(struct bio *bio)
+{
+ int i;
+ struct bio_vec *bvec;
+ const blk_status_t err = bio->bi_status;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ struct page *page = bvec->bv_page;
+
+ /* page is already locked */
+ BUG_ON(PageUptodate(page));
+
+ if (unlikely(err))
+ SetPageError(page);
+ else
+ SetPageUptodate(page);
+
+ unlock_page(page);
+ /* page could be reclaimed now */
+ }
+ bio_put(bio);
+}
+
+/* prio -- true is used for directories */
+struct page *erofs_get_meta_page(struct super_block *sb,
+ erofs_blk_t blkaddr, bool prio)
+{
+ struct inode *bd_inode = sb->s_bdev->bd_inode;
+ struct address_space *mapping = bd_inode->i_mapping;
+ struct page *page;
+
+repeat:
+ page = find_or_create_page(mapping, blkaddr,
+ /*
+ * Prefer looping in the allocator rather than here,
+ * at least that code knows what it's doing.
+ */
+ mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);
+
+ BUG_ON(!page || !PageLocked(page));
+
+ if (!PageUptodate(page)) {
+ struct bio *bio;
+ int err;
+
+ bio = prepare_bio(sb, blkaddr, 1, read_endio);
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ BUG_ON(err != PAGE_SIZE);
+
+ __submit_bio(bio, REQ_OP_READ,
+ REQ_META | (prio ? REQ_PRIO : 0));
+
+ lock_page(page);
+
+ /* the page has been truncated by others? */
+ if (unlikely(page->mapping != mapping)) {
+ unlock_page(page);
+ put_page(page);
+ goto repeat;
+ }
+
+ /* more likely a read error */
+ if (unlikely(!PageUptodate(page))) {
+ unlock_page(page);
+ put_page(page);
+
+ page = ERR_PTR(-EIO);
+ }
+ }
+ return page;
+}
+
+static int erofs_map_blocks_flatmode(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ erofs_blk_t nblocks, lastblk;
+ u64 offset = map->m_la;
+ struct erofs_vnode *vi = EROFS_V(inode);
+
+ trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
+ BUG_ON(is_inode_layout_compression(inode));
+
+ nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+ lastblk = nblocks - is_inode_layout_inline(inode);
+
+ if (unlikely(offset >= inode->i_size)) {
+ /* leave out-of-bound access unmapped */
+ map->m_flags = 0;
+ map->m_plen = 0;
+ goto out;
+ }
+
+ /* there is no hole in flatmode */
+ map->m_flags = EROFS_MAP_MAPPED;
+
+ if (offset < blknr_to_addr(lastblk)) {
+ map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
+ map->m_plen = blknr_to_addr(lastblk) - offset;
+ } else if (is_inode_layout_inline(inode)) {
+ /* 2 - inode inline B: inode, [xattrs], inline last blk... */
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+
+ map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
+ vi->xattr_isize + erofs_blkoff(map->m_la);
+ map->m_plen = inode->i_size - offset;
+
+ /* inline data should locate in one meta block */
+ BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
+ map->m_flags |= EROFS_MAP_META;
+ } else {
+ errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
+ vi->nid, inode->i_size, map->m_la);
+ BUG();
+ }
+
+out:
+ map->m_llen = map->m_plen;
+ trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
+ return 0;
+}
+
+#ifdef CONFIG_EROFS_FS_ZIP
+extern int z_erofs_map_blocks_iter(struct inode *,
+ struct erofs_map_blocks *, struct page **, int);
+#endif
+
+int erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ struct page **mpage_ret, int flags)
+{
+	/* by default, reading raw data never uses erofs_map_blocks_iter */
+ if (unlikely(!is_inode_layout_compression(inode))) {
+ if (*mpage_ret != NULL)
+ put_page(*mpage_ret);
+ *mpage_ret = NULL;
+
+ return erofs_map_blocks(inode, map, flags);
+ }
+
+#ifdef CONFIG_EROFS_FS_ZIP
+ return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
+#else
+ /* data compression is not available */
+ return -ENOTSUPP;
+#endif
+}
+
+int erofs_map_blocks(struct inode *inode,
+ struct erofs_map_blocks *map, int flags)
+{
+ if (unlikely(is_inode_layout_compression(inode))) {
+ struct page *mpage = NULL;
+ int err;
+
+ err = erofs_map_blocks_iter(inode, map, &mpage, flags);
+ if (mpage != NULL)
+ put_page(mpage);
+ return err;
+ }
+ return erofs_map_blocks_flatmode(inode, map, flags);
+}
+
+static inline struct bio *erofs_read_raw_page(
+ struct bio *bio,
+ struct address_space *mapping,
+ struct page *page,
+ erofs_off_t *last_block,
+ unsigned nblocks,
+ bool ra)
+{
+ struct inode *inode = mapping->host;
+ erofs_off_t current_block = (erofs_off_t)page->index;
+ int err;
+
+ BUG_ON(!nblocks);
+
+ if (PageUptodate(page)) {
+ err = 0;
+ goto has_updated;
+ }
+
+ if (cleancache_get_page(page) == 0) {
+ err = 0;
+ SetPageUptodate(page);
+ goto has_updated;
+ }
+
+	/* note that in the readpage case, bio is also NULL */
+ if (bio != NULL &&
+ /* not continuous */
+ *last_block + 1 != current_block) {
+submit_bio_retry:
+ __submit_bio(bio, REQ_OP_READ, 0);
+ bio = NULL;
+ }
+
+ if (bio == NULL) {
+ struct erofs_map_blocks map = {
+ .m_la = blknr_to_addr(current_block),
+ };
+ erofs_blk_t blknr;
+ unsigned blkoff;
+
+ err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ if (unlikely(err))
+ goto err_out;
+
+ /* zero out the holed page */
+ if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
+
+ /* imply err = 0, see erofs_map_blocks */
+ goto has_updated;
+ }
+
+ /* for RAW access mode, m_plen must be equal to m_llen */
+ BUG_ON(map.m_plen != map.m_llen);
+
+ blknr = erofs_blknr(map.m_pa);
+ blkoff = erofs_blkoff(map.m_pa);
+
+ /* deal with inline page */
+ if (map.m_flags & EROFS_MAP_META) {
+ void *vsrc, *vto;
+ struct page *ipage;
+
+ BUG_ON(map.m_plen > PAGE_SIZE);
+
+ ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
+
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto err_out;
+ }
+
+ vsrc = kmap_atomic(ipage);
+ vto = kmap_atomic(page);
+ memcpy(vto, vsrc + blkoff, map.m_plen);
+ memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
+ kunmap_atomic(vto);
+ kunmap_atomic(vsrc);
+ flush_dcache_page(page);
+
+ SetPageUptodate(page);
+ /* TODO: could we unlock the page earlier? */
+ unlock_page(ipage);
+ put_page(ipage);
+
+ /* imply err = 0, see erofs_map_blocks */
+ goto has_updated;
+ }
+
+ /* pa must be block-aligned for raw reading */
+ BUG_ON(erofs_blkoff(map.m_pa) != 0);
+
+ /* max # of continuous pages */
+ if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
+ nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
+ if (nblocks > BIO_MAX_PAGES)
+ nblocks = BIO_MAX_PAGES;
+
+ bio = prepare_bio(inode->i_sb, blknr, nblocks, read_endio);
+ }
+
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ /* out of the extent or bio is full */
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
+
+ *last_block = current_block;
+
+	/* submit in advance in case it is followed by too many gaps */
+ if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
+		/* err should be reset to 0 after submitting */
+ err = 0;
+ goto submit_bio_out;
+ }
+
+ return bio;
+
+err_out:
+ /* for sync reading, set page error immediately */
+ if (!ra) {
+ SetPageError(page);
+ ClearPageUptodate(page);
+ }
+has_updated:
+ unlock_page(page);
+
+	/* if updated manually, the continuous pages have a gap */
+ if (bio != NULL)
+submit_bio_out:
+ __submit_bio(bio, REQ_OP_READ, 0);
+
+ return unlikely(err) ? ERR_PTR(err) : NULL;
+}
+
+/*
+ * since we don't have write or truncate flows, no inode
+ * locking needs to be held at the moment.
+ */
+static int erofs_raw_access_readpage(struct file *file, struct page *page)
+{
+ erofs_off_t last_block;
+ struct bio *bio;
+
+ trace_erofs_readpage(page, true);
+
+ bio = erofs_read_raw_page(NULL, page->mapping,
+ page, &last_block, 1, false);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */
+ return 0;
+}
+
+static int erofs_raw_access_readpages(struct file *filp,
+ struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ erofs_off_t last_block;
+ struct bio *bio = NULL;
+ gfp_t gfp = readahead_gfp_mask(mapping);
+ struct page *page = list_last_entry(pages, struct page, lru);
+
+ trace_erofs_readpages(mapping->host, page, nr_pages, true);
+
+ for (; nr_pages; --nr_pages) {
+ page = list_entry(pages->prev, struct page, lru);
+
+ prefetchw(&page->flags);
+ list_del(&page->lru);
+
+ if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+ bio = erofs_read_raw_page(bio, mapping, page,
+ &last_block, nr_pages, true);
+
+ /* all the page errors are ignored when readahead */
+ if (IS_ERR(bio)) {
+ pr_err("%s, readahead error at page %lu of nid %llu\n",
+ __func__, page->index,
+ EROFS_V(mapping->host)->nid);
+
+ bio = NULL;
+ }
+ }
+
+ /* pages could still be locked */
+ put_page(page);
+ }
+ BUG_ON(!list_empty(pages));
+
+ /* the rare case (end in gaps) */
+ if (unlikely(bio != NULL))
+ __submit_bio(bio, REQ_OP_READ, 0);
+ return 0;
+}
+
+/* for uncompressed (aligned) files and raw access for other files */
+const struct address_space_operations erofs_raw_access_aops = {
+ .readpage = erofs_raw_access_readpage,
+ .readpages = erofs_raw_access_readpages,
+};
+
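A quick aside on the arithmetic this read path leans on: erofs_map_blocks()
returns byte addresses, and the raw path splits them into a block number and
an in-block offset before chaining pages into a bio. A minimal userspace
sketch of that split (erofs_blknr()/erofs_blkoff() are reimplemented here
purely for illustration, with the 4KiB block size the driver assumes):

	#include <stdio.h>
	#include <stdint.h>

	#define EROFS_BLKSIZ 4096	/* block size == PAGE_SIZE in this driver */

	static uint32_t blknr(uint64_t addr)  { return addr / EROFS_BLKSIZ; }
	static uint32_t blkoff(uint64_t addr) { return addr % EROFS_BLKSIZ; }

	int main(void)
	{
		/* hypothetical m_pa returned by erofs_map_blocks() */
		uint64_t m_pa = 0x3008;

		/* raw extents must start block-aligned (blkoff == 0);
		 * only inline (EROFS_MAP_META) data may sit mid-block */
		printf("blknr %u blkoff %u\n", blknr(m_pa), blkoff(m_pa));
		return 0;
	}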
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
new file mode 100644
index 000000000000..be6ae3b1bdbe
--- /dev/null
+++ b/drivers/staging/erofs/dir.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/dir.c
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include "internal.h"
+
+static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
+ [EROFS_FT_UNKNOWN] = DT_UNKNOWN,
+ [EROFS_FT_REG_FILE] = DT_REG,
+ [EROFS_FT_DIR] = DT_DIR,
+ [EROFS_FT_CHRDEV] = DT_CHR,
+ [EROFS_FT_BLKDEV] = DT_BLK,
+ [EROFS_FT_FIFO] = DT_FIFO,
+ [EROFS_FT_SOCK] = DT_SOCK,
+ [EROFS_FT_SYMLINK] = DT_LNK,
+};
+
+static int erofs_fill_dentries(struct dir_context *ctx,
+ void *dentry_blk, unsigned *ofs,
+ unsigned nameoff, unsigned maxsize)
+{
+	struct erofs_dirent *de = dentry_blk + *ofs;
+	const struct erofs_dirent *end = dentry_blk + nameoff;
+
+	while (de < end) {
+ const char *de_name;
+ int de_namelen;
+ unsigned char d_type;
+#ifdef CONFIG_EROFS_FS_DEBUG
+ unsigned dbg_namelen;
+ unsigned char dbg_namebuf[EROFS_NAME_LEN];
+#endif
+
+		if (likely(de->file_type < EROFS_FT_MAX))
+ d_type = erofs_filetype_table[de->file_type];
+ else
+ d_type = DT_UNKNOWN;
+
+ nameoff = le16_to_cpu(de->nameoff);
+ de_name = (char *)dentry_blk + nameoff;
+
+ de_namelen = unlikely(de + 1 >= end) ?
+ /* last directory entry */
+ strnlen(de_name, maxsize - nameoff) :
+ le16_to_cpu(de[1].nameoff) - nameoff;
+
+		/* a corrupted directory was found */
+ BUG_ON(de_namelen < 0);
+
+#ifdef CONFIG_EROFS_FS_DEBUG
+ dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
+ memcpy(dbg_namebuf, de_name, dbg_namelen);
+ dbg_namebuf[dbg_namelen] = '\0';
+
+ debugln("%s, found de_name %s de_len %d d_type %d", __func__,
+ dbg_namebuf, de_namelen, d_type);
+#endif
+
+ if (!dir_emit(ctx, de_name, de_namelen,
+ le64_to_cpu(de->nid), d_type))
+			/* stopped for some reason */
+ return 1;
+ ++de;
+ *ofs += sizeof(struct erofs_dirent);
+ }
+ *ofs = maxsize;
+ return 0;
+}
+
+static int erofs_readdir(struct file *f, struct dir_context *ctx)
+{
+ struct inode *dir = file_inode(f);
+ struct address_space *mapping = dir->i_mapping;
+ const size_t dirsize = i_size_read(dir);
+ unsigned i = ctx->pos / EROFS_BLKSIZ;
+ unsigned ofs = ctx->pos % EROFS_BLKSIZ;
+ int err = 0;
+ bool initial = true;
+
+ while (ctx->pos < dirsize) {
+ struct page *dentry_page;
+ struct erofs_dirent *de;
+ unsigned nameoff, maxsize;
+
+ dentry_page = read_mapping_page(mapping, i, NULL);
+		if (IS_ERR(dentry_page)) {
+			err = PTR_ERR(dentry_page);
+			break;
+		}
+
+ lock_page(dentry_page);
+ de = (struct erofs_dirent *)kmap(dentry_page);
+
+ nameoff = le16_to_cpu(de->nameoff);
+
+ if (unlikely(nameoff < sizeof(struct erofs_dirent) ||
+ nameoff >= PAGE_SIZE)) {
+ errln("%s, invalid de[0].nameoff %u",
+ __func__, nameoff);
+
+ err = -EIO;
+ goto skip_this;
+ }
+
+ maxsize = min_t(unsigned, dirsize - ctx->pos + ofs, PAGE_SIZE);
+
+		/* search dirents at an arbitrary position */
+ if (unlikely(initial)) {
+ initial = false;
+
+ ofs = roundup(ofs, sizeof(struct erofs_dirent));
+ if (unlikely(ofs >= nameoff))
+ goto skip_this;
+ }
+
+ err = erofs_fill_dentries(ctx, de, &ofs, nameoff, maxsize);
+skip_this:
+ kunmap(dentry_page);
+
+ unlock_page(dentry_page);
+ put_page(dentry_page);
+
+ ctx->pos = blknr_to_addr(i) + ofs;
+
+ if (unlikely(err))
+ break;
+ ++i;
+ ofs = 0;
+ }
+ return err < 0 ? err : 0;
+}
+
+const struct file_operations erofs_dir_fops = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate = erofs_readdir,
+};
+
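The readdir loop works because every directory block is self-describing:
de[0].nameoff marks where the names begin, which is also the end of the
dirent array and therefore encodes the entry count. A hedged userspace
sketch of walking one block (the struct mirrors erofs_dirent but is
redeclared for illustration; on-disk fields are little-endian and
byte-swapping is omitted, so this only works on an LE host):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct dirent_disk {		/* mirrors the 12-byte erofs_dirent */
		uint64_t nid;
		uint16_t nameoff;
		uint8_t  file_type;
		uint8_t  reserved;
	} __attribute__((packed));

	static void walk_block(const uint8_t *blk, unsigned int blksz)
	{
		const struct dirent_disk *de = (const void *)blk;
		unsigned int nr = de[0].nameoff / sizeof(*de);

		for (unsigned int i = 0; i < nr; i++) {
			unsigned int off = de[i].nameoff;
			size_t len = (i + 1 < nr ? de[i + 1].nameoff : blksz) - off;

			/* the tail of the last block is zeroed, hence strnlen() */
			len = strnlen((const char *)blk + off, len);
			printf("%.*s (nid %llu)\n", (int)len,
			       (const char *)blk + off,
			       (unsigned long long)de[i].nid);
		}
	}

	int main(void)
	{
		uint8_t blk[64] = { 0 };
		struct dirent_disk *de = (void *)blk;

		de[0] = (struct dirent_disk){ .nid = 3, .nameoff = 24, .file_type = 2 };
		de[1] = (struct dirent_disk){ .nid = 5, .nameoff = 25, .file_type = 1 };
		memcpy(blk + 24, ".", 1);
		memcpy(blk + 25, "foo", 3);

		walk_block(blk, sizeof(blk));
		return 0;
	}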
diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
new file mode 100644
index 000000000000..2f8e2bf70941
--- /dev/null
+++ b/drivers/staging/erofs/erofs_fs.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ *
+ * linux/drivers/staging/erofs/erofs_fs.h
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is dual-licensed; you may select either the GNU General Public
+ * License version 2 or Apache License, Version 2.0. See the file COPYING
+ * in the main directory of the Linux distribution for more details.
+ */
+#ifndef __EROFS_FS_H
+#define __EROFS_FS_H
+
+/* Enhanced(Extended) ROM File System */
+#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2
+#define EROFS_SUPER_OFFSET 1024
+
+struct erofs_super_block {
+/* 0 */__le32 magic;           /* in little endian */
+/* 4 */__le32 checksum; /* crc32c(super_block) */
+/* 8 */__le32 features;
+/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */
+/* 13 */__u8 reserved;
+
+/* 14 */__le16 root_nid;
+/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */
+
+/* 24 */__le64 build_time; /* inode v1 time derivation */
+/* 32 */__le32 build_time_nsec;
+/* 36 */__le32 blocks; /* used for statfs */
+/* 40 */__le32 meta_blkaddr;
+/* 44 */__le32 xattr_blkaddr;
+/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */
+/* 64 */__u8 volume_name[16]; /* volume name */
+
+/* 80 */__u8 reserved2[48]; /* 128 bytes */
+} __packed;
+
+#define __EROFS_BIT(_prefix, _cur, _pre) enum { \
+ _prefix ## _cur ## _BIT = _prefix ## _pre ## _BIT + \
+ _prefix ## _pre ## _BITS }
+
+/*
+ * erofs inode data mapping:
+ * 0 - inode plain without inline data A:
+ * inode, [xattrs], ... | ... | no-holed data
+ * 1 - inode VLE compression B:
+ * inode, [xattrs], extents ... | ...
+ * 2 - inode plain with inline data C:
+ * inode, [xattrs], last_inline_data, ... | ... | no-holed data
+ * 3~7 - reserved
+ */
+enum {
+ EROFS_INODE_LAYOUT_PLAIN,
+ EROFS_INODE_LAYOUT_COMPRESSION,
+ EROFS_INODE_LAYOUT_INLINE,
+ EROFS_INODE_LAYOUT_MAX
+};
+#define EROFS_I_VERSION_BITS 1
+#define EROFS_I_DATA_MAPPING_BITS 3
+
+#define EROFS_I_VERSION_BIT 0
+__EROFS_BIT(EROFS_I_, DATA_MAPPING, VERSION);
+
+struct erofs_inode_v1 {
+/* 0 */__le16 i_advise;
+
+/* 1 header + (n - 1) * 4 bytes inline xattrs to keep continuity */
+/* 2 */__le16 i_xattr_icount;
+/* 4 */__le16 i_mode;
+/* 6 */__le16 i_nlink;
+/* 8 */__le32 i_size;
+/* 12 */__le32 i_reserved;
+/* 16 */union {
+ /* file total compressed blocks for data mapping 1 */
+ __le32 compressed_blocks;
+ __le32 raw_blkaddr;
+
+ /* for device files, used to indicate old/new device # */
+ __le32 rdev;
+ } i_u __packed;
+/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */
+/* 24 */__le16 i_uid;
+/* 26 */__le16 i_gid;
+/* 28 */__le32 i_checksum;
+} __packed;
+
+/* 32 bytes on-disk inode */
+#define EROFS_INODE_LAYOUT_V1 0
+/* 64 bytes on-disk inode */
+#define EROFS_INODE_LAYOUT_V2 1
+
+struct erofs_inode_v2 {
+ __le16 i_advise;
+
+	/* 1 header + (n - 1) * 4 bytes inline xattrs to keep continuity */
+ __le16 i_xattr_icount;
+ __le16 i_mode;
+ __le16 i_reserved; /* 8 bytes */
+ __le64 i_size; /* 16 bytes */
+ union {
+ /* file total compressed blocks for data mapping 1 */
+ __le32 compressed_blocks;
+ __le32 raw_blkaddr;
+
+ /* for device files, used to indicate old/new device # */
+ __le32 rdev;
+ } i_u __packed;
+
+ /* only used for 32-bit stat compatibility */
+ __le32 i_ino; /* 24 bytes */
+
+ __le32 i_uid;
+ __le32 i_gid;
+ __le64 i_ctime; /* 32 bytes */
+ __le32 i_ctime_nsec;
+ __le32 i_nlink;
+ __u8 i_reserved2[12];
+ __le32 i_checksum; /* 64 bytes */
+} __packed;
+
+#define EROFS_MAX_SHARED_XATTRS (128)
+/* h_shared_count values between 129 and 255 are special */
+#define EROFS_SHARED_XATTR_EXTENT (255)
+
+/*
+ * inline xattrs (n == i_xattr_icount):
+ * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
+ * 12 bytes / \
+ * / \
+ * /-----------------------\
+ * | erofs_xattr_entries+ |
+ * +-----------------------+
+ * inline xattrs must start with an erofs_xattr_ibody_header;
+ * for a read-only fs, there is no need to introduce h_refcount
+ */
+struct erofs_xattr_ibody_header {
+ __le32 h_checksum;
+ __u8 h_shared_count;
+ __u8 h_reserved[7];
+ __le32 h_shared_xattrs[0]; /* shared xattr id array */
+} __packed;
+
+/* Name indexes */
+#define EROFS_XATTR_INDEX_USER 1
+#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2
+#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3
+#define EROFS_XATTR_INDEX_TRUSTED 4
+#define EROFS_XATTR_INDEX_LUSTRE 5
+#define EROFS_XATTR_INDEX_SECURITY 6
+
+/* xattr entry (for both inline & shared xattrs) */
+struct erofs_xattr_entry {
+ __u8 e_name_len; /* length of name */
+ __u8 e_name_index; /* attribute name index */
+ __le16 e_value_size; /* size of attribute value */
+ /* followed by e_name and e_value */
+ char e_name[0]; /* attribute name */
+} __packed;
+
+#define ondisk_xattr_ibody_size(count) ({\
+ u32 __count = le16_to_cpu(count); \
+ ((__count) == 0) ? 0 : \
+ sizeof(struct erofs_xattr_ibody_header) + \
+ sizeof(__u32) * ((__count) - 1); })
+
+#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry))
+#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \
+ sizeof(struct erofs_xattr_entry) + \
+ (entry)->e_name_len + le16_to_cpu((entry)->e_value_size))
+
+/* have to be aligned with 8 bytes on disk */
+struct erofs_extent_header {
+ __le32 eh_checksum;
+ __le32 eh_reserved[3];
+} __packed;
+
+/*
+ * Z_EROFS Variable-sized Logical Extent cluster type:
+ * 0 - literal (uncompressed) cluster
+ * 1 - compressed cluster (for the head logical cluster)
+ * 2 - compressed cluster (for the other logical clusters)
+ *
+ * In detail,
+ * 0 - literal (uncompressed) cluster,
+ * di_advise = 0
+ * di_clusterofs = the literal data offset of the cluster
+ * di_blkaddr = the blkaddr of the literal cluster
+ *
+ * 1 - compressed cluster (for the head logical cluster)
+ * di_advise = 1
+ * di_clusterofs = the decompressed data offset of the cluster
+ * di_blkaddr = the blkaddr of the compressed cluster
+ *
+ * 2 - compressed cluster (for the other logical clusters)
+ * di_advise = 2
+ * di_clusterofs =
+ * the decompressed data offset in its own head cluster
+ * di_u.delta[0] = distance to its corresponding head cluster
+ * di_u.delta[1] = distance to its corresponding tail cluster
+ * (di_advise could be 0, 1 or 2)
+ */
+#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
+#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
+
+struct z_erofs_vle_decompressed_index {
+ __le16 di_advise;
+ /* where to decompress in the head cluster */
+ __le16 di_clusterofs;
+
+ union {
+ /* for the head cluster */
+ __le32 blkaddr;
+ /*
+		 * for the remaining clusters
+		 * (e.g. for a 4K page-sized cluster, the maximum
+		 *  distance is 4K * 64K = 256MB)
+ * [0] - pointing to the head cluster
+ * [1] - pointing to the tail cluster
+ */
+ __le16 delta[2];
+ } di_u __packed; /* 8 bytes */
+} __packed;
+
+#define Z_EROFS_VLE_EXTENT_ALIGN(size) round_up(size, \
+ sizeof(struct z_erofs_vle_decompressed_index))
+
+/* dirents sort in alphabetical order, thus we can do binary search */
+struct erofs_dirent {
+ __le64 nid; /* 0, node number */
+ __le16 nameoff; /* 8, start offset of file name */
+ __u8 file_type; /* 10, file type */
+ __u8 reserved; /* 11, reserved */
+} __packed;
+
+/* on-disk file types used in erofs_dirent */
+enum {
+ EROFS_FT_UNKNOWN,
+ EROFS_FT_REG_FILE,
+ EROFS_FT_DIR,
+ EROFS_FT_CHRDEV,
+ EROFS_FT_BLKDEV,
+ EROFS_FT_FIFO,
+ EROFS_FT_SOCK,
+ EROFS_FT_SYMLINK,
+ EROFS_FT_MAX
+};
+
+#define EROFS_NAME_LEN 255
+
+/* check the EROFS on-disk layout strictly at compile time */
+static inline void erofs_check_ondisk_layout_definitions(void)
+{
+ BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
+ BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32);
+ BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64);
+ BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
+ BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4);
+ BUILD_BUG_ON(sizeof(struct erofs_extent_header) != 16);
+ BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
+ BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
+}
+
+#endif
+
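To make the i_advise layout and the xattr size formula concrete: __EROFS_BIT
places the 3-bit data-mapping field right above the single version bit, and
i_xattr_icount counts one 12-byte header plus (n - 1) trailing 4-byte units.
A worked sketch with the constants re-derived locally (the i_advise and
icount values below are hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define V_BIT	0			/* EROFS_I_VERSION_BIT */
	#define V_BITS	1
	#define DM_BIT	(V_BIT + V_BITS)	/* what __EROFS_BIT generates */
	#define DM_BITS	3

	int main(void)
	{
		uint16_t i_advise = 0x4;	/* hypothetical on-disk value */
		unsigned int version = (i_advise >> V_BIT) & ((1 << V_BITS) - 1);
		unsigned int mode = (i_advise >> DM_BIT) & ((1 << DM_BITS) - 1);

		unsigned int icount = 4;	/* hypothetical i_xattr_icount */
		unsigned int xattr_isize = icount ? 12 + (icount - 1) * 4 : 0;

		/* prints: version 0 mode 2 xattr_isize 24 (mode 2 == inline) */
		printf("version %u mode %u xattr_isize %u\n",
		       version, mode, xattr_isize);
		return 0;
	}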
diff --git a/drivers/staging/erofs/include/linux/tagptr.h b/drivers/staging/erofs/include/linux/tagptr.h
new file mode 100644
index 000000000000..ccd106dbd48e
--- /dev/null
+++ b/drivers/staging/erofs/include/linux/tagptr.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Tagged pointer implementation
+ *
+ * Copyright (C) 2018 Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef _LINUX_TAGPTR_H
+#define _LINUX_TAGPTR_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/*
+ * the names of tagged pointer types are tagptr{1, 2, 3...}_t;
+ * avoid using the internal structs __tagptr{1, 2, 3...} directly
+ */
+#define __MAKE_TAGPTR(n) \
+typedef struct __tagptr##n { \
+ uintptr_t v; \
+} tagptr##n##_t;
+
+__MAKE_TAGPTR(1)
+__MAKE_TAGPTR(2)
+__MAKE_TAGPTR(3)
+__MAKE_TAGPTR(4)
+
+#undef __MAKE_TAGPTR
+
+extern void __compiletime_error("bad tagptr tags")
+ __bad_tagptr_tags(void);
+
+extern void __compiletime_error("bad tagptr type")
+ __bad_tagptr_type(void);
+
+/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */
+#define __tagptr_mask_1(ptr, n) \
+ __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
+ (1UL << (n)) - 1 :
+
+#define __tagptr_mask(ptr) (\
+ __tagptr_mask_1(ptr, 1) ( \
+ __tagptr_mask_1(ptr, 2) ( \
+ __tagptr_mask_1(ptr, 3) ( \
+ __tagptr_mask_1(ptr, 4) ( \
+ __bad_tagptr_type(), 0)))))
+
+/* generate a tagged pointer from a raw value */
+#define tagptr_init(type, val) \
+ ((typeof(type)){ .v = (uintptr_t)(val) })
+
+/*
+ * directly cast a tagged pointer to the native pointer type, which
+ * could be used for backward compatibility of existing code.
+ */
+#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
+
+/* encode tagged pointers */
+#define tagptr_fold(type, ptr, _tags) ({ \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
+ __bad_tagptr_tags(); \
+tagptr_init(type, (uintptr_t)(ptr) | tags); })
+
+/* decode tagged pointers */
+#define tagptr_unfold_ptr(tptr) \
+ ((void *)((tptr).v & ~__tagptr_mask(tptr)))
+
+#define tagptr_unfold_tags(tptr) \
+ ((tptr).v & __tagptr_mask(tptr))
+
+/* operations for tagged pointers */
+#define tagptr_eq(_tptr1, _tptr2) ({ \
+ typeof(_tptr1) tptr1 = (_tptr1); \
+ typeof(_tptr2) tptr2 = (_tptr2); \
+ (void)(&tptr1 == &tptr2); \
+(tptr1).v == (tptr2).v; })
+
+/* lock-free CAS operation */
+#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ typeof(_o) o = (_o); \
+ typeof(_n) n = (_n); \
+ (void)(&o == &n); \
+ (void)(&o == ptptr); \
+tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
+
+/* wrap WRITE_ONCE if atomic update is needed */
+#define tagptr_replace_tags(_ptptr, tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
+*ptptr; })
+
+#define tagptr_set_tags(_ptptr, _tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+ __bad_tagptr_tags(); \
+ ptptr->v |= tags; \
+*ptptr; })
+
+#define tagptr_clear_tags(_ptptr, _tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+ __bad_tagptr_tags(); \
+ ptptr->v &= ~tags; \
+*ptptr; })
+
+#endif
+
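A short usage sketch of this API (kernel context assumed; the struct and the
helper below are hypothetical, and the pointee must be aligned to at least
2 bytes so that tagptr1_t really has a spare low bit):

	#include "tagptr.h"

	struct foo { int x; } __attribute__((aligned(4)));

	static void tagptr_example(struct foo *p)
	{
		/* pack the pointer and a 1-bit tag into a single word */
		tagptr1_t t = tagptr_fold(tagptr1_t, p, 1);

		struct foo *q = tagptr_unfold_ptr(t);	/* q == p */
		uintptr_t tag = tagptr_unfold_tags(t);	/* tag == 1 */

		tagptr_clear_tags(&t, 1);		/* tag bit back to 0 */
		(void)q; (void)tag;
	}

Note that tagptr_fold() and tagptr_set_tags()/tagptr_clear_tags() reject
constant tags outside the per-type mask at compile time, which is the point
of the __tagptr_mask() type dispatch above.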
diff --git a/drivers/staging/erofs/include/trace/events/erofs.h b/drivers/staging/erofs/include/trace/events/erofs.h
new file mode 100644
index 000000000000..5aead93a762f
--- /dev/null
+++ b/drivers/staging/erofs/include/trace/events/erofs.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM erofs
+
+#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EROFS_H
+
+#include <linux/tracepoint.h>
+
+#define show_dev(dev) MAJOR(dev), MINOR(dev)
+#define show_dev_nid(entry) show_dev(entry->dev), entry->nid
+
+#define show_file_type(type) \
+ __print_symbolic(type, \
+ { 0, "FILE" }, \
+ { 1, "DIR" })
+
+#define show_map_flags(flags) __print_flags(flags, "|", \
+ { EROFS_GET_BLOCKS_RAW, "RAW" })
+
+#define show_mflags(flags) __print_flags(flags, "", \
+ { EROFS_MAP_MAPPED, "M" }, \
+ { EROFS_MAP_META, "I" }, \
+ { EROFS_MAP_ZIPPED, "Z" })
+
+TRACE_EVENT(erofs_lookup,
+
+ TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),
+
+ TP_ARGS(dir, dentry, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(erofs_nid_t, nid )
+ __field(const char *, name )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->nid = EROFS_V(dir)->nid;
+ __entry->name = dentry->d_name.name;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
+ show_dev_nid(__entry),
+ __entry->name,
+ __entry->flags)
+);
+
+TRACE_EVENT(erofs_fill_inode,
+ TP_PROTO(struct inode *inode, int isdir),
+ TP_ARGS(inode, isdir),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(erofs_nid_t, nid )
+ __field(erofs_blk_t, blkaddr )
+ __field(unsigned int, ofs )
+ __field(int, isdir )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_V(inode)->nid;
+ __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
+ __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
+ __entry->isdir = isdir;
+ ),
+
+ TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d",
+ show_dev_nid(__entry),
+ __entry->blkaddr, __entry->ofs,
+ __entry->isdir)
+);
+
+TRACE_EVENT(erofs_readpage,
+
+ TP_PROTO(struct page *page, bool raw),
+
+ TP_ARGS(page, raw),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(erofs_nid_t, nid )
+ __field(int, dir )
+ __field(pgoff_t, index )
+ __field(int, uptodate)
+ __field(bool, raw )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ __entry->nid = EROFS_V(page->mapping->host)->nid;
+ __entry->dir = S_ISDIR(page->mapping->host->i_mode);
+ __entry->index = page->index;
+ __entry->uptodate = PageUptodate(page);
+ __entry->raw = raw;
+ ),
+
+ TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d "
+ "raw = %d",
+ show_dev_nid(__entry),
+ show_file_type(__entry->dir),
+ (unsigned long)__entry->index,
+ __entry->uptodate,
+ __entry->raw)
+);
+
+TRACE_EVENT(erofs_readpages,
+
+ TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage,
+ bool raw),
+
+ TP_ARGS(inode, page, nrpage, raw),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(erofs_nid_t, nid )
+ __field(pgoff_t, start )
+ __field(unsigned int, nrpage )
+ __field(bool, raw )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_V(inode)->nid;
+ __entry->start = page->index;
+ __entry->nrpage = nrpage;
+ __entry->raw = raw;
+ ),
+
+ TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d",
+ show_dev_nid(__entry),
+ (unsigned long)__entry->start,
+ __entry->nrpage,
+ __entry->raw)
+);
+
+DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
+ TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+ unsigned int flags),
+
+ TP_ARGS(inode, map, flags),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( erofs_nid_t, nid )
+ __field( erofs_off_t, la )
+ __field( u64, llen )
+ __field( unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_V(inode)->nid;
+ __entry->la = map->m_la;
+ __entry->llen = map->m_llen;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s",
+ show_dev_nid(__entry),
+ __entry->la, __entry->llen, show_map_flags(__entry->flags))
+);
+
+DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter,
+ TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+ unsigned flags),
+
+ TP_ARGS(inode, map, flags)
+);
+
+DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
+ TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+ unsigned int flags, int ret),
+
+ TP_ARGS(inode, map, flags, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( erofs_nid_t, nid )
+ __field( unsigned int, flags )
+ __field( erofs_off_t, la )
+ __field( erofs_off_t, pa )
+ __field( u64, llen )
+ __field( u64, plen )
+ __field( unsigned int, mflags )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_V(inode)->nid;
+ __entry->flags = flags;
+ __entry->la = map->m_la;
+ __entry->pa = map->m_pa;
+ __entry->llen = map->m_llen;
+ __entry->plen = map->m_plen;
+ __entry->mflags = map->m_flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), nid = %llu, flags %s "
+ "la %llu pa %llu llen %llu plen %llu mflags %s ret %d",
+ show_dev_nid(__entry), show_map_flags(__entry->flags),
+ __entry->la, __entry->pa, __entry->llen, __entry->plen,
+ show_mflags(__entry->mflags), __entry->ret)
+);
+
+DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit,
+ TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
+ unsigned flags, int ret),
+
+ TP_ARGS(inode, map, flags, ret)
+);
+
+TRACE_EVENT(erofs_destroy_inode,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( erofs_nid_t, nid )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_V(inode)->nid;
+ ),
+
+ TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry))
+);
+
+#endif /* _TRACE_EROFS_H */
+
+ /* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
new file mode 100644
index 000000000000..fbf6ff25cd1b
--- /dev/null
+++ b/drivers/staging/erofs/inode.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/inode.c
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include "xattr.h"
+
+#include <trace/events/erofs.h>
+
+/* no locking */
+static int read_inode(struct inode *inode, void *data)
+{
+ struct erofs_vnode *vi = EROFS_V(inode);
+ struct erofs_inode_v1 *v1 = data;
+ const unsigned advise = le16_to_cpu(v1->i_advise);
+
+ vi->data_mapping_mode = __inode_data_mapping(advise);
+
+ if (unlikely(vi->data_mapping_mode >= EROFS_INODE_LAYOUT_MAX)) {
+ errln("unknown data mapping mode %u of nid %llu",
+ vi->data_mapping_mode, vi->nid);
+ DBG_BUGON(1);
+ return -EIO;
+ }
+
+ if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) {
+ struct erofs_inode_v2 *v2 = data;
+
+ vi->inode_isize = sizeof(struct erofs_inode_v2);
+ vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(v2->i_mode);
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) {
+ vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr);
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+ inode->i_rdev =
+ new_decode_dev(le32_to_cpu(v2->i_u.rdev));
+ } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ inode->i_rdev = 0;
+ } else {
+ return -EIO;
+ }
+
+ i_uid_write(inode, le32_to_cpu(v2->i_uid));
+ i_gid_write(inode, le32_to_cpu(v2->i_gid));
+ set_nlink(inode, le32_to_cpu(v2->i_nlink));
+
+ /* ns timestamp */
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+ le64_to_cpu(v2->i_ctime);
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+ le32_to_cpu(v2->i_ctime_nsec);
+
+ inode->i_size = le64_to_cpu(v2->i_size);
+ } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) {
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+
+ vi->inode_isize = sizeof(struct erofs_inode_v1);
+ vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(v1->i_mode);
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) {
+ vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr);
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+ inode->i_rdev =
+ new_decode_dev(le32_to_cpu(v1->i_u.rdev));
+ } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ inode->i_rdev = 0;
+ } else {
+ return -EIO;
+ }
+
+ i_uid_write(inode, le16_to_cpu(v1->i_uid));
+ i_gid_write(inode, le16_to_cpu(v1->i_gid));
+ set_nlink(inode, le16_to_cpu(v1->i_nlink));
+
+ /* use build time to derive all file time */
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+ sbi->build_time;
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+ sbi->build_time_nsec;
+
+ inode->i_size = le32_to_cpu(v1->i_size);
+ } else {
+ errln("unsupported on-disk inode version %u of nid %llu",
+ __inode_version(advise), vi->nid);
+ DBG_BUGON(1);
+ return -EIO;
+ }
+
+	/* measure inode.i_blocks as generic filesystems do */
+ inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+ return 0;
+}
+
+/*
+ * try_lock can be required since the locking order is:
+ *   file data (fs_inode)
+ *        meta (bd_inode)
+ * but the majority of callers is "iget", and in that case
+ * we are pretty sure there is no deadlock since no data
+ * operations exist. However, try_lock is still preferred
+ * here since it adds little overhead and will succeed
+ * immediately.
+ */
+static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
+{
+ struct erofs_vnode *vi = EROFS_V(inode);
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ int mode = vi->data_mapping_mode;
+
+ DBG_BUGON(mode >= EROFS_INODE_LAYOUT_MAX);
+
+	/* should be the inline data layout C */
+ if (mode != EROFS_INODE_LAYOUT_INLINE)
+ return 0;
+
+ /* fast symlink (following ext4) */
+ if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) {
+ char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL);
+
+ if (unlikely(lnk == NULL))
+ return -ENOMEM;
+
+ m_pofs += vi->inode_isize + vi->xattr_isize;
+ BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
+
+ /* get in-page inline data */
+ memcpy(lnk, data + m_pofs, inode->i_size);
+ lnk[inode->i_size] = '\0';
+
+ inode->i_link = lnk;
+ set_inode_fast_symlink(inode);
+ }
+ return -EAGAIN;
+}
+
+static int fill_inode(struct inode *inode, int isdir)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+ struct erofs_vnode *vi = EROFS_V(inode);
+ struct page *page;
+ void *data;
+ int err;
+ erofs_blk_t blkaddr;
+ unsigned ofs;
+
+ trace_erofs_fill_inode(inode, isdir);
+
+ blkaddr = erofs_blknr(iloc(sbi, vi->nid));
+ ofs = erofs_blkoff(iloc(sbi, vi->nid));
+
+ debugln("%s, reading inode nid %llu at %u of blkaddr %u",
+ __func__, vi->nid, ofs, blkaddr);
+
+ page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir);
+
+ if (IS_ERR(page)) {
+ errln("failed to get inode (nid: %llu) page, err %ld",
+ vi->nid, PTR_ERR(page));
+ return PTR_ERR(page);
+ }
+
+ BUG_ON(!PageUptodate(page));
+ data = page_address(page);
+
+ err = read_inode(inode, data + ofs);
+ if (!err) {
+ /* setup the new inode */
+ if (S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_EROFS_FS_XATTR
+ if (vi->xattr_isize)
+ inode->i_op = &erofs_generic_xattr_iops;
+#endif
+ inode->i_fop = &generic_ro_fops;
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op =
+#ifdef CONFIG_EROFS_FS_XATTR
+ vi->xattr_isize ? &erofs_dir_xattr_iops :
+#endif
+ &erofs_dir_iops;
+ inode->i_fop = &erofs_dir_fops;
+ } else if (S_ISLNK(inode->i_mode)) {
+ /* by default, page_get_link is used for symlink */
+ inode->i_op =
+#ifdef CONFIG_EROFS_FS_XATTR
+				&erofs_symlink_xattr_iops;
+#else
+ &page_symlink_inode_operations;
+#endif
+ inode_nohighmem(inode);
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+#ifdef CONFIG_EROFS_FS_XATTR
+ inode->i_op = &erofs_special_inode_operations;
+#endif
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ } else {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ if (is_inode_layout_compression(inode)) {
+#ifdef CONFIG_EROFS_FS_ZIP
+ inode->i_mapping->a_ops =
+ &z_erofs_vle_normalaccess_aops;
+#else
+ err = -ENOTSUPP;
+#endif
+ goto out_unlock;
+ }
+
+ inode->i_mapping->a_ops = &erofs_raw_access_aops;
+
+ /* fill last page if inline data is available */
+ fill_inline_data(inode, data, ofs);
+ }
+
+out_unlock:
+ unlock_page(page);
+ put_page(page);
+ return err;
+}
+
+struct inode *erofs_iget(struct super_block *sb,
+ erofs_nid_t nid, bool isdir)
+{
+ struct inode *inode = iget_locked(sb, nid);
+
+ if (unlikely(inode == NULL))
+ return ERR_PTR(-ENOMEM);
+
+ if (inode->i_state & I_NEW) {
+ int err;
+ struct erofs_vnode *vi = EROFS_V(inode);
+
+		vi->nid = nid;
+
+ err = fill_inode(inode, isdir);
+ if (likely(!err))
+ unlock_new_inode(inode);
+ else {
+ iget_failed(inode);
+ inode = ERR_PTR(err);
+ }
+ }
+ return inode;
+}
+
+#ifdef CONFIG_EROFS_FS_XATTR
+const struct inode_operations erofs_generic_xattr_iops = {
+	.listxattr = erofs_listxattr,
+};
+
+const struct inode_operations erofs_symlink_xattr_iops = {
+	.get_link = page_get_link,
+	.listxattr = erofs_listxattr,
+};
+#endif
+
+const struct inode_operations erofs_special_inode_operations = {
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+};
+
+#ifdef CONFIG_EROFS_FS_XATTR
+const struct inode_operations erofs_fast_symlink_xattr_iops = {
+ .get_link = simple_get_link,
+ .listxattr = erofs_listxattr,
+};
+#endif
+
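For the fast-symlink path in fill_inline_data(), the link target lives in the
same meta block as the inode itself, immediately after the on-disk inode and
its inline xattrs. A worked sketch of that offset math (all values below are
hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned int m_pofs = 1024;	/* inode offset in its meta page */
		unsigned int inode_isize = 32;	/* on-disk v1 inode */
		unsigned int xattr_isize = 24;	/* e.g. i_xattr_icount == 4 */
		unsigned int i_size = 11;	/* length of the link target */

		/* where fill_inline_data() copies the target from */
		unsigned int payload = m_pofs + inode_isize + xattr_isize;

		/* the driver BUG()s if payload + i_size exceeds PAGE_SIZE */
		printf("inline target: bytes %u..%u of the meta page\n",
		       payload, payload + i_size - 1);
		return 0;
	}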
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
new file mode 100644
index 000000000000..367b39fe46e5
--- /dev/null
+++ b/drivers/staging/erofs/internal.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * linux/drivers/staging/erofs/internal.h
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#ifndef __INTERNAL_H
+#define __INTERNAL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/cleancache.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "erofs_fs.h"
+
+/* redefine pr_fmt "erofs: " */
+#undef pr_fmt
+#define pr_fmt(fmt) "erofs: " fmt
+
+#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__)
+#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__)
+#ifdef CONFIG_EROFS_FS_DEBUG
+#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
+
+#define dbg_might_sleep might_sleep
+#define DBG_BUGON BUG_ON
+#else
+#define debugln(x, ...) ((void)0)
+
+#define dbg_might_sleep() ((void)0)
+#define DBG_BUGON(...) ((void)0)
+#endif
+
+#ifdef CONFIG_EROFS_FAULT_INJECTION
+enum {
+ FAULT_KMALLOC,
+ FAULT_MAX,
+};
+
+extern char *erofs_fault_name[FAULT_MAX];
+#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
+
+struct erofs_fault_info {
+ atomic_t inject_ops;
+ unsigned int inject_rate;
+ unsigned int inject_type;
+};
+#endif
+
+#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
+#define EROFS_FS_ZIP_CACHE_LVL (2)
+#elif defined(CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR)
+#define EROFS_FS_ZIP_CACHE_LVL (1)
+#else
+#define EROFS_FS_ZIP_CACHE_LVL (0)
+#endif
+
+#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
+#define EROFS_FS_HAS_MANAGED_CACHE
+#endif
+
+/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
+#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
+
+typedef u64 erofs_nid_t;
+
+struct erofs_sb_info {
+ /* list for all registered superblocks, mainly for shrinker */
+ struct list_head list;
+ struct mutex umount_mutex;
+
+ u32 blocks;
+ u32 meta_blkaddr;
+#ifdef CONFIG_EROFS_FS_XATTR
+ u32 xattr_blkaddr;
+#endif
+
+ /* inode slot unit size in bit shift */
+ unsigned char islotbits;
+#ifdef CONFIG_EROFS_FS_ZIP
+ /* cluster size in bit shift */
+ unsigned char clusterbits;
+
+ /* the dedicated workstation for compression */
+ struct radix_tree_root workstn_tree;
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ struct inode *managed_cache;
+#endif
+
+#endif
+
+ u32 build_time_nsec;
+ u64 build_time;
+
+	/* what we really care about is nid rather than ino */
+ erofs_nid_t root_nid;
+ /* used for statfs, f_files - f_favail */
+ u64 inos;
+
+ u8 uuid[16]; /* 128-bit uuid for volume */
+ u8 volume_name[16]; /* volume name */
+ char *dev_name;
+
+ unsigned int mount_opt;
+ unsigned int shrinker_run_no;
+
+#ifdef CONFIG_EROFS_FAULT_INJECTION
+ struct erofs_fault_info fault_info; /* For fault injection */
+#endif
+};
+
+#ifdef CONFIG_EROFS_FAULT_INJECTION
+#define erofs_show_injection_info(type) \
+ infoln("inject %s in %s of %pS", erofs_fault_name[type], \
+ __func__, __builtin_return_address(0))
+
+static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
+{
+ struct erofs_fault_info *ffi = &sbi->fault_info;
+
+ if (!ffi->inject_rate)
+ return false;
+
+ if (!IS_FAULT_SET(ffi, type))
+ return false;
+
+ atomic_inc(&ffi->inject_ops);
+ if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ return true;
+ }
+ return false;
+}
+#endif
+
+static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+#ifdef CONFIG_EROFS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_KMALLOC)) {
+ erofs_show_injection_info(FAULT_KMALLOC);
+ return NULL;
+ }
+#endif
+ return kmalloc(size, flags);
+}
+
+#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
+#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)
+
+/* Mount flags set via mount options or defaults */
+#define EROFS_MOUNT_XATTR_USER 0x00000010
+#define EROFS_MOUNT_POSIX_ACL 0x00000020
+#define EROFS_MOUNT_FAULT_INJECTION 0x00000040
+
+#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
+#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option)
+#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
+
+#ifdef CONFIG_EROFS_FS_ZIP
+#define erofs_workstn_lock(sbi) xa_lock(&(sbi)->workstn_tree)
+#define erofs_workstn_unlock(sbi) xa_unlock(&(sbi)->workstn_tree)
+
+/* basic unit of the workstation of a super_block */
+struct erofs_workgroup {
+ /* the workgroup index in the workstation */
+ pgoff_t index;
+
+ /* overall workgroup reference count */
+ atomic_t refcount;
+};
+
+#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
+
+static inline bool erofs_workgroup_try_to_freeze(
+ struct erofs_workgroup *grp, int v)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ if (v != atomic_cmpxchg(&grp->refcount,
+ v, EROFS_LOCKED_MAGIC))
+ return false;
+ preempt_disable();
+#else
+ preempt_disable();
+ if (atomic_read(&grp->refcount) != v) {
+ preempt_enable();
+ return false;
+ }
+#endif
+ return true;
+}
+
+static inline void erofs_workgroup_unfreeze(
+ struct erofs_workgroup *grp, int v)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ atomic_set(&grp->refcount, v);
+#endif
+ preempt_enable();
+}
+
+/* grab a workgroup reference; returns 0 on success or -1 on failure */
+static inline int erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
+{
+ const int locked = (int)EROFS_LOCKED_MAGIC;
+ int o;
+
+repeat:
+ o = atomic_read(&grp->refcount);
+
+	/* spin if it is temporarily locked in the reclaim path */
+ if (unlikely(o == locked)) {
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ do
+ cpu_relax();
+ while (atomic_read(&grp->refcount) == locked);
+#endif
+ goto repeat;
+ }
+
+ if (unlikely(o <= 0))
+ return -1;
+
+ if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
+ goto repeat;
+
+ *ocnt = o;
+ return 0;
+}
+
+#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
+
+extern int erofs_workgroup_put(struct erofs_workgroup *grp);
+
+extern struct erofs_workgroup *erofs_find_workgroup(
+ struct super_block *sb, pgoff_t index, bool *tag);
+
+extern int erofs_register_workgroup(struct super_block *sb,
+ struct erofs_workgroup *grp, bool tag);
+
+extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
+ unsigned long nr_shrink, bool cleanup);
+
+static inline void erofs_workstation_cleanup_all(struct super_block *sb)
+{
+ erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
+}
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+#define EROFS_UNALLOCATED_CACHED_PAGE ((void *)0x5F0EF00D)
+
+extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *egrp);
+extern int erofs_try_to_free_cached_page(struct address_space *mapping,
+ struct page *page);
+#endif
+
+#endif
+
+/* we strictly follow PAGE_SIZE and do not use buffer heads yet */
+#define LOG_BLOCK_SIZE PAGE_SHIFT
+
+#undef LOG_SECTORS_PER_BLOCK
+#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
+
+#undef SECTORS_PER_BLOCK
+#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)
+
+#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
+
+#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
+#error erofs cannot be used in this platform
+#endif
+
+#define ROOT_NID(sb) ((sb)->root_nid)
+
+#ifdef CONFIG_EROFS_FS_ZIP
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+
+/* page count of a compressed cluster */
+#define erofs_clusterpages(sbi) ((1 << (sbi)->clusterbits) / PAGE_SIZE)
+#endif
+
+typedef u64 erofs_off_t;
+
+/* data type for filesystem-wide blocks number */
+typedef u32 erofs_blk_t;
+
+#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
+#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
+#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
+
+static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+ return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+}
+
+#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1)
+#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1)
+
+struct erofs_vnode {
+ erofs_nid_t nid;
+ unsigned int flags;
+
+ unsigned char data_mapping_mode;
+ /* inline size in bytes */
+ unsigned char inode_isize;
+ unsigned short xattr_isize;
+
+ unsigned xattr_shared_count;
+ unsigned *xattr_shared_xattrs;
+
+ erofs_blk_t raw_blkaddr;
+
+ /* the corresponding vfs inode */
+ struct inode vfs_inode;
+};
+
+#define EROFS_V(ptr) \
+ container_of(ptr, struct erofs_vnode, vfs_inode)
+
+#define __inode_advise(x, bit, bits) \
+ (((x) >> (bit)) & ((1 << (bits)) - 1))
+
+#define __inode_version(advise) \
+ __inode_advise(advise, EROFS_I_VERSION_BIT, \
+ EROFS_I_VERSION_BITS)
+
+#define __inode_data_mapping(advise) \
+ __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
+ EROFS_I_DATA_MAPPING_BITS)
+
+static inline unsigned long inode_datablocks(struct inode *inode)
+{
+ /* since i_size cannot be changed */
+ return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+}
+
+static inline bool is_inode_layout_plain(struct inode *inode)
+{
+ return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_PLAIN;
+}
+
+static inline bool is_inode_layout_compression(struct inode *inode)
+{
+ return EROFS_V(inode)->data_mapping_mode ==
+ EROFS_INODE_LAYOUT_COMPRESSION;
+}
+
+static inline bool is_inode_layout_inline(struct inode *inode)
+{
+ return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_INLINE;
+}
+
+extern const struct super_operations erofs_sops;
+extern const struct inode_operations erofs_dir_iops;
+extern const struct file_operations erofs_dir_fops;
+
+extern const struct address_space_operations erofs_raw_access_aops;
+#ifdef CONFIG_EROFS_FS_ZIP
+extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
+#endif
+
+/*
+ * Logical to physical block mapping, used by erofs_map_blocks()
+ *
+ * Unlike other file systems, it is used for 2 access modes:
+ *
+ * 1) RAW access mode:
+ *
+ * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
+ * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
+ *
+ * Note that m_lblk in the RAW access mode refers to the number of
+ * the compressed ondisk block rather than the uncompressed
+ * in-memory block for the compressed file.
+ *
+ * m_pofs equals m_lofs except for the inline data page.
+ *
+ * 2) Normal access mode:
+ *
+ * If the inode is not compressed, there is no difference from
+ * the RAW access mode. However, if the inode is compressed,
+ * users should pass a valid (m_lblk, m_lofs) pair, and get
+ * the needed m_pblk, m_pofs, m_len to get the compressed data
+ * and the updated m_lblk, m_lofs which indicates the start
+ * of the corresponding uncompressed data in the file.
+ */
+enum {
+ BH_Zipped = BH_PrivateStart,
+};
+
+/* Has a disk mapping */
+#define EROFS_MAP_MAPPED (1 << BH_Mapped)
+/* Located in metadata (could be copied from bd_inode) */
+#define EROFS_MAP_META (1 << BH_Meta)
+/* The extent has been compressed */
+#define EROFS_MAP_ZIPPED (1 << BH_Zipped)
+
+struct erofs_map_blocks {
+ erofs_off_t m_pa, m_la;
+ u64 m_plen, m_llen;
+
+ unsigned int m_flags;
+};
+
+/* Flags used by erofs_map_blocks() */
+#define EROFS_GET_BLOCKS_RAW 0x0001
+
+/* data.c */
+static inline struct bio *prepare_bio(
+ struct super_block *sb,
+ erofs_blk_t blkaddr, unsigned nr_pages,
+ bio_end_io_t endio)
+{
+ gfp_t gfp = GFP_NOIO;
+ struct bio *bio = bio_alloc(gfp, nr_pages);
+
+ if (unlikely(bio == NULL) &&
+ (current->flags & PF_MEMALLOC)) {
+ do {
+ nr_pages /= 2;
+ if (unlikely(!nr_pages)) {
+ bio = bio_alloc(gfp | __GFP_NOFAIL, 1);
+ BUG_ON(bio == NULL);
+ break;
+ }
+ bio = bio_alloc(gfp, nr_pages);
+ } while (bio == NULL);
+ }
+
+ bio->bi_end_io = endio;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_iter.bi_sector = blkaddr << LOG_SECTORS_PER_BLOCK;
+ return bio;
+}
+
+static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags)
+{
+ bio_set_op_attrs(bio, op, op_flags);
+ submit_bio(bio);
+}
+
+extern struct page *erofs_get_meta_page(struct super_block *sb,
+ erofs_blk_t blkaddr, bool prio);
+extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
+extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
+ struct page **, int);
+
+struct erofs_map_blocks_iter {
+ struct erofs_map_blocks map;
+ struct page *mpage;
+};
+
+static inline struct page *erofs_get_inline_page(struct inode *inode,
+ erofs_blk_t blkaddr)
+{
+ return erofs_get_meta_page(inode->i_sb,
+ blkaddr, S_ISDIR(inode->i_mode));
+}
+
+/* inode.c */
+extern struct inode *erofs_iget(struct super_block *sb,
+ erofs_nid_t nid, bool dir);
+
+/* dir.c */
+int erofs_namei(struct inode *dir, struct qstr *name,
+ erofs_nid_t *nid, unsigned *d_type);
+
+/* xattr.c */
+#ifdef CONFIG_EROFS_FS_XATTR
+extern const struct xattr_handler *erofs_xattr_handlers[];
+#endif
+
+/* symlink */
+#ifdef CONFIG_EROFS_FS_XATTR
+extern const struct inode_operations erofs_symlink_xattr_iops;
+extern const struct inode_operations erofs_fast_symlink_xattr_iops;
+extern const struct inode_operations erofs_special_inode_operations;
+#endif
+
+static inline void set_inode_fast_symlink(struct inode *inode)
+{
+#ifdef CONFIG_EROFS_FS_XATTR
+ inode->i_op = &erofs_fast_symlink_xattr_iops;
+#else
+ inode->i_op = &simple_symlink_inode_operations;
+#endif
+}
+
+static inline bool is_inode_fast_symlink(struct inode *inode)
+{
+#ifdef CONFIG_EROFS_FS_XATTR
+ return inode->i_op == &erofs_fast_symlink_xattr_iops;
+#else
+ return inode->i_op == &simple_symlink_inode_operations;
+#endif
+}
+
+static inline void *erofs_vmap(struct page **pages, unsigned int count)
+{
+#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
+ int i = 0;
+
+ while (1) {
+ void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
+		/* retry two more times (3 times in total) */
+ if (addr != NULL || ++i >= 3)
+ return addr;
+ vm_unmap_aliases();
+ }
+ return NULL;
+#else
+ return vmap(pages, count, VM_MAP, PAGE_KERNEL);
+#endif
+}
+
+static inline void erofs_vunmap(const void *mem, unsigned int count)
+{
+#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
+ vm_unmap_ram(mem, count);
+#else
+ vunmap(mem);
+#endif
+}
+
+/* utils.c */
+extern struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
+
+extern void erofs_register_super(struct super_block *sb);
+extern void erofs_unregister_super(struct super_block *sb);
+
+extern unsigned long erofs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc);
+extern unsigned long erofs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc);
+
+#ifndef lru_to_page
+#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+#endif
+
+#endif
+
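The nid-to-byte-address mapping in iloc() is plain base-plus-shift: with
32-byte inode slots, islotbits is ffs(32) - 1 == 5, so consecutive nids sit
32 bytes apart starting at the meta area. A worked sketch (the superblock
values are hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define EROFS_BLKSIZ 4096

	int main(void)
	{
		uint32_t meta_blkaddr = 2;	/* from the superblock */
		unsigned int islotbits = 5;	/* 32-byte inode slots */
		uint64_t nid = 36;

		/* iloc(): blknr_to_addr(meta_blkaddr) + (nid << islotbits) */
		uint64_t addr = (uint64_t)meta_blkaddr * EROFS_BLKSIZ +
				(nid << islotbits);

		printf("nid %llu -> byte %llu (block %llu, offset %llu)\n",
		       (unsigned long long)nid, (unsigned long long)addr,
		       (unsigned long long)(addr / EROFS_BLKSIZ),
		       (unsigned long long)(addr % EROFS_BLKSIZ));
		return 0;
	}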
diff --git a/drivers/staging/erofs/lz4defs.h b/drivers/staging/erofs/lz4defs.h
new file mode 100644
index 000000000000..00a0b58a0871
--- /dev/null
+++ b/drivers/staging/erofs/lz4defs.h
@@ -0,0 +1,227 @@
+#ifndef __LZ4DEFS_H__
+#define __LZ4DEFS_H__
+
+/*
+ * lz4defs.h -- common and architecture specific defines for the kernel usage
+ *
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ *
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/string.h> /* memset, memcpy */
+
+#define FORCE_INLINE __always_inline
+
+/*-************************************
+ * Basic Types
+ **************************************/
+#include <linux/types.h>
+
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef uintptr_t uptrval;
+
+/*-************************************
+ * Architecture specifics
+ **************************************/
+#if defined(CONFIG_64BIT)
+#define LZ4_ARCH64 1
+#else
+#define LZ4_ARCH64 0
+#endif
+
+#if defined(__LITTLE_ENDIAN)
+#define LZ4_LITTLE_ENDIAN 1
+#else
+#define LZ4_LITTLE_ENDIAN 0
+#endif
+
+/*-************************************
+ * Constants
+ **************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+
+/* Increase this value ==> compression run slower on incompressible data */
+#define LZ4_SKIPTRIGGER 6
+
+#define HASH_UNIT sizeof(size_t)
+
+#define KB (1 << 10)
+#define MB (1 << 20)
+#define GB (1U << 30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define STEPSIZE sizeof(size_t)
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
+#define RUN_BITS (8 - ML_BITS)
+#define RUN_MASK ((1U << RUN_BITS) - 1)
+
+/*-************************************
+ * Reading and writing into memory
+ **************************************/
+static FORCE_INLINE U16 LZ4_read16(const void *ptr)
+{
+ return get_unaligned((const U16 *)ptr);
+}
+
+static FORCE_INLINE U32 LZ4_read32(const void *ptr)
+{
+ return get_unaligned((const U32 *)ptr);
+}
+
+static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
+{
+ return get_unaligned((const size_t *)ptr);
+}
+
+static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
+
+static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
+{
+ return put_unaligned_le16(value, memPtr);
+}
+
+static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
+{
+#if LZ4_ARCH64
+ U64 a = get_unaligned((const U64 *)src);
+
+ put_unaligned(a, (U64 *)dst);
+#else
+ U32 a = get_unaligned((const U32 *)src);
+ U32 b = get_unaligned((const U32 *)src + 1);
+
+ put_unaligned(a, (U32 *)dst);
+ put_unaligned(b, (U32 *)dst + 1);
+#endif
+}
+
+/*
+ * customized variant of memcpy,
+ * which can overwrite up to 7 bytes beyond dstEnd
+ */
+static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
+ const void *srcPtr, void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
+
+ do {
+ LZ4_copy8(d, s);
+ d += 8;
+ s += 8;
+ } while (d < e);
+}
+
+static FORCE_INLINE unsigned int LZ4_NbCommonBytes(size_t val)
+{
+#if LZ4_LITTLE_ENDIAN
+ return __ffs(val) >> 3;
+#else
+ return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
+#endif
+}
+
+static FORCE_INLINE unsigned int LZ4_count(
+ const BYTE *pIn,
+ const BYTE *pMatch,
+ const BYTE *pInLimit)
+{
+ const BYTE *const pStart = pIn;
+
+ while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ continue;
+ }
+
+ pIn += LZ4_NbCommonBytes(diff);
+
+ return (unsigned int)(pIn - pStart);
+ }
+
+#if LZ4_ARCH64
+ if ((pIn < (pInLimit - 3))
+ && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+ pIn += 4;
+ pMatch += 4;
+ }
+#endif
+
+ if ((pIn < (pInLimit - 1))
+ && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+ pIn += 2;
+ pMatch += 2;
+ }
+
+ if ((pIn < pInLimit) && (*pMatch == *pIn))
+ pIn++;
+
+ return (unsigned int)(pIn - pStart);
+}
+
+typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { byPtr, byU32, byU16 } tableType_t;
+
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+#endif
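LZ4_count() above relies on a word-at-a-time trick: XOR the two words being
compared, and on a little-endian machine the position of the lowest set bit
in the result picks out the first differing byte. A userspace sketch of the
little-endian case only (__builtin_ctzll stands in for the kernel's __ffs();
the argument must be non-zero, which LZ4_count() guarantees before calling):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	static unsigned int nb_common_bytes(uint64_t diff)
	{
		return (unsigned int)__builtin_ctzll(diff) >> 3;
	}

	int main(void)
	{
		uint64_t va, vb;

		memcpy(&va, "abcdefgh", 8);
		memcpy(&vb, "abcdXfgh", 8);

		/* first difference is in byte 4, so 4 bytes match */
		printf("%u common bytes\n", nb_common_bytes(va ^ vb));
		return 0;
	}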
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
new file mode 100644
index 000000000000..546a47156101
--- /dev/null
+++ b/drivers/staging/erofs/namei.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/namei.c
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include "internal.h"
+#include "xattr.h"
+
+#include <trace/events/erofs.h>
+
+/* assumes that the value of qn->len is accurate */
+static inline int dirnamecmp(struct qstr *qn,
+ struct qstr *qd, unsigned *matched)
+{
+ unsigned i = *matched, len = min(qn->len, qd->len);
+loop:
+ if (unlikely(i >= len)) {
+ *matched = i;
+ if (qn->len < qd->len) {
+ /*
+ * actually (qn->len == qd->len)
+ * when qd->name[i] == '\0'
+ */
+ return qd->name[i] == '\0' ? 0 : -1;
+ }
+ return (qn->len > qd->len);
+ }
+
+ if (qn->name[i] != qd->name[i]) {
+ *matched = i;
+ return qn->name[i] > qd->name[i] ? 1 : -1;
+ }
+
+ ++i;
+ goto loop;
+}
+
+static struct erofs_dirent *find_target_dirent(
+ struct qstr *name,
+ u8 *data, int maxsize)
+{
+ unsigned ndirents, head, back;
+ unsigned startprfx, endprfx;
+ struct erofs_dirent *const de = (struct erofs_dirent *)data;
+
+ /* make sure that maxsize is valid */
+ BUG_ON(maxsize < sizeof(struct erofs_dirent));
+
+ ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
+
+	/* corrupted dir (this check may be unnecessary...) */
+ BUG_ON(!ndirents);
+
+ head = 0;
+ back = ndirents - 1;
+ startprfx = endprfx = 0;
+
+ while (head <= back) {
+ unsigned mid = head + (back - head) / 2;
+ unsigned nameoff = le16_to_cpu(de[mid].nameoff);
+ unsigned matched = min(startprfx, endprfx);
+
+ struct qstr dname = QSTR_INIT(data + nameoff,
+ unlikely(mid >= ndirents - 1) ?
+ maxsize - nameoff :
+ le16_to_cpu(de[mid + 1].nameoff) - nameoff);
+
+		/* string comparison skipping the already matched prefix */
+ int ret = dirnamecmp(name, &dname, &matched);
+
+ if (unlikely(!ret))
+ return de + mid;
+ else if (ret > 0) {
+ head = mid + 1;
+ startprfx = matched;
+		} else if (unlikely(mid < 1)) /* avoid "mid" wraparound */
+ break;
+ else {
+ back = mid - 1;
+ endprfx = matched;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct page *find_target_block_classic(
+ struct inode *dir,
+ struct qstr *name, int *_diff)
+{
+ unsigned startprfx, endprfx;
+ unsigned head, back;
+ struct address_space *const mapping = dir->i_mapping;
+ struct page *candidate = ERR_PTR(-ENOENT);
+
+ startprfx = endprfx = 0;
+ head = 0;
+ back = inode_datablocks(dir) - 1;
+
+ while (head <= back) {
+ unsigned mid = head + (back - head) / 2;
+ struct page *page = read_mapping_page(mapping, mid, NULL);
+
+ if (IS_ERR(page)) {
+exact_out:
+ if (!IS_ERR(candidate)) /* valid candidate */
+ put_page(candidate);
+ return page;
+ } else {
+ int diff;
+ unsigned ndirents, matched;
+ struct qstr dname;
+ struct erofs_dirent *de = kmap_atomic(page);
+ unsigned nameoff = le16_to_cpu(de->nameoff);
+
+ ndirents = nameoff / sizeof(*de);
+
+			/* corrupted dir (should have at least one entry) */
+ BUG_ON(!ndirents || nameoff > PAGE_SIZE);
+
+ matched = min(startprfx, endprfx);
+
+ dname.name = (u8 *)de + nameoff;
+ dname.len = ndirents == 1 ?
+ /* since the rest of the last page is 0 */
+ EROFS_BLKSIZ - nameoff
+ : le16_to_cpu(de[1].nameoff) - nameoff;
+
+			/* string comparison skipping the already matched prefix */
+ diff = dirnamecmp(name, &dname, &matched);
+ kunmap_atomic(de);
+
+ if (unlikely(!diff)) {
+ *_diff = 0;
+ goto exact_out;
+ } else if (diff > 0) {
+ head = mid + 1;
+ startprfx = matched;
+
+ if (likely(!IS_ERR(candidate)))
+ put_page(candidate);
+ candidate = page;
+ } else {
+ put_page(page);
+
+				if (unlikely(mid < 1)) /* avoid "mid" wraparound */
+ break;
+
+ back = mid - 1;
+ endprfx = matched;
+ }
+ }
+ }
+ *_diff = 1;
+ return candidate;
+}
+
+int erofs_namei(struct inode *dir,
+ struct qstr *name,
+ erofs_nid_t *nid, unsigned *d_type)
+{
+ int diff;
+ struct page *page;
+ u8 *data;
+ struct erofs_dirent *de;
+
+ if (unlikely(!dir->i_size))
+ return -ENOENT;
+
+ diff = 1;
+ page = find_target_block_classic(dir, name, &diff);
+
+ if (unlikely(IS_ERR(page)))
+ return PTR_ERR(page);
+
+ data = kmap_atomic(page);
+ /* the target page has been mapped */
+ de = likely(diff) ?
+ /* since the rest of the last page is 0 */
+ find_target_dirent(name, data, EROFS_BLKSIZ) :
+ (struct erofs_dirent *)data;
+
+ if (likely(!IS_ERR(de))) {
+ *nid = le64_to_cpu(de->nid);
+ *d_type = de->file_type;
+ }
+
+ kunmap_atomic(data);
+ put_page(page);
+
+ return PTR_ERR_OR_ZERO(de);
+}
+
+/* NOTE: i_mutex is already held by vfs */
+static struct dentry *erofs_lookup(struct inode *dir,
+ struct dentry *dentry, unsigned int flags)
+{
+ int err;
+ erofs_nid_t nid;
+ unsigned d_type;
+ struct inode *inode;
+
+ DBG_BUGON(!d_really_is_negative(dentry));
+	/* dentry must be unhashed in lookup, no need to worry about it */
+ DBG_BUGON(!d_unhashed(dentry));
+
+ trace_erofs_lookup(dir, dentry, flags);
+
+ /* file name exceeds fs limit */
+ if (unlikely(dentry->d_name.len > EROFS_NAME_LEN))
+ return ERR_PTR(-ENAMETOOLONG);
+
+ /* false uninitialized warnings on gcc 4.8.x */
+ err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
+
+ if (err == -ENOENT) {
+ /* negative dentry */
+ inode = NULL;
+ goto negative_out;
+ } else if (unlikely(err))
+ return ERR_PTR(err);
+
+ debugln("%s, %s (nid %llu) found, d_type %u", __func__,
+ dentry->d_name.name, nid, d_type);
+
+ inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+negative_out:
+ return d_splice_alias(inode, dentry);
+}
+
+const struct inode_operations erofs_dir_iops = {
+ .lookup = erofs_lookup,
+};
+
+const struct inode_operations erofs_dir_xattr_iops = {
+ .lookup = erofs_lookup,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+};
+
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
new file mode 100644
index 000000000000..1aec509c805f
--- /dev/null
+++ b/drivers/staging/erofs/super.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/super.c
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/erofs.h>
+
+static struct kmem_cache *erofs_inode_cachep __read_mostly;
+
+static void init_once(void *ptr)
+{
+ struct erofs_vnode *vi = ptr;
+
+ inode_init_once(&vi->vfs_inode);
+}
+
+static int erofs_init_inode_cache(void)
+{
+ erofs_inode_cachep = kmem_cache_create("erofs_inode",
+ sizeof(struct erofs_vnode), 0,
+ SLAB_RECLAIM_ACCOUNT, init_once);
+
+ return erofs_inode_cachep != NULL ? 0 : -ENOMEM;
+}
+
+static void erofs_exit_inode_cache(void)
+{
+ BUG_ON(erofs_inode_cachep == NULL);
+ kmem_cache_destroy(erofs_inode_cachep);
+}
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+ struct erofs_vnode *vi =
+ kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
+
+ if (vi == NULL)
+ return NULL;
+
+ /* zero out everything except vfs_inode */
+ memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
+ return &vi->vfs_inode;
+}
+
+static void i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ struct erofs_vnode *vi = EROFS_V(inode);
+
+ /* be careful of the RCU symlink path (see ext4_inode_info->i_data)! */
+ if (is_inode_fast_symlink(inode))
+ kfree(inode->i_link);
+
+ kfree(vi->xattr_shared_xattrs);
+
+ kmem_cache_free(erofs_inode_cachep, vi);
+}
+
+static void destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, i_callback);
+}
+
+static int superblock_read(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi;
+ struct buffer_head *bh;
+ struct erofs_super_block *layout;
+ unsigned blkszbits;
+ int ret;
+
+ bh = sb_bread(sb, 0);
+
+ if (bh == NULL) {
+ errln("cannot read erofs superblock");
+ return -EIO;
+ }
+
+ sbi = EROFS_SB(sb);
+ layout = (struct erofs_super_block *)((u8 *)bh->b_data
+ + EROFS_SUPER_OFFSET);
+
+ ret = -EINVAL;
+ if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) {
+ errln("cannot find valid erofs superblock");
+ goto out;
+ }
+
+ blkszbits = layout->blkszbits;
+ /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
+ if (unlikely(blkszbits != LOG_BLOCK_SIZE)) {
+ errln("blksize %u isn't supported on this platform",
+ 1 << blkszbits);
+ goto out;
+ }
+
+ sbi->blocks = le32_to_cpu(layout->blocks);
+ sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
+#ifdef CONFIG_EROFS_FS_XATTR
+ sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr);
+#endif
+ sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
+#ifdef CONFIG_EROFS_FS_ZIP
+ sbi->clusterbits = 12;
+
+ if (1 << (sbi->clusterbits - 12) > Z_EROFS_CLUSTER_MAX_PAGES)
+ errln("clusterbits %u is not supported on this kernel",
+ sbi->clusterbits);
+#endif
+
+ sbi->root_nid = le16_to_cpu(layout->root_nid);
+ sbi->inos = le64_to_cpu(layout->inos);
+
+ sbi->build_time = le64_to_cpu(layout->build_time);
+ sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec);
+
+ memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));
+ memcpy(sbi->volume_name, layout->volume_name,
+ sizeof(layout->volume_name));
+
+ ret = 0;
+out:
+ brelse(bh);
+ return ret;
+}
+
+#ifdef CONFIG_EROFS_FAULT_INJECTION
+char *erofs_fault_name[FAULT_MAX] = {
+ [FAULT_KMALLOC] = "kmalloc",
+};
+
+static void erofs_build_fault_attr(struct erofs_sb_info *sbi,
+ unsigned int rate)
+{
+ struct erofs_fault_info *ffi = &sbi->fault_info;
+
+ if (rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_rate = rate;
+ ffi->inject_type = (1 << FAULT_MAX) - 1;
+ } else {
+ memset(ffi, 0, sizeof(struct erofs_fault_info));
+ }
+}
+#endif
+
+static void default_options(struct erofs_sb_info *sbi)
+{
+#ifdef CONFIG_EROFS_FS_XATTR
+ set_opt(sbi, XATTR_USER);
+#endif
+
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ set_opt(sbi, POSIX_ACL);
+#endif
+}
+
+enum {
+ Opt_user_xattr,
+ Opt_nouser_xattr,
+ Opt_acl,
+ Opt_noacl,
+ Opt_fault_injection,
+ Opt_err
+};
+
+static match_table_t erofs_tokens = {
+ {Opt_user_xattr, "user_xattr"},
+ {Opt_nouser_xattr, "nouser_xattr"},
+ {Opt_acl, "acl"},
+ {Opt_noacl, "noacl"},
+ {Opt_fault_injection, "fault_injection=%u"},
+ {Opt_err, NULL}
+};
+
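+/*
+ * mount usage sketch (device and mountpoint below are hypothetical):
+ *   mount -t erofs -o noacl,nouser_xattr,fault_injection=1 /dev/loop0 /mnt
+ */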
+static int parse_options(struct super_block *sb, char *options)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p;
+ int arg = 0;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ args[0].to = args[0].from = NULL;
+ token = match_token(p, erofs_tokens, args);
+
+ switch (token) {
+#ifdef CONFIG_EROFS_FS_XATTR
+ case Opt_user_xattr:
+ set_opt(EROFS_SB(sb), XATTR_USER);
+ break;
+ case Opt_nouser_xattr:
+ clear_opt(EROFS_SB(sb), XATTR_USER);
+ break;
+#else
+ case Opt_user_xattr:
+ infoln("user_xattr option not supported");
+ break;
+ case Opt_nouser_xattr:
+ infoln("nouser_xattr option not supported");
+ break;
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ case Opt_acl:
+ set_opt(EROFS_SB(sb), POSIX_ACL);
+ break;
+ case Opt_noacl:
+ clear_opt(EROFS_SB(sb), POSIX_ACL);
+ break;
+#else
+ case Opt_acl:
+ infoln("acl option not supported");
+ break;
+ case Opt_noacl:
+ infoln("noacl option not supported");
+ break;
+#endif
+ case Opt_fault_injection:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+#ifdef CONFIG_EROFS_FAULT_INJECTION
+ erofs_build_fault_attr(EROFS_SB(sb), arg);
+ set_opt(EROFS_SB(sb), FAULT_INJECTION);
+#else
+ infoln("FAULT_INJECTION was not selected");
+#endif
+ break;
+ default:
+ errln("Unrecognized mount option \"%s\" "
+ "or missing value", p);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+
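+/*
+ * The "managed cache" is a private in-memory inode (created by
+ * erofs_init_managed_cache below) whose page cache holds compressed
+ * pages, so that they remain reclaimable through the aops here.
+ */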
+static const struct address_space_operations managed_cache_aops;
+
+static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
+{
+ int ret = 1; /* 0 - busy */
+ struct address_space *const mapping = page->mapping;
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(mapping->a_ops != &managed_cache_aops);
+
+ if (PagePrivate(page))
+ ret = erofs_try_to_free_cached_page(mapping, page);
+
+ return ret;
+}
+
+static void managed_cache_invalidatepage(struct page *page,
+ unsigned int offset, unsigned int length)
+{
+ const unsigned int stop = length + offset;
+
+ BUG_ON(!PageLocked(page));
+
+ /* Check for overflow */
+ BUG_ON(stop > PAGE_SIZE || stop < length);
+
+ if (offset == 0 && stop == PAGE_SIZE)
+ while (!managed_cache_releasepage(page, GFP_NOFS))
+ cond_resched();
+}
+
+static const struct address_space_operations managed_cache_aops = {
+ .releasepage = managed_cache_releasepage,
+ .invalidatepage = managed_cache_invalidatepage,
+};
+
+static struct inode *erofs_init_managed_cache(struct super_block *sb)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (unlikely(inode == NULL))
+ return ERR_PTR(-ENOMEM);
+
+ set_nlink(inode, 1);
+ inode->i_size = OFFSET_MAX;
+
+ inode->i_mapping->a_ops = &managed_cache_aops;
+ mapping_set_gfp_mask(inode->i_mapping,
+ GFP_NOFS | __GFP_HIGHMEM |
+ __GFP_MOVABLE | __GFP_NOFAIL);
+ return inode;
+}
+
+#endif
+
+static int erofs_read_super(struct super_block *sb,
+ const char *dev_name, void *data, int silent)
+{
+ struct inode *inode;
+ struct erofs_sb_info *sbi;
+ int err = -EINVAL;
+
+ infoln("read_super, device -> %s", dev_name);
+ infoln("options -> %s", (char *)data);
+
+ if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) {
+ errln("failed to set erofs blksize");
+ goto err;
+ }
+
+ sbi = kzalloc(sizeof(struct erofs_sb_info), GFP_KERNEL);
+ if (unlikely(sbi == NULL)) {
+ err = -ENOMEM;
+ goto err;
+ }
+ sb->s_fs_info = sbi;
+
+ err = superblock_read(sb);
+ if (err)
+ goto err_sbread;
+
+ sb->s_magic = EROFS_SUPER_MAGIC;
+ sb->s_flags |= MS_RDONLY | MS_NOATIME;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_time_gran = 1;
+
+ sb->s_op = &erofs_sops;
+
+#ifdef CONFIG_EROFS_FS_XATTR
+ sb->s_xattr = erofs_xattr_handlers;
+#endif
+
+ /* set erofs default mount options */
+ default_options(sbi);
+
+ err = parse_options(sb, data);
+ if (err)
+ goto err_parseopt;
+
+ if (!silent)
+ infoln("root inode @ nid %llu", ROOT_NID(sbi));
+
+#ifdef CONFIG_EROFS_FS_ZIP
+ INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
+#endif
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ sbi->managed_cache = erofs_init_managed_cache(sb);
+ if (IS_ERR(sbi->managed_cache)) {
+ err = PTR_ERR(sbi->managed_cache);
+ goto err_init_managed_cache;
+ }
+#endif
+
+ /* get the root inode */
+ inode = erofs_iget(sb, ROOT_NID(sbi), true);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto err_iget;
+ }
+
+ if (!S_ISDIR(inode->i_mode)) {
+ errln("rootino(nid %llu) is not a directory(i_mode %o)",
+ ROOT_NID(sbi), inode->i_mode);
+ err = -EINVAL;
+ goto err_isdir;
+ }
+
+ sb->s_root = d_make_root(inode);
+ if (sb->s_root == NULL) {
+ err = -ENOMEM;
+ goto err_makeroot;
+ }
+
+ /* save the device name to sbi */
+ sbi->dev_name = __getname();
+ if (sbi->dev_name == NULL) {
+ err = -ENOMEM;
+ goto err_devname;
+ }
+
+ snprintf(sbi->dev_name, PATH_MAX, "%s", dev_name);
+ sbi->dev_name[PATH_MAX - 1] = '\0';
+
+ erofs_register_super(sb);
+
+ /*
+ * We already have a positive dentry, which was instantiated
+ * by d_make_root. Just need to d_rehash it.
+ */
+ d_rehash(sb->s_root);
+
+ if (!silent)
+ infoln("mounted on %s with opts: %s.", dev_name,
+ (char *)data);
+ return 0;
+ /*
+ * please add a label for each exit point and use the following
+ * naming convention, so that new features can be integrated
+ * easily without renaming labels.
+ */
+err_devname:
+ dput(sb->s_root);
+err_makeroot:
+err_isdir:
+ if (sb->s_root == NULL)
+ iput(inode);
+err_iget:
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ iput(sbi->managed_cache);
+err_init_managed_cache:
+#endif
+err_parseopt:
+err_sbread:
+ sb->s_fs_info = NULL;
+ kfree(sbi);
+err:
+ return err;
+}
+
+/*
+ * this can be triggered after deactivate_locked_super() is called,
+ * which covers both umount and failed initialization.
+ */
+static void erofs_put_super(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ /* for cases which failed in "read_super" */
+ if (sbi == NULL)
+ return;
+
+ WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
+
+ infoln("unmounted for %s", sbi->dev_name);
+ __putname(sbi->dev_name);
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ iput(sbi->managed_cache);
+#endif
+
+ mutex_lock(&sbi->umount_mutex);
+
+#ifdef CONFIG_EROFS_FS_ZIP
+ erofs_workstation_cleanup_all(sb);
+#endif
+
+ erofs_unregister_super(sb);
+ mutex_unlock(&sbi->umount_mutex);
+
+ kfree(sbi);
+ sb->s_fs_info = NULL;
+}
+
+
+struct erofs_mount_private {
+ const char *dev_name;
+ char *options;
+};
+
+/* support mount_bdev() with options */
+static int erofs_fill_super(struct super_block *sb,
+ void *_priv, int silent)
+{
+ struct erofs_mount_private *priv = _priv;
+
+ return erofs_read_super(sb, priv->dev_name,
+ priv->options, silent);
+}
+
+static struct dentry *erofs_mount(
+ struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ struct erofs_mount_private priv = {
+ .dev_name = dev_name,
+ .options = data
+ };
+
+ return mount_bdev(fs_type, flags, dev_name,
+ &priv, erofs_fill_super);
+}
+
+static void erofs_kill_sb(struct super_block *sb)
+{
+ kill_block_super(sb);
+}
+
+static struct shrinker erofs_shrinker_info = {
+ .scan_objects = erofs_shrink_scan,
+ .count_objects = erofs_shrink_count,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct file_system_type erofs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "erofs",
+ .mount = erofs_mount,
+ .kill_sb = erofs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+MODULE_ALIAS_FS("erofs");
+
+#ifdef CONFIG_EROFS_FS_ZIP
+extern int z_erofs_init_zip_subsystem(void);
+extern void z_erofs_exit_zip_subsystem(void);
+#endif
+
+static int __init erofs_module_init(void)
+{
+ int err;
+
+ erofs_check_ondisk_layout_definitions();
+ infoln("initializing erofs " EROFS_VERSION);
+
+ err = erofs_init_inode_cache();
+ if (err)
+ goto icache_err;
+
+ err = register_shrinker(&erofs_shrinker_info);
+ if (err)
+ goto shrinker_err;
+
+#ifdef CONFIG_EROFS_FS_ZIP
+ err = z_erofs_init_zip_subsystem();
+ if (err)
+ goto zip_err;
+#endif
+
+ err = register_filesystem(&erofs_fs_type);
+ if (err)
+ goto fs_err;
+
+ infoln("successfully to initialize erofs");
+ return 0;
+
+fs_err:
+#ifdef CONFIG_EROFS_FS_ZIP
+ z_erofs_exit_zip_subsystem();
+zip_err:
+#endif
+ unregister_shrinker(&erofs_shrinker_info);
+shrinker_err:
+ erofs_exit_inode_cache();
+icache_err:
+ return err;
+}
+
+static void __exit erofs_module_exit(void)
+{
+ unregister_filesystem(&erofs_fs_type);
+#ifdef CONFIG_EROFS_FS_ZIP
+ z_erofs_exit_zip_subsystem();
+#endif
+ unregister_shrinker(&erofs_shrinker_info);
+ erofs_exit_inode_cache();
+ infoln("successfully finalize erofs");
+}
+
+/* get filesystem statistics */
+static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = EROFS_BLKSIZ;
+ buf->f_blocks = sbi->blocks;
+ buf->f_bfree = buf->f_bavail = 0;
+
+ buf->f_files = ULLONG_MAX;
+ buf->f_ffree = ULLONG_MAX - sbi->inos;
+
+ buf->f_namelen = EROFS_NAME_LEN;
+
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ return 0;
+}
+
+static int erofs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
+
+#ifdef CONFIG_EROFS_FS_XATTR
+ if (test_opt(sbi, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+ else
+ seq_puts(seq, ",nouser_xattr");
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ if (test_opt(sbi, POSIX_ACL))
+ seq_puts(seq, ",acl");
+ else
+ seq_puts(seq, ",noacl");
+#endif
+#ifdef CONFIG_EROFS_FAULT_INJECTION
+ if (test_opt(sbi, FAULT_INJECTION))
+ seq_printf(seq, ",fault_injection=%u",
+ sbi->fault_info.inject_rate);
+#endif
+ return 0;
+}
+
+static int erofs_remount(struct super_block *sb, int *flags, char *data)
+{
+ BUG_ON(!sb_rdonly(sb));
+
+ *flags |= MS_RDONLY;
+ return 0;
+}
+
+const struct super_operations erofs_sops = {
+ .put_super = erofs_put_super,
+ .alloc_inode = alloc_inode,
+ .destroy_inode = destroy_inode,
+ .statfs = erofs_statfs,
+ .show_options = erofs_show_options,
+ .remount_fs = erofs_remount,
+};
+
+module_init(erofs_module_init);
+module_exit(erofs_module_exit);
+
+MODULE_DESCRIPTION("Enhanced ROM File System");
+MODULE_AUTHOR("Gao Xiang, Yu Chao, Miao Xie, CONSUMER BG, HUAWEI Inc.");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/erofs/unzip_lz4.c b/drivers/staging/erofs/unzip_lz4.c
new file mode 100644
index 000000000000..b1ea23f66c4e
--- /dev/null
+++ b/drivers/staging/erofs/unzip_lz4.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * linux/drivers/staging/erofs/unzip_lz4.c
+ *
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * Original code taken from 'linux/lib/lz4/lz4_decompress.c'
+ */
+
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011 - 2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ *
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
+ */
+#include "internal.h"
+#include <asm/unaligned.h>
+#include "lz4defs.h"
+
+/*
+ * no public solution meets our requirement yet,
+ * see: <required buffer size for LZ4_decompress_safe_partial>
+ * https://groups.google.com/forum/#!topic/lz4c/_3kkz5N6n00
+ */
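+/*
+ * a quick reminder of the LZ4 block format decoded below: each sequence
+ * starts with a token byte whose high 4 bits hold the literal length
+ * and low 4 bits the match length; a saturated 4-bit field (RUN_MASK /
+ * ML_MASK) is extended by additional length bytes, followed by the
+ * literals and a 2-byte little-endian match offset.
+ */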
+static FORCE_INLINE int customized_lz4_decompress_safe_partial(
+ const void * const source,
+ void * const dest,
+ int inputSize,
+ int outputSize)
+{
+ /* Local Variables */
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE * const iend = ip + inputSize;
+
+ BYTE *op = (BYTE *) dest;
+ BYTE * const oend = op + outputSize;
+ BYTE *cpy;
+
+ static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
+ static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+
+ /* Empty output buffer */
+ if (unlikely(outputSize == 0))
+ return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+
+ /* Main Loop : decode sequences */
+ while (1) {
+ size_t length;
+ const BYTE *match;
+ size_t offset;
+
+ /* get literal length */
+ unsigned int const token = *ip++;
+
+ length = token>>ML_BITS;
+
+ if (length == RUN_MASK) {
+ unsigned int s;
+
+ do {
+ s = *ip++;
+ length += s;
+ } while ((ip < iend - RUN_MASK) & (s == 255));
+
+ if (unlikely((size_t)(op + length) < (size_t)(op))) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ if (unlikely((size_t)(ip + length) < (size_t)(ip))) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ }
+
+ /* copy literals */
+ cpy = op + length;
+ if ((cpy > oend - WILDCOPYLENGTH) ||
+ (ip + length > iend - (2 + 1 + LASTLITERALS))) {
+ if (cpy > oend) {
+ memcpy(op, ip, length = oend - op);
+ op += length;
+ break;
+ }
+
+ if (unlikely(ip + length > iend)) {
+ /*
+ * Error :
+ * read attempt beyond
+ * end of input buffer
+ */
+ goto _output_error;
+ }
+
+ memcpy(op, ip, length);
+ ip += length;
+ op += length;
+
+ if (ip > iend - 2)
+ break;
+ /* Necessarily EOF, due to parsing restrictions */
+ /* break; */
+ } else {
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
+ op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+
+ if (unlikely(match < (const BYTE *)dest)) {
+ /* Error : offset outside buffers */
+ goto _output_error;
+ }
+
+ /* get matchlength */
+ length = token & ML_MASK;
+ if (length == ML_MASK) {
+ unsigned int s;
+
+ do {
+ s = *ip++;
+
+ if (ip > iend - LASTLITERALS)
+ goto _output_error;
+
+ length += s;
+ } while (s == 255);
+
+ if (unlikely((size_t)(op + length) < (size_t)op)) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ }
+
+ length += MINMATCH;
+
+ /* copy match within block */
+ cpy = op + length;
+
+ if (unlikely(cpy >= oend - WILDCOPYLENGTH)) {
+ if (cpy >= oend) {
+ while (op < oend)
+ *op++ = *match++;
+ break;
+ }
+ goto __match;
+ }
+
+ /* costs ~1%; silence an msan warning when offset == 0 */
+ LZ4_write32(op, (U32)offset);
+
+ if (unlikely(offset < 8)) {
+ const int dec64 = dec64table[offset];
+
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[offset];
+ memcpy(op + 4, match, 4);
+ match -= dec64;
+ } else {
+ LZ4_copy8(op, match);
+ match += 8;
+ }
+
+ op += 8;
+
+ if (unlikely(cpy > oend - 12)) {
+ BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
+
+ if (op < oCopyLimit) {
+ LZ4_wildCopy(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+__match:
+ while (op < cpy)
+ *op++ = *match++;
+ } else {
+ LZ4_copy8(op, match);
+
+ if (length > 16)
+ LZ4_wildCopy(op + 8, match + 8, cpy);
+ }
+
+ op = cpy; /* correction */
+ }
+ DBG_BUGON((void *)ip - source > inputSize);
+ DBG_BUGON((void *)op - dest > outputSize);
+
+ /* Nb of output bytes decoded */
+ return (int) ((void *)op - dest);
+
+ /* Overflow error detected */
+_output_error:
+ return -ERANGE;
+}
+
+int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen)
+{
+ int ret = customized_lz4_decompress_safe_partial(in,
+ out, inlen, outlen);
+
+ if (ret >= 0)
+ return ret;
+
+ /*
+ * LZ4_decompress_safe will return an error code
+ * (< 0) if decompression failed
+ */
+ errln("%s, failed to decompress, in[%p, %zu] outlen[%p, %zu]",
+ __func__, in, inlen, out, outlen);
+ WARN_ON(1);
+ print_hex_dump(KERN_DEBUG, "raw data [in]: ", DUMP_PREFIX_OFFSET,
+ 16, 1, in, inlen, true);
+ print_hex_dump(KERN_DEBUG, "raw data [out]: ", DUMP_PREFIX_OFFSET,
+ 16, 1, out, outlen, true);
+ return -EIO;
+}
+
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
new file mode 100644
index 000000000000..0956615b86f7
--- /dev/null
+++ b/drivers/staging/erofs/unzip_pagevec.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * linux/drivers/staging/erofs/unzip_pagevec.h
+ *
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#ifndef __EROFS_UNZIP_PAGEVEC_H
+#define __EROFS_UNZIP_PAGEVEC_H
+
+#include <linux/tagptr.h>
+
+/* page type in pagevec for unzip subsystem */
+enum z_erofs_page_type {
+ /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE,
+
+ Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
+
+ Z_EROFS_VLE_PAGE_TYPE_HEAD,
+ Z_EROFS_VLE_PAGE_TYPE_MAX
+};
+
+extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
+ __bad_page_type_exclusive(void);
+
+/* pagevec tagged pointer */
+typedef tagptr2_t erofs_vtptr_t;
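+/*
+ * the low 2 tag bits of each slot hold an enum z_erofs_page_type above,
+ * which is why Z_EROFS_PAGE_TYPE_EXCLUSIVE must remain 0 (enforced at
+ * compile time by __bad_page_type_exclusive).
+ */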
+
+/* pagevec collector */
+struct z_erofs_pagevec_ctor {
+ struct page *curr, *next;
+ erofs_vtptr_t *pages;
+
+ unsigned int nr, index;
+};
+
+static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
+ bool atomic)
+{
+ if (ctor->curr == NULL)
+ return;
+
+ if (atomic)
+ kunmap_atomic(ctor->pages);
+ else
+ kunmap(ctor->curr);
+}
+
+static inline struct page *
+z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
+ unsigned nr)
+{
+ unsigned index;
+
+ /* keep away from occupied pages */
+ if (ctor->next != NULL)
+ return ctor->next;
+
+ for (index = 0; index < nr; ++index) {
+ const erofs_vtptr_t t = ctor->pages[index];
+ const unsigned tags = tagptr_unfold_tags(t);
+
+ if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
+ return tagptr_unfold_ptr(t);
+ }
+
+ if (unlikely(nr >= ctor->nr))
+ BUG();
+
+ return NULL;
+}
+
+static inline void
+z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
+ bool atomic)
+{
+ struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
+
+ z_erofs_pagevec_ctor_exit(ctor, atomic);
+
+ ctor->curr = next;
+ ctor->next = NULL;
+ ctor->pages = atomic ?
+ kmap_atomic(ctor->curr) : kmap(ctor->curr);
+
+ ctor->nr = PAGE_SIZE / sizeof(struct page *);
+ ctor->index = 0;
+}
+
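+/*
+ * initialize the ctor at slot i of a pagevec which may span multiple
+ * pages: when i exceeds the in-line capacity, page down repeatedly
+ * until the page containing slot i is reached.
+ */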
+static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
+ unsigned nr,
+ erofs_vtptr_t *pages, unsigned i)
+{
+ ctor->nr = nr;
+ ctor->curr = ctor->next = NULL;
+ ctor->pages = pages;
+
+ if (i >= nr) {
+ i -= nr;
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+ while (i > ctor->nr) {
+ i -= ctor->nr;
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+ }
+ }
+
+ ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
+ ctor->index = i;
+}
+
+static inline bool
+z_erofs_pagevec_ctor_enqueue(struct z_erofs_pagevec_ctor *ctor,
+ struct page *page,
+ enum z_erofs_page_type type,
+ bool *occupied)
+{
+ *occupied = false;
+ if (unlikely(ctor->next == NULL && type))
+ if (ctor->index + 1 == ctor->nr)
+ return false;
+
+ if (unlikely(ctor->index >= ctor->nr))
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+
+ /* exclusive page type must be 0 */
+ if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
+ __bad_page_type_exclusive();
+
+ /* note that ctor->next can never be equal to 1 or 2 */
+ if (type == (uintptr_t)ctor->next) {
+ ctor->next = page;
+ *occupied = true;
+ }
+
+ ctor->pages[ctor->index++] =
+ tagptr_fold(erofs_vtptr_t, page, type);
+ return true;
+}
+
+static inline struct page *
+z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
+ enum z_erofs_page_type *type)
+{
+ erofs_vtptr_t t;
+
+ if (unlikely(ctor->index >= ctor->nr)) {
+ BUG_ON(ctor->next == NULL);
+ z_erofs_pagevec_ctor_pagedown(ctor, true);
+ }
+
+ t = ctor->pages[ctor->index];
+
+ *type = tagptr_unfold_tags(t);
+
+ /* note that ctor->next can never be equal to 1 or 2 */
+ if (*type == (uintptr_t)ctor->next)
+ ctor->next = tagptr_unfold_ptr(t);
+
+ ctor->pages[ctor->index++] =
+ tagptr_fold(erofs_vtptr_t, NULL, 0);
+
+ return tagptr_unfold_ptr(t);
+}
+
+#endif
+
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
new file mode 100644
index 000000000000..8721f0a41d15
--- /dev/null
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -0,0 +1,1656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/unzip_vle.c
+ *
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include "unzip_vle.h"
+#include <linux/prefetch.h>
+
+static struct workqueue_struct *z_erofs_workqueue __read_mostly;
+static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
+
+void z_erofs_exit_zip_subsystem(void)
+{
+ BUG_ON(z_erofs_workqueue == NULL);
+ BUG_ON(z_erofs_workgroup_cachep == NULL);
+
+ destroy_workqueue(z_erofs_workqueue);
+ kmem_cache_destroy(z_erofs_workgroup_cachep);
+}
+
+static inline int init_unzip_workqueue(void)
+{
+ const unsigned possible_cpus = num_possible_cpus();
+
+ /*
+ * we don't need too many threads, and limiting the number of
+ * threads can improve scheduling performance.
+ */
+ z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ possible_cpus + possible_cpus / 4);
+
+ return z_erofs_workqueue != NULL ? 0 : -ENOMEM;
+}
+
+int z_erofs_init_zip_subsystem(void)
+{
+ z_erofs_workgroup_cachep =
+ kmem_cache_create("erofs_compress",
+ Z_EROFS_WORKGROUP_SIZE, 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
+
+ if (z_erofs_workgroup_cachep != NULL) {
+ if (!init_unzip_workqueue())
+ return 0;
+
+ kmem_cache_destroy(z_erofs_workgroup_cachep);
+ }
+ return -ENOMEM;
+}
+
+enum z_erofs_vle_work_role {
+ Z_EROFS_VLE_WORK_SECONDARY,
+ Z_EROFS_VLE_WORK_PRIMARY,
+ /*
+ * The current work has at least been linked with the chained works
+ * processed after it, which means that if the page being processed
+ * is the tail partial page of the work, the current work can
+ * safely use the whole page, as illustrated below:
+ * +--------------+-------------------------------------------+
+ * | tail page | head page (of the previous work) |
+ * +--------------+-------------------------------------------+
+ * /\ which belongs to the current work
+ * [ (*) this page can be used for the current work itself. ]
+ */
+ Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+ Z_EROFS_VLE_WORK_MAX
+};
+
+struct z_erofs_vle_work_builder {
+ enum z_erofs_vle_work_role role;
+ /*
+ * 'hosted = false' means that the current workgroup doesn't belong to
+ * the owned chained workgroups. In other words, it is none of our
+ * business to submit this workgroup.
+ */
+ bool hosted;
+
+ struct z_erofs_vle_workgroup *grp;
+ struct z_erofs_vle_work *work;
+ struct z_erofs_pagevec_ctor vector;
+
+ /* pages used for reading the compressed data */
+ struct page **compressed_pages;
+ unsigned compressed_deficit;
+};
+
+#define VLE_WORK_BUILDER_INIT() \
+ { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+
+static bool grab_managed_cache_pages(struct address_space *mapping,
+ erofs_blk_t start,
+ struct page **compressed_pages,
+ int clusterblks,
+ bool reserve_allocation)
+{
+ bool noio = true;
+ unsigned int i;
+
+ /* TODO: optimize by introducing find_get_pages_range */
+ for (i = 0; i < clusterblks; ++i) {
+ struct page *page, *found;
+
+ if (READ_ONCE(compressed_pages[i]) != NULL)
+ continue;
+
+ page = found = find_get_page(mapping, start + i);
+ if (found == NULL) {
+ noio = false;
+ if (!reserve_allocation)
+ continue;
+ page = EROFS_UNALLOCATED_CACHED_PAGE;
+ }
+
+ if (NULL == cmpxchg(compressed_pages + i, NULL, page))
+ continue;
+
+ if (found != NULL)
+ put_page(found);
+ }
+ return noio;
+}
+
+/* called by erofs_shrinker to get rid of all compressed_pages */
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *egrp)
+{
+ struct z_erofs_vle_workgroup *const grp =
+ container_of(egrp, struct z_erofs_vle_workgroup, obj);
+ struct address_space *const mapping = sbi->managed_cache->i_mapping;
+ const int clusterpages = erofs_clusterpages(sbi);
+ int i;
+
+ /*
+ * the workgroup's refcount is now frozen at 1, therefore there is
+ * no need to worry about concurrent decompression users.
+ */
+ for (i = 0; i < clusterpages; ++i) {
+ struct page *page = grp->compressed_pages[i];
+
+ if (page == NULL || page->mapping != mapping)
+ continue;
+
+ /* block other users from reclaiming or migrating the page */
+ if (!trylock_page(page))
+ return -EBUSY;
+
+ /* barrier is implied in the following 'unlock_page' */
+ WRITE_ONCE(grp->compressed_pages[i], NULL);
+
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+
+ unlock_page(page);
+ put_page(page);
+ }
+ return 0;
+}
+
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+ struct page *page)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
+ const unsigned int clusterpages = erofs_clusterpages(sbi);
+
+ struct z_erofs_vle_workgroup *grp;
+ int ret = 0; /* 0 - busy */
+
+ /* prevent the workgroup from being freed */
+ rcu_read_lock();
+ grp = (void *)page_private(page);
+
+ if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
+ unsigned int i;
+
+ for (i = 0; i < clusterpages; ++i) {
+ if (grp->compressed_pages[i] == page) {
+ WRITE_ONCE(grp->compressed_pages[i], NULL);
+ ret = 1;
+ break;
+ }
+ }
+ erofs_workgroup_unfreeze(&grp->obj, 1);
+ }
+ rcu_read_unlock();
+
+ if (ret) {
+ ClearPagePrivate(page);
+ put_page(page);
+ }
+ return ret;
+}
+#endif
+
+/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
+static inline bool try_to_reuse_as_compressed_page(
+ struct z_erofs_vle_work_builder *b,
+ struct page *page)
+{
+ while (b->compressed_deficit) {
+ --b->compressed_deficit;
+ if (NULL == cmpxchg(b->compressed_pages++, NULL, page))
+ return true;
+ }
+
+ return false;
+}
+
+/* callers must be with work->lock held */
+static int z_erofs_vle_work_add_page(
+ struct z_erofs_vle_work_builder *builder,
+ struct page *page,
+ enum z_erofs_page_type type)
+{
+ int ret;
+ bool occupied;
+
+ /* give priority for the compressed data storage */
+ if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
+ type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
+ try_to_reuse_as_compressed_page(builder, page))
+ return 0;
+
+ ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
+ page, type, &occupied);
+ builder->work->vcnt += (unsigned)ret;
+
+ return ret ? 0 : -EAGAIN;
+}
+
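+/*
+ * workgroups pending submission are chained through grp->next (as used
+ * throughout this file): NIL means not chained yet, TAIL marks the open
+ * end of a chain, and TAIL_CLOSED a chain already taken for submission.
+ */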
+static inline bool try_to_claim_workgroup(
+ struct z_erofs_vle_workgroup *grp,
+ z_erofs_vle_owned_workgrp_t *owned_head,
+ bool *hosted)
+{
+ DBG_BUGON(*hosted == true);
+
+ /* let's claim these following types of workgroup */
+retry:
+ if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
+ /* type 1, nil workgroup */
+ if (Z_EROFS_VLE_WORKGRP_NIL != cmpxchg(&grp->next,
+ Z_EROFS_VLE_WORKGRP_NIL, *owned_head))
+ goto retry;
+
+ *owned_head = grp;
+ *hosted = true;
+ } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
+ /*
+ * type 2, link to the end of an existing open chain; be
+ * careful that its submission itself is governed by the
+ * original owned chain.
+ */
+ if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
+ Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
+ goto retry;
+
+ *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
+ } else
+ return false; /* :( better luck next time */
+
+ return true; /* lucky, I am the followee :) */
+}
+
+static struct z_erofs_vle_work *
+z_erofs_vle_work_lookup(struct super_block *sb,
+ pgoff_t idx, unsigned pageofs,
+ struct z_erofs_vle_workgroup **grp_ret,
+ enum z_erofs_vle_work_role *role,
+ z_erofs_vle_owned_workgrp_t *owned_head,
+ bool *hosted)
+{
+ bool tag, primary;
+ struct erofs_workgroup *egrp;
+ struct z_erofs_vle_workgroup *grp;
+ struct z_erofs_vle_work *work;
+
+ egrp = erofs_find_workgroup(sb, idx, &tag);
+ if (egrp == NULL) {
+ *grp_ret = NULL;
+ return NULL;
+ }
+
+ *grp_ret = grp = container_of(egrp,
+ struct z_erofs_vle_workgroup, obj);
+
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ work = z_erofs_vle_grab_work(grp, pageofs);
+ primary = true;
+#else
+ BUG();
+#endif
+
+ DBG_BUGON(work->pageofs != pageofs);
+
+ /*
+ * lock must be taken first to avoid grp->next == NIL between
+ * claiming workgroup and adding pages:
+ * grp->next != NIL
+ * grp->next = NIL
+ * mutex_unlock_all
+ * mutex_lock(&work->lock)
+ * add all pages to pagevec
+ *
+ * [correct locking case 1]:
+ * mutex_lock(grp->work[a])
+ * ...
+ * mutex_lock(grp->work[b]) mutex_lock(grp->work[c])
+ * ... *role = SECONDARY
+ * add all pages to pagevec
+ * ...
+ * mutex_unlock(grp->work[c])
+ * mutex_lock(grp->work[c])
+ * ...
+ * grp->next = NIL
+ * mutex_unlock_all
+ *
+ * [correct locking case 2]:
+ * mutex_lock(grp->work[b])
+ * ...
+ * mutex_lock(grp->work[a])
+ * ...
+ * mutex_lock(grp->work[c])
+ * ...
+ * grp->next = NIL
+ * mutex_unlock_all
+ * mutex_lock(grp->work[a])
+ * *role = PRIMARY_OWNER
+ * add all pages to pagevec
+ * ...
+ */
+ mutex_lock(&work->lock);
+
+ *hosted = false;
+ if (!primary)
+ *role = Z_EROFS_VLE_WORK_SECONDARY;
+ /* claim the workgroup if possible */
+ else if (try_to_claim_workgroup(grp, owned_head, hosted))
+ *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+ else
+ *role = Z_EROFS_VLE_WORK_PRIMARY;
+
+ return work;
+}
+
+static struct z_erofs_vle_work *
+z_erofs_vle_work_register(struct super_block *sb,
+ struct z_erofs_vle_workgroup **grp_ret,
+ struct erofs_map_blocks *map,
+ pgoff_t index, unsigned pageofs,
+ enum z_erofs_vle_work_role *role,
+ z_erofs_vle_owned_workgrp_t *owned_head,
+ bool *hosted)
+{
+ bool newgrp = false;
+ struct z_erofs_vle_workgroup *grp = *grp_ret;
+ struct z_erofs_vle_work *work;
+
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ BUG_ON(grp != NULL);
+#else
+ if (grp != NULL)
+ goto skip;
+#endif
+ /* no available workgroup, let's allocate one */
+ grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
+ if (unlikely(grp == NULL))
+ return ERR_PTR(-ENOMEM);
+
+ grp->obj.index = index;
+ grp->llen = map->m_llen;
+
+ z_erofs_vle_set_workgrp_fmt(grp,
+ (map->m_flags & EROFS_MAP_ZIPPED) ?
+ Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
+ Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
+ atomic_set(&grp->obj.refcount, 1);
+
+ /* new workgrps have been claimed as type 1 */
+ WRITE_ONCE(grp->next, *owned_head);
+ /* primary and followed work for all new workgrps */
+ *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+ /* it should be submitted by ourselves */
+ *hosted = true;
+
+ newgrp = true;
+#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
+skip:
+ /* currently unimplemented */
+ BUG();
+#else
+ work = z_erofs_vle_grab_primary_work(grp);
+#endif
+ work->pageofs = pageofs;
+
+ mutex_init(&work->lock);
+
+ if (newgrp) {
+ int err = erofs_register_workgroup(sb, &grp->obj, 0);
+
+ if (err) {
+ kmem_cache_free(z_erofs_workgroup_cachep, grp);
+ return ERR_PTR(-EAGAIN);
+ }
+ }
+
+ *owned_head = *grp_ret = grp;
+
+ mutex_lock(&work->lock);
+ return work;
+}
+
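+/* lock-free update which only ever enlarges grp->llen (cmpxchg retry loop) */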
+static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
+ unsigned int llen)
+{
+ while (1) {
+ unsigned int orig_llen = grp->llen;
+
+ if (orig_llen >= llen || orig_llen ==
+ cmpxchg(&grp->llen, orig_llen, llen))
+ break;
+ }
+}
+
+#define builder_is_followed(builder) \
+ ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
+
+static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
+ struct super_block *sb,
+ struct erofs_map_blocks *map,
+ z_erofs_vle_owned_workgrp_t *owned_head)
+{
+ const unsigned clusterpages = erofs_clusterpages(EROFS_SB(sb));
+ const erofs_blk_t index = erofs_blknr(map->m_pa);
+ const unsigned pageofs = map->m_la & ~PAGE_MASK;
+ struct z_erofs_vle_workgroup *grp;
+ struct z_erofs_vle_work *work;
+
+ DBG_BUGON(builder->work != NULL);
+
+ /* must be Z_EROFS_VLE_WORKGRP_TAIL or the next chained work */
+ DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
+ DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+
+ DBG_BUGON(erofs_blkoff(map->m_pa));
+
+repeat:
+ work = z_erofs_vle_work_lookup(sb, index,
+ pageofs, &grp, &builder->role, owned_head, &builder->hosted);
+ if (work != NULL) {
+ __update_workgrp_llen(grp, map->m_llen);
+ goto got_it;
+ }
+
+ work = z_erofs_vle_work_register(sb, &grp, map, index, pageofs,
+ &builder->role, owned_head, &builder->hosted);
+
+ if (unlikely(work == ERR_PTR(-EAGAIN)))
+ goto repeat;
+
+ if (unlikely(IS_ERR(work)))
+ return PTR_ERR(work);
+got_it:
+ z_erofs_pagevec_ctor_init(&builder->vector,
+ Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
+
+ if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
+ /* enable possibly in-place decompression */
+ builder->compressed_pages = grp->compressed_pages;
+ builder->compressed_deficit = clusterpages;
+ } else {
+ builder->compressed_pages = NULL;
+ builder->compressed_deficit = 0;
+ }
+
+ builder->grp = grp;
+ builder->work = work;
+ return 0;
+}
+
+/*
+ * keep in mind that referenced workgroups will only be freed after
+ * an RCU grace period, so rcu_read_lock() can prevent a workgroup
+ * from being freed.
+ */
+static void z_erofs_rcu_callback(struct rcu_head *head)
+{
+ struct z_erofs_vle_work *work = container_of(head,
+ struct z_erofs_vle_work, rcu);
+ struct z_erofs_vle_workgroup *grp =
+ z_erofs_vle_work_workgroup(work, true);
+
+ kmem_cache_free(z_erofs_workgroup_cachep, grp);
+}
+
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
+{
+ struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
+ struct z_erofs_vle_workgroup, obj);
+ struct z_erofs_vle_work *const work = &vgrp->work;
+
+ call_rcu(&work->rcu, z_erofs_rcu_callback);
+}
+
+static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
+ struct z_erofs_vle_work *work __maybe_unused)
+{
+ erofs_workgroup_put(&grp->obj);
+}
+
+void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
+{
+ struct z_erofs_vle_workgroup *grp =
+ z_erofs_vle_work_workgroup(work, true);
+
+ __z_erofs_vle_work_release(grp, work);
+}
+
+static inline bool
+z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
+{
+ struct z_erofs_vle_work *work = builder->work;
+
+ if (work == NULL)
+ return false;
+
+ z_erofs_pagevec_ctor_exit(&builder->vector, false);
+ mutex_unlock(&work->lock);
+
+ /*
+ * once all pending pages are added, drop the work reference
+ * if the current work isn't hosted by ourselves.
+ */
+ if (!builder->hosted)
+ __z_erofs_vle_work_release(builder->grp, work);
+
+ builder->work = NULL;
+ builder->grp = NULL;
+ return true;
+}
+
+static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
+ gfp_t gfp)
+{
+ struct page *page = erofs_allocpage(pagepool, gfp);
+
+ if (unlikely(page == NULL))
+ return NULL;
+
+ page->mapping = Z_EROFS_MAPPING_STAGING;
+ return page;
+}
+
+struct z_erofs_vle_frontend {
+ struct inode *const inode;
+
+ struct z_erofs_vle_work_builder builder;
+ struct erofs_map_blocks_iter m_iter;
+
+ z_erofs_vle_owned_workgrp_t owned_head;
+
+ bool initial;
+#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+ erofs_off_t cachedzone_la;
+#endif
+};
+
+#define VLE_FRONTEND_INIT(__i) { \
+ .inode = __i, \
+ .m_iter = { \
+ { .m_llen = 0, .m_plen = 0 }, \
+ .mpage = NULL \
+ }, \
+ .builder = VLE_WORK_BUILDER_INIT(), \
+ .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
+ .initial = true, }
+
+static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
+ struct page *page,
+ struct list_head *page_pool)
+{
+ struct super_block *const sb = fe->inode->i_sb;
+ struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+ struct erofs_map_blocks_iter *const m = &fe->m_iter;
+ struct erofs_map_blocks *const map = &m->map;
+ struct z_erofs_vle_work_builder *const builder = &fe->builder;
+ const loff_t offset = page_offset(page);
+
+ bool tight = builder_is_followed(builder);
+ struct z_erofs_vle_work *work = builder->work;
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ struct address_space *const mngda = sbi->managed_cache->i_mapping;
+ struct z_erofs_vle_workgroup *grp;
+ bool noio_outoforder;
+#endif
+
+ enum z_erofs_page_type page_type;
+ unsigned cur, end, split, index;
+ int err;
+
+ /* register locked file pages as online pages in pack */
+ z_erofs_onlinepage_init(page);
+
+ split = 0;
+ end = PAGE_SIZE;
+repeat:
+ cur = end - 1;
+
+ /* lucky, within the range of the current map_blocks */
+ if (offset + cur >= map->m_la &&
+ offset + cur < map->m_la + map->m_llen)
+ goto hitted;
+
+ /* go on with the next map_blocks */
+ debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
+
+ if (!z_erofs_vle_work_iter_end(builder))
+ fe->initial = false;
+
+ map->m_la = offset + cur;
+ map->m_llen = 0;
+ err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
+ if (unlikely(err))
+ goto err_out;
+
+ /* deal with hole (FIXME! broken now) */
+ if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
+ goto hitted;
+
+ DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
+ BUG_ON(erofs_blkoff(map->m_pa));
+
+ err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
+ if (unlikely(err))
+ goto err_out;
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ grp = fe->builder.grp;
+
+ /* let's do out-of-order decompression for noio */
+ noio_outoforder = grab_managed_cache_pages(mngda,
+ erofs_blknr(map->m_pa),
+ grp->compressed_pages, erofs_blknr(map->m_plen),
+ /* compressed page caching selection strategy */
+ fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
+ map->m_la < fe->cachedzone_la : 0));
+
+ if (noio_outoforder && builder_is_followed(builder))
+ builder->role = Z_EROFS_VLE_WORK_PRIMARY;
+#endif
+
+ tight &= builder_is_followed(builder);
+ work = builder->work;
+hitted:
+ cur = end - min_t(unsigned, offset + end - map->m_la, end);
+ if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
+ zero_user_segment(page, cur, end);
+ goto next_part;
+ }
+
+ /* let's derive page type */
+ page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
+ (!split ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+
+retry:
+ err = z_erofs_vle_work_add_page(builder, page, page_type);
+ /* should allocate an additional staging page for pagevec */
+ if (err == -EAGAIN) {
+ struct page *const newpage =
+ __stagingpage_alloc(page_pool, GFP_NOFS);
+
+ err = z_erofs_vle_work_add_page(builder,
+ newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+ if (!err)
+ goto retry;
+ }
+
+ if (unlikely(err))
+ goto err_out;
+
+ index = page->index - map->m_la / PAGE_SIZE;
+
+ /* FIXME! avoid the last redundant fixup & endio */
+ z_erofs_onlinepage_fixup(page, index, true);
+ ++split;
+
+ /* also update nr_pages and increase queued_pages */
+ work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
+next_part:
+ /* can be used for verification */
+ map->m_llen = offset + cur - map->m_la;
+
+ end = cur;
+ if (end > 0)
+ goto repeat;
+
+ /* FIXME! avoid the last redundant fixup & endio */
+ z_erofs_onlinepage_endio(page);
+
+ debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
+ __func__, page, spiltted, map->m_llen);
+ return 0;
+
+err_out:
+ /* TODO: the missing error handling cases */
+ return err;
+}
+
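+/*
+ * bi_private carries a tagptr1_t here: the pointer part is the unzip io
+ * descriptor and the single tag bit selects background (workqueue)
+ * versus foreground (waitqueue) completion.
+ */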
+static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
+{
+ tagptr1_t t = tagptr_init(tagptr1_t, ptr);
+ struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
+ bool background = tagptr_unfold_tags(t);
+
+ if (atomic_add_return(bios, &io->pending_bios))
+ return;
+
+ if (background)
+ queue_work(z_erofs_workqueue, &io->u.work);
+ else
+ wake_up(&io->u.wait);
+}
+
+static inline void z_erofs_vle_read_endio(struct bio *bio)
+{
+ const blk_status_t err = bio->bi_status;
+ unsigned i;
+ struct bio_vec *bvec;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ struct address_space *mngda = NULL;
+#endif
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ struct page *page = bvec->bv_page;
+ bool cachemngd = false;
+
+ DBG_BUGON(PageUptodate(page));
+ BUG_ON(page->mapping == NULL);
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
+ struct inode *const inode = page->mapping->host;
+ struct super_block *const sb = inode->i_sb;
+
+ mngda = EROFS_SB(sb)->managed_cache->i_mapping;
+ }
+
+ /*
+ * if mngda has not been set up, it stays NULL; however,
+ * page->mapping is never NULL if everything works properly.
+ */
+ cachemngd = (page->mapping == mngda);
+#endif
+
+ if (unlikely(err))
+ SetPageError(page);
+ else if (cachemngd)
+ SetPageUptodate(page);
+
+ if (cachemngd)
+ unlock_page(page);
+ }
+
+ z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
+ bio_put(bio);
+}
+
+static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
+static DEFINE_MUTEX(z_pagemap_global_lock);
+
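+/*
+ * the decompressed page array is kept on the stack for small workgroups
+ * and kvmalloc'ed otherwise, with z_pagemap_global as a mutex-protected
+ * fallback under memory pressure (see the selection logic below).
+ */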
+static int z_erofs_vle_unzip(struct super_block *sb,
+ struct z_erofs_vle_workgroup *grp,
+ struct list_head *page_pool)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ struct address_space *const mngda = sbi->managed_cache->i_mapping;
+#endif
+ const unsigned clusterpages = erofs_clusterpages(sbi);
+
+ struct z_erofs_pagevec_ctor ctor;
+ unsigned nr_pages;
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ unsigned sparsemem_pages = 0;
+#endif
+ struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
+ struct page **pages, **compressed_pages, *page;
+ unsigned i, llen;
+
+ enum z_erofs_page_type page_type;
+ bool overlapped;
+ struct z_erofs_vle_work *work;
+ void *vout;
+ int err;
+
+ might_sleep();
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ work = z_erofs_vle_grab_primary_work(grp);
+#else
+ BUG();
+#endif
+ BUG_ON(!READ_ONCE(work->nr_pages));
+
+ mutex_lock(&work->lock);
+ nr_pages = work->nr_pages;
+
+ if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
+ pages = pages_onstack;
+ else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
+ mutex_trylock(&z_pagemap_global_lock))
+ pages = z_pagemap_global;
+ else {
+repeat:
+ pages = kvmalloc_array(nr_pages,
+ sizeof(struct page *), GFP_KERNEL);
+
+ /* fallback to global pagemap for the lowmem scenario */
+ if (unlikely(pages == NULL)) {
+ if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
+ goto repeat;
+ else {
+ mutex_lock(&z_pagemap_global_lock);
+ pages = z_pagemap_global;
+ }
+ }
+ }
+
+ for (i = 0; i < nr_pages; ++i)
+ pages[i] = NULL;
+
+ z_erofs_pagevec_ctor_init(&ctor,
+ Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
+
+ for (i = 0; i < work->vcnt; ++i) {
+ unsigned pagenr;
+
+ page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
+
+ /* all pages in pagevec ought to be valid */
+ DBG_BUGON(page == NULL);
+ DBG_BUGON(page->mapping == NULL);
+
+ if (z_erofs_gather_if_stagingpage(page_pool, page))
+ continue;
+
+ if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
+ pagenr = 0;
+ else
+ pagenr = z_erofs_onlinepage_index(page);
+
+ BUG_ON(pagenr >= nr_pages);
+
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ BUG_ON(pages[pagenr] != NULL);
+ ++sparsemem_pages;
+#endif
+ pages[pagenr] = page;
+ }
+
+ z_erofs_pagevec_ctor_exit(&ctor, true);
+
+ overlapped = false;
+ compressed_pages = grp->compressed_pages;
+
+ for (i = 0; i < clusterpages; ++i) {
+ unsigned pagenr;
+
+ page = compressed_pages[i];
+
+ /* all compressed pages ought to be valid */
+ DBG_BUGON(page == NULL);
+ DBG_BUGON(page->mapping == NULL);
+
+ if (z_erofs_is_stagingpage(page))
+ continue;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ else if (page->mapping == mngda) {
+ BUG_ON(PageLocked(page));
+ BUG_ON(!PageUptodate(page));
+ continue;
+ }
+#endif
+
+ /* only non-head pages can be reused as compressed pages */
+ pagenr = z_erofs_onlinepage_index(page);
+
+ BUG_ON(pagenr >= nr_pages);
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ BUG_ON(pages[pagenr] != NULL);
+ ++sparsemem_pages;
+#endif
+ pages[pagenr] = page;
+
+ overlapped = true;
+ }
+
+ llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
+
+ if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
+ /* FIXME! this should be fixed in the future */
+ BUG_ON(grp->llen != llen);
+
+ err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
+ pages, nr_pages, work->pageofs);
+ goto out;
+ }
+
+ if (llen > grp->llen)
+ llen = grp->llen;
+
+ err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
+ clusterpages, pages, llen, work->pageofs,
+ z_erofs_onlinepage_endio);
+ if (err != -ENOTSUPP)
+ goto out_percpu;
+
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+ if (sparsemem_pages >= nr_pages) {
+ BUG_ON(sparsemem_pages > nr_pages);
+ goto skip_allocpage;
+ }
+#endif
+
+ for (i = 0; i < nr_pages; ++i) {
+ if (pages[i] != NULL)
+ continue;
+
+ pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
+ }
+
+#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
+skip_allocpage:
+#endif
+ vout = erofs_vmap(pages, nr_pages);
+
+ err = z_erofs_vle_unzip_vmap(compressed_pages,
+ clusterpages, vout, llen, work->pageofs, overlapped);
+
+ erofs_vunmap(vout, nr_pages);
+
+out:
+ for (i = 0; i < nr_pages; ++i) {
+ page = pages[i];
+ DBG_BUGON(page->mapping == NULL);
+
+ /* recycle all individual staging pages */
+ if (z_erofs_gather_if_stagingpage(page_pool, page))
+ continue;
+
+ if (unlikely(err < 0))
+ SetPageError(page);
+
+ z_erofs_onlinepage_endio(page);
+ }
+
+out_percpu:
+ for (i = 0; i < clusterpages; ++i) {
+ page = compressed_pages[i];
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ if (page->mapping == mngda)
+ continue;
+#endif
+ /* recycle all individual staging pages */
+ (void)z_erofs_gather_if_stagingpage(page_pool, page);
+
+ WRITE_ONCE(compressed_pages[i], NULL);
+ }
+
+ if (pages == z_pagemap_global)
+ mutex_unlock(&z_pagemap_global_lock);
+ else if (unlikely(pages != pages_onstack))
+ kvfree(pages);
+
+ work->nr_pages = 0;
+ work->vcnt = 0;
+
+ /* all work locks MUST be taken before the following line */
+
+ WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
+
+ /* all work locks SHOULD be released right now */
+ mutex_unlock(&work->lock);
+
+ z_erofs_vle_work_release(work);
+ return err;
+}
+
+static void z_erofs_vle_unzip_all(struct super_block *sb,
+ struct z_erofs_vle_unzip_io *io,
+ struct list_head *page_pool)
+{
+ z_erofs_vle_owned_workgrp_t owned = io->head;
+
+ while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
+ struct z_erofs_vle_workgroup *grp;
+
+ /* it is impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
+ DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
+
+ /* it is also impossible that 'owned' equals NIL */
+ DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
+
+ grp = owned;
+ owned = READ_ONCE(grp->next);
+
+ z_erofs_vle_unzip(sb, grp, page_pool);
+ }
+}
+
+static void z_erofs_vle_unzip_wq(struct work_struct *work)
+{
+ struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
+ struct z_erofs_vle_unzip_io_sb, io.u.work);
+ LIST_HEAD(page_pool);
+
+ BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+ z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
+
+ put_pages_list(&page_pool);
+ kvfree(iosb);
+}
+
+static inline struct z_erofs_vle_unzip_io *
+prepare_io_handler(struct super_block *sb,
+ struct z_erofs_vle_unzip_io *io,
+ bool background)
+{
+ struct z_erofs_vle_unzip_io_sb *iosb;
+
+ if (!background) {
+ /* waitqueue available for foreground io */
+ BUG_ON(io == NULL);
+
+ init_waitqueue_head(&io->u.wait);
+ atomic_set(&io->pending_bios, 0);
+ goto out;
+ }
+
+ if (io != NULL)
+ BUG();
+ else {
+ /* allocate extra io descriptor for background io */
+ iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb),
+ GFP_KERNEL | __GFP_NOFAIL);
+ BUG_ON(iosb == NULL);
+
+ io = &iosb->io;
+ }
+
+ iosb->sb = sb;
+ INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
+out:
+ io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
+ return io;
+}
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+/* true - unlocked (noio), false - locked (need submit io) */
+static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
+ struct page *page)
+{
+ wait_on_page_locked(page);
+ if (PagePrivate(page) && PageUptodate(page))
+ return true;
+
+ lock_page(page);
+ if (unlikely(!PagePrivate(page))) {
+ set_page_private(page, (unsigned long)grp);
+ SetPagePrivate(page);
+ }
+ if (unlikely(PageUptodate(page))) {
+ unlock_page(page);
+ return true;
+ }
+ return false;
+}
+
+#define __FSIO_1 1
+#else
+#define __FSIO_1 0
+#endif
+
+static bool z_erofs_vle_submit_all(struct super_block *sb,
+ z_erofs_vle_owned_workgrp_t owned_head,
+ struct list_head *pagepool,
+ struct z_erofs_vle_unzip_io *fg_io,
+ bool force_fg)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ const unsigned clusterpages = erofs_clusterpages(sbi);
+ const gfp_t gfp = GFP_NOFS;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ struct address_space *const mngda = sbi->managed_cache->i_mapping;
+ struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
+#endif
+ struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
+ struct bio *bio;
+ tagptr1_t bi_private;
+ /* bio starts as NULL, so last_index is never read before it is set */
+ pgoff_t uninitialized_var(last_index);
+ bool force_submit = false;
+ unsigned nr_bios;
+
+ if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
+ return false;
+
+ /*
+ * force_fg == 1, (io, fg_io[0]) no io; (io, fg_io[1]) need submit io
+ * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
+ */
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ ios[0] = prepare_io_handler(sb, fg_io + 0, false);
+#endif
+
+ if (force_fg) {
+ ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
+ bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
+ } else {
+ ios[__FSIO_1] = prepare_io_handler(sb, NULL, true);
+ bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1);
+ }
+
+ nr_bios = 0;
+ force_submit = false;
+ bio = NULL;
+
+ /* by default, all need io submission */
+ ios[__FSIO_1]->head = owned_head;
+
+ do {
+ struct z_erofs_vle_workgroup *grp;
+ struct page **compressed_pages, *oldpage, *page;
+ pgoff_t first_index;
+ unsigned i = 0;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ unsigned int noio = 0;
+ bool cachemngd;
+#endif
+ int err;
+
+ /* 'owned_head' can never equal either of the following */
+ DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+ DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
+
+ grp = owned_head;
+
+ /* close the main owned chain at first */
+ owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
+ Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+
+ first_index = grp->obj.index;
+ compressed_pages = grp->compressed_pages;
+
+ force_submit |= (first_index != last_index + 1);
+repeat:
+ /* fulfill all compressed pages */
+ oldpage = page = READ_ONCE(compressed_pages[i]);
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ cachemngd = false;
+
+ if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
+ cachemngd = true;
+ goto do_allocpage;
+ } else if (page != NULL) {
+ if (page->mapping != mngda)
+ BUG_ON(PageUptodate(page));
+ else if (recover_managed_page(grp, page)) {
+ /* page is uptodate, skip io submission */
+ force_submit = true;
+ ++noio;
+ goto skippage;
+ }
+ } else {
+do_allocpage:
+#else
+ if (page != NULL)
+ BUG_ON(PageUptodate(page));
+ else {
+#endif
+ page = __stagingpage_alloc(pagepool, gfp);
+
+ if (oldpage != cmpxchg(compressed_pages + i,
+ oldpage, page)) {
+ list_add(&page->lru, pagepool);
+ goto repeat;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ } else if (cachemngd && !add_to_page_cache_lru(page,
+ mngda, first_index + i, gfp)) {
+ set_page_private(page, (unsigned long)grp);
+ SetPagePrivate(page);
+#endif
+ }
+ }
+
+ if (bio != NULL && force_submit) {
+submit_bio_retry:
+ __submit_bio(bio, REQ_OP_READ, 0);
+ bio = NULL;
+ }
+
+ if (bio == NULL) {
+ bio = prepare_bio(sb, first_index + i,
+ BIO_MAX_PAGES, z_erofs_vle_read_endio);
+ bio->bi_private = tagptr_cast_ptr(bi_private);
+
+ ++nr_bios;
+ }
+
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
+
+ force_submit = false;
+ last_index = first_index + i;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+skippage:
+#endif
+ if (++i < clusterpages)
+ goto repeat;
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ if (noio < clusterpages) {
+ lstgrp_io = grp;
+ } else {
+ z_erofs_vle_owned_workgrp_t iogrp_next =
+ owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
+ Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
+ owned_head;
+
+ if (lstgrp_io == NULL)
+ ios[1]->head = iogrp_next;
+ else
+ WRITE_ONCE(lstgrp_io->next, iogrp_next);
+
+ if (lstgrp_noio == NULL)
+ ios[0]->head = grp;
+ else
+ WRITE_ONCE(lstgrp_noio->next, grp);
+
+ lstgrp_noio = grp;
+ }
+#endif
+ } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
+
+ if (bio != NULL)
+ __submit_bio(bio, REQ_OP_READ, 0);
+
+#ifndef EROFS_FS_HAS_MANAGED_CACHE
+ BUG_ON(!nr_bios);
+#else
+ if (lstgrp_noio != NULL)
+ WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+
+ if (!force_fg && !nr_bios) {
+ kvfree(container_of(ios[1],
+ struct z_erofs_vle_unzip_io_sb, io));
+ return true;
+ }
+#endif
+
+ z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
+ return true;
+}
+
+static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
+ struct list_head *pagepool,
+ bool force_fg)
+{
+ struct super_block *sb = f->inode->i_sb;
+ struct z_erofs_vle_unzip_io io[1 + __FSIO_1];
+
+ if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
+ return;
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ z_erofs_vle_unzip_all(sb, &io[0], pagepool);
+#endif
+ if (!force_fg)
+ return;
+
+ /* wait until all bios are completed */
+ wait_event(io[__FSIO_1].u.wait,
+ !atomic_read(&io[__FSIO_1].pending_bios));
+
+	/* let's do synchronous decompression */
+ z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool);
+}
+
+static int z_erofs_vle_normalaccess_readpage(struct file *file,
+ struct page *page)
+{
+ struct inode *const inode = page->mapping->host;
+ struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
+ int err;
+ LIST_HEAD(pagepool);
+
+#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+ f.cachedzone_la = page->index << PAGE_SHIFT;
+#endif
+ err = z_erofs_do_read_page(&f, page, &pagepool);
+ (void)z_erofs_vle_work_iter_end(&f.builder);
+
+ if (err) {
+ errln("%s, failed to read, err [%d]", __func__, err);
+ goto out;
+ }
+
+ z_erofs_submit_and_unzip(&f, &pagepool, true);
+out:
+ if (f.m_iter.mpage != NULL)
+ put_page(f.m_iter.mpage);
+
+ /* clean up the remaining free pages */
+ put_pages_list(&pagepool);
+ return 0;
+}
+
+static inline int __z_erofs_vle_normalaccess_readpages(
+ struct file *filp,
+ struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages, bool sync)
+{
+ struct inode *const inode = mapping->host;
+
+ struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
+ gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+ struct page *head = NULL;
+ LIST_HEAD(pagepool);
+
+#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+ f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
+#endif
+ for (; nr_pages; --nr_pages) {
+ struct page *page = lru_to_page(pages);
+
+ prefetchw(&page->flags);
+ list_del(&page->lru);
+
+ if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+ list_add(&page->lru, &pagepool);
+ continue;
+ }
+
+ BUG_ON(PagePrivate(page));
+ set_page_private(page, (unsigned long)head);
+ head = page;
+ }
+
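+	/*
+	 * The loop above chained the pages LIFO through page_private: pages
+	 * taken in file order A, B, C leave head -> C -> B -> A, so the walk
+	 * below proceeds in reverse file order.
+	 */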
+ while (head != NULL) {
+ struct page *page = head;
+ int err;
+
+ /* traversal in reverse order */
+ head = (void *)page_private(page);
+
+ err = z_erofs_do_read_page(&f, page, &pagepool);
+ if (err) {
+ struct erofs_vnode *vi = EROFS_V(inode);
+
+ errln("%s, readahead error at page %lu of nid %llu",
+ __func__, page->index, vi->nid);
+ }
+
+ put_page(page);
+ }
+
+ (void)z_erofs_vle_work_iter_end(&f.builder);
+
+ z_erofs_submit_and_unzip(&f, &pagepool, sync);
+
+ if (f.m_iter.mpage != NULL)
+ put_page(f.m_iter.mpage);
+
+ /* clean up the remaining free pages */
+ put_pages_list(&pagepool);
+ return 0;
+}
+
+static int z_erofs_vle_normalaccess_readpages(
+ struct file *filp,
+ struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ return __z_erofs_vle_normalaccess_readpages(filp,
+ mapping, pages, nr_pages,
+		nr_pages < 4 /* decompress small readaheads synchronously */);
+}
+
+const struct address_space_operations z_erofs_vle_normalaccess_aops = {
+ .readpage = z_erofs_vle_normalaccess_readpage,
+ .readpages = z_erofs_vle_normalaccess_readpages,
+};
+
+#define __vle_cluster_advise(x, bit, bits) \
+ ((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
+
+#define __vle_cluster_type(advise) __vle_cluster_advise(advise, \
+ Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT, Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS)
+
+enum {
+ Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
+ Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
+ Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
+ Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
+ Z_EROFS_VLE_CLUSTER_TYPE_MAX
+};
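+
+/*
+ * e.g. (bit positions assumed for illustration: TYPE_BIT == 0,
+ * TYPE_BITS == 2): di_advise == cpu_to_le16(0x0002) decodes as
+ * Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD.
+ */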
+
+#define vle_cluster_type(di) \
+ __vle_cluster_type((di)->di_advise)
+
+static inline unsigned
+vle_compressed_index_clusterofs(unsigned clustersize,
+ struct z_erofs_vle_decompressed_index *di)
+{
+ debugln("%s, vle=%pK, advise=%x (type %u), clusterofs=%x blkaddr=%x",
+ __func__, di, di->di_advise, vle_cluster_type(di),
+ di->di_clusterofs, di->di_u.blkaddr);
+
+ switch (vle_cluster_type(di)) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ break;
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ return di->di_clusterofs;
+ default:
+ BUG_ON(1);
+ }
+ return clustersize;
+}
+
+static inline erofs_blk_t
+vle_extent_blkaddr(struct inode *inode, pgoff_t index)
+{
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ struct erofs_vnode *vi = EROFS_V(inode);
+
+ unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
+ vi->xattr_isize) + sizeof(struct erofs_extent_header) +
+ index * sizeof(struct z_erofs_vle_decompressed_index);
+
+ return erofs_blknr(iloc(sbi, vi->nid) + ofs);
+}
+
+static inline unsigned int
+vle_extent_blkoff(struct inode *inode, pgoff_t index)
+{
+ struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ struct erofs_vnode *vi = EROFS_V(inode);
+
+ unsigned ofs = Z_EROFS_VLE_EXTENT_ALIGN(vi->inode_isize +
+ vi->xattr_isize) + sizeof(struct erofs_extent_header) +
+ index * sizeof(struct z_erofs_vle_decompressed_index);
+
+ return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
+}
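+
+/*
+ * A worked example (values assumed for illustration): with 4KiB blocks,
+ * iloc() == 16384 and ofs == 136 give byte address 16520, i.e. block 4
+ * (erofs_blknr) at offset 136 (erofs_blkoff).
+ */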
+
+/*
+ * Variable-sized Logical Extent (Fixed-sized Physical Cluster) Compression
+ * Mode
+ * ---
+ * In VLE compression mode, variable amounts of logical (uncompressed) data
+ * are compressed into physical clusters of a fixed size.
+ * Each index entry is a "struct z_erofs_vle_decompressed_index".
+ */
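+
+/*
+ * A small example (illustrative numbers only): with 4KiB physical
+ * clusters, a 12KiB logical extent that compresses well can live in a
+ * single 4KiB physical cluster, while incompressible data is stored
+ * "plain", one logical cluster per physical cluster.
+ */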
+static erofs_off_t vle_get_logical_extent_head(
+ struct inode *inode,
+ struct page **page_iter,
+ void **kaddr_iter,
+ unsigned lcn, /* logical cluster number */
+ erofs_blk_t *pcn,
+ unsigned *flags)
+{
+ /* for extent meta */
+ struct page *page = *page_iter;
+ erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
+ struct z_erofs_vle_decompressed_index *di;
+ unsigned long long ofs;
+ const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
+ const unsigned int clustersize = 1 << clusterbits;
+
+ if (page->index != blkaddr) {
+ kunmap_atomic(*kaddr_iter);
+ unlock_page(page);
+ put_page(page);
+
+ *page_iter = page = erofs_get_meta_page(inode->i_sb,
+ blkaddr, false);
+ *kaddr_iter = kmap_atomic(page);
+ }
+
+ di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
+ switch (vle_cluster_type(di)) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ BUG_ON(!di->di_u.delta[0]);
+ BUG_ON(lcn < di->di_u.delta[0]);
+
+ ofs = vle_get_logical_extent_head(inode,
+ page_iter, kaddr_iter,
+ lcn - di->di_u.delta[0], pcn, flags);
+ break;
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ *flags ^= EROFS_MAP_ZIPPED;
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ /* clustersize should be a power of two */
+ ofs = ((unsigned long long)lcn << clusterbits) +
+ (le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
+ *pcn = le32_to_cpu(di->di_u.blkaddr);
+ break;
+ default:
+ BUG_ON(1);
+ }
+ return ofs;
+}
+
+int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ struct page **mpage_ret, int flags)
+{
+	/* logical extent (start, end) offset */
+ unsigned long long ofs, end;
+ struct z_erofs_vle_decompressed_index *di;
+ erofs_blk_t e_blkaddr, pcn;
+ unsigned lcn, logical_cluster_ofs, cluster_type;
+ u32 ofs_rem;
+ struct page *mpage = *mpage_ret;
+ void *kaddr;
+ bool initial;
+ const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits;
+ const unsigned int clustersize = 1 << clusterbits;
+ int err = 0;
+
+ /* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
+ initial = !map->m_llen;
+
+ /* when trying to read beyond EOF, leave it unmapped */
+ if (unlikely(map->m_la >= inode->i_size)) {
+ BUG_ON(!initial);
+ map->m_llen = map->m_la + 1 - inode->i_size;
+ map->m_la = inode->i_size - 1;
+ map->m_flags = 0;
+ goto out;
+ }
+
+ debugln("%s, m_la %llu m_llen %llu --- start", __func__,
+ map->m_la, map->m_llen);
+
+ ofs = map->m_la + map->m_llen;
+
+	/* clustersize should be a power of two */
+ lcn = ofs >> clusterbits;
+ ofs_rem = ofs & (clustersize - 1);
+
+ e_blkaddr = vle_extent_blkaddr(inode, lcn);
+
+ if (mpage == NULL || mpage->index != e_blkaddr) {
+ if (mpage != NULL)
+ put_page(mpage);
+
+ mpage = erofs_get_meta_page(inode->i_sb, e_blkaddr, false);
+ *mpage_ret = mpage;
+ } else {
+ lock_page(mpage);
+ DBG_BUGON(!PageUptodate(mpage));
+ }
+
+ kaddr = kmap_atomic(mpage);
+ di = kaddr + vle_extent_blkoff(inode, lcn);
+
+ debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
+ e_blkaddr, vle_extent_blkoff(inode, lcn));
+
+ logical_cluster_ofs = vle_compressed_index_clusterofs(clustersize, di);
+ if (!initial) {
+ /* [walking mode] 'map' has been already initialized */
+ map->m_llen += logical_cluster_ofs;
+ goto unmap_out;
+ }
+
+ /* by default, compressed */
+ map->m_flags |= EROFS_MAP_ZIPPED;
+
+ end = (u64)(lcn + 1) * clustersize;
+
+ cluster_type = vle_cluster_type(di);
+
+ switch (cluster_type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ if (ofs_rem >= logical_cluster_ofs)
+ map->m_flags ^= EROFS_MAP_ZIPPED;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ if (ofs_rem == logical_cluster_ofs) {
+ pcn = le32_to_cpu(di->di_u.blkaddr);
+ goto exact_hitted;
+ }
+
+ if (ofs_rem > logical_cluster_ofs) {
+ ofs = lcn * clustersize | logical_cluster_ofs;
+ pcn = le32_to_cpu(di->di_u.blkaddr);
+ break;
+ }
+
+ /* logical cluster number should be >= 1 */
+ if (unlikely(!lcn)) {
+ errln("invalid logical cluster 0 at nid %llu",
+ EROFS_V(inode)->nid);
+ err = -EIO;
+ goto unmap_out;
+ }
+ end = (lcn-- * clustersize) | logical_cluster_ofs;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+		/* get the corresponding first chunk */
+ ofs = vle_get_logical_extent_head(inode, mpage_ret,
+ &kaddr, lcn, &pcn, &map->m_flags);
+ mpage = *mpage_ret;
+ break;
+ default:
+ errln("unknown cluster type %u at offset %llu of nid %llu",
+ cluster_type, ofs, EROFS_V(inode)->nid);
+ err = -EIO;
+ goto unmap_out;
+ }
+
+ map->m_la = ofs;
+exact_hitted:
+ map->m_llen = end - ofs;
+ map->m_plen = clustersize;
+ map->m_pa = blknr_to_addr(pcn);
+ map->m_flags |= EROFS_MAP_MAPPED;
+unmap_out:
+ kunmap_atomic(kaddr);
+ unlock_page(mpage);
+out:
+ debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
+ __func__, map->m_la, map->m_pa,
+ map->m_llen, map->m_plen, map->m_flags);
+
+ /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
+ DBG_BUGON(err < 0);
+ return err;
+}
+
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
new file mode 100644
index 000000000000..393998500865
--- /dev/null
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * linux/drivers/staging/erofs/unzip_vle.h
+ *
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#ifndef __EROFS_FS_UNZIP_VLE_H
+#define __EROFS_FS_UNZIP_VLE_H
+
+#include "internal.h"
+#include "unzip_pagevec.h"
+
+/*
+ * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
+ * used for temporarily allocated pages (via erofs_allocpage),
+ * in order to separate those from a NULL mapping (e.g. truncated pages)
+ */
+#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D)
+
+#define z_erofs_is_stagingpage(page) \
+ ((page)->mapping == Z_EROFS_MAPPING_STAGING)
+
+static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
+ struct page *page)
+{
+ if (z_erofs_is_stagingpage(page)) {
+ list_add(&page->lru, page_pool);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Structure fields follow one of the following exclusion rules.
+ *
+ * I: Modifiable by initialization/destruction paths and read-only
+ * for everyone else.
+ *
+ */
+
+#define Z_EROFS_VLE_INLINE_PAGEVECS 3
+
+struct z_erofs_vle_work {
+ /* struct z_erofs_vle_work *left, *right; */
+
+#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
+ struct list_head list;
+
+ atomic_t refcount;
+#endif
+ struct mutex lock;
+
+ /* I: decompression offset in page */
+ unsigned short pageofs;
+ unsigned short nr_pages;
+
+ /* L: queued pages in pagevec[] */
+ unsigned vcnt;
+
+ union {
+ /* L: pagevec */
+ erofs_vtptr_t pagevec[Z_EROFS_VLE_INLINE_PAGEVECS];
+ struct rcu_head rcu;
+ };
+};
+
+#define Z_EROFS_VLE_WORKGRP_FMT_PLAIN 0
+#define Z_EROFS_VLE_WORKGRP_FMT_LZ4 1
+#define Z_EROFS_VLE_WORKGRP_FMT_MASK 1
+
+typedef struct z_erofs_vle_workgroup *z_erofs_vle_owned_workgrp_t;
+
+struct z_erofs_vle_workgroup {
+ struct erofs_workgroup obj;
+ struct z_erofs_vle_work work;
+
+ /* next owned workgroup */
+ z_erofs_vle_owned_workgrp_t next;
+
+ /* compressed pages (including multi-usage pages) */
+ struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
+ unsigned int llen, flags;
+};
+
+/* let's avoid the valid 32-bit kernel addresses */
+
+/* the chained workgroup hasn't submitted io yet (still open) */
+#define Z_EROFS_VLE_WORKGRP_TAIL ((void *)0x5F0ECAFE)
+/* the chained workgroup has already submitted io */
+#define Z_EROFS_VLE_WORKGRP_TAIL_CLOSED ((void *)0x5F0EDEAD)
+
+#define Z_EROFS_VLE_WORKGRP_NIL (NULL)
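+
+/*
+ * A sketch of the chain states built from these sentinels:
+ *
+ *   owned_head -> grp A -> grp B -> ... -> TAIL         (still open)
+ *   owned_head -> grp A -> grp B -> ... -> TAIL_CLOSED  (io submitted)
+ *
+ * NIL marks a workgroup not (yet) linked into any chain.
+ */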
+
+#define z_erofs_vle_workgrp_fmt(grp) \
+ ((grp)->flags & Z_EROFS_VLE_WORKGRP_FMT_MASK)
+
+static inline void z_erofs_vle_set_workgrp_fmt(
+ struct z_erofs_vle_workgroup *grp,
+ unsigned int fmt)
+{
+ grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
+}
+
+#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
+#error multiref decompression is not implemented yet
+#else
+
+#define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
+#define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
+#define z_erofs_vle_work_workgroup(wrk, primary) \
+ ((primary) ? container_of(wrk, \
+ struct z_erofs_vle_workgroup, work) : \
+ ({ BUG(); (void *)NULL; }))
+
+#endif
+
+#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
+
+struct z_erofs_vle_unzip_io {
+ atomic_t pending_bios;
+ z_erofs_vle_owned_workgrp_t head;
+
+ union {
+ wait_queue_head_t wait;
+ struct work_struct work;
+ } u;
+};
+
+struct z_erofs_vle_unzip_io_sb {
+ struct z_erofs_vle_unzip_io io;
+ struct super_block *sb;
+};
+
+#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
+#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
+#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
+
+/*
+ * waiters (a.k.a. ongoing_packs): the number of completions still
+ *   needed before the page can be unlocked
+ * sub-index: 0 for a partial page, >= 1 for a full page sub-index
+ */
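+
+/*
+ * Illustrative bit layout (with the 2 count bits defined above):
+ *
+ *   31 .......... 2 | 1 0
+ *   page sub-index  | count
+ *
+ * e.g. the value 9 (0b1001) encodes sub-index 2 with one waiter left.
+ */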
+typedef atomic_t z_erofs_onlinepage_t;
+
+/* type punning */
+union z_erofs_onlinepage_converter {
+ z_erofs_onlinepage_t *o;
+ unsigned long *v;
+};
+
+static inline unsigned z_erofs_onlinepage_index(struct page *page)
+{
+ union z_erofs_onlinepage_converter u;
+
+ BUG_ON(!PagePrivate(page));
+ u.v = &page_private(page);
+
+ return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+}
+
+static inline void z_erofs_onlinepage_init(struct page *page)
+{
+ union {
+ z_erofs_onlinepage_t o;
+ unsigned long v;
+ /* keep from being unlocked in advance */
+ } u = { .o = ATOMIC_INIT(1) };
+
+ set_page_private(page, u.v);
+ smp_wmb();
+ SetPagePrivate(page);
+}
+
+static inline void z_erofs_onlinepage_fixup(struct page *page,
+ uintptr_t index, bool down)
+{
+ unsigned long *p, o, v, id;
+repeat:
+ p = &page_private(page);
+ o = READ_ONCE(*p);
+
+ id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+ if (id) {
+ if (!index)
+ return;
+
+ BUG_ON(id != index);
+ }
+
+ v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+ ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned)down);
+ if (cmpxchg(p, o, v) != o)
+ goto repeat;
+}
+
+static inline void z_erofs_onlinepage_endio(struct page *page)
+{
+ union z_erofs_onlinepage_converter u;
+ unsigned v;
+
+ BUG_ON(!PagePrivate(page));
+ u.v = &page_private(page);
+
+ v = atomic_dec_return(u.o);
+ if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
+ ClearPagePrivate(page);
+ if (!PageError(page))
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+
+ debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
+}
+
+#define Z_EROFS_VLE_VMAP_ONSTACK_PAGES \
+ min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
+#define Z_EROFS_VLE_VMAP_GLOBAL_PAGES 2048
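+
+/*
+ * e.g. (illustrative): on a 64-bit arch with 16KiB kernel stacks,
+ * THREAD_SIZE / 8 / sizeof(struct page *) == 16384 / 8 / 8 == 256,
+ * which the min_t() above caps at 96 on-stack page pointers.
+ */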
+
+/* unzip_vle_lz4.c */
+extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
+ unsigned clusterpages, struct page **pages,
+ unsigned nr_pages, unsigned short pageofs);
+
+extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+ unsigned clusterpages, struct page **pages,
+ unsigned outlen, unsigned short pageofs,
+ void (*endio)(struct page *));
+
+extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
+ unsigned clusterpages, void *vaddr, unsigned llen,
+ unsigned short pageofs, bool overlapped);
+
+#endif
+
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
new file mode 100644
index 000000000000..f5b665f15be5
--- /dev/null
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/unzip_vle_lz4.c
+ *
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include "unzip_vle.h"
+
+#if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
+#define EROFS_PERCPU_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
+#else
+#define EROFS_PERCPU_NR_PAGES Z_EROFS_VLE_INLINE_PAGEVECS
+#endif
+
+static struct {
+ char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
+} erofs_pcpubuf[NR_CPUS];
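+
+/*
+ * A rough sizing note (numbers illustrative): with 4KiB pages,
+ * EROFS_PERCPU_NR_PAGES == 4 and NR_CPUS == 64, this static pool
+ * occupies 64 * 16KiB == 1MiB of unswappable memory.
+ */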
+
+int z_erofs_vle_plain_copy(struct page **compressed_pages,
+ unsigned clusterpages,
+ struct page **pages,
+ unsigned nr_pages,
+ unsigned short pageofs)
+{
+ unsigned i, j;
+ void *src = NULL;
+ const unsigned righthalf = PAGE_SIZE - pageofs;
+ char *percpu_data;
+ bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };
+
+ preempt_disable();
+ percpu_data = erofs_pcpubuf[smp_processor_id()].data;
+
+ j = 0;
+ for (i = 0; i < nr_pages; j = i++) {
+ struct page *page = pages[i];
+ void *dst;
+
+ if (page == NULL) {
+ if (src != NULL) {
+ if (!mirrored[j])
+ kunmap_atomic(src);
+ src = NULL;
+ }
+ continue;
+ }
+
+ dst = kmap_atomic(page);
+
+ for (; j < clusterpages; ++j) {
+ if (compressed_pages[j] != page)
+ continue;
+
+ BUG_ON(mirrored[j]);
+ memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
+ mirrored[j] = true;
+ break;
+ }
+
+ if (i) {
+ if (src == NULL)
+ src = mirrored[i-1] ?
+ percpu_data + (i-1) * PAGE_SIZE :
+ kmap_atomic(compressed_pages[i-1]);
+
+ memcpy(dst, src + righthalf, pageofs);
+
+ if (!mirrored[i-1])
+ kunmap_atomic(src);
+
+ if (unlikely(i >= clusterpages)) {
+ kunmap_atomic(dst);
+ break;
+ }
+ }
+
+ if (!righthalf)
+ src = NULL;
+ else {
+ src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
+ kmap_atomic(compressed_pages[i]);
+
+ memcpy(dst + pageofs, src, righthalf);
+ }
+
+ kunmap_atomic(dst);
+ }
+
+ if (src != NULL && !mirrored[j])
+ kunmap_atomic(src);
+
+ preempt_enable();
+ return 0;
+}
+
+extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);
+
+int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+ unsigned clusterpages,
+ struct page **pages,
+ unsigned outlen,
+ unsigned short pageofs,
+ void (*endio)(struct page *))
+{
+ void *vin, *vout;
+ unsigned nr_pages, i, j;
+ int ret;
+
+ if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
+ return -ENOTSUPP;
+
+ nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
+
+ if (clusterpages == 1)
+ vin = kmap_atomic(compressed_pages[0]);
+ else
+ vin = erofs_vmap(compressed_pages, clusterpages);
+
+ preempt_disable();
+ vout = erofs_pcpubuf[smp_processor_id()].data;
+
+ ret = z_erofs_unzip_lz4(vin, vout + pageofs,
+ clusterpages * PAGE_SIZE, outlen);
+
+ if (ret >= 0) {
+ outlen = ret;
+ ret = 0;
+ }
+
+ for (i = 0; i < nr_pages; ++i) {
+ j = min((unsigned)PAGE_SIZE - pageofs, outlen);
+
+ if (pages[i] != NULL) {
+ if (ret < 0)
+ SetPageError(pages[i]);
+ else if (clusterpages == 1 && pages[i] == compressed_pages[0])
+ memcpy(vin + pageofs, vout + pageofs, j);
+ else {
+ void *dst = kmap_atomic(pages[i]);
+
+ memcpy(dst + pageofs, vout + pageofs, j);
+ kunmap_atomic(dst);
+ }
+ endio(pages[i]);
+ }
+ vout += PAGE_SIZE;
+ outlen -= j;
+ pageofs = 0;
+ }
+ preempt_enable();
+
+ if (clusterpages == 1)
+ kunmap_atomic(vin);
+ else
+ erofs_vunmap(vin, clusterpages);
+
+ return ret;
+}
+
+int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
+ unsigned clusterpages,
+ void *vout,
+ unsigned llen,
+ unsigned short pageofs,
+ bool overlapped)
+{
+ void *vin;
+ unsigned i;
+ int ret;
+
+ if (overlapped) {
+ preempt_disable();
+ vin = erofs_pcpubuf[smp_processor_id()].data;
+
+ for (i = 0; i < clusterpages; ++i) {
+ void *t = kmap_atomic(compressed_pages[i]);
+
+			memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
+ kunmap_atomic(t);
+ }
+ } else if (clusterpages == 1)
+ vin = kmap_atomic(compressed_pages[0]);
+ else {
+ vin = erofs_vmap(compressed_pages, clusterpages);
+ }
+
+ ret = z_erofs_unzip_lz4(vin, vout + pageofs,
+ clusterpages * PAGE_SIZE, llen);
+ if (ret > 0)
+ ret = 0;
+
+ if (!overlapped) {
+ if (clusterpages == 1)
+ kunmap_atomic(vin);
+ else {
+ erofs_vunmap(vin, clusterpages);
+ }
+ } else
+ preempt_enable();
+
+ return ret;
+}
+
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
new file mode 100644
index 000000000000..595cf90af9bb
--- /dev/null
+++ b/drivers/staging/erofs/utils.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/utils.c
+ *
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include "internal.h"
+#include <linux/pagevec.h>
+
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
+{
+ struct page *page;
+
+ if (!list_empty(pool)) {
+ page = lru_to_page(pool);
+ list_del(&page->lru);
+ } else {
+ page = alloc_pages(gfp | __GFP_NOFAIL, 0);
+
+ BUG_ON(page == NULL);
+ BUG_ON(page->mapping != NULL);
+ }
+ return page;
+}
+
+/* global shrink count (for all mounted EROFS instances) */
+static atomic_long_t erofs_global_shrink_cnt;
+
+#ifdef CONFIG_EROFS_FS_ZIP
+
+/* neither radix_tree nor the future XArray uses tagptr_t yet */
+struct erofs_workgroup *erofs_find_workgroup(
+ struct super_block *sb, pgoff_t index, bool *tag)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_workgroup *grp;
+ int oldcount;
+
+repeat:
+ rcu_read_lock();
+ grp = radix_tree_lookup(&sbi->workstn_tree, index);
+ if (grp != NULL) {
+ *tag = radix_tree_exceptional_entry(grp);
+ grp = (void *)((unsigned long)grp &
+ ~RADIX_TREE_EXCEPTIONAL_ENTRY);
+
+ if (erofs_workgroup_get(grp, &oldcount)) {
+ /* prefer to relax rcu read side */
+ rcu_read_unlock();
+ goto repeat;
+ }
+
+		/* undo the shrink count bumped by erofs_workgroup_put at refcount 1 */
+ if (unlikely(oldcount == 1))
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ BUG_ON(index != grp->index);
+ }
+ rcu_read_unlock();
+ return grp;
+}
+
+int erofs_register_workgroup(struct super_block *sb,
+ struct erofs_workgroup *grp,
+ bool tag)
+{
+ struct erofs_sb_info *sbi;
+ int err;
+
+	/* grp->refcount should not be less than 1 */
+ BUG_ON(!atomic_read(&grp->refcount));
+
+ err = radix_tree_preload(GFP_NOFS);
+ if (err)
+ return err;
+
+ sbi = EROFS_SB(sb);
+ erofs_workstn_lock(sbi);
+
+ if (tag)
+ grp = (void *)((unsigned long)grp |
+ 1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
+
+ err = radix_tree_insert(&sbi->workstn_tree,
+ grp->index, grp);
+
+ if (!err) {
+ __erofs_workgroup_get(grp);
+ }
+
+ erofs_workstn_unlock(sbi);
+ radix_tree_preload_end();
+ return err;
+}
+
+extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
+
+int erofs_workgroup_put(struct erofs_workgroup *grp)
+{
+ int count = atomic_dec_return(&grp->refcount);
+
+ if (count == 1)
+ atomic_long_inc(&erofs_global_shrink_cnt);
+ else if (!count) {
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ erofs_workgroup_free_rcu(grp);
+ }
+ return count;
+}
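+
+/*
+ * Refcount protocol sketch: when the count drops to 1, only the
+ * workstation (radix tree) still references the group, so it becomes
+ * reclaimable and the global shrink count is raised; dropping to 0
+ * frees the group via RCU.
+ */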
+
+unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
+ unsigned long nr_shrink,
+ bool cleanup)
+{
+ pgoff_t first_index = 0;
+ void *batch[PAGEVEC_SIZE];
+ unsigned freed = 0;
+
+ int i, found;
+repeat:
+ erofs_workstn_lock(sbi);
+
+ found = radix_tree_gang_lookup(&sbi->workstn_tree,
+ batch, first_index, PAGEVEC_SIZE);
+
+ for (i = 0; i < found; ++i) {
+ int cnt;
+ struct erofs_workgroup *grp = (void *)
+ ((unsigned long)batch[i] &
+ ~RADIX_TREE_EXCEPTIONAL_ENTRY);
+
+ first_index = grp->index + 1;
+
+ cnt = atomic_read(&grp->refcount);
+ BUG_ON(cnt <= 0);
+
+ if (cleanup)
+ BUG_ON(cnt != 1);
+
+#ifndef EROFS_FS_HAS_MANAGED_CACHE
+ else if (cnt > 1)
+#else
+ if (!erofs_workgroup_try_to_freeze(grp, 1))
+#endif
+ continue;
+
+ if (radix_tree_delete(&sbi->workstn_tree,
+ grp->index) != grp) {
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+skip:
+ erofs_workgroup_unfreeze(grp, 1);
+#endif
+ continue;
+ }
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ if (erofs_try_to_free_all_cached_pages(sbi, grp))
+ goto skip;
+
+ erofs_workgroup_unfreeze(grp, 1);
+#endif
+ /* (rarely) grabbed again when freeing */
+ erofs_workgroup_put(grp);
+
+ ++freed;
+ if (unlikely(!--nr_shrink))
+ break;
+ }
+ erofs_workstn_unlock(sbi);
+
+ if (i && nr_shrink)
+ goto repeat;
+ return freed;
+}
+
+#endif
+
+/* protected by 'erofs_sb_list_lock' */
+static unsigned int shrinker_run_no;
+
+/* protects the mounted 'erofs_sb_list' */
+static DEFINE_SPINLOCK(erofs_sb_list_lock);
+static LIST_HEAD(erofs_sb_list);
+
+void erofs_register_super(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ mutex_init(&sbi->umount_mutex);
+
+ spin_lock(&erofs_sb_list_lock);
+ list_add(&sbi->list, &erofs_sb_list);
+ spin_unlock(&erofs_sb_list_lock);
+}
+
+void erofs_unregister_super(struct super_block *sb)
+{
+ spin_lock(&erofs_sb_list_lock);
+ list_del(&EROFS_SB(sb)->list);
+ spin_unlock(&erofs_sb_list_lock);
+}
+
+unsigned long erofs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ return atomic_long_read(&erofs_global_shrink_cnt);
+}
+
+unsigned long erofs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct erofs_sb_info *sbi;
+ struct list_head *p;
+
+ unsigned long nr = sc->nr_to_scan;
+ unsigned int run_no;
+ unsigned long freed = 0;
+
+ spin_lock(&erofs_sb_list_lock);
+	do {
+		run_no = ++shrinker_run_no;
+	} while (run_no == 0);
+
+ /* Iterate over all mounted superblocks and try to shrink them */
+ p = erofs_sb_list.next;
+ while (p != &erofs_sb_list) {
+ sbi = list_entry(p, struct erofs_sb_info, list);
+
+ /*
+ * We move the ones we do to the end of the list, so we stop
+ * when we see one we have already done.
+ */
+ if (sbi->shrinker_run_no == run_no)
+ break;
+
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+
+ spin_unlock(&erofs_sb_list_lock);
+ sbi->shrinker_run_no = run_no;
+
+#ifdef CONFIG_EROFS_FS_ZIP
+ freed += erofs_shrink_workstation(sbi, nr, false);
+#endif
+
+ spin_lock(&erofs_sb_list_lock);
+ /* Get the next list element before we move this one */
+ p = p->next;
+
+ /*
+ * Move this one to the end of the list to provide some
+ * fairness.
+ */
+ list_move_tail(&sbi->list, &erofs_sb_list);
+ mutex_unlock(&sbi->umount_mutex);
+
+ if (freed >= nr)
+ break;
+ }
+ spin_unlock(&erofs_sb_list_lock);
+ return freed;
+}
+
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
new file mode 100644
index 000000000000..0e9cfeccdf99
--- /dev/null
+++ b/drivers/staging/erofs/xattr.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/staging/erofs/xattr.c
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#include <linux/security.h>
+#include "xattr.h"
+
+struct xattr_iter {
+ struct super_block *sb;
+ struct page *page;
+ void *kaddr;
+
+ erofs_blk_t blkaddr;
+ unsigned ofs;
+};
+
+static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
+{
+	/* only init_inode_xattrs uses the non-atomic mapping, and only once */
+ if (unlikely(!atomic))
+ kunmap(it->page);
+ else
+ kunmap_atomic(it->kaddr);
+ unlock_page(it->page);
+ put_page(it->page);
+}
+
+static void init_inode_xattrs(struct inode *inode)
+{
+ struct xattr_iter it;
+ unsigned i;
+ struct erofs_xattr_ibody_header *ih;
+ struct erofs_sb_info *sbi;
+ struct erofs_vnode *vi;
+ bool atomic_map;
+
+ if (likely(inode_has_inited_xattr(inode)))
+ return;
+
+ vi = EROFS_V(inode);
+ BUG_ON(!vi->xattr_isize);
+
+ sbi = EROFS_I_SB(inode);
+ it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
+ it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
+
+ it.page = erofs_get_inline_page(inode, it.blkaddr);
+ BUG_ON(IS_ERR(it.page));
+
+ /* read in shared xattr array (non-atomic, see kmalloc below) */
+ it.kaddr = kmap(it.page);
+ atomic_map = false;
+
+ ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
+
+ vi->xattr_shared_count = ih->h_shared_count;
+	vi->xattr_shared_xattrs = kmalloc_array(
+ vi->xattr_shared_count, sizeof(unsigned),
+ GFP_KERNEL | __GFP_NOFAIL);
+
+ /* let's skip ibody header */
+ it.ofs += sizeof(struct erofs_xattr_ibody_header);
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
+ /* cannot be unaligned */
+ BUG_ON(it.ofs != EROFS_BLKSIZ);
+ xattr_iter_end(&it, atomic_map);
+
+ it.page = erofs_get_meta_page(inode->i_sb,
+ ++it.blkaddr, S_ISDIR(inode->i_mode));
+ BUG_ON(IS_ERR(it.page));
+
+ it.kaddr = kmap_atomic(it.page);
+ atomic_map = true;
+ it.ofs = 0;
+ }
+ vi->xattr_shared_xattrs[i] =
+ le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
+ it.ofs += sizeof(__le32);
+ }
+ xattr_iter_end(&it, atomic_map);
+
+ inode_set_inited_xattr(inode);
+}
+
+struct xattr_iter_handlers {
+ int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *);
+ int (*name)(struct xattr_iter *, unsigned, char *, unsigned);
+ int (*alloc_buffer)(struct xattr_iter *, unsigned);
+ void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
+};
+
+static void xattr_iter_fixup(struct xattr_iter *it)
+{
+ if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
+ xattr_iter_end(it, true);
+
+ it->blkaddr += erofs_blknr(it->ofs);
+ it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+ BUG_ON(IS_ERR(it->page));
+
+ it->kaddr = kmap_atomic(it->page);
+ it->ofs = erofs_blkoff(it->ofs);
+ }
+}
+
+static int inline_xattr_iter_begin(struct xattr_iter *it,
+ struct inode *inode)
+{
+ struct erofs_vnode *const vi = EROFS_V(inode);
+ struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
+ unsigned xattr_header_sz, inline_xattr_ofs;
+
+ xattr_header_sz = inlinexattr_header_size(inode);
+ if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
+ BUG_ON(xattr_header_sz > vi->xattr_isize);
+ return -ENOATTR;
+ }
+
+ inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
+
+ it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
+ it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
+
+ it->page = erofs_get_inline_page(inode, it->blkaddr);
+ BUG_ON(IS_ERR(it->page));
+ it->kaddr = kmap_atomic(it->page);
+
+ return vi->xattr_isize - xattr_header_sz;
+}
+
+static int xattr_foreach(struct xattr_iter *it,
+ struct xattr_iter_handlers *op, unsigned *tlimit)
+{
+ struct erofs_xattr_entry entry;
+ unsigned value_sz, processed, slice;
+ int err;
+
+ /* 0. fixup blkaddr, ofs, ipage */
+ xattr_iter_fixup(it);
+
+	/*
+	 * 1. read the xattr entry into memory; since entries are
+	 *    EROFS_XATTR_ALIGNed, an entry cannot straddle a page
+	 *    boundary and is therefore fully inside this page
+	 */
+ entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
+ if (tlimit != NULL) {
+ unsigned entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);
+
+ BUG_ON(*tlimit < entry_sz);
+ *tlimit -= entry_sz;
+ }
+
+ it->ofs += sizeof(struct erofs_xattr_entry);
+ value_sz = le16_to_cpu(entry.e_value_size);
+
+ /* handle entry */
+ err = op->entry(it, &entry);
+ if (err) {
+ it->ofs += entry.e_name_len + value_sz;
+ goto out;
+ }
+
+ /* 2. handle xattr name (ofs will finally be at the end of name) */
+ processed = 0;
+
+ while (processed < entry.e_name_len) {
+ if (it->ofs >= EROFS_BLKSIZ) {
+ BUG_ON(it->ofs > EROFS_BLKSIZ);
+
+ xattr_iter_fixup(it);
+ it->ofs = 0;
+ }
+
+ slice = min_t(unsigned, PAGE_SIZE - it->ofs,
+ entry.e_name_len - processed);
+
+ /* handle name */
+ err = op->name(it, processed, it->kaddr + it->ofs, slice);
+ if (err) {
+ it->ofs += entry.e_name_len - processed + value_sz;
+ goto out;
+ }
+
+ it->ofs += slice;
+ processed += slice;
+ }
+
+ /* 3. handle xattr value */
+ processed = 0;
+
+ if (op->alloc_buffer != NULL) {
+ err = op->alloc_buffer(it, value_sz);
+ if (err) {
+ it->ofs += value_sz;
+ goto out;
+ }
+ }
+
+ while (processed < value_sz) {
+ if (it->ofs >= EROFS_BLKSIZ) {
+ BUG_ON(it->ofs > EROFS_BLKSIZ);
+ xattr_iter_fixup(it);
+ it->ofs = 0;
+ }
+
+ slice = min_t(unsigned, PAGE_SIZE - it->ofs,
+ value_sz - processed);
+ op->value(it, processed, it->kaddr + it->ofs, slice);
+ it->ofs += slice;
+ processed += slice;
+ }
+
+out:
+	/* xattrs are 4-byte aligned, so round ofs up for the next entry */
+ it->ofs = EROFS_XATTR_ALIGN(it->ofs);
+ return err;
+}
+
+struct getxattr_iter {
+ struct xattr_iter it;
+
+ char *buffer;
+ int buffer_size, index;
+ struct qstr name;
+};
+
+static int xattr_entrymatch(struct xattr_iter *_it,
+ struct erofs_xattr_entry *entry)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ return (it->index != entry->e_name_index ||
+ it->name.len != entry->e_name_len) ? -ENOATTR : 0;
+}
+
+static int xattr_namematch(struct xattr_iter *_it,
+ unsigned processed, char *buf, unsigned len)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
+}
+
+static int xattr_checkbuffer(struct xattr_iter *_it,
+ unsigned value_sz)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+ int err = it->buffer_size < value_sz ? -ERANGE : 0;
+
+ it->buffer_size = value_sz;
+ return it->buffer == NULL ? 1 : err;
+}
+
+static void xattr_copyvalue(struct xattr_iter *_it,
+ unsigned processed, char *buf, unsigned len)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ memcpy(it->buffer + processed, buf, len);
+}
+
+static struct xattr_iter_handlers find_xattr_handlers = {
+ .entry = xattr_entrymatch,
+ .name = xattr_namematch,
+ .alloc_buffer = xattr_checkbuffer,
+ .value = xattr_copyvalue
+};
+
+static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
+{
+ int ret;
+ unsigned remaining;
+
+ ret = inline_xattr_iter_begin(&it->it, inode);
+ if (ret < 0)
+ return ret;
+
+ remaining = ret;
+ while (remaining) {
+ ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
+ if (ret >= 0)
+ break;
+ }
+ xattr_iter_end(&it->it, true);
+
+ return ret < 0 ? ret : it->buffer_size;
+}
+
+static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
+{
+ struct erofs_vnode *const vi = EROFS_V(inode);
+ struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
+ unsigned i;
+ int ret = -ENOATTR;
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ erofs_blk_t blkaddr =
+ xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
+
+ it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
+
+ if (!i || blkaddr != it->it.blkaddr) {
+ if (i)
+ xattr_iter_end(&it->it, true);
+
+ it->it.page = erofs_get_meta_page(inode->i_sb,
+ blkaddr, false);
+ BUG_ON(IS_ERR(it->it.page));
+ it->it.kaddr = kmap_atomic(it->it.page);
+ it->it.blkaddr = blkaddr;
+ }
+
+ ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
+ if (ret >= 0)
+ break;
+ }
+ if (vi->xattr_shared_count)
+ xattr_iter_end(&it->it, true);
+
+ return ret < 0 ? ret : it->buffer_size;
+}
+
+static bool erofs_xattr_user_list(struct dentry *dentry)
+{
+ return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
+}
+
+static bool erofs_xattr_trusted_list(struct dentry *dentry)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+int erofs_getxattr(struct inode *inode, int index,
+ const char *name,
+ void *buffer, size_t buffer_size)
+{
+ int ret;
+ struct getxattr_iter it;
+
+ if (unlikely(name == NULL))
+ return -EINVAL;
+
+ init_inode_xattrs(inode);
+
+ it.index = index;
+
+ it.name.len = strlen(name);
+ if (it.name.len > EROFS_NAME_LEN)
+ return -ERANGE;
+ it.name.name = name;
+
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+
+ it.it.sb = inode->i_sb;
+ ret = inline_getxattr(inode, &it);
+ if (ret == -ENOATTR)
+ ret = shared_getxattr(inode, &it);
+ return ret;
+}
+
+static int erofs_xattr_generic_get(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ struct erofs_vnode *const vi = EROFS_V(inode);
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+
+ switch (handler->flags) {
+ case EROFS_XATTR_INDEX_USER:
+ if (!test_opt(sbi, XATTR_USER))
+ return -EOPNOTSUPP;
+ break;
+ case EROFS_XATTR_INDEX_TRUSTED:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ break;
+ case EROFS_XATTR_INDEX_SECURITY:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!vi->xattr_isize)
+ return -ENOATTR;
+
+ return erofs_getxattr(inode, handler->flags, name, buffer, size);
+}
+
+const struct xattr_handler erofs_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .flags = EROFS_XATTR_INDEX_USER,
+ .list = erofs_xattr_user_list,
+ .get = erofs_xattr_generic_get,
+};
+
+const struct xattr_handler erofs_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .flags = EROFS_XATTR_INDEX_TRUSTED,
+ .list = erofs_xattr_trusted_list,
+ .get = erofs_xattr_generic_get,
+};
+
+#ifdef CONFIG_EROFS_FS_SECURITY
+const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .flags = EROFS_XATTR_INDEX_SECURITY,
+ .get = erofs_xattr_generic_get,
+};
+#endif
+
+const struct xattr_handler *erofs_xattr_handlers[] = {
+ &erofs_xattr_user_handler,
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
+#endif
+ &erofs_xattr_trusted_handler,
+#ifdef CONFIG_EROFS_FS_SECURITY
+ &erofs_xattr_security_handler,
+#endif
+ NULL,
+};
+
+struct listxattr_iter {
+ struct xattr_iter it;
+
+ struct dentry *dentry;
+ char *buffer;
+ int buffer_size, buffer_ofs;
+};
+
+static int xattr_entrylist(struct xattr_iter *_it,
+ struct erofs_xattr_entry *entry)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+ unsigned prefix_len;
+ const char *prefix;
+
+ const struct xattr_handler *h =
+ erofs_xattr_handler(entry->e_name_index);
+
+ if (h == NULL || (h->list != NULL && !h->list(it->dentry)))
+ return 1;
+
+ /* Note that at least one of 'prefix' and 'name' should be non-NULL */
+ prefix = h->prefix != NULL ? h->prefix : h->name;
+ prefix_len = strlen(prefix);
+
+ if (it->buffer == NULL) {
+ it->buffer_ofs += prefix_len + entry->e_name_len + 1;
+ return 1;
+ }
+
+ if (it->buffer_ofs + prefix_len
+ + entry->e_name_len + 1 > it->buffer_size)
+ return -ERANGE;
+
+ memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
+ it->buffer_ofs += prefix_len;
+ return 0;
+}
+
+static int xattr_namelist(struct xattr_iter *_it,
+ unsigned processed, char *buf, unsigned len)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+
+ memcpy(it->buffer + it->buffer_ofs, buf, len);
+ it->buffer_ofs += len;
+ return 0;
+}
+
+static int xattr_skipvalue(struct xattr_iter *_it,
+ unsigned value_sz)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+
+ it->buffer[it->buffer_ofs++] = '\0';
+ return 1;
+}
+
+static struct xattr_iter_handlers list_xattr_handlers = {
+ .entry = xattr_entrylist,
+ .name = xattr_namelist,
+ .alloc_buffer = xattr_skipvalue,
+ .value = NULL
+};
+
+static int inline_listxattr(struct listxattr_iter *it)
+{
+ int ret;
+ unsigned remaining;
+
+ ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
+ if (ret < 0)
+ return ret;
+
+ remaining = ret;
+ while (remaining) {
+ ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
+ if (ret < 0)
+ break;
+ }
+ xattr_iter_end(&it->it, true);
+ return ret < 0 ? ret : it->buffer_ofs;
+}
+
+static int shared_listxattr(struct listxattr_iter *it)
+{
+ struct inode *const inode = d_inode(it->dentry);
+ struct erofs_vnode *const vi = EROFS_V(inode);
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ erofs_blk_t blkaddr =
+ xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
+
+ it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
+ if (!i || blkaddr != it->it.blkaddr) {
+ if (i)
+ xattr_iter_end(&it->it, true);
+
+ it->it.page = erofs_get_meta_page(inode->i_sb,
+ blkaddr, false);
+ BUG_ON(IS_ERR(it->it.page));
+ it->it.kaddr = kmap_atomic(it->it.page);
+ it->it.blkaddr = blkaddr;
+ }
+
+ ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
+ if (ret < 0)
+ break;
+ }
+ if (vi->xattr_shared_count)
+ xattr_iter_end(&it->it, true);
+
+ return ret < 0 ? ret : it->buffer_ofs;
+}
+
+ssize_t erofs_listxattr(struct dentry *dentry,
+ char *buffer, size_t buffer_size)
+{
+ int ret;
+ struct listxattr_iter it;
+
+ init_inode_xattrs(d_inode(dentry));
+
+ it.dentry = dentry;
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+ it.buffer_ofs = 0;
+
+ it.it.sb = dentry->d_sb;
+
+ ret = inline_listxattr(&it);
+ if (ret < 0 && ret != -ENOATTR)
+ return ret;
+ return shared_listxattr(&it);
+}
+
diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h
new file mode 100644
index 000000000000..0c7379282fc5
--- /dev/null
+++ b/drivers/staging/erofs/xattr.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * linux/drivers/staging/erofs/xattr.h
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
+#include "internal.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+/* Attribute not found */
+#define ENOATTR ENODATA
+
+static inline unsigned inlinexattr_header_size(struct inode *inode)
+{
+ return sizeof(struct erofs_xattr_ibody_header)
+ + sizeof(u32) * EROFS_V(inode)->xattr_shared_count;
+}
+
+static inline erofs_blk_t
+xattrblock_addr(struct erofs_sb_info *sbi, unsigned xattr_id)
+{
+#ifdef CONFIG_EROFS_FS_XATTR
+ return sbi->xattr_blkaddr +
+ xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned
+xattrblock_offset(struct erofs_sb_info *sbi, unsigned xattr_id)
+{
+ return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
+}
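+
+/*
+ * A worked example (4KiB blocks assumed): xattr_id == 1030 refers to
+ * byte 1030 * 4 == 4120 of the shared xattr area, i.e. block
+ * xattr_blkaddr + 1 at offset 24.
+ */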
+
+extern const struct xattr_handler erofs_xattr_user_handler;
+extern const struct xattr_handler erofs_xattr_trusted_handler;
+#ifdef CONFIG_EROFS_FS_SECURITY
+extern const struct xattr_handler erofs_xattr_security_handler;
+#endif
+
+static inline const struct xattr_handler *erofs_xattr_handler(unsigned index)
+{
+static const struct xattr_handler *xattr_handler_map[] = {
+ [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+ &posix_acl_default_xattr_handler,
+#endif
+ [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+#ifdef CONFIG_EROFS_FS_SECURITY
+ [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+#endif
+};
+ return index && index < ARRAY_SIZE(xattr_handler_map) ?
+ xattr_handler_map[index] : NULL;
+}
+
+#ifdef CONFIG_EROFS_FS_XATTR
+
+extern const struct inode_operations erofs_generic_xattr_iops;
+extern const struct inode_operations erofs_dir_xattr_iops;
+
+int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
+ssize_t erofs_listxattr(struct dentry *, char *, size_t);
+#else
+static int __maybe_unused erofs_getxattr(struct inode *inode, int index,
+ const char *name,
+ void *buffer, size_t buffer_size)
+{
+ return -ENOTSUPP;
+}
+
+static ssize_t __maybe_unused erofs_listxattr(struct dentry *dentry,
+ char *buffer, size_t buffer_size)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif
+
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
index 0b605303813e..3427a858d17c 100644
--- a/drivers/staging/fbtft/fb_hx8347d.c
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -92,7 +92,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
* VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
* VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
unsigned long mask[] = {
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index fd3dd671509f..86e140244aab 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -192,7 +192,7 @@ static int set_var(struct fbtft_par *par)
}
#ifdef GAMMA_ADJ
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int gamma_adj(struct fbtft_par *par, u32 *curves)
{
unsigned long mask[] = {
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 501eee7dce4c..740c0acbecd8 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -211,7 +211,7 @@ static int set_var(struct fbtft_par *par)
* VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
* VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
unsigned long mask[] = {
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index d6b1d4be9ff4..2cf75f2e03e2 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -205,7 +205,7 @@ static int set_var(struct fbtft_par *par)
* VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
* VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
unsigned long mask[] = {
diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c
index a10e8c9de438..9ccd0823c3ab 100644
--- a/drivers/staging/fbtft/fb_ili9341.c
+++ b/drivers/staging/fbtft/fb_ili9341.c
@@ -111,7 +111,7 @@ static int set_var(struct fbtft_par *par)
* Positive: Par1 Par2 [...] Par15
* Negative: Par1 Par2 [...] Par15
*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
int i;
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index 75295760f491..d3d6871d8c47 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -16,25 +16,31 @@
#define DRVNAME "fb_s6d02a1"
static const s16 default_init_sequence[] = {
-
-1, 0xf0, 0x5a, 0x5a,
-1, 0xfc, 0x5a, 0x5a,
- -1, 0xfa, 0x02, 0x1f, 0x00, 0x10, 0x22, 0x30, 0x38, 0x3A, 0x3A, 0x3A, 0x3A, 0x3A, 0x3d, 0x02, 0x01,
+ -1, 0xfa, 0x02, 0x1f, 0x00, 0x10, 0x22, 0x30, 0x38,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x3A, 0x3d, 0x02, 0x01,
- -1, 0xfb, 0x21, 0x00, 0x02, 0x04, 0x07, 0x0a, 0x0b, 0x0c, 0x0c, 0x16, 0x1e, 0x30, 0x3f, 0x01, 0x02,
+ -1, 0xfb, 0x21, 0x00, 0x02, 0x04, 0x07, 0x0a, 0x0b,
+ 0x0c, 0x0c, 0x16, 0x1e, 0x30, 0x3f, 0x01, 0x02,
/* power setting sequence */
- -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x01, 0x01, 0x00, 0x1f, 0x1f,
+ -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x01,
+ 0x01, 0x00, 0x1f, 0x1f,
- -1, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+ -1, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x3f,
+ 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
- -1, 0xf5, 0x00, 0x70, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x66, 0x06,
+ -1, 0xf5, 0x00, 0x70, 0x66, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x6d, 0x66, 0x06,
- -1, 0xf6, 0x02, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x01, 0x00,
+ -1, 0xf6, 0x02, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x06, 0x01, 0x00,
- -1, 0xf2, 0x00, 0x01, 0x03, 0x08, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x04, 0x08, 0x08,
+ -1, 0xf2, 0x00, 0x01, 0x03, 0x08, 0x08, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x04, 0x08, 0x08,
-1, 0xf8, 0x11,
@@ -54,7 +60,8 @@ static const s16 default_init_sequence[] = {
-1, 0xf3, 0x00, 0x0f,
-2, 50,
- -1, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+ -1, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3f, 0x3f,
+ 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
-2, 50,
-1, 0xf3, 0x00, 0x1f,
@@ -65,9 +72,11 @@ static const s16 default_init_sequence[] = {
-1, 0xf3, 0x00, 0xff,
-2, 50,
- -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x01, 0x00, 0x16, 0x16,
+ -1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00,
+ 0x01, 0x00, 0x16, 0x16,
- -1, 0xf4, 0x00, 0x09, 0x00, 0x00, 0x00, 0x3f, 0x3f, 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
+ -1, 0xf4, 0x00, 0x09, 0x00, 0x00, 0x00, 0x3f, 0x3f,
+ 0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
/* initializing sequence */
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index b90244259d43..aa716f33420a 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -120,7 +120,7 @@ static int set_var(struct fbtft_par *par)
* PKP0 PKP1 PKP2 PKP3 PKP4 PKP5 PKP6 PKP7 PKP8 PKP9 PKP10 PKP11 VRP0 VRP1
* PKN0 PKN1 PKN2 PKN3 PKN4 PKN5 PKN6 PKN7 PRN8 PRN9 PRN10 PRN11 VRN0 VRN1
*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
unsigned long mask[] = {
@@ -154,6 +154,7 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 3fc18c0a6f11..00096f8d249a 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -89,7 +89,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
__func__, on ? "true" : "false");
write_reg(par, on ? 0xAE : 0xAF);
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index cbf22e1f4b61..c9b18b3ba4ab 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -38,7 +38,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x0E, 0x2B00);
write_reg(par, 0x1E, 0x00B7);
write_reg(par, 0x01,
- BIT(13) | (par->bgr << 11) | BIT(9) | (HEIGHT - 1));
+ BIT(13) | (par->bgr << 11) | BIT(9) | (HEIGHT - 1));
write_reg(par, 0x02, 0x0600);
write_reg(par, 0x10, 0x0000);
write_reg(par, 0x05, 0x0000);
@@ -98,8 +98,8 @@ static int set_var(struct fbtft_par *par)
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register 11h */
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s: skipping since custom init_display() is used\n",
- __func__);
+ "%s: skipping since custom init_display() is used\n",
+ __func__);
return 0;
}
@@ -126,7 +126,7 @@ static int set_var(struct fbtft_par *par)
* VRP0 VRP1 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 PKP5
* VRN0 VRN1 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 PKN5
*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
unsigned long mask[] = {
@@ -153,6 +153,7 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index 9276be499303..50172ddd94ae 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -148,7 +148,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
__func__, on ? "true" : "false");
if (on)
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
index 1a469b3c92d4..f974f7fc4d79 100644
--- a/drivers/staging/fbtft/fb_ssd1325.c
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -88,7 +88,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
__func__, on ? "true" : "false");
if (on)
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 383e197cf56a..0b614c84822e 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -168,7 +168,7 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
__func__, on ? "true" : "false");
if (on)
write_reg(par, 0xAE);
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index b8ef75f5e856..3da091b4d297 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -126,36 +126,44 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
for (i = 0; i < 63; i++) {
if (i > 0 && curves[i] < 2) {
dev_err(par->info->device,
- "Illegal value in Grayscale Lookup Table at index %d. " \
- "Must be greater than 1\n", i);
+ "Illegal value in Grayscale Lookup Table at index %d : %d. Must be greater than 1\n",
+ i, curves[i]);
return -EINVAL;
}
acc += curves[i];
tmp[i] = acc;
if (acc > 180) {
dev_err(par->info->device,
- "Illegal value(s) in Grayscale Lookup Table. " \
- "At index=%d, the accumulated value has exceeded 180\n", i);
+ "Illegal value(s) in Grayscale Lookup Table. At index=%d : %d, the accumulated value has exceeded 180\n",
+ i, acc);
return -EINVAL;
}
}
write_reg(par, 0xB8,
- tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7],
- tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14], tmp[15],
- tmp[16], tmp[17], tmp[18], tmp[19], tmp[20], tmp[21], tmp[22], tmp[23],
- tmp[24], tmp[25], tmp[26], tmp[27], tmp[28], tmp[29], tmp[30], tmp[31],
- tmp[32], tmp[33], tmp[34], tmp[35], tmp[36], tmp[37], tmp[38], tmp[39],
- tmp[40], tmp[41], tmp[42], tmp[43], tmp[44], tmp[45], tmp[46], tmp[47],
- tmp[48], tmp[49], tmp[50], tmp[51], tmp[52], tmp[53], tmp[54], tmp[55],
- tmp[56], tmp[57], tmp[58], tmp[59], tmp[60], tmp[61], tmp[62]);
+ tmp[0], tmp[1], tmp[2], tmp[3],
+ tmp[4], tmp[5], tmp[6], tmp[7],
+ tmp[8], tmp[9], tmp[10], tmp[11],
+ tmp[12], tmp[13], tmp[14], tmp[15],
+ tmp[16], tmp[17], tmp[18], tmp[19],
+ tmp[20], tmp[21], tmp[22], tmp[23],
+ tmp[24], tmp[25], tmp[26], tmp[27],
+ tmp[28], tmp[29], tmp[30], tmp[31],
+ tmp[32], tmp[33], tmp[34], tmp[35],
+ tmp[36], tmp[37], tmp[38], tmp[39],
+ tmp[40], tmp[41], tmp[42], tmp[43],
+ tmp[44], tmp[45], tmp[46], tmp[47],
+ tmp[48], tmp[49], tmp[50], tmp[51],
+ tmp[52], tmp[53], tmp[54], tmp[55],
+ tmp[56], tmp[57], tmp[58], tmp[59],
+ tmp[60], tmp[61], tmp[62]);
return 0;
}
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
__func__, on ? "true" : "false");
if (on)
write_reg(par, 0xAE);
@@ -187,8 +195,8 @@ static int update_onboard_backlight(struct backlight_device *bd)
bool on;
fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: power=%d, fb_blank=%d\n",
- __func__, bd->props.power, bd->props.fb_blank);
+ "%s: power=%d, fb_blank=%d\n",
+ __func__, bd->props.power, bd->props.fb_blank);
on = (bd->props.power == FB_BLANK_UNBLANK) &&
(bd->props.fb_blank == FB_BLANK_UNBLANK);
@@ -211,7 +219,8 @@ static void register_onboard_backlight(struct fbtft_par *par)
bl_props.power = FB_BLANK_POWERDOWN;
bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &bl_ops, &bl_props);
+ par->info->device, par, &bl_ops,
+ &bl_props);
if (IS_ERR(bd)) {
dev_err(par->info->device,
"cannot register backlight device (%ld)\n",
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index 631208bd3a17..9670a8989b91 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -133,7 +133,7 @@ static int set_var(struct fbtft_par *par)
* VRF0P VOS0P PK0P PK1P PK2P PK3P PK4P PK5P PK6P PK7P PK8P PK9P SELV0P SELV1P SELV62P SELV63P
* VRF0N VOS0N PK0N PK1N PK2N PK3N PK4N PK5N PK6N PK7N PK8N PK9N SELV0N SELV1N SELV62N SELV63N
*/
-#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
int i, j;
@@ -145,13 +145,18 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
for (i = 0; i < par->gamma.num_curves; i++)
write_reg(par, 0xE0 + i,
- CURVE(i, 0), CURVE(i, 1), CURVE(i, 2), CURVE(i, 3),
- CURVE(i, 4), CURVE(i, 5), CURVE(i, 6), CURVE(i, 7),
- CURVE(i, 8), CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
- CURVE(i, 12), CURVE(i, 13), CURVE(i, 14), CURVE(i, 15));
+ CURVE(i, 0), CURVE(i, 1),
+ CURVE(i, 2), CURVE(i, 3),
+ CURVE(i, 4), CURVE(i, 5),
+ CURVE(i, 6), CURVE(i, 7),
+ CURVE(i, 8), CURVE(i, 9),
+ CURVE(i, 10), CURVE(i, 11),
+ CURVE(i, 12), CURVE(i, 13),
+ CURVE(i, 14), CURVE(i, 15));
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index 7d7573a7b615..3c3f387936e8 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -201,13 +201,12 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
c = i * par->gamma.num_values;
for (j = 0; j < par->gamma.num_values; j++)
curves[c + j] &= gamma_par_mask[j];
- write_reg(
- par, PVGAMCTRL + i,
- curves[c + 0], curves[c + 1], curves[c + 2],
- curves[c + 3], curves[c + 4], curves[c + 5],
- curves[c + 6], curves[c + 7], curves[c + 8],
- curves[c + 9], curves[c + 10], curves[c + 11],
- curves[c + 12], curves[c + 13]);
+ write_reg(par, PVGAMCTRL + i,
+ curves[c + 0], curves[c + 1], curves[c + 2],
+ curves[c + 3], curves[c + 4], curves[c + 5],
+ curves[c + 6], curves[c + 7], curves[c + 8],
+ curves[c + 9], curves[c + 10], curves[c + 11],
+ curves[c + 12], curves[c + 13]);
}
return 0;
}
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 4d65567eefe2..dfaf8bc70f73 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -129,7 +129,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
__func__, on ? "true" : "false");
if (on)
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index bfd1527f20f7..e77178157f1b 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -46,7 +46,8 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
va_end(args);
fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
- par->info->device, u8, par->buf, len, "%s: ", __func__);
+ par->info->device, u8, par->buf,
+ len, "%s: ", __func__);
ret = par->fbtftops.write(par, par->buf, len);
if (ret < 0) {
@@ -89,9 +90,15 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
return 0;
}
-#define RGB565toRGB323(c) (((c&0xE000)>>8) | ((c&0600)>>6) | ((c&0x001C)>>2))
-#define RGB565toRGB332(c) (((c&0xE000)>>8) | ((c&0700)>>6) | ((c&0x0018)>>3))
-#define RGB565toRGB233(c) (((c&0xC000)>>8) | ((c&0700)>>5) | ((c&0x001C)>>2))
+#define RGB565toRGB323(c) ((((c) & 0xE000) >> 8) |\
+ (((c) & 000600) >> 6) |\
+ (((c) & 0x001C) >> 2))
+#define RGB565toRGB332(c) ((((c) & 0xE000) >> 8) |\
+ (((c) & 000700) >> 6) |\
+ (((c) & 0x0018) >> 3))
+#define RGB565toRGB233(c) ((((c) & 0xC000) >> 8) |\
+ (((c) & 000700) >> 5) |\
+ (((c) & 0x001C) >> 2))
static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
{
@@ -169,7 +176,7 @@ static int init_display(struct fbtft_par *par)
version = firmware_version(par);
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Firmware version: %x.%02x\n",
- version >> 8, version & 0xFF);
+ version >> 8, version & 0xFF);
if (mode == 332)
par->fbtftops.write_vmem = write_vmem_8bit;
@@ -220,9 +227,9 @@ static int backlight_chip_update_status(struct backlight_device *bd)
int brightness = bd->props.brightness;
fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: brightness=%d, power=%d, fb_blank=%d\n",
- __func__, bd->props.brightness, bd->props.power,
- bd->props.fb_blank);
+ "%s: brightness=%d, power=%d, fb_blank=%d\n", __func__,
+ bd->props.brightness, bd->props.power,
+ bd->props.fb_blank);
if (bd->props.power != FB_BLANK_UNBLANK)
brightness = 0;
@@ -250,7 +257,8 @@ static void register_chip_backlight(struct fbtft_par *par)
bl_props.brightness = DEFAULT_BRIGHTNESS;
bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &bl_ops, &bl_props);
+ par->info->device, par, &bl_ops,
+ &bl_props);
if (IS_ERR(bd)) {
dev_err(par->info->device,
"cannot register backlight device (%ld)\n",
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 871b307d83cb..8ce1ff9b6c2a 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -79,7 +79,8 @@ void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...)
*(((u8 *)buf) + i) = (u8)va_arg(args, unsigned int);
va_end(args);
fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
- par->info->device, u8, buf, len, "%s: ", __func__);
+ par->info->device, u8, buf, len, "%s: ",
+ __func__);
}
if (len <= 0)
return;
@@ -129,7 +130,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
size_t startbyte_size = 0;
fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
+ __func__, offset, len);
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
@@ -153,8 +154,8 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
while (remain) {
to_copy = min(tx_array_size, remain);
- dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
- to_copy, remain - to_copy);
+ dev_dbg(par->info->device, "to_copy=%zu, remain=%zu\n",
+ to_copy, remain - to_copy);
for (i = 0; i < to_copy; i++)
txbuf16[i] = cpu_to_be16(vmem16[i]);
@@ -183,7 +184,7 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
int ret = 0;
fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
+ __func__, offset, len);
if (!par->txbuf.buf) {
dev_err(par->info->device, "%s: txbuf.buf is NULL\n", __func__);
@@ -197,8 +198,8 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
while (remain) {
to_copy = min(tx_array_size, remain);
- dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
- to_copy, remain - to_copy);
+ dev_dbg(par->info->device, "to_copy=%zu, remain=%zu\n",
+ to_copy, remain - to_copy);
#ifdef __LITTLE_ENDIAN
for (i = 0; i < to_copy; i += 2) {
@@ -233,7 +234,7 @@ int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len)
u16 *vmem16;
fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
+ __func__, offset, len);
vmem16 = (u16 *)(par->info->screen_buffer + offset);
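These hunks sit in the chunked write path: pixels are copied from the 16-bit framebuffer into a transmit buffer in tx_array_size-sized pieces, and, as the cpu_to_be16() calls in the surrounding code show, each pixel is byte-swapped on little-endian hosts because the panel consumes big-endian words. A minimal sketch of that copy-and-swap loop, with hypothetical src/txbuf16 names standing in for the driver's buffers:

    size_t remain = len / 2;
    size_t to_copy, i;
    const u16 *src = vmem16;

    while (remain) {
            to_copy = min(tx_array_size, remain);
            for (i = 0; i < to_copy; i++)
                    txbuf16[i] = cpu_to_be16(src[i]); /* panel expects big-endian */
            /* ...transmit to_copy * 2 bytes over the bus... */
            src += to_copy;
            remain -= to_copy;
    }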
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 731e47149af8..a2df02d97a8e 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -137,8 +137,8 @@ static int fbtft_request_gpios(struct fbtft_par *par)
flags = fbtft_request_gpios_match(par, gpio);
if (flags != FBTFT_GPIO_NO_MATCH) {
ret = devm_gpio_request_one(par->info->device,
- gpio->gpio, flags,
- par->info->device->driver->name);
+ gpio->gpio, flags,
+ par->info->device->driver->name);
if (ret < 0) {
dev_err(par->info->device,
"%s: gpio_request_one('%s'=%d) failed with %d\n",
@@ -249,8 +249,8 @@ static int fbtft_backlight_update_status(struct backlight_device *bd)
bool polarity = par->polarity;
fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: polarity=%d, power=%d, fb_blank=%d\n",
- __func__, polarity, bd->props.power, bd->props.fb_blank);
+ "%s: polarity=%d, power=%d, fb_blank=%d\n",
+ __func__, polarity, bd->props.power, bd->props.fb_blank);
if ((bd->props.power == FB_BLANK_UNBLANK) &&
(bd->props.fb_blank == FB_BLANK_UNBLANK))
@@ -372,7 +372,7 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
if (start_line > par->info->var.yres - 1 ||
end_line > par->info->var.yres - 1) {
dev_warn(par->info->device,
- "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
+ "%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
__func__, start_line,
end_line, par->info->var.yres - 1);
start_line = 0;
@@ -767,7 +767,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbops->fb_setcolreg = fbtft_fb_setcolreg;
fbops->fb_blank = fbtft_fb_blank;
- fbdefio->delay = HZ/fps;
+ fbdefio->delay = HZ / fps;
fbdefio->deferred_io = fbtft_deferred_io;
fb_deferred_io_init(info);
@@ -817,8 +817,8 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
info->pseudo_palette = par->pseudo_palette;
if (par->gamma.curves && gamma) {
- if (fbtft_gamma_parse_str(par,
- par->gamma.curves, gamma, strlen(gamma)))
+ if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
+ strlen(gamma)))
goto alloc_fail;
}
@@ -1045,8 +1045,8 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
while (p && !(val & 0xFFFF0000)) {
if (i > 63) {
dev_err(par->info->device,
- "%s: Maximum register values exceeded\n",
- __func__);
+ "%s: Maximum register values exceeded\n",
+ __func__);
return -EINVAL;
}
buf[i++] = val;
@@ -1166,8 +1166,8 @@ int fbtft_init_display(struct fbtft_par *par)
while (par->init_sequence[i] >= 0) {
if (j > 63) {
dev_err(par->info->device,
- "%s: Maximum register values exceeded\n",
- __func__);
+ "%s: Maximum register values exceeded\n",
+ __func__);
return -EINVAL;
}
buf[j++] = par->init_sequence[i++];
@@ -1193,7 +1193,8 @@ int fbtft_init_display(struct fbtft_par *par)
case -2:
i++;
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: mdelay(%d)\n", par->init_sequence[i]);
+ "init: mdelay(%d)\n",
+ par->init_sequence[i]);
mdelay(par->init_sequence[i++]);
break;
default:
@@ -1225,8 +1226,8 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
- if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
- par->gpio.dc < 0) {
+ if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
+ par->gpio.dc < 0) {
dev_err(par->info->device,
"Missing info about 'dc' gpio. Aborting.\n");
return -EINVAL;
@@ -1321,7 +1322,8 @@ static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
* Return: 0 if successful, negative if error
*/
int fbtft_probe_common(struct fbtft_display *display,
- struct spi_device *sdev, struct platform_device *pdev)
+ struct spi_device *sdev,
+ struct platform_device *pdev)
{
struct device *dev;
struct fb_info *info;
@@ -1393,11 +1395,12 @@ int fbtft_probe_common(struct fbtft_display *display,
par->spi->bits_per_word = 9;
} else {
dev_warn(&par->spi->dev,
- "9-bit SPI not available, emulating using 8-bit.\n");
+ "9-bit SPI not available, emulating using 8-bit.\n");
/* allocate buffer with room for dc bits */
par->extra = devm_kzalloc(par->info->device,
- par->txbuf.len + (par->txbuf.len / 8) + 8,
- GFP_KERNEL);
+ par->txbuf.len +
+ (par->txbuf.len / 8) + 8,
+ GFP_KERNEL);
if (!par->extra) {
ret = -ENOMEM;
goto out_release;
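The devm_kzalloc() reshuffle above sizes the 9-bit-emulation buffer as txbuf.len + txbuf.len / 8 + 8. Emulating 9-bit SPI words on an 8-bit controller inserts one dc bit for every 8 data bits, so the payload grows by one eighth; the trailing 8 bytes appear to be slack for rounding. Worked example with an illustrative 4 KiB buffer:

    /* 4096 + 4096 / 8 + 8 = 4096 + 512 + 8 = 4616 bytes */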
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index f4a591919f62..b5051d3d46a6 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -14,7 +14,7 @@ int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len)
struct spi_message m;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%d): ", __func__, len);
if (!par->spi) {
dev_err(par->info->device,
@@ -47,7 +47,7 @@ int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len)
u64 val, dc, tmp;
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%d): ", __func__, len);
if (!par->extra) {
dev_err(par->info->device, "%s: error: par->extra is NULL\n",
@@ -109,14 +109,15 @@ int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len)
txbuf[0] = par->startbyte | 0x3;
t.tx_buf = txbuf;
fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8,
- txbuf, len, "%s(len=%d) txbuf => ", __func__, len);
+ txbuf, len, "%s(len=%d) txbuf => ",
+ __func__, len);
}
spi_message_init(&m);
spi_message_add_tail(&t, &m);
ret = spi_sync(par->spi, &m);
fbtft_par_dbg_hex(DEBUG_READ, par, par->info->device, u8, buf, len,
- "%s(len=%d) buf <= ", __func__, len);
+ "%s(len=%d) buf <= ", __func__, len);
return ret;
}
@@ -135,7 +136,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%d): ", __func__, len);
while (len--) {
data = *(u8 *)buf;
@@ -151,7 +152,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
for (i = 0; i < 8; i++) {
if ((data & 1) != (prev_data & 1))
gpio_set_value(par->gpio.db[i],
- data & 1);
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
@@ -185,7 +186,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%d): ", __func__, len);
while (len) {
data = *(u16 *)buf;
@@ -201,7 +202,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
for (i = 0; i < 16; i++) {
if ((data & 1) != (prev_data & 1))
gpio_set_value(par->gpio.db[i],
- data & 1);
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
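The realigned gpio_set_value() calls above live inside the parallel-bus write loops, which carry a small optimization: the previously written word is remembered and only the data lines whose bit actually changed get toggled, saving one gpio write per unchanged line. A minimal sketch of the pattern, with a hypothetical set_line() standing in for gpio_set_value():

    u16 data = *(u16 *)buf;
    u16 tmp = data, prev = prev_data;
    int i;

    for (i = 0; i < 16; i++) {
            if ((tmp & 1) != (prev & 1))
                    set_line(i, tmp & 1); /* only touch lines that changed */
            tmp >>= 1;
            prev >>= 1;
    }
    prev_data = data;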
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
index 712096659aa0..2a5c630dab87 100644
--- a/drivers/staging/fbtft/fbtft-sysfs.c
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -126,7 +126,8 @@ static ssize_t store_gamma_curve(struct device *device,
mutex_lock(&par->gamma.lock);
memcpy(par->gamma.curves, tmp_curves,
- par->gamma.num_curves * par->gamma.num_values * sizeof(tmp_curves[0]));
+ par->gamma.num_curves * par->gamma.num_values *
+ sizeof(tmp_curves[0]));
mutex_unlock(&par->gamma.lock);
return count;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index c7cb4a7896f4..798a8fe98e95 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -64,16 +64,16 @@ struct fbtft_ops {
void (*write_register)(struct fbtft_par *par, int len, ...);
void (*set_addr_win)(struct fbtft_par *par,
- int xs, int ys, int xe, int ye);
+ int xs, int ys, int xe, int ye);
void (*reset)(struct fbtft_par *par);
void (*mkdirty)(struct fb_info *info, int from, int to);
void (*update_display)(struct fbtft_par *par,
- unsigned int start_line, unsigned int end_line);
+ unsigned int start_line, unsigned int end_line);
int (*init_display)(struct fbtft_par *par);
int (*blank)(struct fbtft_par *par, bool on);
unsigned long (*request_gpios_match)(struct fbtft_par *par,
- const struct fbtft_gpio *gpio);
+ const struct fbtft_gpio *gpio);
int (*request_gpios)(struct fbtft_par *par);
int (*verify_gpios)(struct fbtft_par *par);
@@ -234,8 +234,8 @@ struct fbtft_par {
#define NUMARGS(...) (sizeof((int[]){__VA_ARGS__})/sizeof(int))
-#define write_reg(par, ...) \
- par->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__)
+#define write_reg(par, ...) \
+ ((par)->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__))
/* fbtft-core.c */
int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc);
@@ -404,8 +404,9 @@ do { \
#define fbtft_par_dbg_hex(level, par, dev, type, buf, num, format, arg...) \
do { \
- if (unlikely(par->debug & level)) \
- fbtft_dbg_hex(dev, sizeof(type), buf, num * sizeof(type), format, ##arg); \
+ if (unlikely((par)->debug & (level))) \
+ fbtft_dbg_hex(dev, sizeof(type), buf,\
+ (num) * sizeof(type), format, ##arg); \
} while (0)
#endif /* __LINUX_FBTFT_H */
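The write_reg() hunk above leans on the NUMARGS() helper defined a few lines earlier: a compound-literal int array is built from __VA_ARGS__, and the sizeof ratio yields the argument count at compile time, so callers never pass a length by hand. Illustration:

    #define NUMARGS(...)  (sizeof((int[]){__VA_ARGS__}) / sizeof(int))

    /*
     * NUMARGS(0x2A, 0, 1, 0, 5) == 5, so write_reg(par, 0x2A, 0, 1, 0, 5)
     * expands to par->fbtftops.write_register(par, 5, 0x2A, 0, 1, 0, 5).
     */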
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index ec8477674b7d..50e97da993e7 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -21,12 +21,13 @@ static struct platform_device *p_device;
static char *name;
module_param(name, charp, 0000);
-MODULE_PARM_DESC(name, "Devicename (required). name=list => list all supported devices.");
+MODULE_PARM_DESC(name,
+ "Devicename (required). name=list => list all supported devices.");
static unsigned int rotate;
module_param(rotate, uint, 0000);
MODULE_PARM_DESC(rotate,
-"Angle to rotate display counter clockwise: 0, 90, 180, 270");
+ "Angle to rotate display counter clockwise: 0, 90, 180, 270");
static unsigned int busnum;
module_param(busnum, uint, 0000);
@@ -47,7 +48,7 @@ MODULE_PARM_DESC(mode, "SPI mode (override device default)");
static char *gpios;
module_param(gpios, charp, 0000);
MODULE_PARM_DESC(gpios,
-"List of gpios. Comma separated with the form: reset:23,dc:24 (when overriding the default, all gpios must be specified)");
+ "List of gpios. Comma separated with the form: reset:23,dc:24 (when overriding the default, all gpios must be specified)");
static unsigned int fps;
module_param(fps, uint, 0000);
@@ -56,7 +57,7 @@ MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
static char *gamma;
module_param(gamma, charp, 0000);
MODULE_PARM_DESC(gamma,
-"String representation of Gamma Curve(s). Driver specific.");
+ "String representation of Gamma Curve(s). Driver specific.");
static int txbuflen;
module_param(txbuflen, int, 0000);
@@ -65,7 +66,7 @@ MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)");
static int bgr = -1;
module_param(bgr, int, 0000);
MODULE_PARM_DESC(bgr,
-"BGR bit (supported by some drivers).");
+ "BGR bit (supported by some drivers).");
static unsigned int startbyte;
module_param(startbyte, uint, 0000);
@@ -95,12 +96,12 @@ MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
static unsigned long debug;
module_param(debug, ulong, 0000);
MODULE_PARM_DESC(debug,
-"level: 0-7 (the remaining 29 bits is for advanced usage)");
+ "level: 0-7 (the remaining 29 bits is for advanced usage)");
static unsigned int verbose = 3;
module_param(verbose, uint, 0000);
MODULE_PARM_DESC(verbose,
-"0 silent, >0 show gpios, >1 show devices, >2 show devices before (default=3)");
+ "0 silent, >0 show gpios, >1 show devices, >2 show devices before (default=3)");
struct fbtft_device_display {
char *name;
@@ -112,7 +113,7 @@ static void fbtft_device_pdev_release(struct device *dev);
static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len);
static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye);
+ int xs, int ys, int xe, int ye);
#define ADAFRUIT18_GAMMA \
"02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \
@@ -261,6 +262,10 @@ static const s16 waveshare32b_init_sequence[] = {
-3
};
+#define PIOLED_GAMMA "0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 " \
+ "2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 " \
+ "3 3 3 4 4 4 4 4 4 4 4 4 4 4 4"
+
/* Supported displays in alphabetical order */
static struct fbtft_device_display displays[] = {
{
@@ -832,7 +837,6 @@ static struct fbtft_device_display displays[] = {
}
}
}, {
-
.name = "piscreen",
.spi = &(struct spi_board_info) {
.modalias = "fb_ili9486",
@@ -889,14 +893,7 @@ static struct fbtft_device_display displays[] = {
{ "dc", 25 },
{},
},
- .gamma = "0 2 2 2 2 2 2 2 "
- "2 2 2 2 2 2 2 2 "
- "2 2 2 2 2 2 2 2 "
- "2 2 2 2 2 2 2 3 "
- "3 3 3 3 3 3 3 3 "
- "3 3 3 3 3 3 3 3 "
- "3 3 3 4 4 4 4 4 "
- "4 4 4 4 4 4 4"
+ .gamma = PIOLED_GAMMA
}
}
}, {
@@ -1223,14 +1220,14 @@ static struct fbtft_device_display displays[] = {
.name = "",
.id = 0,
.dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- {},
+ .release = fbtft_device_pdev_release,
+ .platform_data = &(struct fbtft_platform_data) {
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
},
},
},
- },
}
};
@@ -1243,7 +1240,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
#endif
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%d): ", __func__, len);
+ "%s(len=%d): ", __func__, len);
while (len) {
data = *(u16 *)buf;
@@ -1259,7 +1256,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
for (i = 0; i < 16; i++) {
if ((data & 1) != (prev_data & 1))
gpio_set_value(par->gpio.db[i],
- data & 1);
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
@@ -1285,7 +1282,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
}
static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
+ int xs, int ys, int xe, int ye)
{
write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2);
write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1);
@@ -1476,7 +1473,7 @@ static int __init fbtft_device_init(void)
size_t len;
len = strlcpy(displays[i].spi->modalias, name,
- SPI_NAME_SIZE);
+ SPI_NAME_SIZE);
if (len >= SPI_NAME_SIZE)
pr_warn("modalias (name) truncated to: %s\n",
displays[i].spi->modalias);
@@ -1582,7 +1579,6 @@ static void __exit fbtft_device_exit(void)
if (p_device)
platform_device_unregister(p_device);
-
}
arch_initcall(fbtft_device_init);
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index f676c9b853f1..2af474469e7d 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -679,22 +679,27 @@ static int flexfb_probe_common(struct spi_device *sdev,
if (par->spi->master->bits_per_word_mask
& SPI_BPW_MASK(9)) {
par->spi->bits_per_word = 9;
- } else {
- dev_warn(dev,
- "9-bit SPI not available, emulating using 8-bit.\n");
- /* allocate buffer with room for dc bits */
- par->extra = devm_kzalloc(par->info->device,
- par->txbuf.len + (par->txbuf.len / 8) + 8,
- GFP_KERNEL);
- if (!par->extra) {
- ret = -ENOMEM;
- goto out_release;
- }
- par->fbtftops.write = fbtft_write_spi_emulate_9;
+ break;
}
+
+ dev_warn(dev,
+ "9-bit SPI not available, emulating using 8-bit.\n");
+ /* allocate buffer with room for dc bits */
+ par->extra = devm_kzalloc(par->info->device,
+ par->txbuf.len
+ + (par->txbuf.len / 8) + 8,
+ GFP_KERNEL);
+ if (!par->extra) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+ par->fbtftops.write = fbtft_write_spi_emulate_9;
+
break;
default:
- dev_err(dev, "argument 'buswidth': %d is not supported with SPI.\n", buswidth);
+ dev_err(dev,
+ "argument 'buswidth': %d is not supported with SPI.\n",
+ buswidth);
return -EINVAL;
}
} else {
diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
index 77b0b74f835a..9315ecdba612 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Freescale DPAA2 Ethernet controller
#
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
index 3b040e8d6a4e..9801528db2a5 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
@@ -1,32 +1,5 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 396371728aa1..9329fcad95ac 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -55,8 +28,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
-const char dpaa2_eth_drv_version[] = "0.1";
-
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
@@ -455,7 +426,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_len(fd, skb->len);
- dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, sgt_buf);
@@ -508,7 +479,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_format(fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, buffer_start);
@@ -522,8 +493,6 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* back-pointed to is also freed.
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
- * Optionally, return the frame annotation status word (FAS), which needs
- * to be checked if we're on the confirmation path.
*/
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
const struct dpaa2_fd *fd)
@@ -767,7 +736,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
skb_free_frag(vaddr);
}
}
@@ -1245,25 +1214,6 @@ static void dpaa2_eth_get_stats(struct net_device *net_dev,
}
}
-static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
-{
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int err;
-
- /* Set the maximum Rx frame length to match the transmit side;
- * account for L2 headers when computing the MFL
- */
- err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
- (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
- if (err) {
- netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
- return err;
- }
-
- net_dev->mtu = mtu;
- return 0;
-}
-
/* Copy mac unicast addresses from @net_dev to @priv.
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
@@ -1471,7 +1421,6 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_init = dpaa2_eth_init,
.ndo_set_mac_address = dpaa2_eth_set_addr,
.ndo_get_stats64 = dpaa2_eth_get_stats,
- .ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
.ndo_set_features = dpaa2_eth_set_features,
.ndo_do_ioctl = dpaa2_eth_ioctl,
@@ -2217,10 +2166,10 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
return err;
}
- /* have the interface implicitly distribute traffic based on supported
- * header fields
+ /* have the interface implicitly distribute traffic based on
+ * the default hash key
*/
- err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
+ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
if (err)
dev_err(dev, "Failed to configure hashing\n");
@@ -2385,9 +2334,14 @@ static int netdev_init(struct net_device *net_dev)
return err;
}
- /* Set MTU limits */
- net_dev->min_mtu = 68;
+ /* Set MTU upper limit; lower limit is 68B (default value) */
net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+ DPAA2_ETH_MFL);
+ if (err) {
+ dev_err(dev, "dpni_set_max_frame_length() failed\n");
+ return err;
+ }
/* Set actual number of queues in the net device */
num_queues = dpaa2_eth_queue_count(priv);
@@ -2678,7 +2632,6 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_portal_free(priv->mc_io);
- dev_set_drvdata(dev, NULL);
free_netdev(net_dev);
dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
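The netdev_init() hunk above complements the removal of dpaa2_eth_change_mtu() earlier in this file: instead of reprogramming the hardware on every MTU change, the maximum frame length is set once at probe time to cover the largest supported MTU, and net_dev->max_mtu alone bounds user requests. The deleted callback's arithmetic added L2 header overhead on top of the MTU; a hedged sketch, assuming DPAA2_ETH_L2_MAX_FRM() accounts for an Ethernet header plus a VLAN tag:

    /* max frame length = MTU + L2 overhead, e.g. 1500 + 18 = 1518 bytes */
    mfl = mtu + VLAN_ETH_HLEN;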
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
index 905a4e6be8fa..d54cb0b99d08 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA2_ETH_H
@@ -37,8 +10,8 @@
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
-#include "../../fsl-mc/include/dpaa2-io.h"
-#include "../../fsl-mc/include/dpaa2-fd.h"
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
#include "dpni.h"
#include "dpni-cmd.h"
@@ -124,21 +97,13 @@ struct dpaa2_eth_swa {
#define DPAA2_FD_FRC_FAICFDV 0x0400
/* Error bits in FD CTRL */
-#define DPAA2_FD_CTRL_UFD 0x00000004
-#define DPAA2_FD_CTRL_SBE 0x00000008
-#define DPAA2_FD_CTRL_FSE 0x00000020
-#define DPAA2_FD_CTRL_FAERR 0x00000040
-
-#define DPAA2_FD_RX_ERR_MASK (DPAA2_FD_CTRL_SBE | \
- DPAA2_FD_CTRL_FAERR)
-#define DPAA2_FD_TX_ERR_MASK (DPAA2_FD_CTRL_UFD | \
- DPAA2_FD_CTRL_SBE | \
- DPAA2_FD_CTRL_FSE | \
- DPAA2_FD_CTRL_FAERR)
+#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
+#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
+ FD_CTRL_SBE | \
+ FD_CTRL_FSE | \
+ FD_CTRL_FAERR)
/* Annotation bits in FD CTRL */
-#define DPAA2_FD_CTRL_PTA 0x00800000
-#define DPAA2_FD_CTRL_PTV1 0x00400000
#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */
/* Frame annotation status */
@@ -304,10 +269,10 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
- void (*consume)(struct dpaa2_eth_priv *,
- struct dpaa2_eth_channel *,
- const struct dpaa2_fd *,
- struct napi_struct *,
+ void (*consume)(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi,
u16 queue_id);
struct dpaa2_eth_fq_stats stats;
};
@@ -377,11 +342,14 @@ struct dpaa2_eth_priv {
u64 rx_hash_fields;
};
-/* default Rx hash options, set during probing */
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
| RXH_L4_B_2_3)
+/* default Rx hash options, set during probing */
+#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
#define dpaa2_eth_hash_enabled(priv) \
((priv)->dpni_attrs.num_queues > 1)
@@ -389,7 +357,6 @@ struct dpaa2_eth_priv {
#define DPAA2_CLASSIFIER_DMA_SIZE 256
extern const struct ethtool_ops dpaa2_ethtool_ops;
-extern const char dpaa2_eth_drv_version[];
extern int dpaa2_phc_index;
static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index 1ae779ae8c99..8056a95e1265 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/net_tstamp.h>
@@ -82,8 +55,6 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, dpaa2_eth_drv_version,
- sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
index 02290a088391..6de613b13e4d 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
@@ -1,39 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPKG_H_
#define __FSL_DPKG_H_
#include <linux/types.h>
-#include "net.h"
/* Data Path Key Generator API
* Contains initialization APIs and runtime APIs for the Key Generator
@@ -86,48 +57,393 @@ struct dpkg_mask {
u8 offset;
};
+/* Protocol fields */
+
+/* Ethernet fields */
+#define NH_FLD_ETH_DA BIT(0)
+#define NH_FLD_ETH_SA BIT(1)
+#define NH_FLD_ETH_LENGTH BIT(2)
+#define NH_FLD_ETH_TYPE BIT(3)
+#define NH_FLD_ETH_FINAL_CKSUM BIT(4)
+#define NH_FLD_ETH_PADDING BIT(5)
+#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1)
+
+/* VLAN fields */
+#define NH_FLD_VLAN_VPRI BIT(0)
+#define NH_FLD_VLAN_CFI BIT(1)
+#define NH_FLD_VLAN_VID BIT(2)
+#define NH_FLD_VLAN_LENGTH BIT(3)
+#define NH_FLD_VLAN_TYPE BIT(4)
+#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/* IP (generic) fields */
+#define NH_FLD_IP_VER BIT(0)
+#define NH_FLD_IP_DSCP BIT(2)
+#define NH_FLD_IP_ECN BIT(3)
+#define NH_FLD_IP_PROTO BIT(4)
+#define NH_FLD_IP_SRC BIT(5)
+#define NH_FLD_IP_DST BIT(6)
+#define NH_FLD_IP_TOS_TC BIT(7)
+#define NH_FLD_IP_ID BIT(8)
+#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1)
+
+/* IPV4 fields */
+#define NH_FLD_IPV4_VER BIT(0)
+#define NH_FLD_IPV4_HDR_LEN BIT(1)
+#define NH_FLD_IPV4_TOS BIT(2)
+#define NH_FLD_IPV4_TOTAL_LEN BIT(3)
+#define NH_FLD_IPV4_ID BIT(4)
+#define NH_FLD_IPV4_FLAG_D BIT(5)
+#define NH_FLD_IPV4_FLAG_M BIT(6)
+#define NH_FLD_IPV4_OFFSET BIT(7)
+#define NH_FLD_IPV4_TTL BIT(8)
+#define NH_FLD_IPV4_PROTO BIT(9)
+#define NH_FLD_IPV4_CKSUM BIT(10)
+#define NH_FLD_IPV4_SRC_IP BIT(11)
+#define NH_FLD_IPV4_DST_IP BIT(12)
+#define NH_FLD_IPV4_OPTS BIT(13)
+#define NH_FLD_IPV4_OPTS_COUNT BIT(14)
+#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1)
+
+/* IPV6 fields */
+#define NH_FLD_IPV6_VER BIT(0)
+#define NH_FLD_IPV6_TC BIT(1)
+#define NH_FLD_IPV6_SRC_IP BIT(2)
+#define NH_FLD_IPV6_DST_IP BIT(3)
+#define NH_FLD_IPV6_NEXT_HDR BIT(4)
+#define NH_FLD_IPV6_FL BIT(5)
+#define NH_FLD_IPV6_HOP_LIMIT BIT(6)
+#define NH_FLD_IPV6_ID BIT(7)
+#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1)
+
+/* ICMP fields */
+#define NH_FLD_ICMP_TYPE BIT(0)
+#define NH_FLD_ICMP_CODE BIT(1)
+#define NH_FLD_ICMP_CKSUM BIT(2)
+#define NH_FLD_ICMP_ID BIT(3)
+#define NH_FLD_ICMP_SQ_NUM BIT(4)
+#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1)
+
+/* IGMP fields */
+#define NH_FLD_IGMP_VERSION BIT(0)
+#define NH_FLD_IGMP_TYPE BIT(1)
+#define NH_FLD_IGMP_CKSUM BIT(2)
+#define NH_FLD_IGMP_DATA BIT(3)
+#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1)
+
+/* TCP fields */
+#define NH_FLD_TCP_PORT_SRC BIT(0)
+#define NH_FLD_TCP_PORT_DST BIT(1)
+#define NH_FLD_TCP_SEQ BIT(2)
+#define NH_FLD_TCP_ACK BIT(3)
+#define NH_FLD_TCP_OFFSET BIT(4)
+#define NH_FLD_TCP_FLAGS BIT(5)
+#define NH_FLD_TCP_WINDOW BIT(6)
+#define NH_FLD_TCP_CKSUM BIT(7)
+#define NH_FLD_TCP_URGPTR BIT(8)
+#define NH_FLD_TCP_OPTS BIT(9)
+#define NH_FLD_TCP_OPTS_COUNT BIT(10)
+#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1)
+
+/* UDP fields */
+#define NH_FLD_UDP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_PORT_DST BIT(1)
+#define NH_FLD_UDP_LEN BIT(2)
+#define NH_FLD_UDP_CKSUM BIT(3)
+#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1)
+
+/* UDP-lite fields */
+#define NH_FLD_UDP_LITE_PORT_SRC BIT(0)
+#define NH_FLD_UDP_LITE_PORT_DST BIT(1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1)
+
+/* UDP-encap-ESP fields */
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1)
+#define NH_FLD_UDP_ENC_ESP_LEN BIT(2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3)
+#define NH_FLD_UDP_ENC_ESP_SPI BIT(4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_PORT_SRC BIT(0)
+#define NH_FLD_SCTP_PORT_DST BIT(1)
+#define NH_FLD_SCTP_VER_TAG BIT(2)
+#define NH_FLD_SCTP_CKSUM BIT(3)
+#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1)
+
+/* DCCP fields */
+#define NH_FLD_DCCP_PORT_SRC BIT(0)
+#define NH_FLD_DCCP_PORT_DST BIT(1)
+#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1)
+
+/* IPHC fields */
+#define NH_FLD_IPHC_CID BIT(0)
+#define NH_FLD_IPHC_CID_TYPE BIT(1)
+#define NH_FLD_IPHC_HCINDEX BIT(2)
+#define NH_FLD_IPHC_GEN BIT(3)
+#define NH_FLD_IPHC_D_BIT BIT(4)
+#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8)
+#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1)
+
+/* L2TPV2 fields */
+#define NH_FLD_L2TPV2_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4)
+#define NH_FLD_L2TPV2_VERSION BIT(5)
+#define NH_FLD_L2TPV2_LEN BIT(6)
+#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7)
+#define NH_FLD_L2TPV2_SESSION_ID BIT(8)
+#define NH_FLD_L2TPV2_NS BIT(9)
+#define NH_FLD_L2TPV2_NR BIT(10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11)
+#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12)
+#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1)
+
+/* L2TPV3 fields */
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5)
+#define NH_FLD_L2TPV3_CTRL_SENT BIT(6)
+#define NH_FLD_L2TPV3_CTRL_RECV BIT(7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_SESS_VERSION BIT(1)
+#define NH_FLD_L2TPV3_SESS_ID BIT(2)
+#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1)
+
+/* PPP fields */
+#define NH_FLD_PPP_PID BIT(0)
+#define NH_FLD_PPP_COMPRESSED BIT(1)
+#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1)
+
+/* PPPoE fields */
+#define NH_FLD_PPPOE_VER BIT(0)
+#define NH_FLD_PPPOE_TYPE BIT(1)
+#define NH_FLD_PPPOE_CODE BIT(2)
+#define NH_FLD_PPPOE_SID BIT(3)
+#define NH_FLD_PPPOE_LEN BIT(4)
+#define NH_FLD_PPPOE_SESSION BIT(5)
+#define NH_FLD_PPPOE_PID BIT(6)
+#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1)
+
+/* PPP-Mux fields */
+#define NH_FLD_PPPMUX_PID BIT(0)
+#define NH_FLD_PPPMUX_CKSUM BIT(1)
+#define NH_FLD_PPPMUX_COMPRESSED BIT(2)
+#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1)
+
+/* PPP-Mux sub-frame fields */
+#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0)
+#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2)
+#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1)
+
+/* LLC fields */
+#define NH_FLD_LLC_DSAP BIT(0)
+#define NH_FLD_LLC_SSAP BIT(1)
+#define NH_FLD_LLC_CTRL BIT(2)
+#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1)
+
+/* NLPID fields */
+#define NH_FLD_NLPID_NLPID BIT(0)
+#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1)
+
+/* SNAP fields */
+#define NH_FLD_SNAP_OUI BIT(0)
+#define NH_FLD_SNAP_PID BIT(1)
+#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1)
+
+/* LLC SNAP fields */
+#define NH_FLD_LLC_SNAP_TYPE BIT(0)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1)
+
+/* ARP fields */
+#define NH_FLD_ARP_HTYPE BIT(0)
+#define NH_FLD_ARP_PTYPE BIT(1)
+#define NH_FLD_ARP_HLEN BIT(2)
+#define NH_FLD_ARP_PLEN BIT(3)
+#define NH_FLD_ARP_OPER BIT(4)
+#define NH_FLD_ARP_SHA BIT(5)
+#define NH_FLD_ARP_SPA BIT(6)
+#define NH_FLD_ARP_THA BIT(7)
+#define NH_FLD_ARP_TPA BIT(8)
+#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1)
+
+/* RFC2684 fields */
+#define NH_FLD_RFC2684_LLC BIT(0)
+#define NH_FLD_RFC2684_NLPID BIT(1)
+#define NH_FLD_RFC2684_OUI BIT(2)
+#define NH_FLD_RFC2684_PID BIT(3)
+#define NH_FLD_RFC2684_VPN_OUI BIT(4)
+#define NH_FLD_RFC2684_VPN_IDX BIT(5)
+#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1)
+
+/* User defined fields */
+#define NH_FLD_USER_DEFINED_SRCPORT BIT(0)
+#define NH_FLD_USER_DEFINED_PCDID BIT(1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1)
+
+/* Payload fields */
+#define NH_FLD_PAYLOAD_BUFFER BIT(0)
+#define NH_FLD_PAYLOAD_SIZE BIT(1)
+#define NH_FLD_MAX_FRM_SIZE BIT(2)
+#define NH_FLD_MIN_FRM_SIZE BIT(3)
+#define NH_FLD_PAYLOAD_TYPE BIT(4)
+#define NH_FLD_FRAME_SIZE BIT(5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1)
+
+/* GRE fields */
+#define NH_FLD_GRE_TYPE BIT(0)
+#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1)
+
+/* MINENCAP fields */
+#define NH_FLD_MINENCAP_SRC_IP BIT(0)
+#define NH_FLD_MINENCAP_DST_IP BIT(1)
+#define NH_FLD_MINENCAP_TYPE BIT(2)
+#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1)
+
+/* IPSEC AH fields */
+#define NH_FLD_IPSEC_AH_SPI BIT(0)
+#define NH_FLD_IPSEC_AH_NH BIT(1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1)
+
+/* IPSEC ESP fields */
+#define NH_FLD_IPSEC_ESP_SPI BIT(0)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1)
+
+/* MPLS fields */
+#define NH_FLD_MPLS_LABEL_STACK BIT(0)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1)
+
+/* MACSEC fields */
+#define NH_FLD_MACSEC_SECTAG BIT(0)
+#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1)
+
+/* GTP fields */
+#define NH_FLD_GTP_TEID BIT(0)
+
+/* Supported protocols */
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
/**
* struct dpkg_extract - A structure for defining a single extraction
* @type: Determines how the union below is interpreted:
- * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
- * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
- * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
* @extract: Selects extraction method
+ * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @extract.from_hdr.prot: Any of the supported headers
+ * @extract.from_hdr.type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @extract.from_hdr.field: One of the supported fields (NH_FLD_)
+ * @extract.from_hdr.size: Size in bytes
+ * @extract.from_hdr.offset: Byte offset
+ * @extract.from_hdr.hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ * @extract.from_data.size: Size in bytes
+ * @extract.from_data.offset: Byte offset
+ * @extract.from_parse.size: Size in bytes
+ * @extract.from_parse.offset: Byte offset
* @num_of_byte_masks: Defines the number of valid entries in the array below;
* This is also the number of bytes to be used as masks
* @masks: Masks parameters
*/
struct dpkg_extract {
enum dpkg_extract_type type;
- /**
- * union extract - Selects extraction method
- * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
- * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
- * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
- */
union {
- /**
- * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
- * @prot: Any of the supported headers
- * @type: Defines the type of header extraction:
- * DPKG_FROM_HDR: use size & offset below;
- * DPKG_FROM_FIELD: use field, size and offset below;
- * DPKG_FULL_FIELD: use field below
- * @field: One of the supported fields (NH_FLD_)
- *
- * @size: Size in bytes
- * @offset: Byte offset
- * @hdr_index: Clear for cases not listed below;
- * Used for protocols that may have more than a single
- * header, 0 indicates an outer header;
- * Supported protocols (possible values):
- * NET_PROT_VLAN (0, HDR_INDEX_LAST);
- * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
- * NET_PROT_IP(0, HDR_INDEX_LAST);
- * NET_PROT_IPv4(0, HDR_INDEX_LAST);
- * NET_PROT_IPv6(0, HDR_INDEX_LAST);
- */
-
struct {
enum net_prot prot;
enum dpkg_extract_from_hdr_type type;
@@ -136,22 +452,10 @@ struct dpkg_extract {
u8 offset;
u8 hdr_index;
} from_hdr;
- /**
- * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
- * @size: Size in bytes
- * @offset: Byte offset
- */
struct {
u8 size;
u8 offset;
} from_data;
-
- /**
- * struct from_parse - Used when
- * 'type = DPKG_EXTRACT_FROM_PARSE'
- * @size: Size in bytes
- * @offset: Byte offset
- */
struct {
u8 size;
u8 offset;
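To make the flattened kernel-doc above concrete, here is a minimal sketch of
filling one extraction rule: taking the full IPv4 source address from the
outer header. The enclosing struct dpkg_profile_cfg and its extracts array
are assumed to come from the same dpkg.h header::

    /* Sketch: one key field = the full IPv4 source address, outer header */
    struct dpkg_profile_cfg key_cfg = { 0 };
    struct dpkg_extract *ext = &key_cfg.extracts[0];

    key_cfg.num_extracts = 1;
    ext->type = DPKG_EXTRACT_FROM_HDR;
    ext->extract.from_hdr.prot = NET_PROT_IPV4;
    ext->extract.from_hdr.type = DPKG_FULL_FIELD;     /* uses 'field' only */
    ext->extract.from_hdr.field = NH_FLD_IPV4_SRC_IP;
    ext->extract.from_hdr.hdr_index = 0;              /* outer header */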
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
index d6f96f302cc6..83698abce8b4 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
@@ -1,34 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPNI_CMD_H
#define _FSL_DPNI_CMD_H
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
index 1a721c95a67a..d6ac26797cec 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
@@ -1,34 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
index ce86a816af45..b378a00c7c53 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -1,34 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPNI_H
#define __FSL_DPNI_H
@@ -117,15 +89,12 @@ int dpni_close(struct fsl_mc_io *mc_io,
* @num_dpbp: Number of DPBPs
* @pools: Array of buffer pools parameters; The number of valid entries
* must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
*/
struct dpni_pools_cfg {
u8 num_dpbp;
- /**
- * struct pools - Buffer pools parameters
- * @dpbp_id: DPBP object ID
- * @buffer_size: Buffer size
- * @backup_pool: Backup pool
- */
struct {
int dpbp_id;
u16 buffer_size;
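For reference, the flattened @pools documentation corresponds to a setup call
along these lines (a sketch; dpbp_id, rx_buf_size and the mc_io/token handles
are assumptions standing in for the caller's probe-path state)::

    struct dpni_pools_cfg pools_params = { 0 };
    int err;

    pools_params.num_dpbp = 1;
    pools_params.pools[0].dpbp_id = dpbp_id;
    pools_params.pools[0].buffer_size = rx_buf_size;
    pools_params.pools[0].backup_pool = 0;  /* primary pool */
    err = dpni_set_pools(mc_io, 0, token, &pools_params);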
@@ -424,16 +393,32 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
#define DPNI_STATISTICS_CNT 7
+/**
+ * union dpni_statistics - Union describing the DPNI statistics
+ * @page_0: Page_0 statistics structure
+ * @page_0.ingress_all_frames: Ingress frame count
+ * @page_0.ingress_all_bytes: Ingress byte count
+ * @page_0.ingress_multicast_frames: Ingress multicast frame count
+ * @page_0.ingress_multicast_bytes: Ingress multicast byte count
+ * @page_0.ingress_broadcast_frames: Ingress broadcast frame count
+ * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count
+ * @page_1: Page_1 statistics structure
+ * @page_1.egress_all_frames: Egress frame count
+ * @page_1.egress_all_bytes: Egress byte count
+ * @page_1.egress_multicast_frames: Egress multicast frame count
+ * @page_1.egress_multicast_bytes: Egress multicast byte count
+ * @page_1.egress_broadcast_frames: Egress broadcast frame count
+ * @page_1.egress_broadcast_bytes: Egress broadcast byte count
+ * @page_2: Page_2 statistics structure
+ * @page_2.ingress_filtered_frames: Ingress filtered frame count
+ * @page_2.ingress_discarded_frames: Ingress discarded frame count
+ * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to
+ * lack of buffers
+ * @page_2.egress_discarded_frames: Egress discarded frame count
+ * @page_2.egress_confirmed_frames: Egress confirmed frame count
+ * @raw: raw statistics structure, used to index counters
+ */
union dpni_statistics {
- /**
- * struct page_0 - Page_0 statistics structure
- * @ingress_all_frames: Ingress frame count
- * @ingress_all_bytes: Ingress byte count
- * @ingress_multicast_frames: Ingress multicast frame count
- * @ingress_multicast_bytes: Ingress multicast byte count
- * @ingress_broadcast_frames: Ingress broadcast frame count
- * @ingress_broadcast_bytes: Ingress broadcast byte count
- */
struct {
u64 ingress_all_frames;
u64 ingress_all_bytes;
@@ -442,15 +427,6 @@ union dpni_statistics {
u64 ingress_broadcast_frames;
u64 ingress_broadcast_bytes;
} page_0;
- /**
- * struct page_1 - Page_1 statistics structure
- * @egress_all_frames: Egress frame count
- * @egress_all_bytes: Egress byte count
- * @egress_multicast_frames: Egress multicast frame count
- * @egress_multicast_bytes: Egress multicast byte count
- * @egress_broadcast_frames: Egress broadcast frame count
- * @egress_broadcast_bytes: Egress broadcast byte count
- */
struct {
u64 egress_all_frames;
u64 egress_all_bytes;
@@ -459,15 +435,6 @@ union dpni_statistics {
u64 egress_broadcast_frames;
u64 egress_broadcast_bytes;
} page_1;
- /**
- * struct page_2 - Page_2 statistics structure
- * @ingress_filtered_frames: Ingress filtered frame count
- * @ingress_discarded_frames: Ingress discarded frame count
- * @ingress_nobuffer_discards: Ingress discarded frame count
- * due to lack of buffers
- * @egress_discarded_frames: Egress discarded frame count
- * @egress_confirmed_frames: Egress confirmed frame count
- */
struct {
u64 ingress_filtered_frames;
u64 ingress_discarded_frames;
@@ -475,9 +442,6 @@ union dpni_statistics {
u64 egress_discarded_frames;
u64 egress_confirmed_frames;
} page_2;
- /**
- * struct raw - raw statistics structure
- */
struct {
u64 counter[DPNI_STATISTICS_CNT];
} raw;
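The @raw view exists so a statistics page can be indexed as an array instead
of by field name. A minimal sketch, assuming 'stats' was already filled by
dpni_get_statistics() for the page of interest::

    static u64 dpni_page_total(const union dpni_statistics *stats)
    {
        u64 total = 0;
        int i;

        /* every page is DPNI_STATISTICS_CNT u64 counters wide */
        for (i = 0; i < DPNI_STATISTICS_CNT; i++)
            total += stats->raw.counter[i];

        return total;
    }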
@@ -685,29 +649,52 @@ enum dpni_dest {
/**
* struct dpni_queue - Queue structure
- * @user_context: User data, presented to the user along with any frames from
- * this queue. Not relevant for Tx queues.
- */
-struct dpni_queue {
-/**
- * struct destination - Destination structure
- * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
- * Identifies either a DPIO or a DPCON object. Not relevant for
- * Tx queues.
- * @type: May be one of the following:
- * 0 - No destination, queue can be manually queried, but will not
- * push traffic or notifications to a DPIO;
- * 1 - The destination is a DPIO. When traffic becomes available in
- * the queue a FQDAN (FQ data available notification) will be
+ * @destination: Destination structure
+ * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object.
+ * Not relevant for Tx queues.
+ * @destination.type: May be one of the following:
+ * 0 - No destination, queue can be manually
+ * queried, but will not push traffic or
+ * notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic
+ * becomes available in the queue a FQDAN
+ * (FQ data available notification) will be
* generated to selected DPIO;
- * 2 - The destination is a DPCON. The queue is associated with a
- * DPCON object for the purpose of scheduling between multiple
- * queues. The DPCON may be independently configured to
- * generate notifications. Not relevant for Tx queues.
- * @hold_active: Hold active, maintains a queue scheduled for longer
- * in a DPIO during dequeue to reduce spread of traffic.
- * Only relevant if queues are not affined to a single DPIO.
+ * 2 - The destination is a DPCON. The queue is
+ * associated with a DPCON object for the
+ * purpose of scheduling between multiple
+ * queues. The DPCON may be independently
+ * configured to generate notifications.
+ * Not relevant for Tx queues.
+ * @destination.hold_active: Hold active, maintains a queue scheduled for longer
+ * in a DPIO during dequeue to reduce spread of traffic.
+ * Only relevant if queues are
+ * not affined to a single DPIO.
+ * @user_context: User data, presented to the user along with any frames
+ * from this queue. Not relevant for Tx queues.
+ * @flc: FD Flow Context structure
+ * @flc.value: Default FLC value for traffic dequeued from
+ * this queue. Please check description of FD
+ * structure for more information.
+ * Note that FLC values set using dpni_add_fs_entry,
+ * if any, take precedence over values per queue.
+ * @flc.stash_control: Boolean, indicates whether the 6 lowest
+ * significant bits are used for stash control. If set, the 6
+ * least significant bits in value are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context
+ * that are stashed. FLC value is interpreted as a memory address
+ * in this case, excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame
+ * annotation to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame
+ * data to be stashed. Frame data is placed at FD[ADDR] +
+ * FD[OFFSET].
+ * For more details check the Frame Descriptor section in the
+ * hardware documentation.
*/
+struct dpni_queue {
struct {
u16 id;
enum dpni_dest type;
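The stash-control encoding above is easiest to see as a worked value. An
illustrative sketch; the field names follow the kernel-doc, while
get_context_dma_addr() and the 64-byte alignment of its result are
assumptions::

    struct dpni_queue queue = { 0 };
    u64 ctx_addr = get_context_dma_addr();  /* hypothetical helper */

    queue.flc.stash_control = 1;
    queue.flc.value = ctx_addr |
                      (1 << 0) |  /* bits 0-1: one 64 B unit of context */
                      /* bits 2-3 left 0: no annotation stashing */
                      (2 << 4);   /* bits 4-5: two 64 B units of frame data */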
diff --git a/drivers/staging/fsl-dpaa2/ethernet/README b/drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst
index e3b5c90197e4..90ec940749e8 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/README
+++ b/drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst
@@ -1,16 +1,13 @@
-Freescale DPAA2 Ethernet driver
-===============================
-
-This file provides documentation for the Freescale DPAA2 Ethernet driver.
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+===============================
+DPAA2 Ethernet driver
+===============================
-Contents
-========
- Supported Platforms
- Architecture Overview
- Creating a Network Interface
- Features & Offloads
+:Copyright: |copy| 2017-2018 NXP
+This file provides documentation for the Freescale DPAA2 Ethernet driver.
Supported Platforms
===================
@@ -23,10 +20,11 @@ Architecture Overview
Unlike regular NICs, in the DPAA2 architecture there is no single hardware block
representing network interfaces; instead, several separate hardware resources
concur to provide the networking functionality:
- - network interfaces
- - queues, channels
- - buffer pools
- - MAC/PHY
+
+- network interfaces
+- queues, channels
+- buffer pools
+- MAC/PHY
All hardware resources are allocated and configured through the Management
Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
@@ -35,14 +33,13 @@ hardware resources, like queues, do not have a corresponding MC object and
are treated as internal resources of other objects.
For a more detailed description of the DPAA2 architecture and its object
-abstractions see:
- Documentation/networking/dpaa2/overview.rst
+abstractions see *Documentation/networking/dpaa2/overview.rst*.
Each Linux net device is built on top of a Datapath Network Interface (DPNI)
object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
(DPCONs).
-Configuration interface:
+Configuration interface::
-----------------------
| DPAA2 Ethernet Driver |
@@ -56,7 +53,7 @@ Configuration interface:
| DPBP API | | DPNI API | | DPCON API |
---------- ---------- -----------
. . . software
-=========== . ========== . ============ . ===================
+ ======= . ========== . ============ . ===================
. . . hardware
------------------------------------------
| MC hardware portals |
@@ -72,11 +69,11 @@ DPBPs represent hardware buffer pools. Packet I/O is performed in the context
of DPCON objects, using DPIO portals for managing and communicating with the
hardware resources.
-Datapath (I/O) interface:
+Datapath (I/O) interface::
-----------------------------------------------
| DPAA2 Ethernet Driver |
- -----------------------------------------------
+ -----------------------------------------------
| ^ ^ | |
| | | | |
enqueue| dequeue| data | dequeue| seed |
@@ -132,6 +129,8 @@ DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
object or to another DPNI through an internal link, but the connection is
managed by MC and completely transparent to the Ethernet driver.
+::
+
--------- --------- ---------
| eth if1 | | eth if2 | | eth ifn |
--------- --------- ---------
diff --git a/drivers/staging/fsl-dpaa2/ethernet/net.h b/drivers/staging/fsl-dpaa2/ethernet/net.h
deleted file mode 100644
index 5020dee1730c..000000000000
--- a/drivers/staging/fsl-dpaa2/ethernet/net.h
+++ /dev/null
@@ -1,480 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_NET_H
-#define __FSL_NET_H
-
-#define LAST_HDR_INDEX 0xFFFFFFFF
-
-/*****************************************************************************/
-/* Protocol fields */
-/*****************************************************************************/
-
-/************************* Ethernet fields *********************************/
-#define NH_FLD_ETH_DA (1)
-#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
-#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
-#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
-#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
-#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
-#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
-
-#define NH_FLD_ETH_ADDR_SIZE 6
-
-/*************************** VLAN fields ***********************************/
-#define NH_FLD_VLAN_VPRI (1)
-#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
-#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
-#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
-#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
-#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
-
-#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
- NH_FLD_VLAN_CFI | \
- NH_FLD_VLAN_VID)
-
-/************************ IP (generic) fields ******************************/
-#define NH_FLD_IP_VER (1)
-#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
-#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
-#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
-#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
-#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
-#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
-#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
-#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
-
-#define NH_FLD_IP_PROTO_SIZE 1
-
-/***************************** IPV4 fields *********************************/
-#define NH_FLD_IPV4_VER (1)
-#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
-#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
-#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
-#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
-#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
-#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
-#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
-#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
-#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
-#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
-#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
-#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
-#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
-#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
-#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
-
-#define NH_FLD_IPV4_ADDR_SIZE 4
-#define NH_FLD_IPV4_PROTO_SIZE 1
-
-/***************************** IPV6 fields *********************************/
-#define NH_FLD_IPV6_VER (1)
-#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
-#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
-#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
-#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
-#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
-#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
-#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
-#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
-
-#define NH_FLD_IPV6_ADDR_SIZE 16
-#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
-
-/***************************** ICMP fields *********************************/
-#define NH_FLD_ICMP_TYPE (1)
-#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
-#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
-#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
-#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
-#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
-
-#define NH_FLD_ICMP_CODE_SIZE 1
-#define NH_FLD_ICMP_TYPE_SIZE 1
-
-/***************************** IGMP fields *********************************/
-#define NH_FLD_IGMP_VERSION (1)
-#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
-#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
-#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
-#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
-
-/***************************** TCP fields **********************************/
-#define NH_FLD_TCP_PORT_SRC (1)
-#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
-#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
-#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
-#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
-#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
-#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
-#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
-#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
-#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
-#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
-#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
-
-#define NH_FLD_TCP_PORT_SIZE 2
-
-/***************************** UDP fields **********************************/
-#define NH_FLD_UDP_PORT_SRC (1)
-#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
-#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
-#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
-#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
-
-#define NH_FLD_UDP_PORT_SIZE 2
-
-/*************************** UDP-lite fields *******************************/
-#define NH_FLD_UDP_LITE_PORT_SRC (1)
-#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
-#define NH_FLD_UDP_LITE_ALL_FIELDS \
- ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
-
-#define NH_FLD_UDP_LITE_PORT_SIZE 2
-
-/*************************** UDP-encap-ESP fields **************************/
-#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
-#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
-#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
-#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
-#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
-#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
-#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
- ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
-
-#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
-#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
-
-/***************************** SCTP fields *********************************/
-#define NH_FLD_SCTP_PORT_SRC (1)
-#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
-#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
-#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
-#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
-
-#define NH_FLD_SCTP_PORT_SIZE 2
-
-/***************************** DCCP fields *********************************/
-#define NH_FLD_DCCP_PORT_SRC (1)
-#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
-#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
-
-#define NH_FLD_DCCP_PORT_SIZE 2
-
-/***************************** IPHC fields *********************************/
-#define NH_FLD_IPHC_CID (1)
-#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
-#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
-#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
-#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
-#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
-
-/***************************** SCTP fields *********************************/
-#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
-#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
-#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
-#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
-#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
-#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
-#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
-#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
-#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
-#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
-#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
- ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
-
-/*************************** L2TPV2 fields *********************************/
-#define NH_FLD_L2TPV2_TYPE_BIT (1)
-#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
-#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
-#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
-#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
-#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
-#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
-#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
-#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
-#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
-#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
-#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
-#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
-#define NH_FLD_L2TPV2_ALL_FIELDS \
- ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
-
-/*************************** L2TPV3 fields *********************************/
-#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
-#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
-#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
-#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
-#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
-#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
-#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
-#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
-#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
-#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
- ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
-
-#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
-#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
-#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
-#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
-#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
- ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
-
-/**************************** PPP fields ***********************************/
-#define NH_FLD_PPP_PID (1)
-#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
-#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
-
-/************************** PPPoE fields ***********************************/
-#define NH_FLD_PPPOE_VER (1)
-#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
-#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
-#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
-#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
-#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
-#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
-#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
-
-/************************* PPP-Mux fields **********************************/
-#define NH_FLD_PPPMUX_PID (1)
-#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
-#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
-#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
-
-/*********************** PPP-Mux sub-frame fields **************************/
-#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
-#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
-#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
-#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
-#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
-#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
- ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
-
-/*************************** LLC fields ************************************/
-#define NH_FLD_LLC_DSAP (1)
-#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
-#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
-#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
-
-/*************************** NLPID fields **********************************/
-#define NH_FLD_NLPID_NLPID (1)
-#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
-
-/*************************** SNAP fields ***********************************/
-#define NH_FLD_SNAP_OUI (1)
-#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
-#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
-
-/*************************** LLC SNAP fields *******************************/
-#define NH_FLD_LLC_SNAP_TYPE (1)
-#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
-
-#define NH_FLD_ARP_HTYPE (1)
-#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
-#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
-#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
-#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
-#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
-#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
-#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
-#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
-#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
-
-/*************************** RFC2684 fields ********************************/
-#define NH_FLD_RFC2684_LLC (1)
-#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
-#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
-#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
-#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
-#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
-#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
-
-/*************************** User defined fields ***************************/
-#define NH_FLD_USER_DEFINED_SRCPORT (1)
-#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
-#define NH_FLD_USER_DEFINED_ALL_FIELDS \
- ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
-
-/*************************** Payload fields ********************************/
-#define NH_FLD_PAYLOAD_BUFFER (1)
-#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
-#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
-#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
-#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
-#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
-#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
-
-/*************************** GRE fields ************************************/
-#define NH_FLD_GRE_TYPE (1)
-#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
-
-/*************************** MINENCAP fields *******************************/
-#define NH_FLD_MINENCAP_SRC_IP (1)
-#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
-#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
-#define NH_FLD_MINENCAP_ALL_FIELDS \
- ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
-
-/*************************** IPSEC AH fields *******************************/
-#define NH_FLD_IPSEC_AH_SPI (1)
-#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
-#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
-
-/*************************** IPSEC ESP fields ******************************/
-#define NH_FLD_IPSEC_ESP_SPI (1)
-#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
-#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
-
-#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
-
-/*************************** MPLS fields ***********************************/
-#define NH_FLD_MPLS_LABEL_STACK (1)
-#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
- ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
-
-/*************************** MACSEC fields *********************************/
-#define NH_FLD_MACSEC_SECTAG (1)
-#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
-
-/*************************** GTP fields ************************************/
-#define NH_FLD_GTP_TEID (1)
-
-/* Protocol options */
-
-/* Ethernet options */
-#define NH_OPT_ETH_BROADCAST 1
-#define NH_OPT_ETH_MULTICAST 2
-#define NH_OPT_ETH_UNICAST 3
-#define NH_OPT_ETH_BPDU 4
-
-#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
-/* also applicable for broadcast */
-
-/* VLAN options */
-#define NH_OPT_VLAN_CFI 1
-
-/* IPV4 options */
-#define NH_OPT_IPV4_UNICAST 1
-#define NH_OPT_IPV4_MULTICAST 2
-#define NH_OPT_IPV4_BROADCAST 3
-#define NH_OPT_IPV4_OPTION 4
-#define NH_OPT_IPV4_FRAG 5
-#define NH_OPT_IPV4_INITIAL_FRAG 6
-
-/* IPV6 options */
-#define NH_OPT_IPV6_UNICAST 1
-#define NH_OPT_IPV6_MULTICAST 2
-#define NH_OPT_IPV6_OPTION 3
-#define NH_OPT_IPV6_FRAG 4
-#define NH_OPT_IPV6_INITIAL_FRAG 5
-
-/* General IP options (may be used for any version) */
-#define NH_OPT_IP_FRAG 1
-#define NH_OPT_IP_INITIAL_FRAG 2
-#define NH_OPT_IP_OPTION 3
-
-/* Minenc. options */
-#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
-
-/* GRE. options */
-#define NH_OPT_GRE_ROUTING_PRESENT 1
-
-/* TCP options */
-#define NH_OPT_TCP_OPTIONS 1
-#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
-#define NH_OPT_TCP_CONTROL_LOW_BITS 3
-
-/* CAPWAP options */
-#define NH_OPT_CAPWAP_DTLS 1
-
-enum net_prot {
- NET_PROT_NONE = 0,
- NET_PROT_PAYLOAD,
- NET_PROT_ETH,
- NET_PROT_VLAN,
- NET_PROT_IPV4,
- NET_PROT_IPV6,
- NET_PROT_IP,
- NET_PROT_TCP,
- NET_PROT_UDP,
- NET_PROT_UDP_LITE,
- NET_PROT_IPHC,
- NET_PROT_SCTP,
- NET_PROT_SCTP_CHUNK_DATA,
- NET_PROT_PPPOE,
- NET_PROT_PPP,
- NET_PROT_PPPMUX,
- NET_PROT_PPPMUX_SUBFRM,
- NET_PROT_L2TPV2,
- NET_PROT_L2TPV3_CTRL,
- NET_PROT_L2TPV3_SESS,
- NET_PROT_LLC,
- NET_PROT_LLC_SNAP,
- NET_PROT_NLPID,
- NET_PROT_SNAP,
- NET_PROT_MPLS,
- NET_PROT_IPSEC_AH,
- NET_PROT_IPSEC_ESP,
- NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
- NET_PROT_MACSEC,
- NET_PROT_GRE,
- NET_PROT_MINENCAP,
- NET_PROT_DCCP,
- NET_PROT_ICMP,
- NET_PROT_IGMP,
- NET_PROT_ARP,
- NET_PROT_CAPWAP_DATA,
- NET_PROT_CAPWAP_CTRL,
- NET_PROT_RFC2684,
- NET_PROT_ICMPV6,
- NET_PROT_FCOE,
- NET_PROT_FIP,
- NET_PROT_ISCSI,
- NET_PROT_GTP,
- NET_PROT_USER_DEFINED_L2,
- NET_PROT_USER_DEFINED_L3,
- NET_PROT_USER_DEFINED_L4,
- NET_PROT_USER_DEFINED_L5,
- NET_PROT_USER_DEFINED_SHIM1,
- NET_PROT_USER_DEFINED_SHIM2,
-
- NET_PROT_DUMMY_LAST
-};
-
-/*! IEEE8021.Q */
-#define NH_IEEE8021Q_ETYPE 0x8100
-#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
- ((((u32)((etype) & 0xFFFF)) << 16) | \
- (((u32)((pcp) & 0x07)) << 13) | \
- (((u32)((dei) & 0x01)) << 12) | \
- (((u32)((vlan_id) & 0xFFF))))
-
-#endif /* __FSL_NET_H */
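Although net.h is deleted here, the NH_IEEE8021Q_HDR() macro it carried is a
compact example of the bit packing used throughout the file. A worked value::

    /* etype 0x8100, PCP 5, DEI 0, VLAN ID 100:
     * (0x8100 << 16) | (5 << 13) | (0 << 12) | 100 = 0x8100a064
     */
    u32 tag = NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100);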
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
index 82f80c409ec3..db43fa3782b8 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
@@ -79,24 +79,21 @@ enum dpsw_component_type {
* struct dpsw_cfg - DPSW configuration
* @num_ifs: Number of external and internal interfaces
* @adv: Advanced parameters; default is all zeros;
- * use this structure to change default settings
+ * use this structure to change default settings
+ * @adv.options: Enable/Disable DPSW features (bitmap)
+ * @adv.max_vlans: Maximum number of VLANs; 0 indicates the default of 16
+ * @adv.max_meters_per_if: Number of meters per interface
+ * @adv.max_fdbs: Maximum number of FDBs; 0 indicates the default of 16
+ * @adv.max_fdb_entries: Number of FDB entries for the default FDB table;
+ * 0 indicates the default of 1024 entries
+ * @adv.fdb_aging_time: Default FDB aging time for the default FDB table;
+ * 0 indicates the default of 300 seconds
+ * @adv.max_fdb_mc_groups: Number of multicast groups in each FDB table;
+ * 0 indicates the default of 32
+ * @adv.component_type: Indicates the component type of this bridge
*/
struct dpsw_cfg {
u16 num_ifs;
- /**
- * struct adv - Advanced parameters
- * @options: Enable/Disable DPSW features (bitmap)
- * @max_vlans: Maximum Number of VLAN's; 0 - indicates default 16
- * @max_meters_per_if: Number of meters per interface
- * @max_fdbs: Maximum Number of FDB's; 0 - indicates default 16
- * @max_fdb_entries: Number of FDB entries for default FDB table;
- * 0 - indicates default 1024 entries.
- * @fdb_aging_time: Default FDB aging time for default FDB table;
- * 0 - indicates default 300 seconds
- * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
- * 0 - indicates default 32
- * @component_type: Indicates the component type of this bridge
- */
struct {
u64 options;
u16 max_vlans;
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 0d54564e4f38..ecdd3d84f956 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -179,7 +179,7 @@ static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
port_priv->idx, flag);
if (err) {
netdev_err(port_priv->netdev,
- "dpsw_fdb_set_learning_mode err %d\n", err);
+ "dpsw_if_set_flooding err %d\n", err);
return err;
}
port_priv->flood = !!flag;
diff --git a/drivers/staging/fsl-mc/Kconfig b/drivers/staging/fsl-mc/Kconfig
deleted file mode 100644
index 3002229bec1b..000000000000
--- a/drivers/staging/fsl-mc/Kconfig
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-source "drivers/staging/fsl-mc/bus/Kconfig"
diff --git a/drivers/staging/fsl-mc/Makefile b/drivers/staging/fsl-mc/Makefile
deleted file mode 100644
index 14683889dabd..000000000000
--- a/drivers/staging/fsl-mc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Freescale Management Complex (MC) bus drivers
-obj-$(CONFIG_FSL_MC_BUS) += bus/
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
deleted file mode 100644
index 342453035269..000000000000
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# DPAA2 fsl-mc bus
-#
-# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-#
-
-config FSL_MC_DPIO
- tristate "QorIQ DPAA2 DPIO driver"
- depends on FSL_MC_BUS
- help
- Driver for the DPAA2 DPIO object. A DPIO provides queue and
- buffer management facilities for software to interact with
- other DPAA2 objects. This driver does not expose the DPIO
- objects individually, but groups them under a service layer
- API.
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
deleted file mode 100644
index 21d8ebc8ce21..000000000000
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Freescale Management Complex (MC) bus drivers
-#
-# Copyright (C) 2014 Freescale Semiconductor, Inc.
-#
-
-# MC DPIO driver
-obj-$(CONFIG_FSL_MC_DPIO) += dpio/
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
deleted file mode 100644
index 72ba9da3d179..000000000000
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
+++ /dev/null
@@ -1,135 +0,0 @@
-Copyright 2016 NXP
-
-Introduction
-------------
-
-A DPAA2 DPIO (Data Path I/O) is a hardware object that provides
-interfaces to enqueue and dequeue frames to/from network interfaces
-and other accelerators. A DPIO also provides hardware buffer
-pool management for network interfaces.
-
-This document provides an overview the Linux DPIO driver, its
-subcomponents, and its APIs.
-
-See Documentation/networking/dpaa2/overview.rst for a general overview of DPAA2
-and the general DPAA2 driver architecture in Linux.
-
-Driver Overview
----------------
-
-The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and
-provides services that:
- A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue
- frames for their respective objects
- B) allow drivers to register callbacks for data availability notifications
- when data becomes available on a queue or channel
- C) allow drivers to manage hardware buffer pools
-
-The Linux DPIO driver consists of 3 primary components--
- DPIO object driver-- fsl-mc driver that manages the DPIO object
- DPIO service-- provides APIs to other Linux drivers for services
- QBman portal interface-- sends portal commands, gets responses
-
- fsl-mc other
- bus drivers
- | |
- +---+----+ +------+-----+
- |DPIO obj| |DPIO service|
- | driver |---| (DPIO) |
- +--------+ +------+-----+
- |
- +------+-----+
- | QBman |
- | portal i/f |
- +------------+
- |
- hardware
-
-The diagram below shows how the DPIO driver components fit with the other
-DPAA2 Linux driver components:
- +------------+
- | OS Network |
- | Stack |
- +------------+ +------------+
- | Allocator |. . . . . . . | Ethernet |
- |(DPMCP,DPBP)| | (DPNI) |
- +-.----------+ +---+---+----+
- . . ^ |
- . . <data avail, | |<enqueue,
- . . tx confirm> | | dequeue>
- +-------------+ . | |
- | DPRC driver | . +--------+ +------------+
- | (DPRC) | . . |DPIO obj| |DPIO service|
- +----------+--+ | driver |-| (DPIO) |
- | +--------+ +------+-----+
- |<dev add/remove> +------|-----+
- | | QBman |
- +----+--------------+ | portal i/f |
- | MC-bus driver | +------------+
- | | |
- | /soc/fsl-mc | |
- +-------------------+ |
- |
- =========================================|=========|========================
- +-+--DPIO---|-----------+
- | | |
- | QBman Portal |
- +-----------------------+
-
- ============================================================================
-
-
-DPIO Object Driver (dpio-driver.c)
-----------------------------------
-
- The dpio-driver component registers with the fsl-mc bus to handle objects of
- type "dpio". The implementation of probe() handles basic initialization
- of the DPIO including mapping of the DPIO regions (the QBman SW portal)
- and initializing interrupts and registering irq handlers. The dpio-driver
- registers the probed DPIO with dpio-service.
-
-DPIO service (dpio-service.c, dpaa2-io.h)
-------------------------------------------
-
- The dpio service component provides queuing, notification, and buffers
- management services to DPAA2 drivers, such as the Ethernet driver. A system
- will typically allocate 1 DPIO object per CPU to allow queuing operations
- to happen simultaneously across all CPUs.
-
- Notification handling
- dpaa2_io_service_register()
- dpaa2_io_service_deregister()
- dpaa2_io_service_rearm()
-
- Queuing
- dpaa2_io_service_pull_fq()
- dpaa2_io_service_pull_channel()
- dpaa2_io_service_enqueue_fq()
- dpaa2_io_service_enqueue_qd()
- dpaa2_io_store_create()
- dpaa2_io_store_destroy()
- dpaa2_io_store_next()
-
- Buffer pool management
- dpaa2_io_service_release()
- dpaa2_io_service_acquire()
-
-QBman portal interface (qbman-portal.c)
----------------------------------------
-
- The qbman-portal component provides APIs to do the low level hardware
- bit twiddling for operations such as:
- -initializing Qman software portals
- -building and sending portal commands
- -portal interrupt configuration and processing
-
- The qbman-portal APIs are not public to other drivers, and are
- only used by dpio-service.
-
-Other (dpaa2-fd.h, dpaa2-global.h)
-----------------------------------
-
- Frame descriptor and scatter-gather definitions and the APIs used to
- manipulate them are defined in dpaa2-fd.h.
-
- Dequeue result struct and parsing APIs are defined in dpaa2-global.h.
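For orientation, the queuing entry points named in this (removed) document
pair with the FD helpers from dpaa2-fd.h. A hedged sketch of the enqueue
path, assuming 'dpio', 'fqid' and a prepared 'fd' come from the caller::

    int err = dpaa2_io_service_enqueue_fq(dpio, fqid, &fd);

    if (err)  /* -EBUSY means the portal was busy; callers typically retry */
        return err;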
diff --git a/drivers/staging/fsl-mc/include/dpaa2-fd.h b/drivers/staging/fsl-mc/include/dpaa2-fd.h
deleted file mode 100644
index b55b89ba4eda..000000000000
--- a/drivers/staging/fsl-mc/include/dpaa2-fd.h
+++ /dev/null
@@ -1,426 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
- */
-#ifndef __FSL_DPAA2_FD_H
-#define __FSL_DPAA2_FD_H
-
-#include <linux/kernel.h>
-
-/**
- * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
- *
- * Frame Descriptors (FDs) are used to describe frame data in the DPAA2.
- * Frames can be enqueued and dequeued to Frame Queues (FQs) which are consumed
- * by the various DPAA accelerators (WRIOP, SEC, PME, DCE)
- *
- * There are three types of frames: single, scatter gather, and frame lists.
- *
- * The set of APIs in this file must be used to create, manipulate and
- * query Frame Descriptors.
- */
-
-/**
- * struct dpaa2_fd - Struct describing FDs
- * @words: for easier/faster copying the whole FD structure
- * @addr: address in the FD
- * @len: length in the FD
- * @bpid: buffer pool ID
- * @format_offset: format, offset, and short-length fields
- * @frc: frame context
- * @ctrl: control bits...including dd, sc, va, err, etc
- * @flc: flow context address
- *
- * This structure represents the basic Frame Descriptor used in the system.
- */
-struct dpaa2_fd {
- union {
- u32 words[8];
- struct dpaa2_fd_simple {
- __le64 addr;
- __le32 len;
- __le16 bpid;
- __le16 format_offset;
- __le32 frc;
- __le32 ctrl;
- __le64 flc;
- } simple;
- };
-};
-
-#define FD_SHORT_LEN_FLAG_MASK 0x1
-#define FD_SHORT_LEN_FLAG_SHIFT 14
-#define FD_SHORT_LEN_MASK 0x3FFFF
-#define FD_OFFSET_MASK 0x0FFF
-#define FD_FORMAT_MASK 0x3
-#define FD_FORMAT_SHIFT 12
-#define FD_BPID_MASK 0x3FFF
-#define SG_SHORT_LEN_FLAG_MASK 0x1
-#define SG_SHORT_LEN_FLAG_SHIFT 14
-#define SG_SHORT_LEN_MASK 0x1FFFF
-#define SG_OFFSET_MASK 0x0FFF
-#define SG_FORMAT_MASK 0x3
-#define SG_FORMAT_SHIFT 12
-#define SG_BPID_MASK 0x3FFF
-#define SG_FINAL_FLAG_MASK 0x1
-#define SG_FINAL_FLAG_SHIFT 15
-
-enum dpaa2_fd_format {
- dpaa2_fd_single = 0,
- dpaa2_fd_list,
- dpaa2_fd_sg
-};
-
-/**
- * dpaa2_fd_get_addr() - get the addr field of frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the address in the frame descriptor.
- */
-static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd)
-{
- return (dma_addr_t)le64_to_cpu(fd->simple.addr);
-}
-
-/**
- * dpaa2_fd_set_addr() - Set the addr field of frame descriptor
- * @fd: the given frame descriptor
- * @addr: the address needs to be set in frame descriptor
- */
-static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr)
-{
- fd->simple.addr = cpu_to_le64(addr);
-}
-
-/**
- * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the frame context field in the frame descriptor.
- */
-static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd)
-{
- return le32_to_cpu(fd->simple.frc);
-}
-
-/**
- * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor
- * @fd: the given frame descriptor
- * @frc: the frame context needs to be set in frame descriptor
- */
-static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc)
-{
- fd->simple.frc = cpu_to_le32(frc);
-}
-
-/**
- * dpaa2_fd_get_ctrl() - Get the control bits in the frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the control bits field in the frame descriptor.
- */
-static inline u32 dpaa2_fd_get_ctrl(const struct dpaa2_fd *fd)
-{
- return le32_to_cpu(fd->simple.ctrl);
-}
-
-/**
- * dpaa2_fd_set_ctrl() - Set the control bits in the frame descriptor
- * @fd: the given frame descriptor
- * @ctrl: the control bits to be set in the frame descriptor
- */
-static inline void dpaa2_fd_set_ctrl(struct dpaa2_fd *fd, u32 ctrl)
-{
- fd->simple.ctrl = cpu_to_le32(ctrl);
-}
-
-/**
- * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the flow context in the frame descriptor.
- */
-static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd)
-{
- return (dma_addr_t)le64_to_cpu(fd->simple.flc);
-}
-
-/**
- * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor
- * @fd: the given frame descriptor
- * @flc_addr: the flow context needs to be set in frame descriptor
- */
-static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr)
-{
- fd->simple.flc = cpu_to_le64(flc_addr);
-}
-
-static inline bool dpaa2_fd_short_len(const struct dpaa2_fd *fd)
-{
- return !!((le16_to_cpu(fd->simple.format_offset) >>
- FD_SHORT_LEN_FLAG_SHIFT) & FD_SHORT_LEN_FLAG_MASK);
-}
-
-/**
- * dpaa2_fd_get_len() - Get the length in the frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the length field in the frame descriptor.
- */
-static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd)
-{
- if (dpaa2_fd_short_len(fd))
- return le32_to_cpu(fd->simple.len) & FD_SHORT_LEN_MASK;
-
- return le32_to_cpu(fd->simple.len);
-}
-
-/**
- * dpaa2_fd_set_len() - Set the length field of frame descriptor
- * @fd: the given frame descriptor
- * @len: the length needs to be set in frame descriptor
- */
-static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len)
-{
- fd->simple.len = cpu_to_le32(len);
-}
-
-/**
- * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the offset.
- */
-static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd)
-{
- return le16_to_cpu(fd->simple.format_offset) & FD_OFFSET_MASK;
-}
-
-/**
- * dpaa2_fd_set_offset() - Set the offset field of frame descriptor
- * @fd: the given frame descriptor
- * @offset: the offset needs to be set in frame descriptor
- */
-static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset)
-{
- fd->simple.format_offset &= cpu_to_le16(~FD_OFFSET_MASK);
- fd->simple.format_offset |= cpu_to_le16(offset);
-}
-
-/**
- * dpaa2_fd_get_format() - Get the format field in the frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the format.
- */
-static inline enum dpaa2_fd_format dpaa2_fd_get_format(
- const struct dpaa2_fd *fd)
-{
- return (enum dpaa2_fd_format)((le16_to_cpu(fd->simple.format_offset)
- >> FD_FORMAT_SHIFT) & FD_FORMAT_MASK);
-}
-
-/**
- * dpaa2_fd_set_format() - Set the format field of frame descriptor
- * @fd: the given frame descriptor
- * @format: the format needs to be set in frame descriptor
- */
-static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd,
- enum dpaa2_fd_format format)
-{
- fd->simple.format_offset &=
- cpu_to_le16(~(FD_FORMAT_MASK << FD_FORMAT_SHIFT));
- fd->simple.format_offset |= cpu_to_le16(format << FD_FORMAT_SHIFT);
-}
-
-/**
- * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor
- * @fd: the given frame descriptor
- *
- * Return the buffer pool id.
- */
-static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd)
-{
- return le16_to_cpu(fd->simple.bpid) & FD_BPID_MASK;
-}
-
-/**
- * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor
- * @fd: the given frame descriptor
- * @bpid: buffer pool id to be set
- */
-static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid)
-{
- fd->simple.bpid &= cpu_to_le16(~(FD_BPID_MASK));
- fd->simple.bpid |= cpu_to_le16(bpid);
-}
-
-/**
- * struct dpaa2_sg_entry - the scatter-gathering structure
- * @addr: address of the sg entry
- * @len: length in this sg entry
- * @bpid: buffer pool id
- * @format_offset: format and offset fields
- */
-struct dpaa2_sg_entry {
- __le64 addr;
- __le32 len;
- __le16 bpid;
- __le16 format_offset;
-};
-
-enum dpaa2_sg_format {
- dpaa2_sg_single = 0,
- dpaa2_sg_frame_data,
- dpaa2_sg_sgt_ext
-};
-
-/* Accessors for SG entry fields */
-
-/**
- * dpaa2_sg_get_addr() - Get the address from SG entry
- * @sg: the given scatter-gathering object
- *
- * Return the address.
- */
-static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
-{
- return (dma_addr_t)le64_to_cpu(sg->addr);
-}
-
-/**
- * dpaa2_sg_set_addr() - Set the address in SG entry
- * @sg: the given scatter-gathering object
- * @addr: the address to be set
- */
-static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr)
-{
- sg->addr = cpu_to_le64(addr);
-}
-
-static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg)
-{
- return !!((le16_to_cpu(sg->format_offset) >> SG_SHORT_LEN_FLAG_SHIFT)
- & SG_SHORT_LEN_FLAG_MASK);
-}
-
-/**
- * dpaa2_sg_get_len() - Get the length in SG entry
- * @sg: the given scatter-gathering object
- *
- * Return the length.
- */
-static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg)
-{
- if (dpaa2_sg_short_len(sg))
- return le32_to_cpu(sg->len) & SG_SHORT_LEN_MASK;
-
- return le32_to_cpu(sg->len);
-}
-
-/**
- * dpaa2_sg_set_len() - Set the length in SG entry
- * @sg: the given scatter-gathering object
- * @len: the length to be set
- */
-static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len)
-{
- sg->len = cpu_to_le32(len);
-}
-
-/**
- * dpaa2_sg_get_offset() - Get the offset in SG entry
- * @sg: the given scatter-gathering object
- *
- * Return the offset.
- */
-static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg)
-{
- return le16_to_cpu(sg->format_offset) & SG_OFFSET_MASK;
-}
-
-/**
- * dpaa2_sg_set_offset() - Set the offset in SG entry
- * @sg: the given scatter-gathering object
- * @offset: the offset to be set
- */
-static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg,
- u16 offset)
-{
- sg->format_offset &= cpu_to_le16(~SG_OFFSET_MASK);
- sg->format_offset |= cpu_to_le16(offset);
-}
-
-/**
- * dpaa2_sg_get_format() - Get the SG format in SG entry
- * @sg: the given scatter-gathering object
- *
- * Return the format.
- */
-static inline enum dpaa2_sg_format
- dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg)
-{
- return (enum dpaa2_sg_format)((le16_to_cpu(sg->format_offset)
- >> SG_FORMAT_SHIFT) & SG_FORMAT_MASK);
-}
-
-/**
- * dpaa2_sg_set_format() - Set the SG format in SG entry
- * @sg: the given scatter-gathering object
- * @format: the format to be set
- */
-static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg,
- enum dpaa2_sg_format format)
-{
- sg->format_offset &= cpu_to_le16(~(SG_FORMAT_MASK << SG_FORMAT_SHIFT));
- sg->format_offset |= cpu_to_le16(format << SG_FORMAT_SHIFT);
-}
-
-/**
- * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry
- * @sg: the given scatter-gathering object
- *
- * Return the bpid.
- */
-static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg)
-{
- return le16_to_cpu(sg->bpid) & SG_BPID_MASK;
-}
-
-/**
- * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry
- * @sg: the given scatter-gathering object
- * @bpid: the bpid to be set
- */
-static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid)
-{
- sg->bpid &= cpu_to_le16(~(SG_BPID_MASK));
- sg->bpid |= cpu_to_le16(bpid);
-}
-
-/**
- * dpaa2_sg_is_final() - Check final bit in SG entry
- * @sg: the given scatter-gathering object
- *
- * Return bool.
- */
-static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg)
-{
- return !!(le16_to_cpu(sg->format_offset) >> SG_FINAL_FLAG_SHIFT);
-}
-
-/**
- * dpaa2_sg_set_final() - Set the final bit in SG entry
- * @sg: the given scatter-gathering object
- * @final: the final boolean to be set
- */
-static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
-{
- sg->format_offset &= cpu_to_le16((~(SG_FINAL_FLAG_MASK
- << SG_FINAL_FLAG_SHIFT)) & 0xFFFF);
- sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
-}
-
-#endif /* __FSL_DPAA2_FD_H */
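
The accessors above pack format, offset, and the final flag into the little-endian format_offset word, so callers never touch the byte order directly. A minimal sketch of how a caller might have populated the terminating entry of an SG table with this API; the function name and all values are placeholders:

/* Hypothetical use of the SG-entry accessors: fill the last entry of a
 * scatter-gather table. All values are placeholders.
 */
static void example_fill_last_sg(struct dpaa2_sg_entry *sg,
				 dma_addr_t buf, u32 len, u16 bpid)
{
	dpaa2_sg_set_addr(sg, buf);		/* 64-bit LE address */
	dpaa2_sg_set_len(sg, len);		/* full 32-bit length */
	dpaa2_sg_set_bpid(sg, bpid);		/* pool to release into */
	dpaa2_sg_set_format(sg, dpaa2_sg_single);
	dpaa2_sg_set_final(sg, true);		/* no entries follow */
}
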
diff --git a/drivers/staging/fsl-mc/include/dpaa2-global.h b/drivers/staging/fsl-mc/include/dpaa2-global.h
deleted file mode 100644
index 9bc0713346a8..000000000000
--- a/drivers/staging/fsl-mc/include/dpaa2-global.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- *
- */
-#ifndef __FSL_DPAA2_GLOBAL_H
-#define __FSL_DPAA2_GLOBAL_H
-
-#include <linux/types.h>
-#include <linux/cpumask.h>
-#include "dpaa2-fd.h"
-
-struct dpaa2_dq {
- union {
- struct common {
- u8 verb;
- u8 reserved[63];
- } common;
- struct dq {
- u8 verb;
- u8 stat;
- __le16 seqnum;
- __le16 oprid;
- u8 reserved;
- u8 tok;
- __le32 fqid;
- u32 reserved2;
- __le32 fq_byte_cnt;
- __le32 fq_frm_cnt;
- __le64 fqd_ctx;
- u8 fd[32];
- } dq;
- struct scn {
- u8 verb;
- u8 stat;
- u8 state;
- u8 reserved;
- __le32 rid_tok;
- __le64 ctx;
- } scn;
- };
-};
-
-/* Parsing frame dequeue results */
-/* FQ empty */
-#define DPAA2_DQ_STAT_FQEMPTY 0x80
-/* FQ held active */
-#define DPAA2_DQ_STAT_HELDACTIVE 0x40
-/* FQ force eligible */
-#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20
-/* valid frame */
-#define DPAA2_DQ_STAT_VALIDFRAME 0x10
-/* FQ ODP enable */
-#define DPAA2_DQ_STAT_ODPVALID 0x04
-/* volatile dequeue */
-#define DPAA2_DQ_STAT_VOLATILE 0x02
-/* volatile dequeue command is expired */
-#define DPAA2_DQ_STAT_EXPIRED 0x01
-
-#define DQ_FQID_MASK 0x00FFFFFF
-#define DQ_FRAME_COUNT_MASK 0x00FFFFFF
-
-/**
- * dpaa2_dq_flags() - Get the stat field of dequeue response
- * @dq: the dequeue result.
- */
-static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
-{
- return dq->dq.stat;
-}
-
-/**
- * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
- * command.
- * @dq: the dequeue result
- *
- * Return 1 for volatile (pull) dequeue, 0 for static dequeue.
- */
-static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
-{
- return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
-}
-
-/**
- * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
- * @dq: the dequeue result
- *
- * Return boolean.
- */
-static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
-{
- return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
-}
-
-/**
- * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
- * @dq: the dequeue result
- *
- * seqnum is valid only if VALIDFRAME flag is TRUE
- *
- * Return seqnum.
- */
-static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
-{
- return le16_to_cpu(dq->dq.seqnum);
-}
-
-/**
- * dpaa2_dq_odpid() - Get the odpid field in dequeue response
- * @dq: the dequeue result
- *
- * odpid is valid only if ODPVALID flag is TRUE.
- *
- * Return odpid.
- */
-static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
-{
- return le16_to_cpu(dq->dq.oprid);
-}
-
-/**
- * dpaa2_dq_fqid() - Get the fqid in dequeue response
- * @dq: the dequeue result
- *
- * Return fqid.
- */
-static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
-{
- return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
-}
-
-/**
- * dpaa2_dq_byte_count() - Get the byte count in dequeue response
- * @dq: the dequeue result
- *
- * Return the byte count remaining in the FQ.
- */
-static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
-{
- return le32_to_cpu(dq->dq.fq_byte_cnt);
-}
-
-/**
- * dpaa2_dq_frame_count() - Get the frame count in dequeue response
- * @dq: the dequeue result
- *
- * Return the frame count remaining in the FQ.
- */
-static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
-{
- return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
-}
-
-/**
- * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
- * @dq: the dequeue result
- *
- * Return the frame queue context.
- */
-static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
-{
- return le64_to_cpu(dq->dq.fqd_ctx);
-}
-
-/**
- * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
- * @dq: the dequeue result
- *
- * Return the frame descriptor.
- */
-static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
-{
- return (const struct dpaa2_fd *)&dq->dq.fd[0];
-}
-
-#endif /* __FSL_DPAA2_GLOBAL_H */
diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
deleted file mode 100644
index ab51e40d11db..000000000000
--- a/drivers/staging/fsl-mc/include/dpaa2-io.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/*
- * Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright NXP
- *
- */
-#ifndef __FSL_DPAA2_IO_H
-#define __FSL_DPAA2_IO_H
-
-#include <linux/types.h>
-#include <linux/cpumask.h>
-#include <linux/irqreturn.h>
-
-#include "dpaa2-fd.h"
-#include "dpaa2-global.h"
-
-struct dpaa2_io;
-struct dpaa2_io_store;
-struct device;
-
-/**
- * DOC: DPIO Service
- *
- * The DPIO service provides APIs for users to interact with the datapath
- * by enqueueing and dequeuing frame descriptors.
- *
- * The following set of APIs can be used to enqueue and dequeue frames
- * as well as to produce notification callbacks when data is available
- * for dequeue.
- */
-
-#define DPAA2_IO_ANY_CPU -1
-
-/**
- * struct dpaa2_io_desc - The DPIO descriptor
- * @receives_notifications: Use notification mode. Non-zero if the DPIO
- * has a channel.
- * @has_8prio: Set to non-zero for a channel with 8 priority WQs. Ignored
- * unless receives_notifications is TRUE.
- * @cpu: The cpu index that at least interrupt handlers will
- * execute on.
- * @stash_affinity: The stash affinity for this portal; favours 'cpu'
- * @regs_cena: The cache enabled regs.
- * @regs_cinh: The cache inhibited regs.
- * @dpio_id: The dpio index
- * @qman_version: The qman version
- *
- * Describes the attributes and features of the DPIO object.
- */
-struct dpaa2_io_desc {
- int receives_notifications;
- int has_8prio;
- int cpu;
- void *regs_cena;
- void __iomem *regs_cinh;
- int dpio_id;
- u32 qman_version;
-};
-
-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
-
-void dpaa2_io_down(struct dpaa2_io *d);
-
-irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
-
-struct dpaa2_io *dpaa2_io_service_select(int cpu);
-
-/**
- * struct dpaa2_io_notification_ctx - The DPIO notification context structure
- * @cb: The callback to be invoked when the notification arrives
- * @is_cdan: Zero for FQDAN, non-zero for CDAN
- * @id: FQID or channel ID, needed for rearm
- * @desired_cpu: The cpu on which the notifications will show up. Use
- * DPAA2_IO_ANY_CPU if the caller doesn't care
- * @dpio_id: The dpio index
- * @qman64: The 64-bit context value that shows up in the FQDAN/CDAN.
- * @node: The list node
- * @dpio_private: The dpio object internal to dpio_service
- *
- * Used when a FQDAN/CDAN registration is made by drivers.
- */
-struct dpaa2_io_notification_ctx {
- void (*cb)(struct dpaa2_io_notification_ctx *ctx);
- int is_cdan;
- u32 id;
- int desired_cpu;
- int dpio_id;
- u64 qman64;
- struct list_head node;
- void *dpio_private;
-};
-
-int dpaa2_io_service_register(struct dpaa2_io *service,
- struct dpaa2_io_notification_ctx *ctx);
-void dpaa2_io_service_deregister(struct dpaa2_io *service,
- struct dpaa2_io_notification_ctx *ctx);
-int dpaa2_io_service_rearm(struct dpaa2_io *service,
- struct dpaa2_io_notification_ctx *ctx);
-
-int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
- struct dpaa2_io_store *s);
-
-int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
- u16 qdbin, const struct dpaa2_fd *fd);
-int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
- const u64 *buffers, unsigned int num_buffers);
-int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
- u64 *buffers, unsigned int num_buffers);
-
-struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
- struct device *dev);
-void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
-struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
-
-#endif /* __FSL_DPAA2_IO_H */
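
Together with dpaa2-global.h above, this service API implies a pull-dequeue flow: create a store, issue the pull, then walk the store until the hardware flags the last response. A minimal sketch under those assumptions; passing NULL as the service is assumed to mean "any instance", and consume_frame() is a placeholder handler:

/* Hypothetical consumer of the DPIO service API above. */
extern void consume_frame(const struct dpaa2_fd *fd);	/* placeholder */

static int example_pull_channel(struct device *dev, u32 channel_id)
{
	struct dpaa2_io_store *store;
	struct dpaa2_dq *dq;
	int is_last = 0, err;

	store = dpaa2_io_store_create(16, dev);
	if (!store)
		return -ENOMEM;

	/* NULL service: assumed to select any available DPIO instance. */
	err = dpaa2_io_service_pull_channel(NULL, channel_id, store);
	if (err)
		goto out;

	/* A real caller would bound this loop with a timeout. */
	while (!is_last) {
		dq = dpaa2_io_store_next(store, &is_last);
		if (!dq)
			continue;	/* response not yet written back */
		if (dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME)
			consume_frame(dpaa2_dq_fd(dq));
	}
out:
	dpaa2_io_store_destroy(store);
	return err;
}
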
diff --git a/drivers/staging/gasket/Kconfig b/drivers/staging/gasket/Kconfig
new file mode 100644
index 000000000000..970e299046c3
--- /dev/null
+++ b/drivers/staging/gasket/Kconfig
@@ -0,0 +1,23 @@
+menu "Gasket devices"
+
+config STAGING_GASKET_FRAMEWORK
+ tristate "Gasket framework"
+ depends on PCI && (X86_64 || ARM64)
+ help
+ This framework supports Gasket-compatible devices, such as Apex.
+ It is required for any of the following module(s).
+
+ To compile this driver as a module, choose M here. The module
+ will be called "gasket".
+
+config STAGING_APEX_DRIVER
+ tristate "Apex Driver"
+ depends on STAGING_GASKET_FRAMEWORK
+ help
+ This driver supports the Apex device. Say Y if you want to
+ include this driver in the kernel.
+
+ To compile this driver as a module, choose M here. The module
+ will be called "apex".
+
+endmenu
diff --git a/drivers/staging/gasket/Makefile b/drivers/staging/gasket/Makefile
new file mode 100644
index 000000000000..cec813ece678
--- /dev/null
+++ b/drivers/staging/gasket/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for Gasket framework and dependent drivers.
+#
+
+obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket.o
+obj-$(CONFIG_STAGING_APEX_DRIVER) += apex.o
+
+gasket-objs := gasket_core.o gasket_ioctl.o gasket_interrupt.o gasket_page_table.o gasket_sysfs.o
+apex-objs := apex_driver.o
diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO
new file mode 100644
index 000000000000..6ff8e01b04cc
--- /dev/null
+++ b/drivers/staging/gasket/TODO
@@ -0,0 +1,9 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+- Document sysfs files with Documentation/ABI/ entries.
+- Use misc interface instead of major number for driver version description.
+- Add descriptions of module_param's
+- apex_get_status() should actually check status.
+- "drivers" should never be dealing with "raw" sysfs calls or mess around with
+ kobjects at all. The driver core should handle all of this for you
+ automatically. There should not be a need for raw attribute macros.
diff --git a/drivers/staging/gasket/apex.h b/drivers/staging/gasket/apex.h
new file mode 100644
index 000000000000..3bbceffff5e4
--- /dev/null
+++ b/drivers/staging/gasket/apex.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Apex kernel-userspace interface definitions.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __APEX_H__
+#define __APEX_H__
+
+#include <linux/ioctl.h>
+
+/* Clock Gating ioctl. */
+struct apex_gate_clock_ioctl {
+ /* Enter or leave clock gated state. */
+ u64 enable;
+
+ /* If set, enter the clock-gating state regardless of the custom
+ * block's internal idle state.
+ */
+ u64 force_idle;
+};
+
+/* Base number for all Apex-common IOCTLs */
+#define APEX_IOCTL_BASE 0x7F
+
+/* Enable/Disable clock gating. */
+#define APEX_IOCTL_GATE_CLOCK \
+ _IOW(APEX_IOCTL_BASE, 0, struct apex_gate_clock_ioctl)
+
+#endif /* __APEX_H__ */
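
From userspace this ioctl is a plain _IOW call on the device node. A hypothetical caller, assuming the node is named /dev/apex_0 (the "%s_%u" naming used by the Gasket framework below) and shimming the kernel-style u64 that apex.h uses; the device is opened read-write because the driver's permission check requires write access:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

typedef __u64 u64;	/* apex.h uses the kernel-style u64 */
#include "apex.h"	/* assumes a userspace-visible copy */

int main(void)
{
	struct apex_gate_clock_ioctl req = {
		.enable = 1,		/* enter the clock-gated state */
		.force_idle = 0,	/* honour the block's idle state */
	};
	int fd = open("/dev/apex_0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, APEX_IOCTL_GATE_CLOCK, &req))
		perror("APEX_IOCTL_GATE_CLOCK");
	close(fd);
	return 0;
}
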
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
new file mode 100644
index 000000000000..c747e9ca4518
--- /dev/null
+++ b/drivers/staging/gasket/apex_driver.c
@@ -0,0 +1,741 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Apex chip.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include "apex.h"
+
+#include "gasket_core.h"
+#include "gasket_interrupt.h"
+#include "gasket_page_table.h"
+#include "gasket_sysfs.h"
+
+/* Constants */
+#define APEX_DEVICE_NAME "Apex"
+#define APEX_DRIVER_VERSION "1.0"
+
+/* CSRs are in BAR 2. */
+#define APEX_BAR_INDEX 2
+
+#define APEX_PCI_VENDOR_ID 0x1ac1
+#define APEX_PCI_DEVICE_ID 0x089a
+
+/* Bar Offsets. */
+#define APEX_BAR_OFFSET 0
+#define APEX_CM_OFFSET 0x1000000
+
+/* The sizes of the Apex BAR 2 and coherent memory regions. */
+#define APEX_BAR_BYTES 0x100000
+#define APEX_CH_MEM_BYTES (PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
+
+/* The number of user-mappable memory ranges in BAR2 of an Apex chip. */
+#define NUM_REGIONS 3
+
+/* The number of nodes in an Apex chip. */
+#define NUM_NODES 1
+
+/*
+ * The total number of entries in the page table. Should match the value read
+ * from the register APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_SIZE.
+ */
+#define APEX_PAGE_TABLE_TOTAL_ENTRIES 8192
+
+#define APEX_EXTENDED_SHIFT 63 /* Extended address bit position. */
+
+/* Check reset 120 times */
+#define APEX_RESET_RETRY 120
+/* Wait 100 ms between checks. Total 12 sec wait maximum. */
+#define APEX_RESET_DELAY 100
+
+/* Enumeration of the supported sysfs entries. */
+enum sysfs_attribute_type {
+ ATTR_KERNEL_HIB_PAGE_TABLE_SIZE,
+ ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE,
+ ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES,
+};
+
+/*
+ * Register offsets into BAR2 memory.
+ * Only values necessary for driver implementation are defined.
+ */
+enum apex_bar2_regs {
+ APEX_BAR2_REG_SCU_BASE = 0x1A300,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_SIZE = 0x46000,
+ APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE = 0x46008,
+ APEX_BAR2_REG_KERNEL_HIB_TRANSLATION_ENABLE = 0x46010,
+ APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL = 0x46018,
+ APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL = 0x46020,
+ APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL = 0x46028,
+ APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL = 0x46030,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL = 0x46038,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL = 0x46040,
+ APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL = 0x46048,
+ APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE = 0x46050,
+ APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE_MASK = 0x46058,
+ APEX_BAR2_REG_KERNEL_HIB_STATUS_BLOCK_DELAY = 0x46060,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY0 = 0x46068,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY1 = 0x46070,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT = 0x46078,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT = 0x46080,
+ APEX_BAR2_REG_KERNEL_WIRE_INT_PENDING_BIT_ARRAY = 0x48778,
+ APEX_BAR2_REG_KERNEL_WIRE_INT_MASK_ARRAY = 0x48780,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSE = 0x486D8,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSED = 0x486E0,
+ APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER = 0x4A000,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE = 0x50000,
+
+ /* Error registers - Used mostly for debug */
+ APEX_BAR2_REG_USER_HIB_ERROR_STATUS = 0x86f0,
+ APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS = 0x41a0,
+};
+
+/* Addresses for packed registers. */
+#define APEX_BAR2_REG_AXI_QUIESCE (APEX_BAR2_REG_SCU_BASE + 0x2C)
+#define APEX_BAR2_REG_GCB_CLOCK_GATE (APEX_BAR2_REG_SCU_BASE + 0x14)
+#define APEX_BAR2_REG_SCU_0 (APEX_BAR2_REG_SCU_BASE + 0xc)
+#define APEX_BAR2_REG_SCU_1 (APEX_BAR2_REG_SCU_BASE + 0x10)
+#define APEX_BAR2_REG_SCU_2 (APEX_BAR2_REG_SCU_BASE + 0x14)
+#define APEX_BAR2_REG_SCU_3 (APEX_BAR2_REG_SCU_BASE + 0x18)
+#define APEX_BAR2_REG_SCU_4 (APEX_BAR2_REG_SCU_BASE + 0x1c)
+#define APEX_BAR2_REG_SCU_5 (APEX_BAR2_REG_SCU_BASE + 0x20)
+
+#define SCU3_RG_PWR_STATE_OVR_BIT_OFFSET 26
+#define SCU3_RG_PWR_STATE_OVR_MASK_WIDTH 2
+#define SCU3_CUR_RST_GCB_BIT_MASK 0x10
+#define SCU2_RG_RST_GCB_BIT_MASK 0xc
+
+/* Configuration for page table. */
+static struct gasket_page_table_config apex_page_table_configs[NUM_NODES] = {
+ {
+ .id = 0,
+ .mode = GASKET_PAGE_TABLE_MODE_NORMAL,
+ .total_entries = APEX_PAGE_TABLE_TOTAL_ENTRIES,
+ .base_reg = APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE,
+ .extended_reg = APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE,
+ .extended_bit = APEX_EXTENDED_SHIFT,
+ },
+};
+
+/* The regions in the BAR2 space that can be mapped into user space. */
+static const struct gasket_mappable_region mappable_regions[NUM_REGIONS] = {
+ { 0x40000, 0x1000 },
+ { 0x44000, 0x1000 },
+ { 0x48000, 0x1000 },
+};
+
+static const struct gasket_mappable_region cm_mappable_regions[1] = {
+ { 0x0, APEX_CH_MEM_BYTES },
+};
+
+/* Gasket device interrupt enums must be dense (i.e., no empty slots). */
+enum apex_interrupt {
+ APEX_INTERRUPT_INSTR_QUEUE = 0,
+ APEX_INTERRUPT_INPUT_ACTV_QUEUE = 1,
+ APEX_INTERRUPT_PARAM_QUEUE = 2,
+ APEX_INTERRUPT_OUTPUT_ACTV_QUEUE = 3,
+ APEX_INTERRUPT_SC_HOST_0 = 4,
+ APEX_INTERRUPT_SC_HOST_1 = 5,
+ APEX_INTERRUPT_SC_HOST_2 = 6,
+ APEX_INTERRUPT_SC_HOST_3 = 7,
+ APEX_INTERRUPT_TOP_LEVEL_0 = 8,
+ APEX_INTERRUPT_TOP_LEVEL_1 = 9,
+ APEX_INTERRUPT_TOP_LEVEL_2 = 10,
+ APEX_INTERRUPT_TOP_LEVEL_3 = 11,
+ APEX_INTERRUPT_FATAL_ERR = 12,
+ APEX_INTERRUPT_COUNT = 13,
+};
+
+/* Interrupt descriptors for Apex */
+static struct gasket_interrupt_desc apex_interrupts[] = {
+ {
+ APEX_INTERRUPT_INSTR_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL,
+ UNPACKED,
+ },
+ {
+ APEX_INTERRUPT_INPUT_ACTV_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL,
+ UNPACKED
+ },
+ {
+ APEX_INTERRUPT_PARAM_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL,
+ UNPACKED
+ },
+ {
+ APEX_INTERRUPT_OUTPUT_ACTV_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL,
+ UNPACKED
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_0,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_0
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_1,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_1
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_2,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_2
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_3,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_3
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_0,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_0
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_1,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_1
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_2,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_2
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_3,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_3
+ },
+ {
+ APEX_INTERRUPT_FATAL_ERR,
+ APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL,
+ UNPACKED
+ },
+};
+
+/* Allows device to enter power save upon driver close(). */
+static int allow_power_save = 1;
+
+/* Allows SW based clock gating. */
+static int allow_sw_clock_gating;
+
+/* Allows HW based clock gating. */
+/* Note: this is not mutually exclusive with SW clock gating. */
+static int allow_hw_clock_gating = 1;
+
+/* Act as if only GCB is instantiated. */
+static int bypass_top_level;
+
+module_param(allow_power_save, int, 0644);
+module_param(allow_sw_clock_gating, int, 0644);
+module_param(allow_hw_clock_gating, int, 0644);
+module_param(bypass_top_level, int, 0644);
+
+/* Check the device status registers and return device status ALIVE or DEAD. */
+static int apex_get_status(struct gasket_dev *gasket_dev)
+{
+ /* TODO: Check device status. */
+ return GASKET_STATUS_ALIVE;
+}
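
As the TODO file notes, this check is a stub. One plausible shape for it, reusing the error-status register that apex_device_cleanup() below already reads; treating any nonzero value as DEAD is an assumption, not chip documentation:

/* Sketch only: report DEAD when the user HIB error status is nonzero. */
static int example_apex_status_check(struct gasket_dev *gasket_dev)
{
	u64 hib_error =
		gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
				   APEX_BAR2_REG_USER_HIB_ERROR_STATUS);

	return hib_error ? GASKET_STATUS_DEAD : GASKET_STATUS_ALIVE;
}
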
+
+/* Enter GCB reset state. */
+static int apex_enter_reset(struct gasket_dev *gasket_dev)
+{
+ if (bypass_top_level)
+ return 0;
+
+ /*
+ * Software reset:
+ * Enable sleep mode
+ * - Software force GCB idle
+ * - Enable GCB idle
+ */
+ gasket_read_modify_write_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER,
+ 0x0, 1, 32);
+
+ /* - Initiate DMA pause */
+ gasket_dev_write_64(gasket_dev, 1, APEX_BAR_INDEX,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSE);
+
+ /* - Wait for DMA pause complete. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSED, 1, 1,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "DMAs did not quiesce within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ /* - Enable GCB reset (0x1 to rg_rst_gcb) */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x1, 2, 2);
+
+ /* - Enable GCB clock Gate (0x1 to rg_gated_gcb) */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x1, 2, 18);
+
+ /* - Enable GCB memory shut down (0x3 to rg_force_ram_sd) */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 0x3, 2, 14);
+
+ /* - Wait for RAM shutdown. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 1 << 6, 1 << 6,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "RAM did not shut down within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
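
The sequence above leans on gasket_read_modify_write_32/64(gasket_dev, bar, reg, value, width, shift). A minimal sketch of the field update those helpers are assumed to perform, written as a plain function on a register value:

/* Assumed semantics: replace the 'width'-bit field at bit offset 'shift'
 * with 'value', leaving all other bits untouched.
 */
static u32 example_rmw32(u32 old, u32 value, int width, int shift)
{
	u32 mask = ((1u << width) - 1) << shift;

	return (old & ~mask) | ((value << shift) & mask);
}

Under that reading, the GCB-reset write above sets the two-bit field at bit offset 2 of SCU_2 to 0x1.
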
+
+/* Quit GCB reset state. */
+static int apex_quit_reset(struct gasket_dev *gasket_dev)
+{
+ u32 val0, val1;
+
+ if (bypass_top_level)
+ return 0;
+
+ /*
+ * Disable sleep mode:
+ * - Disable GCB memory shut down:
+ * - b00: Not forced (HW controlled)
+ * - b1x: Force disable
+ */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 0x0, 2, 14);
+
+ /*
+ * - Disable software clock gate:
+ * - b00: Not forced (HW controlled)
+ * - b1x: Force disable
+ */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x0, 2, 18);
+
+ /*
+ * - Disable GCB reset (rg_rst_gcb):
+ * - b00: Not forced (HW controlled)
+ * - b1x: Force disable = Force not Reset
+ */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x2, 2, 2);
+
+ /* - Wait for RAM enable. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 1 << 6, 0,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "RAM did not enable within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ /* - Wait for Reset complete. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3,
+ SCU3_CUR_RST_GCB_BIT_MASK, 0,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "GCB did not leave reset within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ if (!allow_hw_clock_gating) {
+ val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ /* Inactive and Sleep mode are disabled. */
+ gasket_read_modify_write_32(gasket_dev,
+ APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 0x3,
+ SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
+ SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
+ val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ dev_dbg(gasket_dev->dev,
+ "Disallow HW clock gating 0x%x -> 0x%x\n", val0, val1);
+ } else {
+ val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ /* Inactive mode enabled - Sleep mode disabled. */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 2,
+ SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
+ SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
+ val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ dev_dbg(gasket_dev->dev, "Allow HW clock gating 0x%x -> 0x%x\n",
+ val0, val1);
+ }
+
+ return 0;
+}
+
+/* Reset the Apex hardware. Called on final close via device_close_cb. */
+static int apex_device_cleanup(struct gasket_dev *gasket_dev)
+{
+ u64 scalar_error;
+ u64 hib_error;
+ int ret = 0;
+
+ hib_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_USER_HIB_ERROR_STATUS);
+ scalar_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS);
+
+ dev_dbg(gasket_dev->dev,
+ "%s 0x%p hib_error 0x%llx scalar_error 0x%llx\n",
+ __func__, gasket_dev, hib_error, scalar_error);
+
+ if (allow_power_save)
+ ret = apex_enter_reset(gasket_dev);
+
+ return ret;
+}
+
+/* Determine if GCB is in reset state. */
+static bool is_gcb_in_reset(struct gasket_dev *gasket_dev)
+{
+ u32 val = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+
+ /* Masks the cur_rst_gcb bit of SCU_3 */
+ return (val & SCU3_CUR_RST_GCB_BIT_MASK);
+}
+
+/* Reset the hardware, then quit reset. Called on device open. */
+static int apex_reset(struct gasket_dev *gasket_dev)
+{
+ int ret;
+
+ if (bypass_top_level)
+ return 0;
+
+ if (!is_gcb_in_reset(gasket_dev)) {
+ /* We are not in reset - toggle the reset bit to force
+ * re-init of the custom block.
+ */
+ dev_dbg(gasket_dev->dev, "%s: toggle reset\n", __func__);
+
+ ret = apex_enter_reset(gasket_dev);
+ if (ret)
+ return ret;
+ }
+ ret = apex_quit_reset(gasket_dev);
+
+ return ret;
+}
+
+/*
+ * Check permissions for Apex ioctls.
+ * Returns true if the current user may execute this ioctl, and false otherwise.
+ */
+static bool apex_ioctl_check_permissions(struct file *filp, uint cmd)
+{
+ return !!(filp->f_mode & FMODE_WRITE);
+}
+
+/* Gates or un-gates Apex clock. */
+static long apex_clock_gating(struct gasket_dev *gasket_dev,
+ struct apex_gate_clock_ioctl __user *argp)
+{
+ struct apex_gate_clock_ioctl ibuf;
+
+ if (bypass_top_level || !allow_sw_clock_gating)
+ return 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ dev_dbg(gasket_dev->dev, "%s %llu\n", __func__, ibuf.enable);
+
+ if (ibuf.enable) {
+ /* Quiesce AXI, gate GCB clock. */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_AXI_QUIESCE, 0x1, 1,
+ 16);
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_GCB_CLOCK_GATE, 0x1,
+ 2, 18);
+ } else {
+ /* Un-gate GCB clock, un-quiesce AXI. */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_GCB_CLOCK_GATE, 0x0,
+ 2, 18);
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_AXI_QUIESCE, 0x0, 1,
+ 16);
+ }
+ return 0;
+}
+
+/* Apex-specific ioctl handler. */
+static long apex_ioctl(struct file *filp, uint cmd, void __user *argp)
+{
+ struct gasket_dev *gasket_dev = filp->private_data;
+
+ if (!apex_ioctl_check_permissions(filp, cmd))
+ return -EPERM;
+
+ switch (cmd) {
+ case APEX_IOCTL_GATE_CLOCK:
+ return apex_clock_gating(gasket_dev, argp);
+ default:
+ return -ENOTTY; /* unknown command */
+ }
+}
+
+/* Display driver sysfs entries. */
+static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+ enum sysfs_attribute_type type;
+
+ gasket_dev = gasket_sysfs_get_device_data(device);
+ if (!gasket_dev) {
+ dev_err(device, "No Apex device sysfs mapping found\n");
+ return -ENODEV;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ dev_err(device, "No Apex device sysfs attr data found\n");
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return -ENODEV;
+ }
+
+ type = (enum sysfs_attribute_type)gasket_attr->data.attr_type;
+ switch (type) {
+ case ATTR_KERNEL_HIB_PAGE_TABLE_SIZE:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+ gasket_page_table_num_entries(
+ gasket_dev->page_table[0]));
+ break;
+ case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+ gasket_page_table_num_simple_entries(
+ gasket_dev->page_table[0]));
+ break;
+ case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+ gasket_page_table_num_active_pages(
+ gasket_dev->page_table[0]));
+ break;
+ default:
+ dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
+ attr->attr.name);
+ ret = 0;
+ break;
+ }
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return ret;
+}
+
+static struct gasket_sysfs_attribute apex_sysfs_attrs[] = {
+ GASKET_SYSFS_RO(node_0_page_table_entries, sysfs_show,
+ ATTR_KERNEL_HIB_PAGE_TABLE_SIZE),
+ GASKET_SYSFS_RO(node_0_simple_page_table_entries, sysfs_show,
+ ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE),
+ GASKET_SYSFS_RO(node_0_num_mapped_pages, sysfs_show,
+ ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES),
+ GASKET_END_OF_ATTR_ARRAY
+};
+
+/* On device open, perform a core reinit reset. */
+static int apex_device_open_cb(struct gasket_dev *gasket_dev)
+{
+ return gasket_reset_nolock(gasket_dev);
+}
+
+static const struct pci_device_id apex_pci_ids[] = {
+ { PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
+};
+
+static void apex_pci_fixup_class(struct pci_dev *pdev)
+{
+ pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID,
+ PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
+
+static int apex_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ ulong page_table_ready, msix_table_ready;
+ int retries = 0;
+ struct gasket_dev *gasket_dev;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ dev_err(&pci_dev->dev, "error enabling PCI device\n");
+ return ret;
+ }
+
+ pci_set_master(pci_dev);
+
+ ret = gasket_pci_add_device(pci_dev, &gasket_dev);
+ if (ret) {
+ dev_err(&pci_dev->dev, "error adding gasket device\n");
+ pci_disable_device(pci_dev);
+ return ret;
+ }
+
+ pci_set_drvdata(pci_dev, gasket_dev);
+ apex_reset(gasket_dev);
+
+ while (retries < APEX_RESET_RETRY) {
+ page_table_ready =
+ gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT);
+ msix_table_ready =
+ gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT);
+ if (page_table_ready && msix_table_ready)
+ break;
+ msleep(APEX_RESET_DELAY);
+ retries++;
+ }
+
+ if (retries == APEX_RESET_RETRY) {
+ if (!page_table_ready)
+ dev_err(gasket_dev->dev, "Page table init timed out\n");
+ if (!msix_table_ready)
+ dev_err(gasket_dev->dev, "MSI-X table init timed out\n");
+ ret = -ETIMEDOUT;
+ goto remove_device;
+ }
+
+ ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
+ apex_sysfs_attrs);
+ if (ret)
+ dev_err(&pci_dev->dev, "error creating device sysfs entries\n");
+
+ ret = gasket_enable_device(gasket_dev);
+ if (ret) {
+ dev_err(&pci_dev->dev, "error enabling gasket device\n");
+ goto remove_device;
+ }
+
+ /* Place device in low power mode until opened */
+ if (allow_power_save)
+ apex_enter_reset(gasket_dev);
+
+ return 0;
+
+remove_device:
+ gasket_pci_remove_device(pci_dev);
+ pci_disable_device(pci_dev);
+ return ret;
+}
+
+static void apex_pci_remove(struct pci_dev *pci_dev)
+{
+ struct gasket_dev *gasket_dev = pci_get_drvdata(pci_dev);
+
+ gasket_disable_device(gasket_dev);
+ gasket_pci_remove_device(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+static struct gasket_driver_desc apex_desc = {
+ .name = "apex",
+ .driver_version = APEX_DRIVER_VERSION,
+ .major = 120,
+ .minor = 0,
+ .module = THIS_MODULE,
+ .pci_id_table = apex_pci_ids,
+
+ .num_page_tables = NUM_NODES,
+ .page_table_bar_index = APEX_BAR_INDEX,
+ .page_table_configs = apex_page_table_configs,
+ .page_table_extended_bit = APEX_EXTENDED_SHIFT,
+
+ .bar_descriptions = {
+ GASKET_UNUSED_BAR,
+ GASKET_UNUSED_BAR,
+ { APEX_BAR_BYTES, (VM_WRITE | VM_READ), APEX_BAR_OFFSET,
+ NUM_REGIONS, mappable_regions, PCI_BAR },
+ GASKET_UNUSED_BAR,
+ GASKET_UNUSED_BAR,
+ GASKET_UNUSED_BAR,
+ },
+ .coherent_buffer_description = {
+ APEX_CH_MEM_BYTES,
+ (VM_WRITE | VM_READ),
+ APEX_CM_OFFSET,
+ },
+ .interrupt_type = PCI_MSIX,
+ .interrupt_bar_index = APEX_BAR_INDEX,
+ .num_interrupts = APEX_INTERRUPT_COUNT,
+ .interrupts = apex_interrupts,
+ .interrupt_pack_width = 7,
+
+ .device_open_cb = apex_device_open_cb,
+ .device_close_cb = apex_device_cleanup,
+
+ .ioctl_handler_cb = apex_ioctl,
+ .device_status_cb = apex_get_status,
+ .hardware_revision_cb = NULL,
+ .device_reset_cb = apex_reset,
+};
+
+static struct pci_driver apex_pci_driver = {
+ .name = "apex",
+ .probe = apex_pci_probe,
+ .remove = apex_pci_remove,
+ .id_table = apex_pci_ids,
+};
+
+static int __init apex_init(void)
+{
+ int ret;
+
+ ret = gasket_register_device(&apex_desc);
+ if (ret)
+ return ret;
+ ret = pci_register_driver(&apex_pci_driver);
+ if (ret)
+ gasket_unregister_device(&apex_desc);
+ return ret;
+}
+
+static void apex_exit(void)
+{
+ pci_unregister_driver(&apex_pci_driver);
+ gasket_unregister_device(&apex_desc);
+}
+MODULE_DESCRIPTION("Google Apex driver");
+MODULE_VERSION(APEX_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("John Joseph <jnjoseph@google.com>");
+MODULE_DEVICE_TABLE(pci, apex_pci_ids);
+module_init(apex_init);
+module_exit(apex_exit);
diff --git a/drivers/staging/gasket/gasket.h b/drivers/staging/gasket/gasket.h
new file mode 100644
index 000000000000..a0f065c517a5
--- /dev/null
+++ b/drivers/staging/gasket/gasket.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Gasket device kernel and user space declarations.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __GASKET_H__
+#define __GASKET_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* ioctl structure declarations */
+
+/*
+ * Ioctl structures are padded to a multiple of 64 bits and laid out so
+ * that 64-bit values fall on 64-bit boundaries. Unsigned 64-bit integers
+ * are used to hold pointers; this keeps the layout identical for 32-bit
+ * and 64-bit userspace.
+ */
+
+/*
+ * Common structure for ioctls associating an eventfd with a device interrupt,
+ * when using the Gasket interrupt module.
+ */
+struct gasket_interrupt_eventfd {
+ u64 interrupt;
+ u64 event_fd;
+};
+
+/*
+ * Common structure for ioctls mapping and unmapping buffers when using the
+ * Gasket page_table module.
+ */
+struct gasket_page_table_ioctl {
+ u64 page_table_index;
+ u64 size;
+ u64 host_address;
+ u64 device_address;
+};
+
+/*
+ * Common structure for the ioctl enabling and configuring the coherent
+ * buffer allocator.
+ * dma_address: physical start address of the coherent memory, as
+ * allocated by the kernel
+ */
+struct gasket_coherent_alloc_config_ioctl {
+ u64 page_table_index;
+ u64 enable;
+ u64 size;
+ u64 dma_address;
+};
+
+/* Base number for all Gasket-common IOCTLs */
+#define GASKET_IOCTL_BASE 0xDC
+
+/* Reset the device. */
+#define GASKET_IOCTL_RESET _IO(GASKET_IOCTL_BASE, 0)
+
+/* Associate the specified [event]fd with the specified interrupt. */
+#define GASKET_IOCTL_SET_EVENTFD \
+ _IOW(GASKET_IOCTL_BASE, 1, struct gasket_interrupt_eventfd)
+
+/*
+ * Clears any eventfd associated with the specified interrupt. The (ulong)
+ * argument is the interrupt number to clear.
+ */
+#define GASKET_IOCTL_CLEAR_EVENTFD _IOW(GASKET_IOCTL_BASE, 2, unsigned long)
+
+/*
+ * [Loopbacks only] Requests that the loopback device send the specified
+ * interrupt to the host. The (ulong) argument is the number of the interrupt to
+ * send.
+ */
+#define GASKET_IOCTL_LOOPBACK_INTERRUPT \
+ _IOW(GASKET_IOCTL_BASE, 3, unsigned long)
+
+/* Queries the kernel for the number of page tables supported by the device. */
+#define GASKET_IOCTL_NUMBER_PAGE_TABLES _IOR(GASKET_IOCTL_BASE, 4, u64)
+
+/*
+ * Queries the kernel for the maximum size of the page table. Only the size and
+ * page_table_index fields are used from the struct gasket_page_table_ioctl.
+ */
+#define GASKET_IOCTL_PAGE_TABLE_SIZE \
+ _IOWR(GASKET_IOCTL_BASE, 5, struct gasket_page_table_ioctl)
+
+/*
+ * Queries the kernel for the current simple page table size. Only the size and
+ * page_table_index fields are used from the struct gasket_page_table_ioctl.
+ */
+#define GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE \
+ _IOWR(GASKET_IOCTL_BASE, 6, struct gasket_page_table_ioctl)
+
+/*
+ * Tells the kernel to change the split between the number of simple and
+ * extended entries in the given page table. Only the size and page_table_index
+ * fields are used from the struct gasket_page_table_ioctl.
+ */
+#define GASKET_IOCTL_PARTITION_PAGE_TABLE \
+ _IOW(GASKET_IOCTL_BASE, 7, struct gasket_page_table_ioctl)
+
+/*
+ * Tells the kernel to map size bytes at host_address to device_address in
+ * page_table_index page table.
+ */
+#define GASKET_IOCTL_MAP_BUFFER \
+ _IOW(GASKET_IOCTL_BASE, 8, struct gasket_page_table_ioctl)
+
+/*
+ * Tells the kernel to unmap size bytes at host_address from device_address in
+ * page_table_index page table.
+ */
+#define GASKET_IOCTL_UNMAP_BUFFER \
+ _IOW(GASKET_IOCTL_BASE, 9, struct gasket_page_table_ioctl)
+
+/* Clear the interrupt counts stored for this device. */
+#define GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS _IO(GASKET_IOCTL_BASE, 10)
+
+/* Enable/Disable and configure the coherent allocator. */
+#define GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR \
+ _IOWR(GASKET_IOCTL_BASE, 11, struct gasket_coherent_alloc_config_ioctl)
+
+#endif /* __GASKET_H__ */
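
A hypothetical userspace caller of the map ioctl above; the pointer-in-u64 convention is the one described at the top of this header, while the file descriptor, addresses, and the u64 shim are placeholders/assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/types.h>

typedef __u64 u64;	/* gasket.h uses the kernel-style u64 */
#include "gasket.h"	/* assumes a userspace-visible copy */

static int example_map(int fd, void *host_buf, uint64_t dev_addr,
		       uint64_t bytes)
{
	struct gasket_page_table_ioctl req = {
		.page_table_index = 0,
		.size = bytes,
		.host_address = (uintptr_t)host_buf,	/* pointer as u64 */
		.device_address = dev_addr,
	};

	return ioctl(fd, GASKET_IOCTL_MAP_BUFFER, &req);
}

Unmapping takes the same structure through GASKET_IOCTL_UNMAP_BUFFER.
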
diff --git a/drivers/staging/gasket/gasket_constants.h b/drivers/staging/gasket/gasket_constants.h
new file mode 100644
index 000000000000..50d87c7b178c
--- /dev/null
+++ b/drivers/staging/gasket/gasket_constants.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Google, Inc. */
+#ifndef __GASKET_CONSTANTS_H__
+#define __GASKET_CONSTANTS_H__
+
+#define GASKET_FRAMEWORK_VERSION "1.1.2"
+
+/*
+ * The maximum number of simultaneous device types supported by the framework.
+ */
+#define GASKET_FRAMEWORK_DESC_MAX 2
+
+/* The maximum number of devices per type. */
+#define GASKET_DEV_MAX 256
+
+/* The number of supported (and possible) PCI BARs. */
+#define GASKET_NUM_BARS 6
+
+/* The number of supported Gasket page tables per device. */
+#define GASKET_MAX_NUM_PAGE_TABLES 1
+
+/* Maximum length of device names (driver name + minor number suffix + NULL). */
+#define GASKET_NAME_MAX 32
+
+/* Device status enumeration. */
+enum gasket_status {
+ /*
+ * A device is DEAD if it has not been initialized or has had an error.
+ */
+ GASKET_STATUS_DEAD = 0,
+ /*
+ * A device is LAMED if the hardware is healthy but the kernel was
+ * unable to enable some functionality (e.g. interrupts).
+ */
+ GASKET_STATUS_LAMED,
+
+ /* A device is ALIVE if it is ready for operation. */
+ GASKET_STATUS_ALIVE,
+
+ /*
+ * This status is set when the driver is exiting and waiting for all
+ * handles to be closed.
+ */
+ GASKET_STATUS_DRIVER_EXIT,
+};
+
+#endif
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
new file mode 100644
index 000000000000..d12ab560411f
--- /dev/null
+++ b/drivers/staging/gasket/gasket_core.c
@@ -0,0 +1,1816 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Gasket generic driver framework. This file contains the implementation
+ * for the Gasket generic driver framework - the functionality that is common
+ * across Gasket devices.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "gasket_core.h"
+
+#include "gasket_interrupt.h"
+#include "gasket_ioctl.h"
+#include "gasket_page_table.h"
+#include "gasket_sysfs.h"
+
+#include <linux/capability.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pid_namespace.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#ifdef GASKET_KERNEL_TRACE_SUPPORT
+#define CREATE_TRACE_POINTS
+#include <trace/events/gasket_mmap.h>
+#else
+#define trace_gasket_mmap_exit(x)
+#define trace_gasket_mmap_entry(x, ...)
+#endif
+
+/*
+ * "Private" members of gasket_driver_desc.
+ *
+ * Contains internal per-device type tracking data, i.e., data not appropriate
+ * as part of the public interface for the generic framework.
+ */
+struct gasket_internal_desc {
+ /* Device-specific-driver-provided configuration information. */
+ const struct gasket_driver_desc *driver_desc;
+
+ /* Protects access to per-driver data (i.e. this structure). */
+ struct mutex mutex;
+
+ /* Kernel-internal device class. */
+ struct class *class;
+
+ /* Instantiated / present devices of this type. */
+ struct gasket_dev *devs[GASKET_DEV_MAX];
+};
+
+/* do_map_region() needs to be able to return more than just true/false. */
+enum do_map_region_status {
+ /* The region was successfully mapped. */
+ DO_MAP_REGION_SUCCESS,
+
+ /* Attempted to map region and failed. */
+ DO_MAP_REGION_FAILURE,
+
+ /* The requested region to map was not part of a mappable region. */
+ DO_MAP_REGION_INVALID,
+};
+
+/* Global data definitions. */
+/* Mutex - only for framework-wide data. Other data should be protected by
+ * finer-grained locks.
+ */
+static DEFINE_MUTEX(g_mutex);
+
+/* List of all registered device descriptions & their supporting data. */
+static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];
+
+/* Mapping of statuses to human-readable strings. Must end with {0,NULL}. */
+static const struct gasket_num_name gasket_status_name_table[] = {
+ { GASKET_STATUS_DEAD, "DEAD" },
+ { GASKET_STATUS_ALIVE, "ALIVE" },
+ { GASKET_STATUS_LAMED, "LAMED" },
+ { GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
+ { 0, NULL },
+};
+
+/* Enumeration of the automatic Gasket framework sysfs nodes. */
+enum gasket_sysfs_attribute_type {
+ ATTR_BAR_OFFSETS,
+ ATTR_BAR_SIZES,
+ ATTR_DRIVER_VERSION,
+ ATTR_FRAMEWORK_VERSION,
+ ATTR_DEVICE_TYPE,
+ ATTR_HARDWARE_REVISION,
+ ATTR_PCI_ADDRESS,
+ ATTR_STATUS,
+ ATTR_IS_DEVICE_OWNED,
+ ATTR_DEVICE_OWNER,
+ ATTR_WRITE_OPEN_COUNT,
+ ATTR_RESET_COUNT,
+ ATTR_USER_MEM_RANGES
+};
+
+/* Perform a standard Gasket callback. */
+static inline int
+check_and_invoke_callback(struct gasket_dev *gasket_dev,
+ int (*cb_function)(struct gasket_dev *))
+{
+ int ret = 0;
+
+ dev_dbg(gasket_dev->dev, "check_and_invoke_callback %p\n",
+ cb_function);
+ if (cb_function) {
+ mutex_lock(&gasket_dev->mutex);
+ ret = cb_function(gasket_dev);
+ mutex_unlock(&gasket_dev->mutex);
+ }
+ return ret;
+}
+
+/* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
+static inline int
+gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
+ int (*cb_function)(struct gasket_dev *))
+{
+ int ret = 0;
+
+ if (cb_function) {
+ dev_dbg(gasket_dev->dev,
+ "Invoking device-specific callback.\n");
+ ret = cb_function(gasket_dev);
+ }
+ return ret;
+}
+
+/*
+ * Return nonzero if the gasket_cdev_info is owned by the current thread group
+ * ID.
+ */
+static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
+{
+ return (info->ownership.is_owned &&
+ (info->ownership.owner == current->tgid));
+}
+
+/*
+ * Find the next free gasket_internal_dev slot.
+ *
+ * Returns the located slot number on success or a negative number on failure.
+ */
+static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
+ const char *kobj_name)
+{
+ int i;
+
+ mutex_lock(&internal_desc->mutex);
+
+ /* Search for a previous instance of this device. */
+ for (i = 0; i < GASKET_DEV_MAX; i++) {
+ if (internal_desc->devs[i] &&
+ strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
+ pr_err("Duplicate device %s\n", kobj_name);
+ mutex_unlock(&internal_desc->mutex);
+ return -EBUSY;
+ }
+ }
+
+ /* Find a free device slot. */
+ for (i = 0; i < GASKET_DEV_MAX; i++) {
+ if (!internal_desc->devs[i])
+ break;
+ }
+
+ if (i == GASKET_DEV_MAX) {
+ pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
+ mutex_unlock(&internal_desc->mutex);
+ return -EBUSY;
+ }
+
+ mutex_unlock(&internal_desc->mutex);
+ return i;
+}
+
+/*
+ * Allocate and initialize a Gasket device structure and add the device to
+ * the device list.
+ *
+ * Returns 0 if successful, a negative error code otherwise.
+ */
+static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
+ struct device *parent, struct gasket_dev **pdev,
+ const char *kobj_name)
+{
+ int dev_idx;
+ const struct gasket_driver_desc *driver_desc =
+ internal_desc->driver_desc;
+ struct gasket_dev *gasket_dev;
+ struct gasket_cdev_info *dev_info;
+
+ pr_debug("Allocating a Gasket device %s.\n", kobj_name);
+
+ *pdev = NULL;
+
+ dev_idx = gasket_find_dev_slot(internal_desc, kobj_name);
+ if (dev_idx < 0)
+ return dev_idx;
+
+ gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
+ if (!gasket_dev) {
+ pr_err("no memory for device %s\n", kobj_name);
+ return -ENOMEM;
+ }
+ internal_desc->devs[dev_idx] = gasket_dev;
+
+ mutex_init(&gasket_dev->mutex);
+
+ gasket_dev->internal_desc = internal_desc;
+ gasket_dev->dev_idx = dev_idx;
+ snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", kobj_name);
+ gasket_dev->dev = get_device(parent);
+ /* gasket_bar_data is uninitialized. */
+ gasket_dev->num_page_tables = driver_desc->num_page_tables;
+ /* max_page_table_size and *page_table are uninitialized. */
+ /* interrupt_data is not initialized. */
+ /* status is 0, or GASKET_STATUS_DEAD */
+
+ dev_info = &gasket_dev->dev_info;
+ snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
+ gasket_dev->dev_idx);
+ dev_info->devt =
+ MKDEV(driver_desc->major, driver_desc->minor +
+ gasket_dev->dev_idx);
+ dev_info->device = device_create(internal_desc->class, parent,
+ dev_info->devt, gasket_dev, dev_info->name);
+
+ dev_dbg(dev_info->device, "Gasket device allocated.\n");
+
+ /* cdev has not yet been added; cdev_added is 0 */
+ dev_info->gasket_dev_ptr = gasket_dev;
+ /* ownership is all 0, indicating no owner or opens. */
+
+ return 0;
+}
+
+/* Free a Gasket device. */
+static void gasket_free_dev(struct gasket_dev *gasket_dev)
+{
+ struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
+
+ mutex_lock(&internal_desc->mutex);
+ internal_desc->devs[gasket_dev->dev_idx] = NULL;
+ mutex_unlock(&internal_desc->mutex);
+ put_device(gasket_dev->dev);
+ kfree(gasket_dev);
+}
+
+/*
+ * Maps the specified BAR into kernel space.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ * A zero-sized BAR will not be mapped, but is not an error.
+ */
+static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
+{
+ struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
+ const struct gasket_driver_desc *driver_desc =
+ internal_desc->driver_desc;
+ ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
+ int ret;
+
+ if (desc_bytes == 0)
+ return 0;
+
+ if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
+ /* not PCI: skip this entry */
+ return 0;
+ }
+ /*
+ * pci_resource_start and pci_resource_len return a "resource_size_t",
+ * which is safely castable to ulong (which itself is the arg to
+ * request_mem_region).
+ */
+ gasket_dev->bar_data[bar_num].phys_base =
+ (ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
+ if (!gasket_dev->bar_data[bar_num].phys_base) {
+ dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
+ bar_num);
+ return -EINVAL;
+ }
+
+ gasket_dev->bar_data[bar_num].length_bytes =
+ (ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
+ if (gasket_dev->bar_data[bar_num].length_bytes < desc_bytes) {
+ dev_err(gasket_dev->dev,
+ "PCI BAR %u space is too small: %lu; expected >= %lu\n",
+ bar_num, gasket_dev->bar_data[bar_num].length_bytes,
+ desc_bytes);
+ return -ENOMEM;
+ }
+
+ if (!request_mem_region(gasket_dev->bar_data[bar_num].phys_base,
+ gasket_dev->bar_data[bar_num].length_bytes,
+ gasket_dev->dev_info.name)) {
+ dev_err(gasket_dev->dev,
+ "Cannot get BAR %d memory region %p\n",
+ bar_num, &gasket_dev->pci_dev->resource[bar_num]);
+ return -EINVAL;
+ }
+
+ gasket_dev->bar_data[bar_num].virt_base =
+ ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
+ gasket_dev->bar_data[bar_num].length_bytes);
+ if (!gasket_dev->bar_data[bar_num].virt_base) {
+ dev_err(gasket_dev->dev,
+ "Cannot remap BAR %d memory region %p\n",
+ bar_num, &gasket_dev->pci_dev->resource[bar_num]);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
+
+ return 0;
+
+fail:
+ iounmap(gasket_dev->bar_data[bar_num].virt_base);
+ release_mem_region(gasket_dev->bar_data[bar_num].phys_base,
+ gasket_dev->bar_data[bar_num].length_bytes);
+ return ret;
+}
+
+/*
+ * Releases PCI BAR mapping.
+ *
+ * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
+ */
+static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
+{
+ ulong base, bytes;
+ struct gasket_internal_desc *internal_desc = dev->internal_desc;
+ const struct gasket_driver_desc *driver_desc =
+ internal_desc->driver_desc;
+
+ if (driver_desc->bar_descriptions[bar_num].size == 0 ||
+ !dev->bar_data[bar_num].virt_base)
+ return;
+
+ if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
+ return;
+
+ iounmap(dev->bar_data[bar_num].virt_base);
+ dev->bar_data[bar_num].virt_base = NULL;
+
+ base = pci_resource_start(dev->pci_dev, bar_num);
+ if (!base) {
+ dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
+ bar_num);
+ return;
+ }
+
+ bytes = pci_resource_len(dev->pci_dev, bar_num);
+ release_mem_region(base, bytes);
+}
+
+/*
+ * Set up PCI memory mapping for the specified device.
+ *
+ * Reads the BAR registers and sets up pointers to the device's memory mapped
+ * IO space.
+ *
+ * Returns 0 on success and a negative value otherwise.
+ */
+static int gasket_setup_pci(struct pci_dev *pci_dev,
+ struct gasket_dev *gasket_dev)
+{
+ int i, mapped_bars, ret;
+
+ for (i = 0; i < GASKET_NUM_BARS; i++) {
+ ret = gasket_map_pci_bar(gasket_dev, i);
+ if (ret) {
+ mapped_bars = i;
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ for (i = 0; i < mapped_bars; i++)
+ gasket_unmap_pci_bar(gasket_dev, i);
+
+ return ret;
+}
+
+/* Unmaps memory for the specified device. */
+static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
+{
+ int i;
+
+ for (i = 0; i < GASKET_NUM_BARS; i++)
+ gasket_unmap_pci_bar(gasket_dev, i);
+}
+
+/* Determine the health of the Gasket device. */
+static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
+{
+ int status;
+ int i;
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+
+ status = gasket_check_and_invoke_callback_nolock(gasket_dev,
+ driver_desc->device_status_cb);
+ if (status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
+ status);
+ return status;
+ }
+
+ status = gasket_interrupt_system_status(gasket_dev);
+ if (status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev,
+ "Interrupt system reported status %d.\n", status);
+ return status;
+ }
+
+ for (i = 0; i < driver_desc->num_page_tables; ++i) {
+ status = gasket_page_table_system_status(gasket_dev->page_table[i]);
+ if (status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev,
+ "Page table %d reported status %d.\n",
+ i, status);
+ return status;
+ }
+ }
+
+ return GASKET_STATUS_ALIVE;
+}
+
+static ssize_t
+gasket_write_mappable_regions(char *buf,
+ const struct gasket_driver_desc *driver_desc,
+ int bar_index)
+{
+ int i;
+ ssize_t written;
+ ssize_t total_written = 0;
+ ulong min_addr, max_addr;
+ struct gasket_bar_desc bar_desc =
+ driver_desc->bar_descriptions[bar_index];
+
+ if (bar_desc.permissions == GASKET_NOMAP)
+ return 0;
+ for (i = 0;
+ i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
+ i++) {
+ min_addr = bar_desc.mappable_regions[i].start -
+ driver_desc->legacy_mmap_address_offset;
+ max_addr = bar_desc.mappable_regions[i].start -
+ driver_desc->legacy_mmap_address_offset +
+ bar_desc.mappable_regions[i].length_bytes;
+ written = scnprintf(buf, PAGE_SIZE - total_written,
+ "0x%08lx-0x%08lx\n", min_addr, max_addr);
+ total_written += written;
+ buf += written;
+ }
+ return total_written;
+}
+
+static ssize_t gasket_sysfs_data_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret = 0;
+ ssize_t current_written = 0;
+ const struct gasket_driver_desc *driver_desc;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+ const struct gasket_bar_desc *bar_desc;
+ enum gasket_sysfs_attribute_type sysfs_type;
+
+ gasket_dev = gasket_sysfs_get_device_data(device);
+ if (!gasket_dev) {
+ dev_err(device, "No sysfs mapping found for device\n");
+ return 0;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ dev_err(device, "No sysfs attr found for device\n");
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return 0;
+ }
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+
+ sysfs_type =
+ (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
+ switch (sysfs_type) {
+ case ATTR_BAR_OFFSETS:
+ for (i = 0; i < GASKET_NUM_BARS; i++) {
+ bar_desc = &driver_desc->bar_descriptions[i];
+ if (bar_desc->size == 0)
+ continue;
+ current_written =
+ snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
+ (ulong)bar_desc->base);
+ buf += current_written;
+ ret += current_written;
+ }
+ break;
+ case ATTR_BAR_SIZES:
+ for (i = 0; i < GASKET_NUM_BARS; i++) {
+ bar_desc = &driver_desc->bar_descriptions[i];
+ if (bar_desc->size == 0)
+ continue;
+ current_written =
+ snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
+ (ulong)bar_desc->size);
+ buf += current_written;
+ ret += current_written;
+ }
+ break;
+ case ATTR_DRIVER_VERSION:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ gasket_dev->internal_desc->driver_desc->driver_version);
+ break;
+ case ATTR_FRAMEWORK_VERSION:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ GASKET_FRAMEWORK_VERSION);
+ break;
+ case ATTR_DEVICE_TYPE:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ gasket_dev->internal_desc->driver_desc->name);
+ break;
+ case ATTR_HARDWARE_REVISION:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->hardware_revision);
+ break;
+ case ATTR_PCI_ADDRESS:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
+ break;
+ case ATTR_STATUS:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ gasket_num_name_lookup(gasket_dev->status,
+ gasket_status_name_table));
+ break;
+ case ATTR_IS_DEVICE_OWNED:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->dev_info.ownership.is_owned);
+ break;
+ case ATTR_DEVICE_OWNER:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->dev_info.ownership.owner);
+ break;
+ case ATTR_WRITE_OPEN_COUNT:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->dev_info.ownership.write_open_count);
+ break;
+ case ATTR_RESET_COUNT:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
+ break;
+ case ATTR_USER_MEM_RANGES:
+ for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ current_written =
+ gasket_write_mappable_regions(buf, driver_desc,
+ i);
+ buf += current_written;
+ ret += current_written;
+ }
+ break;
+ default:
+ dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
+ attr->attr.name);
+ ret = 0;
+ break;
+ }
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return ret;
+}
+
+/* These attributes apply to all Gasket driver instances. */
+static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
+ GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
+ GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
+ GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
+ ATTR_DRIVER_VERSION),
+ GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
+ ATTR_FRAMEWORK_VERSION),
+ GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
+ GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
+ ATTR_HARDWARE_REVISION),
+ GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
+ GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
+ GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
+ ATTR_IS_DEVICE_OWNED),
+ GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
+ ATTR_DEVICE_OWNER),
+ GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
+ ATTR_WRITE_OPEN_COUNT),
+ GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
+ GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
+ ATTR_USER_MEM_RANGES),
+ GASKET_END_OF_ATTR_ARRAY
+};
+
+/* Add a char device and related info. */
+static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
+ const struct file_operations *file_ops,
+ struct module *owner)
+{
+ int ret;
+
+ cdev_init(&dev_info->cdev, file_ops);
+ dev_info->cdev.owner = owner;
+ ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
+ if (ret) {
+ dev_err(dev_info->gasket_dev_ptr->dev,
+ "cannot add char device [ret=%d]\n", ret);
+ return ret;
+ }
+ dev_info->cdev_added = 1;
+
+ return 0;
+}
+
+/* Disable device operations. */
+void gasket_disable_device(struct gasket_dev *gasket_dev)
+{
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+ int i;
+
+ /* Only delete the device if it has been successfully added. */
+ if (gasket_dev->dev_info.cdev_added)
+ cdev_del(&gasket_dev->dev_info.cdev);
+
+ gasket_dev->status = GASKET_STATUS_DEAD;
+
+ gasket_interrupt_cleanup(gasket_dev);
+
+ for (i = 0; i < driver_desc->num_page_tables; ++i) {
+ if (gasket_dev->page_table[i]) {
+ gasket_page_table_reset(gasket_dev->page_table[i]);
+ gasket_page_table_cleanup(gasket_dev->page_table[i]);
+ }
+ }
+}
+EXPORT_SYMBOL(gasket_disable_device);
+
+/*
+ * Registered descriptor lookup.
+ *
+ * Precondition: Called with g_mutex held (to avoid a race on return).
+ * Returns NULL if no matching device was found.
+ */
+static struct gasket_internal_desc *
+lookup_internal_desc(struct pci_dev *pci_dev)
+ __must_hold(&g_mutex)
+{
+ int i;
+
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (g_descs[i].driver_desc &&
+ g_descs[i].driver_desc->pci_id_table &&
+ pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
+ return &g_descs[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * Verifies that the user has permission to perform the requested mapping:
+ * the device must be alive, the requested VMA flags must be a subset of the
+ * region's permissions, and writes require device ownership (or CAP_SYS_ADMIN).
+ */
+static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma,
+ int bar_permissions)
+{
+ int requested_permissions;
+ /* Always allow sysadmin to access. */
+ if (capable(CAP_SYS_ADMIN))
+ return true;
+
+ /* Never allow non-sysadmins to access a dead device. */
+ if (gasket_dev->status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev, "Device is dead.\n");
+ return false;
+ }
+
+ /* Make sure that no disallowed access flags are requested. */
+ requested_permissions =
+ (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
+ if (requested_permissions & ~(bar_permissions)) {
+ dev_dbg(gasket_dev->dev,
+ "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x\n",
+ requested_permissions, bar_permissions);
+ return false;
+ }
+
+ /* Do not allow a non-owner to write. */
+ if ((vma->vm_flags & VM_WRITE) &&
+ !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
+ dev_dbg(gasket_dev->dev,
+ "Attempting to mmap a region for write without owning device\n");
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Verifies that the input address is within the region allocated to the
+ * coherent buffer.
+ */
+static bool
+gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
+ ulong address)
+{
+ struct gasket_coherent_buffer_desc coh_buff_desc =
+ driver_desc->coherent_buffer_description;
+
+ if (coh_buff_desc.permissions != GASKET_NOMAP) {
+ if ((address >= coh_buff_desc.base) &&
+ (address < coh_buff_desc.base + coh_buff_desc.size)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
+ ulong phys_addr)
+{
+ int i;
+ const struct gasket_driver_desc *driver_desc;
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ struct gasket_bar_desc bar_desc =
+ driver_desc->bar_descriptions[i];
+
+ if (bar_desc.permissions != GASKET_NOMAP) {
+ if (phys_addr >= bar_desc.base &&
+ phys_addr < (bar_desc.base + bar_desc.size)) {
+ return i;
+ }
+ }
+ }
+ /* If we haven't found the address by now, it is invalid. */
+ return -EINVAL;
+}
+
+/*
+ * Sets the actual bounds to map, given the device's mappable region.
+ *
+ * Given the device's mappable region, along with the user-requested mapping
+ * start offset and length of the user region, determine how much of this
+ * mappable region can be mapped into the user's region (start/end offsets),
+ * and the physical offset (phys_offset) into the BAR where the mapping should
+ * begin (either the VMA's or region lower bound).
+ *
+ * In other words, this calculates the overlap between the VMA
+ * (bar_offset, requested_length) and the given gasket_mappable_region.
+ *
+ * Returns true if there's anything to map, and false otherwise.
+ */
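+/*
+ * Worked example (hypothetical numbers): for a mappable region of
+ * { start = 0x1000, length_bytes = 0x3000 } and a VMA with
+ * bar_offset = 0x0 and requested_length = 0x2000, the second branch
+ * below applies: *virt_offset = 0x1000 - 0x0 = 0x1000 and
+ * mappable_region = { 0x1000, min(0x2000 - 0x1000, 0x3000) = 0x1000 };
+ * the first 0x1000 bytes of the VMA are skipped and the next 0x1000
+ * bytes map the start of the region.
+ */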
+static bool
+gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
+ ulong bar_offset, ulong requested_length,
+ struct gasket_mappable_region *mappable_region,
+ ulong *virt_offset)
+{
+ ulong range_start = region->start;
+ ulong range_length = region->length_bytes;
+ ulong range_end = range_start + range_length;
+
+ *virt_offset = 0;
+ if (bar_offset + requested_length <= range_start) {
+ /*
+ * If the requested region is completely below the range,
+ * there is nothing to map.
+ */
+ return false;
+ } else if (bar_offset <= range_start) {
+ /*
+ * If the bar offset is below this range's start but the
+ * requested length continues into it:
+ * 1) Only map starting from the beginning of this range's
+ * physical offset, so we don't map unmappable memory.
+ * 2) The length of the virtual memory to not map is the
+ * delta between the bar offset and the mappable start
+ * (and since the mappable start is bigger, start - offset).
+ * 3) The map length is the minimum of the mappable requested
+ * length (requested_length - virt_offset) and the actual
+ * mappable length of the range.
+ */
+ mappable_region->start = range_start;
+ *virt_offset = range_start - bar_offset;
+ mappable_region->length_bytes =
+ min(requested_length - *virt_offset, range_length);
+ return true;
+ } else if (bar_offset > range_start &&
+ bar_offset < range_end) {
+ /*
+ * If the bar offset is within this range:
+ * 1) Map starting from the bar offset.
+ * 2) Because there is no forbidden memory between the
+ * bar offset and the range start,
+ * virt_offset is 0.
+ * 3) The map length is the minimum of the requested
+ * length and the remaining length in the buffer
+ * (range_end - bar_offset)
+ */
+ mappable_region->start = bar_offset;
+ *virt_offset = 0;
+ mappable_region->length_bytes =
+ min(requested_length, range_end - bar_offset);
+ return true;
+ }
+
+ /*
+ * If the requested [start] offset is above range_end,
+ * there's nothing to map.
+ */
+ return false;
+}
+
+/*
+ * Calculates the offset where the VMA range begins in its containing BAR.
+ * The offset is written into bar_offset on success.
+ * Returns zero on success, anything else on error.
+ */
+static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
+ const struct vm_area_struct *vma,
+ ulong *bar_offset)
+{
+ ulong raw_offset;
+ int bar_index;
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+
+ raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
+ driver_desc->legacy_mmap_address_offset;
+ bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
+ if (bar_index < 0) {
+ dev_err(gasket_dev->dev,
+ "Unable to find matching bar for address 0x%lx\n",
+ raw_offset);
+ trace_gasket_mmap_exit(bar_index);
+ return bar_index;
+ }
+ *bar_offset =
+ raw_offset - driver_desc->bar_descriptions[bar_index].base;
+
+ return 0;
+}
+
+int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma,
+ const struct gasket_mappable_region *map_region)
+{
+ ulong bar_offset;
+ ulong virt_offset;
+ struct gasket_mappable_region mappable_region;
+ int ret;
+
+ if (map_region->length_bytes == 0)
+ return 0;
+
+ ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
+ if (ret)
+ return ret;
+
+ if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
+ vma->vm_end - vma->vm_start,
+ &mappable_region, &virt_offset))
+ return 1;
+
+ /*
+ * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
+ * PAGE_SIZE! Trust me. I have the scars.
+ *
+ * Next multiple of y: ceil_div(x, y) * y
+ */
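+ /*
+ * E.g. (hypothetical values): length_bytes = 0x1800 with a 0x1000
+ * PAGE_SIZE zaps DIV_ROUND_UP(0x1800, 0x1000) * 0x1000 = 0x2000 bytes.
+ */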
+ zap_vma_ptes(vma, vma->vm_start + virt_offset,
+ DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
+ PAGE_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(gasket_mm_unmap_region);
+
+/* Maps a virtual address + range to a physical offset of a BAR. */
+static enum do_map_region_status
+do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
+ struct gasket_mappable_region *mappable_region)
+{
+ /* Maximum size of a single call to io_remap_pfn_range. */
+ /* I pulled this number out of thin air. */
+ const ulong max_chunk_size = 64 * 1024 * 1024;
+ ulong chunk_size, mapped_bytes = 0;
+
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+
+ ulong bar_offset, virt_offset;
+ struct gasket_mappable_region region_to_map;
+ ulong phys_offset, map_length;
+ ulong virt_base, phys_base;
+ int bar_index, ret;
+
+ ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
+ if (ret)
+ return DO_MAP_REGION_INVALID;
+
+ if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
+ vma->vm_end - vma->vm_start,
+ &region_to_map, &virt_offset))
+ return DO_MAP_REGION_INVALID;
+ phys_offset = region_to_map.start;
+ map_length = region_to_map.length_bytes;
+
+ virt_base = vma->vm_start + virt_offset;
+ bar_index =
+ gasket_get_bar_index(gasket_dev,
+ (vma->vm_pgoff << PAGE_SHIFT) +
+ driver_desc->legacy_mmap_address_offset);
+ phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
+ while (mapped_bytes < map_length) {
+ /*
+ * io_remap_pfn_range can take a while, so we chunk its
+ * calls and call cond_resched between each.
+ */
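+ /*
+ * E.g. a (hypothetical) 100 MiB mapping proceeds as a 64 MiB
+ * chunk followed by a 36 MiB chunk, rescheduling before each.
+ */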
+ chunk_size = min(max_chunk_size, map_length - mapped_bytes);
+
+ cond_resched();
+ ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
+ (phys_base + mapped_bytes) >>
+ PAGE_SHIFT, chunk_size,
+ vma->vm_page_prot);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Error remapping PFN range.\n");
+ goto fail;
+ }
+ mapped_bytes += chunk_size;
+ }
+
+ return DO_MAP_REGION_SUCCESS;
+
+fail:
+ /* Unmap the partial chunk we mapped. */
+ mappable_region->length_bytes = mapped_bytes;
+ if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
+ dev_err(gasket_dev->dev,
+ "Error unmapping partial region 0x%lx (0x%lx bytes)\n",
+ (ulong)virt_offset,
+ (ulong)mapped_bytes);
+
+ return DO_MAP_REGION_FAILURE;
+}
+
+/* Map a region of coherent memory. */
+static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma)
+{
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+ const ulong requested_length = vma->vm_end - vma->vm_start;
+ int ret;
+ ulong permissions;
+
+ if (requested_length == 0 || requested_length >
+ gasket_dev->coherent_buffer.length_bytes) {
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ permissions = driver_desc->coherent_buffer_description.permissions;
+ if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
+ dev_err(gasket_dev->dev, "Permission checking failed.\n");
+ trace_gasket_mmap_exit(-EPERM);
+ return -EPERM;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ret = remap_pfn_range(vma, vma->vm_start,
+ (gasket_dev->coherent_buffer.phys_base) >>
+ PAGE_SHIFT, requested_length, vma->vm_page_prot);
+ if (ret) {
+ dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
+ ret);
+ trace_gasket_mmap_exit(ret);
+ return ret;
+ }
+
+ /*
+ * Record the user virtual to dma_address mapping that was
+ * created by the kernel.
+ */
+ gasket_set_user_virt(gasket_dev, requested_length,
+ gasket_dev->coherent_buffer.phys_base,
+ vma->vm_start);
+ return 0;
+}
+
+/* Map a device's BARs into user space. */
+static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int i, ret;
+ int bar_index;
+ int has_mapped_anything = 0;
+ ulong permissions;
+ ulong raw_offset, vma_size;
+ bool is_coherent_region;
+ const struct gasket_driver_desc *driver_desc;
+ struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
+ const struct gasket_bar_desc *bar_desc;
+ struct gasket_mappable_region *map_regions = NULL;
+ int num_map_regions = 0;
+ enum do_map_region_status map_status;
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+
+ if (vma->vm_start & ~PAGE_MASK) {
+ dev_err(gasket_dev->dev,
+ "Base address not page-aligned: 0x%lx\n",
+ vma->vm_start);
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Calculate the offset of this range into physical mem. */
+ raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
+ driver_desc->legacy_mmap_address_offset;
+ vma_size = vma->vm_end - vma->vm_start;
+ trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
+ vma_size);
+
+ /*
+ * Check if the raw offset is within a bar region. If not, check if it
+ * is a coherent region.
+ */
+ bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
+ is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
+ if (bar_index < 0 && !is_coherent_region) {
+ dev_err(gasket_dev->dev,
+ "Unable to find matching bar for address 0x%lx\n",
+ raw_offset);
+ trace_gasket_mmap_exit(bar_index);
+ return bar_index;
+ }
+ if (bar_index >= 0 && is_coherent_region) {
+ dev_err(gasket_dev->dev,
+ "Address 0x%lx matches both a BAR and the coherent buffer\n",
+ raw_offset);
+ trace_gasket_mmap_exit(bar_index);
+ return -EINVAL;
+ }
+
+ vma->vm_private_data = gasket_dev;
+
+ if (is_coherent_region)
+ return gasket_mmap_coherent(gasket_dev, vma);
+
+ /* Everything in the rest of this function is for normal BAR mapping. */
+
+ /*
+ * Subtract the base of the bar from the raw offset to get the
+ * memory location within the bar to map.
+ */
+ bar_desc = &driver_desc->bar_descriptions[bar_index];
+ permissions = bar_desc->permissions;
+ if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
+ dev_err(gasket_dev->dev, "Permission checking failed.\n");
+ trace_gasket_mmap_exit(-EPERM);
+ return -EPERM;
+ }
+
+ if (driver_desc->get_mappable_regions_cb) {
+ ret = driver_desc->get_mappable_regions_cb(gasket_dev,
+ bar_index,
+ &map_regions,
+ &num_map_regions);
+ if (ret)
+ return ret;
+ } else {
+ /* Permissions were already checked against bar_desc above. */
+ num_map_regions = bar_desc->num_mappable_regions;
+ map_regions = kcalloc(num_map_regions,
+ sizeof(*bar_desc->mappable_regions),
+ GFP_KERNEL);
+ if (map_regions) {
+ memcpy(map_regions, bar_desc->mappable_regions,
+ num_map_regions *
+ sizeof(*bar_desc->mappable_regions));
+ }
+ }
+
+ if (!map_regions || num_map_regions == 0) {
+ dev_err(gasket_dev->dev, "No mappable regions returned!\n");
+ kfree(map_regions);
+ return -EINVAL;
+ }
+
+ /* Marks the VMA's pages as uncacheable. */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ for (i = 0; i < num_map_regions; i++) {
+ map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
+ /* Try the next region if this one was not mappable. */
+ if (map_status == DO_MAP_REGION_INVALID)
+ continue;
+ if (map_status == DO_MAP_REGION_FAILURE) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ has_mapped_anything = 1;
+ }
+
+ kfree(map_regions);
+
+ /* If we could not map any memory, the request was invalid. */
+ if (!has_mapped_anything) {
+ dev_err(gasket_dev->dev,
+ "Map request did not contain a valid region.\n");
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ trace_gasket_mmap_exit(0);
+ return 0;
+
+fail:
+ /* Need to unmap any mapped ranges. */
+ num_map_regions = i;
+ for (i = 0; i < num_map_regions; i++)
+ if (gasket_mm_unmap_region(gasket_dev, vma,
+ &map_regions[i]))
+ dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
+ i);
+ kfree(map_regions);
+
+ return ret;
+}
+
+/*
+ * Open the char device file.
+ *
+ * If the open is for writing, and the device is not owned, this process becomes
+ * the owner. If the open is for writing and the device is already owned by
+ * some other process, it is an error. If this process is the owner, increment
+ * the open count.
+ *
+ * Returns 0 if successful, a negative error number otherwise.
+ */
+static int gasket_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct gasket_dev *gasket_dev;
+ const struct gasket_driver_desc *driver_desc;
+ struct gasket_ownership *ownership;
+ char task_name[TASK_COMM_LEN];
+ struct gasket_cdev_info *dev_info =
+ container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
+ struct pid_namespace *pid_ns = task_active_pid_ns(current);
+ bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
+
+ gasket_dev = dev_info->gasket_dev_ptr;
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ ownership = &dev_info->ownership;
+ get_task_comm(task_name, current);
+ filp->private_data = gasket_dev;
+ inode->i_size = 0;
+
+ dev_dbg(gasket_dev->dev,
+ "Attempting to open with tgid %u (%s) (f_mode: 0%03o, "
+ "fmode_write: %d is_root: %u)\n",
+ current->tgid, task_name, filp->f_mode,
+ (filp->f_mode & FMODE_WRITE), is_root);
+
+ /* Always allow non-writing accesses. */
+ if (!(filp->f_mode & FMODE_WRITE)) {
+ dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
+ return 0;
+ }
+
+ mutex_lock(&gasket_dev->mutex);
+
+ dev_dbg(gasket_dev->dev,
+ "Current owner open count (owning tgid %u): %d.\n",
+ ownership->owner, ownership->write_open_count);
+
+ /* Opening a node owned by another TGID is an error (unless root). */
+ if (ownership->is_owned && ownership->owner != current->tgid &&
+ !is_root) {
+ dev_err(gasket_dev->dev,
+ "Process %u is opening a node held by %u.\n",
+ current->tgid, ownership->owner);
+ mutex_unlock(&gasket_dev->mutex);
+ return -EPERM;
+ }
+
+ /* If the node is not owned, assign it to the current TGID. */
+ if (!ownership->is_owned) {
+ ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
+ driver_desc->device_open_cb);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Error in device open cb: %d\n", ret);
+ mutex_unlock(&gasket_dev->mutex);
+ return ret;
+ }
+ ownership->is_owned = 1;
+ ownership->owner = current->tgid;
+ dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
+ ownership->owner);
+ }
+
+ ownership->write_open_count++;
+
+ dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
+ ownership->owner, ownership->write_open_count);
+
+ mutex_unlock(&gasket_dev->mutex);
+ return 0;
+}
+
+/*
+ * Called on a close of the device file. If this process is the owner,
+ * decrement the open count. On last close by the owner, free up buffers and
+ * eventfd contexts, and release ownership.
+ *
+ * Returns 0 if successful, a negative error number otherwise.
+ */
+static int gasket_release(struct inode *inode, struct file *file)
+{
+ int i;
+ struct gasket_dev *gasket_dev;
+ struct gasket_ownership *ownership;
+ const struct gasket_driver_desc *driver_desc;
+ char task_name[TASK_COMM_LEN];
+ struct gasket_cdev_info *dev_info =
+ container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
+ struct pid_namespace *pid_ns = task_active_pid_ns(current);
+ bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
+
+ gasket_dev = dev_info->gasket_dev_ptr;
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ ownership = &dev_info->ownership;
+ get_task_comm(task_name, current);
+ mutex_lock(&gasket_dev->mutex);
+
+ dev_dbg(gasket_dev->dev,
+ "Releasing device node. Call origin: tgid %u (%s) "
+ "(f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
+ current->tgid, task_name, file->f_mode,
+ (file->f_mode & FMODE_WRITE), is_root);
+ dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
+ ownership->owner, ownership->write_open_count);
+
+ if (file->f_mode & FMODE_WRITE) {
+ ownership->write_open_count--;
+ if (ownership->write_open_count == 0) {
+ dev_dbg(gasket_dev->dev, "Device is now free\n");
+ ownership->is_owned = 0;
+ ownership->owner = 0;
+
+ /* Force chip reset before we unmap the page tables. */
+ if (driver_desc->device_reset_cb)
+ driver_desc->device_reset_cb(gasket_dev);
+
+ for (i = 0; i < driver_desc->num_page_tables; ++i) {
+ gasket_page_table_unmap_all(gasket_dev->page_table[i]);
+ gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
+ gasket_free_coherent_memory_all(gasket_dev, i);
+ }
+
+ /* Closes device, enters power save. */
+ gasket_check_and_invoke_callback_nolock(gasket_dev,
+ driver_desc->device_close_cb);
+ }
+ }
+
+ dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
+ ownership->owner, ownership->write_open_count);
+ mutex_unlock(&gasket_dev->mutex);
+ return 0;
+}
+
+/*
+ * Gasket ioctl dispatch function.
+ *
+ * Check if the ioctl is a generic ioctl. If not, pass the ioctl to the
+ * ioctl_handler_cb registered in the driver description.
+ * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
+ */
+static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
+{
+ struct gasket_dev *gasket_dev;
+ const struct gasket_driver_desc *driver_desc;
+ void __user *argp = (void __user *)arg;
+ char path[256];
+
+ gasket_dev = (struct gasket_dev *)filp->private_data;
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ if (!driver_desc) {
+ dev_dbg(gasket_dev->dev,
+ "Unable to find device descriptor for file %s\n",
+ d_path(&filp->f_path, path, sizeof(path)));
+ return -ENODEV;
+ }
+
+ if (!gasket_is_supported_ioctl(cmd)) {
+ /*
+ * The ioctl handler is not a standard Gasket callback, since
+ * it requires different arguments. This means we can't use
+ * check_and_invoke_callback.
+ */
+ if (driver_desc->ioctl_handler_cb)
+ return driver_desc->ioctl_handler_cb(filp, cmd, argp);
+
+ dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);
+ return -EINVAL;
+ }
+
+ return gasket_handle_ioctl(filp, cmd, argp);
+}
+
+/* File operations for all Gasket devices. */
+static const struct file_operations gasket_file_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .mmap = gasket_mmap,
+ .open = gasket_open,
+ .release = gasket_release,
+ .unlocked_ioctl = gasket_ioctl,
+};
+
+/* Performs final init and marks the device as active. */
+int gasket_enable_device(struct gasket_dev *gasket_dev)
+{
+ int tbl_idx;
+ int ret;
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+
+ ret = gasket_interrupt_init(gasket_dev, driver_desc->name,
+ driver_desc->interrupt_type,
+ driver_desc->interrupts,
+ driver_desc->num_interrupts,
+ driver_desc->interrupt_pack_width,
+ driver_desc->interrupt_bar_index,
+ driver_desc->wire_interrupt_offsets);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Critical failure to allocate interrupts: %d\n", ret);
+ gasket_interrupt_cleanup(gasket_dev);
+ return ret;
+ }
+
+ for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
+ dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
+ tbl_idx);
+ ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
+ &gasket_dev->bar_data[driver_desc->page_table_bar_index],
+ &driver_desc->page_table_configs[tbl_idx],
+ gasket_dev->dev,
+ gasket_dev->pci_dev);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Couldn't init page table %d: %d\n",
+ tbl_idx, ret);
+ return ret;
+ }
+ /*
+ * Make sure that the page table is clear and set to simple
+ * addresses.
+ */
+ gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);
+ }
+
+ /*
+ * hardware_revision_cb returns a positive integer (the rev) if
+ * successful.
+ */
+ ret = check_and_invoke_callback(gasket_dev,
+ driver_desc->hardware_revision_cb);
+ if (ret < 0) {
+ dev_err(gasket_dev->dev,
+ "Error getting hardware revision: %d\n", ret);
+ return ret;
+ }
+ gasket_dev->hardware_revision = ret;
+
+ /* device_status_cb returns a device status, not an error code. */
+ gasket_dev->status = gasket_get_hw_status(gasket_dev);
+ if (gasket_dev->status == GASKET_STATUS_DEAD)
+ dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");
+
+ ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
+ driver_desc->module);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(gasket_enable_device);
+
+/*
+ * Add PCI gasket device.
+ *
+ * Called by Gasket device probe function.
+ * Allocates device metadata and maps device memory. The device driver must
+ * call gasket_enable_device after driver init is complete to place the device
+ * in active use.
+ */
+int gasket_pci_add_device(struct pci_dev *pci_dev,
+ struct gasket_dev **gasket_devp)
+{
+ int ret;
+ const char *kobj_name = dev_name(&pci_dev->dev);
+ struct gasket_internal_desc *internal_desc;
+ struct gasket_dev *gasket_dev;
+ const struct gasket_driver_desc *driver_desc;
+ struct device *parent;
+
+ pr_debug("add PCI device %s\n", kobj_name);
+
+ mutex_lock(&g_mutex);
+ internal_desc = lookup_internal_desc(pci_dev);
+ mutex_unlock(&g_mutex);
+ if (!internal_desc) {
+ dev_err(&pci_dev->dev,
+ "PCI add device called for unknown driver type\n");
+ return -ENODEV;
+ }
+
+ driver_desc = internal_desc->driver_desc;
+
+ parent = &pci_dev->dev;
+ ret = gasket_alloc_dev(internal_desc, parent, &gasket_dev, kobj_name);
+ if (ret)
+ return ret;
+ gasket_dev->pci_dev = pci_dev;
+ if (IS_ERR_OR_NULL(gasket_dev->dev_info.device)) {
+ pr_err("Cannot create %s device %s [ret = %ld]\n",
+ driver_desc->name, gasket_dev->dev_info.name,
+ PTR_ERR(gasket_dev->dev_info.device));
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ ret = gasket_setup_pci(pci_dev, gasket_dev);
+ if (ret)
+ goto fail2;
+
+ ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,
+ gasket_dev);
+ if (ret)
+ goto fail3;
+
+ /*
+ * Once we've created the mapping structures successfully, attempt to
+ * create a symlink to the pci directory of this object.
+ */
+ ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
+ &pci_dev->dev.kobj, dev_name(&pci_dev->dev));
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Cannot create sysfs pci link: %d\n", ret);
+ goto fail3;
+ }
+ ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
+ gasket_sysfs_generic_attrs);
+ if (ret)
+ goto fail4;
+
+ *gasket_devp = gasket_dev;
+ return 0;
+
+fail4:
+fail3:
+ gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
+fail2:
+ gasket_cleanup_pci(gasket_dev);
+ device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
+fail1:
+ gasket_free_dev(gasket_dev);
+ return ret;
+}
+EXPORT_SYMBOL(gasket_pci_add_device);
+
+/* Remove a PCI gasket device. */
+void gasket_pci_remove_device(struct pci_dev *pci_dev)
+{
+ int i;
+ struct gasket_internal_desc *internal_desc;
+ struct gasket_dev *gasket_dev = NULL;
+ const struct gasket_driver_desc *driver_desc;
+
+ /* Find the device desc. */
+ mutex_lock(&g_mutex);
+ internal_desc = lookup_internal_desc(pci_dev);
+ if (!internal_desc) {
+ mutex_unlock(&g_mutex);
+ return;
+ }
+ mutex_unlock(&g_mutex);
+
+ driver_desc = internal_desc->driver_desc;
+
+ /* Now find the specific device */
+ mutex_lock(&internal_desc->mutex);
+ for (i = 0; i < GASKET_DEV_MAX; i++) {
+ if (internal_desc->devs[i] &&
+ internal_desc->devs[i]->pci_dev == pci_dev) {
+ gasket_dev = internal_desc->devs[i];
+ break;
+ }
+ }
+ mutex_unlock(&internal_desc->mutex);
+
+ if (!gasket_dev)
+ return;
+
+ dev_dbg(gasket_dev->dev, "remove %s PCI gasket device\n",
+ internal_desc->driver_desc->name);
+
+ gasket_cleanup_pci(gasket_dev);
+
+ gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
+ device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
+ gasket_free_dev(gasket_dev);
+}
+EXPORT_SYMBOL(gasket_pci_remove_device);
+
+/**
+ * Lookup a name by number in a num_name table.
+ * @num: Number to lookup.
+ * @table: Array of num_name structures, the table for the lookup.
+ *
+ * Description: Searches for num in the table. If found, the
+ * corresponding name is returned; otherwise NULL
+ * is returned.
+ *
+ * The table must have a NULL name pointer at the end.
+ */
+const char *gasket_num_name_lookup(uint num,
+ const struct gasket_num_name *table)
+{
+ uint i = 0;
+
+ while (table[i].snn_name) {
+ if (num == table[i].snn_num)
+ break;
+ ++i;
+ }
+
+ return table[i].snn_name;
+}
+EXPORT_SYMBOL(gasket_num_name_lookup);
+
+int gasket_reset(struct gasket_dev *gasket_dev)
+{
+ int ret;
+
+ mutex_lock(&gasket_dev->mutex);
+ ret = gasket_reset_nolock(gasket_dev);
+ mutex_unlock(&gasket_dev->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(gasket_reset);
+
+int gasket_reset_nolock(struct gasket_dev *gasket_dev)
+{
+ int ret;
+ int i;
+ const struct gasket_driver_desc *driver_desc;
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ if (!driver_desc->device_reset_cb)
+ return 0;
+
+ ret = driver_desc->device_reset_cb(gasket_dev);
+ if (ret) {
+ dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* Reinitialize the page tables and interrupt framework. */
+ for (i = 0; i < driver_desc->num_page_tables; ++i)
+ gasket_page_table_reset(gasket_dev->page_table[i]);
+
+ ret = gasket_interrupt_reinit(gasket_dev);
+ if (ret) {
+ dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* Get current device health. */
+ gasket_dev->status = gasket_get_hw_status(gasket_dev);
+ if (gasket_dev->status == GASKET_STATUS_DEAD) {
+ dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(gasket_reset_nolock);
+
+gasket_ioctl_permissions_cb_t
+gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
+{
+ return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
+}
+EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);
+
+/* Get the driver structure for a given gasket_dev.
+ * @dev: pointer to gasket_dev, implementing the requested driver.
+ */
+const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
+{
+ return dev->internal_desc->driver_desc;
+}
+
+/* Get the device structure for a given gasket_dev.
+ * @dev: pointer to gasket_dev, implementing the requested driver.
+ */
+struct device *gasket_get_device(struct gasket_dev *dev)
+{
+ return dev->dev;
+}
+
+/**
+ * Waits on device until a register field reaches an expected value.
+ * @gasket_dev: Device struct.
+ * @bar: BAR containing the register.
+ * @offset: Register offset.
+ * @mask: Register mask.
+ * @val: Expected value.
+ * @max_retries: Maximum number of read/sleep iterations.
+ * @delay_ms: Delay between reads, in milliseconds.
+ *
+ * Description: Polls the Gasket register at offset until (value & mask)
+ * equals val, sleeping delay_ms between reads, for at most max_retries
+ * attempts. Returns 0 on success and -ETIMEDOUT otherwise.
+ */
+int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
+ u64 offset, u64 mask, u64 val,
+ uint max_retries, u64 delay_ms)
+{
+ uint retries = 0;
+ u64 tmp;
+
+ while (retries < max_retries) {
+ tmp = gasket_dev_read_64(gasket_dev, bar, offset);
+ if ((tmp & mask) == val)
+ return 0;
+ msleep(delay_ms);
+ retries++;
+ }
+ dev_dbg(gasket_dev->dev, "%s timeout: reg %llx timeout (%llu ms)\n",
+ __func__, offset, max_retries * delay_ms);
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(gasket_wait_with_reschedule);
+
+/* See gasket_core.h for description. */
+int gasket_register_device(const struct gasket_driver_desc *driver_desc)
+{
+ int i, ret;
+ int desc_idx = -1;
+ struct gasket_internal_desc *internal;
+
+ pr_debug("Loading %s driver version %s\n", driver_desc->name,
+ driver_desc->driver_version);
+ /* Check for duplicates and find a free slot. */
+ mutex_lock(&g_mutex);
+
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (g_descs[i].driver_desc == driver_desc) {
+ pr_err("%s driver already loaded/registered\n",
+ driver_desc->name);
+ mutex_unlock(&g_mutex);
+ return -EBUSY;
+ }
+ }
+
+ /* This and the above loop could be combined, but this reads easier. */
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (!g_descs[i].driver_desc) {
+ g_descs[i].driver_desc = driver_desc;
+ desc_idx = i;
+ break;
+ }
+ }
+ mutex_unlock(&g_mutex);
+
+ if (desc_idx == -1) {
+ pr_err("too many drivers loaded, max %d\n",
+ GASKET_FRAMEWORK_DESC_MAX);
+ return -EBUSY;
+ }
+
+ internal = &g_descs[desc_idx];
+ mutex_init(&internal->mutex);
+ memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
+ internal->class =
+ class_create(driver_desc->module, driver_desc->name);
+
+ if (IS_ERR(internal->class)) {
+ pr_err("Cannot register %s class [ret=%ld]\n",
+ driver_desc->name, PTR_ERR(internal->class));
+ ret = PTR_ERR(internal->class);
+ goto unregister_gasket_driver;
+ }
+
+ ret = register_chrdev_region(MKDEV(driver_desc->major,
+ driver_desc->minor), GASKET_DEV_MAX,
+ driver_desc->name);
+ if (ret) {
+ pr_err("cannot register %s char driver [ret=%d]\n",
+ driver_desc->name, ret);
+ goto destroy_class;
+ }
+
+ return 0;
+
+destroy_class:
+ class_destroy(internal->class);
+
+unregister_gasket_driver:
+ mutex_lock(&g_mutex);
+ g_descs[desc_idx].driver_desc = NULL;
+ mutex_unlock(&g_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(gasket_register_device);
+
+/* See gasket_core.h for description. */
+void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
+{
+ int i, desc_idx;
+ struct gasket_internal_desc *internal_desc = NULL;
+
+ mutex_lock(&g_mutex);
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (g_descs[i].driver_desc == driver_desc) {
+ internal_desc = &g_descs[i];
+ desc_idx = i;
+ break;
+ }
+ }
+
+ if (!internal_desc) {
+ mutex_unlock(&g_mutex);
+ pr_err("request to unregister unknown desc: %s, %d:%d\n",
+ driver_desc->name, driver_desc->major,
+ driver_desc->minor);
+ return;
+ }
+
+ unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
+ GASKET_DEV_MAX);
+
+ class_destroy(internal_desc->class);
+
+ /* Finally, effectively "remove" the driver. */
+ g_descs[desc_idx].driver_desc = NULL;
+ mutex_unlock(&g_mutex);
+
+ pr_debug("removed %s driver\n", driver_desc->name);
+}
+EXPORT_SYMBOL(gasket_unregister_device);
+
+static int __init gasket_init(void)
+{
+ int i;
+
+ pr_debug("%s\n", __func__);
+ mutex_lock(&g_mutex);
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ g_descs[i].driver_desc = NULL;
+ mutex_init(&g_descs[i].mutex);
+ }
+
+ gasket_sysfs_init();
+
+ mutex_unlock(&g_mutex);
+ return 0;
+}
+
+static void __exit gasket_exit(void)
+{
+ pr_debug("%s\n", __func__);
+}
+MODULE_DESCRIPTION("Google Gasket driver framework");
+MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
+module_init(gasket_init);
+module_exit(gasket_exit);
diff --git a/drivers/staging/gasket/gasket_core.h b/drivers/staging/gasket/gasket_core.h
new file mode 100644
index 000000000000..275fd0b345b6
--- /dev/null
+++ b/drivers/staging/gasket/gasket_core.h
@@ -0,0 +1,649 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Gasket generic driver. Defines the set of data types and functions necessary
+ * to define a driver using the Gasket generic driver framework.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __GASKET_CORE_H__
+#define __GASKET_CORE_H__
+
+#include <linux/cdev.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "gasket_constants.h"
+
+/**
+ * struct gasket_num_name - Map numbers to names.
+ * @snn_num: Number.
+ * @snn_name: Name associated with the number, a char pointer.
+ *
+ * This structure maps numbers to names. It is used to provide printable enum
+ * names, e.g. {0, "DEAD"} or {1, "ALIVE"}.
+ */
+struct gasket_num_name {
+ uint snn_num;
+ const char *snn_name;
+};
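+
+/*
+ * Illustrative example (hypothetical table); lookups via
+ * gasket_num_name_lookup() expect a terminating entry with a NULL name:
+ *
+ * static const struct gasket_num_name my_status_table[] = {
+ *         { 0, "DEAD" },
+ *         { 1, "ALIVE" },
+ *         { 0, NULL },
+ * };
+ */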
+
+/*
+ * Register location for packed interrupts.
+ * Each value indicates the location of an interrupt field (in units of
+ * gasket_driver_desc->interrupt_pack_width) within the containing register.
+ * In other words, this indicates the shift to use when creating a mask to
+ * extract/set bits within a register for a given interrupt.
+ */
+enum gasket_interrupt_packing {
+ PACK_0 = 0,
+ PACK_1 = 1,
+ PACK_2 = 2,
+ PACK_3 = 3,
+ UNPACKED = 4,
+};
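+
+/*
+ * For illustration (hypothetical values): with interrupt_pack_width = 7,
+ * an interrupt described with packing = PACK_2 has its index field placed
+ * at bit offset 2 * 7 = 14 within its register.
+ */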
+
+/* Type of the interrupt supported by the device. */
+enum gasket_interrupt_type {
+ PCI_MSIX = 0,
+ PCI_MSI = 1,
+ PLATFORM_WIRE = 2,
+};
+
+/*
+ * Used to describe a Gasket interrupt. Contains an interrupt index, a register,
+ * and packing data for that interrupt. The register and packing data
+ * fields are relevant only for PCI_MSIX interrupt type and can be
+ * set to 0 for everything else.
+ */
+struct gasket_interrupt_desc {
+ /* Device-wide interrupt index/number. */
+ int index;
+ /* The register offset controlling this interrupt. */
+ u64 reg;
+ /* The location of this interrupt inside register reg, if packed. */
+ int packing;
+};
+
+/* Offsets to the wire interrupt handling registers */
+struct gasket_wire_interrupt_offsets {
+ u64 pending_bit_array;
+ u64 mask_array;
+};
+
+/*
+ * This enum is used to identify memory regions being part of the physical
+ * memory that belongs to a device.
+ */
+enum mappable_area_type {
+ PCI_BAR = 0, /* Default */
+ BUS_REGION, /* For SYSBUS devices, e.g. AXI. */
+ COHERENT_MEMORY
+};
+
+/*
+ * Metadata for each BAR mapping.
+ * This struct tracks PCI memory, I/O space, AXI, and coherent memory
+ * areas, i.e. memory objects that can be referenced in the device's
+ * mmap function.
+ */
+struct gasket_bar_data {
+ /* Virtual base address. */
+ u8 __iomem *virt_base;
+
+ /* Physical base address. */
+ ulong phys_base;
+
+ /* Length of the mapping. */
+ ulong length_bytes;
+
+ /* Type of mappable area */
+ enum mappable_area_type type;
+};
+
+/* Maintains device open ownership data. */
+struct gasket_ownership {
+ /* 1 if the device is owned, 0 otherwise. */
+ int is_owned;
+
+ /* TGID of the owner. */
+ pid_t owner;
+
+ /* Count of current device opens in write mode. */
+ int write_open_count;
+};
+
+/* Page table modes of operation. */
+enum gasket_page_table_mode {
+ /* The page table is partitionable as normal, all simple by default. */
+ GASKET_PAGE_TABLE_MODE_NORMAL,
+
+ /* All entries are always simple. */
+ GASKET_PAGE_TABLE_MODE_SIMPLE,
+
+ /* All entries are always extended. No extended bit is used. */
+ GASKET_PAGE_TABLE_MODE_EXTENDED,
+};
+
+/* Page table configuration. One per table. */
+struct gasket_page_table_config {
+ /* The identifier/index of this page table. */
+ int id;
+
+ /* The operation mode of this page table. */
+ enum gasket_page_table_mode mode;
+
+ /* Total (first-level) entries in this page table. */
+ ulong total_entries;
+
+ /* Base register for the page table. */
+ int base_reg;
+
+ /*
+ * Register containing the extended page table. This value is unused in
+ * GASKET_PAGE_TABLE_MODE_SIMPLE and GASKET_PAGE_TABLE_MODE_EXTENDED
+ * modes.
+ */
+ int extended_reg;
+
+ /* The bit index indicating whether a PT entry is extended. */
+ int extended_bit;
+};
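+
+/*
+ * Illustrative config (hypothetical values): a simple-mode table with
+ * 4096 first-level entries, controlled via register index 0:
+ *
+ * static const struct gasket_page_table_config my_pt_config = {
+ *         .id = 0,
+ *         .mode = GASKET_PAGE_TABLE_MODE_SIMPLE,
+ *         .total_entries = 4096,
+ *         .base_reg = 0,
+ * };
+ */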
+
+/* Maintains information about a device node. */
+struct gasket_cdev_info {
+ /* The internal name of this device. */
+ char name[GASKET_NAME_MAX];
+
+ /* Device number. */
+ dev_t devt;
+
+ /* Kernel-internal device structure. */
+ struct device *device;
+
+ /* Character device for real. */
+ struct cdev cdev;
+
+ /* Flag indicating if cdev_add has been called for this device. */
+ int cdev_added;
+
+ /* Pointer to the overall gasket_dev struct for this device. */
+ struct gasket_dev *gasket_dev_ptr;
+
+ /* Ownership data for the device in question. */
+ struct gasket_ownership ownership;
+};
+
+/* Describes the offset and length of mmapable device BAR regions. */
+struct gasket_mappable_region {
+ u64 start;
+ u64 length_bytes;
+};
+
+/* Describe the offset, size, and permissions for a device bar. */
+struct gasket_bar_desc {
+ /*
+ * The size of each PCI BAR range, in bytes. If a value is 0, that BAR
+ * will not be mapped into kernel space at all.
+ * For devices with 64 bit BARs, only elements 0, 2, and 4 should be
+ * populated, and 1, 3, and 5 should be set to 0.
+ * For example, for a device mapping 1M in each of the first two 64-bit
+ * BARs, this field would be set as { 0x100000, 0, 0x100000, 0, 0, 0 }
+ * (one number per bar_desc struct.)
+ */
+ u64 size;
+ /* The permissions for this bar. (Should be VM_WRITE/VM_READ/VM_EXEC,
+ * and can be or'd.) If set to GASKET_NOMAP, the bar will
+ * not be used for mmapping.
+ */
+ ulong permissions;
+ /* The memory address corresponding to the base of this bar, if used. */
+ u64 base;
+ /* The number of mappable regions in this bar. */
+ int num_mappable_regions;
+
+ /* The mappable subregions of this bar. */
+ const struct gasket_mappable_region *mappable_regions;
+
+ /* Type of mappable area */
+ enum mappable_area_type type;
+};
+
+/* Describes the offset, size, and permissions for a coherent buffer. */
+struct gasket_coherent_buffer_desc {
+ /* The size of the coherent buffer. */
+ u64 size;
+
+ /* The permissions for this bar. (Should be VM_WRITE/VM_READ/VM_EXEC,
+ * and can be or'd.) If set to GASKET_NOMAP, the bar will
+ * not be used for mmapping.
+ */
+ ulong permissions;
+
+ /* device side address. */
+ u64 base;
+};
+
+/* Coherent buffer structure. */
+struct gasket_coherent_buffer {
+ /* Virtual base address. */
+ u8 __iomem *virt_base;
+
+ /* Physical base address. */
+ ulong phys_base;
+
+ /* Length of the mapping. */
+ ulong length_bytes;
+};
+
+/* Description of Gasket-specific permissions in the mmap field. */
+enum gasket_mapping_options { GASKET_NOMAP = 0 };
+
+/* This struct represents an undefined bar that should never be mapped. */
+#define GASKET_UNUSED_BAR \
+ { \
+ 0, GASKET_NOMAP, 0, 0, NULL, 0 \
+ }
+
+/* Internal data for a Gasket device. See gasket_core.c for more information. */
+struct gasket_internal_desc;
+
+#define MAX_NUM_COHERENT_PAGES 16
+
+/*
+ * Device data for Gasket device instances.
+ *
+ * This structure contains the data required to manage a Gasket device.
+ */
+struct gasket_dev {
+ /* Pointer to the internal driver description for this device. */
+ struct gasket_internal_desc *internal_desc;
+
+ /* Device info */
+ struct device *dev;
+
+ /* PCI subsystem metadata. */
+ struct pci_dev *pci_dev;
+
+ /* This device's index into internal_desc->devs. */
+ int dev_idx;
+
+ /* The name of this device, as reported by the kernel. */
+ char kobj_name[GASKET_NAME_MAX];
+
+ /* Virtual address of mapped BAR memory range. */
+ struct gasket_bar_data bar_data[GASKET_NUM_BARS];
+
+ /* Coherent buffer. */
+ struct gasket_coherent_buffer coherent_buffer;
+
+ /* Number of page tables for this device. */
+ int num_page_tables;
+
+ /* Address translations. Page tables have a private implementation. */
+ struct gasket_page_table *page_table[GASKET_MAX_NUM_PAGE_TABLES];
+
+ /* Interrupt data for this device. */
+ struct gasket_interrupt_data *interrupt_data;
+
+ /* Status for this device - GASKET_STATUS_ALIVE or _DEAD. */
+ uint status;
+
+ /* Number of times this device has been reset. */
+ uint reset_count;
+
+ /* Dev information for the cdev node. */
+ struct gasket_cdev_info dev_info;
+
+ /* Hardware revision value for this device. */
+ int hardware_revision;
+
+ /* Protects access to per-device data (i.e. this structure). */
+ struct mutex mutex;
+
+ /* cdev hash tracking/membership structure, Accel and legacy. */
+ /* Unused until Accel is upstreamed. */
+ struct hlist_node hlist_node;
+ struct hlist_node legacy_hlist_node;
+};
+
+/* Type of the ioctl handler callback. */
+typedef long (*gasket_ioctl_handler_cb_t)(struct file *file, uint cmd,
+ void __user *argp);
+/* Type of the ioctl permissions check callback. See below. */
+typedef int (*gasket_ioctl_permissions_cb_t)(struct file *filp, uint cmd,
+ void __user *argp);
+
+/*
+ * Device type descriptor.
+ *
+ * This structure contains device-specific data needed to identify and address a
+ * type of device to be administered via the Gasket generic driver.
+ *
+ * Device IDs are per-driver. In other words, two drivers using the Gasket
+ * framework will each have a distinct device 0 (for example).
+ */
+struct gasket_driver_desc {
+ /* The name of this device type. */
+ const char *name;
+
+ /* The name of this specific device model. */
+ const char *chip_model;
+
+ /* The version of the chip specified in chip_model. */
+ const char *chip_version;
+
+ /* The version of this driver: "1.0.0", "2.1.3", etc. */
+ const char *driver_version;
+
+ /*
+ * Non-zero if we should create "legacy" (device and device-class-
+ * specific) character devices and sysfs nodes.
+ */
+ /* Unused until Accel is upstreamed. */
+ int legacy_support;
+
+ /* Major and minor numbers identifying the device. */
+ int major, minor;
+
+ /* Module structure for this driver. */
+ struct module *module;
+
+ /* PCI ID table. */
+ const struct pci_device_id *pci_id_table;
+
+ /* The number of page tables handled by this driver. */
+ int num_page_tables;
+
+ /* The index of the bar containing the page tables. */
+ int page_table_bar_index;
+
+ /* Registers used to control each page table. */
+ const struct gasket_page_table_config *page_table_configs;
+
+ /* The bit index indicating whether a PT entry is extended. */
+ int page_table_extended_bit;
+
+ /*
+ * Legacy mmap address adjustment for legacy devices only. Should be 0
+ * for any new device.
+ */
+ ulong legacy_mmap_address_offset;
+
+ /* Set of 6 bar descriptions that describe all PCIe bars.
+ * Note that BUS/AXI devices (i.e. non PCI devices) use those.
+ */
+ struct gasket_bar_desc bar_descriptions[GASKET_NUM_BARS];
+
+ /*
+ * Coherent buffer description.
+ */
+ struct gasket_coherent_buffer_desc coherent_buffer_description;
+
+ /* Offset of wire interrupt registers. */
+ const struct gasket_wire_interrupt_offsets *wire_interrupt_offsets;
+
+ /* Interrupt type. (One of gasket_interrupt_type). */
+ int interrupt_type;
+
+ /* Index of the bar containing the interrupt registers to program. */
+ int interrupt_bar_index;
+
+ /* Number of interrupts in the gasket_interrupt_desc array */
+ int num_interrupts;
+
+ /* Description of the interrupts for this device. */
+ const struct gasket_interrupt_desc *interrupts;
+
+ /*
+ * If this device packs multiple interrupt->MSI-X mappings into a
+ * single register (i.e., "uses packed interrupts"), only a single bit
+ * width is supported for each interrupt mapping (unpacked/"full-width"
+ * interrupts are always supported). This value specifies that width. If
+ * packed interrupts are not used, this value is ignored.
+ */
+ int interrupt_pack_width;
+
+ /* Driver callback functions - all may be NULL */
+ /*
+ * device_open_cb: Callback for when a device node is opened in write
+ * mode.
+ * @dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback should perform device-specific setup that needs to
+ * occur only once when a device is first opened.
+ */
+ int (*device_open_cb)(struct gasket_dev *dev);
+
+ /*
+ * device_release_cb: Callback when a device is closed.
+ * @gasket_dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback is called whenever a device node fd is closed, as
+ * opposed to device_close_cb, which is called when the _last_
+ * descriptor for an open file is closed. This call is intended to
+ * handle any per-user or per-fd cleanup.
+ */
+ int (*device_release_cb)(struct gasket_dev *gasket_dev,
+ struct file *file);
+
+ /*
+ * device_close_cb: Callback for when a device node is closed for the
+ * last time.
+ * @dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback should perform device-specific cleanup that only
+ * needs to occur when the last reference to a device node is closed.
+ *
+ * This call is intended to handle any device-wide cleanup, as opposed
+ * to per-fd cleanup (which should be handled by device_release_cb).
+ */
+ int (*device_close_cb)(struct gasket_dev *dev);
+
+ /*
+ * get_mappable_regions_cb: Get descriptors of mappable device memory.
+ * @gasket_dev: Pointer to the struct gasket_dev for this device.
+ * @bar_index: BAR for which to retrieve memory ranges.
+ * @mappable_regions: Out-pointer to the list of mappable regions on the
+ * device/BAR for this process.
+ * @num_mappable_regions: Out-pointer for the size of mappable_regions.
+ *
+ * Called when handling mmap(), this callback is used to determine which
+ * regions of device memory may be mapped by the current process. This
+ * information is then compared to mmap request to determine which
+ * regions to actually map.
+ */
+ int (*get_mappable_regions_cb)(struct gasket_dev *gasket_dev,
+ int bar_index,
+ struct gasket_mappable_region **mappable_regions,
+ int *num_mappable_regions);
+
+ /*
+ * ioctl_permissions_cb: Check permissions for generic ioctls.
+ * @filp: File structure pointer describing this node usage session.
+ * @cmd: ioctl number to handle.
+ * @arg: ioctl-specific data pointer.
+ *
+ * Returns 1 if the ioctl may be executed, 0 otherwise. If this callback
+ * isn't specified, a default routine is used that only allows the
+ * original device opener (i.e., the "owner") to execute state-affecting
+ * ioctls.
+ */
+ gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
+
+ /*
+ * ioctl_handler_cb: Callback to handle device-specific ioctls.
+ * @filp: File structure pointer describing this node usage session.
+ * @cmd: ioctl number to handle.
+ * @arg: ioctl-specific data pointer.
+ *
+ * Invoked whenever an ioctl is called that the generic Gasket
+ * framework doesn't support. If no cb is registered, unknown ioctls
+ * return -EINVAL. Should return an error status (either -EINVAL or
+ * the error result of the ioctl being handled).
+ */
+ gasket_ioctl_handler_cb_t ioctl_handler_cb;
+
+ /*
+ * device_status_cb: Callback to determine device health.
+ * @dev: Pointer to the gasket_dev struct for this device.
+ *
+ * Called to determine if the device is healthy or not. Should return
+ * a member of the gasket_status_type enum.
+ */
+ int (*device_status_cb)(struct gasket_dev *dev);
+
+ /*
+ * hardware_revision_cb: Get the device's hardware revision.
+ * @dev: Pointer to the gasket_dev struct for this device.
+ *
+ * Called to determine the reported rev of the physical hardware.
+ * Revision should be >0. A negative return value is an error.
+ */
+ int (*hardware_revision_cb)(struct gasket_dev *dev);
+
+ /*
+ * device_reset_cb: Reset the hardware in question.
+ * @dev: Pointer to the gasket_dev structure for this device.
+ *
+ * Called by reset ioctls. This function should not
+ * lock the gasket_dev mutex. It should return 0 on success
+ * and an error on failure.
+ */
+ int (*device_reset_cb)(struct gasket_dev *dev);
+};
+
+/*
+ * Register the specified device type with the framework.
+ * @desc: Populated/initialized device type descriptor.
+ *
+ * This function does _not_ take ownership of desc; the underlying struct must
+ * exist until the matching call to gasket_unregister_device.
+ * This function should be called from your driver's module_init function.
+ */
+int gasket_register_device(const struct gasket_driver_desc *desc);
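+
+/*
+ * Minimal usage sketch (driver and descriptor names are hypothetical):
+ *
+ * static int __init my_driver_init(void)
+ * {
+ *         return gasket_register_device(&my_driver_desc);
+ * }
+ * module_init(my_driver_init);
+ */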
+
+/*
+ * Remove the specified device type from the framework.
+ * @desc: Descriptor for the device type to unregister; it should have been
+ * passed to gasket_register_device in a previous call.
+ *
+ * This function should be called from your driver's module_exit function.
+ */
+void gasket_unregister_device(const struct gasket_driver_desc *desc);
+
+/* Add a PCI gasket device. */
+int gasket_pci_add_device(struct pci_dev *pci_dev,
+ struct gasket_dev **gasket_devp);
+/* Remove a PCI gasket device. */
+void gasket_pci_remove_device(struct pci_dev *pci_dev);
+
+/* Enable a Gasket device. */
+int gasket_enable_device(struct gasket_dev *gasket_dev);
+
+/* Disable a Gasket device. */
+void gasket_disable_device(struct gasket_dev *gasket_dev);
+
+/*
+ * Reset the Gasket device.
+ * @gasket_dev: Gasket device struct.
+ *
+ * Calls device_reset_cb. Returns 0 on success and an error code otherwise.
+ * gasket_reset_nolock will not lock the mutex, gasket_reset will.
+ */
+int gasket_reset(struct gasket_dev *gasket_dev);
+int gasket_reset_nolock(struct gasket_dev *gasket_dev);
+
+/*
+ * Memory management functions. These will likely be spun off into their own
+ * file in the future.
+ */
+
+/* Unmaps the specified mappable region from a VMA. */
+int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma,
+ const struct gasket_mappable_region *map_region);
+
+/*
+ * Get the ioctl permissions callback.
+ * @gasket_dev: Gasket device structure.
+ */
+gasket_ioctl_permissions_cb_t
+gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev);
+
+/**
+ * Lookup a name by number in a num_name table.
+ * @num: Number to lookup.
+ * @table: Array of num_name structures, the table for the lookup.
+ *
+ */
+const char *gasket_num_name_lookup(uint num,
+ const struct gasket_num_name *table);
+
+/* Handy inlines */
+static inline u64 gasket_dev_read_64(struct gasket_dev *gasket_dev, int bar,
+ ulong location)
+{
+ return readq(&gasket_dev->bar_data[bar].virt_base[location]);
+}
+
+static inline void gasket_dev_write_64(struct gasket_dev *dev, u64 value,
+ int bar, ulong location)
+{
+ writeq(value, &dev->bar_data[bar].virt_base[location]);
+}
+
+static inline void gasket_dev_write_32(struct gasket_dev *dev, u32 value,
+ int bar, ulong location)
+{
+ writel(value, &dev->bar_data[bar].virt_base[location]);
+}
+
+static inline u32 gasket_dev_read_32(struct gasket_dev *dev, int bar,
+ ulong location)
+{
+ return readl(&dev->bar_data[bar].virt_base[location]);
+}
+
+static inline void gasket_read_modify_write_64(struct gasket_dev *dev, int bar,
+ ulong location, u64 value,
+ u64 mask_width, u64 mask_shift)
+{
+ u64 mask, tmp;
+
+ tmp = gasket_dev_read_64(dev, bar, location);
+ mask = ((1ULL << mask_width) - 1) << mask_shift;
+ tmp = (tmp & ~mask) | (value << mask_shift);
+ gasket_dev_write_64(dev, tmp, bar, location);
+}
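+
+/*
+ * For illustration (hypothetical register offset): the call
+ * gasket_read_modify_write_64(dev, 0, 0x48, 0x3, 2, 4) builds
+ * mask = ((1ULL << 2) - 1) << 4 = 0x30 and replaces bits [5:4] of the
+ * register at BAR 0 offset 0x48 with 0b11, leaving other bits intact.
+ */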
+
+static inline void gasket_read_modify_write_32(struct gasket_dev *dev, int bar,
+ ulong location, u32 value,
+ u32 mask_width, u32 mask_shift)
+{
+ u32 mask, tmp;
+
+ tmp = gasket_dev_read_32(dev, bar, location);
+ mask = ((1 << mask_width) - 1) << mask_shift;
+ tmp = (tmp & ~mask) | (value << mask_shift);
+ gasket_dev_write_32(dev, tmp, bar, location);
+}
+
+/* Get the Gasket driver structure for a given device. */
+const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev);
+
+/* Get the device structure for a given device. */
+struct device *gasket_get_device(struct gasket_dev *dev);
+
+/* Helper function: waits for a given set of bits, sleeping between reads. */
+int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
+ u64 offset, u64 mask, u64 val,
+ uint max_retries, u64 delay_ms);
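+
+/*
+ * Example (hypothetical register/values): wait for bit 0 of the register
+ * at BAR 2, offset 0x20, to read as 1, checking up to 50 times with
+ * 10 ms between reads:
+ *
+ * ret = gasket_wait_with_reschedule(gasket_dev, 2, 0x20, 0x1, 0x1, 50, 10);
+ */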
+
+#endif /* __GASKET_CORE_H__ */
diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c
new file mode 100644
index 000000000000..1cfbc120f228
--- /dev/null
+++ b/drivers/staging/gasket/gasket_interrupt.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Google, Inc. */
+
+#include "gasket_interrupt.h"
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+#include "gasket_sysfs.h"
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/printk.h>
+#include <linux/version.h>
+#ifdef GASKET_KERNEL_TRACE_SUPPORT
+#define CREATE_TRACE_POINTS
+#include <trace/events/gasket_interrupt.h>
+#else
+#define trace_gasket_interrupt_event(x, ...)
+#endif
+/* Retry attempts if the requested number of interrupts aren't available. */
+#define MSIX_RETRY_COUNT 3
+
+/* Instance interrupt management data. */
+struct gasket_interrupt_data {
+ /* The name associated with this interrupt data. */
+ const char *name;
+
+ /* Interrupt type. See gasket_interrupt_type in gasket_core.h */
+ int type;
+
+ /* The PCI device [if any] associated with the owning device. */
+ struct pci_dev *pci_dev;
+
+ /* Set to 1 if MSI-X has successfully been configured, 0 otherwise. */
+ int msix_configured;
+
+ /* The number of interrupts requested by the owning device. */
+ int num_interrupts;
+
+ /* A pointer to the interrupt descriptor struct for this device. */
+ const struct gasket_interrupt_desc *interrupts;
+
+ /* The index of the bar into which interrupts should be mapped. */
+ int interrupt_bar_index;
+
+ /* The width of a single interrupt in a packed interrupt register. */
+ int pack_width;
+
+ /* Offsets of the wire interrupt registers. */
+ const struct gasket_wire_interrupt_offsets *wire_interrupt_offsets;
+
+ /*
+ * Design-wise, these elements should be bundled together, but
+ * pci_enable_msix_exact's interface requires that they be managed
+ * individually (requires array of struct msix_entry).
+ */
+
+ /* The number of successfully configured interrupts. */
+ int num_configured;
+
+ /* The MSI-X data for each requested/configured interrupt. */
+ struct msix_entry *msix_entries;
+
+ /* The eventfd "callback" data for each interrupt. */
+ struct eventfd_ctx **eventfd_ctxs;
+
+ /* The number of times each interrupt has been called. */
+ ulong *interrupt_counts;
+
+ /* Linux IRQ number. */
+ int irq;
+};
+
+/* Structures to display interrupt counts in sysfs. */
+enum interrupt_sysfs_attribute_type {
+ ATTR_INTERRUPT_COUNTS,
+};
+
+/* Set up device registers for interrupt handling. */
+static void gasket_interrupt_setup(struct gasket_dev *gasket_dev)
+{
+ int i;
+ int pack_shift;
+ ulong mask;
+ ulong value;
+ struct gasket_interrupt_data *interrupt_data =
+ gasket_dev->interrupt_data;
+
+ if (!interrupt_data) {
+ dev_dbg(gasket_dev->dev, "Interrupt data is not initialized\n");
+ return;
+ }
+
+ dev_dbg(gasket_dev->dev, "Running interrupt setup\n");
+
+ if (interrupt_data->type == PLATFORM_WIRE ||
+ interrupt_data->type == PCI_MSI) {
+		/* Nothing needs to be done for wire (platform) or MSI interrupts. */
+ return;
+ }
+
+ if (interrupt_data->type != PCI_MSIX) {
+ dev_dbg(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ interrupt_data->type);
+ return;
+ }
+
+	/* Set up the MSI-X table. */
+
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+		/*
+		 * If the interrupt is not packed, we can write the index into
+		 * the register directly. If it is packed, we need to perform a
+		 * read-modify-write, shifting based on the packing index.
+		 */
+		dev_dbg(gasket_dev->dev,
+			"Setting up interrupt %d: register 0x%llx, packing %d\n",
+			interrupt_data->interrupts[i].index,
+			interrupt_data->interrupts[i].reg,
+			interrupt_data->interrupts[i].packing);
+ if (interrupt_data->interrupts[i].packing == UNPACKED) {
+ value = interrupt_data->interrupts[i].index;
+ } else {
+ switch (interrupt_data->interrupts[i].packing) {
+ case PACK_0:
+ pack_shift = 0;
+ break;
+ case PACK_1:
+ pack_shift = interrupt_data->pack_width;
+ break;
+ case PACK_2:
+ pack_shift = 2 * interrupt_data->pack_width;
+ break;
+ case PACK_3:
+ pack_shift = 3 * interrupt_data->pack_width;
+ break;
+ default:
+ dev_dbg(gasket_dev->dev,
+					"Found interrupt description with unknown enum %d\n",
+					interrupt_data->interrupts[i].packing);
+ return;
+ }
+
+ mask = ~(0xFFFF << pack_shift);
+ value = gasket_dev_read_64(gasket_dev,
+ interrupt_data->interrupt_bar_index,
+ interrupt_data->interrupts[i].reg);
+ value &= mask;
+ value |= interrupt_data->interrupts[i].index
+ << pack_shift;
+ }
+ gasket_dev_write_64(gasket_dev, value,
+ interrupt_data->interrupt_bar_index,
+ interrupt_data->interrupts[i].reg);
+ }
+}
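
Note that the read-modify-write above clears a fixed 16 bits (0xFFFF) around the packing shift rather than deriving the mask from pack_width, so packed fields are effectively assumed to be at most 16 bits wide. A standalone sketch of the field-update arithmetic with the mask derived from the width (hypothetical helper, not part of the driver):

/*
 * Hypothetical helper: place `value` into field `slot` of a register that
 * packs fields of `width` bits each. Same arithmetic as above, but with
 * the mask derived from the field width instead of fixed at 16 bits.
 */
static u64 example_pack_field(u64 reg, int slot, int width, u64 value)
{
	int shift = slot * width;
	u64 mask = ((1ULL << width) - 1) << shift;

	return (reg & ~mask) | ((value << shift) & mask);
}
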
+
+static irqreturn_t gasket_msix_interrupt_handler(int irq, void *dev_id)
+{
+ struct eventfd_ctx *ctx;
+ struct gasket_interrupt_data *interrupt_data = dev_id;
+ int interrupt = -1;
+ int i;
+
+ /* If this linear lookup is a problem, we can maintain a map/hash. */
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+ if (interrupt_data->msix_entries[i].vector == irq) {
+ interrupt = interrupt_data->msix_entries[i].entry;
+ break;
+ }
+ }
+ if (interrupt == -1) {
+ pr_err("Received unknown irq %d\n", irq);
+ return IRQ_HANDLED;
+ }
+ trace_gasket_interrupt_event(interrupt_data->name, interrupt);
+
+ ctx = interrupt_data->eventfd_ctxs[interrupt];
+ if (ctx)
+ eventfd_signal(ctx, 1);
+
+ ++(interrupt_data->interrupt_counts[interrupt]);
+
+ return IRQ_HANDLED;
+}
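
Each vector's handler signals the eventfd registered for its interrupt, so a host process consumes device interrupts with an ordinary blocking read on the eventfd. A hypothetical userspace sketch; the eventfd itself is registered through the GASKET_IOCTL_SET_EVENTFD path shown later in gasket_ioctl.c:

/* Hypothetical userspace consumer: block until the device interrupt fires. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int wait_for_interrupt(int efd)
{
	uint64_t count;

	/* read() blocks until the kernel handler calls eventfd_signal(). */
	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return -1;
	printf("interrupt fired %llu time(s)\n", (unsigned long long)count);
	return 0;
}
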
+
+static int
+gasket_interrupt_msix_init(struct gasket_interrupt_data *interrupt_data)
+{
+ int ret = 1;
+ int i;
+
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+ interrupt_data->msix_entries[i].entry = i;
+ interrupt_data->msix_entries[i].vector = 0;
+ interrupt_data->eventfd_ctxs[i] = NULL;
+ }
+
+ /* Retry MSIX_RETRY_COUNT times if not enough IRQs are available. */
+ for (i = 0; i < MSIX_RETRY_COUNT && ret > 0; i++)
+ ret = pci_enable_msix_exact(interrupt_data->pci_dev,
+ interrupt_data->msix_entries,
+ interrupt_data->num_interrupts);
+
+ if (ret)
+ return ret > 0 ? -EBUSY : ret;
+ interrupt_data->msix_configured = 1;
+
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+ ret = request_irq(interrupt_data->msix_entries[i].vector,
+ gasket_msix_interrupt_handler, 0,
+ interrupt_data->name, interrupt_data);
+
+ if (ret) {
+ dev_err(&interrupt_data->pci_dev->dev,
+				"Cannot get IRQ for interrupt %d, vector %d; %d\n",
+				i, interrupt_data->msix_entries[i].vector, ret);
+ return ret;
+ }
+
+ interrupt_data->num_configured++;
+ }
+
+ return 0;
+}
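
pci_enable_msix_exact() and struct msix_entry are the legacy MSI-X interface; the retry loop exists because pci_enable_msix_exact() returns a positive count when fewer vectors are currently available. For comparison only, a hedged sketch of the same allocation using the newer pci_alloc_irq_vectors() interface (not what this driver uses; error unwinding is left to the caller):

/*
 * Sketch only: equivalent vector allocation with the modern IRQ API.
 * pci_alloc_irq_vectors() handles retries internally; pci_irq_vector()
 * replaces the msix_entries[].vector bookkeeping.
 */
static int example_msix_init(struct pci_dev *pdev, int num_interrupts,
			     irq_handler_t handler, void *data)
{
	int i, ret;

	ret = pci_alloc_irq_vectors(pdev, num_interrupts, num_interrupts,
				    PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	for (i = 0; i < num_interrupts; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "gasket-example", data);
		if (ret)
			return ret; /* caller unwinds already-requested IRQs */
	}
	return 0;
}
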
+
+/*
+ * On the QCM DragonBoard, we exit gasket_interrupt_msix_init() and the kernel
+ * interrupt setup code with the MSI-X vectors masked. This is wrong because
+ * nothing else in the driver will normally touch the MSI-X vectors.
+ *
+ * As a temporary hack, force unmasking here.
+ *
+ * TODO: Figure out why the QCM kernel doesn't unmask the MSI-X vectors after
+ * gasket_interrupt_msix_init(), and remove this code.
+ */
+static void force_msix_interrupt_unmasking(struct gasket_dev *gasket_dev)
+{
+ int i;
+#define MSIX_VECTOR_SIZE 16
+#define MSIX_MASK_BIT_OFFSET 12
+#define APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE 0x46800
+ for (i = 0; i < gasket_dev->interrupt_data->num_configured; i++) {
+		/* Read the vector's mask bit; skip if already unmasked. */
+ ulong location = APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE +
+ MSIX_MASK_BIT_OFFSET + i * MSIX_VECTOR_SIZE;
+ u32 mask =
+ gasket_dev_read_32(gasket_dev,
+ gasket_dev->interrupt_data->interrupt_bar_index,
+ location);
+ if (!(mask & 1))
+ continue;
+		/* Unmask the MSI-X vector (clear the 32-bit mask word). */
+ gasket_dev_write_32(gasket_dev, 0,
+ gasket_dev->interrupt_data->interrupt_bar_index,
+ location);
+ }
+#undef MSIX_VECTOR_SIZE
+#undef MSIX_MASK_BIT_OFFSET
+#undef APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE
+}
+
+static ssize_t interrupt_sysfs_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret;
+ ssize_t written = 0, total_written = 0;
+ struct gasket_interrupt_data *interrupt_data;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+ enum interrupt_sysfs_attribute_type sysfs_type;
+
+ gasket_dev = gasket_sysfs_get_device_data(device);
+ if (!gasket_dev) {
+ dev_dbg(device, "No sysfs mapping found for device\n");
+ return 0;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ dev_dbg(device, "No sysfs attr data found for device\n");
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return 0;
+ }
+
+ sysfs_type = (enum interrupt_sysfs_attribute_type)
+ gasket_attr->data.attr_type;
+ interrupt_data = gasket_dev->interrupt_data;
+ switch (sysfs_type) {
+ case ATTR_INTERRUPT_COUNTS:
+ for (i = 0; i < interrupt_data->num_interrupts; ++i) {
+ written =
+ scnprintf(buf, PAGE_SIZE - total_written,
+					  "0x%02x: %lu\n", i,
+ interrupt_data->interrupt_counts[i]);
+ total_written += written;
+ buf += written;
+ }
+ ret = total_written;
+ break;
+ default:
+ dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
+ attr->attr.name);
+ ret = 0;
+ break;
+ }
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return ret;
+}
+
+static struct gasket_sysfs_attribute interrupt_sysfs_attrs[] = {
+ GASKET_SYSFS_RO(interrupt_counts, interrupt_sysfs_show,
+ ATTR_INTERRUPT_COUNTS),
+ GASKET_END_OF_ATTR_ARRAY,
+};
+
+int gasket_interrupt_init(struct gasket_dev *gasket_dev, const char *name,
+ int type,
+ const struct gasket_interrupt_desc *interrupts,
+ int num_interrupts, int pack_width, int bar_index,
+ const struct gasket_wire_interrupt_offsets *wire_int_offsets)
+{
+ int ret;
+ struct gasket_interrupt_data *interrupt_data;
+
+ interrupt_data = kzalloc(sizeof(struct gasket_interrupt_data),
+ GFP_KERNEL);
+ if (!interrupt_data)
+ return -ENOMEM;
+ gasket_dev->interrupt_data = interrupt_data;
+ interrupt_data->name = name;
+ interrupt_data->type = type;
+ interrupt_data->pci_dev = gasket_dev->pci_dev;
+ interrupt_data->num_interrupts = num_interrupts;
+ interrupt_data->interrupts = interrupts;
+ interrupt_data->interrupt_bar_index = bar_index;
+ interrupt_data->pack_width = pack_width;
+ interrupt_data->num_configured = 0;
+ interrupt_data->wire_interrupt_offsets = wire_int_offsets;
+
+ /* Allocate all dynamic structures. */
+ interrupt_data->msix_entries = kcalloc(num_interrupts,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!interrupt_data->msix_entries) {
+ kfree(interrupt_data);
+ return -ENOMEM;
+ }
+
+ interrupt_data->eventfd_ctxs = kcalloc(num_interrupts,
+ sizeof(struct eventfd_ctx *),
+ GFP_KERNEL);
+ if (!interrupt_data->eventfd_ctxs) {
+ kfree(interrupt_data->msix_entries);
+ kfree(interrupt_data);
+ return -ENOMEM;
+ }
+
+ interrupt_data->interrupt_counts = kcalloc(num_interrupts,
+ sizeof(ulong),
+ GFP_KERNEL);
+ if (!interrupt_data->interrupt_counts) {
+ kfree(interrupt_data->eventfd_ctxs);
+ kfree(interrupt_data->msix_entries);
+ kfree(interrupt_data);
+ return -ENOMEM;
+ }
+
+ switch (interrupt_data->type) {
+ case PCI_MSIX:
+ ret = gasket_interrupt_msix_init(interrupt_data);
+ if (ret)
+ break;
+ force_msix_interrupt_unmasking(gasket_dev);
+ break;
+
+ case PCI_MSI:
+ case PLATFORM_WIRE:
+ default:
+ dev_err(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ interrupt_data->type);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+		/* Failing to set up interrupts will cause the device to report
+		 * GASKET_STATUS_LAMED, but it is not fatal.
+		 */
+ dev_warn(gasket_dev->dev,
+ "Couldn't initialize interrupts: %d\n", ret);
+ return 0;
+ }
+
+ gasket_interrupt_setup(gasket_dev);
+ gasket_sysfs_create_entries(gasket_dev->dev_info.device,
+ interrupt_sysfs_attrs);
+
+ return 0;
+}
+
+static void
+gasket_interrupt_msix_cleanup(struct gasket_interrupt_data *interrupt_data)
+{
+ int i;
+
+ for (i = 0; i < interrupt_data->num_configured; i++)
+ free_irq(interrupt_data->msix_entries[i].vector,
+ interrupt_data);
+ interrupt_data->num_configured = 0;
+
+ if (interrupt_data->msix_configured)
+ pci_disable_msix(interrupt_data->pci_dev);
+ interrupt_data->msix_configured = 0;
+}
+
+int gasket_interrupt_reinit(struct gasket_dev *gasket_dev)
+{
+ int ret;
+
+ if (!gasket_dev->interrupt_data) {
+ dev_dbg(gasket_dev->dev,
+ "Attempted to reinit uninitialized interrupt data\n");
+ return -EINVAL;
+ }
+
+ switch (gasket_dev->interrupt_data->type) {
+ case PCI_MSIX:
+ gasket_interrupt_msix_cleanup(gasket_dev->interrupt_data);
+ ret = gasket_interrupt_msix_init(gasket_dev->interrupt_data);
+ if (ret)
+ break;
+ force_msix_interrupt_unmasking(gasket_dev);
+ break;
+
+ case PCI_MSI:
+ case PLATFORM_WIRE:
+ default:
+ dev_dbg(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ gasket_dev->interrupt_data->type);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+		/* Failing to set up MSI-X will cause the device
+		 * to report GASKET_STATUS_LAMED, but it is not fatal.
+		 */
+		dev_warn(gasket_dev->dev, "Couldn't init MSI-X: %d\n", ret);
+ return 0;
+ }
+
+ gasket_interrupt_setup(gasket_dev);
+
+ return 0;
+}
+
+/* See gasket_interrupt.h for description. */
+int gasket_interrupt_reset_counts(struct gasket_dev *gasket_dev)
+{
+ dev_dbg(gasket_dev->dev, "Clearing interrupt counts\n");
+ memset(gasket_dev->interrupt_data->interrupt_counts, 0,
+ gasket_dev->interrupt_data->num_interrupts *
+ sizeof(*gasket_dev->interrupt_data->interrupt_counts));
+ return 0;
+}
+
+/* See gasket_interrupt.h for description. */
+void gasket_interrupt_cleanup(struct gasket_dev *gasket_dev)
+{
+ struct gasket_interrupt_data *interrupt_data =
+ gasket_dev->interrupt_data;
+ /*
+ * It is possible to get an error code from gasket_interrupt_init
+ * before interrupt_data has been allocated, so check it.
+ */
+ if (!interrupt_data)
+ return;
+
+ switch (interrupt_data->type) {
+ case PCI_MSIX:
+ gasket_interrupt_msix_cleanup(interrupt_data);
+ break;
+
+ case PCI_MSI:
+ case PLATFORM_WIRE:
+ default:
+ dev_dbg(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ interrupt_data->type);
+ }
+
+ kfree(interrupt_data->interrupt_counts);
+ kfree(interrupt_data->eventfd_ctxs);
+ kfree(interrupt_data->msix_entries);
+ kfree(interrupt_data);
+ gasket_dev->interrupt_data = NULL;
+}
+
+int gasket_interrupt_system_status(struct gasket_dev *gasket_dev)
+{
+ if (!gasket_dev->interrupt_data) {
+ dev_dbg(gasket_dev->dev, "Interrupt data is null\n");
+ return GASKET_STATUS_DEAD;
+ }
+
+ if (!gasket_dev->interrupt_data->msix_configured) {
+ dev_dbg(gasket_dev->dev, "Interrupt not initialized\n");
+ return GASKET_STATUS_LAMED;
+ }
+
+ if (gasket_dev->interrupt_data->num_configured !=
+ gasket_dev->interrupt_data->num_interrupts) {
+ dev_dbg(gasket_dev->dev,
+ "Not all interrupts were configured\n");
+ return GASKET_STATUS_LAMED;
+ }
+
+ return GASKET_STATUS_ALIVE;
+}
+
+int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt, int event_fd)
+{
+	struct eventfd_ctx *ctx;
+
+	/* Validate the interrupt before taking a reference on the eventfd. */
+	if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
+		return -EINVAL;
+
+	ctx = eventfd_ctx_fdget(event_fd);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+ interrupt_data->eventfd_ctxs[interrupt] = ctx;
+ return 0;
+}
+
+int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt)
+{
+ if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
+ return -EINVAL;
+
+ interrupt_data->eventfd_ctxs[interrupt] = NULL;
+ return 0;
+}
diff --git a/drivers/staging/gasket/gasket_interrupt.h b/drivers/staging/gasket/gasket_interrupt.h
new file mode 100644
index 000000000000..835af439e96a
--- /dev/null
+++ b/drivers/staging/gasket/gasket_interrupt.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Gasket common interrupt module. Defines functions for enabling
+ * eventfd-triggered interrupts between a Gasket device and a host process.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __GASKET_INTERRUPT_H__
+#define __GASKET_INTERRUPT_H__
+
+#include <linux/eventfd.h>
+#include <linux/pci.h>
+
+#include "gasket_core.h"
+
+/*
+ * Note that this currently assumes that device interrupts are a dense set,
+ * numbered from 0 - (num_interrupts - 1). Should this have to change, these
+ * APIs will have to be updated.
+ */
+
+/* Opaque type used to hold interrupt subsystem data. */
+struct gasket_interrupt_data;
+
+/*
+ * Initialize the interrupt module.
+ * @gasket_dev: The Gasket device structure for the device to be initialized.
+ * @name: The name to associate with these interrupts.
+ * @type: Type of the interrupt. (See gasket_interrupt_type).
+ * @interrupts: An array of all interrupt descriptions for this device.
+ * @num_interrupts: The length of the @interrupts array.
+ * @pack_width: The width, in bits, of a single field in a packed interrupt reg.
+ * @bar_index: The bar containing all interrupt registers.
+ * @wire_int_offsets: Offsets of the wire interrupt registers, if any.
+ *
+ * Allocates and initializes data to track interrupt state for a device.
+ * After this call, no interrupts will be configured/delivered; call
+ * gasket_interrupt_set_vector[_packed] to associate each interrupt with an
+ * __iomem location, then gasket_interrupt_set_eventfd to associate an eventfd
+ * with an interrupt.
+ *
+ * If num_interrupts interrupts are not available, this call will return a
+ * negative error code. In that case, gasket_interrupt_cleanup should still be
+ * called. Returns 0 on success (which can include a device where interrupts
+ * are not possible to set up, but is otherwise OK; that device will report
+ * status LAMED.)
+ */
+int gasket_interrupt_init(struct gasket_dev *gasket_dev, const char *name,
+ int type,
+ const struct gasket_interrupt_desc *interrupts,
+ int num_interrupts, int pack_width, int bar_index,
+ const struct gasket_wire_interrupt_offsets *wire_int_offsets);
+
+/*
+ * Clean up a device's interrupt structure.
+ * @gasket_dev: The Gasket information structure for this device.
+ *
+ * Cleans up the device's interrupts and deallocates data.
+ */
+void gasket_interrupt_cleanup(struct gasket_dev *gasket_dev);
+
+/*
+ * Clean up and re-initialize the MSI-X subsystem.
+ * @gasket_dev: The Gasket information structure for this device.
+ *
+ * Performs a teardown of the MSI-X subsystem and re-initializes it. Does not
+ * free the underlying data structures. Returns 0 on success and an error code
+ * on error.
+ */
+int gasket_interrupt_reinit(struct gasket_dev *gasket_dev);
+
+/*
+ * Reset the counts stored in the interrupt subsystem.
+ * @gasket_dev: The Gasket information structure for this device.
+ *
+ * Sets the counts of all interrupts in the subsystem to 0.
+ */
+int gasket_interrupt_reset_counts(struct gasket_dev *gasket_dev);
+
+/*
+ * Associates an eventfd with a device interrupt.
+ * @data: Pointer to device interrupt data.
+ * @interrupt: The device interrupt to configure.
+ * @event_fd: The eventfd to associate with the interrupt.
+ *
+ * Prepares the host to receive notification of device interrupts by
+ * associating event_fd with interrupt. After successful configuration,
+ * event_fd will be signaled upon receipt of the device interrupt.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt, int event_fd);
+
+/*
+ * Removes an interrupt-eventfd association.
+ * @data: Pointer to device interrupt data.
+ * @interrupt: The device interrupt to de-associate.
+ *
+ * Removes the eventfd associated with the specified interrupt, if any.
+ */
+int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt);
+
+/*
+ * The below functions exist for backwards compatibility.
+ * No new uses should be written.
+ */
+/*
+ * Get the health of the interrupt subsystem.
+ * @gasket_dev: The Gasket device struct.
+ *
+ * Returns DEAD if not set up, LAMED if initialization failed, and ALIVE
+ * otherwise.
+ */
+int gasket_interrupt_system_status(struct gasket_dev *gasket_dev);
+
+#endif
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
new file mode 100644
index 000000000000..0ca48e688818
--- /dev/null
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Google, Inc. */
+#include "gasket.h"
+#include "gasket_ioctl.h"
+#include "gasket_constants.h"
+#include "gasket_core.h"
+#include "gasket_interrupt.h"
+#include "gasket_page_table.h"
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#ifdef GASKET_KERNEL_TRACE_SUPPORT
+#define CREATE_TRACE_POINTS
+#include <trace/events/gasket_ioctl.h>
+#else
+#define trace_gasket_ioctl_entry(x, ...)
+#define trace_gasket_ioctl_exit(x)
+#define trace_gasket_ioctl_integer_data(x)
+#define trace_gasket_ioctl_eventfd_data(x, ...)
+#define trace_gasket_ioctl_page_table_data(x, ...)
+#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
+#endif
+
+/* Associate an eventfd with an interrupt. */
+static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
+ struct gasket_interrupt_eventfd __user *argp)
+{
+ struct gasket_interrupt_eventfd die;
+
+ if (copy_from_user(&die, argp, sizeof(struct gasket_interrupt_eventfd)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
+
+ return gasket_interrupt_set_eventfd(
+ gasket_dev->interrupt_data, die.interrupt, die.event_fd);
+}
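
From userspace, the association is made by creating an eventfd and passing it through this ioctl. A hypothetical sketch: the device node path is made up, while GASKET_IOCTL_SET_EVENTFD and struct gasket_interrupt_eventfd (fields interrupt and event_fd) come from the gasket UAPI header, which is not part of this file:

#include <fcntl.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "gasket.h" /* UAPI: GASKET_IOCTL_SET_EVENTFD, struct gasket_interrupt_eventfd */

/* Hypothetical: register an eventfd for `interrupt`; dev_path is made up. */
int register_interrupt_eventfd(const char *dev_path, int interrupt)
{
	struct gasket_interrupt_eventfd req;
	int efd = eventfd(0, 0);
	int fd = open(dev_path, O_RDWR);

	if (efd < 0 || fd < 0)
		return -1;
	req.interrupt = interrupt;
	req.event_fd = efd;
	if (ioctl(fd, GASKET_IOCTL_SET_EVENTFD, &req) != 0) {
		perror("GASKET_IOCTL_SET_EVENTFD");
		return -1;
	}
	return efd; /* read() on efd now blocks until the interrupt fires */
}
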
+
+/* Read the size of the page table. */
+static int gasket_read_page_table_size(
+ struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ int ret = 0;
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ ibuf.size = gasket_page_table_num_entries(
+ gasket_dev->page_table[ibuf.page_table_index]);
+
+ trace_gasket_ioctl_page_table_data(
+ ibuf.page_table_index, ibuf.size, ibuf.host_address,
+ ibuf.device_address);
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Read the size of the simple page table. */
+static int gasket_read_simple_page_table_size(
+ struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ int ret = 0;
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ ibuf.size =
+ gasket_page_table_num_simple_entries(gasket_dev->page_table[ibuf.page_table_index]);
+
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Set the boundary between the simple and extended page tables. */
+static int gasket_partition_page_table(
+ struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ int ret;
+ struct gasket_page_table_ioctl ibuf;
+ uint max_page_table_size;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_page_table_data(
+ ibuf.page_table_index, ibuf.size, ibuf.host_address,
+ ibuf.device_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+ max_page_table_size = gasket_page_table_max_size(
+ gasket_dev->page_table[ibuf.page_table_index]);
+
+ if (ibuf.size > max_page_table_size) {
+ dev_dbg(gasket_dev->dev,
+ "Partition request 0x%llx too large, max is 0x%x\n",
+ ibuf.size, max_page_table_size);
+ return -EINVAL;
+ }
+
+ mutex_lock(&gasket_dev->mutex);
+
+ ret = gasket_page_table_partition(
+ gasket_dev->page_table[ibuf.page_table_index], ibuf.size);
+ mutex_unlock(&gasket_dev->mutex);
+
+ return ret;
+}
+
+/* Map a userspace buffer to a device virtual address. */
+static int gasket_map_buffers(struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.host_address,
+ ibuf.device_address, ibuf.size))
+ return -EINVAL;
+
+ return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.host_address, ibuf.device_address,
+ ibuf.size / PAGE_SIZE);
+}
+
+/* Unmap a userspace buffer from a device virtual address. */
+static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ if (gasket_page_table_is_dev_addr_bad(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.device_address, ibuf.size))
+ return -EINVAL;
+
+ gasket_page_table_unmap(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.device_address, ibuf.size / PAGE_SIZE);
+
+ return 0;
+}
+
+/*
+ * Reserve structures for coherent allocation, and allocate or free the
+ * corresponding memory.
+ */
+static int gasket_config_coherent_allocator(
+ struct gasket_dev *gasket_dev,
+ struct gasket_coherent_alloc_config_ioctl __user *argp)
+{
+ int ret;
+ struct gasket_coherent_alloc_config_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp,
+ sizeof(struct gasket_coherent_alloc_config_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_config_coherent_allocator(ibuf.enable, ibuf.size,
+ ibuf.dma_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ if (ibuf.size > PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
+ return -ENOMEM;
+
+ if (ibuf.enable == 0) {
+ ret = gasket_free_coherent_memory(gasket_dev, ibuf.size,
+ ibuf.dma_address,
+ ibuf.page_table_index);
+ } else {
+ ret = gasket_alloc_coherent_memory(gasket_dev, ibuf.size,
+ &ibuf.dma_address,
+ ibuf.page_table_index);
+ }
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
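
Userspace drives the coherent allocator with a struct gasket_coherent_alloc_config_ioctl; on allocation, the driver writes the resulting DMA address back into the struct. A hypothetical sketch, with the ioctl number and struct taken from the gasket UAPI header and field names as used in the handler above:

#include <sys/ioctl.h>
#include "gasket.h" /* UAPI: GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR */

/* Hypothetical: allocate `size` bytes of coherent memory via page table 0. */
long long alloc_coherent(int fd, unsigned long long size)
{
	struct gasket_coherent_alloc_config_ioctl ibuf = {
		.page_table_index = 0,
		.enable = 1, /* 0 would free a prior allocation */
		.size = size,
	};

	if (ioctl(fd, GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR, &ibuf) != 0)
		return -1;
	return ibuf.dma_address; /* filled in by the driver */
}
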
+
+/* Check permissions for Gasket ioctls. */
+static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
+{
+ bool alive;
+ bool read, write;
+ struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
+
+ alive = (gasket_dev->status == GASKET_STATUS_ALIVE);
+ if (!alive)
+ dev_dbg(gasket_dev->dev, "%s alive %d status %d\n",
+ __func__, alive, gasket_dev->status);
+
+ read = !!(filp->f_mode & FMODE_READ);
+ write = !!(filp->f_mode & FMODE_WRITE);
+
+ switch (cmd) {
+ case GASKET_IOCTL_RESET:
+ case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
+ return write;
+
+ case GASKET_IOCTL_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_NUMBER_PAGE_TABLES:
+ return read;
+
+ case GASKET_IOCTL_PARTITION_PAGE_TABLE:
+ case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
+ return alive && write;
+
+ case GASKET_IOCTL_MAP_BUFFER:
+ case GASKET_IOCTL_UNMAP_BUFFER:
+ return alive && write;
+
+ case GASKET_IOCTL_CLEAR_EVENTFD:
+ case GASKET_IOCTL_SET_EVENTFD:
+ return alive && write;
+ }
+
+ return false; /* unknown permissions */
+}
+
+/*
+ * Standard ioctl dispatch function.
+ * @filp: File structure pointer describing this node usage session.
+ * @cmd: ioctl number to handle.
+ * @argp: ioctl-specific data pointer.
+ *
+ * Standard ioctl dispatcher; forwards operations to individual handlers.
+ */
+long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
+{
+ struct gasket_dev *gasket_dev;
+ unsigned long arg = (unsigned long)argp;
+ gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
+ int retval;
+
+ gasket_dev = (struct gasket_dev *)filp->private_data;
+ trace_gasket_ioctl_entry(gasket_dev->dev_info.name, cmd);
+
+ ioctl_permissions_cb = gasket_get_ioctl_permissions_cb(gasket_dev);
+ if (ioctl_permissions_cb) {
+ retval = ioctl_permissions_cb(filp, cmd, argp);
+ if (retval < 0) {
+ trace_gasket_ioctl_exit(retval);
+ return retval;
+ } else if (retval == 0) {
+ trace_gasket_ioctl_exit(-EPERM);
+ return -EPERM;
+ }
+ } else if (!gasket_ioctl_check_permissions(filp, cmd)) {
+ trace_gasket_ioctl_exit(-EPERM);
+ dev_dbg(gasket_dev->dev, "ioctl cmd=%x noperm\n", cmd);
+ return -EPERM;
+ }
+
+	/* Tracing happens in this switch statement for all ioctls with
+	 * an integer argument; for ioctls with a struct argument that
+	 * needs copying and decoding, tracing is done within the
+	 * handler call.
+	 */
+ switch (cmd) {
+ case GASKET_IOCTL_RESET:
+ retval = gasket_reset(gasket_dev);
+ break;
+ case GASKET_IOCTL_SET_EVENTFD:
+ retval = gasket_set_event_fd(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_CLEAR_EVENTFD:
+ trace_gasket_ioctl_integer_data(arg);
+ retval =
+ gasket_interrupt_clear_eventfd(gasket_dev->interrupt_data,
+ (int)arg);
+ break;
+ case GASKET_IOCTL_PARTITION_PAGE_TABLE:
+ trace_gasket_ioctl_integer_data(arg);
+ retval = gasket_partition_page_table(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_NUMBER_PAGE_TABLES:
+ trace_gasket_ioctl_integer_data(gasket_dev->num_page_tables);
+ if (copy_to_user(argp, &gasket_dev->num_page_tables,
+ sizeof(uint64_t)))
+ retval = -EFAULT;
+ else
+ retval = 0;
+ break;
+ case GASKET_IOCTL_PAGE_TABLE_SIZE:
+ retval = gasket_read_page_table_size(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
+ retval = gasket_read_simple_page_table_size(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_MAP_BUFFER:
+ retval = gasket_map_buffers(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
+ retval = gasket_config_coherent_allocator(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_UNMAP_BUFFER:
+ retval = gasket_unmap_buffers(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
+ /* Clear interrupt counts doesn't take an arg, so use 0. */
+ trace_gasket_ioctl_integer_data(0);
+ retval = gasket_interrupt_reset_counts(gasket_dev);
+ break;
+ default:
+ /* If we don't understand the ioctl, the best we can do is trace
+ * the arg.
+ */
+ trace_gasket_ioctl_integer_data(arg);
+ dev_dbg(gasket_dev->dev,
+			"Unknown ioctl cmd=0x%x not caught by gasket_is_supported_ioctl\n",
+			cmd);
+ retval = -EINVAL;
+ break;
+ }
+
+ trace_gasket_ioctl_exit(retval);
+ return retval;
+}
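
The page-table ioctls dispatched above all exchange a struct gasket_page_table_ioctl. A hypothetical userspace sketch mapping a buffer to a device address, with the ioctl number and struct taken from the gasket UAPI header and field names as used in the handlers above:

#include <stddef.h>
#include <sys/ioctl.h>
#include "gasket.h" /* UAPI: GASKET_IOCTL_MAP_BUFFER, struct gasket_page_table_ioctl */

/* Hypothetical: map `size` bytes at `buf` to device address `dev_addr`. */
int map_buffer(int fd, void *buf, unsigned long long dev_addr, size_t size)
{
	struct gasket_page_table_ioctl ibuf = {
		.page_table_index = 0,
		.host_address = (unsigned long long)buf,
		.device_address = dev_addr,
		.size = size, /* must be a multiple of the page size */
	};

	return ioctl(fd, GASKET_IOCTL_MAP_BUFFER, &ibuf);
}
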
+
+/*
+ * Determines if an ioctl is part of the standard Gasket framework.
+ * @cmd: The ioctl number to handle.
+ *
+ * Returns 1 if the ioctl is supported and 0 otherwise.
+ */
+long gasket_is_supported_ioctl(uint cmd)
+{
+ switch (cmd) {
+ case GASKET_IOCTL_RESET:
+ case GASKET_IOCTL_SET_EVENTFD:
+ case GASKET_IOCTL_CLEAR_EVENTFD:
+ case GASKET_IOCTL_PARTITION_PAGE_TABLE:
+ case GASKET_IOCTL_NUMBER_PAGE_TABLES:
+ case GASKET_IOCTL_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_MAP_BUFFER:
+ case GASKET_IOCTL_UNMAP_BUFFER:
+ case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
+ case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
+ return 1;
+ default:
+ return 0;
+ }
+}
diff --git a/drivers/staging/gasket/gasket_ioctl.h b/drivers/staging/gasket/gasket_ioctl.h
new file mode 100644
index 000000000000..51f468c77f04
--- /dev/null
+++ b/drivers/staging/gasket/gasket_ioctl.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Google, Inc. */
+#ifndef __GASKET_IOCTL_H__
+#define __GASKET_IOCTL_H__
+
+#include "gasket_core.h"
+
+#include <linux/compiler.h>
+
+/*
+ * Handle Gasket common ioctls.
+ * @filp: Pointer to the ioctl's file.
+ * @cmd: Ioctl command.
+ * @argp: Ioctl argument pointer.
+ *
+ * Returns 0 on success and nonzero on failure.
+ */
+long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp);
+
+/*
+ * Determines if an ioctl is part of the standard Gasket framework.
+ * @cmd: The ioctl number to handle.
+ *
+ * Returns 1 if the ioctl is supported and 0 otherwise.
+ */
+long gasket_is_supported_ioctl(uint cmd);
+
+#endif
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
new file mode 100644
index 000000000000..d4c5f8aa7dd3
--- /dev/null
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -0,0 +1,1381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of Gasket page table support.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+/*
+ * This file assumes 4kB pages throughout; this assumption can be factored
+ * out when necessary.
+ *
+ * Address format is as follows:
+ * Simple addresses - those whose containing pages are directly placed in the
+ * device's address translation registers - are laid out as:
+ * [ 63 - 40: Unused | 39 - 28: 0 | 27 - 12: page index | 11 - 0: page offset ]
+ * page index: The index of the containing page in the device's address
+ * translation registers.
+ * page offset: The index of the address into the containing page.
+ *
+ * Extended addresses - those whose containing pages are contained in a
+ * second-level page table whose address is present in the device's address
+ * translation registers - are laid out as:
+ * [ 63 - 40: Unused | 39: flag | 38 - 37: 0 | 36 - 21: dev/level 0 index |
+ * 20 - 12: host/level 1 index | 11 - 0: page offset ]
+ * flag: Marker indicating that this is an extended address. Always 1.
+ * dev index: The index of the first-level page in the device's extended
+ * address translation registers.
+ * host index: The index of the containing page in the [host-resident] second-
+ * level page table.
+ * page offset: The index of the address into the containing [second-level]
+ * page.
+ */
+#include "gasket_page_table.h"
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+
+/* Constants & utility macros */
+/* The number of pages that can be mapped into each second-level page table. */
+#define GASKET_PAGES_PER_SUBTABLE 512
+
+/* The starting position of the page index in a simple virtual address. */
+#define GASKET_SIMPLE_PAGE_SHIFT 12
+
+/* Flag indicating that a [device] slot is valid for use. */
+#define GASKET_VALID_SLOT_FLAG 1
+
+/*
+ * The starting position of the level 0 page index (i.e., the entry in the
+ * device's extended address registers) in an extended address.
+ * Also can be thought of as (log2(PAGE_SIZE) + log2(PAGES_PER_SUBTABLE)),
+ * or (12 + 9).
+ */
+#define GASKET_EXTENDED_LVL0_SHIFT 21
+
+/*
+ * Width, in bits, of the level 0 page index in an extended address; i.e.,
+ * log2 of the number of first-level pages that Gasket chips support.
+ *
+ * This allows, at a maximum, a 34-bit address space (16GB):
+ * 34 = GASKET_EXTENDED_LVL0_WIDTH + log2(PAGES_PER_SUBTABLE) + log2(PAGE_SIZE)
+ *    = 13 + 9 + 12
+ */
+#define GASKET_EXTENDED_LVL0_WIDTH 13
+
+/*
+ * The starting position of the level 1 page index (i.e., the entry in the
+ * host second-level/sub- table) in an extended address.
+ */
+#define GASKET_EXTENDED_LVL1_SHIFT 12
+
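
Worked example: in device address 0x80003E81FF, bit 39 is the extended flag, the level 0 index is (0x3E81FF >> 21) = 1, the level 1 index is ((0x3E81FF >> 12) & 511) = 488, and the page offset is 0x1FF. A short sketch of that extraction using the constants above; the flag's bit position is device-configured, and bit 39 is assumed here only for illustration:

/*
 * Sketch: extract the extended-address fields using the constants above.
 * Mirrors the index helpers defined later in this file.
 */
static void example_decode_extended(ulong dev_addr)
{
	ulong lvl0 = (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
		     ((1 << GASKET_EXTENDED_LVL0_WIDTH) - 1);
	ulong lvl1 = (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
		     (GASKET_PAGES_PER_SUBTABLE - 1);
	ulong offset = dev_addr & (PAGE_SIZE - 1);

	pr_info("lvl0 %lu lvl1 %lu offset 0x%lx\n", lvl0, lvl1, offset);
}
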
+/* Type declarations */
+/* Valid states for a struct gasket_page_table_entry. */
+enum pte_status {
+ PTE_FREE,
+ PTE_INUSE,
+};
+
+/*
+ * Mapping metadata for a single page.
+ *
+ * In this file, host-side page table entries are referred to as PTEs.
+ * Where device vs. host entries are differentiated, device-side or -visible
+ * entries are called "slots". A slot may be either an entry in the device's
+ * address translation table registers or an entry in a second-level page
+ * table ("subtable").
+ *
+ * The full data in this structure is visible on the host [of course]. Only
+ * the address contained in dma_addr is communicated to the device; that points
+ * to the actual page mapped and described by this structure.
+ */
+struct gasket_page_table_entry {
+ /* The status of this entry/slot: free or in use. */
+ enum pte_status status;
+
+ /* Address of the page in DMA space. */
+ dma_addr_t dma_addr;
+
+ /* Linux page descriptor for the page described by this structure. */
+ struct page *page;
+
+ /*
+ * Index for alignment into host vaddrs.
+ * When a user specifies a host address for a mapping, that address may
+ * not be page-aligned. Offset is the index into the containing page of
+ * the host address (i.e., host_vaddr & (PAGE_SIZE - 1)).
+ * This is necessary for translating between user-specified addresses
+ * and page-aligned addresses.
+ */
+ int offset;
+
+ /*
+ * If this is an extended and first-level entry, sublevel points
+ * to the second-level entries underneath this entry.
+ */
+ struct gasket_page_table_entry *sublevel;
+};
+
+/*
+ * Maintains the virtual-to-physical address mapping for a coherent page that
+ * is allocated by this module for a given device.
+ *
+ * Note that the kernel cannot track user virtual mappings of coherent pages,
+ * and coherent pages have no associated struct page, so get_user_pages_xx()
+ * cannot be performed on an address that was allocated as coherent memory.
+ * This structure trivially implements the needed tracking.
+ */
+struct gasket_coherent_page_entry {
+	/* Physical address, DMA-able by the owning device. */
+	dma_addr_t paddr;
+
+	/* User virtual address, mapped by the mmap kernel subsystem. */
+	u64 user_virt;
+
+	/* Kernel virtual address. */
+	u64 kernel_virt;
+
+	/*
+	 * Whether this page has been mapped into a userland process virtual
+	 * space.
+	 */
+	u32 in_use;
+};
+
+/*
+ * [Host-side] page table descriptor.
+ *
+ * This structure tracks the metadata necessary to manage both simple and
+ * extended page tables.
+ */
+struct gasket_page_table {
+ /* The config used to create this page table. */
+ struct gasket_page_table_config config;
+
+ /* The number of simple (single-level) entries in the page table. */
+ uint num_simple_entries;
+
+ /* The number of extended (two-level) entries in the page table. */
+ uint num_extended_entries;
+
+ /* Array of [host-side] page table entries. */
+ struct gasket_page_table_entry *entries;
+
+ /* Number of actively mapped kernel pages in this table. */
+ uint num_active_pages;
+
+ /* Device register: base of/first slot in the page table. */
+ u64 __iomem *base_slot;
+
+ /* Device register: holds the offset indicating the start of the
+ * extended address region of the device's address translation table.
+ */
+ u64 __iomem *extended_offset_reg;
+
+ /* Device structure for the underlying device. Only used for logging. */
+ struct device *device;
+
+ /* PCI system descriptor for the underlying device. */
+ struct pci_dev *pci_dev;
+
+ /* Location of the extended address bit for this Gasket device. */
+ u64 extended_flag;
+
+ /* Mutex to protect page table internals. */
+ struct mutex mutex;
+
+	/* Number of coherent pages accessible through this page table. */
+ int num_coherent_pages;
+
+	/*
+	 * List of coherent memory (physical) allocated for a device.
+	 *
+	 * This structure also remembers the user virtual mapping. This is
+	 * hacky, but necessary because the kernel doesn't track user mappings
+	 * of coherent (pfn-based) pages, nor the virt-to-coherent-page
+	 * mapping.
+	 * TODO: use the find_vma() APIs to convert a host address to a
+	 * vm_area and then to a dma_addr_t, instead of storing the user
+	 * virtual address in gasket_coherent_page_entry.
+	 *
+	 * Note that the user virtual mapping is created by the driver, in
+	 * gasket_mmap(), so user_virt belongs in the driver anyhow.
+	 */
+ struct gasket_coherent_page_entry *coherent_pages;
+};
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
+ const struct gasket_bar_data *bar_data,
+ const struct gasket_page_table_config *page_table_config,
+ struct device *device, struct pci_dev *pci_dev)
+{
+ ulong bytes;
+ struct gasket_page_table *pg_tbl;
+ ulong total_entries = page_table_config->total_entries;
+
+ /*
+ * TODO: Verify config->total_entries against value read from the
+ * hardware register that contains the page table size.
+ */
+ if (total_entries == ULONG_MAX) {
+		dev_dbg(device,
+			"Error reading page table size; initializing page table with size 0\n");
+ total_entries = 0;
+ }
+
+ dev_dbg(device,
+ "Attempting to initialize page table of size 0x%lx\n",
+ total_entries);
+
+ dev_dbg(device,
+ "Table has base reg 0x%x, extended offset reg 0x%x\n",
+ page_table_config->base_reg,
+ page_table_config->extended_reg);
+
+ *ppg_tbl = kzalloc(sizeof(**ppg_tbl), GFP_KERNEL);
+ if (!*ppg_tbl) {
+ dev_dbg(device, "No memory for page table\n");
+ return -ENOMEM;
+ }
+
+ pg_tbl = *ppg_tbl;
+ bytes = total_entries * sizeof(struct gasket_page_table_entry);
+ if (bytes != 0) {
+ pg_tbl->entries = vzalloc(bytes);
+ if (!pg_tbl->entries) {
+ dev_dbg(device,
+ "No memory for address translation metadata\n");
+ kfree(pg_tbl);
+ *ppg_tbl = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ mutex_init(&pg_tbl->mutex);
+ memcpy(&pg_tbl->config, page_table_config, sizeof(*page_table_config));
+ if (pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_NORMAL ||
+ pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_SIMPLE) {
+ pg_tbl->num_simple_entries = total_entries;
+ pg_tbl->num_extended_entries = 0;
+ pg_tbl->extended_flag = 1ull << page_table_config->extended_bit;
+ } else {
+ pg_tbl->num_simple_entries = 0;
+ pg_tbl->num_extended_entries = total_entries;
+ pg_tbl->extended_flag = 0;
+ }
+ pg_tbl->num_active_pages = 0;
+ pg_tbl->base_slot =
+ (u64 __iomem *)&bar_data->virt_base[page_table_config->base_reg];
+ pg_tbl->extended_offset_reg =
+ (u64 __iomem *)&bar_data->virt_base[page_table_config->extended_reg];
+ pg_tbl->device = get_device(device);
+ pg_tbl->pci_dev = pci_dev;
+
+ dev_dbg(device, "Page table initialized successfully\n");
+
+ return 0;
+}
+
+/*
+ * Check if a range of PTEs is free.
+ * The page table mutex must be held by the caller.
+ */
+static bool gasket_is_pte_range_free(struct gasket_page_table_entry *ptes,
+ uint num_entries)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (ptes[i].status != PTE_FREE)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Free a second level page [sub]table.
+ * The page table mutex must be held before this call.
+ */
+static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *pte,
+ u64 __iomem *slot)
+{
+ /* Release the page table from the driver */
+ pte->status = PTE_FREE;
+
+ /* Release the page table from the device */
+ writeq(0, slot);
+ /* Force sync around the address release. */
+ mb();
+
+ if (pte->dma_addr)
+ dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ vfree(pte->sublevel);
+
+ if (pte->page)
+ free_page((ulong)page_address(pte->page));
+
+ memset(pte, 0, sizeof(struct gasket_page_table_entry));
+}
+
+/*
+ * Actually perform collection.
+ * The page table mutex must be held by the caller.
+ */
+static void
+gasket_page_table_garbage_collect_nolock(struct gasket_page_table *pg_tbl)
+{
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot;
+
+	/*
+	 * XXX FIX ME XXX -- more efficient to keep a usage count rather
+	 * than scanning the second level page tables.
+	 */
+
+ for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
+ slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
+ pte < pg_tbl->entries + pg_tbl->config.total_entries;
+ pte++, slot++) {
+ if (pte->status == PTE_INUSE) {
+ if (gasket_is_pte_range_free(pte->sublevel,
+ GASKET_PAGES_PER_SUBTABLE))
+ gasket_free_extended_subtable(pg_tbl, pte,
+ slot);
+ }
+ }
+}
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
+{
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_garbage_collect_nolock(pg_tbl);
+ mutex_unlock(&pg_tbl->mutex);
+}
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_cleanup(struct gasket_page_table *pg_tbl)
+{
+ /* Deallocate free second-level tables. */
+ gasket_page_table_garbage_collect(pg_tbl);
+
+ /* TODO: Check that all PTEs have been freed? */
+
+ vfree(pg_tbl->entries);
+ pg_tbl->entries = NULL;
+
+ put_device(pg_tbl->device);
+ kfree(pg_tbl);
+}
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_partition(struct gasket_page_table *pg_tbl,
+ uint num_simple_entries)
+{
+ int i, start;
+
+ mutex_lock(&pg_tbl->mutex);
+ if (num_simple_entries > pg_tbl->config.total_entries) {
+ mutex_unlock(&pg_tbl->mutex);
+ return -EINVAL;
+ }
+
+ gasket_page_table_garbage_collect_nolock(pg_tbl);
+
+ start = min(pg_tbl->num_simple_entries, num_simple_entries);
+
+ for (i = start; i < pg_tbl->config.total_entries; i++) {
+ if (pg_tbl->entries[i].status != PTE_FREE) {
+ dev_err(pg_tbl->device, "entry %d is not free\n", i);
+ mutex_unlock(&pg_tbl->mutex);
+ return -EBUSY;
+ }
+ }
+
+ pg_tbl->num_simple_entries = num_simple_entries;
+ pg_tbl->num_extended_entries =
+ pg_tbl->config.total_entries - num_simple_entries;
+ writeq(num_simple_entries, pg_tbl->extended_offset_reg);
+
+ mutex_unlock(&pg_tbl->mutex);
+ return 0;
+}
+EXPORT_SYMBOL(gasket_page_table_partition);
+
+/*
+ * Return whether a host buffer was mapped as coherent memory.
+ *
+ * A Gasket page table currently supports one contiguous DMA range, mapped to
+ * one contiguous virtual memory range. Check if the host_addr is within that
+ * range.
+ */
+static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
+{
+ u64 min, max;
+
+ /* whether the host address is within user virt range */
+ if (!pg_tbl->coherent_pages)
+ return 0;
+
+ min = (u64)pg_tbl->coherent_pages[0].user_virt;
+ max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;
+
+ return min <= host_addr && host_addr < max;
+}
+
+/*
+ * Get and map last level page table buffers.
+ *
+ * @slots is the location(s) into which the device-mapped page addresses are
+ * written. If this is a simple mapping, these will be address translation
+ * registers; if this is an extended mapping, these will be within a
+ * second-level page table allocated by the host, and so must have their
+ * __iomem attribute cast away.
+ */
+static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *ptes,
+ u64 __iomem *slots, ulong host_addr,
+ uint num_pages, int is_simple_mapping)
+{
+ int ret;
+ ulong offset;
+ struct page *page;
+ dma_addr_t dma_addr;
+ ulong page_addr;
+ int i;
+
+ for (i = 0; i < num_pages; i++) {
+ page_addr = host_addr + i * PAGE_SIZE;
+ offset = page_addr & (PAGE_SIZE - 1);
+ dev_dbg(pg_tbl->device, "%s i %d\n", __func__, i);
+ if (is_coherent(pg_tbl, host_addr)) {
+ u64 off =
+ (u64)host_addr -
+ (u64)pg_tbl->coherent_pages[0].user_virt;
+ ptes[i].page = NULL;
+ ptes[i].offset = offset;
+ ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
+ off + i * PAGE_SIZE;
+ } else {
+ ret = get_user_pages_fast(page_addr - offset, 1, 1,
+ &page);
+
+ if (ret <= 0) {
+ dev_err(pg_tbl->device,
+					"get user pages failed for addr=0x%lx, offset=0x%lx [ret=%d]\n",
+ page_addr, offset, ret);
+ return ret ? ret : -ENOMEM;
+ }
+ ++pg_tbl->num_active_pages;
+
+ ptes[i].page = page;
+ ptes[i].offset = offset;
+
+ /* Map the page into DMA space. */
+ ptes[i].dma_addr =
+ dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ dev_dbg(pg_tbl->device,
+ "%s i %d pte %p pfn %p -> mapped %llx\n",
+ __func__, i, &ptes[i],
+ (void *)page_to_pfn(page),
+ (unsigned long long)ptes[i].dma_addr);
+
+			if (dma_mapping_error(pg_tbl->device,
+					      ptes[i].dma_addr)) {
+				dev_dbg(pg_tbl->device,
+					"%s i %d -> failed to map page %llx [pfn %p phys %p]\n",
+					__func__, i,
+					(unsigned long long)ptes[i].dma_addr,
+					(void *)page_to_pfn(page),
+					(void *)page_to_phys(page));
+				return -ENOMEM;
+			}
+ /* Wait until the page is mapped. */
+ mb();
+ }
+
+ /* Make the DMA-space address available to the device. */
+ dma_addr = (ptes[i].dma_addr + offset) | GASKET_VALID_SLOT_FLAG;
+
+ if (is_simple_mapping) {
+ writeq(dma_addr, &slots[i]);
+ } else {
+ ((u64 __force *)slots)[i] = dma_addr;
+ /* Extended page table vectors are in DRAM,
+ * and so need to be synced each time they are updated.
+ */
+ dma_map_single(pg_tbl->device,
+ (void *)&((u64 __force *)slots)[i],
+ sizeof(u64), DMA_TO_DEVICE);
+ }
+ ptes[i].status = PTE_INUSE;
+ }
+ return 0;
+}
+
+/*
+ * Return the index of the page for the address in the simple table.
+ * Does not perform validity checking.
+ */
+static int gasket_simple_page_idx(struct gasket_page_table *pg_tbl,
+ ulong dev_addr)
+{
+ return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
+ (pg_tbl->config.total_entries - 1);
+}
+
+/*
+ * Return the level 0 page index for the given address.
+ * Does not perform validity checking.
+ */
+static ulong gasket_extended_lvl0_page_idx(struct gasket_page_table *pg_tbl,
+ ulong dev_addr)
+{
+ return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
+ ((1 << GASKET_EXTENDED_LVL0_WIDTH) - 1);
+}
+
+/*
+ * Return the level 1 page index for the given address.
+ * Does not perform validity checking.
+ */
+static ulong gasket_extended_lvl1_page_idx(struct gasket_page_table *pg_tbl,
+ ulong dev_addr)
+{
+ return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
+ (GASKET_PAGES_PER_SUBTABLE - 1);
+}
+
+/*
+ * Allocate page table entries in a simple table.
+ * The page table mutex must be held by the caller.
+ */
+static int gasket_alloc_simple_entries(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ if (!gasket_is_pte_range_free(pg_tbl->entries +
+ gasket_simple_page_idx(pg_tbl, dev_addr),
+ num_pages))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Safely return a page to the OS. */
+static bool gasket_release_page(struct page *page)
+{
+ if (!page)
+ return false;
+
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ put_page(page);
+
+ return true;
+}
+
+/*
+ * Unmap and release mapped pages.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *ptes,
+ u64 __iomem *slots, uint num_pages,
+ int is_simple_mapping)
+{
+ int i;
+ /*
+ * For each page table entry and corresponding entry in the device's
+ * address translation table:
+ */
+ for (i = 0; i < num_pages; i++) {
+ /* release the address from the device, */
+ if (is_simple_mapping || ptes[i].status == PTE_INUSE)
+ writeq(0, &slots[i]);
+ else
+ ((u64 __force *)slots)[i] = 0;
+ /* Force sync around the address release. */
+ mb();
+
+ /* release the address from the driver, */
+ if (ptes[i].status == PTE_INUSE) {
+ if (ptes[i].dma_addr) {
+				/* Match the direction used at map time. */
+				dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
+					       PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ if (gasket_release_page(ptes[i].page))
+ --pg_tbl->num_active_pages;
+ }
+ ptes[i].status = PTE_FREE;
+
+ /* and clear the PTE. */
+ memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
+ }
+}
+
+/*
+ * Unmap and release pages mapped to simple addresses.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_unmap_simple_pages(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ uint slot = gasket_simple_page_idx(pg_tbl, dev_addr);
+
+ gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot,
+ pg_tbl->base_slot + slot, num_pages, 1);
+}
+
+/*
+ * Unmap and release buffers mapped to extended addresses.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_unmap_extended_pages(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ uint slot_idx, remain, len;
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot_base;
+
+ remain = num_pages;
+ slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ while (remain > 0) {
+ /* TODO: Add check to ensure pte remains valid? */
+ len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
+
+ if (pte->status == PTE_INUSE) {
+ slot_base = (u64 __iomem *)(page_address(pte->page) +
+ pte->offset);
+ gasket_perform_unmapping(pg_tbl,
+ pte->sublevel + slot_idx,
+ slot_base + slot_idx, len, 0);
+ }
+
+ remain -= len;
+ slot_idx = 0;
+ pte++;
+ }
+}
+
+/* Return true if the specified device address uses the simple format. */
+static inline bool gasket_addr_is_simple(struct gasket_page_table *pg_tbl,
+ ulong addr)
+{
+ return !((addr) & (pg_tbl)->extended_flag);
+}
+
+/*
+ * Convert (simple, page, offset) into a device address.
+ * Examples:
+ * Simple page 0, offset 32:
+ *  Input (1, 0, 32), Output 0x20
+ * Simple page 1000, offset 511:
+ *  Input (1, 1000, 511), Output 0x3E81FF
+ * Extended page 0, offset 32:
+ *  Input (0, 0, 32), Output 0x8000000020
+ * Extended page 1000, offset 511:
+ *  Input (0, 1000, 511), Output 0x80003E81FF
+ */
+static ulong gasket_components_to_dev_address(struct gasket_page_table *pg_tbl,
+ int is_simple, uint page_index,
+ uint offset)
+{
+ ulong lvl0_index, lvl1_index;
+
+ if (is_simple) {
+ /* Return simple addresses directly. */
+ lvl0_index = page_index & (pg_tbl->config.total_entries - 1);
+ return (lvl0_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;
+ }
+
+ /*
+ * This could be compressed into fewer statements, but
+ * A) the compiler should optimize it
+ * B) this is not slow
+ * C) this is an uncommon operation
+ * D) this is actually readable this way.
+ */
+ lvl0_index = page_index / GASKET_PAGES_PER_SUBTABLE;
+ lvl1_index = page_index & (GASKET_PAGES_PER_SUBTABLE - 1);
+ return (pg_tbl)->extended_flag |
+ (lvl0_index << GASKET_EXTENDED_LVL0_SHIFT) |
+ (lvl1_index << GASKET_EXTENDED_LVL1_SHIFT) | offset;
+}
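
A quick sanity check of the examples in the comment above, as a sketch; it assumes a table configured with at least 1024 simple entries (so page index 1000 is not truncated by the total_entries mask):

/* Sketch: verify the example conversions from the comment above. */
static void example_check_dev_addrs(struct gasket_page_table *pg_tbl)
{
	/* Simple page 1000, offset 511 -> 0x3E81FF. */
	WARN_ON(gasket_components_to_dev_address(pg_tbl, 1, 1000, 511) !=
		0x3E81FF);
	/* Extended page 1000, offset 511 -> lvl0 1, lvl1 488, offset 0x1FF. */
	WARN_ON(gasket_components_to_dev_address(pg_tbl, 0, 1000, 511) !=
		(pg_tbl->extended_flag | 0x3E81FF));
}
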
+
+/*
+ * Validity checking for simple addresses.
+ *
+ * Verify that address translation commutes (from address to/from page + offset)
+ * and that the requested page range starts and ends within the set of
+ * currently-partitioned simple pages.
+ */
+static bool gasket_is_simple_dev_addr_bad(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ ulong page_offset = dev_addr & (PAGE_SIZE - 1);
+ ulong page_index =
+ (dev_addr / PAGE_SIZE) & (pg_tbl->config.total_entries - 1);
+
+ if (gasket_components_to_dev_address(pg_tbl, 1, page_index,
+ page_offset) != dev_addr) {
+ dev_err(pg_tbl->device, "address is invalid, 0x%lX\n",
+ dev_addr);
+ return true;
+ }
+
+ if (page_index >= pg_tbl->num_simple_entries) {
+ dev_err(pg_tbl->device,
+ "starting slot at %lu is too large, max is < %u\n",
+ page_index, pg_tbl->num_simple_entries);
+ return true;
+ }
+
+ if (page_index + num_pages > pg_tbl->num_simple_entries) {
+ dev_err(pg_tbl->device,
+ "ending slot at %lu is too large, max is <= %u\n",
+ page_index + num_pages, pg_tbl->num_simple_entries);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Validity checking for extended addresses.
+ *
+ * Verify that address translation commutes (from address to/from page +
+ * offset) and that the requested page range starts and ends within the set of
+ * currently-partitioned extended pages.
+ */
+static bool gasket_is_extended_dev_addr_bad(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ /* Starting byte index of dev_addr into the first mapped page */
+ ulong page_offset = dev_addr & (PAGE_SIZE - 1);
+ ulong page_global_idx, page_lvl0_idx;
+ ulong num_lvl0_pages;
+ ulong addr;
+
+ /* check if the device address is out of bound */
+ addr = dev_addr & ~((pg_tbl)->extended_flag);
+ if (addr >> (GASKET_EXTENDED_LVL0_WIDTH + GASKET_EXTENDED_LVL0_SHIFT)) {
+ dev_err(pg_tbl->device, "device address out of bounds: 0x%lx\n",
+ dev_addr);
+ return true;
+ }
+
+ /* Find the starting sub-page index in the space of all sub-pages. */
+ page_global_idx = (dev_addr / PAGE_SIZE) &
+ (pg_tbl->config.total_entries * GASKET_PAGES_PER_SUBTABLE - 1);
+
+ /* Find the starting level 0 index. */
+ page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ /* Get the count of affected level 0 pages. */
+ num_lvl0_pages = (num_pages + GASKET_PAGES_PER_SUBTABLE - 1) /
+ GASKET_PAGES_PER_SUBTABLE;
+
+ if (gasket_components_to_dev_address(pg_tbl, 0, page_global_idx,
+ page_offset) != dev_addr) {
+ dev_err(pg_tbl->device, "address is invalid: 0x%lx\n",
+ dev_addr);
+ return true;
+ }
+
+ if (page_lvl0_idx >= pg_tbl->num_extended_entries) {
+ dev_err(pg_tbl->device,
+			"starting level 0 slot at %lu is too large, max is < %u\n",
+			page_lvl0_idx, pg_tbl->num_extended_entries);
+ return true;
+ }
+
+ if (page_lvl0_idx + num_lvl0_pages > pg_tbl->num_extended_entries) {
+ dev_err(pg_tbl->device,
+ "ending level 0 slot at %lu is too large, max is <= %u\n",
+ page_lvl0_idx + num_lvl0_pages,
+ pg_tbl->num_extended_entries);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Non-locking entry to unmapping routines.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_page_table_unmap_nolock(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ if (!num_pages)
+ return;
+
+ if (gasket_addr_is_simple(pg_tbl, dev_addr))
+ gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
+ else
+ gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
+}
+
+/*
+ * Allocate and map pages to simple addresses.
+ * If there is an error, no pages are mapped.
+ */
+static int gasket_map_simple_pages(struct gasket_page_table *pg_tbl,
+ ulong host_addr, ulong dev_addr,
+ uint num_pages)
+{
+ int ret;
+ uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);
+
+ ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
+ if (ret) {
+ dev_err(pg_tbl->device,
+ "page table slots %u (@ 0x%lx) to %u are not available\n",
+ slot_idx, dev_addr, slot_idx + num_pages - 1);
+ return ret;
+ }
+
+ ret = gasket_perform_mapping(pg_tbl, pg_tbl->entries + slot_idx,
+ pg_tbl->base_slot + slot_idx, host_addr,
+ num_pages, 1);
+
+ if (ret) {
+ gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
+ dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
+ }
+ return ret;
+}
+
+/*
+ * Allocate a second level page table.
+ * The page table mutex must be held by the caller.
+ */
+static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *pte,
+ u64 __iomem *slot)
+{
+ ulong page_addr, subtable_bytes;
+ dma_addr_t dma_addr;
+
+ /* XXX FIX ME XXX this is inefficient for non-4K page sizes */
+
+	/* The GFP_DMA flag must be passed on architectures where part of the
+	 * memory range is not considered DMA'able. This seems to be the case
+	 * for the Juno board with the 4.5.0 Linaro kernel.
+	 */
+ page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page_addr)
+ return -ENOMEM;
+ pte->page = virt_to_page((void *)page_addr);
+ pte->offset = 0;
+
+ subtable_bytes = sizeof(struct gasket_page_table_entry) *
+ GASKET_PAGES_PER_SUBTABLE;
+ pte->sublevel = vzalloc(subtable_bytes);
+ if (!pte->sublevel) {
+ free_page(page_addr);
+ memset(pte, 0, sizeof(struct gasket_page_table_entry));
+ return -ENOMEM;
+ }
+
+ /* Map the page into DMA space. */
+ pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ /* Wait until the page is mapped. */
+ mb();
+
+ /* make the addresses available to the device */
+ dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
+ writeq(dma_addr, slot);
+
+ pte->status = PTE_INUSE;
+
+ return 0;
+}
+
+/*
+ * Allocate slots in an extended page table. Check to see if a range of page
+ * table slots is available. If necessary, memory is allocated for second-level
+ * page tables.
+ *
+ * Note that memory for second-level page tables is allocated as needed, but
+ * that memory is only freed on the final close of the device file, when the
+ * page tables are repartitioned, or when the device is removed. If there is an
+ * error or if the full range of slots is not available, any memory allocated
+ * for second-level page tables remains allocated until final close,
+ * repartition, or device removal.
+ *
+ * The page table mutex must be held by the caller.
+ */
+static int gasket_alloc_extended_entries(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_entries)
+{
+ int ret = 0;
+ uint remain, subtable_slot_idx, len;
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot;
+
+ remain = num_entries;
+ subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+ slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ while (remain > 0) {
+ len = min(remain,
+ GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);
+
+ if (pte->status == PTE_FREE) {
+ ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
+ if (ret) {
+ dev_err(pg_tbl->device,
+ "no memory for extended addr subtable\n");
+ return ret;
+ }
+ } else {
+ if (!gasket_is_pte_range_free(pte->sublevel +
+ subtable_slot_idx, len))
+ return -EBUSY;
+ }
+
+ remain -= len;
+ subtable_slot_idx = 0;
+ pte++;
+ slot++;
+ }
+
+ return 0;
+}
+
+/*
+ * gasket_map_extended_pages - Get and map buffers to extended addresses.
+ * If there is an error, no pages are mapped.
+ */
+static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
+ ulong host_addr, ulong dev_addr,
+ uint num_pages)
+{
+ int ret;
+ ulong dev_addr_end;
+ uint slot_idx, remain, len;
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot_base;
+
+ ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
+ if (ret) {
+ dev_addr_end = dev_addr + (num_pages * PAGE_SIZE) - 1;
+ dev_err(pg_tbl->device,
+ "page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are not available\n",
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
+ gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
+ dev_addr,
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
+ gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
+ return ret;
+ }
+
+ remain = num_pages;
+ slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ while (remain > 0) {
+ len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
+
+ slot_base =
+ (u64 __iomem *)(page_address(pte->page) + pte->offset);
+ ret = gasket_perform_mapping(pg_tbl, pte->sublevel + slot_idx,
+ slot_base + slot_idx, host_addr,
+ len, 0);
+ if (ret) {
+ gasket_page_table_unmap_nolock(pg_tbl, dev_addr,
+ num_pages);
+ return ret;
+ }
+
+ remain -= len;
+ slot_idx = 0;
+ pte++;
+ host_addr += len * PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/*
+ * See gasket_page_table.h for general description.
+ *
+ * gasket_page_table_map calls either gasket_map_simple_pages() or
+ * gasket_map_extended_pages() to actually perform the mapping.
+ *
+ * The page table mutex is held for the entire operation.
+ */
+int gasket_page_table_map(struct gasket_page_table *pg_tbl, ulong host_addr,
+ ulong dev_addr, uint num_pages)
+{
+ int ret;
+
+ if (!num_pages)
+ return 0;
+
+ mutex_lock(&pg_tbl->mutex);
+
+ if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
+ ret = gasket_map_simple_pages(pg_tbl, host_addr, dev_addr,
+ num_pages);
+ } else {
+ ret = gasket_map_extended_pages(pg_tbl, host_addr, dev_addr,
+ num_pages);
+ }
+
+ mutex_unlock(&pg_tbl->mutex);
+
+ dev_dbg(pg_tbl->device,
+ "%s done: ha %llx daddr %llx num %d, ret %d\n",
+ __func__, (unsigned long long)host_addr,
+ (unsigned long long)dev_addr, num_pages, ret);
+ return ret;
+}
+EXPORT_SYMBOL(gasket_page_table_map);
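
A caller-side sketch (hypothetical driver code; pg_tbl, host_addr, dev_addr and size are illustrative) tying the validation and mapping entry points together:

	if (gasket_page_table_are_addrs_bad(pg_tbl, host_addr, dev_addr, size))
		return -EINVAL;

	ret = gasket_page_table_map(pg_tbl, host_addr, dev_addr,
				    size / PAGE_SIZE);
	if (ret)
		return ret;

	/* ... the device DMAs through the mapped range ... */

	gasket_page_table_unmap(pg_tbl, dev_addr, size / PAGE_SIZE);

Both calls take the page table mutex internally, so no extra locking is needed at this level.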
+
+/*
+ * See gasket_page_table.h for general description.
+ *
+ * gasket_page_table_unmap takes the page table lock and calls either
+ * gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
+ * actually unmap the pages from device space.
+ *
+ * The page table mutex is held for the entire operation.
+ */
+void gasket_page_table_unmap(struct gasket_page_table *pg_tbl, ulong dev_addr,
+ uint num_pages)
+{
+ if (!num_pages)
+ return;
+
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
+ mutex_unlock(&pg_tbl->mutex);
+}
+EXPORT_SYMBOL(gasket_page_table_unmap);
+
+static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
+{
+ gasket_unmap_simple_pages(pg_tbl,
+ gasket_components_to_dev_address(pg_tbl, 1, 0,
+ 0),
+ pg_tbl->num_simple_entries);
+ gasket_unmap_extended_pages(pg_tbl,
+ gasket_components_to_dev_address(pg_tbl, 0,
+ 0, 0),
+ pg_tbl->num_extended_entries *
+ GASKET_PAGES_PER_SUBTABLE);
+}
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
+{
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_unmap_all_nolock(pg_tbl);
+ mutex_unlock(&pg_tbl->mutex);
+}
+EXPORT_SYMBOL(gasket_page_table_unmap_all);
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
+{
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_unmap_all_nolock(pg_tbl);
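+ /* An extended offset past the last entry leaves every address simple. */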
+ writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
+ mutex_unlock(&pg_tbl->mutex);
+}
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_lookup_page(
+ struct gasket_page_table *pg_tbl, ulong dev_addr, struct page **ppage,
+ ulong *poffset)
+{
+ uint page_num;
+ struct gasket_page_table_entry *pte;
+
+ mutex_lock(&pg_tbl->mutex);
+ if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
+ page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
+ if (page_num >= pg_tbl->num_simple_entries)
+ goto fail;
+
+ pte = pg_tbl->entries + page_num;
+ if (pte->status != PTE_INUSE)
+ goto fail;
+ } else {
+ /* Find the level 0 entry, */
+ page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+ if (page_num >= pg_tbl->num_extended_entries)
+ goto fail;
+
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
+ if (pte->status != PTE_INUSE)
+ goto fail;
+
+ /* and its contained level 1 entry. */
+ page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pte->sublevel + page_num;
+ if (pte->status != PTE_INUSE)
+ goto fail;
+ }
+
+ *ppage = pte->page;
+ *poffset = pte->offset;
+ mutex_unlock(&pg_tbl->mutex);
+ return 0;
+
+fail:
+ *ppage = NULL;
+ *poffset = 0;
+ mutex_unlock(&pg_tbl->mutex);
+ return -1;
+}
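
A sketch of using the lookup from driver code (the device address is illustrative; the -1 return maps naturally onto an errno at the call site):

	struct page *page;
	ulong offset;

	if (gasket_page_table_lookup_page(pg_tbl, dev_addr, &page, &offset))
		return -EFAULT;
	pr_debug("dev 0x%lx -> pfn %lu + 0x%lx\n",
		 dev_addr, page_to_pfn(page), offset);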
+
+/* See gasket_page_table.h for description. */
+bool gasket_page_table_are_addrs_bad(
+ struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
+ ulong bytes)
+{
+ if (host_addr & (PAGE_SIZE - 1)) {
+ dev_err(pg_tbl->device,
+ "host mapping address 0x%lx must be page aligned\n",
+ host_addr);
+ return true;
+ }
+
+ return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
+}
+EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);
+
+/* See gasket_page_table.h for description. */
+bool gasket_page_table_is_dev_addr_bad(
+ struct gasket_page_table *pg_tbl, ulong dev_addr, ulong bytes)
+{
+ uint num_pages = bytes / PAGE_SIZE;
+
+ if (bytes & (PAGE_SIZE - 1)) {
+ dev_err(pg_tbl->device,
+ "mapping size 0x%lX must be page aligned\n", bytes);
+ return true;
+ }
+
+ if (num_pages == 0) {
+ dev_err(pg_tbl->device,
+ "requested mapping is less than one page: %lu / %lu\n",
+ bytes, PAGE_SIZE);
+ return true;
+ }
+
+ if (gasket_addr_is_simple(pg_tbl, dev_addr))
+ return gasket_is_simple_dev_addr_bad(pg_tbl, dev_addr,
+ num_pages);
+ return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
+}
+EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_max_size(struct gasket_page_table *page_table)
+{
+ if (!page_table)
+ return 0;
+ return page_table->config.total_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_max_size);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
+{
+ if (!pg_tbl)
+ return 0;
+ return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_num_entries);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
+{
+ if (!pg_tbl)
+ return 0;
+ return pg_tbl->num_simple_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_num_simple_entries);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
+{
+ if (!pg_tbl)
+ return 0;
+ return pg_tbl->num_active_pages;
+}
+EXPORT_SYMBOL(gasket_page_table_num_active_pages);
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_system_status(struct gasket_page_table *page_table)
+{
+ if (!page_table)
+ return GASKET_STATUS_LAMED;
+
+ if (gasket_page_table_num_entries(page_table) == 0) {
+ dev_dbg(page_table->device, "Page table size is 0\n");
+ return GASKET_STATUS_LAMED;
+ }
+
+ return GASKET_STATUS_ALIVE;
+}
+
+/* Record the host_addr to coherent dma memory mapping. */
+int gasket_set_user_virt(
+ struct gasket_dev *gasket_dev, u64 size, dma_addr_t dma_address,
+ ulong vma)
+{
+ int j;
+ struct gasket_page_table *pg_tbl;
+
+ unsigned int num_pages = size / PAGE_SIZE;
+
+ /*
+ * TODO: for future chipsets, better handle the case where multiple
+ * page tables are supported on a given device.
+ */
+ pg_tbl = gasket_dev->page_table[0];
+ if (!pg_tbl) {
+ dev_dbg(gasket_dev->dev, "%s: invalid page table index\n",
+ __func__);
+ return 0;
+ }
+ for (j = 0; j < num_pages; j++) {
+ pg_tbl->coherent_pages[j].user_virt =
+ (u64)vma + j * PAGE_SIZE;
+ }
+ return 0;
+}
+
+/* Allocate a block of coherent memory. */
+int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
+ dma_addr_t *dma_address, u64 index)
+{
+ dma_addr_t handle;
+ void *mem;
+ int j;
+ unsigned int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ const struct gasket_driver_desc *driver_desc =
+ gasket_get_driver_desc(gasket_dev);
+
+ if (!gasket_dev->page_table[index])
+ return -EFAULT;
+
+ if (num_pages == 0)
+ return -EINVAL;
+
+ mem = dma_alloc_coherent(gasket_get_device(gasket_dev),
+ num_pages * PAGE_SIZE, &handle, 0);
+ if (!mem)
+ goto nomem;
+
+ gasket_dev->page_table[index]->num_coherent_pages = num_pages;
+
+ /* allocate the tracking array for the coherent pages */
+ gasket_dev->page_table[index]->coherent_pages =
+ kcalloc(num_pages, sizeof(struct gasket_coherent_page_entry),
+ GFP_KERNEL);
+ if (!gasket_dev->page_table[index]->coherent_pages)
+ goto nomem;
+ *dma_address = 0;
+
+ gasket_dev->coherent_buffer.length_bytes =
+ PAGE_SIZE * (num_pages);
+ gasket_dev->coherent_buffer.phys_base = handle;
+ gasket_dev->coherent_buffer.virt_base = mem;
+
+ *dma_address = driver_desc->coherent_buffer_description.base;
+ for (j = 0; j < num_pages; j++) {
+ gasket_dev->page_table[index]->coherent_pages[j].paddr =
+ handle + j * PAGE_SIZE;
+ gasket_dev->page_table[index]->coherent_pages[j].kernel_virt =
+ (u64)mem + j * PAGE_SIZE;
+ }
+
+ if (*dma_address == 0)
+ goto nomem;
+ return 0;
+
+nomem:
+ if (mem) {
+ dma_free_coherent(gasket_get_device(gasket_dev),
+ num_pages * PAGE_SIZE, mem, handle);
+ }
+
+ if (gasket_dev->page_table[index]->coherent_pages) {
+ kfree(gasket_dev->page_table[index]->coherent_pages);
+ gasket_dev->page_table[index]->coherent_pages = NULL;
+ }
+ gasket_dev->page_table[index]->num_coherent_pages = 0;
+ return -ENOMEM;
+}
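
A pairing sketch (the size and page table index are illustrative assumptions); the device-visible base comes back through *dma_address, and the same value must be passed to the free routine:

	dma_addr_t dma;
	int ret;

	ret = gasket_alloc_coherent_memory(gasket_dev, 16 * PAGE_SIZE, &dma, 0);
	if (ret)
		return ret;

	/* ... program "dma" into the hardware ... */

	gasket_free_coherent_memory(gasket_dev, 16 * PAGE_SIZE, dma, 0);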
+
+/* Free a block of coherent memory. */
+int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
+ dma_addr_t dma_address, u64 index)
+{
+ const struct gasket_driver_desc *driver_desc;
+
+ if (!gasket_dev->page_table[index])
+ return -EFAULT;
+
+ driver_desc = gasket_get_driver_desc(gasket_dev);
+
+ if (driver_desc->coherent_buffer_description.base != dma_address)
+ return -EADDRNOTAVAIL;
+
+ if (gasket_dev->coherent_buffer.length_bytes) {
+ dma_free_coherent(gasket_get_device(gasket_dev),
+ gasket_dev->coherent_buffer.length_bytes,
+ gasket_dev->coherent_buffer.virt_base,
+ gasket_dev->coherent_buffer.phys_base);
+ gasket_dev->coherent_buffer.length_bytes = 0;
+ gasket_dev->coherent_buffer.virt_base = NULL;
+ gasket_dev->coherent_buffer.phys_base = 0;
+ }
+ return 0;
+}
+
+/* Release all coherent memory. */
+void gasket_free_coherent_memory_all(
+ struct gasket_dev *gasket_dev, u64 index)
+{
+ if (!gasket_dev->page_table[index])
+ return;
+
+ if (gasket_dev->coherent_buffer.length_bytes) {
+ dma_free_coherent(gasket_get_device(gasket_dev),
+ gasket_dev->coherent_buffer.length_bytes,
+ gasket_dev->coherent_buffer.virt_base,
+ gasket_dev->coherent_buffer.phys_base);
+ gasket_dev->coherent_buffer.length_bytes = 0;
+ gasket_dev->coherent_buffer.virt_base = NULL;
+ gasket_dev->coherent_buffer.phys_base = 0;
+ }
+}
diff --git a/drivers/staging/gasket/gasket_page_table.h b/drivers/staging/gasket/gasket_page_table.h
new file mode 100644
index 000000000000..7b01b73ea3e7
--- /dev/null
+++ b/drivers/staging/gasket/gasket_page_table.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Gasket Page Table functionality. This file describes the address
+ * translation/paging functionality supported by the Gasket driver framework.
+ * As much as possible, internal details are hidden to simplify use -
+ * all calls are thread-safe (protected by an internal mutex) except where
+ * indicated otherwise.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#ifndef __GASKET_PAGE_TABLE_H__
+#define __GASKET_PAGE_TABLE_H__
+
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+
+/*
+ * Structure used for managing address translation on a device. All details are
+ * internal to the implementation.
+ */
+struct gasket_page_table;
+
+/*
+ * Allocate and init address translation data.
+ * @ppage_table: Pointer to Gasket page table pointer. Set by this call.
+ * @att_base_reg: [Mapped] pointer to the first entry in the device's address
+ * translation table.
+ * @extended_offset_reg: [Mapped] pointer to the device's register containing
+ * the starting index of the extended translation table.
+ * @extended_bit_location: The index of the bit indicating whether an address
+ * is extended.
+ * @total_entries: The total number of entries in the device's address
+ * translation table.
+ * @device: Device structure for the underlying device. Only used for logging.
+ * @pci_dev: PCI system descriptor for the underlying device.
+ *
+ * Description: Allocates and initializes data to track address translation -
+ * simple and extended page table metadata. Initially, the page table is
+ * partitioned such that all addresses are "simple" (single-level lookup).
+ * gasket_page_table_partition can be called to change this partitioning.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
+ const struct gasket_bar_data *bar_data,
+ const struct gasket_page_table_config *page_table_config,
+ struct device *device, struct pci_dev *pci_dev);
+
+/*
+ * Deallocate and cleanup page table data.
+ * @page_table: Gasket page table pointer.
+ *
+ * Description: The inverse of gasket_page_table_init; frees page_table and
+ * its contained elements.
+ *
+ * Because this call destroys the page table, it cannot be
+ * thread-safe (mutex-protected)!
+ */
+void gasket_page_table_cleanup(struct gasket_page_table *page_table);
+
+/*
+ * Sets the size of the simple page table.
+ * @page_table: Gasket page table pointer.
+ * @num_simple_entries: Desired size of the simple page table (in entries).
+ *
+ * Description: gasket_partition_page_table checks to see if the simple page
+ * size can be changed (i.e., if there are no active extended
+ * mappings in the new simple size range), and, if so,
+ * sets the new simple and extended page table sizes.
+ *
+ * Returns 0 if successful, or non-zero if the page table entries
+ * are not free.
+ */
+int gasket_page_table_partition(struct gasket_page_table *page_table,
+ uint num_simple_entries);
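
For instance (the entry count is an illustrative assumption), a driver keeping the first 1024 slots single-level would call:

	if (gasket_page_table_partition(pg_tbl, 1024))
		dev_warn(dev, "new simple range still has active mappings\n");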
+
+/*
+ * Get and map [host] user space pages into device memory.
+ * @page_table: Gasket page table pointer.
+ * @host_addr: Starting host virtual memory address of the pages.
+ * @dev_addr: Starting device address of the pages.
+ * @num_pages: Number of [4kB] pages to map.
+ *
+ * Description: Maps the "num_pages" pages of host memory pointed to by
+ * host_addr to the address "dev_addr" in device memory.
+ *
+ * The caller is responsible for checking the addresses ranges.
+ *
+ * Returns 0 if successful or a non-zero error number otherwise.
+ * If there is an error, no pages are mapped.
+ */
+int gasket_page_table_map(struct gasket_page_table *page_table, ulong host_addr,
+ ulong dev_addr, uint num_pages);
+
+/*
+ * Un-map host pages from device memory.
+ * @page_table: Gasket page table pointer.
+ * @dev_addr: Starting device address of the pages to unmap.
+ * @num_pages: The number of [4kB] pages to unmap.
+ *
+ * Description: The inverse of gasket_page_table_map; unmaps pages from the
+ * device.
+ */
+void gasket_page_table_unmap(struct gasket_page_table *page_table,
+ ulong dev_addr, uint num_pages);
+
+/*
+ * Unmap ALL host pages from device memory.
+ * @page_table: Gasket page table pointer.
+ */
+void gasket_page_table_unmap_all(struct gasket_page_table *page_table);
+
+/*
+ * Unmap all host pages from device memory and reset the table to fully simple
+ * addressing.
+ * @page_table: Gasket page table pointer.
+ */
+void gasket_page_table_reset(struct gasket_page_table *page_table);
+
+/*
+ * Reclaims unused page table memory.
+ * @page_table: Gasket page table pointer.
+ *
+ * Description: Examines the page table and frees any currently-unused
+ * allocations. Called internally on gasket_cleanup().
+ */
+void gasket_page_table_garbage_collect(struct gasket_page_table *page_table);
+
+/*
+ * Retrieve the backing page for a device address.
+ * @page_table: Gasket page table pointer.
+ * @dev_addr: Gasket device address.
+ * @ppage: Pointer to a page pointer for the returned page.
+ * @poffset: Pointer to an unsigned long for the returned offset.
+ *
+ * Description: Interprets the address and looks up the corresponding page
+ * in the page table and the offset in that page. (We need an
+ * offset because the host page may be larger than the Gasket chip
+ * page it contains.)
+ *
+ * Returns 0 if successful, -1 for an error. The page pointer
+ * and offset are returned through the pointers, if successful.
+ */
+int gasket_page_table_lookup_page(struct gasket_page_table *page_table,
+ ulong dev_addr, struct page **page,
+ ulong *poffset);
+
+/*
+ * Checks validity for input addrs and size.
+ * @page_table: Gasket page table pointer.
+ * @host_addr: Host address to check.
+ * @dev_addr: Gasket device address.
+ * @bytes: Size of the range to check (in bytes).
+ *
+ * Description: This call performs a number of checks to verify that the ranges
+ * specified by both addresses and the size are valid for mapping pages into
+ * device memory.
+ *
+ * Returns true if the mapping is bad, false otherwise.
+ */
+bool gasket_page_table_are_addrs_bad(struct gasket_page_table *page_table,
+ ulong host_addr, ulong dev_addr,
+ ulong bytes);
+
+/*
+ * Checks validity for input dev addr and size.
+ * @page_table: Gasket page table pointer.
+ * @dev_addr: Gasket device address.
+ * @bytes: Size of the range to check (in bytes).
+ *
+ * Description: This call performs a number of checks to verify that the range
+ * specified by the device address and the size is valid for mapping pages into
+ * device memory.
+ *
+ * Returns true if the address is bad, false otherwise.
+ */
+bool gasket_page_table_is_dev_addr_bad(struct gasket_page_table *page_table,
+ ulong dev_addr, ulong bytes);
+
+/*
+ * Gets maximum size for the given page table.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_max_size(struct gasket_page_table *page_table);
+
+/*
+ * Gets the total number of entries in the arg.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_num_entries(struct gasket_page_table *page_table);
+
+/*
+ * Gets the number of simple entries.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_num_simple_entries(struct gasket_page_table *page_table);
+
+/*
+ * Gets the number of actively pinned pages.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_num_active_pages(struct gasket_page_table *page_table);
+
+/*
+ * Get status of page table managed by @page_table.
+ * @page_table: Gasket page table pointer.
+ */
+int gasket_page_table_system_status(struct gasket_page_table *page_table);
+
+/*
+ * Allocate a block of coherent memory.
+ * @gasket_dev: Gasket Device.
+ * @size: Size of the memory block.
+ * @dma_address: DMA address allocated by the kernel.
+ * @index: Index of the gasket_page_table within this Gasket device.
+ *
+ * Description: Allocate a contiguous coherent memory block, DMA'ble
+ * by this device.
+ */
+int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
+ dma_addr_t *dma_address, u64 index);
+
+/* Release a block of contiguous coherent memory, in use by a device. */
+int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
+ dma_addr_t dma_address, u64 index);
+
+/* Release all coherent memory. */
+void gasket_free_coherent_memory_all(struct gasket_dev *gasket_dev,
+ u64 index);
+
+/*
+ * Records the host_addr to coherent dma memory mapping.
+ * @gasket_dev: Gasket Device.
+ * @size: Size of the virtual address range to map.
+ * @dma_address: DMA address within the coherent memory range.
+ * @vma: Virtual address we wish to map to coherent memory.
+ *
+ * Description: For each page in the virtual address range, record the
+ * coherent page mapping.
+ *
+ * Does not perform validity checking.
+ */
+int gasket_set_user_virt(struct gasket_dev *gasket_dev, u64 size,
+ dma_addr_t dma_address, ulong vma);
+
+#endif /* __GASKET_PAGE_TABLE_H__ */
diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
new file mode 100644
index 000000000000..fc45f0d13e87
--- /dev/null
+++ b/drivers/staging/gasket/gasket_sysfs.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Google, Inc. */
+#include "gasket_sysfs.h"
+
+#include "gasket_core.h"
+
+#include <linux/device.h>
+#include <linux/printk.h>
+
+/*
+ * Pair of kernel device and user-specified pointer. Used in lookups in sysfs
+ * "show" functions to return user data.
+ */
+
+struct gasket_sysfs_mapping {
+ /*
+ * The device bound to this mapping. If this is NULL, then this mapping
+ * is free.
+ */
+ struct device *device;
+
+ /* The Gasket descriptor for this device. */
+ struct gasket_dev *gasket_dev;
+
+ /* This device's set of sysfs attributes/nodes. */
+ struct gasket_sysfs_attribute *attributes;
+
+ /* The number of live elements in "attributes". */
+ int attribute_count;
+
+ /* Protects structure from simultaneous access. */
+ struct mutex mutex;
+
+ /* Tracks active users of this mapping. */
+ struct kref refcount;
+};
+
+/*
+ * 'Global' (file-scoped) list of mappings between devices and gasket_data
+ * pointers. This removes the requirement to have a gasket_sysfs_data
+ * handle in all files. Currently has a fixed size; if space is a concern,
+ * this can be dynamically allocated.
+ */
+static struct gasket_sysfs_mapping dev_mappings[GASKET_SYSFS_NUM_MAPPINGS];
+
+/* Callback when a mapping's refcount goes to zero. */
+static void release_entry(struct kref *ref)
+{
+ /* All work is done after the return from kref_put. */
+}
+
+/* Look up mapping information for the given device. */
+static struct gasket_sysfs_mapping *get_mapping(struct device *device)
+{
+ int i;
+
+ for (i = 0; i < GASKET_SYSFS_NUM_MAPPINGS; i++) {
+ mutex_lock(&dev_mappings[i].mutex);
+ if (dev_mappings[i].device == device) {
+ kref_get(&dev_mappings[i].refcount);
+ mutex_unlock(&dev_mappings[i].mutex);
+ return &dev_mappings[i];
+ }
+ mutex_unlock(&dev_mappings[i].mutex);
+ }
+
+ dev_dbg(device, "%s: Mapping to device %s not found\n",
+ __func__, device->kobj.name);
+ return NULL;
+}
+
+/* Put a reference to a mapping. */
+static void put_mapping(struct gasket_sysfs_mapping *mapping)
+{
+ int i;
+ int num_files_to_remove = 0;
+ struct device_attribute *files_to_remove;
+ struct device *device;
+
+ if (!mapping) {
+ pr_debug("%s: Mapping should not be NULL\n", __func__);
+ return;
+ }
+
+ mutex_lock(&mapping->mutex);
+ if (kref_put(&mapping->refcount, release_entry)) {
+ dev_dbg(mapping->device, "Removing Gasket sysfs mapping\n");
+ /*
+ * We can't remove the sysfs nodes in the kref callback, since
+ * device_remove_file() blocks until the node is free.
+ * Readers/writers of sysfs nodes, though, will be blocked on
+ * the mapping mutex, resulting in deadlock. To fix this, the
+ * sysfs nodes are removed outside the lock.
+ */
+ device = mapping->device;
+ num_files_to_remove = mapping->attribute_count;
+ files_to_remove = kcalloc(num_files_to_remove,
+ sizeof(*files_to_remove),
+ GFP_KERNEL);
+ if (files_to_remove)
+ for (i = 0; i < num_files_to_remove; i++)
+ files_to_remove[i] =
+ mapping->attributes[i].attr;
+ else
+ num_files_to_remove = 0;
+
+ kfree(mapping->attributes);
+ mapping->attributes = NULL;
+ mapping->attribute_count = 0;
+ put_device(mapping->device);
+ mapping->device = NULL;
+ mapping->gasket_dev = NULL;
+ }
+ mutex_unlock(&mapping->mutex);
+
+ if (num_files_to_remove != 0) {
+ for (i = 0; i < num_files_to_remove; ++i)
+ device_remove_file(device, &files_to_remove[i]);
+ kfree(files_to_remove);
+ }
+}
+
+/*
+ * Put a reference to a mapping N times.
+ *
+ * In higher-level resource acquire/release function pairs, the release
+ * function must drop the mapping twice: once for the reference taken by its
+ * own get_mapping() lookup, and once for the reference taken by the matching
+ * acquire call.
+ */
+static void put_mapping_n(struct gasket_sysfs_mapping *mapping, int times)
+{
+ int i;
+
+ for (i = 0; i < times; i++)
+ put_mapping(mapping);
+}
+
+void gasket_sysfs_init(void)
+{
+ int i;
+
+ for (i = 0; i < GASKET_SYSFS_NUM_MAPPINGS; i++) {
+ dev_mappings[i].device = NULL;
+ mutex_init(&dev_mappings[i].mutex);
+ }
+}
+
+int gasket_sysfs_create_mapping(struct device *device,
+ struct gasket_dev *gasket_dev)
+{
+ struct gasket_sysfs_mapping *mapping;
+ int map_idx = -1;
+
+ /*
+ * We need a function-level mutex to protect against the same device
+ * being added [multiple times] simultaneously.
+ */
+ static DEFINE_MUTEX(function_mutex);
+
+ mutex_lock(&function_mutex);
+ dev_dbg(device, "Creating sysfs entries for device\n");
+
+ /* Check that the device we're adding hasn't already been added. */
+ mapping = get_mapping(device);
+ if (mapping) {
+ dev_err(device,
+ "Attempting to re-initialize sysfs mapping for device\n");
+ put_mapping(mapping);
+ mutex_unlock(&function_mutex);
+ return -EBUSY;
+ }
+
+ /* Find the first empty entry in the array. */
+ for (map_idx = 0; map_idx < GASKET_SYSFS_NUM_MAPPINGS; ++map_idx) {
+ mutex_lock(&dev_mappings[map_idx].mutex);
+ if (!dev_mappings[map_idx].device)
+ /* Break with the mutex held! */
+ break;
+ mutex_unlock(&dev_mappings[map_idx].mutex);
+ }
+
+ if (map_idx == GASKET_SYSFS_NUM_MAPPINGS) {
+ dev_err(device, "All mappings have been exhausted\n");
+ mutex_unlock(&function_mutex);
+ return -ENOMEM;
+ }
+
+ dev_dbg(device, "Creating sysfs mapping for device %s\n",
+ device->kobj.name);
+
+ mapping = &dev_mappings[map_idx];
+ mapping->attributes = kcalloc(GASKET_SYSFS_MAX_NODES,
+ sizeof(*mapping->attributes),
+ GFP_KERNEL);
+ if (!mapping->attributes) {
+ dev_dbg(device, "Unable to allocate sysfs attribute array\n");
+ mutex_unlock(&mapping->mutex);
+ mutex_unlock(&function_mutex);
+ return -ENOMEM;
+ }
+
+ kref_init(&mapping->refcount);
+ mapping->device = get_device(device);
+ mapping->gasket_dev = gasket_dev;
+ mapping->attribute_count = 0;
+ mutex_unlock(&mapping->mutex);
+ mutex_unlock(&function_mutex);
+
+ /* Don't decrement the refcount here! One open count keeps it alive! */
+ return 0;
+}
+
+int gasket_sysfs_create_entries(struct device *device,
+ const struct gasket_sysfs_attribute *attrs)
+{
+ int i;
+ int ret;
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping) {
+ dev_dbg(device,
+ "Creating entries for device without first initializing mapping\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mapping->mutex);
+ for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER);
+ i++) {
+ if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) {
+ dev_err(device,
+ "Maximum number of sysfs nodes reached for device\n");
+ mutex_unlock(&mapping->mutex);
+ put_mapping(mapping);
+ return -ENOMEM;
+ }
+
+ ret = device_create_file(device, &attrs[i].attr);
+ if (ret) {
+ dev_dbg(device, "Unable to create device entries\n");
+ mutex_unlock(&mapping->mutex);
+ put_mapping(mapping);
+ return ret;
+ }
+
+ mapping->attributes[mapping->attribute_count] = attrs[i];
+ ++mapping->attribute_count;
+ }
+
+ mutex_unlock(&mapping->mutex);
+ put_mapping(mapping);
+ return 0;
+}
+EXPORT_SYMBOL(gasket_sysfs_create_entries);
+
+void gasket_sysfs_remove_mapping(struct device *device)
+{
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping) {
+ dev_err(device,
+ "Attempted to remove non-existent sysfs mapping to device\n");
+ return;
+ }
+
+ put_mapping_n(mapping, 2);
+}
+
+struct gasket_dev *gasket_sysfs_get_device_data(struct device *device)
+{
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping) {
+ dev_err(device, "device not registered\n");
+ return NULL;
+ }
+
+ return mapping->gasket_dev;
+}
+EXPORT_SYMBOL(gasket_sysfs_get_device_data);
+
+void gasket_sysfs_put_device_data(struct device *device, struct gasket_dev *dev)
+{
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping)
+ return;
+
+ /* See the comment on put_mapping_n() for why the '2' is necessary. */
+ put_mapping_n(mapping, 2);
+}
+EXPORT_SYMBOL(gasket_sysfs_put_device_data);
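
A sketch of the intended acquire/release pairing in a show function (the node and its contents are hypothetical; the page-table accessor comes from gasket_page_table.h):

static ssize_t sketch_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct gasket_dev *gasket_dev = gasket_sysfs_get_device_data(device);
	ssize_t ret;

	if (!gasket_dev)
		return 0;

	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
			gasket_page_table_num_active_pages(
				gasket_dev->page_table[0]));
	/* Releases both the lookup's and the acquire's reference. */
	gasket_sysfs_put_device_data(device, gasket_dev);
	return ret;
}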
+
+struct gasket_sysfs_attribute *
+gasket_sysfs_get_attr(struct device *device, struct device_attribute *attr)
+{
+ int i;
+ int num_attrs;
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+ struct gasket_sysfs_attribute *attrs = NULL;
+
+ if (!mapping)
+ return NULL;
+
+ attrs = mapping->attributes;
+ num_attrs = mapping->attribute_count;
+ for (i = 0; i < num_attrs; ++i) {
+ if (!strcmp(attrs[i].attr.attr.name, attr->attr.name))
+ return &attrs[i];
+ }
+
+ dev_err(device, "Unable to find match for device_attribute %s\n",
+ attr->attr.name);
+ return NULL;
+}
+EXPORT_SYMBOL(gasket_sysfs_get_attr);
+
+void gasket_sysfs_put_attr(struct device *device,
+ struct gasket_sysfs_attribute *attr)
+{
+ int i;
+ int num_attrs;
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+ struct gasket_sysfs_attribute *attrs = NULL;
+
+ if (!mapping)
+ return;
+
+ attrs = mapping->attributes;
+ num_attrs = mapping->attribute_count;
+ for (i = 0; i < num_attrs; ++i) {
+ if (&attrs[i] == attr) {
+ put_mapping_n(mapping, 2);
+ return;
+ }
+ }
+
+ dev_err(device, "Unable to put unknown attribute: %s\n",
+ attr->attr.attr.name);
+}
+EXPORT_SYMBOL(gasket_sysfs_put_attr);
+
+ssize_t gasket_sysfs_register_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ulong parsed_value = 0;
+ struct gasket_sysfs_mapping *mapping;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+
+ if (count < 3 || buf[0] != '0' || buf[1] != 'x') {
+ dev_err(device,
+ "sysfs register write format: \"0x<hex value>\"\n");
+ return -EINVAL;
+ }
+
+ if (kstrtoul(buf, 16, &parsed_value) != 0) {
+ dev_err(device,
+ "Unable to parse input as an unsigned long hex value: %s\n", buf);
+ return -EINVAL;
+ }
+
+ mapping = get_mapping(device);
+ if (!mapping) {
+ dev_err(device, "Device driver may have been removed\n");
+ return 0;
+ }
+
+ gasket_dev = mapping->gasket_dev;
+ if (!gasket_dev) {
+ dev_err(device, "Device driver may have been removed\n");
+ return 0;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ put_mapping(mapping);
+ return count;
+ }
+
+ gasket_dev_write_64(gasket_dev, parsed_value,
+ gasket_attr->data.bar_address.bar,
+ gasket_attr->data.bar_address.offset);
+
+ if (gasket_attr->write_callback)
+ gasket_attr->write_callback(gasket_dev, gasket_attr,
+ parsed_value);
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ put_mapping(mapping);
+ return count;
+}
+EXPORT_SYMBOL(gasket_sysfs_register_store);
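
A sketch of attaching this handler to a register-backed node (the node name, BAR number and offset are illustrative assumptions):

static struct gasket_sysfs_attribute sketch_reg_attrs[] = {
	{
		/* Hypothetical write-only node backed by BAR 0, offset 0x48. */
		.attr = __ATTR(scratch_reg, 0200, NULL,
			       gasket_sysfs_register_store),
		.data.bar_address = { .bar = 0, .offset = 0x48 },
	},
	GASKET_END_OF_ATTR_ARRAY,
};

A write of "0x1234" to the node then lands in BAR 0 at offset 0x48 via gasket_dev_write_64(), after which any configured write_callback fires.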
diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h
new file mode 100644
index 000000000000..f32eaf89e056
--- /dev/null
+++ b/drivers/staging/gasket/gasket_sysfs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Set of common sysfs utilities.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+/*
+ * The functions described here are a set of utilities that allow each file in
+ * the Gasket driver framework to manage its own set of sysfs entries, instead
+ * of centralizing all that work in one file.
+ *
+ * The goal of these utilities is to allow sysfs entries to be created easily
+ * without causing a proliferation of sysfs "show" functions. This requires
+ * O(N) string lookups during show function execution, but as reading sysfs
+ * entries is rarely performance-critical, this is likely acceptable.
+ */
+#ifndef __GASKET_SYSFS_H__
+#define __GASKET_SYSFS_H__
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+#include <linux/device.h>
+#include <linux/stringify.h>
+#include <linux/sysfs.h>
+
+/* The maximum number of mappings/devices a driver needs to support. */
+#define GASKET_SYSFS_NUM_MAPPINGS (GASKET_FRAMEWORK_DESC_MAX * GASKET_DEV_MAX)
+
+/* The maximum number of sysfs nodes in a directory. */
+#define GASKET_SYSFS_MAX_NODES 196
+
+/* End markers for sysfs struct arrays. */
+#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END
+#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN)
+
+/*
+ * Terminator struct for a gasket_sysfs_attr array. Must be at the end of
+ * all gasket_sysfs_attribute arrays.
+ */
+#define GASKET_END_OF_ATTR_ARRAY \
+ { \
+ .attr = __ATTR(GASKET_ARRAY_END_TOKEN, S_IRUGO, NULL, NULL), \
+ .data.attr_type = 0, \
+ }
+
+/*
+ * Pairing of sysfs attribute and user data.
+ * Used in lookups in sysfs "show" functions to return attribute metadata.
+ */
+struct gasket_sysfs_attribute {
+ /* The underlying sysfs device attribute associated with this data. */
+ struct device_attribute attr;
+
+ /* User-specified data to associate with the attribute. */
+ union {
+ struct bar_address_ {
+ ulong bar;
+ ulong offset;
+ } bar_address;
+ uint attr_type;
+ } data;
+
+ /*
+ * Function pointer to a callback to be invoked when this attribute is
+ * written (if so configured). The arguments are to the Gasket device
+ * pointer, the enclosing gasket_attr structure, and the value written.
+ * The callback should perform any logging necessary, as errors cannot
+ * be returned from the callback.
+ */
+ void (*write_callback)(struct gasket_dev *dev,
+ struct gasket_sysfs_attribute *attr,
+ ulong value);
+};
+
+#define GASKET_SYSFS_RO(_name, _show_function, _attr_type) \
+ { \
+ .attr = __ATTR(_name, S_IRUGO, _show_function, NULL), \
+ .data.attr_type = _attr_type \
+ }
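
A sketch of a driver-side attribute table built with this macro (the node name and show function are hypothetical):

static ssize_t sketch_status_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

static const struct gasket_sysfs_attribute sketch_attrs[] = {
	GASKET_SYSFS_RO(status, sketch_status_show, 0),
	GASKET_END_OF_ATTR_ARRAY,
};

The array is then handed to gasket_sysfs_create_entries() once gasket_sysfs_create_mapping() has bound the device.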
+
+/* Initializes the Gasket sysfs subsystem.
+ *
+ * Description: Performs one-time initialization. Must be called before usage
+ * at [Gasket] module load time.
+ */
+void gasket_sysfs_init(void);
+
+/*
+ * Create an entry in mapping_data between a device and a Gasket device.
+ * @device: Device struct to map to.
+ * @gasket_dev: The dev struct associated with the driver controlling @device.
+ *
+ * Description: This function maps a gasket_dev* to a device*. This mapping can
+ * be used in sysfs_show functions to get a handle to the gasket_dev struct
+ * controlling the device node.
+ *
+ * If this function is not called before gasket_sysfs_create_entries, a warning
+ * will be logged.
+ */
+int gasket_sysfs_create_mapping(struct device *device,
+ struct gasket_dev *gasket_dev);
+
+/*
+ * Creates bulk entries in sysfs.
+ * @device: Kernel device structure.
+ * @attrs: List of attributes/sysfs entries to create.
+ *
+ * Description: Creates each sysfs entry described in "attrs". Can be called
+ * multiple times for a given @device. If the gasket_dev specified in
+ * gasket_sysfs_create_mapping had a legacy device, the entries will be created
+ * for it, as well.
+ */
+int gasket_sysfs_create_entries(struct device *device,
+ const struct gasket_sysfs_attribute *attrs);
+
+/*
+ * Removes a device mapping from the global table.
+ * @device: Device to unmap.
+ *
+ * Description: Removes the device->Gasket device mapping from the internal
+ * table.
+ */
+void gasket_sysfs_remove_mapping(struct device *device);
+
+/*
+ * User data lookup based on kernel device structure.
+ * @device: Kernel device structure.
+ *
+ * Description: Returns the user data associated with "device" in a prior call
+ * to gasket_sysfs_create_mapping. Returns NULL if no mapping can be found.
+ * Upon success, this call takes a reference to internal sysfs data that must
+ * be released with gasket_sysfs_put_device_data. While this reference is
+ * held, the underlying device sysfs information/structure will remain
+ * valid/will not be deleted.
+ */
+struct gasket_dev *gasket_sysfs_get_device_data(struct device *device);
+
+/*
+ * Releases a reference to internal data.
+ * @device: Kernel device structure.
+ * @dev: Gasket device descriptor (returned by gasket_sysfs_get_device_data).
+ */
+void gasket_sysfs_put_device_data(struct device *device,
+ struct gasket_dev *gasket_dev);
+
+/*
+ * Gasket-specific attribute lookup.
+ * @device: Kernel device structure.
+ * @attr: Device attribute to look up.
+ *
+ * Returns the Gasket sysfs attribute associated with the kernel device
+ * attribute and device structure itself. Upon success, this call will take a
+ * reference to internal sysfs data that must be released with a call to
+ * gasket_sysfs_put_attr. While this reference is held, the underlying device
+ * sysfs information/structure will remain valid/will not be deleted.
+ */
+struct gasket_sysfs_attribute *
+gasket_sysfs_get_attr(struct device *device, struct device_attribute *attr);
+
+/*
+ * Releases a reference to internal data.
+ * @device: Kernel device structure.
+ * @attr: Gasket sysfs attribute descriptor (returned by
+ * gasket_sysfs_get_attr).
+ */
+void gasket_sysfs_put_attr(struct device *device,
+ struct gasket_sysfs_attribute *attr);
+
+/*
+ * Write to a register sysfs node.
+ * @buf: NULL-terminated data being written.
+ * @count: number of bytes in the "buf" argument.
+ */
+ssize_t gasket_sysfs_register_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#endif /* __GASKET_SYSFS_H__ */
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index bf554f7c56ca..6e813693a766 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -33,7 +33,7 @@ static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
static DEFINE_MUTEX(gdm_table_lock);
-static char *DRIVER_STRING[TTY_MAX_COUNT] = {"GCTATC", "GCTDM"};
+static const char *DRIVER_STRING[TTY_MAX_COUNT] = {"GCTATC", "GCTDM"};
static char *DEVICE_STRING[TTY_MAX_COUNT] = {"GCT-ATC", "GCT-DM"};
static void gdm_port_destruct(struct tty_port *port)
@@ -55,22 +55,14 @@ static int gdm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct gdm *gdm = NULL;
int ret;
- int i;
- int j;
-
- j = GDM_TTY_MINOR;
- for (i = 0; i < TTY_MAX_COUNT; i++) {
- if (!strcmp(tty->driver->driver_name, DRIVER_STRING[i])) {
- j = tty->index;
- break;
- }
- }
- if (j == GDM_TTY_MINOR)
+ ret = match_string(DRIVER_STRING, TTY_MAX_COUNT,
+ tty->driver->driver_name);
+ if (ret < 0)
return -ENODEV;
mutex_lock(&gdm_table_lock);
- gdm = gdm_table[i][j];
+ gdm = gdm_table[ret][tty->index];
if (!gdm) {
mutex_unlock(&gdm_table_lock);
return -ENODEV;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 0218782d1a08..dc4da66c3695 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -879,14 +879,9 @@ static void gdm_usb_disconnect(struct usb_interface *intf)
{
struct phy_dev *phy_dev;
struct lte_udev *udev;
- u16 idVendor, idProduct;
struct usb_device *usbdev;
usbdev = interface_to_usbdev(intf);
-
- idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
- idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
-
phy_dev = usb_get_intfdata(intf);
udev = phy_dev->priv_dev;
diff --git a/drivers/staging/goldfish/README b/drivers/staging/goldfish/README
index 183af0053234..ed08c4d46e75 100644
--- a/drivers/staging/goldfish/README
+++ b/drivers/staging/goldfish/README
@@ -3,9 +3,3 @@ Audio
- Move to using the ALSA framework not faking it
- Fix the wrong user page DMA (moving to ALSA may fix that too)
-NAND
-----
-- Remove excess checking of parameters in calls
-- Use dma coherent memory not kmalloc/__pa for the memory (this is just
- a cleanliness issue not a correctness one)
-
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index bd559956f199..3a75df1d2a0a 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/goldfish.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("Android QEMU Audio Driver");
@@ -37,18 +38,19 @@ MODULE_VERSION("1.0");
struct goldfish_audio {
char __iomem *reg_base;
int irq;
+
/* lock protects access to buffer_status and to device registers */
spinlock_t lock;
wait_queue_head_t wait;
char *buffer_virt; /* combined buffer virtual address */
- unsigned long buffer_phys; /* combined buffer physical address */
+ unsigned long buffer_phys; /* combined buffer physical address */
char *write_buffer1; /* write buffer 1 virtual address */
char *write_buffer2; /* write buffer 2 virtual address */
char *read_buffer; /* read buffer virtual address */
int buffer_status;
- int read_supported; /* true if we have audio input support */
+ int read_supported; /* true if we have audio input support */
};
/*
@@ -56,16 +58,11 @@ struct goldfish_audio {
* Having two read buffers facilitate stereo -> mono conversion.
* Having two write buffers facilitate interleaved IO.
*/
-#define READ_BUFFER_SIZE 16384
-#define WRITE_BUFFER_SIZE 16384
-#define COMBINED_BUFFER_SIZE ((2 * READ_BUFFER_SIZE) + \
+#define READ_BUFFER_SIZE 16384
+#define WRITE_BUFFER_SIZE 16384
+#define COMBINED_BUFFER_SIZE ((2 * READ_BUFFER_SIZE) + \
(2 * WRITE_BUFFER_SIZE))
-#define AUDIO_READ(data, addr) (readl(data->reg_base + addr))
-#define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr))
-#define AUDIO_WRITE64(data, addr, addr2, x) \
- (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base + addr2))
-
/*
* temporary variable used between goldfish_audio_probe() and
* goldfish_audio_open()
@@ -103,19 +100,39 @@ enum {
/* this bit set when it is safe to write more bytes to the buffer */
AUDIO_INT_WRITE_BUFFER_1_EMPTY = 1U << 0,
AUDIO_INT_WRITE_BUFFER_2_EMPTY = 1U << 1,
- AUDIO_INT_READ_BUFFER_FULL = 1U << 2,
+ AUDIO_INT_READ_BUFFER_FULL = 1U << 2,
- AUDIO_INT_MASK = AUDIO_INT_WRITE_BUFFER_1_EMPTY |
+ AUDIO_INT_MASK = AUDIO_INT_WRITE_BUFFER_1_EMPTY |
AUDIO_INT_WRITE_BUFFER_2_EMPTY |
AUDIO_INT_READ_BUFFER_FULL,
};
static atomic_t open_count = ATOMIC_INIT(0);
+static unsigned int audio_read(const struct goldfish_audio *data, int addr)
+{
+ return readl(data->reg_base + addr);
+}
+
+static void audio_write(const struct goldfish_audio *data,
+ int addr, unsigned int x)
+{
+ writel(x, data->reg_base + addr);
+}
+
+static void audio_write64(const struct goldfish_audio *data,
+ int addr_lo, int addr_hi, unsigned int x)
+{
+ char __iomem *reg_base = data->reg_base;
+
+ gf_write_dma_addr(x, reg_base + addr_lo, reg_base + addr_hi);
+}
+
static ssize_t goldfish_audio_read(struct file *fp, char __user *buf,
size_t count, loff_t *pos)
{
struct goldfish_audio *data = fp->private_data;
+ unsigned long irq_flags;
int length;
int result = 0;
@@ -124,12 +141,16 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf,
while (count > 0) {
length = (count > READ_BUFFER_SIZE ? READ_BUFFER_SIZE : count);
- AUDIO_WRITE(data, AUDIO_START_READ, length);
+ audio_write(data, AUDIO_START_READ, length);
wait_event_interruptible(data->wait, data->buffer_status &
AUDIO_INT_READ_BUFFER_FULL);
- length = AUDIO_READ(data, AUDIO_READ_BUFFER_AVAILABLE);
+ spin_lock_irqsave(&data->lock, irq_flags);
+ data->buffer_status &= ~AUDIO_INT_READ_BUFFER_FULL;
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+
+ length = audio_read(data, AUDIO_READ_BUFFER_AVAILABLE);
/* copy data to user space */
if (copy_to_user(buf, data->read_buffer, length))
@@ -177,10 +198,10 @@ static ssize_t goldfish_audio_write(struct file *fp, const char __user *buf,
*/
if (kbuf == data->write_buffer1) {
data->buffer_status &= ~AUDIO_INT_WRITE_BUFFER_1_EMPTY;
- AUDIO_WRITE(data, AUDIO_WRITE_BUFFER_1, copy);
+ audio_write(data, AUDIO_WRITE_BUFFER_1, copy);
} else {
data->buffer_status &= ~AUDIO_INT_WRITE_BUFFER_2_EMPTY;
- AUDIO_WRITE(data, AUDIO_WRITE_BUFFER_2, copy);
+ audio_write(data, AUDIO_WRITE_BUFFER_2, copy);
}
spin_unlock_irqrestore(&data->lock, irq_flags);
@@ -200,7 +221,7 @@ static int goldfish_audio_open(struct inode *ip, struct file *fp)
fp->private_data = audio_data;
audio_data->buffer_status = (AUDIO_INT_WRITE_BUFFER_1_EMPTY |
AUDIO_INT_WRITE_BUFFER_2_EMPTY);
- AUDIO_WRITE(audio_data, AUDIO_INT_ENABLE, AUDIO_INT_MASK);
+ audio_write(audio_data, AUDIO_INT_ENABLE, AUDIO_INT_MASK);
return 0;
}
@@ -212,7 +233,7 @@ static int goldfish_audio_release(struct inode *ip, struct file *fp)
{
atomic_dec(&open_count);
/* FIXME: surely this is wrong for the multi-opened case */
- AUDIO_WRITE(audio_data, AUDIO_INT_ENABLE, 0);
+ audio_write(audio_data, AUDIO_INT_ENABLE, 0);
return 0;
}
@@ -235,7 +256,7 @@ static irqreturn_t goldfish_audio_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&data->lock, irq_flags);
/* read buffer status flags */
- status = AUDIO_READ(data, AUDIO_INT_STATUS);
+ status = audio_read(data, AUDIO_INT_STATUS);
status &= AUDIO_INT_MASK;
/*
* if buffers are newly empty, wake up blocked
@@ -295,7 +316,8 @@ static int goldfish_audio_probe(struct platform_device *pdev)
return -ENODEV;
}
data->buffer_virt = dmam_alloc_coherent(&pdev->dev,
- COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL);
+ COMBINED_BUFFER_SIZE,
+ &buf_addr, GFP_KERNEL);
if (!data->buffer_virt) {
dev_err(&pdev->dev, "allocate buffer failed\n");
return -ENOMEM;
@@ -320,18 +342,18 @@ static int goldfish_audio_probe(struct platform_device *pdev)
return ret;
}
- AUDIO_WRITE64(data, AUDIO_SET_WRITE_BUFFER_1,
+ audio_write64(data, AUDIO_SET_WRITE_BUFFER_1,
AUDIO_SET_WRITE_BUFFER_1_HIGH, buf_addr);
buf_addr += WRITE_BUFFER_SIZE;
- AUDIO_WRITE64(data, AUDIO_SET_WRITE_BUFFER_2,
+ audio_write64(data, AUDIO_SET_WRITE_BUFFER_2,
AUDIO_SET_WRITE_BUFFER_2_HIGH, buf_addr);
buf_addr += WRITE_BUFFER_SIZE;
- data->read_supported = AUDIO_READ(data, AUDIO_READ_SUPPORTED);
+ data->read_supported = audio_read(data, AUDIO_READ_SUPPORTED);
if (data->read_supported)
- AUDIO_WRITE64(data, AUDIO_SET_READ_BUFFER,
+ audio_write64(data, AUDIO_SET_READ_BUFFER,
AUDIO_SET_READ_BUFFER_HIGH, buf_addr);
audio_data = data;
@@ -351,12 +373,21 @@ static const struct of_device_id goldfish_audio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, goldfish_audio_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id goldfish_audio_acpi_match[] = {
+ { "GFSH0005", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_audio_acpi_match);
+#endif
+
static struct platform_driver goldfish_audio_driver = {
.probe = goldfish_audio_probe,
.remove = goldfish_audio_remove,
.driver = {
.name = "goldfish_audio",
.of_match_table = goldfish_audio_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_audio_acpi_match),
}
};
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index aee2335a25a1..e86ac9e47867 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -9,7 +9,6 @@ source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/addac/Kconfig"
source "drivers/staging/iio/cdc/Kconfig"
source "drivers/staging/iio/frequency/Kconfig"
-source "drivers/staging/iio/gyro/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
source "drivers/staging/iio/meter/Kconfig"
source "drivers/staging/iio/resolver/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index c28d657497de..b15904b99581 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -8,7 +8,6 @@ obj-y += adc/
obj-y += addac/
obj-y += cdc/
obj-y += frequency/
-obj-y += gyro/
obj-y += impedance-analyzer/
obj-y += meter/
obj-y += resolver/
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index b3e4571340ab..5cc96c8086b5 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -168,7 +168,6 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
{
struct adis *st = iio_priv(indio_dev);
int ret;
- int bits;
u8 addr;
s16 val16;
@@ -202,14 +201,11 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
*val = 25000 / -470 - 1278; /* 25 C = 1278 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
- bits = 14;
addr = adis16203_addresses[chan->scan_index];
ret = adis_read_reg_16(st, addr, &val16);
if (ret)
return ret;
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
+ *val = sign_extend32(val16, 13);
return IIO_VAL_INT;
default:
return -EINVAL;
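
sign_extend32() takes the bit index of the sign bit, so the 14-bit CALIBBIAS field uses 13; a quick sketch of the equivalence with the removed open-coded shifts:

	s16 val16 = 0x3fff;		/* all 14 bits set: -1 in 14-bit two's complement */

	val16 &= (1 << 14) - 1;
	val16 = (s16)(val16 << 2) >> 2;	/* open-coded result: val16 == -1 */

	WARN_ON(sign_extend32(0x3fff, 13) != -1);	/* helper agrees */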
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index fff6d99089cc..24e525f1ef25 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -250,7 +250,6 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
{
struct adis *st = iio_priv(indio_dev);
int ret;
- int bits;
u8 addr;
s16 val16;
@@ -287,24 +286,18 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
*val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
- bits = 10;
addr = adis16240_addresses[chan->scan_index][0];
ret = adis_read_reg_16(st, addr, &val16);
if (ret)
return ret;
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
+ *val = sign_extend32(val16, 9);
return IIO_VAL_INT;
case IIO_CHAN_INFO_PEAK:
- bits = 10;
addr = adis16240_addresses[chan->scan_index][1];
ret = adis_read_reg_16(st, addr, &val16);
if (ret)
return ret;
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
+ *val = sign_extend32(val16, 9);
return IIO_VAL_INT;
}
return -EINVAL;
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index acaed8d5379c..9716ee9d94a7 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -57,7 +57,7 @@ struct ad7606_state {
struct ad7606_bus_ops {
/* more methods added in future? */
- int (*read_block)(struct device *, int, void *);
+ int (*read_block)(struct device *dev, int num, void *data);
};
int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
deleted file mode 100644
index f62f68fd6f3f..000000000000
--- a/drivers/staging/iio/gyro/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# IIO Digital Gyroscope Sensor drivers configuration
-#
-menu "Digital gyroscope sensors"
-
-config ADIS16060
- tristate "Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver"
- depends on SPI
- help
- Say Y (yes) here to build support for Analog Devices adis16060 wide bandwidth
- yaw rate gyroscope with SPI.
-
- To compile this driver as a module, say M here: the module will be
- called adis16060. If unsure, say N.
-
-endmenu
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
deleted file mode 100644
index cf22d6d55e27..000000000000
--- a/drivers/staging/iio/gyro/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for digital gyroscope sensor drivers
-#
-
-adis16060-y := adis16060_core.o
-obj-$(CONFIG_ADIS16060) += adis16060.o
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
deleted file mode 100644
index 4e7630caf7d3..000000000000
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * ADIS16060 Wide Bandwidth Yaw Rate Gyroscope with SPI driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#define ADIS16060_GYRO 0x20 /* Measure Angular Rate (Gyro) */
-#define ADIS16060_TEMP_OUT 0x10 /* Measure Temperature */
-#define ADIS16060_AIN2 0x80 /* Measure AIN2 */
-#define ADIS16060_AIN1 0x40 /* Measure AIN1 */
-
-/**
- * struct adis16060_state - device instance specific data
- * @us_w: actual spi_device to write config
- * @us_r: actual spi_device to read back data
- * @buf: transmit or receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct adis16060_state {
- struct spi_device *us_w;
- struct spi_device *us_r;
- struct mutex buf_lock;
-
- u8 buf[3] ____cacheline_aligned;
-};
-
-static struct iio_dev *adis16060_iio_dev;
-
-static int adis16060_spi_write_then_read(struct iio_dev *indio_dev,
- u8 conf, u16 *val)
-{
- int ret;
- struct adis16060_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->buf[2] = conf; /* The last 8 bits clocked in are latched */
- ret = spi_write(st->us_w, st->buf, 3);
-
- if (ret < 0) {
- mutex_unlock(&st->buf_lock);
- return ret;
- }
-
- ret = spi_read(st->us_r, st->buf, 3);
-
- /* The internal successive approximation ADC begins the
- * conversion process on the falling edge of MSEL1 and
- * starts to place data MSB first on the DOUT line at
- * the 6th falling edge of SCLK
- */
- if (!ret)
- *val = ((st->buf[0] & 0x3) << 12) |
- (st->buf[1] << 4) |
- ((st->buf[2] >> 4) & 0xF);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16060_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- u16 tval = 0;
- int ret;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- ret = adis16060_spi_write_then_read(indio_dev,
- chan->address, &tval);
- if (ret < 0)
- return ret;
-
- *val = tval;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_OFFSET:
- *val = -7;
- *val2 = 461117;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 34000;
- return IIO_VAL_INT_PLUS_MICRO;
- }
-
- return -EINVAL;
-}
-
-static const struct iio_info adis16060_info = {
- .read_raw = adis16060_read_raw,
-};
-
-static const struct iio_chan_spec adis16060_channels[] = {
- {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16060_GYRO,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16060_AIN1,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16060_AIN2,
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
- .address = ADIS16060_TEMP_OUT,
- }
-};
-
-static int adis16060_r_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16060_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
- st = iio_priv(indio_dev);
- st->us_r = spi;
- mutex_init(&st->buf_lock);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16060_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adis16060_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16060_channels);
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- adis16060_iio_dev = indio_dev;
- return 0;
-}
-
-static int adis16060_w_probe(struct spi_device *spi)
-{
- int ret;
- struct iio_dev *indio_dev = adis16060_iio_dev;
- struct adis16060_state *st;
-
- if (!indio_dev) {
- ret = -ENODEV;
- goto error_ret;
- }
- st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
- st->us_w = spi;
- return 0;
-
-error_ret:
- return ret;
-}
-
-static int adis16060_w_remove(struct spi_device *spi)
-{
- return 0;
-}
-
-static struct spi_driver adis16060_r_driver = {
- .driver = {
- .name = "adis16060_r",
- },
- .probe = adis16060_r_probe,
-};
-
-static struct spi_driver adis16060_w_driver = {
- .driver = {
- .name = "adis16060_w",
- },
- .probe = adis16060_w_probe,
- .remove = adis16060_w_remove,
-};
-
-static __init int adis16060_init(void)
-{
- int ret;
-
- ret = spi_register_driver(&adis16060_r_driver);
- if (ret < 0)
- return ret;
-
- ret = spi_register_driver(&adis16060_w_driver);
- if (ret < 0) {
- spi_unregister_driver(&adis16060_r_driver);
- return ret;
- }
-
- return 0;
-}
-module_init(adis16060_init);
-
-static __exit void adis16060_exit(void)
-{
- spi_unregister_driver(&adis16060_w_driver);
- spi_unregister_driver(&adis16060_r_driver);
-}
-module_exit(adis16060_exit);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16060 Yaw Rate Gyroscope Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3bcf49466361..14df89510396 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -116,45 +116,26 @@ static struct ad5933_platform_data ad5933_default_pdata = {
.vref_mv = 3300,
};
+#define AD5933_CHANNEL(_type, _extend_name, _info_mask_separate, _address, \
+ _scan_index, _realbits) { \
+ .type = (_type), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = (_info_mask_separate), \
+ .address = (_address), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ }, \
+}
+
static const struct iio_chan_spec ad5933_channels[] = {
- {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = AD5933_REG_TEMP_DATA,
- .scan_index = -1,
- .scan_type = {
- .sign = 's',
- .realbits = 14,
- .storagebits = 16,
- },
- }, { /* Ring Channels */
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "real",
- .address = AD5933_REG_REAL_DATA,
- .scan_index = 0,
- .scan_type = {
- .sign = 's',
- .realbits = 16,
- .storagebits = 16,
- },
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "imag",
- .address = AD5933_REG_IMAG_DATA,
- .scan_index = 1,
- .scan_type = {
- .sign = 's',
- .realbits = 16,
- .storagebits = 16,
- },
- },
+ AD5933_CHANNEL(IIO_TEMP, NULL, BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE), AD5933_REG_TEMP_DATA, -1, 14),
+ /* Ring Channels */
+ AD5933_CHANNEL(IIO_VOLTAGE, "real", 0, AD5933_REG_REAL_DATA, 0, 16),
+ AD5933_CHANNEL(IIO_VOLTAGE, "imag", 0, AD5933_REG_IMAG_DATA, 1, 16),
};
static int ad5933_i2c_write(struct i2c_client *client, u8 reg, u8 len, u8 *data)
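
The AD5933 hunk above replaces three nearly identical open-coded `struct iio_chan_spec` initializers with one macro. A minimal sketch of the same designated-initializer macro technique, with hypothetical `MY_*` names (only the pattern mirrors the patch, not the AD5933 register map):

```c
#include <linux/iio/iio.h>
#include <linux/bitops.h>

#define MY_REG_DATA	0x10	/* hypothetical register address */

#define MY_CHANNEL(_type, _name, _address, _scan_index, _realbits) {	\
	.type = (_type),						\
	.extend_name = (_name),						\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
	.address = (_address),						\
	.scan_index = (_scan_index),					\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = (_realbits),				\
		.storagebits = 16,					\
	},								\
}

static const struct iio_chan_spec my_channels[] = {
	MY_CHANNEL(IIO_VOLTAGE, "real", MY_REG_DATA, 0, 16),
	MY_CHANNEL(IIO_VOLTAGE, "imag", MY_REG_DATA + 2, 1, 16),
};
```

Each table entry now states only what differs between channels; the shared scan_type boilerplate lives in one place.
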
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 0ecffab52ec2..0e554e3359b5 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -354,7 +354,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
u16 auth_type;
unsigned char temp[256];
struct ether_hdr *eth_hdr;
- unsigned short eth_proto;
struct ieee802_1x_hdr *aa1x_hdr;
size_t size;
int ret;
@@ -369,7 +368,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
get_word(priv); /* Reserve Area */
eth_hdr = (struct ether_hdr *)(priv->rxp);
- eth_proto = ntohs(eth_hdr->h_proto);
/* source address check */
if (ether_addr_equal(&priv->eth_addr[0], eth_hdr->h_source)) {
@@ -1842,15 +1840,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
if (dev->flags & IFF_PROMISC) {
- hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_PROMISC);
+ hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_PROMISC);
goto spin_unlock;
}
if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
(dev->flags & IFF_ALLMULTI)) {
- hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCASTALL);
+ hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_MCASTALL);
goto spin_unlock;
}
@@ -1866,8 +1864,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
ETH_ALEN * mc_count);
} else {
priv->sme_i.sme_flag |= SME_MULTICAST;
- hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCAST);
+ hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_MCAST);
}
spin_unlock:
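
The ks7010 change matters because the `_bool` wrapper normalizes its argument to 0/1, so multi-valued items such as the `MCAST_FILTER_*` modes must go through the `_int` variant. A sketch of that wrapper split, assuming a simplified request primitive (the bodies here are illustrative, not the ks_hostif.c implementation):

```c
#include <linux/types.h>

struct ks_wlan_private;	/* opaque in this sketch */

static void hostif_mib_set_request(struct ks_wlan_private *priv, u32 attr,
				   u32 val)
{
	/* stub: would marshal the MIB write into a firmware request */
}

static void hostif_mib_set_request_bool(struct ks_wlan_private *priv,
					u32 attr, bool val)
{
	hostif_mib_set_request(priv, attr, !!val);	/* collapses to 0 or 1 */
}

static void hostif_mib_set_request_int(struct ks_wlan_private *priv,
				       u32 attr, int val)
{
	hostif_mib_set_request(priv, attr, val);	/* preserves enum values */
}
```

Passing `MCAST_FILTER_MCASTALL` through the bool path would silently degrade it to 1, which is why the filter-mode calls switch to `_int`.
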
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 06d1920150da..a90b2eb112f9 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2175,7 +2175,7 @@ static int bcm2048_fops_release(struct file *file)
}
static __poll_t bcm2048_fops_poll(struct file *file,
- struct poll_table_struct *pts)
+ struct poll_table_struct *pts)
{
struct bcm2048_device *bdev = video_drvdata(file);
__poll_t retval = 0;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 390fc98d07dd..1269a983455e 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1312,6 +1312,8 @@ static const struct vb2_ops video_qops = {
.stop_streaming = vpfe_stop_streaming,
.buf_cleanup = vpfe_buf_cleanup,
.buf_queue = vpfe_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/*
@@ -1357,6 +1359,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->dev = vpfe_dev->pdev;
+ q->lock = &video->lock;
ret = vb2_queue_init(q);
if (ret) {
@@ -1598,17 +1601,18 @@ int vpfe_video_init(struct vpfe_video_device *video, const char *name)
return -EINVAL;
}
/* Initialize field of video device */
+ mutex_init(&video->lock);
video->video_dev.release = video_device_release;
video->video_dev.fops = &vpfe_fops;
video->video_dev.ioctl_ops = &vpfe_ioctl_ops;
video->video_dev.minor = -1;
video->video_dev.tvnorms = 0;
+ video->video_dev.lock = &video->lock;
snprintf(video->video_dev.name, sizeof(video->video_dev.name),
"DAVINCI VIDEO %s %s", name, direction);
spin_lock_init(&video->irqlock);
spin_lock_init(&video->dma_queue_lock);
- mutex_init(&video->lock);
ret = media_entity_pads_init(&video->video_dev.entity,
1, &video->pad);
if (ret < 0)
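
The vpfe hunks adopt the standard V4L2 locking pattern: one mutex serializes all ioctls via `video_device->lock`, the same mutex guards the vb2 queue, and the stock `vb2_ops_wait_prepare`/`vb2_ops_wait_finish` helpers drop it while blocking for buffers. A condensed sketch, with `my_*` names assumed and the other mandatory vb2_queue fields (type, io_modes, mem_ops, ...) omitted:

```c
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-v4l2.h>

struct my_video {
	struct video_device vdev;
	struct vb2_queue queue;
	struct mutex lock;
};

static const struct vb2_ops my_qops = {
	.wait_prepare = vb2_ops_wait_prepare,	/* releases q->lock around waits */
	.wait_finish = vb2_ops_wait_finish,	/* re-acquires q->lock */
};

static int my_video_init(struct my_video *video)
{
	mutex_init(&video->lock);		/* before anyone can take it */
	video->vdev.lock = &video->lock;	/* serialize all ioctls */
	video->queue.ops = &my_qops;
	video->queue.lock = &video->lock;	/* same mutex guards the queue */
	return vb2_queue_init(&video->queue);
}
```

Note the patch also moves `mutex_init()` ahead of the first use, matching the init-before-publish ordering in this sketch.
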
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index 22136d3dadcb..4bbd219e8329 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -128,7 +128,7 @@ struct vpfe_video_device {
spinlock_t irqlock;
/* IRQ lock for DMA queue */
spinlock_t dma_queue_lock;
- /* lock used to access this structure */
+ /* lock used to serialize all video4linux ioctls */
struct mutex lock;
/* number of users performing IO */
u32 io_usrs;
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index ae453fd422f0..28f41caba05d 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -103,6 +103,7 @@ struct prp_priv {
int nfb4eof_irq;
int stream_count;
+ u32 frame_sequence; /* frame sequence counter */
bool last_eof; /* waiting for last EOF at stream off */
bool nfb4eof; /* NFB4EOF encountered during streaming */
struct completion last_eof_comp;
@@ -210,12 +211,15 @@ static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch)
done = priv->active_vb2_buf[priv->ipu_buf_num];
if (done) {
+ done->vbuf.field = vdev->fmt.fmt.pix.field;
+ done->vbuf.sequence = priv->frame_sequence;
vb = &done->vbuf.vb2_buf;
vb->timestamp = ktime_get_ns();
vb2_buffer_done(vb, priv->nfb4eof ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
+ priv->frame_sequence++;
priv->nfb4eof = false;
/* get next queued buffer */
@@ -637,6 +641,7 @@ static int prp_start(struct prp_priv *priv)
/* init EOF completion waitq */
init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
priv->last_eof = false;
priv->nfb4eof = false;
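
The prpencvf hunks add per-frame bookkeeping: every completed vb2 buffer is stamped with the field and a monotonically increasing sequence number, and the counter advances on every EOF, even when no vb2 buffer was queued for that frame, then resets at stream start. A minimal sketch with assumed `my_*` types:

```c
#include <linux/ktime.h>
#include <media/videobuf2-v4l2.h>

struct my_priv {
	u32 frame_sequence;
	enum v4l2_field field;
};

static void my_eof(struct my_priv *priv, struct vb2_v4l2_buffer *vbuf,
		   bool error)
{
	if (vbuf) {
		vbuf->field = priv->field;
		vbuf->sequence = priv->frame_sequence;
		vbuf->vb2_buf.timestamp = ktime_get_ns();
		vb2_buffer_done(&vbuf->vb2_buf,
				error ? VB2_BUF_STATE_ERROR :
					VB2_BUF_STATE_DONE);
	}
	priv->frame_sequence++;	/* advances even for frames with no buffer */
}

static void my_stream_start(struct my_priv *priv)
{
	priv->frame_sequence = 0;	/* sequence restarts on every streamon */
}
```

Userspace can then detect dropped frames from gaps in `v4l2_buffer.sequence`.
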
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 4e3fdf8aeef5..256039ce561e 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -170,23 +170,22 @@ static int capture_enum_fmt_vid_cap(struct file *file, void *fh,
}
cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
- if (!cc_src)
- cc_src = imx_media_find_mbus_format(fmt_src.format.code,
- CS_SEL_ANY, true);
- if (!cc_src)
- return -EINVAL;
-
- if (cc_src->bayer) {
- if (f->index != 0)
- return -EINVAL;
- fourcc = cc_src->fourcc;
- } else {
+ if (cc_src) {
u32 cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
CS_SEL_YUV : CS_SEL_RGB;
ret = imx_media_enum_format(&fourcc, f->index, cs_sel);
if (ret)
return ret;
+ } else {
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ CS_SEL_ANY, true);
+ if (WARN_ON(!cc_src))
+ return -EINVAL;
+
+ if (f->index != 0)
+ return -EINVAL;
+ fourcc = cc_src->fourcc;
}
f->pixelformat = fourcc;
@@ -219,15 +218,7 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
return ret;
cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
- if (!cc_src)
- cc_src = imx_media_find_mbus_format(fmt_src.format.code,
- CS_SEL_ANY, true);
- if (!cc_src)
- return -EINVAL;
-
- if (cc_src->bayer) {
- cc = cc_src;
- } else {
+ if (cc_src) {
u32 fourcc, cs_sel;
cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
@@ -239,6 +230,13 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
imx_media_enum_format(&fourcc, 0, cs_sel);
cc = imx_media_find_format(fourcc, cs_sel, false);
}
+ } else {
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ CS_SEL_ANY, true);
+ if (WARN_ON(!cc_src))
+ return -EINVAL;
+
+ cc = cc_src;
}
imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, &fmt_src.format, cc);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 95d7805f3485..cd2c291e1e94 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -111,6 +111,7 @@ struct csi_priv {
struct v4l2_ctrl_handler ctrl_hdlr;
int stream_count; /* streaming counter */
+ u32 frame_sequence; /* frame sequence counter */
bool last_eof; /* waiting for last EOF at stream off */
bool nfb4eof; /* NFB4EOF encountered during streaming */
struct completion last_eof_comp;
@@ -121,10 +122,32 @@ static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev)
return container_of(sdev, struct csi_priv, sd);
}
+static inline bool is_parallel_bus(struct v4l2_fwnode_endpoint *ep)
+{
+ return ep->bus_type != V4L2_MBUS_CSI2;
+}
+
static inline bool is_parallel_16bit_bus(struct v4l2_fwnode_endpoint *ep)
{
- return ep->bus_type != V4L2_MBUS_CSI2 &&
- ep->bus.parallel.bus_width >= 16;
+ return is_parallel_bus(ep) && ep->bus.parallel.bus_width >= 16;
+}
+
+/*
+ * Check for conditions that require the IPU to handle the
+ * data internally as generic data, aka passthrough mode:
+ * - raw bayer media bus formats, or
+ * - the CSI is receiving from a 16-bit parallel bus, or
+ * - the CSI is receiving from an 8-bit parallel bus and the incoming
+ * media bus format is other than UYVY8_2X8/YUYV8_2X8.
+ */
+static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
+ struct v4l2_mbus_framefmt *infmt,
+ const struct imx_media_pixfmt *incc)
+{
+ return incc->bayer || is_parallel_16bit_bus(ep) ||
+ (is_parallel_bus(ep) &&
+ infmt->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
+ infmt->code != MEDIA_BUS_FMT_YUYV8_2X8);
}
/*
@@ -236,12 +259,15 @@ static void csi_vb2_buf_done(struct csi_priv *priv)
done = priv->active_vb2_buf[priv->ipu_buf_num];
if (done) {
+ done->vbuf.field = vdev->fmt.fmt.pix.field;
+ done->vbuf.sequence = priv->frame_sequence;
vb = &done->vbuf.vb2_buf;
vb->timestamp = ktime_get_ns();
vb2_buffer_done(vb, priv->nfb4eof ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
+ priv->frame_sequence++;
priv->nfb4eof = false;
/* get next queued buffer */
@@ -367,15 +393,18 @@ static void csi_idmac_unsetup_vb2_buf(struct csi_priv *priv,
static int csi_idmac_setup_channel(struct csi_priv *priv)
{
struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *incc;
struct v4l2_mbus_framefmt *infmt;
struct ipu_image image;
u32 passthrough_bits;
+ u32 passthrough_cycles;
dma_addr_t phys[2];
bool passthrough;
u32 burst_size;
int ret;
infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
ipu_cpmem_zero(priv->idmac_ch);
@@ -389,12 +418,9 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
image.phys0 = phys[0];
image.phys1 = phys[1];
- /*
- * Check for conditions that require the IPU to handle the
- * data internally as generic data, aka passthrough mode:
- * - raw bayer formats
- * - the CSI is receiving from a 16-bit parallel bus
- */
+ passthrough = requires_passthrough(&priv->upstream_ep, infmt, incc);
+ passthrough_cycles = 1;
+
switch (image.pix.pixelformat) {
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
@@ -402,7 +428,6 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
case V4L2_PIX_FMT_SRGGB8:
case V4L2_PIX_FMT_GREY:
burst_size = 16;
- passthrough = true;
passthrough_bits = 8;
break;
case V4L2_PIX_FMT_SBGGR16:
@@ -411,7 +436,6 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
case V4L2_PIX_FMT_SRGGB16:
case V4L2_PIX_FMT_Y16:
burst_size = 8;
- passthrough = true;
passthrough_bits = 16;
break;
case V4L2_PIX_FMT_YUV420:
@@ -419,7 +443,6 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
burst_size = (image.pix.width & 0x3f) ?
((image.pix.width & 0x1f) ?
((image.pix.width & 0xf) ? 8 : 16) : 32) : 64;
- passthrough = is_parallel_16bit_bus(&priv->upstream_ep);
passthrough_bits = 16;
/* Skip writing U and V components to odd rows */
ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch);
@@ -428,18 +451,25 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
case V4L2_PIX_FMT_UYVY:
burst_size = (image.pix.width & 0x1f) ?
((image.pix.width & 0xf) ? 8 : 16) : 32;
- passthrough = is_parallel_16bit_bus(&priv->upstream_ep);
passthrough_bits = 16;
break;
+ case V4L2_PIX_FMT_RGB565:
+ if (passthrough) {
+ burst_size = 16;
+ passthrough_bits = 8;
+ passthrough_cycles = incc->cycles;
+ break;
+ }
+ /* fallthrough - non-passthrough RGB565 (CSI-2 bus) */
default:
burst_size = (image.pix.width & 0xf) ? 8 : 16;
- passthrough = is_parallel_16bit_bus(&priv->upstream_ep);
passthrough_bits = 16;
break;
}
if (passthrough) {
- ipu_cpmem_set_resolution(priv->idmac_ch, image.rect.width,
+ ipu_cpmem_set_resolution(priv->idmac_ch,
+ image.rect.width * passthrough_cycles,
image.rect.height);
ipu_cpmem_set_stride(priv->idmac_ch, image.pix.bytesperline);
ipu_cpmem_set_buffer(priv->idmac_ch, 0, image.phys0);
@@ -545,6 +575,7 @@ static int csi_idmac_start(struct csi_priv *priv)
/* init EOF completion waitq */
init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
priv->last_eof = false;
priv->nfb4eof = false;
@@ -630,17 +661,20 @@ static void csi_idmac_stop(struct csi_priv *priv)
static int csi_setup(struct csi_priv *priv)
{
struct v4l2_mbus_framefmt *infmt, *outfmt;
+ const struct imx_media_pixfmt *incc;
struct v4l2_mbus_config mbus_cfg;
struct v4l2_mbus_framefmt if_fmt;
+ struct v4l2_rect crop;
infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
outfmt = &priv->format_mbus[priv->active_output_pad];
/* compose mbus_config from the upstream endpoint */
mbus_cfg.type = priv->upstream_ep.bus_type;
- mbus_cfg.flags = (priv->upstream_ep.bus_type == V4L2_MBUS_CSI2) ?
- priv->upstream_ep.bus.mipi_csi2.flags :
- priv->upstream_ep.bus.parallel.flags;
+ mbus_cfg.flags = is_parallel_bus(&priv->upstream_ep) ?
+ priv->upstream_ep.bus.parallel.flags :
+ priv->upstream_ep.bus.mipi_csi2.flags;
/*
* we need to pass input frame to CSI interface, but
@@ -648,8 +682,18 @@ static int csi_setup(struct csi_priv *priv)
*/
if_fmt = *infmt;
if_fmt.field = outfmt->field;
+ crop = priv->crop;
- ipu_csi_set_window(priv->csi, &priv->crop);
+ /*
+ * if cycles is set, we need to handle this over multiple cycles as
+ * generic/bayer data
+ */
+ if (is_parallel_bus(&priv->upstream_ep) && incc->cycles) {
+ if_fmt.width *= incc->cycles;
+ crop.width *= incc->cycles;
+ }
+
+ ipu_csi_set_window(priv->csi, &crop);
ipu_csi_set_downsize(priv->csi,
priv->crop.width == 2 * priv->compose.width,
@@ -1007,7 +1051,6 @@ static int csi_link_validate(struct v4l2_subdev *sd,
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
struct v4l2_fwnode_endpoint upstream_ep = {};
- const struct imx_media_pixfmt *incc;
bool is_csi2;
int ret;
@@ -1025,17 +1068,7 @@ static int csi_link_validate(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
priv->upstream_ep = upstream_ep;
- is_csi2 = (upstream_ep.bus_type == V4L2_MBUS_CSI2);
- incc = priv->cc[CSI_SINK_PAD];
-
- if (priv->dest != IPU_CSI_DEST_IDMAC &&
- (incc->bayer || is_parallel_16bit_bus(&upstream_ep))) {
- v4l2_err(&priv->sd,
- "bayer/16-bit parallel buses must go to IDMAC pad\n");
- ret = -EINVAL;
- goto out;
- }
-
+ is_csi2 = !is_parallel_bus(&upstream_ep);
if (is_csi2) {
int vc_num = 0;
/*
@@ -1059,7 +1092,7 @@ static int csi_link_validate(struct v4l2_subdev *sd,
/* select either parallel or MIPI-CSI2 as input to CSI */
ipu_set_csi_src_mux(priv->ipu, priv->csi_id, is_csi2);
-out:
+
mutex_unlock(&priv->lock);
return ret;
}
@@ -1131,6 +1164,7 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint upstream_ep;
const struct imx_media_pixfmt *incc;
struct v4l2_mbus_framefmt *infmt;
int ret = 0;
@@ -1147,7 +1181,13 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
break;
case CSI_SRC_PAD_DIRECT:
case CSI_SRC_PAD_IDMAC:
- if (incc->bayer) {
+ ret = csi_get_upstream_endpoint(priv, &upstream_ep);
+ if (ret) {
+ v4l2_err(&priv->sd, "failed to find upstream endpoint\n");
+ goto out;
+ }
+
+ if (requires_passthrough(&upstream_ep, infmt, incc)) {
if (code->index != 0) {
ret = -EINVAL;
goto out;
@@ -1192,10 +1232,12 @@ static int csi_enum_frame_size(struct v4l2_subdev *sd,
} else {
crop = __csi_get_crop(priv, cfg, fse->which);
- fse->min_width = fse->max_width = fse->index & 1 ?
+ fse->min_width = fse->index & 1 ?
crop->width / 2 : crop->width;
- fse->min_height = fse->max_height = fse->index & 2 ?
+ fse->max_width = fse->min_width;
+ fse->min_height = fse->index & 2 ?
crop->height / 2 : crop->height;
+ fse->max_height = fse->min_height;
}
mutex_unlock(&priv->lock);
@@ -1286,7 +1328,7 @@ static void csi_try_fmt(struct csi_priv *priv,
sdformat->format.width = compose->width;
sdformat->format.height = compose->height;
- if (incc->bayer) {
+ if (requires_passthrough(upstream_ep, infmt, incc)) {
sdformat->format.code = infmt->code;
*cc = incc;
} else {
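
The core of the CSI changes is factoring the scattered passthrough tests into one predicate, `requires_passthrough()`, and reusing it at setup, link validation, format enumeration, and try-format time. A distilled version of its logic with stand-in types, so the three conditions read clearly (`MEDIA_BUS_FMT_*` values come from the real media-bus-format header; everything else here is an assumption):

```c
#include <linux/media-bus-format.h>
#include <linux/types.h>

struct my_endpoint {
	bool is_csi2;		/* stands in for bus_type == V4L2_MBUS_CSI2 */
	u32 parallel_width;	/* ep->bus.parallel.bus_width */
};

struct my_pixfmt {
	bool bayer;
	int cycles;	/* >1 when a pixel needs several 8-bit bus cycles */
};

static bool my_requires_passthrough(const struct my_endpoint *ep,
				    u32 mbus_code,
				    const struct my_pixfmt *incc)
{
	if (incc->bayer)		/* raw bayer is always generic data */
		return true;
	if (ep->is_csi2)
		return false;
	if (ep->parallel_width >= 16)	/* 16-bit parallel bus */
		return true;
	/* 8-bit parallel bus carrying a non-UYVY/YUYV format */
	return mbus_code != MEDIA_BUS_FMT_UYVY8_2X8 &&
	       mbus_code != MEDIA_BUS_FMT_YUYV8_2X8;
}
```

The new `cycles` field (see the imx-media.h hunk below) then widens the CSI window and IDMAC resolution, e.g. RGB565 over an 8-bit parallel bus is handled as two generic 8-bit cycles per pixel.
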
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 7ec2db84451c..8aa13403b09d 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -78,6 +78,7 @@ static const struct imx_media_pixfmt rgb_formats[] = {
.codes = {MEDIA_BUS_FMT_RGB565_2X8_LE},
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
+ .cycles = 2,
}, {
.fourcc = V4L2_PIX_FMT_RGB24,
.codes = {
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index e945e0ed6dd6..57bd094cf765 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -62,6 +62,8 @@ struct imx_media_pixfmt {
u32 fourcc;
u32 codes[4];
int bpp; /* total bpp */
+ /* cycles per pixel for generic (bayer) formats for the parallel bus */
+	/* cycles per pixel for generic (bayer) formats on the parallel bus */
+ int cycles;
enum ipu_color_space cs;
bool planar; /* is a planar format */
bool bayer; /* is a raw bayer format */
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index a3a83424a926..16478fe9e3f8 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -24,6 +23,8 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
+#include <asm/cacheflush.h>
+
#include "iss_video.h"
#include "iss.h"
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index fe90a7cb56f7..31fbc1a75b06 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -785,7 +785,7 @@ static int dim2_probe(struct platform_device *pdev)
if (ret)
return ret;
- dev->disable_platform = pdata ? pdata->disable : 0;
+ dev->disable_platform = pdata ? pdata->disable : NULL;
dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n", fcnt);
hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
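
A one-line cleanup, but worth naming the idiom: pointer members take `NULL`, not `0`, which documents intent and keeps sparse quiet. A trivial sketch with a hypothetical pdata layout:

```c
#include <linux/stddef.h>

struct my_pdata {
	void (*disable)(void);
};

struct my_dev {
	void (*disable_platform)(void);
};

static void my_assign(struct my_dev *dev, const struct my_pdata *pdata)
{
	/* NULL makes the "no callback" case explicit for a pointer member */
	dev->disable_platform = pdata ? pdata->disable : NULL;
}
```
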
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 04c18323c2ea..89b02fc305b8 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -457,7 +457,6 @@ static const struct snd_pcm_ops pcm_ops = {
.trigger = pcm_trigger,
.pointer = pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static int split_arg_list(char *buf, char **card_name, u16 *ch_num,
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index 6b13d85d9d34..d5b27e224b56 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -24,15 +24,12 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
- #address-cells = <1>;
- #size-cells = <0>;
- poll-interval = <20>;
+ gpio-keys {
+ compatible = "gpio-keys";
reset {
label = "reset";
- gpios = <&gpio0 18 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_RESTART>;
};
};
@@ -42,22 +39,22 @@
system {
label = "gb-pc1:green:system";
- gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio 6 GPIO_ACTIVE_LOW>;
};
status {
label = "gb-pc1:green:status";
- gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
};
lan1 {
label = "gb-pc1:green:lan1";
- gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
};
lan2 {
label = "gb-pc1:green:lan2";
- gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
};
};
};
@@ -74,7 +71,7 @@
#size-cells = <1>;
compatible = "jedec,spi-nor";
reg = <0>;
- spi-max-frequency = <10000000>;
+ spi-max-frequency = <50000000>;
partition@0 {
label = "u-boot";
@@ -104,7 +101,8 @@
&sysclock {
compatible = "fixed-clock";
- clock-frequency = <90000000>;
+ /* This is normally 1/4 of cpuclock */
+ clock-frequency = <225000000>;
};
&cpuclock {
@@ -113,6 +111,8 @@
};
&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pins>;
status = "okay";
};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index eb3966b7f033..2e837e60663a 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -38,8 +38,8 @@
#clock-cells = <0>;
compatible = "fixed-clock";
- /* FIXME: there should be way to detect this */
- clock-frequency = <50000000>;
+ /* This is normally 1/4 of cpuclock */
+ clock-frequency = <220000000>;
};
palmbus: palmbus@1E000000 {
@@ -61,37 +61,14 @@
};
gpio: gpio@600 {
- #address-cells = <1>;
- #size-cells = <0>;
-
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
compatible = "mediatek,mt7621-gpio";
+ gpio-controller;
+ interrupt-controller;
reg = <0x600 0x100>;
-
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <2>;
-
- gpio0: bank@0 {
- reg = <0>;
- compatible = "mediatek,mt7621-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpio1: bank@1 {
- reg = <1>;
- compatible = "mediatek,mt7621-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpio2: bank@2 {
- reg = <2>;
- compatible = "mediatek,mt7621-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- };
};
i2c: i2c@900 {
@@ -227,83 +204,83 @@
i2c_pins: i2c {
i2c {
- ralink,group = "i2c";
- ralink,function = "i2c";
+ group = "i2c";
+ function = "i2c";
};
};
spi_pins: spi {
spi {
- ralink,group = "spi";
- ralink,function = "spi";
+ group = "spi";
+ function = "spi";
};
};
uart1_pins: uart1 {
uart1 {
- ralink,group = "uart1";
- ralink,function = "uart1";
+ group = "uart1";
+ function = "uart1";
};
};
uart2_pins: uart2 {
uart2 {
- ralink,group = "uart2";
- ralink,function = "uart2";
+ group = "uart2";
+ function = "uart2";
};
};
uart3_pins: uart3 {
uart3 {
- ralink,group = "uart3";
- ralink,function = "uart3";
+ group = "uart3";
+ function = "uart3";
};
};
rgmii1_pins: rgmii1 {
rgmii1 {
- ralink,group = "rgmii1";
- ralink,function = "rgmii1";
+ group = "rgmii1";
+ function = "rgmii1";
};
};
rgmii2_pins: rgmii2 {
rgmii2 {
- ralink,group = "rgmii2";
- ralink,function = "rgmii2";
+ group = "rgmii2";
+ function = "rgmii2";
};
};
mdio_pins: mdio {
mdio {
- ralink,group = "mdio";
- ralink,function = "mdio";
+ group = "mdio";
+ function = "mdio";
};
};
pcie_pins: pcie {
pcie {
- ralink,group = "pcie";
- ralink,function = "pcie rst";
+ group = "pcie";
+ function = "pcie rst";
};
};
nand_pins: nand {
spi-nand {
- ralink,group = "spi";
- ralink,function = "nand1";
+ group = "spi";
+ function = "nand1";
};
sdhci-nand {
- ralink,group = "sdhci";
- ralink,function = "nand2";
+ group = "sdhci";
+ function = "nand2";
};
};
sdhci_pins: sdhci {
sdhci {
- ralink,group = "sdhci";
- ralink,function = "sdhci";
+ group = "sdhci";
+ function = "sdhci";
};
};
};
@@ -417,8 +394,10 @@
pcie: pcie@1e140000 {
compatible = "mediatek,mt7621-pci";
- reg = <0x1e140000 0x100
- 0x1e142000 0x100>;
+ reg = <0x1e140000 0x100 /* host-pci bridge registers */
+ 0x1e142000 0x100 /* pcie port 0 RC control registers */
+ 0x1e143000 0x100 /* pcie port 1 RC control registers */
+ 0x1e144000 0x100>; /* pcie port 2 RC control registers */
#address-cells = <3>;
#size-cells = <2>;
@@ -447,31 +426,28 @@
clocks = <&clkctrl 24 &clkctrl 25 &clkctrl 26>;
clock-names = "pcie0", "pcie1", "pcie2";
- pcie0 {
+ pcie@0,0 {
reg = <0x0000 0 0 0 0>;
-
#address-cells = <3>;
#size-cells = <2>;
-
- device_type = "pci";
+ ranges;
+ bus-range = <0x00 0xff>;
};
- pcie1 {
+ pcie@1,0 {
reg = <0x0800 0 0 0 0>;
-
#address-cells = <3>;
#size-cells = <2>;
-
- device_type = "pci";
+ ranges;
+ bus-range = <0x00 0xff>;
};
- pcie2 {
+ pcie@2,0 {
reg = <0x1000 0 0 0 0>;
-
#address-cells = <3>;
#size-cells = <2>;
-
- device_type = "pci";
+ ranges;
+ bus-range = <0x00 0xff>;
};
};
};
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 2c7a2e666bfb..713507558568 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -1396,14 +1396,13 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
if (!ring->tx_buf)
goto no_tx_mem;
- ring->tx_dma = dma_alloc_coherent(eth->dev,
+ ring->tx_dma = dma_zalloc_coherent(eth->dev,
ring->tx_ring_size * sz,
&ring->tx_phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->tx_dma)
goto no_tx_mem;
- memset(ring->tx_dma, 0, ring->tx_ring_size * sz);
for (i = 0; i < ring->tx_ring_size; i++) {
int next = (i + 1) % ring->tx_ring_size;
u32 next_ptr = ring->tx_phys + next * sz;
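
The hunk above folds an allocation-plus-memset into `dma_zalloc_coherent()`, which zeroes the buffer itself; with that change the `__GFP_ZERO` still passed in the hunk is redundant, though harmless. The shape of the idiom, with hypothetical ring fields:

```c
#include <linux/dma-mapping.h>

struct my_ring {
	void *desc;		/* CPU address of the descriptor ring */
	dma_addr_t phys;	/* device-visible address */
	size_t size;
};

static int my_ring_alloc(struct device *dev, struct my_ring *ring)
{
	/* returns zeroed memory; no separate memset() needed */
	ring->desc = dma_zalloc_coherent(dev, ring->size, &ring->phys,
					 GFP_ATOMIC);
	if (!ring->desc)
		return -ENOMEM;
	return 0;
}
```
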
@@ -1822,10 +1821,9 @@ static int __init mtk_init(struct net_device *dev)
/* If the mac address is invalid, use random mac address */
if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
dev_err(eth->dev, "generated random MAC address %pM\n",
dev->dev_addr);
- dev->addr_assign_type = NET_ADDR_RANDOM;
}
mac->hw->soc->set_mac(mac, dev->dev_addr);
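
Similarly, `eth_hw_addr_random()` replaces the two-step `random_ether_addr()` plus manual `addr_assign_type` write: the helper generates the address and marks it `NET_ADDR_RANDOM` in one call. In isolation:

```c
#include <linux/etherdevice.h>

static void my_fixup_mac(struct net_device *dev)
{
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);	/* sets addr + assign type */
		netdev_warn(dev, "using random MAC %pM\n", dev->dev_addr);
	}
}
```
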
@@ -2012,8 +2010,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
GFP_KERNEL);
- if (!mac->hw_stats)
- return -ENOMEM;
+ if (!mac->hw_stats) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
spin_lock_init(&mac->hw_stats->stats_lock);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
}
@@ -2037,7 +2037,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
err = register_netdev(eth->netdev[id]);
if (err) {
dev_err(eth->dev, "error bringing up device\n");
- return err;
+ err = -ENOMEM;
+ goto free_netdev;
}
eth->netdev[id]->irq = eth->irq;
netif_info(eth, probe, eth->netdev[id],
@@ -2045,6 +2046,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->base_addr, eth->netdev[id]->irq);
return 0;
+
+free_netdev:
+ free_netdev(eth->netdev[id]);
+ return err;
}
static int mtk_probe(struct platform_device *pdev)
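
The `mtk_add_mac()` hunks fix a netdev leak by funneling every failure after `alloc_etherdev()` through one `free_netdev` label. A skeleton of that unwind pattern; note that unlike the hunk above, which overwrites `err` with `-ENOMEM` when `register_netdev()` fails, this sketch keeps the original errno, the more common idiom:

```c
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int my_add_mac(struct net_device **slot)
{
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(0);
	if (!ndev)
		return -ENOMEM;

	err = register_netdev(ndev);
	if (err)
		goto free_netdev;	/* preserve register_netdev()'s errno */

	*slot = ndev;
	return 0;

free_netdev:
	free_netdev(ndev);
	return err;
}
```
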
diff --git a/drivers/staging/mt7621-gpio/Kconfig b/drivers/staging/mt7621-gpio/Kconfig
deleted file mode 100644
index c741ec3f4e50..000000000000
--- a/drivers/staging/mt7621-gpio/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config GPIO_MT7621
- bool "Mediatek GPIO Support"
- depends on SOC_MT7620 || SOC_MT7621
- select ARCH_REQUIRE_GPIOLIB
- help
- Say yes here to support the Mediatek SoC GPIO device
diff --git a/drivers/staging/mt7621-gpio/Makefile b/drivers/staging/mt7621-gpio/Makefile
deleted file mode 100644
index e269ab1b8717..000000000000
--- a/drivers/staging/mt7621-gpio/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
-
-ccflags-y += -I$(srctree)/$(src)/include
diff --git a/drivers/staging/mt7621-gpio/TODO b/drivers/staging/mt7621-gpio/TODO
deleted file mode 100644
index 674930a10716..000000000000
--- a/drivers/staging/mt7621-gpio/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-- general code review and clean up
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-gpio/gpio-mt7621.c b/drivers/staging/mt7621-gpio/gpio-mt7621.c
deleted file mode 100644
index 0c4fb4a1b4a9..000000000000
--- a/drivers/staging/mt7621-gpio/gpio-mt7621.c
+++ /dev/null
@@ -1,370 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irqdomain.h>
-#include <linux/module.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-
-#define MTK_BANK_CNT 3
-#define MTK_BANK_WIDTH 32
-#define PIN_MASK(nr) (1UL << ((nr % MTK_BANK_WIDTH)))
-
-enum mediatek_gpio_reg {
- GPIO_REG_CTRL = 0,
- GPIO_REG_POL,
- GPIO_REG_DATA,
- GPIO_REG_DSET,
- GPIO_REG_DCLR,
- GPIO_REG_REDGE,
- GPIO_REG_FEDGE,
- GPIO_REG_HLVL,
- GPIO_REG_LLVL,
- GPIO_REG_STAT,
- GPIO_REG_EDGE,
-};
-
-struct mtk_gc {
- struct gpio_chip chip;
- spinlock_t lock;
- int bank;
- u32 rising;
- u32 falling;
-};
-
-struct mtk_data {
- void __iomem *gpio_membase;
- int gpio_irq;
- struct irq_domain *gpio_irq_domain;
- struct mtk_gc gc_map[MTK_BANK_CNT];
-};
-
-static inline struct mtk_gc *
-to_mediatek_gpio(struct gpio_chip *chip)
-{
- return container_of(chip, struct mtk_gc, chip);
-}
-
-static inline void
-mtk_gpio_w32(struct mtk_gc *rg, u8 reg, u32 val)
-{
- struct mtk_data *gpio_data = gpiochip_get_data(&rg->chip);
- u32 offset = (reg * 0x10) + (rg->bank * 0x4);
-
- iowrite32(val, gpio_data->gpio_membase + offset);
-}
-
-static inline u32
-mtk_gpio_r32(struct mtk_gc *rg, u8 reg)
-{
- struct mtk_data *gpio_data = gpiochip_get_data(&rg->chip);
- u32 offset = (reg * 0x10) + (rg->bank * 0x4);
-
- return ioread32(gpio_data->gpio_membase + offset);
-}
-
-static void
-mediatek_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
-{
- struct mtk_gc *rg = to_mediatek_gpio(chip);
-
- mtk_gpio_w32(rg, (value) ? GPIO_REG_DSET : GPIO_REG_DCLR, BIT(offset));
-}
-
-static int
-mediatek_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct mtk_gc *rg = to_mediatek_gpio(chip);
-
- return !!(mtk_gpio_r32(rg, GPIO_REG_DATA) & BIT(offset));
-}
-
-static int
-mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
-{
- struct mtk_gc *rg = to_mediatek_gpio(chip);
- unsigned long flags;
- u32 t;
-
- spin_lock_irqsave(&rg->lock, flags);
- t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
- t &= ~BIT(offset);
- mtk_gpio_w32(rg, GPIO_REG_CTRL, t);
- spin_unlock_irqrestore(&rg->lock, flags);
-
- return 0;
-}
-
-static int
-mediatek_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct mtk_gc *rg = to_mediatek_gpio(chip);
- unsigned long flags;
- u32 t;
-
- spin_lock_irqsave(&rg->lock, flags);
- t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
- t |= BIT(offset);
- mtk_gpio_w32(rg, GPIO_REG_CTRL, t);
- mediatek_gpio_set(chip, offset, value);
- spin_unlock_irqrestore(&rg->lock, flags);
-
- return 0;
-}
-
-static int
-mediatek_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
-{
- struct mtk_gc *rg = to_mediatek_gpio(chip);
- u32 t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
-
- return (t & BIT(offset)) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
-}
-
-static int
-mediatek_gpio_to_irq(struct gpio_chip *chip, unsigned int pin)
-{
- struct mtk_data *gpio_data = gpiochip_get_data(chip);
- struct mtk_gc *rg = to_mediatek_gpio(chip);
-
- return irq_create_mapping(gpio_data->gpio_irq_domain,
- pin + (rg->bank * MTK_BANK_WIDTH));
-}
-
-static int
-mediatek_gpio_bank_probe(struct platform_device *pdev, struct device_node *bank)
-{
- struct mtk_data *gpio_data = dev_get_drvdata(&pdev->dev);
- const __be32 *id = of_get_property(bank, "reg", NULL);
- struct mtk_gc *rg;
- int ret;
-
- if (!id || be32_to_cpu(*id) >= MTK_BANK_CNT)
- return -EINVAL;
-
- rg = &gpio_data->gc_map[be32_to_cpu(*id)];
- memset(rg, 0, sizeof(*rg));
-
- spin_lock_init(&rg->lock);
-
- rg->chip.parent = &pdev->dev;
- rg->chip.label = dev_name(&pdev->dev);
- rg->chip.of_node = bank;
- rg->chip.base = MTK_BANK_WIDTH * be32_to_cpu(*id);
- rg->chip.ngpio = MTK_BANK_WIDTH;
- rg->chip.direction_input = mediatek_gpio_direction_input;
- rg->chip.direction_output = mediatek_gpio_direction_output;
- rg->chip.get_direction = mediatek_gpio_get_direction;
- rg->chip.get = mediatek_gpio_get;
- rg->chip.set = mediatek_gpio_set;
- if (gpio_data->gpio_irq_domain)
- rg->chip.to_irq = mediatek_gpio_to_irq;
- rg->bank = be32_to_cpu(*id);
-
- ret = devm_gpiochip_add_data(&pdev->dev, &rg->chip, gpio_data);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpio %d, ret=%d\n",
- rg->chip.ngpio, ret);
- return ret;
- }
-
- /* set polarity to low for all gpios */
- mtk_gpio_w32(rg, GPIO_REG_POL, 0);
-
- dev_info(&pdev->dev, "registering %d gpios\n", rg->chip.ngpio);
-
- return 0;
-}
-
-static void
-mediatek_gpio_irq_handler(struct irq_desc *desc)
-{
- struct mtk_data *gpio_data = irq_desc_get_handler_data(desc);
- int i;
-
- for (i = 0; i < MTK_BANK_CNT; i++) {
- struct mtk_gc *rg = &gpio_data->gc_map[i];
- unsigned long pending;
- int bit;
-
- if (!rg)
- continue;
-
- pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
-
- for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
- u32 map = irq_find_mapping(gpio_data->gpio_irq_domain,
- (MTK_BANK_WIDTH * i) + bit);
-
- generic_handle_irq(map);
- mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
- }
- }
-}
-
-static void
-mediatek_gpio_irq_unmask(struct irq_data *d)
-{
- struct mtk_data *gpio_data = irq_data_get_irq_chip_data(d);
- int pin = d->hwirq;
- int bank = pin / MTK_BANK_WIDTH;
- struct mtk_gc *rg = &gpio_data->gc_map[bank];
- unsigned long flags;
- u32 rise, fall;
-
- if (!rg)
- return;
-
- spin_lock_irqsave(&rg->lock, flags);
- rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
- fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (PIN_MASK(pin) & rg->rising));
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (PIN_MASK(pin) & rg->falling));
- spin_unlock_irqrestore(&rg->lock, flags);
-}
-
-static void
-mediatek_gpio_irq_mask(struct irq_data *d)
-{
- struct mtk_data *gpio_data = irq_data_get_irq_chip_data(d);
- int pin = d->hwirq;
- int bank = pin / MTK_BANK_WIDTH;
- struct mtk_gc *rg = &gpio_data->gc_map[bank];
- unsigned long flags;
- u32 rise, fall;
-
- if (!rg)
- return;
-
- spin_lock_irqsave(&rg->lock, flags);
- rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
- fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~PIN_MASK(pin));
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~PIN_MASK(pin));
- spin_unlock_irqrestore(&rg->lock, flags);
-}
-
-static int
-mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
-{
- struct mtk_data *gpio_data = irq_data_get_irq_chip_data(d);
- int pin = d->hwirq;
- int bank = pin / MTK_BANK_WIDTH;
- struct mtk_gc *rg = &gpio_data->gc_map[bank];
- u32 mask = PIN_MASK(pin);
-
- if (!rg)
- return -1;
-
- if (type == IRQ_TYPE_PROBE) {
- if ((rg->rising | rg->falling) & mask)
- return 0;
-
- type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
- }
-
- if (type & IRQ_TYPE_EDGE_RISING)
- rg->rising |= mask;
- else
- rg->rising &= ~mask;
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- rg->falling |= mask;
- else
- rg->falling &= ~mask;
-
- return 0;
-}
-
-static struct irq_chip mediatek_gpio_irq_chip = {
- .name = "GPIO",
- .irq_unmask = mediatek_gpio_irq_unmask,
- .irq_mask = mediatek_gpio_irq_mask,
- .irq_mask_ack = mediatek_gpio_irq_mask,
- .irq_set_type = mediatek_gpio_irq_type,
-};
-
-static int
-mediatek_gpio_gpio_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hw)
-{
- int ret;
-
- ret = irq_set_chip_data(irq, d->host_data);
- if (ret < 0)
- return ret;
- irq_set_chip_and_handler(irq, &mediatek_gpio_irq_chip,
- handle_level_irq);
- irq_set_handler_data(irq, d);
-
- return 0;
-}
-
-static const struct irq_domain_ops irq_domain_ops = {
- .xlate = irq_domain_xlate_twocell,
- .map = mediatek_gpio_gpio_map,
-};
-
-static int
-mediatek_gpio_probe(struct platform_device *pdev)
-{
- struct device_node *bank, *np = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct mtk_data *gpio_data;
-
- gpio_data = devm_kzalloc(&pdev->dev, sizeof(*gpio_data), GFP_KERNEL);
- if (!gpio_data)
- return -ENOMEM;
-
- gpio_data->gpio_membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(gpio_data->gpio_membase))
- return PTR_ERR(gpio_data->gpio_membase);
-
- gpio_data->gpio_irq = irq_of_parse_and_map(np, 0);
- if (gpio_data->gpio_irq) {
- gpio_data->gpio_irq_domain = irq_domain_add_linear(np,
- MTK_BANK_CNT * MTK_BANK_WIDTH,
- &irq_domain_ops, gpio_data);
- if (!gpio_data->gpio_irq_domain)
- dev_err(&pdev->dev, "irq_domain_add_linear failed\n");
- }
-
- platform_set_drvdata(pdev, gpio_data);
-
- for_each_child_of_node(np, bank)
- if (of_device_is_compatible(bank, "mediatek,mt7621-gpio-bank"))
- mediatek_gpio_bank_probe(pdev, bank);
-
- if (gpio_data->gpio_irq_domain)
- irq_set_chained_handler_and_data(gpio_data->gpio_irq,
- mediatek_gpio_irq_handler,
- gpio_data);
-
- return 0;
-}
-
-static const struct of_device_id mediatek_gpio_match[] = {
- { .compatible = "mediatek,mt7621-gpio" },
- {},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gpio_match);
-
-static struct platform_driver mediatek_gpio_driver = {
- .probe = mediatek_gpio_probe,
- .driver = {
- .name = "mt7621_gpio",
- .of_match_table = mediatek_gpio_match,
- },
-};
-
-module_platform_driver(mediatek_gpio_driver);
diff --git a/drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt b/drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt
deleted file mode 100644
index 30d8a0225aa1..000000000000
--- a/drivers/staging/mt7621-gpio/mediatek,mt7621-gpio.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-Mediatek SoC GPIO controller bindings
-
-The IP core used inside these SoCs has 3 banks of 32 GPIOs each.
-The registers of all the banks are interwoven inside one single IO range.
-We load one GPIO controller instance per bank. To make this possible
-we support 2 types of nodes. The parent node defines the memory I/O range and
-has 3 children each describing a single bank. Also the GPIO controller can receive
-interrupts on any of the GPIOs, either edge or level. It then interrupts the CPU
-using GIC INT12.
-
-Required properties for the top level node:
-- compatible:
- - "mediatek,mt7621-gpio" for Mediatek controllers
-- reg : Physical base address and length of the controller's registers
-- interrupt-parent : phandle of the parent interrupt controller.
-- interrupts : Interrupt specifier for the controllers interrupt.
-- interrupt-controller : Mark the device node as an interrupt controller.
-- #interrupt-cells : Should be 2. The first cell defines the interrupt number.
- The second cell bits[3:0] is used to specify trigger type as follows:
- - 1 = low-to-high edge triggered.
- - 2 = high-to-low edge triggered.
- - 4 = active high level-sensitive.
- - 8 = active low level-sensitive.
-
-
-Required properties for the GPIO bank node:
-- compatible:
- - "mediatek,mt7621-gpio-bank" for Mediatek banks
-- #gpio-cells : Should be two. The first cell is the GPIO pin number and the
- second cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>.
- Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
-- gpio-controller : Marks the device node as a GPIO controller.
-- reg : The id of the bank that the node describes.
-
-Example:
- gpio@600 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- compatible = "mediatek,mt7621-gpio";
- reg = <0x600 0x100>;
-
- interrupt-parent = <&gic>;
- interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <2>;
-
- gpio0: bank@0 {
- reg = <0>;
- compatible = "mediatek,mt7621-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpio1: bank@1 {
- reg = <1>;
- compatible = "mediatek,mt7621-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpio2: bank@2 {
- reg = <2>;
- compatible = "mediatek,mt7621-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- };
- };
diff --git a/drivers/staging/mt7621-mmc/board.h b/drivers/staging/mt7621-mmc/board.h
index a7d82f321b00..983791ee308d 100644
--- a/drivers/staging/mt7621-mmc/board.h
+++ b/drivers/staging/mt7621-mmc/board.h
@@ -36,10 +36,10 @@
#ifndef __ARCH_ARM_MACH_BOARD_H
#define __ARCH_ARM_MACH_BOARD_H
-#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */
-#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */
-#define MSDC_RST_PIN_EN (1 << 2) /* emmc reset pin is wired */
-#define MSDC_REMOVABLE (1 << 5) /* removable slot */
+#define MSDC_CD_PIN_EN BIT(0) /* card detection pin is wired */
+#define MSDC_WP_PIN_EN BIT(1) /* write protection pin is wired */
+#define MSDC_RST_PIN_EN BIT(2) /* emmc reset pin is wired */
+#define MSDC_REMOVABLE BIT(5) /* removable slot */
#define MSDC_SMPL_RISING (0)
#define MSDC_SMPL_FALLING (1)
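
`BIT()` (from linux/bitops.h) replaces the open-coded shifts; it expands to `1UL << (n)`, so the result is unsigned and bit 31 never shifts into the sign bit of a plain int. A minimal illustration with hypothetical `MY_*` flags:

```c
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_CD_PIN_EN	BIT(0)	/* card-detect pin wired */
#define MY_WP_PIN_EN	BIT(1)	/* write-protect pin wired */

static inline bool my_has_cd(unsigned long caps)
{
	return caps & MY_CD_PIN_EN;
}
```
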
diff --git a/drivers/staging/mt7621-mmc/dbg.c b/drivers/staging/mt7621-mmc/dbg.c
index 6e4e223cddfb..6e518dce9029 100644
--- a/drivers/staging/mt7621-mmc/dbg.c
+++ b/drivers/staging/mt7621-mmc/dbg.c
@@ -91,11 +91,11 @@ u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32)
ret = new_L32 - old_L32;
} else if (new_H32 == (old_H32 + 1)) {
if (new_L32 > old_L32)
- printk("msdc old_L<0x%x> new_L<0x%x>\n", old_L32, new_L32);
+ pr_debug("msdc old_L<0x%x> new_L<0x%x>\n", old_L32, new_L32);
ret = (0xffffffff - old_L32);
ret += new_L32;
} else {
- printk("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32);
+ pr_debug("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32);
}
return ret;
@@ -106,34 +106,34 @@ void msdc_sdio_profile(struct sdio_profile *result)
struct cmd_profile *cmd;
u32 i;
- printk("sdio === performance dump ===\n");
- printk("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n",
- result->total_tc, result->total_tc / TICKS_ONE_MS,
- result->total_tx_bytes, result->total_rx_bytes);
+ pr_debug("sdio === performance dump ===\n");
+ pr_debug("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n",
+ result->total_tc, result->total_tc / TICKS_ONE_MS,
+ result->total_tx_bytes, result->total_rx_bytes);
/* CMD52 Dump */
cmd = &result->cmd52_rx;
- printk("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
+ pr_debug("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
cmd = &result->cmd52_tx;
- printk("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
+ pr_debug("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
/* CMD53 Rx bytes + block mode */
for (i = 0; i < 512; i++) {
cmd = &result->cmd53_rx_byte[i];
if (cmd->count) {
- printk("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ pr_debug("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
}
}
for (i = 0; i < 100; i++) {
cmd = &result->cmd53_rx_blk[i];
if (cmd->count) {
- printk("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ pr_debug("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
}
}
@@ -141,21 +141,21 @@ void msdc_sdio_profile(struct sdio_profile *result)
for (i = 0; i < 512; i++) {
cmd = &result->cmd53_tx_byte[i];
if (cmd->count) {
- printk("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ pr_debug("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
}
}
for (i = 0; i < 100; i++) {
cmd = &result->cmd53_tx_blk[i];
if (cmd->count) {
- printk("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
+ pr_debug("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
+ cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
+ cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
}
}
- printk("sdio === performance dump done ===\n");
+ pr_debug("sdio === performance dump done ===\n");
}
//========= sdio command table ===========
@@ -176,7 +176,7 @@ void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks)
} else {
block = sizes / 512;
if (block >= 99) {
- printk("cmd53 error blocks\n");
+ pr_err("cmd53 error blocks\n");
while (1)
;
}
@@ -247,7 +247,7 @@ static ssize_t msdc_debug_proc_write(struct file *file,
return -EFAULT;
cmd_buf[count] = '\0';
- printk("msdc Write %s\n", cmd_buf);
+ pr_debug("msdc Write %s\n", cmd_buf);
sscanf(cmd_buf, "%x %x %x", &cmd, &p1, &p2);
@@ -255,14 +255,14 @@ static ssize_t msdc_debug_proc_write(struct file *file,
id = p1;
zone = p2;
zone &= 0x3ff;
- printk("msdc host_id<%d> zone<0x%.8x>\n", id, zone);
+ pr_debug("msdc host_id<%d> zone<0x%.8x>\n", id, zone);
if (id >= 0 && id <= 3) {
sd_debug_zone[id] = zone;
} else if (id == 4) {
sd_debug_zone[0] = sd_debug_zone[1] = zone;
sd_debug_zone[2] = sd_debug_zone[3] = zone;
} else {
- printk("msdc host_id error when set debug zone\n");
+ pr_err("msdc host_id error when set debug zone\n");
}
} else if (cmd == SD_TOOL_SDIO_PROFILE) {
if (p1 == 1) { /* enable profile */
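
The dbg.c conversion applies the standard logging convention: bare `printk()` (implicitly KERN_DEFAULT) becomes `pr_debug()` for developer chatter and `pr_err()` for genuine faults. `pr_debug()` compiles away unless `DEBUG` or dynamic debug is enabled, so the profiling dumps stop polluting production logs. The convention in miniature:

```c
#include <linux/printk.h>
#include <linux/types.h>

static void my_report(u32 host_id, u32 zone, bool bad_id)
{
	/* developer-only detail: gated by DEBUG / dynamic debug */
	pr_debug("msdc host_id<%u> zone<0x%08x>\n", host_id, zone);

	/* a real fault: always logged */
	if (bad_id)
		pr_err("msdc host_id error when set debug zone\n");
}
```
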
diff --git a/drivers/staging/mt7621-mmc/dbg.h b/drivers/staging/mt7621-mmc/dbg.h
index 5a25a69b00c9..2f2c56b73987 100644
--- a/drivers/staging/mt7621-mmc/dbg.h
+++ b/drivers/staging/mt7621-mmc/dbg.h
@@ -73,12 +73,6 @@ enum msdc_dbg {
SD_TOOL_SDIO_PROFILE = 3,
};
-enum msdc_mode {
- MODE_PIO = 0,
- MODE_DMA = 1,
- MODE_SIZE_DEP = 2,
-};
-
/* Debug message event */
#define DBG_EVT_NONE (0) /* No event */
#define DBG_EVT_DMA (1 << 0) /* DMA related event */
diff --git a/drivers/staging/mt7621-mmc/mt6575_sd.h b/drivers/staging/mt7621-mmc/mt6575_sd.h
index 33fa59a019ec..4e287c140acb 100644
--- a/drivers/staging/mt7621-mmc/mt6575_sd.h
+++ b/drivers/staging/mt7621-mmc/mt6575_sd.h
@@ -42,11 +42,6 @@
// #include <mach/mt6575_reg_base.h> /* --- by chhung */
/*--------------------------------------------------------------------------*/
-/* Common Macro */
-/*--------------------------------------------------------------------------*/
-#define REG_ADDR(x) (base + OFFSET_##x)
-
-/*--------------------------------------------------------------------------*/
/* Common Definition */
/*--------------------------------------------------------------------------*/
#define MSDC_FIFO_SZ (128)
@@ -56,13 +51,6 @@
#define MSDC_MS (0)
#define MSDC_SDMMC (1)
-#define MSDC_MODE_UNKNOWN (0)
-#define MSDC_MODE_PIO (1)
-#define MSDC_MODE_DMA_BASIC (2)
-#define MSDC_MODE_DMA_DESC (3)
-#define MSDC_MODE_DMA_ENHANCED (4)
-#define MSDC_MODE_MMC_STREAM (5)
-
#define MSDC_BUS_1BITS (0)
#define MSDC_BUS_4BITS (1)
#define MSDC_BUS_8BITS (2)
@@ -102,117 +90,51 @@ enum {
/*--------------------------------------------------------------------------*/
/* Register Offset */
/*--------------------------------------------------------------------------*/
-#define OFFSET_MSDC_CFG (0x0)
-#define OFFSET_MSDC_IOCON (0x04)
-#define OFFSET_MSDC_PS (0x08)
-#define OFFSET_MSDC_INT (0x0c)
-#define OFFSET_MSDC_INTEN (0x10)
-#define OFFSET_MSDC_FIFOCS (0x14)
-#define OFFSET_MSDC_TXDATA (0x18)
-#define OFFSET_MSDC_RXDATA (0x1c)
-#define OFFSET_SDC_CFG (0x30)
-#define OFFSET_SDC_CMD (0x34)
-#define OFFSET_SDC_ARG (0x38)
-#define OFFSET_SDC_STS (0x3c)
-#define OFFSET_SDC_RESP0 (0x40)
-#define OFFSET_SDC_RESP1 (0x44)
-#define OFFSET_SDC_RESP2 (0x48)
-#define OFFSET_SDC_RESP3 (0x4c)
-#define OFFSET_SDC_BLK_NUM (0x50)
-#define OFFSET_SDC_CSTS (0x58)
-#define OFFSET_SDC_CSTS_EN (0x5c)
-#define OFFSET_SDC_DCRC_STS (0x60)
-#define OFFSET_EMMC_CFG0 (0x70)
-#define OFFSET_EMMC_CFG1 (0x74)
-#define OFFSET_EMMC_STS (0x78)
-#define OFFSET_EMMC_IOCON (0x7c)
-#define OFFSET_SDC_ACMD_RESP (0x80)
-#define OFFSET_SDC_ACMD19_TRG (0x84)
-#define OFFSET_SDC_ACMD19_STS (0x88)
-#define OFFSET_MSDC_DMA_SA (0x90)
-#define OFFSET_MSDC_DMA_CA (0x94)
-#define OFFSET_MSDC_DMA_CTRL (0x98)
-#define OFFSET_MSDC_DMA_CFG (0x9c)
-#define OFFSET_MSDC_DBG_SEL (0xa0)
-#define OFFSET_MSDC_DBG_OUT (0xa4)
-#define OFFSET_MSDC_PATCH_BIT (0xb0)
-#define OFFSET_MSDC_PATCH_BIT1 (0xb4)
-#define OFFSET_MSDC_PAD_CTL0 (0xe0)
-#define OFFSET_MSDC_PAD_CTL1 (0xe4)
-#define OFFSET_MSDC_PAD_CTL2 (0xe8)
-#define OFFSET_MSDC_PAD_TUNE (0xec)
-#define OFFSET_MSDC_DAT_RDDLY0 (0xf0)
-#define OFFSET_MSDC_DAT_RDDLY1 (0xf4)
-#define OFFSET_MSDC_HW_DBG (0xf8)
-#define OFFSET_MSDC_VERSION (0x100)
-#define OFFSET_MSDC_ECO_VER (0x104)
-
-/*--------------------------------------------------------------------------*/
-/* Register Address */
-/*--------------------------------------------------------------------------*/
-
-/* common register */
-#define MSDC_CFG REG_ADDR(MSDC_CFG)
-#define MSDC_IOCON REG_ADDR(MSDC_IOCON)
-#define MSDC_PS REG_ADDR(MSDC_PS)
-#define MSDC_INT REG_ADDR(MSDC_INT)
-#define MSDC_INTEN REG_ADDR(MSDC_INTEN)
-#define MSDC_FIFOCS REG_ADDR(MSDC_FIFOCS)
-#define MSDC_TXDATA REG_ADDR(MSDC_TXDATA)
-#define MSDC_RXDATA REG_ADDR(MSDC_RXDATA)
-#define MSDC_PATCH_BIT0 REG_ADDR(MSDC_PATCH_BIT)
-
-/* sdmmc register */
-#define SDC_CFG REG_ADDR(SDC_CFG)
-#define SDC_CMD REG_ADDR(SDC_CMD)
-#define SDC_ARG REG_ADDR(SDC_ARG)
-#define SDC_STS REG_ADDR(SDC_STS)
-#define SDC_RESP0 REG_ADDR(SDC_RESP0)
-#define SDC_RESP1 REG_ADDR(SDC_RESP1)
-#define SDC_RESP2 REG_ADDR(SDC_RESP2)
-#define SDC_RESP3 REG_ADDR(SDC_RESP3)
-#define SDC_BLK_NUM REG_ADDR(SDC_BLK_NUM)
-#define SDC_CSTS REG_ADDR(SDC_CSTS)
-#define SDC_CSTS_EN REG_ADDR(SDC_CSTS_EN)
-#define SDC_DCRC_STS REG_ADDR(SDC_DCRC_STS)
-
-/* emmc register*/
-#define EMMC_CFG0 REG_ADDR(EMMC_CFG0)
-#define EMMC_CFG1 REG_ADDR(EMMC_CFG1)
-#define EMMC_STS REG_ADDR(EMMC_STS)
-#define EMMC_IOCON REG_ADDR(EMMC_IOCON)
-
-/* auto command register */
-#define SDC_ACMD_RESP REG_ADDR(SDC_ACMD_RESP)
-#define SDC_ACMD19_TRG REG_ADDR(SDC_ACMD19_TRG)
-#define SDC_ACMD19_STS REG_ADDR(SDC_ACMD19_STS)
-
-/* dma register */
-#define MSDC_DMA_SA REG_ADDR(MSDC_DMA_SA)
-#define MSDC_DMA_CA REG_ADDR(MSDC_DMA_CA)
-#define MSDC_DMA_CTRL REG_ADDR(MSDC_DMA_CTRL)
-#define MSDC_DMA_CFG REG_ADDR(MSDC_DMA_CFG)
-
-/* pad ctrl register */
-#define MSDC_PAD_CTL0 REG_ADDR(MSDC_PAD_CTL0)
-#define MSDC_PAD_CTL1 REG_ADDR(MSDC_PAD_CTL1)
-#define MSDC_PAD_CTL2 REG_ADDR(MSDC_PAD_CTL2)
-
-/* data read delay */
-#define MSDC_DAT_RDDLY0 REG_ADDR(MSDC_DAT_RDDLY0)
-#define MSDC_DAT_RDDLY1 REG_ADDR(MSDC_DAT_RDDLY1)
-
-/* debug register */
-#define MSDC_DBG_SEL REG_ADDR(MSDC_DBG_SEL)
-#define MSDC_DBG_OUT REG_ADDR(MSDC_DBG_OUT)
-
-/* misc register */
-#define MSDC_PATCH_BIT REG_ADDR(MSDC_PATCH_BIT)
-#define MSDC_PATCH_BIT1 REG_ADDR(MSDC_PATCH_BIT1)
-#define MSDC_PAD_TUNE REG_ADDR(MSDC_PAD_TUNE)
-#define MSDC_HW_DBG REG_ADDR(MSDC_HW_DBG)
-#define MSDC_VERSION REG_ADDR(MSDC_VERSION)
-#define MSDC_ECO_VER REG_ADDR(MSDC_ECO_VER) /* ECO Version */
+#define MSDC_CFG (0x0)
+#define MSDC_IOCON (0x04)
+#define MSDC_PS (0x08)
+#define MSDC_INT (0x0c)
+#define MSDC_INTEN (0x10)
+#define MSDC_FIFOCS (0x14)
+#define MSDC_TXDATA (0x18)
+#define MSDC_RXDATA (0x1c)
+#define SDC_CFG (0x30)
+#define SDC_CMD (0x34)
+#define SDC_ARG (0x38)
+#define SDC_STS (0x3c)
+#define SDC_RESP0 (0x40)
+#define SDC_RESP1 (0x44)
+#define SDC_RESP2 (0x48)
+#define SDC_RESP3 (0x4c)
+#define SDC_BLK_NUM (0x50)
+#define SDC_CSTS (0x58)
+#define SDC_CSTS_EN (0x5c)
+#define SDC_DCRC_STS (0x60)
+#define EMMC_CFG0 (0x70)
+#define EMMC_CFG1 (0x74)
+#define EMMC_STS (0x78)
+#define EMMC_IOCON (0x7c)
+#define SDC_ACMD_RESP (0x80)
+#define SDC_ACMD19_TRG (0x84)
+#define SDC_ACMD19_STS (0x88)
+#define MSDC_DMA_SA (0x90)
+#define MSDC_DMA_CA (0x94)
+#define MSDC_DMA_CTRL (0x98)
+#define MSDC_DMA_CFG (0x9c)
+#define MSDC_DBG_SEL (0xa0)
+#define MSDC_DBG_OUT (0xa4)
+#define MSDC_PATCH_BIT (0xb0)
+#define MSDC_PATCH_BIT0 MSDC_PATCH_BIT
+#define MSDC_PATCH_BIT1 (0xb4)
+#define MSDC_PAD_CTL0 (0xe0)
+#define MSDC_PAD_CTL1 (0xe4)
+#define MSDC_PAD_CTL2 (0xe8)
+#define MSDC_PAD_TUNE (0xec)
+#define MSDC_DAT_RDDLY0 (0xf0)
+#define MSDC_DAT_RDDLY1 (0xf4)
+#define MSDC_HW_DBG (0xf8)
+#define MSDC_VERSION (0x100)
+#define MSDC_ECO_VER (0x104)
/*--------------------------------------------------------------------------*/
/* Register Mask */
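
With the `REG_ADDR()` macro gone, the header defines plain offsets and every access names its base explicitly, instead of relying on a `base` variable being in scope at each use site. A sketch of the resulting accessor style (`my_host` and the helpers are assumptions; the `MSDC_*` offsets above plug in directly):

```c
#include <linux/io.h>
#include <linux/types.h>

struct my_host {
	void __iomem *base;
};

static inline u32 my_readl(struct my_host *host, u32 offset)
{
	return readl(host->base + offset);
}

static inline void my_writel(struct my_host *host, u32 offset, u32 val)
{
	writel(val, host->base + offset);
}

/* usage: my_writel(host, MSDC_INTEN, 0) — no hidden base, no macro magic */
```
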
@@ -478,422 +400,7 @@ struct bd {
u32 rsv3:16;
};
-/*--------------------------------------------------------------------------*/
-/* Register Debugging Structure */
-/*--------------------------------------------------------------------------*/
-
-struct msdc_cfg_reg {
- u32 msdc:1;
- u32 ckpwn:1;
- u32 rst:1;
- u32 pio:1;
- u32 ckdrven:1;
- u32 start18v:1;
- u32 pass18v:1;
- u32 ckstb:1;
- u32 ckdiv:8;
- u32 ckmod:2;
- u32 pad:14;
-};
-
-struct msdc_iocon_reg {
- u32 sdr104cksel:1;
- u32 rsmpl:1;
- u32 dsmpl:1;
- u32 ddlysel:1;
- u32 ddr50ckd:1;
- u32 dsplsel:1;
- u32 pad1:10;
- u32 d0spl:1;
- u32 d1spl:1;
- u32 d2spl:1;
- u32 d3spl:1;
- u32 d4spl:1;
- u32 d5spl:1;
- u32 d6spl:1;
- u32 d7spl:1;
- u32 riscsz:1;
- u32 pad2:7;
-};
-
-struct msdc_ps_reg {
- u32 cden:1;
- u32 cdsts:1;
- u32 pad1:10;
- u32 cddebounce:4;
- u32 dat:8;
- u32 cmd:1;
- u32 pad2:6;
- u32 wp:1;
-};
-
-struct msdc_int_reg {
- u32 mmcirq:1;
- u32 cdsc:1;
- u32 pad1:1;
- u32 atocmdrdy:1;
- u32 atocmdtmo:1;
- u32 atocmdcrc:1;
- u32 dmaqempty:1;
- u32 sdioirq:1;
- u32 cmdrdy:1;
- u32 cmdtmo:1;
- u32 rspcrc:1;
- u32 csta:1;
- u32 xfercomp:1;
- u32 dxferdone:1;
- u32 dattmo:1;
- u32 datcrc:1;
- u32 atocmd19done:1;
- u32 pad2:15;
-};
-
-struct msdc_inten_reg {
- u32 mmcirq:1;
- u32 cdsc:1;
- u32 pad1:1;
- u32 atocmdrdy:1;
- u32 atocmdtmo:1;
- u32 atocmdcrc:1;
- u32 dmaqempty:1;
- u32 sdioirq:1;
- u32 cmdrdy:1;
- u32 cmdtmo:1;
- u32 rspcrc:1;
- u32 csta:1;
- u32 xfercomp:1;
- u32 dxferdone:1;
- u32 dattmo:1;
- u32 datcrc:1;
- u32 atocmd19done:1;
- u32 pad2:15;
-};
-
-struct msdc_fifocs_reg {
- u32 rxcnt:8;
- u32 pad1:8;
- u32 txcnt:8;
- u32 pad2:7;
- u32 clr:1;
-};
-
-struct msdc_txdat_reg {
- u32 val;
-};
-
-struct msdc_rxdat_reg {
- u32 val;
-};
-
-struct sdc_cfg_reg {
- u32 sdiowkup:1;
- u32 inswkup:1;
- u32 pad1:14;
- u32 buswidth:2;
- u32 pad2:1;
- u32 sdio:1;
- u32 sdioide:1;
- u32 intblkgap:1;
- u32 pad4:2;
- u32 dtoc:8;
-};
-
-struct sdc_cmd_reg {
- u32 cmd:6;
- u32 brk:1;
- u32 rsptyp:3;
- u32 pad1:1;
- u32 dtype:2;
- u32 rw:1;
- u32 stop:1;
- u32 goirq:1;
- u32 blklen:12;
- u32 atocmd:2;
- u32 volswth:1;
- u32 pad2:1;
-};
-
-struct sdc_arg_reg {
- u32 arg;
-};
-
-struct sdc_sts_reg {
- u32 sdcbusy:1;
- u32 cmdbusy:1;
- u32 pad:29;
- u32 swrcmpl:1;
-};
-
-struct sdc_resp0_reg {
- u32 val;
-};
-
-struct sdc_resp1_reg {
- u32 val;
-};
-
-struct sdc_resp2_reg {
- u32 val;
-};
-
-struct sdc_resp3_reg {
- u32 val;
-};
-
-struct sdc_blknum_reg {
- u32 num;
-};
-
-struct sdc_csts_reg {
- u32 sts;
-};
-
-struct sdc_cstsen_reg {
- u32 sts;
-};
-
-struct sdc_datcrcsts_reg {
- u32 datcrcsts:8;
- u32 ddrcrcsts:4;
- u32 pad:20;
-};
-
-struct emmc_cfg0_reg {
- u32 bootstart:1;
- u32 bootstop:1;
- u32 bootmode:1;
- u32 pad1:9;
- u32 bootwaidly:3;
- u32 bootsupp:1;
- u32 pad2:16;
-};
-
-struct emmc_cfg1_reg {
- u32 bootcrctmc:16;
- u32 pad:4;
- u32 bootacktmc:12;
-};
-
-struct emmc_sts_reg {
- u32 bootcrcerr:1;
- u32 bootackerr:1;
- u32 bootdattmo:1;
- u32 bootacktmo:1;
- u32 bootupstate:1;
- u32 bootackrcv:1;
- u32 bootdatrcv:1;
- u32 pad:25;
-};
-
-struct emmc_iocon_reg {
- u32 bootrst:1;
- u32 pad:31;
-};
-
-struct msdc_acmd_resp_reg {
- u32 val;
-};
-
-struct msdc_acmd19_trg_reg {
- u32 tunesel:4;
- u32 pad:28;
-};
-
-struct msdc_acmd19_sts_reg {
- u32 val;
-};
-
-struct msdc_dma_sa_reg {
- u32 addr;
-};
-
-struct msdc_dma_ca_reg {
- u32 addr;
-};
-
-struct msdc_dma_ctrl_reg {
- u32 start:1;
- u32 stop:1;
- u32 resume:1;
- u32 pad1:5;
- u32 mode:1;
- u32 pad2:1;
- u32 lastbuf:1;
- u32 pad3:1;
- u32 brustsz:3;
- u32 pad4:1;
- u32 xfersz:16;
-};
-
-struct msdc_dma_cfg_reg {
- u32 status:1;
- u32 decsen:1;
- u32 pad1:2;
- u32 bdcsen:1;
- u32 gpdcsen:1;
- u32 pad2:26;
-};
-
-struct msdc_dbg_sel_reg {
- u32 sel:16;
- u32 pad2:16;
-};
-
-struct msdc_dbg_out_reg {
- u32 val;
-};
-
-struct msdc_pad_ctl0_reg {
- u32 clkdrvn:3;
- u32 rsv0:1;
- u32 clkdrvp:3;
- u32 rsv1:1;
- u32 clksr:1;
- u32 rsv2:7;
- u32 clkpd:1;
- u32 clkpu:1;
- u32 clksmt:1;
- u32 clkies:1;
- u32 clktdsel:4;
- u32 clkrdsel:8;
-};
-
-struct msdc_pad_ctl1_reg {
- u32 cmddrvn:3;
- u32 rsv0:1;
- u32 cmddrvp:3;
- u32 rsv1:1;
- u32 cmdsr:1;
- u32 rsv2:7;
- u32 cmdpd:1;
- u32 cmdpu:1;
- u32 cmdsmt:1;
- u32 cmdies:1;
- u32 cmdtdsel:4;
- u32 cmdrdsel:8;
-};
-
-struct msdc_pad_ctl2_reg {
- u32 datdrvn:3;
- u32 rsv0:1;
- u32 datdrvp:3;
- u32 rsv1:1;
- u32 datsr:1;
- u32 rsv2:7;
- u32 datpd:1;
- u32 datpu:1;
- u32 datsmt:1;
- u32 daties:1;
- u32 dattdsel:4;
- u32 datrdsel:8;
-};
-
-struct msdc_pad_tune_reg {
- u32 wrrxdly:3;
- u32 pad1:5;
- u32 rdrxdly:8;
- u32 pad2:16;
-};
-
-struct msdc_dat_rddly0 {
- u32 dat0:5;
- u32 rsv0:3;
- u32 dat1:5;
- u32 rsv1:3;
- u32 dat2:5;
- u32 rsv2:3;
- u32 dat3:5;
- u32 rsv3:3;
-};
-
-struct msdc_dat_rddly1 {
- u32 dat4:5;
- u32 rsv4:3;
- u32 dat5:5;
- u32 rsv5:3;
- u32 dat6:5;
- u32 rsv6:3;
- u32 dat7:5;
- u32 rsv7:3;
-};
-
-struct msdc_hw_dbg_reg {
- u32 dbg0sel:8;
- u32 dbg1sel:6;
- u32 pad1:2;
- u32 dbg2sel:6;
- u32 pad2:2;
- u32 dbg3sel:6;
- u32 pad3:2;
-};
-
-struct msdc_version_reg {
- u32 val;
-};
-
-struct msdc_eco_ver_reg {
- u32 val;
-};
-
-struct msdc_regs {
- struct msdc_cfg_reg msdc_cfg; /* base+0x00h */
- struct msdc_iocon_reg msdc_iocon; /* base+0x04h */
- struct msdc_ps_reg msdc_ps; /* base+0x08h */
- struct msdc_int_reg msdc_int; /* base+0x0ch */
- struct msdc_inten_reg msdc_inten; /* base+0x10h */
- struct msdc_fifocs_reg msdc_fifocs; /* base+0x14h */
- struct msdc_txdat_reg msdc_txdat; /* base+0x18h */
- struct msdc_rxdat_reg msdc_rxdat; /* base+0x1ch */
- u32 rsv1[4];
- struct sdc_cfg_reg sdc_cfg; /* base+0x30h */
- struct sdc_cmd_reg sdc_cmd; /* base+0x34h */
- struct sdc_arg_reg sdc_arg; /* base+0x38h */
- struct sdc_sts_reg sdc_sts; /* base+0x3ch */
- struct sdc_resp0_reg sdc_resp0; /* base+0x40h */
- struct sdc_resp1_reg sdc_resp1; /* base+0x44h */
- struct sdc_resp2_reg sdc_resp2; /* base+0x48h */
- struct sdc_resp3_reg sdc_resp3; /* base+0x4ch */
- struct sdc_blknum_reg sdc_blknum; /* base+0x50h */
- u32 rsv2[1];
- struct sdc_csts_reg sdc_csts; /* base+0x58h */
- struct sdc_cstsen_reg sdc_cstsen; /* base+0x5ch */
- struct sdc_datcrcsts_reg sdc_dcrcsta; /* base+0x60h */
- u32 rsv3[3];
- struct emmc_cfg0_reg emmc_cfg0; /* base+0x70h */
- struct emmc_cfg1_reg emmc_cfg1; /* base+0x74h */
- struct emmc_sts_reg emmc_sts; /* base+0x78h */
- struct emmc_iocon_reg emmc_iocon; /* base+0x7ch */
- struct msdc_acmd_resp_reg acmd_resp; /* base+0x80h */
- struct msdc_acmd19_trg_reg acmd19_trg; /* base+0x84h */
- struct msdc_acmd19_sts_reg acmd19_sts; /* base+0x88h */
- u32 rsv4[1];
- struct msdc_dma_sa_reg dma_sa; /* base+0x90h */
- struct msdc_dma_ca_reg dma_ca; /* base+0x94h */
- struct msdc_dma_ctrl_reg dma_ctrl; /* base+0x98h */
- struct msdc_dma_cfg_reg dma_cfg; /* base+0x9ch */
- struct msdc_dbg_sel_reg dbg_sel; /* base+0xa0h */
- struct msdc_dbg_out_reg dbg_out; /* base+0xa4h */
- u32 rsv5[2];
- u32 patch0; /* base+0xb0h */
- u32 patch1; /* base+0xb4h */
- u32 rsv6[10];
- struct msdc_pad_ctl0_reg pad_ctl0; /* base+0xe0h */
- struct msdc_pad_ctl1_reg pad_ctl1; /* base+0xe4h */
- struct msdc_pad_ctl2_reg pad_ctl2; /* base+0xe8h */
- struct msdc_pad_tune_reg pad_tune; /* base+0xech */
- struct msdc_dat_rddly0 dat_rddly0; /* base+0xf0h */
- struct msdc_dat_rddly1 dat_rddly1; /* base+0xf4h */
- struct msdc_hw_dbg_reg hw_dbg; /* base+0xf8h */
- u32 rsv7[1];
- struct msdc_version_reg version; /* base+0x100h */
- struct msdc_eco_ver_reg eco_ver; /* base+0x104h */
-};
-
struct msdc_dma {
- u32 sglen; /* size of scatter list */
- struct scatterlist *sg; /* I/O scatter list */
- u8 mode; /* dma mode */
-
struct gpd *gpd; /* pointer to gpd array */
struct bd *bd; /* pointer to bd array */
dma_addr_t gpd_addr; /* the physical address of gpd array */
@@ -947,11 +454,6 @@ struct msdc_host {
u32 app_cmd_arg;
};
-#define sdr_read8(reg) readb(reg)
-#define sdr_read32(reg) readl(reg)
-#define sdr_write8(reg, val) writeb(val, reg)
-#define sdr_write32(reg, val) writel(val, reg)
-
static inline void sdr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
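The hunk context cuts sdr_set_bits() short, and its siblings never appear here, but the call sites throughout this patch imply the same read-modify-write shape for all of them. A plausible sketch, assuming every field mask is a contiguous run of bits so ffs() yields its shift:

	static inline void sdr_clr_bits(void __iomem *reg, u32 bs)
	{
		u32 val = readl(reg);

		val &= ~bs;			/* clear the requested bits */
		writel(val, reg);
	}

	static inline void sdr_set_field(void __iomem *reg, u32 field, u32 val)
	{
		u32 tv = readl(reg);

		tv &= ~field;			/* clear the whole field */
		tv |= val << (ffs(field) - 1);	/* place the new value */
		writel(tv, reg);
	}

	static inline void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
	{
		u32 tv = readl(reg);

		*val = (tv & field) >> (ffs(field) - 1);
	}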
diff --git a/drivers/staging/mt7621-mmc/sd.c b/drivers/staging/mt7621-mmc/sd.c
index 648a2dd1436e..04d23cc7cd4a 100644
--- a/drivers/staging/mt7621-mmc/sd.c
+++ b/drivers/staging/mt7621-mmc/sd.c
@@ -50,15 +50,11 @@
#include "dbg.h"
#include "mt6575_sd.h"
-//#define IRQ_SDC 14 //MT7620 /*FIXME*/
#ifdef CONFIG_SOC_MT7621
#define RALINK_SYSCTL_BASE 0xbe000000
-#define RALINK_MSDC_BASE 0xbe130000
#else
#define RALINK_SYSCTL_BASE 0xb0000000
-#define RALINK_MSDC_BASE 0xb0130000
#endif
-#define IRQ_SDC 22 /*FIXME*/
#define DRV_NAME "mtk-sd"
@@ -91,10 +87,8 @@
#define MAX_GPD_NUM (1 + 1) /* one null gpd */
#define MAX_BD_NUM (1024)
-#define MAX_BD_PER_GPD (MAX_BD_NUM)
#define MAX_HW_SGMTS (MAX_BD_NUM)
-#define MAX_PHY_SGMTS (MAX_BD_NUM)
#define MAX_SGMT_SZ (MAX_DMA_CNT)
#define MAX_REQ_SZ (MAX_SGMT_SZ * 8)
@@ -147,59 +141,37 @@ static int msdc_rsp[] = {
7, /* RESP_R1b */
};
-#define msdc_txfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16)
-#define msdc_rxfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) >> 0)
-#define msdc_fifo_write32(v) sdr_write32(MSDC_TXDATA, (v))
-#define msdc_fifo_write8(v) sdr_write8(MSDC_TXDATA, (v))
-#define msdc_fifo_read32() sdr_read32(MSDC_RXDATA)
-#define msdc_fifo_read8() sdr_read8(MSDC_RXDATA)
-
-#define msdc_dma_on() sdr_clr_bits(MSDC_CFG, MSDC_CFG_PIO)
-
-#define msdc_retry(expr, retry, cnt) \
- do { \
- int backup = cnt; \
- while (retry) { \
- if (!(expr)) \
- break; \
- if (cnt-- == 0) { \
- retry--; mdelay(1); cnt = backup; \
- } \
- } \
- WARN_ON(retry == 0); \
- } while (0)
+#define msdc_dma_on() sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO)
static void msdc_reset_hw(struct msdc_host *host)
{
- void __iomem *base = host->base;
-
- sdr_set_bits(MSDC_CFG, MSDC_CFG_RST);
- while (sdr_read32(MSDC_CFG) & MSDC_CFG_RST)
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
+ while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
cpu_relax();
}
#define msdc_clr_int() \
do { \
- volatile u32 val = sdr_read32(MSDC_INT); \
- sdr_write32(MSDC_INT, val); \
+ volatile u32 val = readl(host->base + MSDC_INT); \
+ writel(val, host->base + MSDC_INT); \
} while (0)
-#define msdc_clr_fifo() \
- do { \
- int retry = 3, cnt = 1000; \
- sdr_set_bits(MSDC_FIFOCS, MSDC_FIFOCS_CLR); \
- msdc_retry(sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_CLR, retry, cnt); \
- } while (0)
+static void msdc_clr_fifo(struct msdc_host *host)
+{
+ sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
+ while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
+ cpu_relax();
+}
#define msdc_irq_save(val) \
do { \
- val = sdr_read32(MSDC_INTEN); \
- sdr_clr_bits(MSDC_INTEN, val); \
+ val = readl(host->base + MSDC_INTEN); \
+ sdr_clr_bits(host->base + MSDC_INTEN, val); \
} while (0)
#define msdc_irq_restore(val) \
do { \
- sdr_set_bits(MSDC_INTEN, val); \
+ sdr_set_bits(host->base + MSDC_INTEN, val); \
} while (0)
/* clock source for host: global */
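One behavioral difference worth noting: the old msdc_retry()-based msdc_clr_fifo() gave up (with a WARN_ON) after three bounded retries, whereas the new helper spins on cpu_relax() forever if the CLR bit never self-clears. Should the bound be wanted back, a hypothetical variant could lean on the generic poll helper from <linux/iopoll.h> (the 3 ms budget is an assumption; callers in interrupt context would need readl_poll_timeout_atomic() instead):

	static int msdc_clr_fifo_polled(struct msdc_host *host)
	{
		u32 val;

		sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
		/* poll every 1 us until CLR self-clears; give up after 3 ms */
		return readl_poll_timeout(host->base + MSDC_FIFOCS, val,
					  !(val & MSDC_FIFOCS_CLR), 1, 3000);
	}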
@@ -237,19 +209,15 @@ static u32 hclks[] = {50000000}; /* +/- by chhung */
(void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \
} while (0)
-#define sdc_is_busy() (sdr_read32(SDC_STS) & SDC_STS_SDCBUSY)
-#define sdc_is_cmd_busy() (sdr_read32(SDC_STS) & SDC_STS_CMDBUSY)
+#define sdc_is_busy() (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY)
+#define sdc_is_cmd_busy() (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY)
#define sdc_send_cmd(cmd, arg) \
do { \
- sdr_write32(SDC_ARG, (arg)); \
- sdr_write32(SDC_CMD, (cmd)); \
+ writel((arg), host->base + SDC_ARG); \
+ writel((cmd), host->base + SDC_CMD); \
} while (0)
-// can modify to read h/w register.
-//#define is_card_present(h) ((sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1);
-#define is_card_present(h) (((struct msdc_host *)(h))->card_inserted)
-
/* +++ by chhung */
#ifndef __ASSEMBLY__
#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
@@ -404,7 +372,6 @@ static void msdc_dump_io_resp(struct msdc_host *host, u32 resp)
static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
{
- void __iomem *base = host->base;
u32 timeout, clk_ns;
host->timeout_ns = ns;
@@ -416,7 +383,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
timeout = timeout > 1 ? timeout - 1 : 0;
timeout = timeout > 255 ? 255 : timeout;
- sdr_set_field(SDC_CFG, SDC_CFG_DTOC, timeout);
+ sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, timeout);
N_MSG(OPS, "Set read data timeout: %dns %dclks -> %d x 65536 cycles",
ns, clks, timeout + 1);
@@ -426,14 +393,13 @@ static void msdc_tasklet_card(struct work_struct *work)
{
struct msdc_host *host = (struct msdc_host *)container_of(work,
struct msdc_host, card_delaywork.work);
- void __iomem *base = host->base;
u32 inserted;
u32 status = 0;
//u32 change = 0;
spin_lock(&host->lock);
- status = sdr_read32(MSDC_PS);
+ status = readl(host->base + MSDC_PS);
if (cd_active_low)
inserted = (status & MSDC_PS_CDSTS) ? 0 : 1;
else
@@ -471,19 +437,18 @@ static u8 clk_src_bit[4] = {
static void msdc_select_clksrc(struct msdc_host *host, unsigned char clksrc)
{
u32 val;
- void __iomem *base = host->base;
BUG_ON(clksrc > 3);
INIT_MSG("set clock source to <%d>", clksrc);
- val = sdr_read32(MSDC_CLKSRC_REG);
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ val = readl(host->base + MSDC_CLKSRC_REG);
+ if (readl(host->base + MSDC_ECO_VER) >= 4) {
val &= ~(0x3 << clk_src_bit[host->id]);
val |= clksrc << clk_src_bit[host->id];
} else {
val &= ~0x3; val |= clksrc;
}
- sdr_write32(MSDC_CLKSRC_REG, val);
+ writel(val, host->base + MSDC_CLKSRC_REG);
host->hclk = hclks[clksrc];
host->hw->clk_src = clksrc;
@@ -493,7 +458,6 @@ static void msdc_select_clksrc(struct msdc_host *host, unsigned char clksrc)
static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz)
{
//struct msdc_hw *hw = host->hw;
- void __iomem *base = host->base;
u32 mode;
u32 flags;
u32 div;
@@ -534,11 +498,11 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz)
}
/* set clock mode and divisor */
- sdr_set_field(MSDC_CFG, MSDC_CFG_CKMOD, mode);
- sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, div);
+ sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, mode);
+ sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKDIV, div);
/* wait clock stable */
- while (!(sdr_read32(MSDC_CFG) & MSDC_CFG_CKSTB))
+ while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
host->sclk = sclk;
@@ -555,13 +519,12 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz)
/* FIXME: when do we need to abort? */
static void msdc_abort_data(struct msdc_host *host)
{
- void __iomem *base = host->base;
struct mmc_command *stop = host->mrq->stop;
ERR_MSG("Need to Abort.");
msdc_reset_hw(host);
- msdc_clr_fifo();
+ msdc_clr_fifo(host);
msdc_clr_int();
// need to check FIFO count 0 ?
@@ -580,7 +543,6 @@ static void msdc_abort_data(struct msdc_host *host)
static void msdc_pin_config(struct msdc_host *host, int mode)
{
struct msdc_hw *hw = host->hw;
- void __iomem *base = host->base;
int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
/* Config WP pin */
@@ -593,27 +555,27 @@ static void msdc_pin_config(struct msdc_host *host, int mode)
case MSDC_PIN_PULL_UP:
//sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 1); /* Check & FIXME */
//sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 1);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 1);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 1);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 1);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
break;
case MSDC_PIN_PULL_DOWN:
//sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
//sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 1); /* Check & FIXME */
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 1);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 1);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 1);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 1);
break;
case MSDC_PIN_PULL_NONE:
default:
//sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
//sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
break;
}
@@ -624,7 +586,6 @@ static void msdc_pin_config(struct msdc_host *host, int mode)
void msdc_pin_reset(struct msdc_host *host, int mode)
{
struct msdc_hw *hw = (struct msdc_hw *)host->hw;
- void __iomem *base = host->base;
int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
/* Config reset pin */
@@ -633,9 +594,9 @@ void msdc_pin_reset(struct msdc_host *host, int mode)
hw->config_gpio_pin(MSDC_RST_PIN, pull);
if (mode == MSDC_PIN_PULL_UP)
- sdr_clr_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
+ sdr_clr_bits(host->base + EMMC_IOCON, EMMC_IOCON_BOOTRST);
else
- sdr_set_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
+ sdr_set_bits(host->base + EMMC_IOCON, EMMC_IOCON_BOOTRST);
}
}
@@ -744,30 +705,12 @@ static void msdc_pm(pm_message_t state, void *data)
}
#endif
-/*--------------------------------------------------------------------------*/
-/* mmc_host_ops members */
-/*--------------------------------------------------------------------------*/
-static unsigned int msdc_command_start(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune, /* not used */
- unsigned long timeout)
+static inline u32 msdc_cmd_find_resp(struct mmc_command *cmd)
{
- void __iomem *base = host->base;
u32 opcode = cmd->opcode;
- u32 rawcmd;
- u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
- MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
- MSDC_INT_ACMD19_DONE;
-
u32 resp;
- unsigned long tmo;
- /* Protocol layer does not provide response type, but our hardware needs
- * to know exact type, not just size!
- */
- if (opcode == MMC_SEND_OP_COND || opcode == SD_APP_OP_COND) {
- resp = RESP_R3;
- } else if (opcode == MMC_SET_RELATIVE_ADDR) {
+ if (opcode == MMC_SET_RELATIVE_ADDR) {
resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1;
} else if (opcode == MMC_FAST_IO) {
resp = RESP_R4;
@@ -800,6 +743,30 @@ static unsigned int msdc_command_start(struct msdc_host *host,
}
}
+ return resp;
+}
+
+/*--------------------------------------------------------------------------*/
+/* mmc_host_ops members */
+/*--------------------------------------------------------------------------*/
+static unsigned int msdc_command_start(struct msdc_host *host,
+ struct mmc_command *cmd,
+ unsigned long timeout)
+{
+ u32 opcode = cmd->opcode;
+ u32 rawcmd;
+ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
+ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
+ MSDC_INT_ACMD19_DONE;
+
+ u32 resp;
+ unsigned long tmo;
+
+ /* Protocol layer does not provide response type, but our hardware needs
+ * to know exact type, not just size!
+ */
+ resp = msdc_cmd_find_resp(cmd);
+
cmd->error = 0;
/* rawcmd :
* vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
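The layout comment is truncated by the hunk context, but the removed struct sdc_cmd_reg bitfields earlier in this patch pin down the full encoding. The packing they imply, as a cross-check (every variable name below is illustrative):

	rawcmd = (opcode & 0x3f)	/* cmd[5:0]      */
	       | (resp << 7)		/* rsptyp[9:7]   */
	       | (dtype << 11)		/* dtype[12:11]  */
	       | (rw << 13)		/* rw[13]        */
	       | (stop << 14)		/* stop[14]      */
	       | (go_irq << 15)		/* goirq[15]     */
	       | (blklen << 16)		/* blklen[27:16] */
	       | (auto_cmd << 28)	/* atocmd[29:28] */
	       | (vol_swt << 30);	/* volswth[30]   */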
@@ -870,7 +837,7 @@ static unsigned int msdc_command_start(struct msdc_host *host,
init_completion(&host->cmd_done);
- sdr_set_bits(MSDC_INTEN, wints);
+ sdr_set_bits(host->base + MSDC_INTEN, wints);
sdc_send_cmd(rawcmd, cmd->arg);
end:
@@ -883,19 +850,15 @@ static unsigned int msdc_command_resp(struct msdc_host *host,
unsigned long timeout)
__must_hold(&host->lock)
{
- void __iomem *base = host->base;
u32 opcode = cmd->opcode;
//u32 rawcmd;
- u32 resp;
u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
MSDC_INT_ACMD19_DONE;
- resp = host->cmd_rsp;
-
BUG_ON(in_interrupt());
//init_completion(&host->cmd_done);
- //sdr_set_bits(MSDC_INTEN, wints);
+ //sdr_set_bits(host->base + MSDC_INTEN, wints);
spin_unlock(&host->lock);
if (!wait_for_completion_timeout(&host->cmd_done, 10 * timeout)) {
@@ -905,7 +868,7 @@ static unsigned int msdc_command_resp(struct msdc_host *host,
}
spin_lock(&host->lock);
- sdr_clr_bits(MSDC_INTEN, wints);
+ sdr_clr_bits(host->base + MSDC_INTEN, wints);
host->cmd = NULL;
//end:
@@ -950,12 +913,13 @@ static unsigned int msdc_command_resp(struct msdc_host *host,
/* memory card CRC */
if (host->hw->flags & MSDC_REMOVABLE && cmd->error == -EIO) {
- if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
+ /* check if the command has a data phase */
+ if (readl(host->base + SDC_CMD) & 0x1800) {
msdc_abort_data(host);
} else {
/* do basic: reset*/
msdc_reset_hw(host);
- msdc_clr_fifo();
+ msdc_clr_fifo(host);
msdc_clr_int();
}
cmd->error = msdc_tune_cmdrsp(host, cmd);
@@ -963,7 +927,7 @@ static unsigned int msdc_command_resp(struct msdc_host *host,
// check DAT0
/* if (resp == RESP_R1B) {
- while ((sdr_read32(MSDC_PS) & 0x10000) != 0x10000);
+ while ((readl(host->base + MSDC_PS) & 0x10000) != 0x10000);
} */
/* CMD12 Error Handle */
@@ -975,7 +939,7 @@ static unsigned int msdc_do_command(struct msdc_host *host,
int tune,
unsigned long timeout)
{
- if (msdc_command_start(host, cmd, tune, timeout))
+ if (msdc_command_start(host, cmd, timeout))
goto end;
if (msdc_command_resp(host, cmd, tune, timeout))
@@ -991,9 +955,7 @@ end:
// DMA resume / start / stop
static void msdc_dma_resume(struct msdc_host *host)
{
- void __iomem *base = host->base;
-
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_RESUME, 1);
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_RESUME, 1);
N_MSG(DMA, "DMA resume");
}
@@ -1001,31 +963,29 @@ static void msdc_dma_resume(struct msdc_host *host)
static void msdc_dma_start(struct msdc_host *host)
{
- void __iomem *base = host->base;
u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
- sdr_set_bits(MSDC_INTEN, wints);
+ sdr_set_bits(host->base + MSDC_INTEN, wints);
//dsb(); /* --- by chhung */
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
N_MSG(DMA, "DMA start");
}
static void msdc_dma_stop(struct msdc_host *host)
{
- void __iomem *base = host->base;
//u32 retries=500;
u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
- N_MSG(DMA, "DMA status: 0x%.8x", sdr_read32(MSDC_DMA_CFG));
- //while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
+ N_MSG(DMA, "DMA status: 0x%.8x", readl(host->base + MSDC_DMA_CFG));
+ //while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
- while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
+ while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
;
//dsb(); /* --- by chhung */
- sdr_clr_bits(MSDC_INTEN, wints); /* Not just xfer_comp */
+ sdr_clr_bits(host->base + MSDC_INTEN, wints); /* Not just xfer_comp */
N_MSG(DMA, "DMA stop");
}
@@ -1040,97 +1000,54 @@ static u8 msdc_dma_calcs(u8 *buf, u32 len)
return 0xFF - (u8)sum;
}
-/* gpd bd setup + dma registers */
-static void msdc_dma_config(struct msdc_host *host, struct msdc_dma *dma)
+static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
+ struct scatterlist *sg_cmd, unsigned int sglen)
{
- void __iomem *base = host->base;
- //u32 i, j, num, bdlen, arg, xfersz;
- u32 j, num;
struct scatterlist *sg;
struct gpd *gpd;
struct bd *bd;
+ u32 j;
- switch (dma->mode) {
- case MSDC_MODE_DMA_BASIC:
- BUG_ON(host->xfer_size > 65535);
- BUG_ON(dma->sglen != 1);
- sdr_write32(MSDC_DMA_SA, PHYSADDR(sg_dma_address(sg)));
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1);
-//#if defined (CONFIG_RALINK_MT7620)
- if (ralink_soc == MT762X_SOC_MT7620A)
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_XFERSZ, sg_dma_len(sg));
-//#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628)
- else
- sdr_write32((void __iomem *)(RALINK_MSDC_BASE + 0xa8), sg_dma_len(sg));
-//#endif
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ,
- MSDC_BRUST_64B);
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0);
- break;
- case MSDC_MODE_DMA_DESC:
-
- /* calculate the required number of gpd */
- num = (dma->sglen + MAX_BD_PER_GPD - 1) / MAX_BD_PER_GPD;
- BUG_ON(num != 1);
-
- gpd = dma->gpd;
- bd = dma->bd;
-
- /* modify gpd*/
- //gpd->intr = 0;
- gpd->hwo = 1; /* hw will clear it */
- gpd->bdp = 1;
- gpd->chksum = 0; /* need to clear first. */
- gpd->chksum = msdc_dma_calcs((u8 *)gpd, 16);
-
- /* modify bd*/
- for_each_sg(dma->sg, sg, dma->sglen, j) {
- bd[j].blkpad = 0;
- bd[j].dwpad = 0;
- bd[j].ptr = (void *)sg_dma_address(sg);
- bd[j].buflen = sg_dma_len(sg);
-
- if (j == dma->sglen - 1)
- bd[j].eol = 1; /* the last bd */
- else
- bd[j].eol = 0;
-
- bd[j].chksum = 0; /* checksume need to clear first */
- bd[j].chksum = msdc_dma_calcs((u8 *)(&bd[j]), 16);
- }
+ BUG_ON(sglen > MAX_BD_NUM); /* not supported currently */
- sdr_set_field(MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ,
- MSDC_BRUST_64B);
- sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1);
+ N_MSG(DMA, "DMA sglen<%d> xfersz<%d>", sglen, host->xfer_size);
- sdr_write32(MSDC_DMA_SA, PHYSADDR((u32)dma->gpd_addr));
- break;
-
- default:
- break;
- }
+ gpd = dma->gpd;
+ bd = dma->bd;
- N_MSG(DMA, "DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
- N_MSG(DMA, "DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
- N_MSG(DMA, "DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
+ /* modify gpd */
+ //gpd->intr = 0;
+ gpd->hwo = 1; /* hw will clear it */
+ gpd->bdp = 1;
+ gpd->chksum = 0; /* need to clear first. */
+ gpd->chksum = msdc_dma_calcs((u8 *)gpd, 16);
-}
+ /* modify bd */
+ for_each_sg(sg_cmd, sg, sglen, j) {
+ bd[j].blkpad = 0;
+ bd[j].dwpad = 0;
+ bd[j].ptr = (void *)sg_dma_address(sg);
+ bd[j].buflen = sg_dma_len(sg);
-static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
- struct scatterlist *sg, unsigned int sglen)
-{
- BUG_ON(sglen > MAX_BD_NUM); /* not support currently */
+ if (j == sglen - 1)
+ bd[j].eol = 1; /* the last bd */
+ else
+ bd[j].eol = 0;
- dma->sg = sg;
- dma->sglen = sglen;
+ bd[j].chksum = 0; /* checksum needs to be cleared first */
+ bd[j].chksum = msdc_dma_calcs((u8 *)(&bd[j]), 16);
+ }
- dma->mode = MSDC_MODE_DMA_DESC;
+ sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ,
+ MSDC_BRUST_64B);
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1);
- N_MSG(DMA, "DMA mode<%d> sglen<%d> xfersz<%d>", dma->mode, dma->sglen,
- host->xfer_size);
+ writel(PHYSADDR((u32)dma->gpd_addr), host->base + MSDC_DMA_SA);
- msdc_dma_config(host, dma);
+ N_MSG(DMA, "DMA_CTRL = 0x%x", readl(host->base + MSDC_DMA_CTRL));
+ N_MSG(DMA, "DMA_CFG = 0x%x", readl(host->base + MSDC_DMA_CFG));
+ N_MSG(DMA, "DMA_SA = 0x%x", readl(host->base + MSDC_DMA_SA));
}
static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
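For orientation, the call order implied by msdc_do_request() in the hunks that follow is: map the scatterlist for DMA, hand it to msdc_dma_setup() to build the gpd/bd chain, then kick the engine. Condensed (mmc_get_dma_dir() is one way to pick the direction, assuming it is available in this tree; the driver may open-code the read/write test instead):

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	msdc_dma_setup(host, &host->dma, data->sg, data->sg_count);
	msdc_dma_start(host);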
@@ -1139,7 +1056,6 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct msdc_host *host = mmc_priv(mmc);
struct mmc_command *cmd;
struct mmc_data *data;
- void __iomem *base = host->base;
//u32 intsts = 0;
int read = 1, send_type = 0;
@@ -1182,14 +1098,14 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
}
- sdr_write32(SDC_BLK_NUM, data->blocks);
- //msdc_clr_fifo(); /* no need */
+ writel(data->blocks, host->base + SDC_BLK_NUM);
+ //msdc_clr_fifo(host); /* no need */
msdc_dma_on(); /* enable DMA mode first!! */
init_completion(&host->xfer_done);
/* start the command first*/
- if (msdc_command_start(host, cmd, 1, CMD_TIMEOUT) != 0)
+ if (msdc_command_start(host, cmd, CMD_TIMEOUT) != 0)
goto done;
data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg,
@@ -1210,14 +1126,18 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock(&host->lock);
if (!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)) {
ERR_MSG("XXX CMD<%d> wait xfer_done<%d> timeout!!", cmd->opcode, data->blocks * data->blksz);
- ERR_MSG(" DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
- ERR_MSG(" DMA_CA = 0x%x", sdr_read32(MSDC_DMA_CA));
- ERR_MSG(" DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
- ERR_MSG(" DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
+ ERR_MSG(" DMA_SA = 0x%x",
+ readl(host->base + MSDC_DMA_SA));
+ ERR_MSG(" DMA_CA = 0x%x",
+ readl(host->base + MSDC_DMA_CA));
+ ERR_MSG(" DMA_CTRL = 0x%x",
+ readl(host->base + MSDC_DMA_CTRL));
+ ERR_MSG(" DMA_CFG = 0x%x",
+ readl(host->base + MSDC_DMA_CFG));
data->error = -ETIMEDOUT;
msdc_reset_hw(host);
- msdc_clr_fifo();
+ msdc_clr_fifo(host);
msdc_clr_int();
}
spin_lock(&host->lock);
@@ -1311,7 +1231,6 @@ static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host)
static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
{
int result = -1;
- void __iomem *base = host->base;
u32 rsmpl, cur_rsmpl, orig_rsmpl;
u32 rrdly, cur_rrdly = 0xffffffff, orig_rrdly;
u32 skip = 1;
@@ -1322,8 +1241,9 @@ static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
==========================*/
// save the previous tune result
- sdr_get_field(MSDC_IOCON, MSDC_IOCON_RSPL, &orig_rsmpl);
- sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, &orig_rrdly);
+ sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL, &orig_rsmpl);
+ sdr_get_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
+ &orig_rrdly);
rrdly = 0;
do {
@@ -1334,7 +1254,8 @@ static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
skip = 0;
continue;
}
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, cur_rsmpl);
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL,
+ cur_rsmpl);
if (host->app_cmd) {
result = msdc_app_cmd(host->mmc, host);
@@ -1356,14 +1277,15 @@ static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
}
/* should be EIO */
- if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
+ /* check if the command has a data phase */
+ if (readl(host->base + SDC_CMD) & 0x1800)
msdc_abort_data(host);
- }
}
/* Lv2: PAD_CMD_RESP_RXDLY[26:22] */
cur_rrdly = (orig_rrdly + rrdly + 1) % 32;
- sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly);
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly);
} while (++rrdly < 32);
return result;
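Stripped of the register programming, the search above is a two-level sweep: the outer loop walks all 32 PAD_CMD_RESP_RXDLY taps (Lv2) while the inner pass tries both RSPL sample edges (Lv1), re-issuing the failing command each round. Schematically (program_edge_and_delay() and retry_command() are hypothetical stand-ins for the sdr_set_field() calls and the msdc_app_cmd()/request retry above):

	for (rrdly = 0; rrdly < 32; rrdly++) {
		for (rsmpl = 0; rsmpl < 2; rsmpl++) {
			program_edge_and_delay(rsmpl, rrdly);	/* hypothetical */
			if (retry_command() == 0)		/* hypothetical */
				return 0;	/* tuned successfully */
		}
	}
	return -EIO;	/* still failing after all 64 combinations */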
@@ -1373,7 +1295,6 @@ static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msdc_host *host = mmc_priv(mmc);
- void __iomem *base = host->base;
u32 ddr = 0;
u32 dcrc = 0;
u32 rxdly, cur_rxdly0, cur_rxdly1;
@@ -1385,10 +1306,10 @@ static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
int result = -1;
u32 skip = 1;
- sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
+ sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
/* Tune Method 2. */
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
rxdly = 0;
do {
@@ -1398,7 +1319,8 @@ static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
skip = 0;
continue;
}
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL,
+ cur_dsmpl);
if (host->app_cmd) {
result = msdc_app_cmd(host->mmc, host);
@@ -1409,14 +1331,15 @@ static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
}
result = msdc_do_request(mmc, mrq);
- sdr_get_field(SDC_DCRC_STS,
+ sdr_get_field(host->base + SDC_DCRC_STS,
SDC_DCRC_STS_POS | SDC_DCRC_STS_NEG,
&dcrc); /* RO */
if (!ddr)
dcrc &= ~SDC_DCRC_STS_NEG;
ERR_MSG("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>",
(result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc,
- sdr_read32(MSDC_DAT_RDDLY0), sdr_read32(MSDC_DAT_RDDLY1), cur_dsmpl);
+ readl(host->base + MSDC_DAT_RDDLY0),
+ readl(host->base + MSDC_DAT_RDDLY1), cur_dsmpl);
/* FIXME: result is 0, but dcrc still exists */
if (result == 0 && dcrc == 0) {
@@ -1432,11 +1355,11 @@ static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
}
}
- cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
- cur_rxdly1 = sdr_read32(MSDC_DAT_RDDLY1);
+ cur_rxdly0 = readl(host->base + MSDC_DAT_RDDLY0);
+ cur_rxdly1 = readl(host->base + MSDC_DAT_RDDLY1);
/* E1 ECO. YD: Reverse */
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ if (readl(host->base + MSDC_ECO_VER) >= 4) {
orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
@@ -1475,8 +1398,8 @@ static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0);
- sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
- sdr_write32(MSDC_DAT_RDDLY1, cur_rxdly1);
+ writel(cur_rxdly0, host->base + MSDC_DAT_RDDLY0);
+ writel(cur_rxdly1, host->base + MSDC_DAT_RDDLY1);
} while (++rxdly < 32);
@@ -1487,7 +1410,6 @@ done:
static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msdc_host *host = mmc_priv(mmc);
- void __iomem *base = host->base;
u32 wrrdly, cur_wrrdly = 0xffffffff, orig_wrrdly;
u32 dsmpl, cur_dsmpl, orig_dsmpl;
@@ -1499,15 +1421,16 @@ static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
// MSDC_IOCON_DDR50CKD needs checking. [FIXME]
- sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, &orig_wrrdly);
- sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
+ sdr_get_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY,
+ &orig_wrrdly);
+ sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
/* Tune Method 2. just DAT0 */
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
- cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
+ cur_rxdly0 = readl(host->base + MSDC_DAT_RDDLY0);
/* E1 ECO. YD: Reverse */
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ if (readl(host->base + MSDC_ECO_VER) >= 4) {
orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
@@ -1529,7 +1452,8 @@ static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
skip = 0;
continue;
}
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
+ sdr_set_field(host->base + MSDC_IOCON,
+ MSDC_IOCON_DSPL, cur_dsmpl);
if (host->app_cmd) {
result = msdc_app_cmd(host->mmc, host);
@@ -1556,7 +1480,8 @@ static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
}
}
cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32;
- sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly);
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly);
} while (++wrrdly < 32);
cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */
@@ -1565,7 +1490,7 @@ static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
cur_dat3 = orig_dat3;
cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
- sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
+ writel(cur_rxdly0, host->base + MSDC_DAT_RDDLY0);
} while (++rxdly < 32);
done:
@@ -1620,12 +1545,10 @@ static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host)
static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msdc_host *host = mmc_priv(mmc);
- struct mmc_command *cmd;
struct mmc_data *data;
//u32 base = host->base;
int ret = 0, read;
- cmd = mrq->cmd;
data = mrq->cmd->data;
read = data->flags & MMC_DATA_READ ? 1 : 0;
@@ -1717,8 +1640,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
/* called by ops.set_ios */
static void msdc_set_buswidth(struct msdc_host *host, u32 width)
{
- void __iomem *base = host->base;
- u32 val = sdr_read32(SDC_CFG);
+ u32 val = readl(host->base + SDC_CFG);
val &= ~SDC_CFG_BUSWIDTH;
@@ -1736,7 +1658,7 @@ static void msdc_set_buswidth(struct msdc_host *host, u32 width)
break;
}
- sdr_write32(SDC_CFG, val);
+ writel(val, host->base + SDC_CFG);
N_MSG(CFG, "Bus Width = %d", width);
}
@@ -1745,7 +1667,6 @@ static void msdc_set_buswidth(struct msdc_host *host, u32 width)
static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
- void __iomem *base = host->base;
u32 ddr = 0;
#ifdef MT6575_SD_DEBUG
@@ -1791,18 +1712,23 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->clock > 25000000) {
//if (!(host->hw->flags & MSDC_REMOVABLE)) {
INIT_MSG("SD data latch edge<%d>", MSDC_SMPL_FALLING);
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL,
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL,
MSDC_SMPL_FALLING);
- sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL,
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL,
MSDC_SMPL_FALLING);
//} /* for tuning debug */
} else { /* default value */
- sdr_write32(MSDC_IOCON, 0x00000000);
- // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
- sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
- sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
- // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
- sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
+ writel(0x00000000, host->base + MSDC_IOCON);
+ // writel(0x00000000, host->base + MSDC_DAT_RDDLY0);
+
+ // for MT7620 E2 and afterward
+ writel(0x10101010, host->base + MSDC_DAT_RDDLY0);
+
+ writel(0x00000000, host->base + MSDC_DAT_RDDLY1);
+ // writel(0x00000000, host->base + MSDC_PAD_TUNE);
+
+ // for MT7620 E2 and afterward
+ writel(0x84101010, host->base + MSDC_PAD_TUNE);
}
msdc_set_mclk(host, ddr, ios->clock);
}
@@ -1812,13 +1738,12 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int msdc_ops_get_ro(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
- void __iomem *base = host->base;
unsigned long flags;
int ro = 0;
if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */
spin_lock_irqsave(&host->lock, flags);
- ro = (sdr_read32(MSDC_PS) >> 31);
+ ro = (readl(host->base + MSDC_PS) >> 31);
spin_unlock_irqrestore(&host->lock, flags);
}
return ro;
@@ -1828,7 +1753,6 @@ static int msdc_ops_get_ro(struct mmc_host *mmc)
static int msdc_ops_get_cd(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
- void __iomem *base = host->base;
unsigned long flags;
int present = 1;
@@ -1852,10 +1776,11 @@ static int msdc_ops_get_cd(struct mmc_host *mmc)
present = host->card_inserted; /* why not read from H/W? FIXME */
#else
// CD
+ present = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
if (cd_active_low)
- present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1;
+ present = present ? 0 : 1;
else
- present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 1 : 0;
+ present = present ? 1 : 0;
host->card_inserted = present;
#endif
spin_unlock_irqrestore(&host->lock, flags);
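Incidentally, the rewritten ladder could collapse to a single XOR, assuming cd_active_low is normalized to 0 or 1 (a sketch of an alternative, not what the patch does):

	present = !!(readl(host->base + MSDC_PS) & MSDC_PS_CDSTS) ^
		  cd_active_low;
	host->card_inserted = present;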
@@ -1882,17 +1807,16 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
struct msdc_host *host = (struct msdc_host *)dev_id;
struct mmc_data *data = host->data;
struct mmc_command *cmd = host->cmd;
- void __iomem *base = host->base;
u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY |
MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY |
MSDC_INT_ACMD19_DONE;
u32 datsts = MSDC_INT_DATCRCERR | MSDC_INT_DATTMO;
- u32 intsts = sdr_read32(MSDC_INT);
- u32 inten = sdr_read32(MSDC_INTEN); inten &= intsts;
+ u32 intsts = readl(host->base + MSDC_INT);
+ u32 inten = readl(host->base + MSDC_INTEN); inten &= intsts;
- sdr_write32(MSDC_INT, intsts); /* clear interrupts */
+ writel(intsts, host->base + MSDC_INT); /* clear interrupts */
/* MSG will cause fatal error */
/* card change interrupt */
@@ -1920,18 +1844,18 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
if (intsts & datsts) {
/* do basic reset, or stop command will sdc_busy */
msdc_reset_hw(host);
- msdc_clr_fifo();
+ msdc_clr_fifo(host);
msdc_clr_int();
if (intsts & MSDC_INT_DATTMO) {
IRQ_MSG("XXX CMD<%d> MSDC_INT_DATTMO", host->mrq->cmd->opcode);
data->error = -ETIMEDOUT;
} else if (intsts & MSDC_INT_DATCRCERR) {
- IRQ_MSG("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>", host->mrq->cmd->opcode, sdr_read32(SDC_DCRC_STS));
+ IRQ_MSG("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>", host->mrq->cmd->opcode, readl(host->base + SDC_DCRC_STS));
data->error = -EIO;
}
- //if(sdr_read32(MSDC_INTEN) & MSDC_INT_XFER_COMPL) {
+ //if(readl(MSDC_INTEN) & MSDC_INT_XFER_COMPL) {
complete(&host->xfer_done); /* Read CRC come fast, XFER_COMPL not enabled */
}
}
@@ -1946,14 +1870,16 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
case RESP_NONE:
break;
case RESP_R2:
- *rsp++ = sdr_read32(SDC_RESP3); *rsp++ = sdr_read32(SDC_RESP2);
- *rsp++ = sdr_read32(SDC_RESP1); *rsp++ = sdr_read32(SDC_RESP0);
+ *rsp++ = readl(host->base + SDC_RESP3);
+ *rsp++ = readl(host->base + SDC_RESP2);
+ *rsp++ = readl(host->base + SDC_RESP1);
+ *rsp++ = readl(host->base + SDC_RESP0);
break;
default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE))
- *rsp = sdr_read32(SDC_ACMD_RESP);
+ *rsp = readl(host->base + SDC_ACMD_RESP);
else
- *rsp = sdr_read32(SDC_RESP0);
+ *rsp = readl(host->base + SDC_RESP0);
break;
}
} else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) {
@@ -1969,7 +1895,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
IRQ_MSG("XXX CMD<%d> MSDC_INT_CMDTMO", cmd->opcode);
cmd->error = -ETIMEDOUT;
msdc_reset_hw(host);
- msdc_clr_fifo();
+ msdc_clr_fifo(host);
msdc_clr_int();
}
complete(&host->cmd_done);
@@ -1977,7 +1903,8 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
/* mmc irq interrupts */
if (intsts & MSDC_INT_MMCIRQ)
- printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n", host->id, sdr_read32(SDC_CSTS));
+ printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n",
+ host->id, readl(host->base + SDC_CSTS));
#ifdef MT6575_SD_DEBUG
{
@@ -2017,7 +1944,6 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
{
struct msdc_hw *hw = host->hw;
- void __iomem *base = host->base;
/* for sdio, not set */
if ((hw->flags & MSDC_CD_PIN_EN) == 0) {
@@ -2026,9 +1952,9 @@ static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
if (hw->config_gpio_pin)
hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
*/
- sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
- sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
- sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP);
+ sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+ sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
return;
}
@@ -2044,17 +1970,20 @@ static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
if (hw->config_gpio_pin) /* NULL */
hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP);
- sdr_set_field(MSDC_PS, MSDC_PS_CDDEBOUNCE, DEFAULT_DEBOUNCE);
- sdr_set_bits(MSDC_PS, MSDC_PS_CDEN);
- sdr_set_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
- sdr_set_bits(SDC_CFG, SDC_CFG_INSWKUP); /* not in document! Fix me */
+ sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
+ DEFAULT_DEBOUNCE);
+ sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+ sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
+
+ /* not in the document! FIXME */
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
} else {
if (hw->config_gpio_pin) /* NULL */
hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
- sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP);
- sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
- sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
+ sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+ sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
/* This decreases the reference count on core power since the card
* detection circuit is shut down.
@@ -2066,7 +1995,6 @@ static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
/* called by msdc_drv_probe */
static void msdc_init_hw(struct msdc_host *host)
{
- void __iomem *base = host->base;
/* Power on */
#if 0 /* --- by chhung */
@@ -2077,41 +2005,51 @@ static void msdc_init_hw(struct msdc_host *host)
msdc_vdd_on(host);
#endif /* end of --- */
/* Configure to MMC/SD mode */
- sdr_set_field(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
+ sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
/* Reset */
msdc_reset_hw(host);
- msdc_clr_fifo();
+ msdc_clr_fifo(host);
/* Disable card detection */
- sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
+ sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
/* Disable and clear all interrupts */
- sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
- sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
+ sdr_clr_bits(host->base + MSDC_INTEN, readl(host->base + MSDC_INTEN));
+ writel(readl(host->base + MSDC_INT), host->base + MSDC_INT);
#if 1
/* reset tuning parameter */
- sdr_write32(MSDC_PAD_CTL0, 0x00090000);
- sdr_write32(MSDC_PAD_CTL1, 0x000A0000);
- sdr_write32(MSDC_PAD_CTL2, 0x000A0000);
- // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
- sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
- // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
- sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
- sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
- sdr_write32(MSDC_IOCON, 0x00000000);
+ writel(0x00090000, host->base + MSDC_PAD_CTL0);
+ writel(0x000A0000, host->base + MSDC_PAD_CTL1);
+ writel(0x000A0000, host->base + MSDC_PAD_CTL2);
+ // writel(0x00000000, host->base + MSDC_PAD_TUNE);
+
+ // for MT7620 E2 and afterward
+ writel(0x84101010, host->base + MSDC_PAD_TUNE);
+
+ // writel(0x00000000, host->base + MSDC_DAT_RDDLY0);
+
+ // for MT7620 E2 and afterward
+ writel(0x10101010, host->base + MSDC_DAT_RDDLY0);
+
+ writel(0x00000000, host->base + MSDC_DAT_RDDLY1);
+ writel(0x00000000, host->base + MSDC_IOCON);
#if 0 // use MT7620 default value: 0x403c004f
- sdr_write32(MSDC_PATCH_BIT0, 0x003C000F); /* bit0 modified: Rx Data Clock Source: 1 -> 2.0*/
+ /* bit0 modified: Rx Data Clock Source: 1 -> 2.0 */
+ writel(0x003C000F, host->base + MSDC_PATCH_BIT0);
#endif
- if (sdr_read32(MSDC_ECO_VER) >= 4) {
+ if (readl(host->base + MSDC_ECO_VER) >= 4) {
if (host->id == 1) {
- sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_WRDAT_CRCS, 1);
- sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMD_RSP, 1);
+ sdr_set_field(host->base + MSDC_PATCH_BIT1,
+ MSDC_PATCH_BIT1_WRDAT_CRCS, 1);
+ sdr_set_field(host->base + MSDC_PATCH_BIT1,
+ MSDC_PATCH_BIT1_CMD_RSP, 1);
/* internal clock: latch read data */
- sdr_set_bits(MSDC_PATCH_BIT0, MSDC_PATCH_BIT_CKGEN_CK);
+ sdr_set_bits(host->base + MSDC_PATCH_BIT0,
+ MSDC_PATCH_BIT_CKGEN_CK);
}
}
#endif
@@ -2120,40 +2058,40 @@ static void msdc_init_hw(struct msdc_host *host)
pre-loader, U-Boot, and kernel drivers. SDC_CFG.SDIO_INT_DET_EN will only be
set when the kernel driver wants to use the SDIO bus interrupt */
/* Configure to enable SDIO mode; it's a must, otherwise SDIO CMD5 fails */
- sdr_set_bits(SDC_CFG, SDC_CFG_SDIO);
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
/* disable SDIO device interrupt detection */
- sdr_clr_bits(SDC_CFG, SDC_CFG_SDIOIDE);
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
/* enable SMT for glitch filtering */
- sdr_set_bits(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
- sdr_set_bits(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
- sdr_set_bits(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
+ sdr_set_bits(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
+ sdr_set_bits(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
+ sdr_set_bits(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
#if 1
/* set clk, cmd, dat pad driving */
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 4);
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 4);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 4);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 4);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 4);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 4);
+ sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 4);
+ sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 4);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 4);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 4);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 4);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 4);
#else
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
- sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
- sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
- sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
+ sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
#endif
/* set sampling edge */
/* write crc timeout detection */
- sdr_set_field(MSDC_PATCH_BIT0, 1 << 30, 1);
+ sdr_set_field(host->base + MSDC_PATCH_BIT0, 1 << 30, 1);
/* Configure to default data timeout */
- sdr_set_field(SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
+ sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
msdc_set_buswidth(host, MMC_BUS_WIDTH_1);
@@ -2163,11 +2101,9 @@ static void msdc_init_hw(struct msdc_host *host)
/* called by msdc_drv_remove */
static void msdc_deinit_hw(struct msdc_host *host)
{
- void __iomem *base = host->base;
-
/* Disable and clear all interrupts */
- sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
- sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
+ sdr_clr_bits(host->base + MSDC_INTEN, readl(host->base + MSDC_INTEN));
+ writel(readl(host->base + MSDC_INT), host->base + MSDC_INT);
/* Disable card detection */
msdc_enable_cd_irq(host, 0);
@@ -2429,8 +2365,8 @@ static int __init mt_msdc_init(void)
// Set the SDXC pins to SDXC mode
//FIXME: this should be done by pinctrl and not by the SD driver
- reg = sdr_read32((void __iomem *)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3 << 18);
- sdr_write32((void __iomem *)(RALINK_SYSCTL_BASE + 0x60), reg);
+ reg = readl((void __iomem *)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3 << 18);
+ writel(reg, (void __iomem *)(RALINK_SYSCTL_BASE + 0x60));
ret = platform_driver_register(&mt_msdc_driver);
if (ret) {
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
new file mode 100644
index 000000000000..d33533872a16
--- /dev/null
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -0,0 +1,7 @@
+config PCI_MT7621
+ tristate "MediaTek MT7621 PCI Controller"
+ depends on RALINK
+ select PCI_DRIVERS_GENERIC
+ help
+ This selects a driver for the MediaTek MT7621 PCI Controller.
+
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 17f2105ec698..a49e2795af6b 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/**************************************************************************
*
* BRIEF MODULE DESCRIPTION
@@ -39,431 +40,290 @@
**************************************************************************
*/
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/version.h>
-#include <asm/pci.h>
-#include <asm/io.h>
-#include <asm/mips-cm.h>
-#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
-
+#include <linux/reset.h>
+#include <mt7621.h>
#include <ralink_regs.h>
+#include "../../pci/pci.h"
+
/*
* These functions and structures provide the BIOS scan and mapping of the PCI
* devices.
*/
-#define RALINK_PCIE0_CLK_EN (1<<24)
-#define RALINK_PCIE1_CLK_EN (1<<25)
-#define RALINK_PCIE2_CLK_EN (1<<26)
+#define RALINK_PCIE0_CLK_EN BIT(24)
+#define RALINK_PCIE1_CLK_EN BIT(25)
+#define RALINK_PCIE2_CLK_EN BIT(26)
#define RALINK_PCI_CONFIG_ADDR 0x20
-#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24
-#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028)
-#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C)
-#define RALINK_PCIE0_RST (1<<24)
-#define RALINK_PCIE1_RST (1<<25)
-#define RALINK_PCIE2_RST (1<<26)
-#define RALINK_SYSCTL_BASE 0xBE000000
-
-#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000)
-#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C)
-#define RALINK_PCI_BASE 0xBE140000
-
-#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
+#define RALINK_PCI_CONFIG_DATA 0x24
+#define RALINK_PCI_MEMBASE 0x28
+#define RALINK_PCI_IOBASE 0x2C
+#define RALINK_PCIE0_RST BIT(24)
+#define RALINK_PCIE1_RST BIT(25)
+#define RALINK_PCIE2_RST BIT(26)
+
+#define RALINK_PCI_PCICFG_ADDR 0x0000
+#define RALINK_PCI_PCIMSK_ADDR 0x000C
+
#define RT6855_PCIE0_OFFSET 0x2000
#define RT6855_PCIE1_OFFSET 0x3000
#define RT6855_PCIE2_OFFSET 0x4000
-#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010)
-#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018)
-#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030)
-#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034)
-#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038)
-#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050)
-#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060)
-#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064)
-
-#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010)
-#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018)
-#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030)
-#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034)
-#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038)
-#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050)
-#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060)
-#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064)
-
-#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0010)
-#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018)
-#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030)
-#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034)
-#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038)
-#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050)
-#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060)
-#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064)
-
-#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
-#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000)
-
-#define MV_WRITE(ofs, data) \
- *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data)
-#define MV_READ(ofs, data) \
- *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
-#define MV_READ_DATA(ofs) \
- le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
-
-#define MV_WRITE_16(ofs, data) \
- *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data)
-#define MV_READ_16(ofs, data) \
- *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs)))
-
-#define MV_WRITE_8(ofs, data) \
- *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data
-#define MV_READ_8(ofs, data) \
- *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs))
+#define RALINK_PCI_BAR0SETUP_ADDR 0x0010
+#define RALINK_PCI_IMBASEBAR0_ADDR 0x0018
+#define RALINK_PCI_ID 0x0030
+#define RALINK_PCI_CLASS 0x0034
+#define RALINK_PCI_SUBID 0x0038
+#define RALINK_PCI_STATUS 0x0050
+
+#define RALINK_PCIEPHY_P0P1_CTL_OFFSET 0x9000
+#define RALINK_PCIEPHY_P2_CTL_OFFSET 0xA000
#define RALINK_PCI_MM_MAP_BASE 0x60000000
#define RALINK_PCI_IO_MAP_BASE 0x1e160000
-#define RALINK_SYSTEM_CONTROL_BASE 0xbe000000
-
#define ASSERT_SYSRST_PCIE(val) \
do { \
- if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
- RALINK_RSTCTRL |= val; \
+ if (rt_sysc_r32(SYSC_REG_CHIP_REV) == 0x00030101) \
+ rt_sysc_m32(0, val, RALINK_RSTCTRL); \
else \
- RALINK_RSTCTRL &= ~val; \
- } while(0)
+ rt_sysc_m32(val, 0, RALINK_RSTCTRL); \
+ } while (0)
#define DEASSERT_SYSRST_PCIE(val) \
do { \
- if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
- RALINK_RSTCTRL &= ~val; \
+ if (rt_sysc_r32(SYSC_REG_CHIP_REV) == 0x00030101) \
+ rt_sysc_m32(val, 0, RALINK_RSTCTRL); \
else \
- RALINK_RSTCTRL |= val; \
- } while(0)
-#define RALINK_SYSCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x14)
-#define RALINK_CLKCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x30)
-#define RALINK_RSTCTRL *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x34)
-#define RALINK_GPIOMODE *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x60)
-#define RALINK_PCIE_CLK_GEN *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x7c)
-#define RALINK_PCIE_CLK_GEN1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x80)
-#define PPLL_CFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x9c)
-#define PPLL_DRV *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0xa0)
-//RALINK_SYSCFG1 bit
-#define RALINK_PCI_HOST_MODE_EN (1<<7)
-#define RALINK_PCIE_RC_MODE_EN (1<<8)
+ rt_sysc_m32(0, val, RALINK_RSTCTRL); \
+ } while (0)
+
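Editor's note: the ASSERT/DEASSERT macros above rely on rt_sysc_m32(clr, set, reg), the ralink system-controller clear/set helper: a read-modify-write that first clears the 'clr' bits and then sets the 'set' bits. A standalone sketch of those semantics (not part of this patch; the fake register file and helper names are illustrations only):

/* Sketch of rt_sysc_m32(clr, set, reg) semantics, userspace-compilable. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sysc_regs[0x100 / 4];            /* fake system controller */

static uint32_t sysc_r32(uint32_t reg)           { return sysc_regs[reg / 4]; }
static void sysc_w32(uint32_t val, uint32_t reg) { sysc_regs[reg / 4] = val; }

static void sysc_m32(uint32_t clr, uint32_t set, uint32_t reg)
{
	/* clear 'clr' bits, then set 'set' bits, in one read-modify-write */
	sysc_w32((sysc_r32(reg) & ~clr) | set, reg);
}

int main(void)
{
	uint32_t rstctrl = 0x34;                 /* RALINK_RSTCTRL offset */

	sysc_m32(0, 1u << 23, rstctrl);          /* assert PCIe reset */
	printf("after assert:   %08x\n", sysc_r32(rstctrl));
	sysc_m32(1u << 23, 0, rstctrl);          /* deassert PCIe reset */
	printf("after deassert: %08x\n", sysc_r32(rstctrl));
	return 0;
}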
+#define RALINK_CLKCFG1 0x30
+#define RALINK_RSTCTRL 0x34
+#define RALINK_GPIOMODE 0x60
+#define RALINK_PCIE_CLK_GEN 0x7c
+#define RALINK_PCIE_CLK_GEN1 0x80
//RALINK_RSTCTRL bit
-#define RALINK_PCIE_RST (1<<23)
-#define RALINK_PCI_RST (1<<24)
+#define RALINK_PCIE_RST BIT(23)
+#define RALINK_PCI_RST BIT(24)
//RALINK_CLKCFG1 bit
-#define RALINK_PCI_CLK_EN (1<<19)
-#define RALINK_PCIE_CLK_EN (1<<21)
-//RALINK_GPIOMODE bit
-#define PCI_SLOTx2 (1<<11)
-#define PCI_SLOTx1 (2<<11)
-//MTK PCIE PLL bit
-#define PDRV_SW_SET (1<<31)
-#define LC_CKDRVPD_ (1<<19)
+#define RALINK_PCI_CLK_EN BIT(19)
+#define RALINK_PCIE_CLK_EN BIT(21)
#define MEMORY_BASE 0x0
static int pcie_link_status = 0;
-#define PCI_ACCESS_READ_1 0
-#define PCI_ACCESS_READ_2 1
-#define PCI_ACCESS_READ_4 2
-#define PCI_ACCESS_WRITE_1 3
-#define PCI_ACCESS_WRITE_2 4
-#define PCI_ACCESS_WRITE_4 5
-
-static int config_access(unsigned char access_type, struct pci_bus *bus,
- unsigned int devfn, unsigned int where, u32 * data)
-{
- unsigned int slot = PCI_SLOT(devfn);
- u8 func = PCI_FUNC(devfn);
- uint32_t address_reg, data_reg;
- unsigned int address;
-
- address_reg = RALINK_PCI_CONFIG_ADDR;
- data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
-
- address = (((where&0xF00)>>8)<<24) |(bus->number << 16) | (slot << 11) |
- (func << 8) | (where & 0xfc) | 0x80000000;
- MV_WRITE(address_reg, address);
-
- switch(access_type) {
- case PCI_ACCESS_WRITE_1:
- MV_WRITE_8(data_reg+(where&0x3), *data);
- break;
- case PCI_ACCESS_WRITE_2:
- MV_WRITE_16(data_reg+(where&0x3), *data);
- break;
- case PCI_ACCESS_WRITE_4:
- MV_WRITE(data_reg, *data);
- break;
- case PCI_ACCESS_READ_1:
- MV_READ_8( data_reg+(where&0x3), data);
- break;
- case PCI_ACCESS_READ_2:
- MV_READ_16(data_reg+(where&0x3), data);
- break;
- case PCI_ACCESS_READ_4:
- MV_READ(data_reg, data);
- break;
- default:
- printk("no specify access type\n");
- break;
- }
- return 0;
-}
-
-static int
-read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val)
-{
- return config_access(PCI_ACCESS_READ_1, bus, devfn, (unsigned int)where, (u32 *)val);
-}
+/**
+ * struct mt7621_pcie_port - PCIe port information
+ * @base: IO mapped register base
+ * @list: port list
+ * @pcie: pointer to PCIe host info
+ * @reset: pointer to port reset control
+ */
+struct mt7621_pcie_port {
+ void __iomem *base;
+ struct list_head list;
+ struct mt7621_pcie *pcie;
+ struct reset_control *reset;
+};
-static int
-read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val)
-{
- return config_access(PCI_ACCESS_READ_2, bus, devfn, (unsigned int)where, (u32 *)val);
-}
+/**
+ * struct mt7621_pcie - PCIe host information
+ * @base: IO Mapped Register Base
+ * @io: IO resource
+ * @mem: non-prefetchable memory resource
+ * @busn: bus range
+ * @offset: IO / Memory offset
+ * @dev: Pointer to PCIe device
+ * @ports: pointer to PCIe port information
+ */
+struct mt7621_pcie {
+ void __iomem *base;
+ struct device *dev;
+ struct resource io;
+ struct resource mem;
+ struct resource busn;
+ struct {
+ resource_size_t mem;
+ resource_size_t io;
+ } offset;
+ struct list_head ports;
+};
-static int
-read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val)
+static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
{
- return config_access(PCI_ACCESS_READ_4, bus, devfn, (unsigned int)where, (u32 *)val);
+ return readl(pcie->base + reg);
}
-static int
-write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
+static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
{
- if (config_access(PCI_ACCESS_WRITE_1, bus, devfn, (unsigned int)where, (u32 *)&val))
- return -1;
-
- return PCIBIOS_SUCCESSFUL;
+ writel(val, pcie->base + reg);
}
-static int
-write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val)
+static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
+ unsigned int func, unsigned int where)
{
- if (config_access(PCI_ACCESS_WRITE_2, bus, devfn, where, (u32 *)&val))
- return -1;
-
- return PCIBIOS_SUCCESSFUL;
+ return (((where & 0xF00) >> 8) << 24) | (bus << 16) | (slot << 11) |
+ (func << 8) | (where & 0xfc) | 0x80000000;
}
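Editor's note: the new helper mirrors the classic type-1 configuration mechanism: bit 31 is the enable bit, bits 27:24 carry register bits 11:8 (for extended config space), then bus/slot/func, with the low register bits dword-aligned. A standalone sketch of the encoding (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t cfgaddr(unsigned int bus, unsigned int slot,
			unsigned int func, unsigned int where)
{
	return (((where & 0xF00) >> 8) << 24) |	/* extended reg bits 11:8 */
	       (bus << 16) | (slot << 11) | (func << 8) |
	       (where & 0xfc) |			/* dword-aligned low bits */
	       0x80000000;			/* enable bit */
}

int main(void)
{
	/* bus 0, slot 1, func 0, extended offset 0x70c (N_FTS register) */
	printf("%08x\n", cfgaddr(0, 1, 0, 0x70c));	/* -> 8700080c */
	return 0;
}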
-static int
-write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val)
+static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
- if (config_access(PCI_ACCESS_WRITE_4, bus, devfn, where, &val))
- return -1;
-
- return PCIBIOS_SUCCESSFUL;
-}
+ struct mt7621_pcie *pcie = bus->sysdata;
+ u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
-static int
-pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val)
-{
- switch (size) {
- case 1:
- return read_config_byte(bus, devfn, where, (u8 *) val);
- case 2:
- return read_config_word(bus, devfn, where, (u16 *) val);
- default:
- return read_config_dword(bus, devfn, where, val);
- }
-}
+ writel(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
-static int
-pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
-{
- switch (size) {
- case 1:
- return write_config_byte(bus, devfn, where, (u8) val);
- case 2:
- return write_config_word(bus, devfn, where, (u16) val);
- default:
- return write_config_dword(bus, devfn, where, val);
- }
+ return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
}
-struct pci_ops mt7621_pci_ops= {
- .read = pci_config_read,
- .write = pci_config_write,
-};
-
-static struct resource mt7621_res_pci_mem1;
-static struct resource mt7621_res_pci_io1;
-static struct pci_controller mt7621_controller = {
- .pci_ops = &mt7621_pci_ops,
- .mem_resource = &mt7621_res_pci_mem1,
- .io_resource = &mt7621_res_pci_io1,
+struct pci_ops mt7621_pci_ops = {
+ .map_bus = mt7621_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
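Editor's note: because map_bus returns base + RALINK_PCI_CONFIG_DATA + (where & 3), the generic accessors can issue byte and halfword MMIO on the correct lane of the 32-bit config-data window. A little-endian illustration of why the (where & 3) offset picks the right byte (standalone sketch, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t data_reg = 0x06040001;	/* e.g. a class-code dword */
	uint8_t *lanes = (uint8_t *)&data_reg;

	/* on a little-endian host, a 1-byte read at 'where' = 2 is lane 2 */
	printf("byte at offset 2: %02x\n", lanes[2]);	/* -> 04 */
	return 0;
}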
-static void
-read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long *val)
+static u32
+read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
{
- unsigned int address_reg, data_reg, address;
-
- address_reg = RALINK_PCI_CONFIG_ADDR;
- data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
- address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ;
- MV_WRITE(address_reg, address);
- MV_READ(data_reg, val);
- return;
-}
+ u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
-static void
-write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long val)
-{
- unsigned int address_reg, data_reg, address;
-
- address_reg = RALINK_PCI_CONFIG_ADDR;
- data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
- address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ;
- MV_WRITE(address_reg, address);
- MV_WRITE(data_reg, val);
- return;
+ pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
+ return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
}
-int
-pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static void
+write_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg, u32 val)
{
- u16 cmd;
- u32 val;
- int irq;
-
- if (dev->bus->number == 0) {
- write_config(0, slot, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
- read_config(0, slot, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
- printk("BAR0 at slot %d = %x\n", slot, val);
- }
+ u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xFF); //configure latency timer 0x10
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
- pci_write_config_word(dev, PCI_COMMAND, cmd);
-
- irq = of_irq_parse_and_map_pci(dev, slot, pin);
-
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
- return irq;
+ pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
+ pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
}
void
-set_pcie_phy(u32 *addr, int start_b, int bits, int val)
+set_pcie_phy(struct mt7621_pcie *pcie, u32 offset,
+ int start_b, int bits, int val)
{
- *(unsigned int *)(addr) &= ~(((1<<bits) - 1)<<start_b);
- *(unsigned int *)(addr) |= val << start_b;
+ u32 reg = pcie_read(pcie, offset);
+
+ reg &= ~(((1 << bits) - 1) << start_b);
+ reg |= val << start_b;
+ pcie_write(pcie, reg, offset);
}
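Editor's note: set_pcie_phy() is now a plain bit-field insert on a register shadow: clear a bits-wide field at start_b, then OR in val. The same arithmetic as a standalone sketch (illustration only):

#include <stdint.h>
#include <stdio.h>

static uint32_t set_field(uint32_t reg, int start_b, int bits, uint32_t val)
{
	reg &= ~(((1u << bits) - 1) << start_b);	/* clear old field */
	reg |= val << start_b;				/* insert new value */
	return reg;
}

int main(void)
{
	/* e.g. RG_PE1_H_PLL_PREDIV: 2-bit field at bit 6 set to 0x01 */
	printf("%08x\n", set_field(0xffffffff, 6, 2, 0x01));	/* -> ffffff7f */
	return 0;
}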
void
-bypass_pipe_rst(void)
+bypass_pipe_rst(struct mt7621_pcie *pcie)
{
/* PCIe Port 0 */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
/* PCIe Port 1 */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
/* PCIe Port 2 */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
}
void
-set_phy_for_ssc(void)
+set_phy_for_ssc(struct mt7621_pcie *pcie)
{
- unsigned long reg = (*(volatile u32 *)(RALINK_SYSCTL_BASE + 0x10));
+ unsigned long reg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0);
reg = (reg >> 6) & 0x7;
/* Set PCIe Port0 & Port1 PHY to disable SSC */
/* Debug Xtal Type */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable
+ if (reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
printk("***** Xtal 40MHz *****\n");
} else { // 25MHz | 20MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
if (reg >= 6) {
printk("***** Xtal 25MHz *****\n");
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0, 31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0, 16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0, 12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16, 12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
} else {
printk("***** Xtal 20MHz *****\n");
}
}
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
+ if (reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
}
/* Enable PHY and disable force mode */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control
/* Set PCIe Port2 PHY to disable SSC */
/* Debug Xtal Type */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
+ if (reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
} else { // 25MHz | 20MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
if (reg >= 6) { // 25MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0, 31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0, 16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0, 12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16, 12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
}
}
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
- if(reg <= 5 && reg >= 3) { // 40MHz Xtal
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
+ if (reg <= 5 && reg >= 3) { // 40MHz Xtal
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
}
/* Enable PHY and disable force mode */
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
- set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
+ set_pcie_phy(pcie, (RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
}
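Editor's note: the xtal decode at the top of set_phy_for_ssc() reads SYSTEM_CONFIG0 bits 8:6; per the branches above, values 3..5 mean a 40 MHz crystal, 6 and up mean 25 MHz, and anything lower means 20 MHz. As a standalone sketch of that decode rule:

#include <stdint.h>
#include <stdio.h>

static const char *xtal_type(uint32_t syscfg0)
{
	uint32_t reg = (syscfg0 >> 6) & 0x7;	/* bits 8:6 */

	if (reg >= 3 && reg <= 5)
		return "40 MHz";
	return reg >= 6 ? "25 MHz" : "20 MHz";
}

int main(void)
{
	printf("%s\n", xtal_type(4u << 6));	/* -> 40 MHz */
	return 0;
}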
-void setup_cm_memory_region(struct resource *mem_resource)
+static void setup_cm_memory_region(struct resource *mem_resource)
{
resource_size_t mask;
+
if (mips_cps_numiocu(0)) {
/* FIXME: hardware doesn't accept mask values with 1s after
* 0s (e.g. 0xffef), so it would be great to warn if that's
@@ -478,14 +338,142 @@ void setup_cm_memory_region(struct resource *mem_resource)
}
}
+static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int err;
+
+ if (of_pci_range_parser_init(&parser, node)) {
+ dev_err(dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ struct resource *res = NULL;
+
+ switch (range.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ ioremap(range.cpu_addr, range.size);
+ res = &pcie->io;
+ pcie->offset.io = 0x00000000UL;
+ break;
+ case IORESOURCE_MEM:
+ res = &pcie->mem;
+ pcie->offset.mem = 0x00000000UL;
+ break;
+ }
+
+ if (res != NULL)
+ of_pci_range_to_resource(&range, node, res);
+ }
+
+ err = of_pci_parse_bus_range(node, &pcie->busn);
+ if (err < 0) {
+ dev_err(dev, "failed to parse bus ranges property: %d\n", err);
+ pcie->busn.name = node->name;
+ pcie->busn.start = 0;
+ pcie->busn.end = 0xff;
+ pcie->busn.flags = IORESOURCE_BUS;
+ }
+
+ return 0;
+}
+
+static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+ const char *type;
+ int err;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci") != 0) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ err = of_address_to_resource(node, 0, &regs);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ pcie->base = devm_ioremap_resource(dev, &regs);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ return 0;
+}
+
+static int mt7621_pcie_request_resources(struct mt7621_pcie *pcie,
+ struct list_head *res)
+{
+ struct device *dev = pcie->dev;
+ int err;
+
+ pci_add_resource_offset(res, &pcie->io, pcie->offset.io);
+ pci_add_resource_offset(res, &pcie->mem, pcie->offset.mem);
+ pci_add_resource(res, &pcie->busn);
+
+ err = devm_request_pci_bus_resources(dev, res);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int mt7621_pcie_register_host(struct pci_host_bridge *host,
+ struct list_head *res)
+{
+ struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
+
+ list_splice_init(res, &host->windows);
+ host->busnr = pcie->busn.start;
+ host->dev.parent = pcie->dev;
+ host->ops = &mt7621_pci_ops;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+ host->sysdata = pcie;
+
+ return pci_host_probe(host);
+}
+
static int mt7621_pci_probe(struct platform_device *pdev)
{
- unsigned long val = 0;
+ struct device *dev = &pdev->dev;
+ struct mt7621_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ int err;
+ u32 val = 0;
+ LIST_HEAD(res);
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
+ INIT_LIST_HEAD(&pcie->ports);
+
+ err = mt7621_pcie_parse_dt(pcie);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+ /* set resources limits */
iomem_resource.start = 0;
- iomem_resource.end= ~0;
- ioport_resource.start= 0;
- ioport_resource.end = ~0;
+ iomem_resource.end = ~0UL; /* no limit */
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL; /* no limit */
val = RALINK_PCIE0_RST;
val |= RALINK_PCIE1_RST;
@@ -509,62 +497,66 @@ static int mt7621_pci_probe(struct platform_device *pdev)
DEASSERT_SYSRST_PCIE(val);
if ((*(unsigned int *)(0xbe00000c)&0xFFFF) == 0x0101) // MT7621 E2
- bypass_pipe_rst();
- set_phy_for_ssc();
+ bypass_pipe_rst(pcie);
+ set_phy_for_ssc(pcie);
- read_config(0, 0, 0, 0x70c, &val);
+ val = read_config(pcie, 0, 0x70c);
printk("Port 0 N_FTS = %x\n", (unsigned int)val);
- read_config(0, 1, 0, 0x70c, &val);
+ val = read_config(pcie, 1, 0x70c);
printk("Port 1 N_FTS = %x\n", (unsigned int)val);
- read_config(0, 2, 0, 0x70c, &val);
+ val = read_config(pcie, 2, 0x70c);
printk("Port 2 N_FTS = %x\n", (unsigned int)val);
- RALINK_RSTCTRL = (RALINK_RSTCTRL | RALINK_PCIE_RST);
- RALINK_SYSCFG1 &= ~(0x30);
- RALINK_SYSCFG1 |= (2<<4);
- RALINK_PCIE_CLK_GEN &= 0x7fffffff;
- RALINK_PCIE_CLK_GEN1 &= 0x80ffffff;
- RALINK_PCIE_CLK_GEN1 |= 0xa << 24;
- RALINK_PCIE_CLK_GEN |= 0x80000000;
+ rt_sysc_m32(0, RALINK_PCIE_RST, RALINK_RSTCTRL);
+ rt_sysc_m32(0x30, 2 << 4, SYSC_REG_SYSTEM_CONFIG1);
+
+ rt_sysc_m32(0x80000000, 0, RALINK_PCIE_CLK_GEN);
+ rt_sysc_m32(0x7f000000, 0xa << 24, RALINK_PCIE_CLK_GEN1);
+ rt_sysc_m32(0, 0x80000000, RALINK_PCIE_CLK_GEN);
+
mdelay(50);
- RALINK_RSTCTRL = (RALINK_RSTCTRL & ~RALINK_PCIE_RST);
+ rt_sysc_m32(RALINK_PCIE_RST, 0, RALINK_RSTCTRL);
/* Use GPIO control instead of PERST_N */
*(unsigned int *)(0xbe000620) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // set DATA
mdelay(1000);
- if(( RALINK_PCI0_STATUS & 0x1) == 0)
- {
+ if ((pcie_read(pcie, RT6855_PCIE0_OFFSET + RALINK_PCI_STATUS) & 0x1) == 0) {
printk("PCIE0 no card, disable it(RST&CLK)\n");
ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST);
- RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE0_CLK_EN);
+ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
pcie_link_status &= ~(1<<0);
} else {
pcie_link_status |= 1<<0;
- RALINK_PCI_PCIMSK_ADDR |= (1<<20); // enable pcie1 interrupt
+ val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR);
+ val |= (1<<20); // enable pcie1 interrupt
+ pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);
}
- if(( RALINK_PCI1_STATUS & 0x1) == 0)
- {
+ if ((pcie_read(pcie, RT6855_PCIE1_OFFSET + RALINK_PCI_STATUS) & 0x1) == 0) {
printk("PCIE1 no card, disable it(RST&CLK)\n");
ASSERT_SYSRST_PCIE(RALINK_PCIE1_RST);
- RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE1_CLK_EN);
+ rt_sysc_m32(RALINK_PCIE1_CLK_EN, 0, RALINK_CLKCFG1);
pcie_link_status &= ~(1<<1);
} else {
pcie_link_status |= 1<<1;
- RALINK_PCI_PCIMSK_ADDR |= (1<<21); // enable pcie1 interrupt
+ val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR);
+ val |= (1<<21); // enable pcie1 interrupt
+ pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);
}
- if (( RALINK_PCI2_STATUS & 0x1) == 0) {
+ if ((pcie_read(pcie, RT6855_PCIE2_OFFSET + RALINK_PCI_STATUS) & 0x1) == 0) {
printk("PCIE2 no card, disable it(RST&CLK)\n");
ASSERT_SYSRST_PCIE(RALINK_PCIE2_RST);
- RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE2_CLK_EN);
+ rt_sysc_m32(RALINK_PCIE2_CLK_EN, 0, RALINK_CLKCFG1);
pcie_link_status &= ~(1<<2);
} else {
pcie_link_status |= 1<<2;
- RALINK_PCI_PCIMSK_ADDR |= (1<<22); // enable pcie2 interrupt
+ val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR);
+ val |= (1<<22); // enable pcie2 interrupt
+ pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);
}
if (pcie_link_status == 0)
@@ -581,29 +573,37 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
3'b110 1 0 x
3'b111 2 1 0
*/
- switch(pcie_link_status) {
+ switch (pcie_link_status) {
case 2:
- RALINK_PCI_PCICFG_ADDR &= ~0x00ff0000;
- RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0
- RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1
+ val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
+ val &= ~0x00ff0000;
+ val |= 0x1 << 16; // port 0
+ val |= 0x0 << 20; // port 1
+ pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
break;
case 4:
- RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000;
- RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0
- RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1
- RALINK_PCI_PCICFG_ADDR |= 0x0 << 24; //port2
+ val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
+ val &= ~0x0fff0000;
+ val |= 0x1 << 16; //port0
+ val |= 0x2 << 20; //port1
+ val |= 0x0 << 24; //port2
+ pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
break;
case 5:
- RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000;
- RALINK_PCI_PCICFG_ADDR |= 0x0 << 16; //port0
- RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1
- RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2
+ val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
+ val &= ~0x0fff0000;
+ val |= 0x0 << 16; //port0
+ val |= 0x2 << 20; //port1
+ val |= 0x1 << 24; //port2
+ pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
break;
case 6:
- RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000;
- RALINK_PCI_PCICFG_ADDR |= 0x2 << 16; //port0
- RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1
- RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2
+ val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
+ val &= ~0x0fff0000;
+ val |= 0x2 << 16; //port0
+ val |= 0x0 << 20; //port1
+ val |= 0x1 << 24; //port2
+ pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
break;
}
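Editor's note: the switch above hard-codes rows of the table, but the underlying rule is simple: ports with a link get consecutive device numbers starting at 0, dead ports are parked on the remaining numbers, and PCICFG bits 19:16 / 23:20 / 27:24 hold the number for ports 0 / 1 / 2. One way to express that rule (a standalone sketch reproducing the table, not the code the patch uses):

#include <stdint.h>
#include <stdio.h>

static uint32_t pcicfg_portmap(unsigned int link_status)
{
	uint32_t val = 0;
	int next = 0, dead, p;

	/* first pass: number the ports that have a link */
	for (p = 0; p < 3; p++)
		if (link_status & (1 << p))
			val |= (uint32_t)next++ << (16 + 4 * p);
	/* second pass: park dead ports on the remaining numbers */
	dead = next;
	for (p = 0; p < 3; p++)
		if (!(link_status & (1 << p)))
			val |= (uint32_t)dead++ << (16 + 4 * p);
	return val;
}

int main(void)
{
	/* ports 0 and 2 up (3'b101): port0 -> 0, port2 -> 1, port1 parked */
	printf("%08x\n", pcicfg_portmap(0x5));	/* -> 01200000 */
	return 0;
}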
@@ -612,68 +612,91 @@ pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
ioport_resource.end = mt7621_res_pci_io1.end;
*/
- RALINK_PCI_MEMBASE = 0xffffffff; //RALINK_PCI_MM_MAP_BASE;
- RALINK_PCI_IOBASE = RALINK_PCI_IO_MAP_BASE;
+ pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
+ pcie_write(pcie, RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE);
//PCIe0
- if((pcie_link_status & 0x1) != 0) {
- RALINK_PCI0_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
- RALINK_PCI0_IMBASEBAR0_ADDR = MEMORY_BASE;
- RALINK_PCI0_CLASS = 0x06040001;
+ if ((pcie_link_status & 0x1) != 0) {
+ /* open 7FFF:2G; ENABLE */
+ pcie_write(pcie, 0x7FFF0001,
+ RT6855_PCIE0_OFFSET + RALINK_PCI_BAR0SETUP_ADDR);
+ pcie_write(pcie, MEMORY_BASE,
+ RT6855_PCIE0_OFFSET + RALINK_PCI_IMBASEBAR0_ADDR);
+ pcie_write(pcie, 0x06040001,
+ RT6855_PCIE0_OFFSET + RALINK_PCI_CLASS);
printk("PCIE0 enabled\n");
}
//PCIe1
if ((pcie_link_status & 0x2) != 0) {
- RALINK_PCI1_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
- RALINK_PCI1_IMBASEBAR0_ADDR = MEMORY_BASE;
- RALINK_PCI1_CLASS = 0x06040001;
+ /* open 7FFF:2G; ENABLE */
+ pcie_write(pcie, 0x7FFF0001,
+ RT6855_PCIE1_OFFSET + RALINK_PCI_BAR0SETUP_ADDR);
+ pcie_write(pcie, MEMORY_BASE,
+ RT6855_PCIE1_OFFSET + RALINK_PCI_IMBASEBAR0_ADDR);
+ pcie_write(pcie, 0x06040001,
+ RT6855_PCIE1_OFFSET + RALINK_PCI_CLASS);
printk("PCIE1 enabled\n");
}
//PCIe2
if ((pcie_link_status & 0x4) != 0) {
- RALINK_PCI2_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
- RALINK_PCI2_IMBASEBAR0_ADDR = MEMORY_BASE;
- RALINK_PCI2_CLASS = 0x06040001;
+ /* open 7FFF:2G; ENABLE */
+ pcie_write(pcie, 0x7FFF0001,
+ RT6855_PCIE2_OFFSET + RALINK_PCI_BAR0SETUP_ADDR);
+ pcie_write(pcie, MEMORY_BASE,
+ RT6855_PCIE2_OFFSET + RALINK_PCI_IMBASEBAR0_ADDR);
+ pcie_write(pcie, 0x06040001,
+ RT6855_PCIE2_OFFSET + RALINK_PCI_CLASS);
printk("PCIE2 enabled\n");
}
- switch(pcie_link_status) {
+ switch (pcie_link_status) {
case 7:
- read_config(0, 2, 0, 0x4, &val);
- write_config(0, 2, 0, 0x4, val|0x4);
- read_config(0, 2, 0, 0x70c, &val);
+ val = read_config(pcie, 2, 0x4);
+ write_config(pcie, 2, 0x4, val|0x4);
+ val = read_config(pcie, 2, 0x70c);
val &= ~(0xff)<<8;
val |= 0x50<<8;
- write_config(0, 2, 0, 0x70c, val);
+ write_config(pcie, 2, 0x70c, val);
case 3:
case 5:
case 6:
- read_config(0, 1, 0, 0x4, &val);
- write_config(0, 1, 0, 0x4, val|0x4);
- read_config(0, 1, 0, 0x70c, &val);
+ val = read_config(pcie, 1, 0x4);
+ write_config(pcie, 1, 0x4, val|0x4);
+ val = read_config(pcie, 1, 0x70c);
val &= ~(0xff)<<8;
val |= 0x50<<8;
- write_config(0, 1, 0, 0x70c, val);
+ write_config(pcie, 1, 0x70c, val);
default:
- read_config(0, 0, 0, 0x4, &val);
- write_config(0, 0, 0, 0x4, val|0x4); //bus master enable
- read_config(0, 0, 0, 0x70c, &val);
+ val = read_config(pcie, 0, 0x4);
+ write_config(pcie, 0, 0x4, val|0x4); //bus master enable
+ val = read_config(pcie, 0, 0x70c);
val &= ~(0xff)<<8;
val |= 0x50<<8;
- write_config(0, 0, 0, 0x70c, val);
+ write_config(pcie, 0, 0x70c, val);
}
- pci_load_of_ranges(&mt7621_controller, pdev->dev.of_node);
- setup_cm_memory_region(mt7621_controller.mem_resource);
- register_pci_controller(&mt7621_controller);
- return 0;
+ err = mt7621_pci_parse_request_of_pci_ranges(pcie);
+ if (err) {
+ dev_err(dev, "Error requesting pci resources from ranges");
+ return err;
+ }
-}
+ setup_cm_memory_region(&pcie->mem);
+
+ err = mt7621_pcie_request_resources(pcie, &res);
+ if (err) {
+ dev_err(dev, "Error requesting resources\n");
+ return err;
+ }
+
+ err = mt7621_pcie_register_host(bridge, &res);
+ if (err) {
+ dev_err(dev, "Error registering host\n");
+ return err;
+ }
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
return 0;
}
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index 0c3e498ae99c..b8566ed898f1 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -1,10 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/drivers/pinctrl/pinctrl-rt2880.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- *
* Copyright (C) 2013 John Crispin <blogic@openwrt.org>
*/
@@ -25,6 +20,7 @@
#include <asm/mach-ralink/mt7620.h>
#include "core.h"
+#include "pinctrl-utils.h"
#define SYSC_REG_GPIO_MODE 0x60
#define SYSC_REG_GPIO_MODE2 0x64
@@ -42,7 +38,7 @@ struct rt2880_priv {
const char **group_names;
int group_count;
- uint8_t *gpio;
+ u8 *gpio;
int max_pins;
};
@@ -54,20 +50,17 @@ static int rt2880_get_group_count(struct pinctrl_dev *pctrldev)
}
static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev,
- unsigned group)
+ unsigned int group)
{
struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
- if (group >= p->group_count)
- return NULL;
-
- return p->group_names[group];
+ return (group >= p->group_count) ? NULL : p->group_names[group];
}
static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev,
- unsigned group,
- const unsigned **pins,
- unsigned *num_pins)
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
{
struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
@@ -80,78 +73,38 @@ static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev,
return 0;
}
-static void rt2880_pinctrl_dt_free_map(struct pinctrl_dev *pctrldev,
- struct pinctrl_map *map, unsigned num_maps)
-{
- int i;
-
- for (i = 0; i < num_maps; i++)
- if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN ||
- map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
- kfree(map[i].data.configs.configs);
- kfree(map);
-}
-
-static void rt2880_pinctrl_pin_dbg_show(struct pinctrl_dev *pctrldev,
- struct seq_file *s,
- unsigned offset)
-{
- seq_printf(s, "ralink pio");
-}
-
-static void rt2880_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctrldev,
- struct device_node *np,
- struct pinctrl_map **map)
-{
- const char *function;
- int func = of_property_read_string(np, "ralink,function", &function);
- int grps = of_property_count_strings(np, "ralink,group");
- int i;
-
- if (func || !grps)
- return;
-
- for (i = 0; i < grps; i++) {
- const char *group;
-
- of_property_read_string_index(np, "ralink,group", i, &group);
-
- (*map)->type = PIN_MAP_TYPE_MUX_GROUP;
- (*map)->name = function;
- (*map)->data.mux.group = group;
- (*map)->data.mux.function = function;
- (*map)++;
- }
-}
-
static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- unsigned *num_maps)
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
{
- int max_maps = 0;
- struct pinctrl_map *tmp;
- struct device_node *np;
-
- for_each_child_of_node(np_config, np) {
- int ret = of_property_count_strings(np, "ralink,group");
-
- if (ret >= 0)
- max_maps += ret;
+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
+ struct property *prop;
+ const char *function_name, *group_name;
+ int ret;
+	int ngroups = 0;
+ unsigned int reserved_maps = 0;
+
+ for_each_node_with_property(np_config, "group")
+ ngroups++;
+
+	ret = of_property_read_string(np_config, "function", &function_name);
+	if (ret) {
+		dev_err(p->dev, "missing \"function\" property\n");
+		return -EINVAL;
+	}
+
+	*map = NULL;
+ ret = pinctrl_utils_reserve_map(pctrldev, map, &reserved_maps,
+ num_maps, ngroups);
+ if (ret) {
+ dev_err(p->dev, "can't reserve map: %d\n", ret);
+ return ret;
}
- if (!max_maps)
- return max_maps;
-
- *map = kcalloc(max_maps, sizeof(struct pinctrl_map), GFP_KERNEL);
- if (!*map)
- return -ENOMEM;
-
- tmp = *map;
-
- for_each_child_of_node(np_config, np)
- rt2880_pinctrl_dt_subnode_to_map(pctrldev, np, &tmp);
- *num_maps = max_maps;
+ of_property_for_each_string(np_config, "group", prop, group_name) {
+ ret = pinctrl_utils_add_map_mux(pctrldev, map, &reserved_maps,
+ num_maps, group_name,
+ function_name);
+ if (ret) {
+ dev_err(p->dev, "can't add map: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
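Editor's note: pinctrl_utils_reserve_map() and pinctrl_utils_add_map_mux() implement a count-reserve-append pattern: size the map array once up front, then fill it entry by entry. A userspace analogue of that pattern (hypothetical names, standalone sketch; not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct map { const char *group; const char *function; };

static int reserve_maps(struct map **map, unsigned int *reserved,
			unsigned int *num, unsigned int want)
{
	struct map *tmp = realloc(*map, (*num + want) * sizeof(**map));

	if (!tmp)
		return -1;
	*map = tmp;
	*reserved = *num + want;	/* remember how much room we have */
	return 0;
}

static int add_map_mux(struct map *map, unsigned int reserved,
		       unsigned int *num, const char *group,
		       const char *function)
{
	if (*num == reserved)
		return -1;		/* reservation exhausted */
	map[*num].group = group;
	map[*num].function = function;
	(*num)++;
	return 0;
}

int main(void)
{
	struct map *map = NULL;
	unsigned int reserved = 0, num = 0, i;
	const char *groups[] = { "i2c", "uart0" };

	if (reserve_maps(&map, &reserved, &num, 2))
		return 1;
	for (i = 0; i < 2; i++)
		add_map_mux(map, reserved, &num, groups[i], "gpio");
	printf("%u maps\n", num);
	free(map);
	return 0;
}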
@@ -160,9 +113,8 @@ static const struct pinctrl_ops rt2880_pctrl_ops = {
.get_groups_count = rt2880_get_group_count,
.get_group_name = rt2880_get_group_name,
.get_group_pins = rt2880_get_group_pins,
- .pin_dbg_show = rt2880_pinctrl_pin_dbg_show,
.dt_node_to_map = rt2880_pinctrl_dt_node_to_map,
- .dt_free_map = rt2880_pinctrl_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev)
@@ -173,7 +125,7 @@ static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev)
}
static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev,
- unsigned func)
+ unsigned int func)
{
struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
@@ -181,9 +133,9 @@ static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev,
}
static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
- unsigned func,
- const char * const **groups,
- unsigned * const num_groups)
+ unsigned int func,
+ const char * const **groups,
+ unsigned int * const num_groups)
{
struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
@@ -198,18 +150,18 @@ static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
}
static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
- unsigned func,
- unsigned group)
+ unsigned int func, unsigned int group)
{
struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
- u32 mode = 0;
+ u32 mode = 0;
u32 reg = SYSC_REG_GPIO_MODE;
int i;
int shift;
/* dont allow double use */
if (p->groups[group].enabled) {
- dev_err(p->dev, "%s is already enabled\n", p->groups[group].name);
+ dev_err(p->dev, "%s is already enabled\n",
+ p->groups[group].name);
return -EBUSY;
}
@@ -242,8 +194,8 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
}
static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev,
- struct pinctrl_gpio_range *range,
- unsigned pin)
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
{
struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
@@ -287,8 +239,8 @@ static int rt2880_pinmux_index(struct rt2880_priv *p)
}
/* allocate the group names array needed by the gpio function */
- p->group_names = devm_kcalloc(p->dev, p->group_count, sizeof(char *),
- GFP_KERNEL);
+ p->group_names = devm_kcalloc(p->dev, p->group_count,
+ sizeof(char *), GFP_KERNEL);
if (!p->group_names)
return -1;
@@ -322,7 +274,8 @@ static int rt2880_pinmux_index(struct rt2880_priv *p)
for (i = 0; i < p->group_count; i++) {
for (j = 0; j < p->groups[i].func_count; j++) {
f[c] = &p->groups[i].func[j];
- f[c]->groups = devm_kzalloc(p->dev, sizeof(int), GFP_KERNEL);
+ f[c]->groups = devm_kzalloc(p->dev, sizeof(int),
+ GFP_KERNEL);
f[c]->groups[0] = i;
f[c]->group_count = 1;
c++;
@@ -335,7 +288,10 @@ static int rt2880_pinmux_pins(struct rt2880_priv *p)
{
int i, j;
- /* loop over the functions and initialize the pins array. also work out the highest pin used */
+ /*
+ * loop over the functions and initialize the pins array.
+ * also work out the highest pin used.
+ */
for (i = 0; i < p->func_count; i++) {
int pin;
@@ -355,18 +311,16 @@ static int rt2880_pinmux_pins(struct rt2880_priv *p)
}
/* the buffer that tells us which pins are gpio */
- p->gpio = devm_kcalloc(p->dev,p->max_pins, sizeof(uint8_t),
- GFP_KERNEL);
+ p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL);
/* the pads needed to tell pinctrl about our pins */
- p->pads = devm_kcalloc(p->dev,
- p->max_pins, sizeof(struct pinctrl_pin_desc),
- GFP_KERNEL);
- if (!p->pads || !p->gpio ) {
+ p->pads = devm_kcalloc(p->dev, p->max_pins,
+ sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+ if (!p->pads || !p->gpio) {
dev_err(p->dev, "Failed to allocate gpio data\n");
return -ENOMEM;
}
- memset(p->gpio, 1, sizeof(uint8_t) * p->max_pins);
+ memset(p->gpio, 1, sizeof(u8) * p->max_pins);
for (i = 0; i < p->func_count; i++) {
if (!p->func[i]->pin_count)
continue;
@@ -383,10 +337,8 @@ static int rt2880_pinmux_pins(struct rt2880_priv *p)
/* strlen("ioXY") + 1 = 5 */
char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
- if (!name) {
- dev_err(p->dev, "Failed to allocate pad name\n");
+ if (!name)
return -ENOMEM;
- }
snprintf(name, 5, "io%d", i);
p->pads[i].number = i;
p->pads[i].name = name;
@@ -404,7 +356,7 @@ static int rt2880_pinmux_probe(struct platform_device *pdev)
struct device_node *np;
if (!rt2880_pinmux_data)
- return -ENOSYS;
+ return -ENOTSUPP;
/* setup the private data */
p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL);
@@ -445,7 +397,7 @@ static int rt2880_pinmux_probe(struct platform_device *pdev)
return -EINVAL;
}
- range = devm_kzalloc(p->dev, sizeof(struct pinctrl_gpio_range) + 4, GFP_KERNEL);
+ range = devm_kzalloc(p->dev, sizeof(*range) + 4, GFP_KERNEL);
range->name = name = (char *) &range[1];
sprintf(name, "pio");
range->npins = __be32_to_cpu(*ngpio);
diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
index 37f299080410..d045b5568e0f 100644
--- a/drivers/staging/mt7621-spi/spi-mt7621.c
+++ b/drivers/staging/mt7621-spi/spi-mt7621.c
@@ -55,7 +55,8 @@
#define MT7621_CPOL BIT(4)
#define MT7621_LSB_FIRST BIT(3)
-#define RT2880_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH)
+#define RT2880_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | \
+ SPI_LSB_FIRST | SPI_CS_HIGH)
struct mt7621_spi;
@@ -65,6 +66,7 @@ struct mt7621_spi {
unsigned int sys_freq;
unsigned int speed;
struct clk *clk;
+ int pending_write;
struct mt7621_spi_ops *ops;
};
@@ -96,6 +98,7 @@ static void mt7621_spi_reset(struct mt7621_spi *rs, int duplex)
master &= ~(1 << 10);
mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
+ rs->pending_write = 0;
}
static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
@@ -104,7 +107,7 @@ static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
int cs = spi->chip_select;
u32 polar = 0;
- mt7621_spi_reset(rs, cs);
+ mt7621_spi_reset(rs, cs);
if (enable)
polar = BIT(cs);
mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
@@ -137,36 +140,34 @@ static int mt7621_spi_prepare(struct spi_device *spi, unsigned int speed)
reg |= MT7621_LSB_FIRST;
reg &= ~(MT7621_CPHA | MT7621_CPOL);
- switch(spi->mode & (SPI_CPOL | SPI_CPHA)) {
- case SPI_MODE_0:
- break;
- case SPI_MODE_1:
- reg |= MT7621_CPHA;
- break;
- case SPI_MODE_2:
- reg |= MT7621_CPOL;
- break;
- case SPI_MODE_3:
- reg |= MT7621_CPOL | MT7621_CPHA;
- break;
+ switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ break;
+ case SPI_MODE_1:
+ reg |= MT7621_CPHA;
+ break;
+ case SPI_MODE_2:
+ reg |= MT7621_CPOL;
+ break;
+ case SPI_MODE_3:
+ reg |= MT7621_CPOL | MT7621_CPHA;
+ break;
}
mt7621_spi_write(rs, MT7621_SPI_MASTER, reg);
return 0;
}
-static inline int mt7621_spi_wait_till_ready(struct spi_device *spi)
+static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
{
- struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
int i;
for (i = 0; i < RALINK_SPI_WAIT_MAX_LOOP; i++) {
u32 status;
status = mt7621_spi_read(rs, MT7621_SPI_TRANS);
- if ((status & SPITRANS_BUSY) == 0) {
+ if ((status & SPITRANS_BUSY) == 0)
return 0;
- }
cpu_relax();
udelay(1);
}
@@ -174,90 +175,124 @@ static inline int mt7621_spi_wait_till_ready(struct spi_device *spi)
return -ETIMEDOUT;
}
-static int mt7621_spi_transfer_half_duplex(struct spi_master *master,
- struct spi_message *m)
+static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
+ int rx_len, u8 *buf)
{
- struct mt7621_spi *rs = spi_master_get_devdata(master);
- struct spi_device *spi = m->spi;
- unsigned int speed = spi->max_speed_hz;
- struct spi_transfer *t = NULL;
- int status = 0;
- int i, len = 0;
- int rx_len = 0;
- u32 data[9] = { 0 };
- u32 val;
+ /* Combine with any pending write, and perform one or
+	 * more half-duplex transactions reading 'rx_len' bytes.
+ * Data to be written is already in MT7621_SPI_DATA*
+ */
+ int tx_len = rs->pending_write;
- mt7621_spi_wait_till_ready(spi);
+ rs->pending_write = 0;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- const u8 *buf = t->tx_buf;
+ while (rx_len || tx_len) {
+ int i;
+ u32 val = (min(tx_len, 4) * 8) << 24;
+ int rx = min(rx_len, 32);
- if (t->rx_buf)
- rx_len += t->len;
+ if (tx_len > 4)
+ val |= (tx_len - 4) * 8;
+ val |= (rx * 8) << 12;
+ mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val);
- if (!buf)
- continue;
+ tx_len = 0;
- if (t->speed_hz < speed)
- speed = t->speed_hz;
+ val = mt7621_spi_read(rs, MT7621_SPI_TRANS);
+ val |= SPI_CTL_START;
+ mt7621_spi_write(rs, MT7621_SPI_TRANS, val);
- if (WARN_ON(len + t->len > 36)) {
- status = -EIO;
- goto msg_done;
- }
+ mt7621_spi_wait_till_ready(rs);
- for (i = 0; i < t->len; i++, len++)
- data[len / 4] |= buf[i] << (8 * (len & 3));
+ for (i = 0; i < rx; i++) {
+ if ((i % 4) == 0)
+ val = mt7621_spi_read(rs, MT7621_SPI_DATA0 + i);
+ *buf++ = val & 0xff;
+ val >>= 8;
+ }
+ rx_len -= i;
}
+}
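Editor's note: the MOREBUF programming above packs three bit-counts into one register: bits 31:24 the command phase (first four tx bytes), the low bits the remaining tx bits, and bits 20:12 the rx bits. That field layout is inferred from this function, so treat it as an assumption. A standalone sketch of the encoding:

#include <stdint.h>
#include <stdio.h>

static uint32_t morebuf(int tx_len, int rx)
{
	/* command phase: up to the first 4 tx bytes, counted in bits */
	uint32_t val = (uint32_t)((tx_len < 4 ? tx_len : 4) * 8) << 24;

	if (tx_len > 4)
		val |= (uint32_t)(tx_len - 4) * 8;	/* remaining tx bits */
	val |= ((uint32_t)rx * 8) << 12;		/* rx bits */
	return val;
}

int main(void)
{
	/* 5 tx bytes pending, 32 rx bytes requested */
	printf("%08x\n", morebuf(5, 32));	/* -> 20100008 */
	return 0;
}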
- if (WARN_ON(rx_len > 32)) {
- status = -EIO;
- goto msg_done;
- }
+static inline void mt7621_spi_flush(struct mt7621_spi *rs)
+{
+ mt7621_spi_read_half_duplex(rs, 0, NULL);
+}
- if (mt7621_spi_prepare(spi, speed)) {
- status = -EIO;
- goto msg_done;
+static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
+ int tx_len, const u8 *buf)
+{
+ int val = 0;
+ int len = rs->pending_write;
+
+ if (len & 3) {
+ val = mt7621_spi_read(rs, MT7621_SPI_OPCODE + (len & ~3));
+ if (len < 4) {
+ val <<= (4 - len) * 8;
+ val = swab32(val);
+ }
}
- data[0] = swab32(data[0]);
- if (len < 4)
- data[0] >>= (4 - len) * 8;
-
- for (i = 0; i < len; i += 4)
- mt7621_spi_write(rs, MT7621_SPI_OPCODE + i, data[i / 4]);
-
- val = (min_t(int, len, 4) * 8) << 24;
- if (len > 4)
- val |= (len - 4) * 8;
- val |= (rx_len * 8) << 12;
- mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val);
- mt7621_spi_set_cs(spi, 1);
+ while (tx_len > 0) {
+ if (len >= 36) {
+ rs->pending_write = len;
+ mt7621_spi_flush(rs);
+ len = 0;
+ }
- val = mt7621_spi_read(rs, MT7621_SPI_TRANS);
- val |= SPI_CTL_START;
- mt7621_spi_write(rs, MT7621_SPI_TRANS, val);
+ val |= *buf++ << (8 * (len & 3));
+ len++;
+ if ((len & 3) == 0) {
+ if (len == 4)
+ /* The byte-order of the opcode is weird! */
+ val = swab32(val);
+ mt7621_spi_write(rs, MT7621_SPI_OPCODE + len - 4, val);
+ val = 0;
+ }
+ tx_len -= 1;
+ }
+ if (len & 3) {
+ if (len < 4) {
+ val = swab32(val);
+ val >>= (4 - len) * 8;
+ }
+ mt7621_spi_write(rs, MT7621_SPI_OPCODE + (len & ~3), val);
+ }
+ rs->pending_write = len;
+}
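Editor's note: the opcode register is byte-swapped relative to the data registers, hence the swab32() each time the first four bytes complete ("The byte-order of the opcode is weird!"). A standalone sketch of the pack-then-swap step (swab32 reimplemented locally for illustration):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x & 0xff00) << 8) | (x << 24);
}

int main(void)
{
	const uint8_t buf[4] = { 0x9f, 0x01, 0x02, 0x03 };
	uint32_t val = 0;
	int len;

	/* pack bytes low-to-high, as the write loop above does ... */
	for (len = 0; len < 4; len++)
		val |= (uint32_t)buf[len] << (8 * (len & 3));
	/* ... then swap so byte 0 lands in the opcode's high lane */
	printf("packed %08x, opcode reg %08x\n", val, swab32(val));
	return 0;
}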
- mt7621_spi_wait_till_ready(spi);
+static int mt7621_spi_transfer_half_duplex(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct mt7621_spi *rs = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ unsigned int speed = spi->max_speed_hz;
+ struct spi_transfer *t = NULL;
+ int status = 0;
- mt7621_spi_set_cs(spi, 0);
+ mt7621_spi_wait_till_ready(rs);
- for (i = 0; i < rx_len; i += 4)
- data[i / 4] = mt7621_spi_read(rs, MT7621_SPI_DATA0 + i);
+ list_for_each_entry(t, &m->transfers, transfer_list)
+ if (t->speed_hz < speed)
+ speed = t->speed_hz;
- m->actual_length = len + rx_len;
+ if (mt7621_spi_prepare(spi, speed)) {
+ status = -EIO;
+ goto msg_done;
+ }
- len = 0;
+ mt7621_spi_set_cs(spi, 1);
+ m->actual_length = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- u8 *buf = t->rx_buf;
-
- if (!buf)
- continue;
-
- for (i = 0; i < t->len; i++, len++)
- buf[i] = data[len / 4] >> (8 * (len & 3));
+ if (t->rx_buf)
+ mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf);
+ else if (t->tx_buf)
+ mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf);
+ m->actual_length += t->len;
}
+ mt7621_spi_flush(rs);
+ mt7621_spi_set_cs(spi, 0);
msg_done:
m->status = status;
spi_finalize_current_message(master);
@@ -278,7 +313,7 @@ static int mt7621_spi_transfer_full_duplex(struct spi_master *master,
u32 data[9] = { 0 };
u32 val = 0;
- mt7621_spi_wait_till_ready(spi);
+ mt7621_spi_wait_till_ready(rs);
list_for_each_entry(t, &m->transfers, transfer_list) {
const u8 *buf = t->tx_buf;
@@ -323,7 +358,7 @@ static int mt7621_spi_transfer_full_duplex(struct spi_master *master,
val |= SPI_CTL_START;
mt7621_spi_write(rs, MT7621_SPI_TRANS, val);
- mt7621_spi_wait_till_ready(spi);
+ mt7621_spi_wait_till_ready(rs);
mt7621_spi_set_cs(spi, 0);
@@ -384,11 +419,6 @@ static const struct of_device_id mt7621_spi_match[] = {
};
MODULE_DEVICE_TABLE(of, mt7621_spi_match);
-static size_t max_transfer_size(struct spi_device *spi)
-{
- return 32;
-}
-
static int mt7621_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -434,7 +464,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = 2;
- master->max_transfer_size = max_transfer_size;
dev_set_drvdata(&pdev->dev, master);
@@ -444,6 +473,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
rs->master = master;
rs->sys_freq = clk_get_rate(rs->clk);
rs->ops = ops;
+ rs->pending_write = 0;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
device_reset(&pdev->dev);
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index e461168313bf..4e6611e4c59b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return (u16)smp_processor_id();
-}
-
static void xlr_hw_set_mac_addr(struct net_device *ndev)
{
struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -403,7 +396,7 @@ static const struct net_device_ops xlr_netdev_ops = {
.ndo_open = xlr_net_open,
.ndo_stop = xlr_net_stop,
.ndo_start_xmit = xlr_net_start_xmit,
- .ndo_select_queue = xlr_net_select_queue,
+ .ndo_select_queue = dev_pick_tx_cpu_id,
.ndo_set_mac_address = xlr_net_set_mac_addr,
.ndo_set_rx_mode = xlr_set_rx_mode,
.ndo_get_stats64 = xlr_stats,
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
index 5588be395f2a..9873a0130ad5 100644
--- a/drivers/staging/octeon-usb/Makefile
+++ b/drivers/staging/octeon-usb/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index cded30f145aa..cff5e790b196 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
index 3353aefe662e..769c36cf6614 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ b/drivers/staging/octeon-usb/octeon-hcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Octeon HCD hardware register definitions.
*
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index d277f048789e..c91a56f77bcb 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -4,7 +4,7 @@ config FB_OLPC_DCON
depends on I2C
depends on (GPIO_CS5535 || GPIO_CS5535=n)
select BACKLIGHT_CLASS_DEVICE
- ---help---
+ help
In order to support very low power operation, the XO laptop uses a
secondary Display CONtroller, or DCON. This secondary controller
is present in the video pipeline between the primary display
@@ -18,7 +18,7 @@ config FB_OLPC_DCON_1
bool "OLPC XO-1 DCON support"
depends on FB_OLPC_DCON && GPIO_CS5535
default y
- ---help---
+ help
Enable support for the DCON in XO-1 model laptops. The kernel
communicates with the DCON using model-specific code. If you
have an XO-1 (or if you're unsure what model you have), you should
@@ -28,7 +28,7 @@ config FB_OLPC_DCON_1_5
bool "OLPC XO-1.5 DCON support"
depends on FB_OLPC_DCON && ACPI
default y
- ---help---
+ help
Enable support for the DCON in XO-1.5 model laptops. The kernel
communicates with the DCON using model-specific code. If you
have an XO-1.5 (or if you're unsure what model you have), you
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index fa89bb97c7b0..c987aaf894e7 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -91,10 +91,10 @@ struct dcon_priv {
};
struct dcon_platform_data {
- int (*init)(struct dcon_priv *);
+ int (*init)(struct dcon_priv *dcon);
void (*bus_stabilize_wiggle)(void);
- void (*set_dconload)(int);
- int (*read_status)(u8 *);
+ void (*set_dconload)(int load);
+ int (*read_status)(u8 *status);
};
#include <linux/interrupt.h>
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index b061f77dda41..c85a805a1243 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* userspace interface for pi433 radio module
*
@@ -65,10 +66,12 @@ static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */
static struct class *pi433_class; /* mainly for udev to create /dev/pi433 */
-/* tx config is instance specific
+/*
+ * tx config is instance specific
* so with each open a new tx config struct is needed
*/
-/* rx config is device specific
+/*
+ * rx config is device specific
 * so we have just one rx config, embedded in device struct
*/
struct pi433_device {
@@ -78,7 +81,6 @@ struct pi433_device {
struct device *dev;
struct cdev *cdev;
struct spi_device *spi;
- unsigned int users;
/* irq related values */
struct gpio_desc *gpiod[NUM_DIO];
@@ -584,7 +586,8 @@ pi433_tx_thread(void *data)
if (kthread_should_stop())
return 0;
- /* get data from fifo in the following order:
+ /*
+ * get data from fifo in the following order:
* - tx_cfg
* - size of message
* - message
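Editorial note: a hypothetical sketch of the framing this comment describes, assuming the driver's kfifo-based tx queue (identifiers taken from the surrounding code; locking and error handling omitted). The consumer must pop fields in exactly the order the producer pushed them:

    /* producer side (pi433_write), under tx_fifo_lock */
    kfifo_in(&device->tx_fifo, &instance->tx_cfg, sizeof(instance->tx_cfg));
    kfifo_in(&device->tx_fifo, &count, sizeof(count));
    kfifo_in(&device->tx_fifo, buffer, count);

    /* consumer side (pi433_tx_thread) reads back in the same order */
    kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg));
    kfifo_out(&device->tx_fifo, &size, sizeof(size));
    kfifo_out(&device->tx_fifo, buffer, size);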
@@ -639,7 +642,8 @@ pi433_tx_thread(void *data)
dev_dbg(device->dev,
"read %d message byte(s) from fifo queue.", retval);
- /* if rx is active, we need to interrupt the waiting for
+ /*
+ * if rx is active, we need to interrupt the waiting for
* incoming telegrams, to be able to send something.
 * We are only allowed to do so if currently no reception takes
 * place; otherwise we need to wait for the incoming telegram
@@ -649,14 +653,16 @@ pi433_tx_thread(void *data)
!device->rx_active ||
device->interrupt_rx_allowed);
- /* prevent race conditions
+ /*
+ * prevent race conditions
* irq will be reenabled after tx config is set
*/
disable_irq(device->irq_num[DIO0]);
device->tx_active = true;
if (device->rx_active && !rx_interrupted) {
- /* rx is currently waiting for a telegram;
+ /*
+ * rx is currently waiting for a telegram;
* we need to set the radio module to standby
*/
retval = rf69_set_mode(device->spi, standby);
@@ -826,11 +832,15 @@ pi433_write(struct file *filp, const char __user *buf,
instance = filp->private_data;
device = instance->device;
- /* check, whether internal buffer (tx thread) is big enough for requested size */
+ /*
+	 * check whether the internal buffer (tx thread) is big enough
+ * for requested size
+ */
if (count > MAX_MSG_SIZE)
return -EMSGSIZE;
- /* write the following sequence into fifo:
+ /*
+ * write the following sequence into fifo:
* - tx_cfg
* - size of message
* - message
@@ -880,15 +890,13 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int retval = 0;
struct pi433_instance *instance;
struct pi433_device *device;
+ struct pi433_tx_cfg tx_cfg;
void __user *argp = (void __user *)arg;
/* Check type and command number */
if (_IOC_TYPE(cmd) != PI433_IOC_MAGIC)
return -ENOTTY;
- /* TODO? guard against device removal before, or while,
- * we issue this ioctl. --> device_get()
- */
instance = filp->private_data;
device = instance->device;
@@ -902,9 +910,11 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EFAULT;
break;
case PI433_IOC_WR_TX_CFG:
- if (copy_from_user(&instance->tx_cfg, argp,
- sizeof(struct pi433_tx_cfg)))
+ if (copy_from_user(&tx_cfg, argp, sizeof(struct pi433_tx_cfg)))
return -EFAULT;
+ mutex_lock(&device->tx_fifo_lock);
+ memcpy(&instance->tx_cfg, &tx_cfg, sizeof(struct pi433_tx_cfg));
+ mutex_unlock(&device->tx_fifo_lock);
break;
case PI433_IOC_RD_RX_CFG:
if (copy_to_user(argp, &device->rx_cfg,
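Editorial note on the WR_TX_CFG change above: copy_from_user() may fault and sleep, so it runs without the mutex held, and tx_fifo_lock only covers the short in-kernel memcpy(). Readers that take the same lock before consuming instance->tx_cfg can then never observe a half-written config. A minimal sketch of the pattern, using the names from the hunk:

    struct pi433_tx_cfg tx_cfg;        /* stack staging copy */

    if (copy_from_user(&tx_cfg, argp, sizeof(tx_cfg)))
            return -EFAULT;            /* page fault handled lock-free */

    mutex_lock(&device->tx_fifo_lock);
    memcpy(&instance->tx_cfg, &tx_cfg, sizeof(tx_cfg));
    mutex_unlock(&device->tx_fifo_lock);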
@@ -960,19 +970,9 @@ static int pi433_open(struct inode *inode, struct file *filp)
return -ENODEV;
}
- if (!device->rx_buffer) {
- device->rx_buffer = kmalloc(MAX_MSG_SIZE, GFP_KERNEL);
- if (!device->rx_buffer)
- return -ENOMEM;
- }
-
- device->users++;
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
- if (!instance) {
- kfree(device->rx_buffer);
- device->rx_buffer = NULL;
+ if (!instance)
return -ENOMEM;
- }
 /* setup instance data */
instance->device = device;
@@ -989,23 +989,11 @@ static int pi433_open(struct inode *inode, struct file *filp)
static int pi433_release(struct inode *inode, struct file *filp)
{
struct pi433_instance *instance;
- struct pi433_device *device;
instance = filp->private_data;
- device = instance->device;
kfree(instance);
filp->private_data = NULL;
- /* last close? */
- device->users--;
-
- if (!device->users) {
- kfree(device->rx_buffer);
- device->rx_buffer = NULL;
- if (!device->spi)
- kfree(device);
- }
-
return 0;
}
@@ -1115,7 +1103,8 @@ static void pi433_free_minor(struct pi433_device *dev)
static const struct file_operations pi433_fops = {
.owner = THIS_MODULE,
- /* REVISIT switch to aio primitives, so that userspace
+ /*
+ * REVISIT switch to aio primitives, so that userspace
* gets more complete API coverage. It'll simplify things
* too, except for the locking.
*/
@@ -1138,7 +1127,10 @@ static int pi433_probe(struct spi_device *spi)
/* setup spi parameters */
spi->mode = 0x00;
spi->bits_per_word = 8;
- /* spi->max_speed_hz = 10000000; 1MHz already set by device tree overlay */
+ /*
+ * spi->max_speed_hz = 10000000;
+ * 1MHz already set by device tree overlay
+ */
retval = spi_setup(spi);
if (retval) {
@@ -1175,6 +1167,13 @@ static int pi433_probe(struct spi_device *spi)
device->tx_active = false;
device->interrupt_rx_allowed = false;
+ /* init rx buffer */
+ device->rx_buffer = kmalloc(MAX_MSG_SIZE, GFP_KERNEL);
+ if (!device->rx_buffer) {
+ retval = -ENOMEM;
+ goto RX_failed;
+ }
+
/* init wait queues */
init_waitqueue_head(&device->tx_wait_queue);
init_waitqueue_head(&device->rx_wait_queue);
@@ -1250,6 +1249,7 @@ static int pi433_probe(struct spi_device *spi)
device->minor);
if (IS_ERR(device->tx_task_struct)) {
dev_dbg(device->dev, "start of send thread failed");
+ retval = PTR_ERR(device->tx_task_struct);
goto send_thread_failed;
}
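Editorial note: kthread_run() never returns NULL on failure; it returns an ERR_PTR-encoded errno. Without the added line, retval still held 0 from the previous call, so probe could report success while the tx thread was missing. The canonical pattern (threadfn and data are placeholders here):

    task = kthread_run(threadfn, data, "pi433.%d", device->minor);
    if (IS_ERR(task))
            return PTR_ERR(task);      /* e.g. -ENOMEM, never NULL */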
@@ -1277,6 +1277,8 @@ device_create_failed:
minor_failed:
free_gpio(device);
GPIO_failed:
+ kfree(device->rx_buffer);
+RX_failed:
kfree(device);
return retval;
@@ -1300,8 +1302,8 @@ static int pi433_remove(struct spi_device *spi)
pi433_free_minor(device);
- if (device->users == 0)
- kfree(device);
+ kfree(device->rx_buffer);
+ kfree(device);
return 0;
}
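Editorial note: with the per-open refcount gone, the rx buffer's lifetime is exactly the device's - allocated in probe, freed unconditionally in remove. As a hedged alternative sketch (not what the patch does), a managed allocation would delete both kfree() calls and the RX_failed unwind label:

    device->rx_buffer = devm_kmalloc(&spi->dev, MAX_MSG_SIZE, GFP_KERNEL);
    if (!device->rx_buffer)
            return -ENOMEM;    /* freed automatically on driver detach */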
@@ -1322,7 +1324,8 @@ static struct spi_driver pi433_spi_driver = {
.probe = pi433_probe,
.remove = pi433_remove,
- /* NOTE: suspend/resume methods are not necessary here.
+ /*
+ * NOTE: suspend/resume methods are not necessary here.
* We don't do anything except pass the requests to/from
* the underlying controller. The refrigerator handles
* most issues; the controller driver handles the rest.
@@ -1335,13 +1338,15 @@ static int __init pi433_init(void)
{
int status;
- /* If MAX_MSG_SIZE is smaller then FIFO_SIZE, the driver won't
+ /*
+	 * If MAX_MSG_SIZE is smaller than FIFO_SIZE, the driver won't
	 * work stably - risk of buffer overflow
*/
if (MAX_MSG_SIZE < FIFO_SIZE)
return -EINVAL;
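Editorial note: since MAX_MSG_SIZE and FIFO_SIZE are both compile-time constants, this check could also fail the build rather than module init - a sketch, assuming it sits at the top of pi433_init():

    #include <linux/build_bug.h>

    BUILD_BUG_ON(MAX_MSG_SIZE < FIFO_SIZE);   /* compile-time assert */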
- /* Claim device numbers. Then register a class
+ /*
+	 * Claim device numbers. Then register a class
	 * that will key udev/mdev to add/remove /dev nodes.
	 * Last, register the driver which manages those device numbers.
*/
diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h
index b6e214c29ddf..2d4fa77c793e 100644
--- a/drivers/staging/pi433/pi433_if.h
+++ b/drivers/staging/pi433/pi433_if.h
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* include/linux/TODO
*
* userspace interface for pi433 radio module
@@ -42,7 +43,8 @@ enum option_on_off {
/* IOCTL structs and commands */
/**
- * struct pi433_tx_config - describes the configuration of the radio module for sending
+ * struct pi433_tx_cfg
+ * describes the configuration of the radio module for sending
* @frequency:
* @bit_rate:
* @modulation:
@@ -89,7 +91,8 @@ struct pi433_tx_cfg {
};
/**
- * struct pi433_rx_config - describes the configuration of the radio module for sending
+ * struct pi433_rx_cfg
+ * describes the configuration of the radio module for receiving
* @frequency:
* @bit_rate:
* @modulation:
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 90280e9b006d..085272fb393f 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* abstraction of the spi interface of HopeRf rf69 radio module
*
@@ -41,7 +42,8 @@ static u8 rf69_read_reg(struct spi_device *spi, u8 addr)
#ifdef DEBUG_VALUES
if (retval < 0)
- /* should never happen, since we already checked,
+ /*
+	 * should never happen, since we already checked
	 * that the module is connected. Therefore no error
* handling, just an optional error message...
*/
@@ -65,7 +67,8 @@ static int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value)
#ifdef DEBUG_VALUES
if (retval < 0)
- /* should never happen, since we already checked,
+ /*
+	 * should never happen, since we already checked
	 * that the module is connected. Therefore no error
* handling, just an optional error message...
*/
@@ -111,30 +114,29 @@ static inline int rf69_read_mod_write(struct spi_device *spi, u8 reg,
int rf69_set_mode(struct spi_device *spi, enum mode mode)
{
- switch (mode) {
- case transmit:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
- OPMODE_MODE_TRANSMIT);
- case receive:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
- OPMODE_MODE_RECEIVE);
- case synthesizer:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
- OPMODE_MODE_SYNTHESIZER);
- case standby:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
- OPMODE_MODE_STANDBY);
- case mode_sleep:
- return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
- OPMODE_MODE_SLEEP);
- default:
+ static const u8 mode_map[] = {
+ [transmit] = OPMODE_MODE_TRANSMIT,
+ [receive] = OPMODE_MODE_RECEIVE,
+ [synthesizer] = OPMODE_MODE_SYNTHESIZER,
+ [standby] = OPMODE_MODE_STANDBY,
+ [mode_sleep] = OPMODE_MODE_SLEEP,
+ };
+
+ if (unlikely(mode >= ARRAY_SIZE(mode_map))) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
- // we are using packet mode, so this check is not really needed
- // but waiting for mode ready is necessary when going from sleep because the FIFO may not be immediately available from previous mode
- //while (_mode == RF69_MODE_SLEEP && (READ_REG(REG_IRQFLAGS1) & RF_IRQFLAGS1_MODEREADY) == 0x00); // Wait for ModeReady
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE,
+ mode_map[mode]);
+
+ /*
+ * we are using packet mode, so this check is not really needed
+ * but waiting for mode ready is necessary when going from sleep
+ * because the FIFO may not be immediately available from previous mode
+ * while (_mode == RF69_MODE_SLEEP && (READ_REG(REG_IRQFLAGS1) &
+	 *	RF_IRQFLAGS1_MODEREADY) == 0x00); // Wait for ModeReady
+ */
}
int rf69_set_data_mode(struct spi_device *spi, u8 data_mode)
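Editorial note on the lookup-table rewrite used throughout this file: designated initializers zero-fill unnamed entries, so the pattern is only safe while the enum stays dense and zero-based - the range check guards only the upper bound. A hypothetical illustration (enum and values invented for this sketch):

    enum example { a = 0, b = 1, d = 3 };      /* gap at value 2 */

    static const u8 example_map[] = {
            [a] = 0x10,
            [b] = 0x20,
            [d] = 0x40,
    };
    /* example_map[2] is 0x00: in range, yet it names no real setting */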
@@ -145,19 +147,19 @@ int rf69_set_data_mode(struct spi_device *spi, u8 data_mode)
int rf69_set_modulation(struct spi_device *spi, enum modulation modulation)
{
- switch (modulation) {
- case OOK:
- return rf69_read_mod_write(spi, REG_DATAMODUL,
- MASK_DATAMODUL_MODULATION_TYPE,
- DATAMODUL_MODULATION_TYPE_OOK);
- case FSK:
- return rf69_read_mod_write(spi, REG_DATAMODUL,
- MASK_DATAMODUL_MODULATION_TYPE,
- DATAMODUL_MODULATION_TYPE_FSK);
- default:
+ static const u8 modulation_map[] = {
+ [OOK] = DATAMODUL_MODULATION_TYPE_OOK,
+ [FSK] = DATAMODUL_MODULATION_TYPE_FSK,
+ };
+
+ if (unlikely(modulation >= ARRAY_SIZE(modulation_map))) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
+
+ return rf69_read_mod_write(spi, REG_DATAMODUL,
+ MASK_DATAMODUL_MODULATION_TYPE,
+ modulation_map[modulation]);
}
static enum modulation rf69_get_modulation(struct spi_device *spi)
@@ -373,43 +375,30 @@ int rf69_set_output_power_level(struct spi_device *spi, u8 power_level)
int rf69_set_pa_ramp(struct spi_device *spi, enum pa_ramp pa_ramp)
{
- switch (pa_ramp) {
- case ramp3400:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_3400);
- case ramp2000:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_2000);
- case ramp1000:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_1000);
- case ramp500:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_500);
- case ramp250:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_250);
- case ramp125:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_125);
- case ramp100:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_100);
- case ramp62:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_62);
- case ramp50:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_50);
- case ramp40:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_40);
- case ramp31:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_31);
- case ramp25:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_25);
- case ramp20:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_20);
- case ramp15:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_15);
- case ramp12:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_12);
- case ramp10:
- return rf69_write_reg(spi, REG_PARAMP, PARAMP_10);
- default:
+ static const u8 pa_ramp_map[] = {
+ [ramp3400] = PARAMP_3400,
+ [ramp2000] = PARAMP_2000,
+ [ramp1000] = PARAMP_1000,
+ [ramp500] = PARAMP_500,
+ [ramp250] = PARAMP_250,
+ [ramp125] = PARAMP_125,
+ [ramp100] = PARAMP_100,
+ [ramp62] = PARAMP_62,
+ [ramp50] = PARAMP_50,
+ [ramp40] = PARAMP_40,
+ [ramp31] = PARAMP_31,
+ [ramp25] = PARAMP_25,
+ [ramp20] = PARAMP_20,
+ [ramp15] = PARAMP_15,
+ [ramp10] = PARAMP_10,
+ };
+
+ if (unlikely(pa_ramp >= ARRAY_SIZE(pa_ramp_map))) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
+
+ return rf69_write_reg(spi, REG_PARAMP, pa_ramp_map[pa_ramp]);
}
int rf69_set_antenna_impedance(struct spi_device *spi,
@@ -428,32 +417,23 @@ int rf69_set_antenna_impedance(struct spi_device *spi,
int rf69_set_lna_gain(struct spi_device *spi, enum lna_gain lna_gain)
{
- switch (lna_gain) {
- case automatic:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
- LNA_GAIN_AUTO);
- case max:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
- LNA_GAIN_MAX);
- case max_minus_6:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
- LNA_GAIN_MAX_MINUS_6);
- case max_minus_12:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
- LNA_GAIN_MAX_MINUS_12);
- case max_minus_24:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
- LNA_GAIN_MAX_MINUS_24);
- case max_minus_36:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
- LNA_GAIN_MAX_MINUS_36);
- case max_minus_48:
- return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
- LNA_GAIN_MAX_MINUS_48);
- default:
+ static const u8 lna_gain_map[] = {
+ [automatic] = LNA_GAIN_AUTO,
+ [max] = LNA_GAIN_MAX,
+ [max_minus_6] = LNA_GAIN_MAX_MINUS_6,
+ [max_minus_12] = LNA_GAIN_MAX_MINUS_12,
+ [max_minus_24] = LNA_GAIN_MAX_MINUS_24,
+ [max_minus_36] = LNA_GAIN_MAX_MINUS_36,
+ [max_minus_48] = LNA_GAIN_MAX_MINUS_48,
+ };
+
+ if (unlikely(lna_gain >= ARRAY_SIZE(lna_gain_map))) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
+
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN,
+ lna_gain_map[lna_gain]);
}
static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
@@ -516,43 +496,24 @@ int rf69_set_bandwidth_during_afc(struct spi_device *spi,
int rf69_set_ook_threshold_dec(struct spi_device *spi,
enum threshold_decrement threshold_decrement)
{
- switch (threshold_decrement) {
- case dec_every8th:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_EVERY_8TH);
- case dec_every4th:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_EVERY_4TH);
- case dec_every2nd:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_EVERY_2ND);
- case dec_once:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_ONCE);
- case dec_twice:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_TWICE);
- case dec_4times:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_4_TIMES);
- case dec_8times:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_8_TIMES);
- case dec_16times:
- return rf69_read_mod_write(spi, REG_OOKPEAK,
- MASK_OOKPEAK_THRESDEC,
- OOKPEAK_THRESHDEC_16_TIMES);
- default:
+ static const u8 td_map[] = {
+ [dec_every8th] = OOKPEAK_THRESHDEC_EVERY_8TH,
+ [dec_every4th] = OOKPEAK_THRESHDEC_EVERY_4TH,
+ [dec_every2nd] = OOKPEAK_THRESHDEC_EVERY_2ND,
+ [dec_once] = OOKPEAK_THRESHDEC_ONCE,
+ [dec_twice] = OOKPEAK_THRESHDEC_TWICE,
+ [dec_4times] = OOKPEAK_THRESHDEC_4_TIMES,
+ [dec_8times] = OOKPEAK_THRESHDEC_8_TIMES,
+ [dec_16times] = OOKPEAK_THRESHDEC_16_TIMES,
+ };
+
+ if (unlikely(threshold_decrement >= ARRAY_SIZE(td_map))) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
+
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC,
+ td_map[threshold_decrement]);
}
int rf69_set_dio_mapping(struct spi_device *spi, u8 dio_number, u8 value)
@@ -564,22 +525,34 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 dio_number, u8 value)
switch (dio_number) {
case 0:
- mask = MASK_DIO0; shift = SHIFT_DIO0; dio_addr = REG_DIOMAPPING1;
+ mask = MASK_DIO0;
+ shift = SHIFT_DIO0;
+ dio_addr = REG_DIOMAPPING1;
break;
case 1:
- mask = MASK_DIO1; shift = SHIFT_DIO1; dio_addr = REG_DIOMAPPING1;
+ mask = MASK_DIO1;
+ shift = SHIFT_DIO1;
+ dio_addr = REG_DIOMAPPING1;
break;
case 2:
- mask = MASK_DIO2; shift = SHIFT_DIO2; dio_addr = REG_DIOMAPPING1;
+ mask = MASK_DIO2;
+ shift = SHIFT_DIO2;
+ dio_addr = REG_DIOMAPPING1;
break;
case 3:
- mask = MASK_DIO3; shift = SHIFT_DIO3; dio_addr = REG_DIOMAPPING1;
+ mask = MASK_DIO3;
+ shift = SHIFT_DIO3;
+ dio_addr = REG_DIOMAPPING1;
break;
case 4:
- mask = MASK_DIO4; shift = SHIFT_DIO4; dio_addr = REG_DIOMAPPING2;
+ mask = MASK_DIO4;
+ shift = SHIFT_DIO4;
+ dio_addr = REG_DIOMAPPING2;
break;
case 5:
- mask = MASK_DIO5; shift = SHIFT_DIO5; dio_addr = REG_DIOMAPPING2;
+ mask = MASK_DIO5;
+ shift = SHIFT_DIO5;
+ dio_addr = REG_DIOMAPPING2;
break;
default:
dev_dbg(&spi->dev, "set: illegal input param");
@@ -617,8 +590,10 @@ bool rf69_get_flag(struct spi_device *spi, enum flag flag)
return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
case fifo_full:
return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_FULL);
-/* case fifo_not_empty:
- * return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY); */
+/*
+ * case fifo_not_empty:
+ * return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY);
+ */
case fifo_empty:
return !(rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY);
case fifo_level_below_threshold:
@@ -749,23 +724,21 @@ int rf69_disable_crc(struct spi_device *spi)
int rf69_set_address_filtering(struct spi_device *spi,
enum address_filtering address_filtering)
{
- switch (address_filtering) {
- case filtering_off:
- return rf69_read_mod_write(spi, REG_PACKETCONFIG1,
- MASK_PACKETCONFIG1_ADDRESSFILTERING,
- PACKETCONFIG1_ADDRESSFILTERING_OFF);
- case node_address:
- return rf69_read_mod_write(spi, REG_PACKETCONFIG1,
- MASK_PACKETCONFIG1_ADDRESSFILTERING,
- PACKETCONFIG1_ADDRESSFILTERING_NODE);
- case node_or_broadcast_address:
- return rf69_read_mod_write(spi, REG_PACKETCONFIG1,
- MASK_PACKETCONFIG1_ADDRESSFILTERING,
- PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST);
- default:
+ static const u8 af_map[] = {
+ [filtering_off] = PACKETCONFIG1_ADDRESSFILTERING_OFF,
+ [node_address] = PACKETCONFIG1_ADDRESSFILTERING_NODE,
+ [node_or_broadcast_address] =
+ PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST,
+ };
+
+ if (unlikely(address_filtering >= ARRAY_SIZE(af_map))) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
+
+ return rf69_read_mod_write(spi, REG_PACKETCONFIG1,
+ MASK_PACKETCONFIG1_ADDRESSFILTERING,
+ af_map[address_filtering]);
}
int rf69_set_payload_length(struct spi_device *spi, u8 payload_length)
@@ -816,7 +789,8 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
if (retval)
return retval;
- /* access the fifo to activate new threshold
+ /*
+ * access the fifo to activate new threshold
	 * retval is (mis)used as a buffer here
*/
return rf69_read_fifo(spi, (u8 *)&retval, 1);
@@ -824,17 +798,18 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
{
- switch (dagc) {
- case normal_mode:
- return rf69_write_reg(spi, REG_TESTDAGC, DAGC_NORMAL);
- case improve:
- return rf69_write_reg(spi, REG_TESTDAGC, DAGC_IMPROVED_LOWBETA0);
- case improve_for_low_modulation_index:
- return rf69_write_reg(spi, REG_TESTDAGC, DAGC_IMPROVED_LOWBETA1);
- default:
+ static const u8 dagc_map[] = {
+ [normal_mode] = DAGC_NORMAL,
+ [improve] = DAGC_IMPROVED_LOWBETA0,
+ [improve_for_low_modulation_index] = DAGC_IMPROVED_LOWBETA1,
+ };
+
+ if (unlikely(dagc >= ARRAY_SIZE(dagc_map))) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
+
+ return rf69_write_reg(spi, REG_TESTDAGC, dagc_map[dagc]);
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h
index c131ffbdc2db..319b86c14234 100644
--- a/drivers/staging/pi433/rf69.h
+++ b/drivers/staging/pi433/rf69.h
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* hardware abstraction/register access for HopeRf rf69 radio module
*
* Copyright (C) 2016 Wolf-Entwicklungen
@@ -20,10 +21,11 @@
#include "rf69_enum.h"
#include "rf69_registers.h"
-#define F_OSC 32000000 /* in Hz */
-#define FREQUENCY 433920000 /* in Hz, modifying this value impacts CE certification */
-#define FIFO_SIZE 66 /* in byte */
-#define FIFO_THRESHOLD 15 /* in byte */
+/* NOTE: Modifying FREQUENCY value impacts CE certification */
+#define F_OSC 32000000 /* Hz */
+#define FREQUENCY 433920000 /* Hz */
+#define FIFO_SIZE 66 /* bytes */
+#define FIFO_THRESHOLD 15 /* bytes */
int rf69_set_mode(struct spi_device *spi, enum mode mode);
int rf69_set_data_mode(struct spi_device *spi, u8 data_mode);
diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h
index 493bd0025453..de3b7e32dad7 100644
--- a/drivers/staging/pi433/rf69_enum.h
+++ b/drivers/staging/pi433/rf69_enum.h
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* enumerations for HopeRf rf69 radio module
*
* Copyright (C) 2016 Wolf-Entwicklungen
diff --git a/drivers/staging/pi433/rf69_registers.h b/drivers/staging/pi433/rf69_registers.h
index 33fd91518bb0..ea19c1ca7509 100644
--- a/drivers/staging/pi433/rf69_registers.h
+++ b/drivers/staging/pi433/rf69_registers.h
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* register description for HopeRf rf69 radio module
*
* Copyright (C) 2016 Wolf-Entwicklungen
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index 673fdce25530..ff7832798a77 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -7,7 +7,6 @@ config R8188EU
select LIB80211
select LIB80211_CRYPT_WEP
select LIB80211_CRYPT_CCMP
- select LIB80211_CRYPT_TKIP
---help---
This option adds support for Realtek RTL8188EU USB devices such as the TP-Link TL-WN725N.
If built as a module, it will be called r8188eu.
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 033fb2e6950d..4e606b03ec03 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -24,12 +24,12 @@ r8188eu-y := \
hal/rf_cfg.o \
hal/pwrseqcmd.o \
hal/pwrseq.o \
- hal/Hal8188ERateAdaptive.o\
+ hal/hal8188e_rate_adaptive.o \
hal/hal_intf.o \
hal/hal_com.o \
hal/odm.o \
hal/odm_HWConfig.o \
- hal/odm_RTL8188E.o \
+ hal/odm_rtl8188e.o \
hal/rtl8188e_cmd.o \
hal/rtl8188e_dm.o \
hal/rtl8188e_hal_init.o \
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 4140e37bf859..676d549ef786 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_AP_C_
@@ -153,112 +145,6 @@ static void update_BCNTIM(struct adapter *padapter)
set_tx_beacon_cmd(padapter);
}
-void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
- u8 index, u8 *data, u8 len)
-{
- struct ndis_802_11_var_ie *pIE;
- u8 bmatch = false;
- u8 *pie = pnetwork->ies;
- u8 *p = NULL, *dst_ie = NULL, *premainder_ie = NULL;
- u8 *pbackup_remainder_ie = NULL;
- u32 i, offset, ielen = 0, ie_offset, remainder_ielen = 0;
-
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->ie_length;) {
- pIE = (struct ndis_802_11_var_ie *)(pnetwork->ies + i);
-
- if (pIE->ElementID > index) {
- break;
- /* already exist the same IE */
- } else if (pIE->ElementID == index) {
- p = (u8 *)pIE;
- ielen = pIE->Length;
- bmatch = true;
- break;
- }
- p = (u8 *)pIE;
- ielen = pIE->Length;
- i += (pIE->Length + 2);
- }
-
- if (p && ielen > 0) {
- ielen += 2;
-
- premainder_ie = p + ielen;
-
- ie_offset = (int)(p - pie);
-
- remainder_ielen = pnetwork->ie_length - ie_offset - ielen;
-
- if (bmatch)
- dst_ie = p;
- else
- dst_ie = p + ielen;
- }
-
- if (remainder_ielen > 0) {
- pbackup_remainder_ie = rtw_malloc(remainder_ielen);
- if (pbackup_remainder_ie && premainder_ie)
- memcpy(pbackup_remainder_ie, premainder_ie,
- remainder_ielen);
- }
-
- *dst_ie++ = index;
- *dst_ie++ = len;
-
- memcpy(dst_ie, data, len);
- dst_ie += len;
-
- /* copy remainder IE */
- if (pbackup_remainder_ie) {
- memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
-
- kfree(pbackup_remainder_ie);
- }
-
- offset = (uint)(dst_ie - pie);
- pnetwork->ie_length = offset + remainder_ielen;
-}
-
-void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
- u8 index)
-{
- u8 *p, *dst_ie = NULL, *premainder_ie = NULL;
- u8 *pbackup_remainder_ie = NULL;
- uint offset, ielen, ie_offset, remainder_ielen = 0;
- u8 *pie = pnetwork->ies;
-
- p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, index, &ielen,
- pnetwork->ie_length - _FIXED_IE_LENGTH_);
- if (p && ielen > 0) {
- ielen += 2;
-
- premainder_ie = p + ielen;
-
- ie_offset = (int)(p - pie);
-
- remainder_ielen = pnetwork->ie_length - ie_offset - ielen;
-
- dst_ie = p;
- }
-
- if (remainder_ielen > 0) {
- pbackup_remainder_ie = rtw_malloc(remainder_ielen);
- if (pbackup_remainder_ie && premainder_ie)
- memcpy(pbackup_remainder_ie, premainder_ie,
- remainder_ielen);
- }
-
- /* copy remainder IE */
- if (pbackup_remainder_ie) {
- memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
-
- kfree(pbackup_remainder_ie);
- }
-
- offset = (uint)(dst_ie - pie);
- pnetwork->ie_length = offset + remainder_ielen;
-}
-
static u8 chk_sta_is_alive(struct sta_info *psta)
{
u8 ret = false;
@@ -314,7 +200,6 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
}
}
-
}
spin_unlock_bh(&pstapriv->auth_list_lock);
@@ -371,7 +256,8 @@ void expire_timeout_chk(struct adapter *padapter)
stainfo_offset =
rtw_stainfo_offset(pstapriv, psta);
if (stainfo_offset_valid(stainfo_offset))
- chk_alive_list[chk_alive_num++] = stainfo_offset;
+ chk_alive_list[chk_alive_num++] =
+ stainfo_offset;
continue;
}
@@ -635,7 +521,6 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
else
psta->ieee8021x_blocked = false;
-
/* update sta's cap */
/* ERP */
@@ -734,7 +619,6 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
cur_bwmode = HT_CHANNEL_WIDTH_20;
cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
/* check if there is wps ie,
* if there is wpsie in beacon, the hostapd will update
 * beacon twice when starting hostapd, and at first time the
@@ -884,7 +768,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return _FAIL;
-
if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
@@ -894,7 +777,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
memcpy(ie, pbuf, pbss_network->ie_length);
-
if (pbss_network->InfrastructureMode != Ndis802_11APMode)
return _FAIL;
@@ -1690,7 +1572,6 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
/* clear cam entry / key */
rtw_clearstakey_cmd(padapter, (u8 *)psta, (u8)(psta->mac_id + 3), true);
-
spin_lock_bh(&psta->lock);
psta->state &= ~_FW_LINKED;
spin_unlock_bh(&psta->lock);
@@ -1739,7 +1620,6 @@ int rtw_sta_flush(struct adapter *padapter)
}
spin_unlock_bh(&pstapriv->asoc_list_lock);
-
issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
associated_clients_update(padapter, true);
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 72099f5d6915..59039211dad2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_CMD_C_
@@ -499,7 +491,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+%s\n", __func__));
/* prepare cmd parameter */
- param = kzalloc(sizeof(*param), GFP_KERNEL);
+ param = kzalloc(sizeof(*param), GFP_ATOMIC);
if (!param) {
res = _FAIL;
goto exit;
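Editorial note: GFP_KERNEL allocations may sleep, which is forbidden while a spinlock is held or in softirq/timer context; switching to GFP_ATOMIC makes the allocation safe from such callers at the cost of a higher failure rate. The general rule, sketched:

    buf = kzalloc(len, GFP_KERNEL);   /* process context, may sleep */

    buf = kzalloc(len, GFP_ATOMIC);   /* atomic context, never sleeps */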
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 60d8c7b9f458..67461fdf315c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_DEBUG_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 2c4c8c43b1ad..0fd306a808c4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_EFUSE_C_
@@ -30,58 +22,55 @@ enum{
};
/*
- * Function: Efuse_PowerSwitch
+ * Function: efuse_power_switch
*
 * Overview: When we want to enable write operations, we should switch to
 * the power-on state. When we stop writing, we should switch to 500k mode
* and disable LDO 2.5V.
*/
-void Efuse_PowerSwitch(
- struct adapter *pAdapter,
- u8 bWrite,
- u8 PwrState)
+void efuse_power_switch(struct adapter *pAdapter, u8 write, u8 pwrstate)
{
u8 tempval;
- u16 tmpV16;
+ u16 tmpv16;
- if (PwrState) {
+ if (pwrstate) {
usb_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
/* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
- tmpV16 = usb_read16(pAdapter, REG_SYS_ISO_CTRL);
- if (!(tmpV16 & PWC_EV12V)) {
- tmpV16 |= PWC_EV12V;
- usb_write16(pAdapter, REG_SYS_ISO_CTRL, tmpV16);
+ tmpv16 = usb_read16(pAdapter, REG_SYS_ISO_CTRL);
+ if (!(tmpv16 & PWC_EV12V)) {
+ tmpv16 |= PWC_EV12V;
+ usb_write16(pAdapter, REG_SYS_ISO_CTRL, tmpv16);
}
/* Reset: 0x0000h[28], default valid */
- tmpV16 = usb_read16(pAdapter, REG_SYS_FUNC_EN);
- if (!(tmpV16 & FEN_ELDR)) {
- tmpV16 |= FEN_ELDR;
- usb_write16(pAdapter, REG_SYS_FUNC_EN, tmpV16);
+ tmpv16 = usb_read16(pAdapter, REG_SYS_FUNC_EN);
+ if (!(tmpv16 & FEN_ELDR)) {
+ tmpv16 |= FEN_ELDR;
+ usb_write16(pAdapter, REG_SYS_FUNC_EN, tmpv16);
}
/* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
- tmpV16 = usb_read16(pAdapter, REG_SYS_CLKR);
- if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
- tmpV16 |= (LOADER_CLK_EN | ANA8M);
- usb_write16(pAdapter, REG_SYS_CLKR, tmpV16);
+ tmpv16 = usb_read16(pAdapter, REG_SYS_CLKR);
+ if ((!(tmpv16 & LOADER_CLK_EN)) || (!(tmpv16 & ANA8M))) {
+ tmpv16 |= (LOADER_CLK_EN | ANA8M);
+ usb_write16(pAdapter, REG_SYS_CLKR, tmpv16);
}
- if (bWrite) {
+ if (write) {
/* Enable LDO 2.5V before read/write action */
- tempval = usb_read8(pAdapter, EFUSE_TEST+3);
+ tempval = usb_read8(pAdapter, EFUSE_TEST + 3);
tempval &= 0x0F;
tempval |= (VOLTAGE_V25 << 4);
- usb_write8(pAdapter, EFUSE_TEST+3, (tempval | 0x80));
+ usb_write8(pAdapter, EFUSE_TEST + 3, (tempval | 0x80));
}
} else {
usb_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
- if (bWrite) {
+ if (write) {
/* Disable LDO 2.5V after read/write action */
- tempval = usb_read8(pAdapter, EFUSE_TEST+3);
- usb_write8(pAdapter, EFUSE_TEST+3, (tempval & 0x7F));
+ tempval = usb_read8(pAdapter, EFUSE_TEST + 3);
+ usb_write8(pAdapter, EFUSE_TEST + 3, (tempval & 0x7F));
}
}
}
@@ -309,8 +298,7 @@ static s32 iol_read_efuse(struct adapter *padapter, u8 txpktbuf_bndy, u16 offset
void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf)
{
-
- if (rtw_IOL_applied(Adapter)) {
+ if (rtw_iol_applied(Adapter)) {
rtw_hal_power_on(Adapter);
iol_mode_enable(Adapter, 1);
iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
@@ -484,7 +472,6 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
efuse_addr = efuse_addr + (word_cnts*2)+1;
ReadState = PG_STATE_HEADER;
}
-
}
if ((data[0] == 0xff) && (data[1] == 0xff) && (data[2] == 0xff) && (data[3] == 0xff) &&
@@ -909,11 +896,11 @@ void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
*/
static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse)
{
- Efuse_PowerSwitch(pAdapter, false, true);
+ efuse_power_switch(pAdapter, false, true);
efuse_ReadEFuse(pAdapter, efuseType, 0, EFUSE_MAP_LEN_88E, Efuse);
- Efuse_PowerSwitch(pAdapter, false, false);
+ efuse_power_switch(pAdapter, false, false);
}
/*
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 52ad085383a0..7d5cbaf50f1c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _IEEE80211_C
@@ -47,24 +39,23 @@ u8 RSN_CIPHER_SUITE_WEP104[] = { 0x00, 0x0f, 0xac, 5 };
/* for adhoc-master to generate ie and provide supported-rate to fw */
/* */
-static u8 WIFI_CCKRATES[] = {
- (IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK),
- (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK),
- (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK),
- (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK)
- };
-
-static u8 WIFI_OFDMRATES[] = {
- (IEEE80211_OFDM_RATE_6MB),
- (IEEE80211_OFDM_RATE_9MB),
- (IEEE80211_OFDM_RATE_12MB),
- (IEEE80211_OFDM_RATE_18MB),
- (IEEE80211_OFDM_RATE_24MB),
+static u8 WIFI_CCKRATES[] = {
+ IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 WIFI_OFDMRATES[] = {
+ IEEE80211_OFDM_RATE_6MB,
+ IEEE80211_OFDM_RATE_9MB,
+ IEEE80211_OFDM_RATE_12MB,
+ IEEE80211_OFDM_RATE_18MB,
+ IEEE80211_OFDM_RATE_24MB,
IEEE80211_OFDM_RATE_36MB,
IEEE80211_OFDM_RATE_48MB,
IEEE80211_OFDM_RATE_54MB
- };
-
+};
int rtw_get_bit_value_from_ieee_value(u8 val)
{
@@ -81,28 +72,27 @@ int rtw_get_bit_value_from_ieee_value(u8 val)
return 0;
}
-uint rtw_is_cckrates_included(u8 *rate)
+bool rtw_is_cckrates_included(u8 *rate)
{
- u32 i = 0;
+ while (*rate) {
+ u8 r = *rate & 0x7f;
- while (rate[i] != 0) {
- if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
- (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ if (r == 2 || r == 4 || r == 11 || r == 22)
return true;
- i++;
+ rate++;
}
+
return false;
}
-uint rtw_is_cckratesonly_included(u8 *rate)
+bool rtw_is_cckratesonly_included(u8 *rate)
{
- u32 i = 0;
+ while (*rate) {
+ u8 r = *rate & 0x7f;
- while (rate[i] != 0) {
- if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
- (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ if (r != 2 && r != 4 && r != 11 && r != 22)
return false;
- i++;
+ rate++;
}
return true;
@@ -111,14 +101,14 @@ uint rtw_is_cckratesonly_included(u8 *rate)
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
if (channel > 14) {
- if ((rtw_is_cckrates_included(rate)) == true)
+ if (rtw_is_cckrates_included(rate))
return WIRELESS_INVALID;
else
return WIRELESS_11A;
} else { /* could be pure B, pure G, or B/G */
- if ((rtw_is_cckratesonly_included(rate)) == true)
+ if (rtw_is_cckratesonly_included(rate))
return WIRELESS_11B;
- else if ((rtw_is_cckrates_included(rate)) == true)
+ else if (rtw_is_cckrates_included(rate))
return WIRELESS_11BG;
else
return WIRELESS_11G;
@@ -186,7 +176,6 @@ u8 *rtw_get_ie(u8 *pbuf, int index, uint *len, int limit)
void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
{
-
memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
switch (mode) {
@@ -209,29 +198,24 @@ void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
}
}
-uint rtw_get_rateset_len(u8 *rateset)
+uint rtw_get_rateset_len(u8 *rateset)
{
- uint i = 0;
+ uint i;
- while (1) {
- if ((rateset[i]) == 0)
+ for (i = 0; i < 13; i++)
+ if (rateset[i] == 0)
break;
- if (i > 12)
- break;
- i++;
- }
return i;
}
int rtw_generate_ie(struct registry_priv *pregistrypriv)
{
- u8 wireless_mode;
- int rateLen;
- uint sz = 0;
+ u8 wireless_mode;
+ int rateLen;
+ uint sz = 0;
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *ie = pdev_network->ies;
-
/* timestamp will be inserted by hardware */
sz += 8;
ie += sz;
@@ -334,7 +318,6 @@ check_next_ie:
unsigned char *rtw_get_wpa2_ie(unsigned char *pie, uint *rsn_ie_len, int limit)
{
-
return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
}
@@ -370,7 +353,6 @@ int rtw_get_wpa2_cipher_suite(u8 *s)
return 0;
}
-
int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
{
int i, ret = _SUCCESS;
@@ -383,7 +365,6 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
return _FAIL;
}
-
if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie + 1) != (u8)(wpa_ie_len - 2)) ||
(memcmp(wpa_ie + 2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
return _FAIL;
@@ -393,7 +374,6 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
pos += 8;
left = wpa_ie_len - 8;
-
/* group_cipher */
if (left >= WPA_SELECTOR_LEN) {
*group_cipher = rtw_get_wpa_cipher_suite(pos);
@@ -452,7 +432,6 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
return _FAIL;
}
-
if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie + 1) != (u8)(rsn_ie_len - 2)))
return _FAIL;
@@ -513,8 +492,7 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie,
{
u8 authmode, sec_idx, i;
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
- uint cnt;
-
+ uint cnt;
/* Search required WPA or WPA2 IE and copy to sec_ie[] */
@@ -568,7 +546,6 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie,
}
}
-
return *rsn_len + *wpa_len;
}
@@ -931,28 +908,18 @@ void rtw_macaddr_cfg(u8 *mac_addr)
if (rtw_initmac && mac_pton(rtw_initmac, mac)) {
/* Users specify the mac address */
- memcpy(mac_addr, mac, ETH_ALEN);
+ ether_addr_copy(mac_addr, mac);
} else {
/* Use the mac address stored in the Efuse */
- memcpy(mac, mac_addr, ETH_ALEN);
+ ether_addr_copy(mac, mac_addr);
}
- if (((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) &&
- (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
- ((mac[0] == 0x0) && (mac[1] == 0x0) && (mac[2] == 0x0) &&
- (mac[3] == 0x0) && (mac[4] == 0x0) && (mac[5] == 0x0))) {
- mac[0] = 0x00;
- mac[1] = 0xe0;
- mac[2] = 0x4c;
- mac[3] = 0x87;
- mac[4] = 0x00;
- mac[5] = 0x00;
- /* use default mac address */
- memcpy(mac_addr, mac, ETH_ALEN);
- DBG_88E("MAC Address from efuse error, assign default one !!!\n");
+ if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac)) {
+ eth_random_addr(mac_addr);
+ DBG_88E("MAC Address from efuse error, assign random one !!!\n");
}
- DBG_88E("%s MAC Address = %pM\n", __func__, (mac_addr));
+ DBG_88E("%s MAC Address = %pM\n", __func__, mac_addr);
}
static int rtw_get_cipher_info(struct wlan_network *pnetwork)
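Editorial note on the rtw_macaddr_cfg hunk above: the open-coded all-0xff/all-zero byte comparisons collapse into <linux/etherdevice.h> helpers, and eth_random_addr() yields a locally administered unicast address instead of the old hard-coded default. As a hedged variant, is_valid_ether_addr() would fold both tests into one call - note it is slightly stricter, also rejecting non-broadcast multicast addresses:

    if (!is_valid_ether_addr(mac))
            eth_random_addr(mac_addr);   /* unicast, locally administered */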
@@ -1003,7 +970,7 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
u16 wpa_len = 0, rsn_len = 0;
struct HT_info_element *pht_info = NULL;
uint len;
- unsigned char *p;
+ unsigned char *p;
memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.ies), 2);
cap = le16_to_cpu(le_tmp);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 2fca8ae68e05..c040f185074b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_IOCTL_SET_C_
@@ -29,7 +21,6 @@ u8 rtw_do_join(struct adapter *padapter)
struct __queue *queue = &(pmlmepriv->scanned_queue);
u8 ret = _SUCCESS;
-
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
plist = phead->next;
@@ -123,8 +114,6 @@ u8 rtw_do_join(struct adapter *padapter)
}
exit:
-
-
return ret;
}
@@ -134,7 +123,6 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 &&
@@ -147,7 +135,6 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
spin_lock_bh(&pmlmepriv->lock);
-
DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
goto handle_tkip_countermeasure;
@@ -209,7 +196,6 @@ exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("%s: status=%d\n", __func__, status));
-
return status;
}
@@ -221,7 +207,6 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
-
DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
ssid->Ssid, get_fwstate(pmlmepriv));
@@ -327,7 +312,6 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
struct wlan_network *cur_network = &pmlmepriv->cur_network;
enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode);
-
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n",
*pold_state, networktype, get_fwstate(pmlmepriv)));
@@ -384,16 +368,13 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
spin_unlock_bh(&pmlmepriv->lock);
}
-
return true;
}
-
u8 rtw_set_802_11_disassociate(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -408,7 +389,6 @@ u8 rtw_set_802_11_disassociate(struct adapter *padapter)
spin_unlock_bh(&pmlmepriv->lock);
-
return true;
}
@@ -417,7 +397,6 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 res = true;
-
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+%s(), fw_state =%x\n", __func__, get_fwstate(pmlmepriv)));
if (!padapter) {
@@ -456,8 +435,6 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
spin_unlock_bh(&pmlmepriv->lock);
}
exit:
-
-
return res;
}
@@ -467,7 +444,6 @@ u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11
int res;
u8 ret;
-
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode));
psecuritypriv->ndisauthtype = authmode;
@@ -486,7 +462,6 @@ u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11
else
ret = false;
-
return ret;
}
@@ -496,7 +471,6 @@ u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
struct security_priv *psecuritypriv = &(padapter->securitypriv);
u8 ret = _SUCCESS;
-
keyid = wep->KeyIndex & 0x3fffffff;
if (keyid >= 4) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_iol.c b/drivers/staging/rtl8188eu/core/rtw_iol.c
index 2e2145caa56b..fc3c66201e59 100644
--- a/drivers/staging/rtl8188eu/core/rtw_iol.c
+++ b/drivers/staging/rtl8188eu/core/rtw_iol.c
@@ -1,28 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
******************************************************************************/
#include <rtw_iol.h>
-bool rtw_IOL_applied(struct adapter *adapter)
+bool rtw_iol_applied(struct adapter *adapter)
{
if (adapter->registrypriv.fw_iol == 1)
return true;
- if ((adapter->registrypriv.fw_iol == 2) &&
- (!adapter_to_dvobj(adapter)->ishighspeed))
+ if (adapter->registrypriv.fw_iol == 2 &&
+ !adapter_to_dvobj(adapter)->ishighspeed)
return true;
return false;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index c4335893d8f6..cbef871a7679 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
******************************************************************************/
#include <drv_types.h>
@@ -78,7 +69,6 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
-
/* */
/* Description: */
/* DeInitialize an LED_871x object. */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 50e7cae32f75..eca06f05c0c4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_MLME_C_
@@ -209,7 +201,6 @@ exit:
return pnetwork;
}
-
void rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
{
struct list_head *phead, *plist;
@@ -265,7 +256,6 @@ u8 *rtw_get_capability_from_ie(u8 *ie)
return ie + 8 + 2;
}
-
u16 rtw_get_capability(struct wlan_bssid_ex *bss)
{
__le16 val;
@@ -534,7 +524,6 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
bselected = false;
}
-
if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
DBG_88E("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
bselected = false;
@@ -770,7 +759,6 @@ void rtw_free_assoc_resources_locked(struct adapter *adapter)
rtw_init_bcmc_stainfo(adapter);
}
-
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan)
pwlan->fixed = false;
@@ -944,7 +932,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
("\nfw_state:%x, BSSID:%pM\n",
get_fwstate(pmlmepriv), pnetwork->network.MacAddress));
-
/* why not use ptarget_wlan?? */
memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
/* some ies in pnetwork is wrong, so we should use ptarget_wlan ies */
@@ -2035,9 +2022,9 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
struct sta_info *psta = NULL;
struct ht_priv *phtpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- s32 bmcst = IS_MCAST(pattrib->ra);
- if (bmcst || (padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100))
+ if (is_multicast_ether_addr(pattrib->ra) ||
+ padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100)
return;
priority = pattrib->priority;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 59d862f67573..1115050077e4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_MLME_EXT_C_
@@ -350,7 +342,6 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
pwlanhdr = (struct ieee80211_hdr *)pframe;
-
fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
@@ -593,7 +584,6 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
}
-
/* EXTERNDED SUPPORTED RATE */
if (rate_len > 8)
pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
@@ -633,7 +623,6 @@ static int issue_probereq(struct adapter *padapter,
pattrib = &pmgntframe->attrib;
update_mgntframe_attrib(padapter, pattrib);
-
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
@@ -785,7 +774,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
if (psta) {/* for AP mode */
#ifdef CONFIG_88EU_AP_MODE
@@ -795,7 +783,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
ether_addr_copy(pwlanhdr->addr3,
myid(&(padapter->eeprompriv)));
-
/* setting auth algo number */
val16 = (u16)psta->authalg;
@@ -861,7 +848,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, &le_tmp16,
&pattrib->pktlen);
-
/* setting status code... */
le_tmp16 = cpu_to_le16(status);
pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, &le_tmp16,
@@ -890,7 +876,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
dump_mgntframe(padapter, pmgntframe);
}
-
#ifdef CONFIG_88EU_AP_MODE
static void issue_asocrsp(struct adapter *padapter, unsigned short status,
struct sta_info *pstat, int pkt_type)
@@ -919,7 +904,6 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
pattrib = &pmgntframe->attrib;
update_mgntframe_attrib(padapter, pattrib);
-
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
@@ -933,7 +917,6 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
myid(&(padapter->eeprompriv)));
ether_addr_copy((void *)GetAddr3Ptr(pwlanhdr), pnetwork->MacAddress);
-
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
@@ -1123,7 +1106,6 @@ static void issue_assocreq(struct adapter *padapter)
goto exit; /* don't connect to AP if no joint supported rate */
}
-
if (bssrate_len > 8) {
pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
@@ -1269,7 +1251,6 @@ exit:
return ret;
}
-
/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
int issue_nulldata(struct adapter *padapter, unsigned char *da,
@@ -1497,7 +1478,6 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
pattrib->last_txcmdsz = pattrib->pktlen;
-
if (wait_ack) {
ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
} else {
@@ -1735,10 +1715,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (pmlmeinfo->bwmode_updated)
return;
-
DBG_88E("%s\n", __func__);
-
category = RTW_WLAN_CATEGORY_PUBLIC;
action = ACT_PUBLIC_BSSCOEXIST;
@@ -1772,7 +1750,6 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
-
/* */
if (pmlmepriv->num_FortyMHzIntolerant > 0) {
u8 iedata = 0;
@@ -1782,7 +1759,6 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pframe = rtw_set_ie(pframe, EID_BSSCoexistence, 1, &iedata, &(pattrib->pktlen));
}
-
/* */
memset(ICS, 0, sizeof(ICS));
if (pmlmepriv->num_sta_no_ht > 0) {
@@ -1840,7 +1816,6 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
}
}
-
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
@@ -1943,7 +1918,6 @@ static void site_survey(struct adapter *padapter)
ScanType = (ch->flags & RTW_IEEE80211_CHAN_PASSIVE_SCAN) ? SCAN_PASSIVE : SCAN_ACTIVE;
}
-
if (survey_channel != 0) {
/* PAUSE 4-AC Queue when site_survey */
/* rtw_hal_get_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
@@ -1987,7 +1961,6 @@ static void site_survey(struct adapter *padapter)
set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
} else {
-
/* 20100721: Interrupt the scan operation here. */
/* For SW antenna diversity before link, it needs to switch to another antenna and scan again. */
/* It then compares the scan results and selects the better antenna to make the connection. */
@@ -2329,7 +2302,6 @@ static void start_clnt_auth(struct adapter *padapter)
pmlmeinfo->link_count = 0;
pmlmeext->retry = 0;
-
/* Because the AP may not have received our earlier deauth, */
/* it may 1) not respond to auth or 2) deauth us after the link completes; */
/* issue a deauth before issuing auth to deal with this situation */
@@ -2343,7 +2315,6 @@ static void start_clnt_auth(struct adapter *padapter)
set_link_timer(pmlmeext, REAUTH_TO);
}
-
static void start_clnt_assoc(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2965,7 +2936,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
ie_offset = _REASOCREQ_IE_OFFSET_;
}
-
if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset) {
DBG_88E("handle_assoc(reassoc=%d) - too short payload (len=%lu)"
"\n", reassoc, (unsigned long)pkt_len);
@@ -2983,7 +2953,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
left = pkt_len - (IEEE80211_3ADDR_LEN + ie_offset);
pos = pframe + (IEEE80211_3ADDR_LEN + ie_offset);
-
DBG_88E("%s\n", __func__);
/* check if this STA has been successfully authenticated/associated */
@@ -3009,7 +2978,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
goto OnAssocReqFail;
}
-
/* now we should check all the fields... */
/* checking SSID */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SSID_IE_, &ie_len,
@@ -3138,7 +3106,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
pstat->flags |= WLAN_STA_MAYBE_WPS;
}
-
/* AP supports WPA/RSN and the STA is going to do WPS, but the AP is not ready, */
/* i.e. the AP's selected registrar is _FALSE */
if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
@@ -3278,8 +3245,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
else
pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
-
-
if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
@@ -3501,7 +3466,6 @@ static unsigned int OnDeAuth(struct adapter *padapter,
associated_clients_update(padapter, updated);
}
-
return _SUCCESS;
} else
#endif
@@ -4097,7 +4061,6 @@ int init_mlme_ext_priv(struct adapter *padapter)
pmlmeext->chan_scan_time = SURVEY_TO;
pmlmeext->mlmeext_init = true;
-
pmlmeext->active_keep_alive_check = true;
return _SUCCESS;
@@ -4230,7 +4193,6 @@ void report_survey_event(struct adapter *padapter,
pmlmeext = &padapter->mlmeextpriv;
pcmdpriv = &padapter->cmdpriv;
-
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
if (!pcmd_obj)
return;
@@ -4357,10 +4319,8 @@ void report_join_res(struct adapter *padapter, int res)
DBG_88E("%s(%d)\n", __func__, res);
-
rtw_joinbss_event_prehandle(padapter, (u8 *)&pjoinbss_evt->network);
-
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
@@ -4406,7 +4366,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr,
ether_addr_copy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr);
memcpy((unsigned char *)(pdel_sta_evt->rsvd), (unsigned char *)(&reason), 2);
-
psta = rtw_get_stainfo(&padapter->stapriv, MacAddr);
if (psta)
mac_id = (int)psta->mac_id;
@@ -4465,7 +4424,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr,
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
-
/****************************************************************************
Following are the event callback functions
@@ -4510,7 +4468,6 @@ void update_sta_info(struct adapter *padapter, struct sta_info *psta)
if (pmlmepriv->qospriv.qos_option)
psta->qos_option = true;
-
psta->state = _FW_LINKED;
}
@@ -4545,7 +4502,6 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
}
}
-
/* turn on dynamic functions */
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
@@ -4651,7 +4607,6 @@ void mlmeext_sta_del_event_callback(struct adapter *padapter)
/* SelectChannel(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset); */
set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
flush_all_cam_entry(padapter);
pmlmeinfo->state = WIFI_FW_NULL_STATE;
@@ -4839,7 +4794,6 @@ void survey_timer_hdl(struct timer_list *t)
rtw_enqueue_cmd(pcmdpriv, ph2c);
}
-
exit_survey_timer_hdl:
return;
}
@@ -4931,7 +4885,6 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
struct wlan_bssid_ex *pparm = (struct wlan_bssid_ex *)pbuf;
/* u32 initialgain; */
-
if (pparm->InfrastructureMode == Ndis802_11APMode) {
#ifdef CONFIG_88EU_AP_MODE
@@ -5010,7 +4963,6 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* set MSR to nolink -> infra. mode */
Set_MSR(padapter, _HW_STATE_STATION_);
-
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
}
@@ -5121,7 +5073,6 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
rtw_hal_set_hwreg(padapter, HW_VAR_BCN_FUNC, (u8 *)(&val8));
}
-
/* set MSR to no link state -> infra. mode */
Set_MSR(padapter, _HW_STATE_STATION_);
@@ -5394,7 +5345,6 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
int len_diff = 0;
-
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
@@ -5418,10 +5368,7 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
exit:
-
-
return res;
}
@@ -5456,7 +5403,6 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
if (peventbuf) {
event_callback = wlanevents[evt_code].event_callback;
event_callback(padapter, (u8 *)peventbuf);
-
}
_abort_event_:
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index ac27f9a023bc..5ab6fc22a156 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_PWRCTRL_C_
@@ -25,7 +17,6 @@ static int rtw_hw_suspend(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct net_device *pnetdev = padapter->pnetdev;
-
if ((!padapter->bup) || (padapter->bDriverStopped) ||
(padapter->bSurpriseRemoved)) {
DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
@@ -87,7 +78,6 @@ static int rtw_hw_resume(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct net_device *pnetdev = padapter->pnetdev;
-
/* system resume */
DBG_88E("==> %s\n", __func__);
mutex_lock(&pwrpriv->mutex_lock);
@@ -115,7 +105,6 @@ static int rtw_hw_resume(struct adapter *padapter)
mutex_unlock(&pwrpriv->mutex_lock);
-
return 0;
error_exit:
DBG_88E("%s, Open net dev failed\n", __func__);
@@ -170,7 +159,6 @@ int ips_leave(struct adapter *padapter)
int result = _SUCCESS;
int keyid;
-
mutex_lock(&pwrpriv->mutex_lock);
if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
@@ -350,7 +338,6 @@ static u8 PS_RDY_CHECK(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
-
curr_time = jiffies;
delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
@@ -420,7 +407,6 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
u8 bAwake = false;
s32 err = 0;
-
start_time = jiffies;
while (1) {
rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 05936a45eb93..17b4b9257b49 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_RECV_C_
@@ -23,7 +15,6 @@
#include <mon.h>
#include <wifi.h>
#include <linux/vmalloc.h>
-#include <net/lib80211.h>
#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
#define LLC_HEADER_SIZE 6 /* LLC Header Length */
@@ -44,13 +35,11 @@ static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
-
memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));
spin_lock_init(&psta_recvpriv->lock);
_rtw_init_queue(&psta_recvpriv->defrag_q);
-
}
int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
@@ -106,7 +95,6 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
vfree(precvpriv->pallocated_frame_buf);
rtw_hal_free_recv_priv(padapter);
-
}
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
@@ -201,7 +189,6 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfre
}
spin_unlock(&pframequeue->lock);
-
}
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
@@ -221,20 +208,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
static int recvframe_chkmic(struct adapter *adapter,
struct recv_frame *precvframe)
{
- int res = _SUCCESS;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta);
+ int i, res = _SUCCESS;
+ u32 datalen;
+ u8 miccode[8];
+ u8 bmic_err = false, brpt_micerror = true;
+ u8 *pframe, *payload, *pframemic;
+ u8 *mickey;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
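+ /* TKIP frames carry an 8-byte Michael MIC just ahead of the ICV; */
+ /* the code below recomputes it over the payload and compares the two */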
if (prxattrib->encrypt == _TKIP_) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+ /* calculate mic code */
if (stainfo) {
- int key_idx;
- const int iv_len = 8, icv_len = 4, key_length = 32;
- struct sk_buff *skb = precvframe->pkt;
- u8 key[32], iv[8], icv[4], *pframe = skb->data;
- void *crypto_private = NULL;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
- struct security_priv *psecuritypriv = &adapter->securitypriv;
-
if (IS_MCAST(prxattrib->ra)) {
if (!psecuritypriv) {
res = _FAIL;
@@ -243,58 +241,115 @@ static int recvframe_chkmic(struct adapter *adapter,
DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
- key_idx = prxattrib->key_index;
- memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
- memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: bcmc key\n", __func__));
} else {
- key_idx = 0;
- memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
- memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+ mickey = &stainfo->dot11tkiprxmickey.skey[0];
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n %s: unicast key\n", __func__));
}
- if (!crypto_ops) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
+ /* exclude the header, IV, ICV and the trailing 8-byte MIC code */
+ datalen = precvframe->pkt->len-prxattrib->hdrlen -
+ prxattrib->iv_len-prxattrib->icv_len-8;
+ pframe = precvframe->pkt->data;
+ payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
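+ /* decrypted frame layout: header | IV | payload | MIC(8) | ICV */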
- memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
- memcpy(icv, pframe + skb->len - icv_len, icv_len);
- memmove(pframe + iv_len, pframe, prxattrib->hdrlen);
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+ rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+ (unsigned char)prxattrib->priority); /* mind the length of the data */
- skb_pull(skb, iv_len);
- skb_trim(skb, skb->len - icv_len);
+ pframemic = payload+datalen;
- crypto_private = crypto_ops->init(key_idx);
- if (!crypto_private) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
- res = _FAIL;
- goto exit_lib80211_tkip;
+ bmic_err = false;
+
+ for (i = 0; i < 8; i++) {
+ if (miccode[i] != *(pframemic+i)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+ __func__, i, miccode[i], i, *(pframemic + i)));
+ bmic_err = true;
+ }
}
- memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
- skb_push(skb, iv_len);
- skb_put(skb, icv_len);
+ if (bmic_err) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-8), *(pframemic-7), *(pframemic-6),
+ *(pframemic-5), *(pframemic-4), *(pframemic-3),
+ *(pframemic-2), *(pframemic-1)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-16), *(pframemic-15), *(pframemic-14),
+ *(pframemic-13), *(pframemic-12), *(pframemic-11),
+ *(pframemic-10), *(pframemic-9)));
+ {
+ uint i;
- memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
- memcpy(pframe + skb->len - icv_len, icv, icv_len);
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n ======demp packet (len=%d)======\n",
+ precvframe->pkt->len));
+ for (i = 0; i < precvframe->pkt->len; i += 8) {
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+ *(precvframe->pkt->data+i),
+ *(precvframe->pkt->data+i+1),
+ *(precvframe->pkt->data+i+2),
+ *(precvframe->pkt->data+i+3),
+ *(precvframe->pkt->data+i+4),
+ *(precvframe->pkt->data+i+5),
+ *(precvframe->pkt->data+i+6),
+ *(precvframe->pkt->data+i+7)));
+ }
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n ====== demp packet end [len=%d]======\n",
+ precvframe->pkt->len));
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n hrdlen=%d,\n",
+ prxattrib->hdrlen));
+ }
-exit_lib80211_tkip:
- if (crypto_ops && crypto_private)
- crypto_ops->deinit(crypto_private);
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+ /* double-check key_index because of a timing issue; */
+ /* comparing with psecuritypriv->dot118021XGrpKeyid can hit the same timing issue */
+ if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
+ brpt_micerror = false;
+
+ if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+ rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ }
+ res = _FAIL;
+ } else {
+ /* mic checked ok */
+ if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+ psecuritypriv->bcheck_grpkey = true;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+ }
+ }
} else {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("%s: rtw_get_stainfo==NULL!!!\n", __func__));
}
+
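+ /* drop the trailing 8-byte MIC before handing the frame up */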
+ skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
}
exit:
+
return res;
}
@@ -561,7 +616,7 @@ static void count_rx_stats(struct adapter *padapter,
padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
- if ((!MacAddr_isBcst(pattrib->dst)) && (!IS_MCAST(pattrib->dst)))
+ if (!is_multicast_ether_addr(pattrib->dst))
padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
if (sta)
@@ -964,8 +1019,7 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
if (!memcmp(padapter->eeprompriv.mac_addr,
GetAddr1Ptr(precv_frame->pkt->data), ETH_ALEN))
psta->sta_stats.rx_probersp_pkts++;
- else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->pkt->data)) ||
- is_multicast_mac_addr(GetAddr1Ptr(precv_frame->pkt->data)))
+ else if (is_multicast_ether_addr(GetAddr1Ptr(precv_frame->pkt->data)))
psta->sta_stats.rx_probersp_bm_pkts++;
else
psta->sta_stats.rx_probersp_uo_pkts++;
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
index e47be87fb402..094aa15efe44 100644
--- a/drivers/staging/rtl8188eu/core/rtw_rf.c
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_RF_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index bfe0b217e679..2a48b09ea9ae 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_SECURITY_C_
@@ -139,61 +131,72 @@ static __le32 getcrc32(u8 *buf, int len)
Need to consider the fragment situation
*/
void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
-{ /* exclude ICV */
-
- unsigned char crc[4];
- struct arc4context mycontext;
-
+{
int curfragnum, length;
- u32 keylength;
-
- u8 *pframe, *payload, *iv; /* wepkey */
- u8 wepkey[16];
- u8 hw_hdr_offset = 0;
+ u8 *pframe;
+ u8 hw_hdr_offset = 0;
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
+ const int keyindex = psecuritypriv->dot11PrivacyKeyIndex;
+ void *crypto_private;
+ struct sk_buff *skb;
+ struct lib80211_crypto_ops *crypto_ops;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return;
+ if ((pattrib->encrypt != _WEP40_) && (pattrib->encrypt != _WEP104_))
+ return;
+
hw_hdr_offset = TXDESC_SIZE +
(((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
- /* start to encrypt each fragment */
- if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
- keylength = psecuritypriv->dot11DefKeylen[psecuritypriv->dot11PrivacyKeyIndex];
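+ /* delegate the RC4 keystream and CRC-32 ICV to lib80211's shared WEP implementation */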
+ crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- iv = pframe+pattrib->hdrlen;
- memcpy(&wepkey[0], iv, 3);
- memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength);
- payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+ if (!crypto_ops)
+ return;
- if ((curfragnum+1) == pattrib->nr_frags) { /* the last fragment */
- length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ crypto_private = crypto_ops->init(keyindex);
+ if (!crypto_private)
+ return;
- *((__le32 *)crc) = getcrc32(payload, length);
+ if (crypto_ops->set_key(psecuritypriv->dot11DefKey[keyindex].skey,
+ psecuritypriv->dot11DefKeylen[keyindex], NULL, crypto_private) < 0)
+ goto free_crypto_private;
- arcfour_init(&mycontext, wepkey, 3+keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload+length, crc, 4);
- } else {
- length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
- *((__le32 *)crc) = getcrc32(payload, length);
- arcfour_init(&mycontext, wepkey, 3+keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload+length, crc, 4);
-
- pframe += pxmitpriv->frag_len;
- pframe = (u8 *)round_up((size_t)(pframe), 4);
- }
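+ /* encrypt each fragment independently; every fragment carries its own IV and ICV */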
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ if (curfragnum + 1 == pattrib->nr_frags)
+ length = pattrib->last_txcmdsz;
+ else
+ length = pxmitpriv->frag_len;
+ skb = dev_alloc_skb(length);
+ if (!skb)
+ goto free_crypto_private;
+
+ skb_put_data(skb, pframe, length);
+
+ memmove(skb->data + 4, skb->data, pattrib->hdrlen);
+ skb_pull(skb, 4);
+ skb_trim(skb, skb->len - 4);
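+ /* the 4-byte IV and ICV slots the driver reserved are stripped above; */
+ /* lib80211's encrypt_mpdu re-adds its own IV and ICV */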
+
+ if (crypto_ops->encrypt_mpdu(skb, pattrib->hdrlen, crypto_private)) {
+ kfree_skb(skb);
+ goto free_crypto_private;
}
+
+ memcpy(pframe, skb->data, skb->len);
+
+ pframe += skb->len;
+ pframe = (u8 *)round_up((size_t)(pframe), 4);
+
+ kfree_skb(skb);
}
+free_crypto_private:
+ crypto_ops->deinit(crypto_private);
}
int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
@@ -650,71 +653,71 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
return res;
}
+/* The hlen doesn't include the IV */
u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
-{
- struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
- u32 res = _SUCCESS;
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 res = _SUCCESS;
+
+
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
/* 4 start to decrypt recvframe */
if (prxattrib->encrypt == _TKIP_) {
- struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta);
-
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo) {
- int key_idx;
- const int iv_len = 8, icv_len = 4, key_length = 32;
- void *crypto_private = NULL;
- struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
- u8 key[32], iv[8], icv[4], *pframe = skb->data;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
if (IS_MCAST(prxattrib->ra)) {
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
- key_idx = prxattrib->key_index;
- memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
- memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
} else {
- key_idx = 0;
- memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
- memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
- if (!crypto_ops) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
+ iv = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+ length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
- memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
- memcpy(icv, pframe + skb->len - icv_len, icv_len);
+ GET_TKIP_PN(iv, dot11txpn);
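+ /* dot11txpn now holds the 48-bit TSC (packet sequence counter) extracted from the extended IV */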
- crypto_private = crypto_ops->init(key_idx);
- if (!crypto_private) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
- memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
- skb_push(skb, iv_len);
- skb_put(skb, icv_len);
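+ /* TKIP key mixing: phase 1 mixes the key with TA and the high PN bits, */
+ /* phase 2 folds in the low 16 PN bits to yield the 16-byte per-packet RC4 key */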
+ phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
- memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
- memcpy(pframe + skb->len - icv_len, icv, icv_len);
+ /* 4 decrypt payload, including the ICV */
-exit_lib80211_tkip:
- if (crypto_ops && crypto_private)
- crypto_ops->deinit(crypto_private);
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
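+ /* the last 4 payload bytes carry the CRC-32 ICV; recompute and compare byte by byte */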
+ *((__le32 *)crc) = getcrc32(payload, length-4);
+
+ if (crc[3] != payload[length-1] ||
+ crc[2] != payload[length-2] ||
+ crc[1] != payload[length-3] ||
+ crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+ &crc, &payload[length-4]));
+ res = _FAIL;
+ }
} else {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
res = _FAIL;
@@ -1038,7 +1041,6 @@ static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int
mic_header2[14] = mpdu[30] & 0x0f;
mic_header2[15] = mpdu[31] & 0x00;
}
-
}
/************************************************/
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index a198c5779d50..fb5adaf4a42c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <rtw_sreset.h>
@@ -23,33 +15,6 @@ void rtw_hal_sreset_init(struct adapter *padapter)
psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
}
-u8 sreset_get_wifi_status(struct adapter *padapter)
-{
- struct sreset_priv *psrtpriv = &padapter->HalData->srestpriv;
-
- u8 status = WIFI_STATUS_SUCCESS;
- u32 val32 = 0;
-
- val32 = usb_read32(padapter, REG_TXDMA_STATUS);
- if (val32 == 0xeaeaeaea) {
- psrtpriv->Wifi_Error_Status = WIFI_IF_NOT_EXIST;
- } else if (val32 != 0) {
- DBG_88E("txdmastatu(%x)\n", val32);
- psrtpriv->Wifi_Error_Status = WIFI_MAC_TXDMA_ERROR;
- }
-
- if (WIFI_STATUS_SUCCESS != psrtpriv->Wifi_Error_Status) {
- DBG_88E("==>%s error_status(0x%x)\n", __func__, psrtpriv->Wifi_Error_Status);
- status = psrtpriv->Wifi_Error_Status & (~(USB_READ_PORT_FAIL|USB_WRITE_PORT_FAIL));
- }
- DBG_88E("==> %s wifi_status(0x%x)\n", __func__, status);
-
- /* status restore */
- psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
-
- return status;
-}
-
void sreset_set_wifi_error_status(struct adapter *padapter, u32 status)
{
padapter->HalData->srestpriv.Wifi_Error_Status = status;
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index f42aa4e0ddb8..f12a12b19d3f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_STA_MGT_C_
@@ -26,7 +18,7 @@ static void _rtw_init_stainfo(struct sta_info *psta)
{
memset((u8 *)psta, 0, sizeof(struct sta_info));
- spin_lock_init(&psta->lock);
+ spin_lock_init(&psta->lock);
INIT_LIST_HEAD(&psta->list);
INIT_LIST_HEAD(&psta->hash_list);
_rtw_init_queue(&psta->sleep_q);
@@ -63,7 +55,7 @@ static void _rtw_init_stainfo(struct sta_info *psta)
#endif /* CONFIG_88EU_AP_MODE */
}
-u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
+u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
{
struct sta_info *psta;
s32 i;
@@ -89,7 +81,7 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
for (i = 0; i < NUM_STA; i++) {
_rtw_init_stainfo(psta);
- INIT_LIST_HEAD(&(pstapriv->sta_hash[i]));
+ INIT_LIST_HEAD(&pstapriv->sta_hash[i]);
list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
@@ -135,18 +127,18 @@ inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int
return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
}
-u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
+u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
{
struct list_head *phead, *plist;
struct sta_info *psta = NULL;
struct recv_reorder_ctrl *preorder_ctrl;
- int index;
+ int index;
if (pstapriv) {
- /* delete all reordering_ctrl_timer */
+ /* delete all reordering_ctrl_timer */
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
- phead = &(pstapriv->sta_hash[index]);
+ phead = &pstapriv->sta_hash[index];
plist = phead->next;
while (phead != plist) {
@@ -179,7 +171,7 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
int i = 0;
- u16 wRxSeqInitialValue = 0xffff;
+ u16 wRxSeqInitialValue = 0xffff;
pfree_sta_queue = &pstapriv->free_sta_queue;
@@ -251,14 +243,14 @@ exit:
}
/* using pstapriv->sta_hash_lock to protect */
-u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
+u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
{
int i;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_xmit_priv *pstaxmitpriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
if (!psta)
goto exit;
@@ -274,19 +266,19 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
spin_unlock_bh(&pxmitpriv->lock);
@@ -327,7 +319,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
plist = plist->next;
- list_del_init(&(prframe->list));
+ list_del_init(&prframe->list);
rtw_free_recvframe(prframe, pfree_recv_queue);
}
@@ -371,7 +363,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
#endif /* CONFIG_88EU_AP_MODE */
- spin_lock_bh(&(pfree_sta_queue->lock));
+ spin_lock_bh(&pfree_sta_queue->lock);
list_add_tail(&psta->list, get_list_head(pfree_sta_queue));
spin_unlock_bh(&pfree_sta_queue->lock);
@@ -384,9 +376,9 @@ exit:
void rtw_free_all_stainfo(struct adapter *padapter)
{
struct list_head *plist, *phead;
- s32 index;
+ s32 index;
struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
if (pstapriv->asoc_sta_count == 1)
@@ -395,7 +387,7 @@ void rtw_free_all_stainfo(struct adapter *padapter)
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
- phead = &(pstapriv->sta_hash[index]);
+ phead = &pstapriv->sta_hash[index];
plist = phead->next;
while (phead != plist) {
@@ -415,14 +407,14 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
- u32 index;
+ u32 index;
u8 *addr;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if (!hwaddr)
return NULL;
- if (IS_MCAST(hwaddr))
+ if (is_multicast_ether_addr(hwaddr))
addr = bc_addr;
else
addr = hwaddr;
@@ -431,7 +423,7 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
spin_lock_bh(&pstapriv->sta_hash_lock);
- phead = &(pstapriv->sta_hash[index]);
+ phead = &pstapriv->sta_hash[index];
plist = phead->next;
while (phead != plist) {
@@ -451,10 +443,10 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
- struct sta_info *psta;
+ struct sta_info *psta;
u32 res = _SUCCESS;
unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
@@ -473,9 +465,10 @@ exit:
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
{
- struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- return rtw_get_stainfo(pstapriv, bc_addr);
+
+ return rtw_get_stainfo(pstapriv, bc_addr);
}
u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
@@ -489,7 +482,7 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
plist = phead->next;
while (phead != plist) {
@@ -510,7 +503,7 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
else if (pacl_list->mode == 2)/* deny unless in accept list */
res = (match) ? true : false;
else
- res = true;
+ res = true;
#endif
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index ec5a74df9f48..b9406583e501 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_WLAN_UTIL_C_
@@ -42,20 +34,26 @@ unsigned char REALTEK_96B_IE[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
#define WAIT_FOR_BCN_TO_MAX (20000)
static u8 rtw_basic_rate_cck[4] = {
- IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK
+ IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
};
static u8 rtw_basic_rate_ofdm[3] = {
- IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+ IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};
static u8 rtw_basic_rate_mix[7] = {
- IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+ IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};
int cckrates_included(unsigned char *rate, int ratelen)
@@ -300,21 +298,11 @@ inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
adapter->mlmeextpriv.oper_channel = ch;
}
-inline u8 rtw_get_oper_bw(struct adapter *adapter)
-{
- return adapter->mlmeextpriv.oper_bwmode;
-}
-
inline void rtw_set_oper_bw(struct adapter *adapter, u8 bw)
{
adapter->mlmeextpriv.oper_bwmode = bw;
}
-inline u8 rtw_get_oper_choffset(struct adapter *adapter)
-{
- return adapter->mlmeextpriv.oper_ch_offset;
-}
-
inline void rtw_set_oper_choffset(struct adapter *adapter, u8 offset)
{
adapter->mlmeextpriv.oper_ch_offset = offset;
@@ -436,11 +424,6 @@ unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
return bcn_interval << 2;
}
-void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex)
-{
- rtw_hal_set_hwreg(Adapter, HW_VAR_CAM_EMPTY_ENTRY, (u8 *)(&ucIndex));
-}
-
void invalidate_cam_all(struct adapter *padapter)
{
rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
@@ -1111,41 +1094,6 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
}
}
-unsigned int should_forbid_n_rate(struct adapter *padapter)
-{
- u32 i;
- struct ndis_802_11_var_ie *pIE;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
-
- if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < cur_network->ie_length;) {
- pIE = (struct ndis_802_11_var_ie *)(cur_network->ies + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:
- if (!memcmp(pIE->data, RTW_WPA_OUI, 4) &&
- ((!memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
- (!memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
- return false;
- break;
- case _RSN_IE_2_:
- if ((!memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
- (!memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
- return false;
- default:
- break;
- }
-
- i += (pIE->Length + 2);
- }
-
- return true;
- } else {
- return false;
- }
-}
-
unsigned int is_ap_in_wep(struct adapter *padapter)
{
u32 i;
@@ -1310,7 +1258,6 @@ void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
else
memcpy(supported_rates, rtw_basic_rate_ofdm, 3);
-
if (wirelessmode & WIRELESS_11B)
update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
else
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 3c034486346b..2130d78e0d9f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTW_XMIT_C_
@@ -31,7 +23,7 @@ static void _init_txservq(struct tx_servq *ptxservq)
ptxservq->qcnt = 0;
}
-void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
+void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
spin_lock_init(&psta_xmitpriv->lock);
@@ -41,19 +33,17 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
_init_txservq(&psta_xmitpriv->vo_q);
INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz);
INIT_LIST_HEAD(&psta_xmitpriv->apsd);
-
}
-s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
{
int i;
struct xmit_buf *pxmitbuf;
struct xmit_frame *pxframe;
- int res = _SUCCESS;
+ int res = _SUCCESS;
u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
-
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
spin_lock_init(&pxmitpriv->lock);
@@ -80,7 +70,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
- if (pxmitpriv->pallocated_frame_buf == NULL) {
+ if (!pxmitpriv->pallocated_frame_buf) {
pxmitpriv->pxmit_frame_buf = NULL;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_frame fail!\n"));
res = _FAIL;
@@ -118,7 +108,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
- if (pxmitpriv->pallocated_xmitbuf == NULL) {
+ if (!pxmitpriv->pallocated_xmitbuf) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n"));
res = _FAIL;
goto exit;
@@ -159,7 +149,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
- if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
+ if (!pxmitpriv->pallocated_xmit_extbuf) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
res = _FAIL;
goto exit;
@@ -209,8 +199,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
rtw_hal_init_xmit_priv(padapter);
exit:
-
-
return res;
}
@@ -222,7 +210,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
- if (pxmitpriv->pxmit_frame_buf == NULL)
+ if (!pxmitpriv->pxmit_frame_buf)
return;
for (i = 0; i < NR_XMITFRAME; i++) {
@@ -594,8 +582,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
update_attrib_phy_info(pattrib, psta);
exit:
-
-
return res;
}
@@ -617,7 +603,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
else
stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
-
hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
if (pattrib->encrypt == _TKIP_) {/* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
@@ -714,7 +699,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
}
}
-
return _SUCCESS;
}
@@ -722,7 +706,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
{
struct pkt_attrib *pattrib = &pxmitframe->attrib;
-
if (pattrib->bswenc) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### %s\n", __func__));
switch (pattrib->encrypt) {
@@ -743,7 +726,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
}
-
return _SUCCESS;
}
@@ -763,7 +745,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
int bmcst = IS_MCAST(pattrib->ra);
-
if (pattrib->psta) {
psta = pattrib->psta;
} else {
@@ -912,24 +893,6 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pat
}
/*
- * Calculate wlan 802.11 packet MAX size from pkt_attrib
- * This function doesn't consider fragment case
- */
-u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib)
-{
- u32 len = 0;
-
- len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
- len += SNAP_SIZE + sizeof(u16); /* LLC */
- len += pattrib->pktlen;
- if (pattrib->encrypt == _TKIP_)
- len += 8; /* MIC */
- len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
-
- return len;
-}
-
-/*
This sub-routine will perform all the following:
@@ -960,7 +923,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
if (!psta)
return _FAIL;
- if (pxmitframe->buf_addr == NULL) {
+ if (!pxmitframe->buf_addr) {
DBG_88E("==> %s buf_addr == NULL\n", __func__);
return _FAIL;
}
@@ -1084,8 +1047,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
pattrib->vcs_mode = NONE_VCS;
exit:
-
-
return res;
}
@@ -1101,7 +1062,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
struct ieee80211_snap_hdr *snap;
u8 *oui;
-
snap = (struct ieee80211_snap_hdr *)data;
snap->dsap = 0xaa;
snap->ssap = 0xaa;
@@ -1118,7 +1078,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
-
return SNAP_SIZE + sizeof(u16);
}
@@ -1129,7 +1088,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
-
switch (pxmitpriv->vcs_setting) {
case DISABLE_VCS:
pxmitpriv->vcs = NONE_VCS;
@@ -1154,7 +1112,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
}
break;
}
-
}
void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
@@ -1206,7 +1163,6 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
unsigned long irql;
struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-
if (!pxmitbuf)
return _FAIL;
@@ -1219,7 +1175,6 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
spin_unlock_irqrestore(&pfree_queue->lock, irql);
-
return _SUCCESS;
}
@@ -1274,7 +1229,6 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
}
-
return _SUCCESS;
}
@@ -1343,7 +1297,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
struct adapter *padapter = pxmitpriv->adapter;
struct sk_buff *pndis_pkt = NULL;
-
if (!pxmitframe) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== %s:pxmitframe == NULL!!!!!!!!!!\n", __func__));
goto exit;
@@ -1369,8 +1322,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
rtw_os_pkt_complete(padapter, pndis_pkt);
exit:
-
-
return _SUCCESS;
}
@@ -1379,7 +1330,6 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
-
spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
@@ -1393,7 +1343,6 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
spin_unlock_bh(&pframequeue->lock);
-
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1439,7 +1388,6 @@ struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmi
struct registry_priv *pregpriv = &padapter->registrypriv;
int i, inx[4];
-
inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
if (pregpriv->wifi_spec == 1) {
@@ -1513,7 +1461,6 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
break;
}
-
return ptxservq;
}
@@ -1531,7 +1478,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
int res = _SUCCESS;
-
if (pattrib->psta)
psta = pattrib->psta;
else
@@ -1553,8 +1499,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
exit:
-
-
return res;
}
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index 26e0ef224299..1862c1396c85 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 6b67b38a6a9f..1b8341f40995 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2013 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
index bbb981c6bcec..464c11710398 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
@@ -542,16 +542,6 @@ odm_RATxRPTTimerSetting(
ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, (" <===== odm_RATxRPTTimerSetting()\n"));
}
-void
-ODM_RASupport_Init(
- struct odm_dm_struct *dm_odm
- )
-{
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>ODM_RASupport_Init()\n"));
-
- dm_odm->RaSupport88E = true;
-}
-
int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
{
struct odm_ra_info *pRaInfo = &dm_odm->RAInfo[macid];
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 960cc406d238..7202e1767fc0 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
@@ -52,10 +44,10 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
#define CHAN_PLAN_HW 0x80
-u8 /* return the final channel plan decision */
-hal_com_get_channel_plan(struct adapter *padapter, u8 hw_channel_plan,
- u8 sw_channel_plan, u8 def_channel_plan,
- bool load_fail)
+/* return the final channel plan decision */
+u8 hal_com_get_channel_plan(struct adapter *padapter, u8 hw_channel_plan,
+ u8 sw_channel_plan, u8 def_channel_plan,
+ bool load_fail)
{
u8 sw_cfg;
u8 chnlplan;
@@ -197,11 +189,13 @@ static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
{
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
- if (wifi_cfg) { /* WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 0, 1, 0, 1, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:L */
-
+ if (wifi_cfg) {
+ /*
+ * WMM
+ * BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA
+ * 0, 1, 0, 1, 0, 0, 0, 0, 0
+ * 0:H, 1:L
+ */
pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
@@ -211,12 +205,13 @@ static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-
- } else {/* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 1, 0, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:L */
-
+ } else {
+ /*
+ * typical setting
+ * BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA
+ * 1, 1, 0, 0, 0, 0, 0, 0, 0
+ * 0:H, 1:L
+ */
pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
@@ -233,11 +228,13 @@ static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
{
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
- if (wifi_cfg) {/* for WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 2, 1, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:N, 2:L */
-
+ if (wifi_cfg) {
+ /*
+ * for WMM
+ * BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA
+ * 1, 2, 1, 0, 0, 0, 0, 0, 0
+ * 0:H, 1:N, 2:L
+ */
pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
@@ -247,12 +244,13 @@ static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-
- } else {/* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 2, 2, 1, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:N, 2:L */
-
+ } else {
+ /*
+ * typical setting
+ * BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA
+ * 2, 2, 1, 0, 0, 0, 0, 0, 0
+ * 0:H, 1:N, 2:L
+ */
pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
@@ -272,18 +270,17 @@ bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
bool result = true;
switch (numoutpipe) {
+ case 1:
+ one_out_pipe(adapter);
+ break;
case 2:
two_out_pipe(adapter, wifi_cfg);
break;
case 3:
three_out_pipe(adapter, wifi_cfg);
break;
- case 1:
- one_out_pipe(adapter);
- break;
default:
result = false;
- break;
}
return result;
}
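
For orientation in the two_out_pipe()/three_out_pipe() hunks above: the comment rows list the queues in BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA order, while the assignments index Queue2Pipe[] in VO, VI, BE, BK, BCN, MGT, HIGH, TXCMD order. A minimal sketch of the two-pipe WMM case as one table; the BK and BCN slots fall outside the visible hunk and are inferred from the comment row, so treat those two values as assumptions:

    /* Two-pipe WMM mapping, index order Queue2Pipe[0..7]:
     *   VO VI BE BK BCN MGT HIGH TXCMD
     * Values are RtOutPipe indexes: 0 = high-priority pipe, 1 = low. */
    static const unsigned char wmm_two_pipe_map[8] = { 1, 0, 1, 0, 0, 0, 0, 0 };
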
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index a11c7b4254f6..b8fecc952cfc 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -1,26 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_INTF_C_
-#include <osdep_service.h>
-#include <drv_types.h>
#include <hal_intf.h>
-uint rtw_hal_init(struct adapter *adapt)
+uint rtw_hal_init(struct adapter *adapt)
{
- uint status = _SUCCESS;
+ uint status = _SUCCESS;
adapt->hw_init_completed = false;
@@ -44,7 +34,7 @@ uint rtw_hal_init(struct adapter *adapt)
uint rtw_hal_deinit(struct adapter *adapt)
{
- uint status = _SUCCESS;
+ uint status = _SUCCESS;
status = rtl8188eu_hal_deinit(adapt);
@@ -58,15 +48,15 @@ uint rtw_hal_deinit(struct adapter *adapt)
void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
- struct mlme_priv *pmlmepriv = &(adapt->mlmepriv);
+ struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
#ifdef CONFIG_88EU_AP_MODE
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &adapt->stapriv;
- if ((mac_id-1) > 0)
- psta = pstapriv->sta_aid[(mac_id-1) - 1];
+ if (mac_id - 1 > 0)
+ psta = pstapriv->sta_aid[mac_id - 2];
if (psta)
add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
#endif
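
One subtlety in the rtw_hal_update_ra_mask() hunk above: mac_id is u32, so the mac_id - 1 > 0 guard also passes for mac_id == 0 via unsigned wrap-around; only mac_id >= 2 yields a valid sta_aid[] slot. For those values the index rewrite is an identity, as this tiny standalone check shows:

    #include <assert.h>

    /* (mac_id - 1) - 1 and mac_id - 2 select the same sta_aid[] slot
     * for every mac_id the guard is meant to accept (mac_id >= 2). */
    int main(void)
    {
        for (unsigned int mac_id = 2; mac_id < 64; mac_id++)
            assert((mac_id - 1) - 1 == mac_id - 2);
        return 0;
    }
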
diff --git a/drivers/staging/rtl8188eu/hal/mac_cfg.c b/drivers/staging/rtl8188eu/hal/mac_cfg.c
index 6ed5e15ce661..370aa5cc55a7 100644
--- a/drivers/staging/rtl8188eu/hal/mac_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/mac_cfg.c
@@ -1,21 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-******************************************************************************/
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ ******************************************************************************/
#include "odm_precomp.h"
#include "phy.h"
-#include <rtw_iol.h>
/* MAC_REG.TXT */
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 001d6267b56e..9d567838a43a 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* include files */
@@ -19,16 +11,6 @@
#include "phy.h"
u32 GlobalDebugLevel;
-static const u16 dB_Invert_Table[8][12] = {
- {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
- {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
- {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
- {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
- {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
- {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
- {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125, 15849},
- {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119, 56234, 65535}
-};
/* avoid to warn in FreeBSD ==> To DO modify */
static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
@@ -164,7 +146,6 @@ u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
};
-
#define RxDefaultAnt1 0x65a9
#define RxDefaultAnt2 0x569a
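
About the dB_Invert_Table removed above: nothing else in the driver referenced it, and its 96 entries look like round(10^(i/20)) for i = 1..96, i.e. a dB-to-linear lookup. That reading is an inference from the values, not from any comment in the source. A sketch of the inferred generator; it matches every entry except the last, which the original table pins to 65535 rather than the computed 63097:

    #include <math.h>
    #include <stdint.h>

    /* Inferred generator for the deleted table: entry i (1-based) is
     * round(10^(i/20)); the original's final entry is hand-saturated. */
    static uint16_t db_invert(int i)
    {
        return (uint16_t)(pow(10.0, i / 20.0) + 0.5);
    }
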
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 5fcbe5639e99..0464dc41f860 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
index 91e0f6cee8f4..d5001920f77c 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 20253b5b6679..3c7cf8720df8 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -1,22 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8188E_PHYCFG_C_
#include <osdep_service.h>
#include <drv_types.h>
-#include <rtw_iol.h>
#include <rtl8188e_hal.h>
#include <rf.h>
#include <phy.h>
@@ -360,7 +351,6 @@ void rtl88eu_dm_txpower_track_adjust(struct odm_dm_struct *dm_odm, u8 type,
pwr_value = dm_odm->BbSwingIdxCck -
dm_odm->BbSwingIdxCckBase;
}
-
}
if (pwr_value >= ODM_TXPWRTRACK_MAX_IDX_88E && *direction == 1)
@@ -887,7 +877,6 @@ static void mac_setting_calibration(struct adapter *adapt, u32 *mac_reg, u32 *ba
static void path_a_standby(struct adapter *adapt)
{
-
phy_set_bb_reg(adapt, rFPGA0_IQK, bMaskDWord, 0x0);
phy_set_bb_reg(adapt, 0x840, bMaskDWord, 0x00010000);
phy_set_bb_reg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
@@ -1011,7 +1000,6 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
retry_count = 2;
if (t == 0) {
-
/* Save ADDA parameters, turn Path A ADDA on */
save_adda_registers(adapt, adda_reg, dm_odm->RFCalibrateInfo.ADDA_backup,
IQK_ADDA_REG_NUM);
diff --git a/drivers/staging/rtl8188eu/hal/pwrseq.c b/drivers/staging/rtl8188eu/hal/pwrseq.c
index d92a34ea8d60..4aa1dec0b5e4 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseq.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseq.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include "pwrseq.h"
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index e6867eea3530..249cbc375074 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <pwrseqcmd.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 8f8c9de6a9bc..81e30a1a6bfd 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <osdep_service.h>
@@ -52,7 +44,6 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
u8 *ptr;
u8 direction;
-
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
tx_agc[RF_PATH_A] = 0x3f3f3f3f;
tx_agc[RF_PATH_B] = 0x3f3f3f3f;
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 9712d7b74345..0700d8bd448d 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-******************************************************************************/
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ ******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index eeb2d9f82e92..b832bbf202a5 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8188E_CMD_C_
@@ -65,7 +57,6 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u32 h2c_cmd_ex = 0;
s32 ret = _FAIL;
-
if (!adapt->bFWReady) {
DBG_88E("%s(): return H2C cmd because fw is not ready\n",
__func__);
@@ -118,8 +109,6 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
ret = _SUCCESS;
exit:
-
-
return ret;
}
@@ -204,7 +193,6 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
H2CSetPwrMode.PwrState = 0x0C;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
FillH2CCmd_88E(adapt, H2C_PS_PWR_MODE, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
-
}
void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
@@ -562,7 +550,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
u8 DLBcnCount = 0;
u32 poll = 0;
-
DBG_88E("%s mstatus(%x)\n", __func__, mstatus);
if (mstatus == 1) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index ff227c8b98ca..545d6a6102f1 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* */
/* Description: */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 54ede4baa0c9..607170775fa5 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HAL_INIT_C_
@@ -78,13 +70,12 @@ static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
return rst;
}
-
s32 rtl8188e_iol_efuse_patch(struct adapter *padapter)
{
s32 result = _SUCCESS;
DBG_88E("==> %s\n", __func__);
- if (rtw_IOL_applied(padapter)) {
+ if (rtw_iol_applied(padapter)) {
iol_mode_enable(padapter, 1);
result = iol_execute(padapter, CMD_READ_EFUSE_MAP);
if (result == _SUCCESS)
@@ -216,7 +207,7 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
u32 i;
u32 Last_Entry_Of_TxPktBuf = LAST_ENTRY_OF_TX_PKT_BUFFER;/* 176, 22k */
- if (rtw_IOL_applied(padapter)) {
+ if (rtw_iol_applied(padapter)) {
status = iol_InitLLTTable(padapter, txpktbuf_bndy);
} else {
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
@@ -407,7 +398,6 @@ static u8 Hal_GetChnlGroup88E(u8 chnl, u8 *pGroup)
else if (chnl == 14) /* Channel 14 */
*pGroup = 5;
} else {
-
/* probably, this branch is suitable only for 5 GHz */
bIn24G = false;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 9f51f54f866a..0a900827c4fc 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8188E_REDESC_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index 460a20558bc0..9b8a284544ac 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
@@ -18,28 +10,6 @@
#include <drv_types.h>
#include <rtl8188e_hal.h>
-void dump_txrpt_ccx_88e(void *buf)
-{
- struct txrpt_ccx_88e *txrpt_ccx = buf;
-
- DBG_88E("%s:\n"
- "tag1:%u, pkt_num:%u, txdma_underflow:%u, int_bt:%u, int_tri:%u, int_ccx:%u\n"
- "mac_id:%u, pkt_ok:%u, bmc:%u\n"
- "retry_cnt:%u, lifetime_over:%u, retry_over:%u\n"
- "ccx_qtime:%u\n"
- "final_data_rate:0x%02x\n"
- "qsel:%u, sw:0x%03x\n",
- __func__, txrpt_ccx->tag1, txrpt_ccx->pkt_num,
- txrpt_ccx->txdma_underflow, txrpt_ccx->int_bt,
- txrpt_ccx->int_tri, txrpt_ccx->int_ccx,
- txrpt_ccx->mac_id, txrpt_ccx->pkt_ok, txrpt_ccx->bmc,
- txrpt_ccx->retry_cnt, txrpt_ccx->lifetime_over,
- txrpt_ccx->retry_over, txrpt_ccx_qtime_88e(txrpt_ccx),
- txrpt_ccx->final_data_rate, txrpt_ccx->qsel,
- txrpt_ccx_sw_88e(txrpt_ccx)
- );
-}
-
void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
{
struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 12879afb992e..412b76271a3d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -1,22 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
#include <rtl8188e_hal.h>
-#include <rtl8188e_led.h>
#include <usb_ops_linux.h>
/* LED object. */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 0fc093eb7a77..c0d51ba70a75 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8188EU_RECV_C_
#include <linux/kmemleak.h>
@@ -95,7 +87,6 @@ void rtw_hal_free_recv_priv(struct adapter *padapter)
DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
skb_queue_purge(&precvpriv->rx_skb_queue);
-
if (skb_queue_len(&precvpriv->free_recv_skb_queue))
DBG_88E(KERN_WARNING "free_recv_skb_queue not empty, %d\n",
skb_queue_len(&precvpriv->free_recv_skb_queue));
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 4f0f512f303c..a11bee16d070 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
@@ -23,7 +15,7 @@
s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
- struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
tasklet_init(&pxmitpriv->xmit_tasklet,
(void(*)(unsigned long))rtl8188eu_xmit_tasklet,
@@ -38,8 +30,8 @@ static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
- u16 *usptr = (u16 *)ptxdesc;
- u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
+ u16 *usptr = (u16 *)ptxdesc;
+ u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
u32 index;
u16 checksum = 0;
@@ -51,9 +43,11 @@ static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
}
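
A note on the descriptor checksum in the hunk above: per its comment, it folds the 32-byte Tx descriptor as sixteen 16-bit words with XOR and stores the result in the low half of txdw7. A minimal user-space sketch of the same fold (endianness handling omitted; the driver reads the words as little-endian):

    #include <stdint.h>

    /* XOR the first 16 16-bit words (32 bytes) of a Tx descriptor,
     * mirroring rtl8188eu_cal_txdesc_chksum() above. */
    static uint16_t txdesc_checksum(const uint16_t *desc_words)
    {
        uint16_t checksum = 0;
        for (int i = 0; i < 16; i++)
            checksum ^= desc_words[i];
        return checksum;
    }
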
-/* Description: In normal chip, we should send some packet to Hw which will be used by Fw */
-/* in FW LPS mode. The function is to fill the Tx descriptor of this packets, then */
-/* Fw can tell Hw to send these packet derectly. */
+/*
+ * In normal chips, we should send some packets to Hw which will be used by Fw
+ * in FW LPS mode. The function fills the Tx descriptors of these packets,
+ * then Fw can tell Hw to send these packets directly.
+ */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
{
struct tx_desc *ptxdesc;
@@ -166,16 +160,15 @@ static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
- int pull = 0;
- uint qsel;
+ int pull = 0;
+ uint qsel;
u8 data_rate, pwr_status, offset;
- struct adapter *adapt = pxmitframe->padapter;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct adapter *adapt = pxmitframe->padapter;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct odm_dm_struct *odmpriv = &adapt->HalData->odmpriv;
- struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- int bmcst = IS_MCAST(pattrib->ra);
+ struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
if (adapt->registrypriv.mp_mode == 0) {
if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
@@ -194,7 +187,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */
- if (bmcst)
+ if (is_multicast_ether_addr(pattrib->ra))
ptxdesc->txdw0 |= cpu_to_le32(BMC);
if (adapt->registrypriv.mp_mode == 0) {
@@ -335,7 +328,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
return pull;
}
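
The is_multicast_ether_addr() call in the hunk above is the standard helper from <linux/etherdevice.h>: it tests the I/G (group) bit of the first address octet, which broadcast frames also set. A user-space sketch of the same test:

    #include <stdbool.h>
    #include <stdint.h>

    /* Multicast test as performed by is_multicast_ether_addr():
     * bit 0 of the first octet is the I/G bit; broadcast
     * ff:ff:ff:ff:ff:ff is a special case of multicast. */
    static bool eth_addr_is_multicast(const uint8_t addr[6])
    {
        return addr[0] & 0x01;
    }
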
-/* for non-agg data frame or management frame */
+/* for non-agg data frame or management frame */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
s32 ret = _SUCCESS;
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index c3bb183aba38..12864b648fa8 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _HCI_HAL_INIT_C_
@@ -19,8 +11,6 @@
#include <rtw_efuse.h>
#include <fw.h>
#include <rtl8188e_hal.h>
-#include <rtl8188e_led.h>
-#include <rtw_iol.h>
#include <phy.h>
#define HAL_BB_ENABLE 1
@@ -1003,7 +993,6 @@ exit:
RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("<=== usb_inirp_init\n"));
-
return status;
}
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
index d244efff3593..63a144ee2183 100644
--- a/drivers/staging/rtl8188eu/include/HalVerDef.h
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_VERSION_DEF_H__
#define __HAL_VERSION_DEF_H__
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 73cc86705cf3..b69b45d95402 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -1,16 +1,8 @@
- /******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 */
+/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 2734565ce802..4ae095837bef 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/*-----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8188eu/include/fw.h b/drivers/staging/rtl8188eu/include/fw.h
index b016f32a8992..9f010c4b8f9c 100644
--- a/drivers/staging/rtl8188eu/include/fw.h
+++ b/drivers/staging/rtl8188eu/include/fw.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2013 Realtek Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
index 4e5d7fc6de07..da66695a1d8f 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYCFG_H__
#define __INC_HAL8188EPHYCFG_H__
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h
index 8cbba85e1587..53afcea21c96 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYREG_H__
#define __INC_HAL8188EPHYREG_H__
diff --git a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/rtl8188eu/include/hal8188e_rate_adaptive.h
index 96ebda93b4ee..5b59c25e4c8a 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
+++ b/drivers/staging/rtl8188eu/include/hal8188e_rate_adaptive.h
@@ -49,8 +49,6 @@
/* End rate adaptive define */
-void ODM_RASupport_Init(struct odm_dm_struct *dm_odm);
-
int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm);
int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 MacID);
diff --git a/drivers/staging/rtl8188eu/include/hal_com.h b/drivers/staging/rtl8188eu/include/hal_com.h
index aaf444733507..428a2a92820e 100644
--- a/drivers/staging/rtl8188eu/include/hal_com.h
+++ b/drivers/staging/rtl8188eu/include/hal_com.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_COMMON_H__
#define __HAL_COMMON_H__
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index da4ee1561c36..e5be27af7bf5 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -1,23 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL_INTF_H__
#define __HAL_INTF_H__
#include <osdep_service.h>
#include <drv_types.h>
-#include <Hal8188EPhyCfg.h>
+#include <hal8188e_phy_cfg.h>
enum RTL871X_HCI_TYPE {
RTW_PCIE = BIT(0),
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index 9f480ccec531..c60b833ca110 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __IEEE80211_H
#define __IEEE80211_H
@@ -523,17 +515,6 @@ enum ieee80211_state {
#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
#define DEFAULT_FTS 2346
-static inline int is_multicast_mac_addr(const u8 *addr)
-{
- return ((addr[0] != 0xff) && (0x01 & addr[0]));
-}
-
-static inline int is_broadcast_mac_addr(const u8 *addr)
-{
- return (addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&
- (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff);
-}
-
#define CFG_IEEE80211_RESERVE_FCS BIT(0)
#define CFG_IEEE80211_COMPUTE_FCS BIT(1)
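
The two inline helpers deleted above differ subtly from the standard kernel ones: the local is_multicast_mac_addr() excluded any address whose first octet is 0xff (a first-octet approximation of "not broadcast"), whereas is_multicast_ether_addr() from <linux/etherdevice.h> counts broadcast as multicast. A side-by-side sketch of the two semantics:

    #include <stdbool.h>
    #include <stdint.h>

    /* Semantics of the deleted local helper: multicast, excluding
     * addresses that start with 0xff. */
    static bool mcast_excl_bcast(const uint8_t a[6])
    {
        return a[0] != 0xff && (a[0] & 0x01);
    }

    /* Semantics of the standard kernel helper: any address with the
     * I/G bit set, broadcast included. */
    static bool mcast_incl_bcast(const uint8_t a[6])
    {
        return a[0] & 0x01;
    }
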
@@ -786,9 +767,9 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv);
int rtw_get_bit_value_from_ieee_value(u8 val);
-uint rtw_is_cckrates_included(u8 *rate);
+bool rtw_is_cckrates_included(u8 *rate);
-uint rtw_is_cckratesonly_included(u8 *rate);
+bool rtw_is_cckratesonly_included(u8 *rate);
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
index 5a35b0866db6..eda16c06336a 100644
--- a/drivers/staging/rtl8188eu/include/mlme_osdep.h
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __MLME_OSDEP_H_
#define __MLME_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/mon.h b/drivers/staging/rtl8188eu/include/mon.h
index f31fa688e092..297710626d72 100644
--- a/drivers/staging/rtl8188eu/include/mon.h
+++ b/drivers/staging/rtl8188eu/include/mon.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RTL8188EU monitor interface
*
* Copyright (C) 2015 Jakub Sitnicki
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
*/
/*
diff --git a/drivers/staging/rtl8188eu/include/mp_custom_oid.h b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
deleted file mode 100644
index 1a06ee6ad460..000000000000
--- a/drivers/staging/rtl8188eu/include/mp_custom_oid.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __CUSTOM_OID_H
-#define __CUSTOM_OID_H
-
-/* by Owen */
-/* 0xFF818000 - 0xFF81802F RTL8180 Mass Production Kit */
-/* 0xFF818500 - 0xFF81850F RTL8185 Setup Utility */
-/* 0xFF818580 - 0xFF81858F RTL8185 Phy Status Utility */
-
-/* */
-
-/* by Owen for Production Kit */
-/* For Production Kit with Agilent Equipments */
-/* in order to make our custom oids hopefully somewhat unique */
-/* we will use 0xFF (indicating implementation specific OID) */
-/* 81(first byte of non zero Realtek unique identifier) */
-/* 80 (second byte of non zero Realtek unique identifier) */
-/* XX (the custom OID number - providing 255 possible custom oids) */
-
-#define OID_RT_PRO_RESET_DUT 0xFF818000
-#define OID_RT_PRO_SET_DATA_RATE 0xFF818001
-#define OID_RT_PRO_START_TEST 0xFF818002
-#define OID_RT_PRO_STOP_TEST 0xFF818003
-#define OID_RT_PRO_SET_PREAMBLE 0xFF818004
-#define OID_RT_PRO_SET_SCRAMBLER 0xFF818005
-#define OID_RT_PRO_SET_FILTER_BB 0xFF818006
-#define OID_RT_PRO_SET_MANUAL_DIVERSITY_BB 0xFF818007
-#define OID_RT_PRO_SET_CHANNEL_DIRECT_CALL 0xFF818008
-#define OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL 0xFF818009
-#define OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL 0xFF81800A
-
-#define OID_RT_PRO_SET_TX_ANTENNA_BB 0xFF81800D
-#define OID_RT_PRO_SET_ANTENNA_BB 0xFF81800E
-#define OID_RT_PRO_SET_CR_SCRAMBLER 0xFF81800F
-#define OID_RT_PRO_SET_CR_NEW_FILTER 0xFF818010
-#define OID_RT_PRO_SET_TX_POWER_CONTROL 0xFF818011
-#define OID_RT_PRO_SET_CR_TX_CONFIG 0xFF818012
-#define OID_RT_PRO_GET_TX_POWER_CONTROL 0xFF818013
-#define OID_RT_PRO_GET_CR_SIGNAL_QUALITY 0xFF818014
-#define OID_RT_PRO_SET_CR_SETPOINT 0xFF818015
-#define OID_RT_PRO_SET_INTEGRATOR 0xFF818016
-#define OID_RT_PRO_SET_SIGNAL_QUALITY 0xFF818017
-#define OID_RT_PRO_GET_INTEGRATOR 0xFF818018
-#define OID_RT_PRO_GET_SIGNAL_QUALITY 0xFF818019
-#define OID_RT_PRO_QUERY_EEPROM_TYPE 0xFF81801A
-#define OID_RT_PRO_WRITE_MAC_ADDRESS 0xFF81801B
-#define OID_RT_PRO_READ_MAC_ADDRESS 0xFF81801C
-#define OID_RT_PRO_WRITE_CIS_DATA 0xFF81801D
-#define OID_RT_PRO_READ_CIS_DATA 0xFF81801E
-#define OID_RT_PRO_WRITE_POWER_CONTROL 0xFF81801F
-#define OID_RT_PRO_READ_POWER_CONTROL 0xFF818020
-#define OID_RT_PRO_WRITE_EEPROM 0xFF818021
-#define OID_RT_PRO_READ_EEPROM 0xFF818022
-#define OID_RT_PRO_RESET_TX_PACKET_SENT 0xFF818023
-#define OID_RT_PRO_QUERY_TX_PACKET_SENT 0xFF818024
-#define OID_RT_PRO_RESET_RX_PACKET_RECEIVED 0xFF818025
-#define OID_RT_PRO_QUERY_RX_PACKET_RECEIVED 0xFF818026
-#define OID_RT_PRO_QUERY_RX_PACKET_CRC32_ERROR 0xFF818027
-#define OID_RT_PRO_QUERY_CURRENT_ADDRESS 0xFF818028
-#define OID_RT_PRO_QUERY_PERMANENT_ADDRESS 0xFF818029
-#define OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS 0xFF81802A
-#define OID_RT_PRO_RECEIVE_PACKET 0xFF81802C
-/* added by Owen on 04/08/03 for Cameo's request */
-#define OID_RT_PRO_WRITE_EEPROM_BYTE 0xFF81802D
-#define OID_RT_PRO_READ_EEPROM_BYTE 0xFF81802E
-#define OID_RT_PRO_SET_MODULATION 0xFF81802F
-/* */
-
-/* Sean */
-#define OID_RT_DRIVER_OPTION 0xFF818080
-#define OID_RT_RF_OFF 0xFF818081
-#define OID_RT_AUTH_STATUS 0xFF818082
-
-/* */
-#define OID_RT_PRO_SET_CONTINUOUS_TX 0xFF81800B
-#define OID_RT_PRO_SET_SINGLE_CARRIER_TX 0xFF81800C
-#define OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX 0xFF81802B
-#define OID_RT_PRO_SET_SINGLE_TONE_TX 0xFF818043
-/* */
-
-
-/* by Owen for RTL8185 Phy Status Report Utility */
-#define OID_RT_UTILITY_false_ALARM_COUNTERS 0xFF818580
-#define OID_RT_UTILITY_SELECT_DEBUG_MODE 0xFF818581
-#define OID_RT_UTILITY_SELECT_SUBCARRIER_NUMBER 0xFF818582
-#define OID_RT_UTILITY_GET_RSSI_STATUS 0xFF818583
-#define OID_RT_UTILITY_GET_FRAME_DETECTION_STATUS 0xFF818584
-#define OID_RT_UTILITY_GET_AGC_AND_FREQUENCY_OFFSET_ESTIMATION_STATUS \
- 0xFF818585
-#define OID_RT_UTILITY_GET_CHANNEL_ESTIMATION_STATUS 0xFF818586
-/* */
-
-/* by Owen on 03/09/19-03/09/22 for RTL8185 */
-#define OID_RT_WIRELESS_MODE 0xFF818500
-#define OID_RT_SUPPORTED_RATES 0xFF818501
-#define OID_RT_DESIRED_RATES 0xFF818502
-#define OID_RT_WIRELESS_MODE_STARTING_ADHOC 0xFF818503
-/* */
-
-#define OID_RT_GET_CONNECT_STATE 0xFF030001
-#define OID_RT_RESCAN 0xFF030002
-#define OID_RT_SET_KEY_LENGTH 0xFF030003
-#define OID_RT_SET_DEFAULT_KEY_ID 0xFF030004
-
-#define OID_RT_SET_CHANNEL 0xFF010182
-#define OID_RT_SET_SNIFFER_MODE 0xFF010183
-#define OID_RT_GET_SIGNAL_QUALITY 0xFF010184
-#define OID_RT_GET_SMALL_PACKET_CRC 0xFF010185
-#define OID_RT_GET_MIDDLE_PACKET_CRC 0xFF010186
-#define OID_RT_GET_LARGE_PACKET_CRC 0xFF010187
-#define OID_RT_GET_TX_RETRY 0xFF010188
-#define OID_RT_GET_RX_RETRY 0xFF010189
-#define OID_RT_PRO_SET_FW_DIG_STATE 0xFF01018A/* S */
-#define OID_RT_PRO_SET_FW_RA_STATE 0xFF01018B/* S */
-
-#define OID_RT_GET_RX_TOTAL_PACKET 0xFF010190
-#define OID_RT_GET_TX_BEACON_OK 0xFF010191
-#define OID_RT_GET_TX_BEACON_ERR 0xFF010192
-#define OID_RT_GET_RX_ICV_ERR 0xFF010193
-#define OID_RT_SET_ENCRYPTION_ALGORITHM 0xFF010194
-#define OID_RT_SET_NO_AUTO_RESCAN 0xFF010195
-#define OID_RT_GET_PREAMBLE_MODE 0xFF010196
-#define OID_RT_GET_DRIVER_UP_DELTA_TIME 0xFF010197
-#define OID_RT_GET_AP_IP 0xFF010198
-#define OID_RT_GET_CHANNELPLAN 0xFF010199
-#define OID_RT_SET_PREAMBLE_MODE 0xFF01019A
-#define OID_RT_SET_BCN_INTVL 0xFF01019B
-#define OID_RT_GET_RF_VENDER 0xFF01019C
-#define OID_RT_DEDICATE_PROBE 0xFF01019D
-#define OID_RT_PRO_RX_FILTER_PATTERN 0xFF01019E
-
-#define OID_RT_GET_DCST_CURRENT_THRESHOLD 0xFF01019F
-
-#define OID_RT_GET_CCA_ERR 0xFF0101A0
-#define OID_RT_GET_CCA_UPGRADE_THRESHOLD 0xFF0101A1
-#define OID_RT_GET_CCA_FALLBACK_THRESHOLD 0xFF0101A2
-
-#define OID_RT_GET_CCA_UPGRADE_EVALUATE_TIMES 0xFF0101A3
-#define OID_RT_GET_CCA_FALLBACK_EVALUATE_TIMES 0xFF0101A4
-
-/* by Owen on 03/31/03 for Cameo's request */
-#define OID_RT_SET_RATE_ADAPTIVE 0xFF0101A5
-/* */
-#define OID_RT_GET_DCST_EVALUATE_PERIOD 0xFF0101A5
-#define OID_RT_GET_DCST_TIME_UNIT_INDEX 0xFF0101A6
-#define OID_RT_GET_TOTAL_TX_BYTES 0xFF0101A7
-#define OID_RT_GET_TOTAL_RX_BYTES 0xFF0101A8
-#define OID_RT_CURRENT_TX_POWER_LEVEL 0xFF0101A9
-#define OID_RT_GET_ENC_KEY_MISMATCH_COUNT 0xFF0101AA
-#define OID_RT_GET_ENC_KEY_MATCH_COUNT 0xFF0101AB
-#define OID_RT_GET_CHANNEL 0xFF0101AC
-
-#define OID_RT_SET_CHANNELPLAN 0xFF0101AD
-#define OID_RT_GET_HARDWARE_RADIO_OFF 0xFF0101AE
-#define OID_RT_CHANNELPLAN_BY_COUNTRY 0xFF0101AF
-#define OID_RT_SCAN_AVAILABLE_BSSID 0xFF0101B0
-#define OID_RT_GET_HARDWARE_VERSION 0xFF0101B1
-#define OID_RT_GET_IS_ROAMING 0xFF0101B2
-#define OID_RT_GET_IS_PRIVACY 0xFF0101B3
-#define OID_RT_GET_KEY_MISMATCH 0xFF0101B4
-#define OID_RT_SET_RSSI_ROAM_TRAFFIC_TH 0xFF0101B5
-#define OID_RT_SET_RSSI_ROAM_SIGNAL_TH 0xFF0101B6
-#define OID_RT_RESET_LOG 0xFF0101B7
-#define OID_RT_GET_LOG 0xFF0101B8
-#define OID_RT_SET_INDICATE_HIDDEN_AP 0xFF0101B9
-#define OID_RT_GET_HEADER_FAIL 0xFF0101BA
-#define OID_RT_SUPPORTED_WIRELESS_MODE 0xFF0101BB
-#define OID_RT_GET_CHANNEL_LIST 0xFF0101BC
-#define OID_RT_GET_SCAN_IN_PROGRESS 0xFF0101BD
-#define OID_RT_GET_TX_INFO 0xFF0101BE
-#define OID_RT_RF_READ_WRITE_OFFSET 0xFF0101BF
-#define OID_RT_RF_READ_WRITE 0xFF0101C0
-
-/* For Netgear request. 2005.01.13, by rcnjko. */
-#define OID_RT_FORCED_DATA_RATE 0xFF0101C1
-#define OID_RT_WIRELESS_MODE_FOR_SCAN_LIST 0xFF0101C2
-/* For Netgear request. 2005.02.17, by rcnjko. */
-#define OID_RT_GET_BSS_WIRELESS_MODE 0xFF0101C3
-/* For AZ project. 2005.06.27, by rcnjko. */
-#define OID_RT_SCAN_WITH_MAGIC_PACKET 0xFF0101C4
-
-/* Vincent 8185MP */
-#define OID_RT_PRO_RX_FILTER 0xFF0111C0
-
-#define OID_CE_USB_WRITE_REGISTRY 0xFF0111C1
-#define OID_CE_USB_READ_REGISTRY 0xFF0111C2
-
-#define OID_RT_PRO_SET_INITIAL_GA 0xFF0111C3
-#define OID_RT_PRO_SET_BB_RF_STANDBY_MODE 0xFF0111C4
-#define OID_RT_PRO_SET_BB_RF_SHUTDOWN_MODE 0xFF0111C5
-#define OID_RT_PRO_SET_TX_CHARGE_PUMP 0xFF0111C6
-#define OID_RT_PRO_SET_RX_CHARGE_PUMP 0xFF0111C7
-#define OID_RT_PRO_RF_WRITE_REGISTRY 0xFF0111C8
-#define OID_RT_PRO_RF_READ_REGISTRY 0xFF0111C9
-#define OID_RT_PRO_QUERY_RF_TYPE 0xFF0111CA
-
-/* AP OID */
-#define OID_RT_AP_GET_ASSOCIATED_STATION_LIST 0xFF010300
-#define OID_RT_AP_GET_CURRENT_TIME_STAMP 0xFF010301
-#define OID_RT_AP_SWITCH_INTO_AP_MODE 0xFF010302
-#define OID_RT_AP_SET_DTIM_PERIOD 0xFF010303
-/* Determine if driver supports AP mode. */
-#define OID_RT_AP_SUPPORTED 0xFF010304
-/* Set WPA-PSK passphrase into authenticator. */
-#define OID_RT_AP_SET_PASSPHRASE 0xFF010305
-
-/* 8187MP. 2004.09.06, by rcnjko. */
-#define OID_RT_PRO8187_WI_POLL 0xFF818780
-#define OID_RT_PRO_WRITE_BB_REG 0xFF818781
-#define OID_RT_PRO_READ_BB_REG 0xFF818782
-#define OID_RT_PRO_WRITE_RF_REG 0xFF818783
-#define OID_RT_PRO_READ_RF_REG 0xFF818784
-
-/* Meeting House. added by Annie, 2005-07-20. */
-#define OID_RT_MH_VENDER_ID 0xFFEDC100
-
-/* 8711 MP OID added 20051230. */
-#define OID_RT_PRO8711_JOIN_BSS 0xFF871100/* S */
-
-#define OID_RT_PRO_READ_REGISTER 0xFF871101 /* Q */
-#define OID_RT_PRO_WRITE_REGISTER 0xFF871102 /* S */
-
-#define OID_RT_PRO_BURST_READ_REGISTER 0xFF871103 /* Q */
-#define OID_RT_PRO_BURST_WRITE_REGISTER 0xFF871104 /* S */
-
-#define OID_RT_PRO_WRITE_TXCMD 0xFF871105 /* S */
-
-#define OID_RT_PRO_READ16_EEPROM 0xFF871106 /* Q */
-#define OID_RT_PRO_WRITE16_EEPROM 0xFF871107 /* S */
-
-#define OID_RT_PRO_H2C_SET_COMMAND 0xFF871108 /* S */
-#define OID_RT_PRO_H2C_QUERY_RESULT 0xFF871109 /* Q */
-
-#define OID_RT_PRO8711_WI_POLL 0xFF87110A /* Q */
-#define OID_RT_PRO8711_PKT_LOSS 0xFF87110B /* Q */
-#define OID_RT_RD_ATTRIB_MEM 0xFF87110C/* Q */
-#define OID_RT_WR_ATTRIB_MEM 0xFF87110D/* S */
-
-
-/* Method 2 for H2C/C2H */
-#define OID_RT_PRO_H2C_CMD_MODE 0xFF871110 /* S */
-#define OID_RT_PRO_H2C_CMD_RSP_MODE 0xFF871111 /* Q */
-#define OID_RT_PRO_H2C_CMD_EVENT_MODE 0xFF871112 /* S */
-#define OID_RT_PRO_WAIT_C2H_EVENT 0xFF871113 /* Q */
-#define OID_RT_PRO_RW_ACCESS_PROTOCOL_TEST 0xFF871114/* Q */
-
-#define OID_RT_PRO_SCSI_ACCESS_TEST 0xFF871115 /* Q, S */
-
-#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_OUT 0xFF871116 /* S */
-#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_IN 0xFF871117 /* Q,S */
-#define OID_RT_RRO_RX_PKT_VIA_IOCTRL 0xFF871118 /* Q */
-#define OID_RT_RRO_RX_PKTARRAY_VIA_IOCTRL 0xFF871119 /* Q */
-
-#define OID_RT_RPO_SET_PWRMGT_TEST 0xFF87111A /* S */
-#define OID_RT_PRO_QRY_PWRMGT_TEST 0XFF87111B /* Q */
-#define OID_RT_RPO_ASYNC_RWIO_TEST 0xFF87111C /* S */
-#define OID_RT_RPO_ASYNC_RWIO_POLL 0xFF87111D /* Q */
-#define OID_RT_PRO_SET_RF_INTFS 0xFF87111E /* S */
-#define OID_RT_POLL_RX_STATUS 0xFF87111F /* Q */
-
-#define OID_RT_PRO_CFG_DEBUG_MESSAGE 0xFF871120 /* Q,S */
-#define OID_RT_PRO_SET_DATA_RATE_EX 0xFF871121/* S */
-#define OID_RT_PRO_SET_BASIC_RATE 0xFF871122/* S */
-#define OID_RT_PRO_READ_TSSI 0xFF871123/* S */
-#define OID_RT_PRO_SET_POWER_TRACKING 0xFF871124/* S */
-
-
-#define OID_RT_PRO_QRY_PWRSTATE 0xFF871150 /* Q */
-#define OID_RT_PRO_SET_PWRSTATE 0xFF871151 /* S */
-
-/* Method 2 , using workitem */
-#define OID_RT_SET_READ_REG 0xFF871181 /* S */
-#define OID_RT_SET_WRITE_REG 0xFF871182 /* S */
-#define OID_RT_SET_BURST_READ_REG 0xFF871183 /* S */
-#define OID_RT_SET_BURST_WRITE_REG 0xFF871184 /* S */
-#define OID_RT_SET_WRITE_TXCMD 0xFF871185 /* S */
-#define OID_RT_SET_READ16_EEPROM 0xFF871186 /* S */
-#define OID_RT_SET_WRITE16_EEPROM 0xFF871187 /* S */
-#define OID_RT_QRY_POLL_WKITEM 0xFF871188 /* Q */
-
-/* For SDIO INTERFACE only */
-#define OID_RT_PRO_SYNCPAGERW_SRAM 0xFF8711A0 /* Q, S */
-#define OID_RT_PRO_871X_DRV_EXT 0xFF8711A1
-
-/* For USB INTERFACE only */
-#define OID_RT_PRO_USB_VENDOR_REQ 0xFF8711B0 /* Q, S */
-#define OID_RT_PRO_SCSI_AUTO_TEST 0xFF8711B1 /* S */
-#define OID_RT_PRO_USB_MAC_AC_FIFO_WRITE 0xFF8711B2 /* S */
-#define OID_RT_PRO_USB_MAC_RX_FIFO_READ 0xFF8711B3 /* Q */
-#define OID_RT_PRO_USB_MAC_RX_FIFO_POLLING 0xFF8711B4 /* Q */
-
-#define OID_RT_PRO_H2C_SET_RATE_TABLE 0xFF8711FB /* S */
-#define OID_RT_PRO_H2C_GET_RATE_TABLE 0xFF8711FC /* S */
-#define OID_RT_PRO_H2C_C2H_LBK_TEST 0xFF8711FE
-
-#define OID_RT_PRO_ENCRYPTION_CTRL 0xFF871200 /* Q, S */
-#define OID_RT_PRO_ADD_STA_INFO 0xFF871201 /* S */
-#define OID_RT_PRO_DELE_STA_INFO 0xFF871202 /* S */
-#define OID_RT_PRO_QUERY_DR_VARIABLE 0xFF871203 /* Q */
-
-#define OID_RT_PRO_RX_PACKET_TYPE 0xFF871204 /* Q, S */
-
-#define OID_RT_PRO_READ_EFUSE 0xFF871205 /* Q */
-#define OID_RT_PRO_WRITE_EFUSE 0xFF871206 /* S */
-#define OID_RT_PRO_RW_EFUSE_PGPKT 0xFF871207 /* Q, S */
-#define OID_RT_GET_EFUSE_CURRENT_SIZE 0xFF871208 /* Q */
-
-#define OID_RT_SET_BANDWIDTH 0xFF871209 /* S */
-#define OID_RT_SET_CRYSTAL_CAP 0xFF87120A /* S */
-
-#define OID_RT_SET_RX_PACKET_TYPE 0xFF87120B /* S */
-
-#define OID_RT_GET_EFUSE_MAX_SIZE 0xFF87120C /* Q */
-
-#define OID_RT_PRO_SET_TX_AGC_OFFSET 0xFF87120D /* S */
-
-#define OID_RT_PRO_SET_PKT_TEST_MODE 0xFF87120E /* S */
-
-#define OID_RT_PRO_FOR_EVM_TEST_SETTING 0xFF87120F /* S */
-
-#define OID_RT_PRO_GET_THERMAL_METER 0xFF871210 /* Q */
-
-#define OID_RT_RESET_PHY_RX_PACKET_COUNT 0xFF871211 /* S */
-#define OID_RT_GET_PHY_RX_PACKET_RECEIVED 0xFF871212 /* Q */
-#define OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR 0xFF871213 /* Q */
-
-#define OID_RT_SET_POWER_DOWN 0xFF871214 /* S */
-
-#define OID_RT_GET_POWER_MODE 0xFF871215 /* Q */
-
-#define OID_RT_PRO_EFUSE 0xFF871216 /* Q, S */
-#define OID_RT_PRO_EFUSE_MAP 0xFF871217 /* Q, S */
-
-#endif /* ifndef __CUSTOM_OID_H */
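The /* Q */ and /* S */ suffixes in the deleted table above mark each OID as query (read) or set (write); /* Q, S */ entries accept both directions. A minimal dispatch sketch, assuming a hypothetical layout for struct oid_par_priv (the real definition lives elsewhere in this driver):

#include <linux/errno.h>
#include <linux/types.h>

#define OID_TYPE_QUERY	1	/* "Q" entries */
#define OID_TYPE_SET	2	/* "S" entries */

struct oid_par_priv {			/* layout assumed for illustration */
	u32 oid;			/* e.g. OID_RT_PRO_READ_REGISTER */
	u32 type_of_oid;		/* OID_TYPE_QUERY or OID_TYPE_SET */
	void *information_buf;
	u32 information_buf_len;
};

static int oid_dispatch(struct oid_par_priv *par)
{
	switch (par->oid) {
	case 0xFF871101:	/* OID_RT_PRO_READ_REGISTER, query only */
		if (par->type_of_oid != OID_TYPE_QUERY)
			return -EINVAL;
		return 0;	/* fill par->information_buf here */
	case 0xFF871102:	/* OID_RT_PRO_WRITE_REGISTER, set only */
		if (par->type_of_oid != OID_TYPE_SET)
			return -EINVAL;
		return 0;	/* consume par->information_buf here */
	default:
		return -EOPNOTSUPP;
	}
}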
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 95426b7c6dbf..947481de9cb1 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
@@ -716,7 +708,7 @@ struct odm_dm_struct {
/* HOOK BEFORE REG INIT----------- */
/* ODM Platform info AP/ADSL/CE/MP = 1/2/3/4 */
u8 SupportPlatform;
- /* ODM Support Ability DIG/RATR/TX_PWR_TRACK/ ¡K¡K = 1/2/3/¡K */
+ /* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
u32 SupportAbility;
/* ODM PCIE/USB/SDIO/GSPI = 0/1/2/3 */
u8 SupportInterface;
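SupportPlatform, SupportAbility and SupportInterface are capability words hooked in before register init; SupportAbility in particular is consumed as a bit mask, one bit per dynamic mechanism (DIG, RA mask, TX power tracking, ...). A sketch of the test, with illustrative bit positions — the authoritative values are the ODM ability enum in odm.h:

#include <linux/bits.h>
#include <linux/types.h>

#define ODM_ABILITY_DIG		BIT(0)	/* illustrative value */
#define ODM_ABILITY_RA_MASK	BIT(1)	/* illustrative value */
#define ODM_ABILITY_PWR_TRACK	BIT(2)	/* illustrative value */

static bool odm_ability_enabled(const struct odm_dm_struct *dm, u32 mask)
{
	return (dm->SupportAbility & mask) != 0;
}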
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index da7325d599c6..8cef32dc6350 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
******************************************************************************/
#ifndef __HALHWOUTSRC_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
deleted file mode 100644
index f46f7d43ce00..000000000000
--- a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#ifndef __ODM_REGDEFINE11N_H__
-#define __ODM_REGDEFINE11N_H__
-
-
-/* 2 RF REG LIST */
-#define ODM_REG_RF_MODE_11N 0x00
-#define ODM_REG_RF_0B_11N 0x0B
-#define ODM_REG_CHNBW_11N 0x18
-#define ODM_REG_T_METER_11N 0x24
-#define ODM_REG_RF_25_11N 0x25
-#define ODM_REG_RF_26_11N 0x26
-#define ODM_REG_RF_27_11N 0x27
-#define ODM_REG_RF_2B_11N 0x2B
-#define ODM_REG_RF_2C_11N 0x2C
-#define ODM_REG_RXRF_A3_11N 0x3C
-#define ODM_REG_T_METER_92D_11N 0x42
-#define ODM_REG_T_METER_88E_11N 0x42
-
-
-
-/* 2 BB REG LIST */
-/* PAGE 8 */
-#define ODM_REG_BB_CTRL_11N 0x800
-#define ODM_REG_RF_PIN_11N 0x804
-#define ODM_REG_PSD_CTRL_11N 0x808
-#define ODM_REG_TX_ANT_CTRL_11N 0x80C
-#define ODM_REG_BB_PWR_SAV5_11N 0x818
-#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
-#define ODM_REG_RX_DEFAULT_A_11N 0x858
-#define ODM_REG_RX_DEFAULT_B_11N 0x85A
-#define ODM_REG_BB_PWR_SAV3_11N 0x85C
-#define ODM_REG_ANTSEL_CTRL_11N 0x860
-#define ODM_REG_RX_ANT_CTRL_11N 0x864
-#define ODM_REG_PIN_CTRL_11N 0x870
-#define ODM_REG_BB_PWR_SAV1_11N 0x874
-#define ODM_REG_ANTSEL_PATH_11N 0x878
-#define ODM_REG_BB_3WIRE_11N 0x88C
-#define ODM_REG_SC_CNT_11N 0x8C4
-#define ODM_REG_PSD_DATA_11N 0x8B4
-/* PAGE 9 */
-#define ODM_REG_ANT_MAPPING1_11N 0x914
-#define ODM_REG_ANT_MAPPING2_11N 0x918
-/* PAGE A */
-#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
-#define ODM_REG_CCK_CCA_11N 0xA0A
-#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
-#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
-#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
-#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
-#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
-#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
-#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
-#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
-#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
-#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
-#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
-#define ODM_REG_CCK_FA_RST_11N 0xA2C
-#define ODM_REG_CCK_FA_MSB_11N 0xA58
-#define ODM_REG_CCK_FA_LSB_11N 0xA5C
-#define ODM_REG_CCK_CCA_CNT_11N 0xA60
-#define ODM_REG_BB_PWR_SAV4_11N 0xA74
-/* PAGE B */
-#define ODM_REG_LNA_SWITCH_11N 0xB2C
-#define ODM_REG_PATH_SWITCH_11N 0xB30
-#define ODM_REG_RSSI_CTRL_11N 0xB38
-#define ODM_REG_CONFIG_ANTA_11N 0xB68
-#define ODM_REG_RSSI_BT_11N 0xB9C
-/* PAGE C */
-#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
-#define ODM_REG_RX_PATH_11N 0xC04
-#define ODM_REG_TRMUX_11N 0xC08
-#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
-#define ODM_REG_RXIQI_MATRIX_11N 0xC14
-#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
-#define ODM_REG_IGI_A_11N 0xC50
-#define ODM_REG_ANTDIV_PARA2_11N 0xC54
-#define ODM_REG_IGI_B_11N 0xC58
-#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
-#define ODM_REG_BB_PWR_SAV2_11N 0xC70
-#define ODM_REG_RX_OFF_11N 0xC7C
-#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
-#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
-#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
-#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
-#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
-#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
-#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
-/* PAGE D */
-#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
-#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
-#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
-#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
-/* PAGE E */
-#define ODM_REG_TXAGC_A_6_18_11N 0xE00
-#define ODM_REG_TXAGC_A_24_54_11N 0xE04
-#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
-#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
-#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
-#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
-#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
-#define ODM_REG_FPGA0_IQK_11N 0xE28
-#define ODM_REG_TXIQK_TONE_A_11N 0xE30
-#define ODM_REG_RXIQK_TONE_A_11N 0xE34
-#define ODM_REG_TXIQK_PI_A_11N 0xE38
-#define ODM_REG_RXIQK_PI_A_11N 0xE3C
-#define ODM_REG_TXIQK_11N 0xE40
-#define ODM_REG_RXIQK_11N 0xE44
-#define ODM_REG_IQK_AGC_PTS_11N 0xE48
-#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
-#define ODM_REG_BLUETOOTH_11N 0xE6C
-#define ODM_REG_RX_WAIT_CCA_11N 0xE70
-#define ODM_REG_TX_CCK_RFON_11N 0xE74
-#define ODM_REG_TX_CCK_BBON_11N 0xE78
-#define ODM_REG_OFDM_RFON_11N 0xE7C
-#define ODM_REG_OFDM_BBON_11N 0xE80
-#define ODM_REG_TX2RX_11N 0xE84
-#define ODM_REG_TX2TX_11N 0xE88
-#define ODM_REG_RX_CCK_11N 0xE8C
-#define ODM_REG_RX_OFDM_11N 0xED0
-#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
-#define ODM_REG_RX2RX_11N 0xED8
-#define ODM_REG_STANDBY_11N 0xEDC
-#define ODM_REG_SLEEP_11N 0xEE0
-#define ODM_REG_PMPD_ANAEN_11N 0xEEC
-
-
-
-
-
-
-
-/* 2 MAC REG LIST */
-#define ODM_REG_BB_RST_11N 0x02
-#define ODM_REG_ANTSEL_PIN_11N 0x4C
-#define ODM_REG_EARLY_MODE_11N 0x4D0
-#define ODM_REG_RSSI_MONITOR_11N 0x4FE
-#define ODM_REG_EDCA_VO_11N 0x500
-#define ODM_REG_EDCA_VI_11N 0x504
-#define ODM_REG_EDCA_BE_11N 0x508
-#define ODM_REG_EDCA_BK_11N 0x50C
-#define ODM_REG_TXPAUSE_11N 0x522
-#define ODM_REG_RESP_TX_11N 0x6D8
-#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
-#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
-
-
-/* DIG Related */
-#define ODM_BIT_IGI_11N 0x0000007F
-
-
-#endif
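Each offset in the deleted table pairs with a field mask such as ODM_BIT_IGI_11N (0x0000007F), the 7-bit initial gain index carried in ODM_REG_IGI_A_11N/ODM_REG_IGI_B_11N (0xC50/0xC58). A read-modify-write sketch using assumed 32-bit BB accessors (the driver's real helpers take an explicit bitmask argument):

#include <linux/types.h>

struct adapter;
u32 bb_read32(struct adapter *adapt, u32 reg);			/* assumed accessor */
void bb_write32(struct adapter *adapt, u32 reg, u32 val);	/* assumed accessor */

static void set_igi_path_a(struct adapter *adapt, u8 igi)
{
	u32 val = bb_read32(adapt, 0xC50);	/* ODM_REG_IGI_A_11N */

	val &= ~0x0000007FU;			/* ODM_BIT_IGI_11N */
	val |= igi & 0x7F;
	bb_write32(adapt, 0xC50, val);
}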
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index 687ff3e9c09a..7ab2483bdacc 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 9e5fe1777e6c..658a938df4c1 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_PRECOMP_H__
@@ -32,14 +24,14 @@
#include "odm.h"
#include "odm_HWConfig.h"
#include "odm_debug.h"
-#include "odm_RegDefine11N.h"
+#include "../../rtlwifi/phydm/phydm_regdefine11n.h"
-#include "Hal8188ERateAdaptive.h"/* for RA,Power training */
+#include "hal8188e_rate_adaptive.h" /* for RA,Power training */
#include "rtl8188e_hal.h"
#include "odm_reg.h"
-#include "odm_RTL8188E.h"
+#include "odm_rtl8188e.h"
void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm);
void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/rtl8188eu/include/odm_reg.h b/drivers/staging/rtl8188eu/include/odm_reg.h
index 3405a44a19ed..b56549ba1256 100644
--- a/drivers/staging/rtl8188eu/include/odm_reg.h
+++ b/drivers/staging/rtl8188eu/include/odm_reg.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
/* */
/* File Name: odm_reg.h */
diff --git a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h b/drivers/staging/rtl8188eu/include/odm_rtl8188e.h
index 72b4db67ac33..dbf13c48767d 100644
--- a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/rtl8188eu/include/odm_rtl8188e.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_RTL8188E_H__
#define __ODM_RTL8188E_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_types.h b/drivers/staging/rtl8188eu/include/odm_types.h
index 3474a9c72640..7255f7afff7a 100644
--- a/drivers/staging/rtl8188eu/include/odm_types.h
+++ b/drivers/staging/rtl8188eu/include/odm_types.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __ODM_TYPES_H__
#define __ODM_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index f1fb3d511a45..07c32768f649 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __OSDEP_INTF_H_
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 9e390648d93e..fbcba79a0927 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __OSDEP_SERVICE_H_
#define __OSDEP_SERVICE_H_
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index bd77a50c0d41..aa58db5fbd80 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -1,17 +1,8 @@
-
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HAL8188EPWRSEQ_H__
diff --git a/drivers/staging/rtl8188eu/include/pwrseqcmd.h b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
index c4a919ea17ea..8c73322a0314 100644
--- a/drivers/staging/rtl8188eu/include/pwrseqcmd.h
+++ b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __HALPWRSEQCMD_H__
#define __HALPWRSEQCMD_H__
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 9b43a1314bd5..d2341521cc8e 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RECV_OSDEP_H_
#define __RECV_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index 042b4ec656c8..e588656f1de9 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8188E_CMD_H__
#define __RTL8188E_CMD_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index c0ffd98d7617..19204335ab4c 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8188E_DM_H__
#define __RTL8188E_DM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index b4b5e217105a..a86b07d3c82a 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8188E_HAL_H__
#define __RTL8188E_HAL_H__
@@ -18,8 +10,8 @@
/* include HAL Related header after HAL Related compiling flags */
#include "rtl8188e_spec.h"
-#include "Hal8188EPhyReg.h"
-#include "Hal8188EPhyCfg.h"
+#include "hal8188e_phy_reg.h"
+#include "hal8188e_phy_cfg.h"
#include "rtl8188e_dm.h"
#include "rtl8188e_recv.h"
#include "rtl8188e_xmit.h"
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
deleted file mode 100644
index d1ad6aa8c1e0..000000000000
--- a/drivers/staging/rtl8188eu/include/rtl8188e_led.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8188E_LED_H__
-#define __RTL8188E_LED_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-
-/* */
-/* Interface to manipulate LED objects. */
-/* */
-void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
-void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
-
-#endif
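The removed SwLedOn()/SwLedOff() pair drove the activity LED through a MAC LED-config register. A hedged reconstruction of the on-path; the register offset, bit and active-low polarity here are assumptions for illustration, not values from rtl8188e_spec.h:

#include <linux/bits.h>
#include <linux/types.h>

#define REG_LEDCFG	0x4C	/* assumed LED config register */
#define LED_SW_DIS	BIT(3)	/* assumed: set = LED pin released (off) */

struct adapter;
u8 usb_read8(struct adapter *adapter, u32 addr);	/* declared for the sketch */
int usb_write8(struct adapter *adapter, u32 addr, u8 val);

static void sw_led_on(struct adapter *padapter)
{
	u8 cfg = usb_read8(padapter, REG_LEDCFG);

	usb_write8(padapter, REG_LEDCFG, cfg & ~LED_SW_DIS);	/* drive pin low */
}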
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 0d8bf51c72a9..c2c7ef974dc5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8188E_RECV_H__
#define __RTL8188E_RECV_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index 71e2b817e20a..dd943c831d91 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*******************************************************************************/
#ifndef __RTL8188E_SPEC_H__
#define __RTL8188E_SPEC_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 66205b782721..20d35480dab8 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTL8188E_XMIT_H__
#define __RTL8188E_XMIT_H__
@@ -160,7 +152,6 @@ void rtl8188eu_xmit_tasklet(void *priv);
s32 rtl8188eu_xmitframe_complete(struct adapter *padapter,
struct xmit_priv *pxmitpriv);
-void dump_txrpt_ccx_88e(void *buf);
void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf);
void _dbg_dump_tx_info(struct adapter *padapter, int frame_tag,
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index e81ee92b0ae2..d7ca7c2fb118 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_ANDROID_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
index e8dd6d4407aa..7a4203bce473 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ap.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_AP_H_
#define __RTW_AP_H_
@@ -27,10 +19,6 @@ void rtw_indicate_sta_disassoc_event(struct adapter *padapter,
struct sta_info *psta);
void init_mlme_ap_info(struct adapter *padapter);
void free_mlme_ap_info(struct adapter *padapter);
-void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
- u8 index, u8 *data, u8 len);
-void rtw_remove_bcn_ie(struct adapter *padapter,
- struct wlan_bssid_ex *pnetwork, u8 index);
void update_beacon(struct adapter *padapter, u8 ie_id,
u8 *oui, u8 tx);
void add_RATid(struct adapter *padapter, struct sta_info *psta,
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 2c026bf6fecb..fa5e212fc9e0 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_CMD_H_
#define __RTW_CMD_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 4873ba49900c..9840e596feaa 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_DEBUG_H__
#define __RTW_DEBUG_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index 11d1cb6de506..db25eb580c98 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_EEPROM_H__
#define __RTW_EEPROM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 168c12d3c0b4..3ec53761e9fd 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
@@ -90,7 +82,7 @@ u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data);
void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset,
u16 _size_byte, u8 *pbuf);
-void Efuse_PowerSwitch(struct adapter *adapt, u8 bWrite, u8 PwrState);
+void efuse_power_switch(struct adapter *adapt, u8 write, u8 pwrstate);
int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data);
bool Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data);
void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
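The rename keeps the usual call order around raw efuse access: power the efuse cell up for reads (write = false), transfer, then power it back down. A usage sketch following the prototypes above; EFUSE_WIFI and the zero offset are illustrative:

#include <linux/types.h>

static void dump_wifi_efuse(struct adapter *adapt, u8 *buf, u16 len)
{
	efuse_power_switch(adapt, false, true);		/* read access, power up */
	efuse_ReadEFuse(adapt, EFUSE_WIFI, 0, len, buf);
	efuse_power_switch(adapt, false, false);	/* power back down */
}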
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index e798e794d962..bfe774e876d1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_EVENT_H_
#define _RTW_EVENT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
index d842eade7f57..192fa50c07be 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ht.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_HT_H_
#define _RTW_HT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index 4c925e610997..5d773c84f11b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_IOCTL_H_
#define _RTW_IOCTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
deleted file mode 100644
index da4949f94f4c..000000000000
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _RTW_IOCTL_RTL_H_
-#define _RTW_IOCTL_RTL_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-/* oid_rtl_seg_01_01 ************** */
-int oid_rt_get_signal_quality_hdl(struct oid_par_priv *poid_par_priv);/* 84 */
-int oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_tx_retry_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_rx_retry_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_tx_beacon_ok_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_tx_beacon_err_hdl(struct oid_par_priv *poid_par_priv);
-
-int oid_rt_pro_set_fw_dig_state_hdl(struct oid_par_priv *poid_par_priv);/* 8a */
-int oid_rt_pro_set_fw_ra_state_hdl(struct oid_par_priv *poid_par_priv); /* 8b */
-
-int oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv);/* 93 */
-int oid_rt_set_encryption_algorithm_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_ap_ip_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_channelplan_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_bcn_intvl_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_dedicate_probe_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_current_tx_power_level_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_enc_key_mismatch_count_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_enc_key_match_count_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_hardware_radio_off_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_key_mismatch_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_supported_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_channel_list_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_wireless_mode_for_scan_list_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_bss_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_scan_with_magic_packet_hdl(struct oid_par_priv *poid_par_priv);
-
-/* oid_rtl_seg_01_03 section start ************** */
-int oid_rt_ap_get_associated_station_list_hdl(struct oid_par_priv *priv);
-int oid_rt_ap_switch_into_ap_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_ap_supported_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_ap_set_passphrase_hdl(struct oid_par_priv *poid_par_priv);
-
-/* oid_rtl_seg_01_11 */
-int oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv);
-
-/* oid_rtl_seg_03_00 section start ************** */
-int oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_default_key_id_hdl(struct oid_par_priv *poid_par_priv);
-
-#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
index b6e14a8b7a11..0be99f6d75ba 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_IOCTL_SET_H_
#define __RTW_IOCTL_SET_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
index 1f324e68d2ae..d713782d5cdc 100644
--- a/drivers/staging/rtl8188eu/include/rtw_iol.h
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -1,23 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_IOL_H_
#define __RTW_IOL_H_
-#include <osdep_service.h>
#include <drv_types.h>
-bool rtw_IOL_applied(struct adapter *adapter);
+bool rtw_iol_applied(struct adapter *adapter);
#endif /* __RTW_IOL_H_ */
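rtw_iol_applied() (renamed from rtw_IOL_applied) reports whether the firmware IO-offload path is usable, so callers branch between offloaded and direct register configuration. A usage sketch; both loader helpers are hypothetical names:

static void load_phy_config(struct adapter *adapter)
{
	if (rtw_iol_applied(adapter))
		load_phy_config_via_iol(adapter);	/* hypothetical helper */
	else
		load_phy_config_direct(adapter);	/* hypothetical helper */
}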
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index 884e1397755a..e50237ab05c4 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
******************************************************************************/
#ifndef __RTW_LED_H_
#define __RTW_LED_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index e6d4175af3a2..35997c521c35 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_MLME_H_
#define __RTW_MLME_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 118bf5509d97..ade68af15e04 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_MLME_EXT_H_
#define __RTW_MLME_EXT_H_
@@ -478,9 +470,7 @@ void Set_MSR(struct adapter *padapter, u8 type);
u8 rtw_get_oper_ch(struct adapter *adapter);
void rtw_set_oper_ch(struct adapter *adapter, u8 ch);
-u8 rtw_get_oper_bw(struct adapter *adapter);
void rtw_set_oper_bw(struct adapter *adapter, u8 bw);
-u8 rtw_get_oper_choffset(struct adapter *adapter);
void rtw_set_oper_choffset(struct adapter *adapter, u8 offset);
void set_channel_bwmode(struct adapter *padapter, unsigned char channel,
@@ -495,7 +485,6 @@ void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
void clear_cam_entry(struct adapter *padapter, u8 entry);
void invalidate_cam_all(struct adapter *padapter);
-void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex);
int allocate_fw_sta_entry(struct adapter *padapter);
void flush_all_cam_entry(struct adapter *padapter);
@@ -545,7 +534,6 @@ unsigned char get_highest_rate_idx(u32 mask);
int support_short_GI(struct adapter *padapter, struct ieee80211_ht_cap *caps);
unsigned int is_ap_in_tkip(struct adapter *padapter);
unsigned int is_ap_in_wep(struct adapter *padapter);
-unsigned int should_forbid_n_rate(struct adapter *padapter);
void report_join_res(struct adapter *padapter, int res);
void report_survey_event(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
deleted file mode 100644
index aa353aefed3d..000000000000
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ /dev/null
@@ -1,1086 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-/*****************************************************************************
- *
- * Module: __RTW_MP_PHY_REGDEF_H_
- *
- *
- * Note: 1. Define PMAC/BB register map
- * 2. Define RF register map
- * 3. PMAC/BB register bit mask.
- * 4. RF reg bit mask.
- * 5. Other BB/RF relative definition.
- *
- *
- * Export: Constants, macro, functions(API), global variables(None).
- *
- * Abbrev:
- *
- * History:
- * Data Who Remark
- * 08/07/2007 MHC 1. Porting from 9x series PHYCFG.h.
- * 2. Reorganize code architecture.
- * 09/25/2008 MH 1. Add RL6052 register definition
- *
- *****************************************************************************/
-#ifndef __RTW_MP_PHY_REGDEF_H_
-#define __RTW_MP_PHY_REGDEF_H_
-
-
-/*--------------------------Define Parameters-------------------------------*/
-
-/* */
-/* 8192S Regsiter offset definition */
-/* */
-
-/* */
-/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
-/* 3. RF register 0x00-2E */
-/* 4. Bit Mask for BB/RF register */
-/* 5. Other definition for BB/RF R/W */
-/* */
-
-
-/* */
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 1. Page1(0x100) */
-/* */
-#define rPMAC_Reset 0x100
-#define rPMAC_TxStart 0x104
-#define rPMAC_TxLegacySIG 0x108
-#define rPMAC_TxHTSIG1 0x10c
-#define rPMAC_TxHTSIG2 0x110
-#define rPMAC_PHYDebug 0x114
-#define rPMAC_TxPacketNum 0x118
-#define rPMAC_TxIdle 0x11c
-#define rPMAC_TxMACHeader0 0x120
-#define rPMAC_TxMACHeader1 0x124
-#define rPMAC_TxMACHeader2 0x128
-#define rPMAC_TxMACHeader3 0x12c
-#define rPMAC_TxMACHeader4 0x130
-#define rPMAC_TxMACHeader5 0x134
-#define rPMAC_TxDataType 0x138
-#define rPMAC_TxRandomSeed 0x13c
-#define rPMAC_CCKPLCPPreamble 0x140
-#define rPMAC_CCKPLCPHeader 0x144
-#define rPMAC_CCKCRC16 0x148
-#define rPMAC_OFDMRxCRC32OK 0x170
-#define rPMAC_OFDMRxCRC32Er 0x174
-#define rPMAC_OFDMRxParityEr 0x178
-#define rPMAC_OFDMRxCRC8Er 0x17c
-#define rPMAC_CCKCRxRC16Er 0x180
-#define rPMAC_CCKCRxRC32Er 0x184
-#define rPMAC_CCKCRxRC32OK 0x188
-#define rPMAC_TxStatus 0x18c
-
-/* */
-/* 2. Page2(0x200) */
-/* */
-/* The following two definition are only used for USB interface. */
-/* define RF_BB_CMD_ADDR 0x02c0 RF/BB read/write command address. */
-/* define RF_BB_CMD_DATA 0x02c4 RF/BB read/write command data. */
-
-/* */
-/* 3. Page8(0x800) */
-/* */
-#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */
-
-#define rFPGA0_TxInfo 0x804 /* Status report?? */
-#define rFPGA0_PSDFunction 0x808
-
-#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
-
-#define rFPGA0_RFTiming1 0x810 /* Useless now */
-#define rFPGA0_RFTiming2 0x814
-/* define rFPGA0_XC_RFTiming 0x818 */
-/* define rFPGA0_XD_RFTiming 0x81c */
-
-#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
-#define rFPGA0_XA_HSSIParameter2 0x824
-#define rFPGA0_XB_HSSIParameter1 0x828
-#define rFPGA0_XB_HSSIParameter2 0x82c
-#define rFPGA0_XC_HSSIParameter1 0x830
-#define rFPGA0_XC_HSSIParameter2 0x834
-#define rFPGA0_XD_HSSIParameter1 0x838
-#define rFPGA0_XD_HSSIParameter2 0x83c
-#define rFPGA0_XA_LSSIParameter 0x840
-#define rFPGA0_XB_LSSIParameter 0x844
-#define rFPGA0_XC_LSSIParameter 0x848
-#define rFPGA0_XD_LSSIParameter 0x84c
-
-#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
-#define rFPGA0_RFSleepUpParameter 0x854
-
-#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
-#define rFPGA0_XCD_SwitchControl 0x85c
-
-#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
-#define rFPGA0_XB_RFInterfaceOE 0x864
-#define rFPGA0_XC_RFInterfaceOE 0x868
-#define rFPGA0_XD_RFInterfaceOE 0x86c
-
-#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */
-#define rFPGA0_XCD_RFInterfaceSW 0x874
-
-#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
-#define rFPGA0_XCD_RFParameter 0x87c
-
-#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting RF-R/W protection for parameter4?? */
-#define rFPGA0_AnalogParameter2 0x884
-#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
-#define rFPGA0_AnalogParameter4 0x88c
-
-#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Tranceiver LSSI Readback */
-#define rFPGA0_XB_LSSIReadBack 0x8a4
-#define rFPGA0_XC_LSSIReadBack 0x8a8
-#define rFPGA0_XD_LSSIReadBack 0x8ac
-
-#define rFPGA0_PSDReport 0x8b4 /* Useless now */
-#define rFPGA0_XAB_RFInterfaceRB 0x8e0 /* Useless now RF Interface Readback Value */
-#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
-
-/* */
-/* 4. Page9(0x900) */
-/* */
-#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */
-
-#define rFPGA1_TxBlock 0x904 /* Useless now */
-#define rFPGA1_DebugSelect 0x908 /* Useless now */
-#define rFPGA1_TxInfo 0x90c /* Useless now. Status report?? */
-
-/* */
-/* 5. PageA(0xA00) */
-/* */
-/* Set Control channel to upper or lower. These settings are required only for 40MHz */
-#define rCCK0_System 0xa00
-
-#define rCCK0_AFESetting 0xa04 /* Disable init gain now. Select RX path by RSSI */
-#define rCCK0_CCA 0xa08 /* Disable init gain now. Init gain */
-
-#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level. Antenna diversity, RX AGC, LNA threshold, RX LNA threshold (useless now). Not the same as 90 series */
-#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
-
-#define rCCK0_RxHP 0xa14
-
-#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
-#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
-
-#define rCCK0_TxFilter1 0xa20
-#define rCCK0_TxFilter2 0xa24
-#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
-#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now; 0xa30-a4f channel report */
-#define rCCK0_TRSSIReport 0xa50
-#define rCCK0_RxReport 0xa54 /* 0xa57 */
-#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
-#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
-
-/* */
-/* 6. PageC(0xC00) */
-/* */
-#define rOFDM0_LSTF 0xc00
-
-#define rOFDM0_TRxPathEnable 0xc04
-#define rOFDM0_TRMuxPar 0xc08
-#define rOFDM0_TRSWIsolation 0xc0c
-
-#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
-#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
-#define rOFDM0_XBRxAFE 0xc18
-#define rOFDM0_XBRxIQImbalance 0xc1c
-#define rOFDM0_XCRxAFE 0xc20
-#define rOFDM0_XCRxIQImbalance 0xc24
-#define rOFDM0_XDRxAFE 0xc28
-#define rOFDM0_XDRxIQImbalance 0xc2c
-
-#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD DM tune init gain */
-#define rOFDM0_RxDetector2 0xc34 /* SBD & Frame Sync. */
-#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
-#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
-
-#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
-#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
-#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
-#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
-
-#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
-#define rOFDM0_XAAGCCore2 0xc54
-#define rOFDM0_XBAGCCore1 0xc58
-#define rOFDM0_XBAGCCore2 0xc5c
-#define rOFDM0_XCAGCCore1 0xc60
-#define rOFDM0_XCAGCCore2 0xc64
-#define rOFDM0_XDAGCCore1 0xc68
-#define rOFDM0_XDAGCCore2 0xc6c
-
-#define rOFDM0_AGCParameter1 0xc70
-#define rOFDM0_AGCParameter2 0xc74
-#define rOFDM0_AGCRSSITable 0xc78
-#define rOFDM0_HTSTFAGC 0xc7c
-
-#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
-#define rOFDM0_XATxAFE 0xc84
-#define rOFDM0_XBTxIQImbalance 0xc88
-#define rOFDM0_XBTxAFE 0xc8c
-#define rOFDM0_XCTxIQImbalance 0xc90
-#define rOFDM0_XCTxAFE 0xc94
-#define rOFDM0_XDTxIQImbalance 0xc98
-#define rOFDM0_XDTxAFE 0xc9c
-#define rOFDM0_RxIQExtAnta 0xca0
-
-#define rOFDM0_RxHPParameter 0xce0
-#define rOFDM0_TxPseudoNoiseWgt 0xce4
-#define rOFDM0_FrameSync 0xcf0
-#define rOFDM0_DFSReport 0xcf4
-#define rOFDM0_TxCoeff1 0xca4
-#define rOFDM0_TxCoeff2 0xca8
-#define rOFDM0_TxCoeff3 0xcac
-#define rOFDM0_TxCoeff4 0xcb0
-#define rOFDM0_TxCoeff5 0xcb4
-#define rOFDM0_TxCoeff6 0xcb8
-
-/* 7. PageD(0xD00) */
-#define rOFDM1_LSTF 0xd00
-#define rOFDM1_TRxPathEnable 0xd04
-
-#define rOFDM1_CFO 0xd08 /* No setting now */
-#define rOFDM1_CSI1 0xd10
-#define rOFDM1_SBD 0xd14
-#define rOFDM1_CSI2 0xd18
-#define rOFDM1_CFOTracking 0xd2c
-#define rOFDM1_TRxMesaure1 0xd34
-#define rOFDM1_IntfDet 0xd3c
-#define rOFDM1_PseudoNoiseStateAB 0xd50
-#define rOFDM1_PseudoNoiseStateCD 0xd54
-#define rOFDM1_RxPseudoNoiseWgt 0xd58
-
-#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
-#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
-#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
-
-#define rOFDM_ShortCFOAB 0xdac /* No setting now */
-#define rOFDM_ShortCFOCD 0xdb0
-#define rOFDM_LongCFOAB 0xdb4
-#define rOFDM_LongCFOCD 0xdb8
-#define rOFDM_TailCFOAB 0xdbc
-#define rOFDM_TailCFOCD 0xdc0
-#define rOFDM_PWMeasure1 0xdc4
-#define rOFDM_PWMeasure2 0xdc8
-#define rOFDM_BWReport 0xdcc
-#define rOFDM_AGCReport 0xdd0
-#define rOFDM_RxSNR 0xdd4
-#define rOFDM_RxEVMCSI 0xdd8
-#define rOFDM_SIGReport 0xddc
-
-
-/* */
-/* 8. PageE(0xE00) */
-/* */
-#define rTxAGC_Rate18_06 0xe00
-#define rTxAGC_Rate54_24 0xe04
-#define rTxAGC_CCK_Mcs32 0xe08
-#define rTxAGC_Mcs03_Mcs00 0xe10
-#define rTxAGC_Mcs07_Mcs04 0xe14
-#define rTxAGC_Mcs11_Mcs08 0xe18
-#define rTxAGC_Mcs15_Mcs12 0xe1c
-
-/* Analog control in RX_WAIT_CCA: REG 0xEE0 [Analog Power & Control Register] */
-#define rRx_Wait_CCCA 0xe70
-#define rAnapar_Ctrl_BB 0xee0
-
-/* */
-/* 7. RF Register 0x00-0x2E (RF 8256) */
-/* RF-0222D 0x00-3F */
-/* */
-/* Zebra1 */
-#define RTL92SE_FPGA_VERIFY 0
-#define rZebra1_HSSIEnable 0x0 /* Useless now */
-#define rZebra1_TRxEnable1 0x1
-#define rZebra1_TRxEnable2 0x2
-#define rZebra1_AGC 0x4
-#define rZebra1_ChargePump 0x5
-/* if (RTL92SE_FPGA_VERIFY == 1) */
-#define rZebra1_Channel 0x7 /* RF channel switch */
-/* else */
-
-/* endif */
-#define rZebra1_TxGain 0x8 /* Useless now */
-#define rZebra1_TxLPF 0x9
-#define rZebra1_RxLPF 0xb
-#define rZebra1_RxHPFCorner 0xc
-
-/* Zebra4 */
-#define rGlobalCtrl 0 /* Useless now */
-#define rRTL8256_TxLPF 19
-#define rRTL8256_RxLPF 11
-
-/* RTL8258 */
-#define rRTL8258_TxLPF 0x11 /* Useless now */
-#define rRTL8258_RxLPF 0x13
-#define rRTL8258_RSSILPF 0xa
-
-/* */
-/* RL6052 Register definition */
-#define RF_AC 0x00 /* */
-
-#define RF_IQADJ_G1 0x01 /* */
-#define RF_IQADJ_G2 0x02 /* */
-#define RF_POW_TRSW 0x05 /* */
-
-#define RF_GAIN_RX 0x06 /* */
-#define RF_GAIN_TX 0x07 /* */
-
-#define RF_TXM_IDAC 0x08 /* */
-#define RF_BS_IQGEN 0x0F /* */
-
-#define RF_MODE1 0x10 /* */
-#define RF_MODE2 0x11 /* */
-
-#define RF_RX_AGC_HP 0x12 /* */
-#define RF_TX_AGC 0x13 /* */
-#define RF_BIAS 0x14 /* */
-#define RF_IPA 0x15 /* */
-#define RF_TXBIAS 0x16 /* */
-#define RF_POW_ABILITY 0x17 /* */
-#define RF_MODE_AG 0x18 /* */
-#define rRfChannel 0x18 /* RF channel and BW switch */
-#define RF_CHNLBW 0x18 /* RF channel and BW switch */
-#define RF_TOP 0x19 /* */
-
-#define RF_RX_G1 0x1A /* */
-#define RF_RX_G2 0x1B /* */
-
-#define RF_RX_BB2 0x1C /* */
-#define RF_RX_BB1 0x1D /* */
-
-#define RF_RCK1 0x1E /* */
-#define RF_RCK2 0x1F /* */
-
-#define RF_TX_G1 0x20 /* */
-#define RF_TX_G2 0x21 /* */
-#define RF_TX_G3 0x22 /* */
-
-#define RF_TX_BB1 0x23 /* */
-
-#define RF_T_METER 0x24 /* */
-
-#define RF_SYN_G1 0x25 /* RF TX Power control */
-#define RF_SYN_G2 0x26 /* RF TX Power control */
-#define RF_SYN_G3 0x27 /* RF TX Power control */
-#define RF_SYN_G4 0x28 /* RF TX Power control */
-#define RF_SYN_G5 0x29 /* RF TX Power control */
-#define RF_SYN_G6 0x2A /* RF TX Power control */
-#define RF_SYN_G7 0x2B /* RF TX Power control */
-#define RF_SYN_G8 0x2C /* RF TX Power control */
-
-#define RF_RCK_OS 0x30 /* RF TX PA control */
-#define RF_TXPA_G1 0x31 /* RF TX PA control */
-#define RF_TXPA_G2 0x32 /* RF TX PA control */
-#define RF_TXPA_G3 0x33 /* RF TX PA control */
-
-/* */
-/* Bit Mask */
-/* */
-/* 1. Page1(0x100) */
-#define bBBResetB 0x100 /* Useless now? */
-#define bGlobalResetB 0x200
-#define bOFDMTxStart 0x4
-#define bCCKTxStart 0x8
-#define bCRC32Debug 0x100
-#define bPMACLoopback 0x10
-#define bTxLSIG 0xffffff
-#define bOFDMTxRate 0xf
-#define bOFDMTxReserved 0x10
-#define bOFDMTxLength 0x1ffe0
-#define bOFDMTxParity 0x20000
-#define bTxHTSIG1 0xffffff
-#define bTxHTMCSRate 0x7f
-#define bTxHTBW 0x80
-#define bTxHTLength 0xffff00
-#define bTxHTSIG2 0xffffff
-#define bTxHTSmoothing 0x1
-#define bTxHTSounding 0x2
-#define bTxHTReserved 0x4
-#define bTxHTAggreation 0x8
-#define bTxHTSTBC 0x30
-#define bTxHTAdvanceCoding 0x40
-#define bTxHTShortGI 0x80
-#define bTxHTNumberHT_LTF 0x300
-#define bTxHTCRC8 0x3fc00
-#define bCounterReset 0x10000
-#define bNumOfOFDMTx 0xffff
-#define bNumOfCCKTx 0xffff0000
-#define bTxIdleInterval 0xffff
-#define bOFDMService 0xffff0000
-#define bTxMACHeader 0xffffffff
-#define bTxDataInit 0xff
-#define bTxHTMode 0x100
-#define bTxDataType 0x30000
-#define bTxRandomSeed 0xffffffff
-#define bCCKTxPreamble 0x1
-#define bCCKTxSFD 0xffff0000
-#define bCCKTxSIG 0xff
-#define bCCKTxService 0xff00
-#define bCCKLengthExt 0x8000
-#define bCCKTxLength 0xffff0000
-#define bCCKTxCRC16 0xffff
-#define bCCKTxStatus 0x1
-#define bOFDMTxStatus 0x2
-
-#define IS_BB_REG_OFFSET_92S(_Offset) ((_Offset >= 0x800) && (_Offset <= 0xfff))
-
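The deleted IS_BB_REG_OFFSET_92S() macro above is a plain range check against the baseband register window (0x800-0xfff). A minimal usage sketch, with a hypothetical write helper (example_write_bb() is not from this driver):

/* Forward only offsets that land in the Page8-PageE BB window */
static int example_write_bb(struct adapter *adapt, u32 offset, u32 value)
{
	if (!IS_BB_REG_OFFSET_92S(offset))
		return -EINVAL;	/* not a baseband register */
	/* ... perform the 32-bit register write here ... */
	return 0;
}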
-/* 2. Page8(0x800) */
-#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
-#define bJapanMode 0x2
-#define bCCKTxSC 0x30
-#define bCCKEn 0x1000000
-#define bOFDMEn 0x2000000
-
-#define bOFDMRxADCPhase 0x10000 /* Useless now */
-#define bOFDMTxDACPhase 0x40000
-#define bXATxAGC 0x3f
-
-#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
-#define bXCTxAGC 0xf000
-#define bXDTxAGC 0xf0000
-
-#define bPAStart 0xf0000000 /* Useless now */
-#define bTRStart 0x00f00000
-#define bRFStart 0x0000f000
-#define bBBStart 0x000000f0
-#define bBBCCKStart 0x0000000f
-#define bPAEnd 0xf /* Reg0x814 */
-#define bTREnd 0x0f000000
-#define bRFEnd 0x000f0000
-#define bCCAMask 0x000000f0 /* T2R */
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
-#define bContTxHSSI 0x400 /* change gain at continuous Tx */
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
-
-#define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
-#define b3WireAddressLength 0x400
-
-#define b3WireRFPowerDown 0x1 /* Useless now */
-/* define bHWSISelect 0x8 */
-#define b5GPAPEPolarity 0x40000000
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
-#define bRFSI_3Wire 0xf
-
-#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
-
-#define bRFSI_TRSW 0x20 /* Useless now */
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
-#if (RTL92SE_FPGA_VERIFY == 1)
-#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address
- * Reg 0x824 rFPGA0_XA_HSSIParameter2
- */
-#else
-#define bLSSIReadAddress 0x7f800000 /* T65 RF */
-#endif
-#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
-#if (RTL92SE_FPGA_VERIFY == 1)
-#define bLSSIReadBackData 0xfff /* Reg 0x8a0
- * rFPGA0_XA_LSSIReadBack
- */
-#else
-#define bLSSIReadBackData 0xfffff /* T65 RF */
-#endif
-#define bLSSIReadOKFlag 0x1000 /* Useless now */
-#define bCCKSampleRate 0x8 /* 0: 44 MHz, 1: 88 MHz */
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
-#define bDA6Swing 0x380000
-
-#define bADClkPhase 0x4000000 /* Reg 0x880
- * rFPGA0_AnalogParameter1 20/40 CCK
- * support switch 40/80 BB MHZ
- */
-
-#define b80MClkDelay 0x18000000 /* Useless */
-#define bAFEWatchDogEnable 0x20000000
-
-#define bXtalCap01 0xc0000000 /* Reg 0x884
- * rFPGA0_AnalogParameter2 Crystal cap
- */
-#define bXtalCap23 0x3
-#define bXtalCap92x 0x0f000000
-#define bXtalCap 0x0f000000
-
-#define bIntDifClkEnable 0x400 /* Useless */
-#define bExtSigClkEnable 0x800
-#define bBandgapMbiasPowerUp 0x10000
-#define bAD11SHGain 0xc0000
-#define bAD11InputRange 0x700000
-#define bAD11OPCurrent 0x3800000
-#define bIPathLoopback 0x4000000
-#define bQPathLoopback 0x8000000
-#define bAFELoopback 0x10000000
-#define bDA10Swing 0x7e0
-#define bDA10Reverse 0x800
-#define bDAClkSource 0x1000
-#define bAD7InputRange 0x6000
-#define bAD7Gain 0x38000
-#define bAD7OutputCMMode 0x40000
-#define bAD7InputCMMode 0x380000
-#define bAD7Current 0xc00000
-#define bRegulatorAdjust 0x7000000
-#define bAD11PowerUpAtTx 0x1
-#define bDA10PSAtTx 0x10
-#define bAD11PowerUpAtRx 0x100
-#define bDA10PSAtRx 0x1000
-#define bCCKRxAGCFormat 0x200
-#define bPSDFFTSamplepPoint 0xc000
-#define bPSDAverageNum 0x3000
-#define bIQPathControl 0xc00
-#define bPSDFreq 0x3ff
-#define bPSDAntennaPath 0x30
-#define bPSDIQSwitch 0x40
-#define bPSDRxTrigger 0x400000
-#define bPSDTxTrigger 0x80000000
-#define bPSDSineToneScale 0x7f000000
-#define bPSDReport 0xffff
-
-/* 3. Page9(0x900) */
-#define bOFDMTxSC 0x30000000 /* Useless */
-#define bCCKTxOn 0x1
-#define bOFDMTxOn 0x2
-#define bDebugPage 0xfff /* reset debug page and HWord,
- * LWord
- */
-#define bDebugItem 0xff /* reset debug page and LWord */
-#define bAntL 0x10
-#define bAntNonHT 0x100
-#define bAntHT1 0x1000
-#define bAntHT2 0x10000
-#define bAntHT1S1 0x100000
-#define bAntNonHTS1 0x1000000
-
-/* 4. PageA(0xA00) */
-#define bCCKBBMode 0x3 /* Useless */
-#define bCCKTxPowerSaving 0x80
-#define bCCKRxPowerSaving 0x40
-
-#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0 20/40 sw */
-
-#define bCCKScramble 0x8 /* Useless */
-#define bCCKAntDiversity 0x8000
-#define bCCKCarrierRecovery 0x4000
-#define bCCKTxRate 0x3000
-#define bCCKDCCancel 0x0800
-#define bCCKISICancel 0x0400
-#define bCCKMatchFilter 0x0200
-#define bCCKEqualizer 0x0100
-#define bCCKPreambleDetect 0x800000
-#define bCCKFastFalseCCA 0x400000
-#define bCCKChEstStart 0x300000
-#define bCCKCCACount 0x080000
-#define bCCKcs_lim 0x070000
-#define bCCKBistMode 0x80000000
-#define bCCKCCAMask 0x40000000
-#define bCCKTxDACPhase 0x4
-#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
-#define bCCKr_cp_mode0 0x0100
-#define bCCKTxDCOffset 0xf0
-#define bCCKRxDCOffset 0xf
-#define bCCKCCAMode 0xc000
-#define bCCKFalseCS_lim 0x3f00
-#define bCCKCS_ratio 0xc00000
-#define bCCKCorgBit_sel 0x300000
-#define bCCKPD_lim 0x0f0000
-#define bCCKNewCCA 0x80000000
-#define bCCKRxHPofIG 0x8000
-#define bCCKRxIG 0x7f00
-#define bCCKLNAPolarity 0x800000
-#define bCCKRx1stGain 0x7f0000
-#define bCCKRFExtend 0x20000000 /* CCK Rx init gain polar */
-#define bCCKRxAGCSatLevel 0x1f000000
-#define bCCKRxAGCSatCount 0xe0
-#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
-#define bCCKFixedRxAGC 0x8000
-#define bCCKAntennaPolarity 0x2000
-#define bCCKTxFilterType 0x0c00
-#define bCCKRxAGCReportType 0x0300
-#define bCCKRxDAGCEn 0x80000000
-#define bCCKRxDAGCPeriod 0x20000000
-#define bCCKRxDAGCSatLevel 0x1f000000
-#define bCCKTimingRecovery 0x800000
-#define bCCKTxC0 0x3f0000
-#define bCCKTxC1 0x3f000000
-#define bCCKTxC2 0x3f
-#define bCCKTxC3 0x3f00
-#define bCCKTxC4 0x3f0000
-#define bCCKTxC5 0x3f000000
-#define bCCKTxC6 0x3f
-#define bCCKTxC7 0x3f00
-#define bCCKDebugPort 0xff0000
-#define bCCKDACDebug 0x0f000000
-#define bCCKFalseAlarmEnable 0x8000
-#define bCCKFalseAlarmRead 0x4000
-#define bCCKTRSSI 0x7f
-#define bCCKRxAGCReport 0xfe
-#define bCCKRxReport_AntSel 0x80000000
-#define bCCKRxReport_MFOff 0x40000000
-#define bCCKRxRxReport_SQLoss 0x20000000
-#define bCCKRxReport_Pktloss 0x10000000
-#define bCCKRxReport_Lockedbit 0x08000000
-#define bCCKRxReport_RateError 0x04000000
-#define bCCKRxReport_RxRate 0x03000000
-#define bCCKRxFACounterLower 0xff
-#define bCCKRxFACounterUpper 0xff000000
-#define bCCKRxHPAGCStart 0xe000
-#define bCCKRxHPAGCFinal 0x1c00
-#define bCCKRxFalseAlarmEnable 0x8000
-#define bCCKFACounterFreeze 0x4000
-#define bCCKTxPathSel 0x10000000
-#define bCCKDefaultRxPath 0xc000000
-#define bCCKOptionRxPath 0x3000000
-
-/* 5. PageC(0xC00) */
-#define bNumOfSTF 0x3 /* Useless */
-#define bShift_L 0xc0
-#define bGI_TH 0xc
-#define bRxPathA 0x1
-#define bRxPathB 0x2
-#define bRxPathC 0x4
-#define bRxPathD 0x8
-#define bTxPathA 0x1
-#define bTxPathB 0x2
-#define bTxPathC 0x4
-#define bTxPathD 0x8
-#define bTRSSIFreq 0x200
-#define bADCBackoff 0x3000
-#define bDFIRBackoff 0xc000
-#define bTRSSILatchPhase 0x10000
-#define bRxIDCOffset 0xff
-#define bRxQDCOffset 0xff00
-#define bRxDFIRMode 0x1800000
-#define bRxDCNFType 0xe000000
-#define bRXIQImb_A 0x3ff
-#define bRXIQImb_B 0xfc00
-#define bRXIQImb_C 0x3f0000
-#define bRXIQImb_D 0xffc00000
-#define bDC_dc_Notch 0x60000
-#define bRxNBINotch 0x1f000000
-#define bPD_TH 0xf
-#define bPD_TH_Opt2 0xc000
-#define bPWED_TH 0x700
-#define bIfMF_Win_L 0x800
-#define bPD_Option 0x1000
-#define bMF_Win_L 0xe000
-#define bBW_Search_L 0x30000
-#define bwin_enh_L 0xc0000
-#define bBW_TH 0x700000
-#define bED_TH2 0x3800000
-#define bBW_option 0x4000000
-#define bRatio_TH 0x18000000
-#define bWindow_L 0xe0000000
-#define bSBD_Option 0x1
-#define bFrame_TH 0x1c
-#define bFS_Option 0x60
-#define bDC_Slope_check 0x80
-#define bFGuard_Counter_DC_L 0xe00
-#define bFrame_Weight_Short 0x7000
-#define bSub_Tune 0xe00000
-#define bFrame_DC_Length 0xe000000
-#define bSBD_start_offset 0x30000000
-#define bFrame_TH_2 0x7
-#define bFrame_GI2_TH 0x38
-#define bGI2_Sync_en 0x40
-#define bSarch_Short_Early 0x300
-#define bSarch_Short_Late 0xc00
-#define bSarch_GI2_Late 0x70000
-#define bCFOAntSum 0x1
-#define bCFOAcc 0x2
-#define bCFOStartOffset 0xc
-#define bCFOLookBack 0x70
-#define bCFOSumWeight 0x80
-#define bDAGCEnable 0x10000
-#define bTXIQImb_A 0x3ff
-#define bTXIQImb_B 0xfc00
-#define bTXIQImb_C 0x3f0000
-#define bTXIQImb_D 0xffc00000
-#define bTxIDCOffset 0xff
-#define bTxQDCOffset 0xff00
-#define bTxDFIRMode 0x10000
-#define bTxPesudoNoiseOn 0x4000000
-#define bTxPesudoNoise_A 0xff
-#define bTxPesudoNoise_B 0xff00
-#define bTxPesudoNoise_C 0xff0000
-#define bTxPesudoNoise_D 0xff000000
-#define bCCADropOption 0x20000
-#define bCCADropThres 0xfff00000
-#define bEDCCA_H 0xf
-#define bEDCCA_L 0xf0
-#define bLambda_ED 0x300
-#define bRxInitialGain 0x7f
-#define bRxAntDivEn 0x80
-#define bRxAGCAddressForLNA 0x7f00
-#define bRxHighPowerFlow 0x8000
-#define bRxAGCFreezeThres 0xc0000
-#define bRxFreezeStep_AGC1 0x300000
-#define bRxFreezeStep_AGC2 0xc00000
-#define bRxFreezeStep_AGC3 0x3000000
-#define bRxFreezeStep_AGC0 0xc000000
-#define bRxRssi_Cmp_En 0x10000000
-#define bRxQuickAGCEn 0x20000000
-#define bRxAGCFreezeThresMode 0x40000000
-#define bRxOverFlowCheckType 0x80000000
-#define bRxAGCShift 0x7f
-#define bTRSW_Tri_Only 0x80
-#define bPowerThres 0x300
-#define bRxAGCEn 0x1
-#define bRxAGCTogetherEn 0x2
-#define bRxAGCMin 0x4
-#define bRxHP_Ini 0x7
-#define bRxHP_TRLNA 0x70
-#define bRxHP_RSSI 0x700
-#define bRxHP_BBP1 0x7000
-#define bRxHP_BBP2 0x70000
-#define bRxHP_BBP3 0x700000
-#define bRSSI_H 0x7f0000 /* thresh for hi power */
-#define bRSSI_Gen 0x7f000000 /* thresh for ant div */
-#define bRxSettle_TRSW 0x7
-#define bRxSettle_LNA 0x38
-#define bRxSettle_RSSI 0x1c0
-#define bRxSettle_BBP 0xe00
-#define bRxSettle_RxHP 0x7000
-#define bRxSettle_AntSW_RSSI 0x38000
-#define bRxSettle_AntSW 0xc0000
-#define bRxProcessTime_DAGC 0x300000
-#define bRxSettle_HSSI 0x400000
-#define bRxProcessTime_BBPPW 0x800000
-#define bRxAntennaPowerShift 0x3000000
-#define bRSSITableSelect 0xc000000
-#define bRxHP_Final 0x7000000
-#define bRxHTSettle_BBP 0x7
-#define bRxHTSettle_HSSI 0x8
-#define bRxHTSettle_RxHP 0x70
-#define bRxHTSettle_BBPPW 0x80
-#define bRxHTSettle_Idle 0x300
-#define bRxHTSettle_Reserved 0x1c00
-#define bRxHTRxHPEn 0x8000
-#define bRxHTAGCFreezeThres 0x30000
-#define bRxHTAGCTogetherEn 0x40000
-#define bRxHTAGCMin 0x80000
-#define bRxHTAGCEn 0x100000
-#define bRxHTDAGCEn 0x200000
-#define bRxHTRxHP_BBP 0x1c00000
-#define bRxHTRxHP_Final 0xe0000000
-#define bRxPWRatioTH 0x3
-#define bRxPWRatioEn 0x4
-#define bRxMFHold 0x3800
-#define bRxPD_Delay_TH1 0x38
-#define bRxPD_Delay_TH2 0x1c0
-#define bRxPD_DC_COUNT_MAX 0x600
-/* define bRxMF_Hold 0x3800 */
-#define bRxPD_Delay_TH 0x8000
-#define bRxProcess_Delay 0xf0000
-#define bRxSearchrange_GI2_Early 0x700000
-#define bRxFrame_Guard_Counter_L 0x3800000
-#define bRxSGI_Guard_L 0xc000000
-#define bRxSGI_Search_L 0x30000000
-#define bRxSGI_TH 0xc0000000
-#define bDFSCnt0 0xff
-#define bDFSCnt1 0xff00
-#define bDFSFlag 0xf0000
-#define bMFWeightSum 0x300000
-#define bMinIdxTH 0x7f000000
-#define bDAFormat 0x40000
-#define bTxChEmuEnable 0x01000000
-#define bTRSWIsolation_A 0x7f
-#define bTRSWIsolation_B 0x7f00
-#define bTRSWIsolation_C 0x7f0000
-#define bTRSWIsolation_D 0x7f000000
-#define bExtLNAGain 0x7c00
-
-/* 6. PageE(0xE00) */
-#define bSTBCEn 0x4 /* Useless */
-#define bAntennaMapping 0x10
-#define bNss 0x20
-#define bCFOAntSumD 0x200
-#define bPHYCounterReset 0x8000000
-#define bCFOReportGet 0x4000000
-#define bOFDMContinueTx 0x10000000
-#define bOFDMSingleCarrier 0x20000000
-#define bOFDMSingleTone 0x40000000
-/* define bRxPath1 0x01 */
-/* define bRxPath2 0x02 */
-/* define bRxPath3 0x04 */
-/* define bRxPath4 0x08 */
-/* define bTxPath1 0x10 */
-/* define bTxPath2 0x20 */
-#define bHTDetect 0x100
-#define bCFOEn 0x10000
-#define bCFOValue 0xfff00000
-#define bSigTone_Re 0x3f
-#define bSigTone_Im 0x7f00
-#define bCounter_CCA 0xffff
-#define bCounter_ParityFail 0xffff0000
-#define bCounter_RateIllegal 0xffff
-#define bCounter_CRC8Fail 0xffff0000
-#define bCounter_MCSNoSupport 0xffff
-#define bCounter_FastSync 0xffff
-#define bShortCFO 0xfff
-#define bShortCFOTLength 12 /* total */
-#define bShortCFOFLength 11 /* fraction */
-#define bLongCFO 0x7ff
-#define bLongCFOTLength 11
-#define bLongCFOFLength 11
-#define bTailCFO 0x1fff
-#define bTailCFOTLength 13
-#define bTailCFOFLength 12
-#define bmax_en_pwdB 0xffff
-#define bCC_power_dB 0xffff0000
-#define bnoise_pwdB 0xffff
-#define bPowerMeasTLength 10
-#define bPowerMeasFLength 3
-#define bRx_HT_BW 0x1
-#define bRxSC 0x6
-#define bRx_HT 0x8
-#define bNB_intf_det_on 0x1
-#define bIntf_win_len_cfg 0x30
-#define bNB_Intf_TH_cfg 0x1c0
-#define bRFGain 0x3f
-#define bTableSel 0x40
-#define bTRSW 0x80
-#define bRxSNR_A 0xff
-#define bRxSNR_B 0xff00
-#define bRxSNR_C 0xff0000
-#define bRxSNR_D 0xff000000
-#define bSNREVMTLength 8
-#define bSNREVMFLength 1
-#define bCSI1st 0xff
-#define bCSI2nd 0xff00
-#define bRxEVM1st 0xff0000
-#define bRxEVM2nd 0xff000000
-#define bSIGEVM 0xff
-#define bPWDB 0xff00
-#define bSGIEN 0x10000
-
-#define bSFactorQAM1 0xf /* Useless */
-#define bSFactorQAM2 0xf0
-#define bSFactorQAM3 0xf00
-#define bSFactorQAM4 0xf000
-#define bSFactorQAM5 0xf0000
-#define bSFactorQAM6 0xf0000
-#define bSFactorQAM7 0xf00000
-#define bSFactorQAM8 0xf000000
-#define bSFactorQAM9 0xf0000000
-#define bCSIScheme 0x100000
-
-#define bNoiseLvlTopSet 0x3 /* Useless */
-#define bChSmooth 0x4
-#define bChSmoothCfg1 0x38
-#define bChSmoothCfg2 0x1c0
-#define bChSmoothCfg3 0xe00
-#define bChSmoothCfg4 0x7000
-#define bMRCMode 0x800000
-#define bTHEVMCfg 0x7000000
-
-#define bLoopFitType 0x1 /* Useless */
-#define bUpdCFO 0x40
-#define bUpdCFOOffData 0x80
-#define bAdvUpdCFO 0x100
-#define bAdvTimeCtrl 0x800
-#define bUpdClko 0x1000
-#define bFC 0x6000
-#define bTrackingMode 0x8000
-#define bPhCmpEnable 0x10000
-#define bUpdClkoLTF 0x20000
-#define bComChCFO 0x40000
-#define bCSIEstiMode 0x80000
-#define bAdvUpdEqz 0x100000
-#define bUChCfg 0x7000000
-#define bUpdEqz 0x8000000
-
-#define bTxAGCRate18_06 0x7f7f7f7f /* Useless */
-#define bTxAGCRate54_24 0x7f7f7f7f
-#define bTxAGCRateMCS32 0x7f
-#define bTxAGCRateCCK 0x7f00
-#define bTxAGCRateMCS3_MCS0 0x7f7f7f7f
-#define bTxAGCRateMCS7_MCS4 0x7f7f7f7f
-#define bTxAGCRateMCS11_MCS8 0x7f7f7f7f
-#define bTxAGCRateMCS15_MCS12 0x7f7f7f7f
-
-/* Rx Pseudo noise */
-#define bRxPesudoNoiseOn 0x20000000 /* Useless */
-#define bRxPesudoNoise_A 0xff
-#define bRxPesudoNoise_B 0xff00
-#define bRxPesudoNoise_C 0xff0000
-#define bRxPesudoNoise_D 0xff000000
-#define bPesudoNoiseState_A 0xffff
-#define bPesudoNoiseState_B 0xffff0000
-#define bPesudoNoiseState_C 0xffff
-#define bPesudoNoiseState_D 0xffff0000
-
-/* 7. RF Register */
-/* Zebra1 */
-#define bZebra1_HSSIEnable 0x8 /* Useless */
-#define bZebra1_TRxControl 0xc00
-#define bZebra1_TRxGainSetting 0x07f
-#define bZebra1_RxCorner 0xc00
-#define bZebra1_TxChargePump 0x38
-#define bZebra1_RxChargePump 0x7
-#define bZebra1_ChannelNum 0xf80
-#define bZebra1_TxLPFBW 0x400
-#define bZebra1_RxLPFBW 0x600
-
-/* Zebra4 */
-#define bRTL8256RegModeCtrl1 0x100 /* Useless */
-#define bRTL8256RegModeCtrl0 0x40
-#define bRTL8256_TxLPFBW 0x18
-#define bRTL8256_RxLPFBW 0x600
-
-/* RTL8258 */
-#define bRTL8258_TxLPFBW 0xc /* Useless */
-#define bRTL8258_RxLPFBW 0xc00
-#define bRTL8258_RSSILPFBW 0xc0
-
-
-/* */
-/* Other Definition */
-/* */
-
-/* byte enable for sb_write */
-#define bByte0 0x1 /* Useless */
-#define bByte1 0x2
-#define bByte2 0x4
-#define bByte3 0x8
-#define bWord0 0x3
-#define bWord1 0xc
-#define bDWord 0xf
-
-/* for PutRegSetting & GetRegSetting BitMask */
-#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
-#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
-#define bMaskByte3 0xff000000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
-#define bMaskDWord 0xffffffff
-#define bMaskH4Bits 0xf0000000
-#define bMaskOFDM_D 0xffc00000
-#define bMaskCCK 0x3f3f3f3f
-#define bMask12Bits 0xfff
-
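The bMask* constants above select byte/word lanes of a 32-bit PHY register. A sketch of reading one AGC byte with an accessor in the style of the driver's phy_query_bb_reg() (signature assumed; the helper is expected to mask and right-shift the raw value):

/* Read the low byte of rOFDM0_XAAGCCore1 (the DIG gain field) */
u32 gain = phy_query_bb_reg(adapt, rOFDM0_XAAGCCore1, bMaskByte0);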
-/* for PutRFRegSetting & GetRFRegSetting BitMask */
-#if (RTL92SE_FPGA_VERIFY == 1)
-#define bRFRegOffsetMask 0xfff
-#else
-#define bRFRegOffsetMask 0xfffff
-#endif
-#define bEnable 0x1 /* Useless */
-#define bDisabl 0x0
-
-#define LeftAntenna 0x0 /* Useless */
-#define RightAntenna 0x1
-
-#define tCheckTxStatus 500 /* 500ms Useless */
-#define tUpdateRxCounter 100 /* 100ms */
-
-#define rateCCK 0 /* Useless */
-#define rateOFDM 1
-#define rateHT 2
-
-/* define Register-End */
-#define bPMAC_End 0x1ff /* Useless */
-#define bFPGAPHY0_End 0x8ff
-#define bFPGAPHY1_End 0x9ff
-#define bCCKPHY0_End 0xaff
-#define bOFDMPHY0_End 0xcff
-#define bOFDMPHY1_End 0xdff
-
-/* define max debug item in each debug page */
-/* define bMaxItem_FPGA_PHY0 0x9 */
-/* define bMaxItem_FPGA_PHY1 0x3 */
-/* define bMaxItem_PHY_11B 0x16 */
-/* define bMaxItem_OFDM_PHY0 0x29 */
-/* define bMaxItem_OFDM_PHY1 0x0 */
-
-#define bPMACControl 0x0 /* Useless */
-#define bWMACControl 0x1
-#define bWNICControl 0x2
-
-#define RCR_AAP BIT(0) /* accept all physical address */
-#define RCR_APM BIT(1) /* accept physical match */
-#define RCR_AM BIT(2) /* accept multicast */
-#define RCR_AB BIT(3) /* accept broadcast */
-#define RCR_ACRC32 BIT(5) /* accept error packet */
-#define RCR_9356SEL BIT(6)
-#define RCR_AICV BIT(12) /* Accept ICV error packet */
-#define RCR_RXFTH0 (BIT(13)|BIT(14)|BIT(15)) /* Rx FIFO threshold */
-#define RCR_ADF BIT(18) /* Accept Data(frame type) frame */
-#define RCR_ACF BIT(19) /* Accept control frame */
-#define RCR_AMF BIT(20) /* Accept management frame */
-#define RCR_ADD3 BIT(21)
-#define RCR_APWRMGT BIT(22) /* Accept power management packet */
-#define RCR_CBSSID BIT(23) /* Accept BSSID match packet */
-#define RCR_ENMARP BIT(28) /* enable mac auto reset phy */
-#define RCR_EnCS1 BIT(29) /* enable carrier sense method 1 */
-#define RCR_EnCS2 BIT(30) /* enable carrier sense method 2 */
-#define RCR_OnlyErlPkt BIT(31) /* Rx Early mode is performed for
- * packet size greater than 1536
- */
-
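The RCR_* bits above configure the receive filter. A hedged sketch of composing a typical acceptance mask (write_rcr() is a hypothetical helper, not part of this header):

/* Accept frames addressed to us plus multicast/broadcast,
 * and let data, control and management frames through
 */
u32 rcr = RCR_APM | RCR_AM | RCR_AB | RCR_ADF | RCR_ACF | RCR_AMF;
write_rcr(adapt, rcr);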
-/*--------------------------Define Parameters-------------------------------*/
-
-
-#endif /* __INC_HAL8192SPHYREG_H */
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index f39e90cfc031..404634999e35 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_PWRCTRL_H_
#define __RTW_PWRCTRL_H_
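From here on the series is the mechanical SPDX conversion: each file drops the multi-line GPL boilerplate in favour of a single SPDX-License-Identifier tag. Following the kernel's license-rules convention, headers keep the C-style comment while C sources use the C++ style:

/* SPDX-License-Identifier: GPL-2.0 */	<- form used in .h files
// SPDX-License-Identifier: GPL-2.0	<- form used in .c files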
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
index 576dff68d0dc..bf617da3cd6c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_qos.h
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_QOS_H_
#define _RTW_QOS_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index 7e85f700acb3..54b7ba367293 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_RECV_H_
#define _RTW_RECV_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 0718a29e7c9d..b5dfb226f32a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_RF_H_
#define __RTW_RF_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index b1883ca852af..f8d9151fe6e3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __RTW_SECURITY_H_
#define __RTW_SECURITY_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
index 4c4ccd564863..3ee6a4a7847d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_sreset.h
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_SRESET_C_
#define _RTW_SRESET_C_
@@ -33,7 +25,6 @@ struct sreset_priv {
#define WIFI_RX_HANG BIT(5)
#define WIFI_IF_NOT_EXIST BIT(6)
-u8 sreset_get_wifi_status(struct adapter *padapter);
void sreset_set_wifi_error_status(struct adapter *padapter, u32 status);
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index b4b3d13ace9e..788f59c74ea1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _RTW_XMIT_H_
#define _RTW_XMIT_H_
@@ -334,8 +326,6 @@ struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv,
s32 rtw_xmit_classifier(struct adapter *padapter,
struct xmit_frame *pxmitframe);
-u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
-#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt,
struct xmit_frame *pxmitframe);
s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index 8f01deed6e4a..dc685a14aeb8 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __STA_INFO_H_
#define __STA_INFO_H_
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index fb586365d2e5..70d729742839 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __USB_OPS_LINUX_H__
#define __USB_OPS_LINUX_H__
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 084a246eec19..259bf2cce2d5 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef _WIFI_H_
#define _WIFI_H_
@@ -265,13 +257,6 @@ enum WIFI_REG_DOMAIN {
#define GetAddr4Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 24))
-#define MacAddr_isBcst(addr) \
- ( \
- ((addr[0] == 0xff) && (addr[1] == 0xff) && \
- (addr[2] == 0xff) && (addr[3] == 0xff) && \
- (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
-)
-
static inline int IS_MCAST(unsigned char *da)
{
if ((*da) & 0x01)
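The MacAddr_isBcst() macro removed above open-codes a helper that <linux/etherdevice.h> already provides; later hunks in this series switch the call sites over to it. Its semantics are roughly:

#include <linux/etherdevice.h>

/* true iff all six bytes of the address are 0xff */
bool bcast = is_broadcast_ether_addr(addr);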
@@ -575,7 +560,6 @@ enum ht_cap_ampdu_factor {
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
#define OP_MODE_PURE 0
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index 6000049bda8f..5e13a6ddf083 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __WLAN_BSSDEF_H__
#define __WLAN_BSSDEF_H__
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index 00ebad88f0d1..5283a6d53700 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#ifndef __XMIT_OSDEP_H_
#define __XMIT_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 3d648cb55a6d..bee3c3a7a7a9 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _IOCTL_LINUX_C_
@@ -27,7 +19,6 @@
#include <rtw_ioctl_set.h>
#include <rtl8188e_hal.h>
-#include <rtw_iol.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
@@ -150,7 +141,7 @@ static char *translate_scan(struct adapter *padapter,
if (ht_cap)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
} else if ((rtw_is_cckrates_included((u8 *)&pnetwork->network.SupportedRates))) {
if (ht_cap)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
@@ -369,9 +360,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
goto exit;
}
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (is_broadcast_ether_addr(param->sta_addr)) {
if (param->u.crypt.idx >= WEP_KEYS) {
ret = -EINVAL;
goto exit;
@@ -650,12 +639,12 @@ static int rtw_wx_get_name(struct net_device *dev,
prates = &pcur_bss->SupportedRates;
- if (rtw_is_cckratesonly_included((u8 *)prates) == true) {
+ if (rtw_is_cckratesonly_included((u8 *)prates)) {
if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
- } else if ((rtw_is_cckrates_included((u8 *)prates)) == true) {
+ } else if (rtw_is_cckrates_included((u8 *)prates)) {
if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn");
else
@@ -1291,7 +1280,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
uint ret = 0, len;
-
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("+rtw_wx_set_essid: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
if (_FAIL == rtw_pwr_wakeup(padapter)) {
@@ -1373,10 +1361,8 @@ static int rtw_wx_set_essid(struct net_device *dev,
}
exit:
-
DBG_88E("<=%s, ret %d\n", __func__, ret);
-
return ret;
}
@@ -1391,7 +1377,6 @@ static int rtw_wx_get_essid(struct net_device *dev,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
-
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
len = pcur_bss->Ssid.SsidLength;
@@ -1417,7 +1402,6 @@ static int rtw_wx_set_rate(struct net_device *dev,
u32 ratevalue = 0;
u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
@@ -1509,7 +1493,6 @@ static int rtw_wx_set_rts(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
if (wrqu->rts.disabled) {
padapter->registrypriv.rts_thresh = 2347;
} else {
@@ -1522,7 +1505,6 @@ static int rtw_wx_set_rts(struct net_device *dev,
DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
-
return 0;
}
@@ -1532,14 +1514,12 @@ static int rtw_wx_get_rts(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
wrqu->rts.value = padapter->registrypriv.rts_thresh;
wrqu->rts.fixed = 0; /* no auto select */
/* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
-
return 0;
}
@@ -1549,7 +1529,6 @@ static int rtw_wx_set_frag(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
if (wrqu->frag.disabled) {
padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
} else {
@@ -1562,7 +1541,6 @@ static int rtw_wx_set_frag(struct net_device *dev,
DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
-
return 0;
}
@@ -1572,13 +1550,11 @@ static int rtw_wx_get_frag(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
wrqu->frag.value = padapter->xmitpriv.frag_len;
wrqu->frag.fixed = 0; /* no auto select */
-
return 0;
}
@@ -1612,7 +1588,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
key = erq->flags & IW_ENCODE_INDEX;
-
if (erq->flags & IW_ENCODE_DISABLED) {
DBG_88E("EncryptionDisabled\n");
padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
@@ -1705,8 +1680,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
}
exit:
-
-
return ret;
}
@@ -1719,7 +1692,6 @@ static int rtw_wx_get_enc(struct net_device *dev,
struct iw_point *erq = &(wrqu->encoding);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
-
if (check_fwstate(pmlmepriv, _FW_LINKED) != true) {
if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
erq->length = 0;
@@ -1920,7 +1892,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
goto exit;
}
- strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+ strlcpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
param->u.crypt.set_tx = 1;
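strncpy() leaves the destination unterminated when the source fills the buffer, while strlcpy() always writes a NUL (truncating if necessary), which is the point of this one-line change. A minimal contrast, using a hypothetical short buffer:

char buf[4];
strncpy(buf, "CCMP", sizeof(buf));	/* buf holds 'C','C','M','P' - no NUL */
strlcpy(buf, "CCMP", sizeof(buf));	/* buf == "CCM", always terminated */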
@@ -2233,9 +2205,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
ret = -EINVAL;
goto exit;
}
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (is_broadcast_ether_addr(param->sta_addr)) {
if (param->u.crypt.idx >= WEP_KEYS) {
ret = -EINVAL;
goto exit;
@@ -2328,7 +2298,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
DBG_88E("%s, set group_key, TKIP\n", __func__);
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
@@ -2496,9 +2466,7 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
if (!check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)))
return -EINVAL;
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ if (is_broadcast_ether_addr(param->sta_addr))
return -EINVAL;
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
@@ -2553,9 +2521,7 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
return -EINVAL;
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ if (is_broadcast_ether_addr(param->sta_addr))
return -EINVAL;
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
@@ -2591,9 +2557,7 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
return -EINVAL;
- if (param_ex->sta_addr[0] == 0xff && param_ex->sta_addr[1] == 0xff &&
- param_ex->sta_addr[2] == 0xff && param_ex->sta_addr[3] == 0xff &&
- param_ex->sta_addr[4] == 0xff && param_ex->sta_addr[5] == 0xff)
+ if (is_broadcast_ether_addr(param_ex->sta_addr))
return -EINVAL;
psta = rtw_get_stainfo(pstapriv, param_ex->sta_addr);
@@ -2647,9 +2611,7 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
return -EINVAL;
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ if (is_broadcast_ether_addr(param->sta_addr))
return -EINVAL;
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
@@ -2804,10 +2766,9 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ if (is_broadcast_ether_addr(param->sta_addr))
return -EINVAL;
+
return rtw_acl_remove_sta(padapter, param->sta_addr);
}
@@ -2819,10 +2780,9 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ if (is_broadcast_ether_addr(param->sta_addr))
return -EINVAL;
+
return rtw_acl_add_sta(padapter, param->sta_addr);
}
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index 831c1ecc5e28..238c1d9cdc7b 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
-
#define _MLME_OSDEP_C_
#include <osdep_service.h>
@@ -150,7 +141,6 @@ void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *pst
if (pstapriv->sta_aid[psta->aid - 1] != psta)
return;
-
wrqu.addr.sa_family = ARPHRD_ETHER;
memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
@@ -174,7 +164,6 @@ void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *
if (pstapriv->sta_aid[psta->aid - 1] != psta)
return;
-
wrqu.addr.sa_family = ARPHRD_ETHER;
memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 225c23fc69dc..73b9599fe0dc 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RTL8188EU monitor interface
*
* Copyright (C) 2015 Jakub Sitnicki
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
*/
#include <linux/ieee80211.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index add1ba00f3e9..0a9877d85c79 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _OS_INTFS_C_
@@ -253,7 +245,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
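This hunk tracks the upstream change to the .ndo_select_queue prototype, where the opaque accel_priv argument became struct net_device *sb_dev. A sketch of the hook's shape after the change (body elided; the real function maps the skb's 802.1d priority to a queue):

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev,
				select_queue_fallback_t fallback)
{
	return 0;	/* queue index */
}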
@@ -319,7 +312,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n"));
- if (old_padapter != NULL)
+ if (old_padapter)
pnetdev = rtw_alloc_etherdev_with_old_priv((void *)old_padapter);
if (!pnetdev)
@@ -363,7 +356,6 @@ void rtw_stop_drv_threads(struct adapter *padapter)
complete(&padapter->cmdpriv.cmd_queue_comp);
if (padapter->cmdThread)
wait_for_completion_interruptible(&padapter->cmdpriv.terminate_cmdthread_comp);
-
}
static u8 rtw_init_default_value(struct adapter *padapter)
@@ -441,7 +433,6 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
{
u8 ret8 = _SUCCESS;
-
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
@@ -498,7 +489,6 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
-
return ret8;
}
@@ -661,7 +651,6 @@ netdev_open_error:
return _FAIL;
}
-
int rtw_ips_pwr_up(struct adapter *padapter)
{
int result;
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 3be87252fd62..78daef6704ac 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _OSDEP_SERVICE_C_
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 7ec53a9dfa27..6f74f49bf3ab 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
@@ -36,9 +28,9 @@ int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
{
union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct iw_michaelmicfailure ev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 cur_time = 0;
if (psecuritypriv->last_mic_err_time == 0) {
@@ -77,7 +69,6 @@ int rtw_recv_indicatepkt(struct adapter *padapter,
struct sk_buff *skb;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
precvpriv = &(padapter->recvpriv);
pfree_recv_queue = &(precvpriv->free_recv_queue);
@@ -93,11 +84,11 @@ int rtw_recv_indicatepkt(struct adapter *padapter,
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- int bmcast = IS_MCAST(pattrib->dst);
+ bool mcast = is_multicast_ether_addr(pattrib->dst);
if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
ETH_ALEN)) {
- if (bmcast) {
+ if (mcast) {
psta = rtw_get_bcmc_stainfo(padapter);
pskb2 = skb_clone(skb, GFP_ATOMIC);
} else {
@@ -113,7 +104,7 @@ int rtw_recv_indicatepkt(struct adapter *padapter,
rtw_xmit_entry(skb, pnetdev);
- if (bmcast)
+ if (mcast)
skb = pskb2;
else
goto _recv_indicatepkt_end;
@@ -137,7 +128,6 @@ _recv_indicatepkt_end:
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));
-
return _SUCCESS;
_recv_indicatepkt_drop:
@@ -145,12 +135,11 @@ _recv_indicatepkt_drop:
/* enqueue back to free_recv_queue */
rtw_free_recvframe(precv_frame, pfree_recv_queue);
- return _FAIL;
+ return _FAIL;
}
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
-
timer_setup(&preorder_ctrl->reordering_ctrl_timer,
rtw_reordering_ctrl_timeout_handler, 0);
}
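The bmcast/mcast rename above also swaps the driver-private IS_MCAST() macro for the generic helper from <linux/etherdevice.h>. The helper simply tests the group bit of the destination MAC, so the conversion is behavior-preserving:

#include <linux/etherdevice.h>

/* True when the I/G bit (LSB of the first octet) is set, i.e. the
 * frame is addressed to a multicast or broadcast group.
 */
bool mcast = is_multicast_ether_addr(pattrib->dst);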
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 336e7023f7f7..34080c0ce14a 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#include <linux/module.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 127ecf896fc9..28cbd6b3d26c 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define pr_fmt(fmt) "R8188EU: " fmt
@@ -141,7 +133,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
}
usb_put_dev(interface_to_usbdev(usb_intf));
-
}
void usb_intf_stop(struct adapter *padapter)
@@ -334,7 +325,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
int status = _FAIL;
padapter = vzalloc(sizeof(*padapter));
- if (padapter == NULL)
+ if (!padapter)
goto exit;
padapter->dvobj = dvobj;
dvobj->if1 = padapter;
@@ -343,14 +334,14 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
mutex_init(&padapter->hw_init_mutex);
pnetdev = rtw_init_netdev(padapter);
- if (pnetdev == NULL)
+ if (!pnetdev)
goto free_adapter;
SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
padapter = rtw_netdev_priv(pnetdev);
if (padapter->registrypriv.monitor_enable) {
pmondev = rtl88eu_mon_init();
- if (pmondev == NULL)
+ if (!pmondev)
netdev_warn(pnetdev, "Failed to initialize monitor interface");
padapter->pmondev = pmondev;
}
@@ -389,7 +380,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
/* 2012-07-11 Move here to prevent the 8723AS-VAU BT auto
* suspend influence */
if (usb_autopm_get_interface(pusb_intf) < 0)
- pr_debug("can't get autopm:\n");
+ pr_debug("can't get autopm:\n");
/* alloc dev name after read efuse. */
rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 7e75030475f7..5ddfc2ead127 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _USB_OPS_LINUX_C_
@@ -28,8 +20,8 @@ static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbu
}
/* HISR */
- memcpy(&(haldata->IntArray[0]), &(pbuf[USB_INTR_CONTENT_HISR_OFFSET]), 4);
- memcpy(&(haldata->IntArray[1]), &(pbuf[USB_INTR_CONTENT_HISRE_OFFSET]), 4);
+ memcpy(&haldata->IntArray[0], &pbuf[USB_INTR_CONTENT_HISR_OFFSET], 4);
+ memcpy(&haldata->IntArray[1], &pbuf[USB_INTR_CONTENT_HISRE_OFFSET], 4);
/* C2H Event */
if (pbuf[0] != 0)
@@ -66,7 +58,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
prxstat = (struct recv_stat *)pbuf;
precvframe = rtw_alloc_recvframe(pfree_recv_queue);
- if (precvframe == NULL) {
+ if (!precvframe) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvbuf2recvframe: precvframe==NULL\n"));
DBG_88E("%s()-%d: rtw_alloc_recvframe() failed! RX Drop!\n", __func__, __LINE__);
goto _exit_recvbuf2recvframe;
@@ -275,7 +267,6 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
}
}
}
-
}
/* firmware download is checksumed, don't retry */
@@ -299,7 +290,6 @@ u8 usb_read8(struct adapter *adapter, u32 addr)
u16 len;
u8 data = 0;
-
request = 0x05;
requesttype = 0x01;/* read_in */
index = 0;/* n/a */
@@ -309,9 +299,7 @@ u8 usb_read8(struct adapter *adapter, u32 addr)
usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype);
-
return data;
-
}
u16 usb_read16(struct adapter *adapter, u32 addr)
@@ -342,7 +330,6 @@ u32 usb_read32(struct adapter *adapter, u32 addr)
u16 len;
__le32 data;
-
request = 0x05;
requesttype = 0x01;/* read_in */
index = 0;/* n/a */
@@ -352,7 +339,6 @@ u32 usb_read32(struct adapter *adapter, u32 addr)
usbctrl_vendorreq(adapter, request, wvalue, index, &data, len, requesttype);
-
return le32_to_cpu(data);
}
@@ -437,7 +423,6 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
unsigned int pipe;
u32 ret = _SUCCESS;
-
if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
adapter->pwrctrlpriv.pnp_bstop_trx) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
@@ -451,16 +436,16 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
return _FAIL;
}
- if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
+ if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
- if (precvbuf->pskb != NULL)
+ if (precvbuf->pskb)
precvbuf->reuse = true;
}
/* re-assign for linux based on skb */
- if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
+ if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ);
- if (precvbuf->pskb == NULL) {
+ if (!precvbuf->pskb) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
return _FAIL;
@@ -540,7 +525,6 @@ int usb_write16(struct adapter *adapter, u32 addr, u16 val)
u16 len;
__le32 data;
-
request = 0x05;
requesttype = 0x00;/* write_out */
index = 0;/* n/a */
@@ -552,8 +536,6 @@ int usb_write16(struct adapter *adapter, u32 addr, u16 val)
return usbctrl_vendorreq(adapter, request, wvalue,
index, &data, len, requesttype);
-
-
}
int usb_write32(struct adapter *adapter, u32 addr, u32 val)
@@ -565,7 +547,6 @@ int usb_write32(struct adapter *adapter, u32 addr, u32 val)
u16 len;
__le32 data;
-
request = 0x05;
requesttype = 0x00;/* write_out */
index = 0;/* n/a */
@@ -576,8 +557,6 @@ int usb_write32(struct adapter *adapter, u32 addr, u32 val)
return usbctrl_vendorreq(adapter, request, wvalue,
index, &data, len, requesttype);
-
-
}
static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
@@ -671,7 +650,6 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, struct xmit_buf
struct xmit_frame *pxmitframe = (struct xmit_frame *)xmitbuf->priv_data;
struct usb_device *pusbd = pdvobj->pusbdev;
-
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port\n"));
if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 8ac9567c954d..d8ef9b5d81a8 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
******************************************************************************/
#define _XMIT_OSDEP_C_
@@ -27,7 +19,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
int i;
pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
- if (pxmitbuf->pallocated_buf == NULL)
+ if (!pxmitbuf->pallocated_buf)
return _FAIL;
pxmitbuf->pbuf = PTR_ALIGN(pxmitbuf->pallocated_buf, XMITBUF_ALIGN_SZ);
@@ -164,7 +156,6 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
return true;
}
-
int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -172,7 +163,6 @@ int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
s32 res = 0;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_enry\n"));
if (rtw_if_up(padapter) == false) {
@@ -206,7 +196,5 @@ drop_packet:
RT_TRACE(_module_xmit_osdep_c_, _drv_notice_, ("rtw_xmit_entry: drop, tx_drop=%d\n", (u32)pxmitpriv->tx_drop));
exit:
-
-
return 0;
}
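Many hunks in these os_dep files also rewrite explicit NULL comparisons into the boolean form that checkpatch.pl prefers; the two spellings compile identically:

if (pxmitbuf->pallocated_buf == NULL)	/* old style */
	return _FAIL;

if (!pxmitbuf->pallocated_buf)		/* preferred: test the pointer directly */
	return _FAIL;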
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index c466a5e7e3bd..687dbb04ed2e 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -83,7 +83,7 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
netdev_dbg(ieee->dev, "%s(): frame(%d) sentd to: %pM, ieee->dev:%p\n",
__func__, type, Dst, ieee->dev);
- if (pBA == NULL) {
+ if (!pBA) {
netdev_warn(ieee->dev, "pBA is NULL\n");
return NULL;
}
@@ -257,8 +257,8 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
pBaStartSeqCtrl = (union sequence_control *)(req + 7);
RT_TRACE(COMP_DBG, "====>rx ADDBAREQ from : %pM\n", dst);
- if (ieee->current_network.qos_data.active == 0 ||
- (ieee->pHTInfo->bCurrentHTSupport == false) ||
+ if (!ieee->current_network.qos_data.active ||
+ !ieee->pHTInfo->bCurrentHTSupport ||
(ieee->pHTInfo->IOTAction & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev,
@@ -340,9 +340,9 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pBaTimeoutVal = (u16 *)(tag + 7);
RT_TRACE(COMP_DBG, "====>rx ADDBARSP from : %pM\n", dst);
- if (ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false ||
- ieee->pHTInfo->bCurrentAMPDUEnable == false) {
+ if (!ieee->current_network.qos_data.active ||
+ !ieee->pHTInfo->bCurrentHTSupport ||
+ !ieee->pHTInfo->bCurrentAMPDUEnable) {
netdev_warn(ieee->dev,
"reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",
ieee->current_network.qos_data.active,
@@ -365,11 +365,11 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pAdmittedBA = &pTS->TxAdmittedBARecord;
- if (pAdmittedBA->bValid == true) {
+ if (pAdmittedBA->bValid) {
netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
__func__);
return -1;
- } else if ((pPendingBA->bValid == false) ||
+ } else if (!pPendingBA->bValid ||
(*pDialogToken != pPendingBA->DialogToken)) {
netdev_warn(ieee->dev,
"%s(): ADDBA Rsp. BA invalid, DELBA!\n",
@@ -431,8 +431,8 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
return -1;
}
- if (ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false) {
+ if (!ieee->current_network.qos_data.active ||
+ !ieee->pHTInfo->bCurrentHTSupport) {
netdev_warn(ieee->dev,
"received DELBA while QOS or HT is not supported(%d, %d)\n",
ieee->current_network. qos_data.active,
@@ -485,7 +485,7 @@ void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
{
struct ba_record *pBA = &pTS->TxPendingBARecord;
- if (pBA->bValid == true && bOverwritePending == false)
+ if (pBA->bValid && !bOverwritePending)
return;
DeActivateBAEntry(ieee, pBA);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index ae103b0b7a2a..9f18be14dda6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -50,9 +50,9 @@ struct rtllib_tkip_data {
int key_idx;
struct crypto_skcipher *rx_tfm_arc4;
- struct crypto_ahash *rx_tfm_michael;
+ struct crypto_shash *rx_tfm_michael;
struct crypto_skcipher *tx_tfm_arc4;
- struct crypto_ahash *tx_tfm_michael;
+ struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16];
u8 tx_hdr[16];
@@ -74,8 +74,7 @@ static void *rtllib_tkip_init(int key_idx)
goto fail;
}
- priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->tx_tfm_michael)) {
pr_debug("Could not allocate crypto API michael_mic\n");
priv->tx_tfm_michael = NULL;
@@ -90,8 +89,7 @@ static void *rtllib_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->rx_tfm_michael)) {
pr_debug("Could not allocate crypto API michael_mic\n");
priv->rx_tfm_michael = NULL;
@@ -101,9 +99,9 @@ static void *rtllib_tkip_init(int key_idx)
fail:
if (priv) {
- crypto_free_ahash(priv->tx_tfm_michael);
+ crypto_free_shash(priv->tx_tfm_michael);
crypto_free_skcipher(priv->tx_tfm_arc4);
- crypto_free_ahash(priv->rx_tfm_michael);
+ crypto_free_shash(priv->rx_tfm_michael);
crypto_free_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -117,9 +115,9 @@ static void rtllib_tkip_deinit(void *priv)
struct rtllib_tkip_data *_priv = priv;
if (_priv) {
- crypto_free_ahash(_priv->tx_tfm_michael);
+ crypto_free_shash(_priv->tx_tfm_michael);
crypto_free_skcipher(_priv->tx_tfm_arc4);
- crypto_free_ahash(_priv->rx_tfm_michael);
+ crypto_free_shash(_priv->rx_tfm_michael);
crypto_free_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
@@ -504,29 +502,31 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
-static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
u8 *data, size_t data_len, u8 *mic)
{
- AHASH_REQUEST_ON_STACK(req, tfm_michael);
- struct scatterlist sg[2];
+ SHASH_DESC_ON_STACK(desc, tfm_michael);
int err;
- if (tfm_michael == NULL) {
- pr_warn("michael_mic: tfm_michael == NULL\n");
- return -1;
- }
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], hdr, 16);
- sg_set_buf(&sg[1], data, data_len);
+ desc->tfm = tfm_michael;
+ desc->flags = 0;
- if (crypto_ahash_setkey(tfm_michael, key, 8))
+ if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
- ahash_request_set_tfm(req, tfm_michael);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, mic, data_len + 16);
- err = crypto_ahash_digest(req);
- ahash_request_zero(req);
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+ err = crypto_shash_update(desc, hdr, 16);
+ if (err)
+ goto out;
+ err = crypto_shash_update(desc, data, data_len);
+ if (err)
+ goto out;
+ err = crypto_shash_final(desc, mic);
+
+out:
+ shash_desc_zero(desc);
return err;
}
@@ -663,9 +663,9 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
struct rtllib_tkip_data *tkey = priv;
int keyidx;
- struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+ struct crypto_shash *tfm = tkey->tx_tfm_michael;
struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
- struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+ struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
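The michael_mic() rewrite above moves from the asynchronous ahash interface, which works on scatterlists and therefore forced hdr and data into buffers safe for virt_to_page(), to the synchronous shash interface, which hashes plain virtual addresses. The patch follows the usual setkey/init/update/final sequence; a condensed sketch of the same flow is shown below (it omits the desc->flags = 0 assignment, a field that still existed in this kernel generation):

static int michael_mic_shash(struct crypto_shash *tfm, const u8 *key,
			     const u8 *hdr, const u8 *data, size_t data_len,
			     u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);		/* no heap allocation needed */
	int err;

	desc->tfm = tfm;

	err = crypto_shash_setkey(tfm, key, 8);	/* Michael uses a 64-bit key */
	if (err)
		return err;

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, hdr, 16);
	if (!err)
		err = crypto_shash_update(desc, data, data_len);
	if (!err)
		err = crypto_shash_final(desc, mic);

	shash_desc_zero(desc);			/* wipe key state off the stack */
	return err;
}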
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index ba284bfb3b6d..2fb575a2b6ab 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -5,14 +5,14 @@
void Dot11d_Init(struct ieee80211_device *ieee)
{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
- pDot11dInfo->bEnabled = false;
+ pDot11dInfo->enabled = false;
- pDot11dInfo->State = DOT11D_STATE_NONE;
- pDot11dInfo->CountryIeLen = 0;
+ pDot11dInfo->state = DOT11D_STATE_NONE;
+ pDot11dInfo->country_ie_len = 0;
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
RESET_CIE_WATCHDOG(ieee);
netdev_info(ieee->dev, "Dot11d_Init()\n");
@@ -23,10 +23,10 @@ EXPORT_SYMBOL(Dot11d_Init);
void Dot11d_Reset(struct ieee80211_device *ieee)
{
u32 i;
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
/* Clear old channel map */
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
/* Set new channel map */
for (i = 1; i <= 11; i++)
(pDot11dInfo->channel_map)[i] = 1;
@@ -34,8 +34,8 @@ void Dot11d_Reset(struct ieee80211_device *ieee)
for (i = 12; i <= 14; i++)
(pDot11dInfo->channel_map)[i] = 2;
- pDot11dInfo->State = DOT11D_STATE_NONE;
- pDot11dInfo->CountryIeLen = 0;
+ pDot11dInfo->state = DOT11D_STATE_NONE;
+ pDot11dInfo->country_ie_len = 0;
RESET_CIE_WATCHDOG(ieee);
}
EXPORT_SYMBOL(Dot11d_Reset);
@@ -52,24 +52,24 @@ EXPORT_SYMBOL(Dot11d_Reset);
void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
u16 CoutryIeLen, u8 *pCoutryIe)
{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
u8 i, j, NumTriples, MaxChnlNum;
- PCHNL_TXPOWER_TRIPLE pTriple;
+ struct chnl_txpower_triple *pTriple;
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
MaxChnlNum = 0;
NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
- pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
+ pTriple = (struct chnl_txpower_triple *)(pCoutryIe + 3);
for (i = 0; i < NumTriples; i++) {
- if (MaxChnlNum >= pTriple->FirstChnl) {
+ if (MaxChnlNum >= pTriple->first_channel) {
/* It is not in a monotonically increasing order, so
* stop processing.
*/
netdev_err(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
return;
}
- if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls)) {
+ if (MAX_CHANNEL_NUMBER < (pTriple->first_channel + pTriple->num_channels)) {
/* It is not a valid set of channel id, so stop
* processing.
*/
@@ -77,13 +77,13 @@ void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
return;
}
- for (j = 0; j < pTriple->NumChnls; j++) {
- pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
- pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
- MaxChnlNum = pTriple->FirstChnl + j;
+ for (j = 0; j < pTriple->num_channels; j++) {
+ pDot11dInfo->channel_map[pTriple->first_channel + j] = 1;
+ pDot11dInfo->max_tx_pwr_dbm_list[pTriple->first_channel + j] = pTriple->max_tx_pwr_dbm;
+ MaxChnlNum = pTriple->first_channel + j;
}
- pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
+ pTriple = (struct chnl_txpower_triple *)((u8 *)pTriple + 3);
}
netdev_info(dev->dev, "Channel List:");
for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
@@ -93,15 +93,15 @@ void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
UPDATE_CIE_SRC(dev, pTaddr);
- pDot11dInfo->CountryIeLen = CoutryIeLen;
- memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
- pDot11dInfo->State = DOT11D_STATE_LEARNED;
+ pDot11dInfo->country_ie_len = CoutryIeLen;
+ memcpy(pDot11dInfo->country_ie_buf, pCoutryIe, CoutryIeLen);
+ pDot11dInfo->state = DOT11D_STATE_LEARNED;
}
EXPORT_SYMBOL(Dot11d_UpdateCountryIe);
u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
if (Channel > MAX_CHANNEL_NUMBER) {
@@ -109,7 +109,7 @@ u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
return MaxTxPwrInDbm;
}
if (pDot11dInfo->channel_map[Channel])
- MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
+ MaxTxPwrInDbm = pDot11dInfo->max_tx_pwr_dbm_list[Channel];
return MaxTxPwrInDbm;
}
@@ -117,11 +117,11 @@ EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
void DOT11D_ScanComplete(struct ieee80211_device *dev)
{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
- switch (pDot11dInfo->State) {
+ switch (pDot11dInfo->state) {
case DOT11D_STATE_LEARNED:
- pDot11dInfo->State = DOT11D_STATE_DONE;
+ pDot11dInfo->state = DOT11D_STATE_DONE;
break;
case DOT11D_STATE_DONE:
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(DOT11D_ScanComplete);
int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
if (channel > MAX_CHANNEL_NUMBER) {
netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n");
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(IsLegalChannel);
int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
{
- PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
+ struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
u8 default_chn = 0;
u32 i = 0;
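For context on the parsing loop above: a country information element carries a 3-byte country string followed by one or more 3-byte triples, each matching struct chnl_txpower_triple, which is why NumTriples divides by 3 and pTriple advances 3 bytes per iteration. A hypothetical IE illustrating the layout:

static const u8 example_country_ie[] = {
	0x55, 0x53, 0x20,	/* country string "US " */
	0x01, 0x0b, 0x1b,	/* first_channel 1, num_channels 11, 27 dBm */
};
/* NumTriples = (6 - 3) / 3 = 1; the loop sets channel_map[1..11] = 1
 * and max_tx_pwr_dbm_list[1..11] = 27.
 */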
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.h b/drivers/staging/rtl8192u/ieee80211/dot11d.h
index 88bc298305bd..363a6bed18dd 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.h
@@ -4,62 +4,51 @@
#include "ieee80211.h"
+struct chnl_txpower_triple {
+ u8 first_channel;
+ u8 num_channels;
+ u8 max_tx_pwr_dbm;
+};
-typedef struct _CHNL_TXPOWER_TRIPLE {
- u8 FirstChnl;
- u8 NumChnls;
- u8 MaxTxPowerInDbm;
-} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
-
-typedef enum _DOT11D_STATE {
+enum dot11d_state {
DOT11D_STATE_NONE = 0,
DOT11D_STATE_LEARNED,
DOT11D_STATE_DONE,
-} DOT11D_STATE;
+};
-typedef struct _RT_DOT11D_INFO {
- /* DECLARE_RT_OBJECT(RT_DOT11D_INFO); */
+struct rt_dot11d_info {
+ bool enabled; /* dot11MultiDomainCapabilityEnabled */
- bool bEnabled; /* dot11MultiDomainCapabilityEnabled */
+ u16 country_ie_len; /* > 0 if country_ie_buf[] contains valid country information element. */
+ u8 country_ie_buf[MAX_IE_LEN];
+ u8 country_ie_src_addr[6]; /* Source AP of the country IE. */
+ u8 country_ie_watchdog;
- u16 CountryIeLen; /* > 0 if CountryIeBuf[] contains valid country information element. */
- u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6]; /* Source AP of the country IE. */
- u8 CountryIeWatchdog;
+ u8 channel_map[MAX_CHANNEL_NUMBER + 1]; /* !Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
+ u8 max_tx_pwr_dbm_list[MAX_CHANNEL_NUMBER + 1];
- u8 channel_map[MAX_CHANNEL_NUMBER+1]; /* !Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
- u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
+ enum dot11d_state state;
+};
- DOT11D_STATE State;
-} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
-#define eqMacAddr(a, b) (((a)[0] == (b)[0] && \
+#define eqMacAddr(a, b) (((a)[0] == (b)[0] && \
(a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && \
(a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0)
#define cpMacAddr(des, src) ((des)[0] = (src)[0], \
(des)[1] = (src)[1], (des)[2] = (src)[2], \
(des)[3] = (src)[3], (des)[4] = (src)[4], \
(des)[5] = (src)[5])
-#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
-
-#define IS_DOT11D_ENABLE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
-#define IS_COUNTRY_IE_VALID(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
+#define GET_DOT11D_INFO(__pIeeeDev) ((struct rt_dot11d_info *)((__pIeeeDev)->pDot11dInfo))
-#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
+#define IS_DOT11D_ENABLE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->enabled)
+#define IS_COUNTRY_IE_VALID(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->country_ie_len > 0)
-#define IS_COUNTRY_IE_CHANGED(__pIeeeDev, __Ie) \
- (((__Ie).Length == 0 || (__Ie).Length != GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen) ? \
- FALSE : \
- (!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, (__Ie).Octet, (__Ie).Length)))
+#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->country_ie_src_addr, __pTa)
+#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->country_ie_src_addr, __pTa)
-#define CIE_WATCHDOG_TH 1
-#define GET_CIE_WATCHDOG(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
+#define GET_CIE_WATCHDOG(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->country_ie_watchdog)
#define RESET_CIE_WATCHDOG(__pIeeeDev) (GET_CIE_WATCHDOG(__pIeeeDev) = 0)
#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
-#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-
-
void
Dot11d_Init(
struct ieee80211_device *dev
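The header rework above applies chapter 5 of Documentation/process/coding-style.rst, which discourages typedefs for ordinary structs and enums: the CamelCase typedef plus pointer alias is replaced by a plain tag that is spelled out at every use site. In miniature:

/* before: the type and its pointer hide behind aliases */
typedef struct _CHNL_TXPOWER_TRIPLE {
	u8 FirstChnl;
} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;

/* after: one obvious spelling everywhere */
struct chnl_txpower_triple {
	u8 first_channel;
};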
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 3addaa65085a..3cfeac0d7214 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1543,14 +1543,14 @@ typedef struct _RT_POWER_SAVE_CONTROL {
bool bTmpFilterHiddenAP;
bool bTmpUpdateParms;
u8 tmpSsidBuf[33];
- OCTET_STRING tmpSsid2Scan;
+ struct octet_string tmpSsid2Scan;
bool bTmpSsid2Scan;
u8 tmpNetworkType;
u8 tmpChannelNumber;
u16 tmpBcnPeriod;
u8 tmpDtimPeriod;
u16 tmpmCap;
- OCTET_STRING tmpSuppRateSet;
+ struct octet_string tmpSuppRateSet;
u8 tmpSuppRateBuf[MAX_NUM_RATES];
bool bTmpSuppRate;
IbssParms tmpIbpm;
@@ -1646,12 +1646,12 @@ struct ieee80211_device {
struct list_head Tx_TS_Admit_List;
struct list_head Tx_TS_Pending_List;
struct list_head Tx_TS_Unused_List;
- TX_TS_RECORD TxTsRecord[TOTAL_TS_NUM];
+ struct tx_ts_record TxTsRecord[TOTAL_TS_NUM];
// 802.11e and WMM Traffic Stream Info (RX)
struct list_head Rx_TS_Admit_List;
struct list_head Rx_TS_Pending_List;
struct list_head Rx_TS_Unused_List;
- RX_TS_RECORD RxTsRecord[TOTAL_TS_NUM];
+ struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
//#ifdef TO_DO_LIST
RX_REORDER_ENTRY RxReorderEntry[128];
struct list_head RxReorder_Unused_List;
@@ -2002,7 +2002,7 @@ struct ieee80211_device {
short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
//added by wb for HT related
// void (*SwChnlByTimerHandler)(struct net_device *dev, int channel);
- void (*SetBWModeHandler)(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+ void (*SetBWModeHandler)(struct net_device *dev, enum ht_channel_width Bandwidth, enum ht_extension_chan_offset Offset);
// void (*UpdateHalRATRTableHandler)(struct net_device* dev, u8* pMcsRate);
bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
@@ -2358,7 +2358,7 @@ void HTDebugHTCapability(u8 *CapIE, u8 *TitleString);
void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
void HTSetConnectBwMode(struct ieee80211_device *ieee,
- HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+ enum ht_channel_width Bandwidth, enum ht_extension_chan_offset Offset);
void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap,
u8 *len, u8 isEncrypt);
@@ -2388,10 +2388,10 @@ u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb);
int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb);
int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
-void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS,
+void TsInitAddBA(struct ieee80211_device *ieee, struct tx_ts_record *pTS,
u8 Policy, u8 bOverwritePending);
void TsInitDelBA(struct ieee80211_device *ieee,
- PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
+ struct ts_common_info *pTsCommonInfo, enum tr_select TxRxSelect);
void BaSetupTimeOut(struct timer_list *t);
void TxBaInactTimeout(struct timer_list *t);
void RxBaInactTimeout(struct timer_list *t);
@@ -2399,14 +2399,14 @@ void ResetBaEntry(PBA_RECORD pBA);
//function in TS.c
bool GetTs(
struct ieee80211_device *ieee,
- PTS_COMMON_INFO *ppTS,
+ struct ts_common_info **ppTS,
u8 *Addr,
u8 TID,
- TR_SELECT TxRxSelect, //Rx:1, Tx:0
+ enum tr_select TxRxSelect, //Rx:1, Tx:0
bool bAddNewTs
);
void TSInitialize(struct ieee80211_device *ieee);
-void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS);
+void TsStartAddBaProcess(struct ieee80211_device *ieee, struct tx_ts_record *pTxTS);
void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
void RemoveAllTS(struct ieee80211_device *ieee);
void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index a7efaae4e25a..1088fa0aee0e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -54,9 +54,9 @@ struct ieee80211_tkip_data {
int key_idx;
struct crypto_skcipher *rx_tfm_arc4;
- struct crypto_ahash *rx_tfm_michael;
+ struct crypto_shash *rx_tfm_michael;
struct crypto_skcipher *tx_tfm_arc4;
- struct crypto_ahash *tx_tfm_michael;
+ struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
@@ -80,8 +80,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->tx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API michael_mic\n");
@@ -98,8 +97,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->rx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API michael_mic\n");
@@ -111,9 +109,9 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
- crypto_free_ahash(priv->tx_tfm_michael);
+ crypto_free_shash(priv->tx_tfm_michael);
crypto_free_skcipher(priv->tx_tfm_arc4);
- crypto_free_ahash(priv->rx_tfm_michael);
+ crypto_free_shash(priv->rx_tfm_michael);
crypto_free_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -127,9 +125,9 @@ static void ieee80211_tkip_deinit(void *priv)
struct ieee80211_tkip_data *_priv = priv;
if (_priv) {
- crypto_free_ahash(_priv->tx_tfm_michael);
+ crypto_free_shash(_priv->tx_tfm_michael);
crypto_free_skcipher(_priv->tx_tfm_arc4);
- crypto_free_ahash(_priv->rx_tfm_michael);
+ crypto_free_shash(_priv->rx_tfm_michael);
crypto_free_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
@@ -500,30 +498,31 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return keyidx;
}
-static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
u8 *data, size_t data_len, u8 *mic)
{
- AHASH_REQUEST_ON_STACK(req, tfm_michael);
- struct scatterlist sg[2];
+ SHASH_DESC_ON_STACK(desc, tfm_michael);
int err;
- if (tfm_michael == NULL) {
- printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
- return -1;
- }
-
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], hdr, 16);
- sg_set_buf(&sg[1], data, data_len);
+ desc->tfm = tfm_michael;
+ desc->flags = 0;
- if (crypto_ahash_setkey(tfm_michael, key, 8))
+ if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
- ahash_request_set_tfm(req, tfm_michael);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, mic, data_len + 16);
- err = crypto_ahash_digest(req);
- ahash_request_zero(req);
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+ err = crypto_shash_update(desc, hdr, 16);
+ if (err)
+ goto out;
+ err = crypto_shash_update(desc, data, data_len);
+ if (err)
+ goto out;
+ err = crypto_shash_final(desc, mic);
+
+out:
+ shash_desc_zero(desc);
return err;
}
@@ -663,9 +662,9 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
- struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+ struct crypto_shash *tfm = tkey->tx_tfm_michael;
struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
- struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+ struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index f2cdcc2bcab4..28cae82d795c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -508,10 +508,10 @@ drop:
return 1;
}
-static bool AddReorderEntry(PRX_TS_RECORD pTS, PRX_REORDER_ENTRY pReorderEntry)
+static bool AddReorderEntry(struct rx_ts_record *pTS, PRX_REORDER_ENTRY pReorderEntry)
{
- struct list_head *pList = &pTS->RxPendingPktList;
- while(pList->next != &pTS->RxPendingPktList)
+ struct list_head *pList = &pTS->rx_pending_pkt_list;
+ while(pList->next != &pTS->rx_pending_pkt_list)
{
if( SN_LESS(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
{
@@ -586,16 +586,16 @@ void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_
static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
struct ieee80211_rxb *prxb,
- PRX_TS_RECORD pTS, u16 SeqNum)
+ struct rx_ts_record *pTS, u16 SeqNum)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PRX_REORDER_ENTRY pReorderEntry = NULL;
struct ieee80211_rxb **prxbIndicateArray;
u8 WinSize = pHTInfo->RxReorderWinSize;
- u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
+ u16 WinEnd = (pTS->rx_indicate_seq + WinSize - 1) % 4096;
u8 index = 0;
bool bMatchWinStart = false, bPktInBuf = false;
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__func__,SeqNum,pTS->RxIndicateSeq,WinSize);
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->rx_indicate_seq is %d, WinSize is %d\n",__func__,SeqNum,pTS->rx_indicate_seq,WinSize);
prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
sizeof(struct ieee80211_rxb *),
@@ -604,14 +604,14 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
return;
/* Rx Reorder initialize condition.*/
- if (pTS->RxIndicateSeq == 0xffff) {
- pTS->RxIndicateSeq = SeqNum;
+ if (pTS->rx_indicate_seq == 0xffff) {
+ pTS->rx_indicate_seq = SeqNum;
}
/* Drop out the packet which SeqNum is smaller than WinStart */
- if (SN_LESS(SeqNum, pTS->RxIndicateSeq)) {
+ if (SN_LESS(SeqNum, pTS->rx_indicate_seq)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
- pTS->RxIndicateSeq, SeqNum);
+ pTS->rx_indicate_seq, SeqNum);
pHTInfo->RxReorderDropCounter++;
{
int i;
@@ -631,16 +631,16 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
* 1. Incoming SeqNum is equal to WinStart =>Window shift 1
* 2. Incoming SeqNum is larger than the WinEnd => Window shift N
*/
- if(SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) {
- pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
+ if(SN_EQUAL(SeqNum, pTS->rx_indicate_seq)) {
+ pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
bMatchWinStart = true;
} else if(SN_LESS(WinEnd, SeqNum)) {
if(SeqNum >= (WinSize - 1)) {
- pTS->RxIndicateSeq = SeqNum + 1 -WinSize;
+ pTS->rx_indicate_seq = SeqNum + 1 -WinSize;
} else {
- pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum +1)) + 1;
+ pTS->rx_indicate_seq = 4095 - (WinSize - (SeqNum + 1)) + 1;
}
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->rx_indicate_seq, SeqNum);
}
/*
@@ -655,7 +655,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
if(bMatchWinStart) {
/* Current packet is going to be indicated.*/
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\
- pTS->RxIndicateSeq, SeqNum);
+ pTS->rx_indicate_seq, SeqNum);
prxbIndicateArray[0] = prxb;
// printk("========================>%s(): SeqNum is %d\n",__func__,SeqNum);
index = 1;
@@ -673,7 +673,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
if(!AddReorderEntry(pTS, pReorderEntry)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n",
- __func__, pTS->RxIndicateSeq, SeqNum);
+ __func__, pTS->rx_indicate_seq, SeqNum);
list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
{
int i;
@@ -685,7 +685,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
}
} else {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,
- "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
+ "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->rx_indicate_seq, SeqNum);
}
}
else {
@@ -707,11 +707,11 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
}
/* Check if there is any packet need indicate.*/
- while(!list_empty(&pTS->RxPendingPktList)) {
+ while(!list_empty(&pTS->rx_pending_pkt_list)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__func__);
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- if (SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
+ pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->rx_pending_pkt_list.prev,RX_REORDER_ENTRY,List);
+ if (SN_LESS(pReorderEntry->SeqNum, pTS->rx_indicate_seq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
{
/* This protect buffer from overflow. */
if (index >= REORDER_WIN_SIZE) {
@@ -722,10 +722,10 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
list_del_init(&pReorderEntry->List);
- if(SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
- pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
+ if(SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
+ pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->rx_indicate_seq, SeqNum);
prxbIndicateArray[index] = pReorderEntry->prxb;
// printk("========================>%s(): pReorderEntry->SeqNum is %d\n",__func__,pReorderEntry->SeqNum);
index++;
@@ -740,8 +740,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
/* Handling pending timer. Set this timer to prevent from long time Rx buffering.*/
if (index>0) {
// Cancel previous pending timer.
- // del_timer_sync(&pTS->RxPktPendingTimer);
- pTS->RxTimeoutIndicateSeq = 0xffff;
+ // del_timer_sync(&pTS->rx_pkt_pending_timer);
+ pTS->rx_timeout_indicate_seq = 0xffff;
// Indicate packets
if(index>REORDER_WIN_SIZE){
@@ -752,15 +752,15 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
ieee80211_indicate_packets(ieee, prxbIndicateArray, index);
}
- if (bPktInBuf && pTS->RxTimeoutIndicateSeq==0xffff) {
+ if (bPktInBuf && pTS->rx_timeout_indicate_seq == 0xffff) {
// Set new pending timer.
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): SET rx timeout timer\n", __func__);
- pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq;
- if(timer_pending(&pTS->RxPktPendingTimer))
- del_timer_sync(&pTS->RxPktPendingTimer);
- pTS->RxPktPendingTimer.expires = jiffies +
+ pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq;
+ if(timer_pending(&pTS->rx_pkt_pending_timer))
+ del_timer_sync(&pTS->rx_pkt_pending_timer);
+ pTS->rx_pkt_pending_timer.expires = jiffies +
msecs_to_jiffies(pHTInfo->RxReorderPendingTime);
- add_timer(&pTS->RxPktPendingTimer);
+ add_timer(&pTS->rx_pkt_pending_timer);
}
kfree(prxbIndicateArray);
@@ -894,12 +894,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
u16 fc, type, stype, sc;
struct net_device_stats *stats;
unsigned int frag;
- u8 *payload;
u16 ethertype;
//added by amy for reorder
u8 TID = 0;
u16 SeqNum = 0;
- PRX_TS_RECORD pTS = NULL;
+ struct rx_ts_record *pTS = NULL;
//bool bIsAggregateFrame = false;
//added by amy for reorder
#ifdef NOT_YET
@@ -1018,27 +1017,27 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
}
else
{
- PRX_TS_RECORD pRxTS = NULL;
+ struct rx_ts_record *pRxTS = NULL;
//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__func__, tid);
if(GetTs(
ieee,
- (PTS_COMMON_INFO *) &pRxTS,
+ (struct ts_common_info **) &pRxTS,
hdr->addr2,
Frame_QoSTID((u8 *)(skb->data)),
RX_DIR,
true))
{
- // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__func__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc));
+ // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->rx_last_frag_num is %d,frag is %d,pRxTS->rx_last_seq_num is %d,seq is %d\n",__func__,pRxTS->rx_last_frag_num,frag,pRxTS->rx_last_seq_num,WLAN_GET_SEQ_SEQ(sc));
if ((fc & (1<<11)) &&
- (frag == pRxTS->RxLastFragNum) &&
- (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)) {
+ (frag == pRxTS->rx_last_frag_num) &&
+ (WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num)) {
goto rx_dropped;
}
else
{
- pRxTS->RxLastFragNum = frag;
- pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc);
+ pRxTS->rx_last_frag_num = frag;
+ pRxTS->rx_last_seq_num = WLAN_GET_SEQ_SEQ(sc);
}
}
else
@@ -1267,7 +1266,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
{
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee,(PTS_COMMON_INFO *) &pTS,hdr->addr2,TID,RX_DIR,true);
+ GetTs(ieee,(struct ts_common_info **) &pTS,hdr->addr2,TID,RX_DIR,true);
if (TID !=0 && TID !=3)
{
ieee->bis_any_nonbepkts = true;
@@ -1275,7 +1274,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
}
//added by amy for reorder
/* skb: hdr + (possible reassembled) full plaintext payload */
- payload = skb->data + hdrlen;
//ethertype = (payload[6] << 8) | payload[7];
rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
if (!rxb)
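The rx_indicate_seq arithmetic renamed throughout this file operates on 12-bit 802.11 sequence numbers, so every window update is taken modulo 4096 and ordering uses the driver's wrap-aware SN_LESS()/SN_EQUAL() macros instead of a plain '<'. The macro bodies below are assumptions matching the usual rtl8192-family definitions, quoted only to make the wrap behavior concrete:

#define SN_LESS(a, b)	((((a) - (b)) & 0x800) != 0)	/* a precedes b mod 4096 */
#define SN_EQUAL(a, b)	((a) == (b))

/* Example: rx_indicate_seq = 4094 and an incoming SeqNum = 2.
 * (4094 - 2) & 0x800 is nonzero, so 4094 precedes 2 in mod-4096 order:
 * the frame counts as newer, and the window shifts forward across the
 * 4095 -> 0 wrap instead of dropping it as stale.
 */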
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 21874e78d8a1..212cc9ccbb96 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -12,8 +12,6 @@
*
* released under the GPL
*/
-
-
#include "ieee80211.h"
#include <linux/random.h>
@@ -48,7 +46,6 @@ static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
rate_len = IEEE80211_CCK_RATE_LEN + 2;
if (ieee->modulation & IEEE80211_OFDM_MODULATION)
-
rate_len += IEEE80211_OFDM_RATE_LEN + 2;
return rate_len;
@@ -79,8 +76,7 @@ static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
-
+ if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
*tag++ = MFIE_TYPE_RATES_EX;
*tag++ = 8;
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
@@ -91,14 +87,12 @@ static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
-
}
/* We may add an option for custom rates that specific HW might support */
*tag_p = tag;
}
-
static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -112,11 +106,10 @@ static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
*tag++ = 0x00;
*tag++ = 0x01;
#ifdef SUPPORT_USPD
- if(ieee->current_network.wmm_info & 0x80) {
- *tag++ = 0x0f|MAX_SP_Len;
- } else {
+ if (ieee->current_network.wmm_info & 0x80)
+ *tag++ = 0x0f | MAX_SP_Len;
+ else
*tag++ = MAX_SP_Len;
- }
#else
*tag++ = MAX_SP_Len;
#endif
@@ -147,7 +140,7 @@ static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
{
int nh;
- nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
+ nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM;
/*
* if the queue is full but we have newer frames then
@@ -166,13 +159,13 @@ static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
{
struct sk_buff *ret;
- if(ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
+ if (ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
return NULL;
ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail];
ieee->mgmt_queue_tail =
- (ieee->mgmt_queue_tail+1) % MGMT_QUEUE_NUM;
+ (ieee->mgmt_queue_tail + 1) % MGMT_QUEUE_NUM;
return ret;
}
@@ -188,16 +181,16 @@ static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
u8 rate;
/* 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M. */
- if(pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
+ if (pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
rate = 0x0c;
else
rate = ieee->basic_rate & 0x7f;
if (rate == 0) {
/* 2005.01.26, by rcnjko. */
- if(ieee->mode == IEEE_A||
- ieee->mode== IEEE_N_5G||
- (ieee->mode== IEEE_N_24G&&!pHTInfo->bCurSuppCCK))
+ if (ieee->mode == IEEE_A ||
+ ieee->mode == IEEE_N_5G ||
+ (ieee->mode == IEEE_N_24G && !pHTInfo->bCurSuppCCK))
rate = 0x0c;
else
rate = 0x02;
@@ -205,8 +198,7 @@ static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
/*
// Data rate of ProbeReq is already decided. Annie, 2005-03-31
- if( pMgntInfo->bScanInProgress || (pMgntInfo->bDualModeScanStep!=0) )
- {
+ if( pMgntInfo->bScanInProgress || (pMgntInfo->bDualModeScanStep!=0) ) {
if(pMgntInfo->dot11CurrentWirelessMode==WIRELESS_MODE_A)
rate = 0x0c;
else
@@ -216,17 +208,16 @@ static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
return rate;
}
-
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
{
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct rtl_80211_hdr_3addr *header=
- (struct rtl_80211_hdr_3addr *) skb->data;
+ struct rtl_80211_hdr_3addr *header =
+ (struct rtl_80211_hdr_3addr *)skb->data;
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
spin_lock_irqsave(&ieee->lock, flags);
@@ -239,11 +230,11 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
tcb_desc->bTxDisableRateFallBack = 1;
tcb_desc->bTxUseDriverAssingedRate = 1;
- if(single){
- if(ieee->queue_stop){
+ if (single) {
+ if (ieee->queue_stop) {
enqueue_mgmt(ieee, skb);
- }else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
+ } else {
+ header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -252,12 +243,12 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
/* avoid watchdog triggers */
netif_trans_update(ieee->dev);
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
//dev_kfree_skb_any(skb);//edit by thomas
}
spin_unlock_irqrestore(&ieee->lock, flags);
- }else{
+ } else {
spin_unlock_irqrestore(&ieee->lock, flags);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
@@ -269,14 +260,14 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
ieee->seq_ctrl[0]++;
/* check whether the managed packet queued greater than 5 */
- if(!ieee->check_nic_enough_desc(ieee->dev,tcb_desc->queue_index)||\
- (skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) != 0)||\
- (ieee->queue_stop) ) {
+ if (!ieee->check_nic_enough_desc(ieee->dev, tcb_desc->queue_index) || \
+ (skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) != 0) || \
+ (ieee->queue_stop)) {
/* insert the skb packet to the management queue */
/* as for the completion function, it does not need
* to check it any more.
* */
- printk("%s():insert to waitqueue!\n",__func__);
+ printk("%s():insert to waitqueue!\n", __func__);
skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index], skb);
} else {
ieee->softmac_hard_start_xmit(skb, ieee->dev);
@@ -289,14 +280,11 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
static inline void
softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
{
-
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct rtl_80211_hdr_3addr *header =
- (struct rtl_80211_hdr_3addr *) skb->data;
-
-
- if(single){
+ (struct rtl_80211_hdr_3addr *)skb->data;
+ if (single) {
header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -306,10 +294,8 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
/* avoid watchdog triggers */
netif_trans_update(ieee->dev);
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
-
- }else{
-
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
+ } else {
header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -318,7 +304,6 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
ieee->seq_ctrl[0]++;
ieee->softmac_hard_start_xmit(skb, ieee->dev);
-
}
//dev_kfree_skb_any(skb);//edit by thomas
}
@@ -356,8 +341,8 @@ static inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
memcpy(tag, ieee->current_network.ssid, len);
tag += len;
- ieee80211_MFIE_Brate(ieee,&tag);
- ieee80211_MFIE_Grate(ieee,&tag);
+ ieee80211_MFIE_Brate(ieee, &tag);
+ ieee80211_MFIE_Grate(ieee, &tag);
return skb;
}
@@ -367,7 +352,7 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
- if(!ieee->ieee_up)
+ if (!ieee->ieee_up)
return;
//unsigned long flags;
skb = ieee80211_get_beacon_(ieee);
@@ -385,12 +370,11 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
// if(!timer_pending(&ieee->beacon_timer))
// add_timer(&ieee->beacon_timer);
mod_timer(&ieee->beacon_timer,
- jiffies + msecs_to_jiffies(ieee->current_network.beacon_interval-5));
+ jiffies + msecs_to_jiffies(ieee->current_network.beacon_interval - 5));
}
//spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
-
static void ieee80211_send_beacon_cb(struct timer_list *t)
{
struct ieee80211_device *ieee =
@@ -402,7 +386,6 @@ static void ieee80211_send_beacon_cb(struct timer_list *t)
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
-
static void ieee80211_send_probe(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
@@ -429,19 +412,17 @@ static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
{
short ch = 0;
- u8 channel_map[MAX_CHANNEL_NUMBER+1];
+ u8 channel_map[MAX_CHANNEL_NUMBER + 1];
- memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
+ memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER + 1);
mutex_lock(&ieee->scan_mutex);
- while(1)
- {
-
- do{
+ while (1) {
+ do {
ch++;
if (ch > MAX_CHANNEL_NUMBER)
goto out; /* scan completed */
- }while(!channel_map[ch]);
+ } while (!channel_map[ch]);
/* this function can be called in two situations
* 1- We have switched to ad-hoc mode and we are
@@ -465,7 +446,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
if (ieee->state == IEEE80211_LINKED)
goto out;
ieee->set_chan(ieee->dev, ch);
- if(channel_map[ch] == 1)
+ if (channel_map[ch] == 1)
ieee80211_send_probe_requests(ieee);
/* this prevent excessive time wait when we
@@ -475,19 +456,17 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
goto out;
msleep_interruptible(IEEE80211_SOFTMAC_SCAN_TIME);
-
}
out:
- if(ieee->state < IEEE80211_LINKED){
+ if (ieee->state < IEEE80211_LINKED) {
ieee->actscanning = false;
mutex_unlock(&ieee->scan_mutex);
+ } else {
+ ieee->sync_scan_hurryup = 0;
+ if (IS_DOT11D_ENABLE(ieee))
+ DOT11D_ScanComplete(ieee);
+ mutex_unlock(&ieee->scan_mutex);
}
- else{
- ieee->sync_scan_hurryup = 0;
- if(IS_DOT11D_ENABLE(ieee))
- DOT11D_ScanComplete(ieee);
- mutex_unlock(&ieee->scan_mutex);
-}
}
EXPORT_SYMBOL(ieee80211_softmac_scan_syncro);
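
ieee80211_softmac_scan_syncro() walks the regulatory channel-map snapshot taken at the top: entries of 0 are skipped, and the `channel_map[ch] == 1` test gates probe transmission (the driver appears to reserve 2 for listen-only channels — treated as an assumption here). A user-space sketch of that walk, with an illustrative MAX_CHANNEL_NUMBER:

#include <stdint.h>
#include <stdio.h>

#define MAX_CHANNEL_NUMBER 14	/* illustrative; the driver's constant differs */

/* channel_map[ch]: 0 = disabled, 1 = active scan, 2 = assumed passive-only */
static void scan_all_channels(const uint8_t *channel_map)
{
	int ch = 0;

	while (1) {
		do {	/* skip channels the map disables */
			ch++;
			if (ch > MAX_CHANNEL_NUMBER)
				return;	/* scan completed */
		} while (!channel_map[ch]);

		printf("tune to channel %d\n", ch);
		if (channel_map[ch] == 1)
			printf("  send probe requests\n");
		/* the real loop now sleeps IEEE80211_SOFTMAC_SCAN_TIME ms */
	}
}

int main(void)
{
	uint8_t map[MAX_CHANNEL_NUMBER + 1] = {0};

	map[1] = 1;
	map[6] = 1;
	map[11] = 1;
	map[13] = 2;
	scan_all_channels(map);
	return 0;
}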
@@ -496,37 +475,35 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
static short watchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER+1];
+ u8 channel_map[MAX_CHANNEL_NUMBER + 1];
- memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
- if(!ieee->ieee_up)
+ memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER + 1);
+ if (!ieee->ieee_up)
return;
mutex_lock(&ieee->scan_mutex);
- do{
+ do {
ieee->current_network.channel =
(ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
- if (watchdog++ > MAX_CHANNEL_NUMBER)
- {
+ if (watchdog++ > MAX_CHANNEL_NUMBER) {
//if current channel is not in channel map, set to default channel.
if (!channel_map[ieee->current_network.channel]) {
ieee->current_network.channel = 6;
goto out; /* no good chans */
}
}
- }while(!channel_map[ieee->current_network.channel]);
- if (ieee->scanning == 0 )
+ } while (!channel_map[ieee->current_network.channel]);
+ if (ieee->scanning == 0)
goto out;
ieee->set_chan(ieee->dev, ieee->current_network.channel);
- if(channel_map[ieee->current_network.channel] == 1)
+ if (channel_map[ieee->current_network.channel] == 1)
ieee80211_send_probe_requests(ieee);
-
schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
mutex_unlock(&ieee->scan_mutex);
return;
out:
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
ieee->actscanning = false;
watchdog = 0;
@@ -534,12 +511,10 @@ out:
mutex_unlock(&ieee->scan_mutex);
}
-
-
static void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
- spin_lock_irqsave(&ieee->beacon_lock,flags);
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 1;
ieee80211_send_beacon(ieee);
@@ -557,13 +532,11 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
-
}
-
void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
{
- if(ieee->stop_send_beacons)
+ if (ieee->stop_send_beacons)
ieee->stop_send_beacons(ieee->dev);
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
ieee80211_beacons_stop(ieee);
@@ -572,9 +545,9 @@ EXPORT_SYMBOL(ieee80211_stop_send_beacons);
void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
{
- if(ieee->start_send_beacons)
+ if (ieee->start_send_beacons)
ieee->start_send_beacons(ieee->dev, ieee->basic_rate);
- if(ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
+ if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
ieee80211_beacons_start(ieee);
}
EXPORT_SYMBOL(ieee80211_start_send_beacons);
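
The mod_timer() rearm in ieee80211_send_beacon() schedules the next beacon at beacon_interval - 5 ms, so the frame is queued slightly before the nominal target beacon time. A small sketch of that deadline arithmetic, assuming the interval is treated as milliseconds exactly as the msecs_to_jiffies() call does (names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Next beacon deadline, rearmed 5 ms early as in the mod_timer() call. */
static uint64_t next_beacon_ms(uint64_t now_ms, unsigned int interval_ms)
{
	return now_ms + interval_ms - 5;
}

int main(void)
{
	uint64_t now = 0;
	unsigned int interval = 100;	/* a typical beacon interval */
	int i;

	for (i = 0; i < 3; i++) {
		now = next_beacon_ms(now, interval);
		printf("beacon #%d queued at t=%llu ms\n", i,
		       (unsigned long long)now);
	}
	return 0;
}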
@@ -610,39 +583,32 @@ EXPORT_SYMBOL(ieee80211_stop_scan);
/* called with ieee->lock held */
static void ieee80211_start_scan(struct ieee80211_device *ieee)
{
- if (IS_DOT11D_ENABLE(ieee) )
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
if (IS_COUNTRY_IE_VALID(ieee))
- {
RESET_CIE_WATCHDOG(ieee);
- }
}
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
if (ieee->scanning == 0) {
ieee->scanning = 1;
schedule_delayed_work(&ieee->softmac_scan_wq, 0);
}
- }else
+ } else {
ieee->start_scan(ieee->dev);
-
+ }
}
/* called with wx_mutex held */
void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
{
- if (IS_DOT11D_ENABLE(ieee) )
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
if (IS_COUNTRY_IE_VALID(ieee))
- {
RESET_CIE_WATCHDOG(ieee);
- }
}
ieee->sync_scan_hurryup = 0;
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
ieee80211_softmac_scan_syncro(ieee);
else
ieee->scan_syncro(ieee->dev);
-
}
EXPORT_SYMBOL(ieee80211_start_scan_syncro);
@@ -654,16 +620,16 @@ ieee80211_authentication_req(struct ieee80211_network *beacon,
struct ieee80211_authentication *auth;
int len = sizeof(struct ieee80211_authentication) + challengelen + ieee->tx_headroom;
-
skb = dev_alloc_skb(len);
- if (!skb) return NULL;
+ if (!skb)
+ return NULL;
skb_reserve(skb, ieee->tx_headroom);
auth = skb_put(skb, sizeof(struct ieee80211_authentication));
if (challengelen)
auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH
- | IEEE80211_FCTL_WEP);
+ | IEEE80211_FCTL_WEP);
else
auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
@@ -674,23 +640,21 @@ ieee80211_authentication_req(struct ieee80211_network *beacon,
memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
//auth->algorithm = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
- if(ieee->auth_mode == 0)
+ if (ieee->auth_mode == 0)
auth->algorithm = WLAN_AUTH_OPEN;
- else if(ieee->auth_mode == 1)
+ else if (ieee->auth_mode == 1)
auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY);
- else if(ieee->auth_mode == 2)
+ else if (ieee->auth_mode == 2)
auth->algorithm = WLAN_AUTH_OPEN; /* 0x80; */
- printk("=================>%s():auth->algorithm is %d\n",__func__,auth->algorithm);
+ printk("=================>%s():auth->algorithm is %d\n", __func__, auth->algorithm);
auth->transaction = cpu_to_le16(ieee->associate_seq);
ieee->associate_seq++;
auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
return skb;
-
}
-
static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
{
u8 *tag;
@@ -703,74 +667,71 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
- int rate_len = ieee->current_network.rates_len+2;
+ int rate_len = ieee->current_network.rates_len + 2;
int rate_ex_len = ieee->current_network.rates_ex_len;
int wpa_ie_len = ieee->wpa_ie_len;
u8 erpinfo_content = 0;
u8 *tmp_ht_cap_buf;
- u8 tmp_ht_cap_len=0;
+ u8 tmp_ht_cap_len = 0;
u8 *tmp_ht_info_buf;
- u8 tmp_ht_info_len=0;
+ u8 tmp_ht_info_len = 0;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u8 *tmp_generic_ie_buf=NULL;
- u8 tmp_generic_ie_len=0;
+ u8 *tmp_generic_ie_buf = NULL;
+ u8 tmp_generic_ie_len = 0;
- if(rate_ex_len > 0) rate_ex_len+=2;
+ if (rate_ex_len > 0)
+ rate_ex_len += 2;
- if(ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
+ if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
atim_len = 4;
else
atim_len = 0;
- if(ieee80211_is_54g(&ieee->current_network))
+ if (ieee80211_is_54g(&ieee->current_network))
erp_len = 3;
else
erp_len = 0;
-
crypt = ieee->crypt[ieee->tx_keyidx];
-
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
/* HT related element */
- tmp_ht_cap_buf =(u8 *) &(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_cap_buf = (u8 *)&ieee->pHTInfo->SelfHTCap;
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- tmp_ht_info_buf =(u8 *) &(ieee->pHTInfo->SelfHTInfo);
+ tmp_ht_info_buf = (u8 *)&ieee->pHTInfo->SelfHTInfo;
tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
- HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len,encrypt);
- HTConstructInfoElement(ieee,tmp_ht_info_buf,&tmp_ht_info_len, encrypt);
+ HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len, encrypt);
+ HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len, encrypt);
-
- if (pHTInfo->bRegRT2RTAggregation)
- {
+ if (pHTInfo->bRegRT2RTAggregation) {
tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
tmp_generic_ie_len = sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf, &tmp_generic_ie_len);
}
// printk("===============>tmp_ht_cap_len is %d,tmp_ht_info_len is %d, tmp_generic_ie_len is %d\n",tmp_ht_cap_len,tmp_ht_info_len,tmp_generic_ie_len);
- beacon_size = sizeof(struct ieee80211_probe_response)+2+
- ssid_len
- +3 //channel
- +rate_len
- +rate_ex_len
- +atim_len
- +erp_len
- +wpa_ie_len
- // +tmp_ht_cap_len
- // +tmp_ht_info_len
- // +tmp_generic_ie_len
-// +wmm_len+2
- +ieee->tx_headroom;
+ beacon_size = sizeof(struct ieee80211_probe_response) + 2
+ + ssid_len
+ + 3 //channel
+ + rate_len
+ + rate_ex_len
+ + atim_len
+ + erp_len
+ + wpa_ie_len
+ // + tmp_ht_cap_len
+ // + tmp_ht_info_len
+ // + tmp_generic_ie_len
+// + wmm_len+2
+ + ieee->tx_headroom;
skb = dev_alloc_skb(beacon_size);
if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
- memcpy (beacon_buf->header.addr1, dest,ETH_ALEN);
- memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
+ memcpy(beacon_buf->header.addr1, dest, ETH_ALEN);
+ memcpy(beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
beacon_buf->header.duration_id = 0; /* FIXME */
beacon_buf->beacon_interval =
@@ -780,28 +741,27 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
beacon_buf->capability |=
cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); /* add short preamble here */
- if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
+ if (ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
crypt = ieee->crypt[ieee->tx_keyidx];
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
beacon_buf->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
beacon_buf->info_element[0].len = ssid_len;
- tag = (u8 *) beacon_buf->info_element[0].data;
+ tag = (u8 *)beacon_buf->info_element[0].data;
memcpy(tag, ssid, ssid_len);
tag += ssid_len;
*(tag++) = MFIE_TYPE_RATES;
- *(tag++) = rate_len-2;
- memcpy(tag, ieee->current_network.rates, rate_len-2);
- tag+=rate_len-2;
+ *(tag++) = rate_len - 2;
+ memcpy(tag, ieee->current_network.rates, rate_len - 2);
+ tag += rate_len - 2;
*(tag++) = MFIE_TYPE_DS_SET;
*(tag++) = 1;
@@ -813,7 +773,7 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
put_unaligned_le16(ieee->current_network.atim_window,
tag);
- tag+=2;
+ tag += 2;
}
if (erp_len) {
@@ -823,15 +783,14 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
}
if (rate_ex_len) {
*(tag++) = MFIE_TYPE_RATES_EX;
- *(tag++) = rate_ex_len-2;
- memcpy(tag, ieee->current_network.rates_ex, rate_ex_len-2);
- tag+=rate_ex_len-2;
+ *(tag++) = rate_ex_len - 2;
+ memcpy(tag, ieee->current_network.rates_ex, rate_ex_len - 2);
+ tag += rate_ex_len - 2;
}
- if (wpa_ie_len)
- {
- if (ieee->iw_mode == IW_MODE_ADHOC)
- {//as Windows will set pairwise key same as the group key which is not allowed in Linux, so set this for IOT issue. WB 2008.07.07
+ if (wpa_ie_len) {
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
+ //as Windows will set the pairwise key the same as the group key, which is not allowed in Linux, so set this for the IOT issue. WB 2008.07.07
memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
}
memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
@@ -842,7 +801,6 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
return skb;
}
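
ieee80211_probe_resp() builds its body with the classic tagged-element pattern repeated above: write the element id, write the length, copy the payload, advance the cursor. A self-contained sketch of that append step — append_ie() is an invented helper, and only the SSID (0) and Supported Rates (1) element ids are standard:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one information element (id, len, payload) and return the
 * advanced write pointer — the *(tag++) pattern used above. */
static uint8_t *append_ie(uint8_t *tag, uint8_t id,
			  const void *data, uint8_t len)
{
	*tag++ = id;
	*tag++ = len;
	memcpy(tag, data, len);
	return tag + len;
}

int main(void)
{
	uint8_t frame[64];
	uint8_t *tag = frame;
	const uint8_t rates[] = {0x82, 0x84, 0x8b, 0x96};	/* basic CCK rates */

	tag = append_ie(tag, 0, "demo", 4);		/* SSID */
	tag = append_ie(tag, 1, rates, sizeof(rates));	/* Supported Rates */
	printf("built %zu bytes of IEs\n", (size_t)(tag - frame));
	return 0;
}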
-
static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
u8 *dest)
{
@@ -866,19 +824,19 @@ static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
assoc = skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
- memcpy(assoc->header.addr1, dest,ETH_ALEN);
+ memcpy(assoc->header.addr1, dest, ETH_ALEN);
memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS);
-
- if(ieee->short_slot)
+ if (ieee->short_slot)
assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
if (ieee->host_encrypt)
crypt = ieee->crypt[ieee->tx_keyidx];
- else crypt = NULL;
+ else
+ crypt = NULL;
encrypt = crypt && crypt->ops;
@@ -887,8 +845,10 @@ static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
assoc->status = 0;
assoc->aid = cpu_to_le16(ieee->assoc_id);
- if (ieee->assoc_id == 0x2007) ieee->assoc_id=0;
- else ieee->assoc_id++;
+ if (ieee->assoc_id == 0x2007)
+ ieee->assoc_id = 0;
+ else
+ ieee->assoc_id++;
tag = skb_put(skb, rate_len);
@@ -903,7 +863,7 @@ static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
- int len = ieee->tx_headroom + sizeof(struct ieee80211_authentication)+1;
+ int len = ieee->tx_headroom + sizeof(struct ieee80211_authentication) + 1;
skb = dev_alloc_skb(len);
@@ -923,8 +883,6 @@ static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
memcpy(auth->header.addr1, dest, ETH_ALEN);
auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
return skb;
-
-
}
static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,
@@ -945,15 +903,12 @@ static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,
memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
- (pwr ? IEEE80211_FCTL_PM:0));
+ IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
+ (pwr ? IEEE80211_FCTL_PM : 0));
return skb;
-
-
}
-
static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
@@ -962,7 +917,6 @@ static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
softmac_mgmt_xmit(buf, ieee);
}
-
static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s,
u8 *dest)
{
@@ -972,17 +926,13 @@ static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s,
softmac_mgmt_xmit(buf, ieee);
}
-
static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
-
-
struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
if (buf)
softmac_mgmt_xmit(buf, ieee);
}
-
static inline struct sk_buff *
ieee80211_association_req(struct ieee80211_network *beacon,
struct ieee80211_device *ieee)
@@ -999,59 +949,52 @@ ieee80211_association_req(struct ieee80211_network *beacon,
//unsigned int wpa_len = beacon->wpa_ie_len;
//for HT
u8 *ht_cap_buf = NULL;
- u8 ht_cap_len=0;
- u8 *realtek_ie_buf=NULL;
- u8 realtek_ie_len=0;
- int wpa_ie_len= ieee->wpa_ie_len;
- unsigned int ckip_ie_len=0;
- unsigned int ccxrm_ie_len=0;
- unsigned int cxvernum_ie_len=0;
+ u8 ht_cap_len = 0;
+ u8 *realtek_ie_buf = NULL;
+ u8 realtek_ie_len = 0;
+ int wpa_ie_len = ieee->wpa_ie_len;
+ unsigned int ckip_ie_len = 0;
+ unsigned int ccxrm_ie_len = 0;
+ unsigned int cxvernum_ie_len = 0;
struct ieee80211_crypt_data *crypt;
int encrypt;
unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
- unsigned int wmm_info_len = beacon->qos_data.supported?9:0;
+ unsigned int wmm_info_len = beacon->qos_data.supported ? 9 : 0;
#ifdef THOMAS_TURBO
- unsigned int turbo_info_len = beacon->Turbo_Enable?9:0;
+ unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
#endif
int len = 0;
crypt = ieee->crypt[ieee->tx_keyidx];
- encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name,"WEP") || wpa_ie_len));
+ encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
/* Include High Throughput capability && Realtek proprietary */
- if (ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT)
- {
- ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
+ if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
+ ht_cap_buf = (u8 *)&ieee->pHTInfo->SelfHTCap;
ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len, encrypt);
- if (ieee->pHTInfo->bCurrentRT2RTAggregation)
- {
+ if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
realtek_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
- realtek_ie_len = sizeof( ieee->pHTInfo->szRT2RTAggBuffer);
+ realtek_ie_len = sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
HTConstructRT2RTAggElement(ieee, realtek_ie_buf, &realtek_ie_len);
-
}
}
- if (ieee->qos_support) {
- wmm_info_len = beacon->qos_data.supported?9:0;
- }
-
+ if (ieee->qos_support)
+ wmm_info_len = beacon->qos_data.supported ? 9 : 0;
if (beacon->bCkipSupported)
- {
- ckip_ie_len = 30+2;
- }
+ ckip_ie_len = 30 + 2;
+
if (beacon->bCcxRmEnable)
- {
- ccxrm_ie_len = 6+2;
- }
+ ccxrm_ie_len = 6 + 2;
+
if (beacon->BssCcxVerNumber >= 2)
- cxvernum_ie_len = 5+2;
+ cxvernum_ie_len = 5 + 2;
#ifdef THOMAS_TURBO
- len = sizeof(struct ieee80211_assoc_request_frame)+ 2
+ len = sizeof(struct ieee80211_assoc_request_frame) + 2
+ beacon->ssid_len /* essid tagged val */
+ rate_len /* rates tagged val */
+ wpa_ie_len
@@ -1064,7 +1007,7 @@ ieee80211_association_req(struct ieee80211_network *beacon,
+ cxvernum_ie_len
+ ieee->tx_headroom;
#else
- len = sizeof(struct ieee80211_assoc_request_frame)+ 2
+ len = sizeof(struct ieee80211_assoc_request_frame) + 2
+ beacon->ssid_len /* essid tagged val */
+ rate_len /* rates tagged val */
+ wpa_ie_len
@@ -1076,7 +1019,6 @@ ieee80211_association_req(struct ieee80211_network *beacon,
+ cxvernum_ie_len
+ ieee->tx_headroom;
#endif
-
skb = dev_alloc_skb(len);
if (!skb)
@@ -1086,7 +1028,6 @@ ieee80211_association_req(struct ieee80211_network *beacon,
hdr = skb_put(skb, sizeof(struct ieee80211_assoc_request_frame) + 2);
-
hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ;
hdr->header.duration_id = cpu_to_le16(37);
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
@@ -1096,13 +1037,13 @@ ieee80211_association_req(struct ieee80211_network *beacon,
memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);//for HW security, John
hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS);
- if (beacon->capability & WLAN_CAPABILITY_PRIVACY )
+ if (beacon->capability & WLAN_CAPABILITY_PRIVACY)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); //add short_preamble here
- if(ieee->short_slot)
+ if (ieee->short_slot)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
if (wmm_info_len) //QOS
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS);
@@ -1122,71 +1063,67 @@ ieee80211_association_req(struct ieee80211_network *beacon,
if (beacon->bCkipSupported) {
static u8 AironetIeOui[] = {0x00, 0x01, 0x66}; // "4500-client"
u8 CcxAironetBuf[30];
- OCTET_STRING osCcxAironetIE;
+ struct octet_string osCcxAironetIE;
memset(CcxAironetBuf, 0, 30);
- osCcxAironetIE.Octet = CcxAironetBuf;
- osCcxAironetIE.Length = sizeof(CcxAironetBuf);
+ osCcxAironetIE.octet = CcxAironetBuf;
+ osCcxAironetIE.length = sizeof(CcxAironetBuf);
//
// Ref. CCX test plan v3.61, 3.2.3.1 step 13.
// We want to make the device type as "4500-client". 060926, by CCW.
//
- memcpy(osCcxAironetIE.Octet, AironetIeOui, sizeof(AironetIeOui));
+ memcpy(osCcxAironetIE.octet, AironetIeOui, sizeof(AironetIeOui));
// CCX1 spec V1.13, A01.1 CKIP Negotiation (page23):
// "The CKIP negotiation is started with the associate request from the client to the access point,
// containing an Aironet element with both the MIC and KP bits set."
- osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |= (SUPPORT_CKIP_PK|SUPPORT_CKIP_MIC) ;
+ osCcxAironetIE.octet[IE_CISCO_FLAG_POSITION] |= (SUPPORT_CKIP_PK | SUPPORT_CKIP_MIC);
tag = skb_put(skb, ckip_ie_len);
*tag++ = MFIE_TYPE_AIRONET;
- *tag++ = osCcxAironetIE.Length;
- memcpy(tag, osCcxAironetIE.Octet, osCcxAironetIE.Length);
- tag += osCcxAironetIE.Length;
+ *tag++ = osCcxAironetIE.length;
+ memcpy(tag, osCcxAironetIE.octet, osCcxAironetIE.length);
+ tag += osCcxAironetIE.length;
}
- if (beacon->bCcxRmEnable)
- {
+ if (beacon->bCcxRmEnable) {
static u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01, 0x00};
- OCTET_STRING osCcxRmCap;
+ struct octet_string osCcxRmCap;
- osCcxRmCap.Octet = CcxRmCapBuf;
- osCcxRmCap.Length = sizeof(CcxRmCapBuf);
+ osCcxRmCap.octet = CcxRmCapBuf;
+ osCcxRmCap.length = sizeof(CcxRmCapBuf);
tag = skb_put(skb, ccxrm_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxRmCap.Length;
- memcpy(tag, osCcxRmCap.Octet, osCcxRmCap.Length);
- tag += osCcxRmCap.Length;
+ *tag++ = osCcxRmCap.length;
+ memcpy(tag, osCcxRmCap.octet, osCcxRmCap.length);
+ tag += osCcxRmCap.length;
}
if (beacon->BssCcxVerNumber >= 2) {
u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
- OCTET_STRING osCcxVerNum;
+ struct octet_string osCcxVerNum;
CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
- osCcxVerNum.Octet = CcxVerNumBuf;
- osCcxVerNum.Length = sizeof(CcxVerNumBuf);
+ osCcxVerNum.octet = CcxVerNumBuf;
+ osCcxVerNum.length = sizeof(CcxVerNumBuf);
tag = skb_put(skb, cxvernum_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxVerNum.Length;
- memcpy(tag, osCcxVerNum.Octet, osCcxVerNum.Length);
- tag += osCcxVerNum.Length;
+ *tag++ = osCcxVerNum.length;
+ memcpy(tag, osCcxVerNum.octet, osCcxVerNum.length);
+ tag += osCcxVerNum.length;
}
//HT cap element
if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
- if (ieee->pHTInfo->ePeerHTSpecVer != HT_SPEC_VER_EWC)
- {
+ if (ieee->pHTInfo->ePeerHTSpecVer != HT_SPEC_VER_EWC) {
tag = skb_put(skb, ht_cap_len);
*tag++ = MFIE_TYPE_HT_CAP;
*tag++ = ht_cap_len - 2;
memcpy(tag, ht_cap_buf, ht_cap_len - 2);
- tag += ht_cap_len -2;
+ tag += ht_cap_len - 2;
}
}
-
//choose what wpa_supplicant gives to associate.
- if (wpa_ie_len) {
+ if (wpa_ie_len)
skb_put_data(skb, ieee->wpa_ie, wpa_ie_len);
- }
if (wmm_info_len) {
tag = skb_put(skb, wmm_info_len);
@@ -1200,13 +1137,12 @@ ieee80211_association_req(struct ieee80211_network *beacon,
#endif
if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
- if(ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC)
- {
+ if (ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
tag = skb_put(skb, ht_cap_len);
*tag++ = MFIE_TYPE_GENERIC;
*tag++ = ht_cap_len - 2;
memcpy(tag, ht_cap_buf, ht_cap_len - 2);
- tag += ht_cap_len -2;
+ tag += ht_cap_len - 2;
}
if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
@@ -1223,7 +1159,6 @@ ieee80211_association_req(struct ieee80211_network *beacon,
void ieee80211_associate_abort(struct ieee80211_device *ieee)
{
-
unsigned long flags;
spin_lock_irqsave(&ieee->lock, flags);
@@ -1235,10 +1170,10 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
* Here we will check if there are good nets to associate
* with, so we retry or just get back to NO_LINK and scanning
*/
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING){
+ if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING) {
IEEE80211_DEBUG_MGMT("Authentication failed\n");
ieee->softmac_stats.no_auth_rs++;
- }else{
+ } else {
IEEE80211_DEBUG_MGMT("Association failed\n");
ieee->softmac_stats.no_ass_rs++;
}
@@ -1246,7 +1181,7 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
ieee->state = IEEE80211_ASSOCIATING_RETRY;
schedule_delayed_work(&ieee->associate_retry_wq, \
- IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
+ IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -1258,7 +1193,6 @@ static void ieee80211_associate_abort_cb(struct timer_list *t)
ieee80211_associate_abort(dev);
}
-
static void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
struct ieee80211_network *beacon = &ieee->current_network;
@@ -1267,12 +1201,12 @@ static void ieee80211_associate_step1(struct ieee80211_device *ieee)
IEEE80211_DEBUG_MGMT("Stopping scan\n");
ieee->softmac_stats.tx_auth_rq++;
- skb=ieee80211_authentication_req(beacon, ieee, 0);
+ skb = ieee80211_authentication_req(beacon, ieee, 0);
- if (!skb)
+ if (!skb) {
ieee80211_associate_abort(ieee);
- else{
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING ;
+ } else {
+ ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING;
IEEE80211_DEBUG_MGMT("Sending authentication request\n");
softmac_mgmt_xmit(skb, ieee);
//BUGON when you try to add_timer twice, using mod_timer may be better, john0709
@@ -1296,21 +1230,21 @@ static void ieee80211_auth_challenge(struct ieee80211_device *ieee,
ieee->associate_seq++;
ieee->softmac_stats.tx_auth_rq++;
- skb = ieee80211_authentication_req(beacon, ieee, chlen+2);
- if (!skb)
+ skb = ieee80211_authentication_req(beacon, ieee, chlen + 2);
+ if (!skb) {
ieee80211_associate_abort(ieee);
- else{
- c = skb_put(skb, chlen+2);
+ } else {
+ c = skb_put(skb, chlen + 2);
*(c++) = MFIE_TYPE_CHALLENGE;
*(c++) = chlen;
memcpy(c, challenge, chlen);
IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n");
- ieee80211_encrypt_fragment(ieee, skb, sizeof(struct rtl_80211_hdr_3addr ));
+ ieee80211_encrypt_fragment(ieee, skb, sizeof(struct rtl_80211_hdr_3addr));
softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
+ mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
//dev_kfree_skb_any(skb);//edit by thomas
}
kfree(challenge);
@@ -1326,12 +1260,12 @@ static void ieee80211_associate_step2(struct ieee80211_device *ieee)
IEEE80211_DEBUG_MGMT("Sending association request\n");
ieee->softmac_stats.tx_ass_rq++;
- skb=ieee80211_association_req(beacon, ieee);
- if (!skb)
+ skb = ieee80211_association_req(beacon, ieee);
+ if (!skb) {
ieee80211_associate_abort(ieee);
- else{
+ } else {
softmac_mgmt_xmit(skb, ieee);
- mod_timer(&ieee->associate_timer, jiffies + (HZ/2));
+ mod_timer(&ieee->associate_timer, jiffies + (HZ / 2));
//dev_kfree_skb_any(skb);//edit by thomas
}
}
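
Steps 1 and 2 above, together with the challenge path and the abort/retry work, form a small handshake state machine: authentication request, optional shared-key challenge, association request, with timeouts dropping back for a retry. A condensed, hypothetical user-space model of those transitions (event names invented for illustration):

#include <stdio.h>
#include <string.h>

enum assoc_state { NOLINK, AUTHENTICATING, AUTHENTICATED, ASSOCIATING, LINKED };

static const char * const names[] = {
	"NOLINK", "AUTHENTICATING", "AUTHENTICATED", "ASSOCIATING", "LINKED"
};

static enum assoc_state step(enum assoc_state s, const char *ev)
{
	if (!strcmp(ev, "timeout"))
		return NOLINK;			/* abort path, retried later */
	if (s == NOLINK && !strcmp(ev, "start"))
		return AUTHENTICATING;		/* step1: send auth request */
	if (s == AUTHENTICATING && !strcmp(ev, "auth_ok"))
		return AUTHENTICATED;		/* challenge answered if WEP */
	if (s == AUTHENTICATED && !strcmp(ev, "assoc_sent"))
		return ASSOCIATING;		/* step2: send assoc request */
	if (s == ASSOCIATING && !strcmp(ev, "assoc_ok"))
		return LINKED;
	return s;
}

int main(void)
{
	const char *evs[] = {"start", "auth_ok", "assoc_sent", "assoc_ok"};
	enum assoc_state s = NOLINK;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		s = step(s, evs[i]);
		printf("%s -> %s\n", evs[i], names[s]);
	}
	return 0;
}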
@@ -1339,32 +1273,27 @@ static void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
printk(KERN_INFO "Associated successfully\n");
- if(ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
-
+ if (ieee80211_is_54g(&ieee->current_network) &&
+ (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
ieee->rate = 108;
printk(KERN_INFO"Using G rates:%d\n", ieee->rate);
- }else{
+ } else {
ieee->rate = 22;
printk(KERN_INFO"Using B rates:%d\n", ieee->rate);
}
- if (ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT)
- {
+ if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
printk("Successfully associated, ht enabled\n");
HTOnAssocRsp(ieee);
- }
- else
- {
+ } else {
printk("Successfully associated, ht not enabled(%d, %d)\n", ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bEnableHT);
memset(ieee->dot11HTOperationalRateSet, 0, 16);
//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
}
- ieee->LinkDetectInfo.SlotNum = 2 * (1 + ieee->current_network.beacon_interval/500);
+ ieee->LinkDetectInfo.SlotNum = 2 * (1 + ieee->current_network.beacon_interval / 500);
// To prevent calling watch_dog immediately after association.
- if (ieee->LinkDetectInfo.NumRecvBcnInPeriod==0||ieee->LinkDetectInfo.NumRecvDataInPeriod==0 )
- {
+ if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 || ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) {
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1;
- ieee->LinkDetectInfo.NumRecvDataInPeriod= 1;
+ ieee->LinkDetectInfo.NumRecvDataInPeriod = 1;
}
ieee->link_change(ieee->dev);
if (!ieee->is_silent_reset) {
@@ -1413,7 +1342,7 @@ static void ieee80211_associate_procedure_wq(struct work_struct *work)
inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
{
- u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
+ u8 tmp_ssid[IW_ESSID_MAX_SIZE + 1];
int tmp_ssid_len = 0;
short apset, ssidset, ssidbroad, apmatch, ssidmatch;
@@ -1430,7 +1359,6 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
return;
-
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
/* if the user specified the AP MAC, we need also the essid
* This could be obtained by beacons or, if the network does not
@@ -1438,77 +1366,75 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
*/
apset = ieee->wap_set;//(memcmp(ieee->current_network.bssid, zero,ETH_ALEN)!=0 );
ssidset = ieee->ssid_set;//ieee->current_network.ssid[0] != '\0';
- ssidbroad = !(net->ssid_len == 0 || net->ssid[0]== '\0');
- apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN)==0);
- ssidmatch = (ieee->current_network.ssid_len == net->ssid_len)&&\
- (!strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
-
-
- if ( /* if the user set the AP check if match.
- * if the network does not broadcast essid we check the user supplyed ANY essid
- * if the network does broadcast and the user does not set essid it is OK
- * if the network does broadcast and the user did set essid chech if essid match
- */
- (apset && apmatch &&
- ((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset)) ) ||
- /* if the ap is not set, check that the user set the bssid
- * and the network does broadcast and that those two bssid matches
+ ssidbroad = !(net->ssid_len == 0 || net->ssid[0] == '\0');
+ apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN) == 0);
+ ssidmatch = (ieee->current_network.ssid_len == net->ssid_len) &&
+ (!strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
+
+ /* if the user set the AP, check if it matches.
+ * if the network does not broadcast the essid, we check the user-supplied ANY essid
+ * if the network does broadcast and the user did not set the essid, it is OK
+ * if the network does broadcast and the user did set the essid, check if the essids match
+ */
+ if ((apset && apmatch &&
+ ((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
+ /* if the ap is not set, check that the user set the bssid
+ * and the network does broadcast and that those two bssid matches
+ */
+ (!apset && ssidset && ssidbroad && ssidmatch)) {
+ /* if the essid is hidden replace it with the
+ * essid provided by the user.
*/
- (!apset && ssidset && ssidbroad && ssidmatch)
- ){
- /* if the essid is hidden replace it with the
- * essid provided by the user.
- */
- if (!ssidbroad) {
- strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE);
- tmp_ssid_len = ieee->current_network.ssid_len;
- }
- memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network));
-
- strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
- ieee->current_network.ssid_len = tmp_ssid_len;
- printk(KERN_INFO"Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n",ieee->current_network.ssid,ieee->current_network.channel, ieee->current_network.qos_data.supported, ieee->pHTInfo->bEnableHT, ieee->current_network.bssht.bdSupportHT);
-
- //ieee->pHTInfo->IOTAction = 0;
- HTResetIOTSetting(ieee->pHTInfo);
- if (ieee->iw_mode == IW_MODE_INFRA){
- /* Join the network for the first time */
- ieee->AsocRetryCount = 0;
- //for HT by amy 080514
- if((ieee->current_network.qos_data.supported == 1) &&
- // (ieee->pHTInfo->bEnableHT && ieee->current_network.bssht.bdSupportHT))
- ieee->current_network.bssht.bdSupportHT)
+ if (!ssidbroad) {
+ strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE);
+ tmp_ssid_len = ieee->current_network.ssid_len;
+ }
+ memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network));
+
+ strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
+ ieee->current_network.ssid_len = tmp_ssid_len;
+ printk(KERN_INFO"Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n",
+ ieee->current_network.ssid,
+ ieee->current_network.channel,
+ ieee->current_network.qos_data.supported,
+ ieee->pHTInfo->bEnableHT,
+ ieee->current_network.bssht.bdSupportHT);
+
+ //ieee->pHTInfo->IOTAction = 0;
+ HTResetIOTSetting(ieee->pHTInfo);
+ if (ieee->iw_mode == IW_MODE_INFRA) {
+ /* Join the network for the first time */
+ ieee->AsocRetryCount = 0;
+ //for HT by amy 080514
+ if ((ieee->current_network.qos_data.supported == 1) &&
+ // (ieee->pHTInfo->bEnableHT && ieee->current_network.bssht.bdSupportHT))
+ ieee->current_network.bssht.bdSupportHT) {
/*WB, 2008.09.09: the bCurrentHTSupport and bEnableHT flags are checked together to decide whether we are in HT now, so there is no need to check bEnableHT alone here. That is to say, we will enable HT support whenever the joined AP is able to support HT. To know whether we are in HT or not, check bCurrentHTSupport && bEnableHT.*/
- {
// ieee->pHTInfo->bCurrentHTSupport = true;
- HTResetSelfAndSavePeerSetting(ieee, &(ieee->current_network));
- }
- else
- {
- ieee->pHTInfo->bCurrentHTSupport = false;
- }
-
- ieee->state = IEEE80211_ASSOCIATING;
- schedule_work(&ieee->associate_procedure_wq);
- }else{
- if(ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
- ieee->rate = 108;
- ieee->SetWirelessMode(ieee->dev, IEEE_G);
- printk(KERN_INFO"Using G rates\n");
- }else{
- ieee->rate = 22;
- ieee->SetWirelessMode(ieee->dev, IEEE_B);
- printk(KERN_INFO"Using B rates\n");
- }
- memset(ieee->dot11HTOperationalRateSet, 0, 16);
- //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- ieee->state = IEEE80211_LINKED;
+ HTResetSelfAndSavePeerSetting(ieee, &ieee->current_network);
+ } else {
+ ieee->pHTInfo->bCurrentHTSupport = false;
}
+ ieee->state = IEEE80211_ASSOCIATING;
+ schedule_work(&ieee->associate_procedure_wq);
+ } else {
+ if (ieee80211_is_54g(&ieee->current_network) &&
+ (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
+ ieee->rate = 108;
+ ieee->SetWirelessMode(ieee->dev, IEEE_G);
+ printk(KERN_INFO"Using G rates\n");
+ } else {
+ ieee->rate = 22;
+ ieee->SetWirelessMode(ieee->dev, IEEE_B);
+ printk(KERN_INFO"Using B rates\n");
+ }
+ memset(ieee->dot11HTOperationalRateSet, 0, 16);
+ //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+ ieee->state = IEEE80211_LINKED;
+ }
}
}
-
}
void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
@@ -1519,7 +1445,6 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(target, &ieee->network_list, list) {
-
/* if the state become different that NOLINK means
* we had found what we are searching for
*/
@@ -1532,20 +1457,18 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
}
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
-
static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
- IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n",skb->len);
+ IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
return 0xcafe;
}
*challenge = NULL;
- a = (struct ieee80211_authentication *) skb->data;
+ a = (struct ieee80211_authentication *)skb->data;
if (skb->len > (sizeof(struct ieee80211_authentication) + 3)) {
t = skb->data + sizeof(struct ieee80211_authentication);
@@ -1558,21 +1481,19 @@ static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
}
return le16_to_cpu(a->status);
-
}
-
static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_authentication *a;
if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
- IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n",skb->len);
+ IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n", skb->len);
return -1;
}
- a = (struct ieee80211_authentication *) skb->data;
+ a = (struct ieee80211_authentication *)skb->data;
- memcpy(dest,a->header.addr2, ETH_ALEN);
+ memcpy(dest, a->header.addr2, ETH_ALEN);
if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
@@ -1584,25 +1505,25 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
{
u8 *tag;
u8 *skbend;
- u8 *ssid=NULL;
+ u8 *ssid = NULL;
u8 ssidlen = 0;
struct rtl_80211_hdr_3addr *header =
- (struct rtl_80211_hdr_3addr *) skb->data;
+ (struct rtl_80211_hdr_3addr *)skb->data;
- if (skb->len < sizeof (struct rtl_80211_hdr_3addr ))
+ if (skb->len < sizeof(struct rtl_80211_hdr_3addr))
return -1; /* corrupted */
- memcpy(src,header->addr2, ETH_ALEN);
+ memcpy(src, header->addr2, ETH_ALEN);
skbend = (u8 *)skb->data + skb->len;
- tag = skb->data + sizeof (struct rtl_80211_hdr_3addr );
+ tag = skb->data + sizeof(struct rtl_80211_hdr_3addr);
- while (tag+1 < skbend){
+ while (tag + 1 < skbend) {
if (*tag == 0) {
- ssid = tag+2;
- ssidlen = *(tag+1);
+ ssid = tag + 2;
+ ssidlen = *(tag + 1);
break;
}
tag++; /* point to the len field */
@@ -1611,11 +1532,13 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
}
//IEEE80211DMESG("Card MAC address is "MACSTR, MAC2STR(src));
- if (ssidlen == 0) return 1;
+ if (ssidlen == 0)
+ return 1;
- if (!ssid) return 1; /* ssid not found in tagged param */
- return (!strncmp(ssid, ieee->current_network.ssid, ssidlen));
+ if (!ssid)
+ return 1; /* ssid not found in tagged param */
+ return (!strncmp(ssid, ieee->current_network.ssid, ssidlen));
}
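
probe_rq_parse() above scans the tagged parameters for element id 0 (the SSID). The general shape — id byte, length byte, payload, repeat — is easy to get wrong without a bounds check on the length field. A defensive user-space sketch of the same walk (find_ie() is an invented helper):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Find one element id in an IE blob. Returns the payload pointer and
 * sets *out_len, or returns NULL if absent or truncated. */
static const uint8_t *find_ie(const uint8_t *buf, size_t buflen,
			      uint8_t id, uint8_t *out_len)
{
	size_t off = 0;

	while (off + 2 <= buflen) {
		uint8_t cur = buf[off];
		uint8_t len = buf[off + 1];

		if (off + 2 + len > buflen)
			return NULL;	/* truncated element */
		if (cur == id) {
			*out_len = len;
			return buf + off + 2;
		}
		off += 2 + len;
	}
	return NULL;
}

int main(void)
{
	const uint8_t ies[] = { 0, 4, 'd', 'e', 'm', 'o', 1, 1, 0x82 };
	uint8_t len;
	const uint8_t *ssid = find_ie(ies, sizeof(ies), 0, &len);

	if (ssid)
		printf("ssid (len %u): %.*s\n", len, len, ssid);
	return 0;
}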
static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
@@ -1624,14 +1547,13 @@ static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) -
sizeof(struct ieee80211_info_element))) {
-
IEEE80211_DEBUG_MGMT("invalid len in auth request:%d \n", skb->len);
return -1;
}
- a = (struct ieee80211_assoc_request_frame *) skb->data;
+ a = (struct ieee80211_assoc_request_frame *)skb->data;
- memcpy(dest,a->header.addr2,ETH_ALEN);
+ memcpy(dest, a->header.addr2, ETH_ALEN);
return 0;
}
@@ -1646,18 +1568,18 @@ static inline u16 assoc_parse(struct ieee80211_device *ieee, struct sk_buff *skb
return 0xcafe;
}
- response_head = (struct ieee80211_assoc_response_frame *) skb->data;
+ response_head = (struct ieee80211_assoc_response_frame *)skb->data;
*aid = le16_to_cpu(response_head->aid) & 0x3fff;
status_code = le16_to_cpu(response_head->status);
- if((status_code==WLAN_STATUS_ASSOC_DENIED_RATES || \
- status_code==WLAN_STATUS_CAPS_UNSUPPORTED)&&
- ((ieee->mode == IEEE_G) &&
- (ieee->current_network.mode == IEEE_N_24G) &&
- (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) {
- ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE;
- }else {
- ieee->AsocRetryCount = 0;
+ if ((status_code == WLAN_STATUS_ASSOC_DENIED_RATES ||
+ status_code == WLAN_STATUS_CAPS_UNSUPPORTED) &&
+ ((ieee->mode == IEEE_G) &&
+ (ieee->current_network.mode == IEEE_N_24G) &&
+ (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
+ ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE;
+ } else {
+ ieee->AsocRetryCount = 0;
}
return le16_to_cpu(response_head->status);
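
assoc_parse() above masks the AID to its 14 valid bits and, on a rates or capabilities rejection from an 11n AP while we run in G mode, bumps AsocRetryCount and flags pure-N interop rather than giving up. A sketch of that decision, assuming the standard status codes 18 (WLAN_STATUS_ASSOC_DENIED_RATES) and 10 (WLAN_STATUS_CAPS_UNSUPPORTED) and an illustrative retry limit:

#include <stdint.h>
#include <stdio.h>

#define RETRY_LIMIT 5	/* illustrative, not necessarily RT_ASOC_RETRY_LIMIT */

/* 1 = set the pure-N IOT flag and retry association, 0 = give up/reset. */
static int want_pure_n_retry(uint16_t status, int mode_g, int ap_is_n,
			     int *retry_count)
{
	int denied = (status == 18 /* denied rates */ ||
		      status == 10 /* caps unsupported */);

	if (denied && mode_g && ap_is_n && (*retry_count)++ < RETRY_LIMIT - 1)
		return 1;
	*retry_count = 0;
	return 0;
}

int main(void)
{
	int retries = 0;

	printf("aid field 0xc001 -> aid %u\n", 0xc001 & 0x3fff);
	printf("pure-N retry? %d\n", want_pure_n_retry(18, 1, 1, &retries));
	return 0;
}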
@@ -1687,24 +1609,20 @@ ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
ieee->softmac_stats.rx_auth_rq++;
status = auth_rq_parse(skb, dest);
- if (status != -1) {
+ if (status != -1)
ieee80211_resp_to_auth(ieee, status, dest);
- }
//DMESG("Dest is "MACSTR, MAC2STR(dest));
-
}
static inline void
ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
-
u8 dest[ETH_ALEN];
//unsigned long flags;
ieee->softmac_stats.rx_ass_rq++;
- if (assoc_rq_parse(skb, dest) != -1) {
+ if (assoc_rq_parse(skb, dest) != -1)
ieee80211_resp_to_assoc_rq(ieee, dest);
- }
printk(KERN_INFO"New client associated: %pM\n", dest);
//FIXME
@@ -1713,12 +1631,10 @@ ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
static void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
short pwr)
{
-
struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
if (buf)
softmac_ps_mgmt_xmit(buf, ieee);
-
}
/* EXPORT_SYMBOL(ieee80211_sta_ps_send_null_frame); */
@@ -1734,46 +1650,43 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 0;
*/
dtim = ieee->current_network.dtim_data;
- if(!(dtim & IEEE80211_DTIM_VALID))
+ if (!(dtim & IEEE80211_DTIM_VALID))
return 0;
timeout = ieee->current_network.beacon_interval; //should we use ps_timeout value or beacon_interval
ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID;
- if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps))
+ if (dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST) & ieee->ps))
return 2;
- if(!time_after(jiffies,
- dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
+ if (!time_after(jiffies,
+ dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
return 0;
- if(!time_after(jiffies,
- ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
+ if (!time_after(jiffies,
+ ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
return 0;
- if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) &&
- (ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
+ if ((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) &&
+ (ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
return 0;
if (time_l) {
*time_l = ieee->current_network.last_dtim_sta_time[0]
+ (ieee->current_network.beacon_interval
- * ieee->current_network.dtim_period) * 1000;
+ * ieee->current_network.dtim_period) * 1000;
}
if (time_h) {
*time_h = ieee->current_network.last_dtim_sta_time[1];
- if(time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
+ if (time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
*time_h += 1;
}
return 1;
-
-
}
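
ieee80211_sta_ps_sleep() above returns a three-way verdict: 2 means wake up because the DTIM says the AP buffered traffic matching our power-save mask, 1 means we may doze, 0 means do nothing (no valid DTIM yet, recent traffic, or pending management frames). A condensed sketch with an invented flag layout standing in for the driver's IEEE80211_DTIM_* bits:

#include <stdint.h>
#include <stdio.h>

#define DTIM_VALID	0x80	/* invented layout, not the driver's */
#define DTIM_UCAST	0x01
#define DTIM_MBCAST	0x02

/* 2 = wake (buffered traffic for us), 1 = may sleep, 0 = do nothing. */
static int ps_decide(uint8_t dtim, uint8_t ps_mask,
		     int recent_tx, int recent_rx, int mgmt_pending)
{
	if (!(dtim & DTIM_VALID))
		return 0;
	if (dtim & ((DTIM_UCAST | DTIM_MBCAST) & ps_mask))
		return 2;
	if (recent_tx || recent_rx || mgmt_pending)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", ps_decide(DTIM_VALID | DTIM_UCAST, DTIM_UCAST, 0, 0, 0));
	printf("%d\n", ps_decide(DTIM_VALID, DTIM_UCAST, 1, 0, 0));
	printf("%d\n", ps_decide(DTIM_VALID, DTIM_UCAST, 0, 0, 0));
	return 0;
}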
static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
-
u32 th, tl;
short sleep;
@@ -1782,10 +1695,9 @@ static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->lock, flags);
if ((ieee->ps == IEEE80211_PS_DISABLED ||
- ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)){
-
- // #warning CHECK_LOCK_HERE
+ ieee->iw_mode != IW_MODE_INFRA ||
+ ieee->state != IEEE80211_LINKED)) {
+ // #warning CHECK_LOCK_HERE
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
ieee80211_sta_wakeup(ieee, 1);
@@ -1793,23 +1705,19 @@ static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
- sleep = ieee80211_sta_ps_sleep(ieee,&th, &tl);
+ sleep = ieee80211_sta_ps_sleep(ieee, &th, &tl);
/* 2 wake, 1 sleep, 0 do nothing */
- if(sleep == 0)
+ if (sleep == 0)
goto out;
- if(sleep == 1){
-
- if(ieee->sta_sleep == 1)
+ if (sleep == 1) {
+ if (ieee->sta_sleep == 1) {
ieee->enter_sleep_state(ieee->dev, th, tl);
-
- else if(ieee->sta_sleep == 0){
+ } else if (ieee->sta_sleep == 0) {
// printk("send null 1\n");
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
- if(ieee->ps_is_queue_empty(ieee->dev)){
-
-
+ if (ieee->ps_is_queue_empty(ieee->dev)) {
ieee->sta_sleep = 2;
ieee->ps_request_tx_ack(ieee->dev);
@@ -1820,11 +1728,8 @@ static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
ieee->ps_tl = tl;
}
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
-
}
-
-
- }else if(sleep == 2){
+ } else if (sleep == 2) {
//#warning CHECK_LOCK_HERE
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
@@ -1832,10 +1737,8 @@ static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
-
out:
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
@@ -1847,10 +1750,9 @@ void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
ieee80211_sta_ps_send_null_frame(ieee, 0);
}
return;
-
}
- if(ieee->sta_sleep == 1)
+ if (ieee->sta_sleep == 1)
ieee->sta_wake_up(ieee->dev);
ieee->sta_sleep = 0;
@@ -1867,19 +1769,17 @@ void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
spin_lock_irqsave(&ieee->lock, flags);
- if(ieee->sta_sleep == 2){
+ if (ieee->sta_sleep == 2) {
/* Null frame with PS bit set */
if (success) {
ieee->sta_sleep = 1;
- ieee->enter_sleep_state(ieee->dev,ieee->ps_th,ieee->ps_tl);
+ ieee->enter_sleep_state(ieee->dev, ieee->ps_th, ieee->ps_tl);
}
/* if the card report not success we can't be sure the AP
* has not RXed so we can't assume the AP believe us awake
*/
- }
- /* 21112005 - tx again null without PS bit if lost */
- else {
-
+ } else {
+ /* 21112005 - tx again null without PS bit if lost */
if ((ieee->sta_sleep == 0) && !success) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
ieee80211_sta_ps_send_null_frame(ieee, 0);
@@ -1897,13 +1797,12 @@ static void ieee80211_process_action(struct ieee80211_device *ieee,
u8 *act = ieee80211_get_payload(header);
u8 tmp = 0;
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
- if (act == NULL)
- {
+ if (act == NULL) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "error to get payload of action frame\n");
return;
}
tmp = *act;
- act ++;
+ act++;
switch (tmp) {
case ACT_CAT_BA:
if (*act == ACT_ADDBAREQ)
@@ -1917,7 +1816,6 @@ static void ieee80211_process_action(struct ieee80211_device *ieee,
break;
}
return;
-
}
static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
@@ -1965,9 +1863,9 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
bHalfSupportNmode) {
netdev_dbg(ieee->dev, "enter half N mode\n");
ieee->bHalfWirelessN24GMode = true;
- } else
+ } else {
ieee->bHalfWirelessN24GMode = false;
-
+ }
ieee80211_associate_step2(ieee);
} else {
ieee80211_auth_challenge(ieee, challenge, chlen);
@@ -1981,44 +1879,41 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
inline int
ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype)
+ struct ieee80211_rx_stats *rx_stats, u16 type,
+ u16 stype)
{
- struct rtl_80211_hdr_3addr *header = (struct rtl_80211_hdr_3addr *) skb->data;
+ struct rtl_80211_hdr_3addr *header = (struct rtl_80211_hdr_3addr *)skb->data;
u16 errcode;
int aid;
struct ieee80211_assoc_response_frame *assoc_resp;
// struct ieee80211_info_element *info_element;
- if(!ieee->proto_started)
+ if (!ieee->proto_started)
return 0;
- if(ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
- ieee->iw_mode == IW_MODE_INFRA &&
- ieee->state == IEEE80211_LINKED))
-
+ if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
+ ieee->iw_mode == IW_MODE_INFRA &&
+ ieee->state == IEEE80211_LINKED))
tasklet_schedule(&ieee->ps_task);
- if(WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
- WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
+ if (WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
+ WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
ieee->last_rx_ps_time = jiffies;
switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
-
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
-
IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
WLAN_FC_GET_STYPE(header->frame_ctl));
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
- ieee->iw_mode == IW_MODE_INFRA){
+ ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
+ ieee->iw_mode == IW_MODE_INFRA) {
struct ieee80211_network network_resp;
struct ieee80211_network *network = &network_resp;
errcode = assoc_parse(ieee, skb, &aid);
if (!errcode) {
- ieee->state=IEEE80211_LINKED;
+ ieee->state = IEEE80211_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
/* station support qos */
@@ -2026,13 +1921,12 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->qos_support) {
assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
memset(network, 0, sizeof(*network));
- if (ieee80211_parse_info_param(ieee,assoc_resp->info_element,\
- rx_stats->len - sizeof(*assoc_resp),\
- network,rx_stats)){
+ if (ieee80211_parse_info_param(ieee, assoc_resp->info_element,
+ rx_stats->len - sizeof(*assoc_resp),
+ network, rx_stats)) {
return 1;
- }
- else
- { //filling the PeerHTCap. //maybe not necessary as we can get its info from current_network.
+ } else {
+ //filling the PeerHTCap. //maybe not necessary as we can get its info from current_network.
memcpy(ieee->pHTInfo->PeerHTCapBuf, network->bssht.bdHTCapBuf, network->bssht.bdHTCapLen);
memcpy(ieee->pHTInfo->PeerHTInfoBuf, network->bssht.bdHTInfoBuf, network->bssht.bdHTInfoLen);
}
@@ -2043,36 +1937,29 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
} else {
/* aid could not been allocated */
ieee->softmac_stats.rx_ass_err++;
- printk(
- "Association response status code 0x%x\n",
- errcode);
- IEEE80211_DEBUG_MGMT(
- "Association response status code 0x%x\n",
- errcode);
- if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
+ printk("Association response status code 0x%x\n",
+ errcode);
+ IEEE80211_DEBUG_MGMT("Association response status code 0x%x\n",
+ errcode);
+ if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
schedule_work(&ieee->associate_procedure_wq);
- } else {
+ else
ieee80211_associate_abort(ieee);
- }
}
}
break;
case IEEE80211_STYPE_ASSOC_REQ:
case IEEE80211_STYPE_REASSOC_REQ:
-
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->iw_mode == IW_MODE_MASTER)
-
+ ieee->iw_mode == IW_MODE_MASTER)
ieee80211_rx_assoc_rq(ieee, skb);
break;
case IEEE80211_STYPE_AUTH:
-
if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING
- && ieee->iw_mode == IW_MODE_INFRA) {
-
+ && ieee->iw_mode == IW_MODE_INFRA) {
IEEE80211_DEBUG_MGMT("Received auth response");
ieee80211_check_auth_response(ieee, skb);
} else if (ieee->iw_mode == IW_MODE_MASTER) {
@@ -2082,11 +1969,10 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
break;
case IEEE80211_STYPE_PROBE_REQ:
-
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
- ((ieee->iw_mode == IW_MODE_ADHOC ||
- ieee->iw_mode == IW_MODE_MASTER) &&
- ieee->state == IEEE80211_LINKED)){
+ ((ieee->iw_mode == IW_MODE_ADHOC ||
+ ieee->iw_mode == IW_MODE_MASTER) &&
+ ieee->state == IEEE80211_LINKED)) {
ieee80211_rx_probe_rq(ieee, skb);
}
break;
@@ -2097,9 +1983,8 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
* both for disassociation and deauthentication
*/
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->state == IEEE80211_LINKED &&
- ieee->iw_mode == IW_MODE_INFRA){
-
+ ieee->state == IEEE80211_LINKED &&
+ ieee->iw_mode == IW_MODE_INFRA) {
ieee->state = IEEE80211_ASSOCIATING;
ieee->softmac_stats.reassoc++;
@@ -2140,7 +2025,6 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
*/
void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
{
-
unsigned int queue_index = txb->queue_index;
unsigned long flags;
int i;
@@ -2155,18 +2039,18 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
ieee->stats.tx_bytes += le16_to_cpu(txb->payload_size);
ieee->stats.tx_packets++;
tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
- if (tcb_desc->bMulticast) {
+ if (tcb_desc->bMulticast)
ieee->stats.multicast++;
- }
+
/* if xmit available, just xmit it immediately, else just insert it to the wait queue */
- for(i = 0; i < txb->nr_frags; i++) {
+ for (i = 0; i < txb->nr_frags; i++) {
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
if ((skb_queue_len(&ieee->skb_drv_aggQ[queue_index]) != 0) ||
#else
if ((skb_queue_len(&ieee->skb_waitQ[queue_index]) != 0) ||
#endif
- (!ieee->check_nic_enough_desc(ieee->dev,queue_index))||\
- (ieee->queue_stop)) {
+ (!ieee->check_nic_enough_desc(ieee->dev, queue_index)) ||
+ (ieee->queue_stop)) {
/* insert the skb packet to the wait queue */
/* as for the completion function, it does not need
* to check it any more.
@@ -2178,10 +2062,9 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
#else
skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]);
#endif
- }else{
- ieee->softmac_data_hard_start_xmit(
- txb->fragments[i],
- ieee->dev, ieee->rate);
+ } else {
+ ieee->softmac_data_hard_start_xmit(txb->fragments[i],
+ ieee->dev, ieee->rate);
//ieee->stats.tx_packets++;
//ieee->stats.tx_bytes += txb->fragments[i]->len;
//ieee->dev->trans_start = jiffies;
@@ -2191,7 +2074,6 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
//exit:
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
EXPORT_SYMBOL(ieee80211_softmac_xmit);
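
The loop in ieee80211_softmac_xmit() routes each fragment the same way: transmit immediately only when the wait queue is empty, the NIC still has descriptors, and the queue is not stopped; otherwise park it on skb_waitQ for the completion path to drain. A tiny sketch of that predicate (names illustrative):

#include <stdio.h>

/* Mirror of the per-fragment decision above. */
static const char *route_fragment(int waitq_len, int nic_has_desc,
				  int queue_stop)
{
	if (waitq_len != 0 || !nic_has_desc || queue_stop)
		return "enqueue to skb_waitQ";
	return "hard_start_xmit now";
}

int main(void)
{
	printf("%s\n", route_fragment(0, 1, 0));
	printf("%s\n", route_fragment(3, 1, 0));
	printf("%s\n", route_fragment(0, 0, 0));
	return 0;
}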
@@ -2199,28 +2081,23 @@ EXPORT_SYMBOL(ieee80211_softmac_xmit);
static void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
int i;
- for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
-
- if (ieee->queue_stop){
+ for (i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
+ if (ieee->queue_stop) {
ieee->tx_pending.frag = i;
return;
- }else{
-
- ieee->softmac_data_hard_start_xmit(
- ieee->tx_pending.txb->fragments[i],
- ieee->dev, ieee->rate);
- //(i+1)<ieee->tx_pending.txb->nr_frags);
+ } else {
+ ieee->softmac_data_hard_start_xmit(ieee->tx_pending.txb->fragments[i],
+ ieee->dev, ieee->rate);
+ //(i+1)<ieee->tx_pending.txb->nr_frags);
ieee->stats.tx_packets++;
netif_trans_update(ieee->dev);
}
}
-
ieee80211_txb_free(ieee->tx_pending.txb);
ieee->tx_pending.txb = NULL;
}
-
void ieee80211_reset_queue(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -2233,26 +2110,24 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
}
ieee->queue_stop = 0;
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
EXPORT_SYMBOL(ieee80211_reset_queue);
void ieee80211_wake_queue(struct ieee80211_device *ieee)
{
-
unsigned long flags;
struct sk_buff *skb;
struct rtl_80211_hdr_3addr *header;
spin_lock_irqsave(&ieee->lock, flags);
- if (! ieee->queue_stop) goto exit;
+ if (!ieee->queue_stop)
+ goto exit;
ieee->queue_stop = 0;
if (ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) {
- while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))){
-
- header = (struct rtl_80211_hdr_3addr *) skb->data;
+ while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {
+ header = (struct rtl_80211_hdr_3addr *)skb->data;
header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
@@ -2261,7 +2136,7 @@ void ieee80211_wake_queue(struct ieee80211_device *ieee)
else
ieee->seq_ctrl[0]++;
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
//dev_kfree_skb_any(skb);//edit by thomas
}
}
@@ -2272,8 +2147,7 @@ void ieee80211_wake_queue(struct ieee80211_device *ieee)
ieee->softmac_stats.swtxawake++;
netif_wake_queue(ieee->dev);
}
-
-exit :
+exit:
spin_unlock_irqrestore(&ieee->lock, flags);
}
EXPORT_SYMBOL(ieee80211_wake_queue);
@@ -2289,7 +2163,6 @@ void ieee80211_stop_queue(struct ieee80211_device *ieee)
}
ieee->queue_stop = 1;
//spin_unlock_irqrestore(&ieee->lock,flags);
-
}
EXPORT_SYMBOL(ieee80211_stop_queue);
@@ -2323,7 +2196,6 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
{
if (ieee->raw_tx) {
-
if (ieee->data_hard_resume)
ieee->data_hard_resume(ieee->dev);
@@ -2332,7 +2204,6 @@ static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
}
static void ieee80211_start_ibss_wq(struct work_struct *work)
{
-
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
/* iwconfig mode ad-hoc will schedule this and return
@@ -2357,7 +2228,6 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
/* check if we have this cell in our network list */
ieee80211_softmac_check_all_nets(ieee);
-
// if((IS_DOT11D_ENABLE(ieee)) && (ieee->state == IEEE80211_NOLINK))
if (ieee->state == IEEE80211_NOLINK)
ieee->current_network.channel = 6;
@@ -2381,22 +2251,20 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
/* the network definitively is not here.. create a new cell */
if (ieee->state == IEEE80211_NOLINK) {
printk("creating new IBSS cell\n");
- if(!ieee->wap_set)
- random_ether_addr(ieee->current_network.bssid);
-
- if(ieee->modulation & IEEE80211_CCK_MODULATION){
+ if (!ieee->wap_set)
+ eth_random_addr(ieee->current_network.bssid);
+ if (ieee->modulation & IEEE80211_CCK_MODULATION) {
ieee->current_network.rates_len = 4;
ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
ieee->current_network.rates[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
-
- }else
+ } else {
ieee->current_network.rates_len = 0;
-
- if(ieee->modulation & IEEE80211_OFDM_MODULATION){
+ }
+ if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
ieee->current_network.rates_ex_len = 8;
ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
@@ -2409,7 +2277,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
ieee->rate = 108;
- }else{
+ } else {
ieee->current_network.rates_ex_len = 0;
ieee->rate = 22;
}
@@ -2419,9 +2287,8 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->SetWirelessMode(ieee->dev, IEEE_G);
ieee->current_network.atim_window = 0;
ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
- if(ieee->short_slot)
+ if (ieee->short_slot)
ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT;
-
}
ieee->state = IEEE80211_LINKED;
@@ -2453,12 +2320,9 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
// Ref: 802.11d 11.1.3.3
// STA shall not start a BSS unless properly formed Beacon frame including a Country IE.
//
- if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee))
- {
- if (! ieee->bGlobalDomain)
- {
+ if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
+ if (!ieee->bGlobalDomain)
return;
- }
}
/* check if we have already found the net we
* are interested in (if any).
@@ -2486,22 +2350,19 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
/* called only in userspace context */
void ieee80211_disassociate(struct ieee80211_device *ieee)
{
-
-
netif_carrier_off(ieee->dev);
if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
- ieee80211_reset_queue(ieee);
+ ieee80211_reset_queue(ieee);
if (ieee->data_hard_stop)
- ieee->data_hard_stop(ieee->dev);
- if(IS_DOT11D_ENABLE(ieee))
+ ieee->data_hard_stop(ieee->dev);
+ if (IS_DOT11D_ENABLE(ieee))
Dot11d_Reset(ieee);
ieee->state = IEEE80211_NOLINK;
ieee->is_set_key = false;
ieee->link_change(ieee->dev);
//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
notify_wx_assoc_event(ieee);
-
}
EXPORT_SYMBOL(ieee80211_disassociate);
@@ -2512,10 +2373,10 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
unsigned long flags;
mutex_lock(&ieee->wx_mutex);
- if(!ieee->proto_started)
+ if (!ieee->proto_started)
goto exit;
- if(ieee->state != IEEE80211_ASSOCIATING_RETRY)
+ if (ieee->state != IEEE80211_ASSOCIATING_RETRY)
goto exit;
/* until we do not set the state to IEEE80211_NOLINK
@@ -2537,7 +2398,7 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
spin_lock_irqsave(&ieee->lock, flags);
- if(ieee->state == IEEE80211_NOLINK)
+ if (ieee->state == IEEE80211_NOLINK)
ieee80211_start_scan(ieee);
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -2558,11 +2419,10 @@ struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
if (!skb)
return NULL;
- b = (struct ieee80211_probe_response *) skb->data;
+ b = (struct ieee80211_probe_response *)skb->data;
b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON);
return skb;
-
}
struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
@@ -2571,10 +2431,10 @@ struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
struct ieee80211_probe_response *b;
skb = ieee80211_get_beacon_(ieee);
- if(!skb)
+ if (!skb)
return NULL;
- b = (struct ieee80211_probe_response *) skb->data;
+ b = (struct ieee80211_probe_response *)skb->data;
b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -2632,11 +2492,11 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
ieee->proto_started = 1;
if (ieee->current_network.channel == 0) {
- do{
+ do {
ch++;
if (ch > MAX_CHANNEL_NUMBER)
return; /* no channel found */
- }while(!GET_DOT11D_INFO(ieee)->channel_map[ch]);
+ } while (!GET_DOT11D_INFO(ieee)->channel_map[ch]);
ieee->current_network.channel = ch;
}
@@ -2645,15 +2505,14 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
// printk("===>%s(), chan:%d\n", __func__, ieee->current_network.channel);
// ieee->set_chan(ieee->dev,ieee->current_network.channel);
- for(i = 0; i < 17; i++) {
- ieee->last_rxseq_num[i] = -1;
- ieee->last_rxfrag_num[i] = -1;
- ieee->last_packet_time[i] = 0;
+ for (i = 0; i < 17; i++) {
+ ieee->last_rxseq_num[i] = -1;
+ ieee->last_rxfrag_num[i] = -1;
+ ieee->last_packet_time[i] = 0;
}
ieee->init_wmmparam_flag = 0;//reinitialize AC_xx_PARAM registers.
-
/* if the user set the MAC of the ad-hoc cell and then
* switch to managed mode, shall we make sure that association
* attempts does not fail just because the user provide the essid
@@ -2668,11 +2527,10 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
else if (ieee->iw_mode == IW_MODE_MASTER)
ieee80211_start_master_bss(ieee);
- else if(ieee->iw_mode == IW_MODE_MONITOR)
+ else if (ieee->iw_mode == IW_MODE_MONITOR)
ieee80211_start_monitor_mode(ieee);
}
-
#define DRV_NAME "Ieee80211"
void ieee80211_softmac_init(struct ieee80211_device *ieee)
{
@@ -2681,16 +2539,16 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->state = IEEE80211_NOLINK;
ieee->sync_scan_hurryup = 0;
- for(i = 0; i < 5; i++) {
- ieee->seq_ctrl[i] = 0;
- }
- ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_KERNEL);
+ for (i = 0; i < 5; i++)
+ ieee->seq_ctrl[i] = 0;
+
+ ieee->pDot11dInfo = kzalloc(sizeof(struct rt_dot11d_info), GFP_KERNEL);
if (!ieee->pDot11dInfo)
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
//added for AP roaming
ieee->LinkDetectInfo.SlotNum = 2;
- ieee->LinkDetectInfo.NumRecvBcnInPeriod=0;
- ieee->LinkDetectInfo.NumRecvDataInPeriod=0;
+ ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
+ ieee->LinkDetectInfo.NumRecvDataInPeriod = 0;
ieee->assoc_id = 0;
ieee->queue_stop = 0;
@@ -2703,9 +2561,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->rate = 22;
ieee->ps = IEEE80211_PS_DISABLED;
ieee->sta_sleep = 0;
- ieee->Regdot11HTOperationalRateSet[0]= 0xff;//support MCS 0~7
- ieee->Regdot11HTOperationalRateSet[1]= 0xff;//support MCS 8~15
- ieee->Regdot11HTOperationalRateSet[4]= 0x01;
+ ieee->Regdot11HTOperationalRateSet[0] = 0xff;//support MCS 0~7
+ ieee->Regdot11HTOperationalRateSet[1] = 0xff;//support MCS 8~15
+ ieee->Regdot11HTOperationalRateSet[4] = 0x01;
//added by amy
ieee->actscanning = false;
ieee->beinretry = false;
@@ -2724,7 +2582,6 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
-
INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq);
INIT_WORK(&ieee->associate_procedure_wq, ieee80211_associate_procedure_wq);
@@ -2732,7 +2589,6 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
INIT_DELAYED_WORK(&ieee->associate_retry_wq, ieee80211_associate_retry_wq);
INIT_WORK(&ieee->wx_sync_scan_wq, ieee80211_wx_sync_scan_wq);
-
mutex_init(&ieee->wx_mutex);
mutex_init(&ieee->scan_mutex);
@@ -2740,9 +2596,8 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
spin_lock_init(&ieee->beacon_lock);
tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) ieee80211_sta_ps,
- (unsigned long)ieee);
-
+ (void(*)(unsigned long)) ieee80211_sta_ps,
+ (unsigned long)ieee);
}
void ieee80211_softmac_free(struct ieee80211_device *ieee)
@@ -2761,8 +2616,6 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
* Start of WPA code. *
* this is stolen from the ipw2200 driver *
********************************************************/
-
-
static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
{
/* This is called when wpa_supplicant loads and closes the driver
@@ -2772,7 +2625,6 @@ static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
return 0;
}
-
static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee,
char *wpa_ie, int wpa_ie_len)
{
@@ -2782,10 +2634,8 @@ static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee,
ieee80211_disassociate(ieee);
}
-
static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int reason)
{
-
int ret = 0;
switch (command) {
@@ -2805,7 +2655,6 @@ static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int re
return ret;
}
-
static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
struct ieee_param *param, int plen)
{
@@ -2839,7 +2688,6 @@ static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
{
-
struct ieee80211_security sec = {
.flags = SEC_AUTH_MODE,
};
@@ -2848,18 +2696,16 @@ static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
sec.auth_mode = WLAN_AUTH_SHARED_KEY;
ieee->open_wep = 0;
ieee->auth_mode = 1;
- } else if (value & AUTH_ALG_OPEN_SYSTEM){
+ } else if (value & AUTH_ALG_OPEN_SYSTEM) {
sec.auth_mode = WLAN_AUTH_OPEN;
ieee->open_wep = 1;
ieee->auth_mode = 0;
- }
- else if (value & IW_AUTH_ALG_LEAP){
+ } else if (value & IW_AUTH_ALG_LEAP) {
sec.auth_mode = WLAN_AUTH_LEAP;
ieee->open_wep = 1;
ieee->auth_mode = 2;
}
-
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
//else
@@ -2905,8 +2751,7 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 v
if (!value) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_0;
- }
- else {
+ } else {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
}
@@ -2943,7 +2788,6 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 v
}
/* implementation borrowed from hostap driver */
-
static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
struct ieee_param *param, int param_len)
{
@@ -2961,7 +2805,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
if (param_len !=
- (int) ((char *) param->u.crypt.key - (char *) param) +
+ (int)((char *)param->u.crypt.key - (char *)param) +
param->u.crypt.key_len) {
printk("Len mismatch %d, %d\n", param_len,
param->u.crypt.key_len);
@@ -3053,9 +2897,9 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
ieee->tx_keyidx = param->u.crypt.idx;
sec.active_key = param->u.crypt.idx;
sec.flags |= SEC_ACTIVE_KEY;
- } else
+ } else {
sec.flags &= ~SEC_ACTIVE_KEY;
-
+ }
memcpy(sec.keys[param->u.crypt.idx],
param->u.crypt.key,
param->u.crypt.key_len);
@@ -3093,10 +2937,9 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
return ret;
}
-static inline struct sk_buff *ieee80211_disassociate_skb(
- struct ieee80211_network *beacon,
- struct ieee80211_device *ieee,
- u8 asRsn)
+static inline struct sk_buff *ieee80211_disassociate_skb(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee,
+ u8 asRsn)
{
struct sk_buff *skb;
struct ieee80211_disassoc *disass;
@@ -3117,22 +2960,20 @@ static inline struct sk_buff *ieee80211_disassociate_skb(
return skb;
}
-
void
-SendDisassociation(
- struct ieee80211_device *ieee,
- u8 *asSta,
- u8 asRsn
+SendDisassociation(struct ieee80211_device *ieee,
+ u8 *asSta,
+ u8 asRsn
)
{
- struct ieee80211_network *beacon = &ieee->current_network;
- struct sk_buff *skb;
+ struct ieee80211_network *beacon = &ieee->current_network;
+ struct sk_buff *skb;
- skb = ieee80211_disassociate_skb(beacon, ieee, asRsn);
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
+ skb = ieee80211_disassociate_skb(beacon, ieee, asRsn);
+ if (skb) {
+ softmac_mgmt_xmit(skb, ieee);
+ //dev_kfree_skb_any(skb);//edit by thomas
+ }
}
EXPORT_SYMBOL(SendDisassociation);
@@ -3156,7 +2997,6 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
}
switch (param->cmd) {
-
case IEEE_CMD_SET_WPA_PARAM:
ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name,
param->u.wpa_param.value);
@@ -3176,7 +3016,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
break;
default:
- printk("Unknown WPA supplicant request: %d\n",param->cmd);
+ printk("Unknown WPA supplicant request: %d\n", param->cmd);
ret = -EOPNOTSUPP;
break;
}
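
Among the ieee80211_softmac.c changes above, random_ether_addr() is replaced by eth_random_addr() when a new IBSS cell is created. A minimal sketch of the assumed semantics of that helper, with a hypothetical function name (fill with random bytes, then force a locally-administered unicast address):

    #include <linux/etherdevice.h>
    #include <linux/random.h>

    /* hypothetical sketch of eth_random_addr()-style BSSID generation */
    static void example_random_bssid(u8 addr[ETH_ALEN])
    {
            get_random_bytes(addr, ETH_ALEN);
            addr[0] &= 0xfe;        /* clear the multicast bit */
            addr[0] |= 0x02;        /* set the locally-administered bit */
    }
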
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 21bd0dc40888..81020fbcdc20 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -302,8 +302,8 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq);
short chan;
- HT_EXTCHNL_OFFSET chan_offset = 0;
- HT_CHANNEL_WIDTH bandwidth = 0;
+ enum ht_extension_chan_offset chan_offset = 0;
+ enum ht_channel_width bandwidth = 0;
int b40M = 0;
chan = ieee->current_network.channel;
@@ -320,7 +320,7 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && ieee->pHTInfo->bCurBW40MHz) {
b40M = 1;
chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset;
- bandwidth = (HT_CHANNEL_WIDTH)ieee->pHTInfo->bCurBW40MHz;
+ bandwidth = (enum ht_channel_width)ieee->pHTInfo->bCurBW40MHz;
printk("Scan in 40M, force to 20M first:%d, %d\n", chan_offset, bandwidth);
ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
}
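
The cast of bCurBW40MHz to enum ht_channel_width above is safe only because the boolean's 0/1 values coincide with HT_CHANNEL_WIDTH_20 (0) and HT_CHANNEL_WIDTH_20_40 (1). A more explicit form of the same mapping, sketched with a hypothetical helper name:

    /* hypothetical explicit form of the bool-to-enum cast used above */
    static enum ht_channel_width bw40_to_width(u8 bCurBW40MHz)
    {
            return bCurBW40MHz ? HT_CHANNEL_WIDTH_20_40     /* == 1 */
                               : HT_CHANNEL_WIDTH_20;       /* == 0 */
    }
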
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 9a1a84548bc6..cc4049de975d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -306,7 +306,7 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
struct sk_buff *skb, struct cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- PTX_TS_RECORD pTxTs = NULL;
+ struct tx_ts_record *pTxTs = NULL;
struct rtl_80211_hdr_1addr *hdr = (struct rtl_80211_hdr_1addr *)skb->data;
if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
@@ -330,20 +330,20 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
}
if(pHTInfo->bCurrentAMPDUEnable)
{
- if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
{
printk("===>can't get TS\n");
return;
}
- if (!pTxTs->TxAdmittedBARecord.bValid)
+ if (!pTxTs->tx_admitted_ba_record.bValid)
{
TsStartAddBaProcess(ieee, pTxTs);
goto FORCED_AGG_SETTING;
}
- else if (!pTxTs->bUsingBa)
+ else if (!pTxTs->using_ba)
{
- if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
- pTxTs->bUsingBa = true;
+ if (SN_LESS(pTxTs->tx_admitted_ba_record.BaStartSeqCtrl.field.SeqNum, (pTxTs->tx_cur_seq + 1) % 4096))
+ pTxTs->using_ba = true;
else
goto FORCED_AGG_SETTING;
}
@@ -584,12 +584,12 @@ static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
return;
if (IsQoSDataFrame(skb->data)) //we deal qos data only
{
- PTX_TS_RECORD pTS = NULL;
- if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTS), dst, skb->priority, TX_DIR, true))
+ struct tx_ts_record *pTS = NULL;
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true))
{
return;
}
- pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
+ pTS->tx_cur_seq = (pTS->tx_cur_seq + 1) % 4096;
}
}
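
Both hunks in ieee80211_tx.c step the per-TS sequence number with (tx_cur_seq + 1) % 4096 because 802.11 sequence numbers are 12 bits wide, and SN_LESS() has to compare them with wraparound in mind. A sketch of the assumed arithmetic; the helper names are hypothetical and the exact SN_LESS() definition lives in the driver's headers:

    /* hypothetical helpers mirroring the modulo-4096 sequence handling */
    static inline u16 seq_next(u16 seq)
    {
            return (seq + 1) % 4096;        /* 12-bit sequence space */
    }

    static inline int seq_less(u16 a, u16 b)
    {
            /* bit 11 set in the difference => a is behind b on the ring */
            return ((a - b) & 0x800) != 0;
    }
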
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 86c73570e88a..01b631c2a180 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -19,7 +19,7 @@
static void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
{
pBA->bValid = true;
- if(Time != 0)
+ if (Time != 0)
mod_timer(&pBA->Timer, jiffies + msecs_to_jiffies(Time));
}
@@ -36,14 +36,14 @@ static void DeActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA)
/********************************************************************************************************************
*function: deactivete BA entry in Tx Ts, and send DELBA.
* input:
- * PTX_TS_RECORD pTxTs //Tx Ts which is to deactivate BA entry.
+ * struct tx_ts_record *pTxTs //Tx Ts which is to deactivate BA entry.
* output: none
- * notice: As PTX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME
+ * notice: As struct tx_ts_record * structure will be defined in QOS, so wait to be merged. //FIXME
********************************************************************************************************************/
-static u8 TxTsDeleteBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
+static u8 TxTsDeleteBA(struct ieee80211_device *ieee, struct tx_ts_record *pTxTs)
{
- PBA_RECORD pAdmittedBa = &pTxTs->TxAdmittedBARecord; //These two BA entries must exist in TS structure
- PBA_RECORD pPendingBa = &pTxTs->TxPendingBARecord;
+ PBA_RECORD pAdmittedBa = &pTxTs->tx_admitted_ba_record; //These two BA entries must exist in TS structure
+ PBA_RECORD pPendingBa = &pTxTs->tx_pending_ba_record;
u8 bSendDELBA = false;
// Delete pending BA
@@ -64,13 +64,13 @@ static u8 TxTsDeleteBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
/********************************************************************************************************************
*function: deactivete BA entry in Tx Ts, and send DELBA.
* input:
- * PRX_TS_RECORD pRxTs //Rx Ts which is to deactivate BA entry.
+ * struct rx_ts_record *pRxTs //Rx Ts which is to deactivate BA entry.
* output: none
- * notice: As PRX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME, same with above
+ * notice: As struct rx_ts_record * structure will be defined in QOS, so wait to be merged. //FIXME, same with above
********************************************************************************************************************/
-static u8 RxTsDeleteBA(struct ieee80211_device *ieee, PRX_TS_RECORD pRxTs)
+static u8 RxTsDeleteBA(struct ieee80211_device *ieee, struct rx_ts_record *pRxTs)
{
- PBA_RECORD pBa = &pRxTs->RxAdmittedBARecord;
+ PBA_RECORD pBa = &pRxTs->rx_admitted_ba_record;
u8 bSendDELBA = false;
if (pBa->bValid) {
@@ -117,13 +117,13 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
IEEE80211_DEBUG(IEEE80211_DL_ERR, "pBA is NULL\n");
return NULL;
}
- skb = dev_alloc_skb(len + sizeof( struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
+ skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
if (!skb) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
return NULL;
}
- memset(skb->data, 0, sizeof( struct rtl_80211_hdr_3addr)); //I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
+ memset(skb->data, 0, sizeof(struct rtl_80211_hdr_3addr)); //I wonder whether it's necessary. Apparently kernel will not do it when alloc a skb.
skb_reserve(skb, ieee->tx_headroom);
BAReq = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
@@ -137,10 +137,10 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
//tag += sizeof( struct rtl_80211_hdr_3addr); //move to action field
tag = skb_put(skb, 9);
- *tag ++= ACT_CAT_BA;
- *tag ++= type;
+ *tag++ = ACT_CAT_BA;
+ *tag++ = type;
// Dialog Token
- *tag ++= pBA->DialogToken;
+ *tag++ = pBA->DialogToken;
if (ACT_ADDBARSP == type) {
// Status Code
@@ -174,7 +174,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
*function: construct DELBA frame
* input: u8* dst //DELBA frame's destination
* PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
- * TR_SELECT TxRxSelect //TX RX direction
+ * enum tr_select TxRxSelect //TX RX direction
* u16 ReasonCode //status code.
* output: none
* return: sk_buff* skb //return constructed skb to xmit
@@ -183,7 +183,7 @@ static struct sk_buff *ieee80211_DELBA(
struct ieee80211_device *ieee,
u8 *dst,
PBA_RECORD pBA,
- TR_SELECT TxRxSelect,
+ enum tr_select TxRxSelect,
u16 ReasonCode
)
{
@@ -201,10 +201,10 @@ static struct sk_buff *ieee80211_DELBA(
memset(&DelbaParamSet, 0, 2);
- DelbaParamSet.field.Initiator = (TxRxSelect==TX_DIR)?1:0;
+ DelbaParamSet.field.Initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
DelbaParamSet.field.TID = pBA->BaParamSet.field.TID;
- skb = dev_alloc_skb(len + sizeof( struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
+ skb = dev_alloc_skb(len + sizeof(struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
if (!skb) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
return NULL;
@@ -221,8 +221,8 @@ static struct sk_buff *ieee80211_DELBA(
tag = skb_put(skb, 6);
- *tag ++= ACT_CAT_BA;
- *tag ++= ACT_DELBA;
+ *tag++ = ACT_CAT_BA;
+ *tag++ = ACT_DELBA;
// DELBA Parameter Set
@@ -258,8 +258,7 @@ static void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee,
//add statistic needed here.
//and skb will be freed in softmac_mgmt_xmit(), so omit all dev_kfree_skb_any() outside softmac_mgmt_xmit()
//WB
- }
- else {
+ } else {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __func__);
}
}
@@ -280,8 +279,7 @@ static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst,
if (skb) {
softmac_mgmt_xmit(skb, ieee);
//same above
- }
- else {
+ } else {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __func__);
}
@@ -292,14 +290,14 @@ static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst,
*function: send ADDBARSP frame out
* input: u8* dst //DELBA frame's destination
* PBA_RECORD pBA //BA_RECORD entry which stores the necessary information for BA
- * TR_SELECT TxRxSelect //TX or RX
+ * enum tr_select TxRxSelect //TX or RX
* u16 ReasonCode //DEL ReasonCode
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst,
- PBA_RECORD pBA, TR_SELECT TxRxSelect,
+ PBA_RECORD pBA, enum tr_select TxRxSelect,
u16 ReasonCode)
{
struct sk_buff *skb;
@@ -307,8 +305,7 @@ static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst,
if (skb) {
softmac_mgmt_xmit(skb, ieee);
//same above
- }
- else {
+ } else {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "alloc skb error in function %s()\n", __func__);
}
}
@@ -328,7 +325,7 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
PBA_PARAM_SET pBaParamSet = NULL;
u16 *pBaTimeoutVal = NULL;
PSEQUENCE_CONTROL pBaStartSeqCtrl = NULL;
- PRX_TS_RECORD pTS = NULL;
+ struct rx_ts_record *pTS = NULL;
if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 9) {
IEEE80211_DEBUG(IEEE80211_DL_ERR,
@@ -363,16 +360,16 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
// If there is no matched TS, reject the ADDBA request.
if (!GetTs(
ieee,
- (PTS_COMMON_INFO *)(&pTS),
+ (struct ts_common_info **)(&pTS),
dst,
(u8)(pBaParamSet->field.TID),
RX_DIR,
- true) ) {
+ true)) {
rc = ADDBA_STATUS_REFUSED;
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS in %s()\n", __func__);
goto OnADDBAReq_Fail;
}
- pBA = &pTS->RxAdmittedBARecord;
+ pBA = &pTS->rx_admitted_ba_record;
// To Determine the ADDBA Req content
// We can do much more check here, including BufferSize, AMSDU_Support, Policy, StartSeqCtrl...
// I want to check StartSeqCtrl to make sure when we start aggregation!!!
@@ -423,7 +420,7 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
{
struct rtl_80211_hdr_3addr *rsp = NULL;
PBA_RECORD pPendingBA, pAdmittedBA;
- PTX_TS_RECORD pTS = NULL;
+ struct tx_ts_record *pTS = NULL;
u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
u16 *pStatusCode = NULL, *pBaTimeoutVal = NULL;
PBA_PARAM_SET pBaParamSet = NULL;
@@ -450,7 +447,7 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
if (ieee->current_network.qos_data.active == 0 ||
!ieee->pHTInfo->bCurrentHTSupport ||
!ieee->pHTInfo->bCurrentAMPDUEnable) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
ReasonCode = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
}
@@ -462,19 +459,19 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
//
if (!GetTs(
ieee,
- (PTS_COMMON_INFO *)(&pTS),
+ (struct ts_common_info **)(&pTS),
dst,
(u8)(pBaParamSet->field.TID),
TX_DIR,
- false) ) {
+ false)) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS in %s()\n", __func__);
ReasonCode = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
}
- pTS->bAddBaReqInProgress = false;
- pPendingBA = &pTS->TxPendingBARecord;
- pAdmittedBA = &pTS->TxAdmittedBARecord;
+ pTS->add_ba_req_in_progress = false;
+ pPendingBA = &pTS->tx_pending_ba_record;
+ pAdmittedBA = &pTS->tx_admitted_ba_record;
//
@@ -485,19 +482,17 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
// Since BA is already setup, we ignore all other ADDBA Response.
IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. Drop because already admit it! \n");
return -1;
- }
- else if((!pPendingBA->bValid) ||(*pDialogToken != pPendingBA->DialogToken)) {
+ } else if ((!pPendingBA->bValid) || (*pDialogToken != pPendingBA->DialogToken)) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "OnADDBARsp(): Recv ADDBA Rsp. BA invalid, DELBA! \n");
ReasonCode = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
- }
- else {
+ } else {
IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n", *pStatusCode);
DeActivateBAEntry(ieee, pPendingBA);
}
- if(*pStatusCode == ADDBA_STATUS_SUCCESS) {
+ if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
//
// Determine ADDBA Rsp content here.
// We can compare the value of BA parameter set that Peer returned and Self sent.
@@ -505,7 +500,7 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
//
if (pBaParamSet->field.BAPolicy == BA_POLICY_DELAYED) {
// Since this is a kind of ADDBA failed, we delay next ADDBA process.
- pTS->bAddBaReqDelayed = true;
+ pTS->add_ba_req_delayed = true;
DeActivateBAEntry(ieee, pAdmittedBA);
ReasonCode = DELBA_REASON_END_BA;
goto OnADDBARsp_Reject;
@@ -521,10 +516,9 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
pAdmittedBA->BaParamSet = *pBaParamSet;
DeActivateBAEntry(ieee, pAdmittedBA);
ActivateBAEntry(ieee, pAdmittedBA, *pBaTimeoutVal);
- }
- else {
+ } else {
// Delay next ADDBA process.
- pTS->bAddBaReqDelayed = true;
+ pTS->add_ba_req_delayed = true;
}
// End of procedure
@@ -562,7 +556,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
if (ieee->current_network.qos_data.active == 0 ||
!ieee->pHTInfo->bCurrentHTSupport) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "received DELBA while QOS or HT is not supported(%d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "received DELBA while QOS or HT is not supported(%d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
return -1;
}
@@ -571,41 +565,40 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
dst = &delba->addr2[0];
pDelBaParamSet = (PDELBA_PARAM_SET)&delba->payload[2];
- if(pDelBaParamSet->field.Initiator == 1) {
- PRX_TS_RECORD pRxTs;
+ if (pDelBaParamSet->field.Initiator == 1) {
+ struct rx_ts_record *pRxTs;
if (!GetTs(
ieee,
- (PTS_COMMON_INFO *)&pRxTs,
+ (struct ts_common_info **)&pRxTs,
dst,
(u8)pDelBaParamSet->field.TID,
RX_DIR,
- false) ) {
+ false)) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS for RXTS in %s()\n", __func__);
return -1;
}
RxTsDeleteBA(ieee, pRxTs);
- }
- else {
- PTX_TS_RECORD pTxTs;
+ } else {
+ struct tx_ts_record *pTxTs;
if (!GetTs(
ieee,
- (PTS_COMMON_INFO *)&pTxTs,
+ (struct ts_common_info **)&pTxTs,
dst,
(u8)pDelBaParamSet->field.TID,
TX_DIR,
- false) ) {
+ false)) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't get TS for TXTS in %s()\n", __func__);
return -1;
}
- pTxTs->bUsingBa = false;
- pTxTs->bAddBaReqInProgress = false;
- pTxTs->bAddBaReqDelayed = false;
- del_timer_sync(&pTxTs->TsAddBaTimer);
- //PlatformCancelTimer(Adapter, &pTxTs->TsAddBaTimer);
+ pTxTs->using_ba = false;
+ pTxTs->add_ba_req_in_progress = false;
+ pTxTs->add_ba_req_delayed = false;
+ del_timer_sync(&pTxTs->ts_add_ba_timer);
+ //PlatformCancelTimer(Adapter, &pTxTs->ts_add_ba_timer);
TxTsDeleteBA(ieee, pTxTs);
}
return 0;
@@ -617,12 +610,12 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
void
TsInitAddBA(
struct ieee80211_device *ieee,
- PTX_TS_RECORD pTS,
+ struct tx_ts_record *pTS,
u8 Policy,
u8 bOverwritePending
)
{
- PBA_RECORD pBA = &pTS->TxPendingBARecord;
+ PBA_RECORD pBA = &pTS->tx_pending_ba_record;
if (pBA->bValid && !bOverwritePending)
return;
@@ -633,81 +626,79 @@ TsInitAddBA(
pBA->DialogToken++; // DialogToken: Only keep the latest dialog token
pBA->BaParamSet.field.AMSDU_Support = 0; // Do not support A-MSDU with A-MPDU now!!
pBA->BaParamSet.field.BAPolicy = Policy; // Policy: Delayed or Immediate
- pBA->BaParamSet.field.TID = pTS->TsCommonInfo.TSpec.f.TSInfo.field.ucTSID; // TID
+ pBA->BaParamSet.field.TID = pTS->ts_common_info.t_spec.ts_info.uc_tsid; // TID
// BufferSize: This need to be set according to A-MPDU vector
pBA->BaParamSet.field.BufferSize = 32; // BufferSize: This need to be set according to A-MPDU vector
pBA->BaTimeoutValue = 0; // Timeout value: Set 0 to disable Timer
- pBA->BaStartSeqCtrl.field.SeqNum = (pTS->TxCurSeq + 3) % 4096; // Block Ack will start after 3 packets later.
+ pBA->BaStartSeqCtrl.field.SeqNum = (pTS->tx_cur_seq + 3) % 4096; // Block Ack will start after 3 packets later.
ActivateBAEntry(ieee, pBA, BA_SETUP_TIMEOUT);
- ieee80211_send_ADDBAReq(ieee, pTS->TsCommonInfo.Addr, pBA);
+ ieee80211_send_ADDBAReq(ieee, pTS->ts_common_info.addr, pBA);
}
void
-TsInitDelBA( struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect)
+TsInitDelBA(struct ieee80211_device *ieee, struct ts_common_info *pTsCommonInfo, enum tr_select TxRxSelect)
{
+ if (TxRxSelect == TX_DIR) {
+ struct tx_ts_record *pTxTs = (struct tx_ts_record *)pTsCommonInfo;
- if(TxRxSelect == TX_DIR) {
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)pTsCommonInfo;
-
- if(TxTsDeleteBA(ieee, pTxTs))
+ if (TxTsDeleteBA(ieee, pTxTs))
ieee80211_send_DELBA(
ieee,
- pTsCommonInfo->Addr,
- (pTxTs->TxAdmittedBARecord.bValid)?(&pTxTs->TxAdmittedBARecord):(&pTxTs->TxPendingBARecord),
+ pTsCommonInfo->addr,
+ (pTxTs->tx_admitted_ba_record.bValid) ? (&pTxTs->tx_admitted_ba_record) : (&pTxTs->tx_pending_ba_record),
TxRxSelect,
DELBA_REASON_END_BA);
- }
- else if(TxRxSelect == RX_DIR) {
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)pTsCommonInfo;
- if(RxTsDeleteBA(ieee, pRxTs))
+ } else if (TxRxSelect == RX_DIR) {
+ struct rx_ts_record *pRxTs = (struct rx_ts_record *)pTsCommonInfo;
+ if (RxTsDeleteBA(ieee, pRxTs))
ieee80211_send_DELBA(
ieee,
- pTsCommonInfo->Addr,
- &pRxTs->RxAdmittedBARecord,
+ pTsCommonInfo->addr,
+ &pRxTs->rx_admitted_ba_record,
TxRxSelect,
- DELBA_REASON_END_BA );
+ DELBA_REASON_END_BA);
}
}
/********************************************************************************************************************
*function: BA setup timer
- * input: unsigned long data //acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer
+ * input: unsigned long data //actually we send struct tx_ts_record or struct rx_ts_record to this timer
* return: NULL
* notice:
********************************************************************************************************************/
void BaSetupTimeOut(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TxPendingBARecord.Timer);
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t, tx_pending_ba_record.Timer);
- pTxTs->bAddBaReqInProgress = false;
- pTxTs->bAddBaReqDelayed = true;
- pTxTs->TxPendingBARecord.bValid = false;
+ pTxTs->add_ba_req_in_progress = false;
+ pTxTs->add_ba_req_delayed = true;
+ pTxTs->tx_pending_ba_record.bValid = false;
}
void TxBaInactTimeout(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TxAdmittedBARecord.Timer);
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t, tx_admitted_ba_record.Timer);
struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[pTxTs->num]);
TxTsDeleteBA(ieee, pTxTs);
ieee80211_send_DELBA(
ieee,
- pTxTs->TsCommonInfo.Addr,
- &pTxTs->TxAdmittedBARecord,
+ pTxTs->ts_common_info.addr,
+ &pTxTs->tx_admitted_ba_record,
TX_DIR,
DELBA_REASON_TIMEOUT);
}
void RxBaInactTimeout(struct timer_list *t)
{
- PRX_TS_RECORD pRxTs = from_timer(pRxTs, t, RxAdmittedBARecord.Timer);
+ struct rx_ts_record *pRxTs = from_timer(pRxTs, t, rx_admitted_ba_record.Timer);
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
RxTsDeleteBA(ieee, pRxTs);
ieee80211_send_DELBA(
ieee,
- pRxTs->TsCommonInfo.Addr,
- &pRxTs->RxAdmittedBARecord,
+ pRxTs->ts_common_info.addr,
+ &pRxTs->rx_admitted_ba_record,
RX_DIR,
DELBA_REASON_TIMEOUT);
}
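
The three timer callbacks above follow the kernel's timer_setup()/from_timer() pattern: the callback receives the timer_list pointer and recovers the structure embedding it via container_of(). A self-contained sketch under that assumption, with hypothetical type and function names:

    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <linux/types.h>

    struct example_ba_record {              /* stand-in for the BA record */
            struct timer_list Timer;
            bool bValid;
    };

    static void example_ba_timeout(struct timer_list *t)
    {
            /* from_timer() is container_of() for embedded timers */
            struct example_ba_record *ba = from_timer(ba, t, Timer);

            ba->bValid = false;
    }

    static void example_ba_arm(struct example_ba_record *ba, unsigned int ms)
    {
            timer_setup(&ba->Timer, example_ba_timeout, 0);
            mod_timer(&ba->Timer, jiffies + msecs_to_jiffies(ms));
    }
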
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
index a85036022aa8..7d54a7cd9514 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
@@ -8,20 +8,9 @@
//------------------------------------------------------------
//
-// Operation mode value
-//
-#define HT_OPMODE_NO_PROTECT 0
-#define HT_OPMODE_OPTIONAL 1
-#define HT_OPMODE_40MHZ_PROTECT 2
-#define HT_OPMODE_MIXED 3
-
-//
// MIMO Power Save Settings
//
#define MIMO_PS_STATIC 0
-#define MIMO_PS_DYNAMIC 1
-#define MIMO_PS_NOLIMIT 3
-
//
// There should be 128 bits to cover all of the MCS rates. However, since
@@ -35,45 +24,24 @@
#define HT_SUPPORTED_MCS_2SS_BITMAP 0x0000ff00
#define HT_SUPPORTED_MCS_1SS_2SS_BITMAP HT_MCS_1SS_BITMAP|HT_MCS_1SS_2SS_BITMAP
-
-typedef enum _HT_MCS_RATE {
- HT_MCS0 = 0x00000001,
- HT_MCS1 = 0x00000002,
- HT_MCS2 = 0x00000004,
- HT_MCS3 = 0x00000008,
- HT_MCS4 = 0x00000010,
- HT_MCS5 = 0x00000020,
- HT_MCS6 = 0x00000040,
- HT_MCS7 = 0x00000080,
- HT_MCS8 = 0x00000100,
- HT_MCS9 = 0x00000200,
- HT_MCS10 = 0x00000400,
- HT_MCS11 = 0x00000800,
- HT_MCS12 = 0x00001000,
- HT_MCS13 = 0x00002000,
- HT_MCS14 = 0x00004000,
- HT_MCS15 = 0x00008000,
- // Do not define MCS32 here although 8190 support MCS32
-} HT_MCS_RATE, *PHT_MCS_RATE;
-
//
// Represent Channel Width in HT Capabilities
//
-typedef enum _HT_CHANNEL_WIDTH {
+enum ht_channel_width {
HT_CHANNEL_WIDTH_20 = 0,
HT_CHANNEL_WIDTH_20_40 = 1,
-}HT_CHANNEL_WIDTH, *PHT_CHANNEL_WIDTH;
+};
//
// Represent Extension Channel Offset in HT Capabilities
// This is available only in 40Mhz mode.
//
-typedef enum _HT_EXTCHNL_OFFSET {
+enum ht_extension_chan_offset {
HT_EXTCHNL_OFFSET_NO_EXT = 0,
HT_EXTCHNL_OFFSET_UPPER = 1,
HT_EXTCHNL_OFFSET_NO_DEF = 2,
HT_EXTCHNL_OFFSET_LOWER = 3,
-}HT_EXTCHNL_OFFSET, *PHT_EXTCHNL_OFFSET;
+};
typedef enum _CHNLOP {
CHNLOP_NONE = 0, // No Action now
@@ -120,28 +88,6 @@ typedef union _HT_CAPABILITY_MACPARA{
}HT_CAPABILITY_MACPARA, *PHT_CAPABILITY_MACPARA;
*/
-typedef enum _HT_ACTION {
- ACT_RECOMMAND_WIDTH = 0,
- ACT_MIMO_PWR_SAVE = 1,
- ACT_PSMP = 2,
- ACT_SET_PCO_PHASE = 3,
- ACT_MIMO_CHL_MEASURE = 4,
- ACT_RECIPROCITY_CORRECT = 5,
- ACT_MIMO_CSI_MATRICS = 6,
- ACT_MIMO_NOCOMPR_STEER = 7,
- ACT_MIMO_COMPR_STEER = 8,
- ACT_ANTENNA_SELECT = 9,
-} HT_ACTION, *PHT_ACTION;
-
-
-/* 2007/06/07 MH Define sub-carrier mode for 40MHZ. */
-typedef enum _HT_Bandwidth_40MHZ_Sub_Carrier {
- SC_MODE_DUPLICATE = 0,
- SC_MODE_LOWER = 1,
- SC_MODE_UPPER = 2,
- SC_MODE_FULL40MHZ = 3,
-}HT_BW40_SC_E;
-
typedef struct _HT_CAPABILITY_ELE {
//HT capability info
@@ -212,16 +158,6 @@ typedef struct _HT_INFORMATION_ELE {
u8 BasicMSC[16];
} __attribute__ ((packed)) HT_INFORMATION_ELE, *PHT_INFORMATION_ELE;
-//
-// MIMO Power Save control field.
-// This is appear in MIMO Power Save Action Frame
-//
-typedef struct _MIMOPS_CTRL {
- u8 MimoPsEnable:1;
- u8 MimoPsMode:1;
- u8 Reserved:6;
-} MIMOPS_CTRL, *PMIMOPS_CTRL;
-
typedef enum _HT_SPEC_VER {
HT_SPEC_VER_IEEE = 0,
HT_SPEC_VER_EWC = 1,
@@ -301,7 +237,7 @@ typedef struct _RT_HIGH_THROUGHPUT {
u8 PeerMimoPs;
// 40MHz Channel Offset settings.
- HT_EXTCHNL_OFFSET CurSTAExtChnlOffset;
+ enum ht_extension_chan_offset CurSTAExtChnlOffset;
u8 bCurTxBW40MHz; // If we use 40 MHz to Tx
u8 PeerBandwidth;
@@ -342,37 +278,6 @@ typedef struct _RT_HIGH_THROUGHPUT {
u32 IOTAction;
} __attribute__ ((packed)) RT_HIGH_THROUGHPUT, *PRT_HIGH_THROUGHPUT;
-
-//------------------------------------------------------------
-// The Data structure is used to keep HT related variable for "each Sta"
-// when card is configured as "AP mode"
-//------------------------------------------------------------
-
-typedef struct _RT_HTINFO_STA_ENTRY {
- u8 bEnableHT;
-
- u8 bSupportCck;
-
- u16 AMSDU_MaxSize;
-
- u8 AMPDU_Factor;
- u8 MPDU_Density;
-
- u8 HTHighestOperaRate;
-
- u8 bBw40MHz;
-
- u8 MimoPs;
-
- u8 McsRateSet[16];
-
-
-}RT_HTINFO_STA_ENTRY, *PRT_HTINFO_STA_ENTRY;
-
-
-
-
-
//------------------------------------------------------------
// The Data structure is used to keep HT related variable for "each AP"
// when card is configured as "STA mode"
@@ -396,28 +301,6 @@ typedef struct _BSS_HT {
u8 bdRT2RTLongSlotTime;
} __attribute__ ((packed)) BSS_HT, *PBSS_HT;
-typedef struct _MIMO_RSSI {
- u32 EnableAntenna;
- u32 AntennaA;
- u32 AntennaB;
- u32 AntennaC;
- u32 AntennaD;
- u32 Average;
-}MIMO_RSSI, *PMIMO_RSSI;
-
-typedef struct _MIMO_EVM {
- u32 EVM1;
- u32 EVM2;
-}MIMO_EVM, *PMIMO_EVM;
-
-typedef struct _FALSE_ALARM_STATISTICS {
- u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
- u32 Cnt_Crc8_fail;
- u32 Cnt_all;
-}FALSE_ALARM_STATISTICS, *PFALSE_ALARM_STATISTICS;
-
-
extern u8 MCS_FILTER_ALL[16];
extern u8 MCS_FILTER_1SS[16];
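
The rtl819x_HT.h hunks apply one conversion throughout: Windows-style typedef'd enums with pointer aliases become plain, lowercase-tagged enums, matching kernel style. Side by side, taken from the hunk above (the two forms are alternatives, not meant to coexist in one translation unit):

    /* before: typedef plus pointer alias, as in the removed lines */
    typedef enum _HT_CHANNEL_WIDTH {
            HT_CHANNEL_WIDTH_20 = 0,
            HT_CHANNEL_WIDTH_20_40 = 1,
    } HT_CHANNEL_WIDTH, *PHT_CHANNEL_WIDTH;

    /* after: plain tagged enum, referenced as "enum ht_channel_width" */
    enum ht_channel_width {
            HT_CHANNEL_WIDTH_20 = 0,
            HT_CHANNEL_WIDTH_20_40 = 1,
    };
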
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index 1b61a8de1edf..b948eae5909d 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-//As this function is mainly ported from Windows driver, so leave the name little changed. If any confusion caused, tell me. Created by WB. 2008.05.08
+/*
+ * As this function is mainly ported from the Windows driver, the names are
+ * left little changed. If any confusion is caused, tell me. Created by WB. 2008.05.08
+ */
#include "ieee80211.h"
-#include "rtl819x_HT.h"
+
u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
@@ -41,17 +44,18 @@ static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
//static u8 DLINK_ATHEROS[3] = {0x00, 0x1c, 0xf0};
static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
-
-// 2008/04/01 MH For Cisco G mode RX TP We need to change FW duration. Should we put the
-// code in other place??
-//static u8 WIFI_CISCO_G_AP[3] = {0x00, 0x40, 0x96};
-/********************************************************************************************************************
+/*
+ * 2008/04/01 MH For Cisco G mode RX TP, we need to change FW duration. Should
+ * we put the code in another place?
+ * static u8 WIFI_CISCO_G_AP[3] = {0x00, 0x40, 0x96};
+ */
+/*
*function: This function update default settings in pHTInfo structure
* input: PRT_HIGH_THROUGHPUT pHTInfo
* output: none
* return: none
* notice: These value need be modified if any changes.
- * *****************************************************************************************************************/
+ */
void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -93,8 +97,10 @@ void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
// 8190 only. Assign duration operation mode to firmware
pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur;
#endif
- // 8190 only, Realtek proprietary aggregation mode
- // Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others
+ /*
+ * 8190 only, Realtek proprietary aggregation mode
+ * 0: Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others
+ */
pHTInfo->bRegRT2RTAggregation = 1;//0: Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others
// For Rx Reorder Control
@@ -111,20 +117,19 @@ void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
pHTInfo->UsbRxFwAggrPacketNum = 8;
pHTInfo->UsbRxFwAggrTimeout = 16; ////usb rx FW aggregation timeout threshold.It's in units of 64us
#endif
-
-
}
-/********************************************************************************************************************
- *function: This function print out each field on HT capability IE mainly from (Beacon/ProbeRsp/AssocReq)
+
+/*
+ *function: This function prints out each field on HT capability
+ * IE mainly from (Beacon/ProbeRsp/AssocReq)
* input: u8* CapIE //Capability IE to be printed out
* u8* TitleString //mainly print out caller function
* output: none
* return: none
* notice: Driver should not print out this message by default.
- * *****************************************************************************************************************/
+ */
void HTDebugHTCapability(u8 *CapIE, u8 *TitleString)
{
-
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
PHT_CAPABILITY_ELE pCapELE;
@@ -147,20 +152,19 @@ void HTDebugHTCapability(u8 *CapIE, u8 *TitleString)
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0],\
pCapELE->MCS[1], pCapELE->MCS[2], pCapELE->MCS[3], pCapELE->MCS[4]);
- return;
-
}
-/********************************************************************************************************************
- *function: This function print out each field on HT Information IE mainly from (Beacon/ProbeRsp)
+
+/*
+ *function: This function prints out each field on HT Information
+ * IE mainly from (Beacon/ProbeRsp)
* input: u8* InfoIE //Capability IE to be printed out
* u8* TitleString //mainly print out caller function
* output: none
* return: none
* notice: Driver should not print out this message by default.
- * *****************************************************************************************************************/
+ */
void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
{
-
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
PHT_INFORMATION_ELE pHTInfoEle;
@@ -210,12 +214,11 @@ void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x][%x]\n", pHTInfoEle->BasicMSC[0],\
pHTInfoEle->BasicMSC[1], pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3], pHTInfoEle->BasicMSC[4]);
- return;
}
/*
-* Return: true if station in half n mode and AP supports 40 bw
-*/
+ * Return: true if the station is in half-n mode and the AP supports 40MHz bandwidth
+ */
static bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
{
bool retValue = false;
@@ -261,17 +264,15 @@ static bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
{
-
u8 is40MHz;
u8 isShortGI;
is40MHz = (IsHTHalfNmode40Bandwidth(ieee)) ? 1 : 0;
isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz)) ? 1 : 0;
- return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
+ return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
}
-
u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -280,17 +281,17 @@ u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
u8 isShortGI = (pHTInfo->bCurBW40MHz) ?
((pHTInfo->bCurShortGI40MHz) ? 1 : 0) :
((pHTInfo->bCurShortGI20MHz) ? 1 : 0);
- return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
+ return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
}
-/********************************************************************************************************************
+/*
*function: This function returns current datarate.
* input: struct ieee80211_device* ieee
* u8 nDataRate
* output: none
* return: tx rate
* notice: quite unsure about how to use this function //wb
- * *****************************************************************************************************************/
+ */
u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate)
{
//PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -322,16 +323,15 @@ u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate)
//nDataRate = nDataRate - 60;
}
- return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf];
+ return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate & 0xf];
}
}
-
-
bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee)
{
bool retValue = false;
struct ieee80211_network *net = &ieee->current_network;
+
if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
(memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
(memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
@@ -353,32 +353,33 @@ bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee)
return retValue;
}
-/********************************************************************************************************************
+/*
*function: This function returns peer IOT.
* input: struct ieee80211_device* ieee
* output: none
* return:
* notice:
- * *****************************************************************************************************************/
+ */
static void HTIOTPeerDetermine(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
struct ieee80211_network *net = &ieee->current_network;
+
if (net->bssht.bdRT2RTAggregation)
pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
else if (net->broadcom_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
else if ((memcmp(net->bssid, UNKNOWN_BORADCOM, 3) == 0) ||
- (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
- (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0) ||
- (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3) == 0))
+ (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
+ (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0) ||
+ (memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3) == 0))
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
- (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
- (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
- (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
- (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
- net->ralink_cap_exist)
+ (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
+ (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
+ net->ralink_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_RALINK;
else if (net->atheros_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
@@ -389,32 +390,34 @@ static void HTIOTPeerDetermine(struct ieee80211_device *ieee)
IEEE80211_DEBUG(IEEE80211_DL_IOT, "Joseph debug!! IOTPEER: %x\n", pHTInfo->IOTPeer);
}
-/********************************************************************************************************************
- *function: Check whether driver should declare received rate up to MCS13 only since some chipset is not good
- * at receiving MCS14~15 frame from some AP.
+
+/*
+ *function: Check whether driver should declare received rate up to MCS13
+ * only since some chipsets are not good at receiving MCS14~15 frames
+ * from some APs.
* input: struct ieee80211_device* ieee
* u8 * PeerMacAddr
* output: none
* return: return 1 if driver should declare MCS13 only(otherwise return 0)
- * *****************************************************************************************************************/
+ */
static u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
{
return 0;
- }
-
-
-/**
-* Function: HTIOTActIsDisableMCS15
-*
-* Overview: Check whether driver should declare capability of receiving MCS15
-*
-* Input:
-* PADAPTER Adapter,
-*
-* Output: None
-* Return: true if driver should disable MCS15
-* 2008.04.15 Emily
-*/
+}
+
+/*
+ * Function: HTIOTActIsDisableMCS15
+ *
+ * Overview: Check whether driver should declare capability of receiving
+ * MCS15
+ *
+ * Input:
+ * PADAPTER Adapter,
+ *
+ * Output: None
+ * Return: true if driver should disable MCS15
+ * 2008.04.15 Emily
+ */
static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
{
bool retValue = false;
@@ -440,18 +443,19 @@ static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
return retValue;
}
-/**
-* Function: HTIOTActIsDisableMCSTwoSpatialStream
-*
-* Overview: Check whether driver should declare capability of receiving All 2 ss packets
-*
-* Input:
-* PADAPTER Adapter,
-*
-* Output: None
-* Return: true if driver should disable all two spatial stream packet
-* 2008.04.21 Emily
-*/
+/*
+ * Function: HTIOTActIsDisableMCSTwoSpatialStream
+ *
+ * Overview: Check whether driver should declare capability of receiving
+ * All 2 ss packets
+ *
+ * Input:
+ * PADAPTER Adapter,
+ *
+ * Output: None
+ * Return: true if driver should disable all two spatial stream packet
+ * 2008.04.21 Emily
+ */
static bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee,
u8 *PeerMacAddr)
{
@@ -461,25 +465,27 @@ static bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee,
return false;
}
-/********************************************************************************************************************
+/*
*function: Check whether driver should disable EDCA turbo mode
* input: struct ieee80211_device* ieee
* u8* PeerMacAddr
* output: none
- * return: return 1 if driver should disable EDCA turbo mode(otherwise return 0)
- * *****************************************************************************************************************/
+ * return: return 1 if driver should disable EDCA turbo mode
+ * (otherwise return 0)
+ */
static u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee,
u8 *PeerMacAddr)
{ /* default enable EDCA Turbo mode. */
return false;
}
-/********************************************************************************************************************
- *function: Check whether we need to use OFDM to sned MGNT frame for broadcom AP
+/*
+ *function: Check whether we need to use OFDM to send MGNT frame for
+ * broadcom AP
* input: struct ieee80211_network *network //current network we live
* output: none
* return: return 1 if true
- * *****************************************************************************************************************/
+ */
static u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
{
u8 retValue = 0;
@@ -496,6 +502,7 @@ static u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
static u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
{
u8 retValue = 0;
+
if ((memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3) == 0) ||
(memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) == 0) ||
(memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) == 0))
@@ -504,52 +511,50 @@ static u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
return retValue;
}
-void HTResetIOTSetting(
- PRT_HIGH_THROUGHPUT pHTInfo
-)
+void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo)
{
pHTInfo->IOTAction = 0;
pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
}
-
-/********************************************************************************************************************
+/*
*function: Construct Capablility Element in Beacon... if HTEnable is turned on
* input: struct ieee80211_device* ieee
- * u8* posHTCap //pointer to store Capability Ele
- * u8* len //store length of CE
- * u8 IsEncrypt //whether encrypt, needed further
+ * u8* posHTCap //pointer to store Capability Ele
+ * u8* len //store length of CE
+ * u8 IsEncrypt //whether encrypt, needed further
* output: none
* return: none
* notice: posHTCap can't be null and should be initialized before.
- * *****************************************************************************************************************/
+ */
void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 IsEncrypt)
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_CAPABILITY_ELE pCapELE = NULL;
//u8 bIsDeclareMCS13;
- if ((posHTCap == NULL) || (pHT == NULL)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTCap or pHTInfo can't be null in HTConstructCapabilityElement()\n");
+ if (!posHTCap || !pHT) {
+ IEEE80211_DEBUG(IEEE80211_DL_ERR,
+ "posHTCap or pHTInfo can't be null in %s\n",
+ __func__);
return;
}
memset(posHTCap, 0, *len);
if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
+
memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
- pCapELE = (PHT_CAPABILITY_ELE)&(posHTCap[4]);
+ pCapELE = (PHT_CAPABILITY_ELE)&posHTCap[4];
} else {
pCapELE = (PHT_CAPABILITY_ELE)posHTCap;
}
-
//HT capability info
pCapELE->AdvCoding = 0; // This feature is not supported now!!
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
+ if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
pCapELE->ChlWidth = 0;
- } else {
+ else
pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
- }
// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
pCapELE->MimoPwrSave = pHT->SelfMimoPs;
@@ -557,7 +562,7 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
pCapELE->ShortGI20Mhz = 1; // We can receive Short GI!!
pCapELE->ShortGI40Mhz = 1; // We can receive Short GI!!
//DbgPrint("TX HT cap/info ele BW=%d SG20=%d SG40=%d\n\r",
- //pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
+ //pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
pCapELE->TxSTBC = 1;
pCapELE->RxSTBC = 0;
pCapELE->DelayBA = 0; // Do not support now!!
@@ -566,9 +571,10 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
pCapELE->PSMP = 0; // Do not support now!!
pCapELE->LSigTxopProtect = 0; // Do not support now!!
-
- //MAC HT parameters info
- // TODO: Nedd to take care of this part
+ /*
+ * MAC HT parameters info
+ * TODO: Need to take care of this part
+ */
IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
if (IsEncrypt) {
@@ -590,10 +596,13 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
pCapELE->MCS[1] &= 0x00;
- // 2008.06.12
- // For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
+ /*
+ * 2008.06.12
+ * For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
+ */
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
int i;
+
for (i = 1; i < 16; i++)
pCapELE->MCS[i] = 0;
}
@@ -601,7 +610,6 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
//Extended HT Capability Info
memset(&pCapELE->ExtHTCapInfo, 0, 2);
-
//TXBF Capabilities
memset(pCapELE->TxBFCap, 0, 4);
@@ -613,31 +621,35 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
else
*len = 26 + 2;
-
-
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTCap, *len -2);
- //Print each field in detail. Driver should not print out this message by default
+ /*
+ * Print each field in detail. Driver should not print out this message
+ * by default
+ */
// HTDebugHTCapability(posHTCap, (u8*)"HTConstructCapability()");
- return;
-
}
-/********************************************************************************************************************
- *function: Construct Information Element in Beacon... if HTEnable is turned on
+
+/*
+ *function: Construct Information Element in Beacon... if HTEnable is turned on
* input: struct ieee80211_device* ieee
- * u8* posHTCap //pointer to store Information Ele
- * u8* len //store len of
- * u8 IsEncrypt //whether encrypt, needed further
+ * u8* posHTCap //pointer to store Information Ele
+ * u8* len //store len of
+ * u8 IsEncrypt //whether encrypt, needed further
* output: none
* return: none
- * notice: posHTCap can't be null and be initialized before. only AP and IBSS sta should do this
- * *****************************************************************************************************************/
+ * notice: posHTCap can't be null and must be initialized beforehand.
+ * Only AP and IBSS sta should do this
+ */
void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 IsEncrypt)
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_INFORMATION_ELE pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
- if ((posHTInfo == NULL) || (pHTInfoEle == NULL)) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTInfo or pHTInfoEle can't be null in HTConstructInfoElement()\n");
+
+ if (!posHTInfo || !pHTInfoEle) {
+ IEEE80211_DEBUG(IEEE80211_DL_ERR,
+ "posHTInfo or pHTInfoEle can't be null in %s\n",
+ __func__);
return;
}
@@ -661,49 +673,49 @@ void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *le
memset(pHTInfoEle->BasicMSC, 0, 16);
-
*len = 22 + 2; //same above
-
} else {
//STA should not generate High Throughput Information Element
*len = 0;
}
//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTInfo, *len - 2);
//HTDebugHTInfo(posHTInfo, "HTConstructInforElement");
- return;
}
/*
- * According to experiment, Realtek AP to STA (based on rtl8190) may achieve best performance
- * if both STA and AP set limitation of aggregation size to 32K, that is, set AMPDU density to 2
- * (Ref: IEEE 11n specification). However, if Realtek STA associates to other AP, STA should set
- * limitation of aggregation size to 8K, otherwise, performance of traffic stream from STA to AP
- * will be much less than the traffic stream from AP to STA if both of the stream runs concurrently
- * at the same time.
- *
- * Frame Format
- * Element ID Length OUI Type1 Reserved
- * 1 byte 1 byte 3 bytes 1 byte 1 byte
- *
- * OUI = 0x00, 0xe0, 0x4c,
- * Type = 0x02
- * Reserved = 0x00
- *
- * 2007.8.21 by Emily
-*/
-/********************************************************************************************************************
+ * According to experiment, Realtek AP to STA (based on rtl8190) may achieve
+ * best performance if both STA and AP set limitation of aggregation size to
+ * 32K, that is, set AMPDU density to 2 (Ref: IEEE 11n specification).
+ * However, if a Realtek STA associates to another AP, the STA should set the
+ * limitation of aggregation size to 8K; otherwise, the performance of the
+ * traffic stream from STA to AP will be much less than that of the stream
+ * from AP to STA when both streams run concurrently.
+ *
+ * Frame Format
+ * Element ID Length OUI Type1 Reserved
+ * 1 byte 1 byte 3 bytes 1 byte 1 byte
+ *
+ * OUI = 0x00, 0xe0, 0x4c,
+ * Type = 0x02
+ * Reserved = 0x00
+ *
+ * 2007.8.21 by Emily
+ */
+/*
*function: Construct Information Element in Beacon... in RT2RT condition
* input: struct ieee80211_device* ieee
- * u8* posRT2RTAgg //pointer to store Information Ele
- * u8* len //store len
+ * u8* posRT2RTAgg //pointer to store Information Ele
+ * u8* len //store len
* output: none
* return: none
* notice:
- * *****************************************************************************************************************/
+ */
void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len)
{
- if (posRT2RTAgg == NULL) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "posRT2RTAgg can't be null in HTConstructRT2RTAggElement()\n");
+ if (!posRT2RTAgg) {
+ IEEE80211_DEBUG(IEEE80211_DL_ERR,
+ "posRT2RTAgg can't be null in %s\n",
+ __func__);
return;
}
memset(posRT2RTAgg, 0, *len);
@@ -714,9 +726,8 @@ void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
*posRT2RTAgg++ = 0x01;
*posRT2RTAgg = 0x10;//*posRT2RTAgg = 0x02;
- if (ieee->bSupportRemoteWakeUp) {
+ if (ieee->bSupportRemoteWakeUp)
*posRT2RTAgg |= 0x08;//RT_HT_CAP_USE_WOW;
- }
*len = 6 + 2;
return;
@@ -727,38 +738,33 @@ void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
section of code.
if(IS_UNDER_11N_AES_MODE(Adapter))
{
- posRT2RTAgg->Octet[5] |=RT_HT_CAP_USE_AMPDU;
+ posRT2RTAgg->octet[5] |= RT_HT_CAP_USE_AMPDU;
}else
{
- posRT2RTAgg->Octet[5] &= 0xfb;
+ posRT2RTAgg->octet[5] &= 0xfb;
}
*/
-
#else
// Do Nothing
#endif
posRT2RTAgg->Length = 6;
#endif
-
-
-
-
}
-
-/********************************************************************************************************************
+/*
*function: Pick the right Rate Adaptive table to use
* input: struct ieee80211_device* ieee
- * u8* pOperateMCS //A pointer to MCS rate bitmap
+ * u8* pOperateMCS //A pointer to MCS rate bitmap
 * return: always returns true
* notice:
- * *****************************************************************************************************************/
+ */
static u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
{
- u8 i;
- if (pOperateMCS == NULL) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "pOperateMCS can't be null in HT_PickMCSRate()\n");
+ if (!pOperateMCS) {
+ IEEE80211_DEBUG(IEEE80211_DL_ERR,
+ "pOperateMCS can't be null in %s\n",
+ __func__);
return false;
}
@@ -766,62 +772,62 @@ static u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
case IEEE_A:
case IEEE_B:
case IEEE_G:
- //legacy rate routine handled at selectedrate
+ //legacy rate routine handled at selectedrate
- //no MCS rate
- for (i = 0; i <= 15; i++)
- pOperateMCS[i] = 0;
- break;
+ //no MCS rate
+ memset(pOperateMCS, 0, 16);
+ break;
case IEEE_N_24G: //assume CCK rate ok
case IEEE_N_5G:
- // Legacy part we only use 6, 5.5,2,1 for N_24G and 6 for N_5G.
- // Legacy part shall be handled at SelectRateSet().
-
- //HT part
- // TODO: may be different if we have different number of antenna
- pOperateMCS[0] &= RATE_ADPT_1SS_MASK; //support MCS 0~7
- pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
- pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
- break;
+		// Legacy part: we only use 6, 5.5, 2, 1 for N_24G and 6 for N_5G.
+		// The legacy part shall be handled at SelectRateSet().
+
+		//HT part
+		// TODO: may be different if we have a different number of antennas
+ pOperateMCS[0] &= RATE_ADPT_1SS_MASK; //support MCS 0~7
+ pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
+ pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
+ break;
//should never reach here
default:
-
- break;
-
+ break;
}
return true;
}
/*
-* Description:
-* This function will get the highest speed rate in input MCS set.
-*
-* /param Adapter Pionter to Adapter entity
-* pMCSRateSet Pointer to MCS rate bitmap
-* pMCSFilter Pointer to MCS rate filter
-*
-* /return Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter.
-*
-*/
-/********************************************************************************************************************
+ * Description:
+ * This function will get the highest speed rate in input MCS set.
+ *
+ * /param Adapter	Pointer to Adapter entity
+ * pMCSRateSet Pointer to MCS rate bitmap
+ * pMCSFilter Pointer to MCS rate filter
+ *
+ * /return Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter.
+ *
+ */
+/*
*function: This function will get the highest speed rate in input MCS set.
* input: struct ieee80211_device* ieee
* u8* pMCSRateSet //Pointer to MCS rate bitmap
* u8* pMCSFilter //Pointer to MCS rate filter
* return: Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter
* notice:
- * *****************************************************************************************************************/
+ */
u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter)
{
u8 i, j;
u8 bitMap;
u8 mcsRate = 0;
u8 availableMcsRate[16];
- if (pMCSRateSet == NULL || pMCSFilter == NULL) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "pMCSRateSet or pMCSFilter can't be null in HTGetHighestMCSRate()\n");
+
+ if (!pMCSRateSet || !pMCSFilter) {
+ IEEE80211_DEBUG(IEEE80211_DL_ERR,
+ "pMCSRateSet or pMCSFilter can't be null in %s\n",
+ __func__);
return false;
}
for (i = 0; i < 16; i++)
@@ -838,60 +844,57 @@ u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSF
if (availableMcsRate[i] != 0) {
bitMap = availableMcsRate[i];
for (j = 0; j < 8; j++) {
- if ((bitMap%2) != 0) {
- if (HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate))
- mcsRate = (8*i+j);
+ if ((bitMap % 2) != 0) {
+ if (HTMcsToDataRate(ieee, (8 * i + j)) > HTMcsToDataRate(ieee, mcsRate))
+ mcsRate = (8 * i + j);
}
bitMap >>= 1;
}
}
}
- return (mcsRate|0x80);
+ return (mcsRate | 0x80);
}
-
-
/*
-**
-**1.Filter our operation rate set with AP's rate set
-**2.shall reference channel bandwidth, STBC, Antenna number
-**3.generate rate adative table for firmware
-**David 20060906
-**
-** \pHTSupportedCap: the connected STA's supported rate Capability element
-*/
+ * 1. Filter our operation rate set with the AP's rate set.
+ * 2. Shall reference channel bandwidth, STBC, antenna number.
+ * 3. Generate a rate adaptive table for firmware.
+ * David 20060906
+ *
+ * \pHTSupportedCap: the connected STA's supported rate Capability element
+ */
static u8 HTFilterMCSRate(struct ieee80211_device *ieee, u8 *pSupportMCS,
u8 *pOperateMCS)
{
-
u8 i = 0;
// filter out operational rate set not supported by AP, the length of it is 16
- for (i = 0; i <= 15; i++) {
- pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i]&pSupportMCS[i];
- }
-
+ for (i = 0; i <= 15; i++)
+ pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i] & pSupportMCS[i];
// TODO: adjust our operational rate set according to our channel bandwidth, STBC and Antenna number
-
- // TODO: fill suggested rate adaptive rate index and give firmware info using Tx command packet
- // we also shall suggested the first start rate set according to our singal strength
+ /*
+	 * TODO: fill the suggested rate adaptive rate index and give firmware
+	 * info using a Tx command packet. We shall also suggest the first
+	 * start rate set according to our signal strength.
+ */
HT_PickMCSRate(ieee, pOperateMCS);
// For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
pOperateMCS[1] = 0;
- //
- // For RTL819X, we support only MCS0~15.
- // And also, we do not know how to use MCS32 now.
- //
+ /*
+ * For RTL819X, we support only MCS0~15.
+ * And also, we do not know how to use MCS32 now.
+ */
for (i = 2; i <= 15; i++)
pOperateMCS[i] = 0;
return true;
}
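
HTFilterMCSRate() above is a per-byte AND of our configured 16-byte MCS bitmap against the AP's advertised set, after which HT_PickMCSRate() masks the result per spatial stream. A standalone sketch of that pipeline, assuming u8 is uint8_t; the mask values here are illustrative placeholders, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;	/* mirrors the driver's u8 */

/* Illustrative placeholder masks; the driver defines its own values. */
#define RATE_ADPT_1SS_MASK	0xFF	/* MCS 0~7 */
#define RATE_ADPT_2SS_MASK	0xF0	/* part of MCS 8~15 */
#define RATE_ADPT_MCS32_MASK	0x01	/* MCS 32 */

/* AND our 16-byte operational MCS bitmap with the AP's advertised set,
 * then apply the per-stream masks, as HTFilterMCSRate()/HT_PickMCSRate() do. */
static void filter_mcs(const u8 *ours, const u8 *ap, u8 *out)
{
	int i;

	for (i = 0; i < 16; i++)
		out[i] = ours[i] & ap[i];

	out[0] &= RATE_ADPT_1SS_MASK;
	out[1] &= RATE_ADPT_2SS_MASK;
	out[3] &= RATE_ADPT_MCS32_MASK;
}

int main(void)
{
	u8 ours[16] = { 0xFF, 0xFF };	/* we support MCS 0~15 */
	u8 ap[16] = { 0xFF, 0x0F };	/* AP advertises MCS 0~11 */
	u8 out[16];

	filter_mcs(ours, ap, out);
	printf("out[0]=0x%02x out[1]=0x%02x\n", out[0], out[1]);	/* 0xff 0x00 */
	return 0;
}
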
-void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+
+void HTSetConnectBwMode(struct ieee80211_device *ieee, enum ht_channel_width Bandwidth, enum ht_extension_chan_offset Offset);
void HTOnAssocRsp(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -904,7 +907,9 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
if (!pHTInfo->bCurrentHTSupport) {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR,
+ "<=== %s: HT_DISABLE\n",
+ __func__);
return;
}
IEEE80211_DEBUG(IEEE80211_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n");
@@ -924,43 +929,44 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
else
pPeerHTInfo = (PHT_INFORMATION_ELE)(pHTInfo->PeerHTInfoBuf);
-
////////////////////////////////////////////////////////
// Configurations:
////////////////////////////////////////////////////////
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTCap, sizeof(HT_CAPABILITY_ELE));
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, pPeerHTCap, sizeof(HT_CAPABILITY_ELE));
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTInfo, sizeof(HT_INFORMATION_ELE));
// Config Supported Channel Width setting
//
- HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));
+ HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth), (enum ht_extension_chan_offset)(pPeerHTInfo->ExtChlOffset));
pHTInfo->bCurTxBW40MHz = (pPeerHTInfo->RecommemdedTxWidth == 1);
- //
- // Update short GI/ long GI setting
- //
- // TODO:
+ /*
+ * Update short GI/ long GI setting
+ *
+ * TODO:
+ */
pHTInfo->bCurShortGI20MHz = pHTInfo->bRegShortGI20MHz &&
(pPeerHTCap->ShortGI20Mhz == 1);
pHTInfo->bCurShortGI40MHz = pHTInfo->bRegShortGI40MHz &&
(pPeerHTCap->ShortGI40Mhz == 1);
- //
- // Config TX STBC setting
- //
- // TODO:
+ /*
+ * Config TX STBC setting
+ *
+ * TODO:
+ */
- //
- // Config DSSS/CCK mode in 40MHz mode
- //
- // TODO:
+ /*
+ * Config DSSS/CCK mode in 40MHz mode
+ *
+ * TODO:
+ */
pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK &&
(pPeerHTCap->DssCCk == 1);
-
- //
- // Config and configure A-MSDU setting
- //
+ /*
+ * Config and configure A-MSDU setting
+ */
pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;
@@ -969,26 +975,26 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
else
pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
-
-
- //
- // Config A-MPDU setting
- //
+ /*
+ * Config A-MPDU setting
+ */
pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
- // <1> Decide AMPDU Factor
-
- // By Emily
+ /*
+ * <1> Decide AMPDU Factor
+ * By Emily
+ */
if (!pHTInfo->bRegRT2RTAggregation) {
// Decide AMPDU Factor according to protocol handshake
if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
} else {
- // Set MPDU density to 2 to Realtek AP, and set it to 0 for others
- // Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
+ /*
+		 * Set MPDU density to 2 for a Realtek AP, and set it to 0 for others.
+		 * Replaces the MPDU factor declared in the original association response frame format. 2007.08.20 by Emily
+ */
if (ieee->current_network.bssht.bdRT2RTAggregation) {
if (ieee->pairwise_key_type != KEY_TYPE_NA)
// Realtek may set 32k in security mode and 64k for others
@@ -1003,8 +1009,10 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
}
}
- // <2> Set AMPDU Minimum MPDU Start Spacing
- // 802.11n 3.0 section 9.7d.3
+ /*
+ * <2> Set AMPDU Minimum MPDU Start Spacing
+ * 802.11n 3.0 section 9.7d.3
+ */
if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
else
@@ -1026,22 +1034,25 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// Rx Reorder Setting
pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;
- //
- // Filter out unsupported HT rate for this AP
- // Update RATR table
- // This is only for 8190 ,8192 or later product which using firmware to handle rate adaptive mechanism.
- //
+ /*
+ * Filter out unsupported HT rate for this AP
+ * Update RATR table
+	 * This is only for 8190, 8192 or later products which use firmware to
+	 * handle the rate adaptive mechanism.
+ */
- // Handle Ralink AP bad MCS rate set condition. Joseph.
- // This fix the bug of Ralink AP. This may be removed in the future.
+ /*
+	 * Handle the Ralink AP bad MCS rate set condition. Joseph.
+	 * This fixes a bug of the Ralink AP. It may be removed in the future.
+ */
if (pPeerHTCap->MCS[0] == 0)
pPeerHTCap->MCS[0] = 0xff;
HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
- //
- // Config MIMO Power Save setting
- //
+ /*
+ * Config MIMO Power Save setting
+ */
pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
pMcsFilter = MCS_FILTER_1SS;
@@ -1052,31 +1063,30 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, pMcsFilter);
ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
- //
- // Config current operation mode.
- //
+ /*
+ * Config current operation mode.
+ */
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
-
-
-
}
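
The A-MPDU negotiation in the non-RT2RT branch above reduces to two comparisons: the aggregation factor (maximum A-MPDU length exponent) is clamped to the peer's MaxRxAMPDUFactor, while the MPDU density (minimum start spacing) takes the larger of the local and peer requirements. A minimal sketch of that min/max logic:

#include <stdio.h>

/*
 * Negotiate A-MPDU parameters the way the handshake path above does:
 * the factor (max A-MPDU length exponent) is bounded by the peer's
 * receive limit, the density (min MPDU start spacing) by the stricter,
 * i.e. larger, of the two requirements.
 */
static void ampdu_negotiate(unsigned int our_factor, unsigned int peer_factor,
			    unsigned int our_density, unsigned int peer_density,
			    unsigned int *cur_factor, unsigned int *cur_density)
{
	*cur_factor = our_factor > peer_factor ? peer_factor : our_factor;
	*cur_density = our_density > peer_density ? our_density : peer_density;
}

int main(void)
{
	unsigned int factor, density;

	ampdu_negotiate(3, 2, 0, 5, &factor, &density);
	printf("factor=%u density=%u\n", factor, density);	/* 2, 5 */
	return 0;
}
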
void HTSetConnectBwModeCallback(struct ieee80211_device *ieee);
-/********************************************************************************************************************
+/*
*function: initialize HT info(struct PRT_HIGH_THROUGHPUT)
* input: struct ieee80211_device* ieee
* output: none
* return: none
- * notice: This function is called when * (1) MPInitialization Phase * (2) Receiving of Deauthentication from AP
-********************************************************************************************************************/
+ * notice: This function is called when:
+ *         (1) MPInitialization Phase
+ *         (2) Receiving of Deauthentication from AP
+ */
// TODO: Should this function be called when receiving a Disassociation?
void HTInitializeHTInfo(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- //
- // These parameters will be reset when receiving deauthentication packet
- //
+ /*
+ * These parameters will be reset when receiving deauthentication packet
+ */
IEEE80211_DEBUG(IEEE80211_DL_HT, "===========>%s()\n", __func__);
pHTInfo->bCurrentHTSupport = false;
@@ -1089,10 +1099,12 @@ void HTInitializeHTInfo(struct ieee80211_device *ieee)
pHTInfo->bCurShortGI40MHz = false;
pHTInfo->bForcedShortGI = false;
- // CCK rate support
- // This flag is set to true to support CCK rate by default.
- // It will be affected by "pHTInfo->bRegSuppCCK" and AP capabilities only when associate to
- // 11N BSS.
+ /*
+ * CCK rate support
+ * This flag is set to true to support CCK rate by default.
+ * It will be affected by "pHTInfo->bRegSuppCCK" and AP capabilities
+	 * only when associated to an 11N BSS.
+ */
pHTInfo->bCurSuppCCK = true;
// AMSDU related
@@ -1103,13 +1115,11 @@ void HTInitializeHTInfo(struct ieee80211_device *ieee)
pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
-
// Initialize all of the parameters related to 11n
- memset((void *)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap));
- memset((void *)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo));
- memset((void *)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf));
- memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf));
+ memset(&pHTInfo->SelfHTCap, 0, sizeof(pHTInfo->SelfHTCap));
+ memset(&pHTInfo->SelfHTInfo, 0, sizeof(pHTInfo->SelfHTInfo));
+ memset(&pHTInfo->PeerHTCapBuf, 0, sizeof(pHTInfo->PeerHTCapBuf));
+ memset(&pHTInfo->PeerHTInfoBuf, 0, sizeof(pHTInfo->PeerHTInfoBuf));
pHTInfo->bSwBwInProgress = false;
pHTInfo->ChnlOp = CHNLOP_NONE;
@@ -1125,22 +1135,23 @@ void HTInitializeHTInfo(struct ieee80211_device *ieee)
//MCS rate initialized here
{
- u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
+ u8 *RegHTSuppRateSets = &ieee->RegHTSuppRateSet[0];
+
RegHTSuppRateSets[0] = 0xFF; //support MCS 0~7
RegHTSuppRateSets[1] = 0xFF; //support MCS 8~15
RegHTSuppRateSets[4] = 0x01; //support MCS 32
}
}
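
The 16-byte rate sets initialized above (and walked by HTGetHighestMCSRate() as rate 8 * i + j) are plain bitmaps: MCS n is bit (n % 8) of byte (n / 8), so byte 0 = 0xFF enables MCS 0~7, byte 1 = 0xFF enables MCS 8~15, and bit 0 of byte 4 enables MCS 32. A small helper sketch, with u8 assumed to be uint8_t:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;	/* mirrors the driver's u8 */

/* MCS n lives at bit (n % 8) of byte (n / 8) of the 16-byte rate set. */
static int mcs_supported(const u8 *set, unsigned int mcs)
{
	return (set[mcs / 8] >> (mcs % 8)) & 1;
}

int main(void)
{
	u8 set[16] = { 0 };

	set[0] = 0xFF;	/* support MCS 0~7 */
	set[1] = 0xFF;	/* support MCS 8~15 */
	set[4] = 0x01;	/* support MCS 32 */

	printf("MCS 7:%d MCS 16:%d MCS 32:%d\n",
	       mcs_supported(set, 7), mcs_supported(set, 16),
	       mcs_supported(set, 32));	/* 1 0 1 */
	return 0;
}
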
-/********************************************************************************************************************
+
+/*
*function: initialize Bss HT structure(struct PBSS_HT)
* input: PBSS_HT pBssHT //to be initialized
* output: none
* return: none
* notice: This function is called when initialize network structure
-********************************************************************************************************************/
+ */
void HTInitializeBssDesc(PBSS_HT pBssHT)
{
-
pBssHT->bdSupportHT = false;
memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf));
pBssHT->bdHTCapLen = 0;
@@ -1152,14 +1163,16 @@ void HTInitializeBssDesc(PBSS_HT pBssHT)
pBssHT->bdRT2RTAggregation = false;
pBssHT->bdRT2RTLongSlotTime = false;
}
-/********************************************************************************************************************
+
+/*
*function: initialize Bss HT structure(struct PBSS_HT)
* input: struct ieee80211_device *ieee
- * struct ieee80211_network *pNetwork //usually current network we are live in
+ * struct ieee80211_network *pNetwork //usually current network
+ *                                            we live in
* output: none
* return: none
* notice: This function should ONLY be called before association
-********************************************************************************************************************/
+ */
void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -1198,8 +1211,10 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee802
// Determine the IOT Peer Vendor.
HTIOTPeerDetermine(ieee);
- // Decide IOT Action
- // Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided
+ /*
+ * Decide IOT Action
+ * Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided
+ */
pHTInfo->IOTAction = 0;
bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
if (bIOTAction)
@@ -1213,7 +1228,6 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee802
if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
-
bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
@@ -1225,8 +1239,6 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee802
bIOTAction = HTIOTActIsCCDFsync(pNetwork->bssid);
if (bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
-
-
} else {
pHTInfo->bCurrentHTSupport = false;
pHTInfo->bCurrentRT2RTAggregation = false;
@@ -1234,7 +1246,6 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee802
pHTInfo->IOTAction = 0;
}
-
}
void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork)
@@ -1244,27 +1255,27 @@ void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_
PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
if (pHTInfo->bCurrentHTSupport) {
- //
- // Config current operation mode.
- //
+ /*
+ * Config current operation mode.
+ */
if (pNetwork->bssht.bdHTInfoLen != 0)
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
- //
- // <TODO: Config according to OBSS non-HT STA present!!>
- //
+ /*
+ * <TODO: Config according to OBSS non-HT STA present!!>
+ */
}
}
EXPORT_SYMBOL(HTUpdateSelfAndPeerSetting);
-/********************************************************************************************************************
+/*
*function: check whether HT control field exists
* input: struct ieee80211_device *ieee
* u8* pFrame //coming skb->data
* output: none
* return: return true if HT control field exists(false otherwise)
* notice:
-********************************************************************************************************************/
+ */
u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame)
{
if (ieee->pHTInfo->bCurrentHTSupport) {
@@ -1276,10 +1287,10 @@ u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame)
return false;
}
-//
-// This function set bandwidth mode in protocol layer.
-//
-void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
+/*
+ * This function set bandwidth mode in protocol layer.
+ */
+void HTSetConnectBwMode(struct ieee80211_device *ieee, enum ht_channel_width Bandwidth, enum ht_extension_chan_offset Offset)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u32 flags = 0;
@@ -1287,8 +1298,6 @@ void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidt
if (!pHTInfo->bRegBW40MHz)
return;
-
-
// To reduce dummy operation
// if((pHTInfo->bCurBW40MHz==false && Bandwidth==HT_CHANNEL_WIDTH_20) ||
// (pHTInfo->bCurBW40MHz==true && Bandwidth==HT_CHANNEL_WIDTH_20_40 && Offset==pHTInfo->CurSTAExtChnlOffset))
@@ -1318,10 +1327,12 @@ void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidt
pHTInfo->bSwBwInProgress = true;
- // TODO: 2007.7.13 by Emily Wait 2000ms in order to guarantee that switching
- // bandwidth is executed after scan is finished. It is a temporal solution
- // because software should ganrantee the last operation of switching bandwidth
- // is executed properlly.
+ /*
+	 * TODO: 2007.7.13 by Emily. Wait 2000ms in order to guarantee that
+	 * switching bandwidth is executed after scan is finished. It is a
+	 * temporary solution because software should guarantee that the last
+	 * operation of switching bandwidth is executed properly.
+ */
HTSetConnectBwModeCallback(ieee);
// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
@@ -1335,9 +1346,9 @@ void HTSetConnectBwModeCallback(struct ieee80211_device *ieee)
if (pHTInfo->bCurBW40MHz) {
if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
- ieee->set_chan(ieee->dev, ieee->current_network.channel+2);
+ ieee->set_chan(ieee->dev, ieee->current_network.channel + 2);
else if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_LOWER)
- ieee->set_chan(ieee->dev, ieee->current_network.channel-2);
+ ieee->set_chan(ieee->dev, ieee->current_network.channel - 2);
else
ieee->set_chan(ieee->dev, ieee->current_network.channel);
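
The channel +/- 2 adjustment above follows from 2.4 GHz channel numbering: adjacent channel numbers are 5 MHz apart, so the center of a 40 MHz control-plus-extension pair sits 10 MHz, i.e. two channel numbers, above or below the control channel depending on whether the extension channel is upper or lower. A sketch of that mapping; the enum names here are illustrative stand-ins for the driver's HT_EXTCHNL_OFFSET values:

#include <stdio.h>

/* Illustrative names; the driver uses HT_EXTCHNL_OFFSET_UPPER/_LOWER. */
enum ext_chan_offset { EXT_NONE, EXT_UPPER, EXT_LOWER };

/*
 * Map a 20 MHz control channel to the center channel the radio tunes to
 * in 40 MHz operation: channel numbers step in 5 MHz, so the center of a
 * control+extension pair sits two channel numbers (10 MHz) away.
 */
static int center_channel(int control, enum ext_chan_offset off)
{
	switch (off) {
	case EXT_UPPER:
		return control + 2;
	case EXT_LOWER:
		return control - 2;
	default:
		return control;
	}
}

int main(void)
{
	printf("ch6 + upper ext -> %d\n", center_channel(6, EXT_UPPER));	/* 8 */
	printf("ch6 + lower ext -> %d\n", center_channel(6, EXT_LOWER));	/* 4 */
	return 0;
}
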
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
index 71df9d9e2e99..3052f53d2e7e 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
@@ -2,532 +2,81 @@
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
-#define MAX_WMMELE_LENGTH 64
-
-//
-// QoS mode.
-// enum 0, 1, 2, 4: since we can use the OR(|) operation.
-//
-// QOS_MODE is redefined for enum can't be ++, | under C++ compiler, 2006.05.17, by rcnjko.
-//typedef enum _QOS_MODE{
-// QOS_DISABLE = 0,
-// QOS_WMM = 1,
-// QOS_EDCA = 2,
-// QOS_HCCA = 4,
-//}QOS_MODE,*PQOS_MODE;
-//
-typedef u32 QOS_MODE, *PQOS_MODE;
-#define QOS_DISABLE 0
-#define QOS_WMM 1
-#define QOS_WMMSA 2
-#define QOS_EDCA 4
-#define QOS_HCCA 8
-#define QOS_WMM_UAPSD 16 //WMM Power Save, 2006-06-14 Isaiah
-
-#define AC_PARAM_SIZE 4
-#define WMM_PARAM_ELE_BODY_LEN 18
-
-//
-// QoS ACK Policy Field Values
-// Ref: WMM spec 2.1.6: QoS Control Field, p.10.
-//
-typedef enum _ACK_POLICY{
- eAckPlc0_ACK = 0x00,
- eAckPlc1_NoACK = 0x01,
-} ACK_POLICY, *PACK_POLICY;
-
-#define WMM_PARAM_ELEMENT_SIZE (8+(4*AC_PARAM_SIZE))
-
-//
-// QoS Control Field
-// Ref:
-// 1. WMM spec 2.1.6: QoS Control Field, p.9.
-// 2. 802.11e/D13.0 7.1.3.5, p.26.
-//
-typedef union _QOS_CTRL_FIELD{
- u8 charData[2];
- u16 shortData;
-
- // WMM spec
- struct {
- u8 UP:3;
- u8 usRsvd1:1;
- u8 EOSP:1;
- u8 AckPolicy:2;
- u8 usRsvd2:1;
- u8 ucRsvdByte;
- }WMM;
-
- // 802.11e: QoS data type frame sent by non-AP QSTAs.
- struct {
- u8 TID:4;
- u8 bIsQsize:1;// 0: BIT[8:15] is TXOP Duration Requested, 1: BIT[8:15] is Queue Size.
- u8 AckPolicy:2;
- u8 usRsvd:1;
- u8 TxopOrQsize; // (BIT4=0)TXOP Duration Requested or (BIT4=1)Queue Size.
- }BySta;
-
- // 802.11e: QoS data, QoS Null, and QoS Data+CF-Ack frames sent by HC.
- struct {
- u8 TID:4;
- u8 EOSP:1;
- u8 AckPolicy:2;
- u8 usRsvd:1;
- u8 PSBufState; // QAP PS Buffer State.
- }ByHc_Data;
-
- // 802.11e: QoS (+) CF-Poll frames sent by HC.
- struct {
- u8 TID:4;
- u8 EOSP:1;
- u8 AckPolicy:2;
- u8 usRsvd:1;
- u8 TxopLimit; // TXOP Limit.
- }ByHc_CFP;
-
-}QOS_CTRL_FIELD, *PQOS_CTRL_FIELD;
-
-
-//
-// QoS Info Field
-// Ref:
-// 1. WMM spec 2.2.1: WME Information Element, p.11.
-// 2. 8185 QoS code: QOS_INFO [def. in QoS_mp.h]
-//
-typedef union _QOS_INFO_FIELD{
- u8 charData;
-
- struct {
- u8 ucParameterSetCount:4;
- u8 ucReserved:4;
- }WMM;
-
- struct {
- //Ref WMM_Specification_1-1.pdf, 2006-06-13 Isaiah
- u8 ucAC_VO_UAPSD:1;
- u8 ucAC_VI_UAPSD:1;
- u8 ucAC_BE_UAPSD:1;
- u8 ucAC_BK_UAPSD:1;
- u8 ucReserved1:1;
- u8 ucMaxSPLen:2;
- u8 ucReserved2:1;
-
- }ByWmmPsSta;
-
- struct {
- //Ref WMM_Specification_1-1.pdf, 2006-06-13 Isaiah
- u8 ucParameterSetCount:4;
- u8 ucReserved:3;
- u8 ucApUapsd:1;
- }ByWmmPsAp;
-
- struct {
- u8 ucAC3_UAPSD:1;
- u8 ucAC2_UAPSD:1;
- u8 ucAC1_UAPSD:1;
- u8 ucAC0_UAPSD:1;
- u8 ucQAck:1;
- u8 ucMaxSPLen:2;
- u8 ucMoreDataAck:1;
- } By11eSta;
-
- struct {
- u8 ucParameterSetCount:4;
- u8 ucQAck:1;
- u8 ucQueueReq:1;
- u8 ucTXOPReq:1;
- u8 ucReserved:1;
- } By11eAp;
-
- struct {
- u8 ucReserved1:4;
- u8 ucQAck:1;
- u8 ucReserved2:2;
- u8 ucMoreDataAck:1;
- } ByWmmsaSta;
-
- struct {
- u8 ucReserved1:4;
- u8 ucQAck:1;
- u8 ucQueueReq:1;
- u8 ucTXOPReq:1;
- u8 ucReserved2:1;
- } ByWmmsaAp;
-
- struct {
- u8 ucAC3_UAPSD:1;
- u8 ucAC2_UAPSD:1;
- u8 ucAC1_UAPSD:1;
- u8 ucAC0_UAPSD:1;
- u8 ucQAck:1;
- u8 ucMaxSPLen:2;
- u8 ucMoreDataAck:1;
- } ByAllSta;
-
- struct {
- u8 ucParameterSetCount:4;
- u8 ucQAck:1;
- u8 ucQueueReq:1;
- u8 ucTXOPReq:1;
- u8 ucApUapsd:1;
- } ByAllAp;
-
-}QOS_INFO_FIELD, *PQOS_INFO_FIELD;
-
-//
-// ACI to AC coding.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
-//
-// AC_CODING is redefined for enum can't be ++, | under C++ compiler, 2006.05.17, by rcnjko.
-//typedef enum _AC_CODING{
-// AC0_BE = 0, // ACI: 0x00 // Best Effort
-// AC1_BK = 1, // ACI: 0x01 // Background
-// AC2_VI = 2, // ACI: 0x10 // Video
-// AC3_VO = 3, // ACI: 0x11 // Voice
-// AC_MAX = 4, // Max: define total number; Should not to be used as a real enum.
-//}AC_CODING,*PAC_CODING;
-//
-typedef u32 AC_CODING;
-#define AC0_BE 0 // ACI: 0x00 // Best Effort
-#define AC1_BK 1 // ACI: 0x01 // Background
-#define AC2_VI 2 // ACI: 0x10 // Video
-#define AC3_VO 3 // ACI: 0x11 // Voice
-#define AC_MAX 4 // Max: define total number; Should not to be used as a real enum.
-
-//
-// ACI/AIFSN Field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _ACI_AIFSN{
- u8 charData;
-
- struct {
- u8 AIFSN:4;
- u8 ACM:1;
- u8 ACI:2;
- u8 Reserved:1;
- }f; // Field
-}ACI_AIFSN, *PACI_AIFSN;
-
-//
-// ECWmin/ECWmax field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
-//
-typedef union _ECW{
- u8 charData;
- struct {
- u8 ECWmin:4;
- u8 ECWmax:4;
- }f; // Field
-}ECW, *PECW;
-
-//
-// AC Parameters Record Format.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _AC_PARAM{
- u32 longData;
- u8 charData[4];
-
- struct {
- ACI_AIFSN AciAifsn;
- ECW Ecw;
- u16 TXOPLimit;
- }f; // Field
-}AC_PARAM, *PAC_PARAM;
-
-
-
-//
-// QoS element subtype
-//
-typedef enum _QOS_ELE_SUBTYPE{
- QOSELE_TYPE_INFO = 0x00, // 0x00: Information element
- QOSELE_TYPE_PARAM = 0x01, // 0x01: parameter element
-} QOS_ELE_SUBTYPE, *PQOS_ELE_SUBTYPE;
-
-
-//
-// Direction Field Values.
-// Ref: WMM spec 2.2.11: WME TSPEC Element, p.18.
-//
-typedef enum _DIRECTION_VALUE{
+/*
+ * ACI/AIFSN Field.
+ * Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
+ * Note: 1 Byte Length
+ */
+struct aci_aifsn {
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8:1;
+};
+
+/*
+ * Direction Field Values.
+ * Ref: WMM spec 2.2.11: WME TSPEC Element, p.18.
+ */
+enum direction_value {
DIR_UP = 0, // 0x00 // UpLink
DIR_DOWN = 1, // 0x01 // DownLink
DIR_DIRECT = 2, // 0x10 // DirectLink
DIR_BI_DIR = 3, // 0x11 // Bi-Direction
-} DIRECTION_VALUE, *PDIRECTION_VALUE;
-
-
-//
-// TS Info field in WMM TSPEC Element.
-// Ref:
-// 1. WMM spec 2.2.11: WME TSPEC Element, p.18.
-// 2. 8185 QoS code: QOS_TSINFO [def. in QoS_mp.h]
-//
-typedef union _QOS_TSINFO{
- u8 charData[3];
- struct {
- u8 ucTrafficType:1; //WMM is reserved
- u8 ucTSID:4;
- u8 ucDirection:2;
- u8 ucAccessPolicy:2; //WMM: bit8=0, bit7=1
- u8 ucAggregation:1; //WMM is reserved
- u8 ucPSB:1; //WMMSA is APSD
- u8 ucUP:3;
- u8 ucTSInfoAckPolicy:2; //WMM is reserved
- u8 ucSchedule:1; //WMM is reserved
- u8 ucReserved:7;
- }field;
-}QOS_TSINFO, *PQOS_TSINFO;
-
-//
-// WMM TSPEC Body.
-// Ref: WMM spec 2.2.11: WME TSPEC Element, p.16.
-//
-typedef union _TSPEC_BODY{
- u8 charData[55];
-
- struct {
- QOS_TSINFO TSInfo; //u8 TSInfo[3];
- u16 NominalMSDUsize;
- u16 MaxMSDUsize;
- u32 MinServiceItv;
- u32 MaxServiceItv;
- u32 InactivityItv;
- u32 SuspenItv;
- u32 ServiceStartTime;
- u32 MinDataRate;
- u32 MeanDataRate;
- u32 PeakDataRate;
- u32 MaxBurstSize;
- u32 DelayBound;
- u32 MinPhyRate;
- u16 SurplusBandwidthAllowance;
- u16 MediumTime;
- } f; // Field
-}TSPEC_BODY, *PTSPEC_BODY;
-
-
-//
-// WMM TSPEC Element.
-// Ref: WMM spec 2.2.11: WME TSPEC Element, p.16.
-//
-typedef struct _WMM_TSPEC{
- u8 ID;
- u8 Length;
- u8 OUI[3];
- u8 OUI_Type;
- u8 OUI_SubType;
- u8 Version;
- TSPEC_BODY Body;
-} WMM_TSPEC, *PWMM_TSPEC;
-
-//
-// ACM implementation method.
-// Annie, 2005-12-13.
-//
-typedef enum _ACM_METHOD{
- eAcmWay0_SwAndHw = 0, // By SW and HW.
- eAcmWay1_HW = 1, // By HW.
- eAcmWay2_SW = 2, // By SW.
-} ACM_METHOD, *PACM_METHOD;
-
-
-typedef struct _ACM{
-// u8 RegEnableACM;
- u64 UsedTime;
- u64 MediumTime;
- u8 HwAcmCtl; // TRUE: UsedTime exceed => Do NOT USE this AC. It wll be written to ACM_CONTROL(0xBF BIT 0/1/2 in 8185B).
-}ACM, *PACM;
-
-typedef u8 AC_UAPSD, *PAC_UAPSD;
-
-#define GET_VO_UAPSD(_apsd) ((_apsd) & BIT(0))
-#define SET_VO_UAPSD(_apsd) ((_apsd) |= BIT(0))
-
-#define GET_VI_UAPSD(_apsd) ((_apsd) & BIT(1))
-#define SET_VI_UAPSD(_apsd) ((_apsd) |= BIT(1))
-
-#define GET_BK_UAPSD(_apsd) ((_apsd) & BIT(2))
-#define SET_BK_UAPSD(_apsd) ((_apsd) |= BIT(2))
-
-#define GET_BE_UAPSD(_apsd) ((_apsd) & BIT(3))
-#define SET_BE_UAPSD(_apsd) ((_apsd) |= BIT(3))
-
-
-//typedef struct _TCLASS{
-// TODO
-//} TCLASS, *PTCLASS;
-typedef union _QOS_TCLAS{
-
- struct _TYPE_GENERAL{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- } TYPE_GENERAL;
-
- struct _TYPE0_ETH{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 SrcAddr[6];
- u8 DstAddr[6];
- u16 Type;
- } TYPE0_ETH;
-
- struct _TYPE1_IPV4{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
- u8 SrcIP[4];
- u8 DstIP[4];
- u16 SrcPort;
- u16 DstPort;
- u8 DSCP;
- u8 Protocol;
- u8 Reserved;
- } TYPE1_IPV4;
-
- struct _TYPE1_IPV6{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u8 Version;
- u8 SrcIP[16];
- u8 DstIP[16];
- u16 SrcPort;
- u16 DstPort;
- u8 FlowLabel[3];
- } TYPE1_IPV6;
-
- struct _TYPE2_8021Q{
- u8 Priority;
- u8 ClassifierType;
- u8 Mask;
- u16 TagType;
- } TYPE2_8021Q;
-} QOS_TCLAS, *PQOS_TCLAS;
-
-//typedef struct _WMM_TSTREAM{
-//
-//- TSPEC
-//- AC (which to mapping)
-//} WMM_TSTREAM, *PWMM_TSTREAM;
-typedef struct _QOS_TSTREAM{
- u8 AC;
- WMM_TSPEC TSpec;
- QOS_TCLAS TClass;
-} QOS_TSTREAM, *PQOS_TSTREAM;
-
-//typedef struct _U_APSD{
-//- TriggerEnable [4]
-//- MaxSPLength
-//- HighestAcBuffered
-//} U_APSD, *PU_APSD;
-
-//joseph TODO:
-// UAPSD function should be implemented by 2 data structure
-// "Qos control field" and "Qos info field"
-//typedef struct _QOS_UAPSD{
-// u8 bTriggerEnable[4];
-// u8 MaxSPLength;
-// u8 HighestBufAC;
-//} QOS_UAPSD, *PQOS_APSD;
-
-//----------------------------------------------------------------------------
-// 802.11 Management frame Status Code field
-//----------------------------------------------------------------------------
-typedef struct _OCTET_STRING{
- u8 *Octet;
- u16 Length;
-}OCTET_STRING, *POCTET_STRING;
-
-//
-// STA QoS data.
-// Ref: DOT11_QOS in 8185 code. [def. in QoS_mp.h]
-//
-typedef struct _STA_QOS{
- //DECLARE_RT_OBJECT(STA_QOS);
- u8 WMMIEBuf[MAX_WMMELE_LENGTH];
- u8 *WMMIE;
-
- // Part 1. Self QoS Mode.
- QOS_MODE QosCapability; //QoS Capability, 2006-06-14 Isaiah
- QOS_MODE CurrentQosMode;
-
- // For WMM Power Save Mode :
- // ACs are trigger/delivery enabled or legacy power save enabled. 2006-06-13 Isaiah
- AC_UAPSD b4ac_Uapsd; //VoUapsd(bit0), ViUapsd(bit1), BkUapsd(bit2), BeUapsd(bit3),
- AC_UAPSD Curr4acUapsd;
- u8 bInServicePeriod;
- u8 MaxSPLength;
- int NumBcnBeforeTrigger;
-
- // Part 2. EDCA Parameter (perAC)
- u8 *pWMMInfoEle;
- u8 WMMParamEle[WMM_PARAM_ELEMENT_SIZE];
- u8 WMMPELength;
-
- // <Bruce_Note>
- //2 ToDo: remove the Qos Info Field and replace it by the above WMM Info element.
- // By Bruce, 2008-01-30.
- // Part 2. EDCA Parameter (perAC)
- QOS_INFO_FIELD QosInfoField_STA; // Maintained by STA
- QOS_INFO_FIELD QosInfoField_AP; // Retrieved from AP
-
- AC_PARAM CurAcParameters[4];
-
- // Part 3. ACM
- ACM acm[4];
- ACM_METHOD AcmMethod;
-
- // Part 4. Per TID (Part 5: TCLASS will be described by TStream)
- QOS_TSTREAM TStream[16];
- WMM_TSPEC TSpec;
-
- u32 QBssWirelessMode;
-
- // No Ack Setting
- u8 bNoAck;
-
- // Enable/Disable Rx immediate BA capability.
- u8 bEnableRxImmBA;
-
-}STA_QOS, *PSTA_QOS;
-
-//
-// BSS QOS data.
-// Ref: BssDscr in 8185 code. [def. in BssDscr.h]
-//
-typedef struct _BSS_QOS{
- QOS_MODE bdQoSMode;
-
- u8 bdWMMIEBuf[MAX_WMMELE_LENGTH];
- u8 *bdWMMIE;
-
- QOS_ELE_SUBTYPE EleSubType;
-
- u8 *pWMMInfoEle;
- u8 *pWMMParamEle;
-
- QOS_INFO_FIELD QosInfoField;
- AC_PARAM AcParameter[4];
-}BSS_QOS, *PBSS_QOS;
-
-
-//
-// Ref: sQoSCtlLng and QoSCtl definition in 8185 QoS code.
-//#define QoSCtl (( (Adapter->bRegQoS) && (Adapter->dot11QoS.QoSMode &(QOS_EDCA|QOS_HCCA)) ) ?sQoSCtlLng:0)
-//
-#define sQoSCtlLng 2
-#define QOS_CTRL_LEN(_QosMode) ((_QosMode > QOS_DISABLE)? sQoSCtlLng : 0)
-
-
-//Added by joseph
-//UP Mapping to AC, using in MgntQuery_SequenceNumber() and maybe for DSCP
-//#define UP2AC(up) ((up<3)?((up==0)?1:0):(up>>1))
-#define IsACValid(ac) ((ac<=7 )?true:false )
+};
+
+/*
+ * TS Info field in WMM TSPEC Element.
+ * Ref:
+ * 1. WMM spec 2.2.11: WME TSPEC Element, p.18.
+ * 2. 8185 QoS code: QOS_TSINFO [def. in QoS_mp.h]
+ * Note: sizeof 3 Bytes
+ */
+struct qos_tsinfo {
+ u16 uc_traffic_type:1; //WMM is reserved
+ u16 uc_tsid:4;
+ u16 uc_direction:2;
+ u16 uc_access_policy:2; //WMM: bit8=0, bit7=1
+ u16 uc_aggregation:1; //WMM is reserved
+ u16 uc_psb:1; //WMMSA is APSD
+ u16 uc_up:3;
+ u16 uc_ts_info_ack_policy:2; //WMM is reserved
+ u8 uc_schedule:1; //WMM is reserved
+ u8:7;
+};
+
+/*
+ * WMM TSPEC Body.
+ * Ref: WMM spec 2.2.11: WME TSPEC Element, p.16.
+ * Note: sizeof 55 bytes
+ */
+struct tspec_body {
+ struct qos_tsinfo ts_info; //u8 TSInfo[3];
+ u16 nominal_msd_usize;
+ u16 max_msd_usize;
+ u32 min_service_itv;
+ u32 max_service_itv;
+ u32 inactivity_itv;
+ u32 suspen_itv;
+ u32 service_start_time;
+ u32 min_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 max_burst_size;
+ u32 delay_bound;
+ u32 min_phy_rate;
+ u16 surplus_bandwidth_allowance;
+ u16 medium_time;
+};
+
+/*
+ * 802.11 Management frame Status Code field
+ */
+struct octet_string {
+ u8 *octet;
+ u16 length;
+};
+
+#define is_ac_valid(ac) (((ac) <= 7) ? true : false)
#endif // #ifndef __INC_QOS_TYPE_H
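
The rewritten header trades the old charData unions for plain C bitfield structs such as aci_aifsn. One caveat worth noting: bitfield order and padding are implementation-defined, so overlaying such a struct on raw frame bytes relies on the compiler ABI (which holds for the little-endian targets this driver supports, but is not portable in general). A standalone sketch that decodes the one-byte ACI/AIFSN field both ways; the sample octet is made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t u8;	/* mirrors the driver's u8 */

/* Same layout as the header's aci_aifsn (LSB-first bitfields assumed). */
struct aci_aifsn {
	u8 aifsn:4;
	u8 acm:1;
	u8 aci:2;
	u8:1;
};

int main(void)
{
	u8 raw = 0x42;	/* made-up ACI/AIFSN octet: aifsn=2, acm=0, aci=2 */
	struct aci_aifsn f;

	/* Bitfield overlay: relies on the compiler packing the struct into
	 * one byte with LSB-first bit order. */
	memcpy(&f, &raw, sizeof(raw));
	printf("overlay: aifsn=%u acm=%u aci=%u\n",
	       (unsigned int)f.aifsn, (unsigned int)f.acm, (unsigned int)f.aci);

	/* Portable alternative: explicit shifts and masks. */
	printf("shifts:  aifsn=%u acm=%u aci=%u\n",
	       raw & 0x0Fu, (raw >> 4) & 0x01u, (raw >> 5) & 0x03u);
	return 0;
}
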
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
index 3a0ff08c687a..924d4b373099 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
@@ -2,55 +2,101 @@
#ifndef _TSTYPE_H_
#define _TSTYPE_H_
#include "rtl819x_Qos.h"
-#define TS_SETUP_TIMEOUT 60 /* In millisecond */
-#define TS_INACT_TIMEOUT 60
+
#define TS_ADDBA_DELAY 60
#define TOTAL_TS_NUM 16
#define TCLAS_NUM 4
/* This define the Tx/Rx directions */
-typedef enum _TR_SELECT {
+enum tr_select {
TX_DIR = 0,
RX_DIR = 1,
-} TR_SELECT, *PTR_SELECT;
-
-typedef struct _TS_COMMON_INFO {
- struct list_head List;
- struct timer_list SetupTimer;
- struct timer_list InactTimer;
- u8 Addr[6];
- TSPEC_BODY TSpec;
- QOS_TCLAS TClass[TCLAS_NUM];
- u8 TClasProc;
- u8 TClasNum;
-} TS_COMMON_INFO, *PTS_COMMON_INFO;
-
-typedef struct _TX_TS_RECORD {
- TS_COMMON_INFO TsCommonInfo;
- u16 TxCurSeq;
- BA_RECORD TxPendingBARecord; /* For BA Originator */
- BA_RECORD TxAdmittedBARecord; /* For BA Originator */
-/* QOS_DL_RECORD DLRecord; */
- u8 bAddBaReqInProgress;
- u8 bAddBaReqDelayed;
- u8 bUsingBa;
- struct timer_list TsAddBaTimer;
- u8 num;
-} TX_TS_RECORD, *PTX_TS_RECORD;
-
-typedef struct _RX_TS_RECORD {
- TS_COMMON_INFO TsCommonInfo;
- u16 RxIndicateSeq;
- u16 RxTimeoutIndicateSeq;
- struct list_head RxPendingPktList;
- struct timer_list RxPktPendingTimer;
- BA_RECORD RxAdmittedBARecord; /* For BA Recipient */
- u16 RxLastSeqNum;
- u8 RxLastFragNum;
+};
+
+union qos_tclas {
+ struct type_general {
+ u8 priority;
+ u8 classifier_type;
+ u8 mask;
+ } type_general;
+
+ struct type0_eth {
+ u8 priority;
+ u8 classifier_type;
+ u8 mask;
+ u8 src_addr[6];
+ u8 dst_addr[6];
+ u16 type;
+ } type0_eth;
+
+ struct type1_ipv4 {
+ u8 priority;
+ u8 classifier_type;
+ u8 mask;
+ u8 version;
+ u8 src_ip[4];
+ u8 dst_ip[4];
+ u16 src_port;
+ u16 dst_port;
+ u8 dscp;
+ u8 protocol;
+ u8 reserved;
+ } type1_ipv4;
+
+ struct type1_ipv6 {
+ u8 priority;
+ u8 classifier_type;
+ u8 mask;
+ u8 version;
+ u8 src_ip[16];
+ u8 dst_ip[16];
+ u16 src_port;
+ u16 dst_port;
+ u8 flow_label[3];
+ } type1_ipv6;
+
+ struct type2_8021q {
+ u8 priority;
+ u8 classifier_type;
+ u8 mask;
+ u16 tag_type;
+ } type2_8021q;
+};
+
+struct ts_common_info {
+ struct list_head list;
+ struct timer_list setup_timer;
+ struct timer_list inact_timer;
+ u8 addr[6];
+ struct tspec_body t_spec;
+ union qos_tclas t_class[TCLAS_NUM];
+ u8 t_clas_proc;
+ u8 t_clas_num;
+};
+
+struct tx_ts_record {
+ struct ts_common_info ts_common_info;
+ u16 tx_cur_seq;
+ BA_RECORD tx_pending_ba_record;
+ BA_RECORD tx_admitted_ba_record;
+ u8 add_ba_req_in_progress;
+ u8 add_ba_req_delayed;
+ u8 using_ba;
+ struct timer_list ts_add_ba_timer;
u8 num;
-/* QOS_DL_RECORD DLRecord; */
-} RX_TS_RECORD, *PRX_TS_RECORD;
+};
+struct rx_ts_record {
+ struct ts_common_info ts_common_info;
+ u16 rx_indicate_seq;
+ u16 rx_timeout_indicate_seq;
+ struct list_head rx_pending_pkt_list;
+ struct timer_list rx_pkt_pending_timer;
+ BA_RECORD rx_admitted_ba_record;
+ u16 rx_last_seq_num;
+ u8 rx_last_frag_num;
+ u8 num;
+};
#endif
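
Both tx_ts_record and rx_ts_record embed a struct ts_common_info, which lets the TS code keep either kind of record on shared list_heads of ts_common_info and recover the enclosing record with container_of(), exactly as the TSProc.c changes below do. A minimal userspace sketch of the pattern, using a simplified container_of and cut-down stand-ins for the structures:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of, as in the kernel (without the type checking). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Cut-down stand-ins for the structures above. */
struct ts_common_info {
	int placeholder;
};

struct tx_ts_record {
	struct ts_common_info ts_common_info;	/* embedded common part */
	unsigned short tx_cur_seq;
};

int main(void)
{
	struct tx_ts_record rec = { .tx_cur_seq = 7 };
	struct ts_common_info *common = &rec.ts_common_info;
	struct tx_ts_record *back;

	/* Recover the enclosing record from the embedded member, as
	 * GetTs() does for both Tx and Rx records. */
	back = container_of(common, struct tx_ts_record, ts_common_info);
	printf("tx_cur_seq=%u\n", (unsigned int)back->tx_cur_seq);	/* 7 */
	return 0;
}
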
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index e60a26250682..d46d8f468671 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -19,13 +19,13 @@ static void TsInactTimeout(struct timer_list *unused)
/********************************************************************************************************************
 *function: I still do not understand this function, so wait for further implementation
- * input: unsigned long data //acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer
+ * input: unsigned long data //actually we send struct tx_ts_record or struct rx_ts_record to these timers
* return: NULL
* notice:
********************************************************************************************************************/
static void RxPktPendingTimeout(struct timer_list *t)
{
- PRX_TS_RECORD pRxTs = from_timer(pRxTs, t, RxPktPendingTimer);
+ struct rx_ts_record *pRxTs = from_timer(pRxTs, t, rx_pkt_pending_timer);
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
PRX_REORDER_ENTRY pReorderEntry = NULL;
@@ -37,19 +37,19 @@ static void RxPktPendingTimeout(struct timer_list *t)
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__func__);
- if(pRxTs->RxTimeoutIndicateSeq != 0xffff) {
+ if(pRxTs->rx_timeout_indicate_seq != 0xffff) {
// Indicate the pending packets sequentially according to SeqNum until meet the gap.
- while(!list_empty(&pRxTs->RxPendingPktList)) {
- pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
+ while(!list_empty(&pRxTs->rx_pending_pkt_list)) {
+ pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->rx_pending_pkt_list.prev,RX_REORDER_ENTRY,List);
if(index == 0)
- pRxTs->RxIndicateSeq = pReorderEntry->SeqNum;
+ pRxTs->rx_indicate_seq = pReorderEntry->SeqNum;
- if( SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ) {
+ if( SN_LESS(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq) ) {
list_del_init(&pReorderEntry->List);
- if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq))
- pRxTs->RxIndicateSeq = (pRxTs->RxIndicateSeq + 1) % 4096;
+ if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->rx_indicate_seq))
+ pRxTs->rx_indicate_seq = (pRxTs->rx_indicate_seq + 1) % 4096;
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"RxPktPendingTimeout(): IndicateSeq: %d\n", pReorderEntry->SeqNum);
ieee->stats_IndicateArray[index] = pReorderEntry->prxb;
@@ -64,8 +64,8 @@ static void RxPktPendingTimeout(struct timer_list *t)
}
if(index>0) {
- // Set RxTimeoutIndicateSeq to 0xffff to indicate no pending packets in buffer now.
- pRxTs->RxTimeoutIndicateSeq = 0xffff;
+ // Set rx_timeout_indicate_seq to 0xffff to indicate no pending packets in buffer now.
+ pRxTs->rx_timeout_indicate_seq = 0xffff;
// Indicate packets
if(index > REORDER_WIN_SIZE) {
@@ -76,9 +76,9 @@ static void RxPktPendingTimeout(struct timer_list *t)
ieee80211_indicate_packets(ieee, ieee->stats_IndicateArray, index);
}
- if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff)) {
- pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
- mod_timer(&pRxTs->RxPktPendingTimer,
+ if(bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
+ pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
+ mod_timer(&pRxTs->rx_pkt_pending_timer,
jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime));
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
@@ -86,13 +86,13 @@ static void RxPktPendingTimeout(struct timer_list *t)
/********************************************************************************************************************
*function: Add BA timer function
- * input: unsigned long data //acturally we send TX_TS_RECORD or RX_TS_RECORD to these timer
+ * input: unsigned long data //actually we send struct tx_ts_record or struct rx_ts_record to these timers
* return: NULL
* notice:
********************************************************************************************************************/
static void TsAddBaProcess(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t, ts_add_ba_timer);
u8 num = pTxTs->num;
struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[num]);
@@ -101,38 +101,38 @@ static void TsAddBaProcess(struct timer_list *t)
}
-static void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
+static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
- eth_zero_addr(pTsCommonInfo->Addr);
- memset(&pTsCommonInfo->TSpec, 0, sizeof(TSPEC_BODY));
- memset(&pTsCommonInfo->TClass, 0, sizeof(QOS_TCLAS)*TCLAS_NUM);
- pTsCommonInfo->TClasProc = 0;
- pTsCommonInfo->TClasNum = 0;
+ eth_zero_addr(pTsCommonInfo->addr);
+ memset(&pTsCommonInfo->t_spec, 0, sizeof(struct tspec_body));
+ memset(&pTsCommonInfo->t_class, 0, sizeof(union qos_tclas)*TCLAS_NUM);
+ pTsCommonInfo->t_clas_proc = 0;
+ pTsCommonInfo->t_clas_num = 0;
}
-static void ResetTxTsEntry(PTX_TS_RECORD pTS)
+static void ResetTxTsEntry(struct tx_ts_record *pTS)
{
- ResetTsCommonInfo(&pTS->TsCommonInfo);
- pTS->TxCurSeq = 0;
- pTS->bAddBaReqInProgress = false;
- pTS->bAddBaReqDelayed = false;
- pTS->bUsingBa = false;
- ResetBaEntry(&pTS->TxAdmittedBARecord); //For BA Originator
- ResetBaEntry(&pTS->TxPendingBARecord);
+ ResetTsCommonInfo(&pTS->ts_common_info);
+ pTS->tx_cur_seq = 0;
+ pTS->add_ba_req_in_progress = false;
+ pTS->add_ba_req_delayed = false;
+ pTS->using_ba = false;
+ ResetBaEntry(&pTS->tx_admitted_ba_record); //For BA Originator
+ ResetBaEntry(&pTS->tx_pending_ba_record);
}
-static void ResetRxTsEntry(PRX_TS_RECORD pTS)
+static void ResetRxTsEntry(struct rx_ts_record *pTS)
{
- ResetTsCommonInfo(&pTS->TsCommonInfo);
- pTS->RxIndicateSeq = 0xffff; // This indicate the RxIndicateSeq is not used now!!
- pTS->RxTimeoutIndicateSeq = 0xffff; // This indicate the RxTimeoutIndicateSeq is not used now!!
- ResetBaEntry(&pTS->RxAdmittedBARecord); // For BA Recipient
+ ResetTsCommonInfo(&pTS->ts_common_info);
+	pTS->rx_indicate_seq = 0xffff;	// This indicates that rx_indicate_seq is not used now!!
+	pTS->rx_timeout_indicate_seq = 0xffff;	// This indicates that rx_timeout_indicate_seq is not used now!!
+ ResetBaEntry(&pTS->rx_admitted_ba_record); // For BA Recipient
}
void TSInitialize(struct ieee80211_device *ieee)
{
- PTX_TS_RECORD pTxTS = ieee->TxTsRecord;
- PRX_TS_RECORD pRxTS = ieee->RxTsRecord;
+ struct tx_ts_record *pTxTS = ieee->TxTsRecord;
+ struct rx_ts_record *pRxTS = ieee->RxTsRecord;
PRX_REORDER_ENTRY pRxReorderEntry = ieee->RxReorderEntry;
u8 count = 0;
IEEE80211_DEBUG(IEEE80211_DL_TS, "==========>%s()\n", __func__);
@@ -146,17 +146,17 @@ void TSInitialize(struct ieee80211_device *ieee)
pTxTS->num = count;
// The timers for the operation of Traffic Stream and Block Ack.
// DLS related timer will be add here in the future!!
- timer_setup(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ timer_setup(&pTxTS->ts_common_info.setup_timer, TsSetupTimeOut,
0);
- timer_setup(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ timer_setup(&pTxTS->ts_common_info.inact_timer, TsInactTimeout,
0);
- timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
- timer_setup(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut,
+ timer_setup(&pTxTS->ts_add_ba_timer, TsAddBaProcess, 0);
+ timer_setup(&pTxTS->tx_pending_ba_record.Timer, BaSetupTimeOut,
0);
- timer_setup(&pTxTS->TxAdmittedBARecord.Timer,
+ timer_setup(&pTxTS->tx_admitted_ba_record.Timer,
TxBaInactTimeout, 0);
ResetTxTsEntry(pTxTS);
- list_add_tail(&pTxTS->TsCommonInfo.List, &ieee->Tx_TS_Unused_List);
+ list_add_tail(&pTxTS->ts_common_info.list, &ieee->Tx_TS_Unused_List);
pTxTS++;
}
@@ -166,16 +166,16 @@ void TSInitialize(struct ieee80211_device *ieee)
INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
for(count = 0; count < TOTAL_TS_NUM; count++) {
pRxTS->num = count;
- INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
- timer_setup(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ INIT_LIST_HEAD(&pRxTS->rx_pending_pkt_list);
+ timer_setup(&pRxTS->ts_common_info.setup_timer, TsSetupTimeOut,
0);
- timer_setup(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ timer_setup(&pRxTS->ts_common_info.inact_timer, TsInactTimeout,
0);
- timer_setup(&pRxTS->RxAdmittedBARecord.Timer,
+ timer_setup(&pRxTS->rx_admitted_ba_record.Timer,
RxBaInactTimeout, 0);
- timer_setup(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout, 0);
+ timer_setup(&pRxTS->rx_pkt_pending_timer, RxPktPendingTimeout, 0);
ResetRxTsEntry(pRxTS);
- list_add_tail(&pRxTS->TsCommonInfo.List, &ieee->Rx_TS_Unused_List);
+ list_add_tail(&pRxTS->ts_common_info.list, &ieee->Rx_TS_Unused_List);
pRxTS++;
}
// Initialize unused Rx Reorder List.
@@ -191,26 +191,26 @@ void TSInitialize(struct ieee80211_device *ieee)
}
static void AdmitTS(struct ieee80211_device *ieee,
- PTS_COMMON_INFO pTsCommonInfo, u32 InactTime)
+ struct ts_common_info *pTsCommonInfo, u32 InactTime)
{
- del_timer_sync(&pTsCommonInfo->SetupTimer);
- del_timer_sync(&pTsCommonInfo->InactTimer);
+ del_timer_sync(&pTsCommonInfo->setup_timer);
+ del_timer_sync(&pTsCommonInfo->inact_timer);
if(InactTime!=0)
- mod_timer(&pTsCommonInfo->InactTimer,
+ mod_timer(&pTsCommonInfo->inact_timer,
jiffies + msecs_to_jiffies(InactTime));
}
-static PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee,
- u8 *Addr, u8 TID,
- TR_SELECT TxRxSelect)
+static struct ts_common_info *SearchAdmitTRStream(struct ieee80211_device *ieee,
+ u8 *Addr, u8 TID,
+ enum tr_select TxRxSelect)
{
//DIRECTION_VALUE dir;
u8 dir;
bool search_dir[4] = {0};
struct list_head *psearch_list; //FIXME
- PTS_COMMON_INFO pRet = NULL;
+ struct ts_common_info *pRet = NULL;
if(ieee->iw_mode == IW_MODE_MASTER) { //ap mode
if(TxRxSelect == TX_DIR) {
search_dir[DIR_DOWN] = true;
@@ -245,27 +245,27 @@ static PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee,
for(dir = 0; dir <= DIR_BI_DIR; dir++) {
if (!search_dir[dir])
continue;
- list_for_each_entry(pRet, psearch_list, List){
- // IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.f.TSInfo.field.ucTSID, pRet->TSpec.f.TSInfo.field.ucDirection);
- if (memcmp(pRet->Addr, Addr, 6) == 0)
- if (pRet->TSpec.f.TSInfo.field.ucTSID == TID)
- if(pRet->TSpec.f.TSInfo.field.ucDirection == dir) {
+ list_for_each_entry(pRet, psearch_list, list){
+ // IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.ts_info.ucTSID, pRet->TSpec.ts_info.ucDirection);
+ if (memcmp(pRet->addr, Addr, 6) == 0)
+ if (pRet->t_spec.ts_info.uc_tsid == TID)
+ if(pRet->t_spec.ts_info.uc_direction == dir) {
// printk("Bingo! got it\n");
break;
}
}
- if(&pRet->List != psearch_list)
+ if(&pRet->list != psearch_list)
break;
}
- if(&pRet->List != psearch_list)
+ if(&pRet->list != psearch_list)
return pRet ;
else
return NULL;
}
-static void MakeTSEntry(PTS_COMMON_INFO pTsCommonInfo, u8 *Addr,
- PTSPEC_BODY pTSPEC, PQOS_TCLAS pTCLAS, u8 TCLAS_Num,
+static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
+ struct tspec_body *pTSPEC, union qos_tclas *pTCLAS, u8 TCLAS_Num,
u8 TCLAS_Proc)
{
u8 count;
@@ -273,25 +273,25 @@ static void MakeTSEntry(PTS_COMMON_INFO pTsCommonInfo, u8 *Addr,
if(pTsCommonInfo == NULL)
return;
- memcpy(pTsCommonInfo->Addr, Addr, 6);
+ memcpy(pTsCommonInfo->addr, Addr, 6);
if(pTSPEC != NULL)
- memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC, sizeof(TSPEC_BODY));
+ memcpy((u8 *)(&(pTsCommonInfo->t_spec)), (u8 *)pTSPEC, sizeof(struct tspec_body));
for(count = 0; count < TCLAS_Num; count++)
- memcpy((u8 *)(&(pTsCommonInfo->TClass[count])), (u8 *)pTCLAS, sizeof(QOS_TCLAS));
+ memcpy((u8 *)(&(pTsCommonInfo->t_class[count])), (u8 *)pTCLAS, sizeof(union qos_tclas));
- pTsCommonInfo->TClasProc = TCLAS_Proc;
- pTsCommonInfo->TClasNum = TCLAS_Num;
+ pTsCommonInfo->t_clas_proc = TCLAS_Proc;
+ pTsCommonInfo->t_clas_num = TCLAS_Num;
}
bool GetTs(
struct ieee80211_device *ieee,
- PTS_COMMON_INFO *ppTS,
+ struct ts_common_info **ppTS,
u8 *Addr,
u8 TID,
- TR_SELECT TxRxSelect, //Rx:1, Tx:0
+ enum tr_select TxRxSelect, //Rx:1, Tx:0
bool bAddNewTs
)
{
@@ -309,7 +309,7 @@ bool GetTs(
UP = 0;
} else {
// In WMM case: we use 4 TID only
- if (!IsACValid(TID)) {
+ if (!is_ac_valid(TID)) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, " in %s(), TID(%d) is not valid\n", __func__, TID);
return false;
}
@@ -354,8 +354,8 @@ bool GetTs(
// This is for EDCA and WMM to add a new TS.
	// For HCCA or WMMSA, TS cannot be admitted without negotiation.
//
- TSPEC_BODY TSpec;
- PQOS_TSINFO pTSInfo = &TSpec.f.TSInfo;
+ struct tspec_body TSpec;
+ struct qos_tsinfo *pTSInfo = &TSpec.ts_info;
struct list_head *pUnusedList =
(TxRxSelect == TX_DIR)?
(&ieee->Tx_TS_Unused_List):
@@ -366,36 +366,36 @@ bool GetTs(
(&ieee->Tx_TS_Admit_List):
(&ieee->Rx_TS_Admit_List);
- DIRECTION_VALUE Dir = (ieee->iw_mode == IW_MODE_MASTER)?
+ enum direction_value Dir = (ieee->iw_mode == IW_MODE_MASTER)?
((TxRxSelect==TX_DIR)?DIR_DOWN:DIR_UP):
((TxRxSelect==TX_DIR)?DIR_UP:DIR_DOWN);
IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n");
if(!list_empty(pUnusedList)) {
- (*ppTS) = list_entry(pUnusedList->next, TS_COMMON_INFO, List);
- list_del_init(&(*ppTS)->List);
+ (*ppTS) = list_entry(pUnusedList->next, struct ts_common_info, list);
+ list_del_init(&(*ppTS)->list);
if(TxRxSelect==TX_DIR) {
- PTX_TS_RECORD tmp = container_of(*ppTS, TX_TS_RECORD, TsCommonInfo);
+ struct tx_ts_record *tmp = container_of(*ppTS, struct tx_ts_record, ts_common_info);
ResetTxTsEntry(tmp);
} else {
- PRX_TS_RECORD tmp = container_of(*ppTS, RX_TS_RECORD, TsCommonInfo);
+ struct rx_ts_record *tmp = container_of(*ppTS, struct rx_ts_record, ts_common_info);
ResetRxTsEntry(tmp);
}
IEEE80211_DEBUG(IEEE80211_DL_TS, "to init current TS, UP:%d, Dir:%d, addr:%pM\n", UP, Dir, Addr);
		// Prepare TS Info related field
- pTSInfo->field.ucTrafficType = 0; // Traffic type: WMM is reserved in this field
- pTSInfo->field.ucTSID = UP; // TSID
- pTSInfo->field.ucDirection = Dir; // Direction: if there is DirectLink, this need additional consideration.
- pTSInfo->field.ucAccessPolicy = 1; // Access policy
- pTSInfo->field.ucAggregation = 0; // Aggregation
- pTSInfo->field.ucPSB = 0; // Aggregation
- pTSInfo->field.ucUP = UP; // User priority
- pTSInfo->field.ucTSInfoAckPolicy = 0; // Ack policy
- pTSInfo->field.ucSchedule = 0; // Schedule
+ pTSInfo->uc_traffic_type = 0; // Traffic type: WMM is reserved in this field
+ pTSInfo->uc_tsid = UP; // TSID
+ pTSInfo->uc_direction = Dir; // Direction: if there is DirectLink, this need additional consideration.
+ pTSInfo->uc_access_policy = 1; // Access policy
+ pTSInfo->uc_aggregation = 0; // Aggregation
+ pTSInfo->uc_psb = 0; // Aggregation
+ pTSInfo->uc_up = UP; // User priority
+ pTSInfo->uc_ts_info_ack_policy = 0; // Ack policy
+ pTSInfo->uc_schedule = 0; // Schedule
MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
AdmitTS(ieee, *ppTS, 0);
- list_add_tail(&((*ppTS)->List), pAddmitList);
+ list_add_tail(&((*ppTS)->list), pAddmitList);
// if there is DirectLink, we need to do additional operation here!!
return true;
@@ -407,26 +407,26 @@ bool GetTs(
}
}
-static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
- TR_SELECT TxRxSelect)
+static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *pTs,
+ enum tr_select TxRxSelect)
{
//u32 flags = 0;
unsigned long flags = 0;
- del_timer_sync(&pTs->SetupTimer);
- del_timer_sync(&pTs->InactTimer);
+ del_timer_sync(&pTs->setup_timer);
+ del_timer_sync(&pTs->inact_timer);
TsInitDelBA(ieee, pTs, TxRxSelect);
if(TxRxSelect == RX_DIR) {
//#ifdef TO_DO_LIST
PRX_REORDER_ENTRY pRxReorderEntry;
- PRX_TS_RECORD pRxTS = (PRX_TS_RECORD)pTs;
- if(timer_pending(&pRxTS->RxPktPendingTimer))
- del_timer_sync(&pRxTS->RxPktPendingTimer);
+ struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
+ if(timer_pending(&pRxTS->rx_pkt_pending_timer))
+ del_timer_sync(&pRxTS->rx_pkt_pending_timer);
- while(!list_empty(&pRxTS->RxPendingPktList)) {
+ while(!list_empty(&pRxTS->rx_pending_pkt_list)) {
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- //pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
+ //pRxReorderEntry = list_entry(&pRxTS->rx_pending_pkt_list.prev,RX_REORDER_ENTRY,List);
+ pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->rx_pending_pkt_list.prev,RX_REORDER_ENTRY,List);
list_del_init(&pRxReorderEntry->List);
{
int i = 0;
@@ -447,90 +447,90 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
//#endif
} else {
- PTX_TS_RECORD pTxTS = (PTX_TS_RECORD)pTs;
- del_timer_sync(&pTxTS->TsAddBaTimer);
+ struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
+ del_timer_sync(&pTxTS->ts_add_ba_timer);
}
}
void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr)
{
- PTS_COMMON_INFO pTS, pTmpTS;
+ struct ts_common_info *pTS, *pTmpTS;
printk("===========>RemovePeerTS,%pM\n", Addr);
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
+ if (memcmp(pTS->addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
}
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
+ if (memcmp(pTS->addr, Addr, 6) == 0) {
printk("====>remove Tx_TS_admin_list\n");
RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
}
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
+ if (memcmp(pTS->addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
}
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
- if (memcmp(pTS->Addr, Addr, 6) == 0) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
+ if (memcmp(pTS->addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
}
}
}
void RemoveAllTS(struct ieee80211_device *ieee)
{
- PTS_COMMON_INFO pTS, pTmpTS;
+ struct ts_common_info *pTS, *pTmpTS;
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
RemoveTsEntry(ieee, pTS, TX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Tx_TS_Unused_List);
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
}
- list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+ list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
RemoveTsEntry(ieee, pTS, RX_DIR);
- list_del_init(&pTS->List);
- list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&pTS->list);
+ list_add_tail(&pTS->list, &ieee->Rx_TS_Unused_List);
}
}
-void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS)
+void TsStartAddBaProcess(struct ieee80211_device *ieee, struct tx_ts_record *pTxTS)
{
- if(!pTxTS->bAddBaReqInProgress) {
- pTxTS->bAddBaReqInProgress = true;
- if(pTxTS->bAddBaReqDelayed) {
+ if(!pTxTS->add_ba_req_in_progress) {
+ pTxTS->add_ba_req_in_progress = true;
+ if(pTxTS->add_ba_req_delayed) {
IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
- mod_timer(&pTxTS->TsAddBaTimer,
+ mod_timer(&pTxTS->ts_add_ba_timer,
jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
} else {
IEEE80211_DEBUG(IEEE80211_DL_BA,"TsStartAddBaProcess(): Immediately Start ADDBA now!!\n");
- mod_timer(&pTxTS->TsAddBaTimer, jiffies+10); //set 10 ticks
+ mod_timer(&pTxTS->ts_add_ba_timer, jiffies+10); //set 10 ticks
}
} else {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __func__);
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index e54f6fad2e68..9b7f822e9762 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -23,7 +23,7 @@
* Note: 8226 supports both 20 MHz and 40 MHz
*--------------------------------------------------------------------------
*/
-void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth)
+void PHY_SetRF8256Bandwidth(struct net_device *dev, enum ht_channel_width Bandwidth)
{
u8 eRFPath;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -41,16 +41,16 @@ void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth)
|| priv->card_8192_version
== VERSION_819xU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
rtl8192_phy_SetRFReg(dev,
- (RF90_RADIO_PATH_E)eRFPath,
+ (enum rf90_radio_path_e)eRFPath,
0x0b, bMask12Bits, 0x100); /* phy para:1ba */
rtl8192_phy_SetRFReg(dev,
- (RF90_RADIO_PATH_E)eRFPath,
+ (enum rf90_radio_path_e)eRFPath,
0x2c, bMask12Bits, 0x3d7);
rtl8192_phy_SetRFReg(dev,
- (RF90_RADIO_PATH_E)eRFPath,
+ (enum rf90_radio_path_e)eRFPath,
0x0e, bMask12Bits, 0x021);
rtl8192_phy_SetRFReg(dev,
- (RF90_RADIO_PATH_E)eRFPath,
+ (enum rf90_radio_path_e)eRFPath,
0x14, bMask12Bits, 0x5ab);
} else {
RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n");
@@ -58,15 +58,15 @@ void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth)
break;
case HT_CHANNEL_WIDTH_20_40:
if (priv->card_8192_version == VERSION_819xU_A || priv->card_8192_version == VERSION_819xU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
- rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x300); /* phy para:3ba */
- rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3df);
- rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x0a1);
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x0b, bMask12Bits, 0x300); /* phy para:3ba */
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x2c, bMask12Bits, 0x3df);
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x0e, bMask12Bits, 0x0a1);
if (priv->chan == 3 || priv->chan == 9)
/* I need to set priv->chan whenever current channel changes */
- rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x59b);
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x14, bMask12Bits, 0x59b);
else
- rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab);
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x14, bMask12Bits, 0x5ab);
} else {
RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n");
}
@@ -115,14 +115,14 @@ void phy_RF8256_Config_ParaFile(struct net_device *dev)
u8 ConstRetryTimes = 5, RetryTimes = 5;
u8 ret = 0;
/* Initialize RF */
- for (eRFPath = (RF90_RADIO_PATH_E)RF90_PATH_A; eRFPath < priv->NumTotalRFPath; eRFPath++) {
+ for (eRFPath = (enum rf90_radio_path_e)RF90_PATH_A; eRFPath < priv->NumTotalRFPath; eRFPath++) {
if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
continue;
pPhyReg = &priv->PHYRegDef[eRFPath];
/* Joseph test for shorten RF config
- * pHalData->RfReg0Value[eRFPath] = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, rGlobalCtrl, bMaskDWord);
+ * pHalData->RfReg0Value[eRFPath] = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, rGlobalCtrl, bMaskDWord);
* ----Store original RFENV control type
*/
switch (eRFPath) {
@@ -146,12 +146,12 @@ void phy_RF8256_Config_ParaFile(struct net_device *dev)
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258 */
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ??? */
- rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E) eRFPath, 0x0, bMask12Bits, 0xbf);
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e) eRFPath, 0x0, bMask12Bits, 0xbf);
/* Check RF block (for FPGA platform only)----
* TODO: this function should be removed on ASIC , Emily 2007.2.2
*/
- if (rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF, (RF90_RADIO_PATH_E)eRFPath)) {
+ if (rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF, (enum rf90_radio_path_e)eRFPath)) {
RT_TRACE(COMP_ERR, "PHY_RF8256_Config():Check Radio[%d] Fail!!\n", eRFPath);
goto phy_RF8256_Config_ParaFile_Fail;
}
@@ -162,32 +162,32 @@ void phy_RF8256_Config_ParaFile(struct net_device *dev)
switch (eRFPath) {
case RF90_PATH_A:
while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
RetryTimes--;
}
break;
case RF90_PATH_B:
while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
RetryTimes--;
}
break;
case RF90_PATH_C:
while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
RetryTimes--;
}
break;
case RF90_PATH_D:
while (RF3_Final_Value != RegValueToBeCheck && RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (RF90_RADIO_PATH_E)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits);
+ ret = rtl8192_phy_ConfigRFWithHeaderFile(dev, (enum rf90_radio_path_e)eRFPath);
+ RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (enum rf90_radio_path_e)eRFPath, RegOffSetToBeCheck, bMask12Bits);
RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value);
RetryTimes--;
}
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h
index 5c325ce9d631..29b926cad14b 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.h
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.h
@@ -14,7 +14,7 @@
#define RTL8225H
#define RTL819X_TOTAL_RF_PATH 2 /* for 8192U */
-void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth);
+void PHY_SetRF8256Bandwidth(struct net_device *dev, enum ht_channel_width Bandwidth);
void PHY_RF8256_Config(struct net_device *dev);
void phy_RF8256_Config_ParaFile(struct net_device *dev);
void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel);
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 51c150a39fc2..94a148994069 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -15,9 +15,10 @@
* project Authors.
*/
-#ifndef R819xU_H
-#define R819xU_H
+#ifndef R8192U_H
+#define R8192U_H
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
@@ -58,7 +59,6 @@ extern u32 rt_global_debug_component;
#define COMP_DBG BIT(1)
#define COMP_INIT BIT(2) /* Driver initialization/halt/reset. */
-
#define COMP_RECV BIT(3) /* Receive data path. */
#define COMP_SEND BIT(4) /* Send data path. */
#define COMP_IO BIT(5)
@@ -126,7 +126,6 @@ extern u32 rt_global_debug_component;
#define RT_DEBUG_DATA(level, data, datalen) do {} while (0)
#endif /* RTL8169_DEBUG */
-
/* Queue Select Value in TxDesc */
#define QSLT_BK 0x1
#define QSLT_BE 0x0
@@ -176,7 +175,7 @@ extern u32 rt_global_debug_component;
#define CCK_Table_length 12
/* For rtl819x */
-typedef struct _tx_desc_819x_usb {
+struct tx_desc_819x_usb {
/* DWORD 0 */
u16 PktSize;
u8 Offset;
@@ -212,36 +211,9 @@ typedef struct _tx_desc_819x_usb {
u32 Reserved5;
u32 Reserved6;
u32 Reserved7;
-} tx_desc_819x_usb, *ptx_desc_819x_usb;
-
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
-typedef struct _tx_desc_819x_usb_aggr_subframe {
- /* DWORD 0 */
- u16 PktSize;
- u8 Offset;
- u8 TxFWInfoSize;
-
- /* DWORD 1 */
- u8 RATid:3;
- u8 DISFB:1;
- u8 USERATE:1;
- u8 MOREFRAG:1;
- u8 NoEnc:1;
- u8 PIFS:1;
- u8 QueueSelect:5;
- u8 NoACM:1;
- u8 Reserved1:2;
- u8 SecCAMID:5;
- u8 SecDescAssign:1;
- u8 SecType:2;
- u8 PacketID:7;
- u8 OWN:1;
-} tx_desc_819x_usb_aggr_subframe, *ptx_desc_819x_usb_aggr_subframe;
-#endif
-
-
+};
-typedef struct _tx_desc_cmd_819x_usb {
+struct tx_desc_cmd_819x_usb {
/* DWORD 0 */
u16 Reserved0;
u8 Reserved1;
@@ -266,10 +238,9 @@ typedef struct _tx_desc_cmd_819x_usb {
u32 Reserved6;
u32 Reserved7;
u32 Reserved8;
-} tx_desc_cmd_819x_usb, *ptx_desc_cmd_819x_usb;
-
+};
-typedef struct _tx_fwinfo_819x_usb {
+struct tx_fwinfo_819x_usb {
/* DWORD 0 */
u8 TxRate:7;
u8 CtsEnable:1;
@@ -300,7 +271,7 @@ typedef struct _tx_fwinfo_819x_usb {
u32 TxAGCSign:1;
u32 Tx_INFO_RSVD:6;
u32 PacketID:13;
-} tx_fwinfo_819x_usb, *ptx_fwinfo_819x_usb;
+};
struct rtl8192_rx_info {
struct urb *urb;
@@ -308,7 +279,7 @@ struct rtl8192_rx_info {
u8 out_pipe;
};
-typedef struct rx_desc_819x_usb {
+struct rx_desc_819x_usb {
/* DWORD 0 */
u16 Length:14;
u16 CRC32:1;
@@ -321,27 +292,9 @@ typedef struct rx_desc_819x_usb {
/* DWORD 1 */
u32 Reserved2;
-} rx_desc_819x_usb, *prx_desc_819x_usb;
-
-#ifdef USB_RX_AGGREGATION_SUPPORT
-typedef struct _rx_desc_819x_usb_aggr_subframe {
- /* DOWRD 0 */
- u16 Length:14;
- u16 CRC32:1;
- u16 ICV:1;
- u8 Offset;
- u8 RxDrvInfoSize;
- /* DOWRD 1 */
- u8 Shift:2;
- u8 PHYStatus:1;
- u8 SWDec:1;
- u8 Reserved1:4;
- u8 Reserved2;
- u16 Reserved3;
-} rx_desc_819x_usb_aggr_subframe, *prx_desc_819x_usb_aggr_subframe;
-#endif
+};
-typedef struct rx_drvinfo_819x_usb {
+struct rx_drvinfo_819x_usb {
/* DWORD 0 */
u16 Reserved1:12;
u16 PartAggr:1;
@@ -362,7 +315,7 @@ typedef struct rx_drvinfo_819x_usb {
/* DWORD 1 */
u32 TSFL;
-} rx_drvinfo_819x_usb, *prx_drvinfo_819x_usb;
+};
/* Support till 64 bit bus width OS */
#define MAX_DEV_ADDR_SIZE 8
@@ -370,25 +323,23 @@ typedef struct rx_drvinfo_819x_usb {
#define MAX_FIRMWARE_INFORMATION_SIZE 32
#define MAX_802_11_HEADER_LENGTH (40 + MAX_FIRMWARE_INFORMATION_SIZE)
#define ENCRYPTION_MAX_OVERHEAD 128
-#define USB_HWDESC_HEADER_LEN sizeof(tx_desc_819x_usb)
-#define TX_PACKET_SHIFT_BYTES (USB_HWDESC_HEADER_LEN + sizeof(tx_fwinfo_819x_usb))
+#define USB_HWDESC_HEADER_LEN sizeof(struct tx_desc_819x_usb)
+#define TX_PACKET_SHIFT_BYTES (USB_HWDESC_HEADER_LEN + sizeof(struct tx_fwinfo_819x_usb))
#define MAX_FRAGMENT_COUNT 8
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
#define MAX_TRANSMIT_BUFFER_SIZE 32000
#else
#define MAX_TRANSMIT_BUFFER_SIZE 8000
#endif
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
-#define TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES (sizeof(tx_desc_819x_usb_aggr_subframe) + sizeof(tx_fwinfo_819x_usb))
-#endif
/* Octets for crc32 (FCS, ICV) */
#define scrclng 4
-typedef enum rf_optype {
+enum rf_op_type {
RF_OP_By_SW_3wire = 0,
RF_OP_By_FW,
RF_OP_MAX
-} rf_op_type;
+};
+
/* 8190 Loopback Mode definition */
typedef enum _rtl819xUsb_loopback {
RTL819xU_NO_LOOPBACK = 0,
@@ -491,7 +442,6 @@ typedef struct _rt_firmware_info_819xUsb {
#define PHY_RSSI_SLID_WIN_MAX 100
-
typedef enum _WIRELESS_MODE {
WIRELESS_MODE_UNKNOWN = 0x00,
WIRELESS_MODE_A = 0x01,
@@ -502,7 +452,6 @@ typedef enum _WIRELESS_MODE {
WIRELESS_MODE_N_5G = 0x20
} WIRELESS_MODE;
-
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
typedef struct buffer {
@@ -522,11 +471,6 @@ typedef struct rtl_reg_debug {
unsigned char buf[0xff];
} rtl_reg_debug;
-
-
-
-
-
typedef struct _rt_9x_tx_rate_history {
u32 cck[4];
u32 ofdm[8];
@@ -641,13 +585,11 @@ typedef struct Stats {
u32 CurrentShowTxate;
} Stats;
-
/* Bandwidth Offset */
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-
typedef struct ChnlAccessSetting {
u16 SIFS_Timer;
u16 DIFS_Timer;
@@ -720,9 +662,17 @@ typedef enum _RT_RF_TYPE_819xU {
RF_PSEUDO_11N = 4,
} RT_RF_TYPE_819xU, *PRT_RF_TYPE_819xU;
+/* 2007/10/08 MH Define RATR state. */
+enum dynamic_ratr_state {
+ DM_RATR_STA_HIGH = 0,
+ DM_RATR_STA_MIDDLE = 1,
+ DM_RATR_STA_LOW = 2,
+ DM_RATR_STA_MAX
+};
+
typedef struct _rate_adaptive {
u8 rate_adaptive_disabled;
- u8 ratr_state;
+ enum dynamic_ratr_state ratr_state;
u16 reserve;
u32 high_rssi_thresh_for_ra;
@@ -756,7 +706,6 @@ typedef struct _ccktxbbgain_struct {
u8 ccktxbb_valuearray[8];
} ccktxbbgain_struct, *pccktxbbgain_struct;
-
typedef struct _init_gain {
u8 xaagccore1;
u8 xbagccore1;
@@ -792,7 +741,6 @@ typedef struct _phy_cck_rx_status_report_819xusb {
u8 cck_agc_rpt;
} phy_sts_cck_819xusb_t;
-
struct phy_ofdm_rx_status_rxsc_sgien_exintfflag {
u8 reserved:4;
u8 rxsc:2;
@@ -884,7 +832,6 @@ typedef struct r8192_priv {
short sens;
short max_sens;
-
short up;
/* If 1, allow bad crc frame, reception in monitor mode */
short crcmon;
@@ -923,7 +870,6 @@ typedef struct r8192_priv {
short tx_urb_index;
atomic_t tx_pending[0x10]; /* UART_PRIORITY + 1 */
-
struct tasklet_struct irq_rx_tasklet;
struct urb *rxurb_task;
@@ -936,7 +882,6 @@ typedef struct r8192_priv {
u32 LastRxDescTSFHigh;
u32 LastRxDescTSFLow;
-
/* Rx Related variables */
u16 EarlyRxThreshold;
u32 ReceiveConfig;
@@ -958,7 +903,7 @@ typedef struct r8192_priv {
u8 slot_time;
bool bDcut;
bool bCurrentRxAggrEnable;
- u8 Rf_Mode; /* For Firmware RF -R/W switch */
+ enum rf_op_type Rf_Mode; /* For Firmware RF -R/W switch */
prt_firmware pFirmware;
rtl819xUsb_loopback_e LoopbackMode;
u16 EEPROMTxPowerDiff;
@@ -995,7 +940,7 @@ typedef struct r8192_priv {
u8 SwChnlStage;
u8 SwChnlStep;
u8 SetBWModeInProgress;
- HT_CHANNEL_WIDTH CurrentChannelBW;
+ enum ht_channel_width CurrentChannelBW;
u8 ChannelPlan;
/* 8190 40MHz mode */
/* Control channel sub-carrier */
@@ -1171,5 +1116,4 @@ void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
void EnableHWSecurityConfig8192(struct net_device *dev);
void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
-
#endif
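Dropping the typedefs means every sizeof now spells out the struct tag, which the follow-up hunks in r8192U_core.c apply mechanically; the length macros still compose the same way. A quick stand-alone sketch of how those macros fit together, with stubbed (assumed) field layouts rather than the real bitfield descriptors above:

#include <stdio.h>
#include <stdint.h>

/* Stubs mirroring only the shape of the converted descriptors;
 * the sizes here are assumptions for illustration. */
struct tx_desc_819x_usb   { uint32_t dw[8]; }; /* 32 bytes, assumed */
struct tx_fwinfo_819x_usb { uint32_t dw[2]; }; /*  8 bytes, assumed */

#define USB_HWDESC_HEADER_LEN  sizeof(struct tx_desc_819x_usb)
#define TX_PACKET_SHIFT_BYTES  (USB_HWDESC_HEADER_LEN + \
				sizeof(struct tx_fwinfo_819x_usb))

int main(void)
{
	/* rtl8192_tx() places the fwinfo right behind the hw descriptor,
	 * so the 802.11 payload starts TX_PACKET_SHIFT_BYTES into the skb. */
	printf("hdr=%zu shift=%zu\n", USB_HWDESC_HEADER_LEN,
	       (size_t)TX_PACKET_SHIFT_BYTES);
	return 0;
}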
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 8b17400f6c13..e218b5c20642 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -211,7 +211,7 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
/* this flag is enabled to follow the 11d country IE setting;
* otherwise, global domain settings are followed.
*/
- GET_DOT11D_INFO(ieee)->bEnabled = 0;
+ GET_DOT11D_INFO(ieee)->enabled = 0;
Dot11d_Reset(ieee);
ieee->bGlobalDomain = true;
break;
@@ -505,7 +505,7 @@ static void watch_dog_timer_callback(struct timer_list *t);
static struct proc_dir_entry *rtl8192_proc;
-static int proc_get_stats_ap(struct seq_file *m, void *v)
+static int __maybe_unused proc_get_stats_ap(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -524,7 +524,7 @@ static int proc_get_stats_ap(struct seq_file *m, void *v)
return 0;
}
-static int proc_get_registers(struct seq_file *m, void *v)
+static int __maybe_unused proc_get_registers(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
int i, n, max = 0xff;
@@ -565,7 +565,7 @@ static int proc_get_registers(struct seq_file *m, void *v)
return 0;
}
-static int proc_get_stats_tx(struct seq_file *m, void *v)
+static int __maybe_unused proc_get_stats_tx(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -624,7 +624,7 @@ static int proc_get_stats_tx(struct seq_file *m, void *v)
return 0;
}
-static int proc_get_stats_rx(struct seq_file *m, void *v)
+static int __maybe_unused proc_get_stats_rx(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
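The __maybe_unused annotations above (available via the new <linux/compiler.h> include added to r8192U.h) keep the compiler from warning when a proc handler's only callers are compiled out. A hedged stand-alone illustration using the GCC attribute the kernel macro expands to:

#include <stdio.h>

/* User-space stand-in for the kernel's __maybe_unused. */
#define __maybe_unused __attribute__((__unused__))

/* With the attribute, -Wunused-function stays quiet even if no
 * caller survives preprocessing. */
static int __maybe_unused proc_get_stats_demo(void)
{
	return 0;
}

int main(void)
{
	puts("builds cleanly with -Wall even if the helper is unreferenced");
	return 0;
}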
@@ -739,7 +739,7 @@ static void rtl8192_rx_isr(struct urb *urb);
static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
{
- return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
+ return (sizeof(struct rx_desc_819x_usb) + pstats->RxDrvInfoSize
+ pstats->RxBufShift);
}
@@ -1242,7 +1242,7 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
int status;
struct urb *tx_urb;
unsigned int idx_pipe;
- tx_desc_cmd_819x_usb *pdesc = (tx_desc_cmd_819x_usb *)skb->data;
+ struct tx_desc_cmd_819x_usb *pdesc = (struct tx_desc_cmd_819x_usb *)skb->data;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
@@ -1462,9 +1462,9 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tx_desc_819x_usb *tx_desc = (tx_desc_819x_usb *)skb->data;
- tx_fwinfo_819x_usb *tx_fwinfo =
- (tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
+ struct tx_desc_819x_usb *tx_desc = (struct tx_desc_819x_usb *)skb->data;
+ struct tx_fwinfo_819x_usb *tx_fwinfo =
+ (struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
struct usb_device *udev = priv->udev;
int pend;
int status;
@@ -1489,7 +1489,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
/* Fill Tx firmware info */
- memset(tx_fwinfo, 0, sizeof(tx_fwinfo_819x_usb));
+ memset(tx_fwinfo, 0, sizeof(struct tx_fwinfo_819x_usb));
/* DWORD 0 */
tx_fwinfo->TxHT = (tcb_desc->data_rate & 0x80) ? 1 : 0;
tx_fwinfo->TxRate = MRateToHwRate8190Pci(tcb_desc->data_rate);
@@ -1535,11 +1535,11 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
/* Fill Tx descriptor */
- memset(tx_desc, 0, sizeof(tx_desc_819x_usb));
+ memset(tx_desc, 0, sizeof(struct tx_desc_819x_usb));
/* DWORD 0 */
tx_desc->LINIP = 0;
tx_desc->CmdInit = 1;
- tx_desc->Offset = sizeof(tx_fwinfo_819x_usb) + 8;
+ tx_desc->Offset = sizeof(struct tx_fwinfo_819x_usb) + 8;
tx_desc->PktSize = (skb->len - TX_PACKET_SHIFT_BYTES) & 0xffff;
/*DWORD 1*/
@@ -1570,7 +1570,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
tx_desc->QueueSelect = MapHwQueueToFirmwareQueue(tcb_desc->queue_index);
- tx_desc->TxFWInfoSize = sizeof(tx_fwinfo_819x_usb);
+ tx_desc->TxFWInfoSize = sizeof(struct tx_fwinfo_819x_usb);
tx_desc->DISFB = tcb_desc->bTxDisableRateFallBack;
tx_desc->USERATE = tcb_desc->bTxUseDriverAssingedRate;
@@ -2897,7 +2897,7 @@ static bool rtl8192_adapter_start(struct net_device *dev)
*/
for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
PHY_SetRFReg(Adapter,
- (RF90_RADIO_PATH_E)eRFPath,
+ (enum rf90_radio_path_e)eRFPath,
0x4, 0xC00, 0x0);
} else if (pMgntInfo->RfOffReason > RF_CHANGE_BY_PS) {
/* H/W or S/W RF OFF before sleep. */
@@ -2923,7 +2923,7 @@ static bool rtl8192_adapter_start(struct net_device *dev)
*/
for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
PHY_SetRFReg(Adapter,
- (RF90_RADIO_PATH_E)eRFPath,
+ (enum rf90_radio_path_e)eRFPath,
0x4, 0xC00, 0x0);
}
}
@@ -3079,18 +3079,18 @@ static bool HalRxCheckStuck819xUsb(struct net_device *dev)
* or it may keep silently resetting every 2 seconds.
*/
rx_chk_cnt++;
- if (priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High + 5)) {
+ if (priv->undecorated_smoothed_pwdb >= (RATE_ADAPTIVE_TH_HIGH + 5)) {
rx_chk_cnt = 0; /* high rssi, check rx stuck right now. */
- } else if (priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High + 5) &&
- ((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_40M) ||
- (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_20M))) {
+ } else if (priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5) &&
+ ((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_40M) ||
+ (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_20M))) {
if (rx_chk_cnt < 2)
return bStuck;
rx_chk_cnt = 0;
- } else if (((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_40M) ||
- (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_20M)) &&
- priv->undecorated_smoothed_pwdb >= VeryLowRSSI) {
+ } else if (((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_40M) ||
+ (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_20M)) &&
+ priv->undecorated_smoothed_pwdb >= VERY_LOW_RSSI) {
if (rx_chk_cnt < 4)
return bStuck;
@@ -3932,11 +3932,10 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
struct rtl_80211_hdr_3addr *hdr;
u16 sc;
- unsigned int frag, seq;
+ unsigned int seq;
hdr = (struct rtl_80211_hdr_3addr *)buffer;
sc = le16_to_cpu(hdr->seq_ctl);
- frag = WLAN_GET_SEQ_FRAG(sc);
seq = WLAN_GET_SEQ_SEQ(sc);
/* to record the sequence number */
pcurrent_stats->Seq_Num = seq;
@@ -4195,7 +4194,7 @@ static inline bool rx_hal_is_cck_rate(struct rx_drvinfo_819x_usb *pdrvinfo)
static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
struct ieee80211_rx_stats *pstats,
- rx_drvinfo_819x_usb *pdrvinfo,
+ struct rx_drvinfo_819x_usb *pdrvinfo,
struct ieee80211_rx_stats *precord_stats,
bool bpacket_match_bssid,
bool bpacket_toself,
@@ -4232,7 +4231,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
prxpkt = (u8 *)pdrvinfo;
/* Move pointer to the 16th byte: PHY status start address. */
- prxpkt += sizeof(rx_drvinfo_819x_usb);
+ prxpkt += sizeof(struct rx_drvinfo_819x_usb);
/* Initial the cck and ofdm buffer pointer */
pcck_buf = (phy_sts_cck_819xusb_t *)prxpkt;
@@ -4432,7 +4431,7 @@ static void rtl8192_record_rxdesc_forlateruse(
static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
struct ieee80211_rx_stats *pstats,
- rx_drvinfo_819x_usb *pdrvinfo)
+ struct rx_drvinfo_819x_usb *pdrvinfo)
{
/* TODO: We must only check packets for the current MAC address.
* Not finished.
@@ -4631,10 +4630,10 @@ static void query_rxdesc_status(struct sk_buff *skb,
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- rx_drvinfo_819x_usb *driver_info = NULL;
+ struct rx_drvinfo_819x_usb *driver_info = NULL;
/* Get Rx Descriptor Information */
- rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
+ struct rx_desc_819x_usb *desc = (struct rx_desc_819x_usb *)skb->data;
stats->Length = desc->Length;
stats->RxDrvInfoSize = desc->RxDrvInfoSize;
@@ -4658,9 +4657,9 @@ static void query_rxdesc_status(struct sk_buff *skb,
* Driver info is written to the RxBuffer following the rx desc
*/
if (stats->RxDrvInfoSize != 0) {
- driver_info = (rx_drvinfo_819x_usb *)(
+ driver_info = (struct rx_drvinfo_819x_usb *)(
skb->data
- + sizeof(rx_desc_819x_usb)
+ + sizeof(struct rx_desc_819x_usb)
+ stats->RxBufShift
);
/* unit: 0.5M */
@@ -4705,7 +4704,7 @@ static void query_rxdesc_status(struct sk_buff *skb,
driver_info->FirstAGGR, driver_info->PartAggr);
}
- skb_pull(skb, sizeof(rx_desc_819x_usb));
+ skb_pull(skb, sizeof(struct rx_desc_819x_usb));
/* Get Total offset of MPDU Frame Body */
if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0) {
stats->bShift = 1;
@@ -4734,7 +4733,7 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
bool unicast_packet = false;
/* 20 is for ps-poll */
- if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
+ if ((skb->len >= (20 + sizeof(struct rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
/* first packet should not contain Rx aggregation header */
query_rxdesc_status(skb, &stats, false);
/* TODO */
@@ -4772,14 +4771,10 @@ static void rtl819xusb_process_received_packet(
struct net_device *dev,
struct ieee80211_rx_stats *pstats)
{
- u8 *frame;
- u16 frame_len = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
/* Get shifted bytes of Starting address of 802.11 header. */
pstats->virtual_address += get_rxpacket_shiftbytes_819xusb(pstats);
- frame = pstats->virtual_address;
- frame_len = pstats->packetlength;
#ifdef TODO /* about HCT */
if (!Adapter->bInHctTest)
CountRxErrStatistics(Adapter, pRfd);
@@ -4814,7 +4809,7 @@ static void rtl819xusb_process_received_packet(
static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
struct ieee80211_rx_stats *stats)
{
- rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
+ struct rx_desc_819x_usb *desc = (struct rx_desc_819x_usb *)skb->data;
/* Get Rx Descriptor Information */
stats->virtual_address = (u8 *)skb->data;
@@ -4840,7 +4835,7 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
.freq = IEEE80211_24GHZ_BAND,
};
- if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
+ if ((skb->len >= (20 + sizeof(struct rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
query_rx_cmdpkt_desc_status(skb, &stats);
/* prfd->queue_id = 1; */
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index e25b058dec26..5fb5f583f703 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -41,7 +41,7 @@ struct dig dm_digtable;
/* Store current software write register content for MAC PHY. */
u8 dm_shadow[16][256] = { {0} };
/* For Dynamic Rx Path Selection by Signal Strength */
-struct dynamic_rx_path_sel DM_RxPathSelTable;
+static struct dynamic_rx_path_sel DM_RxPathSelTable;
/*------------------------Define global variable-----------------------------*/
@@ -243,13 +243,13 @@ void init_rate_adaptive(struct net_device *dev)
prate_adaptive pra = (prate_adaptive)&priv->rate_adaptive;
pra->ratr_state = DM_RATR_STA_MAX;
- pra->high2low_rssi_thresh_for_ra = RateAdaptiveTH_High;
- pra->low2high_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M+5;
- pra->low2high_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M+5;
+ pra->high2low_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH;
+ pra->low2high_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M + 5;
+ pra->low2high_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M + 5;
- pra->high_rssi_thresh_for_ra = RateAdaptiveTH_High+5;
- pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
- pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;
+ pra->high_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH + 5;
+ pra->low_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M;
+ pra->low_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M;
if (priv->CustomerID == RT_CID_819x_Netcore)
pra->ping_rssi_enable = 1;
@@ -509,8 +509,8 @@ static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- bool bHighpowerstate, viviflag = false;
- DCMD_TXCMD_T tx_cmd;
+ bool viviflag = false;
+ struct tx_config_cmd tx_cmd;
u8 powerlevelOFDM24G;
int i = 0, j = 0, k = 0;
u8 RF_Type, tmp_report[5] = {0, 0, 0, 0, 0};
@@ -524,7 +524,6 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
write_nic_byte(dev, 0x1ba, 0);
priv->ieee80211->bdynamic_txpower_enable = false;
- bHighpowerstate = priv->bDynamicTxHighPower;
powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
RF_Type = priv->rf_type;
@@ -533,10 +532,10 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n", powerlevelOFDM24G);
for (j = 0; j <= 30; j++) { /* fill tx_cmd */
- tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
- tx_cmd.Length = 4;
- tx_cmd.Value = Value;
- rtStatus = SendTxCommandPacket(dev, &tx_cmd, 12);
+ tx_cmd.cmd_op = TXCMD_SET_TX_PWR_TRACKING;
+ tx_cmd.cmd_length = sizeof(tx_cmd.cmd_op);
+ tx_cmd.cmd_value = Value;
+ rtStatus = SendTxCommandPacket(dev, &tx_cmd, sizeof(struct tx_config_cmd));
if (rtStatus == RT_STATUS_FAILURE)
RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
usleep_range(1000, 2000);
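The tx_cmd fill above swaps the magic numbers 4 and 12 for sizeof expressions tied to the structure, so the packet length tracks the definition if it ever changes. A minimal sketch of that fill pattern with a stubbed send; the command opcode and register value are illustrative, not the driver's real constants:

#include <stdio.h>
#include <stdint.h>

struct tx_config_cmd {
	uint32_t cmd_op;     /* command packet type */
	uint32_t cmd_length; /* command packet length */
	uint32_t cmd_value;
};

#define TXCMD_SET_TX_PWR_TRACKING 0xec /* illustrative value only */

/* Stub for SendTxCommandPacket(); the real one queues a USB cmd urb. */
static int send_tx_command_packet(const void *data, size_t len)
{
	printf("sending %zu bytes\n", len);
	return 0;
}

int main(void)
{
	struct tx_config_cmd tx_cmd = {
		.cmd_op     = TXCMD_SET_TX_PWR_TRACKING,
		.cmd_length = sizeof(tx_cmd.cmd_op), /* was the literal 4 */
		.cmd_value  = 0x1ba,                 /* illustrative */
	};

	/* was the literal 12 */
	return send_tx_command_packet(&tx_cmd, sizeof(struct tx_config_cmd));
}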
@@ -1615,97 +1614,6 @@ static void dm_bb_initialgain_backup(struct net_device *dev)
#endif
/*-----------------------------------------------------------------------------
- * Function: dm_change_dynamic_initgain_thresh()
- *
- * Overview:
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/29/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-
-void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
- u32 dm_value)
-{
- switch (dm_type) {
- case DIG_TYPE_THRESH_HIGH:
- dm_digtable.rssi_high_thresh = dm_value;
- break;
-
- case DIG_TYPE_THRESH_LOW:
- dm_digtable.rssi_low_thresh = dm_value;
- break;
-
- case DIG_TYPE_THRESH_HIGHPWR_HIGH:
- dm_digtable.rssi_high_power_highthresh = dm_value;
- break;
-
- case DIG_TYPE_THRESH_HIGHPWR_LOW:
- dm_digtable.rssi_high_power_lowthresh = dm_value;
- break;
-
- case DIG_TYPE_ENABLE:
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- dm_digtable.dig_enable_flag = true;
- break;
-
- case DIG_TYPE_DISABLE:
- dm_digtable.dig_state = DM_STA_DIG_MAX;
- dm_digtable.dig_enable_flag = false;
- break;
-
- case DIG_TYPE_DBG_MODE:
- if (dm_value >= DM_DBG_MAX)
- dm_value = DM_DBG_OFF;
- dm_digtable.dbg_mode = (u8)dm_value;
- break;
-
- case DIG_TYPE_RSSI:
- if (dm_value > 100)
- dm_value = 30;
- dm_digtable.rssi_val = (long)dm_value;
- break;
-
- case DIG_TYPE_ALGORITHM:
- if (dm_value >= DIG_ALGO_MAX)
- dm_value = DIG_ALGO_BY_FALSE_ALARM;
- if (dm_digtable.dig_algorithm != (u8)dm_value)
- dm_digtable.dig_algorithm_switch = 1;
- dm_digtable.dig_algorithm = (u8)dm_value;
- break;
-
- case DIG_TYPE_BACKOFF:
- if (dm_value > 30)
- dm_value = 30;
- dm_digtable.backoff_val = (u8)dm_value;
- break;
-
- case DIG_TYPE_RX_GAIN_MIN:
- if (dm_value == 0)
- dm_value = 0x1;
- dm_digtable.rx_gain_range_min = (u8)dm_value;
- break;
-
- case DIG_TYPE_RX_GAIN_MAX:
- if (dm_value > 0x50)
- dm_value = 0x50;
- dm_digtable.rx_gain_range_max = (u8)dm_value;
- break;
-
- default:
- break;
- }
-
-} /* DM_ChangeDynamicInitGainThresh */
-
-/*-----------------------------------------------------------------------------
* Function: dm_dig_init()
*
* Overview: Set DIG scheme init value.
@@ -1727,13 +1635,11 @@ static void dm_dig_init(struct net_device *dev)
/* 2007/10/05 MH Disable DIG scheme now. Not tested. */
dm_digtable.dig_enable_flag = true;
dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
- dm_digtable.dbg_mode = DM_DBG_OFF; /* off=by real rssi value, on=by DM_DigTable.Rssi_val for new dig */
dm_digtable.dig_algorithm_switch = 0;
/* 2007/10/04 MH Define init gain threshold. */
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
- dm_digtable.initialgain_lowerbound_state = false;
dm_digtable.rssi_low_thresh = DM_DIG_THRESH_LOW;
dm_digtable.rssi_high_thresh = DM_DIG_THRESH_HIGH;
@@ -1743,9 +1649,8 @@ static void dm_dig_init(struct net_device *dev)
dm_digtable.rssi_val = 50; /* for new dig debug rssi value */
dm_digtable.backoff_val = DM_DIG_BACKOFF;
- dm_digtable.rx_gain_range_max = DM_DIG_MAX;
if (priv->CustomerID == RT_CID_819x_Netcore)
- dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
+ dm_digtable.rx_gain_range_min = DM_DIG_MIN_NETCORE;
else
dm_digtable.rx_gain_range_min = DM_DIG_MIN;
@@ -1812,8 +1717,7 @@ static void dm_ctrl_initgain_byrssi_by_driverrssi(
/*DbgPrint("DM_DigTable.PreConnectState = %d, DM_DigTable.CurConnectState = %d\n",
DM_DigTable.PreConnectState, DM_DigTable.CurConnectState);*/
- if (dm_digtable.dbg_mode == DM_DBG_OFF)
- dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
+ dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
/*DbgPrint("DM_DigTable.Rssi_val = %d\n", DM_DigTable.Rssi_val);*/
dm_initial_gain(dev);
dm_pd_th(dev);
@@ -2062,8 +1966,8 @@ static void dm_initial_gain(
if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state) {
if (dm_digtable.cur_connect_state == DIG_CONNECT) {
- if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max)
- dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_max;
+ if ((dm_digtable.rssi_val + 10 - dm_digtable.backoff_val) > DM_DIG_MAX)
+ dm_digtable.cur_ig_value = DM_DIG_MAX;
else if ((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min;
else
@@ -2334,12 +2238,12 @@ static void dm_check_edca_turbo(
{
/* TODO: Modified this part and try to set acm control in only 1 IO processing!! */
- PACI_AIFSN pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
+ struct aci_aifsn *pAciAifsn = (struct aci_aifsn *)&(qos_parameters->aifs[0]);
u8 AcmCtrl;
read_nic_byte(dev, AcmHwCtrl, &AcmCtrl);
- if (pAciAifsn->f.ACM) { /* ACM bit is 1. */
+ if (pAciAifsn->acm) { /* acm bit is 1. */
AcmCtrl |= AcmHw_BeqEn;
} else { /* ACM bit is 0. */
AcmCtrl &= (~AcmHw_BeqEn);
@@ -2364,7 +2268,7 @@ static void dm_init_ctstoself(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
priv->ieee80211->bCTSToSelfEnable = true;
- priv->ieee80211->CTSToSelfTH = CTSToSelfTHVal;
+ priv->ieee80211->CTSToSelfTH = CTS_TO_SELF_TH_VAL;
}
static void dm_ctstoself(struct net_device *dev)
@@ -2472,8 +2376,6 @@ void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
else
priv->brfpath_rxenable[i] = false;
}
- if (!DM_RxPathSelTable.Enable)
- return;
dm_rxpath_sel_byrssi(dev);
} /* DM_RFPathCheckWorkItemCallBack */
@@ -2483,15 +2385,11 @@ static void dm_init_rxpath_selection(struct net_device *dev)
u8 i;
struct r8192_priv *priv = ieee80211_priv(dev);
- DM_RxPathSelTable.Enable = 1; /* default enabled */
- DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low;
- DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH;
if (priv->CustomerID == RT_CID_819x_Netcore)
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
+ DM_RxPathSelTable.cck_method = CCK_RX_VERSION_2;
else
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_1;
- DM_RxPathSelTable.DbgMode = DM_DBG_OFF;
- DM_RxPathSelTable.disabledRF = 0;
+ DM_RxPathSelTable.cck_method = CCK_RX_VERSION_1;
+ DM_RxPathSelTable.disabled_rf = 0;
for (i = 0; i < 4; i++) {
DM_RxPathSelTable.rf_rssi[i] = 50;
DM_RxPathSelTable.cck_pwdb_sta[i] = -64;
@@ -2517,23 +2415,22 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
return;
if (!cck_Rx_Path_initialized) {
- read_nic_byte(dev, 0xa07, &DM_RxPathSelTable.cck_Rx_path);
- DM_RxPathSelTable.cck_Rx_path &= 0xf;
+ read_nic_byte(dev, 0xa07, &DM_RxPathSelTable.cck_rx_path);
+ DM_RxPathSelTable.cck_rx_path &= 0xf;
cck_Rx_Path_initialized = 1;
}
- read_nic_byte(dev, 0xc04, &DM_RxPathSelTable.disabledRF);
- DM_RxPathSelTable.disabledRF = ~DM_RxPathSelTable.disabledRF & 0xf;
+ read_nic_byte(dev, 0xc04, &DM_RxPathSelTable.disabled_rf);
+ DM_RxPathSelTable.disabled_rf = ~DM_RxPathSelTable.disabled_rf & 0xf;
if (priv->ieee80211->mode == WIRELESS_MODE_B) {
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; /* pure B mode, fixed cck version2 */
+ DM_RxPathSelTable.cck_method = CCK_RX_VERSION_2; /* pure B mode, fixed cck version2 */
/*DbgPrint("Pure B mode, use cck rx version2\n");*/
}
/* decide max/sec/min rssi index */
for (i = 0; i < RF90_PATH_MAX; i++) {
- if (!DM_RxPathSelTable.DbgMode)
- DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
+ DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
if (priv->brfpath_rxenable[i]) {
rf_num++;
@@ -2591,7 +2488,7 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
rf_num = 0;
/* decide max/sec/min cck pwdb index */
- if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
+ if (DM_RxPathSelTable.cck_method == CCK_RX_VERSION_2) {
for (i = 0; i < RF90_PATH_MAX; i++) {
if (priv->brfpath_rxenable[i]) {
rf_num++;
@@ -2649,15 +2546,15 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
* reg0xA07[3:2]=cck default rx path, reg0xa07[1:0]=cck optional rx path.
*/
update_cck_rx_path = 0;
- if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
+ if (DM_RxPathSelTable.cck_method == CCK_RX_VERSION_2) {
cck_default_Rx = cck_rx_ver2_max_index;
cck_optional_Rx = cck_rx_ver2_sec_index;
if (tmp_cck_max_pwdb != -64)
update_cck_rx_path = 1;
}
- if (tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2) {
- if ((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH) {
+ if (tmp_min_rssi < RX_PATH_SELECTION_SS_TH_LOW && disabled_rf_cnt < 2) {
+ if ((tmp_max_rssi - tmp_min_rssi) >= RX_PATH_SELECTION_DIFF_TH) {
/* record the enabled rssi threshold */
DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] = tmp_max_rssi+5;
/* disable the BB Rx path, OFDM */
@@ -2665,7 +2562,7 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); /* 0xd04[3:0] */
disabled_rf_cnt++;
}
- if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) {
+ if (DM_RxPathSelTable.cck_method == CCK_RX_VERSION_1) {
cck_default_Rx = max_rssi_index;
cck_optional_Rx = sec_rssi_index;
if (tmp_max_rssi)
@@ -2674,13 +2571,13 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
}
if (update_cck_rx_path) {
- DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2)|(cck_optional_Rx);
- rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_Rx_path);
+ DM_RxPathSelTable.cck_rx_path = (cck_default_Rx<<2)|(cck_optional_Rx);
+ rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_rx_path);
}
- if (DM_RxPathSelTable.disabledRF) {
+ if (DM_RxPathSelTable.disabled_rf) {
for (i = 0; i < 4; i++) {
- if ((DM_RxPathSelTable.disabledRF>>i) & 0x1) { /* disabled rf */
+ if ((DM_RxPathSelTable.disabled_rf >> i) & 0x1) { /* disabled rf */
if (tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i]) {
/* enable the BB Rx path */
/*DbgPrint("RF-%d is enabled.\n", 0x1<<i);*/
@@ -2978,13 +2875,13 @@ void dm_check_fsync(struct net_device *dev)
if (priv->framesyncMonitor) {
if (priv->ieee80211->state == IEEE80211_LINKED) {
- if (priv->undecorated_smoothed_pwdb <= RegC38_TH) {
+ if (priv->undecorated_smoothed_pwdb <= REG_C38_TH) {
if (reg_c38_State != RegC38_NonFsync_Other_AP) {
write_nic_byte(dev, rOFDM0_RxDetector3, 0x90);
reg_c38_State = RegC38_NonFsync_Other_AP;
}
- } else if (priv->undecorated_smoothed_pwdb >= (RegC38_TH+5)) {
+ } else if (priv->undecorated_smoothed_pwdb >= (REG_C38_TH + 5)) {
if (reg_c38_State) {
write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync);
reg_c38_State = RegC38_Default;
@@ -3149,15 +3046,15 @@ static void dm_check_txrateandretrycount(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
/* for 11n tx rate */
- /*priv->stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg);*/
- read_nic_byte(dev, Current_Tx_Rate_Reg, &ieee->softmac_stats.CurrentShowTxate);
+ /*priv->stats.CurrentShowTxate = read_nic_byte(dev, CURRENT_TX_RATE_REG);*/
+ read_nic_byte(dev, CURRENT_TX_RATE_REG, &ieee->softmac_stats.CurrentShowTxate);
/*printk("=============>tx_rate_reg:%x\n", ieee->softmac_stats.CurrentShowTxate);*/
/* for initial tx rate */
- /*priv->stats.last_packet_rate = read_nic_byte(dev, Initial_Tx_Rate_Reg);*/
- read_nic_byte(dev, Initial_Tx_Rate_Reg, &ieee->softmac_stats.last_packet_rate);
+ /*priv->stats.last_packet_rate = read_nic_byte(dev, INITIAL_TX_RATE_REG);*/
+ read_nic_byte(dev, INITIAL_TX_RATE_REG, &ieee->softmac_stats.last_packet_rate);
/* for tx retry count */
- /*priv->stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg);*/
- read_nic_dword(dev, Tx_Retry_Count_Reg, &ieee->softmac_stats.txretrycount);
+ /*priv->stats.txretrycount = read_nic_dword(dev, TX_RETRY_COUNT_REG);*/
+ read_nic_dword(dev, TX_RETRY_COUNT_REG, &ieee->softmac_stats.txretrycount);
}
static void dm_send_rssi_tofw(struct net_device *dev)
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 8f3d618dcfdb..0de0332906bd 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -21,197 +21,139 @@
#ifndef __R8192UDM_H__
#define __R8192UDM_H__
-
/*--------------------------Define Parameters-------------------------------*/
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
-#define DM_DIG_HIGH_PWR_THRESH_LOW 70
-
-#define BW_AUTO_SWITCH_HIGH_LOW 25
-#define BW_AUTO_SWITCH_LOW_HIGH 30
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
-#define DM_check_fsync_time_interval 500
+#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
+#define DM_DIG_HIGH_PWR_THRESH_LOW 70
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_DIG_BACKOFF 12
-#define DM_DIG_MAX 0x36
-#define DM_DIG_MIN 0x1c
-#define DM_DIG_MIN_Netcore 0x12
+#define DM_DIG_BACKOFF 12
+#define DM_DIG_MAX 0x36
+#define DM_DIG_MIN 0x1c
+#define DM_DIG_MIN_NETCORE 0x12
-#define RxPathSelection_SS_TH_low 30
-#define RxPathSelection_diff_TH 18
+#define RX_PATH_SELECTION_SS_TH_LOW 30
+#define RX_PATH_SELECTION_DIFF_TH 18
-#define RateAdaptiveTH_High 50
-#define RateAdaptiveTH_Low_20M 30
-#define RateAdaptiveTH_Low_40M 10
-#define VeryLowRSSI 15
-#define CTSToSelfTHVal 30
+#define RATE_ADAPTIVE_TH_HIGH 50
+#define RATE_ADAPTIVE_TH_LOW_20M 30
+#define RATE_ADAPTIVE_TH_LOW_40M 10
+#define VERY_LOW_RSSI 15
+#define CTS_TO_SELF_TH_VAL 30
/* defined by vivi, for tx power track */
-#define E_FOR_TX_POWER_TRACK 300
+#define E_FOR_TX_POWER_TRACK 300
/* Dynamic Tx Power Control Threshold */
-#define TX_POWER_NEAR_FIELD_THRESH_HIGH 68
-#define TX_POWER_NEAR_FIELD_THRESH_LOW 62
+#define TX_POWER_NEAR_FIELD_THRESH_HIGH 68
+#define TX_POWER_NEAR_FIELD_THRESH_LOW 62
/* added by amy for atheros AP */
#define TX_POWER_ATHEROAP_THRESH_HIGH 78
-#define TX_POWER_ATHEROAP_THRESH_LOW 72
+#define TX_POWER_ATHEROAP_THRESH_LOW 72
/* defined by vivi, for showing on UI */
-#define Current_Tx_Rate_Reg 0x1b8
-#define Initial_Tx_Rate_Reg 0x1b9
-#define Tx_Retry_Count_Reg 0x1ac
-#define RegC38_TH 20
+#define CURRENT_TX_RATE_REG 0x1b8
+#define INITIAL_TX_RATE_REG 0x1b9
+#define TX_RETRY_COUNT_REG 0x1ac
+#define REG_C38_TH 20
/*--------------------------Define Parameters-------------------------------*/
-
/*------------------------------Define structure----------------------------*/
+
+enum dig_algorithm {
+ DIG_ALGO_BY_FALSE_ALARM = 0,
+ DIG_ALGO_BY_RSSI = 1,
+};
+
+enum dynamic_init_gain_state {
+ DM_STA_DIG_OFF = 0,
+ DM_STA_DIG_ON,
+ DM_STA_DIG_MAX
+};
+
+enum dig_connect {
+ DIG_DISCONNECT = 0,
+ DIG_CONNECT = 1,
+};
+
+enum dig_pkt_detection_threshold {
+ DIG_PD_AT_LOW_POWER = 0,
+ DIG_PD_AT_NORMAL_POWER = 1,
+ DIG_PD_AT_HIGH_POWER = 2,
+};
+
+enum dig_cck_cs_ratio_state {
+ DIG_CS_RATIO_LOWER = 0,
+ DIG_CS_RATIO_HIGHER = 1,
+};
+
/* 2007/10/04 MH Define upper and lower threshold of DIG enable or disable. */
struct dig {
- u8 dig_enable_flag;
- u8 dig_algorithm;
- u8 dbg_mode;
- u8 dig_algorithm_switch;
+ u8 dig_enable_flag;
+ enum dig_algorithm dig_algorithm;
+ u8 dig_algorithm_switch;
- long rssi_low_thresh;
- long rssi_high_thresh;
+ long rssi_low_thresh;
+ long rssi_high_thresh;
- long rssi_high_power_lowthresh;
- long rssi_high_power_highthresh;
+ long rssi_high_power_lowthresh;
+ long rssi_high_power_highthresh;
- u8 dig_state;
- u8 dig_highpwr_state;
- u8 cur_connect_state;
- u8 pre_connect_state;
+ enum dynamic_init_gain_state dig_state;
+ enum dynamic_init_gain_state dig_highpwr_state;
+ enum dig_connect cur_connect_state;
+ enum dig_connect pre_connect_state;
- u8 curpd_thstate;
- u8 prepd_thstate;
- u8 curcs_ratio_state;
- u8 precs_ratio_state;
+ enum dig_pkt_detection_threshold curpd_thstate;
+ enum dig_pkt_detection_threshold prepd_thstate;
+ enum dig_cck_cs_ratio_state curcs_ratio_state;
+ enum dig_cck_cs_ratio_state precs_ratio_state;
- u32 pre_ig_value;
- u32 cur_ig_value;
+ u32 pre_ig_value;
+ u32 cur_ig_value;
- u8 backoff_val;
- u8 rx_gain_range_max;
- u8 rx_gain_range_min;
- bool initialgain_lowerbound_state;
+ u8 backoff_val;
+ u8 rx_gain_range_min;
- long rssi_val;
+ long rssi_val;
};
-typedef enum tag_dynamic_init_gain_state_definition {
- DM_STA_DIG_OFF = 0,
- DM_STA_DIG_ON,
- DM_STA_DIG_MAX
-} dm_dig_sta_e;
-
-
-/* 2007/10/08 MH Define RATR state. */
-typedef enum tag_dynamic_ratr_state_definition {
- DM_RATR_STA_HIGH = 0,
- DM_RATR_STA_MIDDLE = 1,
- DM_RATR_STA_LOW = 2,
- DM_RATR_STA_MAX
-} dm_ratr_sta_e;
-
-/* 2007/10/11 MH Define DIG operation type. */
-typedef enum tag_dynamic_init_gain_operation_type_definition {
- DIG_TYPE_THRESH_HIGH = 0,
- DIG_TYPE_THRESH_LOW = 1,
- DIG_TYPE_THRESH_HIGHPWR_HIGH = 2,
- DIG_TYPE_THRESH_HIGHPWR_LOW = 3,
- DIG_TYPE_DBG_MODE = 4,
- DIG_TYPE_RSSI = 5,
- DIG_TYPE_ALGORITHM = 6,
- DIG_TYPE_BACKOFF = 7,
- DIG_TYPE_PWDB_FACTOR = 8,
- DIG_TYPE_RX_GAIN_MIN = 9,
- DIG_TYPE_RX_GAIN_MAX = 10,
- DIG_TYPE_ENABLE = 20,
- DIG_TYPE_DISABLE = 30,
- DIG_OP_TYPE_MAX
-} dm_dig_op_e;
-
-typedef enum tag_dig_algorithm_definition {
- DIG_ALGO_BY_FALSE_ALARM = 0,
- DIG_ALGO_BY_RSSI = 1,
- DIG_ALGO_MAX
-} dm_dig_alg_e;
+enum cck_rx_path_method {
+ CCK_RX_VERSION_1 = 0,
+ CCK_RX_VERSION_2 = 1,
+};
-typedef enum tag_dig_dbgmode_definition {
- DIG_DBG_OFF = 0,
- DIG_DBG_ON = 1,
- DIG_DBG_MAX
-} dm_dig_dbg_e;
+struct dynamic_rx_path_sel {
+ enum cck_rx_path_method cck_method;
+ u8 cck_rx_path;
-typedef enum tag_dig_connect_definition {
- DIG_DISCONNECT = 0,
- DIG_CONNECT = 1,
- DIG_CONNECT_MAX
-} dm_dig_connect_e;
+ u8 disabled_rf;
-typedef enum tag_dig_packetdetection_threshold_definition {
- DIG_PD_AT_LOW_POWER = 0,
- DIG_PD_AT_NORMAL_POWER = 1,
- DIG_PD_AT_HIGH_POWER = 2,
- DIG_PD_MAX
-} dm_dig_pd_th_e;
+ u8 rf_rssi[4];
+ u8 rf_enable_rssi_th[4];
+ long cck_pwdb_sta[4];
+};
-typedef enum tag_dig_cck_cs_ratio_state_definition {
- DIG_CS_RATIO_LOWER = 0,
- DIG_CS_RATIO_HIGHER = 1,
- DIG_CS_MAX
-} dm_dig_cs_ratio_e;
-struct dynamic_rx_path_sel {
- u8 Enable;
- u8 DbgMode;
- u8 cck_method;
- u8 cck_Rx_path;
-
- u8 SS_TH_low;
- u8 diff_TH;
- u8 disabledRF;
- u8 reserved;
-
- u8 rf_rssi[4];
- u8 rf_enable_rssi_th[4];
- long cck_pwdb_sta[4];
+struct tx_config_cmd {
+ u32 cmd_op; /* Command packet type. */
+ u32 cmd_length; /* Command packet length. */
+ u32 cmd_value;
};
-typedef enum tag_CCK_Rx_Path_Method_Definition {
- CCK_Rx_Version_1 = 0,
- CCK_Rx_Version_2 = 1,
- CCK_Rx_Version_MAX
-} DM_CCK_Rx_Path_Method;
-
-typedef enum tag_DM_DbgMode_Definition {
- DM_DBG_OFF = 0,
- DM_DBG_ON = 1,
- DM_DBG_MAX
-} DM_DBG_E;
-
-typedef struct tag_Tx_Config_Cmd_Format {
- u32 Op; /* Command packet type. */
- u32 Length; /* Command packet length. */
- u32 Value;
-} DCMD_TXCMD_T, *PDCMD_TXCMD_T;
/*------------------------------Define structure----------------------------*/
-
/*------------------------Export global variable----------------------------*/
extern struct dig dm_digtable;
extern u8 dm_shadow[16][256];
-extern struct dynamic_rx_path_sel DM_RxPathSelTable;
/*------------------------Export global variable----------------------------*/
-
/*------------------------Export Marco Definition---------------------------*/
/*------------------------Export Marco Definition---------------------------*/
-
/*--------------------------Exported Function prototype---------------------*/
void init_hal_dm(struct net_device *dev);
void deinit_hal_dm(struct net_device *dev);
@@ -220,8 +162,6 @@ void init_rate_adaptive(struct net_device *dev);
void dm_txpower_trackingcallback(struct work_struct *work);
void dm_restore_dynamic_mechanism_state(struct net_device *dev);
void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type, u32 dm_value);
void dm_force_tx_fw_info(struct net_device *dev,
u32 force_type, u32 force_value);
void dm_init_edca_turbo(struct net_device *dev);
@@ -233,8 +173,6 @@ void dm_shadow_init(struct net_device *dev);
void dm_initialize_txpower_tracking(struct net_device *dev);
/*--------------------------Exported Function prototype---------------------*/
-
#endif /*__R8192UDM_H__ */
-
/* End of r8192U_dm.h */
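Typing the dig state fields as enums instead of bare u8 lets the compiler and readers see the legal values, and switch statements over them gain -Wswitch coverage when an enumerator is added. A small sketch of that benefit using the converted dig_connect states:

#include <stdio.h>

enum dig_connect {
	DIG_DISCONNECT = 0,
	DIG_CONNECT = 1,
};

struct dig_demo {
	enum dig_connect cur_connect_state; /* was a bare u8 */
};

static const char *dig_connect_name(enum dig_connect s)
{
	/* -Wswitch warns if a new enumerator is added but not handled. */
	switch (s) {
	case DIG_DISCONNECT: return "disconnected";
	case DIG_CONNECT:    return "connected";
	}
	return "?";
}

int main(void)
{
	struct dig_demo d = { .cur_connect_state = DIG_CONNECT };
	puts(dig_connect_name(d.cur_connect_state));
	return 0;
}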
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 80672100ea26..900f7866d381 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -60,25 +60,7 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
return RT_STATUS_SUCCESS;
}
-/*-----------------------------------------------------------------------------
- * Function: cmpk_counttxstatistic()
- *
- * Overview:
- *
- * Input: PADAPTER pAdapter
- * CMPK_TXFB_T *psTx_FB
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-static void cmpk_count_txstatistic(struct net_device *dev, cmpk_txfb_t *pstx_fb)
+static void cmpk_count_txstatistic(struct net_device *dev, struct cmd_pkt_tx_feedback *pstx_fb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_PS
@@ -163,7 +145,7 @@ static void cmpk_count_txstatistic(struct net_device *dev, cmpk_txfb_t *pstx_fb)
static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- cmpk_txfb_t rx_tx_fb;
+ struct cmd_pkt_tx_feedback rx_tx_fb;
priv->stats.txfeedback++;
@@ -173,7 +155,7 @@ static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
* endian type before copying the message.
*/
/* Use pointer to transfer structure memory. */
- memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
+ memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(struct cmd_pkt_tx_feedback));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_txstatistic(dev, &rx_tx_fb);
/* Comment previous method for TX statistic function. */
@@ -225,7 +207,7 @@ static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
*/
static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
{
- cmpk_intr_sta_t rx_intr_status; /* */
+ struct cmd_pkt_interrupt_status rx_intr_status; /* */
struct r8192_priv *priv = ieee80211_priv(dev);
DMESG("---> cmpk_Handle_Interrupt_Status()\n");
@@ -236,7 +218,7 @@ static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
 * endian type before copying the message.
*/
rx_intr_status.length = pmsg[1];
- if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) {
+ if (rx_intr_status.length != (sizeof(struct cmd_pkt_interrupt_status) - 2)) {
DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
return;
}
@@ -249,15 +231,15 @@ static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
DMESG("interrupt status = 0x%x\n",
rx_intr_status.interrupt_status);
- if (rx_intr_status.interrupt_status & ISR_TxBcnOk) {
+ if (rx_intr_status.interrupt_status & ISR_TX_BCN_OK) {
priv->ieee80211->bibsscoordinator = true;
priv->stats.txbeaconokint++;
- } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) {
+ } else if (rx_intr_status.interrupt_status & ISR_TX_BCN_ERR) {
priv->ieee80211->bibsscoordinator = false;
priv->stats.txbeaconerr++;
}
- if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr)
+ if (rx_intr_status.interrupt_status & ISR_BCN_TIMER_INTR)
cmdpkt_beacontimerinterrupt_819xusb(dev);
}
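/*
 * Illustrative note on the length check above (hypothetical helper, not
 * part of this patch): the firmware's length byte counts everything after
 * the two-byte element_id/length header, hence the "sizeof(...) - 2".
 */
static bool cmpk_intr_status_length_ok(const u8 *pmsg)
{
	return pmsg[1] == sizeof(struct cmd_pkt_interrupt_status) - 2;
}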
@@ -288,7 +270,7 @@ static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
*/
static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
{
- cmpk_query_cfg_t rx_query_cfg;
+ struct cmpk_query_cfg rx_query_cfg;
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW uses big endian (MIPS) and DRV uses little endian in
@@ -528,7 +510,7 @@ u32 cmpk_message_handle_rx(struct net_device *dev,
case RX_INTERRUPT_STATUS:
cmpk_handle_interrupt_status(dev, pcmd_buff);
- cmd_length = sizeof(cmpk_intr_sta_t);
+ cmd_length = sizeof(struct cmd_pkt_interrupt_status);
break;
case BOTH_QUERY_CONFIG:
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
index 85fb49ca7bc8..be45cd609d67 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
@@ -2,25 +2,22 @@
#ifndef R819XUSB_CMDPKT_H
#define R819XUSB_CMDPKT_H
/* Different command packets have dedicated message lengths and definitions. */
-#define CMPK_RX_TX_FB_SIZE sizeof(cmpk_txfb_t) /* 20 */
-#define CMPK_TX_SET_CONFIG_SIZE sizeof(cmpk_set_cfg_t) /* 16 */
-#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(cmpk_set_cfg_t) /* 16 */
+#define CMPK_RX_TX_FB_SIZE sizeof(struct cmd_pkt_tx_feedback) /* 20 */
+#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(struct cmd_pkt_set_configuration) /* 16 */
#define CMPK_RX_TX_STS_SIZE sizeof(cmpk_tx_status_t)
-#define CMPK_RX_DBG_MSG_SIZE sizeof(cmpk_rx_dbginfo_t)
#define CMPK_TX_RAHIS_SIZE sizeof(cmpk_tx_rahis_t)
/* 2008/05/08 amy For USB constant. */
-#define ISR_TxBcnOk BIT(27) /* Transmit Beacon OK */
-#define ISR_TxBcnErr BIT(26) /* Transmit Beacon Error */
-#define ISR_BcnTimerIntr BIT(13) /* Beacon Timer Interrupt */
-
+#define ISR_TX_BCN_OK BIT(27) /* Transmit Beacon OK */
+#define ISR_TX_BCN_ERR BIT(26) /* Transmit Beacon Error */
+#define ISR_BCN_TIMER_INTR BIT(13) /* Beacon Timer Interrupt */
/* Define element ID of command packet. */
/*------------------------------Define structure----------------------------*/
/* Define different command packet structure. */
/* 1. RX side: TX feedback packet. */
-typedef struct tag_cmd_pkt_tx_feedback {
+struct cmd_pkt_tx_feedback {
/* DWORD 0 */
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
@@ -53,21 +50,20 @@ typedef struct tag_cmd_pkt_tx_feedback {
/* DWORD 5 */
u16 reserve3;
u16 duration;
-} cmpk_txfb_t;
+};
/* 2. RX side: Interrupt status packet. It includes Beacon State,
- * Beacon Timer Interrupt and other useful informations in MAC ISR Reg.
+ * Beacon Timer Interrupt and other useful information in MAC ISR Reg.
*/
-typedef struct tag_cmd_pkt_interrupt_status {
+struct cmd_pkt_interrupt_status {
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
u16 reserve;
u32 interrupt_status; /* Interrupt Status. */
-} cmpk_intr_sta_t;
-
+};
/* 3. TX side: Set configuration packet. */
-typedef struct tag_cmd_pkt_set_configuration {
+struct cmd_pkt_set_configuration {
u8 element_id; /* Command packet type. */
u8 length; /* Command packet length. */
u16 reserve1;
@@ -82,12 +78,12 @@ typedef struct tag_cmd_pkt_set_configuration {
u8 cfg_offset;
u32 value;
u32 mask;
-} cmpk_set_cfg_t;
+};
-/* 4. Both side : TX/RX query configuraton packet. The query structure is the
+/* 4. Both side : TX/RX query configuration packet. The query structure is the
* same as set configuration.
*/
-#define cmpk_query_cfg_t cmpk_set_cfg_t
+#define cmpk_query_cfg cmd_pkt_set_configuration
/* 5. Multi packet feedback status. */
typedef struct tag_tx_stats_feedback {
@@ -191,5 +187,4 @@ u32 cmpk_message_handle_rx(struct net_device *dev,
rt_status SendTxCommandPacket(struct net_device *dev,
void *pData, u32 DataLen);
-
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 12750671c860..7ee10d49894b 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -28,25 +28,17 @@ static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
0x0f72, /* 2484 */
};
-
-#define rtl819XPHY_REG_1T2RArray Rtl8192UsbPHY_REG_1T2RArray
-#define rtl819XMACPHY_Array_PG Rtl8192UsbMACPHY_Array_PG
#define rtl819XMACPHY_Array Rtl8192UsbMACPHY_Array
-#define rtl819XRadioA_Array Rtl8192UsbRadioA_Array
-#define rtl819XRadioB_Array Rtl8192UsbRadioB_Array
-#define rtl819XRadioC_Array Rtl8192UsbRadioC_Array
-#define rtl819XRadioD_Array Rtl8192UsbRadioD_Array
-#define rtl819XAGCTAB_Array Rtl8192UsbAGCTAB_Array
/******************************************************************************
 * function: This function checks whether the given RF path is legal for the current RF type.
* If RF Path is illegal, we will return false.
* input: net_device *dev
- * u32 eRFPath
+ * u32 e_rfpath
* output: none
* return: 0(illegal, false), 1(legal, true)
*****************************************************************************/
-u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
+u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 e_rfpath)
{
u8 ret = 1;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -54,9 +46,9 @@ u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
if (priv->rf_type == RF_2T4R) {
ret = 0;
} else if (priv->rf_type == RF_1T2R) {
- if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B)
+ if (e_rfpath == RF90_PATH_A || e_rfpath == RF90_PATH_B)
ret = 1;
- else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D)
+ else if (e_rfpath == RF90_PATH_C || e_rfpath == RF90_PATH_D)
ret = 0;
}
return ret;
@@ -108,17 +100,19 @@ u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr, u32 bitmask)
return (reg & bitmask) >> bitshift;
}
-static u32 phy_FwRFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+static u32 phy_FwRFSerialRead(struct net_device *dev,
+ enum rf90_radio_path_e e_rfpath,
u32 offset);
static void phy_FwRFSerialWrite(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 offset,
+ enum rf90_radio_path_e e_rfpath,
+ u32 offset,
u32 data);
/******************************************************************************
* function: This function reads register from RF chip
* input: net_device *dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * rf90_radio_path_e e_rfpath //radio path of A/B/C/D
* u32 offset //target address to be read
* output: none
* return: u32 readback value
@@ -130,12 +124,12 @@ static void phy_FwRFSerialWrite(struct net_device *dev,
* ---need more spec for this information.
******************************************************************************/
static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 offset)
+ enum rf90_radio_path_e e_rfpath, u32 offset)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 ret = 0;
u32 new_offset = 0;
- BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
+ BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[e_rfpath];
rtl8192_setBBreg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData, 0);
/* Make sure RF register offset is correct */
@@ -144,20 +138,20 @@ static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
/* Switch page for 8256 RF IC */
if (priv->rf_chip == RF_8256) {
if (offset >= 31) {
- priv->RfReg0Value[eRFPath] |= 0x140;
+ priv->RfReg0Value[e_rfpath] |= 0x140;
/* Switch to Reg_Mode2 for Reg 31-45 */
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- priv->RfReg0Value[eRFPath]<<16);
+ priv->RfReg0Value[e_rfpath]<<16);
/* Modify offset */
new_offset = offset - 30;
} else if (offset >= 16) {
- priv->RfReg0Value[eRFPath] |= 0x100;
- priv->RfReg0Value[eRFPath] &= (~0x40);
+ priv->RfReg0Value[e_rfpath] |= 0x100;
+ priv->RfReg0Value[e_rfpath] &= (~0x40);
/* Switch to Reg_Mode1 for Reg16-30 */
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- priv->RfReg0Value[eRFPath]<<16);
+ priv->RfReg0Value[e_rfpath]<<16);
new_offset = offset - 15;
} else {
@@ -185,10 +179,10 @@ static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
/* Switch back to Reg_Mode0 */
if (priv->rf_chip == RF_8256) {
- priv->RfReg0Value[eRFPath] &= 0xebf;
+ priv->RfReg0Value[e_rfpath] &= 0xebf;
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
- priv->RfReg0Value[eRFPath] << 16);
+ priv->RfReg0Value[e_rfpath] << 16);
}
return ret;
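/*
 * Minimal sketch of the RF_8256 paging rule used in the read and write
 * paths (hypothetical helper, not part of this patch): registers 31-45 map
 * to Reg_Mode2, 16-30 to Reg_Mode1, and 0-15 stay in Reg_Mode0, with the
 * mode selected through bits 0x140/0x100/0x40 of RF register 0.
 */
static u32 rf8256_page_offset(u32 offset, u32 *reg0_value)
{
	if (offset >= 31) {
		*reg0_value |= 0x140;		/* Reg_Mode2 for regs 31-45 */
		return offset - 30;
	}
	if (offset >= 16) {
		*reg0_value |= 0x100;		/* Reg_Mode1 for regs 16-30 */
		*reg0_value &= ~0x40;
		return offset - 15;
	}
	return offset;				/* Reg_Mode0 for regs 0-15 */
}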
@@ -197,7 +191,7 @@ static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
/******************************************************************************
* function: This function writes data to RF register
* input: net_device *dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * rf90_radio_path_e e_rfpath //radio path of A/B/C/D
* u32 offset //target address to be written
* u32 data //the new register data to be written
* output: none
@@ -215,28 +209,29 @@ static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
* ---------------------------------------------------------------------------
*****************************************************************************/
static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 offset,
+ enum rf90_radio_path_e e_rfpath,
+ u32 offset,
u32 data)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 DataAndAddr = 0, new_offset = 0;
- BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
+ BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[e_rfpath];
offset &= 0x3f;
if (priv->rf_chip == RF_8256) {
if (offset >= 31) {
- priv->RfReg0Value[eRFPath] |= 0x140;
+ priv->RfReg0Value[e_rfpath] |= 0x140;
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- priv->RfReg0Value[eRFPath] << 16);
+ priv->RfReg0Value[e_rfpath] << 16);
new_offset = offset - 30;
} else if (offset >= 16) {
- priv->RfReg0Value[eRFPath] |= 0x100;
- priv->RfReg0Value[eRFPath] &= (~0x40);
+ priv->RfReg0Value[e_rfpath] |= 0x100;
+ priv->RfReg0Value[e_rfpath] &= (~0x40);
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- priv->RfReg0Value[eRFPath]<<16);
+ priv->RfReg0Value[e_rfpath]<<16);
new_offset = offset - 15;
} else {
new_offset = offset;
@@ -255,15 +250,15 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
if (offset == 0x0)
- priv->RfReg0Value[eRFPath] = data;
+ priv->RfReg0Value[e_rfpath] = data;
/* Switch back to Reg_Mode0 */
if (priv->rf_chip == RF_8256) {
if (offset != 0) {
- priv->RfReg0Value[eRFPath] &= 0xebf;
+ priv->RfReg0Value[e_rfpath] &= 0xebf;
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- priv->RfReg0Value[eRFPath] << 16);
+ priv->RfReg0Value[e_rfpath] << 16);
}
}
}
@@ -271,7 +266,7 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
/******************************************************************************
* function: This function set specific bits to RF register
* input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * rf90_radio_path_e e_rfpath //radio path of A/B/C/D
* u32 reg_addr //target addr to be modified
 * u32 bitmask //target bit pos to be modified
* u32 data //value to be written
@@ -279,26 +274,27 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
* return: none
* notice:
*****************************************************************************/
-void rtl8192_phy_SetRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+void rtl8192_phy_SetRFReg(struct net_device *dev,
+ enum rf90_radio_path_e e_rfpath,
u32 reg_addr, u32 bitmask, u32 data)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 reg, bitshift;
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ if (!rtl8192_phy_CheckIsLegalRFPath(dev, e_rfpath))
return;
if (priv->Rf_Mode == RF_OP_By_FW) {
if (bitmask != bMask12Bits) {
/* RF data is 12 bits only */
- reg = phy_FwRFSerialRead(dev, eRFPath, reg_addr);
+ reg = phy_FwRFSerialRead(dev, e_rfpath, reg_addr);
bitshift = ffs(bitmask) - 1;
reg &= ~bitmask;
reg |= data << bitshift;
- phy_FwRFSerialWrite(dev, eRFPath, reg_addr, reg);
+ phy_FwRFSerialWrite(dev, e_rfpath, reg_addr, reg);
} else {
- phy_FwRFSerialWrite(dev, eRFPath, reg_addr, data);
+ phy_FwRFSerialWrite(dev, e_rfpath, reg_addr, data);
}
udelay(200);
@@ -306,14 +302,14 @@ void rtl8192_phy_SetRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
} else {
if (bitmask != bMask12Bits) {
/* RF data is 12 bits only */
- reg = rtl8192_phy_RFSerialRead(dev, eRFPath, reg_addr);
+ reg = rtl8192_phy_RFSerialRead(dev, e_rfpath, reg_addr);
bitshift = ffs(bitmask) - 1;
reg &= ~bitmask;
reg |= data << bitshift;
- rtl8192_phy_RFSerialWrite(dev, eRFPath, reg_addr, reg);
+ rtl8192_phy_RFSerialWrite(dev, e_rfpath, reg_addr, reg);
} else {
- rtl8192_phy_RFSerialWrite(dev, eRFPath, reg_addr, data);
+ rtl8192_phy_RFSerialWrite(dev, e_rfpath, reg_addr, data);
}
}
}
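/*
 * The masked read-modify-write done above, in one place (illustrative
 * sketch; bitmask must be non-zero for ffs() to give a valid shift):
 */
static u32 rf_field_update(u32 reg, u32 bitmask, u32 data)
{
	u32 bitshift = ffs(bitmask) - 1;	/* LSB position of the field */

	reg &= ~bitmask;			/* clear the old field value */
	reg |= data << bitshift;		/* place the new field value */
	return reg;
}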
@@ -327,20 +323,21 @@ void rtl8192_phy_SetRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
* return: u32 data //the readback register value
* notice:
*****************************************************************************/
-u32 rtl8192_phy_QueryRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
+ enum rf90_radio_path_e e_rfpath,
u32 reg_addr, u32 bitmask)
{
u32 reg, bitshift;
struct r8192_priv *priv = ieee80211_priv(dev);
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ if (!rtl8192_phy_CheckIsLegalRFPath(dev, e_rfpath))
return 0;
if (priv->Rf_Mode == RF_OP_By_FW) {
- reg = phy_FwRFSerialRead(dev, eRFPath, reg_addr);
+ reg = phy_FwRFSerialRead(dev, e_rfpath, reg_addr);
udelay(200);
} else {
- reg = rtl8192_phy_RFSerialRead(dev, eRFPath, reg_addr);
+ reg = rtl8192_phy_RFSerialRead(dev, e_rfpath, reg_addr);
}
bitshift = ffs(bitmask) - 1;
reg = (reg & bitmask) >> bitshift;
@@ -351,13 +348,14 @@ u32 rtl8192_phy_QueryRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
/******************************************************************************
 * function: We support having the firmware execute the RF R/W.
* input: net_device *dev
- * RF90_RADIO_PATH_E eRFPath
+ * rf90_radio_path_e e_rfpath
* u32 offset
* output: none
* return: u32
* notice:
****************************************************************************/
-static u32 phy_FwRFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+static u32 phy_FwRFSerialRead(struct net_device *dev,
+ enum rf90_radio_path_e e_rfpath,
u32 offset)
{
u32 reg = 0;
@@ -374,7 +372,7 @@ static u32 phy_FwRFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
/* 2. Write RF register address. bit 12-19 */
data |= ((offset&0xFF)<<12);
/* 3. Write RF path. bit 20-21 */
- data |= ((eRFPath&0x3)<<20);
+ data |= ((e_rfpath&0x3)<<20);
/* 4. Set RF read indicator. bit 22=0 */
/* 5. Trigger Fw to operate the command. bit 31 */
data |= 0x80000000;
@@ -414,7 +412,7 @@ static u32 phy_FwRFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
/******************************************************************************
 * function: We support having the firmware execute the RF R/W.
* input: net_device *dev
- * RF90_RADIO_PATH_E eRFPath
+ * rf90_radio_path_e e_rfpath
* u32 offset
* u32 data
* output: none
@@ -422,7 +420,8 @@ static u32 phy_FwRFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
* notice:
****************************************************************************/
static void phy_FwRFSerialWrite(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 offset, u32 data)
+ enum rf90_radio_path_e e_rfpath,
+ u32 offset, u32 data)
{
u8 time = 0;
u32 tmp;
@@ -437,7 +436,7 @@ static void phy_FwRFSerialWrite(struct net_device *dev,
/* 2. Write RF register address. bit 12-19 */
data |= ((offset&0xFF)<<12);
/* 3. Write RF path. bit 20-21 */
- data |= ((eRFPath&0x3)<<20);
+ data |= ((e_rfpath&0x3)<<20);
/* 4. Set RF write indicator. bit 22=1 */
data |= 0x400000;
/* 5. Trigger Fw to operate the command. bit 31=1 */
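/*
 * Sketch of the firmware RF command word assembled in the two functions
 * above, assuming only the bit positions documented in their comments
 * (12-bit data for writes, address in bits 12-19, path in bits 20-21,
 * write flag in bit 22, firmware trigger in bit 31). Hypothetical helper,
 * not part of this patch:
 */
static u32 fw_rf_cmd_word(u32 e_rfpath, u32 offset, u32 data, bool is_write)
{
	u32 cmd = is_write ? (data & 0xfff) : 0;	/* 12-bit RF data */

	cmd |= (offset & 0xff) << 12;	/* RF register address */
	cmd |= (e_rfpath & 0x3) << 20;	/* radio path A/B/C/D */
	if (is_write)
		cmd |= BIT(22);		/* write indicator */
	cmd |= BIT(31);			/* trigger firmware */
	return cmd;
}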
@@ -484,7 +483,7 @@ void rtl8192_phy_configmac(struct net_device *dev)
if (priv->btxpowerdata_readfromEEPORM) {
RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
dwArrayLen = MACPHY_Array_PGLength;
- pdwArray = rtl819XMACPHY_Array_PG;
+ pdwArray = Rtl8192UsbMACPHY_Array_PG;
} else {
RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array\n");
@@ -528,22 +527,22 @@ void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
#endif
if (ConfigType == BaseBand_Config_PHY_REG) {
for (i = 0; i < PHY_REG_1T2RArrayLength; i += 2) {
- rtl8192_setBBreg(dev, rtl819XPHY_REG_1T2RArray[i],
+ rtl8192_setBBreg(dev, Rtl8192UsbPHY_REG_1T2RArray[i],
bMaskDWord,
- rtl819XPHY_REG_1T2RArray[i+1]);
+ Rtl8192UsbPHY_REG_1T2RArray[i+1]);
RT_TRACE(COMP_DBG,
"i: %x, Rtl819xUsbPHY_REGArray[0]=%x Rtl819xUsbPHY_REGArray[1]=%x\n",
- i, rtl819XPHY_REG_1T2RArray[i],
- rtl819XPHY_REG_1T2RArray[i+1]);
+ i, Rtl8192UsbPHY_REG_1T2RArray[i],
+ Rtl8192UsbPHY_REG_1T2RArray[i+1]);
}
} else if (ConfigType == BaseBand_Config_AGC_TAB) {
for (i = 0; i < AGCTAB_ArrayLength; i += 2) {
- rtl8192_setBBreg(dev, rtl819XAGCTAB_Array[i],
- bMaskDWord, rtl819XAGCTAB_Array[i+1]);
+ rtl8192_setBBreg(dev, Rtl8192UsbAGCTAB_Array[i],
+ bMaskDWord, Rtl8192UsbAGCTAB_Array[i+1]);
RT_TRACE(COMP_DBG,
- "i: %x, rtl819XAGCTAB_Array[0]=%x rtl819XAGCTAB_Array[1]=%x\n",
- i, rtl819XAGCTAB_Array[i],
- rtl819XAGCTAB_Array[i+1]);
+ "i: %x, Rtl8192UsbAGCTAB_Array[0]=%x Rtl8192UsbAGCTAB_Array[1]=%x\n",
+ i, Rtl8192UsbAGCTAB_Array[i],
+ Rtl8192UsbAGCTAB_Array[i+1]);
}
}
}
@@ -688,15 +687,15 @@ static void rtl8192_InitBBRFRegDef(struct net_device *dev)
* function: This function is to write register and then readback to make
* sure whether BB and RF is OK
* input: net_device *dev
- * HW90_BLOCK_E CheckBlock
- * RF90_RADIO_PATH_E eRFPath //only used when checkblock is
+ * hw90_block_e CheckBlock
+ * rf90_radio_path_e e_rfpath //only used when checkblock is
* //HW90_BLOCK_RF
* output: none
* return: return whether BB and RF is ok (0:OK, 1:Fail)
* notice: This function may be removed in the ASIC
******************************************************************************/
-u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, HW90_BLOCK_E CheckBlock,
- RF90_RADIO_PATH_E eRFPath)
+u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, enum hw90_block_e CheckBlock,
+ enum rf90_radio_path_e e_rfpath)
{
u8 ret = 0;
u32 i, CheckTimes = 4, reg = 0;
@@ -727,14 +726,14 @@ u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, HW90_BLOCK_E CheckBlock,
case HW90_BLOCK_RF:
WriteData[i] &= 0xfff;
- rtl8192_phy_SetRFReg(dev, eRFPath,
+ rtl8192_phy_SetRFReg(dev, e_rfpath,
WriteAddr[HW90_BLOCK_RF],
bMask12Bits, WriteData[i]);
/* TODO: we should not delay for such a long time.
* Ask SD3
*/
usleep_range(1000, 1000);
- reg = rtl8192_phy_QueryRFReg(dev, eRFPath,
+ reg = rtl8192_phy_QueryRFReg(dev, e_rfpath,
WriteAddr[HW90_BLOCK_RF],
bMask12Bits);
usleep_range(1000, 1000);
@@ -787,11 +786,11 @@ static void rtl8192_BB_Config_ParaFile(struct net_device *dev)
/* ----Check FPGAPHY0 and PHY1 board is OK---- */
/* TODO: this function should be removed on ASIC */
- for (eCheckItem = (HW90_BLOCK_E)HW90_BLOCK_PHY0;
+ for (eCheckItem = (enum hw90_block_e)HW90_BLOCK_PHY0;
eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
/* don't care RF path */
- status = rtl8192_phy_checkBBAndRF(dev, (HW90_BLOCK_E)eCheckItem,
- (RF90_RADIO_PATH_E)0);
+ status = rtl8192_phy_checkBBAndRF(dev, (enum hw90_block_e)eCheckItem,
+ (enum rf90_radio_path_e)0);
if (status != 0) {
RT_TRACE((COMP_ERR | COMP_PHY),
"PHY_RF8256_Config(): Check PHY%d Fail!!\n",
@@ -963,29 +962,29 @@ void rtl8192_phy_updateInitGain(struct net_device *dev)
* function: This function read RF parameters from general head file,
* and do RF 3-wire
* input: net_device *dev
- * RF90_RADIO_PATH_E eRFPath
+ * rf90_radio_path_e e_rfpath
* output: none
* return: return code show if RF configuration is successful(0:pass, 1:fail)
* notice: Delay may be required for RF configuration
*****************************************************************************/
u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath)
+ enum rf90_radio_path_e e_rfpath)
{
int i;
- switch (eRFPath) {
+ switch (e_rfpath) {
case RF90_PATH_A:
for (i = 0; i < RadioA_ArrayLength; i = i+2) {
- if (rtl819XRadioA_Array[i] == 0xfe) {
+ if (Rtl8192UsbRadioA_Array[i] == 0xfe) {
mdelay(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- rtl819XRadioA_Array[i],
+ rtl8192_phy_SetRFReg(dev, e_rfpath,
+ Rtl8192UsbRadioA_Array[i],
bMask12Bits,
- rtl819XRadioA_Array[i+1]);
+ Rtl8192UsbRadioA_Array[i+1]);
mdelay(1);
}
@@ -993,14 +992,14 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
case RF90_PATH_B:
for (i = 0; i < RadioB_ArrayLength; i = i+2) {
- if (rtl819XRadioB_Array[i] == 0xfe) {
+ if (Rtl8192UsbRadioB_Array[i] == 0xfe) {
mdelay(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- rtl819XRadioB_Array[i],
+ rtl8192_phy_SetRFReg(dev, e_rfpath,
+ Rtl8192UsbRadioB_Array[i],
bMask12Bits,
- rtl819XRadioB_Array[i+1]);
+ Rtl8192UsbRadioB_Array[i+1]);
mdelay(1);
}
@@ -1008,14 +1007,14 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
case RF90_PATH_C:
for (i = 0; i < RadioC_ArrayLength; i = i+2) {
- if (rtl819XRadioC_Array[i] == 0xfe) {
+ if (Rtl8192UsbRadioC_Array[i] == 0xfe) {
mdelay(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- rtl819XRadioC_Array[i],
+ rtl8192_phy_SetRFReg(dev, e_rfpath,
+ Rtl8192UsbRadioC_Array[i],
bMask12Bits,
- rtl819XRadioC_Array[i+1]);
+ Rtl8192UsbRadioC_Array[i+1]);
mdelay(1);
}
@@ -1023,14 +1022,14 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
case RF90_PATH_D:
for (i = 0; i < RadioD_ArrayLength; i = i+2) {
- if (rtl819XRadioD_Array[i] == 0xfe) {
+ if (Rtl8192UsbRadioD_Array[i] == 0xfe) {
mdelay(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- rtl819XRadioD_Array[i],
+ rtl8192_phy_SetRFReg(dev, e_rfpath,
+ Rtl8192UsbRadioD_Array[i],
bMask12Bits,
- rtl819XRadioD_Array[i+1]);
+ Rtl8192UsbRadioD_Array[i+1]);
mdelay(1);
}
@@ -1208,11 +1207,11 @@ bool rtl8192_SetRFPowerState(struct net_device *dev,
}
/******************************************************************************
- * function: This function sets command table variable (struct SwChnlCmd).
- * input: SwChnlCmd *CmdTable //table to be set
+ * function: This function sets command table variable (struct sw_chnl_cmd).
+ * input: sw_chnl_cmd *CmdTable //table to be set
* u32 CmdTableIdx //variable index in table to be set
* u32 CmdTableSz //table size
- * SwChnlCmdID CmdID //command ID to set
+ * switch_chan_cmd_id CmdID //command ID to set
* u32 Para1
* u32 Para2
* u32 msDelay
@@ -1220,11 +1219,11 @@ bool rtl8192_SetRFPowerState(struct net_device *dev,
* return: true if finished, false otherwise
* notice:
******************************************************************************/
-static u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
- u32 CmdTableSz, SwChnlCmdID CmdID,
+static u8 rtl8192_phy_SetSwChnlCmdArray(struct sw_chnl_cmd *CmdTable, u32 CmdTableIdx,
+ u32 CmdTableSz, enum switch_chan_cmd_id CmdID,
u32 Para1, u32 Para2, u32 msDelay)
{
- SwChnlCmd *pCmd;
+ struct sw_chnl_cmd *pCmd;
if (CmdTable == NULL) {
RT_TRACE(COMP_ERR, "%s(): CmdTable cannot be NULL\n", __func__);
@@ -1237,10 +1236,10 @@ static u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
}
pCmd = CmdTable + CmdTableIdx;
- pCmd->CmdID = CmdID;
- pCmd->Para1 = Para1;
- pCmd->Para2 = Para2;
- pCmd->msDelay = msDelay;
+ pCmd->cmd_id = CmdID;
+ pCmd->para_1 = Para1;
+ pCmd->para_2 = Para2;
+ pCmd->ms_delay = msDelay;
return true;
}
@@ -1261,14 +1260,14 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
u8 *stage, u8 *step, u32 *delay)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT];
- u32 PreCommonCmdCnt;
- SwChnlCmd PostCommonCmd[MAX_POSTCMD_CNT];
- u32 PostCommonCmdCnt;
- SwChnlCmd RfDependCmd[MAX_RFDEPENDCMD_CNT];
- u32 RfDependCmdCnt;
- SwChnlCmd *CurrentCmd = NULL;
- u8 eRFPath;
+ struct sw_chnl_cmd PreCommonCmd[MAX_PRECMD_CNT];
+ u32 PreCommonCmdCnt;
+ struct sw_chnl_cmd PostCommonCmd[MAX_POSTCMD_CNT];
+ u32 PostCommonCmdCnt;
+ struct sw_chnl_cmd RfDependCmd[MAX_RFDEPENDCMD_CNT];
+ u32 RfDependCmdCnt;
+ struct sw_chnl_cmd *CurrentCmd = NULL;
+ u8 e_rfpath;
RT_TRACE(COMP_CH, "%s() stage: %d, step: %d, channel: %d\n",
__func__, *stage, *step, channel);
@@ -1285,16 +1284,16 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
/* <1> Fill up pre common command. */
PreCommonCmdCnt = 0;
rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++,
- MAX_PRECMD_CNT, CmdID_SetTxPowerLevel,
+ MAX_PRECMD_CNT, CMD_ID_SET_TX_PWR_LEVEL,
0, 0, 0);
rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++,
- MAX_PRECMD_CNT, CmdID_End, 0, 0, 0);
+ MAX_PRECMD_CNT, CMD_ID_END, 0, 0, 0);
/* <2> Fill up post common command. */
PostCommonCmdCnt = 0;
rtl8192_phy_SetSwChnlCmdArray(PostCommonCmd, PostCommonCmdCnt++,
- MAX_POSTCMD_CNT, CmdID_End, 0, 0, 0);
+ MAX_POSTCMD_CNT, CMD_ID_END, 0, 0, 0);
/* <3> Fill up RF dependent command. */
RfDependCmdCnt = 0;
@@ -1308,13 +1307,13 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
}
rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_RF_WriteReg,
+ CMD_ID_RF_WRITE_REG,
rZebra1_Channel,
RF_CHANNEL_TABLE_ZEBRA[channel],
10);
rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_End, 0, 0, 0);
+ CMD_ID_END, 0, 0, 0);
break;
case RF_8256:
@@ -1327,11 +1326,11 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
}
rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_RF_WriteReg,
+ CMD_ID_RF_WRITE_REG,
rZebra1_Channel, channel, 10);
rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_End, 0, 0, 0);
+ CMD_ID_END, 0, 0, 0);
break;
case RF_8258:
@@ -1356,9 +1355,9 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
break;
}
- if (CurrentCmd->CmdID == CmdID_End) {
+ if (CurrentCmd->cmd_id == CMD_ID_END) {
if ((*stage) == 2) {
- (*delay) = CurrentCmd->msDelay;
+ (*delay) = CurrentCmd->ms_delay;
return true;
}
(*stage)++;
@@ -1366,31 +1365,31 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
continue;
}
- switch (CurrentCmd->CmdID) {
- case CmdID_SetTxPowerLevel:
+ switch (CurrentCmd->cmd_id) {
+ case CMD_ID_SET_TX_PWR_LEVEL:
if (priv->card_8192_version == (u8)VERSION_819xU_A)
/* consider it later! */
rtl8192_SetTxPowerLevel(dev, channel);
break;
- case CmdID_WritePortUlong:
- write_nic_dword(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ case CMD_ID_WRITE_PORT_ULONG:
+ write_nic_dword(dev, CurrentCmd->para_1,
+ CurrentCmd->para_2);
break;
- case CmdID_WritePortUshort:
- write_nic_word(dev, CurrentCmd->Para1,
- (u16)CurrentCmd->Para2);
+ case CMD_ID_WRITE_PORT_USHORT:
+ write_nic_word(dev, CurrentCmd->para_1,
+ (u16)CurrentCmd->para_2);
break;
- case CmdID_WritePortUchar:
- write_nic_byte(dev, CurrentCmd->Para1,
- (u8)CurrentCmd->Para2);
+ case CMD_ID_WRITE_PORT_UCHAR:
+ write_nic_byte(dev, CurrentCmd->para_1,
+ (u8)CurrentCmd->para_2);
break;
- case CmdID_RF_WriteReg:
- for (eRFPath = 0; eRFPath < RF90_PATH_MAX; eRFPath++) {
+ case CMD_ID_RF_WRITE_REG:
+ for (e_rfpath = 0; e_rfpath < RF90_PATH_MAX; e_rfpath++) {
rtl8192_phy_SetRFReg(dev,
- (RF90_RADIO_PATH_E)eRFPath,
- CurrentCmd->Para1,
+ (enum rf90_radio_path_e)e_rfpath,
+ CurrentCmd->para_1,
bZebra1_ChannelNum,
- CurrentCmd->Para2);
+ CurrentCmd->para_2);
}
break;
default:
@@ -1400,7 +1399,7 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
break;
} while (true);
- (*delay) = CurrentCmd->msDelay;
+ (*delay) = CurrentCmd->ms_delay;
(*step)++;
return false;
}
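/*
 * How the stepper above is meant to be driven (illustrative only; the
 * driver really does this from its channel-switch workitem): keep calling
 * with persistent stage/step cursors and honour the returned delay until
 * the function reports completion.
 */
static void sw_chnl_drive_example(struct net_device *dev, u8 channel)
{
	u8 stage = 0, step = 0;
	u32 delay_ms = 0;

	while (!rtl8192_phy_SwChnlStepByStep(dev, channel, &stage, &step,
					     &delay_ms)) {
		if (delay_ms)
			msleep(delay_ms);	/* e.g. the 10 ms RF write delay */
	}
}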
@@ -1663,8 +1662,9 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
* notice: I doubt whether SetBWModeInProgress flag is necessary as we can
 * test whether the work is currently in the queue or not. //do I?
*****************************************************************************/
-void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH bandwidth,
- HT_EXTCHNL_OFFSET offset)
+void rtl8192_SetBWMode(struct net_device *dev,
+ enum ht_channel_width bandwidth,
+ enum ht_extension_chan_offset offset)
{
struct r8192_priv *priv = ieee80211_priv(dev);
diff --git a/drivers/staging/rtl8192u/r819xU_phy.h b/drivers/staging/rtl8192u/r819xU_phy.h
index 0a42a6092ea9..c7ec3182857f 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.h
+++ b/drivers/staging/rtl8192u/r819xU_phy.h
@@ -7,80 +7,67 @@
#define MAX_RFDEPENDCMD_CNT 16
#define MAX_POSTCMD_CNT 16
-typedef enum _SwChnlCmdID {
- CmdID_End,
- CmdID_SetTxPowerLevel,
- CmdID_BBRegWrite10,
- CmdID_WritePortUlong,
- CmdID_WritePortUshort,
- CmdID_WritePortUchar,
- CmdID_RF_WriteReg,
-} SwChnlCmdID;
+enum switch_chan_cmd_id {
+ CMD_ID_END,
+ CMD_ID_SET_TX_PWR_LEVEL,
+ CMD_ID_WRITE_PORT_ULONG,
+ CMD_ID_WRITE_PORT_USHORT,
+ CMD_ID_WRITE_PORT_UCHAR,
+ CMD_ID_RF_WRITE_REG,
+};
/* -----------------------Define structure---------------------- */
/* 1. Switch channel related */
-typedef struct _SwChnlCmd {
- SwChnlCmdID CmdID;
- u32 Para1;
- u32 Para2;
- u32 msDelay;
-} __packed SwChnlCmd;
+struct sw_chnl_cmd {
+ enum switch_chan_cmd_id cmd_id;
+ u32 para_1;
+ u32 para_2;
+ u32 ms_delay;
+} __packed;
-extern u32 rtl819XMACPHY_Array_PG[];
-extern u32 rtl819XPHY_REG_1T2RArray[];
-extern u32 rtl819XAGCTAB_Array[];
-extern u32 rtl819XRadioA_Array[];
-extern u32 rtl819XRadioB_Array[];
-extern u32 rtl819XRadioC_Array[];
-extern u32 rtl819XRadioD_Array[];
-
-typedef enum _HW90_BLOCK {
+enum hw90_block_e {
HW90_BLOCK_MAC = 0,
HW90_BLOCK_PHY0 = 1,
HW90_BLOCK_PHY1 = 2,
HW90_BLOCK_RF = 3,
HW90_BLOCK_MAXIMUM = 4, /* Never use this */
-} HW90_BLOCK_E, *PHW90_BLOCK_E;
+};
-typedef enum _RF90_RADIO_PATH {
+enum rf90_radio_path_e {
RF90_PATH_A = 0, /* Radio Path A */
RF90_PATH_B = 1, /* Radio Path B */
RF90_PATH_C = 2, /* Radio Path C */
RF90_PATH_D = 3, /* Radio Path D */
RF90_PATH_MAX /* Max RF number 92 support */
-} RF90_RADIO_PATH_E, *PRF90_RADIO_PATH_E;
-
-#define bMaskByte0 0xff
-#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
-#define bMaskByte3 0xff000000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
-#define bMaskDWord 0xffffffff
+};
-u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath);
+u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 e_rfpath);
void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr,
u32 bitmask, u32 data);
u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr, u32 bitmask);
-void rtl8192_phy_SetRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+void rtl8192_phy_SetRFReg(struct net_device *dev,
+ enum rf90_radio_path_e e_rfpath,
u32 reg_addr, u32 bitmask, u32 data);
-u32 rtl8192_phy_QueryRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
+ enum rf90_radio_path_e e_rfpath,
u32 reg_addr, u32 bitmask);
void rtl8192_phy_configmac(struct net_device *dev);
void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
u8 rtl8192_phy_checkBBAndRF(struct net_device *dev,
- HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
+ enum hw90_block_e CheckBlock,
+ enum rf90_radio_path_e e_rfpath);
void rtl8192_BBConfig(struct net_device *dev);
void rtl8192_phy_getTxPower(struct net_device *dev);
void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
void rtl8192_phy_RFConfig(struct net_device *dev);
void rtl8192_phy_updateInitGain(struct net_device *dev);
u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath);
+ enum rf90_radio_path_e e_rfpath);
u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
-void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH bandwidth,
- HT_EXTCHNL_OFFSET offset);
+void rtl8192_SetBWMode(struct net_device *dev,
+ enum ht_channel_width bandwidth,
+ enum ht_extension_chan_offset offset);
void rtl8192_SwChnl_WorkItem(struct net_device *dev);
void rtl8192_SetBWModeWorkItem(struct net_device *dev);
bool rtl8192_SetRFPowerState(struct net_device *dev,
diff --git a/drivers/staging/rtl8192u/r819xU_phyreg.h b/drivers/staging/rtl8192u/r819xU_phyreg.h
index c058a9537526..65ee6088324c 100644
--- a/drivers/staging/rtl8192u/r819xU_phyreg.h
+++ b/drivers/staging/rtl8192u/r819xU_phyreg.h
@@ -5,47 +5,9 @@
#define RF_DATA 0x1d4 /* FW will write RF data in the register.*/
-/* Register duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF
- * page 1
- */
-#define rPMAC_Reset 0x100
-#define rPMAC_TxStart 0x104
-#define rPMAC_TxLegacySIG 0x108
-#define rPMAC_TxHTSIG1 0x10c
-#define rPMAC_TxHTSIG2 0x110
-#define rPMAC_PHYDebug 0x114
-#define rPMAC_TxPacketNum 0x118
-#define rPMAC_TxIdle 0x11c
-#define rPMAC_TxMACHeader0 0x120
-#define rPMAC_TxMACHeader1 0x124
-#define rPMAC_TxMACHeader2 0x128
-#define rPMAC_TxMACHeader3 0x12c
-#define rPMAC_TxMACHeader4 0x130
-#define rPMAC_TxMACHeader5 0x134
-#define rPMAC_TxDataType 0x138
-#define rPMAC_TxRandomSeed 0x13c
-#define rPMAC_CCKPLCPPreamble 0x140
-#define rPMAC_CCKPLCPHeader 0x144
-#define rPMAC_CCKCRC16 0x148
-#define rPMAC_OFDMRxCRC32OK 0x170
-#define rPMAC_OFDMRxCRC32Er 0x174
-#define rPMAC_OFDMRxParityEr 0x178
-#define rPMAC_OFDMRxCRC8Er 0x17c
-#define rPMAC_CCKCRxRC16Er 0x180
-#define rPMAC_CCKCRxRC32Er 0x184
-#define rPMAC_CCKCRxRC32OK 0x188
-#define rPMAC_TxStatus 0x18c
-
/* page8 */
#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC */
-#define rFPGA0_TxInfo 0x804
-#define rFPGA0_PSDFunction 0x808
#define rFPGA0_TxGainStage 0x80c
-#define rFPGA0_RFTiming1 0x810
-#define rFPGA0_RFTiming2 0x814
-/* #define rFPGA0_XC_RFTiming 0x818
- * #define rFPGA0_XD_RFTiming 0x81c
- */
#define rFPGA0_XA_HSSIParameter1 0x820
#define rFPGA0_XA_HSSIParameter2 0x824
#define rFPGA0_XB_HSSIParameter1 0x828
@@ -58,8 +20,6 @@
#define rFPGA0_XB_LSSIParameter 0x844
#define rFPGA0_XC_LSSIParameter 0x848
#define rFPGA0_XD_LSSIParameter 0x84c
-#define rFPGA0_RFWakeUpParameter 0x850
-#define rFPGA0_RFSleepUpParameter 0x854
#define rFPGA0_XAB_SwitchControl 0x858
#define rFPGA0_XCD_SwitchControl 0x85c
#define rFPGA0_XA_RFInterfaceOE 0x860
@@ -71,46 +31,27 @@
#define rFPGA0_XAB_RFParameter 0x878
#define rFPGA0_XCD_RFParameter 0x87c
#define rFPGA0_AnalogParameter1 0x880
-#define rFPGA0_AnalogParameter2 0x884
-#define rFPGA0_AnalogParameter3 0x888
#define rFPGA0_AnalogParameter4 0x88c
#define rFPGA0_XA_LSSIReadBack 0x8a0
#define rFPGA0_XB_LSSIReadBack 0x8a4
#define rFPGA0_XC_LSSIReadBack 0x8a8
#define rFPGA0_XD_LSSIReadBack 0x8ac
-#define rFPGA0_PSDReport 0x8b4
#define rFPGA0_XAB_RFInterfaceRB 0x8e0
#define rFPGA0_XCD_RFInterfaceRB 0x8e4
/* page 9 */
#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC */
-#define rFPGA1_TxBlock 0x904
-#define rFPGA1_DebugSelect 0x908
-#define rFPGA1_TxInfo 0x90c
/* page a */
#define rCCK0_System 0xa00
#define rCCK0_AFESetting 0xa04
#define rCCK0_CCA 0xa08
-#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level */
-#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
-#define rCCK0_RxHP 0xa14
-#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
-#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
#define rCCK0_TxFilter1 0xa20
#define rCCK0_TxFilter2 0xa24
#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
-#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d */
-#define rCCK0_TRSSIReport 0xa50
-#define rCCK0_RxReport 0xa54 /* 0xa57 */
-#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
-#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
/* page c */
-#define rOFDM0_LSTF 0xc00
#define rOFDM0_TRxPathEnable 0xc04
-#define rOFDM0_TRMuxPar 0xc08
-#define rOFDM0_TRSWIsolation 0xc0c
#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
#define rOFDM0_XBRxAFE 0xc18
@@ -122,10 +63,6 @@
#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD */
#define rOFDM0_RxDetector2 0xc34 /* SBD & Frame Sync. */
#define rOFDM0_RxDetector3 0xc38 /* Frame Sync.*/
-#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
-#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
-#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
-#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
#define rOFDM0_XAAGCCore1 0xc50
#define rOFDM0_XAAGCCore2 0xc54
@@ -135,10 +72,6 @@
#define rOFDM0_XCAGCCore2 0xc64
#define rOFDM0_XDAGCCore1 0xc68
#define rOFDM0_XDAGCCore2 0xc6c
-#define rOFDM0_AGCParameter1 0xc70
-#define rOFDM0_AGCParameter2 0xc74
-#define rOFDM0_AGCRSSITable 0xc78
-#define rOFDM0_HTSTFAGC 0xc7c
#define rOFDM0_XATxIQImbalance 0xc80
#define rOFDM0_XATxAFE 0xc84
#define rOFDM0_XBTxIQImbalance 0xc88
@@ -147,48 +80,11 @@
#define rOFDM0_XCTxAFE 0xc94
#define rOFDM0_XDTxIQImbalance 0xc98
#define rOFDM0_XDTxAFE 0xc9c
-#define rOFDM0_RxHPParameter 0xce0
-#define rOFDM0_TxPseudoNoiseWgt 0xce4
-#define rOFDM0_FrameSync 0xcf0
-#define rOFDM0_DFSReport 0xcf4
-#define rOFDM0_TxCoeff1 0xca4
-#define rOFDM0_TxCoeff2 0xca8
-#define rOFDM0_TxCoeff3 0xcac
-#define rOFDM0_TxCoeff4 0xcb0
-#define rOFDM0_TxCoeff5 0xcb4
-#define rOFDM0_TxCoeff6 0xcb8
/* page d */
#define rOFDM1_LSTF 0xd00
#define rOFDM1_TRxPathEnable 0xd04
-#define rOFDM1_CFO 0xd08
-#define rOFDM1_CSI1 0xd10
-#define rOFDM1_SBD 0xd14
-#define rOFDM1_CSI2 0xd18
-#define rOFDM1_CFOTracking 0xd2c
-#define rOFDM1_TRxMesaure1 0xd34
-#define rOFDM1_IntfDet 0xd3c
-#define rOFDM1_PseudoNoiseStateAB 0xd50
-#define rOFDM1_PseudoNoiseStateCD 0xd54
-#define rOFDM1_RxPseudoNoiseWgt 0xd58
-#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
-#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
-
-#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
-#define rOFDM_ShortCFOAB 0xdac
-#define rOFDM_ShortCFOCD 0xdb0
-#define rOFDM_LongCFOAB 0xdb4
-#define rOFDM_LongCFOCD 0xdb8
-#define rOFDM_TailCFOAB 0xdbc
-#define rOFDM_TailCFOCD 0xdc0
-#define rOFDM_PWMeasure1 0xdc4
-#define rOFDM_PWMeasure2 0xdc8
-#define rOFDM_BWReport 0xdcc
-#define rOFDM_AGCReport 0xdd0
-#define rOFDM_RxSNR 0xdd4
-#define rOFDM_RxEVMCSI 0xdd8
-#define rOFDM_SIGReport 0xddc
/* page e */
#define rTxAGC_Rate18_06 0xe00
@@ -203,627 +99,43 @@
/* RF
* Zebra1
*/
-#define rZebra1_HSSIEnable 0x0
-#define rZebra1_TRxEnable1 0x1
-#define rZebra1_TRxEnable2 0x2
-#define rZebra1_AGC 0x4
-#define rZebra1_ChargePump 0x5
#define rZebra1_Channel 0x7
-#define rZebra1_TxGain 0x8
-#define rZebra1_TxLPF 0x9
-#define rZebra1_RxLPF 0xb
-#define rZebra1_RxHPFCorner 0xc
/* Zebra4 */
#define rGlobalCtrl 0
-#define rRTL8256_TxLPF 19
-#define rRTL8256_RxLPF 11
-
-/* RTL8258 */
-#define rRTL8258_TxLPF 0x11
-#define rRTL8258_RxLPF 0x13
-#define rRTL8258_RSSILPF 0xa
/* Bit Mask
- * page-1
+ * page-8
*/
-#define bBBResetB 0x100
-#define bGlobalResetB 0x200
-#define bOFDMTxStart 0x4
-#define bCCKTxStart 0x8
-#define bCRC32Debug 0x100
-#define bPMACLoopback 0x10
-#define bTxLSIG 0xffffff
-#define bOFDMTxRate 0xf
-#define bOFDMTxReserved 0x10
-#define bOFDMTxLength 0x1ffe0
-#define bOFDMTxParity 0x20000
-#define bTxHTSIG1 0xffffff
-#define bTxHTMCSRate 0x7f
-#define bTxHTBW 0x80
-#define bTxHTLength 0xffff00
-#define bTxHTSIG2 0xffffff
-#define bTxHTSmoothing 0x1
-#define bTxHTSounding 0x2
-#define bTxHTReserved 0x4
-#define bTxHTAggreation 0x8
-#define bTxHTSTBC 0x30
-#define bTxHTAdvanceCoding 0x40
-#define bTxHTShortGI 0x80
-#define bTxHTNumberHT_LTF 0x300
-#define bTxHTCRC8 0x3fc00
-#define bCounterReset 0x10000
-#define bNumOfOFDMTx 0xffff
-#define bNumOfCCKTx 0xffff0000
-#define bTxIdleInterval 0xffff
-#define bOFDMService 0xffff0000
-#define bTxMACHeader 0xffffffff
-#define bTxDataInit 0xff
-#define bTxHTMode 0x100
-#define bTxDataType 0x30000
-#define bTxRandomSeed 0xffffffff
-#define bCCKTxPreamble 0x1
-#define bCCKTxSFD 0xffff0000
-#define bCCKTxSIG 0xff
-#define bCCKTxService 0xff00
-#define bCCKLengthExt 0x8000
-#define bCCKTxLength 0xffff0000
-#define bCCKTxCRC16 0xffff
-#define bCCKTxStatus 0x1
-#define bOFDMTxStatus 0x2
-
-/* page-8 */
#define bRFMOD 0x1
-#define bJapanMode 0x2
-#define bCCKTxSC 0x30
#define bCCKEn 0x1000000
#define bOFDMEn 0x2000000
-#define bOFDMRxADCPhase 0x10000
-#define bOFDMTxDACPhase 0x40000
-#define bXATxAGC 0x3f
#define bXBTxAGC 0xf00
#define bXCTxAGC 0xf000
-#define bXDTxAGC 0xf0000
-#define bPAStart 0xf0000000
-#define bTRStart 0x00f00000
-#define bRFStart 0x0000f000
-#define bBBStart 0x000000f0
-#define bBBCCKStart 0x0000000f
-#define bPAEnd 0xf /* Reg0x814 */
-#define bTREnd 0x0f000000
-#define bRFEnd 0x000f0000
-#define bCCAMask 0x000000f0 /* T2R */
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
-#define bContTxHSSI 0x400 /* chane gain at continue Tx */
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
#define b3WireDataLength 0x800
#define b3WireAddressLength 0x400
-#define b3WireRFPowerDown 0x1
-/* #define bHWSISelect 0x8 */
-#define b5GPAPEPolarity 0x40000000
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
-#define bRFSI_3Wire 0xf /* 3-wire total control */
#define bRFSI_RFENV 0x10
-#define bRFSI_TRSW 0x20
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address */
#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
#define bLSSIReadBackData 0xfff
-#define bLSSIReadOKFlag 0x1000
-#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
-#define bDA6Swing 0x380000
-#define bADClkPhase 0x4000000
-#define b80MClkDelay 0x18000000
-#define bAFEWatchDogEnable 0x20000000
#define bXtalCap 0x0f000000
-#define bIntDifClkEnable 0x400
-#define bExtSigClkEnable 0x800
-#define bBandgapMbiasPowerUp 0x10000
-#define bAD11SHGain 0xc0000
-#define bAD11InputRange 0x700000
-#define bAD11OPCurrent 0x3800000
-#define bIPathLoopback 0x4000000
-#define bQPathLoopback 0x8000000
-#define bAFELoopback 0x10000000
-#define bDA10Swing 0x7e0
-#define bDA10Reverse 0x800
-#define bDAClkSource 0x1000
-#define bAD7InputRange 0x6000
-#define bAD7Gain 0x38000
-#define bAD7OutputCMMode 0x40000
-#define bAD7InputCMMode 0x380000
-#define bAD7Current 0xc00000
-#define bRegulatorAdjust 0x7000000
-#define bAD11PowerUpAtTx 0x1
-#define bDA10PSAtTx 0x10
-#define bAD11PowerUpAtRx 0x100
-#define bDA10PSAtRx 0x1000
-
-#define bCCKRxAGCFormat 0x200
-
-#define bPSDFFTSamplepPoint 0xc000
-#define bPSDAverageNum 0x3000
-#define bIQPathControl 0xc00
-#define bPSDFreq 0x3ff
-#define bPSDAntennaPath 0x30
-#define bPSDIQSwitch 0x40
-#define bPSDRxTrigger 0x400000
-#define bPSDTxTrigger 0x80000000
-#define bPSDSineToneScale 0x7f000000
-#define bPSDReport 0xffff
-
-/* page-9 */
-#define bOFDMTxSC 0x30000000
-#define bCCKTxOn 0x1
-#define bOFDMTxOn 0x2
-#define bDebugPage 0xfff /* reset debug page and also HWord, LWord */
-#define bDebugItem 0xff /* reset debug page and LWord */
-#define bAntL 0x10
-#define bAntNonHT 0x100
-#define bAntHT1 0x1000
-#define bAntHT2 0x10000
-#define bAntHT1S1 0x100000
-#define bAntNonHTS1 0x1000000
/* page-a */
-#define bCCKBBMode 0x3
-#define bCCKTxPowerSaving 0x80
-#define bCCKRxPowerSaving 0x40
#define bCCKSideBand 0x10
-#define bCCKScramble 0x8
-#define bCCKAntDiversity 0x8000
-#define bCCKCarrierRecovery 0x4000
-#define bCCKTxRate 0x3000
-#define bCCKDCCancel 0x0800
-#define bCCKISICancel 0x0400
-#define bCCKMatchFilter 0x0200
-#define bCCKEqualizer 0x0100
-#define bCCKPreambleDetect 0x800000
-#define bCCKFastFalseCCA 0x400000
-#define bCCKChEstStart 0x300000
-#define bCCKCCACount 0x080000
-#define bCCKcs_lim 0x070000
-#define bCCKBistMode 0x80000000
-#define bCCKCCAMask 0x40000000
-#define bCCKTxDACPhase 0x4
-#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
-#define bCCKr_cp_mode0 0x0100
-#define bCCKTxDCOffset 0xf0
-#define bCCKRxDCOffset 0xf
-#define bCCKCCAMode 0xc000
-#define bCCKFalseCS_lim 0x3f00
-#define bCCKCS_ratio 0xc00000
-#define bCCKCorgBit_sel 0x300000
-#define bCCKPD_lim 0x0f0000
-#define bCCKNewCCA 0x80000000
-#define bCCKRxHPofIG 0x8000
-#define bCCKRxIG 0x7f00
-#define bCCKLNAPolarity 0x800000
-#define bCCKRx1stGain 0x7f0000
-#define bCCKRFExtend 0x20000000 /* CCK Rx initial gain polarity */
-#define bCCKRxAGCSatLevel 0x1f000000
-#define bCCKRxAGCSatCount 0xe0
-#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
-#define bCCKFixedRxAGC 0x8000
-/* #define bCCKRxAGCFormat 0x4000 */ /* remove to HSSI register 0x824 */
-#define bCCKAntennaPolarity 0x2000
-#define bCCKTxFilterType 0x0c00
-#define bCCKRxAGCReportType 0x0300
-#define bCCKRxDAGCEn 0x80000000
-#define bCCKRxDAGCPeriod 0x20000000
-#define bCCKRxDAGCSatLevel 0x1f000000
-#define bCCKTimingRecovery 0x800000
-#define bCCKTxC0 0x3f0000
-#define bCCKTxC1 0x3f000000
-#define bCCKTxC2 0x3f
-#define bCCKTxC3 0x3f00
-#define bCCKTxC4 0x3f0000
-#define bCCKTxC5 0x3f000000
-#define bCCKTxC6 0x3f
-#define bCCKTxC7 0x3f00
-#define bCCKDebugPort 0xff0000
-#define bCCKDACDebug 0x0f000000
-#define bCCKFalseAlarmEnable 0x8000
-#define bCCKFalseAlarmRead 0x4000
-#define bCCKTRSSI 0x7f
-#define bCCKRxAGCReport 0xfe
-#define bCCKRxReport_AntSel 0x80000000
-#define bCCKRxReport_MFOff 0x40000000
-#define bCCKRxRxReport_SQLoss 0x20000000
-#define bCCKRxReport_Pktloss 0x10000000
-#define bCCKRxReport_Lockedbit 0x08000000
-#define bCCKRxReport_RateError 0x04000000
-#define bCCKRxReport_RxRate 0x03000000
-#define bCCKRxFACounterLower 0xff
-#define bCCKRxFACounterUpper 0xff000000
-#define bCCKRxHPAGCStart 0xe000
-#define bCCKRxHPAGCFinal 0x1c00
-
-#define bCCKRxFalseAlarmEnable 0x8000
-#define bCCKFACounterFreeze 0x4000
-
-#define bCCKTxPathSel 0x10000000
-#define bCCKDefaultRxPath 0xc000000
-#define bCCKOptionRxPath 0x3000000
-
-/* page c */
-#define bNumOfSTF 0x3
-#define bShift_L 0xc0
-#define bGI_TH 0xc
-#define bRxPathA 0x1
-#define bRxPathB 0x2
-#define bRxPathC 0x4
-#define bRxPathD 0x8
-#define bTxPathA 0x1
-#define bTxPathB 0x2
-#define bTxPathC 0x4
-#define bTxPathD 0x8
-#define bTRSSIFreq 0x200
-#define bADCBackoff 0x3000
-#define bDFIRBackoff 0xc000
-#define bTRSSILatchPhase 0x10000
-#define bRxIDCOffset 0xff
-#define bRxQDCOffset 0xff00
-#define bRxDFIRMode 0x1800000
-#define bRxDCNFType 0xe000000
-#define bRXIQImb_A 0x3ff
-#define bRXIQImb_B 0xfc00
-#define bRXIQImb_C 0x3f0000
-#define bRXIQImb_D 0xffc00000
-#define bDC_dc_Notch 0x60000
-#define bRxNBINotch 0x1f000000
-#define bPD_TH 0xf
-#define bPD_TH_Opt2 0xc000
-#define bPWED_TH 0x700
-#define bIfMF_Win_L 0x800
-#define bPD_Option 0x1000
-#define bMF_Win_L 0xe000
-#define bBW_Search_L 0x30000
-#define bwin_enh_L 0xc0000
-#define bBW_TH 0x700000
-#define bED_TH2 0x3800000
-#define bBW_option 0x4000000
-#define bRatio_TH 0x18000000
-#define bWindow_L 0xe0000000
-#define bSBD_Option 0x1
-#define bFrame_TH 0x1c
-#define bFS_Option 0x60
-#define bDC_Slope_check 0x80
-#define bFGuard_Counter_DC_L 0xe00
-#define bFrame_Weight_Short 0x7000
-#define bSub_Tune 0xe00000
-#define bFrame_DC_Length 0xe000000
-#define bSBD_start_offset 0x30000000
-#define bFrame_TH_2 0x7
-#define bFrame_GI2_TH 0x38
-#define bGI2_Sync_en 0x40
-#define bSarch_Short_Early 0x300
-#define bSarch_Short_Late 0xc00
-#define bSarch_GI2_Late 0x70000
-#define bCFOAntSum 0x1
-#define bCFOAcc 0x2
-#define bCFOStartOffset 0xc
-#define bCFOLookBack 0x70
-#define bCFOSumWeight 0x80
-#define bDAGCEnable 0x10000
-#define bTXIQImb_A 0x3ff
-#define bTXIQImb_B 0xfc00
-#define bTXIQImb_C 0x3f0000
-#define bTXIQImb_D 0xffc00000
-#define bTxIDCOffset 0xff
-#define bTxQDCOffset 0xff00
-#define bTxDFIRMode 0x10000
-#define bTxPesudoNoiseOn 0x4000000
-#define bTxPesudoNoise_A 0xff
-#define bTxPesudoNoise_B 0xff00
-#define bTxPesudoNoise_C 0xff0000
-#define bTxPesudoNoise_D 0xff000000
-#define bCCADropOption 0x20000
-#define bCCADropThres 0xfff00000
-#define bEDCCA_H 0xf
-#define bEDCCA_L 0xf0
-#define bLambda_ED 0x300
-#define bRxInitialGain 0x7f
-#define bRxAntDivEn 0x80
-#define bRxAGCAddressForLNA 0x7f00
-#define bRxHighPowerFlow 0x8000
-#define bRxAGCFreezeThres 0xc0000
-#define bRxFreezeStep_AGC1 0x300000
-#define bRxFreezeStep_AGC2 0xc00000
-#define bRxFreezeStep_AGC3 0x3000000
-#define bRxFreezeStep_AGC0 0xc000000
-#define bRxRssi_Cmp_En 0x10000000
-#define bRxQuickAGCEn 0x20000000
-#define bRxAGCFreezeThresMode 0x40000000
-#define bRxOverFlowCheckType 0x80000000
-#define bRxAGCShift 0x7f
-#define bTRSW_Tri_Only 0x80
-#define bPowerThres 0x300
-#define bRxAGCEn 0x1
-#define bRxAGCTogetherEn 0x2
-#define bRxAGCMin 0x4
-#define bRxHP_Ini 0x7
-#define bRxHP_TRLNA 0x70
-#define bRxHP_RSSI 0x700
-#define bRxHP_BBP1 0x7000
-#define bRxHP_BBP2 0x70000
-#define bRxHP_BBP3 0x700000
-#define bRSSI_H 0x7f0000 /* the threshold for high power */
-#define bRSSI_Gen 0x7f000000 /* the threshold for ant diversity */
-#define bRxSettle_TRSW 0x7
-#define bRxSettle_LNA 0x38
-#define bRxSettle_RSSI 0x1c0
-#define bRxSettle_BBP 0xe00
-#define bRxSettle_RxHP 0x7000
-#define bRxSettle_AntSW_RSSI 0x38000
-#define bRxSettle_AntSW 0xc0000
-#define bRxProcessTime_DAGC 0x300000
-#define bRxSettle_HSSI 0x400000
-#define bRxProcessTime_BBPPW 0x800000
-#define bRxAntennaPowerShift 0x3000000
-#define bRSSITableSelect 0xc000000
-#define bRxHP_Final 0x7000000
-#define bRxHTSettle_BBP 0x7
-#define bRxHTSettle_HSSI 0x8
-#define bRxHTSettle_RxHP 0x70
-#define bRxHTSettle_BBPPW 0x80
-#define bRxHTSettle_Idle 0x300
-#define bRxHTSettle_Reserved 0x1c00
-#define bRxHTRxHPEn 0x8000
-#define bRxHTAGCFreezeThres 0x30000
-#define bRxHTAGCTogetherEn 0x40000
-#define bRxHTAGCMin 0x80000
-#define bRxHTAGCEn 0x100000
-#define bRxHTDAGCEn 0x200000
-#define bRxHTRxHP_BBP 0x1c00000
-#define bRxHTRxHP_Final 0xe0000000
-#define bRxPWRatioTH 0x3
-#define bRxPWRatioEn 0x4
-#define bRxMFHold 0x3800
-#define bRxPD_Delay_TH1 0x38
-#define bRxPD_Delay_TH2 0x1c0
-#define bRxPD_DC_COUNT_MAX 0x600
-/* #define bRxMF_Hold 0x3800 */
-#define bRxPD_Delay_TH 0x8000
-#define bRxProcess_Delay 0xf0000
-#define bRxSearchrange_GI2_Early 0x700000
-#define bRxFrame_Guard_Counter_L 0x3800000
-#define bRxSGI_Guard_L 0xc000000
-#define bRxSGI_Search_L 0x30000000
-#define bRxSGI_TH 0xc0000000
-#define bDFSCnt0 0xff
-#define bDFSCnt1 0xff00
-#define bDFSFlag 0xf0000
-
-#define bMFWeightSum 0x300000
-#define bMinIdxTH 0x7f000000
-
-#define bDAFormat 0x40000
-
-#define bTxChEmuEnable 0x01000000
-
-#define bTRSWIsolation_A 0x7f
-#define bTRSWIsolation_B 0x7f00
-#define bTRSWIsolation_C 0x7f0000
-#define bTRSWIsolation_D 0x7f000000
-
-#define bExtLNAGain 0x7c00
-
-/* page d */
-#define bSTBCEn 0x4
-#define bAntennaMapping 0x10
-#define bNss 0x20
-#define bCFOAntSumD 0x200
-#define bPHYCounterReset 0x8000000
-#define bCFOReportGet 0x4000000
-#define bOFDMContinueTx 0x10000000
-#define bOFDMSingleCarrier 0x20000000
-#define bOFDMSingleTone 0x40000000
-/* #define bRxPath1 0x01
- * #define bRxPath2 0x02
- * #define bRxPath3 0x04
- * #define bRxPath4 0x08
- * #define bTxPath1 0x10
- * #define bTxPath2 0x20
- */
-#define bHTDetect 0x100
-#define bCFOEn 0x10000
-#define bCFOValue 0xfff00000
-#define bSigTone_Re 0x3f
-#define bSigTone_Im 0x7f00
-#define bCounter_CCA 0xffff
-#define bCounter_ParityFail 0xffff0000
-#define bCounter_RateIllegal 0xffff
-#define bCounter_CRC8Fail 0xffff0000
-#define bCounter_MCSNoSupport 0xffff
-#define bCounter_FastSync 0xffff
-#define bShortCFO 0xfff
-#define bShortCFOTLength 12 /* total */
-#define bShortCFOFLength 11 /* fraction */
-#define bLongCFO 0x7ff
-#define bLongCFOTLength 11
-#define bLongCFOFLength 11
-#define bTailCFO 0x1fff
-#define bTailCFOTLength 13
-#define bTailCFOFLength 12
-
-#define bmax_en_pwdB 0xffff
-#define bCC_power_dB 0xffff0000
-#define bnoise_pwdB 0xffff
-#define bPowerMeasTLength 10
-#define bPowerMeasFLength 3
-#define bRx_HT_BW 0x1
-#define bRxSC 0x6
-#define bRx_HT 0x8
-
-#define bNB_intf_det_on 0x1
-#define bIntf_win_len_cfg 0x30
-#define bNB_Intf_TH_cfg 0x1c0
-
-#define bRFGain 0x3f
-#define bTableSel 0x40
-#define bTRSW 0x80
-
-#define bRxSNR_A 0xff
-#define bRxSNR_B 0xff00
-#define bRxSNR_C 0xff0000
-#define bRxSNR_D 0xff000000
-#define bSNREVMTLength 8
-#define bSNREVMFLength 1
-
-#define bCSI1st 0xff
-#define bCSI2nd 0xff00
-#define bRxEVM1st 0xff0000
-#define bRxEVM2nd 0xff000000
-
-#define bSIGEVM 0xff
-#define bPWDB 0xff00
-#define bSGIEN 0x10000
-
-#define bSFactorQAM1 0xf
-#define bSFactorQAM2 0xf0
-#define bSFactorQAM3 0xf00
-#define bSFactorQAM4 0xf000
-#define bSFactorQAM5 0xf0000
-#define bSFactorQAM6 0xf0000
-#define bSFactorQAM7 0xf00000
-#define bSFactorQAM8 0xf000000
-#define bSFactorQAM9 0xf0000000
-#define bCSIScheme 0x100000
-
-#define bNoiseLvlTopSet 0x3
-#define bChSmooth 0x4
-#define bChSmoothCfg1 0x38
-#define bChSmoothCfg2 0x1c0
-#define bChSmoothCfg3 0xe00
-#define bChSmoothCfg4 0x7000
-#define bMRCMode 0x800000
-#define bTHEVMCfg 0x7000000
-
-#define bLoopFitType 0x1
-#define bUpdCFO 0x40
-#define bUpdCFOOffData 0x80
-#define bAdvUpdCFO 0x100
-#define bAdvTimeCtrl 0x800
-#define bUpdClko 0x1000
-#define bFC 0x6000
-#define bTrackingMode 0x8000
-#define bPhCmpEnable 0x10000
-#define bUpdClkoLTF 0x20000
-#define bComChCFO 0x40000
-#define bCSIEstiMode 0x80000
-#define bAdvUpdEqz 0x100000
-#define bUChCfg 0x7000000
-#define bUpdEqz 0x8000000
/* page e */
-#define bTxAGCRate18_06 0x7f7f7f7f
-#define bTxAGCRate54_24 0x7f7f7f7f
-#define bTxAGCRateMCS32 0x7f
#define bTxAGCRateCCK 0x7f00
-#define bTxAGCRateMCS3_MCS0 0x7f7f7f7f
-#define bTxAGCRateMCS7_MCS4 0x7f7f7f7f
-#define bTxAGCRateMCS11_MCS8 0x7f7f7f7f
-#define bTxAGCRateMCS15_MCS12 0x7f7f7f7f
-
-
-/* Rx Pseduo noise */
-#define bRxPesudoNoiseOn 0x20000000
-#define bRxPesudoNoise_A 0xff
-#define bRxPesudoNoise_B 0xff00
-#define bRxPesudoNoise_C 0xff0000
-#define bRxPesudoNoise_D 0xff000000
-#define bPesudoNoiseState_A 0xffff
-#define bPesudoNoiseState_B 0xffff0000
-#define bPesudoNoiseState_C 0xffff
-#define bPesudoNoiseState_D 0xffff0000
/* RF
* Zebra1
*/
-#define bZebra1_HSSIEnable 0x8
-#define bZebra1_TRxControl 0xc00
-#define bZebra1_TRxGainSetting 0x07f
-#define bZebra1_RxCorner 0xc00
-#define bZebra1_TxChargePump 0x38
-#define bZebra1_RxChargePump 0x7
#define bZebra1_ChannelNum 0xf80
-#define bZebra1_TxLPFBW 0x400
-#define bZebra1_RxLPFBW 0x600
-
-/* Zebra4 */
-#define bRTL8256RegModeCtrl1 0x100
-#define bRTL8256RegModeCtrl0 0x40
-#define bRTL8256_TxLPFBW 0x18
-#define bRTL8256_RxLPFBW 0x600
/* RTL8258 */
-#define bRTL8258_TxLPFBW 0xc
-#define bRTL8258_RxLPFBW 0xc00
-#define bRTL8258_RSSILPFBW 0xc0
-
-/* byte endable for sb_write */
-#define bByte0 0x1
-#define bByte1 0x2
-#define bByte2 0x4
-#define bByte3 0x8
-#define bWord0 0x3
-#define bWord1 0xc
-#define bDWord 0xf
-
/* for PutRegsetting & GetRegSetting BitMask */
#define bMaskByte0 0xff
#define bMaskByte1 0xff00
#define bMaskByte2 0xff0000
-#define bMaskByte3 0xff000000
#define bMaskHWord 0xffff0000
#define bMaskLWord 0x0000ffff
#define bMaskDWord 0xffffffff
@@ -831,49 +143,4 @@
/* for PutRFRegsetting & GetRFRegSetting BitMask */
#define bMask12Bits 0xfff
-#define bEnable 0x1
-#define bDisable 0x0
-
-#define LeftAntenna 0x0
-#define RightAntenna 0x1
-
-#define tCheckTxStatus 500 /* 500ms */
-#define tUpdateRxCounter 100 /* 100ms */
-
-#define rateCCK 0
-#define rateOFDM 1
-#define rateHT 2
-
-/* define Register-End */
-#define bPMAC_End 0x1ff
-#define bFPGAPHY0_End 0x8ff
-#define bFPGAPHY1_End 0x9ff
-#define bCCKPHY0_End 0xaff
-#define bOFDMPHY0_End 0xcff
-#define bOFDMPHY1_End 0xdff
-
-/* define max debug item in each debug page
- * #define bMaxItem_FPGA_PHY0 0x9
- * #define bMaxItem_FPGA_PHY1 0x3
- * #define bMaxItem_PHY_11B 0x16
- * #define bMaxItem_OFDM_PHY0 0x29
- * #define bMaxItem_OFDM_PHY1 0x0
- */
-
-#define bPMACControl 0x0
-#define bWMACControl 0x1
-#define bWNICControl 0x2
-
-#define PathA 0x0
-#define PathB 0x1
-#define PathC 0x2
-#define PathD 0x3
-
-#define rRTL8256RxMixerPole 0xb
-#define bZebraRxMixerPole 0x6
-#define rRTL8256TxBBOPBias 0x9
-#define bRTL8256TxBBOPBias 0x400
-#define rRTL8256TxBBBW 19
-#define bRTL8256TxBBBW 0x18
-
#endif /* __INC_HAL8190PCIPHYREG_H */
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index e9077347837e..0aa97c9dcced 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -89,9 +89,6 @@ static void BlinkWorkItemCallback(struct work_struct *work);
static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
enum LED_PIN_871x LedPin)
{
- struct net_device *nic;
-
- nic = padapter->pnetdev;
pLed->padapter = padapter;
pLed->LedPin = LedPin;
pLed->CurrLedState = LED_STATE_OFF;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 7bc74d7d8a3a..1075eacdb441 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -40,6 +40,7 @@
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
+#include <linux/crc32poly.h>
#include <linux/semaphore.h>
#include "osdep_service.h"
@@ -49,8 +50,6 @@
/* =====WEP related===== */
-#define CRC32_POLY 0x04c11db7
-
struct arc4context {
u32 x;
u32 y;
@@ -135,7 +134,7 @@ static void crc32_init(void)
for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j)
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
p1 = (u8 *)&crc32_table[i];
p1[0] = crc32_reverseBit(p[3]);
p1[1] = crc32_reverseBit(p[2]);
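
Both rtl8712 and rtl8723bs (further below) carry the same open-coded big-endian CRC-32 table generator, so dropping the private CRC32_POLY in favour of the shared constant from <linux/crc32poly.h> is pure deduplication: the polynomial value is identical. A minimal userspace sketch of the table construction, with a hypothetical reverse_bit() helper standing in for the driver's crc32_reverseBit():

#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY_BE 0x04c11db7 /* same value <linux/crc32poly.h> supplies */

static uint8_t reverse_bit(uint8_t b)
{
	b = (b >> 4) | (b << 4);
	b = ((b & 0xcc) >> 2) | ((b & 0x33) << 2);
	b = ((b & 0xaa) >> 1) | ((b & 0x55) << 1);
	return b;
}

int main(void)
{
	uint32_t table[256];
	int i, j;

	for (i = 0; i < 256; i++) {
		uint32_t c = (uint32_t)reverse_bit((uint8_t)i) << 24;
		uint8_t *p = (uint8_t *)&c, *p1 = (uint8_t *)&table[i];

		for (j = 8; j > 0; j--)
			c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : c << 1;
		/* store the result bit-reversed per byte, as the driver does */
		p1[0] = reverse_bit(p[3]);
		p1[1] = reverse_bit(p[2]);
		p1[2] = reverse_bit(p[1]);
		p1[3] = reverse_bit(p[0]);
	}
	printf("table[1] = 0x%08x\n", (unsigned)table[1]);
	return 0;
}

The bit-reversal before and after the shift loop is what turns the big-endian polynomial into the reflected table WEP's little-endian CRC expects; on a little-endian host entry 1 comes out as the familiar 0x77073096.
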
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 0ed2f44ab4e9..00a4302e9983 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -574,7 +574,6 @@ struct ieee80211_ht_addt_info {
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
/* Spatial Multiplexing Power Save Modes */
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 45c05527a57a..faf4b4158cfa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
return _FAIL;
- if (len > MAX_IE_SZ)
+ if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
pbss_network->IELength = len;
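
The added lower bound matters because len arrives as a signed int and is later used as a length: a negative value that slips past an upper-bound-only check becomes enormous once converted to size_t. A small sketch of the pattern (the MAX_IE_SZ value here is assumed; the driver defines its own):

#include <stdio.h>
#include <string.h>

#define MAX_IE_SZ 768 /* assumed value for illustration */

/* both bounds must be checked before a signed length is used as a copy
 * size: a negative int converted to size_t becomes a huge value
 */
static int store_ies(unsigned char *dst, const unsigned char *src, int len)
{
	if (len < 0 || len > MAX_IE_SZ)
		return -1;
	memcpy(dst, src, (size_t)len);
	return len;
}

int main(void)
{
	unsigned char dst[MAX_IE_SZ], src[4] = { 0xdd, 0x02, 0x00, 0x50 };

	printf("%d %d\n", store_ies(dst, src, 4), store_ies(dst, src, -4));
	return 0;
}
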
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index adac915a2153..35310e8e0806 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -77,14 +77,14 @@ void rtw_btcoex_SuspendNotify(struct adapter *padapter, u8 state)
void rtw_btcoex_HaltNotify(struct adapter *padapter)
{
- if (false == padapter->bup) {
+ if (!padapter->bup) {
DBG_871X(FUNC_ADPT_FMT ": bup =%d Skip!\n",
FUNC_ADPT_ARG(padapter), padapter->bup);
return;
}
- if (true == padapter->bSurpriseRemoved) {
+ if (padapter->bSurpriseRemoved) {
DBG_871X(FUNC_ADPT_FMT ": bSurpriseRemoved =%d Skip!\n",
FUNC_ADPT_ARG(padapter), padapter->bSurpriseRemoved);
@@ -115,11 +115,7 @@ s32 rtw_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter)
void rtw_btcoex_SetManualControl(struct adapter *padapter, u8 manual)
{
- if (true == manual) {
- hal_btcoex_SetManualControl(padapter, true);
- } else{
- hal_btcoex_SetManualControl(padapter, false);
- }
+ hal_btcoex_SetManualControl(padapter, manual);
}
u8 rtw_btcoex_IsBtControlLps(struct adapter *padapter)
@@ -198,11 +194,11 @@ void rtw_btcoex_RejectApAggregatedPacket(struct adapter *padapter, u8 enable)
pmlmeinfo = &padapter->mlmeextpriv.mlmext_info;
psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
- if (true == enable) {
+ if (enable) {
pmlmeinfo->accept_addba_req = false;
if (psta)
send_delba(padapter, 0, psta->hwaddr);
- } else{
+ } else {
pmlmeinfo->accept_addba_req = true;
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index bbf6f3fa21ea..b3247c9642ea 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -12,28 +12,25 @@
#include <linux/jiffies.h>
-/*------------------------Define local variable------------------------------*/
-u8 fakeEfuseBank = 0;
-u32 fakeEfuseUsedBytes = 0;
+/* Define global variables */
+u8 fakeEfuseBank;
+u32 fakeEfuseUsedBytes;
u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE] = {0};
u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN] = {0};
u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN] = {0};
-u32 BTEfuseUsedBytes = 0;
+u32 BTEfuseUsedBytes;
u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-u32 fakeBTEfuseUsedBytes = 0;
+u32 fakeBTEfuseUsedBytes;
u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-/*------------------------Define local variable------------------------------*/
-/* */
#define REG_EFUSE_CTRL 0x0030
#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
-/* */
bool
Efuse_Read1ByteFromFakeContent(
@@ -583,11 +580,10 @@ void EFUSE_ShadowMapUpdate(
EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, bPseudoTest);
- if (pEEPROM->bautoload_fail_flag == true) {
+ if (pEEPROM->bautoload_fail_flag)
memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
- } else{
+ else
Efuse_ReadAllMap(padapter, efuseType, pEEPROM->efuse_eeprom_data, bPseudoTest);
- }
/* PlatformMoveMemory((void *)&pHalData->EfuseMap[EFUSE_MODIFY_MAP][0], */
/* void *)&pHalData->EfuseMap[EFUSE_INIT_MAP][0], mapLen); */
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 0822e440204e..33f2649ba2ec 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -53,12 +53,11 @@ static u8 WIFI_OFDMRATES[] = {
IEEE80211_OFDM_RATE_54MB
};
-
int rtw_get_bit_value_from_ieee_value(u8 val)
{
unsigned char dot11_rate_table[] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0}; /* last element must be zero!! */
-
int i = 0;
+
while (dot11_rate_table[i] != 0) {
if (dot11_rate_table[i] == val)
return BIT(i);
@@ -67,30 +66,27 @@ int rtw_get_bit_value_from_ieee_value(u8 val)
return 0;
}
-uint rtw_is_cckrates_included(u8 *rate)
+bool rtw_is_cckrates_included(u8 *rate)
{
- u32 i = 0;
+ while (*rate) {
+ u8 r = *rate & 0x7f;
- while (rate[i] != 0) {
- if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
- (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
- return true;
- i++;
- }
+ if (r == 2 || r == 4 || r == 11 || r == 22)
+ return true;
+ rate++;
+ }
- return false;
+ return false;
}
-uint rtw_is_cckratesonly_included(u8 *rate)
+bool rtw_is_cckratesonly_included(u8 *rate)
{
- u32 i = 0;
-
+ while (*rate) {
+ u8 r = *rate & 0x7f;
- while (rate[i] != 0) {
- if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
- (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ if (r != 2 && r != 4 && r != 11 && r != 22)
return false;
- i++;
+ rate++;
}
return true;
@@ -99,19 +95,18 @@ uint rtw_is_cckratesonly_included(u8 *rate)
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
if (channel > 14) {
- if ((rtw_is_cckrates_included(rate)) == true)
+ if (rtw_is_cckrates_included(rate))
return WIRELESS_INVALID;
else
return WIRELESS_11A;
} else{ /* could be pure B, pure G, or B/G */
- if ((rtw_is_cckratesonly_included(rate)) == true)
+ if (rtw_is_cckratesonly_included(rate))
return WIRELESS_11B;
- else if ((rtw_is_cckrates_included(rate)) == true)
+ else if (rtw_is_cckrates_included(rate))
return WIRELESS_11BG;
else
return WIRELESS_11G;
}
-
}
u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
@@ -152,9 +147,8 @@ u8 *rtw_get_ie(u8 *pbuf, sint index, sint *len, sint limit)
sint tmp, i;
u8 *p;
- if (limit < 1) {
+ if (limit < 1)
return NULL;
- }
p = pbuf;
i = 0;
@@ -191,7 +185,6 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
uint cnt;
u8 *target_ie = NULL;
-
if (ielen)
*ielen = 0;
@@ -215,7 +208,6 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
} else{
cnt += in_ie[cnt+1]+2; /* goto next */
}
-
}
return target_ie;
@@ -292,23 +284,16 @@ void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
break;
-
}
}
-uint rtw_get_rateset_len(u8 *rateset)
+uint rtw_get_rateset_len(u8 *rateset)
{
- uint i = 0;
+ uint i;
- while (1) {
- if ((rateset[i]) == 0)
+ for (i = 0; i < 13; i++)
+ if (rateset[i] == 0)
break;
-
- if (i > 12)
- break;
-
- i++;
- }
return i;
}
@@ -369,7 +354,6 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
/* DS parameter set */
ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
-
/* IBSS Parameter Set */
ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
@@ -404,10 +388,8 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
pbuf = rtw_get_ie(pbuf, _WPA_IE_ID_, &len, limit_new);
if (pbuf) {
-
/* check if oui matches... */
if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type))) {
-
goto check_next_ie;
}
@@ -423,7 +405,6 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
return pbuf;
} else{
-
*wpa_ie_len = 0;
return NULL;
}
@@ -436,20 +417,16 @@ check_next_ie:
break;
pbuf += (2 + len);
-
}
*wpa_ie_len = 0;
return NULL;
-
}
unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
{
-
return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
-
}
int rtw_get_wpa_cipher_suite(u8 *s)
@@ -484,7 +461,6 @@ int rtw_get_wpa2_cipher_suite(u8 *s)
return 0;
}
-
int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
{
int i, ret = _SUCCESS;
@@ -497,7 +473,6 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
return _FAIL;
}
-
if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
(memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN))) {
return _FAIL;
@@ -508,10 +483,8 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
pos += 8;
left = wpa_ie_len - 8;
-
/* group_cipher */
if (left >= WPA_SELECTOR_LEN) {
-
*group_cipher = rtw_get_wpa_cipher_suite(pos);
pos += WPA_SELECTOR_LEN;
@@ -523,7 +496,6 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
return _FAIL;
}
-
/* pairwise_cipher */
if (left >= 2) {
/* count = le16_to_cpu(*(u16*)pos); */
@@ -560,7 +532,6 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
}
return ret;
-
}
int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
@@ -575,7 +546,6 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
return _FAIL;
}
-
if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) {
return _FAIL;
}
@@ -586,7 +556,6 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
/* group_cipher */
if (left >= RSN_SELECTOR_LEN) {
-
*group_cipher = rtw_get_wpa2_cipher_suite(pos);
pos += RSN_SELECTOR_LEN;
@@ -634,7 +603,6 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
}
return ret;
-
}
/* ifdef CONFIG_WAPI_SUPPORT */
@@ -736,7 +704,6 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie,
cnt += in_ie[cnt+1]+2; /* get next */
}
}
-
}
return (*rsn_len + *wpa_len);
@@ -747,7 +714,7 @@ u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
u8 match = false;
u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
- if (ie_ptr == NULL)
+ if (!ie_ptr)
return match;
eid = ie_ptr[0];
@@ -801,7 +768,6 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
} else{
cnt += in_ie[cnt+1]+2; /* goto next */
}
-
}
return wpsie_ptr;
@@ -854,7 +820,6 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
} else{
attr_ptr += attr_len; /* goto next */
}
-
}
return target_attr_ptr;
@@ -987,7 +952,6 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
}
return 0;
-
}
/**
@@ -1134,26 +1098,6 @@ ParseRes rtw_ieee802_11_parse_elems(u8 *start, uint len,
return ParseFailed;
return unknown ? ParseUnknown : ParseOK;
-
-}
-
-static u8 key_char2num(u8 ch);
-static u8 key_char2num(u8 ch)
-{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- else if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- else if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- else
- return 0xff;
-}
-
-u8 key_2char2num(u8 hch, u8 lch);
-u8 key_2char2num(u8 hch, u8 lch)
-{
- return ((key_char2num(hch) << 4) | key_char2num(lch));
}
void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr)
@@ -1163,38 +1107,24 @@ void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr)
const unsigned char *addr;
int len;
- if (mac_addr == NULL)
+ if (!mac_addr)
return;
- if (rtw_initmac) { /* Users specify the mac address */
- int jj, kk;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3) {
- mac[jj] = key_2char2num(rtw_initmac[kk], rtw_initmac[kk + 1]);
- }
- memcpy(mac_addr, mac, ETH_ALEN);
- } else{ /* Use the mac address stored in the Efuse */
- memcpy(mac, mac_addr, ETH_ALEN);
+ if (rtw_initmac && mac_pton(rtw_initmac, mac)) {
+ /* Users specify the mac address */
+ ether_addr_copy(mac_addr, mac);
+ } else {
+ /* Use the mac address stored in the Efuse */
+ ether_addr_copy(mac, mac_addr);
}
- if (((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) &&
- (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
- ((mac[0] == 0x00) && (mac[1] == 0x00) && (mac[2] == 0x00) &&
- (mac[3] == 0x00) && (mac[4] == 0x00) && (mac[5] == 0x00))) {
- if (np &&
- (addr = of_get_property(np, "local-mac-address", &len)) &&
+ if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac)) {
+ if ((addr = of_get_property(np, "local-mac-address", &len)) &&
len == ETH_ALEN) {
- memcpy(mac_addr, addr, ETH_ALEN);
+ ether_addr_copy(mac_addr, addr);
} else {
- mac[0] = 0x00;
- mac[1] = 0xe0;
- mac[2] = 0x4c;
- mac[3] = 0x87;
- mac[4] = 0x00;
- mac[5] = 0x00;
- /* use default mac addresss */
- memcpy(mac_addr, mac, ETH_ALEN);
- DBG_871X("MAC Address from efuse error, assign default one !!!\n");
+ eth_random_addr(mac_addr);
+ DBG_871X("MAC Address from efuse error, assign random one !!!\n");
}
}
@@ -1207,12 +1137,12 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
unsigned char *pbuf;
int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
int ret = _FAIL;
+
pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
if (pbuf && (wpa_ielen > 0)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen));
if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
-
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
@@ -1221,7 +1151,6 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
ret = _SUCCESS;
}
} else {
-
pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
if (pbuf && (wpa_ielen > 0)) {
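
The rewrite of rtw_macaddr_cfg() replaces the hand-rolled hex parser (key_char2num/key_2char2num, deleted above) with the kernel's mac_pton(), and the hard-coded fallback MAC with eth_random_addr(). A userspace stand-in for the parsing step, sketched with sscanf — the real mac_pton() lives in lib/net_utils.c and is stricter about trailing characters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's mac_pton(): parse "xx:xx:xx:xx:xx:xx" and
 * return false on malformed input, so the caller can fall back to the
 * efuse address (or, failing that, a random one)
 */
static bool mac_pton(const char *s, uint8_t mac[6])
{
	return sscanf(s, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx",
		      &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) == 6;
}

int main(void)
{
	uint8_t mac[6];

	if (mac_pton("00:e0:4c:87:12:34", mac))
		printf("parsed %02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	if (!mac_pton("not-a-mac", mac))
		printf("rejected malformed input\n");
	return 0;
}
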
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index a81e13011c49..0952d15f6d40 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -331,7 +331,8 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
static int has_channel(RT_CHANNEL_INFO *channel_set,
u8 chanset_size,
- u8 chan) {
+ u8 chan)
+{
int i;
for (i = 0; i < chanset_size; i++) {
@@ -345,7 +346,8 @@ static int has_channel(RT_CHANNEL_INFO *channel_set,
static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel_set,
u8 chanset_size,
- struct p2p_channels *channel_list) {
+ struct p2p_channels *channel_list)
+{
struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 612277a555d2..6c8ac9e86c9f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -6,6 +6,7 @@
******************************************************************************/
#define _RTW_SECURITY_C_
+#include <linux/crc32poly.h>
#include <drv_types.h>
#include <rtw_debug.h>
@@ -87,8 +88,6 @@ const char *security_type_str(u8 value)
/* WEP related ===== */
-#define CRC32_POLY 0x04c11db7
-
struct arc4context {
u32 x;
u32 y;
@@ -178,7 +177,7 @@ static void crc32_init(void)
for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
}
p1 = (u8 *)&crc32_table[i];
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 2b3eb6f8ddc5..2c65af319a60 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -42,13 +42,16 @@ extern unsigned char WPA_TKIP_CIPHER[4];
#define DISCONNECT_BY_CHK_BCN_FAIL_THRESHOLD 3
static u8 rtw_basic_rate_cck[4] = {
- IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK
+ IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
};
static u8 rtw_basic_rate_ofdm[3] = {
- IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+ IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};
int cckrates_included(unsigned char *rate, int ratelen)
@@ -62,7 +65,6 @@ int cckrates_included(unsigned char *rate, int ratelen)
}
return false;
-
}
int cckratesonly_included(unsigned char *rate, int ratelen)
@@ -131,70 +133,41 @@ u8 networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta)
default:
raid = RATEID_IDX_BGN_40M_2SS;
break;
-
}
return raid;
-
}
unsigned char ratetbl_val_2wifirate(unsigned char rate);
unsigned char ratetbl_val_2wifirate(unsigned char rate)
{
- unsigned char val = 0;
-
switch (rate & 0x7f) {
case 0:
- val = IEEE80211_CCK_RATE_1MB;
- break;
-
+ return IEEE80211_CCK_RATE_1MB;
case 1:
- val = IEEE80211_CCK_RATE_2MB;
- break;
-
+ return IEEE80211_CCK_RATE_2MB;
case 2:
- val = IEEE80211_CCK_RATE_5MB;
- break;
-
+ return IEEE80211_CCK_RATE_5MB;
case 3:
- val = IEEE80211_CCK_RATE_11MB;
- break;
-
+ return IEEE80211_CCK_RATE_11MB;
case 4:
- val = IEEE80211_OFDM_RATE_6MB;
- break;
-
+ return IEEE80211_OFDM_RATE_6MB;
case 5:
- val = IEEE80211_OFDM_RATE_9MB;
- break;
-
+ return IEEE80211_OFDM_RATE_9MB;
case 6:
- val = IEEE80211_OFDM_RATE_12MB;
- break;
-
+ return IEEE80211_OFDM_RATE_12MB;
case 7:
- val = IEEE80211_OFDM_RATE_18MB;
- break;
-
+ return IEEE80211_OFDM_RATE_18MB;
case 8:
- val = IEEE80211_OFDM_RATE_24MB;
- break;
-
+ return IEEE80211_OFDM_RATE_24MB;
case 9:
- val = IEEE80211_OFDM_RATE_36MB;
- break;
-
+ return IEEE80211_OFDM_RATE_36MB;
case 10:
- val = IEEE80211_OFDM_RATE_48MB;
- break;
-
+ return IEEE80211_OFDM_RATE_48MB;
case 11:
- val = IEEE80211_OFDM_RATE_54MB;
- break;
-
+ return IEEE80211_OFDM_RATE_54MB;
+ default:
+ return 0;
}
-
- return val;
-
}
int is_basicrate(struct adapter *padapter, unsigned char rate);
@@ -289,7 +262,6 @@ void UpdateBrateTbl(struct adapter *Adapter, u8 *mBratesOS)
break;
}
}
-
}
void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
@@ -308,7 +280,6 @@ void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
break;
}
}
-
}
void Save_DM_Func_Flag(struct adapter *padapter)
@@ -496,7 +467,6 @@ u16 get_beacon_interval(struct wlan_bssid_ex *bss)
memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->IEs), 2);
return le16_to_cpu(val);
-
}
int is_client_associated_to_ap(struct adapter *padapter)
@@ -539,7 +509,6 @@ int is_IBSS_empty(struct adapter *padapter)
}
return true;
-
}
unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
@@ -851,7 +820,6 @@ void flush_all_cam_entry(struct adapter *padapter)
rtw_hal_set_hwreg(padapter, HW_VAR_SEC_DK_CFG, (u8 *)false);
memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
-
}
int WMM_param_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
@@ -1065,7 +1033,6 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_80211_var_
new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
}
-
if ((new_bwmode != pmlmeext->cur_bwmode) || (new_ch_offset != pmlmeext->cur_ch_offset)) {
pmlmeinfo->bwmode_updated = true;
@@ -1077,7 +1044,6 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_80211_var_
} else
pmlmeinfo->bwmode_updated = false;
-
if (true == pmlmeinfo->bwmode_updated) {
struct sta_info *psta;
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
@@ -1085,7 +1051,6 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_80211_var_
/* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
-
/* update ap's stainfo */
psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
if (psta) {
@@ -1116,7 +1081,7 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- if (pIE == NULL)
+ if (!pIE)
return;
if (phtpriv->ht_option == false)
@@ -1199,13 +1164,12 @@ void HT_info_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- if (pIE == NULL)
+ if (!pIE)
return;
if (phtpriv->ht_option == false)
return;
-
if (pIE->Length > sizeof(struct HT_info_element))
return;
@@ -1352,7 +1316,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
bssid = rtw_zmalloc(sizeof(struct wlan_bssid_ex));
- if (bssid == NULL) {
+ if (!bssid) {
DBG_871X("%s rtw_zmalloc fail !!!\n", __func__);
return true;
}
@@ -1427,7 +1391,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
/* checking SSID */
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
+ if (!p) {
DBG_871X("%s marc: cannot find SSID for survey event\n", __func__);
hidden_ssid = true;
} else {
@@ -1617,7 +1581,6 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
return false;
} else
return false;
-
}
int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps, u8 bwmode)
@@ -1769,7 +1732,6 @@ void update_IOT_info(struct adapter *padapter)
pmlmeinfo->turboMode_rtsen = 1;
break;
}
-
}
void update_capinfo(struct adapter *Adapter, u16 updateCap)
@@ -1782,7 +1744,6 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
/* Mark to update preamble value forever, 2008.03.18 by lanhsin */
/* if (pMgntInfo->RegPreambleMode == PREAMBLE_AUTO) */
{
-
if (updateCap & cShortPreamble) {
/* Short Preamble */
if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
@@ -1820,7 +1781,6 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
}
rtw_hal_set_hwreg(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
-
}
void update_wireless_mode(struct adapter *padapter)
@@ -1894,7 +1854,7 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
- if (pIE == NULL)
+ if (!pIE)
return _FAIL;
memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
@@ -1905,7 +1865,6 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
return _SUCCESS;
-
}
void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
@@ -1940,7 +1899,6 @@ void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
preorder_ctrl->enable = pmlmeinfo->accept_addba_req;
}
-
}
void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
@@ -1972,7 +1930,6 @@ void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
u32 delay_ms;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-
pmlmeext->bcn_cnt++;
pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
@@ -2022,7 +1979,6 @@ void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
for (i = 0; i < 9; i++) {
pmlmeext->bcn_delay_ratio[i] = (pmlmeext->bcn_delay_cnt[i] * 100) / pmlmeext->bcn_cnt;
-
DBG_871X("%s():bcn_delay_cnt[%d]=%d, bcn_delay_ratio[%d]=%d\n", __func__, i,
pmlmeext->bcn_delay_cnt[i], i, pmlmeext->bcn_delay_ratio[i]);
@@ -2049,10 +2005,8 @@ void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
pmlmeext->bcn_cnt = 0;
}
-
}
-
void beacon_timing_control(struct adapter *padapter)
{
rtw_hal_bcn_related_reg_setting(padapter);
@@ -2064,7 +2018,6 @@ void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta)
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
-
if (!memcmp(psta->hwaddr, bc_addr, ETH_ALEN))
return;
@@ -2089,7 +2042,6 @@ void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta)
psta->mac_id = i;
DBG_871X("%s = %d\n", __func__, psta->mac_id);
}
-
}
void rtw_release_macid(struct adapter *padapter, struct sta_info *psta)
@@ -2097,7 +2049,6 @@ void rtw_release_macid(struct adapter *padapter, struct sta_info *psta)
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
-
if (!memcmp(psta->hwaddr, bc_addr, ETH_ALEN))
return;
@@ -2111,10 +2062,8 @@ void rtw_release_macid(struct adapter *padapter, struct sta_info *psta)
pdvobj->macid[psta->mac_id] = false;
psta->mac_id = NUM_STA;
}
-
}
spin_unlock_bh(&pdvobj->lock);
-
}
/* For 8188E RA */
u8 rtw_search_max_mac_id(struct adapter *padapter)
@@ -2131,7 +2080,6 @@ u8 rtw_search_max_mac_id(struct adapter *padapter)
spin_unlock_bh(&pdvobj->lock);
return max_mac_id;
-
}
struct adapter *dvobj_get_port0_adapter(struct dvobj_priv *dvobj)
@@ -2246,9 +2194,9 @@ void rtw_get_current_ip_address(struct adapter *padapter, u8 *pcurrentip)
if ((pmlmeinfo->state & WIFI_FW_LINKING_STATE) ||
pmlmeinfo->state & WIFI_FW_AP_STATE) {
- if (my_ip_ptr != NULL) {
+ if (my_ip_ptr) {
struct in_ifaddr *my_ifa_list = my_ip_ptr->ifa_list;
- if (my_ifa_list != NULL) {
+ if (my_ifa_list) {
ipaddress[0] = my_ifa_list->ifa_address & 0xFF;
ipaddress[1] = (my_ifa_list->ifa_address >> 8) & 0xFF;
ipaddress[2] = (my_ifa_list->ifa_address >> 16) & 0xFF;
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index aaabffb0a199..edb678190b4b 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -36,7 +36,7 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
INIT_LIST_HEAD(&psta_xmitpriv->apsd);
}
-s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
{
int i;
struct xmit_buf *pxmitbuf;
@@ -845,8 +845,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
/* pattrib->priority = 5; force to used VI queue, for testing */
- rtw_set_tx_chksum_offload(pkt, pattrib);
-
exit:
return res;
}
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 2ee25b2471de..53d3bdf21a6f 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -1352,7 +1352,6 @@ static void _PHY_ReloadMACRegisters8723B(
static void _PHY_PathADDAOn8723B(
struct adapter *padapter,
u32 *ADDAReg,
- bool isPathAOn,
bool is2T
)
{
@@ -1363,7 +1362,7 @@ static void _PHY_PathADDAOn8723B(
ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("ADDA ON.\n"));
- pathOn = isPathAOn ? 0x01c00014 : 0x01c00014;
+ pathOn = 0x01c00014;
if (false == is2T) {
pathOn = 0x01c00014;
PHY_SetBBReg(pDM_Odm->Adapter, ADDAReg[0], bMaskDWord, 0x01c00014);
@@ -1556,7 +1555,7 @@ static void phy_IQCalibrate_8723B(
}
ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t));
- _PHY_PathADDAOn8723B(padapter, ADDA_REG, true, is2T);
+ _PHY_PathADDAOn8723B(padapter, ADDA_REG, is2T);
/* no serial mode */
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 3922d0308a81..0d2c61b67d0e 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -1612,202 +1612,201 @@ static s8 phy_GetChannelIndexOfTxPowerLimit(u8 Band, u8 Channel)
return channelIndex;
}
-s8 PHY_GetTxPowerLimit(
- struct adapter *Adapter,
- u32 RegPwrTblSel,
- enum BAND_TYPE Band,
- enum CHANNEL_WIDTH Bandwidth,
- u8 RfPath,
- u8 DataRate,
- u8 Channel
-)
+static s16 get_bandwidth_idx(const enum CHANNEL_WIDTH bandwidth)
{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- s16 band = -1, regulation = -1, bandwidth = -1, rateSection = -1, channel = -1;
- s8 powerLimit = MAX_POWER_INDEX;
-
- if ((Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory != 1) ||
- Adapter->registrypriv.RegEnableTxPowerLimit == 0)
- return MAX_POWER_INDEX;
-
- switch (Adapter->registrypriv.RegPwrTblSel) {
- case 1:
- regulation = TXPWR_LMT_ETSI;
- break;
- case 2:
- regulation = TXPWR_LMT_MKK;
- break;
- case 3:
- regulation = TXPWR_LMT_FCC;
- break;
-
- case 4:
- regulation = TXPWR_LMT_WW;
- break;
-
+ switch (bandwidth) {
+ case CHANNEL_WIDTH_20:
+ return 0;
+ case CHANNEL_WIDTH_40:
+ return 1;
+ case CHANNEL_WIDTH_80:
+ return 2;
+ case CHANNEL_WIDTH_160:
+ return 3;
default:
- regulation = (Band == BAND_ON_2_4G) ? pHalData->Regulation2_4G : pHalData->Regulation5G;
- break;
+ return -1;
}
+}
- /* DBG_871X("pMgntInfo->RegPwrTblSel %d, final regulation %d\n", Adapter->registrypriv.RegPwrTblSel, regulation); */
-
-
- if (Band == BAND_ON_2_4G)
- band = 0;
- else if (Band == BAND_ON_5G)
- band = 1;
-
- if (Bandwidth == CHANNEL_WIDTH_20)
- bandwidth = 0;
- else if (Bandwidth == CHANNEL_WIDTH_40)
- bandwidth = 1;
- else if (Bandwidth == CHANNEL_WIDTH_80)
- bandwidth = 2;
- else if (Bandwidth == CHANNEL_WIDTH_160)
- bandwidth = 3;
-
- switch (DataRate) {
+static s16 get_rate_sctn_idx(const u8 rate)
+{
+ switch (rate) {
case MGN_1M: case MGN_2M: case MGN_5_5M: case MGN_11M:
- rateSection = 0;
- break;
-
+ return 0;
case MGN_6M: case MGN_9M: case MGN_12M: case MGN_18M:
case MGN_24M: case MGN_36M: case MGN_48M: case MGN_54M:
- rateSection = 1;
- break;
-
+ return 1;
case MGN_MCS0: case MGN_MCS1: case MGN_MCS2: case MGN_MCS3:
case MGN_MCS4: case MGN_MCS5: case MGN_MCS6: case MGN_MCS7:
- rateSection = 2;
- break;
-
+ return 2;
case MGN_MCS8: case MGN_MCS9: case MGN_MCS10: case MGN_MCS11:
case MGN_MCS12: case MGN_MCS13: case MGN_MCS14: case MGN_MCS15:
- rateSection = 3;
- break;
-
+ return 3;
case MGN_MCS16: case MGN_MCS17: case MGN_MCS18: case MGN_MCS19:
case MGN_MCS20: case MGN_MCS21: case MGN_MCS22: case MGN_MCS23:
- rateSection = 4;
- break;
-
+ return 4;
case MGN_MCS24: case MGN_MCS25: case MGN_MCS26: case MGN_MCS27:
case MGN_MCS28: case MGN_MCS29: case MGN_MCS30: case MGN_MCS31:
- rateSection = 5;
- break;
-
+ return 5;
case MGN_VHT1SS_MCS0: case MGN_VHT1SS_MCS1: case MGN_VHT1SS_MCS2:
case MGN_VHT1SS_MCS3: case MGN_VHT1SS_MCS4: case MGN_VHT1SS_MCS5:
case MGN_VHT1SS_MCS6: case MGN_VHT1SS_MCS7: case MGN_VHT1SS_MCS8:
case MGN_VHT1SS_MCS9:
- rateSection = 6;
- break;
-
+ return 6;
case MGN_VHT2SS_MCS0: case MGN_VHT2SS_MCS1: case MGN_VHT2SS_MCS2:
case MGN_VHT2SS_MCS3: case MGN_VHT2SS_MCS4: case MGN_VHT2SS_MCS5:
case MGN_VHT2SS_MCS6: case MGN_VHT2SS_MCS7: case MGN_VHT2SS_MCS8:
case MGN_VHT2SS_MCS9:
- rateSection = 7;
- break;
-
+ return 7;
case MGN_VHT3SS_MCS0: case MGN_VHT3SS_MCS1: case MGN_VHT3SS_MCS2:
case MGN_VHT3SS_MCS3: case MGN_VHT3SS_MCS4: case MGN_VHT3SS_MCS5:
case MGN_VHT3SS_MCS6: case MGN_VHT3SS_MCS7: case MGN_VHT3SS_MCS8:
case MGN_VHT3SS_MCS9:
- rateSection = 8;
- break;
-
+ return 8;
case MGN_VHT4SS_MCS0: case MGN_VHT4SS_MCS1: case MGN_VHT4SS_MCS2:
case MGN_VHT4SS_MCS3: case MGN_VHT4SS_MCS4: case MGN_VHT4SS_MCS5:
case MGN_VHT4SS_MCS6: case MGN_VHT4SS_MCS7: case MGN_VHT4SS_MCS8:
case MGN_VHT4SS_MCS9:
- rateSection = 9;
- break;
+ return 9;
+ default:
+ DBG_871X("Wrong rate 0x%x\n", rate);
+ return -1;
+ }
+}
+
+s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 reg_pwr_tbl_sel,
+ enum BAND_TYPE band_type, enum CHANNEL_WIDTH bandwidth,
+ u8 rf_path, u8 data_rate, u8 channel)
+{
+ s16 idx_band = -1;
+ s16 idx_regulation = -1;
+ s16 idx_bandwidth = -1;
+ s16 idx_rate_sctn = -1;
+ s16 idx_channel = -1;
+ s8 pwr_lmt = MAX_POWER_INDEX;
+ struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
+
+ if (((adapter->registrypriv.RegEnableTxPowerLimit == 2) &&
+ (hal_data->EEPROMRegulatory != 1)) ||
+ (adapter->registrypriv.RegEnableTxPowerLimit == 0))
+ return MAX_POWER_INDEX;
+ switch (adapter->registrypriv.RegPwrTblSel) {
+ case 1:
+ idx_regulation = TXPWR_LMT_ETSI;
+ break;
+ case 2:
+ idx_regulation = TXPWR_LMT_MKK;
+ break;
+ case 3:
+ idx_regulation = TXPWR_LMT_FCC;
+ break;
+ case 4:
+ idx_regulation = TXPWR_LMT_WW;
+ break;
default:
- DBG_871X("Wrong rate 0x%x\n", DataRate);
+ idx_regulation = (band_type == BAND_ON_2_4G) ?
+ hal_data->Regulation2_4G :
+ hal_data->Regulation5G;
break;
}
- if (Band == BAND_ON_5G && rateSection == 0)
- DBG_871X("Wrong rate 0x%x: No CCK in 5G Band\n", DataRate);
+ /* DBG_871X("pMgntInfo->RegPwrTblSel %d, final regulation %d\n", */
+ /* adapter->registrypriv.RegPwrTblSel, idx_regulation); */
- /* workaround for wrong index combination to obtain tx power limit, */
- /* OFDM only exists in BW 20M */
- if (rateSection == 1)
- bandwidth = 0;
+ if (band_type == BAND_ON_2_4G)
+ idx_band = 0;
+ else if (band_type == BAND_ON_5G)
+ idx_band = 1;
+
+ idx_bandwidth = get_bandwidth_idx(bandwidth);
+ idx_rate_sctn = get_rate_sctn_idx(data_rate);
+
+ if (band_type == BAND_ON_5G && idx_rate_sctn == 0)
+ DBG_871X("Wrong rate 0x%x: No CCK in 5G Band\n", DataRate);
/* workaround for wrong index combination to obtain tx power limit, */
+ /* OFDM only exists in BW 20M */
/* CCK table will only be given in BW 20M */
- if (rateSection == 0)
- bandwidth = 0;
-
- /* workaround for wrong indxe combination to obtain tx power limit, */
/* HT on 80M will reference to HT on 40M */
- if ((rateSection == 2 || rateSection == 3) && Band == BAND_ON_5G && bandwidth == 2) {
- bandwidth = 1;
- }
+ if (idx_rate_sctn == 0 || idx_rate_sctn == 1)
+ idx_bandwidth = 0;
+ else if ((idx_rate_sctn == 2 || idx_rate_sctn == 3) &&
+ (band_type == BAND_ON_5G) && (idx_bandwidth == 2))
+ idx_bandwidth = 1;
- if (Band == BAND_ON_2_4G)
- channel = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_2_4G, Channel);
- else if (Band == BAND_ON_5G)
- channel = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_5G, Channel);
- else if (Band == BAND_ON_BOTH) {
- /* BAND_ON_BOTH don't care temporarily */
- }
+ if (band_type == BAND_ON_2_4G || band_type == BAND_ON_5G)
+ idx_channel = phy_GetChannelIndexOfTxPowerLimit(band_type, channel);
- if (band == -1 || regulation == -1 || bandwidth == -1 ||
- rateSection == -1 || channel == -1) {
+ if (idx_band == -1 || idx_regulation == -1 || idx_bandwidth == -1 ||
+ idx_rate_sctn == -1 || idx_channel == -1) {
/* DBG_871X("Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnlGroup %d]\n", */
- /* band, regulation, bandwidth, RfPath, rateSection, channelGroup); */
+ /* idx_band, idx_regulation, idx_bandwidth, rf_path, */
+ /* idx_rate_sctn, channel); */
return MAX_POWER_INDEX;
}
- if (Band == BAND_ON_2_4G) {
+ if (band_type == BAND_ON_2_4G) {
s8 limits[10] = {0}; u8 i = 0;
for (i = 0; i < MAX_REGULATION_NUM; i++)
- limits[i] = pHalData->TxPwrLimit_2_4G[i][bandwidth][rateSection][channel][RfPath];
-
- powerLimit = (regulation == TXPWR_LMT_WW) ? phy_GetWorldWideLimit(limits) :
- pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channel][RfPath];
-
- } else if (Band == BAND_ON_5G) {
+ limits[i] = hal_data->TxPwrLimit_2_4G[i]
+ [idx_bandwidth]
+ [idx_rate_sctn]
+ [idx_channel]
+ [rf_path];
+
+ pwr_lmt = (idx_regulation == TXPWR_LMT_WW) ?
+ phy_GetWorldWideLimit(limits) :
+ hal_data->TxPwrLimit_2_4G[idx_regulation]
+ [idx_bandwidth]
+ [idx_rate_sctn]
+ [idx_channel]
+ [rf_path];
+
+ } else if (band_type == BAND_ON_5G) {
s8 limits[10] = {0}; u8 i = 0;
for (i = 0; i < MAX_REGULATION_NUM; ++i)
- limits[i] = pHalData->TxPwrLimit_5G[i][bandwidth][rateSection][channel][RfPath];
-
- powerLimit = (regulation == TXPWR_LMT_WW) ? phy_GetWorldWideLimit(limits) :
- pHalData->TxPwrLimit_5G[regulation][bandwidth][rateSection][channel][RfPath];
- } else
+ limits[i] = hal_data->TxPwrLimit_5G[i]
+ [idx_bandwidth]
+ [idx_rate_sctn]
+ [idx_channel]
+ [rf_path];
+
+ pwr_lmt = (idx_regulation == TXPWR_LMT_WW) ?
+ phy_GetWorldWideLimit(limits) :
+ hal_data->TxPwrLimit_5G[idx_regulation]
+ [idx_bandwidth]
+ [idx_rate_sctn]
+ [idx_channel]
+ [rf_path];
+ } else {
DBG_871X("No power limit table of the specified band\n");
+ }
/* combine 5G VHT & HT rate */
/* 5G 20M and 40M HT and VHT can cross reference */
/*
- if (Band == BAND_ON_5G && powerLimit == MAX_POWER_INDEX) {
- if (bandwidth == 0 || bandwidth == 1) {
+ if (band_type == BAND_ON_5G && pwr_lmt == MAX_POWER_INDEX) {
+ if (idx_bandwidth == 0 || idx_bandwidth == 1) {
RT_TRACE(COMP_INIT, DBG_LOUD, ("No power limit table of the specified band %d, bandwidth %d, ratesection %d, rf path %d\n",
- band, bandwidth, rateSection, RfPath));
- if (rateSection == 2)
- powerLimit = pHalData->TxPwrLimit_5G[regulation]
- [bandwidth][4][channelGroup][RfPath];
- else if (rateSection == 4)
- powerLimit = pHalData->TxPwrLimit_5G[regulation]
- [bandwidth][2][channelGroup][RfPath];
- else if (rateSection == 3)
- powerLimit = pHalData->TxPwrLimit_5G[regulation]
- [bandwidth][5][channelGroup][RfPath];
- else if (rateSection == 5)
- powerLimit = pHalData->TxPwrLimit_5G[regulation]
- [bandwidth][3][channelGroup][RfPath];
+ idx_band, idx_bandwidth,
+ idx_rate_sctn, rf_path));
+ if (idx_rate_sctn == 2)
+ pwr_lmt = hal_data->TxPwrLimit_5G[idx_regulation][idx_bandwidth][4][idx_channel][rf_path];
+ else if (idx_rate_sctn == 4)
+ pwr_lmt = hal_data->TxPwrLimit_5G[idx_regulation][idx_bandwidth][2][idx_channel][rf_path];
+ else if (idx_rate_sctn == 3)
+ pwr_lmt = hal_data->TxPwrLimit_5G[idx_regulation][idx_bandwidth][5][idx_channel][rf_path];
+ else if (idx_rate_sctn == 5)
+ pwr_lmt = hal_data->TxPwrLimit_5G[idx_regulation][idx_bandwidth][3][idx_channel][rf_path];
}
}
*/
+
/* DBG_871X("TxPwrLmt[Regulation %d][Band %d][BW %d][RFPath %d][Rate 0x%x][Chnl %d] = %d\n", */
- /* regulation, pHalData->CurrentBandType, Bandwidth, RfPath, DataRate, Channel, powerLimit); */
- return powerLimit;
+ /* idx_regulation, hal_data->CurrentBandType, bandwidth, rf_path, data_rate, channel, pwr_lmt); */
+ return pwr_lmt;
}
static void phy_CrossReferenceHTAndVHTTxPowerLimit(struct adapter *padapter)
@@ -3294,4 +3293,3 @@ void phy_free_filebuf(struct adapter *padapter)
vfree(pHalData->rf_tx_pwr_lmt);
}
-
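
The refactored phy_get_tx_pwr_lmt() funnels each enum-to-table-index translation through a small helper that returns -1 for unrecognised input, then bails out with MAX_POWER_INDEX (no restriction) if any index is still unresolved. A runnable miniature of that sentinel pattern (the MAX_POWER_INDEX value is assumed from the driver's convention):

#include <stdio.h>

#define MAX_POWER_INDEX 0x3f /* assumed "no limit" value */

enum width { W20, W40, W80, W160, W_INVALID };

/* mirrors get_bandwidth_idx(): map an enum to a table index, -1 if unknown */
static int width_idx(enum width w)
{
	switch (w) {
	case W20:  return 0;
	case W40:  return 1;
	case W80:  return 2;
	case W160: return 3;
	default:   return -1;
	}
}

static signed char lookup_limit(const signed char table[4], enum width w)
{
	int idx = width_idx(w);

	/* any unresolved index falls back to the unrestricted limit */
	if (idx < 0)
		return MAX_POWER_INDEX;
	return table[idx];
}

int main(void)
{
	const signed char limits[4] = { 30, 28, 26, 24 };

	printf("40MHz limit: %d\n", lookup_limit(limits, W40));
	printf("bad width:   %d\n", lookup_limit(limits, W_INVALID));
	return 0;
}
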
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index a4153a660d32..23ab160ac2c8 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -913,7 +913,7 @@ typedef struct DM_Out_Source_Dynamic_Mechanism_Structure {
/* HOOK BEFORE REG INIT----------- */
/* ODM Platform info AP/ADSL/CE/MP = 1/2/3/4 */
u8 SupportPlatform;
- /* ODM Support Ability DIG/RATR/TX_PWR_TRACK/ ?K?K = 1/2/3/?K */
+ /* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
u32 SupportAbility;
/* ODM PCIE/USB/SDIO = 1/2/3 */
u8 SupportInterface;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 50428f688859..78a4828ecb65 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -688,7 +688,7 @@ u8 PHY_GetTxPowerIndex_8723B(
txPower = (s8) PHY_GetTxPowerIndexBase(padapter, RFPath, Rate, BandWidth, Channel, &bIn24G);
powerDiffByRate = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, ODM_RF_PATH_A, RF_1TX, Rate);
- limit = PHY_GetTxPowerLimit(
+ limit = phy_get_tx_pwr_lmt(
padapter,
padapter->registrypriv.RegPwrTblSel,
(u8)(!bIn24G),
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index c5184315f82f..f841546584a7 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -177,7 +177,7 @@ u8 Channel,
bool *bIn24G
);
-s8 PHY_GetTxPowerLimit (struct adapter *adapter, u32 RegPwrTblSel,
+s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 RegPwrTblSel,
enum BAND_TYPE Band, enum CHANNEL_WIDTH Bandwidth,
u8 RfPath,
u8 DataRate,
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index 974e922f54fa..bcc8dfa8e672 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -1169,9 +1169,9 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv);
int rtw_get_bit_value_from_ieee_value(u8 val);
-uint rtw_is_cckrates_included(u8 *rate);
+bool rtw_is_cckrates_included(u8 *rate);
-uint rtw_is_cckratesonly_included(u8 *rate);
+bool rtw_is_cckratesonly_included(u8 *rate);
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
diff --git a/drivers/staging/rtl8723bs/include/rtl8192c_rf.h b/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
deleted file mode 100644
index ad684add8dd6..000000000000
--- a/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef _RTL8192C_RF_H_
-#define _RTL8192C_RF_H_
-
-
-/* */
-/* RF RL6052 Series API */
-/* */
-void rtl8192c_RF_ChangeTxPath(struct adapter *Adapter,
- u16 DataRate);
-void rtl8192c_PHY_RF6052SetBandwidth(struct adapter *Adapter,
- enum CHANNEL_WIDTH Bandwidth);
-void rtl8192c_PHY_RF6052SetCckTxPower(struct adapter *Adapter,
- u8 *pPowerlevel);
-void rtl8192c_PHY_RF6052SetOFDMTxPower(struct adapter *Adapter,
- u8 *pPowerLevel,
- u8 Channel);
-int PHY_RF6052_Config8192C(struct adapter *Adapter);
-
-/*--------------------------Exported Function prototype---------------------*/
-
-
-#endif/* End of HalRf.h */
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
index 1c16183caf9b..987b9f12a4f4 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
@@ -7,7 +7,6 @@
#ifndef __RTL8723B_RF_H__
#define __RTL8723B_RF_H__
-#include "rtl8192c_rf.h"
int PHY_RF6052_Config8723B(struct adapter *Adapter );
diff --git a/drivers/staging/rtl8723bs/include/rtw_beamforming.h b/drivers/staging/rtl8723bs/include/rtw_beamforming.h
deleted file mode 100644
index 031496c8c02c..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_beamforming.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __RTW_BEAMFORMING_H_
-#define __RTW_BEAMFORMING_H_
-
-#define BEAMFORMING_ENTRY_NUM 2
-#define GET_BEAMFORM_INFO(_pmlmepriv) ((struct beamforming_info *)(&(_pmlmepriv)->beamforming_info))
-
-typedef enum _BEAMFORMING_ENTRY_STATE
-{
- BEAMFORMING_ENTRY_STATE_UNINITIALIZE,
- BEAMFORMING_ENTRY_STATE_INITIALIZEING,
- BEAMFORMING_ENTRY_STATE_INITIALIZED,
- BEAMFORMING_ENTRY_STATE_PROGRESSING,
- BEAMFORMING_ENTRY_STATE_PROGRESSED,
-}BEAMFORMING_ENTRY_STATE, *PBEAMFORMING_ENTRY_STATE;
-
-
-typedef enum _BEAMFORMING_STATE
-{
- BEAMFORMING_STATE_IDLE,
- BEAMFORMING_STATE_START,
- BEAMFORMING_STATE_END,
-}BEAMFORMING_STATE, *PBEAMFORMING_STATE;
-
-
-typedef enum _BEAMFORMING_CAP
-{
- BEAMFORMING_CAP_NONE = 0x0,
- BEAMFORMER_CAP_HT_EXPLICIT = 0x1,
- BEAMFORMEE_CAP_HT_EXPLICIT = 0x2,
- BEAMFORMER_CAP_VHT_SU = 0x4, /* Self has er Cap, because Reg er & peer ee */
- BEAMFORMEE_CAP_VHT_SU = 0x8, /* Self has ee Cap, because Reg ee & peer er */
- BEAMFORMER_CAP = 0x10,
- BEAMFORMEE_CAP = 0x20,
-}BEAMFORMING_CAP, *PBEAMFORMING_CAP;
-
-
-typedef enum _SOUNDING_MODE
-{
- SOUNDING_SW_VHT_TIMER = 0x0,
- SOUNDING_SW_HT_TIMER = 0x1,
- SOUNDING_STOP_All_TIMER = 0x2,
- SOUNDING_HW_VHT_TIMER = 0x3,
- SOUNDING_HW_HT_TIMER = 0x4,
- SOUNDING_STOP_OID_TIMER = 0x5,
- SOUNDING_AUTO_VHT_TIMER = 0x6,
- SOUNDING_AUTO_HT_TIMER = 0x7,
- SOUNDING_FW_VHT_TIMER = 0x8,
- SOUNDING_FW_HT_TIMER = 0x9,
-}SOUNDING_MODE, *PSOUNDING_MODE;
-
-
-enum BEAMFORMING_CTRL_TYPE
-{
- BEAMFORMING_CTRL_ENTER = 0,
- BEAMFORMING_CTRL_LEAVE = 1,
- BEAMFORMING_CTRL_START_PERIOD = 2,
- BEAMFORMING_CTRL_END_PERIOD = 3,
- BEAMFORMING_CTRL_SOUNDING_FAIL =4,
- BEAMFORMING_CTRL_SOUNDING_CLK =5,
-};
-
-struct beamforming_entry {
- bool bUsed;
- bool bSound;
- u16 aid; /* Used to construct AID field of NDPA packet. */
- u16 mac_id; /* Used to Set Reg42C in IBSS mode. */
- u16 p_aid; /* Used to fill Reg42C & Reg714 to compare with P_AID of Tx DESC. */
- u8 mac_addr[6];/* Used to fill Reg6E4 to fill Mac address of CSI report frame. */
- enum CHANNEL_WIDTH sound_bw; /* Sounding BandWidth */
- u16 sound_period;
- BEAMFORMING_CAP beamforming_entry_cap;
- BEAMFORMING_ENTRY_STATE beamforming_entry_state;
- u8 LogSeq;
- u8 LogRetryCnt;
- u8 LogSuccessCnt;
- u8 LogStatusFailCnt;
- u8 PreCsiReport[327];
- u8 DefaultCsiCnt;
- bool bDefaultCSI;
-};
-
-struct sounding_info {
- u8 sound_idx;
- enum CHANNEL_WIDTH sound_bw;
- SOUNDING_MODE sound_mode;
- u16 sound_period;
-};
-
-struct beamforming_info {
- BEAMFORMING_CAP beamforming_cap;
- BEAMFORMING_STATE beamforming_state;
- struct beamforming_entry beamforming_entry[BEAMFORMING_ENTRY_NUM];
- u8 beamforming_cur_idx;
- u8 beamforming_in_progress;
- u8 sounding_sequence;
- struct sounding_info sounding_info;
-};
-
-struct rtw_ndpa_sta_info {
- u16 aid:12;
- u16 feedback_type:1;
- u16 nc_index:3;
-};
-
-BEAMFORMING_CAP beamforming_get_entry_beam_cap_by_mac_id(void *pmlmepriv , u8 mac_id);
-void beamforming_notify(struct adapter * adapter);
-BEAMFORMING_CAP beamforming_get_beamform_cap(struct beamforming_info *pBeamInfo);
-
-u32 beamforming_get_report_frame(struct adapter * Adapter, union recv_frame *precv_frame);
-
-bool beamforming_send_ht_ndpa_packet(struct adapter * Adapter, u8 *ra, enum CHANNEL_WIDTH bw, u8 qidx);
-bool beamforming_send_vht_ndpa_packet(struct adapter * Adapter, u8 *ra, u16 aid, enum CHANNEL_WIDTH bw, u8 qidx);
-
-void beamforming_check_sounding_success(struct adapter * Adapter, bool status);
-
-void beamforming_watchdog(struct adapter * Adapter);
-
-void beamforming_wk_hdl(struct adapter *padapter, u8 type, u8 *pbuf);
-u8 beamforming_wk_cmd(struct adapter *padapter, s32 type, u8 *pbuf, s32 size, u8 enqueue);
-
-#endif
diff --git a/drivers/staging/rtl8723bs/include/rtw_br_ext.h b/drivers/staging/rtl8723bs/include/rtw_br_ext.h
deleted file mode 100644
index 5eaeb3c9a97c..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_br_ext.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef _RTW_BR_EXT_H_
-#define _RTW_BR_EXT_H_
-
-#define MACADDRLEN 6
-#define _DEBUG_ERR DBG_8192C
-#define _DEBUG_INFO /* DBG_8192C */
-#define DEBUG_WARN DBG_8192C
-#define DEBUG_INFO /* DBG_8192C */
-#define DEBUG_ERR DBG_8192C
-/* define GET_MY_HWADDR ((GET_MIB(priv))->dot11OperationEntry.hwaddr) */
-#define GET_MY_HWADDR(padapter) ((padapter)->eeprompriv.mac_addr)
-
-#define NAT25_HASH_BITS 4
-#define NAT25_HASH_SIZE (1 << NAT25_HASH_BITS)
-#define NAT25_AGEING_TIME 300
-
-#define MAX_NETWORK_ADDR_LEN 17
-
-struct nat25_network_db_entry
-{
- struct nat25_network_db_entry *next_hash;
- struct nat25_network_db_entry **pprev_hash;
- atomic_t use_count;
- unsigned char macAddr[6];
- unsigned long ageing_timer;
- unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
-};
-
-enum NAT25_METHOD {
- NAT25_MIN,
- NAT25_CHECK,
- NAT25_INSERT,
- NAT25_LOOKUP,
- NAT25_PARSE,
- NAT25_MAX
-};
-
-struct br_ext_info {
- unsigned int nat25_disable;
- unsigned int macclone_enable;
- unsigned int dhcp_bcst_disable;
- int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */
- unsigned char nat25_dmzMac[MACADDRLEN];
- unsigned int nat25sc_disable;
-};
-
-void nat25_db_cleanup(struct adapter *priv);
-
-#endif /* _RTW_BR_EXT_H_ */
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 08bc79840b23..559bf2606fb7 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -799,7 +799,6 @@ enum HT_CAP_AMPDU_FACTOR {
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
/* Spatial Multiplexing Power Save Modes */
diff --git a/drivers/staging/rtl8723bs/include/xmit_osdep.h b/drivers/staging/rtl8723bs/include/xmit_osdep.h
index 377b453de199..a61412b54ec0 100644
--- a/drivers/staging/rtl8723bs/include/xmit_osdep.h
+++ b/drivers/staging/rtl8723bs/include/xmit_osdep.h
@@ -33,8 +33,6 @@ void rtw_os_xmit_schedule(struct adapter *padapter);
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz, u8 flag);
void rtw_os_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 free_sz, u8 flag);
-extern void rtw_set_tx_chksum_offload(_pkt *pkt, struct pkt_attrib *pattrib);
-
extern uint rtw_remainder_len(struct pkt_file *pfile);
extern void _rtw_open_pktfile(_pkt *pkt, struct pkt_file *pfile);
extern uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 02178e25fbb8..af2234798fa8 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -233,13 +233,6 @@ static int rtw_ieee80211_channel_to_frequency(int chan, int band)
return 0; /* not supported */
}
-static u64 rtw_get_systime_us(void)
-{
- struct timespec ts;
- get_monotonic_boottime(&ts);
- return ((u64)ts.tv_sec*1000000) + ts.tv_nsec / 1000;
-}
-
#define MAX_BSSINFO_LEN 1000
struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wlan_network *pnetwork)
{
@@ -331,7 +324,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
notify_channel = ieee80211_get_channel(wiphy, freq);
- notify_timestamp = rtw_get_systime_us();
+ notify_timestamp = ktime_to_us(ktime_get_boottime());
notify_interval = le16_to_cpu(*(__le16 *)rtw_get_beacon_interval_from_ie(pnetwork->network.IEs));
notify_capability = le16_to_cpu(*(__le16 *)rtw_get_capability_from_ie(pnetwork->network.IEs));
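
get_monotonic_boottime() belongs to the deprecated timespec-based timekeeping API removed in the y2038 cleanup, hence the helper is dropped in favour of ktime_to_us(ktime_get_boottime()). A userspace analogue using CLOCK_BOOTTIME, which likewise keeps counting across suspend so BSS timestamps stay monotonic over sleep/resume:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* analogue of ktime_to_us(ktime_get_boottime()): CLOCK_BOOTTIME, unlike
 * CLOCK_MONOTONIC, advances while the system is suspended
 */
static uint64_t boottime_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_BOOTTIME, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	printf("boottime: %llu us\n", (unsigned long long)boottime_us());
	return 0;
}
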
@@ -1273,16 +1266,16 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
goto exit;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = translate_percentage_to_dbm(padapter->recvpriv.signal_strength);
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy = rtw_get_cur_max_rate(padapter);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = sta_rx_data_pkts(psta);
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = psta->sta_stats.tx_pkts;
}
@@ -3013,7 +3006,7 @@ static int cfg80211_rtw_dump_station(struct wiphy *wiphy, struct net_device *nde
goto exit;
}
memcpy(mac, psta->hwaddr, ETH_ALEN);
- sinfo->filled = BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = psta->rssi;
exit:
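
The BIT() to BIT_ULL() conversions match the type of station_info.filled, which is a u64: on 32-bit targets BIT() expands to a shift of unsigned long, so any flag index of 32 or above would be undefined behaviour and the upper half of the mask unreachable. A self-contained illustration (BIT_32 is a stand-in for what BIT() degrades to on ILP32):

#include <stdint.h>
#include <stdio.h>

#define BIT_32(n)  (1UL << (n))  /* unsigned long: only 32 bits on ILP32 */
#define BIT_ULL(n) (1ULL << (n)) /* always 64 bits, like the kernel macro */

int main(void)
{
	uint64_t filled = 0;

	/* with a 32-bit unsigned long, 1UL << 40 is undefined behaviour;
	 * BIT_ULL(40) sets the intended bit on every architecture
	 */
	filled |= BIT_ULL(40);
	printf("filled = 0x%llx\n", (unsigned long long)filled);
	return 0;
}
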
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 39502156f652..c38298d960ff 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -32,9 +32,6 @@
#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
#define WEXT_CSCAN_TYPE_SECTION 'T'
-
-extern u8 key_2char2num(u8 hch, u8 lch);
-
static u32 rtw_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
@@ -42,44 +39,6 @@ static const char * const iw_operation_mode[] = {
"Auto", "Ad-Hoc", "Managed", "Master", "Repeater", "Secondary", "Monitor"
};
-static int hex2num_i(char c)
-{
- if (c >= '0' && c <= '9')
- return c - '0';
- if (c >= 'a' && c <= 'f')
- return c - 'a' + 10;
- if (c >= 'A' && c <= 'F')
- return c - 'A' + 10;
- return -1;
-}
-
-/**
- * hwaddr_aton - Convert ASCII string to MAC address
- * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
- * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
- * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
- */
-static int hwaddr_aton_i(const char *txt, u8 *addr)
-{
- int i;
-
- for (i = 0; i < 6; i++) {
- int a, b;
-
- a = hex2num_i(*txt++);
- if (a < 0)
- return -1;
- b = hex2num_i(*txt++);
- if (b < 0)
- return -1;
- *addr++ = (a << 4) | b;
- if (i < 5 && *txt++ != ':')
- return -1;
- }
-
- return 0;
-}
-
void indicate_wx_scan_complete_event(struct adapter *padapter)
{
union iwreq_data wrqu;
@@ -120,38 +79,6 @@ void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
eth_zero_addr(wrqu.ap_addr.sa_data);
}
-/*
-uint rtw_is_cckrates_included(u8 *rate)
-{
- u32 i = 0;
-
- while (rate[i]!= 0)
- {
- if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
- (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
- return true;
- i++;
- }
-
- return false;
-}
-
-uint rtw_is_cckratesonly_included(u8 *rate)
-{
- u32 i = 0;
-
- while (rate[i]!= 0)
- {
- if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
- (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
- return false;
- i++;
- }
-
- return true;
-}
-*/
-
static char *translate_scan(struct adapter *padapter,
struct iw_request_info* info, struct wlan_network *pnetwork,
char *start, char *stop)
@@ -199,26 +126,26 @@ static char *translate_scan(struct adapter *padapter,
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
- if ((rtw_is_cckratesonly_included((u8 *)&pnetwork->network.SupportedRates)) == true) {
- if (ht_cap == true)
+ if (rtw_is_cckratesonly_included((u8 *)&pnetwork->network.SupportedRates)) {
+ if (ht_cap)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
- } else if ((rtw_is_cckrates_included((u8 *)&pnetwork->network.SupportedRates)) == true) {
- if (ht_cap == true)
+ } else if (rtw_is_cckrates_included((u8 *)&pnetwork->network.SupportedRates)) {
+ if (ht_cap)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
} else {
if (pnetwork->network.Configuration.DSConfig > 14) {
- if (vht_cap == true)
+ if (vht_cap)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11AC");
- else if (ht_cap == true)
+ else if (ht_cap)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
} else {
- if (ht_cap == true)
+ if (ht_cap)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
@@ -313,7 +240,7 @@ static char *translate_scan(struct adapter *padapter,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.Ssid.Ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
- buf = kzalloc(MAX_WPA_IE_LEN*2, GFP_KERNEL);
+ buf = kzalloc(MAX_WPA_IE_LEN*2, GFP_ATOMIC);
if (!buf)
return start;
if (wpa_len > 0) {
@@ -445,7 +372,7 @@ static char *translate_scan(struct adapter *padapter,
u8 *buf;
u8 *p, *pos;
- buf = kzalloc(MAX_WPA_IE_LEN, GFP_KERNEL);
+ buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);
if (!buf)
goto exit;
p = buf;
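
Both kzalloc() calls in translate_scan() switch to GFP_ATOMIC because the function can be reached with the scanned_queue spinlock held, where a GFP_KERNEL allocation might sleep. The trade-off is that atomic allocations can fail under memory pressure, which the existing !buf checks already handle. Hypothetical sketch of the constraint (queue, len and out are placeholder names):

	spin_lock_bh(&queue->lock);
	buf = kzalloc(len, GFP_ATOMIC);	/* GFP_KERNEL may sleep under a spinlock */
	if (!buf)
		goto out;		/* atomic allocs can fail; bail out cleanly */
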
@@ -858,26 +785,26 @@ static int rtw_wx_get_name(struct net_device *dev,
prates = &pcur_bss->SupportedRates;
- if (rtw_is_cckratesonly_included((u8 *)prates) == true) {
- if (ht_cap == true)
+ if (rtw_is_cckratesonly_included((u8 *)prates)) {
+ if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
- } else if ((rtw_is_cckrates_included((u8 *)prates)) == true) {
- if (ht_cap == true)
+ } else if (rtw_is_cckrates_included((u8 *)prates)) {
+ if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
} else {
if (pcur_bss->Configuration.DSConfig > 14) {
- if (vht_cap == true)
+ if (vht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11AC");
- else if (ht_cap == true)
+ else if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a");
} else {
- if (ht_cap == true)
+ if (ht_cap)
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
@@ -2570,8 +2497,7 @@ static int rtw_get_ap_info(struct net_device *dev,
pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
- /* if (hwaddr_aton_i(pdata->pointer, bssid)) */
- if (hwaddr_aton_i(data, bssid)) {
+ if (!mac_pton(data, bssid)) {
DBG_871X("Invalid BSSID '%s'.\n", (u8 *)data);
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
return -EINVAL;
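
The driver-local hex2num_i()/hwaddr_aton_i() pair deleted earlier duplicated the kernel's mac_pton() from lib/net_utils.c. Note the inverted test: hwaddr_aton_i() returned 0 on success, while mac_pton() returns true. Usage sketch:

	#include <linux/kernel.h>	/* bool mac_pton(const char *s, u8 *mac) */
	#include <linux/etherdevice.h>	/* ETH_ALEN */

	u8 bssid[ETH_ALEN];

	if (!mac_pton(data, bssid))	/* true only for "xx:xx:xx:xx:xx:xx" */
		return -EINVAL;
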
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index ace68f023b49..181642358e3f 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
- , void *accel_priv
- , select_queue_fallback_t fallback
-)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
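
This matches a tree-wide change to the ndo_select_queue prototype: the opaque void *accel_priv argument became a typed struct net_device *sb_dev (the subordinate device, used e.g. for macvlan tx-queue steering). Sketch of the callback type this driver now implements, assumed to match the include/linux/netdevice.h of this kernel:

	u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev,
				select_queue_fallback_t fallback);
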
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 22191c9584ad..6d02904de63f 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -605,7 +605,6 @@ static int rtw_sdio_resume(struct device *dev)
{
struct sdio_func *func =dev_to_sdio_func(dev);
struct dvobj_priv *psdpriv = sdio_get_drvdata(func);
- struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(psdpriv);
struct adapter *padapter = psdpriv->if1;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int ret = 0;
@@ -615,25 +614,11 @@ static int rtw_sdio_resume(struct device *dev)
pdbgpriv->dbg_resume_cnt++;
- if (pwrpriv->bInternalAutoSuspend)
- {
- ret = rtw_resume_process(padapter);
- }
- else
- {
- if (pwrpriv->wowlan_mode || pwrpriv->wowlan_ap_mode)
- {
- ret = rtw_resume_process(padapter);
- }
- else
- {
- ret = rtw_resume_process(padapter);
- }
- }
+ ret = rtw_resume_process(padapter);
+
pmlmeext->last_scan_time = jiffies;
DBG_871X("<======== %s return %d\n", __func__, ret);
return ret;
-
}
static int __init rtw_drv_entry(void)
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 4da0c6f323d1..2cf903c66854 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -46,11 +46,6 @@ sint rtw_endofpktfile(struct pkt_file *pfile)
return false;
}
-void rtw_set_tx_chksum_offload(_pkt *pkt, struct pkt_attrib *pattrib)
-{
-
-}
-
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz, u8 flag)
{
if (alloc_sz > 0) {
diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c
index e46e47d93d7d..50b1c187a920 100644
--- a/drivers/staging/rtlwifi/base.c
+++ b/drivers/staging/rtlwifi/base.c
@@ -685,9 +685,8 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
}
}
-u8 rtl_mrate_idx_to_arfr_id(
- struct ieee80211_hw *hw, u8 rate_index,
- enum wireless_mode wirelessmode)
+u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
+ enum wireless_mode wirelessmode)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
@@ -1237,67 +1236,61 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
if (rtl_is_tx_report_skb(hw, skb))
tcb_desc->use_spe_rpt = 1;
- if (ieee80211_is_data(fc)) {
- /*
- *we set data rate INX 0
- *in rtl_rc.c if skb is special data or
- *mgt which need low data rate.
- */
-
- /*
- *So tcb_desc->hw_rate is just used for
- *special data and mgt frames
- */
- if (info->control.rates[0].idx == 0 ||
- ieee80211_is_nullfunc(fc)) {
- tcb_desc->use_driver_rate = true;
- tcb_desc->ratr_index =
- SET_RATE_ID(RATR_INX_WIRELESS_MC);
+ if (!ieee80211_is_data(fc)) {
+ tcb_desc->use_driver_rate = true;
+ tcb_desc->ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
+ tcb_desc->disable_ratefallback = 1;
+ tcb_desc->mac_id = 0;
+ tcb_desc->packet_bw = false;
- tcb_desc->disable_ratefallback = 1;
- } else {
- /* because hw will never use hw_rate
- * when tcb_desc->use_driver_rate = false
- * so we never set highest N rate here,
- * and N rate will all be controlled by FW
- * when tcb_desc->use_driver_rate = false
- */
- if (sta && sta->vht_cap.vht_supported) {
- tcb_desc->hw_rate =
- _rtl_get_vht_highest_n_rate(hw, sta);
- } else {
- if (sta && sta->ht_cap.ht_supported) {
- tcb_desc->hw_rate =
- _rtl_get_highest_n_rate(hw, sta);
- } else {
- if (rtlmac->mode == WIRELESS_MODE_B) {
- tcb_desc->hw_rate =
- rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
- } else {
- tcb_desc->hw_rate =
- rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
- }
- }
- }
- }
+ return;
+ }
- if (is_multicast_ether_addr(hdr->addr1))
- tcb_desc->multicast = 1;
- else if (is_broadcast_ether_addr(hdr->addr1))
- tcb_desc->broadcast = 1;
+ /*
+ * We set data rate INX 0
+ * in rtl_rc.c if skb is special data or
+ * mgt which need low data rate.
+ */
- _rtl_txrate_selectmode(hw, sta, tcb_desc);
- _rtl_query_bandwidth_mode(hw, sta, tcb_desc);
- _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
- _rtl_query_shortgi(hw, sta, tcb_desc, info);
- _rtl_query_protection_mode(hw, tcb_desc, info);
- } else {
+ /*
+ * So tcb_desc->hw_rate is just used for
+ * special data and mgt frames
+ */
+ if (info->control.rates[0].idx == 0 || ieee80211_is_nullfunc(fc)) {
tcb_desc->use_driver_rate = true;
tcb_desc->ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
+
tcb_desc->disable_ratefallback = 1;
- tcb_desc->mac_id = 0;
- tcb_desc->packet_bw = false;
+ } else if (sta && sta->vht_cap.vht_supported) {
+ /*
+ * Because hw will never use hw_rate
+ * when tcb_desc->use_driver_rate = false
+ * so we never set highest N rate here,
+ * and N rate will all be controlled by FW
+ * when tcb_desc->use_driver_rate = false
+ */
+ tcb_desc->hw_rate = _rtl_get_vht_highest_n_rate(hw, sta);
+ } else if (sta && sta->ht_cap.ht_supported) {
+ tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta);
+ } else {
+ enum rtl_var_map var = RTL_RC_OFDM_RATE54M;
+
+ if (rtlmac->mode == WIRELESS_MODE_B)
+ var = RTL_RC_CCK_RATE11M;
+
+ tcb_desc->hw_rate = rtlpriv->cfg->maps[var];
}
+
+ if (is_multicast_ether_addr(hdr->addr1))
+ tcb_desc->multicast = 1;
+ else if (is_broadcast_ether_addr(hdr->addr1))
+ tcb_desc->broadcast = 1;
+
+ _rtl_txrate_selectmode(hw, sta, tcb_desc);
+ _rtl_query_bandwidth_mode(hw, sta, tcb_desc);
+ _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
+ _rtl_query_shortgi(hw, sta, tcb_desc, info);
+ _rtl_query_protection_mode(hw, tcb_desc, info);
#undef SET_RATE_ID
}
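
The rtl_get_tcb_desc() rewrite above is a guard-clause refactor: the non-data branch that used to sit at the bottom of a deep if/else ladder now runs first and returns, and the rate selection collapses into a flat else-if chain. The shape of the transform, as a generic sketch (is_common_case/handle_special are placeholders):

	if (!is_common_case(fc)) {	/* was the trailing else branch */
		handle_special(desc);
		return;
	}
	/* common path continues un-nested */
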
@@ -1838,7 +1831,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
- (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+ (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
}
/*********************************************************
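
IEEE80211_MAX_AMPDU_BUF was split by mac80211 into HT and HE variants; this HT-only driver keeps its previous 64-subframe ceiling by naming the HT constant explicitly. Assumed values from include/linux/ieee80211.h at this point:

	#define IEEE80211_MAX_AMPDU_BUF_HT	0x40	/* 64 subframes, HT */
	#define IEEE80211_MAX_AMPDU_BUF		0x100	/* 256 subframes, HE */
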
diff --git a/drivers/staging/rtlwifi/halmac/halmac_api.h b/drivers/staging/rtlwifi/halmac/halmac_api.h
index 4922cc8ce6f2..e220db39c8a7 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_api.h
+++ b/drivers/staging/rtlwifi/halmac/halmac_api.h
@@ -29,7 +29,6 @@
#include "halmac_usb_reg.h"
#include "halmac_sdio_reg.h"
-#include "halmac_pcie_reg.h"
#include "halmac_bit2.h"
#include "halmac_reg2.h"
diff --git a/drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h b/drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h
deleted file mode 100644
index a2552b27367b..000000000000
--- a/drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2016 Realtek Corporation.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-#ifndef __HALMAC_PCIE_REG_H__
-#define __HALMAC_PCIE_REG_H__
-
-#endif /* __HALMAC_PCIE_REG_H__ */
diff --git a/drivers/staging/rtlwifi/halmac/rtl_halmac.c b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
index ae433aa6ebbb..f0c6fc8c6aca 100644
--- a/drivers/staging/rtlwifi/halmac/rtl_halmac.c
+++ b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
@@ -870,7 +870,7 @@ static bool _is_fw_read_cmd_down(struct rtl_priv *rtlpriv, u8 msgbox_num)
if (valid == 0)
read_down = true;
else
- schedule();
+ mdelay(1);
} while ((!read_down) && (retry_cnts--));
return read_down;
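
Calling schedule() without first setting the task state typically returns immediately, so the old loop busy-spun on the message-box register; mdelay(1) at least bounds each iteration to a millisecond. Sketch of the resulting poll shape (read_msgbox_valid() is a placeholder for the register read):

	int retry = 100;

	do {
		if (!read_msgbox_valid(rtlpriv))	/* fw consumed the box */
			return true;
		mdelay(1);	/* bounded wait; also safe in atomic context */
	} while (--retry > 0);

	return false;
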
diff --git a/drivers/staging/rtlwifi/phydm/phydm.c b/drivers/staging/rtlwifi/phydm/phydm.c
index 985978d3decc..27635feedba2 100644
--- a/drivers/staging/rtlwifi/phydm/phydm.c
+++ b/drivers/staging/rtlwifi/phydm/phydm.c
@@ -149,7 +149,7 @@ static void phydm_traffic_load_decision(void *dm_void)
{
struct phy_dm_struct *dm = (struct phy_dm_struct *)dm_void;
- /*---TP & Trafic-load calculation---*/
+ /*---TP & Traffic-load calculation---*/
if (dm->last_tx_ok_cnt > *dm->num_tx_bytes_unicast)
dm->last_tx_ok_cnt = *dm->num_tx_bytes_unicast;
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index efec7281511c..a40396614814 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -82,7 +82,7 @@ static void _rtl8822be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
}
while (!bwrite_success) {
- /* 2. Find the last BOX number which has been writen. */
+ /* 2. Find the last BOX number which has been written. */
boxnum = rtlhal->last_hmeboxnum;
switch (boxnum) {
case 0:
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c
index 7947edb239a1..88ba5b2fea6a 100644
--- a/drivers/staging/rtlwifi/rtl8822be/hw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/hw.c
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
return;
pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
- pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7));
+ pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
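
Under the assumption that bits 5:3 of vendor config offset 0x70f hold an L1 entrance latency code, the write now programs that field to its maximum (7) rather than setting bit 7:

	u8 tmp;

	pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
	tmp |= ASPM_L1_LATENCY << 3;	/* 7 << 3 == 0x38, bits 5:3 */
	pci_write_config_byte(rtlpci->pdev, 0x70f, tmp);

ASPM_L1_LATENCY itself is added to wifi.h further down in this series.
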
diff --git a/drivers/staging/rtlwifi/rtl8822be/sw.c b/drivers/staging/rtlwifi/rtl8822be/sw.c
index 7825e85ed091..a2ab19fa94f2 100644
--- a/drivers/staging/rtlwifi/rtl8822be/sw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/sw.c
@@ -43,7 +43,7 @@ static void rtl8822be_init_aspm_vars(struct ieee80211_hw *hw)
* 0 - Disable ASPM,
* 1 - Enable ASPM without Clock Req,
* 2 - Enable ASPM with Clock Req,
- * 3 - Alwyas Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
* set default to RTL8822BE:3 RTL8822B:2
*
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h
index 012fb618840b..a45f0eb69d3f 100644
--- a/drivers/staging/rtlwifi/wifi.h
+++ b/drivers/staging/rtlwifi/wifi.h
@@ -88,6 +88,7 @@
#define RTL_USB_MAX_RX_COUNT 100
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
+#define ASPM_L1_LATENCY 7
#define TOTAL_CAM_ENTRY 32
diff --git a/drivers/staging/rts5208/Makefile b/drivers/staging/rts5208/Makefile
index f7fd03a94e5f..17b4471c4d6d 100644
--- a/drivers/staging/rts5208/Makefile
+++ b/drivers/staging/rts5208/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_RTS5208) := rts5208.o
ccflags-y := -Idrivers/scsi
rts5208-y := rtsx.o rtsx_chip.o rtsx_transport.o rtsx_scsi.o \
- rtsx_card.o general.o sd.o xd.o ms.o spi.o trace.o
+ rtsx_card.o general.o sd.o xd.o ms.o spi.o
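
Dropping trace.o here pairs with the long run of hunks below: every one of them deletes a bare rtsx_trace(chip) call in front of a return. The macro only recorded the file and line of the most recent failure, so its removal is behavior-neutral. The repeated before/after pattern, roughly (old macro semantics assumed from the driver's trace.h):

	/* before */
	if (retval != STATUS_SUCCESS) {
		rtsx_trace(chip);	/* stored __FILE__/__LINE__ only */
		return STATUS_FAIL;
	}

	/* after */
	if (retval != STATUS_SUCCESS)
		return STATUS_FAIL;
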
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index b89ef15e3c20..3a71dbb6d24a 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -44,7 +44,6 @@ static inline int ms_check_err_code(struct rtsx_chip *chip, u8 err_code)
static int ms_parse_err_code(struct rtsx_chip *chip)
{
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -76,7 +75,6 @@ static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
if (retval < 0) {
rtsx_clear_ms_error(chip);
ms_set_err_code(chip, MS_TO_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
@@ -85,14 +83,12 @@ static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
if (!(tpc & 0x08)) { /* Read Packet */
if (*ptr & MS_CRC16_ERR) {
ms_set_err_code(chip, MS_CRC16_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
} else { /* Write Packet */
if (CHK_MSPRO(ms_card) && !(*ptr & 0x80)) {
if (*ptr & (MS_INT_ERR | MS_INT_CMDNK)) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
}
@@ -101,7 +97,6 @@ static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
if (*ptr & MS_RDY_TIMEOUT) {
rtsx_clear_ms_error(chip);
ms_set_err_code(chip, MS_TO_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
@@ -117,7 +112,6 @@ static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
enum dma_data_direction dir;
if (!buf || !buf_len) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -128,7 +122,6 @@ static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
dir = DMA_TO_DEVICE;
err_code = MS_FLASH_WRITE_ERROR;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -165,17 +158,14 @@ static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
else
retval = STATUS_FAIL;
- rtsx_trace(chip);
return retval;
}
retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -189,7 +179,6 @@ static int ms_write_bytes(struct rtsx_chip *chip,
int retval, i;
if (!data || (data_len < cnt)) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -225,14 +214,12 @@ static int ms_write_bytes(struct rtsx_chip *chip,
if (!(tpc & 0x08)) {
if (val & MS_CRC16_ERR) {
ms_set_err_code(chip, MS_CRC16_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
} else {
if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
}
@@ -240,12 +227,10 @@ static int ms_write_bytes(struct rtsx_chip *chip,
if (val & MS_RDY_TIMEOUT) {
ms_set_err_code(chip, MS_TO_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
ms_set_err_code(chip, MS_TO_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
@@ -260,7 +245,6 @@ static int ms_read_bytes(struct rtsx_chip *chip,
u8 *ptr;
if (!data) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -296,14 +280,12 @@ static int ms_read_bytes(struct rtsx_chip *chip,
if (!(tpc & 0x08)) {
if (val & MS_CRC16_ERR) {
ms_set_err_code(chip, MS_CRC16_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
} else {
if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
}
@@ -311,12 +293,10 @@ static int ms_read_bytes(struct rtsx_chip *chip,
if (val & MS_RDY_TIMEOUT) {
ms_set_err_code(chip, MS_TO_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
ms_set_err_code(chip, MS_TO_ERROR);
- rtsx_trace(chip);
return ms_parse_err_code(chip);
}
@@ -353,7 +333,6 @@ static int ms_set_rw_reg_addr(struct rtsx_chip *chip, u8 read_start,
rtsx_clear_ms_error(chip);
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -393,13 +372,11 @@ static int ms_set_init_para(struct rtsx_chip *chip)
retval = switch_clock(chip, ms_card->ms_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = select_card(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -413,13 +390,11 @@ static int ms_switch_clock(struct rtsx_chip *chip)
retval = select_card(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = switch_clock(chip, ms_card->ms_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -435,41 +410,35 @@ static int ms_pull_ctl_disable(struct rtsx_chip *chip)
MS_D1_PD | MS_D2_PD | MS_CLK_PD |
MS_D6_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
MS_D3_PD | MS_D0_PD | MS_BS_PD |
XD_D4_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
MS_D7_PD | XD_CE_PD | XD_CLE_PD |
XD_CD_PU);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
XD_RDY_PD | SD_D3_PD | SD_D2_PD |
XD_ALE_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
MS_INS_PU | SD_WP_PD | SD_CD_PU |
SD_CMD_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL6, 0xFF,
MS_D5_PD | MS_D4_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else if (CHECK_PID(chip, 0x5288)) {
@@ -477,25 +446,21 @@ static int ms_pull_ctl_disable(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_PULL_CTL1,
0xFF, 0x55);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2,
0xFF, 0x55);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3,
0xFF, 0x4B);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4,
0xFF, 0x69);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -538,7 +503,6 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, MS_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -560,7 +524,6 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
retval = ms_power_off_card3v3(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -569,21 +532,18 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
retval = enable_card_clock(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (chip->asic_code) {
retval = ms_pull_ctl_enable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = rtsx_write_register(chip, FPGA_PULL_CTL,
FPGA_MS_PULL_CTL_BIT | 0x20, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -591,7 +551,6 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
if (!chip->ft2_fast_mode) {
retval = card_power_on(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -606,7 +565,6 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
if (chip->ocp_stat & oc_mask) {
dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
chip->ocp_stat);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -615,7 +573,6 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
MS_OUTPUT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -626,7 +583,6 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
NO_EXTEND_TOGGLE |
MS_BUS_WIDTH_1);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else {
@@ -636,26 +592,22 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
NO_EXTEND_TOGGLE |
MS_BUS_WIDTH_1);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
retval = rtsx_write_register(chip, MS_TRANS_CFG, 0xFF,
NO_WAIT_INT | NO_AUTO_READ_INT_REG);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
MS_STOP | MS_CLR_ERR);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = ms_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -670,7 +622,6 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -681,13 +632,11 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, PPBUF_BASE2 + 2, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "Type register: 0x%x\n", val);
@@ -695,32 +644,27 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
if (val != 0x02)
ms_card->check_ms_flow = 1;
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, PPBUF_BASE2 + 4, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "Category register: 0x%x\n", val);
if (val != 0) {
ms_card->check_ms_flow = 1;
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, PPBUF_BASE2 + 5, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "Class register: 0x%x\n", val);
if (val == 0) {
retval = rtsx_read_register(chip, PPBUF_BASE2, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (val & WRT_PRTCT)
@@ -732,7 +676,6 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
chip->card_wp |= MS_CARD;
} else {
ms_card->check_ms_flow = 1;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -740,7 +683,6 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
retval = rtsx_read_register(chip, PPBUF_BASE2 + 3, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "IF Mode register: 0x%x\n", val);
@@ -753,7 +695,6 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
ms_card->ms_type &= 0x0F;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -770,7 +711,6 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
do {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -781,12 +721,10 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (k > 100) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -800,7 +738,6 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -808,7 +745,6 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
if (val & INT_REG_CMDNK) {
chip->card_wp |= (MS_CARD);
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -831,7 +767,6 @@ static int ms_switch_parallel_bus(struct rtsx_chip *chip)
break;
}
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -853,20 +788,17 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip)
break;
}
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, MS_CFG, 0x98,
MS_BUS_WIDTH_8 | SAMPLE_TIME_FALLING);
if (retval) {
- rtsx_trace(chip);
return retval;
}
ms_card->ms_type |= MS_8BIT;
retval = ms_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -874,7 +806,6 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip)
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
1, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -890,19 +821,16 @@ static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
for (i = 0; i < 3; i++) {
retval = ms_prepare_reset(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_identify_media_type(chip, switch_8bit_bus);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_confirm_cpu_startup(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -910,7 +838,6 @@ static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
continue;
@@ -920,26 +847,22 @@ static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
}
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
/* Switch MS-PRO into Parallel mode */
retval = rtsx_write_register(chip, MS_CFG, 0x18, MS_BUS_WIDTH_4);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, MS_CFG, PUSH_TIME_ODD,
PUSH_TIME_ODD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = ms_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -948,7 +871,6 @@ static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
retval = ms_switch_8bit_bus(chip);
if (retval != STATUS_SUCCESS) {
ms_card->switch_8bit_fail = 1;
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -966,7 +888,6 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -979,23 +900,19 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
retval = ms_write_bytes(chip, PRO_WRITE_REG, 6, NO_WAIT_INT, buf, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, XC_CHG_POWER, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, MS_TRANS_CFG, buf);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (buf[0] & (MS_INT_CMDNK | MS_INT_ERR)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1020,7 +937,6 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1045,13 +961,11 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
break;
}
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
buf = kmalloc(64 * 512, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -1063,12 +977,10 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (!(val & MS_INT_BREQ)) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
@@ -1081,7 +993,6 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
}
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1090,7 +1001,6 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1101,7 +1011,6 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
PRO_READ_LONG_DATA, 0, WAIT_INT);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1111,13 +1020,11 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
if ((buf[0] != 0xa5) && (buf[1] != 0xc3)) {
/* Signature code is wrong */
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if ((buf[4] < 1) || (buf[4] > 12)) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1142,17 +1049,14 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
sys_info_addr, sys_info_size);
if (sys_info_size != 96) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (sys_info_addr < 0x1A0) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if ((sys_info_size + sys_info_addr) > 0x8000) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1180,17 +1084,14 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
model_name_addr, model_name_size);
if (model_name_size != 48) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (model_name_addr < 0x1A0) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if ((model_name_size + model_name_addr) > 0x8000) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1204,7 +1105,6 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
if (i == buf[4]) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1251,18 +1151,15 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
#ifdef SUPPORT_MSXC
if (CHK_MSXC(ms_card)) {
if (class_code != 0x03) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
if (class_code != 0x02) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
#else
if (class_code != 0x02) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -1272,13 +1169,11 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
(device_type == 0x03)) {
chip->card_wp |= MS_CARD;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
if (sub_class & 0xC0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1329,18 +1224,15 @@ retry:
if (ms_card->switch_8bit_fail) {
retval = ms_pro_reset_flow(chip, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
retval = ms_read_attribute_info(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1383,7 +1275,6 @@ retry:
#ifdef SUPPORT_MAGIC_GATE
retval = mg_set_tpc_para_sub(chip, 0, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -1403,19 +1294,16 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, READ_REG, 2, NO_WAIT_INT, val, 2);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val[1] & (STS_UCDT | STS_UCEX | STS_UCFG)) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1432,7 +1320,6 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1456,7 +1343,6 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1468,27 +1354,23 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CED) {
if (val & INT_REG_ERR) {
retval = ms_read_status_reg(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1496,7 +1378,6 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
MS_EXTRA_SIZE, SystemParm,
6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1505,7 +1386,6 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
data, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1526,14 +1406,12 @@ static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
u8 val, data[16];
if (!buf || (buf_len < MS_EXTRA_SIZE)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, 6 + MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1554,32 +1432,27 @@ static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
NO_WAIT_INT, data, 16);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CED) {
if (val & INT_REG_ERR) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1596,7 +1469,6 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1613,26 +1485,22 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1640,7 +1508,6 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
if (val & INT_REG_ERR) {
if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_status_reg(chip);
@@ -1650,7 +1517,6 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
} else {
if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_BREQ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1659,12 +1525,10 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
0, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1679,14 +1543,12 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
retval = ms_read_extra_data(chip, phy_blk, 0, extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, 7);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1707,33 +1569,28 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, data, 7);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CED) {
if (val & INT_REG_ERR) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1750,7 +1607,6 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1769,21 +1625,18 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ERASE_RTY:
retval = ms_send_cmd(chip, BLOCK_ERASE, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1795,14 +1648,12 @@ ERASE_RTY:
ms_set_err_code(chip, MS_CMD_NK);
ms_set_bad_block(chip, phy_blk);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CED) {
if (val & INT_REG_ERR) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1845,14 +1696,12 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
for (i = start_page; i < end_page; i++) {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_write_extra_data(chip, phy_blk, i,
extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1875,38 +1724,32 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_status_reg(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, PPBUF_BASE2, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (val & BUF_FULL) {
retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1914,7 +1757,6 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
for (i = start_page; i < end_page; i++) {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1923,7 +1765,6 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
MS_EXTRA_SIZE, SystemParm, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1943,26 +1784,22 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT,
data, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1981,7 +1818,6 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
READ_PAGE_DATA,
0, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2019,14 +1855,12 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
break;
}
if (rty_cnt == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_BREQ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2062,33 +1896,28 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE),
NO_WAIT_INT, data, 16);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_set_err_code(chip, MS_NO_ERROR);
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CED) {
if (val & INT_REG_ERR) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2098,7 +1927,6 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
MS_EXTRA_SIZE, SystemParm,
7);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2120,13 +1948,11 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_write_bytes(chip, WRITE_REG, 7,
NO_WAIT_INT, data, 8);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2134,13 +1960,11 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_read_bytes(chip, GET_INT, 1,
NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2148,7 +1972,6 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (val & INT_REG_ERR) {
ms_set_err_code(chip,
MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2170,7 +1993,6 @@ static int reset_ms(struct rtsx_chip *chip)
retval = ms_prepare_reset(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2178,19 +2000,16 @@ static int reset_ms(struct rtsx_chip *chip)
retval = ms_send_cmd(chip, MS_RESET, NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_status_reg(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, PPBUF_BASE2, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (val & WRT_PRTCT)
@@ -2205,7 +2024,6 @@ RE_SEARCH:
while (i < (MAX_DEFECTIVE_BLOCK + 2)) {
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2226,7 +2044,6 @@ RE_SEARCH:
if (i == (MAX_DEFECTIVE_BLOCK + 2)) {
dev_dbg(rtsx_dev(chip), "No boot block found!");
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2243,7 +2060,6 @@ RE_SEARCH:
retval = ms_read_page(chip, ms_card->boot_block, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2255,7 +2071,6 @@ RE_SEARCH:
retval = rtsx_send_cmd(chip, MS_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2280,7 +2095,6 @@ RE_SEARCH:
retval = rtsx_send_cmd(chip, MS_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2356,25 +2170,21 @@ RE_SEARCH:
if (ptr[15]) {
retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, PPBUF_BASE2, 0xFF, 0x88);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, PPBUF_BASE2 + 1, 0xFF, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2384,7 +2194,6 @@ RE_SEARCH:
PUSH_TIME_ODD |
MS_NO_CHECK_INT);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -2413,13 +2222,11 @@ static int ms_init_l2p_tbl(struct rtsx_chip *chip)
size = ms_card->segment_cnt * sizeof(struct zone_entry);
ms_card->segment = vzalloc(size);
if (!ms_card->segment) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_page(chip, ms_card->boot_block, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto INIT_FAIL;
}
@@ -2429,13 +2236,11 @@ static int ms_init_l2p_tbl(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, reg_addr++, &val1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto INIT_FAIL;
}
retval = rtsx_read_register(chip, reg_addr++, &val2);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto INIT_FAIL;
}
@@ -2599,7 +2404,6 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
if (!ms_card->segment) {
retval = ms_init_l2p_tbl(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return retval;
}
}
@@ -2620,7 +2424,6 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
if (!segment->l2p_table) {
segment->l2p_table = vmalloc(array_size(table_size, 2));
if (!segment->l2p_table) {
- rtsx_trace(chip);
goto BUILD_FAIL;
}
}
@@ -2629,7 +2432,6 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
if (!segment->free_table) {
segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2);
if (!segment->free_table) {
- rtsx_trace(chip);
goto BUILD_FAIL;
}
}
@@ -2757,7 +2559,6 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
retval = ms_init_page(chip, phy_blk, log_blk, 0, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto BUILD_FAIL;
}
@@ -2791,7 +2592,6 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
log_blk, 0,
ms_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2799,7 +2599,6 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
retval = ms_set_bad_block(chip, tmp_blk);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2828,13 +2627,11 @@ int reset_ms_card(struct rtsx_chip *chip)
retval = enable_card_clock(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = select_card(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2845,18 +2642,15 @@ int reset_ms_card(struct rtsx_chip *chip)
if (ms_card->check_ms_flow) {
retval = reset_ms(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
retval = ms_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2866,7 +2660,6 @@ int reset_ms_card(struct rtsx_chip *chip)
*/
retval = ms_build_l2p_tbl(chip, seg_no);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2898,7 +2691,6 @@ static int mspro_set_rw_cmd(struct rtsx_chip *chip,
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2940,7 +2732,6 @@ static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2992,7 +2783,6 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3003,7 +2793,6 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -3020,7 +2809,6 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
if (val & MS_INT_BREQ) {
retval = ms_send_cmd(chip, PRO_STOP, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3050,7 +2838,6 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
retval = mspro_set_rw_cmd(chip, start_sector, count, rw_cmd);
if (retval != STATUS_SUCCESS) {
ms_card->seq_mode = 0;
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3067,7 +2854,6 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
chip->rw_need_retry = 0;
dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n",
__func__);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3080,7 +2866,6 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
ms_auto_tune_clock(chip);
}
- rtsx_trace(chip);
return retval;
}
@@ -3109,14 +2894,12 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3127,7 +2910,6 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3140,7 +2922,6 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
MS_NO_CHECK_INT);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3148,7 +2929,6 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
data, 8);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3174,7 +2954,6 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (tmp & (MS_INT_CED | MS_INT_CMDNK |
@@ -3187,19 +2966,16 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT, 0);
if (retval != STATUS_SUCCESS) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (i == 5000) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) {
ms_card->format_status = FORMAT_FAIL;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3211,7 +2987,6 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
} else {
ms_card->format_status = FORMAT_FAIL;
ms_card->pro_under_formatting = 0;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3245,13 +3020,11 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3279,7 +3052,6 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3290,18 +3062,15 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = mspro_set_rw_cmd(chip, 0, para, PRO_FORMAT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3320,7 +3089,6 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3339,7 +3107,6 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (retval == STATUS_SUCCESS) {
if ((extra[1] & 0x30) != 0x30) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3347,7 +3114,6 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3369,7 +3135,6 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3377,7 +3142,6 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3388,19 +3152,16 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_ERR) {
@@ -3420,18 +3181,15 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
ms_set_err_code(chip,
MS_FLASH_READ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_BREQ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3440,7 +3198,6 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (!(val & INT_REG_CED)) {
retval = ms_send_cmd(chip, BLOCK_END, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3448,13 +3205,11 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
&val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip, MS_FLASH_READ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3489,7 +3244,6 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (retval == -ETIMEDOUT) {
ms_set_err_code(chip, MS_TO_ERROR);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_TIMEDOUT;
}
@@ -3497,13 +3251,11 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (retval != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_TO_ERROR);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_TIMEDOUT;
}
if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
ms_set_err_code(chip, MS_CRC16_ERROR);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3529,7 +3281,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, 7);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3549,13 +3300,11 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT,
data, 8);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3563,7 +3312,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
NO_WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3571,7 +3319,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
SystemParm, (6 + MS_EXTRA_SIZE));
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3606,7 +3353,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3616,13 +3362,11 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3632,23 +3376,19 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
ms_set_err_code(chip, MS_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_CMDNK) {
ms_set_err_code(chip, MS_CMD_NK);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (val & INT_REG_ERR) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_BREQ_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3682,23 +3422,19 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
rtsx_clear_ms_error(chip);
if (retval == -ETIMEDOUT) {
- rtsx_trace(chip);
return STATUS_TIMEDOUT;
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if ((end_page - start_page) == 1) {
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -3707,7 +3443,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
retval = ms_send_cmd(chip, BLOCK_END,
WAIT_INT);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3715,7 +3450,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
retval = ms_read_bytes(chip, GET_INT, 1,
NO_WAIT_INT, &val, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3725,7 +3459,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip,
MS_FLASH_WRITE_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3747,7 +3480,6 @@ static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
page_off, ms_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3776,7 +3508,6 @@ static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
0, start_page);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3794,7 +3525,6 @@ int ms_delay_write(struct rtsx_chip *chip)
if (delay_write->delay_write_flag) {
retval = ms_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3805,7 +3535,6 @@ int ms_delay_write(struct rtsx_chip *chip)
delay_write->logblock,
delay_write->pageoff);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3846,7 +3575,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
ms_rw_fail(srb, chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3863,7 +3591,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
chip->card_fail |= MS_CARD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3882,7 +3609,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
old_blk = delay_write->old_phyblock;
@@ -3898,7 +3624,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -3909,7 +3634,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3921,12 +3645,10 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
set_sense_type
(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#ifdef MS_DELAY_WRITE
@@ -3939,12 +3661,10 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -3953,7 +3673,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (old_blk == 0xFFFF) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3989,11 +3708,9 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_rw_fail(srb, chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4030,7 +3747,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
chip->card_fail |= MS_CARD;
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -4039,7 +3755,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
log_blk - ms_start_idx[seg_no]);
if (old_blk == 0xFFFF) {
ms_rw_fail(srb, chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4047,7 +3762,6 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
new_blk = ms_get_unused_block(chip, seg_no);
if (new_blk == 0xFFFF) {
ms_rw_fail(srb, chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -4075,12 +3789,10 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
set_sense_type
(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
ms_rw_fail(srb, chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -4139,13 +3851,11 @@ static int ms_poll_int(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, MS_CARD, 5000);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
val = *rtsx_get_cmd_data(chip);
if (val & MS_INT_ERR) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4211,13 +3921,11 @@ static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
break;
}
if (i == MS_MAX_RETRY_COUNT) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4236,7 +3944,6 @@ static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4251,7 +3958,6 @@ static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
retval = ms_write_bytes(chip, PRO_WRITE_REG, (type == 0) ? 1 : 6,
NO_WAIT_INT, buf, 6);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4267,7 +3973,6 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (scsi_bufflen(srb) < 12) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4275,14 +3980,12 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = mg_send_ex_cmd(chip, MG_SET_LID, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4295,13 +3998,11 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf1, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4319,13 +4020,11 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
buf = kmalloc(1540, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -4337,7 +4036,6 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = mg_send_ex_cmd(chip, MG_GET_LEKB, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
- rtsx_trace(chip);
goto free_buffer;
}
@@ -4346,13 +4044,11 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
goto free_buffer;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
retval = STATUS_FAIL;
goto free_buffer;
}
@@ -4378,14 +4074,12 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = mg_send_ex_cmd(chip, MG_GET_ID, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4393,13 +4087,11 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4409,7 +4101,6 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_poll_int(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -4417,7 +4108,6 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = mg_send_ex_cmd(chip, MG_SET_RD, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4434,13 +4124,11 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
32, WAIT_INT, buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4461,14 +4149,12 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = mg_send_ex_cmd(chip, MG_MAKE_RMS, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4476,13 +4162,11 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf1, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4501,7 +4185,6 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_poll_int(chip);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -4522,14 +4205,12 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = mg_send_ex_cmd(chip, MG_MAKE_KSE, 0);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4546,13 +4227,11 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf, 32);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4573,13 +4252,11 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
buf = kmalloc(1028, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -4591,7 +4268,6 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = mg_send_ex_cmd(chip, MG_GET_IBD, ms_card->mg_entry_num);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
goto free_buffer;
}
@@ -4600,13 +4276,11 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
goto free_buffer;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_clear_ms_error(chip);
- rtsx_trace(chip);
retval = STATUS_FAIL;
goto free_buffer;
}
@@ -4634,13 +4308,11 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ms_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
buf = kmalloc(1028, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -4660,7 +4332,6 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- rtsx_trace(chip);
goto SetICVFinish;
}
@@ -4702,7 +4373,6 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SENSE_TYPE_MG_WRITE_ERR);
}
retval = STATUS_FAIL;
- rtsx_trace(chip);
goto SetICVFinish;
}
}
@@ -4722,7 +4392,6 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- rtsx_trace(chip);
goto SetICVFinish;
}
#endif
@@ -4765,14 +4434,12 @@ int ms_power_off_card3v3(struct rtsx_chip *chip)
retval = disable_card_clock(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (chip->asic_code) {
retval = ms_pull_ctl_disable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -4780,19 +4447,16 @@ int ms_power_off_card3v3(struct rtsx_chip *chip)
FPGA_MS_PULL_CTL_BIT | 0x20,
FPGA_MS_PULL_CTL_BIT);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
retval = rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!chip->ft2_fast_mode) {
retval = card_power_off(chip, MS_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -4823,7 +4487,6 @@ int release_ms_card(struct rtsx_chip *chip)
retval = ms_power_off_card3v3(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
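
Editor's note: every hunk above follows one mechanical pattern — rtsx_trace(chip) was a debug-only breadcrumb recorded just before an error return, and with the trace infrastructure deleted (see the rtsx.h and rtsx_chip.h hunks below) each error path now returns its status directly. A minimal before/after sketch of the transformation; ms_switch_clock() and the STATUS_* codes are the driver's own, the wrapper functions are illustrative:

	/* Before: every failure path logged a breadcrumb first. */
	static int do_op_before(struct rtsx_chip *chip)
	{
		int retval = ms_switch_clock(chip);

		if (retval != STATUS_SUCCESS) {
			rtsx_trace(chip);	/* recorded func/file/line into chip->trace_msg[] */
			return STATUS_FAIL;
		}
		return STATUS_SUCCESS;
	}

	/* After: same control flow, breadcrumb gone. The braces around the
	 * now-single-statement if could be dropped in a follow-up pass, per
	 * kernel coding style.
	 */
	static int do_op_after(struct rtsx_chip *chip)
	{
		int retval = ms_switch_clock(chip);

		if (retval != STATUS_SUCCESS)
			return STATUS_FAIL;
		return STATUS_SUCCESS;
	}
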
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 70e0b8623110..69e6abe14abf 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -857,7 +857,7 @@ static int rtsx_probe(struct pci_dev *pci,
dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
if (!dev->chip) {
err = -ENOMEM;
- goto errout;
+ goto chip_alloc_fail;
}
spin_lock_init(&dev->reg_lock);
@@ -879,7 +879,7 @@ static int rtsx_probe(struct pci_dev *pci,
if (!dev->remap_addr) {
dev_err(&pci->dev, "ioremap error\n");
err = -ENXIO;
- goto errout;
+ goto ioremap_fail;
}
/*
@@ -894,7 +894,7 @@ static int rtsx_probe(struct pci_dev *pci,
if (!dev->rtsx_resv_buf) {
dev_err(&pci->dev, "alloc dma buffer fail\n");
err = -ENXIO;
- goto errout;
+ goto dma_alloc_fail;
}
dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
@@ -915,7 +915,7 @@ static int rtsx_probe(struct pci_dev *pci,
if (rtsx_acquire_irq(dev) < 0) {
err = -EBUSY;
- goto errout;
+ goto irq_acquire_fail;
}
pci_set_master(pci);
@@ -935,14 +935,14 @@ static int rtsx_probe(struct pci_dev *pci,
if (IS_ERR(th)) {
dev_err(&pci->dev, "Unable to start control thread\n");
err = PTR_ERR(th);
- goto errout;
+ goto control_thread_fail;
}
dev->ctl_thread = th;
err = scsi_add_host(host, &pci->dev);
if (err) {
dev_err(&pci->dev, "Unable to add the scsi host\n");
- goto errout;
+ goto scsi_add_host_fail;
}
/* Start up the thread for delayed SCSI-device scanning */
@@ -950,18 +950,16 @@ static int rtsx_probe(struct pci_dev *pci,
if (IS_ERR(th)) {
dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
complete(&dev->scanning_done);
- quiesce_and_remove_host(dev);
err = PTR_ERR(th);
- goto errout;
+ goto scan_thread_fail;
}
/* Start up the polling thread */
th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
if (IS_ERR(th)) {
dev_err(&pci->dev, "Unable to start the device-polling thread\n");
- quiesce_and_remove_host(dev);
err = PTR_ERR(th);
- goto errout;
+ goto scan_thread_fail;
}
dev->polling_thread = th;
@@ -970,9 +968,25 @@ static int rtsx_probe(struct pci_dev *pci,
return 0;
/* We come here if there are any problems */
-errout:
+scan_thread_fail:
+ quiesce_and_remove_host(dev);
+scsi_add_host_fail:
+ complete(&dev->cmnd_ready);
+ wait_for_completion(&dev->control_exit);
+control_thread_fail:
+ free_irq(dev->irq, (void *)dev);
+ rtsx_release_chip(dev->chip);
+irq_acquire_fail:
+ dev->chip->host_cmds_ptr = NULL;
+ dev->chip->host_sg_tbl_ptr = NULL;
+ if (dev->chip->msi_en)
+ pci_disable_msi(dev->pci);
+dma_alloc_fail:
+ iounmap(dev->remap_addr);
+ioremap_fail:
+ kfree(dev->chip);
+chip_alloc_fail:
dev_err(&pci->dev, "%s failed\n", __func__);
- release_everything(dev);
return err;
}
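
Editor's note: the probe rewrite above replaces a single catch-all errout: label (which ran the full release_everything() teardown regardless of how far initialization got) with one label per acquired resource, unwinding in reverse order of acquisition. A compressed sketch of the idiom under hypothetical helpers — acquire_a(), acquire_b(), and release_a() stand in for the chip allocation, ioremap, DMA buffer, IRQ, and thread setup steps:

	extern void *acquire_a(void);
	extern void *acquire_b(void);
	extern void release_a(void *a);

	static int sketch_probe(void)
	{
		void *a, *b;
		int err;

		a = acquire_a();
		if (!a) {
			err = -ENOMEM;
			goto a_fail;
		}

		b = acquire_b();
		if (!b) {
			err = -ENXIO;
			goto b_fail;
		}

		return 0;		/* success: nothing to unwind */

	b_fail:				/* each label frees exactly what is held */
		release_a(a);
	a_fail:
		return err;
	}

Because a jump always lands below the failing acquisition, teardown mirrors setup exactly; this is also why the two explicit quiesce_and_remove_host() calls in the thread-failure paths above could move into the ladder as the scan_thread_fail: label.
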
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index 62e467c5a6d7..514536a6f92b 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -139,28 +139,6 @@ static inline struct rtsx_dev *host_to_rtsx(struct Scsi_Host *host)
return (struct rtsx_dev *)host->hostdata;
}
-static inline void get_current_time(u8 *timeval_buf, int buf_len)
-{
- struct timespec64 ts64;
- u32 tv_usec;
-
- if (!timeval_buf || (buf_len < 8))
- return;
-
- getnstimeofday64(&ts64);
-
- tv_usec = ts64.tv_nsec / NSEC_PER_USEC;
-
- timeval_buf[0] = (u8)(ts64.tv_sec >> 24);
- timeval_buf[1] = (u8)(ts64.tv_sec >> 16);
- timeval_buf[2] = (u8)(ts64.tv_sec >> 8);
- timeval_buf[3] = (u8)(ts64.tv_sec);
- timeval_buf[4] = (u8)(tv_usec >> 24);
- timeval_buf[5] = (u8)(tv_usec >> 16);
- timeval_buf[6] = (u8)(tv_usec >> 8);
- timeval_buf[7] = (u8)(tv_usec);
-}
-
/*
* The scsi_lock() and scsi_unlock() macros protect the sm_state and the
* single queue element srb for write access
@@ -174,9 +152,6 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len)
/* struct scsi_cmnd transfer buffer access utilities */
enum xfer_buf_dir {TO_XFER_BUF, FROM_XFER_BUF};
-#define _MSG_TRACE
-
-#include "trace.h"
#include "rtsx_chip.h"
#include "rtsx_transport.h"
#include "rtsx_scsi.h"
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index a6b7bffc6714..d26a8e372fce 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -648,7 +648,6 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
clk, chip->cur_clk);
if ((clk <= 2) || (n > max_n)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -690,14 +689,12 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
udelay(10);
retval = rtsx_write_register(chip, CLK_CTL, CLK_LOW_FREQ, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -789,38 +786,32 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
default:
dev_dbg(rtsx_dev(chip), "Try to switch to an illegal clock (%d)\n",
clk);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CLK_CTL, 0xFF, CLK_LOW_FREQ);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (sd_vpclk_phase_reset) {
retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
PHASE_NOT_RESET, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SD_VPCLK1_CTL,
PHASE_NOT_RESET, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
retval = rtsx_write_register(chip, CLK_DIV, 0xFF,
(div << 4) | mcu_cnt);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CLK_SEL, 0xFF, sel);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -829,20 +820,17 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SD_VPCLK1_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
if (retval) {
- rtsx_trace(chip);
return retval;
}
udelay(200);
}
retval = rtsx_write_register(chip, CLK_CTL, 0xFF, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -891,7 +879,6 @@ int enable_card_clock(struct rtsx_chip *chip, u8 card)
retval = rtsx_write_register(chip, CARD_CLK_EN, clk_en, clk_en);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -912,7 +899,6 @@ int disable_card_clock(struct rtsx_chip *chip, u8 card)
retval = rtsx_write_register(chip, CARD_CLK_EN, clk_en, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -939,7 +925,6 @@ int card_power_on(struct rtsx_chip *chip, u8 card)
retval = rtsx_send_cmd(chip, 0, 100);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -950,7 +935,6 @@ int card_power_on(struct rtsx_chip *chip, u8 card)
retval = rtsx_send_cmd(chip, 0, 100);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -972,7 +956,6 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
retval = rtsx_write_register(chip, CARD_PWR_CTL, mask, val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -987,7 +970,6 @@ int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
int i;
if (!chip->rw_card[lun]) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -998,12 +980,10 @@ int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
if (rtsx_check_chip_exist(chip) != STATUS_SUCCESS) {
rtsx_release_chip(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (detect_card_cd(chip, chip->cur_card) !=
STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1036,7 +1016,6 @@ int card_share_mode(struct rtsx_chip *chip, int card)
} else if (card == XD_CARD) {
value = CARD_SHARE_48_XD;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1049,18 +1028,15 @@ int card_share_mode(struct rtsx_chip *chip, int card)
} else if (card == XD_CARD) {
value = CARD_SHARE_BAROSSA_XD;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_SHARE_MODE, mask, value);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1083,20 +1059,17 @@ int select_card(struct rtsx_chip *chip, int card)
} else if (card == SPI_CARD) {
mod = SPI_MOD_SEL;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_SELECT, 0x07, mod);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->cur_card = card;
retval = card_share_mode(chip, card);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1143,13 +1116,11 @@ int detect_card_cd(struct rtsx_chip *chip, int card)
card_cd = XD_EXIST;
} else {
dev_dbg(rtsx_dev(chip), "Wrong card type: 0x%x\n", card);
- rtsx_trace(chip);
return STATUS_FAIL;
}
status = rtsx_readl(chip, RTSX_BIPR);
if (!(status & card_cd)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
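
Editor's note: detect_card_cd(), partially visible in the hunk above, is the driver's one-shot presence test — map the card type to its card-detect bit, then test the interrupt/status register once. A condensed restatement (RTSX_BIPR, the *_EXIST bits, and rtsx_readl() are the driver's own; the helper name is illustrative):

	static int card_present(struct rtsx_chip *chip, int card)
	{
		u32 status, bit;

		switch (card) {
		case SD_CARD:
			bit = SD_EXIST;
			break;
		case MS_CARD:
			bit = MS_EXIST;
			break;
		case XD_CARD:
			bit = XD_EXIST;
			break;
		default:
			return STATUS_FAIL;	/* unknown card type */
		}

		status = rtsx_readl(chip, RTSX_BIPR);
		return (status & bit) ? STATUS_SUCCESS : STATUS_FAIL;
	}
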
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index aa37705bae39..ac165d8a081c 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1063,7 +1063,6 @@ static inline int card_power_off_all(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_PWR_CTL, 0x0F, 0x0F);
if (retval) {
- rtsx_trace(chip);
return retval;
}
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index 8a823466ca2b..6b1234bff09c 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -117,7 +117,6 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
MS_INS_PU | SD_WP_PU |
SD_CD_PU | SD_CMD_PU);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else {
@@ -125,28 +124,24 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
0xFF,
FPGA_SD_PULL_CTL_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
retval = rtsx_write_register(chip, CARD_SHARE_MODE, 0xFF,
CARD_SHARE_48_SD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
/* Enable SDIO internal clock */
retval = rtsx_write_register(chip, 0xFF2C, 0x01, 0x01);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SDIO_CTRL, 0xFF,
SDIO_BUS_CTRL | SDIO_CD_CTRL);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -170,7 +165,6 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5288)) {
retval = rtsx_read_register(chip, 0xFE5A, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (tmp & 0x08)
@@ -178,7 +172,6 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
} else if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_read_register(chip, 0xFE70, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (tmp & 0x80)
@@ -200,7 +193,6 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, TLPTISTAT, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
cd_toggle_mask = 0x08;
@@ -211,14 +203,12 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, 0xFE5A,
0x08, 0x00);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, 0xFE70,
0x80, 0x00);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -226,7 +216,6 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, TLPTISTAT, 0xFF,
tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -237,7 +226,6 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = sd_pull_ctl_enable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -246,13 +234,11 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
FPGA_SD_PULL_CTL_BIT | 0x20,
0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
retval = card_share_mode(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -261,14 +247,12 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, 0xFE5A,
0x08, 0x08);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else if (CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_register(chip, 0xFE70,
0x80, 0x80);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -279,7 +263,6 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
} else {
retval = rtsx_write_register(chip, TLPTISTAT, 0x08, 0x08);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -301,7 +284,6 @@ static int rtsx_reset_aspm(struct rtsx_chip *chip)
ret = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF,
chip->aspm_l0s_l1_en);
if (ret != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -311,13 +293,11 @@ static int rtsx_reset_aspm(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
ret = rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF, 0x3F);
if (ret) {
- rtsx_trace(chip);
return ret;
}
}
ret = rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);
if (ret != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -327,7 +307,6 @@ static int rtsx_reset_aspm(struct rtsx_chip *chip)
ret = rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
0xC0, 0xFF, chip->aspm_l0s_l1_en);
if (ret != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -349,7 +328,6 @@ static int rtsx_enable_pcie_intr(struct rtsx_chip *chip)
if (chip->phy_debug_mode) {
ret = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0);
if (ret) {
- rtsx_trace(chip);
return ret;
}
rtsx_disable_bus_int(chip);
@@ -362,7 +340,6 @@ static int rtsx_enable_pcie_intr(struct rtsx_chip *chip)
ret = rtsx_read_phy_register(chip, 0x00, &reg);
if (ret != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -370,20 +347,17 @@ static int rtsx_enable_pcie_intr(struct rtsx_chip *chip)
reg |= 0x80;
ret = rtsx_write_phy_register(chip, 0x00, reg);
if (ret != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
ret = rtsx_read_phy_register(chip, 0x1C, &reg);
if (ret != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
reg &= 0xFFF7;
ret = rtsx_write_phy_register(chip, 0x1C, reg);
if (ret != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -404,14 +378,12 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 0x00);
if (retval) {
- rtsx_trace(chip);
return retval;
}
/* Disable card clock */
retval = rtsx_write_register(chip, CARD_CLK_EN, 0x1E, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -420,14 +392,12 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
retval = rtsx_write_register(chip, FPDCTL, OC_POWER_DOWN, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else {
retval = rtsx_write_register(chip, FPDCTL, OC_POWER_DOWN,
MS_OC_POWER_DOWN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -435,19 +405,16 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, OCPPARA1, OCP_TIME_MASK,
OCP_TIME_800);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, OCPPARA2, OCP_THD_MASK,
OCP_THD_244_946);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, OCPCTL, 0xFF,
CARD_OC_INT_EN | CARD_DETECT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
#else
@@ -455,7 +422,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, FPDCTL, OC_POWER_DOWN,
OC_POWER_DOWN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
#endif
@@ -463,7 +429,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
if (!CHECK_PID(chip, 0x5288)) {
retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0xFF, 0x03);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -471,14 +436,12 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
/* Turn off LED */
retval = rtsx_write_register(chip, CARD_GPIO, 0xFF, 0x03);
if (retval) {
- rtsx_trace(chip);
return retval;
}
/* Reset delink mode */
retval = rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -486,7 +449,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_DRIVE_SEL, 0xFF,
chip->card_drive_sel);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -494,7 +456,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_AUTO_BLINK, 0xFF,
LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
#endif
@@ -504,12 +465,10 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SSC_CTL1, 0xFF,
SSC_8X_EN | SSC_SEL_4M);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SSC_CTL2, 0xFF, 0x12);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -524,7 +483,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
*/
retval = rtsx_write_register(chip, CHANGE_LINK_STATE, 0x16, 0x10);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -532,28 +490,24 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
if (chip->aspm_l0s_l1_en) {
retval = rtsx_reset_aspm(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_phy_register(chip, 0x07, 0x0129);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
retval = rtsx_write_config_byte(chip, LCTLR,
chip->aspm_l0s_l1_en);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
retval = rtsx_write_config_byte(chip, 0x81, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -563,7 +517,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
0xC0, 0xFF00, 0x0100);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -571,13 +524,11 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5288) && !CHK_SDIO_EXIST(chip)) {
retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF, 0x0103);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -585,19 +536,16 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT,
LINK_RDY_INT);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, PERST_GLITCH_WIDTH, 0xFF, 0x80);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_enable_pcie_intr(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -622,7 +570,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_pre_handle_sdio_old(chip);
#endif /* HW_AUTO_SWITCH_SD_BUS */
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -631,7 +578,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SDIO_CTRL,
SDIO_BUS_CTRL | SDIO_CD_CTRL, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -645,7 +591,6 @@ nextcard:
retval = rtsx_write_register(chip, SSC_CTL1, SSC_RSTB,
SSC_RSTB);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -655,7 +600,6 @@ nextcard:
retval = rtsx_write_register(chip, RCCTL, 0x01, 0x00);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -664,7 +608,6 @@ nextcard:
retval = rtsx_write_register(chip, MAIN_PWR_OFF_CTL, 0x03,
0x03);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -672,26 +615,22 @@ nextcard:
if (chip->remote_wakeup_en && !chip->auto_delink_en) {
retval = rtsx_write_register(chip, WAKE_SEL_CTL, 0x07, 0x07);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (chip->aux_pwr_exist) {
retval = rtsx_write_register(chip, PME_FORCE_CTL,
0xFF, 0x33);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
} else {
retval = rtsx_write_register(chip, WAKE_SEL_CTL, 0x07, 0x04);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, PME_FORCE_CTL, 0xFF, 0x30);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -699,7 +638,6 @@ nextcard:
if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D)) {
retval = rtsx_write_register(chip, PETXCFG, 0x1C, 0x14);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -707,7 +645,6 @@ nextcard:
if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
retval = rtsx_clr_phy_reg_bit(chip, 0x1C, 2);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -717,14 +654,12 @@ nextcard:
MS_PARTIAL_POWER_ON |
SD_PARTIAL_POWER_ON);
if (retval) {
- rtsx_trace(chip);
return retval;
}
udelay(chip->pmos_pwr_on_interval);
retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF,
MS_POWER_ON | SD_POWER_ON);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -781,12 +716,10 @@ static int rts5208_init(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CLK_SEL, 0x03, 0x03);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_read_register(chip, CLK_SEL, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->asic_code = val == 0 ? 1 : 0;
@@ -794,7 +727,6 @@ static int rts5208_init(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = rtsx_read_phy_register(chip, 0x1C, &reg);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -806,7 +738,6 @@ static int rts5208_init(struct rtsx_chip *chip)
} else {
retval = rtsx_read_register(chip, 0xFE80, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->ic_version = val;
@@ -815,7 +746,6 @@ static int rts5208_init(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, PDINFO, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
@@ -823,7 +753,6 @@ static int rts5208_init(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, 0xFE50, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->hw_bypass_sd = val & 0x01 ? 1 : 0;
@@ -837,7 +766,6 @@ static int rts5208_init(struct rtsx_chip *chip)
if (chip->use_hw_setting) {
retval = rtsx_read_register(chip, CHANGE_LINK_STATE, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->auto_delink_en = val & 0x80 ? 1 : 0;
@@ -854,12 +782,10 @@ static int rts5288_init(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CLK_SEL, 0x03, 0x03);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_read_register(chip, CLK_SEL, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->asic_code = val == 0 ? 1 : 0;
@@ -869,7 +795,6 @@ static int rts5288_init(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, PDINFO, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
@@ -877,7 +802,6 @@ static int rts5288_init(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, CARD_SHARE_MODE, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "CARD_SHARE_MODE: 0x%x\n", val);
@@ -885,14 +809,12 @@ static int rts5288_init(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, 0xFE5A, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->hw_bypass_sd = val & 0x10 ? 1 : 0;
retval = rtsx_read_cfg_dw(chip, 0, 0x718, &lval);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -906,7 +828,6 @@ static int rts5288_init(struct rtsx_chip *chip)
if (chip->use_hw_setting) {
retval = rtsx_read_register(chip, CHANGE_LINK_STATE, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
chip->auto_delink_en = val & 0x80 ? 1 : 0;
@@ -933,10 +854,6 @@ int rtsx_init_chip(struct rtsx_chip *chip)
chip->ic_version = 0;
-#ifdef _MSG_TRACE
- chip->msg_idx = 0;
-#endif
-
memset(xd_card, 0, sizeof(struct xd_info));
memset(sd_card, 0, sizeof(struct sd_info));
memset(ms_card, 0, sizeof(struct ms_info));
@@ -989,13 +906,11 @@ int rtsx_init_chip(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, FPDCTL, SSC_POWER_DOWN, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
wait_timeout(200);
retval = rtsx_write_register(chip, CLK_DIV, 0x07, 0x07);
if (retval) {
- rtsx_trace(chip);
return retval;
}
dev_dbg(rtsx_dev(chip), "chip->use_hw_setting = %d\n",
@@ -1004,14 +919,12 @@ int rtsx_init_chip(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208)) {
retval = rts5208_init(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else if (CHECK_PID(chip, 0x5288)) {
retval = rts5288_init(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1061,7 +974,6 @@ int rtsx_init_chip(struct rtsx_chip *chip)
retval = rtsx_reset_chip(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1492,7 +1404,6 @@ int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data)
val = rtsx_readl(chip, RTSX_HAIMR);
if ((val & BIT(31)) == 0) {
if (data != (u8)val) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1500,7 +1411,6 @@ int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data)
}
}
- rtsx_trace(chip);
return STATUS_TIMEDOUT;
}
@@ -1523,7 +1433,6 @@ int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
}
if (i >= MAX_RW_REG_CNT) {
- rtsx_trace(chip);
return STATUS_TIMEDOUT;
}
@@ -1546,7 +1455,6 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
0xFF,
(u8)(val & mask & 0xFF));
if (retval) {
- rtsx_trace(chip);
return retval;
}
mode |= (1 << i);
@@ -1558,13 +1466,11 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
if (mode) {
retval = rtsx_write_register(chip, CFGADDR0, 0xFF, (u8)addr);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CFGADDR1, 0xFF,
(u8)(addr >> 8));
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1572,14 +1478,12 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
0x80 | mode |
((func_no & 0x03) << 4));
if (retval) {
- rtsx_trace(chip);
return retval;
}
for (i = 0; i < MAX_RW_REG_CNT; i++) {
retval = rtsx_read_register(chip, CFGRWCTL, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if ((tmp & 0x80) == 0)
@@ -1599,25 +1503,21 @@ int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
retval = rtsx_write_register(chip, CFGADDR0, 0xFF, (u8)addr);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CFGADDR1, 0xFF, (u8)(addr >> 8));
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CFGRWCTL, 0xFF,
0x80 | ((func_no & 0x03) << 4));
if (retval) {
- rtsx_trace(chip);
return retval;
}
for (i = 0; i < MAX_RW_REG_CNT; i++) {
retval = rtsx_read_register(chip, CFGRWCTL, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if ((tmp & 0x80) == 0)
@@ -1627,7 +1527,6 @@ int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
for (i = 0; i < 4; i++) {
retval = rtsx_read_register(chip, CFGDATA0 + i, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
data |= (u32)tmp << (i * 8);
@@ -1649,7 +1548,6 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
int retval;
if (!buf) {
- rtsx_trace(chip);
return STATUS_NOMEM;
}
@@ -1662,14 +1560,12 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
data = vzalloc(array_size(dw_len, 4));
if (!data) {
- rtsx_trace(chip);
return STATUS_NOMEM;
}
mask = vzalloc(array_size(dw_len, 4));
if (!mask) {
vfree(data);
- rtsx_trace(chip);
return STATUS_NOMEM;
}
@@ -1694,7 +1590,6 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
if (retval != STATUS_SUCCESS) {
vfree(data);
vfree(mask);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1723,7 +1618,6 @@ int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
data = vmalloc(array_size(dw_len, 4));
if (!data) {
- rtsx_trace(chip);
return STATUS_NOMEM;
}
@@ -1732,7 +1626,6 @@ int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
data + i);
if (retval != STATUS_SUCCESS) {
vfree(data);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1763,29 +1656,24 @@ int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val)
retval = rtsx_write_register(chip, PHYDATA0, 0xFF, (u8)val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, PHYDATA1, 0xFF, (u8)(val >> 8));
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, PHYADDR, 0xFF, addr);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, PHYRWCTL, 0xFF, 0x81);
if (retval) {
- rtsx_trace(chip);
return retval;
}
for (i = 0; i < 100000; i++) {
retval = rtsx_read_register(chip, PHYRWCTL, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!(tmp & 0x80)) {
@@ -1795,7 +1683,6 @@ int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val)
}
if (!finished) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1812,19 +1699,16 @@ int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val)
retval = rtsx_write_register(chip, PHYADDR, 0xFF, addr);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, PHYRWCTL, 0xFF, 0x80);
if (retval) {
- rtsx_trace(chip);
return retval;
}
for (i = 0; i < 100000; i++) {
retval = rtsx_read_register(chip, PHYRWCTL, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!(tmp & 0x80)) {
@@ -1834,19 +1718,16 @@ int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val)
}
if (!finished) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, PHYDATA0, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
data = tmp;
retval = rtsx_read_register(chip, PHYDATA1, &tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
data |= (u16)tmp << 8;
@@ -1865,14 +1746,12 @@ int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val)
retval = rtsx_write_register(chip, EFUSE_CTRL, 0xFF, 0x80 | addr);
if (retval) {
- rtsx_trace(chip);
return retval;
}
for (i = 0; i < 100; i++) {
retval = rtsx_read_register(chip, EFUSE_CTRL, &data);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!(data & 0x80))
@@ -1881,13 +1760,11 @@ int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val)
}
if (data & 0x80) {
- rtsx_trace(chip);
return STATUS_TIMEDOUT;
}
retval = rtsx_read_register(chip, EFUSE_DATA, &data);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (val)
@@ -1911,20 +1788,17 @@ int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val)
retval = rtsx_write_register(chip, EFUSE_DATA, 0xFF, tmp);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, EFUSE_CTRL, 0xFF,
0xA0 | addr);
if (retval) {
- rtsx_trace(chip);
return retval;
}
for (j = 0; j < 100; j++) {
retval = rtsx_read_register(chip, EFUSE_CTRL, &data);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!(data & 0x80))
@@ -1933,7 +1807,6 @@ int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val)
}
if (data & 0x80) {
- rtsx_trace(chip);
return STATUS_TIMEDOUT;
}
@@ -1950,7 +1823,6 @@ int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
retval = rtsx_read_phy_register(chip, reg, &value);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1958,7 +1830,6 @@ int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
value &= ~(1 << bit);
retval = rtsx_write_phy_register(chip, reg, value);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1973,7 +1844,6 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
retval = rtsx_read_phy_register(chip, reg, &value);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1981,7 +1851,6 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
value |= (1 << bit);
retval = rtsx_write_phy_register(chip, reg, value);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2285,7 +2154,6 @@ int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
u8 *ptr;
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -2299,7 +2167,6 @@ int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
retval = rtsx_send_cmd(chip, 0, 250);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2315,7 +2182,6 @@ int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
retval = rtsx_send_cmd(chip, 0, 250);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2333,7 +2199,6 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
u8 *ptr;
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -2350,7 +2215,6 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
retval = rtsx_send_cmd(chip, 0, 250);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2366,7 +2230,6 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
retval = rtsx_send_cmd(chip, 0, 250);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2377,7 +2240,6 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
int rtsx_check_chip_exist(struct rtsx_chip *chip)
{
if (rtsx_readl(chip, 0) == 0xFFFFFFFF) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2403,7 +2265,6 @@ int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl)
if (mask) {
retval = rtsx_write_register(chip, FPDCTL, mask, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2434,7 +2295,6 @@ int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl)
val = mask;
retval = rtsx_write_register(chip, FPDCTL, mask, val);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
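
Editor's note: the PHY accessors in the hunks above share one handshake — kick off the operation via PHYRWCTL, then poll until the hardware clears the busy bit (0x80), bailing out after a bounded number of reads. A condensed sketch of that loop (register names and return codes are the driver's; the helper is illustrative):

	static int wait_phy_idle(struct rtsx_chip *chip)
	{
		int i, retval;
		u8 tmp;

		for (i = 0; i < 100000; i++) {
			retval = rtsx_read_register(chip, PHYRWCTL, &tmp);
			if (retval)
				return retval;		/* register access itself failed */
			if (!(tmp & 0x80))
				return STATUS_SUCCESS;	/* busy bit cleared */
		}
		return STATUS_FAIL;			/* hardware never went idle */
	}

Bounding the loop rather than spinning forever is what lets the callers report STATUS_FAIL instead of hanging the control thread when the PHY wedges.
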
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index 8a8cd5d3cf7e..ec1c3d96d31a 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -644,19 +644,6 @@ struct spi_info {
int spi_clock;
};
-#ifdef _MSG_TRACE
-struct trace_msg_t {
- u16 line;
-#define MSG_FUNC_LEN 64
- char func[MSG_FUNC_LEN];
-#define MSG_FILE_LEN 32
- char file[MSG_FILE_LEN];
-#define TIME_VAL_LEN 16
- u8 timeval_buf[TIME_VAL_LEN];
- u8 valid;
-};
-#endif
-
/************/
/* LUN mode */
/************/
@@ -798,11 +785,6 @@ struct rtsx_chip {
struct spi_info spi;
-#ifdef _MSG_TRACE
- struct trace_msg_t trace_msg[TRACE_ITEM_CNT];
- int msg_idx;
-#endif
-
int auto_delink_cnt;
int auto_delink_allowed;
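
Editor's note: the struct deleted above was the backing store for rtsx_trace() — a fixed ring of records indexed by chip->msg_idx, with the valid flag of the current slot telling a reader whether the ring had wrapped; trace_msg_cmd() (removed in the rtsx_scsi.c hunk below) exported it over a vendor SCSI command. A minimal sketch of that shape, condensed from the removed fields (the timestamp buffer is omitted, the store helper is illustrative, and the real TRACE_ITEM_CNT lived in the deleted trace.h, so its value here is assumed):

	#define TRACE_ITEM_CNT 64	/* assumed; defined in the deleted trace.h */

	struct trace_rec {
		u16 line;
		char func[64];
		char file[32];
		u8 valid;
	};

	static struct trace_rec ring[TRACE_ITEM_CNT];
	static int msg_idx;

	static void trace_store(const char *func, const char *file, u16 line)
	{
		struct trace_rec *r = &ring[msg_idx];

		strscpy(r->func, func, sizeof(r->func));
		strscpy(r->file, file, sizeof(r->file));
		r->line = line;
		r->valid = 1;				/* slot now holds a record */
		msg_idx = (msg_idx + 1) % TRACE_ITEM_CNT;	/* wrap around */
	}
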
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index a401b13f5f5e..c9a6d97938f6 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -508,7 +508,6 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = vmalloc(scsi_bufflen(srb));
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -583,13 +582,11 @@ static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (check_card_ready(chip, lun))
return TRANSPORT_GOOD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
break;
}
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -604,7 +601,6 @@ static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (prevent) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -642,7 +638,6 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = vmalloc(scsi_bufflen(srb));
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -764,7 +759,6 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
scsi_set_resid(srb, scsi_bufflen(srb));
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
#endif
@@ -790,7 +784,6 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = kmalloc(data_size, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -871,7 +864,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun) || (get_card_size(chip, lun) == 0)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -888,7 +880,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
*/
dev_dbg(rtsx_dev(chip), "SD card being erased!\n");
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -897,7 +888,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "SD card locked!\n");
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_READ_FORBIDDEN);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -923,7 +913,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
sec_cnt = ((u16)(srb->cmnd[9]) << 8) | srb->cmnd[10];
} else {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -934,7 +923,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if ((start_sec > get_card_size(chip, lun)) ||
((start_sec + sec_cnt) > get_card_size(chip, lun))) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -951,7 +939,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
else
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -960,7 +947,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "Write protected card!\n");
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_PROTECT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -981,7 +967,6 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SENSE_TYPE_MEDIA_WRITE_ERR);
}
retval = TRANSPORT_FAILED;
- rtsx_trace(chip);
goto exit;
} else {
chip->rw_fail_cnt[lun] = 0;
@@ -1007,7 +992,6 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
if (!chip->mspro_formatter_enable) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -1016,7 +1000,6 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1083,7 +1066,6 @@ static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1095,7 +1077,6 @@ static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = kmalloc(8, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1136,7 +1117,6 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1145,7 +1125,6 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1155,7 +1134,6 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -1188,7 +1166,6 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_force_power_on(chip, SSC_PDCTL);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1197,7 +1174,6 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
@@ -1205,7 +1181,6 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len);
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1218,7 +1193,6 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -1249,13 +1223,11 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (addr < 0xFC00) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1263,7 +1235,6 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1273,7 +1244,6 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -1307,14 +1277,12 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (addr < 0xFC00) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1325,7 +1293,6 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1335,7 +1302,6 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -1352,13 +1318,11 @@ static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (get_lun_card(chip, lun) != SD_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1387,77 +1351,6 @@ static int toggle_gpio_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_GOOD;
}
-#ifdef _MSG_TRACE
-static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
-{
- unsigned char *ptr, *buf = NULL;
- int i, msg_cnt;
- u8 clear;
- unsigned int buf_len;
-
- buf_len = 4 + ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) *
- TRACE_ITEM_CNT);
-
- if ((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) {
- set_sense_type(chip, SCSI_LUN(srb),
- SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
- return TRANSPORT_FAILED;
- }
-
- clear = srb->cmnd[2];
-
- buf = vmalloc(scsi_bufflen(srb));
- if (!buf) {
- rtsx_trace(chip);
- return TRANSPORT_ERROR;
- }
- ptr = buf;
-
- if (chip->trace_msg[chip->msg_idx].valid)
- msg_cnt = TRACE_ITEM_CNT;
- else
- msg_cnt = chip->msg_idx;
-
- *(ptr++) = (u8)(msg_cnt >> 24);
- *(ptr++) = (u8)(msg_cnt >> 16);
- *(ptr++) = (u8)(msg_cnt >> 8);
- *(ptr++) = (u8)msg_cnt;
- dev_dbg(rtsx_dev(chip), "Trace message count is %d\n", msg_cnt);
-
- for (i = 1; i <= msg_cnt; i++) {
- int j, idx;
-
- idx = chip->msg_idx - i;
- if (idx < 0)
- idx += TRACE_ITEM_CNT;
-
- *(ptr++) = (u8)(chip->trace_msg[idx].line >> 8);
- *(ptr++) = (u8)(chip->trace_msg[idx].line);
- for (j = 0; j < MSG_FUNC_LEN; j++)
- *(ptr++) = chip->trace_msg[idx].func[j];
-
- for (j = 0; j < MSG_FILE_LEN; j++)
- *(ptr++) = chip->trace_msg[idx].file[j];
-
- for (j = 0; j < TIME_VAL_LEN; j++)
- *(ptr++) = chip->trace_msg[idx].timeval_buf[j];
- }
-
- rtsx_stor_set_xfer_buf(buf, scsi_bufflen(srb), srb);
- vfree(buf);
-
- if (clear) {
- chip->msg_idx = 0;
- for (i = 0; i < TRACE_ITEM_CNT; i++)
- chip->trace_msg[i].valid = 0;
- }
-
- scsi_set_resid(srb, 0);
- return TRANSPORT_GOOD;
-}
-#endif
-
static int read_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
u8 addr, buf[4];
@@ -1543,7 +1436,6 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else if (srb->cmnd[3] == 2) {
@@ -1567,7 +1459,6 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1575,7 +1466,6 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
} else {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1608,7 +1498,6 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1619,7 +1508,6 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_stor_set_xfer_buf(&tmp, 1, srb);
} else {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1659,7 +1547,6 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
scsi_set_resid(srb, 0);
@@ -1807,7 +1694,6 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!CHECK_PID(chip, 0x5208)) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1817,7 +1703,6 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
chip->phy_debug_mode = 1;
retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1825,21 +1710,18 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_read_phy_register(chip, 0x1C, &reg);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
reg |= 0x0001;
retval = rtsx_write_phy_register(chip, 0x1C, reg);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
chip->phy_debug_mode = 0;
retval = rtsx_write_register(chip, CDRESUMECTL, 0x77, 0x77);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1847,14 +1729,12 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_read_phy_register(chip, 0x1C, &reg);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
reg &= 0xFFFE;
retval = rtsx_write_phy_register(chip, 0x1C, reg);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -1887,7 +1767,6 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (cmd_type > 2) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
@@ -1906,7 +1785,6 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (scsi_bufflen(srb) < 1) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
rtsx_stor_set_xfer_buf(&value, 1, srb);
@@ -1915,13 +1793,11 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1965,7 +1841,6 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (len) {
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1974,7 +1849,6 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -1985,7 +1859,6 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type
(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2031,7 +1904,6 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2043,7 +1915,6 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2054,7 +1925,6 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -2082,7 +1952,6 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_force_power_on(chip, SSC_PDCTL);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2094,7 +1963,6 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else if (mode == 1) {
@@ -2102,13 +1970,11 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2134,7 +2000,6 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2143,7 +2008,6 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2153,7 +2017,6 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -2187,7 +2050,6 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2198,7 +2060,6 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2208,7 +2069,6 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -2237,7 +2097,6 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2246,7 +2105,6 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2256,7 +2114,6 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
vfree(buf);
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -2291,7 +2148,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2301,7 +2157,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_force_power_on(chip, SSC_PDCTL);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2309,7 +2164,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_read_phy_register(chip, 0x08, &val);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2317,7 +2171,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
LDO3318_PWR_MASK, LDO_OFF);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2327,7 +2180,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
0x4C00 | chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2335,7 +2187,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
LDO3318_PWR_MASK, LDO_ON);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2345,7 +2196,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = card_power_on(chip, SPI_CARD);
if (retval != STATUS_SUCCESS) {
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2357,7 +2207,6 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_WRITE_ERR);
result = TRANSPORT_FAILED;
- rtsx_trace(chip);
goto exit;
}
}
@@ -2367,7 +2216,6 @@ exit:
retval = card_power_off(chip, SPI_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2375,7 +2223,6 @@ exit:
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
LDO3318_PWR_MASK, LDO_OFF);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2383,14 +2230,12 @@ exit:
retval = rtsx_write_phy_register(chip, 0x08, val);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
retval = rtsx_write_register(chip, PWR_GATE_CTRL,
LDO3318_PWR_MASK, LDO_ON);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
}
@@ -2429,13 +2274,11 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (func > func_max) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2444,7 +2287,6 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2488,14 +2330,12 @@ static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (func > func_max) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
buf = vmalloc(len);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2506,7 +2346,6 @@ static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
vfree(buf);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2597,7 +2436,6 @@ static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2740,7 +2578,6 @@ static int get_card_bus_width(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2749,7 +2586,6 @@ static int get_card_bus_width(struct scsi_cmnd *srb, struct rtsx_chip *chip)
bus_width = chip->card_bus_width[lun];
} else {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2767,7 +2603,6 @@ static int spi_vendor_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208) || CHECK_PID(chip, 0x5288)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2817,14 +2652,12 @@ static int spi_vendor_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
rtsx_write_register(chip, CARD_GPIO_DIR, 0x07, gpio_dir);
if (result != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2868,12 +2701,6 @@ static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
result = get_card_bus_width(srb, chip);
break;
-#ifdef _MSG_TRACE
- case TRACE_MSG:
- result = trace_msg_cmd(srb, chip);
- break;
-#endif
-
case SCSI_APP_CMD:
result = app_cmd(srb, chip);
break;
@@ -2885,7 +2712,6 @@ static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2926,7 +2752,6 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2934,7 +2759,6 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
(srb->cmnd[7] != 0x74)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2947,7 +2771,6 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun) ||
(get_card_size(chip, lun) == 0)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -2960,26 +2783,22 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!(chip->card_ready & MS_CARD)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (chip->card_wp & MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (!CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -2999,12 +2818,10 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3012,7 +2829,6 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
(srb->cmnd[7] != 0x44)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3021,7 +2837,6 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
!CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3035,7 +2850,6 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -3124,12 +2938,10 @@ static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (get_lun_card(chip, lun) != SD_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3160,7 +2972,6 @@ static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3188,24 +2999,20 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (srb->cmnd[7] != KC_MG_R_PRO) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (!CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3219,14 +3026,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[9] == 0x1C)) {
retval = mg_get_local_EKB(srb, chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
@@ -3237,14 +3042,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[9] == 0x24)) {
retval = mg_get_rsp_chg(srb, chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
@@ -3260,21 +3063,18 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[5] < 32)) {
retval = mg_get_ICV(srb, chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
default:
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3301,29 +3101,24 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!check_card_ready(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (check_card_wp(chip, lun)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (srb->cmnd[7] != KC_MG_R_PRO) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (!CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3337,14 +3132,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[9] == 0x0C)) {
retval = mg_set_leaf_id(srb, chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
@@ -3355,14 +3148,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[9] == 0x0C)) {
retval = mg_chg(srb, chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
@@ -3373,14 +3164,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[9] == 0x0C)) {
retval = mg_rsp(srb, chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
@@ -3396,21 +3185,18 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[5] < 32)) {
retval = mg_set_ICV(srb, chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
} else {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
default:
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -3440,7 +3226,6 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR,
0x02, 0, 0x04, 0x04, 0, 0);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -3453,7 +3238,6 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
0, (u16)(ms_card->progress));
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
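
Aside: the deleted trace_msg_cmd() above documents the machinery this patch removes. Each rtsx_trace() call recorded its call site into a fixed ring of TRACE_ITEM_CNT entries on the chip structure, which trace_msg_cmd() then drained back to the host over SCSI. A minimal sketch of what the macro likely expanded to, reconstructed from the deleted reader (the field names and the msg_idx/valid bookkeeping come from the code above; the exact macro body is an assumption, not the driver source):

	/* Hypothetical reconstruction for illustration only. */
	struct trace_msg_t {
		u16 line;			/* __LINE__ of the call site */
		char func[MSG_FUNC_LEN];	/* __func__ */
		char file[MSG_FILE_LEN];	/* __FILE__ */
		char timeval_buf[TIME_VAL_LEN];	/* timestamp of the event */
		u8 valid;			/* slot has been written */
	};

	#define rtsx_trace(chip)						\
	do {									\
		struct trace_msg_t *msg =					\
			&(chip)->trace_msg[(chip)->msg_idx];			\
		msg->line = (u16)__LINE__;					\
		strncpy(msg->func, __func__, MSG_FUNC_LEN);			\
		strncpy(msg->file, __FILE__, MSG_FILE_LEN);			\
		/* (timestamp capture into timeval_buf omitted for brevity) */	\
		msg->valid = 1;							\
		(chip)->msg_idx = ((chip)->msg_idx + 1) % TRACE_ITEM_CNT;	\
	} while (0)

Since the ring only ever reported file/line pairs that the dev_dbg() messages and sense data already convey, the patch can drop every call site wholesale.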
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 716cce2bd7f0..b4a796c570c2 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -275,7 +275,6 @@ int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
chip->int_reg);
err = -ETIMEDOUT;
- rtsx_trace(chip);
goto finish_send_cmd;
}
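
The rtsx_transport.c and sd.c hunks below repeat the same mechanical transformation: each error path loses its trace call and nothing else changes, so the surrounding control flow (including the now single-statement braces, presumably left for a separate cleanup) stays intact. A representative before/after, excerpted for illustration:

	/* before */
	retval = rtsx_read_register(chip, REG_SD_STAT1, &stat);
	if (retval) {
		rtsx_trace(chip);
		return retval;
	}

	/* after */
	retval = rtsx_read_register(chip, REG_SD_STAT1, &stat);
	if (retval) {
		return retval;
	}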
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index d548bc695f9e..e7efa34195c7 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -110,13 +110,11 @@ static int sd_check_data0_status(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, REG_SD_STAT1, &stat);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!(stat & SD_DAT0_STATUS)) {
sd_set_err_code(chip, SD_BUSY);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -191,7 +189,6 @@ RTY_SEND_CMD:
retval = sd_check_data0_status(chip);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return retval;
}
} else {
@@ -203,7 +200,6 @@ RTY_SEND_CMD:
}
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return retval;
}
@@ -214,7 +210,6 @@ RTY_SEND_CMD:
if ((ptr[0] & 0xC0) != 0) {
sd_set_err_code(chip, SD_STS_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -222,7 +217,6 @@ RTY_SEND_CMD:
if (ptr[stat_idx] & SD_CRC7_ERR) {
if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
sd_set_err_code(chip, SD_CRC_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (rty_cnt < SD_MAX_RETRY_COUNT) {
@@ -231,7 +225,6 @@ RTY_SEND_CMD:
goto RTY_SEND_CMD;
} else {
sd_set_err_code(chip, SD_CRC_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -242,7 +235,6 @@ RTY_SEND_CMD:
(cmd_idx != SEND_IF_COND)) {
if (cmd_idx != STOP_TRANSMISSION) {
if (ptr[1] & 0x80) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -253,19 +245,16 @@ RTY_SEND_CMD:
#endif
dev_dbg(rtsx_dev(chip), "ptr[1]: 0x%02x\n",
ptr[1]);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (ptr[2] & 0xFF) {
dev_dbg(rtsx_dev(chip), "ptr[2]: 0x%02x\n",
ptr[2]);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (ptr[3] & 0x80) {
dev_dbg(rtsx_dev(chip), "ptr[3]: 0x%02x\n",
ptr[3]);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (ptr[3] & 0x01)
@@ -296,7 +285,6 @@ static int sd_read_data(struct rtsx_chip *chip,
buf_len = 0;
if (buf_len > 512) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -338,14 +326,12 @@ static int sd_read_data(struct rtsx_chip *chip,
SD_RSP_TYPE_R1, NULL, 0);
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (buf && buf_len) {
retval = rtsx_read_ppbuf(chip, buf, buf_len);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -368,14 +354,12 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
if (buf_len > 512) {
/* This function can't write more than one page of data */
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (buf && buf_len) {
retval = rtsx_write_ppbuf(chip, buf, buf_len);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -416,7 +400,6 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
SD_RSP_TYPE_R1, NULL, 0);
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -434,7 +417,6 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
for (i = 0; i < 6; i++) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -445,7 +427,6 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
}
if (i == 6) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -489,11 +470,9 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
else
sd_card->sd_clock = CLK_20;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -565,7 +544,6 @@ static int sd_set_sample_push_timing(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x1C, val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -629,7 +607,6 @@ static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
retval = rtsx_write_register(chip, REG_SD_CFG1, mask, val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -643,7 +620,6 @@ static int sd_set_init_para(struct rtsx_chip *chip)
retval = sd_set_sample_push_timing(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -651,7 +627,6 @@ static int sd_set_init_para(struct rtsx_chip *chip)
retval = switch_clock(chip, sd_card->sd_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -677,7 +652,6 @@ int sd_select_card(struct rtsx_chip *chip, int select)
retval = sd_send_cmd_get_rsp(chip, cmd_idx, addr, cmd_type, NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -694,7 +668,6 @@ static int sd_update_lock_status(struct rtsx_chip *chip)
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
SD_RSP_TYPE_R1, rsp, 5);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -707,7 +680,6 @@ static int sd_update_lock_status(struct rtsx_chip *chip)
sd_card->sd_lock_status);
if (rsp[1] & 0x01) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -727,7 +699,6 @@ static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
sd_card->sd_addr, SD_RSP_TYPE_R1,
rsp, 5);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -736,7 +707,6 @@ static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
return STATUS_SUCCESS;
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -750,14 +720,12 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
0x4FC0 |
chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = rtsx_write_register(chip, SD_PAD_CTL,
SD_IO_USING_1V8, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -767,7 +735,6 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
0x4C40 |
chip->phy_voltage);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -775,12 +742,10 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
SD_IO_USING_1V8,
SD_IO_USING_1V8);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -796,14 +761,12 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
SD_CLK_TOGGLE_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -811,24 +774,20 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
retval = rtsx_read_register(chip, SD_BUS_STAT, &stat);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
SD_DAT1_STATUS | SD_DAT0_STATUS)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, SD_BUS_STAT, 0xFF,
SD_CLK_FORCE_STOP);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = sd_change_bank_voltage(chip, SD_IO_1V8);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -837,14 +796,12 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SD_BUS_STAT, 0xFF,
SD_CLK_TOGGLE_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
wait_timeout(10);
retval = rtsx_read_register(chip, SD_BUS_STAT, &stat);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if ((stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
@@ -855,14 +812,12 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN |
SD_CLK_FORCE_STOP, 0);
rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, SD_BUS_STAT,
SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -877,24 +832,20 @@ static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
retval = rtsx_write_register(chip, DCM_DRP_CTL, 0xFF,
DCM_RESET | DCM_RX);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, DCM_DRP_CTL, 0xFF, DCM_RX);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else {
retval = rtsx_write_register(chip, DCM_DRP_CTL, 0xFF,
DCM_RESET | DCM_TX);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, DCM_DRP_CTL, 0xFF, DCM_TX);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -927,30 +878,25 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
retval = rtsx_write_register(chip, CLK_CTL, CHANGE_CLK,
CHANGE_CLK);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SD_VP_CTL, 0x1F,
sample_point);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
PHASE_NOT_RESET, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SD_VPCLK0_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CLK_CTL, CHANGE_CLK, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else {
@@ -964,7 +910,6 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
PHASE_CHANGE,
PHASE_CHANGE);
if (retval) {
- rtsx_trace(chip);
return retval;
}
udelay(50);
@@ -973,14 +918,12 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
PHASE_NOT_RESET |
sample_point);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else {
retval = rtsx_write_register(chip, CLK_CTL,
CHANGE_CLK, CHANGE_CLK);
if (retval) {
- rtsx_trace(chip);
return retval;
}
udelay(50);
@@ -988,7 +931,6 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
PHASE_NOT_RESET |
sample_point);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -1001,39 +943,33 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
retval = rtsx_send_cmd(chip, SD_CARD, 100);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto fail;
}
val = *rtsx_get_cmd_data(chip);
if (val & DCMPS_ERROR) {
- rtsx_trace(chip);
goto fail;
}
if ((val & DCMPS_CURRENT_PHASE) != sample_point) {
- rtsx_trace(chip);
goto fail;
}
retval = rtsx_write_register(chip, SD_DCMPS_CTL,
DCMPS_CHANGE, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (ddr_rx) {
retval = rtsx_write_register(chip, SD_VP_CTL,
PHASE_CHANGE, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else {
retval = rtsx_write_register(chip, CLK_CTL,
CHANGE_CLK, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -1043,7 +979,6 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
retval = rtsx_write_register(chip, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1071,7 +1006,6 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1085,14 +1019,12 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
buf, 8, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
memcpy(sd_card->raw_scr, buf, 8);
if ((buf[0] & 0x0F) == 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1136,7 +1068,6 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
break;
default:
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else if (func_group == SD_FUNC_GROUP_3) {
@@ -1164,7 +1095,6 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
break;
default:
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else if (func_group == SD_FUNC_GROUP_4) {
@@ -1192,18 +1122,15 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
break;
default:
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (func_group == SD_FUNC_GROUP_1) {
if (!(buf[support_offset] & support_mask) ||
((buf[query_switch_offset] & 0x0F) != query_switch)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1211,7 +1138,6 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
/* Check 'Busy Status' */
if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
((buf[check_busy_offset] & switch_busy) == switch_busy)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1254,7 +1180,6 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
buf, 64, 250);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1283,14 +1208,12 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
dev_dbg(rtsx_dev(chip), "Maximum current consumption: %dmA\n",
cc);
if ((cc == 0) || (cc > 800)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_query_switch_result(chip, func_group,
func_to_switch, buf, 64);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1299,14 +1222,12 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
SD_OCP_THD_MASK,
chip->sd_800mA_ocp_thd);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PWR_CTL,
PMOS_STRG_MASK,
PMOS_STRG_800mA);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -1339,7 +1260,6 @@ static int sd_check_switch(struct rtsx_chip *chip,
for (i = 0; i < 3; i++) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1359,12 +1279,10 @@ static int sd_check_switch(struct rtsx_chip *chip,
retval = rtsx_read_register(chip, SD_STAT1, &stat);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (stat & SD_CRC16_ERR) {
dev_dbg(rtsx_dev(chip), "SD CRC16 error when switching mode\n");
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1376,7 +1294,6 @@ static int sd_check_switch(struct rtsx_chip *chip,
}
if (!switch_good) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1394,7 +1311,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT,
NO_ARGUMENT, bus_width);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1462,7 +1378,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
sd_card->sd_switch_fail = SDR104_SUPPORT_MASK |
DDR50_SUPPORT_MASK | SDR50_SUPPORT_MASK;
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1480,12 +1395,10 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
retval = rtsx_write_register(chip, SD_PUSH_POINT_CTL, 0x06,
0x04);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = sd_set_sample_push_timing(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1542,7 +1455,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
bus_width);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_NO_CARD)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1553,7 +1465,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
if (CHK_SD_DDR50(sd_card)) {
retval = rtsx_write_register(chip, SD_PUSH_POINT_CTL, 0x06, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -1570,7 +1481,6 @@ static int sd_wait_data_idle(struct rtsx_chip *chip)
for (i = 0; i < 100; i++) {
retval = rtsx_read_register(chip, SD_DATA_STATE, &val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (val & SD_DATA_IDLE) {
@@ -1591,7 +1501,6 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = sd_change_phase(chip, sample_point, TUNE_RX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1607,7 +1516,6 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
(void)sd_wait_data_idle(chip);
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1622,7 +1530,6 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = sd_change_phase(chip, sample_point, TUNE_RX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1631,7 +1538,6 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1647,7 +1553,6 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
(void)sd_wait_data_idle(chip);
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1669,7 +1574,6 @@ static int mmc_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = sd_change_phase(chip, sample_point, TUNE_RX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1687,7 +1591,6 @@ static int mmc_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
(void)sd_wait_data_idle(chip);
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1701,14 +1604,12 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = sd_change_phase(chip, sample_point, TUNE_TX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
SD_RSP_80CLK_TIMEOUT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1718,7 +1619,6 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
rtsx_write_register(chip, SD_CFG3,
SD_RSP_80CLK_TIMEOUT_EN, 0);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1726,7 +1626,6 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1741,7 +1640,6 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = sd_change_phase(chip, sample_point, TUNE_TX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1758,14 +1656,12 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
SD_RSP_80CLK_TIMEOUT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1780,14 +1676,12 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1935,7 +1829,6 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = mmc_ddr_tuning_rx_cmd;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1945,7 +1838,6 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
for (j = MAX_PHASE; j >= 0; j--) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1964,13 +1856,11 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
final_phase = sd_search_final_phase(chip, phase_map, TUNE_RX);
if (final_phase == 0xFF) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_change_phase(chip, final_phase, TUNE_RX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1988,7 +1878,6 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
SD_RSP_80CLK_TIMEOUT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1998,7 +1887,6 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
sd_set_err_code(chip, SD_NO_CARD);
rtsx_write_register(chip, SD_CFG3,
SD_RSP_80CLK_TIMEOUT_EN, 0);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2017,7 +1905,6 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -2026,13 +1913,11 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
if (final_phase == 0xFF) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_change_phase(chip, final_phase, TUNE_TX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2061,7 +1946,6 @@ static int sd_tuning_tx(struct rtsx_chip *chip)
if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = sd_ddr_tuning_tx_cmd;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2073,7 +1957,6 @@ static int sd_tuning_tx(struct rtsx_chip *chip)
sd_set_err_code(chip, SD_NO_CARD);
rtsx_write_register(chip, SD_CFG3,
SD_RSP_80CLK_TIMEOUT_EN, 0);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2092,13 +1975,11 @@ static int sd_tuning_tx(struct rtsx_chip *chip)
final_phase = sd_search_final_phase(chip, phase_map, TUNE_TX);
if (final_phase == 0xFF) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_change_phase(chip, final_phase, TUNE_TX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2111,13 +1992,11 @@ static int sd_sdr_tuning(struct rtsx_chip *chip)
retval = sd_tuning_tx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_tuning_rx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2131,28 +2010,24 @@ static int sd_ddr_tuning(struct rtsx_chip *chip)
if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_ddr_pre_tuning_tx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
TUNE_TX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
retval = sd_tuning_rx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (!(chip->sd_ctl & SD_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_tuning_tx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2167,28 +2042,24 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_ddr_pre_tuning_tx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
TUNE_TX);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
retval = sd_tuning_rx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (!(chip->sd_ctl & MMC_DDR_TX_PHASE_SET_BY_USER)) {
retval = sd_tuning_tx(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2204,13 +2075,11 @@ int sd_switch_clock(struct rtsx_chip *chip)
retval = select_card(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = switch_clock(chip, sd_card->sd_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2226,7 +2095,6 @@ int sd_switch_clock(struct rtsx_chip *chip)
}
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2259,26 +2127,22 @@ static int sd_prepare_reset(struct rtsx_chip *chip)
retval = sd_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, REG_SD_CFG1, 0xFF, 0x40);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
SD_STOP | SD_CLR_ERR);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = select_card(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2294,41 +2158,35 @@ static int sd_pull_ctl_disable(struct rtsx_chip *chip)
XD_D3_PD | SD_D7_PD | SD_CLK_PD |
SD_D5_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
SD_D6_PD | SD_D0_PD | SD_D1_PD |
XD_D5_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
SD_D4_PD | XD_CE_PD | XD_CLE_PD |
XD_CD_PU);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
XD_RDY_PD | SD_D3_PD | SD_D2_PD |
XD_ALE_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
MS_INS_PU | SD_WP_PD | SD_CD_PU |
SD_CMD_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL6, 0xFF,
MS_D5_PD | MS_D4_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else if (CHECK_PID(chip, 0x5288)) {
@@ -2336,25 +2194,21 @@ static int sd_pull_ctl_disable(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_PULL_CTL1,
0xFF, 0x55);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2,
0xFF, 0x55);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3,
0xFF, 0x4B);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4,
0xFF, 0x69);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -2397,7 +2251,6 @@ int sd_pull_ctl_enable(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, SD_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2410,7 +2263,6 @@ static int sd_init_power(struct rtsx_chip *chip)
retval = sd_power_off_card3v3(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2419,21 +2271,18 @@ static int sd_init_power(struct rtsx_chip *chip)
retval = enable_card_clock(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (chip->asic_code) {
retval = sd_pull_ctl_enable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = rtsx_write_register(chip, FPGA_PULL_CTL,
FPGA_SD_PULL_CTL_BIT | 0x20, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -2441,7 +2290,6 @@ static int sd_init_power(struct rtsx_chip *chip)
if (!chip->ft2_fast_mode) {
retval = card_power_on(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2451,7 +2299,6 @@ static int sd_init_power(struct rtsx_chip *chip)
if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
chip->ocp_stat);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -2460,7 +2307,6 @@ static int sd_init_power(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
SD_OUTPUT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -2473,13 +2319,11 @@ static int sd_dummy_clock(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, REG_SD_CFG3, 0x01, 0x01);
if (retval) {
- rtsx_trace(chip);
return retval;
}
wait_timeout(5);
retval = rtsx_write_register(chip, REG_SD_CFG3, 0x01, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -2513,7 +2357,6 @@ static int sd_read_lba0(struct rtsx_chip *chip)
bus_width, NULL, 0, 100);
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2531,7 +2374,6 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2548,7 +2390,6 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2849,7 +2690,6 @@ SD_UNLOCK_ENTRY:
retval = rtsx_write_register(chip, SD30_DRIVE_SEL, 0x07,
chip->sd30_drive_sel_1v8);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -2914,13 +2754,11 @@ SD_UNLOCK_ENTRY:
retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_H, 0xFF,
0x02);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_L, 0xFF,
0x00);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -2929,7 +2767,6 @@ SD_UNLOCK_ENTRY:
return STATUS_SUCCESS;
status_fail:
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2944,7 +2781,6 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return SWITCH_FAIL;
}
@@ -2963,7 +2799,6 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0x02);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return SWITCH_ERR;
}
@@ -2972,13 +2807,11 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
if (retval != STATUS_SUCCESS) {
rtsx_clear_sd_error(chip);
rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
- rtsx_trace(chip);
return SWITCH_ERR;
}
retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return SWITCH_ERR;
}
@@ -3015,7 +2848,6 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
retval = rtsx_send_cmd(chip, SD_CARD, 100);
if (retval < 0) {
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
return SWITCH_ERR;
}
@@ -3058,7 +2890,6 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
}
}
- rtsx_trace(chip);
return SWITCH_FAIL;
}
@@ -3109,7 +2940,6 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3117,7 +2947,6 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
if (ptr[0] & SD_TRANSFER_ERR) {
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3151,7 +2980,6 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
sd_choose_proper_clock(chip);
retval = switch_clock(chip, sd_card->sd_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3175,11 +3003,9 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
CLR_MMC_8BIT(sd_card);
CLR_MMC_4BIT(sd_card);
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3203,7 +3029,6 @@ static int reset_mmc(struct rtsx_chip *chip)
switch_fail:
retval = sd_prepare_reset(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return retval;
}
@@ -3213,14 +3038,12 @@ RTY_MMC_RST:
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
do {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3235,7 +3058,6 @@ RTY_MMC_RST:
sd_clr_err_code(chip);
goto RTY_MMC_RST;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -3244,7 +3066,6 @@ RTY_MMC_RST:
sd_clr_err_code(chip);
goto RTY_MMC_RST;
} else {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3255,7 +3076,6 @@ RTY_MMC_RST:
} while (!(rsp[1] & 0x80) && (i < 255));
if (i == 255) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3267,7 +3087,6 @@ RTY_MMC_RST:
retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3275,13 +3094,11 @@ RTY_MMC_RST:
retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
SD_RSP_TYPE_R6, rsp, 5);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_check_csd(chip, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3289,14 +3106,12 @@ RTY_MMC_RST:
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3304,14 +3119,12 @@ RTY_MMC_RST:
MMC_UNLOCK_ENTRY:
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3324,24 +3137,20 @@ MMC_UNLOCK_ENTRY:
if (retval != STATUS_SUCCESS) {
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
sd_card->mmc_dont_switch_bus = 1;
- rtsx_trace(chip);
goto switch_fail;
}
}
if (CHK_MMC_SECTOR_MODE(sd_card) && (sd_card->capacity == 0)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (switch_ddr && CHK_MMC_DDR52(sd_card)) {
retval = sd_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3349,12 +3158,10 @@ MMC_UNLOCK_ENTRY:
if (retval != STATUS_SUCCESS) {
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
switch_ddr = false;
- rtsx_trace(chip);
goto switch_fail;
}
@@ -3364,12 +3171,10 @@ MMC_UNLOCK_ENTRY:
if (retval != STATUS_SUCCESS) {
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
switch_ddr = false;
- rtsx_trace(chip);
goto switch_fail;
}
}
@@ -3381,13 +3186,11 @@ MMC_UNLOCK_ENTRY:
retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_H, 0xFF,
0x02);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, REG_SD_BLOCK_CNT_L, 0xFF,
0x00);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -3412,7 +3215,6 @@ int reset_sd_card(struct rtsx_chip *chip)
retval = enable_card_clock(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3421,7 +3223,6 @@ int reset_sd_card(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = sd_pull_ctl_enable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -3429,24 +3230,20 @@ int reset_sd_card(struct rtsx_chip *chip)
FPGA_SD_PULL_CTL_BIT |
0x20, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
retval = card_share_mode(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
chip->sd_io = 1;
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3454,13 +3251,11 @@ int reset_sd_card(struct rtsx_chip *chip)
retval = reset_mmc(chip);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_NO_CARD)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = reset_sd(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3468,17 +3263,14 @@ int reset_sd_card(struct rtsx_chip *chip)
retval = reset_sd(chip);
if (retval != STATUS_SUCCESS) {
if (sd_check_err_code(chip, SD_NO_CARD)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (chip->sd_io) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = reset_mmc(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3486,18 +3278,15 @@ int reset_sd_card(struct rtsx_chip *chip)
retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -3505,7 +3294,6 @@ int reset_sd_card(struct rtsx_chip *chip)
retval = sd_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3534,36 +3322,30 @@ static int reset_mmc_only(struct rtsx_chip *chip)
retval = enable_card_clock(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_init_power(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = reset_mmc(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_L, 0xFF, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, REG_SD_BYTE_CNT_H, 0xFF, 2);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -3571,7 +3353,6 @@ static int reset_mmc_only(struct rtsx_chip *chip)
retval = sd_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3591,7 +3372,6 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3601,7 +3381,6 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
sd_card->sd_addr, SD_RSP_TYPE_R1,
NULL, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3613,7 +3392,6 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
sd_set_err_code(chip, SD_TO_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3683,7 +3461,6 @@ static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3722,7 +3499,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
chip->card_fail |= SD_CARD;
chip->capacity[chip->card2lun[SD_CARD]] = 0;
chip->rw_need_retry = 1;
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -3737,7 +3513,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_IO_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
@@ -3759,7 +3534,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
if (retval != STATUS_SUCCESS) {
chip->rw_need_retry = 1;
sd_set_err_code(chip, SD_STS_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
@@ -3768,7 +3542,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
retval = rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
if (retval != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_IO_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
@@ -3860,7 +3633,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
chip->rw_need_retry = 1;
sd_set_err_code(chip, SD_TO_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
@@ -3868,7 +3640,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
if (retval != STATUS_SUCCESS) {
chip->rw_need_retry = 1;
sd_set_err_code(chip, SD_TO_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
@@ -3877,7 +3648,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
NULL, 0);
if (retval != STATUS_SUCCESS) {
chip->rw_need_retry = 1;
- rtsx_trace(chip);
goto RW_FAIL;
}
@@ -3923,7 +3693,6 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
chip->rw_need_retry = 0;
dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n",
__func__);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3933,24 +3702,20 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
SD_RSP_TYPE_R1b, NULL, 0);
if (retval != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_STS_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
if (stat & (SD_CRC7_ERR | SD_CRC16_ERR | SD_CRC_WRITE_ERR)) {
dev_dbg(rtsx_dev(chip), "SD CRC error, tune clock!\n");
sd_set_err_code(chip, SD_CRC_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
if (err == STATUS_TIMEDOUT) {
sd_set_err_code(chip, SD_TO_ERR);
- rtsx_trace(chip);
goto RW_FAIL;
}
- rtsx_trace(chip);
return err;
}
@@ -3966,7 +3731,6 @@ RW_FAIL:
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
chip->rw_need_retry = 0;
dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n", __func__);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -3988,7 +3752,6 @@ RW_FAIL:
}
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4057,14 +3820,12 @@ RTY_SEND_CMD:
if (rsp_type & SD_WAIT_BUSY_END) {
retval = sd_check_data0_status(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return retval;
}
} else {
sd_set_err_code(chip, SD_TO_ERR);
}
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4075,7 +3836,6 @@ RTY_SEND_CMD:
if ((ptr[0] & 0xC0) != 0) {
sd_set_err_code(chip, SD_STS_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4083,7 +3843,6 @@ RTY_SEND_CMD:
if (ptr[stat_idx] & SD_CRC7_ERR) {
if (cmd_idx == WRITE_MULTIPLE_BLOCK) {
sd_set_err_code(chip, SD_CRC_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (rty_cnt < SD_MAX_RETRY_COUNT) {
@@ -4092,7 +3851,6 @@ RTY_SEND_CMD:
goto RTY_SEND_CMD;
} else {
sd_set_err_code(chip, SD_CRC_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -4102,7 +3860,6 @@ RTY_SEND_CMD:
(cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
if ((cmd_idx != STOP_TRANSMISSION) && !special_check) {
if (ptr[1] & 0x80) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -4111,18 +3868,15 @@ RTY_SEND_CMD:
#else
if (ptr[1] & 0x7F) {
#endif
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (ptr[2] & 0xF8) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (cmd_idx == SELECT_CARD) {
if (rsp_type == SD_RSP_TYPE_R2) {
if ((ptr[3] & 0x1E) != 0x04) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -4162,7 +3916,6 @@ int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
retval = rtsx_send_cmd(chip, SD_CARD, 100);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -4210,7 +3963,6 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!(CHK_BIT(chip->lun_mc, lun))) {
SET_BIT(chip->lun_mc, lun);
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4219,7 +3971,6 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
(srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4234,7 +3985,6 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
default:
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4303,20 +4053,17 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!sd_card->sd_pass_thru_en) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (sd_card->pre_cmd_err) {
sd_card->pre_cmd_err = 0;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4333,14 +4080,12 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = get_rsp_type(srb, &rsp_type, &rsp_len);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
sd_card->last_rsp_type = rsp_type;
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4350,7 +4095,6 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
SD_BUS_WIDTH_8);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4358,7 +4102,6 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -4366,7 +4109,6 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#else
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
#endif
@@ -4374,7 +4116,6 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (standby) {
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_cmd_failed;
}
}
@@ -4385,7 +4126,6 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_cmd_failed;
}
}
@@ -4393,14 +4133,12 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
sd_card->rsp, rsp_len, false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_cmd_failed;
}
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_cmd_failed;
}
}
@@ -4408,7 +4146,6 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_SD_LOCK
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_cmd_failed;
}
#endif
@@ -4424,7 +4161,6 @@ sd_execute_cmd_failed:
if (!(chip->card_ready & SD_CARD))
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4440,20 +4176,17 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!sd_card->sd_pass_thru_en) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (sd_card->pre_cmd_err) {
sd_card->pre_cmd_err = 0;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4473,14 +4206,12 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = get_rsp_type(srb, &rsp_type, &rsp_len);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
sd_card->last_rsp_type = rsp_type;
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4505,7 +4236,6 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
}
@@ -4513,7 +4243,6 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (standby) {
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
}
@@ -4524,7 +4253,6 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
}
@@ -4546,7 +4274,6 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = kmalloc(data_len, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -4556,7 +4283,6 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
read_err = true;
kfree(buf);
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
@@ -4606,25 +4332,21 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval < 0) {
read_err = true;
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
} else {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
}
@@ -4634,7 +4356,6 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1b, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
}
@@ -4644,19 +4365,16 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
}
@@ -4673,7 +4391,6 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
break;
}
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_read_cmd_failed;
}
@@ -4691,7 +4408,6 @@ sd_execute_read_cmd_failed:
if (!(chip->card_ready & SD_CARD))
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4712,20 +4428,17 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!sd_card->sd_pass_thru_en) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (sd_card->pre_cmd_err) {
sd_card->pre_cmd_err = 0;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4754,14 +4467,12 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = get_rsp_type(srb, &rsp_type, &rsp_len);
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
sd_card->last_rsp_type = rsp_type;
retval = sd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4771,7 +4482,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
SD_BUS_WIDTH_8);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -4779,7 +4489,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
}
@@ -4787,7 +4496,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#else
retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03, SD_BUS_WIDTH_4);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
#endif
@@ -4797,7 +4505,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -4805,7 +4512,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (standby) {
retval = sd_select_card(chip, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -4816,7 +4522,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -4824,7 +4529,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
sd_card->rsp, rsp_len, false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
@@ -4834,7 +4538,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf = kmalloc(data_len, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -4854,7 +4557,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
@@ -4866,7 +4568,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
} else {
@@ -4878,7 +4579,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, 0, 250);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -4931,14 +4631,12 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
DMA_TO_DEVICE, 10000);
} else {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
if (retval < 0) {
write_err = true;
rtsx_clear_sd_error(chip);
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
@@ -4966,7 +4664,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (standby) {
retval = sd_select_card(chip, 1);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -4976,7 +4673,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1b, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -4986,19 +4682,16 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SD_RSP_TYPE_R1, NULL, 0,
false);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
- rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -5015,7 +4708,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
break;
}
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
@@ -5043,7 +4735,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = reset_sd(chip);
if (retval != STATUS_SUCCESS) {
sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
- rtsx_trace(chip);
goto sd_execute_write_cmd_failed;
}
}
@@ -5057,7 +4748,6 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (lock_cmd_fail) {
scsi_set_resid(srb, 0);
set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
#endif /* SUPPORT_SD_LOCK */
@@ -5076,7 +4766,6 @@ sd_execute_write_cmd_failed:
if (!(chip->card_ready & SD_CARD))
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -5089,14 +4778,12 @@ int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!sd_card->sd_pass_thru_en) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (sd_card->pre_cmd_err) {
sd_card->pre_cmd_err = 0;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -5104,7 +4791,6 @@ int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (sd_card->last_rsp_type == SD_RSP_TYPE_R0) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
} else if (sd_card->last_rsp_type == SD_RSP_TYPE_R2) {
count = (data_len < 17) ? data_len : 17;
@@ -5130,14 +4816,12 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (!sd_card->sd_pass_thru_en) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
if (sd_card->pre_cmd_err) {
sd_card->pre_cmd_err = 0;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -5146,7 +4830,6 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
(srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
(srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -5163,7 +4846,6 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#endif
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
sd_card->pre_cmd_err = 1;
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
#ifdef SUPPORT_SD_LOCK
@@ -5176,14 +4858,12 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
sd_card->pre_cmd_err = 1;
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
break;
default:
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
- rtsx_trace(chip);
return TRANSPORT_FAILED;
}
@@ -5209,20 +4889,17 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
retval = disable_card_clock(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!chip->ft2_fast_mode) {
retval = card_power_off(chip, SD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -5232,7 +4909,6 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = sd_pull_ctl_disable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -5240,7 +4916,6 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
FPGA_SD_PULL_CTL_BIT | 0x20,
FPGA_SD_PULL_CTL_BIT);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -5270,7 +4945,6 @@ int release_sd_card(struct rtsx_chip *chip)
retval = sd_power_off_card3v3(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index b5646b62ec9e..4675668ad977 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -42,13 +42,11 @@ static int spi_init(struct rtsx_chip *chip)
CS_POLARITY_LOW | DTO_MSB_FIRST
| SPI_MASTER | SPI_MODE0 | SPI_AUTO);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SPI_TCTL, EDO_TIMING_MASK,
SAMPLE_DELAY_HALF);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -63,38 +61,32 @@ static int spi_set_init_para(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF,
(u8)(spi->clk_div >> 8));
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SPI_CLK_DIVIDER0, 0xFF,
(u8)(spi->clk_div));
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = switch_clock(chip, spi->spi_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = select_card(chip, SPI_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_CLK_EN, SPI_CLK_EN,
SPI_CLK_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_OE, SPI_OUTPUT_EN,
SPI_OUTPUT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -102,7 +94,6 @@ static int spi_set_init_para(struct rtsx_chip *chip)
retval = spi_init(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -125,7 +116,6 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
if (retval < 0) {
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_BUSY_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -154,7 +144,6 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
if (retval < 0) {
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -183,7 +172,6 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
if (retval < 0) {
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -242,7 +230,6 @@ static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
if (retval < 0) {
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -261,37 +248,31 @@ static int spi_init_eeprom(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF, 0x00);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SPI_CLK_DIVIDER0, 0xFF, 0x27);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = switch_clock(chip, clk);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = select_card(chip, SPI_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_CLK_EN, SPI_CLK_EN,
SPI_CLK_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_OE, SPI_OUTPUT_EN,
SPI_OUTPUT_EN);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -300,13 +281,11 @@ static int spi_init_eeprom(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, SPI_CONTROL, 0xFF,
CS_POLARITY_HIGH | SPI_EEPROM_AUTO);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, SPI_TCTL, EDO_TIMING_MASK,
SAMPLE_DELAY_HALF);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -328,7 +307,6 @@ static int spi_eeprom_program_enable(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -341,13 +319,11 @@ int spi_erase_eeprom_chip(struct rtsx_chip *chip)
retval = spi_init_eeprom(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = spi_eeprom_program_enable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -364,13 +340,11 @@ int spi_erase_eeprom_chip(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -383,13 +357,11 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
retval = spi_init_eeprom(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = spi_eeprom_program_enable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -408,13 +380,11 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -428,7 +398,6 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
retval = spi_init_eeprom(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -448,14 +417,12 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
wait_timeout(5);
retval = rtsx_read_register(chip, SPI_DATA, &data);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -464,7 +431,6 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -477,13 +443,11 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
retval = spi_init_eeprom(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = spi_eeprom_program_enable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -503,13 +467,11 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
retval = rtsx_send_cmd(chip, 0, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_GPIO_DIR, 0x01, 0x01);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -562,14 +524,12 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
if (len > 512) {
spi_set_err_code(chip, SPI_INVALID_COMMAND);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = spi_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -612,14 +572,12 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval < 0) {
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (len) {
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -627,7 +585,6 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_READ_ERR);
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -660,13 +617,11 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -720,7 +675,6 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
kfree(buf);
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -757,14 +711,12 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (program_mode == BYTE_PROGRAM) {
buf = kmalloc(4, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -772,7 +724,6 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sf_enable_write(chip, SPI_WREN);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -792,14 +743,12 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
kfree(buf);
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sf_polling_status(chip, 100);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -814,13 +763,11 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sf_enable_write(chip, SPI_WREN);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
buf = kmalloc(4, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -846,14 +793,12 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
kfree(buf);
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sf_polling_status(chip, 100);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -864,19 +809,16 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sf_disable_write(chip, SPI_WRDI);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sf_polling_status(chip, 100);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else if (program_mode == PAGE_PROGRAM) {
buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
if (!buf) {
- rtsx_trace(chip);
return STATUS_NOMEM;
}
@@ -889,7 +831,6 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = sf_enable_write(chip, SPI_WREN);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -909,14 +850,12 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
kfree(buf);
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sf_polling_status(chip, 100);
if (retval != STATUS_SUCCESS) {
kfree(buf);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -927,7 +866,6 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
kfree(buf);
} else {
spi_set_err_code(chip, SPI_INVALID_COMMAND);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -950,37 +888,31 @@ int spi_erase_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (erase_mode == PAGE_ERASE) {
retval = sf_enable_write(chip, SPI_WREN);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sf_erase(chip, ins, 1, addr);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else if (erase_mode == CHIP_ERASE) {
retval = sf_enable_write(chip, SPI_WREN);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sf_erase(chip, ins, 0, 0);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
spi_set_err_code(chip, SPI_INVALID_COMMAND);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -999,13 +931,11 @@ int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = spi_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = sf_enable_write(chip, ewsr);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1029,7 +959,6 @@ int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
rtsx_clear_spi_error(chip);
spi_set_err_code(chip, SPI_HW_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
diff --git a/drivers/staging/rts5208/trace.c b/drivers/staging/rts5208/trace.c
deleted file mode 100644
index c878e75293f7..000000000000
--- a/drivers/staging/rts5208/trace.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-#include "rtsx.h"
-
-#ifdef _MSG_TRACE
-
-void _rtsx_trace(struct rtsx_chip *chip, const char *file, const char *func,
- int line)
-{
- struct trace_msg_t *msg = &chip->trace_msg[chip->msg_idx];
-
- file = kbasename(file);
- dev_dbg(rtsx_dev(chip), "[%s][%s]:[%d]\n", file, func, line);
-
- strncpy(msg->file, file, MSG_FILE_LEN - 1);
- strncpy(msg->func, func, MSG_FUNC_LEN - 1);
- msg->line = (u16)line;
- get_current_time(msg->timeval_buf, TIME_VAL_LEN);
- msg->valid = 1;
-
- chip->msg_idx++;
- if (chip->msg_idx >= TRACE_ITEM_CNT)
- chip->msg_idx = 0;
-}
-#endif
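
For reference, the deleted _rtsx_trace above kept only the most recent call sites in a fixed-size ring, reusing the oldest record once msg_idx wrapped around. A minimal user-space sketch of that ring-logging pattern (hypothetical names and sizes, not the driver's API) is:

    #include <stdio.h>

    #define TRACE_ITEMS 8   /* hypothetical ring depth */
    #define NAME_LEN   32   /* hypothetical field width */

    struct trace_rec {
            char file[NAME_LEN];
            char func[NAME_LEN];
            int  line;
            int  valid;
    };

    static struct trace_rec ring[TRACE_ITEMS];
    static int ring_idx;

    /* Record one call site, reusing the oldest slot on wraparound. */
    static void trace_hit(const char *file, const char *func, int line)
    {
            snprintf(ring[ring_idx].file, NAME_LEN, "%s", file);
            snprintf(ring[ring_idx].func, NAME_LEN, "%s", func);
            ring[ring_idx].line = line;
            ring[ring_idx].valid = 1;
            ring_idx = (ring_idx + 1) % TRACE_ITEMS;
    }

    int main(void)
    {
            int i;

            trace_hit(__FILE__, __func__, __LINE__);
            for (i = 0; i < TRACE_ITEMS; i++)
                    if (ring[i].valid)
                            printf("[%s][%s]:[%d]\n", ring[i].file,
                                   ring[i].func, ring[i].line);
            return 0;
    }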
diff --git a/drivers/staging/rts5208/trace.h b/drivers/staging/rts5208/trace.h
deleted file mode 100644
index 5b807874c1d7..000000000000
--- a/drivers/staging/rts5208/trace.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Driver for Realtek PCI-Express card reader
- * Header file
- *
- * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author:
- * Wei WANG (wei_wang@realsil.com.cn)
- * Micky Ching (micky_ching@realsil.com.cn)
- */
-
-#ifndef __REALTEK_RTSX_TRACE_H
-#define __REALTEK_RTSX_TRACE_H
-
-struct rtsx_chip;
-
-#ifdef _MSG_TRACE
-void _rtsx_trace(struct rtsx_chip *chip, const char *file, const char *func,
- int line);
-#define rtsx_trace(chip) \
- _rtsx_trace(chip, __FILE__, __func__, __LINE__)
-#else
-static inline void rtsx_trace(struct rtsx_chip *chip)
-{
-}
-#endif
-
-#endif /* __REALTEK_RTSX_TRACE_H */
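
The two branches of the deleted header show the usual C idiom for optional debug hooks: when the config symbol is defined, a macro captures __FILE__, __func__ and __LINE__ at each call site; when it is not, the hook collapses to an empty static inline that the compiler optimizes away. A self-contained sketch of that pattern (hypothetical names, not kernel code):

    #include <stdio.h>

    /* Build with -DDEBUG_TRACE for the real hook; omit it for a no-op. */
    #ifdef DEBUG_TRACE
    static void debug_hook(const char *file, const char *func, int line)
    {
            fprintf(stderr, "[%s][%s]:[%d]\n", file, func, line);
    }
    #define trace_here() debug_hook(__FILE__, __func__, __LINE__)
    #else
    static inline void trace_here(void)
    {
            /* empty: every call site compiles to nothing */
    }
    #endif

    int main(void)
    {
            trace_here();   /* logs in debug builds, free otherwise */
            return 0;
    }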
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 11ea0c658e28..261d868a3072 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -61,7 +61,6 @@ static int xd_set_init_para(struct rtsx_chip *chip)
retval = switch_clock(chip, xd_card->xd_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -75,13 +74,11 @@ static int xd_switch_clock(struct rtsx_chip *chip)
retval = select_card(chip, XD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = switch_clock(chip, xd_card->xd_clock);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -106,7 +103,6 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
retval = rtsx_send_cmd(chip, XD_CARD, 20);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -178,7 +174,6 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
retval = rtsx_send_cmd(chip, XD_CARD, 500);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -199,7 +194,6 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
int retval, i;
if (!buf || (buf_len < 0)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -212,7 +206,6 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
retval = rtsx_send_cmd(chip, 0, 250);
if (retval < 0) {
rtsx_clear_xd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -228,7 +221,6 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
u8 reg;
if (!buf || (buf_len < 10)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -250,30 +242,25 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
retval = rtsx_send_cmd(chip, XD_CARD, 250);
if (retval == -ETIMEDOUT) {
rtsx_clear_xd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (reg != XD_GPG) {
rtsx_clear_xd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_read_register(chip, XD_CTL, &reg);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!(reg & XD_ECC1_ERROR) || !(reg & XD_ECC1_UNCORRECTABLE)) {
retval = xd_read_data_from_ppb(chip, 0, buf, buf_len);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (reg & XD_ECC1_ERROR) {
@@ -282,13 +269,11 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
retval = rtsx_read_register(chip, XD_ECC_BIT1,
&ecc_bit);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_read_register(chip, XD_ECC_BYTE1,
&ecc_byte);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -307,7 +292,6 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
retval = xd_read_data_from_ppb(chip, 256, buf, buf_len);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (reg & XD_ECC2_ERROR) {
@@ -316,13 +300,11 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
retval = rtsx_read_register(chip, XD_ECC_BIT2,
&ecc_bit);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_read_register(chip, XD_ECC_BYTE2,
&ecc_byte);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -338,7 +320,6 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
}
} else {
rtsx_clear_xd_error(chip);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -424,7 +405,6 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
XD_D1_PD |
XD_D0_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
@@ -433,7 +413,6 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
XD_D5_PD |
XD_D4_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
@@ -442,7 +421,6 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
XD_CLE_PD |
XD_CD_PU);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
@@ -451,7 +429,6 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
XD_RE_PD |
XD_ALE_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
@@ -460,13 +437,11 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
SD_CD_PU |
SD_CMD_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL6, 0xFF,
MS_D5_PD | MS_D4_PD);
if (retval) {
- rtsx_trace(chip);
return retval;
}
} else if (CHECK_PID(chip, 0x5288)) {
@@ -474,25 +449,21 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
retval = rtsx_write_register(chip, CARD_PULL_CTL1,
0xFF, 0x55);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL2,
0xFF, 0x55);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL3,
0xFF, 0x4B);
if (retval) {
- rtsx_trace(chip);
return retval;
}
retval = rtsx_write_register(chip, CARD_PULL_CTL4,
0xFF, 0x69);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -509,7 +480,6 @@ static int reset_xd(struct rtsx_chip *chip)
retval = select_card(chip, XD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -536,14 +506,12 @@ static int reset_xd(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, XD_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (!chip->ft2_fast_mode) {
retval = card_power_off(chip, XD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -562,13 +530,11 @@ static int reset_xd(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, XD_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = card_power_on(chip, XD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -577,7 +543,6 @@ static int reset_xd(struct rtsx_chip *chip)
if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
dev_dbg(rtsx_dev(chip), "Over current, OCPSTAT is 0x%x\n",
chip->ocp_stat);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -601,7 +566,6 @@ static int reset_xd(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, XD_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -610,7 +574,6 @@ static int reset_xd(struct rtsx_chip *chip)
retval = xd_set_init_para(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -636,7 +599,6 @@ static int reset_xd(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, XD_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -651,7 +613,6 @@ static int reset_xd(struct rtsx_chip *chip)
retval = xd_read_id(chip, READ_ID, id_buf, 4);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -734,7 +695,6 @@ static int reset_xd(struct rtsx_chip *chip)
for (j = 0; j < 10; j++) {
retval = xd_read_id(chip, READ_ID, id_buf, 4);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -752,19 +712,16 @@ static int reset_xd(struct rtsx_chip *chip)
xd_card->addr_cycle = 0;
xd_card->capacity = 0;
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
dev_dbg(rtsx_dev(chip), "READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
if (id_buf[2] != XD_ID_CODE) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -773,7 +730,6 @@ static int reset_xd(struct rtsx_chip *chip)
u32 page_addr;
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -814,7 +770,6 @@ static int reset_xd(struct rtsx_chip *chip)
retval = xd_read_cis(chip, page_addr, buf, 10);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -832,10 +787,8 @@ static int reset_xd(struct rtsx_chip *chip)
}
dev_dbg(rtsx_dev(chip), "CIS block: 0x%x\n", xd_card->cis_block);
- if (xd_card->cis_block == 0xFFFF) {
- rtsx_trace(chip);
+ if (xd_card->cis_block == 0xFFFF)
return STATUS_FAIL;
- }
chip->capacity[chip->card2lun[XD_CARD]] = xd_card->capacity;
@@ -889,7 +842,6 @@ static int xd_init_l2p_tbl(struct rtsx_chip *chip)
xd_card->zone_cnt);
if (xd_card->zone_cnt < 1) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -898,7 +850,6 @@ static int xd_init_l2p_tbl(struct rtsx_chip *chip)
xd_card->zone = vmalloc(size);
if (!xd_card->zone) {
- rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -1078,19 +1029,16 @@ int reset_xd_card(struct rtsx_chip *chip)
retval = enable_card_clock(chip, XD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = reset_xd(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = xd_init_l2p_tbl(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1107,7 +1055,6 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
dev_dbg(rtsx_dev(chip), "mark block 0x%x as bad block\n", phy_blk);
if (phy_blk == BLK_NOT_FOUND) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1144,7 +1091,6 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
xd_set_err_code(chip, XD_PRG_ERROR);
else
xd_set_err_code(chip, XD_TO_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1162,11 +1108,9 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
dev_dbg(rtsx_dev(chip), "Init block 0x%x\n", phy_blk);
if (start_page > end_page) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (phy_blk == BLK_NOT_FOUND) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1203,7 +1147,6 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
} else {
xd_set_err_code(chip, XD_TO_ERROR);
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1222,12 +1165,10 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
old_blk, new_blk);
if (start_page > end_page) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND)) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1239,7 +1180,6 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
retval = rtsx_write_register(chip, CARD_DATA_SOURCE, 0x01,
PINGPONG_BUFFER);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1247,7 +1187,6 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
rtsx_clear_xd_error(chip);
xd_set_err_code(chip, XD_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1274,7 +1213,6 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
if (detect_card_cd(chip,
XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1296,7 +1234,6 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
}
} else {
xd_set_err_code(chip, XD_TO_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1325,7 +1262,6 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
} else {
xd_set_err_code(chip, XD_TO_ERROR);
}
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1352,7 +1288,6 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
retval = rtsx_send_cmd(chip, XD_CARD, 100);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1360,7 +1295,6 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
if (((ptr[0] & READY_FLAG) == READY_STATE) && (ptr[1] & XD_RDY))
return STATUS_SUCCESS;
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1372,7 +1306,6 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
int i, retval;
if (phy_blk == BLK_NOT_FOUND) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1396,13 +1329,11 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
if (reg & PROGRAM_ERROR) {
xd_mark_bad_block(chip, phy_blk);
xd_set_err_code(chip, XD_PRG_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
xd_set_err_code(chip, XD_ERASE_FAIL);
retval = xd_reset_cmd(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
continue;
@@ -1412,7 +1343,6 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
if (*ptr & PROGRAM_ERROR) {
xd_mark_bad_block(chip, phy_blk);
xd_set_err_code(chip, XD_PRG_ERROR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1421,7 +1351,6 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
xd_mark_bad_block(chip, phy_blk);
xd_set_err_code(chip, XD_ERASE_FAIL);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1454,7 +1383,6 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
if (!zone->l2p_table) {
zone->l2p_table = vmalloc(2000);
if (!zone->l2p_table) {
- rtsx_trace(chip);
goto build_fail;
}
}
@@ -1463,7 +1391,6 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
if (!zone->free_table) {
zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
if (!zone->free_table) {
- rtsx_trace(chip);
goto build_fail;
}
}
@@ -1629,7 +1556,6 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
retval = rtsx_send_cmd(chip, XD_CARD, 200);
if (retval < 0) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1702,7 +1628,6 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
xd_set_err_code(chip, XD_TO_ERROR);
goto status_fail;
} else {
- rtsx_trace(chip);
goto fail;
}
}
@@ -1712,7 +1637,6 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
fail:
retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1721,7 +1645,6 @@ fail:
retval = rtsx_read_register(chip, XD_CTL, &reg_val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
@@ -1764,7 +1687,6 @@ fail:
}
status_fail:
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1781,7 +1703,6 @@ static int xd_finish_write(struct rtsx_chip *chip,
dev_dbg(rtsx_dev(chip), "log_blk = 0x%x\n", log_blk);
if (page_off > xd_card->page_off) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1795,7 +1716,6 @@ static int xd_finish_write(struct rtsx_chip *chip,
retval = xd_erase_block(chip, new_blk);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, new_blk);
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -1808,7 +1728,6 @@ static int xd_finish_write(struct rtsx_chip *chip,
xd_set_unused_block(chip, new_blk);
}
XD_CLR_BAD_NEWBLK(xd_card);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1842,7 +1761,6 @@ static int xd_prepare_write(struct rtsx_chip *chip,
if (page_off) {
retval = xd_copy_page(chip, old_blk, new_blk, 0, page_off);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -1912,7 +1830,6 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
xd_set_err_code(chip, XD_TO_ERROR);
goto status_fail;
} else {
- rtsx_trace(chip);
goto fail;
}
}
@@ -1942,7 +1859,6 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
fail:
retval = rtsx_read_register(chip, XD_DAT, &reg_val);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (reg_val & PROGRAM_ERROR) {
@@ -1951,7 +1867,6 @@ fail:
}
status_fail:
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1966,7 +1881,6 @@ int xd_delay_write(struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "%s\n", __func__);
retval = xd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1977,7 +1891,6 @@ int xd_delay_write(struct rtsx_chip *chip)
delay_write->logblock,
delay_write->pageoff);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2012,14 +1925,12 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
retval = xd_switch_clock(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2033,7 +1944,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2053,7 +1963,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2070,7 +1979,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -2080,7 +1988,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
(new_blk == BLK_NOT_FOUND)) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2091,12 +1998,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#ifdef XD_DELAY_WRITE
@@ -2109,12 +2014,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -2123,7 +2026,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (old_blk == BLK_NOT_FOUND) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2134,7 +2036,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2151,7 +2052,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
@@ -2162,7 +2062,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2184,7 +2083,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2198,7 +2096,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2207,7 +2104,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (new_blk == BLK_NOT_FOUND) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
}
@@ -2227,7 +2123,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2237,11 +2132,9 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
- rtsx_trace(chip);
return STATUS_FAIL;
}
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
- rtsx_trace(chip);
return STATUS_FAIL;
}
#endif
@@ -2288,20 +2181,17 @@ int xd_power_off_card3v3(struct rtsx_chip *chip)
retval = disable_card_clock(chip, XD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
retval = rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN, 0);
if (retval) {
- rtsx_trace(chip);
return retval;
}
if (!chip->ft2_fast_mode) {
retval = card_power_off(chip, XD_CARD);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2311,13 +2201,11 @@ int xd_power_off_card3v3(struct rtsx_chip *chip)
if (chip->asic_code) {
retval = xd_pull_ctl_disable(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
} else {
retval = rtsx_write_register(chip, FPGA_PULL_CTL, 0xFF, 0xDF);
if (retval) {
- rtsx_trace(chip);
return retval;
}
}
@@ -2340,7 +2228,6 @@ int release_xd_card(struct rtsx_chip *chip)
retval = xd_power_off_card3v3(chip);
if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
return STATUS_FAIL;
}
diff --git a/drivers/staging/skein/Kconfig b/drivers/staging/skein/Kconfig
deleted file mode 100644
index 012a8233376e..000000000000
--- a/drivers/staging/skein/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config CRYPTO_SKEIN
- tristate "Skein digest algorithm"
- depends on (X86 || UML_X86) && 64BIT && CRYPTO
- select CRYPTO_HASH
- select CRYPTO_ALGAPI
- help
- The Skein secure hash algorithm is one of the 5 finalists from the
- NIST SHA-3 competition.
-
- Skein is optimized for modern, 64-bit processors and is highly
- customizable. See:
-
- http://www.skein-hash.info/sites/default/files/skein1.3.pdf
-
- for more information. This module also contains the Threefish block
- cipher algorithm.
diff --git a/drivers/staging/skein/Makefile b/drivers/staging/skein/Makefile
deleted file mode 100644
index 86b7966d694e..000000000000
--- a/drivers/staging/skein/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the skein secure hash algorithm
-#
-obj-$(CONFIG_CRYPTO_SKEIN) += skein.o
-skein-y := skein_base.o \
- skein_api.o \
- skein_block.o \
- threefish_block.o \
- threefish_api.o \
- skein_generic.o
diff --git a/drivers/staging/skein/TODO b/drivers/staging/skein/TODO
deleted file mode 100644
index cd3508dd9089..000000000000
--- a/drivers/staging/skein/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-skein/threefish TODO
-
- - move macros into appropriate header files
- - add / pass test vectors
- - module support
-
-Please send patches to Jason Cooper <jason@lakedaemon.net> in addition to the
-staging tree mailing list.
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
deleted file mode 100644
index c6526b6fbfb4..000000000000
--- a/drivers/staging/skein/skein_api.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2010 Werner Dittmann
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/string.h>
-#include "skein_api.h"
-
-int skein_ctx_prepare(struct skein_ctx *ctx, enum skein_size size)
-{
- skein_assert_ret(ctx && size, SKEIN_FAIL);
-
- memset(ctx, 0, sizeof(struct skein_ctx));
- ctx->skein_size = size;
-
- return SKEIN_SUCCESS;
-}
-
-int skein_init(struct skein_ctx *ctx, size_t hash_bit_len)
-{
- int ret = SKEIN_FAIL;
- size_t x_len = 0;
- u64 *x = NULL;
- u64 tree_info = SKEIN_CFG_TREE_INFO_SEQUENTIAL;
-
- skein_assert_ret(ctx, SKEIN_FAIL);
- /*
- * The following two lines rely on the fact that the real Skein
- * contexts are a union in our context and thus have the maximum
- * memory available. The beauty of C :-) .
- */
- x = ctx->m.s256.x;
- x_len = ctx->skein_size / 8;
- /*
- * If size is the same and hash bit length is zero then reuse
- * the saved chaining variables.
- */
- switch (ctx->skein_size) {
- case SKEIN_256:
- ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len,
- tree_info, NULL, 0);
- break;
- case SKEIN_512:
- ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len,
- tree_info, NULL, 0);
- break;
- case SKEIN_1024:
- ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len,
- tree_info, NULL, 0);
- break;
- }
-
- if (ret == SKEIN_SUCCESS) {
- /*
- * Save chaining variables for this combination of size and
- * hash_bit_len
- */
- memcpy(ctx->x_save, x, x_len);
- }
- return ret;
-}
-
-int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len,
- size_t hash_bit_len)
-{
- int ret = SKEIN_FAIL;
- u64 *x = NULL;
- size_t x_len = 0;
- u64 tree_info = SKEIN_CFG_TREE_INFO_SEQUENTIAL;
-
- skein_assert_ret(ctx, SKEIN_FAIL);
-
- x = ctx->m.s256.x;
- x_len = ctx->skein_size / 8;
-
- skein_assert_ret(hash_bit_len, SKEIN_BAD_HASHLEN);
-
- switch (ctx->skein_size) {
- case SKEIN_256:
- ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len,
- tree_info, key, key_len);
-
- break;
- case SKEIN_512:
- ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len,
- tree_info, key, key_len);
- break;
- case SKEIN_1024:
- ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len,
- tree_info, key, key_len);
-
- break;
- }
- if (ret == SKEIN_SUCCESS) {
- /*
- * Save chaining variables for this combination of key,
- * key_len, hash_bit_len
- */
- memcpy(ctx->x_save, x, x_len);
- }
- return ret;
-}
-
-void skein_reset(struct skein_ctx *ctx)
-{
- size_t x_len = 0;
- u64 *x;
-
- /*
- * The following two lines rely on the fact that the real Skein
- * contexts are a union in our context and thus have the maximum
- * memory available. The beauty of C :-) .
- */
- x = ctx->m.s256.x;
- x_len = ctx->skein_size / 8;
- /* Restore the chaining variable, reset byte counter */
- memcpy(x, ctx->x_save, x_len);
-
- /* Setup context to process the message */
- skein_start_new_type(&ctx->m, MSG);
-}
-
-int skein_update(struct skein_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt)
-{
- int ret = SKEIN_FAIL;
-
- skein_assert_ret(ctx, SKEIN_FAIL);
-
- switch (ctx->skein_size) {
- case SKEIN_256:
- ret = skein_256_update(&ctx->m.s256, msg, msg_byte_cnt);
- break;
- case SKEIN_512:
- ret = skein_512_update(&ctx->m.s512, msg, msg_byte_cnt);
- break;
- case SKEIN_1024:
- ret = skein_1024_update(&ctx->m.s1024, msg, msg_byte_cnt);
- break;
- }
- return ret;
-}
-
-int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
- size_t msg_bit_cnt)
-{
- /*
- * I've used the bit pad implementation from skein_test.c (see NIST CD)
- * and modified it to use the convenience functions, adding some
- * pointer arithmetic.
- */
- size_t length;
- u8 mask;
- u8 *up;
-
- /*
- * only the final Update() call is allowed to do partial bytes, else
- * assert an error
- */
- skein_assert_ret((ctx->m.h.T[1] & SKEIN_T1_FLAG_BIT_PAD) == 0 ||
- msg_bit_cnt == 0, SKEIN_FAIL);
-
- /* if number of bits is a multiple of bytes - that's easy */
- if ((msg_bit_cnt & 0x7) == 0)
- return skein_update(ctx, msg, msg_bit_cnt >> 3);
-
- skein_update(ctx, msg, (msg_bit_cnt >> 3) + 1);
-
- /*
- * The next line relies on the fact that the real Skein contexts
- * are a union in our context. After the addition the pointer points to
- * Skein's real partial block buffer.
- * If this layout ever changes we have to adapt this as well.
- */
- up = (u8 *)ctx->m.s256.x + ctx->skein_size / 8;
-
- /* set tweak flag for the skein_final call */
- skein_set_bit_pad_flag(ctx->m.h);
-
- /* now "pad" the final partial byte the way NIST likes */
- /* get the b_cnt value (same location for all block sizes) */
- length = ctx->m.h.b_cnt;
- /* internal sanity check: there IS a partial byte in the buffer! */
- skein_assert(length != 0);
- /* partial byte bit mask */
- mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
- /* apply bit padding on final byte (in the buffer) */
- up[length - 1] = (up[length - 1] & (0 - mask)) | mask;
-
- return SKEIN_SUCCESS;
-}
-
-int skein_final(struct skein_ctx *ctx, u8 *hash)
-{
- int ret = SKEIN_FAIL;
-
- skein_assert_ret(ctx, SKEIN_FAIL);
-
- switch (ctx->skein_size) {
- case SKEIN_256:
- ret = skein_256_final(&ctx->m.s256, hash);
- break;
- case SKEIN_512:
- ret = skein_512_final(&ctx->m.s512, hash);
- break;
- case SKEIN_1024:
- ret = skein_1024_final(&ctx->m.s1024, hash);
- break;
- }
- return ret;
-}
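
The bit-padding arithmetic at the end of skein_update_bits() is terse; the helper below is a minimal stand-alone sketch of the same 10* padding rule (hypothetical, not part of the original API), assuming the final byte holds between 1 and 7 valid bits:

static u8 skein_pad_final_byte(u8 last, size_t msg_bit_cnt)
{
	/* bit position of the single '1' pad bit, MSB-first */
	u8 mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));

	/* keep the message bits, set the pad bit, clear the rest */
	return (last & (0 - mask)) | mask;
}

For example, a final byte carrying three message bits, 111xxxxx (0xE0), becomes 11110000 (0xF0): the three message bits survive, a single 1 bit is appended, and everything after it is zeroed, which is exactly what up[length - 1] receives in the code above.
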
diff --git a/drivers/staging/skein/skein_api.h b/drivers/staging/skein/skein_api.h
deleted file mode 100644
index 5df7905825da..000000000000
--- a/drivers/staging/skein/skein_api.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Copyright (c) 2010 Werner Dittmann
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
-
- */
-
-#ifndef SKEINAPI_H
-#define SKEINAPI_H
-
-/**
- * @file skein_api.h
- * @brief A Skein API and its functions.
- * @{
- *
- * This API and the functions that implement this API simplify the usage
- * of Skein. The design and the way to use the functions follow the openSSL
- * design but at the same time take care of some Skein-specific behaviour
- * and possibilities.
- *
- * The functions enable applications to create normal Skein hashes and
- * message authentication codes (MACs).
- *
- * Using these functions is simple and straightforward:
- *
- * @code
- *
- * #include "skein_api.h"
- *
- * ...
- * struct skein_ctx ctx; // a Skein hash or MAC context
- *
- * // prepare context, here for a Skein with a state size of 512 bits.
- * skein_ctx_prepare(&ctx, SKEIN_512);
- *
- * // Initialize the context to set the requested hash length in bits
- * // here request an output hash size of 31 bits (Skein supports variable
- * // output sizes, even very strange sizes)
- * skein_init(&ctx, 31);
- *
- * // Now update Skein with any number of message bits. A function that
- * // takes a number of bytes is also available.
- * skein_update_bits(&ctx, message, msg_length);
- *
- * // Now get the result of the Skein hash. The output buffer must be
- * // large enough to hold the requested number of output bits. The application
- * // may now extract the bits.
- * skein_final(&ctx, result);
- * ...
- * @endcode
- *
- * An application may use @c skein_reset to reset a Skein context and use
- * it for creation of another hash with the same Skein state size and output
- * bit length. In this case the API implementation restores some
- * internal state data and saves a full Skein initialization round.
- *
- * To create a MAC the application just uses @c skein_mac_init instead of
- * @c skein_init. All other function calls remain the same.
- *
- */
-
-#include <linux/types.h>
-#include "skein_base.h"
-
-/**
- * Which Skein size to use
- */
-enum skein_size {
- SKEIN_256 = 256, /*!< Skein with 256 bit state */
- SKEIN_512 = 512, /*!< Skein with 512 bit state */
- SKEIN_1024 = 1024 /*!< Skein with 1024 bit state */
-};
-
-/**
- * Context for Skein.
- *
- * This structure was set up with some know-how of the internal
- * Skein structures, in particular the ordering of header and size-dependent
- * variables. If the Skein implementation changes this, then adapt these
- * structures as well.
- */
-struct skein_ctx {
- u64 skein_size;
- u64 x_save[SKEIN_MAX_STATE_WORDS]; /* save area for state variables */
- union {
- struct skein_ctx_hdr h;
- struct skein_256_ctx s256;
- struct skein_512_ctx s512;
- struct skein_1024_ctx s1024;
- } m;
-};
-
-/**
- * Prepare a Skein context.
- *
- * An application must call this function before it can use the Skein
- * context. The function clears memory and initializes size-dependent
- * variables.
- *
- * @param ctx
- * Pointer to a Skein context.
- * @param size
- * Which Skein size to use.
- * @return
- * SKEIN_SUCCESS or SKEIN_FAIL
- */
-int skein_ctx_prepare(struct skein_ctx *ctx, enum skein_size size);
-
-/**
- * Initialize a Skein context.
- *
- * Initializes the context with this data and saves the resulting Skein
- * state variables for further use.
- *
- * @param ctx
- * Pointer to a Skein context.
- * @param hash_bit_len
- * Number of MAC hash bits to compute
- * @return
- * SKEIN_SUCCESS or SKEIN_FAIL
- * @see skein_reset
- */
-int skein_init(struct skein_ctx *ctx, size_t hash_bit_len);
-
-/**
- * Resets a Skein context for further use.
- *
- * Restores the saved chaining variables to reset the Skein context.
- * Thus applications can reuse the same setup to process several
- * messages. This saves a complete Skein initialization cycle.
- *
- * @param ctx
- * Pointer to a pre-initialized Skein MAC context
- */
-void skein_reset(struct skein_ctx *ctx);
-
-/**
- * Initializes a Skein context for MAC usage.
- *
- * Initializes the context with this data and saves the resulting Skein
- * state variables for further use.
- *
- * Applications call the normal Skein functions to update the MAC and
- * get the final result.
- *
- * @param ctx
- * Pointer to an empty or preinitialized Skein MAC context
- * @param key
- * Pointer to key bytes or NULL
- * @param key_len
- * Length of the key in bytes or zero
- * @param hash_bit_len
- * Number of MAC hash bits to compute
- * @return
- * SKEIN_SUCCESS or SKEIN_FAIL
- */
-int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len,
- size_t hash_bit_len);
-
-/**
- * Update Skein with the next part of the message.
- *
- * @param ctx
- * Pointer to initialized Skein context
- * @param msg
- * Pointer to the message.
- * @param msg_byte_cnt
- * Length of the message in @b bytes
- * @return
- * Success or error code.
- */
-int skein_update(struct skein_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt);
-
-/**
- * Update the hash with a message bit string.
- *
- * Skein can handle data not only as bytes but also as bit strings of
- * arbitrary length (up to its maximum design size).
- *
- * @param ctx
- * Pointer to initialized Skein context
- * @param msg
- * Pointer to the message.
- * @param msg_bit_cnt
- * Length of the message in @b bits.
- */
-int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
- size_t msg_bit_cnt);
-
-/**
- * Finalize Skein and return the hash.
- *
- * Before an application can reuse a Skein setup the application must
- * reset the Skein context.
- *
- * @param ctx
- * Pointer to initialized Skein context
- * @param hash
- * Pointer to buffer that receives the hash. The buffer must be large
- * enough to store @c hash_bit_len bits.
- * @return
- * Success or error code.
- * @see skein_reset
- */
-int skein_final(struct skein_ctx *ctx, u8 *hash);
-
-/**
- * @}
- */
-#endif
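
The header comment above sketches plain hashing; the MAC flow it mentions differs only in the init call. A corresponding sketch, where the 512-bit state size, the 256-bit MAC length, and the abbreviated error handling are all illustrative choices:

#include "skein_api.h"

static int example_mac(const u8 *key, size_t key_len,
		       const u8 *msg, size_t msg_len,
		       u8 *mac /* must hold 256 bits = 32 bytes */)
{
	struct skein_ctx ctx;
	int ret;

	ret = skein_ctx_prepare(&ctx, SKEIN_512);
	if (ret != SKEIN_SUCCESS)
		return ret;

	/* skein_mac_init() replaces skein_init(); the rest is identical */
	ret = skein_mac_init(&ctx, key, key_len, 256);
	if (ret != SKEIN_SUCCESS)
		return ret;

	ret = skein_update(&ctx, msg, msg_len);
	if (ret != SKEIN_SUCCESS)
		return ret;

	ret = skein_final(&ctx, mac);

	/* skein_reset(&ctx) would restore the saved chaining variables
	 * for another MAC under the same key, skipping re-initialization.
	 */
	return ret;
}
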
diff --git a/drivers/staging/skein/skein_base.c b/drivers/staging/skein/skein_base.c
deleted file mode 100644
index 8db858a11875..000000000000
--- a/drivers/staging/skein/skein_base.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/***********************************************************************
- **
- ** Implementation of the Skein hash function.
- **
- ** Source code author: Doug Whiting, 2008.
- **
- ** This algorithm and source code is released to the public domain.
- **
- ************************************************************************/
-
-#include <linux/string.h> /* get the memcpy/memset functions */
-#include <linux/export.h>
-#include "skein_base.h" /* get the Skein API definitions */
-#include "skein_iv.h" /* get precomputed IVs */
-#include "skein_block.h"
-
-/*****************************************************************/
-/* 256-bit Skein */
-/*****************************************************************/
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* init the context for a straight hashing operation */
-int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len)
-{
- union {
- u8 b[SKEIN_256_STATE_BYTES];
- u64 w[SKEIN_256_STATE_WORDS];
- } cfg; /* config block */
-
- skein_assert_ret(hash_bit_len > 0, SKEIN_BAD_HASHLEN);
- ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */
-
- switch (hash_bit_len) { /* use pre-computed values, where available */
- case 256:
- memcpy(ctx->x, SKEIN_256_IV_256, sizeof(ctx->x));
- break;
- case 224:
- memcpy(ctx->x, SKEIN_256_IV_224, sizeof(ctx->x));
- break;
- case 160:
- memcpy(ctx->x, SKEIN_256_IV_160, sizeof(ctx->x));
- break;
- case 128:
- memcpy(ctx->x, SKEIN_256_IV_128, sizeof(ctx->x));
- break;
- default:
- /* here if there is no precomputed IV value available */
- /*
- * build/process the config block, type == CONFIG (could be
- * precomputed)
- */
- /* set tweaks: T0=0; T1=CFG | FINAL */
- skein_start_new_type(ctx, CFG_FINAL);
-
- /* set the schema, version */
- cfg.w[0] = skein_swap64(SKEIN_SCHEMA_VER);
- /* hash result length in bits */
- cfg.w[1] = skein_swap64(hash_bit_len);
- cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
- /* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
-
- /* compute the initial chaining values from config block */
- /* zero the chaining variables */
- memset(ctx->x, 0, sizeof(ctx->x));
- skein_256_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
- break;
- }
- /* The chaining vars ctx->x are now initialized for hash_bit_len. */
- /* Set up to process the data message portion of the hash (default) */
- skein_start_new_type(ctx, MSG); /* T0=0, T1= MSG type */
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* init the context for a MAC and/or tree hash operation */
-/*
- * [identical to skein_256_init() when key_bytes == 0 && \
- * tree_info == SKEIN_CFG_TREE_INFO_SEQUENTIAL]
- */
-int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
- u64 tree_info, const u8 *key, size_t key_bytes)
-{
- union {
- u8 b[SKEIN_256_STATE_BYTES];
- u64 w[SKEIN_256_STATE_WORDS];
- } cfg; /* config block */
-
- skein_assert_ret(hash_bit_len > 0, SKEIN_BAD_HASHLEN);
- skein_assert_ret(key_bytes == 0 || key, SKEIN_FAIL);
-
- /* compute the initial chaining values ctx->x[], based on key */
- if (key_bytes == 0) { /* is there a key? */
- /* no key: use all zeroes as key for config block */
- memset(ctx->x, 0, sizeof(ctx->x));
- } else { /* here to pre-process a key */
- skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
- /* do a mini-Init right here */
- /* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
- /* set tweaks: T0 = 0; T1 = KEY type */
- skein_start_new_type(ctx, KEY);
- /* zero the initial chaining variables */
- memset(ctx->x, 0, sizeof(ctx->x));
- /* hash the key */
- skein_256_update(ctx, key, key_bytes);
- /* put result into cfg.b[] */
- skein_256_final_pad(ctx, cfg.b);
- /* copy over into ctx->x[] */
- memcpy(ctx->x, cfg.b, sizeof(cfg.b));
- }
- /*
- * build/process the config block, type == CONFIG (could be
- * precomputed for each key)
- */
- /* output hash bit count */
- ctx->h.hash_bit_len = hash_bit_len;
- skein_start_new_type(ctx, CFG_FINAL);
-
- /* pre-pad cfg.w[] with zeroes */
- memset(&cfg.w, 0, sizeof(cfg.w));
- cfg.w[0] = skein_swap64(SKEIN_SCHEMA_VER);
- /* hash result length in bits */
- cfg.w[1] = skein_swap64(hash_bit_len);
- /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
- cfg.w[2] = skein_swap64(tree_info);
-
- /* compute the initial chaining values from config block */
- skein_256_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
-
- /* The chaining vars ctx->x are now initialized */
- /* Set up to process the data message portion of the hash (default) */
- skein_start_new_type(ctx, MSG);
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* process the input bytes */
-int skein_256_update(struct skein_256_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt)
-{
- size_t n;
-
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL);
-
- /* process full blocks, if any */
- if (msg_byte_cnt + ctx->h.b_cnt > SKEIN_256_BLOCK_BYTES) {
- /* finish up any buffered message data */
- if (ctx->h.b_cnt) {
- /* # bytes free in buffer b[] */
- n = SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt;
- if (n) {
- /* check on our logic here */
- skein_assert(n < msg_byte_cnt);
- memcpy(&ctx->b[ctx->h.b_cnt], msg, n);
- msg_byte_cnt -= n;
- msg += n;
- ctx->h.b_cnt += n;
- }
- skein_assert(ctx->h.b_cnt == SKEIN_256_BLOCK_BYTES);
- skein_256_process_block(ctx, ctx->b, 1,
- SKEIN_256_BLOCK_BYTES);
- ctx->h.b_cnt = 0;
- }
- /*
- * now process any remaining full blocks, directly from input
- * message data
- */
- if (msg_byte_cnt > SKEIN_256_BLOCK_BYTES) {
- /* number of full blocks to process */
- n = (msg_byte_cnt - 1) / SKEIN_256_BLOCK_BYTES;
- skein_256_process_block(ctx, msg, n,
- SKEIN_256_BLOCK_BYTES);
- msg_byte_cnt -= n * SKEIN_256_BLOCK_BYTES;
- msg += n * SKEIN_256_BLOCK_BYTES;
- }
- skein_assert(ctx->h.b_cnt == 0);
- }
-
- /* copy any remaining source message data bytes into b[] */
- if (msg_byte_cnt) {
- skein_assert(msg_byte_cnt + ctx->h.b_cnt <=
- SKEIN_256_BLOCK_BYTES);
- memcpy(&ctx->b[ctx->h.b_cnt], msg, msg_byte_cnt);
- ctx->h.b_cnt += msg_byte_cnt;
- }
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* finalize the hash computation and output the result */
-int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
-{
- size_t i, n, byte_cnt;
- u64 x[SKEIN_256_STATE_WORDS];
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL);
-
- /* tag as the final block */
- ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL;
- /* zero pad b[] if necessary */
- if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
- memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
-
- /* process the final block */
- skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
-
- /* now output the result */
- /* total number of output bytes */
- byte_cnt = (ctx->h.hash_bit_len + 7) >> 3;
-
- /* run Threefish in "counter mode" to generate output */
- /* zero out b[], so it can hold the counter */
- memset(ctx->b, 0, sizeof(ctx->b));
- /* keep a local copy of counter mode "key" */
- memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
- /* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
- skein_start_new_type(ctx, OUT_FINAL);
- /* run "counter mode" */
- skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
- /* number of output bytes left to go */
- n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
- if (n >= SKEIN_256_BLOCK_BYTES)
- n = SKEIN_256_BLOCK_BYTES;
- /* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
- ctx->x, n);
- /* restore the counter mode key for next time */
- memcpy(ctx->x, x, sizeof(x));
- }
- return SKEIN_SUCCESS;
-}
-
-/*****************************************************************/
-/* 512-bit Skein */
-/*****************************************************************/
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* init the context for a straight hashing operation */
-int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len)
-{
- union {
- u8 b[SKEIN_512_STATE_BYTES];
- u64 w[SKEIN_512_STATE_WORDS];
- } cfg; /* config block */
-
- skein_assert_ret(hash_bit_len > 0, SKEIN_BAD_HASHLEN);
- ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */
-
- switch (hash_bit_len) { /* use pre-computed values, where available */
- case 512:
- memcpy(ctx->x, SKEIN_512_IV_512, sizeof(ctx->x));
- break;
- case 384:
- memcpy(ctx->x, SKEIN_512_IV_384, sizeof(ctx->x));
- break;
- case 256:
- memcpy(ctx->x, SKEIN_512_IV_256, sizeof(ctx->x));
- break;
- case 224:
- memcpy(ctx->x, SKEIN_512_IV_224, sizeof(ctx->x));
- break;
- default:
- /* here if there is no precomputed IV value available */
- /*
- * build/process the config block, type == CONFIG (could be
- * precomputed)
- */
- /* set tweaks: T0=0; T1=CFG | FINAL */
- skein_start_new_type(ctx, CFG_FINAL);
-
- /* set the schema, version */
- cfg.w[0] = skein_swap64(SKEIN_SCHEMA_VER);
- /* hash result length in bits */
- cfg.w[1] = skein_swap64(hash_bit_len);
- cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
- /* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
-
- /* compute the initial chaining values from config block */
- /* zero the chaining variables */
- memset(ctx->x, 0, sizeof(ctx->x));
- skein_512_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
- break;
- }
-
- /*
- * The chaining vars ctx->x are now initialized for the given
- * hash_bit_len.
- */
- /* Set up to process the data message portion of the hash (default) */
- skein_start_new_type(ctx, MSG); /* T0=0, T1= MSG type */
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* init the context for a MAC and/or tree hash operation */
-/*
- * [identical to skein_512_init() when key_bytes == 0 && \
- * tree_info == SKEIN_CFG_TREE_INFO_SEQUENTIAL]
- */
-int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
- u64 tree_info, const u8 *key, size_t key_bytes)
-{
- union {
- u8 b[SKEIN_512_STATE_BYTES];
- u64 w[SKEIN_512_STATE_WORDS];
- } cfg; /* config block */
-
- skein_assert_ret(hash_bit_len > 0, SKEIN_BAD_HASHLEN);
- skein_assert_ret(key_bytes == 0 || key, SKEIN_FAIL);
-
- /* compute the initial chaining values ctx->x[], based on key */
- if (key_bytes == 0) { /* is there a key? */
- /* no key: use all zeroes as key for config block */
- memset(ctx->x, 0, sizeof(ctx->x));
- } else { /* here to pre-process a key */
- skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
- /* do a mini-Init right here */
- /* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
- /* set tweaks: T0 = 0; T1 = KEY type */
- skein_start_new_type(ctx, KEY);
- /* zero the initial chaining variables */
- memset(ctx->x, 0, sizeof(ctx->x));
- /* hash the key */
- skein_512_update(ctx, key, key_bytes);
- /* put result into cfg.b[] */
- skein_512_final_pad(ctx, cfg.b);
- /* copy over into ctx->x[] */
- memcpy(ctx->x, cfg.b, sizeof(cfg.b));
- }
- /*
- * build/process the config block, type == CONFIG (could be
- * precomputed for each key)
- */
- ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */
- skein_start_new_type(ctx, CFG_FINAL);
-
- /* pre-pad cfg.w[] with zeroes */
- memset(&cfg.w, 0, sizeof(cfg.w));
- cfg.w[0] = skein_swap64(SKEIN_SCHEMA_VER);
- /* hash result length in bits */
- cfg.w[1] = skein_swap64(hash_bit_len);
- /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
- cfg.w[2] = skein_swap64(tree_info);
-
- /* compute the initial chaining values from config block */
- skein_512_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
-
- /* The chaining vars ctx->x are now initialized */
- /* Set up to process the data message portion of the hash (default) */
- skein_start_new_type(ctx, MSG);
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* process the input bytes */
-int skein_512_update(struct skein_512_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt)
-{
- size_t n;
-
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL);
-
- /* process full blocks, if any */
- if (msg_byte_cnt + ctx->h.b_cnt > SKEIN_512_BLOCK_BYTES) {
- /* finish up any buffered message data */
- if (ctx->h.b_cnt) {
- /* # bytes free in buffer b[] */
- n = SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt;
- if (n) {
- /* check on our logic here */
- skein_assert(n < msg_byte_cnt);
- memcpy(&ctx->b[ctx->h.b_cnt], msg, n);
- msg_byte_cnt -= n;
- msg += n;
- ctx->h.b_cnt += n;
- }
- skein_assert(ctx->h.b_cnt == SKEIN_512_BLOCK_BYTES);
- skein_512_process_block(ctx, ctx->b, 1,
- SKEIN_512_BLOCK_BYTES);
- ctx->h.b_cnt = 0;
- }
- /*
- * now process any remaining full blocks, directly from input
- * message data
- */
- if (msg_byte_cnt > SKEIN_512_BLOCK_BYTES) {
- /* number of full blocks to process */
- n = (msg_byte_cnt - 1) / SKEIN_512_BLOCK_BYTES;
- skein_512_process_block(ctx, msg, n,
- SKEIN_512_BLOCK_BYTES);
- msg_byte_cnt -= n * SKEIN_512_BLOCK_BYTES;
- msg += n * SKEIN_512_BLOCK_BYTES;
- }
- skein_assert(ctx->h.b_cnt == 0);
- }
-
- /* copy any remaining source message data bytes into b[] */
- if (msg_byte_cnt) {
- skein_assert(msg_byte_cnt + ctx->h.b_cnt <=
- SKEIN_512_BLOCK_BYTES);
- memcpy(&ctx->b[ctx->h.b_cnt], msg, msg_byte_cnt);
- ctx->h.b_cnt += msg_byte_cnt;
- }
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* finalize the hash computation and output the result */
-int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
-{
- size_t i, n, byte_cnt;
- u64 x[SKEIN_512_STATE_WORDS];
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL);
-
- /* tag as the final block */
- ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL;
- /* zero pad b[] if necessary */
- if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
- memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
-
- /* process the final block */
- skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
-
- /* now output the result */
- /* total number of output bytes */
- byte_cnt = (ctx->h.hash_bit_len + 7) >> 3;
-
- /* run Threefish in "counter mode" to generate output */
- /* zero out b[], so it can hold the counter */
- memset(ctx->b, 0, sizeof(ctx->b));
- /* keep a local copy of counter mode "key" */
- memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
- /* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
- skein_start_new_type(ctx, OUT_FINAL);
- /* run "counter mode" */
- skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
- /* number of output bytes left to go */
- n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
- if (n >= SKEIN_512_BLOCK_BYTES)
- n = SKEIN_512_BLOCK_BYTES;
- /* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
- ctx->x, n);
- /* restore the counter mode key for next time */
- memcpy(ctx->x, x, sizeof(x));
- }
- return SKEIN_SUCCESS;
-}
-
-/*****************************************************************/
-/* 1024-bit Skein */
-/*****************************************************************/
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* init the context for a straight hashing operation */
-int skein_1024_init(struct skein_1024_ctx *ctx, size_t hash_bit_len)
-{
- union {
- u8 b[SKEIN_1024_STATE_BYTES];
- u64 w[SKEIN_1024_STATE_WORDS];
- } cfg; /* config block */
-
- skein_assert_ret(hash_bit_len > 0, SKEIN_BAD_HASHLEN);
- ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */
-
- switch (hash_bit_len) { /* use pre-computed values, where available */
- case 512:
- memcpy(ctx->x, SKEIN_1024_IV_512, sizeof(ctx->x));
- break;
- case 384:
- memcpy(ctx->x, SKEIN_1024_IV_384, sizeof(ctx->x));
- break;
- case 1024:
- memcpy(ctx->x, SKEIN_1024_IV_1024, sizeof(ctx->x));
- break;
- default:
- /* here if there is no precomputed IV value available */
- /*
- * build/process the config block, type == CONFIG
- * (could be precomputed)
- */
- /* set tweaks: T0=0; T1=CFG | FINAL */
- skein_start_new_type(ctx, CFG_FINAL);
-
- /* set the schema, version */
- cfg.w[0] = skein_swap64(SKEIN_SCHEMA_VER);
- /* hash result length in bits */
- cfg.w[1] = skein_swap64(hash_bit_len);
- cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
- /* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
-
- /* compute the initial chaining values from config block */
- /* zero the chaining variables */
- memset(ctx->x, 0, sizeof(ctx->x));
- skein_1024_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
- break;
- }
-
- /* The chaining vars ctx->x are now initialized for the hash_bit_len. */
- /* Set up to process the data message portion of the hash (default) */
- skein_start_new_type(ctx, MSG); /* T0=0, T1= MSG type */
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* init the context for a MAC and/or tree hash operation */
-/*
- * [identical to skein_1024_init() when key_bytes == 0 && \
- * tree_info == SKEIN_CFG_TREE_INFO_SEQUENTIAL]
- */
-int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
- u64 tree_info, const u8 *key, size_t key_bytes)
-{
- union {
- u8 b[SKEIN_1024_STATE_BYTES];
- u64 w[SKEIN_1024_STATE_WORDS];
- } cfg; /* config block */
-
- skein_assert_ret(hash_bit_len > 0, SKEIN_BAD_HASHLEN);
- skein_assert_ret(key_bytes == 0 || key, SKEIN_FAIL);
-
- /* compute the initial chaining values ctx->x[], based on key */
- if (key_bytes == 0) { /* is there a key? */
- /* no key: use all zeroes as key for config block */
- memset(ctx->x, 0, sizeof(ctx->x));
- } else { /* here to pre-process a key */
- skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
- /* do a mini-Init right here */
- /* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
- /* set tweaks: T0 = 0; T1 = KEY type */
- skein_start_new_type(ctx, KEY);
- /* zero the initial chaining variables */
- memset(ctx->x, 0, sizeof(ctx->x));
- /* hash the key */
- skein_1024_update(ctx, key, key_bytes);
- /* put result into cfg.b[] */
- skein_1024_final_pad(ctx, cfg.b);
- /* copy over into ctx->x[] */
- memcpy(ctx->x, cfg.b, sizeof(cfg.b));
- }
- /*
- * build/process the config block, type == CONFIG (could be
- * precomputed for each key)
- */
- /* output hash bit count */
- ctx->h.hash_bit_len = hash_bit_len;
- skein_start_new_type(ctx, CFG_FINAL);
-
- /* pre-pad cfg.w[] with zeroes */
- memset(&cfg.w, 0, sizeof(cfg.w));
- cfg.w[0] = skein_swap64(SKEIN_SCHEMA_VER);
- /* hash result length in bits */
- cfg.w[1] = skein_swap64(hash_bit_len);
- /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
- cfg.w[2] = skein_swap64(tree_info);
-
- /* compute the initial chaining values from config block */
- skein_1024_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
-
- /* The chaining vars ctx->x are now initialized */
- /* Set up to process the data message portion of the hash (default) */
- skein_start_new_type(ctx, MSG);
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* process the input bytes */
-int skein_1024_update(struct skein_1024_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt)
-{
- size_t n;
-
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL);
-
- /* process full blocks, if any */
- if (msg_byte_cnt + ctx->h.b_cnt > SKEIN_1024_BLOCK_BYTES) {
- /* finish up any buffered message data */
- if (ctx->h.b_cnt) {
- /* # bytes free in buffer b[] */
- n = SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt;
- if (n) {
- /* check on our logic here */
- skein_assert(n < msg_byte_cnt);
- memcpy(&ctx->b[ctx->h.b_cnt], msg, n);
- msg_byte_cnt -= n;
- msg += n;
- ctx->h.b_cnt += n;
- }
- skein_assert(ctx->h.b_cnt == SKEIN_1024_BLOCK_BYTES);
- skein_1024_process_block(ctx, ctx->b, 1,
- SKEIN_1024_BLOCK_BYTES);
- ctx->h.b_cnt = 0;
- }
- /*
- * now process any remaining full blocks, directly from input
- * message data
- */
- if (msg_byte_cnt > SKEIN_1024_BLOCK_BYTES) {
- /* number of full blocks to process */
- n = (msg_byte_cnt - 1) / SKEIN_1024_BLOCK_BYTES;
- skein_1024_process_block(ctx, msg, n,
- SKEIN_1024_BLOCK_BYTES);
- msg_byte_cnt -= n * SKEIN_1024_BLOCK_BYTES;
- msg += n * SKEIN_1024_BLOCK_BYTES;
- }
- skein_assert(ctx->h.b_cnt == 0);
- }
-
- /* copy any remaining source message data bytes into b[] */
- if (msg_byte_cnt) {
- skein_assert(msg_byte_cnt + ctx->h.b_cnt <=
- SKEIN_1024_BLOCK_BYTES);
- memcpy(&ctx->b[ctx->h.b_cnt], msg, msg_byte_cnt);
- ctx->h.b_cnt += msg_byte_cnt;
- }
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* finalize the hash computation and output the result */
-int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
-{
- size_t i, n, byte_cnt;
- u64 x[SKEIN_1024_STATE_WORDS];
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL);
-
- /* tag as the final block */
- ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL;
- /* zero pad b[] if necessary */
- if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
- memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
-
- /* process the final block */
- skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
-
- /* now output the result */
- /* total number of output bytes */
- byte_cnt = (ctx->h.hash_bit_len + 7) >> 3;
-
- /* run Threefish in "counter mode" to generate output */
- /* zero out b[], so it can hold the counter */
- memset(ctx->b, 0, sizeof(ctx->b));
- /* keep a local copy of counter mode "key" */
- memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
- /* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
- skein_start_new_type(ctx, OUT_FINAL);
- /* run "counter mode" */
- skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
- /* number of output bytes left to go */
- n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
- if (n >= SKEIN_1024_BLOCK_BYTES)
- n = SKEIN_1024_BLOCK_BYTES;
- /* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
- ctx->x, n);
- /* restore the counter mode key for next time */
- memcpy(ctx->x, x, sizeof(x));
- }
- return SKEIN_SUCCESS;
-}
-
-/**************** Functions to support MAC/tree hashing ***************/
-/* (this code is identical for Optimized and Reference versions) */
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* finalize the hash computation and output the block, no OUTPUT stage */
-int skein_256_final_pad(struct skein_256_ctx *ctx, u8 *hash_val)
-{
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL);
-
- /* tag as the final block */
- ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL;
- /* zero pad b[] if necessary */
- if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
- memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
- /* process the final block */
- skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
-
- /* "output" the state bytes */
- skein_put64_lsb_first(hash_val, ctx->x, SKEIN_256_BLOCK_BYTES);
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* finalize the hash computation and output the block, no OUTPUT stage */
-int skein_512_final_pad(struct skein_512_ctx *ctx, u8 *hash_val)
-{
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL);
-
- /* tag as the final block */
- ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL;
- /* zero pad b[] if necessary */
- if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
- memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
- /* process the final block */
- skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
-
- /* "output" the state bytes */
- skein_put64_lsb_first(hash_val, ctx->x, SKEIN_512_BLOCK_BYTES);
-
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* finalize the hash computation and output the block, no OUTPUT stage */
-int skein_1024_final_pad(struct skein_1024_ctx *ctx, u8 *hash_val)
-{
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL);
-
- /* tag as the final block */
- ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL;
- /* zero pad b[] if necessary */
- if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
- memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
- /* process the final block */
- skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
-
- /* "output" the state bytes */
- skein_put64_lsb_first(hash_val, ctx->x, SKEIN_1024_BLOCK_BYTES);
-
- return SKEIN_SUCCESS;
-}
-
-#if SKEIN_TREE_HASH
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* just do the OUTPUT stage */
-int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val)
-{
- size_t i, n, byte_cnt;
- u64 x[SKEIN_256_STATE_WORDS];
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL);
-
- /* now output the result */
- /* total number of output bytes */
- byte_cnt = (ctx->h.hash_bit_len + 7) >> 3;
-
- /* run Threefish in "counter mode" to generate output */
- /* zero out b[], so it can hold the counter */
- memset(ctx->b, 0, sizeof(ctx->b));
- /* keep a local copy of counter mode "key" */
- memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
- /* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
- skein_start_new_type(ctx, OUT_FINAL);
- /* run "counter mode" */
- skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
- /* number of output bytes left to go */
- n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
- if (n >= SKEIN_256_BLOCK_BYTES)
- n = SKEIN_256_BLOCK_BYTES;
- /* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
- ctx->x, n);
- /* restore the counter mode key for next time */
- memcpy(ctx->x, x, sizeof(x));
- }
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* just do the OUTPUT stage */
-int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val)
-{
- size_t i, n, byte_cnt;
- u64 x[SKEIN_512_STATE_WORDS];
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL);
-
- /* now output the result */
- /* total number of output bytes */
- byte_cnt = (ctx->h.hash_bit_len + 7) >> 3;
-
- /* run Threefish in "counter mode" to generate output */
- /* zero out b[], so it can hold the counter */
- memset(ctx->b, 0, sizeof(ctx->b));
- /* keep a local copy of counter mode "key" */
- memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
- /* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
- skein_start_new_type(ctx, OUT_FINAL);
- /* run "counter mode" */
- skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
- /* number of output bytes left to go */
- n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
- if (n >= SKEIN_512_BLOCK_BYTES)
- n = SKEIN_512_BLOCK_BYTES;
- /* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
- ctx->x, n);
- /* restore the counter mode key for next time */
- memcpy(ctx->x, x, sizeof(x));
- }
- return SKEIN_SUCCESS;
-}
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-/* just do the OUTPUT stage */
-int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val)
-{
- size_t i, n, byte_cnt;
- u64 x[SKEIN_1024_STATE_WORDS];
- /* catch uninitialized context */
- skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL);
-
- /* now output the result */
- /* total number of output bytes */
- byte_cnt = (ctx->h.hash_bit_len + 7) >> 3;
-
- /* run Threefish in "counter mode" to generate output */
- /* zero out b[], so it can hold the counter */
- memset(ctx->b, 0, sizeof(ctx->b));
- /* keep a local copy of counter mode "key" */
- memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
- /* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
- skein_start_new_type(ctx, OUT_FINAL);
- /* run "counter mode" */
- skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
- /* number of output bytes left to go */
- n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
- if (n >= SKEIN_1024_BLOCK_BYTES)
- n = SKEIN_1024_BLOCK_BYTES;
- /* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
- ctx->x, n);
- /* restore the counter mode key for next time */
- memcpy(ctx->x, x, sizeof(x));
- }
- return SKEIN_SUCCESS;
-}
-#endif
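
Each _final() and _output() routine above ends with the same Threefish counter-mode output stage. The arithmetic that bounds its loop is easy to miss; a hypothetical helper, shown only to make the block count concrete:

static size_t skein_output_blocks(size_t hash_bit_len, size_t block_bytes)
{
	/* total output bytes, rounding a partial byte up */
	size_t byte_cnt = (hash_bit_len + 7) >> 3;

	/* counter-mode blocks needed to cover byte_cnt */
	return (byte_cnt + block_bytes - 1) / block_bytes;
}

For Skein-512 (block_bytes = 64), a 31-bit request needs 4 bytes and hence one counter block; a 520-bit request needs 65 bytes and hence two blocks, 64 bytes from counter value 0 and the final byte from counter value 1, with the saved chaining "key" restored between them.
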
diff --git a/drivers/staging/skein/skein_base.h b/drivers/staging/skein/skein_base.h
deleted file mode 100644
index cd794c1bc1bb..000000000000
--- a/drivers/staging/skein/skein_base.h
+++ /dev/null
@@ -1,336 +0,0 @@
-#ifndef _SKEIN_H_
-#define _SKEIN_H_ 1
-/*
- **************************************************************************
- *
- * Interface declarations and internal definitions for Skein hashing.
- *
- * Source code author: Doug Whiting, 2008.
- *
- * This algorithm and source code is released to the public domain.
- *
- **************************************************************************
- *
- * The following compile-time switches may be defined to control some
- * tradeoffs between speed, code size, error checking, and security.
- *
- * The "default" note explains what happens when the switch is not defined.
- *
- * SKEIN_ERR_CHECK -- how error checking is handled inside Skein
- * code. If not defined, most error checking
- * is disabled (for performance). Otherwise,
- * the switch value is interpreted as:
- * 0: use assert() to flag errors
- * 1: return SKEIN_FAIL to flag errors
- *
- **************************************************************************
- */
-
-/* Skein digest sizes for crypto API */
-#define SKEIN256_DIGEST_BIT_SIZE 256
-#define SKEIN512_DIGEST_BIT_SIZE 512
-#define SKEIN1024_DIGEST_BIT_SIZE 1024
-
-/* the two prototypes below assume we are handed aligned data */
-#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt)
-#define skein_get64_lsb_first(dst64, src08, w_cnt) \
- memcpy(dst64, src08, 8 * (w_cnt))
-#define skein_swap64(w64) (w64)
-
-enum {
- SKEIN_SUCCESS = 0, /* return codes from Skein calls */
- SKEIN_FAIL = 1,
- SKEIN_BAD_HASHLEN = 2
-};
-
-#define SKEIN_MODIFIER_WORDS 2 /* number of modifier (tweak) words */
-
-#define SKEIN_256_STATE_WORDS 4
-#define SKEIN_512_STATE_WORDS 8
-#define SKEIN_1024_STATE_WORDS 16
-#define SKEIN_MAX_STATE_WORDS 16
-
-#define SKEIN_256_STATE_BYTES (8 * SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BYTES (8 * SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BYTES (8 * SKEIN_1024_STATE_WORDS)
-
-#define SKEIN_256_STATE_BITS (64 * SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BITS (64 * SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BITS (64 * SKEIN_1024_STATE_WORDS)
-
-#define SKEIN_256_BLOCK_BYTES (8 * SKEIN_256_STATE_WORDS)
-#define SKEIN_512_BLOCK_BYTES (8 * SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_BLOCK_BYTES (8 * SKEIN_1024_STATE_WORDS)
-
-struct skein_ctx_hdr {
- size_t hash_bit_len; /* size of hash result, in bits */
- size_t b_cnt; /* current byte count in buffer b[] */
- u64 tweak[SKEIN_MODIFIER_WORDS]; /* tweak[0]=byte cnt, tweak[1]=flags */
-};
-
-struct skein_256_ctx { /* 256-bit Skein hash context structure */
- struct skein_ctx_hdr h; /* common header context variables */
- u64 x[SKEIN_256_STATE_WORDS]; /* chaining variables */
- u8 b[SKEIN_256_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
-};
-
-struct skein_512_ctx { /* 512-bit Skein hash context structure */
- struct skein_ctx_hdr h; /* common header context variables */
- u64 x[SKEIN_512_STATE_WORDS]; /* chaining variables */
- u8 b[SKEIN_512_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
-};
-
-struct skein_1024_ctx { /* 1024-bit Skein hash context structure */
- struct skein_ctx_hdr h; /* common header context variables */
- u64 x[SKEIN_1024_STATE_WORDS]; /* chaining variables */
- u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
-};
-
-/* Skein APIs for (incremental) "straight hashing" */
-int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len);
-int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len);
-int skein_1024_init(struct skein_1024_ctx *ctx, size_t hash_bit_len);
-
-int skein_256_update(struct skein_256_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt);
-int skein_512_update(struct skein_512_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt);
-int skein_1024_update(struct skein_1024_ctx *ctx, const u8 *msg,
- size_t msg_byte_cnt);
-
-int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val);
-int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val);
-int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val);
-
-/*
- * Skein APIs for "extended" initialization: MAC keys, tree hashing.
- * After an init_ext() call, just use update/final calls as with init().
- *
- * Notes: Same parameters as _init() calls, plus tree_info/key/key_bytes.
- * When key_bytes == 0 and tree_info == SKEIN_SEQUENTIAL,
- * the results of init_ext() are identical to calling init().
- * The function init() may be called once to "precompute" the IV for
- * a given hash_bit_len value, then by saving a copy of the context
- * the IV computation may be avoided in later calls.
- * Similarly, the function init_ext() may be called once per MAC key
- * to precompute the MAC IV, then a copy of the context saved and
- * reused for each new MAC computation.
- */
-int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
- u64 tree_info, const u8 *key, size_t key_bytes);
-int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
- u64 tree_info, const u8 *key, size_t key_bytes);
-int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
- u64 tree_info, const u8 *key, size_t key_bytes);
-
-/*
- * Skein APIs for MAC and tree hash:
- * final_pad: pad, do final block, but no OUTPUT type
- * output: do just the output stage
- */
-int skein_256_final_pad(struct skein_256_ctx *ctx, u8 *hash_val);
-int skein_512_final_pad(struct skein_512_ctx *ctx, u8 *hash_val);
-int skein_1024_final_pad(struct skein_1024_ctx *ctx, u8 *hash_val);
-
-#ifndef SKEIN_TREE_HASH
-#define SKEIN_TREE_HASH (1)
-#endif
-#if SKEIN_TREE_HASH
-int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val);
-int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val);
-int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
-#endif
-
-/*
- *****************************************************************
- * "Internal" Skein definitions
- * -- not needed for the sequential hashing API, but will be
- * helpful for other uses of Skein (e.g., tree hash mode).
- * -- included here so that they can be shared between
- * reference and optimized code.
- *****************************************************************
- */
-
-/* tweak word tweak[1]: bit field starting positions */
-#define SKEIN_T1_BIT(BIT) ((BIT) - 64) /* second word */
-
-#define SKEIN_T1_POS_TREE_LVL SKEIN_T1_BIT(112) /* 112..118 hash tree level */
-#define SKEIN_T1_POS_BIT_PAD SKEIN_T1_BIT(119) /* 119 part. final in byte */
-#define SKEIN_T1_POS_BLK_TYPE SKEIN_T1_BIT(120) /* 120..125 type field */
-#define SKEIN_T1_POS_FIRST SKEIN_T1_BIT(126) /* 126 first blk flag */
-#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) /* 127 final blk flag */
-
-/* tweak word tweak[1]: flag bit definition(s) */
-#define SKEIN_T1_FLAG_FIRST (((u64)1) << SKEIN_T1_POS_FIRST)
-#define SKEIN_T1_FLAG_FINAL (((u64)1) << SKEIN_T1_POS_FINAL)
-#define SKEIN_T1_FLAG_BIT_PAD (((u64)1) << SKEIN_T1_POS_BIT_PAD)
-
-/* tweak word tweak[1]: tree level bit field mask */
-#define SKEIN_T1_TREE_LVL_MASK (((u64)0x7F) << SKEIN_T1_POS_TREE_LVL)
-#define SKEIN_T1_TREE_LEVEL(n) (((u64)(n)) << SKEIN_T1_POS_TREE_LVL)
-
-/* tweak word tweak[1]: block type field */
-#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */
-#define SKEIN_BLK_TYPE_CFG (4) /* configuration block */
-#define SKEIN_BLK_TYPE_PERS (8) /* personalization string */
-#define SKEIN_BLK_TYPE_PK (12) /* pubkey (for digital sigs) */
-#define SKEIN_BLK_TYPE_KDF (16) /* key identifier for KDF */
-#define SKEIN_BLK_TYPE_NONCE (20) /* nonce for PRNG */
-#define SKEIN_BLK_TYPE_MSG (48) /* message processing */
-#define SKEIN_BLK_TYPE_OUT (63) /* output stage */
-#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */
-
-#define SKEIN_T1_BLK_TYPE(T) (((u64)(SKEIN_BLK_TYPE_##T)) << \
- SKEIN_T1_POS_BLK_TYPE)
-#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY) /* for MAC and KDF */
-#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG) /* config block */
-#define SKEIN_T1_BLK_TYPE_PERS SKEIN_T1_BLK_TYPE(PERS) /* personalization */
-#define SKEIN_T1_BLK_TYPE_PK SKEIN_T1_BLK_TYPE(PK) /* pubkey (for sigs) */
-#define SKEIN_T1_BLK_TYPE_KDF SKEIN_T1_BLK_TYPE(KDF) /* key ident for KDF */
-#define SKEIN_T1_BLK_TYPE_NONCE SKEIN_T1_BLK_TYPE(NONCE)/* nonce for PRNG */
-#define SKEIN_T1_BLK_TYPE_MSG SKEIN_T1_BLK_TYPE(MSG) /* message processing */
-#define SKEIN_T1_BLK_TYPE_OUT SKEIN_T1_BLK_TYPE(OUT) /* output stage */
-#define SKEIN_T1_BLK_TYPE_MASK SKEIN_T1_BLK_TYPE(MASK) /* field bit mask */
-
-#define SKEIN_T1_BLK_TYPE_CFG_FINAL (SKEIN_T1_BLK_TYPE_CFG | \
- SKEIN_T1_FLAG_FINAL)
-#define SKEIN_T1_BLK_TYPE_OUT_FINAL (SKEIN_T1_BLK_TYPE_OUT | \
- SKEIN_T1_FLAG_FINAL)
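-
-/*
- * Worked example (editor's note): after SKEIN_T1_BIT(), the positions
- * within tweak[1] are 48 (tree level), 55 (bit pad), 56 (block type),
- * 62 (first) and 63 (final); e.g.
- *	SKEIN_T1_BLK_TYPE_CFG_FINAL = ((u64)4 << 56) | ((u64)1 << 63)
- *				    = 0x8400000000000000
- */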
-
-#define SKEIN_VERSION (1)
-
-#ifndef SKEIN_ID_STRING_LE /* allow compile-time personalization */
-#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian) */
-#endif
-
-#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64)(hi32)) << 32))
-#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE)
-#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22)
-
-#define SKEIN_CFG_STR_LEN (4 * 8)
-
-/* bit field definitions in config block tree_info word */
-#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0)
-#define SKEIN_CFG_TREE_NODE_SIZE_POS (8)
-#define SKEIN_CFG_TREE_MAX_LEVEL_POS (16)
-
-#define SKEIN_CFG_TREE_LEAF_SIZE_MSK (((u64)0xFF) << \
- SKEIN_CFG_TREE_LEAF_SIZE_POS)
-#define SKEIN_CFG_TREE_NODE_SIZE_MSK (((u64)0xFF) << \
- SKEIN_CFG_TREE_NODE_SIZE_POS)
-#define SKEIN_CFG_TREE_MAX_LEVEL_MSK (((u64)0xFF) << \
- SKEIN_CFG_TREE_MAX_LEVEL_POS)
-
-#define SKEIN_CFG_TREE_INFO(leaf, node, max_lvl) \
- ((((u64)(leaf)) << SKEIN_CFG_TREE_LEAF_SIZE_POS) | \
- (((u64)(node)) << SKEIN_CFG_TREE_NODE_SIZE_POS) | \
- (((u64)(max_lvl)) << SKEIN_CFG_TREE_MAX_LEVEL_POS))
-
-/* use as tree_info in init_ext() call for sequential processing */
-#define SKEIN_CFG_TREE_INFO_SEQUENTIAL SKEIN_CFG_TREE_INFO(0, 0, 0)
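-
-/*
- * Worked example (editor's note): SKEIN_CFG_TREE_INFO(2, 2, 255) packs
- * leaf=0x02 into bits 0..7, node=0x02 into bits 8..15 and max_lvl=0xFF
- * into bits 16..23, giving 0xFF0202; (0, 0, 0) is the sequential
- * (non-tree) encoding defined just above.
- */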
-
-/*
- * Skein macros for getting/setting tweak words, etc.
- * These are useful for partial input bytes, hash tree init/update, etc.
- */
-#define skein_get_tweak(ctx_ptr, TWK_NUM) ((ctx_ptr)->h.tweak[TWK_NUM])
-#define skein_set_tweak(ctx_ptr, TWK_NUM, t_val) { \
- (ctx_ptr)->h.tweak[TWK_NUM] = (t_val); \
- }
-
-#define skein_get_T0(ctx_ptr) skein_get_tweak(ctx_ptr, 0)
-#define skein_get_T1(ctx_ptr) skein_get_tweak(ctx_ptr, 1)
-#define skein_set_T0(ctx_ptr, T0) skein_set_tweak(ctx_ptr, 0, T0)
-#define skein_set_T1(ctx_ptr, T1) skein_set_tweak(ctx_ptr, 1, T1)
-
-/* set both tweak words at once */
-#define skein_set_T0_T1(ctx_ptr, T0, T1) \
- { \
- skein_set_T0(ctx_ptr, (T0)); \
- skein_set_T1(ctx_ptr, (T1)); \
- }
-
-#define skein_set_type(ctx_ptr, BLK_TYPE) \
- skein_set_T1(ctx_ptr, SKEIN_T1_BLK_TYPE_##BLK_TYPE)
-
-/*
- * setup for starting with a new type:
- * h.tweak[0]=0; h.tweak[1] = NEW_TYPE; h.b_cnt=0;
- */
-#define skein_start_new_type(ctx_ptr, BLK_TYPE) { \
- skein_set_T0_T1(ctx_ptr, 0, SKEIN_T1_FLAG_FIRST | \
- SKEIN_T1_BLK_TYPE_##BLK_TYPE); \
- (ctx_ptr)->h.b_cnt = 0; \
- }
-
-#define skein_clear_first_flag(hdr) { \
- (hdr).tweak[1] &= ~SKEIN_T1_FLAG_FIRST; \
- }
-#define skein_set_bit_pad_flag(hdr) { \
- (hdr).tweak[1] |= SKEIN_T1_FLAG_BIT_PAD; \
- }
-
-#define skein_set_tree_level(hdr, height) { \
- (hdr).tweak[1] |= SKEIN_T1_TREE_LEVEL(height); \
- }
-
-/* ignore all asserts, for performance */
-#define skein_assert_ret(x, ret_code)
-#define skein_assert(x)
-
-/*
- *****************************************************************
- * Skein block function constants (shared across Ref and Opt code)
- *****************************************************************
- */
-enum {
- /* SKEIN_256 round rotation constants */
- R_256_0_0 = 14, R_256_0_1 = 16,
- R_256_1_0 = 52, R_256_1_1 = 57,
- R_256_2_0 = 23, R_256_2_1 = 40,
- R_256_3_0 = 5, R_256_3_1 = 37,
- R_256_4_0 = 25, R_256_4_1 = 33,
- R_256_5_0 = 46, R_256_5_1 = 12,
- R_256_6_0 = 58, R_256_6_1 = 22,
- R_256_7_0 = 32, R_256_7_1 = 32,
-
- /* SKEIN_512 round rotation constants */
- R_512_0_0 = 46, R_512_0_1 = 36, R_512_0_2 = 19, R_512_0_3 = 37,
- R_512_1_0 = 33, R_512_1_1 = 27, R_512_1_2 = 14, R_512_1_3 = 42,
- R_512_2_0 = 17, R_512_2_1 = 49, R_512_2_2 = 36, R_512_2_3 = 39,
- R_512_3_0 = 44, R_512_3_1 = 9, R_512_3_2 = 54, R_512_3_3 = 56,
- R_512_4_0 = 39, R_512_4_1 = 30, R_512_4_2 = 34, R_512_4_3 = 24,
- R_512_5_0 = 13, R_512_5_1 = 50, R_512_5_2 = 10, R_512_5_3 = 17,
- R_512_6_0 = 25, R_512_6_1 = 29, R_512_6_2 = 39, R_512_6_3 = 43,
- R_512_7_0 = 8, R_512_7_1 = 35, R_512_7_2 = 56, R_512_7_3 = 22,
-
- /* SKEIN_1024 round rotation constants */
- R1024_0_0 = 24, R1024_0_1 = 13, R1024_0_2 = 8, R1024_0_3 = 47,
- R1024_0_4 = 8, R1024_0_5 = 17, R1024_0_6 = 22, R1024_0_7 = 37,
- R1024_1_0 = 38, R1024_1_1 = 19, R1024_1_2 = 10, R1024_1_3 = 55,
- R1024_1_4 = 49, R1024_1_5 = 18, R1024_1_6 = 23, R1024_1_7 = 52,
- R1024_2_0 = 33, R1024_2_1 = 4, R1024_2_2 = 51, R1024_2_3 = 13,
- R1024_2_4 = 34, R1024_2_5 = 41, R1024_2_6 = 59, R1024_2_7 = 17,
- R1024_3_0 = 5, R1024_3_1 = 20, R1024_3_2 = 48, R1024_3_3 = 41,
- R1024_3_4 = 47, R1024_3_5 = 28, R1024_3_6 = 16, R1024_3_7 = 25,
- R1024_4_0 = 41, R1024_4_1 = 9, R1024_4_2 = 37, R1024_4_3 = 31,
- R1024_4_4 = 12, R1024_4_5 = 47, R1024_4_6 = 44, R1024_4_7 = 30,
- R1024_5_0 = 16, R1024_5_1 = 34, R1024_5_2 = 56, R1024_5_3 = 51,
- R1024_5_4 = 4, R1024_5_5 = 53, R1024_5_6 = 42, R1024_5_7 = 41,
- R1024_6_0 = 31, R1024_6_1 = 44, R1024_6_2 = 47, R1024_6_3 = 46,
- R1024_6_4 = 19, R1024_6_5 = 42, R1024_6_6 = 44, R1024_6_7 = 25,
- R1024_7_0 = 9, R1024_7_1 = 48, R1024_7_2 = 35, R1024_7_3 = 52,
- R1024_7_4 = 23, R1024_7_5 = 31, R1024_7_6 = 37, R1024_7_7 = 20
-};
-
-#ifndef SKEIN_ROUNDS
-#define SKEIN_256_ROUNDS_TOTAL (72) /* # rounds for diff block sizes */
-#define SKEIN_512_ROUNDS_TOTAL (72)
-#define SKEIN_1024_ROUNDS_TOTAL (80)
-#else /* allow command-line define in range 8*(5..14) */
-#define SKEIN_256_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 100) + 5) % 10) + 5))
-#define SKEIN_512_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 10) + 5) % 10) + 5))
-#define SKEIN_1024_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS) + 5) % 10) + 5))
-#endif
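-
-/*
- * Worked example (editor's note): each decimal digit of SKEIN_ROUNDS
- * selects 8 * ((((digit) + 5) % 10) + 5) rounds, so digits 5..9 give
- * 40..72 rounds and digits 0..4 give 80..112.  SKEIN_ROUNDS=963 thus
- * yields 72/48/104 rounds for the 256/512/1024-bit block sizes.
- */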
-
-#endif /* ifndef _SKEIN_H_ */
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
deleted file mode 100644
index 3bc25e149034..000000000000
--- a/drivers/staging/skein/skein_block.c
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- ***********************************************************************
- *
- * Implementation of the Skein block functions.
- *
- * Source code author: Doug Whiting, 2008.
- *
- * This algorithm and source code is released to the public domain.
- *
- * Compile-time switches:
- *
- * SKEIN_USE_ASM -- set bits (256/512/1024) to select which
- * versions use ASM code for block processing
- * [default: use C for all block sizes]
- *
- ***********************************************************************
- */
-
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include "skein_base.h"
-#include "skein_block.h"
-
-/***************************** SKEIN_256 ******************************/
-#if !(SKEIN_USE_ASM & 256)
-void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add)
-{ /* do it in C */
- enum {
- WCNT = SKEIN_256_STATE_WORDS
- };
- size_t r;
-#if SKEIN_UNROLL_256
- /* key schedule: chaining vars + tweak + "rot"*/
- u64 kw[WCNT + 4 + (RCNT * 2)];
-#else
- /* key schedule words : chaining vars + tweak */
- u64 kw[WCNT + 4];
-#endif
- u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
- u64 w[WCNT]; /* local copy of input block */
-#ifdef SKEIN_DEBUG
- const u64 *X_ptr[4]; /* use for debugging (help cc put Xn in regs) */
-
- X_ptr[0] = &X0;
- X_ptr[1] = &X1;
- X_ptr[2] = &X2;
- X_ptr[3] = &X3;
-#endif
- skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
- ts[0] = ctx->h.tweak[0];
- ts[1] = ctx->h.tweak[1];
- do {
- /*
- * this implementation only supports 2**64 input bytes
- * (no carry out here)
- */
- ts[0] += byte_cnt_add; /* update processed length */
-
- /* precompute the key schedule for this block */
- ks[0] = ctx->x[0];
- ks[1] = ctx->x[1];
- ks[2] = ctx->x[2];
- ks[3] = ctx->x[3];
- ks[4] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ SKEIN_KS_PARITY;
-
- ts[2] = ts[0] ^ ts[1];
-
- /* get input block in little-endian format */
- skein_get64_lsb_first(w, blk_ptr, WCNT);
- debug_save_tweak(ctx);
-
- /* do the first full key injection */
- X0 = w[0] + ks[0];
- X1 = w[1] + ks[1] + ts[0];
- X2 = w[2] + ks[2] + ts[1];
- X3 = w[3] + ks[3];
-
- blk_ptr += SKEIN_256_BLOCK_BYTES;
-
- /* run the rounds */
- for (r = 1;
- r < (SKEIN_UNROLL_256 ? 2 * RCNT : 2);
- r += (SKEIN_UNROLL_256 ? 2 * SKEIN_UNROLL_256 : 1)) {
- R256_8_ROUNDS(0);
-#if R256_UNROLL_R(1)
- R256_8_ROUNDS(1);
-#endif
-#if R256_UNROLL_R(2)
- R256_8_ROUNDS(2);
-#endif
-#if R256_UNROLL_R(3)
- R256_8_ROUNDS(3);
-#endif
-#if R256_UNROLL_R(4)
- R256_8_ROUNDS(4);
-#endif
-#if R256_UNROLL_R(5)
- R256_8_ROUNDS(5);
-#endif
-#if R256_UNROLL_R(6)
- R256_8_ROUNDS(6);
-#endif
-#if R256_UNROLL_R(7)
- R256_8_ROUNDS(7);
-#endif
-#if R256_UNROLL_R(8)
- R256_8_ROUNDS(8);
-#endif
-#if R256_UNROLL_R(9)
- R256_8_ROUNDS(9);
-#endif
-#if R256_UNROLL_R(10)
- R256_8_ROUNDS(10);
-#endif
-#if R256_UNROLL_R(11)
- R256_8_ROUNDS(11);
-#endif
-#if R256_UNROLL_R(12)
- R256_8_ROUNDS(12);
-#endif
-#if R256_UNROLL_R(13)
- R256_8_ROUNDS(13);
-#endif
-#if R256_UNROLL_R(14)
- R256_8_ROUNDS(14);
-#endif
- }
- /* do the final "feedforward" xor, update context chaining */
- ctx->x[0] = X0 ^ w[0];
- ctx->x[1] = X1 ^ w[1];
- ctx->x[2] = X2 ^ w[2];
- ctx->x[3] = X3 ^ w[3];
-
- ts[1] &= ~SKEIN_T1_FLAG_FIRST;
- } while (--blk_cnt);
- ctx->h.tweak[0] = ts[0];
- ctx->h.tweak[1] = ts[1];
-}
-
-#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
-size_t skein_256_process_block_code_size(void)
-{
- return ((u8 *)skein_256_process_block_code_size) -
- ((u8 *)skein_256_process_block);
-}
-
-unsigned int skein_256_unroll_cnt(void)
-{
- return SKEIN_UNROLL_256;
-}
-#endif
-#endif
-
-/***************************** SKEIN_512 ******************************/
-#if !(SKEIN_USE_ASM & 512)
-void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add)
-{ /* do it in C */
- enum {
- WCNT = SKEIN_512_STATE_WORDS
- };
- size_t r;
-#if SKEIN_UNROLL_512
- /* key sched: chaining vars + tweak + "rot"*/
- u64 kw[WCNT + 4 + RCNT * 2];
-#else
- /* key schedule words : chaining vars + tweak */
- u64 kw[WCNT + 4];
-#endif
- u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */
- u64 w[WCNT]; /* local copy of input block */
-#ifdef SKEIN_DEBUG
- const u64 *X_ptr[8]; /* use for debugging (help cc put Xn in regs) */
-
- X_ptr[0] = &X0;
- X_ptr[1] = &X1;
- X_ptr[2] = &X2;
- X_ptr[3] = &X3;
- X_ptr[4] = &X4;
- X_ptr[5] = &X5;
- X_ptr[6] = &X6;
- X_ptr[7] = &X7;
-#endif
-
- skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
- ts[0] = ctx->h.tweak[0];
- ts[1] = ctx->h.tweak[1];
- do {
- /*
- * this implementation only supports 2**64 input bytes
- * (no carry out here)
- */
- ts[0] += byte_cnt_add; /* update processed length */
-
- /* precompute the key schedule for this block */
- ks[0] = ctx->x[0];
- ks[1] = ctx->x[1];
- ks[2] = ctx->x[2];
- ks[3] = ctx->x[3];
- ks[4] = ctx->x[4];
- ks[5] = ctx->x[5];
- ks[6] = ctx->x[6];
- ks[7] = ctx->x[7];
- ks[8] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
- ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ SKEIN_KS_PARITY;
-
- ts[2] = ts[0] ^ ts[1];
-
- /* get input block in little-endian format */
- skein_get64_lsb_first(w, blk_ptr, WCNT);
- debug_save_tweak(ctx);
-
- /* do the first full key injection */
- X0 = w[0] + ks[0];
- X1 = w[1] + ks[1];
- X2 = w[2] + ks[2];
- X3 = w[3] + ks[3];
- X4 = w[4] + ks[4];
- X5 = w[5] + ks[5] + ts[0];
- X6 = w[6] + ks[6] + ts[1];
- X7 = w[7] + ks[7];
-
- blk_ptr += SKEIN_512_BLOCK_BYTES;
-
- /* run the rounds */
- for (r = 1;
- r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
- r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
- R512_8_ROUNDS(0);
-
-#if R512_UNROLL_R(1)
- R512_8_ROUNDS(1);
-#endif
-#if R512_UNROLL_R(2)
- R512_8_ROUNDS(2);
-#endif
-#if R512_UNROLL_R(3)
- R512_8_ROUNDS(3);
-#endif
-#if R512_UNROLL_R(4)
- R512_8_ROUNDS(4);
-#endif
-#if R512_UNROLL_R(5)
- R512_8_ROUNDS(5);
-#endif
-#if R512_UNROLL_R(6)
- R512_8_ROUNDS(6);
-#endif
-#if R512_UNROLL_R(7)
- R512_8_ROUNDS(7);
-#endif
-#if R512_UNROLL_R(8)
- R512_8_ROUNDS(8);
-#endif
-#if R512_UNROLL_R(9)
- R512_8_ROUNDS(9);
-#endif
-#if R512_UNROLL_R(10)
- R512_8_ROUNDS(10);
-#endif
-#if R512_UNROLL_R(11)
- R512_8_ROUNDS(11);
-#endif
-#if R512_UNROLL_R(12)
- R512_8_ROUNDS(12);
-#endif
-#if R512_UNROLL_R(13)
- R512_8_ROUNDS(13);
-#endif
-#if R512_UNROLL_R(14)
- R512_8_ROUNDS(14);
-#endif
- }
-
- /* do the final "feedforward" xor, update context chaining */
- ctx->x[0] = X0 ^ w[0];
- ctx->x[1] = X1 ^ w[1];
- ctx->x[2] = X2 ^ w[2];
- ctx->x[3] = X3 ^ w[3];
- ctx->x[4] = X4 ^ w[4];
- ctx->x[5] = X5 ^ w[5];
- ctx->x[6] = X6 ^ w[6];
- ctx->x[7] = X7 ^ w[7];
-
- ts[1] &= ~SKEIN_T1_FLAG_FIRST;
- } while (--blk_cnt);
- ctx->h.tweak[0] = ts[0];
- ctx->h.tweak[1] = ts[1];
-}
-
-#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
-size_t skein_512_process_block_code_size(void)
-{
- return ((u8 *)skein_512_process_block_code_size) -
- ((u8 *)skein_512_process_block);
-}
-
-unsigned int skein_512_unroll_cnt(void)
-{
- return SKEIN_UNROLL_512;
-}
-#endif
-#endif
-
-/***************************** SKEIN_1024 ******************************/
-#if !(SKEIN_USE_ASM & 1024)
-void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add)
-{ /* do it in C, always looping (unrolled is bigger AND slower!) */
- enum {
- WCNT = SKEIN_1024_STATE_WORDS
- };
- size_t r;
-#if (SKEIN_UNROLL_1024 != 0)
- /* key sched: chaining vars + tweak + "rot" */
- u64 kw[WCNT + 4 + (RCNT * 2)];
-#else
- /* key schedule words : chaining vars + tweak */
- u64 kw[WCNT + 4];
-#endif
-
- /* local copy of vars, for speed */
- u64 X00, X01, X02, X03, X04, X05, X06, X07,
- X08, X09, X10, X11, X12, X13, X14, X15;
- u64 w[WCNT]; /* local copy of input block */
-
- skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
- ts[0] = ctx->h.tweak[0];
- ts[1] = ctx->h.tweak[1];
- do {
- /*
- * this implementation only supports 2**64 input bytes
- * (no carry out here)
- */
- ts[0] += byte_cnt_add; /* update processed length */
-
- /* precompute the key schedule for this block */
- ks[0] = ctx->x[0];
- ks[1] = ctx->x[1];
- ks[2] = ctx->x[2];
- ks[3] = ctx->x[3];
- ks[4] = ctx->x[4];
- ks[5] = ctx->x[5];
- ks[6] = ctx->x[6];
- ks[7] = ctx->x[7];
- ks[8] = ctx->x[8];
- ks[9] = ctx->x[9];
- ks[10] = ctx->x[10];
- ks[11] = ctx->x[11];
- ks[12] = ctx->x[12];
- ks[13] = ctx->x[13];
- ks[14] = ctx->x[14];
- ks[15] = ctx->x[15];
- ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
- ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^
- ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^
- ks[12] ^ ks[13] ^ ks[14] ^ ks[15] ^ SKEIN_KS_PARITY;
-
- ts[2] = ts[0] ^ ts[1];
-
- /* get input block in little-endian format */
- skein_get64_lsb_first(w, blk_ptr, WCNT);
- debug_save_tweak(ctx);
-
- /* do the first full key injection */
- X00 = w[0] + ks[0];
- X01 = w[1] + ks[1];
- X02 = w[2] + ks[2];
- X03 = w[3] + ks[3];
- X04 = w[4] + ks[4];
- X05 = w[5] + ks[5];
- X06 = w[6] + ks[6];
- X07 = w[7] + ks[7];
- X08 = w[8] + ks[8];
- X09 = w[9] + ks[9];
- X10 = w[10] + ks[10];
- X11 = w[11] + ks[11];
- X12 = w[12] + ks[12];
- X13 = w[13] + ks[13] + ts[0];
- X14 = w[14] + ks[14] + ts[1];
- X15 = w[15] + ks[15];
-
- for (r = 1;
- r < (SKEIN_UNROLL_1024 ? 2 * RCNT : 2);
- r += (SKEIN_UNROLL_1024 ? 2 * SKEIN_UNROLL_1024 : 1)) {
- R1024_8_ROUNDS(0);
-#if R1024_UNROLL_R(1)
- R1024_8_ROUNDS(1);
-#endif
-#if R1024_UNROLL_R(2)
- R1024_8_ROUNDS(2);
-#endif
-#if R1024_UNROLL_R(3)
- R1024_8_ROUNDS(3);
-#endif
-#if R1024_UNROLL_R(4)
- R1024_8_ROUNDS(4);
-#endif
-#if R1024_UNROLL_R(5)
- R1024_8_ROUNDS(5);
-#endif
-#if R1024_UNROLL_R(6)
- R1024_8_ROUNDS(6);
-#endif
-#if R1024_UNROLL_R(7)
- R1024_8_ROUNDS(7);
-#endif
-#if R1024_UNROLL_R(8)
- R1024_8_ROUNDS(8);
-#endif
-#if R1024_UNROLL_R(9)
- R1024_8_ROUNDS(9);
-#endif
-#if R1024_UNROLL_R(10)
- R1024_8_ROUNDS(10);
-#endif
-#if R1024_UNROLL_R(11)
- R1024_8_ROUNDS(11);
-#endif
-#if R1024_UNROLL_R(12)
- R1024_8_ROUNDS(12);
-#endif
-#if R1024_UNROLL_R(13)
- R1024_8_ROUNDS(13);
-#endif
-#if R1024_UNROLL_R(14)
- R1024_8_ROUNDS(14);
-#endif
- }
- /* do the final "feedforward" xor, update context chaining */
-
- ctx->x[0] = X00 ^ w[0];
- ctx->x[1] = X01 ^ w[1];
- ctx->x[2] = X02 ^ w[2];
- ctx->x[3] = X03 ^ w[3];
- ctx->x[4] = X04 ^ w[4];
- ctx->x[5] = X05 ^ w[5];
- ctx->x[6] = X06 ^ w[6];
- ctx->x[7] = X07 ^ w[7];
- ctx->x[8] = X08 ^ w[8];
- ctx->x[9] = X09 ^ w[9];
- ctx->x[10] = X10 ^ w[10];
- ctx->x[11] = X11 ^ w[11];
- ctx->x[12] = X12 ^ w[12];
- ctx->x[13] = X13 ^ w[13];
- ctx->x[14] = X14 ^ w[14];
- ctx->x[15] = X15 ^ w[15];
-
- ts[1] &= ~SKEIN_T1_FLAG_FIRST;
- blk_ptr += SKEIN_1024_BLOCK_BYTES;
- } while (--blk_cnt);
- ctx->h.tweak[0] = ts[0];
- ctx->h.tweak[1] = ts[1];
-}
-
-#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
-size_t skein_1024_process_block_code_size(void)
-{
- return ((u8 *)skein_1024_process_block_code_size) -
- ((u8 *)skein_1024_process_block);
-}
-
-unsigned int skein_1024_unroll_cnt(void)
-{
- return SKEIN_UNROLL_1024;
-}
-#endif
-#endif
diff --git a/drivers/staging/skein/skein_block.h b/drivers/staging/skein/skein_block.h
deleted file mode 100644
index b3bb3d24273b..000000000000
--- a/drivers/staging/skein/skein_block.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- ***********************************************************************
- *
- * Implementation of the Skein hash function.
- *
- * Source code author: Doug Whiting, 2008.
- *
- * This algorithm and source code is released to the public domain.
- *
- ***********************************************************************
- */
-#ifndef _SKEIN_BLOCK_H_
-#define _SKEIN_BLOCK_H_
-
-#include "skein_base.h" /* get the Skein API definitions */
-
-#ifndef SKEIN_USE_ASM
-#define SKEIN_USE_ASM (0) /* default is all C code (no ASM) */
-#endif
-
-#ifndef SKEIN_LOOP
-#define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */
-#endif
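-
-/*
- * Worked example (editor's note): the decimal digits of SKEIN_LOOP are
- * decoded per block size below (hundreds -> 256, tens -> 512,
- * units -> 1024); 0 means "compile fully unrolled", N > 0 means "loop,
- * N eight-round batches per iteration".  The default 001 therefore
- * fully unrolls 256 and 512 while looping 1024 one batch at a time.
- */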
-
-#define BLK_BITS (WCNT * 64) /* some useful definitions for code here */
-#define KW_TWK_BASE (0)
-#define KW_KEY_BASE (3)
-#define ks (kw + KW_KEY_BASE)
-#define ts (kw + KW_TWK_BASE)
-
-#ifdef SKEIN_DEBUG
-#define debug_save_tweak(ctx) \
-{ \
- ctx->h.tweak[0] = ts[0]; \
- ctx->h.tweak[1] = ts[1]; \
-}
-#else
-#define debug_save_tweak(ctx)
-#endif
-
-#if !(SKEIN_USE_ASM & 256)
-#undef RCNT
-#define RCNT (SKEIN_256_ROUNDS_TOTAL / 8)
-#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10)
-#else
-#define SKEIN_UNROLL_256 (0)
-#endif
-
-#if SKEIN_UNROLL_256
-#if (RCNT % SKEIN_UNROLL_256)
-#error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */
-#endif
-#endif
-#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
- do { \
- X##p0 += X##p1; \
- X##p1 = rol64(X##p1, ROT##_0); \
- X##p1 ^= X##p0; \
- X##p2 += X##p3; \
- X##p3 = rol64(X##p3, ROT##_1); \
- X##p3 ^= X##p2; \
- } while (0)
-
-#if SKEIN_UNROLL_256 == 0
-#define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \
- ROUND256(p0, p1, p2, p3, ROT, r_num)
-
-#define I256(R) \
- do { \
- /* inject the key schedule value */ \
- X0 += ks[((R) + 1) % 5]; \
- X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \
- X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \
- X3 += ks[((R) + 4) % 5] + (R) + 1; \
- } while (0)
-#else
-/* looping version */
-#define R256(p0, p1, p2, p3, ROT, r_num) ROUND256(p0, p1, p2, p3, ROT, r_num)
-
-#define I256(R) \
- do { \
- /* inject the key schedule value */ \
- X0 += ks[r + (R) + 0]; \
- X1 += ks[r + (R) + 1] + ts[r + (R) + 0];\
- X2 += ks[r + (R) + 2] + ts[r + (R) + 1];\
- X3 += ks[r + (R) + 3] + r + (R); \
- /* rotate key schedule */ \
- ks[r + (R) + 4] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- } while (0)
-#endif
-#define R256_8_ROUNDS(R) \
- do { \
- R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \
- R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \
- R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \
- R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \
- I256(2 * (R)); \
- R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \
- R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \
- R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \
- R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \
- I256(2 * (R) + 1); \
- } while (0)
-
-#define R256_UNROLL_R(NN) \
- ((SKEIN_UNROLL_256 == 0 && \
- SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \
- (SKEIN_UNROLL_256 > (NN)))
-
-#if (SKEIN_UNROLL_256 > 14)
-#error "need more unrolling in skein_256_process_block"
-#endif
-#endif
-
-#if !(SKEIN_USE_ASM & 512)
-#undef RCNT
-#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8)
-
-#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10)
-#else
-#define SKEIN_UNROLL_512 (0)
-#endif
-
-#if SKEIN_UNROLL_512
-#if (RCNT % SKEIN_UNROLL_512)
-#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */
-#endif
-#endif
-#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
- do { \
- X##p0 += X##p1; \
- X##p1 = rol64(X##p1, ROT##_0); \
- X##p1 ^= X##p0; \
- X##p2 += X##p3; \
- X##p3 = rol64(X##p3, ROT##_1); \
- X##p3 ^= X##p2; \
- X##p4 += X##p5; \
- X##p5 = rol64(X##p5, ROT##_2); \
- X##p5 ^= X##p4; \
- X##p6 += X##p7; \
- X##p7 = rol64(X##p7, ROT##_3); \
- X##p7 ^= X##p6; \
- } while (0)
-
-#if SKEIN_UNROLL_512 == 0
-#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \
- ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num)
-
-#define I512(R) \
- do { \
- /* inject the key schedule value */ \
- X0 += ks[((R) + 1) % 9]; \
- X1 += ks[((R) + 2) % 9]; \
- X2 += ks[((R) + 3) % 9]; \
- X3 += ks[((R) + 4) % 9]; \
- X4 += ks[((R) + 5) % 9]; \
- X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \
- X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \
- X7 += ks[((R) + 8) % 9] + (R) + 1; \
- } while (0)
-
-#else /* looping version */
-#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
- ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
-
-#define I512(R) \
- do { \
- /* inject the key schedule value */ \
- X0 += ks[r + (R) + 0]; \
- X1 += ks[r + (R) + 1]; \
- X2 += ks[r + (R) + 2]; \
- X3 += ks[r + (R) + 3]; \
- X4 += ks[r + (R) + 4]; \
- X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \
- X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \
- X7 += ks[r + (R) + 7] + r + (R); \
- /* rotate key schedule */ \
- ks[r + (R) + 8] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- } while (0)
-#endif /* end of looped code definitions */
-#define R512_8_ROUNDS(R) /* do 8 full rounds */ \
- do { \
- R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \
- R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \
- R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \
- R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \
- I512(2 * (R)); \
- R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \
- R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \
- R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \
- R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \
- I512(2 * (R) + 1); /* and key injection */ \
- } while (0)
-#define R512_UNROLL_R(NN) \
- ((SKEIN_UNROLL_512 == 0 && \
- SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \
- (SKEIN_UNROLL_512 > (NN)))
-
-#if (SKEIN_UNROLL_512 > 14)
-#error "need more unrolling in skein_512_process_block"
-#endif
-#endif
-
-#if !(SKEIN_USE_ASM & 1024)
-#undef RCNT
-#define RCNT (SKEIN_1024_ROUNDS_TOTAL / 8)
-#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10)
-#else
-#define SKEIN_UNROLL_1024 (0)
-#endif
-
-#if (SKEIN_UNROLL_1024 != 0)
-#if (RCNT % SKEIN_UNROLL_1024)
-#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */
-#endif
-#endif
-#define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, r_num) \
- do { \
- X##p0 += X##p1; \
- X##p1 = rol64(X##p1, ROT##_0); \
- X##p1 ^= X##p0; \
- X##p2 += X##p3; \
- X##p3 = rol64(X##p3, ROT##_1); \
- X##p3 ^= X##p2; \
- X##p4 += X##p5; \
- X##p5 = rol64(X##p5, ROT##_2); \
- X##p5 ^= X##p4; \
- X##p6 += X##p7; \
- X##p7 = rol64(X##p7, ROT##_3); \
- X##p7 ^= X##p6; \
- X##p8 += X##p9; \
- X##p9 = rol64(X##p9, ROT##_4); \
- X##p9 ^= X##p8; \
- X##pA += X##pB; \
- X##pB = rol64(X##pB, ROT##_5); \
- X##pB ^= X##pA; \
- X##pC += X##pD; \
- X##pD = rol64(X##pD, ROT##_6); \
- X##pD ^= X##pC; \
- X##pE += X##pF; \
- X##pF = rol64(X##pF, ROT##_7); \
- X##pF ^= X##pE; \
- } while (0)
-
-#if SKEIN_UNROLL_1024 == 0
-#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
- ROT, rn) \
- ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn) \
-
-#define I1024(R) \
- do { \
- /* inject the key schedule value */ \
- X00 += ks[((R) + 1) % 17]; \
- X01 += ks[((R) + 2) % 17]; \
- X02 += ks[((R) + 3) % 17]; \
- X03 += ks[((R) + 4) % 17]; \
- X04 += ks[((R) + 5) % 17]; \
- X05 += ks[((R) + 6) % 17]; \
- X06 += ks[((R) + 7) % 17]; \
- X07 += ks[((R) + 8) % 17]; \
- X08 += ks[((R) + 9) % 17]; \
- X09 += ks[((R) + 10) % 17]; \
- X10 += ks[((R) + 11) % 17]; \
- X11 += ks[((R) + 12) % 17]; \
- X12 += ks[((R) + 13) % 17]; \
- X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \
- X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \
- X15 += ks[((R) + 16) % 17] + (R) + 1; \
- } while (0)
-#else /* looping version */
-#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
- ROT, rn) \
- ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn) \
-
-#define I1024(R) \
- do { \
- /* inject the key schedule value */ \
- X00 += ks[r + (R) + 0]; \
- X01 += ks[r + (R) + 1]; \
- X02 += ks[r + (R) + 2]; \
- X03 += ks[r + (R) + 3]; \
- X04 += ks[r + (R) + 4]; \
- X05 += ks[r + (R) + 5]; \
- X06 += ks[r + (R) + 6]; \
- X07 += ks[r + (R) + 7]; \
- X08 += ks[r + (R) + 8]; \
- X09 += ks[r + (R) + 9]; \
- X10 += ks[r + (R) + 10]; \
- X11 += ks[r + (R) + 11]; \
- X12 += ks[r + (R) + 12]; \
- X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \
- X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \
- X15 += ks[r + (R) + 15] + r + (R); \
- /* rotate key schedule */ \
- ks[r + (R) + 16] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- } while (0)
-
-#endif
-#define R1024_8_ROUNDS(R) \
- do { \
- R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_0, 8 * (R) + 1); \
- R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_1, 8 * (R) + 2); \
- R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_2, 8 * (R) + 3); \
- R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_3, 8 * (R) + 4); \
- I1024(2 * (R)); \
- R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_4, 8 * (R) + 5); \
- R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_5, 8 * (R) + 6); \
- R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_6, 8 * (R) + 7); \
- R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_7, 8 * (R) + 8); \
- I1024(2 * (R) + 1); \
- } while (0)
-
-#define R1024_UNROLL_R(NN) \
- ((SKEIN_UNROLL_1024 == 0 && \
- SKEIN_1024_ROUNDS_TOTAL / 8 > (NN)) || \
- (SKEIN_UNROLL_1024 > (NN)))
-
-#if (SKEIN_UNROLL_1024 > 14)
-#error "need more unrolling in Skein_1024_Process_Block"
-#endif
-#endif
-
-void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add);
-void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add);
-void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add);
-
-#endif
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
deleted file mode 100644
index 11f5e530a75f..000000000000
--- a/drivers/staging/skein/skein_generic.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Skein (256/512/1024) Hash Algorithm.
- *
- * Derived from cryptoapi implementation, adapted for in-place
- * scatterlist interface.
- *
- * Copyright (c) Eric Rost <eric.rost@mybabylon.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-#include "skein_base.h"
-
-static int skein256_init(struct shash_desc *desc)
-{
- return skein_256_init((struct skein_256_ctx *)shash_desc_ctx(desc),
- SKEIN256_DIGEST_BIT_SIZE);
-}
-
-static int skein256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc),
- data, len);
-}
-
-static int skein256_final(struct shash_desc *desc, u8 *out)
-{
- return skein_256_final((struct skein_256_ctx *)shash_desc_ctx(desc),
- out);
-}
-
-static int skein256_export(struct shash_desc *desc, void *out)
-{
- struct skein_256_ctx *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int skein256_import(struct shash_desc *desc, const void *in)
-{
- struct skein_256_ctx *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
-static int skein512_init(struct shash_desc *desc)
-{
- return skein_512_init((struct skein_512_ctx *)shash_desc_ctx(desc),
- SKEIN512_DIGEST_BIT_SIZE);
-}
-
-static int skein512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc),
- data, len);
-}
-
-static int skein512_final(struct shash_desc *desc, u8 *out)
-{
- return skein_512_final((struct skein_512_ctx *)shash_desc_ctx(desc),
- out);
-}
-
-static int skein512_export(struct shash_desc *desc, void *out)
-{
- struct skein_512_ctx *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int skein512_import(struct shash_desc *desc, const void *in)
-{
- struct skein_512_ctx *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
-static int skein1024_init(struct shash_desc *desc)
-{
- return skein_1024_init((struct skein_1024_ctx *)shash_desc_ctx(desc),
- SKEIN1024_DIGEST_BIT_SIZE);
-}
-
-static int skein1024_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
- data, len);
-}
-
-static int skein1024_final(struct shash_desc *desc, u8 *out)
-{
- return skein_1024_final((struct skein_1024_ctx *)shash_desc_ctx(desc),
- out);
-}
-
-static int skein1024_export(struct shash_desc *desc, void *out)
-{
- struct skein_1024_ctx *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int skein1024_import(struct shash_desc *desc, const void *in)
-{
- struct skein_1024_ctx *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-
-static struct shash_alg alg256 = {
- .digestsize = (SKEIN256_DIGEST_BIT_SIZE / 8),
- .init = skein256_init,
- .update = skein256_update,
- .final = skein256_final,
- .export = skein256_export,
- .import = skein256_import,
- .descsize = sizeof(struct skein_256_ctx),
- .statesize = sizeof(struct skein_256_ctx),
- .base = {
- .cra_name = "skein256",
- .cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SKEIN_256_BLOCK_BYTES,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg alg512 = {
- .digestsize = (SKEIN512_DIGEST_BIT_SIZE / 8),
- .init = skein512_init,
- .update = skein512_update,
- .final = skein512_final,
- .export = skein512_export,
- .import = skein512_import,
- .descsize = sizeof(struct skein_512_ctx),
- .statesize = sizeof(struct skein_512_ctx),
- .base = {
- .cra_name = "skein512",
- .cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SKEIN_512_BLOCK_BYTES,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg alg1024 = {
- .digestsize = (SKEIN1024_DIGEST_BIT_SIZE / 8),
- .init = skein1024_init,
- .update = skein1024_update,
- .final = skein1024_final,
- .export = skein1024_export,
- .import = skein1024_import,
- .descsize = sizeof(struct skein_1024_ctx),
- .statesize = sizeof(struct skein_1024_ctx),
- .base = {
- .cra_name = "skein1024",
- .cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SKEIN_1024_BLOCK_BYTES,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init skein_generic_init(void)
-{
- int err;
-
- err = crypto_register_shash(&alg256);
- if (err)
- goto out;
- err = crypto_register_shash(&alg512);
- if (err)
- goto unreg256;
- err = crypto_register_shash(&alg1024);
- if (err)
- goto unreg512;
-
- return 0;
-
-unreg512:
- crypto_unregister_shash(&alg512);
-unreg256:
- crypto_unregister_shash(&alg256);
-out:
- return err;
-}
-
-static void __exit skein_generic_fini(void)
-{
- crypto_unregister_shash(&alg256);
- crypto_unregister_shash(&alg512);
- crypto_unregister_shash(&alg1024);
-}
-
-module_init(skein_generic_init);
-module_exit(skein_generic_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Skein Hash Algorithm");
-
-MODULE_ALIAS("skein");
diff --git a/drivers/staging/skein/skein_iv.h b/drivers/staging/skein/skein_iv.h
deleted file mode 100644
index 916f029da726..000000000000
--- a/drivers/staging/skein/skein_iv.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SKEIN_IV_H_
-#define _SKEIN_IV_H_
-
-#include "skein_base.h" /* get Skein macros and types */
-
-/*
- **************** Pre-computed Skein IVs *******************
- *
- * NOTE: these values are not "magic" constants, but
- * are generated using the Threefish block function.
- * They are pre-computed here only for speed; i.e., to
- * avoid the need for a Threefish call during Init().
- *
- * The IV for any fixed hash length may be pre-computed.
- * Only the most common values are included here.
- *
- ***********************************************************
- */
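-
-/*
- * Editor's sketch (assumption, not in the original file): because each
- * table is just the chaining state left after processing the config
- * block, it can be cross-checked against a plain init() call:
- *
- *	struct skein_256_ctx ctx;
- *
- *	skein_256_init(&ctx, 256);
- *	// ctx.x[] should now equal SKEIN_256_IV_256[]
- */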
-
-#define MK_64 SKEIN_MK_64
-
-/* blkSize = 256 bits. hashSize = 128 bits */
-static const u64 SKEIN_256_IV_128[] = {
- MK_64(0xE1111906, 0x964D7260),
- MK_64(0x883DAAA7, 0x7C8D811C),
- MK_64(0x10080DF4, 0x91960F7A),
- MK_64(0xCCF7DDE5, 0xB45BC1C2)
-};
-
-/* blkSize = 256 bits. hashSize = 160 bits */
-static const u64 SKEIN_256_IV_160[] = {
- MK_64(0x14202314, 0x72825E98),
- MK_64(0x2AC4E9A2, 0x5A77E590),
- MK_64(0xD47A5856, 0x8838D63E),
- MK_64(0x2DD2E496, 0x8586AB7D)
-};
-
-/* blkSize = 256 bits. hashSize = 224 bits */
-static const u64 SKEIN_256_IV_224[] = {
- MK_64(0xC6098A8C, 0x9AE5EA0B),
- MK_64(0x876D5686, 0x08C5191C),
- MK_64(0x99CB88D7, 0xD7F53884),
- MK_64(0x384BDDB1, 0xAEDDB5DE)
-};
-
-/* blkSize = 256 bits. hashSize = 256 bits */
-static const u64 SKEIN_256_IV_256[] = {
- MK_64(0xFC9DA860, 0xD048B449),
- MK_64(0x2FCA6647, 0x9FA7D833),
- MK_64(0xB33BC389, 0x6656840F),
- MK_64(0x6A54E920, 0xFDE8DA69)
-};
-
-/* blkSize = 512 bits. hashSize = 128 bits */
-static const u64 SKEIN_512_IV_128[] = {
- MK_64(0xA8BC7BF3, 0x6FBF9F52),
- MK_64(0x1E9872CE, 0xBD1AF0AA),
- MK_64(0x309B1790, 0xB32190D3),
- MK_64(0xBCFBB854, 0x3F94805C),
- MK_64(0x0DA61BCD, 0x6E31B11B),
- MK_64(0x1A18EBEA, 0xD46A32E3),
- MK_64(0xA2CC5B18, 0xCE84AA82),
- MK_64(0x6982AB28, 0x9D46982D)
-};
-
-/* blkSize = 512 bits. hashSize = 160 bits */
-static const u64 SKEIN_512_IV_160[] = {
- MK_64(0x28B81A2A, 0xE013BD91),
- MK_64(0xC2F11668, 0xB5BDF78F),
- MK_64(0x1760D8F3, 0xF6A56F12),
- MK_64(0x4FB74758, 0x8239904F),
- MK_64(0x21EDE07F, 0x7EAF5056),
- MK_64(0xD908922E, 0x63ED70B8),
- MK_64(0xB8EC76FF, 0xECCB52FA),
- MK_64(0x01A47BB8, 0xA3F27A6E)
-};
-
-/* blkSize = 512 bits. hashSize = 224 bits */
-static const u64 SKEIN_512_IV_224[] = {
- MK_64(0xCCD06162, 0x48677224),
- MK_64(0xCBA65CF3, 0xA92339EF),
- MK_64(0x8CCD69D6, 0x52FF4B64),
- MK_64(0x398AED7B, 0x3AB890B4),
- MK_64(0x0F59D1B1, 0x457D2BD0),
- MK_64(0x6776FE65, 0x75D4EB3D),
- MK_64(0x99FBC70E, 0x997413E9),
- MK_64(0x9E2CFCCF, 0xE1C41EF7)
-};
-
-/* blkSize = 512 bits. hashSize = 256 bits */
-static const u64 SKEIN_512_IV_256[] = {
- MK_64(0xCCD044A1, 0x2FDB3E13),
- MK_64(0xE8359030, 0x1A79A9EB),
- MK_64(0x55AEA061, 0x4F816E6F),
- MK_64(0x2A2767A4, 0xAE9B94DB),
- MK_64(0xEC06025E, 0x74DD7683),
- MK_64(0xE7A436CD, 0xC4746251),
- MK_64(0xC36FBAF9, 0x393AD185),
- MK_64(0x3EEDBA18, 0x33EDFC13)
-};
-
-/* blkSize = 512 bits. hashSize = 384 bits */
-static const u64 SKEIN_512_IV_384[] = {
- MK_64(0xA3F6C6BF, 0x3A75EF5F),
- MK_64(0xB0FEF9CC, 0xFD84FAA4),
- MK_64(0x9D77DD66, 0x3D770CFE),
- MK_64(0xD798CBF3, 0xB468FDDA),
- MK_64(0x1BC4A666, 0x8A0E4465),
- MK_64(0x7ED7D434, 0xE5807407),
- MK_64(0x548FC1AC, 0xD4EC44D6),
- MK_64(0x266E1754, 0x6AA18FF8)
-};
-
-/* blkSize = 512 bits. hashSize = 512 bits */
-static const u64 SKEIN_512_IV_512[] = {
- MK_64(0x4903ADFF, 0x749C51CE),
- MK_64(0x0D95DE39, 0x9746DF03),
- MK_64(0x8FD19341, 0x27C79BCE),
- MK_64(0x9A255629, 0xFF352CB1),
- MK_64(0x5DB62599, 0xDF6CA7B0),
- MK_64(0xEABE394C, 0xA9D5C3F4),
- MK_64(0x991112C7, 0x1A75B523),
- MK_64(0xAE18A40B, 0x660FCC33)
-};
-
-/* blkSize = 1024 bits. hashSize = 384 bits */
-static const u64 SKEIN_1024_IV_384[] = {
- MK_64(0x5102B6B8, 0xC1894A35),
- MK_64(0xFEEBC9E3, 0xFE8AF11A),
- MK_64(0x0C807F06, 0xE32BED71),
- MK_64(0x60C13A52, 0xB41A91F6),
- MK_64(0x9716D35D, 0xD4917C38),
- MK_64(0xE780DF12, 0x6FD31D3A),
- MK_64(0x797846B6, 0xC898303A),
- MK_64(0xB172C2A8, 0xB3572A3B),
- MK_64(0xC9BC8203, 0xA6104A6C),
- MK_64(0x65909338, 0xD75624F4),
- MK_64(0x94BCC568, 0x4B3F81A0),
- MK_64(0x3EBBF51E, 0x10ECFD46),
- MK_64(0x2DF50F0B, 0xEEB08542),
- MK_64(0x3B5A6530, 0x0DBC6516),
- MK_64(0x484B9CD2, 0x167BBCE1),
- MK_64(0x2D136947, 0xD4CBAFEA)
-};
-
-/* blkSize = 1024 bits. hashSize = 512 bits */
-static const u64 SKEIN_1024_IV_512[] = {
- MK_64(0xCAEC0E5D, 0x7C1B1B18),
- MK_64(0xA01B0E04, 0x5F03E802),
- MK_64(0x33840451, 0xED912885),
- MK_64(0x374AFB04, 0xEAEC2E1C),
- MK_64(0xDF25A0E2, 0x813581F7),
- MK_64(0xE4004093, 0x8B12F9D2),
- MK_64(0xA662D539, 0xC2ED39B6),
- MK_64(0xFA8B85CF, 0x45D8C75A),
- MK_64(0x8316ED8E, 0x29EDE796),
- MK_64(0x053289C0, 0x2E9F91B8),
- MK_64(0xC3F8EF1D, 0x6D518B73),
- MK_64(0xBDCEC3C4, 0xD5EF332E),
- MK_64(0x549A7E52, 0x22974487),
- MK_64(0x67070872, 0x5B749816),
- MK_64(0xB9CD28FB, 0xF0581BD1),
- MK_64(0x0E2940B8, 0x15804974)
-};
-
-/* blkSize = 1024 bits. hashSize = 1024 bits */
-static const u64 SKEIN_1024_IV_1024[] = {
- MK_64(0xD593DA07, 0x41E72355),
- MK_64(0x15B5E511, 0xAC73E00C),
- MK_64(0x5180E5AE, 0xBAF2C4F0),
- MK_64(0x03BD41D3, 0xFCBCAFAF),
- MK_64(0x1CAEC6FD, 0x1983A898),
- MK_64(0x6E510B8B, 0xCDD0589F),
- MK_64(0x77E2BDFD, 0xC6394ADA),
- MK_64(0xC11E1DB5, 0x24DCB0A3),
- MK_64(0xD6D14AF9, 0xC6329AB5),
- MK_64(0x6A9B0BFC, 0x6EB67E0D),
- MK_64(0x9243C60D, 0xCCFF1332),
- MK_64(0x1A1F1DDE, 0x743F02D4),
- MK_64(0x0996753C, 0x10ED0BB8),
- MK_64(0x6572DD22, 0xF2B4969A),
- MK_64(0x61FD3062, 0xD00A579A),
- MK_64(0x1DE0536E, 0x8682E539)
-};
-
-#endif /* _SKEIN_IV_H_ */
diff --git a/drivers/staging/skein/threefish_api.c b/drivers/staging/skein/threefish_api.c
deleted file mode 100644
index e69cefa6b16a..000000000000
--- a/drivers/staging/skein/threefish_api.c
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/string.h>
-#include "threefish_api.h"
-
-void threefish_set_key(struct threefish_key *key_ctx,
- enum threefish_size state_size,
- u64 *key_data, u64 *tweak)
-{
- int key_words = state_size / 64;
- int i;
- u64 parity = KEY_SCHEDULE_CONST;
-
- key_ctx->tweak[0] = tweak[0];
- key_ctx->tweak[1] = tweak[1];
- key_ctx->tweak[2] = tweak[0] ^ tweak[1];
-
- for (i = 0; i < key_words; i++) {
- key_ctx->key[i] = key_data[i];
- parity ^= key_data[i];
- }
- key_ctx->key[i] = parity;
- key_ctx->state_size = state_size;
-}
-
-void threefish_encrypt_block_bytes(struct threefish_key *key_ctx, u8 *in,
- u8 *out)
-{
- u64 plain[SKEIN_MAX_STATE_WORDS]; /* max number of words */
- u64 cipher[SKEIN_MAX_STATE_WORDS];
-
- skein_get64_lsb_first(plain, in, key_ctx->state_size / 64);
- threefish_encrypt_block_words(key_ctx, plain, cipher);
- skein_put64_lsb_first(out, cipher, key_ctx->state_size / 8);
-}
-
-void threefish_encrypt_block_words(struct threefish_key *key_ctx, u64 *in,
- u64 *out)
-{
- switch (key_ctx->state_size) {
- case THREEFISH_256:
- threefish_encrypt_256(key_ctx, in, out);
- break;
- case THREEFISH_512:
- threefish_encrypt_512(key_ctx, in, out);
- break;
- case THREEFISH_1024:
- threefish_encrypt_1024(key_ctx, in, out);
- break;
- }
-}
-
-void threefish_decrypt_block_bytes(struct threefish_key *key_ctx, u8 *in,
- u8 *out)
-{
- u64 plain[SKEIN_MAX_STATE_WORDS]; /* max number of words */
- u64 cipher[SKEIN_MAX_STATE_WORDS];
-
- skein_get64_lsb_first(cipher, in, key_ctx->state_size / 64);
- threefish_decrypt_block_words(key_ctx, cipher, plain);
- skein_put64_lsb_first(out, plain, key_ctx->state_size / 8);
-}
-
-void threefish_decrypt_block_words(struct threefish_key *key_ctx, u64 *in,
- u64 *out)
-{
- switch (key_ctx->state_size) {
- case THREEFISH_256:
- threefish_decrypt_256(key_ctx, in, out);
- break;
- case THREEFISH_512:
- threefish_decrypt_512(key_ctx, in, out);
- break;
- case THREEFISH_1024:
- threefish_decrypt_1024(key_ctx, in, out);
- break;
- }
-}
-
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
deleted file mode 100644
index 21539c3cc7a0..000000000000
--- a/drivers/staging/skein/threefish_api.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef THREEFISHAPI_H
-#define THREEFISHAPI_H
-
-/**
- * @file threefish_api.h
- * @brief A Threefish cipher API and its functions.
- * @{
- *
- * This API and its implementing functions simplify use of the Threefish
- * cipher. The design and calling conventions follow the OpenSSL model,
- * while accounting for some Threefish-specific behaviour and
- * capabilities.
- *
- * These are the low level functions that deal with Threefish blocks only.
- * Implementations for cipher modes such as ECB, CFB, or CBC may use these
- * functions.
- *
-@code
- // Threefish cipher context data
- struct threefish_key key_ctx;
-
- // Initialize the context
- threefish_set_key(&key_ctx, THREEFISH_512, key, tweak);
-
- // Encrypt
- threefish_encrypt_block_bytes(&key_ctx, input, cipher);
-@endcode
- */
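-
-/*
- * Editor's sketch (not part of the original API; buf and len are
- * assumed caller variables): a minimal ECB-style loop over whole
- * blocks built on the block primitives below; real cipher-mode code
- * would also handle partial blocks and padding.
- *
- *	size_t blk = key_ctx.state_size / 8;	// block size in bytes
- *	u8 *p = buf;
- *
- *	for (; len >= blk; len -= blk, p += blk)
- *		threefish_encrypt_block_bytes(&key_ctx, p, p); // in place
- */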
-
-#include <linux/types.h>
-#include "skein_base.h"
-
-#define KEY_SCHEDULE_CONST 0x1BD11BDAA9FC1A22L
-
-/**
- * Which Threefish size to use
- */
-enum threefish_size {
- THREEFISH_256 = 256, /*!< Skein with 256 bit state */
- THREEFISH_512 = 512, /*!< Skein with 512 bit state */
- THREEFISH_1024 = 1024 /*!< Skein with 1024 bit state */
-};
-
-/**
- * Context for Threefish key and tweak words.
- *
- * This structure was set up with knowledge of the internal Skein
- * structures, in particular the ordering of the header and the
- * size-dependent variables. If the Skein implementation changes these,
- * adapt this structure as well.
- */
-struct threefish_key {
- u64 state_size;
- u64 key[SKEIN_MAX_STATE_WORDS + 1]; /* max number of key words */
- u64 tweak[3];
-};
-
-/**
- * Set Threefish key and tweak data.
- *
- * This function sets the key and tweak data for the Threefish cipher of
- * the given size. The key data must have the same length (number of bits)
- * as the state size.
- *
- * @param key_ctx
- * Pointer to a Threefish key structure.
- * @param state_size
- * Which Threefish size to use.
- * @param key_data
- * Pointer to the key words (word has 64 bits).
- * @param tweak
- * Pointer to the two tweak words (word has 64 bits).
- */
-void threefish_set_key(struct threefish_key *key_ctx,
- enum threefish_size state_size,
- u64 *key_data, u64 *tweak);
-
-/**
- * Encrypt Threefish block (bytes).
- *
- * The buffer must have at least the same length (number of bits) as the
- * state size for this key. The function uses the first @c state_size bits
- * of the input buffer, encrypts them and stores the result in the output
- * buffer.
- *
- * @param key_ctx
- * Pointer to a Threefish key structure.
- * @param in
- * Pointer to plaintext data buffer.
- * @param out
- * Pointer to cipher buffer.
- */
-void threefish_encrypt_block_bytes(struct threefish_key *key_ctx, u8 *in,
- u8 *out);
-
-/**
- * Encrypt Threefish block (words).
- *
- * The buffer must have at least the same length (number of bits) as the
- * state size for this key. The function uses the first @c state_size bits
- * of the input buffer, encrypts them and stores the result in the output
- * buffer.
- *
- * The word size is set to 64 bits.
- *
- * @param key_ctx
- * Pointer to a Threefish key structure.
- * @param in
- * Pointer to plaintext data buffer.
- * @param out
- * Pointer to cipher buffer.
- */
-void threefish_encrypt_block_words(struct threefish_key *key_ctx, u64 *in,
- u64 *out);
-
-/**
- * Decrypt Threefish block (bytes).
- *
- * The buffer must have at least the same length (number of bits) as the
- * state size for this key. The function uses the first @c state_size bits
- * of the input buffer, decrypts them and stores the result in the output
- * buffer.
- *
- * @param key_ctx
- * Pointer to a Threefish key structure.
- * @param in
- * Pointer to cipher data buffer.
- * @param out
- * Pointer to plaintext buffer.
- */
-void threefish_decrypt_block_bytes(struct threefish_key *key_ctx, u8 *in,
- u8 *out);
-
-/**
- * Decrypt Threefish block (words).
- *
- * The buffer must have at least the same length (number of bits) as the
- * state size for this key. The function uses the first @c state_size bits
- * of the input buffer, decrypts them and stores the result in the output
- * buffer.
- *
- * The word size is set to 64 bits.
- *
- * @param key_ctx
- * Pointer to a Threefish key structure.
- * @param in
- * Pointer to cipher data buffer.
- * @param out
- * Pointer to plaintext buffer.
- */
-void threefish_decrypt_block_words(struct threefish_key *key_ctx, u64 *in,
- u64 *out);
-
-void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
- u64 *output);
-void threefish_encrypt_512(struct threefish_key *key_ctx, u64 *input,
- u64 *output);
-void threefish_encrypt_1024(struct threefish_key *key_ctx, u64 *input,
- u64 *output);
-void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
- u64 *output);
-void threefish_decrypt_512(struct threefish_key *key_ctx, u64 *input,
- u64 *output);
-void threefish_decrypt_1024(struct threefish_key *key_ctx, u64 *input,
- u64 *output);
-/**
- * @}
- */
-#endif
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
deleted file mode 100644
index 87f055890544..000000000000
--- a/drivers/staging/skein/threefish_block.c
+++ /dev/null
@@ -1,8244 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
-#include "threefish_api.h"
-
-void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
- u64 *output)
-{
- u64 b0 = input[0], b1 = input[1],
- b2 = input[2], b3 = input[3];
- u64 k0 = key_ctx->key[0], k1 = key_ctx->key[1],
- k2 = key_ctx->key[2], k3 = key_ctx->key[3],
- k4 = key_ctx->key[4];
- u64 t0 = key_ctx->tweak[0], t1 = key_ctx->tweak[1],
- t2 = key_ctx->tweak[2];
-
- b1 += k1 + t0;
- b0 += b1 + k0;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k3;
- b2 += b3 + k2 + t1;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k2 + t1;
- b0 += b1 + k1;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k4 + 1;
- b2 += b3 + k3 + t2;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k3 + t2;
- b0 += b1 + k2;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k0 + 2;
- b2 += b3 + k4 + t0;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k4 + t0;
- b0 += b1 + k3;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k1 + 3;
- b2 += b3 + k0 + t1;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k0 + t1;
- b0 += b1 + k4;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k2 + 4;
- b2 += b3 + k1 + t2;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k1 + t2;
- b0 += b1 + k0;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k3 + 5;
- b2 += b3 + k2 + t0;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k2 + t0;
- b0 += b1 + k1;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k4 + 6;
- b2 += b3 + k3 + t1;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k3 + t1;
- b0 += b1 + k2;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k0 + 7;
- b2 += b3 + k4 + t2;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k4 + t2;
- b0 += b1 + k3;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k1 + 8;
- b2 += b3 + k0 + t0;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k0 + t0;
- b0 += b1 + k4;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k2 + 9;
- b2 += b3 + k1 + t1;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k1 + t1;
- b0 += b1 + k0;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k3 + 10;
- b2 += b3 + k2 + t2;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k2 + t2;
- b0 += b1 + k1;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k4 + 11;
- b2 += b3 + k3 + t0;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k3 + t0;
- b0 += b1 + k2;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k0 + 12;
- b2 += b3 + k4 + t1;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k4 + t1;
- b0 += b1 + k3;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k1 + 13;
- b2 += b3 + k0 + t2;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k0 + t2;
- b0 += b1 + k4;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k2 + 14;
- b2 += b3 + k1 + t0;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k1 + t0;
- b0 += b1 + k0;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k3 + 15;
- b2 += b3 + k2 + t1;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- b1 += k2 + t1;
- b0 += b1 + k1;
- b1 = rol64(b1, 14) ^ b0;
-
- b3 += k4 + 16;
- b2 += b3 + k3 + t2;
- b3 = rol64(b3, 16) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 52) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 57) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 23) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 40) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 5) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 37) ^ b2;
-
- b1 += k3 + t2;
- b0 += b1 + k2;
- b1 = rol64(b1, 25) ^ b0;
-
- b3 += k0 + 17;
- b2 += b3 + k4 + t0;
- b3 = rol64(b3, 33) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 46) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 12) ^ b2;
-
- b0 += b1;
- b1 = rol64(b1, 58) ^ b0;
-
- b2 += b3;
- b3 = rol64(b3, 22) ^ b2;
-
- b0 += b3;
- b3 = rol64(b3, 32) ^ b0;
-
- b2 += b1;
- b1 = rol64(b1, 32) ^ b2;
-
- output[0] = b0 + k3;
- output[1] = b1 + k4 + t0;
- output[2] = b2 + k0 + t1;
- output[3] = b3 + k1 + 18;
-}
-
-void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
- u64 *output)
-{
- u64 b0 = input[0], b1 = input[1],
- b2 = input[2], b3 = input[3];
- u64 k0 = key_ctx->key[0], k1 = key_ctx->key[1],
- k2 = key_ctx->key[2], k3 = key_ctx->key[3],
- k4 = key_ctx->key[4];
- u64 t0 = key_ctx->tweak[0], t1 = key_ctx->tweak[1],
- t2 = key_ctx->tweak[2];
-
- u64 tmp;
-
- b0 -= k3;
- b1 -= k4 + t0;
- b2 -= k0 + t1;
- b3 -= k1 + 18;
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k2;
- b1 -= k3 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k4 + t0;
- b3 -= k0 + 17;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k1;
- b1 -= k2 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k3 + t2;
- b3 -= k4 + 16;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k0;
- b1 -= k1 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k2 + t1;
- b3 -= k3 + 15;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k4;
- b1 -= k0 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k1 + t0;
- b3 -= k2 + 14;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k3;
- b1 -= k4 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k0 + t2;
- b3 -= k1 + 13;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k2;
- b1 -= k3 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k4 + t1;
- b3 -= k0 + 12;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k1;
- b1 -= k2 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k3 + t0;
- b3 -= k4 + 11;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k0;
- b1 -= k1 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k2 + t2;
- b3 -= k3 + 10;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k4;
- b1 -= k0 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k1 + t1;
- b3 -= k2 + 9;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k3;
- b1 -= k4 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k0 + t0;
- b3 -= k1 + 8;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k2;
- b1 -= k3 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k4 + t2;
- b3 -= k0 + 7;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k1;
- b1 -= k2 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k3 + t1;
- b3 -= k4 + 6;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k0;
- b1 -= k1 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k2 + t0;
- b3 -= k3 + 5;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k4;
- b1 -= k0 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k1 + t2;
- b3 -= k2 + 4;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k3;
- b1 -= k4 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k0 + t1;
- b3 -= k1 + 3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k2;
- b1 -= k3 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k4 + t0;
- b3 -= k0 + 2;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 32);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 32);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 58);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 22);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 46);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 12);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 25);
- b0 -= b1 + k1;
- b1 -= k2 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 33);
- b2 -= b3 + k3 + t2;
- b3 -= k4 + 1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 5);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 37);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 23);
- b0 -= b1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 40);
- b2 -= b3;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 52);
- b0 -= b3;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 57);
- b2 -= b1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 14);
- b0 -= b1 + k0;
- b1 -= k1 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 16);
- b2 -= b3 + k2 + t1;
- b3 -= k3;
-
- output[0] = b0;
- output[1] = b1;
- output[2] = b2;
- output[3] = b3;
-}
-
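threefish_decrypt_256() above is the same schedule run backwards: remove the
final subkey, then for each round undo the word permutation, rotate right, XOR,
and subtract. A sketch of the inverse loop, under the same assumptions as the
encryption sketch above and reusing its r256[] rotation table:

#define ROR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))

/* Inverse of the encryption sketch above; same hypothetical k[5]/t[3]
 * layout (parity word in k[4], t[2] = t[0] ^ t[1]). */
static void threefish256_decrypt_loop(const uint64_t k[5], const uint64_t t[3],
                                      const uint64_t in[4], uint64_t out[4])
{
        uint64_t b[4] = { in[0], in[1], in[2], in[3] }, swap;
        unsigned s;
        int d;

        /* remove the final subkey (s = 18) first */
        b[0] -= k[3];
        b[1] -= k[4] + t[0];
        b[2] -= k[0] + t[1];
        b[3] -= k[1] + 18;

        for (d = 71; d >= 0; d--) {
                /* undo the (0, 3, 2, 1) permutation, then un-MIX */
                swap = b[1];
                b[1] = b[3];
                b[3] = swap;
                b[1] = ROR64(b[1] ^ b[0], r256[d % 8][0]);
                b[0] -= b[1];
                b[3] = ROR64(b[3] ^ b[2], r256[d % 8][1]);
                b[2] -= b[3];
                if (d % 4 == 0) {               /* undo subkey injection */
                        s = d / 4;
                        b[0] -= k[s % 5];
                        b[1] -= k[(s + 1) % 5] + t[s % 3];
                        b[2] -= k[(s + 2) % 5] + t[(s + 1) % 3];
                        b[3] -= k[(s + 3) % 5] + s;
                }
        }
        out[0] = b[0]; out[1] = b[1]; out[2] = b[2]; out[3] = b[3];
}
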
-void threefish_encrypt_512(struct threefish_key *key_ctx, u64 *input,
- u64 *output)
-{
- u64 b0 = input[0], b1 = input[1],
- b2 = input[2], b3 = input[3],
- b4 = input[4], b5 = input[5],
- b6 = input[6], b7 = input[7];
- u64 k0 = key_ctx->key[0], k1 = key_ctx->key[1],
- k2 = key_ctx->key[2], k3 = key_ctx->key[3],
- k4 = key_ctx->key[4], k5 = key_ctx->key[5],
- k6 = key_ctx->key[6], k7 = key_ctx->key[7],
- k8 = key_ctx->key[8];
- u64 t0 = key_ctx->tweak[0], t1 = key_ctx->tweak[1],
- t2 = key_ctx->tweak[2];
-
- b1 += k1;
- b0 += b1 + k0;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k3;
- b2 += b3 + k2;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k5 + t0;
- b4 += b5 + k4;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k7;
- b6 += b7 + k6 + t1;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k2;
- b0 += b1 + k1;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k4;
- b2 += b3 + k3;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k6 + t1;
- b4 += b5 + k5;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k8 + 1;
- b6 += b7 + k7 + t2;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k3;
- b0 += b1 + k2;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k5;
- b2 += b3 + k4;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k7 + t2;
- b4 += b5 + k6;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k0 + 2;
- b6 += b7 + k8 + t0;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k4;
- b0 += b1 + k3;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k6;
- b2 += b3 + k5;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k8 + t0;
- b4 += b5 + k7;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k1 + 3;
- b6 += b7 + k0 + t1;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k5;
- b0 += b1 + k4;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k7;
- b2 += b3 + k6;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k0 + t1;
- b4 += b5 + k8;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k2 + 4;
- b6 += b7 + k1 + t2;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k6;
- b0 += b1 + k5;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k8;
- b2 += b3 + k7;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k1 + t2;
- b4 += b5 + k0;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k3 + 5;
- b6 += b7 + k2 + t0;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k7;
- b0 += b1 + k6;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k0;
- b2 += b3 + k8;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k2 + t0;
- b4 += b5 + k1;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k4 + 6;
- b6 += b7 + k3 + t1;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k8;
- b0 += b1 + k7;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k1;
- b2 += b3 + k0;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k3 + t1;
- b4 += b5 + k2;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k5 + 7;
- b6 += b7 + k4 + t2;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k0;
- b0 += b1 + k8;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k2;
- b2 += b3 + k1;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k4 + t2;
- b4 += b5 + k3;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k6 + 8;
- b6 += b7 + k5 + t0;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k1;
- b0 += b1 + k0;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k3;
- b2 += b3 + k2;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k5 + t0;
- b4 += b5 + k4;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k7 + 9;
- b6 += b7 + k6 + t1;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k2;
- b0 += b1 + k1;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k4;
- b2 += b3 + k3;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k6 + t1;
- b4 += b5 + k5;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k8 + 10;
- b6 += b7 + k7 + t2;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k3;
- b0 += b1 + k2;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k5;
- b2 += b3 + k4;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k7 + t2;
- b4 += b5 + k6;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k0 + 11;
- b6 += b7 + k8 + t0;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k4;
- b0 += b1 + k3;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k6;
- b2 += b3 + k5;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k8 + t0;
- b4 += b5 + k7;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k1 + 12;
- b6 += b7 + k0 + t1;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k5;
- b0 += b1 + k4;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k7;
- b2 += b3 + k6;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k0 + t1;
- b4 += b5 + k8;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k2 + 13;
- b6 += b7 + k1 + t2;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k6;
- b0 += b1 + k5;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k8;
- b2 += b3 + k7;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k1 + t2;
- b4 += b5 + k0;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k3 + 14;
- b6 += b7 + k2 + t0;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k7;
- b0 += b1 + k6;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k0;
- b2 += b3 + k8;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k2 + t0;
- b4 += b5 + k1;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k4 + 15;
- b6 += b7 + k3 + t1;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- b1 += k8;
- b0 += b1 + k7;
- b1 = rol64(b1, 46) ^ b0;
-
- b3 += k1;
- b2 += b3 + k0;
- b3 = rol64(b3, 36) ^ b2;
-
- b5 += k3 + t1;
- b4 += b5 + k2;
- b5 = rol64(b5, 19) ^ b4;
-
- b7 += k5 + 16;
- b6 += b7 + k4 + t2;
- b7 = rol64(b7, 37) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 33) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 27) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 14) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 42) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 17) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 49) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 36) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 39) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 44) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 9) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 54) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 56) ^ b4;
-
- b1 += k0;
- b0 += b1 + k8;
- b1 = rol64(b1, 39) ^ b0;
-
- b3 += k2;
- b2 += b3 + k1;
- b3 = rol64(b3, 30) ^ b2;
-
- b5 += k4 + t2;
- b4 += b5 + k3;
- b5 = rol64(b5, 34) ^ b4;
-
- b7 += k6 + 17;
- b6 += b7 + k5 + t0;
- b7 = rol64(b7, 24) ^ b6;
-
- b2 += b1;
- b1 = rol64(b1, 13) ^ b2;
-
- b4 += b7;
- b7 = rol64(b7, 50) ^ b4;
-
- b6 += b5;
- b5 = rol64(b5, 10) ^ b6;
-
- b0 += b3;
- b3 = rol64(b3, 17) ^ b0;
-
- b4 += b1;
- b1 = rol64(b1, 25) ^ b4;
-
- b6 += b3;
- b3 = rol64(b3, 29) ^ b6;
-
- b0 += b5;
- b5 = rol64(b5, 39) ^ b0;
-
- b2 += b7;
- b7 = rol64(b7, 43) ^ b2;
-
- b6 += b1;
- b1 = rol64(b1, 8) ^ b6;
-
- b0 += b7;
- b7 = rol64(b7, 35) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 56) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 22) ^ b4;
-
- output[0] = b0 + k0;
- output[1] = b1 + k1;
- output[2] = b2 + k2;
- output[3] = b3 + k3;
- output[4] = b4 + k4;
- output[5] = b5 + k5 + t0;
- output[6] = b6 + k6 + t1;
- output[7] = b7 + k7 + 18;
-}
-
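Every block width in this file relies on the same precomputed key-schedule
state: the key array carries one extra parity word (k4, k8 and k16 above),
defined by the Skein spec as C240 ^ k0 ^ ... ^ k[Nw-1] with
C240 = 0x1BD11BDAA9FC1A22, and tweak[2] = tweak[0] ^ tweak[1]. A sketch of that
setup with a hypothetical helper name:

#include <stddef.h>
#include <stdint.h>

#define KEY_SCHEDULE_PARITY 0x1BD11BDAA9FC1A22ULL      /* C240, Skein spec */

/* Hypothetical setup helper; nwords is 4, 8 or 16. key[] must have room
 * for nwords + 1 words so the parity word can be appended. */
static void threefish_prepare_key_sketch(uint64_t *key, size_t nwords,
                                         uint64_t tweak[3])
{
        uint64_t parity = KEY_SCHEDULE_PARITY;
        size_t i;

        for (i = 0; i < nwords; i++)
                parity ^= key[i];
        key[nwords] = parity;                   /* k4 / k8 / k16 above */

        tweak[2] = tweak[0] ^ tweak[1];         /* t2 used by the rounds */
}
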
-void threefish_decrypt_512(struct threefish_key *key_ctx, u64 *input,
- u64 *output)
-{
- u64 b0 = input[0], b1 = input[1],
- b2 = input[2], b3 = input[3],
- b4 = input[4], b5 = input[5],
- b6 = input[6], b7 = input[7];
- u64 k0 = key_ctx->key[0], k1 = key_ctx->key[1],
- k2 = key_ctx->key[2], k3 = key_ctx->key[3],
- k4 = key_ctx->key[4], k5 = key_ctx->key[5],
- k6 = key_ctx->key[6], k7 = key_ctx->key[7],
- k8 = key_ctx->key[8];
- u64 t0 = key_ctx->tweak[0], t1 = key_ctx->tweak[1],
- t2 = key_ctx->tweak[2];
-
- u64 tmp;
-
- b0 -= k0;
- b1 -= k1;
- b2 -= k2;
- b3 -= k3;
- b4 -= k4;
- b5 -= k5 + t0;
- b6 -= k6 + t1;
- b7 -= k7 + 18;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k5 + t0;
- b7 -= k6 + 17;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k3;
- b5 -= k4 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k1;
- b3 -= k2;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k8;
- b1 -= k0;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k4 + t2;
- b7 -= k5 + 16;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k2;
- b5 -= k3 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k0;
- b3 -= k1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k7;
- b1 -= k8;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k3 + t1;
- b7 -= k4 + 15;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k1;
- b5 -= k2 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k8;
- b3 -= k0;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k6;
- b1 -= k7;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k2 + t0;
- b7 -= k3 + 14;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k0;
- b5 -= k1 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k7;
- b3 -= k8;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k5;
- b1 -= k6;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k1 + t2;
- b7 -= k2 + 13;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k8;
- b5 -= k0 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k6;
- b3 -= k7;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k4;
- b1 -= k5;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k0 + t1;
- b7 -= k1 + 12;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k7;
- b5 -= k8 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k5;
- b3 -= k6;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k3;
- b1 -= k4;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k8 + t0;
- b7 -= k0 + 11;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k6;
- b5 -= k7 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k4;
- b3 -= k5;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k2;
- b1 -= k3;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k7 + t2;
- b7 -= k8 + 10;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k5;
- b5 -= k6 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k3;
- b3 -= k4;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k1;
- b1 -= k2;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k6 + t1;
- b7 -= k7 + 9;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k4;
- b5 -= k5 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k2;
- b3 -= k3;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k0;
- b1 -= k1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k5 + t0;
- b7 -= k6 + 8;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k3;
- b5 -= k4 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k1;
- b3 -= k2;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k8;
- b1 -= k0;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k4 + t2;
- b7 -= k5 + 7;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k2;
- b5 -= k3 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k0;
- b3 -= k1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k7;
- b1 -= k8;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k3 + t1;
- b7 -= k4 + 6;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k1;
- b5 -= k2 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k8;
- b3 -= k0;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k6;
- b1 -= k7;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k2 + t0;
- b7 -= k3 + 5;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k0;
- b5 -= k1 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k7;
- b3 -= k8;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k5;
- b1 -= k6;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k1 + t2;
- b7 -= k2 + 4;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k8;
- b5 -= k0 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k6;
- b3 -= k7;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k4;
- b1 -= k5;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k0 + t1;
- b7 -= k1 + 3;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k7;
- b5 -= k8 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k5;
- b3 -= k6;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k3;
- b1 -= k4;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k8 + t0;
- b7 -= k0 + 2;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k6;
- b5 -= k7 + t2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k4;
- b3 -= k5;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k2;
- b1 -= k3;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 22);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 56);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 35);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 8);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 43);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 39);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 29);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 25);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 17);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 10);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 50);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 13);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 24);
- b6 -= b7 + k7 + t2;
- b7 -= k8 + 1;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 34);
- b4 -= b5 + k5;
- b5 -= k6 + t1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 30);
- b2 -= b3 + k3;
- b3 -= k4;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 39);
- b0 -= b1 + k1;
- b1 -= k2;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 56);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 54);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 9);
- b0 -= b7;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 44);
- b6 -= b1;
-
- tmp = b7 ^ b2;
- b7 = ror64(tmp, 39);
- b2 -= b7;
-
- tmp = b5 ^ b0;
- b5 = ror64(tmp, 36);
- b0 -= b5;
-
- tmp = b3 ^ b6;
- b3 = ror64(tmp, 49);
- b6 -= b3;
-
- tmp = b1 ^ b4;
- b1 = ror64(tmp, 17);
- b4 -= b1;
-
- tmp = b3 ^ b0;
- b3 = ror64(tmp, 42);
- b0 -= b3;
-
- tmp = b5 ^ b6;
- b5 = ror64(tmp, 14);
- b6 -= b5;
-
- tmp = b7 ^ b4;
- b7 = ror64(tmp, 27);
- b4 -= b7;
-
- tmp = b1 ^ b2;
- b1 = ror64(tmp, 33);
- b2 -= b1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 37);
- b6 -= b7 + k6 + t1;
- b7 -= k7;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 19);
- b4 -= b5 + k4;
- b5 -= k5 + t0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 36);
- b2 -= b3 + k2;
- b3 -= k3;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 46);
- b0 -= b1 + k0;
- b1 -= k1;
-
-	output[0] = b0;
-	output[1] = b1;
-	output[2] = b2;
-	output[3] = b3;
-	output[4] = b4;
-	output[5] = b5;
-	output[6] = b6;
-	output[7] = b7;
-}
-
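Because decryption exactly mirrors encryption, a cheap sanity check is a round
trip through the 512-bit pair. A minimal sketch, assuming a userspace build of
this file where u64 is typedef'd to uint64_t and struct threefish_key exposes
the key[] (at least nine words for the 512-bit case, parity word included) and
tweak[3] members the functions above dereference; the test values are
arbitrary, not official Skein vectors:

#include <assert.h>
#include <string.h>

static void threefish512_roundtrip_sketch(void)
{
        struct threefish_key ctx = { 0 };
        u64 pt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        u64 ct[8], back[8];
        u64 parity = 0x1BD11BDAA9FC1A22ULL;     /* C240 */
        int i;

        for (i = 0; i < 8; i++) {
                ctx.key[i] = 0x0101010101010101ULL * i;
                parity ^= ctx.key[i];
        }
        ctx.key[8] = parity;                    /* k8, parity word */
        ctx.tweak[0] = 0xf0f0f0f0f0f0f0f0ULL;
        ctx.tweak[1] = 0x0f0f0f0f0f0f0f0fULL;
        ctx.tweak[2] = ctx.tweak[0] ^ ctx.tweak[1];

        threefish_encrypt_512(&ctx, pt, ct);
        threefish_decrypt_512(&ctx, ct, back);
        assert(memcmp(pt, back, sizeof(pt)) == 0);
}
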
-void threefish_encrypt_1024(struct threefish_key *key_ctx, u64 *input,
- u64 *output)
-{
- u64 b0 = input[0], b1 = input[1],
- b2 = input[2], b3 = input[3],
- b4 = input[4], b5 = input[5],
- b6 = input[6], b7 = input[7],
- b8 = input[8], b9 = input[9],
- b10 = input[10], b11 = input[11],
- b12 = input[12], b13 = input[13],
- b14 = input[14], b15 = input[15];
- u64 k0 = key_ctx->key[0], k1 = key_ctx->key[1],
- k2 = key_ctx->key[2], k3 = key_ctx->key[3],
- k4 = key_ctx->key[4], k5 = key_ctx->key[5],
- k6 = key_ctx->key[6], k7 = key_ctx->key[7],
- k8 = key_ctx->key[8], k9 = key_ctx->key[9],
- k10 = key_ctx->key[10], k11 = key_ctx->key[11],
- k12 = key_ctx->key[12], k13 = key_ctx->key[13],
- k14 = key_ctx->key[14], k15 = key_ctx->key[15],
- k16 = key_ctx->key[16];
- u64 t0 = key_ctx->tweak[0], t1 = key_ctx->tweak[1],
- t2 = key_ctx->tweak[2];
-
- b1 += k1;
- b0 += b1 + k0;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k3;
- b2 += b3 + k2;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k5;
- b4 += b5 + k4;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k7;
- b6 += b7 + k6;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k9;
- b8 += b9 + k8;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k11;
- b10 += b11 + k10;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k13 + t0;
- b12 += b13 + k12;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k15;
- b14 += b15 + k14 + t1;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k2;
- b0 += b1 + k1;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k4;
- b2 += b3 + k3;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k6;
- b4 += b5 + k5;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k8;
- b6 += b7 + k7;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k10;
- b8 += b9 + k9;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k12;
- b10 += b11 + k11;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k14 + t1;
- b12 += b13 + k13;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k16 + 1;
- b14 += b15 + k15 + t2;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k3;
- b0 += b1 + k2;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k5;
- b2 += b3 + k4;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k7;
- b4 += b5 + k6;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k9;
- b6 += b7 + k8;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k11;
- b8 += b9 + k10;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k13;
- b10 += b11 + k12;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k15 + t2;
- b12 += b13 + k14;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k0 + 2;
- b14 += b15 + k16 + t0;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k4;
- b0 += b1 + k3;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k6;
- b2 += b3 + k5;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k8;
- b4 += b5 + k7;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k10;
- b6 += b7 + k9;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k12;
- b8 += b9 + k11;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k14;
- b10 += b11 + k13;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k16 + t0;
- b12 += b13 + k15;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k1 + 3;
- b14 += b15 + k0 + t1;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k5;
- b0 += b1 + k4;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k7;
- b2 += b3 + k6;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k9;
- b4 += b5 + k8;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k11;
- b6 += b7 + k10;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k13;
- b8 += b9 + k12;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k15;
- b10 += b11 + k14;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k0 + t1;
- b12 += b13 + k16;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k2 + 4;
- b14 += b15 + k1 + t2;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k6;
- b0 += b1 + k5;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k8;
- b2 += b3 + k7;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k10;
- b4 += b5 + k9;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k12;
- b6 += b7 + k11;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k14;
- b8 += b9 + k13;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k16;
- b10 += b11 + k15;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k1 + t2;
- b12 += b13 + k0;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k3 + 5;
- b14 += b15 + k2 + t0;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k7;
- b0 += b1 + k6;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k9;
- b2 += b3 + k8;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k11;
- b4 += b5 + k10;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k13;
- b6 += b7 + k12;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k15;
- b8 += b9 + k14;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k0;
- b10 += b11 + k16;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k2 + t0;
- b12 += b13 + k1;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k4 + 6;
- b14 += b15 + k3 + t1;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k8;
- b0 += b1 + k7;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k10;
- b2 += b3 + k9;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k12;
- b4 += b5 + k11;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k14;
- b6 += b7 + k13;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k16;
- b8 += b9 + k15;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k1;
- b10 += b11 + k0;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k3 + t1;
- b12 += b13 + k2;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k5 + 7;
- b14 += b15 + k4 + t2;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k9;
- b0 += b1 + k8;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k11;
- b2 += b3 + k10;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k13;
- b4 += b5 + k12;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k15;
- b6 += b7 + k14;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k0;
- b8 += b9 + k16;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k2;
- b10 += b11 + k1;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k4 + t2;
- b12 += b13 + k3;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k6 + 8;
- b14 += b15 + k5 + t0;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k10;
- b0 += b1 + k9;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k12;
- b2 += b3 + k11;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k14;
- b4 += b5 + k13;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k16;
- b6 += b7 + k15;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k1;
- b8 += b9 + k0;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k3;
- b10 += b11 + k2;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k5 + t0;
- b12 += b13 + k4;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k7 + 9;
- b14 += b15 + k6 + t1;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k11;
- b0 += b1 + k10;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k13;
- b2 += b3 + k12;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k15;
- b4 += b5 + k14;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k0;
- b6 += b7 + k16;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k2;
- b8 += b9 + k1;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k4;
- b10 += b11 + k3;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k6 + t1;
- b12 += b13 + k5;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k8 + 10;
- b14 += b15 + k7 + t2;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k12;
- b0 += b1 + k11;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k14;
- b2 += b3 + k13;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k16;
- b4 += b5 + k15;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k1;
- b6 += b7 + k0;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k3;
- b8 += b9 + k2;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k5;
- b10 += b11 + k4;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k7 + t2;
- b12 += b13 + k6;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k9 + 11;
- b14 += b15 + k8 + t0;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k13;
- b0 += b1 + k12;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k15;
- b2 += b3 + k14;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k0;
- b4 += b5 + k16;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k2;
- b6 += b7 + k1;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k4;
- b8 += b9 + k3;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k6;
- b10 += b11 + k5;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k8 + t0;
- b12 += b13 + k7;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k10 + 12;
- b14 += b15 + k9 + t1;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k14;
- b0 += b1 + k13;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k16;
- b2 += b3 + k15;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k1;
- b4 += b5 + k0;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k3;
- b6 += b7 + k2;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k5;
- b8 += b9 + k4;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k7;
- b10 += b11 + k6;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k9 + t1;
- b12 += b13 + k8;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k11 + 13;
- b14 += b15 + k10 + t2;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k15;
- b0 += b1 + k14;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k0;
- b2 += b3 + k16;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k2;
- b4 += b5 + k1;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k4;
- b6 += b7 + k3;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k6;
- b8 += b9 + k5;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k8;
- b10 += b11 + k7;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k10 + t2;
- b12 += b13 + k9;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k12 + 14;
- b14 += b15 + k11 + t0;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k16;
- b0 += b1 + k15;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k1;
- b2 += b3 + k0;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k3;
- b4 += b5 + k2;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k5;
- b6 += b7 + k4;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k7;
- b8 += b9 + k6;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k9;
- b10 += b11 + k8;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k11 + t0;
- b12 += b13 + k10;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k13 + 15;
- b14 += b15 + k12 + t1;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k0;
- b0 += b1 + k16;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k2;
- b2 += b3 + k1;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k4;
- b4 += b5 + k3;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k6;
- b6 += b7 + k5;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k8;
- b8 += b9 + k7;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k10;
- b10 += b11 + k9;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k12 + t1;
- b12 += b13 + k11;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k14 + 16;
- b14 += b15 + k13 + t2;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k1;
- b0 += b1 + k0;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k3;
- b2 += b3 + k2;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k5;
- b4 += b5 + k4;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k7;
- b6 += b7 + k6;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k9;
- b8 += b9 + k8;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k11;
- b10 += b11 + k10;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k13 + t2;
- b12 += b13 + k12;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k15 + 17;
- b14 += b15 + k14 + t0;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- b1 += k2;
- b0 += b1 + k1;
- b1 = rol64(b1, 24) ^ b0;
-
- b3 += k4;
- b2 += b3 + k3;
- b3 = rol64(b3, 13) ^ b2;
-
- b5 += k6;
- b4 += b5 + k5;
- b5 = rol64(b5, 8) ^ b4;
-
- b7 += k8;
- b6 += b7 + k7;
- b7 = rol64(b7, 47) ^ b6;
-
- b9 += k10;
- b8 += b9 + k9;
- b9 = rol64(b9, 8) ^ b8;
-
- b11 += k12;
- b10 += b11 + k11;
- b11 = rol64(b11, 17) ^ b10;
-
- b13 += k14 + t0;
- b12 += b13 + k13;
- b13 = rol64(b13, 22) ^ b12;
-
- b15 += k16 + 18;
- b14 += b15 + k15 + t1;
- b15 = rol64(b15, 37) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 38) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 19) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 10) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 55) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 49) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 18) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 23) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 52) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 33) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 4) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 51) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 13) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 34) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 41) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 59) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 17) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 5) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 20) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 48) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 41) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 47) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 28) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 16) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 25) ^ b12;
-
- b1 += k3;
- b0 += b1 + k2;
- b1 = rol64(b1, 41) ^ b0;
-
- b3 += k5;
- b2 += b3 + k4;
- b3 = rol64(b3, 9) ^ b2;
-
- b5 += k7;
- b4 += b5 + k6;
- b5 = rol64(b5, 37) ^ b4;
-
- b7 += k9;
- b6 += b7 + k8;
- b7 = rol64(b7, 31) ^ b6;
-
- b9 += k11;
- b8 += b9 + k10;
- b9 = rol64(b9, 12) ^ b8;
-
- b11 += k13;
- b10 += b11 + k12;
- b11 = rol64(b11, 47) ^ b10;
-
- b13 += k15 + t1;
- b12 += b13 + k14;
- b13 = rol64(b13, 44) ^ b12;
-
- b15 += k0 + 19;
- b14 += b15 + k16 + t2;
- b15 = rol64(b15, 30) ^ b14;
-
- b0 += b9;
- b9 = rol64(b9, 16) ^ b0;
-
- b2 += b13;
- b13 = rol64(b13, 34) ^ b2;
-
- b6 += b11;
- b11 = rol64(b11, 56) ^ b6;
-
- b4 += b15;
- b15 = rol64(b15, 51) ^ b4;
-
- b10 += b7;
- b7 = rol64(b7, 4) ^ b10;
-
- b12 += b3;
- b3 = rol64(b3, 53) ^ b12;
-
- b14 += b5;
- b5 = rol64(b5, 42) ^ b14;
-
- b8 += b1;
- b1 = rol64(b1, 41) ^ b8;
-
- b0 += b7;
- b7 = rol64(b7, 31) ^ b0;
-
- b2 += b5;
- b5 = rol64(b5, 44) ^ b2;
-
- b4 += b3;
- b3 = rol64(b3, 47) ^ b4;
-
- b6 += b1;
- b1 = rol64(b1, 46) ^ b6;
-
- b12 += b15;
- b15 = rol64(b15, 19) ^ b12;
-
- b14 += b13;
- b13 = rol64(b13, 42) ^ b14;
-
- b8 += b11;
- b11 = rol64(b11, 44) ^ b8;
-
- b10 += b9;
- b9 = rol64(b9, 25) ^ b10;
-
- b0 += b15;
- b15 = rol64(b15, 9) ^ b0;
-
- b2 += b11;
- b11 = rol64(b11, 48) ^ b2;
-
- b6 += b13;
- b13 = rol64(b13, 35) ^ b6;
-
- b4 += b9;
- b9 = rol64(b9, 52) ^ b4;
-
- b14 += b1;
- b1 = rol64(b1, 23) ^ b14;
-
- b8 += b5;
- b5 = rol64(b5, 31) ^ b8;
-
- b10 += b3;
- b3 = rol64(b3, 37) ^ b10;
-
- b12 += b7;
- b7 = rol64(b7, 20) ^ b12;
-
- output[0] = b0 + k3;
- output[1] = b1 + k4;
- output[2] = b2 + k5;
- output[3] = b3 + k6;
- output[4] = b4 + k7;
- output[5] = b5 + k8;
- output[6] = b6 + k9;
- output[7] = b7 + k10;
- output[8] = b8 + k11;
- output[9] = b9 + k12;
- output[10] = b10 + k13;
- output[11] = b11 + k14;
- output[12] = b12 + k15;
- output[13] = b13 + k16 + t2;
- output[14] = b14 + k0 + t0;
- output[15] = b15 + k1 + 20;
-}
-
-void threefish_decrypt_1024(struct threefish_key *key_ctx, u64 *input,
- u64 *output)
-{
- u64 b0 = input[0], b1 = input[1],
- b2 = input[2], b3 = input[3],
- b4 = input[4], b5 = input[5],
- b6 = input[6], b7 = input[7],
- b8 = input[8], b9 = input[9],
- b10 = input[10], b11 = input[11],
- b12 = input[12], b13 = input[13],
- b14 = input[14], b15 = input[15];
- u64 k0 = key_ctx->key[0], k1 = key_ctx->key[1],
- k2 = key_ctx->key[2], k3 = key_ctx->key[3],
- k4 = key_ctx->key[4], k5 = key_ctx->key[5],
- k6 = key_ctx->key[6], k7 = key_ctx->key[7],
- k8 = key_ctx->key[8], k9 = key_ctx->key[9],
- k10 = key_ctx->key[10], k11 = key_ctx->key[11],
- k12 = key_ctx->key[12], k13 = key_ctx->key[13],
- k14 = key_ctx->key[14], k15 = key_ctx->key[15],
- k16 = key_ctx->key[16];
- u64 t0 = key_ctx->tweak[0], t1 = key_ctx->tweak[1],
- t2 = key_ctx->tweak[2];
- u64 tmp;
-
- b0 -= k3;
- b1 -= k4;
- b2 -= k5;
- b3 -= k6;
- b4 -= k7;
- b5 -= k8;
- b6 -= k9;
- b7 -= k10;
- b8 -= k11;
- b9 -= k12;
- b10 -= k13;
- b11 -= k14;
- b12 -= k15;
- b13 -= k16 + t2;
- b14 -= k0 + t0;
- b15 -= k1 + 20;
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k16 + t2;
- b15 -= k0 + 19;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k14;
- b13 -= k15 + t1;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k12;
- b11 -= k13;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k10;
- b9 -= k11;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k8;
- b7 -= k9;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k6;
- b5 -= k7;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k4;
- b3 -= k5;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k2;
- b1 -= k3;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k15 + t1;
- b15 -= k16 + 18;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k13;
- b13 -= k14 + t0;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k11;
- b11 -= k12;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k9;
- b9 -= k10;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k7;
- b7 -= k8;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k5;
- b5 -= k6;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k3;
- b3 -= k4;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k1;
- b1 -= k2;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k14 + t0;
- b15 -= k15 + 17;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k12;
- b13 -= k13 + t2;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k10;
- b11 -= k11;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k8;
- b9 -= k9;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k6;
- b7 -= k7;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k4;
- b5 -= k5;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k2;
- b3 -= k3;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k0;
- b1 -= k1;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k13 + t2;
- b15 -= k14 + 16;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k11;
- b13 -= k12 + t1;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k9;
- b11 -= k10;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k7;
- b9 -= k8;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k5;
- b7 -= k6;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k3;
- b5 -= k4;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k1;
- b3 -= k2;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k16;
- b1 -= k0;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k12 + t1;
- b15 -= k13 + 15;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k10;
- b13 -= k11 + t0;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k8;
- b11 -= k9;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k6;
- b9 -= k7;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k4;
- b7 -= k5;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k2;
- b5 -= k3;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k0;
- b3 -= k1;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k15;
- b1 -= k16;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k11 + t0;
- b15 -= k12 + 14;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k9;
- b13 -= k10 + t2;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k7;
- b11 -= k8;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k5;
- b9 -= k6;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k3;
- b7 -= k4;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k1;
- b5 -= k2;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k16;
- b3 -= k0;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k14;
- b1 -= k15;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k10 + t2;
- b15 -= k11 + 13;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k8;
- b13 -= k9 + t1;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k6;
- b11 -= k7;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k4;
- b9 -= k5;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k2;
- b7 -= k3;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k0;
- b5 -= k1;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k15;
- b3 -= k16;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k13;
- b1 -= k14;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k9 + t1;
- b15 -= k10 + 12;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k7;
- b13 -= k8 + t0;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k5;
- b11 -= k6;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k3;
- b9 -= k4;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k1;
- b7 -= k2;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k16;
- b5 -= k0;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k14;
- b3 -= k15;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k12;
- b1 -= k13;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k8 + t0;
- b15 -= k9 + 11;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k6;
- b13 -= k7 + t2;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k4;
- b11 -= k5;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k2;
- b9 -= k3;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k0;
- b7 -= k1;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k15;
- b5 -= k16;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k13;
- b3 -= k14;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k11;
- b1 -= k12;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k7 + t2;
- b15 -= k8 + 10;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k5;
- b13 -= k6 + t1;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k3;
- b11 -= k4;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k1;
- b9 -= k2;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k16;
- b7 -= k0;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k14;
- b5 -= k15;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k12;
- b3 -= k13;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k10;
- b1 -= k11;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k6 + t1;
- b15 -= k7 + 9;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k4;
- b13 -= k5 + t0;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k2;
- b11 -= k3;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k0;
- b9 -= k1;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k15;
- b7 -= k16;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k13;
- b5 -= k14;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k11;
- b3 -= k12;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k9;
- b1 -= k10;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k5 + t0;
- b15 -= k6 + 8;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k3;
- b13 -= k4 + t2;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k1;
- b11 -= k2;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k16;
- b9 -= k0;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k14;
- b7 -= k15;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k12;
- b5 -= k13;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k10;
- b3 -= k11;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k8;
- b1 -= k9;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k4 + t2;
- b15 -= k5 + 7;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k2;
- b13 -= k3 + t1;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k0;
- b11 -= k1;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k15;
- b9 -= k16;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k13;
- b7 -= k14;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k11;
- b5 -= k12;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k9;
- b3 -= k10;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k7;
- b1 -= k8;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k3 + t1;
- b15 -= k4 + 6;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k1;
- b13 -= k2 + t0;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k16;
- b11 -= k0;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k14;
- b9 -= k15;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k12;
- b7 -= k13;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k10;
- b5 -= k11;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k8;
- b3 -= k9;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k6;
- b1 -= k7;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k2 + t0;
- b15 -= k3 + 5;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k0;
- b13 -= k1 + t2;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k15;
- b11 -= k16;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k13;
- b9 -= k14;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k11;
- b7 -= k12;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k9;
- b5 -= k10;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k7;
- b3 -= k8;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k5;
- b1 -= k6;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k1 + t2;
- b15 -= k2 + 4;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k16;
- b13 -= k0 + t1;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k14;
- b11 -= k15;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k12;
- b9 -= k13;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k10;
- b7 -= k11;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k8;
- b5 -= k9;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k6;
- b3 -= k7;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k4;
- b1 -= k5;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k0 + t1;
- b15 -= k1 + 3;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k15;
- b13 -= k16 + t0;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k13;
- b11 -= k14;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k11;
- b9 -= k12;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k9;
- b7 -= k10;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k7;
- b5 -= k8;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k5;
- b3 -= k6;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k3;
- b1 -= k4;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k16 + t0;
- b15 -= k0 + 2;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k14;
- b13 -= k15 + t2;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k12;
- b11 -= k13;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k10;
- b9 -= k11;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k8;
- b7 -= k9;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k6;
- b5 -= k7;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k4;
- b3 -= k5;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k2;
- b1 -= k3;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 20);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 37);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 31);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 23);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 52);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 35);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 48);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 9);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 25);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 44);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 42);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 19);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 46);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 47);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 44);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 31);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 41);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 42);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 53);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 4);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 51);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 56);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 34);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 16);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 30);
- b14 -= b15 + k15 + t2;
- b15 -= k16 + 1;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 44);
- b12 -= b13 + k13;
- b13 -= k14 + t1;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 47);
- b10 -= b11 + k11;
- b11 -= k12;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 12);
- b8 -= b9 + k9;
- b9 -= k10;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 31);
- b6 -= b7 + k7;
- b7 -= k8;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 37);
- b4 -= b5 + k5;
- b5 -= k6;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 9);
- b2 -= b3 + k3;
- b3 -= k4;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 41);
- b0 -= b1 + k1;
- b1 -= k2;
-
- tmp = b7 ^ b12;
- b7 = ror64(tmp, 25);
- b12 -= b7;
-
- tmp = b3 ^ b10;
- b3 = ror64(tmp, 16);
- b10 -= b3;
-
- tmp = b5 ^ b8;
- b5 = ror64(tmp, 28);
- b8 -= b5;
-
- tmp = b1 ^ b14;
- b1 = ror64(tmp, 47);
- b14 -= b1;
-
- tmp = b9 ^ b4;
- b9 = ror64(tmp, 41);
- b4 -= b9;
-
- tmp = b13 ^ b6;
- b13 = ror64(tmp, 48);
- b6 -= b13;
-
- tmp = b11 ^ b2;
- b11 = ror64(tmp, 20);
- b2 -= b11;
-
- tmp = b15 ^ b0;
- b15 = ror64(tmp, 5);
- b0 -= b15;
-
- tmp = b9 ^ b10;
- b9 = ror64(tmp, 17);
- b10 -= b9;
-
- tmp = b11 ^ b8;
- b11 = ror64(tmp, 59);
- b8 -= b11;
-
- tmp = b13 ^ b14;
- b13 = ror64(tmp, 41);
- b14 -= b13;
-
- tmp = b15 ^ b12;
- b15 = ror64(tmp, 34);
- b12 -= b15;
-
- tmp = b1 ^ b6;
- b1 = ror64(tmp, 13);
- b6 -= b1;
-
- tmp = b3 ^ b4;
- b3 = ror64(tmp, 51);
- b4 -= b3;
-
- tmp = b5 ^ b2;
- b5 = ror64(tmp, 4);
- b2 -= b5;
-
- tmp = b7 ^ b0;
- b7 = ror64(tmp, 33);
- b0 -= b7;
-
- tmp = b1 ^ b8;
- b1 = ror64(tmp, 52);
- b8 -= b1;
-
- tmp = b5 ^ b14;
- b5 = ror64(tmp, 23);
- b14 -= b5;
-
- tmp = b3 ^ b12;
- b3 = ror64(tmp, 18);
- b12 -= b3;
-
- tmp = b7 ^ b10;
- b7 = ror64(tmp, 49);
- b10 -= b7;
-
- tmp = b15 ^ b4;
- b15 = ror64(tmp, 55);
- b4 -= b15;
-
- tmp = b11 ^ b6;
- b11 = ror64(tmp, 10);
- b6 -= b11;
-
- tmp = b13 ^ b2;
- b13 = ror64(tmp, 19);
- b2 -= b13;
-
- tmp = b9 ^ b0;
- b9 = ror64(tmp, 38);
- b0 -= b9;
-
- tmp = b15 ^ b14;
- b15 = ror64(tmp, 37);
- b14 -= b15 + k14 + t1;
- b15 -= k15;
-
- tmp = b13 ^ b12;
- b13 = ror64(tmp, 22);
- b12 -= b13 + k12;
- b13 -= k13 + t0;
-
- tmp = b11 ^ b10;
- b11 = ror64(tmp, 17);
- b10 -= b11 + k10;
- b11 -= k11;
-
- tmp = b9 ^ b8;
- b9 = ror64(tmp, 8);
- b8 -= b9 + k8;
- b9 -= k9;
-
- tmp = b7 ^ b6;
- b7 = ror64(tmp, 47);
- b6 -= b7 + k6;
- b7 -= k7;
-
- tmp = b5 ^ b4;
- b5 = ror64(tmp, 8);
- b4 -= b5 + k4;
- b5 -= k5;
-
- tmp = b3 ^ b2;
- b3 = ror64(tmp, 13);
- b2 -= b3 + k2;
- b3 -= k3;
-
- tmp = b1 ^ b0;
- b1 = ror64(tmp, 24);
- b0 -= b1 + k0;
- b1 -= k1;
-
- output[15] = b15;
- output[14] = b14;
- output[13] = b13;
- output[12] = b12;
- output[11] = b11;
- output[10] = b10;
- output[9] = b9;
- output[8] = b8;
- output[7] = b7;
- output[6] = b6;
- output[5] = b5;
- output[4] = b4;
- output[3] = b3;
- output[2] = b2;
- output[1] = b1;
- output[0] = b0;
-}
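
The rounds removed above are a fully unrolled Threefish-1024: 80 rounds of MIX-and-permute, with a subkey injected before every fourth round and once more as output whitening; the unrolling folds the word permutation into the variable names, and threefish_decrypt_1024() simply runs the same schedule backwards with ror64() and subtraction. For orientation only, here is a minimal loop-based sketch of the encrypt path. The rotation and permutation tables below are read off the unrolled code; the helper names are illustrative and not from the removed file, and key[16] (the XOR parity word) and tweak[2] are assumed precomputed by the key setup, as in the driver.

	#include <linux/types.h>
	#include <linux/bitops.h>	/* rol64() */
	#include <linux/string.h>	/* memcpy() */

	/* rotation constants, indexed by round number mod 8 */
	static const u8 rot[8][8] = {
		{ 24, 13,  8, 47,  8, 17, 22, 37 },
		{ 38, 19, 10, 55, 49, 18, 23, 52 },
		{ 33,  4, 51, 13, 34, 41, 59, 17 },
		{  5, 20, 48, 41, 47, 28, 16, 25 },
		{ 41,  9, 37, 31, 12, 47, 44, 30 },
		{ 16, 34, 56, 51,  4, 53, 42, 41 },
		{ 31, 44, 47, 46, 19, 42, 44, 25 },
		{  9, 48, 35, 52, 23, 31, 37, 20 },
	};

	/* word permutation applied after every round */
	static const u8 perm[16] = {
		0, 9, 2, 13, 6, 11, 4, 15, 10, 7, 12, 3, 14, 5, 8, 1
	};

	/* inject subkey s; k[] holds 17 words, t[] holds 3 tweak words */
	static void add_subkey(u64 b[16], const u64 *k, const u64 *t,
			       unsigned int s)
	{
		unsigned int i;

		for (i = 0; i < 16; i++)
			b[i] += k[(s + i) % 17];
		b[13] += t[s % 3];
		b[14] += t[(s + 1) % 3];
		b[15] += s;
	}

	static void threefish_1024_encrypt_loop(u64 b[16], const u64 *k,
						const u64 *t)
	{
		u64 tmp[16];
		unsigned int d, j, s = 0;

		for (d = 0; d < 80; d++) {
			if (!(d % 4))
				add_subkey(b, k, t, s++);
			for (j = 0; j < 8; j++) {	/* MIX adjacent pairs */
				b[2 * j] += b[2 * j + 1];
				b[2 * j + 1] = rol64(b[2 * j + 1],
						     rot[d % 8][j]) ^ b[2 * j];
			}
			for (j = 0; j < 16; j++)	/* permute the words */
				tmp[j] = b[perm[j]];
			memcpy(b, tmp, sizeof(tmp));
		}
		add_subkey(b, k, t, s);		/* output whitening, s == 20 */
	}

The unrolled form in the removed file trades considerable code size for avoiding the tmp[] shuffle and loop overhead on every round; the whitening indices match the removed epilogue (output[0] = b0 + k3 corresponds to k[(20 + 0) % 17]).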
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 67207b0554cd..846d7d243994 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -512,12 +512,10 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
{
struct lynxfb_par *par;
struct lynxfb_crtc *crtc;
- struct lynxfb_output *output;
resource_size_t request;
par = info->par;
crtc = &par->crtc;
- output = &par->output;
pr_debug("check var:%dx%d-%d\n",
var->xres,
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index f1f90222186b..08f11cc17371 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -388,7 +388,7 @@ static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
len = strlen(buf);
if (len < 2 || len > 9)
return -EINVAL;
- strncpy(new_synth_name, buf, len);
+ memcpy(new_synth_name, buf, len);
if (new_synth_name[len - 1] == '\n')
len--;
new_synth_name[len] = '\0';
@@ -519,7 +519,7 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
}
- strncpy(punc_buf, buf, x);
+ memcpy(punc_buf, buf, x);
while (x && punc_buf[x - 1] == '\n')
x--;
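
The two strncpy()-to-memcpy() conversions above copy a length that has already been computed and bounds-checked, so strncpy()'s zero-padding buys nothing, and because the length equals strlen() of the source it would never write a terminator anyway, which is exactly the pattern gcc 8 flags with -Wstringop-truncation. A minimal sketch of the resulting idiom, with a hypothetical helper name not taken from the patch:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* validate first, copy exactly, terminate explicitly */
	static int copy_bounded_name(char *dst, size_t dst_size, const char *src)
	{
		size_t len = strlen(src);

		if (len < 2 || len > dst_size - 1)
			return -EINVAL;
		memcpy(dst, src, len);	/* exact copy, no implicit NUL or padding */
		dst[len] = '\0';	/* terminator written by hand */
		return 0;
	}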
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index a61bc41b82d7..947c79532e10 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
int chars_sent = 0;
char __user *cp;
char *init;
+ size_t bytes_per_ch = unicode ? 3 : 1;
u16 ch;
int empty;
unsigned long flags;
DEFINE_WAIT(wait);
+ if (count < bytes_per_ch)
+ return -EINVAL;
+
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
init = get_initstring();
/* Keep 3 bytes available for a 16bit UTF-8-encoded character */
- while (chars_sent <= count - 3) {
+ while (chars_sent <= count - bytes_per_ch) {
if (speakup_info.flushing) {
speakup_info.flushing = 0;
ch = '\x18';
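
softsynthx_read() above now derives the per-character reservation from the mode: a 16-bit character occupies up to three bytes once UTF-8-encoded, so in Unicode mode a read smaller than three bytes is rejected up front and the copy loop stops while that much space remains, while plain Latin-1 output needs only one byte per character. A sketch of the encoding that motivates the 3; this is an illustrative helper, not the driver's actual encoder:

	#include <linux/types.h>

	/* a u16 code point expands to 1, 2 or 3 UTF-8 bytes */
	static int utf8_encode_u16(char *out, u16 ch)
	{
		if (ch < 0x80) {		/* U+0000..U+007F: 1 byte */
			out[0] = ch;
			return 1;
		}
		if (ch < 0x800) {		/* U+0080..U+07FF: 2 bytes */
			out[0] = 0xc0 | (ch >> 6);
			out[1] = 0x80 | (ch & 0x3f);
			return 2;
		}
		out[0] = 0xe0 | (ch >> 12);	/* U+0800..U+FFFF: 3 bytes */
		out[1] = 0x80 | ((ch >> 6) & 0x3f);
		out[2] = 0x80 | (ch & 0x3f);
		return 3;
	}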
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index 3e082dc3d45c..a2fc72c29894 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -160,6 +160,8 @@ struct spk_io_ops {
};
struct spk_synth {
+ struct list_head node;
+
const char *name;
const char *version;
const char *long_name;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 7deeb7061018..25f259ee4ffc 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -18,8 +18,7 @@
#include "speakup.h"
#include "serialio.h"
-#define MAXSYNTHS 16 /* Max number of synths in array. */
-static struct spk_synth *synths[MAXSYNTHS + 1];
+static LIST_HEAD(synths);
struct spk_synth *synth;
char spk_pitch_buff[32] = "";
static int module_status;
@@ -355,9 +354,8 @@ struct var_t synth_time_vars[] = {
/* called by: speakup_init() */
int synth_init(char *synth_name)
{
- int i;
int ret = 0;
- struct spk_synth *synth = NULL;
+ struct spk_synth *tmp, *synth = NULL;
if (!synth_name)
return 0;
@@ -371,9 +369,10 @@ int synth_init(char *synth_name)
mutex_lock(&spk_mutex);
/* First, check if we already have it loaded. */
- for (i = 0; i < MAXSYNTHS && synths[i]; i++)
- if (strcmp(synths[i]->name, synth_name) == 0)
- synth = synths[i];
+ list_for_each_entry(tmp, &synths, node) {
+ if (strcmp(tmp->name, synth_name) == 0)
+ synth = tmp;
+ }
/* If we got one, initialize it now. */
if (synth)
@@ -448,29 +447,23 @@ void synth_release(void)
/* called by: all_driver_init() */
int synth_add(struct spk_synth *in_synth)
{
- int i;
int status = 0;
+ struct spk_synth *tmp;
mutex_lock(&spk_mutex);
- for (i = 0; i < MAXSYNTHS && synths[i]; i++)
- /* synth_remove() is responsible for rotating the array down */
- if (in_synth == synths[i]) {
+
+ list_for_each_entry(tmp, &synths, node) {
+ if (tmp == in_synth) {
mutex_unlock(&spk_mutex);
return 0;
}
- if (i == MAXSYNTHS) {
- pr_warn("Error: attempting to add a synth past end of array\n");
- mutex_unlock(&spk_mutex);
- return -1;
}
if (in_synth->startup)
status = do_synth_init(in_synth);
- if (!status) {
- synths[i++] = in_synth;
- synths[i] = NULL;
- }
+ if (!status)
+ list_add_tail(&in_synth->node, &synths);
mutex_unlock(&spk_mutex);
return status;
@@ -479,17 +472,10 @@ EXPORT_SYMBOL_GPL(synth_add);
void synth_remove(struct spk_synth *in_synth)
{
- int i;
-
mutex_lock(&spk_mutex);
if (synth == in_synth)
synth_release();
- for (i = 0; synths[i]; i++) {
- if (in_synth == synths[i])
- break;
- }
- for ( ; synths[i]; i++) /* compress table */
- synths[i] = synths[i + 1];
+ list_del(&in_synth->node);
module_status = 0;
mutex_unlock(&spk_mutex);
}
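
The conversion drops the fixed MAXSYNTHS cap and the manual table-compaction loop in synth_remove(): registration, lookup, and removal all become standard list operations. A minimal sketch of the same idiom, with a hypothetical item type:

#include <linux/list.h>
#include <linux/string.h>

struct item {
        struct list_head node;
        const char *name;
};

static LIST_HEAD(items);

static void item_add(struct item *it)
{
        list_add_tail(&it->node, &items);   /* O(1) append, no fixed cap */
}

static void item_remove(struct item *it)
{
        list_del(&it->node);                /* unlink; no table compaction */
}

static struct item *item_find(const char *name)
{
        struct item *it;

        list_for_each_entry(it, &items, node)
                if (!strcmp(it->name, name))
                        return it;
        return NULL;
}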
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 54a76b6752ad..1b545152cc49 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -179,7 +179,6 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
{
int val;
int *p_val = var->p_val;
- int l;
char buf[32];
char *cp;
struct var_t *var_data = var->data;
@@ -237,9 +236,9 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
else
cp = buf;
if (!var_data->u.n.out_str)
- l = sprintf(cp, var_data->u.n.synth_fmt, (int)val);
+ sprintf(cp, var_data->u.n.synth_fmt, (int)val);
else
- l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
+ sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
synth_printf("%s", cp);
return 0;
}
diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig
deleted file mode 100644
index 3aa981fbc8f5..000000000000
--- a/drivers/staging/typec/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-menu "USB Power Delivery and Type-C drivers"
-
-if TYPEC_TCPM
-
-config TYPEC_TCPCI
- tristate "Type-C Port Controller Interface driver"
- depends on I2C
- select REGMAP_I2C
- help
- Type-C Port Controller driver for TCPCI-compliant controller.
-
-config TYPEC_RT1711H
- tristate "Richtek RT1711H Type-C chip driver"
- select TYPEC_TCPCI
- help
- Richtek RT1711H Type-C chip driver that works with
- Type-C Port Controller Manager to provide USB PD and USB
- Type-C functionalities.
-
-endif
-
-endmenu
diff --git a/drivers/staging/typec/Makefile b/drivers/staging/typec/Makefile
deleted file mode 100644
index 7803d485e1b3..000000000000
--- a/drivers/staging/typec/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
-obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/staging/typec/TODO b/drivers/staging/typec/TODO
deleted file mode 100644
index 53fe2f726c88..000000000000
--- a/drivers/staging/typec/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-tcpci:
-- Test with real hardware
-
-Please send patches to Guenter Roeck <linux@roeck-us.net> and copy
-Heikki Krogerus <heikki.krogerus@linux.intel.com>.
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
index bd381d861ab3..468eea856ca6 100644
--- a/drivers/staging/vboxvideo/TODO
+++ b/drivers/staging/vboxvideo/TODO
@@ -1,6 +1,5 @@
TODO:
-Move the driver over to the atomic API
--Stop using old load / unload drm_driver hooks
-Get a full review from the drm-maintainers on dri-devel done on this driver
-Extend this TODO with the results of that review
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index f6d26beffa54..da92c493f157 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -51,14 +51,42 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_pci_dev(pdev, ent, &driver);
+ struct drm_device *dev = NULL;
+ int ret = 0;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_drv_alloc;
+ }
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = vbox_driver_load(dev);
+ if (ret)
+ goto err_vbox_driver_load;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_drv_dev_register;
+
+ return ret;
+
+ err_drv_dev_register:
+ vbox_driver_unload(dev);
+ err_vbox_driver_load:
+ drm_dev_put(dev);
+ err_drv_alloc:
+ return ret;
}
static void vbox_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+ drm_dev_unregister(dev);
+ vbox_driver_unload(dev);
+ drm_dev_put(dev);
}
static int vbox_drm_freeze(struct drm_device *dev)
@@ -227,8 +255,6 @@ static struct drm_driver driver = {
DRIVER_PRIME,
.dev_priv_size = 0,
- .load = vbox_driver_load,
- .unload = vbox_driver_unload,
.lastclose = vbox_driver_lastclose,
.master_set = vbox_master_set,
.master_drop = vbox_master_drop,
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index eeac4f0cb2c6..594f84272957 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -126,7 +126,7 @@ struct vbox_private {
#undef CURSOR_PIXEL_COUNT
#undef CURSOR_DATA_SIZE
-int vbox_driver_load(struct drm_device *dev, unsigned long flags);
+int vbox_driver_load(struct drm_device *dev);
void vbox_driver_unload(struct drm_device *dev);
void vbox_driver_lastclose(struct drm_device *dev);
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
index 9d2018cd544e..429f6a453619 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -350,7 +350,7 @@ static void vbox_hw_fini(struct vbox_private *vbox)
pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
}
-int vbox_driver_load(struct drm_device *dev, unsigned long flags)
+int vbox_driver_load(struct drm_device *dev)
{
struct vbox_private *vbox;
int ret = 0;
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 5c7ea237893e..a83eac8668d0 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -222,7 +222,9 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
}
static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *old_fb, int x, int y)
+ struct drm_framebuffer *old_fb,
+ struct drm_framebuffer *new_fb,
+ int x, int y)
{
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
@@ -245,7 +247,7 @@ static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
vbox_bo_unreserve(bo);
}
- vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
+ vbox_fb = to_vbox_framebuffer(new_fb);
obj = vbox_fb->obj;
bo = gem_to_vbox_bo(obj);
@@ -281,7 +283,7 @@ static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- return vbox_crtc_do_set_base(crtc, old_fb, x, y);
+ return vbox_crtc_do_set_base(crtc, old_fb, CRTC_FB(crtc), x, y);
}
static int vbox_crtc_mode_set(struct drm_crtc *crtc,
@@ -306,6 +308,31 @@ static int vbox_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
+static int vbox_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct drm_device *drm = vbox->dev;
+ unsigned long flags;
+ int rc;
+
+ rc = vbox_crtc_do_set_base(crtc, CRTC_FB(crtc), fb, 0, 0);
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (event)
+ drm_crtc_send_vblank_event(crtc, event);
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+
+ return 0;
+}
+
static void vbox_crtc_disable(struct drm_crtc *crtc)
{
}
@@ -344,6 +371,7 @@ static const struct drm_crtc_funcs vbox_crtc_funcs = {
.reset = vbox_crtc_reset,
.set_config = drm_crtc_helper_set_config,
/* .gamma_set = vbox_crtc_gamma_set, */
+ .page_flip = vbox_crtc_page_flip,
.destroy = vbox_crtc_destroy,
};
@@ -504,7 +532,7 @@ static void vbox_set_edid(struct drm_connector *connector, int width,
for (i = 0; i < EDID_SIZE - 1; ++i)
sum += edid[i];
edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
- drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
+ drm_connector_update_edid_property(connector, (struct edid *)edid);
}
static int vbox_get_modes(struct drm_connector *connector)
@@ -655,7 +683,7 @@ static int vbox_connector_init(struct drm_device *dev,
dev->mode_config.suggested_y_property, 0);
drm_connector_register(connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index f0cefa1b7b0f..868e2d6aaf1b 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -117,44 +117,40 @@ static void my_wq_function(struct work_struct *work)
int bcm2835_audio_start(struct bcm2835_alsa_stream *alsa_stream)
{
- if (alsa_stream->my_wq) {
- struct bcm2835_audio_work *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- /*--- Queue some work (item 1) ---*/
- if (!work) {
- LOG_ERR(" .. Error: NULL work kmalloc\n");
- return -ENOMEM;
- }
- INIT_WORK(&work->my_work, my_wq_function);
- work->alsa_stream = alsa_stream;
- work->cmd = BCM2835_AUDIO_START;
- if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
- kfree(work);
- return -EBUSY;
- }
+ struct bcm2835_audio_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ /*--- Queue some work (item 1) ---*/
+ if (!work) {
+ LOG_ERR(" .. Error: NULL work kmalloc\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&work->my_work, my_wq_function);
+ work->alsa_stream = alsa_stream;
+ work->cmd = BCM2835_AUDIO_START;
+ if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
+ kfree(work);
+ return -EBUSY;
}
return 0;
}
int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
{
- if (alsa_stream->my_wq) {
- struct bcm2835_audio_work *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- /*--- Queue some work (item 1) ---*/
- if (!work) {
- LOG_ERR(" .. Error: NULL work kmalloc\n");
- return -ENOMEM;
- }
- INIT_WORK(&work->my_work, my_wq_function);
- work->alsa_stream = alsa_stream;
- work->cmd = BCM2835_AUDIO_STOP;
- if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
- kfree(work);
- return -EBUSY;
- }
+ struct bcm2835_audio_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ /*--- Queue some work (item 1) ---*/
+ if (!work) {
+ LOG_ERR(" .. Error: NULL work kmalloc\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&work->my_work, my_wq_function);
+ work->alsa_stream = alsa_stream;
+ work->cmd = BCM2835_AUDIO_STOP;
+ if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
+ kfree(work);
+ return -EBUSY;
}
return 0;
}
@@ -162,40 +158,31 @@ int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
unsigned int count, void *src)
{
- if (alsa_stream->my_wq) {
- struct bcm2835_audio_work *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- /*--- Queue some work (item 1) ---*/
- if (!work) {
- LOG_ERR(" .. Error: NULL work kmalloc\n");
- return -ENOMEM;
- }
- INIT_WORK(&work->my_work, my_wq_function);
- work->alsa_stream = alsa_stream;
- work->cmd = BCM2835_AUDIO_WRITE;
- work->src = src;
- work->count = count;
- if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
- kfree(work);
- return -EBUSY;
- }
+ struct bcm2835_audio_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ /*--- Queue some work (item 1) ---*/
+ if (!work) {
+ LOG_ERR(" .. Error: NULL work kmalloc\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&work->my_work, my_wq_function);
+ work->alsa_stream = alsa_stream;
+ work->cmd = BCM2835_AUDIO_WRITE;
+ work->src = src;
+ work->count = count;
+ if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
+ kfree(work);
+ return -EBUSY;
}
return 0;
}
-static void my_workqueue_init(struct bcm2835_alsa_stream *alsa_stream)
-{
- alsa_stream->my_wq = alloc_workqueue("my_queue", WQ_HIGHPRI, 1);
-}
-
static void my_workqueue_quit(struct bcm2835_alsa_stream *alsa_stream)
{
- if (alsa_stream->my_wq) {
- flush_workqueue(alsa_stream->my_wq);
- destroy_workqueue(alsa_stream->my_wq);
- alsa_stream->my_wq = NULL;
- }
+ flush_workqueue(alsa_stream->my_wq);
+ destroy_workqueue(alsa_stream->my_wq);
+ alsa_stream->my_wq = NULL;
}
static void audio_vchi_callback(void *param,
@@ -436,19 +423,21 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
- my_workqueue_init(alsa_stream);
+ alsa_stream->my_wq = alloc_workqueue("my_queue", WQ_HIGHPRI, 1);
+ if (!alsa_stream->my_wq)
+ return -ENOMEM;
ret = bcm2835_audio_open_connection(alsa_stream);
- if (ret) {
- ret = -1;
- goto exit;
- }
+ if (ret)
+ goto free_wq;
+
instance = alsa_stream->instance;
LOG_DBG(" instance (%p)\n", instance);
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
LOG_DBG("Interrupted whilst waiting for lock on (%d)\n", instance->num_connections);
- return -EINTR;
+ ret = -EINTR;
+ goto free_wq;
}
vchi_service_use(instance->vchi_handle[0]);
@@ -471,7 +460,11 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
unlock:
vchi_service_release(instance->vchi_handle[0]);
mutex_unlock(&instance->vchi_mutex);
-exit:
+
+free_wq:
+ if (ret)
+ destroy_workqueue(alsa_stream->my_wq);
+
return ret;
}
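
Folding my_workqueue_init() into bcm2835_audio_open() makes the alloc_workqueue() failure observable: open can now return -ENOMEM instead of silently leaving my_wq NULL, which in turn is what lets the start/stop/write paths above drop their if (alsa_stream->my_wq) checks. A minimal sketch of the resulting open shape, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct stream {
        struct workqueue_struct *wq;
};

static int stream_connect(struct stream *s);    /* assumed elsewhere */

static int stream_open(struct stream *s)
{
        int ret;

        s->wq = alloc_workqueue("stream_wq", WQ_HIGHPRI, 1);
        if (!s->wq)
                return -ENOMEM;                 /* allocation can fail */

        ret = stream_connect(s);
        if (ret)
                goto free_wq;

        return 0;

free_wq:
        destroy_workqueue(s->wq);               /* unwind on failure */
        s->wq = NULL;
        return ret;
}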
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index ce26741ae9d9..c04bdf070c87 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -580,6 +580,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
static void stop_streaming(struct vb2_queue *vq)
{
int ret;
+ unsigned long timeout;
struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
@@ -605,10 +606,10 @@ static void stop_streaming(struct vb2_queue *vq)
sizeof(dev->capture.frame_count));
/* wait for last frame to complete */
- ret = wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ);
- if (ret <= 0)
+ timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ);
+ if (timeout == 0)
v4l2_err(&dev->v4l2_dev,
- "error %d waiting for frame completion\n", ret);
+ "timed out waiting for frame completion\n");
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"disabling connection\n");
@@ -628,20 +629,6 @@ static void stop_streaming(struct vb2_queue *vq)
v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
}
-static void bm2835_mmal_lock(struct vb2_queue *vq)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_lock(&dev->mutex);
-}
-
-static void bm2835_mmal_unlock(struct vb2_queue *vq)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_unlock(&dev->mutex);
-}
-
static const struct vb2_ops bm2835_mmal_video_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
@@ -650,8 +637,8 @@ static const struct vb2_ops bm2835_mmal_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = bm2835_mmal_unlock,
- .wait_finish = bm2835_mmal_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
@@ -1864,6 +1851,8 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
goto cleanup_gdev;
}
+ /* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
+ mutex_init(&dev->mutex);
dev->camera_num = camera;
dev->max_width = resolutions[camera][0];
dev->max_height = resolutions[camera][1];
@@ -1908,13 +1897,11 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
q->ops = &bm2835_mmal_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->mutex;
ret = vb2_queue_init(q);
if (ret < 0)
goto unreg_dev;
- /* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
- mutex_init(&dev->mutex);
-
/* initialise video devices */
ret = bm2835_mmal_init_device(dev, &dev->vdev);
if (ret < 0)
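
Handing the driver mutex to the queue via q->lock lets the vb2 core serialize fops and ioctls itself, so the hand-rolled lock/unlock callbacks can be replaced with the stock vb2_ops_wait_prepare()/vb2_ops_wait_finish() helpers; the patch also moves mutex_init() ahead of vb2_queue_init() so the mutex exists before the queue references it. A condensed sketch of the setup (the other mandatory vb2_ops callbacks are elided):

#include <linux/mutex.h>
#include <media/videobuf2-v4l2.h>

static const struct vb2_ops sketch_qops = {
        /* queue_setup, buf_queue, ... elided in this sketch */
        .wait_prepare = vb2_ops_wait_prepare,   /* drops q->lock ... */
        .wait_finish = vb2_ops_wait_finish,     /* ... and retakes it */
};

static int sketch_init_queue(struct vb2_queue *q, struct mutex *lock)
{
        mutex_init(lock);       /* must exist before the queue uses it */
        q->ops = &sketch_qops;
        q->lock = lock;         /* one mutex for fops, ioctls and waits */
        return vb2_queue_init(q);
}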
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index f5b5ead6347c..51e5b04ff0f5 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -630,6 +630,7 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
{
struct mmal_msg_context *msg_context;
int ret;
+ unsigned long timeout;
/* payload size must not cause message to exceed max size */
if (payload_len >
@@ -668,11 +669,11 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
return ret;
}
- ret = wait_for_completion_timeout(&msg_context->u.sync.cmplt, 3 * HZ);
- if (ret <= 0) {
- pr_err("error %d waiting for sync completion\n", ret);
- if (ret == 0)
- ret = -ETIME;
+ timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
+ 3 * HZ);
+ if (timeout == 0) {
+ pr_err("timed out waiting for sync completion\n");
+ ret = -ETIME;
/* todo: what happens if the message arrives after aborting */
release_msg_context(msg_context);
return ret;
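
This hunk and the bcm2835-camera one above fix the same misuse: wait_for_completion_timeout() returns an unsigned long — 0 when the timeout elapsed, otherwise the remaining jiffies — and never a negative error, so storing the result in an int and testing ret <= 0 conflates a timeout with impossible error cases. A minimal sketch of the calling convention:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_done(struct completion *done)
{
        unsigned long time_left;

        /* 0 == timed out; nonzero == jiffies left on completion */
        time_left = wait_for_completion_timeout(done, 3 * HZ);
        if (time_left == 0)
                return -ETIME;          /* timed out */

        return 0;                       /* completed in time */
}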
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 38805504d462..6a9e71a61142 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -52,8 +52,8 @@
#define VCHIQ_LOG_TRACE_STR "trace"
/* Global 'vchiq' debugfs and clients entry used by all instances */
-struct dentry *vchiq_dbg_dir;
-struct dentry *vchiq_dbg_clients;
+static struct dentry *vchiq_dbg_dir;
+static struct dentry *vchiq_dbg_clients;
/* Log category debugfs entries */
struct vchiq_debugfs_log_entry {
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 088d2d9dbc21..52214a30e9b6 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -34,7 +34,7 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
__le64 *tsf_time;
u16 frame_size;
int ii, r;
- u8 *rx_sts, *rx_rate, *sq;
+ u8 *rx_rate;
u8 *skb_data;
u8 rate_idx = 0;
u8 rate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
@@ -49,7 +49,6 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
skb_data = (u8 *)skb->data;
- rx_sts = skb_data;
rx_rate = skb_data + 1;
sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
@@ -74,7 +73,6 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
}
tsf_time = (__le64 *)(skb_data + bytes_received - 12);
- sq = skb_data + bytes_received - 4;
new_rsr = skb_data + bytes_received - 3;
rssi = skb_data + bytes_received - 2;
rsr = skb_data + bytes_received - 1;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index c3b5b1431048..3b94e80f1d5e 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -32,7 +32,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
struct ieee80211_rx_status rx_status = { 0 };
struct ieee80211_hdr *hdr;
__le16 fc;
- u8 *rsr, *new_rsr, *rssi, *frame;
+ u8 *rsr, *new_rsr, *rssi;
__le64 *tsf_time;
u32 frame_size;
int ii, r;
@@ -133,8 +133,6 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
priv->current_rssi = priv->bb_pre_ed_rssi;
- frame = skb_data + 8;
-
skb_pull(skb, 8);
skb_trim(skb, frame_size);
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 26ca3fa29301..9def0748ffee 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -797,7 +797,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
unsigned long flags;
u16 tx_bytes, tx_header_size, tx_body_size, current_rate, duration_id;
u8 pkt_type, fb_option = AUTO_FB_NONE;
- bool need_rts = false, is_pspoll = false;
+ bool need_rts = false;
bool need_mic = false;
hdr = (struct ieee80211_hdr *)(skb->data);
@@ -888,9 +888,6 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
if (ieee80211_has_a4(hdr->frame_control))
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LHEAD);
- if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)
- is_pspoll = true;
-
tx_buffer_head->frag_ctl =
cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index ee7e26b886a5..f7b07c0b5ce2 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_WILC1000) += wilc1000.o
ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
-DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -12,7 +11,9 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
wilc_wlan.o
obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
+wilc1000-sdio-objs += $(wilc1000-objs)
wilc1000-sdio-objs += wilc_sdio.o
obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
+wilc1000-spi-objs += $(wilc1000-objs)
wilc1000-spi-objs += wilc_spi.o
diff --git a/drivers/staging/wilc1000/TODO b/drivers/staging/wilc1000/TODO
index d123324bd5c9..862e9eac9d60 100644
--- a/drivers/staging/wilc1000/TODO
+++ b/drivers/staging/wilc1000/TODO
@@ -1,14 +1,3 @@
TODO:
-- rework comments and function headers(also coding style)
-- Move handling for each individual members of 'union message_body' out
- into a separate 'struct work_struct' and completely remove the multiplexer
- that is currently part of host_if_work(), allowing movement of the
- implementation of each message handler into the callsite of the function
- that currently queues the 'host_if_msg'.
-- make spi and sdio components coexist in one build
- support soft-ap and p2p mode
- support resume/suspend function
-- convert all uses of the old GPIO API from <linux/gpio.h> to the
- GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
- lines from device tree, ACPI or board files, board files should
- use <linux/gpio/machine.h>
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 44816024f79c..e5420676afb3 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -1,17 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+
#include "coreconfigurator.h"
#define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \
BEACON_INTERVAL_LEN + CAP_INFO_LEN)
-enum basic_frame_type {
- FRAME_TYPE_CONTROL = 0x04,
- FRAME_TYPE_DATA = 0x08,
- FRAME_TYPE_MANAGEMENT = 0x00,
- FRAME_TYPE_RESERVED = 0x0C,
- FRAME_TYPE_FORCE_32BIT = 0xFFFFFFFF
-};
-
enum sub_frame_type {
ASSOC_REQ = 0x00,
ASSOC_RSP = 0x10,
@@ -51,49 +50,6 @@ enum sub_frame_type {
FRAME_SUBTYPE_FORCE_32BIT = 0xFFFFFFFF
};
-enum info_element_id {
- ISSID = 0, /* Service Set Identifier */
- ISUPRATES = 1, /* Supported Rates */
- IFHPARMS = 2, /* FH parameter set */
- IDSPARMS = 3, /* DS parameter set */
- ICFPARMS = 4, /* CF parameter set */
- ITIM = 5, /* Traffic Information Map */
- IIBPARMS = 6, /* IBSS parameter set */
- ICOUNTRY = 7, /* Country element */
- IEDCAPARAMS = 12, /* EDCA parameter set */
- ITSPEC = 13, /* Traffic Specification */
- ITCLAS = 14, /* Traffic Classification */
- ISCHED = 15, /* Schedule */
- ICTEXT = 16, /* Challenge Text */
- IPOWERCONSTRAINT = 32, /* Power Constraint */
- IPOWERCAPABILITY = 33, /* Power Capability */
- ITPCREQUEST = 34, /* TPC Request */
- ITPCREPORT = 35, /* TPC Report */
- ISUPCHANNEL = 36, /* Supported channel list */
- ICHSWANNOUNC = 37, /* Channel Switch Announcement */
- IMEASUREMENTREQUEST = 38, /* Measurement request */
- IMEASUREMENTREPORT = 39, /* Measurement report */
- IQUIET = 40, /* Quiet element Info */
- IIBSSDFS = 41, /* IBSS DFS */
- IERPINFO = 42, /* ERP Information */
- ITSDELAY = 43, /* TS Delay */
- ITCLASPROCESS = 44, /* TCLAS Processing */
- IHTCAP = 45, /* HT Capabilities */
- IQOSCAP = 46, /* QoS Capability */
- IRSNELEMENT = 48, /* RSN Information Element */
- IEXSUPRATES = 50, /* Extended Supported Rates */
- IEXCHSWANNOUNC = 60, /* Extended Ch Switch Announcement*/
- IHTOPERATION = 61, /* HT Information */
- ISECCHOFF = 62, /* Secondary Channel Offeset */
- I2040COEX = 72, /* 20/40 Coexistence IE */
- I2040INTOLCHREPORT = 73, /* 20/40 Intolerant channel report*/
- IOBSSSCAN = 74, /* OBSS Scan parameters */
- IEXTCAP = 127, /* Extended capability */
- IWMM = 221, /* WMM parameters */
- IWPAELEMENT = 221, /* WPA Information Element */
- INFOELEM_ID_FORCE_32BIT = 0xFFFFFFFF
-};
-
static inline u16 get_beacon_period(u8 *data)
{
u16 bcn_per;
@@ -172,9 +128,7 @@ static inline void get_BSSID(u8 *data, u8 *bssid)
static inline void get_ssid(u8 *data, u8 *ssid, u8 *p_ssid_len)
{
- u8 len = 0;
- u8 i = 0;
- u8 j = 0;
+ u8 i, j, len;
len = data[TAG_PARAM_OFFSET + 1];
j = TAG_PARAM_OFFSET + 2;
@@ -222,7 +176,7 @@ static u8 *get_tim_elm(u8 *msa, u16 rx_len, u16 tag_param_offset)
index = tag_param_offset;
while (index < (rx_len - FCS_LEN)) {
- if (msa[index] == ITIM)
+ if (msa[index] == WLAN_EID_TIM)
return &msa[index];
index += (IE_HDR_LEN + msa[index + 1]);
}
@@ -236,7 +190,7 @@ static u8 get_current_channel_802_11n(u8 *msa, u16 rx_len)
index = TAG_PARAM_OFFSET;
while (index < (rx_len - FCS_LEN)) {
- if (msa[index] == IDSPARMS)
+ if (msa[index] == WLAN_EID_DS_PARAMS)
return msa[index + 2];
index += msa[index + 1] + IE_HDR_LEN;
}
@@ -247,18 +201,11 @@ static u8 get_current_channel_802_11n(u8 *msa, u16 rx_len)
s32 wilc_parse_network_info(u8 *msg_buffer,
struct network_info **ret_network_info)
{
- struct network_info *network_info = NULL;
- u8 msg_type = 0;
- u16 wid_len = 0;
- u8 *wid_val = NULL;
- u8 *msa = NULL;
- u16 rx_len = 0;
- u8 *tim_elm = NULL;
- u8 *ies = NULL;
- u16 ies_len = 0;
- u8 index = 0;
- u32 tsf_lo;
- u32 tsf_hi;
+ struct network_info *network_info;
+ u8 *wid_val, *msa, *tim_elm, *ies;
+ u32 tsf_lo, tsf_hi;
+ u16 wid_len, rx_len, ies_len;
+ u8 msg_type, index;
msg_type = msg_buffer[0];
@@ -320,11 +267,11 @@ s32 wilc_parse_network_info(u8 *msg_buffer,
s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
struct connect_info *ret_conn_info)
{
- u8 *ies = NULL;
- u16 ies_len = 0;
+ u8 *ies;
+ u16 ies_len;
ret_conn_info->status = get_asoc_status(buffer);
- if (ret_conn_info->status == SUCCESSFUL_STATUSCODE) {
+ if (ret_conn_info->status == WLAN_STATUS_SUCCESS) {
ies = &buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
ies_len = buffer_len - (CAP_INFO_LEN + STATUS_CODE_LEN +
AID_LEN);
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index 55b5531856f8..b62acb447383 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
#ifndef CORECONFIGURATOR_H
#define CORECONFIGURATOR_H
@@ -24,26 +29,6 @@
#define MAKE_WORD16(lsb, msb) ((((u16)(msb) << 8) & 0xFF00) | (lsb))
#define MAKE_WORD32(lsw, msw) ((((u32)(msw) << 16) & 0xFFFF0000) | (lsw))
-enum connect_status {
- SUCCESSFUL_STATUSCODE = 0,
- UNSPEC_FAIL = 1,
- UNSUP_CAP = 10,
- REASOC_NO_ASOC = 11,
- FAIL_OTHER = 12,
- UNSUPT_ALG = 13,
- AUTH_SEQ_FAIL = 14,
- CHLNG_FAIL = 15,
- AUTH_TIMEOUT = 16,
- AP_FULL = 17,
- UNSUP_RATE = 18,
- SHORT_PREAMBLE_UNSUP = 19,
- PBCC_UNSUP = 20,
- CHANNEL_AGIL_UNSUP = 21,
- SHORT_SLOT_UNSUP = 25,
- OFDM_DSSS_UNSUP = 26,
- CONNECT_STS_FORCE_16_BIT = 0xFFFF
-};
-
struct rssi_history_buffer {
bool full;
u8 index;
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 0aaae33f97b9..42d8accb1f60 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1,49 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
-#include "wilc_wfi_netdevice.h"
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
-#define HOST_IF_MSG_SCAN 0
-#define HOST_IF_MSG_CONNECT 1
-#define HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO 2
-#define HOST_IF_MSG_KEY 3
-#define HOST_IF_MSG_RCVD_NTWRK_INFO 4
-#define HOST_IF_MSG_RCVD_SCAN_COMPLETE 5
-#define HOST_IF_MSG_CFG_PARAMS 6
-#define HOST_IF_MSG_SET_CHANNEL 7
-#define HOST_IF_MSG_DISCONNECT 8
-#define HOST_IF_MSG_GET_RSSI 9
-#define HOST_IF_MSG_ADD_BEACON 11
-#define HOST_IF_MSG_DEL_BEACON 12
-#define HOST_IF_MSG_ADD_STATION 13
-#define HOST_IF_MSG_DEL_STATION 14
-#define HOST_IF_MSG_EDIT_STATION 15
-#define HOST_IF_MSG_SCAN_TIMER_FIRED 16
-#define HOST_IF_MSG_CONNECT_TIMER_FIRED 17
-#define HOST_IF_MSG_POWER_MGMT 18
-#define HOST_IF_MSG_GET_INACTIVETIME 19
-#define HOST_IF_MSG_REMAIN_ON_CHAN 20
-#define HOST_IF_MSG_REGISTER_FRAME 21
-#define HOST_IF_MSG_LISTEN_TIMER_FIRED 22
-#define HOST_IF_MSG_SET_WFIDRV_HANDLER 24
-#define HOST_IF_MSG_GET_MAC_ADDRESS 26
-#define HOST_IF_MSG_SET_OPERATION_MODE 27
-#define HOST_IF_MSG_SET_IPADDRESS 28
-#define HOST_IF_MSG_GET_IPADDRESS 29
-#define HOST_IF_MSG_GET_STATISTICS 31
-#define HOST_IF_MSG_SET_MULTICAST_FILTER 32
-#define HOST_IF_MSG_DEL_BA_SESSION 34
-#define HOST_IF_MSG_DEL_ALL_STA 36
-#define HOST_IF_MSG_SET_TX_POWER 38
-#define HOST_IF_MSG_GET_TX_POWER 39
-#define HOST_IF_MSG_EXIT 100
+#include "wilc_wfi_netdevice.h"
#define HOST_IF_SCAN_TIMEOUT 4000
#define HOST_IF_CONNECT_TIMEOUT 9500
#define FALSE_FRMWR_CHANNEL 100
-#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
-#define DEFAULT_LINK_SPEED 72
-
#define REAL_JOIN_REQ 0
struct host_if_wpa_attr {
@@ -61,7 +28,7 @@ struct host_if_wep_attr {
u8 key_len;
u8 index;
u8 mode;
- enum AUTHTYPE auth_type;
+ enum authtype auth_type;
};
union host_if_key_attr {
@@ -97,7 +64,7 @@ struct connect_attr {
u8 security;
wilc_connect_result result;
void *arg;
- enum AUTHTYPE auth_type;
+ enum authtype auth_type;
u8 ch;
void *params;
};
@@ -145,6 +112,7 @@ struct set_ip_addr {
};
struct sta_inactive_t {
+ u32 inactive_time;
u8 mac[6];
};
@@ -180,10 +148,12 @@ union message_body {
};
struct host_if_msg {
- u16 id;
union message_body body;
struct wilc_vif *vif;
struct work_struct work;
+ void (*fn)(struct work_struct *ws);
+ struct completion work_comp;
+ bool is_sync;
};
struct join_bss_param {
@@ -219,9 +189,7 @@ static struct host_if_drv *terminated_handle;
bool wilc_optaining_ip;
static u8 p2p_listen_state;
static struct workqueue_struct *hif_workqueue;
-static struct completion hif_thread_comp;
static struct completion hif_driver_comp;
-static struct completion hif_wait_response;
static struct mutex hif_deinit_lock;
static struct timer_list periodic_rssi;
static struct wilc_vif *periodic_rssi_vif;
@@ -230,37 +198,44 @@ u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
static u8 rcv_assoc_resp[MAX_ASSOC_RESP_FRAME_SIZE];
-static s8 rssi;
static u8 set_ip[2][4];
static u8 get_ip[2][4];
-static u32 inactive_time;
static u32 clients_count;
-static void *host_int_parse_join_bss_param(struct network_info *info);
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
-static s32 handle_scan_done(struct wilc_vif *vif, enum scan_event evt);
-static void host_if_work(struct work_struct *work);
-
-/*!
- * @author syounan
- * @date 1 Sep 2010
- * @note copied from FLO glue implementatuion
- * @version 1.0
- */
-static int wilc_enqueue_cmd(struct host_if_msg *msg)
+
+/* 'msg' should be freed by the caller for sync */
+static struct host_if_msg*
+wilc_alloc_work(struct wilc_vif *vif, void (*work_fun)(struct work_struct *),
+ bool is_sync)
{
- struct host_if_msg *new_msg;
+ struct host_if_msg *msg;
- new_msg = kmemdup(msg, sizeof(*new_msg), GFP_ATOMIC);
- if (!new_msg)
- return -ENOMEM;
+ if (!work_fun)
+ return ERR_PTR(-EINVAL);
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+ msg->fn = work_fun;
+ msg->vif = vif;
+ msg->is_sync = is_sync;
+ if (is_sync)
+ init_completion(&msg->work_comp);
+
+ return msg;
+}
+
+static int wilc_enqueue_work(struct host_if_msg *msg)
+{
+ INIT_WORK(&msg->work, msg->fn);
+ if (!hif_workqueue || !queue_work(hif_workqueue, &msg->work))
+ return -EINVAL;
- INIT_WORK(&new_msg->work, host_if_work);
- queue_work(hif_workqueue, &new_msg->work);
return 0;
}
-/* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
+/* The idx starts from 0 to (NUM_CONCURRENT_IFC - 1), but 0 index used as
* special purpose in wilc device, so we add 1 to the index to starts from 1.
* As a result, the returned index will be 1 to NUM_CONCURRENT_IFC.
*/
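
The refactor replaces the old pattern — kmemdup() a caller-built host_if_msg, queue a single host_if_work() dispatcher, and switch on msg->id — with per-message work functions stored in msg->fn. Synchronous callers wait on the embedded work_comp and free the message themselves, as the comment above wilc_alloc_work() notes; asynchronous handlers free it at the end of their work function. A sketch of a synchronous caller under the new scheme (handle_get_something is hypothetical, and the types come from this file):

static int do_get_something(struct wilc_vif *vif)
{
        struct host_if_msg *msg;
        int ret;

        msg = wilc_alloc_work(vif, handle_get_something, true);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        ret = wilc_enqueue_work(msg);
        if (!ret)
                wait_for_completion(&msg->work_comp);

        kfree(msg);             /* sync: caller owns the message */
        return ret;
}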
@@ -272,7 +247,7 @@ int wilc_get_vif_idx(struct wilc_vif *vif)
/* We need to minus 1 from idx which is from wilc device to get real index
* of wilc->vif[], because we add 1 when pass to wilc device in the function
* wilc_get_vif_idx.
- * As a result, the index should be between 0 and NUM_CONCURRENT_IFC -1.
+ * As a result, the index should be between 0 and (NUM_CONCURRENT_IFC - 1).
*/
static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
{
@@ -284,13 +259,15 @@ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
return wilc->vif[index];
}
-static void handle_set_channel(struct wilc_vif *vif,
- struct channel_attr *hif_set_ch)
+static void handle_set_channel(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct channel_attr *hif_set_ch = &msg->body.channel_info;
+ int ret;
struct wid wid;
- wid.id = (u16)WID_CURRENT_CHANNEL;
+ wid.id = WID_CURRENT_CHANNEL;
wid.type = WID_CHAR;
wid.val = (char *)&hif_set_ch->set_ch;
wid.size = sizeof(char);
@@ -300,27 +277,27 @@ static void handle_set_channel(struct wilc_vif *vif,
if (ret)
netdev_err(vif->ndev, "Failed to set channel\n");
+ kfree(msg);
}
-static int handle_set_wfi_drv_handler(struct wilc_vif *vif,
- struct drv_handler *hif_drv_handler)
+static void handle_set_wfi_drv_handler(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct drv_handler *hif_drv_handler = &msg->body.drv;
+ int ret;
struct wid wid;
u8 *currbyte, *buffer;
- struct host_if_drv *hif_drv = NULL;
-
- if (!vif->hif_drv)
- return -EINVAL;
+ struct host_if_drv *hif_drv;
- if (!hif_drv_handler)
- return -EINVAL;
+ if (!vif->hif_drv || !hif_drv_handler)
+ goto free_msg;
hif_drv = vif->hif_drv;
buffer = kzalloc(DRV_HANDLER_SIZE, GFP_KERNEL);
if (!buffer)
- return -ENOMEM;
+ goto free_msg;
currbyte = buffer;
*currbyte = hif_drv->driver_handler_id & DRV_HANDLER_MASK;
@@ -333,31 +310,32 @@ static int handle_set_wfi_drv_handler(struct wilc_vif *vif,
currbyte++;
*currbyte = (hif_drv_handler->name | (hif_drv_handler->mode << 1));
- wid.id = (u16)WID_SET_DRV_HANDLER;
+ wid.id = WID_SET_DRV_HANDLER;
wid.type = WID_STR;
wid.val = (s8 *)buffer;
wid.size = DRV_HANDLER_SIZE;
ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
hif_drv->driver_handler_id);
- if (ret) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set driver handler\n");
- complete(&hif_driver_comp);
- kfree(buffer);
- return ret;
- }
+
complete(&hif_driver_comp);
kfree(buffer);
- return 0;
+
+free_msg:
+ kfree(msg);
}
-static void handle_set_operation_mode(struct wilc_vif *vif,
- struct op_mode *hif_op_mode)
+static void handle_set_operation_mode(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct op_mode *hif_op_mode = &msg->body.mode;
+ int ret;
struct wid wid;
- wid.id = (u16)WID_SET_OPERATION_MODE;
+ wid.id = WID_SET_OPERATION_MODE;
wid.type = WID_INT;
wid.val = (s8 *)&hif_op_mode->mode;
wid.size = sizeof(u32);
@@ -369,12 +347,18 @@ static void handle_set_operation_mode(struct wilc_vif *vif,
complete(&hif_driver_comp);
if (ret)
- netdev_err(vif->ndev, "Failed to set driver handler\n");
+ netdev_err(vif->ndev, "Failed to set operation mode\n");
+
+ kfree(msg);
}
-static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
+static void handle_set_ip_address(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ u8 *ip_addr = msg->body.ip_info.ip_addr;
+ u8 idx = msg->body.ip_info.idx;
+ int ret;
struct wid wid;
char firmware_ip_addr[4] = {0};
@@ -383,7 +367,7 @@ static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
memcpy(set_ip[idx], ip_addr, IP_ALEN);
- wid.id = (u16)WID_IP_ADDRESS;
+ wid.id = WID_IP_ADDRESS;
wid.type = WID_STR;
wid.val = ip_addr;
wid.size = IP_ALEN;
@@ -395,14 +379,18 @@ static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
if (ret)
netdev_err(vif->ndev, "Failed to set IP address\n");
+ kfree(msg);
}
-static void handle_get_ip_address(struct wilc_vif *vif, u8 idx)
+static void handle_get_ip_address(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ u8 idx = msg->body.ip_info.idx;
+ int ret;
struct wid wid;
- wid.id = (u16)WID_IP_ADDRESS;
+ wid.id = WID_IP_ADDRESS;
wid.type = WID_STR;
wid.val = kmalloc(IP_ALEN, GFP_KERNEL);
wid.size = IP_ALEN;
@@ -419,15 +407,18 @@ static void handle_get_ip_address(struct wilc_vif *vif, u8 idx)
if (ret)
netdev_err(vif->ndev, "Failed to get IP address\n");
+ kfree(msg);
}
-static void handle_get_mac_address(struct wilc_vif *vif,
- struct get_mac_addr *get_mac_addr)
+static void handle_get_mac_address(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct get_mac_addr *get_mac_addr = &msg->body.get_mac_info;
+ int ret;
struct wid wid;
- wid.id = (u16)WID_MAC_ADDR;
+ wid.id = WID_MAC_ADDR;
wid.type = WID_STR;
wid.val = get_mac_addr->mac_addr;
wid.size = ETH_ALEN;
@@ -437,12 +428,16 @@ static void handle_get_mac_address(struct wilc_vif *vif,
if (ret)
netdev_err(vif->ndev, "Failed to get mac address\n");
- complete(&hif_wait_response);
+ complete(&msg->work_comp);
+ /* free 'msg' data later, in caller */
}
-static void handle_cfg_param(struct wilc_vif *vif, struct cfg_param_attr *param)
+static void handle_cfg_param(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct cfg_param_attr *param = &msg->body.cfg_info;
+ int ret;
struct wid wid_list[32];
struct host_if_drv *hif_drv = vif->hif_drv;
int i = 0;
@@ -641,7 +636,7 @@ static void handle_cfg_param(struct wilc_vif *vif, struct cfg_param_attr *param)
i++;
}
if (param->flag & SITE_SURVEY) {
- enum SITESURVEY enabled = param->site_survey_enabled;
+ enum site_survey enabled = param->site_survey_enabled;
if (enabled < 3) {
wid_list[i].id = WID_SITE_SURVEY;
@@ -701,7 +696,7 @@ static void handle_cfg_param(struct wilc_vif *vif, struct cfg_param_attr *param)
i++;
}
if (param->flag & CURRENT_TX_RATE) {
- enum CURRENT_TXRATE curr_tx_rate = param->curr_tx_rate;
+ enum current_tx_rate curr_tx_rate = param->curr_tx_rate;
if (curr_tx_rate == AUTORATE || curr_tx_rate == MBPS_1 ||
curr_tx_rate == MBPS_2 || curr_tx_rate == MBPS_5_5 ||
@@ -730,11 +725,53 @@ static void handle_cfg_param(struct wilc_vif *vif, struct cfg_param_attr *param)
unlock:
mutex_unlock(&hif_drv->cfg_values_lock);
+ kfree(msg);
+}
+
+static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
+{
+ int result = 0;
+ u8 abort_running_scan;
+ struct wid wid;
+ struct host_if_drv *hif_drv = vif->hif_drv;
+ struct user_scan_req *scan_req;
+
+ if (evt == SCAN_EVENT_ABORTED) {
+ abort_running_scan = 1;
+ wid.id = WID_ABORT_RUNNING_SCAN;
+ wid.type = WID_CHAR;
+ wid.val = (s8 *)&abort_running_scan;
+ wid.size = sizeof(char);
+
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
+
+ if (result) {
+ netdev_err(vif->ndev, "Failed to set abort running\n");
+ result = -EFAULT;
+ }
+ }
+
+ if (!hif_drv) {
+ netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__);
+ return result;
+ }
+
+ scan_req = &hif_drv->usr_scan_req;
+ if (scan_req->scan_result) {
+ scan_req->scan_result(evt, NULL, scan_req->arg, NULL);
+ scan_req->scan_result = NULL;
+ }
+
+ return result;
}
-static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
+static void handle_scan(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct scan_attr *scan_info = &msg->body.scan_info;
+ int result = 0;
struct wid wid_list[5];
u32 index = 0;
u32 i;
@@ -760,9 +797,9 @@ static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
goto error;
}
- hif_drv->usr_scan_req.rcvd_ch_cnt = 0;
+ hif_drv->usr_scan_req.ch_cnt = 0;
- wid_list[index].id = (u16)WID_SSID_PROBE_REQ;
+ wid_list[index].id = WID_SSID_PROBE_REQ;
wid_list[index].type = WID_STR;
for (i = 0; i < hidden_net->n_ssids; i++)
@@ -804,7 +841,7 @@ static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
scan_info->ch_list_len > 0) {
int i;
- for (i = 0; i < scan_info->ch_list_len; i++) {
+ for (i = 0; i < scan_info->ch_list_len; i++) {
if (scan_info->ch_freq_list[i] > 0)
scan_info->ch_freq_list[i] -= 1;
}
@@ -843,62 +880,34 @@ error:
kfree(hdn_ntwk_wid_val);
- return result;
-}
-
-static s32 handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
-{
- s32 result = 0;
- u8 abort_running_scan;
- struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
- struct user_scan_req *scan_req;
-
- if (evt == SCAN_EVENT_ABORTED) {
- abort_running_scan = 1;
- wid.id = (u16)WID_ABORT_RUNNING_SCAN;
- wid.type = WID_CHAR;
- wid.val = (s8 *)&abort_running_scan;
- wid.size = sizeof(char);
-
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
-
- if (result) {
- netdev_err(vif->ndev, "Failed to set abort running\n");
- result = -EFAULT;
- }
- }
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "Driver handler is NULL\n");
- return result;
- }
-
- scan_req = &hif_drv->usr_scan_req;
- if (scan_req->scan_result) {
- scan_req->scan_result(evt, NULL, scan_req->arg, NULL);
- scan_req->scan_result = NULL;
- }
-
- return result;
+ kfree(msg);
}
u8 wilc_connected_ssid[6] = {0};
-static s32 handle_connect(struct wilc_vif *vif,
- struct connect_attr *conn_attr)
+static void handle_connect(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct connect_attr *conn_attr = &msg->body.con_info;
+ int result = 0;
struct wid wid_list[8];
u32 wid_cnt = 0, dummyval = 0;
u8 *cur_byte = NULL;
struct join_bss_param *bss_param;
struct host_if_drv *hif_drv = vif->hif_drv;
+ if (msg->vif->hif_drv->usr_scan_req.scan_result) {
+ result = wilc_enqueue_work(msg);
+ if (result)
+ goto error;
+
+ usleep_range(2 * 1000, 2 * 1000);
+ return;
+ }
+
if (memcmp(conn_attr->bssid, wilc_connected_ssid, ETH_ALEN) == 0) {
- result = 0;
netdev_err(vif->ndev, "Discard connect request\n");
- return result;
+ goto error;
}
bss_param = conn_attr->params;
@@ -971,19 +980,19 @@ static s32 handle_connect(struct wilc_vif *vif,
wid_list[wid_cnt].size = hif_drv->usr_conn_req.ies_len;
wid_cnt++;
- wid_list[wid_cnt].id = (u16)WID_11I_MODE;
+ wid_list[wid_cnt].id = WID_11I_MODE;
wid_list[wid_cnt].type = WID_CHAR;
wid_list[wid_cnt].size = sizeof(char);
wid_list[wid_cnt].val = (s8 *)&hif_drv->usr_conn_req.security;
wid_cnt++;
- wid_list[wid_cnt].id = (u16)WID_AUTH_TYPE;
+ wid_list[wid_cnt].id = WID_AUTH_TYPE;
wid_list[wid_cnt].type = WID_CHAR;
wid_list[wid_cnt].size = sizeof(char);
wid_list[wid_cnt].val = (s8 *)&hif_drv->usr_conn_req.auth_type;
wid_cnt++;
- wid_list[wid_cnt].id = (u16)WID_JOIN_REQ_EXTENDED;
+ wid_list[wid_cnt].id = WID_JOIN_REQ_EXTENDED;
wid_list[wid_cnt].type = WID_STR;
wid_list[wid_cnt].size = 112;
wid_list[wid_cnt].val = kmalloc(wid_list[wid_cnt].size, GFP_KERNEL);
@@ -1138,20 +1147,22 @@ error:
conn_attr->ies = NULL;
kfree(cur_byte);
- return result;
+ kfree(msg);
}
-static s32 handle_connect_timeout(struct wilc_vif *vif)
+static void handle_connect_timeout(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ int result;
struct connect_info info;
struct wid wid;
u16 dummy_reason_code = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- netdev_err(vif->ndev, "Driver handler is NULL\n");
- return result;
+ netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__);
+ goto out;
}
hif_drv->hif_state = HOST_IF_IDLE;
@@ -1170,7 +1181,7 @@ static s32 handle_connect_timeout(struct wilc_vif *vif)
hif_drv->usr_conn_req.ies_len,
GFP_KERNEL);
if (!info.req_ies)
- return -ENOMEM;
+ goto out;
}
hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_CONN_RESP,
@@ -1182,10 +1193,10 @@ static s32 handle_connect_timeout(struct wilc_vif *vif)
kfree(info.req_ies);
info.req_ies = NULL;
} else {
- netdev_err(vif->ndev, "Connect callback is NULL\n");
+ netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
}
- wid.id = (u16)WID_DISCONNECT;
+ wid.id = WID_DISCONNECT;
wid.type = WID_CHAR;
wid.val = (s8 *)&dummy_reason_code;
wid.size = sizeof(char);
@@ -1206,17 +1217,185 @@ static s32 handle_connect_timeout(struct wilc_vif *vif)
eth_zero_addr(wilc_connected_ssid);
- return result;
+out:
+ kfree(msg);
}
-static s32 handle_rcvd_ntwrk_info(struct wilc_vif *vif,
- struct rcvd_net_info *rcvd_info)
+static void host_int_fill_join_bss_param(struct join_bss_param *param, u8 *ies,
+ u16 *out_index, u8 *pcipher_tc,
+ u8 *auth_total_cnt, u32 tsf_lo,
+ u8 *rates_no)
{
+ u8 ext_rates_no;
+ u16 offset;
+ u8 pcipher_cnt;
+ u8 auth_cnt;
+ u8 i, j;
+ u16 index = *out_index;
+
+ if (ies[index] == WLAN_EID_SUPP_RATES) {
+ *rates_no = ies[index + 1];
+ param->supp_rates[0] = *rates_no;
+ index += 2;
+
+ for (i = 0; i < *rates_no; i++)
+ param->supp_rates[i + 1] = ies[index + i];
+
+ index += *rates_no;
+ } else if (ies[index] == WLAN_EID_EXT_SUPP_RATES) {
+ ext_rates_no = ies[index + 1];
+ if (ext_rates_no > (MAX_RATES_SUPPORTED - *rates_no))
+ param->supp_rates[0] = MAX_RATES_SUPPORTED;
+ else
+ param->supp_rates[0] += ext_rates_no;
+ index += 2;
+ for (i = 0; i < (param->supp_rates[0] - *rates_no); i++)
+ param->supp_rates[*rates_no + i + 1] = ies[index + i];
+
+ index += ext_rates_no;
+ } else if (ies[index] == WLAN_EID_HT_CAPABILITY) {
+ param->ht_capable = true;
+ index += ies[index + 1] + 2;
+ } else if ((ies[index] == WLAN_EID_VENDOR_SPECIFIC) &&
+ (ies[index + 2] == 0x00) && (ies[index + 3] == 0x50) &&
+ (ies[index + 4] == 0xF2) && (ies[index + 5] == 0x02) &&
+ ((ies[index + 6] == 0x00) || (ies[index + 6] == 0x01)) &&
+ (ies[index + 7] == 0x01)) {
+ param->wmm_cap = true;
+
+ if (ies[index + 8] & BIT(7))
+ param->uapsd_cap = true;
+ index += ies[index + 1] + 2;
+ } else if ((ies[index] == WLAN_EID_VENDOR_SPECIFIC) &&
+ (ies[index + 2] == 0x50) && (ies[index + 3] == 0x6f) &&
+ (ies[index + 4] == 0x9a) &&
+ (ies[index + 5] == 0x09) && (ies[index + 6] == 0x0c)) {
+ u16 p2p_cnt;
+
+ param->tsf = tsf_lo;
+ param->noa_enabled = 1;
+ param->idx = ies[index + 9];
+
+ if (ies[index + 10] & BIT(7)) {
+ param->opp_enabled = 1;
+ param->ct_window = ies[index + 10];
+ } else {
+ param->opp_enabled = 0;
+ }
+
+ param->cnt = ies[index + 11];
+ p2p_cnt = index + 12;
+
+ memcpy(param->duration, ies + p2p_cnt, 4);
+ p2p_cnt += 4;
+
+ memcpy(param->interval, ies + p2p_cnt, 4);
+ p2p_cnt += 4;
+
+ memcpy(param->start_time, ies + p2p_cnt, 4);
+
+ index += ies[index + 1] + 2;
+ } else if ((ies[index] == WLAN_EID_RSN) ||
+ ((ies[index] == WLAN_EID_VENDOR_SPECIFIC) &&
+ (ies[index + 2] == 0x00) &&
+ (ies[index + 3] == 0x50) && (ies[index + 4] == 0xF2) &&
+ (ies[index + 5] == 0x01))) {
+ u16 rsn_idx = index;
+
+ if (ies[rsn_idx] == WLAN_EID_RSN) {
+ param->mode_802_11i = 2;
+ } else {
+ if (param->mode_802_11i == 0)
+ param->mode_802_11i = 1;
+ rsn_idx += 4;
+ }
+
+ rsn_idx += 7;
+ param->rsn_grp_policy = ies[rsn_idx];
+ rsn_idx++;
+ offset = ies[rsn_idx] * 4;
+ pcipher_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
+ rsn_idx += 2;
+
+ i = *pcipher_tc;
+ j = 0;
+ for (; i < (pcipher_cnt + *pcipher_tc) && i < 3; i++, j++) {
+ u8 *policy = &param->rsn_pcip_policy[i];
+
+ *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
+ }
+
+ *pcipher_tc += pcipher_cnt;
+ rsn_idx += offset;
+
+ offset = ies[rsn_idx] * 4;
+
+ auth_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
+ rsn_idx += 2;
+ i = *auth_total_cnt;
+ j = 0;
+ for (; i < (*auth_total_cnt + auth_cnt); i++, j++) {
+ u8 *policy = &param->rsn_auth_policy[i];
+
+ *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
+ }
+
+ *auth_total_cnt += auth_cnt;
+ rsn_idx += offset;
+
+ if (ies[index] == WLAN_EID_RSN) {
+ param->rsn_cap[0] = ies[rsn_idx];
+ param->rsn_cap[1] = ies[rsn_idx + 1];
+ rsn_idx += 2;
+ }
+ param->rsn_found = true;
+ index += ies[index + 1] + 2;
+ } else {
+ index += ies[index + 1] + 2;
+ }
+
+ *out_index = index;
+}
+
+static void *host_int_parse_join_bss_param(struct network_info *info)
+{
+ struct join_bss_param *param;
+ u16 index = 0;
+ u8 rates_no = 0;
+ u8 pcipher_total_cnt = 0;
+ u8 auth_total_cnt = 0;
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return NULL;
+
+ param->dtim_period = info->dtim_period;
+ param->beacon_period = info->beacon_period;
+ param->cap_info = info->cap_info;
+ memcpy(param->bssid, info->bssid, 6);
+ memcpy((u8 *)param->ssid, info->ssid, info->ssid_len + 1);
+ param->ssid_len = info->ssid_len;
+ memset(param->rsn_pcip_policy, 0xFF, 3);
+ memset(param->rsn_auth_policy, 0xFF, 3);
+
+ while (index < info->ies_len)
+ host_int_fill_join_bss_param(param, info->ies, &index,
+ &pcipher_total_cnt,
+ &auth_total_cnt, info->tsf_lo,
+ &rates_no);
+
+ return (void *)param;
+}
+
+static void handle_rcvd_ntwrk_info(struct work_struct *work)
+{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct rcvd_net_info *rcvd_info = &msg->body.net_info;
u32 i;
bool found;
- s32 result = 0;
struct network_info *info = NULL;
- void *params = NULL;
+ void *params;
struct host_if_drv *hif_drv = vif->hif_drv;
struct user_scan_req *scan_req = &hif_drv->usr_scan_req;
@@ -1227,12 +1406,12 @@ static s32 handle_rcvd_ntwrk_info(struct wilc_vif *vif,
wilc_parse_network_info(rcvd_info->buffer, &info);
if (!info || !scan_req->scan_result) {
- netdev_err(vif->ndev, "driver is null\n");
- result = -EINVAL;
+ netdev_err(vif->ndev, "%s: info or scan result NULL\n",
+ __func__);
goto done;
}
- for (i = 0; i < scan_req->rcvd_ch_cnt; i++) {
+ for (i = 0; i < scan_req->ch_cnt; i++) {
if (memcmp(scan_req->net_info[i].bssid, info->bssid, 6) == 0) {
if (info->rssi <= scan_req->net_info[i].rssi) {
goto done;
@@ -1245,13 +1424,13 @@ static s32 handle_rcvd_ntwrk_info(struct wilc_vif *vif,
}
if (found) {
- if (scan_req->rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
- scan_req->net_info[scan_req->rcvd_ch_cnt].rssi = info->rssi;
+ if (scan_req->ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
+ scan_req->net_info[scan_req->ch_cnt].rssi = info->rssi;
- memcpy(scan_req->net_info[scan_req->rcvd_ch_cnt].bssid,
+ memcpy(scan_req->net_info[scan_req->ch_cnt].bssid,
info->bssid, 6);
- scan_req->rcvd_ch_cnt++;
+ scan_req->ch_cnt++;
info->new_network = true;
params = host_int_parse_join_bss_param(info);
@@ -1274,13 +1453,33 @@ done:
kfree(info);
}
- return result;
+ kfree(msg);
}
static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
u8 *assoc_resp_info,
u32 max_assoc_resp_info_len,
- u32 *rcvd_assoc_resp_info_len);
+ u32 *rcvd_assoc_resp_info_len)
+{
+ int result;
+ struct wid wid;
+
+ wid.id = WID_ASSOC_RES_INFO;
+ wid.type = WID_STR;
+ wid.val = assoc_resp_info;
+ wid.size = max_assoc_resp_info_len;
+
+ result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
+ if (result) {
+ *rcvd_assoc_resp_info_len = 0;
+ netdev_err(vif->ndev, "Failed to send association response\n");
+ return -EINVAL;
+ }
+
+ *rcvd_assoc_resp_info_len = wid.size;
+ return result;
+}
static inline void host_int_free_user_conn_req(struct host_if_drv *hif_drv)
{
@@ -1325,9 +1524,9 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
}
if (mac_status == MAC_STATUS_CONNECTED &&
- conn_info.status != SUCCESSFUL_STATUSCODE) {
+ conn_info.status != WLAN_STATUS_SUCCESS) {
netdev_err(vif->ndev,
- "Received MAC status is MAC_STATUS_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
+ "Received MAC status is MAC_STATUS_CONNECTED, Assoc Resp is not SUCCESS\n");
eth_zero_addr(wilc_connected_ssid);
} else if (mac_status == MAC_STATUS_DISCONNECTED) {
netdev_err(vif->ndev, "Received MAC status is MAC_STATUS_DISCONNECTED\n");
@@ -1338,7 +1537,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
memcpy(conn_info.bssid, hif_drv->usr_conn_req.bssid, 6);
if (mac_status == MAC_STATUS_CONNECTED &&
- conn_info.status == SUCCESSFUL_STATUSCODE) {
+ conn_info.status == WLAN_STATUS_SUCCESS) {
memcpy(hif_drv->assoc_bssid,
hif_drv->usr_conn_req.bssid, ETH_ALEN);
}
@@ -1358,7 +1557,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
hif_drv->usr_conn_req.arg);
if (mac_status == MAC_STATUS_CONNECTED &&
- conn_info.status == SUCCESSFUL_STATUSCODE) {
+ conn_info.status == WLAN_STATUS_SUCCESS) {
wilc_set_power_mgmt(vif, 0, 0);
hif_drv->hif_state = HOST_IF_CONNECTED;
@@ -1402,7 +1601,7 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL, 0,
&disconn_info, hif_drv->usr_conn_req.arg);
} else {
- netdev_err(vif->ndev, "Connect result NULL\n");
+ netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
}
eth_zero_addr(hif_drv->assoc_bssid);
@@ -1411,43 +1610,39 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
hif_drv->hif_state = HOST_IF_IDLE;
}
-static s32 handle_rcvd_gnrl_async_info(struct wilc_vif *vif,
- struct rcvd_async_info *rcvd_info)
+static void handle_rcvd_gnrl_async_info(struct work_struct *work)
{
- s32 result = 0;
- u8 msg_type = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct rcvd_async_info *rcvd_info = &msg->body.async_info;
+ u8 msg_type;
u8 mac_status;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!rcvd_info->buffer) {
- netdev_err(vif->ndev, "Received buffer is NULL\n");
- return -EINVAL;
+ netdev_err(vif->ndev, "%s: buffer is NULL\n", __func__);
+ goto free_msg;
}
if (!hif_drv) {
- netdev_err(vif->ndev, "Driver handler is NULL\n");
- kfree(rcvd_info->buffer);
- rcvd_info->buffer = NULL;
- return -ENODEV;
+ netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__);
+ goto free_rcvd_info;
}
if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP ||
hif_drv->hif_state == HOST_IF_CONNECTED ||
hif_drv->usr_scan_req.scan_result) {
if (!hif_drv->usr_conn_req.conn_result) {
- netdev_err(vif->ndev, "driver is null\n");
- kfree(rcvd_info->buffer);
- rcvd_info->buffer = NULL;
- return -EINVAL;
+ netdev_err(vif->ndev, "%s: conn_result is NULL\n",
+ __func__);
+ goto free_rcvd_info;
}
msg_type = rcvd_info->buffer[0];
if ('I' != msg_type) {
netdev_err(vif->ndev, "Received Message incorrect.\n");
- kfree(rcvd_info->buffer);
- rcvd_info->buffer = NULL;
- return -EFAULT;
+ goto free_rcvd_info;
}
mac_status = rcvd_info->buffer[7];
@@ -1464,10 +1659,12 @@ static s32 handle_rcvd_gnrl_async_info(struct wilc_vif *vif,
}
}
+free_rcvd_info:
kfree(rcvd_info->buffer);
rcvd_info->buffer = NULL;
- return result;
+free_msg:
+ kfree(msg);
}
static int wilc_pmksa_key_copy(struct wilc_vif *vif, struct key_attr *hif_key)
@@ -1491,7 +1688,7 @@ static int wilc_pmksa_key_copy(struct wilc_vif *vif, struct key_attr *hif_key)
hif_key->attr.pmkid.pmkidlist[i].pmkid, PMKID_LEN);
}
- wid.id = (u16)WID_PMKID_INFO;
+ wid.id = WID_PMKID_INFO;
wid.type = WID_STR;
wid.val = (s8 *)key_buf;
wid.size = (hif_key->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1;
@@ -1504,8 +1701,11 @@ static int wilc_pmksa_key_copy(struct wilc_vif *vif, struct key_attr *hif_key)
return ret;
}
-static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
+static void handle_key(struct work_struct *work)
{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct key_attr *hif_key = &msg->body.key_info;
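+ /* The WEP and WPA cases are synchronous: they signal msg->work_comp
+  * and the enqueuing caller frees 'msg'. Only PMKSA is fire-and-forget.
+  */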
int result = 0;
struct wid wid;
struct wid wid_list[5];
@@ -1516,7 +1716,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
case WEP:
if (hif_key->action & ADDKEY_AP) {
- wid_list[0].id = (u16)WID_11I_MODE;
+ wid_list[0].id = WID_11I_MODE;
wid_list[0].type = WID_CHAR;
wid_list[0].size = sizeof(char);
wid_list[0].val = (s8 *)&hif_key->attr.wep.mode;
@@ -1539,9 +1739,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
memcpy(&key_buf[2], hif_key->attr.wep.key,
hif_key->attr.wep.key_len);
- kfree(hif_key->attr.wep.key);
-
- wid_list[2].id = (u16)WID_WEP_KEY_VALUE;
+ wid_list[2].id = WID_WEP_KEY_VALUE;
wid_list[2].type = WID_STR;
wid_list[2].size = hif_key->attr.wep.key_len + 2;
wid_list[2].val = (s8 *)key_buf;
@@ -1561,9 +1759,8 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
memcpy(key_buf + 1, &hif_key->attr.wep.key_len, 1);
memcpy(key_buf + 2, hif_key->attr.wep.key,
hif_key->attr.wep.key_len);
- kfree(hif_key->attr.wep.key);
- wid.id = (u16)WID_ADD_WEP_KEY;
+ wid.id = WID_ADD_WEP_KEY;
wid.type = WID_STR;
wid.val = (s8 *)key_buf;
wid.size = hif_key->attr.wep.key_len + 2;
@@ -1573,7 +1770,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
wilc_get_vif_idx(vif));
kfree(key_buf);
} else if (hif_key->action & REMOVEKEY) {
- wid.id = (u16)WID_REMOVE_WEP_KEY;
+ wid.id = WID_REMOVE_WEP_KEY;
wid.type = WID_STR;
wid.val = (s8 *)&hif_key->attr.wep.index;
@@ -1583,7 +1780,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
&wid, 1,
wilc_get_vif_idx(vif));
} else if (hif_key->action & DEFAULTKEY) {
- wid.id = (u16)WID_KEY_ID;
+ wid.id = WID_KEY_ID;
wid.type = WID_CHAR;
wid.val = (s8 *)&hif_key->attr.wep.index;
wid.size = sizeof(char);
@@ -1593,7 +1790,7 @@ static int handle_key(struct wilc_vif *vif, struct key_attr *hif_key)
wilc_get_vif_idx(vif));
}
out_wep:
- complete(&hif_drv->comp_test_key_block);
+ complete(&msg->work_comp);
break;
case WPA_RX_GTK:
@@ -1612,12 +1809,12 @@ out_wep:
memcpy(key_buf + 16, hif_key->attr.wpa.key,
hif_key->attr.wpa.key_len);
- wid_list[0].id = (u16)WID_11I_MODE;
+ wid_list[0].id = WID_11I_MODE;
wid_list[0].type = WID_CHAR;
wid_list[0].size = sizeof(char);
wid_list[0].val = (s8 *)&hif_key->attr.wpa.mode;
- wid_list[1].id = (u16)WID_ADD_RX_GTK;
+ wid_list[1].id = WID_ADD_RX_GTK;
wid_list[1].type = WID_STR;
wid_list[1].val = (s8 *)key_buf;
wid_list[1].size = RX_MIC_KEY_MSG_LEN;
@@ -1645,7 +1842,7 @@ out_wep:
memcpy(key_buf + 16, hif_key->attr.wpa.key,
hif_key->attr.wpa.key_len);
- wid.id = (u16)WID_ADD_RX_GTK;
+ wid.id = WID_ADD_RX_GTK;
wid.type = WID_STR;
wid.val = (s8 *)key_buf;
wid.size = RX_MIC_KEY_MSG_LEN;
@@ -1657,9 +1854,7 @@ out_wep:
kfree(key_buf);
}
out_wpa_rx_gtk:
- complete(&hif_drv->comp_test_key_block);
- kfree(hif_key->attr.wpa.key);
- kfree(hif_key->attr.wpa.seq);
+ complete(&msg->work_comp);
break;
case WPA_PTK:
@@ -1676,12 +1871,12 @@ out_wpa_rx_gtk:
memcpy(key_buf + 8, hif_key->attr.wpa.key,
hif_key->attr.wpa.key_len);
- wid_list[0].id = (u16)WID_11I_MODE;
+ wid_list[0].id = WID_11I_MODE;
wid_list[0].type = WID_CHAR;
wid_list[0].size = sizeof(char);
wid_list[0].val = (s8 *)&hif_key->attr.wpa.mode;
- wid_list[1].id = (u16)WID_ADD_PTK;
+ wid_list[1].id = WID_ADD_PTK;
wid_list[1].type = WID_STR;
wid_list[1].val = (s8 *)key_buf;
wid_list[1].size = PTK_KEY_MSG_LEN + 1;
@@ -1702,7 +1897,7 @@ out_wpa_rx_gtk:
memcpy(key_buf + 7, hif_key->attr.wpa.key,
hif_key->attr.wpa.key_len);
- wid.id = (u16)WID_ADD_PTK;
+ wid.id = WID_ADD_PTK;
wid.type = WID_STR;
wid.val = (s8 *)key_buf;
wid.size = PTK_KEY_MSG_LEN;
@@ -1714,32 +1909,35 @@ out_wpa_rx_gtk:
}
out_wpa_ptk:
- complete(&hif_drv->comp_test_key_block);
- kfree(hif_key->attr.wpa.key);
+ complete(&msg->work_comp);
break;
case PMKSA:
result = wilc_pmksa_key_copy(vif, hif_key);
+ /* free 'msg'; this case is not a sync call */
+ kfree(msg);
break;
}
if (result)
netdev_err(vif->ndev, "Failed to send key config packet\n");
- return result;
+ /* for a sync call, the caller frees 'msg' after completion */
}
-static void handle_disconnect(struct wilc_vif *vif)
+static void handle_disconnect(struct work_struct *work)
{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
struct wid wid;
struct host_if_drv *hif_drv = vif->hif_drv;
struct disconnect_info disconn_info;
struct user_scan_req *scan_req;
struct user_conn_req *conn_req;
- s32 result = 0;
+ int result;
u16 dummy_reason_code = 0;
- wid.id = (u16)WID_DISCONNECT;
+ wid.id = WID_DISCONNECT;
wid.type = WID_CHAR;
wid.val = (s8 *)&dummy_reason_code;
wid.size = sizeof(char);
@@ -1779,7 +1977,7 @@ static void handle_disconnect(struct wilc_vif *vif)
conn_req->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL,
0, &disconn_info, conn_req->arg);
} else {
- netdev_err(vif->ndev, "conn_result = NULL\n");
+ netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
}
hif_drv->hif_state = HOST_IF_IDLE;
@@ -1797,7 +1995,8 @@ static void handle_disconnect(struct wilc_vif *vif)
out:
- complete(&hif_drv->comp_test_disconn_block);
+ complete(&msg->work_comp);
+ /* the caller frees 'msg' after the completion is received */
}
void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
@@ -1809,31 +2008,34 @@ void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
wilc_disconnect(vif, 1);
}
-static void handle_get_rssi(struct wilc_vif *vif)
+static void handle_get_rssi(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ int result;
struct wid wid;
- wid.id = (u16)WID_RSSI;
+ wid.id = WID_RSSI;
wid.type = WID_CHAR;
- wid.val = &rssi;
+ wid.val = msg->body.data;
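+ /* the single RSSI byte is returned through caller-allocated msg->body.data */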
wid.size = sizeof(char);
result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
- if (result) {
+ if (result)
netdev_err(vif->ndev, "Failed to get RSSI value\n");
- result = -EFAULT;
- }
- complete(&vif->hif_drv->comp_get_rssi);
+ complete(&msg->work_comp);
+ /* the caller frees 'msg' and its data buffer */
}
-static s32 handle_get_statistics(struct wilc_vif *vif,
- struct rf_info *stats)
+static void handle_get_statistics(struct work_struct *work)
{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
struct wid wid_list[5];
- u32 wid_cnt = 0, result = 0;
+ u32 wid_cnt = 0, result;
+ struct rf_info *stats = (struct rf_info *)msg->body.data;
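+ /* the status WIDs below are collected into wid_list for one batched GET_CFG query */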
wid_list[wid_cnt].id = WID_LINKSPEED;
wid_list[wid_cnt].type = WID_CHAR;
@@ -1878,24 +2080,27 @@ static s32 handle_get_statistics(struct wilc_vif *vif,
else if (stats->link_speed != DEFAULT_LINK_SPEED)
wilc_enable_tcp_ack_filter(false);
- if (stats != &vif->wilc->dummy_statistics)
- complete(&hif_wait_response);
- return 0;
+ /* async: free 'msg' here; sync: the caller frees it after completion */
+ if (msg->is_sync)
+ complete(&msg->work_comp);
+ else
+ kfree(msg);
}
-static s32 handle_get_inactive_time(struct wilc_vif *vif,
- struct sta_inactive_t *hif_sta_inactive)
+static void handle_get_inactive_time(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct sta_inactive_t *hif_sta_inactive = &msg->body.mac_info;
+ int result;
struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
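+ /*
+  * Two-step exchange: SET the station MAC of interest, then GET the
+  * inactive time back into msg->body for the waiting caller.
+  */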
- wid.id = (u16)WID_SET_STA_MAC_INACTIVE_TIME;
+ wid.id = WID_SET_STA_MAC_INACTIVE_TIME;
wid.type = WID_STR;
wid.size = ETH_ALEN;
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
- return -ENOMEM;
+ goto out;
ether_addr_copy(wid.val, hif_sta_inactive->mac);
@@ -1904,35 +2109,36 @@ static s32 handle_get_inactive_time(struct wilc_vif *vif,
kfree(wid.val);
if (result) {
- netdev_err(vif->ndev, "Failed to SET inactive time\n");
- return -EFAULT;
+ netdev_err(vif->ndev, "Failed to set inactive mac\n");
+ goto out;
}
- wid.id = (u16)WID_GET_INACTIVE_TIME;
+ wid.id = WID_GET_INACTIVE_TIME;
wid.type = WID_INT;
- wid.val = (s8 *)&inactive_time;
+ wid.val = (s8 *)&hif_sta_inactive->inactive_time;
wid.size = sizeof(u32);
result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
- if (result) {
+ if (result)
netdev_err(vif->ndev, "Failed to get inactive time\n");
- return -EFAULT;
- }
-
- complete(&hif_drv->comp_inactive_time);
- return result;
+out:
+ /* the caller frees 'msg' */
+ complete(&msg->work_comp);
}
-static void handle_add_beacon(struct wilc_vif *vif, struct beacon_attr *param)
+static void handle_add_beacon(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct beacon_attr *param = &msg->body.beacon_info;
+ int result;
struct wid wid;
u8 *cur_byte;
- wid.id = (u16)WID_ADD_BEACON;
+ wid.id = WID_ADD_BEACON;
wid.type = WID_BIN;
wid.size = param->head_len + param->tail_len + 16;
wid.val = kmalloc(wid.size, GFP_KERNEL);
@@ -1976,15 +2182,18 @@ error:
kfree(wid.val);
kfree(param->head);
kfree(param->tail);
+ kfree(msg);
}
-static void handle_del_beacon(struct wilc_vif *vif)
+static void handle_del_beacon(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ int result;
struct wid wid;
u8 del_beacon = 0;
- wid.id = (u16)WID_DEL_BEACON;
+ wid.id = WID_DEL_BEACON;
wid.type = WID_CHAR;
wid.size = sizeof(char);
wid.val = &del_beacon;
@@ -1993,6 +2202,7 @@ static void handle_del_beacon(struct wilc_vif *vif)
wilc_get_vif_idx(vif));
if (result)
netdev_err(vif->ndev, "Failed to send delete beacon\n");
+ kfree(msg);
}
static u32 wilc_hif_pack_sta_param(u8 *buff, struct add_sta_param *param)
@@ -2025,14 +2235,16 @@ static u32 wilc_hif_pack_sta_param(u8 *buff, struct add_sta_param *param)
return cur_byte - buff;
}
-static void handle_add_station(struct wilc_vif *vif,
- struct add_sta_param *param)
+static void handle_add_station(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct add_sta_param *param = &msg->body.add_sta_info;
+ int result;
struct wid wid;
u8 *cur_byte;
- wid.id = (u16)WID_ADD_STA;
+ wid.id = WID_ADD_STA;
wid.type = WID_BIN;
wid.size = WILC_ADD_STA_LENGTH + param->rates_len;
@@ -2051,18 +2263,21 @@ static void handle_add_station(struct wilc_vif *vif,
error:
kfree(param->rates);
kfree(wid.val);
+ kfree(msg);
}
-static void handle_del_all_sta(struct wilc_vif *vif,
- struct del_all_sta *param)
+static void handle_del_all_sta(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct del_all_sta *param = &msg->body.del_all_sta_info;
+ int result;
struct wid wid;
u8 *curr_byte;
u8 i;
u8 zero_buff[6] = {0};
- wid.id = (u16)WID_DEL_ALL_STA;
+ wid.id = WID_DEL_ALL_STA;
wid.type = WID_STR;
wid.size = (param->assoc_sta * ETH_ALEN) + 1;
@@ -2086,20 +2301,24 @@ static void handle_del_all_sta(struct wilc_vif *vif,
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
if (result)
- netdev_err(vif->ndev, "Failed to send add station\n");
+ netdev_err(vif->ndev, "Failed to send delete all station\n");
error:
kfree(wid.val);
- complete(&hif_wait_response);
+ /* the caller frees 'msg' */
+ complete(&msg->work_comp);
}
-static void handle_del_station(struct wilc_vif *vif, struct del_sta *param)
+static void handle_del_station(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct del_sta *param = &msg->body.del_sta_info;
+ int result;
struct wid wid;
- wid.id = (u16)WID_REMOVE_STA;
+ wid.id = WID_REMOVE_STA;
wid.type = WID_BIN;
wid.size = ETH_ALEN;
@@ -2112,20 +2331,23 @@ static void handle_del_station(struct wilc_vif *vif, struct del_sta *param)
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
if (result)
- netdev_err(vif->ndev, "Failed to send add station\n");
+ netdev_err(vif->ndev, "Failed to del station\n");
error:
kfree(wid.val);
+ kfree(msg);
}
-static void handle_edit_station(struct wilc_vif *vif,
- struct add_sta_param *param)
+static void handle_edit_station(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct add_sta_param *param = &msg->body.edit_sta_info;
+ int result;
struct wid wid;
u8 *cur_byte;
- wid.id = (u16)WID_EDIT_STA;
+ wid.id = WID_EDIT_STA;
wid.type = WID_BIN;
wid.size = WILC_ADD_STA_LENGTH + param->rates_len;
@@ -2144,12 +2366,13 @@ static void handle_edit_station(struct wilc_vif *vif,
error:
kfree(param->rates);
kfree(wid.val);
+ kfree(msg);
}
static int handle_remain_on_chan(struct wilc_vif *vif,
struct remain_ch *hif_remain_ch)
{
- s32 result = 0;
+ int result;
u8 remain_on_chan_flag;
struct wid wid;
struct host_if_drv *hif_drv = vif->hif_drv;
@@ -2180,7 +2403,7 @@ static int handle_remain_on_chan(struct wilc_vif *vif,
}
remain_on_chan_flag = true;
- wid.id = (u16)WID_REMAIN_ON_CHAN;
+ wid.id = WID_REMAIN_ON_CHAN;
wid.type = WID_STR;
wid.size = 2;
wid.val = kmalloc(wid.size, GFP_KERNEL);
@@ -2213,18 +2436,20 @@ error:
return result;
}
-static int handle_register_frame(struct wilc_vif *vif,
- struct reg_frame *hif_reg_frame)
+static void handle_register_frame(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct reg_frame *hif_reg_frame = &msg->body.reg_frame;
+ int result;
struct wid wid;
u8 *cur_byte;
- wid.id = (u16)WID_REGISTER_FRAME;
+ wid.id = WID_REGISTER_FRAME;
wid.type = WID_STR;
wid.val = kmalloc(sizeof(u16) + 2, GFP_KERNEL);
if (!wid.val)
- return -ENOMEM;
+ goto out;
cur_byte = wid.val;
@@ -2237,31 +2462,32 @@ static int handle_register_frame(struct wilc_vif *vif,
result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
kfree(wid.val);
- if (result) {
+ if (result)
netdev_err(vif->ndev, "Failed to frame register\n");
- result = -EINVAL;
- }
- return result;
+out:
+ kfree(msg);
}
-static u32 handle_listen_state_expired(struct wilc_vif *vif,
- struct remain_ch *hif_remain_ch)
+static void handle_listen_state_expired(struct work_struct *work)
{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct remain_ch *hif_remain_ch = &msg->body.remain_on_ch;
u8 remain_on_chan_flag;
struct wid wid;
- s32 result = 0;
+ int result;
struct host_if_drv *hif_drv = vif->hif_drv;
if (p2p_listen_state) {
remain_on_chan_flag = false;
- wid.id = (u16)WID_REMAIN_ON_CHAN;
+ wid.id = WID_REMAIN_ON_CHAN;
wid.type = WID_STR;
wid.size = 2;
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
- return -ENOMEM;
+ goto free_msg;
wid.val[0] = remain_on_chan_flag;
wid.val[1] = FALSE_FRMWR_CHANNEL;
@@ -2271,7 +2497,7 @@ static u32 handle_listen_state_expired(struct wilc_vif *vif,
kfree(wid.val);
if (result != 0) {
netdev_err(vif->ndev, "Failed to set remain channel\n");
- return result;
+ goto free_msg;
}
if (hif_drv->remain_on_ch.expired) {
@@ -2281,10 +2507,10 @@ static u32 handle_listen_state_expired(struct wilc_vif *vif,
p2p_listen_state = 0;
} else {
netdev_dbg(vif->ndev, "Not in listen state\n");
- result = -EFAULT;
}
- return result;
+free_msg:
+ kfree(msg);
}
static void listen_timer_cb(struct timer_list *t)
@@ -2292,29 +2518,34 @@ static void listen_timer_cb(struct timer_list *t)
struct host_if_drv *hif_drv = from_timer(hif_drv, t,
remain_on_ch_timer);
struct wilc_vif *vif = hif_drv->remain_on_ch_timer_vif;
- s32 result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
del_timer(&vif->hif_drv->remain_on_ch_timer);
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_LISTEN_TIMER_FIRED;
- msg.vif = vif;
- msg.body.remain_on_ch.id = vif->hif_drv->remain_on_ch.id;
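+ /*
+  * Timer callbacks must not block, so hand the expiry off to the hif
+  * workqueue; 'false' allocates an async message with no completion.
+  */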
+ msg = wilc_alloc_work(vif, handle_listen_state_expired, false);
+ if (IS_ERR(msg))
+ return;
+
+ msg->body.remain_on_ch.id = vif->hif_drv->remain_on_ch.id;
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
}
-static void handle_power_management(struct wilc_vif *vif,
- struct power_mgmt_param *pm_param)
+static void handle_power_management(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct power_mgmt_param *pm_param = &msg->body.pwr_mgmt_info;
+ int result;
struct wid wid;
s8 power_mode;
- wid.id = (u16)WID_POWER_MANAGEMENT;
+ wid.id = WID_POWER_MANAGEMENT;
if (pm_param->enabled)
power_mode = MIN_FAST_PS;
@@ -2328,16 +2559,19 @@ static void handle_power_management(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
if (result)
netdev_err(vif->ndev, "Failed to send power management\n");
+ kfree(msg);
}
-static void handle_set_mcast_filter(struct wilc_vif *vif,
- struct set_multicast *hif_set_mc)
+static void handle_set_mcast_filter(struct work_struct *work)
{
- s32 result = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ struct set_multicast *hif_set_mc = &msg->body.multicast_info;
+ int result;
struct wid wid;
u8 *cur_byte;
- wid.id = (u16)WID_SETUP_MULTICAST_FILTER;
+ wid.id = WID_SETUP_MULTICAST_FILTER;
wid.type = WID_BIN;
wid.size = sizeof(struct set_multicast) + (hif_set_mc->cnt * ETH_ALEN);
wid.val = kmalloc(wid.size, GFP_KERNEL);
@@ -2366,14 +2600,18 @@ static void handle_set_mcast_filter(struct wilc_vif *vif,
error:
kfree(wid.val);
+ kfree(msg);
}
-static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
+static void handle_set_tx_pwr(struct work_struct *work)
{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ u8 tx_pwr = msg->body.tx_power.tx_pwr;
int ret;
struct wid wid;
- wid.id = (u16)WID_TX_POWER;
+ wid.id = WID_TX_POWER;
wid.type = WID_CHAR;
wid.val = &tx_pwr;
wid.size = sizeof(char);
@@ -2382,14 +2620,19 @@ static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
wilc_get_vif_idx(vif));
if (ret)
netdev_err(vif->ndev, "Failed to set TX PWR\n");
+ kfree(msg);
}
-static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
+/* Note: 'msg' is freed by the caller after its data has been used */
+static void handle_get_tx_pwr(struct work_struct *work)
{
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc_vif *vif = msg->vif;
+ u8 *tx_pwr = &msg->body.tx_power.tx_pwr;
+ int ret;
struct wid wid;
- wid.id = (u16)WID_TX_POWER;
+ wid.id = WID_TX_POWER;
wid.type = WID_CHAR;
wid.val = (s8 *)tx_pwr;
wid.size = sizeof(char);
@@ -2399,190 +2642,64 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
- complete(&hif_wait_response);
+ complete(&msg->work_comp);
}
-static void host_if_work(struct work_struct *work)
+static void handle_scan_timer(struct work_struct *work)
{
- struct host_if_msg *msg;
- struct wilc *wilc;
- int ret = 0;
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
- msg = container_of(work, struct host_if_msg, work);
- wilc = msg->vif->wilc;
-
- if (msg->id == HOST_IF_MSG_CONNECT &&
- msg->vif->hif_drv->usr_scan_req.scan_result) {
- wilc_enqueue_cmd(msg);
- usleep_range(2 * 1000, 2 * 1000);
- goto free_msg;
- }
- switch (msg->id) {
- case HOST_IF_MSG_SCAN:
- handle_scan(msg->vif, &msg->body.scan_info);
- break;
-
- case HOST_IF_MSG_CONNECT:
- handle_connect(msg->vif, &msg->body.con_info);
- break;
-
- case HOST_IF_MSG_RCVD_NTWRK_INFO:
- handle_rcvd_ntwrk_info(msg->vif, &msg->body.net_info);
- break;
-
- case HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO:
- handle_rcvd_gnrl_async_info(msg->vif,
- &msg->body.async_info);
- break;
-
- case HOST_IF_MSG_KEY:
- handle_key(msg->vif, &msg->body.key_info);
- break;
-
- case HOST_IF_MSG_CFG_PARAMS:
- handle_cfg_param(msg->vif, &msg->body.cfg_info);
- break;
-
- case HOST_IF_MSG_SET_CHANNEL:
- handle_set_channel(msg->vif, &msg->body.channel_info);
- break;
-
- case HOST_IF_MSG_DISCONNECT:
- handle_disconnect(msg->vif);
- break;
-
- case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
- del_timer(&msg->vif->hif_drv->scan_timer);
-
- if (!wilc_wlan_get_num_conn_ifcs(wilc))
- wilc_chip_sleep_manually(wilc);
-
- handle_scan_done(msg->vif, SCAN_EVENT_DONE);
-
- if (msg->vif->hif_drv->remain_on_ch_pending)
- handle_remain_on_chan(msg->vif,
- &msg->body.remain_on_ch);
-
- break;
-
- case HOST_IF_MSG_GET_RSSI:
- handle_get_rssi(msg->vif);
- break;
-
- case HOST_IF_MSG_GET_STATISTICS:
- handle_get_statistics(msg->vif,
- (struct rf_info *)msg->body.data);
- break;
-
- case HOST_IF_MSG_ADD_BEACON:
- handle_add_beacon(msg->vif, &msg->body.beacon_info);
- break;
-
- case HOST_IF_MSG_DEL_BEACON:
- handle_del_beacon(msg->vif);
- break;
-
- case HOST_IF_MSG_ADD_STATION:
- handle_add_station(msg->vif, &msg->body.add_sta_info);
- break;
-
- case HOST_IF_MSG_DEL_STATION:
- handle_del_station(msg->vif, &msg->body.del_sta_info);
- break;
-
- case HOST_IF_MSG_EDIT_STATION:
- handle_edit_station(msg->vif, &msg->body.edit_sta_info);
- break;
-
- case HOST_IF_MSG_GET_INACTIVETIME:
- handle_get_inactive_time(msg->vif, &msg->body.mac_info);
- break;
+ handle_scan_done(msg->vif, SCAN_EVENT_ABORTED);
+ kfree(msg);
+}
- case HOST_IF_MSG_SCAN_TIMER_FIRED:
- handle_scan_done(msg->vif, SCAN_EVENT_ABORTED);
- break;
+static void handle_remain_on_chan_work(struct work_struct *work)
+{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
- case HOST_IF_MSG_CONNECT_TIMER_FIRED:
- handle_connect_timeout(msg->vif);
- break;
+ handle_remain_on_chan(msg->vif, &msg->body.remain_on_ch);
+ kfree(msg);
+}
- case HOST_IF_MSG_POWER_MGMT:
- handle_power_management(msg->vif,
- &msg->body.pwr_mgmt_info);
- break;
+static void handle_hif_exit_work(struct work_struct *work)
+{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
- case HOST_IF_MSG_SET_WFIDRV_HANDLER:
- ret = handle_set_wfi_drv_handler(msg->vif, &msg->body.drv);
- break;
+ /* the caller frees 'msg' */
+ complete(&msg->work_comp);
+}
- case HOST_IF_MSG_SET_OPERATION_MODE:
- handle_set_operation_mode(msg->vif, &msg->body.mode);
- break;
+static void handle_scan_complete(struct work_struct *work)
+{
+ struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
+ struct wilc *wilc = msg->vif->wilc;
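+ /*
+  * Scan finished: stop the scan watchdog, let the chip sleep when no
+  * interface is connected, report SCAN_EVENT_DONE, then resume any
+  * pending remain-on-channel request.
+  */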
- case HOST_IF_MSG_SET_IPADDRESS:
- handle_set_ip_address(msg->vif,
- msg->body.ip_info.ip_addr,
- msg->body.ip_info.idx);
- break;
+ del_timer(&msg->vif->hif_drv->scan_timer);
- case HOST_IF_MSG_GET_IPADDRESS:
- handle_get_ip_address(msg->vif, msg->body.ip_info.idx);
- break;
+ if (!wilc_wlan_get_num_conn_ifcs(wilc))
+ wilc_chip_sleep_manually(wilc);
- case HOST_IF_MSG_GET_MAC_ADDRESS:
- handle_get_mac_address(msg->vif,
- &msg->body.get_mac_info);
- break;
+ handle_scan_done(msg->vif, SCAN_EVENT_DONE);
- case HOST_IF_MSG_REMAIN_ON_CHAN:
+ if (msg->vif->hif_drv->remain_on_ch_pending)
handle_remain_on_chan(msg->vif, &msg->body.remain_on_ch);
- break;
-
- case HOST_IF_MSG_REGISTER_FRAME:
- handle_register_frame(msg->vif, &msg->body.reg_frame);
- break;
-
- case HOST_IF_MSG_LISTEN_TIMER_FIRED:
- handle_listen_state_expired(msg->vif, &msg->body.remain_on_ch);
- break;
-
- case HOST_IF_MSG_SET_MULTICAST_FILTER:
- handle_set_mcast_filter(msg->vif, &msg->body.multicast_info);
- break;
-
- case HOST_IF_MSG_DEL_ALL_STA:
- handle_del_all_sta(msg->vif, &msg->body.del_all_sta_info);
- break;
-
- case HOST_IF_MSG_SET_TX_POWER:
- handle_set_tx_pwr(msg->vif, msg->body.tx_power.tx_pwr);
- break;
-
- case HOST_IF_MSG_GET_TX_POWER:
- handle_get_tx_pwr(msg->vif, &msg->body.tx_power.tx_pwr);
- break;
- default:
- netdev_err(msg->vif->ndev, "[Host Interface] undefined\n");
- break;
- }
-free_msg:
- if (ret)
- netdev_err(msg->vif->ndev, "Host cmd %d failed\n", msg->id);
kfree(msg);
- complete(&hif_thread_comp);
}
static void timer_scan_cb(struct timer_list *t)
{
struct host_if_drv *hif_drv = from_timer(hif_drv, t, scan_timer);
struct wilc_vif *vif = hif_drv->scan_timer_vif;
- struct host_if_msg msg;
+ struct host_if_msg *msg;
+ int result;
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.vif = vif;
- msg.id = HOST_IF_MSG_SCAN_TIMER_FIRED;
+ msg = wilc_alloc_work(vif, handle_scan_timer, false);
+ if (IS_ERR(msg))
+ return;
- wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
+ if (result)
+ kfree(msg);
}
static void timer_connect_cb(struct timer_list *t)
@@ -2590,70 +2707,75 @@ static void timer_connect_cb(struct timer_list *t)
struct host_if_drv *hif_drv = from_timer(hif_drv, t,
connect_timer);
struct wilc_vif *vif = hif_drv->connect_timer_vif;
- struct host_if_msg msg;
+ struct host_if_msg *msg;
+ int result;
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.vif = vif;
- msg.id = HOST_IF_MSG_CONNECT_TIMER_FIRED;
+ msg = wilc_alloc_work(vif, handle_connect_timeout, false);
+ if (IS_ERR(msg))
+ return;
- wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
+ if (result)
+ kfree(msg);
}
int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
result = -EFAULT;
- netdev_err(vif->ndev, "Failed to send setup multicast\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return result;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_key, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_KEY;
- msg.body.key_info.type = WEP;
- msg.body.key_info.action = REMOVEKEY;
- msg.vif = vif;
- msg.body.key_info.attr.wep.index = index;
+ msg->body.key_info.type = WEP;
+ msg->body.key_info.action = REMOVEKEY;
+ msg->body.key_info.attr.wep.index = index;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result)
- netdev_err(vif->ndev, "Request to remove WEP key\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
else
- wait_for_completion(&hif_drv->comp_test_key_block);
+ wait_for_completion(&msg->work_comp);
+ kfree(msg);
return result;
}
int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
result = -EFAULT;
- netdev_err(vif->ndev, "driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__);
return result;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_key, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_KEY;
- msg.body.key_info.type = WEP;
- msg.body.key_info.action = DEFAULTKEY;
- msg.vif = vif;
- msg.body.key_info.attr.wep.index = index;
+ msg->body.key_info.type = WEP;
+ msg->body.key_info.action = DEFAULTKEY;
+ msg->body.key_info.attr.wep.index = index;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result)
- netdev_err(vif->ndev, "Default key index\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
else
- wait_for_completion(&hif_drv->comp_test_key_block);
+ wait_for_completion(&msg->work_comp);
+ kfree(msg);
return result;
}
@@ -2661,74 +2783,84 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
u8 index)
{
int result;
- struct host_if_msg msg;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_key, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_KEY;
- msg.body.key_info.type = WEP;
- msg.body.key_info.action = ADDKEY;
- msg.vif = vif;
- msg.body.key_info.attr.wep.key = kmemdup(key, len, GFP_KERNEL);
- if (!msg.body.key_info.attr.wep.key)
- return -ENOMEM;
+ msg->body.key_info.type = WEP;
+ msg->body.key_info.action = ADDKEY;
+ msg->body.key_info.attr.wep.key = kmemdup(key, len, GFP_KERNEL);
+ if (!msg->body.key_info.attr.wep.key) {
+ result = -ENOMEM;
+ goto free_msg;
+ }
- msg.body.key_info.attr.wep.key_len = len;
- msg.body.key_info.attr.wep.index = index;
+ msg->body.key_info.attr.wep.key_len = len;
+ msg->body.key_info.attr.wep.index = index;
- result = wilc_enqueue_cmd(&msg);
- if (result) {
- netdev_err(vif->ndev, "STA - WEP Key\n");
- kfree(msg.body.key_info.attr.wep.key);
- return result;
- }
+ result = wilc_enqueue_work(msg);
+ if (result)
+ goto free_key;
- wait_for_completion(&hif_drv->comp_test_key_block);
- return 0;
+ wait_for_completion(&msg->work_comp);
+
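+ /* both success and failure paths fall through to free the key and 'msg' */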
+free_key:
+ kfree(msg->body.key_info.attr.wep.key);
+
+free_msg:
+ kfree(msg);
+ return result;
}
int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index, u8 mode, enum AUTHTYPE auth_type)
+ u8 index, u8 mode, enum authtype auth_type)
{
int result;
- struct host_if_msg msg;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__);
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_key, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_KEY;
- msg.body.key_info.type = WEP;
- msg.body.key_info.action = ADDKEY_AP;
- msg.vif = vif;
- msg.body.key_info.attr.wep.key = kmemdup(key, len, GFP_KERNEL);
- if (!msg.body.key_info.attr.wep.key)
- return -ENOMEM;
+ msg->body.key_info.type = WEP;
+ msg->body.key_info.action = ADDKEY_AP;
+ msg->body.key_info.attr.wep.key = kmemdup(key, len, GFP_KERNEL);
+ if (!msg->body.key_info.attr.wep.key) {
+ result = -ENOMEM;
+ goto free_msg;
+ }
- msg.body.key_info.attr.wep.key_len = len;
- msg.body.key_info.attr.wep.index = index;
- msg.body.key_info.attr.wep.mode = mode;
- msg.body.key_info.attr.wep.auth_type = auth_type;
+ msg->body.key_info.attr.wep.key_len = len;
+ msg->body.key_info.attr.wep.index = index;
+ msg->body.key_info.attr.wep.mode = mode;
+ msg->body.key_info.attr.wep.auth_type = auth_type;
- result = wilc_enqueue_cmd(&msg);
- if (result) {
- netdev_err(vif->ndev, "AP - WEP Key\n");
- kfree(msg.body.key_info.attr.wep.key);
- return result;
- }
+ result = wilc_enqueue_work(msg);
+ if (result)
+ goto free_key;
- wait_for_completion(&hif_drv->comp_test_key_block);
- return 0;
+ wait_for_completion(&msg->work_comp);
+
+free_key:
+ kfree(msg->body.key_info.attr.wep.key);
+
+free_msg:
+ kfree(msg);
+ return result;
}
int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
@@ -2736,12 +2868,12 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
u8 mode, u8 cipher_mode, u8 index)
{
int result;
- struct host_if_msg msg;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
u8 key_len = ptk_key_len;
if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
@@ -2751,43 +2883,50 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
if (tx_mic)
key_len += TX_MIC_KEY_LEN;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_key, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_KEY;
- msg.body.key_info.type = WPA_PTK;
+ msg->body.key_info.type = WPA_PTK;
if (mode == AP_MODE) {
- msg.body.key_info.action = ADDKEY_AP;
- msg.body.key_info.attr.wpa.index = index;
+ msg->body.key_info.action = ADDKEY_AP;
+ msg->body.key_info.attr.wpa.index = index;
}
if (mode == STATION_MODE)
- msg.body.key_info.action = ADDKEY;
+ msg->body.key_info.action = ADDKEY;
- msg.body.key_info.attr.wpa.key = kmemdup(ptk, ptk_key_len, GFP_KERNEL);
- if (!msg.body.key_info.attr.wpa.key)
- return -ENOMEM;
+ msg->body.key_info.attr.wpa.key = kmemdup(ptk, ptk_key_len, GFP_KERNEL);
+ if (!msg->body.key_info.attr.wpa.key) {
+ result = -ENOMEM;
+ goto free_msg;
+ }
if (rx_mic)
- memcpy(msg.body.key_info.attr.wpa.key + 16, rx_mic,
+ memcpy(msg->body.key_info.attr.wpa.key + 16, rx_mic,
RX_MIC_KEY_LEN);
if (tx_mic)
- memcpy(msg.body.key_info.attr.wpa.key + 24, tx_mic,
+ memcpy(msg->body.key_info.attr.wpa.key + 24, tx_mic,
TX_MIC_KEY_LEN);
- msg.body.key_info.attr.wpa.key_len = key_len;
- msg.body.key_info.attr.wpa.mac_addr = mac_addr;
- msg.body.key_info.attr.wpa.mode = cipher_mode;
- msg.vif = vif;
+ msg->body.key_info.attr.wpa.key_len = key_len;
+ msg->body.key_info.attr.wpa.mac_addr = mac_addr;
+ msg->body.key_info.attr.wpa.mode = cipher_mode;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "PTK Key\n");
- kfree(msg.body.key_info.attr.wpa.key);
- return result;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ goto free_key;
}
- wait_for_completion(&hif_drv->comp_test_key_block);
- return 0;
+ wait_for_completion(&msg->work_comp);
+
+free_key:
+ kfree(msg->body.key_info.attr.wpa.key);
+
+free_msg:
+ kfree(msg);
+ return result;
}
int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
@@ -2796,15 +2935,18 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 cipher_mode)
{
int result;
- struct host_if_msg msg;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
u8 key_len = gtk_key_len;
if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
+
+ msg = wilc_alloc_work(vif, handle_key, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
if (rx_mic)
key_len += RX_MIC_KEY_LEN;
@@ -2813,258 +2955,263 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
key_len += TX_MIC_KEY_LEN;
if (key_rsc) {
- msg.body.key_info.attr.wpa.seq = kmemdup(key_rsc,
- key_rsc_len,
- GFP_KERNEL);
- if (!msg.body.key_info.attr.wpa.seq)
- return -ENOMEM;
+ msg->body.key_info.attr.wpa.seq = kmemdup(key_rsc,
+ key_rsc_len,
+ GFP_KERNEL);
+ if (!msg->body.key_info.attr.wpa.seq) {
+ result = -ENOMEM;
+ goto free_msg;
+ }
}
- msg.id = HOST_IF_MSG_KEY;
- msg.body.key_info.type = WPA_RX_GTK;
- msg.vif = vif;
+ msg->body.key_info.type = WPA_RX_GTK;
if (mode == AP_MODE) {
- msg.body.key_info.action = ADDKEY_AP;
- msg.body.key_info.attr.wpa.mode = cipher_mode;
+ msg->body.key_info.action = ADDKEY_AP;
+ msg->body.key_info.attr.wpa.mode = cipher_mode;
}
if (mode == STATION_MODE)
- msg.body.key_info.action = ADDKEY;
+ msg->body.key_info.action = ADDKEY;
- msg.body.key_info.attr.wpa.key = kmemdup(rx_gtk,
- key_len,
- GFP_KERNEL);
- if (!msg.body.key_info.attr.wpa.key) {
- kfree(msg.body.key_info.attr.wpa.seq);
- return -ENOMEM;
+ msg->body.key_info.attr.wpa.key = kmemdup(rx_gtk, key_len, GFP_KERNEL);
+ if (!msg->body.key_info.attr.wpa.key) {
+ result = -ENOMEM;
+ goto free_seq;
}
if (rx_mic)
- memcpy(msg.body.key_info.attr.wpa.key + 16, rx_mic,
+ memcpy(msg->body.key_info.attr.wpa.key + 16, rx_mic,
RX_MIC_KEY_LEN);
if (tx_mic)
- memcpy(msg.body.key_info.attr.wpa.key + 24, tx_mic,
+ memcpy(msg->body.key_info.attr.wpa.key + 24, tx_mic,
TX_MIC_KEY_LEN);
- msg.body.key_info.attr.wpa.index = index;
- msg.body.key_info.attr.wpa.key_len = key_len;
- msg.body.key_info.attr.wpa.seq_len = key_rsc_len;
+ msg->body.key_info.attr.wpa.index = index;
+ msg->body.key_info.attr.wpa.key_len = key_len;
+ msg->body.key_info.attr.wpa.seq_len = key_rsc_len;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "RX GTK\n");
- kfree(msg.body.key_info.attr.wpa.seq);
- kfree(msg.body.key_info.attr.wpa.key);
- return result;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ goto free_key;
}
- wait_for_completion(&hif_drv->comp_test_key_block);
- return 0;
+ wait_for_completion(&msg->work_comp);
+
+free_key:
+ kfree(msg->body.key_info.attr.wpa.key);
+
+free_seq:
+ kfree(msg->body.key_info.attr.wpa.seq);
+
+free_msg:
+ kfree(msg);
+ return result;
}
int wilc_set_pmkid_info(struct wilc_vif *vif,
struct host_if_pmkid_attr *pmkid)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
int i;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_key, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_KEY;
- msg.body.key_info.type = PMKSA;
- msg.body.key_info.action = ADDKEY;
- msg.vif = vif;
+ msg->body.key_info.type = PMKSA;
+ msg->body.key_info.action = ADDKEY;
for (i = 0; i < pmkid->numpmkid; i++) {
- memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].bssid,
+ memcpy(msg->body.key_info.attr.pmkid.pmkidlist[i].bssid,
&pmkid->pmkidlist[i].bssid, ETH_ALEN);
- memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].pmkid,
+ memcpy(msg->body.key_info.attr.pmkid.pmkidlist[i].pmkid,
&pmkid->pmkidlist[i].pmkid, PMKID_LEN);
}
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "PMKID Info\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_get_mac_address, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_GET_MAC_ADDRESS;
- msg.body.get_mac_info.mac_addr = mac_addr;
- msg.vif = vif;
+ msg->body.get_mac_info.mac_addr = mac_addr;
- result = wilc_enqueue_cmd(&msg);
- if (result) {
- netdev_err(vif->ndev, "Failed to send get mac address\n");
- return -EFAULT;
- }
+ result = wilc_enqueue_work(msg);
+ if (result)
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ else
+ wait_for_completion(&msg->work_comp);
+
+ kfree(msg);
- wait_for_completion(&hif_wait_response);
return result;
}
int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
size_t ssid_len, const u8 *ies, size_t ies_len,
wilc_connect_result connect_result, void *user_arg,
- u8 security, enum AUTHTYPE auth_type,
+ u8 security, enum authtype auth_type,
u8 channel, void *join_params)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv || !connect_result) {
- netdev_err(vif->ndev, "Driver is null\n");
+ netdev_err(vif->ndev,
+ "%s: hif driver or connect result is NULL",
+ __func__);
return -EFAULT;
}
if (!join_params) {
- netdev_err(vif->ndev, "Unable to Join - JoinParams is NULL\n");
+ netdev_err(vif->ndev, "%s: joinparams is NULL\n", __func__);
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_connect, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_CONNECT;
-
- msg.body.con_info.security = security;
- msg.body.con_info.auth_type = auth_type;
- msg.body.con_info.ch = channel;
- msg.body.con_info.result = connect_result;
- msg.body.con_info.arg = user_arg;
- msg.body.con_info.params = join_params;
- msg.vif = vif;
+ msg->body.con_info.security = security;
+ msg->body.con_info.auth_type = auth_type;
+ msg->body.con_info.ch = channel;
+ msg->body.con_info.result = connect_result;
+ msg->body.con_info.arg = user_arg;
+ msg->body.con_info.params = join_params;
if (bssid) {
- msg.body.con_info.bssid = kmemdup(bssid, 6, GFP_KERNEL);
- if (!msg.body.con_info.bssid)
- return -ENOMEM;
+ msg->body.con_info.bssid = kmemdup(bssid, 6, GFP_KERNEL);
+ if (!msg->body.con_info.bssid) {
+ result = -ENOMEM;
+ goto free_msg;
+ }
}
if (ssid) {
- msg.body.con_info.ssid_len = ssid_len;
- msg.body.con_info.ssid = kmemdup(ssid, ssid_len, GFP_KERNEL);
- if (!msg.body.con_info.ssid)
- return -ENOMEM;
+ msg->body.con_info.ssid_len = ssid_len;
+ msg->body.con_info.ssid = kmemdup(ssid, ssid_len, GFP_KERNEL);
+ if (!msg->body.con_info.ssid) {
+ result = -ENOMEM;
+ goto free_bssid;
+ }
}
if (ies) {
- msg.body.con_info.ies_len = ies_len;
- msg.body.con_info.ies = kmemdup(ies, ies_len, GFP_KERNEL);
- if (!msg.body.con_info.ies)
- return -ENOMEM;
+ msg->body.con_info.ies_len = ies_len;
+ msg->body.con_info.ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!msg->body.con_info.ies) {
+ result = -ENOMEM;
+ goto free_ssid;
+ }
}
if (hif_drv->hif_state < HOST_IF_CONNECTING)
hif_drv->hif_state = HOST_IF_CONNECTING;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "send message: Set join request\n");
- return -EFAULT;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ goto free_ies;
}
hif_drv->connect_timer_vif = vif;
mod_timer(&hif_drv->connect_timer,
jiffies + msecs_to_jiffies(HOST_IF_CONNECT_TIMEOUT));
+ return 0;
+
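+ /* error unwinding: release allocations in reverse order */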
+free_ies:
+ kfree(msg->body.con_info.ies);
+
+free_ssid:
+ kfree(msg->body.con_info.ssid);
+
+free_bssid:
+ kfree(msg->body.con_info.bssid);
+
+free_msg:
+ kfree(msg);
return result;
}
int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- netdev_err(vif->ndev, "Driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- msg.id = HOST_IF_MSG_DISCONNECT;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_disconnect, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result)
- netdev_err(vif->ndev, "Failed to send message: disconnect\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
else
- wait_for_completion(&hif_drv->comp_test_disconn_block);
-
- return result;
-}
-
-static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
- u8 *assoc_resp_info,
- u32 max_assoc_resp_info_len,
- u32 *rcvd_assoc_resp_info_len)
-{
- s32 result = 0;
- struct wid wid;
-
- wid.id = (u16)WID_ASSOC_RES_INFO;
- wid.type = WID_STR;
- wid.val = assoc_resp_info;
- wid.size = max_assoc_resp_info_len;
+ wait_for_completion(&msg->work_comp);
- result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
- if (result) {
- *rcvd_assoc_resp_info_len = 0;
- netdev_err(vif->ndev, "Failed to send association response\n");
- return -EINVAL;
- }
-
- *rcvd_assoc_resp_info_len = wid.size;
+ kfree(msg);
return result;
}
int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
{
int result;
- struct host_if_msg msg;
+ struct host_if_msg *msg;
+
+ msg = wilc_alloc_work(vif, handle_set_channel, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_SET_CHANNEL;
- msg.body.channel_info.set_ch = channel;
- msg.vif = vif;
+ msg->body.channel_info.set_ch = channel;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "wilc mq send fail\n");
- return -EINVAL;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
}
- return 0;
+ return result;
}
int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
u8 ifc_id)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
+
+ msg = wilc_alloc_work(vif, handle_set_wfi_drv_handler, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_SET_WFIDRV_HANDLER;
- msg.body.drv.handler = index;
- msg.body.drv.mode = mode;
- msg.body.drv.name = ifc_id;
- msg.vif = vif;
+ msg->body.drv.handler = index;
+ msg->body.drv.mode = mode;
+ msg->body.drv.name = ifc_id;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "wilc mq send fail\n");
- result = -EINVAL;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
}
return result;
@@ -3072,18 +3219,18 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_SET_OPERATION_MODE;
- msg.body.mode.mode = mode;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_set_operation_mode, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- result = wilc_enqueue_cmd(&msg);
+ msg->body.mode.mode = mode;
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "wilc mq send fail\n");
- result = -EINVAL;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
}
return result;
@@ -3092,78 +3239,91 @@ int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode)
s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
u32 *out_val)
{
- s32 result = 0;
- struct host_if_msg msg;
+ s32 result;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
- memcpy(msg.body.mac_info.mac, mac, ETH_ALEN);
+ msg = wilc_alloc_work(vif, handle_get_inactive_time, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_GET_INACTIVETIME;
- msg.vif = vif;
+ memcpy(msg->body.mac_info.mac, mac, ETH_ALEN);
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result)
- netdev_err(vif->ndev, "Failed to send get host ch param\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
else
- wait_for_completion(&hif_drv->comp_inactive_time);
+ wait_for_completion(&msg->work_comp);
- *out_val = inactive_time;
+ *out_val = msg->body.mac_info.inactive_time;
+ kfree(msg);
return result;
}
int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
{
- int result = 0;
- struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_GET_RSSI;
- msg.vif = vif;
+ int result;
+ struct host_if_msg *msg;
- result = wilc_enqueue_cmd(&msg);
- if (result) {
- netdev_err(vif->ndev, "Failed to send get host ch param\n");
+ if (!rssi_level) {
+ netdev_err(vif->ndev, "%s: RSSI level is NULL\n", __func__);
return -EFAULT;
}
- wait_for_completion(&hif_drv->comp_get_rssi);
+ msg = wilc_alloc_work(vif, handle_get_rssi, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- if (!rssi_level) {
- netdev_err(vif->ndev, "RSS pointer value is null\n");
- return -EFAULT;
+ msg->body.data = kzalloc(sizeof(s8), GFP_KERNEL);
+ if (!msg->body.data) {
+ kfree(msg);
+ return -ENOMEM;
}
- *rssi_level = rssi;
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ } else {
+ wait_for_completion(&msg->work_comp);
+ *rssi_level = *msg->body.data;
+ }
+
+ kfree(msg->body.data);
+ kfree(msg);
return result;
}
-int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
+int
+wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats, bool is_sync)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_GET_STATISTICS;
- msg.body.data = (char *)stats;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_get_statistics, is_sync);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- result = wilc_enqueue_cmd(&msg);
+ msg->body.data = (char *)stats;
+
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "Failed to send get host channel\n");
- return -EFAULT;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ return result;
+ }
+
+ if (is_sync) {
+ wait_for_completion(&msg->work_comp);
+ kfree(msg);
}
- if (stats != &vif->wilc->dummy_statistics)
- wait_for_completion(&hif_wait_response);
return result;
}
@@ -3172,9 +3332,9 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
size_t ies_len, wilc_scan_result scan_result, void *user_arg,
struct hidden_network *hidden_network)
{
- int result = 0;
- struct host_if_msg msg;
- struct scan_attr *scan_info = &msg.body.scan_info;
+ int result;
+ struct host_if_msg *msg;
+ struct scan_attr *scan_info;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv || !scan_result) {
@@ -3182,16 +3342,17 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_scan, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_SCAN;
+ scan_info = &msg->body.scan_info;
if (hidden_network) {
scan_info->hidden_network.net_info = hidden_network->net_info;
scan_info->hidden_network.n_ssids = hidden_network->n_ssids;
}
- msg.vif = vif;
scan_info->src = scan_source;
scan_info->type = scan_type;
scan_info->result = scan_result;
@@ -3201,44 +3362,63 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
scan_info->ch_freq_list = kmemdup(ch_freq_list,
ch_list_len,
GFP_KERNEL);
- if (!scan_info->ch_freq_list)
- return -ENOMEM;
+ if (!scan_info->ch_freq_list) {
+ result = -ENOMEM;
+ goto free_msg;
+ }
scan_info->ies_len = ies_len;
scan_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
- if (!scan_info->ies)
- return -ENOMEM;
+ if (!scan_info->ies) {
+ result = -ENOMEM;
+ goto free_freq_list;
+ }
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "Error in sending message queue\n");
- return -EINVAL;
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ goto free_ies;
}
hif_drv->scan_timer_vif = vif;
mod_timer(&hif_drv->scan_timer,
jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
+ return 0;
+
+free_ies:
+ kfree(scan_info->ies);
+
+free_freq_list:
+ kfree(scan_info->ch_freq_list);
+
+free_msg:
+ kfree(msg);
return result;
}
int wilc_hif_set_cfg(struct wilc_vif *vif,
struct cfg_param_attr *cfg_param)
{
- struct host_if_msg msg;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
+ int result;
if (!hif_drv) {
- netdev_err(vif->ndev, "hif_drv NULL\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_CFG_PARAMS;
- msg.body.cfg_info = *cfg_param;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_cfg_param, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->body.cfg_info = *cfg_param;
+ result = wilc_enqueue_work(msg);
+ if (result)
+ kfree(msg);
- return wilc_enqueue_cmd(&msg);
+ return result;
}
static void get_periodic_rssi(struct timer_list *unused)
@@ -3246,12 +3426,12 @@ static void get_periodic_rssi(struct timer_list *unused)
struct wilc_vif *vif = periodic_rssi_vif;
if (!vif->hif_drv) {
- netdev_err(vif->ndev, "Driver handler is NULL\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return;
}
if (vif->hif_drv->hif_state == HOST_IF_CONNECTED)
- wilc_get_statistics(vif, &vif->wilc->dummy_statistics);
+ wilc_get_statistics(vif, &vif->wilc->dummy_statistics, false);
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
}
@@ -3259,15 +3439,10 @@ static void get_periodic_rssi(struct timer_list *unused)
int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
{
struct host_if_drv *hif_drv;
- struct wilc_vif *vif;
- struct wilc *wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
int i;
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
- init_completion(&hif_wait_response);
-
hif_drv = kzalloc(sizeof(*hif_drv), GFP_KERNEL);
if (!hif_drv)
return -ENOMEM;
@@ -3283,16 +3458,10 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
wilc_optaining_ip = false;
if (clients_count == 0) {
- init_completion(&hif_thread_comp);
init_completion(&hif_driver_comp);
mutex_init(&hif_deinit_lock);
}
- init_completion(&hif_drv->comp_test_key_block);
- init_completion(&hif_drv->comp_test_disconn_block);
- init_completion(&hif_drv->comp_get_rssi);
- init_completion(&hif_drv->comp_inactive_time);
-
if (clients_count == 0) {
hif_workqueue = create_singlethread_workqueue("WILC_wq");
if (!hif_workqueue) {
@@ -3332,11 +3501,10 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
int wilc_deinit(struct wilc_vif *vif)
{
int result = 0;
- struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
- if (!hif_drv) {
- netdev_err(vif->ndev, "hif_drv = NULL\n");
+ if (!hif_drv) {
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
@@ -3361,18 +3529,19 @@ int wilc_deinit(struct wilc_vif *vif)
hif_drv->hif_state = HOST_IF_IDLE;
- memset(&msg, 0, sizeof(struct host_if_msg));
-
if (clients_count == 1) {
- msg.id = HOST_IF_MSG_EXIT;
- msg.vif = vif;
-
- result = wilc_enqueue_cmd(&msg);
- if (result != 0)
- netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
- else
- wait_for_completion(&hif_thread_comp);
-
+ struct host_if_msg *msg;
+
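+ /*
+  * Queue a final synchronous no-op; since the workqueue is single
+  * threaded, waiting for its completion drains all earlier work
+  * before the queue is destroyed.
+  */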
+ msg = wilc_alloc_work(vif, handle_hif_exit_work, true);
+ if (!IS_ERR(msg)) {
+ result = wilc_enqueue_work(msg);
+ if (result)
+ netdev_err(vif->ndev, "deinit : Error(%d)\n",
+ result);
+ else
+ wait_for_completion(&msg->work_comp);
+ kfree(msg);
+ }
destroy_workqueue(hif_workqueue);
}
@@ -3386,10 +3555,10 @@ int wilc_deinit(struct wilc_vif *vif)
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- s32 result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
int id;
- struct host_if_drv *hif_drv = NULL;
+ struct host_if_drv *hif_drv;
struct wilc_vif *vif;
id = buffer[length - 4];
@@ -3401,34 +3570,36 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
return;
hif_drv = vif->hif_drv;
- if (!hif_drv || hif_drv == terminated_handle) {
+ if (!hif_drv || hif_drv == terminated_handle) {
netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
return;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- msg.id = HOST_IF_MSG_RCVD_NTWRK_INFO;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_rcvd_ntwrk_info, false);
+ if (IS_ERR(msg))
+ return;
- msg.body.net_info.len = length;
- msg.body.net_info.buffer = kmemdup(buffer, length, GFP_KERNEL);
- if (!msg.body.net_info.buffer)
+ msg->body.net_info.len = length;
+ msg->body.net_info.buffer = kmemdup(buffer, length, GFP_KERNEL);
+ if (!msg->body.net_info.buffer) {
+ kfree(msg);
return;
+ }
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "message parameters (%d)\n", result);
- kfree(msg.body.net_info.buffer);
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg->body.net_info.buffer);
+ kfree(msg);
}
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- s32 result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
int id;
- struct host_if_drv *hif_drv = NULL;
+ struct host_if_drv *hif_drv;
struct wilc_vif *vif;
mutex_lock(&hif_deinit_lock);
@@ -3451,27 +3622,30 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
}
if (!hif_drv->usr_conn_req.conn_result) {
- netdev_err(vif->ndev, "there is no current Connect Request\n");
+ netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
mutex_unlock(&hif_deinit_lock);
return;
}
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- msg.id = HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_rcvd_gnrl_async_info, false);
+ if (IS_ERR(msg)) {
+ mutex_unlock(&hif_deinit_lock);
+ return;
+ }
- msg.body.async_info.len = length;
- msg.body.async_info.buffer = kmemdup(buffer, length, GFP_KERNEL);
- if (!msg.body.async_info.buffer) {
+ msg->body.async_info.len = length;
+ msg->body.async_info.buffer = kmemdup(buffer, length, GFP_KERNEL);
+ if (!msg->body.async_info.buffer) {
+ kfree(msg);
mutex_unlock(&hif_deinit_lock);
return;
}
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "synchronous info (%d)\n", result);
- kfree(msg.body.async_info.buffer);
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg->body.async_info.buffer);
+ kfree(msg);
}
mutex_unlock(&hif_deinit_lock);
@@ -3479,10 +3653,9 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- s32 result = 0;
- struct host_if_msg msg;
+ int result;
int id;
- struct host_if_drv *hif_drv = NULL;
+ struct host_if_drv *hif_drv;
struct wilc_vif *vif;
id = buffer[length - 4];
@@ -3498,14 +3671,18 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
return;
if (hif_drv->usr_scan_req.scan_result) {
- memset(&msg, 0, sizeof(struct host_if_msg));
+ struct host_if_msg *msg;
- msg.id = HOST_IF_MSG_RCVD_SCAN_COMPLETE;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_scan_complete, false);
+ if (IS_ERR(msg))
+ return;
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "complete param (%d)\n", result);
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n",
+ __func__);
+ kfree(msg);
+ }
}
}
@@ -3515,79 +3692,86 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
wilc_remain_on_chan_ready ready,
void *user_arg)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_remain_on_chan_work, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_REMAIN_ON_CHAN;
- msg.body.remain_on_ch.ch = chan;
- msg.body.remain_on_ch.expired = expired;
- msg.body.remain_on_ch.ready = ready;
- msg.body.remain_on_ch.arg = user_arg;
- msg.body.remain_on_ch.duration = duration;
- msg.body.remain_on_ch.id = session_id;
- msg.vif = vif;
+ msg->body.remain_on_ch.ch = chan;
+ msg->body.remain_on_ch.expired = expired;
+ msg->body.remain_on_ch.ready = ready;
+ msg->body.remain_on_ch.arg = user_arg;
+ msg->body.remain_on_ch.duration = duration;
+ msg->body.remain_on_ch.id = session_id;
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc mq send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
+ netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
return -EFAULT;
}
del_timer(&hif_drv->remain_on_ch_timer);
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_LISTEN_TIMER_FIRED;
- msg.vif = vif;
- msg.body.remain_on_ch.id = session_id;
+ msg = wilc_alloc_work(vif, handle_listen_state_expired, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc mq send fail\n");
+ msg->body.remain_on_ch.id = session_id;
+
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_register_frame, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_REGISTER_FRAME;
switch (frame_type) {
case ACTION:
- msg.body.reg_frame.reg_id = ACTION_FRM_IDX;
+ msg->body.reg_frame.reg_id = ACTION_FRM_IDX;
break;
case PROBE_REQ:
- msg.body.reg_frame.reg_id = PROBE_REQ_IDX;
+ msg->body.reg_frame.reg_id = PROBE_REQ_IDX;
break;
default:
break;
}
- msg.body.reg_frame.frame_type = frame_type;
- msg.body.reg_frame.reg = reg;
- msg.vif = vif;
+ msg->body.reg_frame.frame_type = frame_type;
+ msg->body.reg_frame.reg = reg;
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc mq send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
@@ -3595,14 +3779,15 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
u32 head_len, u8 *head, u32 tail_len, u8 *tail)
{
- int result = 0;
- struct host_if_msg msg;
- struct beacon_attr *beacon_info = &msg.body.beacon_info;
+ int result;
+ struct host_if_msg *msg;
+ struct beacon_attr *beacon_info;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_add_beacon, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_ADD_BEACON;
- msg.vif = vif;
+ beacon_info = &msg->body.beacon_info;
beacon_info->interval = interval;
beacon_info->dtim_period = dtim_period;
beacon_info->head_len = head_len;
@@ -3623,15 +3808,15 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
beacon_info->tail = NULL;
}
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result)
- netdev_err(vif->ndev, "wilc mq send fail\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
error:
if (result) {
kfree(beacon_info->head);
-
kfree(beacon_info->tail);
+ kfree(msg);
}
return result;
@@ -3639,82 +3824,92 @@ error:
int wilc_del_beacon(struct wilc_vif *vif)
{
- int result = 0;
- struct host_if_msg msg;
+ int result;
+ struct host_if_msg *msg;
- msg.id = HOST_IF_MSG_DEL_BEACON;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_del_beacon, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
{
- int result = 0;
- struct host_if_msg msg;
- struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
+ int result;
+ struct host_if_msg *msg;
+ struct add_sta_param *add_sta_info;
- msg.id = HOST_IF_MSG_ADD_STATION;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_add_station, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+ add_sta_info = &msg->body.add_sta_info;
memcpy(add_sta_info, sta_param, sizeof(struct add_sta_param));
if (add_sta_info->rates_len > 0) {
add_sta_info->rates = kmemdup(sta_param->rates,
add_sta_info->rates_len,
GFP_KERNEL);
- if (!add_sta_info->rates)
+ if (!add_sta_info->rates) {
+ kfree(msg);
return -ENOMEM;
+ }
}
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
kfree(add_sta_info->rates);
+ kfree(msg);
}
return result;
}
int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
{
- int result = 0;
- struct host_if_msg msg;
- struct del_sta *del_sta_info = &msg.body.del_sta_info;
+ int result;
+ struct host_if_msg *msg;
+ struct del_sta *del_sta_info;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_del_station, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_DEL_STATION;
- msg.vif = vif;
+ del_sta_info = &msg->body.del_sta_info;
if (!mac_addr)
eth_broadcast_addr(del_sta_info->mac_addr);
else
memcpy(del_sta_info->mac_addr, mac_addr, ETH_ALEN);
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
{
- int result = 0;
- struct host_if_msg msg;
- struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info;
+ int result;
+ struct host_if_msg *msg;
+ struct del_all_sta *del_all_sta_info;
u8 zero_addr[ETH_ALEN] = {0};
int i;
u8 assoc_sta = 0;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_del_all_sta, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_DEL_ALL_STA;
- msg.vif = vif;
+ del_all_sta_info = &msg->body.del_all_sta_info;
for (i = 0; i < MAX_NUM_STA; i++) {
if (memcmp(mac_addr[i], zero_addr, ETH_ALEN)) {
@@ -3723,16 +3918,20 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
assoc_sta++;
}
}
- if (!assoc_sta)
- return result;
+ if (!assoc_sta) {
+ kfree(msg);
+ return 0;
+ }
del_all_sta_info->assoc_sta = assoc_sta;
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
else
- wait_for_completion(&hif_wait_response);
+ wait_for_completion(&msg->work_comp);
+
+ kfree(msg);
return result;
}
@@ -3740,28 +3939,31 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
int wilc_edit_station(struct wilc_vif *vif,
struct add_sta_param *sta_param)
{
- int result = 0;
- struct host_if_msg msg;
- struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
+ int result;
+ struct host_if_msg *msg;
+ struct add_sta_param *add_sta_info;
- msg.id = HOST_IF_MSG_EDIT_STATION;
- msg.vif = vif;
+ msg = wilc_alloc_work(vif, handle_edit_station, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- memcpy(add_sta_info, sta_param, sizeof(struct add_sta_param));
+ add_sta_info = &msg->body.add_sta_info;
+ memcpy(add_sta_info, sta_param, sizeof(*add_sta_info));
if (add_sta_info->rates_len > 0) {
add_sta_info->rates = kmemdup(sta_param->rates,
add_sta_info->rates_len,
GFP_KERNEL);
- if (!add_sta_info->rates)
+ if (!add_sta_info->rates) {
+ kfree(msg);
return -ENOMEM;
+ }
}
- result = wilc_enqueue_cmd(&msg);
+ result = wilc_enqueue_work(msg);
if (result) {
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
kfree(add_sta_info->rates);
+ kfree(msg);
}
return result;
@@ -3769,287 +3971,128 @@ int wilc_edit_station(struct wilc_vif *vif,
int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
{
- int result = 0;
- struct host_if_msg msg;
- struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info;
+ int result;
+ struct host_if_msg *msg;
if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
return 0;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_power_management, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_POWER_MGMT;
- msg.vif = vif;
+ msg->body.pwr_mgmt_info.enabled = enabled;
+ msg->body.pwr_mgmt_info.timeout = timeout;
- pwr_mgmt_info->enabled = enabled;
- pwr_mgmt_info->timeout = timeout;
-
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
u32 count)
{
- int result = 0;
- struct host_if_msg msg;
- struct set_multicast *multicast_filter_param = &msg.body.multicast_info;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- msg.id = HOST_IF_MSG_SET_MULTICAST_FILTER;
- msg.vif = vif;
-
- multicast_filter_param->enabled = enabled;
- multicast_filter_param->cnt = count;
-
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
- return result;
-}
-
-static void host_int_fill_join_bss_param(struct join_bss_param *param, u8 *ies,
- u16 *out_index, u8 *pcipher_tc,
- u8 *auth_total_cnt, u32 tsf_lo,
- u8 *rates_no)
-{
- u8 ext_rates_no;
- u16 offset;
- u8 pcipher_cnt;
- u8 auth_cnt;
- u8 i, j;
- u16 index = *out_index;
-
- if (ies[index] == SUPP_RATES_IE) {
- *rates_no = ies[index + 1];
- param->supp_rates[0] = *rates_no;
- index += 2;
-
- for (i = 0; i < *rates_no; i++)
- param->supp_rates[i + 1] = ies[index + i];
-
- index += *rates_no;
- } else if (ies[index] == EXT_SUPP_RATES_IE) {
- ext_rates_no = ies[index + 1];
- if (ext_rates_no > (MAX_RATES_SUPPORTED - *rates_no))
- param->supp_rates[0] = MAX_RATES_SUPPORTED;
- else
- param->supp_rates[0] += ext_rates_no;
- index += 2;
- for (i = 0; i < (param->supp_rates[0] - *rates_no); i++)
- param->supp_rates[*rates_no + i + 1] = ies[index + i];
-
- index += ext_rates_no;
- } else if (ies[index] == HT_CAPABILITY_IE) {
- param->ht_capable = true;
- index += ies[index + 1] + 2;
- } else if ((ies[index] == WMM_IE) &&
- (ies[index + 2] == 0x00) && (ies[index + 3] == 0x50) &&
- (ies[index + 4] == 0xF2) && (ies[index + 5] == 0x02) &&
- ((ies[index + 6] == 0x00) || (ies[index + 6] == 0x01)) &&
- (ies[index + 7] == 0x01)) {
- param->wmm_cap = true;
-
- if (ies[index + 8] & BIT(7))
- param->uapsd_cap = true;
- index += ies[index + 1] + 2;
- } else if ((ies[index] == P2P_IE) &&
- (ies[index + 2] == 0x50) && (ies[index + 3] == 0x6f) &&
- (ies[index + 4] == 0x9a) &&
- (ies[index + 5] == 0x09) && (ies[index + 6] == 0x0c)) {
- u16 p2p_cnt;
-
- param->tsf = tsf_lo;
- param->noa_enabled = 1;
- param->idx = ies[index + 9];
-
- if (ies[index + 10] & BIT(7)) {
- param->opp_enabled = 1;
- param->ct_window = ies[index + 10];
- } else {
- param->opp_enabled = 0;
- }
-
- param->cnt = ies[index + 11];
- p2p_cnt = index + 12;
-
- memcpy(param->duration, ies + p2p_cnt, 4);
- p2p_cnt += 4;
-
- memcpy(param->interval, ies + p2p_cnt, 4);
- p2p_cnt += 4;
-
- memcpy(param->start_time, ies + p2p_cnt, 4);
-
- index += ies[index + 1] + 2;
- } else if ((ies[index] == RSN_IE) ||
- ((ies[index] == WPA_IE) && (ies[index + 2] == 0x00) &&
- (ies[index + 3] == 0x50) && (ies[index + 4] == 0xF2) &&
- (ies[index + 5] == 0x01))) {
- u16 rsn_idx = index;
-
- if (ies[rsn_idx] == RSN_IE) {
- param->mode_802_11i = 2;
- } else {
- if (param->mode_802_11i == 0)
- param->mode_802_11i = 1;
- rsn_idx += 4;
- }
-
- rsn_idx += 7;
- param->rsn_grp_policy = ies[rsn_idx];
- rsn_idx++;
- offset = ies[rsn_idx] * 4;
- pcipher_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
- rsn_idx += 2;
-
- i = *pcipher_tc;
- j = 0;
- for (; i < (pcipher_cnt + *pcipher_tc) && i < 3; i++, j++) {
- u8 *policy = &param->rsn_pcip_policy[i];
-
- *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
- }
-
- *pcipher_tc += pcipher_cnt;
- rsn_idx += offset;
+ int result;
+ struct host_if_msg *msg;
- offset = ies[rsn_idx] * 4;
+ msg = wilc_alloc_work(vif, handle_set_mcast_filter, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- auth_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
- rsn_idx += 2;
- i = *auth_total_cnt;
- j = 0;
- for (; i < (*auth_total_cnt + auth_cnt); i++, j++) {
- u8 *policy = &param->rsn_auth_policy[i];
+ msg->body.multicast_info.enabled = enabled;
+ msg->body.multicast_info.cnt = count;
- *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
- }
-
- *auth_total_cnt += auth_cnt;
- rsn_idx += offset;
-
- if (ies[index] == RSN_IE) {
- param->rsn_cap[0] = ies[rsn_idx];
- param->rsn_cap[1] = ies[rsn_idx + 1];
- rsn_idx += 2;
- }
- param->rsn_found = true;
- index += ies[index + 1] + 2;
- } else {
- index += ies[index + 1] + 2;
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
}
-
- *out_index = index;
-}
-
-static void *host_int_parse_join_bss_param(struct network_info *info)
-{
- struct join_bss_param *param = NULL;
- u16 index = 0;
- u8 rates_no = 0;
- u8 pcipher_total_cnt = 0;
- u8 auth_total_cnt = 0;
-
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param)
- return NULL;
-
- param->dtim_period = info->dtim_period;
- param->beacon_period = info->beacon_period;
- param->cap_info = info->cap_info;
- memcpy(param->bssid, info->bssid, 6);
- memcpy((u8 *)param->ssid, info->ssid, info->ssid_len + 1);
- param->ssid_len = info->ssid_len;
- memset(param->rsn_pcip_policy, 0xFF, 3);
- memset(param->rsn_auth_policy, 0xFF, 3);
-
- while (index < info->ies_len)
- host_int_fill_join_bss_param(param, info->ies, &index,
- &pcipher_total_cnt,
- &auth_total_cnt, info->tsf_lo,
- &rates_no);
-
- return (void *)param;
+ return result;
}
int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
- int result = 0;
- struct host_if_msg msg;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
+ int result;
+ struct host_if_msg *msg;
- msg.id = HOST_IF_MSG_SET_IPADDRESS;
+ msg = wilc_alloc_work(vif, handle_set_ip_address, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.body.ip_info.ip_addr = ip_addr;
- msg.vif = vif;
- msg.body.ip_info.idx = idx;
+ msg->body.ip_info.ip_addr = ip_addr;
+ msg->body.ip_info.idx = idx;
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
- int result = 0;
- struct host_if_msg msg;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
+ int result;
+ struct host_if_msg *msg;
- msg.id = HOST_IF_MSG_GET_IPADDRESS;
+ msg = wilc_alloc_work(vif, handle_get_ip_address, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.body.ip_info.ip_addr = ip_addr;
- msg.vif = vif;
- msg.body.ip_info.idx = idx;
+ msg->body.ip_info.ip_addr = ip_addr;
+ msg->body.ip_info.idx = idx;
- result = wilc_enqueue_cmd(&msg);
- if (result)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ result = wilc_enqueue_work(msg);
+ if (result) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return result;
}
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
{
- int ret = 0;
- struct host_if_msg msg;
+ int ret;
+ struct host_if_msg *msg;
- memset(&msg, 0, sizeof(struct host_if_msg));
+ msg = wilc_alloc_work(vif, handle_set_tx_pwr, false);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- msg.id = HOST_IF_MSG_SET_TX_POWER;
- msg.body.tx_power.tx_pwr = tx_power;
- msg.vif = vif;
+ msg->body.tx_power.tx_pwr = tx_power;
- ret = wilc_enqueue_cmd(&msg);
- if (ret)
- netdev_err(vif->ndev, "wilc_mq_send fail\n");
+ ret = wilc_enqueue_work(msg);
+ if (ret) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ kfree(msg);
+ }
return ret;
}
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
{
- int ret = 0;
- struct host_if_msg msg;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- msg.id = HOST_IF_MSG_GET_TX_POWER;
- msg.vif = vif;
+ int ret;
+ struct host_if_msg *msg;
- ret = wilc_enqueue_cmd(&msg);
- if (ret)
- netdev_err(vif->ndev, "Failed to get TX PWR\n");
+ msg = wilc_alloc_work(vif, handle_get_tx_pwr, true);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- wait_for_completion(&hif_wait_response);
- *tx_power = msg.body.tx_power.tx_pwr;
+ ret = wilc_enqueue_work(msg);
+ if (ret) {
+ netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
+ } else {
+ wait_for_completion(&msg->work_comp);
+ *tx_power = msg->body.tx_power.tx_pwr;
+ }
+ /* free 'msg' after copying data */
+ kfree(msg);
return ret;
}
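
For synchronous requests like the TX-power read above, the worker fills the
result into the shared message and signals work_comp; the sleeping caller then
wakes, copies the value out, and frees the message itself. A hypothetical
sketch of the handler side, assuming the WID-based config API
(wilc_send_config_pkt, struct wid) used elsewhere in this driver:

static void handle_get_tx_pwr(struct work_struct *work)
{
	struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
	struct wilc_vif *vif = msg->vif;
	struct wid wid;

	wid.id = WID_TX_POWER;
	wid.type = WID_CHAR;
	wid.val = (s8 *)&msg->body.tx_power.tx_pwr;
	wid.size = sizeof(char);

	wilc_send_config_pkt(vif, GET_CFG, &wid, 1, wilc_get_vif_idx(vif));

	/* the waiting caller owns and frees a synchronous message */
	complete(&msg->work_comp);
}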
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 068b587a9df4..84866a62a4d4 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries
+ * All rights reserved.
+ */
+
#ifndef HOST_INT_H
#define HOST_INT_H
#include <linux/ieee80211.h>
@@ -81,7 +86,7 @@ struct host_if_pmkid_attr {
struct host_if_pmkid pmkidlist[WILC_MAX_NUM_PMKIDS];
};
-enum CURRENT_TXRATE {
+enum current_tx_rate {
AUTORATE = 0,
MBPS_1 = 1,
MBPS_2 = 2,
@@ -113,12 +118,12 @@ struct cfg_param_attr {
u8 txop_prot_disabled;
u16 beacon_interval;
u16 dtim_period;
- enum SITESURVEY site_survey_enabled;
+ enum site_survey site_survey_enabled;
u16 site_survey_scan_time;
u8 scan_source;
u16 active_scan_time;
u16 passive_scan_time;
- enum CURRENT_TXRATE curr_tx_rate;
+ enum current_tx_rate curr_tx_rate;
};
@@ -199,7 +204,7 @@ struct hidden_network {
struct user_scan_req {
wilc_scan_result scan_result;
void *arg;
- u32 rcvd_ch_cnt;
+ u32 ch_cnt;
struct found_net_info net_info[MAX_NUM_SCANNED_NETWORKS];
};
@@ -207,7 +212,7 @@ struct user_conn_req {
u8 *bssid;
u8 *ssid;
u8 security;
- enum AUTHTYPE auth_type;
+ enum authtype auth_type;
size_t ssid_len;
u8 *ies;
size_t ies_len;
@@ -252,12 +257,6 @@ struct reg_frame {
u8 reg_id;
};
-enum p2p_listen_state {
- P2P_IDLE,
- P2P_LISTEN,
- P2P_GRP_FORMATION
-};
-
struct wilc;
struct host_if_drv {
struct user_scan_req usr_scan_req;
@@ -273,10 +272,6 @@ struct host_if_drv {
struct cfg_param_attr cfg_values;
/*lock to protect concurrent setting of cfg params*/
struct mutex cfg_values_lock;
- struct completion comp_test_key_block;
- struct completion comp_test_disconn_block;
- struct completion comp_get_rssi;
- struct completion comp_inactive_time;
struct timer_list scan_timer;
struct wilc_vif *scan_timer_vif;
@@ -287,7 +282,7 @@ struct host_if_drv {
struct timer_list remain_on_ch_timer;
struct wilc_vif *remain_on_ch_timer_vif;
- bool IFC_UP;
+ bool ifc_up;
int driver_handler_id;
};
@@ -308,7 +303,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index);
int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
u8 index);
int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
- u8 index, u8 mode, enum AUTHTYPE auth_type);
+ u8 index, u8 mode, enum authtype auth_type);
int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
u8 mode, u8 cipher_mode, u8 index);
@@ -324,7 +319,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr);
int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
size_t ssid_len, const u8 *ies, size_t ies_len,
wilc_connect_result connect_result, void *user_arg,
- u8 security, enum AUTHTYPE auth_type,
+ u8 security, enum authtype auth_type,
u8 channel, void *join_params);
int wilc_disconnect(struct wilc_vif *vif, u16 reason_code);
int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel);
@@ -359,7 +354,8 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg);
int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
u8 ifc_id);
int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode);
-int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
+int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats,
+ bool is_sync);
void wilc_resolve_disconnect_aberration(struct wilc_vif *vif);
int wilc_get_vif_idx(struct wilc_vif *vif);
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 1c7e6e15809c..1afdb9e86bc1 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -1,4 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
#include "wilc_wfi_cfgoperations.h"
struct wilc_wfi_radiotap_hdr {
@@ -20,7 +25,6 @@ static u8 bssid[6];
#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */
#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive*/
-#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
#define TX_RADIOTAP_PRESENT ((1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_TX_FLAGS))
@@ -40,6 +44,7 @@ void wilc_wfi_monitor_rx(u8 *buff, u32 size)
/* Get WILC header */
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
+ le32_to_cpus(&header);
/*
 * The packet offset field contains info about the type of management
 * frame we are dealing with and the ack status
@@ -64,7 +69,7 @@ void wilc_wfi_monitor_rx(u8 *buff, u32 size)
cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);
- cb_hdr->rate = 5; /* txrate->bitrate / 5; */
+ cb_hdr->rate = 5;
if (pkt_offset & IS_MGMT_STATUS_SUCCES) {
/* success */
@@ -85,8 +90,8 @@ void wilc_wfi_monitor_rx(u8 *buff, u32 size)
hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
hdr->hdr.it_present = cpu_to_le32
- (1 << IEEE80211_RADIOTAP_RATE); /* | */
- hdr->rate = 5; /* txrate->bitrate / 5; */
+ (1 << IEEE80211_RADIOTAP_RATE);
+ hdr->rate = 5;
}
skb->dev = wilc_wfi_mon;
@@ -148,7 +153,6 @@ static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
{
u32 rtap_len, ret = 0;
struct wilc_wfi_mon_priv *mon_priv;
-
struct sk_buff *skb2;
struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
@@ -180,7 +184,7 @@ static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT);
- cb_hdr->rate = 5; /* txrate->bitrate / 5; */
+ cb_hdr->rate = 5;
cb_hdr->tx_flags = 0x0004;
skb2->dev = wilc_wfi_mon;
@@ -196,11 +200,12 @@ static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
}
skb->dev = mon_priv->real_ndev;
- /* Identify if Ethernet or MAC header (data or mgmt) */
memcpy(srcadd, &skb->data[10], 6);
memcpy(bssid, &skb->data[16], 6);
- /* if source address and bssid fields are equal>>Mac header */
- /*send it to mgmt frames handler */
+ /*
+ * Identify if data or mgmt packet, if source address and bssid
+ * fields are equal send it to mgmt frames handler
+ */
if (!(memcmp(srcadd, bssid, 6))) {
ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
if (ret)
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 02e6b1338440..01cf4bd2e192 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
@@ -45,7 +49,7 @@ static int dev_state_ev_handler(struct notifier_block *this,
switch (event) {
case NETDEV_UP:
if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) {
- hif_drv->IFC_UP = 1;
+ hif_drv->ifc_up = 1;
wilc_optaining_ip = false;
del_timer(&wilc_during_ip_timer);
}
@@ -65,7 +69,7 @@ static int dev_state_ev_handler(struct notifier_block *this,
case NETDEV_DOWN:
if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) {
- hif_drv->IFC_UP = 0;
+ hif_drv->ifc_up = 0;
wilc_optaining_ip = false;
}
@@ -94,12 +98,9 @@ static int dev_state_ev_handler(struct notifier_block *this,
static irqreturn_t isr_uh_routine(int irq, void *user_data)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
struct net_device *dev = user_data;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
if (wilc->close) {
netdev_err(dev, "Can't handle UH interrupt\n");
@@ -110,12 +111,9 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data)
static irqreturn_t isr_bh_routine(int irq, void *userdata)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
struct net_device *dev = userdata;
-
- vif = netdev_priv(userdata);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(userdata);
+ struct wilc *wilc = vif->wilc;
if (wilc->close) {
netdev_err(dev, "Can't handle BH interrupt\n");
@@ -130,50 +128,38 @@ static irqreturn_t isr_bh_routine(int irq, void *userdata)
static int init_irq(struct net_device *dev)
{
int ret = 0;
- struct wilc_vif *vif;
- struct wilc *wl;
-
- vif = netdev_priv(dev);
- wl = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
- if ((gpio_request(wl->gpio, "WILC_INTR") == 0) &&
- (gpio_direction_input(wl->gpio) == 0)) {
- wl->dev_irq_num = gpio_to_irq(wl->gpio);
- } else {
- ret = -1;
+ ret = gpiod_direction_input(wl->gpio_irq);
+ if (ret) {
netdev_err(dev, "could not obtain gpio for WILC_INTR\n");
+ return ret;
}
- if (ret != -1 && request_threaded_irq(wl->dev_irq_num,
- isr_uh_routine,
- isr_bh_routine,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "WILC_IRQ", dev) < 0) {
- netdev_err(dev, "Failed to request IRQ GPIO: %d\n", wl->gpio);
- gpio_free(wl->gpio);
- ret = -1;
- } else {
- netdev_dbg(dev,
- "IRQ request succeeded IRQ-NUM= %d on GPIO: %d\n",
- wl->dev_irq_num, wl->gpio);
- }
+ wl->dev_irq_num = gpiod_to_irq(wl->gpio_irq);
+
+ ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine,
+ isr_bh_routine,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "WILC_IRQ", dev);
+ if (ret < 0)
+ netdev_err(dev, "Failed to request IRQ\n");
+ else
+ netdev_dbg(dev, "IRQ request succeeded IRQ-NUM= %d\n",
+ wl->dev_irq_num);
return ret;
}
static void deinit_irq(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
/* Deinitialize IRQ */
- if (wilc->dev_irq_num) {
+ if (wilc->dev_irq_num)
free_irq(wilc->dev_irq_num, wilc);
- gpio_free(wilc->gpio);
- }
}
void wilc_mac_indicate(struct wilc *wilc)
@@ -238,12 +224,9 @@ static int linux_wlan_txq_task(void *vp)
{
int ret;
u32 txq_count;
- struct wilc_vif *vif;
- struct wilc *wl;
struct net_device *dev = vp;
-
- vif = netdev_priv(dev);
- wl = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
complete(&wl->txq_thread_started);
while (1) {
@@ -271,15 +254,12 @@ static int linux_wlan_txq_task(void *vp)
static int wilc_wlan_get_firmware(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
int chip_id, ret = 0;
const struct firmware *wilc_firmware;
char *firmware;
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
chip_id = wilc_get_chipid(wilc, false);
if (chip_id < 0x1003a0)
@@ -306,13 +286,10 @@ fail:
static int linux_wlan_start_firmware(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
int ret = 0;
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
ret = wilc_wlan_start(wilc);
if (ret < 0)
return ret;
@@ -326,13 +303,10 @@ static int linux_wlan_start_firmware(struct net_device *dev)
static int wilc1000_firmware_download(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
int ret = 0;
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
if (!wilc->firmware) {
netdev_err(dev, "Firmware buffer is NULL\n");
return -ENOBUFS;
@@ -378,7 +352,7 @@ static int linux_wlan_init_test_config(struct net_device *dev,
if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, c_val, 1, 0, 0))
goto fail;
- c_val[0] = RATE_AUTO;
+ c_val[0] = AUTORATE;
if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
goto fail;
@@ -560,11 +534,8 @@ fail:
static int wlan_deinit_locks(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
mutex_destroy(&wilc->hif_cs);
mutex_destroy(&wilc->rxq_cs);
@@ -575,11 +546,8 @@ static int wlan_deinit_locks(struct net_device *dev)
static void wlan_deinitialize_threads(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wl;
-
- vif = netdev_priv(dev);
- wl = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
wl->close = 1;
@@ -593,18 +561,15 @@ static void wlan_deinitialize_threads(struct net_device *dev)
static void wilc_wlan_deinitialize(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wl;
-
- vif = netdev_priv(dev);
- wl = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
if (!wl) {
netdev_err(dev, "wl is NULL\n");
return;
}
- if (wl->initialized) {
+ if (wl->initialized) {
netdev_info(dev, "Deinitializing wilc1000...\n");
if (!wl->dev_irq_num &&
@@ -632,11 +597,8 @@ static void wilc_wlan_deinitialize(struct net_device *dev)
static int wlan_init_locks(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wl;
-
- vif = netdev_priv(dev);
- wl = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
mutex_init(&wl->hif_cs);
mutex_init(&wl->rxq_cs);
@@ -655,11 +617,8 @@ static int wlan_init_locks(struct net_device *dev)
static int wlan_initialize_threads(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
"K_TXQ_TASK");
@@ -690,7 +649,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
goto fail_locks;
}
- if (wl->gpio >= 0 && init_irq(dev)) {
+ if (wl->gpio_irq && init_irq(dev)) {
ret = -EIO;
goto fail_locks;
}
@@ -779,15 +738,11 @@ static int mac_init_fn(struct net_device *ndev)
static int wilc_mac_open(struct net_device *ndev)
{
- struct wilc_vif *vif;
-
+ struct wilc_vif *vif = netdev_priv(ndev);
+ struct wilc *wl = vif->wilc;
unsigned char mac_add[ETH_ALEN] = {0};
int ret = 0;
int i = 0;
- struct wilc *wl;
-
- vif = netdev_priv(ndev);
- wl = vif->wilc;
if (!wl || !wl->dev) {
netdev_err(ndev, "device not ready\n");
@@ -851,11 +806,9 @@ static struct net_device_stats *mac_stats(struct net_device *dev)
static void wilc_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
- struct wilc_vif *vif;
+ struct wilc_vif *vif = netdev_priv(dev);
int i = 0;
- vif = netdev_priv(dev);
-
if (dev->flags & IFF_PROMISC)
return;
@@ -895,16 +848,13 @@ static void linux_wlan_tx_complete(void *priv, int status)
netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- struct wilc_vif *vif;
+ struct wilc_vif *vif = netdev_priv(ndev);
+ struct wilc *wilc = vif->wilc;
struct tx_complete_data *tx_data = NULL;
int queue_count;
char *udp_buf;
struct iphdr *ih;
struct ethhdr *eth_h;
- struct wilc *wilc;
-
- vif = netdev_priv(ndev);
- wilc = vif->wilc;
if (skb->dev != ndev) {
netdev_err(ndev, "Packet not destined to this device\n");
@@ -952,12 +902,10 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
static int wilc_mac_close(struct net_device *ndev)
{
struct wilc_priv *priv;
- struct wilc_vif *vif;
+ struct wilc_vif *vif = netdev_priv(ndev);
struct host_if_drv *hif_drv;
struct wilc *wl;
- vif = netdev_priv(ndev);
-
if (!vif || !vif->ndev || !vif->ndev->ieee80211_ptr ||
!vif->ndev->ieee80211_ptr->wiphy)
return 0;
@@ -1090,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
}
kfree(wilc);
+ wilc_debugfs_remove();
}
-EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
static const struct net_device_ops wilc_netdev_ops = {
.ndo_init = mac_init_fn,
@@ -1103,7 +1051,7 @@ static const struct net_device_ops wilc_netdev_ops = {
};
int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
- int gpio, const struct wilc_hif_func *ops)
+ const struct wilc_hif_func *ops)
{
int i, ret;
struct wilc_vif *vif;
@@ -1114,10 +1062,12 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
if (!wl)
return -ENOMEM;
+ wilc_debugfs_init();
*wilc = wl;
wl->io_type = io_type;
- wl->gpio = gpio;
wl->hif_func = ops;
+ INIT_LIST_HEAD(&wl->txq_head.list);
+ INIT_LIST_HEAD(&wl->rxq_head.list);
register_inetaddr_notifier(&g_dev_notifier);
@@ -1174,6 +1124,3 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
return 0;
}
-EXPORT_SYMBOL_GPL(wilc_netdev_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt b/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt
new file mode 100644
index 000000000000..4f7d1c2be4d0
--- /dev/null
+++ b/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt
@@ -0,0 +1,32 @@
+* Microchip WILC wireless SDIO device
+
+The wilc1000 chips can be connected via SDIO. This node is specified as a
+child node of the SDIO controller that connects the device to the system.
+
+Required properties:
+- compatible : Should be "microchip,wilc1000-sdio"
+- irq-gpios : Connect to a host IRQ
+- reg : Slot ID used in the controller
+
+Optional properties:
+- bus-width : Number of data lines wired up to the slot. Defaults to 1 bit.
+
+
+Examples:
+mmc1: mmc@fc000000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>;
+ non-removable;
+ vmmc-supply = <&vcc_mmc1_reg>;
+ vqmmc-supply = <&vcc_3v3_reg>;
+ status = "okay";
+
+ wilc_sdio@0 {
+ compatible = "microchip,wilc1000-sdio";
+ irq-gpios = <&pioC 27 0>;
+ status = "okay";
+ reg = <0>;
+ bus-width = <4>;
+	};
+};
diff --git a/drivers/staging/wilc1000/microchip,wilc1000,spi.txt b/drivers/staging/wilc1000/microchip,wilc1000,spi.txt
new file mode 100644
index 000000000000..87db87b2d901
--- /dev/null
+++ b/drivers/staging/wilc1000/microchip,wilc1000,spi.txt
@@ -0,0 +1,26 @@
+* Microchip WILC wireless SPI device
+
+The wilc1000 chips can be connected via SPI. This document describes
+the binding for the SPI-connected module.
+
+Required properties:
+- compatible : Should be "microchip,wilc1000-spi"
+- spi-max-frequency : Maximum SPI clocking speed of device in Hz
+- reg : Chip select address of device
+- irq-gpios : Connect to a host IRQ
+
+
+Examples:
+
+spi1: spi@fc018000 {
+ cs-gpios = <&pioB 21 0>;
+ status = "okay";
+
+ wilc_spi@0 {
+ compatible = "microchip,wilc1000-spi";
+ spi-max-frequency = <48000000>;
+ reg = <0>;
+ irq-gpios = <&pioC 27 0>;
+ status = "okay";
+ };
+};
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 287c11b58160..edc72876458d 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -1,13 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * NewportMedia WiFi chipset driver test tools - wilc-debug
- * Copyright (c) 2012 NewportMedia Inc.
- * Author: SSW <sswd@wilcsemic.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
*/
#if defined(WILC_DEBUGFS)
@@ -18,9 +12,6 @@
static struct dentry *wilc_dir;
-/*
- * ----------------------------------------------------------------------------
- */
#define DEBUG BIT(0)
#define INFO BIT(1)
#define WRN BIT(2)
@@ -28,11 +19,6 @@ static struct dentry *wilc_dir;
#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR)
static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
-EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
-
-/*
- * ----------------------------------------------------------------------------
- */
static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -77,10 +63,6 @@ static ssize_t wilc_debug_level_write(struct file *filp,
return count;
}
-/*
- * ----------------------------------------------------------------------------
- */
-
#define FOPS(_open, _read, _write, _poll) { \
.owner = THIS_MODULE, \
.open = (_open), \
@@ -105,7 +87,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
},
};
-static int __init wilc_debugfs_init(void)
+int wilc_debugfs_init(void)
{
int i;
struct wilc_debugfs_info_t *info;
@@ -121,12 +103,10 @@ static int __init wilc_debugfs_init(void)
}
return 0;
}
-module_init(wilc_debugfs_init);
-static void __exit wilc_debugfs_remove(void)
+void wilc_debugfs_remove(void)
{
debugfs_remove_recursive(wilc_dir);
}
-module_exit(wilc_debugfs_remove);
#endif
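
With module_init()/module_exit() dropped, linux_wlan.c now calls these
functions directly, so declarations must be visible to it. A plausible shape
for the shared header, with no-op stubs when debugfs support is compiled out
(a sketch only; the patch may declare them differently):

#if defined(WILC_DEBUGFS)
int wilc_debugfs_init(void);
void wilc_debugfs_remove(void);
#else
static inline int wilc_debugfs_init(void) { return 0; }
static inline void wilc_debugfs_remove(void) { }
#endif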
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 4ab43f97646a..b2080d8b801f 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -1,12 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) Atmel Corporation. All rights reserved.
- *
- * Module Name: wilc_sdio.c
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
*/
#include <linux/mmc/sdio_func.h>
-#include <linux/of_gpio.h>
+#include <linux/mmc/host.h>
#include "wilc_wfi_netdevice.h"
@@ -108,24 +107,28 @@ static int linux_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
struct wilc *wilc;
- int gpio, ret;
+ int ret;
+ struct gpio_desc *gpio = NULL;
- gpio = -1;
if (IS_ENABLED(CONFIG_WILC1000_HW_OOB_INTR)) {
- gpio = of_get_gpio(func->dev.of_node, 0);
- if (gpio < 0)
- gpio = GPIO_NUM;
+ gpio = gpiod_get(&func->dev, "irq", GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ /* get the GPIO descriptor from hardcode GPIO number */
+ gpio = gpio_to_desc(GPIO_NUM);
+ if (!gpio)
+ dev_err(&func->dev, "failed to get irq gpio\n");
+ }
}
dev_dbg(&func->dev, "Initializing netdev\n");
- ret = wilc_netdev_init(&wilc, &func->dev, HIF_SDIO, gpio,
- &wilc_hif_sdio);
+ ret = wilc_netdev_init(&wilc, &func->dev, HIF_SDIO, &wilc_hif_sdio);
if (ret) {
dev_err(&func->dev, "Couldn't initialize netdev\n");
return ret;
}
sdio_set_drvdata(func, wilc);
wilc->dev = &func->dev;
+ wilc->gpio_irq = gpio;
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
@@ -133,7 +136,12 @@ static int linux_sdio_probe(struct sdio_func *func,
static void linux_sdio_remove(struct sdio_func *func)
{
- wilc_netdev_cleanup(sdio_get_drvdata(func));
+ struct wilc *wilc = sdio_get_drvdata(func);
+
+ /* free the GPIO in module remove */
+ if (wilc->gpio_irq)
+ gpiod_put(wilc->gpio_irq);
+ wilc_netdev_cleanup(wilc);
}
static int sdio_reset(struct wilc *wilc)
@@ -199,21 +207,28 @@ static int wilc_sdio_resume(struct device *dev)
return 0;
}
+static const struct of_device_id wilc_of_match[] = {
+ { .compatible = "microchip,wilc1000-sdio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, wilc_of_match);
+
static const struct dev_pm_ops wilc_sdio_pm_ops = {
.suspend = wilc_sdio_suspend,
.resume = wilc_sdio_resume,
};
-static struct sdio_driver wilc1000_sdio_driver = {
+static struct sdio_driver wilc_sdio_driver = {
.name = SDIO_MODALIAS,
.id_table = wilc_sdio_ids,
.probe = linux_sdio_probe,
.remove = linux_sdio_remove,
.drv = {
.pm = &wilc_sdio_pm_ops,
+ .of_match_table = wilc_of_match,
}
};
-module_driver(wilc1000_sdio_driver,
+module_driver(wilc_sdio_driver,
sdio_register_driver,
sdio_unregister_driver);
MODULE_LICENSE("GPL");
@@ -368,7 +383,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
int ret;
- data = cpu_to_le32(data);
+ cpu_to_le32s(&data);
if (addr >= 0xf0 && addr <= 0xff) {
struct sdio_cmd52 cmd;
@@ -547,7 +562,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
}
}
- *data = cpu_to_le32(*data);
+ le32_to_cpus(data);
return 1;
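
The register-access conversions above swap in the in-place byte-order helpers.
The distinction they capture, as an illustrative sketch:

/* cpu_to_le32() returns a __le32, so assigning its result back into a
 * plain u32 (the old code) defeats sparse's endianness checking and
 * obscures the direction of the conversion; the *s helpers convert the
 * value in place and make the direction explicit. */
static void demo_reg_byteorder(u32 *reg)
{
	cpu_to_le32s(reg);	/* CPU order -> little endian, before a write */
	le32_to_cpus(reg);	/* little endian -> CPU order, after a read */
}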
@@ -793,9 +808,6 @@ static int sdio_read_size(struct wilc *wilc, u32 *size)
wilc_sdio_cmd52(wilc, &cmd);
tmp = cmd.data;
- /* cmd.read_write = 0; */
- /* cmd.function = 0; */
- /* cmd.raw = 0; */
cmd.address = 0xf3;
cmd.data = 0;
wilc_sdio_cmd52(wilc, &cmd);
@@ -1080,12 +1092,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
return 1;
}
-/********************************************
- *
- * Global sdio HIF function table
- *
- ********************************************/
-
+/* Global sdio HIF function table */
static const struct wilc_hif_func wilc_hif_sdio = {
.hif_init = sdio_init,
.hif_deinit = sdio_deinit,
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 647526387784..5517477d875a 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) Atmel Corporation. All rights reserved.
- *
- * Module Name: wilc_spi.c
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
*/
#include <linux/spi/spi.h>
-#include <linux/of_gpio.h>
#include "wilc_wfi_netdevice.h"
@@ -106,44 +104,55 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
static int wilc_bus_probe(struct spi_device *spi)
{
- int ret, gpio;
+ int ret;
struct wilc *wilc;
+ struct gpio_desc *gpio;
+
+ gpio = gpiod_get(&spi->dev, "irq", GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ /* get the GPIO descriptor from hardcode GPIO number */
+ gpio = gpio_to_desc(GPIO_NUM);
+ if (!gpio)
+ dev_err(&spi->dev, "failed to get the irq gpio\n");
+ }
- gpio = of_get_gpio(spi->dev.of_node, 0);
- if (gpio < 0)
- gpio = GPIO_NUM;
-
- ret = wilc_netdev_init(&wilc, NULL, HIF_SPI, GPIO_NUM, &wilc_hif_spi);
+ ret = wilc_netdev_init(&wilc, NULL, HIF_SPI, &wilc_hif_spi);
if (ret)
return ret;
spi_set_drvdata(spi, wilc);
wilc->dev = &spi->dev;
+ wilc->gpio_irq = gpio;
return 0;
}
static int wilc_bus_remove(struct spi_device *spi)
{
- wilc_netdev_cleanup(spi_get_drvdata(spi));
+ struct wilc *wilc = spi_get_drvdata(spi);
+
+ /* free the GPIO in module remove */
+ if (wilc->gpio_irq)
+ gpiod_put(wilc->gpio_irq);
+ wilc_netdev_cleanup(wilc);
return 0;
}
-static const struct of_device_id wilc1000_of_match[] = {
- { .compatible = "atmel,wilc_spi", },
- {}
+static const struct of_device_id wilc_of_match[] = {
+ { .compatible = "microchip,wilc1000-spi", },
+ { /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, wilc1000_of_match);
+MODULE_DEVICE_TABLE(of, wilc_of_match);
-static struct spi_driver wilc1000_spi_driver = {
+static struct spi_driver wilc_spi_driver = {
.driver = {
.name = MODALIAS,
- .of_match_table = wilc1000_of_match,
+ .of_match_table = wilc_of_match,
},
.probe = wilc_bus_probe,
.remove = wilc_bus_remove,
};
-module_spi_driver(wilc1000_spi_driver);
+module_spi_driver(wilc_spi_driver);
MODULE_LICENSE("GPL");
static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
@@ -668,7 +677,7 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat)
struct spi_device *spi = to_spi_device(wilc->dev);
int result;
- dat = cpu_to_le32(dat);
+ cpu_to_le32s(&dat);
result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4,
0);
if (result != N_OK)
@@ -689,7 +698,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
return 0;
}
- *data = cpu_to_le32(*data);
+ le32_to_cpus(data);
return 1;
}
@@ -707,7 +716,7 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
u8 cmd = CMD_SINGLE_WRITE;
u8 clockless = 0;
- data = cpu_to_le32(data);
+ cpu_to_le32s(&data);
if (addr < 0x30) {
/* Clockless register */
cmd = CMD_INTERNAL_WRITE;
@@ -757,7 +766,6 @@ static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
u8 clockless = 0;
if (addr < 0x30) {
- /* dev_err(&spi->dev, "***** read addr %d\n\n", addr); */
/* Clockless register */
cmd = CMD_INTERNAL_READ;
clockless = 1;
@@ -769,7 +777,7 @@ static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
return 0;
}
- *data = cpu_to_le32(*data);
+ le32_to_cpus(data);
return 1;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index e248702ee519..7cd033004651 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -1,4 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
#include "wilc_wfi_cfgoperations.h"
#define NO_ENCRYPT 0
@@ -77,11 +82,6 @@ static const struct wiphy_wowlan_support wowlan_support = {
.flags = WIPHY_WOWLAN_ANY
};
-#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
-#define DEFAULT_LINK_SPEED 72
-
-#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
-
static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
static u32 last_scanned_cnt;
struct timer_list wilc_during_ip_timer;
@@ -468,28 +468,22 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
struct disconnect_info *disconn_info,
void *priv_data)
{
- struct wilc_priv *priv;
- struct net_device *dev;
- struct host_if_drv *wfi_drv;
+ struct wilc_priv *priv = priv_data;
+ struct net_device *dev = priv->dev;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
+ struct host_if_drv *wfi_drv = priv->hif_drv;
u8 null_bssid[ETH_ALEN] = {0};
- struct wilc *wl;
- struct wilc_vif *vif;
wilc_connecting = 0;
- priv = priv_data;
- dev = priv->dev;
- vif = netdev_priv(dev);
- wl = vif->wilc;
- wfi_drv = (struct host_if_drv *)priv->hif_drv;
-
if (conn_disconn_evt == CONN_DISCONN_EVENT_CONN_RESP) {
u16 connect_status;
connect_status = conn_info->status;
if (mac_status == MAC_STATUS_DISCONNECTED &&
- conn_info->status == SUCCESSFUL_STATUSCODE) {
+ conn_info->status == WLAN_STATUS_SUCCESS) {
connect_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
wilc_wlan_set_bssid(priv->dev, null_bssid,
STATION_MODE);
@@ -529,7 +523,7 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
conn_info->resp_ies,
conn_info->resp_ies_len, connect_status,
GFP_KERNEL);
- } else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
+ } else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
wilc_optaining_ip = false;
p2p_local_random = 0x01;
p2p_recv_random = 0x00;
@@ -540,9 +534,9 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
if (!wfi_drv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- if (wfi_drv->IFC_UP && dev == wl->vif[1]->ndev)
+ if (wfi_drv->ifc_up && dev == wl->vif[1]->ndev)
disconn_info->reason = 3;
- else if (!wfi_drv->IFC_UP && dev == wl->vif[1]->ndev)
+ else if (!wfi_drv->ifc_up && dev == wl->vif[1]->ndev)
disconn_info->reason = 1;
cfg80211_disconnected(dev, disconn_info->reason,
@@ -555,12 +549,9 @@ static int set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
u32 channelnum = 0;
- struct wilc_priv *priv;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
int result = 0;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
channelnum = ieee80211_frequency_to_channel(chandef->chan->center_freq);
@@ -618,15 +609,12 @@ out:
static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
- struct wilc_priv *priv;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
u32 i;
- s32 ret = 0;
+ int ret = 0;
u8 scan_ch_list[MAX_NUM_SCANNED_NETWORKS];
struct hidden_network hidden_ntwk;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
priv->scan_req = request;
@@ -674,21 +662,18 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
static int connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
- s32 ret = 0;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+ struct host_if_drv *wfi_drv = priv->hif_drv;
+ struct network_info *nw_info;
+ int ret;
u32 i;
u32 sel_bssi_idx = UINT_MAX;
u8 security = NO_ENCRYPT;
- enum AUTHTYPE auth_type = ANY;
+ enum authtype auth_type = ANY;
u32 cipher_group;
- struct wilc_priv *priv;
- struct host_if_drv *wfi_drv;
- struct network_info *nw_info = NULL;
- struct wilc_vif *vif;
wilc_connecting = 1;
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
- wfi_drv = (struct host_if_drv *)priv->hif_drv;
if (!(strncmp(sme->ssid, "DIRECT-", 7)))
wfi_drv->p2p_connect = 1;
@@ -761,7 +746,8 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
security = ENCRYPT_ENABLED | WPA | AES;
} else {
ret = -ENOTSUPP;
- netdev_err(dev, "Not supported cipher\n");
+ netdev_err(dev, "%s: Unsupported cipher\n",
+ __func__);
wilc_connecting = 0;
return ret;
}
@@ -823,17 +809,14 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
static int disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
- s32 ret = 0;
- struct wilc_priv *priv;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+ struct wilc *wilc = vif->wilc;
struct host_if_drv *wfi_drv;
- struct wilc_vif *vif;
- struct wilc *wilc;
+ int ret;
u8 null_bssid[ETH_ALEN] = {0};
wilc_connecting = 0;
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
- wilc = vif->wilc;
if (!wilc)
return -EIO;
@@ -919,16 +902,13 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
bool pairwise, const u8 *mac_addr, struct key_params *params)
{
- s32 ret = 0, keylen = params->key_len;
- struct wilc_priv *priv;
+ int ret = 0, keylen = params->key_len;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
const u8 *rx_mic = NULL;
const u8 *tx_mic = NULL;
u8 mode = NO_ENCRYPT;
u8 op_mode;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(netdev);
+ struct wilc_vif *vif = netdev_priv(netdev);
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -1021,7 +1001,7 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
break;
default:
- netdev_err(netdev, "Not supported cipher\n");
+ netdev_err(netdev, "%s: Unsupported cipher\n", __func__);
ret = -ENOTSUPP;
}
@@ -1033,13 +1013,9 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
bool pairwise,
const u8 *mac_addr)
{
- struct wilc_priv *priv;
- struct wilc *wl;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(netdev);
- wl = vif->wilc;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(netdev);
+ struct wilc *wl = vif->wilc;
if (netdev == wl->vif[0]->ndev) {
if (priv->wilc_gtk[key_index]) {
@@ -1076,11 +1052,9 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
bool pairwise, const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params *))
{
- struct wilc_priv *priv;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
struct key_params key_params;
- priv = wiphy_priv(wiphy);
-
if (!pairwise) {
key_params.key = priv->wilc_gtk[key_index]->key;
key_params.cipher = priv->wilc_gtk[key_index]->cipher;
@@ -1103,11 +1077,8 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
static int set_default_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool unicast, bool multicast)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
wilc_set_wep_default_keyid(vif, key_index);
@@ -1117,15 +1088,12 @@ static int set_default_key(struct wiphy *wiphy, struct net_device *netdev,
static int get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(dev);
u32 i = 0;
u32 associatedsta = ~0;
u32 inactive_time = 0;
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(dev);
-
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
for (i = 0; i < NUM_STA_ASSOCIATED; i++) {
if (!(memcmp(mac,
@@ -1141,20 +1109,20 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
wilc_get_inactive_time(vif, mac, &inactive_time);
sinfo->inactive_time = 1000 * inactive_time;
} else if (vif->iftype == STATION_MODE) {
struct rf_info stats;
- wilc_get_statistics(vif, &stats);
+ wilc_get_statistics(vif, &stats, true);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
- BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_FAILED) |
- BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->signal = stats.rssi;
sinfo->rx_packets = stats.rx_cnt;
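
sinfo->filled in struct station_info is a u64, while BIT() expands through unsigned long — 32 bits on 32-bit architectures — so any NL80211_STA_INFO_* index past 31 would shift out of range. BIT_ULL() always produces a 64-bit constant:

    /* BIT(x)     -> 1UL  << (x)   (32-bit on 32-bit arches)          */
    /* BIT_ULL(x) -> 1ULL << (x)   (always 64-bit, matches u64 filled) */
    sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
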
@@ -1179,13 +1147,10 @@ static int change_bss(struct wiphy *wiphy, struct net_device *dev,
static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- s32 ret = 0;
+ int ret;
struct cfg_param_attr cfg_param_val;
- struct wilc_priv *priv;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
cfg_param_val.flag = 0;
@@ -1217,13 +1182,11 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
u32 i;
- s32 ret = 0;
+ int ret = 0;
u8 flag = 0;
- struct wilc_vif *vif;
- struct wilc_priv *priv = wiphy_priv(wiphy);
-
- vif = netdev_priv(priv->dev);
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
@@ -1254,8 +1217,7 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
u32 i;
- s32 ret = 0;
-
+ int ret = 0;
struct wilc_priv *priv = wiphy_priv(wiphy);
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
@@ -1320,7 +1282,6 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u8 ch_list_attr_idx,
static void wilc_wfi_cfg_parse_rx_action(u8 *buf, u32 len)
{
u32 index = 0;
-
u8 op_channel_attr_index = 0;
u8 channel_list_attr_index = 0;
@@ -1343,7 +1304,6 @@ static void wilc_wfi_cfg_parse_tx_action(u8 *buf, u32 len, bool oper_ch,
u8 iftype)
{
u32 index = 0;
-
u8 op_channel_attr_index = 0;
u8 channel_list_attr_index = 0;
@@ -1405,16 +1365,14 @@ static void wilc_wfi_cfg_parse_rx_vendor_spec(struct wilc_priv *priv, u8 *buff,
void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
{
- struct wilc_priv *priv;
+ struct wilc_priv *priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
+ struct host_if_drv *wfi_drv = priv->hif_drv;
u32 header, pkt_offset;
- struct host_if_drv *wfi_drv;
s32 freq;
-
- priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
- wfi_drv = (struct host_if_drv *)priv->hif_drv;
+ __le16 fc;
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
-
+ le32_to_cpus(&header);
pkt_offset = GET_PKT_OFFSET(header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
@@ -1431,7 +1389,8 @@ void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
- if (!ieee80211_is_action(buff[FRAME_TYPE_ID])) {
+ fc = ((struct ieee80211_hdr *)buff)->frame_control;
+ if (!ieee80211_is_action(fc)) {
cfg80211_rx_mgmt(priv->wdev, freq, 0, buff, size, 0);
return;
}
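
ieee80211_is_action() expects the little-endian frame_control word (__le16), not a raw buffer byte, so the frame is now read through struct ieee80211_hdr. A minimal sketch of the check, assuming buff points at the 802.11 header:

    #include <linux/ieee80211.h>

    __le16 fc = ((struct ieee80211_hdr *)buff)->frame_control;

    if (!ieee80211_is_action(fc)) {
            cfg80211_rx_mgmt(priv->wdev, freq, 0, buff, size, 0);
            return;
    }
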
@@ -1462,8 +1421,8 @@ void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
default:
netdev_dbg(dev,
- "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n",
- buff[ACTION_SUBTYPE_ID]);
+ "%s: Not handled action frame type:%x\n",
+ __func__, buff[ACTION_SUBTYPE_ID]);
break;
}
}
@@ -1513,12 +1472,9 @@ static int remain_on_channel(struct wiphy *wiphy,
struct ieee80211_channel *chan,
unsigned int duration, u64 *cookie)
{
- s32 ret = 0;
- struct wilc_priv *priv;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
+ int ret = 0;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
if (wdev->iftype == NL80211_IFTYPE_AP) {
netdev_dbg(vif->ndev, "Required while in AP mode\n");
@@ -1543,11 +1499,8 @@ static int cancel_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
return wilc_listen_state_expired(vif,
priv->remain_on_ch_params.listen_session_id);
@@ -1613,16 +1566,12 @@ static int mgmt_tx(struct wiphy *wiphy,
size_t len = params->len;
const struct ieee80211_mgmt *mgmt;
struct p2p_mgmt_data *mgmt_tx;
- struct wilc_priv *priv;
- struct host_if_drv *wfi_drv;
- struct wilc_vif *vif;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct host_if_drv *wfi_drv = priv->hif_drv;
+ struct wilc_vif *vif = netdev_priv(wdev->netdev);
u32 buf_len = len + sizeof(p2p_vendor_spec) + sizeof(p2p_local_random);
int ret = 0;
- vif = netdev_priv(wdev->netdev);
- priv = wiphy_priv(wiphy);
- wfi_drv = (struct host_if_drv *)priv->hif_drv;
-
*cookie = (unsigned long)buf;
priv->tx_cookie = *cookie;
mgmt = (const struct ieee80211_mgmt *)buf;
@@ -1679,8 +1628,8 @@ static int mgmt_tx(struct wiphy *wiphy,
default:
netdev_dbg(vif->ndev,
- "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n",
- buf[ACTION_SUBTYPE_ID]);
+ "%s: Not handled action frame type:%x\n",
+ __func__, buf[ACTION_SUBTYPE_ID]);
break;
}
}
@@ -1702,11 +1651,9 @@ static int mgmt_tx_cancel_wait(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie)
{
- struct wilc_priv *priv;
- struct host_if_drv *wfi_drv;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct host_if_drv *wfi_drv = priv->hif_drv;
- priv = wiphy_priv(wiphy);
- wfi_drv = (struct host_if_drv *)priv->hif_drv;
wfi_drv->p2p_timeout = jiffies;
if (!priv->p2p_listen_state) {
@@ -1726,13 +1673,9 @@ static int mgmt_tx_cancel_wait(struct wiphy *wiphy,
void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
u16 frame_type, bool reg)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
- struct wilc *wl;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->wdev->netdev);
- wl = vif->wilc;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->wdev->netdev);
+ struct wilc *wl = vif->wilc;
if (!frame_type)
return;
@@ -1766,16 +1709,13 @@ static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev,
static int dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
if (idx != 0)
return -ENOENT;
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
wilc_get_rssi(vif, &sinfo->signal);
@@ -1786,14 +1726,9 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
static int set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
-
- if (!wiphy)
- return -ENOENT;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
if (!priv->hif_drv)
return -EIO;
@@ -1807,13 +1742,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
enum nl80211_iftype type,
struct vif_params *params)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
- struct wilc *wl;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
- vif = netdev_priv(dev);
- priv = wiphy_priv(wiphy);
- wl = vif->wilc;
p2p_local_random = 0x01;
p2p_recv_random = 0x00;
wilc_ie = false;
@@ -1886,13 +1818,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
static int start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings)
{
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
struct cfg80211_beacon_data *beacon = &settings->beacon;
- s32 ret = 0;
- struct wilc *wl;
- struct wilc_vif *vif;
-
- vif = netdev_priv(dev);
- wl = vif->wilc;
+ int ret;
ret = set_channel(wiphy, &settings->chandef);
@@ -1911,11 +1840,8 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_beacon_data *beacon)
{
- struct wilc_priv *priv;
- struct wilc_vif *vif;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
return wilc_add_beacon(vif, 0, 0, beacon->head_len,
(u8 *)beacon->head, beacon->tail_len,
@@ -1924,17 +1850,11 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
{
- s32 ret = 0;
- struct wilc_priv *priv;
- struct wilc_vif *vif;
+ int ret;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
u8 null_bssid[ETH_ALEN] = {0};
- if (!wiphy)
- return -EFAULT;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->dev);
-
wilc_wlan_set_bssid(dev, null_bssid, AP_MODE);
ret = wilc_del_beacon(vif);
@@ -1948,16 +1868,10 @@ static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
static int add_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
- s32 ret = 0;
- struct wilc_priv *priv;
+ int ret = 0;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
struct add_sta_param sta_params = { {0} };
- struct wilc_vif *vif;
-
- if (!wiphy)
- return -EFAULT;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(dev);
+ struct wilc_vif *vif = netdev_priv(dev);
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
memcpy(sta_params.bssid, mac, ETH_ALEN);
@@ -1989,17 +1903,11 @@ static int del_station(struct wiphy *wiphy, struct net_device *dev,
struct station_del_parameters *params)
{
const u8 *mac = params->mac;
- s32 ret = 0;
- struct wilc_priv *priv;
- struct wilc_vif *vif;
+ int ret = 0;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(dev);
struct sta_info *info;
- if (!wiphy)
- return -EFAULT;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(dev);
-
if (!(vif->iftype == AP_MODE || vif->iftype == GO_MODE))
return ret;
@@ -2017,14 +1925,9 @@ static int del_station(struct wiphy *wiphy, struct net_device *dev,
static int change_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
- s32 ret = 0;
+ int ret = 0;
struct add_sta_param sta_params = { {0} };
- struct wilc_vif *vif;
-
- if (!wiphy)
- return -EFAULT;
-
- vif = netdev_priv(dev);
+ struct wilc_vif *vif = netdev_priv(dev);
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
memcpy(sta_params.bssid, mac, ETH_ALEN);
@@ -2055,12 +1958,9 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
enum nl80211_iftype type,
struct vif_params *params)
{
- struct wilc_vif *vif;
- struct wilc_priv *priv;
- struct net_device *new_ifc = NULL;
-
- priv = wiphy_priv(wiphy);
- vif = netdev_priv(priv->wdev->netdev);
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->wdev->netdev);
+ struct net_device *new_ifc;
if (type == NL80211_IFTYPE_MONITOR) {
new_ifc = wilc_wfi_init_mon_interface(name, vif->ndev);
@@ -2132,9 +2032,7 @@ static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
int ret;
struct wilc_priv *priv = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(priv->dev);
- struct wilc *wl;
-
- wl = vif->wilc;
+ struct wilc *wl = vif->wilc;
/* If firmware is not started, return. */
if (!wl->initialized)
@@ -2196,11 +2094,11 @@ static struct wireless_dev *wilc_wfi_cfg_alloc(void)
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (!wdev)
- goto _fail_;
+ goto out;
wdev->wiphy = wiphy_new(&wilc_cfg80211_ops, sizeof(struct wilc_priv));
if (!wdev->wiphy)
- goto _fail_mem_;
+ goto free_mem;
wilc_band_2ghz.ht_cap.ht_supported = 1;
wilc_band_2ghz.ht_cap.cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
@@ -2212,9 +2110,9 @@ static struct wireless_dev *wilc_wfi_cfg_alloc(void)
return wdev;
-_fail_mem_:
+free_mem:
kfree(wdev);
-_fail_:
+out:
return NULL;
}
@@ -2223,7 +2121,7 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net,
{
struct wilc_priv *priv;
struct wireless_dev *wdev;
- s32 ret = 0;
+ int ret;
wdev = wilc_wfi_cfg_alloc();
if (!wdev) {
@@ -2265,19 +2163,15 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net,
int wilc_init_host_int(struct net_device *net)
{
- int ret = 0;
-
- struct wilc_priv *priv;
+ int ret;
+ struct wilc_priv *priv = wdev_priv(net->ieee80211_ptr);
- priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
timer_setup(&aging_timer, remove_network_from_shadow, 0);
timer_setup(&wilc_during_ip_timer, clear_during_ip, 0);
}
op_ifcs++;
- priv->auto_rate_adjusted = false;
-
priv->p2p_listen_state = false;
mutex_init(&priv->scan_req_lock);
@@ -2290,14 +2184,9 @@ int wilc_init_host_int(struct net_device *net)
int wilc_deinit_host_int(struct net_device *net)
{
- int ret = 0;
- struct wilc_vif *vif;
- struct wilc_priv *priv;
-
- priv = wdev_priv(net->ieee80211_ptr);
- vif = netdev_priv(priv->dev);
-
- priv->auto_rate_adjusted = false;
+ int ret;
+ struct wilc_priv *priv = wdev_priv(net->ieee80211_ptr);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
priv->p2p_listen_state = false;
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index a69103b44958..be412b65926c 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
#ifndef NM_WFI_CFGOPERATIONS
#define NM_WFI_CFGOPERATIONS
#include "wilc_wfi_netdevice.h"
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index f2b07e8aedd7..b7eee772f3fe 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
#ifndef WILC_WFI_NETDEVICE
#define WILC_WFI_NETDEVICE
@@ -7,6 +12,7 @@
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
+#include <linux/gpio/consumer.h>
#include "host_interface.h"
#include "wilc_wlan.h"
@@ -21,6 +27,11 @@
#define NUM_REG_FRAME 2
+#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
+#define DEFAULT_LINK_SPEED 72
+
+#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
+
struct wilc_wfi_stats {
unsigned long rx_packets;
unsigned long tx_packets;
@@ -31,11 +42,6 @@ struct wilc_wfi_stats {
};
-/*
- * This structure is private to each device. It is used to pass
- * packets in and out, so there is place for a packet
- */
-
struct wilc_wfi_key {
u8 *key;
u8 *seq;
@@ -74,14 +80,10 @@ struct wilc_priv {
u8 associated_bss[ETH_ALEN];
struct sta_info assoc_stainfo;
- struct net_device_stats stats;
- u8 monitor_flag;
- int status;
struct sk_buff *skb;
struct net_device *dev;
struct host_if_drv *hif_drv;
struct host_if_pmkid_attr pmkid_list;
- struct wilc_wfi_stats netstats;
u8 wep_key[4][WLAN_KEY_LEN_WEP104];
u8 wep_key_len[4];
/* The real interface that the monitor is on */
@@ -91,9 +93,6 @@ struct wilc_priv {
u8 wilc_groupkey;
/* mutexes */
struct mutex scan_req_lock;
- /* */
- bool auto_rate_adjusted;
-
bool p2p_listen_state;
};
@@ -123,7 +122,7 @@ struct wilc {
const struct wilc_hif_func *hif_func;
int io_type;
int mac_status;
- int gpio;
+ struct gpio_desc *gpio_irq;
bool initialized;
int dev_irq_num;
int close;
@@ -136,6 +135,7 @@ struct wilc {
spinlock_t txq_spinlock;
/*protect rxq_entry_t receiver queue*/
struct mutex rxq_cs;
+ /* lock to protect hif access */
struct mutex hif_cs;
struct completion cfg_event;
@@ -155,19 +155,10 @@ struct wilc {
u32 rx_buffer_offset;
u8 *tx_buffer;
- unsigned long txq_spinlock_flags;
-
- struct txq_entry_t *txq_head;
- struct txq_entry_t *txq_tail;
+ struct txq_entry_t txq_head;
int txq_entries;
- int txq_exit;
-
- struct rxq_entry_t *rxq_head;
- struct rxq_entry_t *rxq_tail;
- int rxq_entries;
- int rxq_exit;
- unsigned char eth_src_address[NUM_CONCURRENT_IFC][6];
+ struct rxq_entry_t rxq_head;
const struct firmware *firmware;
@@ -185,7 +176,7 @@ void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc);
void wilc_netdev_cleanup(struct wilc *wilc);
int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
- int gpio, const struct wilc_hif_func *ops);
+ const struct wilc_hif_func *ops);
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode);
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index d4ebbf67e50b..6787b6e9f124 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1,9 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/ip.h>
#include "wilc_wfi_netdevice.h"
#include "wilc_wlan_cfg.h"
static enum chip_ps_states chip_ps_state = CHIP_WAKEDUP;
+static inline bool is_wilc1000(u32 id)
+{
+ return ((id & 0xfffff000) == 0x100000 ? true : false);
+}
+
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
{
mutex_lock(&wilc->hif_cs);
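
Converting the ISWILC1000() macro to a static inline gives the chip-id check a typed parameter and single argument evaluation; since the comparison is already boolean, the `? true : false` is redundant and the equivalent body is just:

    static inline bool is_wilc1000(u32 id)
    {
            return (id & 0xfffff000) == 0x100000;   /* chip-id family bits */
    }
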
@@ -20,42 +32,25 @@ static inline void release_bus(struct wilc *wilc, enum bus_release release)
static void wilc_wlan_txq_remove(struct wilc *wilc, struct txq_entry_t *tqe)
{
- if (tqe == wilc->txq_head) {
- wilc->txq_head = tqe->next;
- if (wilc->txq_head)
- wilc->txq_head->prev = NULL;
- } else if (tqe == wilc->txq_tail) {
- wilc->txq_tail = (tqe->prev);
- if (wilc->txq_tail)
- wilc->txq_tail->next = NULL;
- } else {
- tqe->prev->next = tqe->next;
- tqe->next->prev = tqe->prev;
- }
+ list_del(&tqe->list);
wilc->txq_entries -= 1;
}
static struct txq_entry_t *
wilc_wlan_txq_remove_from_head(struct net_device *dev)
{
- struct txq_entry_t *tqe;
+ struct txq_entry_t *tqe = NULL;
unsigned long flags;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
spin_lock_irqsave(&wilc->txq_spinlock, flags);
- if (wilc->txq_head) {
- tqe = wilc->txq_head;
- wilc->txq_head = tqe->next;
- if (wilc->txq_head)
- wilc->txq_head->prev = NULL;
+ if (!list_empty(&wilc->txq_head.list)) {
+ tqe = list_first_entry(&wilc->txq_head.list, struct txq_entry_t,
+ list);
+ list_del(&tqe->list);
wilc->txq_entries -= 1;
- } else {
- tqe = NULL;
}
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
return tqe;
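
The open-coded next/prev queue bookkeeping is replaced by <linux/list.h>, where each operation is a single audited primitive (assuming struct txq_entry_t embeds a struct list_head named list, as changed in wilc_wlan.h below):

    /* append under the queue lock */
    list_add_tail(&tqe->list, &wilc->txq_head.list);

    /* pop the oldest entry, NULL when empty */
    if (!list_empty(&wilc->txq_head.list)) {
            tqe = list_first_entry(&wilc->txq_head.list,
                                   struct txq_entry_t, list);
            list_del(&tqe->list);
    }
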
@@ -65,25 +60,12 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev,
struct txq_entry_t *tqe)
{
unsigned long flags;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
spin_lock_irqsave(&wilc->txq_spinlock, flags);
- if (!wilc->txq_head) {
- tqe->next = NULL;
- tqe->prev = NULL;
- wilc->txq_head = tqe;
- wilc->txq_tail = tqe;
- } else {
- tqe->next = NULL;
- tqe->prev = wilc->txq_tail;
- wilc->txq_tail->next = tqe;
- wilc->txq_tail = tqe;
- }
+ list_add_tail(&tqe->list, &wilc->txq_head.list);
wilc->txq_entries += 1;
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
@@ -101,17 +83,7 @@ static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
spin_lock_irqsave(&wilc->txq_spinlock, flags);
- if (!wilc->txq_head) {
- tqe->next = NULL;
- tqe->prev = NULL;
- wilc->txq_head = tqe;
- wilc->txq_tail = tqe;
- } else {
- tqe->next = wilc->txq_head;
- tqe->prev = NULL;
- wilc->txq_head->prev = tqe;
- wilc->txq_head = tqe;
- }
+ list_add(&tqe->list, &wilc->txq_head.list);
wilc->txq_entries += 1;
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
@@ -184,50 +156,32 @@ static inline int add_tcp_pending_ack(u32 ack, u32 session_index,
static inline void tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
{
- u8 *eth_hdr_ptr;
- u8 *buffer = tqe->buffer;
- unsigned short h_proto;
+ void *buffer = tqe->buffer;
+ const struct ethhdr *eth_hdr_ptr = buffer;
int i;
unsigned long flags;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
spin_lock_irqsave(&wilc->txq_spinlock, flags);
- eth_hdr_ptr = &buffer[0];
- h_proto = ntohs(*((unsigned short *)&eth_hdr_ptr[12]));
- if (h_proto == ETH_P_IP) {
- u8 *ip_hdr_ptr;
- u8 protocol;
+ if (eth_hdr_ptr->h_proto == htons(ETH_P_IP)) {
+ const struct iphdr *ip_hdr_ptr = buffer + ETH_HLEN;
- ip_hdr_ptr = &buffer[ETHERNET_HDR_LEN];
- protocol = ip_hdr_ptr[9];
-
- if (protocol == 0x06) {
- u8 *tcp_hdr_ptr;
+ if (ip_hdr_ptr->protocol == IPPROTO_TCP) {
+ const struct tcphdr *tcp_hdr_ptr;
u32 IHL, total_length, data_offset;
- tcp_hdr_ptr = &ip_hdr_ptr[IP_HDR_LEN];
- IHL = (ip_hdr_ptr[0] & 0xf) << 2;
- total_length = ((u32)ip_hdr_ptr[2] << 8) +
- (u32)ip_hdr_ptr[3];
- data_offset = ((u32)tcp_hdr_ptr[12] & 0xf0) >> 2;
+ IHL = ip_hdr_ptr->ihl << 2;
+ tcp_hdr_ptr = buffer + ETH_HLEN + IHL;
+ total_length = ntohs(ip_hdr_ptr->tot_len);
+
+ data_offset = tcp_hdr_ptr->doff << 2;
if (total_length == (IHL + data_offset)) {
u32 seq_no, ack_no;
- seq_no = ((u32)tcp_hdr_ptr[4] << 24) +
- ((u32)tcp_hdr_ptr[5] << 16) +
- ((u32)tcp_hdr_ptr[6] << 8) +
- (u32)tcp_hdr_ptr[7];
-
- ack_no = ((u32)tcp_hdr_ptr[8] << 24) +
- ((u32)tcp_hdr_ptr[9] << 16) +
- ((u32)tcp_hdr_ptr[10] << 8) +
- (u32)tcp_hdr_ptr[11];
-
+ seq_no = ntohl(tcp_hdr_ptr->seq);
+ ack_no = ntohl(tcp_hdr_ptr->ack_seq);
for (i = 0; i < tcp_session; i++) {
u32 j = ack_session_info[i].seq_num;
@@ -249,15 +203,13 @@ static inline void tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
{
- struct wilc_vif *vif;
- struct wilc *wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
u32 i = 0;
u32 dropped = 0;
+ unsigned long flags;
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
- spin_lock_irqsave(&wilc->txq_spinlock, wilc->txq_spinlock_flags);
+ spin_lock_irqsave(&wilc->txq_spinlock, flags);
for (i = pending_base; i < (pending_base + pending_acks); i++) {
u32 session_index;
u32 bigger_ack_num;
@@ -295,7 +247,7 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
else
pending_base = 0;
- spin_unlock_irqrestore(&wilc->txq_spinlock, wilc->txq_spinlock_flags);
+ spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
while (dropped > 0) {
wait_for_completion_timeout(&wilc->txq_event,
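
Keeping the saved IRQ flags in the shared structure (the old wilc->txq_spinlock_flags) is a race: two contexts taking the lock overwrite each other's saved interrupt state. The flags for spin_lock_irqsave() must be a per-call stack variable:

    unsigned long flags;    /* one per critical section, never shared */

    spin_lock_irqsave(&wilc->txq_spinlock, flags);
    /* ... critical section ... */
    spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
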
@@ -402,12 +354,14 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc)
{
- struct txq_entry_t *tqe;
+ struct txq_entry_t *tqe = NULL;
unsigned long flags;
spin_lock_irqsave(&wilc->txq_spinlock, flags);
- tqe = wilc->txq_head;
+ if (!list_empty(&wilc->txq_head.list))
+ tqe = list_first_entry(&wilc->txq_head.list, struct txq_entry_t,
+ list);
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
@@ -421,45 +375,37 @@ static struct txq_entry_t *wilc_wlan_txq_get_next(struct wilc *wilc,
spin_lock_irqsave(&wilc->txq_spinlock, flags);
- tqe = tqe->next;
+ if (!list_is_last(&tqe->list, &wilc->txq_head.list))
+ tqe = list_next_entry(tqe, list);
+ else
+ tqe = NULL;
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
return tqe;
}
-static int wilc_wlan_rxq_add(struct wilc *wilc, struct rxq_entry_t *rqe)
+static void wilc_wlan_rxq_add(struct wilc *wilc, struct rxq_entry_t *rqe)
{
if (wilc->quit)
- return 0;
+ return;
mutex_lock(&wilc->rxq_cs);
- if (!wilc->rxq_head) {
- rqe->next = NULL;
- wilc->rxq_head = rqe;
- wilc->rxq_tail = rqe;
- } else {
- wilc->rxq_tail->next = rqe;
- rqe->next = NULL;
- wilc->rxq_tail = rqe;
- }
- wilc->rxq_entries += 1;
+ list_add_tail(&rqe->list, &wilc->rxq_head.list);
mutex_unlock(&wilc->rxq_cs);
- return wilc->rxq_entries;
}
static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc)
{
- if (wilc->rxq_head) {
- struct rxq_entry_t *rqe;
-
- mutex_lock(&wilc->rxq_cs);
- rqe = wilc->rxq_head;
- wilc->rxq_head = wilc->rxq_head->next;
- wilc->rxq_entries -= 1;
- mutex_unlock(&wilc->rxq_cs);
- return rqe;
+ struct rxq_entry_t *rqe = NULL;
+
+ mutex_lock(&wilc->rxq_cs);
+ if (!list_empty(&wilc->rxq_head.list)) {
+ rqe = list_first_entry(&wilc->rxq_head.list, struct rxq_entry_t,
+ list);
+ list_del(&rqe->list);
}
- return NULL;
+ mutex_unlock(&wilc->rxq_cs);
+ return rqe;
}
void chip_allow_sleep(struct wilc *wilc)
@@ -471,7 +417,6 @@ void chip_allow_sleep(struct wilc *wilc)
wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
}
-EXPORT_SYMBOL_GPL(chip_allow_sleep);
void chip_wakeup(struct wilc *wilc)
{
@@ -488,7 +433,7 @@ void chip_wakeup(struct wilc *wilc)
wilc_get_chipid(wilc, true);
} while (wilc_get_chipid(wilc, true) == 0);
} while (wilc_get_chipid(wilc, true) == 0);
- } else if ((wilc->io_type & 0x1) == HIF_SDIO) {
+ } else if ((wilc->io_type & 0x1) == HIF_SDIO) {
wilc->hif_func->hif_write_reg(wilc, 0xfa, 1);
udelay(200);
wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
@@ -526,7 +471,6 @@ void chip_wakeup(struct wilc *wilc)
}
chip_ps_state = CHIP_WAKEDUP;
}
-EXPORT_SYMBOL_GPL(chip_wakeup);
void wilc_chip_sleep_manually(struct wilc *wilc)
{
@@ -540,7 +484,6 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
chip_ps_state = CHIP_SLEEPING_MANUAL;
release_bus(wilc, RELEASE_ONLY);
}
-EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
void host_wakeup_notify(struct wilc *wilc)
{
@@ -548,7 +491,6 @@ void host_wakeup_notify(struct wilc *wilc)
wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
release_bus(wilc, RELEASE_ONLY);
}
-EXPORT_SYMBOL_GPL(host_wakeup_notify);
void host_sleep_notify(struct wilc *wilc)
{
@@ -556,14 +498,12 @@ void host_sleep_notify(struct wilc *wilc)
wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
release_bus(wilc, RELEASE_ONLY);
}
-EXPORT_SYMBOL_GPL(host_sleep_notify);
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
{
int i, entries = 0;
u32 sum;
u32 reg;
- u8 *txb;
u32 offset = 0;
int vmm_sz = 0;
struct txq_entry_t *tqe;
@@ -571,15 +511,10 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
int counter;
int timeout;
u32 vmm_table[WILC_VMM_TBL_SIZE];
- struct wilc_vif *vif;
- struct wilc *wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
const struct wilc_hif_func *func;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
- txb = wilc->tx_buffer;
- wilc->txq_exit = 0;
+ u8 *txb = wilc->tx_buffer;
if (wilc->quit)
goto out;
@@ -611,7 +546,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
vmm_table[i] = vmm_sz / 4;
if (tqe->type == WILC_CFG_PKT)
vmm_table[i] |= BIT(10);
- vmm_table[i] = cpu_to_le32(vmm_table[i]);
+ cpu_to_le32s(&vmm_table[i]);
i++;
sum += vmm_sz;
@@ -714,7 +649,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (vmm_table[i] == 0)
break;
- vmm_table[i] = cpu_to_le32(vmm_table[i]);
+ le32_to_cpus(&vmm_table[i]);
vmm_sz = (vmm_table[i] & 0x3ff);
vmm_sz *= 4;
header = (tqe->type << 31) |
@@ -725,7 +660,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
else
header &= ~BIT(30);
- header = cpu_to_le32(header);
+ cpu_to_le32s(&header);
memcpy(&txb[offset], &header, 4);
if (tqe->type == WILC_CFG_PKT) {
buffer_offset = ETH_CONFIG_PKT_HDR_OFFSET;
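
The old code passed values it had just read from a little-endian buffer through cpu_to_le32(), i.e. the wrong conversion direction — a no-op on little-endian x86 but incorrect by intent and flagged by sparse. The in-place helpers make the direction explicit:

    le32_to_cpus(&vmm_table[i]);  /* wire (LE) -> CPU before unpacking fields */
    cpu_to_le32s(&header);        /* CPU -> wire (LE) before copying out      */
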
@@ -765,7 +700,6 @@ out_release_bus:
out:
mutex_unlock(&wilc->txq_add_to_head_cs);
- wilc->txq_exit = 1;
*txq_count = wilc->txq_entries;
return ret;
}
@@ -781,7 +715,7 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
do {
buff_ptr = buffer + offset;
memcpy(&header, buff_ptr, 4);
- header = cpu_to_le32(header);
+ le32_to_cpus(&header);
is_cfg_packet = (header >> 31) & 0x1;
pkt_offset = (header >> 22) & 0x1ff;
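
The shift-and-mask here is the same field that the new GET_PKT_OFFSET() macro in wilc_wfi_netdevice.h encodes, so the rx paths share one definition of the host-header layout (bit 31 = config packet, bits 30..22 = packet offset):

    pkt_offset = GET_PKT_OFFSET(header);   /* (header >> 22) & 0x1ff */
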
@@ -832,8 +766,6 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
u8 *buffer;
struct rxq_entry_t *rqe;
- wilc->rxq_exit = 0;
-
do {
if (wilc->quit) {
complete(&wilc->cfg_event);
@@ -849,8 +781,6 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
kfree(rqe);
} while (1);
-
- wilc->rxq_exit = 1;
}
static void wilc_unknown_isr_ext(struct wilc *wilc)
@@ -869,7 +799,7 @@ static void wilc_pllupdate_isr_ext(struct wilc *wilc, u32 int_stats)
else
mdelay(WILC_PLL_TO_SPI);
- while (!(ISWILC1000(wilc_get_chipid(wilc, true)) && --trials))
+ while (!(is_wilc1000(wilc_get_chipid(wilc, true)) && --trials))
mdelay(1);
}
@@ -895,31 +825,28 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
retries++;
}
- if (size > 0) {
- if (LINUX_RX_SIZE - offset < size)
- offset = 0;
+ if (size <= 0)
+ return;
- if (wilc->rx_buffer)
- buffer = &wilc->rx_buffer[offset];
- else
- goto _end_;
-
- wilc->hif_func->hif_clear_int_ext(wilc,
- DATA_INT_CLR | ENABLE_RX_VMM);
- ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
-
-_end_:
- if (ret) {
- offset += size;
- wilc->rx_buffer_offset = offset;
- rqe = kmalloc(sizeof(*rqe), GFP_KERNEL);
- if (rqe) {
- rqe->buffer = buffer;
- rqe->buffer_size = size;
- wilc_wlan_rxq_add(wilc, rqe);
- }
- }
- }
+ if (LINUX_RX_SIZE - offset < size)
+ offset = 0;
+
+ buffer = &wilc->rx_buffer[offset];
+
+ wilc->hif_func->hif_clear_int_ext(wilc, DATA_INT_CLR | ENABLE_RX_VMM);
+ ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
+ if (!ret)
+ return;
+
+ offset += size;
+ wilc->rx_buffer_offset = offset;
+ rqe = kmalloc(sizeof(*rqe), GFP_KERNEL);
+ if (!rqe)
+ return;
+
+ rqe->buffer = buffer;
+ rqe->buffer_size = size;
+ wilc_wlan_rxq_add(wilc, rqe);
wilc_wlan_handle_rxq(wilc);
}
@@ -944,7 +871,6 @@ void wilc_handle_isr(struct wilc *wilc)
release_bus(wilc, RELEASE_ALLOW_SLEEP);
}
-EXPORT_SYMBOL_GPL(wilc_handle_isr);
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size)
@@ -964,8 +890,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
do {
memcpy(&addr, &buffer[offset], 4);
memcpy(&size, &buffer[offset + 4], 4);
- addr = cpu_to_le32(addr);
- size = cpu_to_le32(size);
+ le32_to_cpus(&addr);
+ le32_to_cpus(&size);
acquire_bus(wilc, ACQUIRE_ONLY);
offset += 8;
while (((int)size) && (offset < buffer_size)) {
@@ -1139,11 +1065,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
struct rxq_entry_t *rqe;
u32 reg = 0;
int ret;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
wilc->quit = 1;
do {
@@ -1232,10 +1155,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
if (!commit)
return ret_size;
- netdev_dbg(vif->ndev,
- "[WILC]PACKET Commit with sequence number %d\n",
- wilc->cfg_seq_no);
- netdev_dbg(vif->ndev, "Processing cfg_set()\n");
+ netdev_dbg(vif->ndev, "%s: seqno[%d]\n", __func__, wilc->cfg_seq_no);
wilc->cfg_frame_in_use = 1;
if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler))
@@ -1243,7 +1163,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
if (!wait_for_completion_timeout(&wilc->cfg_event,
msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
- netdev_dbg(vif->ndev, "Set Timed Out\n");
+ netdev_dbg(vif->ndev, "%s: Timed Out\n", __func__);
ret_size = 0;
}
@@ -1282,7 +1202,7 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
if (!wait_for_completion_timeout(&wilc->cfg_event,
msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
- netdev_dbg(vif->ndev, "Get Timed Out\n");
+ netdev_dbg(vif->ndev, "%s: Timed Out\n", __func__);
ret_size = 0;
}
wilc->cfg_frame_in_use = 0;
@@ -1339,11 +1259,8 @@ static u32 init_chip(struct net_device *dev)
{
u32 chipid;
u32 reg, ret = 0;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wilc = vif->wilc;
acquire_bus(wilc, ACQUIRE_ONLY);
@@ -1382,7 +1299,7 @@ u32 wilc_get_chipid(struct wilc *wilc, bool update)
if (chipid == 0 || update) {
wilc->hif_func->hif_read_reg(wilc, 0x1000, &tempchipid);
wilc->hif_func->hif_read_reg(wilc, 0x13f4, &rfrevid);
- if (!ISWILC1000(tempchipid)) {
+ if (!is_wilc1000(tempchipid)) {
chipid = 0;
return chipid;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index a5b9c68e1b9c..7467188dbf2f 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -1,11 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
#ifndef WILC_WLAN_H
#define WILC_WLAN_H
#include <linux/types.h>
-#define ISWILC1000(id) ((id & 0xfffff000) == 0x100000 ? 1 : 0)
-
/********************************************
*
* Mac eth header length
@@ -207,8 +210,7 @@
********************************************/
struct txq_entry_t {
- struct txq_entry_t *next;
- struct txq_entry_t *prev;
+ struct list_head list;
int type;
int tcp_pending_ack_idx;
u8 *buffer;
@@ -219,7 +221,7 @@ struct txq_entry_t {
};
struct rxq_entry_t {
- struct rxq_entry_t *next;
+ struct list_head list;
u8 *buffer;
int buffer_size;
};
@@ -247,18 +249,9 @@ struct wilc_hif_func {
void (*disable_interrupt)(struct wilc *nic);
};
-/********************************************
- *
- * Configuration Structure
- *
- ********************************************/
-
#define MAX_CFG_FRAME_SIZE 1468
struct wilc_cfg_frame {
- u8 ether_header[14];
- u8 ip_header[20];
- u8 udp_header[8];
u8 wid_header[8];
u8 frame[MAX_CFG_FRAME_SIZE];
};
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index c0b9b700f4d7..421576386ab4 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -1,23 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* ////////////////////////////////////////////////////////////////////////// */
-/* */
-/* Copyright (c) Atmel Corporation. All rights reserved. */
-/* */
-/* Module Name: wilc_wlan_cfg.c */
-/* */
-/* */
-/* ///////////////////////////////////////////////////////////////////////// */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
#include "wilc_wlan_cfg.h"
#include "coreconfigurator.h"
-/********************************************
- *
- * Global Data
- *
- ********************************************/
enum cfg_cmd_type {
CFG_BYTE_CMD = 0,
CFG_HWORD_CMD = 1,
@@ -273,16 +264,17 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
*
********************************************/
+#define GET_WID_TYPE(wid) (((wid) >> 12) & 0x7)
static void wilc_wlan_parse_response_frame(u8 *info, int size)
{
- u32 wid, len = 0, i = 0;
+ u16 wid;
+ u32 len = 0, i = 0;
while (size > 0) {
i = 0;
wid = info[0] | (info[1] << 8);
- wid = cpu_to_le32(wid);
- switch ((wid >> 12) & 0x7) {
+ switch (GET_WID_TYPE(wid)) {
case WID_CHAR:
do {
if (g_cfg_byte[i].id == WID_NIL)
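
A WID is a 16-bit identifier whose bits 14..12 carry the value type, which is why the id is now held in a u16, assembled from two little-endian bytes with no byte-swap helper needed, and dispatched via GET_WID_TYPE():

    u16 wid = info[0] | (info[1] << 8);    /* LE bytes -> host order */

    switch (GET_WID_TYPE(wid)) {           /* (wid >> 12) & 0x7 */
    case WID_CHAR:          /* 1-byte value follows */
            break;
    case WID_SHORT:         /* 2-byte value follows */
            break;
    }
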
@@ -303,9 +295,8 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
break;
if (g_cfg_hword[i].id == wid) {
- g_cfg_hword[i].val =
- cpu_to_le16(info[4] |
- (info[5] << 8));
+ g_cfg_hword[i].val = (info[4] |
+ (info[5] << 8));
break;
}
i++;
@@ -319,11 +310,10 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
break;
if (g_cfg_word[i].id == wid) {
- g_cfg_word[i].val =
- cpu_to_le32(info[4] |
- (info[5] << 8) |
- (info[6] << 16) |
- (info[7] << 24));
+ g_cfg_word[i].val = (info[4] |
+ (info[5] << 8) |
+ (info[6] << 16) |
+ (info[7] << 24));
break;
}
i++;
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wilc_wlan_cfg.h
index 08092a551840..0c649d1f6f11 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.h
@@ -1,12 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* ////////////////////////////////////////////////////////////////////////// */
-/* */
-/* Copyright (c) Atmel Corporation. All rights reserved. */
-/* */
-/* Module Name: wilc_wlan_cfg.h */
-/* */
-/* */
-/* ///////////////////////////////////////////////////////////////////////// */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
#ifndef WILC_WLAN_CFG_H
#define WILC_WLAN_CFG_H
@@ -22,12 +18,12 @@ struct wilc_cfg_hword {
};
struct wilc_cfg_word {
- u32 id;
+ u16 id;
u32 val;
};
struct wilc_cfg_str {
- u32 id;
+ u16 id;
u8 *str;
};
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index e4a7bf5df65b..00d13b153f80 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -1,12 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* ///////////////////////////////////////////////////////////////////////// */
-/* */
-/* Copyright (c) Atmel Corporation. All rights reserved. */
-/* */
-/* Module Name: wilc_wlan_if.h */
-/* */
-/* */
-/* ///////////////////////////////////////////////////////////////////////// */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
#ifndef WILC_WLAN_IF_H
#define WILC_WLAN_IF_H
@@ -71,16 +67,6 @@ typedef void (*wilc_tx_complete_func_t)(void *, int);
#define MAX_SSID_LEN 33
#define MAX_RATES_SUPPORTED 12
-enum {
- SUPP_RATES_IE = 1,
- EXT_SUPP_RATES_IE = 50,
- HT_CAPABILITY_IE = 45,
- RSN_IE = 48,
- WPA_IE = 221,
- WMM_IE = 221,
- P2P_IE = 221,
-};
-
enum bss_types {
INFRASTRUCTURE = 0,
INDEPENDENT,
@@ -88,22 +74,6 @@ enum bss_types {
};
enum {
- RATE_AUTO = 0,
- RATE_1MB = 1,
- RATE_2MB = 2,
- RATE_5MB = 5,
- RATE_6MB = 6,
- RATE_9MB = 9,
- RATE_11MB = 11,
- RATE_12MB = 12,
- RATE_18MB = 18,
- RATE_24MB = 24,
- RATE_26MB = 36,
- RATE_48MB = 48,
- RATE_54MB = 54
-};
-
-enum {
B_ONLY_MODE = 0, /* 1, 2 M, otherwise 5, 11 M */
G_ONLY_MODE, /* 6,12,24 otherwise 9,18,36,48,54 */
G_MIXED_11B_1_MODE, /* 1,2,5.5,11 otherwise all on */
@@ -157,14 +127,14 @@ enum {
WPA2_AES_TKIP = 0x71, /* Aes or Tkip */
};
-enum AUTHTYPE {
+enum authtype {
OPEN_SYSTEM = 1,
SHARED_KEY = 2,
ANY = 3,
IEEE8021 = 5
};
-enum SITESURVEY {
+enum site_survey {
SITE_SURVEY_1CH = 0,
SITE_SURVEY_ALL_CH = 1,
SITE_SURVEY_OFF = 2
@@ -176,12 +146,6 @@ enum {
};
enum {
- DONT_RESET = 0,
- DO_RESET = 1,
- NO_REQUEST = 2,
-};
-
-enum {
REKEY_DISABLE = 1,
REKEY_TIME_BASE,
REKEY_PKT_BASE,
@@ -195,17 +159,6 @@ enum {
};
enum {
- PRI_HIGH_RSSI = 0x00,
- PRI_LOW_RSSI = 0x04,
- PRI_DETECT = 0x08
-};
-
-enum {
- CH_FILTER_OFF = 0x00,
- CH_FILTER_ON = 0x10
-};
-
-enum {
AUTO_PROT = 0, /* Auto */
NO_PROT, /* Do not use any protection */
ERP_PROT, /* Protect all ERP frame exchanges */
@@ -244,15 +197,6 @@ enum {
MIMO_MODE = 3, /* power save disable */
};
-enum {
- DISABLE_SELF_CTS,
- ENABLE_SELF_CTS,
- DISABLE_TX_ABORT,
- ENABLE_TX_ABORT,
- HW_TRIGGER_ABORT,
- SW_TRIGGER_ABORT,
-};
-
enum wid_type {
WID_CHAR = 0,
WID_SHORT = 1,
@@ -887,4 +831,6 @@ struct wilc;
int wilc_wlan_init(struct net_device *dev);
u32 wilc_get_chipid(struct wilc *wilc, bool update);
+int wilc_debugfs_init(void);
+void wilc_debugfs_remove(void);
#endif
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 42912257e2b9..d4cf09b11e33 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -148,40 +148,26 @@ static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
struct wlandevice *wlandev = dev->ml_priv;
u32 did;
- int err = 0;
- int result = 0;
-
if (key_index >= NUM_WEPKEYS)
return -EINVAL;
- switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- result = prism2_domibset_uint32(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
- key_index);
- if (result)
- goto exit;
-
- /* send key to driver */
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(key_index + 1);
-
- result = prism2_domibset_pstr32(wlandev, did,
- params->key_len, params->key);
- if (result)
- goto exit;
- break;
-
- default:
+ if (params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ params->cipher != WLAN_CIPHER_SUITE_WEP104) {
pr_debug("Unsupported cipher suite\n");
- result = 1;
+ return -EFAULT;
}
-exit:
- if (result)
- err = -EFAULT;
+ if (prism2_domibset_uint32(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
+ key_index))
+ return -EFAULT;
- return err;
+ /* send key to driver */
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(key_index + 1);
+
+ if (prism2_domibset_pstr32(wlandev, did, params->key_len, params->key))
+ return -EFAULT;
+ return 0;
}
static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
@@ -282,9 +268,9 @@ static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
if (result == 0) {
sinfo->txrate.legacy = quality.txrate.data;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->signal = quality.level.data;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
return result;
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 67a944c0d690..992ebaa1071f 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1176,7 +1176,7 @@ struct hfa384x_usbctlx {
enum ctlx_state state; /* Tracks running state */
struct completion done;
- volatile int reapable; /* Food for the reaper task */
+ int reapable; /* Food for the reaper task */
ctlx_cmdcb_t cmdcb; /* Async command callback */
ctlx_usercb_t usercb; /* Async user callback, */
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 33e97ffbb436..16f7dd266e3b 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1290,7 +1290,7 @@ cleanup:
* cmdcb command-specific callback
* usercb user callback for async calls, NULL for DOWAIT calls
* usercb_data user supplied data pointer for async calls, NULL
- * for DOASYNC calls
+ * for DOWAIT calls
*
* Returns:
* 0 success
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 26b178721414..6564810fd026 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -174,15 +174,25 @@ union p80211_hdr {
/* Frame and header length macros */
-#define WLAN_CTL_FRAMELEN(fstype) (\
- (fstype) == WLAN_FSTYPE_BLOCKACKREQ ? 24 : \
- (fstype) == WLAN_FSTYPE_BLOCKACK ? 152 : \
- (fstype) == WLAN_FSTYPE_PSPOLL ? 20 : \
- (fstype) == WLAN_FSTYPE_RTS ? 20 : \
- (fstype) == WLAN_FSTYPE_CTS ? 14 : \
- (fstype) == WLAN_FSTYPE_ACK ? 14 : \
- (fstype) == WLAN_FSTYPE_CFEND ? 20 : \
- (fstype) == WLAN_FSTYPE_CFENDCFACK ? 20 : 4)
+static inline u16 wlan_ctl_framelen(u16 fstype)
+{
+ switch (fstype) {
+ case WLAN_FSTYPE_BLOCKACKREQ:
+ return 24;
+ case WLAN_FSTYPE_BLOCKACK:
+ return 152;
+ case WLAN_FSTYPE_PSPOLL:
+ case WLAN_FSTYPE_RTS:
+ case WLAN_FSTYPE_CFEND:
+ case WLAN_FSTYPE_CFENDCFACK:
+ return 20;
+ case WLAN_FSTYPE_CTS:
+ case WLAN_FSTYPE_ACK:
+ return 14;
+ default:
+ return 4;
+ }
+}
#define WLAN_FCS_LEN 4
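
Replacing the WLAN_CTL_FRAMELEN() ternary chain with a switch-based inline keeps the control-frame length table readable and type-checked; callers keep the expression form:

    hdrlen = wlan_ctl_framelen(WLAN_GET_FC_FSTYPE(fctl)) - WLAN_FCS_LEN;
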
@@ -201,7 +211,7 @@ static inline u16 p80211_headerlen(u16 fctl)
hdrlen += ETH_ALEN;
break;
case WLAN_FTYPE_CTL:
- hdrlen = WLAN_CTL_FRAMELEN(WLAN_GET_FC_FSTYPE(fctl)) -
+ hdrlen = wlan_ctl_framelen(WLAN_GET_FC_FSTYPE(fctl)) -
WLAN_FCS_LEN;
break;
default:
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index 7c37d56dd9b7..ac254542fde6 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -194,20 +194,6 @@
P80211DID_LSB_ACCESS)
/*----------------------------------------------------------------*/
-/* The following structure types are used for the representation */
-/* of ENUMint type metadata. */
-
-struct p80211enumpair {
- u32 val;
- char *name;
-};
-
-struct p80211enum {
- int nitems;
- struct p80211enumpair *list;
-};
-
-/*----------------------------------------------------------------*/
/* The following structure types are used to store data items in */
/* messages. */
@@ -330,17 +316,4 @@ struct p80211item_unk4096 {
u8 data[4096];
} __packed;
-struct catlistitem;
-
-/*----------------------------------------------------------------*/
-/* The following structure type is used to represent all of the */
-/* metadata items. Some components may choose to use more, */
-/* less or different metadata items. */
-
-typedef void (*p80211_totext_t) (struct catlistitem *, u32 did, u8 *itembuf,
- char *textbuf);
-typedef void (*p80211_fromtext_t) (struct catlistitem *, u32 did, u8 *itembuf,
- char *textbuf);
-typedef u32(*p80211_valid_t) (struct catlistitem *, u32 did, u8 *itembuf);
-
#endif /* _P80211TYPES_H */
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 5860d0d65841..4fb91294570d 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -1189,9 +1189,10 @@ static int validate_identity(void)
/* PRI compat range */
if ((s3info[i].info.compat.role == 1) &&
(s3info[i].info.compat.id == 3)) {
- if ((s3info[i].info.compat.bottom > priid.top)
- || (s3info[i].info.compat.top <
- priid.bottom)) {
+ if ((s3info[i].info.compat.bottom >
+ priid.top) ||
+ (s3info[i].info.compat.top <
+ priid.bottom)) {
result = 3;
}
}
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index d7de9e9c47a2..7350fe5d96a3 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -85,10 +85,21 @@
#include "prism2mgmt.h"
/* Converts 802.11 format rate specifications to prism2 */
-#define p80211rate_to_p2bit(n) ((((n) & ~BIT(7)) == 2) ? BIT(0) : \
- (((n) & ~BIT(7)) == 4) ? BIT(1) : \
- (((n) & ~BIT(7)) == 11) ? BIT(2) : \
- (((n) & ~BIT(7)) == 22) ? BIT(3) : 0)
+static inline u16 p80211rate_to_p2bit(u32 rate)
+{
+ switch (rate & ~BIT(7)) {
+ case 2:
+ return BIT(0);
+ case 4:
+ return BIT(1);
+ case 11:
+ return BIT(2);
+ case 22:
+ return BIT(3);
+ default:
+ return 0;
+ }
+}
/*----------------------------------------------------------------
* prism2mgmt_scan
@@ -403,7 +414,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
goto exit;
}
- item = &(hw->scanresults->info.hscanresult.result[req->bssindex.data]);
+ item = &hw->scanresults->info.hscanresult.result[req->bssindex.data];
/* signal and noise */
req->signal.status = P80211ENUM_msgitem_status_data_ok;
req->noise.status = P80211ENUM_msgitem_status_data_ok;
@@ -428,7 +439,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
#define REQBASICRATE(N) \
do { \
- if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \
+ if ((count >= (N)) && DOT11_RATE5_ISBASIC_GET( \
item->supprates[(N) - 1])) { \
req->basicrate ## N .data = item->supprates[(N) - 1]; \
req->basicrate ## N .status = \
@@ -447,7 +458,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
#define REQSUPPRATE(N) \
do { \
- if (count >= N) { \
+ if (count >= (N)) { \
req->supprate ## N .data = item->supprates[(N) - 1]; \
req->supprate ## N .status = \
P80211ENUM_msgitem_status_data_ok; \
@@ -1064,7 +1075,7 @@ int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
/* Set the ssid */
memset(bytebuf, 0, 256);
- pstr = (struct p80211pstrd *)&(msg->ssid.data);
+ pstr = (struct p80211pstrd *)&msg->ssid.data;
prism2mgmt_pstr2bytestr(p2bytestr, pstr);
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFDESIREDSSID,
bytebuf,
@@ -1188,7 +1199,7 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Save macport 0 state */
result = hfa384x_drvr_getconfig16(hw,
HFA384x_RID_CNFPORTTYPE,
- &(hw->presniff_port_type));
+ &hw->presniff_port_type);
if (result) {
netdev_dbg
(wlandev->netdev,
@@ -1199,7 +1210,7 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Save the wepflags state */
result = hfa384x_drvr_getconfig16(hw,
HFA384x_RID_CNFWEPFLAGS,
- &(hw->presniff_wepflags));
+ &hw->presniff_wepflags);
if (result) {
netdev_dbg
(wlandev->netdev,
@@ -1258,9 +1269,8 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
goto failed;
}
if ((msg->keepwepflags.status ==
- P80211ENUM_msgitem_status_data_ok)
- && (msg->keepwepflags.data !=
- P80211ENUM_truth_true)) {
+ P80211ENUM_msgitem_status_data_ok) &&
+ (msg->keepwepflags.data != P80211ENUM_truth_true)) {
/* Set the wepflags for no decryption */
word = HFA384x_WEPFLAGS_DISABLE_TXCRYPT |
HFA384x_WEPFLAGS_DISABLE_RXCRYPT;
@@ -1280,8 +1290,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
}
/* Do we want to strip the FCS in monitor mode? */
- if ((msg->stripfcs.status == P80211ENUM_msgitem_status_data_ok)
- && (msg->stripfcs.data == P80211ENUM_truth_true)) {
+ if ((msg->stripfcs.status ==
+ P80211ENUM_msgitem_status_data_ok) &&
+ (msg->stripfcs.data == P80211ENUM_truth_true)) {
hw->sniff_fcs = 0;
} else {
hw->sniff_fcs = 1;
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index edad299ff5ad..e88baf715cec 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -87,10 +87,10 @@ struct mibrec {
u16 parm2;
u16 parm3;
int (*func)(struct mibrec *mib,
- int isget,
- struct wlandevice *wlandev,
- struct hfa384x *hw,
- struct p80211msg_dot11req_mibset *msg, void *data);
+ int isget,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
+ struct p80211msg_dot11req_mibset *msg, void *data);
};
static int prism2mib_bytearea2pstr(struct mibrec *mib,
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 4c44d7bed01a..cb6f32ce7de8 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -1,10 +1,10 @@
menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
- depends on SCSI && BLOCK
+ depends on BLOCK
select CONFIGFS_FS
select CRC_T10DIF
- select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select BLK_SCSI_REQUEST
select SGL_ALLOC
default n
help
@@ -29,6 +29,7 @@ config TCM_FILEIO
config TCM_PSCSI
tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+ depends on SCSI
help
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 514986b57c2d..25eb3891e34b 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
struct iscsi_param *param;
u32 mrdsl, mbl;
u32 max_npdu, max_iso_npdu;
+ u32 max_iso_payload;
if (conn->login->leading_connection) {
param = iscsi_find_param_from_key(MAXBURSTLENGTH,
@@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
mrdsl = conn_ops->MaxRecvDataSegmentLength;
max_npdu = mbl / mrdsl;
- max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
- (ISCSI_HDR_LEN + mrdsl +
+ max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
+
+ max_iso_npdu = max_iso_payload /
+ (ISCSI_HDR_LEN + mrdsl +
cxgbit_digest_len[csk->submode]);
csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
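
Rounding the ISO payload budget down to a multiple of the emulated MSS ensures a burst never ends in a partial TCP segment before the per-PDU math is applied. For illustration, assuming csk->emss = 1448 and a 64 KiB budget:

    rounddown(65535, 1448) = 45 * 1448 = 65160   /* usable ISO payload */
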
@@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
+ if (cxgbit_set_digest(csk))
+ return -1;
+
if (conn->login->leading_connection) {
param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
conn->param_list);
@@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
if (is_t5(cdev->lldi.adapter_type))
goto enable_ddp;
else
- goto enable_digest;
+ return 0;
}
if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
@@ -781,10 +787,6 @@ enable_ddp:
}
}
-enable_digest:
- if (cxgbit_set_digest(csk))
- return -1;
-
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0ebc4818e132..95d0a22b2ad6 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1090,10 +1090,8 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = {
/* Start items for lio_target_tiqn_cit */
-static struct se_portal_group *lio_target_tiqn_addtpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn,
+ const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 99501785cdc1..923b1a9fc3dc 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -369,7 +369,7 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
- sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4b34f71547c6..101d62105c93 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -636,8 +636,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
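
strncat(dst, src, strlen(src)) bounds the copy by the source length, not by the space left in dst, so the old pair of calls provided no overflow protection at all; strlcat() is bounded by the destination's total size and guarantees NUL termination:

    /* wrong: limit is the source length, dst can still overflow */
    strncat(buf1, ",", strlen(","));

    /* right: limit is sizeof(destination), assuming buf1 is a fixed array */
    strlcat(buf1, "," NONE, sizeof(buf1));
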
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 4435bf374d2d..49be1e41290c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <linux/list.h>
-#include <linux/percpu_ida.h>
+#include <linux/sched/signal.h>
#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
@@ -147,6 +147,30 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->r2t_lock);
}
+static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
+{
+ int tag = -1;
+ DEFINE_WAIT(wait);
+ struct sbq_wait_state *ws;
+
+ if (state == TASK_RUNNING)
+ return tag;
+
+ ws = &se_sess->sess_tag_pool.ws[0];
+ for (;;) {
+ prepare_to_wait_exclusive(&ws->wait, &wait, state);
+ if (signal_pending_state(state, current))
+ break;
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ finish_wait(&ws->wait, &wait);
+ return tag;
+}
+
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
@@ -155,9 +179,11 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
- int size, tag;
+ int size, tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ tag = iscsit_wait_for_tag(se_sess, state, &cpu);
if (tag < 0)
return NULL;
@@ -166,6 +192,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
memset(cmd, 0, size);
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->conn = conn;
cmd->data_direction = DMA_NONE;
INIT_LIST_HEAD(&cmd->i_conn_node);
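
With percpu_ida gone, tags come from the session's sbitmap_queue, which also returns a CPU hint that must ride along in se_cmd.map_cpu so the free path can return the tag to the right per-CPU cache. Allocation/free pairing, as used here:

    tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
    if (tag < 0)
            tag = iscsit_wait_for_tag(se_sess, state, &cpu);

    cmd->se_cmd.map_tag = tag;
    cmd->se_cmd.map_cpu = cpu;

    /* release: wraps sbitmap_queue_clear(&sess_tag_pool, map_tag, map_cpu) */
    target_free_tag(sess->se_sess, se_cmd);
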
@@ -711,7 +738,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(sess->se_sess, se_cmd);
}
EXPORT_SYMBOL(iscsit_release_cmd);
@@ -1026,26 +1053,8 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
- /*
- * NOPIN timeout is disabled..
- */
- if (!na->nopin_timeout)
- return;
-
spin_lock_bh(&conn->nopin_timer_lock);
- if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
- spin_unlock_bh(&conn->nopin_timer_lock);
- return;
- }
-
- conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
- conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
-
- pr_debug("Started NOPIN Timer on CID: %d at %u second"
- " interval\n", conn->cid, na->nopin_timeout);
+ __iscsit_start_nopin_timer(conn);
spin_unlock_bh(&conn->nopin_timer_lock);
}
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index abe8ecbcdf06..158ee9d522f7 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -1,5 +1,6 @@
config LOOPBACK_TARGET
tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module"
+ depends on SCSI
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 60d5b918c4ac..bc8918f382e4 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -239,10 +239,7 @@ out:
return ret;
release:
- if (se_cmd)
- transport_generic_free_cmd(se_cmd, 0);
- else
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
goto out;
}
@@ -768,7 +765,7 @@ static int tcm_loop_make_nexus(
if (!tl_nexus)
return -ENOMEM;
- tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+ tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
name, tl_nexus, tcm_loop_alloc_sess_cb);
if (IS_ERR(tl_nexus->se_sess)) {
@@ -808,7 +805,7 @@ static int tcm_loop_drop_nexus(
/*
* Release the SCSI I_T Nexus to the emulated Target Port
*/
- transport_deregister_session(tl_nexus->se_sess);
+ target_remove_session(se_sess);
tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
@@ -983,10 +980,8 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
/* Start items for tcm_loop_naa_cit */
-static struct se_portal_group *tcm_loop_make_naa_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index fb1003921d85..3d10189ecedc 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -209,7 +209,7 @@ static struct sbp_session *sbp_session_create(
INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
sess->guid = guid;
- sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+ sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
sizeof(struct sbp_target_request),
TARGET_PROT_NORMAL, guid_str,
sess, NULL);
@@ -235,8 +235,7 @@ static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
if (cancel_work)
cancel_delayed_work_sync(&sess->maint_work);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
if (sess->card)
fw_card_put(sess->card);
@@ -926,15 +925,16 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
{
struct se_session *se_sess = sess->se_sess;
struct sbp_target_request *req;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
req->se_cmd.tag = next_orb;
return req;
@@ -1460,7 +1460,7 @@ static void sbp_free_request(struct sbp_target_request *req)
kfree(req->pg_tbl);
kfree(req->cmd_buf);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static void sbp_mgt_agent_process(struct work_struct *work)
@@ -2005,10 +2005,8 @@ static void sbp_pre_unlink_lun(
pr_err("unlink LUN: failed to update unit directory\n");
}
-static struct se_portal_group *sbp_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct sbp_tport *tport =
container_of(wwn, struct sbp_tport, tport_wwn);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 5ccef7d597fa..f6b1549f4142 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -263,8 +263,8 @@ static struct config_group *target_core_register_fabric(
&tf->tf_discovery_cit);
configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
- pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
- " %s\n", tf->tf_group.cg_item.ci_name);
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
+ config_item_name(&tf->tf_group.cg_item));
return &tf->tf_group;
}
@@ -810,7 +810,7 @@ static ssize_t pi_prot_type_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection requires device to be configured\n");
return -ENODEV;
}
@@ -859,7 +859,7 @@ static ssize_t pi_prot_format_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection format requires device to be configured\n");
return -ENODEV;
}
@@ -1948,7 +1948,7 @@ static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
- return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+ return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}
static ssize_t target_dev_enable_store(struct config_item *item,
@@ -2473,7 +2473,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("Unable to set alua_access_state while device is"
" not configured\n");
return -ENODEV;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e27db4d45a9d..47b5ef153135 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -336,7 +336,6 @@ int core_enable_device_list_for_node(
return -ENOMEM;
}
- atomic_set(&new->ua_count, 0);
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
@@ -879,39 +878,21 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
}
EXPORT_SYMBOL(target_to_linux_sector);
-/**
- * target_find_device - find a se_device by its dev_index
- * @id: dev_index
- * @do_depend: true if caller needs target_depend_item to be done
- *
- * If do_depend is true, the caller must do a target_undepend_item
- * when finished using the device.
- *
- * If do_depend is false, the caller must be called in a configfs
- * callback or during removal.
- */
-struct se_device *target_find_device(int id, bool do_depend)
-{
- struct se_device *dev;
-
- mutex_lock(&device_mutex);
- dev = idr_find(&devices_idr, id);
- if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
- dev = NULL;
- mutex_unlock(&device_mutex);
- return dev;
-}
-EXPORT_SYMBOL(target_find_device);
-
struct devices_idr_iter {
+ struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
static int target_devices_idr_iter(int id, void *p, void *data)
+ __must_hold(&device_mutex)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ int ret;
+
+ config_item_put(iter->prev_item);
+ iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
@@ -919,10 +900,18 @@ static int target_devices_idr_iter(int id, void *p, void *data)
* to allow other callers to access partially setup devices,
* so we skip them here.
*/
- if (!(dev->dev_flags & DF_CONFIGURED))
+ if (!target_dev_configured(dev))
+ return 0;
+
+ iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!iter->prev_item)
return 0;
+ mutex_unlock(&device_mutex);
+
+ ret = iter->fn(dev, iter->data);
- return iter->fn(dev, iter->data);
+ mutex_lock(&device_mutex);
+ return ret;
}
/**
@@ -936,15 +925,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data)
{
- struct devices_idr_iter iter;
+ struct devices_idr_iter iter = { .fn = fn, .data = data };
int ret;
- iter.fn = fn;
- iter.data = data;
-
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
+ config_item_put(iter.prev_item);
return ret;
}
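
target_devices_idr_iter() now pins the current device with
config_item_get_unless_zero() and drops device_mutex across the callback,
so iter->fn() may sleep or take configfs locks without inverting lock
order; the reference parked in iter->prev_item, released on the next
iteration or by target_for_each_device() after the walk, keeps the entry
valid while the mutex is down. The same drop-the-lock-around-the-callback
pattern in miniature, as a user-space sketch with illustrative names:

    #include <pthread.h>

    struct node {
            struct node *next;
            int refcount;                   /* protected by list_lock */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void for_each_node(struct node *head, void (*fn)(struct node *))
    {
            struct node *n, *next;

            pthread_mutex_lock(&list_lock);
            for (n = head; n; n = next) {
                    n->refcount++;          /* pin, as config_item_get_unless_zero() does */
                    pthread_mutex_unlock(&list_lock);

                    fn(n);                  /* may sleep or take other locks */

                    pthread_mutex_lock(&list_lock);
                    next = n->next;         /* still valid: our reference pinned n */
                    n->refcount--;          /* unpin, as config_item_put() does */
            }
            pthread_mutex_unlock(&list_lock);
    }
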
@@ -953,7 +940,7 @@ int target_configure_device(struct se_device *dev)
struct se_hba *hba = dev->se_hba;
int ret, id;
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
@@ -1058,7 +1045,7 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
destroy_workqueue(dev->tmr_wq);
dev->transport->destroy_device(dev);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e1416b007aa4..aa2f4f632ebe 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -34,6 +34,7 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
@@ -642,7 +643,7 @@ static int target_fabric_port_link(
}
dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("se_device not configured yet, cannot port link\n");
return -ENODEV;
}
@@ -841,7 +842,7 @@ static struct config_group *target_fabric_make_tpg(
return ERR_PTR(-ENOSYS);
}
- se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
+ se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dead30b1d32c..0c6635587930 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,7 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-int transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 01ac306131c1..10db5656fd5d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
* Check for overflow of 8byte PRI READ_KEYS payload and
* next reservation key list descriptor.
*/
- if ((add_len + 8) > (cmd->data_length - 8))
- break;
-
- put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
- off += 8;
+ if (off + 8 <= cmd->data_length) {
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
+ }
+ /*
+ * SPC5r17: 6.16.2 READ KEYS service action
+ * The ADDITIONAL LENGTH field indicates the number of bytes in
+ * the Reservation key list. The contents of the ADDITIONAL
+ * LENGTH field are not altered based on the allocation length.
+ */
add_len += 8;
}
spin_unlock(&dev->t10_pr.registration_lock);
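
The rewritten loop separates the two jobs the old bounds check conflated:
off only advances while another 8-byte key still fits inside the
allocation length, but add_len counts every registered key, because SPC
requires ADDITIONAL LENGTH to report the full list size regardless of
truncation. A worked example with illustrative numbers (five registered
keys, 24-byte allocation length):

    #include <stdio.h>

    int main(void)
    {
            unsigned int data_length = 24;  /* allocation length from the CDB */
            unsigned int off = 8;           /* PRGENERATION + ADDITIONAL LENGTH header */
            unsigned int add_len = 0;
            int nr_keys = 5;

            for (int i = 0; i < nr_keys; i++) {
                    if (off + 8 <= data_length)
                            off += 8;       /* this key fits in the buffer */
                    add_len += 8;           /* counted even when truncated */
            }
            /* prints: keys written: 2, ADDITIONAL LENGTH: 40 */
            printf("keys written: %u, ADDITIONAL LENGTH: %u\n",
                   (off - 8) / 8, add_len);
            return 0;
    }
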
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index b054682e974f..ebac2b49b9c6 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -978,9 +978,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
}
case COMPARE_AND_WRITE:
if (!dev->dev_attrib.emulate_caw) {
- pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
- " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
- dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
+ pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
+ dev->se_hba->backend->ops->name,
+ config_item_name(&dev->dev_group.cg_item),
+ dev->t10_wwn.unit_serial);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
sectors = cdb[13];
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 9c7bc1ca341a..6d1179a7f043 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,25 +75,6 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
kfree(tmr);
}
-static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
-{
- unsigned long flags;
- bool remove = true, send_tas;
- /*
- * TASK ABORTED status (TAS) bit support
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- send_tas = (cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- if (send_tas) {
- remove = false;
- transport_send_task_abort(cmd);
- }
-
- return transport_cmd_finish_abort(cmd, remove);
-}
-
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
@@ -142,7 +123,7 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
return false;
}
}
- if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
spin_unlock(&se_cmd->t_state_lock);
@@ -187,13 +168,12 @@ void core_tmr_abort_task(
if (!__target_check_io_state(se_cmd, se_sess, 0))
continue;
- list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- if (!transport_cmd_finish_abort(se_cmd, true))
+ if (!transport_cmd_finish_abort(se_cmd))
target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
@@ -259,7 +239,7 @@ static void core_tmr_drain_tmr_list(
spin_unlock(&sess->sess_cmd_lock);
continue;
}
- if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
spin_unlock(&cmd->t_state_lock);
spin_unlock(&sess->sess_cmd_lock);
continue;
@@ -291,7 +271,7 @@ static void core_tmr_drain_tmr_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!transport_cmd_finish_abort(cmd, 1))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
@@ -380,7 +360,7 @@ static void core_tmr_drain_state_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!core_tmr_handle_tas_abort(cmd, tas))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ee5081ba5313..86c0156e6c88 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,7 +64,7 @@ struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);
@@ -224,7 +224,27 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
-struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session - initialize a session object
+ * @se_sess: Session object pointer.
+ *
+ * The caller must have zero-initialized @se_sess before calling this function.
+ */
+void transport_init_session(struct se_session *se_sess)
+{
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
+ init_waitqueue_head(&se_sess->cmd_list_wq);
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/**
+ * transport_alloc_session - allocate a session object and initialize it
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -234,17 +254,20 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&se_sess->sess_list);
- INIT_LIST_HEAD(&se_sess->sess_acl_list);
- INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
- spin_lock_init(&se_sess->sess_cmd_lock);
+ transport_init_session(se_sess);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session);
+EXPORT_SYMBOL(transport_alloc_session);
+/**
+ * transport_alloc_session_tags - allocate target driver private data
+ * @se_sess: Session pointer.
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ */
int transport_alloc_session_tags(struct se_session *se_sess,
unsigned int tag_num, unsigned int tag_size)
{
@@ -260,7 +283,8 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
}
- rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
+ rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
+ false, GFP_KERNEL, NUMA_NO_NODE);
if (rc < 0) {
pr_err("Unable to init se_sess->sess_tag_pool,"
" tag_num: %u\n", tag_num);
@@ -273,9 +297,16 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
EXPORT_SYMBOL(transport_alloc_session_tags);
-struct se_session *transport_init_session_tags(unsigned int tag_num,
- unsigned int tag_size,
- enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session_tags - allocate a session and target driver private data
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+static struct se_session *
+transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int rc;
@@ -291,7 +322,7 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return ERR_PTR(-EINVAL);
}
- se_sess = transport_init_session(sup_prot_ops);
+ se_sess = transport_alloc_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
@@ -303,7 +334,6 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session_tags);
/*
* Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
@@ -316,6 +346,7 @@ void __transport_register_session(
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
+ unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -352,7 +383,7 @@ void __transport_register_session(
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@@ -361,7 +392,7 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
@@ -385,7 +416,7 @@ void transport_register_session(
EXPORT_SYMBOL(transport_register_session);
struct se_session *
-target_alloc_session(struct se_portal_group *tpg,
+target_setup_session(struct se_portal_group *tpg,
unsigned int tag_num, unsigned int tag_size,
enum target_prot_op prot_op,
const char *initiatorname, void *private,
@@ -401,7 +432,7 @@ target_alloc_session(struct se_portal_group *tpg,
if (tag_num != 0)
sess = transport_init_session_tags(tag_num, tag_size, prot_op);
else
- sess = transport_init_session(prot_op);
+ sess = transport_alloc_session(prot_op);
if (IS_ERR(sess))
return sess;
@@ -427,7 +458,7 @@ target_alloc_session(struct se_portal_group *tpg,
transport_register_session(tpg, sess->se_node_acl, sess, private);
return sess;
}
-EXPORT_SYMBOL(target_alloc_session);
+EXPORT_SYMBOL(target_setup_session);
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
@@ -547,7 +578,7 @@ void transport_free_session(struct se_session *se_sess)
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
- percpu_ida_destroy(&se_sess->sess_tag_pool);
+ sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
kmem_cache_free(se_sess_cache, se_sess);
@@ -585,6 +616,13 @@ void transport_deregister_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_deregister_session);
+void target_remove_session(struct se_session *se_sess)
+{
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
+}
+EXPORT_SYMBOL(target_remove_session);
+
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
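
target_remove_session() is the teardown counterpart of
target_setup_session(): it folds the transport_deregister_session_configfs()
plus transport_deregister_session() sequence that tcm_loop and sbp
open-coded above into a single call. A hedged sketch of the fabric-side
pairing (struct my_nexus and the my_* names are illustrative, not part of
any driver):

    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    struct my_nexus {
            struct se_session *se_sess;
    };

    static int my_make_nexus(struct se_portal_group *se_tpg,
                             struct my_nexus *nexus, const char *name)
    {
            /* no tag pool (0, 0), default protection, no alloc callback */
            nexus->se_sess = target_setup_session(se_tpg, 0, 0,
                                                  TARGET_PROT_NORMAL, name,
                                                  nexus, NULL);
            return PTR_ERR_OR_ZERO(nexus->se_sess);
    }

    static void my_drop_nexus(struct my_nexus *nexus)
    {
            target_remove_session(nexus->se_sess);  /* both deregister steps */
            nexus->se_sess = NULL;
    }
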
@@ -601,6 +639,13 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+/*
+ * This function is called by the target core after it has
+ * finished processing a SCSI command or SCSI TMF. Both the regular command
+ * processing code and the code for aborting commands can call this
+ * function. CMD_T_STOP is set if and only if another thread is waiting
+ * inside transport_wait_for_tasks() for t_transport_stop_comp.
+ */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
unsigned long flags;
@@ -650,23 +695,27 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd)
{
+ bool send_tas = cmd->transport_state & CMD_T_TAS;
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
int ret = 0;
+ if (send_tas)
+ transport_send_task_abort(cmd);
+
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
/*
* Allow the fabric driver to unmap any resources before
* releasing the descriptor via TFO->release_cmd()
*/
- if (remove)
+ if (!send_tas)
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
- if (remove && ack_kref)
+ if (!send_tas && ack_kref)
ret = target_put_sess_cmd(cmd);
return ret;
@@ -1267,7 +1316,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
- init_completion(&cmd->cmd_wait_comp);
+ cmd->compl = NULL;
spin_lock_init(&cmd->t_state_lock);
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
@@ -2079,9 +2128,6 @@ static void transport_complete_qf(struct se_cmd *cmd)
if (cmd->scsi_status)
goto queue_status;
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
goto queue_status;
}
@@ -2593,20 +2639,37 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+/*
+ * This function is called by frontend drivers after processing of a command
+ * has finished.
+ *
+ * The protocol for ensuring that either the regular flow or the TMF
+ * code drops one reference is as follows:
+ * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
+ * the frontend driver to drop one reference, synchronously or asynchronously.
+ * - During regular command processing the target core sets CMD_T_COMPLETE
+ * before invoking one of the .queue_*() functions.
+ * - The code that aborts commands skips commands and TMFs for which
+ * CMD_T_COMPLETE has been set.
+ * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
+ * commands that will be aborted.
+ * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set,
+ * transport_generic_free_cmd() skips its call to target_put_sess_cmd().
+ * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
+ * be called and will drop a reference.
+ * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
+ * will be called. transport_cmd_finish_abort() will drop the final reference.
+ */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
+ DECLARE_COMPLETION_ONSTACK(compl);
int ret = 0;
bool aborted = false, tas = false;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
- if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (wait_for_tasks)
+ target_wait_free_cmd(cmd, &aborted, &tas);
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
- } else {
- if (wait_for_tasks)
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
@@ -2617,20 +2680,14 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
-
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
}
- /*
- * If the task has been internally aborted due to TMR ABORT_TASK
- * or LUN_RESET, target_core_tmr.c is responsible for performing
- * the remaining calls to target_put_sess_cmd(), and not the
- * callers of this function.
- */
+ if (aborted)
+ cmd->compl = &compl;
+ if (!aborted || tas)
+ ret = target_put_sess_cmd(cmd);
if (aborted) {
pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
- wait_for_completion(&cmd->cmd_wait_comp);
- cmd->se_tfo->release_cmd(cmd);
+ wait_for_completion(&compl);
ret = 1;
}
return ret;
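
The on-stack completion replaces the per-command cmd_wait_comp: only an
aborting caller sets cmd->compl, and target_release_cmd_kref() (below)
signals it after the final reference drops, so the waiter's stack frame
is guaranteed to outlive the command. The underlying kref plus
DECLARE_COMPLETION_ONSTACK idiom, as a generic sketch (struct obj is
illustrative):

    #include <linux/completion.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref kref;
            struct completion *compl;       /* set only by a waiting thread */
    };

    static void obj_release(struct kref *kref)
    {
            struct obj *o = container_of(kref, struct obj, kref);
            struct completion *compl = o->compl;

            kfree(o);
            if (compl)
                    complete(compl);        /* wake the waiter after teardown */
    }

    static void obj_put_and_wait(struct obj *o)
    {
            DECLARE_COMPLETION_ONSTACK(compl);

            o->compl = &compl;
            kref_put(&o->kref, obj_release);
            wait_for_completion(&compl);    /* returns once obj_release() ran */
    }
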
@@ -2691,30 +2748,21 @@ static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
+ struct completion *compl = se_cmd->compl;
unsigned long flags;
- bool fabric_stop;
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
- spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
- (se_cmd->transport_state & CMD_T_ABORTED);
- spin_unlock(&se_cmd->t_state_lock);
-
- if (se_cmd->cmd_wait_set || fabric_stop) {
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- complete(&se_cmd->cmd_wait_comp);
- return;
- }
list_del_init(&se_cmd->se_cmd_list);
+ if (list_empty(&se_sess->sess_cmd_list))
+ wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
+ if (compl)
+ complete(compl);
}
/**
@@ -2833,78 +2881,41 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
EXPORT_SYMBOL(target_show_cmd);
/**
- * target_sess_cmd_list_set_waiting - Flag all commands in
- * sess_cmd_list to complete cmd_wait_comp. Set
- * sess_tearing_down so no more commands are queued.
+ * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
* @se_sess: session to flag
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
- int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return;
- }
se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- rc = kref_get_unless_zero(&se_cmd->cmd_kref);
- if (rc) {
- se_cmd->cmd_wait_set = 1;
- spin_lock(&se_cmd->t_state_lock);
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- spin_unlock(&se_cmd->t_state_lock);
- } else
- list_del_init(&se_cmd->se_cmd_list);
- }
-
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/**
- * target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * target_wait_for_sess_cmds - Wait for outstanding commands
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
- unsigned long flags;
- bool tas;
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
- " %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- tas = (se_cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
- if (!target_put_sess_cmd(se_cmd)) {
- if (tas)
- target_put_sess_cmd(se_cmd);
- }
-
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- se_cmd->se_tfo->release_cmd(se_cmd);
- }
+ struct se_cmd *cmd;
+ int ret;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- WARN_ON(!list_empty(&se_sess->sess_cmd_list));
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ WARN_ON_ONCE(!se_sess->sess_tearing_down);
+ spin_lock_irq(&se_sess->sess_cmd_lock);
+ do {
+ ret = wait_event_interruptible_lock_irq_timeout(
+ se_sess->cmd_list_wq,
+ list_empty(&se_sess->sess_cmd_list),
+ se_sess->sess_cmd_lock, 180 * HZ);
+ list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
+ target_show_cmd("session shutdown: still waiting for ",
+ cmd);
+ } while (ret <= 0);
+ spin_unlock_irq(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
@@ -3166,12 +3177,23 @@ static const struct sense_info sense_info_table[] = {
},
};
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
+/**
+ * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
+ * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
+ * be stored.
+ * @reason: LIO sense reason code. If this argument has the value
+ * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
+ * dequeuing a unit attention fails due to multiple commands being processed
+ * concurrently, set the command status to BUSY.
+ */
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
const struct sense_info *si;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
- u8 asc, ascq;
+ u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
@@ -3180,9 +3202,13 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
si = &sense_info_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+ key = si->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
- core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
- WARN_ON_ONCE(asc == 0);
+ if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
+ &ascq)) {
+ cmd->scsi_status = SAM_STAT_BUSY;
+ return;
+ }
} else if (si->asc == 0) {
WARN_ON_ONCE(cmd->scsi_asc == 0);
asc = cmd->scsi_asc;
@@ -3192,13 +3218,14 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
ascq = si->ascq;
}
- scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
if (si->add_sector_info)
- return scsi_set_sense_information(buffer,
- cmd->scsi_sense_length,
- cmd->bad_sector);
-
- return 0;
+ WARN_ON_ONCE(scsi_set_sense_information(buffer,
+ cmd->scsi_sense_length,
+ cmd->bad_sector) < 0);
}
int
@@ -3215,16 +3242,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!from_transport) {
- int rc;
-
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
- rc = translate_sense_reason(cmd, reason);
- if (rc)
- return rc;
- }
+ if (!from_transport)
+ translate_sense_reason(cmd, reason);
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
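
translate_sense_reason() now owns all of the CHECK CONDITION bookkeeping
(SCF_EMULATED_TASK_SENSE, scsi_status, scsi_sense_length) that
transport_send_check_condition_and_sense() previously duplicated, and a
lost unit-attention race is downgraded to BUSY instead of surfacing as an
error. For reference, the fixed-format sense bytes that
scsi_build_sense_buffer(false, buf, key, asc, ascq) produces can be laid
out by hand like this (a sketch of the layout only, not the kernel
helper):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char buf[18];
            unsigned char key = 0x06, asc = 0x29, ascq = 0x00; /* UA, POWER ON */

            memset(buf, 0, sizeof(buf));
            buf[0] = 0x70;                  /* current errors, fixed format */
            buf[2] = key;                   /* sense key */
            buf[7] = sizeof(buf) - 8;       /* additional sense length (0x0a) */
            buf[12] = asc;
            buf[13] = ascq;

            printf("key %#x asc %#x ascq %#x\n", buf[2], buf[12], buf[13]);
            return 0;
    }
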
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index be25eb807a5f..c8ac242ce888 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -55,7 +55,7 @@ target_scsi3_ua_check(struct se_cmd *cmd)
rcu_read_unlock();
return 0;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return 0;
}
@@ -154,7 +154,6 @@ int core_scsi3_ua_allocate(
&deve->ua_list);
spin_unlock(&deve->ua_lock);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -164,7 +163,6 @@ int core_scsi3_ua_allocate(
" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
asc, ascq);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
@@ -196,16 +194,17 @@ void core_scsi3_ua_release_all(
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
}
-void core_scsi3_ua_for_check_condition(
- struct se_cmd *cmd,
- u8 *asc,
- u8 *ascq)
+/*
+ * Dequeue a unit attention from the unit attention list. This function
+ * returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have
+ * been set.
+ */
+bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
+ u8 *ascq)
{
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
@@ -214,23 +213,23 @@ void core_scsi3_ua_for_check_condition(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!sess)
- return;
+ if (WARN_ON_ONCE(!sess))
+ return false;
nacl = sess->se_node_acl;
- if (!nacl)
- return;
+ if (WARN_ON_ONCE(!nacl))
+ return false;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
if (!deve) {
rcu_read_unlock();
- return;
- }
- if (!atomic_read(&deve->ua_count)) {
- rcu_read_unlock();
- return;
+ *key = ILLEGAL_REQUEST;
+ *asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */
+ *ascq = 0;
+ return true;
}
+ *key = UNIT_ATTENTION;
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
@@ -260,8 +259,6 @@ void core_scsi3_ua_for_check_condition(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
@@ -273,6 +270,8 @@ void core_scsi3_ua_for_check_condition(
(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
+
+ return head == 0;
}
int core_scsi3_ua_clear_for_request_sense(
@@ -299,7 +298,7 @@ int core_scsi3_ua_clear_for_request_sense(
rcu_read_unlock();
return -EINVAL;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return -EPERM;
}
@@ -322,8 +321,6 @@ int core_scsi3_ua_clear_for_request_sense(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index b0f4205a96cd..76487c9be090 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -37,7 +37,8 @@ extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
-extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *,
+ u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
u8 *, u8 *);
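
Dropping the ua_count atomic removes two atomic operations per unit
attention; the fast-path emptiness probe becomes list_empty_careful(),
which is tolerable lock-free here because callers that actually consume
entries still take deve->ua_lock. The classic implementation of the probe
(paraphrased from the kernel's include/linux/list.h) checks both link
pointers, so a list caught mid-update by a single concurrent writer is
unlikely to be misread as empty:

    static inline int list_empty_careful(const struct list_head *head)
    {
            struct list_head *next = head->next;

            return (next == head) && (next == head->prev);
    }
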
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 7f96dfa32b9c..9cd404acdb82 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -83,14 +83,10 @@
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)
-#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
-/* The total size of the ring is 8M + 256K * PAGE_SIZE */
-#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
-
/*
* Default number of global data blocks(512K * PAGE_SIZE)
* when the unmap thread will be started.
@@ -98,6 +94,7 @@
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
+static u8 tcmu_netlink_blocked;
static struct device *tcmu_root_device;
@@ -107,9 +104,16 @@ struct tcmu_hba {
#define TCMU_CONFIG_LEN 256
+static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
+static LIST_HEAD(tcmu_nl_cmd_list);
+
+struct tcmu_dev;
+
struct tcmu_nl_cmd {
/* wake up thread waiting for reply */
struct completion complete;
+ struct list_head nl_list;
+ struct tcmu_dev *udev;
int cmd;
int status;
};
@@ -133,7 +137,7 @@ struct tcmu_dev {
struct inode *inode;
struct tcmu_mailbox *mb_addr;
- size_t dev_size;
+ uint64_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
@@ -161,10 +165,7 @@ struct tcmu_dev {
struct list_head timedout_entry;
- spinlock_t nl_cmd_lock;
struct tcmu_nl_cmd curr_nl_cmd;
- /* wake up threads waiting on curr_nl_cmd */
- wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
@@ -255,6 +256,92 @@ MODULE_PARM_DESC(global_max_data_area_mb,
"Max MBs allowed to be allocated to all the tcmu device's "
"data areas.");
+static int tcmu_get_block_netlink(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
+ "blocked" : "unblocked");
+}
+
+static int tcmu_set_block_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_netlink_blocked = val;
+ return 0;
+}
+
+static const struct kernel_param_ops tcmu_block_netlink_op = {
+ .set = tcmu_set_block_netlink,
+ .get = tcmu_get_block_netlink,
+};
+
+module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
+
+static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
+{
+ struct tcmu_dev *udev = nl_cmd->udev;
+
+ if (!tcmu_netlink_blocked) {
+ pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
+ return -EBUSY;
+ }
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
+ nl_cmd->status = -EINTR;
+ list_del(&nl_cmd->nl_list);
+ complete(&nl_cmd->complete);
+ }
+ return 0;
+}
+
+static int tcmu_set_reset_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1) {
+ pr_err("Invalid reset netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
+ ret = tcmu_fail_netlink_cmd(nl_cmd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+
+ return ret;
+}
+
+static const struct kernel_param_ops tcmu_reset_netlink_op = {
+ .set = tcmu_set_reset_netlink,
+};
+
+module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
+MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
+
/* multicast group */
enum tcmu_multicast_groups {
TCMU_MCGRP_CONFIG,
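
Together the two parameters give an administrator a recovery path for a
hung userspace daemon: writing 1 to block_netlink (under
/sys/module/target_core_user/parameters/) makes
tcmu_init_genl_cmd_reply() fail new commands with -EAGAIN, and writing 1
to reset_netlink then walks tcmu_nl_cmd_list and completes every
outstanding command with -EINTR via tcmu_fail_netlink_cmd(), which
deliberately refuses to run unless the interface has been blocked first.
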
@@ -274,48 +361,50 @@ static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
- struct se_device *dev;
- struct tcmu_dev *udev;
+ struct tcmu_dev *udev = NULL;
struct tcmu_nl_cmd *nl_cmd;
int dev_id, rc, ret = 0;
- bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
!info->attrs[TCMU_ATTR_DEVICE_ID]) {
printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
- return -EINVAL;
+ return -EINVAL;
}
dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
- dev = target_find_device(dev_id, !is_removed);
- if (!dev) {
- printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
- completed_cmd, rc, dev_id);
- return -ENODEV;
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
+ if (nl_cmd->udev->se_dev.dev_index == dev_id) {
+ udev = nl_cmd->udev;
+ break;
+ }
}
- udev = TCMU_DEV(dev);
- spin_lock(&udev->nl_cmd_lock);
- nl_cmd = &udev->curr_nl_cmd;
+ if (!udev) {
+ pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ ret = -ENODEV;
+ goto unlock;
+ }
+ list_del(&nl_cmd->nl_list);
- pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
- nl_cmd->cmd, completed_cmd, rc);
+ pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
+ udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
+ nl_cmd->status);
if (nl_cmd->cmd != completed_cmd) {
- printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
- completed_cmd, nl_cmd->cmd);
+ pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
+ udev->name, completed_cmd, nl_cmd->cmd);
ret = -EINVAL;
- } else {
- nl_cmd->status = rc;
+ goto unlock;
}
- spin_unlock(&udev->nl_cmd_lock);
- if (!is_removed)
- target_undepend_item(&dev->dev_group.cg_item);
- if (!ret)
- complete(&nl_cmd->complete);
+ nl_cmd->status = rc;
+ complete(&nl_cmd->complete);
+unlock:
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
@@ -656,7 +745,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- bool bidi)
+ bool bidi, uint32_t read_len)
{
struct se_cmd *se_cmd = cmd->se_cmd;
int i, dbi;
@@ -689,7 +778,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
for_each_sg(data_sg, sg, data_nents, i) {
int sg_remaining = sg->length;
to = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0) {
+ while (sg_remaining > 0 && read_len > 0) {
if (block_remaining == 0) {
if (from)
kunmap_atomic(from);
@@ -701,6 +790,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
+ if (read_len < copy_bytes)
+ copy_bytes = read_len;
offset = DATA_BLOCK_SIZE - block_remaining;
tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +799,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
+ read_len -= copy_bytes;
}
kunmap_atomic(to - sg->offset);
+ if (read_len == 0)
+ break;
}
if (from)
kunmap_atomic(from);
@@ -977,7 +1071,6 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
&udev->cmd_timer);
if (ret) {
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- mutex_unlock(&udev->cmdr_lock);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
@@ -1042,6 +1135,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
{
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
+ bool read_len_valid = false;
+ uint32_t read_len = se_cmd->data_length;
/*
* cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1151,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
- } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+ goto done;
+ }
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+ (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+ read_len_valid = true;
+ if (entry->rsp.read_len < read_len)
+ read_len = entry->rsp.read_len;
+ }
+
+ if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
- } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ if (!read_len_valid)
+ goto done;
+ else
+ se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+ }
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
/* Get Data-In buffer before clean up */
- gather_data_area(udev, cmd, true);
+ gather_data_area(udev, cmd, true, read_len);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
- gather_data_area(udev, cmd, false);
+ gather_data_area(udev, cmd, false, read_len);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
/* TODO: */
} else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1180,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
se_cmd->data_direction);
}
- target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+ if (read_len_valid) {
+ pr_debug("read_len = %d\n", read_len);
+ target_complete_cmd_with_length(cmd->se_cmd,
+ entry->rsp.scsi_status, read_len);
+ } else
+ target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
out:
cmd->se_cmd = NULL;
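
With TCMU_MAILBOX_FLAG_CAP_READ_LEN advertised, the userspace handler may
complete a READ short: the completion path clamps read_len to
entry->rsp.read_len, gather_data_area() stops copying once read_len bytes
have been consumed, and target_complete_cmd_with_length() reports the
residual. A worked example of the clamping (the numbers are
illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int data_length = 4096; /* what the initiator asked to read */
            unsigned int rsp_read_len = 512; /* what the userspace handler produced */
            unsigned int read_len = data_length;

            if (rsp_read_len && rsp_read_len < read_len)
                    read_len = rsp_read_len;

            /* prints: copy 512 of 4096 bytes (residual 3584) */
            printf("copy %u of %u bytes (residual %u)\n",
                   read_len, data_length, data_length - read_len);
            return 0;
    }
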
@@ -1254,6 +1370,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->max_blocks = DATA_BLOCK_BITS_DEF;
mutex_init(&udev->cmdr_lock);
+ INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->cmdr_queue);
idr_init(&udev->commands);
@@ -1261,9 +1378,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- init_waitqueue_head(&udev->nl_cmd_wq);
- spin_lock_init(&udev->nl_cmd_lock);
-
INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
return &udev->se_dev;
@@ -1537,38 +1651,48 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
return 0;
}
-static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
if (!tcmu_kern_cmd_reply_supported)
- return;
+ return 0;
if (udev->nl_reply_supported <= 0)
- return;
+ return 0;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
-relock:
- spin_lock(&udev->nl_cmd_lock);
+ if (tcmu_netlink_blocked) {
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
+ udev->name);
+ return -EAGAIN;
+ }
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
- spin_unlock(&udev->nl_cmd_lock);
- pr_debug("sleeping for open nl cmd\n");
- wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
- goto relock;
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("netlink cmd %d already executing on %s\n",
+ nl_cmd->cmd, udev->name);
+ return -EBUSY;
}
memset(nl_cmd, 0, sizeof(*nl_cmd));
nl_cmd->cmd = cmd;
+ nl_cmd->udev = udev;
init_completion(&nl_cmd->complete);
+ INIT_LIST_HEAD(&nl_cmd->nl_list);
+
+ list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
- spin_unlock(&udev->nl_cmd_lock);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ return 0;
}
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
int ret;
- DEFINE_WAIT(__wait);
if (!tcmu_kern_cmd_reply_supported)
return 0;
@@ -1579,13 +1703,10 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
- spin_lock(&udev->nl_cmd_lock);
+ mutex_lock(&tcmu_nl_cmd_mutex);
nl_cmd->cmd = TCMU_CMD_UNSPEC;
ret = nl_cmd->status;
- nl_cmd->status = 0;
- spin_unlock(&udev->nl_cmd_lock);
-
- wake_up_all(&udev->nl_cmd_wq);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
@@ -1629,19 +1750,21 @@ free_skb:
static int tcmu_netlink_event_send(struct tcmu_dev *udev,
enum tcmu_genl_cmd cmd,
- struct sk_buff **buf, void **hdr)
+ struct sk_buff *skb, void *msg_header)
{
- int ret = 0;
- struct sk_buff *skb = *buf;
- void *msg_header = *hdr;
+ int ret;
genlmsg_end(skb, msg_header);
- tcmu_init_genl_cmd_reply(udev, cmd);
+ ret = tcmu_init_genl_cmd_reply(udev, cmd);
+ if (ret) {
+ nlmsg_free(skb);
+ return ret;
+ }
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
+ /* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
if (!ret)
@@ -1659,9 +1782,8 @@ static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
&msg_header);
if (ret < 0)
return ret;
- return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
- &msg_header);
-
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
+ msg_header);
}
static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
@@ -1675,7 +1797,7 @@ static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
if (ret < 0)
return ret;
return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static int tcmu_update_uio_info(struct tcmu_dev *udev)
@@ -1717,9 +1839,11 @@ static int tcmu_configure_device(struct se_device *dev)
info = &udev->uio_info;
+ mutex_lock(&udev->cmdr_lock);
udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
sizeof(unsigned long),
GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
goto err_bitmap_alloc;
@@ -1740,7 +1864,7 @@ static int tcmu_configure_device(struct se_device *dev)
/* Initialise the mailbox of the ring buffer */
mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
- mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+ mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
@@ -1814,11 +1938,6 @@ err_bitmap_alloc:
return ret;
}
-static bool tcmu_dev_configured(struct tcmu_dev *udev)
-{
- return udev->uio_info.uio_dev ? true : false;
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1925,45 +2044,76 @@ enum {
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
- {Opt_dev_size, "dev_size=%u"},
- {Opt_hw_block_size, "hw_block_size=%u"},
- {Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_dev_size, "dev_size=%s"},
+ {Opt_hw_block_size, "hw_block_size=%d"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
- unsigned long tmp_ul;
- char *arg_p;
- int ret;
-
- arg_p = match_strdup(arg);
- if (!arg_p)
- return -ENOMEM;
+ int val, ret;
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
+ ret = match_int(arg, &val);
if (ret < 0) {
- pr_err("kstrtoul() failed for dev attrib\n");
+ pr_err("match_int() failed for dev attrib. Error %d.\n",
+ ret);
return ret;
}
- if (!tmp_ul) {
- pr_err("dev attrib must be nonzero\n");
+
+ if (val <= 0) {
+ pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
+ val);
return -EINVAL;
}
- *dev_attrib = tmp_ul;
+ *dev_attrib = val;
return 0;
}
+static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid max_data_area %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
+ if (udev->max_blocks > tcmu_global_max_blocks) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ udev->max_blocks = tcmu_global_max_blocks;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- char *orig, *ptr, *opts, *arg_p;
+ char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token, tmpval;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -1986,15 +2136,10 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
break;
case Opt_dev_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
- kfree(arg_p);
+ ret = match_u64(&args[0], &udev->dev_size);
if (ret < 0)
- pr_err("kstrtoul() failed for dev_size=\n");
+ pr_err("match_u64() failed for dev_size=. Error %d.\n",
+ ret);
break;
case Opt_hw_block_size:
ret = tcmu_set_dev_attrib(&args[0],
@@ -2005,48 +2150,13 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
&(dev->dev_attrib.hw_max_sectors));
break;
case Opt_nl_reply_supported:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
- kfree(arg_p);
+ ret = match_int(&args[0], &udev->nl_reply_supported);
if (ret < 0)
- pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
+ ret);
break;
case Opt_max_data_area_mb:
- if (dev->export_count) {
- pr_err("Unable to set max_data_area_mb while exports exist\n");
- ret = -EINVAL;
- break;
- }
-
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &tmpval);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoint() failed for max_data_area_mb=\n");
- break;
- }
-
- if (tmpval <= 0) {
- pr_err("Invalid max_data_area %d\n", tmpval);
- ret = -EINVAL;
- break;
- }
-
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- tmpval,
- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
- }
+ ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
default:
break;
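
match_int() and match_u64() parse the substring_t in place, so every
option sheds the match_strdup()/kstrto*()/kfree() round trip and the
-ENOMEM path that came with it. The match_table_t flow in miniature, as a
kernel-style sketch with a hypothetical depth= option:

    #include <linux/parser.h>

    enum { Opt_depth, Opt_err };

    static match_table_t demo_tokens = {
            { Opt_depth, "depth=%d" },
            { Opt_err, NULL }
    };

    static int demo_parse(char *ptr, int *depth)
    {
            substring_t args[MAX_OPT_ARGS];
            int token, ret;

            token = match_token(ptr, demo_tokens, args);
            switch (token) {
            case Opt_depth:
                    /* parses args[0] in place; no strdup/kfree dance */
                    ret = match_int(&args[0], depth);
                    if (ret < 0)
                            return ret;
                    return 0;
            default:
                    return -EINVAL;
            }
    }
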
@@ -2067,7 +2177,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
- bl += sprintf(b + bl, "Size: %zu ", udev->dev_size);
+ bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
TCMU_BLOCKS_TO_MBS(udev->max_blocks));
@@ -2194,7 +2304,7 @@ static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
@@ -2211,7 +2321,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
return -EINVAL;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2236,7 +2346,7 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
+ return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
@@ -2256,7 +2366,7 @@ static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
@@ -2273,7 +2383,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2338,7 +2448,7 @@ static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
@@ -2355,7 +2465,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2391,6 +2501,11 @@ static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2418,6 +2533,11 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2482,6 +2602,11 @@ static void find_free_blocks(void)
list_for_each_entry(udev, &root_udev, node) {
mutex_lock(&udev->cmdr_lock);
+ if (!target_dev_configured(&udev->se_dev)) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
/* Try to complete the finished commands first */
tcmu_handle_completions(udev);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 9ee89e00cd77..2718a933c0c6 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -497,10 +497,7 @@ int target_xcopy_setup_pt(void)
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
- spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
+ transport_init_session(&xcopy_pt_sess);
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
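The replacement above folds four open-coded initializations into transport_init_session(). Based purely on the removed lines, the helper is expected to do the equivalent of the following sketch (the real helper may do more):

	/* Sketch reconstructed from the removed open-coded lines */
	static void demo_init_session(struct se_session *se_sess)
	{
		INIT_LIST_HEAD(&se_sess->sess_list);
		INIT_LIST_HEAD(&se_sess->sess_acl_list);
		INIT_LIST_HEAD(&se_sess->sess_cmd_list);
		spin_lock_init(&se_sess->sess_cmd_lock);
	}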
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ec372860106f..a183d4da7db2 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,7 +28,6 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
-#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
@@ -92,7 +91,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
if (fr_seq(fp))
fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
ft_sess_put(sess); /* undo get from lookup at recv */
}
@@ -448,9 +447,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
struct se_session *se_sess = sess->se_sess;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
goto busy;
@@ -458,10 +457,11 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
memset(cmd, 0, sizeof(struct ft_cmd));
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
- percpu_ida_free(&se_sess->sess_tag_pool, tag);
+ target_free_tag(se_sess, &cmd->se_cmd);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
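The percpu_ida to sbitmap_queue conversion above changes the allocation contract: sbitmap_queue_get() also reports which CPU the tag came from, and that CPU must be stored (map_cpu) so target_free_tag() can return the tag to the right per-CPU cache. A minimal sketch of the pairing, assuming a pool initialized elsewhere:

	#include <linux/sbitmap.h>

	static int demo_get_tag(struct sbitmap_queue *pool, unsigned int *cpu)
	{
		int tag = sbitmap_queue_get(pool, cpu);

		/* tag < 0 means the pool is exhausted: caller should back off */
		return tag;
	}

	static void demo_put_tag(struct sbitmap_queue *pool, unsigned int tag,
				 unsigned int cpu)
	{
		/* Must release on the CPU the tag was allocated on */
		sbitmap_queue_clear(pool, tag, cpu);
	}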
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 42ee91123dca..e55c4d537592 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -223,10 +223,7 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
/*
* local_port port_group (tpg) ops.
*/
-static struct se_portal_group *ft_add_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index c91979c1463d..6d4adf5ec26c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -239,7 +239,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess->tport = tport;
sess->port_id = port_id;
- sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+ sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
@@ -287,7 +287,6 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_close_sess(struct ft_sess *sess)
{
- transport_deregister_session_configfs(sess->se_sess);
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ft_sess_put(sess);
@@ -448,7 +447,7 @@ static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
kfree_rcu(sess, rcu);
}
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 0126de898036..3c59e19029be 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -5,3 +5,11 @@ config OPTEE
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
+
+config OPTEE_SHM_NUM_PRIV_PAGES
+ int "Private Shared Memory Pages"
+ default 1
+ depends on OPTEE
+ help
+ This sets the number of private shared memory pages to be
+ used by the OP-TEE TEE driver.
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index e5fd5ed217da..e1aafe842d66 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -32,7 +32,7 @@
#define DRIVER_NAME "optee"
-#define OPTEE_SHM_NUM_PRIV_PAGES 1
+#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
/**
* optee_from_msg_param() - convert from OPTEE_MSG parameters to
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index 41aea12e2bcc..b45c73dd37a5 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -48,7 +48,7 @@ static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
goto bad;
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
arg->params[0].u.value.a = ts.tv_sec;
arg->params[0].u.value.b = ts.tv_nsec;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 07d3be6f0780..0b9ab1d0dd45 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -80,11 +80,6 @@ static void tee_shm_op_release(struct dma_buf *dmabuf)
tee_shm_release(shm);
}
-static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
- return NULL;
-}
-
static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
{
return NULL;
@@ -107,7 +102,6 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.map_dma_buf = tee_shm_op_map_dma_buf,
.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
.release = tee_shm_op_release,
- .map_atomic = tee_shm_op_map_atomic,
.map = tee_shm_op_map,
.mmap = tee_shm_op_mmap,
};
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 82979880f985..0e69edc77d18 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -360,7 +360,7 @@ config INTEL_SOC_DTS_IOSF_CORE
config INTEL_SOC_DTS_THERMAL
tristate "Intel SoCs DTS thermal driver"
- depends on X86 && PCI
+ depends on X86 && PCI && ACPI
select INTEL_SOC_DTS_IOSF_CORE
select THERMAL_WRITABLE_TRIPS
help
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 4c275ec10ac5..2c2f6d93034e 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -24,6 +24,8 @@
#include <linux/of_device.h>
#include <linux/thermal.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
/* Thermal Manager Control and Status Register */
#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
@@ -39,24 +41,24 @@
#define A375_READOUT_INVERT BIT(15)
#define A375_HW_RESETn BIT(8)
-/* Legacy bindings */
-#define LEGACY_CONTROL_MEM_LEN 0x4
-
-/* Current bindings with the 2 control registers under the same memory area */
-#define LEGACY_CONTROL1_OFFSET 0x0
-#define CONTROL0_OFFSET 0x0
-#define CONTROL1_OFFSET 0x4
-
/* Errata fields */
#define CONTROL0_TSEN_TC_TRIM_MASK 0x7
#define CONTROL0_TSEN_TC_TRIM_VAL 0x3
-/* TSEN refers to the temperature sensors within the AP */
#define CONTROL0_TSEN_START BIT(0)
#define CONTROL0_TSEN_RESET BIT(1)
#define CONTROL0_TSEN_ENABLE BIT(2)
-
-/* EXT_TSEN refers to the external temperature sensors, out of the AP */
+#define CONTROL0_TSEN_AVG_BYPASS BIT(6)
+#define CONTROL0_TSEN_CHAN_SHIFT 13
+#define CONTROL0_TSEN_CHAN_MASK 0xF
+#define CONTROL0_TSEN_OSR_SHIFT 24
+#define CONTROL0_TSEN_OSR_MAX 0x3
+#define CONTROL0_TSEN_MODE_SHIFT 30
+#define CONTROL0_TSEN_MODE_EXTERNAL 0x2
+#define CONTROL0_TSEN_MODE_MASK 0x3
+
+#define CONTROL1_TSEN_AVG_SHIFT 0
+#define CONTROL1_TSEN_AVG_MASK 0x7
#define CONTROL1_EXT_TSEN_SW_RESET BIT(7)
#define CONTROL1_EXT_TSEN_HW_RESETn BIT(8)
@@ -67,19 +69,19 @@ struct armada_thermal_data;
/* Marvell EBU Thermal Sensor Dev Structure */
struct armada_thermal_priv {
- void __iomem *status;
- void __iomem *control0;
- void __iomem *control1;
+ struct device *dev;
+ struct regmap *syscon;
+ char zone_name[THERMAL_NAME_LENGTH];
+ /* serialize temperature reads/updates */
+ struct mutex update_lock;
struct armada_thermal_data *data;
+ int current_channel;
};
struct armada_thermal_data {
- /* Initialize the sensor */
- void (*init_sensor)(struct platform_device *pdev,
- struct armada_thermal_priv *);
-
- /* Test for a valid sensor value (optional) */
- bool (*is_valid)(struct armada_thermal_priv *);
+ /* Initialize the thermal IC */
+ void (*init)(struct platform_device *pdev,
+ struct armada_thermal_priv *priv);
/* Formula coefficients: temp = (b - m * reg) / div */
s64 coef_b;
@@ -92,141 +94,242 @@ struct armada_thermal_data {
unsigned int temp_shift;
unsigned int temp_mask;
u32 is_valid_bit;
- bool needs_control0;
+
+ /* Syscon access */
+ unsigned int syscon_control0_off;
+ unsigned int syscon_control1_off;
+ unsigned int syscon_status_off;
+
+ /* One sensor is in the thermal IC, the others are in the CPUs if any */
+ unsigned int cpu_nr;
};
-static void armadaxp_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+struct armada_drvdata {
+ enum drvtype {
+ LEGACY,
+ SYSCON
+ } type;
+ union {
+ struct armada_thermal_priv *priv;
+ struct thermal_zone_device *tz;
+ } data;
+};
+
+/*
+ * struct armada_thermal_sensor - hold the information of one thermal sensor
+ * @thermal: pointer to the local private structure
+ * @tzd: pointer to the thermal zone device
+ * @id: identifier of the thermal sensor
+ */
+struct armada_thermal_sensor {
+ struct armada_thermal_priv *priv;
+ int id;
+};
+
+static void armadaxp_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl_relaxed(priv->control1);
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg |= PMU_TDC0_OTF_CAL_MASK;
- writel(reg, priv->control1);
/* Reference calibration value */
reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
- writel(reg, priv->control1);
/* Reset the sensor */
- reg = readl_relaxed(priv->control1);
- writel((reg | PMU_TDC0_SW_RST_MASK), priv->control1);
+ reg |= PMU_TDC0_SW_RST_MASK;
- writel(reg, priv->control1);
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
/* Enable the sensor */
- reg = readl_relaxed(priv->status);
+ regmap_read(priv->syscon, data->syscon_status_off, &reg);
reg &= ~PMU_TM_DISABLE_MASK;
- writel(reg, priv->status);
+ regmap_write(priv->syscon, data->syscon_status_off, reg);
}
-static void armada370_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada370_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl_relaxed(priv->control1);
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg |= PMU_TDC0_OTF_CAL_MASK;
- writel(reg, priv->control1);
/* Reference calibration value */
reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
- writel(reg, priv->control1);
+ /* Reset the sensor */
reg &= ~PMU_TDC0_START_CAL_MASK;
- writel(reg, priv->control1);
+
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
msleep(10);
}
-static void armada375_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada375_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl(priv->control1);
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg &= ~(A375_UNIT_CONTROL_MASK << A375_UNIT_CONTROL_SHIFT);
reg &= ~A375_READOUT_INVERT;
reg &= ~A375_HW_RESETn;
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
- writel(reg, priv->control1);
msleep(20);
reg |= A375_HW_RESETn;
- writel(reg, priv->control1);
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
+
msleep(50);
}
-static void armada_wait_sensor_validity(struct armada_thermal_priv *priv)
+static int armada_wait_sensor_validity(struct armada_thermal_priv *priv)
{
u32 reg;
- readl_relaxed_poll_timeout(priv->status, reg,
- reg & priv->data->is_valid_bit,
- STATUS_POLL_PERIOD_US,
- STATUS_POLL_TIMEOUT_US);
+ return regmap_read_poll_timeout(priv->syscon,
+ priv->data->syscon_status_off, reg,
+ reg & priv->data->is_valid_bit,
+ STATUS_POLL_PERIOD_US,
+ STATUS_POLL_TIMEOUT_US);
}
-static void armada380_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada380_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
- u32 reg = readl_relaxed(priv->control1);
+ struct armada_thermal_data *data = priv->data;
+ u32 reg;
/* Disable the HW/SW reset */
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg |= CONTROL1_EXT_TSEN_HW_RESETn;
reg &= ~CONTROL1_EXT_TSEN_SW_RESET;
- writel(reg, priv->control1);
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
/* Set Tsen Tc Trim to correct default value (errata #132698) */
- if (priv->control0) {
- reg = readl_relaxed(priv->control0);
- reg &= ~CONTROL0_TSEN_TC_TRIM_MASK;
- reg |= CONTROL0_TSEN_TC_TRIM_VAL;
- writel(reg, priv->control0);
- }
-
- /* Wait the sensors to be valid or the core will warn the user */
- armada_wait_sensor_validity(priv);
+ regmap_read(priv->syscon, data->syscon_control0_off, &reg);
+ reg &= ~CONTROL0_TSEN_TC_TRIM_MASK;
+ reg |= CONTROL0_TSEN_TC_TRIM_VAL;
+ regmap_write(priv->syscon, data->syscon_control0_off, reg);
}
-static void armada_ap806_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada_ap806_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl_relaxed(priv->control0);
+ regmap_read(priv->syscon, data->syscon_control0_off, &reg);
reg &= ~CONTROL0_TSEN_RESET;
reg |= CONTROL0_TSEN_START | CONTROL0_TSEN_ENABLE;
- writel(reg, priv->control0);
- /* Wait the sensors to be valid or the core will warn the user */
- armada_wait_sensor_validity(priv);
+ /* Sample every ~2ms */
+ reg |= CONTROL0_TSEN_OSR_MAX << CONTROL0_TSEN_OSR_SHIFT;
+
+ /* Enable average (2 samples by default) */
+ reg &= ~CONTROL0_TSEN_AVG_BYPASS;
+
+ regmap_write(priv->syscon, data->syscon_control0_off, reg);
+}
+
+static void armada_cp110_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ struct armada_thermal_data *data = priv->data;
+ u32 reg;
+
+ armada380_init(pdev, priv);
+
+ /* Sample every ~2ms */
+ regmap_read(priv->syscon, data->syscon_control0_off, &reg);
+ reg |= CONTROL0_TSEN_OSR_MAX << CONTROL0_TSEN_OSR_SHIFT;
+ regmap_write(priv->syscon, data->syscon_control0_off, reg);
+
+ /* Average the output value over 2^1 = 2 samples */
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
+ reg &= ~CONTROL1_TSEN_AVG_MASK << CONTROL1_TSEN_AVG_SHIFT;
+ reg |= 1 << CONTROL1_TSEN_AVG_SHIFT;
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
}
static bool armada_is_valid(struct armada_thermal_priv *priv)
{
- u32 reg = readl_relaxed(priv->status);
+ u32 reg;
+
+ if (!priv->data->is_valid_bit)
+ return true;
+
+ regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);
return reg & priv->data->is_valid_bit;
}
-static int armada_get_temp(struct thermal_zone_device *thermal,
- int *temp)
+/* There is currently no board with more than one sensor per channel */
+static int armada_select_channel(struct armada_thermal_priv *priv, int channel)
{
- struct armada_thermal_priv *priv = thermal->devdata;
- u32 reg, div;
- s64 sample, b, m;
+ struct armada_thermal_data *data = priv->data;
+ u32 ctrl0;
+
+ if (channel < 0 || channel > priv->data->cpu_nr)
+ return -EINVAL;
+
+ if (priv->current_channel == channel)
+ return 0;
+
+ /* Stop the measurements */
+ regmap_read(priv->syscon, data->syscon_control0_off, &ctrl0);
+ ctrl0 &= ~CONTROL0_TSEN_START;
+ regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
+
+ /* Reset the mode, internal sensor will be automatically selected */
+ ctrl0 &= ~(CONTROL0_TSEN_MODE_MASK << CONTROL0_TSEN_MODE_SHIFT);
+
+ /* Other channels are external and should be selected accordingly */
+ if (channel) {
+ /* Change the mode to external */
+ ctrl0 |= CONTROL0_TSEN_MODE_EXTERNAL <<
+ CONTROL0_TSEN_MODE_SHIFT;
+ /* Select the sensor */
+ ctrl0 &= ~(CONTROL0_TSEN_CHAN_MASK << CONTROL0_TSEN_CHAN_SHIFT);
+ ctrl0 |= (channel - 1) << CONTROL0_TSEN_CHAN_SHIFT;
+ }
- /* Valid check */
- if (priv->data->is_valid && !priv->data->is_valid(priv)) {
- dev_err(&thermal->device,
+ /* Actually set the mode/channel */
+ regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
+ priv->current_channel = channel;
+
+ /* Re-start the measurements */
+ ctrl0 |= CONTROL0_TSEN_START;
+ regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
+
+ /*
+ * The IP has a latency of ~15ms, so after updating the selected source,
+ * we must absolutely wait for the sensor validity bit to ensure we read
+ * actual data.
+ */
+ if (armada_wait_sensor_validity(priv)) {
+ dev_err(priv->dev,
"Temperature sensor reading not valid\n");
return -EIO;
}
- reg = readl_relaxed(priv->status);
+ return 0;
+}
+
+static int armada_read_sensor(struct armada_thermal_priv *priv, int *temp)
+{
+ u32 reg, div;
+ s64 sample, b, m;
+
+ regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);
reg = (reg >> priv->data->temp_shift) & priv->data->temp_mask;
if (priv->data->signed_sample)
/* The most significant bit is the sign bit */
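armada_wait_sensor_validity() above also gains a return value by switching from readl_relaxed_poll_timeout() to regmap_read_poll_timeout(). For reference, a self-contained sketch of that macro's usage (register offset and bit are illustrative):

	#include <linux/regmap.h>

	/* Poll a status register until the valid bit is set, sampling every
	 * 1 ms and giving up after 100 ms; returns 0 or -ETIMEDOUT.
	 */
	static int demo_wait_valid(struct regmap *map, unsigned int status_off,
				   u32 valid_bit)
	{
		u32 reg;

		return regmap_read_poll_timeout(map, status_off, reg,
						reg & valid_bit,
						1000, 100000);
	}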
@@ -247,45 +350,93 @@ static int armada_get_temp(struct thermal_zone_device *thermal,
return 0;
}
-static struct thermal_zone_device_ops ops = {
+static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
+ int *temp)
+{
+ struct armada_thermal_priv *priv = thermal->devdata;
+ int ret;
+
+ /* Valid check */
+ if (!armada_is_valid(priv)) {
+ dev_err(priv->dev,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ /* Do the actual reading */
+ ret = armada_read_sensor(priv, temp);
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops legacy_ops = {
+ .get_temp = armada_get_temp_legacy,
+};
+
+static int armada_get_temp(void *_sensor, int *temp)
+{
+ struct armada_thermal_sensor *sensor = _sensor;
+ struct armada_thermal_priv *priv = sensor->priv;
+ int ret;
+
+ mutex_lock(&priv->update_lock);
+
+ /* Select the desired channel */
+ ret = armada_select_channel(priv, sensor->id);
+ if (ret)
+ goto unlock_mutex;
+
+ /* Do the actual reading */
+ ret = armada_read_sensor(priv, temp);
+
+unlock_mutex:
+ mutex_unlock(&priv->update_lock);
+
+ return ret;
+}
+
+static struct thermal_zone_of_device_ops of_ops = {
.get_temp = armada_get_temp,
};
static const struct armada_thermal_data armadaxp_data = {
- .init_sensor = armadaxp_init_sensor,
+ .init = armadaxp_init,
.temp_shift = 10,
.temp_mask = 0x1ff,
.coef_b = 3153000000ULL,
.coef_m = 10000000ULL,
.coef_div = 13825,
+ .syscon_status_off = 0xb0,
+ .syscon_control1_off = 0xd0,
};
static const struct armada_thermal_data armada370_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada370_init_sensor,
+ .init = armada370_init,
.is_valid_bit = BIT(9),
.temp_shift = 10,
.temp_mask = 0x1ff,
.coef_b = 3153000000ULL,
.coef_m = 10000000ULL,
.coef_div = 13825,
+ .syscon_status_off = 0x0,
+ .syscon_control1_off = 0x4,
};
static const struct armada_thermal_data armada375_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada375_init_sensor,
+ .init = armada375_init,
.is_valid_bit = BIT(10),
.temp_shift = 0,
.temp_mask = 0x1ff,
.coef_b = 3171900000ULL,
.coef_m = 10000000ULL,
.coef_div = 13616,
- .needs_control0 = true,
+ .syscon_status_off = 0x78,
+ .syscon_control0_off = 0x7c,
+ .syscon_control1_off = 0x80,
};
static const struct armada_thermal_data armada380_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada380_init_sensor,
+ .init = armada380_init,
.is_valid_bit = BIT(10),
.temp_shift = 0,
.temp_mask = 0x3ff,
@@ -293,11 +444,13 @@ static const struct armada_thermal_data armada380_data = {
.coef_m = 2000096ULL,
.coef_div = 4201,
.inverted = true,
+ .syscon_control0_off = 0x70,
+ .syscon_control1_off = 0x74,
+ .syscon_status_off = 0x78,
};
static const struct armada_thermal_data armada_ap806_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada_ap806_init_sensor,
+ .init = armada_ap806_init,
.is_valid_bit = BIT(16),
.temp_shift = 0,
.temp_mask = 0x3ff,
@@ -306,12 +459,14 @@ static const struct armada_thermal_data armada_ap806_data = {
.coef_div = 1,
.inverted = true,
.signed_sample = true,
- .needs_control0 = true,
+ .syscon_control0_off = 0x84,
+ .syscon_control1_off = 0x88,
+ .syscon_status_off = 0x8C,
+ .cpu_nr = 4,
};
static const struct armada_thermal_data armada_cp110_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada380_init_sensor,
+ .init = armada_cp110_init,
.is_valid_bit = BIT(10),
.temp_shift = 0,
.temp_mask = 0x3ff,
@@ -319,7 +474,9 @@ static const struct armada_thermal_data armada_cp110_data = {
.coef_m = 2000096ULL,
.coef_div = 4201,
.inverted = true,
- .needs_control0 = true,
+ .syscon_control0_off = 0x70,
+ .syscon_control1_off = 0x74,
+ .syscon_status_off = 0x78,
};
static const struct of_device_id armada_thermal_id_table[] = {
@@ -353,13 +510,97 @@ static const struct of_device_id armada_thermal_id_table[] = {
};
MODULE_DEVICE_TABLE(of, armada_thermal_id_table);
+static const struct regmap_config armada_thermal_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static int armada_thermal_probe_legacy(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ struct armada_thermal_data *data = priv->data;
+ struct resource *res;
+ void __iomem *base;
+
+ /* First memory region points towards the status register */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ /*
+ * Edit the resource start address and length to map over all the
+ * registers, instead of pointing at them one by one.
+ */
+ res->start -= data->syscon_status_off;
+ res->end = res->start + max(data->syscon_status_off,
+ max(data->syscon_control0_off,
+ data->syscon_control1_off)) +
+ sizeof(unsigned int) - 1;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->syscon = devm_regmap_init_mmio(&pdev->dev, base,
+ &armada_thermal_regmap_config);
+ if (IS_ERR(priv->syscon))
+ return PTR_ERR(priv->syscon);
+
+ return 0;
+}
+
+static int armada_thermal_probe_syscon(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ priv->syscon = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->syscon))
+ return PTR_ERR(priv->syscon);
+
+ return 0;
+}
+
+static void armada_set_sane_name(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ const char *name = dev_name(&pdev->dev);
+ char *insane_char;
+
+ if (strlen(name) > THERMAL_NAME_LENGTH) {
+ /*
+ * When inside a system controller, the device name has the
+ * form: f06f8000.system-controller:ap-thermal so stripping
+ * after the ':' should give us a shorter but meaningful name.
+ */
+ name = strrchr(name, ':');
+ if (!name)
+ name = "armada_thermal";
+ else
+ name++;
+ }
+
+ /* Save the name locally */
+ strncpy(priv->zone_name, name, THERMAL_NAME_LENGTH - 1);
+ priv->zone_name[THERMAL_NAME_LENGTH - 1] = '\0';
+
+ /* Then replace any '-' characters, or the hwmon core will complain */
+ do {
+ insane_char = strpbrk(priv->zone_name, "-");
+ if (insane_char)
+ *insane_char = '_';
+ } while (insane_char);
+}
+
static int armada_thermal_probe(struct platform_device *pdev)
{
- void __iomem *control = NULL;
- struct thermal_zone_device *thermal;
+ struct thermal_zone_device *tz;
+ struct armada_thermal_sensor *sensor;
+ struct armada_drvdata *drvdata;
const struct of_device_id *match;
struct armada_thermal_priv *priv;
- struct resource *res;
+ int sensor_id;
+ int ret;
match = of_match_device(armada_thermal_id_table, &pdev->dev);
if (!match)
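armada_set_sane_name() above strips '-' characters with a strpbrk() loop; the thermal_hwmon.c hunk later in this diff does the same job with the single strreplace() helper from <linux/string.h>. A sketch of the equivalent one-liner:

	#include <linux/string.h>

	static void demo_sanitize(char *zone_name)
	{
		/* Replace every '-' in place; hwmon rejects '-' in names */
		strreplace(zone_name, '-', '_');
	}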
@@ -369,58 +610,99 @@ static int armada_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->status = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->status))
- return PTR_ERR(priv->status);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- control = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(control))
- return PTR_ERR(control);
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ priv->dev = &pdev->dev;
priv->data = (struct armada_thermal_data *)match->data;
+ mutex_init(&priv->update_lock);
+
/*
* Legacy DT bindings only described "control1" register (also referred
- * as "control MSB" on old documentation). New bindings cover
+ * as "control MSB" on old documentation). Then, bindings moved to cover
* "control0/control LSB" and "control1/control MSB" registers within
- * the same resource, which is then of size 8 instead of 4.
+ * the same resource, which was then of size 8 instead of 4.
+ *
+ * The logic of defining sporadic registers is broken. For instance, it
+ * blocked the addition of the overheat interrupt feature that needed
+ * another resource somewhere else in the same memory area. One solution
+ * is to define an overall system controller and put the thermal node
+ * into it, which requires the use of regmaps throughout the driver.
*/
- if (resource_size(res) == LEGACY_CONTROL_MEM_LEN) {
- /* ->control0 unavailable in this configuration */
- if (priv->data->needs_control0) {
- dev_err(&pdev->dev, "No access to control0 register\n");
- return -EINVAL;
+ if (IS_ERR(syscon_node_to_regmap(pdev->dev.parent->of_node))) {
+ /* Ensure device name is correct for the thermal core */
+ armada_set_sane_name(pdev, priv);
+
+ ret = armada_thermal_probe_legacy(pdev, priv);
+ if (ret)
+ return ret;
+
+ priv->data->init(pdev, priv);
+
+ /* Wait for the sensors to be valid */
+ armada_wait_sensor_validity(priv);
+
+ tz = thermal_zone_device_register(priv->zone_name, 0, 0, priv,
+ &legacy_ops, NULL, 0, 0);
+ if (IS_ERR(tz)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(tz);
}
- priv->control1 = control + LEGACY_CONTROL1_OFFSET;
- } else {
- priv->control0 = control + CONTROL0_OFFSET;
- priv->control1 = control + CONTROL1_OFFSET;
+ drvdata->type = LEGACY;
+ drvdata->data.tz = tz;
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
}
- priv->data->init_sensor(pdev, priv);
+ ret = armada_thermal_probe_syscon(pdev, priv);
+ if (ret)
+ return ret;
- thermal = thermal_zone_device_register(dev_name(&pdev->dev), 0, 0, priv,
- &ops, NULL, 0, 0);
- if (IS_ERR(thermal)) {
- dev_err(&pdev->dev,
- "Failed to register thermal zone device\n");
- return PTR_ERR(thermal);
- }
+ priv->current_channel = -1;
+ priv->data->init(pdev, priv);
+ drvdata->type = SYSCON;
+ drvdata->data.priv = priv;
+ platform_set_drvdata(pdev, drvdata);
- platform_set_drvdata(pdev, thermal);
+ /*
+ * There is one channel for the IC and one per CPU (if any); each
+ * channel has one sensor.
+ */
+ for (sensor_id = 0; sensor_id <= priv->data->cpu_nr; sensor_id++) {
+ sensor = devm_kzalloc(&pdev->dev,
+ sizeof(struct armada_thermal_sensor),
+ GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ /* Register the sensor */
+ sensor->priv = priv;
+ sensor->id = sensor_id;
+ tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ sensor->id, sensor,
+ &of_ops);
+ if (IS_ERR(tz)) {
+ dev_info(&pdev->dev, "Thermal sensor %d unavailable\n",
+ sensor_id);
+ devm_kfree(&pdev->dev, sensor);
+ continue;
+ }
+ }
return 0;
}
static int armada_thermal_exit(struct platform_device *pdev)
{
- struct thermal_zone_device *armada_thermal =
- platform_get_drvdata(pdev);
+ struct armada_drvdata *drvdata = platform_get_drvdata(pdev);
- thermal_zone_device_unregister(armada_thermal);
+ if (drvdata->type == LEGACY)
+ thermal_zone_device_unregister(drvdata->data.tz);
return 0;
}
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 334d98be03b9..aa452acb60b6 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -3,6 +3,7 @@
// Copyright 2013 Freescale Semiconductor, Inc.
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
@@ -604,7 +605,10 @@ static int imx_init_from_nvmem_cells(struct platform_device *pdev)
ret = nvmem_cell_read_u32(&pdev->dev, "calib", &val);
if (ret)
return ret;
- imx_init_calib(pdev, val);
+
+ ret = imx_init_calib(pdev, val);
+ if (ret)
+ return ret;
ret = nvmem_cell_read_u32(&pdev->dev, "temp_grade", &val);
if (ret)
@@ -644,6 +648,27 @@ static const struct of_device_id of_imx_thermal_match[] = {
};
MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
+/*
+ * Create a cooling device in case no #cooling-cells property is available in
+ * the CPU node
+ */
+static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
+{
+ struct device_node *np = of_get_cpu_node(data->policy->cpu, NULL);
+ int ret;
+
+ if (!np || !of_find_property(np, "#cooling-cells", NULL)) {
+ data->cdev = cpufreq_cooling_register(data->policy);
+ if (IS_ERR(data->cdev)) {
+ ret = PTR_ERR(data->cdev);
+ cpufreq_cpu_put(data->policy);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int imx_thermal_probe(struct platform_device *pdev)
{
struct imx_thermal_data *data;
@@ -724,12 +749,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- data->cdev = cpufreq_cooling_register(data->policy);
- if (IS_ERR(data->cdev)) {
- ret = PTR_ERR(data->cdev);
+ ret = imx_thermal_register_legacy_cooling(data);
+ if (ret) {
dev_err(&pdev->dev,
"failed to register cpufreq cooling device: %d\n", ret);
- cpufreq_cpu_put(data->policy);
return ret;
}
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 4540e892b61d..cde891c54cde 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -28,7 +28,7 @@
* case of external interrupts without need for ack, clamping down
* cpu in non-irq context does not reduce irq. for majority of the
* cases, clamping down cpu does help reduce irq as well, we should
- * be able to differenciate the two cases and give a quantitative
+ * be able to differentiate the two cases and give a quantitative
* solution for the irqs that we can control. perhaps based on
* get_cpu_iowait_time_us()
*
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index c27868b2c6af..1e47511a6bd5 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/cpu_device_id.h>
@@ -31,6 +32,7 @@ MODULE_PARM_DESC(crit_offset,
/* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */
#define BYT_SOC_DTS_APIC_IRQ 86
+static int soc_dts_thres_gsi;
static int soc_dts_thres_irq;
static struct intel_soc_dts_sensors *soc_dts;
@@ -65,7 +67,21 @@ static int __init intel_soc_thermal_init(void)
return err;
}
- soc_dts_thres_irq = (int)match_cpu->driver_data;
+ soc_dts_thres_gsi = (int)match_cpu->driver_data;
+ if (soc_dts_thres_gsi) {
+ /*
+ * Note the flags here MUST match the firmware defaults, rather
+ * than the request_irq flags, otherwise we get an EBUSY error.
+ */
+ soc_dts_thres_irq = acpi_register_gsi(NULL, soc_dts_thres_gsi,
+ ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ if (soc_dts_thres_irq < 0) {
+ pr_warn("intel_soc_dts: Could not get IRQ for GSI %d, err %d\n",
+ soc_dts_thres_gsi, soc_dts_thres_irq);
+ soc_dts_thres_irq = 0;
+ }
+ }
if (soc_dts_thres_irq) {
err = request_threaded_irq(soc_dts_thres_irq, NULL,
@@ -90,8 +106,10 @@ static int __init intel_soc_thermal_init(void)
return 0;
error_trips:
- if (soc_dts_thres_irq)
+ if (soc_dts_thres_irq) {
free_irq(soc_dts_thres_irq, soc_dts);
+ acpi_unregister_gsi(soc_dts_thres_gsi);
+ }
intel_soc_dts_iosf_exit(soc_dts);
return err;
@@ -99,8 +117,10 @@ error_trips:
static void __exit intel_soc_thermal_exit(void)
{
- if (soc_dts_thres_irq)
+ if (soc_dts_thres_irq) {
free_irq(soc_dts_thres_irq, soc_dts);
+ acpi_unregister_gsi(soc_dts_thres_gsi);
+ }
intel_soc_dts_iosf_exit(soc_dts);
}
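The change above stops hard-coding the APIC IRQ and maps the GSI instead. A minimal sketch of the GSI-to-IRQ mapping and its cleanup (error handling trimmed):

	#include <linux/acpi.h>

	static int demo_map_gsi(u32 gsi)
	{
		/* Trigger/polarity must match the firmware's declaration,
		 * otherwise the later request_irq() can fail with -EBUSY.
		 */
		return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
					 ACPI_ACTIVE_LOW);
	}

	static void demo_unmap_gsi(u32 gsi)
	{
		acpi_unregister_gsi(gsi);
	}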
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 2cc2193637e7..a821929ede0b 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
-qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
+qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-v2.o
diff --git a/drivers/thermal/qcom/tsens-8996.c b/drivers/thermal/qcom/tsens-8996.c
deleted file mode 100644
index e1f77818d8fa..000000000000
--- a/drivers/thermal/qcom/tsens-8996.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include "tsens.h"
-
-#define STATUS_OFFSET 0x10a0
-#define LAST_TEMP_MASK 0xfff
-#define STATUS_VALID_BIT BIT(21)
-#define CODE_SIGN_BIT BIT(11)
-
-static int get_temp_8996(struct tsens_device *tmdev, int id, int *temp)
-{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int sensor_addr;
- int last_temp = 0, last_temp2 = 0, last_temp3 = 0, ret;
-
- sensor_addr = STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->map, sensor_addr, &code);
- if (ret)
- return ret;
- last_temp = code & LAST_TEMP_MASK;
- if (code & STATUS_VALID_BIT)
- goto done;
-
- /* Try a second time */
- ret = regmap_read(tmdev->map, sensor_addr, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp2 = code & LAST_TEMP_MASK;
- }
-
- /* Try a third/last time */
- ret = regmap_read(tmdev->map, sensor_addr, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp3 = code & LAST_TEMP_MASK;
- }
-
- if (last_temp == last_temp2)
- last_temp = last_temp2;
- else if (last_temp2 == last_temp3)
- last_temp = last_temp3;
-done:
- /* Code sign bit is the sign extension for a negative value */
- if (last_temp & CODE_SIGN_BIT)
- last_temp |= ~CODE_SIGN_BIT;
-
- /* Temperatures are in deciCelicius */
- *temp = last_temp * 100;
-
- return 0;
-}
-
-static const struct tsens_ops ops_8996 = {
- .init = init_common,
- .get_temp = get_temp_8996,
-};
-
-const struct tsens_data data_8996 = {
- .num_sensors = 13,
- .ops = &ops_8996,
-};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index b1449ad67fc0..6207d8d92351 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/nvmem-consumer.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "tsens.h"
@@ -103,11 +104,11 @@ int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
{
struct tsens_sensor *s = &tmdev->sensor[id];
u32 code;
- unsigned int sensor_addr;
+ unsigned int status_reg;
int last_temp = 0, ret;
- sensor_addr = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->map, sensor_addr, &code);
+ status_reg = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->map, status_reg, &code);
if (ret)
return ret;
last_temp = code & SN_ST_TEMP_MASK;
@@ -126,16 +127,28 @@ static const struct regmap_config tsens_config = {
int __init init_common(struct tsens_device *tmdev)
{
void __iomem *base;
+ struct resource *res;
+ struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
- base = of_iomap(tmdev->dev->of_node, 0);
- if (!base)
+ if (!op)
return -EINVAL;
+ /* The driver only uses the TM register address space for now */
+ if (op->num_resources > 1) {
+ tmdev->tm_offset = 0;
+ } else {
+ /* old DTs where SROT and TM were in a contiguous 2K block */
+ tmdev->tm_offset = 0x1000;
+ }
+
+ res = platform_get_resource(op, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
- if (IS_ERR(tmdev->map)) {
- iounmap(base);
+ if (IS_ERR(tmdev->map))
return PTR_ERR(tmdev->map);
- }
return 0;
}
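The init_common() rework above replaces of_iomap(), which required a manual iounmap() on the error path, with devm_ioremap_resource(), whose mapping is released automatically on unbind. A compressed sketch of that idiom:

	#include <linux/platform_device.h>

	static void __iomem *demo_map_first_region(struct platform_device *pdev)
	{
		struct resource *res;

		/* devm_ioremap_resource() copes with res == NULL and returns
		 * an ERR_PTR, so no separate NULL check is needed here.
		 */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		return devm_ioremap_resource(&pdev->dev, res);
	}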
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
new file mode 100644
index 000000000000..44da02f594ac
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include "tsens.h"
+
+#define STATUS_OFFSET 0xa0
+#define LAST_TEMP_MASK 0xfff
+#define STATUS_VALID_BIT BIT(21)
+
+static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
+{
+ struct tsens_sensor *s = &tmdev->sensor[id];
+ u32 code;
+ unsigned int status_reg;
+ u32 last_temp = 0, last_temp2 = 0, last_temp3 = 0;
+ int ret;
+
+ status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
+ ret = regmap_read(tmdev->map, status_reg, &code);
+ if (ret)
+ return ret;
+ last_temp = code & LAST_TEMP_MASK;
+ if (code & STATUS_VALID_BIT)
+ goto done;
+
+ /* Try a second time */
+ ret = regmap_read(tmdev->map, status_reg, &code);
+ if (ret)
+ return ret;
+ if (code & STATUS_VALID_BIT) {
+ last_temp = code & LAST_TEMP_MASK;
+ goto done;
+ } else {
+ last_temp2 = code & LAST_TEMP_MASK;
+ }
+
+ /* Try a third/last time */
+ ret = regmap_read(tmdev->map, status_reg, &code);
+ if (ret)
+ return ret;
+ if (code & STATUS_VALID_BIT) {
+ last_temp = code & LAST_TEMP_MASK;
+ goto done;
+ } else {
+ last_temp3 = code & LAST_TEMP_MASK;
+ }
+
+ if (last_temp == last_temp2)
+ last_temp = last_temp2;
+ else if (last_temp2 == last_temp3)
+ last_temp = last_temp3;
+done:
+ /* Convert temperature from deciCelsius to milliCelsius */
+ *temp = sign_extend32(last_temp, fls(LAST_TEMP_MASK) - 1) * 100;
+
+ return 0;
+}
+
+static const struct tsens_ops ops_generic_v2 = {
+ .init = init_common,
+ .get_temp = get_temp_tsens_v2,
+};
+
+const struct tsens_data data_tsens_v2 = {
+ .ops = &ops_generic_v2,
+};
+
+/* Kept around for backward compatibility with old msm8996.dtsi */
+const struct tsens_data data_8996 = {
+ .num_sensors = 13,
+ .ops = &ops_generic_v2,
+};
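The sign handling in get_temp_tsens_v2() above replaces the deleted CODE_SIGN_BIT trick with sign_extend32(). With LAST_TEMP_MASK = 0xfff, fls() - 1 yields bit 11 as the sign bit, so e.g. a raw 0x800 decodes to -2048 deciCelsius. A worked sketch (values illustrative):

	#include <linux/bitops.h>

	static int demo_decode_temp(u32 raw)
	{
		/* 12-bit two's-complement sample: bit 11 is the sign bit */
		int deci = sign_extend32(raw & 0xfff, 11);

		return deci * 100;	/* deciCelsius -> milliCelsius */
	}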
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 3440166c2ae9..a2c9bfae3d86 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -72,6 +72,9 @@ static const struct of_device_id tsens_table[] = {
}, {
.compatible = "qcom,msm8996-tsens",
.data = &data_8996,
+ }, {
+ .compatible = "qcom,tsens-v2",
+ .data = &data_tsens_v2,
},
{}
};
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 911c1978892b..14331eb45a86 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -77,9 +77,8 @@ struct tsens_device {
struct device *dev;
u32 num_sensors;
struct regmap *map;
- struct regmap_field *status_field;
+ u32 tm_offset;
struct tsens_context ctx;
- bool trdy;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
};
@@ -89,6 +88,9 @@ void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
int init_common(struct tsens_device *);
int get_temp_common(struct tsens_device *, int, int *);
-extern const struct tsens_data data_8916, data_8974, data_8960, data_8996;
+/* TSENS v1 targets */
+extern const struct tsens_data data_8916, data_8974, data_8960;
+/* TSENS v2 targets */
+extern const struct tsens_data data_8996, data_tsens_v2;
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 45fb284d4c11..e77e63070e99 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -598,7 +598,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
enr_bits |= 3 << (i * 8);
}
- if (enr_bits)
+ if (common->base && enr_bits)
rcar_thermal_common_write(common, ENR, enr_bits);
dev_info(dev, "%d sensor probed\n", i);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index a992e51ef065..48eef552cba4 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -789,11 +789,6 @@ static void exynos_tmu_work(struct work_struct *work)
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
- if (!IS_ERR(data->clk_sec))
- clk_enable(data->clk_sec);
- if (!IS_ERR(data->clk_sec))
- clk_disable(data->clk_sec);
-
thermal_zone_device_update(data->tzd, THERMAL_EVENT_UNSPECIFIED);
mutex_lock(&data->lock);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 11278836ed12..40c69a533b24 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -142,7 +142,8 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
INIT_LIST_HEAD(&hwmon->tz_list);
strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
- hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
+ strreplace(hwmon->type, '-', '_');
+ hwmon->device = hwmon_device_register_with_info(&tz->device, hwmon->type,
hwmon, NULL, NULL);
if (IS_ERR(hwmon->device)) {
result = PTR_ERR(hwmon->device);
diff --git a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h
index a31e4b5e82cd..9490cd63fa6a 100644
--- a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h
@@ -54,56 +54,36 @@
#define DRA752_STD_FUSE_OPP_BGAP_CORE_OFFSET 0x8
#define DRA752_TEMP_SENSOR_CORE_OFFSET 0x154
#define DRA752_BANDGAP_THRESHOLD_CORE_OFFSET 0x1ac
-#define DRA752_BANDGAP_CUMUL_DTEMP_CORE_OFFSET 0x1c4
-#define DRA752_DTEMP_CORE_0_OFFSET 0x208
#define DRA752_DTEMP_CORE_1_OFFSET 0x20c
#define DRA752_DTEMP_CORE_2_OFFSET 0x210
-#define DRA752_DTEMP_CORE_3_OFFSET 0x214
-#define DRA752_DTEMP_CORE_4_OFFSET 0x218
/* DRA752.iva register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_IVA_OFFSET 0x388
#define DRA752_TEMP_SENSOR_IVA_OFFSET 0x398
#define DRA752_BANDGAP_THRESHOLD_IVA_OFFSET 0x3a4
-#define DRA752_BANDGAP_CUMUL_DTEMP_IVA_OFFSET 0x3b4
-#define DRA752_DTEMP_IVA_0_OFFSET 0x3d0
#define DRA752_DTEMP_IVA_1_OFFSET 0x3d4
#define DRA752_DTEMP_IVA_2_OFFSET 0x3d8
-#define DRA752_DTEMP_IVA_3_OFFSET 0x3dc
-#define DRA752_DTEMP_IVA_4_OFFSET 0x3e0
/* DRA752.mpu register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_MPU_OFFSET 0x4
#define DRA752_TEMP_SENSOR_MPU_OFFSET 0x14c
#define DRA752_BANDGAP_THRESHOLD_MPU_OFFSET 0x1a4
-#define DRA752_BANDGAP_CUMUL_DTEMP_MPU_OFFSET 0x1bc
-#define DRA752_DTEMP_MPU_0_OFFSET 0x1e0
#define DRA752_DTEMP_MPU_1_OFFSET 0x1e4
#define DRA752_DTEMP_MPU_2_OFFSET 0x1e8
-#define DRA752_DTEMP_MPU_3_OFFSET 0x1ec
-#define DRA752_DTEMP_MPU_4_OFFSET 0x1f0
/* DRA752.dspeve register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_DSPEVE_OFFSET 0x384
#define DRA752_TEMP_SENSOR_DSPEVE_OFFSET 0x394
#define DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET 0x3a0
-#define DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_OFFSET 0x3b0
-#define DRA752_DTEMP_DSPEVE_0_OFFSET 0x3bc
#define DRA752_DTEMP_DSPEVE_1_OFFSET 0x3c0
#define DRA752_DTEMP_DSPEVE_2_OFFSET 0x3c4
-#define DRA752_DTEMP_DSPEVE_3_OFFSET 0x3c8
-#define DRA752_DTEMP_DSPEVE_4_OFFSET 0x3cc
/* DRA752.gpu register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_GPU_OFFSET 0x0
#define DRA752_TEMP_SENSOR_GPU_OFFSET 0x150
#define DRA752_BANDGAP_THRESHOLD_GPU_OFFSET 0x1a8
-#define DRA752_BANDGAP_CUMUL_DTEMP_GPU_OFFSET 0x1c0
-#define DRA752_DTEMP_GPU_0_OFFSET 0x1f4
#define DRA752_DTEMP_GPU_1_OFFSET 0x1f8
#define DRA752_DTEMP_GPU_2_OFFSET 0x1fc
-#define DRA752_DTEMP_GPU_3_OFFSET 0x200
-#define DRA752_DTEMP_GPU_4_OFFSET 0x204
/**
* Register bitfields for DRA752
@@ -114,7 +94,6 @@
*/
/* DRA752.BANDGAP_STATUS_1 */
-#define DRA752_BANDGAP_STATUS_1_ALERT_MASK BIT(31)
#define DRA752_BANDGAP_STATUS_1_HOT_CORE_MASK BIT(5)
#define DRA752_BANDGAP_STATUS_1_COLD_CORE_MASK BIT(4)
#define DRA752_BANDGAP_STATUS_1_HOT_GPU_MASK BIT(3)
@@ -125,10 +104,6 @@
/* DRA752.BANDGAP_CTRL_2 */
#define DRA752_BANDGAP_CTRL_2_FREEZE_IVA_MASK BIT(22)
#define DRA752_BANDGAP_CTRL_2_FREEZE_DSPEVE_MASK BIT(21)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_IVA_MASK BIT(19)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_DSPEVE_MASK BIT(18)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_IVA_MASK BIT(16)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_DSPEVE_MASK BIT(15)
#define DRA752_BANDGAP_CTRL_2_MASK_HOT_IVA_MASK BIT(3)
#define DRA752_BANDGAP_CTRL_2_MASK_COLD_IVA_MASK BIT(2)
#define DRA752_BANDGAP_CTRL_2_MASK_HOT_DSPEVE_MASK BIT(1)
@@ -141,17 +116,10 @@
#define DRA752_BANDGAP_STATUS_2_COLD_DSPEVE_MASK BIT(0)
/* DRA752.BANDGAP_CTRL_1 */
-#define DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK (0x3 << 30)
#define DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK (0x7 << 27)
#define DRA752_BANDGAP_CTRL_1_FREEZE_CORE_MASK BIT(23)
#define DRA752_BANDGAP_CTRL_1_FREEZE_GPU_MASK BIT(22)
#define DRA752_BANDGAP_CTRL_1_FREEZE_MPU_MASK BIT(21)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_CORE_MASK BIT(20)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_GPU_MASK BIT(19)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_MPU_MASK BIT(18)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_CORE_MASK BIT(17)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_GPU_MASK BIT(16)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_MPU_MASK BIT(15)
#define DRA752_BANDGAP_CTRL_1_MASK_HOT_CORE_MASK BIT(5)
#define DRA752_BANDGAP_CTRL_1_MASK_COLD_CORE_MASK BIT(4)
#define DRA752_BANDGAP_CTRL_1_MASK_HOT_GPU_MASK BIT(3)
@@ -168,22 +136,6 @@
#define DRA752_BANDGAP_THRESHOLD_HOT_MASK (0x3ff << 16)
#define DRA752_BANDGAP_THRESHOLD_COLD_MASK (0x3ff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_CORE */
-#define DRA752_BANDGAP_CUMUL_DTEMP_CORE_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_IVA */
-#define DRA752_BANDGAP_CUMUL_DTEMP_IVA_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_MPU */
-#define DRA752_BANDGAP_CUMUL_DTEMP_MPU_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_DSPEVE */
-#define DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_GPU */
-#define DRA752_BANDGAP_CUMUL_DTEMP_GPU_MASK (0xffffffff << 0)
-
/**
* Temperature limits and thresholds for DRA752
*
@@ -202,10 +154,6 @@
/* bandgap clock limits */
#define DRA752_GPU_MAX_FREQ 1500000
#define DRA752_GPU_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_GPU_MIN_TEMP -40000
-#define DRA752_GPU_MAX_TEMP 125000
-#define DRA752_GPU_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_GPU_T_HOT 800
#define DRA752_GPU_T_COLD 795
@@ -214,10 +162,6 @@
/* bandgap clock limits */
#define DRA752_MPU_MAX_FREQ 1500000
#define DRA752_MPU_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_MPU_MIN_TEMP -40000
-#define DRA752_MPU_MAX_TEMP 125000
-#define DRA752_MPU_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_MPU_T_HOT 800
#define DRA752_MPU_T_COLD 795
@@ -226,10 +170,6 @@
/* bandgap clock limits */
#define DRA752_CORE_MAX_FREQ 1500000
#define DRA752_CORE_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_CORE_MIN_TEMP -40000
-#define DRA752_CORE_MAX_TEMP 125000
-#define DRA752_CORE_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_CORE_T_HOT 800
#define DRA752_CORE_T_COLD 795
@@ -238,10 +178,6 @@
/* bandgap clock limits */
#define DRA752_DSPEVE_MAX_FREQ 1500000
#define DRA752_DSPEVE_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_DSPEVE_MIN_TEMP -40000
-#define DRA752_DSPEVE_MAX_TEMP 125000
-#define DRA752_DSPEVE_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_DSPEVE_T_HOT 800
#define DRA752_DSPEVE_T_COLD 795
@@ -250,10 +186,6 @@
/* bandgap clock limits */
#define DRA752_IVA_MAX_FREQ 1500000
#define DRA752_IVA_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_IVA_MIN_TEMP -40000
-#define DRA752_IVA_MAX_TEMP 125000
-#define DRA752_IVA_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_IVA_T_HOT 800
#define DRA752_IVA_T_COLD 795
diff --git a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
index 4167373327d9..33a3030aa3c0 100644
--- a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
@@ -41,24 +41,16 @@ dra752_core_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_CORE_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_CORE_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_CORE_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_CORE_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_CORE_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_CORE_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_CORE_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_CORE_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_CORE_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_CORE_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_CORE_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_CORE_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_CORE_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_CORE_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_CORE_OFFSET,
};
@@ -74,24 +66,16 @@ dra752_iva_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_2_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_2_MASK_HOT_IVA_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_IVA_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_IVA_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_2_CLEAR_IVA_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_IVA_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_IVA_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_IVA_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_2_COLD_IVA_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_IVA_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_IVA_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_IVA_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_IVA_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_IVA_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_IVA_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_IVA_OFFSET,
};
@@ -107,24 +91,16 @@ dra752_mpu_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_MPU_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_MPU_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_MPU_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_MPU_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_MPU_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_MPU_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_MPU_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_MPU_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_MPU_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_MPU_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_MPU_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_MPU_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_MPU_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_MPU_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_MPU_OFFSET,
};
@@ -140,24 +116,16 @@ dra752_dspeve_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_2_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_2_MASK_HOT_DSPEVE_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_DSPEVE_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_DSPEVE_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_2_CLEAR_DSPEVE_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_DSPEVE_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_DSPEVE_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_2_COLD_DSPEVE_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_DSPEVE_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_DSPEVE_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_DSPEVE_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_DSPEVE_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_DSPEVE_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_DSPEVE_OFFSET,
};
@@ -173,24 +141,16 @@ dra752_gpu_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_GPU_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_GPU_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_GPU_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_GPU_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_GPU_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_GPU_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_GPU_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_GPU_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_GPU_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_GPU_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_GPU_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_GPU_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_GPU_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_GPU_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_GPU_OFFSET,
};
@@ -200,11 +160,6 @@ static struct temp_sensor_data dra752_mpu_temp_sensor_data = {
.t_cold = DRA752_MPU_T_COLD,
.min_freq = DRA752_MPU_MIN_FREQ,
.max_freq = DRA752_MPU_MAX_FREQ,
- .max_temp = DRA752_MPU_MAX_TEMP,
- .min_temp = DRA752_MPU_MIN_TEMP,
- .hyst_val = DRA752_MPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 GPU temperature sensor */
@@ -213,11 +168,6 @@ static struct temp_sensor_data dra752_gpu_temp_sensor_data = {
.t_cold = DRA752_GPU_T_COLD,
.min_freq = DRA752_GPU_MIN_FREQ,
.max_freq = DRA752_GPU_MAX_FREQ,
- .max_temp = DRA752_GPU_MAX_TEMP,
- .min_temp = DRA752_GPU_MIN_TEMP,
- .hyst_val = DRA752_GPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 CORE temperature sensor */
@@ -226,11 +176,6 @@ static struct temp_sensor_data dra752_core_temp_sensor_data = {
.t_cold = DRA752_CORE_T_COLD,
.min_freq = DRA752_CORE_MIN_FREQ,
.max_freq = DRA752_CORE_MAX_FREQ,
- .max_temp = DRA752_CORE_MAX_TEMP,
- .min_temp = DRA752_CORE_MIN_TEMP,
- .hyst_val = DRA752_CORE_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 DSPEVE temperature sensor */
@@ -239,11 +184,6 @@ static struct temp_sensor_data dra752_dspeve_temp_sensor_data = {
.t_cold = DRA752_DSPEVE_T_COLD,
.min_freq = DRA752_DSPEVE_MIN_FREQ,
.max_freq = DRA752_DSPEVE_MAX_FREQ,
- .max_temp = DRA752_DSPEVE_MAX_TEMP,
- .min_temp = DRA752_DSPEVE_MIN_TEMP,
- .hyst_val = DRA752_DSPEVE_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 IVA temperature sensor */
@@ -252,11 +192,6 @@ static struct temp_sensor_data dra752_iva_temp_sensor_data = {
.t_cold = DRA752_IVA_T_COLD,
.min_freq = DRA752_IVA_MIN_FREQ,
.max_freq = DRA752_IVA_MAX_FREQ,
- .max_temp = DRA752_IVA_MAX_TEMP,
- .min_temp = DRA752_IVA_MIN_TEMP,
- .hyst_val = DRA752_IVA_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/*
diff --git a/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c
index c6d217913dd1..f5366807daf0 100644
--- a/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c
@@ -48,9 +48,6 @@ omap34xx_mpu_temp_sensor_registers = {
static struct temp_sensor_data omap34xx_mpu_temp_sensor_data = {
.min_freq = 32768,
.max_freq = 32768,
- .max_temp = 125000,
- .min_temp = -40000,
- .hyst_val = 5000,
};
/*
@@ -119,9 +116,6 @@ omap36xx_mpu_temp_sensor_registers = {
static struct temp_sensor_data omap36xx_mpu_temp_sensor_data = {
.min_freq = 32768,
.max_freq = 32768,
- .max_temp = 125000,
- .min_temp = -40000,
- .hyst_val = 5000,
};
/*
diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
index fd1113360603..c12211eaaac4 100644
--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
@@ -42,9 +42,6 @@ omap4430_mpu_temp_sensor_registers = {
static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
.min_freq = OMAP4430_MIN_FREQ,
.max_freq = OMAP4430_MAX_FREQ,
- .max_temp = OMAP4430_MAX_TEMP,
- .min_temp = OMAP4430_MIN_TEMP,
- .hyst_val = OMAP4430_HYST_VAL,
};
/*
@@ -121,8 +118,6 @@ omap4460_mpu_temp_sensor_registers = {
.tshut_cold_mask = OMAP4460_TSHUT_COLD_MASK,
.bgap_status = OMAP4460_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = OMAP4460_CLEAN_STOP_MASK,
- .status_bgap_alert_mask = OMAP4460_BGAP_ALERT_MASK,
.status_hot_mask = OMAP4460_HOT_FLAG_MASK,
.status_cold_mask = OMAP4460_COLD_FLAG_MASK,
@@ -137,11 +132,6 @@ static struct temp_sensor_data omap4460_mpu_temp_sensor_data = {
.t_cold = OMAP4460_T_COLD,
.min_freq = OMAP4460_MIN_FREQ,
.max_freq = OMAP4460_MAX_FREQ,
- .max_temp = OMAP4460_MAX_TEMP,
- .min_temp = OMAP4460_MIN_TEMP,
- .hyst_val = OMAP4460_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/*
diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
index 6f2de3a3356d..b87c8659ec60 100644
--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
@@ -73,10 +73,6 @@
/* bandgap clock limits (no control on 4430) */
#define OMAP4430_MAX_FREQ 32768
#define OMAP4430_MIN_FREQ 32768
-/* sensor limits */
-#define OMAP4430_MIN_TEMP -40000
-#define OMAP4430_MAX_TEMP 125000
-#define OMAP4430_HYST_VAL 5000
/**
* *** OMAP4460 *** Applicable for OMAP4470
@@ -143,8 +139,6 @@
#define OMAP4460_TSHUT_COLD_MASK (0x3ff << 0)
/* OMAP4460.BANDGAP_STATUS bits */
-#define OMAP4460_CLEAN_STOP_MASK BIT(3)
-#define OMAP4460_BGAP_ALERT_MASK BIT(2)
#define OMAP4460_HOT_FLAG_MASK BIT(1)
#define OMAP4460_COLD_FLAG_MASK BIT(0)
@@ -162,10 +156,6 @@
/* bandgap clock limits */
#define OMAP4460_MAX_FREQ 1500000
#define OMAP4460_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP4460_MIN_TEMP -40000
-#define OMAP4460_MAX_TEMP 123000
-#define OMAP4460_HYST_VAL 5000
/* interrupt thresholds */
#define OMAP4460_TSHUT_HOT 900 /* 122 deg C */
#define OMAP4460_TSHUT_COLD 895 /* 100 deg C */
diff --git a/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
index 6ac037098b52..8191bae834de 100644
--- a/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
@@ -38,12 +38,8 @@ omap5430_mpu_temp_sensor_registers = {
.bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
.mask_hot_mask = OMAP5430_MASK_HOT_MPU_MASK,
.mask_cold_mask = OMAP5430_MASK_COLD_MPU_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
.mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
.mask_freeze_mask = OMAP5430_MASK_FREEZE_MPU_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_MPU_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_MPU_MASK,
-
.bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
.counter_mask = OMAP5430_COUNTER_MASK,
@@ -57,17 +53,11 @@ omap5430_mpu_temp_sensor_registers = {
.tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
.bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
.status_hot_mask = OMAP5430_HOT_MPU_FLAG_MASK,
.status_cold_mask = OMAP5430_COLD_MPU_FLAG_MASK,
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_MPU_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_MPU_0_OFFSET,
.ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_MPU_1_OFFSET,
.ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_MPU_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_MPU_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_MPU_4_OFFSET,
.bgap_efuse = OMAP5430_FUSE_OPP_BGAP_MPU,
};
@@ -84,11 +74,8 @@ omap5430_gpu_temp_sensor_registers = {
.bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
.mask_hot_mask = OMAP5430_MASK_HOT_GPU_MASK,
.mask_cold_mask = OMAP5430_MASK_COLD_GPU_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
.mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
.mask_freeze_mask = OMAP5430_MASK_FREEZE_GPU_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_GPU_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_GPU_MASK,
.bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
.counter_mask = OMAP5430_COUNTER_MASK,
@@ -102,17 +89,11 @@ omap5430_gpu_temp_sensor_registers = {
.tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
.bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
.status_hot_mask = OMAP5430_HOT_GPU_FLAG_MASK,
.status_cold_mask = OMAP5430_COLD_GPU_FLAG_MASK,
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_GPU_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_GPU_0_OFFSET,
.ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_GPU_1_OFFSET,
.ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_GPU_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_GPU_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_GPU_4_OFFSET,
.bgap_efuse = OMAP5430_FUSE_OPP_BGAP_GPU,
};
@@ -130,11 +111,8 @@ omap5430_core_temp_sensor_registers = {
.bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
.mask_hot_mask = OMAP5430_MASK_HOT_CORE_MASK,
.mask_cold_mask = OMAP5430_MASK_COLD_CORE_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
.mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
.mask_freeze_mask = OMAP5430_MASK_FREEZE_CORE_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_CORE_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_CORE_MASK,
.bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
.counter_mask = OMAP5430_COUNTER_MASK,
@@ -148,17 +126,11 @@ omap5430_core_temp_sensor_registers = {
.tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
.bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
.status_hot_mask = OMAP5430_HOT_CORE_FLAG_MASK,
.status_cold_mask = OMAP5430_COLD_CORE_FLAG_MASK,
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_CORE_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_CORE_0_OFFSET,
.ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_CORE_1_OFFSET,
.ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_CORE_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_CORE_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_CORE_4_OFFSET,
.bgap_efuse = OMAP5430_FUSE_OPP_BGAP_CORE,
};
@@ -171,11 +143,6 @@ static struct temp_sensor_data omap5430_mpu_temp_sensor_data = {
.t_cold = OMAP5430_MPU_T_COLD,
.min_freq = OMAP5430_MPU_MIN_FREQ,
.max_freq = OMAP5430_MPU_MAX_FREQ,
- .max_temp = OMAP5430_MPU_MAX_TEMP,
- .min_temp = OMAP5430_MPU_MIN_TEMP,
- .hyst_val = OMAP5430_MPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for OMAP5430 GPU temperature sensor */
@@ -186,11 +153,6 @@ static struct temp_sensor_data omap5430_gpu_temp_sensor_data = {
.t_cold = OMAP5430_GPU_T_COLD,
.min_freq = OMAP5430_GPU_MIN_FREQ,
.max_freq = OMAP5430_GPU_MAX_FREQ,
- .max_temp = OMAP5430_GPU_MAX_TEMP,
- .min_temp = OMAP5430_GPU_MIN_TEMP,
- .hyst_val = OMAP5430_GPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for OMAP5430 CORE temperature sensor */
@@ -201,11 +163,6 @@ static struct temp_sensor_data omap5430_core_temp_sensor_data = {
.t_cold = OMAP5430_CORE_T_COLD,
.min_freq = OMAP5430_CORE_MIN_FREQ,
.max_freq = OMAP5430_CORE_MAX_FREQ,
- .max_temp = OMAP5430_CORE_MAX_TEMP,
- .min_temp = OMAP5430_CORE_MIN_TEMP,
- .hyst_val = OMAP5430_CORE_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/*
@@ -319,8 +276,7 @@ const struct ti_bandgap_data omap5430_data = {
TI_BANDGAP_FEATURE_FREEZE_BIT |
TI_BANDGAP_FEATURE_TALERT |
TI_BANDGAP_FEATURE_COUNTER_DELAY |
- TI_BANDGAP_FEATURE_HISTORY_BUFFER |
- TI_BANDGAP_FEATURE_ERRATA_813,
+ TI_BANDGAP_FEATURE_HISTORY_BUFFER,
.fclock_name = "l3instr_ts_gclk_div",
.div_ck_name = "l3instr_ts_gclk_div",
.conv_table = omap5430_adc_to_temp,
diff --git a/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h
index 400b55dffadd..9096403b74b0 100644
--- a/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h
@@ -44,36 +44,24 @@
#define OMAP5430_TEMP_SENSOR_GPU_OFFSET 0x150
#define OMAP5430_BGAP_THRESHOLD_GPU_OFFSET 0x1A8
#define OMAP5430_BGAP_TSHUT_GPU_OFFSET 0x1B4
-#define OMAP5430_BGAP_CUMUL_DTEMP_GPU_OFFSET 0x1C0
-#define OMAP5430_BGAP_DTEMP_GPU_0_OFFSET 0x1F4
#define OMAP5430_BGAP_DTEMP_GPU_1_OFFSET 0x1F8
#define OMAP5430_BGAP_DTEMP_GPU_2_OFFSET 0x1FC
-#define OMAP5430_BGAP_DTEMP_GPU_3_OFFSET 0x200
-#define OMAP5430_BGAP_DTEMP_GPU_4_OFFSET 0x204
/* OMAP5430.MPU register offsets */
#define OMAP5430_FUSE_OPP_BGAP_MPU 0x4
#define OMAP5430_TEMP_SENSOR_MPU_OFFSET 0x14C
#define OMAP5430_BGAP_THRESHOLD_MPU_OFFSET 0x1A4
#define OMAP5430_BGAP_TSHUT_MPU_OFFSET 0x1B0
-#define OMAP5430_BGAP_CUMUL_DTEMP_MPU_OFFSET 0x1BC
-#define OMAP5430_BGAP_DTEMP_MPU_0_OFFSET 0x1E0
#define OMAP5430_BGAP_DTEMP_MPU_1_OFFSET 0x1E4
#define OMAP5430_BGAP_DTEMP_MPU_2_OFFSET 0x1E8
-#define OMAP5430_BGAP_DTEMP_MPU_3_OFFSET 0x1EC
-#define OMAP5430_BGAP_DTEMP_MPU_4_OFFSET 0x1F0
/* OMAP5430.CORE register offsets */
#define OMAP5430_FUSE_OPP_BGAP_CORE 0x8
#define OMAP5430_TEMP_SENSOR_CORE_OFFSET 0x154
#define OMAP5430_BGAP_THRESHOLD_CORE_OFFSET 0x1AC
#define OMAP5430_BGAP_TSHUT_CORE_OFFSET 0x1B8
-#define OMAP5430_BGAP_CUMUL_DTEMP_CORE_OFFSET 0x1C4
-#define OMAP5430_BGAP_DTEMP_CORE_0_OFFSET 0x208
#define OMAP5430_BGAP_DTEMP_CORE_1_OFFSET 0x20C
#define OMAP5430_BGAP_DTEMP_CORE_2_OFFSET 0x210
-#define OMAP5430_BGAP_DTEMP_CORE_3_OFFSET 0x214
-#define OMAP5430_BGAP_DTEMP_CORE_4_OFFSET 0x218
/* OMAP5430.common register offsets */
#define OMAP5430_BGAP_CTRL_OFFSET 0x1A0
@@ -94,17 +82,10 @@
#define OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK (0x3ff << 0)
/* OMAP5430.BANDGAP_CTRL */
-#define OMAP5430_MASK_SIDLEMODE_MASK (0x3 << 30)
#define OMAP5430_MASK_COUNTER_DELAY_MASK (0x7 << 27)
#define OMAP5430_MASK_FREEZE_CORE_MASK BIT(23)
#define OMAP5430_MASK_FREEZE_GPU_MASK BIT(22)
#define OMAP5430_MASK_FREEZE_MPU_MASK BIT(21)
-#define OMAP5430_MASK_CLEAR_CORE_MASK BIT(20)
-#define OMAP5430_MASK_CLEAR_GPU_MASK BIT(19)
-#define OMAP5430_MASK_CLEAR_MPU_MASK BIT(18)
-#define OMAP5430_MASK_CLEAR_ACCUM_CORE_MASK BIT(17)
-#define OMAP5430_MASK_CLEAR_ACCUM_GPU_MASK BIT(16)
-#define OMAP5430_MASK_CLEAR_ACCUM_MPU_MASK BIT(15)
#define OMAP5430_MASK_HOT_CORE_MASK BIT(5)
#define OMAP5430_MASK_COLD_CORE_MASK BIT(4)
#define OMAP5430_MASK_HOT_GPU_MASK BIT(3)
@@ -123,17 +104,7 @@
#define OMAP5430_TSHUT_HOT_MASK (0x3ff << 16)
#define OMAP5430_TSHUT_COLD_MASK (0x3ff << 0)
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_MPU */
-#define OMAP5430_CUMUL_DTEMP_MPU_MASK (0xffffffff << 0)
-
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_GPU */
-#define OMAP5430_CUMUL_DTEMP_GPU_MASK (0xffffffff << 0)
-
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_CORE */
-#define OMAP5430_CUMUL_DTEMP_CORE_MASK (0xffffffff << 0)
-
/* OMAP5430.BANDGAP_STATUS */
-#define OMAP5430_BGAP_ALERT_MASK BIT(31)
#define OMAP5430_HOT_CORE_FLAG_MASK BIT(5)
#define OMAP5430_COLD_CORE_FLAG_MASK BIT(4)
#define OMAP5430_HOT_GPU_FLAG_MASK BIT(3)
@@ -159,10 +130,6 @@
/* bandgap clock limits */
#define OMAP5430_GPU_MAX_FREQ 1500000
#define OMAP5430_GPU_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_GPU_MIN_TEMP -40000
-#define OMAP5430_GPU_MAX_TEMP 125000
-#define OMAP5430_GPU_HYST_VAL 5000
/* interrupt thresholds */
#define OMAP5430_GPU_TSHUT_HOT 915
#define OMAP5430_GPU_TSHUT_COLD 900
@@ -173,10 +140,6 @@
/* bandgap clock limits */
#define OMAP5430_MPU_MAX_FREQ 1500000
#define OMAP5430_MPU_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_MPU_MIN_TEMP -40000
-#define OMAP5430_MPU_MAX_TEMP 125000
-#define OMAP5430_MPU_HYST_VAL 5000
/* interrupt thresholds */
#define OMAP5430_MPU_TSHUT_HOT 915
#define OMAP5430_MPU_TSHUT_COLD 900
@@ -187,10 +150,6 @@
/* bandgap clock limits */
#define OMAP5430_CORE_MAX_FREQ 1500000
#define OMAP5430_CORE_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_CORE_MIN_TEMP -40000
-#define OMAP5430_CORE_MAX_TEMP 125000
-#define OMAP5430_CORE_HYST_VAL 5000
/* interrupt thresholds */
#define OMAP5430_CORE_TSHUT_HOT 915
#define OMAP5430_CORE_TSHUT_COLD 900
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 696ab3046b87..097328d8e943 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -306,217 +306,6 @@ int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
}
/**
- * ti_bandgap_mcelsius_to_adc() - converts a mCelsius value to ADC scale
- * @bgp: struct ti_bandgap pointer
- * @temp: value in mCelsius
- * @adc: address where to write the resulting temperature in ADC representation
- *
- * Simple conversion from mCelsius to ADC values. In case the temp value
- * is out of the ADC conv table range, it returns -ERANGE; 0 on success.
- * The conversion table is indexed by the ADC values.
- *
- * Return: 0 if conversion was successful, else -ERANGE in case the @temp
- * argument is out of the ADC conv table range.
- */
-static
-int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc)
-{
- const struct ti_bandgap_data *conf = bgp->conf;
- const int *conv_table = bgp->conf->conv_table;
- int high, low, mid;
-
- low = 0;
- high = conf->adc_end_val - conf->adc_start_val;
- mid = (high + low) / 2;
-
- if (temp < conv_table[low] || temp > conv_table[high])
- return -ERANGE;
-
- while (low < high) {
- if (temp < conv_table[mid])
- high = mid - 1;
- else
- low = mid + 1;
- mid = (low + high) / 2;
- }
-
- *adc = conf->adc_start_val + low;
- return 0;
-}
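The helper removed above is, at its core, a binary search over a monotonically increasing conversion table indexed by ADC code. A minimal standalone sketch of that lookup, with a made-up table standing in for the SoC-specific conv_table:

#include <linux/errno.h>

/* Made-up, monotonically increasing mCelsius values indexed by ADC code. */
static const int example_conv_table[] = {
	-40000, -38000, -36000, /* ... */ 123000, 125000,
};

/* Sketch of the removed lookup: find the ADC index matching @temp. */
static int example_mcelsius_to_adc(long temp, int *adc)
{
	int low = 0;
	int high = sizeof(example_conv_table) / sizeof(example_conv_table[0]) - 1;

	if (temp < example_conv_table[low] || temp > example_conv_table[high])
		return -ERANGE;	/* outside the table, as in the driver */

	while (low < high) {
		int mid = (low + high) / 2;

		if (temp < example_conv_table[mid])
			high = mid - 1;
		else
			low = mid + 1;
	}

	*adc = low;	/* the real driver adds conf->adc_start_val here */
	return 0;
}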
-
-/**
- * ti_bandgap_add_hyst() - add hysteresis (in mCelsius) to an ADC value
- * @bgp: struct ti_bandgap pointer
- * @adc_val: temperature value in ADC representation
- * @hyst_val: hysteresis value in mCelsius
- * @sum: address where to write the resulting temperature (in ADC scale)
- *
- * Adds a hysteresis value (in mCelsius) to an ADC temperature value.
- *
- * Return: 0 on success, -ERANGE otherwise.
- */
-static
-int ti_bandgap_add_hyst(struct ti_bandgap *bgp, int adc_val, int hyst_val,
- u32 *sum)
-{
- int temp, ret;
-
- /*
-	 * Need to add in the mcelsius domain, so we have a temperature
-	 * within the conv_table range
- */
- ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp);
- if (ret < 0)
- return ret;
-
- temp += hyst_val;
-
- ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum);
- return ret;
-}
-
-/*** Helper functions handling device Alert/Shutdown signals ***/
-
-/**
- * ti_bandgap_unmask_interrupts() - unmasks the events of thot & tcold
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @t_hot: hot temperature value to trigger alert signal
- * @t_cold: cold temperature value to trigger alert signal
- *
- * Checks the requested t_hot and t_cold values and configures the IRQ event
- * masks accordingly. Call this function only if bandgap features HAS(TALERT).
- */
-static void ti_bandgap_unmask_interrupts(struct ti_bandgap *bgp, int id,
- u32 t_hot, u32 t_cold)
-{
- struct temp_sensor_registers *tsr;
- u32 temp, reg_val;
-
- /* Read the current on die temperature */
- temp = ti_bandgap_read_temp(bgp, id);
-
- tsr = bgp->conf->sensors[id].registers;
- reg_val = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
-
- if (temp < t_hot)
- reg_val |= tsr->mask_hot_mask;
- else
- reg_val &= ~tsr->mask_hot_mask;
-
- if (t_cold < temp)
- reg_val |= tsr->mask_cold_mask;
- else
- reg_val &= ~tsr->mask_cold_mask;
- ti_bandgap_writel(bgp, reg_val, tsr->bgap_mask_ctrl);
-}
-
-/**
- * ti_bandgap_update_alert_threshold() - sequence to update thresholds
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (ADC) of a new threshold
- * @hot: desired threshold to be updated. true if threshold hot, false if
- * threshold cold
- *
- * It will program the required thresholds (hot and cold) for TALERT signal.
- * This function can be used to update t_hot or t_cold, depending on @hot value.
- * It checks the resulting t_hot and t_cold values, based on the new passed @val
- * and configures the thresholds so that t_hot is always greater than t_cold.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, else corresponding error
- */
-static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
- int val, bool hot)
-{
- struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data;
- struct temp_sensor_registers *tsr;
- u32 thresh_val, reg_val, t_hot, t_cold, ctrl;
- int err = 0;
-
- tsr = bgp->conf->sensors[id].registers;
-
- /* obtain the current value */
- thresh_val = ti_bandgap_readl(bgp, tsr->bgap_threshold);
- t_cold = (thresh_val & tsr->threshold_tcold_mask) >>
- __ffs(tsr->threshold_tcold_mask);
- t_hot = (thresh_val & tsr->threshold_thot_mask) >>
- __ffs(tsr->threshold_thot_mask);
- if (hot)
- t_hot = val;
- else
- t_cold = val;
-
- if (t_cold > t_hot) {
- if (hot)
- err = ti_bandgap_add_hyst(bgp, t_hot,
- -ts_data->hyst_val,
- &t_cold);
- else
- err = ti_bandgap_add_hyst(bgp, t_cold,
- ts_data->hyst_val,
- &t_hot);
- }
-
- /* write the new threshold values */
- reg_val = thresh_val &
- ~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask);
- reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) |
- (t_cold << __ffs(tsr->threshold_tcold_mask));
-
- /**
- * Errata i813:
- * Spurious Thermal Alert: Talert can happen randomly while the device
-	 * remains under the temperature limit defined for this event to trigger.
-	 * This spurious event is caused by an incorrect re-synchronization
- * between clock domains. The comparison between configured threshold
- * and current temperature value can happen while the value is
- * transitioning (metastable), thus causing inappropriate event
- * generation. No spurious event occurs as long as the threshold value
- * stays unchanged. Spurious event can be generated while a thermal
- * alert threshold is modified in
- * CONTROL_BANDGAP_THRESHOLD_MPU/GPU/CORE/DSPEVE/IVA_n.
- */
-
- if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
- /* Mask t_hot and t_cold events at the IP Level */
- ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
-
- if (hot)
- ctrl &= ~tsr->mask_hot_mask;
- else
- ctrl &= ~tsr->mask_cold_mask;
-
- ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
- }
-
- /* Write the threshold value */
- ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold);
-
- if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
- /* Unmask t_hot and t_cold events at the IP Level */
- ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
- if (hot)
- ctrl |= tsr->mask_hot_mask;
- else
- ctrl |= tsr->mask_cold_mask;
-
- ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
- }
-
- if (err) {
- dev_err(bgp->dev, "failed to reprogram thot threshold\n");
- err = -EIO;
- goto exit;
- }
-
- ti_bandgap_unmask_interrupts(bgp, id, t_hot, t_cold);
-exit:
- return err;
-}
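The errata i813 mitigation in the function removed above boils down to a mask/write/unmask bracket around the threshold update, so the comparator never evaluates a half-written (metastable) value. A condensed sketch of just that bracket, with hypothetical register offsets rather than the real DRA7/OMAP5 ones:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Sketch only: @mask_off, @thresh_off and @event_bit are placeholders,
 * not offsets that exist in this driver.
 */
static void write_threshold_masked(void __iomem *base, u32 mask_off,
				   u32 thresh_off, u32 event_bit, u32 val)
{
	u32 ctrl = readl(base + mask_off);

	writel(ctrl & ~event_bit, base + mask_off);	/* mask the event */
	writel(val, base + thresh_off);			/* update threshold */
	writel(ctrl | event_bit, base + mask_off);	/* unmask again */
}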
-
-/**
* ti_bandgap_validate() - helper to check the sanity of a struct ti_bandgap
* @bgp: struct ti_bandgap pointer
* @id: bandgap sensor id
@@ -544,165 +333,6 @@ static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
}
/**
- * _ti_bandgap_write_threshold() - helper to update TALERT t_cold or t_hot
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (mCelsius) of a new threshold
- * @hot: desired threshold to be updated. true if threshold hot, false if
- * threshold cold
- *
- * It will update the required thresholds (hot and cold) for TALERT signal.
- * This function can be used to update t_hot or t_cold, depending on @hot value.
- * Validates the mCelsius range and update the requested threshold.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, else corresponding error value.
- */
-static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val,
- bool hot)
-{
- struct temp_sensor_data *ts_data;
- struct temp_sensor_registers *tsr;
- u32 adc_val;
- int ret;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- return ret;
-
- if (!TI_BANDGAP_HAS(bgp, TALERT))
- return -ENOTSUPP;
-
- ts_data = bgp->conf->sensors[id].ts_data;
- tsr = bgp->conf->sensors[id].registers;
- if (hot) {
- if (val < ts_data->min_temp + ts_data->hyst_val)
- ret = -EINVAL;
- } else {
- if (val > ts_data->max_temp + ts_data->hyst_val)
- ret = -EINVAL;
- }
-
- if (ret)
- return ret;
-
- ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val);
- if (ret < 0)
- return ret;
-
- spin_lock(&bgp->lock);
- ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot);
- spin_unlock(&bgp->lock);
- return ret;
-}
-
-/**
- * _ti_bandgap_read_threshold() - helper to read TALERT t_cold or t_hot
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (mCelsius) of a threshold
- * @hot: desired threshold to be read. true if threshold hot, false if
- * threshold cold
- *
- * It will fetch the required thresholds (hot and cold) for TALERT signal.
- * This function can be used to read t_hot or t_cold, depending on @hot value.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, -ENOTSUPP if it has no TALERT support, or the
- * corresponding error value if some operation fails.
- */
-static int _ti_bandgap_read_threshold(struct ti_bandgap *bgp, int id,
- int *val, bool hot)
-{
- struct temp_sensor_registers *tsr;
- u32 temp, mask;
- int ret = 0;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- goto exit;
-
- if (!TI_BANDGAP_HAS(bgp, TALERT)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- tsr = bgp->conf->sensors[id].registers;
- if (hot)
- mask = tsr->threshold_thot_mask;
- else
- mask = tsr->threshold_tcold_mask;
-
- temp = ti_bandgap_readl(bgp, tsr->bgap_threshold);
- temp = (temp & mask) >> __ffs(mask);
- ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
- if (ret) {
- dev_err(bgp->dev, "failed to read thot\n");
- ret = -EIO;
- goto exit;
- }
-
- *val = temp;
-
-exit:
- return ret;
-}
-
-/*** Exposed APIs ***/
-
-/**
- * ti_bandgap_read_thot() - reads sensor current thot
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @thot: resulting current thot value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_thot(struct ti_bandgap *bgp, int id, int *thot)
-{
- return _ti_bandgap_read_threshold(bgp, id, thot, true);
-}
-
-/**
- * ti_bandgap_write_thot() - sets sensor current thot
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @val: desired thot value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_write_thot(struct ti_bandgap *bgp, int id, int val)
-{
- return _ti_bandgap_write_threshold(bgp, id, val, true);
-}
-
-/**
- * ti_bandgap_read_tcold() - reads sensor current tcold
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @tcold: resulting current tcold value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_tcold(struct ti_bandgap *bgp, int id, int *tcold)
-{
- return _ti_bandgap_read_threshold(bgp, id, tcold, false);
-}
-
-/**
- * ti_bandgap_write_tcold() - sets the sensor tcold
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @val: desired tcold value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_write_tcold(struct ti_bandgap *bgp, int id, int val)
-{
- return _ti_bandgap_write_threshold(bgp, id, val, false);
-}
-
-/**
* ti_bandgap_read_counter() - read the sensor counter
* @bgp: pointer to bandgap instance
* @id: sensor id
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index 209c664c2823..68d39ad43241 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -78,11 +78,8 @@
* @bgap_mask_ctrl: BANDGAP_MASK_CTRL register offset
* @mask_hot_mask: mask to bandgap_mask_ctrl.mask_hot
* @mask_cold_mask: mask to bandgap_mask_ctrl.mask_cold
- * @mask_sidlemode_mask: mask to bandgap_mask_ctrl.mask_sidlemode
* @mask_counter_delay_mask: mask to bandgap_mask_ctrl.mask_counter_delay
 * @mask_freeze_mask: mask to bandgap_mask_ctrl.mask_freeze
- * @mask_clear_mask: mask to bandgap_mask_ctrl.mask_clear
- * @mask_clear_accum_mask: mask to bandgap_mask_ctrl.mask_clear_accum
* @bgap_mode_ctrl: BANDGAP_MODE_CTRL register offset
* @mode_ctrl_mask: mask to bandgap_mode_ctrl.mode_ctrl
* @bgap_counter: BANDGAP_COUNTER register offset
@@ -91,21 +88,13 @@
 * @threshold_thot_mask: mask to bandgap_threshold.thot
 * @threshold_tcold_mask: mask to bandgap_threshold.tcold
* @tshut_threshold: TSHUT_THRESHOLD register offset (TSHUT thresholds)
- * @tshut_efuse_mask: mask to tshut_threshold.tshut_efuse
- * @tshut_efuse_shift: shift to tshut_threshold.tshut_efuse
 * @tshut_hot_mask: mask to tshut_threshold.thot
 * @tshut_cold_mask: mask to tshut_threshold.tcold
* @bgap_status: BANDGAP_STATUS register offset
- * @status_clean_stop_mask: mask to bandgap_status.clean_stop
- * @status_bgap_alert_mask: mask to bandgap_status.bandgap_alert
* @status_hot_mask: mask to bandgap_status.hot
* @status_cold_mask: mask to bandgap_status.cold
- * @bgap_cumul_dtemp: BANDGAP_CUMUL_DTEMP register offset
- * @ctrl_dtemp_0: CTRL_DTEMP0 register offset
* @ctrl_dtemp_1: CTRL_DTEMP1 register offset
* @ctrl_dtemp_2: CTRL_DTEMP2 register offset
- * @ctrl_dtemp_3: CTRL_DTEMP3 register offset
- * @ctrl_dtemp_4: CTRL_DTEMP4 register offset
* @bgap_efuse: BANDGAP_EFUSE register offset
*
* The register offsets and bitfields might change across
@@ -121,17 +110,14 @@ struct temp_sensor_registers {
u32 temp_sensor_ctrl;
u32 bgap_tempsoff_mask;
u32 bgap_soc_mask;
- u32 bgap_eocz_mask; /* not used: but needs revisit */
+ u32 bgap_eocz_mask;
u32 bgap_dtemp_mask;
u32 bgap_mask_ctrl;
u32 mask_hot_mask;
u32 mask_cold_mask;
- u32 mask_sidlemode_mask; /* not used: but may be needed for pm */
u32 mask_counter_delay_mask;
u32 mask_freeze_mask;
- u32 mask_clear_mask; /* not used: but needed for trending */
- u32 mask_clear_accum_mask; /* not used: but needed for trending */
u32 bgap_mode_ctrl;
u32 mode_ctrl_mask;
@@ -144,23 +130,15 @@ struct temp_sensor_registers {
u32 threshold_tcold_mask;
u32 tshut_threshold;
- u32 tshut_efuse_mask; /* not used */
- u32 tshut_efuse_shift; /* not used */
u32 tshut_hot_mask;
u32 tshut_cold_mask;
u32 bgap_status;
- u32 status_clean_stop_mask; /* not used: but needed for trending */
- u32 status_bgap_alert_mask; /* not used */
u32 status_hot_mask;
u32 status_cold_mask;
- u32 bgap_cumul_dtemp; /* not used: but needed for trending */
- u32 ctrl_dtemp_0; /* not used: but needed for trending */
- u32 ctrl_dtemp_1; /* not used: but needed for trending */
- u32 ctrl_dtemp_2; /* not used: but needed for trending */
- u32 ctrl_dtemp_3; /* not used: but needed for trending */
- u32 ctrl_dtemp_4; /* not used: but needed for trending */
+ u32 ctrl_dtemp_1;
+ u32 ctrl_dtemp_2;
u32 bgap_efuse;
};
@@ -172,11 +150,6 @@ struct temp_sensor_registers {
* @t_cold: temperature to trigger a thermal alert (low initial value)
* @min_freq: sensor minimum clock rate
* @max_freq: sensor maximum clock rate
- * @max_temp: sensor maximum temperature
- * @min_temp: sensor minimum temperature
- * @hyst_val: temperature hysteresis considered while converting ADC values
- * @update_int1: update interval
- * @update_int2: update interval
*
* This data structure will hold the required thresholds and temperature limits
* for a specific temperature sensor, like shutdown temperature, alert
@@ -189,11 +162,6 @@ struct temp_sensor_data {
u32 t_cold;
u32 min_freq;
u32 max_freq;
- int max_temp;
- int min_temp;
- int hyst_val;
- u32 update_int1; /* not used */
- u32 update_int2; /* not used */
};
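With the unused limit fields gone, a per-sensor data table now carries only the alert thresholds and clock limits. A hypothetical initializer for a new SoC would look like this; the EXAMPLE_* macros are stand-ins, not defines that exist in this driver:

static struct temp_sensor_data example_temp_sensor_data = {
	.t_hot    = EXAMPLE_T_HOT,	/* ADC code for the hot alert */
	.t_cold   = EXAMPLE_T_COLD,	/* ADC code for the cold alert */
	.min_freq = EXAMPLE_MIN_FREQ,	/* slowest allowed bandgap clock */
	.max_freq = EXAMPLE_MAX_FREQ,	/* fastest allowed bandgap clock */
};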
struct ti_bandgap_data;
@@ -316,8 +284,6 @@ struct ti_temp_sensor {
*
 * TI_BANDGAP_FEATURE_ERRATA_814 - used as a workaround when the bandgap device
 * has Errata 814
- * TI_BANDGAP_FEATURE_ERRATA_813 - used as a workaround when the bandgap device
- * has Errata 813
* TI_BANDGAP_FEATURE_UNRELIABLE - used when the sensor readings are too
* inaccurate.
* TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a
@@ -334,8 +300,7 @@ struct ti_temp_sensor {
#define TI_BANDGAP_FEATURE_COUNTER_DELAY BIT(8)
#define TI_BANDGAP_FEATURE_HISTORY_BUFFER BIT(9)
#define TI_BANDGAP_FEATURE_ERRATA_814 BIT(10)
-#define TI_BANDGAP_FEATURE_ERRATA_813 BIT(11)
-#define TI_BANDGAP_FEATURE_UNRELIABLE BIT(12)
+#define TI_BANDGAP_FEATURE_UNRELIABLE BIT(11)
#define TI_BANDGAP_HAS(b, f) \
((b)->conf->features & TI_BANDGAP_FEATURE_ ## f)
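Callers test capabilities through this macro, so dropping TI_BANDGAP_FEATURE_ERRATA_813 also removes every TI_BANDGAP_HAS(bgp, ERRATA_813) branch. A minimal illustration of how a check expands (enable_talert() is a hypothetical helper):

	if (TI_BANDGAP_HAS(bgp, TALERT))
		/* expands to: bgp->conf->features & TI_BANDGAP_FEATURE_TALERT */
		enable_talert(bgp);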
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 6281266b8ec0..092381e2accf 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>
@@ -132,6 +133,8 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
if (!uuids)
return -ENOMEM;
+ pm_runtime_get_sync(&tb->dev);
+
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
@@ -153,7 +156,10 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
}
out:
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
kfree(uuids);
+
return ret;
}
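The bracket added above is the standard runtime PM pattern for sysfs handlers: take a reference (resuming the device if it is suspended) before touching hardware, then rearm the autosuspend timer and drop the reference. A generic sketch of the same shape, where hw_read_state() is a hypothetical stand-in for the actual hardware access:

#include <linux/device.h>
#include <linux/pm_runtime.h>

ssize_t hw_read_state(struct device *dev, char *buf);	/* hypothetical */

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	ssize_t ret;

	pm_runtime_get_sync(dev);		/* resume if runtime suspended */
	ret = hw_read_state(dev, buf);		/* device guaranteed powered */
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop ref; suspend after delay */

	return ret;
}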
@@ -208,13 +214,22 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
goto err_free_acl;
}
+ pm_runtime_get_sync(&tb->dev);
+
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
- goto err_free_acl;
+ goto err_rpm_put;
}
ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+ if (!ret) {
+ /* Notify userspace about the change */
+ kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+ }
mutex_unlock(&tb->lock);
+err_rpm_put:
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
kfree(acl);
err_free_str:
@@ -426,6 +441,13 @@ int tb_domain_add(struct tb *tb)
/* This starts event processing */
mutex_unlock(&tb->lock);
+ pm_runtime_no_callbacks(&tb->dev);
+ pm_runtime_set_active(&tb->dev);
+ pm_runtime_enable(&tb->dev);
+ pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_use_autosuspend(&tb->dev);
+
return 0;
err_domain_del:
@@ -505,26 +527,35 @@ int tb_domain_resume_noirq(struct tb *tb)
int tb_domain_suspend(struct tb *tb)
{
- int ret;
+ return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
+}
- mutex_lock(&tb->lock);
- if (tb->cm_ops->suspend) {
- ret = tb->cm_ops->suspend(tb);
- if (ret) {
- mutex_unlock(&tb->lock);
+void tb_domain_complete(struct tb *tb)
+{
+ if (tb->cm_ops->complete)
+ tb->cm_ops->complete(tb);
+}
+
+int tb_domain_runtime_suspend(struct tb *tb)
+{
+ if (tb->cm_ops->runtime_suspend) {
+ int ret = tb->cm_ops->runtime_suspend(tb);
+ if (ret)
return ret;
- }
}
- mutex_unlock(&tb->lock);
+ tb_ctl_stop(tb->ctl);
return 0;
}
-void tb_domain_complete(struct tb *tb)
+int tb_domain_runtime_resume(struct tb *tb)
{
- mutex_lock(&tb->lock);
- if (tb->cm_ops->complete)
- tb->cm_ops->complete(tb);
- mutex_unlock(&tb->lock);
+ tb_ctl_start(tb->ctl);
+ if (tb->cm_ops->runtime_resume) {
+ int ret = tb->cm_ops->runtime_resume(tb);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
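Note the mirror-image ordering of the two new paths, which keeps the control channel running whenever a connection-manager callback might still need to exchange messages with the firmware:

/*
 * runtime_suspend: cm_ops->runtime_suspend()  then  tb_ctl_stop()
 * runtime_resume:  tb_ctl_start()             then  cm_ops->runtime_resume()
 */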
/**
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 500911f16498..e1e264a9a4c7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -57,9 +58,11 @@
* (only set when @upstream_port is not %NULL)
* @safe_mode: ICM is in safe mode
* @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
+ * @rpm: Does the controller support runtime PM (RTD3)
* @is_supported: Checks if we can support ICM on this controller
* @get_mode: Read and return the ICM firmware mode (optional)
* @get_route: Find a route string for given switch
+ * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
* @driver_ready: Send driver ready message to ICM
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
@@ -73,12 +76,14 @@ struct icm {
size_t max_boot_acl;
int vnd_cap;
bool safe_mode;
+ bool rpm;
bool (*is_supported)(struct tb *tb);
int (*get_mode)(struct tb *tb);
int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+ void (*save_devices)(struct tb *tb);
int (*driver_ready)(struct tb *tb,
enum tb_security_level *security_level,
- size_t *nboot_acl);
+ size_t *nboot_acl, bool *rpm);
void (*device_connected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*device_disconnected)(struct tb *tb,
@@ -95,6 +100,47 @@ struct icm_notification {
struct tb *tb;
};
+struct ep_name_entry {
+ u8 len;
+ u8 type;
+ u8 data[0];
+};
+
+#define EP_NAME_INTEL_VSS 0x10
+
+/* Intel Vendor specific structure */
+struct intel_vss {
+ u16 vendor;
+ u16 model;
+ u8 mc;
+ u8 flags;
+ u16 pci_devid;
+ u32 nvm_version;
+};
+
+#define INTEL_VSS_FLAGS_RTD3 BIT(0)
+
+static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
+{
+ const void *end = ep_name + size;
+
+ while (ep_name < end) {
+ const struct ep_name_entry *ep = ep_name;
+
+ if (!ep->len)
+ break;
+ if (ep_name + ep->len > end)
+ break;
+
+ if (ep->type == EP_NAME_INTEL_VSS)
+ return (const struct intel_vss *)ep->data;
+
+ ep_name += ep->len;
+ }
+
+ return NULL;
+}
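parse_intel_vss() walks a simple length-prefixed entry list until it finds the Intel VSS record. A sketch of how a caller turns that record into the runtime PM decision made later in add_switch():

/* Sketch mirroring the add_switch() logic below. */
static bool ep_supports_rtd3(const void *ep_name, size_t size)
{
	const struct intel_vss *vss = parse_intel_vss(ep_name, size);

	return vss && (vss->flags & INTEL_VSS_FLAGS_RTD3);
}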
+
static inline struct tb *icm_to_tb(struct icm *icm)
{
return ((void *)icm - sizeof(struct tb));
@@ -258,9 +304,14 @@ err_free:
return ret;
}
+static void icm_fr_save_devices(struct tb *tb)
+{
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+}
+
static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm_fr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@@ -410,15 +461,19 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
}
static void add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, u8 connection_id, u8 connection_key,
+ const uuid_t *uuid, const u8 *ep_name,
+ size_t ep_name_size, u8 connection_id, u8 connection_key,
u8 link, u8 depth, enum tb_security_level security_level,
bool authorized, bool boot)
{
+ const struct intel_vss *vss;
struct tb_switch *sw;
+ pm_runtime_get_sync(&parent_sw->dev);
+
sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
if (!sw)
- return;
+ goto out;
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
sw->connection_id = connection_id;
@@ -429,6 +484,10 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
sw->security_level = security_level;
sw->boot = boot;
+ vss = parse_intel_vss(ep_name, ep_name_size);
+ if (vss)
+ sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
/* Link the two switches now */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
@@ -436,8 +495,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
if (tb_switch_add(sw)) {
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
tb_switch_put(sw);
- return;
}
+
+out:
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -477,9 +539,11 @@ static void add_xdomain(struct tb_switch *sw, u64 route,
{
struct tb_xdomain *xd;
+ pm_runtime_get_sync(&sw->dev);
+
xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
if (!xd)
- return;
+ goto out;
xd->link = link;
xd->depth = depth;
@@ -487,6 +551,10 @@ static void add_xdomain(struct tb_switch *sw, u64 route,
tb_port_at(route, sw)->xdomain = xd;
tb_xdomain_add(xd);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
}
static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
@@ -534,20 +602,13 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- ret = icm->get_route(tb, link, depth, &route);
- if (ret) {
- tb_err(tb, "failed to find route string for switch at %u.%u\n",
- link, depth);
- return;
- }
-
sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
if (sw) {
u8 phy_port, sw_phy_port;
parent_sw = tb_to_switch(sw->dev.parent);
- sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
- phy_port = phy_port_from_route(route, depth);
+ sw_phy_port = tb_phy_port_from_link(sw->link);
+ phy_port = tb_phy_port_from_link(link);
/*
* On resume ICM will send us connected events for the
@@ -559,6 +620,22 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
*/
if (sw->depth == depth && sw_phy_port == phy_port &&
!!sw->authorized == authorized) {
+ /*
+ * It was enumerated through another link so update
+ * route string accordingly.
+ */
+ if (sw->link != link) {
+ ret = icm->get_route(tb, link, depth, &route);
+ if (ret) {
+ tb_err(tb, "failed to update route string for switch at %u.%u\n",
+ link, depth);
+ tb_switch_put(sw);
+ return;
+ }
+ } else {
+ route = tb_route(sw);
+ }
+
update_switch(parent_sw, sw, route, pkg->connection_id,
pkg->connection_key, link, depth, boot);
tb_switch_put(sw);
@@ -607,7 +684,16 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
+ ret = icm->get_route(tb, link, depth, &route);
+ if (ret) {
+ tb_err(tb, "failed to find route string for switch at %u.%u\n",
+ link, depth);
+ tb_switch_put(parent_sw);
+ return;
+ }
+
+ add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+ sizeof(pkg->ep_name), pkg->connection_id,
pkg->connection_key, link, depth, security_level,
authorized, boot);
@@ -650,7 +736,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
struct tb_xdomain *xd;
struct tb_switch *sw;
u8 link, depth;
- bool approved;
u64 route;
/*
@@ -664,7 +749,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
- approved = pkg->link_info & ICM_LINK_INFO_APPROVED;
if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
@@ -757,7 +841,7 @@ icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm_tr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@@ -776,6 +860,9 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
if (nboot_acl)
*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
ICM_TR_INFO_BOOT_ACL_SHIFT;
+ if (rpm)
+ *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);
+
return 0;
}
@@ -1005,7 +1092,8 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
+ add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+ sizeof(pkg->ep_name), pkg->connection_id,
0, 0, 0, security_level, authorized, boot);
tb_switch_put(parent_sw);
@@ -1184,7 +1272,7 @@ static int icm_ar_get_mode(struct tb *tb)
static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm_ar_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@@ -1203,6 +1291,9 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
ICM_AR_INFO_BOOT_ACL_SHIFT;
+ if (rpm)
+ *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);
+
return 0;
}
@@ -1356,13 +1447,13 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm *icm = tb_priv(tb);
unsigned int retries = 50;
int ret;
- ret = icm->driver_ready(tb, security_level, nboot_acl);
+ ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
if (ret) {
tb_err(tb, "failed to send driver ready to ICM\n");
return ret;
@@ -1632,7 +1723,8 @@ static int icm_driver_ready(struct tb *tb)
return 0;
}
- ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl);
+ ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
+ &icm->rpm);
if (ret)
return ret;
@@ -1648,13 +1740,12 @@ static int icm_driver_ready(struct tb *tb)
static int icm_suspend(struct tb *tb)
{
- int ret;
+ struct icm *icm = tb_priv(tb);
- ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
- if (ret)
- tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
- ret, __func__);
+ if (icm->save_devices)
+ icm->save_devices(tb);
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
return 0;
}
@@ -1739,7 +1830,7 @@ static void icm_complete(struct tb *tb)
* Now all existing children should be resumed, start events
* from ICM to get updated status.
*/
- __icm_driver_ready(tb, NULL, NULL);
+ __icm_driver_ready(tb, NULL, NULL, NULL);
/*
* We do not get notifications of devices that have been
@@ -1749,6 +1840,22 @@ static void icm_complete(struct tb *tb)
queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}
+static int icm_runtime_suspend(struct tb *tb)
+{
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+ return 0;
+}
+
+static int icm_runtime_resume(struct tb *tb)
+{
+ /*
+	 * We can reuse the same resume functionality as with system
+ * suspend.
+ */
+ icm_complete(tb);
+ return 0;
+}
+
static int icm_start(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
@@ -1767,6 +1874,7 @@ static int icm_start(struct tb *tb)
* prevent root switch NVM upgrade on Macs for now.
*/
tb->root_switch->no_nvm_upgrade = x86_apple_machine;
+ tb->root_switch->rpm = icm->rpm;
ret = tb_switch_add(tb->root_switch);
if (ret) {
@@ -1815,6 +1923,8 @@ static const struct tb_cm_ops icm_ar_ops = {
.stop = icm_stop,
.suspend = icm_suspend,
.complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
.handle_event = icm_handle_event,
.get_boot_acl = icm_ar_get_boot_acl,
.set_boot_acl = icm_ar_set_boot_acl,
@@ -1833,6 +1943,8 @@ static const struct tb_cm_ops icm_tr_ops = {
.stop = icm_stop,
.suspend = icm_suspend,
.complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
.handle_event = icm_handle_event,
.get_boot_acl = icm_ar_get_boot_acl,
.set_boot_acl = icm_ar_set_boot_acl,
@@ -1862,6 +1974,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
icm->is_supported = icm_fr_is_supported;
icm->get_route = icm_fr_get_route;
+ icm->save_devices = icm_fr_save_devices;
icm->driver_ready = icm_fr_driver_ready;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
@@ -1879,6 +1992,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->is_supported = icm_ar_is_supported;
icm->get_mode = icm_ar_get_mode;
icm->get_route = icm_ar_get_route;
+ icm->save_devices = icm_fr_save_devices;
icm->driver_ready = icm_ar_driver_ready;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index f5a33e88e676..88cff05a1808 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -900,7 +900,32 @@ static void nhi_complete(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
- tb_domain_complete(tb);
+ /*
+ * If we were runtime suspended when system suspend started,
+ * schedule runtime resume now. It should bring the domain back
+ * to functional state.
+ */
+ if (pm_runtime_suspended(&pdev->dev))
+ pm_runtime_resume(&pdev->dev);
+ else
+ tb_domain_complete(tb);
+}
+
+static int nhi_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_runtime_suspend(tb);
+}
+
+static int nhi_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ nhi_enable_int_throttling(tb->nhi);
+ return tb_domain_runtime_resume(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
@@ -1015,6 +1040,14 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&nhi->lock);
+ res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (res)
+ res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (res) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ return res;
+ }
+
pci_set_master(pdev);
tb = icm_probe(nhi);
@@ -1040,6 +1073,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, tb);
+ pm_runtime_allow(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
}
@@ -1048,6 +1086,10 @@ static void nhi_remove(struct pci_dev *pdev)
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
+
tb_domain_remove(tb);
nhi_shutdown(nhi);
}
@@ -1070,6 +1112,8 @@ static const struct dev_pm_ops nhi_pm_ops = {
.freeze = nhi_suspend,
.poweroff = nhi_suspend,
.complete = nhi_complete,
+ .runtime_suspend = nhi_runtime_suspend,
+ .runtime_resume = nhi_runtime_resume,
};
static struct pci_device_id nhi_ids[] = {
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 25758671ddf4..7442bc4c6433 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
+#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -236,8 +237,14 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct tb_switch *sw = priv;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+ ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
- return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+ return ret;
}
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
@@ -722,6 +729,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
* the new tunnel too early.
*/
pci_lock_rescan_remove();
+ pm_runtime_get_sync(&sw->dev);
switch (val) {
/* Approve switch */
@@ -742,6 +750,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
break;
}
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
pci_unlock_rescan_remove();
if (!ret) {
@@ -888,9 +898,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
nvm_clear_auth_status(sw);
if (val) {
+ if (!sw->nvm->buf) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ pm_runtime_get_sync(&sw->dev);
ret = nvm_validate_and_write(sw);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
goto exit_unlock;
+ }
sw->nvm->authenticating = true;
@@ -898,6 +917,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
ret = nvm_authenticate_host(sw);
else
ret = nvm_authenticate_device(sw);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
}
exit_unlock:
@@ -1023,9 +1044,29 @@ static void tb_switch_release(struct device *dev)
kfree(sw);
}
+/*
+ * We currently only need to provide the callbacks; everything else is
+ * handled in the connection manager.
+ */
+static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops tb_switch_pm_ops = {
+ SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
+ NULL)
+};
+
struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
+ .pm = &tb_switch_pm_ops,
};
static int tb_switch_get_generation(struct tb_switch *sw)
@@ -1365,10 +1406,21 @@ int tb_switch_add(struct tb_switch *sw)
return ret;
ret = tb_switch_nvm_add(sw);
- if (ret)
+ if (ret) {
device_del(&sw->dev);
+ return ret;
+ }
- return ret;
+ pm_runtime_set_active(&sw->dev);
+ if (sw->rpm) {
+ pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&sw->dev);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_enable(&sw->dev);
+ pm_request_autosuspend(&sw->dev);
+ }
+
+ return 0;
}
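The ordering of the runtime PM calls above matters: the PM core must see the device as active before runtime PM is enabled, and the autosuspend delay must be configured before the first suspend request is queued. The same shape, extracted as a generic sketch:

#include <linux/pm_runtime.h>

/* Sketch: enable autosuspending runtime PM on a freshly added device. */
static void example_enable_runtime_pm(struct device *dev, int delay_ms)
{
	pm_runtime_set_active(dev);		/* state is RPM_ACTIVE */
	pm_runtime_set_autosuspend_delay(dev, delay_ms);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_enable(dev);
	pm_request_autosuspend(dev);	/* may suspend once the delay expires */
}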
/**
@@ -1383,6 +1435,11 @@ void tb_switch_remove(struct tb_switch *sw)
{
int i;
+ if (sw->rpm) {
+ pm_runtime_get_sync(&sw->dev);
+ pm_runtime_disable(&sw->dev);
+ }
+
/* port 0 is the switch itself and never has a remote */
for (i = 1; i <= sw->config.max_port_number; i++) {
if (tb_is_upstream_port(&sw->ports[i]))
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 9d9f0ca16bfb..5067d69d0501 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -67,6 +67,7 @@ struct tb_switch_nvm {
* @no_nvm_upgrade: Prevent NVM upgrade of this switch
* @safe_mode: The switch is in safe-mode
* @boot: Whether the switch was already authorized on boot or not
+ * @rpm: The switch supports runtime PM
* @authorized: Whether the switch is authorized by user or policy
* @work: Work used to automatically authorize a switch
* @security_level: Switch supported security level
@@ -101,6 +102,7 @@ struct tb_switch {
bool no_nvm_upgrade;
bool safe_mode;
bool boot;
+ bool rpm;
unsigned int authorized;
struct work_struct work;
enum tb_security_level security_level;
@@ -199,6 +201,8 @@ struct tb_path {
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
* @complete: Connection manager specific complete
+ * @runtime_suspend: Connection manager specific runtime_suspend
+ * @runtime_resume: Connection manager specific runtime_resume
* @handle_event: Handle thunderbolt event
* @get_boot_acl: Get boot ACL list
* @set_boot_acl: Set boot ACL list
@@ -217,6 +221,8 @@ struct tb_cm_ops {
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
void (*complete)(struct tb *tb);
+ int (*runtime_suspend)(struct tb *tb);
+ int (*runtime_resume)(struct tb *tb);
void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
const void *buf, size_t size);
int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
@@ -235,6 +241,8 @@ static inline void *tb_priv(struct tb *tb)
return (void *)tb->privdata;
}
+#define TB_AUTOSUSPEND_DELAY 15000 /* ms */
+
/* helper functions & macros */
/**
@@ -364,6 +372,8 @@ int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
void tb_domain_complete(struct tb *tb);
+int tb_domain_runtime_suspend(struct tb *tb);
+int tb_domain_runtime_resume(struct tb *tb);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index bc13f8d6b804..2487e162c885 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -286,6 +286,8 @@ struct icm_ar_pkg_driver_ready_response {
u16 info;
};
+#define ICM_AR_FLAGS_RTD3 BIT(6)
+
#define ICM_AR_INFO_SLEVEL_MASK GENMASK(3, 0)
#define ICM_AR_INFO_BOOT_ACL_SHIFT 7
#define ICM_AR_INFO_BOOT_ACL_MASK GENMASK(11, 7)
@@ -333,6 +335,8 @@ struct icm_tr_pkg_driver_ready_response {
u16 reserved2;
};
+#define ICM_TR_FLAGS_RTD3 BIT(6)
+
#define ICM_TR_INFO_SLEVEL_MASK GENMASK(2, 0)
#define ICM_TR_INFO_BOOT_ACL_SHIFT 7
#define ICM_TR_INFO_BOOT_ACL_MASK GENMASK(12, 7)
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 5d94142afda6..693b0353c3fe 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -202,7 +202,7 @@ struct tb_regs_port_header {
/* DWORD 5 */
u32 max_in_hop_id:11;
u32 max_out_hop_id:11;
- u32 __unkown4:10;
+ u32 __unknown4:10;
/* DWORD 6 */
u32 __unknown5;
/* DWORD 7 */
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 8abb4e843085..db8bece63327 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
@@ -1129,6 +1130,14 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->dev.groups = xdomain_attr_groups;
dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
+ /*
+	 * This keeps the DMA powered on as long as we have an active
+	 * connection to another host.
+ */
+ pm_runtime_set_active(&xd->dev);
+ pm_runtime_get_noresume(&xd->dev);
+ pm_runtime_enable(&xd->dev);
+
return xd;
err_free_local_uuid:
@@ -1174,6 +1183,15 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+ /*
+ * Undo runtime PM here explicitly because it is possible that
+ * the XDomain was never added to the bus and thus device_del()
+ * is not called for it (device_del() would handle this otherwise).
+ */
+ pm_runtime_disable(&xd->dev);
+ pm_runtime_put_noidle(&xd->dev);
+ pm_runtime_set_suspended(&xd->dev);
+
if (!device_is_registered(&xd->dev))
put_device(&xd->dev);
else
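The two xdomain hunks above form a matched pair: allocation pins the device active for as long as the host-to-host link exists, and removal unwinds the pin even when device_del() never ran. The idiom, condensed (the example_* names are illustrative only):

        /* Pin a device active for the lifetime of a logical connection. */
        static void example_pin_active(struct device *dev)
        {
                pm_runtime_set_active(dev);     /* report RPM_ACTIVE without a callback */
                pm_runtime_get_noresume(dev);   /* usage count > 0: never auto-suspends */
                pm_runtime_enable(dev);
        }

        /* Exact mirror on teardown; safe even if device_del() was skipped. */
        static void example_unpin(struct device *dev)
        {
                pm_runtime_disable(dev);
                pm_runtime_put_noidle(dev);
                pm_runtime_set_suspended(dev);
        }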
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 37caba7c3aff..c8c5cdfc5e19 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/goldfish.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 7709fcc707f4..5414c4a87bea 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -73,7 +73,7 @@ static LIST_HEAD(hvc_structs);
* Protect the list of hvc_struct instances from inserts and removals during
* list traversal.
*/
-static DEFINE_SPINLOCK(hvc_structs_lock);
+static DEFINE_MUTEX(hvc_structs_mutex);
/*
* This value is used to assign a tty->index value to a hvc_struct based
@@ -83,7 +83,7 @@ static DEFINE_SPINLOCK(hvc_structs_lock);
static int last_hvc = -1;
/*
- * Do not call this function with either the hvc_structs_lock or the hvc_struct
+ * Do not call this function with either the hvc_structs_mutex or the hvc_struct
* lock held. If successful, this function increments the kref reference
* count against the target hvc_struct so it should be released when finished.
*/
@@ -92,24 +92,46 @@ static struct hvc_struct *hvc_get_by_index(int index)
struct hvc_struct *hp;
unsigned long flags;
- spin_lock(&hvc_structs_lock);
+ mutex_lock(&hvc_structs_mutex);
list_for_each_entry(hp, &hvc_structs, next) {
spin_lock_irqsave(&hp->lock, flags);
if (hp->index == index) {
tty_port_get(&hp->port);
spin_unlock_irqrestore(&hp->lock, flags);
- spin_unlock(&hvc_structs_lock);
+ mutex_unlock(&hvc_structs_mutex);
return hp;
}
spin_unlock_irqrestore(&hp->lock, flags);
}
hp = NULL;
+ mutex_unlock(&hvc_structs_mutex);
- spin_unlock(&hvc_structs_lock);
return hp;
}
+static int __hvc_flush(const struct hv_ops *ops, uint32_t vtermno, bool wait)
+{
+ if (wait)
+ might_sleep();
+
+ if (ops->flush)
+ return ops->flush(vtermno, wait);
+ return 0;
+}
+
+static int hvc_console_flush(const struct hv_ops *ops, uint32_t vtermno)
+{
+ return __hvc_flush(ops, vtermno, false);
+}
+
+/*
+ * Wait for the console to flush before writing more to it. This sleeps.
+ */
+static int hvc_flush(struct hvc_struct *hp)
+{
+ return __hvc_flush(hp->ops, hp->vtermno, true);
+}
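A backend wiring up the new hook only has to honour the wait flag; the might_sleep() in __hvc_flush() enforces that sleeping waits run in process context. A hypothetical backend could look like this (the example_hw_* helpers and the other example_* ops are invented for illustration):

        static int example_flush(uint32_t vtermno, bool wait)
        {
                int ret = example_hw_kick(vtermno);     /* push buffered output */

                if (!wait)
                        return ret;

                /* wait == true: process context, sleeping is allowed */
                while (example_hw_pending(vtermno))
                        usleep_range(100, 200);
                return 0;
        }

        static const struct hv_ops example_hv_ops = {
                .get_chars      = example_get_chars,
                .put_chars      = example_put_chars,
                .flush          = example_flush,
        };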
/*
* Initial console vtermnos for console API usage prior to full console
@@ -156,8 +178,12 @@ static void hvc_console_print(struct console *co, const char *b,
if (r <= 0) {
/* throw away characters on error
* but spin in case of -EAGAIN */
- if (r != -EAGAIN)
+ if (r != -EAGAIN) {
i = 0;
+ } else {
+ hvc_console_flush(cons_ops[index],
+ vtermnos[index]);
+ }
} else if (r > 0) {
i -= r;
if (i > 0)
@@ -165,6 +191,7 @@ static void hvc_console_print(struct console *co, const char *b,
}
}
}
+ hvc_console_flush(cons_ops[index], vtermnos[index]);
}
static struct tty_driver *hvc_console_device(struct console *c, int *index)
@@ -224,13 +251,13 @@ static void hvc_port_destruct(struct tty_port *port)
struct hvc_struct *hp = container_of(port, struct hvc_struct, port);
unsigned long flags;
- spin_lock(&hvc_structs_lock);
+ mutex_lock(&hvc_structs_mutex);
spin_lock_irqsave(&hp->lock, flags);
list_del(&(hp->next));
spin_unlock_irqrestore(&hp->lock, flags);
- spin_unlock(&hvc_structs_lock);
+ mutex_unlock(&hvc_structs_mutex);
kfree(hp);
}
@@ -494,23 +521,32 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
if (hp->port.count <= 0)
return -EIO;
- spin_lock_irqsave(&hp->lock, flags);
+ while (count > 0) {
+ spin_lock_irqsave(&hp->lock, flags);
- /* Push pending writes */
- if (hp->n_outbuf > 0)
- hvc_push(hp);
-
- while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) {
- if (rsize > count)
- rsize = count;
- memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
- count -= rsize;
- buf += rsize;
- hp->n_outbuf += rsize;
- written += rsize;
- hvc_push(hp);
+ rsize = hp->outbuf_size - hp->n_outbuf;
+
+ if (rsize) {
+ if (rsize > count)
+ rsize = count;
+ memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
+ count -= rsize;
+ buf += rsize;
+ hp->n_outbuf += rsize;
+ written += rsize;
+ }
+
+ if (hp->n_outbuf > 0)
+ hvc_push(hp);
+
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (count) {
+ if (hp->n_outbuf > 0)
+ hvc_flush(hp);
+ cond_resched();
+ }
}
- spin_unlock_irqrestore(&hp->lock, flags);
/*
* Racy, but harmless, kick thread if there is still pending data.
@@ -590,10 +626,10 @@ static u32 timeout = MIN_TIMEOUT;
#define HVC_POLL_READ 0x00000001
#define HVC_POLL_WRITE 0x00000002
-int hvc_poll(struct hvc_struct *hp)
+static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
{
struct tty_struct *tty;
- int i, n, poll_mask = 0;
+ int i, n, count, poll_mask = 0;
char buf[N_INBUF] __ALIGNED__;
unsigned long flags;
int read_total = 0;
@@ -612,6 +648,12 @@ int hvc_poll(struct hvc_struct *hp)
timeout = (written_total) ? 0 : MIN_TIMEOUT;
}
+ if (may_sleep) {
+ spin_unlock_irqrestore(&hp->lock, flags);
+ cond_resched();
+ spin_lock_irqsave(&hp->lock, flags);
+ }
+
/* No tty attached, just skip */
tty = tty_port_tty_get(&hp->port);
if (tty == NULL)
@@ -619,7 +661,7 @@ int hvc_poll(struct hvc_struct *hp)
/* Now check if we can get data (are we throttled ?) */
if (tty_throttled(tty))
- goto throttled;
+ goto out;
/* If we aren't notifier driven and aren't throttled, we always
* request a reschedule
@@ -628,56 +670,58 @@ int hvc_poll(struct hvc_struct *hp)
poll_mask |= HVC_POLL_READ;
/* Read data if any */
- for (;;) {
- int count = tty_buffer_request_room(&hp->port, N_INBUF);
- /* If flip is full, just reschedule a later read */
- if (count == 0) {
+ count = tty_buffer_request_room(&hp->port, N_INBUF);
+
+ /* If flip is full, just reschedule a later read */
+ if (count == 0) {
+ poll_mask |= HVC_POLL_READ;
+ goto out;
+ }
+
+ n = hp->ops->get_chars(hp->vtermno, buf, count);
+ if (n <= 0) {
+ /* Hangup the tty when disconnected from host */
+ if (n == -EPIPE) {
+ spin_unlock_irqrestore(&hp->lock, flags);
+ tty_hangup(tty);
+ spin_lock_irqsave(&hp->lock, flags);
+ } else if (n == -EAGAIN) {
+ /*
+ * Some back-ends can only ensure a certain minimum
+ * number of bytes read, which may be > 'count'.
+ * Let the tty clear the flip buffer to make room.
+ */
poll_mask |= HVC_POLL_READ;
- break;
}
+ goto out;
+ }
- n = hp->ops->get_chars(hp->vtermno, buf, count);
- if (n <= 0) {
- /* Hangup the tty when disconnected from host */
- if (n == -EPIPE) {
- spin_unlock_irqrestore(&hp->lock, flags);
- tty_hangup(tty);
- spin_lock_irqsave(&hp->lock, flags);
- } else if ( n == -EAGAIN ) {
- /*
- * Some back-ends can only ensure a certain min
- * num of bytes read, which may be > 'count'.
- * Let the tty clear the flip buff to make room.
- */
- poll_mask |= HVC_POLL_READ;
- }
- break;
- }
- for (i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
- if (hp->index == hvc_console.index) {
- /* Handle the SysRq Hack */
- /* XXX should support a sequence */
- if (buf[i] == '\x0f') { /* ^O */
- /* if ^O is pressed again, reset
- * sysrq_pressed and flip ^O char */
- sysrq_pressed = !sysrq_pressed;
- if (sysrq_pressed)
- continue;
- } else if (sysrq_pressed) {
- handle_sysrq(buf[i]);
- sysrq_pressed = 0;
+ if (hp->index == hvc_console.index) {
+ /* Handle the SysRq Hack */
+ /* XXX should support a sequence */
+ if (buf[i] == '\x0f') { /* ^O */
+ /* if ^O is pressed again, reset
+ * sysrq_pressed and flip ^O char */
+ sysrq_pressed = !sysrq_pressed;
+ if (sysrq_pressed)
continue;
- }
+ } else if (sysrq_pressed) {
+ handle_sysrq(buf[i]);
+ sysrq_pressed = 0;
+ continue;
}
-#endif /* CONFIG_MAGIC_SYSRQ */
- tty_insert_flip_char(&hp->port, buf[i], 0);
}
-
- read_total += n;
+#endif /* CONFIG_MAGIC_SYSRQ */
+ tty_insert_flip_char(&hp->port, buf[i], 0);
}
- throttled:
+ if (n == count)
+ poll_mask |= HVC_POLL_READ;
+ read_total = n;
+
+ out:
/* Wakeup write queue if necessary */
if (hp->do_wakeup) {
hp->do_wakeup = 0;
@@ -697,6 +741,11 @@ int hvc_poll(struct hvc_struct *hp)
return poll_mask;
}
+
+int hvc_poll(struct hvc_struct *hp)
+{
+ return __hvc_poll(hp, false);
+}
EXPORT_SYMBOL_GPL(hvc_poll);
/**
@@ -733,11 +782,12 @@ static int khvcd(void *unused)
try_to_freeze();
wmb();
if (!cpus_are_in_xmon()) {
- spin_lock(&hvc_structs_lock);
+ mutex_lock(&hvc_structs_mutex);
list_for_each_entry(hp, &hvc_structs, next) {
- poll_mask |= hvc_poll(hp);
+ poll_mask |= __hvc_poll(hp, true);
+ cond_resched();
}
- spin_unlock(&hvc_structs_lock);
+ mutex_unlock(&hvc_structs_mutex);
} else
poll_mask |= HVC_POLL_READ;
if (hvc_kicked)
@@ -871,7 +921,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
INIT_WORK(&hp->tty_resize, hvc_set_winsz);
spin_lock_init(&hp->lock);
- spin_lock(&hvc_structs_lock);
+ mutex_lock(&hvc_structs_mutex);
/*
* find index to use:
@@ -891,7 +941,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
vtermnos[i] = vtermno;
list_add_tail(&(hp->next), &hvc_structs);
- spin_unlock(&hvc_structs_lock);
+ mutex_unlock(&hvc_structs_mutex);
/* check if we need to re-register the kernel console */
hvc_check_console(i);
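With hvc_structs_lock turned into a mutex, the khvcd thread can sleep while walking the list, which is what allows __hvc_poll(hp, true) to drop the port lock and cond_resched() between ports. The resulting loop shape, reduced to its skeleton (example_* names invented):

        static LIST_HEAD(example_ports);
        static DEFINE_MUTEX(example_ports_mutex);

        struct example_port {
                struct list_head next;
        };

        static int example_poll_thread(void *unused)
        {
                while (!kthread_should_stop()) {
                        struct example_port *p;

                        mutex_lock(&example_ports_mutex);       /* sleepable section */
                        list_for_each_entry(p, &example_ports, next) {
                                /* per-port work may now sleep between ports */
                                cond_resched();
                        }
                        mutex_unlock(&example_ports_mutex);

                        schedule_timeout_interruptible(msecs_to_jiffies(10));
                }
                return 0;
        }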
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index ea63090e013f..e9319954c832 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -54,6 +54,7 @@ struct hvc_struct {
struct hv_ops {
int (*get_chars)(uint32_t vtermno, char *buf, int count);
int (*put_chars)(uint32_t vtermno, const char *buf, int count);
+ int (*flush)(uint32_t vtermno, bool wait);
/* Callbacks for notification. Called in open, close and hangup */
int (*notifier_add)(struct hvc_struct *hp, int irq);
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 9645c0062a90..77baf895108f 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -52,6 +52,7 @@ static u32 hvc_opal_boot_termno;
static const struct hv_ops hvc_opal_raw_ops = {
.get_chars = opal_get_chars,
.put_chars = opal_put_chars,
+ .flush = opal_flush_chars,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
@@ -141,6 +142,7 @@ static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
static const struct hv_ops hvc_opal_hvsi_ops = {
.get_chars = hvc_opal_hvsi_get_chars,
.put_chars = hvc_opal_hvsi_put_chars,
+ .flush = opal_flush_chars,
.notifier_add = hvc_opal_hvsi_open,
.notifier_del = hvc_opal_hvsi_close,
.notifier_hangup = hvc_opal_hvsi_hangup,
@@ -183,9 +185,15 @@ static int hvc_opal_probe(struct platform_device *dev)
return -ENOMEM;
pv->proto = proto;
hvc_opal_privs[termno] = pv;
- if (proto == HV_PROTOCOL_HVSI)
- hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars,
+ if (proto == HV_PROTOCOL_HVSI) {
+ /*
+ * We want put_chars to be atomic to avoid mangling of
+ * hvsi packets.
+ */
+ hvsilib_init(&pv->hvsi,
+ opal_get_chars, opal_put_chars_atomic,
termno, 0);
+ }
/* Instantiate now to establish a mapping index==vtermno */
hvc_instantiate(termno, termno, ops);
@@ -275,6 +283,11 @@ static void udbg_opal_putc(char c)
count = hvc_opal_hvsi_put_chars(termno, &c, 1);
break;
}
+
+ /* This is needed for the console to flush
+ * when there aren't any interrupts.
+ */
+ opal_flush_console(termno);
} while(count == 0 || count == -EAGAIN);
}
@@ -302,14 +315,8 @@ static int udbg_opal_getc(void)
int ch;
for (;;) {
ch = udbg_opal_getc_poll();
- if (ch == -1) {
- /* This shouldn't be needed...but... */
- volatile unsigned long delay;
- for (delay=0; delay < 2000000; delay++)
- ;
- } else {
+ if (ch != -1)
return ch;
- }
}
}
@@ -370,8 +377,9 @@ void __init hvc_opal_init_early(void)
else if (of_device_is_compatible(stdout_node,"ibm,opal-console-hvsi")) {
hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI;
ops = &hvc_opal_hvsi_ops;
- hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars,
- opal_put_chars, index, 1);
+ hvsilib_init(&hvc_opal_boot_priv.hvsi,
+ opal_get_chars, opal_put_chars_atomic,
+ index, 1);
/* HVSI, perform the handshake now */
hvsilib_establish(&hvc_opal_boot_priv.hvsi);
pr_devel("hvc_opal: Found HVSI console\n");
@@ -403,7 +411,8 @@ void __init udbg_init_debug_opal_hvsi(void)
hvc_opal_privs[index] = &hvc_opal_boot_priv;
hvc_opal_boot_termno = index;
udbg_init_opal_common();
- hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars,
+ hvsilib_init(&hvc_opal_boot_priv.hvsi,
+ opal_get_chars, opal_put_chars_atomic,
index, 1);
hvsilib_establish(&hvc_opal_boot_priv.hvsi);
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cbe98bc2b998..431742201709 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -124,6 +124,8 @@ struct n_tty_data {
struct mutex output_lock;
};
+#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+
static inline size_t read_cnt(struct n_tty_data *ldata)
{
return ldata->read_head - ldata->read_tail;
@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
{
+ smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
}
@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
- ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
ldata->commit_head = 0;
- ldata->echo_mark = 0;
ldata->line_start = 0;
ldata->erasing = 0;
@@ -617,13 +618,20 @@ static size_t __process_echoes(struct tty_struct *tty)
old_space = space = tty_write_room(tty);
tail = ldata->echo_tail;
- while (ldata->echo_commit != tail) {
+ while (MASK(ldata->echo_commit) != MASK(tail)) {
c = echo_buf(ldata, tail);
if (c == ECHO_OP_START) {
unsigned char op;
int no_space_left = 0;
/*
+ * Since add_echo_byte() is called without holding
+ * output_lock, we might see only a portion of a
+ * multi-byte operation.
+ */
+ if (MASK(ldata->echo_commit) == MASK(tail + 1))
+ goto not_yet_stored;
+ /*
* If the buffer byte is the start of a multi-byte
* operation, get the next byte, which is either the
* op code or a control character value.
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
unsigned int num_chars, num_bs;
case ECHO_OP_ERASE_TAB:
+ if (MASK(ldata->echo_commit) == MASK(tail + 2))
+ goto not_yet_stored;
num_chars = echo_buf(ldata, tail + 2);
/*
@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
/* If the echo buffer is nearly full (so that the possibility exists
* of echo overrun before the next commit), then discard enough
* data at the tail to prevent a subsequent overrun */
- while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+ while (ldata->echo_commit > tail &&
+ ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
if (echo_buf(ldata, tail) == ECHO_OP_START) {
if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
tail += 3;
@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
tail++;
}
+ not_yet_stored:
ldata->echo_tail = tail;
return old_space - space;
}
@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
size_t nr, old, echoed;
size_t head;
+ mutex_lock(&ldata->output_lock);
head = ldata->echo_head;
ldata->echo_mark = head;
old = ldata->echo_commit - ldata->echo_tail;
@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
* is over the threshold (and try again each time another
* block is accumulated) */
nr = head - ldata->echo_tail;
- if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+ if (nr < ECHO_COMMIT_WATERMARK ||
+ (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
+ mutex_unlock(&ldata->output_lock);
return;
+ }
- mutex_lock(&ldata->output_lock);
ldata->echo_commit = head;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
{
- *echo_buf_addr(ldata, ldata->echo_head++) = c;
+ *echo_buf_addr(ldata, ldata->echo_head) = c;
+ smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
+ ldata->echo_head++;
}
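add_echo_byte() now stores the byte, makes the store visible, and only then advances echo_head; echo_buf() pairs an smp_rmb() with that smp_wmb(), so a reader that observes the new head also observes the byte behind it. The canonical single-producer publish/consume pairing, stripped down (ex_* names invented):

        #define EX_BUF_SIZE 4096
        #define EX_MASK(x) ((x) & (EX_BUF_SIZE - 1))

        /* Producer: data store ordered before the index store. */
        static void ex_publish(unsigned char *buf, size_t *head, unsigned char c)
        {
                buf[EX_MASK(*head)] = c;
                smp_wmb();              /* pairs with smp_rmb() in ex_read() */
                (*head)++;
        }

        /* Consumer: barrier first, then slot i < head is safe to read. */
        static unsigned char ex_read(const unsigned char *buf, size_t i)
        {
                smp_rmb();              /* pairs with smp_wmb() in ex_publish() */
                return buf[EX_MASK(i)];
        }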
/**
@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
}
seen_alnums = 0;
- while (ldata->read_head != ldata->canon_head) {
+ while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
head = ldata->read_head;
/* erase a single possibly multibyte character */
do {
head--;
c = read_buf(ldata, head);
- } while (is_continuation(c, tty) && head != ldata->canon_head);
+ } while (is_continuation(c, tty) &&
+ MASK(head) != MASK(ldata->canon_head));
/* do not partially erase */
if (is_continuation(c, tty))
@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* This info is used to go back the correct
* number of columns.
*/
- while (tail != ldata->canon_head) {
+ while (MASK(tail) != MASK(ldata->canon_head)) {
tail--;
c = read_buf(ldata, tail);
if (c == '\t') {
@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
finish_erasing(ldata);
echo_char(c, tty);
echo_char_raw('\n', ldata);
- while (tail != ldata->read_head) {
+ while (MASK(tail) != MASK(ldata->read_head)) {
echo_char(read_buf(ldata, tail), tty);
tail++;
}
@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
struct n_tty_data *ldata;
/* Currently a malloc failure here can panic */
- ldata = vmalloc(sizeof(*ldata));
+ ldata = vzalloc(sizeof(*ldata));
if (!ldata)
- goto err;
+ return -ENOMEM;
ldata->overrun_time = jiffies;
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
tty->disc_data = ldata;
- reset_buffer_flags(tty->disc_data);
- ldata->column = 0;
- ldata->canon_column = 0;
- ldata->num_overrun = 0;
- ldata->no_room = 0;
- ldata->lnext = 0;
tty->closing = 0;
/* indicate buffer work may resume */
clear_bit(TTY_LDISC_HALTED, &tty->flags);
n_tty_set_termios(tty, NULL);
tty_unthrottle(tty);
-
return 0;
-err:
- return -ENOMEM;
}
static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
tail = ldata->read_tail;
nr = head - tail;
/* Skip EOF-chars.. */
- while (head != tail) {
+ while (MASK(head) != MASK(tail)) {
if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
read_buf(ldata, tail) == __DISABLED_CHAR)
nr--;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index b0e2c4847a5d..678406e0948b 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -625,7 +625,7 @@ int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
if (tty->driver != ptm_driver)
return -EIO;
- fd = get_unused_fd_flags(0);
+ fd = get_unused_fd_flags(flags);
if (fd < 0) {
retval = fd;
goto err;
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index bdd17d2aaafd..b121d8f8f3d7 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1881,7 +1881,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
ByteIO_t UPCIRingInd = 0;
if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
- pci_enable_device(dev))
+ pci_enable_device(dev) || i >= NUM_BOARDS)
return 0;
rcktpt_io_addr[i] = pci_resource_start(dev, 0);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index df93b727e984..9db93f500b4e 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/serdev.h>
#include <linux/slab.h>
@@ -143,11 +145,28 @@ EXPORT_SYMBOL_GPL(serdev_device_remove);
int serdev_device_open(struct serdev_device *serdev)
{
struct serdev_controller *ctrl = serdev->ctrl;
+ int ret;
if (!ctrl || !ctrl->ops->open)
return -EINVAL;
- return ctrl->ops->open(ctrl);
+ ret = ctrl->ops->open(ctrl);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(&ctrl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&ctrl->dev);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ if (ctrl->ops->close)
+ ctrl->ops->close(ctrl);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(serdev_device_open);
@@ -158,6 +177,8 @@ void serdev_device_close(struct serdev_device *serdev)
if (!ctrl || !ctrl->ops->close)
return;
+ pm_runtime_put(&ctrl->dev);
+
ctrl->ops->close(ctrl);
}
EXPORT_SYMBOL_GPL(serdev_device_close);
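The error path above is the standard pm_runtime_get_sync() dance: the call increments the usage count even when the resume fails, so the failure branch must drop that count with pm_runtime_put_noidle() before bailing out. Distilled:

        static int example_get(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);

                if (ret < 0) {
                        /* usage count was bumped even on failure */
                        pm_runtime_put_noidle(dev);
                        return ret;
                }
                return 0;
        }

Newer kernels wrap this exact sequence as pm_runtime_resume_and_get().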
@@ -330,8 +351,17 @@ EXPORT_SYMBOL_GPL(serdev_device_set_tiocm);
static int serdev_drv_probe(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
+ int ret;
+
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret)
+ return ret;
+
+ ret = sdrv->probe(to_serdev_device(dev));
+ if (ret)
+ dev_pm_domain_detach(dev, true);
- return sdrv->probe(to_serdev_device(dev));
+ return ret;
}
static int serdev_drv_remove(struct device *dev)
@@ -339,6 +369,9 @@ static int serdev_drv_remove(struct device *dev)
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
if (sdrv->remove)
sdrv->remove(to_serdev_device(dev));
+
+ dev_pm_domain_detach(dev, true);
+
return 0;
}
@@ -416,6 +449,9 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
dev_set_name(&ctrl->dev, "serial%d", id);
+ pm_runtime_no_callbacks(&ctrl->dev);
+ pm_suspend_ignore_children(&ctrl->dev, true);
+
dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
return ctrl;
@@ -547,20 +583,23 @@ int serdev_controller_add(struct serdev_controller *ctrl)
if (ret)
return ret;
+ pm_runtime_enable(&ctrl->dev);
+
ret_of = of_serdev_register_devices(ctrl);
ret_acpi = acpi_serdev_register_devices(ctrl);
if (ret_of && ret_acpi) {
dev_dbg(&ctrl->dev, "no devices registered: of:%d acpi:%d\n",
ret_of, ret_acpi);
ret = -ENODEV;
- goto out_dev_del;
+ goto err_rpm_disable;
}
dev_dbg(&ctrl->dev, "serdev%d registered: dev:%p\n",
ctrl->nr, &ctrl->dev);
return 0;
-out_dev_del:
+err_rpm_disable:
+ pm_runtime_disable(&ctrl->dev);
device_del(&ctrl->dev);
return ret;
};
@@ -591,6 +630,7 @@ void serdev_controller_remove(struct serdev_controller *ctrl)
dummy = device_for_each_child(&ctrl->dev, NULL,
serdev_remove_device);
+ pm_runtime_disable(&ctrl->dev);
device_del(&ctrl->dev);
}
EXPORT_SYMBOL_GPL(serdev_controller_remove);
@@ -617,6 +657,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
static void __exit serdev_exit(void)
{
bus_unregister(&serdev_bus_type);
+ ida_destroy(&ctrl_ida);
}
module_exit(serdev_exit);
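The controller hunks above configure it as a pass-through parent for PM purposes: it has no callbacks of its own and should not be held up by its children. The recipe, generalized (sketch):

        /* A parent that only aggregates its children's runtime-PM state. */
        static void example_setup_passthrough_parent(struct device *dev)
        {
                pm_runtime_no_callbacks(dev);           /* no suspend/resume of its own */
                pm_suspend_ignore_children(dev, true);  /* don't block on active children */
                pm_runtime_enable(dev);
        }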
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 9342fc2ee7df..8fe3d0ed229e 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -323,7 +323,7 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
* the port is opened so this value needs to be preserved.
*/
if (up->bugs & UART_BUG_THRE) {
- pr_debug("ttyS%d - using backup timer\n", serial_index(port));
+ pr_debug("%s - using backup timer\n", port->name);
up->timer.function = serial8250_backup_timeout;
mod_timer(&up->timer, jiffies +
@@ -1023,6 +1023,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->port.get_mctrl = up->port.get_mctrl;
if (up->port.set_mctrl)
uart->port.set_mctrl = up->port.set_mctrl;
+ if (up->port.get_divisor)
+ uart->port.get_divisor = up->port.get_divisor;
+ if (up->port.set_divisor)
+ uart->port.set_divisor = up->port.set_divisor;
if (up->port.startup)
uart->port.startup = up->port.startup;
if (up->port.shutdown)
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index aff04f1de3a5..fa8dcb470640 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -31,6 +31,7 @@
/* Offsets for the DesignWare specific registers */
#define DW_UART_USR 0x1f /* UART Status Register */
+#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
@@ -55,6 +56,7 @@
struct dw8250_data {
u8 usr_reg;
+ u8 dlf_size;
int line;
int msr_mask_on;
int msr_mask_off;
@@ -67,6 +69,21 @@ struct dw8250_data {
unsigned int uart_16550_compatible:1;
};
+static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ return ioread32be(p->membase + offset);
+ return readl(p->membase + offset);
+}
+
+static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ iowrite32be(reg, p->membase + offset);
+ else
+ writel(reg, p->membase + offset);
+}
+
static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
@@ -293,7 +310,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
- if (IS_ERR(d->clk) || !old)
+ if (IS_ERR(d->clk))
goto out;
clk_disable_unprepare(d->clk);
@@ -351,6 +368,37 @@ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
return param == chan->device->dev->parent;
}
+/*
+ * divisor = div(I) + div(F)
+ * "I" means integer, "F" means fractional
+ * quot = div(I) = clk / (16 * baud)
+ * frac = div(F) * 2^dlf_size
+ *
+ * let rem = clk % (16 * baud)
+ * we have: div(F) * (16 * baud) = rem
+ * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud)
+ */
+static unsigned int dw8250_get_divisor(struct uart_port *p,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ unsigned int quot, rem, base_baud = baud * 16;
+ struct dw8250_data *d = p->private_data;
+
+ quot = p->uartclk / base_baud;
+ rem = p->uartclk % base_baud;
+ *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud);
+
+ return quot;
+}
+
+static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ dw8250_writel_ext(p, DW_UART_DLF, quot_frac);
+ serial8250_do_set_divisor(p, baud, quot, quot_frac);
+}
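Plugging numbers into the formulas above: with a 48 MHz clock, 115200 baud and dlf_size = 6, quot = 48000000 / 1843200 = 26, rem = 76800 and frac = round((76800 << 6) / 1843200) = 3, i.e. an effective divisor of 26 + 3/64 ≈ 26.0469 against the ideal 26.0417. A standalone check of the arithmetic (values assumed, mirroring dw8250_get_divisor()):

        #include <stdio.h>

        int main(void)
        {
                unsigned int clk = 48000000, baud = 115200, dlf_size = 6;
                unsigned int base = 16 * baud;          /* 1843200 */
                unsigned int quot = clk / base;         /* 26 */
                unsigned int rem  = clk % base;         /* 76800 */
                /* DIV_ROUND_CLOSEST(rem << dlf_size, base) */
                unsigned int frac = ((rem << dlf_size) + base / 2) / base;      /* 3 */

                printf("quot=%u frac=%u divisor=%.4f ideal=%.4f\n",
                       quot, frac, quot + frac / 64.0, clk / (double)base);
                return 0;
        }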
+
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
if (p->dev->of_node) {
@@ -404,20 +452,26 @@ static void dw8250_setup_port(struct uart_port *p)
* If the Component Version Register returns zero, we know that
* ADDITIONAL_FEATURES are not enabled. No need to go any further.
*/
- if (p->iotype == UPIO_MEM32BE)
- reg = ioread32be(p->membase + DW_UART_UCV);
- else
- reg = readl(p->membase + DW_UART_UCV);
+ reg = dw8250_readl_ext(p, DW_UART_UCV);
if (!reg)
return;
dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
(reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
- if (p->iotype == UPIO_MEM32BE)
- reg = ioread32be(p->membase + DW_UART_CPR);
- else
- reg = readl(p->membase + DW_UART_CPR);
+ dw8250_writel_ext(p, DW_UART_DLF, ~0U);
+ reg = dw8250_readl_ext(p, DW_UART_DLF);
+ dw8250_writel_ext(p, DW_UART_DLF, 0);
+
+ if (reg) {
+ struct dw8250_data *d = p->private_data;
+
+ d->dlf_size = fls(reg);
+ p->get_divisor = dw8250_get_divisor;
+ p->set_divisor = dw8250_set_divisor;
+ }
+
+ reg = dw8250_readl_ext(p, DW_UART_CPR);
if (!reg)
return;
@@ -693,6 +747,7 @@ static const struct of_device_id dw8250_of_match[] = {
{ .compatible = "snps,dw-apb-uart" },
{ .compatible = "cavium,octeon-3860-uart" },
{ .compatible = "marvell,armada-38x-uart" },
+ { .compatible = "renesas,rzn1-uart" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);
@@ -707,6 +762,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "APMC0D08", 0},
{ "AMD0020", 0 },
{ "AMDI0020", 0 },
+ { "BRCM2032", 0 },
{ "HISI0031", 0 },
{ },
};
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index f6a86f2bc4e5..2a76e22d2ec0 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 38af306ca0e8..0089aa305ef9 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -109,11 +109,12 @@ struct exar8250_platform {
* struct exar8250_board - board information
* @num_ports: number of serial ports
* @reg_shift: describes UART register mapping in PCI memory
+ * @setup: quirk run at ->probe() stage
+ * @exit: quirk run at ->remove() stage
*/
struct exar8250_board {
unsigned int num_ports;
unsigned int reg_shift;
- bool has_slave;
int (*setup)(struct exar8250 *, struct pci_dev *,
struct uart_8250_port *, int);
void (*exit)(struct pci_dev *pcidev);
@@ -272,8 +273,32 @@ static int xr17v35x_register_gpio(struct pci_dev *pcidev,
return 0;
}
+static int generic_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
+ u8 __iomem *p = port->membase;
+ u8 value;
+
+ value = readb(p + UART_EXAR_FCTR);
+ if (is_rs485)
+ value |= UART_FCTR_EXAR_485;
+ else
+ value &= ~UART_FCTR_EXAR_485;
+
+ writeb(value, p + UART_EXAR_FCTR);
+
+ if (is_rs485)
+ writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
+
+ port->rs485 = *rs485;
+
+ return 0;
+}
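rs485_config is called by the serial core when userspace issues TIOCSRS485 on the port, so factoring the FCTR/MSR programming into generic_rs485_config() gives every Exar board the same turn-around setup. From the user side the switch is the stock ioctl (illustrative snippet):

        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/serial.h>

        int example_enable_rs485(const char *tty_path)
        {
                struct serial_rs485 rs485 = { .flags = SER_RS485_ENABLED };
                int fd = open(tty_path, O_RDWR | O_NOCTTY);

                if (fd < 0)
                        return -1;
                if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
                        close(fd);      /* driver rejected RS485 mode */
                        return -1;
                }
                return fd;
        }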
+
static const struct exar8250_platform exar8250_default_platform = {
.register_gpio = xr17v35x_register_gpio,
+ .rs485_config = generic_rs485_config,
};
static int iot2040_rs485_config(struct uart_port *port,
@@ -306,19 +331,7 @@ static int iot2040_rs485_config(struct uart_port *port,
value |= mode;
writeb(value, p + UART_EXAR_MPIOLVL_7_0);
- value = readb(p + UART_EXAR_FCTR);
- if (is_rs485)
- value |= UART_FCTR_EXAR_485;
- else
- value &= ~UART_FCTR_EXAR_485;
- writeb(value, p + UART_EXAR_FCTR);
-
- if (is_rs485)
- writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
-
- port->rs485 = *rs485;
-
- return 0;
+ return generic_rs485_config(port, rs485);
}
static const struct property_entry iot2040_gpio_properties[] = {
@@ -364,7 +377,6 @@ static int
pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
struct uart_8250_port *port, int idx)
{
- const struct exar8250_board *board = priv->board;
const struct exar8250_platform *platform;
const struct dmi_system_id *dmi_match;
unsigned int offset = idx * 0x400;
@@ -382,10 +394,10 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.rs485_config = platform->rs485_config;
/*
- * Setup the uart clock for the devices on expansion slot to
+ * Setup the UART clock for the devices on the expansion slot to
* half the clock speed of the main chip (which is 125MHz)
*/
- if (board->has_slave && idx >= 8)
+ if (idx >= 8)
port->port.uartclk /= 2;
ret = default_setup(priv, pcidev, idx, offset, port);
@@ -433,7 +445,11 @@ static irqreturn_t exar_misc_handler(int irq, void *data)
struct exar8250 *priv = data;
/* Clear all PCI interrupts by reading INT0. No effect on IIR */
- ioread8(priv->virt + UART_EXAR_INT0);
+ readb(priv->virt + UART_EXAR_INT0);
+
+ /* Clear INT0 for Expansion Interface slave ports, too */
+ if (priv->board->num_ports > 8)
+ readb(priv->virt + 0x2000 + UART_EXAR_INT0);
return IRQ_HANDLED;
}
@@ -590,14 +606,12 @@ static const struct exar8250_board pbn_exar_XR17V35x = {
static const struct exar8250_board pbn_exar_XR17V4358 = {
.num_ports = 12,
- .has_slave = true,
.setup = pci_xr17v35x_setup,
.exit = pci_xr17v35x_exit,
};
static const struct exar8250_board pbn_exar_XR17V8358 = {
.num_ports = 16,
- .has_slave = true,
.setup = pci_xr17v35x_setup,
.exit = pci_xr17v35x_exit,
};
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index bfb37f0be22f..af8beefe9b5c 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -124,7 +124,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
prop);
ret = -EINVAL;
- goto err_dispose;
+ goto err_unprepare;
}
}
port->flags |= UPF_IOREMAP;
@@ -144,6 +144,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->line = ret;
port->irq = irq_of_parse_and_map(np, 0);
+ if (!port->irq) {
+ ret = -EPROBE_DEFER;
+ goto err_unprepare;
+ }
info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
if (IS_ERR(info->rst)) {
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 1b337fee07ed..a019286f8bb6 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1115,6 +1115,7 @@ static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
static const struct of_device_id omap8250_dt_ids[] = {
+ { .compatible = "ti,am654-uart" },
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
{ .compatible = "ti,omap4-uart", .data = &omap4_habit, },
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 3296a05cda2d..f80a300b5d68 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
/* multi-io cards handled by parport_serial */
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
- { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
- { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
/* Moxa Smartio MUE boards handled by 8250_moxa */
{ PCI_VDEVICE(MOXA, 0x1024), },
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index cf541aab2bd0..3f779d25ec0c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -90,8 +90,7 @@ static const struct serial8250_config uart_config[] = {
.name = "16550A",
.fifo_size = 16,
.tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
- UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
@@ -1211,8 +1210,8 @@ static void autoconfig(struct uart_8250_port *up)
if (!port->iobase && !port->mapbase && !port->membase)
return;
- DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
- serial_index(port), port->iobase, port->membase);
+ DEBUG_AUTOCONF("%s: autoconf (0x%04lx, 0x%p): ",
+ port->name, port->iobase, port->membase);
/*
* We really do need global IRQs disabled here - we're going to
@@ -1363,9 +1362,8 @@ out_lock:
fintek_8250_probe(up);
if (up->capabilities != old_capabilities) {
- pr_warn("ttyS%d: detected caps %08x should be %08x\n",
- serial_index(port), old_capabilities,
- up->capabilities);
+ pr_warn("%s: detected caps %08x should be %08x\n",
+ port->name, old_capabilities, up->capabilities);
}
out:
DEBUG_AUTOCONF("iir=%d ", scratch);
@@ -2212,8 +2210,7 @@ int serial8250_do_startup(struct uart_port *port)
*/
if (!(port->flags & UPF_BUGGY_UART) &&
(serial_port_in(port, UART_LSR) == 0xff)) {
- printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
- serial_index(port));
+ pr_info_ratelimited("%s: LSR safety check engaged!\n", port->name);
retval = -ENODEV;
goto out;
}
@@ -2245,8 +2242,8 @@ int serial8250_do_startup(struct uart_port *port)
(port->type == PORT_ALTR_16550_F128)) && (port->fifosize > 1)) {
/* Bounds checking of TX threshold (valid 0 to fifosize-2) */
if ((up->tx_loadsz < 2) || (up->tx_loadsz > port->fifosize)) {
- pr_err("ttyS%d TX FIFO Threshold errors, skipping\n",
- serial_index(port));
+ pr_err("%s TX FIFO Threshold errors, skipping\n",
+ port->name);
} else {
serial_port_out(port, UART_ALTR_AFR,
UART_ALTR_EN_TXFIFO_LW);
@@ -2343,8 +2340,8 @@ int serial8250_do_startup(struct uart_port *port)
if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
if (!(up->bugs & UART_BUG_TXEN)) {
up->bugs |= UART_BUG_TXEN;
- pr_debug("ttyS%d - enabling bad tx status workarounds\n",
- serial_index(port));
+ pr_debug("%s - enabling bad tx status workarounds\n",
+ port->name);
}
} else {
up->bugs &= ~UART_BUG_TXEN;
@@ -2373,8 +2370,8 @@ dont_test_tx_en:
if (up->dma) {
retval = serial8250_request_dma(up);
if (retval) {
- pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
- serial_index(port));
+ pr_warn_ratelimited("%s - failed to request DMA\n",
+ port->name);
up->dma = NULL;
}
}
@@ -2498,11 +2495,11 @@ static unsigned int npcm_get_divisor(struct uart_8250_port *up,
return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
}
-static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
- unsigned int baud,
- unsigned int *frac)
+static unsigned int serial8250_do_get_divisor(struct uart_port *port,
+ unsigned int baud,
+ unsigned int *frac)
{
- struct uart_port *port = &up->port;
+ struct uart_8250_port *up = up_to_u8250p(port);
unsigned int quot;
/*
@@ -2532,6 +2529,16 @@ static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
return quot;
}
+static unsigned int serial8250_get_divisor(struct uart_port *port,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ if (port->get_divisor)
+ return port->get_divisor(port, baud, frac);
+
+ return serial8250_do_get_divisor(port, baud, frac);
+}
+
static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
tcflag_t c_cflag)
{
@@ -2570,8 +2577,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
return cval;
}
-static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac)
+void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -2602,6 +2609,16 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
serial_port_out(port, 0x2, quot_frac);
}
}
+EXPORT_SYMBOL_GPL(serial8250_do_set_divisor);
+
+static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ if (port->set_divisor)
+ port->set_divisor(port, baud, quot, quot_frac);
+ else
+ serial8250_do_set_divisor(port, baud, quot, quot_frac);
+}
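serial8250_get_divisor()/serial8250_set_divisor() now follow the core's usual "do_" convention: the exported serial8250_do_*() helpers carry the default logic, and the static entry points dispatch to a per-port hook first (8250_dw above is the first user). The convention in the abstract (hypothetical ex_* types):

        struct ex_port {
                unsigned int (*compute)(struct ex_port *p, unsigned int baud);
        };

        /* Exported default; override hooks may call back into it. */
        unsigned int ex_do_compute(struct ex_port *p, unsigned int baud)
        {
                return baud / 16;       /* stand-in for the generic computation */
        }

        /* Internal entry point: driver hook first, default otherwise. */
        static unsigned int ex_compute(struct ex_port *p, unsigned int baud)
        {
                if (p->compute)
                        return p->compute(p, baud);
                return ex_do_compute(p, baud);
        }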
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
@@ -2636,7 +2653,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
cval = serial8250_compute_lcr(up, termios->c_cflag);
baud = serial8250_get_baud_rate(port, termios, old);
- quot = serial8250_get_divisor(up, baud, &frac);
+ quot = serial8250_get_divisor(port, baud, &frac);
/*
* Ok, we're now changing the port state. Do it with
@@ -3197,7 +3214,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
termios.c_cflag = port->state->port.tty->termios.c_cflag;
baud = serial8250_get_baud_rate(port, &termios, NULL);
- quot = serial8250_get_divisor(up, baud, &frac);
+ quot = serial8250_get_divisor(port, baud, &frac);
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 9963a766dcfb..c8186a05a453 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -638,8 +638,10 @@ static int serial_config(struct pcmcia_device *link)
(link->has_func_id) &&
(link->socket->pcmcia_pfc == 0) &&
((link->func_id == CISTPL_FUNCID_MULTI) ||
- (link->func_id == CISTPL_FUNCID_SERIAL)))
- pcmcia_loop_config(link, serial_check_for_multi, info);
+ (link->func_id == CISTPL_FUNCID_SERIAL))) {
+ if (pcmcia_loop_config(link, serial_check_for_multi, info))
+ goto failed;
+ }
/*
* Apply any multi-port quirk.
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 4e853570ea80..239c0fa2e981 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -314,7 +314,8 @@ static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
/*
* UCR2_SRST is the only bit in the cached registers that might
* differ from the value that was last written. As it only
- * clears after being set, reread conditionally.
+ * automatically becomes one after being cleared, reread
+ * conditionally.
*/
if (!(sport->ucr2 & UCR2_SRST))
sport->ucr2 = readl(sport->port.membase + offset);
@@ -1051,7 +1052,7 @@ static void imx_uart_dma_rx_callback(void *data)
unsigned int r_bytes;
unsigned int bd_size;
- status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
+ status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
if (status == DMA_ERROR) {
imx_uart_clear_rx_errors(sport);
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index b6bd6e15e07b..689774c073ca 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -430,7 +430,6 @@ int jsm_uart_port_init(struct jsm_board *brd)
{
int i, rc;
unsigned int line;
- struct jsm_channel *ch;
if (!brd)
return -ENXIO;
@@ -444,7 +443,7 @@ int jsm_uart_port_init(struct jsm_board *brd)
brd->nasync = brd->maxports;
/* Set up channel variables */
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
+ for (i = 0; i < brd->nasync; i++) {
if (!brd->channels[i])
continue;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index efe55a1a0615..3db48fcd6068 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -531,8 +531,8 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
-static int max310x_set_ref_clk(struct max310x_port *s, unsigned long freq,
- bool xtal)
+static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+ unsigned long freq, bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
long besterr = -1;
@@ -588,8 +588,14 @@ static int max310x_set_ref_clk(struct max310x_port *s, unsigned long freq,
regmap_write(s->regmap, MAX310X_CLKSRC_REG, clksrc);
/* Wait for crystal */
- if (pllcfg && xtal)
+ if (xtal) {
+ unsigned int val;
msleep(10);
+ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+ if (!(val & MAX310X_STS_CLKREADY_BIT)) {
+ dev_warn(dev, "clock is not stable yet\n");
+ }
+ }
return (int)bestfreq;
}
@@ -1260,7 +1266,7 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
MAX310X_MODE1_AUTOSLEEP_BIT);
}
- uartclk = max310x_set_ref_clk(s, freq, xtal);
+ uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
mutex_init(&s->mutex);
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index eda3c7710d6a..4932b674f7ef 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -887,7 +887,8 @@ static int serial_pxa_probe(struct platform_device *dev)
goto err_clk;
if (sport->port.line >= ARRAY_SIZE(serial_pxa_ports)) {
dev_err(&dev->dev, "serial%d out of range\n", sport->port.line);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_clk;
}
snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index c62e17c85f57..29ec34387246 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -17,6 +17,7 @@
#include <linux/tty_flip.h>
/* UART specific GENI registers */
+#define SE_UART_LOOPBACK_CFG 0x22c
#define SE_UART_TX_TRANS_CFG 0x25c
#define SE_UART_TX_WORD_LEN 0x268
#define SE_UART_TX_STOP_BIT_LEN 0x26c
@@ -26,6 +27,7 @@
#define SE_UART_RX_STALE_CNT 0x294
#define SE_UART_TX_PARITY_CFG 0x2a4
#define SE_UART_RX_PARITY_CFG 0x2a8
+#define SE_UART_MANUAL_RFR 0x2ac
/* SE_UART_TRANS_CFG */
#define UART_TX_PAR_EN BIT(0)
@@ -62,6 +64,11 @@
#define PAR_SPACE 0x10
#define PAR_MARK 0x11
+/* SE_UART_MANUAL_RFR register fields */
+#define UART_MANUAL_RFR_EN BIT(31)
+#define UART_RFR_NOT_READY BIT(1)
+#define UART_RFR_READY BIT(0)
+
/* UART M_CMD OP codes */
#define UART_START_TX 0x1
#define UART_START_BREAK 0x4
@@ -74,10 +81,12 @@
#define STALE_TIMEOUT 16
#define DEFAULT_BITS_PER_CHAR 10
#define GENI_UART_CONS_PORTS 1
+#define GENI_UART_PORTS 3
#define DEF_FIFO_DEPTH_WORDS 16
#define DEF_TX_WM 2
#define DEF_FIFO_WIDTH_BITS 32
#define UART_CONSOLE_RX_WM 2
+#define MAX_LOOPBACK_CFG 3
#ifdef CONFIG_CONSOLE_POLL
#define RX_BYTES_PW 1
@@ -101,22 +110,81 @@ struct qcom_geni_serial_port {
unsigned int baud;
unsigned int tx_bytes_pw;
unsigned int rx_bytes_pw;
+ u32 *rx_fifo;
+ u32 loopback;
bool brk;
};
static const struct uart_ops qcom_geni_console_pops;
+static const struct uart_ops qcom_geni_uart_pops;
static struct uart_driver qcom_geni_console_driver;
+static struct uart_driver qcom_geni_uart_driver;
static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
static void qcom_geni_serial_stop_rx(struct uart_port *uport);
static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
32000000, 48000000, 64000000, 80000000,
- 96000000, 100000000};
+ 96000000, 100000000, 102400000,
+ 112000000, 120000000, 128000000};
#define to_dev_port(ptr, member) \
container_of(ptr, struct qcom_geni_serial_port, member)
+static struct qcom_geni_serial_port qcom_geni_uart_ports[GENI_UART_PORTS] = {
+ [0] = {
+ .uport = {
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ },
+ },
+ [1] = {
+ .uport = {
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 1,
+ },
+ },
+ [2] = {
+ .uport = {
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 2,
+ },
+ },
+};
+
+static ssize_t loopback_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
+
+ return snprintf(buf, sizeof(u32), "%d\n", port->loopback);
+}
+
+static ssize_t loopback_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
+ u32 loopback;
+
+ if (kstrtouint(buf, 0, &loopback) || loopback > MAX_LOOPBACK_CFG) {
+ dev_err(dev, "Invalid input\n");
+ return -EINVAL;
+ }
+ port->loopback = loopback;
+ return size;
+}
+static DEVICE_ATTR_RW(loopback);
+
static struct qcom_geni_serial_port qcom_geni_console_port = {
.uport = {
.iotype = UPIO_MEM,
@@ -148,14 +216,33 @@ static void qcom_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
}
}
-static unsigned int qcom_geni_cons_get_mctrl(struct uart_port *uport)
+static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
{
- return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+ unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
+ u32 geni_ios;
+
+ if (uart_console(uport) || !uart_cts_enabled(uport)) {
+ mctrl |= TIOCM_CTS;
+ } else {
+ geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
+ if (!(geni_ios & IO2_DATA_IN))
+ mctrl |= TIOCM_CTS;
+ }
+
+ return mctrl;
}
-static void qcom_geni_cons_set_mctrl(struct uart_port *uport,
+static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
unsigned int mctrl)
{
+ u32 uart_manual_rfr = 0;
+
+ if (uart_console(uport) || !uart_cts_enabled(uport))
+ return;
+
+ if (!(mctrl & TIOCM_RTS))
+ uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
+ writel_relaxed(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
}
static const char *qcom_geni_serial_get_type(struct uart_port *uport)
@@ -163,11 +250,16 @@ static const char *qcom_geni_serial_get_type(struct uart_port *uport)
return "MSM";
}
-static struct qcom_geni_serial_port *get_port_from_line(int line)
+static struct qcom_geni_serial_port *get_port_from_line(int line, bool console)
{
- if (line < 0 || line >= GENI_UART_CONS_PORTS)
+ struct qcom_geni_serial_port *port;
+ int nr_ports = console ? GENI_UART_CONS_PORTS : GENI_UART_PORTS;
+
+ if (line < 0 || line >= nr_ports)
return ERR_PTR(-ENXIO);
- return &qcom_geni_console_port;
+
+ port = console ? &qcom_geni_console_port : &qcom_geni_uart_ports[line];
+ return port;
}
static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
@@ -346,7 +438,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
- port = get_port_from_line(co->index);
+ port = get_port_from_line(co->index, true);
if (IS_ERR(port))
return;
@@ -420,6 +512,32 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
+static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
+{
+ unsigned char *buf;
+ struct tty_port *tport;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 num_bytes_pw = port->tx_fifo_width / BITS_PER_BYTE;
+ u32 words = ALIGN(bytes, num_bytes_pw) / num_bytes_pw;
+ int ret;
+
+ tport = &uport->state->port;
+ ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, port->rx_fifo, words);
+ if (drop)
+ return 0;
+
+ buf = (unsigned char *)port->rx_fifo;
+ ret = tty_insert_flip_string(tport, buf, bytes);
+ if (ret != bytes) {
+ dev_err(uport->dev, "%s: unable to push data, ret %d bytes %d\n",
+ __func__, ret, bytes);
+ WARN_ON_ONCE(1);
+ }
+ uport->icount.rx += ret;
+ tty_flip_buffer_push(tport);
+ return ret;
+}
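handle_rx_uart() reads whole FIFO words and pushes only the valid bytes. With the usual 32-bit FIFO, num_bytes_pw is 4, so 10 pending bytes become ALIGN(10, 4) / 4 = 3 word reads. The arithmetic in isolation (width assumed):

        /* Round x up to a multiple of a (a power of two), like ALIGN(). */
        #define EX_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

        static unsigned int ex_words_for_bytes(unsigned int bytes,
                                               unsigned int fifo_width_bits)
        {
                unsigned int bpw = fifo_width_bits / 8; /* 32 -> 4 */

                return EX_ALIGN(bytes, bpw) / bpw;
        }
        /* ex_words_for_bytes(10, 32) == 3: two full words plus a partial one. */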
+
static void qcom_geni_serial_start_tx(struct uart_port *uport)
{
u32 irq_en;
@@ -586,6 +704,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport)
u32 status;
unsigned int chunk;
int tail;
+ u32 irq_en;
chunk = uart_circ_chars_pending(xmit);
status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
@@ -595,6 +714,13 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport)
goto out_write_wakeup;
}
+ if (!uart_console(uport)) {
+ irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~(M_TX_FIFO_WATERMARK_EN);
+ writel_relaxed(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+
avail = (port->tx_fifo_depth - port->tx_wm) * port->tx_bytes_pw;
tail = xmit->tail;
chunk = min3((size_t)chunk, (size_t)(UART_XMIT_SIZE - tail), avail);
@@ -623,7 +749,8 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport)
}
xmit->tail = tail & (UART_XMIT_SIZE - 1);
- qcom_geni_serial_poll_tx_done(uport);
+ if (uart_console(uport))
+ qcom_geni_serial_poll_tx_done(uport);
out_write_wakeup:
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(uport);
@@ -710,7 +837,8 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport)
unsigned long flags;
/* Stop the console before stopping the current tx */
- console_stop(uport->cons);
+ if (uart_console(uport))
+ console_stop(uport->cons);
free_irq(uport->irq, uport);
spin_lock_irqsave(&uport->lock, flags);
@@ -731,13 +859,20 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
* it else we could end up in data loss scenarios.
*/
port->xfer_mode = GENI_SE_FIFO;
- qcom_geni_serial_poll_tx_done(uport);
+ if (uart_console(uport))
+ qcom_geni_serial_poll_tx_done(uport);
geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
false, true, false);
geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
false, false, true);
geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
geni_se_select_mode(&port->se, port->xfer_mode);
+ if (!uart_console(uport)) {
+ port->rx_fifo = devm_kzalloc(uport->dev,
+ port->rx_fifo_depth * sizeof(u32), GFP_KERNEL);
+ if (!port->rx_fifo)
+ return -ENOMEM;
+ }
port->setup = true;
return 0;
}
@@ -749,8 +884,13 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
scnprintf(port->name, sizeof(port->name),
- "qcom_serial_geni%d", uport->line);
+ "qcom_serial_%s%d",
+ (uart_console(uport) ? "console" : "uart"), uport->line);
+ if (!uart_console(uport)) {
+ port->tx_bytes_pw = 4;
+ port->rx_bytes_pw = RX_BYTES_PW;
+ }
proto = geni_se_read_proto(&port->se);
if (proto != GENI_SE_UART) {
dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
@@ -886,6 +1026,9 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
if (baud)
uart_update_timeout(uport, termios->c_cflag, baud);
+ if (!uart_console(uport))
+ writel_relaxed(port->loopback,
+ uport->membase + SE_UART_LOOPBACK_CFG);
writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
@@ -917,7 +1060,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
if (co->index >= GENI_UART_CONS_PORTS || co->index < 0)
return -ENXIO;
- port = get_port_from_line(co->index);
+ port = get_port_from_line(co->index, true);
if (IS_ERR(port)) {
pr_err("Invalid line %d\n", co->index);
return PTR_ERR(port);
@@ -1048,16 +1191,23 @@ static void console_unregister(struct uart_driver *drv)
}
#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
-static void qcom_geni_serial_cons_pm(struct uart_port *uport,
+static struct uart_driver qcom_geni_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "qcom_geni_uart",
+ .dev_name = "ttyHS",
+ .nr = GENI_UART_PORTS,
+};
+
+static void qcom_geni_serial_pm(struct uart_port *uport,
unsigned int new_state, unsigned int old_state)
{
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- if (unlikely(!uart_console(uport)))
- return;
-
if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF)
geni_se_resources_on(&port->se);
+ else if (!uart_console(uport) && (new_state == UART_PM_STATE_ON &&
+ old_state == UART_PM_STATE_UNDEFINED))
+ geni_se_resources_on(&port->se);
else if (new_state == UART_PM_STATE_OFF &&
old_state == UART_PM_STATE_ON)
geni_se_resources_off(&port->se);
@@ -1074,13 +1224,29 @@ static const struct uart_ops qcom_geni_console_pops = {
.config_port = qcom_geni_serial_config_port,
.shutdown = qcom_geni_serial_shutdown,
.type = qcom_geni_serial_get_type,
- .set_mctrl = qcom_geni_cons_set_mctrl,
- .get_mctrl = qcom_geni_cons_get_mctrl,
+ .set_mctrl = qcom_geni_serial_set_mctrl,
+ .get_mctrl = qcom_geni_serial_get_mctrl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = qcom_geni_serial_get_char,
.poll_put_char = qcom_geni_serial_poll_put_char,
#endif
- .pm = qcom_geni_serial_cons_pm,
+ .pm = qcom_geni_serial_pm,
+};
+
+static const struct uart_ops qcom_geni_uart_pops = {
+ .tx_empty = qcom_geni_serial_tx_empty,
+ .stop_tx = qcom_geni_serial_stop_tx,
+ .start_tx = qcom_geni_serial_start_tx,
+ .stop_rx = qcom_geni_serial_stop_rx,
+ .set_termios = qcom_geni_serial_set_termios,
+ .startup = qcom_geni_serial_startup,
+ .request_port = qcom_geni_serial_request_port,
+ .config_port = qcom_geni_serial_config_port,
+ .shutdown = qcom_geni_serial_shutdown,
+ .type = qcom_geni_serial_get_type,
+ .set_mctrl = qcom_geni_serial_set_mctrl,
+ .get_mctrl = qcom_geni_serial_get_mctrl,
+ .pm = qcom_geni_serial_pm,
};
static int qcom_geni_serial_probe(struct platform_device *pdev)
@@ -1091,13 +1257,23 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
struct uart_port *uport;
struct resource *res;
int irq;
+ bool console = false;
+ struct uart_driver *drv;
- if (pdev->dev.of_node)
- line = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,geni-debug-uart"))
+ console = true;
- if (line < 0 || line >= GENI_UART_CONS_PORTS)
- return -ENXIO;
- port = get_port_from_line(line);
+ if (pdev->dev.of_node) {
+ if (console) {
+ drv = &qcom_geni_console_driver;
+ line = of_alias_get_id(pdev->dev.of_node, "serial");
+ } else {
+ drv = &qcom_geni_uart_driver;
+ line = of_alias_get_id(pdev->dev.of_node, "hsuart");
+ }
+ }
+
+ port = get_port_from_line(line, console);
if (IS_ERR(port)) {
dev_err(&pdev->dev, "Invalid line %d\n", line);
return PTR_ERR(port);
@@ -1134,10 +1310,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
}
uport->irq = irq;
- uport->private_data = &qcom_geni_console_driver;
+ uport->private_data = drv;
platform_set_drvdata(pdev, port);
- port->handle_rx = handle_rx_console;
- return uart_add_one_port(&qcom_geni_console_driver, uport);
+ port->handle_rx = console ? handle_rx_console : handle_rx_uart;
+ if (!console)
+ device_create_file(uport->dev, &dev_attr_loopback);
+ return uart_add_one_port(drv, uport);
}
static int qcom_geni_serial_remove(struct platform_device *pdev)
@@ -1154,7 +1332,17 @@ static int __maybe_unused qcom_geni_serial_sys_suspend_noirq(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- uart_suspend_port(uport->private_data, uport);
+ if (uart_console(uport)) {
+ uart_suspend_port(uport->private_data, uport);
+ } else {
+ struct uart_state *state = uport->state;
+ /*
+ * If the port is open, deny system suspend.
+ */
+ if (state->pm_state == UART_PM_STATE_ON)
+ return -EBUSY;
+ }
+
return 0;
}
@@ -1163,7 +1351,8 @@ static int __maybe_unused qcom_geni_serial_sys_resume_noirq(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- if (console_suspend_enabled && uport->suspended) {
+ if (uart_console(uport) &&
+ console_suspend_enabled && uport->suspended) {
uart_resume_port(uport->private_data, uport);
/*
* uart_suspend_port() invokes port shutdown which in turn
@@ -1185,6 +1374,7 @@ static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
static const struct of_device_id qcom_geni_serial_match_table[] = {
{ .compatible = "qcom,geni-debug-uart", },
+ { .compatible = "qcom,geni-uart", },
{}
};
MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
@@ -1207,9 +1397,17 @@ static int __init qcom_geni_serial_init(void)
if (ret)
return ret;
+ ret = uart_register_driver(&qcom_geni_uart_driver);
+ if (ret) {
+ console_unregister(&qcom_geni_console_driver);
+ return ret;
+ }
+
ret = platform_driver_register(&qcom_geni_serial_platform_driver);
- if (ret)
+ if (ret) {
console_unregister(&qcom_geni_console_driver);
+ uart_unregister_driver(&qcom_geni_uart_driver);
+ }
return ret;
}
module_init(qcom_geni_serial_init);
@@ -1218,6 +1416,7 @@ static void __exit qcom_geni_serial_exit(void)
{
platform_driver_unregister(&qcom_geni_serial_platform_driver);
console_unregister(&qcom_geni_console_driver);
+ uart_unregister_driver(&qcom_geni_uart_driver);
}
module_exit(qcom_geni_serial_exit);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index d6ae3086c2a2..339befdd2f4d 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/console.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 9c14a453f73c..80bb56facfb6 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -182,6 +182,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
{
struct uart_port *uport = uart_port_check(state);
unsigned long page;
+ unsigned long flags = 0;
int retval = 0;
if (uport->type == PORT_UNKNOWN)
@@ -196,15 +197,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
* Initialise and allocate the transmit and temporary
* buffer.
*/
- if (!state->xmit.buf) {
- /* This is protected by the per port mutex */
- page = get_zeroed_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ uart_port_lock(state, flags);
+ if (!state->xmit.buf) {
state->xmit.buf = (unsigned char *) page;
uart_circ_clear(&state->xmit);
+ } else {
+ free_page(page);
}
+ uart_port_unlock(uport, flags);
retval = uport->ops->startup(uport);
if (retval == 0) {
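The xmit-buffer hunk above replaces check-then-allocate, which is racy when two openers reach this point concurrently, with allocate-first and commit-under-the-port-lock. A minimal self-contained sketch of the same idiom; struct obj and attach_buf() are hypothetical names, not serial_core API:

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	struct obj {
		spinlock_t lock;
		unsigned char *buf;
	};

	static int attach_buf(struct obj *o)
	{
		/* allocate before locking: get_zeroed_page() may sleep */
		unsigned long page = get_zeroed_page(GFP_KERNEL);

		if (!page)
			return -ENOMEM;

		spin_lock(&o->lock);
		if (!o->buf) {
			o->buf = (unsigned char *)page;	/* won the race: publish */
			page = 0;
		}
		spin_unlock(&o->lock);

		if (page)
			free_page(page);	/* lost the race: drop our copy */
		return 0;
	}

Losing the race costs one transient page; in exchange the sleeping allocator stays out of the critical section, and the shutdown path can take the same lock around the matching free_page().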
@@ -263,6 +267,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
struct uart_port *uport = uart_port_check(state);
struct tty_port *port = &state->port;
+ unsigned long flags = 0;
/*
* Set the TTY IO error marker
@@ -295,10 +300,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
/*
* Free the transmit buffer page.
*/
+ uart_port_lock(state, flags);
if (state->xmit.buf) {
free_page((unsigned long)state->xmit.buf);
state->xmit.buf = NULL;
}
+ uart_port_unlock(uport, flags);
}
/**
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index c181eb37f985..ac4424bf6b13 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -65,6 +65,8 @@ enum {
SCIx_RXI_IRQ,
SCIx_TXI_IRQ,
SCIx_BRI_IRQ,
+ SCIx_DRI_IRQ,
+ SCIx_TEI_IRQ,
SCIx_NR_IRQS,
SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */
@@ -135,6 +137,8 @@ struct sci_port {
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct dma_chan *chan_tx_saved;
+ struct dma_chan *chan_rx_saved;
dma_cookie_t cookie_tx;
dma_cookie_t cookie_rx[2];
dma_cookie_t active_rx;
@@ -315,15 +319,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
[SCIx_SH4_SCIF_REGTYPE] = {
.regs = {
[SCSMR] = { 0x00, 16 },
- [SCBRR] = { 0x04, 8 },
- [SCSCR] = { 0x08, 16 },
- [SCxTDR] = { 0x0c, 8 },
- [SCxSR] = { 0x10, 16 },
- [SCxRDR] = { 0x14, 8 },
- [SCFCR] = { 0x18, 16 },
- [SCFDR] = { 0x1c, 16 },
- [SCSPTR] = { 0x20, 16 },
- [SCLSR] = { 0x24, 16 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 16 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0a, 8 },
+ [SCFCR] = { 0x0c, 16 },
+ [SCFDR] = { 0x0e, 16 },
+ [SCSPTR] = { 0x10, 16 },
+ [SCLSR] = { 0x12, 16 },
},
.fifosize = 16,
.overrun_reg = SCLSR,
@@ -1212,25 +1216,16 @@ static int sci_dma_rx_find_active(struct sci_port *s)
return -1;
}
-static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
+static void sci_rx_dma_release(struct sci_port *s)
{
- struct dma_chan *chan = s->chan_rx;
- struct uart_port *port = &s->port;
- unsigned long flags;
+ struct dma_chan *chan = s->chan_rx_saved;
- spin_lock_irqsave(&port->lock, flags);
- s->chan_rx = NULL;
+ s->chan_rx_saved = s->chan_rx = NULL;
s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
- spin_unlock_irqrestore(&port->lock, flags);
- dmaengine_terminate_all(chan);
+ dmaengine_terminate_sync(chan);
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
sg_dma_address(&s->sg_rx[0]));
dma_release_channel(chan);
- if (enable_pio) {
- spin_lock_irqsave(&port->lock, flags);
- sci_start_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
- }
}
static void start_hrtimer_us(struct hrtimer *hrt, unsigned long usec)
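With the chan_rx_saved/chan_tx_saved split above, the release helpers run only from process context at shutdown, which is what permits dmaengine_terminate_sync() (it waits for in-flight completion callbacks) in place of dmaengine_terminate_all(); the atomic-context failure paths now switch to PIO inline and use dmaengine_terminate_async(). A rough sketch of the context split, with illustrative helper names:

	#include <linux/dmaengine.h>

	/* atomic context (IRQ or timer callback): request termination only */
	static void stop_dma_in_atomic(struct dma_chan *chan)
	{
		dmaengine_terminate_async(chan);
	}

	/* process context at teardown: wait for callbacks, then release */
	static void stop_dma_at_teardown(struct dma_chan *chan)
	{
		dmaengine_terminate_sync(chan);
		dma_release_channel(chan);
	}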
@@ -1289,33 +1284,31 @@ static void sci_dma_rx_complete(void *arg)
fail:
spin_unlock_irqrestore(&port->lock, flags);
dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
- sci_rx_dma_release(s, true);
+ /* Switch to PIO */
+ spin_lock_irqsave(&port->lock, flags);
+ s->chan_rx = NULL;
+ sci_start_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
}
-static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
+static void sci_tx_dma_release(struct sci_port *s)
{
- struct dma_chan *chan = s->chan_tx;
- struct uart_port *port = &s->port;
- unsigned long flags;
+ struct dma_chan *chan = s->chan_tx_saved;
- spin_lock_irqsave(&port->lock, flags);
- s->chan_tx = NULL;
+ cancel_work_sync(&s->work_tx);
+ s->chan_tx_saved = s->chan_tx = NULL;
s->cookie_tx = -EINVAL;
- spin_unlock_irqrestore(&port->lock, flags);
- dmaengine_terminate_all(chan);
+ dmaengine_terminate_sync(chan);
dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
DMA_TO_DEVICE);
dma_release_channel(chan);
- if (enable_pio) {
- spin_lock_irqsave(&port->lock, flags);
- sci_start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
- }
}
static void sci_submit_rx(struct sci_port *s)
{
struct dma_chan *chan = s->chan_rx;
+ struct uart_port *port = &s->port;
+ unsigned long flags;
int i;
for (i = 0; i < 2; i++) {
@@ -1343,11 +1336,15 @@ static void sci_submit_rx(struct sci_port *s)
fail:
if (i)
- dmaengine_terminate_all(chan);
+ dmaengine_terminate_async(chan);
for (i = 0; i < 2; i++)
s->cookie_rx[i] = -EINVAL;
s->active_rx = -EINVAL;
- sci_rx_dma_release(s, true);
+ /* Switch to PIO */
+ spin_lock_irqsave(&port->lock, flags);
+ s->chan_rx = NULL;
+ sci_start_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static void work_fn_tx(struct work_struct *work)
@@ -1357,6 +1354,7 @@ static void work_fn_tx(struct work_struct *work)
struct dma_chan *chan = s->chan_tx;
struct uart_port *port = &s->port;
struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
dma_addr_t buf;
/*
@@ -1378,9 +1376,7 @@ static void work_fn_tx(struct work_struct *work)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
- /* switch to PIO */
- sci_tx_dma_release(s, true);
- return;
+ goto switch_to_pio;
}
dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
@@ -1393,15 +1389,21 @@ static void work_fn_tx(struct work_struct *work)
s->cookie_tx = dmaengine_submit(desc);
if (dma_submit_error(s->cookie_tx)) {
dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
- /* switch to PIO */
- sci_tx_dma_release(s, true);
- return;
+ goto switch_to_pio;
}
dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
__func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
dma_async_issue_pending(chan);
+ return;
+
+switch_to_pio:
+ spin_lock_irqsave(&port->lock, flags);
+ s->chan_tx = NULL;
+ sci_start_tx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
}
static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
@@ -1452,7 +1454,7 @@ static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
}
/* Handle incomplete DMA receive */
- dmaengine_terminate_all(s->chan_rx);
+ dmaengine_terminate_async(s->chan_rx);
read = sg_dma_len(&s->sg_rx[active]) - state.residue;
if (read) {
@@ -1535,7 +1537,6 @@ static void sci_request_dma(struct uart_port *port)
chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV);
dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
if (chan) {
- s->chan_tx = chan;
/* UART circular tx buffer is an aligned page. */
s->tx_dma_addr = dma_map_single(chan->device->dev,
port->state->xmit.buf,
@@ -1544,14 +1545,14 @@ static void sci_request_dma(struct uart_port *port)
if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
dma_release_channel(chan);
- s->chan_tx = NULL;
} else {
dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
__func__, UART_XMIT_SIZE,
port->state->xmit.buf, &s->tx_dma_addr);
- }
- INIT_WORK(&s->work_tx, work_fn_tx);
+ INIT_WORK(&s->work_tx, work_fn_tx);
+ s->chan_tx_saved = s->chan_tx = chan;
+ }
}
chan = sci_request_dma_chan(port, DMA_DEV_TO_MEM);
@@ -1561,8 +1562,6 @@ static void sci_request_dma(struct uart_port *port)
dma_addr_t dma;
void *buf;
- s->chan_rx = chan;
-
s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
&dma, GFP_KERNEL);
@@ -1570,7 +1569,6 @@ static void sci_request_dma(struct uart_port *port)
dev_warn(port->dev,
"Failed to allocate Rx dma buffer, using PIO\n");
dma_release_channel(chan);
- s->chan_rx = NULL;
return;
}
@@ -1591,6 +1589,8 @@ static void sci_request_dma(struct uart_port *port)
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
+
+ s->chan_rx_saved = s->chan_rx = chan;
}
}
@@ -1598,10 +1598,10 @@ static void sci_free_dma(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
- if (s->chan_tx)
- sci_tx_dma_release(s, false);
- if (s->chan_rx)
- sci_rx_dma_release(s, false);
+ if (s->chan_tx_saved)
+ sci_tx_dma_release(s);
+ if (s->chan_rx_saved)
+ sci_rx_dma_release(s);
}
static void sci_flush_buffer(struct uart_port *port)
@@ -1683,11 +1683,35 @@ static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
+static irqreturn_t sci_br_interrupt(int irq, void *ptr)
+{
+ struct uart_port *port = ptr;
+
+ /* Handle BREAKs */
+ sci_handle_breaks(port);
+ sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
struct sci_port *s = to_sci_port(port);
+ if (s->irqs[SCIx_ERI_IRQ] == s->irqs[SCIx_BRI_IRQ]) {
+ /* Break and Error interrupts are muxed */
+ unsigned short ssr_status = serial_port_in(port, SCxSR);
+
+ /* Break Interrupt */
+ if (ssr_status & SCxSR_BRK(port))
+ sci_br_interrupt(irq, ptr);
+
+ /* Break only? */
+ if (!(ssr_status & SCxSR_ERRORS(port)))
+ return IRQ_HANDLED;
+ }
+
/* Handle errors */
if (port->type == PORT_SCI) {
if (sci_handle_errors(port)) {
@@ -1710,17 +1734,6 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
-static irqreturn_t sci_br_interrupt(int irq, void *ptr)
-{
- struct uart_port *port = ptr;
-
- /* Handle BREAKs */
- sci_handle_breaks(port);
- sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
unsigned short ssr_status, scr_status, err_enabled, orer_status = 0;
@@ -1794,6 +1807,16 @@ static const struct sci_irq_desc {
.handler = sci_br_interrupt,
},
+ [SCIx_DRI_IRQ] = {
+ .desc = "rx ready",
+ .handler = sci_rx_interrupt,
+ },
+
+ [SCIx_TEI_IRQ] = {
+ .desc = "tx end",
+ .handler = sci_tx_interrupt,
+ },
+
/*
* Special muxed handler.
*/
@@ -1806,12 +1829,19 @@ static const struct sci_irq_desc {
static int sci_request_irq(struct sci_port *port)
{
struct uart_port *up = &port->port;
- int i, j, ret = 0;
+ int i, j, w, ret = 0;
for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
const struct sci_irq_desc *desc;
int irq;
+ /* Check if already registered (muxed) */
+ for (w = 0; w < i; w++)
+ if (port->irqs[w] == port->irqs[i])
+ w = i + 1;
+ if (w > i)
+ continue;
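+ /*
+ * The scan above marks a duplicate by pushing w to i + 1, which
+ * both ends the loop (w < i fails) and satisfies the w > i test,
+ * so irqs[i] was already requested on an earlier pass.
+ */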
+
if (SCIx_IRQ_IS_MUXED(port)) {
i = SCIx_MUX_IRQ;
irq = up->irq;
@@ -2092,13 +2122,15 @@ static void sci_shutdown(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- if (s->chan_rx) {
+ if (s->chan_rx_saved) {
dev_dbg(port->dev, "%s(%d) deleting rx_timer\n", __func__,
port->line);
hrtimer_cancel(&s->rx_timer);
}
#endif
+ if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0)
+ del_timer_sync(&s->rx_fifo_timer);
sci_free_irq(s);
sci_free_dma(port);
}
@@ -2778,7 +2810,7 @@ static int sci_init_single(struct platform_device *dev,
{
struct uart_port *port = &sci_port->port;
const struct resource *res;
- unsigned int i;
+ unsigned int i, regtype;
int ret;
sci_port->cfg = p;
@@ -2799,22 +2831,23 @@ static int sci_init_single(struct platform_device *dev,
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
- * interrupt resource is specified. In the non-muxed case three or four
- * interrupt resources are specified, as the BRI interrupt is optional.
+ * interrupt resource is specified as there is only one interrupt ID.
+ * In the non-muxed case, up to 6 interrupt signals might be generated
+ * from the SCI, however those signals might have their own individual
+ * interrupt ID numbers, or muxed together with another interrupt.
*/
if (sci_port->irqs[0] < 0)
return -ENXIO;
- if (sci_port->irqs[1] < 0) {
- sci_port->irqs[1] = sci_port->irqs[0];
- sci_port->irqs[2] = sci_port->irqs[0];
- sci_port->irqs[3] = sci_port->irqs[0];
- }
+ if (sci_port->irqs[1] < 0)
+ for (i = 1; i < ARRAY_SIZE(sci_port->irqs); i++)
+ sci_port->irqs[i] = sci_port->irqs[0];
sci_port->params = sci_probe_regmap(p);
if (unlikely(sci_port->params == NULL))
return -EINVAL;
+ regtype = sci_port->params - sci_port_params;
switch (p->type) {
case PORT_SCIFB:
sci_port->rx_trigger = 48;
@@ -2869,6 +2902,10 @@ static int sci_init_single(struct platform_device *dev,
port->regshift = 1;
}
+ if (regtype == SCIx_SH4_SCIF_REGTYPE)
+ if (sci_port->reg_size >= 0x20)
+ port->regshift = 1;
+
/*
* The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c47db7826189..98d3eadd2fd0 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -21,6 +21,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/clk.h>
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
@@ -54,6 +55,11 @@
#define ULITE_CONTROL_RST_RX 0x02
#define ULITE_CONTROL_IE 0x10
+struct uartlite_data {
+ const struct uartlite_reg_ops *reg_ops;
+ struct clk *clk;
+};
+
struct uartlite_reg_ops {
u32 (*in)(void __iomem *addr);
void (*out)(u32 val, void __iomem *addr);
@@ -91,16 +97,16 @@ static const struct uartlite_reg_ops uartlite_le = {
static inline u32 uart_in32(u32 offset, struct uart_port *port)
{
- const struct uartlite_reg_ops *reg_ops = port->private_data;
+ struct uartlite_data *pdata = port->private_data;
- return reg_ops->in(port->membase + offset);
+ return pdata->reg_ops->in(port->membase + offset);
}
static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
{
- const struct uartlite_reg_ops *reg_ops = port->private_data;
+ struct uartlite_data *pdata = port->private_data;
- reg_ops->out(val, port->membase + offset);
+ pdata->reg_ops->out(val, port->membase + offset);
}
static struct uart_port ulite_ports[ULITE_NR_UARTS];
@@ -257,8 +263,15 @@ static void ulite_break_ctl(struct uart_port *port, int ctl)
static int ulite_startup(struct uart_port *port)
{
+ struct uartlite_data *pdata = port->private_data;
int ret;
+ ret = clk_enable(pdata->clk);
+ if (ret) {
+ dev_err(port->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
ret = request_irq(port->irq, ulite_isr, IRQF_SHARED | IRQF_TRIGGER_RISING,
"uartlite", port);
if (ret)
@@ -273,9 +286,12 @@ static int ulite_startup(struct uart_port *port)
static void ulite_shutdown(struct uart_port *port)
{
+ struct uartlite_data *pdata = port->private_data;
+
uart_out32(0, ULITE_CONTROL, port);
uart_in32(ULITE_CONTROL, port); /* dummy */
free_irq(port->irq, port);
+ clk_disable(pdata->clk);
}
static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -325,6 +341,7 @@ static void ulite_release_port(struct uart_port *port)
static int ulite_request_port(struct uart_port *port)
{
+ struct uartlite_data *pdata = port->private_data;
int ret;
pr_debug("ulite console: port=%p; port->mapbase=%llx\n",
@@ -342,13 +359,13 @@ static int ulite_request_port(struct uart_port *port)
return -EBUSY;
}
- port->private_data = (void *)&uartlite_be;
+ pdata->reg_ops = &uartlite_be;
ret = uart_in32(ULITE_CONTROL, port);
uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
ret = uart_in32(ULITE_STATUS, port);
/* Endianness detection */
if ((ret & ULITE_STATUS_TXEMPTY) != ULITE_STATUS_TXEMPTY)
- port->private_data = (void *)&uartlite_le;
+ pdata->reg_ops = &uartlite_le;
return 0;
}
@@ -365,6 +382,17 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
return -EINVAL;
}
+static void ulite_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uartlite_data *pdata = port->private_data;
+
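+ /*
+ * UART_PM_STATE_ON is 0 in enum uart_pm_state, so "!state" below
+ * means the port is being powered on.
+ */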
+ if (!state)
+ clk_enable(pdata->clk);
+ else
+ clk_disable(pdata->clk);
+}
+
#ifdef CONFIG_CONSOLE_POLL
static int ulite_get_poll_char(struct uart_port *port)
{
@@ -400,6 +428,7 @@ static const struct uart_ops ulite_ops = {
.request_port = ulite_request_port,
.config_port = ulite_config_port,
.verify_port = ulite_verify_port,
+ .pm = ulite_pm,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = ulite_get_poll_char,
.poll_put_char = ulite_put_poll_char,
@@ -585,10 +614,12 @@ static struct uart_driver ulite_uart_driver = {
* @id: requested id number. Pass -1 for automatic port assignment
* @base: base address of uartlite registers
* @irq: irq number for uartlite
+ * @pdata: private data for uartlite
*
* Returns: 0 on success, <0 otherwise
*/
-static int ulite_assign(struct device *dev, int id, u32 base, int irq)
+static int ulite_assign(struct device *dev, int id, u32 base, int irq,
+ struct uartlite_data *pdata)
{
struct uart_port *port;
int rc;
@@ -625,6 +656,7 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq)
port->dev = dev;
port->type = PORT_UNKNOWN;
port->line = id;
+ port->private_data = pdata;
dev_set_drvdata(dev, port);
@@ -658,10 +690,44 @@ static int ulite_release(struct device *dev)
return rc;
}
+/**
+ * ulite_suspend - Stop the device.
+ *
+ * @dev: handle to the device structure.
+ * Return: 0 always.
+ */
+static int __maybe_unused ulite_suspend(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+
+ if (port)
+ uart_suspend_port(&ulite_uart_driver, port);
+
+ return 0;
+}
+
+/**
+ * ulite_resume - Resume the device.
+ *
+ * @dev: handle to the device structure.
+ * Return: 0 on success, errno otherwise.
+ */
+static int __maybe_unused ulite_resume(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+
+ if (port)
+ uart_resume_port(&ulite_uart_driver, port);
+
+ return 0;
+}
+
/* ---------------------------------------------------------------------
* Platform bus binding
*/
+static SIMPLE_DEV_PM_OPS(ulite_pm_ops, ulite_suspend, ulite_resume);
+
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id ulite_of_match[] = {
@@ -675,7 +741,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
static int ulite_probe(struct platform_device *pdev)
{
struct resource *res;
- int irq;
+ struct uartlite_data *pdata;
+ int irq, ret;
int id = pdev->id;
#ifdef CONFIG_OF
const __be32 *prop;
@@ -684,6 +751,10 @@ static int ulite_probe(struct platform_device *pdev)
if (prop)
id = be32_to_cpup(prop);
#endif
+ pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -693,11 +764,33 @@ static int ulite_probe(struct platform_device *pdev)
if (irq <= 0)
return -ENXIO;
- return ulite_assign(&pdev->dev, id, res->start, irq);
+ pdata->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(pdata->clk)) {
+ if (PTR_ERR(pdata->clk) != -ENOENT)
+ return PTR_ERR(pdata->clk);
+
+ /*
+ * Clock framework support is optional, continue on
+ * anyways if we don't find a matching clock.
+ */
+ pdata->clk = NULL;
+ }
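+ /*
+ * Newer kernels provide devm_clk_get_optional(), which folds this
+ * pattern in by returning NULL instead of ERR_PTR(-ENOENT) when no
+ * clock is described.
+ */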
+
+ ret = clk_prepare(pdata->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare clock\n");
+ return ret;
+ }
+
+ return ulite_assign(&pdev->dev, id, res->start, irq, pdata);
}
static int ulite_remove(struct platform_device *pdev)
{
+ struct uart_port *port = dev_get_drvdata(&pdev->dev);
+ struct uartlite_data *pdata = port->private_data;
+
+ clk_disable_unprepare(pdata->clk);
return ulite_release(&pdev->dev);
}
@@ -710,6 +803,7 @@ static struct platform_driver ulite_platform_driver = {
.driver = {
.name = "uartlite",
.of_match_table = of_match_ptr(ulite_of_match),
+ .pm = &ulite_pm_ops,
},
};
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 8a3e34234e98..a48f19b1b88f 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -167,6 +167,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */
#define CDNS_UART_SR_TXFULL 0x00000010 /* TX FIFO full */
#define CDNS_UART_SR_RXTRIG 0x00000001 /* Rx Trigger */
+#define CDNS_UART_SR_TACTIVE 0x00000800 /* TX state machine active */
/* baud dividers min/max values */
#define CDNS_UART_BDIV_MIN 4
@@ -829,7 +830,7 @@ static int cdns_uart_startup(struct uart_port *port)
* the receiver.
*/
status = readl(port->membase + CDNS_UART_CR);
- status &= CDNS_UART_CR_RX_DIS;
+ status &= ~CDNS_UART_CR_RX_DIS;
status |= CDNS_UART_CR_RX_EN;
writel(status, port->membase + CDNS_UART_CR);
@@ -1099,23 +1100,14 @@ static const struct uart_ops cdns_uart_ops = {
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
/**
- * cdns_uart_console_wait_tx - Wait for the TX to be full
- * @port: Handle to the uart port structure
- */
-static void cdns_uart_console_wait_tx(struct uart_port *port)
-{
- while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
- barrier();
-}
-
-/**
* cdns_uart_console_putchar - write the character to the FIFO buffer
* @port: Handle to the uart port structure
* @ch: Character to be written
*/
static void cdns_uart_console_putchar(struct uart_port *port, int ch)
{
- cdns_uart_console_wait_tx(port);
+ while (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)
+ cpu_relax();
writel(ch, port->membase + CDNS_UART_FIFO);
}
@@ -1206,9 +1198,10 @@ static void cdns_uart_console_write(struct console *co, const char *s,
writel(ctrl, port->membase + CDNS_UART_CR);
uart_console_write(port, s, count, cdns_uart_console_putchar);
- cdns_uart_console_wait_tx(port);
-
- writel(ctrl, port->membase + CDNS_UART_CR);
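+ /*
+ * TXEMPTY only says the FIFO is drained; the last frame may still
+ * be shifting out, so also wait for the TX state machine (TACTIVE)
+ * to go idle before restoring the interrupt mask below.
+ */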
+ while ((readl(port->membase + CDNS_UART_SR) &
+ (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE)) !=
+ CDNS_UART_SR_TXEMPTY)
+ cpu_relax();
/* restore interrupt state */
writel(imr, port->membase + CDNS_UART_IER);
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 6364890575ec..06ed20dd01ba 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -348,7 +348,7 @@ static void send_sig_all(int sig)
if (is_global_init(p))
continue;
- do_send_sig_info(sig, SEND_SIG_FORCED, p, true);
+ do_send_sig_info(sig, SEND_SIG_FORCED, p, PIDTYPE_MAX);
}
read_unlock(&tasklist_lock);
}
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index e30aa6bf9ff9..50f567b6a66e 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -92,7 +92,7 @@ static void tty_audit_buf_push(struct tty_audit_buf *buf)
{
if (buf->valid == 0)
return;
- if (audit_enabled == 0) {
+ if (audit_enabled == AUDIT_OFF) {
buf->valid = 0;
return;
}
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 6ff8cdfc9d2a..7576ceace571 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -100,11 +100,11 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
if (cbaud == B0)
return tty_termios_baud_rate(termios);
-
+#ifdef BOTHER
/* Magic token for arbitrary speed via c_ispeed*/
if (cbaud == BOTHER)
return termios->c_ispeed;
-
+#endif
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
@@ -114,9 +114,9 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
cbaud += 15;
}
return baud_table[cbaud];
-#else
+#else /* IBSHIFT */
return tty_termios_baud_rate(termios);
-#endif
+#endif /* IBSHIFT */
}
EXPORT_SYMBOL(tty_termios_input_baud_rate);
@@ -156,19 +156,27 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
termios->c_ispeed = ibaud;
termios->c_ospeed = obaud;
+#ifdef IBSHIFT
+ if ((termios->c_cflag >> IBSHIFT) & CBAUD)
+ ibinput = 1; /* An input speed was specified */
+#endif
#ifdef BOTHER
/* If the user asked for a precise weird speed give a precise weird
answer. If they asked for a Bfoo speed they may have problems
digesting non-exact replies so fuzz a bit */
- if ((termios->c_cflag & CBAUD) == BOTHER)
+ if ((termios->c_cflag & CBAUD) == BOTHER) {
oclose = 0;
+ if (!ibinput)
+ iclose = 0;
+ }
if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
iclose = 0;
- if ((termios->c_cflag >> IBSHIFT) & CBAUD)
- ibinput = 1; /* An input speed was specified */
#endif
termios->c_cflag &= ~CBAUD;
+#ifdef IBSHIFT
+ termios->c_cflag &= ~(CBAUD << IBSHIFT);
+#endif
/*
* Our goal is to find a close match to the standard baud rate
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index aba59521ad48..32bc3e3fe4d3 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -814,9 +814,9 @@ void start_tty(struct tty_struct *tty)
}
EXPORT_SYMBOL(start_tty);
-static void tty_update_time(struct timespec *time)
+static void tty_update_time(struct timespec64 *time)
{
- unsigned long sec = get_seconds();
+ time64_t sec = ktime_get_real_seconds();
/*
* We only care if the two values differ in anything other than the
@@ -867,13 +867,8 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
i = -EIO;
tty_ldisc_deref(ld);
- if (i > 0) {
- struct timespec ts;
-
- ts = timespec64_to_timespec(inode->i_atime);
- tty_update_time(&ts);
- inode->i_atime = timespec_to_timespec64(ts);
- }
+ if (i > 0)
+ tty_update_time(&inode->i_atime);
return i;
}
@@ -974,11 +969,7 @@ static inline ssize_t do_tty_write(
cond_resched();
}
if (written) {
- struct timespec ts;
-
- ts = timespec64_to_timespec(file_inode(file)->i_mtime);
- tty_update_time(&ts);
- file_inode(file)->i_mtime = timespec_to_timespec64(ts);
+ tty_update_time(&file_inode(file)->i_mtime);
ret = written;
}
out:
@@ -2122,7 +2113,7 @@ static int __tty_fasync(int fd, struct file *filp, int on)
type = PIDTYPE_PGID;
} else {
pid = task_pid(current);
- type = PIDTYPE_PID;
+ type = PIDTYPE_TGID;
}
get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 37a91b3df980..0c98d88f795a 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -74,28 +74,6 @@ struct ldsem_waiter {
struct task_struct *task;
};
-static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
-{
- return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
-/*
- * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
- * Returns 1 if count was successfully changed; @*old will have @new value.
- * Returns 0 if count was not changed; @*old will have most recent sem->count
- */
-static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
-{
- long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
- if (tmp == *old) {
- *old = new;
- return 1;
- } else {
- *old = tmp;
- return 0;
- }
-}
-
/*
* Initialize an ldsem:
*/
@@ -109,7 +87,7 @@ void __init_ldsem(struct ld_semaphore *sem, const char *name,
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
- sem->count = LDSEM_UNLOCKED;
+ atomic_long_set(&sem->count, LDSEM_UNLOCKED);
sem->wait_readers = 0;
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->read_wait);
@@ -122,16 +100,17 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
struct task_struct *tsk;
long adjust, count;
- /* Try to grant read locks to all readers on the read wait list.
+ /*
+ * Try to grant read locks to all readers on the read wait list.
* Note the 'active part' of the count is incremented by
* the number of readers before waking any processes up.
*/
adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
- count = ldsem_atomic_update(adjust, sem);
+ count = atomic_long_add_return(adjust, &sem->count);
do {
if (count > 0)
break;
- if (ldsem_cmpxchg(&count, count - adjust, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
return;
} while (1);
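atomic_long_try_cmpxchg() returns true on success and, on failure, writes the freshly observed count back through its second argument. That built-in re-read is why the old ldsem_cmpxchg() wrapper (note its opposite return convention) could be dropped without adding explicit reloads to the retry loops. A minimal sketch of the idiom; READ_BIAS is a stand-in value, not ldsem's real bit layout:

	#include <linux/atomic.h>
	#include <linux/types.h>

	#define READ_BIAS 0x100	/* hypothetical reader increment */

	static bool try_take_read(atomic_long_t *count)
	{
		long c = atomic_long_read(count);

		while (c >= 0) {	/* no writer active or waiting */
			/* on failure, 'c' is refreshed and the loop re-tests */
			if (atomic_long_try_cmpxchg(count, &c, c + READ_BIAS))
				return true;
		}
		return false;
	}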
@@ -148,14 +127,15 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
static inline int writer_trylock(struct ld_semaphore *sem)
{
- /* only wake this writer if the active part of the count can be
+ /*
+ * Only wake this writer if the active part of the count can be
* transitioned from 0 -> 1
*/
- long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
+ long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
do {
if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
return 1;
- if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
return 0;
} while (1);
}
@@ -205,12 +185,16 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
/* set up my own style of waitqueue */
raw_spin_lock_irq(&sem->wait_lock);
- /* Try to reverse the lock attempt but if the count has changed
+ /*
+ * Try to reverse the lock attempt but if the count has changed
* so that reversing fails, check if there are no waiters,
- * and early-out if not */
+ * and early-out if not
+ */
do {
- if (ldsem_cmpxchg(&count, count + adjust, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
+ count += adjust;
break;
+ }
if (count > 0) {
raw_spin_unlock_irq(&sem->wait_lock);
return sem;
@@ -243,12 +227,14 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
__set_current_state(TASK_RUNNING);
if (!timeout) {
- /* lock timed out but check if this task was just
+ /*
+ * Lock timed out but check if this task was just
* granted lock ownership - if so, pretend there
- * was no timeout; otherwise, cleanup lock wait */
+ * was no timeout; otherwise, cleanup lock wait.
+ */
raw_spin_lock_irq(&sem->wait_lock);
if (waiter.task) {
- ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+ atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
list_del(&waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
put_task_struct(waiter.task);
@@ -273,11 +259,13 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
/* set up my own style of waitqueue */
raw_spin_lock_irq(&sem->wait_lock);
- /* Try to reverse the lock attempt but if the count has changed
+ /*
+ * Try to reverse the lock attempt but if the count has changed
* so that reversing fails, check if the lock is now owned,
- * and early-out if so */
+ * and early-out if so.
+ */
do {
- if (ldsem_cmpxchg(&count, count + adjust, sem))
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
break;
if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
raw_spin_unlock_irq(&sem->wait_lock);
@@ -303,7 +291,7 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
}
if (!locked)
- ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+ atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
list_del(&waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
@@ -324,7 +312,7 @@ static int __ldsem_down_read_nested(struct ld_semaphore *sem,
lockdep_acquire_read(sem, subclass, 0, _RET_IP_);
- count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
+ count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
if (count <= 0) {
lock_stat(sem, contended);
if (!down_read_failed(sem, count, timeout)) {
@@ -343,7 +331,7 @@ static int __ldsem_down_write_nested(struct ld_semaphore *sem,
lockdep_acquire(sem, subclass, 0, _RET_IP_);
- count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
+ count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
lock_stat(sem, contended);
if (!down_write_failed(sem, count, timeout)) {
@@ -370,10 +358,10 @@ int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
*/
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
- long count = sem->count;
+ long count = atomic_long_read(&sem->count);
while (count >= 0) {
- if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
lockdep_acquire_read(sem, 0, 1, _RET_IP_);
lock_stat(sem, acquired);
return 1;
@@ -396,10 +384,10 @@ int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
*/
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
- long count = sem->count;
+ long count = atomic_long_read(&sem->count);
while ((count & LDSEM_ACTIVE_MASK) == 0) {
- if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
+ if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
lockdep_acquire(sem, 0, 1, _RET_IP_);
lock_stat(sem, acquired);
return 1;
@@ -417,7 +405,7 @@ void ldsem_up_read(struct ld_semaphore *sem)
lockdep_release(sem, 1, _RET_IP_);
- count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
+ count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
ldsem_wake(sem);
}
@@ -431,7 +419,7 @@ void ldsem_up_write(struct ld_semaphore *sem)
lockdep_release(sem, 1, _RET_IP_);
- count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
+ count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
if (count < 0)
ldsem_wake(sem);
}
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index d5b4a2b44ab8..88312c6c92cc 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -690,7 +690,35 @@ static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
*/
static void k_dead(struct vc_data *vc, unsigned char value, char up_flag)
{
- static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
+ static const unsigned char ret_diacr[NR_DEAD] = {
+ '`', /* dead_grave */
+ '\'', /* dead_acute */
+ '^', /* dead_circumflex */
+ '~', /* dead_tilde */
+ '"', /* dead_diaeresis */
+ ',', /* dead_cedilla */
+ '_', /* dead_macron */
+ 'U', /* dead_breve */
+ '.', /* dead_abovedot */
+ '*', /* dead_abovering */
+ '=', /* dead_doubleacute */
+ 'c', /* dead_caron */
+ 'k', /* dead_ogonek */
+ 'i', /* dead_iota */
+ '#', /* dead_voiced_sound */
+ 'o', /* dead_semivoiced_sound */
+ '!', /* dead_belowdot */
+ '?', /* dead_hook */
+ '+', /* dead_horn */
+ '-', /* dead_stroke */
+ ')', /* dead_abovecomma */
+ '(', /* dead_abovereversedcomma */
+ ':', /* dead_doublegrave */
+ 'n', /* dead_invertedbreve */
+ ';', /* dead_belowcomma */
+ '$', /* dead_currency */
+ '@', /* dead_greek */
+ };
k_deadunicode(vc, ret_diacr[value], up_flag);
}
@@ -959,7 +987,7 @@ struct kbd_led_trigger {
unsigned int mask;
};
-static void kbd_led_trigger_activate(struct led_classdev *cdev)
+static int kbd_led_trigger_activate(struct led_classdev *cdev)
{
struct kbd_led_trigger *trigger =
container_of(cdev->trigger, struct kbd_led_trigger, trigger);
@@ -970,6 +998,8 @@ static void kbd_led_trigger_activate(struct led_classdev *cdev)
ledstate & trigger->mask ?
LED_FULL : LED_OFF);
tasklet_enable(&keyboard_tasklet);
+
+ return 0;
}
#define KBD_LED_TRIGGER(_led_bit, _name) { \
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 90ea1cc52b7a..07496c711d7d 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -57,11 +57,13 @@ static inline void highlight_pointer(const int where)
complement_pos(sel_cons, where);
}
-static u16
+static u32
sel_pos(int n)
{
+ if (use_unicode)
+ return screen_glyph_unicode(sel_cons, n / 2);
return inverse_translate(sel_cons, screen_glyph(sel_cons, n),
- use_unicode);
+ 0);
}
/**
@@ -90,7 +92,8 @@ static u32 inwordLut[]={
0x07FFFFFE, /* lowercase */
};
-static inline int inword(const u16 c) {
+static inline int inword(const u32 c)
+{
return c > 0x7f || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1);
}
@@ -116,14 +119,8 @@ static inline int atedge(const int p, int size_row)
return (!(p % size_row) || !((p + 2) % size_row));
}
-/* constrain v such that v <= u */
-static inline unsigned short limit(const unsigned short v, const unsigned short u)
-{
- return (v > u) ? u : v;
-}
-
-/* stores the char in UTF8 and returns the number of bytes used (1-3) */
-static int store_utf8(u16 c, char *p)
+/* stores the char in UTF8 and returns the number of bytes used (1-4) */
+static int store_utf8(u32 c, char *p)
{
if (c < 0x80) {
/* 0******* */
@@ -134,13 +131,26 @@ static int store_utf8(u16 c, char *p)
p[0] = 0xc0 | (c >> 6);
p[1] = 0x80 | (c & 0x3f);
return 2;
- } else {
+ } else if (c < 0x10000) {
/* 1110**** 10****** 10****** */
p[0] = 0xe0 | (c >> 12);
p[1] = 0x80 | ((c >> 6) & 0x3f);
p[2] = 0x80 | (c & 0x3f);
return 3;
- }
+ } else if (c < 0x110000) {
+ /* 11110*** 10****** 10****** 10****** */
+ p[0] = 0xf0 | (c >> 18);
+ p[1] = 0x80 | ((c >> 12) & 0x3f);
+ p[2] = 0x80 | ((c >> 6) & 0x3f);
+ p[3] = 0x80 | (c & 0x3f);
+ return 4;
+ } else {
+ /* outside Unicode, replace with U+FFFD */
+ p[0] = 0xef;
+ p[1] = 0xbf;
+ p[2] = 0xbd;
+ return 3;
+ }
}
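With the 4-byte branch in place, supplementary-plane characters survive a selection copy instead of being clipped to 16 bits. A few hand-checked store_utf8() results:

	U+00E9      -> c3 a9         (2 bytes)
	U+20AC      -> e2 82 ac      (3 bytes)
	U+1F600     -> f0 9f 98 80   (4 bytes, previously unrepresentable)
	>= U+110000 -> ef bf bd      (U+FFFD replacement character)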
/**
@@ -160,17 +170,17 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
struct tiocl_selection v;
char *bp, *obp;
int i, ps, pe, multiplier;
- u16 c;
+ u32 c;
int mode;
poke_blanked_console();
if (copy_from_user(&v, sel, sizeof(*sel)))
return -EFAULT;
- v.xs = limit(v.xs - 1, vc->vc_cols - 1);
- v.ys = limit(v.ys - 1, vc->vc_rows - 1);
- v.xe = limit(v.xe - 1, vc->vc_cols - 1);
- v.ye = limit(v.ye - 1, vc->vc_rows - 1);
+ v.xs = min_t(u16, v.xs - 1, vc->vc_cols - 1);
+ v.ys = min_t(u16, v.ys - 1, vc->vc_rows - 1);
+ v.xe = min_t(u16, v.xe - 1, vc->vc_cols - 1);
+ v.ye = min_t(u16, v.ye - 1, vc->vc_rows - 1);
ps = v.ys * vc->vc_size_row + (v.xs << 1);
pe = v.ye * vc->vc_size_row + (v.xe << 1);
@@ -279,7 +289,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
sel_end = new_sel_end;
/* Allocate a new buffer before freeing the old one ... */
- multiplier = use_unicode ? 3 : 1; /* chars can take up to 3 bytes */
+ multiplier = use_unicode ? 4 : 1; /* chars can take up to 4 bytes */
bp = kmalloc_array((sel_end - sel_start) / 2 + 1, multiplier,
GFP_KERNEL);
if (!bp) {
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index e4a66e1fd05f..2384ea85ffaf 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -10,6 +10,12 @@
* Attribute/character pair is in native endianity.
* [minor: N+128]
*
+ * /dev/vcsuN: similar to /dev/vcsaN but using 4-byte unicode values
+ * instead of 1-byte screen glyph values.
+ * [minor: N+64]
+ *
+ * /dev/vcsuaN: same idea as /dev/vcsaN for unicode (not yet implemented).
+ *
* This replaces screendump and part of selection, so that the system
* administrator can control access using file system permissions.
*
@@ -51,6 +57,26 @@
#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
+/*
+ * Our minor space:
+ *
+ * 0 ... 63 glyph mode without attributes
+ * 64 ... 127 unicode mode without attributes
+ * 128 ... 191 glyph mode with attributes
+ * 192 ... 255 unused (reserved for unicode with attributes)
+ *
+ * This relies on MAX_NR_CONSOLES being <= 63, meaning 63 actual consoles
+ * with minors 0, 64, 128 and 192 being proxies for the foreground console.
+ */
+#if MAX_NR_CONSOLES > 63
+#warning "/dev/vcs* devices may not accommodate more than 63 consoles"
+#endif
+
+#define console(inode) (iminor(inode) & 63)
+#define use_unicode(inode) (iminor(inode) & 64)
+#define use_attributes(inode) (iminor(inode) & 128)
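+
+/*
+ * Example: /dev/vcsu2 has minor 2 + 64 = 66, so console() yields 2
+ * with use_unicode() set and use_attributes() clear; /dev/vcsa1
+ * (minor 129) yields console() == 1 with use_attributes() set.
+ */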
+
+
struct vcs_poll_data {
struct notifier_block notifier;
unsigned int cons_num;
@@ -102,7 +128,7 @@ vcs_poll_data_get(struct file *file)
poll = kzalloc(sizeof(*poll), GFP_KERNEL);
if (!poll)
return NULL;
- poll->cons_num = iminor(file_inode(file)) & 127;
+ poll->cons_num = console(file_inode(file));
init_waitqueue_head(&poll->waitq);
poll->notifier.notifier_call = vcs_notifier;
if (register_vt_notifier(&poll->notifier) != 0) {
@@ -140,7 +166,7 @@ vcs_poll_data_get(struct file *file)
static struct vc_data*
vcs_vc(struct inode *inode, int *viewed)
{
- unsigned int currcons = iminor(inode) & 127;
+ unsigned int currcons = console(inode);
WARN_CONSOLE_UNLOCKED();
@@ -164,7 +190,6 @@ static int
vcs_size(struct inode *inode)
{
int size;
- int minor = iminor(inode);
struct vc_data *vc;
WARN_CONSOLE_UNLOCKED();
@@ -175,8 +200,12 @@ vcs_size(struct inode *inode)
size = vc->vc_rows * vc->vc_cols;
- if (minor & 128)
+ if (use_attributes(inode)) {
+ if (use_unicode(inode))
+ return -EOPNOTSUPP;
size = 2*size + HEADER_SIZE;
+ } else if (use_unicode(inode))
+ size *= 4;
return size;
}
@@ -197,12 +226,10 @@ static ssize_t
vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file_inode(file);
- unsigned int currcons = iminor(inode);
struct vc_data *vc;
struct vcs_poll_data *poll;
- long pos;
- long attr, read;
- int col, maxcol, viewed;
+ long pos, read;
+ int attr, uni_mode, row, col, maxcol, viewed;
unsigned short *org = NULL;
ssize_t ret;
char *con_buf;
@@ -218,7 +245,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*/
console_lock();
- attr = (currcons & 128);
+ uni_mode = use_unicode(inode);
+ attr = use_attributes(inode);
ret = -ENXIO;
vc = vcs_vc(inode, &viewed);
if (!vc)
@@ -227,6 +255,10 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
ret = -EINVAL;
if (pos < 0)
goto unlock_out;
+ /* we enforce 32-bit alignment for pos and count in unicode mode */
+ if (uni_mode && (pos | count) & 3)
+ goto unlock_out;
+
poll = file->private_data;
if (count && poll)
poll->seen_last_update = true;
@@ -266,7 +298,28 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
con_buf_start = con_buf0 = con_buf;
orig_count = this_round;
maxcol = vc->vc_cols;
- if (!attr) {
+ if (uni_mode) {
+ unsigned int nr;
+
+ ret = vc_uniscr_check(vc);
+ if (ret)
+ break;
+ p /= 4;
+ row = p / vc->vc_cols;
+ col = p % maxcol;
+ nr = maxcol - col;
+ do {
+ if (nr > this_round/4)
+ nr = this_round/4;
+ vc_uniscr_copy_line(vc, con_buf0, viewed,
+ row, col, nr);
+ con_buf0 += nr * 4;
+ this_round -= nr * 4;
+ row++;
+ col = 0;
+ nr = maxcol;
+ } while (this_round);
+ } else if (!attr) {
org = screen_pos(vc, p, viewed);
col = p % maxcol;
p += maxcol - col;
@@ -375,7 +428,6 @@ static ssize_t
vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file_inode(file);
- unsigned int currcons = iminor(inode);
struct vc_data *vc;
long pos;
long attr, size, written;
@@ -396,7 +448,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
*/
console_lock();
- attr = (currcons & 128);
+ attr = use_attributes(inode);
ret = -ENXIO;
vc = vcs_vc(inode, &viewed);
if (!vc)
@@ -593,9 +645,15 @@ vcs_fasync(int fd, struct file *file, int on)
static int
vcs_open(struct inode *inode, struct file *filp)
{
- unsigned int currcons = iminor(inode) & 127;
+ unsigned int currcons = console(inode);
+ bool attr = use_attributes(inode);
+ bool uni_mode = use_unicode(inode);
int ret = 0;
-
+
+ /* we currently don't support attributes in unicode mode */
+ if (attr && uni_mode)
+ return -EOPNOTSUPP;
+
console_lock();
if(currcons && !vc_cons_allocated(currcons-1))
ret = -ENXIO;
@@ -628,6 +686,8 @@ void vcs_make_sysfs(int index)
{
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 1), NULL,
"vcs%u", index + 1);
+ device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 65), NULL,
+ "vcsu%u", index + 1);
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 129), NULL,
"vcsa%u", index + 1);
}
@@ -635,6 +695,7 @@ void vcs_make_sysfs(int index)
void vcs_remove_sysfs(int index)
{
device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 1));
+ device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 65));
device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 129));
}
@@ -647,6 +708,7 @@ int __init vcs_init(void)
vc_class = class_create(THIS_MODULE, "vc");
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
+ device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 64), NULL, "vcsu");
device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
for (i = 0; i < MIN_NR_CONSOLES; i++)
vcs_make_sysfs(i);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 1eb1a376a041..5f1183b0b89d 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -104,6 +104,7 @@
#include <linux/kdb.h>
#include <linux/ctype.h>
#include <linux/bsearch.h>
+#include <linux/gcd.h>
#define MAX_NR_CON_DRIVER 16
@@ -317,6 +318,306 @@ void schedule_console_callback(void)
schedule_work(&console_work);
}
+/*
+ * Code to manage unicode-based screen buffers
+ */
+
+#ifdef NO_VC_UNI_SCREEN
+/* this disables and optimizes related code away at compile time */
+#define get_vc_uniscr(vc) NULL
+#else
+#define get_vc_uniscr(vc) vc->vc_uni_screen
+#endif
+
+#define VC_UNI_SCREEN_DEBUG 0
+
+typedef uint32_t char32_t;
+
+/*
+ * Our screen buffer is preceded by an array of line pointers so that
+ * scrolling only implies some pointer shuffling.
+ */
+struct uni_screen {
+ char32_t *lines[0];
+};
+
+static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
+{
+ struct uni_screen *uniscr;
+ void *p;
+ unsigned int memsize, i;
+
+ /* allocate everything in one go */
+ memsize = cols * rows * sizeof(char32_t);
+ memsize += rows * sizeof(char32_t *);
+ p = kmalloc(memsize, GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ /* initial line pointers */
+ uniscr = p;
+ p = uniscr->lines + rows;
+ for (i = 0; i < rows; i++) {
+ uniscr->lines[i] = p;
+ p += cols * sizeof(char32_t);
+ }
+ return uniscr;
+}
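+
+/*
+ * Layout example for cols = 4, rows = 2: the single block starts
+ * with the two row pointers lines[0] and lines[1], immediately
+ * followed by row 0's four char32_t cells and then row 1's; each
+ * lines[i] points into that trailing payload, so scrolling can
+ * swap pointers without copying cell data.
+ */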
+
+static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
+{
+ kfree(vc->vc_uni_screen);
+ vc->vc_uni_screen = new_uniscr;
+}
+
+static void vc_uniscr_putc(struct vc_data *vc, char32_t uc)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+
+ if (uniscr)
+ uniscr->lines[vc->vc_y][vc->vc_x] = uc;
+}
+
+static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+
+ if (uniscr) {
+ char32_t *ln = uniscr->lines[vc->vc_y];
+ unsigned int x = vc->vc_x, cols = vc->vc_cols;
+
+ memmove(&ln[x + nr], &ln[x], (cols - x - nr) * sizeof(*ln));
+ memset32(&ln[x], ' ', nr);
+ }
+}
+
+static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+
+ if (uniscr) {
+ char32_t *ln = uniscr->lines[vc->vc_y];
+ unsigned int x = vc->vc_x, cols = vc->vc_cols;
+
+ memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
+ memset32(&ln[cols - nr], ' ', nr);
+ }
+}
+
+static void vc_uniscr_clear_line(struct vc_data *vc, unsigned int x,
+ unsigned int nr)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+
+ if (uniscr) {
+ char32_t *ln = uniscr->lines[vc->vc_y];
+
+ memset32(&ln[x], ' ', nr);
+ }
+}
+
+static void vc_uniscr_clear_lines(struct vc_data *vc, unsigned int y,
+ unsigned int nr)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+
+ if (uniscr) {
+ unsigned int cols = vc->vc_cols;
+
+ while (nr--)
+ memset32(uniscr->lines[y++], ' ', cols);
+ }
+}
+
+static void vc_uniscr_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int nr)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+
+ if (uniscr) {
+ unsigned int i, j, k, sz, d, clear;
+
+ sz = b - t;
+ clear = b - nr;
+ d = nr;
+ if (dir == SM_DOWN) {
+ clear = t;
+ d = sz - nr;
+ }
+ for (i = 0; i < gcd(d, sz); i++) {
+ char32_t *tmp = uniscr->lines[t + i];
+ j = i;
+ while (1) {
+ k = j + d;
+ if (k >= sz)
+ k -= sz;
+ if (k == i)
+ break;
+ uniscr->lines[t + j] = uniscr->lines[t + k];
+ j = k;
+ }
+ uniscr->lines[t + j] = tmp;
+ }
+ vc_uniscr_clear_lines(vc, clear, nr);
+ }
+}
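+
+/*
+ * Worked example: t = 0, b = 6, SM_UP, nr = 2 gives d = 2, sz = 6
+ * and gcd(2, 6) = 2 rotation cycles (0 <- 2 <- 4 <- 0 and
+ * 1 <- 3 <- 5 <- 1), i.e. every line pointer moves up two rows,
+ * after which the nr bottom rows from clear = b - nr = 4 are
+ * blanked.
+ */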
+
+static void vc_uniscr_copy_area(struct uni_screen *dst,
+ unsigned int dst_cols,
+ unsigned int dst_rows,
+ struct uni_screen *src,
+ unsigned int src_cols,
+ unsigned int src_top_row,
+ unsigned int src_bot_row)
+{
+ unsigned int dst_row = 0;
+
+ if (!dst)
+ return;
+
+ while (src_top_row < src_bot_row) {
+ char32_t *src_line = src->lines[src_top_row];
+ char32_t *dst_line = dst->lines[dst_row];
+
+ memcpy(dst_line, src_line, src_cols * sizeof(char32_t));
+ if (dst_cols - src_cols)
+ memset32(dst_line + src_cols, ' ', dst_cols - src_cols);
+ src_top_row++;
+ dst_row++;
+ }
+ while (dst_row < dst_rows) {
+ char32_t *dst_line = dst->lines[dst_row];
+
+ memset32(dst_line, ' ', dst_cols);
+ dst_row++;
+ }
+}
+
+/*
+ * Called from vcs_read() to make sure unicode screen retrieval is possible.
+ * This will initialize the unicode screen buffer if not already done.
+ * This returns 0 if OK, or a negative error code otherwise.
+ * In particular, -ENODATA is returned if the console is not in UTF-8 mode.
+ */
+int vc_uniscr_check(struct vc_data *vc)
+{
+ struct uni_screen *uniscr;
+ unsigned short *p;
+ int x, y, mask;
+
+ if (__is_defined(NO_VC_UNI_SCREEN))
+ return -EOPNOTSUPP;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (!vc->vc_utf)
+ return -ENODATA;
+
+ if (vc->vc_uni_screen)
+ return 0;
+
+ uniscr = vc_uniscr_alloc(vc->vc_cols, vc->vc_rows);
+ if (!uniscr)
+ return -ENOMEM;
+
+ /*
+ * Let's populate it initially with (imperfect) reverse translation.
+ * This is the next best thing we can do short of having it enabled
+ * from the start even when no users rely on this functionality. True
+ * unicode content will be available after a complete screen refresh.
+ */
+ p = (unsigned short *)vc->vc_origin;
+ mask = vc->vc_hi_font_mask | 0xff;
+ for (y = 0; y < vc->vc_rows; y++) {
+ char32_t *line = uniscr->lines[y];
+ for (x = 0; x < vc->vc_cols; x++) {
+ u16 glyph = scr_readw(p++) & mask;
+ line[x] = inverse_translate(vc, glyph, true);
+ }
+ }
+
+ vc->vc_uni_screen = uniscr;
+ return 0;
+}
+
+/*
+ * Called from vcs_read() to get the unicode data from the screen.
+ * This must be preceded by a successful call to vc_uniscr_check() once
+ * the console lock has been taken.
+ */
+void vc_uniscr_copy_line(struct vc_data *vc, void *dest, int viewed,
+ unsigned int row, unsigned int col, unsigned int nr)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+ int offset = row * vc->vc_size_row + col * 2;
+ unsigned long pos;
+
+ BUG_ON(!uniscr);
+
+ pos = (unsigned long)screenpos(vc, offset, viewed);
+ if (pos >= vc->vc_origin && pos < vc->vc_scr_end) {
+ /*
+ * Desired position falls in the main screen buffer.
+ * However the actual row/col might be different if
+ * scrollback is active.
+ */
+ row = (pos - vc->vc_origin) / vc->vc_size_row;
+ col = ((pos - vc->vc_origin) % vc->vc_size_row) / 2;
+ memcpy(dest, &uniscr->lines[row][col], nr * sizeof(char32_t));
+ } else {
+ /*
+ * Scrollback is active. For now let's simply backtranslate
+ * the screen glyphs until the unicode screen buffer does
+ * synchronize with console display drivers for a scrollback
+ * buffer of its own.
+ */
+ u16 *p = (u16 *)pos;
+ int mask = vc->vc_hi_font_mask | 0xff;
+ char32_t *uni_buf = dest;
+ while (nr--) {
+ u16 glyph = scr_readw(p++) & mask;
+ *uni_buf++ = inverse_translate(vc, glyph, true);
+ }
+ }
+}
+
+/* this is for validation and debugging only */
+static void vc_uniscr_debug_check(struct vc_data *vc)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+ unsigned short *p;
+ int x, y, mask;
+
+ if (!VC_UNI_SCREEN_DEBUG || !uniscr)
+ return;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ /*
+ * Make sure our unicode screen translates into the same glyphs
+ * as the actual screen. This is brutal indeed.
+ */
+ p = (unsigned short *)vc->vc_origin;
+ mask = vc->vc_hi_font_mask | 0xff;
+ for (y = 0; y < vc->vc_rows; y++) {
+ char32_t *line = uniscr->lines[y];
+ for (x = 0; x < vc->vc_cols; x++) {
+ u16 glyph = scr_readw(p++) & mask;
+ char32_t uc = line[x];
+ int tc = conv_uni_to_pc(vc, uc);
+ if (tc == -4)
+ tc = conv_uni_to_pc(vc, 0xfffd);
+ if (tc == -4)
+ tc = conv_uni_to_pc(vc, '?');
+ if (tc != glyph)
+ pr_err_ratelimited(
+ "%s: mismatch at %d,%d: glyph=%#x tc=%#x\n",
+ __func__, x, y, glyph, tc);
+ }
+ }
+}
+
+
static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
enum con_scroll dir, unsigned int nr)
{
@@ -326,6 +627,7 @@ static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
+ vc_uniscr_scroll(vc, t, b, dir, nr);
if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr))
return;
@@ -533,6 +835,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p = (unsigned short *) vc->vc_pos;
+ vc_uniscr_insert(vc, nr);
scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2);
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
@@ -545,6 +848,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p = (unsigned short *) vc->vc_pos;
+ vc_uniscr_delete(vc, nr);
scr_memcpyw(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2);
scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char,
nr * 2);
@@ -784,7 +1088,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
- vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf)
goto err_free;
@@ -845,10 +1149,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
unsigned long end;
- unsigned int old_rows, old_row_size;
+ unsigned int old_rows, old_row_size, first_copied_row;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
unsigned int user;
unsigned short *newscreen;
+ struct uni_screen *new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
@@ -871,10 +1176,18 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_screen_size > (4 << 20))
return -EINVAL;
- newscreen = kmalloc(new_screen_size, GFP_USER);
+ newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (get_vc_uniscr(vc)) {
+ new_uniscr = vc_uniscr_alloc(new_cols, new_rows);
+ if (!new_uniscr) {
+ kfree(newscreen);
+ return -ENOMEM;
+ }
+ }
+
if (vc == sel_cons)
clear_selection();
@@ -884,6 +1197,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
err = resize_screen(vc, new_cols, new_rows, user);
if (err) {
kfree(newscreen);
+ kfree(new_uniscr);
return err;
}
@@ -904,18 +1218,24 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
* Cursor near the bottom, copy contents from the
* bottom of buffer
*/
- old_origin += (old_rows - new_rows) * old_row_size;
+ first_copied_row = (old_rows - new_rows);
} else {
/*
* Cursor is in no man's land, copy 1/2 screenful
* from the top and bottom of cursor position
*/
- old_origin += (vc->vc_y - new_rows/2) * old_row_size;
+ first_copied_row = (vc->vc_y - new_rows/2);
}
- }
-
+ old_origin += first_copied_row * old_row_size;
+ } else
+ first_copied_row = 0;
end = old_origin + old_row_size * min(old_rows, new_rows);
+ vc_uniscr_copy_area(new_uniscr, new_cols, new_rows,
+ get_vc_uniscr(vc), rlth/2, first_copied_row,
+ min(old_rows, new_rows));
+ vc_uniscr_set(vc, new_uniscr);
+
update_attr(vc);
while (old_origin < end) {
@@ -1013,6 +1333,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
vc->vc_sw->con_deinit(vc);
put_pid(vc->vt_pid);
module_put(vc->vc_sw->owner);
+ vc_uniscr_set(vc, NULL);
kfree(vc->vc_screenbuf);
vc_cons[currcons].d = NULL;
}
@@ -1171,15 +1492,22 @@ static void csi_J(struct vc_data *vc, int vpar)
switch (vpar) {
case 0: /* erase from cursor to end of display */
+ vc_uniscr_clear_line(vc, vc->vc_x,
+ vc->vc_cols - vc->vc_x);
+ vc_uniscr_clear_lines(vc, vc->vc_y + 1,
+ vc->vc_rows - vc->vc_y - 1);
count = (vc->vc_scr_end - vc->vc_pos) >> 1;
start = (unsigned short *)vc->vc_pos;
break;
case 1: /* erase from start to cursor */
+ vc_uniscr_clear_line(vc, 0, vc->vc_x + 1);
+ vc_uniscr_clear_lines(vc, 0, vc->vc_y);
count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
start = (unsigned short *)vc->vc_origin;
break;
case 2: /* erase whole display */
case 3: /* (and scrollback buffer later) */
+ vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
count = vc->vc_cols * vc->vc_rows;
start = (unsigned short *)vc->vc_origin;
break;
@@ -1200,25 +1528,27 @@ static void csi_J(struct vc_data *vc, int vpar)
static void csi_K(struct vc_data *vc, int vpar)
{
unsigned int count;
- unsigned short * start;
+ unsigned short *start = (unsigned short *)vc->vc_pos;
+ int offset;
switch (vpar) {
case 0: /* erase from cursor to end of line */
+ offset = 0;
count = vc->vc_cols - vc->vc_x;
- start = (unsigned short *)vc->vc_pos;
break;
case 1: /* erase from start of line to cursor */
- start = (unsigned short *)(vc->vc_pos - (vc->vc_x << 1));
+ offset = -vc->vc_x;
count = vc->vc_x + 1;
break;
case 2: /* erase whole line */
- start = (unsigned short *)(vc->vc_pos - (vc->vc_x << 1));
+ offset = -vc->vc_x;
count = vc->vc_cols;
break;
default:
return;
}
- scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
+ vc_uniscr_clear_line(vc, vc->vc_x + offset, count);
+ scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
do_update_region(vc, (unsigned long) start, count);
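/*
 * Aside (annotation, not part of the patch): after the rewrite above, all
 * three CSI K variants erase `count' cells starting `offset' cells from
 * the cursor column x:
 *
 *	vpar 0 (cursor to end of line):   offset = 0,  count = cols - x
 *	vpar 1 (start of line to cursor): offset = -x, count = x + 1
 *	vpar 2 (whole line):              offset = -x, count = cols
 *
 * so the unicode buffer (vc_uniscr_clear_line) and the glyph buffer
 * (scr_memsetw) are cleared from one shared pair of coordinates.
 */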
@@ -1232,6 +1562,7 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi
vpar++;
count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar;
+ vc_uniscr_clear_line(vc, vc->vc_x, count);
scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
if (con_should_update(vc))
vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count);
@@ -2188,7 +2519,7 @@ static void con_flush(struct vc_data *vc, unsigned long draw_from,
/* acquires console_lock */
static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
- int c, tc, ok, n = 0, draw_x = -1;
+ int c, next_c, tc, ok, n = 0, draw_x = -1;
unsigned int currcons;
unsigned long draw_from = 0, draw_to = 0;
struct vc_data *vc;
@@ -2382,6 +2713,7 @@ rescan_last_byte:
con_flush(vc, draw_from, draw_to, &draw_x);
}
+ next_c = c;
while (1) {
if (vc->vc_need_wrap || vc->vc_decim)
con_flush(vc, draw_from, draw_to,
@@ -2392,6 +2724,7 @@ rescan_last_byte:
}
if (vc->vc_decim)
insert_char(vc, 1);
+ vc_uniscr_putc(vc, next_c);
scr_writew(himask ?
((vc_attr << 8) & ~himask) + ((tc & 0x100) ? himask : 0) + (tc & 0xff) :
(vc_attr << 8) + tc,
@@ -2412,6 +2745,7 @@ rescan_last_byte:
tc = conv_uni_to_pc(vc, ' '); /* A space is printed in the second column */
if (tc < 0) tc = ' ';
+ next_c = ' ';
}
notify_write(vc, c);
@@ -2431,6 +2765,7 @@ rescan_last_byte:
do_con_trol(tty, vc, orig);
}
con_flush(vc, draw_from, draw_to, &draw_x);
+ vc_uniscr_debug_check(vc);
console_conditional_schedule();
console_unlock();
notify_update(vc);
@@ -4257,6 +4592,16 @@ u16 screen_glyph(struct vc_data *vc, int offset)
}
EXPORT_SYMBOL_GPL(screen_glyph);
+u32 screen_glyph_unicode(struct vc_data *vc, int n)
+{
+ struct uni_screen *uniscr = get_vc_uniscr(vc);
+
+ if (uniscr)
+ return uniscr->lines[n / vc->vc_cols][n % vc->vc_cols];
+ return inverse_translate(vc, screen_glyph(vc, n * 2), 1);
+}
+EXPORT_SYMBOL_GPL(screen_glyph_unicode);
+
/* used by vcs - note the word offset */
unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
{
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index e8f4ac9400ea..70a7981b94b3 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/uio/uio.c
*
@@ -9,8 +10,6 @@
* Userspace IO
*
* Base Functions
- *
- * Licensed under the GPLv2 only.
*/
#include <linux/module.h>
@@ -215,7 +214,20 @@ static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", idev->info->name);
+ int ret;
+
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = -EINVAL;
+ dev_err(dev, "the device has been unregistered\n");
+ goto out;
+ }
+
+ ret = sprintf(buf, "%s\n", idev->info->name);
+
+out:
+ mutex_unlock(&idev->info_lock);
+ return ret;
}
static DEVICE_ATTR_RO(name);
@@ -223,7 +235,20 @@ static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", idev->info->version);
+ int ret;
+
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = -EINVAL;
+ dev_err(dev, "the device has been unregistered\n");
+ goto out;
+ }
+
+ ret = sprintf(buf, "%s\n", idev->info->version);
+
+out:
+ mutex_unlock(&idev->info_lock);
+ return ret;
}
static DEVICE_ATTR_RO(version);
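/*
 * Aside (illustrative sketch, not part of the patch): both show() methods
 * above now follow the same shape: take info_lock, fail with -EINVAL if
 * the device was unregistered underneath us, otherwise format a string
 * from idev->info. uio_show_info_str() is a hypothetical helper:
 */
static ssize_t uio_show_info_str(struct device *dev, char *buf,
				 const char *(*get)(struct uio_info *))
{
	struct uio_device *idev = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {	/* cleared by uio_unregister_device() */
		ret = -EINVAL;
		goto out;
	}
	ret = sprintf(buf, "%s\n", get(idev->info));
out:
	mutex_unlock(&idev->info_lock);
	return ret;
}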
@@ -415,8 +440,9 @@ EXPORT_SYMBOL_GPL(uio_event_notify);
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
struct uio_device *idev = (struct uio_device *)dev_id;
- irqreturn_t ret = idev->info->handler(irq, idev->info);
+ irqreturn_t ret;
+ ret = idev->info->handler(irq, idev->info);
if (ret == IRQ_HANDLED)
uio_event_notify(idev->info);
@@ -433,7 +459,6 @@ static int uio_open(struct inode *inode, struct file *filep)
struct uio_device *idev;
struct uio_listener *listener;
int ret = 0;
- unsigned long flags;
mutex_lock(&minor_lock);
idev = idr_find(&uio_idr, iminor(inode));
@@ -460,10 +485,16 @@ static int uio_open(struct inode *inode, struct file *filep)
listener->event_count = atomic_read(&idev->event);
filep->private_data = listener;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ mutex_unlock(&idev->info_lock);
+ ret = -EINVAL;
+ goto err_alloc_listener;
+ }
+
if (idev->info && idev->info->open)
ret = idev->info->open(idev->info, inode);
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
if (ret)
goto err_infoopen;
@@ -495,12 +526,11 @@ static int uio_release(struct inode *inode, struct file *filep)
int ret = 0;
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
if (idev->info && idev->info->release)
ret = idev->info->release(idev->info, inode);
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
module_put(idev->owner);
kfree(listener);
@@ -513,12 +543,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
__poll_t ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq)
ret = -EIO;
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
if (ret)
return ret;
@@ -537,12 +566,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
DECLARE_WAITQUEUE(wait, current);
ssize_t retval = 0;
s32 event_count;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq)
retval = -EIO;
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
if (retval)
return retval;
@@ -592,33 +620,33 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
struct uio_device *idev = listener->dev;
ssize_t retval;
s32 irq_on;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
- if (!idev->info || !idev->info->irq) {
- retval = -EIO;
- goto out;
- }
+ if (count != sizeof(s32))
+ return -EINVAL;
- if (count != sizeof(s32)) {
+ if (copy_from_user(&irq_on, buf, count))
+ return -EFAULT;
+
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
retval = -EINVAL;
goto out;
}
- if (!idev->info->irqcontrol) {
- retval = -ENOSYS;
+ if (!idev->info || !idev->info->irq) {
+ retval = -EIO;
goto out;
}
- if (copy_from_user(&irq_on, buf, count)) {
- retval = -EFAULT;
+ if (!idev->info->irqcontrol) {
+ retval = -ENOSYS;
goto out;
}
retval = idev->info->irqcontrol(idev->info, irq_on);
out:
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
return retval ? retval : sizeof(s32);
}
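/*
 * Aside (annotation, not part of the patch): with info_lock now a mutex,
 * copy_from_user() could legally sleep under it; hoisting the size check
 * and the copy above the lock instead keeps the critical section short
 * and lets the -EINVAL/-EFAULT paths return without taking the lock.
 */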
@@ -640,10 +668,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
struct page *page;
unsigned long offset;
void *addr;
+ int ret = 0;
+ int mi;
- int mi = uio_find_mem_index(vmf->vma);
- if (mi < 0)
- return VM_FAULT_SIGBUS;
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+
+ mi = uio_find_mem_index(vmf->vma);
+ if (mi < 0) {
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
/*
* We need to subtract mi because userspace uses offset = N*PAGE_SIZE
@@ -658,7 +696,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
page = vmalloc_to_page(addr);
get_page(page);
vmf->page = page;
- return 0;
+
+out:
+ mutex_unlock(&idev->info_lock);
+
+ return ret;
}
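/*
 * Aside (annotation, not part of the patch): the fault path is a large
 * part of why info_lock became a mutex in this patch; the lock is now
 * held across driver callbacks and page-fault handling, which may sleep,
 * and the handler must observe idev->info == NULL once
 * uio_unregister_device() has cleared it.
 */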
static const struct vm_operations_struct uio_logical_vm_ops = {
@@ -683,6 +725,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
struct uio_device *idev = vma->vm_private_data;
int mi = uio_find_mem_index(vma);
struct uio_mem *mem;
+
if (mi < 0)
return -EINVAL;
mem = idev->info->mem + mi;
@@ -724,30 +767,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
vma->vm_private_data = idev;
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = -EINVAL;
+ goto out;
+ }
+
mi = uio_find_mem_index(vma);
- if (mi < 0)
- return -EINVAL;
+ if (mi < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
requested_pages = vma_pages(vma);
actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
+ idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
- if (requested_pages > actual_pages)
- return -EINVAL;
+ if (requested_pages > actual_pages) {
+ ret = -EINVAL;
+ goto out;
+ }
if (idev->info->mmap) {
ret = idev->info->mmap(idev->info, vma);
- return ret;
+ goto out;
}
switch (idev->info->mem[mi].memtype) {
case UIO_MEM_PHYS:
- return uio_mmap_physical(vma);
+ ret = uio_mmap_physical(vma);
+ break;
case UIO_MEM_LOGICAL:
case UIO_MEM_VIRTUAL:
- return uio_mmap_logical(vma);
+ ret = uio_mmap_logical(vma);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+out:
+ mutex_unlock(&idev->info_lock);
+ return ret;
}
static const struct file_operations uio_fops = {
@@ -865,7 +924,7 @@ int __uio_register_device(struct module *owner,
idev->owner = owner;
idev->info = info;
- spin_lock_init(&idev->info_lock);
+ mutex_init(&idev->info_lock);
init_waitqueue_head(&idev->wait);
atomic_set(&idev->event, 0);
@@ -891,8 +950,6 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_uio_dev_add_attributes;
- info->uio_dev = idev;
-
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
/*
* Note that we deliberately don't use devm_request_irq
@@ -908,6 +965,7 @@ int __uio_register_device(struct module *owner,
goto err_request_irq;
}
+ info->uio_dev = idev;
return 0;
err_request_irq:
@@ -928,7 +986,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
void uio_unregister_device(struct uio_info *info)
{
struct uio_device *idev;
- unsigned long flags;
if (!info || !info->uio_dev)
return;
@@ -937,14 +994,14 @@ void uio_unregister_device(struct uio_info *info)
uio_free_minor(idev);
+ mutex_lock(&idev->info_lock);
uio_dev_del_attributes(idev);
if (info->irq && info->irq != UIO_IRQ_CUSTOM)
free_irq(info->irq, idev);
- spin_lock_irqsave(&idev->info_lock, flags);
idev->info = NULL;
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
device_unregister(&idev->dev);
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index 30f533ce3758..ab60186f9759 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UIO Hilscher CIF card driver
*
* (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
* Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de>
- *
- * Licensed under GPL version 2 only.
- *
*/
#include <linux/device.h>
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index b55191335d90..bbc17effae5e 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* uio_fsl_elbc_gpcm: UIO driver for eLBC/GPCM peripherals
Copyright (C) 2014 Linutronix GmbH
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index c690d100adcd..e401be8321ab 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uio_hv_generic - generic UIO driver for VMBus
*
* Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
* Copyright (c) 2016, Microsoft Corporation.
*
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* Since the driver does not declare any device ids, you must allocate
* id and bind the device to the driver yourself. For example:
*
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index 4c345db8b016..9ae29ffde410 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UIO driver for Hilscher NetX based fieldbus cards (cifX, comX).
* See http://www.hilscher.com for details.
@@ -5,8 +6,6 @@
* (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
* (C) 2008 Manuel Traut <manut@linutronix.de>
*
- * Licensed under GPL version 2 only.
- *
*/
#include <linux/device.h>
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index a56fdf972dbe..8773e373ffe5 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/* uio_pci_generic - generic UIO driver for PCI 2.3 devices
*
* Copyright (C) 2009 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* Since the driver does not declare any device ids, you must allocate
* id and bind the device to the driver yourself. For example:
*
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 91aea8823af5..1cc175d3c25c 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -122,7 +122,7 @@ static int pruss_probe(struct platform_device *pdev)
struct uio_pruss_dev *gdev;
struct resource *regs_prussio;
struct device *dev = &pdev->dev;
- int ret = -ENODEV, cnt = 0, len;
+ int ret, cnt, i, len;
struct uio_pruss_pdata *pdata = dev_get_platdata(dev);
gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
@@ -131,8 +131,8 @@ static int pruss_probe(struct platform_device *pdev)
gdev->info = kcalloc(MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL);
if (!gdev->info) {
- kfree(gdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_gdev;
}
/* Power on PRU in case its not done as part of boot-loader */
@@ -140,29 +140,26 @@ static int pruss_probe(struct platform_device *pdev)
if (IS_ERR(gdev->pruss_clk)) {
dev_err(dev, "Failed to get clock\n");
ret = PTR_ERR(gdev->pruss_clk);
- kfree(gdev->info);
- kfree(gdev);
- return ret;
- } else {
- ret = clk_enable(gdev->pruss_clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- clk_put(gdev->pruss_clk);
- kfree(gdev->info);
- kfree(gdev);
- return ret;
- }
+ goto err_free_info;
+ }
+
+ ret = clk_enable(gdev->pruss_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ goto err_clk_put;
}
regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs_prussio) {
dev_err(dev, "No PRUSS I/O resource specified\n");
- goto out_free;
+ ret = -EIO;
+ goto err_clk_disable;
}
if (!regs_prussio->start) {
dev_err(dev, "Invalid memory resource\n");
- goto out_free;
+ ret = -EIO;
+ goto err_clk_disable;
}
if (pdata->sram_pool) {
@@ -172,7 +169,8 @@ static int pruss_probe(struct platform_device *pdev)
sram_pool_sz, &gdev->sram_paddr);
if (!gdev->sram_vaddr) {
dev_err(dev, "Could not allocate SRAM pool\n");
- goto out_free;
+ ret = -ENOMEM;
+ goto err_clk_disable;
}
}
@@ -180,14 +178,16 @@ static int pruss_probe(struct platform_device *pdev)
&(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
if (!gdev->ddr_vaddr) {
dev_err(dev, "Could not allocate external memory\n");
- goto out_free;
+ ret = -ENOMEM;
+ goto err_free_sram;
}
len = resource_size(regs_prussio);
gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
if (!gdev->prussio_vaddr) {
dev_err(dev, "Can't remap PRUSS I/O address range\n");
- goto out_free;
+ ret = -ENOMEM;
+ goto err_free_ddr_vaddr;
}
gdev->pintc_base = pdata->pintc_base;
@@ -215,15 +215,36 @@ static int pruss_probe(struct platform_device *pdev)
p->priv = gdev;
ret = uio_register_device(dev, p);
- if (ret < 0)
- goto out_free;
+ if (ret < 0) {
+ kfree(p->name);
+ goto err_unloop;
+ }
}
platform_set_drvdata(pdev, gdev);
return 0;
-out_free:
- pruss_cleanup(dev, gdev);
+err_unloop:
+ for (i = 0, p = gdev->info; i < cnt; i++, p++) {
+ uio_unregister_device(p);
+ kfree(p->name);
+ }
+ iounmap(gdev->prussio_vaddr);
+err_free_ddr_vaddr:
+ dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
+ gdev->ddr_paddr);
+err_free_sram:
+ if (pdata->sram_pool)
+ gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, sram_pool_sz);
+err_clk_disable:
+ clk_disable(gdev->pruss_clk);
+err_clk_put:
+ clk_put(gdev->pruss_clk);
+err_free_info:
+ kfree(gdev->info);
+err_free_gdev:
+ kfree(gdev);
+
return ret;
}
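/*
 * Aside (illustrative sketch, not part of the patch): the rework above
 * converts pruss_probe() to the standard kernel unwind idiom, where each
 * label undoes exactly one successful step in reverse order of
 * acquisition. A generic, self-contained example (the resources here are
 * made up, not taken from the driver):
 */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	void *buf;
	int ret;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);		/* step 1 */
	if (!buf)
		return -ENOMEM;

	clk = clk_get(&pdev->dev, NULL);		/* step 2 */
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_free_buf;	/* undo step 1 only */
	}

	ret = clk_enable(clk);				/* step 3 */
	if (ret)
		goto err_clk_put;	/* undo step 2, then step 1 */

	platform_set_drvdata(pdev, buf);	/* a real driver keeps clk too */
	return 0;

err_clk_put:
	clk_put(clk);
err_free_buf:
	kfree(buf);
	return ret;
}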
diff --git a/drivers/uio/uio_sercos3.c b/drivers/uio/uio_sercos3.c
index 9cfdfcafa262..9658a0887fee 100644
--- a/drivers/uio/uio_sercos3.c
+++ b/drivers/uio/uio_sercos3.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sercos3: UIO driver for the Automata Sercos III PCI card
Copyright (C) 2008 Linutronix GmbH
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 785f0ed037f7..ee34e9046f7e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
select EXTCON
select RESET_CONTROLLER
+ select USB_ULPI_BUS
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
help
Say Y here to enable host controller functionality of the
ChipIdea driver.
-
-config USB_CHIPIDEA_ULPI
- bool "ChipIdea ULPI PHY support"
- depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
- help
- Say Y here if you have a ULPI PHY attached to your ChipIdea
- controller.
-
endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index e3d5e728fa53..12df94f78f72 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
-ci_hdrc-y := core.o otg.o debug.o
+ci_hdrc-y := core.o otg.o debug.o ulpi.o
ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
-ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o
# Glue/Bridge layers go here
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 0bf244d50544..6a2cc5cd0281 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -240,10 +240,8 @@ struct ci_hdrc {
struct ci_hdrc_platform_data *platdata;
int vbus_active;
-#ifdef CONFIG_USB_CHIPIDEA_ULPI
struct ulpi *ulpi;
struct ulpi_ops ulpi_ops;
-#endif
struct phy *phy;
/* old usb_phy interface */
struct usb_phy *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
#endif
}
-#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
int ci_ulpi_init(struct ci_hdrc *ci);
void ci_ulpi_exit(struct ci_hdrc *ci);
int ci_ulpi_resume(struct ci_hdrc *ci);
-#else
-static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
-static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
-static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
-#endif
u32 hw_read_intr_enable(struct ci_hdrc *ci);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 7f4d2b6af37a..772851bee99b 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -33,11 +33,11 @@ static const struct tegra_udc_soc_info tegra30_udc_soc_info = {
};
static const struct tegra_udc_soc_info tegra114_udc_soc_info = {
- .flags = 0,
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
};
static const struct tegra_udc_soc_info tegra124_udc_soc_info = {
- .flags = 0,
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
};
static const struct of_device_id tegra_udc_of_match[] = {
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index af45aa3222b5..4638d9b066be 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci)
hcd->power_budget = ci->platdata->power_budget;
hcd->tpl_support = ci->platdata->tpl_support;
- if (ci->phy || ci->usb_phy)
+ if (ci->phy || ci->usb_phy) {
hcd->skip_phy_initialization = 1;
+ if (ci->usb_phy)
+ hcd->usb_phy = ci->usb_phy;
+ }
ehci = hcd_to_ehci(hcd);
ehci->caps = ci->hw_bank.cap;
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 6da42dcd2888..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
{
int cnt = 100000;
+ if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
+ return 0;
+
while (cnt-- > 0) {
if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
return 0;
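/*
 * Aside (annotation, not part of the patch): taken together, the chipidea
 * hunks above fold the optional USB_CHIPIDEA_ULPI support into the core
 * driver: the Kconfig symbol and its ifdef stubs go away, ulpi.o is
 * always built, and ci_ulpi_resume() instead returns early unless the
 * controller is actually wired to a ULPI PHY.
 */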
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7b366a6c0b49..27346d69f393 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -276,6 +276,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
{
int newctrl;
int difference;
+ unsigned long flags;
struct usb_cdc_notification *dr = (struct usb_cdc_notification *)buf;
unsigned char *data = buf + sizeof(struct usb_cdc_notification);
@@ -303,7 +304,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
}
difference = acm->ctrlin ^ newctrl;
- spin_lock(&acm->read_lock);
+ spin_lock_irqsave(&acm->read_lock, flags);
acm->ctrlin = newctrl;
acm->oldcount = acm->iocount;
@@ -321,7 +322,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
acm->iocount.parity++;
if (difference & ACM_CTRL_OVERRUN)
acm->iocount.overrun++;
- spin_unlock(&acm->read_lock);
+ spin_unlock_irqrestore(&acm->read_lock, flags);
if (difference)
wake_up_all(&acm->wioctl);
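/*
 * Aside (illustrative sketch, not part of the patch): this hunk and the
 * cdc-wdm, usblp, devio and message.c hunks below share one rationale;
 * URB completion handlers can no longer assume that the HCD core calls
 * them with local interrupts disabled, so any lock that is also taken
 * from interrupt context must use the _irqsave variants. The pattern in
 * isolation (example_dev is a made-up type):
 */
struct example_dev {
	spinlock_t lock;
	int ctrl_state;
};

static void example_complete(struct urb *urb)
{
	struct example_dev *dev = urb->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);	/* safe in any context */
	dev->ctrl_state = urb->status;		/* state shared with IRQ path */
	spin_unlock_irqrestore(&dev->lock, flags);
}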
@@ -1378,6 +1379,9 @@ made_compressed_probe:
if (acm == NULL)
goto alloc_fail;
+ tty_port_init(&acm->port);
+ acm->port.ops = &acm_port_ops;
+
minor = acm_alloc_minor(acm);
if (minor < 0)
goto alloc_fail1;
@@ -1413,22 +1417,20 @@ made_compressed_probe:
acm->out = usb_sndintpipe(usb_dev, epwrite->bEndpointAddress);
else
acm->out = usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress);
- tty_port_init(&acm->port);
- acm->port.ops = &acm_port_ops;
init_usb_anchor(&acm->delayed);
acm->quirks = quirks;
buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
if (!buf)
- goto alloc_fail2;
+ goto alloc_fail1;
acm->ctrl_buffer = buf;
if (acm_write_buffers_alloc(acm) < 0)
- goto alloc_fail4;
+ goto alloc_fail2;
acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
if (!acm->ctrlurb)
- goto alloc_fail5;
+ goto alloc_fail3;
for (i = 0; i < num_rx_buf; i++) {
struct acm_rb *rb = &(acm->read_buffers[i]);
@@ -1437,13 +1439,13 @@ made_compressed_probe:
rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
&rb->dma);
if (!rb->base)
- goto alloc_fail6;
+ goto alloc_fail4;
rb->index = i;
rb->instance = acm;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
- goto alloc_fail6;
+ goto alloc_fail4;
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = rb->dma;
@@ -1465,7 +1467,7 @@ made_compressed_probe:
snd->urb = usb_alloc_urb(0, GFP_KERNEL);
if (snd->urb == NULL)
- goto alloc_fail7;
+ goto alloc_fail5;
if (usb_endpoint_xfer_int(epwrite))
usb_fill_int_urb(snd->urb, usb_dev, acm->out,
@@ -1483,7 +1485,7 @@ made_compressed_probe:
i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
if (i < 0)
- goto alloc_fail7;
+ goto alloc_fail5;
if (h.usb_cdc_country_functional_desc) { /* export the country data */
struct usb_cdc_country_functional_desc * cfd =
@@ -1542,7 +1544,7 @@ skip_countries:
&control_interface->dev);
if (IS_ERR(tty_dev)) {
rv = PTR_ERR(tty_dev);
- goto alloc_fail8;
+ goto alloc_fail6;
}
if (quirks & CLEAR_HALT_CONDITIONS) {
@@ -1551,7 +1553,7 @@ skip_countries:
}
return 0;
-alloc_fail8:
+alloc_fail6:
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1560,23 +1562,21 @@ alloc_fail8:
kfree(acm->country_codes);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
-alloc_fail7:
+alloc_fail5:
usb_set_intfdata(intf, NULL);
for (i = 0; i < ACM_NW; i++)
usb_free_urb(acm->wb[i].urb);
-alloc_fail6:
+alloc_fail4:
for (i = 0; i < num_rx_buf; i++)
usb_free_urb(acm->read_urbs[i]);
acm_read_buffers_free(acm);
usb_free_urb(acm->ctrlurb);
-alloc_fail5:
+alloc_fail3:
acm_write_buffers_free(acm);
-alloc_fail4:
- usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
- acm_release_minor(acm);
+ usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail1:
- kfree(acm);
+ tty_port_put(&acm->port);
alloc_fail:
return rv;
}
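/*
 * Aside (annotation, not part of the patch): moving tty_port_init() ahead
 * of every failure point is what lets all unwind paths funnel into
 * tty_port_put(); the port kref now owns the acm structure, so the final
 * free happens in the port destructor rather than through an explicit
 * kfree(acm) in the error ladder.
 */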
@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
.driver_info = SINGLE_RX_URB,
},
+ { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
@@ -1828,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index a0d284ef3f40..bec581fb7c63 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -96,6 +96,7 @@ struct wdm_device {
struct mutex rlock;
wait_queue_head_t wait;
struct work_struct rxwork;
+ struct work_struct service_outs_intr;
int werr;
int rerr;
int resp_count;
@@ -141,26 +142,26 @@ found:
static void wdm_out_callback(struct urb *urb)
{
struct wdm_device *desc;
+ unsigned long flags;
+
desc = urb->context;
- spin_lock(&desc->iuspin);
+ spin_lock_irqsave(&desc->iuspin, flags);
desc->werr = urb->status;
- spin_unlock(&desc->iuspin);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
kfree(desc->outbuf);
desc->outbuf = NULL;
clear_bit(WDM_IN_USE, &desc->flags);
wake_up(&desc->wait);
}
-/* forward declaration */
-static int service_outstanding_interrupt(struct wdm_device *desc);
-
static void wdm_in_callback(struct urb *urb)
{
+ unsigned long flags;
struct wdm_device *desc = urb->context;
int status = urb->status;
int length = urb->actual_length;
- spin_lock(&desc->iuspin);
+ spin_lock_irqsave(&desc->iuspin, flags);
clear_bit(WDM_RESPONDING, &desc->flags);
if (status) {
@@ -209,8 +210,6 @@ static void wdm_in_callback(struct urb *urb)
}
}
skip_error:
- set_bit(WDM_READ, &desc->flags);
- wake_up(&desc->wait);
if (desc->rerr) {
/*
@@ -219,14 +218,17 @@ skip_error:
* We should respond to further attempts from the device to send
* data, so that we can get unstuck.
*/
- service_outstanding_interrupt(desc);
+ schedule_work(&desc->service_outs_intr);
+ } else {
+ set_bit(WDM_READ, &desc->flags);
+ wake_up(&desc->wait);
}
-
- spin_unlock(&desc->iuspin);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
}
static void wdm_int_callback(struct urb *urb)
{
+ unsigned long flags;
int rv = 0;
int responding;
int status = urb->status;
@@ -286,7 +288,7 @@ static void wdm_int_callback(struct urb *urb)
goto exit;
}
- spin_lock(&desc->iuspin);
+ spin_lock_irqsave(&desc->iuspin, flags);
responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
if (!desc->resp_count++ && !responding
&& !test_bit(WDM_DISCONNECTING, &desc->flags)
@@ -294,7 +296,7 @@ static void wdm_int_callback(struct urb *urb)
rv = usb_submit_urb(desc->response, GFP_ATOMIC);
dev_dbg(&desc->intf->dev, "submit response URB %d\n", rv);
}
- spin_unlock(&desc->iuspin);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
if (rv < 0) {
clear_bit(WDM_RESPONDING, &desc->flags);
if (rv == -EPERM)
@@ -758,6 +760,21 @@ static void wdm_rxwork(struct work_struct *work)
}
}
+static void service_interrupt_work(struct work_struct *work)
+{
+ struct wdm_device *desc;
+
+ desc = container_of(work, struct wdm_device, service_outs_intr);
+
+ spin_lock_irq(&desc->iuspin);
+ service_outstanding_interrupt(desc);
+ if (!desc->resp_count) {
+ set_bit(WDM_READ, &desc->flags);
+ wake_up(&desc->wait);
+ }
+ spin_unlock_irq(&desc->iuspin);
+}
+
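/*
 * Aside (annotation, not part of the patch): deferring
 * service_outstanding_interrupt() to a work item moves the response-URB
 * resubmission out of the in-URB completion handler; the work runs in
 * process context, where plain spin_lock_irq() suffices, and WDM_READ is
 * only signalled once no further response is outstanding.
 */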
/* --- hotplug --- */
static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
@@ -779,6 +796,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber);
desc->intf = intf;
INIT_WORK(&desc->rxwork, wdm_rxwork);
+ INIT_WORK(&desc->service_outs_intr, service_interrupt_work);
rv = -EINVAL;
if (!usb_endpoint_is_int_in(ep))
@@ -964,6 +982,7 @@ static void wdm_disconnect(struct usb_interface *intf)
mutex_lock(&desc->wlock);
kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
+ cancel_work_sync(&desc->service_outs_intr);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
@@ -1006,6 +1025,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
/* callback submits work - order is essential */
kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
+ cancel_work_sync(&desc->service_outs_intr);
}
if (!PMSG_IS_AUTO(message)) {
mutex_unlock(&desc->wlock);
@@ -1065,6 +1085,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
mutex_lock(&desc->wlock);
kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
+ cancel_work_sync(&desc->service_outs_intr);
return 0;
}
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index d058d7a31e7c..407a7a6198a2 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -292,6 +292,7 @@ static void usblp_bulk_read(struct urb *urb)
{
struct usblp *usblp = urb->context;
int status = urb->status;
+ unsigned long flags;
if (usblp->present && usblp->used) {
if (status)
@@ -299,14 +300,14 @@ static void usblp_bulk_read(struct urb *urb)
"nonzero read bulk status received: %d\n",
usblp->minor, status);
}
- spin_lock(&usblp->lock);
+ spin_lock_irqsave(&usblp->lock, flags);
if (status < 0)
usblp->rstatus = status;
else
usblp->rstatus = urb->actual_length;
usblp->rcomplete = 1;
wake_up(&usblp->rwait);
- spin_unlock(&usblp->lock);
+ spin_unlock_irqrestore(&usblp->lock, flags);
usb_free_urb(urb);
}
@@ -315,6 +316,7 @@ static void usblp_bulk_write(struct urb *urb)
{
struct usblp *usblp = urb->context;
int status = urb->status;
+ unsigned long flags;
if (usblp->present && usblp->used) {
if (status)
@@ -322,7 +324,7 @@ static void usblp_bulk_write(struct urb *urb)
"nonzero write bulk status received: %d\n",
usblp->minor, status);
}
- spin_lock(&usblp->lock);
+ spin_lock_irqsave(&usblp->lock, flags);
if (status < 0)
usblp->wstatus = status;
else
@@ -330,7 +332,7 @@ static void usblp_bulk_write(struct urb *urb)
usblp->no_paper = 0;
usblp->wcomplete = 1;
wake_up(&usblp->wwait);
- spin_unlock(&usblp->lock);
+ spin_unlock_irqrestore(&usblp->lock, flags);
usb_free_urb(urb);
}
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 529295a17579..83ffa5a14c3d 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -18,6 +18,7 @@
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/usb.h>
+#include <linux/compat.h>
#include <linux/usb/tmc.h>
@@ -30,6 +31,8 @@
*/
#define USBTMC_SIZE_IOBUFFER 2048
+/* Minimum USB timeout (in milliseconds) */
+#define USBTMC_MIN_TIMEOUT 100
/* Default USB timeout (in milliseconds) */
#define USBTMC_TIMEOUT 5000
@@ -67,6 +70,7 @@ struct usbtmc_device_data {
const struct usb_device_id *id;
struct usb_device *usb_dev;
struct usb_interface *intf;
+ struct list_head file_list;
unsigned int bulk_in;
unsigned int bulk_out;
@@ -87,7 +91,6 @@ struct usbtmc_device_data {
int iin_interval;
struct urb *iin_urb;
u16 iin_wMaxPacketSize;
- atomic_t srq_asserted;
/* coalesced usb488_caps from usbtmc_dev_capabilities */
__u8 usb488_caps;
@@ -104,9 +107,25 @@ struct usbtmc_device_data {
struct mutex io_mutex; /* only one i/o function running at a time */
wait_queue_head_t waitq;
struct fasync_struct *fasync;
+ spinlock_t dev_lock; /* lock for file_list */
};
#define to_usbtmc_data(d) container_of(d, struct usbtmc_device_data, kref)
+/*
+ * This structure holds private data for each USBTMC file handle.
+ */
+struct usbtmc_file_data {
+ struct usbtmc_device_data *data;
+ struct list_head file_elem;
+
+ u32 timeout;
+ u8 srq_byte;
+ atomic_t srq_asserted;
+ u8 eom_val;
+ u8 term_char;
+ bool term_char_enabled;
+};
+
/* Forward declarations */
static struct usb_driver usbtmc_driver;
@@ -122,7 +141,7 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
{
struct usb_interface *intf;
struct usbtmc_device_data *data;
- int retval = 0;
+ struct usbtmc_file_data *file_data;
intf = usb_find_interface(&usbtmc_driver, iminor(inode));
if (!intf) {
@@ -130,21 +149,51 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
return -ENODEV;
}
+ file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
+ if (!file_data)
+ return -ENOMEM;
+
data = usb_get_intfdata(intf);
/* Protect reference to data from file structure until release */
kref_get(&data->kref);
+ mutex_lock(&data->io_mutex);
+ file_data->data = data;
+
+ /* copy default values from device settings */
+ file_data->timeout = USBTMC_TIMEOUT;
+ file_data->term_char = data->TermChar;
+ file_data->term_char_enabled = data->TermCharEnabled;
+ file_data->eom_val = 1;
+
+ INIT_LIST_HEAD(&file_data->file_elem);
+ spin_lock_irq(&data->dev_lock);
+ list_add_tail(&file_data->file_elem, &data->file_list);
+ spin_unlock_irq(&data->dev_lock);
+ mutex_unlock(&data->io_mutex);
+
/* Store pointer in file structure's private data field */
- filp->private_data = data;
+ filp->private_data = file_data;
- return retval;
+ return 0;
}
static int usbtmc_release(struct inode *inode, struct file *file)
{
- struct usbtmc_device_data *data = file->private_data;
+ struct usbtmc_file_data *file_data = file->private_data;
- kref_put(&data->kref, usbtmc_delete);
+ /* prevent IO _AND_ usbtmc_interrupt */
+ mutex_lock(&file_data->data->io_mutex);
+ spin_lock_irq(&file_data->data->dev_lock);
+
+ list_del(&file_data->file_elem);
+
+ spin_unlock_irq(&file_data->data->dev_lock);
+ mutex_unlock(&file_data->data->io_mutex);
+
+ kref_put(&file_data->data->kref, usbtmc_delete);
+ file_data->data = NULL;
+ kfree(file_data);
return 0;
}
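/*
 * Aside (annotation, not part of the patch): the lifetime rules
 * introduced above, in short: each open() allocates a usbtmc_file_data,
 * takes a kref on the shared device data and links itself into
 * data->file_list under dev_lock; release() unlinks under io_mutex plus
 * dev_lock, so usbtmc_interrupt() never walks a stale entry, and drops
 * the kref last.
 */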
@@ -369,10 +418,12 @@ exit:
return rv;
}
-static int usbtmc488_ioctl_read_stb(struct usbtmc_device_data *data,
+static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
void __user *arg)
{
+ struct usbtmc_device_data *data = file_data->data;
struct device *dev = &data->intf->dev;
+ int srq_asserted = 0;
u8 *buffer;
u8 tag;
__u8 stb;
@@ -381,15 +432,25 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_device_data *data,
dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
data->iin_ep_present);
+ spin_lock_irq(&data->dev_lock);
+ srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
+ if (srq_asserted) {
+ /* a STB with SRQ is already received */
+ stb = file_data->srq_byte;
+ spin_unlock_irq(&data->dev_lock);
+ rv = put_user(stb, (__u8 __user *)arg);
+ dev_dbg(dev, "stb:0x%02x with srq received %d\n",
+ (unsigned int)stb, rv);
+ return rv;
+ }
+ spin_unlock_irq(&data->dev_lock);
+
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
atomic_set(&data->iin_data_valid, 0);
- /* must issue read_stb before using poll or select */
- atomic_set(&data->srq_asserted, 0);
-
rv = usb_control_msg(data->usb_dev,
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC488_REQUEST_READ_STATUS_BYTE,
@@ -412,7 +473,7 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_device_data *data,
rv = wait_event_interruptible_timeout(
data->waitq,
atomic_read(&data->iin_data_valid) != 0,
- USBTMC_TIMEOUT);
+ file_data->timeout);
if (rv < 0) {
dev_dbg(dev, "wait interrupted %d\n", rv);
goto exit;
@@ -420,7 +481,7 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_device_data *data,
if (rv == 0) {
dev_dbg(dev, "wait timed out\n");
- rv = -ETIME;
+ rv = -ETIMEDOUT;
goto exit;
}
@@ -435,9 +496,8 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_device_data *data,
stb = buffer[2];
}
- rv = copy_to_user(arg, &stb, sizeof(stb));
- if (rv)
- rv = -EFAULT;
+ rv = put_user(stb, (__u8 __user *)arg);
+ dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)stb, rv);
exit:
/* bump interrupt bTag */
@@ -506,6 +566,51 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
}
/*
+ * Sends a TRIGGER Bulk-OUT command message
+ * See the USBTMC-USB488 specification, Table 2.
+ *
+ * Also updates bTag_last_write.
+ */
+static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ int retval;
+ u8 *buffer;
+ int actual;
+
+ buffer = kzalloc(USBTMC_HEADER_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer[0] = 128;
+ buffer[1] = data->bTag;
+ buffer[2] = ~data->bTag;
+
+ retval = usb_bulk_msg(data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev,
+ data->bulk_out),
+ buffer, USBTMC_HEADER_SIZE,
+ &actual, file_data->timeout);
+
+ /* Store bTag (in case we need to abort) */
+ data->bTag_last_write = data->bTag;
+
+ /* Increment bTag -- and increment again if zero */
+ data->bTag++;
+ if (!data->bTag)
+ data->bTag++;
+
+ kfree(buffer);
+ if (retval < 0) {
+ dev_err(&data->intf->dev, "%s returned %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ return 0;
+}
+
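/*
 * Aside (annotation, not part of the patch): MsgID 128 in byte 0 is
 * TRIGGER per the USBTMC-USB488 specification (Table 2); bytes 1 and 2
 * carry bTag and its complement. The bTag bookkeeping used throughout
 * the driver, in isolation:
 *
 *	data->bTag_last_write = data->bTag;	// remember for abort
 *	if (!++data->bTag)			// bTag must never be 0
 *		data->bTag++;
 */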
+/*
* Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
* @transfer_size: number of bytes to request from the device.
*
@@ -513,8 +618,10 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
*
* Also updates bTag_last_write.
*/
-static int send_request_dev_dep_msg_in(struct usbtmc_device_data *data, size_t transfer_size)
+static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
+ size_t transfer_size)
{
+ struct usbtmc_device_data *data = file_data->data;
int retval;
u8 *buffer;
int actual;
@@ -533,9 +640,9 @@ static int send_request_dev_dep_msg_in(struct usbtmc_device_data *data, size_t t
buffer[5] = transfer_size >> 8;
buffer[6] = transfer_size >> 16;
buffer[7] = transfer_size >> 24;
- buffer[8] = data->TermCharEnabled * 2;
+ buffer[8] = file_data->term_char_enabled * 2;
/* Use term character? */
- buffer[9] = data->TermChar;
+ buffer[9] = file_data->term_char;
buffer[10] = 0; /* Reserved */
buffer[11] = 0; /* Reserved */
@@ -543,7 +650,8 @@ static int send_request_dev_dep_msg_in(struct usbtmc_device_data *data, size_t t
retval = usb_bulk_msg(data->usb_dev,
usb_sndbulkpipe(data->usb_dev,
data->bulk_out),
- buffer, USBTMC_HEADER_SIZE, &actual, USBTMC_TIMEOUT);
+ buffer, USBTMC_HEADER_SIZE,
+ &actual, file_data->timeout);
/* Store bTag (in case we need to abort) */
data->bTag_last_write = data->bTag;
@@ -565,6 +673,7 @@ static int send_request_dev_dep_msg_in(struct usbtmc_device_data *data, size_t t
static ssize_t usbtmc_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
+ struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
struct device *dev;
u32 n_characters;
@@ -576,7 +685,8 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
size_t this_part;
/* Get pointer to private data structure */
- data = filp->private_data;
+ file_data = filp->private_data;
+ data = file_data->data;
dev = &data->intf->dev;
buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
@@ -591,7 +701,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
dev_dbg(dev, "usb_bulk_msg_in: count(%zu)\n", count);
- retval = send_request_dev_dep_msg_in(data, count);
+ retval = send_request_dev_dep_msg_in(file_data, count);
if (retval < 0) {
if (data->auto_abort)
@@ -610,7 +720,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
usb_rcvbulkpipe(data->usb_dev,
data->bulk_in),
buffer, USBTMC_SIZE_IOBUFFER, &actual,
- USBTMC_TIMEOUT);
+ file_data->timeout);
dev_dbg(dev, "usb_bulk_msg: retval(%u), done(%zu), remaining(%zu), actual(%d)\n", retval, done, remaining, actual);
@@ -721,6 +831,7 @@ exit:
static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
+ struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
u8 *buffer;
int retval;
@@ -730,7 +841,8 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
int done;
int this_part;
- data = filp->private_data;
+ file_data = filp->private_data;
+ data = file_data->data;
buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
if (!buffer)
@@ -751,7 +863,7 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
buffer[8] = 0;
} else {
this_part = remaining;
- buffer[8] = 1;
+ buffer[8] = file_data->eom_val;
}
/* Setup IO buffer for DEV_DEP_MSG_OUT message */
@@ -781,7 +893,7 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
usb_sndbulkpipe(data->usb_dev,
data->bulk_out),
buffer, n_bytes,
- &actual, USBTMC_TIMEOUT);
+ &actual, file_data->timeout);
if (retval != 0)
break;
n_bytes -= actual;
@@ -1138,12 +1250,91 @@ exit:
return rv;
}
+/*
+ * Get the USB transfer timeout (in milliseconds) for this file handle
+ */
+static int usbtmc_ioctl_get_timeout(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ u32 timeout;
+
+ timeout = file_data->timeout;
+
+ return put_user(timeout, (__u32 __user *)arg);
+}
+
+/*
+ * Set the USB transfer timeout (in milliseconds) for this file handle
+ */
+static int usbtmc_ioctl_set_timeout(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ u32 timeout;
+
+ if (get_user(timeout, (__u32 __user *)arg))
+ return -EFAULT;
+
+ /* Note that timeout = 0 would mean MAX_SCHEDULE_TIMEOUT
+ * in usb_control_msg().
+ */
+ if (timeout < USBTMC_MIN_TIMEOUT)
+ return -EINVAL;
+
+ file_data->timeout = timeout;
+
+ return 0;
+}
+
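/*
 * Aside (userspace sketch, not part of the patch; assumes the
 * USBTMC_IOCTL_GET_TIMEOUT/USBTMC_IOCTL_SET_TIMEOUT definitions this
 * series adds to <linux/usb/tmc.h>):
 *
 *	__u32 ms = 2000;
 *	if (ioctl(fd, USBTMC_IOCTL_SET_TIMEOUT, &ms))	// must be >= 100 ms
 *		perror("set timeout");
 *	ioctl(fd, USBTMC_IOCTL_GET_TIMEOUT, &ms);	// read it back
 */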
+/*
+ * Enable/disable sending the EOM (end-of-message) bit on write
+ */
+static int usbtmc_ioctl_eom_enable(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ u8 eom_enable;
+
+ if (copy_from_user(&eom_enable, arg, sizeof(eom_enable)))
+ return -EFAULT;
+
+ if (eom_enable > 1)
+ return -EINVAL;
+
+ file_data->eom_val = eom_enable;
+
+ return 0;
+}
+
+/*
+ * Configure termination character for read()
+ */
+static int usbtmc_ioctl_config_termc(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_termchar termc;
+
+ if (copy_from_user(&termc, arg, sizeof(termc)))
+ return -EFAULT;
+
+ if ((termc.term_char_enabled > 1) ||
+ (termc.term_char_enabled &&
+ !(file_data->data->capabilities.device_capabilities & 1)))
+ return -EINVAL;
+
+ file_data->term_char = termc.term_char;
+ file_data->term_char_enabled = termc.term_char_enabled;
+
+ return 0;
+}
+
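/*
 * Aside (userspace sketch, not part of the patch; struct usbtmc_termchar
 * is assumed to be the one this series adds, and enabling it is refused
 * unless the device reports TermChar support in its capabilities):
 *
 *	struct usbtmc_termchar tc = {
 *		.term_char = '\n',
 *		.term_char_enabled = 1,
 *	};
 *	ioctl(fd, USBTMC_IOCTL_CONFIG_TERMCHAR, &tc);
 */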
static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
int retval = -EBADRQC;
- data = file->private_data;
+ file_data = file->private_data;
+ data = file_data->data;
+
mutex_lock(&data->io_mutex);
if (data->zombie) {
retval = -ENODEV;
@@ -1175,6 +1366,26 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
retval = usbtmc_ioctl_abort_bulk_in(data);
break;
+ case USBTMC_IOCTL_GET_TIMEOUT:
+ retval = usbtmc_ioctl_get_timeout(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_SET_TIMEOUT:
+ retval = usbtmc_ioctl_set_timeout(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_EOM_ENABLE:
+ retval = usbtmc_ioctl_eom_enable(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_CONFIG_TERMCHAR:
+ retval = usbtmc_ioctl_config_termc(file_data,
+ (void __user *)arg);
+ break;
+
case USBTMC488_IOCTL_GET_CAPS:
retval = copy_to_user((void __user *)arg,
&data->usb488_caps,
@@ -1184,7 +1395,8 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case USBTMC488_IOCTL_READ_STB:
- retval = usbtmc488_ioctl_read_stb(data, (void __user *)arg);
+ retval = usbtmc488_ioctl_read_stb(file_data,
+ (void __user *)arg);
break;
case USBTMC488_IOCTL_REN_CONTROL:
@@ -1201,6 +1413,10 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
USBTMC488_REQUEST_LOCAL_LOCKOUT);
break;
+
+ case USBTMC488_IOCTL_TRIGGER:
+ retval = usbtmc488_ioctl_trigger(file_data);
+ break;
}
skip_io_on_zombie:
@@ -1210,14 +1426,15 @@ skip_io_on_zombie:
static int usbtmc_fasync(int fd, struct file *file, int on)
{
- struct usbtmc_device_data *data = file->private_data;
+ struct usbtmc_file_data *file_data = file->private_data;
- return fasync_helper(fd, file, on, &data->fasync);
+ return fasync_helper(fd, file, on, &file_data->data->fasync);
}
static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
{
- struct usbtmc_device_data *data = file->private_data;
+ struct usbtmc_file_data *file_data = file->private_data;
+ struct usbtmc_device_data *data = file_data->data;
__poll_t mask;
mutex_lock(&data->io_mutex);
@@ -1229,7 +1446,7 @@ static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
poll_wait(file, &data->waitq, wait);
- mask = (atomic_read(&data->srq_asserted)) ? EPOLLIN | EPOLLRDNORM : 0;
+ mask = (atomic_read(&file_data->srq_asserted)) ? EPOLLPRI : 0;
no_poll:
mutex_unlock(&data->io_mutex);
@@ -1243,6 +1460,9 @@ static const struct file_operations fops = {
.open = usbtmc_open,
.release = usbtmc_release,
.unlocked_ioctl = usbtmc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = usbtmc_ioctl,
+#endif
.fasync = usbtmc_fasync,
.poll = usbtmc_poll,
.llseek = default_llseek,
@@ -1276,15 +1496,33 @@ static void usbtmc_interrupt(struct urb *urb)
}
/* check for SRQ notification */
if (data->iin_buffer[0] == 0x81) {
+ unsigned long flags;
+ struct list_head *elem;
+
if (data->fasync)
kill_fasync(&data->fasync,
- SIGIO, POLL_IN);
+ SIGIO, POLL_PRI);
- atomic_set(&data->srq_asserted, 1);
- wake_up_interruptible(&data->waitq);
+ spin_lock_irqsave(&data->dev_lock, flags);
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ file_data->srq_byte = data->iin_buffer[1];
+ atomic_set(&file_data->srq_asserted, 1);
+ }
+ spin_unlock_irqrestore(&data->dev_lock, flags);
+
+ dev_dbg(dev, "srq received bTag %x stb %x\n",
+ (unsigned int)data->iin_buffer[0],
+ (unsigned int)data->iin_buffer[1]);
+ wake_up_interruptible_all(&data->waitq);
goto exit;
}
- dev_warn(dev, "invalid notification: %x\n", data->iin_buffer[0]);
+ dev_warn(dev, "invalid notification: %x\n",
+ data->iin_buffer[0]);
break;
case -EOVERFLOW:
dev_err(dev, "overflow with length %d, actual length is %d\n",
@@ -1295,6 +1533,7 @@ static void usbtmc_interrupt(struct urb *urb)
case -ESHUTDOWN:
case -EILSEQ:
case -ETIME:
+ case -EPIPE:
/* urb terminated, clean up */
dev_dbg(dev, "urb terminated, status: %d\n", status);
return;
@@ -1339,7 +1578,9 @@ static int usbtmc_probe(struct usb_interface *intf,
mutex_init(&data->io_mutex);
init_waitqueue_head(&data->waitq);
atomic_set(&data->iin_data_valid, 0);
- atomic_set(&data->srq_asserted, 0);
+ INIT_LIST_HEAD(&data->file_list);
+ spin_lock_init(&data->dev_lock);
+
data->zombie = 0;
/* Initialize USBTMC bTag and other fields */
@@ -1442,17 +1683,14 @@ err_put:
static void usbtmc_disconnect(struct usb_interface *intf)
{
- struct usbtmc_device_data *data;
-
- dev_dbg(&intf->dev, "usbtmc_disconnect called\n");
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
- data = usb_get_intfdata(intf);
usb_deregister_dev(intf, &usbtmc_class);
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
mutex_lock(&data->io_mutex);
data->zombie = 1;
- wake_up_all(&data->waitq);
+ wake_up_interruptible_all(&data->waitq);
mutex_unlock(&data->io_mutex);
usbtmc_free_int(data);
kref_put(&data->kref, usbtmc_delete);
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 476dcc5f2da3..6ce77b33da61 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -585,9 +585,10 @@ static void async_completed(struct urb *urb)
struct siginfo sinfo;
struct pid *pid = NULL;
const struct cred *cred = NULL;
+ unsigned long flags;
int signr;
- spin_lock(&ps->lock);
+ spin_lock_irqsave(&ps->lock, flags);
list_move_tail(&as->asynclist, &ps->async_completed);
as->status = urb->status;
signr = as->signr;
@@ -611,7 +612,7 @@ static void async_completed(struct urb *urb)
cancel_bulk_urbs(ps, as->bulk_addr);
wake_up(&ps->wait);
- spin_unlock(&ps->lock);
+ spin_unlock_irqrestore(&ps->lock, flags);
if (signr) {
kill_pid_info_as_cred(sinfo.si_signo, &sinfo, pid, cred);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcae521df29b..462ce49f683a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
if (!udev || udev->state == USB_STATE_NOTATTACHED) {
/* Tell hub_wq to disconnect the device or
- * check for a new connection
+ * check for a new connection or over-current condition.
+ * Based on USB 2.0 spec section 11.12.5,
+ * C_PORT_OVER_CURRENT could be set while
+ * PORT_OVER_CURRENT is not, so check for either.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
- (portstatus & USB_PORT_STAT_OVERCURRENT))
+ (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+ (portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
} else if (portstatus & USB_PORT_STAT_ENABLE) {
@@ -3656,12 +3660,54 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
return 0;
}
+/* Report wakeup requests from the ports of a resuming root hub */
+static void report_wakeup_requests(struct usb_hub *hub)
+{
+ struct usb_device *hdev = hub->hdev;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ unsigned long resuming_ports;
+ int i;
+
+ if (hdev->parent)
+ return; /* Not a root hub */
+
+ hcd = bus_to_hcd(hdev->bus);
+ if (hcd->driver->get_resuming_ports) {
+
+ /*
+ * The get_resuming_ports() method returns a bitmap (origin 0)
+ * of ports which have started wakeup signaling but have not
+ * yet finished resuming. During system resume we will
+ * resume all the enabled ports, regardless of any wakeup
+ * signals, which means the wakeup requests would be lost.
+ * To prevent this, report them to the PM core here.
+ */
+ resuming_ports = hcd->driver->get_resuming_ports(hcd);
+ for (i = 0; i < hdev->maxchild; ++i) {
+ if (test_bit(i, &resuming_ports)) {
+ udev = hub->ports[i]->child;
+ if (udev)
+ pm_wakeup_event(&udev->dev, 0);
+ }
+ }
+ }
+}
+
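/*
 * Aside (illustrative sketch, not part of the patch): the open-coded bit
 * loop above could equivalently use the for_each_set_bit() helper,
 * assuming the same bitmap semantics (bit i set == port i + 1 is still
 * resuming):
 *
 *	resuming_ports = hcd->driver->get_resuming_ports(hcd);
 *	for_each_set_bit(i, &resuming_ports, hdev->maxchild)
 *		if (hub->ports[i]->child)
 *			pm_wakeup_event(&hub->ports[i]->child->dev, 0);
 */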
static int hub_resume(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s\n", __func__);
hub_activate(hub, HUB_RESUME);
+
+ /*
+ * This should be called only for system resume, not runtime resume.
+ * We can't tell the difference here, so some wakeup requests will be
+ * reported at the wrong time or more than once. This shouldn't
+ * matter much, so long as they do get reported.
+ */
+ report_wakeup_requests(hub);
return 0;
}
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index d775ffea20c3..dc7f7fd71684 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -113,11 +113,17 @@ static ssize_t usbport_trig_port_store(struct device *dev,
static struct attribute *ports_attrs[] = {
NULL,
};
+
static const struct attribute_group ports_group = {
.name = "ports",
.attrs = ports_attrs,
};
+static const struct attribute_group *ports_groups[] = {
+ &ports_group,
+ NULL
+};
+
/***************************************
* Adding & removing ports
***************************************/
@@ -298,61 +304,47 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
-static void usbport_trig_activate(struct led_classdev *led_cdev)
+static int usbport_trig_activate(struct led_classdev *led_cdev)
{
struct usbport_trig_data *usbport_data;
- int err;
usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
if (!usbport_data)
- return;
+ return -ENOMEM;
usbport_data->led_cdev = led_cdev;
/* List of ports */
INIT_LIST_HEAD(&usbport_data->ports);
- err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
- if (err)
- goto err_free;
usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
usbport_trig_update_count(usbport_data);
/* Notifications */
- usbport_data->nb.notifier_call = usbport_trig_notify,
- led_cdev->trigger_data = usbport_data;
+ usbport_data->nb.notifier_call = usbport_trig_notify;
+ led_set_trigger_data(led_cdev, usbport_data);
usb_register_notify(&usbport_data->nb);
- led_cdev->activated = true;
- return;
-
-err_free:
- kfree(usbport_data);
+ return 0;
}
static void usbport_trig_deactivate(struct led_classdev *led_cdev)
{
- struct usbport_trig_data *usbport_data = led_cdev->trigger_data;
+ struct usbport_trig_data *usbport_data = led_get_trigger_data(led_cdev);
struct usbport_trig_port *port, *tmp;
- if (!led_cdev->activated)
- return;
-
list_for_each_entry_safe(port, tmp, &usbport_data->ports, list) {
usbport_trig_remove_port(usbport_data, port);
}
usb_unregister_notify(&usbport_data->nb);
- sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
-
kfree(usbport_data);
-
- led_cdev->activated = false;
}
static struct led_trigger usbport_led_trigger = {
.name = "usbport",
.activate = usbport_trig_activate,
.deactivate = usbport_trig_deactivate,
+ .groups = ports_groups,
};
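/*
 * Aside (annotation, not part of the patch): this tracks the concurrent
 * LED-trigger core rework: activate() now returns an int, per-trigger
 * sysfs attributes are handed to the core through the trigger's .groups
 * pointer (replacing the manual sysfs_create_group() and
 * led_cdev->activated bookkeeping), and trigger data is accessed via
 * led_set_trigger_data()/led_get_trigger_data().
 */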
static int __init usbport_trig_init(void)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 1a15392326fc..228672f2c4a1 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -269,10 +269,11 @@ static void sg_clean(struct usb_sg_request *io)
static void sg_complete(struct urb *urb)
{
+ unsigned long flags;
struct usb_sg_request *io = urb->context;
int status = urb->status;
- spin_lock(&io->lock);
+ spin_lock_irqsave(&io->lock, flags);
/* In 2.5 we require hcds' endpoint queues not to progress after fault
* reports, until the completion callback (this!) returns. That lets
@@ -306,7 +307,7 @@ static void sg_complete(struct urb *urb)
* unlink pending urbs so they won't rx/tx bad data.
* careful: unlink can sometimes be synchronous...
*/
- spin_unlock(&io->lock);
+ spin_unlock_irqrestore(&io->lock, flags);
for (i = 0, found = 0; i < io->entries; i++) {
if (!io->urbs[i])
continue;
@@ -323,7 +324,7 @@ static void sg_complete(struct urb *urb)
} else if (urb == io->urbs[i])
found = 1;
}
- spin_lock(&io->lock);
+ spin_lock_irqsave(&io->lock, flags);
}
/* on the last completion, signal usb_sg_wait() */
@@ -332,7 +333,7 @@ static void sg_complete(struct urb *urb)
if (!io->count)
complete(&io->complete);
- spin_unlock(&io->lock);
+ spin_unlock_irqrestore(&io->lock, flags);
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index c55def2f1320..097057d2eacf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair K70 RGB */
{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Corsair Strafe */
+ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
+
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
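Note: usb_quirk_list entries match on vendor:product ID at enumeration time; USB_QUIRK_DELAY_INIT adds a settling delay during device initialization and USB_QUIRK_DELAY_CTRL_MSG spaces out control messages, both for firmware that drops requests arriving back-to-back. The shape of an entry, with a made-up ID:

	/* hypothetical device needing both delays; 0x1234:0xabcd is made up */
	{ USB_DEVICE(0x1234, 0xabcd), .driver_info = USB_QUIRK_DELAY_INIT |
	  USB_QUIRK_DELAY_CTRL_MSG },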
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 1c36a6a9dd63..55d5ae2a7ec7 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -73,17 +73,17 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
/* Backup global regs */
gr = &hsotg->gr_backup;
- gr->gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
- gr->gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
- gr->gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
- gr->gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- gr->grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
- gr->gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- gr->gdfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
- gr->pcgcctl1 = dwc2_readl(hsotg->regs + PCGCCTL1);
- gr->glpmcfg = dwc2_readl(hsotg->regs + GLPMCFG);
- gr->gi2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
- gr->pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ gr->gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ gr->gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gr->gahbcfg = dwc2_readl(hsotg, GAHBCFG);
+ gr->gusbcfg = dwc2_readl(hsotg, GUSBCFG);
+ gr->grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
+ gr->gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
+ gr->gdfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
+ gr->pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
+ gr->glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ gr->gi2cctl = dwc2_readl(hsotg, GI2CCTL);
+ gr->pcgcctl = dwc2_readl(hsotg, PCGCTL);
gr->valid = true;
return 0;
@@ -111,18 +111,18 @@ int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
}
gr->valid = false;
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
- dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
- dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
- dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
- dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
- dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
- dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
- dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
- dwc2_writel(gr->pcgcctl1, hsotg->regs + PCGCCTL1);
- dwc2_writel(gr->glpmcfg, hsotg->regs + GLPMCFG);
- dwc2_writel(gr->pcgcctl, hsotg->regs + PCGCTL);
- dwc2_writel(gr->gi2cctl, hsotg->regs + GI2CCTL);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
+ dwc2_writel(hsotg, gr->gotgctl, GOTGCTL);
+ dwc2_writel(hsotg, gr->gintmsk, GINTMSK);
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+ dwc2_writel(hsotg, gr->gahbcfg, GAHBCFG);
+ dwc2_writel(hsotg, gr->grxfsiz, GRXFSIZ);
+ dwc2_writel(hsotg, gr->gnptxfsiz, GNPTXFSIZ);
+ dwc2_writel(hsotg, gr->gdfifocfg, GDFIFOCFG);
+ dwc2_writel(hsotg, gr->pcgcctl1, PCGCCTL1);
+ dwc2_writel(hsotg, gr->glpmcfg, GLPMCFG);
+ dwc2_writel(hsotg, gr->pcgcctl, PCGCTL);
+ dwc2_writel(hsotg, gr->gi2cctl, GI2CCTL);
return 0;
}
@@ -141,17 +141,17 @@ int dwc2_exit_partial_power_down(struct dwc2_hsotg *hsotg, bool restore)
if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL)
return -ENOTSUPP;
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_STOPPCLK;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_PWRCLMP;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(100);
if (restore) {
@@ -222,21 +222,21 @@ int dwc2_enter_partial_power_down(struct dwc2_hsotg *hsotg)
* Clear any pending interrupts since dwc2 will not be able to
* clear them after entering partial_power_down.
*/
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Put the controller in low power state */
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_PWRCLMP;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
ndelay(20);
pcgcctl |= PCGCTL_RSTPDWNMODULE;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
ndelay(20);
pcgcctl |= PCGCTL_STOPPCLK;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
return ret;
}
@@ -272,39 +272,39 @@ static void dwc2_restore_essential_regs(struct dwc2_hsotg *hsotg, int rmode,
if (!(pcgcctl & PCGCTL_P2HD_DEV_ENUM_SPD_MASK))
pcgcctl |= BIT(17);
}
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
/* Unmask global Interrupt in GAHBCFG and restore it */
- dwc2_writel(gr->gahbcfg | GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
+ dwc2_writel(hsotg, gr->gahbcfg | GAHBCFG_GLBL_INTR_EN, GAHBCFG);
/* Clear all pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Unmask restore done interrupt */
- dwc2_writel(GINTSTS_RESTOREDONE, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, GINTSTS_RESTOREDONE, GINTMSK);
/* Restore GUSBCFG and HCFG/DCFG */
- dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
if (is_host) {
- dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
if (rmode)
pcgcctl |= PCGCTL_RESTOREMODE;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
pcgcctl |= PCGCTL_ESS_REG_RESTORED;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
} else {
- dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
+ dwc2_writel(hsotg, dr->dcfg, DCFG);
if (!rmode)
pcgcctl |= PCGCTL_RESTOREMODE | PCGCTL_RSTPDWNMODULE;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
pcgcctl |= PCGCTL_ESS_REG_RESTORED;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
}
}
@@ -322,42 +322,42 @@ void dwc2_hib_restore_common(struct dwc2_hsotg *hsotg, int rem_wakeup,
u32 gpwrdn;
/* Switch-on voltage to the core */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PWRDNSWTCH;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Reset core */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PWRDNRSTN;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Enable restore from PMU */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_RESTORE;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Disable Power Down Clamp */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PWRDNCLMP;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(50);
if (!is_host && rem_wakeup)
udelay(70);
/* Deassert reset core */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNRSTN;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Disable PMU interrupt */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PMUINTSEL;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Set Restore Essential Regs bit in PCGCCTL register */
@@ -431,7 +431,7 @@ static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
return false;
/* Check if core configuration includes the IDDIG filter. */
- ghwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
+ ghwcfg4 = dwc2_readl(hsotg, GHWCFG4);
if (!(ghwcfg4 & GHWCFG4_IDDIG_FILT_EN))
return false;
@@ -439,9 +439,9 @@ static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
* Check if the IDDIG debounce filter is bypassed. Available
* in core version >= 3.10a.
*/
- gsnpsid = dwc2_readl(hsotg->regs + GSNPSID);
+ gsnpsid = dwc2_readl(hsotg, GSNPSID);
if (gsnpsid >= DWC2_CORE_REV_3_10a) {
- u32 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (gotgctl & GOTGCTL_DBNCE_FLTR_BYPASS)
return false;
@@ -510,8 +510,8 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
* reset and account for this delay after the reset.
*/
if (dwc2_iddig_filter_enabled(hsotg)) {
- u32 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
- u32 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ u32 gusbcfg = dwc2_readl(hsotg, GUSBCFG);
if (!(gotgctl & GOTGCTL_CONID_B) ||
(gusbcfg & GUSBCFG_FORCEHOSTMODE)) {
@@ -520,9 +520,9 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
}
/* Core Soft Reset */
- greset = dwc2_readl(hsotg->regs + GRSTCTL);
+ greset = dwc2_readl(hsotg, GRSTCTL);
greset |= GRSTCTL_CSFTRST;
- dwc2_writel(greset, hsotg->regs + GRSTCTL);
+ dwc2_writel(hsotg, greset, GRSTCTL);
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) {
dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
@@ -594,14 +594,14 @@ void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
if (WARN_ON(!host && hsotg->dr_mode == USB_DR_MODE_HOST))
return;
- gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ gusbcfg = dwc2_readl(hsotg, GUSBCFG);
set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
gusbcfg &= ~clear;
gusbcfg |= set;
- dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, gusbcfg, GUSBCFG);
dwc2_wait_for_mode(hsotg, host);
return;
@@ -627,10 +627,10 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Clearing force mode bits\n");
- gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ gusbcfg = dwc2_readl(hsotg, GUSBCFG);
gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
- dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, gusbcfg, GUSBCFG);
if (dwc2_iddig_filter_enabled(hsotg))
msleep(100);
@@ -670,11 +670,11 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
void dwc2_enable_acg(struct dwc2_hsotg *hsotg)
{
if (hsotg->params.acg_enable) {
- u32 pcgcctl1 = dwc2_readl(hsotg->regs + PCGCCTL1);
+ u32 pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
dev_dbg(hsotg->dev, "Enabling Active Clock Gating\n");
pcgcctl1 |= PCGCCTL1_GATEEN;
- dwc2_writel(pcgcctl1, hsotg->regs + PCGCCTL1);
+ dwc2_writel(hsotg, pcgcctl1, PCGCCTL1);
}
}
@@ -695,56 +695,57 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Host Global Registers\n");
addr = hsotg->regs + HCFG;
dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HCFG));
addr = hsotg->regs + HFIR;
dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HFIR));
addr = hsotg->regs + HFNUM;
dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HFNUM));
addr = hsotg->regs + HPTXSTS;
dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HPTXSTS));
addr = hsotg->regs + HAINT;
dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HAINT));
addr = hsotg->regs + HAINTMSK;
dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HAINTMSK));
if (hsotg->params.dma_desc_enable) {
addr = hsotg->regs + HFLBADDR;
dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HFLBADDR));
}
addr = hsotg->regs + HPRT0;
dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HPRT0));
for (i = 0; i < hsotg->params.host_channels; i++) {
dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
addr = hsotg->regs + HCCHAR(i);
dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HCCHAR(i)));
addr = hsotg->regs + HCSPLT(i);
dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HCSPLT(i)));
addr = hsotg->regs + HCINT(i);
dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HCINT(i)));
addr = hsotg->regs + HCINTMSK(i);
dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HCINTMSK(i)));
addr = hsotg->regs + HCTSIZ(i);
dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HCTSIZ(i)));
addr = hsotg->regs + HCDMA(i);
dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HCDMA(i)));
if (hsotg->params.dma_desc_enable) {
addr = hsotg->regs + HCDMAB(i);
dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg,
+ HCDMAB(i)));
}
}
#endif
@@ -766,80 +767,80 @@ void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Core Global Registers\n");
addr = hsotg->regs + GOTGCTL;
dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GOTGCTL));
addr = hsotg->regs + GOTGINT;
dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GOTGINT));
addr = hsotg->regs + GAHBCFG;
dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GAHBCFG));
addr = hsotg->regs + GUSBCFG;
dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GUSBCFG));
addr = hsotg->regs + GRSTCTL;
dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GRSTCTL));
addr = hsotg->regs + GINTSTS;
dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GINTSTS));
addr = hsotg->regs + GINTMSK;
dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GINTMSK));
addr = hsotg->regs + GRXSTSR;
dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GRXSTSR));
addr = hsotg->regs + GRXFSIZ;
dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GRXFSIZ));
addr = hsotg->regs + GNPTXFSIZ;
dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GNPTXFSIZ));
addr = hsotg->regs + GNPTXSTS;
dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GNPTXSTS));
addr = hsotg->regs + GI2CCTL;
dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GI2CCTL));
addr = hsotg->regs + GPVNDCTL;
dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GPVNDCTL));
addr = hsotg->regs + GGPIO;
dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GGPIO));
addr = hsotg->regs + GUID;
dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GUID));
addr = hsotg->regs + GSNPSID;
dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GSNPSID));
addr = hsotg->regs + GHWCFG1;
dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG1));
addr = hsotg->regs + GHWCFG2;
dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG2));
addr = hsotg->regs + GHWCFG3;
dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG3));
addr = hsotg->regs + GHWCFG4;
dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GHWCFG4));
addr = hsotg->regs + GLPMCFG;
dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GLPMCFG));
addr = hsotg->regs + GPWRDN;
dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GPWRDN));
addr = hsotg->regs + GDFIFOCFG;
dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, GDFIFOCFG));
addr = hsotg->regs + HPTXFSIZ;
dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, HPTXFSIZ));
addr = hsotg->regs + PCGCTL;
dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
- (unsigned long)addr, dwc2_readl(addr));
+ (unsigned long)addr, dwc2_readl(hsotg, PCGCTL));
#endif
}
@@ -862,7 +863,7 @@ void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
greset = GRSTCTL_TXFFLSH;
greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
- dwc2_writel(greset, hsotg->regs + GRSTCTL);
+ dwc2_writel(hsotg, greset, GRSTCTL);
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 10000))
dev_warn(hsotg->dev, "%s: HANG! timeout GRSTCTL GRSTCTL_TXFFLSH\n",
@@ -889,7 +890,7 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
__func__);
greset = GRSTCTL_RXFFLSH;
- dwc2_writel(greset, hsotg->regs + GRSTCTL);
+ dwc2_writel(hsotg, greset, GRSTCTL);
/* Wait for RxFIFO flush done */
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_RXFFLSH, 10000))
@@ -902,7 +903,7 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
{
- if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff)
+ if (dwc2_readl(hsotg, GSNPSID) == 0xffffffff)
return false;
else
return true;
@@ -916,10 +917,10 @@ bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
*/
void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
{
- u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
+ u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
ahbcfg |= GAHBCFG_GLBL_INTR_EN;
- dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
}
/**
@@ -930,16 +931,16 @@ void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
*/
void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
{
- u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
+ u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
- dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
}
/* Returns the controller's GHWCFG2.OTG_MODE. */
unsigned int dwc2_op_mode(struct dwc2_hsotg *hsotg)
{
- u32 ghwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
+ u32 ghwcfg2 = dwc2_readl(hsotg, GHWCFG2);
return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
GHWCFG2_OP_MODE_SHIFT;
@@ -988,7 +989,7 @@ int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
u32 i;
for (i = 0; i < timeout; i++) {
- if (dwc2_readl(hsotg->regs + offset) & mask)
+ if (dwc2_readl(hsotg, offset) & mask)
return 0;
udelay(1);
}
@@ -1011,7 +1012,7 @@ int dwc2_hsotg_wait_bit_clear(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
u32 i;
for (i = 0; i < timeout; i++) {
- if (!(dwc2_readl(hsotg->regs + offset) & mask))
+ if (!(dwc2_readl(hsotg, offset) & mask))
return 0;
udelay(1);
}
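Note: the core.c hunks are mechanical: every accessor call moves from pointer arithmetic on hsotg->regs to passing the hsotg plus a register offset, so the endianness decision introduced in core.h below is taken in exactly one place. The shape of the conversion:

	/* before: the caller does the MMIO address arithmetic */
	greset = dwc2_readl(hsotg->regs + GRSTCTL);
	dwc2_writel(greset, hsotg->regs + GRSTCTL);

	/* after: the accessor owns the address math and any byte swap */
	greset = dwc2_readl(hsotg, GRSTCTL);
	dwc2_writel(hsotg, greset, GRSTCTL);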
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 4a56ac772a3c..cc9c93affa14 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -65,60 +65,6 @@
DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
dev_name(hsotg->dev), ##__VA_ARGS__)
-#ifdef CONFIG_MIPS
-/*
- * There are some MIPS machines that can run in either big-endian
- * or little-endian mode and that use the dwc2 register without
- * a byteswap in both ways.
- * Unlike other architectures, MIPS apparently does not require a
- * barrier before the __raw_writel() to synchronize with DMA but does
- * require the barrier after the __raw_writel() to serialize a set of
- * writes. This set of operations was added specifically for MIPS and
- * should only be used there.
- */
-static inline u32 dwc2_readl(const void __iomem *addr)
-{
- u32 value = __raw_readl(addr);
-
- /* In order to preserve endianness __raw_* operation is used. Therefore
- * a barrier is needed to ensure IO access is not re-ordered across
- * reads or writes
- */
- mb();
- return value;
-}
-
-static inline void dwc2_writel(u32 value, void __iomem *addr)
-{
- __raw_writel(value, addr);
-
- /*
- * In order to preserve endianness __raw_* operation is used. Therefore
- * a barrier is needed to ensure IO access is not re-ordered across
- * reads or writes
- */
- mb();
-#ifdef DWC2_LOG_WRITES
- pr_info("INFO:: wrote %08x to %p\n", value, addr);
-#endif
-}
-#else
-/* Normal architectures just use readl/write */
-static inline u32 dwc2_readl(const void __iomem *addr)
-{
- return readl(addr);
-}
-
-static inline void dwc2_writel(u32 value, void __iomem *addr)
-{
- writel(value, addr);
-
-#ifdef DWC2_LOG_WRITES
- pr_info("info:: wrote %08x to %p\n", value, addr);
-#endif
-}
-#endif
-
/* Maximum number of Endpoints/HostChannels */
#define MAX_EPS_CHANNELS 16
@@ -911,6 +857,7 @@ struct dwc2_hregs_backup {
* @gr_backup: Backup of global registers during suspend
* @dr_backup: Backup of device registers during suspend
* @hr_backup: Backup of host registers during suspend
+ * @needs_byte_swap: Specifies whether core registers use the opposite endianness and accesses must be byte-swapped.
*
* These are for host mode:
*
@@ -1004,6 +951,7 @@ struct dwc2_hregs_backup {
* @frame_list_sz: Frame list size
* @desc_gen_cache: Kmem cache for generic descriptors
* @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
+ * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buffers
*
* These are for peripheral mode:
*
@@ -1099,6 +1047,7 @@ struct dwc2_hsotg {
struct dentry *debug_root;
struct debugfs_regset32 *regset;
+ bool needs_byte_swap;
/* DWC OTG HW Release versions */
#define DWC2_CORE_REV_2_71a 0x4f54271a
@@ -1177,6 +1126,8 @@ struct dwc2_hsotg {
u32 frame_list_sz;
struct kmem_cache *desc_gen_cache;
struct kmem_cache *desc_hsisoc_cache;
+ struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
@@ -1212,6 +1163,55 @@ struct dwc2_hsotg {
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
};
+/* Normal architectures just use readl/writel */
+static inline u32 dwc2_readl(struct dwc2_hsotg *hsotg, u32 offset)
+{
+ u32 val;
+
+ val = readl(hsotg->regs + offset);
+ if (hsotg->needs_byte_swap)
+ return swab32(val);
+ else
+ return val;
+}
+
+static inline void dwc2_writel(struct dwc2_hsotg *hsotg, u32 value, u32 offset)
+{
+ if (hsotg->needs_byte_swap)
+ writel(swab32(value), hsotg->regs + offset);
+ else
+ writel(value, hsotg->regs + offset);
+
+#ifdef DWC2_LOG_WRITES
+ pr_info("info:: wrote %08x to %p\n", value, hsotg->regs + offset);
+#endif
+}
+
+static inline void dwc2_readl_rep(struct dwc2_hsotg *hsotg, u32 offset,
+ void *buffer, unsigned int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+
+ do {
+ u32 x = dwc2_readl(hsotg, offset);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void dwc2_writel_rep(struct dwc2_hsotg *hsotg, u32 offset,
+ const void *buffer, unsigned int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+
+ do {
+ dwc2_writel(hsotg, *buf++, offset);
+ } while (--count);
+ }
+}
+
/* Reasons for halting a host channel */
enum dwc2_halt_status {
DWC2_HC_XFER_NO_HALT_STATUS,
@@ -1317,12 +1317,12 @@ bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg);
*/
static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg)
{
- return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0;
+ return (dwc2_readl(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) != 0;
}
static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg)
{
- return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
+ return (dwc2_readl(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
}
/*
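Note: needs_byte_swap must be set before the accessors above can be trusted, and this hunk does not show the probe. A plausible detection sketch follows, assuming only that GSNPSID carries the fixed 0x4f54xxxx Synopsys signature visible in the DWC2_CORE_REV_* constants above; the helper name and mask are assumptions, not part of this patch:

static bool dwc2_core_is_byteswapped(struct dwc2_hsotg *hsotg)
{
	/* raw read: no swap applied yet */
	u32 snpsid = readl(hsotg->regs + GSNPSID);

	if ((snpsid & 0xffff0000) == 0x4f540000)
		return false;				/* native byte order */

	/* signature only appears after swapping: the core is wired swapped */
	return (swab32(snpsid) & 0xffff0000) == 0x4f540000;
}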
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index cc90b58b6b3c..19ae2595f1c3 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -81,11 +81,11 @@ static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
*/
static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg)
{
- u32 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ u32 hprt0 = dwc2_readl(hsotg, HPRT0);
if (hprt0 & HPRT0_ENACHG) {
hprt0 &= ~HPRT0_ENA;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
}
}
@@ -97,7 +97,7 @@ static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg)
static void dwc2_handle_mode_mismatch_intr(struct dwc2_hsotg *hsotg)
{
/* Clear interrupt */
- dwc2_writel(GINTSTS_MODEMIS, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_MODEMIS, GINTSTS);
dev_warn(hsotg->dev, "Mode Mismatch Interrupt: currently in %s mode\n",
dwc2_is_host_mode(hsotg) ? "Host" : "Device");
@@ -115,8 +115,8 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
u32 gotgctl;
u32 gintmsk;
- gotgint = dwc2_readl(hsotg->regs + GOTGINT);
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgint = dwc2_readl(hsotg, GOTGINT);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
dev_dbg(hsotg->dev, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint,
dwc2_op_state_str(hsotg));
@@ -124,7 +124,7 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev,
" ++OTG Interrupt: Session End Detected++ (%s)\n",
dwc2_op_state_str(hsotg));
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (dwc2_is_device_mode(hsotg))
dwc2_hsotg_disconnect(hsotg);
@@ -150,24 +150,24 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
hsotg->lx_state = DWC2_L0;
}
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl &= ~GOTGCTL_DEVHNPEN;
- dwc2_writel(gotgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
}
if (gotgint & GOTGINT_SES_REQ_SUC_STS_CHNG) {
dev_dbg(hsotg->dev,
" ++OTG Interrupt: Session Request Success Status Change++\n");
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (gotgctl & GOTGCTL_SESREQSCS) {
if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
hsotg->params.i2c_enable) {
hsotg->srp_success = 1;
} else {
/* Clear Session Request */
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl &= ~GOTGCTL_SESREQ;
- dwc2_writel(gotgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
}
}
}
@@ -177,7 +177,7 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
* Print statements during the HNP interrupt handling
* can cause it to fail
*/
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
/*
* WA for 3.00a- HW is not setting cur_mode, even sometimes
* this does not help
@@ -197,9 +197,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
* interrupt does not get handled and Linux
* complains loudly.
*/
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_SOF;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
/*
* Call callback function with spin lock
@@ -213,9 +213,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
hsotg->op_state = OTG_STATE_B_HOST;
}
} else {
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl &= ~(GOTGCTL_HNPREQ | GOTGCTL_DEVHNPEN);
- dwc2_writel(gotgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
dev_dbg(hsotg->dev, "HNP Failed\n");
dev_err(hsotg->dev,
"Device Not Connected/Responding\n");
@@ -241,9 +241,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
hsotg->op_state = OTG_STATE_A_PERIPHERAL;
} else {
/* Need to disable SOF interrupt immediately */
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_SOF;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
spin_unlock(&hsotg->lock);
dwc2_hcd_start(hsotg);
spin_lock(&hsotg->lock);
@@ -258,7 +258,7 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, " ++OTG Interrupt: Debounce Done++\n");
/* Clear GOTGINT */
- dwc2_writel(gotgint, hsotg->regs + GOTGINT);
+ dwc2_writel(hsotg, gotgint, GOTGINT);
}
/**
@@ -276,12 +276,12 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
u32 gintmsk;
/* Clear interrupt */
- dwc2_writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_CONIDSTSCHNG, GINTSTS);
/* Need to disable SOF interrupt immediately */
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_SOF;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
dev_dbg(hsotg->dev, " ++Connector ID Status Change Interrupt++ (%s)\n",
dwc2_is_host_mode(hsotg) ? "Host" : "Device");
@@ -314,7 +314,7 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
int ret;
/* Clear interrupt */
- dwc2_writel(GINTSTS_SESSREQINT, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n",
hsotg->lx_state);
@@ -351,15 +351,15 @@ static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
return;
}
- glpmcfg = dwc2_readl(hsotg->regs + GLPMCFG);
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, "Exit from L1 state\n");
glpmcfg &= ~GLPMCFG_ENBLSLPM;
glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
- dwc2_writel(glpmcfg, hsotg->regs + GLPMCFG);
+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
do {
- glpmcfg = dwc2_readl(hsotg->regs + GLPMCFG);
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
@@ -398,7 +398,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
int ret;
/* Clear interrupt */
- dwc2_writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_WKUPINT, GINTSTS);
dev_dbg(hsotg->dev, "++Resume or Remote Wakeup Detected Interrupt++\n");
dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
@@ -410,13 +410,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev, "DSTS=0x%0x\n",
- dwc2_readl(hsotg->regs + DSTS));
+ dwc2_readl(hsotg, DSTS));
if (hsotg->lx_state == DWC2_L2) {
- u32 dctl = dwc2_readl(hsotg->regs + DCTL);
+ u32 dctl = dwc2_readl(hsotg, DCTL);
/* Clear Remote Wakeup Signaling */
dctl &= ~DCTL_RMTWKUPSIG;
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
ret = dwc2_exit_partial_power_down(hsotg, true);
if (ret && (ret != -ENOTSUPP))
dev_err(hsotg->dev, "exit power_down failed\n");
@@ -430,11 +430,11 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
return;
if (hsotg->lx_state != DWC2_L1) {
- u32 pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ u32 pcgcctl = dwc2_readl(hsotg, PCGCTL);
/* Restart the Phy Clock */
pcgcctl &= ~PCGCTL_STOPPCLK;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
mod_timer(&hsotg->wkp_timer,
jiffies + msecs_to_jiffies(71));
} else {
@@ -450,7 +450,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
*/
static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
{
- dwc2_writel(GINTSTS_DISCONNINT, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_DISCONNINT, GINTSTS);
dev_dbg(hsotg->dev, "++Disconnect Detected Interrupt++ (%s) %s\n",
dwc2_is_host_mode(hsotg) ? "Host" : "Device",
@@ -474,7 +474,7 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
int ret;
/* Clear interrupt */
- dwc2_writel(GINTSTS_USBSUSP, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_USBSUSP, GINTSTS);
dev_dbg(hsotg->dev, "USB SUSPEND\n");
@@ -483,7 +483,7 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
* Check the Device status register to determine if the Suspend
* state is active
*/
- dsts = dwc2_readl(hsotg->regs + DSTS);
+ dsts = dwc2_readl(hsotg, DSTS);
dev_dbg(hsotg->dev, "%s: DSTS=0x%0x\n", __func__, dsts);
dev_dbg(hsotg->dev,
"DSTS.Suspend Status=%d HWCFG4.Power Optimize=%d HWCFG4.Hibernation=%d\n",
@@ -563,9 +563,9 @@ static void dwc2_handle_lpm_intr(struct dwc2_hsotg *hsotg)
u32 enslpm;
/* Clear interrupt */
- dwc2_writel(GINTSTS_LPMTRANRCVD, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_LPMTRANRCVD, GINTSTS);
- glpmcfg = dwc2_readl(hsotg->regs + GLPMCFG);
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (!(glpmcfg & GLPMCFG_LPMCAP)) {
dev_err(hsotg->dev, "Unexpected LPM interrupt\n");
@@ -588,16 +588,16 @@ static void dwc2_handle_lpm_intr(struct dwc2_hsotg *hsotg)
} else {
dev_dbg(hsotg->dev, "Entering Sleep with L1 Gating\n");
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_ENBL_SLEEP_GATING;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
}
/**
* Examine prt_sleep_sts after TL1TokenRetry period max (10 us)
*/
udelay(10);
- glpmcfg = dwc2_readl(hsotg->regs + GLPMCFG);
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (glpmcfg & GLPMCFG_SLPSTS) {
/* Save the current state */
@@ -627,9 +627,9 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
u32 gahbcfg;
u32 gintmsk_common = GINTMSK_COMMON;
- gintsts = dwc2_readl(hsotg->regs + GINTSTS);
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
- gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gahbcfg = dwc2_readl(hsotg, GAHBCFG);
/* If any common interrupts set */
if (gintsts & gintmsk_common)
@@ -653,9 +653,9 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
u32 gpwrdn;
int linestate;
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
/* clear all interrupt */
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
linestate = (gpwrdn & GPWRDN_LINESTATE_MASK) >> GPWRDN_LINESTATE_SHIFT;
dev_dbg(hsotg->dev,
"%s: dwc2_handle_gpwrdwn_intr called gpwrdn= %08x\n", __func__,
@@ -668,38 +668,38 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
/* Switch-on voltage to the core */
- gpwrdn_tmp = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
- dwc2_writel(gpwrdn_tmp, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(10);
/* Reset core */
- gpwrdn_tmp = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
- dwc2_writel(gpwrdn_tmp, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(10);
/* Disable Power Down Clamp */
- gpwrdn_tmp = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
- dwc2_writel(gpwrdn_tmp, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(10);
/* Deassert reset core */
- gpwrdn_tmp = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
- dwc2_writel(gpwrdn_tmp, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
udelay(10);
/* Disable PMU interrupt */
- gpwrdn_tmp = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
- dwc2_writel(gpwrdn_tmp, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
/* De-assert Wakeup Logic */
- gpwrdn_tmp = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
gpwrdn_tmp &= ~GPWRDN_PMUACTV;
- dwc2_writel(gpwrdn_tmp, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
hsotg->hibernated = 0;
@@ -780,10 +780,10 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
/* Reading current frame number value in device or host modes. */
if (dwc2_is_device_mode(hsotg))
- hsotg->frame_number = (dwc2_readl(hsotg->regs + DSTS)
+ hsotg->frame_number = (dwc2_readl(hsotg, DSTS)
& DSTS_SOFFN_MASK) >> DSTS_SOFFN_SHIFT;
else
- hsotg->frame_number = (dwc2_readl(hsotg->regs + HFNUM)
+ hsotg->frame_number = (dwc2_readl(hsotg, HFNUM)
& HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
gintsts = dwc2_read_common_intr(hsotg);
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index d0bdb7997557..22d015b0424f 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -69,7 +69,7 @@ static int testmode_show(struct seq_file *s, void *unused)
int dctl;
spin_lock_irqsave(&hsotg->lock, flags);
- dctl = dwc2_readl(hsotg->regs + DCTL);
+ dctl = dwc2_readl(hsotg, DCTL);
dctl &= DCTL_TSTCTL_MASK;
dctl >>= DCTL_TSTCTL_SHIFT;
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -126,42 +126,41 @@ static const struct file_operations testmode_fops = {
static int state_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
- void __iomem *regs = hsotg->regs;
int idx;
seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
- dwc2_readl(regs + DCFG),
- dwc2_readl(regs + DCTL),
- dwc2_readl(regs + DSTS));
+ dwc2_readl(hsotg, DCFG),
+ dwc2_readl(hsotg, DCTL),
+ dwc2_readl(hsotg, DSTS));
seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
- dwc2_readl(regs + DIEPMSK), dwc2_readl(regs + DOEPMSK));
+ dwc2_readl(hsotg, DIEPMSK), dwc2_readl(hsotg, DOEPMSK));
seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
- dwc2_readl(regs + GINTMSK),
- dwc2_readl(regs + GINTSTS));
+ dwc2_readl(hsotg, GINTMSK),
+ dwc2_readl(hsotg, GINTSTS));
seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
- dwc2_readl(regs + DAINTMSK),
- dwc2_readl(regs + DAINT));
+ dwc2_readl(hsotg, DAINTMSK),
+ dwc2_readl(hsotg, DAINT));
seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
- dwc2_readl(regs + GNPTXSTS),
- dwc2_readl(regs + GRXSTSR));
+ dwc2_readl(hsotg, GNPTXSTS),
+ dwc2_readl(hsotg, GRXSTSR));
seq_puts(seq, "\nEndpoint status:\n");
for (idx = 0; idx < hsotg->num_of_eps; idx++) {
u32 in, out;
- in = dwc2_readl(regs + DIEPCTL(idx));
- out = dwc2_readl(regs + DOEPCTL(idx));
+ in = dwc2_readl(hsotg, DIEPCTL(idx));
+ out = dwc2_readl(hsotg, DOEPCTL(idx));
seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
idx, in, out);
- in = dwc2_readl(regs + DIEPTSIZ(idx));
- out = dwc2_readl(regs + DOEPTSIZ(idx));
+ in = dwc2_readl(hsotg, DIEPTSIZ(idx));
+ out = dwc2_readl(hsotg, DOEPTSIZ(idx));
seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
in, out);
@@ -184,14 +183,13 @@ DEFINE_SHOW_ATTRIBUTE(state);
static int fifo_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
- void __iomem *regs = hsotg->regs;
u32 val;
int idx;
seq_puts(seq, "Non-periodic FIFOs:\n");
- seq_printf(seq, "RXFIFO: Size %d\n", dwc2_readl(regs + GRXFSIZ));
+ seq_printf(seq, "RXFIFO: Size %d\n", dwc2_readl(hsotg, GRXFSIZ));
- val = dwc2_readl(regs + GNPTXFSIZ);
+ val = dwc2_readl(hsotg, GNPTXFSIZ);
seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
val >> FIFOSIZE_DEPTH_SHIFT,
val & FIFOSIZE_STARTADDR_MASK);
@@ -199,7 +197,7 @@ static int fifo_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
- val = dwc2_readl(regs + DPTXFSIZN(idx));
+ val = dwc2_readl(hsotg, DPTXFSIZN(idx));
seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
val >> FIFOSIZE_DEPTH_SHIFT,
@@ -228,7 +226,6 @@ static int ep_show(struct seq_file *seq, void *v)
struct dwc2_hsotg_ep *ep = seq->private;
struct dwc2_hsotg *hsotg = ep->parent;
struct dwc2_hsotg_req *req;
- void __iomem *regs = hsotg->regs;
int index = ep->index;
int show_limit = 15;
unsigned long flags;
@@ -239,20 +236,20 @@ static int ep_show(struct seq_file *seq, void *v)
/* first show the register state */
seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
- dwc2_readl(regs + DIEPCTL(index)),
- dwc2_readl(regs + DOEPCTL(index)));
+ dwc2_readl(hsotg, DIEPCTL(index)),
+ dwc2_readl(hsotg, DOEPCTL(index)));
seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
- dwc2_readl(regs + DIEPDMA(index)),
- dwc2_readl(regs + DOEPDMA(index)));
+ dwc2_readl(hsotg, DIEPDMA(index)),
+ dwc2_readl(hsotg, DOEPDMA(index)));
seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
- dwc2_readl(regs + DIEPINT(index)),
- dwc2_readl(regs + DOEPINT(index)));
+ dwc2_readl(hsotg, DIEPINT(index)),
+ dwc2_readl(hsotg, DOEPINT(index)));
seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
- dwc2_readl(regs + DIEPTSIZ(index)),
- dwc2_readl(regs + DOEPTSIZ(index)));
+ dwc2_readl(hsotg, DIEPTSIZ(index)),
+ dwc2_readl(hsotg, DOEPTSIZ(index)));
seq_puts(seq, "\n");
seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index f0d9ccf1d665..220c0f9b89b0 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -47,14 +47,14 @@ static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
return container_of(gadget, struct dwc2_hsotg, gadget);
}
-static inline void dwc2_set_bit(void __iomem *ptr, u32 val)
+static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
- dwc2_writel(dwc2_readl(ptr) | val, ptr);
+ dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
}
-static inline void dwc2_clear_bit(void __iomem *ptr, u32 val)
+static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
- dwc2_writel(dwc2_readl(ptr) & ~val, ptr);
+ dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
}
static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
@@ -129,14 +129,14 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
*/
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
- u32 gsintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
u32 new_gsintmsk;
new_gsintmsk = gsintmsk | ints;
if (new_gsintmsk != gsintmsk) {
dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
- dwc2_writel(new_gsintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}
}
@@ -147,13 +147,13 @@ static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
*/
static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
- u32 gsintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
u32 new_gsintmsk;
new_gsintmsk = gsintmsk & ~ints;
if (new_gsintmsk != gsintmsk)
- dwc2_writel(new_gsintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}
/**
@@ -178,12 +178,12 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
bit <<= 16;
local_irq_save(flags);
- daint = dwc2_readl(hsotg->regs + DAINTMSK);
+ daint = dwc2_readl(hsotg, DAINTMSK);
if (en)
daint |= bit;
else
daint &= ~bit;
- dwc2_writel(daint, hsotg->regs + DAINTMSK);
+ dwc2_writel(hsotg, daint, DAINTMSK);
local_irq_restore(flags);
}
@@ -266,10 +266,11 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
hsotg->fifo_map = 0;
/* set RX/NPTX FIFO sizes */
- dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
- dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) |
+ dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
+ dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
+ FIFOSIZE_STARTADDR_SHIFT) |
(hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
- hsotg->regs + GNPTXFSIZ);
+ GNPTXFSIZ);
/*
* arrange all the rest of the TX FIFOs, as some versions of this
@@ -295,25 +296,25 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
"insufficient fifo memory");
addr += txfsz[ep];
- dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
- val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
+ dwc2_writel(hsotg, val, DPTXFSIZN(ep));
+ val = dwc2_readl(hsotg, DPTXFSIZN(ep));
}
- dwc2_writel(hsotg->hw_params.total_fifo_size |
+ dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
addr << GDFIFOCFG_EPINFOBASE_SHIFT,
- hsotg->regs + GDFIFOCFG);
+ GDFIFOCFG);
/*
* according to p428 of the design guide, we need to ensure that
* all fifos are flushed before continuing
*/
- dwc2_writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
- GRSTCTL_RXFFLSH, hsotg->regs + GRSTCTL);
+ dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
+ GRSTCTL_RXFFLSH, GRSTCTL);
/* wait until the fifos are both flushed */
timeout = 100;
while (1) {
- val = dwc2_readl(hsotg->regs + GRSTCTL);
+ val = dwc2_readl(hsotg, GRSTCTL);
if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
break;
@@ -451,7 +452,7 @@ static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_req *hs_req)
{
bool periodic = is_ep_periodic(hs_ep);
- u32 gnptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
+ u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
int buf_pos = hs_req->req.actual;
int to_write = hs_ep->size_loaded;
void *data;
@@ -466,7 +467,7 @@ static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
return 0;
if (periodic && !hsotg->dedicated_fifos) {
- u32 epsize = dwc2_readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
+ u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
int size_left;
int size_done;
@@ -507,8 +508,8 @@ static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
return -ENOSPC;
}
} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
- can_write = dwc2_readl(hsotg->regs +
- DTXFSTS(hs_ep->fifo_index));
+ can_write = dwc2_readl(hsotg,
+ DTXFSTS(hs_ep->fifo_index));
can_write &= 0xffff;
can_write *= 4;
@@ -598,7 +599,7 @@ static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
to_write = DIV_ROUND_UP(to_write, 4);
data = hs_req->req.buf + buf_pos;
- iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
+ dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
return (to_write >= can_write) ? -ENOSPC : 0;
}
@@ -652,7 +653,7 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
{
u32 dsts;
- dsts = dwc2_readl(hsotg->regs + DSTS);
+ dsts = dwc2_readl(hsotg, DSTS);
dsts &= DSTS_SOFFN_MASK;
dsts >>= DSTS_SOFFN_SHIFT;
@@ -812,6 +813,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
u32 index;
u32 maxsize = 0;
u32 mask = 0;
+ u8 pid = 0;
maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
@@ -840,7 +842,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
((len << DEV_DMA_NBYTES_SHIFT) & mask));
if (hs_ep->dir_in) {
- desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+ if (len)
+ pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+ else
+ pid = 1;
+ desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
DEV_DMA_ISOC_PID_MASK) |
((len % hs_ep->ep.maxpacket) ?
DEV_DMA_SHORT : 0) |
@@ -884,6 +890,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
struct dwc2_dma_desc *desc;
if (list_empty(&hs_ep->queue)) {
+ hs_ep->target_frame = TARGET_FRAME_INITIAL;
dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
return;
}
@@ -909,11 +916,11 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
/* write descriptor chain address to control register */
- dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+ dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
- ctrl = dwc2_readl(hsotg->regs + depctl);
+ ctrl = dwc2_readl(hsotg, depctl);
ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
- dwc2_writel(ctrl, hsotg->regs + depctl);
+ dwc2_writel(hsotg, ctrl, depctl);
}
/**
@@ -961,11 +968,11 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
- __func__, dwc2_readl(hsotg->regs + epctrl_reg), index,
+ __func__, dwc2_readl(hsotg, epctrl_reg), index,
hs_ep->dir_in ? "in" : "out");
/* If endpoint is stalled, we will restart request later */
- ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
+ ctrl = dwc2_readl(hsotg, epctrl_reg);
if (index && ctrl & DXEPCTL_STALL) {
dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
@@ -1058,13 +1065,13 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
length);
/* write descriptor chain address to control register */
- dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+ dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
__func__, (u32)hs_ep->desc_list_dma, dma_reg);
} else {
/* write size / packets */
- dwc2_writel(epsize, hsotg->regs + epsize_reg);
+ dwc2_writel(hsotg, epsize, epsize_reg);
if (using_dma(hsotg) && !continuing && (length != 0)) {
/*
@@ -1072,7 +1079,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
* already synced by dwc2_hsotg_ep_queue().
*/
- dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+ dwc2_writel(hsotg, ureq->dma, dma_reg);
dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
__func__, &ureq->dma, dma_reg);
@@ -1098,7 +1105,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
- dwc2_writel(ctrl, hsotg->regs + epctrl_reg);
+ dwc2_writel(hsotg, ctrl, epctrl_reg);
/*
* set these, it seems that DMA support increments past the end
@@ -1121,13 +1128,13 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
*/
/* check ep is enabled */
- if (!(dwc2_readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA))
+ if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
dev_dbg(hsotg->dev,
"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
- index, dwc2_readl(hsotg->regs + epctrl_reg));
+ index, dwc2_readl(hsotg, epctrl_reg));
dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
- __func__, dwc2_readl(hsotg->regs + epctrl_reg));
+ __func__, dwc2_readl(hsotg, epctrl_reg));
/* enable ep interrupts */
dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
@@ -1460,7 +1467,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
*/
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
{
- int dctl = dwc2_readl(hsotg->regs + DCTL);
+ int dctl = dwc2_readl(hsotg, DCTL);
dctl &= ~DCTL_TSTCTL_MASK;
switch (testmode) {
@@ -1474,7 +1481,7 @@ int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
default:
return -EINVAL;
}
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
return 0;
}
@@ -1628,9 +1635,9 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
} else {
dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
__func__);
- mask = dwc2_readl(hsotg->regs + epmsk_reg);
+ mask = dwc2_readl(hsotg, epmsk_reg);
mask |= DOEPMSK_OUTTKNEPDISMSK;
- dwc2_writel(mask, hsotg->regs + epmsk_reg);
+ dwc2_writel(hsotg, mask, epmsk_reg);
}
}
@@ -1767,14 +1774,14 @@ static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
* taken effect, so no need to clear later.
*/
- ctrl = dwc2_readl(hsotg->regs + reg);
+ ctrl = dwc2_readl(hsotg, reg);
ctrl |= DXEPCTL_STALL;
ctrl |= DXEPCTL_CNAK;
- dwc2_writel(ctrl, hsotg->regs + reg);
+ dwc2_writel(hsotg, ctrl, reg);
dev_dbg(hsotg->dev,
"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
- ctrl, reg, dwc2_readl(hsotg->regs + reg));
+ ctrl, reg, dwc2_readl(hsotg, reg));
/*
* complete won't be called, so we enqueue
@@ -1819,11 +1826,11 @@ static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
switch (ctrl->bRequest) {
case USB_REQ_SET_ADDRESS:
hsotg->connected = 1;
- dcfg = dwc2_readl(hsotg->regs + DCFG);
+ dcfg = dwc2_readl(hsotg, DCFG);
dcfg &= ~DCFG_DEVADDR_MASK;
dcfg |= (le16_to_cpu(ctrl->wValue) <<
DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
- dwc2_writel(dcfg, hsotg->regs + DCFG);
+ dwc2_writel(hsotg, dcfg, DCFG);
dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
@@ -1949,16 +1956,16 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
} else {
- dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
- DXEPTSIZ_XFERSIZE(0), hsotg->regs +
+ dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(0),
epsiz_reg);
}
- ctrl = dwc2_readl(hsotg->regs + epctl_reg);
+ ctrl = dwc2_readl(hsotg, epctl_reg);
ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
ctrl |= DXEPCTL_USBACTEP;
- dwc2_writel(ctrl, hsotg->regs + epctl_reg);
+ dwc2_writel(hsotg, ctrl, epctl_reg);
}
/**
@@ -2118,13 +2125,12 @@ static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
struct dwc2_hsotg_req *hs_req = hs_ep->req;
- void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
int to_read;
int max_req;
int read_ptr;
if (!hs_req) {
- u32 epctl = dwc2_readl(hsotg->regs + DOEPCTL(ep_idx));
+ u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
int ptr;
dev_dbg(hsotg->dev,
@@ -2133,7 +2139,7 @@ static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
/* dump the data from the FIFO, we've nothing we can do */
for (ptr = 0; ptr < size; ptr += 4)
- (void)dwc2_readl(fifo);
+ (void)dwc2_readl(hsotg, EPFIFO(ep_idx));
return;
}
@@ -2163,7 +2169,8 @@ static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
* note, we might over-write the buffer end by 3 bytes depending on
* alignment of the data.
*/
- ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
+ dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
+ hs_req->req.buf + read_ptr, to_read);
}
/**
@@ -2192,12 +2199,12 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
{
u32 ctrl;
- ctrl = dwc2_readl(hsotg->regs + epctl_reg);
+ ctrl = dwc2_readl(hsotg, epctl_reg);
if (ctrl & DXEPCTL_EOFRNUM)
ctrl |= DXEPCTL_SETEVENFR;
else
ctrl |= DXEPCTL_SETODDFR;
- dwc2_writel(ctrl, hsotg->regs + epctl_reg);
+ dwc2_writel(hsotg, ctrl, epctl_reg);
}
/*
@@ -2241,7 +2248,7 @@ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
*/
static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
{
- u32 epsize = dwc2_readl(hsotg->regs + DOEPTSIZ(epnum));
+ u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
struct dwc2_hsotg_req *hs_req = hs_ep->req;
struct usb_request *req = &hs_req->req;
@@ -2337,7 +2344,7 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
*/
static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
{
- u32 grxstsr = dwc2_readl(hsotg->regs + GRXSTSP);
+ u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
u32 epnum, status, size;
WARN_ON(using_dma(hsotg));
@@ -2368,7 +2375,7 @@ static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev,
"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
dwc2_hsotg_read_frameno(hsotg),
- dwc2_readl(hsotg->regs + DOEPCTL(0)));
+ dwc2_readl(hsotg, DOEPCTL(0)));
/*
* Call dwc2_hsotg_handle_outdone here if it was not called from
* GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
@@ -2386,7 +2393,7 @@ static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev,
"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
dwc2_hsotg_read_frameno(hsotg),
- dwc2_readl(hsotg->regs + DOEPCTL(0)));
+ dwc2_readl(hsotg, DOEPCTL(0)));
WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
@@ -2440,7 +2447,6 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
unsigned int mc, unsigned int dir_in)
{
struct dwc2_hsotg_ep *hs_ep;
- void __iomem *regs = hsotg->regs;
u32 reg;
hs_ep = index_to_ep(hsotg, ep, dir_in);
@@ -2466,15 +2472,15 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
}
if (dir_in) {
- reg = dwc2_readl(regs + DIEPCTL(ep));
+ reg = dwc2_readl(hsotg, DIEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
reg |= mps;
- dwc2_writel(reg, regs + DIEPCTL(ep));
+ dwc2_writel(hsotg, reg, DIEPCTL(ep));
} else {
- reg = dwc2_readl(regs + DOEPCTL(ep));
+ reg = dwc2_readl(hsotg, DOEPCTL(ep));
reg &= ~DXEPCTL_MPS_MASK;
reg |= mps;
- dwc2_writel(reg, regs + DOEPCTL(ep));
+ dwc2_writel(hsotg, reg, DOEPCTL(ep));
}
return;
@@ -2490,8 +2496,8 @@ bad_mps:
*/
static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
{
- dwc2_writel(GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
- hsotg->regs + GRSTCTL);
+ dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
+ GRSTCTL);
/* wait until the fifo is flushed */
if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
@@ -2544,7 +2550,7 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg_req *hs_req = hs_ep->req;
- u32 epsize = dwc2_readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
+ u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
int size_left, size_done;
if (!hs_req) {
@@ -2648,12 +2654,12 @@ static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
u32 mask;
u32 diepempmsk;
- mask = dwc2_readl(hsotg->regs + epmsk_reg);
- diepempmsk = dwc2_readl(hsotg->regs + DIEPEMPMSK);
+ mask = dwc2_readl(hsotg, epmsk_reg);
+ diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
mask |= DXEPINT_SETUP_RCVD;
- ints = dwc2_readl(hsotg->regs + epint_reg);
+ ints = dwc2_readl(hsotg, epint_reg);
ints &= mask;
return ints;
}
@@ -2678,12 +2684,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
unsigned char idx = hs_ep->index;
int dir_in = hs_ep->dir_in;
u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
- int dctl = dwc2_readl(hsotg->regs + DCTL);
+ int dctl = dwc2_readl(hsotg, DCTL);
dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
if (dir_in) {
- int epctl = dwc2_readl(hsotg->regs + epctl_reg);
+ int epctl = dwc2_readl(hsotg, epctl_reg);
dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
@@ -2693,17 +2699,17 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
}
if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
- int dctl = dwc2_readl(hsotg->regs + DCTL);
+ int dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_CGNPINNAK;
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
}
return;
}
if (dctl & DCTL_GOUTNAKSTS) {
dctl |= DCTL_CGOUTNAK;
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
}
if (!hs_ep->isochronous)
@@ -2744,23 +2750,14 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
struct dwc2_hsotg *hsotg = ep->parent;
int dir_in = ep->dir_in;
u32 doepmsk;
- u32 tmp;
if (dir_in || !ep->isochronous)
return;
- /*
- * Store frame in which irq was asserted here, as
- * it can change while completing request below.
- */
- tmp = dwc2_hsotg_read_frameno(hsotg);
-
- dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
if (using_desc_dma(hsotg)) {
if (ep->target_frame == TARGET_FRAME_INITIAL) {
/* Start first ISO Out */
- ep->target_frame = tmp;
+ ep->target_frame = hsotg->frame_number;
dwc2_gadget_start_isoc_ddma(ep);
}
return;
@@ -2768,26 +2765,24 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
if (ep->interval > 1 &&
ep->target_frame == TARGET_FRAME_INITIAL) {
- u32 dsts;
u32 ctrl;
- dsts = dwc2_readl(hsotg->regs + DSTS);
- ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+ ep->target_frame = hsotg->frame_number;
dwc2_gadget_incr_frame_num(ep);
- ctrl = dwc2_readl(hsotg->regs + DOEPCTL(ep->index));
+ ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
if (ep->target_frame & 0x1)
ctrl |= DXEPCTL_SETODDFR;
else
ctrl |= DXEPCTL_SETEVENFR;
- dwc2_writel(ctrl, hsotg->regs + DOEPCTL(ep->index));
+ dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
}
dwc2_gadget_start_next_request(ep);
- doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
+ doepmsk = dwc2_readl(hsotg, DOEPMSK);
doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
- dwc2_writel(doepmsk, hsotg->regs + DOEPMSK);
+ dwc2_writel(hsotg, doepmsk, DOEPMSK);
}
/**
@@ -2808,34 +2803,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
- u32 tmp;
if (!dir_in || !hs_ep->isochronous)
return;
if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
- tmp = dwc2_hsotg_read_frameno(hsotg);
if (using_desc_dma(hsotg)) {
- dwc2_hsotg_complete_request(hsotg, hs_ep,
- get_ep_head(hs_ep), 0);
-
- hs_ep->target_frame = tmp;
+ hs_ep->target_frame = hsotg->frame_number;
dwc2_gadget_incr_frame_num(hs_ep);
dwc2_gadget_start_isoc_ddma(hs_ep);
return;
}
- hs_ep->target_frame = tmp;
+ hs_ep->target_frame = hsotg->frame_number;
if (hs_ep->interval > 1) {
- u32 ctrl = dwc2_readl(hsotg->regs +
+ u32 ctrl = dwc2_readl(hsotg,
DIEPCTL(hs_ep->index));
if (hs_ep->target_frame & 0x1)
ctrl |= DXEPCTL_SETODDFR;
else
ctrl |= DXEPCTL_SETEVENFR;
- dwc2_writel(ctrl, hsotg->regs + DIEPCTL(hs_ep->index));
+ dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
}
dwc2_hsotg_complete_request(hsotg, hs_ep,
@@ -2865,10 +2855,10 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
u32 ctrl;
ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
- ctrl = dwc2_readl(hsotg->regs + epctl_reg);
+ ctrl = dwc2_readl(hsotg, epctl_reg);
/* Clear endpoint interrupts */
- dwc2_writel(ints, hsotg->regs + epint_reg);
+ dwc2_writel(hsotg, ints, epint_reg);
if (!hs_ep) {
dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
@@ -2896,8 +2886,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (ints & DXEPINT_XFERCOMPL) {
dev_dbg(hsotg->dev,
"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
- __func__, dwc2_readl(hsotg->regs + epctl_reg),
- dwc2_readl(hsotg->regs + epsiz_reg));
+ __func__, dwc2_readl(hsotg, epctl_reg),
+ dwc2_readl(hsotg, epsiz_reg));
/* In DDMA handle isochronous requests separately */
if (using_desc_dma(hsotg) && hs_ep->isochronous) {
@@ -3015,7 +3005,7 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
*/
static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
{
- u32 dsts = dwc2_readl(hsotg->regs + DSTS);
+ u32 dsts = dwc2_readl(hsotg, DSTS);
int ep0_mps = 0, ep_mps = 8;
/*
@@ -3086,8 +3076,8 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
dwc2_hsotg_enqueue_setup(hsotg);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- dwc2_readl(hsotg->regs + DIEPCTL0),
- dwc2_readl(hsotg->regs + DOEPCTL0));
+ dwc2_readl(hsotg, DIEPCTL0),
+ dwc2_readl(hsotg, DOEPCTL0));
}
/**
@@ -3114,7 +3104,7 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
if (!hsotg->dedicated_fifos)
return;
- size = (dwc2_readl(hsotg->regs + DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
+ size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
if (size < ep->fifo_size)
dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
}
@@ -3215,7 +3205,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
*/
/* keep other bits untouched (so e.g. forced modes are not lost) */
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
@@ -3230,12 +3220,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
(val << GUSBCFG_USBTRDTIM_SHIFT);
}
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
dwc2_hsotg_init_fifo(hsotg);
if (!is_usb_reset)
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_SFTDISCON);
+ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
dcfg |= DCFG_EPMISCNT(1);
@@ -3256,13 +3246,13 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
if (hsotg->params.ipg_isoc_en)
dcfg |= DCFG_IPG_ISOC_SUPPORDED;
- dwc2_writel(dcfg, hsotg->regs + DCFG);
+ dwc2_writel(hsotg, dcfg, DCFG);
/* Clear any pending OTG interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
+ dwc2_writel(hsotg, 0xffffffff, GOTGINT);
/* Clear any pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
GINTSTS_USBRST | GINTSTS_RESETDET |
@@ -3276,22 +3266,22 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
if (!hsotg->params.external_id_pin_ctl)
intmsk |= GINTSTS_CONIDSTSCHNG;
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intmsk, GINTMSK);
if (using_dma(hsotg)) {
- dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
+ dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
hsotg->params.ahbcfg,
- hsotg->regs + GAHBCFG);
+ GAHBCFG);
/* Set DDMA mode support in the core if needed */
if (using_desc_dma(hsotg))
- dwc2_set_bit(hsotg->regs + DCFG, DCFG_DESCDMA_EN);
+ dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
} else {
- dwc2_writel(((hsotg->dedicated_fifos) ?
+ dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
(GAHBCFG_NP_TXF_EMP_LVL |
GAHBCFG_P_TXF_EMP_LVL) : 0) |
- GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
+ GAHBCFG_GLBL_INTR_EN, GAHBCFG);
}
/*
@@ -3300,33 +3290,33 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
* interrupts.
*/
- dwc2_writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
+ dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
- hsotg->regs + DIEPMSK);
+ DIEPMSK);
/*
* don't need XferCompl, we get that from RXFIFO in slave mode. In
* DMA mode we may need this and StsPhseRcvd.
*/
- dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
+ dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
DOEPMSK_STSPHSERCVDMSK) : 0) |
DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
DOEPMSK_SETUPMSK,
- hsotg->regs + DOEPMSK);
+ DOEPMSK);
/* Enable BNA interrupt for DDMA */
if (using_desc_dma(hsotg)) {
- dwc2_set_bit(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
- dwc2_set_bit(hsotg->regs + DIEPMSK, DIEPMSK_BNAININTRMSK);
+ dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
+ dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
}
- dwc2_writel(0, hsotg->regs + DAINTMSK);
+ dwc2_writel(hsotg, 0, DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- dwc2_readl(hsotg->regs + DIEPCTL0),
- dwc2_readl(hsotg->regs + DOEPCTL0));
+ dwc2_readl(hsotg, DIEPCTL0),
+ dwc2_readl(hsotg, DOEPCTL0));
/* enable in and out endpoint interrupts */
dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
@@ -3344,12 +3334,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
if (!is_usb_reset) {
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
+ dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
udelay(10); /* see openiboot */
- dwc2_clear_bit(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
+ dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
}
- dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg->regs + DCTL));
+ dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
/*
* DxEPCTL_USBActEp says RO in manual, but seems to be set by
@@ -3357,23 +3347,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
*/
/* set to read one 8-byte packet */
- dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
- DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0);
+ dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+ DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
- dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
+ dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
DXEPCTL_CNAK | DXEPCTL_EPENA |
DXEPCTL_USBACTEP,
- hsotg->regs + DOEPCTL0);
+ DOEPCTL0);
/* enable, but don't activate EP0in */
- dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
- DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
+ dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
+ DXEPCTL_USBACTEP, DIEPCTL0);
/* clear global NAKs */
val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
if (!is_usb_reset)
val |= DCTL_SFTDISCON;
- dwc2_set_bit(hsotg->regs + DCTL, val);
+ dwc2_set_bit(hsotg, DCTL, val);
/* configure the core to support LPM */
dwc2_gadget_init_lpm(hsotg);
@@ -3386,20 +3376,20 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_hsotg_enqueue_setup(hsotg);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- dwc2_readl(hsotg->regs + DIEPCTL0),
- dwc2_readl(hsotg->regs + DOEPCTL0));
+ dwc2_readl(hsotg, DIEPCTL0),
+ dwc2_readl(hsotg, DOEPCTL0));
}
static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
{
/* set the soft-disconnect bit */
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_SFTDISCON);
+ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
}
void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
{
/* remove the soft-disconnect and let's go */
- dwc2_clear_bit(hsotg->regs + DCTL, DCTL_SFTDISCON);
+ dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
}
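The dwc2_set_bit()/dwc2_clear_bit() calls likewise lose the raw address. Assuming they are plain read-modify-write wrappers over the accessors (a sketch, not the driver's guaranteed implementation), and reusing the model types from the earlier sketch:

static inline void dwc2_set_bit_model(struct dwc2_hsotg_model *hsotg,
				      uint32_t offset, uint32_t bits)
{
	dwc2_writel_model(hsotg,
			  dwc2_readl_model(hsotg, offset) | bits, offset);
}

static inline void dwc2_clear_bit_model(struct dwc2_hsotg_model *hsotg,
					uint32_t offset, uint32_t bits)
{
	dwc2_writel_model(hsotg,
			  dwc2_readl_model(hsotg, offset) & ~bits, offset);
}

With these, soft connect/disconnect reduces to setting or clearing DCTL_SFTDISCON through one call, exactly as the two functions above now read.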
/**
@@ -3424,25 +3414,25 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
- daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
+ daintmsk = dwc2_readl(hsotg, DAINTMSK);
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_in[idx];
/* Proceed only for unmasked ISOC EPs */
- if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
- epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
+ epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
if ((epctrl & DXEPCTL_EPENA) &&
dwc2_gadget_target_frame_elapsed(hs_ep)) {
epctrl |= DXEPCTL_SNAK;
epctrl |= DXEPCTL_EPDIS;
- dwc2_writel(epctrl, hsotg->regs + DIEPCTL(idx));
+ dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
}
}
/* Clear interrupt */
- dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
}
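Note the reordered skip condition in the loop above (it recurs twice more below): the DAINTMSK test now runs before the endpoint flag, so endpoints masked in DAINTMSK short-circuit out of the loop before any of their state is consulted. A standalone model of the filter, with that intent stated as an assumption:

#include <stdint.h>

/* return nonzero when endpoint 'idx' should be skipped: either its
 * interrupt is masked in 'daintmsk' or it is not isochronous */
static int skip_ep_model(uint32_t daintmsk, unsigned int idx,
			 int isochronous)
{
	return ((1u << idx) & ~daintmsk) || !isochronous;
}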
/**
@@ -3469,33 +3459,33 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
- daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
+ daintmsk = dwc2_readl(hsotg, DAINTMSK);
daintmsk >>= DAINT_OUTEP_SHIFT;
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only for unmasked ISOC EPs */
- if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
- epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+ epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
if ((epctrl & DXEPCTL_EPENA) &&
dwc2_gadget_target_frame_elapsed(hs_ep)) {
/* Unmask GOUTNAKEFF interrupt */
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_GOUTNAKEFF;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
- gintsts = dwc2_readl(hsotg->regs + GINTSTS);
+ gintsts = dwc2_readl(hsotg, GINTSTS);
if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+ dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
break;
}
}
}
/* Clear interrupt */
- dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
}
/**
@@ -3515,8 +3505,8 @@ static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
spin_lock(&hsotg->lock);
irq_retry:
- gintsts = dwc2_readl(hsotg->regs + GINTSTS);
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
@@ -3526,7 +3516,7 @@ irq_retry:
if (gintsts & GINTSTS_RESETDET) {
dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
- dwc2_writel(GINTSTS_RESETDET, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
/* This event must be used only if controller is suspended */
if (hsotg->lx_state == DWC2_L2) {
@@ -3536,34 +3526,34 @@ irq_retry:
}
if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
- u32 usb_status = dwc2_readl(hsotg->regs + GOTGCTL);
+ u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
u32 connected = hsotg->connected;
dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
- dwc2_readl(hsotg->regs + GNPTXSTS));
+ dwc2_readl(hsotg, GNPTXSTS));
- dwc2_writel(GINTSTS_USBRST, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
/* Report disconnection if it is not already done. */
dwc2_hsotg_disconnect(hsotg);
/* Reset device address to zero */
- dwc2_clear_bit(hsotg->regs + DCFG, DCFG_DEVADDR_MASK);
+ dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
if (usb_status & GOTGCTL_BSESVLD && connected)
dwc2_hsotg_core_init_disconnected(hsotg, true);
}
if (gintsts & GINTSTS_ENUMDONE) {
- dwc2_writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
dwc2_hsotg_irq_enumdone(hsotg);
}
if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
- u32 daint = dwc2_readl(hsotg->regs + DAINT);
- u32 daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
+ u32 daint = dwc2_readl(hsotg, DAINT);
+ u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
u32 daint_out, daint_in;
int ep;
@@ -3622,7 +3612,7 @@ irq_retry:
if (gintsts & GINTSTS_ERLYSUSP) {
dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
- dwc2_writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
}
/*
@@ -3638,26 +3628,26 @@ irq_retry:
u32 daintmsk;
struct dwc2_hsotg_ep *hs_ep;
- daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
+ daintmsk = dwc2_readl(hsotg, DAINTMSK);
daintmsk >>= DAINT_OUTEP_SHIFT;
/* Mask this interrupt */
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_GOUTNAKEFF;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only for unmasked ISOC EPs */
- if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
- epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+ epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
if (epctrl & DXEPCTL_EPENA) {
epctrl |= DXEPCTL_SNAK;
epctrl |= DXEPCTL_EPDIS;
- dwc2_writel(epctrl, hsotg->regs + DOEPCTL(idx));
+ dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
}
}
@@ -3667,7 +3657,7 @@ irq_retry:
if (gintsts & GINTSTS_GINNAKEFF) {
dev_info(hsotg->dev, "GINNakEff triggered\n");
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_CGNPINNAK);
+ dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
dwc2_hsotg_dump(hsotg);
}
@@ -3707,7 +3697,7 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
if (hs_ep->dir_in) {
if (hsotg->dedicated_fifos || hs_ep->periodic) {
- dwc2_set_bit(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
+ dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
/* Wait for Nak effect */
if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
DXEPINT_INEPNAKEFF, 100))
@@ -3715,7 +3705,7 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
"%s: timeout DIEPINT.NAKEFF\n",
__func__);
} else {
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_SGNPINNAK);
+ dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
/* Wait for Nak effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
GINTSTS_GINNAKEFF, 100))
@@ -3724,8 +3714,8 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
__func__);
}
} else {
- if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+ if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
+ dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
/* Wait for global nak to take effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
@@ -3735,7 +3725,7 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
}
/* Disable ep */
- dwc2_set_bit(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
+ dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
/* Wait for ep to be disabled */
if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
@@ -3743,7 +3733,7 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
"%s: timeout DOEPCTL.EPDisable\n", __func__);
/* Clear EPDISBLD interrupt */
- dwc2_set_bit(hsotg->regs + epint_reg, DXEPINT_EPDISBLD);
+ dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
if (hs_ep->dir_in) {
unsigned short fifo_index;
@@ -3758,11 +3748,11 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
if (!hsotg->dedicated_fifos && !hs_ep->periodic)
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_CGNPINNAK);
+ dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
} else {
/* Remove global NAKs */
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+ dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
}
}
@@ -3830,7 +3820,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
- epctrl = dwc2_readl(hsotg->regs + epctrl_reg);
+ epctrl = dwc2_readl(hsotg, epctrl_reg);
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
__func__, epctrl, epctrl_reg);
@@ -3878,13 +3868,13 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->compl_desc = 0;
if (dir_in) {
hs_ep->periodic = 1;
- mask = dwc2_readl(hsotg->regs + DIEPMSK);
+ mask = dwc2_readl(hsotg, DIEPMSK);
mask |= DIEPMSK_NAKMSK;
- dwc2_writel(mask, hsotg->regs + DIEPMSK);
+ dwc2_writel(hsotg, mask, DIEPMSK);
} else {
- mask = dwc2_readl(hsotg->regs + DOEPMSK);
+ mask = dwc2_readl(hsotg, DOEPMSK);
mask |= DOEPMSK_OUTTKNEPDISMSK;
- dwc2_writel(mask, hsotg->regs + DOEPMSK);
+ dwc2_writel(hsotg, mask, DOEPMSK);
}
break;
@@ -3919,7 +3909,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
for (i = 1; i < hsotg->num_of_eps; ++i) {
if (hsotg->fifo_map & (1 << i))
continue;
- val = dwc2_readl(hsotg->regs + DPTXFSIZN(i));
+ val = dwc2_readl(hsotg, DPTXFSIZN(i));
val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
if (val < size)
continue;
@@ -3957,7 +3947,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
* to 4.00a (including both). Also for FS_IOT_1.00a
* and HS_IOT_1.00a.
*/
- u32 gsnpsid = dwc2_readl(hsotg->regs + GSNPSID);
+ u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
gsnpsid <= DWC2_CORE_REV_4_00a) ||
@@ -3969,9 +3959,9 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
__func__, epctrl);
- dwc2_writel(epctrl, hsotg->regs + epctrl_reg);
+ dwc2_writel(hsotg, epctrl, epctrl_reg);
dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
- __func__, dwc2_readl(hsotg->regs + epctrl_reg));
+ __func__, dwc2_readl(hsotg, epctrl_reg));
/* enable the endpoint interrupt */
dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
@@ -4020,7 +4010,7 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
spin_lock_irqsave(&hsotg->lock, flags);
- ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
+ ctrl = dwc2_readl(hsotg, epctrl_reg);
if (ctrl & DXEPCTL_EPENA)
dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
@@ -4030,7 +4020,7 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
ctrl |= DXEPCTL_SNAK;
dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
- dwc2_writel(ctrl, hsotg->regs + epctrl_reg);
+ dwc2_writel(hsotg, ctrl, epctrl_reg);
/* disable endpoint interrupts */
dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
@@ -4137,7 +4127,7 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
if (hs_ep->dir_in) {
epreg = DIEPCTL(index);
- epctl = dwc2_readl(hs->regs + epreg);
+ epctl = dwc2_readl(hs, epreg);
if (value) {
epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
@@ -4150,10 +4140,10 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
xfertype == DXEPCTL_EPTYPE_INTERRUPT)
epctl |= DXEPCTL_SETD0PID;
}
- dwc2_writel(epctl, hs->regs + epreg);
+ dwc2_writel(hs, epctl, epreg);
} else {
epreg = DOEPCTL(index);
- epctl = dwc2_readl(hs->regs + epreg);
+ epctl = dwc2_readl(hs, epreg);
if (value) {
epctl |= DXEPCTL_STALL;
@@ -4164,7 +4154,7 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
xfertype == DXEPCTL_EPTYPE_INTERRUPT)
epctl |= DXEPCTL_SETD0PID;
}
- dwc2_writel(epctl, hs->regs + epreg);
+ dwc2_writel(hs, epctl, epreg);
}
hs_ep->halted = value;
@@ -4212,29 +4202,29 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
u32 usbcfg;
/* unmask subset of endpoint interrupts */
- dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
+ dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
- hsotg->regs + DIEPMSK);
+ DIEPMSK);
- dwc2_writel(DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
+ dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
- hsotg->regs + DOEPMSK);
+ DOEPMSK);
- dwc2_writel(0, hsotg->regs + DAINTMSK);
+ dwc2_writel(hsotg, 0, DAINTMSK);
/* Be in disconnected state until gadget is registered */
- dwc2_set_bit(hsotg->regs + DCTL, DCTL_SFTDISCON);
+ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
/* setup fifos */
dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
- dwc2_readl(hsotg->regs + GRXFSIZ),
- dwc2_readl(hsotg->regs + GNPTXFSIZ));
+ dwc2_readl(hsotg, GRXFSIZ),
+ dwc2_readl(hsotg, GNPTXFSIZ));
dwc2_hsotg_init_fifo(hsotg);
/* keep other bits untouched (so e.g. forced modes are not lost) */
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
@@ -4242,10 +4232,10 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
(trdtim << GUSBCFG_USBTRDTIM_SHIFT);
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
if (using_dma(hsotg))
- dwc2_set_bit(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
+ dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
}
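The GUSBCFG update in the init path is a classic preserve-and-set sequence: read the register, clear only the fields being reprogrammed, OR in the new values, write back. In the accessor model (the 0x00c offset is assumed here for illustration):

#define GUSBCFG_MODEL	0x00c	/* assumed GUSBCFG offset, illustrative */

static void update_usbcfg_model(struct dwc2_hsotg_model *hsotg,
				uint32_t clear_mask, uint32_t set_bits)
{
	uint32_t usbcfg = dwc2_readl_model(hsotg, GUSBCFG_MODEL);

	usbcfg &= ~clear_mask;	/* drop only the fields being reprogrammed */
	usbcfg |= set_bits;
	dwc2_writel_model(hsotg, usbcfg, GUSBCFG_MODEL);
}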
/**
@@ -4535,9 +4525,9 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
if (dir_in)
- dwc2_writel(next, hsotg->regs + DIEPCTL(epnum));
+ dwc2_writel(hsotg, next, DIEPCTL(epnum));
else
- dwc2_writel(next, hsotg->regs + DOEPCTL(epnum));
+ dwc2_writel(hsotg, next, DOEPCTL(epnum));
}
}
@@ -4606,24 +4596,23 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
struct device *dev = hsotg->dev;
- void __iomem *regs = hsotg->regs;
u32 val;
int idx;
dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
- dwc2_readl(regs + DCFG), dwc2_readl(regs + DCTL),
- dwc2_readl(regs + DIEPMSK));
+ dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
+ dwc2_readl(hsotg, DIEPMSK));
dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
- dwc2_readl(regs + GAHBCFG), dwc2_readl(regs + GHWCFG1));
+ dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
- dwc2_readl(regs + GRXFSIZ), dwc2_readl(regs + GNPTXFSIZ));
+ dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
/* show periodic fifo settings */
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
- val = dwc2_readl(regs + DPTXFSIZN(idx));
+ val = dwc2_readl(hsotg, DPTXFSIZN(idx));
dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
val >> FIFOSIZE_DEPTH_SHIFT,
val & FIFOSIZE_STARTADDR_MASK);
@@ -4632,20 +4621,20 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
for (idx = 0; idx < hsotg->num_of_eps; idx++) {
dev_info(dev,
"ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
- dwc2_readl(regs + DIEPCTL(idx)),
- dwc2_readl(regs + DIEPTSIZ(idx)),
- dwc2_readl(regs + DIEPDMA(idx)));
+ dwc2_readl(hsotg, DIEPCTL(idx)),
+ dwc2_readl(hsotg, DIEPTSIZ(idx)),
+ dwc2_readl(hsotg, DIEPDMA(idx)));
- val = dwc2_readl(regs + DOEPCTL(idx));
+ val = dwc2_readl(hsotg, DOEPCTL(idx));
dev_info(dev,
"ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
- idx, dwc2_readl(regs + DOEPCTL(idx)),
- dwc2_readl(regs + DOEPTSIZ(idx)),
- dwc2_readl(regs + DOEPDMA(idx)));
+ idx, dwc2_readl(hsotg, DOEPCTL(idx)),
+ dwc2_readl(hsotg, DOEPTSIZ(idx)),
+ dwc2_readl(hsotg, DOEPDMA(idx)));
}
dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
- dwc2_readl(regs + DVBUSDIS), dwc2_readl(regs + DVBUSPULSE));
+ dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
#endif
}
@@ -4739,9 +4728,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
}
ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret)
+ if (ret) {
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+ hsotg->ctrl_req);
return ret;
-
+ }
dwc2_hsotg_dump(hsotg);
return 0;
@@ -4755,6 +4746,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
usb_del_gadget_udc(&hsotg->gadget);
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
return 0;
}
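The two hunks above pair the earlier ctrl_req allocation with a release on the usb_add_gadget_udc() failure path and in dwc2_hsotg_remove(), closing a leak in both directions. A compilable sketch of the pattern with hypothetical stand-in helpers (none of these names exist in the driver):

#include <stdlib.h>

static void *example_alloc_request(void) { return malloc(64); }
static void example_free_request(void *req) { free(req); }
static int example_register_udc(void) { return 0; }

static int example_gadget_init(void **ctrl_req_out)
{
	void *ctrl_req = example_alloc_request();
	int ret;

	if (!ctrl_req)
		return -1;

	ret = example_register_udc();
	if (ret) {
		example_free_request(ctrl_req);	/* undo on failure */
		return ret;
	}
	*ctrl_req_out = ctrl_req;
	return 0;
}

static void example_gadget_remove(void *ctrl_req)
{
	example_free_request(ctrl_req);	/* mirror of init's allocation */
}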
@@ -4831,15 +4823,15 @@ int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
/* Backup dev regs */
dr = &hsotg->dr_backup;
- dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
- dr->dctl = dwc2_readl(hsotg->regs + DCTL);
- dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
- dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
- dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
+ dr->dcfg = dwc2_readl(hsotg, DCFG);
+ dr->dctl = dwc2_readl(hsotg, DCTL);
+ dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
+ dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
+ dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
for (i = 0; i < hsotg->num_of_eps; i++) {
/* Backup IN EPs */
- dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));
+ dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
/* Ensure DATA PID is correctly configured */
if (dr->diepctl[i] & DXEPCTL_DPID)
@@ -4847,11 +4839,11 @@ int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
else
dr->diepctl[i] |= DXEPCTL_SETD0PID;
- dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
- dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));
+ dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
+ dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
/* Backup OUT EPs */
- dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));
+ dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
/* Ensure DATA PID is correctly configured */
if (dr->doepctl[i] & DXEPCTL_DPID)
@@ -4859,9 +4851,9 @@ int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
else
dr->doepctl[i] |= DXEPCTL_SETD0PID;
- dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
- dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
- dr->dtxfsiz[i] = dwc2_readl(hsotg->regs + DPTXFSIZN(i));
+ dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
+ dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
+ dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
}
dr->valid = true;
return 0;
@@ -4894,17 +4886,17 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
dr->valid = false;
if (!remote_wakeup)
- dwc2_writel(dr->dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dr->dctl, DCTL);
- dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
- dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
- dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);
+ dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
+ dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
+ dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
for (i = 0; i < hsotg->num_of_eps; i++) {
/* Restore IN EPs */
- dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
- dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
- dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
+ dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
+ dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
+ dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
/** WA for enabled EPx's IN in DDMA mode. On entering
* hibernation, a wrong value is read and saved from DIEPDMAx;
* as a result, a BNA interrupt is asserted on hibernation exit
@@ -4913,10 +4905,10 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
if (hsotg->params.g_dma_desc &&
(dr->diepctl[i] & DXEPCTL_EPENA))
dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
- dwc2_writel(dr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));
- dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
+ dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
+ dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
/* Restore OUT EPs */
- dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
+ dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
/* WA for enabled EPx's OUT in DDMA mode. On entering
* hibernation, a wrong value is read and saved from DOEPDMAx;
* as a result, a BNA interrupt is asserted on hibernation exit
@@ -4925,8 +4917,8 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
if (hsotg->params.g_dma_desc &&
(dr->doepctl[i] & DXEPCTL_EPENA))
dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
- dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
- dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
+ dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
+ dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
}
return 0;
@@ -4950,9 +4942,8 @@ void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
- dwc2_writel(val, hsotg->regs + GLPMCFG);
- dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg->regs
- + GLPMCFG));
+ dwc2_writel(hsotg, val, GLPMCFG);
+ dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
}
/**
@@ -4985,40 +4976,40 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
gpwrdn = GPWRDN_PWRDNRSTN;
gpwrdn |= GPWRDN_PMUACTV;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Set flag to indicate that we are in hibernation */
hsotg->hibernated = 1;
/* Enable interrupts from wake up logic */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUINTSEL;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Unmask device mode interrupts in GPWRDN */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_RST_DET_MSK;
gpwrdn |= GPWRDN_LNSTSCHG_MSK;
gpwrdn |= GPWRDN_STS_CHGINT_MSK;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Enable Power Down Clamp */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNCLMP;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Switch off VDD */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNSWTCH;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Save gpwrdn register for later use in case of a stschng interrupt */
- hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
dev_dbg(hsotg->dev, "Hibernation completed\n");
return ret;
@@ -5060,46 +5051,46 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
if (!reset) {
/* Clear all pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
}
/* De-assert Restore */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_RESTORE;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
if (!rem_wakeup) {
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
}
/* Restore GUSBCFG, DCFG and DCTL */
- dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
- dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
- dwc2_writel(dr->dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+ dwc2_writel(hsotg, dr->dcfg, DCFG);
+ dwc2_writel(hsotg, dr->dctl, DCTL);
/* De-assert Wakeup Logic */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PMUACTV;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
if (rem_wakeup) {
udelay(10);
/* Start Remote Wakeup Signaling */
- dwc2_writel(dr->dctl | DCTL_RMTWKUPSIG, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
} else {
udelay(50);
/* Set Device programming done bit */
- dctl = dwc2_readl(hsotg->regs + DCTL);
+ dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_PWRONPRGDONE;
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
}
/* Wait for interrupts which must be cleared */
mdelay(2);
/* Clear all pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
ret = dwc2_restore_global_registers(hsotg);
@@ -5119,9 +5110,9 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
if (rem_wakeup) {
mdelay(10);
- dctl = dwc2_readl(hsotg->regs + DCTL);
+ dctl = dwc2_readl(hsotg, DCTL);
dctl &= ~DCTL_RMTWKUPSIG;
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
}
hsotg->hibernated = 0;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index edaf0b6af4f0..2bd6e6bfc241 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -75,10 +75,10 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
u32 intmsk;
/* Clear any pending OTG Interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
+ dwc2_writel(hsotg, 0xffffffff, GOTGINT);
/* Clear any pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Enable the interrupts in the GINTMSK */
intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
@@ -94,7 +94,7 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
if (dwc2_is_device_mode(hsotg) && hsotg->params.lpm)
intmsk |= GINTSTS_LPMTRANRCVD;
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intmsk, GINTMSK);
}
/*
@@ -117,10 +117,10 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
}
dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
}
static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
@@ -135,10 +135,10 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (select_phy) {
dev_dbg(hsotg->dev, "FS PHY selected\n");
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
if (!(usbcfg & GUSBCFG_PHYSEL)) {
usbcfg |= GUSBCFG_PHYSEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Reset after a PHY select */
retval = dwc2_core_reset(hsotg, false);
@@ -151,7 +151,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
}
if (hsotg->params.activate_stm_fs_transceiver) {
- ggpio = dwc2_readl(hsotg->regs + GGPIO);
+ ggpio = dwc2_readl(hsotg, GGPIO);
if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
dev_dbg(hsotg->dev, "Activating transceiver\n");
/*
@@ -159,7 +159,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
* core configuration register.
*/
ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
- dwc2_writel(ggpio, hsotg->regs + GGPIO);
+ dwc2_writel(hsotg, ggpio, GGPIO);
}
}
}
@@ -176,18 +176,18 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
/* Program GUSBCFG.OtgUtmiFsSel to I2C */
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Program GI2CCTL.I2CEn */
- i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
+ i2cctl = dwc2_readl(hsotg, GI2CCTL);
i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
i2cctl &= ~GI2CCTL_I2CEN;
- dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
i2cctl |= GI2CCTL_I2CEN;
- dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
+ dwc2_writel(hsotg, i2cctl, GI2CCTL);
}
return retval;
@@ -201,7 +201,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
if (!select_phy)
return 0;
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg_old = usbcfg;
/*
@@ -236,7 +236,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
}
if (usbcfg != usbcfg_old) {
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Reset after setting the PHY parameters */
retval = dwc2_core_reset(hsotg, false);
@@ -273,15 +273,15 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
hsotg->params.ulpi_fs_ls) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_ULPI_FS_LS;
usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
} else {
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~GUSBCFG_ULPI_FS_LS;
usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
return retval;
@@ -289,7 +289,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
- u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
+ u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
switch (hsotg->hw_params.arch) {
case GHWCFG2_EXT_DMA_ARCH:
@@ -316,7 +316,7 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
else
hsotg->params.dma_desc_enable = false;
- dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
return 0;
}
@@ -325,7 +325,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
u32 usbcfg;
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
switch (hsotg->hw_params.op_mode) {
@@ -353,7 +353,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
break;
}
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
@@ -390,16 +390,16 @@ static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s()\n", __func__);
/* Disable all interrupts */
- dwc2_writel(0, hsotg->regs + GINTMSK);
- dwc2_writel(0, hsotg->regs + HAINTMSK);
+ dwc2_writel(hsotg, 0, GINTMSK);
+ dwc2_writel(hsotg, 0, HAINTMSK);
/* Enable the common interrupts */
dwc2_enable_common_interrupts(hsotg);
/* Enable host mode interrupts without disturbing common interrupts */
- intmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ intmsk = dwc2_readl(hsotg, GINTMSK);
intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intmsk, GINTMSK);
}
/**
@@ -409,12 +409,12 @@ static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
*/
static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
- u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ u32 intmsk = dwc2_readl(hsotg, GINTMSK);
/* Disable host mode interrupts without disturbing common interrupts */
intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intmsk, GINTMSK);
}
/*
@@ -494,37 +494,37 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
dwc2_calculate_dynamic_fifo(hsotg);
/* Rx FIFO */
- grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
+ grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
grxfsiz |= params->host_rx_fifo_size <<
GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
- dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
+ dwc2_writel(hsotg, grxfsiz, GRXFSIZ);
dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + GRXFSIZ));
+ dwc2_readl(hsotg, GRXFSIZ));
/* Non-periodic Tx FIFO */
dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + GNPTXFSIZ));
+ dwc2_readl(hsotg, GNPTXFSIZ));
nptxfsiz = params->host_nperio_tx_fifo_size <<
FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
nptxfsiz |= params->host_rx_fifo_size <<
FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
- dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
+ dwc2_writel(hsotg, nptxfsiz, GNPTXFSIZ);
dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + GNPTXFSIZ));
+ dwc2_readl(hsotg, GNPTXFSIZ));
/* Periodic Tx FIFO */
dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + HPTXFSIZ));
+ dwc2_readl(hsotg, HPTXFSIZ));
hptxfsiz = params->host_perio_tx_fifo_size <<
FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
hptxfsiz |= (params->host_rx_fifo_size +
params->host_nperio_tx_fifo_size) <<
FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
- dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
+ dwc2_writel(hsotg, hptxfsiz, HPTXFSIZ);
dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + HPTXFSIZ));
+ dwc2_readl(hsotg, HPTXFSIZ));
if (hsotg->params.en_multiple_tx_fifo &&
hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
@@ -533,14 +533,14 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
* Global DFIFOCFG calculation for Host mode -
* include RxFIFO, NPTXFIFO and HPTXFIFO
*/
- dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
+ dfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
dfifocfg |= (params->host_rx_fifo_size +
params->host_nperio_tx_fifo_size +
params->host_perio_tx_fifo_size) <<
GDFIFOCFG_EPINFOBASE_SHIFT &
GDFIFOCFG_EPINFOBASE_MASK;
- dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
+ dwc2_writel(hsotg, dfifocfg, GDFIFOCFG);
}
}
@@ -560,8 +560,8 @@ u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
u32 hprt0;
int clock = 60; /* default value */
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
!(usbcfg & GUSBCFG_PHYIF16))
@@ -603,7 +603,6 @@ u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
*/
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
- u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
u32 *data_buf = (u32 *)dest;
int word_count = (bytes + 3) / 4;
int i;
@@ -617,7 +616,7 @@ void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
for (i = 0; i < word_count; i++, data_buf++)
- *data_buf = dwc2_readl(fifo);
+ *data_buf = dwc2_readl(hsotg, HCFIFO(0));
}
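dwc2_read_packet() above drops its cached FIFO pointer and goes through the accessor on every iteration; functionally it is still a word-at-a-time drain of a single FIFO address. A model of the loop, reusing dwc2_readl_model and assuming a word-aligned destination as the driver does; HCFIFO_MODEL is an illustrative offset:

#define HCFIFO_MODEL	0x1000	/* illustrative FIFO offset */

static void read_packet_model(struct dwc2_hsotg_model *hsotg,
			      uint8_t *dest, uint16_t bytes)
{
	uint32_t *buf = (uint32_t *)dest;
	int words = (bytes + 3) / 4;	/* round up to whole words */
	int i;

	for (i = 0; i < words; i++)
		buf[i] = dwc2_readl_model(hsotg, HCFIFO_MODEL);
}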
/**
@@ -646,10 +645,10 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
if (!chan)
return;
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
- hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chan->hc_num));
- hc_dma = dwc2_readl(hsotg->regs + HCDMA(chan->hc_num));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
+ hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chan->hc_num));
+ hc_dma = dwc2_readl(hsotg, HCDMA(chan->hc_num));
dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan);
dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n",
@@ -797,7 +796,7 @@ static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
break;
}
- dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
+ dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}
@@ -834,7 +833,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
}
}
- dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
+ dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}
@@ -855,16 +854,16 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
}
/* Enable the top level host channel interrupt */
- intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
+ intmsk = dwc2_readl(hsotg, HAINTMSK);
intmsk |= 1 << chan->hc_num;
- dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
+ dwc2_writel(hsotg, intmsk, HAINTMSK);
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
/* Make sure host channel interrupts are enabled */
- intmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ intmsk = dwc2_readl(hsotg, GINTMSK);
intmsk |= GINTSTS_HCHINT;
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intmsk, GINTMSK);
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}
@@ -893,7 +892,7 @@ static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
/* Clear old interrupt conditions for this host channel */
hcintmsk = 0xffffffff;
hcintmsk &= ~HCINTMSK_RESERVED14_31;
- dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));
+ dwc2_writel(hsotg, hcintmsk, HCINT(hc_num));
/* Enable channel interrupts required for this transfer */
dwc2_hc_enable_ints(hsotg, chan);
@@ -910,7 +909,7 @@ static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
hcchar |= HCCHAR_LSPDDEV;
hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
+ dwc2_writel(hsotg, hcchar, HCCHAR(hc_num));
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
hc_num, hcchar);
@@ -964,7 +963,7 @@ static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
}
}
- dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
+ dwc2_writel(hsotg, hcsplt, HCSPLT(hc_num));
}
/**
@@ -1034,14 +1033,14 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
u32 hcintmsk = HCINTMSK_CHHLTD;
dev_vdbg(hsotg->dev, "dequeue/error\n");
- dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
+ dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
/*
* Make sure no other interrupts besides halt are currently
* pending. Handling another interrupt could cause a crash due
* to the QTD and QH state.
*/
- dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
+ dwc2_writel(hsotg, ~hcintmsk, HCINT(chan->hc_num));
/*
* Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
@@ -1050,7 +1049,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
*/
chan->halt_status = halt_status;
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
if (!(hcchar & HCCHAR_CHENA)) {
/*
* The channel is either already halted or it hasn't
@@ -1078,7 +1077,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
return;
}
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
/* No need to set the bit in DDMA for disabling the channel */
/* TODO check it everywhere channel is disabled */
@@ -1101,7 +1100,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
chan->ep_type == USB_ENDPOINT_XFER_BULK) {
dev_vdbg(hsotg->dev, "control/bulk\n");
- nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
+ nptxsts = dwc2_readl(hsotg, GNPTXSTS);
if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
dev_vdbg(hsotg->dev, "Disabling channel\n");
hcchar &= ~HCCHAR_CHENA;
@@ -1109,7 +1108,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
} else {
if (dbg_perio())
dev_vdbg(hsotg->dev, "isoc/intr\n");
- hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
+ hptxsts = dwc2_readl(hsotg, HPTXSTS);
if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
hsotg->queuing_high_bandwidth) {
if (dbg_perio())
@@ -1122,7 +1121,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
dev_vdbg(hsotg->dev, "DMA enabled\n");
}
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
chan->halt_status = halt_status;
if (hcchar & HCCHAR_CHENA) {
@@ -1171,10 +1170,10 @@ void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
* Clear channel interrupt enables and any unhandled channel interrupt
* conditions
*/
- dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
+ dwc2_writel(hsotg, 0, HCINTMSK(chan->hc_num));
hcintmsk = 0xffffffff;
hcintmsk &= ~HCINTMSK_RESERVED14_31;
- dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
+ dwc2_writel(hsotg, hcintmsk, HCINT(chan->hc_num));
}
/**
@@ -1228,7 +1227,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
!chan->do_split) ? chan->speed : USB_SPEED_HIGH;
/* See how many bytes are in the periodic FIFO right now */
- fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
+ fifo_space = (dwc2_readl(hsotg, HPTXSTS) &
TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
bytes_in_fifo = sizeof(u32) *
(hsotg->params.host_perio_tx_fifo_size -
@@ -1348,13 +1347,13 @@ static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
if (((unsigned long)data_buf & 0x3) == 0) {
/* xfer_buf is DWORD aligned */
for (i = 0; i < dword_count; i++, data_buf++)
- dwc2_writel(*data_buf, data_fifo);
+ dwc2_writel(hsotg, *data_buf, HCFIFO(chan->hc_num));
} else {
/* xfer_buf is not DWORD aligned */
for (i = 0; i < dword_count; i++, data_buf++) {
u32 data = data_buf[0] | data_buf[1] << 8 |
data_buf[2] << 16 | data_buf[3] << 24;
- dwc2_writel(data, data_fifo);
+ dwc2_writel(hsotg, data, HCFIFO(chan->hc_num));
}
}
@@ -1383,12 +1382,12 @@ static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
hctsiz = TSIZ_DOPNG;
hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
- dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
+ dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcchar |= HCCHAR_CHENA;
hcchar &= ~HCCHAR_CHDIS;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
}
/**
@@ -1548,7 +1547,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
TSIZ_SC_MC_PID_MASK;
- dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
+ dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
if (dbg_hc(chan)) {
dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
hctsiz, chan->hc_num);
@@ -1567,22 +1566,31 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
}
if (hsotg->params.host_dma) {
- dwc2_writel((u32)chan->xfer_dma,
- hsotg->regs + HCDMA(chan->hc_num));
+ dma_addr_t dma_addr;
+
+ if (chan->align_buf) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "align_buf\n");
+ dma_addr = chan->align_buf;
+ } else {
+ dma_addr = chan->xfer_dma;
+ }
+ dwc2_writel(hsotg, (u32)dma_addr, HCDMA(chan->hc_num));
+
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
- (unsigned long)chan->xfer_dma, chan->hc_num);
+ (unsigned long)dma_addr, chan->hc_num);
}
/* Start the split */
if (chan->do_split) {
- u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
+ u32 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
hcsplt |= HCSPLT_SPLTENA;
- dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
+ dwc2_writel(hsotg, hcsplt, HCSPLT(chan->hc_num));
}
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcchar &= ~HCCHAR_MULTICNT_MASK;
hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
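The hunk above also changes what gets programmed into HCDMA: when an aligned bounce buffer (chan->align_buf) is in use, its DMA address is written instead of the caller's possibly-unaligned mapping. A self-contained model of just that selection, with simplified stand-in types:

typedef unsigned long dma_addr_model_t;	/* stand-in for dma_addr_t */

struct host_chan_model {
	dma_addr_model_t xfer_dma;	/* caller's buffer mapping */
	dma_addr_model_t align_buf;	/* non-zero when a bounce copy is used */
};

static dma_addr_model_t pick_hcdma_addr(const struct host_chan_model *chan)
{
	/* prefer the aligned copy; fall back to the original mapping */
	return chan->align_buf ? chan->align_buf : chan->xfer_dma;
}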
@@ -1601,7 +1609,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
(hcchar & HCCHAR_MULTICNT_MASK) >>
HCCHAR_MULTICNT_SHIFT);
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
chan->hc_num);
@@ -1659,18 +1667,18 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
}
- dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
+ dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
chan->desc_list_sz, DMA_TO_DEVICE);
- dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));
+ dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
&chan->desc_list_addr, chan->hc_num);
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
hcchar &= ~HCCHAR_MULTICNT_MASK;
hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
HCCHAR_MULTICNT_MASK;
@@ -1689,7 +1697,7 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
(hcchar & HCCHAR_MULTICNT_MASK) >>
HCCHAR_MULTICNT_SHIFT);
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
chan->hc_num);
@@ -1746,7 +1754,7 @@ static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
* transfer completes, the extra requests for the channel will
* be flushed.
*/
- u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
hcchar |= HCCHAR_CHENA;
@@ -1754,7 +1762,7 @@ static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
hcchar);
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
chan->requests++;
return 1;
}
@@ -1764,7 +1772,7 @@ static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
if (chan->xfer_count < chan->xfer_len) {
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
- u32 hcchar = dwc2_readl(hsotg->regs +
+ u32 hcchar = dwc2_readl(hsotg,
HCCHAR(chan->hc_num));
dwc2_hc_set_even_odd_frame(hsotg, chan,
@@ -1878,7 +1886,7 @@ void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
*/
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_RST;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
}
queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
@@ -1899,11 +1907,11 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
channel = hsotg->hc_ptr_array[i];
if (!list_empty(&channel->hc_list_entry))
continue;
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
hcchar |= HCCHAR_CHDIS;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
}
}
}
@@ -1912,11 +1920,11 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
channel = hsotg->hc_ptr_array[i];
if (!list_empty(&channel->hc_list_entry))
continue;
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
if (hcchar & HCCHAR_CHENA) {
/* Halt the channel */
hcchar |= HCCHAR_CHDIS;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
}
dwc2_hc_cleanup(hsotg, channel);
@@ -1976,11 +1984,11 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
* interrupt mask and status bits and disabling subsequent host
* channel interrupts.
*/
- intr = dwc2_readl(hsotg->regs + GINTMSK);
+ intr = dwc2_readl(hsotg, GINTMSK);
intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
- dwc2_writel(intr, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intr, GINTMSK);
intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
- dwc2_writel(intr, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, intr, GINTSTS);
/*
* Turn off the vbus power only if the core has transitioned to device
@@ -1990,7 +1998,7 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
if (dwc2_is_device_mode(hsotg)) {
if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
- dwc2_writel(0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, 0, HPRT0);
}
dwc2_disable_host_interrupts(hsotg);
@@ -2018,7 +2026,7 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
* and won't get any future interrupts to handle the connect.
*/
if (!force) {
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
dwc2_hcd_connect(hsotg);
}
@@ -2062,7 +2070,7 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
/* Turn off the vbus power */
dev_dbg(hsotg->dev, "PortPower off\n");
- dwc2_writel(0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, 0, HPRT0);
}
/* Caller must hold driver lock */
@@ -2086,7 +2094,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
if ((dev_speed == USB_SPEED_LOW) &&
(hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
(hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
- u32 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ u32 hprt0 = dwc2_readl(hsotg, HPRT0);
u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_FULL_SPEED)
@@ -2105,7 +2113,7 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
return retval;
}
- intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
+ intr_mask = dwc2_readl(hsotg, GINTMSK);
if (!(intr_mask & GINTSTS_SOF)) {
enum dwc2_transaction_type tr_type;
@@ -2270,7 +2278,7 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
/* Set ULPI External VBUS bit if needed */
usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
@@ -2282,7 +2290,7 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
if (hsotg->params.ts_dline)
usbcfg |= GUSBCFG_TERMSELDLPULSE;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
/*
* Reset the Controller
@@ -2316,9 +2324,9 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
dwc2_gusbcfg_init(hsotg);
/* Program the GOTGCTL register */
- otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_OTGVER;
- dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, otgctl, GOTGCTL);
/* Clear the SRP success bit for FS-I2c */
hsotg->srp_success = 0;
@@ -2365,20 +2373,20 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
* introduced by the PHY in generating the linestate condition
* can vary from one PHY to another.
*/
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_TOUTCAL(7);
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Restart the Phy Clock */
- dwc2_writel(0, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, 0, PCGCTL);
/* Initialize Host Configuration Register */
dwc2_init_fs_ls_pclk_sel(hsotg);
if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
hcfg |= HCFG_FSLSSUPP;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
}
/*
@@ -2387,9 +2395,9 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
* and its value must not be changed during runtime.
*/
if (hsotg->params.reload_ctl) {
- hfir = dwc2_readl(hsotg->regs + HFIR);
+ hfir = dwc2_readl(hsotg, HFIR);
hfir |= HFIR_RLDCTRL;
- dwc2_writel(hfir, hsotg->regs + HFIR);
+ dwc2_writel(hsotg, hfir, HFIR);
}
if (hsotg->params.dma_desc_enable) {
@@ -2406,9 +2414,9 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
"falling back to buffer DMA mode.\n");
hsotg->params.dma_desc_enable = false;
} else {
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
hcfg |= HCFG_DESCDMA;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
}
}
@@ -2417,18 +2425,18 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
/* TODO - check this */
/* Clear Host Set HNP Enable in the OTG Control Register */
- otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_HSTSETHNPEN;
- dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, otgctl, GOTGCTL);
/* Make sure the FIFOs are flushed */
dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
dwc2_flush_rx_fifo(hsotg);
/* Clear Host Set HNP Enable in the OTG Control Register */
- otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_HSTSETHNPEN;
- dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, otgctl, GOTGCTL);
if (!hsotg->params.dma_desc_enable) {
int num_channels, i;
@@ -2437,19 +2445,19 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
/* Flush out any leftover queued requests */
num_channels = hsotg->params.host_channels;
for (i = 0; i < num_channels; i++) {
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
hcchar &= ~HCCHAR_CHENA;
hcchar |= HCCHAR_CHDIS;
hcchar &= ~HCCHAR_EPDIR;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
}
/* Halt all channels to put them into a known state */
for (i = 0; i < num_channels; i++) {
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
hcchar &= ~HCCHAR_EPDIR;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
+ dwc2_writel(hsotg, hcchar, HCCHAR(i));
dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
__func__, i);
@@ -2473,7 +2481,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
!!(hprt0 & HPRT0_PWR));
if (!(hprt0 & HPRT0_PWR)) {
hprt0 |= HPRT0_PWR;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
}
}
@@ -2625,36 +2633,66 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
}
}
-#define DWC2_USB_DMA_ALIGN 4
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh,
+ struct dwc2_host_chan *chan)
+{
+ if (!hsotg->unaligned_cache ||
+ chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+ return -ENOMEM;
-struct dma_aligned_buffer {
- void *kmalloc_ptr;
- void *old_xfer_buffer;
- u8 data[0];
-};
+ if (!qh->dw_align_buf) {
+ qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+ GFP_ATOMIC | GFP_DMA);
+ if (!qh->dw_align_buf)
+ return -ENOMEM;
+ }
+
+ qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+ DWC2_KMEM_UNALIGNED_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+ dev_err(hsotg->dev, "can't map align_buf\n");
+ chan->align_buf = 0;
+ return -EINVAL;
+ }
+
+ chan->align_buf = qh->dw_align_buf_dma;
+ return 0;
+}
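/*
 * Note: the bounce buffer mapped DMA_FROM_DEVICE above is unmapped, and
 * its contents copied back, in the split-IN completion path in hcd_intr.c
 * further down. GFP_ATOMIC is needed because this path runs in atomic
 * context with the HCD spinlock held; GFP_DMA matches the SLAB_CACHE_DMA
 * cache created in dwc2_hcd_init().
 */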
+
+#define DWC2_USB_DMA_ALIGN 4
static void dwc2_free_dma_aligned_buffer(struct urb *urb)
{
- struct dma_aligned_buffer *temp;
+ void *stored_xfer_buffer;
+ size_t length;
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
- temp = container_of(urb->transfer_buffer,
- struct dma_aligned_buffer, data);
+ /* Restore urb->transfer_buffer from the end of the allocated area */
+ memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+ urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
- if (usb_urb_dir_in(urb))
- memcpy(temp->old_xfer_buffer, temp->data,
- urb->transfer_buffer_length);
- urb->transfer_buffer = temp->old_xfer_buffer;
- kfree(temp->kmalloc_ptr);
+ if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+ }
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = stored_xfer_buffer;
urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
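/*
 * For isochronous IN URBs the full transfer_buffer_length is copied back
 * because data lands at per-frame offsets, which urb->actual_length (the
 * sum of the frame lengths) does not account for; for other IN transfers
 * actual_length is sufficient.
 */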
static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
{
- struct dma_aligned_buffer *temp, *kmalloc_ptr;
+ void *kmalloc_ptr;
size_t kmalloc_size;
if (urb->num_sgs || urb->sg ||
@@ -2662,22 +2700,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
!((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
return 0;
- /* Allocate a buffer with enough padding for alignment */
+ /*
+ * Allocate a buffer with enough room for the data plus the original
+ * transfer_buffer pointer appended at the end. kmalloc() returns memory
+ * that is properly aligned for DMA.
+ */
kmalloc_size = urb->transfer_buffer_length +
- sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+ sizeof(urb->transfer_buffer);
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
if (!kmalloc_ptr)
return -ENOMEM;
- /* Position our struct dma_aligned_buffer such that data is aligned */
- temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
- temp->kmalloc_ptr = kmalloc_ptr;
- temp->old_xfer_buffer = urb->transfer_buffer;
+ /*
+ * Stash the original urb->transfer_buffer pointer at the end of the
+ * allocation so it can be restored when the buffer is freed.
+ */
+ memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+ &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
+ memcpy(kmalloc_ptr, urb->transfer_buffer,
urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
+ urb->transfer_buffer = kmalloc_ptr;
urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
@@ -2802,6 +2847,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
/* Set the transfer attributes */
dwc2_hc_init_xfer(hsotg, chan, qtd);
+ /* For non-dword aligned buffers */
+ if (hsotg->params.host_dma && qh->do_split &&
+ chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+ dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+ if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+ dev_err(hsotg->dev,
+ "Failed to allocate memory to handle non-aligned buffer\n");
+ /* Add channel back to free list */
+ chan->align_buf = 0;
+ chan->multi_count = 0;
+ list_add_tail(&chan->hc_list_entry,
+ &hsotg->free_hc_list);
+ qtd->in_process = 0;
+ qh->channel = NULL;
+ return -ENOMEM;
+ }
+ } else {
+ /*
+ * We assume the DMA address is always aligned in the
+ * non-split and split-OUT cases. Warn if not.
+ */
+ WARN_ON_ONCE(hsotg->params.host_dma &&
+ (chan->xfer_dma & 0x3));
+ chan->align_buf = 0;
+ }
+
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/*
@@ -3012,7 +3083,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
if (dbg_perio())
dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
- tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
+ tx_status = dwc2_readl(hsotg, HPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
@@ -3027,7 +3098,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
qh_ptr = hsotg->periodic_sched_assigned.next;
while (qh_ptr != &hsotg->periodic_sched_assigned) {
- tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
+ tx_status = dwc2_readl(hsotg, HPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
if (qspcavail == 0) {
@@ -3097,10 +3168,10 @@ exit:
* level to ensure that new requests are loaded as
* soon as possible.)
*/
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
if (!(gintmsk & GINTSTS_PTXFEMP)) {
gintmsk |= GINTSTS_PTXFEMP;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
}
} else {
/*
@@ -3110,10 +3181,10 @@ exit:
* handlers to queue more transactions as transfer
* states change.
*/
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
if (gintmsk & GINTSTS_PTXFEMP) {
gintmsk &= ~GINTSTS_PTXFEMP;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
@@ -3142,7 +3213,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
- tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
+ tx_status = dwc2_readl(hsotg, GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
@@ -3165,7 +3236,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
* available in the request queue or the Tx FIFO
*/
do {
- tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
+ tx_status = dwc2_readl(hsotg, GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
if (!hsotg->params.host_dma && qspcavail == 0) {
@@ -3202,7 +3273,7 @@ next:
} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
if (!hsotg->params.host_dma) {
- tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
+ tx_status = dwc2_readl(hsotg, GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
@@ -3222,9 +3293,9 @@ next:
* level to ensure that new requests are loaded as
* soon as possible.)
*/
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_NPTXFEMP;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
} else {
/*
* Disable the Tx FIFO empty interrupt since there are
@@ -3233,9 +3304,9 @@ next:
* handlers to queue more transactions as transfer
* states change.
*/
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_NPTXFEMP;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
@@ -3272,10 +3343,10 @@ void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
* Ensure NP Tx FIFO empty interrupt is disabled when
* there are no non-periodic transfers to process
*/
- u32 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ u32 gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk &= ~GINTSTS_NPTXFEMP;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
@@ -3290,7 +3361,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
dev_dbg(hsotg->dev, "%s()\n", __func__);
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
!!(gotgctl & GOTGCTL_CONID_B));
@@ -3316,7 +3387,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
* check it again and jump to host mode if that was
* the case.
*/
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
if (!(gotgctl & GOTGCTL_CONID_B))
goto host;
if (++count > 250)
@@ -3376,9 +3447,9 @@ static void dwc2_wakeup_detected(struct timer_list *t)
hprt0 = dwc2_read_hprt0(hsotg);
dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
hprt0 &= ~HPRT0_RES;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
- dwc2_readl(hsotg->regs + HPRT0));
+ dwc2_readl(hsotg, HPRT0));
dwc2_hcd_rem_wakeup(hsotg);
hsotg->bus_suspended = false;
@@ -3407,15 +3478,15 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
spin_lock_irqsave(&hsotg->lock, flags);
if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
- gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
gotgctl |= GOTGCTL_HSTSETHNPEN;
- dwc2_writel(gotgctl, hsotg->regs + GOTGCTL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
hsotg->op_state = OTG_STATE_A_SUSPEND;
}
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_SUSP;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->bus_suspended = true;
@@ -3425,17 +3496,17 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
*/
if (!hsotg->params.power_down) {
/* Suspend the Phy Clock */
- pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl |= PCGCTL_STOPPCLK;
- dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
udelay(10);
}
/* For HNP the bus must be suspended for at least 200ms */
if (dwc2_host_is_b_hnp_enabled(hsotg)) {
- pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
- dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -3459,9 +3530,9 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
* after registers restore.
*/
if (!hsotg->params.power_down) {
- pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~PCGCTL_STOPPCLK;
- dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
spin_unlock_irqrestore(&hsotg->lock, flags);
msleep(20);
spin_lock_irqsave(&hsotg->lock, flags);
@@ -3470,7 +3541,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_RES;
hprt0 &= ~HPRT0_SUSP;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
spin_unlock_irqrestore(&hsotg->lock, flags);
msleep(USB_RESUME_TIMEOUT);
@@ -3478,7 +3549,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
spin_lock_irqsave(&hsotg->lock, flags);
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 &= ~(HPRT0_RES | HPRT0_SUSP);
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->bus_suspended = false;
spin_unlock_irqrestore(&hsotg->lock, flags);
}
@@ -3522,7 +3593,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
"ClearPortFeature USB_PORT_FEAT_ENABLE\n");
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_ENA;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
break;
case USB_PORT_FEAT_SUSPEND:
@@ -3542,7 +3613,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
"ClearPortFeature USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 &= ~HPRT0_PWR;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
break;
case USB_PORT_FEAT_INDICATOR:
@@ -3663,7 +3734,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
break;
}
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
if (hprt0 & HPRT0_CONNSTS)
@@ -3704,9 +3775,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
hsotg->params.dma_desc_enable = true;
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
hcfg |= HCFG_DESCDMA;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
hsotg->new_connection = false;
}
}
@@ -3753,7 +3824,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
"SetPortFeature - USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_PWR;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
break;
case USB_PORT_FEAT_RESET:
@@ -3763,11 +3834,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
hprt0 = dwc2_read_hprt0(hsotg);
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_RESET\n");
- pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
- dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
/* ??? Original driver does this */
- dwc2_writel(0, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, 0, PCGCTL);
hprt0 = dwc2_read_hprt0(hsotg);
/* Clear suspend bit if resetting from suspend state */
@@ -3782,13 +3853,13 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
hprt0 |= HPRT0_PWR | HPRT0_RST;
dev_dbg(hsotg->dev,
"In host mode, hprt0=%08x\n", hprt0);
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
}
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
msleep(50);
hprt0 &= ~HPRT0_RST;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->lx_state = DWC2_L0; /* Now back to On state */
break;
@@ -3804,7 +3875,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
"SetPortFeature - USB_PORT_FEAT_TEST\n");
hprt0 &= ~HPRT0_TSTCTL_MASK;
hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
break;
default:
@@ -3861,7 +3932,7 @@ static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
{
- u32 hfnum = dwc2_readl(hsotg->regs + HFNUM);
+ u32 hfnum = dwc2_readl(hsotg, HFNUM);
#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
@@ -3872,9 +3943,9 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
{
- u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
- u32 hfir = dwc2_readl(hsotg->regs + HFIR);
- u32 hfnum = dwc2_readl(hsotg->regs + HFNUM);
+ u32 hprt = dwc2_readl(hsotg, HPRT0);
+ u32 hfir = dwc2_readl(hsotg, HFIR);
+ u32 hfnum = dwc2_readl(hsotg, HFNUM);
unsigned int us_per_frame;
unsigned int frame_number;
unsigned int remaining;
@@ -3993,11 +4064,11 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
if (chan->xfer_started) {
u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
- hfnum = dwc2_readl(hsotg->regs + HFNUM);
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(i));
- hcint = dwc2_readl(hsotg->regs + HCINT(i));
- hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(i));
+ hfnum = dwc2_readl(hsotg, HFNUM);
+ hcchar = dwc2_readl(hsotg, HCCHAR(i));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(i));
+ hcint = dwc2_readl(hsotg, HCINT(i));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(i));
dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
@@ -4045,12 +4116,12 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, " periodic_channels: %d\n",
hsotg->periodic_channels);
dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
- np_tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
+ np_tx_status = dwc2_readl(hsotg, GNPTXSTS);
dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
(np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
(np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
- p_tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
+ p_tx_status = dwc2_readl(hsotg, HPTXSTS);
dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
(p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
@@ -4300,7 +4371,7 @@ static void dwc2_hcd_reset_func(struct work_struct *work)
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 &= ~HPRT0_RST;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
hsotg->flags.b.port_reset_change = 1;
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -4410,7 +4481,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_SUSP;
hprt0 &= ~HPRT0_PWR;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
dwc2_vbus_supply_exit(hsotg);
}
@@ -4501,8 +4572,8 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
* Clear Port Enable and Port Status changes.
* Enable Port Power.
*/
- dwc2_writel(HPRT0_PWR | HPRT0_CONNDET |
- HPRT0_ENACHG, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, HPRT0_PWR | HPRT0_CONNDET |
+ HPRT0_ENACHG, HPRT0);
/* Wait for controller to detect Port Connect */
usleep_range(5000, 7000);
}
@@ -5022,17 +5093,17 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
hsotg->status_buf = NULL;
}
- ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
+ ahbcfg = dwc2_readl(hsotg, GAHBCFG);
/* Disable all interrupts */
ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
- dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
- dwc2_writel(0, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, ahbcfg, GAHBCFG);
+ dwc2_writel(hsotg, 0, GINTMSK);
if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
- dctl = dwc2_readl(hsotg->regs + DCTL);
+ dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_SFTDISCON;
- dwc2_writel(dctl, hsotg->regs + DCTL);
+ dwc2_writel(hsotg, dctl, DCTL);
}
if (hsotg->wq_otg) {
@@ -5075,7 +5146,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
retval = -ENOMEM;
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
@@ -5246,6 +5317,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
}
}
+ if (hsotg->params.host_dma) {
+ /*
+ * Create a kmem cache for bouncing non-aligned buffers
+ * in Buffer DMA mode.
+ */
+ hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+ DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+ SLAB_CACHE_DMA, NULL);
+ if (!hsotg->unaligned_cache)
+ dev_err(hsotg->dev,
+ "unable to create dwc2 unaligned cache\n");
+ }
+
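/*
 * DWC2_KMEM_UNALIGNED_BUF_SIZE is defined in the corresponding hcd.h
 * change (1024 bytes upstream), large enough for the biggest split
 * transaction packet. Failure to create the cache is tolerated:
 * dwc2_alloc_split_dma_aligned_buf() then returns -ENOMEM for unaligned
 * split-IN buffers instead of the whole HCD failing to initialize.
 */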
hsotg->otg_port = 1;
hsotg->frame_list = NULL;
hsotg->frame_list_dma = 0;
@@ -5280,8 +5364,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
return 0;
error4:
- kmem_cache_destroy(hsotg->desc_gen_cache);
+ kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+ kmem_cache_destroy(hsotg->desc_gen_cache);
error3:
dwc2_hcd_release(hsotg);
error2:
@@ -5322,8 +5407,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
usb_remove_hcd(hcd);
hsotg->priv = NULL;
- kmem_cache_destroy(hsotg->desc_gen_cache);
+ kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+ kmem_cache_destroy(hsotg->desc_gen_cache);
dwc2_hcd_release(hsotg);
usb_put_hcd(hcd);
@@ -5350,14 +5436,14 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
/* Backup Host regs */
hr = &hsotg->hr_backup;
- hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
- hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
+ hr->hcfg = dwc2_readl(hsotg, HCFG);
+ hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
for (i = 0; i < hsotg->params.host_channels; ++i)
- hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
+ hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
hr->hprt0 = dwc2_read_hprt0(hsotg);
- hr->hfir = dwc2_readl(hsotg->regs + HFIR);
- hr->hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
+ hr->hfir = dwc2_readl(hsotg, HFIR);
+ hr->hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
hr->valid = true;
return 0;
@@ -5386,15 +5472,15 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
}
hr->valid = false;
- dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
- dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
+ dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
for (i = 0; i < hsotg->params.host_channels; ++i)
- dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
+ dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
- dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
- dwc2_writel(hr->hfir, hsotg->regs + HFIR);
- dwc2_writel(hr->hptxfsiz, hsotg->regs + HPTXFSIZ);
+ dwc2_writel(hsotg, hr->hprt0, HPRT0);
+ dwc2_writel(hsotg, hr->hfir, HFIR);
+ dwc2_writel(hsotg, hr->hptxfsiz, HPTXFSIZ);
hsotg->frame_number = 0;
return 0;
@@ -5429,13 +5515,13 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
}
/* Enter USB Suspend Mode */
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
hprt0 |= HPRT0_SUSP;
hprt0 &= ~HPRT0_ENA;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for the HPRT0.PrtSusp register field to be set */
- if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/*
@@ -5445,56 +5531,56 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->lx_state = DWC2_L2;
- gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ gusbcfg = dwc2_readl(hsotg, GUSBCFG);
if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
/* ULPI interface */
/* Suspend the Phy Clock */
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_STOPPCLK;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUACTV;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
} else {
/* UTMI+ Interface */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUACTV;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
- pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
pcgcctl |= PCGCTL_STOPPCLK;
- dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
udelay(10);
}
/* Enable interrupts from wake up logic */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PMUINTSEL;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Unmask host mode interrupts in GPWRDN */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_DISCONN_DET_MSK;
gpwrdn |= GPWRDN_LNSTSCHG_MSK;
gpwrdn |= GPWRDN_STS_CHGINT_MSK;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Enable Power Down Clamp */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNCLMP;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Switch off VDD */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn |= GPWRDN_PWRDNSWTCH;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
hsotg->hibernated = 1;
hsotg->bus_suspended = 1;
@@ -5542,29 +5628,29 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
mdelay(100);
/* Clear all pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* De-assert Restore */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_RESTORE;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
/* Restore GUSBCFG, HCFG */
- dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
- dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
/* De-assert Wakeup Logic */
- gpwrdn = dwc2_readl(hsotg->regs + GPWRDN);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
gpwrdn &= ~GPWRDN_PMUACTV;
- dwc2_writel(gpwrdn, hsotg->regs + GPWRDN);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
udelay(10);
hprt0 = hr->hprt0;
hprt0 |= HPRT0_PWR;
hprt0 &= ~HPRT0_ENA;
hprt0 &= ~HPRT0_SUSP;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
hprt0 = hr->hprt0;
hprt0 |= HPRT0_PWR;
@@ -5573,32 +5659,32 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
if (reset) {
hprt0 |= HPRT0_RST;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for Resume time and then program HPRT again */
mdelay(60);
hprt0 &= ~HPRT0_RST;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
} else {
hprt0 |= HPRT0_RES;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for Resume time and then program HPRT again */
mdelay(100);
hprt0 &= ~HPRT0_RES;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
}
/* Clear all interrupt status */
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
hprt0 |= HPRT0_CONNDET;
hprt0 |= HPRT0_ENACHG;
hprt0 &= ~HPRT0_ENA;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0, HPRT0);
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
/* Clear all pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
ret = dwc2_restore_global_registers(hsotg);
@@ -5616,6 +5702,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
return ret;
}
+ dwc2_hcd_rem_wakeup(hsotg);
+
hsotg->hibernated = 0;
hsotg->bus_suspended = 0;
hsotg->lx_state = DWC2_L0;
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7db1ee7e7a77..3f9bccc95add 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -76,6 +76,8 @@ struct dwc2_qh;
* (micro)frame
* @xfer_buf: Pointer to current transfer buffer position
* @xfer_dma: DMA address of xfer_buf
+ * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
+ * DWORD aligned
* @xfer_len: Total number of bytes to transfer
* @xfer_count: Number of bytes transferred so far
* @start_pkt_count: Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
u8 *xfer_buf;
dma_addr_t xfer_dma;
+ dma_addr_t align_buf;
u32 xfer_len;
u32 xfer_count;
u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
* speed. Note that this is in "schedule slice" which
* is tightly packed.
* @ntd: Actual number of transfer descriptors in a list
+ * @dw_align_buf: Used instead of original buffer if its physical address
+ * is not dword-aligned
+ * @dw_align_buf_dma: DMA address for dw_align_buf
* @qtd_list: List of QTDs for this QH
* @channel: Host channel currently processing transfers for this QH
* @qh_list_entry: Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
u32 ls_start_schedule_slice;
u16 ntd;
+ u8 *dw_align_buf;
+ dma_addr_t dw_align_buf_dma;
struct list_head qtd_list;
struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
@@ -461,10 +469,10 @@ static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
*/
static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
{
- u32 mask = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
+ u32 mask = dwc2_readl(hsotg, HCINTMSK(chnum));
mask &= ~intr;
- dwc2_writel(mask, hsotg->regs + HCINTMSK(chnum));
+ dwc2_writel(hsotg, mask, HCINTMSK(chnum));
}
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
@@ -479,7 +487,7 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
*/
static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
{
- u32 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ u32 hprt0 = dwc2_readl(hsotg, HPRT0);
hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
return hprt0;
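/*
 * HPRT0_ENA, HPRT0_CONNDET, HPRT0_ENACHG and HPRT0_OVRCURRCHG are
 * write-1-to-clear bits; masking them here keeps the read-modify-write
 * sequences built on dwc2_read_hprt0() from acknowledging port interrupts
 * or disabling the port as a side effect.
 */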
@@ -682,8 +690,8 @@ static inline u16 dwc2_micro_frame_num(u16 frame)
*/
static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
{
- return dwc2_readl(hsotg->regs + GINTSTS) &
- dwc2_readl(hsotg->regs + GINTMSK);
+ return dwc2_readl(hsotg, GINTSTS) &
+ dwc2_readl(hsotg, GINTMSK);
}
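/*
 * Only bits asserted in GINTSTS *and* unmasked in GINTMSK are serviced.
 * E.g. (values illustrative) GINTSTS = 0x04000009 with GINTMSK =
 * 0x00000008 yields 0x00000008: only the SOF interrupt (bit 3) is handled.
 */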
static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 74f11c823f79..a858b5f9c1d6 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -185,19 +185,19 @@ static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
spin_lock_irqsave(&hsotg->lock, flags);
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
if (hcfg & HCFG_PERSCHEDENA) {
/* already enabled */
spin_unlock_irqrestore(&hsotg->lock, flags);
return;
}
- dwc2_writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);
+ dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);
hcfg &= ~HCFG_FRLISTEN_MASK;
hcfg |= fr_list_en | HCFG_PERSCHEDENA;
dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
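/*
 * Ordering matters here: HFLBADDR must point at a valid frame list DMA
 * address before HCFG.PerSchedEna is set, otherwise the core could start
 * fetching schedule entries from a stale base address.
 */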
@@ -209,7 +209,7 @@ static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
spin_lock_irqsave(&hsotg->lock, flags);
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
if (!(hcfg & HCFG_PERSCHEDENA)) {
/* already disabled */
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -218,7 +218,7 @@ static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
hcfg &= ~HCFG_PERSCHEDENA;
dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index fbea5e3fb947..88b5dcf3aefc 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -144,7 +144,7 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
enum dwc2_transaction_type tr_type;
/* Clear interrupt */
- dwc2_writel(GINTSTS_SOF, hsotg->regs + GINTSTS);
+ dwc2_writel(hsotg, GINTSTS_SOF, GINTSTS);
#ifdef DEBUG_SOF
dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
@@ -191,7 +191,7 @@ static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
if (dbg_perio())
dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
- grxsts = dwc2_readl(hsotg->regs + GRXSTSP);
+ grxsts = dwc2_readl(hsotg, GRXSTSP);
chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
chan = hsotg->hc_ptr_array[chnum];
if (!chan) {
@@ -274,11 +274,11 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
/* Recalculate HFIR.FrInterval every time the port is enabled */
- hfir = dwc2_readl(hsotg->regs + HFIR);
+ hfir = dwc2_readl(hsotg, HFIR);
hfir &= ~HFIR_FRINT_MASK;
hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
HFIR_FRINT_MASK;
- dwc2_writel(hfir, hsotg->regs + HFIR);
+ dwc2_writel(hsotg, hfir, HFIR);
/* Check if we need to adjust the PHY clock speed for low power */
if (!params->host_support_fs_ls_low_power) {
@@ -287,7 +287,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
return;
}
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
@@ -295,11 +295,11 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
/* Set PHY low power clock select for FS/LS devices */
usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
do_reset = 1;
}
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
HCFG_FSLSPCLKSEL_SHIFT;
@@ -312,7 +312,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
do_reset = 1;
}
} else {
@@ -323,7 +323,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
do_reset = 1;
}
}
@@ -331,14 +331,14 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
/* Not low power */
if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
do_reset = 1;
}
}
if (do_reset) {
*hprt0_modify |= HPRT0_RST;
- dwc2_writel(*hprt0_modify, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, *hprt0_modify, HPRT0);
queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
msecs_to_jiffies(60));
} else {
@@ -359,7 +359,7 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+ hprt0 = dwc2_readl(hsotg, HPRT0);
hprt0_modify = hprt0;
/*
@@ -374,7 +374,7 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
* Set flag and clear if detected
*/
if (hprt0 & HPRT0_CONNDET) {
- dwc2_writel(hprt0_modify | HPRT0_CONNDET, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0_modify | HPRT0_CONNDET, HPRT0);
dev_vdbg(hsotg->dev,
"--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
@@ -392,7 +392,7 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
* Clear if detected - Set internal flag if disabled
*/
if (hprt0 & HPRT0_ENACHG) {
- dwc2_writel(hprt0_modify | HPRT0_ENACHG, hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0_modify | HPRT0_ENACHG, HPRT0);
dev_vdbg(hsotg->dev,
" --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
hprt0, !!(hprt0 & HPRT0_ENA));
@@ -406,17 +406,17 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
hsotg->params.dma_desc_enable = false;
hsotg->new_connection = false;
- hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg = dwc2_readl(hsotg, HCFG);
hcfg &= ~HCFG_DESCDMA;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hsotg, hcfg, HCFG);
}
}
}
/* Overcurrent Change Interrupt */
if (hprt0 & HPRT0_OVRCURRCHG) {
- dwc2_writel(hprt0_modify | HPRT0_OVRCURRCHG,
- hsotg->regs + HPRT0);
+ dwc2_writel(hsotg, hprt0_modify | HPRT0_OVRCURRCHG,
+ HPRT0);
dev_vdbg(hsotg->dev,
" --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
hprt0);
@@ -441,7 +441,7 @@ static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
{
u32 hctsiz, count, length;
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
if (halt_status == DWC2_HC_XFER_COMPLETE) {
if (chan->ep_is_in) {
@@ -518,7 +518,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
urb->status = 0;
}
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
__func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
@@ -541,7 +541,7 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, int chnum,
struct dwc2_qtd *qtd)
{
- u32 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
@@ -780,9 +780,9 @@ cleanup:
}
}
- haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
+ haintmsk = dwc2_readl(hsotg, HAINTMSK);
haintmsk &= ~(1 << chan->hc_num);
- dwc2_writel(haintmsk, hsotg->regs + HAINTMSK);
+ dwc2_writel(hsotg, haintmsk, HAINTMSK);
/* Try to queue more transfers now that there's a free channel */
tr_type = dwc2_hcd_select_transactions(hsotg);
@@ -829,9 +829,9 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
* is enabled so that the non-periodic schedule will
* be processed
*/
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_NPTXFEMP;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
} else {
dev_vdbg(hsotg->dev, "isoc/intr\n");
/*
@@ -848,9 +848,9 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
* enabled so that the periodic schedule will be
* processed
*/
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
gintmsk |= GINTSTS_PTXFEMP;
- dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
}
}
}
@@ -915,7 +915,7 @@ static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd,
enum dwc2_halt_status halt_status)
{
- u32 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
qtd->error_count = 0;
@@ -942,17 +942,24 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
DWC2_HC_XFER_COMPLETE, NULL);
- if (!len) {
+ if (!len && !qtd->isoc_split_offset) {
qtd->complete_split = 0;
- qtd->isoc_split_offset = 0;
return 0;
}
frame_desc->actual_length += len;
+ if (chan->align_buf) {
+ dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+ dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+ DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+ memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+ chan->qh->dw_align_buf, len);
+ }
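/*
 * (chan->xfer_dma - qtd->urb->dma) is the byte offset of this split's
 * data within the URB buffer, so the copy from the bounce buffer lands
 * at the correct position in qtd->urb->buf.
 */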
+
qtd->isoc_split_offset += len;
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
@@ -1178,7 +1185,7 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
urb->actual_length += xfer_length;
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
__func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
@@ -1224,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
* avoid interrupt storms we'll wait before retrying if we've got
* several NAKs. If we didn't do this we'd retry directly from the
* interrupt handler and could end up quickly getting another
- * interrupt (another NAK), which we'd retry.
+ * interrupt (another NAK), which we'd retry. Note that we do not
+ * delay retries for the IN parts of control requests, as those are
+ * expected to complete fairly quickly, and delaying them risks
+ * confusing the device into issuing a STALL.
*
* Note that in DMA mode software only gets involved to re-send NAKed
* transfers for split transactions, so we only need to apply this
@@ -1237,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
qtd->error_count = 0;
qtd->complete_split = 0;
qtd->num_naks++;
- qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
+ qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+ !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+ chan->ep_is_in);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
goto handle_nak_done;
}
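The new want_wait condition, restated as a standalone predicate for readability (illustrative only; the driver open-codes it above):

static inline bool dwc2_nak_should_delay(const struct dwc2_qtd *qtd,
					 const struct dwc2_host_chan *chan)
{
	/* Back off after repeated NAKs, but never for control-IN stages */
	return qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
	       !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
		 chan->ep_is_in);
}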
@@ -1554,10 +1566,10 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
- hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chnum));
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
- hc_dma = dwc2_readl(hsotg->regs + HCDMA(chnum));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
+ hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ hc_dma = dwc2_readl(hsotg, HCDMA(chnum));
dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
dev_err(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
@@ -1769,10 +1781,10 @@ static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
* This code is here only as a check. This condition should
* never happen. Ignore the halt if it does occur.
*/
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
- hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
- hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
- hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chnum));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
+ hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+ hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
dev_dbg(hsotg->dev,
"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
__func__);
@@ -1796,7 +1808,7 @@ static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
* when the halt interrupt occurs. Halt the channel again if it does
* occur.
*/
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
+ hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
if (hcchar & HCCHAR_CHDIS) {
dev_warn(hsotg->dev,
"%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
@@ -1856,7 +1868,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
return;
}
- hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
if (chan->hcint & HCINTMSK_XFERCOMPL) {
/*
@@ -1951,7 +1963,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
dev_err(hsotg->dev,
"hcint 0x%08x, intsts 0x%08x\n",
chan->hcint,
- dwc2_readl(hsotg->regs + GINTSTS));
+ dwc2_readl(hsotg, GINTSTS));
goto error;
}
}
@@ -2024,11 +2036,11 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
chan = hsotg->hc_ptr_array[chnum];
- hcint = dwc2_readl(hsotg->regs + HCINT(chnum));
- hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
+ hcint = dwc2_readl(hsotg, HCINT(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
if (!chan) {
dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
- dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
+ dwc2_writel(hsotg, hcint, HCINT(chnum));
return;
}
@@ -2040,7 +2052,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
hcint, hcintmsk, hcint & hcintmsk);
}
- dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
+ dwc2_writel(hsotg, hcint, HCINT(chnum));
/*
* If we got an interrupt after someone called
@@ -2175,7 +2187,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
int i;
struct dwc2_host_chan *chan, *chan_tmp;
- haint = dwc2_readl(hsotg->regs + HAINT);
+ haint = dwc2_readl(hsotg, HAINT);
if (dbg_perio()) {
dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -2259,8 +2271,8 @@ irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
"DWC OTG HCD Finished Servicing Interrupts\n");
dev_vdbg(hsotg->dev,
"DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
- dwc2_readl(hsotg->regs + GINTSTS),
- dwc2_readl(hsotg->regs + GINTMSK));
+ dwc2_readl(hsotg, GINTSTS),
+ dwc2_readl(hsotg, GINTMSK));
}
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index d7c3d6c776d8..40839591d2ec 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
/* Get the map and adjust if this is a multi_tt hub */
map = qh->dwc_tt->periodic_bitmaps;
if (qh->dwc_tt->usb_tt->multi)
- map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+ map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
return map;
}
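/*
 * ttport is 1-based (hub ports count from 1), so port N's bitmap starts
 * at element (N - 1) * DWC2_ELEMENTS_PER_LS_BITMAP. The old expression
 * skipped port 1's bitmap and indexed past the allocation for the last
 * port.
 */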
@@ -1510,7 +1510,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
- u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
+ u32 hprt = dwc2_readl(hsotg, HPRT0);
u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
dev_speed != USB_SPEED_HIGH);
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if (qh->desc_list)
dwc2_hcd_qh_free_ddma(hsotg, qh);
+ else if (hsotg->unaligned_cache && qh->dw_align_buf)
+ kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
kfree(qh);
}
@@ -1744,9 +1747,9 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if (status)
return status;
if (!hsotg->periodic_qh_count) {
- intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
+ intr_mask = dwc2_readl(hsotg, GINTMSK);
intr_mask |= GINTSTS_SOF;
- dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intr_mask, GINTMSK);
}
hsotg->periodic_qh_count++;
@@ -1785,9 +1788,9 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
hsotg->periodic_qh_count--;
if (!hsotg->periodic_qh_count &&
!hsotg->params.dma_desc_enable) {
- intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
+ intr_mask = dwc2_readl(hsotg, GINTMSK);
intr_mask &= ~GINTSTS_SOF;
- dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
+ dwc2_writel(hsotg, intr_mask, GINTMSK);
}
}
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index af075d4da895..bf7052e037d6 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -47,7 +47,6 @@ static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg)
p->max_transfer_size = 65535;
p->max_packet_count = 511;
p->ahbcfg = 0x10;
- p->uframe_sched = false;
}
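/*
 * Dropping the explicit uframe_sched override here (and in the platform
 * hooks below) leaves the driver default in effect, i.e. the microframe
 * scheduler is now enabled on these platforms.
 */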
static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
@@ -68,7 +67,6 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
p->reload_ctl = false;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
GAHBCFG_HBSTLEN_SHIFT;
- p->uframe_sched = false;
p->change_speed_quirk = true;
p->power_down = false;
}
@@ -112,7 +110,6 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
GAHBCFG_HBSTLEN_SHIFT;
- p->uframe_sched = false;
}
static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
@@ -134,7 +131,6 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
p->max_packet_count = 256;
p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
p->i2c_enable = false;
- p->uframe_sched = false;
p->activate_stm_fs_transceiver = true;
}
@@ -654,8 +650,8 @@ static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
dwc2_force_mode(hsotg, true);
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
- hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
+ gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
+ hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
FIFOSIZE_DEPTH_SHIFT;
@@ -679,13 +675,13 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
dwc2_force_mode(hsotg, false);
- gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
for (fifo = 1; fifo <= fifo_count; fifo++) {
hw->g_tx_fifo_size[fifo] =
- (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
+ (dwc2_readl(hsotg, DPTXFSIZN(fifo)) &
FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
}
@@ -713,7 +709,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
* 0x45f4xxxx, 0x5531xxxx or 0x5532xxxx
*/
- hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
+ hw->snpsid = dwc2_readl(hsotg, GSNPSID);
if ((hw->snpsid & GSNPSID_ID_MASK) != DWC2_OTG_ID &&
(hw->snpsid & GSNPSID_ID_MASK) != DWC2_FS_IOT_ID &&
(hw->snpsid & GSNPSID_ID_MASK) != DWC2_HS_IOT_ID) {
@@ -726,11 +722,11 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
- hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
- hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
- hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
- hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
- grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
+ hwcfg1 = dwc2_readl(hsotg, GHWCFG1);
+ hwcfg2 = dwc2_readl(hsotg, GHWCFG2);
+ hwcfg3 = dwc2_readl(hsotg, GHWCFG3);
+ hwcfg4 = dwc2_readl(hsotg, GHWCFG4);
+ grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
/* hwcfg1 */
hw->dev_ep_dirs = hwcfg1;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 4c0819554bcd..9a53a58e676e 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -353,6 +353,23 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
}
/**
+ * dwc2_check_core_endianness() - Returns true if core and AHB have
+ * opposite endianness.
+ * @hsotg: Programming view of the DWC_otg controller.
+ */
+static bool dwc2_check_core_endianness(struct dwc2_hsotg *hsotg)
+{
+ u32 snpsid;
+
+ snpsid = ioread32(hsotg->regs + GSNPSID);
+ if ((snpsid & GSNPSID_ID_MASK) == DWC2_OTG_ID ||
+ (snpsid & GSNPSID_ID_MASK) == DWC2_FS_IOT_ID ||
+ (snpsid & GSNPSID_ID_MASK) == DWC2_HS_IOT_ID)
+ return false;
+ return true;
+}
+
+/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
*
@@ -395,6 +412,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
(unsigned long)res->start, hsotg->regs);
+ hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
+
retval = dwc2_lowlevel_hw_init(hsotg);
if (retval)
return retval;
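
With needs_byte_swap latched once at probe, every dwc2_readl()/dwc2_writel() call converted throughout the dwc2 hunks above can honour a core/AHB endianness mismatch transparently. A minimal sketch of what such an accessor could look like, assuming it sits in drivers/usb/dwc2/core.h next to the hsotg definition (illustrative only, not the in-tree body):

	static inline u32 dwc2_readl(struct dwc2_hsotg *hsotg, u32 offset)
	{
		u32 val = readl(hsotg->regs + offset);

		/* swap when the core and the AHB disagree on endianness */
		if (hsotg->needs_byte_swap)
			val = swab32(val);
		return val;
	}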
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 451012ea1294..518ead12458d 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -74,11 +74,16 @@ config USB_DWC3_PCI
depends on USB_PCI && ACPI
default USB_DWC3
help
- If you're using the DesignWare Core IP with a PCIe, please say
- 'Y' or 'M' here.
+	  If you're using the DesignWare Core IP attached over PCIe (but
+	  not a HAPS platform), please say 'Y' or 'M' here.
- One such PCIe-based platform is Synopsys' PCIe HAPS model of
- this IP.
+config USB_DWC3_HAPS
+ tristate "Synopsys PCIe-based HAPS Platforms"
+ depends on USB_PCI
+ default USB_DWC3
+ help
+ If you're using the DesignWare Core IP with a Synopsys PCIe HAPS
+ platform, please say 'Y' or 'M' here.
config USB_DWC3_KEYSTONE
tristate "Texas Instruments Keystone2 Platforms"
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 5c07d8f925e0..6e3ef6144e5d 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -45,6 +45,7 @@ endif
obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
+obj-$(CONFIG_USB_DWC3_HAPS) += dwc3-haps.o
obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o
obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ea91310113b9..88c80fcc39f5 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -78,6 +78,14 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
mode = USB_DR_MODE_PERIPHERAL;
+
+ /*
+ * dwc_usb31 does not support OTG mode. If the controller
+ * supports DRD but the dr_mode is not specified or set to OTG,
+ * then set the mode to peripheral.
+ */
+ if (mode == USB_DR_MODE_OTG && dwc3_is_usb31(dwc))
+ mode = USB_DR_MODE_PERIPHERAL;
}
if (mode != dwc->dr_mode) {
@@ -778,6 +786,98 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);
+/* set global incr burst type configuration registers */
+static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+	/* incrx_mode: INCRX_BURST_MODE or INCRX_UNDEF_LENGTH_BURST_MODE */
+	bool incrx_mode;
+	/* incrx_size: size of the largest supported INCRx burst */
+	u32 incrx_size;
+ u32 *vals;
+ u32 cfg;
+ int ntype;
+ int ret;
+ int i;
+
+ cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
+
+ /*
+ * Handle property "snps,incr-burst-type-adjustment".
+	 * Get the number of values from this property:
+	 * result <= 0 means the property is not supported.
+	 * result == 1 means INCRx burst mode is supported.
+	 * result > 1 means undefined-length burst mode is supported.
+ */
+ ntype = device_property_read_u32_array(dev,
+ "snps,incr-burst-type-adjustment", NULL, 0);
+ if (ntype <= 0)
+ return;
+
+ vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
+ if (!vals) {
+ dev_err(dev, "Error to get memory\n");
+ return;
+ }
+
+ /* Get INCR burst type, and parse it */
+ ret = device_property_read_u32_array(dev,
+ "snps,incr-burst-type-adjustment", vals, ntype);
+	if (ret) {
+		dev_err(dev, "failed to read property\n");
+		kfree(vals);
+		return;
+	}
+
+ incrx_size = *vals;
+
+ if (ntype > 1) {
+ /* INCRX (undefined length) burst mode */
+ incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
+ for (i = 1; i < ntype; i++) {
+ if (vals[i] > incrx_size)
+ incrx_size = vals[i];
+ }
+ } else {
+ /* INCRX burst mode */
+ incrx_mode = INCRX_BURST_MODE;
+ }
+
+ /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
+ cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
+ if (incrx_mode)
+ cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
+ switch (incrx_size) {
+ case 256:
+ cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
+ break;
+ case 128:
+ cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
+ break;
+ case 64:
+ cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
+ break;
+ case 32:
+ cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
+ break;
+ case 16:
+ cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
+ break;
+ case 8:
+ cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
+ break;
+ case 4:
+ cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
+ break;
+ case 1:
+ break;
+ default:
+ dev_err(dev, "Invalid property\n");
+ break;
+ }
+
+	kfree(vals);
+
+	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
+}
+
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
* @dwc: Pointer to our controller context structure
@@ -840,6 +940,8 @@ static int dwc3_core_init(struct dwc3 *dwc)
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
+ dwc3_set_incr_burst_type(dwc);
+
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
ret = phy_power_on(dwc->usb2_generic_phy);
@@ -883,6 +985,22 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
+ if (dwc->dr_mode == USB_DR_MODE_HOST ||
+ dwc->dr_mode == USB_DR_MODE_OTG) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+
+ /*
+	 * Enable the Auto Retry feature so that the controller, when
+	 * operating in Host mode and seeing transaction errors (CRC
+	 * errors or internal overrun scenarios) on IN transfers,
+	 * replies to the device with a non-terminating retry ACK
+	 * (i.e., an ACK transaction packet with Retry = 1 and
+	 * Nump != 0).
+ */
+ reg |= DWC3_GUCTL_HSTINAUTORETRY;
+
+ dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+ }
+
/*
* Must config both number of packets and max burst settings to enable
* RX and/or TX threshold.
@@ -1272,7 +1390,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (!dwc->clks)
return -ENOMEM;
- dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
dwc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1424,19 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
- ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
- if (ret == -EPROBE_DEFER)
- return ret;
- /*
- * Clocks are optional, but new DT platforms should support all clocks
- * as required by the DT-binding.
- */
- if (ret)
- dwc->num_clks = 0;
+ if (dev->of_node) {
+ dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+ ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ /*
+ * Clocks are optional, but new DT platforms should support all
+ * clocks as required by the DT-binding.
+ */
+ if (ret)
+ dwc->num_clks = 0;
+ }
ret = reset_control_deassert(dwc->reset);
if (ret)
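
The switch ladder in dwc3_set_incr_burst_type() maps each power-of-two burst size onto the GSBUSCFG0 enable bits defined in core.h below. Since those bits are contiguous (INCR4 at bit 1 up through INCR256 at bit 7), the ladder is equivalent to a log2 mapping; a hedged sketch of that equivalence (the patch keeps the explicit switch, which reads more naturally against the databook):

	/* incrx_size must be 1 or a power of two in the range 4..256 */
	static u32 dwc3_incr_size_to_ena(u32 incrx_size)
	{
		if (incrx_size == 1)
			return 0;			/* plain INCR only */
		return BIT(ilog2(incrx_size) - 1);	/* 4 -> BIT(1), ..., 256 -> BIT(7) */
	}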
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 285ce0ef3b91..5bfb62533e0f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -163,6 +163,17 @@
/* Bit fields */
+/* Global SoC Bus Configuration INCRx Register 0 */
+#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */
+#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */
+#define DWC3_GSBUSCFG0_INCR64BRSTENA (1 << 5) /* INCR64 burst */
+#define DWC3_GSBUSCFG0_INCR32BRSTENA (1 << 4) /* INCR32 burst */
+#define DWC3_GSBUSCFG0_INCR16BRSTENA (1 << 3) /* INCR16 burst */
+#define DWC3_GSBUSCFG0_INCR8BRSTENA (1 << 2) /* INCR8 burst */
+#define DWC3_GSBUSCFG0_INCR4BRSTENA (1 << 1) /* INCR4 burst */
+#define DWC3_GSBUSCFG0_INCRBRSTENA (1 << 0) /* undefined length enable */
+#define DWC3_GSBUSCFG0_INCRBRST_MASK 0xff
+
/* Global Debug Queue/FIFO Space Available Register */
#define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f)
#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
@@ -227,6 +238,9 @@
#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
#define DWC3_GCTL_DSBLCLKGTNG BIT(0)
+/* Global User Control Register */
+#define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
+
/* Global User Control 1 Register */
#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
@@ -1157,6 +1171,9 @@ struct dwc3 {
u16 imod_interval;
};
+#define INCRX_BURST_MODE 0
+#define INCRX_UNDEF_LENGTH_BURST_MODE 1
+
#define work_to_dwc(w) (container_of((w), struct dwc3, drd_work))
/* -------------------------------------------------------------------------- */
diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c
new file mode 100644
index 000000000000..c9cc33881bef
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-haps.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * dwc3-haps.c - Synopsys HAPS PCI Specific glue layer
+ *
+ * Copyright (C) 2018 Synopsys, Inc.
+ *
+ * Authors: Thinh Nguyen <thinhn@synopsys.com>,
+ * John Youn <johnyoun@synopsys.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
+
+/**
+ * struct dwc3_haps - Driver private structure
+ * @dwc3: child dwc3 platform_device
+ * @pci: our link to PCI bus
+ */
+struct dwc3_haps {
+ struct platform_device *dwc3;
+ struct pci_dev *pci;
+};
+
+static const struct property_entry initial_properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ { },
+};
+
+static int dwc3_haps_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct dwc3_haps *dwc;
+ struct device *dev = &pci->dev;
+ struct resource res[2];
+ int ret;
+
+ ret = pcim_enable_device(pci);
+ if (ret) {
+ dev_err(dev, "failed to enable pci device\n");
+ return -ENODEV;
+ }
+
+ pci_set_master(pci);
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+ if (!dwc->dwc3)
+ return -ENOMEM;
+
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+ res[0].name = "dwc_usb3";
+ res[0].flags = IORESOURCE_MEM;
+
+ res[1].start = pci->irq;
+ res[1].name = "dwc_usb3";
+ res[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(dev, "couldn't add resources to dwc3 device\n");
+ goto err;
+ }
+
+ dwc->pci = pci;
+ dwc->dwc3->dev.parent = dev;
+
+ ret = platform_device_add_properties(dwc->dwc3, initial_properties);
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(dwc->dwc3);
+ if (ret) {
+ dev_err(dev, "failed to register dwc3 device\n");
+ goto err;
+ }
+
+ pci_set_drvdata(pci, dwc);
+
+ return 0;
+err:
+ platform_device_put(dwc->dwc3);
+ return ret;
+}
+
+static void dwc3_haps_remove(struct pci_dev *pci)
+{
+ struct dwc3_haps *dwc = pci_get_drvdata(pci);
+
+ platform_device_unregister(dwc->dwc3);
+}
+
+static const struct pci_device_id dwc3_haps_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI),
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31),
+ },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, dwc3_haps_id_table);
+
+static struct pci_driver dwc3_haps_driver = {
+ .name = "dwc3-haps",
+ .id_table = dwc3_haps_id_table,
+ .probe = dwc3_haps_probe,
+ .remove = dwc3_haps_remove,
+};
+
+MODULE_AUTHOR("Thinh Nguyen <thinhn@synopsys.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys HAPS PCI Glue Layer");
+
+module_pci_driver(dwc3_haps_driver);
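
Splitting the HAPS IDs into this dedicated glue driver keeps the synthetic snps,* properties that the prototyping boards need out of dwc3-pci.c; the reworked quirk path there (below) now only has to deal with Intel and AMD parts, and the same three Synopsys device IDs are dropped from its ID table further down.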
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 6b3ccd542bd7..40bf9e0bbc59 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -28,6 +28,7 @@ struct dwc3_of_simple {
int num_clocks;
struct reset_control *resets;
bool pulse_resets;
+ bool need_reset;
};
static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
@@ -93,6 +94,13 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, simple);
simple->dev = dev;
+ /*
+ * Some controllers need to toggle the usb3-otg reset before trying to
+ * initialize the PHY, otherwise the PHY times out.
+ */
+ if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
+ simple->need_reset = true;
+
if (of_device_is_compatible(np, "amlogic,meson-axg-dwc3") ||
of_device_is_compatible(np, "amlogic,meson-gxl-dwc3")) {
shared_resets = true;
@@ -165,8 +173,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
reset_control_put(simple->resets);
- pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
return 0;
}
@@ -200,9 +209,30 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
return 0;
}
+
+static int dwc3_of_simple_suspend(struct device *dev)
+{
+ struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+
+ if (simple->need_reset)
+ reset_control_assert(simple->resets);
+
+ return 0;
+}
+
+static int dwc3_of_simple_resume(struct device *dev)
+{
+ struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+
+ if (simple->need_reset)
+ reset_control_deassert(simple->resets);
+
+ return 0;
+}
#endif
static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
SET_RUNTIME_PM_OPS(dwc3_of_simple_runtime_suspend,
dwc3_of_simple_runtime_resume, NULL)
};
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index c961a94d136b..5edd79470368 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -16,12 +16,10 @@
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/acpi.h>
#include <linux/delay.h>
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
#define PCI_DEVICE_ID_INTEL_BSW 0x22b7
@@ -34,18 +32,24 @@
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
#define PCI_INTEL_BXT_STATE_D0 0
#define PCI_INTEL_BXT_STATE_D3 3
+#define GP_RWBAR 1
+#define GP_RWREG1 0xa0
+#define GP_RWREG1_ULPI_REFCLK_DISABLE (1 << 17)
+
/**
* struct dwc3_pci - Driver private structure
* @dwc3: child dwc3 platform_device
* @pci: our link to PCI bus
* @guid: _DSM GUID
* @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
+ * @wakeup_work: work for asynchronous resume
*/
struct dwc3_pci {
struct platform_device *dwc3;
@@ -66,52 +70,74 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
{ },
};
-static int dwc3_pci_quirks(struct dwc3_pci *dwc)
+static struct gpiod_lookup_table platform_bytcr_gpios = {
+ .dev_id = "0000:00:16.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
+ {}
+ },
+};
+
+static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
{
- struct platform_device *dwc3 = dwc->dwc3;
- struct pci_dev *pdev = dwc->pci;
+ void __iomem *reg;
+ u32 value;
+
+ reg = pcim_iomap(pci, GP_RWBAR, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ value = readl(reg + GP_RWREG1);
+ if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
+ goto unmap; /* ULPI refclk already enabled */
+
+ value &= ~GP_RWREG1_ULPI_REFCLK_DISABLE;
+ writel(value, reg + GP_RWREG1);
+ /* This comes from the Intel Android x86 tree w/o any explanation */
+ msleep(100);
+unmap:
+ pcim_iounmap(pci, reg);
+ return 0;
+}
- if (pdev->vendor == PCI_VENDOR_ID_AMD &&
- pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
- struct property_entry properties[] = {
- PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
- PROPERTY_ENTRY_U8("snps,lpm-nyet-threshold", 0xf),
- PROPERTY_ENTRY_BOOL("snps,u2exit_lfps_quirk"),
- PROPERTY_ENTRY_BOOL("snps,u2ss_inp3_quirk"),
- PROPERTY_ENTRY_BOOL("snps,req_p1p2p3_quirk"),
- PROPERTY_ENTRY_BOOL("snps,del_p1p2p3_quirk"),
- PROPERTY_ENTRY_BOOL("snps,del_phy_power_chg_quirk"),
- PROPERTY_ENTRY_BOOL("snps,lfps_filter_quirk"),
- PROPERTY_ENTRY_BOOL("snps,rx_detect_poll_quirk"),
- PROPERTY_ENTRY_BOOL("snps,tx_de_emphasis_quirk"),
- PROPERTY_ENTRY_U8("snps,tx_de_emphasis", 1),
- /*
- * FIXME these quirks should be removed when AMD NL
- * tapes out
- */
- PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
- PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
- PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
- PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
- { },
- };
-
- return platform_device_add_properties(dwc3, properties);
- }
+static const struct property_entry dwc3_pci_intel_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
- if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
- int ret;
+static const struct property_entry dwc3_pci_mrfld_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
- struct property_entry properties[] = {
- PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
- PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
- { }
- };
+static const struct property_entry dwc3_pci_amd_properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_U8("snps,lpm-nyet-threshold", 0xf),
+ PROPERTY_ENTRY_BOOL("snps,u2exit_lfps_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,u2ss_inp3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,req_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_phy_power_chg_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,lfps_filter_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,rx_detect_poll_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,tx_de_emphasis_quirk"),
+ PROPERTY_ENTRY_U8("snps,tx_de_emphasis", 1),
+ /* FIXME these quirks should be removed when AMD NL tapes out */
+ PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
- ret = platform_device_add_properties(dwc3, properties);
- if (ret < 0)
- return ret;
+static int dwc3_pci_quirks(struct dwc3_pci *dwc)
+{
+ struct pci_dev *pdev = dwc->pci;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
@@ -120,6 +146,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
struct gpio_desc *gpio;
+ int ret;
+
+ /* On BYT the FW does not always enable the refclock */
+ ret = dwc3_byt_enable_ulpi_refclock(pdev);
+ if (ret)
+ return ret;
ret = devm_acpi_dev_add_driver_gpios(&pdev->dev,
acpi_dwc3_byt_gpios);
@@ -127,44 +159,36 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
dev_dbg(&pdev->dev, "failed to add mapping table\n");
/*
+	 * Many BYT devices lack ACPI resource entries for the
+	 * GPIOs; add a fallback mapping to the reference-design
+	 * GPIOs, which all boards seem to use.
+ */
+ gpiod_add_lookup_table(&platform_bytcr_gpios);
+
+ /*
* These GPIOs will turn on the USB2 PHY. Note that we have to
* put the gpio descriptors again here because the phy driver
* might want to grab them, too.
*/
- gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
+ GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
gpiod_set_value_cansleep(gpio, 1);
- gpiod_put(gpio);
- gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
if (gpio) {
gpiod_set_value_cansleep(gpio, 1);
- gpiod_put(gpio);
usleep_range(10000, 11000);
}
}
}
- if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
- (pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 ||
- pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI ||
- pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31)) {
- struct property_entry properties[] = {
- PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
- PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
- PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
- PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
- { },
- };
-
- return platform_device_add_properties(dwc3, properties);
- }
-
return 0;
}
@@ -184,9 +208,9 @@ static void dwc3_pci_resume_work(struct work_struct *work)
}
#endif
-static int dwc3_pci_probe(struct pci_dev *pci,
- const struct pci_device_id *id)
+static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
+ struct property_entry *p = (struct property_entry *)id->driver_data;
struct dwc3_pci *dwc;
struct resource res[2];
int ret;
@@ -229,6 +253,10 @@ static int dwc3_pci_probe(struct pci_dev *pci,
dwc->dwc3->dev.parent = dev;
ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
+ ret = platform_device_add_properties(dwc->dwc3, p);
+ if (ret < 0)
+ return ret;
+
ret = dwc3_pci_quirks(dwc);
if (ret)
goto err;
@@ -256,6 +284,7 @@ static void dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *dwc = pci_get_drvdata(pci);
+ gpiod_remove_lookup_table(&platform_bytcr_gpios);
#ifdef CONFIG_PM
cancel_work_sync(&dwc->wakeup_work);
#endif
@@ -265,31 +294,47 @@ static void dwc3_pci_remove(struct pci_dev *pci)
}
static const struct pci_device_id dwc3_pci_id_table[] = {
- {
- PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
- PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
- },
- {
- PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
- PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI),
- },
- {
- PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
- PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31),
- },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BSW),
+ (kernel_ulong_t) &dwc3_pci_intel_properties },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
+ (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTH),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT_M),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_APL),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_KBP),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_GLK),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPLP),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
+ (kernel_ulong_t) &dwc3_pci_amd_properties, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
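
With the per-platform property tables carried in driver_data and applied generically in probe, enabling a new part becomes a single table entry. For illustration only — PCI_DEVICE_ID_INTEL_FOO is a placeholder, not a real ID:

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_FOO),
	  (kernel_ulong_t) &dwc3_pci_intel_properties, },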
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index b0e67ab2f98c..a6d0203e40b6 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->dwc3 = of_find_device_by_node(dwc3_np);
if (!qcom->dwc3) {
dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+ ret = -ENODEV;
goto depopulate;
}
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
return ret;
}
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
return ret;
}
-#endif
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
return dwc3_qcom_suspend(qcom);
}
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
return dwc3_qcom_resume(qcom);
}
-#endif
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c77ff50a88a2..8efde178eef4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = dwc3_ep0_start_trans(dep);
} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
req->request.length && req->request.zero) {
- u32 maxpacket;
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
if (ret)
return;
- maxpacket = dep->endpoint.maxpacket;
-
/* prepare normal TRB */
dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req->request.length,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 69bf137aab37..032ea7d709ba 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1121,7 +1121,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
req->request.short_not_ok,
req->request.no_interrupt);
} else if (req->request.zero && req->request.length &&
- (IS_ALIGNED(req->request.length,dep->endpoint.maxpacket))) {
+ (IS_ALIGNED(req->request.length, maxp))) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index db610c56f1d6..2aacd1afd9ff 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -25,7 +25,7 @@ struct dwc3;
#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN BIT(9)
#define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10)
#define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11)
-#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(12)
+#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13)
#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE BIT(24)
#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f242c2bcea81..b8a15840b4ff 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
*/
if (w_value && !f->get_alt)
break;
+
+ spin_lock(&cdev->lock);
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
DBG(cdev, "delayed_status count %d\n",
cdev->delayed_status);
}
+ spin_unlock(&cdev->lock);
break;
case USB_REQ_GET_INTERFACE:
if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
@@ -1816,7 +1819,6 @@ unknown:
if (cdev->use_os_string && cdev->os_desc_config &&
(ctrl->bRequestType & USB_TYPE_VENDOR) &&
ctrl->bRequest == cdev->b_vendor_code) {
- struct usb_request *req;
struct usb_configuration *os_desc_cfg;
u8 *buf;
int interface;
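
Taking cdev->lock across f->set_alt() closes a race in the delayed-status accounting: without it, a quick completion could call usb_composite_setup_continue() and decrement cdev->delayed_status before composite_setup() had incremented it, leaving the count skewed and the status stage stuck.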
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index efba66ca0719..025129942894 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1217,8 +1217,8 @@ static void purge_configs_funcs(struct gadget_info *gi)
list_move_tail(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
- "unbind function '%s'/%p\n",
- f->name, f);
+ "unbind function '%s'/%p\n",
+ f->name, f);
f->unbind(c, f);
}
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index dce9d12c7981..3ada83d81bda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -215,6 +215,7 @@ struct ffs_io_data {
struct mm_struct *mm;
struct work_struct work;
+ struct work_struct cancellation_work;
struct usb_ep *ep;
struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
return 0;
}
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+ struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+ cancellation_work);
+
+ ENTER();
+
+ usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
- struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ struct ffs_data *ffs = io_data->ffs;
int value;
ENTER();
- spin_lock_irq(&epfile->ffs->eps_lock);
-
- if (likely(io_data && io_data->ep && io_data->req))
- value = usb_ep_dequeue(io_data->ep, io_data->req);
- else
+ if (likely(io_data && io_data->ep && io_data->req)) {
+ INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+ queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+ value = -EINPROGRESS;
+ } else {
value = -EINVAL;
-
- spin_unlock_irq(&epfile->ffs->eps_lock);
+ }
return value;
}
@@ -3253,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
- return USB_GADGET_DELAYED_STATUS;
+ return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static bool ffs_func_req_match(struct usb_function *f,
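
Two notes on the f_fs.c changes. usb_ep_dequeue() may block until the UDC has actually completed the request, which must not happen under eps_lock with interrupts disabled, so ffs_aio_cancel() now punts the dequeue to the function's io_completion_wq and returns -EINPROGRESS to tell the AIO core that cancellation finishes asynchronously. And ffs_func_setup() only returns USB_GADGET_DELAYED_STATUS for zero-length control requests, since the delayed-status mechanism is defined only for transfers without a data stage.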
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index acecd13dcbd9..ca8a4b53c59f 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -206,7 +206,6 @@
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
@@ -312,8 +311,6 @@ struct fsg_common {
void *private_data;
char inquiry_string[INQUIRY_STRING_LEN];
-
- struct kref ref;
};
struct fsg_dev {
@@ -2551,25 +2548,11 @@ static DEVICE_ATTR(file, 0, file_show, file_store);
/****************************** FSG COMMON ******************************/
-static void fsg_common_release(struct kref *ref);
-
static void fsg_lun_release(struct device *dev)
{
/* Nothing needs to be done */
}
-void fsg_common_get(struct fsg_common *common)
-{
- kref_get(&common->ref);
-}
-EXPORT_SYMBOL_GPL(fsg_common_get);
-
-void fsg_common_put(struct fsg_common *common)
-{
- kref_put(&common->ref, fsg_common_release);
-}
-EXPORT_SYMBOL_GPL(fsg_common_put);
-
static struct fsg_common *fsg_common_setup(struct fsg_common *common)
{
if (!common) {
@@ -2582,7 +2565,6 @@ static struct fsg_common *fsg_common_setup(struct fsg_common *common)
}
init_rwsem(&common->filesem);
spin_lock_init(&common->lock);
- kref_init(&common->ref);
init_completion(&common->thread_notifier);
init_waitqueue_head(&common->io_wait);
init_waitqueue_head(&common->fsg_wait);
@@ -2870,9 +2852,8 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
}
EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
-static void fsg_common_release(struct kref *ref)
+static void fsg_common_release(struct fsg_common *common)
{
- struct fsg_common *common = container_of(ref, struct fsg_common, ref);
int i;
/* If the thread isn't already dead, tell it to exit now */
@@ -3308,7 +3289,9 @@ static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
if (ret)
goto end;
- fsg_common_set_num_buffers(opts->common, num);
+ ret = fsg_common_set_num_buffers(opts->common, num);
+ if (ret)
+ goto end;
ret = len;
end:
@@ -3344,7 +3327,7 @@ static void fsg_free_inst(struct usb_function_instance *fi)
struct fsg_opts *opts;
opts = fsg_opts_from_func_inst(fi);
- fsg_common_put(opts->common);
+ fsg_common_release(opts->common);
kfree(opts);
}
@@ -3368,7 +3351,7 @@ static struct usb_function_instance *fsg_alloc_inst(void)
rc = fsg_common_set_num_buffers(opts->common,
CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
if (rc)
- goto release_opts;
+ goto release_common;
pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
@@ -3391,6 +3374,8 @@ static struct usb_function_instance *fsg_alloc_inst(void)
release_buffers:
fsg_common_free_buffers(opts->common);
+release_common:
+ kfree(opts->common);
release_opts:
kfree(opts);
return ERR_PTR(rc);
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index 58857fcf199f..3b8c4ce2a40a 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -115,10 +115,6 @@ fsg_opts_from_func_inst(const struct usb_function_instance *fi)
return container_of(fi, struct fsg_opts, func_inst);
}
-void fsg_common_get(struct fsg_common *common);
-
-void fsg_common_put(struct fsg_common *common);
-
void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs);
int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d78dbb73bde8..106988a6661a 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1071,15 +1071,16 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
{
struct se_session *se_sess = tv_nexus->tvn_se_sess;
struct usbg_cmd *cmd;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->se_cmd.tag = cmd->tag = scsi_tag;
cmd->fu = fu;
@@ -1288,7 +1289,7 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
struct se_session *se_sess = se_cmd->se_sess;
kfree(cmd->data_buf);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static u32 usbg_sess_get_index(struct se_session *se_sess)
@@ -1343,10 +1344,8 @@ static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
return 0;
}
-static struct se_portal_group *usbg_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
tport_wwn);
@@ -1379,7 +1378,7 @@ static struct se_portal_group *usbg_make_tpg(
goto unlock_dep;
} else {
ret = configfs_depend_item_unlocked(
- group->cg_subsys,
+ wwn->wwn_group.cg_subsys,
&opts->func_inst.group.cg_item);
if (ret)
goto unlock_dep;
@@ -1593,7 +1592,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
goto out_unlock;
}
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
USB_G_DEFAULT_SESSION_TAGS,
sizeof(struct usbg_cmd),
TARGET_PROT_NORMAL, name,
@@ -1639,7 +1638,7 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated vHost Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
kfree(tv_nexus);
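
The f_tcm changes track target-core API churn in this cycle: tag allocation moves from percpu_ida to sbitmap_queue, which also reports the CPU the tag was taken on (stored in map_cpu so target_free_tag() can release it back to the right cache), and nexus setup/teardown goes through target_setup_session()/target_remove_session() instead of the old alloc/deregister pair.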
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d2dc1f00180b..d582921f7257 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
};
struct cntrl_cur_lay3 {
- __u32 dCUR;
+ __le32 dCUR;
};
struct cntrl_range_lay3 {
- __u16 wNumSubRanges;
- __u32 dMIN;
- __u32 dMAX;
- __u32 dRES;
+ __le16 wNumSubRanges;
+ __le32 dMIN;
+ __le32 dMAX;
+ __le32 dRES;
} __packed;
static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
if (!agdev->out_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ return -ENODEV;
}
agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
if (!agdev->in_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ return -ENODEV;
}
agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
- c.dCUR = p_srate;
+ c.dCUR = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- c.dCUR = c_srate;
+ c.dCUR = cpu_to_le32(c_srate);
value = min_t(unsigned, w_length, sizeof c);
memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
if (entity_id == USB_IN_CLK_ID)
- r.dMIN = p_srate;
+ r.dMIN = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- r.dMIN = c_srate;
+ r.dMIN = cpu_to_le32(c_srate);
else
return -EOPNOTSUPP;
r.dMAX = r.dMIN;
r.dRES = 0;
- r.wNumSubRanges = 1;
+ r.wNumSubRanges = cpu_to_le16(1);
value = min_t(unsigned, w_length, sizeof r);
memcpy(req->buf, &r, value);
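
On the f_uac2.c fixes: UAC2 control payloads are little-endian on the wire, so assigning native-endian values to dCUR/dMIN/dMAX/wNumSubRanges sent byte-swapped sample rates on big-endian hosts. Typing the fields as __le16/__le32 and converting with cpu_to_le16()/cpu_to_le32() fixes that and lets sparse flag any future assignment that misses a conversion; the two usb_ep_autoconfig() failure paths also now return -ENODEV instead of a stale ret value.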
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 439eba660e95..d8ce7868fe22 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -6,16 +6,17 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/g_uvc.h>
#include <linux/usb/video.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -30,6 +31,8 @@
#include "uvc_video.h"
unsigned int uvc_gadget_trace_param;
+module_param_named(trace, uvc_gadget_trace_param, uint, 0644);
+MODULE_PARM_DESC(trace, "Trace level bitmask");
/* --------------------------------------------------------------------------
* Function descriptors
@@ -410,10 +413,21 @@ uvc_function_disconnect(struct uvc_device *uvc)
* USB probe and disconnect
*/
+static ssize_t function_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uvc_device *uvc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", uvc->func.fi->group.cg_item.ci_name);
+}
+
+static DEVICE_ATTR_RO(function_name);
+
static int
uvc_register_video(struct uvc_device *uvc)
{
struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ int ret;
/* TODO reference counting. */
uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
@@ -426,7 +440,17 @@ uvc_register_video(struct uvc_device *uvc)
video_set_drvdata(&uvc->vdev, uvc);
- return video_register_device(&uvc->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&uvc->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ return ret;
+
+ ret = device_create_file(&uvc->vdev.dev, &dev_attr_function_name);
+ if (ret < 0) {
+ video_unregister_device(&uvc->vdev);
+ return ret;
+ }
+
+ return 0;
}
#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
@@ -864,6 +888,7 @@ static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
INFO(cdev, "%s\n", __func__);
+ device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
v4l2_device_unregister(&uvc->v4l2_dev);
diff --git a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h
index 81defe4557fe..a81a17765558 100644
--- a/drivers/usb/gadget/function/f_uvc.h
+++ b/drivers/usb/gadget/function/f_uvc.h
@@ -9,10 +9,7 @@
#ifndef _F_UVC_H_
#define _F_UVC_H_
-#include <linux/usb/composite.h>
-#include <linux/usb/video.h>
-
-#include "uvc.h"
+struct uvc_device;
void uvc_function_setup_continue(struct uvc_device *uvc);
@@ -21,4 +18,3 @@ void uvc_function_connect(struct uvc_device *uvc);
void uvc_function_disconnect(struct uvc_device *uvc);
#endif /* _F_UVC_H_ */
-
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index a72295c953bb..fb5ed97572e5 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -32,9 +32,6 @@ struct uac_req {
struct uac_rtd_params {
struct snd_uac_chip *uac; /* parent chip */
bool ep_enabled; /* if the ep is enabled */
- /* Size of the ring buffer */
- size_t dma_bytes;
- unsigned char *dma_area;
struct snd_pcm_substream *ss;
@@ -43,8 +40,6 @@ struct uac_rtd_params {
void *rbuf;
- size_t period_size;
-
unsigned max_psize; /* MaxPacketSize of endpoint */
struct uac_req *ureq;
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
{
unsigned pending;
- unsigned long flags;
+ unsigned long flags, flags2;
unsigned int hw_ptr;
- bool update_alsa = false;
int status = req->status;
struct uac_req *ur = req->context;
struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
struct uac_rtd_params *prm = ur->pp;
struct snd_uac_chip *uac = prm->uac;
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
if (!substream)
goto exit;
+ snd_pcm_stream_lock_irqsave(substream, flags2);
+
+ runtime = substream->runtime;
+ if (!runtime || !snd_pcm_running(substream)) {
+ snd_pcm_stream_unlock_irqrestore(substream, flags2);
+ goto exit;
+ }
+
spin_lock_irqsave(&prm->lock, flags);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
req->actual = req->length;
}
- pending = prm->hw_ptr % prm->period_size;
- pending += req->actual;
- if (pending >= prm->period_size)
- update_alsa = true;
-
hw_ptr = prm->hw_ptr;
- prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
spin_unlock_irqrestore(&prm->lock, flags);
/* Pack USB load in ALSA ring buffer */
- pending = prm->dma_bytes - hw_ptr;
+ pending = runtime->dma_bytes - hw_ptr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (unlikely(pending < req->actual)) {
- memcpy(req->buf, prm->dma_area + hw_ptr, pending);
- memcpy(req->buf + pending, prm->dma_area,
+ memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+ memcpy(req->buf + pending, runtime->dma_area,
req->actual - pending);
} else {
- memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+ memcpy(req->buf, runtime->dma_area + hw_ptr,
+ req->actual);
}
} else {
if (unlikely(pending < req->actual)) {
- memcpy(prm->dma_area + hw_ptr, req->buf, pending);
- memcpy(prm->dma_area, req->buf + pending,
+ memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+ memcpy(runtime->dma_area, req->buf + pending,
req->actual - pending);
} else {
- memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+ memcpy(runtime->dma_area + hw_ptr, req->buf,
+ req->actual);
}
}
+ spin_lock_irqsave(&prm->lock, flags);
+ /* update hw_ptr after data is copied to memory */
+ prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+ hw_ptr = prm->hw_ptr;
+ spin_unlock_irqrestore(&prm->lock, flags);
+ snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+ if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+ snd_pcm_period_elapsed(substream);
+
exit:
if (usb_ep_queue(ep, req, GFP_ATOMIC))
dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
- if (update_alsa)
- snd_pcm_period_elapsed(substream);
}
static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
- struct uac_rtd_params *prm;
- int err;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac->p_prm;
- else
- prm = &uac->c_prm;
-
- err = snd_pcm_lib_malloc_pages(substream,
+ return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (err >= 0) {
- prm->dma_bytes = substream->runtime->dma_bytes;
- prm->dma_area = substream->runtime->dma_area;
- prm->period_size = params_period_bytes(hw_params);
- }
-
- return err;
}
static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
{
- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
- struct uac_rtd_params *prm;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac->p_prm;
- else
- prm = &uac->c_prm;
-
- prm->dma_area = NULL;
- prm->dma_bytes = 0;
- prm->period_size = 0;
-
return snd_pcm_lib_free_pages(substream);
}
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
if (err < 0)
goto snd_fail;
- strcpy(pcm->name, pcm_name);
+ strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
pcm->private_data = uac;
uac->pcm = pcm;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
- strcpy(card->driver, card_name);
- strcpy(card->shortname, card_name);
+ strlcpy(card->driver, card_name, sizeof(card->driver));
+ strlcpy(card->shortname, card_name, sizeof(card->shortname));
sprintf(card->longname, "%s %i", card_name, card->dev->id);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
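
A quick worked example of the new period check in u_audio_iso_complete(), assuming 512-byte periods: a 24-byte completion that moves hw_ptr from 500 to 524 gives 524 % 512 = 12, and 12 < 24, so the transfer crossed a period boundary and snd_pcm_period_elapsed() fires; one that moves hw_ptr from 100 to 124 gives 124 % 512 = 124 >= 24 and correctly stays quiet. Updating hw_ptr only after the memcpy (and under the stream lock) also ensures the ALSA pointer callback can never expose bytes the USB side has not finished copying.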
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index d00d3ded71c0..2ed292e94fbc 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -13,6 +13,7 @@
#ifndef U_UVC_H
#define U_UVC_H
+#include <linux/mutex.h>
#include <linux/usb/composite.h>
#include <linux/usb/video.h>
@@ -20,7 +21,6 @@
struct f_uvc_opts {
struct usb_function_instance func_inst;
- unsigned int uvc_gadget_trace_param;
unsigned int streaming_interval;
unsigned int streaming_maxpacket;
unsigned int streaming_maxburst;
@@ -80,7 +80,4 @@ struct f_uvc_opts {
int refcnt;
};
-void uvc_set_trace_param(unsigned int trace);
-
#endif /* U_UVC_H */
-
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index a64e07e61f8c..93cf78b420fe 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -9,52 +9,26 @@
#ifndef _UVC_GADGET_H_
#define _UVC_GADGET_H_
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/usb/ch9.h>
-
-#define UVC_EVENT_FIRST (V4L2_EVENT_PRIVATE_START + 0)
-#define UVC_EVENT_CONNECT (V4L2_EVENT_PRIVATE_START + 0)
-#define UVC_EVENT_DISCONNECT (V4L2_EVENT_PRIVATE_START + 1)
-#define UVC_EVENT_STREAMON (V4L2_EVENT_PRIVATE_START + 2)
-#define UVC_EVENT_STREAMOFF (V4L2_EVENT_PRIVATE_START + 3)
-#define UVC_EVENT_SETUP (V4L2_EVENT_PRIVATE_START + 4)
-#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
-#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
-
-struct uvc_request_data {
- __s32 length;
- __u8 data[60];
-};
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/usb/composite.h>
+#include <linux/videodev2.h>
-struct uvc_event {
- union {
- enum usb_device_speed speed;
- struct usb_ctrlrequest req;
- struct uvc_request_data data;
- };
-};
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
-#define UVCIOC_SEND_RESPONSE _IOW('U', 1, struct uvc_request_data)
+#include "uvc_queue.h"
-#define UVC_INTF_CONTROL 0
-#define UVC_INTF_STREAMING 1
+struct usb_ep;
+struct usb_request;
+struct uvc_descriptor_header;
/* ------------------------------------------------------------------------
* Debugging, printing and logging
*/
-#ifdef __KERNEL__
-
-#include <linux/usb.h> /* For usb_endpoint_* */
-#include <linux/usb/composite.h>
-#include <linux/usb/gadget.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-device.h>
-
-#include "uvc_queue.h"
-
#define UVC_TRACE_PROBE (1 << 0)
#define UVC_TRACE_DESCR (1 << 1)
#define UVC_TRACE_CONTROL (1 << 2)
@@ -184,7 +158,4 @@ extern void uvc_endpoint_stream(struct uvc_device *dev);
extern void uvc_function_connect(struct uvc_device *uvc);
extern void uvc_function_disconnect(struct uvc_device *uvc);
-#endif /* __KERNEL__ */
-
#endif /* _UVC_GADGET_H_ */
-
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index c9b8cc4aae5a..b51f0d278826 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -31,7 +31,11 @@ static struct configfs_attribute prefix##attr_##cname = { \
.show = prefix##cname##_show, \
}
-static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item);
+static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_uvc_opts,
+ func_inst.group);
+}
/* control/header/<NAME> */
DECLARE_UVC_HEADER_DESCRIPTOR(1);
@@ -2105,12 +2109,6 @@ static const struct config_item_type uvcg_streaming_grp_type = {
.ct_owner = THIS_MODULE,
};
-static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
-{
- return container_of(to_config_group(item), struct f_uvc_opts,
- func_inst.group);
-}
-
static void uvc_attr_release(struct config_item *item)
{
struct f_uvc_opts *opts = to_f_uvc_opts(item);
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index f9f65b5c1062..2f0fff769843 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -2,13 +2,15 @@
#ifndef _UVC_QUEUE_H_
#define _UVC_QUEUE_H_
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/poll.h>
-#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+
#include <media/videobuf2-v4l2.h>
+struct file;
+struct mutex;
+
/* Maximum frame size in bytes, for sanity checking. */
#define UVC_MAX_FRAME_SIZE (16*1024*1024)
/* Maximum number of video buffers. */
@@ -91,7 +93,5 @@ struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
-#endif /* __KERNEL__ */
-
#endif /* _UVC_QUEUE_H_ */
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 9a9019625496..7f1ca3b57823 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -6,10 +6,11 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/usb/g_uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index 6c20aa75f966..7d77122b0ff9 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -12,6 +12,8 @@
#ifndef __UVC_VIDEO_H__
#define __UVC_VIDEO_H__
+struct uvc_video;
+
int uvcg_video_pump(struct uvc_video *video);
int uvcg_video_enable(struct uvc_video *video, int enable);
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 682bf99dcf76..40870227999a 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -41,7 +41,7 @@ static struct usb_device_descriptor usbg_device_desc = {
#define USB_G_STR_CONFIG USB_GADGET_FIRST_AVAIL_IDX
static struct usb_string usbg_us_strings[] = {
- [USB_GADGET_MANUFACTURER_IDX].s = "Target Manufactor",
+ [USB_GADGET_MANUFACTURER_IDX].s = "Target Manufacturer",
[USB_GADGET_PRODUCT_IDX].s = "Target Product",
[USB_GADGET_SERIAL_IDX].s = "000000000001",
[USB_G_STR_CONFIG].s = "default config",
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
index 6b86568c9157..a9f8eb8e1c76 100644
--- a/drivers/usb/gadget/legacy/webcam.c
+++ b/drivers/usb/gadget/legacy/webcam.c
@@ -30,9 +30,6 @@ static unsigned int streaming_maxburst;
module_param(streaming_maxburst, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(streaming_maxburst, "0 - 15 (ss only)");
-static unsigned int trace;
-module_param(trace, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(trace, "Trace level bitmask");
/* --------------------------------------------------------------------------
* Device descriptor
*/
@@ -379,7 +376,6 @@ webcam_bind(struct usb_composite_dev *cdev)
uvc_opts->streaming_interval = streaming_interval;
uvc_opts->streaming_maxpacket = streaming_maxpacket;
uvc_opts->streaming_maxburst = streaming_maxburst;
- uvc_set_trace_param(trace);
uvc_opts->fs_control = uvc_fs_control_cls;
uvc_opts->ss_control = uvc_ss_control_cls;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1df4dedffe86..0a16cbd4e528 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -193,6 +193,7 @@ config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
depends on ARCH_RENESAS || COMPILE_TEST
depends on EXTCON
+ select USB_ROLE_SWITCH
help
The Renesas USB3.0 peripheral controller supports super, high,
and full speed USB 3.0 data transfers.
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
index f0cdf89b8503..83ba8a2eb6af 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
+++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
@@ -2,6 +2,7 @@
config USB_ASPEED_VHUB
tristate "Aspeed vHub UDC driver"
depends on ARCH_ASPEED || COMPILE_TEST
+ depends on USB_LIBCOMPOSITE
help
USB peripheral controller for the Aspeed AST2500 family of
SoCs supporting the "vHub" functionality and USB2.0
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 20ffb03ff6ac..e2927fb083cf 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
/* Check our state, cancel pending requests if needed */
if (ep->ep0.state != ep0_state_token) {
EPDBG(ep, "wrong state\n");
+ ast_vhub_nuke(ep, -EIO);
+
+ /*
+ * Accept the packet regardless; this seems to happen
+ * when stalling a SETUP packet that has an OUT data
+ * phase.
+ */
ast_vhub_nuke(ep, 0);
goto stall;
}
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
if (chunk && req->req.buf)
memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
+ vhub_dma_workaround(ep->buf);
+
/* Remember chunk size and trigger send */
reg = VHUB_EP0_SET_TX_LEN(chunk);
writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
EPVDBG(ep, "rx prime\n");
/* Prime endpoint for receiving data */
- writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+ writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 80c9feac5147..5939eb1e97f2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
if (!req->req.dma) {
/* For IN transfers, copy data over first */
- if (ep->epn.is_in)
+ if (ep->epn.is_in) {
memcpy(ep->buf, req->req.buf + act, chunk);
+ vhub_dma_workaround(ep->buf);
+ }
writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
- } else
+ } else {
+ if (ep->epn.is_in)
+ vhub_dma_workaround(req->req.buf);
writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+ }
/* Start DMA */
req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
struct ast_vhub_req *req)
{
+ struct ast_vhub_desc *desc = NULL;
unsigned int act = req->act_count;
unsigned int len = req->req.length;
unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
/* While we can create descriptors */
while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
- struct ast_vhub_desc *desc;
unsigned int d_num;
/* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
req->act_count = act = act + chunk;
}
+ if (likely(desc))
+ vhub_dma_workaround(desc);
+
/* Tell HW about new descriptors */
writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 2b040257bc1f..4ed03d33a5a9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -462,6 +462,39 @@ enum std_req_rc {
#define DDBG(d, fmt, ...) do { } while(0)
#endif
+static inline void vhub_dma_workaround(void *addr)
+{
+ /*
+ * This works around a confirmed HW issue with the Aspeed chip.
+ *
+ * The core uses a different bus to memory than the AHB going to
+ * the USB device controller. Due to the latter having a higher
+ * priority than the core for arbitration on that bus, it's
+ * possible for an MMIO to the device, followed by a DMA by the
+ * device from memory, to all be performed and serviced before
+ * a previous store to memory has completed.
+ *
+ * Thus the following scenario can happen:
+ *
+ * - Driver writes to a DMA descriptor (Mbus)
+ * - Driver writes to the MMIO register to start the DMA (AHB)
+ * - The gadget sees the second write and sends a read of the
+ * descriptor to the memory controller (Mbus)
+ * - The gadget's read hits memory before the descriptor write
+ * has landed, causing it to read a stale value.
+ *
+ * Thankfully the problem is limited to the USB gadget device; all
+ * other masters in the SoC have a lower priority than the core,
+ * ensuring that the core's store arrives first.
+ *
+ * The workaround consists of using a dummy read of the memory before
+ * doing the MMIO writes. This will ensure that the previous writes
+ * have been "pushed out".
+ */
+ mb();
+ (void)__raw_readl((void __iomem *)addr);
+}
+
/* core.c */
void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
int status);
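The comment above describes a generic ordering hazard: a descriptor store on a slow bus can be overtaken by an MMIO doorbell plus device DMA on a faster one. A minimal sketch of the resulting pattern, with hypothetical my_dev/my_desc types and DOORBELL offset (illustration only, not part of the patch):

#include <linux/io.h>

struct my_desc { __le32 addr; };
struct my_dev  { void __iomem *regs; struct my_desc *desc; u32 buf_dma; };

#define DOORBELL	0x10		/* hypothetical register offset */

static void kick_dma(struct my_dev *dev)
{
	/* 1. Store the descriptor to memory. */
	dev->desc->addr = cpu_to_le32(dev->buf_dma);

	/* 2. Barrier plus dummy read of the descriptor: forces the
	 *    store to actually reach memory before the MMIO below. */
	mb();
	(void)__raw_readl((void __iomem *)dev->desc);

	/* 3. Only now ring the doorbell. */
	writel(1, dev->regs + DOORBELL);
}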
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index cab5e4f09924..af88b48c1cea 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -87,6 +87,8 @@ EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
* configurable, with more generic names like "ep-a". (remember that for
* USB, "in" means "towards the USB master".)
*
+ * This routine must be called in process context.
+ *
* returns zero, or a negative error code.
*/
int usb_ep_enable(struct usb_ep *ep)
@@ -119,6 +121,8 @@ EXPORT_SYMBOL_GPL(usb_ep_enable);
* gadget drivers must call usb_ep_enable() again before queueing
* requests to the endpoint.
*
+ * This routine must be called in process context.
+ *
* returns zero, or a negative error code.
*/
int usb_ep_disable(struct usb_ep *ep)
@@ -241,6 +245,8 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request);
* Note that @req's ->complete() callback must never be called from
* within usb_ep_queue() as that can create deadlock situations.
*
+ * This routine may be called in interrupt context.
+ *
* Returns zero, or a negative error code. Endpoints that are not enabled
* report errors; errors will also be
* reported when the usb peripheral is disconnected.
@@ -284,6 +290,8 @@ EXPORT_SYMBOL_GPL(usb_ep_queue);
* at the head of the queue) except as part of disconnecting from usb. Such
* restrictions prevent drivers from supporting configuration changes,
* even to configuration zero (a "chapter 9" requirement).
+ *
+ * This routine may be called in interrupt context.
*/
int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
@@ -311,6 +319,8 @@ EXPORT_SYMBOL_GPL(usb_ep_dequeue);
* current altsetting, see usb_ep_clear_halt(). When switching altsettings,
* it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
*
+ * This routine may be called in interrupt context.
+ *
* Returns zero, or a negative error code. On success, this call sets
* underlying hardware state that blocks data transfers.
* Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
@@ -336,6 +346,8 @@ EXPORT_SYMBOL_GPL(usb_ep_set_halt);
* for endpoints that aren't reconfigured, after clearing any other state
* in the endpoint's i/o queue.
*
+ * This routine may be called in interrupt context.
+ *
* Returns zero, or a negative error code. On success, this call clears
* the underlying hardware state reflecting endpoint halt and data toggle.
* Note that some hardware can't support this request (like pxa2xx_udc),
@@ -360,6 +372,8 @@ EXPORT_SYMBOL_GPL(usb_ep_clear_halt);
* requests. If the gadget driver clears the halt status, it will
* automatically unwedge the endpoint.
*
+ * This routine may be called in interrupt context.
+ *
* Returns zero on success, else negative errno.
*/
int usb_ep_set_wedge(struct usb_ep *ep)
@@ -388,6 +402,8 @@ EXPORT_SYMBOL_GPL(usb_ep_set_wedge);
* written OUT to it by the host. Drivers that need precise handling for
* fault reporting or recovery may need to use this call.
*
+ * This routine may be called in interrupt context.
+ *
* This returns the number of such bytes in the fifo, or a negative
* errno if the endpoint doesn't use a FIFO or doesn't support such
* precise handling.
@@ -415,6 +431,8 @@ EXPORT_SYMBOL_GPL(usb_ep_fifo_status);
* an endpoint fifo after abnormal transaction terminations. The call
* must never be used except when endpoint is not being used for any
* protocol translation.
+ *
+ * This routine may be called in interrupt context.
*/
void usb_ep_fifo_flush(struct usb_ep *ep)
{
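The calling-context notes added above encode real constraints: usb_ep_enable()/usb_ep_disable() may sleep, while the queue/dequeue/halt/fifo calls must also be usable from interrupt context. A sketch of what that means for a gadget function driver (hypothetical my_func type; illustration only):

#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>

struct my_func {
	struct usb_function func;
	struct usb_ep *ep;
};

/* Completion handlers may run in interrupt context, so a resubmit
 * here must not sleep: GFP_ATOMIC, never GFP_KERNEL. */
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status == 0)
		usb_ep_queue(ep, req, GFP_ATOMIC);
}

/* set_alt() runs in process context, where the possibly-sleeping
 * usb_ep_enable() is allowed. */
static int my_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct my_func *mf = container_of(f, struct my_func, func);

	return usb_ep_enable(mf->ep);
}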
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
index f29cf5c6160c..5a321992decc 100644
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/udc/fsl_mxc_udc.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index a3ecce62662b..11e25a3f4f1f 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
r8a66597_bset(r8a66597, XCKE, SYSCFG0);
- msleep(3);
+ mdelay(3);
r8a66597_bset(r8a66597, PLLC, SYSCFG0);
- msleep(1);
+ mdelay(1);
r8a66597_bset(r8a66597, SCKE, SYSCFG0);
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
r8a66597->ep0_req->length = 2;
/* AV: what happens if we get called again before that gets through? */
spin_unlock(&r8a66597->lock);
- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
spin_lock(&r8a66597->lock);
}
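Both changes follow from one rule: this code runs in atomic context (in an interrupt handler, or with a spinlock held), where sleeping primitives such as msleep() and GFP_KERNEL allocations are forbidden; mdelay() busy-waits and GFP_ATOMIC never sleeps. In miniature, with a hypothetical lock:

#include <linux/delay.h>
#include <linux/spinlock.h>

static void atomic_section(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	mdelay(1);		/* busy-wait: fine while atomic */
	/* msleep(1) here would sleep while atomic: a bug */
	spin_unlock_irqrestore(lock, flags);
}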
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 7cf98c793e04..1f879b3f2c96 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -23,6 +23,8 @@
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/of.h>
+#include <linux/usb/role.h>
/* register definitions */
#define USB3_AXI_INT_STA 0x008
@@ -335,6 +337,11 @@ struct renesas_usb3 {
struct phy *phy;
struct dentry *dentry;
+ struct usb_role_switch *role_sw;
+ struct device *host_dev;
+ struct work_struct role_work;
+ enum usb_role role;
+
struct renesas_usb3_ep *usb3_ep;
int num_usb3_eps;
@@ -651,6 +658,14 @@ static void usb3_check_vbus(struct renesas_usb3 *usb3)
}
}
+static void renesas_usb3_role_work(struct work_struct *work)
+{
+ struct renesas_usb3 *usb3 =
+ container_of(work, struct renesas_usb3, role_work);
+
+ usb_role_switch_set_role(usb3->role_sw, usb3->role);
+}
+
static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
{
if (host)
@@ -659,6 +674,16 @@ static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
}
+static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
+{
+ if (usb3->role_sw) {
+ usb3->role = host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+ schedule_work(&usb3->role_work);
+ } else {
+ usb3_set_mode(usb3, host);
+ }
+}
+
static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
{
if (enable)
@@ -672,7 +697,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
unsigned long flags;
spin_lock_irqsave(&usb3->lock, flags);
- usb3_set_mode(usb3, host);
+ usb3_set_mode_by_role_sw(usb3, host);
usb3_vbus_out(usb3, a_dev);
/* for A-Peripheral or forced B-device mode */
if ((!host && a_dev) ||
@@ -2302,6 +2327,41 @@ static const struct usb_gadget_ops renesas_usb3_gadget_ops = {
.set_selfpowered = renesas_usb3_set_selfpowered,
};
+static enum usb_role renesas_usb3_role_switch_get(struct device *dev)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ enum usb_role cur_role;
+
+ pm_runtime_get_sync(dev);
+ cur_role = usb3_is_host(usb3) ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+ pm_runtime_put(dev);
+
+ return cur_role;
+}
+
+static int renesas_usb3_role_switch_set(struct device *dev,
+ enum usb_role role)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ struct device *host = usb3->host_dev;
+ enum usb_role cur_role = renesas_usb3_role_switch_get(dev);
+
+ pm_runtime_get_sync(dev);
+ if (cur_role == USB_ROLE_HOST && role == USB_ROLE_DEVICE) {
+ device_release_driver(host);
+ usb3_set_mode(usb3, false);
+ } else if (cur_role == USB_ROLE_DEVICE && role == USB_ROLE_HOST) {
+ /* Must set the mode before device_attach of the host */
+ usb3_set_mode(usb3, true);
+ /* This device_attach() might sleep */
+ if (device_attach(host) < 0)
+ dev_err(dev, "device_attach(host) failed\n");
+ }
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
static ssize_t role_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -2405,6 +2465,8 @@ static int renesas_usb3_remove(struct platform_device *pdev)
debugfs_remove_recursive(usb3->dentry);
device_remove_file(&pdev->dev, &dev_attr_role);
+ usb_role_switch_unregister(usb3->role_sw);
+
usb_del_gadget_udc(&usb3->gadget);
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
@@ -2562,6 +2624,12 @@ static const unsigned int renesas_usb3_cable[] = {
EXTCON_NONE,
};
+static const struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
+ .set = renesas_usb3_role_switch_set,
+ .get = renesas_usb3_role_switch_get,
+ .allow_userspace_control = true,
+};
+
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
@@ -2647,6 +2715,20 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
goto err_dev_create;
+ INIT_WORK(&usb3->role_work, renesas_usb3_role_work);
+ usb3->role_sw = usb_role_switch_register(&pdev->dev,
+ &renesas_usb3_role_switch_desc);
+ if (!IS_ERR(usb3->role_sw)) {
+ usb3->host_dev = usb_of_get_companion_dev(&pdev->dev);
+ if (!usb3->host_dev) {
+ /* If not found, this driver will not use a role sw */
+ usb_role_switch_unregister(usb3->role_sw);
+ usb3->role_sw = NULL;
+ }
+ } else {
+ usb3->role_sw = NULL;
+ }
+
usb3->workaround_for_vbus = priv->workaround_for_vbus;
renesas_usb3_debugfs_init(usb3, &pdev->dev);
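usb3_set_mode_by_role_sw() defers the role change to a workqueue because usb3_mode_config() holds a spinlock with interrupts disabled, while usb_role_switch_set_role() may sleep. This is the standard pattern of punting sleeping work out of atomic context; a minimal sketch with hypothetical names:

#include <linux/usb/role.h>
#include <linux/workqueue.h>

struct my_ctrl {
	struct work_struct work;
	struct usb_role_switch *sw;
	enum usb_role role;
};

static void my_role_work(struct work_struct *work)
{
	struct my_ctrl *c = container_of(work, struct my_ctrl, work);

	usb_role_switch_set_role(c->sw, c->role);	/* may sleep: OK here */
}

static void my_set_role_atomic(struct my_ctrl *c, enum usb_role role)
{
	c->role = role;			/* record the request ... */
	schedule_work(&c->work);	/* ... and run it in process context */
}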
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 6e64d3a64dbb..1a4ea98cac2a 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -192,6 +192,14 @@ config USB_EHCI_MXC
---help---
Variation of ARC USB block used in some Freescale chips.
+config USB_EHCI_HCD_NPCM7XX
+ tristate "Support for Nuvoton NPCM7XX on-chip EHCI USB controller"
+ depends on (USB_EHCI_HCD && ARCH_NPCM7XX) || COMPILE_TEST
+ default y if (USB_EHCI_HCD && ARCH_NPCM7XX)
+ help
+ Enables support for the on-chip EHCI controller on
+ Nuvoton NPCM7XX chips.
+
config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
depends on ARCH_OMAP
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 9b669c9f9a48..e6235269c151 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
obj-$(CONFIG_USB_EHCI_MXC) += ehci-mxc.o
+obj-$(CONFIG_USB_EHCI_HCD_NPCM7XX) += ehci-npcm7xx.o
obj-$(CONFIG_USB_EHCI_HCD_OMAP) += ehci-omap.o
obj-$(CONFIG_USB_EHCI_HCD_ORION) += ehci-orion.o
obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index d9145a8f35d2..8e3bab1e0c1f 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -161,16 +161,10 @@ static int exynos_ehci_probe(struct platform_device *pdev)
}
exynos_ehci = to_exynos_ehci(hcd);
- if (of_device_is_compatible(pdev->dev.of_node,
- "samsung,exynos5440-ehci"))
- goto skip_phy;
-
err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci);
if (err)
goto fail_clk;
-skip_phy:
-
exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ehci->clk)) {
@@ -304,7 +298,6 @@ static const struct dev_pm_ops exynos_ehci_pm_ops = {
#ifdef CONFIG_OF
static const struct of_device_id exynos_ehci_match[] = {
{ .compatible = "samsung,exynos4210-ehci" },
- { .compatible = "samsung,exynos5440-ehci" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_ehci_match);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 89c47ae5c7d3..8608ac513fb7 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1226,6 +1226,7 @@ static const struct hc_driver ehci_hc_driver = {
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+ .get_resuming_ports = ehci_get_resuming_ports,
/*
* device support
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index d7641cbdee43..ce0eaf7d7c12 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -512,10 +512,18 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
return -ESHUTDOWN;
}
+static unsigned long ehci_get_resuming_ports(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ return ehci->resuming_ports;
+}
+
#else
#define ehci_bus_suspend NULL
#define ehci_bus_resume NULL
+#define ehci_get_resuming_ports NULL
#endif /* CONFIG_PM */
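ehci->resuming_ports is a bitmask with bit N set while port N+1 is resuming; the new hook simply exposes it to usbcore. A hypothetical consumer (not from this patch) would walk it like this:

#include <linux/usb.h>
#include <linux/usb/hcd.h>

static void report_resuming(struct usb_hcd *hcd)
{
	unsigned long mask;
	unsigned int port;

	if (!hcd->driver->get_resuming_ports)
		return;

	mask = hcd->driver->get_resuming_ports(hcd);
	for_each_set_bit(port, &mask, USB_MAXCHILDREN)
		dev_dbg(hcd->self.controller, "port %u resuming\n", port + 1);
}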
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
new file mode 100644
index 000000000000..adaf8fb4b459
--- /dev/null
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM7xx driver for EHCI HCD
+ *
+ * Copyright (C) 2018 Nuvoton Technologies,
+ * Avi Fishman <avi.fishman@nuvoton.com> <avifishman70@gmail.com>
+ * Tomer Maimon <tomer.maimon@nuvoton.com> <tmaimon77@gmail.com>
+ *
+ * Based on the ehci-spear.c driver
+ */
+
+
+#include <linux/dma-mapping.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define DRIVER_DESC "EHCI npcm7xx driver"
+
+static const char hcd_name[] = "npcm7xx-ehci";
+
+#define USB2PHYCTL_OFFSET 0x144
+
+#define IPSRST2_OFFSET 0x24
+#define IPSRST3_OFFSET 0x34
+
+
+static struct hc_driver __read_mostly ehci_npcm7xx_hc_driver;
+
+#ifdef CONFIG_PM_SLEEP
+static int ehci_npcm7xx_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_npcm7xx_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ehci_npcm7xx_pm_ops, ehci_npcm7xx_drv_suspend,
+ ehci_npcm7xx_drv_resume);
+
+static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res;
+ struct regmap *gcr_regmap;
+ struct regmap *rst_regmap;
+ const struct hc_driver *driver = &ehci_npcm7xx_hc_driver;
+ int irq;
+ int retval;
+
+ dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n");
+
+ gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+ if (IS_ERR(gcr_regmap)) {
+ dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-gcr\n",
+ __func__);
+ return PTR_ERR(gcr_regmap);
+ }
+
+ rst_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
+ if (IS_ERR(rst_regmap)) {
+ dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-rst\n",
+ __func__);
+ return PTR_ERR(rst_regmap);
+ }
+
+ /* PHY initialization */
+ // reset usb host
+ regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
+ (0x1 << 26), (0x1 << 26));
+ regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
+ (0x1 << 25), (0x1 << 25));
+ regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
+ (0x1 << 28), 0);
+
+ udelay(1);
+
+ // enable phy
+ regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
+ (0x1 << 25), 0);
+
+ udelay(50); // wait for the phy to settle
+
+ regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
+ (0x1 << 28), (0x1 << 28));
+
+ // enable host
+ regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
+ (0x1 << 26), 0);
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ retval = irq;
+ goto fail;
+ }
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err_put_hcd;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ /* registers start at offset 0x0 */
+ hcd_to_ehci(hcd)->caps = hcd->regs;
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval)
+ goto err_put_hcd;
+
+ device_wakeup_enable(hcd->self.controller);
+ return retval;
+
+err_put_hcd:
+ usb_put_hcd(hcd);
+fail:
+ dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+ return retval;
+}
+
+static int npcm7xx_ehci_hcd_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static const struct of_device_id npcm7xx_ehci_id_table[] = {
+ { .compatible = "nuvoton,npcm750-ehci" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, npcm7xx_ehci_id_table);
+
+static struct platform_driver npcm7xx_ehci_hcd_driver = {
+ .probe = npcm7xx_ehci_hcd_drv_probe,
+ .remove = npcm7xx_ehci_hcd_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "npcm7xx-ehci",
+ .bus = &platform_bus_type,
+ .pm = &ehci_npcm7xx_pm_ops,
+ .of_match_table = npcm7xx_ehci_id_table,
+ }
+};
+
+static int __init ehci_npcm7xx_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_npcm7xx_hc_driver, NULL);
+ return platform_driver_register(&npcm7xx_ehci_hcd_driver);
+}
+module_init(ehci_npcm7xx_init);
+
+static void __exit ehci_npcm7xx_cleanup(void)
+{
+ platform_driver_unregister(&npcm7xx_ehci_hcd_driver);
+}
+module_exit(ehci_npcm7xx_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:npcm7xx-ehci");
+MODULE_AUTHOR("Avi Fishman");
+MODULE_LICENSE("GPL v2");
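The PHY bring-up in the probe above is driven entirely by regmap_update_bits(map, reg, mask, val), a read-modify-write that touches only the bits in mask. The (0x1 << N) constants could be given names; a sketch with hypothetical macro names for the same bits:

#define IPSRST2_USBH	BIT(26)		/* hypothetical name for (0x1 << 26) */
#define IPSRST3_USBPHY	BIT(25)

/* Assert the host reset bit ... */
regmap_update_bits(rst_regmap, IPSRST2_OFFSET, IPSRST2_USBH, IPSRST2_USBH);
/* ... and later deassert it: same mask, zero value. */
regmap_update_bits(rst_regmap, IPSRST2_OFFSET, IPSRST2_USBH, 0);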
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 8c733492d8fe..454d8c624a3f 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -86,7 +86,7 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
int result;
struct usb_hcd *hcd;
unsigned int virq;
- static u64 dummy_mask = DMA_BIT_MASK(32);
+ static u64 dummy_mask;
if (usb_disabled()) {
result = -ENODEV;
@@ -131,7 +131,9 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
goto fail_irq;
}
- dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
+ dummy_mask = DMA_BIT_MASK(32);
+ dev->core.dma_mask = &dummy_mask;
+ dma_set_coherent_mask(&dev->core, dummy_mask);
hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev_name(&dev->core));
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1d87295682b8..da7b00a6110b 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1835,7 +1835,6 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
unsigned uframe;
int urb_index = -1;
struct ehci_iso_stream *stream = itd->stream;
- struct usb_device *dev;
bool retval = false;
/* for each uframe with a packet */
@@ -1886,7 +1885,6 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
*/
/* give urb back to the driver; completion often (re)submits */
- dev = urb->dev;
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
@@ -2230,7 +2228,6 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
u32 t;
int urb_index;
struct ehci_iso_stream *stream = sitd->stream;
- struct usb_device *dev;
bool retval = false;
urb_index = sitd->index;
@@ -2268,7 +2265,6 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
*/
/* give urb back to the driver; completion often (re)submits */
- dev = urb->dev;
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index a39fae41bc70..c0c4dcca6f3c 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -130,15 +130,10 @@ static int exynos_ohci_probe(struct platform_device *pdev)
exynos_ohci = to_exynos_ohci(hcd);
- if (of_device_is_compatible(pdev->dev.of_node,
- "samsung,exynos5440-ohci"))
- goto skip_phy;
-
err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci);
if (err)
goto fail_clk;
-skip_phy:
exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ohci->clk)) {
@@ -270,7 +265,6 @@ static const struct dev_pm_ops exynos_ohci_pm_ops = {
#ifdef CONFIG_OF
static const struct of_device_id exynos_ohci_match[] = {
{ .compatible = "samsung,exynos4210-ohci" },
- { .compatible = "samsung,exynos5440-ohci" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_ohci_match);
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 20a23d795adf..395f9d3bc849 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -69,7 +69,7 @@ static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
int result;
struct usb_hcd *hcd;
unsigned int virq;
- static u64 dummy_mask = DMA_BIT_MASK(32);
+ static u64 dummy_mask;
if (usb_disabled()) {
result = -ENODEV;
@@ -115,7 +115,9 @@ static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
goto fail_irq;
}
- dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
+ dummy_mask = DMA_BIT_MASK(32);
+ dev->core.dma_mask = &dummy_mask;
+ dma_set_coherent_mask(&dev->core, dummy_mask);
hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev_name(&dev->core));
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 032b8652910a..072bd5d5738e 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3062,7 +3062,6 @@ static int u132_probe(struct platform_device *pdev)
int retval;
u32 control;
u32 rh_a = -1;
- u32 num_ports;
msleep(100);
if (u132_exiting > 0)
@@ -3077,7 +3076,6 @@ static int u132_probe(struct platform_device *pdev)
retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
if (retval)
return retval;
- num_ports = rh_a & RH_A_NDP; /* refuse to confuse usbcore */
if (pdev->dev.dma_mask)
return -EINVAL;
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
index bb84366f7bd3..ef52aeb02fde 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/usb/host/whci/pzl.c
@@ -96,9 +96,7 @@ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
while (qset->ntds) {
struct whc_qtd *td;
- int t;
- t = qset->td_start;
td = &qset->qtd[qset->td_start];
status = le32_to_cpu(td->status);
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 1fbfd89d0a0f..86cff5c28eff 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci)
return 0;
}
-static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
struct xhci_dbc *dbc = xhci->dbc;
if (dbc->state == DS_DISABLED)
- return;
+ return -1;
writel(0, &dbc->regs->control);
xhci_dbc_mem_cleanup(xhci);
dbc->state = DS_DISABLED;
+
+ return 0;
}
static int xhci_dbc_start(struct xhci_hcd *xhci)
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
+ int ret;
unsigned long flags;
struct xhci_dbc *dbc = xhci->dbc;
struct dbc_port *port = &dbc->port;
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
xhci_dbc_tty_unregister_device(xhci);
spin_lock_irqsave(&dbc->lock, flags);
- xhci_do_dbc_stop(xhci);
+ ret = xhci_do_dbc_stop(xhci);
spin_unlock_irqrestore(&dbc->lock, flags);
- pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+ if (!ret)
+ pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}
static void
@@ -909,11 +913,9 @@ static ssize_t dbc_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
- dbc = xhci->dbc;
if (!strncmp(buf, "enable", 6))
xhci_dbc_start(xhci);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a4b95d019f84..7e2a531ba321 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1684,4 +1684,15 @@ int xhci_bus_resume(struct usb_hcd *hcd)
return 0;
}
+unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_bus_state *bus_state;
+
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+
+ /* USB3 port wakeups are reported via usb_wakeup_notification() */
+ return bus_state->resuming_ports; /* USB2 ports only */
+}
+
#endif /* CONFIG_PM */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index acbd3d7b8828..ef350c33dc4a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
if (!ep->stream_info)
return NULL;
- if (stream_id > ep->stream_info->num_streams)
+ if (stream_id >= ep->stream_info->num_streams)
return NULL;
return ep->stream_info->stream_rings[stream_id];
}
@@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
dev = xhci->devs[slot_id];
- trace_xhci_free_virt_device(dev);
-
xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
if (!dev)
return;
+ trace_xhci_free_virt_device(dev);
+
if (dev->tt_info)
old_active_eps = dev->tt_info->active_eps;
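The stream-ID fix is a classic off-by-one: valid indices into an array of num_streams entries are 0..num_streams-1, so the reject condition must be >=, not >. The second hunk fixes a related ordering bug, moving the tracepoint after the NULL check it used to precede. The bounds rule in miniature (an illustrative simplification):

static struct xhci_ring *ring_for(struct xhci_stream_info *si,
				  unsigned int idx)
{
	/* rings[num_streams] is one past the end; reject idx == n too. */
	if (!si || idx >= si->num_streams)
		return NULL;
	return si->stream_rings[idx];
}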
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c1b22fc64e38..8dc77e34a859 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -105,6 +105,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
+ .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3,
.init_quirk = xhci_rcar_init_quirk,
.plat_start = xhci_rcar_start,
.resume_quirk = xhci_rcar_resume_quirk,
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index f33ffc2bc4ed..a6e463715779 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -17,9 +17,8 @@
#include "xhci-rcar.h"
/*
-* - The V3 firmware is for r8a7796 (with good performance) and r8a7795 es2.0
-* or later.
-* - The V2 firmware can be used on both r8a7795 (es1.x) and r8a7796.
+* - The V3 firmware is for almost all R-Car Gen3 SoCs (except r8a7795 ES1.x).
+* - The V2 firmware is for r8a7795 ES1.x.
* - The V2 firmware can also be used on R-Car Gen2. However, it causes
* performance degradation, so this driver continues to use the V1
* firmware on R-Car Gen2.
@@ -30,6 +29,7 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2);
MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
/*** Register Offset ***/
+#define RCAR_USB3_AXH_STA 0x104 /* AXI Host Control Status */
#define RCAR_USB3_INT_ENA 0x224 /* Interrupt Enable */
#define RCAR_USB3_DL_CTRL 0x250 /* FW Download Control & Status */
#define RCAR_USB3_FW_DATA0 0x258 /* FW Data0 */
@@ -42,6 +42,12 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
#define RCAR_USB3_TX_POL 0xab8 /* USB3.0 TX Polarity */
/*** Register Settings ***/
+/* AXI Host Control Status */
+#define RCAR_USB3_AXH_STA_B3_PLL_ACTIVE 0x00010000
+#define RCAR_USB3_AXH_STA_B2_PLL_ACTIVE 0x00000001
+#define RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK (RCAR_USB3_AXH_STA_B3_PLL_ACTIVE | \
+ RCAR_USB3_AXH_STA_B2_PLL_ACTIVE)
+
/* Interrupt Enable */
#define RCAR_USB3_INT_XHC_ENA 0x00000001
#define RCAR_USB3_INT_PME_ENA 0x00000002
@@ -75,18 +81,6 @@ static const struct soc_device_attribute rcar_quirks_match[] = {
.soc_id = "r8a7795", .revision = "ES1.*",
.data = (void *)RCAR_XHCI_FIRMWARE_V2,
},
- {
- .soc_id = "r8a7795",
- .data = (void *)RCAR_XHCI_FIRMWARE_V3,
- },
- {
- .soc_id = "r8a7796",
- .data = (void *)RCAR_XHCI_FIRMWARE_V3,
- },
- {
- .soc_id = "r8a77965",
- .data = (void *)RCAR_XHCI_FIRMWARE_V3,
- },
{ /* sentinel */ },
};
@@ -213,6 +207,22 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
return retval;
}
+static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd)
+{
+ int timeout = 1000;
+ u32 val, mask = RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK;
+
+ while (timeout > 0) {
+ val = readl(hcd->regs + RCAR_USB3_AXH_STA);
+ if ((val & mask) == mask)
+ return true;
+ udelay(1);
+ timeout--;
+ }
+
+ return false;
+}
+
/* This function needs to initialize the USB phy first */
int xhci_rcar_init_quirk(struct usb_hcd *hcd)
{
@@ -233,6 +243,9 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
xhci_rcar_is_gen3(hcd->self.controller))
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ if (!xhci_rcar_wait_for_pll_active(hcd))
+ return -ETIMEDOUT;
+
return xhci_rcar_download_firmware(hcd);
}
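The PLL poll above could equally be expressed with the generic iopoll helper; a sketch under that assumption (the patch itself open-codes the loop):

#include <linux/iopoll.h>

static int wait_pll_active(struct usb_hcd *hcd)
{
	u32 val;

	/* Poll every 1 us, give up after 1000 us, matching the loop. */
	return readl_poll_timeout_atomic(hcd->regs + RCAR_USB3_AXH_STA, val,
			(val & RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK) ==
				RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK,
			1, 1000);
}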
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index a8c1d073cba0..4b463e5202a4 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
unsigned long mask;
unsigned int port;
bool idle, enable;
- int err;
+ int err = 0;
memset(&rsp, 0, sizeof(rsp));
@@ -1223,10 +1223,10 @@ disable_rpm:
pm_runtime_disable(&pdev->dev);
usb_put_hcd(tegra->hcd);
disable_xusbc:
- if (!&pdev->dev.pm_domain)
+ if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
disable_xusba:
- if (!&pdev->dev.pm_domain)
+ if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
put_padctl:
tegra_xusb_padctl_put(tegra->padctl);
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 410544ffe78f..88b427434bd8 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
TP_ARGS(ring, trb)
);
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+ TP_PROTO(struct xhci_virt_device *vdev),
+ TP_ARGS(vdev),
+ TP_STRUCT__entry(
+ __field(void *, vdev)
+ __field(unsigned long long, out_ctx)
+ __field(unsigned long long, in_ctx)
+ __field(u8, fake_port)
+ __field(u8, real_port)
+ __field(u16, current_mel)
+
+ ),
+ TP_fast_assign(
+ __entry->vdev = vdev;
+ __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+ __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+ __entry->fake_port = (u8) vdev->fake_port;
+ __entry->real_port = (u8) vdev->real_port;
+ __entry->current_mel = (u16) vdev->current_mel;
+ ),
+ TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+ __entry->vdev, __entry->in_ctx, __entry->out_ctx,
+ __entry->fake_port, __entry->real_port, __entry->current_mel
+ )
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+ TP_PROTO(struct xhci_virt_device *vdev),
+ TP_ARGS(vdev)
+);
+
DECLARE_EVENT_CLASS(xhci_log_virt_dev,
TP_PROTO(struct xhci_virt_device *vdev),
TP_ARGS(vdev),
@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
TP_ARGS(vdev)
);
-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
- TP_PROTO(struct xhci_virt_device *vdev),
- TP_ARGS(vdev)
-);
-
DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
TP_PROTO(struct xhci_virt_device *vdev),
TP_ARGS(vdev)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8c8da2d657fa..61f48b17e57b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
spin_unlock_irqrestore(&xhci->lock, flags);
}
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+ struct xhci_port **ports;
+ int port_index;
+ u32 status;
+ u32 portsc;
+
+ status = readl(&xhci->op_regs->status);
+ if (status & STS_EINT)
+ return true;
+ /*
+ * Checking STS_EINT is not enough as there is a lag between a change
+ * bit being set and the Port Status Change Event that it generated
+ * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+ */
+
+ port_index = xhci->usb2_rhub.num_ports;
+ ports = xhci->usb2_rhub.ports;
+ while (port_index--) {
+ portsc = readl(ports[port_index]->addr);
+ if (portsc & PORT_CHANGE_MASK ||
+ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ return true;
+ }
+ port_index = xhci->usb3_rhub.num_ports;
+ ports = xhci->usb3_rhub.ports;
+ while (port_index--) {
+ portsc = readl(ports[port_index]->addr);
+ if (portsc & PORT_CHANGE_MASK ||
+ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ return true;
+ }
+ return false;
+}
+
/*
* Stop HC (not bus-specific)
*
@@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
*/
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
- u32 command, temp = 0, status;
+ u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
int retval = 0;
@@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
writel(command, &xhci->op_regs->command);
+ /*
+ * Some controllers take more than 55 ms to complete the controller
+ * restore, so set the timeout to 100 ms. The xHCI specification
+ * doesn't mention any timeout value.
+ */
if (xhci_handshake(&xhci->op_regs->status,
- STS_RESTORE, 0, 10 * 1000)) {
+ STS_RESTORE, 0, 100 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
@@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
done:
if (retval == 0) {
/* Resume root hubs only when have pending events. */
- status = readl(&xhci->op_regs->status);
- if (status & STS_EINT) {
+ if (xhci_pending_portevent(xhci)) {
usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
}
@@ -3012,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
if (!list_empty(&ep->ring->td_list)) {
dev_err(&udev->dev, "EP not empty, refuse reset\n");
spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
goto cleanup;
}
xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
@@ -5081,6 +5121,7 @@ static const struct hc_driver xhci_hc_driver = {
.hub_status_data = xhci_hub_status_data,
.bus_suspend = xhci_bus_suspend,
.bus_resume = xhci_bus_resume,
+ .get_resuming_ports = xhci_get_resuming_ports,
/*
* call back when device connected and addressed
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 939e2f86b595..6230a578324c 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -382,6 +382,10 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
+#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+ PORT_RC | PORT_PLC | PORT_CEC)
+
+
/* Cold Attach Status - xHC can set this bit to report device attached during
* Sx state. Warm port reset should be perfomed to clear this bit and move port
* to connected state.
@@ -2110,9 +2114,11 @@ void xhci_hc_died(struct xhci_hcd *xhci);
#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
int xhci_bus_resume(struct usb_hcd *hcd);
+unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
#else
#define xhci_bus_suspend NULL
#define xhci_bus_resume NULL
+#define xhci_get_resuming_ports NULL
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 1045521be293..8142c6b4c4cf 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1817,7 +1817,6 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
u32 temp, status;
unsigned long flags;
int retval = 0;
- unsigned selector;
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
@@ -2010,7 +2009,6 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
}
break;
case SetPortFeature:
- selector = wIndex >> 8;
wIndex &= 0xff;
if (!wIndex || wIndex > ports)
goto error;
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index b3160afe0458..9465fb95d70a 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -155,11 +155,12 @@ static void adu_interrupt_in_callback(struct urb *urb)
{
struct adu_device *dev = urb->context;
int status = urb->status;
+ unsigned long flags;
adu_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
- spin_lock(&dev->buflock);
+ spin_lock_irqsave(&dev->buflock, flags);
if (status != 0) {
if ((status != -ENOENT) && (status != -ECONNRESET) &&
@@ -190,7 +191,7 @@ static void adu_interrupt_in_callback(struct urb *urb)
exit:
dev->read_urb_finished = 1;
- spin_unlock(&dev->buflock);
+ spin_unlock_irqrestore(&dev->buflock, flags);
/* always wake up so we recover from errors */
wake_up_interruptible(&dev->read_wait);
}
@@ -199,6 +200,7 @@ static void adu_interrupt_out_callback(struct urb *urb)
{
struct adu_device *dev = urb->context;
int status = urb->status;
+ unsigned long flags;
adu_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
@@ -213,10 +215,10 @@ static void adu_interrupt_out_callback(struct urb *urb)
return;
}
- spin_lock(&dev->buflock);
+ spin_lock_irqsave(&dev->buflock, flags);
dev->out_urb_finished = 1;
wake_up(&dev->write_wait);
- spin_unlock(&dev->buflock);
+ spin_unlock_irqrestore(&dev->buflock, flags);
}
static int adu_open(struct inode *inode, struct file *file)
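This is one instance of a wider conversion: URB completion handlers may be invoked in hard-IRQ context, but also (e.g. during unlink) in process context, so they must not assume interrupts are already disabled. The irqsave variants are correct in both cases. The safe shape, with a hypothetical device struct:

#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/wait.h>

struct my_dev {
	spinlock_t lock;
	int done;
	wait_queue_head_t wait;
};

static void my_urb_complete(struct urb *urb)
{
	struct my_dev *dev = urb->context;
	unsigned long flags;

	/* Correct whether we were called with IRQs on or off. */
	spin_lock_irqsave(&dev->lock, flags);
	dev->done = 1;
	spin_unlock_irqrestore(&dev->lock, flags);

	wake_up_interruptible(&dev->wait);
}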
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index b3eb8b989bd4..d746c26a8055 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -89,6 +89,7 @@ static void appledisplay_complete(struct urb *urb)
dev_err(dev,
"OVERFLOW with data length %d, actual length is %d\n",
ACD_URB_BUFFER_LEN, pdata->urb->actual_length);
+ /* fall through */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
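The bare comment is not decoration: with -Wimplicit-fallthrough, GCC recognizes /* fall through */ (and a few similar spellings) as an assertion that the missing break is deliberate. The shape it expects, sketched on a hypothetical status switch:

static void handle_status(struct device *dev, struct urb *urb)
{
	switch (urb->status) {
	case -EOVERFLOW:
		dev_err(dev, "overflow\n");
		/* fall through */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		return;		/* unrecoverable: don't resubmit */
	default:
		break;
	}
}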
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 8d33187ce2af..c2991b8a65ce 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -81,7 +81,6 @@ struct iowarrior {
atomic_t write_busy; /* number of write-urbs submitted */
atomic_t read_idx;
atomic_t intr_idx;
- spinlock_t intr_idx_lock; /* protects intr_idx */
atomic_t overflow_flag; /* signals an index 'rollover' */
int present; /* this is 1 as long as the device is connected */
int opened; /* this is 1 if the device is currently open */
@@ -166,7 +165,6 @@ static void iowarrior_callback(struct urb *urb)
goto exit;
}
- spin_lock(&dev->intr_idx_lock);
intr_idx = atomic_read(&dev->intr_idx);
/* aux_idx become previous intr_idx */
aux_idx = (intr_idx == 0) ? (MAX_INTERRUPT_BUFFER - 1) : (intr_idx - 1);
@@ -181,7 +179,6 @@ static void iowarrior_callback(struct urb *urb)
(dev->read_queue + offset, urb->transfer_buffer,
dev->report_size)) {
/* equal values on interface 0 will be ignored */
- spin_unlock(&dev->intr_idx_lock);
goto exit;
}
}
@@ -202,7 +199,6 @@ static void iowarrior_callback(struct urb *urb)
*(dev->read_queue + offset + (dev->report_size)) = dev->serial_number++;
atomic_set(&dev->intr_idx, aux_idx);
- spin_unlock(&dev->intr_idx_lock);
/* tell the blocking read about the new data */
wake_up_interruptible(&dev->read_wait);
@@ -762,7 +758,6 @@ static int iowarrior_probe(struct usb_interface *interface,
atomic_set(&dev->intr_idx, 0);
atomic_set(&dev->read_idx, 0);
- spin_lock_init(&dev->intr_idx_lock);
atomic_set(&dev->overflow_flag, 0);
init_waitqueue_head(&dev->read_wait);
atomic_set(&dev->write_busy, 0);
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index c2e255f02a72..006762b72ff5 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -225,6 +225,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
size_t *actual_buffer;
unsigned int next_ring_head;
int status = urb->status;
+ unsigned long flags;
int retval;
if (status) {
@@ -236,12 +237,12 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
dev_dbg(&dev->intf->dev,
"%s: nonzero status received: %d\n", __func__,
status);
- spin_lock(&dev->rbsl);
+ spin_lock_irqsave(&dev->rbsl, flags);
goto resubmit; /* maybe we can recover */
}
}
- spin_lock(&dev->rbsl);
+ spin_lock_irqsave(&dev->rbsl, flags);
if (urb->actual_length > 0) {
next_ring_head = (dev->ring_head+1) % ring_buffer_size;
if (next_ring_head != dev->ring_tail) {
@@ -270,7 +271,7 @@ resubmit:
dev->buffer_overflow = 1;
}
}
- spin_unlock(&dev->rbsl);
+ spin_unlock_irqrestore(&dev->rbsl, flags);
exit:
dev->interrupt_in_done = 1;
wake_up_interruptible(&dev->read_wait);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index bf47bd8bc76f..006cf13b2199 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -722,6 +722,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
int retval;
+ unsigned long flags;
lego_usb_tower_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
@@ -740,7 +741,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
}
if (urb->actual_length > 0) {
- spin_lock (&dev->read_buffer_lock);
+ spin_lock_irqsave(&dev->read_buffer_lock, flags);
if (dev->read_buffer_length + urb->actual_length < read_buffer_size) {
memcpy (dev->read_buffer + dev->read_buffer_length,
dev->interrupt_in_buffer,
@@ -753,7 +754,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
pr_warn("read_buffer overflow, %d bytes dropped\n",
urb->actual_length);
}
- spin_unlock (&dev->read_buffer_lock);
+ spin_unlock_irqrestore(&dev->read_buffer_lock, flags);
}
resubmit:
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index f92c5df26320..3198d0477cf8 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -1750,7 +1750,7 @@ static int sisusb_setup_screen(struct sisusb_usb_data *sisusb,
static int sisusb_set_default_mode(struct sisusb_usb_data *sisusb,
int touchengines)
{
- int ret = 0, i, j, modex, modey, bpp, du;
+ int ret = 0, i, j, modex, bpp, du;
u8 sr31, cr63, tmp8;
static const char attrdata[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
@@ -1773,7 +1773,7 @@ static int sisusb_set_default_mode(struct sisusb_usb_data *sisusb,
0x00
};
- modex = 640; modey = 480; bpp = 2;
+ modex = 640; bpp = 2;
GETIREG(SISSR, 0x31, &sr31);
GETIREG(SISCR, 0x63, &cr63);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 9e1142b8b91b..c7f82310e73e 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1082,11 +1082,12 @@ static void ctrl_complete(struct urb *urb)
struct usb_ctrlrequest *reqp;
struct subcase *subcase;
int status = urb->status;
+ unsigned long flags;
reqp = (struct usb_ctrlrequest *)urb->setup_packet;
subcase = container_of(reqp, struct subcase, setup);
- spin_lock(&ctx->lock);
+ spin_lock_irqsave(&ctx->lock, flags);
ctx->count--;
ctx->pending--;
@@ -1185,7 +1186,7 @@ error:
/* signal completion when nothing's queued */
if (ctx->pending == 0)
complete(&ctx->complete);
- spin_unlock(&ctx->lock);
+ spin_unlock_irqrestore(&ctx->lock, flags);
}
static int
@@ -1917,8 +1918,9 @@ struct transfer_context {
static void complicated_callback(struct urb *urb)
{
struct transfer_context *ctx = urb->context;
+ unsigned long flags;
- spin_lock(&ctx->lock);
+ spin_lock_irqsave(&ctx->lock, flags);
ctx->count--;
ctx->packet_count += urb->number_of_packets;
@@ -1958,7 +1960,7 @@ static void complicated_callback(struct urb *urb)
complete(&ctx->done);
}
done:
- spin_unlock(&ctx->lock);
+ spin_unlock_irqrestore(&ctx->lock, flags);
}
static struct urb *iso_alloc_urb(
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index de9a502491c2..82f220631bd7 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -748,13 +748,11 @@ static void uss720_disconnect(struct usb_interface *intf)
{
struct parport *pp = usb_get_intfdata(intf);
struct parport_uss720_private *priv;
- struct usb_device *usbdev;
dev_dbg(&intf->dev, "disconnect\n");
usb_set_intfdata(intf, NULL);
if (pp) {
priv = pp->private_data;
- usbdev = priv->usbdev;
priv->usbdev = NULL;
priv->pp = NULL;
dev_dbg(&intf->dev, "parport_remove_port\n");
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 8abb6cbbd98a..3be40eaa1ac9 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_yurex *dev;
- int retval = 0;
- int bytes_read = 0;
+ int len = 0;
char in_buffer[20];
unsigned long flags;
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
mutex_lock(&dev->io_mutex);
if (!dev->interface) { /* already disconnected */
- retval = -ENODEV;
- goto exit;
+ mutex_unlock(&dev->io_mutex);
+ return -ENODEV;
}
spin_lock_irqsave(&dev->lock, flags);
- bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+ len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
spin_unlock_irqrestore(&dev->lock, flags);
-
- if (*ppos < bytes_read) {
- if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
- retval = -EFAULT;
- else {
- retval = bytes_read - *ppos;
- *ppos += bytes_read;
- }
- }
-
-exit:
mutex_unlock(&dev->io_mutex);
- return retval;
+
+ return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
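simple_read_from_buffer() encapsulates exactly the *ppos/count/copy_to_user() bookkeeping the old code open-coded: it copies at most count bytes starting at *ppos within a kernel buffer, advances *ppos, and returns the number of bytes copied or a negative errno. A hypothetical read method using it:

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	char kbuf[20];
	int len;

	len = scnprintf(kbuf, sizeof(kbuf), "%lld\n", 42LL);

	return simple_read_from_buffer(buf, count, ppos, kbuf, len);
}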
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index ad2c082bd0fb..ac2b4fcc265f 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -95,8 +95,8 @@ struct mon_bin_hdr {
unsigned short busnum; /* Bus number */
char flag_setup;
char flag_data;
- s64 ts_sec; /* getnstimeofday64 */
- s32 ts_usec; /* getnstimeofday64 */
+ s64 ts_sec; /* ktime_get_real_ts64 */
+ s32 ts_usec; /* ktime_get_real_ts64 */
int status;
unsigned int len_urb; /* Length of data (submitted or actual) */
unsigned int len_cap; /* Delivered length */
@@ -497,7 +497,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
struct mon_bin_hdr *ep;
char data_tag = 0;
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
spin_lock_irqsave(&rp->b_lock, flags);
@@ -637,7 +637,7 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
unsigned int offset;
struct mon_bin_hdr *ep;
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
spin_lock_irqsave(&rp->b_lock, flags);
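ktime_get_real_ts64() supersedes the deprecated getnstimeofday64(); both fill a struct timespec64, so only the call site changes. The split into seconds and microseconds that mon_bin performs, sketched as a hypothetical helper:

#include <linux/ktime.h>

static void stamp_hdr(struct mon_bin_hdr *ep)	/* hypothetical helper */
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	ep->ts_sec  = ts.tv_sec;
	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;	/* ns -> us */
}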
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index fb871eabcc10..df827ff57b0d 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -237,7 +237,7 @@ static int dsps_check_status(struct musb *musb, void *unused)
}
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
- /* fall */
+ /* fall through */
case OTG_STATE_A_IDLE:
case OTG_STATE_B_IDLE:
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8000c7c02f79..b59ce9ad14ce 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -378,6 +378,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
qh = first_qh(head);
break;
}
+ /* else: fall through */
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 900875f326d7..f7c96d209eda 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
if (pdata->init && pdata->init(pdev) != 0)
return -EINVAL;
+#ifdef CONFIG_PPC32
if (pdata->big_endian_mmio) {
_fsl_readl = _fsl_readl_be;
_fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
_fsl_readl = _fsl_readl_le;
_fsl_writel = _fsl_writel_le;
}
+#endif
/* request irq */
p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
/*
* state file in sysfs
*/
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index b26d7c339c05..7fdbff23ae8b 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Renesas USBHS Controller Drivers
#
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 33d059c40616..59cac40aafcc 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -502,6 +502,7 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
case READ_STATUS_STAGE:
case WRITE_STATUS_STAGE:
usbhs_dcp_control_transfer_done(pipe);
+ /* fall through */
default:
return ret;
}
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index bdd7a5ad3bf1..3bb1fff02bed 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- if (r < bufsize) {
+ if (r < (int)bufsize) {
if (r >= 0) {
dev_err(&dev->dev,
"short control message received (%d < %u)\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eb6c26cbe579..c0777a374a88 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+ { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+ { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+ { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+ { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+ { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+ { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
{ USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+ { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+ { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -224,11 +239,14 @@ MODULE_DEVICE_TABLE(usb, id_table);
struct cp210x_serial_private {
#ifdef CONFIG_GPIOLIB
struct gpio_chip gc;
- u8 config;
- u8 gpio_mode;
bool gpio_registered;
+ u8 gpio_pushpull;
+ u8 gpio_altfunc;
+ u8 gpio_input;
#endif
u8 partnum;
+ speed_t max_speed;
+ bool use_actual_rate;
};
struct cp210x_port_private {
@@ -341,6 +359,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CONTROL_WRITE_RTS 0x0200
/* CP210X_VENDOR_SPECIFIC values */
+#define CP210X_READ_2NCONFIG 0x000E
#define CP210X_READ_LATCH 0x00C2
#define CP210X_GET_PARTNUM 0x370B
#define CP210X_GET_PORTCONFIG 0x370C
@@ -354,6 +373,9 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define CP210X_PARTNUM_CP2104 0x04
#define CP210X_PARTNUM_CP2105 0x05
#define CP210X_PARTNUM_CP2108 0x08
+#define CP210X_PARTNUM_CP2102N_QFN28 0x20
+#define CP210X_PARTNUM_CP2102N_QFN24 0x21
+#define CP210X_PARTNUM_CP2102N_QFN20 0x22
#define CP210X_PARTNUM_UNKNOWN 0xFF
/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
@@ -447,6 +469,12 @@ struct cp210x_config {
#define CP2105_GPIO1_RXLED_MODE BIT(1)
#define CP2105_GPIO1_RS485_MODE BIT(2)
+/* CP2102N configuration array indices */
+#define CP210X_2NCONFIG_CONFIG_VERSION_IDX 2
+#define CP210X_2NCONFIG_GPIO_MODE_IDX 581
+#define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX 587
+#define CP210X_2NCONFIG_GPIO_CONTROL_IDX 600
+
/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
struct cp210x_gpio_write {
u8 mask;
@@ -752,48 +780,6 @@ static int cp210x_get_line_ctl(struct usb_serial_port *port, u16 *ctl)
return 0;
}
-/*
- * cp210x_quantise_baudrate
- * Quantises the baud rate as per AN205 Table 1
- */
-static unsigned int cp210x_quantise_baudrate(unsigned int baud)
-{
- if (baud <= 300)
- baud = 300;
- else if (baud <= 600) baud = 600;
- else if (baud <= 1200) baud = 1200;
- else if (baud <= 1800) baud = 1800;
- else if (baud <= 2400) baud = 2400;
- else if (baud <= 4000) baud = 4000;
- else if (baud <= 4803) baud = 4800;
- else if (baud <= 7207) baud = 7200;
- else if (baud <= 9612) baud = 9600;
- else if (baud <= 14428) baud = 14400;
- else if (baud <= 16062) baud = 16000;
- else if (baud <= 19250) baud = 19200;
- else if (baud <= 28912) baud = 28800;
- else if (baud <= 38601) baud = 38400;
- else if (baud <= 51558) baud = 51200;
- else if (baud <= 56280) baud = 56000;
- else if (baud <= 58053) baud = 57600;
- else if (baud <= 64111) baud = 64000;
- else if (baud <= 77608) baud = 76800;
- else if (baud <= 117028) baud = 115200;
- else if (baud <= 129347) baud = 128000;
- else if (baud <= 156868) baud = 153600;
- else if (baud <= 237832) baud = 230400;
- else if (baud <= 254234) baud = 250000;
- else if (baud <= 273066) baud = 256000;
- else if (baud <= 491520) baud = 460800;
- else if (baud <= 567138) baud = 500000;
- else if (baud <= 670254) baud = 576000;
- else if (baud < 1000000)
- baud = 921600;
- else if (baud > 2000000)
- baud = 2000000;
- return baud;
-}
-
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result;
@@ -1013,6 +999,75 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
*cflagp = cflag;
}
+struct cp210x_rate {
+ speed_t rate;
+ speed_t high;
+};
+
+static const struct cp210x_rate cp210x_an205_table1[] = {
+ { 300, 300 },
+ { 600, 600 },
+ { 1200, 1200 },
+ { 1800, 1800 },
+ { 2400, 2400 },
+ { 4000, 4000 },
+ { 4800, 4803 },
+ { 7200, 7207 },
+ { 9600, 9612 },
+ { 14400, 14428 },
+ { 16000, 16062 },
+ { 19200, 19250 },
+ { 28800, 28912 },
+ { 38400, 38601 },
+ { 51200, 51558 },
+ { 56000, 56280 },
+ { 57600, 58053 },
+ { 64000, 64111 },
+ { 76800, 77608 },
+ { 115200, 117028 },
+ { 128000, 129347 },
+ { 153600, 156868 },
+ { 230400, 237832 },
+ { 250000, 254234 },
+ { 256000, 273066 },
+ { 460800, 491520 },
+ { 500000, 567138 },
+ { 576000, 670254 },
+ { 921600, UINT_MAX }
+};
+
+/*
+ * Quantises the baud rate as per AN205 Table 1
+ */
+static speed_t cp210x_get_an205_rate(speed_t baud)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cp210x_an205_table1); ++i) {
+ if (baud <= cp210x_an205_table1[i].high)
+ break;
+ }
+
+ return cp210x_an205_table1[i].rate;
+}
+
+static speed_t cp210x_get_actual_rate(struct usb_serial *serial, speed_t baud)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ unsigned int prescale = 1;
+ unsigned int div;
+
+ baud = clamp(baud, 300u, priv->max_speed);
+
+ if (baud <= 365)
+ prescale = 4;
+
+ div = DIV_ROUND_CLOSEST(48000000, 2 * prescale * baud);
+ baud = 48000000 / (2 * prescale * div);
+
+ return baud;
+}
+
/*
* CP2101 supports the following baud rates:
*
@@ -1042,16 +1097,24 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
static void cp210x_change_speed(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct usb_serial *serial = port->serial;
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
u32 baud;
baud = tty->termios.c_ospeed;
- /* This maps the requested rate to a rate valid on cp2102 or cp2103,
- * or to an arbitrary rate in [1M,2M].
+ /*
+ * This maps the requested rate to the actual rate, a valid rate on
+ * cp2102 or cp2103, or to an arbitrary rate in [1M, max_speed].
*
* NOTE: B0 is not implemented.
*/
- baud = cp210x_quantise_baudrate(baud);
+ if (priv->use_actual_rate)
+ baud = cp210x_get_actual_rate(serial, baud);
+ else if (baud < 1000000)
+ baud = cp210x_get_an205_rate(baud);
+ else if (baud > priv->max_speed)
+ baud = priv->max_speed;
dev_dbg(&port->dev, "%s - setting baud rate to %u\n", __func__, baud);
if (cp210x_write_u32_reg(port, CP210X_SET_BAUDRATE, baud)) {
@@ -1273,17 +1336,8 @@ static int cp210x_gpio_request(struct gpio_chip *gc, unsigned int offset)
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
- switch (offset) {
- case 0:
- if (priv->config & CP2105_GPIO0_TXLED_MODE)
- return -ENODEV;
- break;
- case 1:
- if (priv->config & (CP2105_GPIO1_RXLED_MODE |
- CP2105_GPIO1_RS485_MODE))
- return -ENODEV;
- break;
- }
+ if (priv->gpio_altfunc & BIT(offset))
+ return -ENODEV;
return 0;
}
@@ -1291,10 +1345,15 @@ static int cp210x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ u8 req_type = REQTYPE_DEVICE_TO_HOST;
int result;
u8 buf;
- result = cp210x_read_vendor_block(serial, REQTYPE_INTERFACE_TO_HOST,
+ if (priv->partnum == CP210X_PARTNUM_CP2105)
+ req_type = REQTYPE_INTERFACE_TO_HOST;
+
+ result = cp210x_read_vendor_block(serial, req_type,
CP210X_READ_LATCH, &buf, sizeof(buf));
if (result < 0)
return result;
@@ -1305,7 +1364,9 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct cp210x_gpio_write buf;
+ int result;
if (value == 1)
buf.state = BIT(gpio);
@@ -1314,25 +1375,68 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
buf.mask = BIT(gpio);
- cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE,
- CP210X_WRITE_LATCH, &buf, sizeof(buf));
+ if (priv->partnum == CP210X_PARTNUM_CP2105) {
+ result = cp210x_write_vendor_block(serial,
+ REQTYPE_HOST_TO_INTERFACE,
+ CP210X_WRITE_LATCH, &buf,
+ sizeof(buf));
+ } else {
+ u16 wIndex = buf.state << 8 | buf.mask;
+
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ CP210X_VENDOR_SPECIFIC,
+ REQTYPE_HOST_TO_DEVICE,
+ CP210X_WRITE_LATCH,
+ wIndex,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ }
+
+ if (result < 0) {
+ dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
+ result);
+ }
}
static int cp210x_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
{
- /* Hardware does not support an input mode */
- return 0;
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ return priv->gpio_input & BIT(gpio);
}
static int cp210x_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
{
- /* Hardware does not support an input mode */
- return -ENOTSUPP;
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ if (priv->partnum == CP210X_PARTNUM_CP2105) {
+ /* hardware does not support an input mode */
+ return -ENOTSUPP;
+ }
+
+ /* push-pull pins cannot be changed to be inputs */
+ if (priv->gpio_pushpull & BIT(gpio))
+ return -EINVAL;
+
+ /* make sure to release pin if it is being driven low */
+ cp210x_gpio_set(gc, gpio, 1);
+
+ priv->gpio_input |= BIT(gpio);
+
+ return 0;
}
static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
int value)
{
+ struct usb_serial *serial = gpiochip_get_data(gc);
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+ priv->gpio_input &= ~BIT(gpio);
+ cp210x_gpio_set(gc, gpio, value);
+
return 0;
}
@@ -1345,11 +1449,11 @@ static int cp210x_gpio_set_config(struct gpio_chip *gc, unsigned int gpio,
/* Succeed only if in correct mode (this can't be set at runtime) */
if ((param == PIN_CONFIG_DRIVE_PUSH_PULL) &&
- (priv->gpio_mode & BIT(gpio)))
+ (priv->gpio_pushpull & BIT(gpio)))
return 0;
if ((param == PIN_CONFIG_DRIVE_OPEN_DRAIN) &&
- !(priv->gpio_mode & BIT(gpio)))
+ !(priv->gpio_pushpull & BIT(gpio)))
return 0;
return -ENOTSUPP;
@@ -1362,12 +1466,13 @@ static int cp210x_gpio_set_config(struct gpio_chip *gc, unsigned int gpio,
* this driver that provide GPIO do so in a way that does not impact other
* signals and are thus expected to have very different initialisation.
*/
-static int cp2105_shared_gpio_init(struct usb_serial *serial)
+static int cp2105_gpioconf_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct cp210x_pin_mode mode;
struct cp210x_config config;
u8 intf_num = cp210x_interface_num(serial);
+ u8 iface_config;
int result;
result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
@@ -1384,20 +1489,26 @@ static int cp2105_shared_gpio_init(struct usb_serial *serial)
/* 2 banks of GPIO - One for the pins taken from each serial port */
if (intf_num == 0) {
- if (mode.eci == CP210X_PIN_MODE_MODEM)
+ if (mode.eci == CP210X_PIN_MODE_MODEM) {
+ /* mark all GPIOs of this interface as reserved */
+ priv->gpio_altfunc = 0xff;
return 0;
+ }
- priv->config = config.eci_cfg;
- priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+ iface_config = config.eci_cfg;
+ priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_ECI_GPIO_MODE_MASK) >>
CP210X_ECI_GPIO_MODE_OFFSET);
priv->gc.ngpio = 2;
} else if (intf_num == 1) {
- if (mode.sci == CP210X_PIN_MODE_MODEM)
+ if (mode.sci == CP210X_PIN_MODE_MODEM) {
+ /* mark all GPIOs of this interface as reserved */
+ priv->gpio_altfunc = 0xff;
return 0;
+ }
- priv->config = config.sci_cfg;
- priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+ iface_config = config.sci_cfg;
+ priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_SCI_GPIO_MODE_MASK) >>
CP210X_SCI_GPIO_MODE_OFFSET);
priv->gc.ngpio = 3;
@@ -1405,6 +1516,125 @@ static int cp2105_shared_gpio_init(struct usb_serial *serial)
return -ENODEV;
}
+ /* mark all pins which are not in GPIO mode */
+ if (iface_config & CP2105_GPIO0_TXLED_MODE) /* GPIO 0 */
+ priv->gpio_altfunc |= BIT(0);
+ if (iface_config & (CP2105_GPIO1_RXLED_MODE | /* GPIO 1 */
+ CP2105_GPIO1_RS485_MODE))
+ priv->gpio_altfunc |= BIT(1);
+
+ /* driver implementation for CP2105 only supports outputs */
+ priv->gpio_input = 0;
+
+ return 0;
+}
+
+static int cp2102n_gpioconf_init(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ const u16 config_size = 0x02a6;
+ u8 gpio_rst_latch;
+ u8 config_version;
+ u8 gpio_pushpull;
+ u8 *config_buf;
+ u8 gpio_latch;
+ u8 gpio_ctrl;
+ int result;
+ u8 i;
+
+ /*
+	 * Retrieve the device configuration. The received array contains
+	 * all customization settings done at the factory/manufacturer.
+	 * At the time of writing, the format of the array is documented at:
+ * https://www.silabs.com/community/interface/knowledge-base.entry.html/2017/03/31/cp2102n_setconfig-xsfa
+ */
+ config_buf = kmalloc(config_size, GFP_KERNEL);
+ if (!config_buf)
+ return -ENOMEM;
+
+ result = cp210x_read_vendor_block(serial,
+ REQTYPE_DEVICE_TO_HOST,
+ CP210X_READ_2NCONFIG,
+ config_buf,
+ config_size);
+ if (result < 0) {
+ kfree(config_buf);
+ return result;
+ }
+
+ config_version = config_buf[CP210X_2NCONFIG_CONFIG_VERSION_IDX];
+ gpio_pushpull = config_buf[CP210X_2NCONFIG_GPIO_MODE_IDX];
+ gpio_ctrl = config_buf[CP210X_2NCONFIG_GPIO_CONTROL_IDX];
+ gpio_rst_latch = config_buf[CP210X_2NCONFIG_GPIO_RSTLATCH_IDX];
+
+ kfree(config_buf);
+
+ /* Make sure this is a config format we understand. */
+ if (config_version != 0x01)
+ return -ENOTSUPP;
+
+ /*
+	 * We only support 4 GPIOs even on the QFN28 package, because the
+	 * config locations of GPIOs 4-6, determined by reverse engineering,
+	 * conflict with the offsets of other documented functions. So we
+	 * just play it safe for now.
+ */
+ priv->gc.ngpio = 4;
+
+ /*
+ * Get default pin states after reset. Needed so we can determine
+ * the direction of an open-drain pin.
+ */
+ gpio_latch = (gpio_rst_latch >> 3) & 0x0f;
+
+ /* 0 indicates open-drain mode, 1 is push-pull */
+ priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
+
+ /* 0 indicates GPIO mode, 1 is alternate function */
+ priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+
+ /*
+	 * The CP2102N does not strictly have input and output pin modes;
+	 * it only knows open-drain and push-pull modes, which are set at
+	 * the factory. An open-drain pin can function either as an
+	 * input or an output. We emulate input mode for open-drain pins
+	 * by making sure they are not driven low, and we do not allow
+	 * push-pull pins to be set as inputs.
+ */
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ /*
+ * Set direction to "input" iff pin is open-drain and reset
+ * value is 1.
+ */
+ if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i)))
+ priv->gpio_input |= BIT(i);
+ }
+
+ return 0;
+}
+
+static int cp210x_gpio_init(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ int result;
+
+ switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2105:
+ result = cp2105_gpioconf_init(serial);
+ break;
+ case CP210X_PARTNUM_CP2102N_QFN28:
+ case CP210X_PARTNUM_CP2102N_QFN24:
+ case CP210X_PARTNUM_CP2102N_QFN20:
+ result = cp2102n_gpioconf_init(serial);
+ break;
+ default:
+ return 0;
+ }
+
+ if (result < 0)
+ return result;
+
priv->gc.label = "cp210x";
priv->gc.request = cp210x_gpio_request;
priv->gc.get_direction = cp210x_gpio_direction_get;
@@ -1437,7 +1667,7 @@ static void cp210x_gpio_remove(struct usb_serial *serial)
#else
-static int cp2105_shared_gpio_init(struct usb_serial *serial)
+static int cp210x_gpio_init(struct usb_serial *serial)
{
return 0;
}
@@ -1482,6 +1712,50 @@ static int cp210x_port_remove(struct usb_serial_port *port)
return 0;
}
+static void cp210x_init_max_speed(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ bool use_actual_rate = false;
+ speed_t max;
+
+ switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2101:
+ max = 921600;
+ break;
+ case CP210X_PARTNUM_CP2102:
+ case CP210X_PARTNUM_CP2103:
+ max = 1000000;
+ break;
+ case CP210X_PARTNUM_CP2104:
+ use_actual_rate = true;
+ max = 2000000;
+ break;
+ case CP210X_PARTNUM_CP2108:
+ max = 2000000;
+ break;
+ case CP210X_PARTNUM_CP2105:
+ if (cp210x_interface_num(serial) == 0) {
+ use_actual_rate = true;
+ max = 2000000; /* ECI */
+ } else {
+ max = 921600; /* SCI */
+ }
+ break;
+ case CP210X_PARTNUM_CP2102N_QFN28:
+ case CP210X_PARTNUM_CP2102N_QFN24:
+ case CP210X_PARTNUM_CP2102N_QFN20:
+ use_actual_rate = true;
+ max = 3000000;
+ break;
+ default:
+ max = 2000000;
+ break;
+ }
+
+ priv->max_speed = max;
+ priv->use_actual_rate = use_actual_rate;
+}
+
static int cp210x_attach(struct usb_serial *serial)
{
int result;
@@ -1502,12 +1776,12 @@ static int cp210x_attach(struct usb_serial *serial)
usb_set_serial_data(serial, priv);
- if (priv->partnum == CP210X_PARTNUM_CP2105) {
- result = cp2105_shared_gpio_init(serial);
- if (result < 0) {
- dev_err(&serial->interface->dev,
- "GPIO initialisation failed, continuing without GPIO support\n");
- }
+ cp210x_init_max_speed(serial);
+
+ result = cp210x_gpio_init(serial);
+ if (result < 0) {
+ dev_err(&serial->interface->dev, "GPIO initialisation failed: %d\n",
+ result);
}
return 0;
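For the parts where use_actual_rate is set, cp210x_get_actual_rate() mirrors the hardware divider: the UART clock is 48 MHz, the divisor is rounded to the nearest integer, and the prescaler drops to 4 only at or below 365 baud. A worked example of the arithmetic (plain C using the driver's constants; the clamp to [300, max_speed] is omitted for brevity):

	#include <stdio.h>

	/* Sketch of the cp210x actual-rate calculation for a 48 MHz clock. */
	static unsigned int actual_rate(unsigned int baud)
	{
		unsigned int prescale = (baud <= 365) ? 4 : 1;
		/* DIV_ROUND_CLOSEST(48000000, 2 * prescale * baud) */
		unsigned int div = (48000000u + prescale * baud) /
				   (2 * prescale * baud);

		return 48000000u / (2 * prescale * div);
	}

	int main(void)
	{
		/* 115200 -> div 208 -> 48000000 / 416 = 115384, ~0.16% high */
		printf("%u\n", actual_rate(115200));
		return 0;
	}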
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index dc67a2eb98d7..ebd76ab07b72 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -255,6 +255,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
struct device *dev = &port->dev;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
+ unsigned long flags;
int result;
/* the urb might have been killed. */
@@ -270,13 +271,13 @@ static void cyberjack_read_int_callback(struct urb *urb)
	/* This is an announcement of coming bulk_ins. */
unsigned short size = ((unsigned short)data[3]<<8)+data[2]+3;
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
old_rdtodo = priv->rdtodo;
if (old_rdtodo > SHRT_MAX - size) {
dev_dbg(dev, "To many bulk_in urbs to do.\n");
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
goto resubmit;
}
@@ -285,7 +286,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
dev_dbg(dev, "%s - rdtodo: %d\n", __func__, priv->rdtodo);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
if (!old_rdtodo) {
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
@@ -309,6 +310,7 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
struct cyberjack_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned char *data = urb->transfer_buffer;
+ unsigned long flags;
short todo;
int result;
int status = urb->status;
@@ -325,7 +327,7 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
/* Reduce urbs to do by one. */
priv->rdtodo -= urb->actual_length;
@@ -334,7 +336,7 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
priv->rdtodo = 0;
todo = priv->rdtodo;
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(dev, "%s - rdtodo: %d\n", __func__, todo);
@@ -354,6 +356,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
struct cyberjack_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
int status = urb->status;
+ unsigned long flags;
set_bit(0, &port->write_urbs_free);
if (status) {
@@ -362,7 +365,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
return;
}
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
/* only do something if we have more data to send */
if (priv->wrfilled) {
@@ -406,7 +409,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
}
exit:
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
}
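The conversions above, and the matching ones in the drivers that follow, address the same constraint: USB completion handlers can no longer rely on being called with local interrupts disabled, and a plain spin_lock() in a handler could deadlock against a path that takes the same lock from interrupt context. Taking the lock with spin_lock_irqsave() makes the handler safe regardless of the interrupt state it was entered with, and the saved flags restore exactly the prior state. A kernel-style sketch of the pattern (hypothetical structure, not compilable on its own):

	/* Sketch: a completion handler sharing a lock with other contexts. */
	struct demo_priv {
		spinlock_t lock;
		int rdtodo;
	};

	static void demo_read_callback(struct urb *urb)
	{
		struct demo_priv *priv = urb->context;
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);	/* not plain spin_lock() */
		priv->rdtodo -= urb->actual_length;
		spin_unlock_irqrestore(&priv->lock, flags);
	}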
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index b0526786fb02..e7f244cf2c07 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -984,6 +984,7 @@ static void digi_write_bulk_callback(struct urb *urb)
struct usb_serial *serial;
struct digi_port *priv;
struct digi_serial *serial_priv;
+ unsigned long flags;
int ret = 0;
int status = urb->status;
@@ -1004,15 +1005,15 @@ static void digi_write_bulk_callback(struct urb *urb)
/* handle oob callback */
if (priv->dp_port_num == serial_priv->ds_oob_port_num) {
dev_dbg(&port->dev, "digi_write_bulk_callback: oob callback\n");
- spin_lock(&priv->dp_port_lock);
+ spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_write_urb_in_use = 0;
wake_up_interruptible(&port->write_wait);
- spin_unlock(&priv->dp_port_lock);
+ spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return;
}
/* try to send any buffered data on this port */
- spin_lock(&priv->dp_port_lock);
+ spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_write_urb_in_use = 0;
if (priv->dp_out_buf_len > 0) {
*((unsigned char *)(port->write_urb->transfer_buffer))
@@ -1035,7 +1036,7 @@ static void digi_write_bulk_callback(struct urb *urb)
/* lost the race in write_chan(). */
schedule_work(&priv->dp_wakeup_work);
- spin_unlock(&priv->dp_port_lock);
+ spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret && ret != -EPERM)
dev_err_console(port,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
@@ -1381,11 +1382,12 @@ static int digi_read_inb_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *buf = urb->transfer_buffer;
+ unsigned long flags;
int opcode;
int len;
int port_status;
unsigned char *data;
- int flag, throttled;
+ int tty_flag, throttled;
/* short/multiple packet check */
if (urb->actual_length < 2) {
@@ -1407,7 +1409,7 @@ static int digi_read_inb_callback(struct urb *urb)
return -1;
}
- spin_lock(&priv->dp_port_lock);
+ spin_lock_irqsave(&priv->dp_port_lock, flags);
/* check for throttle; if set, do not resubmit read urb */
/* indicate the read chain needs to be restarted on unthrottle */
@@ -1421,7 +1423,7 @@ static int digi_read_inb_callback(struct urb *urb)
data = &buf[3];
/* get flag from port_status */
- flag = 0;
+ tty_flag = 0;
/* overrun is special, not associated with a char */
if (port_status & DIGI_OVERRUN_ERROR)
@@ -1430,21 +1432,21 @@ static int digi_read_inb_callback(struct urb *urb)
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (port_status & DIGI_BREAK_ERROR)
- flag = TTY_BREAK;
+ tty_flag = TTY_BREAK;
else if (port_status & DIGI_PARITY_ERROR)
- flag = TTY_PARITY;
+ tty_flag = TTY_PARITY;
else if (port_status & DIGI_FRAMING_ERROR)
- flag = TTY_FRAME;
+ tty_flag = TTY_FRAME;
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
tty_insert_flip_string_fixed_flag(&port->port, data,
- flag, len);
+ tty_flag, len);
tty_flip_buffer_push(&port->port);
}
}
- spin_unlock(&priv->dp_port_lock);
+ spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (opcode == DIGI_CMD_RECEIVE_DISABLE)
dev_dbg(&port->dev, "%s: got RECEIVE_DISABLE\n", __func__);
@@ -1474,6 +1476,7 @@ static int digi_read_oob_callback(struct urb *urb)
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *buf = urb->transfer_buffer;
int opcode, line, status, val;
+ unsigned long flags;
int i;
unsigned int rts;
@@ -1506,7 +1509,7 @@ static int digi_read_oob_callback(struct urb *urb)
rts = C_CRTSCTS(tty);
if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
- spin_lock(&priv->dp_port_lock);
+ spin_lock_irqsave(&priv->dp_port_lock, flags);
/* convert from digi flags to termiox flags */
if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
priv->dp_modem_signals |= TIOCM_CTS;
@@ -1530,12 +1533,12 @@ static int digi_read_oob_callback(struct urb *urb)
else
priv->dp_modem_signals &= ~TIOCM_CD;
- spin_unlock(&priv->dp_port_lock);
+ spin_unlock_irqrestore(&priv->dp_port_lock, flags);
} else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
- spin_lock(&priv->dp_port_lock);
+ spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_transmit_idle = 1;
wake_up_interruptible(&priv->dp_transmit_idle_wait);
- spin_unlock(&priv->dp_port_lock);
+ spin_unlock_irqrestore(&priv->dp_port_lock, flags);
} else if (opcode == DIGI_CMD_IFLUSH_FIFO) {
wake_up_interruptible(&priv->dp_flush_wait);
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 17283f4b4779..97c69d373ca6 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -648,6 +648,7 @@ static void edge_interrupt_callback(struct urb *urb)
struct usb_serial_port *port;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
+ unsigned long flags;
int bytes_avail;
int position;
int txCredits;
@@ -679,7 +680,7 @@ static void edge_interrupt_callback(struct urb *urb)
if (length > 1) {
bytes_avail = data[0] | (data[1] << 8);
if (bytes_avail) {
- spin_lock(&edge_serial->es_lock);
+ spin_lock_irqsave(&edge_serial->es_lock, flags);
edge_serial->rxBytesAvail += bytes_avail;
dev_dbg(dev,
"%s - bytes_avail=%d, rxBytesAvail=%d, read_in_progress=%d\n",
@@ -702,7 +703,8 @@ static void edge_interrupt_callback(struct urb *urb)
edge_serial->read_in_progress = false;
}
}
- spin_unlock(&edge_serial->es_lock);
+ spin_unlock_irqrestore(&edge_serial->es_lock,
+ flags);
}
}
/* grab the txcredits for the ports if available */
@@ -715,9 +717,11 @@ static void edge_interrupt_callback(struct urb *urb)
port = edge_serial->serial->port[portNumber];
edge_port = usb_get_serial_port_data(port);
if (edge_port->open) {
- spin_lock(&edge_port->ep_lock);
+ spin_lock_irqsave(&edge_port->ep_lock,
+ flags);
edge_port->txCredits += txCredits;
- spin_unlock(&edge_port->ep_lock);
+ spin_unlock_irqrestore(&edge_port->ep_lock,
+ flags);
dev_dbg(dev, "%s - txcredits for port%d = %d\n",
__func__, portNumber,
edge_port->txCredits);
@@ -758,6 +762,7 @@ static void edge_bulk_in_callback(struct urb *urb)
int retval;
__u16 raw_data_length;
int status = urb->status;
+ unsigned long flags;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n",
@@ -777,7 +782,7 @@ static void edge_bulk_in_callback(struct urb *urb)
usb_serial_debug_data(dev, __func__, raw_data_length, data);
- spin_lock(&edge_serial->es_lock);
+ spin_lock_irqsave(&edge_serial->es_lock, flags);
/* decrement our rxBytes available by the number that we just got */
edge_serial->rxBytesAvail -= raw_data_length;
@@ -801,7 +806,7 @@ static void edge_bulk_in_callback(struct urb *urb)
edge_serial->read_in_progress = false;
}
- spin_unlock(&edge_serial->es_lock);
+ spin_unlock_irqrestore(&edge_serial->es_lock, flags);
}
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 0fbadb37c104..6d1d6efa3055 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1729,6 +1729,7 @@ static void edge_bulk_in_callback(struct urb *urb)
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
+ unsigned long flags;
int retval = 0;
int port_number;
int status = urb->status;
@@ -1780,13 +1781,13 @@ static void edge_bulk_in_callback(struct urb *urb)
exit:
/* continue read unless stopped */
- spin_lock(&edge_port->ep_lock);
+ spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
- spin_unlock(&edge_port->ep_lock);
+ spin_unlock_irqrestore(&edge_port->ep_lock, flags);
if (retval)
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 24b06c7e5e2d..7643716b5299 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -132,7 +132,7 @@ irda_usb_find_class_desc(struct usb_serial *serial, unsigned int ifnum)
0, ifnum, desc, sizeof(*desc), 1000);
dev_dbg(&serial->dev->dev, "%s - ret=%d\n", __func__, ret);
- if (ret < sizeof(*desc)) {
+ if (ret < (int)sizeof(*desc)) {
dev_dbg(&serial->dev->dev,
"%s - class descriptor read %s (%d)\n", __func__,
(ret < 0) ? "failed" : "too short", ret);
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 2fb71303ec3a..449e89db9cea 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -58,7 +58,6 @@ struct iuu_private {
u8 *buf; /* used for initialize speed */
u8 len;
int vcc; /* vcc (either 3 or 5 V) */
- u32 baud;
u32 boost;
u32 clk;
};
@@ -963,9 +962,6 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
struct iuu_private *priv = usb_get_serial_port_data(port);
baud = tty->termios.c_ospeed;
- tty->termios.c_ispeed = baud;
- /* Re-encode speed */
- tty_encode_baud_rate(tty, baud, baud);
dev_dbg(dev, "%s - baud %d\n", __func__, baud);
usb_clear_halt(serial->dev, port->write_urb->pipe);
@@ -991,7 +987,6 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
if (boost < 100)
boost = 100;
priv->boost = boost;
- priv->baud = baud;
switch (clockmode) {
case 2: /* 3.680 Mhz */
priv->clk = IUU_CLK_3680000;
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 5169624d8b11..38d43c4b7ce5 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
3, /* get pins */
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
0, 0, data, 1, 2000);
- if (rc >= 0)
+ if (rc == 1)
*value = *data;
+ else if (rc >= 0)
+ rc = -EIO;
kfree(data);
return rc;
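usb_control_msg() returns the number of bytes actually transferred, so a non-negative result shorter than the requested length is still a failure for callers that need the whole buffer; the fix above maps that case to -EIO rather than consuming stale data, and the quatech2 and ssu100 hunks below apply the same rule. A hedged sketch of the idiom as a wrapper (demo_control_in() is a hypothetical name, not a kernel API):

	/* Treat a short vendor control read as an I/O error. */
	static int demo_control_in(struct usb_device *dev, u8 request,
				   void *buf, u16 len)
	{
		int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
					  USB_TYPE_VENDOR | USB_DIR_IN,
					  0, 0, buf, len, 1000);
		if (ret < 0)
			return ret;	/* transport error */
		if (ret != len)
			return -EIO;	/* short read: buf contents untrustworthy */
		return 0;
	}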
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 5046ffd53cde..5ee48b0650c4 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -67,7 +67,6 @@ static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
*/
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
- { USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/kl5kusb105.h b/drivers/usb/serial/kl5kusb105.h
index 41c9bf60fbf0..dbe98d85ca8e 100644
--- a/drivers/usb/serial/kl5kusb105.h
+++ b/drivers/usb/serial/kl5kusb105.h
@@ -7,9 +7,6 @@
#define PALMCONNECT_VID 0x0830
#define PALMCONNECT_PID 0x0080
-#define KLSI_VID 0x05e9
-#define KLSI_KL5KUSB105D_PID 0x00c0
-
/* Vendor commands: */
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index a31ea7e194dd..e9882ba20933 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -190,8 +190,10 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send get_HW_version URB returns: %i\n", __func__, result);
- dev_dbg(dev, "Hardware version: %i.%i.%i\n", transfer_buffer[0],
- transfer_buffer[1], transfer_buffer[2]);
+ if (result >= 3) {
+ dev_dbg(dev, "Hardware version: %i.%i.%i\n", transfer_buffer[0],
+ transfer_buffer[1], transfer_buffer[2]);
+ }
/* get firmware version */
result = usb_control_msg(port->serial->dev,
@@ -205,8 +207,10 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send get_FW_version URB returns: %i\n", __func__, result);
- dev_dbg(dev, "Firmware version: %i.%i.%i\n", transfer_buffer[0],
- transfer_buffer[1], transfer_buffer[2]);
+ if (result >= 3) {
+ dev_dbg(dev, "Firmware version: %i.%i.%i\n", transfer_buffer[0],
+ transfer_buffer[1], transfer_buffer[2]);
+ }
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
@@ -393,12 +397,20 @@ static int kobil_tiocmget(struct tty_struct *tty)
transfer_buffer_length,
KOBIL_TIMEOUT);
- dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
- __func__, result, transfer_buffer[0]);
+ dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+ result);
+ if (result < 1) {
+ if (result >= 0)
+ result = -EIO;
+ goto out_free;
+ }
+
+ dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
result = 0;
if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
result = TIOCM_DSR;
+out_free:
kfree(transfer_buffer);
return result;
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index bd57630e67e2..27109522fd8b 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -340,14 +340,15 @@ static void async_complete(struct urb *urb)
{
struct urbtracker *urbtrack = urb->context;
int status = urb->status;
+ unsigned long flags;
if (unlikely(status))
dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n", __func__, status);
/* remove the urbtracker from the active_urbs list */
- spin_lock(&urbtrack->mos_parport->listlock);
+ spin_lock_irqsave(&urbtrack->mos_parport->listlock, flags);
list_del(&urbtrack->urblist_entry);
- spin_unlock(&urbtrack->mos_parport->listlock);
+ spin_unlock_irqrestore(&urbtrack->mos_parport->listlock, flags);
kref_put(&urbtrack->ref_count, destroy_urbtracker);
}
@@ -1526,8 +1527,6 @@ static void change_port_settings(struct tty_struct *tty,
struct usb_serial *serial;
int baud;
unsigned cflag;
- unsigned iflag;
- __u8 mask = 0xff;
__u8 lData;
__u8 lParity;
__u8 lStop;
@@ -1551,23 +1550,19 @@ static void change_port_settings(struct tty_struct *tty,
lParity = 0x00; /* No parity */
cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
/* Change the number of bits */
switch (cflag & CSIZE) {
case CS5:
lData = UART_LCR_WLEN5;
- mask = 0x1f;
break;
case CS6:
lData = UART_LCR_WLEN6;
- mask = 0x3f;
break;
case CS7:
lData = UART_LCR_WLEN7;
- mask = 0x7f;
break;
default:
case CS8:
@@ -1685,11 +1680,8 @@ static void mos7720_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
int status;
- struct usb_serial *serial;
struct moschip_port *mos7720_port;
- serial = port->serial;
-
mos7720_port = usb_get_serial_port_data(port);
if (mos7720_port == NULL)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index fdceb46d9fc6..b42bad85097a 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb)
}
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+ if (urb->actual_length < 1)
+ goto out;
+
dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
mos7840_port->MsrLsr, mos7840_port->port_num);
data = urb->transfer_buffer;
@@ -802,18 +805,19 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
struct moschip_port *mos7840_port;
struct usb_serial_port *port;
int status = urb->status;
+ unsigned long flags;
int i;
mos7840_port = urb->context;
port = mos7840_port->port;
- spin_lock(&mos7840_port->pool_lock);
+ spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; i++) {
if (urb == mos7840_port->write_urb_pool[i]) {
mos7840_port->busy[i] = 0;
break;
}
}
- spin_unlock(&mos7840_port->pool_lock);
+ spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
if (status) {
dev_dbg(&port->dev, "nonzero write bulk status received:%d\n", status);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 664e61f16b6a..0215b70c4efc 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -196,6 +196,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
+#define DELL_PRODUCT_5821E 0x81d7
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -1030,6 +1032,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 5d1a1931967e..e41f725ac7aa 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC232B),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index fcd72396a7b6..26965cc23c17 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -24,6 +24,7 @@
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
#define ATEN_PRODUCT_UC485 0x2021
+#define ATEN_PRODUCT_UC232B 0x2022
#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 958e12e1e7c7..b61c2a9b6b11 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -194,7 +194,7 @@ static inline int qt2_getregister(struct usb_device *dev,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0xc0, reg,
uart, data, sizeof(*data), QT2_USB_TIMEOUT);
- if (ret < sizeof(*data)) {
+ if (ret < (int)sizeof(*data)) {
if (ret >= 0)
ret = -EIO;
}
@@ -621,16 +621,17 @@ static void qt2_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct qt2_port_private *port_priv;
+ unsigned long flags;
port = urb->context;
port_priv = usb_get_serial_port_data(port);
- spin_lock(&port_priv->urb_lock);
+ spin_lock_irqsave(&port_priv->urb_lock, flags);
port_priv->urb_in_use = false;
usb_serial_port_softint(port);
- spin_unlock(&port_priv->urb_lock);
+ spin_unlock_irqrestore(&port_priv->urb_lock, flags);
}
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index d189f953c891..a43263a0edd8 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -409,6 +409,7 @@ static void sierra_outdat_callback(struct urb *urb)
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
struct sierra_intf_private *intfdata;
int status = urb->status;
+ unsigned long flags;
intfdata = usb_get_serial_data(port->serial);
@@ -419,12 +420,12 @@ static void sierra_outdat_callback(struct urb *urb)
dev_dbg(&port->dev, "%s - nonzero write bulk status "
"received: %d\n", __func__, status);
- spin_lock(&portdata->lock);
+ spin_lock_irqsave(&portdata->lock, flags);
--portdata->outstanding_urbs;
- spin_unlock(&portdata->lock);
- spin_lock(&intfdata->susp_lock);
+ spin_unlock_irqrestore(&portdata->lock, flags);
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
--intfdata->in_flight;
- spin_unlock(&intfdata->susp_lock);
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
usb_serial_port_softint(port);
}
@@ -770,9 +771,9 @@ static void sierra_close(struct usb_serial_port *port)
kfree(urb->transfer_buffer);
usb_free_urb(urb);
usb_autopm_put_interface_async(serial->interface);
- spin_lock(&portdata->lock);
+ spin_lock_irq(&portdata->lock);
portdata->outstanding_urbs--;
- spin_unlock(&portdata->lock);
+ spin_unlock_irq(&portdata->lock);
}
sierra_stop_rx_urbs(port);
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 2083c267787b..0900b47b5f57 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -104,7 +104,7 @@ static inline int ssu100_getregister(struct usb_device *dev,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0xc0, reg,
uart, data, sizeof(*data), 300);
- if (ret < sizeof(*data)) {
+ if (ret < (int)sizeof(*data)) {
if (ret >= 0)
ret = -EIO;
}
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index cd2f8dc8b58c..6ca24e86f686 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -35,6 +35,7 @@ static void symbol_int_callback(struct urb *urb)
struct symbol_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
+ unsigned long flags;
int result;
int data_length;
@@ -73,7 +74,7 @@ static void symbol_int_callback(struct urb *urb)
}
exit:
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
/* Continue trying to always read if we should */
if (!priv->throttled) {
@@ -84,7 +85,7 @@ exit:
__func__, result);
} else
priv->actually_throttled = true;
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 6b22857f6e52..3010878f7f8e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1215,6 +1215,7 @@ static void ti_bulk_in_callback(struct urb *urb)
struct usb_serial_port *port = tport->tp_port;
struct device *dev = &urb->dev->dev;
int status = urb->status;
+ unsigned long flags;
int retval = 0;
switch (status) {
@@ -1247,20 +1248,20 @@ static void ti_bulk_in_callback(struct urb *urb)
__func__);
else
ti_recv(port, urb->transfer_buffer, urb->actual_length);
- spin_lock(&tport->tp_lock);
+ spin_lock_irqsave(&tport->tp_lock, flags);
port->icount.rx += urb->actual_length;
- spin_unlock(&tport->tp_lock);
+ spin_unlock_irqrestore(&tport->tp_lock, flags);
}
exit:
/* continue to read unless stopping */
- spin_lock(&tport->tp_lock);
+ spin_lock_irqsave(&tport->tp_lock, flags);
if (tport->tp_read_urb_state == TI_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING)
tport->tp_read_urb_state = TI_READ_URB_STOPPED;
- spin_unlock(&tport->tp_lock);
+ spin_unlock_irqrestore(&tport->tp_lock, flags);
if (retval)
dev_err(dev, "%s - resubmit read urb failed, %d\n",
__func__, retval);
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 107e64c42e94..912472f26e4f 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -326,6 +326,7 @@ static void usb_wwan_outdat_callback(struct urb *urb)
struct usb_serial_port *port;
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata;
+ unsigned long flags;
int i;
port = urb->context;
@@ -334,9 +335,9 @@ static void usb_wwan_outdat_callback(struct urb *urb)
usb_serial_port_softint(port);
usb_autopm_put_interface_async(port->serial->interface);
portdata = usb_get_serial_port_data(port);
- spin_lock(&intfdata->susp_lock);
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
intfdata->in_flight--;
- spin_unlock(&intfdata->susp_lock);
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
for (i = 0; i < N_OUT_URB; ++i) {
if (portdata->out_urbs[i] == urb) {
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 2c8eab11a493..00878c386dd0 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,22 @@ config TYPEC_TCPM
if TYPEC_TCPM
+config TYPEC_TCPCI
+ tristate "Type-C Port Controller Interface driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  Type-C Port Controller driver for TCPCI-compliant controllers.
+
+config TYPEC_RT1711H
+ tristate "Richtek RT1711H Type-C chip driver"
+ depends on I2C
+ select TYPEC_TCPCI
+ help
+	  Richtek RT1711H Type-C chip driver that works with the
+	  Type-C Port Controller Manager to provide USB PD and USB
+	  Type-C functionality.
+
source "drivers/usb/typec/fusb302/Kconfig"
config TYPEC_WCOVE
@@ -88,4 +104,6 @@ config TYPEC_TPS6598X
source "drivers/usb/typec/mux/Kconfig"
+source "drivers/usb/typec/altmodes/Kconfig"
+
endif # TYPEC
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 1f599a6c30cc..45b0aef428a8 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC) += typec.o
-typec-y := class.o mux.o
+typec-y := class.o mux.o bus.o
+obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
obj-y += fusb302/
obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
+obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
+obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
new file mode 100644
index 000000000000..efef2a64bc51
--- /dev/null
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -0,0 +1,14 @@
+
+menu "USB Type-C Alternate Mode drivers"
+
+config TYPEC_DP_ALTMODE
+ tristate "DisplayPort Alternate Mode driver"
+ help
+ DisplayPort USB Type-C Alternate Mode allows DisplayPort
+ displays and adapters to be attached to the USB Type-C
+ connectors on the system.
+
+ To compile this driver as a module, choose M here: the
+ module will be called typec_displayport.
+
+endmenu
diff --git a/drivers/usb/typec/altmodes/Makefile b/drivers/usb/typec/altmodes/Makefile
new file mode 100644
index 000000000000..5caf094ef71a
--- /dev/null
+++ b/drivers/usb/typec/altmodes/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TYPEC_DP_ALTMODE) += typec_displayport.o
+typec_displayport-y := displayport.o
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
new file mode 100644
index 000000000000..3f06e94771a7
--- /dev/null
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * USB Type-C DisplayPort Alternate Mode driver
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * DisplayPort is a trademark of VESA (www.vesa.org)
+ */
+
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/usb/pd_vdo.h>
+#include <linux/usb/typec_dp.h>
+
+#define DP_HEADER(cmd) (VDO(USB_TYPEC_DP_SID, 1, cmd) | \
+ VDO_OPOS(USB_TYPEC_DP_MODE))
+
+enum {
+ DP_CONF_USB,
+ DP_CONF_DFP_D,
+ DP_CONF_UFP_D,
+ DP_CONF_DUAL_D,
+};
+
+/* Helpers for setting/getting the pin assignment value in the configuration */
+#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
+#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
+
+/* Pin assignments that use USB3.1 Gen2 signaling to carry DP protocol */
+#define DP_PIN_ASSIGN_GEN2_BR_MASK (BIT(DP_PIN_ASSIGN_A) | \
+ BIT(DP_PIN_ASSIGN_B))
+
+/* Pin assignments that use DP v1.3 signaling to carry DP protocol */
+#define DP_PIN_ASSIGN_DP_BR_MASK (BIT(DP_PIN_ASSIGN_C) | \
+ BIT(DP_PIN_ASSIGN_D) | \
+ BIT(DP_PIN_ASSIGN_E) | \
+ BIT(DP_PIN_ASSIGN_F))
+
+/* DP only pin assignments */
+#define DP_PIN_ASSIGN_DP_ONLY_MASK (BIT(DP_PIN_ASSIGN_A) | \
+ BIT(DP_PIN_ASSIGN_C) | \
+ BIT(DP_PIN_ASSIGN_E))
+
+/* Pin assignments where one channel is for USB */
+#define DP_PIN_ASSIGN_MULTI_FUNC_MASK (BIT(DP_PIN_ASSIGN_B) | \
+ BIT(DP_PIN_ASSIGN_D) | \
+ BIT(DP_PIN_ASSIGN_F))
+
+enum dp_state {
+ DP_STATE_IDLE,
+ DP_STATE_ENTER,
+ DP_STATE_UPDATE,
+ DP_STATE_CONFIGURE,
+ DP_STATE_EXIT,
+};
+
+struct dp_altmode {
+ struct typec_displayport_data data;
+
+ enum dp_state state;
+
+ struct mutex lock; /* device lock */
+ struct work_struct work;
+ struct typec_altmode *alt;
+ const struct typec_altmode *port;
+};
+
+static int dp_altmode_notify(struct dp_altmode *dp)
+{
+ u8 state = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
+
+ return typec_altmode_notify(dp->alt, TYPEC_MODAL_STATE(state),
+ &dp->data);
+}
+
+static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
+{
+ u32 conf = DP_CONF_SIGNALING_DP; /* Only DP signaling supported */
+ u8 pin_assign = 0;
+
+ switch (con) {
+ case DP_STATUS_CON_DISABLED:
+ return 0;
+ case DP_STATUS_CON_DFP_D:
+ conf |= DP_CONF_UFP_U_AS_DFP_D;
+ pin_assign = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo) &
+ DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);
+ break;
+ case DP_STATUS_CON_UFP_D:
+ case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
+ conf |= DP_CONF_UFP_U_AS_UFP_D;
+ pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
+ DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
+ break;
+ default:
+ break;
+ }
+
+	/* Determine the initial pin assignment. */
+	if (!DP_CONF_GET_PIN_ASSIGN(dp->data.conf)) {
+		/* Is USB together with DP preferred? */
+ if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
+ pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
+ pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
+ else
+ pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
+
+ if (!pin_assign)
+ return -EINVAL;
+
+ conf |= DP_CONF_SET_PIN_ASSIGN(pin_assign);
+ }
+
+ dp->data.conf = conf;
+
+ return 0;
+}
+
+static int dp_altmode_status_update(struct dp_altmode *dp)
+{
+ bool configured = !!DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
+ u8 con = DP_STATUS_CONNECTION(dp->data.status);
+ int ret = 0;
+
+ if (configured && (dp->data.status & DP_STATUS_SWITCH_TO_USB)) {
+ dp->data.conf = 0;
+ dp->state = DP_STATE_CONFIGURE;
+ } else if (dp->data.status & DP_STATUS_EXIT_DP_MODE) {
+ dp->state = DP_STATE_EXIT;
+ } else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
+ ret = dp_altmode_configure(dp, con);
+ if (!ret)
+ dp->state = DP_STATE_CONFIGURE;
+ }
+
+ return ret;
+}
+
+static int dp_altmode_configured(struct dp_altmode *dp)
+{
+ int ret;
+
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
+
+ if (!dp->data.conf)
+ return typec_altmode_notify(dp->alt, TYPEC_STATE_USB,
+ &dp->data);
+
+ ret = dp_altmode_notify(dp);
+ if (ret)
+ return ret;
+
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
+
+ return 0;
+}
+
+static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
+{
+ u32 header = DP_HEADER(DP_CMD_CONFIGURE);
+ int ret;
+
+ ret = typec_altmode_notify(dp->alt, TYPEC_STATE_SAFE, &dp->data);
+ if (ret) {
+ dev_err(&dp->alt->dev,
+			"unable to put connector into safe mode\n");
+ return ret;
+ }
+
+ ret = typec_altmode_vdm(dp->alt, header, &conf, 2);
+ if (ret) {
+ if (DP_CONF_GET_PIN_ASSIGN(dp->data.conf))
+ dp_altmode_notify(dp);
+ else
+ typec_altmode_notify(dp->alt, TYPEC_STATE_USB,
+ &dp->data);
+ }
+
+ return ret;
+}
+
+static void dp_altmode_work(struct work_struct *work)
+{
+ struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
+ u32 header;
+ u32 vdo;
+ int ret;
+
+ mutex_lock(&dp->lock);
+
+ switch (dp->state) {
+ case DP_STATE_ENTER:
+ ret = typec_altmode_enter(dp->alt);
+ if (ret)
+ dev_err(&dp->alt->dev, "failed to enter mode\n");
+ break;
+ case DP_STATE_UPDATE:
+ header = DP_HEADER(DP_CMD_STATUS_UPDATE);
+ vdo = 1;
+ ret = typec_altmode_vdm(dp->alt, header, &vdo, 2);
+ if (ret)
+ dev_err(&dp->alt->dev,
+ "unable to send Status Update command (%d)\n",
+ ret);
+ break;
+ case DP_STATE_CONFIGURE:
+ ret = dp_altmode_configure_vdm(dp, dp->data.conf);
+ if (ret)
+ dev_err(&dp->alt->dev,
+ "unable to send Configure command (%d)\n", ret);
+ break;
+ case DP_STATE_EXIT:
+ if (typec_altmode_exit(dp->alt))
+ dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
+ break;
+ default:
+ break;
+ }
+
+ dp->state = DP_STATE_IDLE;
+
+ mutex_unlock(&dp->lock);
+}
+
+static void dp_altmode_attention(struct typec_altmode *alt, const u32 vdo)
+{
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ u8 old_state;
+
+ mutex_lock(&dp->lock);
+
+ old_state = dp->state;
+ dp->data.status = vdo;
+
+ if (old_state != DP_STATE_IDLE)
+ dev_warn(&alt->dev, "ATTENTION while processing state %d\n",
+ old_state);
+
+ if (dp_altmode_status_update(dp))
+ dev_warn(&alt->dev, "%s: status update failed\n", __func__);
+
+ if (dp_altmode_notify(dp))
+ dev_err(&alt->dev, "%s: notification failed\n", __func__);
+
+ if (old_state == DP_STATE_IDLE && dp->state != DP_STATE_IDLE)
+ schedule_work(&dp->work);
+
+ mutex_unlock(&dp->lock);
+}
+
+static int dp_altmode_vdm(struct typec_altmode *alt,
+ const u32 hdr, const u32 *vdo, int count)
+{
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+
+ if (dp->state != DP_STATE_IDLE) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ dp->state = DP_STATE_UPDATE;
+ break;
+ case CMD_EXIT_MODE:
+ dp->data.status = 0;
+ dp->data.conf = 0;
+ break;
+ case DP_CMD_STATUS_UPDATE:
+ dp->data.status = *vdo;
+ ret = dp_altmode_status_update(dp);
+ break;
+ case DP_CMD_CONFIGURE:
+ ret = dp_altmode_configured(dp);
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
+ case DP_CMD_CONFIGURE:
+ dp->data.conf = 0;
+ ret = dp_altmode_configured(dp);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (dp->state != DP_STATE_IDLE)
+ schedule_work(&dp->work);
+
+err_unlock:
+ mutex_unlock(&dp->lock);
+ return ret;
+}
+
+static int dp_altmode_activate(struct typec_altmode *alt, int activate)
+{
+ return activate ? typec_altmode_enter(alt) : typec_altmode_exit(alt);
+}
+
+static const struct typec_altmode_ops dp_altmode_ops = {
+ .attention = dp_altmode_attention,
+ .vdm = dp_altmode_vdm,
+ .activate = dp_altmode_activate,
+};
+
+static const char * const configurations[] = {
+ [DP_CONF_USB] = "USB",
+ [DP_CONF_DFP_D] = "source",
+ [DP_CONF_UFP_D] = "sink",
+};
+
+static ssize_t
+configuration_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dp_altmode *dp = dev_get_drvdata(dev);
+ u32 conf;
+ u32 cap;
+ int con;
+ int ret = 0;
+
+ con = sysfs_match_string(configurations, buf);
+ if (con < 0)
+ return con;
+
+ mutex_lock(&dp->lock);
+
+ if (dp->state != DP_STATE_IDLE) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ cap = DP_CAP_CAPABILITY(dp->alt->vdo);
+
+ if ((con == DP_CONF_DFP_D && !(cap & DP_CAP_DFP_D)) ||
+ (con == DP_CONF_UFP_D && !(cap & DP_CAP_UFP_D))) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ conf = dp->data.conf & ~DP_CONF_DUAL_D;
+ conf |= con;
+
+ if (dp->alt->active) {
+ ret = dp_altmode_configure_vdm(dp, conf);
+ if (ret)
+ goto err_unlock;
+ }
+
+ dp->data.conf = conf;
+
+err_unlock:
+ mutex_unlock(&dp->lock);
+
+ return ret ? ret : size;
+}
+
+static ssize_t configuration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dp_altmode *dp = dev_get_drvdata(dev);
+ int len;
+ u8 cap;
+ u8 cur;
+ int i;
+
+ mutex_lock(&dp->lock);
+
+ cap = DP_CAP_CAPABILITY(dp->alt->vdo);
+ cur = DP_CONF_CURRENTLY(dp->data.conf);
+
+ len = sprintf(buf, "%s ", cur ? "USB" : "[USB]");
+
+ for (i = 1; i < ARRAY_SIZE(configurations); i++) {
+ if (i == cur)
+ len += sprintf(buf + len, "[%s] ", configurations[i]);
+ else if ((i == DP_CONF_DFP_D && cap & DP_CAP_DFP_D) ||
+ (i == DP_CONF_UFP_D && cap & DP_CAP_UFP_D))
+ len += sprintf(buf + len, "%s ", configurations[i]);
+ }
+
+ mutex_unlock(&dp->lock);
+
+ buf[len - 1] = '\n';
+ return len;
+}
+static DEVICE_ATTR_RW(configuration);
+
+static const char * const pin_assignments[] = {
+ [DP_PIN_ASSIGN_A] = "A",
+ [DP_PIN_ASSIGN_B] = "B",
+ [DP_PIN_ASSIGN_C] = "C",
+ [DP_PIN_ASSIGN_D] = "D",
+ [DP_PIN_ASSIGN_E] = "E",
+ [DP_PIN_ASSIGN_F] = "F",
+};
+
+static ssize_t
+pin_assignment_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dp_altmode *dp = dev_get_drvdata(dev);
+ u8 assignments;
+ u32 conf;
+ int ret;
+
+ ret = sysfs_match_string(pin_assignments, buf);
+ if (ret < 0)
+ return ret;
+
+ conf = DP_CONF_SET_PIN_ASSIGN(BIT(ret));
+ ret = 0;
+
+ mutex_lock(&dp->lock);
+
+ if (conf & dp->data.conf)
+ goto out_unlock;
+
+ if (dp->state != DP_STATE_IDLE) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
+ assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
+ else
+ assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+
+ if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ conf |= dp->data.conf & ~DP_CONF_PIN_ASSIGNEMENT_MASK;
+
+ /* Only send Configure command if a configuration has been set */
+ if (dp->alt->active && DP_CONF_CURRENTLY(dp->data.conf)) {
+ ret = dp_altmode_configure_vdm(dp, conf);
+ if (ret)
+ goto out_unlock;
+ }
+
+ dp->data.conf = conf;
+
+out_unlock:
+ mutex_unlock(&dp->lock);
+
+ return ret ? ret : size;
+}
+
+static ssize_t pin_assignment_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dp_altmode *dp = dev_get_drvdata(dev);
+ u8 assignments;
+ int len = 0;
+ u8 cur;
+ int i;
+
+ mutex_lock(&dp->lock);
+
+ cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
+
+ if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
+ assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
+ else
+ assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+
+ for (i = 0; assignments; assignments >>= 1, i++) {
+ if (assignments & 1) {
+ if (i == cur)
+ len += sprintf(buf + len, "[%s] ",
+ pin_assignments[i]);
+ else
+ len += sprintf(buf + len, "%s ",
+ pin_assignments[i]);
+ }
+ }
+
+ mutex_unlock(&dp->lock);
+
+ if (!len)
+ return 0;
+
+ buf[len - 1] = '\n';
+ return len;
+}
+static DEVICE_ATTR_RW(pin_assignment);
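+
+/*
+ * Usage sketch (illustrative): a pin assignment is only accepted when it
+ * is listed in the partner's capabilities for the current role.
+ *
+ *	$ cat .../displayport/pin_assignment
+ *	C [D] E
+ *	$ echo "D" > .../displayport/pin_assignment
+ */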
+
+static struct attribute *dp_altmode_attrs[] = {
+ &dev_attr_configuration.attr,
+ &dev_attr_pin_assignment.attr,
+ NULL
+};
+
+static const struct attribute_group dp_altmode_group = {
+ .name = "displayport",
+ .attrs = dp_altmode_attrs,
+};
+
+static int dp_altmode_probe(struct typec_altmode *alt)
+{
+ const struct typec_altmode *port = typec_altmode_get_partner(alt);
+ struct dp_altmode *dp;
+ int ret;
+
+ /* FIXME: Port can only be DFP_U. */
+
+ /* Make sure we have compatible pin configurations */
+ if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
+ DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
+ !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
+ DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
+ return -ENODEV;
+
+ dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&dp->work, dp_altmode_work);
+ mutex_init(&dp->lock);
+ dp->port = port;
+ dp->alt = alt;
+
+ alt->desc = "DisplayPort";
+ alt->ops = &dp_altmode_ops;
+
+ typec_altmode_set_drvdata(alt, dp);
+
+ dp->state = DP_STATE_ENTER;
+ schedule_work(&dp->work);
+
+ return 0;
+}
+
+static void dp_altmode_remove(struct typec_altmode *alt)
+{
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+
+ sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
+ cancel_work_sync(&dp->work);
+}
+
+static const struct typec_device_id dp_typec_id[] = {
+ { USB_TYPEC_DP_SID, USB_TYPEC_DP_MODE },
+ { },
+};
+MODULE_DEVICE_TABLE(typec, dp_typec_id);
+
+static struct typec_altmode_driver dp_altmode_driver = {
+ .id_table = dp_typec_id,
+ .probe = dp_altmode_probe,
+ .remove = dp_altmode_remove,
+ .driver = {
+ .name = "typec_displayport",
+ .owner = THIS_MODULE,
+ },
+};
+module_typec_altmode_driver(dp_altmode_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DisplayPort Alternate Mode");
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
new file mode 100644
index 000000000000..95a2b10127db
--- /dev/null
+++ b/drivers/usb/typec/bus.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bus for USB Type-C Alternate Modes
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/usb/pd_vdo.h>
+
+#include "bus.h"
+
+static inline int typec_altmode_set_mux(struct altmode *alt, u8 state)
+{
+ return alt->mux ? alt->mux->set(alt->mux, state) : 0;
+}
+
+static int typec_altmode_set_state(struct typec_altmode *adev, int state)
+{
+ bool is_port = is_typec_port(adev->dev.parent);
+ struct altmode *port_altmode;
+ int ret;
+
+ port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
+
+ ret = typec_altmode_set_mux(port_altmode, state);
+ if (ret)
+ return ret;
+
+ blocking_notifier_call_chain(&port_altmode->nh, state, NULL);
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* Common API */
+
+/**
+ * typec_altmode_notify - Communication between the OS and alternate mode driver
+ * @adev: Handle to the alternate mode
+ * @conf: Alternate mode specific configuration value
+ * @data: Alternate mode specific data
+ *
+ * The primary purpose of this function is to allow the alternate mode drivers
+ * to tell which pin configuration has been negotiated with the partner. That
+ * information will then be used, for example, to configure the muxes.
+ * Communication in the other direction is also possible, and low-level device
+ * drivers can also send notifications to the alternate mode drivers. The actual
+ * communication will be specific to each SVID.
+ */
+int typec_altmode_notify(struct typec_altmode *adev,
+ unsigned long conf, void *data)
+{
+ bool is_port;
+ struct altmode *altmode;
+ struct altmode *partner;
+ int ret;
+
+ if (!adev)
+ return 0;
+
+ altmode = to_altmode(adev);
+
+ if (!altmode->partner)
+ return -ENODEV;
+
+ is_port = is_typec_port(adev->dev.parent);
+ partner = altmode->partner;
+
+ ret = typec_altmode_set_mux(is_port ? altmode : partner, (u8)conf);
+ if (ret)
+ return ret;
+
+ blocking_notifier_call_chain(is_port ? &altmode->nh : &partner->nh,
+ conf, data);
+
+ if (partner->adev.ops && partner->adev.ops->notify)
+ return partner->adev.ops->notify(&partner->adev, conf, data);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(typec_altmode_notify);
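+
+/*
+ * Example (illustrative sketch): once DisplayPort pin assignment D has
+ * been negotiated, the alternate mode driver could propagate the new
+ * state to the mux with something like:
+ *
+ *	ret = typec_altmode_notify(alt, TYPEC_DP_STATE_D, &dp->data);
+ *
+ * Both the conf value and the data payload are defined by the SVID in
+ * question; here they follow the DisplayPort driver above.
+ */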
+
+/**
+ * typec_altmode_enter - Enter Mode
+ * @adev: The alternate mode
+ *
+ * The alternate mode drivers use this function to enter mode. The port drivers
+ * use this to inform the alternate mode drivers that the partner has initiated
+ * the Enter Mode command.
+ */
+int typec_altmode_enter(struct typec_altmode *adev)
+{
+ struct typec_altmode *pdev;
+ int ret;
+
+ if (!adev || adev->active)
+ return 0;
+
+ pdev = &to_altmode(adev)->partner->adev;
+
+ if (!pdev->ops || !pdev->ops->enter)
+ return -EOPNOTSUPP;
+
+ /* Moving to USB Safe State */
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ if (ret)
+ return ret;
+
+ /* Enter Mode */
+ return pdev->ops->enter(pdev);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_enter);
+
+/**
+ * typec_altmode_exit - Exit Mode
+ * @adev: The alternate mode
+ *
+ * The partner of @adev has initiated the Exit Mode command.
+ */
+int typec_altmode_exit(struct typec_altmode *adev)
+{
+ struct typec_altmode *pdev;
+ int ret;
+
+ if (!adev || !adev->active)
+ return 0;
+
+ pdev = &to_altmode(adev)->partner->adev;
+
+ if (!pdev->ops || !pdev->ops->exit)
+ return -EOPNOTSUPP;
+
+ /* Moving to USB Safe State */
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ if (ret)
+ return ret;
+
+ /* Exit Mode command */
+ return pdev->ops->exit(pdev);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_exit);
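+
+/*
+ * A minimal sketch of typical use, mirroring the DisplayPort driver
+ * above, where the sysfs "active" control maps directly to these calls:
+ *
+ *	static int dp_altmode_activate(struct typec_altmode *alt, int activate)
+ *	{
+ *		return activate ? typec_altmode_enter(alt)
+ *				: typec_altmode_exit(alt);
+ *	}
+ */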
+
+/**
+ * typec_altmode_attention - Attention command
+ * @adev: The alternate mode
+ * @vdo: VDO for the Attention command
+ *
+ * Notifies the partner of @adev about an Attention command.
+ */
+void typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
+{
+ struct typec_altmode *pdev = &to_altmode(adev)->partner->adev;
+
+ if (pdev->ops && pdev->ops->attention)
+ pdev->ops->attention(pdev, vdo);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_attention);
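+
+/*
+ * The port drivers call this when an Attention command arrives; tcpm,
+ * for example, forwards the first VDO following the message header:
+ *
+ *	typec_altmode_attention(adev, p[1]);
+ */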
+
+/**
+ * typec_altmode_vdm - Send Vendor Defined Messages (VDM) to the partner
+ * @adev: Alternate mode handle
+ * @header: VDM Header
+ * @vdo: Array of Vendor Defined Data Objects
+ * @count: Number of Data Objects
+ *
+ * The alternate mode drivers use this function for SVID specific communication
+ * with the partner. The port drivers use it to deliver the Structured VDMs
+ * received from the partners to the alternate mode drivers.
+ */
+int typec_altmode_vdm(struct typec_altmode *adev,
+ const u32 header, const u32 *vdo, int count)
+{
+ struct typec_altmode *pdev;
+ struct altmode *altmode;
+
+ if (!adev)
+ return 0;
+
+ altmode = to_altmode(adev);
+
+ if (!altmode->partner)
+ return -ENODEV;
+
+ pdev = &altmode->partner->adev;
+
+ if (!pdev->ops || !pdev->ops->vdm)
+ return -EOPNOTSUPP;
+
+ return pdev->ops->vdm(pdev, header, vdo, count);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_vdm);
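+
+/*
+ * Example (sketch): an alternate mode driver sending an SVID specific
+ * command, e.g. DisplayPort Configure, would build the header with the
+ * VDO() macro from pd_vdo.h:
+ *
+ *	header = VDO(USB_TYPEC_DP_SID, 1, DP_CMD_CONFIGURE);
+ *	ret = typec_altmode_vdm(alt, header, &conf, 2);
+ *
+ * Note that @count is the total number of data objects including the
+ * header, so a single VDO gives count == 2.
+ */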
+
+const struct typec_altmode *
+typec_altmode_get_partner(struct typec_altmode *adev)
+{
+ return &to_altmode(adev)->partner->adev;
+}
+EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
+
+/* -------------------------------------------------------------------------- */
+/* API for the alternate mode drivers */
+
+/**
+ * typec_altmode_get_plug - Find cable plug alternate mode
+ * @adev: Handle to partner alternate mode
+ * @index: Cable plug index
+ *
+ * Increment reference count for cable plug alternate mode device. Returns
+ * handle to the cable plug alternate mode, or NULL if none is found.
+ */
+struct typec_altmode *typec_altmode_get_plug(struct typec_altmode *adev,
+ enum typec_plug_index index)
+{
+ struct altmode *port = to_altmode(adev)->partner;
+
+ if (port->plug[index]) {
+ get_device(&port->plug[index]->adev.dev);
+ return &port->plug[index]->adev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(typec_altmode_get_plug);
+
+/**
+ * typec_altmode_put_plug - Decrement cable plug alternate mode reference count
+ * @plug: Handle to the cable plug alternate mode
+ */
+void typec_altmode_put_plug(struct typec_altmode *plug)
+{
+ if (plug)
+ put_device(&plug->dev);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_put_plug);
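+
+/*
+ * Example (sketch): checking whether the cable plug on SOP' also
+ * supports the mode:
+ *
+ *	plug = typec_altmode_get_plug(adev, TYPEC_PLUG_SOP_P);
+ *	if (plug) {
+ *		... communicate with the plug ...
+ *		typec_altmode_put_plug(plug);
+ *	}
+ */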
+
+int __typec_altmode_register_driver(struct typec_altmode_driver *drv,
+ struct module *module)
+{
+ if (!drv->probe)
+ return -EINVAL;
+
+ drv->driver.owner = module;
+ drv->driver.bus = &typec_bus;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__typec_altmode_register_driver);
+
+void typec_altmode_unregister_driver(struct typec_altmode_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
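+
+/*
+ * A minimal driver skeleton (sketch), following the DisplayPort driver
+ * above; MY_SVID, my_probe and my_remove are placeholders for driver
+ * specific values:
+ *
+ *	static const struct typec_device_id my_id[] = {
+ *		{ MY_SVID, TYPEC_ANY_MODE },
+ *		{ },
+ *	};
+ *	MODULE_DEVICE_TABLE(typec, my_id);
+ *
+ *	static struct typec_altmode_driver my_driver = {
+ *		.id_table = my_id,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *		.driver = {
+ *			.name = "my_altmode",
+ *			.owner = THIS_MODULE,
+ *		},
+ *	};
+ *	module_typec_altmode_driver(my_driver);
+ */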
+
+/* -------------------------------------------------------------------------- */
+/* API for the port drivers */
+
+/**
+ * typec_match_altmode - Match SVID to an array of alternate modes
+ * @altmodes: Array of alternate modes
+ * @n: Number of elements in the array, or -1 for NULL-terminated arrays
+ * @svid: Standard or Vendor ID to match with
+ * @mode: Mode to match with
+ *
+ * Return pointer to an alternate mode with SVID and mode matching @svid and
+ * @mode, or NULL when no match is found.
+ */
+struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
+ size_t n, u16 svid, u8 mode)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (!altmodes[i])
+ break;
+ if (altmodes[i]->svid == svid && altmodes[i]->mode == mode)
+ return altmodes[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(typec_match_altmode);
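+
+/*
+ * tcpm, for example, uses this to route incoming VDMs to the matching
+ * alternate mode device:
+ *
+ *	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
+ *				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
+ */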
+
+/* -------------------------------------------------------------------------- */
+
+static ssize_t
+description_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct typec_altmode *alt = to_typec_altmode(dev);
+
+ return sprintf(buf, "%s\n", alt->desc ? alt->desc : "");
+}
+static DEVICE_ATTR_RO(description);
+
+static struct attribute *typec_attrs[] = {
+ &dev_attr_description.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(typec);
+
+static int typec_match(struct device *dev, struct device_driver *driver)
+{
+ struct typec_altmode_driver *drv = to_altmode_driver(driver);
+ struct typec_altmode *altmode = to_typec_altmode(dev);
+ const struct typec_device_id *id;
+
+ for (id = drv->id_table; id->svid; id++)
+ if (id->svid == altmode->svid &&
+ (id->mode == TYPEC_ANY_MODE || id->mode == altmode->mode))
+ return 1;
+ return 0;
+}
+
+static int typec_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct typec_altmode *altmode = to_typec_altmode(dev);
+
+ if (add_uevent_var(env, "SVID=%04X", altmode->svid))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MODE=%u", altmode->mode))
+ return -ENOMEM;
+
+ return add_uevent_var(env, "MODALIAS=typec:id%04Xm%02X",
+ altmode->svid, altmode->mode);
+}
+
+static int typec_altmode_create_links(struct altmode *alt)
+{
+ struct device *port_dev = &alt->partner->adev.dev;
+ struct device *dev = &alt->adev.dev;
+ int err;
+
+ err = sysfs_create_link(&dev->kobj, &port_dev->kobj, "port");
+ if (err)
+ return err;
+
+ err = sysfs_create_link(&port_dev->kobj, &dev->kobj, "partner");
+ if (err)
+ sysfs_remove_link(&dev->kobj, "port");
+
+ return err;
+}
+
+static void typec_altmode_remove_links(struct altmode *alt)
+{
+ sysfs_remove_link(&alt->partner->adev.dev.kobj, "partner");
+ sysfs_remove_link(&alt->adev.dev.kobj, "port");
+}
+
+static int typec_probe(struct device *dev)
+{
+ struct typec_altmode_driver *drv = to_altmode_driver(dev->driver);
+ struct typec_altmode *adev = to_typec_altmode(dev);
+ struct altmode *altmode = to_altmode(adev);
+ int ret;
+
+ /* Fail if the port does not support the alternate mode */
+ if (!altmode->partner)
+ return -ENODEV;
+
+ ret = typec_altmode_create_links(altmode);
+ if (ret) {
+ dev_warn(dev, "failed to create symlinks\n");
+ return ret;
+ }
+
+ ret = drv->probe(adev);
+ if (ret)
+ typec_altmode_remove_links(altmode);
+
+ return ret;
+}
+
+static int typec_remove(struct device *dev)
+{
+ struct typec_altmode_driver *drv = to_altmode_driver(dev->driver);
+ struct typec_altmode *adev = to_typec_altmode(dev);
+ struct altmode *altmode = to_altmode(adev);
+
+ typec_altmode_remove_links(altmode);
+
+ if (drv->remove)
+ drv->remove(to_typec_altmode(dev));
+
+ if (adev->active) {
+ WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE));
+ typec_altmode_update_active(adev, false);
+ }
+
+ adev->desc = NULL;
+ adev->ops = NULL;
+
+ return 0;
+}
+
+struct bus_type typec_bus = {
+ .name = "typec",
+ .dev_groups = typec_groups,
+ .match = typec_match,
+ .uevent = typec_uevent,
+ .probe = typec_probe,
+ .remove = typec_remove,
+};
diff --git a/drivers/usb/typec/bus.h b/drivers/usb/typec/bus.h
new file mode 100644
index 000000000000..db40e61d8b72
--- /dev/null
+++ b/drivers/usb/typec/bus.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __USB_TYPEC_ALTMODE_H__
+#define __USB_TYPEC_ALTMODE_H__
+
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_mux.h>
+
+struct bus_type;
+
+struct altmode {
+ unsigned int id;
+ struct typec_altmode adev;
+ struct typec_mux *mux;
+
+ enum typec_port_data roles;
+
+ struct attribute *attrs[5];
+ char group_name[8];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+
+ struct altmode *partner;
+ struct altmode *plug[2];
+
+ struct blocking_notifier_head nh;
+};
+
+#define to_altmode(d) container_of(d, struct altmode, adev)
+
+extern struct bus_type typec_bus;
+extern const struct device_type typec_altmode_dev_type;
+extern const struct device_type typec_port_dev_type;
+
+#define is_typec_altmode(_dev_) (_dev_->type == &typec_altmode_dev_type)
+#define is_typec_port(_dev_) (_dev_->type == &typec_port_dev_type)
+
+#endif /* __USB_TYPEC_ALTMODE_H__ */
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 53df10df2f9d..c202975f8097 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -10,39 +10,13 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/usb/typec.h>
-#include <linux/usb/typec_mux.h>
-struct typec_mode {
- int index;
- u32 vdo;
- char *desc;
- enum typec_port_type roles;
-
- struct typec_altmode *alt_mode;
-
- unsigned int active:1;
-
- char group_name[6];
- struct attribute_group group;
- struct attribute *attrs[5];
- struct device_attribute vdo_attr;
- struct device_attribute desc_attr;
- struct device_attribute active_attr;
- struct device_attribute roles_attr;
-};
-
-struct typec_altmode {
- struct device dev;
- u16 svid;
- int n_modes;
- struct typec_mode modes[ALTMODE_MAX_MODES];
- const struct attribute_group *mode_groups[ALTMODE_MAX_MODES];
-};
+#include "bus.h"
struct typec_plug {
struct device dev;
enum typec_plug_index index;
+ struct ida mode_ids;
};
struct typec_cable {
@@ -57,11 +31,13 @@ struct typec_partner {
unsigned int usb_pd:1;
struct usb_pd_identity *identity;
enum typec_accessory accessory;
+ struct ida mode_ids;
};
struct typec_port {
unsigned int id;
struct device dev;
+ struct ida mode_ids;
int prefer_role;
enum typec_data_role data_role;
@@ -82,17 +58,14 @@ struct typec_port {
#define to_typec_plug(_dev_) container_of(_dev_, struct typec_plug, dev)
#define to_typec_cable(_dev_) container_of(_dev_, struct typec_cable, dev)
#define to_typec_partner(_dev_) container_of(_dev_, struct typec_partner, dev)
-#define to_altmode(_dev_) container_of(_dev_, struct typec_altmode, dev)
static const struct device_type typec_partner_dev_type;
static const struct device_type typec_cable_dev_type;
static const struct device_type typec_plug_dev_type;
-static const struct device_type typec_port_dev_type;
#define is_typec_partner(_dev_) (_dev_->type == &typec_partner_dev_type)
#define is_typec_cable(_dev_) (_dev_->type == &typec_cable_dev_type)
#define is_typec_plug(_dev_) (_dev_->type == &typec_plug_dev_type)
-#define is_typec_port(_dev_) (_dev_->type == &typec_port_dev_type)
static DEFINE_IDA(typec_index_ida);
static struct class *typec_class;
@@ -174,28 +147,148 @@ static void typec_report_identity(struct device *dev)
/* ------------------------------------------------------------------------- */
/* Alternate Modes */
+static int altmode_match(struct device *dev, void *data)
+{
+ struct typec_altmode *adev = to_typec_altmode(dev);
+ struct typec_device_id *id = data;
+
+ if (!is_typec_altmode(dev))
+ return 0;
+
+ return ((adev->svid == id->svid) && (adev->mode == id->mode));
+}
+
+static void typec_altmode_set_partner(struct altmode *altmode)
+{
+ struct typec_altmode *adev = &altmode->adev;
+ struct typec_device_id id = { adev->svid, adev->mode, };
+ struct typec_port *port = typec_altmode2port(adev);
+ struct altmode *partner;
+ struct device *dev;
+
+ dev = device_find_child(&port->dev, &id, altmode_match);
+ if (!dev)
+ return;
+
+ /* Bind the port alt mode to the partner/plug alt mode. */
+ partner = to_altmode(to_typec_altmode(dev));
+ altmode->partner = partner;
+
+ /* Bind the partner/plug alt mode to the port alt mode. */
+ if (is_typec_plug(adev->dev.parent)) {
+ struct typec_plug *plug = to_typec_plug(adev->dev.parent);
+
+ partner->plug[plug->index] = altmode;
+ } else {
+ partner->partner = altmode;
+ }
+}
+
+static void typec_altmode_put_partner(struct altmode *altmode)
+{
+ struct altmode *partner = altmode->partner;
+ struct typec_altmode *adev;
+
+ if (!partner)
+ return;
+
+ adev = &partner->adev;
+
+ if (is_typec_plug(adev->dev.parent)) {
+ struct typec_plug *plug = to_typec_plug(adev->dev.parent);
+
+ partner->plug[plug->index] = NULL;
+ } else {
+ partner->partner = NULL;
+ }
+ put_device(&adev->dev);
+}
+
+static int __typec_port_match(struct device *dev, const void *name)
+{
+ return !strcmp((const char *)name, dev_name(dev));
+}
+
+static void *typec_port_match(struct device_connection *con, int ep, void *data)
+{
+ return class_find_device(typec_class, NULL, con->endpoint[ep],
+ __typec_port_match);
+}
+
+struct typec_altmode *
+typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode,
+ struct notifier_block *nb)
+{
+ struct typec_device_id id = { svid, mode, };
+ struct device *altmode_dev;
+ struct device *port_dev;
+ struct altmode *altmode;
+ int ret;
+
+ /* Find the port linked to the caller */
+ port_dev = device_connection_find_match(dev, NULL, NULL,
+ typec_port_match);
+ if (IS_ERR_OR_NULL(port_dev))
+ return port_dev ? ERR_CAST(port_dev) : ERR_PTR(-ENODEV);
+
+ /* Find the altmode with matching svid */
+ altmode_dev = device_find_child(port_dev, &id, altmode_match);
+
+ put_device(port_dev);
+
+ if (!altmode_dev)
+ return ERR_PTR(-ENODEV);
+
+ altmode = to_altmode(to_typec_altmode(altmode_dev));
+
+ /* Register notifier */
+ ret = blocking_notifier_chain_register(&altmode->nh, nb);
+ if (ret) {
+ put_device(altmode_dev);
+ return ERR_PTR(ret);
+ }
+
+ return &altmode->adev;
+}
+EXPORT_SYMBOL_GPL(typec_altmode_register_notifier);
+
+void typec_altmode_unregister_notifier(struct typec_altmode *adev,
+ struct notifier_block *nb)
+{
+ struct altmode *altmode = to_altmode(adev);
+
+ blocking_notifier_chain_unregister(&altmode->nh, nb);
+ put_device(&adev->dev);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_unregister_notifier);
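+
+/*
+ * Example (sketch): a display driver that needs to know when its port
+ * enters DisplayPort alternate mode could register a notifier with:
+ *
+ *	adev = typec_altmode_register_notifier(dev, USB_TYPEC_DP_SID,
+ *					       USB_TYPEC_DP_MODE, &nb);
+ *
+ * assuming a device connection has been described between @dev and the
+ * port. The notifier is then called with the negotiated mux state.
+ */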
+
/**
* typec_altmode_update_active - Report Enter/Exit mode
- * @alt: Handle to the alternate mode
- * @mode: Mode index
+ * @adev: Handle to the alternate mode
* @active: True when the mode has been entered
*
* If a partner or cable plug executes Enter/Exit Mode command successfully, the
* drivers use this routine to report the updated state of the mode.
*/
-void typec_altmode_update_active(struct typec_altmode *alt, int mode,
- bool active)
+void typec_altmode_update_active(struct typec_altmode *adev, bool active)
{
- struct typec_mode *m = &alt->modes[mode];
char dir[6];
- if (m->active == active)
+ if (adev->active == active)
return;
- m->active = active;
- snprintf(dir, sizeof(dir), "mode%d", mode);
- sysfs_notify(&alt->dev.kobj, dir, "active");
- kobject_uevent(&alt->dev.kobj, KOBJ_CHANGE);
+ if (!is_typec_port(adev->dev.parent)) {
+ if (!active)
+ module_put(adev->dev.driver->owner);
+ else
+ WARN_ON(!try_module_get(adev->dev.driver->owner));
+ }
+
+ adev->active = active;
+ snprintf(dir, sizeof(dir), "mode%d", adev->mode);
+ sysfs_notify(&adev->dev.kobj, dir, "active");
+ sysfs_notify(&adev->dev.kobj, NULL, "active");
+ kobject_uevent(&adev->dev.kobj, KOBJ_CHANGE);
}
EXPORT_SYMBOL_GPL(typec_altmode_update_active);
@@ -220,68 +313,78 @@ struct typec_port *typec_altmode2port(struct typec_altmode *alt)
EXPORT_SYMBOL_GPL(typec_altmode2port);
static ssize_t
-typec_altmode_vdo_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+vdo_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct typec_mode *mode = container_of(attr, struct typec_mode,
- vdo_attr);
+ struct typec_altmode *alt = to_typec_altmode(dev);
- return sprintf(buf, "0x%08x\n", mode->vdo);
+ return sprintf(buf, "0x%08x\n", alt->vdo);
}
+static DEVICE_ATTR_RO(vdo);
static ssize_t
-typec_altmode_desc_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+description_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct typec_mode *mode = container_of(attr, struct typec_mode,
- desc_attr);
+ struct typec_altmode *alt = to_typec_altmode(dev);
- return sprintf(buf, "%s\n", mode->desc ? mode->desc : "");
+ return sprintf(buf, "%s\n", alt->desc ? alt->desc : "");
}
+static DEVICE_ATTR_RO(description);
static ssize_t
-typec_altmode_active_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+active_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct typec_mode *mode = container_of(attr, struct typec_mode,
- active_attr);
+ struct typec_altmode *alt = to_typec_altmode(dev);
- return sprintf(buf, "%s\n", mode->active ? "yes" : "no");
+ return sprintf(buf, "%s\n", alt->active ? "yes" : "no");
}
-static ssize_t
-typec_altmode_active_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t active_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct typec_mode *mode = container_of(attr, struct typec_mode,
- active_attr);
- struct typec_port *port = typec_altmode2port(mode->alt_mode);
- bool activate;
+ struct typec_altmode *adev = to_typec_altmode(dev);
+ struct altmode *altmode = to_altmode(adev);
+ bool enter;
int ret;
- if (!port->cap->activate_mode)
- return -EOPNOTSUPP;
-
- ret = kstrtobool(buf, &activate);
+ ret = kstrtobool(buf, &enter);
if (ret)
return ret;
- ret = port->cap->activate_mode(port->cap, mode->index, activate);
- if (ret)
- return ret;
+ if (adev->active == enter)
+ return size;
+
+ if (is_typec_port(adev->dev.parent)) {
+ typec_altmode_update_active(adev, enter);
+
+ /* Make sure that the partner exits the mode before disabling */
+ if (altmode->partner && !enter && altmode->partner->adev.active)
+ typec_altmode_exit(&altmode->partner->adev);
+ } else if (altmode->partner) {
+ if (enter && !altmode->partner->adev.active) {
+ dev_warn(dev, "port has the mode disabled\n");
+ return -EPERM;
+ }
+ }
+
+ /* Note: If there is no driver, the mode will not be entered */
+ if (adev->ops && adev->ops->activate) {
+ ret = adev->ops->activate(adev, enter);
+ if (ret)
+ return ret;
+ }
return size;
}
+static DEVICE_ATTR_RW(active);
static ssize_t
-typec_altmode_roles_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+supported_roles_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct typec_mode *mode = container_of(attr, struct typec_mode,
- roles_attr);
+ struct altmode *alt = to_altmode(to_typec_altmode(dev));
ssize_t ret;
- switch (mode->roles) {
+ switch (alt->roles) {
case TYPEC_PORT_SRC:
ret = sprintf(buf, "source\n");
break;
@@ -295,89 +398,74 @@ typec_altmode_roles_show(struct device *dev, struct device_attribute *attr,
}
return ret;
}
+static DEVICE_ATTR_RO(supported_roles);
-static void typec_init_modes(struct typec_altmode *alt,
- const struct typec_mode_desc *desc, bool is_port)
+static ssize_t
+mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int i;
-
- for (i = 0; i < alt->n_modes; i++, desc++) {
- struct typec_mode *mode = &alt->modes[i];
-
- /* Not considering the human readable description critical */
- mode->desc = kstrdup(desc->desc, GFP_KERNEL);
- if (desc->desc && !mode->desc)
- dev_err(&alt->dev, "failed to copy mode%d desc\n", i);
-
- mode->alt_mode = alt;
- mode->vdo = desc->vdo;
- mode->roles = desc->roles;
- mode->index = desc->index;
- sprintf(mode->group_name, "mode%d", desc->index);
-
- sysfs_attr_init(&mode->vdo_attr.attr);
- mode->vdo_attr.attr.name = "vdo";
- mode->vdo_attr.attr.mode = 0444;
- mode->vdo_attr.show = typec_altmode_vdo_show;
-
- sysfs_attr_init(&mode->desc_attr.attr);
- mode->desc_attr.attr.name = "description";
- mode->desc_attr.attr.mode = 0444;
- mode->desc_attr.show = typec_altmode_desc_show;
-
- sysfs_attr_init(&mode->active_attr.attr);
- mode->active_attr.attr.name = "active";
- mode->active_attr.attr.mode = 0644;
- mode->active_attr.show = typec_altmode_active_show;
- mode->active_attr.store = typec_altmode_active_store;
-
- mode->attrs[0] = &mode->vdo_attr.attr;
- mode->attrs[1] = &mode->desc_attr.attr;
- mode->attrs[2] = &mode->active_attr.attr;
-
- /* With ports, list the roles that the mode is supported with */
- if (is_port) {
- sysfs_attr_init(&mode->roles_attr.attr);
- mode->roles_attr.attr.name = "supported_roles";
- mode->roles_attr.attr.mode = 0444;
- mode->roles_attr.show = typec_altmode_roles_show;
-
- mode->attrs[3] = &mode->roles_attr.attr;
- }
+ struct typec_altmode *adev = to_typec_altmode(dev);
- mode->group.attrs = mode->attrs;
- mode->group.name = mode->group_name;
-
- alt->mode_groups[i] = &mode->group;
- }
+ return sprintf(buf, "%u\n", adev->mode);
}
+static DEVICE_ATTR_RO(mode);
-static ssize_t svid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t
+svid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct typec_altmode *alt = to_altmode(dev);
+ struct typec_altmode *adev = to_typec_altmode(dev);
- return sprintf(buf, "%04x\n", alt->svid);
+ return sprintf(buf, "%04x\n", adev->svid);
}
static DEVICE_ATTR_RO(svid);
static struct attribute *typec_altmode_attrs[] = {
+ &dev_attr_active.attr,
+ &dev_attr_mode.attr,
&dev_attr_svid.attr,
+ &dev_attr_vdo.attr,
NULL
};
ATTRIBUTE_GROUPS(typec_altmode);
+static int altmode_id_get(struct device *dev)
+{
+ struct ida *ids;
+
+ if (is_typec_partner(dev))
+ ids = &to_typec_partner(dev)->mode_ids;
+ else if (is_typec_plug(dev))
+ ids = &to_typec_plug(dev)->mode_ids;
+ else
+ ids = &to_typec_port(dev)->mode_ids;
+
+ return ida_simple_get(ids, 0, 0, GFP_KERNEL);
+}
+
+static void altmode_id_remove(struct device *dev, int id)
+{
+ struct ida *ids;
+
+ if (is_typec_partner(dev))
+ ids = &to_typec_partner(dev)->mode_ids;
+ else if (is_typec_plug(dev))
+ ids = &to_typec_plug(dev)->mode_ids;
+ else
+ ids = &to_typec_port(dev)->mode_ids;
+
+ ida_simple_remove(ids, id);
+}
+
static void typec_altmode_release(struct device *dev)
{
- struct typec_altmode *alt = to_altmode(dev);
- int i;
+ struct altmode *alt = to_altmode(to_typec_altmode(dev));
+
+ typec_altmode_put_partner(alt);
- for (i = 0; i < alt->n_modes; i++)
- kfree(alt->modes[i].desc);
+ altmode_id_remove(alt->adev.dev.parent, alt->id);
kfree(alt);
}
-static const struct device_type typec_altmode_dev_type = {
+const struct device_type typec_altmode_dev_type = {
.name = "typec_alternate_mode",
.groups = typec_altmode_groups,
.release = typec_altmode_release,
@@ -387,44 +475,74 @@ static struct typec_altmode *
typec_register_altmode(struct device *parent,
const struct typec_altmode_desc *desc)
{
- struct typec_altmode *alt;
+ int id = altmode_id_get(parent);
+ bool is_port = is_typec_port(parent);
+ struct altmode *alt;
int ret;
+ if (id < 0)
+ return ERR_PTR(id);
alt = kzalloc(sizeof(*alt), GFP_KERNEL);
if (!alt)
return ERR_PTR(-ENOMEM);
- alt->svid = desc->svid;
- alt->n_modes = desc->n_modes;
- typec_init_modes(alt, desc->modes, is_typec_port(parent));
+ alt->adev.svid = desc->svid;
+ alt->adev.mode = desc->mode;
+ alt->adev.vdo = desc->vdo;
+ alt->roles = desc->roles;
+ alt->id = id;
+
+ alt->attrs[0] = &dev_attr_vdo.attr;
+ alt->attrs[1] = &dev_attr_description.attr;
+ alt->attrs[2] = &dev_attr_active.attr;
+
+ if (is_port) {
+ alt->attrs[3] = &dev_attr_supported_roles.attr;
+ alt->adev.active = true; /* Enabled by default */
+ }
- alt->dev.parent = parent;
- alt->dev.groups = alt->mode_groups;
- alt->dev.type = &typec_altmode_dev_type;
- dev_set_name(&alt->dev, "svid-%04x", alt->svid);
+ sprintf(alt->group_name, "mode%d", desc->mode);
+ alt->group.name = alt->group_name;
+ alt->group.attrs = alt->attrs;
+ alt->groups[0] = &alt->group;
- ret = device_register(&alt->dev);
+ alt->adev.dev.parent = parent;
+ alt->adev.dev.groups = alt->groups;
+ alt->adev.dev.type = &typec_altmode_dev_type;
+ dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id);
+
+ /* Link partners and plugs with the ports */
+ if (is_port)
+ BLOCKING_INIT_NOTIFIER_HEAD(&alt->nh);
+ else
+ typec_altmode_set_partner(alt);
+
+ /* The partner devices are bound to drivers */
+ if (is_typec_partner(parent))
+ alt->adev.dev.bus = &typec_bus;
+
+ ret = device_register(&alt->adev.dev);
if (ret) {
dev_err(parent, "failed to register alternate mode (%d)\n",
ret);
- put_device(&alt->dev);
+ put_device(&alt->adev.dev);
return ERR_PTR(ret);
}
- return alt;
+ return &alt->adev;
}
/**
* typec_unregister_altmode - Unregister Alternate Mode
- * @alt: The alternate mode to be unregistered
+ * @adev: The alternate mode to be unregistered
*
* Unregister device created with typec_partner_register_altmode(),
* typec_plug_register_altmode() or typec_port_register_altmode().
*/
-void typec_unregister_altmode(struct typec_altmode *alt)
+void typec_unregister_altmode(struct typec_altmode *adev)
{
- if (!IS_ERR_OR_NULL(alt))
- device_unregister(&alt->dev);
+ if (IS_ERR_OR_NULL(adev))
+ return;
+ typec_mux_put(to_altmode(adev)->mux);
+ device_unregister(&adev->dev);
}
EXPORT_SYMBOL_GPL(typec_unregister_altmode);
@@ -462,6 +580,7 @@ static void typec_partner_release(struct device *dev)
{
struct typec_partner *partner = to_typec_partner(dev);
+ ida_destroy(&partner->mode_ids);
kfree(partner);
}
@@ -527,6 +646,7 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
if (!partner)
return ERR_PTR(-ENOMEM);
+ ida_init(&partner->mode_ids);
partner->usb_pd = desc->usb_pd;
partner->accessory = desc->accessory;
@@ -575,6 +695,7 @@ static void typec_plug_release(struct device *dev)
{
struct typec_plug *plug = to_typec_plug(dev);
+ ida_destroy(&plug->mode_ids);
kfree(plug);
}
@@ -627,6 +748,7 @@ struct typec_plug *typec_register_plug(struct typec_cable *cable,
sprintf(name, "plug%d", desc->index);
+ ida_init(&plug->mode_ids);
plug->index = desc->index;
plug->dev.class = typec_class;
plug->dev.parent = &cable->dev;
@@ -796,12 +918,18 @@ static const char * const typec_data_roles[] = {
[TYPEC_HOST] = "host",
};
-static const char * const typec_port_types[] = {
+static const char * const typec_port_power_roles[] = {
[TYPEC_PORT_SRC] = "source",
[TYPEC_PORT_SNK] = "sink",
[TYPEC_PORT_DRP] = "dual",
};
+static const char * const typec_port_data_roles[] = {
+ [TYPEC_PORT_DFP] = "host",
+ [TYPEC_PORT_UFP] = "device",
+ [TYPEC_PORT_DRD] = "dual",
+};
+
static const char * const typec_port_types_drp[] = {
[TYPEC_PORT_SRC] = "dual [source] sink",
[TYPEC_PORT_SNK] = "dual source [sink]",
@@ -932,7 +1060,7 @@ static ssize_t power_role_store(struct device *dev,
mutex_lock(&port->port_type_lock);
if (port->port_type != TYPEC_PORT_DRP) {
dev_dbg(dev, "port type fixed at \"%s\"",
- typec_port_types[port->port_type]);
+ typec_port_power_roles[port->port_type]);
ret = -EOPNOTSUPP;
goto unlock_and_ret;
}
@@ -973,7 +1101,7 @@ port_type_store(struct device *dev, struct device_attribute *attr,
return -EOPNOTSUPP;
}
- ret = sysfs_match_string(typec_port_types, buf);
+ ret = sysfs_match_string(typec_port_power_roles, buf);
if (ret < 0)
return ret;
@@ -1007,7 +1135,7 @@ port_type_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%s\n",
typec_port_types_drp[port->port_type]);
- return sprintf(buf, "[%s]\n", typec_port_types[port->cap->type]);
+ return sprintf(buf, "[%s]\n", typec_port_power_roles[port->cap->type]);
}
static DEVICE_ATTR_RW(port_type);
@@ -1141,12 +1269,13 @@ static void typec_release(struct device *dev)
struct typec_port *port = to_typec_port(dev);
ida_simple_remove(&typec_index_ida, port->id);
+ ida_destroy(&port->mode_ids);
typec_switch_put(port->sw);
typec_mux_put(port->mux);
kfree(port);
}
-static const struct device_type typec_port_dev_type = {
+const struct device_type typec_port_dev_type = {
.name = "typec_port",
.groups = typec_groups,
.uevent = typec_uevent,
@@ -1252,6 +1381,50 @@ void typec_set_pwr_opmode(struct typec_port *port,
}
EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
+/**
+ * typec_find_port_power_role - Get the typec port power capability
+ * @name: port power capability string
+ *
+ * This routine is used to find the typec_port_type by its string name.
+ *
+ * Returns the typec_port_type value on success, otherwise a negative error code.
+ */
+int typec_find_port_power_role(const char *name)
+{
+ return match_string(typec_port_power_roles,
+ ARRAY_SIZE(typec_port_power_roles), name);
+}
+EXPORT_SYMBOL_GPL(typec_find_port_power_role);
+
+/**
+ * typec_find_power_role - Find the typec power role by name
+ * @name: power role string
+ *
+ * This routine is used to find the typec_role by its string name.
+ *
+ * Returns the typec_role value on success, otherwise a negative error code.
+ */
+int typec_find_power_role(const char *name)
+{
+ return match_string(typec_roles, ARRAY_SIZE(typec_roles), name);
+}
+EXPORT_SYMBOL_GPL(typec_find_power_role);
+
+/**
+ * typec_find_port_data_role - Get the typec port data capability
+ * @name: port data capability string
+ *
+ * This routine is used to find the typec_port_data by its string name.
+ *
+ * Returns the typec_port_data value on success, otherwise a negative error code.
+ */
+int typec_find_port_data_role(const char *name)
+{
+ return match_string(typec_port_data_roles,
+ ARRAY_SIZE(typec_port_data_roles), name);
+}
+EXPORT_SYMBOL_GPL(typec_find_port_data_role);
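+
+/*
+ * These helpers map the fixed role strings, typically coming from
+ * firmware nodes, to the matching enum values, for example:
+ *
+ *	typec_find_port_power_role("dual") == TYPEC_PORT_DRP
+ *	typec_find_power_role("sink") == TYPEC_SINK
+ *	typec_find_port_data_role("host") == TYPEC_PORT_DFP
+ */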
+
/* ------------------------------------------ */
/* API for Multiplexer/DeMultiplexer Switches */
@@ -1280,12 +1453,24 @@ int typec_set_orientation(struct typec_port *port,
EXPORT_SYMBOL_GPL(typec_set_orientation);
/**
+ * typec_get_orientation - Get USB Type-C cable plug orientation
+ * @port: USB Type-C Port
+ *
+ * Get current cable plug orientation for @port.
+ */
+enum typec_orientation typec_get_orientation(struct typec_port *port)
+{
+ return port->orientation;
+}
+EXPORT_SYMBOL_GPL(typec_get_orientation);
+
+/**
* typec_set_mode - Set mode of operation for USB Type-C connector
- * @port: USB Type-C port for the connector
- * @mode: Operation mode for the connector
+ * @port: USB Type-C connector
+ * @mode: Accessory Mode, USB Operation or Safe State
*
- * Set mode @mode for @port. This function will configure the muxes needed to
- * enter @mode.
+ * Configure @port for Accessory Mode @mode. This function will configure the
+ * muxes needed for @mode.
*/
int typec_set_mode(struct typec_port *port, int mode)
{
@@ -1299,6 +1484,7 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
* typec_port_register_altmode - Register USB Type-C Port Alternate Mode
* @port: USB Type-C Port that supports the alternate mode
* @desc: Description of the alternate mode
+ * @drvdata: Private pointer to driver specific info
*
* This routine is used to register an alternate mode that @port is capable of
* supporting.
@@ -1309,7 +1495,23 @@ struct typec_altmode *
typec_port_register_altmode(struct typec_port *port,
const struct typec_altmode_desc *desc)
{
- return typec_register_altmode(&port->dev, desc);
+ struct typec_altmode *adev;
+ struct typec_mux *mux;
+ char id[10];
+
+ sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
+
+ mux = typec_mux_get(port->dev.parent, id);
+ if (IS_ERR(mux))
+ return ERR_CAST(mux);
+
+ adev = typec_register_altmode(&port->dev, desc);
+ if (IS_ERR(adev))
+ typec_mux_put(mux);
+ else
+ to_altmode(adev)->mux = mux;
+
+ return adev;
}
EXPORT_SYMBOL_GPL(typec_port_register_altmode);
@@ -1345,7 +1547,7 @@ struct typec_port *typec_register_port(struct device *parent,
goto err_switch;
}
- port->mux = typec_mux_get(cap->fwnode ? &port->dev : parent);
+ port->mux = typec_mux_get(parent, "typec-mux");
if (IS_ERR(port->mux)) {
ret = PTR_ERR(port->mux);
goto err_mux;
@@ -1383,10 +1585,12 @@ struct typec_port *typec_register_port(struct device *parent,
break;
}
+ ida_init(&port->mode_ids);
+ mutex_init(&port->port_type_lock);
+
port->id = id;
port->cap = cap;
port->port_type = cap->type;
- mutex_init(&port->port_type_lock);
port->prefer_role = cap->prefer_role;
port->dev.class = typec_class;
@@ -1430,8 +1634,19 @@ EXPORT_SYMBOL_GPL(typec_unregister_port);
static int __init typec_init(void)
{
+ int ret;
+
+ ret = bus_register(&typec_bus);
+ if (ret)
+ return ret;
+
typec_class = class_create(THIS_MODULE, "typec");
- return PTR_ERR_OR_ZERO(typec_class);
+ if (IS_ERR(typec_class)) {
+ bus_unregister(&typec_bus);
+ return PTR_ERR(typec_class);
+ }
+
+ return 0;
}
subsys_initcall(typec_init);
@@ -1439,6 +1654,7 @@ static void __exit typec_exit(void)
{
class_destroy(typec_class);
ida_destroy(&typec_index_ida);
+ bus_unregister(&typec_bus);
}
module_exit(typec_exit);
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
index 1e68da10bf17..82bed9810be6 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/fusb302/fusb302.c
@@ -864,17 +864,6 @@ done:
return ret;
}
-static int tcpm_set_current_limit(struct tcpc_dev *dev, u32 max_ma, u32 mv)
-{
- struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
- tcpc_dev);
-
- fusb302_log(chip, "current limit: %d ma, %d mv (not implemented)",
- max_ma, mv);
-
- return 0;
-}
-
static int fusb302_pd_tx_flush(struct fusb302_chip *chip)
{
return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL0,
@@ -1213,7 +1202,6 @@ static void init_tcpc_dev(struct tcpc_dev *fusb302_tcpc_dev)
fusb302_tcpc_dev->set_polarity = tcpm_set_polarity;
fusb302_tcpc_dev->set_vconn = tcpm_set_vconn;
fusb302_tcpc_dev->set_vbus = tcpm_set_vbus;
- fusb302_tcpc_dev->set_current_limit = tcpm_set_current_limit;
fusb302_tcpc_dev->set_pd_rx = tcpm_set_pd_rx;
fusb302_tcpc_dev->set_roles = tcpm_set_roles;
fusb302_tcpc_dev->start_drp_toggling = tcpm_start_drp_toggling;
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 9d8330e9c431..ddaac63ecf12 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -123,19 +123,19 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data)
/**
* typec_mux_get - Find USB Type-C Multiplexer
* @dev: The caller device
+ * @name: Mux identifier
*
* Finds a mux linked to the caller. This function is primarily meant for the
* Type-C drivers. Returns a reference to the mux on success, NULL if no
* matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
* was found but the mux has not been enumerated yet.
*/
-struct typec_mux *typec_mux_get(struct device *dev)
+struct typec_mux *typec_mux_get(struct device *dev, const char *name)
{
struct typec_mux *mux;
mutex_lock(&mux_lock);
- mux = device_connection_find_match(dev, "typec-mux", NULL,
- typec_mux_match);
+ mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
if (!IS_ERR_OR_NULL(mux))
get_device(mux->dev);
mutex_unlock(&mux_lock);
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index b0e88db60ecf..64eb5983e17a 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/usb/tcpm.h>
+#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#define PI3USB30532_CONF 0x00
@@ -83,21 +83,24 @@ static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
new_conf = pi->conf;
switch (state) {
- case TYPEC_MUX_NONE:
+ case TYPEC_STATE_SAFE:
new_conf = PI3USB30532_CONF_OPEN;
break;
- case TYPEC_MUX_USB:
+ case TYPEC_STATE_USB:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_USB3;
break;
- case TYPEC_MUX_DP:
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_4LANE_DP;
break;
- case TYPEC_MUX_DOCK:
+ case TYPEC_DP_STATE_D:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_USB3_AND_2LANE_DP;
break;
+ default:
+ break;
}
ret = pi3usb30532_set_conf(pi, new_conf);
diff --git a/drivers/staging/typec/tcpci.c b/drivers/usb/typec/tcpci.c
index 076d97eaff6f..ac6b418b15f1 100644
--- a/drivers/staging/typec/tcpci.c
+++ b/drivers/usb/typec/tcpci.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
@@ -184,15 +185,25 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
enum typec_cc_polarity polarity)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg;
int ret;
- ret = regmap_write(tcpci->regmap, TCPC_TCPC_CTRL,
- (polarity == TYPEC_POLARITY_CC2) ?
- TCPC_TCPC_CTRL_ORIENTATION : 0);
+ /* Keep the disconnected cc line open */
+ ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
if (ret < 0)
return ret;
- return 0;
+ if (polarity == TYPEC_POLARITY_CC2)
+ reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
+ else
+ reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
+ ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(tcpci->regmap, TCPC_TCPC_CTRL,
+ (polarity == TYPEC_POLARITY_CC2) ?
+ TCPC_TCPC_CTRL_ORIENTATION : 0);
}
static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
@@ -207,12 +218,9 @@ static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
return ret;
}
- ret = regmap_write(tcpci->regmap, TCPC_POWER_CTRL,
- enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL,
+ TCPC_POWER_CTRL_VCONN_ENABLE,
+ enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
}
static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
@@ -372,6 +380,12 @@ static int tcpci_init(struct tcpc_dev *tcpc)
if (ret < 0)
return ret;
+ /* Enable Vbus detection */
+ ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
+ TCPC_CMD_ENABLE_VBUS_DETECT);
+ if (ret < 0)
+ return ret;
+
reg = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_FAILED |
TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_RX_STATUS |
TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_CC_STATUS;
@@ -463,17 +477,16 @@ static const struct regmap_config tcpci_regmap_config = {
.max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
};
-static const struct tcpc_config tcpci_tcpc_config = {
- .type = TYPEC_PORT_DFP,
- .default_role = TYPEC_SINK,
-};
-
static int tcpci_parse_config(struct tcpci *tcpci)
{
tcpci->controls_vbus = true; /* XXX */
- /* TODO: Populate struct tcpc_config from ACPI/device-tree */
- tcpci->tcpc.config = &tcpci_tcpc_config;
+ tcpci->tcpc.fwnode = device_get_named_child_node(tcpci->dev,
+ "connector");
+ if (!tcpci->tcpc.fwnode) {
+ dev_err(tcpci->dev, "Can't find connector node.\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -509,7 +522,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
return ERR_PTR(err);
tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
- if (PTR_ERR_OR_ZERO(tcpci->port))
+ if (IS_ERR(tcpci->port))
return ERR_CAST(tcpci->port);
return tcpci;
@@ -537,24 +550,27 @@ static int tcpci_probe(struct i2c_client *client,
if (IS_ERR(chip->data.regmap))
return PTR_ERR(chip->data.regmap);
+ i2c_set_clientdata(client, chip);
+
/* Disable chip interrupts before requesting irq */
err = regmap_raw_write(chip->data.regmap, TCPC_ALERT_MASK, &val,
sizeof(u16));
if (err < 0)
return err;
+ chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
+ if (IS_ERR(chip->tcpci))
+ return PTR_ERR(chip->tcpci);
+
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
IRQF_ONESHOT | IRQF_TRIGGER_LOW,
dev_name(&client->dev), chip);
- if (err < 0)
+ if (err < 0) {
+ tcpci_unregister_port(chip->tcpci);
return err;
+ }
- chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
- if (PTR_ERR_OR_ZERO(chip->tcpci))
- return PTR_ERR(chip->tcpci);
-
- i2c_set_clientdata(client, chip);
return 0;
}
@@ -575,7 +591,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
- { .compatible = "usb,tcpci", },
+ { .compatible = "nxp,ptn5110", },
{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
diff --git a/drivers/staging/typec/tcpci.h b/drivers/usb/typec/tcpci.h
index 303ebde26546..303ebde26546 100644
--- a/drivers/staging/typec/tcpci.h
+++ b/drivers/usb/typec/tcpci.h
diff --git a/drivers/staging/typec/tcpci_rt1711h.c b/drivers/usb/typec/tcpci_rt1711h.c
index 017389021b96..017389021b96 100644
--- a/drivers/staging/typec/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpci_rt1711h.c
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 8a201dd53d36..4f1f4215f3d6 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -26,7 +26,7 @@
#include <linux/usb/pd_vdo.h>
#include <linux/usb/role.h>
#include <linux/usb/tcpm.h>
-#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
#include <linux/workqueue.h>
#define FOREACH_STATE(S) \
@@ -169,13 +169,14 @@ enum pd_msg_request {
/* Alternate mode support */
#define SVID_DISCOVERY_MAX 16
+#define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
struct pd_mode_data {
int svid_index; /* current SVID index */
int nsvids;
u16 svids[SVID_DISCOVERY_MAX];
int altmodes; /* number of alternate modes */
- struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
+ struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
};
struct pd_pps_data {
@@ -310,8 +311,8 @@ struct tcpm_port {
/* Alternate mode data */
struct pd_mode_data mode_data;
- struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
- struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
+ struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
+ struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
/* Deadline in jiffies to exit src_try_wait state */
unsigned long max_wait;
@@ -418,17 +419,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
u64 ts_nsec = local_clock();
unsigned long rem_nsec;
+ mutex_lock(&port->logbuffer_lock);
if (!port->logbuffer[port->logbuffer_head]) {
port->logbuffer[port->logbuffer_head] =
kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
- if (!port->logbuffer[port->logbuffer_head])
+ if (!port->logbuffer[port->logbuffer_head]) {
+ mutex_unlock(&port->logbuffer_lock);
return;
+ }
}
vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
- mutex_lock(&port->logbuffer_lock);
-
if (tcpm_log_full(port)) {
port->logbuffer_head = max(port->logbuffer_head - 1, 0);
strcpy(tmpbuffer, "overflow");
@@ -640,14 +642,14 @@ void tcpm_pd_transmit_complete(struct tcpm_port *port,
}
EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
-static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
+static int tcpm_mux_set(struct tcpm_port *port, int state,
enum usb_role usb_role,
enum typec_orientation orientation)
{
int ret;
- tcpm_log(port, "Requesting mux mode %d, usb-role %d, orientation %d",
- mode, usb_role, orientation);
+ tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
+ state, usb_role, orientation);
ret = typec_set_orientation(port->typec_port, orientation);
if (ret)
@@ -659,7 +661,7 @@ static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
return ret;
}
- return typec_set_mode(port->typec_port, mode);
+ return typec_set_mode(port->typec_port, state);
}
static int tcpm_set_polarity(struct tcpm_port *port,
@@ -724,6 +726,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
+ port->supply_voltage = mv;
+ port->current_limit = max_ma;
+
if (port->tcpc->set_current_limit)
ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
@@ -786,7 +791,7 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
else
usb_role = USB_ROLE_DEVICE;
- ret = tcpm_mux_set(port, TYPEC_MUX_USB, usb_role, orientation);
+ ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
if (ret < 0)
return ret;
@@ -994,7 +999,6 @@ static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
{
struct pd_mode_data *pmdata = &port->mode_data;
struct typec_altmode_desc *paltmode;
- struct typec_mode_desc *pmode;
int i;
if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
@@ -1002,32 +1006,36 @@ static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
return;
}
- paltmode = &pmdata->altmode_desc[pmdata->altmodes];
- memset(paltmode, 0, sizeof(*paltmode));
+ for (i = 1; i < cnt; i++) {
+ paltmode = &pmdata->altmode_desc[pmdata->altmodes];
+ memset(paltmode, 0, sizeof(*paltmode));
- paltmode->svid = pmdata->svids[pmdata->svid_index];
+ paltmode->svid = pmdata->svids[pmdata->svid_index];
+ paltmode->mode = i;
+ paltmode->vdo = le32_to_cpu(payload[i]);
- tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
- pmdata->altmodes, paltmode->svid);
+ tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
+ pmdata->altmodes, paltmode->svid,
+ paltmode->mode, paltmode->vdo);
- for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
- pmode = &paltmode->modes[paltmode->n_modes];
- memset(pmode, 0, sizeof(*pmode));
- pmode->vdo = le32_to_cpu(payload[i]);
- pmode->index = i - 1;
- paltmode->n_modes++;
- tcpm_log(port, " VDO %d: 0x%08x",
- pmode->index, pmode->vdo);
+ pmdata->altmodes++;
}
- port->partner_altmode[pmdata->altmodes] =
- typec_partner_register_altmode(port->partner, paltmode);
- if (!port->partner_altmode[pmdata->altmodes]) {
- tcpm_log(port,
- "Failed to register alternate modes for SVID 0x%04x",
- paltmode->svid);
- return;
+}
+
+static void tcpm_register_partner_altmodes(struct tcpm_port *port)
+{
+ struct pd_mode_data *modep = &port->mode_data;
+ struct typec_altmode *altmode;
+ int i;
+
+ for (i = 0; i < modep->altmodes; i++) {
+ altmode = typec_partner_register_altmode(port->partner,
+ &modep->altmode_desc[i]);
+ if (!altmode)
+ tcpm_log(port, "Failed to register partner SVID 0x%04x",
+ modep->altmode_desc[i].svid);
+ port->partner_altmode[i] = altmode;
}
- pmdata->altmodes++;
}
#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
@@ -1035,19 +1043,32 @@ static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
u32 *response)
{
- u32 p0 = le32_to_cpu(payload[0]);
- int cmd_type = PD_VDO_CMDT(p0);
- int cmd = PD_VDO_CMD(p0);
+ struct typec_altmode *adev;
+ struct typec_altmode *pdev;
struct pd_mode_data *modep;
+ u32 p[PD_MAX_PAYLOAD];
int rlen = 0;
- u16 svid;
+ int cmd_type;
+ int cmd;
int i;
+ for (i = 0; i < cnt; i++)
+ p[i] = le32_to_cpu(payload[i]);
+
+ cmd_type = PD_VDO_CMDT(p[0]);
+ cmd = PD_VDO_CMD(p[0]);
+
tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
- p0, cmd_type, cmd, cnt);
+ p[0], cmd_type, cmd, cnt);
modep = &port->mode_data;
+ adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
+
+ pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
+
switch (cmd_type) {
case CMDT_INIT:
switch (cmd) {
@@ -1069,17 +1090,19 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
case CMD_EXIT_MODE:
break;
case CMD_ATTENTION:
- break;
+ /* Attention command does not have a response */
+ typec_altmode_attention(adev, p[1]);
+ return 0;
default:
break;
}
if (rlen >= 1) {
- response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
} else if (rlen == 0) {
- response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
rlen = 1;
} else {
- response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
rlen = 1;
}
break;
@@ -1112,14 +1135,39 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
svdm_consume_modes(port, payload, cnt);
modep->svid_index++;
if (modep->svid_index < modep->nsvids) {
- svid = modep->svids[modep->svid_index];
+ u16 svid = modep->svids[modep->svid_index];
response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
rlen = 1;
} else {
- /* enter alternate mode if/when implemented */
+ tcpm_register_partner_altmodes(port);
}
break;
case CMD_ENTER_MODE:
+ typec_altmode_update_active(pdev, true);
+
+ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+ response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ return 1;
+ }
+ return 0;
+ case CMD_EXIT_MODE:
+ typec_altmode_update_active(pdev, false);
+
+ /* Back to USB Operation */
+ WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
+ NULL));
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ /* Back to USB Operation */
+ WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
+ NULL));
break;
default:
break;
@@ -1129,6 +1177,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
break;
}
+ /* Inform the alternate mode driver about every incoming VDM */
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
+
return rlen;
}
@@ -1412,6 +1463,57 @@ static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
return 0;
}
+static int tcpm_altmode_enter(struct typec_altmode *altmode)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ u32 header;
+
+ mutex_lock(&port->lock);
+ header = VDO(altmode->svid, 1, CMD_ENTER_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm(port, header, NULL, 0);
+ mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+ mutex_unlock(&port->lock);
+
+ return 0;
+}
+
+static int tcpm_altmode_exit(struct typec_altmode *altmode)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ u32 header;
+
+ mutex_lock(&port->lock);
+ header = VDO(altmode->svid, 1, CMD_EXIT_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm(port, header, NULL, 0);
+ mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+ mutex_unlock(&port->lock);
+
+ return 0;
+}
+
+static int tcpm_altmode_vdm(struct typec_altmode *altmode,
+ u32 header, const u32 *data, int count)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+
+ mutex_lock(&port->lock);
+ tcpm_queue_vdm(port, header, data, count - 1);
+ mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+ mutex_unlock(&port->lock);
+
+ return 0;
+}
+
+static const struct typec_altmode_ops tcpm_altmode_ops = {
+ .enter = tcpm_altmode_enter,
+ .exit = tcpm_altmode_exit,
+ .vdm = tcpm_altmode_vdm,
+};
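For context — not part of the patch — these ops are what the generic Type-C bus helpers dispatch to. A minimal, hypothetical alternate-mode driver fragment; the helper names come from <linux/usb/typec_altmode.h>, everything else is illustrative:

    /* Hypothetical alt mode driver: asks the port driver (tcpm) to enter the mode */
    #include <linux/usb/typec_altmode.h>

    static int example_altmode_activate(struct typec_altmode *alt)
    {
            /* Routed by the typec bus to tcpm_altmode_enter() above */
            return typec_altmode_enter(alt);
    }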
+
/*
* PD (data, control) command handling functions
*/
@@ -2136,7 +2238,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
* PPS APDO. Again skip the first sink PDO as this will
* always be 5V 3A.
*/
- for (j = i; j < port->nr_snk_pdo; j++) {
+ for (j = 1; j < port->nr_snk_pdo; j++) {
pdo = port->snk_pdo[j];
switch (pdo_type(pdo)) {
@@ -2431,15 +2533,15 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge)
return 0;
}
-static bool tcpm_start_drp_toggling(struct tcpm_port *port)
+static bool tcpm_start_drp_toggling(struct tcpm_port *port,
+ enum typec_cc_status cc)
{
int ret;
if (port->tcpc->start_drp_toggling &&
port->port_type == TYPEC_PORT_DRP) {
tcpm_log_force(port, "Start DRP toggling");
- ret = port->tcpc->start_drp_toggling(port->tcpc,
- tcpm_rp_cc(port));
+ ret = port->tcpc->start_drp_toggling(port->tcpc, cc);
if (!ret)
return true;
}
@@ -2543,7 +2645,7 @@ out_disable_vconn:
out_disable_pd:
port->tcpc->set_pd_rx(port->tcpc, false);
out_disable_mux:
- tcpm_mux_set(port, TYPEC_MUX_NONE, USB_ROLE_NONE,
+ tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
TYPEC_ORIENTATION_NONE);
return ret;
}
@@ -2589,13 +2691,11 @@ static void tcpm_reset_port(struct tcpm_port *port)
tcpm_init_vconn(port);
tcpm_set_current_limit(port, 0, 0);
tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
- tcpm_mux_set(port, TYPEC_MUX_NONE, USB_ROLE_NONE,
+ tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
TYPEC_ORIENTATION_NONE);
tcpm_set_attached_state(port, false);
port->try_src_count = 0;
port->try_snk_count = 0;
- port->supply_voltage = 0;
- port->current_limit = 0;
port->usb_type = POWER_SUPPLY_USB_TYPE_C;
power_supply_changed(port->psy);
@@ -2747,7 +2847,7 @@ static void run_state_machine(struct tcpm_port *port)
if (!port->non_pd_role_swap)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_src_detach(port);
- if (tcpm_start_drp_toggling(port)) {
+ if (tcpm_start_drp_toggling(port, tcpm_rp_cc(port))) {
tcpm_set_state(port, DRP_TOGGLING, 0);
break;
}
@@ -2922,7 +3022,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_pps_complete(port, -ENOTCONN);
tcpm_snk_detach(port);
- if (tcpm_start_drp_toggling(port)) {
+ if (tcpm_start_drp_toggling(port, TYPEC_CC_RD)) {
tcpm_set_state(port, DRP_TOGGLING, 0);
break;
}
@@ -3043,7 +3143,8 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_port_is_sink(port) &&
time_is_after_jiffies(port->delayed_runtime)) {
tcpm_set_state(port, SNK_DISCOVERY,
- port->delayed_runtime - jiffies);
+ jiffies_to_msecs(port->delayed_runtime -
+ jiffies));
break;
}
tcpm_set_state(port, unattached_state(port), 0);
@@ -4236,6 +4337,81 @@ static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
return nr_vdo;
}
+static int tcpm_fw_get_caps(struct tcpm_port *port,
+ struct fwnode_handle *fwnode)
+{
+ const char *cap_str;
+ int ret;
+ u32 mw;
+
+ if (!fwnode)
+ return -EINVAL;
+
+ /* USB data support is optional */
+ ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
+ if (ret == 0) {
+ port->typec_caps.data = typec_find_port_data_role(cap_str);
+ if (port->typec_caps.data < 0)
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
+ if (ret < 0)
+ return ret;
+
+ port->typec_caps.type = typec_find_port_power_role(cap_str);
+ if (port->typec_caps.type < 0)
+ return -EINVAL;
+ port->port_type = port->typec_caps.type;
+
+ if (port->port_type == TYPEC_PORT_SNK)
+ goto sink;
+
+ /* Get source pdos */
+ ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
+ NULL, 0);
+ if (ret <= 0)
+ return -EINVAL;
+
+ port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
+ port->src_pdo, port->nr_src_pdo);
+ if ((ret < 0) || tcpm_validate_caps(port, port->src_pdo,
+ port->nr_src_pdo))
+ return -EINVAL;
+
+ if (port->port_type == TYPEC_PORT_SRC)
+ return 0;
+
+ /* Get the preferred power role for DRP */
+ ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str);
+ if (ret < 0)
+ return ret;
+
+ port->typec_caps.prefer_role = typec_find_power_role(cap_str);
+ if (port->typec_caps.prefer_role < 0)
+ return -EINVAL;
+sink:
+ /* Get sink pdos */
+ ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
+ NULL, 0);
+ if (ret <= 0)
+ return -EINVAL;
+
+ port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
+ port->snk_pdo, port->nr_snk_pdo);
+ if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
+ port->nr_snk_pdo))
+ return -EINVAL;
+
+ if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
+ return -EINVAL;
+ port->operating_snk_mw = mw / 1000;
+
+ return 0;
+}
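tcpm_fw_get_caps() relies on the fwnode idiom of calling fwnode_property_read_u32_array() with a NULL buffer first, which returns the element count rather than reading data. A minimal sketch of the idiom, using a made-up property name:

    u32 vals[PDO_MAX_OBJECTS];
    /* With a NULL buffer the call returns the number of elements */
    int n = fwnode_property_read_u32_array(fwnode, "example-pdos", NULL, 0);

    if (n > 0) {
            n = min(n, PDO_MAX_OBJECTS);    /* clamp before the real read */
            fwnode_property_read_u32_array(fwnode, "example-pdos", vals, n);
    }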
+
int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
unsigned int nr_pdo)
{
@@ -4521,12 +4697,36 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
return PTR_ERR_OR_ZERO(port->psy);
}
+static int tcpm_copy_caps(struct tcpm_port *port,
+ const struct tcpc_config *tcfg)
+{
+ if (tcpm_validate_caps(port, tcfg->src_pdo, tcfg->nr_src_pdo) ||
+ tcpm_validate_caps(port, tcfg->snk_pdo, tcfg->nr_snk_pdo))
+ return -EINVAL;
+
+ port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcfg->src_pdo,
+ tcfg->nr_src_pdo);
+ port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcfg->snk_pdo,
+ tcfg->nr_snk_pdo);
+
+ port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcfg->snk_vdo,
+ tcfg->nr_snk_vdo);
+
+ port->operating_snk_mw = tcfg->operating_snk_mw;
+
+ port->typec_caps.prefer_role = tcfg->default_role;
+ port->typec_caps.type = tcfg->type;
+ port->typec_caps.data = tcfg->data;
+
+ return 0;
+}
+
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
int i, err;
- if (!dev || !tcpc || !tcpc->config ||
+ if (!dev || !tcpc ||
!tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
!tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
!tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
@@ -4556,29 +4756,18 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
init_completion(&port->pps_complete);
tcpm_debugfs_init(port);
- if (tcpm_validate_caps(port, tcpc->config->src_pdo,
- tcpc->config->nr_src_pdo) ||
- tcpm_validate_caps(port, tcpc->config->snk_pdo,
- tcpc->config->nr_snk_pdo)) {
- err = -EINVAL;
+ err = tcpm_fw_get_caps(port, tcpc->fwnode);
+ if ((err < 0) && tcpc->config)
+ err = tcpm_copy_caps(port, tcpc->config);
+ if (err < 0)
goto out_destroy_wq;
- }
- port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
- tcpc->config->nr_src_pdo);
- port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
- tcpc->config->nr_snk_pdo);
- port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
- tcpc->config->nr_snk_vdo);
-
- port->operating_snk_mw = tcpc->config->operating_snk_mw;
- if (!tcpc->config->try_role_hw)
- port->try_role = tcpc->config->default_role;
+
+ if (!tcpc->config || !tcpc->config->try_role_hw)
+ port->try_role = port->typec_caps.prefer_role;
else
port->try_role = TYPEC_NO_PREFERRED_ROLE;
- port->typec_caps.prefer_role = tcpc->config->default_role;
- port->typec_caps.type = tcpc->config->type;
- port->typec_caps.data = tcpc->config->data;
+ port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
port->typec_caps.dr_set = tcpm_dr_set;
@@ -4588,7 +4777,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->typec_caps.port_type_set = tcpm_port_type_set;
port->partner_desc.identity = &port->partner_ident;
- port->port_type = tcpc->config->type;
+ port->port_type = port->typec_caps.type;
port->role_sw = usb_role_switch_get(port->dev);
if (IS_ERR(port->role_sw)) {
@@ -4606,7 +4795,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_destroy_wq;
}
- if (tcpc->config->alt_modes) {
+ if (tcpc->config && tcpc->config->alt_modes) {
const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
i = 0;
@@ -4621,6 +4810,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
dev_name(dev), paltmode->svid);
break;
}
+ typec_altmode_set_drvdata(alt, port);
+ alt->ops = &tcpm_altmode_ops;
port->port_altmode[i] = alt;
i++;
paltmode++;
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index 4b4c8d271b27..c84c8c189e90 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -81,12 +81,21 @@ struct tps6598x {
struct typec_capability typec_cap;
};
+/*
+ * Max data bytes for Data1, Data2, and other registers. See ch 1.3.2:
+ * http://www.ti.com/lit/ug/slvuan1a/slvuan1a.pdf
+ */
+#define TPS_MAX_LEN 64
+
static int
tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
{
- u8 data[len + 1];
+ u8 data[TPS_MAX_LEN + 1];
int ret;
+ if (WARN_ON(len + 1 > sizeof(data)))
+ return -EINVAL;
+
if (!tps->i2c_protocol)
return regmap_raw_read(tps->regmap, reg, val, len);
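The tps6598x change above replaces a variable-length array on the stack with a worst-case fixed buffer plus an explicit guard, the usual pattern for removing VLAs from the kernel. A generic sketch of the pattern (names are illustrative):

    #define EXAMPLE_MAX_LEN 64

    static int example_block_read(void *val, size_t len)
    {
            u8 data[EXAMPLE_MAX_LEN + 1];   /* fixed worst case, no VLA */

            if (WARN_ON(len + 1 > sizeof(data)))
                    return -EINVAL;         /* reject oversized requests */

            /* ... fill data[], then copy len bytes out to val ... */
            return 0;
    }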
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index bd5cca5632b3..8d0a6fe748bd 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
}
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+ typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+ switch (con->status.partner_type) {
+ case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ typec_set_data_role(con->port, TYPEC_HOST);
+ break;
+ case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ typec_set_data_role(con->port, TYPEC_DEVICE);
+ break;
+ default:
+ break;
+ }
+
if (con->status.connected)
ucsi_register_partner(con);
else
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 44eb4e1ea817..a18112a83fae 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
return -ENODEV;
}
+ /* This will make sure we can use ioremap_nocache() */
+ status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
+ if (ACPI_FAILURE(status))
+ return -ENOMEM;
+
/*
* NOTE: The memory region for the data structures is used also in an
* operation region, which means ACPI has already reserved it. Therefore
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index c3ddd0f1f449..f101347e3ea3 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -159,10 +159,11 @@ static int skel_flush(struct file *file, fl_owner_t id)
static void skel_read_bulk_callback(struct urb *urb)
{
struct usb_skel *dev;
+ unsigned long flags;
dev = urb->context;
- spin_lock(&dev->err_lock);
+ spin_lock_irqsave(&dev->err_lock, flags);
/* sync/async unlink faults aren't errors */
if (urb->status) {
if (!(urb->status == -ENOENT ||
@@ -177,7 +178,7 @@ static void skel_read_bulk_callback(struct urb *urb)
dev->bulk_in_filled = urb->actual_length;
}
dev->ongoing_read = 0;
- spin_unlock(&dev->err_lock);
+ spin_unlock_irqrestore(&dev->err_lock, flags);
wake_up_interruptible(&dev->bulk_in_wait);
}
@@ -331,6 +332,7 @@ exit:
static void skel_write_bulk_callback(struct urb *urb)
{
struct usb_skel *dev;
+ unsigned long flags;
dev = urb->context;
@@ -343,9 +345,9 @@ static void skel_write_bulk_callback(struct urb *urb)
"%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
- spin_lock(&dev->err_lock);
+ spin_lock_irqsave(&dev->err_lock, flags);
dev->errors = urb->status;
- spin_unlock(&dev->err_lock);
+ spin_unlock_irqrestore(&dev->err_lock, flags);
}
/* free up our allocated buffer */
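The usb-skeleton change matters because URB completion handlers can run in hard-interrupt context while the same err_lock is also taken from process context; a plain spin_lock() in the handler could deadlock if the completion interrupted a lock holder on the same CPU. A sketch of the two sides, assuming the usual usb-skeleton fields:

    /* Process context (read/write paths): disabling IRQs outright is fine */
    spin_lock_irq(&dev->err_lock);
    rv = dev->errors;
    dev->errors = 0;
    spin_unlock_irq(&dev->err_lock);

    /* Completion handler: may run with IRQs already off, so save/restore */
    spin_lock_irqsave(&dev->err_lock, flags);
    dev->errors = urb->status;
    spin_unlock_irqrestore(&dev->err_lock, flags);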
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 1b9a4f87db59..1634d8698e15 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -279,12 +279,10 @@ static int vep_disable(struct usb_ep *_ep)
static struct usb_request *vep_alloc_request(struct usb_ep *_ep,
gfp_t mem_flags)
{
- struct vep *ep;
struct vrequest *req;
if (!_ep)
return NULL;
- ep = to_vep(_ep);
req = kzalloc(sizeof(*req), mem_flags);
if (!req)
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 33d2f5d7f33b..14ac8c98ac9e 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -217,7 +217,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
0, secd, sizeof(*secd));
- if (result < sizeof(*secd)) {
+ if (result < (int)sizeof(*secd)) {
dev_err(dev, "Can't read security descriptor or "
"not enough data: %d\n", result);
goto out;
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 7fca4e7e556d..01f2f21830c0 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1918,7 +1918,7 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue);
*/
int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
{
- unsigned long flags, flags2;
+ unsigned long flags;
struct wa_xfer *xfer;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
@@ -1964,10 +1964,10 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
goto out_unlock;
}
/* Check the delayed list -> if there, release and complete */
- spin_lock_irqsave(&wa->xfer_list_lock, flags2);
+ spin_lock(&wa->xfer_list_lock);
if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
goto dequeue_delayed;
- spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
+ spin_unlock(&wa->xfer_list_lock);
if (xfer->seg == NULL) /* still hasn't reached */
goto out_unlock; /* setup(), enqueue_b() completes */
/* Ok, the xfer is in flight already, it's been setup and submitted.*/
@@ -2054,7 +2054,7 @@ out_unlock:
dequeue_delayed:
list_del_init(&xfer->list_node);
- spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
+ spin_unlock(&wa->xfer_list_lock);
xfer->result = urb->status;
spin_unlock_irqrestore(&xfer->lock, flags);
wa_xfer_giveback(xfer);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 9a53912bdfe9..5d3ba747ae17 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -873,6 +873,7 @@ error_get_version:
error_rc_add:
usb_put_intf(iface);
usb_put_dev(hwarc->usb_dev);
+ kfree(hwarc);
error_alloc:
uwb_rc_put(uwb_rc);
error_rc_alloc:
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 24ee2605b9f0..42dc1d3d71cf 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
def_bool y if !S390
config VFIO_PCI_IGD
- depends on VFIO_PCI
- def_bool y if X86
+ bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+ depends on VFIO_PCI && X86
+ default y
+ help
+ Support for Intel IGD specific extensions to enable direct
+ assignment to virtual machines. This includes exposing an IGD
+ specific firmware table and read-only copies of the host bridge
+ and LPC bridge config space.
+
+ To enable Intel IGD assignment through vfio-pci, say Y.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b423a309a6e0..cddb453a1ba5 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
+#include <linux/nospec.h>
#include "vfio_pci_private.h"
@@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data,
if (info.index >=
VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
+ info.index = array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vdev->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
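array_index_nospec() clamps the index after the bounds check so that even a mispredicted branch cannot dereference out of bounds during speculation (Spectre v1). The general pattern, sketched:

    #include <linux/nospec.h>

    if (index >= size)
            return -EINVAL;
    /* index is now bounded even on speculative paths */
    index = array_index_nospec(index, size);
    val = table[index];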
@@ -785,7 +789,7 @@ static long vfio_pci_ioctl(void *device_data,
case VFIO_PCI_ERR_IRQ_INDEX:
if (pci_is_pcie(vdev->pdev))
break;
- /* pass thru to return error */
+ /* fall through */
default:
return -EINVAL;
}
@@ -1010,8 +1014,7 @@ reset_info_exit:
&info, slot);
if (!ret)
/* User has access, do the reset */
- ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
- pci_try_reset_bus(vdev->pdev->bus);
+ ret = pci_reset_bus(vdev->pdev);
hot_reset_release:
for (i--; i >= 0; i--)
@@ -1189,6 +1192,19 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
+ /*
+ * Prevent binding to PFs with VFs enabled; this too easily allows a
+ * userspace instance to hold both VFs and the PF of the same device,
+ * which cannot work. Disabling SR-IOV here would initiate removing
+ * the VFs, which would unbind the driver, and that is prone to
+ * blocking if a VF is also in use by vfio-pci. Just reject these
+ * PFs and let the user sort it out.
+ */
+ if (pci_num_vf(pdev)) {
+ pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
+ return -EBUSY;
+ }
+
group = vfio_iommu_group_get(&pdev->dev);
if (!group)
return -EINVAL;
@@ -1373,8 +1389,7 @@ static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
}
if (needs_reset)
- ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
- pci_try_reset_bus(vdev->pdev->bus);
+ ret = pci_reset_bus(vdev->pdev);
put_devs:
for (i = 0; i < devs.cur_index; i++) {
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 759a5bdd40e1..96721b154454 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -211,44 +211,6 @@ static long tce_iommu_register_pages(struct tce_container *container,
return 0;
}
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
- struct mm_struct *mm)
-{
- unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
- tbl->it_size, PAGE_SIZE);
- unsigned long *uas;
- long ret;
-
- BUG_ON(tbl->it_userspace);
-
- ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
- if (ret)
- return ret;
-
- uas = vzalloc(cb);
- if (!uas) {
- decrement_locked_vm(mm, cb >> PAGE_SHIFT);
- return -ENOMEM;
- }
- tbl->it_userspace = uas;
-
- return 0;
-}
-
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
- struct mm_struct *mm)
-{
- unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
- tbl->it_size, PAGE_SIZE);
-
- if (!tbl->it_userspace)
- return;
-
- vfree(tbl->it_userspace);
- tbl->it_userspace = NULL;
- decrement_locked_vm(mm, cb >> PAGE_SHIFT);
-}
-
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
/*
@@ -457,17 +419,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
}
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
- unsigned long tce, unsigned long size,
+ unsigned long tce, unsigned long shift,
unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem;
- mem = mm_iommu_lookup(container->mm, tce, size);
+ mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
if (!mem)
return -EINVAL;
- ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+ ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
if (ret)
return -EINVAL;
@@ -482,20 +444,20 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
if (!pua)
return;
- ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
- &hpa, &mem);
+ ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
+ tbl->it_page_shift, &hpa, &mem);
if (ret)
- pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
- __func__, *pua, entry, ret);
+ pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
+ __func__, be64_to_cpu(*pua), entry, ret);
if (mem)
mm_iommu_mapped_dec(mem);
- *pua = 0;
+ *pua = cpu_to_be64(0);
}
static int tce_iommu_clear(struct tce_container *container,
@@ -599,19 +561,12 @@ static long tce_iommu_build_v2(struct tce_container *container,
unsigned long hpa;
enum dma_data_direction dirtmp;
- if (!tbl->it_userspace) {
- ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
- if (ret)
- return ret;
- }
-
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
- entry + i);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
ret = tce_iommu_prereg_ua_to_hpa(container,
- tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+ tce, tbl->it_page_shift, &hpa, &mem);
if (ret)
break;
@@ -642,7 +597,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
if (dirtmp != DMA_NONE)
tce_iommu_unuse_page_v2(container, tbl, entry + i);
- *pua = tce;
+ *pua = cpu_to_be64(tce);
tce += IOMMU_PAGE_SIZE(tbl);
}
@@ -676,7 +631,7 @@ static long tce_iommu_create_table(struct tce_container *container,
page_shift, window_size, levels, ptbl);
WARN_ON(!ret && !(*ptbl)->it_ops->free);
- WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
+ WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));
return ret;
}
@@ -686,7 +641,6 @@ static void tce_iommu_free_table(struct tce_container *container,
{
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
- tce_iommu_userspace_view_free(tbl, container->mm);
iommu_tce_table_put(tbl);
decrement_locked_vm(container->mm, pages);
}
@@ -1201,7 +1155,6 @@ static void tce_iommu_release_ownership(struct tce_container *container,
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_userspace_view_free(tbl, container->mm);
if (tbl->it_map)
iommu_release_ownership(tbl);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2c75b33db4ac..d9fd3188615d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
struct page *page[1];
struct vm_area_struct *vma;
struct vm_area_struct *vmas[1];
+ unsigned int flags = 0;
int ret;
+ if (prot & IOMMU_WRITE)
+ flags |= FOLL_WRITE;
+
+ down_read(&mm->mmap_sem);
if (mm == current->mm) {
- ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
- page, vmas);
+ ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
} else {
- unsigned int flags = 0;
-
- if (prot & IOMMU_WRITE)
- flags |= FOLL_WRITE;
-
- down_read(&mm->mmap_sem);
ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
vmas, NULL);
/*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
ret = -EOPNOTSUPP;
put_page(page[0]);
}
- up_read(&mm->mmap_sem);
}
+ up_read(&mm->mmap_sem);
if (ret == 1) {
*pfn = page_to_pfn(page[0]);
@@ -1603,6 +1601,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
break;
case VFIO_TYPE1_NESTING_IOMMU:
iommu->nesting = true;
+ /* fall through */
case VFIO_TYPE1v2_IOMMU:
iommu->v2 = true;
break;
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index cfdecea5078f..b580885243f7 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -12,7 +12,7 @@ config VHOST_NET
config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
- depends on TARGET_CORE && EVENTFD && m
+ depends on TARGET_CORE && EVENTFD
select VHOST
default n
---help---
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 686dc670fd29..4e656f89cb22 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -78,6 +78,10 @@ enum {
};
enum {
+ VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+};
+
+enum {
VHOST_NET_VQ_RX = 0,
VHOST_NET_VQ_TX = 1,
VHOST_NET_VQ_MAX = 2,
@@ -94,7 +98,7 @@ struct vhost_net_ubuf_ref {
struct vhost_virtqueue *vq;
};
-#define VHOST_RX_BATCH 64
+#define VHOST_NET_BATCH 64
struct vhost_net_buf {
void **queue;
int tail;
@@ -168,7 +172,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
rxq->head = 0;
rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
- VHOST_RX_BATCH);
+ VHOST_NET_BATCH);
return rxq->tail;
}
@@ -396,13 +400,10 @@ static inline unsigned long busy_clock(void)
return local_clock() >> 10;
}
-static bool vhost_can_busy_poll(struct vhost_dev *dev,
- unsigned long endtime)
+static bool vhost_can_busy_poll(unsigned long endtime)
{
- return likely(!need_resched()) &&
- likely(!time_after(busy_clock(), endtime)) &&
- likely(!signal_pending(current)) &&
- !vhost_has_work(dev);
+ return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
+ !signal_pending(current));
}
static void vhost_net_disable_vq(struct vhost_net *n,
@@ -431,21 +432,42 @@ static int vhost_net_enable_vq(struct vhost_net *n,
return vhost_poll_start(poll, sock->file);
}
+static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_dev *dev = vq->dev;
+
+ if (!nvq->done_idx)
+ return;
+
+ vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+ nvq->done_idx = 0;
+}
+
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
- struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num)
+ struct vhost_net_virtqueue *nvq,
+ unsigned int *out_num, unsigned int *in_num,
+ bool *busyloop_intr)
{
+ struct vhost_virtqueue *vq = &nvq->vq;
unsigned long uninitialized_var(endtime);
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
+ if (!vhost_sock_zcopy(vq->private_data))
+ vhost_net_signal_used(nvq);
preempt_disable();
endtime = busy_clock() + vq->busyloop_timeout;
- while (vhost_can_busy_poll(vq->dev, endtime) &&
- vhost_vq_avail_empty(vq->dev, vq))
+ while (vhost_can_busy_poll(endtime)) {
+ if (vhost_has_work(vq->dev)) {
+ *busyloop_intr = true;
+ break;
+ }
+ if (!vhost_vq_avail_empty(vq->dev, vq))
+ break;
cpu_relax();
+ }
preempt_enable();
r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
@@ -463,9 +485,62 @@ static bool vhost_exceeds_maxpend(struct vhost_net *net)
min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
-/* Expects to be always run from workqueue - which acts as
- * read-size critical section for our kind of RCU. */
-static void handle_tx(struct vhost_net *net)
+static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
+ size_t hdr_size, int out)
+{
+ /* Skip header. TODO: support TSO. */
+ size_t len = iov_length(vq->iov, out);
+
+ iov_iter_init(iter, WRITE, vq->iov, out, len);
+ iov_iter_advance(iter, hdr_size);
+
+ return iov_iter_count(iter);
+}
+
+static bool vhost_exceeds_weight(int pkts, int total_len)
+{
+ return total_len >= VHOST_NET_WEIGHT ||
+ pkts >= VHOST_NET_PKT_WEIGHT;
+}
+
+static int get_tx_bufs(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct msghdr *msg,
+ unsigned int *out, unsigned int *in,
+ size_t *len, bool *busyloop_intr)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ int ret;
+
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
+
+ if (ret < 0 || ret == vq->num)
+ return ret;
+
+ if (*in) {
+ vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n",
+ *out, *in);
+ return -EFAULT;
+ }
+
+ /* Sanity check */
+ *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
+ if (*len == 0) {
+ vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
+ *len, nvq->vhost_hlen);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
+{
+ return total_len < VHOST_NET_WEIGHT &&
+ !vhost_vq_avail_empty(vq->dev, vq);
+}
+
+static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
@@ -480,67 +555,103 @@ static void handle_tx(struct vhost_net *net)
};
size_t len, total_len = 0;
int err;
- size_t hdr_size;
- struct socket *sock;
- struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
- bool zcopy, zcopy_used;
int sent_pkts = 0;
- mutex_lock(&vq->mutex);
- sock = vq->private_data;
- if (!sock)
- goto out;
+ for (;;) {
+ bool busyloop_intr = false;
- if (!vq_iotlb_prefetch(vq))
- goto out;
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev,
+ vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
+ break;
+ }
- vhost_disable_notify(&net->dev, vq);
- vhost_net_disable_vq(net, vq);
+ vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->done_idx].len = 0;
- hdr_size = nvq->vhost_hlen;
- zcopy = nvq->ubufs;
+ total_len += len;
+ if (tx_can_batch(vq, total_len))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags &= ~MSG_MORE;
+
+ /* TODO: Check specific error and bomb out unless ENOBUFS? */
+ err = sock->ops->sendmsg(sock, &msg, len);
+ if (unlikely(err < 0)) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ if (err != len)
+ pr_debug("Truncated TX packet: len %d != %zd\n",
+ err, len);
+ if (++nvq->done_idx >= VHOST_NET_BATCH)
+ vhost_net_signal_used(nvq);
+ if (vhost_exceeds_weight(++sent_pkts, total_len)) {
+ vhost_poll_queue(&vq->poll);
+ break;
+ }
+ }
+
+ vhost_net_signal_used(nvq);
+}
+
+static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned out, in;
+ int head;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ size_t len, total_len = 0;
+ int err;
+ struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
+ bool zcopy_used;
+ int sent_pkts = 0;
for (;;) {
- /* Release DMAs done buffers first */
- if (zcopy)
- vhost_zerocopy_signal_used(net, vq);
+ bool busyloop_intr;
+ /* Release DMAs done buffers first */
+ vhost_zerocopy_signal_used(net, vq);
- head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
- ARRAY_SIZE(vq->iov),
- &out, &in);
+ busyloop_intr = false;
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
}
break;
}
- if (in) {
- vq_err(vq, "Unexpected descriptor format for TX: "
- "out %d, int %d\n", out, in);
- break;
- }
- /* Skip header. TODO: support TSO. */
- len = iov_length(vq->iov, out);
- iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
- iov_iter_advance(&msg.msg_iter, hdr_size);
- /* Sanity check */
- if (!msg_data_left(&msg)) {
- vq_err(vq, "Unexpected header len for TX: "
- "%zd expected %zd\n",
- len, hdr_size);
- break;
- }
- len = msg_data_left(&msg);
- zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
- && !vhost_exceeds_maxpend(net)
- && vhost_net_tx_select_zcopy(net);
+ zcopy_used = len >= VHOST_GOODCOPY_LEN
+ && !vhost_exceeds_maxpend(net)
+ && vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
@@ -562,10 +673,8 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = NULL;
ubufs = NULL;
}
-
total_len += len;
- if (total_len < VHOST_NET_WEIGHT &&
- !vhost_vq_avail_empty(&net->dev, vq) &&
+ if (tx_can_batch(vq, total_len) &&
likely(!vhost_exceeds_maxpend(net))) {
msg.msg_flags |= MSG_MORE;
} else {
@@ -592,12 +701,37 @@ static void handle_tx(struct vhost_net *net)
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
- unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) {
+ if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
vhost_poll_queue(&vq->poll);
break;
}
}
+}
+
+/* Expects to always be run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct socket *sock;
+
+ mutex_lock(&vq->mutex);
+ sock = vq->private_data;
+ if (!sock)
+ goto out;
+
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
+ vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
+
+ if (vhost_sock_zcopy(sock))
+ handle_tx_zerocopy(net, sock);
+ else
+ handle_tx_copy(net, sock);
+
out:
mutex_unlock(&vq->mutex);
}
@@ -633,53 +767,50 @@ static int sk_has_rx_data(struct sock *sk)
return skb_queue_empty(&sk->sk_receive_queue);
}
-static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
+ bool *busyloop_intr)
{
- struct vhost_virtqueue *vq = &nvq->vq;
- struct vhost_dev *dev = vq->dev;
-
- if (!nvq->done_idx)
- return;
-
- vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
- nvq->done_idx = 0;
-}
-
-static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
-{
- struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
- struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
- struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
unsigned long uninitialized_var(endtime);
- int len = peek_head_len(rvq, sk);
+ int len = peek_head_len(rnvq, sk);
- if (!len && vq->busyloop_timeout) {
+ if (!len && tvq->busyloop_timeout) {
/* Flush batched heads first */
- vhost_rx_signal_used(rvq);
+ vhost_net_signal_used(rnvq);
/* Both tx vq and rx socket were polled here */
- mutex_lock_nested(&vq->mutex, 1);
- vhost_disable_notify(&net->dev, vq);
+ mutex_lock_nested(&tvq->mutex, 1);
+ vhost_disable_notify(&net->dev, tvq);
preempt_disable();
- endtime = busy_clock() + vq->busyloop_timeout;
+ endtime = busy_clock() + tvq->busyloop_timeout;
- while (vhost_can_busy_poll(&net->dev, endtime) &&
- !sk_has_rx_data(sk) &&
- vhost_vq_avail_empty(&net->dev, vq))
+ while (vhost_can_busy_poll(endtime)) {
+ if (vhost_has_work(&net->dev)) {
+ *busyloop_intr = true;
+ break;
+ }
+ if ((sk_has_rx_data(sk) &&
+ !vhost_vq_avail_empty(&net->dev, rvq)) ||
+ !vhost_vq_avail_empty(&net->dev, tvq))
+ break;
cpu_relax();
+ }
preempt_enable();
- if (!vhost_vq_avail_empty(&net->dev, vq))
- vhost_poll_queue(&vq->poll);
- else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
- vhost_disable_notify(&net->dev, vq);
- vhost_poll_queue(&vq->poll);
+ if (!vhost_vq_avail_empty(&net->dev, tvq)) {
+ vhost_poll_queue(&tvq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
+ vhost_disable_notify(&net->dev, tvq);
+ vhost_poll_queue(&tvq->poll);
}
- mutex_unlock(&vq->mutex);
+ mutex_unlock(&tvq->mutex);
- len = peek_head_len(rvq, sk);
+ len = peek_head_len(rnvq, sk);
}
return len;
@@ -786,6 +917,7 @@ static void handle_rx(struct vhost_net *net)
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
+ bool busyloop_intr = false;
struct socket *sock;
struct iov_iter fixup;
__virtio16 num_buffers;
@@ -809,7 +941,8 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+ while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -820,7 +953,9 @@ static void handle_rx(struct vhost_net *net)
goto out;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
- if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
vhost_disable_notify(&net->dev, vq);
@@ -830,6 +965,7 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
+ busyloop_intr = false;
if (nvq->rx_ring)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
@@ -885,20 +1021,22 @@ static void handle_rx(struct vhost_net *net)
goto out;
}
nvq->done_idx += headcount;
- if (nvq->done_idx > VHOST_RX_BATCH)
- vhost_rx_signal_used(nvq);
+ if (nvq->done_idx > VHOST_NET_BATCH)
+ vhost_net_signal_used(nvq);
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
- if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
- unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) {
+ if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
vhost_poll_queue(&vq->poll);
goto out;
}
}
- vhost_net_enable_vq(net, vq);
+ if (unlikely(busyloop_intr))
+ vhost_poll_queue(&vq->poll);
+ else
+ vhost_net_enable_vq(net, vq);
out:
- vhost_rx_signal_used(nvq);
+ vhost_net_signal_used(nvq);
mutex_unlock(&vq->mutex);
}
@@ -951,7 +1089,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return -ENOMEM;
}
- queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
+ queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
GFP_KERNEL);
if (!queue) {
kfree(vqs);
@@ -1226,7 +1364,8 @@ err_used:
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
- sockfd_put(sock);
+ if (sock)
+ sockfd_put(sock);
err_vq:
mutex_unlock(&vq->mutex);
err:
@@ -1264,6 +1403,21 @@ done:
return err;
}
+static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
+{
+ int i;
+
+ mutex_lock(&n->dev.mutex);
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ mutex_lock(&n->vqs[i].vq.mutex);
+ n->vqs[i].vq.acked_backend_features = features;
+ mutex_unlock(&n->vqs[i].vq.mutex);
+ }
+ mutex_unlock(&n->dev.mutex);
+
+ return 0;
+}
+
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
size_t vhost_hlen, sock_hlen, hdr_len;
@@ -1354,6 +1508,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
if (features & ~VHOST_NET_FEATURES)
return -EOPNOTSUPP;
return vhost_net_set_features(n, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_NET_BACKEND_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_NET_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ return vhost_net_set_backend_features(n, features);
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
case VHOST_SET_OWNER:
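A hedged sketch of the userspace side of the new negotiation — error handling omitted and the vhost file descriptor setup assumed:

    __u64 features;

    /* Ask the kernel which backend features it offers */
    ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features);

    /* Acknowledge only what this process supports */
    features &= (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2);
    ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features);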
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 17fcd3b2e686..c24bb690680b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -46,7 +46,6 @@
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
-#include <linux/percpu_ida.h>
#include "vhost.h"
@@ -56,7 +55,7 @@
#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
-#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
@@ -324,7 +323,7 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
}
vhost_scsi_put_inflight(tv_cmd->inflight);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
@@ -567,7 +566,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
struct se_session *se_sess;
struct scatterlist *sg, *prot_sg;
struct page **pages;
- int tag;
+ int tag, cpu;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
@@ -576,7 +575,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
}
se_sess = tv_nexus->tvn_se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0) {
pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM);
@@ -591,6 +590,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
cmd->tvc_se_cmd.map_tag = tag;
+ cmd->tvc_se_cmd.map_cpu = cpu;
cmd->tvc_tag = scsi_tag;
cmd->tvc_lun = lun;
cmd->tvc_task_attr = task_attr;
@@ -1738,7 +1738,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
VHOST_SCSI_DEFAULT_TAGS,
sizeof(struct vhost_scsi_cmd),
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
@@ -1797,7 +1797,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1912,9 +1912,7 @@ static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
};
static struct se_portal_group *
-vhost_scsi_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a502f1af4a21..96c1d8400822 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -315,6 +315,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->log_addr = -1ull;
vq->private_data = NULL;
vq->acked_features = 0;
+ vq->acked_backend_features = 0;
vq->log_base = NULL;
vq->error_ctx = NULL;
vq->kick = NULL;
@@ -1027,28 +1028,40 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct iov_iter *from)
{
- struct vhost_msg_node node;
- unsigned size = sizeof(struct vhost_msg);
- size_t ret;
- int err;
+ struct vhost_iotlb_msg msg;
+ size_t offset;
+ int type, ret;
- if (iov_iter_count(from) < size)
- return 0;
- ret = copy_from_iter(&node.msg, size, from);
- if (ret != size)
+ ret = copy_from_iter(&type, sizeof(type), from);
+ if (ret != sizeof(type))
goto done;
- switch (node.msg.type) {
+ switch (type) {
case VHOST_IOTLB_MSG:
- err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
- if (err)
- ret = err;
+ /* There may be a hole after the type field in the V1 message
+ * format, so skip it here.
+ */
+ offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ offset = sizeof(__u32);
break;
default:
ret = -EINVAL;
- break;
+ goto done;
+ }
+
+ iov_iter_advance(from, offset);
+ ret = copy_from_iter(&msg, sizeof(msg), from);
+ if (ret != sizeof(msg))
+ goto done;
+ if (vhost_process_iotlb_msg(dev, &msg)) {
+ ret = -EFAULT;
+ goto done;
}
+ ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
+ sizeof(struct vhost_msg_v2);
done:
return ret;
}
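The offset math above: after the 32-bit type is consumed, a V1 struct vhost_msg has a compiler-inserted hole before its iotlb union on 64-bit builds, hence offsetof() minus sizeof(int); a V2 message instead has an explicit 32-bit reserved field, hence sizeof(__u32). A sketch of a userspace V2 write that would land here (assuming an IOTLB update):

    struct vhost_msg_v2 msg = {
            .type = VHOST_IOTLB_MSG_V2,
            .iotlb = {
                    .iova  = iova,
                    .size  = size,
                    .uaddr = uaddr,
                    .perm  = VHOST_ACCESS_RW,
                    .type  = VHOST_IOTLB_UPDATE,
            },
    };

    write(vhost_fd, &msg, sizeof(msg));  /* parsed by vhost_chr_write_iter() */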
@@ -1107,13 +1120,28 @@ ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
finish_wait(&dev->wait, &wait);
if (node) {
- ret = copy_to_iter(&node->msg, size, to);
+ struct vhost_iotlb_msg *msg;
+ void *start = &node->msg;
- if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
+ switch (node->msg.type) {
+ case VHOST_IOTLB_MSG:
+ size = sizeof(node->msg);
+ msg = &node->msg.iotlb;
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ size = sizeof(node->msg_v2);
+ msg = &node->msg_v2.iotlb;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ ret = copy_to_iter(start, size, to);
+ if (ret != size || msg->type != VHOST_IOTLB_MISS) {
kfree(node);
return ret;
}
-
vhost_enqueue_msg(dev, &dev->pending_list, node);
}
@@ -1126,12 +1154,19 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
struct vhost_dev *dev = vq->dev;
struct vhost_msg_node *node;
struct vhost_iotlb_msg *msg;
+ bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
- node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
+ node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
if (!node)
return -ENOMEM;
- msg = &node->msg.iotlb;
+ if (v2) {
+ node->msg_v2.type = VHOST_IOTLB_MSG_V2;
+ msg = &node->msg_v2.iotlb;
+ } else {
+ msg = &node->msg.iotlb;
+ }
+
msg->type = VHOST_IOTLB_MISS;
msg->iova = iova;
msg->perm = access;
@@ -1560,9 +1595,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
d->iotlb = niotlb;
for (i = 0; i < d->nvqs; ++i) {
- mutex_lock(&d->vqs[i]->mutex);
- d->vqs[i]->iotlb = niotlb;
- mutex_unlock(&d->vqs[i]->mutex);
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->iotlb = niotlb;
+ __vhost_vq_meta_reset(vq);
+ mutex_unlock(&vq->mutex);
}
vhost_umem_clean(oiotlb);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 6c844b90a168..466ef7542291 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -132,6 +132,7 @@ struct vhost_virtqueue {
struct vhost_umem *iotlb;
void *private_data;
u64 acked_features;
+ u64 acked_backend_features;
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
@@ -147,7 +148,10 @@ struct vhost_virtqueue {
};
struct vhost_msg_node {
- struct vhost_msg msg;
+ union {
+ struct vhost_msg msg;
+ struct vhost_msg_v2 msg_v2;
+ };
struct vhost_virtqueue *vq;
struct list_head node;
};
@@ -238,6 +242,11 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
return vq->acked_features & (1ULL << bit);
}
+static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return vq->acked_backend_features & (1ULL << bit);
+}
+
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 16119bde9750..f1dc41cf19e3 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -690,6 +690,7 @@ static int adp8860_probe(struct i2c_client *client,
switch (ADP8860_MANID(reg_val)) {
case ADP8863_MANUFID:
data->gdwn_dis = !!pdata->gdwn_dis;
+ /* fall through */
case ADP8860_MANUFID:
data->en_ambl_sens = !!pdata->en_ambl_sens;
break;
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 44ac5bde4e9d..bdfcc0a71db1 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -143,11 +143,116 @@ static const struct backlight_ops pwm_backlight_ops = {
};
#ifdef CONFIG_OF
+#define PWM_LUMINANCE_SCALE 10000 /* luminance scale */
+
+/* An integer based power function */
+static u64 int_pow(u64 base, int exp)
+{
+ u64 result = 1;
+
+ while (exp) {
+ if (exp & 1)
+ result *= base;
+ exp >>= 1;
+ base *= base;
+ }
+
+ return result;
+}
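int_pow() is plain binary exponentiation: each loop iteration squares the base and consumes one bit of the exponent. A worked example as a comment:

    /*
     * int_pow(3, 5), exp = 0b101:
     *   bit 0 = 1 -> result = 3;    base = 9;    exp = 0b10
     *   bit 0 = 0 -> result = 3;    base = 81;   exp = 0b1
     *   bit 0 = 1 -> result = 243;  base = 6561; exp = 0
     * => 3^5 = 243 in three iterations instead of five multiplies.
     */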
+
+/*
+ * CIE lightness to PWM conversion.
+ *
+ * The CIE 1931 lightness formula is what actually describes how we perceive
+ * light:
+ * Y = (L* / 902.3) if L* ≤ 8
+ * Y = ((L* + 16) / 116)^3 if L* > 8
+ *
+ * Where Y is the luminance, the amount of light coming out of the screen, and
+ * is a number between 0.0 and 1.0; and L* is the lightness, how bright a human
+ * perceives the screen to be, and is a number between 0 and 100.
+ *
+ * The following function does the fixed point maths needed to implement the
+ * above formula.
+ */
+static u64 cie1931(unsigned int lightness, unsigned int scale)
+{
+ u64 retval;
+
+ lightness *= 100;
+ if (lightness <= (8 * scale)) {
+ retval = DIV_ROUND_CLOSEST_ULL(lightness * 10, 9023);
+ } else {
+ retval = int_pow((lightness + (16 * scale)) / 116, 3);
+ retval = DIV_ROUND_CLOSEST_ULL(retval, (scale * scale));
+ }
+
+ return retval;
+}
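A worked example of the fixed-point path, with scale = 10000 and lightness = 5000 (L* = 50%): lightness * 100 = 500000, which is above 8 * scale, so retval = ((500000 + 160000) / 116)^3 / scale^2 = 5689^3 / 10^8 ≈ 1841, i.e. roughly 18.4% duty — matching the real-valued ((50 + 16) / 116)^3 ≈ 0.184.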
+
+/*
+ * Create a default correction table for PWM values to create linear brightness
+ * for LED based backlights using the CIE1931 algorithm.
+ */
+static
+int pwm_backlight_brightness_default(struct device *dev,
+ struct platform_pwm_backlight_data *data,
+ unsigned int period)
+{
+ unsigned int counter = 0;
+ unsigned int i, n;
+ u64 retval;
+
+ /*
+ * Count the number of bits needed to represent the period number. The
+ * bit count is used to size the brightness-levels table; the purpose
+ * of this calculation is to have a pre-computed table with enough
+ * levels for linear brightness perception. The period is divided by
+ * the number of bits, so for an 8-bit PWM we have 255 / 8 = 32
+ * brightness levels and for a 16-bit PWM we have 65535 / 16 = 4096
+ * brightness levels.
+ *
+ * Note that this method is based on empirical testing on different
+ * devices with PWM of 8 and 16 bits of resolution.
+ */
+ n = period;
+ while (n) {
+ counter += n % 2;
+ n >>= 1;
+ }
+
+ data->max_brightness = DIV_ROUND_UP(period, counter);
+ data->levels = devm_kcalloc(dev, data->max_brightness,
+ sizeof(*data->levels), GFP_KERNEL);
+ if (!data->levels)
+ return -ENOMEM;
+
+ /* Fill the table using the cie1931 algorithm */
+ for (i = 0; i < data->max_brightness; i++) {
+ retval = cie1931((i * PWM_LUMINANCE_SCALE) /
+ data->max_brightness, PWM_LUMINANCE_SCALE) *
+ period;
+ retval = DIV_ROUND_CLOSEST_ULL(retval, PWM_LUMINANCE_SCALE);
+ if (retval > UINT_MAX)
+ return -EINVAL;
+ data->levels[i] = (unsigned int)retval;
+ }
+
+ data->dft_brightness = data->max_brightness / 2;
+ data->max_brightness--;
+
+ return 0;
+}
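Note the while loop is a population count (set bits), not strictly the bit width; the two agree for the all-ones periods the comment cites. Worked numbers, as a comment:

    /*
     * period = 255   (0xFF)   ->  8 set bits -> DIV_ROUND_UP(255, 8)    = 32 levels
     * period = 65535 (0xFFFF) -> 16 set bits -> DIV_ROUND_UP(65535, 16) = 4096 levels
     * (hweight32(period) would compute the same count)
     */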
+
static int pwm_backlight_parse_dt(struct device *dev,
struct platform_pwm_backlight_data *data)
{
struct device_node *node = dev->of_node;
+ unsigned int num_levels = 0;
+ unsigned int levels_count;
+ unsigned int num_steps = 0;
struct property *prop;
+ unsigned int *table;
int length;
u32 value;
int ret;
@@ -157,16 +262,20 @@ static int pwm_backlight_parse_dt(struct device *dev,
memset(data, 0, sizeof(*data));
- /* determine the number of brightness levels */
+ /*
+ * Determine the number of brightness levels; if this property is
+ * not set, a default table of brightness levels will be used.
+ */
prop = of_find_property(node, "brightness-levels", &length);
if (!prop)
- return -EINVAL;
+ return 0;
data->max_brightness = length / sizeof(u32);
/* read brightness levels from DT property */
if (data->max_brightness > 0) {
size_t size = sizeof(*data->levels) * data->max_brightness;
+ unsigned int i, j, n = 0;
data->levels = devm_kzalloc(dev, size, GFP_KERNEL);
if (!data->levels)
@@ -184,6 +293,84 @@ static int pwm_backlight_parse_dt(struct device *dev,
return ret;
data->dft_brightness = value;
+
+ /*
+ * This property is optional; if it is set, it enables linear
+ * interpolation between the brightness-levels values and creates
+ * a new pre-computed table.
+ */
+ of_property_read_u32(node, "num-interpolated-steps",
+ &num_steps);
+
+ /*
+ * Make sure that there are at least two entries in the
+ * brightness-levels table; otherwise we can't interpolate
+ * between two points.
+ */
+ if (num_steps) {
+ if (data->max_brightness < 2) {
+ dev_err(dev, "can't interpolate\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Recalculate the number of brightness levels, now
+ * taking into account the number of interpolated
+ * steps between two adjacent levels.
+ */
+ for (i = 0; i < data->max_brightness - 1; i++) {
+ if ((data->levels[i + 1] - data->levels[i]) /
+ num_steps)
+ num_levels += num_steps;
+ else
+ num_levels++;
+ }
+ num_levels++;
+ dev_dbg(dev, "new number of brightness levels: %d\n",
+ num_levels);
+
+ /*
+ * Create a new table of brightness levels with all the
+ * interpolated steps.
+ */
+ size = sizeof(*table) * num_levels;
+ table = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ /* Fill the interpolated table. */
+ levels_count = 0;
+ for (i = 0; i < data->max_brightness - 1; i++) {
+ value = data->levels[i];
+ n = (data->levels[i + 1] - value) / num_steps;
+ if (n > 0) {
+ for (j = 0; j < num_steps; j++) {
+ table[levels_count] = value;
+ value += n;
+ levels_count++;
+ }
+ } else {
+ table[levels_count] = data->levels[i];
+ levels_count++;
+ }
+ }
+ table[levels_count] = data->levels[i];
+
+ /*
+ * Since we use interpolation, free the current
+ * brightness-levels table and replace it with the
+ * new interpolated table.
+ */
+ devm_kfree(dev, data->levels);
+ data->levels = table;
+
+ /*
+ * Update max_brightness to the new total number
+ * of brightness levels.
+ */
+ data->max_brightness = num_levels;
+ }
+
data->max_brightness--;
}
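A worked example of the interpolation: brightness-levels = <0 255> with num-interpolated-steps = 16 gives n = (255 - 0) / 16 = 15, so the rebuilt table is 0, 15, 30, …, 225 followed by the final original level 255 — 17 entries in total, and max_brightness becomes 17 before the trailing decrement (so the highest valid index is 16).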
@@ -211,6 +398,14 @@ static int pwm_backlight_parse_dt(struct device *dev,
{
return -ENODEV;
}
+
+static
+int pwm_backlight_brightness_default(struct device *dev,
+ struct platform_pwm_backlight_data *data,
+ unsigned int period)
+{
+ return -ENODEV;
+}
#endif
static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
@@ -251,7 +446,9 @@ static int pwm_backlight_probe(struct platform_device *pdev)
struct backlight_device *bl;
struct device_node *node = pdev->dev.of_node;
struct pwm_bl_data *pb;
+ struct pwm_state state;
struct pwm_args pargs;
+ unsigned int i;
int ret;
if (!data) {
@@ -276,17 +473,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
- if (data->levels) {
- unsigned int i;
-
- for (i = 0; i <= data->max_brightness; i++)
- if (data->levels[i] > pb->scale)
- pb->scale = data->levels[i];
-
- pb->levels = data->levels;
- } else
- pb->scale = data->max_brightness;
-
pb->notify = data->notify;
pb->notify_after = data->notify_after;
pb->check_fb = data->check_fb;
@@ -353,6 +539,26 @@ static int pwm_backlight_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "got pwm for backlight\n");
+ if (!data->levels) {
+ /* Get the PWM period (in nanoseconds) */
+ pwm_get_state(pb->pwm, &state);
+
+ ret = pwm_backlight_brightness_default(&pdev->dev, data,
+ state.period);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to setup default brightness table\n");
+ goto err_alloc;
+ }
+ }
+
+ for (i = 0; i <= data->max_brightness; i++) {
+ if (data->levels[i] > pb->scale)
+ pb->scale = data->levels[i];
+
+ pb->levels = data->levels;
+ }
+
/*
* FIXME: pwm_apply_args() should be removed when switching to
* the atomic PWM API.
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 4110ba7d7ca9..787792c3d08d 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -150,6 +150,17 @@ config FRAMEBUFFER_CONSOLE_ROTATION
such that other users of the framebuffer will remain normally
oriented.
+config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ bool "Framebuffer Console Deferred Takeover"
+ depends on FB=y && FRAMEBUFFER_CONSOLE && DUMMY_CONSOLE
+ help
+ If enabled, this defers the framebuffer console taking over the
+ console from the dummy console until the first text is displayed on
+ the console. This is useful in combination with the "quiet" kernel
+ command line option to keep the framebuffer contents initially put up
+ by the firmware in place, rather than replacing the contents with a
+ black screen as soon as fbcon loads.
+
config STI_CONSOLE
bool "STI text console"
depends on PARISC && HAS_IOMEM
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index f2eafe2ed980..45ad925ad5f8 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -26,6 +26,65 @@
#define DUMMY_ROWS CONFIG_DUMMY_CONSOLE_ROWS
#endif
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+/* These are both protected by the console_lock */
+static RAW_NOTIFIER_HEAD(dummycon_output_nh);
+static bool dummycon_putc_called;
+
+void dummycon_register_output_notifier(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&dummycon_output_nh, nb);
+
+ if (dummycon_putc_called)
+ nb->notifier_call(nb, 0, NULL);
+}
+
+void dummycon_unregister_output_notifier(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&dummycon_output_nh, nb);
+}
+
+static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+{
+ dummycon_putc_called = true;
+ raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
+}
+
+static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
+ int count, int ypos, int xpos)
+{
+ int i;
+
+ if (!dummycon_putc_called) {
+ /* Ignore erases */
+ for (i = 0 ; i < count; i++) {
+ if (s[i] != vc->vc_video_erase_char)
+ break;
+ }
+ if (i == count)
+ return;
+
+ dummycon_putc_called = true;
+ }
+
+ raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
+}
+
+static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+{
+ /* Redraw, so that we get putc(s) for output done while blanked */
+ return 1;
+}
+#else
+static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
+static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
+ int count, int ypos, int xpos) { }
+static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+{
+ return 0;
+}
+#endif
+
static const char *dummycon_startup(void)
{
return "dummy device";
@@ -44,9 +103,6 @@ static void dummycon_init(struct vc_data *vc, int init)
static void dummycon_deinit(struct vc_data *vc) { }
static void dummycon_clear(struct vc_data *vc, int sy, int sx, int height,
int width) { }
-static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
-static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos) { }
static void dummycon_cursor(struct vc_data *vc, int mode) { }
static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
@@ -61,11 +117,6 @@ static int dummycon_switch(struct vc_data *vc)
return 0;
}
-static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
-{
- return 0;
-}
-
static int dummycon_font_set(struct vc_data *vc, struct console_font *font,
unsigned int flags)
{
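A minimal consumer sketch for the notifier API these dummycon hunks add (my_takeover and my_nb are hypothetical names; the register/unregister functions are the ones introduced above). Registration must happen under the console_lock, since that is what protects the notifier head; note that the register call invokes the callback immediately if output has already occurred:

    #include <linux/console.h>
    #include <linux/notifier.h>

    static int my_takeover(struct notifier_block *nb, unsigned long action,
                           void *data)
    {
            /* first real output has reached the dummy console */
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_takeover };

    static void my_register(void)
    {
            console_lock();
            dummycon_register_output_notifier(&my_nb);
            console_unlock();
    }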
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f09e17b60e45..09731b2f6815 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -112,6 +112,11 @@ EXPORT_SYMBOL(vgacon_text_force);
static int __init text_mode(char *str)
{
vgacon_text_mode_force = true;
+
+ pr_warning("You have booted with nomodeset. This means your GPU drivers are DISABLED\n");
+ pr_warning("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n");
+ pr_warning("Unless you actually understand what nomodeset does, you should reboot without enabling it\n");
+
return 1;
}
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index cc11c6061298..0777aff211e5 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -2303,7 +2303,7 @@ static void ami_build_copper(struct fb_info *info)
ami_rebuild_copper(info->par);
}
-
+#ifndef MODULE
static void __init amifb_setup_mcap(char *spec)
{
char *p;
@@ -2368,7 +2368,7 @@ static int __init amifb_setup(char *options)
return 0;
}
-
+#endif
static int amifb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index c910e74d46ff..75ebbbf0a1fb 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -129,6 +129,12 @@ static inline void fbcon_map_override(void)
}
#endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+static bool deferred_takeover = true;
+#else
+#define deferred_takeover false
+#endif
+
/* font data */
static char fontname[40];
@@ -499,6 +505,12 @@ static int __init fb_console_setup(char *this_opt)
margin_color = simple_strtoul(options, &options, 0);
continue;
}
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ if (!strcmp(options, "nodefer")) {
+ deferred_takeover = false;
+ continue;
+ }
+#endif
}
return 1;
}
@@ -828,6 +840,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
struct fb_info *oldinfo = NULL;
int found, err = 0;
+ WARN_CONSOLE_UNLOCKED();
+
if (oldidx == newidx)
return 0;
@@ -2220,8 +2234,8 @@ static int fbcon_switch(struct vc_data *vc)
*
* info->currcon = vc->vc_num;
*/
- for (i = 0; i < FB_MAX; i++) {
- if (registered_fb[i] != NULL && registered_fb[i]->fbcon_par) {
+ for_each_registered_fb(i) {
+ if (registered_fb[i]->fbcon_par) {
struct fbcon_ops *o = registered_fb[i]->fbcon_par;
o->currcon = vc->vc_num;
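These loop conversions rely on the for_each_registered_fb() helper from <linux/fb.h>. A sketch of its expected expansion (paraphrased from memory, not copied from the header): it walks all FB_MAX slots and skips NULL entries, which is why the explicit registered_fb[i] != NULL checks can be dropped:

    #define for_each_registered_fb(i)               \
            for (i = 0; i < FB_MAX; i++)            \
                    if (!registered_fb[i]) {} else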
@@ -3044,6 +3058,8 @@ static int fbcon_fb_unbind(int idx)
{
int i, new_idx = -1, ret = 0;
+ WARN_CONSOLE_UNLOCKED();
+
if (!fbcon_has_console_bind)
return 0;
@@ -3094,6 +3110,11 @@ static int fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
+ WARN_CONSOLE_UNLOCKED();
+
+ if (deferred_takeover)
+ return 0;
+
idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] == idx)
@@ -3103,11 +3124,9 @@ static int fbcon_fb_unregistered(struct fb_info *info)
if (idx == info_idx) {
info_idx = -1;
- for (i = 0; i < FB_MAX; i++) {
- if (registered_fb[i] != NULL) {
- info_idx = i;
- break;
- }
+ for_each_registered_fb(i) {
+ info_idx = i;
+ break;
}
}
@@ -3131,6 +3150,16 @@ static int fbcon_fb_unregistered(struct fb_info *info)
static void fbcon_remap_all(int idx)
{
int i;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (deferred_takeover) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map_boot[i] = idx;
+ fbcon_map_override();
+ return;
+ }
+
for (i = first_fb_vc; i <= last_fb_vc; i++)
set_con2fb_map(i, idx, 0);
@@ -3177,9 +3206,16 @@ static int fbcon_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
+ WARN_CONSOLE_UNLOCKED();
+
idx = info->node;
fbcon_select_primary(info);
+ if (deferred_takeover) {
+ pr_info("fbcon: Deferring console take-over\n");
+ return 0;
+ }
+
if (info_idx == -1) {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map_boot[i] == idx) {
@@ -3555,23 +3591,64 @@ static int fbcon_init_device(void)
return 0;
}
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+static void fbcon_register_existing_fbs(struct work_struct *work)
+{
+ int i;
+
+ console_lock();
+
+ for_each_registered_fb(i)
+ fbcon_fb_registered(registered_fb[i]);
+
+ console_unlock();
+}
+
+static struct notifier_block fbcon_output_nb;
+static DECLARE_WORK(fbcon_deferred_takeover_work, fbcon_register_existing_fbs);
+
+static int fbcon_output_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ pr_info("fbcon: Taking over console\n");
+
+ dummycon_unregister_output_notifier(&fbcon_output_nb);
+ deferred_takeover = false;
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
+ /* We may get called in atomic context */
+ schedule_work(&fbcon_deferred_takeover_work);
+
+ return NOTIFY_OK;
+}
+#endif
+
static void fbcon_start(void)
{
+ WARN_CONSOLE_UNLOCKED();
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ if (conswitchp != &dummy_con)
+ deferred_takeover = false;
+
+ if (deferred_takeover) {
+ fbcon_output_nb.notifier_call = fbcon_output_notifier;
+ dummycon_register_output_notifier(&fbcon_output_nb);
+ return;
+ }
+#endif
+
if (num_registered_fb) {
int i;
- console_lock();
-
- for (i = 0; i < FB_MAX; i++) {
- if (registered_fb[i] != NULL) {
- info_idx = i;
- break;
- }
+ for_each_registered_fb(i) {
+ info_idx = i;
+ break;
}
do_fbcon_takeover(0);
- console_unlock();
-
}
}
@@ -3583,18 +3660,22 @@ static void fbcon_exit(void)
if (fbcon_has_exited)
return;
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ if (deferred_takeover) {
+ dummycon_unregister_output_notifier(&fbcon_output_nb);
+ deferred_takeover = false;
+ }
+#endif
+
kfree((void *)softback_buf);
softback_buf = 0UL;
- for (i = 0; i < FB_MAX; i++) {
+ for_each_registered_fb(i) {
int pending = 0;
mapped = 0;
info = registered_fb[i];
- if (info == NULL)
- continue;
-
if (info->queue.func)
pending = cancel_work_sync(&info->queue);
DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
@@ -3650,8 +3731,8 @@ void __init fb_console_init(void)
for (i = 0; i < MAX_NR_CONSOLES; i++)
con2fb_map[i] = -1;
- console_unlock();
fbcon_start();
+ console_unlock();
}
#ifdef MODULE
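The notifier above runs with the console lock held, possibly in atomic context, so the actual takeover is bounced to process context via schedule_work(). A reduced sketch of that pattern (hypothetical names, mirroring fbcon_output_notifier and fbcon_deferred_takeover_work):

    static void takeover_work_fn(struct work_struct *work)
    {
            console_lock();
            /* safe to sleep and do the heavy lifting here */
            console_unlock();
    }
    static DECLARE_WORK(takeover_work, takeover_work_fn);

    static int output_cb(struct notifier_block *nb, unsigned long action,
                         void *data)
    {
            schedule_work(&takeover_work);  /* never sleep in the notifier */
            return NOTIFY_OK;
    }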
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 609438d2465b..20405421a5ed 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1347,6 +1347,7 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
case FBIOGET_CON2FBMAP:
case FBIOPUT_CON2FBMAP:
arg = (unsigned long) compat_ptr(arg);
+ /* fall through */
case FBIOBLANK:
ret = do_fb_ioctl(info, cmd, arg);
break;
@@ -1593,10 +1594,8 @@ static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
int i, ret;
/* check all firmware fbs and kick off if the base addr overlaps */
- for (i = 0 ; i < FB_MAX; i++) {
+ for_each_registered_fb(i) {
struct apertures_struct *gen_aper;
- if (!registered_fb[i])
- continue;
if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
continue;
@@ -1691,25 +1690,30 @@ static int do_register_framebuffer(struct fb_info *fb_info)
event.info = fb_info;
if (!lockless_register_fb)
console_lock();
+ else
+ atomic_inc(&ignore_console_lock_warning);
if (!lock_fb_info(fb_info)) {
- if (!lockless_register_fb)
- console_unlock();
- return -ENODEV;
+ ret = -ENODEV;
+ goto unlock_console;
}
+ ret = 0;
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
unlock_fb_info(fb_info);
+unlock_console:
if (!lockless_register_fb)
console_unlock();
- return 0;
+ else
+ atomic_dec(&ignore_console_lock_warning);
+ return ret;
}
-static int do_unregister_framebuffer(struct fb_info *fb_info)
+static int unbind_console(struct fb_info *fb_info)
{
struct fb_event event;
- int i, ret = 0;
+ int ret;
+ int i = fb_info->node;
- i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
@@ -1724,17 +1728,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
unlock_fb_info(fb_info);
console_unlock();
+ return ret;
+}
+
+static int __unlink_framebuffer(struct fb_info *fb_info);
+
+static int do_unregister_framebuffer(struct fb_info *fb_info)
+{
+ struct fb_event event;
+ int ret;
+
+ ret = unbind_console(fb_info);
+
if (ret)
return -EINVAL;
pm_vt_switch_unregister(fb_info->dev);
- unlink_framebuffer(fb_info);
+ __unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
fb_destroy_modelist(&fb_info->modelist);
- registered_fb[i] = NULL;
+ registered_fb[fb_info->node] = NULL;
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
@@ -1747,7 +1763,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
return 0;
}
-int unlink_framebuffer(struct fb_info *fb_info)
+static int __unlink_framebuffer(struct fb_info *fb_info)
{
int i;
@@ -1759,6 +1775,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
fb_info->dev = NULL;
}
+
+ return 0;
+}
+
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+ int ret;
+
+ ret = __unlink_framebuffer(fb_info);
+ if (ret)
+ return ret;
+
+ unbind_console(fb_info);
+
return 0;
}
EXPORT_SYMBOL(unlink_framebuffer);
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 2510fa728d77..283d9307df21 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -628,45 +628,47 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info,
}
/**
- * fb_find_mode - finds a valid video mode
- * @var: frame buffer user defined part of display
- * @info: frame buffer info structure
- * @mode_option: string video mode to find
- * @db: video mode database
- * @dbsize: size of @db
- * @default_mode: default video mode to fall back to
- * @default_bpp: default color depth in bits per pixel
+ * fb_find_mode - finds a valid video mode
+ * @var: frame buffer user defined part of display
+ * @info: frame buffer info structure
+ * @mode_option: string video mode to find
+ * @db: video mode database
+ * @dbsize: size of @db
+ * @default_mode: default video mode to fall back to
+ * @default_bpp: default color depth in bits per pixel
*
- * Finds a suitable video mode, starting with the specified mode
- * in @mode_option with fallback to @default_mode. If
- * @default_mode fails, all modes in the video mode database will
- * be tried.
+ * Finds a suitable video mode, starting with the specified mode
+ * in @mode_option with fallback to @default_mode. If
+ * @default_mode fails, all modes in the video mode database will
+ * be tried.
*
- * Valid mode specifiers for @mode_option:
+ * Valid mode specifiers for @mode_option::
*
- * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m] or
- * <name>[-<bpp>][@<refresh>]
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][p][m]
+ *
+ * or ::
*
- * with <xres>, <yres>, <bpp> and <refresh> decimal numbers and
- * <name> a string.
+ * <name>[-<bpp>][@<refresh>]
*
- * If 'M' is present after yres (and before refresh/bpp if present),
- * the function will compute the timings using VESA(tm) Coordinated
- * Video Timings (CVT). If 'R' is present after 'M', will compute with
- * reduced blanking (for flatpanels). If 'i' is present, compute
- * interlaced mode. If 'm' is present, add margins equal to 1.8%
- * of xres rounded down to 8 pixels, and 1.8% of yres. The char
- * 'i' and 'm' must be after 'M' and 'R'. Example:
+ * with <xres>, <yres>, <bpp> and <refresh> decimal numbers and
+ * <name> a string.
*
- * 1024x768MR-8@60m - Reduced blank with margins at 60Hz.
+ * If 'M' is present after yres (and before refresh/bpp if present),
+ * the function will compute the timings using VESA(tm) Coordinated
+ * Video Timings (CVT). If 'R' is present after 'M', will compute with
+ * reduced blanking (for flatpanels). If 'i' or 'p' are present, compute
+ * interlaced or progressive mode. If 'm' is present, add margins equal
+ * to 1.8% of xres rounded down to 8 pixels, and 1.8% of yres. The char
+ * 'i', 'p' and 'm' must be after 'M' and 'R'. Example::
*
- * NOTE: The passed struct @var is _not_ cleared! This allows you
- * to supply values for e.g. the grayscale and accel_flags fields.
+ * 1024x768MR-8@60m - Reduced blank with margins at 60Hz.
*
- * Returns zero for failure, 1 if using specified @mode_option,
- * 2 if using specified @mode_option with an ignored refresh rate,
- * 3 if default mode is used, 4 if fall back to any valid mode.
+ * NOTE: The passed struct @var is _not_ cleared! This allows you
+ * to supply values for e.g. the grayscale and accel_flags fields.
*
+ * Returns zero for failure, 1 if using specified @mode_option,
+ * 2 if using specified @mode_option with an ignored refresh rate,
+ * 3 if default mode is used, 4 if fall back to any valid mode.
*/
int fb_find_mode(struct fb_var_screeninfo *var,
@@ -697,7 +699,8 @@ int fb_find_mode(struct fb_var_screeninfo *var,
unsigned int namelen = strlen(name);
int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0;
- int yres_specified = 0, cvt = 0, rb = 0, interlace = 0;
+ int yres_specified = 0, cvt = 0, rb = 0;
+ int interlace_specified = 0, interlace = 0;
int margins = 0;
u32 best, diff, tdiff;
@@ -748,9 +751,17 @@ int fb_find_mode(struct fb_var_screeninfo *var,
if (!cvt)
margins = 1;
break;
+ case 'p':
+ if (!cvt) {
+ interlace = 0;
+ interlace_specified = 1;
+ }
+ break;
case 'i':
- if (!cvt)
+ if (!cvt) {
interlace = 1;
+ interlace_specified = 1;
+ }
break;
default:
goto done;
@@ -819,11 +830,21 @@ done:
if ((name_matches(db[i], name, namelen) ||
(res_specified && res_matches(db[i], xres, yres))) &&
!fb_try_mode(var, info, &db[i], bpp)) {
- if (refresh_specified && db[i].refresh == refresh)
- return 1;
+ const int db_interlace = (db[i].vmode &
+ FB_VMODE_INTERLACED ? 1 : 0);
+ int score = abs(db[i].refresh - refresh);
+
+ if (interlace_specified)
+ score += abs(db_interlace - interlace);
+
+ if (!interlace_specified ||
+ db_interlace == interlace)
+ if (refresh_specified &&
+ db[i].refresh == refresh)
+ return 1;
- if (abs(db[i].refresh - refresh) < diff) {
- diff = abs(db[i].refresh - refresh);
+ if (score < diff) {
+ diff = score;
best = i;
}
}
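The new matching logic reads as a scoring function: lower is better, and an interlace mismatch costs one point once 'i' or 'p' was given. A hedged standalone restatement (score_mode is a hypothetical name):

    static int score_mode(int db_refresh, int db_interlace,
                          int want_refresh, int want_interlace,
                          int interlace_specified)
    {
            int score = abs(db_refresh - want_refresh);

            if (interlace_specified)
                    score += abs(db_interlace - want_interlace);
            return score;
    }

For "1024x768p@60", a 60 Hz interlaced mode and a 59 Hz progressive mode both score 1; a 60 Hz progressive mode scores 0 and wins.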
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 46a4484e3da7..3946649b85c8 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -9,18 +9,41 @@
#include <linux/kernel.h>
#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/printk.h>
#include <linux/screen_info.h>
#include <video/vga.h>
#include <asm/efi.h>
#include <drm/drm_utils.h> /* For drm_get_panel_orientation_quirk */
#include <drm/drm_connector.h> /* For DRM_MODE_PANEL_ORIENTATION_* */
+struct bmp_file_header {
+ u16 id;
+ u32 file_size;
+ u32 reserved;
+ u32 bitmap_offset;
+} __packed;
+
+struct bmp_dib_header {
+ u32 dib_header_size;
+ s32 width;
+ s32 height;
+ u16 planes;
+ u16 bpp;
+ u32 compression;
+ u32 bitmap_size;
+ u32 horz_resolution;
+ u32 vert_resolution;
+ u32 colors_used;
+ u32 colors_important;
+} __packed;
+
static bool request_mem_succeeded = false;
-static bool nowc = false;
+static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
@@ -66,10 +89,172 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
+/*
+ * If fbcon deferred console takeover is configured, the intent is for the
+ * framebuffer to show the boot graphics (e.g. vendor logo) until there is some
+ * (error) message to display. But the boot graphics may have been destroyed by
+ * e.g. option ROM output; detect this and restore the boot graphics.
+ */
+#if defined CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER && \
+ defined CONFIG_ACPI_BGRT
+static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
+{
+ u8 r, g, b;
+
+ while (width--) {
+ b = *src++;
+ g = *src++;
+ r = *src++;
+ *dst++ = (r << si->red_pos) |
+ (g << si->green_pos) |
+ (b << si->blue_pos);
+ }
+}
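The copy routine above converts one 24bpp BGR row into 32bpp pixels laid out according to screen_info's channel offsets. Per pixel it amounts to the following (hypothetical helper; the shifts come from si->red_pos and friends):

    static u32 pack_pixel(u8 b, u8 g, u8 r,
                          u8 red_pos, u8 green_pos, u8 blue_pos)
    {
            return ((u32)r << red_pos) |
                   ((u32)g << green_pos) |
                   ((u32)b << blue_pos);
    }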
+
+#ifdef CONFIG_X86
+/*
+ * On x86 some firmwares use a low non-native resolution for the display when
+ * they have shown some text messages, while keeping the bgrt filled with info
+ * for the native resolution. If the bgrt image intended for the native
+ * resolution still fits, it will be displayed very close to the right edge of
+ * the display, looking quite bad. This function checks for this.
+ */
+static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+{
+ static const int default_resolutions[][2] = {
+ { 800, 600 },
+ { 1024, 768 },
+ { 1280, 1024 },
+ };
+ u32 i, right_margin;
+
+ for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
+ if (default_resolutions[i][0] == si->lfb_width &&
+ default_resolutions[i][1] == si->lfb_height)
+ break;
+ }
+ /* If not a default resolution used for textmode, this should be fine */
+ if (i >= ARRAY_SIZE(default_resolutions))
+ return true;
+
+ /* If the right margin is 5 times smaller than the left one, reject */
+ right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
+ if (right_margin < (bgrt_tab.image_offset_x / 5))
+ return false;
+
+ return true;
+}
+#else
+static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+{
+ return true;
+}
+#endif
+
+static void efifb_show_boot_graphics(struct fb_info *info)
+{
+ u32 bmp_width, bmp_height, bmp_pitch, screen_pitch, dst_x, y, src_y;
+ struct screen_info *si = &screen_info;
+ struct bmp_file_header *file_header;
+ struct bmp_dib_header *dib_header;
+ void *bgrt_image = NULL;
+ u8 *dst = info->screen_base;
+
+ if (!bgrt_tab.image_address) {
+ pr_info("efifb: No BGRT, not showing boot graphics\n");
+ return;
+ }
+
+ /* Avoid flashing the logo if we're going to print std probe messages */
+ if (console_loglevel > CONSOLE_LOGLEVEL_QUIET)
+ return;
+
+ /* bgrt_tab.status is unreliable, so we don't check it */
+
+ if (si->lfb_depth != 32) {
+ pr_info("efifb: not 32 bits, not showing boot graphics\n");
+ return;
+ }
+
+ bgrt_image = memremap(bgrt_tab.image_address, bgrt_image_size,
+ MEMREMAP_WB);
+ if (!bgrt_image) {
+ pr_warn("efifb: Ignoring BGRT: failed to map image memory\n");
+ return;
+ }
+
+ if (bgrt_image_size < (sizeof(*file_header) + sizeof(*dib_header)))
+ goto error;
+
+ file_header = bgrt_image;
+ if (file_header->id != 0x4d42 || file_header->reserved != 0)
+ goto error;
+
+ dib_header = bgrt_image + sizeof(*file_header);
+ if (dib_header->dib_header_size != 40 || dib_header->width < 0 ||
+ dib_header->planes != 1 || dib_header->bpp != 24 ||
+ dib_header->compression != 0)
+ goto error;
+
+ bmp_width = dib_header->width;
+ bmp_height = abs(dib_header->height);
+ bmp_pitch = round_up(3 * bmp_width, 4);
+ screen_pitch = si->lfb_linelength;
+
+ if ((file_header->bitmap_offset + bmp_pitch * bmp_height) >
+ bgrt_image_size)
+ goto error;
+
+ if ((bgrt_tab.image_offset_x + bmp_width) > si->lfb_width ||
+ (bgrt_tab.image_offset_y + bmp_height) > si->lfb_height)
+ goto error;
+
+ if (!efifb_bgrt_sanity_check(si, bmp_width))
+ goto error;
+
+ pr_info("efifb: showing boot graphics\n");
+
+ for (y = 0; y < si->lfb_height; y++, dst += si->lfb_linelength) {
+ /* Only background? */
+ if (y < bgrt_tab.image_offset_y ||
+ y >= (bgrt_tab.image_offset_y + bmp_height)) {
+ memset(dst, 0, 4 * si->lfb_width);
+ continue;
+ }
+
+ src_y = y - bgrt_tab.image_offset_y;
+ /* Positive header height means upside down row order */
+ if (dib_header->height > 0)
+ src_y = (bmp_height - 1) - src_y;
+
+ memset(dst, 0, bgrt_tab.image_offset_x * 4);
+ dst_x = bgrt_tab.image_offset_x;
+ efifb_copy_bmp(bgrt_image + file_header->bitmap_offset +
+ src_y * bmp_pitch,
+ (u32 *)dst + dst_x, bmp_width, si);
+ dst_x += bmp_width;
+ memset((u32 *)dst + dst_x, 0, (si->lfb_width - dst_x) * 4);
+ }
+
+ memunmap(bgrt_image);
+ return;
+
+error:
+ memunmap(bgrt_image);
+ pr_warn("efifb: Ignoring BGRT: unexpected or invalid BMP data\n");
+}
+#else
+static inline void efifb_show_boot_graphics(struct fb_info *info) {}
+#endif
+
static void efifb_destroy(struct fb_info *info)
{
- if (info->screen_base)
- iounmap(info->screen_base);
+ if (info->screen_base) {
+ if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+ iounmap(info->screen_base);
+ else
+ memunmap(info->screen_base);
+ }
if (request_mem_succeeded)
release_mem_region(info->apertures->ranges[0].base,
info->apertures->ranges[0].size);
@@ -104,7 +289,7 @@ static int efifb_setup(char *options)
else if (!strncmp(this_opt, "width:", 6))
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
else if (!strcmp(this_opt, "nowc"))
- nowc = true;
+ mem_flags &= ~EFI_MEMORY_WC;
}
}
@@ -164,6 +349,7 @@ static int efifb_probe(struct platform_device *dev)
unsigned int size_remap;
unsigned int size_total;
char *option = NULL;
+ efi_memory_desc_t md;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV;
@@ -272,17 +458,42 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (nowc)
- info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
- else
- info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+ if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+ if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+ (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+ pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+ efifb_fix.smem_start);
+ err = -EIO;
+ goto err_release_fb;
+ }
+ /*
+ * If the UEFI memory map covers the efifb region, we may only
+ * remap it using the attributes the memory map prescribes.
+ */
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= md.attribute;
+ }
+ if (mem_flags & EFI_MEMORY_WC)
+ info->screen_base = ioremap_wc(efifb_fix.smem_start,
+ efifb_fix.smem_len);
+ else if (mem_flags & EFI_MEMORY_UC)
+ info->screen_base = ioremap(efifb_fix.smem_start,
+ efifb_fix.smem_len);
+ else if (mem_flags & EFI_MEMORY_WT)
+ info->screen_base = memremap(efifb_fix.smem_start,
+ efifb_fix.smem_len, MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ info->screen_base = memremap(efifb_fix.smem_start,
+ efifb_fix.smem_len, MEMREMAP_WB);
if (!info->screen_base) {
- pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+ pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
err = -EIO;
goto err_release_fb;
}
+ efifb_show_boot_graphics(info);
+
pr_info("efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
efifb_fix.smem_start, size_remap/1024, size_total/1024);
pr_info("efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
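The mapping choice above can be condensed into one hypothetical helper; the order (WC, then UC via ioremap, then WT/WB via memremap) matches the probe code, and the matching teardown must use iounmap() for the first two and memunmap() otherwise, exactly as efifb_destroy() and the err_unmap path now do:

    static void *efifb_map(unsigned long start, size_t len, u64 mem_flags)
    {
            if (mem_flags & EFI_MEMORY_WC)
                    return (void __force *)ioremap_wc(start, len);
            if (mem_flags & EFI_MEMORY_UC)
                    return (void __force *)ioremap(start, len);
            if (mem_flags & EFI_MEMORY_WT)
                    return memremap(start, len, MEMREMAP_WT);
            if (mem_flags & EFI_MEMORY_WB)
                    return memremap(start, len, MEMREMAP_WB);
            return NULL;
    }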
@@ -371,7 +582,10 @@ err_fb_dealoc:
err_groups:
sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
err_unmap:
- iounmap(info->screen_base);
+ if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+ iounmap(info->screen_base);
+ else
+ memunmap(info->screen_base);
err_release_fb:
framebuffer_release(info);
err_release_mem:
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 1bfd13cbd4e3..bc9eb8afc313 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -360,6 +360,10 @@ struct mfb_info {
* @ad[]: Area Descriptors for each real AOI
* @gamma: gamma color table
* @cursor: hardware cursor data
+ * @blank_cursor: blank cursor for hiding cursor
+ * @next_cursor: scratch space to build the next cursor
+ * @edid_data: EDID information buffer
+ * @has_edid: whether or not the EDID buffer is valid
*
* This data structure must be allocated with 32-byte alignment, so that the
* internal fields can be aligned properly.
@@ -381,6 +385,8 @@ struct fsl_diu_data {
__le16 cursor[MAX_CURS * MAX_CURS] __aligned(32);
/* Blank cursor data -- used to hide the cursor */
__le16 blank_cursor[MAX_CURS * MAX_CURS] __aligned(32);
+ /* Scratch cursor data -- used to build new cursor */
+ __le16 next_cursor[MAX_CURS * MAX_CURS] __aligned(32);
uint8_t edid_data[EDID_LENGTH];
bool has_edid;
} __aligned(32);
@@ -1056,13 +1062,17 @@ static int fsl_diu_cursor(struct fb_info *info, struct fb_cursor *cursor)
* FB_CUR_SETSHAPE - the cursor bitmask has changed
*/
if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETCMAP | FB_CUR_SETIMAGE)) {
+ /*
+ * Determine the size of the cursor image data. Normally,
+ * it's 8x16.
+ */
unsigned int image_size =
- DIV_ROUND_UP(cursor->image.width, 8) * cursor->image.height;
+ DIV_ROUND_UP(cursor->image.width, 8) *
+ cursor->image.height;
unsigned int image_words =
DIV_ROUND_UP(image_size, sizeof(uint32_t));
unsigned int bg_idx = cursor->image.bg_color;
unsigned int fg_idx = cursor->image.fg_color;
- uint8_t buffer[image_size];
uint32_t *image, *source, *mask;
uint16_t fg, bg;
unsigned int i;
@@ -1070,13 +1080,6 @@ static int fsl_diu_cursor(struct fb_info *info, struct fb_cursor *cursor)
if (info->state != FBINFO_STATE_RUNNING)
return 0;
- /*
- * Determine the size of the cursor image data. Normally,
- * it's 8x16.
- */
- image_size = DIV_ROUND_UP(cursor->image.width, 8) *
- cursor->image.height;
-
bg = ((info->cmap.red[bg_idx] & 0xf8) << 7) |
((info->cmap.green[bg_idx] & 0xf8) << 2) |
((info->cmap.blue[bg_idx] & 0xf8) >> 3) |
@@ -1088,7 +1091,7 @@ static int fsl_diu_cursor(struct fb_info *info, struct fb_cursor *cursor)
1 << 15;
/* Use 32-bit operations on the data to improve performance */
- image = (uint32_t *)buffer;
+ image = (uint32_t *)data->next_cursor;
source = (uint32_t *)cursor->image.data;
mask = (uint32_t *)cursor->mask;
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 3b70044773b6..4377e3442638 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
+#include <linux/acpi.h>
enum {
FB_GET_WIDTH = 0x00,
@@ -124,6 +125,7 @@ static int goldfish_fb_check_var(struct fb_var_screeninfo *var,
static int goldfish_fb_set_par(struct fb_info *info)
{
struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
if (fb->rotation != fb->fb.var.rotate) {
info->fix.line_length = info->var.xres * 2;
fb->rotation = fb->fb.var.rotate;
@@ -148,13 +150,14 @@ static int goldfish_fb_pan_display(struct fb_var_screeninfo *var,
wait_event_timeout(fb->wait,
fb->base_update_count != base_update_count, HZ / 15);
if (fb->base_update_count == base_update_count)
- pr_err("goldfish_fb_pan_display: timeout waiting for base update\n");
+ pr_err("%s: timeout waiting for base update\n", __func__);
return 0;
}
static int goldfish_fb_blank(int blank, struct fb_info *info)
{
struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
switch (blank) {
case FB_BLANK_NORMAL:
writel(1, fb->reg_base + FB_SET_BLANK);
@@ -234,7 +237,7 @@ static int goldfish_fb_probe(struct platform_device *pdev)
fb->fb.var.activate = FB_ACTIVATE_NOW;
fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
- fb->fb.var.pixclock = 10000;
+ fb->fb.var.pixclock = 0;
fb->fb.var.red.offset = 11;
fb->fb.var.red.length = 5;
@@ -301,6 +304,7 @@ static int goldfish_fb_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, framesize, (void *)fb->fb.screen_base,
fb->fb.fix.smem_start);
iounmap(fb->reg_base);
+ kfree(fb);
return 0;
}
@@ -310,12 +314,19 @@ static const struct of_device_id goldfish_fb_of_match[] = {
};
MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
+static const struct acpi_device_id goldfish_fb_acpi_match[] = {
+ { "GFSH0004", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match);
+
static struct platform_driver goldfish_fb_driver = {
.probe = goldfish_fb_probe,
.remove = goldfish_fb_remove,
.driver = {
.name = "goldfish_fb",
.of_match_table = goldfish_fb_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_fb_acpi_match),
}
};
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 2fd49b2358f8..403d8cd3e582 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -912,6 +912,9 @@ static struct hv_driver hvfb_drv = {
.id_table = id_table,
.probe = hvfb_probe,
.remove = hvfb_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int hvfb_pci_stub_probe(struct pci_dev *pdev,
@@ -929,6 +932,9 @@ static struct pci_driver hvfb_pci_stub_driver = {
.id_table = pci_stub_id_table,
.probe = hvfb_pci_stub_probe,
.remove = hvfb_pci_stub_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ }
};
static int __init hvfb_drv_init(void)
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 7bc5f6056c77..f6d7b04d6dff 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -429,6 +429,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
break;
case 9 ... 15:
bpp = 15;
+ /* fall through */
case 16:
if ((1000000 / var->pixclock) > DACSPEED16) {
dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 15/16bpp)\n",
diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index 9085e9525341..bb4fee52e501 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -233,7 +233,7 @@ static int load_waveform(u8 *mem, size_t size, int m, int t,
/* check temperature range table checksum */
cksum_idx = sizeof(*wfm_hdr) + wfm_hdr->trc + 1;
- if (cksum_idx > size)
+ if (cksum_idx >= size)
return -EINVAL;
cksum = calc_cksum(sizeof(*wfm_hdr), cksum_idx, mem);
if (cksum != mem[cksum_idx]) {
@@ -245,7 +245,7 @@ static int load_waveform(u8 *mem, size_t size, int m, int t,
/* check waveform mode table address checksum */
wmta = get_unaligned_le32(wfm_hdr->wmta) & 0x00FFFFFF;
cksum_idx = wmta + m*4 + 3;
- if (cksum_idx > size)
+ if (cksum_idx >= size)
return -EINVAL;
cksum = calc_cksum(cksum_idx - 3, cksum_idx, mem);
if (cksum != mem[cksum_idx]) {
@@ -257,7 +257,7 @@ static int load_waveform(u8 *mem, size_t size, int m, int t,
/* check waveform temperature table address checksum */
tta = get_unaligned_le32(mem + wmta + m * 4) & 0x00FFFFFF;
cksum_idx = tta + trn*4 + 3;
- if (cksum_idx > size)
+ if (cksum_idx >= size)
return -EINVAL;
cksum = calc_cksum(cksum_idx - 3, cksum_idx, mem);
if (cksum != mem[cksum_idx]) {
@@ -270,7 +270,7 @@ static int load_waveform(u8 *mem, size_t size, int m, int t,
metromem buffer. This does run-length decoding of the waveform */
wfm_idx = get_unaligned_le32(mem + tta + trn * 4) & 0x00FFFFFF;
owfm_idx = wfm_idx;
- if (wfm_idx > size)
+ if (wfm_idx >= size)
return -EINVAL;
while (wfm_idx < size) {
unsigned char rl;
@@ -292,7 +292,7 @@ static int load_waveform(u8 *mem, size_t size, int m, int t,
}
cksum_idx = wfm_idx;
- if (cksum_idx > size)
+ if (cksum_idx >= size)
return -EINVAL;
cksum = calc_cksum(owfm_idx, cksum_idx, mem);
if (cksum != mem[cksum_idx]) {
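All five hunks enforce the same invariant: each cksum_idx (or wfm_idx) is subsequently used to read mem[idx], so the valid range is 0..size-1 and the guard must reject idx >= size, not merely idx > size. As a one-line illustration (hypothetical helper):

    static inline bool wfm_index_ok(size_t idx, size_t size)
    {
            return idx < size;      /* mem[idx] is only valid then */
    }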
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 585f39efcff6..1c75f4806ed3 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -958,7 +958,7 @@ int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb,
{
int r;
- if ((unsigned)omapfb_nb->plane_idx > OMAPFB_PLANE_NUM)
+ if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
return -EINVAL;
if (!notifier_inited) {
diff --git a/drivers/video/fbdev/omap2/omapfb/Makefile b/drivers/video/fbdev/omap2/omapfb/Makefile
index 602edfed09df..f54c3f56b641 100644
--- a/drivers/video/fbdev/omap2/omapfb/Makefile
+++ b/drivers/video/fbdev/omap2/omapfb/Makefile
@@ -2,5 +2,5 @@
obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
obj-y += dss/
obj-y += displays/
-obj-$(CONFIG_FB_OMAP2) += omapfb.o
-omapfb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o
+obj-$(CONFIG_FB_OMAP2) += omap2fb.o
+omap2fb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
index 80dc47347e21..47f0459e3551 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
@@ -218,7 +219,7 @@ static int tpd_probe_of(struct platform_device *pdev)
static int tpd_probe(struct platform_device *pdev)
{
- struct omap_dss_device *in, *dssdev;
+ struct omap_dss_device *dssdev;
struct panel_drv_data *ddata;
int r;
struct gpio_desc *gpio;
@@ -237,25 +238,30 @@ static int tpd_probe(struct platform_device *pdev)
return -ENODEV;
}
-
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->ct_cp_hpd_gpio = gpio;
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->ls_oe_gpio = gpio;
gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
GPIOD_IN);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->hpd_gpio = gpio;
@@ -267,8 +273,6 @@ static int tpd_probe(struct platform_device *pdev)
dssdev->owner = THIS_MODULE;
dssdev->port_num = 1;
- in = ddata->in;
-
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 09e5bb013d28..a5e58a829ea0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -137,8 +137,7 @@ static int dss_initialize_debugfs(void)
static void dss_uninitialize_debugfs(void)
{
- if (dss_debugfs_dir)
- debugfs_remove_recursive(dss_debugfs_dir);
+ debugfs_remove_recursive(dss_debugfs_dir);
}
int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index fb605aefd9b1..a06d9c25765c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -1858,7 +1858,7 @@ static s32 pixinc(int pixels, u8 ps)
return 1 - (-pixels + 1) * ps;
else
BUG();
- return 0;
+ return 0;
}
static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1905,6 +1905,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY)
width = width >> 1;
+ /* fall through */
case OMAP_DSS_ROT_90:
case OMAP_DSS_ROT_270:
*offset1 = 0;
@@ -1927,6 +1928,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY)
width = width >> 1;
+ /* fall through */
case OMAP_DSS_ROT_90 + 4:
case OMAP_DSS_ROT_270 + 4:
*offset1 = 0;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index 8fc843b56b26..e8d428bc47e3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -891,8 +891,7 @@ bool dss_has_feature(enum dss_feat_id id)
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
{
- if (id >= omap_current_dss_features->num_reg_fields)
- BUG();
+ BUG_ON(id >= omap_current_dss_features->num_reg_fields);
*start = omap_current_dss_features->reg_fields[id].start;
*end = omap_current_dss_features->reg_fields[id].end;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index e08e5664e330..b8b5b4ac0e09 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -287,7 +287,7 @@ static bool cmp_var_to_colormode(struct fb_var_screeninfo *var,
var->red.length == 0 ||
var->blue.length == 0 ||
var->green.length == 0)
- return 0;
+ return false;
return var->bits_per_pixel == color->bits_per_pixel &&
cmp_component(&var->red, &color->red) &&
@@ -893,6 +893,7 @@ int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
/ (var->bits_per_pixel >> 2);
break;
}
+ /* fall through */
default:
screen_width = fix->line_length / (var->bits_per_pixel >> 3);
break;
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index bd6c2f5f6095..1dcf02e12af4 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -233,8 +233,10 @@ static u32 to3264(u32 timing, int bpp, int is64)
switch (bpp) {
case 24:
timing *= 3;
+ /* fall through */
case 8:
timing >>= 1;
+ /* fall through */
case 16:
timing >>= 1;
case 32:
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 0955622a1227..69cfb337c857 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -44,6 +44,7 @@
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>
+#include <linux/of.h>
#include "pxa3xx-gcu.h"
@@ -703,11 +704,20 @@ static int pxa3xx_gcu_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id pxa3xx_gcu_of_match[] = {
+ { .compatible = "marvell,pxa300-gcu", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_gcu_of_match);
+#endif
+
static struct platform_driver pxa3xx_gcu_driver = {
.probe = pxa3xx_gcu_probe,
.remove = pxa3xx_gcu_remove,
.driver = {
.name = DRV_NAME,
+ .of_match_table = of_match_ptr(pxa3xx_gcu_of_match),
},
};
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 76722a59f55e..bbed039617a4 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -56,6 +56,7 @@
#include <linux/freezer.h>
#include <linux/console.h>
#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
@@ -1423,6 +1424,21 @@ static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
if (fbi->lcd_power)
fbi->lcd_power(on, &fbi->fb.var);
+
+ if (fbi->lcd_supply && fbi->lcd_supply_enabled != on) {
+ int ret;
+
+ if (on)
+ ret = regulator_enable(fbi->lcd_supply);
+ else
+ ret = regulator_disable(fbi->lcd_supply);
+
+ if (ret < 0)
+ pr_warn("Unable to %s LCD supply regulator: %d\n",
+ on ? "enable" : "disable", ret);
+ else
+ fbi->lcd_supply_enabled = on;
+ }
}
static void pxafb_enable_controller(struct pxafb_info *fbi)
@@ -1799,19 +1815,17 @@ static struct pxafb_info *pxafb_init_fbinfo(struct device *dev,
void *addr;
/* Alloc the pxafb_info and pseudo_palette in one step */
- fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL);
+ fbi = devm_kzalloc(dev, sizeof(struct pxafb_info) + sizeof(u32) * 16,
+ GFP_KERNEL);
if (!fbi)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- memset(fbi, 0, sizeof(struct pxafb_info));
fbi->dev = dev;
fbi->inf = inf;
- fbi->clk = clk_get(dev, NULL);
- if (IS_ERR(fbi->clk)) {
- kfree(fbi);
- return NULL;
- }
+ fbi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(fbi->clk))
+ return ERR_CAST(fbi->clk);
strcpy(fbi->fb.fix.id, PXA_NAME);
@@ -2128,8 +2142,9 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp,
return -EINVAL;
ret = -ENOMEM;
- info->modes = kmalloc_array(timings->num_timings,
- sizeof(info->modes[0]), GFP_KERNEL);
+ info->modes = devm_kcalloc(dev, timings->num_timings,
+ sizeof(info->modes[0]),
+ GFP_KERNEL);
if (!info->modes)
goto out;
info->num_modes = timings->num_timings;
@@ -2288,10 +2303,9 @@ static int pxafb_probe(struct platform_device *dev)
}
fbi = pxafb_init_fbinfo(&dev->dev, inf);
- if (!fbi) {
- /* only reason for pxafb_init_fbinfo to fail is kmalloc */
+ if (IS_ERR(fbi)) {
dev_err(&dev->dev, "Failed to initialize framebuffer device\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi);
goto failed;
}
@@ -2301,25 +2315,26 @@ static int pxafb_probe(struct platform_device *dev)
fbi->backlight_power = inf->pxafb_backlight_power;
fbi->lcd_power = inf->pxafb_lcd_power;
+ fbi->lcd_supply = devm_regulator_get_optional(&dev->dev, "lcd");
+ if (IS_ERR(fbi->lcd_supply)) {
+ if (PTR_ERR(fbi->lcd_supply) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ fbi->lcd_supply = NULL;
+ }
+
r = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(&dev->dev, "no I/O memory resource defined\n");
ret = -ENODEV;
- goto failed_fbi;
- }
-
- r = request_mem_region(r->start, resource_size(r), dev->name);
- if (r == NULL) {
- dev_err(&dev->dev, "failed to request I/O memory\n");
- ret = -EBUSY;
- goto failed_fbi;
+ goto failed;
}
- fbi->mmio_base = ioremap(r->start, resource_size(r));
- if (fbi->mmio_base == NULL) {
- dev_err(&dev->dev, "failed to map I/O memory\n");
+ fbi->mmio_base = devm_ioremap_resource(&dev->dev, r);
+ if (IS_ERR(fbi->mmio_base)) {
+ dev_err(&dev->dev, "failed to get I/O memory\n");
ret = -EBUSY;
- goto failed_free_res;
+ goto failed;
}
fbi->dma_buff_size = PAGE_ALIGN(sizeof(struct pxafb_dma_buff));
@@ -2328,7 +2343,7 @@ static int pxafb_probe(struct platform_device *dev)
if (fbi->dma_buff == NULL) {
dev_err(&dev->dev, "failed to allocate memory for DMA\n");
ret = -ENOMEM;
- goto failed_free_io;
+ goto failed;
}
ret = pxafb_init_video_memory(fbi);
@@ -2345,7 +2360,7 @@ static int pxafb_probe(struct platform_device *dev)
goto failed_free_mem;
}
- ret = request_irq(irq, pxafb_handle_irq, 0, "LCD", fbi);
+ ret = devm_request_irq(&dev->dev, irq, pxafb_handle_irq, 0, "LCD", fbi);
if (ret) {
dev_err(&dev->dev, "request_irq failed: %d\n", ret);
ret = -EBUSY;
@@ -2355,7 +2370,7 @@ static int pxafb_probe(struct platform_device *dev)
ret = pxafb_smart_init(fbi);
if (ret) {
dev_err(&dev->dev, "failed to initialize smartpanel\n");
- goto failed_free_irq;
+ goto failed_free_mem;
}
/*
@@ -2365,13 +2380,13 @@ static int pxafb_probe(struct platform_device *dev)
ret = pxafb_check_var(&fbi->fb.var, &fbi->fb);
if (ret) {
dev_err(&dev->dev, "failed to get suitable mode\n");
- goto failed_free_irq;
+ goto failed_free_mem;
}
ret = pxafb_set_par(&fbi->fb);
if (ret) {
dev_err(&dev->dev, "Failed to set parameters\n");
- goto failed_free_irq;
+ goto failed_free_mem;
}
platform_set_drvdata(dev, fbi);
@@ -2404,20 +2419,11 @@ static int pxafb_probe(struct platform_device *dev)
failed_free_cmap:
if (fbi->fb.cmap.len)
fb_dealloc_cmap(&fbi->fb.cmap);
-failed_free_irq:
- free_irq(irq, fbi);
failed_free_mem:
free_pages_exact(fbi->video_mem, fbi->video_mem_size);
failed_free_dma:
dma_free_coherent(&dev->dev, fbi->dma_buff_size,
fbi->dma_buff, fbi->dma_buff_phys);
-failed_free_io:
- iounmap(fbi->mmio_base);
-failed_free_res:
- release_mem_region(r->start, resource_size(r));
-failed_fbi:
- clk_put(fbi->clk);
- kfree(fbi);
failed:
return ret;
}
@@ -2425,8 +2431,6 @@ failed:
static int pxafb_remove(struct platform_device *dev)
{
struct pxafb_info *fbi = platform_get_drvdata(dev);
- struct resource *r;
- int irq;
struct fb_info *info;
if (!fbi)
@@ -2442,22 +2446,11 @@ static int pxafb_remove(struct platform_device *dev)
if (fbi->fb.cmap.len)
fb_dealloc_cmap(&fbi->fb.cmap);
- irq = platform_get_irq(dev, 0);
- free_irq(irq, fbi);
-
free_pages_exact(fbi->video_mem, fbi->video_mem_size);
dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
fbi->dma_buff_phys);
- iounmap(fbi->mmio_base);
-
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
-
- clk_put(fbi->clk);
- kfree(fbi);
-
return 0;
}
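The error-path shrinkage works because the probe was converted to managed (devm_*) resources, which the driver core releases automatically, in reverse order, on probe failure and on remove. A minimal sketch of the pattern (names hypothetical):

    static int my_probe(struct platform_device *pdev)
    {
            struct resource *r;
            void __iomem *base;

            r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, r); /* request + map */
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* nothing to unwind by hand */

            return 0;   /* region and mapping released automatically */
    }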
diff --git a/drivers/video/fbdev/pxafb.h b/drivers/video/fbdev/pxafb.h
index 5dc414e26fc8..b641289c8a99 100644
--- a/drivers/video/fbdev/pxafb.h
+++ b/drivers/video/fbdev/pxafb.h
@@ -165,6 +165,9 @@ struct pxafb_info {
struct notifier_block freq_policy;
#endif
+ struct regulator *lcd_supply;
+ bool lcd_supply_enabled;
+
void (*lcd_power)(int, struct fb_var_screeninfo *);
void (*backlight_power)(int);
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index a3c44ecf4523..9a9d748b07f2 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -27,8 +27,8 @@
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/clk-provider.h>
#include <linux/of.h>
+#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/parser.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index dec1fed9880e..fbbf26b170f7 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -522,6 +522,7 @@ static int tdfxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
case 32:
var->transp.offset = 24;
var->transp.length = 8;
+ /* fall through */
case 24:
var->red.offset = 16;
var->green.offset = 8;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 284706184b1b..f4b745590600 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -777,9 +777,6 @@ static int get_nativex(struct tridentfb_par *par)
case 3:
x = 800; y = 600;
break;
- case 4:
- x = 1400; y = 1050;
- break;
case 1:
default:
x = 640; y = 480;
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index f365d4862015..afbd6101c78e 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -25,8 +25,8 @@
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <linux/delay.h>
+#include <asm/unaligned.h>
#include <video/udlfb.h>
#include "edid.h"
@@ -72,6 +72,13 @@ static bool fb_defio = 1; /* Detect mmap writes using page faults */
static bool shadow = 1; /* Optionally disable shadow framebuffer */
static int pixel_limit; /* Optionally force a pixel resolution limit */
+struct dlfb_deferred_free {
+ struct list_head list;
+ void *mem;
+};
+
+static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info, u32 new_len);
+
/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
static struct urb *dlfb_get_urb(struct dlfb_data *dlfb);
@@ -367,9 +374,6 @@ static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
int start = width;
int end = width;
- prefetch((void *) front);
- prefetch((void *) back);
-
for (j = 0; j < width; j++) {
if (back[j] != front[j]) {
start = j;
@@ -423,7 +427,9 @@ static void dlfb_compress_hline(
const uint16_t *const pixel_end,
uint32_t *device_address_ptr,
uint8_t **command_buffer_ptr,
- const uint8_t *const cmd_buffer_end)
+ const uint8_t *const cmd_buffer_end,
+ unsigned long back_buffer_offset,
+ int *ident_ptr)
{
const uint16_t *pixel = *pixel_start_ptr;
uint32_t dev_addr = *device_address_ptr;
@@ -436,7 +442,13 @@ static void dlfb_compress_hline(
const uint16_t *raw_pixel_start = NULL;
const uint16_t *cmd_pixel_start, *cmd_pixel_end = NULL;
- prefetchw((void *) cmd); /* pull in one cache line at least */
+ if (back_buffer_offset &&
+ *pixel == *(u16 *)((u8 *)pixel + back_buffer_offset)) {
+ pixel++;
+ dev_addr += BPP;
+ (*ident_ptr)++;
+ continue;
+ }
*cmd++ = 0xAF;
*cmd++ = 0x6B;
@@ -450,29 +462,39 @@ static void dlfb_compress_hline(
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
- cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
- min((int)(pixel_end - pixel),
- (int)(cmd_buffer_end - cmd) / BPP));
+ cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
+ (unsigned long)(pixel_end - pixel),
+ (unsigned long)(cmd_buffer_end - 1 - cmd) / BPP);
- prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * BPP);
+ if (back_buffer_offset) {
+ /* note: the framebuffer may change under us, so we must test for underflow */
+ while (cmd_pixel_end - 1 > pixel &&
+ *(cmd_pixel_end - 1) == *(u16 *)((u8 *)(cmd_pixel_end - 1) + back_buffer_offset))
+ cmd_pixel_end--;
+ }
while (pixel < cmd_pixel_end) {
const uint16_t * const repeating_pixel = pixel;
+ u16 pixel_value = *pixel;
- *cmd++ = *pixel >> 8;
- *cmd++ = *pixel;
+ put_unaligned_be16(pixel_value, cmd);
+ if (back_buffer_offset)
+ *(u16 *)((u8 *)pixel + back_buffer_offset) = pixel_value;
+ cmd += 2;
pixel++;
if (unlikely((pixel < cmd_pixel_end) &&
- (*pixel == *repeating_pixel))) {
+ (*pixel == pixel_value))) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = ((repeating_pixel -
raw_pixel_start) + 1) & 0xFF;
- while ((pixel < cmd_pixel_end)
- && (*pixel == *repeating_pixel)) {
+ do {
+ if (back_buffer_offset)
+ *(u16 *)((u8 *)pixel + back_buffer_offset) = pixel_value;
pixel++;
- }
+ } while ((pixel < cmd_pixel_end) &&
+ (*pixel == pixel_value));
/* immediately after raw data is repeat byte */
*cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;
@@ -486,13 +508,16 @@ static void dlfb_compress_hline(
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
*raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
+ } else {
+ /* undo unused byte */
+ cmd--;
}
*cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
- dev_addr += (pixel - cmd_pixel_start) * BPP;
+ dev_addr += (u8 *)pixel - (u8 *)cmd_pixel_start;
}
- if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
+ if (cmd_buffer_end - MIN_RLX_CMD_BYTES <= cmd) {
/* Fill leftover bytes with no-ops */
if (cmd_buffer_end > cmd)
memset(cmd, 0xAF, cmd_buffer_end - cmd);
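The compression path now diffs against the shadow buffer inline: a pixel equal to its shadow copy is skipped (and counted via ident_ptr), and every emitted pixel updates the shadow. The core test, pulled out as a hypothetical helper:

    static inline bool pixel_unchanged(const u16 *pixel,
                                       unsigned long back_buffer_offset)
    {
            /* offset of 0 means there is no shadow buffer to compare to */
            return back_buffer_offset &&
                   *pixel == *(const u16 *)((const u8 *)pixel +
                                            back_buffer_offset);
    }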
@@ -520,6 +545,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
+ unsigned long back_buffer_offset = 0;
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
@@ -530,6 +556,8 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
const u8 *back_start = (u8 *) (dlfb->backing_buffer
+ byte_offset);
+ back_buffer_offset = (unsigned long)back_start - (unsigned long)line_start;
+
*ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
&byte_width);
@@ -538,16 +566,14 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
dev_addr += offset;
back_start += offset;
line_start += offset;
-
- memcpy((char *)back_start, (char *) line_start,
- byte_width);
}
while (next_pixel < line_end) {
dlfb_compress_hline((const uint16_t **) &next_pixel,
(const uint16_t *) line_end, &dev_addr,
- (u8 **) &cmd, (u8 *) cmd_end);
+ (u8 **) &cmd, (u8 *) cmd_end, back_buffer_offset,
+ ident_ptr);
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
@@ -610,8 +636,11 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
}
if (cmd > (char *) urb->transfer_buffer) {
+ int len;
+ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
/* Send partial buffer remaining before exiting */
- int len = cmd - (char *) urb->transfer_buffer;
+ len = cmd - (char *) urb->transfer_buffer;
ret = dlfb_submit_urb(dlfb, urb, len);
bytes_sent += len;
} else
@@ -735,8 +764,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
}
if (cmd > (char *) urb->transfer_buffer) {
+ int len;
+ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
/* Send partial buffer remaining before exiting */
- int len = cmd - (char *) urb->transfer_buffer;
+ len = cmd - (char *) urb->transfer_buffer;
dlfb_submit_urb(dlfb, urb, len);
bytes_sent += len;
} else
@@ -917,19 +949,17 @@ static void dlfb_free(struct kref *kref)
{
struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
+ while (!list_empty(&dlfb->deferred_free)) {
+ struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
+ list_del(&d->list);
+ vfree(d->mem);
+ kfree(d);
+ }
vfree(dlfb->backing_buffer);
kfree(dlfb->edid);
kfree(dlfb);
}
-static void dlfb_release_urb_work(struct work_struct *work)
-{
- struct urb_node *unode = container_of(work, struct urb_node,
- release_urb_work.work);
-
- up(&unode->dlfb->urbs.limit_sem);
-}
-
static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
{
struct fb_info *info = dlfb->info;
@@ -1018,10 +1048,6 @@ static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
struct fb_videomode mode;
struct dlfb_data *dlfb = info->par;
- /* TODO: support dynamically changing framebuffer size */
- if ((var->xres * var->yres * 2) > info->fix.smem_len)
- return -EINVAL;
-
/* set device-specific elements of var unrelated to mode */
dlfb_var_color_format(var);
@@ -1039,22 +1065,42 @@ static int dlfb_ops_set_par(struct fb_info *info)
int result;
u16 *pix_framebuffer;
int i;
+ struct fb_var_screeninfo fvs;
+ u32 line_length = info->var.xres * (info->var.bits_per_pixel / 8);
+
+ /* clear the activate field because it causes spurious miscompares */
+ fvs = info->var;
+ fvs.activate = 0;
+ fvs.vmode &= ~FB_VMODE_SMOOTH_XPAN;
+
+ if (!memcmp(&dlfb->current_mode, &fvs, sizeof(struct fb_var_screeninfo)))
+ return 0;
+
+ result = dlfb_realloc_framebuffer(dlfb, info, info->var.yres * line_length);
+ if (result)
+ return result;
result = dlfb_set_video_mode(dlfb, &info->var);
- if ((result == 0) && (dlfb->fb_count == 0)) {
+ if (result)
+ return result;
+
+ dlfb->current_mode = fvs;
+ info->fix.line_length = line_length;
+
+ if (dlfb->fb_count == 0) {
/* paint greenscreen */
pix_framebuffer = (u16 *) info->screen_base;
for (i = 0; i < info->fix.smem_len / 2; i++)
pix_framebuffer[i] = 0x37e6;
-
- dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
- info->screen_base);
}
- return result;
+ dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
+ info->screen_base);
+
+ return 0;
}
/* To fonzi the jukebox (e.g. make blanking changes take effect) */
@@ -1129,21 +1175,29 @@ static struct fb_ops dlfb_ops = {
};
+static void dlfb_deferred_vfree(struct dlfb_data *dlfb, void *mem)
+{
+ struct dlfb_deferred_free *d = kmalloc(sizeof(struct dlfb_deferred_free), GFP_KERNEL);
+ if (!d)
+ return;
+ d->mem = mem;
+ list_add(&d->list, &dlfb->deferred_free);
+}
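Buffers replaced while URBs or defio flushes may still reference them cannot be vfree()d immediately; they are queued on dlfb->deferred_free and only released in dlfb_free() once the last kref drops. A usage sketch (swap_backing_buffer is a hypothetical name):

    static void swap_backing_buffer(struct dlfb_data *dlfb, void *new_back)
    {
            if (dlfb->backing_buffer)
                    dlfb_deferred_vfree(dlfb, dlfb->backing_buffer);
            dlfb->backing_buffer = new_back; /* freed at final kref_put() */
    }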
+
/*
* Assumes &info->lock held by caller
* Assumes no active clients have framebuffer open
*/
-static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info)
+static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info, u32 new_len)
{
- int old_len = info->fix.smem_len;
- int new_len;
- unsigned char *old_fb = info->screen_base;
+ u32 old_len = info->fix.smem_len;
+ const void *old_fb = (const void __force *)info->screen_base;
unsigned char *new_fb;
unsigned char *new_back = NULL;
- new_len = info->fix.line_length * info->var.yres;
+ new_len = PAGE_ALIGN(new_len);
- if (PAGE_ALIGN(new_len) > old_len) {
+ if (new_len > old_len) {
/*
* Alloc system memory for virtual framebuffer
*/
@@ -1152,14 +1206,15 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info
dev_err(info->dev, "Virtual framebuffer alloc failed\n");
return -ENOMEM;
}
+ memset(new_fb, 0xff, new_len);
if (info->screen_base) {
memcpy(new_fb, old_fb, old_len);
- vfree(info->screen_base);
+ dlfb_deferred_vfree(dlfb, (void __force *)info->screen_base);
}
- info->screen_base = new_fb;
- info->fix.smem_len = PAGE_ALIGN(new_len);
+ info->screen_base = (char __iomem *)new_fb;
+ info->fix.smem_len = new_len;
info->fix.smem_start = (unsigned long) new_fb;
info->flags = udlfb_info_flags;
@@ -1175,7 +1230,7 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info
dev_info(info->dev,
"No shadow/backing buffer allocated\n");
else {
- vfree(dlfb->backing_buffer);
+ dlfb_deferred_vfree(dlfb, dlfb->backing_buffer);
dlfb->backing_buffer = new_back;
}
}
@@ -1327,11 +1382,6 @@ static int dlfb_setup_modes(struct dlfb_data *dlfb,
* with mode size info, we can now alloc our framebuffer.
*/
memcpy(&info->fix, &dlfb_fix, sizeof(dlfb_fix));
- info->fix.line_length = info->var.xres *
- (info->var.bits_per_pixel / 8);
-
- result = dlfb_realloc_framebuffer(dlfb, info);
-
} else
result = -EINVAL;
@@ -1419,7 +1469,10 @@ static ssize_t edid_store(
if (!dlfb->edid || memcmp(src, dlfb->edid, src_size))
return -EINVAL;
- dlfb_ops_set_par(fb_info);
+ ret = dlfb_ops_set_par(fb_info);
+ if (ret)
+ return ret;
+
return src_size;
}
@@ -1579,6 +1632,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
}
kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
+ INIT_LIST_HEAD(&dlfb->deferred_free);
dlfb->udev = usbdev;
usb_set_intfdata(intf, dlfb);
@@ -1649,7 +1703,8 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dlfb->info = info;
info->par = dlfb;
info->pseudo_palette = dlfb->pseudo_palette;
- info->fbops = &dlfb_ops;
+ dlfb->ops = dlfb_ops;
+ info->fbops = &dlfb->ops;
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
@@ -1675,7 +1730,9 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dlfb_select_std_channel(dlfb);
dlfb_ops_check_var(&info->var, info);
- dlfb_ops_set_par(info);
+ retval = dlfb_ops_set_par(info);
+ if (retval)
+ goto error;
retval = register_framebuffer(info);
if (retval < 0) {
@@ -1789,14 +1846,7 @@ static void dlfb_urb_completion(struct urb *urb)
dlfb->urbs.available++;
spin_unlock_irqrestore(&dlfb->urbs.lock, flags);
- /*
- * When using fb_defio, we deadlock if up() is called
- * while another is waiting. So queue to another process.
- */
- if (fb_defio)
- schedule_delayed_work(&unode->release_urb_work, 0);
- else
- up(&dlfb->urbs.limit_sem);
+ up(&dlfb->urbs.limit_sem);
}
static void dlfb_free_urb_list(struct dlfb_data *dlfb)
@@ -1805,23 +1855,17 @@ static void dlfb_free_urb_list(struct dlfb_data *dlfb)
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
- int ret;
- unsigned long flags;
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
+ down(&dlfb->urbs.limit_sem);
- /* Getting interrupted means a leak, but ok at disconnect */
- ret = down_interruptible(&dlfb->urbs.limit_sem);
- if (ret)
- break;
-
- spin_lock_irqsave(&dlfb->urbs.lock, flags);
+ spin_lock_irq(&dlfb->urbs.lock);
node = dlfb->urbs.list.next; /* have reserved one with sem */
list_del_init(node);
- spin_unlock_irqrestore(&dlfb->urbs.lock, flags);
+ spin_unlock_irq(&dlfb->urbs.lock);
unode = list_entry(node, struct urb_node, entry);
urb = unode->urb;
@@ -1838,25 +1882,27 @@ static void dlfb_free_urb_list(struct dlfb_data *dlfb)
static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size)
{
- int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
+ size_t wanted_size = count * size;
spin_lock_init(&dlfb->urbs.lock);
+retry:
dlfb->urbs.size = size;
INIT_LIST_HEAD(&dlfb->urbs.list);
- while (i < count) {
+ sema_init(&dlfb->urbs.limit_sem, 0);
+ dlfb->urbs.count = 0;
+ dlfb->urbs.available = 0;
+
+ while (dlfb->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(*unode), GFP_KERNEL);
if (!unode)
break;
unode->dlfb = dlfb;
- INIT_DELAYED_WORK(&unode->release_urb_work,
- dlfb_release_urb_work);
-
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(unode);
@@ -1864,11 +1910,16 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(dlfb->udev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(dlfb->udev, size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
+ if (size > PAGE_SIZE) {
+ size /= 2;
+ dlfb_free_urb_list(dlfb);
+ goto retry;
+ }
break;
}
@@ -1879,14 +1930,12 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size)
list_add_tail(&unode->entry, &dlfb->urbs.list);
- i++;
+ up(&dlfb->urbs.limit_sem);
+ dlfb->urbs.count++;
+ dlfb->urbs.available++;
}
- sema_init(&dlfb->urbs.limit_sem, i);
- dlfb->urbs.count = i;
- dlfb->urbs.available = i;
-
- return i;
+ return dlfb->urbs.count;
}
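
The hunk above halves the per-URB buffer size and rebuilds the whole pool whenever usb_alloc_coherent() fails, so a fragmented system still ends up with roughly wanted_size bytes in total. A minimal standalone sketch of the same back-off pattern (hypothetical names, plain kmalloc() standing in for usb_alloc_coherent()):

#include <linux/slab.h>

/* Sketch only: allocate 'count' buffers, halving 'size' until it fits. */
static size_t alloc_pool(void **bufs, int count, size_t size)
{
	int i;
retry:
	for (i = 0; i < count; i++) {
		bufs[i] = kmalloc(size, GFP_KERNEL);
		if (!bufs[i]) {
			while (i--)		/* undo the partial pool */
				kfree(bufs[i]);
			if (size > PAGE_SIZE) {
				size /= 2;	/* back off and rebuild */
				goto retry;
			}
			return 0;
		}
	}
	return size;	/* buffer size actually used */
}
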
static struct urb *dlfb_get_urb(struct dlfb_data *dlfb)
@@ -1894,7 +1943,6 @@ static struct urb *dlfb_get_urb(struct dlfb_data *dlfb)
int ret;
struct list_head *entry;
struct urb_node *unode;
- unsigned long flags;
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&dlfb->urbs.limit_sem, GET_URB_TIMEOUT);
@@ -1906,14 +1954,14 @@ static struct urb *dlfb_get_urb(struct dlfb_data *dlfb)
return NULL;
}
- spin_lock_irqsave(&dlfb->urbs.lock, flags);
+ spin_lock_irq(&dlfb->urbs.lock);
BUG_ON(list_empty(&dlfb->urbs.list)); /* reserved one with limit_sem */
entry = dlfb->urbs.list.next;
list_del_init(entry);
dlfb->urbs.available--;
- spin_unlock_irqrestore(&dlfb->urbs.lock, flags);
+ spin_unlock_irq(&dlfb->urbs.lock);
unode = list_entry(entry, struct urb_node, entry);
return unode->urb;
diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 5d21ff436ec8..b9305d73a1e5 100644
--- a/drivers/video/fbdev/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
@@ -758,6 +758,7 @@ static void set_lcd_output_path(int set_iga, int output_interface)
viaparinfo->chip_info->gfx_chip_name))
viafb_write_reg_mask(CR97, VIACR, 0x84,
BIT7 + BIT2 + BIT1 + BIT0);
+ /* fall through */
case INTERFACE_DVP0:
case INTERFACE_DVP1:
case INTERFACE_DFP_HIGH:
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index d2f785068ef4..7bb7e90b8f00 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -1468,7 +1469,7 @@ static const struct file_operations viafb_vt1636_proc_fops = {
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
-static int viafb_sup_odev_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused viafb_sup_odev_proc_show(struct seq_file *m, void *v)
{
via_odev_to_seq(m, supported_odev_map[
viaparinfo->shared->chip_info.gfx_chip_name]);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6b237e3f4983..d1c1f6283729 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
-#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>
@@ -40,13 +39,8 @@
*/
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
-#define OOM_VBALLOON_DEFAULT_PAGES 256
#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
-static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
-module_param(oom_pages, int, S_IRUSR | S_IWUSR);
-MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
-
#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;
#endif
@@ -86,8 +80,8 @@ struct virtio_balloon {
/* Memory statistics */
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
- /* To register callback in oom notifier call chain */
- struct notifier_block nb;
+ /* To register a shrinker to shrink memory upon memory pressure */
+ struct shrinker shrinker;
};
static struct virtio_device_id id_table[] = {
@@ -365,38 +359,6 @@ static void update_balloon_size(struct virtio_balloon *vb)
&actual);
}
-/*
- * virtballoon_oom_notify - release pages when system is under severe
- * memory pressure (called from out_of_memory())
- * @self : notifier block struct
- * @dummy: not used
- * @parm : returned - number of freed pages
- *
- * The balancing of memory by use of the virtio balloon should not cause
- * the termination of processes while there are pages in the balloon.
- * If virtio balloon manages to release some memory, it will make the
- * system return and retry the allocation that forced the OOM killer
- * to run.
- */
-static int virtballoon_oom_notify(struct notifier_block *self,
- unsigned long dummy, void *parm)
-{
- struct virtio_balloon *vb;
- unsigned long *freed;
- unsigned num_freed_pages;
-
- vb = container_of(self, struct virtio_balloon, nb);
- if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
- return NOTIFY_OK;
-
- freed = parm;
- num_freed_pages = leak_balloon(vb, oom_pages);
- update_balloon_size(vb);
- *freed += num_freed_pages;
-
- return NOTIFY_OK;
-}
-
static void update_balloon_stats_func(struct work_struct *work)
{
struct virtio_balloon *vb;
@@ -455,9 +417,13 @@ static int init_vqs(struct virtio_balloon *vb)
num_stats = update_balloon_stats(vb);
sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
- if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
- < 0)
- BUG();
+ err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
+ GFP_KERNEL);
+ if (err) {
+ dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
+ __func__);
+ return err;
+ }
virtqueue_kick(vb->stats_vq);
}
return 0;
@@ -513,7 +479,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
tell_host(vb, vb->inflate_vq);
/* balloon's page migration 2nd step -- deflate "page" */
+ spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
balloon_page_delete(page);
+ spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb, vb->pfns, page);
tell_host(vb, vb->deflate_vq);
@@ -544,6 +512,52 @@ static struct file_system_type balloon_fs = {
#endif /* CONFIG_BALLOON_COMPACTION */
+static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ unsigned long pages_to_free, pages_freed = 0;
+ struct virtio_balloon *vb = container_of(shrinker,
+ struct virtio_balloon, shrinker);
+
+ pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+
+ /*
+ * One invocation of leak_balloon can deflate at most
+ * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
+ * multiple times until pages_to_free pages have been freed.
+ */
+ while (vb->num_pages && pages_freed < pages_to_free)
+ pages_freed += leak_balloon(vb, pages_to_free - pages_freed);
+ update_balloon_size(vb);
+
+ return pages_freed / VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
+
+static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct virtio_balloon *vb = container_of(shrinker,
+ struct virtio_balloon, shrinker);
+
+ return vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
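
The two callbacks above convert between the units the shrinker core counts (kernel pages) and the units the balloon tracks (4 KiB balloon pages). Worked numbers, assuming only the definitions earlier in this file:

/*
 * VIRTIO_BALLOON_PFN_SHIFT is 12, so a balloon page is always 4 KiB:
 *   4 KiB kernel pages:  VIRTIO_BALLOON_PAGES_PER_PAGE = 4096  >> 12 = 1
 *   64 KiB kernel pages: VIRTIO_BALLOON_PAGES_PER_PAGE = 65536 >> 12 = 16
 * sc->nr_to_scan is therefore scaled up before leaking, and both return
 * values are divided back down to kernel pages.
 */
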
+
+static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
+{
+ unregister_shrinker(&vb->shrinker);
+}
+
+static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
+{
+ vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
+ vb->shrinker.count_objects = virtio_balloon_shrinker_count;
+ vb->shrinker.seeks = DEFAULT_SEEKS;
+
+ return register_shrinker(&vb->shrinker);
+}
+
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
@@ -555,7 +569,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
return -EINVAL;
}
- vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
+ vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
if (!vb) {
err = -ENOMEM;
goto out;
@@ -564,8 +578,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
spin_lock_init(&vb->stop_update_lock);
- vb->stop_update = false;
- vb->num_pages = 0;
mutex_init(&vb->balloon_lock);
init_waitqueue_head(&vb->acked);
vb->vdev = vdev;
@@ -576,17 +588,10 @@ static int virtballoon_probe(struct virtio_device *vdev)
if (err)
goto out_free_vb;
- vb->nb.notifier_call = virtballoon_oom_notify;
- vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
- err = register_oom_notifier(&vb->nb);
- if (err < 0)
- goto out_del_vqs;
-
#ifdef CONFIG_BALLOON_COMPACTION
balloon_mnt = kern_mount(&balloon_fs);
if (IS_ERR(balloon_mnt)) {
err = PTR_ERR(balloon_mnt);
- unregister_oom_notifier(&vb->nb);
goto out_del_vqs;
}
@@ -595,13 +600,19 @@ static int virtballoon_probe(struct virtio_device *vdev)
if (IS_ERR(vb->vb_dev_info.inode)) {
err = PTR_ERR(vb->vb_dev_info.inode);
kern_unmount(balloon_mnt);
- unregister_oom_notifier(&vb->nb);
- vb->vb_dev_info.inode = NULL;
goto out_del_vqs;
}
vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
-
+ /*
+ * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
+ * shrinker needs to be registered to relieve memory pressure.
+ */
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
+ err = virtio_balloon_register_shrinker(vb);
+ if (err)
+ goto out_del_vqs;
+ }
virtio_device_ready(vdev);
if (towards_target(vb))
@@ -633,8 +644,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
- unregister_oom_notifier(&vb->nb);
-
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ virtio_balloon_unregister_shrinker(vb);
spin_lock_irq(&vb->stop_update_lock);
vb->stop_update = true;
spin_unlock_irq(&vb->stop_update_lock);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 67763d3c7abf..4cd9ea5c75be 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -397,9 +397,23 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
/* Activate the queue */
writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
+ u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;
+
+ /*
+ * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
+ * that doesn't fit in 32bit, fail the setup rather than
+ * pretending to be successful.
+ */
+ if (q_pfn >> 32) {
+ dev_err(&vdev->dev,
+ "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+ 0x1ULL << (32 + PAGE_SHIFT - 30));
+ err = -E2BIG;
+ goto error_bad_pfn;
+ }
+
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
- writel(virtqueue_get_desc_addr(vq) >> PAGE_SHIFT,
- vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+ writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
} else {
u64 addr;
@@ -430,6 +444,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
return vq;
+error_bad_pfn:
+ vring_del_virtqueue(vq);
error_new_virtqueue:
if (vm_dev->version == 1) {
writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
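
The limit printed by the new check follows directly from QUEUE_PFN being a 32-bit register: the largest usable ring address is 2^(32 + PAGE_SHIFT), and the message converts that to GB with the '- 30'. A quick check, assuming 4 KiB pages:

/*
 * PAGE_SHIFT == 12:
 *   largest PFN that fits = 2^32 - 1
 *   largest ring address  = just under 2^(32 + 12) = 16 TiB
 *   printed value         = 0x1ULL << (32 + 12 - 30) = 0x4000 GB
 */
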
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 705aebd74e56..465a6f5142cc 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -421,7 +421,7 @@ const char *vp_bus_name(struct virtio_device *vdev)
* - OR over all affinities for shared MSI
* - ignore the affinity request if we're using INTX
*/
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
struct virtio_device *vdev = vq->vdev;
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -435,11 +435,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (vp_dev->msix_enabled) {
mask = vp_dev->msix_affinity_masks[info->msix_vector];
irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
- if (cpu == -1)
+ if (!cpu_mask)
irq_set_affinity_hint(irq, NULL);
else {
- cpumask_clear(mask);
- cpumask_set_cpu(cpu, mask);
+ cpumask_copy(mask, cpu_mask);
irq_set_affinity_hint(irq, mask);
}
}
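
With the cpumask-based signature, callers hand in a whole mask instead of a single CPU, and NULL clears the affinity hint. A hypothetical caller pinning a queue's MSI-X vector to CPUs 0-3:

	cpumask_var_t mask;

	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		int cpu;

		for (cpu = 0; cpu < 4; cpu++)
			cpumask_set_cpu(cpu, mask);
		vp_set_vq_affinity(vq, mask);	/* vq: some struct virtqueue */
		free_cpumask_var(mask);
	}
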
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 135ee3cf7175..02271002c2f3 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -141,7 +141,7 @@ const char *vp_bus_name(struct virtio_device *vdev);
* - OR over all affinities for shared MSI
* - ignore the affinity request if we're using INTX
*/
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 2780886e8ba3..de062fb201bc 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -122,6 +122,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtqueue *vq;
u16 num;
int err;
+ u64 q_pfn;
/* Select the queue we're interested in */
iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
@@ -141,9 +142,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
+ q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+ if (q_pfn >> 32) {
+ dev_err(&vp_dev->pci_dev->dev,
+ "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+ 0x1ULL << (32 + PAGE_SHIFT - 30));
+ err = -E2BIG;
+ goto out_del_vq;
+ }
+
/* activate the queue */
- iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+ iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
@@ -160,6 +169,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
out_deactivate:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+out_del_vq:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 5dd284008630..53bdc256805f 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -970,7 +970,6 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
{
u32 result;
uintptr_t pci_addr;
- int i;
struct ca91cx42_driver *bridge;
struct device *dev;
@@ -978,7 +977,6 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
dev = image->parent->parent;
/* Find the PCI address that maps to the desired VME address */
- i = image->number;
/* Locking as we can only do one of these at a time */
mutex_lock(&bridge->vme_rmw);
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 1708b2300c7a..00827d2897b5 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -49,7 +49,7 @@ config W1_MASTER_DS1WM
config W1_MASTER_GPIO
tristate "GPIO 1-wire busmaster"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you want to communicate with your 1-wire devices using
GPIO pins. This driver uses the GPIO API to control the wire.
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 5b3e017d9276..8b5e598ffdb3 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
/* extra configurations - e.g. 1WS */
-int extra_config;
+static int extra_config;
/**
* Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index c423bdb982bb..0f4ecfcdb549 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -134,8 +134,7 @@
#define EP_DATA_OUT 2
#define EP_DATA_IN 3
-struct ds_device
-{
+struct ds_device {
struct list_head ds_entry;
struct usb_device *udev;
@@ -158,8 +157,7 @@ struct ds_device
struct w1_bus_master master;
};
-struct ds_status
-{
+struct ds_status {
u8 enable;
u8 speed;
u8 pullup_dur;
@@ -236,7 +234,7 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
int i;
pr_info("0x%x: count=%d, status: ", dev->ep[EP_STATUS], count);
- for (i=0; i<count; ++i)
+ for (i = 0; i < count; ++i)
pr_info("%02x ", buf[i]);
pr_info("\n");
@@ -358,7 +356,7 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
int i;
printk("%s: count=%d: ", __func__, count);
- for (i=0; i<count; ++i)
+ for (i = 0; i < count; ++i)
printk("%02x ", buf[i]);
printk("\n");
}
@@ -404,7 +402,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
if (err)
break;
}
- } while(++count < limit);
+ } while (++count < limit);
return err;
}
@@ -447,7 +445,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
if (err >= 0) {
int i;
printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
- for (i=0; i<err; ++i)
+ for (i = 0; i < err; ++i)
printk("%02x ", dev->st_buf[i]);
printk("\n");
}
@@ -613,7 +611,7 @@ static int ds_read_byte(struct ds_device *dev, u8 *byte)
int err;
struct ds_status st;
- err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM , 0xff);
+ err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM, 0xff);
if (err)
return err;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 8851d441e5fd..50b46c4399ea 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/w1.h>
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 7931231d8e80..e22fdeddada1 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -100,18 +100,6 @@ config W1_SLAVE_DS2438
Say Y here if you want to use a 1-wire
DS2438 Smart Battery Monitor device support
-config W1_SLAVE_DS2760
- tristate "Dallas 2760 battery monitor chip (HP iPAQ & others)"
- help
- If you enable this you will have the DS2760 battery monitor
- chip support.
-
- The battery monitor chip is used in many batteries/devices
- as the one who is responsible for charging/discharging/monitoring
- Li+ batteries.
-
- If you are unsure, say N.
-
config W1_SLAVE_DS2780
tristate "Dallas 2780 battery monitor chip"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index d5f4f4d5b9e5..eab29f151413 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o
-obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
deleted file mode 100644
index 26168abfb8b8..000000000000
--- a/drivers/w1/slaves/w1_ds2760.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * 1-Wire implementation for the ds2760 chip
- *
- * Copyright © 2004-2005, Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>
- *
- * Use consistent with the GNU GPL is permitted,
- * provided that this copyright notice is
- * preserved in its entirety in all copies and derived works.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/idr.h>
-#include <linux/gfp.h>
-
-#include <linux/w1.h>
-
-#include "w1_ds2760.h"
-
-#define W1_FAMILY_DS2760 0x30
-
-static int w1_ds2760_io(struct device *dev, char *buf, int addr, size_t count,
- int io)
-{
- struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
-
- if (!dev)
- return 0;
-
- mutex_lock(&sl->master->bus_mutex);
-
- if (addr > DS2760_DATA_SIZE || addr < 0) {
- count = 0;
- goto out;
- }
- if (addr + count > DS2760_DATA_SIZE)
- count = DS2760_DATA_SIZE - addr;
-
- if (!w1_reset_select_slave(sl)) {
- if (!io) {
- w1_write_8(sl->master, W1_DS2760_READ_DATA);
- w1_write_8(sl->master, addr);
- count = w1_read_block(sl->master, buf, count);
- } else {
- w1_write_8(sl->master, W1_DS2760_WRITE_DATA);
- w1_write_8(sl->master, addr);
- w1_write_block(sl->master, buf, count);
- /* XXX w1_write_block returns void, not n_written */
- }
- }
-
-out:
- mutex_unlock(&sl->master->bus_mutex);
-
- return count;
-}
-
-int w1_ds2760_read(struct device *dev, char *buf, int addr, size_t count)
-{
- return w1_ds2760_io(dev, buf, addr, count, 0);
-}
-EXPORT_SYMBOL(w1_ds2760_read);
-
-int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count)
-{
- return w1_ds2760_io(dev, buf, addr, count, 1);
-}
-EXPORT_SYMBOL(w1_ds2760_write);
-
-static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd)
-{
- struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
-
- if (!dev)
- return -EINVAL;
-
- mutex_lock(&sl->master->bus_mutex);
-
- if (w1_reset_select_slave(sl) == 0) {
- w1_write_8(sl->master, cmd);
- w1_write_8(sl->master, addr);
- }
-
- mutex_unlock(&sl->master->bus_mutex);
- return 0;
-}
-
-int w1_ds2760_store_eeprom(struct device *dev, int addr)
-{
- return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA);
-}
-EXPORT_SYMBOL(w1_ds2760_store_eeprom);
-
-int w1_ds2760_recall_eeprom(struct device *dev, int addr)
-{
- return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
-}
-EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
-
-static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- return w1_ds2760_read(dev, buf, off, count);
-}
-
-static BIN_ATTR_RO(w1_slave, DS2760_DATA_SIZE);
-
-static struct bin_attribute *w1_ds2760_bin_attrs[] = {
- &bin_attr_w1_slave,
- NULL,
-};
-
-static const struct attribute_group w1_ds2760_group = {
- .bin_attrs = w1_ds2760_bin_attrs,
-};
-
-static const struct attribute_group *w1_ds2760_groups[] = {
- &w1_ds2760_group,
- NULL,
-};
-
-static int w1_ds2760_add_slave(struct w1_slave *sl)
-{
- int ret;
- struct platform_device *pdev;
-
- pdev = platform_device_alloc("ds2760-battery", PLATFORM_DEVID_AUTO);
- if (!pdev)
- return -ENOMEM;
- pdev->dev.parent = &sl->dev;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto pdev_add_failed;
-
- dev_set_drvdata(&sl->dev, pdev);
-
- return 0;
-
-pdev_add_failed:
- platform_device_put(pdev);
-
- return ret;
-}
-
-static void w1_ds2760_remove_slave(struct w1_slave *sl)
-{
- struct platform_device *pdev = dev_get_drvdata(&sl->dev);
-
- platform_device_unregister(pdev);
-}
-
-static struct w1_family_ops w1_ds2760_fops = {
- .add_slave = w1_ds2760_add_slave,
- .remove_slave = w1_ds2760_remove_slave,
- .groups = w1_ds2760_groups,
-};
-
-static struct w1_family w1_ds2760_family = {
- .fid = W1_FAMILY_DS2760,
- .fops = &w1_ds2760_fops,
-};
-module_w1_family(w1_ds2760_family);
-
-MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>");
-MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2760));
diff --git a/drivers/w1/slaves/w1_ds2760.h b/drivers/w1/slaves/w1_ds2760.h
deleted file mode 100644
index 24168c94eeae..000000000000
--- a/drivers/w1/slaves/w1_ds2760.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * 1-Wire implementation for the ds2760 chip
- *
- * Copyright © 2004-2005, Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>
- *
- * Use consistent with the GNU GPL is permitted,
- * provided that this copyright notice is
- * preserved in its entirety in all copies and derived works.
- *
- */
-
-#ifndef __w1_ds2760_h__
-#define __w1_ds2760_h__
-
-/* Known commands to the DS2760 chip */
-#define W1_DS2760_SWAP 0xAA
-#define W1_DS2760_READ_DATA 0x69
-#define W1_DS2760_WRITE_DATA 0x6C
-#define W1_DS2760_COPY_DATA 0x48
-#define W1_DS2760_RECALL_DATA 0xB8
-#define W1_DS2760_LOCK 0x6A
-
-/* Number of valid register addresses */
-#define DS2760_DATA_SIZE 0x40
-
-#define DS2760_PROTECTION_REG 0x00
-
-#define DS2760_STATUS_REG 0x01
-#define DS2760_STATUS_IE (1 << 2)
-#define DS2760_STATUS_SWEN (1 << 3)
-#define DS2760_STATUS_RNAOP (1 << 4)
-#define DS2760_STATUS_PMOD (1 << 5)
-
-#define DS2760_EEPROM_REG 0x07
-#define DS2760_SPECIAL_FEATURE_REG 0x08
-#define DS2760_VOLTAGE_MSB 0x0c
-#define DS2760_VOLTAGE_LSB 0x0d
-#define DS2760_CURRENT_MSB 0x0e
-#define DS2760_CURRENT_LSB 0x0f
-#define DS2760_CURRENT_ACCUM_MSB 0x10
-#define DS2760_CURRENT_ACCUM_LSB 0x11
-#define DS2760_TEMP_MSB 0x18
-#define DS2760_TEMP_LSB 0x19
-#define DS2760_EEPROM_BLOCK0 0x20
-#define DS2760_ACTIVE_FULL 0x20
-#define DS2760_EEPROM_BLOCK1 0x30
-#define DS2760_STATUS_WRITE_REG 0x31
-#define DS2760_RATED_CAPACITY 0x32
-#define DS2760_CURRENT_OFFSET_BIAS 0x33
-#define DS2760_ACTIVE_EMPTY 0x3b
-
-extern int w1_ds2760_read(struct device *dev, char *buf, int addr,
- size_t count);
-extern int w1_ds2760_write(struct device *dev, char *buf, int addr,
- size_t count);
-extern int w1_ds2760_store_eeprom(struct device *dev, int addr);
-extern int w1_ds2760_recall_eeprom(struct device *dev, int addr);
-
-#endif /* !__w1_ds2760_h__ */
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index caef0e0fd817..890c038c25f8 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -26,6 +26,7 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/hwmon.h>
+#include <linux/of.h>
#include <linux/atomic.h>
@@ -686,6 +687,8 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
sl->dev.bus = &w1_bus_type;
sl->dev.release = &w1_slave_release;
sl->dev.groups = w1_slave_groups;
+ sl->dev.of_node = of_find_matching_node(sl->master->dev.of_node,
+ sl->family->of_match_table);
dev_set_name(&sl->dev, "%02x-%012llx",
(unsigned int) sl->reg_num.family,
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9af07fd92763..5ea8909a41f9 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -161,6 +161,16 @@ config MENF21BMC_WATCHDOG
This driver can also be built as a module. If so the module
will be called menf21bmc_wdt.
+config MENZ069_WATCHDOG
+ tristate "MEN 16Z069 Watchdog"
+ depends on MCB
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the MEN 16Z069 Watchdog.
+
+ This driver can also be built as a module. If so the module
+ will be called menz069_wdt.
+
config TANGOX_WATCHDOG
tristate "Sigma Designs SMP86xx/SMP87xx watchdog"
select WATCHDOG_CORE
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 1d3c6b094fe5..bf92e7bf9ce0 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -215,4 +215,5 @@ obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
+obj-$(CONFIG_MENZ069_WATCHDOG) += menz69_wdt.o
obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index e3a78f927f83..f29d1edc5bad 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -7,6 +7,7 @@
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/interrupt.h>
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 6c6594261cb7..ebb85d60b6d5 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index 6ed39dee995f..a3134ffa59f8 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -44,6 +44,7 @@
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index f07850d2c977..2b52514eaa86 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog driver for IMX2 and later processors
*
@@ -7,10 +8,6 @@
* some parts adapted by similar drivers from Darius Augulis and Vladimir
* Zapolskiy, additional improvements by Wim Van Sebroeck.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* NOTE: MX1 has a slightly different Watchdog than MX2 and later:
*
* MX1: MX2+:
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 2f3b049ea301..e268add43010 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -146,12 +146,7 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
u32 remainder;
u8 stage_cfg;
-#if GCC_VERSION < 40400
- /* work around a bug compiling do_div() */
- prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
-#else
prescaler = kempld_prescaler[PRESCALER_21];
-#endif
if (!stage)
return -EINVAL;
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index ac5840d9689a..bf6a068245ba 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index 2c9f53eaff4f..70c9cd3ba938 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mfd/max77620.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
new file mode 100644
index 000000000000..ed18238c5407
--- /dev/null
+++ b/drivers/watchdog/menz69_wdt.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Watchdog driver for the MEN z069 IP-Core
+ *
+ * Copyright (C) 2018 Johannes Thumshirn <jth@kernel.org>
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mcb.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+
+struct men_z069_drv {
+ struct watchdog_device wdt;
+ void __iomem *base;
+ struct resource *mem;
+};
+
+#define MEN_Z069_WTR 0x10
+#define MEN_Z069_WTR_WDEN BIT(15)
+#define MEN_Z069_WTR_WDET_MASK 0x7fff
+#define MEN_Z069_WVR 0x14
+
+#define MEN_Z069_TIMER_FREQ 500 /* 500 Hz */
+#define MEN_Z069_WDT_COUNTER_MIN 1
+#define MEN_Z069_WDT_COUNTER_MAX 0x7fff
+#define MEN_Z069_DEFAULT_TIMEOUT 30
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int men_z069_wdt_start(struct watchdog_device *wdt)
+{
+ struct men_z069_drv *drv = watchdog_get_drvdata(wdt);
+ u16 val;
+
+ val = readw(drv->base + MEN_Z069_WTR);
+ val |= MEN_Z069_WTR_WDEN;
+ writew(val, drv->base + MEN_Z069_WTR);
+
+ return 0;
+}
+
+static int men_z069_wdt_stop(struct watchdog_device *wdt)
+{
+ struct men_z069_drv *drv = watchdog_get_drvdata(wdt);
+ u16 val;
+
+ val = readw(drv->base + MEN_Z069_WTR);
+ val &= ~MEN_Z069_WTR_WDEN;
+ writew(val, drv->base + MEN_Z069_WTR);
+
+ return 0;
+}
+
+static int men_z069_wdt_ping(struct watchdog_device *wdt)
+{
+ struct men_z069_drv *drv = watchdog_get_drvdata(wdt);
+ u16 val;
+
+ /* The watchdog trigger value toggles between 0x5555 and 0xaaaa */
+ val = readw(drv->base + MEN_Z069_WVR);
+ val ^= 0xffff;
+ writew(val, drv->base + MEN_Z069_WVR);
+
+ return 0;
+}
+
+static int men_z069_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
+{
+ struct men_z069_drv *drv = watchdog_get_drvdata(wdt);
+ u16 reg, val, ena;
+
+ wdt->timeout = timeout;
+ val = timeout * MEN_Z069_TIMER_FREQ;
+
+ reg = readw(drv->base + MEN_Z069_WVR);
+ ena = reg & MEN_Z069_WTR_WDEN;
+ reg = ena | val;
+ writew(reg, drv->base + MEN_Z069_WTR);
+
+ return 0;
+}
+
+static const struct watchdog_info men_z069_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "MEN z069 Watchdog",
+};
+
+static const struct watchdog_ops men_z069_ops = {
+ .owner = THIS_MODULE,
+ .start = men_z069_wdt_start,
+ .stop = men_z069_wdt_stop,
+ .ping = men_z069_wdt_ping,
+ .set_timeout = men_z069_wdt_set_timeout,
+};
+
+static struct watchdog_device men_z069_wdt = {
+ .info = &men_z069_info,
+ .ops = &men_z069_ops,
+ .timeout = MEN_Z069_DEFAULT_TIMEOUT,
+ .min_timeout = 1,
+ .max_timeout = MEN_Z069_WDT_COUNTER_MAX / MEN_Z069_TIMER_FREQ,
+};
+
+static int men_z069_probe(struct mcb_device *dev,
+ const struct mcb_device_id *id)
+{
+ struct men_z069_drv *drv;
+ struct resource *mem;
+
+ drv = devm_kzalloc(&dev->dev, sizeof(struct men_z069_drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ mem = mcb_request_mem(dev, "z069-wdt");
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ drv->base = devm_ioremap(&dev->dev, mem->start, resource_size(mem));
+ if (drv->base == NULL)
+ goto release_mem;
+
+ drv->mem = mem;
+
+ drv->wdt = men_z069_wdt;
+ watchdog_init_timeout(&drv->wdt, 0, &dev->dev);
+ watchdog_set_nowayout(&drv->wdt, nowayout);
+ watchdog_set_drvdata(&drv->wdt, drv);
+ drv->wdt.parent = &dev->dev;
+ mcb_set_drvdata(dev, drv);
+
+ return watchdog_register_device(&drv->wdt);
+
+release_mem:
+ mcb_release_mem(mem);
+ return -ENOMEM;
+}
+
+static void men_z069_remove(struct mcb_device *dev)
+{
+ struct men_z069_drv *drv = mcb_get_drvdata(dev);
+
+ watchdog_unregister_device(&drv->wdt);
+ mcb_release_mem(drv->mem);
+}
+
+static const struct mcb_device_id men_z069_ids[] = {
+ { .device = 0x45 },
+ { }
+};
+MODULE_DEVICE_TABLE(mcb, men_z069_ids);
+
+static struct mcb_driver men_z069_driver = {
+ .driver = {
+ .name = "z069-wdt",
+ .owner = THIS_MODULE,
+ },
+ .probe = men_z069_probe,
+ .remove = men_z069_remove,
+ .id_table = men_z069_ids,
+};
+module_mcb_driver(men_z069_driver);
+
+MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mcb:16z069");
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2c4a73d1e214..430c3ab84c07 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index ae77112ce97f..cbd752f9ac56 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -29,6 +29,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index ea676d233e1e..9db3b09f7568 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -581,6 +581,8 @@ static int orion_wdt_probe(struct platform_device *pdev)
*/
if (!orion_wdt_enabled(&dev->wdt))
orion_wdt_stop(&dev->wdt);
+ else
+ set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
/* Request the IRQ only after the watchdog is disabled */
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 9849db0743a7..072986d461b7 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -11,6 +11,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/resource.h>
#include <linux/amba/bus.h>
@@ -22,6 +23,7 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -42,6 +44,7 @@
/* control register masks */
#define INT_ENABLE (1 << 0)
#define RESET_ENABLE (1 << 1)
+ #define ENABLE_MASK (INT_ENABLE | RESET_ENABLE)
#define WDTINTCLR 0x00C
#define WDTRIS 0x010
#define WDTMIS 0x014
@@ -65,6 +68,7 @@ struct sp805_wdt {
spinlock_t lock;
void __iomem *base;
struct clk *clk;
+ u64 rate;
struct amba_device *adev;
unsigned int load_val;
};
@@ -74,13 +78,22 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Set to 1 to keep watchdog running after device release");
+/* returns true if wdt is running; otherwise returns false */
+static bool wdt_is_running(struct watchdog_device *wdd)
+{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 wdtcontrol = readl_relaxed(wdt->base + WDTCONTROL);
+
+ return (wdtcontrol & ENABLE_MASK) == ENABLE_MASK;
+}
+
/* This routine finds the load value that will reset the system in the required timeout */
static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
{
struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
- rate = clk_get_rate(wdt->clk);
+ rate = wdt->rate;
/*
* sp805 runs counter with given value twice, after the end of first
@@ -106,9 +119,7 @@ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
static unsigned int wdt_timeleft(struct watchdog_device *wdd)
{
struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
- u64 load, rate;
-
- rate = clk_get_rate(wdt->clk);
+ u64 load;
spin_lock(&wdt->lock);
load = readl_relaxed(wdt->base + WDTVALUE);
@@ -118,7 +129,7 @@ static unsigned int wdt_timeleft(struct watchdog_device *wdd)
load += wdt->load_val + 1;
spin_unlock(&wdt->lock);
- return div_u64(load, rate);
+ return div_u64(load, wdt->rate);
}
static int
@@ -228,11 +239,25 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->clk = devm_clk_get(&adev->dev, NULL);
- if (IS_ERR(wdt->clk)) {
- dev_warn(&adev->dev, "Clock not found\n");
- ret = PTR_ERR(wdt->clk);
- goto err;
+ if (adev->dev.of_node) {
+ wdt->clk = devm_clk_get(&adev->dev, NULL);
+ if (IS_ERR(wdt->clk)) {
+ dev_err(&adev->dev, "Clock not found\n");
+ return PTR_ERR(wdt->clk);
+ }
+ wdt->rate = clk_get_rate(wdt->clk);
+ } else if (has_acpi_companion(&adev->dev)) {
+ /*
+ * When the driver is probed via ACPI, clock devices are not
+ * available, so the watchdog rate is taken from the
+ * "clock-frequency" property given in the _DSD object.
+ */
+ device_property_read_u64(&adev->dev, "clock-frequency",
+ &wdt->rate);
+ if (!wdt->rate) {
+ dev_err(&adev->dev, "no clock-frequency property\n");
+ return -ENODEV;
+ }
}
wdt->adev = adev;
@@ -244,7 +269,23 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
watchdog_set_nowayout(&wdt->wdd, nowayout);
watchdog_set_drvdata(&wdt->wdd, wdt);
watchdog_set_restart_priority(&wdt->wdd, 128);
- wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
+
+ /*
+ * If the 'timeout-sec' devicetree property is specified, use that.
+ * Otherwise, use DEFAULT_TIMEOUT.
+ */
+ wdt->wdd.timeout = DEFAULT_TIMEOUT;
+ watchdog_init_timeout(&wdt->wdd, 0, &adev->dev);
+ wdt_setload(&wdt->wdd, wdt->wdd.timeout);
+
+ /*
+ * If HW is already running, enable/reset the wdt and set the running
+ * bit to tell the wdt subsystem.
+ */
+ if (wdt_is_running(&wdt->wdd)) {
+ wdt_enable(&wdt->wdd);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
ret = watchdog_register_device(&wdt->wdd);
if (ret) {
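
For the ACPI path added above, the rate comes from a "clock-frequency" _DSD device property rather than a clk. A hedged illustration of what the firmware would have to provide (hypothetical value):

/*
 * Hypothetical _DSD content:
 *   Package () { "clock-frequency", 24000000 }
 * device_property_read_u64() then sets wdt->rate = 24000000, and
 * wdt_setload()/wdt_timeleft() never touch a struct clk.
 */
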
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index b4d484a42b70..ff9397d9638a 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -279,10 +279,8 @@ static int sprd_wdt_probe(struct platform_device *pdev)
wdt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->base = devm_ioremap_resource(&pdev->dev, wdt_res);
- if (IS_ERR(wdt->base)) {
- dev_err(&pdev->dev, "failed to map memory resource\n");
+ if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- }
wdt->enable = devm_clk_get(&pdev->dev, "enable");
if (IS_ERR(wdt->enable)) {
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index c97ad5619cb0..e00e3b3526c6 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -11,12 +11,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
@@ -54,11 +55,15 @@
#define TIMEOUT_US 100000
#define SLEEP_US 1000
+#define HAS_PCLK true
+
struct stm32_iwdg {
struct watchdog_device wdd;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk_lsi;
+ struct clk *clk_pclk;
unsigned int rate;
+ bool has_pclk;
};
static inline u32 reg_read(void __iomem *base, u32 reg)
@@ -133,6 +138,44 @@ static int stm32_iwdg_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static int stm32_iwdg_clk_init(struct platform_device *pdev,
+ struct stm32_iwdg *wdt)
+{
+ int ret;
+
+ wdt->clk_lsi = devm_clk_get(&pdev->dev, "lsi");
+ if (IS_ERR(wdt->clk_lsi)) {
+ dev_err(&pdev->dev, "Unable to get lsi clock\n");
+ return PTR_ERR(wdt->clk_lsi);
+ }
+
+ /* optional peripheral clock */
+ if (wdt->has_pclk) {
+ wdt->clk_pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(wdt->clk_pclk)) {
+ dev_err(&pdev->dev, "Unable to get pclk clock\n");
+ return PTR_ERR(wdt->clk_pclk);
+ }
+
+ ret = clk_prepare_enable(wdt->clk_pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare pclk clock\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(wdt->clk_lsi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare lsi clock\n");
+ clk_disable_unprepare(wdt->clk_pclk);
+ return ret;
+ }
+
+ wdt->rate = clk_get_rate(wdt->clk_lsi);
+
+ return 0;
+}
+
static const struct watchdog_info stm32_iwdg_info = {
.options = WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE |
@@ -147,49 +190,42 @@ static const struct watchdog_ops stm32_iwdg_ops = {
.set_timeout = stm32_iwdg_set_timeout,
};
+static const struct of_device_id stm32_iwdg_of_match[] = {
+ { .compatible = "st,stm32-iwdg", .data = (void *)!HAS_PCLK },
+ { .compatible = "st,stm32mp1-iwdg", .data = (void *)HAS_PCLK },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, stm32_iwdg_of_match);
+
static int stm32_iwdg_probe(struct platform_device *pdev)
{
struct watchdog_device *wdd;
+ const struct of_device_id *match;
struct stm32_iwdg *wdt;
struct resource *res;
- void __iomem *regs;
- struct clk *clk;
int ret;
+ match = of_match_device(stm32_iwdg_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->has_pclk = match->data;
+
/* This is the timer base. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(regs)) {
+ wdt->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wdt->regs)) {
dev_err(&pdev->dev, "Could not get resource\n");
- return PTR_ERR(regs);
+ return PTR_ERR(wdt->regs);
}
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Unable to get clock\n");
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to prepare clock %p\n", clk);
+ ret = stm32_iwdg_clk_init(pdev, wdt);
+ if (ret)
return ret;
- }
-
- /*
- * Allocate our watchdog driver data, which has the
- * struct watchdog_device nested within it.
- */
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* Initialize struct stm32_iwdg. */
- wdt->regs = regs;
- wdt->clk = clk;
- wdt->rate = clk_get_rate(clk);
/* Initialize struct watchdog_device. */
wdd = &wdt->wdd;
@@ -217,7 +253,8 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
return 0;
err:
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(wdt->clk_lsi);
+ clk_disable_unprepare(wdt->clk_pclk);
return ret;
}
@@ -227,23 +264,18 @@ static int stm32_iwdg_remove(struct platform_device *pdev)
struct stm32_iwdg *wdt = platform_get_drvdata(pdev);
watchdog_unregister_device(&wdt->wdd);
- clk_disable_unprepare(wdt->clk);
+ clk_disable_unprepare(wdt->clk_lsi);
+ clk_disable_unprepare(wdt->clk_pclk);
return 0;
}
-static const struct of_device_id stm32_iwdg_of_match[] = {
- { .compatible = "st,stm32-iwdg" },
- { /* end node */ }
-};
-MODULE_DEVICE_TABLE(of, stm32_iwdg_of_match);
-
static struct platform_driver stm32_iwdg_driver = {
.probe = stm32_iwdg_probe,
.remove = stm32_iwdg_remove,
.driver = {
.name = "iwdg",
- .of_match_table = stm32_iwdg_of_match,
+ .of_match_table = of_match_ptr(stm32_iwdg_of_match),
},
};
module_platform_driver(stm32_iwdg_driver);
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index d8b11eb269ad..994c54cc68e9 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog driver for the RTC based watchdog in STMP3xxx and i.MX23/28
*
* Author: Wolfram Sang <kernel@pengutronix.de>
*
* Copyright (C) 2011-12 Wolfram Sang, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index b1de8297fa40..d0b53f3c0d17 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index e5d0c28372ea..b459edfacff3 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -152,6 +152,16 @@ config XEN_GNTDEV
help
Allows userspace processes to use grants.
+config XEN_GNTDEV_DMABUF
+ bool "Add support for dma-buf grant access device driver extension"
+ depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
+ help
+ Allows userspace processes and kernel modules to use the Xen-backed
+ dma-buf implementation. With this extension, grant references to
+ the pages of an imported dma-buf can be exported for use by another
+ domain, and grant references coming from a foreign domain can be
+ converted into a local dma-buf for local export.
+
config XEN_GRANT_DEV_ALLOC
tristate "User-space grant reference allocator driver"
depends on XEN
@@ -161,6 +171,20 @@ config XEN_GRANT_DEV_ALLOC
to other domains. This can be used to implement frontend drivers
or as part of an inter-domain shared memory channel.
+config XEN_GRANT_DMA_ALLOC
+ bool "Allow allocating DMA capable buffers with grant reference module"
+ depends on XEN && HAS_DMA
+ help
+ Extends the grant table module API to allow allocating DMA-capable
+ buffers and mapping foreign grant references on top of them.
+ The resulting buffer is similar to one allocated by the balloon
+ driver in that proper memory reservation is made by
+ {increase|decrease}_reservation, and VA mappings are updated if
+ needed.
+ This is useful for sharing foreign buffers with HW drivers which
+ cannot work with the scattered buffers provided by the balloon
+ driver, but require DMA-able memory instead.
+
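
The kernel-side entry point for such buffers is gnttab_dma_alloc_pages(), added by the same series. A hedged usage sketch; the argument layout is assumed from that series and is not shown in this hunk:

	struct gnttab_dma_alloc_args args = {
		.dev      = dma_dev,	/* device the buffer must be DMA-able for */
		.coherent = true,
		.nr_pages = nr_pages,
		.pages    = pages,	/* caller-provided page/frame arrays */
		.frames   = frames,
	};

	ret = gnttab_dma_alloc_pages(&args);
	/* on success, args.vaddr and args.dev_bus_addr describe the buffer */
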
config SWIOTLB_XEN
def_bool y
select SWIOTLB
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 48b154276179..3e542f60f29f 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
+obj-y += mem-reservation.o
obj-y += events/
obj-y += xenbus/
@@ -40,5 +41,6 @@ obj-$(CONFIG_XEN_PVCALLS_BACKEND) += pvcalls-back.o
obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
+xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 065f0b607373..e12bb256036f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -71,6 +71,7 @@
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
+#include <xen/mem-reservation.h>
static int xen_hotplug_unpopulated;
@@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
#define GFP_BALLOON \
(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
-static void scrub_page(struct page *page)
-{
-#ifdef CONFIG_XEN_SCRUB_PAGES
- clear_highpage(page);
-#endif
-}
-
/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
@@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
int rc;
unsigned long i;
struct page *page;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = EXTENT_ORDER,
- .domid = DOMID_SELF
- };
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
@@ -479,16 +468,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
break;
}
- /* XENMEM_populate_physmap requires a PFN based on Xen
- * granularity.
- */
frame_list[i] = page_to_xen_pfn(page);
page = balloon_next_page(page);
}
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+ rc = xenmem_reservation_increase(nr_pages, frame_list);
if (rc <= 0)
return BP_EAGAIN;
@@ -496,29 +480,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
page = balloon_retrieve(false);
BUG_ON(page == NULL);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- /*
- * We don't support PV MMU when Linux and Xen is using
- * different page granularity.
- */
- BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long pfn = page_to_pfn(page);
-
- set_phys_to_machine(pfn, frame_list[i]);
-
- /* Link back into the page tables if not highmem. */
- if (!PageHighMem(page)) {
- int ret;
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(frame_list[i], PAGE_KERNEL),
- 0);
- BUG_ON(ret);
- }
- }
-#endif
+ xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
/* Relinquish the page back to the allocator. */
free_reserved_page(page);
@@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
unsigned long i;
struct page *page, *tmp;
int ret;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = EXTENT_ORDER,
- .domid = DOMID_SELF
- };
LIST_HEAD(pages);
if (nr_pages > ARRAY_SIZE(frame_list))
@@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
break;
}
adjust_managed_page_count(page, -1);
- scrub_page(page);
+ xenmem_reservation_scrub_page(page);
list_add(&page->lru, &pages);
}
@@ -572,28 +529,10 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
*/
i = 0;
list_for_each_entry_safe(page, tmp, &pages, lru) {
- /* XENMEM_decrease_reservation requires a GFN */
frame_list[i++] = xen_page_to_gfn(page);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- /*
- * We don't support PV MMU when Linux and Xen is using
- * different page granularity.
- */
- BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long pfn = page_to_pfn(page);
+ xenmem_reservation_va_mapping_reset(1, &page);
- if (!PageHighMem(page)) {
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- __pte_ma(0), 0);
- BUG_ON(ret);
- }
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
- }
-#endif
list_del(&page->lru);
balloon_append(page);
@@ -601,9 +540,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
flush_tlb_all();
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+ ret = xenmem_reservation_decrease(nr_pages, frame_list);
BUG_ON(ret != nr_pages);
balloon_stats.current_pages -= nr_pages;
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 30d7f52eb7ca..55ed80c3a17c 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -17,7 +17,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
* XXX: Add support for merging bio_vec when using different page
* size in Xen and Linux.
*/
- return 0;
+ return false;
#endif
}
EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
new file mode 100644
index 000000000000..2f8b949c3eeb
--- /dev/null
+++ b/drivers/xen/gntdev-common.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Common functionality of grant device.
+ *
+ * Copyright (c) 2006-2007, D G Murray.
+ * (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _GNTDEV_COMMON_H
+#define _GNTDEV_COMMON_H
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mmu_notifier.h>
+#include <linux/types.h>
+
+struct gntdev_dmabuf_priv;
+
+struct gntdev_priv {
+ /* Maps with visible offsets in the file descriptor. */
+ struct list_head maps;
+ /*
+ * Maps that are not visible; will be freed on munmap.
+ * Only populated if populate_freeable_maps == 1
+ */
+ struct list_head freeable_maps;
+ /* lock protects maps and freeable_maps. */
+ struct mutex lock;
+ struct mm_struct *mm;
+ struct mmu_notifier mn;
+
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ /* Device for which DMA memory is allocated. */
+ struct device *dma_dev;
+#endif
+
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ struct gntdev_dmabuf_priv *dmabuf_priv;
+#endif
+};
+
+struct gntdev_unmap_notify {
+ int flags;
+ /* Address relative to the start of the gntdev_grant_map. */
+ int addr;
+ int event;
+};
+
+struct gntdev_grant_map {
+ struct list_head next;
+ struct vm_area_struct *vma;
+ int index;
+ int count;
+ int flags;
+ refcount_t users;
+ struct gntdev_unmap_notify notify;
+ struct ioctl_gntdev_grant_ref *grants;
+ struct gnttab_map_grant_ref *map_ops;
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_map_grant_ref *kmap_ops;
+ struct gnttab_unmap_grant_ref *kunmap_ops;
+ struct page **pages;
+ unsigned long pages_vm_start;
+
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ /*
+	 * If dma_vaddr is not NULL then this mapping is backed by DMA
+ * capable memory.
+ */
+
+ struct device *dma_dev;
+ /* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
+ int dma_flags;
+ void *dma_vaddr;
+ dma_addr_t dma_bus_addr;
+ /* Needed to avoid allocation in gnttab_dma_free_pages(). */
+ xen_pfn_t *frames;
+#endif
+};
+
+struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
+ int dma_flags);
+
+void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);
+
+void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
+
+bool gntdev_account_mapped_pages(int count);
+
+int gntdev_map_grant_pages(struct gntdev_grant_map *map);
+
+#endif
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
new file mode 100644
index 000000000000..cba6b586bfbd
--- /dev/null
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Xen dma-buf functionality for gntdev.
+ *
+ * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
+ *
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <xen/xen.h>
+#include <xen/grant_table.h>
+
+#include "gntdev-common.h"
+#include "gntdev-dmabuf.h"
+
+#ifndef GRANT_INVALID_REF
+/*
+ * Note on usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a driver,
+ * because it is already in use/reserved by the PV console.
+ */
+#define GRANT_INVALID_REF 0
+#endif
+
+struct gntdev_dmabuf {
+ struct gntdev_dmabuf_priv *priv;
+ struct dma_buf *dmabuf;
+ struct list_head next;
+ int fd;
+
+ union {
+ struct {
+ /* Exported buffers are reference counted. */
+ struct kref refcount;
+
+ struct gntdev_priv *priv;
+ struct gntdev_grant_map *map;
+ } exp;
+ struct {
+ /* Granted references of the imported buffer. */
+ grant_ref_t *refs;
+ /* Scatter-gather table of the imported buffer. */
+ struct sg_table *sgt;
+ /* dma-buf attachment of the imported buffer. */
+ struct dma_buf_attachment *attach;
+ } imp;
+ } u;
+
+ /* Number of pages this buffer has. */
+ int nr_pages;
+ /* Pages of this buffer. */
+ struct page **pages;
+};
+
+struct gntdev_dmabuf_wait_obj {
+ struct list_head next;
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ struct completion completion;
+};
+
+struct gntdev_dmabuf_attachment {
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+};
+
+struct gntdev_dmabuf_priv {
+ /* List of exported DMA buffers. */
+ struct list_head exp_list;
+ /* List of wait objects. */
+ struct list_head exp_wait_list;
+ /* List of imported DMA buffers. */
+ struct list_head imp_list;
+	/* This is the lock that protects the dma-buf lists above. */
+ struct mutex lock;
+};
+
+/* DMA buffer export support. */
+
+/* Implementation of wait for exported DMA buffer to be released. */
+
+static void dmabuf_exp_release(struct kref *kref);
+
+static struct gntdev_dmabuf_wait_obj *
+dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ struct gntdev_dmabuf_wait_obj *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ init_completion(&obj->completion);
+ obj->gntdev_dmabuf = gntdev_dmabuf;
+
+ mutex_lock(&priv->lock);
+ list_add(&obj->next, &priv->exp_wait_list);
+ /* Put our reference and wait for gntdev_dmabuf's release to fire. */
+ kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
+ mutex_unlock(&priv->lock);
+ return obj;
+}
+
+static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf_wait_obj *obj)
+{
+ mutex_lock(&priv->lock);
+ list_del(&obj->next);
+ mutex_unlock(&priv->lock);
+ kfree(obj);
+}
+
+static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
+ u32 wait_to_ms)
+{
+ if (wait_for_completion_timeout(&obj->completion,
+ msecs_to_jiffies(wait_to_ms)) <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ struct gntdev_dmabuf_wait_obj *obj;
+
+ list_for_each_entry(obj, &priv->exp_wait_list, next)
+ if (obj->gntdev_dmabuf == gntdev_dmabuf) {
+ pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
+ complete_all(&obj->completion);
+ break;
+ }
+}
+
+static struct gntdev_dmabuf *
+dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
+
+ mutex_lock(&priv->lock);
+ list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
+ if (gntdev_dmabuf->fd == fd) {
+			pr_debug("Found gntdev_dmabuf in the export list\n");
+ kref_get(&gntdev_dmabuf->u.exp.refcount);
+ ret = gntdev_dmabuf;
+ break;
+ }
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
+ int wait_to_ms)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ struct gntdev_dmabuf_wait_obj *obj;
+ int ret;
+
+ pr_debug("Will wait for dma-buf with fd %d\n", fd);
+ /*
+	 * Try to find the DMA buffer: if it is not found, either the buffer
+	 * has already been released or the file descriptor provided is wrong.
+ */
+ gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
+ if (IS_ERR(gntdev_dmabuf))
+ return PTR_ERR(gntdev_dmabuf);
+
+ /*
+	 * gntdev_dmabuf still exists and we hold its reference now, so
+	 * prepare to wait: allocate a wait object and add it to the wait list,
+ * so we can find it on release.
+ */
+ obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
+ dmabuf_exp_wait_obj_free(priv, obj);
+ return ret;
+}
+
+/* DMA buffer export support. */
+
+static struct sg_table *
+dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
+{
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
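+	/*
+	 * Build the scatter-gather table directly from the pages backing
+	 * the buffer; physically contiguous pages may be merged into
+	 * single entries.
+	 */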
+ ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return sgt;
+
+out:
+ kfree(sgt);
+ return ERR_PTR(ret);
+}
+
+static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;
+
+ gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
+ GFP_KERNEL);
+ if (!gntdev_dmabuf_attach)
+ return -ENOMEM;
+
+ gntdev_dmabuf_attach->dir = DMA_NONE;
+ attach->priv = gntdev_dmabuf_attach;
+ return 0;
+}
+
+static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
+
+ if (gntdev_dmabuf_attach) {
+ struct sg_table *sgt = gntdev_dmabuf_attach->sgt;
+
+ if (sgt) {
+ if (gntdev_dmabuf_attach->dir != DMA_NONE)
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+ sgt->nents,
+ gntdev_dmabuf_attach->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ }
+
+ kfree(sgt);
+ kfree(gntdev_dmabuf_attach);
+ attach->priv = NULL;
+ }
+}
+
+static struct sg_table *
+dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
+ struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
+ attach->dev);
+
+ if (dir == DMA_NONE || !gntdev_dmabuf_attach)
+ return ERR_PTR(-EINVAL);
+
+ /* Return the cached mapping when possible. */
+ if (gntdev_dmabuf_attach->dir == dir)
+ return gntdev_dmabuf_attach->sgt;
+
+ /*
+ * Two mappings with different directions for the same attachment are
+ * not allowed.
+ */
+ if (gntdev_dmabuf_attach->dir != DMA_NONE)
+ return ERR_PTR(-EBUSY);
+
+ sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
+ gntdev_dmabuf->nr_pages);
+ if (!IS_ERR(sgt)) {
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
+ } else {
+ gntdev_dmabuf_attach->sgt = sgt;
+ gntdev_dmabuf_attach->dir = dir;
+ }
+ }
+ if (IS_ERR(sgt))
+ pr_debug("Failed to map sg table for dev %p\n", attach->dev);
+ return sgt;
+}
+
+static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ /* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
+}
+
+static void dmabuf_exp_release(struct kref *kref)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf =
+ container_of(kref, struct gntdev_dmabuf, u.exp.refcount);
+
+ dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
+ list_del(&gntdev_dmabuf->next);
+ kfree(gntdev_dmabuf);
+}
+
+static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
+ struct gntdev_grant_map *map)
+{
+ mutex_lock(&priv->lock);
+ list_del(&map->next);
+ gntdev_put_map(NULL /* already removed */, map);
+ mutex_unlock(&priv->lock);
+}
+
+static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
+ struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;
+
+ dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
+ gntdev_dmabuf->u.exp.map);
+ mutex_lock(&priv->lock);
+ kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
+ mutex_unlock(&priv->lock);
+}
+
+static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* Not implemented. */
+ return NULL;
+}
+
+static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* Not implemented. */
+}
+
+static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ /* Not implemented. */
+ return 0;
+}
+
+static const struct dma_buf_ops dmabuf_exp_ops = {
+ .attach = dmabuf_exp_ops_attach,
+ .detach = dmabuf_exp_ops_detach,
+ .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
+ .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
+ .release = dmabuf_exp_ops_release,
+ .map = dmabuf_exp_ops_kmap,
+ .unmap = dmabuf_exp_ops_kunmap,
+ .mmap = dmabuf_exp_ops_mmap,
+};
+
+struct gntdev_dmabuf_export_args {
+ struct gntdev_priv *priv;
+ struct gntdev_grant_map *map;
+ struct gntdev_dmabuf_priv *dmabuf_priv;
+ struct device *dev;
+ int count;
+ struct page **pages;
+ u32 fd;
+};
+
+static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ int ret;
+
+ gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
+ if (!gntdev_dmabuf)
+ return -ENOMEM;
+
+ kref_init(&gntdev_dmabuf->u.exp.refcount);
+
+ gntdev_dmabuf->priv = args->dmabuf_priv;
+ gntdev_dmabuf->nr_pages = args->count;
+ gntdev_dmabuf->pages = args->pages;
+ gntdev_dmabuf->u.exp.priv = args->priv;
+ gntdev_dmabuf->u.exp.map = args->map;
+
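+	/*
+	 * Prefer the module that owns the exporting device as the dma-buf
+	 * owner, so it cannot be unloaded while the buffer is in use.
+	 */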
+ exp_info.exp_name = KBUILD_MODNAME;
+ if (args->dev->driver && args->dev->driver->owner)
+ exp_info.owner = args->dev->driver->owner;
+ else
+ exp_info.owner = THIS_MODULE;
+ exp_info.ops = &dmabuf_exp_ops;
+ exp_info.size = args->count << PAGE_SHIFT;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = gntdev_dmabuf;
+
+ gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(gntdev_dmabuf->dmabuf)) {
+ ret = PTR_ERR(gntdev_dmabuf->dmabuf);
+ gntdev_dmabuf->dmabuf = NULL;
+ goto fail;
+ }
+
+ ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
+ if (ret < 0)
+ goto fail;
+
+ gntdev_dmabuf->fd = ret;
+ args->fd = ret;
+
+ pr_debug("Exporting DMA buffer with fd %d\n", ret);
+
+ mutex_lock(&args->dmabuf_priv->lock);
+ list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
+ mutex_unlock(&args->dmabuf_priv->lock);
+ return 0;
+
+fail:
+ if (gntdev_dmabuf->dmabuf)
+ dma_buf_put(gntdev_dmabuf->dmabuf);
+ kfree(gntdev_dmabuf);
+ return ret;
+}
+
+static struct gntdev_grant_map *
+dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
+ int count)
+{
+ struct gntdev_grant_map *map;
+
+ if (unlikely(count <= 0))
+ return ERR_PTR(-EINVAL);
+
+ if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
+ (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
+ pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
+ return ERR_PTR(-EINVAL);
+ }
+
+ map = gntdev_alloc_map(priv, count, dmabuf_flags);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(gntdev_account_mapped_pages(count))) {
+ pr_debug("can't map %d pages: over limit\n", count);
+ gntdev_put_map(NULL, map);
+ return ERR_PTR(-ENOMEM);
+ }
+ return map;
+}
+
+static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
+ int count, u32 domid, u32 *refs, u32 *fd)
+{
+ struct gntdev_grant_map *map;
+ struct gntdev_dmabuf_export_args args;
+ int i, ret;
+
+ map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ for (i = 0; i < count; i++) {
+ map->grants[i].domid = domid;
+ map->grants[i].ref = refs[i];
+ }
+
+ mutex_lock(&priv->lock);
+ gntdev_add_map(priv, map);
+ mutex_unlock(&priv->lock);
+
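+	/*
+	 * Map the grants into the kernel. On x86 also request a device
+	 * mapping, so the hypervisor provides bus addresses usable for DMA.
+	 */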
+ map->flags |= GNTMAP_host_map;
+#if defined(CONFIG_X86)
+ map->flags |= GNTMAP_device_map;
+#endif
+
+ ret = gntdev_map_grant_pages(map);
+ if (ret < 0)
+ goto out;
+
+ args.priv = priv;
+ args.map = map;
+ args.dev = priv->dma_dev;
+ args.dmabuf_priv = priv->dmabuf_priv;
+ args.count = map->count;
+ args.pages = map->pages;
+ args.fd = -1; /* Shut up unnecessary gcc warning for i386 */
+
+ ret = dmabuf_exp_from_pages(&args);
+ if (ret < 0)
+ goto out;
+
+ *fd = args.fd;
+ return 0;
+
+out:
+ dmabuf_exp_remove_map(priv, map);
+ return ret;
+}
+
+/* DMA buffer import support. */
+
+static int
+dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+ int count, int domid)
+{
+ grant_ref_t priv_gref_head;
+ int i, ret;
+
+ ret = gnttab_alloc_grant_references(count, &priv_gref_head);
+ if (ret < 0) {
+ pr_debug("Cannot allocate grant references, ret %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ int cur_ref;
+
+ cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
+ if (cur_ref < 0) {
+ ret = cur_ref;
+ pr_debug("Cannot claim grant reference, ret %d\n", ret);
+ goto out;
+ }
+
+ gnttab_grant_foreign_access_ref(cur_ref, domid,
+ xen_page_to_gfn(pages[i]), 0);
+ refs[i] = cur_ref;
+ }
+
+ return 0;
+
+out:
+ gnttab_free_grant_references(priv_gref_head);
+ return ret;
+}
+
+static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (refs[i] != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(refs[i], 0, 0UL);
+}
+
+static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ kfree(gntdev_dmabuf->pages);
+ kfree(gntdev_dmabuf->u.imp.refs);
+ kfree(gntdev_dmabuf);
+}
+
+static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ int i;
+
+ gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
+ if (!gntdev_dmabuf)
+ goto fail_no_free;
+
+ gntdev_dmabuf->u.imp.refs = kcalloc(count,
+ sizeof(gntdev_dmabuf->u.imp.refs[0]),
+ GFP_KERNEL);
+ if (!gntdev_dmabuf->u.imp.refs)
+ goto fail;
+
+ gntdev_dmabuf->pages = kcalloc(count,
+ sizeof(gntdev_dmabuf->pages[0]),
+ GFP_KERNEL);
+ if (!gntdev_dmabuf->pages)
+ goto fail;
+
+ gntdev_dmabuf->nr_pages = count;
+
+ for (i = 0; i < count; i++)
+ gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;
+
+ return gntdev_dmabuf;
+
+fail:
+ dmabuf_imp_free_storage(gntdev_dmabuf);
+fail_no_free:
+ return ERR_PTR(-ENOMEM);
+}
+
+static struct gntdev_dmabuf *
+dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+ int fd, int count, int domid)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf, *ret;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct sg_page_iter sg_iter;
+ int i;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
+ if (IS_ERR(gntdev_dmabuf)) {
+ ret = gntdev_dmabuf;
+ goto fail_put;
+ }
+
+ gntdev_dmabuf->priv = priv;
+ gntdev_dmabuf->fd = fd;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach)) {
+ ret = ERR_CAST(attach);
+ goto fail_free_obj;
+ }
+
+ gntdev_dmabuf->u.imp.attach = attach;
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = ERR_CAST(sgt);
+ goto fail_detach;
+ }
+
+	/* Check the number of pages that the imported buffer has. */
+	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
+		ret = ERR_PTR(-EINVAL);
+		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
+			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
+ goto fail_unmap;
+ }
+
+ gntdev_dmabuf->u.imp.sgt = sgt;
+
+ /* Now convert sgt to array of pages and check for page validity. */
+ i = 0;
+ for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ /*
+		 * Check that the page is valid: an invalid page can occur if
+		 * we are given a page from VRAM or another resource that is
+		 * not backed by a struct page.
+ */
+ if (!pfn_valid(page_to_pfn(page))) {
+ ret = ERR_PTR(-EINVAL);
+ goto fail_unmap;
+ }
+
+ gntdev_dmabuf->pages[i++] = page;
+ }
+
+ ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+ gntdev_dmabuf->u.imp.refs,
+ count, domid));
+ if (IS_ERR(ret))
+ goto fail_end_access;
+
+ pr_debug("Imported DMA buffer with fd %d\n", fd);
+
+ mutex_lock(&priv->lock);
+ list_add(&gntdev_dmabuf->next, &priv->imp_list);
+ mutex_unlock(&priv->lock);
+
+ return gntdev_dmabuf;
+
+fail_end_access:
+ dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+fail_free_obj:
+ dmabuf_imp_free_storage(gntdev_dmabuf);
+fail_put:
+ dma_buf_put(dma_buf);
+ return ret;
+}
+
+/*
+ * Find the imported dma-buf by its file descriptor and remove
+ * it from the import list.
+ */
+static struct gntdev_dmabuf *
+dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
+{
+ struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
+
+ mutex_lock(&priv->lock);
+ list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
+ if (gntdev_dmabuf->fd == fd) {
+ pr_debug("Found gntdev_dmabuf in the import list\n");
+ ret = gntdev_dmabuf;
+ list_del(&gntdev_dmabuf->next);
+ break;
+ }
+ }
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dma_buf;
+
+ gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
+ if (IS_ERR(gntdev_dmabuf))
+ return PTR_ERR(gntdev_dmabuf);
+
+ pr_debug("Releasing DMA buffer with fd %d\n", fd);
+
+ dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
+ gntdev_dmabuf->nr_pages);
+
+ attach = gntdev_dmabuf->u.imp.attach;
+
+ if (gntdev_dmabuf->u.imp.sgt)
+ dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
+ DMA_BIDIRECTIONAL);
+ dma_buf = attach->dmabuf;
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(dma_buf);
+
+ dmabuf_imp_free_storage(gntdev_dmabuf);
+ return 0;
+}
+
+/* DMA buffer IOCTL support. */
+
+long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
+ struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
+{
+ struct ioctl_gntdev_dmabuf_exp_from_refs op;
+ u32 *refs;
+ long ret;
+
+ if (use_ptemod) {
+		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
+ use_ptemod);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ if (unlikely(op.count <= 0))
+ return -EINVAL;
+
+ refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
+ if (!refs)
+ return -ENOMEM;
+
+ if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
+ op.domid, refs, &op.fd);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ ret = -EFAULT;
+
+out:
+ kfree(refs);
+ return ret;
+}
+
+long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
+{
+ struct ioctl_gntdev_dmabuf_exp_wait_released op;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
+ op.wait_to_ms);
+}
+
+long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
+{
+ struct ioctl_gntdev_dmabuf_imp_to_refs op;
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ long ret;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ if (unlikely(op.count <= 0))
+ return -EINVAL;
+
+ gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
+ priv->dma_dev, op.fd,
+ op.count, op.domid);
+ if (IS_ERR(gntdev_dmabuf))
+ return PTR_ERR(gntdev_dmabuf);
+
+ if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
+ sizeof(*u->refs) * op.count) != 0) {
+ ret = -EFAULT;
+ goto out_release;
+ }
+ return 0;
+
+out_release:
+ dmabuf_imp_release(priv->dmabuf_priv, op.fd);
+ return ret;
+}
+
+long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_release __user *u)
+{
+ struct ioctl_gntdev_dmabuf_imp_release op;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
+}
+
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
+{
+ struct gntdev_dmabuf_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->exp_list);
+ INIT_LIST_HEAD(&priv->exp_wait_list);
+ INIT_LIST_HEAD(&priv->imp_list);
+
+ return priv;
+}
+
+void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
+{
+ kfree(priv);
+}
diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h
new file mode 100644
index 000000000000..7220a53d0fc5
--- /dev/null
+++ b/drivers/xen/gntdev-dmabuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Xen dma-buf functionality for gntdev.
+ *
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _GNTDEV_DMABUF_H
+#define _GNTDEV_DMABUF_H
+
+#include <xen/gntdev.h>
+
+struct gntdev_dmabuf_priv;
+struct gntdev_priv;
+
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
+
+void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
+
+long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
+ struct ioctl_gntdev_dmabuf_exp_from_refs __user *u);
+
+long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_exp_wait_released __user *u);
+
+long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_to_refs __user *u);
+
+long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_release __user *u);
+
+#endif
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bd56653b9bbc..57390c7666e5 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -6,6 +6,7 @@
*
* Copyright (c) 2006-2007, D G Murray.
* (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -26,10 +27,6 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/mmu_notifier.h>
-#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -37,6 +34,9 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+#include <linux/of_device.h>
+#endif
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -47,6 +47,11 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include "gntdev-common.h"
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+#include "gntdev-dmabuf.h"
+#endif
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
@@ -62,51 +67,23 @@ static atomic_t pages_mapped = ATOMIC_INIT(0);
static int use_ptemod;
#define populate_freeable_maps use_ptemod
-struct gntdev_priv {
- /* maps with visible offsets in the file descriptor */
- struct list_head maps;
- /* maps that are not visible; will be freed on munmap.
- * Only populated if populate_freeable_maps == 1 */
- struct list_head freeable_maps;
- /* lock protects maps and freeable_maps */
- struct mutex lock;
- struct mm_struct *mm;
- struct mmu_notifier mn;
-};
-
-struct unmap_notify {
- int flags;
- /* Address relative to the start of the grant_map */
- int addr;
- int event;
-};
-
-struct grant_map {
- struct list_head next;
- struct vm_area_struct *vma;
- int index;
- int count;
- int flags;
- refcount_t users;
- struct unmap_notify notify;
- struct ioctl_gntdev_grant_ref *grants;
- struct gnttab_map_grant_ref *map_ops;
- struct gnttab_unmap_grant_ref *unmap_ops;
- struct gnttab_map_grant_ref *kmap_ops;
- struct gnttab_unmap_grant_ref *kunmap_ops;
- struct page **pages;
- unsigned long pages_vm_start;
-};
+static int unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+static struct miscdevice gntdev_miscdev;
/* ------------------------------------------------------------------ */
+bool gntdev_account_mapped_pages(int count)
+{
+ return atomic_add_return(count, &pages_mapped) > limit;
+}
+
static void gntdev_print_maps(struct gntdev_priv *priv,
char *text, int text_index)
{
#ifdef DEBUG
- struct grant_map *map;
+ struct gntdev_grant_map *map;
pr_debug("%s: maps list (priv %p)\n", __func__, priv);
list_for_each_entry(map, &priv->maps, next)
@@ -116,13 +93,32 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#endif
}
-static void gntdev_free_map(struct grant_map *map)
+static void gntdev_free_map(struct gntdev_grant_map *map)
{
if (map == NULL)
return;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
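+	/* dma_vaddr is only set for maps backed by gnttab_dma_alloc_pages(). */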
+ if (map->dma_vaddr) {
+ struct gnttab_dma_alloc_args args;
+
+ args.dev = map->dma_dev;
+ args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
+ args.nr_pages = map->count;
+ args.pages = map->pages;
+ args.frames = map->frames;
+ args.vaddr = map->dma_vaddr;
+ args.dev_bus_addr = map->dma_bus_addr;
+
+ gnttab_dma_free_pages(&args);
+ } else
+#endif
if (map->pages)
gnttab_free_pages(map->count, map->pages);
+
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ kfree(map->frames);
+#endif
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
@@ -132,12 +128,13 @@ static void gntdev_free_map(struct grant_map *map)
kfree(map);
}
-static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
+ int dma_flags)
{
- struct grant_map *add;
+ struct gntdev_grant_map *add;
int i;
- add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
+ add = kzalloc(sizeof(*add), GFP_KERNEL);
if (NULL == add)
return NULL;
@@ -155,6 +152,37 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
NULL == add->pages)
goto err;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ add->dma_flags = dma_flags;
+
+ /*
+ * Check if this mapping is requested to be backed
+ * by a DMA buffer.
+ */
+ if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
+ struct gnttab_dma_alloc_args args;
+
+ add->frames = kcalloc(count, sizeof(add->frames[0]),
+ GFP_KERNEL);
+ if (!add->frames)
+ goto err;
+
+ /* Remember the device, so we can free DMA memory. */
+ add->dma_dev = priv->dma_dev;
+
+ args.dev = priv->dma_dev;
+ args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
+ args.nr_pages = count;
+ args.pages = add->pages;
+ args.frames = add->frames;
+
+ if (gnttab_dma_alloc_pages(&args))
+ goto err;
+
+ add->dma_vaddr = args.vaddr;
+ add->dma_bus_addr = args.dev_bus_addr;
+ } else
+#endif
if (gnttab_alloc_pages(count, add->pages))
goto err;
@@ -176,9 +204,9 @@ err:
return NULL;
}
-static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
+void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
- struct grant_map *map;
+ struct gntdev_grant_map *map;
list_for_each_entry(map, &priv->maps, next) {
if (add->index + add->count < map->index) {
@@ -193,10 +221,10 @@ done:
gntdev_print_maps(priv, "[new]", add->index);
}
-static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
- int index, int count)
+static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
+ int index, int count)
{
- struct grant_map *map;
+ struct gntdev_grant_map *map;
list_for_each_entry(map, &priv->maps, next) {
if (map->index != index)
@@ -208,7 +236,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
return NULL;
}
-static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
+void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
if (!map)
return;
@@ -239,7 +267,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
static int find_grant_ptes(pte_t *pte, pgtable_t token,
unsigned long addr, void *data)
{
- struct grant_map *map = data;
+ struct gntdev_grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
u64 pte_maddr;
@@ -272,7 +300,7 @@ static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
}
#endif
-static int map_grant_pages(struct grant_map *map)
+int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
int i, err = 0;
@@ -325,11 +353,20 @@ static int map_grant_pages(struct grant_map *map)
map->unmap_ops[i].handle = map->map_ops[i].handle;
if (use_ptemod)
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ else if (map->dma_vaddr) {
+ unsigned long bfn;
+
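+		/*
+		 * Record the bus address so the device mapping can be
+		 * undone at unmap time.
+		 */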
+ bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
+ map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
+ }
+#endif
}
return err;
}
-static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
int i, err = 0;
struct gntab_unmap_queue_data unmap_data;
@@ -364,7 +401,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
int range, err = 0;
@@ -396,7 +434,7 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
static void gntdev_vma_open(struct vm_area_struct *vma)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
pr_debug("gntdev_vma_open %p\n", vma);
refcount_inc(&map->users);
@@ -404,7 +442,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
static void gntdev_vma_close(struct vm_area_struct *vma)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
struct file *file = vma->vm_file;
struct gntdev_priv *priv = file->private_data;
@@ -428,7 +466,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
unsigned long addr)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}
@@ -441,18 +479,25 @@ static const struct vm_operations_struct gntdev_vmops = {
/* ------------------------------------------------------------------ */
-static void unmap_if_in_range(struct grant_map *map,
+static bool in_range(struct gntdev_grant_map *map,
unsigned long start, unsigned long end)
{
- unsigned long mstart, mend;
- int err;
-
if (!map->vma)
- return;
+ return false;
if (map->vma->vm_start >= end)
- return;
+ return false;
if (map->vma->vm_end <= start)
- return;
+ return false;
+
+ return true;
+}
+
+static void unmap_if_in_range(struct gntdev_grant_map *map,
+ unsigned long start, unsigned long end)
+{
+ unsigned long mstart, mend;
+ int err;
+
mstart = max(start, map->vma->vm_start);
mend = min(end, map->vma->vm_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -465,28 +510,47 @@ static void unmap_if_in_range(struct grant_map *map,
WARN_ON(err);
}
-static void mn_invl_range_start(struct mmu_notifier *mn,
+static int mn_invl_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ bool blockable)
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
+ int ret = 0;
+
+ /* TODO do we really need a mutex here? */
+ if (blockable)
+ mutex_lock(&priv->lock);
+ else if (!mutex_trylock(&priv->lock))
+ return -EAGAIN;
- mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
+ if (in_range(map, start, end)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
unmap_if_in_range(map, start, end);
}
list_for_each_entry(map, &priv->freeable_maps, next) {
+ if (in_range(map, start, end)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
unmap_if_in_range(map, start, end);
}
+
+out_unlock:
mutex_unlock(&priv->lock);
+
+ return ret;
}
static void mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err;
mutex_lock(&priv->lock);
@@ -531,6 +595,15 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->freeable_maps);
mutex_init(&priv->lock);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ priv->dmabuf_priv = gntdev_dmabuf_init();
+ if (IS_ERR(priv->dmabuf_priv)) {
+ ret = PTR_ERR(priv->dmabuf_priv);
+ kfree(priv);
+ return ret;
+ }
+#endif
+
if (use_ptemod) {
priv->mm = get_task_mm(current);
if (!priv->mm) {
@@ -548,6 +621,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
}
flip->private_data = priv;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ priv->dma_dev = gntdev_miscdev.this_device;
+
+ /*
+	 * The device is not spawned from a device tree, so arch_setup_dma_ops
+ * is not called, thus leaving the device with dummy DMA ops.
+ * Fix this by calling of_dma_configure() with a NULL node to set
+ * default DMA ops.
+ */
+ of_dma_configure(priv->dma_dev, NULL, true);
+#endif
pr_debug("priv %p\n", priv);
return 0;
@@ -556,21 +640,27 @@ static int gntdev_open(struct inode *inode, struct file *flip)
static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
pr_debug("priv %p\n", priv);
mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
- map = list_entry(priv->maps.next, struct grant_map, next);
+ map = list_entry(priv->maps.next,
+ struct gntdev_grant_map, next);
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
WARN_ON(!list_empty(&priv->freeable_maps));
mutex_unlock(&priv->lock);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ gntdev_dmabuf_fini(priv->dmabuf_priv);
+#endif
+
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
+
kfree(priv);
return 0;
}
@@ -579,7 +669,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
struct ioctl_gntdev_map_grant_ref __user *u)
{
struct ioctl_gntdev_map_grant_ref op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -589,11 +679,11 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
return -EINVAL;
err = -ENOMEM;
- map = gntdev_alloc_map(priv, op.count);
+ map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
if (!map)
return err;
- if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
+ if (unlikely(gntdev_account_mapped_pages(op.count))) {
pr_debug("can't map: over limit\n");
gntdev_put_map(NULL, map);
return err;
@@ -620,7 +710,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
struct ioctl_gntdev_unmap_grant_ref __user *u)
{
struct ioctl_gntdev_unmap_grant_ref op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err = -ENOENT;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -646,7 +736,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
{
struct ioctl_gntdev_get_offset_for_vaddr op;
struct vm_area_struct *vma;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int rv = -EINVAL;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -677,7 +767,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
struct ioctl_gntdev_unmap_notify op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int rc;
int out_flags;
unsigned int out_event;
@@ -962,6 +1052,20 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GRANT_COPY:
return gntdev_ioctl_grant_copy(priv, ptr);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
+ return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
+ return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
+ return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
+ return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
+#endif
+
default:
pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
return -ENOIOCTLCMD;
@@ -975,7 +1079,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
struct gntdev_priv *priv = flip->private_data;
int index = vma->vm_pgoff;
int count = vma_pages(vma);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
@@ -1032,7 +1136,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
}
}
- err = map_grant_pages(map);
+ err = gntdev_map_grant_pages(map);
if (err)
goto out_put_map;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index ba9f3eec2bd0..7bafa703a992 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -45,6 +45,9 @@
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+#include <linux/dma-mapping.h>
+#endif
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -57,6 +60,7 @@
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
+#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -769,29 +773,18 @@ void gnttab_free_auto_xlat_frames(void)
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
-/**
- * gnttab_alloc_pages - alloc pages suitable for grant mapping into
- * @nr_pages: number of pages to alloc
- * @pages: returns the pages
- */
-int gnttab_alloc_pages(int nr_pages, struct page **pages)
+int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
int i;
- int ret;
-
- ret = alloc_xenballooned_pages(nr_pages, pages);
- if (ret < 0)
- return ret;
for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
struct xen_page_foreign *foreign;
foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
- if (!foreign) {
- gnttab_free_pages(nr_pages, pages);
+ if (!foreign)
return -ENOMEM;
- }
+
set_page_private(pages[i], (unsigned long)foreign);
#endif
SetPagePrivate(pages[i]);
@@ -799,14 +792,30 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
return 0;
}
-EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
/**
- * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
- * @nr_pages; number of pages to free
- * @pages: the pages
+ * gnttab_alloc_pages - alloc pages suitable for grant mapping into
+ * @nr_pages: number of pages to alloc
+ * @pages: returns the pages
*/
-void gnttab_free_pages(int nr_pages, struct page **pages)
+int gnttab_alloc_pages(int nr_pages, struct page **pages)
+{
+ int ret;
+
+ ret = alloc_xenballooned_pages(nr_pages, pages);
+ if (ret < 0)
+ return ret;
+
+ ret = gnttab_pages_set_private(nr_pages, pages);
+ if (ret < 0)
+ gnttab_free_pages(nr_pages, pages);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+
+void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
int i;
@@ -818,10 +827,114 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
ClearPagePrivate(pages[i]);
}
}
+}
+EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
+
+/**
+ * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
+ * @nr_pages: number of pages to free
+ * @pages: the pages
+ */
+void gnttab_free_pages(int nr_pages, struct page **pages)
+{
+ gnttab_pages_clear_private(nr_pages, pages);
free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+/**
+ * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
+ * @args: arguments to the function
+ */
+int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
+{
+ unsigned long pfn, start_pfn;
+ size_t size;
+ int i, ret;
+
+ size = args->nr_pages << PAGE_SHIFT;
+ if (args->coherent)
+ args->vaddr = dma_alloc_coherent(args->dev, size,
+ &args->dev_bus_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ else
+ args->vaddr = dma_alloc_wc(args->dev, size,
+ &args->dev_bus_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!args->vaddr) {
+ pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
+ return -ENOMEM;
+ }
+
+ start_pfn = __phys_to_pfn(args->dev_bus_addr);
+ for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
+ pfn++, i++) {
+ struct page *page = pfn_to_page(pfn);
+
+ args->pages[i] = page;
+ args->frames[i] = xen_page_to_gfn(page);
+ xenmem_reservation_scrub_page(page);
+ }
+
+ xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
+
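+	/*
+	 * Give the backing frames back to Xen: the struct pages (and thus
+	 * the bus addresses) stay with the guest, while the now unbacked
+	 * PFNs can later be populated by grant mappings.
+	 */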
+ ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
+ if (ret != args->nr_pages) {
+ pr_debug("Failed to decrease reservation for DMA buffer\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ret = gnttab_pages_set_private(args->nr_pages, args->pages);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ gnttab_dma_free_pages(args);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
+
+/**
+ * gnttab_dma_free_pages - free DMAable pages
+ * @args: arguments to the function
+ */
+int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
+{
+ size_t size;
+ int i, ret;
+
+ gnttab_pages_clear_private(args->nr_pages, args->pages);
+
+ for (i = 0; i < args->nr_pages; i++)
+ args->frames[i] = page_to_xen_pfn(args->pages[i]);
+
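+	/*
+	 * Populate the physmap with real frames again before the memory
+	 * is handed back to the DMA allocator.
+	 */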
+ ret = xenmem_reservation_increase(args->nr_pages, args->frames);
+ if (ret != args->nr_pages) {
+		pr_debug("Failed to increase reservation for DMA buffer\n");
+ ret = -EFAULT;
+ } else {
+ ret = 0;
+ }
+
+ xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
+ args->frames);
+
+ size = args->nr_pages << PAGE_SHIFT;
+ if (args->coherent)
+ dma_free_coherent(args->dev, size,
+ args->vaddr, args->dev_bus_addr);
+ else
+ dma_free_wc(args->dev, size,
+ args->vaddr, args->dev_bus_addr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
+#endif
+
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index 262835ace35d..b8bf61abb65b 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -288,7 +288,6 @@ static int mc_queue_handle(uint32_t flags)
int ret = 0;
mc_op.cmd = XEN_MC_fetch;
- mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
do {
mc_op.u.mc_fetch.flags = flags;
@@ -358,7 +357,6 @@ static int bind_virq_for_mce(void)
/* Fetch physical CPU Numbers */
mc_op.cmd = XEN_MC_physcpuinfo;
- mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
ret = HYPERVISOR_mca(&mc_op);
if (ret) {
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
new file mode 100644
index 000000000000..084799c6180e
--- /dev/null
+++ b/drivers/xen/mem-reservation.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/******************************************************************************
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/memory.h>
+#include <xen/mem-reservation.h>
+
+/*
+ * Use one extent per PAGE_SIZE to avoid breaking the page down into
+ * multiple frames.
+ */
+#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct page *page = pages[i];
+ unsigned long pfn = page_to_pfn(page);
+
+ BUG_ON(!page);
+
+ /*
+		 * We don't support PV MMU when Linux and Xen are using
+ * different page granularity.
+ */
+ BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+ set_phys_to_machine(pfn, frames[i]);
+
+ /* Link back into the page tables if not highmem. */
+ if (!PageHighMem(page)) {
+ int ret;
+
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(frames[i], PAGE_KERNEL),
+ 0);
+ BUG_ON(ret);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct page *page = pages[i];
+ unsigned long pfn = page_to_pfn(page);
+
+ /*
+ * We don't support PV MMU when Linux and Xen are using
+ * different page granularity.
+ */
+ BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+ if (!PageHighMem(page)) {
+ int ret;
+
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ __pte_ma(0), 0);
+ BUG_ON(ret);
+ }
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ }
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
+#endif /* CONFIG_XEN_HAVE_PVMMU */
+
+/* @frames is an array of PFNs */
+int xenmem_reservation_increase(int count, xen_pfn_t *frames)
+{
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = EXTENT_ORDER,
+ .domid = DOMID_SELF
+ };
+
+ /* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
+ set_xen_guest_handle(reservation.extent_start, frames);
+ reservation.nr_extents = count;
+ return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
+
+/* @frames is an array of GFNs */
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
+{
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = EXTENT_ORDER,
+ .domid = DOMID_SELF
+ };
+
+ /* XENMEM_decrease_reservation requires a GFN */
+ set_xen_guest_handle(reservation.extent_start, frames);
+ reservation.nr_extents = count;
+ return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index b29f4e40851f..fbb9137c7d02 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -362,6 +362,12 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
default:
return AE_OK;
}
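+	/* Ignore processor objects whose ACPI ID has no physical CPU. */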
+ if (invalid_phys_cpuid(acpi_get_phys_id(handle,
+ acpi_type == ACPI_TYPE_DEVICE,
+ acpi_id))) {
+ pr_debug("CPU with ACPI ID %u is unavailable\n", acpi_id);
+ return AE_OK;
+ }
/* There are more ACPI Processor objects than in x2APIC or MADT.
	 * This can happen with incorrect ACPI SSDT declarations. */
if (acpi_id >= nr_acpi_bits) {
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index b437fccd4e62..294f35ce9e46 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
static_max = new_target;
else
static_max >>= PAGE_SHIFT - 10;
- target_diff = xen_pv_domain() ? 0
+ target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
: static_max - balloon_stats.target_pages;
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index e2f3e8b0fba9..14a3d4cbc2a7 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -654,9 +654,9 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_session *se_sess = nexus->tvn_se_sess;
struct vscsibk_pend *req;
- int tag, i;
+ int tag, cpu, i;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
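+	/* sbitmap_queue_get() also reports the CPU the tag was taken on. */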
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0) {
pr_err("Unable to obtain tag for vscsiif_request\n");
return ERR_PTR(-ENOMEM);
@@ -665,6 +665,7 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring
req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
for (i = 0; i < VSCSI_MAX_GRANTS; i++)
req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
@@ -1387,9 +1388,7 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd)
static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
- struct se_session *se_sess = se_cmd->se_sess;
-
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_cmd->se_sess, se_cmd);
}
static u32 scsiback_sess_get_index(struct se_session *se_sess)
@@ -1532,7 +1531,7 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
goto out_unlock;
}
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
VSCSI_DEFAULT_SESSION_TAGS,
sizeof(struct vscsibk_pend),
TARGET_PROT_NORMAL, name,
@@ -1587,7 +1586,7 @@ static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1743,9 +1742,7 @@ static void scsiback_port_unlink(struct se_portal_group *se_tpg,
}
static struct se_portal_group *
-scsiback_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+scsiback_make_tpg(struct se_wwn *wwn, const char *name)
{
struct scsiback_tport *tport = container_of(wwn,
struct scsiback_tport, tport_wwn);